-rw-r--r--ChangeLog13
-rw-r--r--ChangeLog.0721
-rw-r--r--ChangeLog.00858
-rw-r--r--Makefile176
-rw-r--r--bogus/bootstrap_symbols.h1
-rw-r--r--bogus/fast_tas.h1
-rw-r--r--bogus/hw_footprint.h1
-rw-r--r--bogus/mach_assert.h5
-rw-r--r--bogus/mach_counters.h1
-rw-r--r--bogus/mach_debug.h1
-rw-r--r--bogus/mach_fixpri.h1
-rw-r--r--bogus/mach_host.h1
-rw-r--r--bogus/mach_ipc_compat.h1
-rw-r--r--bogus/mach_ipc_debug.h1
-rw-r--r--bogus/mach_ipc_test.h1
-rw-r--r--bogus/mach_kdb.h5
-rw-r--r--bogus/mach_ldebug.h1
-rw-r--r--bogus/mach_lock_mon.h1
-rw-r--r--bogus/mach_machine_routines.h1
-rw-r--r--bogus/mach_mp_debug.h1
-rw-r--r--bogus/mach_pagemap.h1
-rw-r--r--bogus/mach_pcsample.h1
-rw-r--r--bogus/mach_ttd.h1
-rw-r--r--bogus/mach_vm_debug.h1
-rw-r--r--bogus/net_atm.h1
-rw-r--r--bogus/norma_device.h1
-rw-r--r--bogus/norma_ether.h1
-rw-r--r--bogus/norma_ipc.h1
-rw-r--r--bogus/norma_task.h1
-rw-r--r--bogus/norma_vm.h1
-rw-r--r--bogus/panic.c1
-rw-r--r--bogus/power_save.h1
-rw-r--r--bogus/simple_clock.h1
-rw-r--r--bogus/stat_time.h1
-rw-r--r--bogus/xpr_debug.h1
-rw-r--r--chips/atm.c302
-rw-r--r--chips/atmreg.h89
-rw-r--r--chips/audio.c733
-rw-r--r--chips/audio_config.h130
-rw-r--r--chips/audio_defs.h129
-rw-r--r--chips/bt431.c239
-rw-r--r--chips/bt431.h78
-rw-r--r--chips/bt455.c222
-rw-r--r--chips/bt455.h43
-rw-r--r--chips/bt459.c384
-rw-r--r--chips/bt459.h82
-rw-r--r--chips/bt478.c243
-rw-r--r--chips/bt478.h44
-rw-r--r--chips/build_font.c132
-rw-r--r--chips/busses.c230
-rw-r--r--chips/busses.h154
-rw-r--r--chips/cfb_hdw.c188
-rw-r--r--chips/cfb_misc.c249
-rw-r--r--chips/dc503.c189
-rw-r--r--chips/dc503.h69
-rw-r--r--chips/dtop.h241
-rw-r--r--chips/dtop_handlers.c441
-rw-r--r--chips/dtop_hdw.c644
-rw-r--r--chips/dz_7085.h153
-rw-r--r--chips/dz_defs.h65
-rw-r--r--chips/dz_hdw.c649
-rw-r--r--chips/eccreg.h110
-rw-r--r--chips/fb_hdw.c219
-rw-r--r--chips/fb_misc.c242
-rw-r--r--chips/fdc_82077.h525
-rw-r--r--chips/fdc_82077_hdw.c821
-rw-r--r--chips/frc.c150
-rw-r--r--chips/ims332.c312
-rw-r--r--chips/ims332.h137
-rw-r--r--chips/isdn_79c30.h165
-rw-r--r--chips/isdn_79c30_hdw.c602
-rw-r--r--chips/kernel_font.c3083
-rw-r--r--chips/kernel_font.data3108
-rw-r--r--chips/lance.c1570
-rw-r--r--chips/lance.h284
-rw-r--r--chips/lance_mapped.c417
-rw-r--r--chips/lk201.c695
-rw-r--r--chips/lk201.h241
-rw-r--r--chips/mc_clock.c516
-rw-r--r--chips/mc_clock.h147
-rw-r--r--chips/mouse.c321
-rw-r--r--chips/nc.c851
-rw-r--r--chips/nc.h232
-rw-r--r--chips/nw.h494
-rw-r--r--chips/nw_mk.c1323
-rw-r--r--chips/nw_mk.h97
-rw-r--r--chips/pm_defs.h57
-rw-r--r--chips/pm_hdw.c201
-rw-r--r--chips/pm_misc.c594
-rw-r--r--chips/scc_8530.h355
-rw-r--r--chips/scc_8530_hdw.c1145
-rw-r--r--chips/screen.c1103
-rw-r--r--chips/screen.h289
-rw-r--r--chips/screen_defs.h97
-rw-r--r--chips/screen_switch.c154
-rw-r--r--chips/screen_switch.h85
-rw-r--r--chips/serial_console.c694
-rw-r--r--chips/serial_defs.h53
-rw-r--r--chips/sfb_hdw.c253
-rw-r--r--chips/sfb_misc.c133
-rw-r--r--chips/spans.c114
-rw-r--r--chips/spans.h58
-rw-r--r--chips/tca100.c360
-rw-r--r--chips/tca100.h200
-rw-r--r--chips/tca100_if.c1377
-rw-r--r--chips/tca100_if.h89
-rw-r--r--chips/vs42x_rb.h267
-rw-r--r--chips/xcfb_hdw.c230
-rw-r--r--chips/xcfb_misc.c246
-rw-r--r--chips/xcfb_monitor.h56
-rw-r--r--ddb/db_access.c137
-rw-r--r--ddb/db_access.h73
-rw-r--r--ddb/db_aout.c507
-rw-r--r--ddb/db_break.c733
-rw-r--r--ddb/db_break.h86
-rw-r--r--ddb/db_command.c597
-rw-r--r--ddb/db_command.h70
-rw-r--r--ddb/db_cond.c181
-rw-r--r--ddb/db_examine.c506
-rw-r--r--ddb/db_expr.c391
-rw-r--r--ddb/db_expr.h26
-rw-r--r--ddb/db_ext_symtab.c123
-rw-r--r--ddb/db_input.c378
-rw-r--r--ddb/db_lex.c455
-rw-r--r--ddb/db_lex.h93
-rw-r--r--ddb/db_macro.c183
-rw-r--r--ddb/db_mp.c339
-rw-r--r--ddb/db_output.c240
-rw-r--r--ddb/db_output.h44
-rw-r--r--ddb/db_print.c511
-rw-r--r--ddb/db_print.h110
-rw-r--r--ddb/db_run.c441
-rw-r--r--ddb/db_run.h37
-rw-r--r--ddb/db_sym.c523
-rw-r--r--ddb/db_sym.h200
-rw-r--r--ddb/db_task_thread.c303
-rw-r--r--ddb/db_task_thread.h51
-rw-r--r--ddb/db_trap.c107
-rw-r--r--ddb/db_variables.c241
-rw-r--r--ddb/db_variables.h78
-rw-r--r--ddb/db_watch.c318
-rw-r--r--ddb/db_watch.h63
-rw-r--r--ddb/db_write_cmd.c109
-rw-r--r--ddb/nlist.h63
-rw-r--r--ddb/stab.h69
-rw-r--r--ddb/tr.h112
-rw-r--r--device/blkio.c238
-rw-r--r--device/buf.h102
-rw-r--r--device/chario.c1089
-rw-r--r--device/cirbuf.c298
-rw-r--r--device/cirbuf.h62
-rw-r--r--device/conf.h114
-rw-r--r--device/cons.c267
-rw-r--r--device/cons.h50
-rw-r--r--device/dev_forward.defs44
-rw-r--r--device/dev_hdr.h108
-rw-r--r--device/dev_lookup.c409
-rw-r--r--device/dev_master.h60
-rw-r--r--device/dev_name.c237
-rw-r--r--device/dev_pager.c741
-rw-r--r--device/device.srv29
-rw-r--r--device/device_init.c73
-rw-r--r--device/device_pager.srv44
-rw-r--r--device/device_port.h41
-rw-r--r--device/device_reply.cli27
-rw-r--r--device/device_types_kernel.h44
-rw-r--r--device/dk_label.c98
-rw-r--r--device/ds_routines.c1820
-rw-r--r--device/ds_routines.h52
-rw-r--r--device/errno.h45
-rw-r--r--device/if_ether.h57
-rw-r--r--device/if_hdr.h150
-rw-r--r--device/io_req.h141
-rw-r--r--device/memory_object_reply.cli27
-rw-r--r--device/net_io.c2168
-rw-r--r--device/net_io.h80
-rw-r--r--device/param.h49
-rw-r--r--device/subrs.c140
-rw-r--r--device/tty.h203
-rw-r--r--gensym.awk78
-rw-r--r--i386/Makefrag111
-rw-r--r--i386/bogus/aha.h1
-rw-r--r--i386/bogus/asc.h1
-rw-r--r--i386/bogus/at3c501.h1
-rw-r--r--i386/bogus/blit.h1
-rw-r--r--i386/bogus/com.h1
-rw-r--r--i386/bogus/de6c.h1
-rw-r--r--i386/bogus/eaha.h1
-rw-r--r--i386/bogus/evc.h1
-rw-r--r--i386/bogus/fd.h1
-rw-r--r--i386/bogus/fpe.h1
-rw-r--r--i386/bogus/hd.h1
-rw-r--r--i386/bogus/hpp.h1
-rw-r--r--i386/bogus/lpr.h1
-rw-r--r--i386/bogus/mach_machine_routines.h1
-rw-r--r--i386/bogus/ne.h1
-rw-r--r--i386/bogus/ns8390.h1
-rw-r--r--i386/bogus/nscsi.h1
-rw-r--r--i386/bogus/par.h1
-rw-r--r--i386/bogus/pc586.h1
-rw-r--r--i386/bogus/platforms.h1
-rw-r--r--i386/bogus/rc.h16
-rw-r--r--i386/bogus/sbic.h1
-rw-r--r--i386/bogus/sci.h1
-rw-r--r--i386/bogus/sii.h1
-rw-r--r--i386/bogus/siop.h1
-rw-r--r--i386/bogus/ul.h1
-rw-r--r--i386/bogus/wd.h1
-rw-r--r--i386/bogus/wt.h1
-rw-r--r--i386/dos/dos_buf.c29
-rw-r--r--i386/dos/dos_check_err.c36
-rw-r--r--i386/dos/dos_close.c39
-rw-r--r--i386/dos/dos_fstat.c75
-rw-r--r--i386/dos/dos_gettimeofday.c77
-rw-r--r--i386/dos/dos_io.h85
-rw-r--r--i386/dos/dos_open.c158
-rw-r--r--i386/dos/dos_read.c75
-rw-r--r--i386/dos/dos_rename.c80
-rw-r--r--i386/dos/dos_seek.c45
-rw-r--r--i386/dos/dos_tcgetattr.c51
-rw-r--r--i386/dos/dos_unlink.c48
-rw-r--r--i386/dos/dos_write.c75
-rw-r--r--i386/dos/i16/gdt.h32
-rw-r--r--i386/dos/i16/gdt_sels.h36
-rw-r--r--i386/dos/i16/i16_crt0.S274
-rw-r--r--i386/dos/i16/i16_crt0.h32
-rw-r--r--i386/dos/i16/i16_dos.h146
-rw-r--r--i386/dos/i16/i16_dos_mem.c182
-rw-r--r--i386/dos/i16/i16_exit.c50
-rw-r--r--i386/dos/i16/i16_main.c60
-rw-r--r--i386/dos/i16/i16_putchar.c36
-rw-r--r--i386/dos/i16/i16_vcpi.c564
-rw-r--r--i386/dos/i16/i16_xms.c175
-rw-r--r--i386/dos/i16/idt.h39
-rw-r--r--i386/dos/i16/phys_mem_sources.h28
-rw-r--r--i386/dos/putchar.c41
-rw-r--r--i386/i386/ast.h47
-rw-r--r--i386/i386/ast_check.c61
-rw-r--r--i386/i386/ast_types.h36
-rw-r--r--i386/i386/cpu_number.h49
-rw-r--r--i386/i386/cswitch.S142
-rw-r--r--i386/i386/db_disasm.c1423
-rw-r--r--i386/i386/db_interface.c558
-rw-r--r--i386/i386/db_machdep.h110
-rw-r--r--i386/i386/db_trace.c674
-rw-r--r--i386/i386/debug.h72
-rw-r--r--i386/i386/debug_i386.c147
-rw-r--r--i386/i386/debug_trace.S55
-rw-r--r--i386/i386/eflags.h35
-rwxr-xr-xi386/i386/fpe.b478
-rw-r--r--i386/i386/fpe.b_elf576
-rw-r--r--i386/i386/fpe_linkage.c359
-rw-r--r--i386/i386/fpu.c750
-rw-r--r--i386/i386/fpu.h130
-rw-r--r--i386/i386/gdt.c88
-rw-r--r--i386/i386/gdt.h72
-rw-r--r--i386/i386/hardclock.c99
-rw-r--r--i386/i386/i386asm.sym139
-rw-r--r--i386/i386/idt-gen.h47
-rw-r--r--i386/i386/idt.c59
-rw-r--r--i386/i386/idt_inittab.S121
-rw-r--r--i386/i386/io_emulate.c108
-rw-r--r--i386/i386/io_emulate.h43
-rw-r--r--i386/i386/io_map.c58
-rw-r--r--i386/i386/io_port.h43
-rw-r--r--i386/i386/iopb.c615
-rw-r--r--i386/i386/iopb.h62
-rw-r--r--i386/i386/ipl.h77
-rw-r--r--i386/i386/ktss.c61
-rw-r--r--i386/i386/ktss.h30
-rw-r--r--i386/i386/kttd_interface.c577
-rw-r--r--i386/i386/kttd_machdep.h59
-rw-r--r--i386/i386/ldt.c64
-rw-r--r--i386/i386/ldt.h66
-rw-r--r--i386/i386/lock.h130
-rw-r--r--i386/i386/locore.S1726
-rw-r--r--i386/i386/loose_ends.c82
-rw-r--r--i386/i386/mach_i386.srv27
-rw-r--r--i386/i386/mach_param.h31
-rw-r--r--i386/i386/machine_routines.h37
-rw-r--r--i386/i386/machspl.h29
-rw-r--r--i386/i386/mp_desc.c235
-rw-r--r--i386/i386/mp_desc.h84
-rw-r--r--i386/i386/pcb.c769
-rw-r--r--i386/i386/phys.c102
-rw-r--r--i386/i386/pic.c270
-rw-r--r--i386/i386/pic.h197
-rw-r--r--i386/i386/pio.h61
-rw-r--r--i386/i386/pit.c236
-rw-r--r--i386/i386/pit.h118
-rw-r--r--i386/i386/pmap.h30
-rw-r--r--i386/i386/proc_reg.h150
-rw-r--r--i386/i386/sched_param.h40
-rw-r--r--i386/i386/seg.c5
-rw-r--r--i386/i386/seg.h184
-rw-r--r--i386/i386/setjmp.h36
-rw-r--r--i386/i386/spl.S220
-rw-r--r--i386/i386/spl.h51
-rw-r--r--i386/i386/thread.h195
-rw-r--r--i386/i386/time_stamp.h30
-rw-r--r--i386/i386/timer.h71
-rw-r--r--i386/i386/trap.c1139
-rw-r--r--i386/i386/trap.h38
-rw-r--r--i386/i386/tss.h76
-rw-r--r--i386/i386/user_ldt.c389
-rw-r--r--i386/i386/user_ldt.h55
-rw-r--r--i386/i386/vm_param.h64
-rw-r--r--i386/i386/vm_tuning.h35
-rw-r--r--i386/i386/xpr.h32
-rw-r--r--i386/i386/zalloc.h29
-rw-r--r--i386/i386at/asm_startup.h42
-rw-r--r--i386/i386at/autoconf.c484
-rw-r--r--i386/i386at/blit.c948
-rw-r--r--i386/i386at/blitreg.h404
-rw-r--r--i386/i386at/blituser.h73
-rw-r--r--i386/i386at/blitvar.h116
-rw-r--r--i386/i386at/boothdr.S62
-rw-r--r--i386/i386at/com.c891
-rw-r--r--i386/i386at/comreg.h134
-rw-r--r--i386/i386at/conf.c399
-rw-r--r--i386/i386at/cons_conf.c50
-rw-r--r--i386/i386at/cram.h75
-rw-r--r--i386/i386at/dev_hdr.h43
-rw-r--r--i386/i386at/device_emul.h64
-rw-r--r--i386/i386at/disk.h186
-rw-r--r--i386/i386at/ds8390.h166
-rw-r--r--i386/i386at/eisa.h110
-rw-r--r--i386/i386at/fd.c1701
-rw-r--r--i386/i386at/fdreg.h368
-rw-r--r--i386/i386at/gpl/if_hpp.c690
-rw-r--r--i386/i386at/gpl/if_ns.c642
-rw-r--r--i386/i386at/gpl/if_nsreg.h169
-rw-r--r--i386/i386at/gpl/if_ul.c489
-rw-r--r--i386/i386at/gpl/if_wd.c581
-rw-r--r--i386/i386at/gpl/linux/block/cmd640.c738
-rw-r--r--i386/i386at/gpl/linux/block/floppy.c4100
-rw-r--r--i386/i386at/gpl/linux/block/genhd.c610
-rw-r--r--i386/i386at/gpl/linux/block/ide-cd.c2770
-rw-r--r--i386/i386at/gpl/linux/block/ide.c3087
-rw-r--r--i386/i386at/gpl/linux/block/ide.h655
-rw-r--r--i386/i386at/gpl/linux/block/ide_modes.h142
-rw-r--r--i386/i386at/gpl/linux/block/rz1000.c56
-rw-r--r--i386/i386at/gpl/linux/block/triton.c459
-rw-r--r--i386/i386at/gpl/linux/include/asm/bitops.h137
-rw-r--r--i386/i386at/gpl/linux/include/asm/byteorder.h90
-rw-r--r--i386/i386at/gpl/linux/include/asm/delay.h59
-rw-r--r--i386/i386at/gpl/linux/include/asm/dma.h271
-rw-r--r--i386/i386at/gpl/linux/include/asm/errno.h252
-rw-r--r--i386/i386at/gpl/linux/include/asm/fcntl.h64
-rw-r--r--i386/i386at/gpl/linux/include/asm/floppy.h56
-rw-r--r--i386/i386at/gpl/linux/include/asm/io.h213
-rw-r--r--i386/i386at/gpl/linux/include/asm/ioctl.h75
-rw-r--r--i386/i386at/gpl/linux/include/asm/irq.h346
-rw-r--r--i386/i386at/gpl/linux/include/asm/page.h64
-rw-r--r--i386/i386at/gpl/linux/include/asm/param.h20
-rw-r--r--i386/i386at/gpl/linux/include/asm/processor.h146
-rw-r--r--i386/i386at/gpl/linux/include/asm/ptrace.h52
-rw-r--r--i386/i386at/gpl/linux/include/asm/resource.h37
-rw-r--r--i386/i386at/gpl/linux/include/asm/segment.h347
-rw-r--r--i386/i386at/gpl/linux/include/asm/sigcontext.h29
-rw-r--r--i386/i386at/gpl/linux/include/asm/signal.h95
-rw-r--r--i386/i386at/gpl/linux/include/asm/socket.h31
-rw-r--r--i386/i386at/gpl/linux/include/asm/stat.h41
-rw-r--r--i386/i386at/gpl/linux/include/asm/statfs.h21
-rw-r--r--i386/i386at/gpl/linux/include/asm/string.h593
-rw-r--r--i386/i386at/gpl/linux/include/asm/system.h301
-rw-r--r--i386/i386at/gpl/linux/include/asm/termios.h304
-rw-r--r--i386/i386at/gpl/linux/include/asm/types.h109
-rw-r--r--i386/i386at/gpl/linux/include/asm/unistd.h322
-rw-r--r--i386/i386at/gpl/linux/include/linux/autoconf.h210
-rw-r--r--i386/i386at/gpl/linux/include/linux/binfmts.h60
-rw-r--r--i386/i386at/gpl/linux/include/linux/bios32.h61
-rw-r--r--i386/i386at/gpl/linux/include/linux/blk.h424
-rw-r--r--i386/i386at/gpl/linux/include/linux/blkdev.h56
-rw-r--r--i386/i386at/gpl/linux/include/linux/cdrom.h465
-rw-r--r--i386/i386at/gpl/linux/include/linux/config.h41
-rw-r--r--i386/i386at/gpl/linux/include/linux/delay.h14
-rw-r--r--i386/i386at/gpl/linux/include/linux/errno.h16
-rw-r--r--i386/i386at/gpl/linux/include/linux/etherdevice.h55
-rw-r--r--i386/i386at/gpl/linux/include/linux/fcntl.h6
-rw-r--r--i386/i386at/gpl/linux/include/linux/fd.h368
-rw-r--r--i386/i386at/gpl/linux/include/linux/fdreg.h127
-rw-r--r--i386/i386at/gpl/linux/include/linux/fs.h720
-rw-r--r--i386/i386at/gpl/linux/include/linux/genhd.h73
-rw-r--r--i386/i386at/gpl/linux/include/linux/hdreg.h171
-rw-r--r--i386/i386at/gpl/linux/include/linux/head.h19
-rw-r--r--i386/i386at/gpl/linux/include/linux/if.h167
-rw-r--r--i386/i386at/gpl/linux/include/linux/if_arp.h103
-rw-r--r--i386/i386at/gpl/linux/include/linux/if_ether.h96
-rw-r--r--i386/i386at/gpl/linux/include/linux/if_tr.h104
-rw-r--r--i386/i386at/gpl/linux/include/linux/igmp.h117
-rw-r--r--i386/i386at/gpl/linux/include/linux/in.h149
-rw-r--r--i386/i386at/gpl/linux/include/linux/inet.h52
-rw-r--r--i386/i386at/gpl/linux/include/linux/interrupt.h91
-rw-r--r--i386/i386at/gpl/linux/include/linux/ioctl.h7
-rw-r--r--i386/i386at/gpl/linux/include/linux/ioport.h31
-rw-r--r--i386/i386at/gpl/linux/include/linux/ip.h113
-rw-r--r--i386/i386at/gpl/linux/include/linux/ipc.h67
-rw-r--r--i386/i386at/gpl/linux/include/linux/kdev_t.h114
-rw-r--r--i386/i386at/gpl/linux/include/linux/kernel.h94
-rw-r--r--i386/i386at/gpl/linux/include/linux/kernel_stat.h32
-rw-r--r--i386/i386at/gpl/linux/include/linux/limits.h17
-rw-r--r--i386/i386at/gpl/linux/include/linux/linkage.h59
-rw-r--r--i386/i386at/gpl/linux/include/linux/locks.h66
-rw-r--r--i386/i386at/gpl/linux/include/linux/major.h119
-rw-r--r--i386/i386at/gpl/linux/include/linux/malloc.h16
-rw-r--r--i386/i386at/gpl/linux/include/linux/math_emu.h43
-rw-r--r--i386/i386at/gpl/linux/include/linux/mc146818rtc.h109
-rw-r--r--i386/i386at/gpl/linux/include/linux/minix_fs.h135
-rw-r--r--i386/i386at/gpl/linux/include/linux/minix_fs_sb.h25
-rw-r--r--i386/i386at/gpl/linux/include/linux/mm.h297
-rw-r--r--i386/i386at/gpl/linux/include/linux/module.h115
-rw-r--r--i386/i386at/gpl/linux/include/linux/mount.h30
-rw-r--r--i386/i386at/gpl/linux/include/linux/net.h132
-rw-r--r--i386/i386at/gpl/linux/include/linux/netdevice.h332
-rw-r--r--i386/i386at/gpl/linux/include/linux/nfs.h172
-rw-r--r--i386/i386at/gpl/linux/include/linux/notifier.h100
-rw-r--r--i386/i386at/gpl/linux/include/linux/pagemap.h131
-rw-r--r--i386/i386at/gpl/linux/include/linux/param.h6
-rw-r--r--i386/i386at/gpl/linux/include/linux/pci.h618
-rw-r--r--i386/i386at/gpl/linux/include/linux/personality.h51
-rw-r--r--i386/i386at/gpl/linux/include/linux/proc_fs.h269
-rw-r--r--i386/i386at/gpl/linux/include/linux/ptrace.h26
-rw-r--r--i386/i386at/gpl/linux/include/linux/quota.h219
-rw-r--r--i386/i386at/gpl/linux/include/linux/resource.h60
-rw-r--r--i386/i386at/gpl/linux/include/linux/route.h78
-rw-r--r--i386/i386at/gpl/linux/include/linux/sched.h492
-rw-r--r--i386/i386at/gpl/linux/include/linux/scsi.h198
-rw-r--r--i386/i386at/gpl/linux/include/linux/scsicam.h17
-rw-r--r--i386/i386at/gpl/linux/include/linux/sem.h112
-rw-r--r--i386/i386at/gpl/linux/include/linux/signal.h6
-rw-r--r--i386/i386at/gpl/linux/include/linux/skbuff.h474
-rw-r--r--i386/i386at/gpl/linux/include/linux/smp.h54
-rw-r--r--i386/i386at/gpl/linux/include/linux/socket.h126
-rw-r--r--i386/i386at/gpl/linux/include/linux/sockios.h91
-rw-r--r--i386/i386at/gpl/linux/include/linux/stat.h53
-rw-r--r--i386/i386at/gpl/linux/include/linux/stddef.h15
-rw-r--r--i386/i386at/gpl/linux/include/linux/string.h44
-rw-r--r--i386/i386at/gpl/linux/include/linux/tasks.h19
-rw-r--r--i386/i386at/gpl/linux/include/linux/tcp.h71
-rw-r--r--i386/i386at/gpl/linux/include/linux/termios.h7
-rw-r--r--i386/i386at/gpl/linux/include/linux/time.h50
-rw-r--r--i386/i386at/gpl/linux/include/linux/timer.h101
-rw-r--r--i386/i386at/gpl/linux/include/linux/tqueue.h163
-rw-r--r--i386/i386at/gpl/linux/include/linux/trdevice.h40
-rw-r--r--i386/i386at/gpl/linux/include/linux/tty.h340
-rw-r--r--i386/i386at/gpl/linux/include/linux/tty_driver.h189
-rw-r--r--i386/i386at/gpl/linux/include/linux/tty_ldisc.h46
-rw-r--r--i386/i386at/gpl/linux/include/linux/types.h72
-rw-r--r--i386/i386at/gpl/linux/include/linux/uio.h25
-rw-r--r--i386/i386at/gpl/linux/include/linux/unistd.h11
-rw-r--r--i386/i386at/gpl/linux/include/linux/utsname.h35
-rw-r--r--i386/i386at/gpl/linux/include/linux/version.h8
-rw-r--r--i386/i386at/gpl/linux/include/linux/vfs.h6
-rw-r--r--i386/i386at/gpl/linux/include/linux/vm86.h109
-rw-r--r--i386/i386at/gpl/linux/include/linux/wait.h38
-rw-r--r--i386/i386at/gpl/linux/include/net/af_unix.h4
-rw-r--r--i386/i386at/gpl/linux/include/net/arp.h17
-rw-r--r--i386/i386at/gpl/linux/include/net/atalkcall.h2
-rw-r--r--i386/i386at/gpl/linux/include/net/ax25.h246
-rw-r--r--i386/i386at/gpl/linux/include/net/ax25call.h2
-rw-r--r--i386/i386at/gpl/linux/include/net/checksum.h25
-rw-r--r--i386/i386at/gpl/linux/include/net/datalink.h16
-rw-r--r--i386/i386at/gpl/linux/include/net/icmp.h40
-rw-r--r--i386/i386at/gpl/linux/include/net/ip.h154
-rw-r--r--i386/i386at/gpl/linux/include/net/ip_alias.h23
-rw-r--r--i386/i386at/gpl/linux/include/net/ip_forward.h10
-rw-r--r--i386/i386at/gpl/linux/include/net/ipip.h4
-rw-r--r--i386/i386at/gpl/linux/include/net/ipx.h85
-rw-r--r--i386/i386at/gpl/linux/include/net/ipxcall.h2
-rw-r--r--i386/i386at/gpl/linux/include/net/netlink.h26
-rw-r--r--i386/i386at/gpl/linux/include/net/netrom.h139
-rw-r--r--i386/i386at/gpl/linux/include/net/nrcall.h2
-rw-r--r--i386/i386at/gpl/linux/include/net/p8022.h2
-rw-r--r--i386/i386at/gpl/linux/include/net/p8022call.h2
-rw-r--r--i386/i386at/gpl/linux/include/net/protocol.h55
-rw-r--r--i386/i386at/gpl/linux/include/net/psnap.h2
-rw-r--r--i386/i386at/gpl/linux/include/net/psnapcall.h2
-rw-r--r--i386/i386at/gpl/linux/include/net/rarp.h12
-rw-r--r--i386/i386at/gpl/linux/include/net/raw.h34
-rw-r--r--i386/i386at/gpl/linux/include/net/route.h280
-rw-r--r--i386/i386at/gpl/linux/include/net/slhc.h6
-rw-r--r--i386/i386at/gpl/linux/include/net/snmp.h107
-rw-r--r--i386/i386at/gpl/linux/include/net/sock.h486
-rw-r--r--i386/i386at/gpl/linux/include/net/tcp.h329
-rw-r--r--i386/i386at/gpl/linux/include/net/udp.h52
-rw-r--r--i386/i386at/gpl/linux/linux_autoirq.c161
-rw-r--r--i386/i386at/gpl/linux/linux_block.c2579
-rw-r--r--i386/i386at/gpl/linux/linux_dma.c52
-rw-r--r--i386/i386at/gpl/linux/linux_emul.h32
-rw-r--r--i386/i386at/gpl/linux/linux_init.c412
-rw-r--r--i386/i386at/gpl/linux/linux_irq.c246
-rw-r--r--i386/i386at/gpl/linux/linux_kmem.c481
-rw-r--r--i386/i386at/gpl/linux/linux_misc.c303
-rw-r--r--i386/i386at/gpl/linux/linux_net.c520
-rw-r--r--i386/i386at/gpl/linux/linux_port.c79
-rw-r--r--i386/i386at/gpl/linux/linux_printk.c47
-rw-r--r--i386/i386at/gpl/linux/linux_sched.c237
-rw-r--r--i386/i386at/gpl/linux/linux_soft.c74
-rw-r--r--i386/i386at/gpl/linux/linux_timer.c190
-rw-r--r--i386/i386at/gpl/linux/linux_version.c33
-rw-r--r--i386/i386at/gpl/linux/linux_vsprintf.c341
-rw-r--r--i386/i386at/gpl/linux/net/3c501.c860
-rw-r--r--i386/i386at/gpl/linux/net/3c503.c627
-rw-r--r--i386/i386at/gpl/linux/net/3c503.h91
-rw-r--r--i386/i386at/gpl/linux/net/3c505.c1518
-rw-r--r--i386/i386at/gpl/linux/net/3c505.h245
-rw-r--r--i386/i386at/gpl/linux/net/3c507.c923
-rw-r--r--i386/i386at/gpl/linux/net/3c509.c739
-rw-r--r--i386/i386at/gpl/linux/net/3c59x.c1066
-rw-r--r--i386/i386at/gpl/linux/net/8390.c727
-rw-r--r--i386/i386at/gpl/linux/net/8390.h168
-rw-r--r--i386/i386at/gpl/linux/net/Space.c400
-rw-r--r--i386/i386at/gpl/linux/net/ac3200.c385
-rw-r--r--i386/i386at/gpl/linux/net/apricot.c1046
-rw-r--r--i386/i386at/gpl/linux/net/at1700.c677
-rw-r--r--i386/i386at/gpl/linux/net/atp.c787
-rw-r--r--i386/i386at/gpl/linux/net/atp.h264
-rw-r--r--i386/i386at/gpl/linux/net/de4x5.c2788
-rw-r--r--i386/i386at/gpl/linux/net/de4x5.h645
-rw-r--r--i386/i386at/gpl/linux/net/de600.c853
-rw-r--r--i386/i386at/gpl/linux/net/de620.c1045
-rw-r--r--i386/i386at/gpl/linux/net/de620.h117
-rw-r--r--i386/i386at/gpl/linux/net/depca.c1901
-rw-r--r--i386/i386at/gpl/linux/net/depca.h185
-rw-r--r--i386/i386at/gpl/linux/net/dev.c1413
-rw-r--r--i386/i386at/gpl/linux/net/e2100.c456
-rw-r--r--i386/i386at/gpl/linux/net/eepro.c1169
-rw-r--r--i386/i386at/gpl/linux/net/eexpress.c1034
-rw-r--r--i386/i386at/gpl/linux/net/eth16i.c1214
-rw-r--r--i386/i386at/gpl/linux/net/ewrk3.c1933
-rw-r--r--i386/i386at/gpl/linux/net/ewrk3.h322
-rw-r--r--i386/i386at/gpl/linux/net/hp-plus.c483
-rw-r--r--i386/i386at/gpl/linux/net/hp.c451
-rw-r--r--i386/i386at/gpl/linux/net/hp100.c1144
-rw-r--r--i386/i386at/gpl/linux/net/hp100.h374
-rw-r--r--i386/i386at/gpl/linux/net/i82586.h408
-rw-r--r--i386/i386at/gpl/linux/net/iow.h6
-rw-r--r--i386/i386at/gpl/linux/net/lance.c1129
-rw-r--r--i386/i386at/gpl/linux/net/ne.c733
-rw-r--r--i386/i386at/gpl/linux/net/net_init.c380
-rw-r--r--i386/i386at/gpl/linux/net/ni52.c1110
-rw-r--r--i386/i386at/gpl/linux/net/ni52.h284
-rw-r--r--i386/i386at/gpl/linux/net/ni65.c648
-rw-r--r--i386/i386at/gpl/linux/net/ni65.h130
-rw-r--r--i386/i386at/gpl/linux/net/seeq8005.c760
-rw-r--r--i386/i386at/gpl/linux/net/seeq8005.h156
-rw-r--r--i386/i386at/gpl/linux/net/sk_g16.c2111
-rw-r--r--i386/i386at/gpl/linux/net/sk_g16.h171
-rw-r--r--i386/i386at/gpl/linux/net/smc-ultra.c419
-rw-r--r--i386/i386at/gpl/linux/net/tulip.c782
-rw-r--r--i386/i386at/gpl/linux/net/wavelan.c2526
-rw-r--r--i386/i386at/gpl/linux/net/wavelan.h252
-rw-r--r--i386/i386at/gpl/linux/net/wd.c513
-rw-r--r--i386/i386at/gpl/linux/net/znet.c746
-rw-r--r--i386/i386at/gpl/linux/pci/bios32.c460
-rw-r--r--i386/i386at/gpl/linux/pci/pci.c915
-rw-r--r--i386/i386at/gpl/linux/scsi/53c7,8xx.c6381
-rw-r--r--i386/i386at/gpl/linux/scsi/53c7,8xx.h1584
-rw-r--r--i386/i386at/gpl/linux/scsi/53c8xx_d.h2677
-rw-r--r--i386/i386at/gpl/linux/scsi/53c8xx_u.h97
-rw-r--r--i386/i386at/gpl/linux/scsi/AM53C974.c2249
-rw-r--r--i386/i386at/gpl/linux/scsi/AM53C974.h419
-rw-r--r--i386/i386at/gpl/linux/scsi/BusLogic.c2779
-rw-r--r--i386/i386at/gpl/linux/scsi/BusLogic.h977
-rw-r--r--i386/i386at/gpl/linux/scsi/NCR5380.h363
-rw-r--r--i386/i386at/gpl/linux/scsi/NCR5380.src3035
-rw-r--r--i386/i386at/gpl/linux/scsi/NCR53c406a.c1079
-rw-r--r--i386/i386at/gpl/linux/scsi/NCR53c406a.h83
-rw-r--r--i386/i386at/gpl/linux/scsi/advansys.c9061
-rw-r--r--i386/i386at/gpl/linux/scsi/advansys.h131
-rw-r--r--i386/i386at/gpl/linux/scsi/aha152x.c2985
-rw-r--r--i386/i386at/gpl/linux/scsi/aha152x.h373
-rw-r--r--i386/i386at/gpl/linux/scsi/aha1542.c1323
-rw-r--r--i386/i386at/gpl/linux/scsi/aha1542.h177
-rw-r--r--i386/i386at/gpl/linux/scsi/aha1740.c528
-rw-r--r--i386/i386at/gpl/linux/scsi/aha1740.h193
-rw-r--r--i386/i386at/gpl/linux/scsi/aic7xxx.c4645
-rw-r--r--i386/i386at/gpl/linux/scsi/aic7xxx.h67
-rw-r--r--i386/i386at/gpl/linux/scsi/aic7xxx_proc.src271
-rw-r--r--i386/i386at/gpl/linux/scsi/aic7xxx_reg.h746
-rw-r--r--i386/i386at/gpl/linux/scsi/aic7xxx_seq.h374
-rw-r--r--i386/i386at/gpl/linux/scsi/constants.c649
-rw-r--r--i386/i386at/gpl/linux/scsi/constants.h6
-rw-r--r--i386/i386at/gpl/linux/scsi/eata.c1099
-rw-r--r--i386/i386at/gpl/linux/scsi/eata.h41
-rw-r--r--i386/i386at/gpl/linux/scsi/eata_dma.c1375
-rw-r--r--i386/i386at/gpl/linux/scsi/eata_dma.h119
-rw-r--r--i386/i386at/gpl/linux/scsi/eata_dma_proc.h260
-rw-r--r--i386/i386at/gpl/linux/scsi/eata_dma_proc.src488
-rw-r--r--i386/i386at/gpl/linux/scsi/eata_generic.h397
-rw-r--r--i386/i386at/gpl/linux/scsi/eata_pio.c1051
-rw-r--r--i386/i386at/gpl/linux/scsi/eata_pio.h116
-rw-r--r--i386/i386at/gpl/linux/scsi/eata_pio_proc.src150
-rw-r--r--i386/i386at/gpl/linux/scsi/fdomain.c2016
-rw-r--r--i386/i386at/gpl/linux/scsi/fdomain.h61
-rw-r--r--i386/i386at/gpl/linux/scsi/g_NCR5380.c588
-rw-r--r--i386/i386at/gpl/linux/scsi/g_NCR5380.h166
-rw-r--r--i386/i386at/gpl/linux/scsi/hosts.c436
-rw-r--r--i386/i386at/gpl/linux/scsi/hosts.h409
-rw-r--r--i386/i386at/gpl/linux/scsi/in2000.c731
-rw-r--r--i386/i386at/gpl/linux/scsi/in2000.h122
-rw-r--r--i386/i386at/gpl/linux/scsi/pas16.c553
-rw-r--r--i386/i386at/gpl/linux/scsi/pas16.h193
-rw-r--r--i386/i386at/gpl/linux/scsi/qlogic.c678
-rw-r--r--i386/i386at/gpl/linux/scsi/qlogic.h40
-rw-r--r--i386/i386at/gpl/linux/scsi/scsi.c3204
-rw-r--r--i386/i386at/gpl/linux/scsi/scsi.h618
-rw-r--r--i386/i386at/gpl/linux/scsi/scsi_debug.c710
-rw-r--r--i386/i386at/gpl/linux/scsi/scsi_debug.h30
-rw-r--r--i386/i386at/gpl/linux/scsi/scsi_ioctl.c397
-rw-r--r--i386/i386at/gpl/linux/scsi/scsi_ioctl.h21
-rw-r--r--i386/i386at/gpl/linux/scsi/scsi_proc.c317
-rw-r--r--i386/i386at/gpl/linux/scsi/scsicam.c214
-rw-r--r--i386/i386at/gpl/linux/scsi/sd.c1543
-rw-r--r--i386/i386at/gpl/linux/scsi/sd.h65
-rw-r--r--i386/i386at/gpl/linux/scsi/sd_ioctl.c94
-rw-r--r--i386/i386at/gpl/linux/scsi/seagate.c1744
-rw-r--r--i386/i386at/gpl/linux/scsi/seagate.h139
-rw-r--r--i386/i386at/gpl/linux/scsi/sr.c1191
-rw-r--r--i386/i386at/gpl/linux/scsi/sr.h40
-rw-r--r--i386/i386at/gpl/linux/scsi/sr_ioctl.c489
-rw-r--r--i386/i386at/gpl/linux/scsi/t128.c413
-rw-r--r--i386/i386at/gpl/linux/scsi/t128.h176
-rw-r--r--i386/i386at/gpl/linux/scsi/u14-34f.c1044
-rw-r--r--i386/i386at/gpl/linux/scsi/u14-34f.h38
-rw-r--r--i386/i386at/gpl/linux/scsi/ultrastor.c1160
-rw-r--r--i386/i386at/gpl/linux/scsi/ultrastor.h102
-rw-r--r--i386/i386at/gpl/linux/scsi/wd7000.c1237
-rw-r--r--i386/i386at/gpl/linux/scsi/wd7000.h55
-rw-r--r--i386/i386at/i386at_ds_routines.c270
-rw-r--r--i386/i386at/i8250.h129
-rw-r--r--i386/i386at/i82586.h264
-rw-r--r--i386/i386at/idt.h37
-rw-r--r--i386/i386at/if_3c501.c1240
-rw-r--r--i386/i386at/if_3c501.h175
-rw-r--r--i386/i386at/if_3c503.h116
-rw-r--r--i386/i386at/if_de6c.c1777
-rw-r--r--i386/i386at/if_de6c.h113
-rw-r--r--i386/i386at/if_de6s.S278
-rw-r--r--i386/i386at/if_ne.c1081
-rw-r--r--i386/i386at/if_nereg.h66
-rw-r--r--i386/i386at/if_ns8390.c2578
-rw-r--r--i386/i386at/if_ns8390.h203
-rw-r--r--i386/i386at/if_par.c456
-rw-r--r--i386/i386at/if_par.h36
-rw-r--r--i386/i386at/if_pc586.c2076
-rw-r--r--i386/i386at/if_pc586.h139
-rw-r--r--i386/i386at/if_wd8003.h315
-rw-r--r--i386/i386at/immc.c77
-rw-r--r--i386/i386at/int_init.c39
-rw-r--r--i386/i386at/interrupt.S48
-rw-r--r--i386/i386at/iopl.c287
-rw-r--r--i386/i386at/kd.c2990
-rw-r--r--i386/i386at/kd.h663
-rw-r--r--i386/i386at/kd_event.c560
-rw-r--r--i386/i386at/kd_mouse.c899
-rw-r--r--i386/i386at/kd_queue.c115
-rw-r--r--i386/i386at/kd_queue.h79
-rw-r--r--i386/i386at/kdasm.S145
-rw-r--r--i386/i386at/kdsoft.h201
-rw-r--r--i386/i386at/lpr.c419
-rw-r--r--i386/i386at/lprreg.h33
-rw-r--r--i386/i386at/model_dep.c651
-rw-r--r--i386/i386at/nfd.c1484
-rw-r--r--i386/i386at/nfdreg.h110
-rw-r--r--i386/i386at/nhd.c1430
-rw-r--r--i386/i386at/nhdreg.h120
-rw-r--r--i386/i386at/phys_mem_grab_page.c1
-rw-r--r--i386/i386at/pic_isa.c68
-rw-r--r--i386/i386at/rtc.c237
-rw-r--r--i386/i386at/rtc.h137
-rw-r--r--i386/imps/Makefile.in66
-rw-r--r--i386/imps/apic.h96
-rw-r--r--i386/imps/cpu_number.h50
-rw-r--r--i386/imps/cpus.h24
-rw-r--r--i386/imps/imps.c51
-rw-r--r--i386/imps/impsasm.sym29
-rw-r--r--i386/include/Makefile.in36
-rw-r--r--i386/include/Makerules47
-rw-r--r--i386/include/mach/i386/asm.h114
-rw-r--r--i386/include/mach/i386/bios.h64
-rw-r--r--i386/include/mach/i386/boolean.h37
-rw-r--r--i386/include/mach/i386/code16.h29
-rw-r--r--i386/include/mach/i386/cthreads.h58
-rw-r--r--i386/include/mach/i386/debug_reg.h196
-rw-r--r--i386/include/mach/i386/disk.h120
-rw-r--r--i386/include/mach/i386/dpmi.h537
-rw-r--r--i386/include/mach/i386/eflags.h49
-rw-r--r--i386/include/mach/i386/exception.h85
-rw-r--r--i386/include/mach/i386/exec/elf.h36
-rw-r--r--i386/include/mach/i386/far_ptr.h35
-rw-r--r--i386/include/mach/i386/fp_reg.h108
-rw-r--r--i386/include/mach/i386/ioccom.h32
-rw-r--r--i386/include/mach/i386/kern_return.h40
-rw-r--r--i386/include/mach/i386/mach_i386.defs68
-rw-r--r--i386/include/mach/i386/mach_i386_types.h49
-rwxr-xr-xi386/include/mach/i386/machine_types.defs71
-rw-r--r--i386/include/mach/i386/multiboot.h182
-rw-r--r--i386/include/mach/i386/paging.h134
-rw-r--r--i386/include/mach/i386/pio.h70
-rw-r--r--i386/include/mach/i386/pmode.h93
-rw-r--r--i386/include/mach/i386/proc_reg.h340
-rw-r--r--i386/include/mach/i386/rpc.h9
-rw-r--r--i386/include/mach/i386/seg.h190
-rw-r--r--i386/include/mach/i386/syscall_sw.h47
-rw-r--r--i386/include/mach/i386/thread_status.h147
-rw-r--r--i386/include/mach/i386/time_stamp.h29
-rw-r--r--i386/include/mach/i386/trap.h60
-rw-r--r--i386/include/mach/i386/tss.h67
-rw-r--r--i386/include/mach/i386/vcpi.h37
-rw-r--r--i386/include/mach/i386/vm_param.h73
-rw-r--r--i386/include/mach/i386/vm_types.h108
-rw-r--r--i386/include/mach/proc_ops.h40
-rw-r--r--i386/include/mach/sa/stdarg.h46
-rw-r--r--i386/include/mach/sa/sys/varargs.h41
-rw-r--r--i386/include/mach/setjmp.h58
-rw-r--r--i386/intel/pmap.c2563
-rw-r--r--i386/intel/pmap.h401
-rw-r--r--i386/intel/read_fault.c178
-rw-r--r--i386/pc/Makerules30
-rw-r--r--i386/pc/NOTES34
-rw-r--r--i386/pc/debug.h71
-rw-r--r--i386/pc/exit.c32
-rw-r--r--i386/pc/gdt.h32
-rw-r--r--i386/pc/gdt_sels.h34
-rw-r--r--i386/pc/i16/i16_a20.c144
-rw-r--r--i386/pc/i16/i16_a20.h32
-rw-r--r--i386/pc/i16/i16_bios.h98
-rw-r--r--i386/pc/i16/i16_exit.c37
-rw-r--r--i386/pc/i16/i16_ext_mem.c151
-rw-r--r--i386/pc/i16/i16_init.c67
-rw-r--r--i386/pc/i16/i16_main.c38
-rw-r--r--i386/pc/i16/i16_pic.c66
-rw-r--r--i386/pc/i16/i16_putchar.c36
-rw-r--r--i386/pc/i16/i16_raw.c265
-rw-r--r--i386/pc/i16/i16_raw_test_a20.S35
-rw-r--r--i386/pc/i16/i16_real_int.S84
-rw-r--r--i386/pc/i16/i16_switch.h30
-rw-r--r--i386/pc/i16/phys_mem_collect.c34
-rw-r--r--i386/pc/i16/phys_mem_sources.h25
-rw-r--r--i386/pc/i16/raw_exit.c41
-rw-r--r--i386/pc/i16/raw_real_int.c46
-rw-r--r--i386/pc/ipl.h74
-rw-r--r--i386/pc/irq.h49
-rw-r--r--i386/pc/irq_list.h43
-rw-r--r--i386/pc/pc_asm.sym42
-rw-r--r--i386/pc/phys_mem.h55
-rw-r--r--i386/pc/phys_mem_add.c62
-rw-r--r--i386/pc/pic.c283
-rw-r--r--i386/pc/pic.h204
-rw-r--r--i386/pc/putchar.c96
-rw-r--r--i386/pc/real.h60
-rw-r--r--i386/pc/real_tss.c59
-rw-r--r--i386/pc/real_tss.h34
-rw-r--r--i386/pc/real_tss_def.S35
-rw-r--r--i386/pc/rv86/config.h29
-rw-r--r--i386/pc/rv86/gdt_sels.h60
-rw-r--r--i386/pc/rv86/idt_irq_init.c40
-rw-r--r--i386/pc/rv86/rv86_real_int.c276
-rw-r--r--i386/pc/rv86/rv86_real_int_asm.S119
-rw-r--r--i386/pc/rv86/rv86_reflect_irq.S113
-rw-r--r--i386/pc/rv86/rv86_trap_handler.S167
-rw-r--r--i386/pc/rv86/trap_handler.S39
-rw-r--r--i386/util/Makerules38
-rw-r--r--i386/util/NOTES35
-rw-r--r--i386/util/anno.c92
-rw-r--r--i386/util/anno.h92
-rw-r--r--i386/util/cpu.h57
-rw-r--r--i386/util/cpu_subs.h26
-rw-r--r--i386/util/cpu_tables_init.c37
-rw-r--r--i386/util/cpu_tables_load.c70
-rw-r--r--i386/util/crtn.S33
-rw-r--r--i386/util/debug.h50
-rw-r--r--i386/util/gdt.c80
-rw-r--r--i386/util/gdt.h88
-rw-r--r--i386/util/gdt_sels.h45
-rw-r--r--i386/util/i16/debug.h42
-rw-r--r--i386/util/i16/i16.h58
-rw-r--r--i386/util/i16/i16_die.c34
-rw-r--r--i386/util/i16/i16_gdt_init_temp.c53
-rw-r--r--i386/util/i16/i16_nanodelay.c35
-rw-r--r--i386/util/i16/i16_puts.c34
-rw-r--r--i386/util/i16/i16_writehex.c57
-rw-r--r--i386/util/i386_asm.sym36
-rw-r--r--i386/util/idt.c50
-rw-r--r--i386/util/idt.h43
-rw-r--r--i386/util/idt_inittab.S128
-rw-r--r--i386/util/idt_inittab.h57
-rw-r--r--i386/util/ldt.h45
-rw-r--r--i386/util/trap.h100
-rw-r--r--i386/util/trap_asm.sym45
-rw-r--r--i386/util/trap_dump.c63
-rw-r--r--i386/util/trap_dump_die.c12
-rw-r--r--i386/util/trap_handler.S32
-rw-r--r--i386/util/trap_return.S39
-rw-r--r--i386/util/tss.c45
-rw-r--r--i386/util/tss.h30
-rw-r--r--i386/util/tss_dump.c44
-rw-r--r--i386/util/vm_param.h89
-rw-r--r--include/device/audio_status.h168
-rw-r--r--include/device/bpf.h249
-rw-r--r--include/device/device.defs151
-rw-r--r--include/device/device_reply.defs104
-rw-r--r--include/device/device_request.defs81
-rw-r--r--include/device/device_types.defs64
-rw-r--r--include/device/device_types.h133
-rw-r--r--include/device/disk_status.h306
-rw-r--r--include/device/net_status.h187
-rw-r--r--include/device/tape_status.h128
-rw-r--r--include/device/tty_status.h127
-rw-r--r--include/mach.h39
-rw-r--r--include/mach/alert.h37
-rw-r--r--include/mach/boolean.h63
-rw-r--r--include/mach/boot.h93
-rw-r--r--include/mach/bootstrap.defs49
-rw-r--r--include/mach/cthreads.h424
-rw-r--r--include/mach/default_pager.defs65
-rw-r--r--include/mach/default_pager_helper.defs53
-rw-r--r--include/mach/default_pager_types.defs44
-rw-r--r--include/mach/default_pager_types.h58
-rw-r--r--include/mach/error.h95
-rw-r--r--include/mach/errorlib.h69
-rw-r--r--include/mach/exc.defs47
-rw-r--r--include/mach/exception.h58
-rw-r--r--include/mach/exec/a.out.h68
-rw-r--r--include/mach/exec/elf.h298
-rw-r--r--include/mach/exec/exec.h130
-rw-r--r--include/mach/flick_mach3.h75
-rw-r--r--include/mach/flick_mach3_glue.h218
-rw-r--r--include/mach/flick_mach3mig_glue.h449
-rw-r--r--include/mach/host_info.h93
-rw-r--r--include/mach/inline.h27
-rw-r--r--include/mach/kern_return.h160
-rw-r--r--include/mach/lmm.h60
-rw-r--r--include/mach/mach.defs958
-rw-r--r--include/mach/mach4.defs82
-rw-r--r--include/mach/mach_host.defs379
-rw-r--r--include/mach/mach_norma.defs120
-rw-r--r--include/mach/mach_param.h53
-rw-r--r--include/mach/mach_port.defs346
-rw-r--r--include/mach/mach_traps.h132
-rw-r--r--include/mach/mach_types.defs249
-rw-r--r--include/mach/mach_types.h93
-rw-r--r--include/mach/machine.h267
-rw-r--r--include/mach/macro_help.h18
-rw-r--r--include/mach/memory_object.defs313
-rw-r--r--include/mach/memory_object.h83
-rw-r--r--include/mach/memory_object_default.defs121
-rw-r--r--include/mach/message.h750
-rw-r--r--include/mach/mig_errors.h105
-rw-r--r--include/mach/mig_support.h67
-rw-r--r--include/mach/msg_type.h42
-rw-r--r--include/mach/multiboot.h164
-rw-r--r--include/mach/norma_special_ports.h84
-rw-r--r--include/mach/norma_task.defs43
-rw-r--r--include/mach/notify.defs95
-rw-r--r--include/mach/notify.h122
-rw-r--r--include/mach/pc_sample.h66
-rw-r--r--include/mach/policy.h45
-rw-r--r--include/mach/port.h189
-rw-r--r--include/mach/proc_ops.h51
-rw-r--r--include/mach/processor_info.h104
-rw-r--r--include/mach/profil.h212
-rw-r--r--include/mach/profilparam.h62
-rw-r--r--include/mach/rpc.h35
-rw-r--r--include/mach/sa/a.out.h28
-rw-r--r--include/mach/sa/alloca.h25
-rw-r--r--include/mach/sa/assert.h44
-rw-r--r--include/mach/sa/ctype.h59
-rw-r--r--include/mach/sa/errno.h98
-rw-r--r--include/mach/sa/fcntl.h22
-rw-r--r--include/mach/sa/limits.h32
-rw-r--r--include/mach/sa/malloc.h51
-rw-r--r--include/mach/sa/memory.h25
-rw-r--r--include/mach/sa/stddef.h24
-rw-r--r--include/mach/sa/stdio.h72
-rw-r--r--include/mach/sa/stdlib.h56
-rw-r--r--include/mach/sa/string.h56
-rw-r--r--include/mach/sa/strings.h28
-rw-r--r--include/mach/sa/sys/cdefs.h48
-rw-r--r--include/mach/sa/sys/ioctl.h52
-rw-r--r--include/mach/sa/sys/mman.h41
-rw-r--r--include/mach/sa/sys/reboot.h123
-rw-r--r--include/mach/sa/sys/signal.h206
-rw-r--r--include/mach/sa/sys/stat.h126
-rw-r--r--include/mach/sa/sys/termios.h207
-rw-r--r--include/mach/sa/sys/time.h53
-rw-r--r--include/mach/sa/sys/types.h91
-rw-r--r--include/mach/sa/time.h26
-rw-r--r--include/mach/sa/unistd.h18
-rw-r--r--include/mach/std_types.defs131
-rw-r--r--include/mach/std_types.h48
-rw-r--r--include/mach/syscall_sw.h140
-rw-r--r--include/mach/task_info.h111
-rw-r--r--include/mach/task_special_ports.h86
-rw-r--r--include/mach/thread_info.h116
-rw-r--r--include/mach/thread_special_ports.h79
-rw-r--r--include/mach/thread_status.h55
-rw-r--r--include/mach/thread_switch.h40
-rw-r--r--include/mach/time_value.h80
-rw-r--r--include/mach/version.h68
-rw-r--r--include/mach/vm_attributes.h63
-rw-r--r--include/mach/vm_inherit.h55
-rw-r--r--include/mach/vm_param.h98
-rw-r--r--include/mach/vm_prot.h79
-rw-r--r--include/mach/vm_statistics.h75
-rw-r--r--include/mach_debug/hash_info.h41
-rw-r--r--include/mach_debug/ipc_info.h100
-rw-r--r--include/mach_debug/mach_debug.defs241
-rw-r--r--include/mach_debug/mach_debug_types.defs65
-rw-r--r--include/mach_debug/mach_debug_types.h40
-rw-r--r--include/mach_debug/pc_info.h43
-rw-r--r--include/mach_debug/vm_info.h132
-rw-r--r--include/mach_debug/zone_info.h61
-rw-r--r--include/mach_error.h67
-rw-r--r--include/mach_init.h84
-rw-r--r--include/servers/machid.defs550
-rw-r--r--include/servers/machid_debug.defs127
-rw-r--r--include/servers/machid_dpager.defs56
-rw-r--r--include/servers/machid_lib.h172
-rw-r--r--include/servers/machid_types.defs73
-rw-r--r--include/servers/machid_types.h110
-rw-r--r--include/servers/netname.defs63
-rw-r--r--include/servers/netname_defs.h50
-rw-r--r--ipc/fipc.c795
-rw-r--r--ipc/fipc.h95
-rw-r--r--ipc/ipc_entry.c858
-rw-r--r--ipc/ipc_entry.h158
-rw-r--r--ipc/ipc_hash.c626
-rw-r--r--ipc/ipc_hash.h94
-rw-r--r--ipc/ipc_init.c139
-rw-r--r--ipc/ipc_init.h58
-rw-r--r--ipc/ipc_kmsg.c3484
-rw-r--r--ipc/ipc_kmsg.h291
-rw-r--r--ipc/ipc_kmsg_queue.h31
-rwxr-xr-xipc/ipc_machdep.h40
-rw-r--r--ipc/ipc_marequest.c485
-rw-r--r--ipc/ipc_marequest.h98
-rw-r--r--ipc/ipc_mqueue.c754
-rw-r--r--ipc/ipc_mqueue.h108
-rw-r--r--ipc/ipc_notify.c593
-rw-r--r--ipc/ipc_notify.h72
-rw-r--r--ipc/ipc_object.c1346
-rw-r--r--ipc/ipc_object.h192
-rw-r--r--ipc/ipc_port.c1545
-rw-r--r--ipc/ipc_port.h407
-rw-r--r--ipc/ipc_pset.c349
-rw-r--r--ipc/ipc_pset.h95
-rw-r--r--ipc/ipc_right.c2762
-rw-r--r--ipc/ipc_right.h124
-rw-r--r--ipc/ipc_space.c317
-rw-r--r--ipc/ipc_space.h164
-rw-r--r--ipc/ipc_splay.c920
-rw-r--r--ipc/ipc_splay.h114
-rw-r--r--ipc/ipc_table.c205
-rw-r--r--ipc/ipc_table.h138
-rw-r--r--ipc/ipc_target.c78
-rw-r--r--ipc/ipc_target.h68
-rw-r--r--ipc/ipc_thread.c107
-rw-r--r--ipc/ipc_thread.h123
-rw-r--r--ipc/ipc_types.h31
-rw-r--r--ipc/mach_debug.c618
-rw-r--r--ipc/mach_msg.c2279
-rw-r--r--ipc/mach_msg.h68
-rw-r--r--ipc/mach_port.c2505
-rw-r--r--ipc/mach_port.srv27
-rw-r--r--ipc/mach_rpc.c148
-rw-r--r--ipc/port.h90
-rw-r--r--kern/act.c1134
-rw-r--r--kern/act.h200
-rw-r--r--kern/assert.h58
-rw-r--r--kern/ast.c242
-rw-r--r--kern/ast.h132
-rw-r--r--kern/bootstrap.c489
-rw-r--r--kern/compat_xxx_defs.h64
-rw-r--r--kern/counters.c82
-rw-r--r--kern/counters.h107
-rw-r--r--kern/cpu_number.h43
-rw-r--r--kern/debug.c192
-rw-r--r--kern/debug.h60
-rw-r--r--kern/elf-load.c88
-rw-r--r--kern/eventcount.c372
-rw-r--r--kern/eventcount.h57
-rw-r--r--kern/exception.c1003
-rw-r--r--kern/host.c380
-rw-r--r--kern/host.h48
-rw-r--r--kern/ipc_host.c488
-rw-r--r--kern/ipc_host.h72
-rw-r--r--kern/ipc_kobject.c391
-rw-r--r--kern/ipc_kobject.h118
-rw-r--r--kern/ipc_mig.c1134
-rw-r--r--kern/ipc_sched.c287
-rw-r--r--kern/ipc_sched.h32
-rw-r--r--kern/ipc_tt.c1398
-rw-r--r--kern/ipc_tt.h88
-rw-r--r--kern/kalloc.c237
-rw-r--r--kern/kalloc.h40
-rw-r--r--kern/kern_types.h70
-rw-r--r--kern/lock.c637
-rw-r--r--kern/lock.h177
-rw-r--r--kern/lock_mon.c375
-rw-r--r--kern/mach.srv42
-rw-r--r--kern/mach4.srv32
-rw-r--r--kern/mach_clock.c569
-rw-r--r--kern/mach_debug.srv26
-rw-r--r--kern/mach_factor.c153
-rw-r--r--kern/mach_host.srv39
-rw-r--r--kern/mach_param.h67
-rw-r--r--kern/machine.c765
-rw-r--r--kern/macro_help.h55
-rw-r--r--kern/pc_sample.c299
-rw-r--r--kern/pc_sample.h90
-rw-r--r--kern/printf.c637
-rw-r--r--kern/priority.c225
-rw-r--r--kern/processor.c1039
-rw-r--r--kern/processor.h327
-rw-r--r--kern/profile.c413
-rw-r--r--kern/queue.c131
-rw-r--r--kern/queue.h369
-rw-r--r--kern/refcount.h70
-rw-r--r--kern/sched.h181
-rw-r--r--kern/sched_prim.c2062
-rw-r--r--kern/sched_prim.h163
-rw-r--r--kern/server_loop.ch102
-rw-r--r--kern/shuttle.h71
-rw-r--r--kern/startup.c305
-rw-r--r--kern/strings.c174
-rw-r--r--kern/strings.h53
-rw-r--r--kern/syscall_emulation.c518
-rw-r--r--kern/syscall_emulation.h61
-rw-r--r--kern/syscall_subr.c399
-rw-r--r--kern/syscall_subr.h35
-rw-r--r--kern/syscall_sw.c289
-rw-r--r--kern/syscall_sw.h51
-rw-r--r--kern/task.c1238
-rw-r--r--kern/task.h183
-rw-r--r--kern/thread.c2575
-rw-r--r--kern/thread.h371
-rw-r--r--kern/thread_swap.c190
-rw-r--r--kern/thread_swap.h44
-rw-r--r--kern/time_out.h83
-rw-r--r--kern/time_stamp.c74
-rw-r--r--kern/time_stamp.h65
-rw-r--r--kern/timer.c525
-rw-r--r--kern/timer.h157
-rw-r--r--kern/xpr.c192
-rw-r--r--kern/xpr.h101
-rw-r--r--kern/zalloc.c971
-rw-r--r--kern/zalloc.h135
-rw-r--r--scsi/adapters/README290
-rw-r--r--scsi/adapters/scsi_33C93.h396
-rw-r--r--scsi/adapters/scsi_33C93_hdw.c2078
-rw-r--r--scsi/adapters/scsi_5380.h126
-rw-r--r--scsi/adapters/scsi_5380_hdw.c2423
-rw-r--r--scsi/adapters/scsi_53C700.h327
-rw-r--r--scsi/adapters/scsi_53C700_hdw.c696
-rw-r--r--scsi/adapters/scsi_53C94.h253
-rw-r--r--scsi/adapters/scsi_53C94_hdw.c2840
-rw-r--r--scsi/adapters/scsi_7061.h230
-rw-r--r--scsi/adapters/scsi_7061_hdw.c2603
-rw-r--r--scsi/adapters/scsi_89352.h231
-rw-r--r--scsi/adapters/scsi_89352_hdw.c2192
-rw-r--r--scsi/adapters/scsi_aha15.h347
-rw-r--r--scsi/adapters/scsi_aha15_hdw.c1467
-rw-r--r--scsi/adapters/scsi_aha17_hdw.c1371
-rw-r--r--scsi/adapters/scsi_dma.h150
-rw-r--r--scsi/adapters/scsi_user_dma.c171
-rw-r--r--scsi/adapters/scsi_user_dma.h47
-rw-r--r--scsi/compat_30.h163
-rw-r--r--scsi/disk_label.c692
-rw-r--r--scsi/mapped_scsi.c586
-rw-r--r--scsi/mapped_scsi.h90
-rw-r--r--scsi/pc_scsi_label.c196
-rw-r--r--scsi/rz.c462
-rw-r--r--scsi/rz.h60
-rw-r--r--scsi/rz_audio.c1901
-rw-r--r--scsi/rz_cpu.c450
-rw-r--r--scsi/rz_disk.c1222
-rw-r--r--scsi/rz_disk_bbr.c259
-rw-r--r--scsi/rz_host.c108
-rw-r--r--scsi/rz_labels.h243
-rw-r--r--scsi/rz_tape.c560
-rw-r--r--scsi/scsi.c642
-rw-r--r--scsi/scsi.h599
-rw-r--r--scsi/scsi2.h447
-rw-r--r--scsi/scsi_alldevs.c858
-rw-r--r--scsi/scsi_comm.c115
-rw-r--r--scsi/scsi_cpu.c109
-rw-r--r--scsi/scsi_defs.h284
-rw-r--r--scsi/scsi_disk.c624
-rw-r--r--scsi/scsi_endian.h66
-rw-r--r--scsi/scsi_jukebox.c57
-rw-r--r--scsi/scsi_optical.c57
-rw-r--r--scsi/scsi_printer.c57
-rw-r--r--scsi/scsi_rom.c401
-rw-r--r--scsi/scsi_scanner.c54
-rw-r--r--scsi/scsi_tape.c415
-rw-r--r--scsi/scsi_worm.c51
-rw-r--r--util/Makerules32
-rw-r--r--util/about_to_die.c34
-rw-r--r--util/config.h23
-rw-r--r--util/cpu.c27
-rw-r--r--util/cpu.h41
-rw-r--r--util/cpu_init.c40
-rw-r--r--util/cpu_subs.h23
-rw-r--r--util/cpus.h27
-rw-r--r--util/debug.h81
-rw-r--r--util/die.c54
-rw-r--r--util/panic.c48
-rw-r--r--util/phys_mem.h34
-rw-r--r--util/putchar.c28
-rw-r--r--util/puts.c37
-rw-r--r--util/ref_count.h68
-rw-r--r--version.c1
-rw-r--r--vm/memory_object.c1191
-rw-r--r--vm/memory_object.h43
-rw-r--r--vm/memory_object_default.cli28
-rw-r--r--vm/memory_object_user.cli28
-rw-r--r--vm/pmap.h267
-rw-r--r--vm/vm_debug.c499
-rw-r--r--vm/vm_external.c159
-rw-r--r--vm/vm_external.h89
-rw-r--r--vm/vm_fault.c2182
-rw-r--r--vm/vm_fault.h64
-rw-r--r--vm/vm_init.c84
-rw-r--r--vm/vm_kern.c1072
-rw-r--r--vm/vm_kern.h63
-rw-r--r--vm/vm_map.c5244
-rw-r--r--vm/vm_map.h448
-rw-r--r--vm/vm_object.c3090
-rw-r--r--vm/vm_object.h374
-rw-r--r--vm/vm_page.h322
-rw-r--r--vm/vm_pageout.c924
-rw-r--r--vm/vm_pageout.h46
-rw-r--r--vm/vm_resident.c1505
-rw-r--r--vm/vm_user.c397
-rw-r--r--vm/vm_user.h50
1138 files changed, 373332 insertions, 0 deletions
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 00000000..cd84c19a
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,13 @@
+Tue Feb 25 15:42:23 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * i386/Makefrag (INCLUDES): Find `include' directory in new
+ location.
+ * Makefile (INCLUDES): Find `include' directory in new location.
+ (%.symc): Find gensym.awk in new location.
+
+ * Reorganized directories into new layout and unified mach4 and
+ mach4-i386 into a single tree.
+
+
+Older changes in ChangeLog.00 (for i386 directory) and ChangeLog.0 (for
+all other files).
diff --git a/ChangeLog.0 b/ChangeLog.0
new file mode 100644
index 00000000..2ef943a8
--- /dev/null
+++ b/ChangeLog.0
@@ -0,0 +1,721 @@
+Wed Feb 12 16:22:07 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * kernel/kern/debug.c (panic): Insert a delay loop. Do a reboot
+ instead of a mere halt.
+
+Wed Feb 5 12:07:30 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * kernel/util/cpu.h (struct cpu): Add dummy field so that zero
+ size structure doesn't become an undefined variable.
+
+ * kernel/version.c: New file.
+
+ * kernel/kern/elf-load.c: New file. (Was mach4/libmach/exec/elf.c
+ in Utah distribution.) Define exec_load instead of exec_load_elf.
+
+ * kernel/device/chario.c: Include "device_reply.h" rather than
+ <device/device_reply.h>.
+
+Thu Mar 28 17:59:36 1996 Linus Kamb <kamb@cs.utah.edu>
+
+ * kernel/ipc/fipc.c:
+
+ changed fipc_send() to take a fipc_endpoint_t structure
+ which has the destination hardware address and destination
+ port, instead of the two integer arguments used before.
+
+	changed fipc_recv() to also take a fipc_endpoint_t
+	parameter that returns the sender's hardware address.
+	(There is no concept of a sending fipc port.)
+
+ Also added some spl stuff to avoid a possible race
+ condition, and generally cleaned it up.
+
+ * include/mach/syscall_sw.h: changed the argument #'s for fipc calls.
+
+ * kernel/kern/syscall_sw.c: changed the argument #'s for fipc calls.
+
+ * kernel/device/device_init.c: Changed call to fipc_thread()
+ to call to fipc_init().
+
+Mon Mar 25 01:39:45 1996 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * changes for VM_PROT_NOTIFY added for Godmar Back
+ (gback@cs.utah.edu):
+
+ * include/mach/kern_return.h: Added KERN_WRITE_PROTECTION_FAILURE
+ return value.
+
+ * include/mach/vm_prot.h: Added VM_PROT_NOTIFY.
+
+	* kernel/vm/vm_map.c (vm_map_protect): OR in VM_PROT_NOTIFY
+	when we check the new protection against max protection (we
+	should always be able to set the NOTIFY bit).
+
+ * kernel/vm/vm_map.c (vm_map_lookup): If VM_PROT_NOTIFY is set and
+ we get a write protection violation, return
+ KERN_WRITE_PROTECTION_FAILURE.
+
+ * kernel/vm/vm_user.c (vm_protect): Make sure that we accept
+ VM_PROT_NOTIFY (since it's not in VM_PROT_ALL).
+
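A minimal sketch of how a task might use the VM_PROT_NOTIFY bit described in
the Mon Mar 25 entry above. vm_protect() is the standard Mach call and
VM_PROT_NOTIFY/KERN_WRITE_PROTECTION_FAILURE come from the patched headers
named in that entry; the helper name and the way the failure status
ultimately reaches the task (e.g. via its exception port) are assumptions,
not details taken from the ChangeLog.

	#include <mach.h>

	kern_return_t
	watch_region_for_writes(vm_address_t addr, vm_size_t size)
	{
		/* Keep the region readable but not writable, and set the
		   NOTIFY bit so that a later write fault is reported as
		   KERN_WRITE_PROTECTION_FAILURE rather than as an ordinary
		   protection failure. */
		return vm_protect(mach_task_self(), addr, size,
				  FALSE,	/* don't change max protection */
				  VM_PROT_READ | VM_PROT_NOTIFY);
	}
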
+Sun Mar 24 13:17:45 1996 Shantanu Goel <goel@toner.cs.utah.edu>
+
+ * kernel/device/dev_hdr.h: Following changes #ifdef'ed for i386.
+ Include i386at/dev_hdr.h.
+ (device): Renamed to mach_device. All users changed.
+ (device_t): Renamed to mach_device_t. All users changed.
+
+ * kernel/device/dev_lookup.c: Following changes #ifdef'ed for i386.
+ (dev_port_enter): Initialize emulation ops and data.
+ (convert_device_to_port): Use emulation ops.
+ (device_reference): Renamed to mach_device_reference.
+ Interface changed. All callers changed.
+	(device_deallocate): Renamed to mach_device_deallocate.
+ Interface changed. All callers changed.
+
+ * kernel/device/ds_routines.c: Following changes #ifdef'ed for i386.
+ Include i386at/device_emul.h.
+ (mach_convert_device_to_port): New function. Replaces
+ convert_device_to_port. All callers changed.
+ (ds_device_open, ds_device_close, ds_device_write,
+ ds_device_write_inband, ds_device_read, ds_device_read_inband,
+ ds_device_set_status, ds_device_get_status, ds_device_set_filter,
+ ds_device_map, ds_device_write_trap, ds_device_writev_trap):
+ Declaration made static. Dropped "ds_" prefix. Interface changed.
+ (ds_notify): Deleted.
+ (ds_nosenders): Declaration made static.
+ (io_done_thread_continue): (free_skbuffs): Use it.
+ (mach_device_emulation_ops): New variable.
+
+Mon Mar 4 10:39:03 MST 1996 Linus Kamb (kamb@cs.utah.edu)
+
+ * Checked in FIPC code:
+
+ * Added kernel/ipc/fipc.[ch]
+ fipc_send(int a0, int a1, char *buf, int buf_len);
+ a0 and a1 together contain the 6 byte ethernet
+ address of the receiving machine, and the 16 bit
+ unsigned "fipc_port".
+
+ fipc_recv(unsigned short fipc_port, char *buf, int *buf_len);
+		Returns up to buf_len bytes to the user's buf from
+ port fipc_port. Will block if there is no available
+ message on the fipc port. Buf_len is an IN/OUT
+ parameter, giving the size of the user's buffer,
+ and returning the actual number of bytes in the
+ message.
+
+ * Changed kernel/device/device_init.c
+ starts the fipc_thread, which does necessary initialization.
+
+ * Changed kernel/device/net_io.c
+ added net_fwrite() which is very similar to net_write(), but
+ is called by nefoutput() (if_ne.c) which is the path for
+ fipc_send calls.
+
+ * Changed kernel/kern/syscall_sw.c
+ adding -96 and -97 for FIPC traps
+
+ * Changed include/mach/syscall_sw.h
+		adding kernel_trap() declarations for fipc_send and fipc_recv
+
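A minimal sketch of user-level FIPC usage, combining the original
fipc_send()/fipc_recv() semantics given in the Mon Mar 4 entry above with the
fipc_endpoint_t revision from the Thu Mar 28 entry. The field names in
fipc_endpoint_t and the exact trap prototypes are assumptions; the ChangeLog
only describes them informally.

	/* Assumed layout: the ChangeLog says only that the structure holds a
	   destination hardware address and a destination fipc port. */
	typedef struct fipc_endpoint {
		unsigned char	hwaddr[6];	/* 6-byte ethernet address */
		unsigned short	port;		/* 16-bit unsigned fipc port */
	} fipc_endpoint_t;

	/* Assumed trap prototypes, per the revised interface described above. */
	extern int fipc_send(fipc_endpoint_t *dest, char *buf, int buf_len);
	extern int fipc_recv(unsigned short port, char *buf, int *buf_len,
			     fipc_endpoint_t *sender);

	int
	fipc_echo_once(unsigned char dest_hw[6])
	{
		fipc_endpoint_t dest, sender;
		char msg[] = "hello";
		char buf[1500];
		int len = sizeof buf;
		int i;

		for (i = 0; i < 6; i++)
			dest.hwaddr[i] = dest_hw[i];
		dest.port = 1000;		/* arbitrary example port */

		fipc_send(&dest, msg, sizeof msg);

		/* Blocks until a message arrives on port 1000; len is an
		   IN/OUT parameter and comes back as the message size. */
		return fipc_recv(1000, buf, &len, &sender);
	}
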
+Tue Feb 13 22:34:27 1996 Kevin T. Van Maren <vanmaren@fast.cs.utah.edu>
+
+ * More diskslice changes are integrated:
+
+ * kernel/scsi/disk_label.c: Updated this file I added last time.
+ Full support for dos partitions and BSD disklabels. No longer
+ uses a disklabel structure for partition information encoding.
+
+ * kernel/scsi/pc_scsi_label.c: No code changes
+
+ * kernel/scsi/rz.h: rzpartition() definition changed for slices
+
+ * kernel/scsi/rz_disk.c: Modified to use the array of partitions
+ and the common partitioning code. Disklabels are still there
+ but are fairly meaningless to the kernel, and incorrect in any
+ event.
+
+ * kernel/scsi/rz_disk_bbr.c: Uses new partitioning. NOT TESTED.
+
+ * kernel/scsi/rz_labels.h: Added diskpart structure. Also added
+ #defines for partition types in this structure.
+
+ * kernel/scsi/scsi_defs.h: MAX_SCSI_PARTS defined
+
+ * kernel/device/dev_name.c: Now converts a block device string to
+ the correct slice+partition fields for the partition code.
+
+Thu Feb 8 00:27:33 MST 1996 Bryan Ford <baford@cs.utah.edu>
+
+ Merged in changes from UK02p20 to end of UK02-freeze branch,
+ and terminated the last remnant of the UK02-freeze branch.
+
+ Fri Feb 2 17:03:38 1996 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * kernel/ipc/ipc_notify.c: added ipc_notify_init_msg_accepted
+ call to ipc_notify_init().
+
+ Wed Jan 17 17:04:24 1996 Bart Robinson <lomew@fast.cs.utah.edu>
+
+ * Serial console working for real.
+
+ * kernel/device/cons.c, cons.h: added. These are the same as in
+ mach4-parisc/kernel/hpdev but changed slightly to make cninit
+ re-entrant. They can and should be used for the PA too.
+
+ * kernel/util/console.c, console_buf.c, console_list.h: removed.
+ The console is handled differently now.
+
+ Wed Jan 17 13:51:46 1996 Kevin Van Maren <vanmaren.cs.utah.edu>
+
+ * Unified the partition code for IDE & SCSI device drivers
+
+ * kernel/scsi/disklabel.c: new file
+ * kernel/scsi/rz_labels.c: deleted
+	* kernel/scsi/pc_label.c: deleted
+
+ * kernel/scsi/rz_labels.h: Added more partition ID types. Also
+ added the prefix scsi_ to structs.
+
+ * kernel/scsi/pc_scsi_label.c: removed grab_bob_label(),
+ default_label is now scsi_default_label.
+
+ * kernel/scsi/rz_disk.c: Replaced scsi-specific partition code
+ with code that calls the unified partition code.
+
+Thu Jan 11 20:06:33 MST 1996 Bryan Ford <baford@cs.utah.edu>
+
+ Added remote debugging support to MOSS;
+ fixed a number of bugs.
+
+Mon Oct 30 09:21:10 MST 1995 Bryan Ford <baford@cs.utah.edu>
+
+ Merged in changes from UK02p15 to UK02p20:
+
+ Mon Oct 23 11:03:10 1995 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * bootstrap/bootstrap.c: Added back code to ask for a root
+ device (the '-a' switch from the command line). Added
+ `translate_root' function that converts from Linux device
+ names to Mach ones.
+
+ Sun Oct 22 18:36:49 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Integrated Steve's and Bart's changes from the UK02p16-work
+ branch.
+
+ * bootstrap: use exec_load() to find startup's symbol table,
+ instead of trying to hand-interpret the a.out header.
+
+ * bootstrap: no longer try to load emulator symbols; that's
+ Lites' job.
+
+ * bootstrap: if user types in a new server directory name,
+ use the new device name as the root device as well.
+
+ * include/mach/sa/sys/reboot.h: added BSD flag definitions.
+
+ * mach4/kernel/ddb/db_aout.c: got rid of ddb_init();
+ instead just call aout_db_sym_init() directly from model_dep.c.
+
+ * mach4/kernel/kern/bootstrap.c: Use MultiBoot kernel command
+ line to find root device and flags, instead of old kludgy
+ boothowto and bootdev values.
+
+ Fri Oct 13 16:47:52 1995 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * include/mach/multiboot.h: Added entries for symbol table
+ information and for boothowto and bootdev into the boot_info
+ structure.
+
+ * include/mach/exec/exec.h: Cleaned up text (>80 columns).
+ * include/mach/exec/a.out.h: Added some macros from FreeBSD.
+
+ * kernel/ddb/db_aout.c: Enabled code to load kernel symbol
+ table.
+
+ Thu Sep 28 16:57:07 1995 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * kernel/scsi/adapters/scsi_aha15.h: Added #defines for the
+ 1542CF.
+
+ * kernel/scsi/adapters/scsi_aha15_hdw.c: Added support for the
+ 1542CF. #defined db_printf to just be printf.
+
+ * kernel/scsi/adapters/scsi_aha17_hdw.c: #defined db_printf to
+ just be printf.
+
+ Mon Jun 26 13:51:42 MDT 1995 Wolfram Stering <wolfi@cosy.sbg.ac.at>
+
+ * kernel/scsi/*label*: fixed to support Linux-style
+ partitioning.
+
+ Fri May 19 11:17:13 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p17, as a private snapshot for goel.
+
+ Tue May 16 01:45:22 1995 Shantanu Goel <goel@monk.mcl.cs.columbia.edu>
+
+ * kernel/device/ds_routines.c (ds_device_set_status):
+ Pass device structure to Linux driver.
+ (ds_device_get_status): Likewise.
+
+ Fri May 12 10:47:41 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p16, as an internal snapshot for rdukes
+
+Wed Oct 4 20:04:27 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ Released moss-0.80 (tag moss-0-80)
+
+Wed Oct 4 12:05:57 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ Merged in changes made for DOS extender at home:
+ * better mach/sa include files
+ * new list-based flexible memory allocator
+
+Wed May 3 13:59:54 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ Merged in diffs from UK02p12 to UK02p15:
+
+ Wed May 3 10:47:41 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p15.
+ (UK02p14 was just an intermediate tag after adding ELF support.)
+
+ Configuration-related changes:
+
+ * configrules: now looks for mach4 sources in a default location
+ "next to" the machine-dependent sources if no '--with-mach4='
+ option is specified.
+
+ ELF support changes:
+
+ * Got rid of old ex_get_header() routine from libmach_sa.a;
+ instead provide a new library, libmach_exec.a (in libmach/exec)
+ which provides a less a.out-specific exec_load() function.
+
+ * bootstrap/load.c, kernel/kern/bootstrap.c: use new exec_load() function.
+
+ * Added include/mach/exec/elf.h, from OSF's server bootstrap code
+
+ Makefile-related changes:
+
+ * Starting to phase out GSECTIONS/MSECTIONS, because
+ that design forced all machine-dependent directories
+ to come before all machine-independent directories,
+ which isn't the right behavior in all cases.
+ Now makefiles should just build SRCDIRS directly.
+
+ * We now generate public header files and libraries in
+ $(OBJDIR)/include and $(OBJDIR)/lib before installing them.
+ Added mach4/Makerules.lib, for various library targets to use.
+
+ * mach4/Makerules: sort all object files at once, so that
+ .c files can override .S files and such.
+
+ * Split out common part of mach4-i386/Makefile.in and
+ mach4-parisc/Makefile.in, and put them in mach4/Makerules.top.
+
+ Miscellaneous changes:
+
+ * (fjl) Various additions and fixes to the HTML pages.
+
+ * kernel/scsi: merged in MK83a changes, including new aha17 (eaha) driver.
+
+ * gensym.awk: 'size' command can now take an optional fourth parameter
+ indicating the name of the symbol to declare.
+
+ * Moved errorlib.h to the public include/mach directory.
+
+ * include/mach/machine/vm_param.h:
+
+ * include/mach/sa: Added some common POSIX errno's and other useful stuff.
+
+ * libmach/standalone: Moved generic C-library stuff to libmach/c.
+
+ * libmach/c/stdio: created, containing extremely simple high-level file I/O
+ functions that do no buffering, instead going straight to the low-level
+ Unix-like file I/O routines.
+
+ Fri Apr 7 17:43:22 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p13.
+
+Fri Apr 7 13:08:16 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ Merged in diffs from UK02p9 to UK02p12:
+
+ Mon Apr 3 19:55:41 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p11.
+ Too many changes to describe concisely,
+ almost all affecting only the boot mechanism for now.
+ Also, integrated the kernel/util, kernel/pc, kernel/dos code
+ from my DOS extender project at home.
+
+ Mon Feb 27 16:32:59 MST 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p10 as a non-functional snapshot
+
+Fri Feb 10 13:25:54 MST 1995 Bryan Ford <baford@cs.utah.edu>
+
+ Merged in diffs from UK02p7 to UK02p9:
+
+ Thu Jan 26 19:37:04 1995 steve clawson <sclawson@fast.cs.utah.edu>
+
+ * kernel/scsi/adapters/scsi_aha15_hdw.c:
+
+ Merged in MK83-MK83a changes.
+
+ Added code to recognize 1542CF controllers.
+ (changes marked with a + were imported from NetBSD).
+
+ aha_probe():
+ added 1542CF case in the id switch.
+ +added code to unlock the mailbox interface on the C/CF's.
+ +added code to set DMA on/off times
+ moved initialization around a bit:
+ commented out the code to call aha_init_1().
+ changed the call to aha_init_2() into aha_init().
+
+ * kernel/scsi/adapters/scsi_aha15.h:
+
+ Added AHA_BID_1542CF, AHA_EXT_BIOS and AHA_MBX_ENABLE defines.
+ Added struct aha_extbios.
+
+ * bootstrap/fs.h:
+ Added fs_state field to struct fs. (fix suggested by Remy Card)
+
+
+ Tue Jan 17 15:55:01 MST 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p8
+
+ 03-Dec-94 Johannes Helander (jvh) at Helsinki University of Technology
+
+ * bootstrap/ffs_file_io.c:
+
+ Added support for 4.4 BSD fastlinks. The change breaks 4.3
+ slowlinks of length four or less.
+
+ Interpret 4.4 BSD directory entries correctly also on little
+ endian machines.
+
+ 12 Jan 1995 17:52:44 -0500 Shantanu Goel <goel@cs.columbia.edu>
+
+ * vm/vm_map.c: Incorporated the device_write bug fix from MK83a.
+
+ 09-Dec-94 Ian Dall (dall@hfrd.dsto.gov.au)
+
+ * Fixed char_write_done to use ds_device_write_reply_inband when
+ appropriate.
+
+ 16-May-94 Ian Dall (idall@eleceng.adelaide.edu.au) at University of Adelaide
+
+ * Increase tty_outq_size to 2048. *Must* be greater than largest
+ tthiwat entry or bad things happen.
+
+ Wed Dec 7 11:43:02 MST 1994 Roland McGrath (roland@gnu.ai.mit.edu)
+
+ * Fix to bootstrap code to interpret BSD 4.4 FFS filesystems.
+
+Fri Nov 25 13:56:32 MST 1994 Bryan Ford (baford@cs.utah.edu)
+
+ Merged in diffs from UK02p6 to UK02p7:
+
+ * GNU make should no longer mistakenly delete intermediate
+ files the first time around.
+
+ * 'make configure' should now work without producing bogus
+ error messages. (Hint: don't put any bogus 'AC_*' text
+ strings in input files to autoconf, even if they're
+ in comments!)
+
+ * Don't install 'Makefile.in' into the public include directory.
+
+ * mig/Makerules (parser.o):
+ Added explicit dependencies to protect files that were
+ automatically deleted by GNU make after the first compilation on
+ an empty object tree. These were deleted because GNU make
+ decided that they were intermediary files. However, because
+ they are listed in the automatically generated dependency file,
+ GNU make realizes that it must regenerate them on a subsequent
+ "make" pass. Since they have explicit dependencies (in the
+ "depend" file) on that subsequent pass, GNU make does not delete
+ them.
+
+Tue Nov 1 19:12:55 1994 Jeff Law (law@snake.cs.utah.edu)
+
+ * kernel/kern/{processor.h, task.h, thread.h}: Enclose full
+ prototypes inside #if KERNEL (for "klg" files where foo_t
+ is typically mapped to mach_port_t, which causes decl and defn
+ not to match).
+
+ * Merge in machine-independent kernel changes from MK83-MK83a.
+ Detailed change entries in CHANGES.MK83a.log. Affected files:
+
+ * include/device/bpf.h: MK83a Changes.
+ * include/mach/{exe, mach4, mach_types}.defs: Likewise.
+ * include/mach/{host_info, pc_sample, processor_info}.h: Likewise.
+ * include/mach/vm_statistics.h: Likewise.
+ * kernel/chips/{audio.h, busses.h}: Likewise.
+ * kernel/device/{chario.c, conf.h, net_io.c}: Likewise.
+ * kernel/kern/{exception.c, host.c, mach_clock.c}: Likewise.
+ * kernel/kern/{host.c, mach_clock.c, pc_sample.c}: Likewise.
+ * kernel/kern/{processor.h, strings.c, task.c, task.h}: Likewise.
+ * kernel/kern/{thread.c, thread.h}: Likewise.
+ * kernel/scsi/rz_labels.c: Likewise.
+ * kernel/scsi/adapters/scsi_aha15_hdw.c: Likewise.
+ * kernel/vm/{vm_fault, vm_map, vm_resident}: Likewise.
+
+ * kernel/bogus/eaha.h: New file for MK83a support.
+ * kernel/kern/pc_sample.h: Likewise.
+ * kernel/scsi/adapters/scsi_aha17_hdw.c: Likewise.
+
+Sat Oct 22 14:56:33 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Got rid of various rinky-dink C library routines
+ that were duplicated in both the kernel and libmach.
+ In the system-independent source tree, this just
+ means kern/sscanf.c so far, but I've got my eye
+ on printf.c as well. In the i386-specific tree
+ it includes all kinds of little things such as bcopy.
+
+Wed Oct 19 12:58:13 1994 Jeff Law (law@snake.cs.utah.edu)
+
+ * kernel/kern/mach.srv: Use immediate forms of act_get_state and
+ act_set_state for thread_{get,set}_state.
+
+Thu Oct 13 09:50:24 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * include/mach/sys_*.h: moved to include/mach/sa/sys/*.h.
+ Also added some other C-library-like files in there
+ for the benefit of standalone programs as well as my
+ Mach-on-DOS ("MOSS") C library.
+
+ * Makerules: For all programs compiled on the target machine
+ (actually host machine in proper terminology),
+ add include/mach/sa to the include path.
+
+ * Makefiles: minor cleanups, added some .PRECIOUS rules
+ to reduce the number of files gratuitously recompiled
+ the second time around.
+
+ * libmach/unixoid: Cleaned out. We no longer need the
+ unixoid version of libmach at all, because neither
+ the Hurd nor Lites need it (or ever will). Good riddance.
+ We might start generating a 'libmach.a' again soon,
+ but it will contain only the "pure" Mach parts -
+ nothing related to Unix or C libraries.
+
+ * libmach/standalone/printf.c: Broke into multiple files
+ and added various tidbits of functionality.
+
+Fri Sep 30 01:43:16 1994 Jeff Law (law@snake.cs.utah.edu)
+
+ * kernel/bogus/bogus.c: New file.
+
+Mon Sep 26 12:58:57 1994 Jeff Law (law@snake.cs.utah.edu)
+
+ * kernel/kern/processor.c (pset_init): Nuke debugging printf.
+
+ * kernel/kern/thread.c (thread_create_in): Grab a reference to the
+ processor set for the new thread.
+
+Fri Sep 9 00:00:53 1994 Jeff Law (law@snake.cs.utah.edu)
+
+ * kernel/device/ds_routines.c (io_done_thread): Nuke call to
+ stack_privilege.
+
+Thu Sep 8 11:37:03 1994 Jeff Law (law@snake.cs.utah.edu)
+
+ * kernel/kern/act.c (act_set_state, act_get_state): Use natural_t
+ to match prototype.
+
+ * kernel/kern/zalloc.c: lock_zone has been renamed to zone_lock,
+ likewise for unlock_zone.
+
+ * kernel/bogus/mach_pcsample.h: Disable for now.
+ * kernel/bogus/xpr_debug.h: Likewise.
+
+ * include/mach/rpc.h: Add c-signature stuff.
+
+ * kernel/device/chario.c (ttstart): New function.
+ (ttrstrt): Use it.
+
+ * include/mach/mach4.defs: Wrap PC sampling stuff inside
+ an appropriate #ifdef.
+
+Wed Aug 31 10:59:51 1994 Jeff Law (law@fast.cs.utah.edu)
+
+ * kernel/ipc/ipc_notify.c: Use MACH_MSG_TYPE_INTEGER_32 rather
+ than MACH_MSG_TYPE_INTEGER_T.
+
+ * kernel/kern/ipc_sched.c: Don't include thread_swap.h
+
+ * kernel/kern/sched_prim.c (thread_wakeup_prim): "event" should
+ be an event_t. Cast argument to wait_hash to an int.
+ (various): Change thread->runq to thread->shuttle.runq.
+
+ * kernel/device/net_io.c: Don't typedef u_long here.
+
+ * kernel/kern/ipc_kobject.c: Include mach_machine_routines.h
+
+ * kernel/device/ds_routines: Include current.h for current_thread.
+
+ * include/mach/sys_types.h: Put u_long back.
+
+Tue Aug 30 13:45:05 MDT 1994
+
+ * Makefiles: Don't use CFLAGS etc. to build variables in;
+ that way the user can specify them on the `make' line
+ and still get sensible results.
+ Also, named some variables HOST_xxx and TARGET_xxx
+ to better support cross-compilation.
+
+Fri Aug 26 12:06:35 1994 Jeff Law (law@fast.cs.utah.edu)
+
+ * include/mach/mach_port.defs: Use thread_t, not act_t.
+
+Mon Aug 1 18:15:00 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Got most of the migrating threads changes merged in
+
+Tue Oct 25 12:32:28 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Released UK02p6.
+
+ * Moved exec header interpretation stuff from the bootstrap code
+ into libmach so the kernel could also use it when initializing
+ the bootstrap program itself.
+
+Sat Oct 22 15:44:42 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Moved cthreads.h from the top-level public include directory
+ into the mach/ include subtree. Higher-level OS personalities
+ can create their own cthreads.h, or just make a stub
+ that cross-includes mach/cthreads.h if they want to use
+ the "standard" implementation.
+
+Mon Oct 17 10:54:38 ??? 1994 Csizmazia Balazs (csb@ullman.elte.hu)
+
+ * Added MINIX filesystem support to the bootstrap loader.
+
+Sat Oct 15 17:22:01 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Temporary kludge: added servers/ include directory
+ containing libmachid and libnetname headers;
+ added corresponding library code to libmach.
+ This is so we can compile Lites against Mach4
+ without having to create a full new USER package yet.
+
+ * Changed ifdef's of KERNEL to MACH_KERNEL
+ in the public header files.
+ (Really should get rid of them entirely.)
+
+Fri Sep 16 11:33:44 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Released UK02p5.
+
+ * Added copyrights to the makefiles.
+
+Fri Sep 9 10:44:49 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * configure.in: if user specifies --enable-debug,
+ add -DDEBUG to the compiler flags.
+
+Tue Sep 6 09:58:49 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Added blank .SUFFIXES line
+ to eliminate the requirement of using `make -r'.
+ (Things still go faster with `-r' though.)
+
+Mon Sep 5 22:44:00 1994 Patrick Doyle (wpd@peanutbutter)
+
+ * README (DIRECTORY STRUCTURE): Added a distillation of a
+ discussion with Bryan regarding organization of and differences
+ between the two libraries produced by the distribution. (Also
+ cleaned up references to libmach_ux.a when I stumbled across
+ them in the source).
+
+Mon Sep 5 12:16:30 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Full cross compilation support
+
+ * Cleaned up all install rules
+ so they will no longer gratuitously reinstall things.
+
+Sun Sep 4 11:33:03 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * As suggested by Roland McGrath,
+ changed the genassym mechanism to cross-compile better,
+ by compiling to assembly language source
+ instead of an executable program.
+
+ Also refined and extended this technique
+ to make it more convenient,
+ and to allow MIG to use it too.
+
+Sun Sep 4 08:10:05 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Makefiles: better cross-compilation support.
+
+Sat Sep 3 15:14:36 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * bootstrap/*: got FFS support back in;
+ now both ext2 and FFS are supported at the same time
+ (although in a horrendously kludgy way).
+
+Wed Jul 20 14:00:45 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * include/mach/mig_support.h: Got rid of cross-includes
+ of some old bogus header files.
+
+ * Released UK02
+
+
+Mon Jul 18 18:28:00 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ Integrated microkernel changes needed by LITES,
+ from Johannes Helander (jvh) and Tero Kivinen (kivinen) @cs.hut.fi:
+
+ * (jvh) include/device/device_reply.defs: added SEQNOS support.
+
+ * (jvh) include/mach/error.h: Surrounded typedefs with ifndef ASSEMBLER.
+
+ * (jvh) mig, libmach: mig_dealloc_reply_port() now takes an argument,
+ which is ignored in the default libmach implementation of this function,
+ but may be needed for custom reply port allocators.
+ MIG stubs now pass the reply port to be deallocated.
+
+ * (jvh) mig, libmach: new function mig_put_reply_port(),
+ which does nothing in the default libmach implementation.
+ MIG stubs call this function when done with a reply port,
+ if it wasn't deallocated with mig_dealloc_reply_port().
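+
+ A rough sketch of the user-visible shape of these two hooks (the names,
+ the new reply-port argument, and the ignored/no-op default behaviour come
+ from this entry; the void return type and the mach_port_t argument type
+ are assumed for illustration):
+
+     #include <mach/port.h>
+
+     /* Now receives the reply port; the stock libmach implementation
+        ignores it, but a custom reply-port allocator may need it. */
+     void mig_dealloc_reply_port(mach_port_t reply_port);
+
+     /* Called by MIG stubs when finished with a reply port that was not
+        deallocated; does nothing in the stock libmach implementation. */
+     void mig_put_reply_port(mach_port_t reply_port);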
+
+ * (jvh) mig/mig.sh: added -migcom flag to specify the location of migcom.
+
+ * (jvh) libmach/err_kern.sub: added new errno strings for 4.4BSD and Lite.
+
+ * (???) libthreads: added wait_type to cproc structure
+ keeping track of what type of event is being waited for.
+
+ * (???) libthreads: added a holder field to mutex structure
+ keeping track of who's holding a mutex, for debugging.
+
+ * (kivinen) libthreads: Renamed cthread_t->catch to cthread_t->catch_exit,
+ because catch is a reserved word in C++.
+
+ * (jvh) libthreads: Added CONDITION_NAMED_INITIALIZER and MUTEX_NAMED_INITIALIZER
+ macros. They take one argument: a name string.
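+
+ A usage sketch for the new initializers; the macro names and their single
+ name-string argument come from this entry, while the struct mutex and
+ struct condition object types (and the cthreads.h include) are assumed
+ from the standard cthreads interface:
+
+     #include <cthreads.h>
+
+     /* Statically initialized, named synchronization objects. */
+     struct mutex     frob_lock = MUTEX_NAMED_INITIALIZER("frob_lock");
+     struct condition frob_cond = CONDITION_NAMED_INITIALIZER("frob_cond");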
+
+
+Fri Jul 15 11:46:19 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * kern/bootstrap.c: gutted and rewrote
+ to get the boot image from the bmod mechanism.
+ Needs to be gutted still more.
+
diff --git a/ChangeLog.00 b/ChangeLog.00
new file mode 100644
index 00000000..553b5fea
--- /dev/null
+++ b/ChangeLog.00
@@ -0,0 +1,858 @@
+Wed Feb 19 15:51:34 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * kernel/i386at/gpl/linux/include/linux/autoconf.h
+ (CONFIG_SCSI_AHA1740, CONFIG_EL3, CONFIG_WD80x3): Turn on these
+ devices.
+
+Wed Feb 5 11:35:39 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * kernel/i386at/idt.h: Include "idt-gen.h" instead of
+ include-next. Safer this way.
+ * kernel/i386/idt.h: Moved to ...
+ * kernel/i386/idt-gen.h: ... here.
+
+Mon Dec 16 21:01:37 1996 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * kernel/i386/ast.h: Comment out chained include; generic
+ kern/ast.h includes us always, and that's all the ast.h's there
+ are.
+ * kernel/i386/thread.h: Likewise.
+
+ * kernel/chips/busses.c: Delete file (duplicates generic copy).
+
+Thu Apr 18 22:54:26 1996 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * configrules: Changed --with-elf option to define __ELF__ instead
+ of ELF.
+ * configure: likewise
+
+ * include/mach/machine/asm.h: Changed references from ELF to
+ __ELF__.
+ * kernel/i386at/boothdr: likewise.
+ * kernel/util/crtn.S: likewise.
+ * libmach/standalone/mach-crt0.c: likewise.
+
+ * boot/bsd/main.c: Changes to conform with Multiboot 0.6 (adds a
+ checksum field to the multiboot header). From Erich Boleyn
+ (erich@uruk.org).
+ * boot/linux/misc.c: likewise.
+ * include/mach/machine/multiboot.h: likewise.
+ * kernel/i386at/boothdr: likewise.
+
+Thu Apr 4 11:02:40 MST 1996 Bryan Ford <baford@cs.utah.edu>
+
+ * boot/boot_info_dump.c: dump the string associated with
+ each boot module, if any.
+
+ * boot/bsd/main.c: pass along the associated boot module string
+
+ * boot/bsd/mkbsdimage.sh: allow strings to be associated with
+ boot modules, using the syntax "filename:string".
+
+Fri Mar 29 02:00:29 1996 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * html/mach4-UK22.html: new file.
+ * html/mach4-UK22-linuxdev.html: likewise.
+
+ * configrules: Added --enable-fipc option, which adds -DFIPC to DEFS.
+ * configure: likewise.
+
+Sun Mar 24 13:12:14 1996 Shantanu Goel <goel@toner.cs.utah.edu>
+
+ * kernel/i386at/model_dep.c: (init_alloc_aligned): Check for end
+ of memory.
+
+Sat Mar 16 20:26:28 1996 Shantanu Goel <goel@bottles.cs.utah.edu>
+
+ Following changes for new driver emulation framework.
+
+ * kernel/Makerules: Exclude SCSI directories if LINUX_DEV.
+
+ * kernel/i386/iopb.c: Renamed device_t to mach_device_t.
+ Renamed convert_device_to_port to mach_convert_device_to_port.
+
+ * kernel/i386/trap.c: (kernel_trap): Panic if page fault at address
+ 0 or kernel text segment.
+
+ * kernel/i386at/autoconf.c: (bus_master_init): Don't probe for
+ block drivers if the kernel is configured to use Linux block
+ drivers.
+ (bus_device_init): Likewise.
+
+ * kernel/i386at/kd.c: Renamed device_t to mach_device_t.
+ kernel/i386at/iopl.c: Likewise.
+ kernel/i386at/kd.c: Likewise.
+ kernel/i386/io_emulate.c: Likewise.
+
+ * kernel/i386at/conf.c: (dev_name_list): Don't include block
+ driver entries if the kernel is configured to use Linux block
+ drivers.
+
+ * kernel/i386at/i386at_ds_routines.c: New file.
+ kernel/i386at/dev_hdr.h: New file.
+ kernel/i386at/device_emul.h: New file.
+
+ * kernel/i386at/model_dep.c: (use_all_mem): Set it for LINUX_DEV.
+ (machine_init): Set BIOS warm boot magic. Unmap page zero after
+ device initialization.
+ (halt_all_cpus): Removed BIOS warm boot magic.
+
+ * kernel/i386at/nhd.c: Don't compile if using Linux drivers.
+
+ * kernel/i386at/gpl/linux/linux_block.c: New file.
+ kernel/i386at/gpl/linux/linux_emul.h: New file.
+ kernel/i386at/gpl/linux/linux_misc.c: New file.
+ kernel/i386at/gpl/linux/linux_sched.c: New file.
+ kernel/i386at/gpl/linux/linux_version.c: New file.
+
+ * kernel/i386at/gpl/linux/linux_net.c: Rewritten from scratch.
+
+ * kernel/i386at/gpl/linux/block: New directory. Block drivers
+ from Linux 1.3.68.
+
+ * kernel/i386at/gpl/linux/scsi: New directory. SCSI drivers from
+ Linux 1.3.68.
+
+ * kernel/i386at/gpl/linux/pci: New directory. PCI bus support from
+ Linux 1.3.68.
+
+ * kernel/i386at/gpl/linux/include: All files upgraded to Linux 1.3.68.
+ kernel/i386at/gpl/linux/net: Likewise.
+
+ * kernel/intel/pmap.c: (pmap_unmap_page_zero): New function.
+
+Wed Mar 6 18:14:22 1996 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * include/mach/setjmp.h: Changed definition of jmp_buf[] so that
+ it's big enough for NetBSD, FreeBSD and linux's libc setjmp
+ functions. Otherwise their setjmp ends up overwriting useful info
+ in the cthread_t structures.
+
+Tue Mar 5 17:47:59 MST 1996 Linus Kamb (kamb@cs.utah.edu)
+
+ * Changed kernel/i386at/if_ne.c
+ nerecv() recognizes FIPC_MSG_TYPE packets.
+ nefoutput() called by co-opting d_port_death.
+
+ * Changed kernel/i386at/conf.c
+ maps d_port_death to nefoutput()
+
+Fri Feb 23 19:44:52 1996 Bryan Ford <baford@cs.utah.edu>
+
+ Merged in Erich Boleyn's changes for the latest version of MultiBoot:
+
+ * boot/Proposal: updated to Erich's latest version of the proposal;
+ bumped the version number to 0.5.
+
+ * include/mach/machine/multiboot.h: added memory descriptions,
+ ELF symbol table passing support.
+
+ * kernel/i386at/boothdr.S: flag requiring memory info;
+ receive multiboot_info pointer in ebx instead of eax.
+
+ * kernel/i386at/model_dep.c: new way of finding available memory
+
+ * boot/boot_info_dump.c, boot/boot_start.c, boot/do_boot.S:
+ updated for new entry protocol and boot_info format.
+
+ * boot/bsd/main.c: updated to pass BIOS memory info.
+
+ * boot/linux/misc.c: [baford] updated to pass BIOS memory info.
+
+Thu Feb 15 18:36:54 1996 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * kernel/i386/locore.S: added cpu_shutdown() function from OSF
+ Mach mk6.1_free, originally from Dave Slattengren
+ <slatteng@mtxinu.COM>.
+
+ * kernel/i386at/kd.c: Made kdreboot() call cpu_shutdown() if
+ the attempt to reset the machine through the keyboard controller
+ doesn't work.
+
+ * kernel/i386at/kd.c: Doesn't include <com.h> anymore, or set
+ RCLINE. This isn't needed with the serial console changes.
+
+ * kernel/i386at/if_3c503.h: added 3com vendor ID numbers.
+
+ * kernel/i386at/if_ns8390.c: Added code from Linux to check for
+ 3com's vendor ID in the Etherlink II probe routine.
+
+Tue Feb 13 22:34:27 1996 Kevin T. Van Maren <vanmaren@fast.cs.utah.edu>
+
+ * kernel/i386at/conf.c: Changed the number of subdevices for the
+ nhd and sd devices to 2^10 for slice numbering.
+
+ * kernel/i386at/nhd.c: Modified to use the array of partitions
+ and the common partitioning code. Disklabels are still there,
+ but are fairly meaningless to the kernel (except for the basic
+ geometry information from the BIOS).
+
+ * kernel/i386at/disk.h: minor addition of BAD_BLK from the
+ old hdreg.h file (which is gone)
+
+ * kernel/i386at/pchd.c: removed as obsolete
+
+ * kernel/i386at/hdreg.h: removed as obsolete
+
+Tue Feb 13 16:22:48 1996 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * kernel/i386at/if_ne.c: Changed initialization code to follow the
+ National Semiconductor guidelines for setting up the 8390.
+
+Sun Jan 28 22:56:38 MST 1996 Bryan Ford <baford@cs.utah.edu>
+
+ * Checked in major MOSS changes from work at home:
+ got DPMI support working (Win 3.1, Win95, OS/2)
+ modulo a few minor incompatibilities;
+ VCPI and DOS support are fully functional (again);
+ VCPI and DPMI now support fastints.
+
+Wed Jan 17 17:18:57 1996 Bart Robinson <lomew@fast.cs.utah.edu>
+
+ * Serial console working for real.
+
+ * kernel/bogus/rc.h: removed RCBAUD and added RCADDR, prettied up.
+
+ * boot/serial.S: added. This is used by the boot adaptor part,
+ the rest of the kernel uses mach4/kernel/device/cons.[ch]. This
+ file was taken from FreeBSD who took it from Mach sometime.
+
+ * kernel/pc/putchar.c: changed to use serial.S if <rc.h> says so.
+
+ * boot/bsd/Makerules: serial.S needs kernel/bogus to be on the -I
+ list
+
+ * boot/linux/Makerules: ditto
+
+ * kernel/i386at/model_dep.c: changed to call cninit() rather than
+ kdinit(). Also if booted with -d will call cninit() before
+ Debugger(). It used to have kd_cnmaygetc call cninit (yuk!)
+
+ * kernel/i386at/console_list.h: removed. The console is handled
+ differently now.
+
+ * kernel/i386at/conf.c: console entry in the device list is now
+ the `virtual' one "cn" instead of "kd"
+
+ * kernel/i386at/cons_conf.c: added. Defines the constab array
+ which tells us which console-like things to look for.
+
+ * kernel/i386at/com.c: tweaked to export an interface that
+ mach4/kernel/device/cons.[ch] wants. RCBAUD is in here now
+ rather than rc.h.
+
+ * kernel/i386at/kd.c: ditto
+
+Wed Jan 17 13:51:46 1996 Kevin Van Maren <vanmaren.cs.utah.edu>
+
+ * Unified the partition code for IDE & SCSI device drivers
+
+ * kernel/i386at/pc_label.c: deleted
+ * kernel/i386/pc_hd_label.c: deleted (used by hd.c)
+ * kernel/i386/hd.c: deleted (old 'ide' driver)
+
+ * kernel/i386at/nhdreg.h: Removed the definition of PDLOCATION,
+ should be in <i386at/disk.h> not here!
+
+ * kernel/i386at/nhd.c: Removed IDE specific PC partition code.
+ Changed to use the unified partition code.
+
+Mon Oct 30 08:33:01 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * mach4-i386/kernel/i386at/interrupt.S: from Shantanu: proper fix
+ for interrupt disable bug.
+ * mach4-i386/kernel/i386/spl.S: likewise.
+
+Mon Oct 23 11:08:33 1995 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * bootstrap/translate_root.c: new file
+ bootstrap/translate_root.h: likewise
+
+ Code to convert strange root device encodings (like Linux device
+ numbers) into what Mach wants.
+
+Sun Oct 22 18:36:49 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Integrated Steve's and Bart's changes from the UK02p16-work branch.
+
+ * boot: updated booter/kernel interface to be (mostly) conformant
+ to MultiBoot-0.3 proposal.
+
+ * boot/bsd/crt0.S: pass a boot loader value (Mach/NetBSD/FreeBSD)
+ as well as boothowto and bootdev.
+
+ * boot/bsd/main.c: Fabricate a proper MultiBoot kernel command line
+ from the boothowto and bootdev values.
+
+ * boot/bsd/main.c: Cleaned up symbol table loading, so it should
+ work with all a.out format variants.
+
+ * boot/bsd/main.c: Provide the kernel info on the device we booted from.
+
+ * mach4-i386/kernel/i386at/model_dep.c: MultiBoot-0.3 compliance.
+
+ * mach4-i386/kernel/i386at/interrupt.S: mostly fix interrupt nesting
+ problem with Linux driver changes.
+
+Mon Oct 16 14:49:01 1995 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * boot/bsd/crt0.S: Now should be able to differentiate between
+ the original Mach bootblocks, the old BSD (which NetBSD uses)
+ bootblocks and the new-style FreeBSD bootblocks.
+
+Fri Oct 13 16:59:10 1995 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * boot/boot_info.c: Add code to add the symbol table into the
+ occupied list that we hand to the kernel.
+
+ * boot/bsd/crt0.S: Added in code to pass along the boothowto and
+ bootdev. We should be able to tell between Mach, FreeBSD and
+ NetBSD bootblocks, but we don't tell anyone about it yet.
+
+ * boot/bsd/main.c: Now tries to find the kernel symbol table and
+ put it into a `safe' place for the kernel to grab later. It
+ also sets the bsd_boothowto and bsd_bootdev fields of the
+ boot_info struct.
+
+ * bootstrap/exec.c: Enabled code to load symbol tables.
+
+ * kernel/i386/setroot.c: Changed code to work with BSD bootblocks
+ (and their major device numbering...sd == 4, instead of 3).
+
+ * kernel/i386at/model_dep.c: Now sets kern_sym_start and
+ kern_sym_size from boot_info, along with boothowto and
+ bootinfo from the bootblocks.
+
+Wed Oct 4 20:04:27 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ Released moss-0.80 (tag moss-0-80)
+
+Wed Oct 4 12:05:57 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ Merged in changes made for DOS extender at home:
+ * added mach/i386/debug_reg.h,
+ containing definitions for accessing the debug registers.
+ * changed i16 code to add physical memory to the malloc pool
+ (assuming we're using the lmm to handle the malloc pool).
+
+Tue Oct 3 13:22:50 1995 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * kernel/pc/i16/i16_raw.c: baford's changes to work with gcc
+ 2.7.0.
+ boot/boot_start.c: Likewise.
+
+Thu Sep 28 17:37:44 1995 steve clawson <sclawson@marker.cs.utah.edu>
+
+ * Makefile.in: added image, bsdimage and linuximage targets.
+ image will build both the linux and bsd images (Mach and zMach),
+ in the Object directory, the others will only build their
+ respective images.
+
+ * kernel/bogus/hpp.h: Changed #define to be 1, so that the driver
+ is included by default.
+
+ * kernel/i386at/autoconf.c: Changes for NE2000.
+ kernel/i386at/conf.c: Likewise.
+ kernel/i386at/if_ne.c: NE2000 driver.
+ kernel/i386at/if_nereg.h: NE2000 driver header file.
+
+ * kernel/i386at/gpl/if_hpp.c: Changed #include of vm_param.h to
+ "vm_param.h", since it wasn't getting found in the usual place.
+ kernel/i386at/gpl/if_ul.c: Likewise.
+ kernel/i386at/gpl/if_wd.c: Likewise.
+
+
+Fri Aug 25 23:39:19 1995 Shantanu Goel <goel@monk.mcl.cs.columbia.edu>
+
+ * kernel/i386/hardclock.c: (cold): Deleted. The Linux code now
+ uses its own clock interrupt handler.
+
+ * kernel/i386/spl.S: Use EXT to reference global variables.
+
+ * kernel/i386/spl.h: spl_t must be int; asm code assumes this.
+
+ * kernel/i386at/autoconf.c: Changed default IRQ for "wd" to 9.
+ (probeio): Don't compare dev->ctlr against -1. GCC 2.7.0 doesn't
+ like it.
+
+ * kernel/i386at/interrupt.S: (interrupt): Removed check
+ for spurious interrupts.
+
+ * kernel/i386at/model_dep.c: (cold): Deleted.
+ (startrtclock): Deleted check against multiple invocations;
+ it's no longer needed.
+
+ * kernel/i386at/pic_isa.c: (intpri): Initialize clock interrupt
+ priority to 0 to disable clock interrupts.
+
+ * kernel/i386at/gpl/linux/linux_dma.c:
+ (linux_request_dma, linux_free_dma): Made function declarations
+ consistent with prototype.
+
+ * kernel/i386at/gpl/linux/linux_init.c: (linux_init): Deleted
+ call to startrtclock(). Install temporary clock interrupt handler
+ for Linux device initialization.
+
+ * kernel/i386at/gpl/linux/linux_net.c: (linux_net_dev_init): Added
+ entries for SEEQ8005, HP100, and Intel EtherExpress Pro.
+ (linux_alloc_skb): No longer takes a 2nd argument in accordance
+ with Linux 1.3.x.
+ (net_soft): Cleaned up DMA handling.
+
+ * kernel/i386at/gpl/linux/linux_port.c: (linux_request_region): Made
+ function declaration consistent with prototype.
+
+ * kernel/i386at/gpl/linux/linux_printk.c: Get GCC's stdarg.h.
+
+ * kernel/i386at/gpl/linux/linux_vsprintf.c: Get GCC's stdarg.h.
+
+ * kernel/i386at/gpl/linux/net: All files in this directory
+ have been upgraded to Linux 1.3.20.
+
+Mon Jun 26 12:41:56 MDT 1995 Miles Bader <miles@gnu.ai.mit.edu>
+
+ * boot/boot_start.c: changed "m" asm constraints to "rm"
+ to make gcc 2.7.0 happy.
+ * kernel/pc/i16/i16_raw.c: avoid using #ifdef's
+ in the middle of macro invocations; gcc 2.7.0 doesn't like them.
+
+Fri May 19 11:17:13 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p17, as a private snapshot for goel.
+
+Tue May 16 00:59:56 1995 Shantanu Goel <goel@monk.mcl.cs.columbia.edu>
+
+ * configure: Recognise --enable-linuxdev.
+
+ * kernel/Makerules: Recognise LINUX_DEV.
+ Don't compile ported Linux drivers if using the native Linux ones.
+
+ * include/mach/machine/asm.h (TEXT_ALIGN, DATA_ALIGN): New defines.
+
+ * kernel/i386/ipl.h (NSPL): New define.
+ (intpri): Make type int.
+ (SPLHI, SPL7, IPLHI): Defined to be 7.
+
+ * kernel/i386/pic.h (PIC_MASTER_ICW, PIC_MASTER_OCW): New defines.
+ (PIC_SLAVE_ICW, PIC_SLAVE_OCW): New defines.
+
+ * kernel/i386/pic.c (pic_mask, curr_pic_mask): Make type int.
+ (NSPL): Use it.
+
+ * kernel/i386/spl.S: Rewritten from scratch.
+
+ * kernel/i386/hardclock.c (hardclock): Linux support changes:
+
+ * Don't call the Mach interrupt handler during configuration.
+
+ * Never pass SPL0 flag to clock_interrupt.
+
+ * Handle Linux timers.
+
+ * kernel/i386at/conf.c: Added entries for Linux network drivers.
+ Disable native Mach drivers when using the Linux ones.
+
+ * kernel/i386at/autoconf.c: Disable probing of native Mach drivers
+ when using the Linux ones.
+ Disable printer driver when using Linux drivers because
+ it blindly allocates the irq, causing conflicts with the parallel
+ port Linux network drivers.
+
+ * kernel/i386at/model_dep.c (machine_init): Call linux_init.
+ (alloc_dma_mem): Modified so it can be called any time while
+ the system is running. Interface changed.
+ (startrtclock): Protect against multiple invocations.
+
+ * kernel/i386at/nfd.c (fdprobe): Updated call to alloc_dma_mem
+ to account for interface change.
+
+ * kernel/i386at/interrupt.S: Rewritten from scratch.
+
+ * kernel/i386at/pic_isa.c (intpri): Make type int.
+
+ * kernel/i386at/gpl/linux: Directory and files under it added.
+ These provide native Linux network driver support.
+
+Fri May 12 10:47:41 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p16, as an internal snapshot for rdukes
+
+Wed May 3 10:47:41 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p15.
+ (UK02p14 was just an intermediate tag after adding ELF support.)
+
+ Configuration-related changes:
+
+ ELF support changes:
+
+ * configure: set 'enable_elf' if user specifies --enable-elf.
+
+ * include/mach/machine/asm.h: don't add underscores to symbols if ELF.
+
+ * All over: make proper use of EXT/LEXT/ENTRY macros
+ instead of assuming all public symbols are prefixed with underscores.
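+
+ The idea, as a minimal sketch (this is not the actual asm.h; the exact
+ macro bodies and the ELF test are assumed -- only the EXT name and the
+ underscore convention come from these entries):
+
+     #ifdef ELF
+     #define EXT(sym)  sym         /* ELF: symbol names are undecorated */
+     #else
+     #define EXT(sym)  _ ## sym    /* a.out: prepend a leading underscore */
+     #endif
+     /* LEXT/ENTRY build on the same trick for assembly sources. */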
+
+ * boot/bsd/main.c: if a.out-kludge bit isn't set, use exec_load()
+ from libmach_exec.a to interpret the boot image executable header.
+
+ * kernel/i386/fpe.b_elf: created a uuencoded ELF version of fpe.b
+
+ * libmach/loader_info.c: deleted, became libmach/exec/a_out.c.
+
+ Miscellaneous changes:
+
+ * Makeconf.in: got rid of LD86 and AS86; no longer needed
+
+ * Makerules: various cleanups and fixes, ELF stuff.
+
+ * include/mach/machine/vm_param.h: Moved private kernel parts
+ to kernel/i386/vm_param.h.
+
+ * kernel/dos/*: added a bunch of Unix-like low-level file I/O
+ routines that translate to the appropriate DOS software interrupts.
+
+ * kernel/i386/seg.h: use MACH_INLINE macro for inline functions.
+
+ * kernel/i386at/eisa.h: added from MK83a, was missing for some reason.
+
+Fri Apr 7 17:43:22 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p13.
+
+ * Got rid of the UK02-freeze branch in the i386 tree.
+ As a result, all the MK83a changes in this tree are now merged in.
+
+Fri Apr 7 14:19:55 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Merged in diffs from UK02p9 to UK02p12:
+
+ Mon Apr 3 19:55:41 MDT 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p11.
+ Too many changes to describe concisely,
+ almost all affecting only the boot mechanism for now.
+ Also, integrated the kernel/util, kernel/pc, kernel/dos code
+ from my DOS extender project at home.
+
+ Mon Feb 27 16:32:59 MST 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p10 as a non-functional snapshot
+
+Fri Feb 10 13:25:54 MST 1995 Bryan Ford <baford@cs.utah.edu>
+
+ Merged in diffs from UK02p7 to UK02p9:
+
+ Tue Jan 17 15:55:01 MST 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * Released UK02p8
+
+ Tue Jan 17 12:03:37 MST 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * boot/bsd/Makerules: BSD default boot image is now
+ named 'Mach.all' instead of 'boot.all'.
+
+ Sat Jan 14 15:13:54 MST 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * libmach/unixoid/Makefile: enable building libmach.a
+ only if user specifies --enable-libmach to configure.
+
+ Fri Jan 13 15:14:43 1995 steve clawson <sclawson@fast.cs.utah.edu>
+
+ * Integrated Shantanu Goel's new floppy and hard drive
+ device drivers. (sg@jahil.cs.columbia.edu)
+
+ * kernel/i386at/nhd.c: Rewritten from scratch.
+ kernel/i386at/nhdreg.h: Likewise.
+ kernel/i386at/nfd.c: Likewise.
+ kernel/i386at/nfdreg.h: Likewise.
+
+ * kernel/i386at/model_dep.c:
+ Added alloc_dma_mem().
+
+ * kernel/bogus/fd.h:
+ Set NFD to 4 to include floppy support.
+
+ * Added an #if 0/#endif wrapper around:
+ kernel/i386/pc_hd_label.c
+ kernel/i386at/pchd.c
+ kernel/i386at/hd.c
+ kernel/i386at/fd.c
+
+ Fri Jan 13 14:56:35 1995 steve clawson <sclawson@fast.cs.utah.edu>
+
+ * added .../kernel/i386at/gpl directory to hold gpl'd
+ code (currently it holds ethernet drivers ported from
+ Linux).
+
+ * Integrated Shantanu Goel's ethernet driver ports from
+ Linux into the gpl directory. (sg@jahil.cs.columbia.edu)
+
+ * kernel/i386at/gpl/if_ns.c: New file.
+ kernel/i386at/gpl/if_nsreg.h: New file.
+ kernel/i386at/gpl/if_ul.c: New file.
+ kernel/i386at/gpl/if_wd.c: New file.
+ kernel/bogus/ul.h: New file.
+ kernel/bogus/wd.h: New file.
+
+ * kernel/i386at/autoconf.c:
+ Added entries for new drivers, "ul", "wd".
+
+ * kernel/i386at/conf.c:
+ Added entries for new drivers, "ul", "wd".
+
+ * kernel/i386at/if_ns8390.c:
+ Compile file only #if NNS8390 > 0. This and if_wd.c
+ are probably mutually exclusive. (ie. don't try and stick
+ both in your kernel...)
+
+ * Added HP PC LAN Plus support code ported from Linux.
+
+ * kernel/i386at/gpl/if_hpp.c: New file.
+ kernel/bogus/hpp.h: New file.
+
+ * kernel/i386at/autoconf.c:
+ Added entry for new driver, "hpp".
+
+ * kernel/i386at/conf.c:
+ Added entry for new driver, "hpp".
+
+ * kernel/i386at/gpl/if_ns.c: nsintr(): Added back in the
+ code to read the counter values from the card if they
+ overflow. The HP PC LAN Plus needs to have this (just
+ acking the interrupt doesn't reset the counters on the
+ card I've got).
+
+ 13 Jan 1995 19:53:53 +0100 (MET) Remy.Card@masi.ibp.fr (Remy CARD)
+
+ * libmach/unixoid/Makefile.in: reenabled building of libmach.a.
+
+ Fri Jan 13 14:52:42 MST 1995 Bryan Ford <baford@cs.utah.edu>
+
+ * libmach/loader_info.c: fixed bug interpreting QMAGIC executables.
+
+Fri Nov 25 13:56:32 MST 1994 Bryan Ford (baford@cs.utah.edu)
+
+ Merged in diffs from UK02p6 to UK02p7:
+
+ Sun Nov 20 11:56:51 MST 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Changed the ifdefs referring to '__liXnux__' back to '__linux__'
+ so that partition interpretation will once again default to
+ standard PC-style when compiled on a Linux machine.
+ If you're cross-compiling from Linux and want BSD partitioning
+ anyway, add '-U__linux__' to the DEFINES line in Makeconf.
+
+ * Modified Alvin's compressed boot image mechanism slightly
+ so the Linux-derived files can be taken _completely_
+ unmodified from the Linux kernel source distribution.
+ Easier to keep up-to-date this way. Also created a little
+ shell script to update these files automatically.
+
+ * Separated out the 'mkbootimage' script and supporting stuff
+ into a separate 'boot/bsd' subdirectory, just like the
+ Linux boot image stuff was separated out. Now the top-level
+ 'boot' directory contains _only_ stuff related to
+ boot _modules_.
+
+ * The top-level Makefile is no longer responsible for creating
+ the "default boot image" containing all available boot modules.
+ Instead, boot/bsd/Makefile now produces a default 32-bit boot
+ image ('boot.all'), and boot/linux/Makefile produces a
+ default 16-bit compressed Linux boot image ('zMach.all').
+
+ * Cleaned up the console code a little bit.
+
+ Mon Nov 14 22:50:57 -0500 1994 Alvin Starr <alvin@eyepoint.com>
+
+ * Separated out the Linux boot image creation mechanisms
+ into a separate 'boot/linux' subdirectory, and changed
+ it to produce compressed Linux boot images. The Linux
+ boot images produced are now _exactly_ like those
+ produced by Linux, in that they use all the same 16-bit
+ setup code and decompression code and everything.
+ This also means you can 'dd' one of these boot images
+ straight onto a floppy to create a Mach boot floppy.
+
+ To create compressed Linux boot images now, use the
+ new 'mkliloimage' script. It works just like 'mkbootimage'.
+
+ The old 'mkbootimage' script is still there, but is greatly
+ simplified since it no longer needs to produce Linux-compatible
+ boot images at all. One side-effect of this is that the
+ 32-bit boot images produced by this script will be in the
+ "native" executable format of the build environment in use.
+ For example, if you build under NetBSD, the produced 32-bit
+ boot images will load fine from BSD boot blocks, but probably
+ not from Mach boot blocks, because the Mach boot blocks don't
+ know about BSD's newer a.out format.
+
+ * Enabled SCSI drivers.
+
+ * Got the kernel debugger working again.
+
+ * Changed kd.c to handle tabs correctly.
+
+ Fri Sep 30 21:38:31 1994 Patrick Doyle (wpd@peanutbutter)
+
+ * Makefile.in
+ (SORTED_SUBDIRS):
+ Added a macro to generate a "sorted" list of subdirectories
+ such that those listed in $(FIRST_SUBDIRS) are first. Changed
+ the "all" and "install" rules to use this macro so we don't
+ have to descend into those subdirs twice. (Idea was stolen
+ from glibc makefile).
+ (boot.all):
+ Added rule to generate "boot.all" in the top level object
+ directory and changed "all" to create this target.
+
+Sat Oct 22 14:59:03 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Incorporated tons of minor changes and additions
+ produced by my Mach-based DOS extender project.
+ Mostly involved cleaning up and adding to libmach_sa.
+
+Tue Oct 25 12:32:28 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Released UK02p6.
+
+ * Moved exec header interpretation stuff from the bootstrap code
+ into libmach so the kernel could also use it when initializing
+ the bootstrap program itself.
+
+Fri Sep 16 11:33:44 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Fixed a bug in locore.S that caused occasional
+ "null pv_list" panics from within pmap.c.
+
+ * Fixed a bug in pmap.c: forgot to clear the kernel
+ page directory before initializing (only part of) it.
+
+Fri Sep 9 10:45:26 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Added kernel/imps directory
+ for Intel MultiProcessor Spec compliant PCs;
+ only enabled if user specifies --enable-mp.
+
+ * boot image building scripts: exit when errors occur
+
+ * boot/mkbmod.sh: changed -s to -no-strip to avoid confusion
+
+Thu Sep 8 16:51:15 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * bootstrap/exec.c:
+ support NetBSD big-endian ZMAGIC executables
+
+ * include/Makefile.in:
+ fixed installation directories
+
+Tue Sep 6 08:49:39 1994 Patrick Doyle (wpd@peanutbutter)
+
+ * Makefile.in (install): Changed "install" rule so that it doesn't
+ gratuitously reinstall boot.all.
+
+Mon Sep 5 12:11:45 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Full cross compilation support
+
+ * Changed default Mach boot image filename
+ from `machboot' to just `boot'.
+
+ * Automatically create a `boot.all' upon installation
+ containing all boot modules.
+
+Sat Sep 3 07:13:44 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Bugfix: boot code didn't clear NT flag properly,
+ and the VTech Platinum BIOS was leaving it set.
+
+ * Added get_esp() and get_eflags() in proc_reg.h.
+
+ * Added debug_trace facility.
+
+ * bootstrap/exec.c: got rid of #ifdef __linux__'s;
+ instead, support both at once and detect at runtime.
+
+ * include/mach/machine/disk.h,
+ kernel/i386/pc_hd_label.c:
+ hacked #ifdef __linux__'s to #ifdef __liXnux__
+ to get back to BSD-style partition table interpretation.
+
+Fri Aug 26 09:52:35 1994 Jeff Law (law@fast.cs.utah.edu)
+
+ * configure: Add "-print" argument to find so that it works with
+ non-GNU finds.
+
+Mon Aug 1 18:15:00 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Got most of the migrating threads changes merged in
+
+Mon Aug 1 14:45:52 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Integrated Louis-D. Dubeau's pchd (PC hard drive) patches.
+
+
+Wed Jul 20 14:00:45 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * include/Makefile: top-level header files weren't getting installed.
+
+ * Moved kernel i386/eflags.h to include/mach/machine/eflags.h,
+ so outsiders such as LITES can use it if they want to.
+
+ * Released UK02
+
+
+Fri Jul 15 11:46:19 MDT 1994 Bryan Ford (baford@cs.utah.edu)
+
+ * Kernel address space and physical memory
+ is now mapped at virtual address 0 instead of 0xC0000000.
+ (It's still up high in linear memory,
+ but the kernel segment registers are used to shift addresses.)
+
+ * All copyin/copyout code now uses segment registers
+ to access user space, like Linux does.
+
+ * Separated the console stuff out of kd.c and into (new) console.c.
+ No longer initialize the console at the very beginning in i386_init;
+ instead do it with the rest of the devices, in machine_startup.
+
+ * console.c: Created a kludgy little "immediate console" for kernel debugging
+ which spews things onto a CGA/EGA/VGA display
+ without requiring _any_ initialization first.
+ It gets disabled automatically when the real console is initialized.
+
+ * include/mach/boot_info.h: deleted, because it was only for the old
+ `makeboot'-based server bootstrap mechanism.
+ Superseded by the new include/mach/boot.h.
+
+ * Lots of other minor changes.
+
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..558b094e
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,176 @@
+# Makefile for Mach 4 kernel directory
+# Copyright 1996 Free Software Foundation, Inc.
+# This file is part of GNU Mach. Redistribution terms are not yet decided.
+
+
+
+# Set at entry:
+# $(srcdir) $(systype) $(installed-clib)
+
+sysdep = $(srcdir)/$(systype)
+
+ifeq ($(MIG),)
+MIG := mig
+endif
+
+ifeq ($(AWK),)
+AWK := awk
+endif
+
+all:
+
+# All the source in each directory. Note that `bogus' contains no source,
+# only header files.
+
+# Generic code for various hardware drivers
+chips-files = atm.c audio.c bt431.c bt455.c bt459.c build_font.c busses.c \
+ cfb_hdw.c cfb_misc.c dc503.c dtop_handlers.c dtop_hdw.c dz_hdw.c \
+ fb_hdw.c fb_misc.c fdc_82077_hdw.c frc.c ims332.c isdn_79c30_hdw.c \
+ kernel_font.c lance.c lance_mapped.c lk201.c mc_clock.c mouse.c \
+ nc.c nw_mk.c pm_hdw.c pm_misc.c scc_8530_hdw.c screen.c \
+ screen_switch.c serial_console.c sfb_hdw.c sfb_misc.c spans.c \
+ tca100.c tca100_if.c xcfb_hdw.c xcfb_misc.c
+
+# Generic code for various SCSI unit types
+scsi-files = disk_label.c mapped_scsi.c pc_scsi_label.c rz.c rz_audio.c \
+ rz_cpu.c rz_disk.c rz_disk_bbr.c rz_host.c rz_tape.c scsi.c \
+ scsi_alldevs.c scsi_comm.c scsi_cpu.c scsi_disk.c scsi_jukebox.c \
+ scsi_optical.c scsi_printer.c scsi_rom.c scsi_scanner.c \
+ scsi_tape.c scsi_worm.c
+
+# Icky kernel debugger
+ddb-files = $(addprefix db_,$(ddb-names))
+ddb-names = access.c aout.c break.c command.c cond.c examine.c expr.c \
+ ext_symtab.c input.c lex.c macro.c mp.c output.c print.c run.c \
+ sym.c task_thread.c trap.c variables.c watch.c write_cmd.c
+
+# Device support interfaces
+device-files = blkio.c chario.c cirbuf.c cons.c dev_lookup.c dev_name.c \
+ dev_pager.c device_init.c dk_label.c ds_routines.c net_io.c subrs.c
+
+# IPC implementation
+ipc-files = $(addprefix ipc_,$(ipc-names)) \
+ mach_msg.c mach_port.c mach_rpc.c mach_debug.c fipc.c
+ipc-names = entry.c hash.c init.c kmsg.c marequest.c mqueue.c \
+ notify.c object.c port.c pset.c right.c space.c splay.c \
+ table.c target.c thread.c
+
+# "kernel" implementation (tasks, threads, trivia, etc.)
+kern-files = act.c ast.c bootstrap.c counters.c debug.c eventcount.c \
+ exception.c host.c ipc_host.c ipc_kobject.c ipc_mig.c ipc_sched.c \
+ ipc_tt.c kalloc.c lock.c lock_mon.c mach_clock.c mach_factor.c \
+ machine.c pc_sample.c printf.c priority.c processor.c profile.c \
+ queue.c sched_prim.c startup.c strings.c syscall_emulation.c \
+ syscall_subr.c syscall_sw.c task.c thread.c thread_swap.c \
+ time_stamp.c timer.c xpr.c zalloc.c elf-load.c
+
+# Still more trivia
+util-files = about_to_die.c cpu.c cpu_init.c die.c putchar.c puts.c
+
+# Virtual memory implementation
+vm-files = $(addprefix vm_,$(vm-names)) memory_object.c
+vm-names = debug.c external.c fault.c init.c kern.c map.c \
+ object.c pageout.c resident.c user.c
+
+
+
+# Object files that go into the kernel image. (This will be augmented by the
+# machine dependent Makefile fragment.)
+
+# Basic kernel source for Mach
+objfiles := $(subst .c,.o,$(ipc-files) $(kern-files) $(util-files) $(vm-files))
+vpath %.c $(srcdir)/ipc $(srcdir)/kern $(srcdir)/util $(srcdir)/vm
+
+# These device support files are always needed; the others are needed only
+# if particular drivers want the routines.
+# XXX functions in device/subrs.c should each be moved elsewhere
+objfiles += cons.o dev_lookup.o dev_name.o dev_pager.o device_init.o \
+ ds_routines.o subrs.o net_io.o blkio.o chario.o
+vpath %.c $(srcdir)/device
+
+# DDB support -- eventually to die. Please.
+objfiles += $(subst .c,.o,$(ddb-files))
+vpath %.c $(srcdir)/ddb
+
+# Version number
+objfiles += version.o
+vpath version.c $(srcdir)
+
+
+# We steal routines from the C library and put them here.
+objfiles += clib-routines.o
+
+clib-routines = memcpy memset bcopy bzero htonl ntohl ntohs
+
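+# ('-r' keeps the result relocatable; each '-u' marks the named symbol as
+# undefined so the linker pulls its definition out of $(installed-clib).)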
+clib-routines.o: $(installed-clib)
+ $(LD) -o clib-routines.o -r $(addprefix -u ,$(clib-routines)) $(installed-clib)
+
+
+# Automatically generated source
+
+# User stubs
+objfiles += memory_object_user_user.o memory_object_default_user.o \
+ device_reply_user.o memory_object_reply_user.o
+
+# Server stubs
+objfiles += device_server.o device_pager_server.o mach_port_server.o \
+ mach_server.o mach4_server.o mach_debug_server.o mach_host_server.o
+
+# Where to find the relevant Mig source files
+vpath %.cli $(srcdir)/vm $(srcdir)/device
+vpath %.srv $(srcdir)/device $(srcdir)/ipc $(srcdir)/kern
+
+
+# XXXX temporary
+vm_fault.o: memory_object_user.h
+vm_object.o: memory_object_default.h
+ds_routines.o: device_reply.h
+
+
+
+
+#
+# Compilation flags
+#
+
+DEFINES += -DMACH -DCMU -DMACH_KERNEL -DKERNEL
+INCLUDES += -I. -I$(srcdir) -I$(srcdir)/util -I$(srcdir)/bogus \
+ -I$(srcdir)/kern -I$(srcdir)/device \
+ -I$(srcdir)/include -I$(srcdir)/include/mach/sa
+
+include $(sysdep)/Makefrag
+
+CPPFLAGS += -nostdinc $(DEFINES) $(INCLUDES)
+
+MIGFLAGS += $(CPPFLAGS)
+
+#
+# Image
+#
+# (The newline in this command makes it much easier to read in make output.)
+all: kernel
+kernel: $(objfiles)
+ $(LD) -o $@ $(LDFLAGS) \
+ $(objfiles)
+#
+# How to do some things
+#
+
+# Building foo.h from foo.sym:
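+# (Roughly: gensym.awk emits C whose compiled assembly carries each value on
+# a line starting with '*' behind a 'mAgIc' marker; the sed pass below turns
+# those lines into #define's and blanks everything else.)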
+%.symc: %.sym
+ $(AWK) -f $(srcdir)/gensym.awk $< >$*.symc
+%.symc.o: %.symc
+ $(CC) -S $(CPPFLAGS) $(CFLAGS) $(CPPFLAGS-$@) -x c -o $@ $<
+%.h: %.symc.o
+ sed <$< -e 's/^[^*].*$$//' | \
+ sed -e 's/^[*]/#define/' -e 's/mAgIc[^-0-9]*//' >$@
+
+# Building from foo.cli
+%.h %_user.c: %.cli
+ $(MIG) $(MIGFLAGS) -header $*.h -user $*_user.c -server /dev/null $<
+
+# Building from foo.srv
+%_interface.h %_server.c: %.srv
+ $(MIG) $(MIGFLAGS) -header $*_interface.h -server $*_server.c -user /dev/null $<
+
+
diff --git a/bogus/bootstrap_symbols.h b/bogus/bootstrap_symbols.h
new file mode 100644
index 00000000..77dcfc25
--- /dev/null
+++ b/bogus/bootstrap_symbols.h
@@ -0,0 +1 @@
+#define BOOTSTRAP_SYMBOLS 0
diff --git a/bogus/fast_tas.h b/bogus/fast_tas.h
new file mode 100644
index 00000000..0fcb1077
--- /dev/null
+++ b/bogus/fast_tas.h
@@ -0,0 +1 @@
+#define FAST_TAS 0
diff --git a/bogus/hw_footprint.h b/bogus/hw_footprint.h
new file mode 100644
index 00000000..c3e8eb34
--- /dev/null
+++ b/bogus/hw_footprint.h
@@ -0,0 +1 @@
+#define HW_FOOTPRINT 0
diff --git a/bogus/mach_assert.h b/bogus/mach_assert.h
new file mode 100644
index 00000000..5e5124a8
--- /dev/null
+++ b/bogus/mach_assert.h
@@ -0,0 +1,5 @@
+#ifdef DEBUG
+#define MACH_ASSERT 1
+#else
+#define MACH_ASSERT 0
+#endif
diff --git a/bogus/mach_counters.h b/bogus/mach_counters.h
new file mode 100644
index 00000000..d2ac85a9
--- /dev/null
+++ b/bogus/mach_counters.h
@@ -0,0 +1 @@
+#define MACH_COUNTERS 0
diff --git a/bogus/mach_debug.h b/bogus/mach_debug.h
new file mode 100644
index 00000000..cc309b10
--- /dev/null
+++ b/bogus/mach_debug.h
@@ -0,0 +1 @@
+#define MACH_DEBUG 1
diff --git a/bogus/mach_fixpri.h b/bogus/mach_fixpri.h
new file mode 100644
index 00000000..f3dc988a
--- /dev/null
+++ b/bogus/mach_fixpri.h
@@ -0,0 +1 @@
+#define MACH_FIXPRI 1
diff --git a/bogus/mach_host.h b/bogus/mach_host.h
new file mode 100644
index 00000000..0fc2b44e
--- /dev/null
+++ b/bogus/mach_host.h
@@ -0,0 +1 @@
+#define MACH_HOST 0
diff --git a/bogus/mach_ipc_compat.h b/bogus/mach_ipc_compat.h
new file mode 100644
index 00000000..f6ce886c
--- /dev/null
+++ b/bogus/mach_ipc_compat.h
@@ -0,0 +1 @@
+#define MACH_IPC_COMPAT 1
diff --git a/bogus/mach_ipc_debug.h b/bogus/mach_ipc_debug.h
new file mode 100644
index 00000000..035c5969
--- /dev/null
+++ b/bogus/mach_ipc_debug.h
@@ -0,0 +1 @@
+#define MACH_IPC_DEBUG 1
diff --git a/bogus/mach_ipc_test.h b/bogus/mach_ipc_test.h
new file mode 100644
index 00000000..e0891917
--- /dev/null
+++ b/bogus/mach_ipc_test.h
@@ -0,0 +1 @@
+#define MACH_IPC_TEST 0
diff --git a/bogus/mach_kdb.h b/bogus/mach_kdb.h
new file mode 100644
index 00000000..128c2854
--- /dev/null
+++ b/bogus/mach_kdb.h
@@ -0,0 +1,5 @@
+#ifdef DEBUG
+#define MACH_KDB 1
+#else
+#define MACH_KDB 0
+#endif
diff --git a/bogus/mach_ldebug.h b/bogus/mach_ldebug.h
new file mode 100644
index 00000000..e82f4000
--- /dev/null
+++ b/bogus/mach_ldebug.h
@@ -0,0 +1 @@
+#define MACH_LDEBUG 0
diff --git a/bogus/mach_lock_mon.h b/bogus/mach_lock_mon.h
new file mode 100644
index 00000000..c218f14e
--- /dev/null
+++ b/bogus/mach_lock_mon.h
@@ -0,0 +1 @@
+#define MACH_LOCK_MON 0
diff --git a/bogus/mach_machine_routines.h b/bogus/mach_machine_routines.h
new file mode 100644
index 00000000..1f510f99
--- /dev/null
+++ b/bogus/mach_machine_routines.h
@@ -0,0 +1 @@
+#define MACH_MACHINE_ROUTINES 0
diff --git a/bogus/mach_mp_debug.h b/bogus/mach_mp_debug.h
new file mode 100644
index 00000000..b396db7f
--- /dev/null
+++ b/bogus/mach_mp_debug.h
@@ -0,0 +1 @@
+#define MACH_MP_DEBUG 0
diff --git a/bogus/mach_pagemap.h b/bogus/mach_pagemap.h
new file mode 100644
index 00000000..debeed76
--- /dev/null
+++ b/bogus/mach_pagemap.h
@@ -0,0 +1 @@
+#define MACH_PAGEMAP 1
diff --git a/bogus/mach_pcsample.h b/bogus/mach_pcsample.h
new file mode 100644
index 00000000..ba9db4dc
--- /dev/null
+++ b/bogus/mach_pcsample.h
@@ -0,0 +1 @@
+#define MACH_PCSAMPLE 1
diff --git a/bogus/mach_ttd.h b/bogus/mach_ttd.h
new file mode 100644
index 00000000..b57ea755
--- /dev/null
+++ b/bogus/mach_ttd.h
@@ -0,0 +1 @@
+#define MACH_TTD 0
diff --git a/bogus/mach_vm_debug.h b/bogus/mach_vm_debug.h
new file mode 100644
index 00000000..b746dca0
--- /dev/null
+++ b/bogus/mach_vm_debug.h
@@ -0,0 +1 @@
+#define MACH_VM_DEBUG 1
diff --git a/bogus/net_atm.h b/bogus/net_atm.h
new file mode 100644
index 00000000..103a8b20
--- /dev/null
+++ b/bogus/net_atm.h
@@ -0,0 +1 @@
+#define NET_ATM 0
diff --git a/bogus/norma_device.h b/bogus/norma_device.h
new file mode 100644
index 00000000..c0c948cf
--- /dev/null
+++ b/bogus/norma_device.h
@@ -0,0 +1 @@
+#define NORMA_DEVICE 0
diff --git a/bogus/norma_ether.h b/bogus/norma_ether.h
new file mode 100644
index 00000000..2d282d4f
--- /dev/null
+++ b/bogus/norma_ether.h
@@ -0,0 +1 @@
+#define NORMA_ETHER 0
diff --git a/bogus/norma_ipc.h b/bogus/norma_ipc.h
new file mode 100644
index 00000000..54803de7
--- /dev/null
+++ b/bogus/norma_ipc.h
@@ -0,0 +1 @@
+#define NORMA_IPC 0 /* can no longer be turned on */
diff --git a/bogus/norma_task.h b/bogus/norma_task.h
new file mode 100644
index 00000000..9b453ca7
--- /dev/null
+++ b/bogus/norma_task.h
@@ -0,0 +1 @@
+#define NORMA_TASK 0
diff --git a/bogus/norma_vm.h b/bogus/norma_vm.h
new file mode 100644
index 00000000..2074cd6b
--- /dev/null
+++ b/bogus/norma_vm.h
@@ -0,0 +1 @@
+#define NORMA_VM 0
diff --git a/bogus/panic.c b/bogus/panic.c
new file mode 100644
index 00000000..64a17de3
--- /dev/null
+++ b/bogus/panic.c
@@ -0,0 +1 @@
+/*XXX*/
diff --git a/bogus/power_save.h b/bogus/power_save.h
new file mode 100644
index 00000000..5591def4
--- /dev/null
+++ b/bogus/power_save.h
@@ -0,0 +1 @@
+#define POWER_SAVE 0
diff --git a/bogus/simple_clock.h b/bogus/simple_clock.h
new file mode 100644
index 00000000..ccb3cbec
--- /dev/null
+++ b/bogus/simple_clock.h
@@ -0,0 +1 @@
+#define SIMPLE_CLOCK 0
diff --git a/bogus/stat_time.h b/bogus/stat_time.h
new file mode 100644
index 00000000..abf9b3f4
--- /dev/null
+++ b/bogus/stat_time.h
@@ -0,0 +1 @@
+#define STAT_TIME 1
diff --git a/bogus/xpr_debug.h b/bogus/xpr_debug.h
new file mode 100644
index 00000000..f4d627a7
--- /dev/null
+++ b/bogus/xpr_debug.h
@@ -0,0 +1 @@
+#define XPR_DEBUG 1
diff --git a/chips/atm.c b/chips/atm.c
new file mode 100644
index 00000000..165cd233
--- /dev/null
+++ b/chips/atm.c
@@ -0,0 +1,302 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <atm.h>
+
+#if NATM > 0
+
+#include <vm/vm_kern.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <kern/time_out.h> /* ? maybe */
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/net_status.h>
+
+#include <chips/busses.h>
+#include <chips/atmreg.h>
+
+#include <kern/eventcount.h>
+
+#include <mips/mips_cpu.h>
+
+
+struct bus_device *atm_info[NATM];
+int atm_probe();
+static void atm_attach();
+
+struct bus_driver atm_driver =
+ { atm_probe, 0, atm_attach, 0, /* csr */ 0, "atm", atm_info,
+ "", 0, /* flags */ 0 }; /* ENABLED BUS INTR? */
+
+/* XX "", 0, BUS_INTR_DISABLED}; */
+
+typedef struct atm_softc {
+ struct atm_device *atm_dev;
+ struct evc atm_eventcounter;
+ mapped_atm_info_t atm_mapped_info;
+} atm_softc_t;
+
+
+natural_t atm_nintrs = 0;
+
+atm_softc_t atm_softc[NATM];
+
+atm_probe(reg, ui)
+ vm_offset_t reg;
+ register struct bus_device *ui;
+{
+ register atm_softc_t *atm;
+ mapped_atm_info_t info; /* info struct to hand to users */
+ vm_offset_t addr;
+ int unit = ui->unit;
+
+ if (check_memory(reg, 0)) {
+ return 0;
+ }
+
+ atm_info[unit] = ui;
+ atm = &atm_softc[unit];
+ atm->atm_dev = (struct atm_device *) reg; /* k1 address */
+
+ evc_init(&atm->atm_eventcounter);
+
+printf("evc_init of atm: event counter id is %d\n", atm->atm_eventcounter.ev_id);
+
+ /* initialize the interface to deliver. No interrupts by default */
+ atm->atm_dev->sreg = 0;
+ atm->atm_dev->creg = (CR_RX_RESET | CR_TX_RESET);
+ atm->atm_dev->creg = 0;
+ atm->atm_dev->creg_set = (CR_RX_ENABLE | CR_TX_ENABLE);
+#ifdef notdef
+ atm->atm_dev->rxthresh = 0;
+ atm->atm_dev->rxtimerv = 0;
+ atm->atm_dev->creg_s = RX_EOM_INTR; /* enable interrupt on end of message */
+#endif
+
+ /*
+ * Grab a page to be mapped later to users
+ */
+ (void) kmem_alloc_wired(kernel_map, &addr, PAGE_SIZE); /* kseg2 */
+ bzero(addr, PAGE_SIZE);
+ addr = pmap_extract(pmap_kernel(), addr); /* phys */
+ info = (mapped_atm_info_t) PHYS_TO_K0SEG(addr);
+ atm->atm_mapped_info = info;
+
+ /*
+ * Set some permanent info
+ */
+ info->hello_world = 0xdeadbeef;
+ info->interrupt_count = 0;
+ info->wait_event = atm->atm_eventcounter.ev_id;
+ info->saved_status_reg = 0;
+
+ return 1;
+}
+
+static void
+atm_attach(ui)
+register struct bus_device *ui;
+{
+}
+
+int atm_disable_interrupts_after_each = 1;
+
+
+#define ATM_INTERRUPTS (RX_COUNT_INTR | RX_EOM_INTR | RX_TIME_INTR)
+
+atm_intr(unit, spllevel)
+int unit;
+int spllevel;
+{
+ register struct atm_softc *atm = &atm_softc[unit];
+ struct atm_device *atm_dev = atm->atm_dev;
+ unsigned int intr;
+
+ if (atm_dev == 0) {
+ printf("atm: stray interrupt\n");
+ return;
+ }
+
+ /* Acknowledge interrupt request */
+ intr = ATM_READ_REG(atm_dev->sreg);
+ atm_dev->sreg = ~(intr & ATM_INTERRUPTS);
+
+ /* clear the reason for the interrupt */
+ if (atm_disable_interrupts_after_each)
+ atm_dev->creg &= ~intr;
+
+ splx(spllevel); /* drop priority now */
+
+ atm_intr_occurred();
+
+
+ /* Pass status info up to user */
+ if (atm->atm_mapped_info) {
+ atm->atm_mapped_info->interrupt_count++;
+ atm->atm_mapped_info->saved_status_reg = intr;
+ }
+
+ /* Awake user thread */
+
+ evc_signal(&atm->atm_eventcounter);
+
+ /* NOTE: INTERRUPTS ARE DISABLED. */
+}
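+/*
+ * Client-side sketch (illustrative, not defined in this file): a user
+ * thread that has mapped the device typically blocks on the exported
+ * eventcount and then inspects the shared page, e.g.
+ *
+ *	evc_wait(info->wait_event);		 until atm_intr() signals
+ *	status = info->saved_status_reg;	 see why we woke up
+ *
+ * evc_wait here stands for the eventcount wait trap; treat it as an
+ * assumption about the client API rather than something this driver
+ * provides.
+ */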
+
+atm_intr_occurred()
+{
+ atm_nintrs++;
+}
+
+
+atm_output(dev, ior)
+int dev;
+io_req_t ior;
+{
+}
+
+atm_start(unit)
+int unit;
+{
+}
+
+atm_open(dev, flag, ior)
+ int dev;
+ int flag;
+ io_req_t ior;
+{
+ register int unit = dev;
+ register atm_softc_t *atm = &atm_softc[unit];
+
+ if (unit >= NATM)
+ return EINVAL;
+ if (!atm->atm_dev)
+ return ENXIO;
+
+ return KERN_SUCCESS;
+}
+
+atm_close(dev, flag)
+int dev;
+{
+}
+
+atm_read(dev, ior)
+int dev;
+io_req_t ior;
+{
+}
+
+atm_write(dev, ior)
+int dev;
+io_req_t ior;
+{
+}
+
+atm_get_status(dev, flavor, status, status_count)
+ int dev;
+ int flavor;
+ dev_status_t status; /* pointer to OUT array */
+ natural_t *status_count; /* out */
+{
+ switch (flavor) {
+ case NET_STATUS:
+ {
+ register struct net_status *ns = (struct net_status *)status;
+
+ ns->min_packet_size = sizeof(struct sar_data);
+ ns->max_packet_size = sizeof(struct sar_data);
+ ns->header_format = 999; /* XX */
+ ns->header_size = sizeof(int); /* XX */
+ ns->address_size = 0;
+ ns->flags = 0;
+ ns->mapped_size = sizeof(struct atm_device) + PAGE_SIZE;
+ *status_count = NET_STATUS_COUNT;
+ break;
+ }
+ case NET_ADDRESS:
+ /* This would be a good place for it */
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+atm_set_status(dev, flavor, status, status_count)
+ int dev;
+ int flavor;
+ dev_status_t status;
+ natural_t status_count;
+{
+}
+
+atm_mmap(dev, off, prot)
+ int dev;
+ vm_offset_t off;
+ int prot;
+{
+ int unit = dev;
+ vm_offset_t addr;
+
+ /*
+ * Layout of mapped area is:
+ * 000 -- FA_END: DEVICE
+ * FA_END -- FA_END + PAGE_SIZE: SHARED STATE
+ */
+ if (off < sizeof(struct atm_device)) {
+ addr = K1SEG_TO_PHYS((vm_offset_t)(atm_softc[unit].atm_dev)) + off;
+ } else if (off < sizeof(struct atm_device) + PAGE_SIZE) {
+ addr = K0SEG_TO_PHYS(atm_softc[unit].atm_mapped_info);
+ } else return -1;
+ return mips_btop(addr);
+}
+
+atm_setinput(dev, receive_port, priority, filter, filter_count)
+ int dev;
+ ipc_port_t receive_port;
+ int priority;
+ /*filter_t *filter;*/
+ natural_t filter_count;
+{
+}
+
+atm_restart(ifp)
+/* register struct ifnet *ifp; */
+{
+}
+
+atm_portdeath(dev, port)
+ int dev;
+ mach_port_t port;
+{
+}
+
+#endif /* NATM > 0 */
+
+
+
+
+
diff --git a/chips/atmreg.h b/chips/atmreg.h
new file mode 100644
index 00000000..a8957837
--- /dev/null
+++ b/chips/atmreg.h
@@ -0,0 +1,89 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*** FORE TCA-100 Turbochannel ATM computer interface ***/
+
+#define RX_COUNT_INTR 0x0001
+#define RX_EOM_INTR 0x0002
+#define RX_TIME_INTR 0x0004
+#define TX_COUNT_INTR 0x0008
+#define RX_CELL_LOST 0x0010
+#define RX_NO_CARRIER 0x0020
+#define CR_RX_ENABLE 0x0040
+#define CR_TX_ENABLE 0x0080
+#define CR_RX_RESET 0x0100
+#define CR_TX_RESET 0x0200
+
+#define ATM_READ_REG(reg) ((reg) & 0x3ff) /* 10 bit register mask */
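+/* Example (illustrative): ATM_READ_REG(0x1234) == 0x234; the mask keeps
+   only the ten status/control bits defined above. */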
+
+
+struct atm_device {
+ unsigned int prom[64 * 1024 / 4];
+ volatile unsigned int sreg;
+ volatile unsigned int creg_set;
+ volatile unsigned int creg_clr;
+ volatile unsigned int creg;
+ volatile unsigned int rxtimer;
+ unsigned int pad1;
+ volatile unsigned int rxtimerv;
+ unsigned int pad2;
+ volatile unsigned int rxcount;
+ unsigned int pad3;
+ volatile unsigned int rxthresh;
+ unsigned int pad4;
+ volatile unsigned int txcount;
+ unsigned int pad5;
+ volatile unsigned int txthresh;
+ unsigned int pad6[64*1024/4 - 15];
+ volatile unsigned int rxfifo[14];
+ unsigned int pad7[64*1024/4 - 14];
+ volatile unsigned int txfifo[14];
+ unsigned int pad8[64*1024/4 - 14];
+};
+/* MUST BE PAGE ALIGNED OR YOU WILL GET KILLED BELOW WITH ATM_INFO */
+
+struct sar_data {
+ int header;
+ int payload[12];
+ int trailer;
+};
+
+
+/*
+ * Information for mapped atm device
+ */
+typedef struct mapped_atm_info {
+ volatile unsigned int interrupt_count; /* tot interrupts received */
+ volatile unsigned short saved_status_reg; /* copy of status reg from last interrupt */
+ unsigned int hello_world;
+ unsigned wait_event;
+} *mapped_atm_info_t;
+
+
+
+#define ATM_DEVICE(p) (struct atm_device*)(p)
+#define ATM_INFO(p) (mapped_atm_info_t)( (p) + sizeof(struct atm_device) )
+
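+/*
+ * Usage sketch (illustrative only; "base" is a hypothetical name for the
+ * address at which a client has mapped the device through atm_mmap()):
+ *
+ *	char *base;					mapped region
+ *	struct atm_device  *dev  = ATM_DEVICE(base);	registers
+ *	mapped_atm_info_t   info = ATM_INFO(base);	shared info page
+ *
+ * after which info->interrupt_count and info->saved_status_reg mirror
+ * what atm_intr() records at interrupt time.
+ */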
diff --git a/chips/audio.c b/chips/audio.c
new file mode 100644
index 00000000..00bf2be9
--- /dev/null
+++ b/chips/audio.c
@@ -0,0 +1,733 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*-
+ * Copyright (c) 1991, 1992 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Computer Systems
+ * Engineering Group at Lawrence Berkeley Laboratory.
+ * 4. The name of the Laboratory may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <audio.h>
+#if NAUDIO > 0
+
+#include <mach_kdb.h>
+#include <platforms.h>
+
+#include <mach/std_types.h>
+#include <machine/machspl.h>
+#include <kern/kalloc.h>
+#include <kern/sched_prim.h>
+#include <chips/busses.h>
+
+#include <device/device_types.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+#include <device/audio_status.h> /* user interface */
+#include <chips/audio_defs.h> /* chip interface */
+#include <chips/audio_config.h> /* machdep config */
+
+#define private static
+
+/*
+ * Exported functions and data structures
+ * [see header file for listing]
+ */
+int audio_blocksize = DEFBLKSIZE; /* patchable */
+int audio_backlog = 400; /* 50ms in samples */
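+				/* (the chip runs at 8000 samples/sec, so 400 samples = 50ms) */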
+
+/*
+ * Software state, per AMD79C30 audio chip.
+ */
+private
+struct audio_softc {
+ void *hw; /* chip status */
+ audio_switch_t *ops; /* chip operations */
+ au_io_t *sc_au; /* recv and xmit buffers, etc */
+
+
+ unsigned int sc_wseek; /* timestamp of last frame written */
+ unsigned int sc_rseek; /* timestamp of last frame read */
+#if 0
+ struct selinfo sc_wsel; /* write selector */
+ struct selinfo sc_rsel; /* read selector */
+#endif
+
+} audio_softc_data[NAUDIO];
+
+#define unit_to_softc(u) &audio_softc_data[u]
+
+
+/* forward declarations */
+private int audio_sleep (au_cb_t *cb, int thresh);
+private void audio_swintr (struct audio_softc *sc);
+
+/*
+ * Audio chip found.
+ */
+void
+audio_attach(
+ void *hw, /* IN, chip status */
+ audio_switch_t *ops,
+ void **audio_status) /* OUT, audio status */
+{
+ register struct audio_softc *sc;
+ static int next = 0;
+
+ if (next >= NAUDIO)
+ panic("Please configure more than %d audio devices\n", NAUDIO);
+ sc = &audio_softc_data[next++];
+
+ printf(" audio");
+
+ sc->hw = hw;
+ sc->ops = ops;
+
+ *audio_status = (void *)sc;
+}
+
+
+private int audio_setinfo (struct audio_softc *, audio_info_t *);
+private int audio_getinfo (struct audio_softc *, audio_info_t *);
+
+io_return_t
+audio_open(
+ int unit,
+ int mode,
+ io_req_t req)
+{
+ register struct audio_softc *sc;
+ register au_io_t *au;
+
+	if (unit >= NAUDIO)
+		return (D_NO_SUCH_DEVICE);
+	sc = unit_to_softc(unit);
+	if (!sc->hw)
+		return (D_NO_SUCH_DEVICE);
+
+ if (!sc->sc_au) {
+ sc->sc_au = (au_io_t *) kalloc(sizeof(au_io_t));
+ bzero(sc->sc_au, sizeof(au_io_t));
+ }
+ au = sc->sc_au;
+
+ au->au_lowat = audio_blocksize;
+ au->au_hiwat = AUCB_SIZE - au->au_lowat;
+ au->au_blksize = audio_blocksize;
+ au->au_backlog = audio_backlog;
+
+ /* set up read and write blocks and `dead sound' zero value. */
+ AUCB_INIT(&au->au_rb);
+ au->au_rb.cb_thresh = AUCB_SIZE;
+ AUCB_INIT(&au->au_wb);
+ au->au_wb.cb_thresh = -1;
+
+ /* nothing read or written yet */
+ sc->sc_rseek = 0;
+ sc->sc_wseek = 0;
+
+ (*sc->ops->init)(sc->hw);
+
+ return (0);
+}
+
+private int
+audio_drain(
+ register au_io_t *au)
+{
+ register int error;
+
+ while (!AUCB_EMPTY(&au->au_wb))
+ if ((error = audio_sleep(&au->au_wb, 0)) != 0)
+ return (error);
+ return (0);
+}
+
+/*
+ * Close an audio chip.
+ */
+/* ARGSUSED */
+io_return_t
+audio_close(
+ int unit)
+{
+ register struct audio_softc *sc = unit_to_softc(unit);
+ register au_cb_t *cb;
+ register spl_t s;
+
+ /*
+ * Block until output drains, but allow ^C interrupt.
+ */
+ sc->sc_au->au_lowat = 0; /* avoid excessive wakeups */
+
+ /*
+ * If there is pending output, let it drain (unless
+ * the output is paused).
+ */
+ cb = &sc->sc_au->au_wb;
+ s = splaudio();
+ if (!AUCB_EMPTY(cb) && !cb->cb_pause)
+ (void)audio_drain(sc->sc_au);
+ /*
+ * Disable interrupts, and done.
+ */
+ (*sc->ops->close)(sc->hw);
+ splx(s);
+ return (D_SUCCESS);
+}
+
+private int
+audio_sleep(
+ register au_cb_t *cb,
+ register int thresh)
+{
+ register spl_t s = splaudio();
+
+ cb->cb_thresh = thresh;
+ assert_wait((event_t)cb, TRUE);
+ splx(s);
+ thread_block((void (*)()) 0);
+ return (0); /* XXXX */
+}
+
+io_return_t
+audio_read(
+ int unit,
+ io_req_t ior)
+{
+ register struct audio_softc *sc = unit_to_softc(unit);
+ register au_cb_t *cb;
+ register int n, head, taildata;
+ register int blocksize = sc->sc_au->au_blksize;
+ io_return_t rc;
+ unsigned char *data;
+
+ /*
+ * Allocate read buffer
+ */
+ rc = device_read_alloc(ior, (vm_size_t)ior->io_count);
+ if (rc != KERN_SUCCESS)
+ return rc;
+ data = (unsigned char *) ior->io_data;
+ ior->io_residual = ior->io_count;
+
+ cb = &sc->sc_au->au_rb;
+ cb->cb_drops = 0;
+ sc->sc_rseek = sc->sc_au->au_stamp - AUCB_LEN(cb);
+ do {
+ while (AUCB_LEN(cb) < blocksize) {
+
+ if (ior->io_mode & D_NODELAY)
+ return (D_WOULD_BLOCK);
+
+ if ((rc = audio_sleep(cb, blocksize)) != 0)
+ return(rc);
+ }
+ /*
+ * The space calculation can only err on the short
+ * side if an interrupt occurs during processing:
+ * only cb_tail is altered in the interrupt code.
+ */
+ head = cb->cb_head;
+ if ((n = AUCB_LEN(cb)) > ior->io_residual)
+ n = ior->io_residual;
+ taildata = AUCB_SIZE - head;
+
+ if (n > taildata) {
+ bcopy(cb->cb_data + head, data, taildata);
+ bcopy(cb->cb_data, data + taildata, n - taildata);
+ } else
+ bcopy(cb->cb_data + head, data, n);
+ data += n;
+ ior->io_residual -= n;
+
+ head = AUCB_MOD(head + n);
+ cb->cb_head = head;
+ } while (ior->io_residual >= blocksize);
+
+ return (rc);
+}
+
+io_return_t
+audio_write(
+ int unit,
+ io_req_t ior)
+{
+ register struct audio_softc *sc = unit_to_softc(unit);
+ register au_io_t *au = sc->sc_au;
+ register au_cb_t *cb = &au->au_wb;
+ register int n, tail, tailspace, first, watermark;
+ io_return_t rc;
+ unsigned char *data;
+ vm_offset_t addr = 0;
+
+ if (!(ior->io_op & IO_INBAND)) {
+ /*
+ * Copy out-of-line data into kernel address space.
+ * Since data is copied as page list, it will be
+ * accessible.
+ */
+ vm_map_copy_t copy = (vm_map_copy_t) ior->io_data;
+ kern_return_t kr;
+
+ kr = vm_map_copyout(device_io_map, &addr, copy);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ data = (unsigned char *) addr;
+ } else
+ data = (unsigned char *) ior->io_data;
+ ior->io_residual = ior->io_count;
+
+ rc = D_SUCCESS;
+ first = 1;
+ while (ior->io_residual > 0) {
+ watermark = au->au_hiwat;
+ while (AUCB_LEN(cb) > watermark) {
+
+ if (ior->io_mode & D_NODELAY) {
+ rc = D_WOULD_BLOCK;
+ goto out;
+ }
+
+ if ((rc = audio_sleep(cb, watermark)) != 0)
+ goto out;
+
+ watermark = au->au_lowat;
+ }
+ /*
+ * The only value that can change on an interrupt is
+ * cb->cb_head. We only pull that out once to decide
+ * how much to write into cb_data; if we lose a race
+ * and cb_head changes, we will merely be overly
+ * conservative. For a legitimate time stamp,
+ * however, we need to synchronize the accesses to
+ * au_stamp and cb_head at a high ipl below.
+ */
+ tail = cb->cb_tail;
+ if ((n = (AUCB_SIZE - 1) - AUCB_LEN(cb)) > ior->io_residual) {
+ n = ior->io_residual;
+ if (cb->cb_head == tail &&
+ n <= au->au_blksize &&
+ au->au_stamp - sc->sc_wseek > 400) {
+ /*
+ * the write is 'small', the buffer is empty
+ * and we have been silent for at least 50ms
+ * so we might be dealing with an application
+ * that writes frames synchronously with
+ * reading them. If so, we need an output
+ * backlog to cover scheduling delays or
+ * there will be gaps in the sound output.
+ * Also take this opportunity to reset the
+ * buffer pointers in case we ended up on
+ * a bad boundary (odd byte, blksize bytes
+ * from end, etc.).
+ */
+ register unsigned long *ip;
+ register unsigned long muzero;
+ spl_t s;
+ register int i;
+
+ s = splaudio();
+ cb->cb_head = cb->cb_tail = 0;
+ splx(s);
+
+ tail = au->au_backlog;
+ ip = (unsigned long *)cb->cb_data;
+ muzero = sample_rpt_long(0x7fL);
+ for (i = tail / sizeof muzero; --i >= 0; )
+ *ip++ = muzero;
+ }
+ }
+ tailspace = AUCB_SIZE - tail;
+ if (n > tailspace) {
+ /* write first part at tail and rest at head */
+ bcopy(data, cb->cb_data + tail, tailspace);
+ bcopy(data + tailspace, cb->cb_data,
+ n - tailspace);
+ } else
+ bcopy(data, cb->cb_data + tail, n);
+ data += n;
+ ior->io_residual -= n;
+
+ tail = AUCB_MOD(tail + n);
+ if (first) {
+ register spl_t s = splaudio();
+ sc->sc_wseek = AUCB_LEN(cb) + au->au_stamp + 1;
+ /*
+ * To guarantee that a write is contiguous in the
+ * sample space, we clear the drop count the first
+ * time through. If we later get drops, we will
+ * break out of the loop below, before writing
+ * a new frame.
+ */
+ cb->cb_drops = 0;
+ cb->cb_tail = tail;
+ splx(s);
+ first = 0;
+ } else {
+#if 0
+ if (cb->cb_drops != 0)
+ break;
+#endif
+ cb->cb_tail = tail;
+ }
+ }
+out:
+ if (!(ior->io_op & IO_INBAND))
+ (void) vm_deallocate(device_io_map, addr, ior->io_count);
+ return (rc);
+}
+
+#include <sys/ioctl.h>
+
+io_return_t
+audio_get_status(
+ int unit,
+ dev_flavor_t flavor,
+ dev_status_t status,
+ natural_t *status_count)
+{
+ register struct audio_softc *sc = unit_to_softc(unit);
+ register au_io_t *au = sc->sc_au;
+ io_return_t rc = D_SUCCESS;
+ spl_t s;
+
+ switch (flavor) {
+
+ case AUDIO_GETMAP:
+ case AUDIOGETREG:
+ rc = (*sc->ops->getstate)(sc->hw, flavor,
+ (void *)status, status_count);
+ break;
+
+ /*
+ * Number of read samples dropped. We don't know where or
+ * when they were dropped.
+ */
+ case AUDIO_RERROR:
+ *(int *)status = au->au_rb.cb_drops;
+ *status_count = 1;
+ break;
+
+ case AUDIO_WERROR:
+ *(int *)status = au->au_wb.cb_drops;
+ *status_count = 1;
+ break;
+
+ /*
+ * How many samples will elapse until mike hears the first
+ * sample of what we last wrote?
+ */
+ case AUDIO_WSEEK:
+ s = splaudio();
+ *(unsigned int *)status = sc->sc_wseek - au->au_stamp
+ + AUCB_LEN(&au->au_rb);
+ splx(s);
+ *status_count = 1;
+ break;
+
+ case AUDIO_GETINFO:
+ rc = audio_getinfo(sc, (audio_info_t *)status);
+ *status_count = sizeof(audio_info_t) / sizeof(int);
+ break;
+
+ default:
+ rc = D_INVALID_OPERATION;
+ break;
+ }
+ return (rc);
+}
+
+io_return_t
+audio_set_status(
+ int unit,
+ dev_flavor_t flavor,
+ dev_status_t status,
+ natural_t status_count)
+{
+ register struct audio_softc *sc = unit_to_softc(unit);
+ register au_io_t *au = sc->sc_au;
+ io_return_t rc = D_SUCCESS;
+ spl_t s;
+
+ switch (flavor) {
+
+ case AUDIO_SETMAP:
+ case AUDIOSETREG:
+ rc = (*sc->ops->setstate)(sc->hw, flavor,
+ (void *)status, status_count);
+ break;
+
+ case AUDIO_FLUSH:
+ s = splaudio();
+ AUCB_INIT(&au->au_rb);
+ AUCB_INIT(&au->au_wb);
+ au->au_stamp = 0;
+ splx(s);
+ sc->sc_wseek = 0;
+ sc->sc_rseek = 0;
+ break;
+
+ case AUDIO_SETINFO:
+ rc = audio_setinfo(sc, (audio_info_t *)status);
+ break;
+
+ case AUDIO_DRAIN:
+ rc = audio_drain(au);
+ break;
+
+ default:
+ rc = D_INVALID_OPERATION;
+ break;
+ }
+ return (rc);
+}
+
+
+/*
+ * Interrupt routine
+ */
+boolean_t
+audio_hwintr(
+ void *status,
+ unsigned int s_in,
+ unsigned int *s_out)
+{
+ register au_io_t *au = ((struct audio_softc *) status)->sc_au;
+ register au_cb_t *cb;
+ register int h, t, k;
+ register boolean_t wakeit = FALSE;
+
+ ++au->au_stamp;
+
+ /* receive incoming data */
+ cb = &au->au_rb;
+ h = cb->cb_head;
+ t = cb->cb_tail;
+ k = AUCB_MOD(t + 1);
+ if (h == k)
+ cb->cb_drops++;
+ else if (cb->cb_pause != 0)
+ cb->cb_pdrops++;
+ else {
+ cb->cb_data[t] = s_in;
+ cb->cb_tail = t = k;
+ }
+ if (AUCB_MOD(t - h) >= cb->cb_thresh) {
+ cb->cb_thresh = AUCB_SIZE;
+ cb->cb_waking = 1;
+ wakeit = TRUE;
+ }
+ /* send outgoing data */
+ cb = &au->au_wb;
+ h = cb->cb_head;
+ t = cb->cb_tail;
+ k = 0;
+ if (h == t)
+ cb->cb_drops++;
+ else if (cb->cb_pause != 0)
+ cb->cb_pdrops++;
+ else {
+ cb->cb_head = h = AUCB_MOD(h + 1);
+ *s_out = cb->cb_data[h];
+ k = 1;
+ }
+ if (AUCB_MOD(t - h) <= cb->cb_thresh) {
+ cb->cb_thresh = -1;
+ cb->cb_waking = 1;
+ wakeit = TRUE;
+ }
+ if (wakeit)
+ audio_swintr((struct audio_softc *) status);
+ return (k == 1);
+}
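+/*
+ * Expected call pattern (an illustrative sketch of what a chip driver
+ * does once per sample interrupt; every name except audio_hwintr is a
+ * placeholder):
+ *
+ *	unsigned int in, out;
+ *	in = read_rx_register(hw);
+ *	if (audio_hwintr(audio_status, in, &out))
+ *		write_tx_register(hw, out);	   a sample was dequeued
+ *	else
+ *		write_tx_register(hw, 0x7f);	   e.g. mu-law silence
+ */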
+
+private void
+audio_swintr(
+ register struct audio_softc *sc)
+{
+ register au_io_t *au = sc->sc_au;
+
+ if (au->au_rb.cb_waking != 0) {
+ au->au_rb.cb_waking = 0;
+ wakeup(&au->au_rb);
+ }
+ if (au->au_wb.cb_waking != 0) {
+ au->au_wb.cb_waking = 0;
+ wakeup(&au->au_wb);
+ }
+}
+
+private int
+audio_setinfo(
+ struct audio_softc *sc,
+ audio_info_t *ai)
+{
+ struct audio_prinfo *r = &ai->record, *p = &ai->play;
+ register int bsize;
+ register au_io_t *au = sc->sc_au;
+ spl_t s;
+
+ (*sc->ops->setgains)(sc->hw, p->gain, r->gain, ai->monitor_gain );
+
+ if (p->pause != (unsigned char)~0)
+ au->au_wb.cb_pause = p->pause;
+ if (r->pause != (unsigned char)~0)
+ au->au_rb.cb_pause = r->pause;
+
+ if (p->port != ~0)
+ (*sc->ops->setport)(sc->hw, p->port);
+
+ if (ai->blocksize != ~0) {
+ if (ai->blocksize == 0)
+ bsize = ai->blocksize = DEFBLKSIZE;
+ else if (ai->blocksize > MAXBLKSIZE)
+ bsize = ai->blocksize = MAXBLKSIZE;
+ else
+ bsize = ai->blocksize;
+
+ s = splaudio();
+ au->au_blksize = bsize;
+ /* AUDIO_FLUSH */
+ AUCB_INIT(&au->au_rb);
+ AUCB_INIT(&au->au_wb);
+ splx(s);
+
+ }
+ if (ai->hiwat != ~0 && (unsigned)ai->hiwat < AUCB_SIZE)
+ au->au_hiwat = ai->hiwat;
+ if (ai->lowat != ~0 && ai->lowat < AUCB_SIZE)
+ au->au_lowat = ai->lowat;
+ if (ai->backlog != ~0 && ai->backlog < (AUCB_SIZE/2))
+ au->au_backlog = ai->backlog;
+
+ return (0);
+}
+
+private int
+audio_getinfo(
+ struct audio_softc *sc,
+ audio_info_t *ai)
+{
+ struct audio_prinfo *r = &ai->record, *p = &ai->play;
+ register au_io_t *au = sc->sc_au;
+
+ p->sample_rate = r->sample_rate = 8000;
+ p->channels = r->channels = 1;
+ p->precision = r->precision = 8;
+ p->encoding = r->encoding = AUDIO_ENCODING_ULAW;
+
+ (*sc->ops->getgains)(sc->hw, &p->gain, &r->gain, &ai->monitor_gain );
+
+ r->port = AUDIO_MIKE;
+ p->port = (*sc->ops->getport)(sc->hw);
+
+ p->pause = au->au_wb.cb_pause;
+ r->pause = au->au_rb.cb_pause;
+ p->error = au->au_wb.cb_drops != 0;
+ r->error = au->au_rb.cb_drops != 0;
+
+ /* Now this is funny. If you got here it means you must have
+ opened the device, so how could it possibly be closed ?
+ Unless we upgrade the berkeley code to check if the chip
+ is currently playing and/or recording... Later. */
+ p->open = TRUE;
+ r->open = TRUE;
+
+ p->samples = au->au_stamp - au->au_wb.cb_pdrops;
+ r->samples = au->au_stamp - au->au_rb.cb_pdrops;
+
+ p->seek = sc->sc_wseek;
+ r->seek = sc->sc_rseek;
+
+ ai->blocksize = au->au_blksize;
+ ai->hiwat = au->au_hiwat;
+ ai->lowat = au->au_lowat;
+ ai->backlog = au->au_backlog;
+
+ return (0);
+}
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+
+void audio_queue_status( au_cb_t *cb, char *logo)
+{
+ db_printf("%s ring status:\n", logo);
+ db_printf(" h %x t %x sh %x w %d p %d d %x pd %x\n",
+ cb->cb_head, cb->cb_tail, cb->cb_thresh,
+ cb->cb_waking, cb->cb_pause, (long)cb->cb_drops,
+ (long)cb->cb_pdrops);
+}
+
+int audio_status(int unit)
+{
+ struct audio_softc *sc = unit_to_softc(unit);
+ au_io_t *au;
+
+ if (!sc) {
+ db_printf("No such thing\n");
+ return 0;
+ }
+ db_printf("@%lx: wseek %d rseek %d, au @%lx\n",
+ sc, sc->sc_wseek, sc->sc_rseek, sc->sc_au);
+ if (!(au = sc->sc_au)) return 0;
+
+ db_printf("au: stamp %x lo %x hi %x blk %x blg %x\n",
+ au->au_stamp, au->au_lowat, au->au_hiwat,
+ au->au_blksize, au->au_backlog);
+ audio_queue_status(&au->au_rb, "read");
+ audio_queue_status(&au->au_wb, "write");
+
+ return 0;
+}
+#endif /* MACH_KDB */
+
+#endif /* NAUDIO > 0 */
+
diff --git a/chips/audio_config.h b/chips/audio_config.h
new file mode 100644
index 00000000..bd2c0a07
--- /dev/null
+++ b/chips/audio_config.h
@@ -0,0 +1,130 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Here platform specific code to define sample_t & co
+ * [to cope with weird DMA engines], and other customs
+ */
+#ifdef FLAMINGO
+#define splaudio splbio
+#define sample_t unsigned char /* later */
+#define samples_to_chars bcopy
+#define chars_to_samples bcopy
+/* Sparse space ! */
+typedef struct {
+ volatile unsigned long cr; /* command register (wo) */
+/*#define ir cr /* interrupt register (ro) */
+ volatile unsigned long dr; /* data register (rw) */
+ volatile unsigned long dsr1; /* D-channel status register 1 (ro) */
+ volatile unsigned long der; /* D-channel error register (ro) */
+ volatile unsigned long dctb; /* D-channel transmit register (wo) */
+/*#define dcrb dctb /* D-channel receive register (ro) */
+ volatile unsigned long bbtb; /* Bb-channel transmit register (wo) */
+/*#define bbrb bbtb /* Bb-channel receive register (ro) */
+ volatile unsigned long bctb; /* Bc-channel transmit register (wo)*/
+/*#define bcrb bctb /* Bc-channel receive register (ro) */
+ volatile unsigned long dsr2; /* D-channel status register 2 (ro) */
+} amd79c30_padded_regs_t;
+
+/* give the chip 400ns in between accesses */
+#define read_reg(r,v) \
+ { (v) = ((r) >> 8) & 0xff; delay(1); }
+
+#define write_reg(r,v) \
+ { (r) = (((v) & 0xff) << 8) | \
+ 0x200000000L; /*bytemask*/ \
+ delay(1); wbflush(); \
+ }
+
+/* Write 16 bits of data from variable v to the data port of the audio chip */
+#define WAMD16(regs, v) \
+ { write_reg((regs)->dr,v); \
+ write_reg((regs)->dr,v>>8); }
+
+#define mb() wbflush()
+
+#endif /* FLAMINGO */
+
+
+#ifdef MAXINE
+#define splaudio splhigh
+typedef struct {
+ volatile unsigned char cr; /* command register (wo) */
+/*#define ir cr /* interrupt register (ro) */
+ char pad0[63];
+ volatile unsigned char dr; /* data register (rw) */
+ char pad1[63];
+ volatile unsigned char dsr1; /* D-channel status register 1 (ro) */
+ char pad2[63];
+ volatile unsigned char der; /* D-channel error register (ro) */
+ char pad3[63];
+ volatile unsigned char dctb; /* D-channel transmit register (wo) */
+/*#define dcrb dctb /* D-channel receive register (ro) */
+ char pad4[63];
+ volatile unsigned char bbtb; /* Bb-channel transmit register (wo) */
+/*#define bbrb bbtb /* Bb-channel receive register (ro) */
+ char pad5[63];
+ volatile unsigned char bctb; /* Bc-channel transmit register (wo)*/
+/*#define bcrb bctb /* Bc-channel receive register (ro) */
+ char pad6[63];
+ volatile unsigned char dsr2; /* D-channel status register 2 (ro) */
+ char pad7[63];
+} amd79c30_padded_regs_t;
+
+/* give the chip 400ns in between accesses */
+#define read_reg(r,v) \
+ { (v) = (r); delay(1); }
+
+#define write_reg(r,v) \
+ { (r) = (v); delay(1); wbflush(); }
+
+/* Write 16 bits of data from variable v to the data port of the audio chip */
+#define WAMD16(regs, v) \
+ { write_reg((regs)->dr,v); \
+ write_reg((regs)->dr,v>>8); }
+
+#define mb()
+
+#endif /* MAXINE */
+
+
+#ifndef sample_t
+#define sample_t unsigned char
+#define samples_to_chars bcopy
+#define chars_to_samples bcopy
+#endif
+
+/*
+ * More architecture-specific customizations
+ */
+#ifdef alpha
+#define sample_rpt_int(x) (((x)<<24)|((x)<<16)|((x)<<8)|((x)<<0))
+#define sample_rpt_long(x) ((sample_rpt_int(x)<<32)|sample_rpt_int(x))
+#endif
+
+#ifndef sample_rpt_long
+#define sample_rpt_long(x) (((x)<<24)|((x)<<16)|((x)<<8)|((x)<<0))
+#endif
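+/*
+ * Example (illustrative): sample_rpt_long(0x7f) expands to 0x7f7f7f7f
+ * (0x7f7f7f7f7f7f7f7f with the 64-bit alpha variant), i.e. a word full
+ * of mu-law "silence" bytes; audio.c uses this to pre-fill the output
+ * backlog one word at a time.
+ */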
+
diff --git a/chips/audio_defs.h b/chips/audio_defs.h
new file mode 100644
index 00000000..448153e4
--- /dev/null
+++ b/chips/audio_defs.h
@@ -0,0 +1,129 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*-
+ * Copyright (c) 1991, 1992 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Computer Systems
+ * Engineering Group at Lawrence Berkeley Laboratory.
+ * 4. The name of the Laboratory may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define AUCB_SIZE 4096
+#define AUCB_MOD(k) ((k) & (AUCB_SIZE - 1))
+
+#define AUCB_INIT(cb) ((cb)->cb_head = (cb)->cb_tail = (cb)->cb_drops = \
+ (cb)->cb_pdrops = 0)
+
+#define AUCB_EMPTY(cb) ((cb)->cb_head == (cb)->cb_tail)
+#define AUCB_FULL(cb) (AUCB_MOD((cb)->cb_tail + 1) == (cb)->cb_head)
+#define AUCB_LEN(cb) (AUCB_MOD((cb)->cb_tail - (cb)->cb_head))
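+/*
+ * Worked example (illustrative): with AUCB_SIZE 4096, a queue whose
+ * cb_head == 4090 and cb_tail == 10 holds
+ *	AUCB_LEN(cb) = AUCB_MOD(10 - 4090) = (-4080) & 4095 = 16
+ * samples (6 at the end of cb_data plus 10 at the front); the masking
+ * makes the indices wrap without any explicit bounds test.
+ */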
+
+#define MAXBLKSIZE (AUCB_SIZE / 2)
+#define DEFBLKSIZE 128
+
+#ifndef LOCORE
+
+/*
+ * Our own circular buffers, used if not doing DMA.
+ * [af: with some work we could use the circbuf.c code instead]
+ */
+typedef struct au_cb {
+ int cb_head; /* queue head */
+ int cb_tail; /* queue tail */
+ int cb_thresh; /* threshold for wakeup */
+ unsigned int cb_waking; /* needs wakeup at softint level */
+ unsigned int cb_pause; /* io paused */
+ unsigned int cb_drops; /* missed samples from over/underrun */
+ unsigned int cb_pdrops; /* sun compat -- paused samples */
+ unsigned char cb_data[AUCB_SIZE]; /* data buffer */
+} au_cb_t;
+
+/*
+ * Handle on a bi-directional stream of samples
+ */
+typedef struct au_io {
+ unsigned int au_stamp; /* time stamp */
+ int au_lowat; /* xmit low water mark (for wakeup) */
+ int au_hiwat; /* xmit high water mark (for wakeup) */
+ int au_blksize; /* recv block (chunk) size */
+ int au_backlog; /* # samples of xmit backlog to gen. */
+ struct au_cb au_rb; /* read (recv) buffer */
+ struct au_cb au_wb; /* write (xmit) buffer */
+} au_io_t;
+
+/*
+ * Interface to specific chips
+ */
+typedef struct {
+ void (*init)();
+ void (*close)();
+ void (*setport)();
+ int (*getport)();
+ void (*setgains)();
+ void (*getgains)();
+ io_return_t (*setstate)();
+ io_return_t (*getstate)();
+} audio_switch_t;
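+/*
+ * Attachment sketch (illustrative; the AMD79C30 driver is the intended
+ * user of this interface, and every "mychip_*" name below is a
+ * placeholder):
+ *
+ *	static audio_switch_t mychip_ops = {
+ *		mychip_init, mychip_close,
+ *		mychip_setport, mychip_getport,
+ *		mychip_setgains, mychip_getgains,
+ *		mychip_setstate, mychip_getstate
+ *	};
+ *	...
+ *	audio_attach((void *)&mychip_softc, &mychip_ops, &audio_status);
+ */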
+
+/*
+ * Callbacks into audio module, and interface to kernel
+ */
+void audio_attach( void *, audio_switch_t *, void **);
+boolean_t audio_hwintr( void *, unsigned int, unsigned int *);
+
+extern io_return_t audio_open( int, int, io_req_t );
+extern io_return_t audio_close( int );
+extern io_return_t audio_read( int, io_req_t );
+extern io_return_t audio_write( int, io_req_t );
+extern io_return_t audio_get_status( int, dev_flavor_t, dev_status_t, natural_t *);
+extern io_return_t audio_set_status( int, dev_flavor_t, dev_status_t, natural_t);
+
+#endif
diff --git a/chips/bt431.c b/chips/bt431.c
new file mode 100644
index 00000000..0c6368c9
--- /dev/null
+++ b/chips/bt431.c
@@ -0,0 +1,239 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: bt431.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 8/91
+ *
+ * Routines for the bt431 Cursor
+ */
+
+#include <platforms.h>
+
+#include <chips/bt431.h>
+#include <chips/screen.h>
+
+#ifdef DECSTATION
+/*
+ * This configuration uses two twin 431s
+ */
+#define set_value(x) (((x)<<8)|((x)&0xff))
+#define get_value(x) ((x)&0xff)
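+/*
+ * Example (illustrative): set_value(0x40) == 0x4040, i.e. the same byte
+ * is presented to both cursor chips over the 16-bit data path, while
+ * get_value() reads back only the low chip's copy.
+ */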
+
+typedef struct {
+ volatile unsigned short addr_lo;
+ short pad0;
+ volatile unsigned short addr_hi;
+ short pad1;
+ volatile unsigned short addr_cmap;
+ short pad2;
+ volatile unsigned short addr_reg;
+ short pad3;
+} bt431_padded_regmap_t;
+
+#else /*DECSTATION*/
+
+#define set_value(x) x
+#define get_value(x) x
+typedef bt431_regmap_t bt431_padded_regmap_t;
+#define wbflush()
+
+#endif /*DECSTATION*/
+
+/*
+ * Generic register access
+ */
+void
+bt431_select_reg( regs, regno)
+ bt431_padded_regmap_t *regs;
+{
+ regs->addr_lo = set_value(regno&0xff);
+ regs->addr_hi = set_value((regno >> 8) & 0xff);
+ wbflush();
+}
+
+void
+bt431_write_reg( regs, regno, val)
+ bt431_padded_regmap_t *regs;
+{
+ bt431_select_reg( regs, regno );
+ regs->addr_reg = set_value(val);
+ wbflush();
+}
+
+unsigned char
+bt431_read_reg( regs, regno)
+ bt431_padded_regmap_t *regs;
+{
+ bt431_select_reg( regs, regno );
+ return get_value(regs->addr_reg);
+}
+
+/* when using autoincrement */
+#define bt431_write_reg_autoi( regs, regno, val) \
+ { \
+ (regs)->addr_reg = set_value(val); \
+ wbflush(); \
+ }
+#define bt431_read_reg_autoi( regs, regno) \
+ get_value(((regs)->addr_reg))
+
+#define bt431_write_cmap_autoi( regs, regno, val) \
+ { \
+ (regs)->addr_cmap = (val); \
+ wbflush(); \
+ }
+#define bt431_read_cmap_autoi( regs, regno) \
+ ((regs)->addr_cmap)
+
+
+/*
+ * Cursor ops
+ */
+bt431_cursor_on(regs)
+ bt431_padded_regmap_t *regs;
+{
+ bt431_write_reg( regs, BT431_REG_CMD,
+ BT431_CMD_CURS_ENABLE|BT431_CMD_OR_CURSORS|
+ BT431_CMD_4_1_MUX|BT431_CMD_THICK_1);
+}
+
+bt431_cursor_off(regs)
+ bt431_padded_regmap_t *regs;
+{
+ bt431_write_reg( regs, BT431_REG_CMD, BT431_CMD_4_1_MUX);
+}
+
+bt431_pos_cursor(regs,x,y)
+ bt431_padded_regmap_t *regs;
+ register int x,y;
+{
+#define lo(v) ((v)&0xff)
+#define hi(v) (((v)&0xf00)>>8)
+
+ /*
+ * Cx = x + D + H - P
+ * P = 37 if 1:1, 52 if 4:1, 57 if 5:1
+ * D = pixel skew between outdata and external data
+ * H = pixels between HSYNCH falling and active video
+ *
+ * Cy = y + V - 32
+ * V = scanlines between HSYNCH falling, two or more
+ * clocks after VSYNCH falling, and active video
+ */
+
+ bt431_write_reg( regs, BT431_REG_CXLO, lo(x + 360));
+ /* use autoincr feature */
+ bt431_write_reg_autoi( regs, BT431_REG_CXHI, hi(x + 360));
+ bt431_write_reg_autoi( regs, BT431_REG_CYLO, lo(y + 36));
+ bt431_write_reg_autoi( regs, BT431_REG_CYHI, hi(y + 36));
+}
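+/*
+ * Worked example (from the formula above): this driver selects the 4:1
+ * mux, so P = 52, and since it programs Cx = x + 360 the implied board
+ * constant is D + H = 412.  lo()/hi() merely split the resulting 12-bit
+ * coordinate across the low and high registers.
+ */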
+
+
+bt431_cursor_sprite( regs, cursor)
+ bt431_padded_regmap_t *regs;
+ register unsigned short *cursor;
+{
+ register int i;
+
+ bt431_select_reg( regs, BT431_REG_CRAM_BASE+0);
+ for (i = 0; i < 512; i++)
+ bt431_write_cmap_autoi( regs, BT431_REG_CRAM_BASE+i, *cursor++);
+}
+
+#if 1
+bt431_print_cursor(regs)
+ bt431_padded_regmap_t *regs;
+{
+ unsigned short curs[512];
+ register int i;
+
+ bt431_select_reg( regs, BT431_REG_CRAM_BASE+0);
+ for (i = 0; i < 512; i++) {
+ curs[i] = bt431_read_cmap_autoi( regs, BT431_REG_CRAM_BASE+i);
+ }
+ for (i = 0; i < 512; i += 16)
+ printf("%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",
+ curs[i], curs[i+1], curs[i+2], curs[i+3],
+ curs[i+4], curs[i+5], curs[i+6], curs[i+7],
+ curs[i+8], curs[i+9], curs[i+10], curs[i+11],
+ curs[i+12], curs[i+13], curs[i+14], curs[i+15]);
+}
+
+#endif
+
+/*
+ * Initialization
+ */
+unsigned /*char*/short bt431_default_cursor[64*8] = {
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0xffff, 0, 0, 0, 0, 0, 0, 0,
+ 0,
+};
+
+bt431_init(regs)
+ bt431_padded_regmap_t *regs;
+{
+ register int i;
+
+ /* use 4:1 input mux */
+ bt431_write_reg( regs, BT431_REG_CMD,
+ BT431_CMD_CURS_ENABLE|BT431_CMD_OR_CURSORS|
+ BT431_CMD_4_1_MUX|BT431_CMD_THICK_1);
+
+ /* home cursor */
+ bt431_write_reg_autoi( regs, BT431_REG_CXLO, 0x00);
+ bt431_write_reg_autoi( regs, BT431_REG_CXHI, 0x00);
+ bt431_write_reg_autoi( regs, BT431_REG_CYLO, 0x00);
+ bt431_write_reg_autoi( regs, BT431_REG_CYHI, 0x00);
+
+ /* no crosshair window */
+ bt431_write_reg_autoi( regs, BT431_REG_WXLO, 0x00);
+ bt431_write_reg_autoi( regs, BT431_REG_WXHI, 0x00);
+ bt431_write_reg_autoi( regs, BT431_REG_WYLO, 0x00);
+ bt431_write_reg_autoi( regs, BT431_REG_WYHI, 0x00);
+ bt431_write_reg_autoi( regs, BT431_REG_WWLO, 0x00);
+ bt431_write_reg_autoi( regs, BT431_REG_WWHI, 0x00);
+ bt431_write_reg_autoi( regs, BT431_REG_WHLO, 0x00);
+ bt431_write_reg_autoi( regs, BT431_REG_WHHI, 0x00);
+
+ /* load default cursor */
+ bt431_cursor_sprite( regs, bt431_default_cursor);
+}
diff --git a/chips/bt431.h b/chips/bt431.h
new file mode 100644
index 00000000..d98b08db
--- /dev/null
+++ b/chips/bt431.h
@@ -0,0 +1,78 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: bt431.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 8/91
+ *
+ * Defines for the bt431 Cursor chip
+ */
+
+
+typedef struct {
+ volatile unsigned char addr_lo;
+ volatile unsigned char addr_hi;
+ volatile unsigned char addr_cmap;
+ volatile unsigned char addr_reg;
+} bt431_regmap_t;
+
+/*
+ * Additional registers addressed indirectly
+ */
+
+#define BT431_REG_CMD 0x0000
+#define BT431_REG_CXLO 0x0001
+#define BT431_REG_CXHI 0x0002
+#define BT431_REG_CYLO 0x0003
+#define BT431_REG_CYHI 0x0004
+#define BT431_REG_WXLO 0x0005
+#define BT431_REG_WXHI 0x0006
+#define BT431_REG_WYLO 0x0007
+#define BT431_REG_WYHI 0x0008
+#define BT431_REG_WWLO 0x0009
+#define BT431_REG_WWHI 0x000a
+#define BT431_REG_WHLO 0x000b
+#define BT431_REG_WHHI 0x000c
+
+#define BT431_REG_CRAM_BASE 0x0000
+#define BT431_REG_CRAM_END 0x01ff
+
+/*
+ * Command register
+ */
+
+#define BT431_CMD_CURS_ENABLE 0x40
+#define BT431_CMD_XHAIR_ENABLE 0x20
+#define BT431_CMD_OR_CURSORS 0x10
+#define BT431_CMD_AND_CURSORS 0x00
+#define BT431_CMD_1_1_MUX 0x00
+#define BT431_CMD_4_1_MUX 0x04
+#define BT431_CMD_5_1_MUX 0x08
+#define BT431_CMD_xxx_MUX 0x0c
+#define BT431_CMD_THICK_1 0x00
+#define BT431_CMD_THICK_3 0x01
+#define BT431_CMD_THICK_5 0x02
+#define BT431_CMD_THICK_7 0x03
diff --git a/chips/bt455.c b/chips/bt455.c
new file mode 100644
index 00000000..12acecb7
--- /dev/null
+++ b/chips/bt455.c
@@ -0,0 +1,222 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: bt455.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 7/91
+ *
+ * Routines for the bt454/bt455 RAMDAC
+ */
+
+#include <platforms.h>
+
+#include <chips/bt455.h>
+#include <chips/screen.h>
+
+#ifdef DECSTATION
+
+typedef struct {
+ volatile unsigned char addr_cmap;
+ char pad0[3];
+ volatile unsigned char addr_cmap_data;
+ char pad1[3];
+ volatile unsigned char addr_clr;
+ char pad2[3];
+ volatile unsigned char addr_ovly;
+ char pad3[3];
+} bt455_padded_regmap_t;
+
+#else /*DECSTATION*/
+
+typedef bt455_regmap_t bt455_padded_regmap_t;
+#define wbflush()
+
+#endif /*DECSTATION*/
+
+
+/*
+ * Generic register access
+ */
+#define bt455_select_entry(regs, regno) \
+ { \
+ (regs)->addr_cmap = (regno)&0x0f; \
+ wbflush(); \
+ }
+
+
+/*
+ * Color map
+ */
+bt455_load_colormap( regs, map)
+ bt455_padded_regmap_t *regs;
+ color_map_t *map;
+{
+ register int i;
+
+ bt455_select_entry(regs, 0);
+
+ for (i = 0; i < 16; i++, map++) {
+ regs->addr_cmap_data = map->red >> 4;
+ wbflush();
+ regs->addr_cmap_data = map->green >> 4;
+ wbflush();
+ regs->addr_cmap_data = map->blue >> 4;
+ wbflush();
+ }
+}
+
+bt455_load_colormap_entry( regs, entry, map)
+ register bt455_padded_regmap_t *regs;
+ register color_map_t *map;
+{
+ bt455_select_entry(regs, entry);
+
+ regs->addr_cmap_data = map->red >> 4;
+ wbflush();
+ regs->addr_cmap_data = map->green >> 4;
+ wbflush();
+ regs->addr_cmap_data = map->blue >> 4;
+ wbflush();
+}
+
+bt455_init_colormap( regs)
+ bt455_padded_regmap_t *regs;
+{
+ register int i;
+ color_map_t m[2];
+
+ m[0].red = m[0].green = m[0].blue = 0;
+ m[1].red = m[1].green = m[1].blue = 0xff;
+
+ for (i = 0; i < 16; i++)
+ bt455_load_colormap_entry(regs, i, &m[0]);
+
+ bt455_load_colormap_entry(regs, 1, &m[1]);
+
+ bt455_cursor_color( regs, &m[0]);
+}
+
+#if 1/*debug*/
+bt455_print_colormap( regs)
+ bt455_padded_regmap_t *regs;
+{
+ register int i;
+
+ for (i = 0; i < 16; i++) {
+ register unsigned char red, green, blue;
+
+ bt455_select_entry(regs, i);
+ red = regs->addr_cmap_data;
+ green = regs->addr_cmap_data;
+ blue = regs->addr_cmap_data;
+ printf("%x->[x%x x%x x%x]\n", i,
+ red, green, blue);
+
+ }
+}
+#endif
+
+/*
+ * Video on/off
+ */
+bt455_video_off(regs, up)
+ bt455_padded_regmap_t *regs;
+ user_info_t *up;
+{
+ color_map_t m;
+ unsigned char *save;
+
+ /* Yes, this is awful */
+ save = (unsigned char *)up->dev_dep_2.gx.colormap;
+
+ bt455_select_entry( regs, 0);
+
+ *save++ = regs->addr_cmap_data; /* entry 0 */
+ *save++ = regs->addr_cmap_data;
+ *save++ = regs->addr_cmap_data;
+
+ *save++ = regs->addr_cmap_data; /* entry 1 */
+ *save++ = regs->addr_cmap_data;
+ *save++ = regs->addr_cmap_data;
+
+ m.red = m.green = m.blue = 0;
+ bt455_load_colormap_entry(regs, 0, &m);
+ bt455_load_colormap_entry(regs, 1, &m);
+}
+
+bt455_video_on(regs, up)
+ bt455_padded_regmap_t *regs;
+ user_info_t *up;
+{
+ unsigned char *save;
+
+ /* Like I said.. */
+ save = (unsigned char *)up->dev_dep_2.gx.colormap;
+
+ bt455_select_entry( regs, 0);
+
+ regs->addr_cmap_data = *save++; wbflush();
+ regs->addr_cmap_data = *save++; wbflush();
+ regs->addr_cmap_data = *save++; wbflush();
+
+ regs->addr_cmap_data = *save++; wbflush();
+ regs->addr_cmap_data = *save++; wbflush();
+ regs->addr_cmap_data = *save;
+
+}
+
+/*
+ * Cursor 'color' [as used on DEC's board]
+ */
+bt455_cursor_color( regs, color)
+ bt455_padded_regmap_t *regs;
+ color_map_t *color;
+{
+ register int i;
+
+ /* Bg is the first in color */
+ bt455_load_colormap_entry( regs, 8, color);
+ bt455_load_colormap_entry( regs, 9, color);
+
+ /* Fg is overlay */
+ color++;
+ regs->addr_ovly = color->red >> 4;
+ wbflush();
+ regs->addr_ovly = color->green >> 4;
+ wbflush();
+ regs->addr_ovly = color->blue >> 4;
+ wbflush();
+}
+
+/*
+ * Initialization
+ */
+bt455_init(regs)
+ bt455_padded_regmap_t *regs;
+{
+ /* Nothing really needed */
+}
+
diff --git a/chips/bt455.h b/chips/bt455.h
new file mode 100644
index 00000000..10145efc
--- /dev/null
+++ b/chips/bt455.h
@@ -0,0 +1,43 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: bt455.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 7/91
+ *
+ * Defines for the bt455 RAMDAC
+ */
+
+typedef struct {
+ volatile unsigned char addr_cmap;
+ volatile unsigned char addr_cmap_data;
+ volatile unsigned char addr_clr;
+ volatile unsigned char addr_ovly;
+} bt455_regmap_t;
+
+/*
+ * Color Map entries 00-0f are accessed indirectly
+ */
diff --git a/chips/bt459.c b/chips/bt459.c
new file mode 100644
index 00000000..0bee2f10
--- /dev/null
+++ b/chips/bt459.c
@@ -0,0 +1,384 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: bt459.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Routines for the bt459 RAMDAC
+ */
+
+#include <platforms.h>
+
+#include <chips/bt459.h>
+#include <chips/screen.h>
+
+#ifdef DECSTATION
+
+typedef struct {
+ volatile unsigned char addr_lo;
+ char pad0[3];
+ volatile unsigned char addr_hi;
+ char pad1[3];
+ volatile unsigned char addr_reg;
+ char pad2[3];
+ volatile unsigned char addr_cmap;
+ char pad3[3];
+} bt459_ds_padded_regmap_t;
+#define bt459_padded_regmap_t bt459_ds_padded_regmap_t
+
+#define mb() /* no write/read reordering problems */
+
+#endif /* DECSTATION */
+
+#ifdef FLAMINGO
+
+/* Sparse space ! */
+typedef struct {
+ volatile unsigned int addr_lo;
+ int pad0;
+ volatile unsigned int addr_hi;
+ int pad1;
+ volatile unsigned int addr_reg;
+ int pad2;
+ volatile unsigned int addr_cmap;
+ int pad3;
+} bt459_fl_padded_regmap_t;
+#define bt459_padded_regmap_t bt459_fl_padded_regmap_t
+
+#define mb() wbflush()
+
+#endif /* FLAMINGO */
+
+
+#ifndef bt459_padded_regmap_t
+typedef bt459_regmap_t bt459_padded_regmap_t;
+#define wbflush()
+#endif
+
+/*
+ * Generic register access
+ */
+#define bt459_select_reg_macro(r,n) \
+ (r)->addr_lo = (n); mb(); \
+ (r)->addr_hi = (n) >> 8; \
+ wbflush();
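+/*
+ * Example (illustrative): selecting BT459_REG_CCOLOR_2 (0x0182) writes
+ * 0x82 to addr_lo and 0x01 to addr_hi; the chip then exposes that
+ * internal register through addr_reg.
+ */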
+
+void
+bt459_select_reg(
+ bt459_padded_regmap_t *regs,
+ int regno)
+{
+ bt459_select_reg_macro( regs, regno);
+}
+
+void
+bt459_write_reg(
+ bt459_padded_regmap_t *regs,
+ int regno,
+ unsigned char val)
+{
+ bt459_select_reg_macro( regs, regno );
+ regs->addr_reg = val;
+ wbflush();
+}
+
+unsigned char
+bt459_read_reg(
+ bt459_padded_regmap_t *regs,
+ int regno)
+{
+ bt459_select_reg_macro( regs, regno );
+ return regs->addr_reg;
+}
+
+
+/*
+ * Color map
+ */
+bt459_load_colormap_entry(
+ bt459_padded_regmap_t *regs,
+ int entry,
+ color_map_t *map)
+{
+ bt459_select_reg(regs, entry & 0xff);
+
+ regs->addr_cmap = map->red;
+ wbflush();
+ regs->addr_cmap = map->green;
+ wbflush();
+ regs->addr_cmap = map->blue;
+ wbflush();
+}
+
+bt459_init_colormap(
+ bt459_padded_regmap_t *regs)
+{
+ register int i;
+
+ bt459_select_reg(regs, 0);
+ regs->addr_cmap = 0;
+ wbflush();
+ regs->addr_cmap = 0;
+ wbflush();
+ regs->addr_cmap = 0;
+ wbflush();
+
+ regs->addr_cmap = 0xff;
+ wbflush();
+ regs->addr_cmap = 0xff;
+ wbflush();
+ regs->addr_cmap = 0xff;
+ wbflush();
+
+ bt459_select_reg(regs, 255);
+ regs->addr_cmap = 0xff;
+ wbflush();
+ regs->addr_cmap = 0xff;
+ wbflush();
+ regs->addr_cmap = 0xff;
+ wbflush();
+
+}
+
+#if 1/*debug*/
+bt459_print_colormap(
+ bt459_padded_regmap_t *regs)
+{
+ register int i;
+
+ for (i = 0; i < 256; i++) {
+ register unsigned char red, green, blue;
+
+ bt459_select_reg(regs, i);
+ red = regs->addr_cmap; wbflush();
+ green = regs->addr_cmap; wbflush();
+ blue = regs->addr_cmap; wbflush();
+ printf("%x->[x%x x%x x%x]\n", i,
+ red, green, blue);
+
+ }
+}
+#endif
+
+/*
+ * Video on/off
+ *
+ * It is unfortunate that X11 goes backward with white@0
+ * and black@1. So we must stash away the zero-th entry
+ * and fix it while screen is off. Also must remember
+ * it, sigh.
+ */
+struct vstate {
+ bt459_padded_regmap_t *regs;
+ unsigned short off;
+};
+
+bt459_video_off(
+ struct vstate *vstate,
+ user_info_t *up)
+{
+ register bt459_padded_regmap_t *regs = vstate->regs;
+ unsigned char *save;
+
+ if (vstate->off)
+ return;
+
+ /* Yes, this is awful */
+ save = (unsigned char *)up->dev_dep_2.gx.colormap;
+
+ bt459_select_reg(regs, 0);
+ *save++ = regs->addr_cmap;
+ *save++ = regs->addr_cmap;
+ *save++ = regs->addr_cmap;
+
+ bt459_select_reg(regs, 0);
+ regs->addr_cmap = 0;
+ wbflush();
+ regs->addr_cmap = 0;
+ wbflush();
+ regs->addr_cmap = 0;
+ wbflush();
+
+ bt459_write_reg( regs, BT459_REG_PRM, 0);
+ bt459_write_reg( regs, BT459_REG_CCR, 0);
+
+ vstate->off = 1;
+}
+
+bt459_video_on(
+ struct vstate *vstate,
+ user_info_t *up)
+{
+ register bt459_padded_regmap_t *regs = vstate->regs;
+ unsigned char *save;
+
+ if (!vstate->off)
+ return;
+
+ /* Like I said.. */
+ save = (unsigned char *)up->dev_dep_2.gx.colormap;
+
+ bt459_select_reg(regs, 0);
+ regs->addr_cmap = *save++;
+ wbflush();
+ regs->addr_cmap = *save++;
+ wbflush();
+ regs->addr_cmap = *save++;
+ wbflush();
+
+ bt459_write_reg( regs, BT459_REG_PRM, 0xff);
+ bt459_write_reg( regs, BT459_REG_CCR, 0xc0);
+
+ vstate->off = 0;
+}
+
+/*
+ * Cursor
+ */
+bt459_pos_cursor(
+ bt459_padded_regmap_t *regs,
+ register int x,
+ register int y)
+{
+#define lo(v) ((v)&0xff)
+#define hi(v) (((v)&0xf00)>>8)
+ bt459_write_reg( regs, BT459_REG_CXLO, lo(x + 219));
+ bt459_write_reg( regs, BT459_REG_CXHI, hi(x + 219));
+ bt459_write_reg( regs, BT459_REG_CYLO, lo(y + 34));
+ bt459_write_reg( regs, BT459_REG_CYHI, hi(y + 34));
+}
+
+
+bt459_cursor_color(
+ bt459_padded_regmap_t *regs,
+ color_map_t *color)
+{
+ register int i;
+
+ bt459_select_reg_macro( regs, BT459_REG_CCOLOR_2);
+ for (i = 0; i < 2; i++) {
+ regs->addr_reg = color->red;
+ wbflush();
+ regs->addr_reg = color->green;
+ wbflush();
+ regs->addr_reg = color->blue;
+ wbflush();
+ color++;
+ }
+}
+
+bt459_cursor_sprite(
+ bt459_padded_regmap_t *regs,
+ unsigned char *cursor)
+{
+ register int i, j;
+
+ /*
+ * As per specs, must run a check to see if we
+ * had contention. If so, re-write the cursor.
+ */
+ for (i = 0, j = 0; j < 2; j++) {
+ /* loop once to write */
+ for ( ; i < 1024; i++)
+ bt459_write_reg( regs, BT459_REG_CRAM_BASE+i, cursor[i]);
+
+ /* loop to check, if fail write again */
+ for (i = 0; i < 1024; i++)
+ if (bt459_read_reg( regs, BT459_REG_CRAM_BASE+i) != cursor[i])
+ break;
+ if (i == 1024)
+ break;/* all is well now */
+ }
+}
+
+/*
+ * Initialization
+ */
+bt459_init(
+ bt459_padded_regmap_t *regs,
+ volatile char *reset,
+ int mux)
+{
+ if (bt459_read_reg(regs, BT459_REG_ID) != 0x4a)
+ panic("bt459");
+
+ if (mux == 4) {
+ /* use 4:1 input mux */
+ bt459_write_reg( regs, BT459_REG_CMD0, 0x40);
+ } else if (mux == 5) {
+ /* use 5:1 input mux */
+ bt459_write_reg( regs, BT459_REG_CMD0, 0xc0);
+	} /* else: unknown mux ratio */
+
+ *reset = 0; /* force chip reset */
+
+ /* no zooming, no panning */
+ bt459_write_reg( regs, BT459_REG_CMD1, 0x00);
+
+ /* signature test, X-windows cursor, no overlays, SYNC* PLL,
+ normal RAM select, 7.5 IRE pedestal, do sync */
+ bt459_write_reg( regs, BT459_REG_CMD2, 0xc2);
+
+ /* get all pixel bits */
+ bt459_write_reg( regs, BT459_REG_PRM, 0xff);
+
+ /* no blinking */
+ bt459_write_reg( regs, BT459_REG_PBM, 0x00);
+
+ /* no overlay */
+ bt459_write_reg( regs, BT459_REG_ORM, 0x00);
+
+ /* no overlay blink */
+ bt459_write_reg( regs, BT459_REG_OBM, 0x00);
+
+ /* no interleave, no underlay */
+ bt459_write_reg( regs, BT459_REG_ILV, 0x00);
+
+ /* normal operation, no signature analysis */
+ bt459_write_reg( regs, BT459_REG_TEST, 0x00);
+
+ /* no blinking, 1bit cross hair, XOR reg&crosshair,
+ no crosshair on either plane 0 or 1,
+ regular cursor on both planes */
+ bt459_write_reg( regs, BT459_REG_CCR, 0xc0);
+
+ /* home cursor */
+ bt459_write_reg( regs, BT459_REG_CXLO, 0x00);
+ bt459_write_reg( regs, BT459_REG_CXHI, 0x00);
+ bt459_write_reg( regs, BT459_REG_CYLO, 0x00);
+ bt459_write_reg( regs, BT459_REG_CYHI, 0x00);
+
+ /* no crosshair window */
+ bt459_write_reg( regs, BT459_REG_WXLO, 0x00);
+ bt459_write_reg( regs, BT459_REG_WXHI, 0x00);
+ bt459_write_reg( regs, BT459_REG_WYLO, 0x00);
+ bt459_write_reg( regs, BT459_REG_WYHI, 0x00);
+ bt459_write_reg( regs, BT459_REG_WWLO, 0x00);
+ bt459_write_reg( regs, BT459_REG_WWHI, 0x00);
+ bt459_write_reg( regs, BT459_REG_WHLO, 0x00);
+ bt459_write_reg( regs, BT459_REG_WHHI, 0x00);
+}
diff --git a/chips/bt459.h b/chips/bt459.h
new file mode 100644
index 00000000..66012d2a
--- /dev/null
+++ b/chips/bt459.h
@@ -0,0 +1,82 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: bt459.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Defines for the bt459 Cursor/RAMDAC chip
+ */
+
+typedef struct {
+ volatile unsigned char addr_lo;
+ volatile unsigned char addr_hi;
+ volatile unsigned char addr_reg;
+ volatile unsigned char addr_cmap;
+} bt459_regmap_t;
+
+/*
+ * Additional registers addressed indirectly
+ */
+
+ /* 0000-00ff Color Map entries */
+ /* 0100-010f Overlay color regs, unsupp */
+#define BT459_REG_CCOLOR_1 0x0181 /* Cursor color regs */
+#define BT459_REG_CCOLOR_2 0x0182
+#define BT459_REG_CCOLOR_3 0x0183
+#define BT459_REG_ID 0x0200 /* read-only, gives "4a" */
+#define BT459_REG_CMD0 0x0201
+#define BT459_REG_CMD1 0x0202
+#define BT459_REG_CMD2 0x0203
+#define BT459_REG_PRM 0x0204
+ /* 0205 reserved */
+#define BT459_REG_PBM 0x0206
+ /* 0207 reserved */
+#define BT459_REG_ORM 0x0208
+#define BT459_REG_OBM 0x0209
+#define BT459_REG_ILV 0x020a
+#define BT459_REG_TEST 0x020b
+#define BT459_REG_RSIG 0x020c
+#define BT459_REG_GSIG 0x020d
+#define BT459_REG_BSIG 0x020e
+ /* 020f-02ff reserved */
+#define BT459_REG_CCR 0x0300
+#define BT459_REG_CXLO 0x0301
+#define BT459_REG_CXHI 0x0302
+#define BT459_REG_CYLO 0x0303
+#define BT459_REG_CYHI 0x0304
+#define BT459_REG_WXLO 0x0305
+#define BT459_REG_WXHI 0x0306
+#define BT459_REG_WYLO 0x0307
+#define BT459_REG_WYHI 0x0308
+#define BT459_REG_WWLO 0x0309
+#define BT459_REG_WWHI 0x030a
+#define BT459_REG_WHLO 0x030b
+#define BT459_REG_WHHI 0x030c
+ /* 030d-03ff reserved */
+#define BT459_REG_CRAM_BASE 0x0400
+#define BT459_REG_CRAM_END 0x07ff
+
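
The bt459.c hunk above calls bt459_select_reg(), bt459_write_reg() and bt459_read_reg(), whose bodies sit earlier in that file, outside this excerpt. The sketch below shows how such helpers are commonly written for the addr_lo/addr_hi/addr_reg layout defined in this header; the bodies are an assumption, not a copy of the real ones, and they rely on the bt459_padded_regmap_t typedef and the wbflush() macro that bt459.c provides.

/* Sketch only: plausible indirect-register helpers matching the calls
 * in bt459.c above.  Assumes the bt459_padded_regmap_t typedef and the
 * wbflush() macro defined near the top of that file. */

void
bt459_select_reg(bt459_padded_regmap_t *regs, int regno)
{
	regs->addr_lo = regno & 0xff;		/* low byte of indirect address */
	wbflush();
	regs->addr_hi = (regno >> 8) & 0xff;	/* high byte */
	wbflush();
}

void
bt459_write_reg(bt459_padded_regmap_t *regs, int regno, unsigned char val)
{
	bt459_select_reg(regs, regno);
	regs->addr_reg = val;			/* data port for control registers */
	wbflush();
}

unsigned char
bt459_read_reg(bt459_padded_regmap_t *regs, int regno)
{
	bt459_select_reg(regs, regno);
	return regs->addr_reg;
}
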
diff --git a/chips/bt478.c b/chips/bt478.c
new file mode 100644
index 00000000..841728fd
--- /dev/null
+++ b/chips/bt478.c
@@ -0,0 +1,243 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: bt478.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Routines for the bt478 Cursor/RAMDAC chip
+ */
+
+#include <platforms.h>
+
+#include <chips/bt478.h>
+#include <chips/screen.h>
+
+#ifdef DECSTATION
+
+typedef struct {
+ volatile unsigned char addr_mapwa;
+ char pad0[3];
+ volatile unsigned char addr_map;
+ char pad1[3];
+ volatile unsigned char addr_mask;
+ char pad2[3];
+ volatile unsigned char addr_mapra;
+ char pad3[3];
+ volatile unsigned char addr_overwa;
+ char pad4[3];
+ volatile unsigned char addr_over;
+ char pad5[3];
+ volatile unsigned char addr_xxxx;
+ char pad6[3];
+ volatile unsigned char addr_overra;
+ char pad7[3];
+} bt478_padded_regmap_t;
+
+#else /*DECSTATION*/
+
+typedef bt478_regmap_t bt478_padded_regmap_t;
+#define wbflush()
+
+#endif /*DECSTATION*/
+
+
+/*
+ * Cursor color
+ */
+static
+bt478_load_cc(bt478, bg, fg)
+ register bt478_padded_regmap_t *bt478;
+ unsigned int *bg, *fg;
+{
+ register int i;
+
+ /* See init function for gotchas */
+
+ bt478->addr_overwa = 4;
+ wbflush();
+ for (i = 0; i < 3; i++) {
+ bt478->addr_over = (*bg++) >> 8;
+ wbflush();
+ }
+
+ bt478->addr_overwa = 8;
+ wbflush();
+ bt478->addr_over = 0x00;
+ wbflush();
+ bt478->addr_over = 0x00;
+ wbflush();
+ bt478->addr_over = 0x7f;
+ wbflush();
+
+ bt478->addr_overwa = 12;
+ wbflush();
+ for (i = 0; i < 3; i++) {
+ bt478->addr_over = (*fg++) >> 8;
+ wbflush();
+ }
+
+}
+
+
+bt478_cursor_color(bt478, color)
+ bt478_padded_regmap_t *bt478;
+ cursor_color_t *color;
+{
+ register int i;
+ register unsigned int *p;
+
+ /* Do it twice, in case of collisions */
+
+ bt478_load_cc(bt478, color->Bg_rgb, color->Fg_rgb);
+
+ p = color->Bg_rgb;
+ for (i = 0; i < 3; i++) {
+ bt478->addr_over = (*p++) >> 8;
+ wbflush();
+ }
+
+ p = color->Fg_rgb;
+ for (i = 0; i < 3; i++) {
+ bt478->addr_over = (*p++) >> 8;
+ wbflush();
+ }
+
+ bt478_load_cc(bt478, color->Bg_rgb, color->Fg_rgb);
+}
+
+/*
+ * Color map
+ */
+bt478_load_colormap( regs, map)
+ bt478_padded_regmap_t *regs;
+ color_map_t *map;
+{
+ register int i;
+
+ regs->addr_mapwa = 0;
+ wbflush();
+ for (i = 0; i < 256; i++, map++) {
+ regs->addr_map = map->red;
+ wbflush();
+ regs->addr_map = map->green;
+ wbflush();
+ regs->addr_map = map->blue;
+ wbflush();
+ }
+}
+
+bt478_load_colormap_entry( regs, entry, map)
+ bt478_padded_regmap_t *regs;
+ color_map_t *map;
+{
+ regs->addr_mapwa = entry & 0xff;
+ wbflush();
+ regs->addr_map = map->red;
+ wbflush();
+ regs->addr_map = map->green;
+ wbflush();
+ regs->addr_map = map->blue;
+ wbflush();
+}
+
+/*
+ * Video on/off (unused)
+ */
+bt478_video_on(pregs, up)
+ bt478_padded_regmap_t **pregs;
+{
+ (*pregs)->addr_mask = 0xff;
+}
+
+bt478_video_off(pregs, up)
+ bt478_padded_regmap_t **pregs;
+{
+ (*pregs)->addr_mask = 0;
+}
+
+/*
+ * Initialization
+ */
+static
+bt478_overlay(regs, plane)
+ bt478_padded_regmap_t *regs;
+ unsigned char *plane;
+{
+ *plane = 0xff;
+
+ /* Overlay planes 0 and 1 are wired zero, overlay plane 2
+ is plane "B" of the cursor (second half of it), plane 3
+	   is plane "A" of the cursor. So, we get three colors
+ for the cursor, at map entries 4, 8 and 12 */
+# define ovv(i,r,g,b) \
+ regs->addr_overwa = i; wbflush(); \
+ regs->addr_over = r; wbflush(); \
+ regs->addr_over = b; wbflush(); \
+ regs->addr_over = g; wbflush();
+
+ ovv(4,0,0,0); ovv(8,0,0,0x7f); ovv(12,0xff,0xff,0xff);
+
+# undef ovv
+
+ /* enable data input */
+ regs->addr_mask = 0xff;
+}
+
+bt478_init_bw_map(regs, plane)
+ bt478_padded_regmap_t *regs;
+{
+ register int i;
+
+ /* Set overlay color registers */
+ bt478_overlay(regs, plane);
+
+ /* loadup vdac map */
+# define mvv(i,v) { \
+ regs->addr_mapwa = i; wbflush(); \
+ regs->addr_map = v; wbflush(); \
+ regs->addr_map = v; wbflush(); \
+ regs->addr_map = v; wbflush();}
+
+ for (i = 0; i < 128; i++) mvv(i,0x00);
+ for (i = i; i < 256; i++) mvv(i,0xff);
+
+}
+
+bt478_init_color_map( regs, plane)
+ bt478_padded_regmap_t *regs;
+{
+ register int i;
+
+ bt478_overlay(regs, plane);
+
+ mvv(0,0);
+ mvv(1,0xff);
+ mvv(255,0xff);
+
+# undef mvv
+}
+
diff --git a/chips/bt478.h b/chips/bt478.h
new file mode 100644
index 00000000..a60108f3
--- /dev/null
+++ b/chips/bt478.h
@@ -0,0 +1,44 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: bt478.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Defines for the bt478 Cursor/RAMDAC chip
+ */
+
+typedef struct {
+ volatile unsigned char addr_mapwa;
+ volatile unsigned char addr_map;
+ volatile unsigned char addr_mask;
+ volatile unsigned char addr_mapra;
+ volatile unsigned char addr_overwa;
+ volatile unsigned char addr_over;
+ volatile unsigned char addr_xxxx;
+ volatile unsigned char addr_overra;
+} bt478_regmap_t;
+
diff --git a/chips/build_font.c b/chips/build_font.c
new file mode 100644
index 00000000..2542351e
--- /dev/null
+++ b/chips/build_font.c
@@ -0,0 +1,132 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: build_font.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ *
+ * Takes a font description file and generates a C source
+ * appropriate for use as kernel font on mips/vax boxes.
+ * This basically means encoding and mirroring the bitmaps.
+ */
+
+#include <stdio.h>
+
+main(argc,argv)
+ char **argv;
+{
+ int fd;
+ FILE *fout;
+ int i, j, k, n, l;
+ int first, last;
+ char *fname = "kernel_font.data";
+ char buf[16*9];
+ int verbose = 0;
+
+ if (argc > 1 && argv[1][0] == '+')
+ verbose++, argc--, argv++;
+
+ first = 0;
+ last = 190; /* 8-bit ASCII, offset by 'space' */
+ if (argc > 1) {
+ first = atoi(argv[1]);
+ last = first + 1;
+ }
+ if (argc > 2)
+ last = atoi(argv[2]) + 1;
+ if (argc > 3)
+ fname = argv[3];
+
+ fd = open(fname, 0, 0);
+ fout = fopen("kernel_font.c", "w");
+
+ fprintf(fout, "/* \n\
+ * Mach Operating System\n\
+ * Copyright (c) 1989 Carnegie-Mellon University\n\
+ * All rights reserved. The CMU software License Agreement specifies\n\
+ * the terms and conditions for use and redistribution.\n\
+ */\n\
+/*\n\
+ * THIS FILE WAS GENERATED BY %s FROM %s\n\
+ * IF YOU NEED TO, BE SURE YOU EDIT THE REAL THING!\n\
+ */\n\
+/*\n\
+ * Object:\n\
+ * kfont_7x14 EXPORTED array\n\
+ *\n\
+ * Kernel font for printable ASCII chars\n\
+ *\n\
+ * The smallest index in this array corresponds to a\n\
+ * space. So, we start at 0x20 in the ascii table.\n\
+ * Note that glyphs are mirrored (byteorder, I think)\n\
+ * the commented bitmap shows what they really look like\n\
+ */\n\
+\n\
+unsigned char kfont_7x14[] = {\n", argv[0], fname);
+
+skip_comments:
+ read(fd, buf, 1);
+ if (buf[0] == '#') {
+ do
+ read(fd, buf, 1);
+ while (buf[0] != '\n');
+ goto skip_comments;
+ }
+ lseek(fd, -1, 1); /* put char back */
+
+	/* skip glyphs before the first requested one, if any */
+ for (l = 0; l < first; l++)
+ read(fd, buf, 2+(9*15));
+
+ /* scan for real now */
+ for (i = first; i < last; i++) {
+ /* read one full glyph */
+ if (read(fd, buf, 2+(9*15)) < 0)
+ break;
+ if (verbose)
+ printf("Character '%c':\n\t", buf[0]);
+ /* index and char itself in comments */
+ fprintf(fout, "/* %3x '%c' */\n", i, 0x20+i);
+
+ /* encode and mirror each one of the 15 scanlines */
+ for (n = 0; n < 15; n++) {
+ unsigned char cc[8], swap = 0;
+ /* 8 bits per scanline */
+ for (k = 2+(n*9), j = 0; j < 8; k++, j++) {
+ if (verbose)
+ printf("%c", (buf[k] == '1') ? '@' : ' ');
+ swap = ((buf[k] - '0') << 7) | (swap >> 1);
+ cc[j] = buf[k];
+ }
+			fprintf(fout,"\t/* %8.8s */\t%#2x,\n", cc, (unsigned char)swap);
+ if (verbose)
+ printf("\n\t");
+ }
+ }
+ fprintf(fout, "};\n");
+ fclose(fout);
+}
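
The scanline loop in main() above does two things at once: it packs the eight '0'/'1' characters of a glyph row into one byte, and it mirrors the bit order so the leftmost column of the glyph lands in the least significant bit. Below is a standalone restatement of that transform with a worked value; mirror_scanline() is an illustrative name, not part of the tool.

#include <stdio.h>

/* Same arithmetic as the inner loop above: bits are consumed left to
 * right and shifted in from the top, so the text row "10000010"
 * encodes as 0x41, i.e. the byte comes out bit-reversed. */
unsigned char
mirror_scanline(const char bits[8])
{
	unsigned char swap = 0;
	int j;

	for (j = 0; j < 8; j++)
		swap = ((bits[j] - '0') << 7) | (swap >> 1);
	return swap;
}

int
main(void)
{
	printf("%#x\n", mirror_scanline("10000010"));	/* prints 0x41 */
	return 0;
}
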
diff --git a/chips/busses.c b/chips/busses.c
new file mode 100644
index 00000000..dd64f489
--- /dev/null
+++ b/chips/busses.c
@@ -0,0 +1,230 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: busses.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 4/90
+ *
+ * Generic autoconfiguration functions,
+ * usable to probe and attach devices
+ * on any bus that suits the generic bus
+ * structure, such as VME, TURBOChannel,
+ * and all the VAX busses.
+ *
+ */
+
+#include <mach/boolean.h>
+#include <mach/std_types.h>
+#include <chips/busses.h>
+
+
+
+
+/*
+ * configure_bus_master
+ *
+ * Given the name of a bus_ctlr, look it up in the
+ * init table. If found, probe it. If there can be
+ * slaves attached, walk the device's init table
+ * for those that might be attached to this controller.
+ * Call the 'slave' function on each one to see if
+ * ok, then the 'attach' one.
+ *
+ * Returns 0 if the controller is not there.
+ *
+ */
+boolean_t configure_bus_master(
+ char *name,
+ vm_offset_t virt,
+ vm_offset_t phys,
+ int adpt_no,
+ char *bus_name)
+{
+ register struct bus_device *device;
+ register struct bus_ctlr *master;
+ register struct bus_driver *driver;
+
+ int found = 0;
+
+ /*
+ * Match the name in the table, then pick the entry that has the
+ * right adaptor number, or one that has it wildcarded. Entries
+ * already allocated are marked alive, skip them.
+ */
+ for (master = bus_master_init; master->driver; master++) {
+ if (master->alive)
+ continue;
+ if (((master->adaptor == adpt_no) || (master->adaptor == '?')) &&
+ (strcmp(master->name, name) == 0)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return FALSE;
+
+ /*
+ * Found a match, probe it
+ */
+ driver = master->driver;
+ if ((*driver->probe) (virt, master) == 0)
+ return FALSE;
+
+ master->alive = 1;
+ master->adaptor = adpt_no;
+
+ /*
+ * Remember which controller this device is attached to
+ */
+ driver->minfo[master->unit] = master;
+
+ printf("%s%d: at %s%d\n", master->name, master->unit, bus_name, adpt_no);
+
+ /*
+ * Now walk all devices to check those that might be attached to this
+ * controller. We match the unallocated ones that have the right
+	 * controller number, or that have a wildcarded controller number.
+ */
+ for (device = bus_device_init; device->driver; device++) {
+ int ctlr;
+ if (device->alive || device->driver != driver ||
+ (device->adaptor != '?' && device->adaptor != adpt_no))
+ continue;
+ ctlr = device->ctlr;
+ if (ctlr == '?') device->ctlr = master->unit;
+ /*
+ * A matching entry. See if the slave-probing routine is
+ * happy.
+ */
+ if ((device->ctlr != master->unit) ||
+ ((*driver->slave) (device, virt) == 0)) {
+ device->ctlr = ctlr;
+ continue;
+ }
+
+ device->alive = 1;
+ device->adaptor = adpt_no;
+ device->ctlr = master->unit;
+
+ /*
+ * Save a backpointer to the controller
+ */
+ device->mi = master;
+
+ /*
+ * ..and to the device
+ */
+ driver->dinfo[device->unit] = device;
+
+ if (device->slave >= 0)
+ printf(" %s%d: at %s%d slave %d",
+ device->name, device->unit,
+ driver->mname, master->unit, device->slave);
+ else
+ printf(" %s%d: at %s%d",
+ device->name, device->unit,
+ driver->mname, master->unit);
+
+ /*
+ * Now attach this slave
+ */
+ (*driver->attach) (device);
+ printf("\n");
+ }
+ return TRUE;
+}
+
+/*
+ * configure_bus_device
+ *
+ * Given the name of a bus_device, look it up in the
+ * init table. If found, probe it. If it is present,
+ * call the driver's 'attach' function.
+ *
+ * Returns 0 if the device is not there.
+ *
+ */
+boolean_t configure_bus_device(
+ char *name,
+ vm_offset_t virt,
+ vm_offset_t phys,
+ int adpt_no,
+ char *bus_name)
+{
+ register struct bus_device *device;
+ register struct bus_driver *driver;
+
+ int found = 0;
+
+ /*
+ * Walk all devices to find one with the right name
+ * and adaptor number (or wildcard). The entry should
+ * be unallocated, and also the slave number should
+ * be wildcarded.
+ */
+ for (device = bus_device_init; device->driver; device++) {
+ if (device->alive)
+ continue;
+ if (((device->adaptor == adpt_no) || (device->adaptor == '?')) &&
+ (device->slave == -1) &&
+ ((!device->phys_address) ||
+ ((device->phys_address == phys) && (device->address == virt))) &&
+ (strcmp(device->name, name) == 0)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return FALSE;
+
+ /*
+ * Found an entry, probe the device
+ */
+ driver = device->driver;
+ if ((*driver->probe) (virt, (struct bus_ctlr *)device) == 0)
+ return FALSE;
+
+ device->alive = 1;
+ device->adaptor = adpt_no;
+
+ printf("%s%d: at %s%d", device->name, device->unit, bus_name, adpt_no);
+
+ /*
+ * Remember which driver this device is attached to
+ */
+ driver->dinfo[device->unit] = device;
+
+ /*
+ * Attach the device
+ */
+ (*driver->attach) (device);
+ printf("\n");
+
+ return TRUE;
+}
+
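
configure_bus_master() and configure_bus_device() expect to be driven by machine-dependent code that scans a bus and reports what it finds in each slot. A minimal sketch of such a caller follows; example_bus_scan(), slot_module_name(), slot_virt() and slot_phys() are invented names standing in for whatever the real TURBOchannel/VME scan code uses.

#include <mach/std_types.h>
#include <chips/busses.h>

/* Invented stand-ins for the machine-dependent slot enquiries. */
extern char *slot_module_name(/* int slot */);
extern vm_offset_t slot_virt(/* int slot */), slot_phys(/* int slot */);

void
example_bus_scan(int nslots, char *bus_name)
{
	int slot;

	for (slot = 0; slot < nslots; slot++) {
		char *name = slot_module_name(slot);	/* e.g. "cfb" */

		if (name == 0)
			continue;			/* empty slot */

		/*
		 * Try it as a controller first (it might have slaves),
		 * then as a plain device.
		 */
		if (configure_bus_master(name, slot_virt(slot),
					 slot_phys(slot), slot, bus_name))
			continue;
		(void) configure_bus_device(name, slot_virt(slot),
					    slot_phys(slot), slot, bus_name);
	}
}
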
diff --git a/chips/busses.h b/chips/busses.h
new file mode 100644
index 00000000..56a9ed37
--- /dev/null
+++ b/chips/busses.h
@@ -0,0 +1,154 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: busses.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 4/90
+ *
+ * Structures used by configuration routines to
+ * explore a given bus structure.
+ */
+
+#ifndef _CHIPS_BUSSES_H_
+#define _CHIPS_BUSSES_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ *
+ * This is mildly modeled after the Unibus on Vaxen,
+ * one of the most complicated bus structures.
+ * Therefore, let's hope this can be done once and for all.
+ *
+ * At the bottom level there is a "bus_device", which
+ * might exist in isolation (e.g. a clock on the CPU
+ * board) or be a standard component of an architecture
+ * (e.g. the bitmap display on some workstations).
+ *
+ * Disk devices and communication lines support multiple
+ * units, hence the "bus_driver" structure which is more
+ * flexible and allows probing and dynamic configuration
+ * of the number and type of attached devices.
+ *
+ * At the top level there is a "bus_ctlr" structure, used
+ * in systems where the I/O bus(ses) are separate from
+ * the memory bus(ses), and/or when memory boards can be
+ * added to the main bus (and they must be config-ed
+ * and/or can interrupt the processor for ECC errors).
+ *
+ * The autoconfiguration process typically starts at
+ * the top level and walks down tables that are
+ * defined either in a generic file or are specially
+ * created by config.
+ */
+
+/*
+ * Per-controller structure.
+ */
+struct bus_ctlr {
+ struct bus_driver *driver; /* myself, as a device */
+ char *name; /* readability */
+ int unit; /* index in driver */
+ int (*intr)(); /* interrupt handler(s) */
+ vm_offset_t address; /* device virtual address */
+ int am; /* address modifier */
+ vm_offset_t phys_address;/* device phys address */
+ char adaptor; /* slot where found */
+ char alive; /* probed successfully */
+ char flags; /* any special conditions */
+ vm_offset_t sysdep; /* On some systems, queue of
+ * operations in-progress */
+ natural_t sysdep1; /* System dependent */
+};
+
+
+/*
+ * Per-``device'' structure
+ */
+struct bus_device {
+ struct bus_driver *driver; /* autoconf info */
+ char *name; /* my name */
+ int unit;
+ int (*intr)();
+ vm_offset_t address; /* device address */
+ int am; /* address modifier */
+ vm_offset_t phys_address;/* device phys address */
+ char adaptor;
+ char alive;
+ char ctlr;
+ char slave;
+ int flags;
+ struct bus_ctlr *mi; /* backpointer to controller */
+ struct bus_device *next; /* optional chaining */
+ vm_offset_t sysdep; /* System dependent */
+ natural_t sysdep1; /* System dependent */
+};
+
+/*
+ * General flag definitions
+ */
+#define BUS_INTR_B4_PROBE 0x01 /* enable interrupts before probe */
+#define BUS_INTR_DISABLED 0x02 /* ignore all interrupts */
+#define BUS_CTLR 0x04 /* descriptor for a bus adaptor */
+#define BUS_XCLU 0x80 /* want exclusive use of bdp's */
+
+/*
+ * Per-driver structure.
+ *
+ * Each bus driver defines entries for a set of routines
+ * that are used at boot time by the configuration program.
+ */
+struct bus_driver {
+ int (*probe)( /* see if the driver is there */
+ /* vm_offset_t address,
+ struct bus_ctlr * */ );
+ int (*slave)( /* see if any slave is there */
+ /* struct bus_device *,
+ vm_offset_t */ );
+ void (*attach)( /* setup driver after probe */
+ /* struct bus_device * */);
+ int (*dgo)(); /* start transfer */
+ vm_offset_t *addr; /* device csr addresses */
+ char *dname; /* name of a device */
+ struct bus_device **dinfo; /* backpointers to init structs */
+ char *mname; /* name of a controller */
+ struct bus_ctlr **minfo; /* backpointers to init structs */
+ int flags;
+};
+
+#ifdef KERNEL
+extern struct bus_ctlr bus_master_init[];
+extern struct bus_device bus_device_init[];
+
+extern boolean_t configure_bus_master(char *, vm_offset_t, vm_offset_t,
+ int, char * );
+extern boolean_t configure_bus_device(char *, vm_offset_t, vm_offset_t,
+ int, char * );
+#endif /* KERNEL */
+
+
+#endif /* _CHIPS_BUSSES_H_ */
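
To plug into this scheme a driver supplies a struct bus_driver, and the tables generated by config carry matching bus_device/bus_ctlr entries for it. Here is a sketch for a hypothetical device "xx" (all xx_* names are made up); cfb_driver in cfb_hdw.c below is a real instance of the same pattern.

#include <chips/busses.h>

/* Hypothetical driver "xx": field order follows struct bus_driver. */
extern int xx_probe();		/* returns non-zero if the device is there */
extern void xx_attach();	/* called once the probe succeeds */

vm_offset_t xx_std[1] = { 0 };
struct bus_device *xx_dinfo[1];

struct bus_driver xx_driver = {
	xx_probe,	/* probe */
	0,		/* slave: none, not a controller */
	xx_attach,	/* attach */
	0,		/* dgo */
	xx_std,		/* addr */
	"xx",		/* dname */
	xx_dinfo,	/* dinfo */
	0,		/* mname */
	0,		/* minfo */
	0		/* flags */
};
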
diff --git a/chips/cfb_hdw.c b/chips/cfb_hdw.c
new file mode 100644
index 00000000..78764aab
--- /dev/null
+++ b/chips/cfb_hdw.c
@@ -0,0 +1,188 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: cfb_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Driver for the 3max Color Frame Buffer Display,
+ * hardware-level operations.
+ */
+
+#include <cfb.h>
+#if (NCFB > 0)
+
+#include <platforms.h>
+
+#include <machine/machspl.h>
+#include <mach/std_types.h>
+#include <chips/busses.h>
+#include <chips/screen_defs.h>
+#include <chips/pm_defs.h>
+
+typedef pm_softc_t cfb_softc_t;
+
+#ifdef DECSTATION
+#include <mips/PMAX/pmag_ba.h>
+#include <mips/PMAX/tc.h>
+#endif
+
+#ifdef FLAMINGO
+#include <mips/PMAX/pmag_ba.h> /* XXX fixme */
+#include <alpha/DEC/tc.h>
+#endif
+
+/*
+ * Definition of the driver for the auto-configuration program.
+ */
+
+int cfb_probe(), cfb_intr();
+static void cfb_attach();
+
+vm_offset_t cfb_std[NCFB] = { 0 };
+struct bus_device *cfb_info[NCFB];
+struct bus_driver cfb_driver =
+ { cfb_probe, 0, cfb_attach, 0, cfb_std, "cfb", cfb_info,
+ 0, 0, BUS_INTR_DISABLED};
+
+/*
+ * Probe/Attach functions
+ */
+
+cfb_probe( /* reg, ui */)
+{
+	static int probed_once = 0;
+
+ /*
+ * Probing was really done sweeping the TC long ago
+ */
+ if (tc_probe("cfb") == 0)
+ return 0;
+ if (probed_once++ > 1)
+ printf("[mappable] ");
+ return 1;
+}
+
+static void
+cfb_attach(ui)
+ struct bus_device *ui;
+{
+ /* ... */
+ printf(": color display");
+}
+
+
+/*
+ * Interrupt routine
+ */
+
+cfb_intr(unit,spllevel)
+ spl_t spllevel;
+{
+ register volatile char *ack;
+
+ /* acknowledge interrupt */
+ ack = (volatile char *) cfb_info[unit]->address + CFB_OFFSET_IREQ;
+ *ack = 0;
+
+#ifdef mips
+ splx(spllevel);
+#endif
+ lk201_led(unit);
+}
+
+cfb_vretrace(cfb, on)
+ cfb_softc_t *cfb;
+{
+ int i;
+
+ for (i = 0; i < NCFB; i++)
+ if (cfb_info[i]->address == (vm_offset_t)cfb->framebuffer)
+ break;
+ if (i == NCFB) return;
+
+ (*tc_enable_interrupt)(cfb_info[i]->adaptor, on, 0);
+}
+
+/*
+ * Boot time initialization: must make device
+ * usable as console asap.
+ */
+extern int
+ cfb_soft_reset(), cfb_set_status(),
+ bt459_pos_cursor(), bt459_video_on(),
+ bt459_video_off(), cfb_vretrace(),
+ pm_get_status(), pm_char_paint(),
+ pm_insert_line(), pm_remove_line(),
+ pm_clear_bitmap(), pm_map_page();
+
+static struct screen_switch cfb_sw = {
+ screen_noop, /* graphic_open */
+ cfb_soft_reset, /* graphic_close */
+ cfb_set_status, /* set_status */
+ pm_get_status, /* get_status */
+ pm_char_paint, /* char_paint */
+ bt459_pos_cursor, /* pos_cursor */
+ pm_insert_line, /* insert_line */
+ pm_remove_line, /* remove_line */
+ pm_clear_bitmap, /* clear_bitmap */
+ bt459_video_on, /* video_on */
+ bt459_video_off, /* video_off */
+ cfb_vretrace, /* intr_enable */
+ pm_map_page /* map_page */
+};
+
+cfb_cold_init(unit, up)
+ user_info_t *up;
+{
+ cfb_softc_t *cfb;
+ screen_softc_t sc = screen(unit);
+ int base = tc_probe("cfb");
+
+ bcopy(&cfb_sw, &sc->sw, sizeof(sc->sw));
+ sc->flags |= COLOR_SCREEN;
+ sc->frame_scanline_width = 1024;
+ sc->frame_height = 1024;
+ sc->frame_visible_width = 1024;
+ sc->frame_visible_height = 864;
+
+ pm_init_screen_params(sc,up);
+ (void) screen_up(unit, up);
+
+ cfb = pm_alloc(unit, base + CFB_OFFSET_BT459, base + CFB_OFFSET_VRAM, -1);
+
+ screen_default_colors(up);
+
+ cfb_soft_reset(sc);
+
+ /*
+	 * Clearing the screen at boot saves a lot of scrolling
+	 * and speeds up booting quite a bit.
+ */
+ screen_blitc( unit, 'C'-'@');/* clear screen */
+}
+
+#endif /* NCFB > 0 */
diff --git a/chips/cfb_misc.c b/chips/cfb_misc.c
new file mode 100644
index 00000000..2bda86bd
--- /dev/null
+++ b/chips/cfb_misc.c
@@ -0,0 +1,249 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: cfb_misc.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Driver for the PMAG-BA simple color framebuffer
+ *
+ */
+
+#include <cfb.h>
+#include <sfb.h> /* shares code */
+#if ((NCFB > 0) || (NSFB > 0))
+#include <platforms.h>
+
+/*
+ * NOTE: This driver relies heavily on the pm one.
+ */
+
+#include <device/device_types.h>
+#include <chips/screen_defs.h>
+#include <chips/pm_defs.h>
+typedef pm_softc_t cfb_softc_t;
+
+#include <chips/bt459.h>
+#define bt459 cursor_registers
+
+#ifdef DECSTATION
+#include <mips/PMAX/pmag_ba.h>
+#endif
+
+#ifdef FLAMINGO
+#include <mips/PMAX/pmag_ba.h> /* XXX fixme */
+#endif
+
+/*
+ * Initialize color map, for kernel use
+ */
+cfb_init_colormap(
+ screen_softc_t sc)
+{
+ cfb_softc_t *cfb = (cfb_softc_t*)sc->hw_state;
+ user_info_t *up = sc->up;
+ color_map_t Bg_Fg[2];
+ register int i;
+
+ bt459_init_colormap( cfb->bt459 );
+
+ /* init bg/fg colors */
+ for (i = 0; i < 3; i++) {
+ up->dev_dep_2.pm.Bg_color[i] = 0x00;
+ up->dev_dep_2.pm.Fg_color[i] = 0xff;
+ }
+
+ Bg_Fg[0].red = Bg_Fg[0].green = Bg_Fg[0].blue = 0x00;
+ Bg_Fg[1].red = Bg_Fg[1].green = Bg_Fg[1].blue = 0xff;
+ bt459_cursor_color( cfb->bt459, Bg_Fg);
+}
+
+/*
+ * Large vs. small cursor
+ */
+cfb_small_cursor_to_large(
+ user_info_t *up,
+ cursor_sprite_t cursor)
+{
+ unsigned short new_cursor[32];
+ unsigned char *curbytes, fg, fbg;
+ int i, j, k;
+ char *sprite;
+
+ /* Clear out old cursor */
+ bzero( up->dev_dep_2.pm.cursor_sprite,
+ sizeof(up->dev_dep_2.pm.cursor_sprite));
+
+ /* small cursor is 32x2 bytes, fg first */
+ curbytes = (unsigned char *) cursor;
+
+ /* use the upper left corner of the large cursor
+ * as a 64x1 cursor, fg&bg alternated */
+ for (i = 0; i < 32; i++) {
+ fg = curbytes[i];
+ fbg = fg | curbytes[i + 32];
+ new_cursor[i] = 0;
+ for (j = 0, k = 15; j < 8; j++, k -= 2) {
+ new_cursor[i] |= ((fbg >> j) & 0x1) << (k);
+ new_cursor[i] |= ((fg >> j) & 0x1) << (k - 1);
+ }
+ }
+
+ /* Now stick it in the proper place */
+
+ curbytes = (unsigned char *) new_cursor;
+ sprite = up->dev_dep_2.pm.cursor_sprite;
+ for (i = 0; i < 64; i += 4) {
+ /* butterfly as we go */
+ *sprite++ = curbytes[i + 1];
+ *sprite++ = curbytes[i + 0];
+ *sprite++ = curbytes[i + 3];
+ *sprite++ = curbytes[i + 2];
+ sprite += 12; /* skip rest of the line */
+ }
+}
+
+
+/*
+ * Device-specific set status
+ */
+cfb_set_status(
+ screen_softc_t sc,
+ dev_flavor_t flavor,
+ dev_status_t status,
+ natural_t status_count)
+{
+ cfb_softc_t *cfb = (cfb_softc_t*) sc->hw_state;
+
+ switch (flavor) {
+
+ case SCREEN_ADJ_MAPPED_INFO:
+ return pm_set_status(sc, flavor, status, status_count);
+
+ case SCREEN_LOAD_CURSOR:
+
+ if (status_count < sizeof(cursor_sprite_t)/sizeof(int))
+ return D_INVALID_SIZE;
+/* cfb_small_cursor_to_large(sc->up, (cursor_sprite_t) status);*/
+ cfb_small_cursor_to_large(sc->up, (unsigned short *) status);
+
+ /* Fall through */
+
+ case SCREEN_LOAD_CURSOR_LONG: /* 3max only */
+
+ sc->flags |= SCREEN_BEING_UPDATED;
+ bt459_cursor_sprite(cfb->bt459, sc->up->dev_dep_2.pm.cursor_sprite);
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+
+ break;
+
+ case SCREEN_SET_CURSOR_COLOR: {
+ color_map_t c[2];
+ register cursor_color_t *cc = (cursor_color_t*) status;
+
+ c[0].red = cc->Bg_rgb[0];
+ c[0].green = cc->Bg_rgb[1];
+ c[0].blue = cc->Bg_rgb[2];
+ c[1].red = cc->Fg_rgb[0];
+ c[1].green = cc->Fg_rgb[1];
+ c[1].blue = cc->Fg_rgb[2];
+
+ sc->flags |= SCREEN_BEING_UPDATED;
+ bt459_cursor_color (cfb->bt459, c );
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+
+ break;
+ }
+
+ case SCREEN_SET_CMAP_ENTRY: {
+ color_map_entry_t *e = (color_map_entry_t*) status;
+
+ if (e->index < 256) {
+ sc->flags |= SCREEN_BEING_UPDATED;
+ bt459_load_colormap_entry( cfb->bt459, e->index, &e->value);
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+ }
+ break;
+ }
+
+ default:
+ return D_INVALID_OPERATION;
+ }
+ return D_SUCCESS;
+}
+
+#if (NCFB > 0)
+/*
+ * Hardware initialization
+ */
+cfb_init_screen(
+ cfb_softc_t *cfb)
+{
+ bt459_init( cfb->bt459,
+ cfb->bt459 + (CFB_OFFSET_RESET - CFB_OFFSET_BT459),
+ 4 /* 4:1 MUX */);
+}
+
+/*
+ * Do what's needed when X exits
+ */
+cfb_soft_reset(
+ screen_softc_t sc)
+{
+ cfb_softc_t *cfb = (cfb_softc_t*) sc->hw_state;
+ user_info_t *up = sc->up;
+ extern cursor_sprite_t dc503_default_cursor;
+
+ /*
+ * Restore params in mapped structure
+ */
+ pm_init_screen_params(sc,up);
+ up->row = up->max_row - 1;
+
+ up->dev_dep_2.pm.x26 = 2; /* you do not want to know */
+ up->dev_dep_1.pm.x18 = (short*)2;
+
+ /*
+ * Restore RAMDAC chip to default state
+ */
+ cfb_init_screen(cfb);
+
+ /*
+ * Load kernel's cursor sprite: just use the same pmax one
+ */
+ cfb_small_cursor_to_large(up, dc503_default_cursor);
+ bt459_cursor_sprite(cfb->bt459, up->dev_dep_2.pm.cursor_sprite);
+
+ /*
+ * Color map and cursor color
+ */
+ cfb_init_colormap(sc);
+}
+#endif /* NCFB > 0 */
+
+
+
+#endif /* (NCFB > 0) || (NSFB > 0) */
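
The inner loop of cfb_small_cursor_to_large() packs each source bit into two adjacent bits of a 16-bit word, fg|bg in the upper bit of the pair and fg in the lower. The function below only restates that loop for a single fg/bg byte pair so the packing is easier to see; example_pack_byte() is an illustrative name, not driver code.

/* Restatement of the packing loop in cfb_small_cursor_to_large().
 * With fg = 0x01, bg = 0x00: fbg = 0x01, so source bit 0 sets output
 * bits 15 (fg|bg) and 14 (fg), giving 0xc000. */
unsigned short
example_pack_byte(unsigned char fg, unsigned char bg)
{
	unsigned short out = 0;
	unsigned char fbg = fg | bg;
	int j, k;

	for (j = 0, k = 15; j < 8; j++, k -= 2) {
		out |= ((fbg >> j) & 0x1) << k;
		out |= ((fg  >> j) & 0x1) << (k - 1);
	}
	return out;
}
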
diff --git a/chips/dc503.c b/chips/dc503.c
new file mode 100644
index 00000000..8bfab509
--- /dev/null
+++ b/chips/dc503.c
@@ -0,0 +1,189 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File:	dc503.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Routines for the DEC DC503 Programmable Cursor Chip
+ */
+#include <platforms.h>
+
+#include <chips/pm_defs.h>
+#include <chips/dc503.h>
+
+
+#if defined(DECSTATION) || defined(VAXSTATION)
+
+typedef struct {
+ volatile unsigned short pcc_cmdr; /* all regs are wo */
+ short pad0;
+ volatile unsigned short pcc_xpos;
+ short pad1;
+ volatile unsigned short pcc_ypos;
+ short pad2;
+ volatile unsigned short pcc_xmin1;
+ short pad3;
+ volatile unsigned short pcc_xmax1;
+ short pad4;
+ volatile unsigned short pcc_ymin1;
+ short pad5;
+ volatile unsigned short pcc_ymax1;
+ short pad6[9];
+ volatile unsigned short pcc_xmin2;
+ short pad7;
+ volatile unsigned short pcc_xmax2;
+ short pad8;
+ volatile unsigned short pcc_ymin2;
+ short pad9;
+ volatile unsigned short pcc_ymax2;
+ short pad10;
+ volatile unsigned short pcc_memory;
+ short pad11;
+} dc503_padded_regmap_t;
+
+#else
+
+typedef dc503_regmap_t dc503_padded_regmap_t;
+
+#endif
+
+#ifdef VAXSTATION
+#define X_CSHIFT 216
+#define Y_CSHIFT 34
+#define wbflush()
+#define PCC_STATE (DC503_CMD_ENPA | DC503_CMD_ENPB | DC503_CMD_HSHI)
+#endif /*VAXSTATION*/
+
+/* defaults, for the innocents */
+
+#ifndef X_CSHIFT
+#define X_CSHIFT 212
+#define Y_CSHIFT 34
+#define PCC_STATE (DC503_CMD_ENPA | DC503_CMD_ENPB)
+#endif
+
+/*
+ * Cursor
+ */
+dc503_pos_cursor( regs, x, y)
+ dc503_padded_regmap_t *regs;
+{
+ regs->pcc_xpos = x + X_CSHIFT;
+ regs->pcc_ypos = y + Y_CSHIFT;
+ wbflush();
+}
+
+dc503_load_cursor( pm, cursor)
+ pm_softc_t *pm;
+ unsigned short *cursor;
+{
+ dc503_padded_regmap_t *regs;
+ register int i;
+
+ regs = (dc503_padded_regmap_t*)pm->cursor_registers;
+
+ pm->cursor_state |= DC503_CMD_LODSA;
+ regs->pcc_cmdr = pm->cursor_state;
+ wbflush();
+ for (i = 0; i < 32; i++) {
+ regs->pcc_memory = *cursor++;
+ wbflush();
+ }
+ pm->cursor_state &= ~DC503_CMD_LODSA;
+ regs->pcc_cmdr = pm->cursor_state;
+}
+
+
+unsigned short dc503_default_cursor[16+16] = {
+/* Plane A */
+ 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff,
+ 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff,
+/* Plane B */
+ 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff,
+ 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff
+};
+
+/*
+ * Vert retrace interrupt
+ */
+dc503_vretrace( pm, on)
+ pm_softc_t *pm;
+{
+ if (on)
+ pm->cursor_state |= DC503_CMD_ENRG2;
+ else
+ pm->cursor_state &= ~DC503_CMD_ENRG2;
+ ((dc503_padded_regmap_t*)pm->cursor_registers)->pcc_cmdr = pm->cursor_state;
+}
+
+/*
+ * Video on/off
+ */
+dc503_video_on( pm, up)
+ pm_softc_t *pm;
+{
+ pm->cursor_state = DC503_CMD_ENPA | (pm->cursor_state & ~DC503_CMD_FOPB);
+ ((dc503_padded_regmap_t*)pm->cursor_registers)->pcc_cmdr = pm->cursor_state;
+}
+
+dc503_video_off( pm, up)
+ pm_softc_t *pm;
+{
+ pm->cursor_state = DC503_CMD_FOPB | (pm->cursor_state & ~DC503_CMD_ENPA);
+ ((dc503_padded_regmap_t*)pm->cursor_registers)->pcc_cmdr = pm->cursor_state;
+}
+
+
+/*
+ * Initialization
+ */
+dc503_init( pm )
+ pm_softc_t *pm;
+{
+ dc503_padded_regmap_t *regs;
+
+ regs = (dc503_padded_regmap_t*)pm->cursor_registers;
+
+ dc503_load_cursor( pm, dc503_default_cursor);
+ dc503_pos_cursor( regs, 0, 0); /* XXX off screen */
+
+ regs->pcc_xmin1 = 0; /* test only */
+ regs->pcc_xmax1 = 0;
+ regs->pcc_ymin1 = 0;
+ regs->pcc_ymax1 = 0;
+
+ regs->pcc_xmin2 = 212; /* vert retrace detector */
+ regs->pcc_xmax2 = 212+1023;
+ regs->pcc_ymin2 = 34+863;
+ regs->pcc_ymax2 = 34+863;
+
+#if 0
+ regs->pcc_cmdr = DC503_CMD_FOPB | DC503_CMD_VBHI;/* reset */
+#endif
+ pm->cursor_state = PCC_STATE;
+ regs->pcc_cmdr = pm->cursor_state;
+}
diff --git a/chips/dc503.h b/chips/dc503.h
new file mode 100644
index 00000000..e3c330c4
--- /dev/null
+++ b/chips/dc503.h
@@ -0,0 +1,69 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: dc503.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Defines for the DEC DC503 Programmable Cursor
+ */
+
+typedef struct {
+ volatile unsigned short pcc_cmdr; /* all regs are wo */
+ volatile unsigned short pcc_xpos;
+ volatile unsigned short pcc_ypos;
+ volatile unsigned short pcc_xmin1;
+ volatile unsigned short pcc_xmax1;
+ volatile unsigned short pcc_ymin1;
+ volatile unsigned short pcc_ymax1;
+ volatile unsigned short pcc_xmin2;
+ volatile unsigned short pcc_xmax2;
+ volatile unsigned short pcc_ymin2;
+ volatile unsigned short pcc_ymax2;
+ volatile unsigned short pcc_memory;
+} dc503_regmap_t;
+
+/*
+ * Command register bits
+ */
+
+#define DC503_CMD_TEST 0x8000 /* cursor test flip-flop */
+#define DC503_CMD_HSHI 0x4000 /* Hor sync polarity */
+#define DC503_CMD_VBHI 0x2000 /* Ver blank polarity */
+#define DC503_CMD_LODSA 0x1000 /* load sprite array */
+#define DC503_CMD_FORG2 0x0800 /* force detector2 to one */
+#define DC503_CMD_ENRG2 0x0400 /* enable detector2 */
+#define DC503_CMD_FORG1 0x0200 /* force detector1 to one */
+#define DC503_CMD_ENRG1 0x0100 /* enable detector1 */
+#define DC503_CMD_XHWID 0x0080 /* hair cursor (double) width */
+#define DC503_CMD_XHCL1 0x0040 /* hair clip region */
+#define DC503_CMD_XHCLP 0x0020 /* clip hair inside region */
+#define DC503_CMD_XHAIR 0x0010 /* enable hair cursor */
+#define DC503_CMD_FOPB 0x0008 /* force curs plane B to one */
+#define DC503_CMD_ENPB 0x0004 /* enable curs plane B */
+#define DC503_CMD_FOPA 0x0002 /* force curs plane A to one */
+#define DC503_CMD_ENPA 0x0001 /* enable curs plane A */
+
diff --git a/chips/dtop.h b/chips/dtop.h
new file mode 100644
index 00000000..e37ba04e
--- /dev/null
+++ b/chips/dtop.h
@@ -0,0 +1,241 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: dtop.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/92
+ *
+ * Definitions for the Desktop serial bus (i2c aka ACCESS).
+ */
+
+#ifndef _DTOP_H_
+#define _DTOP_H_
+
+#define DTOP_MAX_DEVICES 14
+#define DTOP_MAX_MSG_SIZE 36 /* 3 hdr + 32 data + 1 checksum */
+
+typedef struct {
+
+ unsigned char dest_address; /* low bit is zero */
+ unsigned char src_address; /* ditto */
+ union {
+ struct {
+ unsigned char len : 5, /* message byte len */
+ sub : 2, /* sub-address */
+ P : 1; /* Control(1)/Data(0) marker */
+ } val;
+ unsigned char bits; /* quick check */
+ } code;
+
+	/* variable size, checksum byte at end */
+ unsigned char body[DTOP_MAX_MSG_SIZE-3];
+
+} dtop_message, *dtop_message_t;
+
+/*
+ * Standard addresses
+ */
+
+#define DTOP_ADDR_HOST 0x50 /* address for the (only) host */
+#define DTOP_ADDR_DEFAULT 0x6e /* power-up default address */
+#define DTOP_ADDR_FIRST 0x52 /* first assignable address */
+#define DTOP_ADDR_LAST 0x6c /* last, inclusive */
+
+#define DTOP_ADDR_KBD 0x6c /* as used by DEC */
+#define DTOP_ADDR_MOUSE 0x6a
+
+/*
+ * Standard messages
+ */
+
+/* from host to devices */
+
+#define DTOP_MSG_RESET		0xf0	/* preceded by 0x81: P,len 1 */
+
+#define DTOP_MSG_ID_REQUEST	0xf1	/* preceded by 0x81: P,len 1 */
+
+#define DTOP_MSG_ASSIGN_ADDRESS	0xf2	/* preceded by 0x9e: P,len 30 */
+ /* followed by a dtop_id_reply_t */
+ /* and by the new_IC_address */
+
+#define DTOP_MSG_CAP_REQUEST	0xf3	/* preceded by 0x83: P,len 3 */
+ /* followed by a 16 bit u_offset */
+
+#define DTOP_MSG_APPL_TEST	0xb1	/* preceded by P, sub, len 1 */
+
+/* from devices to host */
+
+#define DTOP_MSG_ATTENTION	0xe0	/* preceded by P, len */
+# define DTOP_ATN_OK_STATUS 0x00 /* anything else bad */
+ /* followed by 0-30 bytes */
+
+#define DTOP_MSG_ID_REPLY	0xe1	/* preceded by P,len (29..32) */
+
+typedef struct {
+ unsigned char module_revision[8]; /* ascii, blank padded */
+ unsigned char vendor_name[8];
+ unsigned char module_name[8];
+	int	device_number;		/* 32 bits, 2's complement */
+ /* 0-3 optional bytes follow, ignore */
+} dtop_id_reply_t;
+
+#define DTOP_MSG_CAP_REPLY	0xe3	/* preceded by P,len (3..32) */
+ /* followed by 16 bit u_offset */
+ /* followed by data */
+
+#define DTOP_MSG_APPL_SIGNAL 0xa0 /* application level signal */
+# define DTOP_SIG_ATTENTION 0x00
+# define DTOP_SIG_RESET 0x01
+# define DTOP_SIG_HALT 0x02
+
+#define DTOP_MSG_APPL_TREPLY 0xa1 /* followed by status (0-->ok) */
+ /* and 0..30 bytes of result data */
+
+/* reserved message codes (testing, manufacturing) */
+
+#define DTOP_MSG_RES0 0xc0
+#define DTOP_MSG_RES1 0xc1
+#define DTOP_MSG_RES2 0xc2
+#define DTOP_MSG_RES3 0xc3
+
+
+/*
+ * Device specific definitions: Keyboard
+ */
+
+/* from host to keyboard */
+
+#define DTOP_KMSG_CLICK		0x01	/* preceded by P, sub len 2 */
+# define DTOP_CLICK_VOLUME_MAX 0x7 /* followed by one byte */
+
+#define DTOP_KMSG_BELL		0x02	/* preceded by P, sub len 2 */
+ /* same as above */
+
+#define DTOP_KMSG_LED		0x03	/* preceded by P, sub len 2 */
+ /* four lower bits turn leds on */
+
+#define DTOP_KMSG_POLL		0x04	/* preceded by P, sub len 1 */
+
+/* keyboard sends up to 11 codes in a data message, distinguished values: */
+#define DTOP_KBD_EMPTY 0x00
+#define DTOP_KBD_OUT_ERR 0x01
+#define DTOP_KBD_IN_ERR 0x02
+
+#define DTOP_KBD_KEY_MIN 0x08
+#define DTOP_KBD_KEY_MAX 0xff
+
+/* powerup status values: 0 ok, else.. */
+#define DTOP_KBD_ROM_FAIL 0x01
+#define DTOP_KBD_RAM_FAIL 0x02
+#define DTOP_KBD_KEY_DOWN 0x03
+
+
+/*
+ * Device specific definitions: Locators (mouse)
+ */
+
+/* locator sends this type of report data */
+
+typedef struct {
+ unsigned short buttons; /* 1->pressed */
+ short x;
+ short y;
+ short z;
+ /* possibly 3 more dimensions for gloves */
+} dtop_locator_msg_t;
+
+#define DTOP_LMSG_SET_RATE	0x01	/* preceded by P,sub, len 2 */
+ /* followed by sampling interval,
+				   from 8 to 25 msecs (0 -> polled) */
+
+#define DTOP_LMSG_POLL		0x02	/* preceded by P,sub, len 1 */
+
+/* Powerup codes same as keyboard */
+
+
+/*
+ * Implementation specific definitions
+ */
+
+typedef union {
+
+ dtop_message unknown_report;
+
+ struct {
+ char last_codes_count;
+ unsigned char last_codes[11]; /* max as per specs */
+ unsigned int last_msec; /* autorepeat state */
+ unsigned short poll_frequency;
+ unsigned char k_ar_state;
+# define K_AR_IDLE 0 /* quiescent, no polling */
+# define K_AR_OFF 4 /* turn off polling pls */
+# define K_AR_ACTIVE 2 /* polling, no autos yet */
+# define K_AR_TRIGGER 1 /* sent one autorepeat */
+ unsigned char bell_volume;
+ unsigned char led_status;
+ } keyboard;
+
+ struct {
+ unsigned char type : 7, /* DEV_MOUSE, DEV_TABLET, .. */
+ relative : 1;
+ unsigned char n_coords;
+ unsigned short prev_buttons;
+# define L_BUTTON_MAX 16
+ unsigned char button_code[L_BUTTON_MAX];
+# define L_COORD_MAX 6
+ unsigned int coordinate[L_COORD_MAX]; /* max 6D */
+ } locator;
+
+ /* add more as they come along */
+
+} dtop_device, *dtop_device_t;
+
+/* All handler functions should have this interface */
+extern int
+ dtop_null_device_handler(
+ dtop_device_t dev,
+ dtop_message_t msg,
+ int event,
+ unsigned char outc),
+ dtop_locator_handler(
+ dtop_device_t dev,
+ dtop_message_t msg,
+ int event,
+ unsigned char outc),
+ dtop_keyboard_handler(
+ dtop_device_t dev,
+ dtop_message_t msg,
+ int event,
+ unsigned char outc);
+
+
+#define DTOP_EVENT_RECEIVE_PACKET 1
+#define DTOP_EVENT_BAD_PACKET 2
+#define DTOP_EVENT_PUTC 4
+#define DTOP_EVENT_POLL 8
+
+
+#endif /* _DTOP_H_ */
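
The code byte of a dtop_message packs the body length, the sub-address and the Control/Data marker, and the per-device notes above give the expected framing (e.g. DTOP_KMSG_LED is "P, sub len 2" with the LED bits in the low nibble). A sketch of building such a message through the struct follows; example_build_led_msg() is an invented name, and the trailing checksum byte is left as a placeholder because this header does not say how it is computed.

#include <chips/dtop.h>

/* Sketch: fill in a keyboard-LED control message per the layout above.
 * The checksum algorithm is not specified in this header, so the last
 * byte is only a placeholder here. */
void
example_build_led_msg(dtop_message_t msg, unsigned char leds)
{
	msg->dest_address = DTOP_ADDR_KBD;
	msg->src_address  = DTOP_ADDR_HOST;
	msg->code.val.P   = 1;			/* control message */
	msg->code.val.sub = 0;
	msg->code.val.len = 2;			/* command byte + data byte */
	msg->body[0] = DTOP_KMSG_LED;
	msg->body[1] = leds & 0xf;		/* four lower bits turn leds on */
	msg->body[2] = 0;			/* checksum placeholder */
}
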
diff --git a/chips/dtop_handlers.c b/chips/dtop_handlers.c
new file mode 100644
index 00000000..7a83ea29
--- /dev/null
+++ b/chips/dtop_handlers.c
@@ -0,0 +1,441 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: dtop_handlers.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/92
+ *
+ * Handler functions for devices attached to the DESKTOP bus.
+ */
+
+#include <dtop.h>
+#if NDTOP > 0
+
+#include <mach_kdb.h>
+
+#include <machine/machspl.h> /* spl definitions */
+#include <mach/std_types.h>
+#include <device/io_req.h>
+#include <device/tty.h>
+
+#include <chips/busses.h>
+#include <chips/serial_defs.h>
+#include <chips/screen_defs.h>
+#include <mips/PMAX/tc.h>
+
+#include <chips/dtop.h>
+#include <chips/lk201.h>
+
+/*
+ * Default handler function
+ */
+int
+dtop_null_device_handler(
+ dtop_device_t dev,
+ dtop_message_t msg,
+ int event,
+ unsigned char outc)
+{
+ /* See if the message was to the default address (powerup) */
+
+	/* Hmm, don't know how to handle this.  Drop it */
+ if (event == DTOP_EVENT_RECEIVE_PACKET)
+ dev->unknown_report = *msg;
+ return 0;
+}
+
+/*
+ * Handler for locator devices (mice)
+ */
+int
+dtop_locator_handler(
+ dtop_device_t dev,
+ dtop_message_t msg,
+ int event,
+ unsigned char outc)
+{
+ register unsigned short buttons;
+ register short coord;
+#if BYTE_MSF
+# define get_short(b0,b1) (((b1)<<8)|(b0))
+#else
+# define get_short(b0,b1) (((b0)<<8)|(b1))
+#endif
+
+ /*
+ * Do the position first
+ */
+ {
+ register int i;
+ register boolean_t moved;
+ int delta_coords[L_COORD_MAX];
+
+ /*
+ * Get all coords, see if it moved at all (buttons!)
+ */
+ moved = FALSE;
+ for (i = 0; i < dev->locator.n_coords; i++) {
+
+ coord = get_short(msg->body[2+(i<<1)],
+ msg->body[3+(i<<1)]);
+
+ if (dev->locator.relative) {
+ /*
+ * Flame on
+ * I am getting tired of this, why do they have to
+ * keep this bug around ? Religion ? Damn, they
+ * design a keyboard for X11 use and forget the mouse ?
+ * Flame off
+ */
+#define BOGUS_DEC_X_AXIS
+#ifdef BOGUS_DEC_X_AXIS
+ if (i == 1) coord = - coord;
+#endif /* BOGUS_DEC_X_AXIS */
+ /* dev->locator.coordinate[i] += coord; */
+ } else {
+ register unsigned int old_coord;
+
+ old_coord = dev->locator.coordinate[i];
+ dev->locator.coordinate[i] = coord;
+ coord = old_coord - coord;
+ }
+ delta_coords[i] = coord;
+ if (coord != 0)
+ moved = TRUE;
+ }
+ if (moved) {
+ /* scale and threshold done higher up */
+ screen_motion_event( 0,
+ dev->locator.type,
+ delta_coords[0],
+ delta_coords[1]);
+ }
+ }
+
+ /*
+ * Time for the buttons now
+ */
+#define new_buttons coord
+ new_buttons = get_short(msg->body[0],msg->body[1]);
+ buttons = new_buttons ^ dev->locator.prev_buttons;
+ if (buttons) {
+ register int i, type;
+
+ dev->locator.prev_buttons = new_buttons;
+ for (i = 0; buttons; i++, buttons >>= 1) {
+
+ if ((buttons & 1) == 0) continue;
+
+ type = (new_buttons & (1<<i)) ?
+ EVT_BUTTON_DOWN : EVT_BUTTON_UP;
+ screen_keypress_event( 0,
+ dev->locator.type,
+ dev->locator.button_code[i],
+ type);
+ }
+ }
+#undef new_buttons
+	return 0;
+}
+
+/*
+ * Handler for keyboard devices
+ * Special case: outc set for recv packet means
+ * we are inside the kernel debugger
+ */
+int
+dtop_keyboard_handler(
+ dtop_device_t dev,
+ dtop_message_t msg,
+ int event,
+ unsigned char outc)
+{
+ char save[11];
+ register int msg_len, c;
+
+ /*
+ * Well, really this code handles just an lk401 and in
+ * a very primitive way at that. Should at least emulate
+ * an lk201 decently, and make that a pluggable module.
+ * Sigh.
+ */
+
+ if (event != DTOP_EVENT_RECEIVE_PACKET) {
+ switch (event) {
+ case DTOP_EVENT_POLL:
+ {
+ register unsigned int t, t0;
+
+ /*
+ * Note we will always have at least the
+ * end-of-list marker present (a zero)
+ * Here stop and trigger of autorepeat.
+ * Do not repeat shift keys, either.
+ */
+ {
+ register unsigned char uc, i = 0;
+
+rpt_char:
+ uc = dev->keyboard.last_codes[i];
+
+ if (uc == DTOP_KBD_EMPTY) {
+ dev->keyboard.k_ar_state = K_AR_OFF;
+ return 0;
+ }
+ if ((uc >= LK_R_SHIFT) && (uc <= LK_R_ALT)) {
+ /* sometimes swapped. Grrr. */
+ if (++i < dev->keyboard.last_codes_count)
+ goto rpt_char;
+ dev->keyboard.k_ar_state = K_AR_OFF;
+ return 0;
+ }
+ c = uc;
+ }
+
+ /*
+ * Got a char. See if enough time from stroke,
+ * or from last repeat.
+ */
+ t0 = (dev->keyboard.k_ar_state == K_AR_TRIGGER) ? 30 : 500;
+ t = approx_time_in_msec();
+ if ((t - dev->keyboard.last_msec) < t0)
+ return 0;
+
+ dev->keyboard.k_ar_state = K_AR_TRIGGER;
+
+ /*
+ * Simplest thing to do is to mimic lk201
+ */
+ outc = lk201_input(0, LK_REPEAT);
+ if ( ! screen_keypress_event( 0,
+ DEV_KEYBD,
+ c,
+ EVT_BUTTON_UP)) {
+ if (outc > 0) cons_input(0, outc, 0);
+ } else
+ screen_keypress_event( 0,
+ DEV_KEYBD,
+ c,
+ EVT_BUTTON_DOWN);
+ return 0;
+ }
+ default: gimmeabreak();/*fornow*/
+ }
+ return -1;
+ }
+
+ msg_len = msg->code.val.len;
+
+ /* Check for errors */
+ c = msg->body[0];
+ if ((c < DTOP_KBD_KEY_MIN) && (c != DTOP_KBD_EMPTY)) {
+ printf("Keyboard error: %x %x %x..\n", msg_len, c, msg->body[1]);
+ if (c != DTOP_KBD_OUT_ERR) return -1;
+ /* spec sez if scan list overflow still there is data */
+ msg->body[0] = 0;
+ }
+
+ dev->keyboard.last_msec = approx_time_in_msec();
+
+ switch (dev->keyboard.k_ar_state) {
+ case K_AR_IDLE:
+ /* if from debugger, timeouts might not be working yet */
+ if (outc == 0xff)
+ break;
+ dtop_keyboard_autorepeat( dev );
+ /* fall through */
+ case K_AR_TRIGGER:
+ dev->keyboard.k_ar_state = K_AR_ACTIVE;
+ break;
+ case K_AR_ACTIVE:
+ break;
+ case K_AR_OFF: gimmeabreak(); /* ??? */
+ dev->keyboard.k_ar_state = K_AR_IDLE;
+ }
+
+ /*
+ * We can only assume that pressed keys are reported in the
+ * same order (a minimum of sanity, please) across scans.
+ * To make things readable, do a first pass cancelling out
+ * all keys that are still pressed, and a second one generating
+ * events. While generating events, do the upstrokes first
+ * from oldest to youngest, then the downstrokes from oldest
+ * to youngest. This copes with lost packets and provides
+ * a reasonable model even if scans are too slow.
+ */
+
+ /* make a copy of new state first */
+ {
+ register char *p, *q, *e;
+
+ p = save;
+ q = (char*)msg->body;
+ e = (char*)&msg->body[msg_len];
+
+ while (q < e)
+ *p++ = *q++;
+ }
+
+ /*
+ * Do the cancelling pass
+ */
+ {
+ register char *ls, *le, *ns, *ne, *sync;
+
+ ls = (char*)dev->keyboard.last_codes;
+ le = (char*)&dev->keyboard.last_codes[dev->keyboard.last_codes_count];
+ ns = (char*)msg->body;
+ ne = (char*)&msg->body[msg_len];
+
+ /* sync marks where to restart scanning, saving
+ time thanks to ordering constraints */
+ for (sync = ns; ls < le; ls++) {
+ register char c = *ls;
+ for (ns = sync; ns < ne; ns++)
+ if (c == *ns) {
+ *ls = *ns = 0;
+ sync = ns + 1;
+ break;
+ }
+ /* we could already tell if c is an upstroke,
+ but see the above discussion about errors */
+ }
+ }
+ /*
+ * Now generate all upstrokes
+ */
+ {
+ register char *ls, *le;
+ register unsigned char c;
+
+ le = (char*)dev->keyboard.last_codes;
+ ls = (char*)&dev->keyboard.last_codes[dev->keyboard.last_codes_count - 1];
+
+ for ( ; ls >= le; ls--)
+ if (c = *ls) {
+ /* keep kernel notion of lk201 state consistent */
+ (void) lk201_input(0,c);
+
+ if (outc == 0)
+ screen_keypress_event(0,
+ DEV_KEYBD,
+ c,
+ EVT_BUTTON_UP);
+ }
+ }
+ /*
+ * And finally the downstrokes
+ */
+ {
+ register char *ns, *ne, c, retc;
+
+ ne = (char*)msg->body;
+ ns = (char*)&msg->body[msg_len - 1];
+ retc = 0;
+
+ for ( ; ns >= ne; ns--)
+ if (c = *ns) {
+ register unsigned char data;
+
+ data = c;
+ c = lk201_input(0, data);
+
+ if (c == -2) { /* just returned from kdb */
+ /* NOTE: many things have happened while
+ we were sitting on the stack, now it
+ is last_codes that holds the truth */
+#if 1
+ /* But the truth might not be welcome.
+ If we get out because we hit RETURN
+ on the rconsole line all is well,
+ but if we did it from the keyboard
+ we get here on the downstroke. Then
+ we will get the upstroke which we
+ would give to X11. People complained
+ about this extra keypress.. so they
+ lose everything. */
+
+ dev->keyboard.last_codes_count = 1;
+ dev->keyboard.last_codes[0] = 0;
+#endif
+ return -1;
+ }
+
+ /*
+ * If X11 had better code for the keyboard this
+ * would be an EVT_BUTTON_DOWN. But that would
+ * screwup the REPEAT function. Grrr.
+ */
+ /* outc non zero sez we are in the debugger */
+ if (outc == 0) {
+ if (screen_keypress_event(0,
+ DEV_KEYBD,
+ data,
+ EVT_BUTTON_DOWN))
+ c = -1; /* consumed by X */
+ else
+ if (c > 0) cons_input(0, c, 0);
+ }
+ /* return the xlated keycode anyways */
+ if ((c > 0) && (retc == 0))
+ retc = c;
+ }
+ outc = retc;
+ }
+ /* install new scan state */
+ {
+ register char *p, *q, *e;
+
+ p = (char*)dev->keyboard.last_codes;
+ q = (char*)save;
+ e = (char*)&save[msg_len];
+
+ while (q < e)
+ *p++ = *q++;
+ dev->keyboard.last_codes_count = msg_len;
+ }
+ return outc;
+}
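The scan diffing above can be read in isolation: every key code present in both the previous and the current scan is cancelled, whatever survives in the old scan was released, and whatever survives in the new scan was pressed. A simplified sketch of the same idea, operating on plain arrays with hypothetical on_release()/on_press() callbacks and without the sync-pointer ordering optimization:

/* Sketch only: both arrays are modified, 0 marks a cancelled slot. */
static void
diff_key_scans(unsigned char *old_scan, int old_len,
	       unsigned char *new_scan, int new_len,
	       void (*on_release)(unsigned char),
	       void (*on_press)(unsigned char))
{
	int i, j;

	/* pass 1: cancel keys that are still held down */
	for (i = 0; i < old_len; i++)
		for (j = 0; j < new_len; j++)
			if (old_scan[i] && old_scan[i] == new_scan[j]) {
				old_scan[i] = new_scan[j] = 0;
				break;
			}

	/* pass 2: leftovers in the old scan were released ... */
	for (i = 0; i < old_len; i++)
		if (old_scan[i])
			on_release(old_scan[i]);

	/* ... and leftovers in the new scan were pressed */
	for (j = 0; j < new_len; j++)
		if (new_scan[j])
			on_press(new_scan[j]);
}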
+
+/*
+ * Polled operations: we must do autorepeat by hand. Sigh.
+ */
+dtop_keyboard_autorepeat(
+ dtop_device_t dev)
+{
+ spl_t s = spltty();
+
+ if (dev->keyboard.k_ar_state != K_AR_IDLE)
+ dtop_keyboard_handler( dev, 0, DTOP_EVENT_POLL, 0);
+
+ if (dev->keyboard.k_ar_state == K_AR_OFF)
+ dev->keyboard.k_ar_state = K_AR_IDLE;
+ else
+ timeout( dtop_keyboard_autorepeat, dev, dev->keyboard.poll_frequency);
+
+ splx(s);
+}
+
+#endif /*NDTOP>0*/
diff --git a/chips/dtop_hdw.c b/chips/dtop_hdw.c
new file mode 100644
index 00000000..6962d709
--- /dev/null
+++ b/chips/dtop_hdw.c
@@ -0,0 +1,644 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: dtop_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/92
+ *
+ * Hardware-level operations for the Desktop serial line
+ * bus (i2c aka ACCESS).
+ */
+
+#include <dtop.h>
+#if NDTOP > 0
+#include <bm.h>
+#include <platforms.h>
+
+#include <machine/machspl.h> /* spl definitions */
+#include <mach/std_types.h>
+#include <device/io_req.h>
+#include <device/tty.h>
+
+#include <chips/busses.h>
+#include <chips/serial_defs.h>
+#include <chips/screen_defs.h>
+#include <chips/lk201.h>
+#include <mips/PMAX/tc.h>
+
+#include <chips/dtop.h>
+
+#define DTOP_MAX_POLL 0x7fff /* about half a sec */
+
+#ifdef MAXINE
+
+typedef volatile unsigned int *data_reg_t; /* uC */
+#define DTOP_GET_BYTE(data) (((*(data)) >> 8) & 0xff)
+#define DTOP_PUT_BYTE(data,c) { *(data) = (c) << 8; }
+
+typedef volatile unsigned int *poll_reg_t; /* SIR */
+#define DTOP_RX_AVAIL(poll) (*(poll) & 1)
+#define DTOP_TX_AVAIL(poll) (*(poll) & 2)
+
+#else
+
+#error "Define how to get/put DTOP packets on this box"
+
+#endif
+
+/*
+ * Driver status
+ */
+
+struct dtop_softc {
+ data_reg_t data;
+ poll_reg_t poll;
+ char polling_mode;
+ char probed_once;
+ short bad_pkts;
+
+ struct dtop_ds {
+ int (*handler)(dtop_device_t,
+ dtop_message_t,
+ int,
+ unsigned char);
+ dtop_device status;
+ } device[(DTOP_ADDR_DEFAULT - DTOP_ADDR_FIRST) >> 1];
+
+# define DTOP_DEVICE_NO(address) (((address)-DTOP_ADDR_FIRST)>>1)
+
+} dtop_softc_data[NDTOP];
+
+typedef struct dtop_softc *dtop_softc_t;
+
+dtop_softc_t dtop_softc[NDTOP];
+
+/*
+ * Definition of the driver for the auto-configuration program.
+ */
+
+int dtop_probe(), dtop_intr();
+static void dtop_attach();
+
+vm_offset_t dtop_std[NDTOP] = { 0 };
+struct bus_device *dtop_info[NDTOP];
+struct bus_driver dtop_driver =
+ { dtop_probe, 0, dtop_attach, 0, dtop_std, "dtop", dtop_info,};
+
+
+int dtop_print_debug = 0;
+
+/*
+ * Adapt/Probe/Attach functions
+ */
+
+set_dtop_address( dtopunit, poll_reg)
+ data_reg_t poll_reg;
+{
+ int i;
+
+ extern int dtop_probe(), dtop_param(), dtop_start(),
+ dtop_putc(), dtop_getc(),
+ dtop_pollc(), dtop_mctl(), dtop_softCAR();
+
+ dtop_std[dtopunit] = (vm_offset_t)poll_reg;
+
+ /* Do this here */
+ console_probe = dtop_probe;
+ console_param = dtop_param;
+ console_start = dtop_start;
+ console_putc = dtop_putc;
+ console_getc = dtop_getc;
+ console_pollc = dtop_pollc;
+ console_mctl = dtop_mctl;
+ console_softCAR = dtop_softCAR;
+
+}
+
+dtop_probe( data_reg, ui)
+ data_reg_t data_reg;
+ struct bus_device *ui;
+{
+ int dtopunit = ui->unit, i;
+ dtop_softc_t dtop;
+
+ dtop = &dtop_softc_data[dtopunit];
+ dtop_softc[dtopunit] = dtop;
+
+ dtop->poll = (poll_reg_t)dtop_std[dtopunit];
+ dtop->data = data_reg;
+
+ for (i = 0; i < DTOP_MAX_DEVICES; i++)
+ dtop->device[i].handler = dtop_null_device_handler;
+
+ /* a lot more needed here, fornow: */
+ dtop->device[DTOP_DEVICE_NO(0x6a)].handler = dtop_locator_handler;
+ dtop->device[DTOP_DEVICE_NO(0x6a)].status.locator.type =
+ DEV_MOUSE;
+ dtop->device[DTOP_DEVICE_NO(0x6a)].status.locator.relative =
+ 1;
+ dtop->device[DTOP_DEVICE_NO(0x6a)].status.locator.button_code[0] =
+ KEY_LEFT_BUTTON;
+ dtop->device[DTOP_DEVICE_NO(0x6a)].status.locator.button_code[1] =
+ KEY_RIGHT_BUTTON;
+ dtop->device[DTOP_DEVICE_NO(0x6a)].status.locator.button_code[2] =
+ KEY_MIDDLE_BUTTON;
+ dtop->device[DTOP_DEVICE_NO(0x6a)].status.locator.n_coords =
+ 2;
+
+ dtop->device[DTOP_DEVICE_NO(0x6c)].handler = dtop_keyboard_handler;
+ dtop->device[DTOP_DEVICE_NO(0x6c)].status.keyboard.poll_frequency =
+ (hz * 5) / 100; /* x0.01 secs */
+ dtop->device[DTOP_DEVICE_NO(0x6c)].status.keyboard.bell_volume =
+ DTOP_CLICK_VOLUME_MAX;
+
+ return 1;
+}
+
+static void
+dtop_attach(ui)
+ struct bus_device *ui;
+{
+ int i;
+
+ /* Initialize all the console ttys */
+ for (i = 0; i < 4; i++)
+ ttychars(console_tty[i]);
+ /* Mark keyboard and mouse present */
+ for (i = 0; i < 2; i++)
+ console_tty[i]->t_addr = (char*)1;
+}
+
+/*
+ * Polled I/O (debugger)
+ */
+dtop_pollc(unit, on)
+ boolean_t on;
+{
+ dtop_softc_t dtop;
+
+ dtop = dtop_softc[unit];
+ if (on) {
+ dtop->polling_mode++;
+#if NBM > 0
+ screen_on_off(unit, TRUE);
+#endif /* NBM > 0 */
+ } else
+ dtop->polling_mode--;
+}
+
+/*
+ * Interrupt routine
+ */
+dtop_intr (unit, spllevel, recvd)
+ spl_t spllevel;
+ boolean_t recvd;
+{
+
+ if (recvd) {
+ dtop_message msg;
+ int devno;
+ dtop_softc_t dtop;
+
+ ssaver_bump(unit);
+
+#ifdef mips
+ splx(spllevel);
+#endif
+
+ dtop = dtop_softc[unit];
+ if (dtop_get_packet(dtop, &msg) < 0) {
+ if (dtop_print_debug)
+ printf("%s", "dtop: overrun (or stray)\n");
+ return;
+ }
+
+ devno = DTOP_DEVICE_NO(msg.src_address);
+ if (devno < 0 || devno > 15) return; /* sanity */
+
+ (void) (*dtop->device[devno].handler)
+ (&dtop->device[devno].status, &msg,
+ DTOP_EVENT_RECEIVE_PACKET, 0);
+
+ } else {
+ /* fornow xmit is not intr based */
+ (*tc_enable_interrupt)( dtop_info[unit]->adaptor, FALSE, TRUE);
+ }
+}
+
+boolean_t
+dtop_start(tp)
+ struct tty *tp;
+{
+ register int line, temp;
+
+ /* no, we do not need a char out first */
+ return FALSE;
+}
+
+dtop_w_test(n, a,b,c,d,e,f,g,h)
+{
+ int *p = (int*)0xbc2a0000;
+
+ if (n <= 0) return;
+
+ a <<= 8; *p = a;
+ if (--n == 0) goto out;
+ delay(20);
+ b <<= 8; *p = b;
+ if (--n == 0) goto out;
+ delay(20);
+ c <<= 8; *p = c;
+ if (--n == 0) goto out;
+ delay(20);
+ d <<= 8; *p = d;
+ if (--n == 0) goto out;
+ delay(20);
+ e <<= 8; *p = e;
+ if (--n == 0) goto out;
+ delay(20);
+ f <<= 8; *p = f;
+ if (--n == 0) goto out;
+ delay(20);
+ g <<= 8; *p = g;
+ if (--n == 0) goto out;
+ delay(20);
+ h <<= 8; *p = h;
+out:
+ delay(10000);
+ {
+		int buf[101];	/* worst case: every sample differs, plus the initial value */
+
+ delay(20);
+ a = *p;
+ buf[0] = a;
+ c = 1;
+ for (n = 0; n < 100; n++) {
+ delay(20);
+ b = *p;
+ if (b != a) {
+ buf[c++] = b;
+				a = b;	/* remember the last value, so only transitions get logged */
+ }
+ }
+ for (n = 0; n < c; n++)
+ db_printf("%x ", ((buf[n])>>8)&0xff);
+ }
+ return c;
+}
+
+/*
+ * Take a packet off dtop interface
+ * A packet MUST be there, this is not checked for.
+ */
+#define DTOP_ESC_CHAR 0xf8
+dtop_escape(c)
+{
+ /* I donno much about this stuff.. */
+ switch (c) {
+ case 0xe8: return 0xf8;
+ case 0xe9: return 0xf9;
+ case 0xea: return 0xfa;
+ case 0xeb: return 0xfb;
+ default: /* printf("{esc %x}", c); */
+ return c;
+ }
+}
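On the wire an escaped byte arrives as the two-byte sequence 0xf8 0xe8..0xeb and decodes to the single byte 0xf8..0xfb; dtop_get_packet() below does this inline while polling the hardware. A sketch of just the de-escaping step over an already-received buffer (hypothetical helper, no hardware access):

/* Sketch only: decode an escaped byte stream into out[], return its length. */
static int
dtop_unescape(unsigned char *in, int in_len, unsigned char *out)
{
	int i, n = 0;

	for (i = 0; i < in_len; i++) {
		unsigned char c = in[i];

		if (c == DTOP_ESC_CHAR && i + 1 < in_len)
			c = dtop_escape(in[++i]);	/* 0xe8..0xeb -> 0xf8..0xfb */
		out[n++] = c;
	}
	return n;
}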
+
+dtop_get_packet(dtop, pkt)
+ dtop_softc_t dtop;
+ dtop_message_t pkt;
+{
+ register poll_reg_t poll;
+ register data_reg_t data;
+ register int max, i, len;
+ register unsigned char c;
+
+ poll = dtop->poll;
+ data = dtop->data;
+
+ /*
+	 * The interface does not hand us the first byte,
+ * which is our address and cannot ever be anything
+ * else but 0x50. This is a good thing, it makes
+ * the average packet exactly one word long, too.
+ */
+ pkt->src_address = DTOP_GET_BYTE(data);
+
+ for (max = 0; (max < DTOP_MAX_POLL) && !DTOP_RX_AVAIL(poll); max++)
+ delay(16);
+ if (max == DTOP_MAX_POLL) goto bad;
+ pkt->code.bits = DTOP_GET_BYTE(data);
+
+ /*
+ * Now get data and checksum
+ */
+ len = pkt->code.val.len + 1;
+ c = 0;
+ for (i = 0; i < len; i++) {
+
+again: for (max = 0; (max < DTOP_MAX_POLL) && !DTOP_RX_AVAIL(poll); max++)
+ delay(16);
+ if (max == DTOP_MAX_POLL) goto bad;
+ if (c == DTOP_ESC_CHAR) {
+ c = dtop_escape(DTOP_GET_BYTE(data) & 0xff);
+ } else {
+ c = DTOP_GET_BYTE(data);
+ if (c == DTOP_ESC_CHAR)
+ goto again;
+ }
+
+ pkt->body[i] = c;
+ }
+ return len;
+bad:
+ dtop->bad_pkts++;
+ return -1;
+}
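dtop_get_packet() above and the receive waits in dtop_put_packet() below repeat the same bounded busy-wait: sample a ready bit, pause briefly, and give up after DTOP_MAX_POLL tries (roughly half a second at 16 microseconds per sample). A sketch of that idiom factored out, with a hypothetical ready-predicate callback:

/* Sketch only: TRUE if ready(arg) became true before the poll limit. */
static boolean_t
dtop_wait_ready(boolean_t (*ready)(void *), void *arg)
{
	register int max;

	for (max = 0; max < DTOP_MAX_POLL && !(*ready)(arg); max++)
		delay(16);
	return max < DTOP_MAX_POLL;
}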
+
+/* Conversely... */
+dtop_put_packet(dtop, pkt)
+ dtop_softc_t dtop;
+ dtop_message_t pkt;
+{
+ register int i, max;
+ register unsigned char *cp;
+ register unsigned int spl;
+ register unsigned char c;
+
+ spl = spltty();
+ pkt->src_address = pkt->dest_address;
+ i = 0;
+ cp = (unsigned char *)&pkt->src_address;
+ while (i < pkt->code.val.len + 2) {
+ for (max = 0; max < DTOP_MAX_POLL && !DTOP_TX_AVAIL(dtop->poll);
+ max++);
+ if (max == DTOP_MAX_POLL)
+ goto bad;
+ DTOP_PUT_BYTE(dtop->data, *cp);
+ cp++;
+ i++;
+ }
+ for (max = 0; (max < DTOP_MAX_POLL) && !DTOP_RX_AVAIL(dtop->poll); max++)
+ delay(16);
+ if (max == DTOP_MAX_POLL)
+ goto bad;
+ c = DTOP_GET_BYTE(dtop->data);
+ if (c == DTOP_ESC_CHAR) {
+ for (max = 0; (max < DTOP_MAX_POLL)
+ && !DTOP_RX_AVAIL(dtop->poll); max++)
+ delay(16);
+ if (max == DTOP_MAX_POLL)
+ goto bad;
+ c = DTOP_GET_BYTE(dtop->data);
+ }
+ splx(spl);
+ switch (c) {
+ case 0xfb: /* XMT, ok */
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+ bad:
+ splx(spl);
+ return 0;
+}
+
+
+/*
+ * Get a char from a specific DTOP line
+ * [this is only used for console&screen purposes]
+ */
+dtop_getc( unit, line, wait, raw )
+ boolean_t wait;
+ boolean_t raw;
+{
+ register int c;
+ dtop_softc_t dtop;
+
+ dtop = dtop_softc[unit];
+again:
+ c = -1;
+
+ /*
+ * Try rconsole first
+ */
+ if (rcline && line == SCREEN_LINE_KEYBOARD) {
+ c = scc_getc( 0, rcline, FALSE, raw);
+ if (c != -1) return c;
+ }
+
+ /*
+ * Now check keyboard
+ */
+ if (DTOP_RX_AVAIL(dtop->poll)) {
+
+ dtop_message msg;
+ struct dtop_ds *ds;
+
+ if (dtop_get_packet(dtop, &msg) >= 0) {
+
+ ds = &dtop->device[DTOP_DEVICE_NO(msg.src_address)];
+ if (ds->handler == dtop_keyboard_handler) {
+
+ c = dtop_keyboard_handler(
+ &ds->status, &msg,
+ DTOP_EVENT_RECEIVE_PACKET, -1);
+
+ if (c > 0) return c;
+
+ c = -1;
+ }
+ }
+ }
+
+ if (wait && (c == -1)) {
+ delay(100);
+ goto again;
+ }
+
+ return c;
+}
+
+/*
+ * Put a char on a specific DTOP line
+ */
+dtop_putc( unit, line, c )
+{
+ if (rcline && line == rcline) {
+ scc_putc(0, rcline, c);
+ }
+/* dprintf("%c", c); */
+}
+
+dtop_param(tp, line)
+ struct tty *tp;
+{
+ if (tp->t_ispeed == 0)
+ ttymodem(tp, 0);
+ else
+ /* called too early to invoke ttymodem, sigh */
+ tp->t_state |= TS_CARR_ON;
+}
+
+/*
+ * Modem control functions, we don't need 'em
+ */
+dtop_mctl(dev, bits, how)
+ int dev;
+ int bits, how;
+{
+ return 0;
+}
+
+dtop_softCAR(unit, line, on)
+{
+}
+
+/* Some keyboard specific stuff, probably belongs elsewhere */
+
+dtop_kbd_probe(unit)
+{
+ if (dtop_std[unit]) {
+ lk201_probe(unit);
+ return 1;
+ }
+ return 0;
+}
+
+io_return_t
+dtop_set_status(unit, flavor, status, status_count)
+ int unit;
+ int flavor;
+ dev_status_t status;
+ unsigned int status_count;
+{
+ dtop_device_t dev;
+
+ dev = &dtop_softc[unit]->device[DTOP_DEVICE_NO(0x6c)].status;
+
+ switch (flavor) {
+ case LK201_SEND_CMD: {
+ register lk201_cmd_t *cmd = (lk201_cmd_t *)status;
+ unsigned int cnt;
+
+ if ((status_count < (sizeof(*cmd)/sizeof(int))) ||
+ ((cnt = cmd->len) > 2))
+ return D_INVALID_SIZE;
+ switch (cmd->command) {
+ case LK_CMD_ENB_BELL:
+ cmd->params[0] ^= 0x7;
+ if (dtop_print_debug)
+ printf("LK_CMD_ENB_BELL %d\n", cmd->params[0]);
+ dev->keyboard.bell_volume = cmd->params[0] & 0x7;
+ break;
+ case LK_CMD_DIS_BELL:
+ dev->keyboard.bell_volume = 0;
+ break;
+ case LK_CMD_BELL:
+ dtop_ring_bell(unit);
+ break;
+ case LK_CMD_LEDS_ON:
+ cmd->params[0] &= ~0x80;
+ if (dtop_print_debug)
+ printf("LK_CMD_LEDS_ON %d %x\n",
+ cmd->params[0], cmd->params[0]);
+ dev->keyboard.led_status |= cmd->params[0];
+ dtop_leds(unit, dev->keyboard.led_status);
+ break;
+ case LK_CMD_LEDS_OFF:
+ cmd->params[0] &= ~0x80;
+ dev->keyboard.led_status &= ~cmd->params[0];
+ dtop_leds(unit, dev->keyboard.led_status);
+ break;
+ case LK_CMD_ENB_KEYCLK:
+ case LK_CMD_DIS_KEYCLK:
+ case LK_CMD_SOUND_CLK:
+ case LK_CMD_DIS_CTLCLK:
+ case LK_CMD_ENB_CTLCLK:
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return lk201_set_status(unit, flavor, status, status_count);
+}
+
+dtop_kbd_reset(unit)
+{
+ return lk201_reset(unit);
+}
+
+#define DTOP_BITS(p, len) (((p) << 7) | (len))
+
+dtop_ring_bell(unit)
+{
+ dtop_message msg;
+ dtop_device_t dev;
+ int vol;
+
+ dev = &dtop_softc[unit]->device[DTOP_DEVICE_NO(0x6c)].status;
+ vol = dev->keyboard.bell_volume;
+
+ if (dtop_print_debug)
+ printf("dtop_ring_bell: %d\n", vol);
+ msg.dest_address = DTOP_ADDR_KBD;
+ msg.code.bits = DTOP_BITS(1, 2);
+ msg.body[0] = DTOP_KMSG_BELL;
+ msg.body[1] = vol;
+ if (!dtop_put_packet(dtop_softc[unit], &msg)) {
+ if (dtop_print_debug)
+ printf("dtop_ring_bell: dtop_put_packet failed\n");
+ return -1;
+ }
+ return 0;
+}
+
+dtop_leds(unit, mask)
+{
+ dtop_message msg;
+
+ if (dtop_print_debug)
+ printf("dtop_leds %x\n", mask);
+ msg.dest_address = DTOP_ADDR_KBD;
+ msg.code.bits = DTOP_BITS(1, 2);
+ msg.body[0] = DTOP_KMSG_LED;
+ msg.body[1] = mask;
+ if (!dtop_put_packet(dtop_softc[unit], &msg)) {
+ if (dtop_print_debug)
+ printf("dtop_leds: dtop_put_packet failed\n");
+ return -1;
+ }
+ return 0;
+}
+
+
+
+#endif /* NDTOP > 0 */
diff --git a/chips/dz_7085.h b/chips/dz_7085.h
new file mode 100644
index 00000000..66c8916b
--- /dev/null
+++ b/chips/dz_7085.h
@@ -0,0 +1,153 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: dz_7085.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Defines for the DEC 7085 Serial Line Controller Chip
+ */
+
+#define NDZ_LINE 4
+
+/*
+ * What's hanging off those 4 lines
+ */
+
+#define DZ_LINE_KEYBOARD 0
+#define DZ_LINE_MOUSE 1
+#define DZ_LINE_MODEM 2
+#define DZ_LINE_PRINTER 3
+
+/*
+ * Register layout, ignoring padding
+ */
+typedef struct {
+ volatile unsigned short dz_csr; /* Control and Status */
+ volatile unsigned short dz_rbuf; /* Rcv buffer (RONLY) */
+ volatile unsigned short dz_tcr; /* Xmt control (R/W)*/
+ volatile unsigned short dz_tbuf; /* Xmt buffer (WONLY)*/
+# define dz_lpr dz_rbuf /* Line parameters (WONLY)*/
+# define dz_msr dz_tbuf /* Modem status (RONLY)*/
+} dz_regmap_t;
+
+/*
+ * CSR bits
+ */
+
+#define DZ_CSR_MBZ 0x3c07 /* Must be zero */
+#define DZ_CSR_MAINT 0x0008 /* rw: Maintenance mode */
+#define DZ_CSR_CLR 0x0010 /* rw: Master clear (init) */
+#define DZ_CSR_MSE 0x0020 /* rw: Master scan enable */
+#define DZ_CSR_RIE 0x0040 /* rw: Rcv Interrupt Enable */
+#define DZ_CSR_RDONE 0x0080 /* ro: Rcv done (silo avail) */
+#define DZ_CSR_TLINE 0x0300 /* ro: Lineno ready for xmt */
+#define DZ_CSR_TIE 0x4000 /* rw: Xmt Interrupt Enable */
+#define DZ_CSR_TRDY 0x8000 /* ro: Xmt ready */
+
+/*
+ * Receiver buffer (top of silo). Read-only.
+ */
+
+#define DZ_SILO_DEEP 64
+
+#define DZ_RBUF_CHAR 0x00ff /* Received character */
+#define DZ_RBUF_RLINE 0x0300 /* Line it came from */
+#define DZ_RBUF_XXXX 0x0c00 /* Reads as zero */
+#define DZ_RBUF_PERR 0x1000 /* Parity error */
+#define DZ_RBUF_FERR 0x2000 /* Framing error (break) */
+#define DZ_RBUF_OERR 0x4000 /* Silo overrun */
+#define DZ_RBUF_VALID 0x8000 /* Info is valid */
+
+/*
+ * Line parameters register. Write-only.
+ */
+
+#define DZ_LPAR_LINE 0x0003 /* Bin encoded line no */
+#define DZ_LPAR_MBZ 0xe004 /* Must be zero */
+#define DZ_LPAR_CLEN 0x0018 /* Character length: */
+# define DZ_LPAR_5BITS 0x0000 /* 5 bits per char */
+# define DZ_LPAR_6BITS 0x0008 /* 6 bits per char */
+# define DZ_LPAR_7BITS 0x0010 /* 7 bits per char */
+# define DZ_LPAR_8BITS 0x0018 /* 8 bits per char */
+#define DZ_LPAR_STOP 0x0020 /* stop bits: off->1, on->2 */
+#define DZ_LPAR_PAR_ENB 0x0040 /* generate/detect parity */
+#define DZ_LPAR_ODD_PAR 0x0080 /* generate/detect ODD parity */
+#define DZ_LPAR_SPEED 0x0f00 /* Speed code: */
+# define DZ_LPAR_50 0x0000 /* 50 baud */
+# define DZ_LPAR_75 0x0100 /* 75 baud */
+# define DZ_LPAR_110 0x0200 /* 110 baud */
+# define DZ_LPAR_134_5 0x0300 /* 134.5 baud */
+# define DZ_LPAR_150 0x0400 /* 150 baud */
+# define DZ_LPAR_300 0x0500 /* 300 baud */
+# define DZ_LPAR_600 0x0600 /* 600 baud */
+# define DZ_LPAR_1200 0x0700 /* 1200 baud */
+# define DZ_LPAR_1800 0x0800 /* 1800 baud */
+# define DZ_LPAR_2000 0x0900 /* 2000 baud */
+# define DZ_LPAR_2400 0x0a00 /* 2400 baud */
+# define DZ_LPAR_3600 0x0b00 /* 3600 baud */
+# define DZ_LPAR_4800 0x0c00 /* 4800 baud */
+# define DZ_LPAR_7200 0x0d00 /* 7200 baud */
+# define DZ_LPAR_9600 0x0e00 /* 9600 baud */
+# define DZ_LPAR_MAX_SPEED 0x0f00 /* 19200/38400 baud */
+#define DZ_LPAR_ENABLE 0x1000 /* Enable receiver */
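Programming a line amounts to OR-ing a speed code, character length, parity options and the line number into one of these words and writing it to dz_lpr; dz7085_param() in dz_hdw.c later in this change does exactly that. A sketch of the value for line 3 at 9600 baud, 8 bits, no parity, one stop bit, receiver enabled (dz_example_param() is a hypothetical helper, and regs is assumed to point at a mapped, on real hardware padded, register block):

/* Sketch: set up line 3 for 9600 baud, 8N1, receiver enabled. */
static void
dz_example_param(dz_regmap_t *regs)
{
	unsigned short lpr;

	/* 0x0e00 | 0x0018 | 0x0003 | 0x1000 == 0x1e1b */
	lpr = DZ_LPAR_9600 | DZ_LPAR_8BITS | (3 & DZ_LPAR_LINE) | DZ_LPAR_ENABLE;
	regs->dz_lpr = lpr;
}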
+
+/*
+ * Xmt control register
+ */
+
+#define DZ_TCR_LNENB 0x000f /* rw: Xmt line enable */
+#define DZ_TCR_MBZ 0xf0f0 /* Must be zero */
+#define DZ_TCR_DTR3 0x0100 /* rw: DTR on printer line */
+#define DZ_TCR_RTS3 0x0200 /* rw: RTS on printer line */
+#define DZ_TCR_DTR2 0x0400 /* rw: DTR on modem line */
+#define DZ_TCR_RTS2 0x0800 /* rw: RTS on modem line */
+
+/*
+ * Modem status register. Read-only.
+ */
+
+#define DZ_MSR_CTS3 0x0001 /* Clear To Send, printer line */
+#define DZ_MSR_DSR3 0x0002 /* Data Set Ready, printer line */
+#define DZ_MSR_CD3 0x0004 /* Carrier Detect, printer line */
+#define DZ_MSR_RI3 0x0008 /* Ring Indicator, printer line */
+#define DZ_MSR_XXXX 0xf0f0 /* Reads as zero */
+#define DZ_MSR_CTS2 0x0100 /* Clear To Send, modem line */
+#define DZ_MSR_DSR2 0x0200 /* Data Set Ready, modem line */
+#define DZ_MSR_CD2 0x0400 /* Carrier Detect, modem line */
+#define DZ_MSR_RI2 0x0800 /* Ring Indicator, modem line */
+
+
+/*
+ * Xmt buffer
+ */
+
+#define DZ_TBUF_CHAR 0x00ff /* Xmt character */
+#define DZ_TBUF_BREAK_0 0x0100 /* set line 0 to space */
+#define DZ_TBUF_BREAK_1 0x0200 /* set line 1 to space */
+#define DZ_TBUF_BREAK_2 0x0400 /* set line 2 to space */
+#define DZ_TBUF_BREAK_3 0x0800 /* set line 3 to space */
+#define DZ_TBUF_MBZ 0xf000 /* Must be zero */
diff --git a/chips/dz_defs.h b/chips/dz_defs.h
new file mode 100644
index 00000000..7c792bb9
--- /dev/null
+++ b/chips/dz_defs.h
@@ -0,0 +1,65 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: dz_defs.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Internal definitions for the DZ Serial Line Driver
+ */
+
+#include <mach/std_types.h>
+#include <chips/busses.h>
+
+#include <kern/time_out.h>
+#include <sys/syslog.h>
+
+#include <device/io_req.h>
+#include <device/conf.h>
+#include <device/tty.h>
+#include <device/errno.h>
+
+#include <chips/dz_7085.h>
+
+extern struct tty *dz_tty[];
+
+extern struct pseudo_dma {
+ dz_regmap_t *p_addr;
+ char *p_mem;
+ char *p_end;
+ int p_arg;
+ int (*p_fcn)();
+} dz_pdma[];
+
+extern int rcline, cnline;
+extern int console;
+
+/*
+ * Modem control operations on DZ lines
+ */
+
+extern unsigned dz_mctl(/* int, int, int */);
+
diff --git a/chips/dz_hdw.c b/chips/dz_hdw.c
new file mode 100644
index 00000000..fa472828
--- /dev/null
+++ b/chips/dz_hdw.c
@@ -0,0 +1,649 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: dz_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Hardware-level operations for the DZ Serial Line Driver
+ */
+
+#include <dz_.h>
+#if NDZ_ > 0
+#include <bm.h>
+#include <platforms.h>
+
+#include <mach_kdb.h>
+
+#include <machine/machspl.h> /* spl definitions */
+#include <device/io_req.h>
+#include <device/tty.h>
+
+#include <chips/busses.h>
+#include <chips/screen_defs.h>
+#include <chips/serial_defs.h>
+
+#include <chips/dz_7085.h>
+
+
+#ifdef DECSTATION
+#include <mips/mips_cpu.h>
+#include <mips/PMAX/kn01.h>
+#define DZ_REGS_DEFAULT (vm_offset_t)PHYS_TO_K1SEG(KN01_SYS_DZ)
+#define PAD(n) char n[6];
+#endif /*DECSTATION*/
+
+#ifdef VAXSTATION
+#define DZ_REGS_DEFAULT 0
+#define wbflush()
+#define check_memory(addr,dow) ((dow) ? wbadaddr(addr,4) : badaddr(addr,4))
+#define PAD(n) char n[2];
+#endif /*VAXSTATION*/
+
+#ifndef PAD
+#define PAD(n)
+#endif
+
+typedef struct {
+ volatile unsigned short dz_csr; /* Control and Status */
+ PAD(pad0)
+ volatile unsigned short dz_rbuf; /* Rcv buffer (RONLY) */
+ PAD(pad1)
+ volatile unsigned short dz_tcr; /* Xmt control (R/W)*/
+ PAD(pad2)
+ volatile unsigned short dz_tbuf; /* Xmt buffer (WONLY)*/
+# define dz_lpr dz_rbuf /* Line parameters (WONLY)*/
+# define dz_msr dz_tbuf /* Modem status (RONLY)*/
+ PAD(pad3)
+} dz_padded_regmap_t;
+
+
+/* this is ok both for rcv (char) and xmt (csr) */
+#define LINEOF(x) (((x) >> 8) & 0x3)
+
+/*
+ * Driver status
+ */
+struct dz7085_softc {
+ dz_padded_regmap_t *regs;
+ unsigned short breaks;
+ unsigned short fake; /* missing rs232 bits */
+ int polling_mode;
+ unsigned short prev_msr;
+ char softCAR;
+} dz7085_softc_data[NDZ_];
+
+typedef struct dz7085_softc *dz7085_softc_t;
+
+dz7085_softc_t dz7085_softc[NDZ_];
+
+static void check_car();
+static void check_ring();
+
+dz7085_softCAR(unit, line, on)
+{
+ if (on)
+ dz7085_softc[unit]->softCAR |= 1<<line;
+ else
+ dz7085_softc[unit]->softCAR &= ~(1 << line);
+}
+
+static
+short dz7085_speeds[] =
+ { 0, DZ_LPAR_50, DZ_LPAR_75, DZ_LPAR_110, DZ_LPAR_134_5, DZ_LPAR_150,
+ 0, DZ_LPAR_300, DZ_LPAR_600, DZ_LPAR_1200, DZ_LPAR_1800, DZ_LPAR_2400,
+ DZ_LPAR_4800, DZ_LPAR_9600, DZ_LPAR_MAX_SPEED, 0 };
+
+
+/*
+ * Definition of the driver for the auto-configuration program.
+ */
+
+int dz7085_probe(), dz7085_intr();
+static void dz7085_attach();
+
+vm_offset_t dz7085_std[NDZ_] = { DZ_REGS_DEFAULT, };
+struct bus_device *dz7085_info[NDZ_];
+struct bus_driver dz_driver =
+ { dz7085_probe, 0, dz7085_attach, 0, dz7085_std, "dz", dz7085_info,};
+
+/*
+ * Adapt/Probe/Attach functions
+ */
+
+static boolean_t dz7085_full_modem = FALSE;
+boolean_t dz7085_uses_modem_control = FALSE;/* patch this with adb */
+
+set_dz_address( unit, regs, has_modem)
+ vm_offset_t regs;
+ boolean_t has_modem;
+{
+ extern int dz7085_probe(), dz7085_param(), dz7085_start(),
+ dz7085_putc(), dz7085_getc(),
+ dz7085_pollc(), dz7085_mctl(), dz7085_softCAR();
+
+ dz7085_std[unit] = regs;
+ dz7085_full_modem = has_modem & dz7085_uses_modem_control;
+
+ /* Do this here */
+ console_probe = dz7085_probe;
+ console_param = dz7085_param;
+ console_start = dz7085_start;
+ console_putc = dz7085_putc;
+ console_getc = dz7085_getc;
+ console_pollc = dz7085_pollc;
+ console_mctl = dz7085_mctl;
+ console_softCAR = dz7085_softCAR;
+
+}
+
+dz7085_probe( xxx, ui)
+ struct bus_device *ui;
+{
+ int unit = ui->unit;
+ dz7085_softc_t sc;
+ register int cntr;
+ register dz_padded_regmap_t *regs;
+
+ static int probed_once = 0;
+
+ regs = (dz_padded_regmap_t *)dz7085_std[unit]; /* like the old days! */
+ if (regs == 0)
+ return 0;
+ /*
+ * If this is not there we are toast
+ */
+ if (check_memory(regs, 0))
+ return 0;
+
+ if (probed_once++)
+ return 1;
+
+ sc = &dz7085_softc_data[unit];
+ dz7085_softc[unit] = sc;
+ sc->regs = regs;
+
+ for (cntr = unit*NDZ_LINE; cntr < NDZ_LINE*(unit+1); cntr++) {
+ console_tty[cntr]->t_addr = (char*)regs;
+ console_tty[cntr]->t_state |= TS_MIN;
+ }
+
+ /* pmaxen et al. lack many modem bits */
+ dz7085_set_modem_control(sc, dz7085_full_modem);
+
+ regs->dz_tcr = 0;/* disable all lines, drop RTS,DTR */
+ return 1;
+}
+
+boolean_t dz7085_timer_started = FALSE;
+
+static void
+dz7085_attach(ui)
+ register struct bus_device *ui;
+{
+ int unit = ui->unit;
+ extern dz7085_scan();
+ extern int tty_inq_size;
+ int i;
+
+ /* We only have 4 ttys, but always at 9600
+ * Give em a lot of room
+ */
+ tty_inq_size = 2048;
+ for (i = 0; i < (NDZ_*NDZ_LINE); i++)
+ ttychars(console_tty[i]);
+
+ if (!dz7085_timer_started) {
+ dz7085_timer_started = TRUE;
+ dz7085_scan();
+ }
+
+#if NBM > 0
+ if (SCREEN_ISA_CONSOLE()) {
+ printf("\n sl0: "); lk201_attach(0, unit);
+ printf("\n sl1: "); mouse_attach(0, unit);
+ printf("\n sl2: \n sl3: ");
+ if (rcline == 3) printf("( rconsole )");
+ } else {
+#endif /*NBM > 0*/
+ printf("\n sl0:\n sl1:\n sl2:\n sl3: ( alternate console )");
+#if NBM > 0
+ }
+#endif
+}
+
+/*
+ * Would you like to make a phone call ?
+ */
+dz7085_set_modem_control(sc, on)
+ dz7085_softc_t sc;
+ boolean_t on;
+{
+ if (on)
+		/* your problem if the hardware then is broken */
+ sc->fake = 0;
+ else
+ sc->fake = DZ_MSR_CTS3|DZ_MSR_DSR3|DZ_MSR_CD3|
+ DZ_MSR_CTS2|DZ_MSR_CD2;
+}
+
+/*
+ * Polled I/O (debugger)
+ */
+dz7085_pollc(unit, on)
+ boolean_t on;
+{
+ dz7085_softc_t sc = dz7085_softc[unit];
+
+ if (on) {
+ sc->polling_mode++;
+#if NBM > 0
+ screen_on_off(unit, TRUE);
+#endif /* NBM > 0 */
+ } else
+ sc->polling_mode--;
+}
+
+/*
+ * Interrupt routine
+ */
+dz_intr(unit,spllevel)
+ spl_t spllevel;
+{
+ dz7085_softc_t sc = dz7085_softc[unit];
+ register dz_padded_regmap_t *regs = sc->regs;
+ register short csr;
+
+ csr = regs->dz_csr;
+
+ if (csr & DZ_CSR_TRDY) {
+ register int c;
+
+ c = cons_simple_tint(unit*NDZ_LINE + LINEOF(csr), FALSE);
+ if (c == -1) {
+ /* no more data for this line */
+ regs->dz_tcr &= ~(1 << LINEOF(csr));
+ c = cons_simple_tint(unit*NDZ_LINE + LINEOF(csr), TRUE);
+			/* because a funny race is possible if not */
+ }
+ if (c != -1) {
+ regs->dz_tbuf = (c & 0xff) | sc->breaks;
+ /* and leave it enabled */
+ }
+ }
+ if (sc->polling_mode)
+ return;
+
+ while (regs->dz_csr & DZ_CSR_RDONE) {
+ short c = regs->dz_rbuf;
+ spl_t oldspl;
+
+#ifdef DECSTATION
+ oldspl = splhigh();
+ splx(spllevel);
+#endif /*DECSTATION*/
+ cons_simple_rint(unit*NDZ_LINE+LINEOF(c), LINEOF(c),
+ c&0xff, c&0xff00);
+#ifdef DECSTATION
+ splx(oldspl);
+#endif /*DECSTATION*/
+ }
+}
+
+/*
+ * Start transmission on a line
+ */
+dz7085_start(tp)
+ struct tty *tp;
+{
+ register dz_padded_regmap_t *regs;
+ register int line;
+
+ line = tp->t_dev;
+
+ regs = (dz_padded_regmap_t*)tp->t_addr;
+ regs->dz_tcr |= (1<<(line&3));
+
+ /* no, we do not need a char out to interrupt */
+}
+
+/*
+ * Get a char from a specific DZ line
+ */
+dz7085_getc( unit, line, wait, raw )
+ boolean_t wait;
+ boolean_t raw;
+{
+ dz7085_softc_t sc = dz7085_softc[unit];
+ spl_t s = spltty();
+ register dz_padded_regmap_t *regs = sc->regs;
+ unsigned short c;
+ int rl;
+
+again:
+ /*
+ * wait till something in silo
+ */
+ while ((regs->dz_csr & DZ_CSR_RDONE) == 0 && wait)
+ delay(10);
+ c = regs->dz_rbuf;
+
+ /*
+ * check if right line. For keyboard, rconsole is ok too
+ */
+ rl = LINEOF(c);
+ if (wait && (line != rl) &&
+ !((line == DZ_LINE_KEYBOARD) && rcline == rl))
+ goto again;
+ /*
+ * bad chars not ok
+ */
+ if ((c & (DZ_RBUF_PERR | DZ_RBUF_OERR | DZ_RBUF_FERR)) && wait)
+ goto again;
+
+ splx(s);
+
+ /*
+ * if nothing found return -1
+ */
+ if ( ! (c & DZ_RBUF_VALID))
+ return -1;
+
+#if NBM > 0
+ if ((rl == DZ_LINE_KEYBOARD) && !raw && SCREEN_ISA_CONSOLE())
+ return lk201_rint(SCREEN_CONS_UNIT(), c, wait, sc->polling_mode);
+ else
+#endif /* NBM > 0 */
+ return c & DZ_RBUF_CHAR;
+}
+
+/*
+ * Put a char on a specific DZ line
+ */
+dz7085_putc( unit, line, c )
+{
+ dz7085_softc_t sc = dz7085_softc[unit];
+ register dz_padded_regmap_t *regs = sc->regs;
+ spl_t s = spltty();
+
+ /*
+ * do not change the break status of other lines
+ */
+ c = (c & 0xff) | sc->breaks;
+
+ /*
+ * Xmit line info only valid if TRDY,
+ * but never TRDY if no xmit enabled
+ */
+ if ((regs->dz_tcr & DZ_TCR_LNENB) == 0)
+ goto select_it;
+
+ while ((regs->dz_csr & DZ_CSR_TRDY) == 0)
+ delay(100);
+
+ /*
+ * see if by any chance we are already on the right line
+ */
+ if (LINEOF(regs->dz_csr) == line)
+ regs->dz_tbuf = c;
+ else {
+ unsigned short tcr;
+select_it:
+ tcr = regs->dz_tcr;
+ regs->dz_tcr = (1 << line) | (tcr & 0xff00);
+ wbflush();
+
+ do
+ delay(2);
+ while ((regs->dz_csr & DZ_CSR_TRDY) == 0 ||
+ (LINEOF(regs->dz_csr) != line));
+
+ regs->dz_tbuf = c;
+ wbflush();
+
+ /* restore previous settings */
+ regs->dz_tcr = tcr;
+ }
+
+ splx(s);
+}
+
+
+dz7085_param(tp, line)
+ register struct tty *tp;
+ register int line;
+{
+ register dz_padded_regmap_t *regs;
+ register int lpr;
+
+ line = tp->t_dev;
+ regs = dz7085_softc[line/NDZ_LINE]->regs;
+
+ /*
+ * Do not let user fool around with kbd&mouse
+ */
+#if NBM > 0
+ if (screen_captures(line)) {
+ tp->t_ispeed = tp->t_ospeed = B4800;
+ tp->t_flags |= TF_LITOUT;
+ }
+#endif /* NBM > 0 */
+ regs->dz_csr = DZ_CSR_MSE|DZ_CSR_RIE|DZ_CSR_TIE;
+ if (tp->t_ispeed == 0) {
+ (void) (*console_mctl)(tp->t_dev, TM_HUP, DMSET); /* hang up line */
+ return;
+ }
+/* 19200/38400 here */
+ lpr = dz7085_speeds[tp->t_ispeed] | (line&DZ_LPAR_LINE) | DZ_LPAR_ENABLE;
+ lpr |= DZ_LPAR_8BITS;
+
+ if ((tp->t_flags & (TF_ODDP|TF_EVENP)) == TF_ODDP)
+ lpr |= DZ_LPAR_ODD_PAR;
+
+ if (tp->t_ispeed == B110)
+ lpr |= DZ_LPAR_STOP;
+ regs->dz_lpr = lpr;
+}
+
+/*
+ * This is a total mess: not only are bits spread out in
+ * various registers, but we have to fake some for pmaxen.
+ */
+dz7085_mctl(dev, bits, how)
+ int dev;
+ int bits, how;
+{
+ register dz_padded_regmap_t *regs;
+ register int unit;
+ register int tcr, msr, brk, n_tcr, n_brk;
+ int b;
+ spl_t s;
+ dz7085_softc_t sc;
+
+ unit = dev;
+
+ /* no modem support on lines 0 & 1 */
+/* XXX break on 0&1 */
+ if ((unit & 2) == 0)
+ return TM_LE|TM_DTR|TM_CTS|TM_CAR|TM_DSR;
+
+ b = 1 ^ (unit & 1); /* line 2 ? */
+
+ sc = dz7085_softc[unit>>2];
+ regs = sc->regs;
+ s = spltty();
+
+ tcr = ((regs->dz_tcr | (sc->fake>>4)) & 0xf00) >> (8 + b*2);
+ brk = (sc->breaks >> (8 + (unit&3))) & 1; /* THE break bit */
+
+ n_tcr = (bits & (TM_RTS|TM_DTR)) >> 1;
+ n_brk = (bits & TM_BRK) >> 9;
+
+ /* break transitions, must 'send' a char out */
+ bits = (brk ^ n_brk) & 1;
+
+ switch (how) {
+ case DMSET:
+ tcr = n_tcr;
+ brk = n_brk;
+ break;
+
+ case DMBIS:
+ tcr |= n_tcr;
+ brk |= n_brk;
+ break;
+
+ case DMBIC:
+ tcr &= ~n_tcr;
+ brk = 0;
+ break;
+
+ case DMGET:
+ msr = ((regs->dz_msr|sc->fake) & 0xf0f) >> (b*8);
+ (void) splx(s);
+ return (tcr<<1)|/* DTR, RTS */
+ ((msr&1)<<5)|/* CTS */
+ ((msr&2)<<7)|/* DSR */
+ ((msr&0xc)<<4)|/* CD, RNG */
+ (brk << 9)|/* BRK */
+ TM_LE;
+ }
+ n_tcr = (regs->dz_tcr & ~(3 << (8 + b*2))) |
+ (tcr << (8 + b*2));
+
+ regs->dz_tcr = n_tcr;
+ sc->fake = (sc->fake & 0xf0f) | (n_tcr<<4&0xf000);
+
+ sc->breaks = (sc->breaks & ~(1 << (8 + (unit&3)))) |
+ (brk << (8 + (unit&3)));
+ if(bits) (*console_putc)( unit>>2, unit&3, 0);/* force break, now */
+ (void) splx(s);
+ return 0;/* useless to compute it */
+}
+
+/*
+ * Periodically look at the CD signals:
+ * they do not generate interrupts.
+ */
+dz7085_scan()
+{
+	register int i;
+	register dz_padded_regmap_t *regs;
+	register int msr;
+	register struct tty *tp;
+
+ for (i = 0; i < NDZ_; i++) {
+ dz7085_softc_t sc = dz7085_softc[i];
+ register int temp;
+
+ if (sc == 0)
+ continue;
+ regs = sc->regs;
+
+ tp = console_tty[i * NDZ_LINE];
+
+ msr = regs->dz_msr | (sc->fake & 0xf0f);
+ if (temp = sc->softCAR) {
+ if (temp & 0x4)
+ msr |= DZ_MSR_CD2 | DZ_MSR_CTS2;
+ if (temp & 0x8)
+ msr |= DZ_MSR_CD3 | DZ_MSR_CTS3;
+ }
+
+ /* Lines 0 and 1 have carrier on by definition */
+ /* [horrid casts cuz compiler stupid] */
+ check_car((char*)tp + 0*sizeof(struct tty), 1);
+ check_car((char*)tp + 1*sizeof(struct tty), 1);
+ check_car((char*)tp + 2*sizeof(struct tty), msr & DZ_MSR_CD2);
+ check_car((char*)tp + 3*sizeof(struct tty), msr & DZ_MSR_CD3);
+
+ /* nothing else to do if no msr transitions */
+ if ((temp = sc->prev_msr) == msr)
+ continue;
+ else
+ sc->prev_msr = msr;
+
+ /* see if we have an incoming call */
+#define RING (DZ_MSR_RI2|DZ_MSR_RI3)
+ if ((msr & RING) != (temp & RING)) {
+/*printf("%s %x->%x\n", "ET Phone RI", temp & RING, msr & RING);*/
+ check_ring((char*)tp + 2*sizeof(struct tty),
+ msr & DZ_MSR_RI2, temp & DZ_MSR_RI2);
+ check_ring((char*)tp + 3*sizeof(struct tty),
+ msr & DZ_MSR_RI3, temp & DZ_MSR_RI3);
+ }
+#undef RING
+ /* see if we must do flow-control */
+ if ((msr ^ temp) & DZ_MSR_CTS2) {
+ tty_cts((char*)tp + 2*sizeof(struct tty),
+ msr & DZ_MSR_CTS2);
+ }
+ if ((msr ^ temp) & DZ_MSR_CTS3) {
+ tty_cts((char*)tp + 3*sizeof(struct tty),
+ msr & DZ_MSR_CTS3);
+ }
+ }
+ timeout(dz7085_scan, (vm_offset_t)0, 2*hz);
+}
+
+static dz7085_hup(tp)
+ register struct tty *tp;
+{
+ (*console_mctl)(tp->t_dev, TM_DTR, DMBIC);
+}
+
+static void check_car(tp, car)
+ register struct tty *tp;
+{
+ if (car) {
+ /* cancel modem timeout if need to */
+ if (car & (DZ_MSR_CD2|DZ_MSR_CD3))
+ untimeout(dz7085_hup, (vm_offset_t)tp);
+
+ /* I think this belongs in the MI code */
+ if (tp->t_state & TS_WOPEN)
+ tp->t_state |= TS_ISOPEN;
+ /* carrier present */
+ if ((tp->t_state & TS_CARR_ON) == 0)
+ (void)ttymodem(tp, 1);
+ } else if ((tp->t_state&TS_CARR_ON) && ttymodem(tp, 0) == 0)
+ (*console_mctl)( tp->t_dev, TM_DTR, DMBIC);
+}
+
+int dz7085_ring_timeout = 60; /* seconds, patchable */
+
+static void check_ring(tp, ring, oring)
+ register struct tty *tp;
+{
+ if (ring == oring)
+ return;
+ if (ring) {
+ (*console_mctl)( tp->t_dev, TM_DTR, DMBIS);
+ /* give it ample time to find the right carrier */
+ timeout(dz7085_hup, (vm_offset_t)tp, dz7085_ring_timeout*hz);
+ }
+}
+#endif /* NDZ_ > 0 */
diff --git a/chips/eccreg.h b/chips/eccreg.h
new file mode 100644
index 00000000..21c8fb02
--- /dev/null
+++ b/chips/eccreg.h
@@ -0,0 +1,110 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#define FA_BLCK 0x10000
+
+#define FA_ROM 0x00000
+
+#define FA_CTL 0x10000
+#define FA_STAT 0x10000
+#define I_RCV_CNT 0x00001
+#define I_RCV_EOM 0x00002
+#define I_RCV_TIM 0x00004
+#define I_XMT_CNT 0x00008
+#define I_RCV_LOS 0x00010
+#define I_RCV_CARRIER 0x00020
+#define FA_CR_S 0x10004
+#define FA_CR_C 0x10008
+#define FA_CR 0x1000C
+#define ENI_RCV_CNT 0x00001
+#define ENI_RCV_END 0x00002
+#define ENI_RCV_TIM 0x00004
+#define ENI_XMT_CNT 0x00008
+#define EN_TEST 0x00010
+#define EN_UNUSED 0x00020
+#define EN_RCV 0x00040
+#define EN_XMT 0x00080
+#define RESET_RCV 0x00100
+#define RESET_XMT 0x00200
+#define FA_TIM 0x10010
+#define FA_TIM_SET 0x10018
+#define FA_RCV_CNT 0x10020
+#define FA_RCV_CMP 0x10028
+#define FA_XMT_CNT 0x10030
+#define FA_XMT_CMP 0x10038
+
+
+#define FA_DISCARD 0x20000
+#define FA_RCV 0x20000
+#define FA_RCV_HD 0x20000
+#define FA_RCV_PAYLD 0x20004
+#define FA_RCV_TR 0x20034
+
+#define FA_XMT 0x30000
+#define FA_XMT_HD 0x30000
+#define FA_XMT_PAYLD 0x30004
+#define FA_XMT_TR 0x30034
+
+#define FA_END 0x40000
+
+
+struct ecc {
+/* 00000 */ char rom[FA_BLCK];
+/* 10000 */ int stat;
+/* 10004 */ int cr_s;
+/* 10008 */ int cr_c;
+/* 1000C */ int cr;
+/* 10010 */ int tim;
+ int fill1;
+/* 10018 */ int tim_set;
+ int fill2;
+/* 10020 */ int rcv_cnt;
+ int fill3;
+/* 10028 */ int rcv_cmp;
+ int fill4;
+/* 10030 */ int xmt_cnt;
+ int fill5;
+/* 10038 */ int xmt_cmp;
+ int fill6;
+ char pad[FA_BLCK-0x40];
+
+/* 20000 */
+/* 20000 */ char rcv[FA_BLCK];
+/* 30000 */ char xmt[FA_BLCK];
+};
+
+struct sar {
+ int header;
+ int payload[12];
+ int trailer;
+};
+
+typedef struct ecc ecc_t;
+typedef struct sar sar_t;
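The commented offsets only hold if the compiler uses 4-byte ints and inserts no padding; a compile-time check in the classic negative-array-size style can pin that down (a sketch, not part of the driver):

#include <stddef.h>

/* Sketch: fail compilation if the layout drifts from the commented offsets. */
typedef char ecc_check_stat [(offsetof(ecc_t, stat)    == 0x10000) ? 1 : -1];
typedef char ecc_check_xmtc [(offsetof(ecc_t, xmt_cmp) == 0x10038) ? 1 : -1];
typedef char ecc_check_rcv  [(offsetof(ecc_t, rcv)     == 0x20000) ? 1 : -1];
typedef char ecc_check_xmt  [(offsetof(ecc_t, xmt)     == 0x30000) ? 1 : -1];
typedef char sar_check_size [(sizeof(sar_t) == 14 * sizeof(int))   ? 1 : -1];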
+
+
+
+
diff --git a/chips/fb_hdw.c b/chips/fb_hdw.c
new file mode 100644
index 00000000..7139a864
--- /dev/null
+++ b/chips/fb_hdw.c
@@ -0,0 +1,219 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: fb_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 7/91
+ *
+ * Driver for the 3max Monochrome Frame Buffer Display,
+ * hardware-level operations.
+ */
+
+#include <mfb.h>
+#if (NMFB > 0)
+#include <platforms.h>
+
+#include <machine/machspl.h>
+#include <mach/std_types.h>
+#include <chips/busses.h>
+
+#include <chips/screen_defs.h>
+
+#include <chips/pm_defs.h>
+typedef pm_softc_t fb_softc_t;
+
+
+#ifdef DECSTATION
+#include <mips/PMAX/pmag_aa.h>
+#include <mips/PMAX/tc.h>
+#endif
+
+#ifdef FLAMINGO
+#include <mips/PMAX/pmag_aa.h> /* XXX fixme */
+#include <alpha/DEC/tc.h>
+#endif
+
+/*
+ * Definition of the driver for the auto-configuration program.
+ */
+
+int fb_probe(), fb_intr();
+static void fb_attach();
+
+vm_offset_t fb_std[NMFB] = { 0 };
+struct bus_device *fb_info[NMFB];
+struct bus_driver fb_driver =
+ { fb_probe, 0, fb_attach, 0, fb_std, "fb", fb_info,
+ 0, 0, BUS_INTR_DISABLED};
+
+/*
+ * Probe/Attach functions
+ */
+
+fb_probe( /* reg, ui */)
+{
+	static int probed_once = 0;
+
+ /*
+ * Probing was really done sweeping the TC long ago
+ */
+ if (tc_probe("fb") == 0)
+ return 0;
+ if (probed_once++ > 1)
+ printf("[mappable] ");
+ return 1;
+}
+
+static void
+fb_attach(ui)
+ struct bus_device *ui;
+{
+ /* ... */
+ printf(": monochrome display");
+}
+
+
+/*
+ * Interrupt routine
+ */
+
+fb_intr(unit,spllevel)
+ spl_t spllevel;
+{
+ register volatile char *ack;
+
+ /* acknowledge interrupt */
+ ack = (volatile char *) fb_info[unit]->address + FB_OFFSET_IREQ;
+ *ack = 0;
+
+#if mips
+ splx(spllevel);
+#endif
+ lk201_led(unit);
+}
+
+fb_vretrace(fb, on)
+ fb_softc_t *fb;
+{
+ int i;
+
+ for (i = 0; i < NMFB; i++)
+ if (fb_info[i]->address == (vm_offset_t)fb->framebuffer)
+ break;
+ if (i == NMFB) return;
+
+ (*tc_enable_interrupt)(fb_info[i]->adaptor, on, 0);
+}
+
+/*
+ * Video on/off
+ */
+fb_video_on(fb, up)
+ fb_softc_t *fb;
+ user_info_t *up;
+{
+ if (!fb->cursor_state) /* video is "on" at boot */
+ return;
+ bt455_video_on(fb->vdac_registers, up);
+ bt431_cursor_on(fb->cursor_registers);
+ fb->cursor_state = 0;
+}
+
+fb_video_off(fb, up)
+ fb_softc_t *fb;
+ user_info_t *up;
+{
+ if (fb->cursor_state)
+ return;
+ bt455_video_off(fb->vdac_registers, up);
+ bt431_cursor_off(fb->cursor_registers);
+ fb->cursor_state = 1;
+}
+
+/*
+ * Boot time initialization: must make device
+ * usable as console asap.
+ */
+extern int
+ fb_soft_reset(), fb_set_status(),
+ bt431_pos_cursor(), fb_video_on(),
+ fb_video_off(), fb_vretrace(),
+ pm_get_status(), pm_char_paint(),
+ pm_insert_line(), pm_remove_line(),
+ pm_clear_bitmap(), pm_map_page();
+
+static struct screen_switch fb_sw = {
+ screen_noop, /* graphic_open */
+ fb_soft_reset, /* graphic_close */
+ fb_set_status, /* set_status */
+ pm_get_status, /* get_status */
+ pm_char_paint, /* char_paint */
+ bt431_pos_cursor, /* pos_cursor */
+ pm_insert_line, /* insert_line */
+ pm_remove_line, /* remove_line */
+ pm_clear_bitmap, /* clear_bitmap */
+ fb_video_on, /* video_on */
+ fb_video_off, /* video_off */
+ fb_vretrace, /* intr_enable */
+ pm_map_page /* map_page */
+};
+
+fb_cold_init(unit, up)
+ user_info_t *up;
+{
+ fb_softc_t *fb;
+ screen_softc_t sc = screen(unit);
+ vm_offset_t base = tc_probe("fb");
+
+ bcopy(&fb_sw, &sc->sw, sizeof(sc->sw));
+#if 0
+ sc->flags |= MONO_SCREEN;
+#else
+ sc->flags |= COLOR_SCREEN;
+#endif
+ sc->frame_scanline_width = 2048;
+ sc->frame_height = 1024;
+ sc->frame_visible_width = 1280;
+ sc->frame_visible_height = 1024;
+
+ pm_init_screen_params(sc, up);
+ (void) screen_up(unit, up);
+
+ fb = pm_alloc(unit, base+FB_OFFSET_BT431, base+FB_OFFSET_VRAM, -1);
+ fb->vdac_registers = (char*) base + FB_OFFSET_BT455;
+
+ screen_default_colors(up);
+
+ fb_soft_reset(sc);
+
+ /*
+	 * Clearing the screen at boot saves a lot of scrolling
+	 * and speeds up booting quite a bit.
+ */
+ screen_blitc( unit, 'C'-'@');/* clear screen */
+}
+
+#endif /* NMFB > 0 */
diff --git a/chips/fb_misc.c b/chips/fb_misc.c
new file mode 100644
index 00000000..3a99dcd9
--- /dev/null
+++ b/chips/fb_misc.c
@@ -0,0 +1,242 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: fb_misc.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 7/91
+ *
+ * Driver for the PMAG-AA simple mono framebuffer
+ *
+ */
+
+#include <mfb.h>
+#if (NMFB > 0)
+
+/*
+ * NOTE: This driver relies heavily on the pm one.
+ */
+
+#include <device/device_types.h>
+#include <chips/screen_defs.h>
+#include <chips/pm_defs.h>
+typedef pm_softc_t fb_softc_t;
+
+#include <chips/bt455.h>
+
+/*
+ * Initialize color map, for kernel use
+ */
+fb_init_colormap(sc)
+ screen_softc_t sc;
+{
+ fb_softc_t *fb = (fb_softc_t*)sc->hw_state;
+ user_info_t *up = sc->up;
+ color_map_t Bg_Fg[2];
+ register int i;
+
+ bt455_init_colormap( fb->vdac_registers );
+
+ /* init bg/fg colors */
+ for (i = 0; i < 3; i++) {
+ up->dev_dep_2.pm.Bg_color[i] = 0x00;
+ up->dev_dep_2.pm.Fg_color[i] = 0xff;
+ }
+
+ Bg_Fg[0].red = Bg_Fg[0].green = Bg_Fg[0].blue = 0x00;
+ Bg_Fg[1].red = Bg_Fg[1].green = Bg_Fg[1].blue = 0xff;
+ bt455_cursor_color( fb->vdac_registers, Bg_Fg);
+}
+
+/*
+ * Large vs. small cursor
+ */
+fb_small_cursor_to_large(up, cursor)
+ user_info_t *up;
+ cursor_sprite_t cursor;
+{
+ unsigned char *curbytes, *sprite;
+ int i;
+ /* Our cursor turns out mirrored, donno why */
+ static unsigned char mirror[256] = {
+ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+ 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+ 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+ 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+ 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+ 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+ 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+ 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+ 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+ 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+ 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+ 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+ 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+ 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+ 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+ 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+ 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+ 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+ 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+ 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+ 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+ 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+ 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+ 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+ 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+ 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+ 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+ 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+ 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+ 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+ 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+ 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff
+ };
+
+ /* Clear out old cursor */
+ bzero( up->dev_dep_2.pm.cursor_sprite,
+ sizeof(up->dev_dep_2.pm.cursor_sprite));
+
+ /* small cursor is 32x2 bytes, image(fg) first then mask(bg) */
+ curbytes = (unsigned char *) cursor;
+
+ /* we have even byte --> image, odd byte --> mask;
+ line size is 8 bytes instead of 2 */
+ sprite = (unsigned char *) up->dev_dep_2.pm.cursor_sprite;
+
+ for (i = 0; i < 32; i += 2) {
+ *sprite++ = mirror[curbytes[i]]; /* fg */
+ *sprite++ = mirror[curbytes[i + 32]]; /* bg */
+ *sprite++ = mirror[curbytes[i + 1]]; /* fg */
+ *sprite++ = mirror[curbytes[i + 33]]; /* bg */
+ sprite += 12; /* skip rest of the line */
+ }
+}
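The mirror[] table above is plain per-byte bit reversal, presumably because the small-cursor sprite stores pixels in the opposite bit order from what the Bt431 expects. The same table can be generated with three swap steps; shown here only to document what the constants encode (bit_reverse(0x01) == 0x80, matching mirror[0x01]):

/* Sketch: reverse the bit order within one byte (mirror[b] == bit_reverse(b)). */
static unsigned char
bit_reverse(unsigned char b)
{
	b = ((b & 0xf0) >> 4) | ((b & 0x0f) << 4);	/* swap nibbles */
	b = ((b & 0xcc) >> 2) | ((b & 0x33) << 2);	/* swap bit pairs */
	b = ((b & 0xaa) >> 1) | ((b & 0x55) << 1);	/* swap adjacent bits */
	return b;
}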
+
+/*
+ * Device-specific set status
+ */
+fb_set_status(sc, flavor, status, status_count)
+ screen_softc_t sc;
+ int flavor;
+ dev_status_t status;
+ unsigned int status_count;
+{
+ fb_softc_t *fb = (fb_softc_t*) sc->hw_state;
+
+ switch (flavor) {
+
+ case SCREEN_ADJ_MAPPED_INFO:
+ return pm_set_status(sc, flavor, status, status_count);
+
+ case SCREEN_LOAD_CURSOR:
+
+ if (status_count < sizeof(cursor_sprite_t)/sizeof(int))
+ return D_INVALID_SIZE;
+ fb_small_cursor_to_large(sc->up, (cursor_sprite_t*) status);
+
+ /* Fall through */
+
+ case SCREEN_LOAD_CURSOR_LONG: { /* 3max/3min only */
+
+ sc->flags |= SCREEN_BEING_UPDATED;
+ bt431_cursor_sprite(fb->cursor_registers, sc->up->dev_dep_2.pm.cursor_sprite);
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+
+ break;
+ }
+
+ case SCREEN_SET_CURSOR_COLOR: {
+ color_map_t c[2];
+ register cursor_color_t *cc = (cursor_color_t*) status;
+
+ c[0].red = cc->Bg_rgb[0];
+ c[0].green = cc->Bg_rgb[1];
+ c[0].blue = cc->Bg_rgb[2];
+ c[1].red = cc->Fg_rgb[0];
+ c[1].green = cc->Fg_rgb[1];
+ c[1].blue = cc->Fg_rgb[2];
+
+ sc->flags |= SCREEN_BEING_UPDATED;
+ bt455_cursor_color (fb->vdac_registers, c );
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+
+ break;
+ }
+
+ case SCREEN_SET_CMAP_ENTRY: {
+ color_map_entry_t *e = (color_map_entry_t*) status;
+
+ if (e->index < 8) { /* 8&9 are fg&bg, do not touch */
+ sc->flags |= SCREEN_BEING_UPDATED;
+ bt455_load_colormap_entry( fb->vdac_registers, e->index, &e->value);
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+ }
+ break;
+ }
+
+ default:
+ return D_INVALID_OPERATION;
+ }
+ return D_SUCCESS;
+}
+
+/*
+ * Do what's needed when X exits
+ */
+fb_soft_reset(sc)
+ screen_softc_t sc;
+{
+ fb_softc_t *fb = (fb_softc_t*) sc->hw_state;
+ user_info_t *up = sc->up;
+ extern cursor_sprite_t bt431_default_cursor;
+
+ /*
+ * Restore params in mapped structure
+ */
+ pm_init_screen_params(sc,up);
+ up->row = up->max_row - 1;
+
+ up->dev_dep_2.pm.x26 = 2; /* you do not want to know */
+ up->dev_dep_1.pm.x18 = (short*)2;
+
+ /*
+ * Restore RAMDAC chip to default state, and init cursor
+ */
+ bt455_init(fb->vdac_registers);
+ bt431_init(fb->cursor_registers);
+
+ /*
+ * Load kernel's cursor sprite
+ */
+ bt431_cursor_sprite(fb->cursor_registers, bt431_default_cursor);
+
+ /*
+ * Color map and cursor color
+ */
+ fb_init_colormap(sc);
+}
+
+#endif /* NMFB > 0 */
diff --git a/chips/fdc_82077.h b/chips/fdc_82077.h
new file mode 100644
index 00000000..95192183
--- /dev/null
+++ b/chips/fdc_82077.h
@@ -0,0 +1,525 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: fdc_82077.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/92
+ *
+ * Defines for the Intel 82077 Floppy Disk Controller chip.
+ * Includes defines for 8272A and 82072.
+ */
+
+#ifndef _FDC_82077_H_
+#define _FDC_82077_H_
+
+/*
+ * Chips we claim to understand, and their modes
+ */
+#define fdc_8272a 0
+#define fdc_82072 1
+#define fdc_82077aa 2
+
+#define at_mode 0
+#define ps2_mode 1
+#define mod30_mode 2
+
+#define DRIVES_PER_FDC 4
+
+/*
+ * Register maps
+ */
+typedef struct {
+ volatile unsigned char fd_sra; /* r: status register A */
+ volatile unsigned char fd_srb; /* r: status register B */
+ volatile unsigned char fd_dor; /* rw: digital output reg */
+ volatile unsigned char fd_tdr; /* rw: tape drive register */
+ volatile unsigned char fd_msr; /* r: main status register */
+#define fd_dsr fd_msr /* w: data rate select reg */
+ volatile unsigned char fd_data; /* rw: fifo */
+ volatile unsigned char fd_xxx; /* --reserved-- */
+ volatile unsigned char fd_dir; /* r: digital input reg */
+#define fd_ccr fd_dir /* w: config control reg */
+} fd_regmap_t;
+
+typedef struct {
+ volatile unsigned char fd_msr; /* r: main status register */
+ volatile unsigned char fd_data; /* rw: data register */
+} fd_8272a_regmap_t;
+
+typedef fd_8272a_regmap_t fd_82072_regmap_t;
+/*#define fd_dsr fd_msr /* w: data rate select reg */
+
+/*
+ * Status register A (82077AA only)
+ *
+ * Only available in PS/2 (ps2) and Model 30 (m30) modes,
+ * not available in PC/AT (at) mode.
+ * Some signals have inverted polarity (~) on mod30
+ */
+
+#define FD_SRA_INT 0x80 /* interrupt */
+#define FD_SRA_DRV2 0x40 /* 2nd drive installed (ps2) */
+#define FD_SRA_DRQ 0x40 /* dma request (mod30) */
+#define FD_SRA_STEP 0x20 /* step pulse (~mod30) */
+#define FD_SRA_TRK0 0x10 /* Track 0 (~mod30) */
+#define FD_SRA_HDSEL 0x08 /* select head 1 (~mod30) */
+#define FD_SRA_INDX 0x04 /* Index hole (~mod30) */
+#define FD_SRA_WP 0x02 /* write protect (~mod30) */
+#define FD_SRA_DIR 0x01 /* step dir, 1->center (~mod30) */
+
+/*
+ * Status register B (82077AA only)
+ * Not available in at mode.
+ */
+
+#define FD_SRB_DRV2 0x80 /* 2nd drive installed (mod30) */
+ /* wired 1 on ps2 */
+
+#define FD_SRB_DS1 0x40 /* drive select 1 (mod30) */
+ /* wired 1 on ps2 */
+
+#define FD_SRB_DS0 0x20 /* drive select 0 */
+#define FD_SRB_WRDATA 0x10 /* out data (toggle or ~trigger) */
+#define FD_SRB_RDDATA 0x08 /* in data (toggle or ~trigger) */
+#define FD_SRB_WE 0x04 /* write enable (~mod30) */
+#define FD_SRB_MOT_1 0x02 /* motor enable drive 1 (ps2) */
+#define FD_SRB_DS3 0x02 /* drive select 3 (mod30) */
+#define FD_SRB_MOT_0 0x01 /* motor enable drive 0 (ps2) */
+#define FD_SRB_DS2 0x01 /* drive select 2 (mod30) */
+
+/*
+ * Digital output register (82077AA only)
+ */
+
+#define FD_DOR_MOT_3 0x80 /* motor enable drive 3 */
+#define FD_DOR_MOT_2 0x40
+#define FD_DOR_MOT_1 0x20
+#define FD_DOR_MOT_0 0x10
+#define FD_DOR_DMA_GATE 0x08 /* enable dma (mod30,at) */
+#define FD_DOR_ENABLE 0x04 /* chip reset (inverted) */
+#define FD_DOR_DRIVE_0 0x00 /* select drive no 0 */
+#define FD_DOR_DRIVE_1 0x01
+#define FD_DOR_DRIVE_2 0x02
+#define FD_DOR_DRIVE_3 0x03
+
+/*
+ * Tape drive register (82077AA only)
+ */
+
+#define FD_TDR_TAPE_1 0x01 /* unit 1 is a tape */
+#define FD_TDR_TAPE_2 0x02
+#define FD_TDR_TAPE_3 0x03
+#define FD_TDR_xxx 0xfc
+
+/*
+ * Data-rate select register (82077AA and 82072)
+ */
+
+#define FD_DSR_RESET 0x80 /* self-clearing reset */
+#define FD_DSR_POWER_DOWN 0x40 /* stop clocks and oscill */
+#define FD_DSR_zero 0x20 /* wired zero on 82077AA */
+#define FD_DSR_EPLL 0x20 /* enable PLL on 82072 */
+
+#define FD_DSR_PRECOMP_MASK 0x1c /* precompensation value */
+# define FD_DSR_PRECOMP_SHIFT 2
+
+# define FD_DSR_PRECOMP_DEFAULT 0 /* 41.67@1Mbps else 125ns */
+# define FD_DSR_PRECOMP_41_67 1
+# define FD_DSR_PRECOMP_83_34 2
+# define FD_DSR_PRECOMP_125_00 3
+# define FD_DSR_PRECOMP_166_67 4
+# define FD_DSR_PRECOMP_208_33 5
+# define FD_DSR_PRECOMP_250_00 6
+# define FD_DSR_PRECOMP_DISABLE 7 /* 0.00ns */
+
+#define FD_DSR_DATA_RATE_MASK 0x03
+#define FD_DSR_SD_250 0x00 /* fm modulation, 250Kbps bit clock */
+#define FD_DSR_SD_150 0x01
+#define FD_DSR_SD_125 0x02
+
+#define FD_DSR_DD_500 0x00 /* mfm modulation, 500Kbps */
+#define FD_DSR_DD_300 0x01
+#define FD_DSR_DD_250 0x02
+#define FD_DSR_DD_1000 0x03 /* illegal for 82077 */
+
+/*
+ * Main status register (all chips)
+ */
+
+#define FD_MSR_RQM 0x80 /* request from master (allowed) */
+#define FD_MSR_DIO 0x40 /* data in/out, 1->master read */
+#define FD_MSR_NON_DMA 0x20 /* dma disabled */
+#define FD_MSR_CMD_BSY 0x10 /* command in progress */
+#define FD_MSR_DRV_3_BSY 0x08 /* drive busy seeking */
+#define FD_MSR_DRV_2_BSY 0x04
+#define FD_MSR_DRV_1_BSY 0x02
+#define FD_MSR_DRV_0_BSY 0x01
+
+/*
+ * FIFO (82077AA and 82072)
+ *
+ * Service delay is
+ * Threshold * 8
+ * delay = ------------- - 1.5 usecs
+ * Data-rate
+ */
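+
+/*
+ * Worked example of the formula above (illustrative, not from the data
+ * sheet): with the threshold programmed to 8 bytes and MFM data at
+ * 500 Kbps, 8 * 8 bits / 500 Kbps = 128 usecs, leaving roughly
+ * 126.5 usecs to service the FIFO once the 1.5 usec latency is paid.
+ */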
+
+#define FD_FIFO_DEEP 16
+
+/*
+ * Digital input register (82077AA only)
+ */
+
+#define FD_DIR_DSK_CHG 0x80 /* disk was changed (~mod30) */
+
+#define FD_DIR_ones 0x78 /* wired ones for ps2 */
+#define FD_DIR_zeroes 0x70 /* wired zeroes for mod30 */
+#define FD_DIR_undef 0x7f /* undefined for at */
+
+#define FD_DIR_DR_MASK_PS2 0x06 /* current data rate (ps2) */
+# define FD_DIR_DR_SHIFT_PS2 1
+#define FD_DIR_LOW_DENS 0x01 /* zero for 500/1M dr (ps2) */
+
+#define FD_DIR_DMA_GATE 0x08 /* same as DOR (mod30) */
+#define FD_DIR_NOPREC 0x04 /* same as CCR (mod30) */
+#define FD_DIR_DR_MASK_M30 0x03 /* current data rate (mod30) */
+# define FD_DIR_DR_SHIFT_M30 0
+
+/*
+ * Configuration control register (82077AA only)
+ */
+
+#define FD_CCR_DATA_RATE_MASK 0x03 /* see DSR for values */
+#define FD_CCR_NOPREC 0x04 /* "has no function" (mod30) */
+
+
+/*
+ * Programming
+ *
+ * Legend for command bytes, when applicable
+ *
+ * hds bit 2 of byte 1, head select (1 -> head 1)
+ * ds bits 0-1 of byte 1, drive select
+ * c cylinder number (max 76 for 8272A, else 255)
+ * h head number
+ * r sector number
+ * n number of bytes in sector
+ * eot end-of-track, e.g. final sector number
+ * gpl gap length
+ * dtl data length (for partial sectors)
+ * st0-3 status byte
+ * srt step rate time
+ * hut head unload time
+ * hlt head load time
+ * nd disable DMA
+ * mot do not turn motor on before checking drive status
+ * pcn present cylinder number
+ * ncn new cylinder number
+ * rcn relative cylinder number (new=present+rcn)
+ * sc sectors/cylinder
+ * d filler byte
+ * ? undefined
+ * hsda high-speed disk adjust (doubles motor on/off delays)
+ * moff motor off timer, one disk revolution increments
+ * mon motor on timer, ditto
+ * eis enable implied seeks
+ * dfifo disable fifo
+ * poll disable poll
+ * fifthr fifo threshold (1 to 16 bytes)
+ * pretrk precomp starts on this trackno (0-255)
+ * wgate change timings of WE signal, in perpendicular mode
+ * gap change gap2 length, in perpendicular mode
+ * ec in verify, qualify terminating conditions (sc viz eot)
+ */
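+
+/*
+ * Informal example of the above (cf. fd_start() in fdc_82077_hdw.c):
+ * a multi-track MFM read of cylinder 5, head 1, first sector, drive 0,
+ * with 512-byte sectors, is issued as the 9 command bytes
+ *
+ *	MT|MFM|READ_DATA(=0xc6), hds<<2|ds(=0x04), c=5, h=1, r=1, n=2,
+ *	eot=secs/track, gpl, dtl=0xff
+ *
+ * and the result phase returns the 7 bytes st0,st1,st2,c,h,r,n.
+ */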
+
+/* First byte of command, qualifiers */
+#define FD_CMD_MT 0x80 /* Multi-track */
+#define FD_CMD_MFM 0x40 /* Double density */
+#define FD_CMD_SK 0x20 /* skip deleted data address mark */
+#define FD_CMD_DIR 0x40 /* relative seek direction (up) */
+
+/* command codes and description */
+
+/*
+ * Read an entire track.
+ * Qualifiers: MFM, SK (8272A only)
+ * Bytes total: 9 code,hds+ds,c,h,r,n,eot,gpl,dtl
+ * Result total: 7 st0,st1,st2,c,h,r,n
+ */
+#define FD_CMD_READ_TRACK 0x02
+
+/*
+ * Specify timers
+ * Qualifiers:
+ * Bytes total: 3 code,srt+hut,hlt+nd
+ * Result total:
+ */
+#define FD_CMD_SPECIFY 0x03
+
+/*
+ * Sense status of drive
+ * Qualifiers:
+ * Bytes total: 2 code,hds+ds +mot(82072 only)
+ * Result total: 1 st3
+ */
+#define FD_CMD_SENSE_DRIVE_STATUS 0x04
+# define FD_CMD_SDS_NO_MOT 0x80
+
+/*
+ * Write
+ * Qualifiers: MT, MFM
+ * Bytes total: 9 code,hds+ds,c,h,r,n,eot,gpl,dtl
+ * Result total: 7 st0,st1,st2,c,h,r,n
+ */
+#define FD_CMD_WRITE_DATA 0x05
+
+/*
+ * Read
+ * Qualifiers: MT, MFM, SK
+ * Bytes total: 9 code,hds+ds,c,h,r,n,eot,gpl,dtl
+ * Result total: 7 st0,st1,st2,c,h,r,n
+ */
+#define FD_CMD_READ_DATA 0x06
+
+/*
+ * Seek to track 0
+ * Qualifiers:
+ * Bytes total: 2 code,ds
+ * Result total:
+ */
+#define FD_CMD_RECALIBRATE 0x07
+
+/*
+ * Sense interrupt status
+ * Qualifiers:
+ * Bytes total: 1 code
+ * Result total: 2 st0,pcn
+ */
+#define FD_CMD_SENSE_INT_STATUS 0x08
+
+/*
+ * Write data and mark deleted
+ * Qualifiers: MT, MFM
+ * Bytes total: 9 code,hds+ds,c,h,r,n,eot,gpl,dtl
+ * Result total: 7 st0,st1,st2,c,h,r,n
+ */
+#define FD_CMD_WRITE_DELETED_DATA 0x09
+
+/*
+ * Read current head position
+ * Qualifiers: MFM
+ * Bytes total: 2 code,hds+ds
+ * Result total: 7 st0,st1,st2,c,h,r,n
+ */
+#define FD_CMD_READ_ID 0x0a
+
+/*
+ * Set value of MOT pin, unconditionally
+ * Qualifiers: see
+ * Bytes total: 1 code+..
+ * Result total: none returns to command phase
+ */
+#define FD_CMD_MOTOR_ON_OFF 0x0b /* 82072 only */
+
+# define FD_CMD_MOT_ON 0x80
+# define FD_CMD_MOT_DS 0x60
+# define FD_CMD_MOT_DS_SHIFT 5
+
+/*
+ * Read data despite deleted address mark
+ * Qualifiers: MT, MFM, SK
+ * Bytes total: 9 code,hds+ds,c,h,r,n,eot,gpl,dtl
+ * Result total: 7 st0,st1,st2,c,h,r,n
+ */
+#define FD_CMD_READ_DELETED_DATA 0x0c
+
+/*
+ * Media initialization
+ * Qualifiers: MFM
+ * Bytes total: 6 code,hds+ds,n,sc,gpl,d
+ * Data: 4*sc/2 c,h,r,n
+ * Result total: 7 st0,st1,st2,?,?,?,?
+ */
+#define FD_CMD_FORMAT_TRACK 0x0d
+
+/*
+ * Dump internal register status
+ * Qualifiers:
+ * Bytes total: 1 code
+ * Result total: 10 pcn0,pcn1,pcn2,pcn3,srt+hut,hlt+nd,
+ * sc/eot,hsda+moff+mon,
+ * eis+dfifo+poll+fifothr, pretrk
+ * Notes: 82077AA does not provide for hsda+moff+mon
+ */
+#define FD_CMD_DUMPREG 0x0e /* not 8272a */
+
+/*
+ * Move head
+ * Qualifiers:
+ * Bytes total: 3 code,hds+ds,ncn
+ * Result total:
+ */
+#define FD_CMD_SEEK 0x0f
+
+/*
+ * Report controller version
+ * Qualifiers:
+ * Bytes total: 1 code
+ * Result total: 1 version
+ */
+#define FD_CMD_VERSION 0x10 /* 82077AA only */
+# define FD_VERSION_82077AA 0x90
+
+/*
+ * Scan disk data
+ * Qualifiers: MT, MFM, SK
+ * Bytes total: 9 code,hds+ds,c,h,r,n,eot,gpl,dtl
+ * Result total: 7 st0,st1,st2,c,h,r,n
+ */
+#define FD_CMD_SCAN_EQUAL 0x11 /* 8272A only */
+
+/*
+ * Specify timers
+ * Qualifiers:
+ * Bytes total: 2 code,wgate+gap
+ * Result total:
+ */
+#define FD_CMD_PERPENDICULAR_MODE 0x12 /* 82077AA only */
+
+/*
+ * Set configuration parameters
+ * Qualifiers:
+ * Bytes total: 4 code,hsda+moff+mon,eis+dfifo+poll+fifothr,
+ * pretrk
+ * Result total:
+ * Notes: 82077AA does not provide for hsda+moff+mon
+ */
+#define FD_CMD_CONFIGURE 0x13 /* not 8272a */
+
+/*
+ * Verify CRC of disk data
+ * Qualifiers: MT, MFM, SK
+ * Bytes total: 9 code,ec+hds+ds,c,h,r,n,eot,gpl,dtl/sc
+ * Result total: 7 st0,st1,st2,c,h,r,n
+ */
+#define FD_CMD_VERIFY 0x16 /* 82077AA only */
+
+/*
+ * Scan disk data (disk less-or-equal memory)
+ * Qualifiers: MT, MFM, SK
+ * Bytes total: 9 code,hds+ds,c,h,r,n,eot,gpl,dtl
+ * Result total: 7 st0,st1,st2,c,h,r,n
+ */
+#define FD_CMD_SCAN_LOW_OR_EQUAL 0x19 /* 8272A only */
+
+/*
+ * Scan disk data (disk greater-or-equal memory)
+ * Qualifiers: MT, MFM, SK
+ * Bytes total: 9 code,hds+ds,c,h,r,n,eot,gpl,dtl
+ * Result total: 7 st0,st1,st2,c,h,r,n
+ */
+#define FD_CMD_SCAN_HIGH_OR_EQUAL 0x1d /* 8272A only */
+
+/*
+ * Specify timers
+ * Qualifiers: DIR
+ * Bytes total: 3 code,hds+ds,rcn
+ * Result total:
+ */
+#define FD_CMD_RELATIVE_SEEK 0x8f /* not 8272a */
+
+/*
+ * Any invalid command code
+ * Qualifiers:
+ * Bytes total: 1 code
+ * Result total: 1 st0 (st0 == 0x80)
+ */
+#define FD_CMD_INVALID 0xff
+
+
+/*
+ * Results and statii
+ *
+ * The typical command returns three status bytes,
+ * followed by four drive status bytes.
+ */
+
+/*
+ * Status register 0
+ */
+#define FD_ST0_IC_MASK 0xc0 /* interrupt completion code */
+
+# define FD_ST0_IC_OK 0x00 /* terminated ok */
+# define FD_ST0_IC_AT 0x40 /* exec phase ended sour */
+# define FD_ST0_IC_BAD_CMD 0x80 /* didnt grok */
+# define FD_ST0_IC_AT_POLL 0xc0 /* polling got in the way */
+
+#define FD_ST0_SE 0x20 /* (implied) seek ended */
+#define FD_ST0_EC 0x10 /* equipment check */
+#define FD_ST0_NR 0x08 /* not ready (raz for 82077aa) */
+#define FD_ST0_H 0x04 /* currently selected head */
+#define FD_ST0_DS 0x03 /* currently selected drive */
+
+/*
+ * Status register 1
+ */
+
+#define FD_ST1_EN 0x80 /* end of cylinder (TC not set?) */
+#define FD_ST1_zero 0x48
+#define FD_ST1_DE 0x20 /* data error, bad CRC */
+#define FD_ST1_OR		0x10	/* overrun/underrun */
+#define FD_ST1_ND 0x04 /* no data, sector not found */
+#define FD_ST1_NW 0x02 /* write protect signal */
+#define FD_ST1_MA 0x01 /* missing address mark */
+
+/*
+ * Status register 2
+ */
+
+#define FD_ST2_zero 0x80
+#define FD_ST2_CM 0x40 /* control mark, improper read */
+#define FD_ST2_DD 0x20 /* the CRC error was for data */
+#define FD_ST2_WC 0x10 /* wrong cylinder */
+#define FD_ST2_SH 0x08 /* scan hit (8272a only) */
+#define FD_ST2_SN 0x04 /* scan not met (8272a only) */
+#define FD_ST2_BC 0x02 /* bad cylinder, has 0xff mark */
+#define FD_ST2_MD 0x01 /* missing data mark */
+
+/*
+ * Status register 3
+ * (sense drive status)
+ */
+
+#define FD_ST3_FT 0x80 /* fault pin (0 if not 8272a) */
+#define FD_ST3_WP 0x40 /* write protect pin */
+#define FD_ST3_RDY 0x20 /* ready pin (1 on 82077aa) */
+#define FD_ST3_T0 0x10 /* track0 pin */
+#define FD_ST3_TS 0x08 /* two-sided pin (1 if not 8272a) */
+#define FD_ST3_HD 0x04 /* hdsel pin */
+#define FD_ST3_DS 0x03 /* drive select pins (1&0) */
+
+
+#endif /* _FDC_82077_H_ */
diff --git a/chips/fdc_82077_hdw.c b/chips/fdc_82077_hdw.c
new file mode 100644
index 00000000..f52da830
--- /dev/null
+++ b/chips/fdc_82077_hdw.c
@@ -0,0 +1,821 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: fdc_82077_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/92
+ *
+ * Driver for the Intel 82077 Floppy Disk Controller.
+ */
+
+#include <fd.h>
+#if NFD > 0
+
+#include <mach/std_types.h>
+#include <machine/machspl.h>
+#include <chips/busses.h>
+
+#include <chips/fdc_82077.h>
+#include <platforms.h>
+
+/* ---- */
+#include <device/param.h>
+#include <device/io_req.h>
+#include <device/device_types.h>
+#include <device/disk_status.h>
+#define UNITNO(d) ((d)>>5)
+#define SLAVENO(d) (((d)>>3)&0x3)
+#define PARAMNO(d) ((d)&0x7)
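+/* e.g. (illustrative only) minor 0x2b decodes to UNITNO 1, SLAVENO 1,
+   PARAMNO 3, i.e. the 5.25" 1.2 Meg entry of the fd_params table below */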
+/* ---- */
+
+#ifdef MAXINE
+
+/* we can only take one */
+#define MAX_DRIVES 1
+
+#define my_fdc_type fdc_82077aa
+#define the_fdc_type fd->fdc_type
+/* later: #define the_fdc_type my_fdc_type */
+
+/* Registers are read/written as words, byte 0 */
+/* padding is to x40 boundaries */
+typedef struct {
+ volatile unsigned int fd_sra; /* r: status register A */
+ int pad0[15];
+ volatile unsigned int fd_srb; /* r: status register B */
+ int pad1[15];
+ volatile unsigned int fd_dor; /* rw: digital output reg */
+ int pad2[15];
+ volatile unsigned int fd_tdr; /* rw: tape drive register */
+ int pad3[15];
+ volatile unsigned int fd_msr; /* r: main status register */
+/*#define fd_dsr fd_msr; /* w: data rate select reg */
+ int pad4[15];
+ volatile unsigned int fd_data; /* rw: fifo */
+ int pad5[15];
+ volatile unsigned int fd_xxx; /* --reserved-- */
+ int pad6[15];
+ volatile unsigned int fd_dir; /* r: digital input reg */
+/*#define fd_ccr fd_dir; /* w: config control reg */
+} fd_padded_regmap_t;
+
+#define machdep_reset_8272a(f,r)
+
+#else /* MAXINE */
+
+/* Pick your chip and padding */
+#define my_fdc_type fdc_8272a
+#define the_fdc_type my_fdc_type
+
+#define fd_padded_regmap_t fd_8272a_regmap_t
+
+#define machdep_reset_8272a(f,r) 1
+
+#endif /* MAXINE */
+
+
+#ifndef MAX_DRIVES
+#define MAX_DRIVES DRIVES_PER_FDC
+#endif
+
+/*
+ * Autoconf info
+ */
+
+static vm_offset_t fd_std[NFD] = { 0 };
+static struct bus_device *fd_info[NFD];
+static struct bus_ctlr *fd_minfo[NFD];
+static int fd_probe(), fd_slave(), fd_go();
+static void fd_attach();
+
+struct bus_driver fd_driver =
+ { fd_probe, fd_slave, fd_attach, fd_go, fd_std, "fd", fd_info,
+ "fdc", fd_minfo, /*BUS_INTR_B4_PROBE*/};
+
+/*
+ * Externally visible functions
+ */
+int fd_intr(); /* kernel */
+
+/*
+ * Media table
+ *
+ *	cyls/unit, secs/cyl, secs/unit, secs/track, r/w gpl, format gpl, xfer rate
+ */
+typedef struct {
+ unsigned char d_cylperunit;
+ unsigned char d_secpercyl;
+ unsigned short d_secperunit;
+ unsigned char d_secpertrk;
+ unsigned char d_gpl;
+ unsigned char d_fgpl;
+ unsigned char d_xfer_rate;
+} fd_params_t;
+
+fd_params_t fd_params[8] = {
+ {80, 18, 1440, 9, 0x2a, 0x50, FD_DSR_DD_250}, /* [0] 3.50" 720 Kb */
+ {80, 36, 2880, 18, 0x1b, 0x6c, FD_DSR_DD_500}, /* [1] 3.50" 1.44 Meg */
+ {40, 18, 720, 9, 0x2a, 0x50, FD_DSR_DD_250}, /* [2] 5.25" 360 Kb */
+ {80, 30, 2400, 15, 0x1b, 0x54, FD_DSR_DD_500}, /* [3] 5.25" 1.20 Meg */
+};
+
+/*
+ * Software status of chip
+ */
+struct fd_softc {
+ fd_padded_regmap_t *regs;
+ char fdc_type;
+ char fdc_mode;
+ char messed_up;
+ char slave_active;
+ struct slave_t {
+ io_req_t ior;
+ decl_simple_lock_data(,slave_lock)
+
+ /* status at end of last command */
+ unsigned char st0;
+ unsigned char st1;
+ unsigned char st2;
+ unsigned char c;
+ unsigned char h;
+ unsigned char r;
+ unsigned char n;
+ unsigned char st3;
+ /* ... */
+ unsigned char medium_status;
+# define ST_MEDIUM_PRESENT 1
+# define ST_MEDIUM_KNOWN 2
+ char last_command;
+ char bytes_expected;
+ fd_params_t *params;
+
+ } slave_status[DRIVES_PER_FDC];
+} fd_softc_data[NFD];
+
+typedef struct fd_softc *fd_softc_t;
+
+fd_softc_t fd_softc[NFD];
+
+static char *chip_names[4] = { "8272-A", "82072", "82077-AA", 0 };
+static char *mode_names[4] = { "PC AT", "PS/2", "Model 30", 0 };
+
+/*
+ * Probe chip to see if it is there
+ */
+static fd_probe (reg, ctlr)
+ vm_offset_t reg;
+ struct bus_ctlr *ctlr;
+{
+ int unit = ctlr->unit;
+ fd_softc_t fd;
+ fd_padded_regmap_t *regs;
+
+ /*
+ * See if we are here
+ */
+ if (check_memory(reg, 0)) {
+ /* no rides today */
+ return 0;
+ }
+
+ fd = &fd_softc_data[unit];
+ fd_softc[unit] = fd;
+
+ regs = (fd_padded_regmap_t *)reg;
+ fd->regs = regs;
+ fd->fdc_type = my_fdc_type;
+
+ fd_reset(fd);
+
+ if (the_fdc_type == fdc_82077aa) {
+ /* See if properly functioning */
+ unsigned char temp = FD_CMD_VERSION;
+ if (!fd_go(fd, 0, &temp, 1, 1))
+ return 0; /* total brxage */
+ if (!fd_get_result(fd, &temp, 1, FALSE))
+ return 0; /* partial brxage */
+ if (temp != FD_VERSION_82077AA)
+ printf( "{ %s x%x } ",
+ "Accepting non-82077aa version id",
+ temp);
+ }
+
+ printf("%s%d: %s chip controller",
+ ctlr->name, ctlr->unit, chip_names[fd->fdc_type]);
+ if (the_fdc_type == fdc_82077aa)
+ printf(" in %s mode", mode_names[fd->fdc_mode]);
+ printf(".\n");
+
+ return 1;
+}
+
+/* See if we like this slave */
+static fd_slave(ui, reg)
+ struct bus_device *ui;
+ vm_offset_t reg;
+{
+ int slave = ui->slave;
+ fd_softc_t fd;
+ unsigned char sns[2];
+
+ if (slave >= MAX_DRIVES) return 0;
+
+ fd = fd_softc[ui->ctlr];
+
+ sns[0] = FD_CMD_SENSE_DRIVE_STATUS;
+ sns[1] = slave & 0x3;
+ if (the_fdc_type == fdc_82072)
+ sns[1] |= FD_CMD_SDS_NO_MOT;
+ if (!fd_go(fd, slave, sns, 2, 1)) return 0;
+ if (!fd_get_result(fd, sns, 1, FALSE)) return 0;
+
+ fd->slave_status[slave].st3 = sns[0];
+
+ return 1;
+}
+
+static void
+fd_attach (ui)
+ struct bus_device *ui;
+{
+ /* Attach a slave */
+}
+
+static boolean_t
+fd_go(fd, slave, cmd, cmdlen, reply_count)
+ fd_softc_t fd;
+ unsigned char cmd[];
+{
+
+ /* XXX check who active, enque ifnot */
+
+ fd->slave_active = slave;
+ fd->slave_status[slave].bytes_expected = reply_count;
+ fd->slave_status[slave].last_command = *cmd;
+ return fd_command(fd, cmd, cmdlen);
+}
+
+fd_intr (unit, spllevel)
+{
+ fd_softc_t fd;
+ fd_padded_regmap_t *regs;
+ unsigned char msr;
+ register struct slave_t *slv;
+
+
+ splx(spllevel);
+
+ fd = fd_softc[unit];
+ regs = fd->regs;
+
+ /* did polling see a media change */
+	/* the busy bit in msr says whether this interrupt is async or not */
+
+ msr = regs->fd_msr;
+ if ((msr & (FD_MSR_RQM|FD_MSR_DIO)) == (FD_MSR_RQM|FD_MSR_DIO)) {
+
+ /* result phase */
+*(unsigned int *)0xbc040100 &= ~0x00600000;	/* XXX hardwired DMA control, cf. fd_start() */
+
+ slv = &fd->slave_status[fd->slave_active];
+ fd_get_result(fd, &slv->st0, slv->bytes_expected, FALSE);
+ fd_start(fd, fd->slave_active, TRUE);
+ return;
+ }
+ /* async interrupt, either seek complete or media change */
+ while (1) {
+ unsigned char st[2];
+ register int slave, m;
+
+ *st = FD_CMD_SENSE_INT_STATUS;
+ fd_command(fd, st, 1);
+
+ fd_get_result(fd, st, 2, FALSE);
+
+ slave = *st & FD_ST0_DS;
+ slv = &fd->slave_status[slave];
+ slv->c = st[1];
+
+ switch (*st & FD_ST0_IC_MASK) {
+
+ case FD_ST0_IC_OK:
+/* we get an FD_ST0_SE for RECALIBRATE. Wait for it or discard ? */
+
+ case FD_ST0_IC_AT:
+
+ case FD_ST0_IC_BAD_CMD:
+ return;
+
+ case FD_ST0_IC_AT_POLL:
+ m = slv->medium_status;
+ if (m & ST_MEDIUM_PRESENT)
+ m &= ~ST_MEDIUM_PRESENT;
+ else
+ m |= ST_MEDIUM_PRESENT;
+ slv->medium_status = m;
+ }
+ }
+}
+
+/*
+ * Non-interface functions and utilities
+ */
+
+fd_reset(fd)
+ fd_softc_t fd;
+{
+ register fd_padded_regmap_t *regs;
+
+ regs = fd->regs;
+
+ /*
+ * Reset the chip
+ */
+ if (the_fdc_type == fdc_82072)
+ /* Fix if your box uses an external PLL */
+ regs->fd_dsr = FD_DSR_RESET | FD_DSR_EPLL;
+ else if (the_fdc_type == fdc_82077aa)
+ regs->fd_dor = 0;
+ else
+ machdep_reset_8272a(fd, regs);
+
+ delay(5); /* 4usecs in specs */
+
+ /*
+ * Be smart with the smart ones
+ */
+ if (the_fdc_type == fdc_82077aa) {
+
+ /*
+ * See in which mood we are (it cannot be changed)
+ */
+ int temp;
+
+ /* Take chip out of hw reset */
+ regs->fd_dor = FD_DOR_ENABLE | FD_DOR_DMA_GATE;
+ delay(10);
+
+ /* what do we readback from the DIR register as datarate ? */
+ regs->fd_ccr = FD_DSR_SD_125;
+ delay(10);
+
+ temp = regs->fd_dir;
+ if ((temp & 0x7) == FD_DSR_SD_125)
+ fd->fdc_mode = mod30_mode;
+ else if ((temp & (FD_DIR_ones | FD_DIR_DR_MASK_PS2)) ==
+ ((FD_DSR_SD_125 << FD_DIR_DR_SHIFT_PS2) | FD_DIR_ones))
+ fd->fdc_mode = ps2_mode;
+ else
+ /* this assumes tri-stated bits 1&2 read the same */
+ fd->fdc_mode = at_mode;
+
+ }
+
+ /*
+ * Send at least 4 sense interrupt cmds, one per slave
+ */
+ {
+
+ unsigned char sns, st[2];
+ int i, nloops;
+
+ sns = FD_CMD_SENSE_INT_STATUS;
+ i = nloops = 0;
+
+ do {
+ nloops++;
+
+ (void) fd_command(fd, &sns, 1);
+
+ st[0] = 0; /* in case bad status */
+ (void) fd_get_result(fd, st, 2, TRUE);
+
+ if ((st[0] & FD_ST0_IC_MASK) == FD_ST0_IC_AT_POLL) {
+ register int slave;
+
+ slave = st[0] & FD_ST0_DS;
+ fd->slave_status[slave].st0 = st[0];
+ fd->slave_status[slave].c = st[1];
+ i++;
+ }
+ } while ( (nloops < 30) &&
+ ((i < 4) || (st[0] != FD_ST0_IC_BAD_CMD)) );
+
+ /* sanity check */
+ if (nloops == 30) {
+ (void) fd_messed_up(fd);
+ return;
+ }
+ }
+
+ /*
+ * Install current parameters
+ */
+ if (the_fdc_type != fdc_8272a) {
+
+ unsigned char cnf[4];
+
+ /* send configure command to turn polling off */
+ cnf[0] = FD_CMD_CONFIGURE;
+ cnf[1] = 0x60; /* moff 110 */
+ cnf[2] = 0x48; /* eis, poll, thr=8 */
+ cnf[3] = 0;
+ if (!fd_command(fd, cnf, 4))
+ return;
+ /* no status */
+ }
+
+ /*
+ * Send specify to select defaults
+ */
+ {
+ unsigned char sfy[3];
+
+ sfy[0] = FD_CMD_SPECIFY;
+#if 0
+ sfy[1] = (12 << 4) | 7; /* step 4, hut 112us @500 */
+ sfy[2] = 2 << 1; /* hlt 29us @500 */
+#else
+ sfy[1] = (13 << 4) | 15;
+ sfy[2] = 1 << 1;
+#endif
+ (void) fd_command(fd, sfy, 3);
+ /* no status */
+ }
+}
+
+#define FD_MAX_WAIT 1000
+
+boolean_t
+fd_command(fd, cmd, cmd_len)
+ fd_softc_t fd;
+ char *cmd;
+{
+ register fd_padded_regmap_t *regs;
+
+ regs = fd->regs;
+
+ while (cmd_len > 0) {
+ register int i, s;
+
+ /* there might be long delays, so we pay this price */
+ s = splhigh();
+ for (i = 0; i < FD_MAX_WAIT; i++)
+ if ((regs->fd_msr & (FD_MSR_RQM|FD_MSR_DIO)) ==
+ FD_MSR_RQM)
+ break;
+ else
+ delay(10);
+ if (i == FD_MAX_WAIT) {
+ splx(s);
+ return fd_messed_up(fd);
+ }
+ regs->fd_data = *cmd++;
+ splx(s);
+ if (--cmd_len) delay(12);
+ }
+
+ return TRUE;
+}
+
+boolean_t
+fd_get_result(fd, st, st_len, ignore_errors)
+ fd_softc_t fd;
+ char *st;
+{
+ register fd_padded_regmap_t *regs;
+
+ regs = fd->regs;
+
+ while (st_len > 0) {
+ register int i, s;
+
+ /* there might be long delays, so we pay this price */
+ s = splhigh();
+ for (i = 0; i < FD_MAX_WAIT; i++)
+ if ((regs->fd_msr & (FD_MSR_RQM|FD_MSR_DIO)) ==
+ (FD_MSR_RQM|FD_MSR_DIO))
+ break;
+ else
+ delay(10);
+ if (i == FD_MAX_WAIT) {
+ splx(s);
+ return (ignore_errors) ? FALSE : fd_messed_up(fd);
+ }
+ *st++ = regs->fd_data;
+ splx(s);
+ st_len--;
+ }
+
+ return TRUE;
+}
+
+
+boolean_t
+fd_messed_up(fd)
+ fd_softc_t fd;
+{
+ fd->messed_up++;
+ printf("fd%d: messed up, disabling.\n", fd - fd_softc_data);
+ /* here code to
+ ior->error = ..;
+ restart
+ */
+ return FALSE;
+}
+
+/*
+ * Debugging aids
+ */
+
+fd_state(unit)
+{
+ fd_softc_t fd = fd_softc[unit];
+ fd_padded_regmap_t *regs;
+
+ if (!fd || !fd->regs) return 0;
+ regs = fd->regs;
+ if (the_fdc_type == fdc_8272a)
+ printf("msr %x\n", regs->fd_msr);
+ else
+ printf("sra %x srb %x dor %x tdr %x msr %x dir %x\n",
+ regs->fd_sra, regs->fd_srb, regs->fd_dor,
+ regs->fd_tdr, regs->fd_msr, regs->fd_dir);
+}
+
+#endif
+
+/* to be moved in separate file, or the above modified to live with scsi */
+
+fd_open(dev, mode, ior)
+ int dev;
+ dev_mode_t mode;
+ io_req_t ior;
+{
+ unsigned char cmd[2];
+ fd_softc_t fd;
+ int slave;
+
+ fd = fd_softc[UNITNO(dev)];
+ slave = SLAVENO(dev);
+
+ /* XXX find out what medium we have, automagically XXX */
+	/* for now, set params depending on minor */
+ fd->slave_status[slave].params = &fd_params[PARAMNO(dev)];
+
+ /* XXXYYYXXXYYY SEND CONFIGURE if params changed */
+
+ /* Turn motor on */
+ if (the_fdc_type == fdc_82072) {
+
+ cmd[0] = FD_CMD_MOTOR_ON_OFF | FD_CMD_MOT_ON |
+ ((slave << FD_CMD_MOT_DS_SHIFT) & FD_CMD_MOT_DS);
+ (void) fd_go(fd, slave, cmd, 1, 0);
+ /* no status */
+
+ } else if (the_fdc_type == fdc_82077aa) {
+
+ fd->regs->fd_dor |= ((1<<slave)<<4);
+ }
+
+ /* recalibrate to track 0 */
+ cmd[0] = FD_CMD_RECALIBRATE;
+ cmd[1] = slave;
+ if (!fd_go(fd, slave, cmd, 2, 0))
+ return D_DEVICE_DOWN;
+ /* will generate a completion interrupt */
+
+ /* if not writeable return D_READ_ONLY ? */
+
+ return D_SUCCESS;
+}
+
+fd_close(dev)
+ int dev;
+{
+ fd_softc_t fd;
+ register int slave;
+ unsigned char cmd[2];
+
+ slave = SLAVENO(dev);
+ fd = fd_softc[UNITNO(dev)];
+
+ /* do not delete media info, do that iff interrupt sez changed */
+
+ /* Turn motor off */
+ if (the_fdc_type == fdc_82072) {
+
+ cmd[0] = FD_CMD_MOTOR_ON_OFF |
+ ((slave << FD_CMD_MOT_DS_SHIFT) & FD_CMD_MOT_DS);
+ (void) fd_go(fd, 0, cmd, 1, 0);
+ /* no status */
+
+ } else if (the_fdc_type == fdc_82077aa) {
+
+ fd->regs->fd_dor &= ~((1<<slave)<<4);
+ }
+ return D_SUCCESS;
+}
+
+fd_strategy(ior)
+ io_req_t ior;
+{
+#if 0
+ if (ior->io_op & IO_READ)
+ bzero(ior->io_data, ior->io_count);
+ iodone(ior);
+#else
+ struct slave_t *slv;
+ fd_softc_t fd;
+ unsigned int i, rec, max, dev;
+ fd_params_t *params;
+
+ /* readonly */
+
+ dev = ior->io_unit;
+
+ /* only one partition */
+ fd = fd_softc[UNITNO(dev)];
+ slv = &fd->slave_status[SLAVENO(dev)];
+ params = slv->params;
+ max = params->d_secperunit;
+ rec = ior->io_recnum;
+ i = btodb(ior->io_count + DEV_BSIZE - 1);
+ if (((rec + i) > max) || (ior->io_count < 0)) {
+ ior->io_error = D_INVALID_SIZE;
+ ior->io_op |= IO_ERROR;
+ ior->io_residual = ior->io_count;
+ iodone(ior);
+ return;
+ }
+
+	/* remember the target cylinder; fd_start() reads it back from io_residual */
+	ior->io_residual = rec / params->d_secpercyl;
+
+ /*
+ * Enqueue operation
+ */
+ i = splbio();
+ simple_lock(&slv->slave_lock);
+ if (slv->ior) {
+ disksort(slv->ior, ior);
+ simple_unlock(&slv->slave_lock);
+ } else {
+ ior->io_next = 0;
+ slv->ior = ior;
+ simple_unlock(&slv->slave_lock);
+ fd_start(fd, SLAVENO(dev), FALSE);
+ }
+ splx(i);
+#endif
+}
+
+fd_start(fd, slave, done)
+ boolean_t done;
+ fd_softc_t fd;
+{
+ register io_req_t ior;
+ struct slave_t *slv;
+
+ slv = &fd->slave_status[slave];
+ if ((ior = slv->ior) == 0)
+ return;
+
+ if (done) {
+ /* .. errors .. */
+ /* .. partial xfers .. */
+
+ /* dequeue next one */
+ {
+ io_req_t next;
+
+			simple_lock(&slv->slave_lock);
+			next = ior->io_next;
+			slv->ior = next;
+			simple_unlock(&slv->slave_lock);
+
+ iodone(ior);
+ if (next == 0)
+ return;
+
+ ior = next;
+ }
+ }
+
+#ifdef no_eis
+ if (slv->c != ior->io_residual) SEEK_it;
+#endif
+
+/* setup dma */
+#if 1
+ if (ior->io_op & IO_READ) /* like SCSI */
+#else
+ if ((ior->io_op & IO_READ) == 0)
+#endif
+ {
+ *(unsigned int *)0xbc040100 |= 0x00200000 | 0x00400000;
+ } else {
+ *(unsigned int *)0xbc040100 &= ~0x00400000;
+ *(unsigned int *)0xbc040100 |= 0x00200000;
+ }
+ *(unsigned int *)0xbc040070 = (((unsigned int)kvtophys(ior->io_data))>>2)<<5;
+ *(unsigned int *)0xbc0401a0 = 13;
+
+#ifdef no_eis
+ if (slv->c == ior->io_residual) {
+#else
+ {
+#endif
+ unsigned char cmd[9];
+ unsigned char head, sec;
+ fd_params_t *params;
+
+ params = slv->params;
+
+ fd->regs->fd_dsr = params->d_xfer_rate;
+
+ sec = ior->io_recnum % params->d_secpercyl;
+ head = sec / params->d_secpertrk;
+ sec = (sec % params->d_secpertrk);
+
+ cmd[0] = (ior->io_op & IO_READ) ?
+ FD_CMD_MT | FD_CMD_MFM | FD_CMD_READ_DATA :
+ FD_CMD_MT | FD_CMD_MFM | FD_CMD_WRITE_DATA;
+ cmd[1] = (head << 2) | slave;
+ cmd[2] = ior->io_residual;
+ cmd[3] = head;
+ cmd[4] = sec + 1; /* 0 starts at 1 :-) */
+ cmd[5] = 0x2; /* 512 byte sectors */
+ cmd[6] = params->d_secpertrk;
+ cmd[7] = params->d_gpl;
+ cmd[8] = 0xff;
+
+ fd_go( fd, slave, cmd, 9, 7);
+
+ }
+}
+
+extern minphys();
+
+fd_read(dev, ior)
+ int dev;
+ io_req_t ior;
+{
+ return block_io(fd_strategy, minphys, ior);
+}
+
+int fdc_write_enable = 1;
+
+fd_write(dev, ior)
+ int dev;
+ io_req_t ior;
+{
+	/* check if writeable */
+
+	if (fdc_write_enable)
+		return block_io(fd_strategy, minphys, ior);
+	else
+		return D_SUCCESS;
+}
+
+fd_set_status(dev, flavor, status, status_count)
+ int dev;
+ int flavor;
+ dev_status_t status;
+ unsigned int *status_count;
+{
+ printf("fdc_set_status(%x, %x, %x, %x)", dev, flavor, status, status_count);
+ return D_SUCCESS;
+}
+
+fd_get_status(dev, flavor, status, status_count)
+ int dev;
+ int flavor;
+ dev_status_t status;
+ unsigned int status_count;
+{
+ printf("fdc_get_status(%x, %x, %x, %x)", dev, flavor, status, status_count);
+ return D_SUCCESS;
+}
+
diff --git a/chips/frc.c b/chips/frc.c
new file mode 100644
index 00000000..7f033c37
--- /dev/null
+++ b/chips/frc.c
@@ -0,0 +1,150 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: frc.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 3/92
+ *
+ * Generic, mappable free running counter driver.
+ */
+
+#include <frc.h>
+#if NFRC > 0
+
+#include <mach/std_types.h>
+#include <chips/busses.h>
+#include <device/device_types.h>
+
+/*
+ * Machine defines
+ * All you need to do to get this working on a
+ * random box is to define one macro and provide
+ * the correct virtual address.
+ */
+#include <platforms.h>
+#ifdef DECSTATION
+#define btop(x) mips_btop(x)
+#endif /* DECSTATION */
+
+/*
+ * Autoconf info
+ */
+
+static vm_offset_t frc_std[NFRC] = { 0 };
+static vm_size_t frc_offset[NFRC] = { 0 };
+static struct bus_device *frc_info[NFRC];
+static int frc_probe(vm_offset_t,struct bus_ctlr *);
+static void frc_attach(struct bus_device *);
+
+struct bus_driver frc_driver =
+ { frc_probe, 0, frc_attach, 0, frc_std, "frc", frc_info, };
+
+/*
+ * Externally visible functions
+ */
+io_return_t frc_openclose(int,int); /* user */
+vm_offset_t frc_mmap(int,vm_offset_t,vm_prot_t);
+void frc_set_address(int,vm_size_t);
+
+/*
+ * FRC's in kernel virtual memory. For in-kernel timestamps.
+ */
+vm_offset_t frc_address[NFRC];
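+
+/*
+ * An in-kernel timestamp is then just a read through that address,
+ * e.g. (sketch, assuming a memory-mapped 32-bit counter):
+ *
+ *	stamp = *(volatile unsigned int *)frc_address[unit];
+ */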
+
+/* machine-specific setups */
+void
+frc_set_address(
+ int unit,
+ vm_size_t offset)
+{
+ if (unit < NFRC) {
+ frc_offset[unit] = offset;
+ }
+}
+
+
+/*
+ * Probe chip to see if it is there
+ */
+static frc_probe (
+ vm_offset_t reg,
+ struct bus_ctlr *ui)
+{
+ /* see if something present at the given address */
+ if (check_memory(reg, 0)) {
+ frc_address[ui->unit] = 0;
+ return 0;
+ }
+ frc_std[ui->unit] = (vm_offset_t) reg;
+ printf("[mappable] ");
+ return 1;
+}
+
+static void
+frc_attach (
+ struct bus_device *ui)
+{
+ if (ui->unit < NFRC) {
+ frc_address[ui->unit] =
+ (vm_offset_t) frc_std[ui->unit] + frc_offset[ui->unit];
+ printf(": free running counter %d at kernel vaddr 0x%x",
+ ui->unit, frc_address[ui->unit]);
+ }
+ else
+ panic("frc: unknown unit number"); /* shouldn't happen */
+}
+
+int frc_intr()
+{
+ /* we do not expect interrupts */
+ panic("frc_intr");
+}
+
+io_return_t
+frc_openclose(
+ int dev,
+ int flag)
+{
+ if (frc_std[dev])
+ return D_SUCCESS;
+ else
+ return D_NO_SUCH_DEVICE;
+}
+
+vm_offset_t
+frc_mmap(
+ int dev,
+ vm_offset_t off,
+ vm_prot_t prot)
+{
+ vm_offset_t addr;
+ if ((prot & VM_PROT_WRITE) || (off >= PAGE_SIZE) )
+ return (-1);
+ addr = (vm_offset_t) frc_std[dev] + frc_offset[dev];
+ return btop(pmap_extract(pmap_kernel(), addr));
+}
+
+#endif
diff --git a/chips/ims332.c b/chips/ims332.c
new file mode 100644
index 00000000..ebe6a6cb
--- /dev/null
+++ b/chips/ims332.c
@@ -0,0 +1,312 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ims332.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/92
+ *
+ * Routines for the Inmos IMS-G332 Colour video controller
+ */
+
+#include <platforms.h>
+
+#include <chips/ims332.h>
+#include <chips/screen.h>
+
+#include <chips/xcfb_monitor.h>
+
+/*
+ * Generic register access
+ */
+typedef volatile unsigned char *ims332_padded_regmap_t;
+
+#ifdef MAXINE
+
+unsigned int
+ims332_read_register(regs, regno)
+ unsigned char *regs;
+{
+ unsigned char *rptr;
+ register unsigned int val, v1;
+
+ /* spec sez: */
+ rptr = regs + 0x80000 + (regno << 4);
+ val = * ((volatile unsigned short *) rptr );
+ v1 = * ((volatile unsigned short *) regs );
+
+ return (val & 0xffff) | ((v1 & 0xff00) << 8);
+}
+
+ims332_write_register(regs, regno, val)
+ unsigned char *regs;
+ register unsigned int val;
+{
+ unsigned char *wptr;
+
+ /* spec sez: */
+ wptr = regs + 0xa0000 + (regno << 4);
+ * ((volatile unsigned int *)(regs)) = (val >> 8) & 0xff00;
+ * ((volatile unsigned short *)(wptr)) = val;
+}
+
+#define assert_ims332_reset_bit(r) *r &= ~0x40
+#define deassert_ims332_reset_bit(r) *r |= 0x40
+
+#else /*MAXINE*/
+
+#define ims332_read_register(p,r) \
+ ((unsigned int *)(p)) [ (r) ]
+#define ims332_write_register(p,r,v) \
+ ((unsigned int *)(p)) [ (r) ] = (v)
+
+#endif /*MAXINE*/
+
+
+/*
+ * Color map
+ */
+ims332_load_colormap( regs, map)
+ ims332_padded_regmap_t *regs;
+ color_map_t *map;
+{
+ register int i;
+
+ for (i = 0; i < 256; i++, map++)
+ ims332_load_colormap_entry(regs, i, map);
+}
+
+ims332_load_colormap_entry( regs, entry, map)
+ ims332_padded_regmap_t *regs;
+ color_map_t *map;
+{
+ /* ?? stop VTG */
+ ims332_write_register(regs, IMS332_REG_LUT_BASE + (entry & 0xff),
+ (map->blue << 16) |
+ (map->green << 8) |
+ (map->red));
+}
+
+ims332_init_colormap( regs)
+ ims332_padded_regmap_t *regs;
+{
+ color_map_t m;
+
+ m.red = m.green = m.blue = 0;
+ ims332_load_colormap_entry( regs, 0, &m);
+
+ m.red = m.green = m.blue = 0xff;
+ ims332_load_colormap_entry( regs, 1, &m);
+ ims332_load_colormap_entry( regs, 255, &m);
+
+ /* since we are at it, also fix cursor LUT */
+ ims332_load_colormap_entry( regs, IMS332_REG_CURSOR_LUT_0, &m);
+ ims332_load_colormap_entry( regs, IMS332_REG_CURSOR_LUT_1, &m);
+ /* *we* do not use this, but the prom does */
+ ims332_load_colormap_entry( regs, IMS332_REG_CURSOR_LUT_2, &m);
+}
+
+#if 1/*debug*/
+ims332_print_colormap( regs)
+ ims332_padded_regmap_t *regs;
+{
+ register int i;
+
+ for (i = 0; i < 256; i++) {
+ register unsigned int color;
+
+ color = ims332_read_register( regs, IMS332_REG_LUT_BASE + i);
+ printf("%x->[x%x x%x x%x]\n", i,
+ (color >> 16) & 0xff,
+ (color >> 8) & 0xff,
+ color & 0xff);
+ }
+}
+#endif
+
+/*
+ * Video on/off
+ *
+ * It is unfortunate that X11 goes backward with white@0
+ * and black@1. So we must stash away the zero-th entry
+ * and fix it while screen is off. Also must remember
+ * it, sigh.
+ */
+struct vstate {
+ ims332_padded_regmap_t *regs;
+ unsigned short off;
+};
+
+ims332_video_off(vstate, up)
+ struct vstate *vstate;
+ user_info_t *up;
+{
+ register ims332_padded_regmap_t *regs = vstate->regs;
+ register unsigned *save, csr;
+
+ if (vstate->off)
+ return;
+
+ /* Yes, this is awful */
+ save = (unsigned *)up->dev_dep_2.gx.colormap;
+
+ *save = ims332_read_register(regs, IMS332_REG_LUT_BASE);
+
+ ims332_write_register(regs, IMS332_REG_LUT_BASE, 0);
+
+ ims332_write_register( regs, IMS332_REG_COLOR_MASK, 0);
+
+ /* cursor now */
+ csr = ims332_read_register(regs, IMS332_REG_CSR_A);
+ csr |= IMS332_CSR_A_DISABLE_CURSOR;
+ ims332_write_register(regs, IMS332_REG_CSR_A, csr);
+
+ vstate->off = 1;
+}
+
+ims332_video_on(vstate, up)
+ struct vstate *vstate;
+ user_info_t *up;
+{
+ register ims332_padded_regmap_t *regs = vstate->regs;
+ register unsigned *save, csr;
+
+ if (!vstate->off)
+ return;
+
+ /* Like I said.. */
+ save = (unsigned *)up->dev_dep_2.gx.colormap;
+
+ ims332_write_register(regs, IMS332_REG_LUT_BASE, *save);
+
+ ims332_write_register( regs, IMS332_REG_COLOR_MASK, 0xffffffff);
+
+ /* cursor now */
+ csr = ims332_read_register(regs, IMS332_REG_CSR_A);
+ csr &= ~IMS332_CSR_A_DISABLE_CURSOR;
+ ims332_write_register(regs, IMS332_REG_CSR_A, csr);
+
+ vstate->off = 0;
+}
+
+/*
+ * Cursor
+ */
+ims332_pos_cursor(regs,x,y)
+ ims332_padded_regmap_t *regs;
+ register int x,y;
+{
+ ims332_write_register( regs, IMS332_REG_CURSOR_LOC,
+ ((x & 0xfff) << 12) | (y & 0xfff) );
+}
+
+
+ims332_cursor_color( regs, color)
+ ims332_padded_regmap_t *regs;
+ color_map_t *color;
+{
+ /* Bg is color[0], Fg is color[1] */
+ ims332_write_register(regs, IMS332_REG_CURSOR_LUT_0,
+ (color->blue << 16) |
+ (color->green << 8) |
+ (color->red));
+ color++;
+ ims332_write_register(regs, IMS332_REG_CURSOR_LUT_1,
+ (color->blue << 16) |
+ (color->green << 8) |
+ (color->red));
+}
+
+ims332_cursor_sprite( regs, cursor)
+ ims332_padded_regmap_t *regs;
+ unsigned short *cursor;
+{
+ register int i;
+
+ /* We *could* cut this down a lot... */
+ for (i = 0; i < 512; i++, cursor++)
+ ims332_write_register( regs,
+ IMS332_REG_CURSOR_RAM+i, *cursor);
+}
+
+/*
+ * Initialization
+ */
+ims332_init(regs, reset, mon)
+ ims332_padded_regmap_t *regs;
+ unsigned int *reset;
+ xcfb_monitor_type_t mon;
+{
+ int shortdisplay, broadpulse, frontporch;
+
+ assert_ims332_reset_bit(reset);
+ delay(1); /* specs sez 50ns.. */
+ deassert_ims332_reset_bit(reset);
+
+ /* CLOCKIN appears to receive a 6.25 Mhz clock --> PLL 12 for 75Mhz monitor */
+ ims332_write_register(regs, IMS332_REG_BOOT, 12 | IMS332_BOOT_CLOCK_PLL);
+
+ /* initialize VTG */
+ ims332_write_register(regs, IMS332_REG_CSR_A,
+ IMS332_BPP_8 | IMS332_CSR_A_DISABLE_CURSOR);
+ delay(50); /* spec does not say */
+
+ /* datapath registers (values taken from prom's settings) */
+
+ frontporch = mon->line_time - (mon->half_sync * 2 +
+ mon->back_porch +
+ mon->frame_visible_width / 4);
+
+ shortdisplay = mon->line_time / 2 - (mon->half_sync * 2 +
+ mon->back_porch + frontporch);
+ broadpulse = mon->line_time / 2 - frontporch;
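+	/*
+	 * Note, by rearranging the expressions above: half_sync*2 +
+	 * back_porch + frame_visible_width/4 + frontporch add up to
+	 * exactly one line_time, while shortdisplay and broadpulse are
+	 * the remainders of half a line_time after the sync/porch terms.
+	 */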
+
+ ims332_write_register( regs, IMS332_REG_HALF_SYNCH, mon->half_sync);
+ ims332_write_register( regs, IMS332_REG_BACK_PORCH, mon->back_porch);
+ ims332_write_register( regs, IMS332_REG_DISPLAY,
+ mon->frame_visible_width / 4);
+ ims332_write_register( regs, IMS332_REG_SHORT_DIS, shortdisplay);
+ ims332_write_register( regs, IMS332_REG_BROAD_PULSE, broadpulse);
+ ims332_write_register( regs, IMS332_REG_V_SYNC, mon->v_sync * 2);
+ ims332_write_register( regs, IMS332_REG_V_PRE_EQUALIZE,
+ mon->v_pre_equalize);
+ ims332_write_register( regs, IMS332_REG_V_POST_EQUALIZE,
+ mon->v_post_equalize);
+ ims332_write_register( regs, IMS332_REG_V_BLANK, mon->v_blank * 2);
+ ims332_write_register( regs, IMS332_REG_V_DISPLAY,
+ mon->frame_visible_height * 2);
+ ims332_write_register( regs, IMS332_REG_LINE_TIME, mon->line_time);
+ ims332_write_register( regs, IMS332_REG_LINE_START, mon->line_start);
+ ims332_write_register( regs, IMS332_REG_MEM_INIT, mon->mem_init);
+ ims332_write_register( regs, IMS332_REG_XFER_DELAY, mon->xfer_delay);
+
+ ims332_write_register( regs, IMS332_REG_COLOR_MASK, 0xffffff);
+
+ ims332_init_colormap( regs );
+
+ ims332_write_register(regs, IMS332_REG_CSR_A,
+ IMS332_BPP_8 | IMS332_CSR_A_DMA_DISABLE | IMS332_CSR_A_VTG_ENABLE);
+
+}
diff --git a/chips/ims332.h b/chips/ims332.h
new file mode 100644
index 00000000..edb2302e
--- /dev/null
+++ b/chips/ims332.h
@@ -0,0 +1,137 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ims332.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/92
+ *
+ * Defines for the Inmos IMS-G332 Colour video controller
+ */
+
+
+#ifndef _CHIPS_IMS332_H_
+#define _CHIPS_IMS332_H_ 1
+
+/*
+ * Although the chip is built to be memory-mapped
+ * it can be programmed for 32 or 64 bit addressing.
+ * Moreover, the hardware bits have been twisted
+ * even more on the machine I am writing this for.
+ * So I'll just define the chip's offsets and leave
+ * it to the implementation to define the rest.
+ */
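+
+/*
+ * (See ims332_read_register()/ims332_write_register() in ims332.c for
+ * one such implementation, the MAXINE address packing.)
+ */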
+
+#define IMS332_REG_BOOT 0x000 /* boot time config */
+
+#define IMS332_REG_HALF_SYNCH 0x021 /* datapath registers */
+#define IMS332_REG_BACK_PORCH 0x022
+#define IMS332_REG_DISPLAY 0x023
+#define IMS332_REG_SHORT_DIS 0x024
+#define IMS332_REG_BROAD_PULSE 0x025
+#define IMS332_REG_V_SYNC 0x026
+#define IMS332_REG_V_PRE_EQUALIZE 0x027
+#define IMS332_REG_V_POST_EQUALIZE 0x028
+#define IMS332_REG_V_BLANK 0x029
+#define IMS332_REG_V_DISPLAY 0x02a
+#define IMS332_REG_LINE_TIME 0x02b
+#define IMS332_REG_LINE_START 0x02c
+#define IMS332_REG_MEM_INIT 0x02d
+#define IMS332_REG_XFER_DELAY 0x02e
+
+#define IMS332_REG_COLOR_MASK 0x040 /* color mask register */
+
+#define IMS332_REG_CSR_A 0x060
+
+#define IMS332_REG_CSR_B 0x070
+
+#define IMS332_REG_TOP_SCREEN 0x080 /* top-of-screen offset */
+
+#define IMS332_REG_CURSOR_LUT_0 0x0a1 /* cursor palette */
+#define IMS332_REG_CURSOR_LUT_1 0x0a2
+#define IMS332_REG_CURSOR_LUT_2 0x0a3
+
+#define IMS332_REG_RGB_CKSUM_0 0x0c0 /* test registers */
+#define IMS332_REG_RGB_CKSUM_1 0x0c1
+#define IMS332_REG_RGB_CKSUM_2 0x0c2
+
+#define IMS332_REG_CURSOR_LOC 0x0c7 /* cursor location */
+
+#define IMS332_REG_LUT_BASE 0x100 /* color palette */
+#define IMS332_REG_LUT_END 0x1ff
+
+#define IMS332_REG_CURSOR_RAM 0x200 /* cursor bitmap */
+#define IMS332_REG_CURSOR_RAM_END 0x3ff
+
+/*
+ * Control register A
+ */
+
+#define IMS332_CSR_A_VTG_ENABLE 0x000001 /* vertical timing generator */
+#define IMS332_CSR_A_INTERLACED 0x000002 /* screen format */
+#define IMS332_CSR_A_CCIR 0x000004 /* default is EIA */
+#define IMS332_CSR_A_SLAVE_SYNC 0x000008 /* else from our pll */
+#define IMS332_CSR_A_PLAIN_SYNC 0x000010 /* else tesselated */
+#define IMS332_CSR_A_SEPARATE_SYNC 0x000020 /* else composite */
+#define IMS332_CSR_A_VIDEO_ONLY 0x000040 /* else video+sync */
+#define IMS332_CSR_A_BLANK_PEDESTAL 0x000080 /* blank level */
+#define IMS332_CSR_A_CBLANK_IS_OUT 0x000100
+#define IMS332_CSR_A_CBLANK_NO_DELAY 0x000200
+#define IMS332_CSR_A_FORCE_BLANK 0x000400
+#define IMS332_CSR_A_BLANK_DISABLE 0x000800
+#define IMS332_CSR_A_VRAM_INCREMENT 0x003000
+# define IMS332_VRAM_INC_1 0x000000
+# define IMS332_VRAM_INC_256 0x001000 /* except interlaced->2 */
+# define IMS332_VRAM_INC_512 0x002000
+# define IMS332_VRAM_INC_1024 0x003000
+#define IMS332_CSR_A_DMA_DISABLE 0x004000
+#define IMS332_CSR_A_SYNC_DELAY_MASK 0x038000 /* 0-7 VTG clk delays */
+#define IMS332_CSR_A_PIXEL_INTERLEAVE 0x040000
+#define IMS332_CSR_A_DELAYED_SAMPLING 0x080000
+#define IMS332_CSR_A_BITS_PER_PIXEL 0x700000
+# define IMS332_BPP_1 0x000000
+# define IMS332_BPP_2 0x100000
+# define IMS332_BPP_4 0x200000
+# define IMS332_BPP_8 0x300000
+# define IMS332_BPP_15 0x400000
+# define IMS332_BPP_16 0x500000
+#define IMS332_CSR_A_DISABLE_CURSOR 0x800000
+
+
+/*
+ * Control register B is mbz
+ */
+
+/*
+ * Boot register
+ */
+
+#define IMS332_BOOT_PLL 0x00001f /* xPLL, binary */
+#define IMS332_BOOT_CLOCK_PLL 0x000020 /* else xternal */
+#define IMS332_BOOT_64_BIT_MODE 0x000040 /* else 32 */
+#define IMS332_BOOT_xxx 0xffff80 /* reserved, mbz */
+
+
+#endif /* _CHIPS_IMS332_H_ */
diff --git a/chips/isdn_79c30.h b/chips/isdn_79c30.h
new file mode 100644
index 00000000..2e7b5d76
--- /dev/null
+++ b/chips/isdn_79c30.h
@@ -0,0 +1,165 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*-
+ * Copyright (c) 1991, 1992 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Computer Systems
+ * Engineering Group at Lawrence Berkeley Laboratory.
+ * 4. The name of the Laboratory may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Bit encodings for chip commands from "Microprocessor Access Guide for
+ * Indirect Registers", p.19 Am79C30A/32A Advanced Micro Devices spec
+ * sheet (preliminary).
+ *
+ * Indirect register numbers (the value written into cr to select a given
+ * chip registers) have the form AMDR_*. Register fields look like AMD_*.
+ */
+
+typedef struct {
+ volatile unsigned char cr; /* command register (wo) */
+#define ir cr /* interrupt register (ro) */
+ volatile unsigned char dr; /* data register (rw) */
+ volatile unsigned char dsr1; /* D-channel status register 1 (ro) */
+ volatile unsigned char der; /* D-channel error register (ro) */
+ volatile unsigned char dctb; /* D-channel transmit register (wo) */
+#define dcrb dctb /* D-channel receive register (ro) */
+ volatile unsigned char bbtb; /* Bb-channel transmit register (wo) */
+#define bbrb bbtb /* Bb-channel receive register (ro) */
+ volatile unsigned char bctb; /* Bc-channel transmit register (wo)*/
+#define bcrb bctb /* Bc-channel receive register (ro) */
+ volatile unsigned char dsr2; /* D-channel status register 2 (ro) */
+} amd79c30_regmap_t;
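+
+/*
+ * Sketch of the indirect access pattern (illustrative, not a complete
+ * programming sequence): select a register by writing its AMDR_* index
+ * to cr, then move the data byte(s) through dr, e.g.
+ *
+ *	regs->cr = AMDR_MAP_MMR1;
+ *	regs->dr = AMD_MMR1_GX | AMD_MMR1_GER;
+ */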
+
+#define AMDR_INIT 0x21
+#define AMD_INIT_PMS_IDLE 0x00
+#define AMD_INIT_PMS_ACTIVE 0x01
+#define AMD_INIT_PMS_ACTIVE_DATA 0x02
+#define AMD_INIT_INT_DISABLE (0x01 << 2)
+#define AMD_INIT_CDS_DIV2 (0x00 << 3)
+#define AMD_INIT_CDS_DIV1 (0x01 << 3)
+#define AMD_INIT_CDS_DIV4 (0x02 << 3)
+#define AMD_INIT_AS_RX (0x01 << 6)
+#define AMD_INIT_AS_TX (0x01 << 7)
+
+#define AMDR_LIU_LSR 0xa1
+#define AMDR_LIU_LPR 0xa2
+#define AMDR_LIU_LMR1 0xa3
+#define AMDR_LIU_LMR2 0xa4
+#define AMDR_LIU_2_4 0xa5
+#define AMDR_LIU_MF 0xa6
+#define AMDR_LIU_MFSB 0xa7
+#define AMDR_LIU_MFQB 0xa8
+
+#define AMDR_MUX_MCR1 0x41
+#define AMDR_MUX_MCR2 0x42
+#define AMDR_MUX_MCR3 0x43
+#define AMD_MCRCHAN_NC 0x00
+#define AMD_MCRCHAN_B1 0x01
+#define AMD_MCRCHAN_B2 0x02
+#define AMD_MCRCHAN_BA 0x03
+#define AMD_MCRCHAN_BB 0x04
+#define AMD_MCRCHAN_BC 0x05
+#define AMD_MCRCHAN_BD 0x06
+#define AMD_MCRCHAN_BE 0x07
+#define AMD_MCRCHAN_BF 0x08
+#define AMDR_MUX_MCR4 0x44
+#define AMD_MCR4_INT_ENABLE (1 << 3)
+#define AMD_MCR4_SWAPBB (1 << 4)
+#define AMD_MCR4_SWAPBC (1 << 5)
+
+#define AMDR_MUX_1_4 0x45
+
+#define AMDR_MAP_X 0x61
+#define AMDR_MAP_R 0x62
+#define AMDR_MAP_GX 0x63
+#define AMDR_MAP_GR 0x64
+#define AMDR_MAP_GER 0x65
+#define AMDR_MAP_STG 0x66
+#define AMDR_MAP_FTGR 0x67
+#define AMDR_MAP_ATGR 0x68
+#define AMDR_MAP_MMR1 0x69
+#define AMD_MMR1_ALAW 0x01
+#define AMD_MMR1_GX 0x02
+#define AMD_MMR1_GR 0x04
+#define AMD_MMR1_GER 0x08
+#define AMD_MMR1_X 0x10
+#define AMD_MMR1_R 0x20
+#define AMD_MMR1_STG 0x40
+#define AMD_MMR1_LOOP 0x80
+#define AMDR_MAP_MMR2 0x6a
+#define AMD_MMR2_AINB 0x01
+#define AMD_MMR2_LS 0x02
+#define AMD_MMR2_DTMF 0x04
+#define AMD_MMR2_GEN 0x08
+#define AMD_MMR2_RNG 0x10
+#define AMD_MMR2_DIS_HPF 0x20
+#define AMD_MMR2_DIS_AZ 0x40
+#define AMDR_MAP_1_10 0x6b
+
+#define AMDR_DLC_FRAR123 0x81
+#define AMDR_DLC_SRAR123 0x82
+#define AMDR_DLC_TAR 0x83
+#define AMDR_DLC_DRLR 0x84
+#define AMDR_DLC_DTCR 0x85
+#define AMDR_DLC_DMR1 0x86
+#define AMDR_DLC_DMR2 0x87
+#define AMDR_DLC_1_7 0x88
+#define AMDR_DLC_DRCR 0x89
+#define AMDR_DLC_RNGR1 0x8a
+#define AMDR_DLC_RNGR2 0x8b
+#define AMDR_DLC_FRAR4 0x8c
+#define AMDR_DLC_SRAR4 0x8d
+#define AMDR_DLC_DMR3 0x8e
+#define AMDR_DLC_DMR4 0x8f
+#define AMDR_DLC_12_15 0x90
+#define AMDR_DLC_ASR 0x91
diff --git a/chips/isdn_79c30_hdw.c b/chips/isdn_79c30_hdw.c
new file mode 100644
index 00000000..769d1cb1
--- /dev/null
+++ b/chips/isdn_79c30_hdw.c
@@ -0,0 +1,602 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: isdn_79c30_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/92
+ *
+ * Driver for the AMD 79c30 ISDN (Integrated Speech and
+ * Data Network) controller chip.
+ */
+
+/*-
+ * Copyright (c) 1991, 1992 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Computer Systems
+ * Engineering Group at Lawrence Berkeley Laboratory.
+ * 4. The name of the Laboratory may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <isdn.h>
+#if NISDN > 0
+
+#include <platforms.h>
+
+#include <mach/std_types.h>
+#include <machine/machspl.h>
+#include <sys/ioctl.h> /* for Sun compat */
+#include <chips/busses.h>
+
+#include <device/device_types.h>
+#include <device/io_req.h>
+#include <device/audio_status.h>
+#include <chips/audio_defs.h>
+
+#include <chips/isdn_79c30.h>
+
+#include <chips/audio_config.h> /* machdep config */
+
+#define private static
+
+/*
+ * Autoconf info
+ */
+private int isdn_probe (vm_offset_t reg, struct bus_ctlr *ui);
+private void isdn_attach ( struct bus_device *ui);
+
+private vm_offset_t isdn_std[NISDN] = { 0 };
+private struct bus_device *isdn_info[NISDN];
+
+struct bus_driver isdn_driver =
+ { isdn_probe, 0, isdn_attach, 0, isdn_std, "isdn", isdn_info, };
+
+
+/*
+ * Externally visible functions and data
+ */
+int isdn_intr();
+
+
+/*
+ * Status bookkeeping and globals
+ */
+typedef struct {
+ amd79c30_padded_regs_t *regs;
+ void *audio_status; /* for upcalls */
+ struct mapreg sc_map; /* MAP status */
+ /*
+ * keep track of levels so we don't have to convert back from
+ * MAP gain constants
+ */
+ int sc_rlevel; /* record level */
+ int sc_plevel; /* play level */
+ int sc_mlevel; /* monitor level */
+} isdn_softc_t;
+
+isdn_softc_t isdn_softc_data[NISDN];
+isdn_softc_t *isdn_softc[NISDN];
+
+private int audio_default_level = 150;
+
+
+/*
+ * Forward decls
+ */
+audio_switch_t isdn_ops;
+
+private void isdn_init( isdn_softc_t *sc );
+
+private void isdn_set_mmr2(
+ register amd79c30_padded_regs_t *regs,
+ register int mmr2);
+
+private void isdn_setgains(
+ isdn_softc_t *sc,
+ int pgain,
+ int rgain,
+ int mgain);
+
+/*
+ * Probe chip to see if it is there
+ */
+private int isdn_probe(
+ vm_offset_t reg,
+ struct bus_ctlr *ui)
+{
+ isdn_softc_t *sc = &isdn_softc_data[ui->unit];
+
+ isdn_softc[ui->unit] = sc;
+ sc->regs = (amd79c30_padded_regs_t *)reg;
+
+ return 1;
+}
+
+/*
+ * Attach device to chip-indep driver(s)
+ */
+private void
+isdn_attach(
+ struct bus_device *ui)
+{
+ register isdn_softc_t *sc = isdn_softc[ui->unit];
+ register amd79c30_padded_regs_t *regs = sc->regs;
+
+ /* disable interrupts */
+ write_reg(regs->cr, AMDR_INIT);
+ write_reg(regs->dr, AMD_INIT_PMS_ACTIVE | AMD_INIT_INT_DISABLE);
+
+ /*
+ * Initialize the mux unit. We use MCR3 to route audio (MAP)
+ * through channel Bb. MCR1 and MCR2 are unused.
+ * Setting the INT enable bit in MCR4 will generate an interrupt
+ * on each converted audio sample.
+ */
+ write_reg(regs->cr, AMDR_MUX_1_4);
+ write_reg(regs->dr, 0);
+ write_reg(regs->dr, 0);
+ write_reg(regs->dr, (AMD_MCRCHAN_BB << 4) | AMD_MCRCHAN_BA);
+ write_reg(regs->dr, AMD_MCR4_INT_ENABLE);
+
+ printf(" AMD 79C30A/79C32A");
+
+ audio_attach( sc, &isdn_ops, &sc->audio_status );
+}
+
+/*
+ * Chip re-initialization
+ */
+private void
+isdn_init(
+ isdn_softc_t *sc)
+{
+ register amd79c30_padded_regs_t *regs;
+
+ bzero((char *)&sc->sc_map, sizeof sc->sc_map);
+ /* default to speaker */
+ sc->sc_map.mr_mmr2 = AMD_MMR2_AINB | AMD_MMR2_LS;
+
+ /* enable interrupts and set parameters established above */
+ regs = sc->regs;
+ isdn_set_mmr2 (regs, sc->sc_map.mr_mmr2);
+ isdn_setgains (sc, audio_default_level, audio_default_level, 0);
+ write_reg(regs->cr, AMDR_INIT);
+ write_reg(regs->dr, AMD_INIT_PMS_ACTIVE);
+}
+
+/*
+ * Chip shutdown
+ */
+private void
+isdn_close(
+ isdn_softc_t *sc)
+{
+ register amd79c30_padded_regs_t *regs;
+
+ regs = sc->regs;
+ write_reg(regs->cr, AMDR_INIT);
+ write_reg(regs->dr, AMD_INIT_PMS_ACTIVE | AMD_INIT_INT_DISABLE);
+}
+
+/*
+ * Audio port selection
+ */
+private void
+isdn_setport(
+ isdn_softc_t *sc,
+ int port)
+{
+ if (port == AUDIO_SPEAKER) {
+ sc->sc_map.mr_mmr2 |= AMD_MMR2_LS;
+ isdn_set_mmr2(sc->regs, sc->sc_map.mr_mmr2);
+ } else if (port == AUDIO_HEADPHONE) {
+ sc->sc_map.mr_mmr2 &=~ AMD_MMR2_LS;
+ isdn_set_mmr2(sc->regs, sc->sc_map.mr_mmr2);
+ }
+}
+
+private int
+isdn_getport(
+ isdn_softc_t *sc)
+{
+ return (sc->sc_map.mr_mmr2 & AMD_MMR2_LS) ?
+ AUDIO_SPEAKER : AUDIO_HEADPHONE;
+}
+
+/*
+ * Volume control
+ */
+private void
+isdn_setgains(
+ isdn_softc_t *sc,
+ int pgain,
+ int rgain,
+ int mgain)
+{
+ private void isdn_set_pgain(), isdn_set_rgain(), isdn_set_mgain();
+
+ if (pgain != ~0)
+ isdn_set_pgain(sc, pgain);
+ if (rgain != ~0)
+ isdn_set_rgain(sc, rgain);
+ if (mgain != ~0)
+ isdn_set_mgain(sc, mgain);
+
+}
+
+private void
+isdn_getgains(
+ isdn_softc_t *sc,
+ int *pgain,
+ int *rgain,
+ int *mgain)
+{
+ *mgain = sc->sc_mlevel;
+ *rgain = sc->sc_rlevel;
+ *pgain = sc->sc_plevel;
+}
+
+
+/*
+ * User control over MAP processor
+ */
+private io_return_t
+isdn_setstate(
+ isdn_softc_t *sc,
+ dev_flavor_t flavor,
+ register struct mapreg *map,
+ natural_t n_ints)
+{
+ register amd79c30_padded_regs_t *regs = sc->regs;
+ register int i, v;
+ spl_t s;
+
+ /* Sun compat */
+ if (flavor == AUDIOSETREG) {
+ register struct audio_ioctl *a = (struct audio_ioctl *)map;
+ s = splaudio();
+ write_reg(regs->cr, (a->control >> 8) & 0xff);
+ for (i = 0; i < (a->control & 0xff); i++) {
+ write_reg(regs->dr, a->data[i]);
+ }
+ splx(s);
+ return D_SUCCESS;
+ }
+
+ if (flavor != AUDIO_SETMAP)
+ return D_INVALID_OPERATION;
+
+ if ((n_ints * sizeof(int)) < sizeof(*map))
+ return D_INVALID_SIZE;
+
+ bcopy(map, &sc->sc_map, sizeof(sc->sc_map));
+ sc->sc_map.mr_mmr2 &= 0x7f;
+
+ s = splaudio();
+ write_reg(regs->cr, AMDR_MAP_1_10);
+ for (i = 0; i < 8; i++) {
+ v = map->mr_x[i];
+ WAMD16(regs, v);
+ }
+ for (i = 0; i < 8; ++i) {
+ v = map->mr_r[i];
+ WAMD16(regs, v);
+ }
+ v = map->mr_gx; WAMD16(regs, v);
+ v = map->mr_gr; WAMD16(regs, v);
+ v = map->mr_ger; WAMD16(regs, v);
+ v = map->mr_stgr; WAMD16(regs, v);
+ v = map->mr_ftgr; WAMD16(regs, v);
+ v = map->mr_atgr; WAMD16(regs, v);
+ write_reg(regs->dr, map->mr_mmr1);
+ write_reg(regs->dr, map->mr_mmr2);
+ splx(s);
+ return D_SUCCESS;
+}
+
+private io_return_t
+isdn_getstate(
+ isdn_softc_t *sc,
+ dev_flavor_t flavor,
+ register struct mapreg *map,
+ natural_t *count)
+{
+ register amd79c30_padded_regs_t *regs = sc->regs;
+ spl_t s;
+ int i;
+
+ /* Sun compat */
+ if (flavor == AUDIOGETREG) {
+ register struct audio_ioctl *a = (struct audio_ioctl *)map;
+ s = splaudio();
+ write_reg(regs->cr, (a->control >> 8) & 0xff);
+ for (i = 0; i < (a->control & 0xff); i++) {
+ read_reg(regs->dr,a->data[i]);
+ }
+ splx(s);
+ *count = sizeof(*a) / sizeof(int);
+ return D_SUCCESS;
+ }
+
+ if ( (*count * sizeof(int)) < sizeof(*map))
+ return D_INVALID_SIZE;
+ bcopy(&sc->sc_map, map, sizeof(sc->sc_map));
+ *count = sizeof(*map) / sizeof(int);
+ return D_SUCCESS;
+}
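In the Sun-compatible AUDIOSETREG/AUDIOGETREG paths above, the high byte of control selects the chip register and the low byte gives the number of data bytes to move through dr. A hypothetical caller could therefore build a request like this (a sketch; struct audio_ioctl and its control/data fields are taken from the code above, the helper name is made up):

	/* Sketch: a one-byte AUDIOSETREG write of MMR2. */
	static void
	fill_mmr2_request(struct audio_ioctl *a)
	{
		a->control = (AMDR_MAP_MMR2 << 8) | 1;	/* register id in high byte, byte count in low */
		a->data[0] = AMD_MMR2_LS;		/* e.g. send output to the loudspeaker */
	}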
+
+
+
+/*
+ * Set the mmr1 register and one other 16 bit register in the audio chip.
+ * The other register is indicated by op and val.
+ */
+private void
+isdn_set_mmr1(
+ register amd79c30_padded_regs_t *regs,
+ register int mmr1,
+ register int op,
+ register int val)
+{
+ register int s = splaudio();
+
+ write_reg(regs->cr, AMDR_MAP_MMR1);
+ write_reg(regs->dr, mmr1);
+ write_reg(regs->cr, op);
+ WAMD16(regs, val);
+ splx(s);
+}
+
+/*
+ * Set the mmr2 register.
+ */
+private void
+isdn_set_mmr2(
+ register amd79c30_padded_regs_t *regs,
+ register int mmr2)
+{
+ register int s = splaudio();
+
+ write_reg(regs->cr, AMDR_MAP_MMR2);
+ write_reg(regs->dr, mmr2);
+ splx(s);
+}
+
+/*
+ * gx, gr & stg gains. This table must contain 256 elements, with
+ * the 0th being "infinity" (the magic value 9008). The remaining
+ * elements match Sun's gain curve (but with higher resolution):
+ * -18 to 0 dB in .16 dB steps, then 0 to 12 dB in .08 dB steps.
+ */
+private const unsigned short gx_coeff[256] = {
+ 0x9008, 0x8b7c, 0x8b51, 0x8b45, 0x8b42, 0x8b3b, 0x8b36, 0x8b33,
+ 0x8b32, 0x8b2a, 0x8b2b, 0x8b2c, 0x8b25, 0x8b23, 0x8b22, 0x8b22,
+ 0x9122, 0x8b1a, 0x8aa3, 0x8aa3, 0x8b1c, 0x8aa6, 0x912d, 0x912b,
+ 0x8aab, 0x8b12, 0x8aaa, 0x8ab2, 0x9132, 0x8ab4, 0x913c, 0x8abb,
+ 0x9142, 0x9144, 0x9151, 0x8ad5, 0x8aeb, 0x8a79, 0x8a5a, 0x8a4a,
+ 0x8b03, 0x91c2, 0x91bb, 0x8a3f, 0x8a33, 0x91b2, 0x9212, 0x9213,
+ 0x8a2c, 0x921d, 0x8a23, 0x921a, 0x9222, 0x9223, 0x922d, 0x9231,
+ 0x9234, 0x9242, 0x925b, 0x92dd, 0x92c1, 0x92b3, 0x92ab, 0x92a4,
+ 0x92a2, 0x932b, 0x9341, 0x93d3, 0x93b2, 0x93a2, 0x943c, 0x94b2,
+ 0x953a, 0x9653, 0x9782, 0x9e21, 0x9d23, 0x9cd2, 0x9c23, 0x9baa,
+ 0x9bde, 0x9b33, 0x9b22, 0x9b1d, 0x9ab2, 0xa142, 0xa1e5, 0x9a3b,
+ 0xa213, 0xa1a2, 0xa231, 0xa2eb, 0xa313, 0xa334, 0xa421, 0xa54b,
+ 0xada4, 0xac23, 0xab3b, 0xaaab, 0xaa5c, 0xb1a3, 0xb2ca, 0xb3bd,
+ 0xbe24, 0xbb2b, 0xba33, 0xc32b, 0xcb5a, 0xd2a2, 0xe31d, 0x0808,
+ 0x72ba, 0x62c2, 0x5c32, 0x52db, 0x513e, 0x4cce, 0x43b2, 0x4243,
+ 0x41b4, 0x3b12, 0x3bc3, 0x3df2, 0x34bd, 0x3334, 0x32c2, 0x3224,
+ 0x31aa, 0x2a7b, 0x2aaa, 0x2b23, 0x2bba, 0x2c42, 0x2e23, 0x25bb,
+ 0x242b, 0x240f, 0x231a, 0x22bb, 0x2241, 0x2223, 0x221f, 0x1a33,
+ 0x1a4a, 0x1acd, 0x2132, 0x1b1b, 0x1b2c, 0x1b62, 0x1c12, 0x1c32,
+ 0x1d1b, 0x1e71, 0x16b1, 0x1522, 0x1434, 0x1412, 0x1352, 0x1323,
+ 0x1315, 0x12bc, 0x127a, 0x1235, 0x1226, 0x11a2, 0x1216, 0x0a2a,
+ 0x11bc, 0x11d1, 0x1163, 0x0ac2, 0x0ab2, 0x0aab, 0x0b1b, 0x0b23,
+ 0x0b33, 0x0c0f, 0x0bb3, 0x0c1b, 0x0c3e, 0x0cb1, 0x0d4c, 0x0ec1,
+ 0x079a, 0x0614, 0x0521, 0x047c, 0x0422, 0x03b1, 0x03e3, 0x0333,
+ 0x0322, 0x031c, 0x02aa, 0x02ba, 0x02f2, 0x0242, 0x0232, 0x0227,
+ 0x0222, 0x021b, 0x01ad, 0x0212, 0x01b2, 0x01bb, 0x01cb, 0x01f6,
+ 0x0152, 0x013a, 0x0133, 0x0131, 0x012c, 0x0123, 0x0122, 0x00a2,
+ 0x011b, 0x011e, 0x0114, 0x00b1, 0x00aa, 0x00b3, 0x00bd, 0x00ba,
+ 0x00c5, 0x00d3, 0x00f3, 0x0062, 0x0051, 0x0042, 0x003b, 0x0033,
+ 0x0032, 0x002a, 0x002c, 0x0025, 0x0023, 0x0022, 0x001a, 0x0021,
+ 0x001b, 0x001b, 0x001d, 0x0015, 0x0013, 0x0013, 0x0012, 0x0012,
+ 0x000a, 0x000a, 0x0011, 0x0011, 0x000b, 0x000b, 0x000c, 0x000e,
+};
+
+/*
+ * second stage play gain.
+ */
+private const unsigned short ger_coeff[] = {
+ 0x431f, /* 5. dB */
+ 0x331f, /* 5.5 dB */
+ 0x40dd, /* 6. dB */
+ 0x11dd, /* 6.5 dB */
+ 0x440f, /* 7. dB */
+ 0x411f, /* 7.5 dB */
+ 0x311f, /* 8. dB */
+ 0x5520, /* 8.5 dB */
+ 0x10dd, /* 9. dB */
+ 0x4211, /* 9.5 dB */
+ 0x410f, /* 10. dB */
+ 0x111f, /* 10.5 dB */
+ 0x600b, /* 11. dB */
+ 0x00dd, /* 11.5 dB */
+ 0x4210, /* 12. dB */
+ 0x110f, /* 13. dB */
+ 0x7200, /* 14. dB */
+ 0x2110, /* 15. dB */
+ 0x2200, /* 15.9 dB */
+ 0x000b, /* 16.9 dB */
+ 0x000f /* 18. dB */
+#define NGER (sizeof(ger_coeff) / sizeof(ger_coeff[0]))
+};
+
+private void
+isdn_set_rgain(
+ register isdn_softc_t *sc,
+ register int level)
+{
+ level &= 0xff;
+ sc->sc_rlevel = level;
+ sc->sc_map.mr_mmr1 |= AMD_MMR1_GX;
+ sc->sc_map.mr_gx = gx_coeff[level];
+ isdn_set_mmr1(sc->regs, sc->sc_map.mr_mmr1,
+ AMDR_MAP_GX, sc->sc_map.mr_gx);
+}
+
+private void
+isdn_set_pgain(
+ register isdn_softc_t *sc,
+ register int level)
+{
+ register int gi, s;
+ register amd79c30_padded_regs_t *regs;
+
+ level &= 0xff;
+ sc->sc_plevel = level;
+ sc->sc_map.mr_mmr1 |= AMD_MMR1_GER|AMD_MMR1_GR;
+ level *= 256 + NGER;
+ level >>= 8;
+ if (level >= 256) {
+ gi = level - 256;
+ level = 255;
+ } else
+ gi = 0;
+ sc->sc_map.mr_ger = ger_coeff[gi];
+ sc->sc_map.mr_gr = gx_coeff[level];
+
+ regs = sc->regs;
+ s = splaudio();
+ write_reg(regs->cr, AMDR_MAP_MMR1);
+ write_reg(regs->dr, sc->sc_map.mr_mmr1);
+ write_reg(regs->cr, AMDR_MAP_GR);
+ gi = sc->sc_map.mr_gr;
+ WAMD16(regs, gi);
+ write_reg(regs->cr, AMDR_MAP_GER);
+ gi = sc->sc_map.mr_ger;
+ WAMD16(regs, gi);
+ splx(s);
+}
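For reference, the scaling above spreads the 0..255 play level over both gain tables; a worked trace (NGER = 21, so the multiplier is 256 + 21 = 277):

	level = 255:  255 * 277 >> 8 = 275  ->  gi = 19 (the 16.9 dB entry), gr = gx_coeff[255]
	level = 128:  128 * 277 >> 8 = 138  ->  gi = 0  (the 5.0 dB entry),  gr = gx_coeff[138]

so ger_coeff only rises above its first (5 dB) entry near the top of the range; below that the level is shaped by the gx curve alone.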
+
+private void
+isdn_set_mgain(
+ register isdn_softc_t *sc,
+ register int level)
+{
+ level &= 0xff;
+ sc->sc_mlevel = level;
+ sc->sc_map.mr_mmr1 |= AMD_MMR1_STG;
+ sc->sc_map.mr_stgr = gx_coeff[level];
+ isdn_set_mmr1(sc->regs, sc->sc_map.mr_mmr1,
+ AMDR_MAP_STG, sc->sc_map.mr_stgr);
+}
+
+/*
+ * Interrupt routine
+ */
+#if old
+isdn_intr (unit, spllevel)
+ spl_t spllevel;
+{
+#ifdef MAXINE
+ xine_enable_interrupt(7, 0, 0);
+#endif
+#ifdef FLAMINGO
+ kn15aa_enable_interrupt(12, 0, 0);
+#endif
+ printf("ISDN interrupt");
+}
+#else
+isdn_intr (unit, spllevel)
+ spl_t spllevel;
+{
+ isdn_softc_t *sc = isdn_softc[unit];
+ amd79c30_padded_regs_t *regs = sc->regs;
+ register int i;
+ unsigned int c;
+
+ read_reg(regs->ir, i); mb(); /* clear interrupt, now */
+#if mips
+ splx(spllevel); /* drop priority */
+#endif
+
+#if 0
+ if (..this is an audio interrupt..)
+#endif
+ {
+ read_reg(regs->bbrb, c);
+ if (audio_hwintr(sc->audio_status, c, &c))
+ write_reg(regs->bbtb, c);
+ }
+}
+#endif
+
+
+
+/*
+ * Standard operations vector
+ */
+audio_switch_t isdn_ops = {
+ isdn_init,
+ isdn_close,
+ isdn_setport,
+ isdn_getport,
+ isdn_setgains,
+ isdn_getgains,
+ isdn_setstate,
+ isdn_getstate
+};
+
+#if 1
+write_an_int(int *where, int what) { *where = what;}
+read_an_int(int *where) { return *where;}
+#endif
+
+#endif
diff --git a/chips/kernel_font.c b/chips/kernel_font.c
new file mode 100644
index 00000000..71c52c48
--- /dev/null
+++ b/chips/kernel_font.c
@@ -0,0 +1,3083 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * THIS FILE WAS GENERATED BY build_font FROM kernel_font.data
+ * IF YOU NEED TO, BE SURE YOU EDIT THE REAL THING!
+ */
+/*
+ * Object:
+ * kfont_7x14 EXPORTED array
+ *
+ * Kernel font for printable ASCII chars
+ *
+ * The smallest index in this array corresponds to a
+ * space. So, we start at 0x20 in the ascii table.
+ * Note that glyphs are mirrored (byte order, I think);
+ * the commented bitmaps show what they really look like.
+ */
+
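For orientation, each glyph below is stored as 15 consecutive bytes, one per scanline, in the order the rows are listed; a display driver would index the table roughly like this (a sketch under that assumption, not code from this commit):

	/* Sketch: return the 15-byte bitmap for printable character c. */
	unsigned char *
	kfont_glyph(unsigned int c)
	{
		extern unsigned char kfont_7x14[];

		if (c < 0x20)
			c = 0x20;			/* table starts at space */
		return &kfont_7x14[(c - 0x20) * 15];	/* 15 rows per glyph */
	}

Within each byte the bit order is reversed with respect to the commented bitmaps, so a row drawn as 00010000 is stored as 0x08.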
+unsigned char kfont_7x14[] = {
+/* 0 ' ' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 1 '!' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 2 '"' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 3 '#' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01001000 */ 0x12,
+ /* 01001000 */ 0x12,
+ /* 11111100 */ 0x3f,
+ /* 01001000 */ 0x12,
+ /* 01001000 */ 0x12,
+ /* 01001000 */ 0x12,
+ /* 01001000 */ 0x12,
+ /* 11111100 */ 0x3f,
+ /* 01001000 */ 0x12,
+ /* 01001000 */ 0x12,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 4 '$' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 01111110 */ 0x7e,
+ /* 10010000 */ 0x9,
+ /* 10010000 */ 0x9,
+ /* 01111100 */ 0x3e,
+ /* 00010010 */ 0x48,
+ /* 00010010 */ 0x48,
+ /* 11111100 */ 0x3f,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 5 '%' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01000010 */ 0x42,
+ /* 10100100 */ 0x25,
+ /* 01000100 */ 0x22,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 01000100 */ 0x22,
+ /* 01001010 */ 0x52,
+ /* 10000100 */ 0x21,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 6 '&' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01110000 */ 0xe,
+ /* 10001000 */ 0x11,
+ /* 10001000 */ 0x11,
+ /* 10001000 */ 0x11,
+ /* 01110000 */ 0xe,
+ /* 10001000 */ 0x11,
+ /* 10001010 */ 0x51,
+ /* 10000100 */ 0x21,
+ /* 10001100 */ 0x31,
+ /* 01110010 */ 0x4e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 7 ''' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00011000 */ 0x18,
+ /* 00011000 */ 0x18,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 8 '(' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 00100000 */ 0x4,
+ /* 00100000 */ 0x4,
+ /* 00100000 */ 0x4,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00001000 */ 0x10,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 9 ')' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00001000 */ 0x10,
+ /* 00001000 */ 0x10,
+ /* 00001000 */ 0x10,
+ /* 00001000 */ 0x10,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* a '*' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 10010010 */ 0x49,
+ /* 01010100 */ 0x2a,
+ /* 00111000 */ 0x1c,
+ /* 00111000 */ 0x1c,
+ /* 01010100 */ 0x2a,
+ /* 10010010 */ 0x49,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* b '+' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 11111110 */ 0x7f,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* c ',' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00110000 */ 0xc,
+ /* 01000000 */ 0x2,
+ /* 00000000 */ 0,
+/* d '-' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* e '.' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00110000 */ 0xc,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* f '/' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000010 */ 0x40,
+ /* 00000100 */ 0x20,
+ /* 00000100 */ 0x20,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 01000000 */ 0x2,
+ /* 01000000 */ 0x2,
+ /* 10000000 */ 0x1,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 10 '0' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00111000 */ 0x1c,
+ /* 01000100 */ 0x22,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01000100 */ 0x22,
+ /* 00111000 */ 0x1c,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 11 '1' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00110000 */ 0xc,
+ /* 01010000 */ 0xa,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 12 '2' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000100 */ 0x20,
+ /* 00011000 */ 0x18,
+ /* 00100000 */ 0x4,
+ /* 01000000 */ 0x2,
+ /* 10000000 */ 0x1,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 13 '3' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00011100 */ 0x38,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 14 '4' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000100 */ 0x20,
+ /* 00001100 */ 0x30,
+ /* 00010100 */ 0x28,
+ /* 00100100 */ 0x24,
+ /* 01000100 */ 0x22,
+ /* 10000100 */ 0x21,
+ /* 11111110 */ 0x7f,
+ /* 00000100 */ 0x20,
+ /* 00000100 */ 0x20,
+ /* 00000100 */ 0x20,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 15 '5' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111100 */ 0x3f,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 16 '6' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111100 */ 0x3f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 17 '7' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 10000010 */ 0x41,
+ /* 00000010 */ 0x40,
+ /* 00000100 */ 0x20,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 01000000 */ 0x2,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 18 '8' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 19 '9' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111110 */ 0x7e,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 1a ':' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00110000 */ 0xc,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00110000 */ 0xc,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 1b ';' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00110000 */ 0xc,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00110000 */ 0xc,
+ /* 01000000 */ 0x2,
+ /* 00000000 */ 0,
+/* 1c '<' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 01000000 */ 0x2,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00001000 */ 0x10,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 1d '=' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 1e '>' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00001000 */ 0x10,
+ /* 00000100 */ 0x20,
+ /* 00000010 */ 0x40,
+ /* 00000100 */ 0x20,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 1f '?' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000100 */ 0x20,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 20 '@' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00111100 */ 0x3c,
+ /* 01000010 */ 0x42,
+ /* 10011010 */ 0x59,
+ /* 10101010 */ 0x55,
+ /* 10101010 */ 0x55,
+ /* 10011100 */ 0x39,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000010 */ 0x42,
+ /* 00111100 */ 0x3c,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 21 'A' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00111000 */ 0x1c,
+ /* 01000100 */ 0x22,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 22 'B' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111100 */ 0x3f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111100 */ 0x3f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111100 */ 0x3f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 23 'C' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 24 'D' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111000 */ 0x1f,
+ /* 10000100 */ 0x21,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000100 */ 0x21,
+ /* 11111000 */ 0x1f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 25 'E' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111000 */ 0x1f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 26 'F' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111000 */ 0x1f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 27 'G' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10011110 */ 0x79,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 28 'H' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 29 'I' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 2a 'J' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 2b 'K' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000100 */ 0x21,
+ /* 10001000 */ 0x11,
+ /* 10010000 */ 0x9,
+ /* 10100000 */ 0x5,
+ /* 11000000 */ 0x3,
+ /* 10100000 */ 0x5,
+ /* 10010000 */ 0x9,
+ /* 10001000 */ 0x11,
+ /* 10000100 */ 0x21,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 2c 'L' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 2d 'M' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11000110 */ 0x63,
+ /* 10101010 */ 0x55,
+ /* 10010010 */ 0x49,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 2e 'N' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11000010 */ 0x43,
+ /* 10100010 */ 0x45,
+ /* 10010010 */ 0x49,
+ /* 10010010 */ 0x49,
+ /* 10001010 */ 0x51,
+ /* 10000110 */ 0x61,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 2f 'O' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 30 'P' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111100 */ 0x3f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111100 */ 0x3f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 31 'Q' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00111000 */ 0x1c,
+ /* 01000100 */ 0x22,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10001010 */ 0x51,
+ /* 01000100 */ 0x22,
+ /* 00111010 */ 0x5c,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 32 'R' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111100 */ 0x3f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111100 */ 0x3f,
+ /* 10100000 */ 0x5,
+ /* 10010000 */ 0x9,
+ /* 10001000 */ 0x11,
+ /* 10000100 */ 0x21,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 33 'S' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01111100 */ 0x3e,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 34 'T' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 35 'U' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 36 'V' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01000100 */ 0x22,
+ /* 01000100 */ 0x22,
+ /* 01000100 */ 0x22,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 37 'W' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10010010 */ 0x49,
+ /* 10010010 */ 0x49,
+ /* 10010010 */ 0x49,
+ /* 01101100 */ 0x36,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 38 'X' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 01000100 */ 0x22,
+ /* 01000100 */ 0x22,
+ /* 00101000 */ 0x14,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 01000100 */ 0x22,
+ /* 01000100 */ 0x22,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 39 'Y' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01000100 */ 0x22,
+ /* 00101000 */ 0x14,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 3a 'Z' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 00000010 */ 0x40,
+ /* 00000100 */ 0x20,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 01000000 */ 0x2,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 3b '[' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00011110 */ 0x78,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00011110 */ 0x78,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 3c '\' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00001000 */ 0x10,
+ /* 00000100 */ 0x20,
+ /* 00000100 */ 0x20,
+ /* 00000010 */ 0x40,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 3d ']' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11110000 */ 0xf,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 11110000 */ 0xf,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 3e '^' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 01000100 */ 0x22,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 3f '_' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 40 '`' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01100000 */ 0x6,
+ /* 01100000 */ 0x6,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 41 'a' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111000 */ 0x1e,
+ /* 00000100 */ 0x20,
+ /* 01111100 */ 0x3e,
+ /* 10000100 */ 0x21,
+ /* 10000100 */ 0x21,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 42 'b' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10111100 */ 0x3d,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10111100 */ 0x3d,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 43 'c' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 44 'd' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 01111010 */ 0x5e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 45 'e' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 46 'f' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010100 */ 0x28,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00111100 */ 0x3c,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 47 'g' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111010 */ 0x5e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111010 */ 0x5e,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 01111100 */ 0x3e,
+/* 48 'h' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10111100 */ 0x3d,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 49 'i' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 4a 'j' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01010000 */ 0xa,
+ /* 00100000 */ 0x4,
+/* 4b 'k' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000100 */ 0x21,
+ /* 10001000 */ 0x11,
+ /* 10010000 */ 0x9,
+ /* 10110000 */ 0xd,
+ /* 11001000 */ 0x13,
+ /* 10000100 */ 0x21,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 4c 'l' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 4d 'm' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10101100 */ 0x35,
+ /* 10010010 */ 0x49,
+ /* 10010010 */ 0x49,
+ /* 10010010 */ 0x49,
+ /* 10010010 */ 0x49,
+ /* 10010010 */ 0x49,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 4e 'n' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10111100 */ 0x3d,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 4f 'o' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 50 'p' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10111100 */ 0x3d,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10111100 */ 0x3d,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+/* 51 'q' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111010 */ 0x5e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111010 */ 0x5e,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+/* 52 'r' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10111100 */ 0x3d,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 53 's' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111110 */ 0x7e,
+ /* 10000000 */ 0x1,
+ /* 01111100 */ 0x3e,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 11111100 */ 0x3f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 54 't' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00111000 */ 0x1c,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010100 */ 0x28,
+ /* 00001000 */ 0x10,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 55 'u' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 56 'v' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01000100 */ 0x22,
+ /* 01000100 */ 0x22,
+ /* 00101000 */ 0x14,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 57 'w' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10010010 */ 0x49,
+ /* 10101010 */ 0x55,
+ /* 01000100 */ 0x22,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 58 'x' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000100 */ 0x21,
+ /* 01001000 */ 0x12,
+ /* 00110000 */ 0xc,
+ /* 00110000 */ 0xc,
+ /* 01001000 */ 0x12,
+ /* 10000100 */ 0x21,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 59 'y' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 01111100 */ 0x3e,
+/* 5a 'z' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 00000100 */ 0x20,
+ /* 00001000 */ 0x10,
+ /* 00110000 */ 0xc,
+ /* 01000000 */ 0x2,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 5b '{' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00001000 */ 0x10,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 5c '|' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 5d '}' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 5e '~' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01100000 */ 0x6,
+ /* 10010010 */ 0x49,
+ /* 00001100 */ 0x30,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 5f '' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 60 '€' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+/* 61 '' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 62 '‚' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010100 */ 0x28,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00111100 */ 0x3c,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00110000 */ 0xc,
+ /* 01010010 */ 0x4a,
+ /* 00101100 */ 0x34,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 63 'ƒ' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 64 '„' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01000100 */ 0x22,
+ /* 01111100 */ 0x3e,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 65 '…' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 66 '†' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000010 */ 0x40,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 67 '‡' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 68 'ˆ' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10011010 */ 0x59,
+ /* 10100010 */ 0x45,
+ /* 10100010 */ 0x45,
+ /* 10100010 */ 0x45,
+ /* 10011010 */ 0x59,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 69 '‰' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00111000 */ 0x1c,
+ /* 00000100 */ 0x20,
+ /* 00111100 */ 0x3c,
+ /* 01000100 */ 0x22,
+ /* 00111010 */ 0x5c,
+ /* 00000000 */ 0,
+ /* 01111110 */ 0x7e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 6a 'Š' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00001010 */ 0x50,
+ /* 00010100 */ 0x28,
+ /* 00101000 */ 0x14,
+ /* 01010000 */ 0xa,
+ /* 10100000 */ 0x5,
+ /* 01010000 */ 0xa,
+ /* 00101000 */ 0x14,
+ /* 00010100 */ 0x28,
+ /* 00001010 */ 0x50,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 6b '‹' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 6c 'Œ' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 6d '' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 6e 'Ž' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 6f '' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 01001000 */ 0x12,
+ /* 00110000 */ 0xc,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 70 '' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 11111110 */ 0x7f,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 71 '‘' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00111000 */ 0x1c,
+ /* 01000100 */ 0x22,
+ /* 01000100 */ 0x22,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 72 '’' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00111000 */ 0x1c,
+ /* 01000100 */ 0x22,
+ /* 00000100 */ 0x20,
+ /* 00011000 */ 0x18,
+ /* 00000100 */ 0x20,
+ /* 01000100 */ 0x22,
+ /* 00111000 */ 0x1c,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 73 '“' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 74 '”' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01000010 */ 0x42,
+ /* 01000010 */ 0x42,
+ /* 01000010 */ 0x42,
+ /* 01000010 */ 0x42,
+ /* 01100110 */ 0x66,
+ /* 01011010 */ 0x5a,
+ /* 01000000 */ 0x2,
+ /* 10000000 */ 0x1,
+ /* 00000000 */ 0,
+/* 75 '•' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 01101000 */ 0x16,
+ /* 11101000 */ 0x17,
+ /* 11101000 */ 0x17,
+ /* 11101000 */ 0x17,
+ /* 01101000 */ 0x16,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 76 '–' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00011000 */ 0x18,
+ /* 00011000 */ 0x18,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 77 '—' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 78 '˜' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00110000 */ 0xc,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00111000 */ 0x1c,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 79 '™' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00111100 */ 0x3c,
+ /* 01000010 */ 0x42,
+ /* 01000010 */ 0x42,
+ /* 01000010 */ 0x42,
+ /* 00111100 */ 0x3c,
+ /* 00000000 */ 0,
+ /* 01111110 */ 0x7e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 7a 'š' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 10100000 */ 0x5,
+ /* 01010000 */ 0xa,
+ /* 00101000 */ 0x14,
+ /* 00010100 */ 0x28,
+ /* 00001010 */ 0x50,
+ /* 00010100 */ 0x28,
+ /* 00101000 */ 0x14,
+ /* 01010000 */ 0xa,
+ /* 10100000 */ 0x5,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 7b '›' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01000000 */ 0x2,
+ /* 11000000 */ 0x3,
+ /* 01000010 */ 0x42,
+ /* 01000100 */ 0x22,
+ /* 11101000 */ 0x17,
+ /* 00010100 */ 0x28,
+ /* 00101100 */ 0x34,
+ /* 01010100 */ 0x2a,
+ /* 10011110 */ 0x79,
+ /* 00000100 */ 0x20,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 7c 'œ' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01000000 */ 0x2,
+ /* 11000000 */ 0x3,
+ /* 01000010 */ 0x42,
+ /* 01000100 */ 0x22,
+ /* 11101000 */ 0x17,
+ /* 00011100 */ 0x38,
+ /* 00110010 */ 0x4c,
+ /* 01000100 */ 0x22,
+ /* 10001000 */ 0x11,
+ /* 00011110 */ 0x78,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 7d '' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 7e 'ž' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00100000 */ 0x4,
+ /* 01000000 */ 0x2,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 7f 'Ÿ' */
+ /* 00000000 */ 0,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 00111000 */ 0x1c,
+ /* 01000100 */ 0x22,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 80 ' ' */
+ /* 00000000 */ 0,
+ /* 00000100 */ 0x20,
+ /* 00001000 */ 0x10,
+ /* 00000000 */ 0,
+ /* 00111000 */ 0x1c,
+ /* 01000100 */ 0x22,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 81 '¡' */
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 00111000 */ 0x1c,
+ /* 01000100 */ 0x22,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 82 '¢' */
+ /* 00000000 */ 0,
+ /* 00110100 */ 0x2c,
+ /* 01001000 */ 0x12,
+ /* 00000000 */ 0,
+ /* 00111000 */ 0x1c,
+ /* 01000100 */ 0x22,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 83 '£' */
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 00111000 */ 0x1c,
+ /* 01000100 */ 0x22,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 84 '¤' */
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00010000 */ 0x8,
+ /* 00111000 */ 0x1c,
+ /* 01000100 */ 0x22,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 85 '¥' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00111110 */ 0x7c,
+ /* 01010000 */ 0xa,
+ /* 01010000 */ 0xa,
+ /* 10010000 */ 0x9,
+ /* 10011100 */ 0x39,
+ /* 11110000 */ 0xf,
+ /* 10010000 */ 0x9,
+ /* 10011110 */ 0x79,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 86 '¦' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+/* 87 '§' */
+ /* 00000000 */ 0,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111000 */ 0x1f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 88 '¨' */
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111000 */ 0x1f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 89 '©' */
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111000 */ 0x1f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 8a 'ª' */
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111000 */ 0x1f,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 11111110 */ 0x7f,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 8b '«' */
+ /* 00000000 */ 0,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 8c '¬' */
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 8d '­' */
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 8e '®' */
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 8f '¯' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 90 '°' */
+ /* 00000000 */ 0,
+ /* 00110100 */ 0x2c,
+ /* 01001000 */ 0x12,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 11000010 */ 0x43,
+ /* 10100010 */ 0x45,
+ /* 10010010 */ 0x49,
+ /* 10010010 */ 0x49,
+ /* 10001010 */ 0x51,
+ /* 10000110 */ 0x61,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 91 '±' */
+ /* 00000000 */ 0,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 92 '²' */
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 93 '³' */
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 94 '´' */
+ /* 00000000 */ 0,
+ /* 00110100 */ 0x2c,
+ /* 01001000 */ 0x12,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 95 'µ' */
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 96 '¶' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111110 */ 0x7e,
+ /* 10010000 */ 0x9,
+ /* 10010000 */ 0x9,
+ /* 10010000 */ 0x9,
+ /* 10011100 */ 0x39,
+ /* 10010000 */ 0x9,
+ /* 10010000 */ 0x9,
+ /* 01111110 */ 0x7e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 97 '·' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000010 */ 0x40,
+ /* 01111100 */ 0x3e,
+ /* 10000110 */ 0x61,
+ /* 10001010 */ 0x51,
+ /* 10010010 */ 0x49,
+ /* 10010010 */ 0x49,
+ /* 10100010 */ 0x45,
+ /* 11000010 */ 0x43,
+ /* 01111100 */ 0x3e,
+ /* 10000000 */ 0x1,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 98 '¸' */
+ /* 00000000 */ 0,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 99 '¹' */
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 9a 'º' */
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 9b '»' */
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 9c '¼' */
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01000100 */ 0x22,
+ /* 00101000 */ 0x14,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 9d '½' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* 9e '¾' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10111100 */ 0x3d,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10111100 */ 0x3d,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 00000000 */ 0,
+/* 9f '¿' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 01111000 */ 0x1e,
+ /* 00000100 */ 0x20,
+ /* 01111100 */ 0x3e,
+ /* 10000100 */ 0x21,
+ /* 10000100 */ 0x21,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* a0 'À' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 01111000 */ 0x1e,
+ /* 00000100 */ 0x20,
+ /* 01111100 */ 0x3e,
+ /* 10000100 */ 0x21,
+ /* 10000100 */ 0x21,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* a1 'Á' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 01111000 */ 0x1e,
+ /* 00000100 */ 0x20,
+ /* 01111100 */ 0x3e,
+ /* 10000100 */ 0x21,
+ /* 10000100 */ 0x21,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* a2 'Â' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00110100 */ 0x2c,
+ /* 01001000 */ 0x12,
+ /* 00000000 */ 0,
+ /* 01111000 */ 0x1e,
+ /* 00000100 */ 0x20,
+ /* 01111100 */ 0x3e,
+ /* 10000100 */ 0x21,
+ /* 10000100 */ 0x21,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* a3 'Ã' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 01111000 */ 0x1e,
+ /* 00000100 */ 0x20,
+ /* 01111100 */ 0x3e,
+ /* 10000100 */ 0x21,
+ /* 10000100 */ 0x21,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* a4 'Ä' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00010000 */ 0x8,
+ /* 01111000 */ 0x1e,
+ /* 00000100 */ 0x20,
+ /* 01111100 */ 0x3e,
+ /* 10000100 */ 0x21,
+ /* 10000100 */ 0x21,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* a5 'Å' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01101100 */ 0x36,
+ /* 00010010 */ 0x48,
+ /* 01111110 */ 0x7e,
+ /* 10010000 */ 0x9,
+ /* 10010010 */ 0x49,
+ /* 01101100 */ 0x36,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* a6 'Æ' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+/* a7 'Ç' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* a8 'È' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* a9 'É' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* aa 'Ê' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 11111110 */ 0x7f,
+ /* 10000000 */ 0x1,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* ab 'Ë' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* ac 'Ì' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* ad 'Í' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* ae 'Î' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 00110000 */ 0xc,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* af 'Ï' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* b0 'Ð' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00110100 */ 0x2c,
+ /* 01001000 */ 0x12,
+ /* 00000000 */ 0,
+ /* 10111100 */ 0x3d,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* b1 'Ñ' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* b2 'Ò' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* b3 'Ó' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* b4 'Ô' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00110100 */ 0x2c,
+ /* 01001000 */ 0x12,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* b5 'Õ' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* b6 'Ö' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01101100 */ 0x36,
+ /* 10010010 */ 0x49,
+ /* 10011110 */ 0x79,
+ /* 10010000 */ 0x9,
+ /* 10010010 */ 0x49,
+ /* 01101100 */ 0x36,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* b7 '×' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000010 */ 0x40,
+ /* 01111100 */ 0x3e,
+ /* 10001010 */ 0x51,
+ /* 10010010 */ 0x49,
+ /* 10010010 */ 0x49,
+ /* 10100010 */ 0x45,
+ /* 01111100 */ 0x3e,
+ /* 10000000 */ 0x1,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* b8 'Ø' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* b9 'Ù' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00001000 */ 0x10,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* ba 'Ú' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* bb 'Û' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111010 */ 0x5e,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+/* bc 'Ü' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00101000 */ 0x14,
+ /* 00101000 */ 0x14,
+ /* 00000000 */ 0,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 10000010 */ 0x41,
+ /* 01111100 */ 0x3e,
+ /* 00000010 */ 0x40,
+ /* 00000010 */ 0x40,
+ /* 01111100 */ 0x3e,
+/* bd 'Ý' */
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 01111100 */ 0x3e,
+ /* 10000010 */ 0x41,
+ /* 10000000 */ 0x1,
+ /* 10000000 */ 0x1,
+ /* 01000000 */ 0x2,
+ /* 00100000 */ 0x4,
+ /* 00010000 */ 0x8,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00010000 */ 0x8,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+ /* 00000000 */ 0,
+};
diff --git a/chips/kernel_font.data b/chips/kernel_font.data
new file mode 100644
index 00000000..200cf847
--- /dev/null
+++ b/chips/kernel_font.data
@@ -0,0 +1,3108 @@
+#
+# Mach Operating System
+# Copyright (c) 1991,1990,1989 Carnegie Mellon University
+# All Rights Reserved.
+#
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+# CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+# ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+#
+# Carnegie Mellon requests users of this software to return to
+#
+# Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+# School of Computer Science
+# Carnegie Mellon University
+# Pittsburgh PA 15213-3890
+#
+# any improvements or extensions that they make and grant Carnegie Mellon
+# the rights to redistribute these changes.
+#
+#
+# File : kernel_font.data
+# Content: Bitmaps for a 7x15 ASCII font
+# Authors: Alessandro and Giovanna Forin
+#
+# Copyright (c) 1990 Alessandro and Giovanna Forin
+#
+# All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appears in all
+# copies and that both the copyright notice and this permission notice
+# appear in supporting documentation, and that the name of the authors
+# not be used in advertising or publicity pertaining to distribution
+# of the software without specific, written prior permission.
+#
+# THE AUTHORS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+# NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+# NOTE: comment lines must start with a "#" character, and are
+# only permitted at the beginning of the file.
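+# Each glyph below is 15 rows of 8 pixels, leftmost pixel written
+# first; glyphs are separated by a line holding the character whose
+# bitmap follows.  (The companion kernel_font.c keeps the leftmost
+# pixel in the least significant bit of each byte, so a row written
+# here as 11111110 shows up there as 0x7f.)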
+
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+!
+00000000
+00000000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+"
+00000000
+00000000
+00101000
+00101000
+00101000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+#
+00000000
+00000000
+01001000
+01001000
+11111100
+01001000
+01001000
+01001000
+01001000
+11111100
+01001000
+01001000
+00000000
+00000000
+00000000
+$
+00000000
+00000000
+00000000
+00010000
+01111110
+10010000
+10010000
+01111100
+00010010
+00010010
+11111100
+00010000
+00000000
+00000000
+00000000
+%
+00000000
+00000000
+01000010
+10100100
+01000100
+00001000
+00010000
+00010000
+00100000
+01000100
+01001010
+10000100
+00000000
+00000000
+00000000
+&
+00000000
+00000000
+01110000
+10001000
+10001000
+10001000
+01110000
+10001000
+10001010
+10000100
+10001100
+01110010
+00000000
+00000000
+00000000
+'
+00000000
+00000000
+00011000
+00011000
+00100000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+(
+00000000
+00000000
+00001000
+00010000
+00010000
+00100000
+00100000
+00100000
+00100000
+00100000
+00010000
+00010000
+00001000
+00000000
+00000000
+)
+00000000
+00000000
+00100000
+00010000
+00010000
+00001000
+00001000
+00001000
+00001000
+00001000
+00010000
+00010000
+00100000
+00000000
+00000000
+*
+00000000
+00000000
+00000000
+00010000
+10010010
+01010100
+00111000
+00111000
+01010100
+10010010
+00010000
+00000000
+00000000
+00000000
+00000000
++
+00000000
+00000000
+00000000
+00000000
+00010000
+00010000
+00010000
+11111110
+00010000
+00010000
+00010000
+00000000
+00000000
+00000000
+00000000
+,
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00110000
+00110000
+01000000
+00000000
+-
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+11111110
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+.
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00110000
+00110000
+00000000
+00000000
+00000000
+/
+00000000
+00000000
+00000010
+00000100
+00000100
+00001000
+00010000
+00010000
+00100000
+01000000
+01000000
+10000000
+00000000
+00000000
+00000000
+0
+00000000
+00000000
+00111000
+01000100
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01000100
+00111000
+00000000
+00000000
+00000000
+1
+00000000
+00000000
+00010000
+00110000
+01010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+2
+00000000
+00000000
+01111100
+10000010
+00000010
+00000010
+00000100
+00011000
+00100000
+01000000
+10000000
+11111110
+00000000
+00000000
+00000000
+3
+00000000
+00000000
+01111100
+10000010
+00000010
+00000010
+00011100
+00000010
+00000010
+00000010
+10000010
+01111100
+00000000
+00000000
+00000000
+4
+00000000
+00000000
+00000100
+00001100
+00010100
+00100100
+01000100
+10000100
+11111110
+00000100
+00000100
+00000100
+00000000
+00000000
+00000000
+5
+00000000
+00000000
+11111110
+10000000
+10000000
+10000000
+11111100
+00000010
+00000010
+00000010
+10000010
+01111100
+00000000
+00000000
+00000000
+6
+00000000
+00000000
+01111100
+10000000
+10000000
+10000000
+11111100
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+7
+00000000
+00000000
+11111110
+10000010
+00000010
+00000100
+00001000
+00010000
+00100000
+01000000
+10000000
+10000000
+00000000
+00000000
+00000000
+8
+00000000
+00000000
+01111100
+10000010
+10000010
+10000010
+01111100
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+9
+00000000
+00000000
+01111100
+10000010
+10000010
+10000010
+01111110
+00000010
+00000010
+00000010
+00000010
+01111100
+00000000
+00000000
+00000000
+:
+00000000
+00000000
+00000000
+00000000
+00000000
+00110000
+00110000
+00000000
+00000000
+00000000
+00110000
+00110000
+00000000
+00000000
+00000000
+;
+00000000
+00000000
+00000000
+00000000
+00000000
+00110000
+00110000
+00000000
+00000000
+00000000
+00000000
+00110000
+00110000
+01000000
+00000000
+<
+00000000
+00000000
+00000000
+00001000
+00010000
+00100000
+01000000
+10000000
+01000000
+00100000
+00010000
+00001000
+00000000
+00000000
+00000000
+=
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+11111110
+00000000
+11111110
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+>
+00000000
+00000000
+00000000
+00100000
+00010000
+00001000
+00000100
+00000010
+00000100
+00001000
+00010000
+00100000
+00000000
+00000000
+00000000
+?
+00000000
+00000000
+01111100
+10000010
+00000010
+00000010
+00000100
+00001000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+@
+00000000
+00000000
+00111100
+01000010
+10011010
+10101010
+10101010
+10011100
+10000000
+10000000
+01000010
+00111100
+00000000
+00000000
+00000000
+A
+00000000
+00000000
+00111000
+01000100
+10000010
+10000010
+10000010
+11111110
+10000010
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+B
+00000000
+00000000
+11111100
+10000010
+10000010
+10000010
+11111100
+10000010
+10000010
+10000010
+10000010
+11111100
+00000000
+00000000
+00000000
+C
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+10000000
+10000000
+10000000
+10000000
+10000010
+01111100
+00000000
+00000000
+00000000
+D
+00000000
+00000000
+11111000
+10000100
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+10000100
+11111000
+00000000
+00000000
+00000000
+E
+00000000
+00000000
+11111110
+10000000
+10000000
+10000000
+11111000
+10000000
+10000000
+10000000
+10000000
+11111110
+00000000
+00000000
+00000000
+F
+00000000
+00000000
+11111110
+10000000
+10000000
+10000000
+11111000
+10000000
+10000000
+10000000
+10000000
+10000000
+00000000
+00000000
+00000000
+G
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+10011110
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+H
+00000000
+00000000
+10000010
+10000010
+10000010
+10000010
+11111110
+10000010
+10000010
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+I
+00000000
+00000000
+01111100
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+J
+00000000
+00000000
+00000010
+00000010
+00000010
+00000010
+00000010
+00000010
+00000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+K
+00000000
+00000000
+10000100
+10001000
+10010000
+10100000
+11000000
+10100000
+10010000
+10001000
+10000100
+10000010
+00000000
+00000000
+00000000
+L
+00000000
+00000000
+10000000
+10000000
+10000000
+10000000
+10000000
+10000000
+10000000
+10000000
+10000000
+11111110
+00000000
+00000000
+00000000
+M
+00000000
+00000000
+10000010
+10000010
+11000110
+10101010
+10010010
+10000010
+10000010
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+N
+00000000
+00000000
+10000010
+10000010
+11000010
+10100010
+10010010
+10010010
+10001010
+10000110
+10000010
+10000010
+00000000
+00000000
+00000000
+O
+00000000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+P
+00000000
+00000000
+11111100
+10000010
+10000010
+10000010
+11111100
+10000000
+10000000
+10000000
+10000000
+10000000
+00000000
+00000000
+00000000
+Q
+00000000
+00000000
+00111000
+01000100
+10000010
+10000010
+10000010
+10000010
+10000010
+10001010
+01000100
+00111010
+00000000
+00000000
+00000000
+R
+00000000
+00000000
+11111100
+10000010
+10000010
+10000010
+11111100
+10100000
+10010000
+10001000
+10000100
+10000010
+00000000
+00000000
+00000000
+S
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01111100
+00000010
+00000010
+00000010
+10000010
+01111100
+00000000
+00000000
+00000000
+T
+00000000
+00000000
+11111110
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00000000
+00000000
+00000000
+U
+00000000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+V
+00000000
+00000000
+10000010
+10000010
+10000010
+01000100
+01000100
+01000100
+00101000
+00101000
+00010000
+00010000
+00000000
+00000000
+00000000
+W
+00000000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+10010010
+10010010
+10010010
+01101100
+00000000
+00000000
+00000000
+X
+00000000
+00000000
+10000010
+01000100
+01000100
+00101000
+00010000
+00010000
+00101000
+01000100
+01000100
+10000010
+00000000
+00000000
+00000000
+Y
+00000000
+00000000
+10000010
+10000010
+01000100
+00101000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00000000
+00000000
+00000000
+Z
+00000000
+00000000
+11111110
+00000010
+00000100
+00001000
+00010000
+00100000
+01000000
+10000000
+10000000
+11111110
+00000000
+00000000
+00000000
+[
+00000000
+00000000
+00011110
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00011110
+00000000
+00000000
+00000000
+\
+00000000
+00000000
+10000000
+01000000
+01000000
+00100000
+00010000
+00010000
+00001000
+00000100
+00000100
+00000010
+00000000
+00000000
+00000000
+]
+00000000
+00000000
+11110000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+11110000
+00000000
+00000000
+00000000
+^
+00000000
+00000000
+00010000
+00101000
+01000100
+10000010
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+_
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+11111110
+00000000
+00000000
+00000000
+`
+00000000
+00000000
+01100000
+01100000
+00010000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+a
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+01111000
+00000100
+01111100
+10000100
+10000100
+01111010
+00000000
+00000000
+00000000
+b
+00000000
+00000000
+10000000
+10000000
+10000000
+10000000
+10111100
+10000010
+10000010
+10000010
+10000010
+10111100
+00000000
+00000000
+00000000
+c
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+10000010
+01111100
+00000000
+00000000
+00000000
+d
+00000000
+00000000
+00000010
+00000010
+00000010
+00000010
+01111010
+10000010
+10000010
+10000010
+10000010
+01111010
+00000000
+00000000
+00000000
+e
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+01111100
+10000010
+10000010
+11111110
+10000000
+01111100
+00000000
+00000000
+00000000
+f
+00000000
+00000000
+00001000
+00010100
+00010000
+00010000
+00111100
+00010000
+00010000
+00010000
+00010000
+00010000
+00000000
+00000000
+00000000
+g
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+01111010
+10000010
+10000010
+10000010
+10000010
+01111010
+00000010
+00000010
+01111100
+h
+00000000
+00000000
+10000000
+10000000
+10000000
+10000000
+10111100
+10000010
+10000010
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+i
+00000000
+00000000
+00000000
+00010000
+00000000
+00110000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+j
+00000000
+00000000
+00000000
+00010000
+00000000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+01010000
+00100000
+k
+00000000
+00000000
+10000000
+10000000
+10000000
+10000100
+10001000
+10010000
+10110000
+11001000
+10000100
+10000010
+00000000
+00000000
+00000000
+l
+00000000
+00000000
+00110000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+m
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+10101100
+10010010
+10010010
+10010010
+10010010
+10010010
+00000000
+00000000
+00000000
+n
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+10111100
+10000010
+10000010
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+o
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+p
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+10111100
+10000010
+10000010
+10000010
+10000010
+10111100
+10000000
+10000000
+10000000
+q
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+01111010
+10000010
+10000010
+10000010
+10000010
+01111010
+00000010
+00000010
+00000010
+r
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+10111100
+10000010
+10000000
+10000000
+10000000
+10000000
+00000000
+00000000
+00000000
+s
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+01111110
+10000000
+01111100
+00000010
+00000010
+11111100
+00000000
+00000000
+00000000
+t
+00000000
+00000000
+00000000
+00010000
+00010000
+00111000
+00010000
+00010000
+00010000
+00010000
+00010100
+00001000
+00000000
+00000000
+00000000
+u
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+01111010
+00000000
+00000000
+00000000
+v
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+10000010
+10000010
+01000100
+01000100
+00101000
+00010000
+00000000
+00000000
+00000000
+w
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+10000010
+10000010
+10000010
+10010010
+10101010
+01000100
+00000000
+00000000
+00000000
+x
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+10000100
+01001000
+00110000
+00110000
+01001000
+10000100
+00000000
+00000000
+00000000
+y
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000010
+00000010
+01111100
+z
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+11111110
+00000100
+00001000
+00110000
+01000000
+11111110
+00000000
+00000000
+00000000
+{
+00000000
+00000000
+00001000
+00010000
+00010000
+00010000
+00010000
+00100000
+00010000
+00010000
+00010000
+00010000
+00001000
+00000000
+00000000
+|
+00000000
+00000000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00000000
+00000000
+00000000
+}
+00000000
+00000000
+00100000
+00010000
+00010000
+00010000
+00010000
+00001000
+00010000
+00010000
+00010000
+00010000
+00100000
+00000000
+00000000
+~
+00000000
+00000000
+00000000
+01100000
+10010010
+00001100
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+00010000
+00000000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+00000000
+00010000
+01111100
+10000010
+10000000
+10000000
+10000010
+01111100
+00010000
+00000000
+00000000
+
+00000000
+00000000
+00001000
+00010100
+00010000
+00010000
+00111100
+00010000
+00010000
+00110000
+01010010
+00101100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+10000010
+10000010
+01000100
+01111100
+00010000
+01111100
+00010000
+00010000
+00010000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+01111100
+10000010
+10000010
+01111100
+00000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+10000010
+01111100
+10000010
+10000010
+10000010
+10000010
+01111100
+10000010
+00000000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10011010
+10100010
+10100010
+10100010
+10011010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00111000
+00000100
+00111100
+01000100
+00111010
+00000000
+01111110
+00000000
+00000000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00001010
+00010100
+00101000
+01010000
+10100000
+01010000
+00101000
+00010100
+00001010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00110000
+01001000
+00110000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+00010000
+00010000
+11111110
+00010000
+00010000
+00000000
+11111110
+00000000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00111000
+01000100
+01000100
+00001000
+00010000
+00100000
+01111100
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00111000
+01000100
+00000100
+00011000
+00000100
+01000100
+00111000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+01000010
+01000010
+01000010
+01000010
+01100110
+01011010
+01000000
+10000000
+00000000
+
+00000000
+00000000
+00101000
+01101000
+11101000
+11101000
+11101000
+01101000
+00101000
+00101000
+00101000
+00101000
+00101000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00011000
+00011000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00010000
+00110000
+00010000
+00010000
+00010000
+00010000
+00111000
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00111100
+01000010
+01000010
+01000010
+00111100
+00000000
+01111110
+00000000
+00000000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+10100000
+01010000
+00101000
+00010100
+00001010
+00010100
+00101000
+01010000
+10100000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01000000
+11000000
+01000010
+01000100
+11101000
+00010100
+00101100
+01010100
+10011110
+00000100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01000000
+11000000
+01000010
+01000100
+11101000
+00011100
+00110010
+01000100
+10001000
+00011110
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00010000
+00000000
+00010000
+00010000
+00100000
+01000000
+10000000
+10000000
+10000010
+01111100
+00000000
+00000000
+
+00000000
+01000000
+00100000
+00000000
+00111000
+01000100
+10000010
+10000010
+11111110
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+
+00000000
+00000100
+00001000
+00000000
+00111000
+01000100
+10000010
+10000010
+11111110
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+
+00000000
+00010000
+00101000
+00000000
+00111000
+01000100
+10000010
+10000010
+11111110
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+
+00000000
+00110100
+01001000
+00000000
+00111000
+01000100
+10000010
+10000010
+11111110
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+
+00000000
+00101000
+00101000
+00000000
+00111000
+01000100
+10000010
+10000010
+11111110
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+
+00000000
+00010000
+00101000
+00010000
+00111000
+01000100
+10000010
+10000010
+11111110
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+00111110
+01010000
+01010000
+10010000
+10011100
+11110000
+10010000
+10011110
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+10000000
+10000000
+10000000
+10000000
+10000010
+01111100
+00001000
+00010000
+00000000
+
+00000000
+01000000
+00100000
+00000000
+11111110
+10000000
+10000000
+11111000
+10000000
+10000000
+10000000
+11111110
+00000000
+00000000
+00000000
+
+00000000
+00001000
+00010000
+00000000
+11111110
+10000000
+10000000
+11111000
+10000000
+10000000
+10000000
+11111110
+00000000
+00000000
+00000000
+
+00000000
+00010000
+00101000
+00000000
+11111110
+10000000
+10000000
+11111000
+10000000
+10000000
+10000000
+11111110
+00000000
+00000000
+00000000
+
+00000000
+00101000
+00101000
+00000000
+11111110
+10000000
+10000000
+11111000
+10000000
+10000000
+10000000
+11111110
+00000000
+00000000
+00000000
+
+00000000
+01000000
+00100000
+00000000
+01111100
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00001000
+00010000
+00000000
+01111100
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00010000
+00101000
+00000000
+01111100
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00101000
+00101000
+00000000
+01111100
+00010000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00110100
+01001000
+00000000
+10000010
+11000010
+10100010
+10010010
+10010010
+10001010
+10000110
+10000010
+00000000
+00000000
+00000000
+
+00000000
+01000000
+00100000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00001000
+00010000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00010000
+00101000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00110100
+01001000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00101000
+00101000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+01111110
+10010000
+10010000
+10010000
+10011100
+10010000
+10010000
+01111110
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000010
+01111100
+10000110
+10001010
+10010010
+10010010
+10100010
+11000010
+01111100
+10000000
+00000000
+00000000
+
+00000000
+01000000
+00100000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00001000
+00010000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00010000
+00101000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00101000
+00101000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00101000
+00101000
+00000000
+10000010
+10000010
+01000100
+00101000
+00010000
+00010000
+00010000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000010
+10000010
+10111100
+10000010
+10000010
+10000010
+10000010
+10111100
+10000000
+10000000
+00000000
+
+00000000
+00000000
+00000000
+01000000
+00100000
+00000000
+01111000
+00000100
+01111100
+10000100
+10000100
+01111010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00001000
+00010000
+00000000
+01111000
+00000100
+01111100
+10000100
+10000100
+01111010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00010000
+00101000
+00000000
+01111000
+00000100
+01111100
+10000100
+10000100
+01111010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00110100
+01001000
+00000000
+01111000
+00000100
+01111100
+10000100
+10000100
+01111010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00101000
+00101000
+00000000
+01111000
+00000100
+01111100
+10000100
+10000100
+01111010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00010000
+00101000
+00010000
+01111000
+00000100
+01111100
+10000100
+10000100
+01111010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+01101100
+00010010
+01111110
+10010000
+10010010
+01101100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+10000010
+01111100
+00001000
+00010000
+00000000
+
+00000000
+00000000
+00000000
+01000000
+00100000
+00000000
+01111100
+10000010
+10000010
+11111110
+10000000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00001000
+00010000
+00000000
+01111100
+10000010
+10000010
+11111110
+10000000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00010000
+00101000
+00000000
+01111100
+10000010
+10000010
+11111110
+10000000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00101000
+00101000
+00000000
+01111100
+10000010
+10000010
+11111110
+10000000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00100000
+00010000
+00000000
+00110000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00001000
+00010000
+00000000
+00110000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00010000
+00101000
+00000000
+00110000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00101000
+00101000
+00000000
+00110000
+00010000
+00010000
+00010000
+00010000
+00010000
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00110100
+01001000
+00000000
+10111100
+10000010
+10000010
+10000010
+10000010
+10000010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+01000000
+00100000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00001000
+00010000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00010000
+00101000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00110100
+01001000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00101000
+00101000
+00000000
+01111100
+10000010
+10000010
+10000010
+10000010
+01111100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+00000000
+00000000
+01101100
+10010010
+10011110
+10010000
+10010010
+01101100
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00000000
+00000000
+00000010
+01111100
+10001010
+10010010
+10010010
+10100010
+01111100
+10000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+01000000
+00100000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+01111010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00001000
+00010000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+01111010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00010000
+00101000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+01111010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00101000
+00101000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+01111010
+00000000
+00000000
+00000000
+
+00000000
+00000000
+00000000
+00101000
+00101000
+00000000
+10000010
+10000010
+10000010
+10000010
+10000010
+01111100
+00000010
+00000010
+01111100
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
+
+00000000
+00000000
+01111100
+10000010
+10000000
+10000000
+01000000
+00100000
+00010000
+00010000
+00000000
+00010000
+00000000
+00000000
+00000000
diff --git a/chips/lance.c b/chips/lance.c
new file mode 100644
index 00000000..750d4c57
--- /dev/null
+++ b/chips/lance.c
@@ -0,0 +1,1570 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: lance.c
+ * Author: Robert V. Baron & Alessandro Forin
+ * Date: 5/90
+ *
+ * Driver for the DEC LANCE Ethernet Controller.
+ */
+
+/*
+
+ Byte ordering issues.
+
+ The lance sees data naturally as half word (16 bit) quantities.
+ Bit 2 (BSWP) in control register 3 (CSR3) controls byte swapping.
+ To quote the spec:
+
+ 02 BSWP BYTE SWAP allows the chip to
+ operate in systems that consider bits (15:08) of data to be
+ pointed to by an even address and bits (7:0) to be pointed
+ to by an odd address.
+
+ When BSWP=1, the chip will swap the high and low bytes on DMA
+ data transfers between the silo and bus memory. Only data from
+ silo transfers is swapped; the Initialization Block data and
+ the Descriptor Ring entries are NOT swapped. (emphasis theirs)
+
+
+ So on systems with BYTE_MSF=1, the BSWP bit should be set. Note,
+ however, that all shorts in the descriptor ring and initialization
+ block need to be swapped. The BITFIELD macros in lance.h handle this
+ magic.
+
+*/
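+
+/*
+ * For illustration only: the driver relies on the BITFIELD macros in
+ * lance.h for this, and those may well be defined differently.  A
+ * minimal swap of one descriptor-ring short on a BYTE_MSF host could
+ * look like
+ *
+ *	#define LN_SWAP_SHORT(s) \
+ *		((unsigned short)((((s) >> 8) & 0xff) | (((s) & 0xff) << 8)))
+ *
+ * (LN_SWAP_SHORT is a name made up for this sketch.)  Only the shorts
+ * in the descriptor ring and initialization block need such a swap;
+ * BSWP=1 already takes care of the silo data itself.
+ */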
+
+#include <ln.h>
+#if NLN > 0
+#include <platforms.h>
+
+/*
+ * AMD Am7990 LANCE (Ethernet Interface)
+ */
+#include <sys/ioctl.h>
+#include <vm/vm_kern.h>
+
+#include <machine/machspl.h> /* spl definitions */
+#include <kern/time_out.h>
+#include <sys/syslog.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_kmsg.h>
+
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+
+#ifdef FLAMINGO
+#define se_reg_type unsigned int
+#endif
+
+#include <chips/lance.h>
+#include <chips/busses.h>
+
+#define private static
+#define public
+
+typedef struct se_softc *se_softc_t; /* move above prototypes */
+
+void se_write_reg(); /* forwards */
+void se_read();
+void se_rint();
+void se_tint();
+
+private vm_offset_t se_Hmem_nogap(), se_Hmem_gap16();
+private vm_offset_t se_malloc();
+
+
+/* This config section should go into a separate file */
+
+#ifdef LUNA88K
+# include <luna88k/board.h>
+# define MAPPED 1
+ #undef bcopy
+ extern void bcopy(), bzero();
+
+#define wbflush()
+#define Hmem(lna) (vm_offset_t)((lna) + sc->lnbuf)
+#define Lmem(lna) (vm_offset_t)((lna) + sc->lnoffset)
+
+#define SPACE (TRI_PORT_RAM_SPACE>>1)
+private struct se_switch se_switch[] = {
+ { LANCE_ADDR - TRI_PORT_RAM, /* pointer */
+ SPACE /* host side */,
+ SPACE /* lance side */,
+ - TRI_PORT_RAM,
+ 0, /* romstride */
+ 0, /* ramstride */
+ SPACE,
+ /* desc_copyin */ bcopy,
+ /* desc_copyout */ bcopy,
+ /* data_copyin */ bcopy,
+ /* data_copyout */ bcopy,
+ /* bzero */ bzero,
+ /* mapaddr */ se_Hmem_nogap,
+ /* mapoffs */ se_Hmem_nogap
+ },
+};
+
+#endif
+
+#ifdef DECSTATION
+#include <mips/mips_cpu.h>
+#include <mips/PMAX/pmad_aa.h>
+
+#define MAPPED 1
+
+/*
+ * The LANCE buffer memory as seen from the Pmax cpu is funny.
+ * It is viewed as short words (16bits), spaced at word (32bits)
+ * intervals. The same applies to the registers. From the LANCE
+ * point of view memory is instead contiguous.
+ * The ROM that contains the station address is in the space belonging
+ * to the clock/battery backup memory. This space is again 16 bits
+ * in a 32bit envelope. And the ether address is stored in the "high"
+ * byte of 6 consecutive quantities.
+ *
+ * But Pmaxen and 3maxen (and..) map lance space differently.
+ * This requires dynamic adaptation of the driver, which
+ * is done via the following switches.
+ * For convenience, the switch holds information about
+ * the location of the lance control registers as well.
+ * This could be either absolute (pmax) or relative to
+ * some register base (3max, turbochannel)
+ */
+void copyin_gap16(), copyout_gap16(), bzero_gap16();
+extern void bcopy(), bzero();
+void copyin_gap32(), copyout_gap32();
+
+private struct se_switch se_switch[] = {
+/* pmax */
+ { 0x00000000, 0x01000000, 0x0, 0x05000000, 8, 16, 64*1024,
+ copyin_gap16, copyout_gap16, copyin_gap16, copyout_gap16,
+ bzero_gap16, se_Hmem_gap16, se_Hmem_gap16},
+/* 3max */
+ { PMAD_OFFSET_LANCE, PMAD_OFFSET_RAM, PMAD_OFFSET_RAM, PMAD_OFFSET_ROM,
+ 16, 0, PMAD_RAM_SIZE,
+ bcopy, bcopy, bcopy, bcopy, bzero, se_Hmem_nogap, se_Hmem_nogap},
+/* 3min */
+/* XXX re-use other 64k */
+ { 0/*later*/, 0/*later*/, 0x0, 0/*later*/, 0, 128, 64*1024,
+ copyin_gap16, copyout_gap16, copyin_gap32, copyout_gap32,
+ bzero_gap16, se_Hmem_gap16, se_Hmem_nogap},
+};
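+
+/*
+ * Illustrative sketch, not part of the original driver: what a gap16
+ * copy conceptually does.  Each 16-bit lance word is assumed to sit in
+ * the low half of a 32-bit host word, so the host pointer advances
+ * twice as fast as the lance one.  The real copyin_gap16/copyout_gap16
+ * live in machine-dependent code.
+ */
+#if 0	/* example only, never compiled */
+static void
+example_copyin_gap16(vm_offset_t from, vm_offset_t to, int len)
+{
+	register volatile unsigned short *src = (volatile unsigned short *)from;
+	register unsigned short *dst = (unsigned short *)to;
+
+	for (; len > 0; len -= sizeof(short)) {
+		*dst++ = *src;		/* pick up the data halfword */
+		src += 2;		/* skip the 16-bit gap */
+	}
+}
+#endif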
+
+/*
+ * "lna" is what se_malloc hands back. They are offsets using
+ * the sizing that the Lance would use. The Lance space is
+ * mapped somewhere in the I/O space, as indicated by the softc.
+ * Hence we have these two macros:
+ */
+/* H & L are not hi and lo but
+ H = HOST == addresses for host to reference board memory
+ L = LOCAL == addresses on board
+ */
+#define Hmem(lna) (vm_offset_t)((se_sw->mapaddr)(lna) + sc->lnbuf)
+#define Lmem(lna) (vm_offset_t)((vm_offset_t)lna + sc->lnoffset)
+#endif /*DECSTATION*/
+
+
+#ifdef VAXSTATION
+#include <vax/ka3100.h>
+
+#define wbflush()
+
+void xzero(x, l) vm_offset_t x; int l; { blkclr(x, l); }
+void xcopy(f, t, l) vm_offset_t f, t; int l; { bcopy(f, t, l); }
+
+private struct se_switch se_switch[] = {
+ /* pvax sees contiguous bits in lower 16Meg of memory */
+ { 0, 0, 0, 0, 0, 0, 64*1024,
+ xcopy, xcopy, xcopy, xcopy, xzero, se_Hmem_nogap, se_Hmem_nogap},
+};
+
+/*
+ * "lna" is what se_malloc hands back. They are offsets using
+ * the sizing that the Lance would use. The Lance space is
+ * mapped somewhere in the I/O space, as indicated by the softc.
+ * Hence we have these two macros:
+ */
+/* H & L are not hi and lo but
+ H = HOST == addresses for host to reference board memory
+ L = LOCAL == addresses on board
+ */
+ /*
+ * This does not deal with > 16 Meg physical memory, where
+ * Hmem != Lmem
+ */
+#define Hmem(lna) (vm_offset_t)((lna) + sc->lnbuf)
+#define Lmem(lna) (vm_offset_t)((lna) + sc->lnoffset)
+
+#endif /*VAXSTATION*/
+
+
+#ifdef FLAMINGO
+#include <alpha/alpha_cpu.h>
+
+/* XXX might be wrong, mostly stolen from kmin */
+extern void copyin_gap16(), copyout_gap16(), bzero_gap16();
+extern void copyin_gap32(), copyout_gap32();
+extern void bcopy(), bzero();
+
+private struct se_switch se_switch[] = {
+/* XXX re-use other 64k */
+ { 0/*later*/, 0/*later*/, 0x0, 0/*later*/, 0, 128, 64*1024,
+ copyin_gap16, copyout_gap16, copyin_gap32, copyout_gap32,
+ bzero_gap16, se_Hmem_gap16, se_Hmem_nogap},
+};
+
+/*
+ * "lna" is what se_malloc hands back. They are offsets using
+ * the sizing that the Lance would use. The Lance space is
+ * mapped somewhere in the I/O space, as indicated by the softc.
+ * Hence we have these two macros:
+ */
+/* H & L are not hi and lo but
+ H = HOST == addresses for host to reference board memory
+ L = LOCAL == addresses on board
+ */
+#define Hmem(lna) (vm_offset_t)((se_sw->mapaddr)(lna) + sc->lnbuf)
+#define Lmem(lna) (vm_offset_t)((vm_offset_t)lna + sc->lnoffset)
+#endif /*FLAMINGO*/
+
+
+/*
+ * Map a lance-space offset into an host-space one
+ */
+private vm_offset_t se_Hmem_nogap( vm_offset_t lna) { return lna;}
+private vm_offset_t se_Hmem_gap16( vm_offset_t lna) { return lna << 1;}
+
+/*
+ * Memory addresses for LANCE are 24 bits wide.
+ */
+#define Addr_lo(y) ((unsigned short)((vm_offset_t)(y) & 0xffff))
+#define Addr_hi(y) ((unsigned short)(((vm_offset_t)(y)>>16) & 0xff))
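+
+/*
+ * Worked example (illustration only): for a lance-space address of
+ * 0x0a1234, Addr_lo() yields 0x1234 and Addr_hi() yields 0x0a; these
+ * are the two halves se_ship_init_block() loads into CSR1 and CSR2.
+ */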
+
+#define LN_MEMORY_SIZE (se_sw->ramsize)
+
+/* XXX to accommodate heterogeneity this should be made per-drive */
+/* XXX and then some more */
+
+struct se_switch *se_sw = se_switch;
+
+void set_se_switch(n)
+int n;
+{
+ se_sw = &se_switch[n];
+}
+
+#ifndef LUNA88K
+void setse_switch(n, r, b, l, o)
+ vm_offset_t r, b, l, o;
+ int n;
+{
+ se_switch[n].regspace = r;
+ se_switch[n].bufspace = b;
+ se_switch[n].ln_bufspace = l;
+ se_switch[n].romspace = o;
+
+ /* make sure longword aligned */
+ if (se_switch[n].bufspace & 0x7) {
+ se_switch[n].bufspace = (se_switch[n].bufspace+0x7) & ~0x7;
+ }
+
+ set_se_switch(n);
+}
+#endif
+
+/*
+ * Autoconf info
+ */
+
+private vm_offset_t se_std[NLN] = { 0 };
+private struct bus_device *se_info[NLN];
+private int se_probe();
+private void se_attach();
+
+struct bus_driver se_driver =
+ { se_probe, 0, se_attach, 0, se_std, "se", se_info, };
+
+/*
+ * Externally visible functions
+ */
+char *se_unprobed_addr = 0;
+void se_intr(); /* kernel */
+
+int se_open(), se_output(), se_get_status(), /* user */
+ se_set_status(), se_setinput(), se_restart();
+
+/*
+ *
+ * Internal functions & definitions
+ *
+ */
+
+private int se_probe();
+private void se_init();
+private void init_lance_space();
+private void se_desc_set_status();
+private volatile long *se_desc_alloc(); /* must be aligned! */
+void se_start();
+private void copy_from_lance();
+private int copy_to_lance();
+
+int se_verbose = 0; /* debug flag */
+
+#define RLOG 4 /* 2**4 = 16 receive descriptors */
+#define TLOG 4 /* 2**4 = 16 transmit descriptors */
+#define NRCV (1<<RLOG) /* Receive descriptors */
+#define NXMT (1<<TLOG) /* Transmit descriptors */
+
+#define LN_BUFFER_SIZE (0x800-0x80)
+
+/*
+ * Ethernet software status per interface.
+ *
+ * Each interface is referenced by a network interface structure,
+ * is_if, which contains the output queue for the interface, its address, ...
+ */
+int se_loopback_hack = 1;
+
+struct se_softc {
+ struct ifnet is_if; /* generic interface header */
+ unsigned char is_addr[6]; /* ethernet hardware address */
+ unsigned short pad;
+ se_reg_t lnregs; /* Lance registers */
+ vm_offset_t lnbuf; /* Lance memory, Host offset */
+ vm_offset_t lnoffset; /* Lance memory, Lance offset */
+ vm_offset_t lnrom;
+ vm_offset_t lnsbrk; /* Lance memory allocator */
+ vm_offset_t lninit_block; /* Init block address */
+ se_desc_t lnrring[NRCV]; /* Receive ring desc. */
+ volatile long *lnrbuf[NRCV]; /* Receive buffers */
+ se_desc_t lntring[NXMT]; /* Transmit ring desc. */
+ volatile long *lntbuf[NXMT]; /* Transmit buffers */
+
+ int rcv_last; /* Rcv buffer last read */
+
+ io_req_t tpkt[NXMT+1]; /* Xmt pkt queue */
+ int xmt_count; /* Xmt queue size */
+ int xmt_last; /* Xmt queue head (insert) */
+ int xmt_complete; /* Xmt queue tail (remove) */
+
+ int se_flags; /* Flags for SIOCSIFFLAGS */
+ int counters[4]; /* error counters */
+#define bablcnt counters[0]
+#define misscnt counters[1]
+#define merrcnt counters[2]
+#define rstrtcnt counters[3]
+} se_softc_data[NLN];
+
+se_softc_t se_softc[NLN]; /* quick access */
+
+/*
+ * Probe the Lance to see if it's there
+ */
+private int se_open_state = 0;
+
+private int se_probe(
+ vm_offset_t reg,
+ register struct bus_device *ui)
+{
+ register se_softc_t sc;
+ se_reg_t rdp, rap;
+ int unit = ui->unit;
+
+ /*
+ * See if the interface is there by reading the lance CSR. On pmaxen
+ * and 3maxen this is superfluous, but..
+ */
+ rdp = (se_reg_t) (reg + se_sw->regspace);
+#ifdef DECSTATION
+ if (check_memory(rdp, 0))
+ return 0;
+#endif /*DECSTATION*/
+#ifdef MAPPED
+ SE_probe(reg,ui);
+#endif /*MAPPED*/
+ rap = rdp + 2; /* XXX might not be true in the future XXX */
+ /* rdp and rap are "shorts" on consecutive
+ "long" word boundaries */
+
+ /*
+ * Bind this interface to the softc.
+ */
+ sc = &se_softc_data[unit];
+ se_softc[unit] = sc;
+ sc->lnregs = (se_reg_t) (reg + se_sw->regspace);
+ sc->lnbuf = (vm_offset_t) (reg + se_sw->bufspace);
+ sc->lnoffset = (vm_offset_t) (se_sw->ln_bufspace);
+ sc->lnrom = (vm_offset_t) (reg + se_sw->romspace);
+
+ /*
+ * Reset the interface, and make sure we really do it! (the 3max
+ * seems quite stubborn about these registers)
+ */
+ se_write_reg(rap, CSR0_SELECT, CSR0_SELECT, "RAP");
+ se_write_reg(rdp, LN_CSR0_STOP, LN_CSR0_STOP, "csr0");
+
+ /*
+ * Allocate lance RAM buffer memory
+ */
+ init_lance_space(sc);
+
+ /*
+ * Initialize the chip
+ *
+ * NOTE: From now on we will only touch csr0
+ */
+ if (se_ship_init_block(sc, unit))
+ return 0;
+
+ /*
+ * Tell the world we are alive and well
+ */
+ se_open_state++;
+ return 1;
+}
+
+int se_ship_init_block(
+ register se_softc_t sc,
+ int unit)
+{
+ se_reg_t rdp = sc->lnregs;
+ se_reg_t rap;
+ register int i = 0;
+
+ rap = rdp + 2; /* XXX might not be true in the future XXX */
+
+ /*
+ * Load LANCE control block.
+ */
+
+#ifdef LUNA88K
+ /* turn on byte swap bit in csr3, set bcon bit - as in 2.5 */
+ se_write_reg(rap, CSR3_SELECT, CSR3_SELECT, "RAP");
+ se_write_reg(rdp, LN_CSR3_BSWP|LN_CSR3_BCON,
+ LN_CSR3_BSWP|LN_CSR3_BCON, "csr3");
+#endif
+
+ se_write_reg(rap, CSR1_SELECT, CSR1_SELECT, "RAP");
+ se_write_reg(rdp, Addr_lo(Lmem(sc->lninit_block)),
+ Addr_lo(Lmem(sc->lninit_block)), "csr1");
+
+ se_write_reg(rap, CSR2_SELECT, CSR2_SELECT, "RAP");
+ se_write_reg(rdp, Addr_hi(Lmem(sc->lninit_block)),
+ Addr_hi(Lmem(sc->lninit_block)), "csr2");
+
+ /*
+ * Start the INIT sequence now
+ */
+ se_write_reg(rap, CSR0_SELECT, CSR0_SELECT, "RAP");
+ *rdp = (LN_CSR0_IDON | LN_CSR0_INIT);
+ wbflush();
+
+ /* give it plenty of time to settle */
+ while (i++ < 10000) {
+ delay(100);
+ if ((*rdp & LN_CSR0_IDON) != 0)
+ break;
+ }
+	/* make sure it got out okay */
+ if ((*rdp & LN_CSR0_IDON) == 0) {
+ printf("se%d: cannot initialize\n", unit);
+ if (*rdp & LN_CSR0_ERR)
+ printf("se%d: initialization error, csr = %04x\n",
+ unit, (*rdp & 0xffff));
+ return 1;
+ }
+ /*
+ * Do not enable interrupts just yet.
+ */
+ /* se_write_reg(rdp, LN_CSR0_STOP, LN_CSR0_STOP, "csr0"); */
+
+ return 0;
+}
+
+void
+se_write_reg(
+ register se_reg_t regptr,
+ register int val,
+ register int result,
+ char *regname)
+{
+ register int i = 0;
+
+ while ((unsigned short)(*regptr) != (unsigned short)result) {
+ *regptr = (se_reg_type)val;
+ wbflush();
+ if (++i > 10000) {
+ printf("se: %s did not settle (to x%x): x%x\n",
+ regname, result, (unsigned short)(*regptr));
+ return;
+ }
+ delay(100);
+ }
+}
+
+unsigned short
+se_read_reg(
+ register se_reg_t regptr)
+{
+ return (unsigned short) (*regptr);
+}
+
+private void
+init_lance_space(
+ register se_softc_t sc)
+{
+ register int lptr; /* Generic lance pointer */
+ se_desc_t ringaddr;
+ long *rom_eaddress = (long *) sc->lnrom;
+ int i;
+ struct se_init_block init_block;
+
+ /*
+ * Allocate local RAM buffer memory for the init block,
+ * fill in our local copy then copyout.
+ */
+
+ sc->lninit_block = se_malloc(sc, sizeof (struct se_init_block));
+
+ /*
+	 * Set values on stack, then copyout en masse
+ */
+ bzero(&init_block, sizeof(init_block));
+ init_block.mode = 0;
+
+ /* byte swapping between host and lance */
+
+ init_block.phys_addr_low = ((rom_eaddress[0]>>se_sw->romstride)&0xff) |
+ (((rom_eaddress[1]>>se_sw->romstride)&0xff) << 8);
+ init_block.phys_addr_med = ((rom_eaddress[2]>>se_sw->romstride)&0xff) |
+ (((rom_eaddress[3]>>se_sw->romstride)&0xff) << 8);
+ init_block.phys_addr_high = ((rom_eaddress[4]>>se_sw->romstride)&0xff) |
+ (((rom_eaddress[5]>>se_sw->romstride)&0xff) << 8);
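+
+	/*
+	 * Worked example (illustration only): for a station address of
+	 * 08:00:2b:11:22:33 the packing above yields phys_addr_low = 0x0008,
+	 * phys_addr_med = 0x112b and phys_addr_high = 0x3322, i.e. each
+	 * 16-bit word holds two address bytes, low byte first.
+	 */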
+
+ /*
+ * Allocate both descriptor rings at once.
+ * Note that the quadword alignment requirement is
+ * inherent in the way we perform allocation,
+ * but it does depend on the size of the init block.
+ */
+ lptr = se_malloc(sc, sizeof (struct se_desc) * (NXMT + NRCV));
+
+ /*
+ * Initialize the buffer descriptors
+ */
+ init_block.recv_ring_pointer_lo = Addr_lo(Lmem(lptr));
+ init_block.recv_ring_pointer_hi = Addr_hi(Lmem(lptr));
+ init_block.recv_ring_len = RLOG;
+
+ for ( i = 0; i < NRCV ; i++, lptr += sizeof(struct se_desc)) {
+ ringaddr = (se_desc_t)Hmem(lptr);
+ sc->lnrring[i] = ringaddr;
+ sc->lnrbuf[i] = se_desc_alloc (sc, ringaddr);
+ }
+
+ init_block.xmit_ring_pointer_lo = Addr_lo(Lmem(lptr));
+ init_block.xmit_ring_pointer_hi = Addr_hi(Lmem(lptr));
+ init_block.xmit_ring_len = TLOG;
+
+ for ( i = 0 ; i < NXMT ; i++, lptr += sizeof(struct se_desc)) {
+ ringaddr = (se_desc_t)Hmem(lptr);
+ sc->lntring[i] = ringaddr;
+ sc->lntbuf[i] = se_desc_alloc (sc, ringaddr);
+ }
+
+ /*
+ * No logical address filtering
+ */
+ init_block.logical_addr_filter0 = 0;
+ init_block.logical_addr_filter1 = 0;
+ init_block.logical_addr_filter2 = 0;
+ init_block.logical_addr_filter3 = 0;
+
+ /*
+ * Move init block into lance space
+ */
+ (se_sw->desc_copyout)((vm_offset_t)&init_block, Hmem(sc->lninit_block), sizeof(init_block));
+ wbflush();
+}
+
+/*
+ * Interface exists: make available by filling in network interface
+ * record. System will initialize the interface when it is ready
+ * to accept packets.
+ */
+private void
+se_attach(
+ register struct bus_device *ui)
+{
+ unsigned char *enaddr;
+ struct ifnet *ifp;
+ long *rom_eaddress;
+ int unit = ui->unit;
+ se_softc_t sc = se_softc[unit];
+
+ rom_eaddress = (long *) sc->lnrom;
+
+ /*
+ * Read the address from the prom and save it.
+ */
+ enaddr = sc->is_addr;
+ enaddr[0] = (unsigned char) ((rom_eaddress[0] >> se_sw->romstride) & 0xff);
+ enaddr[1] = (unsigned char) ((rom_eaddress[1] >> se_sw->romstride) & 0xff);
+ enaddr[2] = (unsigned char) ((rom_eaddress[2] >> se_sw->romstride) & 0xff);
+ enaddr[3] = (unsigned char) ((rom_eaddress[3] >> se_sw->romstride) & 0xff);
+ enaddr[4] = (unsigned char) ((rom_eaddress[4] >> se_sw->romstride) & 0xff);
+ enaddr[5] = (unsigned char) ((rom_eaddress[5] >> se_sw->romstride) & 0xff);
+
+ printf(": %x-%x-%x-%x-%x-%x",
+ (rom_eaddress[0] >> se_sw->romstride) & 0xff,
+ (rom_eaddress[1] >> se_sw->romstride) & 0xff,
+ (rom_eaddress[2] >> se_sw->romstride) & 0xff,
+ (rom_eaddress[3] >> se_sw->romstride) & 0xff,
+ (rom_eaddress[4] >> se_sw->romstride) & 0xff,
+ (rom_eaddress[5] >> se_sw->romstride) & 0xff);
+
+ /*
+ * Initialize the standard interface descriptor
+ */
+ ifp = &sc->is_if;
+ ifp->if_unit = unit;
+ ifp->if_header_size = sizeof(struct ether_header);
+ ifp->if_header_format = HDR_ETHERNET;
+ ifp->if_address_size = 6;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags |= IFF_BROADCAST;
+
+ ifp->if_address = (char *) enaddr;
+
+ if_init_queues(ifp);
+#ifdef MAPPED
+ SE_attach(ui);
+#endif /*MAPPED*/
+
+}
+
+/*
+ * Use a different hardware address for interface
+ */
+void
+se_setaddr(
+ unsigned char eaddr[6],
+ int unit)
+{
+ register se_softc_t sc = se_softc[unit];
+ struct se_init_block init_block;
+
+ /*
+ * Modify initialization block accordingly
+ */
+ (se_sw->desc_copyin) (Hmem(sc->lninit_block), (vm_offset_t)&init_block, sizeof(init_block));
+ bcopy(eaddr, &init_block.phys_addr_low, sizeof(*eaddr));
+ (se_sw->desc_copyout)((vm_offset_t)&init_block, Hmem(sc->lninit_block), sizeof(init_block));
+ /*
+ * Make a note of it
+ */
+ bcopy(eaddr, sc->is_addr, sizeof(*eaddr));
+
+ /*
+ * Restart the interface
+ */
+ se_restart(&sc->is_if);
+ se_init(unit);
+}
+
+/*
+ * Restart interface
+ *
+ * We use this internally on those errors that hang the chip,
+ * not sure yet what use the MI code will make of it.
+ *
+ * After stopping the chip and effectively turning off the interface
+ * we release all pending buffers and cause the chip to init
+ * itself. We do not enable interrupts here.
+ */
+int
+se_restart( register struct ifnet *ifp )
+{
+ register se_softc_t sc = se_softc[ifp->if_unit];
+ se_reg_t rdp;
+ register int i;
+
+ rdp = sc->lnregs;
+
+ /*
+ * stop the chip
+ */
+ se_write_reg(rdp, LN_CSR0_STOP, LN_CSR0_STOP, "csr0");
+
+ /*
+ * stop network activity
+ */
+ if (ifp->if_flags & IFF_RUNNING) {
+ ifp->if_flags &= ~(IFF_UP | IFF_RUNNING);
+ sc->se_flags &= ~(IFF_UP | IFF_RUNNING);
+ }
+ sc->rstrtcnt++;
+
+ if (se_verbose)
+ printf("se%d: %d restarts\n", ifp->if_unit, sc->rstrtcnt);
+
+ /*
+ * free up any buffers currently in use
+ */
+ for (i = 0; i < NXMT; i++)
+ if (sc->tpkt[i]) {
+ iodone(sc->tpkt[i]);
+ sc->tpkt[i] = (io_req_t) 0;
+ }
+ /*
+ * INIT the chip again, no need to reload init block address.
+ */
+ se_ship_init_block(sc, ifp->if_unit);
+
+ return (0);
+}
+
+/*
+ * Initialize the interface.
+ */
+private void
+se_init( int unit )
+{
+ register se_softc_t sc = se_softc[unit];
+ register se_desc_t *rp;
+ register struct ifnet *ifp = &sc->is_if;
+ se_reg_t rdp;
+ short mode;
+ spl_t s;
+ int i;
+
+ if (ifp->if_flags & IFF_RUNNING)
+ return;
+
+ rdp = sc->lnregs;
+
+ /*
+ * Init the buffer descriptors and indexes for each of the rings.
+ */
+ for (i = 0, rp = sc->lnrring; i < NRCV; i++, rp++)
+ se_desc_set_status(*rp, LN_RSTATE_OWN);
+
+ for (i = 0, rp = sc->lntring; i < NXMT; i++, rp++)
+ se_desc_set_status(*rp, 0);
+
+ sc->xmt_count = sc->xmt_complete = sc->xmt_last = sc->rcv_last = 0;
+
+ /*
+ * Deal with loopback mode operation
+ */
+ s = splimp();
+
+ (se_sw->desc_copyin) (Hmem(sc->lninit_block), (vm_offset_t)&mode, sizeof(mode));
+
+ if (ifp->if_flags & IFF_LOOPBACK
+ && ((mode & LN_MODE_LOOP) == 0)) {
+ /* if not already in loopback mode, do external loopback */
+ mode &= ~LN_MODE_INTL;
+ mode |= LN_MODE_LOOP;
+ (se_sw->desc_copyout) ((vm_offset_t)&mode, Hmem(sc->lninit_block), sizeof(mode));
+ se_restart(ifp);
+ se_init(ifp->if_unit);
+ splx(s);
+ return;
+ }
+
+ ifp->if_flags |= (IFF_UP | IFF_RUNNING);
+ sc->se_flags |= (IFF_UP | IFF_RUNNING);
+
+ /*
+ * Start the Lance and enable interrupts
+ */
+ *rdp = (LN_CSR0_STRT | LN_CSR0_INEA);
+ wbflush();
+
+ /*
+ * See if anything is already queued
+ */
+ se_start(unit);
+ splx(s);
+}
+
+
+/*
+ * Shut off the lance
+ */
+void
+se_stop(int unit)
+{
+ se_reg_t rdp = se_softc[unit]->lnregs;
+
+ se_write_reg(rdp, LN_CSR0_STOP, LN_CSR0_STOP, "csr0");
+}
+
+
+/*
+ * Open the device, declaring the interface up
+ * and enabling lance interrupts.
+ */
+/*ARGSUSED*/
+int
+se_open(
+ int unit,
+ int flag)
+{
+ register se_softc_t sc = se_softc[unit];
+
+ if (unit >= NLN)
+ return EINVAL;
+ if (!se_open_state)
+ return ENXIO;
+
+ sc->is_if.if_flags |= IFF_UP;
+ se_open_state++;
+ se_init(unit);
+ return (0);
+}
+
+#ifdef MAPPED
+int se_use_mapped_interface[NLN];
+#endif /*MAPPED*/
+
+void
+se_normal(int unit)
+{
+#ifdef MAPPED
+ se_use_mapped_interface[unit] = 0;
+#endif /*MAPPED*/
+ if (se_softc[unit]) {
+ se_restart((struct ifnet *)se_softc[unit]);
+ se_init(unit);
+ }
+}
+
+/*
+ * Ethernet interface interrupt routine
+ */
+void
+se_intr(
+ int unit,
+ spl_t spllevel)
+{
+ register se_softc_t sc = se_softc[unit];
+ se_reg_t rdp;
+ register struct ifnet *ifp = &sc->is_if;
+ register unsigned short csr;
+
+#ifdef MAPPED
+ if (se_use_mapped_interface[unit])
+ {
+ SE_intr(unit,spllevel);
+ return;
+ }
+#endif /*MAPPED*/
+
+ if (se_open_state < 2) { /* Stray, or not open for business */
+ rdp = (sc ? sc->lnregs : (se_reg_t)se_unprobed_addr);
+ *rdp |= LN_CSR0_STOP;
+ wbflush();
+ return;
+ }
+ rdp = sc->lnregs;
+
+ /*
+ * Read the CSR and process any error condition.
+ * Later on, restart the lance by writing back
+ * the CSR (for set-to-clear bits).
+ */
+ csr = *rdp; /* pick up the csr */
+
+ /* drop spurious interrupts */
+ if ((csr & LN_CSR0_INTR) == 0)
+ return;
+
+#ifdef DECSTATION
+ splx(spllevel); /* drop priority now */
+#endif /*DECSTATION*/
+again:
+ /*
+ * Check for errors first
+ */
+ if ( csr & LN_CSR0_ERR ) {
+ if (csr & LN_CSR0_MISS) {
+ /*
+ * Stop the chip to prevent a corrupt packet from
+ * being transmitted. There is a known problem with
+ * missed packet errors causing corrupted data to
+ * be transmitted to the same host as was just
+ * transmitted, with a valid crc appended to the
+ * packet. The only solution is to stop the chip,
+ * which will clear the Lance silo, thus preventing
+ * the corrupt data from being sent.
+ */
+ se_write_reg(rdp, LN_CSR0_STOP, LN_CSR0_STOP, "csr0");
+
+ sc->misscnt++;
+ if (se_verbose) {
+ int me = 0, lance = 0, index;
+ struct se_desc r;
+ for (index = 0; index < NRCV; index++) {
+ (se_sw->desc_copyin)(
+ (vm_offset_t)sc->lnrring[index],
+ (vm_offset_t)&r,
+ sizeof(r));
+ if (r.status & LN_RSTATE_OWN)
+ lance++;
+ else
+ me++;
+ }
+ printf("se%d: missed packet (%d) csr = %x, Lance %x, me %x\n",
+ unit, sc->misscnt, csr, lance, me);
+ }
+ se_restart(ifp);
+ se_init(unit);
+ return;
+ }
+ if (csr & LN_CSR0_BABL) {
+ sc->bablcnt++;
+ if (se_verbose)
+ printf("se%d: xmt timeout (%d)\n",
+ unit, sc->bablcnt);
+ }
+ if (csr & LN_CSR0_MERR) {
+ sc->merrcnt++;
+ printf("se%d: memory error (%d)\n",
+ unit, sc->merrcnt);
+
+ if (((csr & LN_CSR0_RXON) == 0)
+ || ((csr & LN_CSR0_TXON) == 0)) {
+ se_restart(ifp);
+ se_init(unit);
+ return;
+ }
+ }
+ }
+
+ *rdp = LN_CSR0_INEA | (csr & LN_CSR0_WTC);
+ wbflush();
+
+ if ( csr & LN_CSR0_RINT )
+ se_rint( unit );
+
+ if ( csr & LN_CSR0_TINT )
+ se_tint( unit );
+
+ if ((csr = *rdp) & (LN_CSR0_RINT | LN_CSR0_TINT))
+ goto again;
+}
+
+/*
+ * Handle a transmitter complete interrupt.
+ */
+void
+se_tint(int unit)
+{
+ register se_softc_t sc = se_softc[unit];
+ register index;
+ register status;
+ io_req_t request;
+ struct se_desc r;
+
+ /*
+ * Free up descriptors for all packets in queue for which
+ * transmission is complete. Start from queue tail, stop at first
+ * descriptor we do not OWN, or which is in an inconsistent state
+ * (lance still working).
+ */
+
+ while ((sc->xmt_complete != sc->xmt_last) && (sc->xmt_count > 0)) {
+
+ index = sc->xmt_complete;
+ (se_sw->desc_copyin) ((vm_offset_t)sc->lntring[index],
+ (vm_offset_t)&r, sizeof(r));
+ status = r.status;
+
+ /*
+ * Does lance still own it ?
+ */
+ if (status & LN_TSTATE_OWN)
+ break;
+
+ /*
+		 * Packet sent all right, release queue slot.
+ */
+ request = sc->tpkt[index];
+ sc->tpkt[index] = (io_req_t) 0;
+ sc->xmt_complete = ++index & (NXMT - 1);
+ --sc->xmt_count;
+
+ sc->is_if.if_opackets++;
+ if (status & (LN_TSTATE_DEF|LN_TSTATE_ONE|LN_TSTATE_MORE))
+ sc->is_if.if_collisions++;
+
+ /*
+ * Check for transmission errors.
+ */
+ if (!se_loopback_hack && status & LN_TSTATE_ERR) {
+ sc->is_if.if_oerrors++;
+ if (se_verbose)
+ printf("se%d: xmt error (x%x)\n", unit, r.status2);
+
+ if (r.status2 & (LN_TSTATE2_RTRY|LN_TSTATE2_LCOL))
+ sc->is_if.if_collisions++;
+
+ /*
+ * Restart chip on errors that disable the
+ * transmitter.
+ */
+ iodone(request);
+ if (r.status2 & LN_TSTATE2_DISABLE) {
+ register struct ifnet *ifp = &sc->is_if;
+ se_restart(ifp);
+ se_init(ifp->if_unit);
+ return;
+ }
+ } else if (request) {
+ /*
+ * If this was a broadcast packet loop it back.
+ * Signal successful transmission of the packet.
+ */
+ register struct ether_header *eh;
+ register int i;
+
+ eh = (struct ether_header *) request->io_data;
+ /* ether broadcast address is in the spec */
+ for (i = 0; (i < 6) && (eh->ether_dhost[i] == 0xff); i++)
+ ; /* nop */
+ /* sending to ourselves makes sense sometimes */
+ if (i != 6 && se_loopback_hack)
+ for (i = 0;
+ (i < 6) && (eh->ether_dhost[i] == sc->is_addr[i]);
+ i++)
+ ; /* nop */
+ if (i == 6)
+ se_read(sc, 0, request->io_count, request);
+ iodone(request);
+ }
+ }
+ /*
+ * Dequeue next transmit request, if any.
+ */
+ if (sc->xmt_count <= 0)
+ se_start(unit);
+}
+
+/*
+ * Handle a receiver complete interrupt.
+ */
+void
+se_rint(int unit)
+{
+ register se_softc_t sc = se_softc[unit];
+ register index, first, len;
+ unsigned char status, status1;
+ int ring_cnt;
+ struct se_desc r;
+
+ /*
+ * Starting from where we left off, look around the receive ring and
+ * pass on all complete packets.
+ */
+
+ for (;; sc->rcv_last = ++index & (NRCV - 1)) {
+
+ /*
+ * Read in current descriptor
+ */
+read_descriptor:
+ (se_sw->desc_copyin) ((vm_offset_t)sc->lnrring[sc->rcv_last],
+ (vm_offset_t)&r, sizeof(r));
+ status = r.status;
+ if (status & LN_RSTATE_OWN)
+ break;
+ first = index = sc->rcv_last;
+
+ /*
+ * If not the start of a packet, error
+ */
+ if (!(status & LN_RSTATE_STP)) {
+ if (se_verbose)
+ printf("se%d: Rring #%d, status=%x !STP\n",
+ unit, index, status);
+ break;
+ }
+ /*
+ * See if packet is chained (should not) by looking at
+ * the last descriptor (OWN clear and ENP set).
+ * Remember the status info in this last descriptor.
+ */
+ ring_cnt = 1, status1 = status;
+ while (((status1 & (LN_RSTATE_ERR | LN_RSTATE_OWN | LN_RSTATE_ENP)) == 0) &&
+ (ring_cnt++ <= NRCV)) {
+ struct se_desc r1;
+ index = (index + 1) & (NRCV - 1);
+ (se_sw->desc_copyin) ((vm_offset_t)sc->lnrring[index],
+ (vm_offset_t)&r1, sizeof(r1));
+ status1 = r1.status;
+ }
+
+ /*
+ * Chained packet (--> illegally sized!); re-init the
+ * descriptors involved and ignore this bogus packet. I
+		 * don't know how, but it really happens that we get these
+ * monsters.
+ */
+ if (ring_cnt > 1) {
+ /*
+ * Return all descriptors to lance
+ */
+ se_desc_set_status(sc->lnrring[first], LN_RSTATE_OWN);
+ while (first != index) {
+ first = (first + 1) & (NRCV - 1);
+ se_desc_set_status(sc->lnrring[first], LN_RSTATE_OWN);
+ }
+ if ((status1 & LN_RSTATE_ERR) && se_verbose)
+ printf("se%d: rcv error %x (chained)\n", unit, status1);
+ continue;
+ }
+
+ /*
+ * Good packets must be owned by us and have the end of
+ * packet flag. And nothing else.
+ */
+ if ((status & ~LN_RSTATE_STP) == LN_RSTATE_ENP) {
+ sc->is_if.if_ipackets++;
+
+ if ((len = r.message_size) == 0)
+ /* race seen on pmaxen: the lance
+ * has not updated the size yet ??
+ */
+ goto read_descriptor;
+ /*
+ * Drop trailing CRC bytes from len and ship packet
+ * up
+ */
+ se_read(sc, (volatile char*)sc->lnrbuf[first], len-4,0);
+
+ /*
+ * Return descriptor to lance, and move on to next
+ * packet
+ */
+ r.status = LN_RSTATE_OWN;
+ (se_sw->desc_copyout)((vm_offset_t)&r,
+ (vm_offset_t)sc->lnrring[first],
+ sizeof(r));
+ continue;
+ }
+ /*
+ * Not a good packet, see what is wrong
+ */
+ if (status & LN_RSTATE_ERR) {
+ sc->is_if.if_ierrors++;
+
+ if (se_verbose)
+ printf("se%d: rcv error (x%x)\n", unit, status);
+
+ /*
+ * Return descriptor to lance
+ */
+ se_desc_set_status(sc->lnrring[first], LN_RSTATE_OWN);
+ } else {
+ /*
+			 * Race condition vs. the lance; wait for the next
+ * interrupt.
+ */
+ return;
+ }
+ }
+}
+
+/*
+ * Output routine.
+ * Call common function for wiring memory,
+ * come back later (to se_start) to get
+ * things going.
+ */
+io_return_t
+se_output(
+ int dev,
+ io_req_t ior)
+{
+ return net_write(&se_softc[dev]->is_if, (int(*)())se_start, ior);
+}
+
+/*
+ * Start output on interface.
+ *
+ */
+void
+se_start(int unit)
+{
+ register se_softc_t sc = se_softc[unit];
+ io_req_t request;
+ struct se_desc r;
+ int tlen;
+ spl_t s;
+ register int index;
+
+ s = splimp();
+
+ for (index = sc->xmt_last;
+ sc->xmt_count < (NXMT - 1);
+ sc->xmt_last = index = (index + 1) & (NXMT - 1)) {
+ /*
+ * Dequeue the next transmit request, if any.
+ */
+ IF_DEQUEUE(&sc->is_if.if_snd, request);
+ if (request == 0) {
+ /*
+ * Tell the lance to send the packet now
+ * instead of waiting until the next 1.6 ms
+ * poll interval expires.
+ */
+ *sc->lnregs = LN_CSR0_TDMD | LN_CSR0_INEA;
+ splx(s);
+ return; /* Nothing on the queue */
+ }
+
+ /*
+ * Keep request around until transmission complete
+ */
+ sc->tpkt[index] = request;
+ tlen = copy_to_lance(request, sc->lntbuf[index]);
+
+ /*
+ * Give away buffer. Must copyin/out, set len,
+ * and set the OWN flag. We do not do chaining.
+ */
+ (se_sw->desc_copyin)((vm_offset_t)sc->lntring[index],
+ (vm_offset_t)&r, sizeof(r));
+ r.buffer_size = -(tlen) | 0xf000;
+ r.status = (LN_TSTATE_OWN | LN_TSTATE_STP | LN_TSTATE_ENP);
+ (se_sw->desc_copyout)((vm_offset_t)&r,
+ (vm_offset_t)sc->lntring[index],
+ sizeof(r));
+ wbflush();
+
+ sc->xmt_count++;
+ }
+ /*
+ * Since we actually have queued new packets, tell
+ * the chip to rescan the descriptors _now_.
+	 * It is quite unlikely that the ring is full,
+	 * but if it is .. all the more reason to do it!
+ */
+ *sc->lnregs = LN_CSR0_TDMD | LN_CSR0_INEA;
+ splx(s);
+}
+
+
+/*
+ * Pull a packet off the interface and
+ * hand it up to the higher levels.
+ *
+ * Simulate broadcast packets in software.
+ */
+void
+se_read(
+ register se_softc_t sc,
+ volatile char *lnrbuf,
+ int len,
+ io_req_t loop_back)
+{
+ register struct ifnet *ifp = &sc->is_if;
+ register ipc_kmsg_t new_kmsg;
+ char *hdr, *pkt;
+
+ if (len <= sizeof(struct ether_header))
+ return; /* sanity */
+
+ /*
+ * Get a new kmsg to put data into.
+ */
+ new_kmsg = net_kmsg_get();
+ if (new_kmsg == IKM_NULL) {
+ /*
+ * No room, drop the packet
+ */
+ ifp->if_rcvdrops++;
+ return;
+ }
+
+ hdr = net_kmsg(new_kmsg)->header;
+ pkt = net_kmsg(new_kmsg)->packet;
+
+#define OFF0 (sizeof(struct ether_header) - sizeof(struct packet_header))
+#define OFF1 (OFF0 & ~3)
+ if (loop_back) {
+ bcopy(loop_back->io_data, hdr, sizeof(struct ether_header));
+ bcopy(loop_back->io_data + OFF0,
+ pkt, len - OFF0);
+ } else
+ copy_from_lance(lnrbuf, len, (struct ether_header*)hdr,
+ (struct packet_header*)pkt);
+
+ /*
+ * Set up the 'fake' header with length. Type has been left
+ * in the correct place.
+ */
+ len = len - OFF0;
+ ((struct packet_header *)pkt)->length = len;
+
+ /*
+ * Hand the packet to the network module.
+ */
+ net_packet(ifp, new_kmsg, len, ethernet_priority(new_kmsg));
+}
+
+
+/*
+ * Get a packet out of Lance memory and into main memory.
+ */
+private void
+copy_from_lance(
+ register volatile unsigned char *rbuf,
+ register unsigned int nbytes,
+ struct ether_header *hdr,
+ struct packet_header *pkt)
+{
+ /*
+ * Read in ethernet header
+ */
+ (se_sw->data_copyin) ((vm_offset_t)rbuf, (vm_offset_t)hdr, sizeof(struct ether_header));
+
+ nbytes -= sizeof(struct ether_header);
+ rbuf += (se_sw->mapoffs) (sizeof(struct ether_header));
+
+ pkt->type = (unsigned short) hdr->ether_type;
+
+ (se_sw->data_copyin) ((vm_offset_t)rbuf, (vm_offset_t)(pkt + 1), nbytes);
+}
+
+
+/*
+ * Move a packet into Lance space
+ */
+private int
+copy_to_lance(
+ register io_req_t request,
+ volatile char *sbuf)
+{
+ register unsigned short *dp;
+ register int len;
+
+ dp = (unsigned short *) request->io_data;
+ len = request->io_count;
+
+ if (len > (int)(ETHERMTU + sizeof(struct ether_header))) {
+ printf("se: truncating HUGE packet\n");
+ len = ETHERMTU + sizeof(struct ether_header);
+ }
+
+ (se_sw->data_copyout) ((vm_offset_t)dp, (vm_offset_t)sbuf, len);
+
+ if (len < LN_MINBUF_NOCH)
+ /*
+ * The lance needs at least this much data in a packet. Who
+ * cares if I send some garbage that was left in the lance
+ * buffer ? If one can spoof packets then one can spoof
+ * packets!
+ */
+ len = LN_MINBUF_NOCH;
+ return len;
+}
+
+/*
+ * Reset a descriptor's flags.
+ * Optionally give the descriptor to the lance
+ */
+private void
+se_desc_set_status (
+ register se_desc_t lndesc,
+ int val)
+{
+ struct se_desc desc;
+
+ (se_sw->desc_copyin) ((vm_offset_t)lndesc, (vm_offset_t)&desc, sizeof(desc));
+ desc.desc4.bits = 0;
+ desc.status = val;
+ (se_sw->desc_copyout) ((vm_offset_t)&desc, (vm_offset_t)lndesc, sizeof(desc));
+ wbflush();
+}
+
+/*
+ * Set/Get status functions
+ */
+int
+se_get_status(
+ int dev,
+ dev_flavor_t flavor,
+ dev_status_t status, /* pointer to OUT array */
+ natural_t *status_count) /* out */
+{
+ return (net_getstat(&se_softc[dev]->is_if,
+ flavor, status, status_count));
+}
+
+int
+se_set_status(
+ int unit,
+ dev_flavor_t flavor,
+ dev_status_t status,
+ natural_t status_count)
+{
+ register se_softc_t sc;
+
+ sc = se_softc[unit];
+
+
+ switch (flavor) {
+
+ case NET_STATUS:
+ break;
+
+ case NET_ADDRESS: {
+
+ register union ether_cvt {
+ unsigned char addr[6];
+ int lwd[2];
+ } *ec = (union ether_cvt *) status;
+
+ if (status_count < sizeof(*ec) / sizeof(int))
+ return (D_INVALID_SIZE);
+
+ ec->lwd[0] = ntohl(ec->lwd[0]);
+ ec->lwd[1] = ntohl(ec->lwd[1]);
+
+ se_setaddr(ec->addr, unit);
+
+ break;
+ }
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+
+ return (D_SUCCESS);
+}
+
+
+/*
+ * Install new filter.
+ * Nothing special needs to be done here.
+ */
+io_return_t
+se_setinput(
+ int dev,
+ ipc_port_t receive_port,
+ int priority,
+ filter_t *filter,
+ natural_t filter_count)
+{
+ return (net_set_filter(&se_softc[dev]->is_if,
+ receive_port, priority,
+ filter, filter_count));
+}
+
+/*
+ * Allocate and initialize a ring descriptor.
+ * Allocates a buffer from the lance memory and writes a descriptor
+ * for that buffer to the host virtual address LNDESC.
+ */
+private volatile long
+*se_desc_alloc (
+ register se_softc_t sc,
+ register se_desc_t lndesc)
+{
+ register vm_offset_t dp; /* data pointer */
+ struct se_desc desc;
+
+ /*
+ * Allocate buffer in lance space
+ */
+ dp = se_malloc(sc, LN_BUFFER_SIZE);
+
+ /*
+ * Build a descriptor pointing to it
+ */
+ desc.addr_low = Addr_lo(Lmem(dp));
+ desc.addr_hi = Addr_hi(Lmem(dp));
+ desc.status = 0;
+ desc.buffer_size = -LN_BUFFER_SIZE;
+ desc.desc4.bits = 0;
+
+ /*
+ * Copy the descriptor to lance space
+ */
+ (se_sw->desc_copyout) ((vm_offset_t)&desc, (vm_offset_t)lndesc, sizeof(desc));
+ wbflush();
+
+ return (volatile long *) Hmem(dp);
+}
+
+/*
+ * Allocate a chunk of lance RAM buffer. Since we never
+ * give lance RAM buffer memory back, we'll just step up the
+ * byte-count on a per-unit basis.
+ *
+ * The return value is an index into the lance memory, which can be
+ * passed with Hmem() and Lmem() to get the host and chip virtual addresses.
+ */
+private vm_offset_t
+se_malloc(
+ se_softc_t sc,
+ int size)
+{
+ register vm_offset_t ret;
+
+ /*
+ * On first call, zero lance memory
+ */
+ if (sc->lnsbrk == 0)
+ (se_sw->bzero) (Hmem(0), LN_MEMORY_SIZE);
+
+ /*
+ * Start out on the first double longword boundary
+	 * (this accommodates some machines, with minimal loss)
+ */
+ if (sc->lnsbrk & 0xf)
+ sc->lnsbrk = (sc->lnsbrk + 0x10) & ~0xf;
+
+ ret = sc->lnsbrk;
+ sc->lnsbrk += size;
+
+ if (sc->lnsbrk > LN_MEMORY_SIZE)
+ panic("se_malloc");
+
+ return ret;
+}
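+
+/*
+ * Illustrative usage, not in the original source: a caller turns the
+ * returned lance offset into the two address spaces with the Hmem()
+ * and Lmem() macros, e.g.
+ *
+ *	lna  = se_malloc(sc, 128);
+ *	host = Hmem(lna);
+ *	chip = Lmem(lna);
+ */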
+
+#endif /* NLN > 0 */
diff --git a/chips/lance.h b/chips/lance.h
new file mode 100644
index 00000000..f799d7e6
--- /dev/null
+++ b/chips/lance.h
@@ -0,0 +1,284 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: if_se.h
+ * Authors: Robert V. Baron and Alessandro Forin
+ * Date: 1989
+ *
+ */
+/*
+ * AMD 7990 "Lance" Ethernet interface definitions.
+ * All structures are as seen from the Lance,
+ * both in the memory-alignment sense and in the
+ * byte-order sense. Mapping to host memory is
+ * model specific: on pmaxen there is a 16 bit gap
+ * every other 16 bits.
+ */
+
+#include <scsi/scsi_endian.h>
+
+/*
+ * Selection of one of the four Lance CSRs is done in a
+ * two-step process: select which CSR first by writing
+ * into the RAP, then access the register via the RDP.
+ * Note that (a) the selection remains, and (b) all
+ * but CSR0 can only be accessed when the chip is stopped.
+ * These registers are mapped in the 'registers' I/O segment.
+ */
+#ifndef se_reg_type
+#define se_reg_type unsigned short
+#endif
+typedef volatile se_reg_type *se_reg_t;
+
+#define CSR0_SELECT 0x0 /* Valid RAP selections */
+#define CSR1_SELECT 0x1
+#define CSR2_SELECT 0x2
+#define CSR3_SELECT 0x3
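+
+/*
+ * Illustrative sketch (not part of the interface): a CSR access is a
+ * two-step sequence, e.g. reading CSR0 with rdp/rap pointers set up as
+ * in lance.c:
+ *
+ *	*rap = CSR0_SELECT;	wbflush();
+ *	csr0 = *rdp;
+ */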
+
+/*
+ * Bit definitions for the CSR0.
+ * Legend:
+ * R=Readable W=Writeable
+ * S=Set-on-write-1 C=Clear-on-write-1
+ */
+
+#define LN_CSR0_INIT 0x0001 /* (RS) Initialize */
+#define LN_CSR0_STRT 0x0002 /* (RS) Start */
+#define LN_CSR0_STOP 0x0004 /* (RS) Stop */
+#define LN_CSR0_TDMD 0x0008 /* (RS) Transmit demand */
+#define LN_CSR0_TXON 0x0010 /* (R) Transmitter enabled */
+#define LN_CSR0_RXON 0x0020 /* (R) Receiver enabled */
+#define LN_CSR0_INEA 0x0040 /* (RW) Interrupt enable */
+#define LN_CSR0_INTR 0x0080 /* (R) Interrupt pending */
+#define LN_CSR0_IDON 0x0100 /* (RC) Initialization done */
+#define LN_CSR0_TINT 0x0200 /* (RC) Transmitter interrupt */
+#define LN_CSR0_RINT 0x0400 /* (RC) Receiver interrupt */
+#define LN_CSR0_MERR 0x0800 /* (RC) Memory error during DMA */
+#define LN_CSR0_MISS 0x1000 /* (RC) No available receive buffers */
+#define LN_CSR0_CERR 0x2000 /* (RC) Signal quality (SQE) test */
+#define LN_CSR0_BABL 0x4000 /* (RC) Babble error: xmit too long */
+#define LN_CSR0_ERR 0x8000 /* (R) Error summary: any of the 4 above */
+
+#define LN_CSR0_WTC 0x7f00 /* Write-to-clear bits */
+
+/*
+ * Bit definitions for the CSR1.
+ */
+
+#define LN_CSR1_MBZ 0x0001 /* Must be zero */
+#define LN_CSR1_IADR 0xfffe /* (RW) Initialization block address (low) */
+
+/*
+ * Bit definitions for the CSR2.
+ */
+
+#define LN_CSR2_IADR 0x00ff /* (RW) Initialization block address (high) */
+#define LN_CSR2_XXXX 0xff00 /* (RW) Reserved */
+
+/*
+ * Bit definitions for the CSR3.
+ */
+
+#define LN_CSR3_BCON 0x0001 /* (RW) BM/HOLD Control */
+#define LN_CSR3_ACON 0x0002 /* (RW) ALE Control */
+#define LN_CSR3_BSWP 0x0004 /* (RW) Byte Swap */
+#define LN_CSR3_XXXX 0xfff8 /* (RW) Reserved */
+
+
+/*
+ * Initialization Block
+ *
+ * Read when the INIT command is sent to the lance.
+ */
+
+struct se_init_block {
+ unsigned short mode; /* Mode Register, see below */
+ unsigned short phys_addr_low; /* Ethernet address */
+ unsigned short phys_addr_med; /* Ethernet address */
+ unsigned short phys_addr_high; /* Ethernet address */
+ unsigned short logical_addr_filter0; /* Multicast filter */
+ unsigned short logical_addr_filter1; /* Multicast filter */
+ unsigned short logical_addr_filter2; /* Multicast filter */
+ unsigned short logical_addr_filter3; /* Multicast filter */
+ unsigned short recv_ring_pointer_lo; /* Receive Ring ptr, low */
+ BITFIELD_3(unsigned char,
+ recv_ring_pointer_hi, /* Receive Ring ptr, high */
+ reserved0 : 5,
+ recv_ring_len : 3); /* Length: log2(nbuffers) */
+ unsigned short xmit_ring_pointer_lo; /* Transmit Ring ptr, low */
+ BITFIELD_3(unsigned char,
+ xmit_ring_pointer_hi, /* Transmit Ring ptr, high */
+ reserved1 : 5,
+ xmit_ring_len : 3); /* Length: log2(nbuffers) */
+};
+
+typedef volatile struct se_init_block *se_init_block_t;
+
+/*
+ * Bit definitions for the MODE word
+ * (Normally set to 0)
+ */
+
+#define LN_MODE_DRX 0x0001 /* Disable Receiver */
+#define LN_MODE_DTX 0x0002 /* Disable Transmitter */
+#define LN_MODE_LOOP 0x0004 /* Loopback mode */
+#define LN_MODE_DTRC 0x0008 /* Disable CRC generation */
+#define LN_MODE_COLL 0x0010 /* Force collision */
+#define LN_MODE_DRTY 0x0020 /* Disable retry */
+#define LN_MODE_INTL 0x0040 /* Internal Loopback mode */
+#define LN_MODE_XXXX 0x7f80 /* Reserved */
+#define LN_MODE_PROM 0x8000 /* Promiscuous mode */
+
+/*
+ * Bit definitions for the ring pointers
+ */
+
+#define LN_RNGP_LOW 0xfffc /* longword aligned */
+
+
+/*
+ * Buffer Descriptors
+ * Legend:
+ * H-set-by-Host C-set-by-chip
+ */
+
+struct se_desc {
+ unsigned short addr_low; /* (H) Buffer pointer low */
+ BITFIELD_2(unsigned char,
+ addr_hi, /* (H) Buffer pointer high */
+ status); /* (HC) Buffer status */
+ unsigned short buffer_size; /* (H) Buffer length (bytes),*/
+ /* bits 15..12 must be ones */
+ union {
+ struct {
+ BITFIELD_2(unsigned short,
+ bcnt : 12, /* (C) Rcvd data size */
+ res : 4); /* Reads as zeroes */
+ } rcv;
+ struct {
+ BITFIELD_2(unsigned short,
+ TDR : 10, /* (C) Time Domain Reflectometry */
+ flg2 : 6); /* (C) Xmit status */
+ } xmt;
+ unsigned short bits;
+ } desc4;
+#define message_size desc4.rcv.bcnt
+#define tdr desc4.xmt.TDR
+#define status2 desc4.xmt.flg2
+};
+
+typedef volatile struct se_desc *se_desc_t;
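+
+/*
+ * Worked example (illustration only): buffer_size holds the two's
+ * complement of the byte count with bits 15..12 forced to ones, so a
+ * 64-byte transmit buffer is described as -(64) | 0xf000 = 0xffc0.
+ */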
+
+/*
+ * Bit definition for STATUS byte (receive case)
+ */
+
+#define LN_RSTATE_ENP 0x01 /* (C) End of Packet */
+#define LN_RSTATE_STP 0x02 /* (C) Start of packet */
+#define LN_RSTATE_BUFF 0x04 /* (C) Buffer error */
+#define LN_RSTATE_CRC 0x08 /* (C) CRC error */
+#define LN_RSTATE_OFLO 0x10 /* (C) SILO Overflow */
+#define LN_RSTATE_FRAM 0x20 /* (C) Framing error */
+#define LN_RSTATE_ERR 0x40 /* (C) Error summary */
+#define LN_RSTATE_OWN 0x80 /* (C) Owned by Lance Chip (if set) */
+
+
+/*
+ * Bit definition for STATUS byte (transmit case)
+ */
+
+#define LN_TSTATE_ENP 0x01 /* (H) End of Packet */
+#define LN_TSTATE_STP 0x02 /* (H) Start of packet */
+#define LN_TSTATE_DEF 0x04 /* (C) Deferred */
+#define LN_TSTATE_ONE 0x08 /* (C) Retried exactly once */
+#define LN_TSTATE_MORE 0x10 /* (C) Retried more than once */
+#define LN_TSTATE_XXXX 0x20 /* Reserved */
+#define LN_TSTATE_ERR 0x40 /* (C) Error summary (see status2) */
+#define LN_TSTATE_OWN 0x80 /* (H) Owned by Lance Chip (if set) */
+
+/*
+ * Bit definitions for STATUS2 byte (transmit case)
+ */
+
+#define LN_TSTATE2_RTRY 0x01 /* (C) Failed after 16 retransmissions */
+#define LN_TSTATE2_LCAR 0x02 /* (C) Loss of Carrier */
+#define LN_TSTATE2_LCOL 0x04 /* (C) Late collision */
+#define LN_TSTATE2_XXXX 0x08 /* Reserved */
+#define LN_TSTATE2_UFLO 0x10 /* (C) Underflow (late memory) */
+#define LN_TSTATE2_BUFF 0x20 /* (C) Buffering error (no ENP) */
+
+ /* Errors that disable the transmitter */
+#define LN_TSTATE2_DISABLE (LN_TSTATE2_UFLO|LN_TSTATE2_BUFF|LN_TSTATE2_RTRY)
+
+/*
+ * Other chip characteristics
+ */
+
+#define LN_MINBUF_CH 100 /* Minimum size of first lance buffer, if
+ chaining */
+
+#define LN_MINBUF_NOCH 60 /* Minimum size of a lance buffer, if
+ no chaining and DTCR==1 */
+
+#define LN_MINBUF_NOCH_RAW 64 /* Minimum size of a lance buffer, if
+ no chaining and DTCR==0 */
+
+/*
+ * Information for mapped ether
+ */
+typedef struct mapped_ether_info {
+ volatile unsigned int interrupt_count;
+ /* tot interrupts received */
+ volatile unsigned short saved_csr0;
+ /* copy of csr0 at last intr */
+ unsigned char rom_stride;
+ unsigned char ram_stride;
+ /* rom&ram strides */
+ unsigned buffer_size;
+ /* how much ram for lance */
+ natural_t buffer_physaddr;
+ /* where it is in phys memory */
+ unsigned wait_event;
+} *mapped_ether_info_t;
+
+#ifdef KERNEL
+extern struct se_switch {
+ vm_offset_t regspace;
+ vm_offset_t bufspace;
+ vm_offset_t ln_bufspace;
+ vm_offset_t romspace;
+ short romstride;
+ short ramstride;
+ int ramsize;
+ void (*desc_copyin)( vm_offset_t, vm_offset_t, int);
+ void (*desc_copyout)( vm_offset_t, vm_offset_t, int);
+ void (*data_copyin)( vm_offset_t, vm_offset_t, int);
+ void (*data_copyout)( vm_offset_t, vm_offset_t, int);
+ void (*bzero)( vm_offset_t, int );
+ vm_offset_t (*mapaddr)( vm_offset_t );
+ vm_size_t (*mapoffs)( vm_size_t );
+} *se_sw;
+#endif /* KERNEL */
diff --git a/chips/lance_mapped.c b/chips/lance_mapped.c
new file mode 100644
index 00000000..26096bd9
--- /dev/null
+++ b/chips/lance_mapped.c
@@ -0,0 +1,417 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: if_se_mapped.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 8/90
+ *
+ * In-kernel side of the user-mapped ethernet driver.
+ */
+
+#include <ln.h>
+#if NLN > 0
+#include <platforms.h>
+
+#include <mach/machine/vm_types.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <chips/lance.h>
+#include <chips/busses.h>
+
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+
+#include <vm/vm_kern.h>
+#include <kern/eventcount.h>
+
+#include <machine/machspl.h>
+
+#ifdef DECSTATION
+
+#define machine_btop mips_btop
+
+#define kvctophys(v) K0SEG_TO_PHYS((v)) /* kernel virtual cached */
+#define phystokvc(p) PHYS_TO_K0SEG((p)) /* and back */
+#define kvutophys(v) K1SEG_TO_PHYS((v)) /* kernel virtual uncached */
+#define phystokvu(p) PHYS_TO_K1SEG((p)) /* and back */
+ /* remap from k2 to k0 */
+#define kvirt(v) ((phystokvc(pmap_extract(pmap_kernel(),v))))
+
+#include <mips/mips_cpu.h>
+/*
+ * Wired addresses and sizes
+ */
+#define SE0_REG_EMRG (se_reg_t)(0xb8000000)
+
+#define REGBASE(unit) (((u_int)SE_statii[unit].registers) - se_sw->regspace)
+
+#define SE_REG_PHYS(unit) kvutophys(REGBASE(unit)+se_sw->regspace)
+#define SE_REG_SIZE PAGE_SIZE
+
+#define SE_BUF_PHYS(unit) kvutophys(REGBASE(unit)+se_sw->bufspace)
+#define SE_BUF_SIZE (128*1024)
+
+#define SE_ADR_PHYS(unit) kvutophys(REGBASE(unit)+se_sw->romspace)
+#define SE_ADR_SIZE PAGE_SIZE
+#endif /*DECSTATION*/
+
+#ifdef VAXSTATION
+#define machine_btop vax_btop
+#endif /*VAXSTATION*/
+
+#ifdef LUNA88K
+# define machine_btop m88k_btop
+# define kvirt(v) (v)
+# define kvctophys(v) pmap_extract(pmap_kernel(),(v))
+# define SE0_REG_EMRG ((se_reg_t)0xF1000000U)
+# define REGBASE(unit) (((u_int)SE_statii[unit].registers) - se_sw->regspace)
+# define SE_REG_PHYS(unit) (REGBASE(unit) + se_sw->regspace)
+# define SE_REG_SIZE PAGE_SIZE
+# define SE_BUF_PHYS(unit) (REGBASE(unit) + se_sw->bufspace)
+# define SE_BUF_SIZE (64*1024)
+# define SE_ADR_PHYS(unit) kvctophys(REGBASE(unit) + se_sw->romspace)
+# define SE_ADR_SIZE PAGE_SIZE
+# define wbflush() /*empty*/
+#endif /*LUNA88K*/
+
+/*
+ * Autoconf info
+ */
+
+static vm_offset_t SEstd[NLN] = { 0 };
+static struct bus_device *SEinfo[NLN];
+ void SE_attach();
+ int SE_probe();
+
+struct bus_driver SEdriver =
+ { SE_probe, 0, SE_attach, 0, SEstd, "SE", SEinfo, };
+
+/*
+ * Externally visible functions
+ */
+int SE_probe(); /* Kernel */
+void SE_intr(), SE_portdeath();
+ /* User */
+int SE_open(), SE_close();
+vm_offset_t SE_mmap();
+
+
+/* forward declarations */
+
+static void SE_stop(unsigned int unit);
+
+/*
+ * Status information for all interfaces
+ */
+/*static*/ struct SE_status {
+ se_reg_t registers;
+ mapped_ether_info_t info;
+ struct evc eventcounter;
+} SE_statii[NLN];
+
+
+/*
+ * Probe the Lance to see if it's there
+ */
+int
+SE_probe(regbase, ui)
+ vm_offset_t regbase;
+ register struct bus_device *ui;
+{
+ int unit = ui->unit;
+ se_reg_t regs;
+ vm_offset_t addr;
+ mapped_ether_info_t info;
+ struct SE_status *self;
+
+
+ if (unit >= NLN)
+ return 0;
+
+ self = &SE_statii[unit];
+
+ printf("[mappable] ");
+
+ regs = (se_reg_t) (regbase + se_sw->regspace);
+ self->registers = regs;
+
+ /*
+ * Reset the interface
+ */
+ SE_stop(unit);
+
+ /*
+ * Grab a page to be mapped later to users
+ */
+ (void) kmem_alloc_wired(kernel_map, &addr, PAGE_SIZE);
+ /*
+ on the decstation, kmem_alloc_wired returns virtual addresses
+ in the k2 seg. Since this page is going to get mapped in
+ user space, we need to transform it to a better understood
+ virtual address. The kvirt function does this.
+ */
+ bzero(addr, PAGE_SIZE);
+ info = (mapped_ether_info_t) kvirt(addr);
+ self->info = info;
+
+ /*
+ * Set permanent info
+ */
+ info->rom_stride = se_sw->romstride;
+ info->ram_stride = se_sw->ramstride;
+ info->buffer_size = se_sw->ramsize;
+ info->buffer_physaddr = se_sw->ln_bufspace;
+
+ /*
+ * Synch setup
+ */
+ evc_init(&self->eventcounter);
+ info->wait_event = self->eventcounter.ev_id;
+
+ return 1;
+}
+
+void
+SE_attach(ui)
+ register struct bus_device *ui;
+{
+}
+
+
+/*
+ * Shut off the lance
+ */
+static void SE_stop(unsigned int unit)
+{
+ register se_reg_t regs = SE_statii[unit].registers;
+
+ if (regs == 0)
+ /* Stray interrupt */
+ regs = SE0_REG_EMRG;
+
+ regs[2] = CSR0_SELECT; /* XXXX rap XXXX */
+ wbflush();
+ regs[0] = LN_CSR0_STOP;
+ wbflush();
+}
+
+
+/*
+ * Ethernet interface interrupt routine
+ */
+void SE_intr(unit,spllevel)
+ int unit;
+ spl_t spllevel;
+{
+ register struct SE_status *self = &SE_statii[unit];
+ register se_reg_t regs = self->registers;
+ register csr;
+
+ if (regs == 0) { /* stray */
+ SE_stop(unit);
+ return;
+ }
+
+ /* Acknowledge interrupt request, drop spurious intr */
+ csr = regs[0];
+ if ((csr & LN_CSR0_INTR) == 0)
+ return;
+ regs[0] = csr & LN_CSR0_WTC; /* silence it */
+
+ splx(spllevel); /* drop priority now */
+
+ /* Pass csr state up to user thread */
+ if (self->info) {
+ self->info->interrupt_count++; /* total interrupts */
+ self->info->saved_csr0 = csr;
+ }
+
+ /* Awake user thread */
+ evc_signal(&self->eventcounter);
+}
+
+
+extern boolean_t se_use_mapped_interface[NLN];
+
+/*
+ * Device open procedure
+ */
+int SE_open(dev, flag, ior)
+ io_req_t ior;
+{
+ int unit = dev;
+ register struct SE_status *self = &SE_statii[unit];
+
+ if (unit >= NLN)
+ return EINVAL;
+
+ /*
+ * Silence interface, just in case
+ */
+ SE_stop(unit);
+
+ /*
+ * Reset eventcounter
+ */
+ evc_signal(&self->eventcounter);
+
+ se_use_mapped_interface[unit] = 1;
+
+ /*
+ * Do not turn Ether interrupts on. The user can do it when ready
+ * to take them.
+ */
+
+ return 0;
+}
+
+/*
+ * Device close procedure
+ */
+int SE_close(dev, flag)
+{
+ int unit = dev;
+ register struct SE_status *self = &SE_statii[unit];
+
+ if (unit >= NLN)
+ return EINVAL;
+
+ /*
+ * Silence interface, in case user forgot
+ */
+ SE_stop(unit);
+ evc_signal(&self->eventcounter);
+
+ se_normal(unit);
+
+ return 0;
+}
+
+
+/*
+ * Get status procedure.
+ * We need to tell that we are mappable.
+ */
+io_return_t
+SE_get_status(ifp, flavor, status, status_count)
+/* struct ifnet *ifp; not really..*/
+ int flavor;
+ dev_status_t status; /* pointer to OUT array */
+ unsigned int *status_count; /* OUT */
+{
+ switch (flavor) {
+ case NET_STATUS:
+ {
+ register struct net_status *ns = (struct net_status *)status;
+
+ ns->min_packet_size = sizeof(struct ether_header);
+ ns->max_packet_size = sizeof(struct ether_header) + ETHERMTU;
+ ns->header_format = HDR_ETHERNET;
+ ns->header_size = sizeof(struct ether_header);
+ ns->address_size = 6;
+ ns->flags = IFF_BROADCAST;
+ ns->mapped_size = SE_BUF_SIZE + (3 * PAGE_SIZE);
+
+ *status_count = NET_STATUS_COUNT;
+ break;
+ }
+/* case NET_ADDRESS: find it yourself */
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+/*
+ * Should not refuse this either
+ */
+int SE_set_status(dev, flavor, status, status_count)
+ int dev;
+ int flavor;
+ dev_status_t status;
+ unsigned int status_count;
+{
+ return (D_SUCCESS);
+}
+
+/*
+ * Port death notification routine
+ */
+void SE_portdeath(dev, dead_port)
+{
+}
+
+
+/*
+ * Virtual->physical mapping routine.
+ */
+vm_offset_t
+SE_mmap(dev, off, prot)
+ int dev;
+ vm_offset_t off;
+ vm_prot_t prot;
+{
+ vm_offset_t page;
+ vm_offset_t addr;
+ int unit = dev;
+
+ /*
+ * The offset (into the VM object) defines the following layout
+ *
+ * off size what
+ * 0 1pg mapping information (csr & #interrupts)
+ * 1pg 1pg lance registers
+ * 2pg 1pg lance station address (ROM)
+ * 3pg 128k lance buffers
+ */
+#define S0 PAGE_SIZE
+#define S1 (S0+SE_REG_SIZE)
+#define S2 (S1+SE_ADR_SIZE)
+#define S3 (S2+SE_BUF_SIZE)
+
+ if (off < S0) {
+ addr = kvctophys (SE_statii[unit].info);
+ } else if (off < S1) {
+ addr = (vm_offset_t) SE_REG_PHYS(unit);
+ off -= S0;
+ } else if (off < S2) {
+ addr = (vm_offset_t) SE_ADR_PHYS(unit);
+ off -= S1;
+ } else if (off < S3) {
+ addr = (vm_offset_t) SE_BUF_PHYS(unit);
+ off -= S2;
+ } else
+ return (EINVAL);
+
+ page = machine_btop(addr + off);
+ return (page);
+}
+
+#endif /* NLN > 0 */
diff --git a/chips/lk201.c b/chips/lk201.c
new file mode 100644
index 00000000..d11b1e97
--- /dev/null
+++ b/chips/lk201.c
@@ -0,0 +1,695 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: lk201.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Routines for the LK201 Keyboard Driver
+ */
+
+#include <lk.h>
+#if NLK > 0
+#include <bm.h>
+
+#include <mach_kdb.h>
+
+#include <mach/std_types.h>
+#include <device/device_types.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <sys/ioctl.h>
+#include <machine/machspl.h>
+#include <chips/lk201.h>
+#include <chips/serial_defs.h>
+#include <chips/screen_defs.h>
+
+
+/*
+ * Structures describing the keyboard status
+ */
+typedef struct {
+ unsigned short kbd_flags;
+ unsigned short kbd_previous;
+ char kbd_gen_shift;
+ char kbd_ctrl;
+ char kbd_lock;
+ char kbd_meta;
+ char kbd_shift;
+} lk201_kbd_state_t;
+
+#define mapLOCKtoCNTRL 0x1 /* flags */
+
+typedef struct {
+ char led_active;
+ char led_pattern;
+ char led_increasing;
+ char led_lights;
+ int led_interval;
+ int led_light_count;
+} lk201_led_state_t;
+
+
+/*
+ * Keyboard state
+ */
+
+struct lk201_softc {
+ lk201_kbd_state_t kbd;
+ lk201_led_state_t led;
+ int sl_unit;
+} lk201_softc_data[NLK];
+
+typedef struct lk201_softc *lk201_softc_t;
+
+lk201_softc_t lk201_softc[NLK];
+
+/*
+ * Forward decls
+ */
+io_return_t
+lk201_translation(
+ int key,
+ char c,
+ int tabcode );
+
+int
+lk201_input(
+ int unit,
+ unsigned short data);
+
+
+/*
+ * Autoconf (sort-of)
+ */
+lk201_probe(
+ int unit)
+{
+ lk201_softc[unit] = &lk201_softc_data[unit];
+ return 1;
+}
+
+void lk201_attach(
+ int unit,
+ int sl_unit)
+{
+ lk201_softc[unit]->sl_unit = sl_unit;
+ lk201_selftest(unit);
+}
+
+/*
+ * Keyboard initialization
+ */
+
+static unsigned char lk201_reset_string[] = {
+ LK_CMD_LEDS_ON, LK_PARAM_LED_MASK(0xf), /* show we are resetting */
+ LK_CMD_SET_DEFAULTS,
+ LK_CMD_MODE(LK_MODE_RPT_DOWN,1),
+ LK_CMD_MODE(LK_MODE_RPT_DOWN,2),
+ LK_CMD_MODE(LK_MODE_RPT_DOWN,3),
+ LK_CMD_MODE(LK_MODE_RPT_DOWN,4),
+ LK_CMD_MODE(LK_MODE_DOWN_UP,5),
+ LK_CMD_MODE(LK_MODE_DOWN_UP,6),
+ LK_CMD_MODE(LK_MODE_RPT_DOWN,7),
+ LK_CMD_MODE(LK_MODE_RPT_DOWN,8),
+ LK_CMD_MODE(LK_MODE_RPT_DOWN,9),
+ LK_CMD_MODE(LK_MODE_RPT_DOWN,10),
+ LK_CMD_MODE(LK_MODE_RPT_DOWN,11),
+ LK_CMD_MODE(LK_MODE_RPT_DOWN,12),
+ LK_CMD_MODE(LK_MODE_DOWN,13),
+ LK_CMD_MODE(LK_MODE_RPT_DOWN,14),
+ LK_CMD_ENB_RPT,
+/* LK_CMD_ENB_KEYCLK, LK_PARAM_VOLUME(4), */
+ LK_CMD_DIS_KEYCLK,
+ LK_CMD_RESUME,
+ LK_CMD_ENB_BELL, LK_PARAM_VOLUME(4),
+ LK_CMD_LEDS_OFF, LK_PARAM_LED_MASK(0xf)
+};
+
+void
+lk201_reset(
+ int unit)
+{
+ register int i, sl;
+ register spl_t s;
+ lk201_softc_t lk;
+
+ lk = lk201_softc[unit];
+ sl = lk->sl_unit;
+ s = spltty();
+ for (i = 0; i < sizeof(lk201_reset_string); i++) {
+ (*console_putc)(sl,
+ SCREEN_LINE_KEYBOARD,
+ lk201_reset_string[i]);
+ delay(100);
+ }
+ /* zero any state associated with previous keypresses */
+ bzero(lk, sizeof(*lk));
+ lk->sl_unit = sl;
+ splx(s);
+}
+
+lk201_selftest(
+ int unit)
+{
+ int messg[4], sl;
+ spl_t s;
+
+ sl = lk201_softc[unit]->sl_unit;
+ s = spltty();
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_CMD_REQ_ID);
+ delay(10000);/* arbitrary */
+ messg[0] = (*console_getc)(sl, SCREEN_LINE_KEYBOARD, TRUE, TRUE);
+ messg[1] = (*console_getc)(sl, SCREEN_LINE_KEYBOARD, TRUE, TRUE);
+ splx(s);
+
+ printf("( lk201 id %x.%x", messg[0], messg[1]);
+
+ s = spltty();
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_CMD_POWER_UP);
+
+ /* cannot do this, waiting too long might cause receiver overruns */
+/*	delay(80000);	spec says 70 msecs or less */
+
+ messg[0] = (*console_getc)(sl, SCREEN_LINE_KEYBOARD, TRUE, TRUE);
+ messg[1] = (*console_getc)(sl, SCREEN_LINE_KEYBOARD, TRUE, TRUE);
+ messg[2] = (*console_getc)(sl, SCREEN_LINE_KEYBOARD, TRUE, TRUE);
+ messg[3] = (*console_getc)(sl, SCREEN_LINE_KEYBOARD, TRUE, TRUE);
+ splx(s);
+
+ printf(", self-test ");
+ if (messg[0] != 0x01 || messg[1] || messg[2] || messg[3])
+ printf("bad [%x %x %x %x]",
+ messg[0], messg[1], messg[2], messg[3]);
+ else
+ printf("ok )");
+
+ lk201_reset(unit);
+}
+
+/*
+ * Tinkerbell
+ */
+void
+lk201_ring_bell(
+ int unit)
+{
+ spl_t s = spltty();
+ (*console_putc)(lk201_softc[unit]->sl_unit, SCREEN_LINE_KEYBOARD, LK_CMD_BELL);
+ splx(s);
+}
+
+/*
+ * Here is your LED toy, Bob
+ */
+void
+lk201_lights(
+ int unit,
+ boolean_t on)
+{
+ unsigned int sl;
+ spl_t s;
+
+ sl = lk201_softc[unit]->sl_unit;
+ s = spltty();
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_CMD_LEDS_OFF);
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_PARAM_LED_MASK(0xf));
+ if (on < 16 && on > 0) {
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_CMD_LEDS_ON);
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_PARAM_LED_MASK(on));
+ }
+ splx(s);
+}
+
+
+lk201_led(
+ int unit)
+{
+ lk201_led_state_t *leds = &lk201_softc[unit]->led;
+ unsigned int sl;
+ spl_t s;
+
+ sl = lk201_softc[unit]->sl_unit;
+ if (leds->led_interval) { /* leds are on */
+	if (leds->led_light_count <= 0) {	/* time to hit the lights */
+
+ if (leds->led_lights <= 0) leds->led_lights= 1; /* sanity */
+ if (leds->led_lights > 16) leds->led_lights = 16;/* sanity */
+ leds->led_light_count = leds->led_interval; /* reset */
+ s = spltty();
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_CMD_LEDS_OFF);
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_PARAM_LED_MASK(leds->led_lights));
+ switch (leds->led_pattern) {
+ case LED_OFF:
+ leds->led_interval = 0; /* since you can now set */
+ break; /* the interval even if off */
+ case LED_COUNT:
+ leds->led_lights++;
+ if (leds->led_lights > 16) leds->led_lights = 1;
+ break;
+ case LED_ROTATE:
+ leds->led_lights <<= 1;
+ if (leds->led_lights > 8) leds->led_lights = 1;
+ break;
+ case LED_CYLON:
+ if (leds->led_increasing) {
+ leds->led_lights <<= 1;
+ if (leds->led_lights > 8) {
+ leds->led_lights >>= 2;
+ leds->led_increasing = 0;
+ }
+ } else {
+ leds->led_lights >>= 1;
+ if (leds->led_lights <= 0) {
+ leds->led_lights = 2;
+ leds->led_increasing = 1;
+ }
+ }
+ break;
+ }
+ (*console_putc)( sl, SCREEN_LINE_KEYBOARD, LK_CMD_LEDS_ON);
+ (*console_putc)( sl, SCREEN_LINE_KEYBOARD, LK_PARAM_LED_MASK(leds->led_lights));
+ splx(s);
+ }
+ leds->led_light_count--;
+ } else {
+ if (leds->led_lights) {
+ s = spltty();
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_CMD_LEDS_OFF);
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_PARAM_LED_MASK(0xf));
+ leds->led_lights = 0;
+ splx(s);
+ }
+ leds->led_active = 0;
+#if NBM > 0
+ screen_enable_vretrace(unit, 0); /* interrupts off */
+#endif
+ }
+}
+
+
+/*
+ * Special user-visible ops
+ */
+io_return_t
+lk201_set_status(
+ int unit,
+ dev_flavor_t flavor,
+ dev_status_t status,
+ natural_t status_count)
+{
+ lk201_led_state_t *leds = &lk201_softc[unit]->led;
+ lk201_kbd_state_t *kbd = &lk201_softc[unit]->kbd;
+
+ switch( flavor ) {
+ case LK201_SEND_CMD:{
+ register lk201_cmd_t *cmd = (lk201_cmd_t*)status;
+ unsigned int cnt, s, sl;
+
+ if ((status_count < (sizeof(*cmd)/sizeof(int))) ||
+ ((cnt = cmd->len) > 2))
+ return D_INVALID_SIZE;
+
+ if (cnt == 0)
+ cmd->command |= LK_PARAM;
+ else
+ cmd->params[cnt-1] |= LK_PARAM;
+ sl = lk201_softc[unit]->sl_unit;
+ s = spltty();
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, cmd->command);
+ if (cnt > 0)
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, cmd->params[0]);
+ if (cnt > 1)
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, cmd->params[1]);
+ splx(s);
+ return D_SUCCESS;
+ }
+ case LK201_LED_PATTERN:{
+ register int ptn = * (int *) status;
+ if (ptn != LED_OFF && ptn != LED_COUNT &&
+ ptn != LED_ROTATE && ptn != LED_CYLON ) {
+ return -1;
+ } else {
+ leds->led_pattern = ptn;
+ }
+ break;
+ }
+ case LK201_LED_INTERVAL:{
+ register int lcnt = * (int *) status;
+ if (lcnt < 0)
+ lcnt = 1;
+ leds->led_interval = lcnt;
+ break;
+ }
+ case LK201_mapLOCKtoCNTRL:{
+ boolean_t enable = * (boolean_t*) status;
+ if (enable)
+ kbd->kbd_flags |= mapLOCKtoCNTRL;
+ else
+ kbd->kbd_flags &= ~mapLOCKtoCNTRL;
+ return D_SUCCESS;
+ }
+ case LK201_REMAP_KEY:{
+ register KeyMap *k = (KeyMap *) status;
+ int mode;
+
+ if (status_count < (sizeof(KeyMap)/sizeof(int)))
+ return D_INVALID_SIZE;
+
+ mode = k->shifted ? 1 : 0;
+ if (k->meta) mode += 2;
+ return lk201_translation( k->in_keyval,
+ k->out_keyval,
+ mode );
+ }
+ default:
+ return D_INVALID_OPERATION;
+ }
+ leds->led_lights = 1;
+ leds->led_active = 1;
+#if NBM > 0
+ screen_enable_vretrace(unit, 1); /* interrupts on */
+#endif
+ return D_SUCCESS;
+}
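As a concrete illustration of the LK201_SEND_CMD case above: the caller fills an lk201_cmd_t, and the driver ORs LK_PARAM into the final byte (or into the command itself when there are no parameters) before pushing the bytes out on the keyboard line. For an "all LEDs on" request the bytes that reach the LK201 work out as below; the defines are copied from lk201.h and the snippet only checks the packing, it is not a driver call.

    #include <stdio.h>

    #define LK_PARAM             0x80
    #define LK_CMD_LEDS_ON       0x13      /* takes one parameter: the LED bitmask */
    #define LK_PARAM_LED_MASK(m) (LK_PARAM|(m))

    int main(void)
    {
    	/* lk201_cmd_t { len = 1, command = LK_CMD_LEDS_ON,
    	 *               params[0] = LK_PARAM_LED_MASK(0xf) };
    	 * lk201_set_status() ORs LK_PARAM into params[len-1] before sending. */
    	unsigned char command = LK_CMD_LEDS_ON;
    	unsigned char param   = LK_PARAM_LED_MASK(0xf) | LK_PARAM;

    	printf("bytes sent to the keyboard: %#x %#x\n", command, param); /* 0x13 0x8f */
    	return 0;
    }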
+
+/*
+ * Keycode translation tables
+ *
+ * NOTE: these tables have been compressed a little bit
+ * because the lk201 cannot generate very small codes.
+ */
+
+unsigned char lk201_xlate_key[] = {
+ /* 86 */ 0 ,0
+ /* 88 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 96 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 104 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 112 */ ,0 ,0x1b ,0x08 ,'\n' ,0 ,0 ,0 ,0
+ /* 120 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 128 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 136 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 144 */ ,0 ,0 ,'0',0 ,'.','\r','1','2'
+ /* 152 */ ,'3','4','5','6',',','7','8','9'
+ /* 160 */ ,'-',0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 168 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 176 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 184 */ ,0 ,0 ,0 ,0 ,0x7f,'\r','\t','`'
+ /* 192 */ ,'1' ,'q' ,'a' ,'z' ,0 ,'2' ,'w' ,'s'
+ /* 200 */ ,'x' ,'<' ,0 ,'3' ,'e' ,'d' ,'c' ,0
+ /* 208 */ ,'4' ,'r' ,'f' ,'v' ,' ' ,0 ,'5' ,'t'
+ /* 216 */ ,'g' ,'b' ,0 ,'6' ,'y' ,'h' ,'n' ,0
+ /* 224 */ ,'7' ,'u' ,'j' ,'m' ,0 ,'8' ,'i' ,'k'
+ /* 232 */ ,',' ,0 ,'9' ,'o' ,'l' ,'.' ,0 ,'0'
+ /* 240 */ ,'p' ,0 ,';' ,'/' ,0 ,'=' ,']' ,'\\'
+ /* 248 */ ,0 ,'-' ,'[' ,'\'' ,0 ,0 ,0 ,0
+};
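These tables are indexed by keycode minus LK_MINCODE (0x56), which is why the first entry is labelled 86. For instance the 'A' key reports LK_A (0xc2); 0xc2 - 0x56 = 108, i.e. the entry labelled 194 above, whose unshifted value is 'a' and whose shifted value (next table) is 'A'. A trivial check of that index arithmetic:

    #include <assert.h>

    #define LK_MINCODE 0x56
    #define LK_A       0xc2

    int main(void)
    {
    	/* table slot for the 'A' key: counting from the label 86 up to 194 */
    	assert(LK_A - LK_MINCODE == 108);
    	assert(86 + (LK_A - LK_MINCODE) == 194);
    	return 0;
    }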
+
+unsigned char lk201_xlate_shifted[] = {
+ /* 86 */ 0 ,0
+ /* 88 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 96 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 104 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 112 */ ,0 ,0x1b ,0x08 ,'\n' ,0 ,0 ,0 ,0
+ /* 120 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 128 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 136 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 144 */ ,0 ,0 ,'0',0 ,'.','\r','1','2'
+ /* 152 */ ,'3','4','5','6',',','7','8','9'
+ /* 160 */ ,'-',0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 168 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 176 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 184 */ ,0 ,0 ,0 ,0 ,0x7f ,'\r' ,'\t' ,'~'
+ /* 192 */ ,'!' ,'Q' ,'A' ,'Z' ,0 ,'@' ,'W' ,'S'
+ /* 200 */ ,'X' ,'>' ,0 ,'#' ,'E' ,'D' ,'C' ,0
+ /* 208 */ ,'$' ,'R' ,'F' ,'V' ,' ' ,0 ,'%' ,'T'
+ /* 216 */ ,'G' ,'B' ,0 ,'^' ,'Y' ,'H' ,'N' ,0
+ /* 224 */ ,'&' ,'U' ,'J' ,'M' ,0 ,'*' ,'I' ,'K'
+ /* 232 */ ,'<' ,0 ,'(' ,'O' ,'L' ,'>' ,0 ,')'
+ /* 240 */ ,'P' ,0 ,':' ,'?' ,0 ,'+' ,'}' ,'|'
+ /* 248 */ ,0 ,'_' ,'{' ,'"' ,0 ,0 ,0 ,0
+};
+
+unsigned char lk201_xlate_meta[] = {
+ /* 86 */ 0 ,0
+ /* 88 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 96 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 104 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 112 */ ,0 ,0x1b ,0x08 ,'\n' ,0 ,0 ,0 ,0
+ /* 120 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 128 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 136 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 144 */ ,0 ,0 ,'0',0 ,'.','\r','1','2'
+ /* 152 */ ,'3','4','5','6',',','7','8','9'
+ /* 160 */ ,'-',0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 168 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 176 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 184 */ ,0 ,0 ,0 ,0 ,0x7f ,'\r' ,'\t' ,'~'
+ /* 192 */ ,'!' ,'Q' ,'A' ,'Z' ,0 ,'@' ,'W' ,'S'
+ /* 200 */ ,'X' ,'>' ,0 ,'#' ,'E' ,'D' ,'C' ,0
+ /* 208 */ ,'$' ,'R' ,'F' ,'V' ,' ' ,0 ,'%' ,'T'
+ /* 216 */ ,'G' ,'B' ,0 ,'^' ,'Y' ,'H' ,'N' ,0
+ /* 224 */ ,'&' ,'U' ,'J' ,'M' ,0 ,'*' ,'I' ,'K'
+ /* 232 */ ,'<' ,0 ,'(' ,'O' ,'L' ,'>' ,0 ,')'
+ /* 240 */ ,'P' ,0 ,':' ,'?' ,0 ,'+' ,'}' ,'|'
+ /* 248 */ ,0 ,'_' ,'{' ,'"' ,0 ,0 ,0 ,0
+};
+
+unsigned char lk201_xlate_shifted_meta[] = {
+ /* 86 */ 0 ,0
+ /* 88 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 96 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 104 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 112 */ ,0 ,0x1b ,0x08 ,'\n' ,0 ,0 ,0 ,0
+ /* 120 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 128 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 136 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 144 */ ,0 ,0 ,'0',0 ,'.','\r','1','2'
+ /* 152 */ ,'3','4','5','6',',','7','8','9'
+ /* 160 */ ,'-',0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 168 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 176 */ ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0
+ /* 184 */ ,0 ,0 ,0 ,0 ,0x7f ,'\r' ,'\t' ,'~'
+ /* 192 */ ,'!' ,'Q' ,'A' ,'Z' ,0 ,'@' ,'W' ,'S'
+ /* 200 */ ,'X' ,'>' ,0 ,'#' ,'E' ,'D' ,'C' ,0
+ /* 208 */ ,'$' ,'R' ,'F' ,'V' ,' ' ,0 ,'%' ,'T'
+ /* 216 */ ,'G' ,'B' ,0 ,'^' ,'Y' ,'H' ,'N' ,0
+ /* 224 */ ,'&' ,'U' ,'J' ,'M' ,0 ,'*' ,'I' ,'K'
+ /* 232 */ ,'<' ,0 ,'(' ,'O' ,'L' ,'>' ,0 ,')'
+ /* 240 */ ,'P' ,0 ,':' ,'?' ,0 ,'+' ,'}' ,'|'
+ /* 248 */ ,0 ,'_' ,'{' ,'"' ,0 ,0 ,0 ,0
+};
+
+
+io_return_t
+lk201_translation(
+ int key,
+ char c,
+ int tabcode )
+{
+ unsigned char *table;
+
+ if ((key &= 0xff) < LK_MINCODE)
+ return D_INVALID_OPERATION;
+
+ switch (tabcode) {
+ case 3:
+ table = lk201_xlate_shifted_meta;
+ break;
+ case 2:
+ table = lk201_xlate_meta;
+ break;
+ case 1:
+ table = lk201_xlate_shifted;
+ break;
+ case 0:
+ default:
+ table = lk201_xlate_key;
+ break;
+ }
+ table[key - LK_MINCODE] = c;
+ return D_SUCCESS;
+}
+
+/*
+ * Input character processing
+ */
+
+lk201_rint(
+ int unit,
+ unsigned short data,
+ boolean_t handle_shift,
+ boolean_t from_kernel)
+{
+ int c;
+ lk201_kbd_state_t *kbd = &lk201_softc[unit]->kbd;
+
+ /*
+ * Keyboard touched, clean char to 8 bits.
+ */
+#if NBM > 0
+ ssaver_bump(unit);
+#endif
+
+ data &= 0xff;
+
+ /* Translate keycode into ASCII */
+ if ((c = lk201_input(unit, data)) == -1)
+ return -1;
+
+#if NBM > 0
+ /*
+ * Notify X, unless we are called from inside kernel
+ */
+ if (!from_kernel &&
+ screen_keypress_event(unit, DEV_KEYBD, data, EVT_BUTTON_RAW))
+ return -1;
+#endif
+
+ /* Handle shifting if need to */
+ if (kbd->kbd_gen_shift)
+ return (handle_shift) ? cngetc() : -1;
+
+ return c;
+}
+
+/*
+ * Routine to grok a character from the LK201
+ */
+#if MACH_KDB
+int lk201_allow_kdb = 1;
+#endif
+
+int lk201_debug = 0;
+
+lk201_input(
+ int unit,
+ unsigned short data)
+{
+ int c, sl;
+ lk201_kbd_state_t *kbd = &lk201_softc[unit]->kbd;
+
+ kbd->kbd_gen_shift = 0;
+
+#if MACH_KDB
+ if (lk201_allow_kdb && (data == LK_DO)) {
+ kdb_kintr();
+ return -2;
+ }
+#endif
+
+ /*
+ * Sanity checks
+ */
+
+ if (data == LK_INPUT_ERR || data == LK_OUTPUT_ERR) {
+ printf(" Keyboard error, code = %x\n",data);
+ return -1;
+ }
+ if (data < LK_MINCODE)
+ return -1;
+
+ /*
+ * Check special keys: shifts, ups, ..
+ */
+
+ if (data == LK_LOCK && (kbd->kbd_flags&mapLOCKtoCNTRL))
+ data = LK_CNTRL;
+
+ switch (data) {
+ case LK_LOCK:
+ kbd->kbd_lock ^= 1;
+ kbd->kbd_gen_shift = 1;
+ sl = lk201_softc[unit]->sl_unit;
+ /* called from interrupt, no need for spl */
+ if (kbd->kbd_lock)
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_CMD_LEDS_ON);
+ else
+ (*console_putc)(sl,SCREEN_LINE_KEYBOARD, LK_CMD_LEDS_OFF);
+ (*console_putc)(sl, SCREEN_LINE_KEYBOARD, LK_PARAM_LED_MASK(0x4));
+ return 0;
+
+ case LK_ALT:
+ case LK_L_ALT:
+ case LK_R_ALT:
+ case LK_R_COMPOSE:
+ kbd->kbd_meta ^= 1;
+ kbd->kbd_gen_shift = 1;
+ return 0;
+
+ case LK_SHIFT:
+ case LK_R_SHIFT:
+ kbd->kbd_shift ^= 1;
+ kbd->kbd_gen_shift = 1;
+ return 0;
+
+ case LK_CNTRL:
+ kbd->kbd_ctrl ^= 1;
+ kbd->kbd_gen_shift = 1;
+ return 0;
+
+ case LK_ALLUP:
+ kbd->kbd_ctrl = 0;
+ kbd->kbd_shift = 0;
+ kbd->kbd_meta = 0;
+ kbd->kbd_gen_shift = 1;
+ return 0;
+
+ case LK_REPEAT:
+ c = kbd->kbd_previous;
+ break;
+
+ default:
+
+ /*
+ * Do the key translation to ASCII
+ */
+ if (kbd->kbd_ctrl || kbd->kbd_lock || kbd->kbd_shift) {
+ c = ((kbd->kbd_meta) ?
+ lk201_xlate_shifted_meta : lk201_xlate_shifted)
+ [data - LK_MINCODE];
+ if (kbd->kbd_ctrl)
+ c &= 0x1f;
+ } else
+ c = ((kbd->kbd_meta) ?
+ lk201_xlate_meta : lk201_xlate_key)
+ [data-LK_MINCODE];
+ break;
+
+ }
+
+ kbd->kbd_previous = c;
+
+ /*
+ * DEBUG code DEBUG
+ */
+ if (lk201_debug && (c == 0)) {
+ printf("lk201: [%x]\n", data);
+ }
+
+ return c;
+}
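One detail of the default case worth spelling out: when Ctrl is down the driver first looks up the shifted translation and then masks it with 0x1f, the classic ASCII way of forming control characters, so the LK_C key with Ctrl held becomes 'C' & 0x1f = 0x03 (^C). A two-line check:

    #include <stdio.h>

    int main(void)
    {
    	char c = 'C';           /* shifted translation of the LK_C key */
    	c &= 0x1f;              /* what lk201_input() does when kbd_ctrl is set */
    	printf("%#04x\n", c);   /* 0x03, i.e. ^C */
    	return 0;
    }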
+
+#endif /* NLK > 0 */
diff --git a/chips/lk201.h b/chips/lk201.h
new file mode 100644
index 00000000..5c7bbba0
--- /dev/null
+++ b/chips/lk201.h
@@ -0,0 +1,241 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: lk201.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Definitions for the LK201 Keyboard Driver
+ */
+
+/*
+ * KeyCodes generated by the LK201 board
+ * (labels as in the lk201-AA USA model)
+ */
+
+#define LK_MINCODE 0x56
+ /* Function keys */
+#define LK_F1 0x56 /* div 10 */
+#define LK_F2 0x57
+#define LK_F3 0x58
+#define LK_F4 0x59
+#define LK_F5 0x5a
+#define LK_F6 0x64 /* div 11 */
+#define LK_F7 0x65
+#define LK_F8 0x66
+#define LK_F9 0x67
+#define LK_F10 0x68
+#define LK_F11 0x71 /* div 12 */
+#define LK_ESC LK_F11
+#define LK_F12 0x72
+#define LK_BS LK_F12
+#define LK_F13 0x73
+#define LK_LF LK_F13
+#define LK_F14 0x74
+#define LK_HELP 0x7c /* div 13 */
+#define LK_DO 0x7d
+#define LK_F17 0x80 /* div 14 */
+#define LK_F18 0x81
+#define LK_F19 0x82
+#define LK_F20 0x83
+ /* Editing keys */
+#define LK_FIND 0x8a /* div 9 */
+#define LK_INSERT 0x8b
+#define LK_REMOVE 0x8c
+#define LK_SELECT 0x8d
+#define LK_PREV_SCREEN 0x8e
+#define LK_NEXT_SCREEN 0x8f
+ /* Numeric keypad */
+#define LK_KP_0 0x92 /* div 2 */
+#define LK_KP_DOT 0x94
+#define LK_KP_ENTER 0x95
+#define LK_KP_1 0x96
+#define LK_KP_2 0x97
+#define LK_KP_3 0x98
+#define LK_KP_4 0x99
+#define LK_KP_5 0x9a
+#define LK_KP_6 0x9b
+#define LK_KP_COMMA 0x9c
+#define LK_KP_7 0x9d
+#define LK_KP_8 0x9e
+#define LK_KP_9 0x9f
+#define LK_KP_MINUS 0xa0
+#define LK_KP_PF1 0xa1
+#define LK_KP_PF2 0xa2
+#define LK_KP_PF3 0xa3
+#define LK_KP_PF4 0xa4
+ /* Cursor keys */
+#define LK_LEFT 0xa7 /* div 7 */
+#define LK_RIGHT 0xa8
+#define LK_DOWN 0xa9 /* div 8 */
+#define LK_UP 0xaa
+ /* Shift & Co. */
+#define LK_R_SHIFT 0xab /* lk401 */
+#define LK_L_ALT 0xac /* lk401 */
+#define LK_R_COMPOSE 0xad /* lk401 */
+#define LK_SHIFT 0xae /* div 6 */
+#define LK_CNTRL 0xaf
+#define LK_LOCK 0xb0 /* div 5 */
+#define LK_ALT 0xb1
+#define LK_R_ALT 0xb2 /* lk401 */
+ /* Special codes */
+#define LK_ALLUP 0xb3
+#define LK_REPEAT 0xb4
+#define LK_OUTPUT_ERR 0xb5
+#define LK_INPUT_ERR 0xb6
+#define LK_KDBLOCK_ACK 0xb7
+#define LK_TESTMODE_ACK 0xb8
+#define LK_PFX_KEYDOWN 0xb9
+#define LK_MODECHG_ACK 0xba
+ /* Delete & Co. */
+#define LK_DEL 0xbc /* div 3 */
+#define LK_RETURN 0xbd /* div 4 */
+#define LK_TAB 0xbe
+ /* Graphic keys */
+
+#define LK_TILDE 0xbf /* div 2 */
+#define LK_1 0xc0
+#define LK_Q 0xc1
+#define LK_A 0xc2
+#define LK_Z 0xc3
+#define LK_2 0xc5
+#define LK_W 0xc6
+#define LK_S 0xc7
+#define LK_X 0xc8
+#define LK_LESSGRT 0xc9
+#define LK_3 0xcb
+#define LK_E 0xcc
+#define LK_D 0xcd
+#define LK_C 0xce
+#define LK_4 0xd0
+#define LK_R 0xd1
+#define LK_F 0xd2
+#define LK_V 0xd3
+#define LK_SP 0xd4
+#define LK_5 0xd6
+#define LK_T 0xd7
+#define LK_G 0xd8
+#define LK_B 0xd9
+#define LK_6 0xdb
+#define LK_Y 0xdc
+#define LK_H 0xdd
+#define LK_N 0xde
+#define LK_7 0xe0 /* div 1 */
+#define LK_U 0xe1
+#define LK_J 0xe2
+#define LK_M 0xe3
+#define LK_8 0xe5
+#define LK_I 0xe6
+#define LK_K 0xe7
+#define LK_DQUOTE 0xe8
+#define LK_9 0xea
+#define LK_O 0xeb
+#define LK_L 0xec
+#define LK_DOT 0xed
+#define LK_0 0xef
+#define LK_P 0xf0
+#define LK_COLON 0xf2
+#define LK_QMARK 0xf3
+#define LK_PLUS 0xf5
+#define LK_RBRACKET 0xf6
+#define LK_VBAR 0xf7
+#define LK_MINUS 0xf9
+#define LK_LBRACKET 0xfa
+#define LK_COMMA 0xfb
+
+
+/*
+ * Commands to the keyboard processor
+ */
+
+#define LK_PARAM 0x80 /* start/end parameter list */
+
+#define LK_CMD_RESUME 0x8b
+#define LK_CMD_INHIBIT 0xb9
+#define LK_CMD_LEDS_ON 0x13 /* 1 param: led bitmask */
+#define LK_CMD_LEDS_OFF 0x11 /* 1 param: led bitmask */
+#define LK_CMD_DIS_KEYCLK 0x99
+#define LK_CMD_ENB_KEYCLK 0x1b /* 1 param: volume */
+#define LK_CMD_DIS_CTLCLK 0xb9
+#define LK_CMD_ENB_CTLCLK 0xbb
+#define LK_CMD_SOUND_CLK 0x9f
+#define LK_CMD_DIS_BELL 0xa1
+#define LK_CMD_ENB_BELL 0x23 /* 1 param: volume */
+#define LK_CMD_BELL 0xa7
+#define LK_CMD_TMP_NORPT 0xc1
+#define LK_CMD_ENB_RPT 0xe3
+#define LK_CMD_DIS_RPT 0xe1
+#define LK_CMD_RPT_TO_DOWN 0xd9
+#define LK_CMD_REQ_ID 0xab
+#define LK_CMD_POWER_UP 0xfd
+#define LK_CMD_TEST_MODE 0xcb
+#define LK_CMD_SET_DEFAULTS 0xd3
+
+/* there are 4 leds, represent them in the low 4 bits of a byte */
+#define LK_PARAM_LED_MASK(ledbmap) (LK_PARAM|(ledbmap))
+
+/* max volume is 0, lowest is 0x7 */
+#define LK_PARAM_VOLUME(v) (LK_PARAM|((v)&0x7))
+
+/* mode set command(s) details */
+#define LK_MODE_DOWN 0x0
+#define LK_MODE_RPT_DOWN 0x2
+#define LK_MODE_DOWN_UP 0x6
+#define LK_CMD_MODE(m,div) (LK_PARAM|(div<<3)|m)
+
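The mode bytes used in lk201_reset_string are built by LK_CMD_MODE: bit 7 is LK_PARAM, bits 6..3 select the keyboard division and bits 2..0 the mode, so LK_CMD_MODE(LK_MODE_RPT_DOWN, 1) is 0x80 | (1 << 3) | 2 = 0x8a, and LK_CMD_MODE(LK_MODE_DOWN_UP, 5) is 0xae. A quick check of the packing:

    #include <stdio.h>

    #define LK_PARAM            0x80
    #define LK_MODE_DOWN        0x0
    #define LK_MODE_RPT_DOWN    0x2
    #define LK_MODE_DOWN_UP     0x6
    #define LK_CMD_MODE(m,div)  (LK_PARAM|(div<<3)|m)

    int main(void)
    {
    	printf("%#x\n", LK_CMD_MODE(LK_MODE_RPT_DOWN, 1));  /* 0x8a */
    	printf("%#x\n", LK_CMD_MODE(LK_MODE_DOWN_UP, 5));   /* 0xae */
    	return 0;
    }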
+
+/*
+ * Keyboard set_status codes and arguments
+ */
+
+/* Send a command packet to the lk201 */
+typedef struct {
+ unsigned char len; /* how many params */
+ unsigned char command; /* cmd to lk201 */
+ unsigned char params[2]; /* extra params */
+} lk201_cmd_t;
+#define LK201_SEND_CMD	_IOW('q', 5, lk201_cmd_t)	/* keyboard peripheral command */
+
+/* Control rotation of lk201 leds */
+#define LK201_LED_PATTERN _IOW('q', 119, int) /* cylon, ... */
+# define LED_COUNT 1 /* led counter */
+# define LED_ROTATE 2 /* led rotate */
+# define LED_CYLON 3 /* led cylon mode */
+
+#define LK201_LED_INTERVAL _IOW('q', 120, int) /* refresh interval */
+# define LED_OFF 0 /* no led movement */
+
+/* Map the caps-lock key to act as the control key (skinny-fingers) */
+#define LK201_mapLOCKtoCNTRL _IOW('q', 121, int) /* 1 or 0 */
+
+/* Remap a lk201 keycode to a different (ASCII) translation */
+typedef struct {
+ unsigned char in_keyval;
+ unsigned char shifted;
+ unsigned char out_keyval;
+ unsigned char meta;
+} KeyMap;
+#define LK201_REMAP_KEY		_IOW('q', 122, KeyMap)	/* keycode remap, see KeyMap above */
diff --git a/chips/mc_clock.c b/chips/mc_clock.c
new file mode 100644
index 00000000..15fa049d
--- /dev/null
+++ b/chips/mc_clock.c
@@ -0,0 +1,516 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mc_clock.c
+ * Author: Alessandro Forin
+ * Date: 8/90
+ *
+ * Driver for the MC146818 Clock
+ */
+
+#include <mc.h>
+#if NMC > 0
+#include <platforms.h>
+
+#include <mach/std_types.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <chips/busses.h>
+
+#include <sys/time.h>
+#include <kern/time_out.h>
+#include <chips/mc_clock.h>
+
+#ifdef DECSTATION
+#include <mips/mips_cpu.h>
+#include <mips/clock.h>
+#endif /*DECSTATION*/
+
+#ifdef FLAMINGO
+#include <alpha/clock.h>
+#endif /*FLAMINGO*/
+
+#define private static
+#define public
+
+
+/* Architecture-specific defines */
+
+#ifdef DECSTATION
+
+#define MC_DEFAULT_ADDRESS (mc_clock_ram_t *)PHYS_TO_K1SEG(0x1d000000)
+#define MC_DOES_DELAYS 1
+
+/*
+ * Both the Pmax and the 3max implementations of the chip map
+ * bytes of the chip's RAM to 32 bit words (low byte).
+ * For convenience, we redefine here the chip's RAM layout
+ * making padding explicit.
+ */
+
+typedef struct {
+ volatile unsigned char mc_second;
+ char pad0[3];
+ volatile unsigned char mc_alarm_second;
+ char pad1[3];
+ volatile unsigned char mc_minute;
+ char pad2[3];
+ volatile unsigned char mc_alarm_minute;
+ char pad3[3];
+ volatile unsigned char mc_hour;
+ char pad4[3];
+ volatile unsigned char mc_alarm_hour;
+ char pad5[3];
+ volatile unsigned char mc_day_of_week;
+ char pad6[3];
+ volatile unsigned char mc_day_of_month;
+ char pad7[3];
+ volatile unsigned char mc_month;
+ char pad8[3];
+ volatile unsigned char mc_year;
+ char pad9[3];
+ volatile unsigned char mc_register_A;
+ char pad10[3];
+ volatile unsigned char mc_register_B;
+ char pad11[3];
+ volatile unsigned char mc_register_C;
+ char pad12[3];
+ volatile unsigned char mc_register_D;
+ char pad13[3];
+ unsigned char mc_non_volatile_ram[50 * 4]; /* unused */
+} mc_clock_ram_t;
+
+#define MC_CLOCK_PADDED 1
+
+#endif /*DECSTATION*/
+
+
+#ifdef FLAMINGO
+#define MC_DEFAULT_ADDRESS 0L
+
+/* padded, later */
+
+#endif /* FLAMINGO */
+
+
+
+#ifndef MC_CLOCK_PADDED
+typedef mc_clock_t mc_clock_ram_t; /* No padding needed */
+#endif
+
+/*
+ * Functions provided herein
+ */
+int mc_probe( vm_offset_t addr, struct bus_ctlr * );
+private void mc_attach();
+
+int mc_intr();
+
+void mc_open(), mc_close(), mc_write();
+private unsigned int mc_read();
+
+private void mc_wait_for_uip( mc_clock_ram_t *clock );
+
+
+/*
+ * Status
+ */
+boolean_t mc_running = FALSE;
+boolean_t mc_new_century = FALSE; /* "year" info overfloweth */
+
+private int days_per_month[12] = {
+ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
+};
+
+private unsigned int mc_read(); /* forward */
+private void mc_wait_for_uip();
+
+/*
+ * Where is the chip's RAM mapped
+ */
+private mc_clock_ram_t *rt_clock = MC_DEFAULT_ADDRESS;
+
+/*
+ * (Auto?)Configuration
+ */
+private vm_offset_t mc_std[NMC] = { 0 };
+private struct bus_device *mc_info[NMC];
+
+struct bus_driver mc_driver =
+ { mc_probe, 0, mc_attach, 0, mc_std, "mc", mc_info, };
+
+
+mc_probe(vm_offset_t addr, struct bus_ctlr *ui)
+{
+ rt_clock = (mc_clock_ram_t *)addr;
+ return 1;
+}
+
+private void
+mc_attach()
+{
+ printf(": MC146818 or like Time-Of-Year chip");
+}
+
+/*
+ * Interrupt routine
+ */
+#if MC_DOES_DELAYS
+
+private int config_step = 3;
+private volatile int had_intr;
+
+mc_intr(spllevel)
+ spl_t spllevel;
+{
+ /*
+ * Interrupt flags are read-to-clear.
+ */
+ if (config_step > 2)
+ return (rt_clock->mc_register_C & MC_REG_C_IRQF);
+ had_intr = (rt_clock->mc_register_C & MC_REG_C_IRQF) ? 1 : 0;
+ if (config_step++ == 0)
+ accurate_config_delay(spllevel);
+ return had_intr;
+}
+#else /* MC_DOES_DELAYS */
+
+mc_intr()
+{
+ return (rt_clock->mc_register_C); /* clear intr */
+}
+
+#endif /* MC_DOES_DELAYS */
+
+/*
+ * Start real-time clock.
+ */
+void
+mc_open()
+{
+ /*
+ * All we should need to do is to enable interrupts, but
+ * since we do not know what OS last ran on this box
+ * we'll reset it all over again. Just kidding..
+ */
+ unsigned unix_seconds_now;
+
+ /*
+ * Check for battery backup power. If we do not have it,
+ * warn the user. Time will be bogus only after power up.
+ */
+ if ((rt_clock->mc_register_D & MC_REG_D_VRT) == 0)
+ printf("WARNING: clock batteries are low\n");
+
+ /*
+ * Read the current time settings, check if the year info
+ * has been screwed up.
+ */
+ unix_seconds_now = mc_read();
+
+ if (unix_seconds_now < (SECYR * (1990 - YRREF)))
+ printf("The prom has clobbered the clock\n");
+
+ time.tv_sec = (long)unix_seconds_now;
+ mc_write();
+
+ mc_running = TRUE;
+}
+
+void
+mc_close()
+{
+ /*
+ * Disable interrupts, but keep the chip running.
+ * Note we are called at splhigh and an interrupt
+ * might be pending already.
+ */
+
+ mc_intr(0);
+ rt_clock->mc_register_B &= ~(MC_REG_B_UIE|MC_REG_B_AIE|MC_REG_B_PIE);
+ mc_running = FALSE;
+#if MC_DOES_DELAYS
+ config_step = 0;
+#endif
+}
+
+
+/*
+ * Set time-of-day. Must be called at splhigh()
+ */
+void
+mc_write()
+{
+ register mc_clock_ram_t *clock = rt_clock;
+ register unsigned years, months, days, hours, minutes, seconds;
+ register unsigned unix_seconds = time.tv_sec;
+ int frequence_selector, temp;
+ int bogus_hz = 0;
+
+ /*
+ * Convert U*x time into absolute time
+ */
+
+ years = YRREF;
+ while (1) {
+ seconds = SECYR;
+ if (LEAPYEAR(years))
+ seconds += SECDAY;
+ if (unix_seconds < seconds)
+ break;
+ unix_seconds -= seconds;
+ years++;
+ }
+
+ months = 0;
+ while (1) {
+ seconds = days_per_month[months++] * SECDAY;
+ if (months == 2 /* February */ && LEAPYEAR(years))
+ seconds += SECDAY;
+ if (unix_seconds < seconds)
+ break;
+ unix_seconds -= seconds;
+ }
+
+ days = unix_seconds / SECDAY;
+ unix_seconds -= SECDAY * days++;
+
+ hours = unix_seconds / SECHOUR;
+ unix_seconds -= SECHOUR * hours;
+
+ minutes = unix_seconds / SECMIN;
+ unix_seconds -= SECMIN * minutes;
+
+ seconds = unix_seconds;
+
+ /*
+ * Trim years into 0-99 range.
+ */
+ if ((years -= 1900) > 99) {
+ years -= 100;
+ mc_new_century = TRUE;
+ }
+
+ /*
+	 * Check for "hot dates": avoid writing within seconds of a possible end-of-month rollover
+ */
+ if (days >= 28 && days <= 30 &&
+ hours == 23 && minutes == 59 &&
+ seconds >= 58)
+ seconds = 57;
+
+ /*
+ * Select the interrupt frequency based on system params
+ */
+ switch (hz) {
+ case 1024:
+ frequence_selector = MC_BASE_32_KHz | MC_RATE_1024_Hz;
+ break;
+ case 512:
+ frequence_selector = MC_BASE_32_KHz | MC_RATE_512_Hz;
+ break;
+ case 256:
+ frequence_selector = MC_BASE_32_KHz | MC_RATE_256_Hz;
+ break;
+ case 128:
+ frequence_selector = MC_BASE_32_KHz | MC_RATE_128_Hz;
+ break;
+ case 64:
+default_frequence:
+ frequence_selector = MC_BASE_32_KHz | MC_RATE_64_Hz;
+ break;
+ default:
+ bogus_hz = hz;
+ hz = 64;
+ tick = 1000000 / 64;
+ goto default_frequence;
+ }
+
+ /*
+ * Stop updates while we fix it
+ */
+ mc_wait_for_uip(clock);
+ clock->mc_register_B = MC_REG_B_STOP;
+ wbflush();
+
+ /*
+ * Ack any pending interrupts
+ */
+ temp = clock->mc_register_C;
+
+ /*
+ * Reset the frequency divider, in case we are changing it.
+ */
+ clock->mc_register_A = MC_BASE_RESET;
+
+ /*
+ * Now update the time
+ */
+ clock->mc_second = seconds;
+ clock->mc_minute = minutes;
+ clock->mc_hour = hours;
+ clock->mc_day_of_month = days;
+ clock->mc_month = months;
+ clock->mc_year = years;
+
+ /*
+ * Spec says the VRT bit can be validated, but does not say how. I
+ * assume it is via reading the register.
+ */
+ temp = clock->mc_register_D;
+
+ /*
+ * Reconfigure the chip and get it started again
+ */
+ clock->mc_register_A = frequence_selector;
+ clock->mc_register_B = MC_REG_B_24HM | MC_REG_B_DM | MC_REG_B_PIE;
+
+ /*
+ * Print warnings, if we have to
+ */
+ if (bogus_hz != 0)
+ printf("Unacceptable value (%d Hz) for hz, reset to %d Hz\n",
+ bogus_hz, hz);
+}
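The two loops at the top of mc_write() peel whole years, then whole months, off the Unix seconds count, adding an extra SECDAY for leap years along the way. A standalone rerun of the same decomposition, assuming the conventional values SECMIN 60, SECHOUR 3600, SECDAY 86400, SECYR of 365 days, YRREF 1970 and a divide-by-four LEAPYEAR test (the real definitions live in the machine-dependent clock.h):

    #include <stdio.h>

    #define SECMIN  60
    #define SECHOUR 3600
    #define SECDAY  86400
    #define SECYR   (365*SECDAY)
    #define YRREF   1970
    #define LEAPYEAR(y) ((y) % 4 == 0)   /* adequate for 1970..2099 */

    static const int days_per_month[12] =
    	{ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };

    int main(void)
    {
    	unsigned t = 24 * SECHOUR + 90;        /* 1970-01-02 00:01:30 UTC */
    	unsigned years = YRREF, months = 0, days, secs;

    	for (;;) {                             /* same loop shape as mc_write() */
    		secs = SECYR + (LEAPYEAR(years) ? SECDAY : 0);
    		if (t < secs) break;
    		t -= secs; years++;
    	}
    	for (;;) {
    		secs = days_per_month[months++] * SECDAY;
    		if (months == 2 && LEAPYEAR(years)) secs += SECDAY;
    		if (t < secs) break;
    		t -= secs;
    	}
    	days = t / SECDAY; t -= days * SECDAY; days++;
    	printf("%u-%02u-%02u %02u:%02u:%02u\n", years, months, days,
    	       t / SECHOUR, (t % SECHOUR) / SECMIN, t % SECMIN);  /* 1970-01-02 00:01:30 */
    	return 0;
    }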
+
+
+/*
+ * Internal functions
+ */
+
+private void
+mc_wait_for_uip(clock)
+ mc_clock_ram_t *clock;
+{
+ while (clock->mc_register_A & MC_REG_A_UIP)
+ delay(MC_UPD_MINIMUM >> 2);
+}
+
+private unsigned int
+mc_read()
+{
+ /*
+ * Note we only do this at boot time
+ */
+ register unsigned years, months, days, hours, minutes, seconds;
+	register mc_clock_ram_t *clock = rt_clock;
+
+ /*
+ * If the chip is updating, wait
+ */
+ mc_wait_for_uip(clock);
+
+ years = clock->mc_year;
+ months = clock->mc_month;
+ days = clock->mc_day_of_month;
+ hours = clock->mc_hour;
+ minutes = clock->mc_minute;
+ seconds = clock->mc_second;
+
+ /*
+ * Convert to Unix time
+ */
+ seconds += minutes * SECMIN;
+ seconds += hours * SECHOUR;
+ seconds += (days - 1) * SECDAY;
+ if (months > 2 /* February */ && LEAPYEAR(years))
+ seconds += SECDAY;
+ while (months > 1)
+ seconds += days_per_month[--months - 1];
+
+ /*
+	 * Note that ten years from today (Aug 1990) the new century will
+ * cause the trouble that mc_new_century attempts to avoid.
+ */
+ if (mc_new_century)
+ years += 100;
+ years += 1900; /* chip base year in YRREF's century */
+
+ for (--years; years >= YRREF; years--) {
+ seconds += SECYR;
+ if (LEAPYEAR(years))
+ seconds += SECDAY;
+ }
+
+ return seconds;
+}
+
+#ifdef MC_DOES_DELAYS
+
+/*
+ * Timed delays
+ */
+extern unsigned int cpu_speed;
+
+void
+config_delay(speed)
+{
+ /*
+ * This is just an initial estimate, later on with the clock
+ * running we'll tune it more accurately.
+ */
+ cpu_speed = speed;
+}
+
+accurate_config_delay(spllevel)
+ spl_t spllevel;
+{
+ register unsigned int i;
+ register spl_t s;
+ int inner_loop_count;
+
+#ifdef mips
+ /* find "spllevel - 1" */
+ s = spllevel | ((spllevel >> 1) & SR_INT_MASK);
+ splx(s);
+#else
+#endif
+
+ /* wait till we have an interrupt pending */
+ had_intr = 0;
+ while (!had_intr)
+ continue;
+
+ had_intr = 0;
+ i = delay_timing_function(1, &had_intr, &inner_loop_count);
+
+ splx(spllevel);
+
+ i *= hz;
+ cpu_speed = i / (inner_loop_count * 1000000);
+
+ /* roundup clock speed */
+ i /= 100000;
+ if ((i % 10) >= 5)
+ i += 5;
+	printf("Estimating CPU clock at %d MHz\n", i / 10);
+ if (isa_pmax() && cpu_speed != MC_DELAY_PMAX) {
+ printf("%s\n", "This machine looks like a DEC 2100");
+ machine_slot[cpu_number()].cpu_subtype = CPU_SUBTYPE_MIPS_R2000;
+ }
+}
+#endif /* MC_DOES_DELAYS */
+
+#endif /* NMC > 0 */
diff --git a/chips/mc_clock.h b/chips/mc_clock.h
new file mode 100644
index 00000000..0cd59579
--- /dev/null
+++ b/chips/mc_clock.h
@@ -0,0 +1,147 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mc_clock.h
+ * Author: Alessandro Forin
+ * Date: 8/90
+ *
+ * Definitions for the MC146818 Clock Driver
+ */
+
+/*
+ * Functions this module implements
+ */
+
+extern void resettodr(/* */); /* reset time-of-day register */
+extern void startrtclock(/* */); /* start real-time clock */
+extern void stopclocks(/* */); /* stop real-time clock */
+extern boolean_t ackrtclock(/* */); /* acknowledge interrupt, if any */
+extern boolean_t todr_running; /* status */
+
+extern boolean_t mc_new_century; /* patch this after year 2000 (honest!) */
+
+extern void delay(/* int usecs */); /* waste that many microseconds */
+extern void config_delay(/* int speed */); /* for delay() */
+#define MC_DELAY_PMAX 8
+#define MC_DELAY_3MAX 12
+
+extern void set_clock_addr(/* vm_offset_t addr */); /* RAM location */
+
+/*
+ * Real-Time Clock plus RAM device (MC146818)
+ */
+
+/*
+ * RAM Memory Map (as seen by the chip)
+ */
+typedef struct {
+ volatile unsigned char mc_second;
+ volatile unsigned char mc_alarm_second;
+ volatile unsigned char mc_minute;
+ volatile unsigned char mc_alarm_minute;
+ volatile unsigned char mc_hour;
+ volatile unsigned char mc_alarm_hour;
+ volatile unsigned char mc_day_of_week;
+ volatile unsigned char mc_day_of_month;
+ volatile unsigned char mc_month;
+ volatile unsigned char mc_year;
+ volatile unsigned char mc_register_A;
+ volatile unsigned char mc_register_B;
+ volatile unsigned char mc_register_C;
+ volatile unsigned char mc_register_D;
+ unsigned char mc_non_volatile_ram[50];
+} mc_clock_t;
+
+/*
+ * Register A defines (read/write)
+ */
+
+#define MC_REG_A_RS 0x0f /* Interrupt rate (and SQwave) select */
+#define MC_REG_A_DV 0x70 /* Divider select */
+#define MC_REG_A_UIP 0x80 /* Update In Progress (read-only bit) */
+
+/* Time base configuration */
+#define MC_BASE_4_MHz 0x00
+#define MC_BASE_1_MHz 0x10
+#define MC_BASE_32_KHz 0x20
+#define MC_BASE_NONE 0x60 /* actually, both of these reset */
+#define MC_BASE_RESET 0x70
+
+/* Interrupt rate table */
+#define MC_RATE_NONE 0x0 /* disabled */
+#define MC_RATE_1 0x1 /* 256Hz if MC_BASE_32_KHz, else 32768Hz */
+#define MC_RATE_2 0x2 /* 128Hz if MC_BASE_32_KHz, else 16384Hz */
+#define MC_RATE_8192_Hz 0x3 /* Tpi: 122.070 usecs */
+#define MC_RATE_4096_Hz 0x4 /* Tpi: 244.141 usecs */
+#define MC_RATE_2048_Hz 0x5 /* Tpi: 488.281 usecs */
+#define MC_RATE_1024_Hz 0x6 /* Tpi: 976.562 usecs */
+#define MC_RATE_512_Hz 0x7 /* Tpi: 1.953125 ms */
+#define MC_RATE_256_Hz 0x8 /* Tpi: 3.90625 ms */
+#define MC_RATE_128_Hz 0x9 /* Tpi: 7.8125 ms */
+#define MC_RATE_64_Hz 0xa /* Tpi: 15.625 ms */
+#define MC_RATE_32_Hz 0xb /* Tpi: 31.25 ms */
+#define MC_RATE_16_Hz 0xc /* Tpi: 62.5 ms */
+#define MC_RATE_8_Hz 0xd /* Tpi: 125 ms */
+#define MC_RATE_4_Hz 0xe /* Tpi: 250 ms */
+#define MC_RATE_2_Hz 0xf /* Tpi: 500 ms */
+
+/* Update cycle time */
+#define MC_UPD_4_MHz 248 /* usecs */
+#define MC_UPD_1_MHz 248 /* usecs */
+#define MC_UPD_32_KHz 1984 /* usecs */
+#define MC_UPD_MINIMUM 244 /* usecs, guaranteed if UIP=0 */
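Register A is written as a time-base selector ORed with a rate: mc_write() uses the 32 kHz base throughout, so the 256 Hz periodic-interrupt case, for example, writes MC_BASE_32_KHz | MC_RATE_256_Hz = 0x20 | 0x08 = 0x28. A quick check of the combinations the driver actually uses:

    #include <stdio.h>

    #define MC_BASE_32_KHz  0x20
    #define MC_RATE_1024_Hz 0x6
    #define MC_RATE_256_Hz  0x8
    #define MC_RATE_64_Hz   0xa

    int main(void)
    {
    	printf("%#04x\n", MC_BASE_32_KHz | MC_RATE_1024_Hz); /* 0x26 */
    	printf("%#04x\n", MC_BASE_32_KHz | MC_RATE_256_Hz);  /* 0x28 */
    	printf("%#04x\n", MC_BASE_32_KHz | MC_RATE_64_Hz);   /* 0x2a */
    	return 0;
    }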
+
+/*
+ * Register B defines (read/write)
+ */
+
+#define MC_REG_B_DSE 0x01 /* Daylight Savings Enable */
+#define MC_REG_B_24HM 0x02 /* 24/12 Hour Mode */
+#define MC_REG_B_DM 0x04 /* Data Mode, 1=Binary 0=BCD */
+#define MC_REG_B_SQWE		0x08	/* Square Wave Enable */
+#define MC_REG_B_UIE 0x10 /* Update-ended Interrupt Enable */
+#define MC_REG_B_AIE 0x20 /* Alarm Interrupt Enable */
+#define MC_REG_B_PIE 0x40 /* Periodic Interrupt Enable */
+#define MC_REG_B_SET		0x80	/* SET: halt updates to set the time or NVRAM info */
+#define MC_REG_B_STOP MC_REG_B_SET /* Stop updating the timing info */
+
+/*
+ * Register C defines (read-only)
+ */
+
+#define MC_REG_C_ZEROES 0x0f /* Reads as zero bits */
+#define MC_REG_C_UF 0x10 /* Update-ended interrupt flag */
+#define MC_REG_C_AF 0x20 /* Alarm interrupt flag */
+#define MC_REG_C_PF 0x40 /* Periodic interrupt flag */
+#define MC_REG_C_IRQF 0x80 /* Interrupt request flag */
+
+/*
+ * Register D defines (read-only)
+ */
+
+#define MC_REG_D_ZEROES 0x7f /* Reads as zero bits */
+#define MC_REG_D_VRT 0x80 /* Valid RAM and Time */
+
diff --git a/chips/mouse.c b/chips/mouse.c
new file mode 100644
index 00000000..85a27447
--- /dev/null
+++ b/chips/mouse.c
@@ -0,0 +1,321 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mouse.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Driver code for Digital's mouse AND tablet
+ */
+
+/*
+ * XXX This should be rewritten to support other
+ * XXX sorts of mices and tablets. But for now
+ * XXX I have none to play with. Sorry about that.
+ */
+
+#include <lk.h> /* one mouse per lk201 */
+#if NLK > 0
+
+#include <mach/std_types.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <sys/time.h>
+#include <kern/time_out.h>
+
+#include <chips/serial_defs.h>
+#include <chips/screen_defs.h>
+
+#define MOUSE_INCREMENTAL 0x52 /* R */
+#define MOUSE_PROMPTED 0x44 /* D */
+#define MOUSE_REQ_POSITION 0x50 /* P */
+#define MOUSE_SELFTEST 0x54 /* T */
+#define MOUSE_RESERVED_FUNC 0x5a /* Z, param byte follows */
+
+#define TABLET_SAMPLE_55 0x4b /* K */ /* in reps/sec */
+#define TABLET_SAMPLE_72 0x4c /* L */
+#define TABLET_SAMPLE_120 0x4d /* M */
+#define TABLET_9600 0x42 /* B */
+
+#define TYPE_MOUSE 0x2
+#define TYPE_TABLET 0x4
+
+#define START_REPORT 0x80
+
+typedef union {
+ struct {
+ unsigned char r : 1, m : 1, l : 1, sy : 1, sx : 1;
+ unsigned char xpos;
+ unsigned char ypos;
+ } ms;
+ struct {
+ unsigned char pr : 1, buttons : 4;
+ unsigned char xlo, xhi;
+ unsigned char ylo, yhi;
+ } tb;
+ unsigned char raw[1];
+} mouse_report_t;
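The union gives two views of a report: for a mouse it is three bytes (flags/buttons, then |dx|, then |dy|, with sx/sy carrying the signs), for a tablet five bytes of buttons plus 16-bit X and Y. Since the placement of one-bit fields within a byte is compiler dependent, the driver also keeps the raw[] view; the sketch below decodes a 3-byte report by hand, assuming bits are allocated from the least significant end in the order declared above, and mirrors the sign handling used in mouse_motion_event().

    #include <stdio.h>

    #define START_REPORT 0x80

    int main(void)
    {
    	unsigned char rep[3] = { 0x98, 25, 40 };   /* sample incremental report */
    	int right  =  rep[0]       & 0x01;         /* assumed: r is bit 0 */
    	int middle = (rep[0] >> 1) & 0x01;
    	int left   = (rep[0] >> 2) & 0x01;
    	int sy     = (rep[0] >> 3) & 0x01;
    	int sx     = (rep[0] >> 4) & 0x01;
    	int x = sx ? (int) rep[1] : -(int) rep[1]; /* driver: !sx => negative */
    	int y = sy ? -(int) rep[2] : (int) rep[2]; /* driver:  sy => negative */

    	printf("buttons L%d M%d R%d  dx=%d dy=%d  (start bit %s)\n",
    	       left, middle, right, x, y,
    	       (rep[0] & START_REPORT) ? "set" : "clear");
    	return 0;
    }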
+
+
+/*
+ * Mouse state
+ */
+struct mouse_softc {
+ user_info_t *up;
+ mouse_report_t report;
+ unsigned char rep_bytes;
+ unsigned char rep_ptr;
+ unsigned char prev_buttons;
+ unsigned char flags;
+#define MS_TABLET 0x1
+#define MS_MOVING 0x2
+ char screen_unit;
+ char sl_unit;
+} mouse_softc_data[NLK];
+
+typedef struct mouse_softc *mouse_softc_t;
+
+mouse_softc_t mouse_softc[NLK];
+
+
+mouse_notify_mapped(
+ int unit,
+ int screen_unit,
+ user_info_t *up)
+{
+ mouse_softc_t ms = &mouse_softc_data[unit];
+
+ ms->up = up;
+ ms->screen_unit = screen_unit;
+}
+
+/*
+ * Autoconfiguration
+ */
+mouse_probe(
+ int unit)
+{
+ mouse_softc[unit] = &mouse_softc_data[unit];
+}
+
+mouse_attach(
+ int unit,
+ int sl_unit)
+{
+ int messg[4];
+ spl_t s;
+ mouse_softc_t ms;
+
+ ms = mouse_softc[unit];
+ ms->sl_unit = sl_unit;
+
+ s = spltty();
+ (*console_putc)(sl_unit, SCREEN_LINE_POINTER, MOUSE_SELFTEST);
+ delay(1);
+ messg[0] = (*console_getc)(sl_unit, SCREEN_LINE_POINTER, TRUE, TRUE);
+ messg[1] = (*console_getc)(sl_unit, SCREEN_LINE_POINTER, TRUE, TRUE);
+ messg[2] = (*console_getc)(sl_unit, SCREEN_LINE_POINTER, TRUE, TRUE);
+ messg[3] = (*console_getc)(sl_unit, SCREEN_LINE_POINTER, TRUE, TRUE);
+
+ delay(100000); /* spec says less than 500 msecs */
+ (*console_putc)(sl_unit, SCREEN_LINE_POINTER, MOUSE_INCREMENTAL);
+ splx(s);
+
+ ms->rep_bytes = 3;/* mouse */
+ if (messg[2] | messg[3]) {
+ printf(" bad pointer [%x %x %x %x] ",
+ messg[0], messg[1], messg[2], messg[3]);
+ if (messg[2] >= 0x20) printf("fatal ");
+ if (messg[2] == 0x3e) printf("RAM/ROM");
+ if (messg[2] == 0x3d) printf("button(s) %x", messg[3] & 0x1f);
+ } else {
+ int rev = messg[0] & 0xf;
+ int loc = (messg[1] & 0xf0) >> 4;
+ int tag = (messg[1] & 0xf);
+ printf("( %s rev. %x.%x )",
+ (tag == TYPE_MOUSE) ? "mouse" : "tablet",
+ rev, loc);
+ if (tag == TYPE_TABLET) {
+ ms->flags = MS_TABLET;
+ ms->rep_bytes = 5;
+ }
+ }
+}
+
+/*
+ * Process a character from the mouse
+ */
+mouse_input(
+ int unit,
+ register unsigned short data)
+{
+ mouse_softc_t ms = mouse_softc[unit];
+ register char flg, but;
+
+ data &= 0xff;
+
+ /* sanity: might miss a byte sometimes */
+ if (data & START_REPORT)
+ ms->rep_ptr = 0;
+
+ /* add byte to report */
+ ms->report.raw[ms->rep_ptr++] = data;
+
+ /* does this mean the mouse is moving */
+ if (data && ((data & START_REPORT) == 0))
+ ms->flags |= MS_MOVING;
+
+ /* Report complete ? */
+ if (ms->rep_ptr != ms->rep_bytes)
+ return;
+ ms->rep_ptr = 0;
+
+ ssaver_bump(ms->screen_unit);
+
+ /* check for mouse moved */
+ flg = ms->flags;
+ if (flg & MS_MOVING) {
+ ms->flags = flg & ~MS_MOVING;
+ mouse_motion_event(ms, flg);
+ }
+
+ /* check for button pressed */
+ if (but = ms->prev_buttons ^ ms->report.raw[0]) {
+ mouse_button_event(ms, flg, but);
+ ms->prev_buttons = ms->report.raw[0];
+ }
+}
+
+/*
+ * The mouse/puck moved.
+ * Find how much and post an event
+ */
+mouse_motion_event(
+ mouse_softc_t ms,
+ int flg)
+{
+ register int x, y;
+
+ if (flg & MS_TABLET) {
+
+ flg = DEV_TABLET;
+
+ x = (ms->report.tb.xhi << 8) | ms->report.tb.xlo;
+ y = (ms->report.tb.yhi << 8) | ms->report.tb.ylo;
+
+ } else {
+
+ flg = DEV_MOUSE;
+
+ x = ms->report.ms.xpos;
+ if (!ms->report.ms.sx) /* ??? */
+ x = -x;
+
+ y = ms->report.ms.ypos;
+ if (ms->report.ms.sy)
+ y = -y;
+
+ }
+
+ screen_motion_event(ms->screen_unit, flg, x, y);
+}
+
+/*
+ * A mouse/puck button was pressed/released.
+ * Find which one and post an event
+ */
+mouse_button_event(
+ mouse_softc_t ms,
+ int flg,
+ int bmask)
+{
+ register unsigned int buttons, i;
+ int key, type;
+
+ buttons = ms->report.raw[0];
+ if (flg & MS_TABLET) {
+ /* check each one of the four buttons */
+ for (i = 0; i < 4; i += 1) {
+ if ((bmask & (2<<i)) == 0)
+ continue;/* did not change */
+ type = (buttons & (2<<i)) ? EVT_BUTTON_DOWN : EVT_BUTTON_UP;
+ key = i;
+
+ screen_keypress_event( ms->screen_unit,
+ DEV_TABLET, key, type);
+ }
+ } else {
+ ms->up->mouse_buttons = buttons & 0x7;
+ /* check each one of the three buttons */
+ for (i = 0; i < 3; i += 1) {
+ if ((bmask & (1<<i)) == 0)
+ continue;/* did not change */
+ type = (buttons & (1<<i)) ? EVT_BUTTON_DOWN : EVT_BUTTON_UP;
+
+ if (i & 1)
+ key = KEY_MIDDLE_BUTTON;
+ else if ((i & 2) == 0)
+ key = KEY_RIGHT_BUTTON;
+ else
+ key = KEY_LEFT_BUTTON;
+
+ screen_keypress_event( ms->screen_unit,
+ DEV_MOUSE, key, type);
+ }
+ }
+}
+
+/*
+ * Generate escape sequences for position reporting
+ * These are the same as xterm's.
+ * Prefix:
+ * ESC [ M button down
+ * ESC [ N button up
+ * Body:
+ * BUTTON COL ROW
+ * Button:
+ * 0 <-> left, 1 <-> middle, 2 <-> right
+ * All body values are offset by the ascii SPACE character
+ */
+#define ESC '\033'
+#define SPACE ' '
+
+mouse_report_position(
+ int unit,
+ int col,
+ int row,
+ int key,
+ int type)
+{
+ cons_input(SCREEN_LINE_KEYBOARD, ESC, 0);
+ cons_input(SCREEN_LINE_KEYBOARD, '[', 0);
+ cons_input(SCREEN_LINE_KEYBOARD, (type==EVT_BUTTON_DOWN) ? 'M':'N', 0);
+
+ cons_input(SCREEN_LINE_KEYBOARD, (key - 1) + SPACE, 0);/* quick remapping */
+ cons_input(SCREEN_LINE_KEYBOARD, SPACE + col + 2, 0);
+ cons_input(SCREEN_LINE_KEYBOARD, SPACE + row + 1, 0);
+}
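Putting the comment and the fudge factors together: assuming the KEY_*_BUTTON values run 1..3, so that (key - 1) + SPACE yields the xterm button offsets 0/1/2, a middle-button press at column 10, row 5 would be injected into the console input stream as the six bytes below.

    #include <stdio.h>

    #define ESC   '\033'
    #define SPACE ' '

    int main(void)
    {
    	int key = 2, col = 10, row = 5;           /* hypothetical middle-button press */
    	unsigned char seq[6] = {
    		ESC, '[', 'M',                    /* button down */
    		(unsigned char)((key - 1) + SPACE),
    		(unsigned char)(SPACE + col + 2), /* same +2 column fudge as the routine */
    		(unsigned char)(SPACE + row + 1)  /* same +1 row fudge */
    	};

    	for (int i = 0; i < 6; i++)
    		printf("%#04x ", seq[i]);
    	printf("\n");   /* 0x1b 0x5b 0x4d 0x21 0x2c 0x26 */
    	return 0;
    }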
+
+#endif /* NLK > 0 */
diff --git a/chips/nc.c b/chips/nc.c
new file mode 100644
index 00000000..adce0ae0
--- /dev/null
+++ b/chips/nc.c
@@ -0,0 +1,851 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*** NETWORK INTERFACE IMPLEMENTATION CORE ***/
+
+#ifndef STUB
+#include <chips/nc.h>
+#else
+#include "nc.h"
+#endif
+
+/*** Types and data structures ***/
+
+#if PRODUCTION
+#define MAX_HASH 701
+#define MAX_HOST 4000
+#else
+#define MAX_HASH 7
+#define MAX_HOST 4
+#endif
+
+nw_dev_entry_s nc_failure_entry_table = {nc_fail, nc_fail,
+ nc_null, nc_null,
+ nc_null_poll, nc_null_send, nc_null_rpc,
+ nc_null_signal, nc_open_fail, nc_accept_fail,
+ nc_close_fail, nc_open_fail, nc_open_fail};
+
+nw_dev_entry_s nc_local_entry_table = {nc_succeed, nc_succeed,
+ nc_null, nc_null,
+ nc_null_poll, nc_local_send, nc_local_rpc,
+ nc_null_signal, nc_open_fail, nc_accept_fail,
+ nc_close_fail, nc_open_fail, nc_open_fail};
+
+
+typedef struct {
+ nw_address_s address;
+ int name_next:16;
+ int ip_next:16;
+ int nw_next:16;
+ nw_ep line:16;
+} nw_alist_s, *nw_alist_t;
+
+
+boolean_t nc_initialized = FALSE;
+nw_tx_header_s nw_tx[MAX_EP/2];
+nw_tx_header_t nw_free_tx_header;
+nw_rx_header_s nw_rx[2*MAX_EP];
+nw_rx_header_t nw_free_rx_header;
+nw_plist_s nw_peer[MAX_EP];
+nw_plist_t nw_free_peer;
+
+nw_devcb devct[MAX_DEV];
+
+nw_ecb ect[MAX_EP];
+
+int nw_free_ep_first, nw_free_ep_last;
+int nw_free_line_first, nw_free_line_last;
+
+nw_alist_s nw_address[MAX_HOST];
+int nw_free_address;
+
+int nw_name[MAX_HASH];
+int nw_ip[MAX_HASH];
+int nw_nw[MAX_HASH];
+
+int nw_fast_req;
+
+/*** System-independent functions ***/
+
+void nc_initialize() {
+ int ep, last_ep;
+
+ if (!nc_initialized) {
+ last_ep = sizeof(nw_tx)/sizeof(nw_tx_header_s) - 1;
+ for (ep = 0; ep < last_ep; ep++)
+ nw_tx[ep].next = &nw_tx[ep+1];
+ nw_tx[last_ep].next = NULL;
+ nw_free_tx_header = &nw_tx[0];
+ last_ep = sizeof(nw_rx)/sizeof(nw_rx_header_s) - 1;
+ for (ep = 0; ep < last_ep; ep++)
+ nw_rx[ep].next = &nw_rx[ep+1];
+ nw_rx[last_ep].next = NULL;
+ nw_free_rx_header = &nw_rx[0];
+ last_ep = sizeof(nw_peer)/sizeof(nw_plist_s) - 1;
+ for (ep = 0; ep < last_ep; ep++)
+ nw_peer[ep].next = &nw_peer[ep+1];
+ nw_peer[last_ep].next = NULL;
+ nw_free_peer = &nw_peer[0];
+ for (ep = 0; ep < MAX_DEV; ep++) {
+ devct[ep].status = NW_FAILURE;
+ devct[ep].type = NW_CONNECTIONLESS;
+ devct[ep].addr = NULL;
+ devct[ep].local_addr_1 = 0;
+ devct[ep].local_addr_2 = 0;
+ devct[ep].entry = &nc_failure_entry_table;
+ devct[ep].fast_req = 0;
+ }
+ devct[NW_NULL].status = NW_SUCCESS;
+ devct[NW_NULL].entry = &nc_local_entry_table;
+ last_ep = sizeof(ect)/sizeof(nw_ecb);
+ for (ep = 0; ep < last_ep; ep++) {
+ ect[ep].state = NW_INEXISTENT;
+ ect[ep].id = ep;
+ ect[ep].seqno = 0;
+ ect[ep].previous = ep - 1;
+ ect[ep].next = ep + 1;
+ }
+ ect[0].next = ect[0].previous = 0;
+ ect[last_ep-1].next = 0;
+ nw_free_ep_first = 1;
+ nw_free_ep_last = last_ep - 1;
+ nw_free_line_first = nw_free_line_last = 0;
+ for (ep = 0; ep < MAX_HOST; ep++) {
+ nw_address[ep].nw_next = ep + 1;
+ }
+ nw_address[MAX_HOST - 1].nw_next = -1;
+ nw_free_address = 0;
+ for (ep = 0; ep < MAX_HASH; ep++) {
+ nw_name[ep] = -1;
+ nw_ip[ep] = -1;
+ nw_nw[ep] = -1;
+ }
+ nw_fast_req = 0;
+ h_initialize();
+ nc_initialized = TRUE;
+ }
+}
+
+nw_tx_header_t nc_tx_header_allocate() {
+ nw_tx_header_t header;
+
+ header = nw_free_tx_header;
+ if (header != NULL)
+ nw_free_tx_header = header->next;
+ return header;
+}
+
+void nc_tx_header_deallocate(nw_tx_header_t header) {
+ nw_tx_header_t first_header;
+
+ first_header = header;
+ while (header->next != NULL)
+ header = header->next;
+ header->next = nw_free_tx_header;
+ nw_free_tx_header = first_header;
+}
+
+nw_rx_header_t nc_rx_header_allocate() {
+ nw_rx_header_t header;
+
+ header = nw_free_rx_header;
+ if (header != NULL)
+ nw_free_rx_header = header->next;
+ return header;
+}
+
+void nc_rx_header_deallocate(nw_rx_header_t header) {
+
+ header->next = nw_free_rx_header;
+ nw_free_rx_header = header;
+}
+
+nw_plist_t nc_peer_allocate() {
+ nw_plist_t peer;
+
+ peer = nw_free_peer;
+ if (peer != NULL)
+ nw_free_peer = peer->next;
+ return peer;
+}
+
+void nc_peer_deallocate(nw_plist_t peer) {
+ nw_plist_t first_peer;
+
+ first_peer = peer;
+ while (peer->next != NULL)
+ peer = peer->next;
+ peer->next = nw_free_peer;
+ nw_free_peer = first_peer;
+}
+
+
+nw_result nc_device_register(u_int dev, nw_dev_type type, char *dev_addr,
+ nw_dev_entry_t dev_entry_table) {
+ nw_result rc;
+
+ if (dev >= MAX_DEV) {
+ rc = NW_FAILURE;
+ } else {
+ devct[dev].status = NW_SUCCESS;
+ devct[dev].type = type;
+ devct[dev].addr = dev_addr;
+ devct[dev].entry = dev_entry_table;
+ devct[dev].fast_req = 0;
+ rc = NW_SUCCESS;
+ }
+ return rc;
+}
+
+nw_result nc_device_unregister(u_int dev, nw_result status) {
+ nw_result rc;
+
+ if (dev >= MAX_DEV) {
+ rc = NW_FAILURE;
+ } else {
+ devct[dev].status = status;
+ devct[dev].addr = NULL;
+ devct[dev].entry = &nc_failure_entry_table;
+ devct[dev].fast_req = 0;
+ rc = NW_SUCCESS;
+ }
+ return rc;
+}
+
+void nc_slow_sweep() {
+ int dev;
+
+ for (dev = 0; dev < MAX_DEV; dev++) {
+ if (devct[dev].status == NW_SUCCESS) {
+ (*(devct[dev].entry->slow_sweep)) (dev);
+ }
+ }
+}
+
+void nc_fast_timer_set(int dev) {
+
+ devct[dev].fast_req++;
+ if (nw_fast_req++ == 0)
+ h_fast_timer_set();
+}
+
+void nc_fast_timer_reset(int dev) {
+
+ devct[dev].fast_req--;
+ if (nw_fast_req-- == 0)
+ h_fast_timer_reset();
+}
+
+
+void nc_fast_sweep() {
+ int dev;
+
+ for (dev = 0; dev < MAX_DEV; dev++) {
+ if (devct[dev].status == NW_SUCCESS &&
+ devct[dev].fast_req > 0) {
+ devct[dev].fast_req = 0;
+ (*(devct[dev].entry->fast_sweep)) (dev);
+ }
+ }
+}
+
+int nc_hash_name(char *cp) {
+ int h;
+ char ch;
+ char *cp_end;
+
+ cp_end = cp + 19;
+ *cp_end = '\0';
+ h = 0;
+ ch = *cp++;
+ while (ch != '\0') {
+ h = (h << 7) + ch;
+ ch = *cp++;
+ if (ch != '\0') {
+ h = (h << 7) + ch;
+ ch = *cp++;
+ if (ch != '\0') {
+ h = (h << 7) + ch;
+ ch = *cp++;
+ }
+ }
+ h %= MAX_HASH;
+ }
+ return h;
+}
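nc_hash_name() truncates the name in place at 19 characters and then folds it into the table three characters at a time, reducing modulo MAX_HASH after each group. With the non-PRODUCTION table size of 7, "abc" hashes to ((('a' << 7) + 'b') << 7) + 'c' = 1601891, and 1601891 mod 7 = 4. A standalone copy (taking a const string and skipping the in-place truncation) for experimenting with bucket spread:

    #include <stdio.h>

    #define MAX_HASH 7     /* the !PRODUCTION value used above */

    /* Standalone copy of nc_hash_name(), minus the in-place truncation. */
    static int hash_name(const char *cp)
    {
    	int h = 0;
    	char ch = *cp++;

    	while (ch != '\0') {
    		h = (h << 7) + ch;
    		ch = *cp++;
    		if (ch != '\0') {
    			h = (h << 7) + ch;
    			ch = *cp++;
    			if (ch != '\0') {
    				h = (h << 7) + ch;
    				ch = *cp++;
    			}
    		}
    		h %= MAX_HASH;
    	}
    	return h;
    }

    int main(void)
    {
    	/* ((('a' << 7) + 'b') << 7) + 'c' = 1601891, and 1601891 % 7 = 4 */
    	printf("%d\n", hash_name("abc"));
    	return 0;
    }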
+
+
+nw_result nc_update(nw_update_type up_type, int *up_info) {
+ nw_result rc;
+ nw_alist_t ad;
+ int h, slot, previous_slot, found_slot;
+ nw_address_1 n1;
+ nw_address_2 n2;
+
+ if (up_type == NW_HOST_ADDRESS_REGISTER) {
+ if (nw_free_address == -1) {
+ rc = NW_NO_RESOURCES;
+ } else {
+ slot = nw_free_address;
+ ad = &nw_address[slot];
+ nw_free_address = ad->nw_next;
+ ad->address = *((nw_address_t) up_info);
+ h = nc_hash_name(ad->address.name);
+ ad->name_next = nw_name[h];
+ nw_name[h] = slot;
+ h = ad->address.ip_addr % MAX_HASH;
+ ad->ip_next = nw_ip[h];
+ nw_ip[h] = slot;
+ h = (ad->address.nw_addr_1 % MAX_HASH + ad->address.nw_addr_2)
+ % MAX_HASH;
+ ad->nw_next = nw_nw[h];
+ nw_nw[h] = slot;
+ ad->line = 0;
+ rc = NW_SUCCESS;
+ }
+ } else if (up_type == NW_HOST_ADDRESS_UNREGISTER) {
+ n1 = ((nw_address_t) up_info)->nw_addr_1;
+ n2 = ((nw_address_t) up_info)->nw_addr_2;
+ h = (n1 % MAX_HASH + n2) % MAX_HASH;
+ slot = nw_nw[h];
+ previous_slot = -1;
+ ad = &nw_address[slot];
+ while (slot != -1 && (ad->address.nw_addr_1 != n1 ||
+ ad->address.nw_addr_2 != n2)) {
+ previous_slot = slot;
+ slot = ad->nw_next;
+ ad = &nw_address[slot];
+ }
+ if (slot == -1) {
+ rc = NW_BAD_ADDRESS;
+ } else {
+ if (previous_slot == -1)
+ nw_nw[h] = ad->nw_next;
+ else
+ nw_address[previous_slot].nw_next = ad->nw_next;
+ ad->nw_next = nw_free_address;
+ nw_free_address = slot;
+ found_slot = slot;
+ if (ad->address.ip_addr != 0) {
+ h = ad->address.ip_addr % MAX_HASH;
+ slot = nw_ip[h];
+ previous_slot = -1;
+ while (slot != -1 && slot != found_slot) {
+ previous_slot = slot;
+ slot = nw_address[slot].ip_next;
+ }
+ if (slot == found_slot) {
+ if (previous_slot == -1)
+ nw_ip[h] = ad->ip_next;
+ else
+ nw_address[previous_slot].ip_next = ad->ip_next;
+ }
+ }
+ if (ad->address.name[0] != '\0') {
+ h = nc_hash_name(ad->address.name);
+ slot = nw_name[h];
+ previous_slot = -1;
+ while (slot != -1 && slot != found_slot) {
+ previous_slot = slot;
+ slot = nw_address[slot].name_next;
+ }
+ if (slot == found_slot) {
+ if (previous_slot == -1)
+ nw_name[h] = ad->name_next;
+ else
+ nw_address[previous_slot].name_next = ad->name_next;
+ }
+ }
+ rc = NW_SUCCESS;
+ }
+ } else {
+ rc = NW_INVALID_ARGUMENT;
+ }
+ return rc;
+}
+
+nw_result nc_lookup(nw_lookup_type lt, int *look_info) {
+ nw_result rc;
+ nw_address_t addr;
+ nw_alist_t ad;
+ int h, slot;
+ ip_address ip;
+ nw_address_1 n1;
+ nw_address_2 n2;
+
+ if (lt == NW_HOST_ADDRESS_LOOKUP) {
+ addr = (nw_address_t) look_info;
+ if (addr->ip_addr != 0) {
+ ip = addr->ip_addr;
+ h = ip % MAX_HASH;
+ slot = nw_ip[h];
+ ad = &nw_address[slot];
+ while (slot != -1 && ad->address.ip_addr != ip) {
+ slot = ad->ip_next;
+ ad = &nw_address[slot];
+ }
+ if (slot != -1) {
+ strcpy(addr->name, ad->address.name);
+ addr->nw_addr_1 = ad->address.nw_addr_1;
+ addr->nw_addr_2 = ad->address.nw_addr_2;
+ return NW_SUCCESS;
+ }
+ }
+ if (addr->name[0] != '\0') {
+ h = nc_hash_name(addr->name);
+ slot = nw_name[h];
+ ad = &nw_address[slot];
+ while (slot != -1 && strcmp(ad->address.name, addr->name) != 0) {
+ slot = ad->name_next;
+ ad = &nw_address[slot];
+ }
+ if (slot != -1) {
+ addr->ip_addr = ad->address.ip_addr;
+ addr->nw_addr_1 = ad->address.nw_addr_1;
+ addr->nw_addr_2 = ad->address.nw_addr_2;
+ return NW_SUCCESS;
+ }
+ }
+ if (addr->nw_addr_1 != 0 || addr->nw_addr_2 != 0) {
+ n1 = addr->nw_addr_1;
+ n2 = addr->nw_addr_2;
+ h = (n1 % MAX_HASH + n2) % MAX_HASH;
+ slot = nw_nw[h];
+ ad = &nw_address[slot];
+ while (slot != -1 && (ad->address.nw_addr_1 != n1 ||
+ ad->address.nw_addr_2 != n2)) {
+ slot = ad->nw_next;
+ ad = &nw_address[slot];
+ }
+ if (slot != -1) {
+ strcpy(addr->name, ad->address.name);
+ addr->ip_addr = ad->address.ip_addr;
+ return NW_SUCCESS;
+ }
+ }
+ rc = NW_BAD_ADDRESS;
+ } else {
+ rc = NW_INVALID_ARGUMENT;
+ }
+ return rc;
+}
+
+nw_result nc_line_update(nw_peer_t peer, nw_ep line) {
+ nw_result rc;
+ nw_alist_t ad;
+ int h, slot;
+ nw_address_1 n1;
+ nw_address_2 n2;
+
+ n1 = peer->rem_addr_1;
+ n2 = peer->rem_addr_2;
+ h = (n1 % MAX_HASH + n2) % MAX_HASH;
+ slot = nw_nw[h];
+ ad = &nw_address[slot];
+ while (slot != -1 && (ad->address.nw_addr_1 != n1 ||
+ ad->address.nw_addr_2 != n2)) {
+ slot = ad->nw_next;
+ ad = &nw_address[slot];
+ }
+ if (slot == -1) {
+ rc = NW_FAILURE;
+ } else {
+ ad->line = line;
+ rc = NW_SUCCESS;
+ }
+ return rc;
+}
+
+nw_ep nc_line_lookup(nw_peer_t peer) {
+ nw_ep lep;
+ nw_alist_t ad;
+ int h, slot;
+ nw_address_1 n1;
+ nw_address_2 n2;
+
+ n1 = peer->rem_addr_1;
+ n2 = peer->rem_addr_2;
+ h = (n1 % MAX_HASH + n2) % MAX_HASH;
+ slot = nw_nw[h];
+ ad = &nw_address[slot];
+ while (slot != -1 && (ad->address.nw_addr_1 != n1 ||
+ ad->address.nw_addr_2 != n2)) {
+ slot = ad->nw_next;
+ ad = &nw_address[slot];
+ }
+ if (slot == -1) {
+ lep = -1;
+ } else {
+ lep = ad->line;
+ }
+ return lep;
+}
+
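+/* Take the given endpoint (or the first free one if *epp is 0) off the
+ free-endpoint list, thread NW_LINE endpoints onto the line list, and set
+ up the buffer area as a single unused buffer spanning the whole region. */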
+nw_result nc_endpoint_allocate(nw_ep_t epp, nw_protocol protocol,
+ nw_acceptance accept,
+ char *buffer_address, u_int buffer_size) {
+ nw_result rc;
+ nw_ep ep;
+ nw_ecb_t ecb;
+
+ if (ect[(ep = *epp)].state != NW_INEXISTENT) {
+ rc = NW_BAD_EP;
+ } else if (nw_free_ep_first == 0) {
+ *epp = nw_free_line_first;
+ rc = NW_NO_EP;
+ } else {
+ if (ep == 0) {
+ ecb = &ect[nw_free_ep_first];
+ *epp = ep = ecb->id;
+ nw_free_ep_first = ecb->next;
+ if (nw_free_ep_first == 0)
+ nw_free_ep_last = 0;
+ } else {
+ ecb = &ect[ep];
+ if (ecb->previous == 0)
+ nw_free_ep_first = ecb->next;
+ else
+ ect[ecb->previous].next = ecb->next;
+ if (ecb->next == 0)
+ nw_free_ep_last = ecb->previous;
+ else
+ ect[ecb->next].previous = ecb->previous;
+ }
+ if (protocol == NW_LINE) {
+ if (nw_free_line_last == 0)
+ nw_free_line_first = ep;
+ else
+ ect[nw_free_line_last].next = ep;
+ ecb->previous = nw_free_line_last;
+ ecb->next = 0;
+ nw_free_line_last = ep;
+ }
+ ecb->protocol = protocol;
+ ecb->accept = accept;
+ ecb->state = NW_UNCONNECTED;
+ ecb->conn = NULL;
+ ecb->buf_start = buffer_address;
+ ecb->buf_end = buffer_address + buffer_size;
+ ecb->free_buffer = (nw_unused_buffer_t) buffer_address;
+ ecb->free_buffer->buf_used = 0;
+ ecb->free_buffer->buf_length = buffer_size;
+ ecb->free_buffer->previous = NULL;
+ ecb->free_buffer->next = NULL;
+ ecb->overrun = 0;
+ ecb->seqno = 0;
+ ecb->tx_first = NULL;
+ ecb->tx_last = NULL;
+ ecb->tx_initial = NULL;
+ ecb->tx_current = NULL;
+ ecb->rx_first = NULL;
+ ecb->rx_last = NULL;
+ rc = NW_SUCCESS;
+ }
+ return rc;
+}
+
+nw_result nc_endpoint_deallocate(nw_ep ep) {
+ nw_ecb_t ecb;
+ nw_rx_header_t rx_header;
+
+ ecb = &ect[ep];
+ if (ecb->conn != NULL)
+ nc_peer_deallocate(ecb->conn);
+ if (ecb->tx_first != NULL)
+ nc_tx_header_deallocate(ecb->tx_first);
+ if (ecb->tx_initial != NULL)
+ nc_tx_header_deallocate(ecb->tx_initial);
+ while (ecb->rx_first != NULL) {
+ rx_header = ecb->rx_first;
+ ecb->rx_first = rx_header->next;
+ nc_rx_header_deallocate(rx_header);
+ }
+ if (ecb->protocol == NW_LINE) {
+ if (ecb->previous == 0)
+ nw_free_line_first = ecb->next;
+ else
+ ect[ecb->previous].next = ecb->next;
+ if (ecb->next == 0)
+ nw_free_line_last = ecb->previous;
+ else
+ ect[ecb->next].previous = ecb->previous;
+ }
+ ecb->next = 0;
+ ecb->previous = nw_free_ep_last;
+ if (nw_free_ep_last == 0)
+ nw_free_ep_first = ep;
+ else
+ ect[nw_free_ep_last].next = ep;
+ nw_free_ep_last = ep;
+ ecb->id = ep;
+ ecb->state = NW_INEXISTENT;
+ return NW_SUCCESS;
+}
+
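+/* Rebuild the endpoint's free-buffer list by sweeping the buffer area and
+ merging runs of adjacent unused buffers; if a buffer with an unaligned
+ length is found, the area is considered trashed and the free list is
+ emptied. */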
+void nc_buffer_coalesce(nw_ecb_t ecb) {
+ nw_unused_buffer_t p, q, buf_free, buf_start, buf_end;
+
+ buf_start = p = (nw_unused_buffer_t) ecb->buf_start;
+ buf_end = (nw_unused_buffer_t) ecb->buf_end;
+ buf_free = NULL;
+ while (p >= buf_start && p < buf_end) {
+ if (p->buf_length & 0x3)
+ goto trash_area;
+ if (p->buf_used) {
+ p = (nw_unused_buffer_t) ((char *) p + p->buf_length);
+ } else {
+ q = (nw_unused_buffer_t) ((char *) p + p->buf_length);
+ while (q >= buf_start && q < buf_end && !q->buf_used) {
+ if (q->buf_length & 0x3)
+ goto trash_area;
+ p->buf_length += q->buf_length;
+ q = (nw_unused_buffer_t) ((char *) q + q->buf_length);
+ }
+ p->next = buf_free;
+ p->previous = NULL;
+ if (buf_free != NULL)
+ buf_free->previous = p;
+ buf_free = p;
+ p = q;
+ }
+ }
+ ecb->free_buffer = buf_free;
+ return;
+
+ trash_area:
+ ecb->free_buffer = NULL;
+ return;
+}
+
+
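+/* Carve a buffer of at least the requested size (which includes the
+ nw_buffer_s header and is rounded up to a multiple of 4) out of the
+ endpoint's free list, splitting a larger free buffer when possible;
+ if the first scan fails, coalesce and retry once before flagging an
+ overrun. */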
+nw_buffer_t nc_buffer_allocate(nw_ep ep, u_int size) {
+ nw_ecb_t ecb;
+ nw_unused_buffer_t buf, buf_start, buf_end;
+
+ ecb = &ect[ep];
+ buf_start = (nw_unused_buffer_t) ecb->buf_start;
+ buf_end = (nw_unused_buffer_t) (ecb->buf_end - sizeof(nw_buffer_s));
+ if (size < sizeof(nw_buffer_s))
+ size = sizeof(nw_buffer_s);
+ else
+ size = ((size + 3) >> 2) << 2;
+ buf = ecb->free_buffer;
+ if (buf != NULL) {
+ while (buf->buf_length < size) {
+ buf = buf->next;
+ if (buf < buf_start || buf > buf_end || ((int) buf & 0x3)) {
+ buf = NULL;
+ break;
+ }
+ }
+ }
+ if (buf == NULL) {
+ nc_buffer_coalesce(ecb);
+ buf = ecb->free_buffer;
+ while (buf != NULL && buf->buf_length < size)
+ buf = buf->next;
+ }
+ if (buf == NULL) {
+ ecb->overrun = 1;
+ } else {
+ if (buf->buf_length < size + sizeof(nw_buffer_s)) {
+ if (buf->previous == NULL)
+ ecb->free_buffer = buf->next;
+ else
+ buf->previous->next = buf->next;
+ if (buf->next != NULL)
+ buf->next->previous = buf->previous;
+ } else {
+ buf->buf_length -= size;
+ buf = (nw_unused_buffer_t) ((char *) buf + buf->buf_length);
+ buf->buf_length = size;
+ }
+ buf->buf_used = 1;
+ }
+ return (nw_buffer_t) buf;
+}
+
+nw_result nc_buffer_deallocate(nw_ep ep, nw_buffer_t buffer) {
+ nw_ecb_t ecb;
+ nw_unused_buffer_t buf;
+
+ ecb = &ect[ep];
+ buf = (nw_unused_buffer_t) buffer;
+ buf->buf_used = 0;
+ buf->previous = NULL;
+ buf->next = ecb->free_buffer;
+ if (ecb->free_buffer != NULL)
+ ecb->free_buffer->previous = buf;
+ ecb->free_buffer = buf;
+ return NW_SUCCESS;
+}
+
+nw_result nc_endpoint_status(nw_ep ep, nw_state_t state, nw_peer_t peer) {
+ nw_result rc;
+ nw_ecb_t ecb;
+
+ ecb = &ect[ep];
+ *state = ecb->state;
+ if (ecb->conn)
+ *peer = ecb->conn->peer;
+ if (ecb->overrun) {
+ ecb->overrun = 0;
+ rc = NW_OVERRUN;
+ } else if (ecb->rx_first != NULL) {
+ rc = NW_QUEUED;
+ } else {
+ rc = NW_SUCCESS;
+ }
+ return rc;
+}
+
+
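+/* Loopback send: gather the blocks described by the tx headers into one
+ buffer allocated in the receiver's own buffer area and deliver it,
+ stamping the receiver's next message sequence number (or 0 for NW_URGENT
+ messages, which bypass queued traffic). */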
+nw_result nc_local_send(nw_ep ep, nw_tx_header_t header, nw_options options) {
+ nw_result rc;
+ nw_ep receiver;
+ int length;
+ nw_buffer_t buffer;
+ nw_tx_header_t first_header;
+ nw_rx_header_t rx_header;
+ char *bufp;
+ nw_ecb_t ecb;
+
+ receiver = header->peer.remote_ep;
+ length = header->msg_length;
+ buffer = nc_buffer_allocate(receiver, sizeof(nw_buffer_s) + length);
+ if (buffer == NULL) {
+ rc = NW_OVERRUN;
+ } else {
+ buffer->buf_next = NULL;
+ buffer->block_offset = sizeof(nw_buffer_s);
+ buffer->block_length = length;
+ buffer->peer.rem_addr_1 = NW_NULL << 28;
+ buffer->peer.rem_addr_2 = 0;
+ buffer->peer.remote_ep = ep;
+ buffer->peer.local_ep = receiver;
+ bufp = (char *) buffer + sizeof(nw_buffer_s);
+ first_header = header;
+ while (header != NULL) {
+ length = header->block_length;
+ bcopy(header->block, bufp, length);
+ bufp += length;
+ if (header->buffer != NULL)
+ nc_buffer_deallocate(ep, header->buffer);
+ header = header->next;
+ }
+ nc_tx_header_deallocate(first_header);
+ ecb = &ect[receiver];
+ if (options == NW_URGENT) {
+ buffer->msg_seqno = 0;
+ if (nc_deliver_result(receiver, NW_RECEIVE_URGENT, (int) buffer))
+ rc = NW_SUCCESS;
+ else
+ rc = NW_NO_RESOURCES;
+ } else {
+ if (ecb->seqno == 1023)
+ buffer->msg_seqno = ecb->seqno = 1;
+ else
+ buffer->msg_seqno = ++ecb->seqno;
+ if (nc_deliver_result(receiver, NW_RECEIVE, (int) buffer))
+ rc = NW_SUCCESS;
+ else
+ rc = NW_NO_RESOURCES;
+ }
+ }
+ return rc;
+}
+
+nw_buffer_t nc_local_rpc(nw_ep ep, nw_tx_header_t header, nw_options options) {
+ nw_buffer_t buf;
+ nw_ecb_t ecb;
+ nw_rx_header_t rx_header;
+
+ ecb = &ect[ep];
+ rx_header = ecb->rx_first;
+ if (nc_local_send(ep, header, options) != NW_SUCCESS) {
+ buf = NW_BUFFER_ERROR;
+ } else if (rx_header == NULL) {
+ buf = NULL;
+ } else {
+ buf = rx_header->buffer;
+ ecb->rx_first = rx_header->next;
+ if (ecb->rx_first == NULL)
+ ecb->rx_last = NULL;
+ nc_rx_header_deallocate(rx_header);
+ }
+ return buf;
+}
+
+
+nw_result nc_succeed(int dev) {
+
+ return NW_SUCCESS;
+}
+
+void nc_null(int dev) {
+
+}
+
+nw_result nc_fail(int dev) {
+
+ return NW_FAILURE;
+}
+
+int nc_null_poll(int dev) {
+
+ return 1000000;
+}
+
+nw_result nc_null_send(nw_ep ep, nw_tx_header_t header, nw_options options) {
+
+ return NW_FAILURE;
+}
+
+nw_buffer_t nc_null_rpc(nw_ep ep, nw_tx_header_t header, nw_options options) {
+
+ return NW_BUFFER_ERROR;
+}
+
+void nc_null_signal(nw_buffer_t msg) {
+
+}
+
+nw_result nc_open_fail(nw_ep lep, nw_address_1 a1,
+ nw_address_2 a2, nw_ep rep) {
+
+ return NW_FAILURE;
+}
+
+nw_result nc_close_fail(nw_ep ep) {
+
+ return NW_FAILURE;
+}
+
+nw_result nc_accept_fail(nw_ep ep, nw_buffer_t msg, nw_ep_t epp) {
+
+ return NW_FAILURE;
+}
+
diff --git a/chips/nc.h b/chips/nc.h
new file mode 100644
index 00000000..0e481e8c
--- /dev/null
+++ b/chips/nc.h
@@ -0,0 +1,232 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*** NETWORK INTERFACE IMPLEMENTATION CORE ***/
+
+#ifndef _NC_H_
+#define _NC_H_
+
+#ifndef STUB
+#include <chips/nw.h>
+#else
+#include "nw.h"
+#endif
+
+/*** Types and data structures ***/
+
+#if PRODUCTION
+#define MAX_EP 1024
+#define MAX_DEV 16
+#else
+#define MAX_EP 32
+#define MAX_DEV 3
+#endif
+
+#define MASTER_LINE_EP 0
+#define SIGNAL_EP 1
+
+typedef struct nw_tx_headers {
+ nw_buffer_t buffer;
+ u_int msg_length;
+ char *block;
+ u_int block_length;
+ nw_peer_s peer;
+ nw_ep sender;
+ nw_options options;
+ struct nw_tx_headers *next;
+} nw_tx_header_s;
+
+typedef nw_tx_header_s *nw_tx_header_t;
+
+typedef struct nw_rx_headers {
+ nw_buffer_t buffer;
+ nw_ep receiver;
+ u_int reply;
+ int time_stamp;
+ struct nw_rx_headers *next;
+} nw_rx_header_s, *nw_rx_header_t;
+
+typedef enum {
+ NW_CONNECTIONLESS,
+ NW_CONNECTION_ORIENTED
+} nw_dev_type;
+
+
+typedef struct {
+ nw_result (*initialize)(int);
+ nw_result (*status)(int);
+ void (*slow_sweep)(int);
+ void (*fast_sweep)(int);
+ int (*poll)(int);
+ nw_result (*send)(nw_ep, nw_tx_header_t, nw_options);
+ nw_buffer_t (*rpc)(nw_ep, nw_tx_header_t, nw_options);
+ void (*signal)(nw_buffer_t);
+ nw_result (*open)(nw_ep, nw_address_1, nw_address_2, nw_ep);
+ nw_result (*accept)(nw_ep, nw_buffer_t, nw_ep_t);
+ nw_result (*close)(nw_ep);
+ nw_result (*add)(nw_ep, nw_address_1, nw_address_2, nw_ep);
+ nw_result (*drop)(nw_ep, nw_address_1, nw_address_2, nw_ep);
+} nw_dev_entry_s, *nw_dev_entry_t;
+
+typedef struct {
+ nw_result status;
+ nw_dev_type type;
+ char *addr;
+ nw_address_1 local_addr_1;
+ nw_address_2 local_addr_2;
+ nw_dev_entry_t entry;
+ int fast_req;
+} nw_devcb;
+
+extern nw_devcb devct[MAX_DEV];
+
+typedef struct plists {
+ nw_peer_s peer;
+ struct plists *next;
+} nw_plist_s, *nw_plist_t;
+
+typedef struct nw_unused_buffers {
+ u_int buf_used:1;
+ u_int buf_length:31;
+ struct nw_unused_buffers *next;
+ struct nw_unused_buffers *previous;
+} nw_unused_buffer_s, *nw_unused_buffer_t;
+
+typedef struct ecbs {
+ nw_protocol protocol;
+ nw_acceptance accept;
+ nw_state state;
+ nw_plist_t conn;
+ char *buf_start;
+ char *buf_end;
+ nw_unused_buffer_t free_buffer;
+ nw_ep id:16;
+ u_int overrun:1;
+ u_int seqno:14;
+ nw_tx_header_t tx_first;
+ nw_tx_header_t tx_last;
+ nw_tx_header_t tx_initial;
+ nw_tx_header_t tx_current;
+ nw_rx_header_t rx_first;
+ nw_rx_header_t rx_last;
+ nw_ep next:16;
+ nw_ep previous:16;
+} nw_ecb, *nw_ecb_t;
+
+extern nw_ecb ect[MAX_EP];
+
+extern int nw_free_ep_first, nw_free_ep_last;
+extern int nw_free_line_first, nw_free_line_last;
+
+typedef enum {
+ NW_RECEIVE,
+ NW_RECEIVE_URGENT,
+ NW_SEND,
+ NW_SIGNAL
+} nw_delivery;
+
+
+/*** System-independent functions implemented in core ***/
+
+extern void nc_initialize();
+
+extern nw_tx_header_t nc_tx_header_allocate();
+
+extern void nc_tx_header_deallocate(nw_tx_header_t header);
+
+extern nw_rx_header_t nc_rx_header_allocate();
+
+extern void nc_rx_header_deallocate(nw_rx_header_t header);
+
+extern nw_plist_t nc_peer_allocate();
+
+extern void nc_peer_deallocate(nw_plist_t peer);
+
+extern nw_result nc_device_register(u_int dev, nw_dev_type type,
+ char *dev_addr,
+ nw_dev_entry_t dev_entry_table);
+
+extern nw_result nc_device_unregister(u_int dev, nw_result status);
+
+extern void nc_fast_sweep();
+
+extern void nc_fast_timer_set();
+
+extern void nc_fast_timer_reset();
+
+extern void nc_slow_sweep();
+
+extern nw_result nc_update(nw_update_type up_type, int *up_info);
+
+extern nw_result nc_lookup(nw_lookup_type lt, int *look_info);
+
+extern nw_result nc_line_update(nw_peer_t peer, nw_ep line);
+
+extern nw_ep nc_line_lookup(nw_peer_t peer);
+
+extern nw_result nc_endpoint_allocate(nw_ep_t epp, nw_protocol protocol,
+ nw_acceptance accept,
+ char *buffer_address, u_int buffer_size);
+
+extern nw_result nc_endpoint_deallocate(nw_ep ep);
+
+extern nw_buffer_t nc_buffer_allocate(nw_ep ep, u_int size);
+
+extern nw_result nc_buffer_deallocate(nw_ep ep, nw_buffer_t buffer);
+
+extern nw_result nc_endpoint_status(nw_ep ep,
+ nw_state_t state, nw_peer_t peer);
+
+
+/* System-dependent function implemented in wrapper */
+
+extern boolean_t nc_deliver_result(nw_ep ep, nw_delivery type, int result);
+
+/* Support required in wrapper */
+
+extern void h_initialize();
+
+extern void h_fast_timer_set();
+
+extern void h_fast_timer_reset();
+
+
+/* Stubs for device table */
+
+extern nw_result nc_succeed(int);
+extern nw_result nc_fail(int);
+extern void nc_null(int);
+extern int nc_null_poll(int);
+extern nw_result nc_null_send(nw_ep, nw_tx_header_t, nw_options);
+extern nw_buffer_t nc_null_rpc(nw_ep, nw_tx_header_t, nw_options);
+extern nw_result nc_local_send(nw_ep, nw_tx_header_t, nw_options);
+extern nw_buffer_t nc_local_rpc(nw_ep, nw_tx_header_t, nw_options);
+extern void nc_null_signal(nw_buffer_t);
+extern nw_result nc_open_fail(nw_ep, nw_address_1, nw_address_2, nw_ep);
+extern nw_result nc_accept_fail(nw_ep, nw_buffer_t, nw_ep_t);
+extern nw_result nc_close_fail(nw_ep);
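+
+/* The stubs above can be combined into a complete device entry table for
+ a device that is absent or not serviced; for illustration only (the name
+ null_dev_entry is hypothetical, and the initializers follow the field
+ order of nw_dev_entry_s):
+
+ nw_dev_entry_s null_dev_entry = {
+ nc_fail, nc_fail, nc_null, nc_null, nc_null_poll,
+ nc_null_send, nc_null_rpc, nc_null_signal,
+ nc_open_fail, nc_accept_fail, nc_close_fail,
+ nc_open_fail, nc_open_fail
+ };
+*/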
+
+#endif /* _NC_H_ */
diff --git a/chips/nw.h b/chips/nw.h
new file mode 100644
index 00000000..63d497b6
--- /dev/null
+++ b/chips/nw.h
@@ -0,0 +1,494 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _NW_H_
+#define _NW_H_ 1
+
+#if defined(KMODE) || defined(KERNEL)
+#include <sys/types.h>
+#include <mach/port.h>
+#else
+#include "stub0.h"
+#endif
+
+/*** NETWORK APPLICATION PROGRAMMING INTERFACE ***/
+
+/*** TYPES ***/
+
+typedef enum {
+ NW_SUCCESS,
+ NW_FAILURE,
+ NW_BAD_ADDRESS,
+ NW_OVERRUN,
+ NW_NO_CARRIER,
+ NW_NOT_SERVER,
+ NW_NO_EP,
+ NW_BAD_EP,
+ NW_INVALID_ARGUMENT,
+ NW_NO_RESOURCES,
+ NW_PROT_VIOLATION,
+ NW_BAD_BUFFER,
+ NW_BAD_LENGTH,
+ NW_NO_REMOTE_EP,
+ NW_TIME_OUT,
+ NW_INCONSISTENCY,
+ NW_ABORTED,
+ NW_SYNCH,
+ NW_QUEUED
+} nw_result, *nw_result_t;
+
+typedef enum {
+ NW_INITIALIZE,
+ NW_HOST_ADDRESS_REGISTER,
+ NW_HOST_ADDRESS_UNREGISTER
+} nw_update_type;
+
+typedef enum {
+ NW_STATUS,
+ NW_HOST_ADDRESS_LOOKUP
+} nw_lookup_type;
+
+typedef u_int ip_address;
+
+typedef u_int nw_address_1;
+typedef u_int nw_address_2;
+
+typedef struct {
+ char name[20]; /*Host name -- first 19 characters, zero-terminated*/
+ ip_address ip_addr;
+ nw_address_1 nw_addr_1; /*4 most significant bits specify the device*/
+ nw_address_2 nw_addr_2;
+} nw_address_s, *nw_address_t;
+
+typedef enum {
+ NW_NULL,
+ NW_IP, /*Virtual network for IP addresses*/
+ NW_TCA100_1, /*Fore Systems ATM network, first unit*/
+ NW_TCA100_2 /*Fore Systems ATM network, second unit*/
+} nw_device, *nw_device_t;
+
+#define NW_DEVICE(addr) (addr >> 28)
+
+typedef u_int nw_ep, *nw_ep_t;
+
+typedef enum {
+ NW_RAW, /*Raw service provided by network*/
+ NW_DATAGRAM, /*Connectionless service*/
+ NW_SEQ_PACKET, /*Connection-oriented service*/
+ NW_LINE /*Multiplexing line (system use only)*/
+} nw_protocol;
+
+typedef enum {
+ NW_NO_ACCEPT, /*Connection requests not accepted (client)*/
+ NW_APPL_ACCEPT, /*Connection requests received as message by
+ application (msg_seqno 0), for examination
+ and approval (nw_connection_accept function)*/
+ NW_AUTO_ACCEPT, /*Connection requests automatically accepted
+ if endpoint is connection-oriented and
+ not already connected*/
+ NW_LINE_ACCEPT /*Connection requests automatically accepted
+ on a new endpoint (system use only)*/
+} nw_acceptance;
+
+typedef struct {
+ nw_address_1 rem_addr_1;
+ nw_address_2 rem_addr_2;
+ nw_ep remote_ep:16;
+ nw_ep local_ep:16;
+} nw_peer_s, *nw_peer_t;
+
+typedef struct nw_buffer {
+ u_int buf_used:1; /*Read-only for applications (always 1)*/
+ u_int buf_length:31; /*Read-only for applications*/
+ struct nw_buffer *buf_next; /*Used only to gather on sends*/
+ u_int msg_seqno:10; /*Sequential number of message,
+ automatically set by network interface*/
+ u_int block_offset:22; /*Offset to the beginning of data (in bytes),
+ from the start of the buffer*/
+ u_int block_deallocate:1; /*Used only to deallocate on sends*/
+ u_int block_length:31; /*Length of data (in bytes)
+ beginning at offset*/
+ nw_peer_s peer; /*Set on receives. Also required
+ in first block on datagram sends.
+ Ignored on sequenced packet sends.*/
+} nw_buffer_s, *nw_buffer_t;
+
+
+/* msg_seqno is normally between 1 and 1023, and increases modulo 1024
+ (skipping 0) between consecutive messages. In sequenced packets, msg_seqno
+ increases strictly by one. msg_seqno is assigned automatically.
+ The network interface writes in the buffer the msg_seqno used,
+ but only after the buffer has been transmitted and, in case of
+ sequenced packet, acknowledged. The application can use this update
+ to determine if a buffer can be reused, after sending a message without
+ the deallocate option.
+ msg_seqno 0 is used when the corresponding send specifies the NW_URGENT
+ option. Such messages bypass any other messages possibly enqueued.
+ msg_seqno 0 is also used for open connection requests, in the case
+ of sequenced packet endpoints with the NW_APPL_ACCEPT option.
+ The type of msg_seqno 0 message is differentiated by the first word in the
+ message, which has type nw_options */
+
+#define NW_BUFFER_ERROR ((nw_buffer_t) -1) /*Used for error indication
+ other than buffer overrun
+ (for which NULL is used)*/
+
+typedef enum {
+ NW_INEXISTENT,
+ NW_UNCONNECTED,
+ NW_SIMPLEX_ORIGINATING,
+ NW_SIMPLEX_ORIGINATED,
+ NW_DUPLEX_ORIGINATING,
+ NW_DUPLEX_ORIGINATING_2,
+ NW_DUPLEX_ORIGINATED,
+ NW_ORIGINATOR_CLOSING,
+ NW_ORIGINATOR_RCLOSING,
+ NW_ACCEPTING,
+ NW_SIMPLEX_ACCEPTED,
+ NW_DUPLEX_ACCEPTING,
+ NW_DUPLEX_ACCEPTED,
+ NW_ACCEPTOR_CLOSING,
+ NW_ACCEPTOR_RCLOSING
+} nw_state, *nw_state_t;
+
+
+typedef enum nw_options {
+ NW_NORMAL,
+ NW_URGENT,
+ NW_SYNCHRONIZATION /*System use only*/
+} nw_options;
+
+
+/*** FUNCTIONS ***/
+
+extern nw_result nw_update(mach_port_t master_port, nw_update_type up_type,
+ int *up_info);
+
+/*****************************************************************************
+ Allows privileged applications to update network tables. The
+ application must present the device master port. up_type selects the
+ type of update, and up_info is cast accordingly to the correct type.
+
+ For NW_HOST_ADDRESS_REGISTER and NW_HOST_ADDRESS_UNREGISTER,
+ up_info has type nw_address_t. For NW_HOST_ADDRESS_UNREGISTER,
+ however, only the network address field is used.
+
+ up_info is not used for NW_INITIALIZE. This option is used to
+ initialize network interface tables, but does not initialize
+ devices. Initialization of hardware and network tables occurs
+ automatically at probe/boot time, so this option is normally
+ unnecessary.
+
+ Returns NW_SUCCESS if operation completed successfully.
+ NW_FAILURE if master port not presented.
+ NW_NO_RESOURCES if network tables full (NW_HOST_ADDRESS_REGISTER).
+ NW_BAD_ADDRESS if host not found (NW_HOST_ADDRESS_UNREGISTER).
+ NW_INVALID_ARGUMENT if up_type is invalid or up_info is
+ a bad pointer.
+ *****************************************************************************/
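+
+/* Usage sketch (illustrative only): registering a host, where master_port,
+ my_ip, a1 and a2 are assumed to be supplied by the caller, and a1 carries
+ the device number in its 4 most significant bits:
+
+ nw_address_s host;
+ nw_result rc;
+
+ bzero((char *) &host, sizeof(host));
+ strcpy(host.name, "peer-host");
+ host.ip_addr = my_ip;
+ host.nw_addr_1 = a1;
+ host.nw_addr_2 = a2;
+ rc = nw_update(master_port, NW_HOST_ADDRESS_REGISTER, (int *) &host);
+*/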
+
+
+extern nw_result nw_lookup(nw_lookup_type lt, int *look_info);
+
+/*****************************************************************************
+ Allows applications to look up network tables. The type of
+ lookup is selected by lt, and look_info is cast to the correct type
+ accordingly.
+
+ For lt equal to NW_HOST_ADDRESS_LOOKUP, look_info has type
+ nw_address_t. In this option, the host is looked up first using the
+ IP address as a key (if non-zero), then by name (if non-empty),
+ and finally by network address (if non-zero). The function
+ returns NW_SUCCESS on the first match it finds, and sets the non-key
+ fields of look_info to the values found. No consistency check is
+ made if more than one key is supplied. The function returns
+ NW_BAD_ADDRESS if the host was not found, and NW_INVALID_ARGUMENT
+ if lt is invalid or look_info is a bad pointer.
+
+ For lt equal to NW_STATUS, look_info has type nw_device_t on input
+ and nw_result_t on output. The function returns NW_INVALID_ARGUMENT
+ if the device chosen is invalid or look_info is a bad pointer;
+ otherwise, the function returns NW_SUCCESS. look_info is
+ set to: NW_FAILURE if the device is not present, NW_NOT_SERVER
+ if the device is not serviced by this interface, or a
+ device-dependent value otherwise (NW_SUCCESS if there is no device error).
+
+ *****************************************************************************/
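+
+/* Usage sketch (illustrative only): resolving a previously registered host
+ by name, using only the name field as the key; on NW_SUCCESS the remaining
+ fields of host are filled in with the values found:
+
+ nw_address_s host;
+ nw_result rc;
+
+ bzero((char *) &host, sizeof(host));
+ strcpy(host.name, "peer-host");
+ rc = nw_lookup(NW_HOST_ADDRESS_LOOKUP, (int *) &host);
+*/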
+
+
+extern nw_result nw_endpoint_allocate(nw_ep_t epp, nw_protocol protocol,
+ nw_acceptance accept, u_int buffer_size);
+
+/*****************************************************************************
+ Allocates a communication endpoint. On input, epp should point to the
+ endpoint number desired, or to 0 if any number is adequate.
+ On output, epp points to the actual number allocated for the endpoint.
+ protocol specifies the transport discipline applied to data transmitted
+ or received through the endpoint. accept selects how open connection
+ requests received for the endpoint should be handled (connection-oriented
+ protocol), or whether the endpoint can receive messages (connectionless
+ protocol). buffer_size specifies the length in bytes of the buffer area
+ for data sent or received through the endpoint.
+
+ Returns NW_SUCCESS if endpoint successfully allocated.
+ NW_INVALID_ARGUMENT if epp is a bad pointer or the
+ protocol or accept arguments are invalid.
+ NW_NO_EP if the endpoint name space is exhausted.
+ NW_BAD_EP if there already is an endpoint with the
+ number selected, or the number selected is
+ out of bounds.
+ NW_NO_RESOURCES if not enough memory for buffer.
+ *****************************************************************************/
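+
+/* Usage sketch (illustrative only): a client allocating a sequenced-packet
+ endpoint with a 16 Kbyte buffer area, letting the interface pick the
+ endpoint number:
+
+ nw_ep ep;
+ nw_result rc;
+
+ ep = 0;
+ rc = nw_endpoint_allocate(&ep, NW_SEQ_PACKET, NW_NO_ACCEPT, 16 * 1024);
+*/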
+
+
+extern nw_result nw_endpoint_deallocate(nw_ep ep);
+
+/*****************************************************************************
+ Deallocates the given endpoint.
+
+ Returns NW_SUCCESS if successfully deallocated endpoint.
+ NW_BAD_EP if endpoint does not exist.
+ NW_PROT_VIOLATION if access to endpoint not authorized.
+ *****************************************************************************/
+
+
+extern nw_buffer_t nw_buffer_allocate(nw_ep ep, u_int size);
+
+/*****************************************************************************
+ Allocates a buffer of the given size (in bytes) from the buffer area
+ of the given endpoint.
+
+ Returns NW_BUFFER_ERROR if endpoint does not exist or access to endpoint
+ is not authorized.
+ NULL if no buffer with given size could be allocated.
+ Pointer to allocated buffer, otherwise.
+ *****************************************************************************/
+
+
+extern nw_result nw_buffer_deallocate(nw_ep ep, nw_buffer_t buffer);
+
+/*****************************************************************************
+ Deallocates the given buffer.
+
+ Returns NW_SUCCESS if successfully deallocated buffer.
+ NW_BAD_EP if endpoint does not exist.
+ NW_PROT_VIOLATION if access to endpoint not authorized.
+ NW_BAD_BUFFER if buffer does not belong to endpoint's
+ buffer area or is malformed.
+ *****************************************************************************/
+
+
+extern nw_result nw_connection_open(nw_ep local_ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep);
+
+/*****************************************************************************
+ Opens a connection.
+
+ Returns NW_SUCCESS if connection successfully opened.
+ NW_BAD_EP if local endpoint does not exist, uses connectionless
+ protocol or is already connected.
+ NW_PROT_VIOLATION if access to local or remote endpoint
+ not authorized.
+ NW_BAD_ADDRESS if address of remote host is invalid.
+ NW_NO_REMOTE_EP if connection name space exhausted at
+ remote host.
+ NW_TIME_OUT if attempt to open connection timed out repeatedly.
+ NW_FAILURE if remote endpoint does not exist, uses connectionless
+ protocol or is already connected, or if remote
+ application did not accept open request.
+ *****************************************************************************/
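+
+/* Usage sketch (illustrative only): opening a connection from the local
+ endpoint ep to a remote endpoint, where host is assumed to have been
+ filled in by nw_lookup and remote_ep to be known by convention:
+
+ rc = nw_connection_open(ep, host.nw_addr_1, host.nw_addr_2, remote_ep);
+*/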
+
+
+extern nw_result nw_connection_accept(nw_ep ep, nw_buffer_t msg,
+ nw_ep_t new_epp);
+
+/*****************************************************************************
+ Accepts open request (at the remote host). On input, new_epp equal to
+ NULL indicates that the application does not accept the request.
+ new_epp pointing to the value 0 indicates that the application wants
+ to accept the connection on a new endpoint, created dynamically,
+ with the same attributes as the original endpoint; new_epp pointing
+ to the value ep indicates that the application wants to simply
+ accept the open request. On output, new_epp points to the endpoint
+ actually connected, if any. msg points to the open request, which is
+ automatically deallocated.
+
+ Returns NW_SUCCESS if connection correctly accepted or refused.
+ NW_BAD_EP if endpoint does not exist or has no outstanding
+ open request.
+ NW_PROT_VIOLATION if access to endpoint not authorized.
+ NW_BAD_BUFFER if msg does not belong to the endpoint's
+ buffer area, or is malformed.
+ NW_INVALID_ARGUMENT if new_epp is a bad pointer or points to
+ invalid value.
+ NW_NO_EP if endpoint name space exhausted.
+ NW_NO_RESOURCES if no buffer available for new endpoint.
+ NW_TIME_OUT if attempt to accept at different endpoint
+ repeatedly timed out.
+ *****************************************************************************/
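+
+/* Usage sketch (illustrative only): a server endpoint allocated with
+ NW_APPL_ACCEPT might receive the open request as a msg_seqno 0 message
+ and accept it on a dynamically created endpoint by passing 0 in new_ep:
+
+ nw_buffer_t msg;
+ nw_ep new_ep;
+ nw_result rc;
+
+ msg = nw_receive(ep, -1);
+ if (msg != NULL && msg != NW_BUFFER_ERROR && msg->msg_seqno == 0) {
+ new_ep = 0;
+ rc = nw_connection_accept(ep, msg, &new_ep);
+ }
+*/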
+
+
+extern nw_result nw_connection_close(nw_ep ep);
+
+/*****************************************************************************
+ Closes the endpoint's connection.
+
+ Returns NW_SUCCESS if successfully closed connection.
+ NW_BAD_EP if endpoint does not exist or is not connected.
+ NW_PROT_VIOLATION if access to endpoint not authorized.
+ *****************************************************************************/
+
+
+extern nw_result nw_multicast_add(nw_ep local_ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep);
+
+/*****************************************************************************
+ Open multicast group or add one more member to multicast group.
+
+ Returns NW_SUCCESS if successfully opened multicast group
+ or added member.
+ NW_BAD_EP if local endpoint does not exist, uses connectionless
+ protocol or is already connected point-to-point.
+ NW_PROT_VIOLATION if access to local or remote endpoint
+ not authorized.
+ NW_BAD_ADDRESS if address of remote host is invalid.
+ NW_NO_REMOTE_EP if connection name space exhausted at
+ remote host.
+ NW_TIME_OUT if attempt to open or add to multicast
+ timed out repeatedly.
+ NW_FAILURE if remote endpoint does not exist, uses connectionless
+ protocol or is already connected, or if remote
+ application did not accept open or add request.
+ *****************************************************************************/
+
+
+extern nw_result nw_multicast_drop(nw_ep local_ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep);
+
+/*****************************************************************************
+ Drop member from multicast group, or close group if last member.
+
+ Returns NW_SUCCESS if successfully dropped member or closed group.
+ NW_BAD_EP if local endpoint does not exist or is not connected in
+ multicast to the given remote endpoint.
+ NW_PROT_VIOLATION if access to local endpoint not authorized.
+ *****************************************************************************/
+
+
+extern nw_result nw_endpoint_status(nw_ep ep, nw_state_t state,
+ nw_peer_t peer);
+
+/*****************************************************************************
+ Returns the state of the given endpoint and peer to which it is
+ connected, if any. In case of multicast group, the first peer is
+ returned.
+
+ Returns NW_SUCCESS if status correctly returned.
+ NW_BAD_EP if endpoint does not exist.
+ NW_PROT_VIOLATION if access to endpoint not authorized.
+ NW_INVALID_ARGUMENT if state or peer is a bad pointer.
+ *****************************************************************************/
+
+
+extern nw_result nw_send(nw_ep ep, nw_buffer_t msg, nw_options options);
+
+/*****************************************************************************
+ Sends message through endpoint with the given options.
+
+ Returns NW_SUCCESS if message successfully queued for sending
+ (connectionless protocol) or sent and acknowledged
+ (connection-oriented protocol).
+ NW_BAD_EP if endpoint does not exist or uses connection-oriented
+ protocol but is unconnected.
+ NW_PROT_VIOLATION if access to endpoint not authorized.
+ NW_BAD_BUFFER if msg (or one of the buffers linked by buf_next)
+ is not a buffer in the endpoint's buffer area, or
+ is malformed (e.g., block_length extends beyond
+ end of buffer).
+ NW_NO_RESOURCES if unable to queue message due to resource
+ exhaustion.
+ NW_BAD_LENGTH if the total message length is too long for the
+ network and protocol used.
+ NW_BAD_ADDRESS if address of remote host is invalid
+ (connectionless protocol).
+ NW_FAILURE if repeated errors in message transmission
+ (connection-oriented).
+ NW_TIME_OUT if repeated time-outs in message transmission
+ (connection-oriented).
+ NW_OVERRUN if no buffer available in receiver's buffer area
+ (connection-oriented).
+ *****************************************************************************/
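+
+/* Usage sketch (illustrative only): sending length bytes from data on a
+ datagram endpoint, where data, length and peer are assumed to be set up
+ by the caller; the peer field must be filled in in the first buffer of a
+ datagram send, and block_deallocate asks the interface to release the
+ buffer once it has been transmitted:
+
+ nw_buffer_t msg;
+ nw_result rc;
+
+ msg = nw_buffer_allocate(ep, sizeof(nw_buffer_s) + length);
+ if (msg != NULL && msg != NW_BUFFER_ERROR) {
+ msg->buf_next = NULL;
+ msg->block_offset = sizeof(nw_buffer_s);
+ msg->block_length = length;
+ msg->block_deallocate = 1;
+ msg->peer = peer;
+ bcopy(data, (char *) msg + msg->block_offset, length);
+ rc = nw_send(ep, msg, NW_NORMAL);
+ }
+*/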
+
+
+extern nw_buffer_t nw_receive(nw_ep ep, int time_out);
+
+/*****************************************************************************
+ Receive message destined to endpoint. Return if request not
+ satisfied within time_out msec. time_out 0 means non-blocking receive,
+ while -1 means block indefinitely.
+
+ Returns NW_BUFFER_ERROR if endpoint does not exist or access
+ to endpoint is not authorized.
+ NULL if no message available for reception within the
+ specified time-out period.
+ Pointer to message, otherwise.
+ *****************************************************************************/
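+
+/* Usage sketch (illustrative only): blocking until a message arrives and
+ releasing its buffer once the data has been consumed:
+
+ nw_buffer_t msg;
+ char *data;
+
+ msg = nw_receive(ep, -1);
+ if (msg != NULL && msg != NW_BUFFER_ERROR) {
+ data = (char *) msg + msg->block_offset;
+ ... use msg->block_length bytes at data ...
+ nw_buffer_deallocate(ep, msg);
+ }
+*/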
+
+
+extern nw_buffer_t nw_rpc(nw_ep ep, nw_buffer_t send_msg, nw_options options,
+ int time_out);
+
+/*****************************************************************************
+ Send message through given endpoint with given options and then
+ receive message through the same endpoint. Receive waiting time
+ is limited to time_out msec.
+
+ Returns NW_BUFFER_ERROR if endpoint does not exist, access to
+ endpoint is not authorized, or there was
+ some transmission error.
+ NULL if no message available for reception within the
+ specified time-out period.
+ Pointer to message received, otherwise.
+ *****************************************************************************/
+
+
+extern nw_buffer_t nw_select(u_int nep, nw_ep_t epp, int time_out);
+
+/*****************************************************************************
+ Receive message from one of the nep endpoints in the array epp.
+ Waiting time is limited to time_out msec.
+
+ Returns NW_BUFFER_ERROR if epp does not point to a valid array of nep
+ endpoint numbers, one of the endpoints does
+ not exist or has restricted access or the request
+ could not be correctly queued because of resource
+ exhaustion.
+ NULL if no message arrived within the specified time-out period.
+ Pointer to message received, otherwise.
+ *****************************************************************************/
+
+
+#endif /* _NW_H_ */
diff --git a/chips/nw_mk.c b/chips/nw_mk.c
new file mode 100644
index 00000000..067cf7d0
--- /dev/null
+++ b/chips/nw_mk.c
@@ -0,0 +1,1323 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*** MACH KERNEL WRAPPER ***/
+
+#ifndef STUB
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#include <kern/eventcount.h>
+#include <kern/time_out.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <vm/vm_kern.h>
+#include <chips/nc.h>
+#include <chips/nw_mk.h>
+
+decl_simple_lock_data(, nw_simple_lock);
+u_int previous_spl;
+
+#define nw_lock() \
+ previous_spl = splimp(); \
+ simple_lock(&nw_simple_lock)
+
+#define nw_unlock() \
+ simple_unlock(&nw_simple_lock); \
+ splx(previous_spl)
+
+typedef struct nw_pvs {
+ task_t owner;
+ char *buf_start;
+ char *buf_end;
+ struct nw_pvs *next;
+} nw_pv_s, *nw_pv_t;
+
+typedef struct nw_waiters {
+ thread_t waiter;
+ struct nw_waiters *next;
+} nw_waiter_s, *nw_waiter_t;
+
+typedef struct {
+ nw_pv_t pv;
+ thread_t sig_waiter;
+ nw_waiter_t rx_first;
+ nw_waiter_t rx_last;
+ nw_waiter_t tx_first;
+ nw_waiter_t tx_last;
+} nw_hecb, *nw_hecb_t;
+
+#else
+#include "nc.h"
+#include "nw_mk.h"
+#endif
+
+/*** Types and data structures ***/
+
+int h_initialized = FALSE;
+nw_pv_s nw_pv[2*MAX_EP];
+nw_pv_t nw_free_pv;
+nw_waiter_s nw_waiter[2*MAX_EP];
+nw_waiter_t nw_free_waiter;
+nw_ep_owned_s nw_waited[3*MAX_EP];
+nw_ep_owned_t nw_free_waited;
+nw_hecb hect[MAX_EP];
+timer_elt_data_t nw_fast_timer, nw_slow_timer;
+
+/*** Initialization ***/
+
+void h_initialize() {
+ int ep, last_ep;
+
+ if (!h_initialized) {
+ last_ep = sizeof(nw_pv)/sizeof(nw_pv_s) - 1;
+ for (ep = 0; ep < last_ep; ep++) {
+ nw_pv[ep].next = &nw_pv[ep+1];
+ }
+ nw_pv[last_ep].next = NULL;
+ nw_free_pv = &nw_pv[0];
+ last_ep = sizeof(nw_waiter)/sizeof(nw_waiter_s) - 1;
+ for (ep = 0; ep < last_ep; ep++) {
+ nw_waiter[ep].next = &nw_waiter[ep+1];
+ }
+ nw_waiter[last_ep].next = NULL;
+ nw_free_waiter = &nw_waiter[0];
+ last_ep = sizeof(nw_waited)/sizeof(nw_ep_owned_s) - 1;
+ for (ep = 0; ep < last_ep; ep++) {
+ nw_waited[ep].next = &nw_waited[ep+1];
+ }
+ nw_waited[last_ep].next = NULL;
+ nw_free_waited = &nw_waited[0];
+ last_ep = sizeof(hect)/sizeof(nw_hecb);
+ for (ep = 0; ep < last_ep; ep++) {
+ hect[ep].pv = NULL;
+ hect[ep].sig_waiter = NULL;
+ hect[ep].rx_first = NULL;
+ hect[ep].rx_last = NULL;
+ hect[ep].tx_first = NULL;
+ hect[ep].tx_last = NULL;
+ }
+ nw_fast_timer.fcn = mk_fast_sweep;
+ nw_fast_timer.param = NULL;
+ nw_fast_timer.set = TELT_UNSET;
+ nw_slow_timer.fcn = mk_slow_sweep;
+ nw_slow_timer.param = NULL;
+#if PRODUCTION
+ set_timeout(&nw_slow_timer, 2*hz);
+#endif
+ h_initialized = TRUE;
+ }
+}
+
+/*** User-trappable functions ***/
+
+nw_result mk_update(mach_port_t master_port, nw_update_type up_type,
+ int *up_info) {
+ nw_result rc;
+
+ if (master_port == 0) { /* XXX */
+ rc = NW_FAILURE;
+ } else {
+ nw_lock();
+ switch (up_type) {
+ case NW_HOST_ADDRESS_REGISTER:
+ case NW_HOST_ADDRESS_UNREGISTER:
+ if (invalid_user_access(current_task()->map, (vm_offset_t) up_info,
+ (vm_offset_t) up_info + sizeof(nw_address_s) - 1,
+ VM_PROT_READ | VM_PROT_WRITE)) {
+ rc = NW_INVALID_ARGUMENT;
+ } else {
+ rc = nc_update(up_type, up_info);
+ }
+ break;
+ case NW_INITIALIZE:
+ nc_initialize();
+ rc = NW_SUCCESS;
+ break;
+ default:
+ rc = NW_INVALID_ARGUMENT;
+ }
+ nw_unlock();
+ }
+ return rc;
+}
+
+
+
+nw_result mk_lookup(nw_lookup_type lt, int *look_info) {
+ nw_result rc;
+ int max_size, dev;
+
+ nw_lock();
+ switch (lt) {
+ case NW_HOST_ADDRESS_LOOKUP:
+ if (invalid_user_access(current_task()->map, (vm_offset_t) look_info,
+ (vm_offset_t) look_info + sizeof(nw_address_s) - 1,
+ VM_PROT_READ | VM_PROT_WRITE)) {
+ rc = NW_INVALID_ARGUMENT;
+ } else {
+ rc = nc_lookup(lt, look_info);
+ }
+ break;
+ case NW_STATUS:
+ max_size = sizeof(nw_device);
+ if (max_size < sizeof(nw_result))
+ max_size = sizeof(nw_result);
+ if (invalid_user_access(current_task()->map, (vm_offset_t) look_info,
+ (vm_offset_t) look_info + max_size - 1,
+ VM_PROT_READ | VM_PROT_WRITE) ||
+ (dev = look_info[0]) >= MAX_DEV || dev < 0) {
+ rc = NW_INVALID_ARGUMENT;
+ } else {
+ if (devct[dev].status != NW_SUCCESS) {
+ look_info[0] = (int) devct[dev].status;
+ rc = NW_SUCCESS;
+ } else {
+ rc = (*(devct[dev].entry->status)) (dev);
+ }
+ }
+ break;
+ default:
+ rc = NW_INVALID_ARGUMENT;
+ }
+ nw_unlock();
+ return rc;
+}
+
+
+nw_result mk_endpoint_allocate_internal(nw_ep_t epp, nw_protocol protocol,
+ nw_acceptance accept,
+ u_int buffer_size, boolean_t system) {
+ nw_result rc;
+ u_int ep;
+ vm_offset_t kernel_addr, user_addr;
+ nw_pv_t pv;
+ nw_ep_owned_t owned;
+
+ ep = *epp;
+ if (buffer_size == 0)
+ buffer_size = 0x1000;
+ else
+ buffer_size = (buffer_size + 0xfff) & ~0xfff;
+ nw_lock();
+ if (ep >= MAX_EP || (pv = hect[ep].pv) != NULL) {
+ rc = NW_BAD_EP;
+ } else if (nw_free_pv == NULL || nw_free_waited == NULL) {
+ rc = NW_NO_EP;
+ } else if (projected_buffer_allocate(current_task()->map, buffer_size, 0,
+ &kernel_addr, &user_addr,
+ VM_PROT_READ | VM_PROT_WRITE,
+ VM_INHERIT_NONE) != KERN_SUCCESS) {
+ rc = NW_NO_RESOURCES;
+ } else {
+ rc = nc_endpoint_allocate(epp, protocol, accept,
+ (char *) kernel_addr, buffer_size);
+ if (rc == NW_NO_EP && (ep = *epp) != 0) {
+ rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->
+ close)) (ep);
+ if (rc == NW_SYNCH) {
+ hect[ep].sig_waiter = current_thread();
+ assert_wait(0, TRUE);
+ simple_unlock(&nw_simple_lock);
+ thread_block((void (*)()) 0);
+ }
+ rc = nc_endpoint_deallocate(ep);
+ if (rc == NW_SUCCESS) {
+ nc_line_update(&ect[ep].conn->peer, 0);
+ rc = nc_endpoint_allocate(epp, protocol, accept,
+ (char *) kernel_addr, buffer_size);
+ }
+ }
+ if (rc == NW_SUCCESS) {
+ ep = *epp;
+ if (system) {
+ hect[ep].pv = NULL;
+ } else {
+ hect[ep].pv = nw_free_pv;
+ nw_free_pv = nw_free_pv->next;
+ hect[ep].pv->owner = current_task();
+ hect[ep].pv->buf_start = (char *) user_addr;
+ hect[ep].pv->buf_end = (char *) user_addr + buffer_size;
+ hect[ep].pv->next = NULL;
+ }
+ hect[ep].sig_waiter = NULL;
+ hect[ep].rx_first = NULL;
+ hect[ep].rx_last = NULL;
+ hect[ep].tx_first = NULL;
+ hect[ep].tx_last = NULL;
+ owned = nw_free_waited;
+ nw_free_waited = nw_free_waited->next;
+ owned->ep = ep;
+ owned->next = current_task()->nw_ep_owned;
+ current_task()->nw_ep_owned = owned;
+ } else {
+ projected_buffer_deallocate(current_task()->map, user_addr,
+ user_addr + buffer_size);
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+
+nw_result mk_endpoint_allocate(nw_ep_t epp, nw_protocol protocol,
+ nw_acceptance accept, u_int buffer_size) {
+ nw_result rc;
+
+ if (invalid_user_access(current_task()->map, (vm_offset_t) epp,
+ (vm_offset_t) epp + sizeof(nw_ep) - 1,
+ VM_PROT_READ | VM_PROT_WRITE) ||
+ (protocol != NW_RAW && protocol != NW_DATAGRAM &&
+ protocol != NW_SEQ_PACKET) || (accept != NW_NO_ACCEPT &&
+ accept != NW_APPL_ACCEPT && accept != NW_AUTO_ACCEPT)) {
+ rc = NW_INVALID_ARGUMENT;
+ } else {
+ rc = mk_endpoint_allocate_internal(epp, protocol, accept,
+ buffer_size, FALSE);
+ }
+ return rc;
+}
+
+nw_result mk_endpoint_deallocate_internal(nw_ep ep, task_t task,
+ boolean_t shutdown) {
+ nw_result rc;
+ nw_pv_t pv, pv_previous;
+ nw_ep_owned_t owned, owned_previous;
+ nw_waiter_t w, w_previous, w_next;
+
+ nw_lock();
+ if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
+ rc = NW_BAD_EP;
+ } else {
+ pv_previous = NULL;
+ while (pv != NULL && pv->owner != task) {
+ pv_previous = pv;
+ pv = pv->next;
+ }
+ if (pv == NULL) {
+ rc = NW_PROT_VIOLATION;
+ } else {
+ if (projected_buffer_deallocate(task->map, pv->buf_start,
+ pv->buf_end) != KERN_SUCCESS) {
+ rc = NW_INCONSISTENCY;
+ printf("Endpoint deallocate: inconsistency p. buffer\n");
+ } else {
+ if (pv_previous == NULL)
+ hect[ep].pv = pv->next;
+ else
+ pv_previous->next = pv->next;
+ pv->next = nw_free_pv;
+ nw_free_pv = pv;
+ owned = task->nw_ep_owned;
+ owned_previous = NULL;
+ while (owned != NULL && owned->ep != ep) {
+ owned_previous = owned;
+ owned = owned->next;
+ }
+ if (owned == NULL) {
+ rc = NW_INCONSISTENCY;
+ printf("Endpoint deallocate: inconsistency owned\n");
+ } else {
+ if (owned_previous == NULL)
+ task->nw_ep_owned = owned->next;
+ else
+ owned_previous->next = owned->next;
+ owned->next = nw_free_waited;
+ nw_free_waited = owned;
+ if (hect[ep].sig_waiter != NULL &&
+ hect[ep].sig_waiter->task == task) {
+/* if (!shutdown)*/
+ mk_deliver_result(hect[ep].sig_waiter, NW_ABORTED);
+ hect[ep].sig_waiter = NULL;
+ }
+ w = hect[ep].rx_first;
+ w_previous = NULL;
+ while (w != NULL) {
+ if (w->waiter->task == task) {
+/* if (!shutdown)*/
+ mk_deliver_result(w->waiter, NULL);
+ w_next = w->next;
+ if (w_previous == NULL)
+ hect[ep].rx_first = w_next;
+ else
+ w_previous->next = w_next;
+ w->next = nw_free_waiter;
+ nw_free_waiter = w;
+ w = w_next;
+ } else {
+ w_previous = w;
+ w = w->next;
+ }
+ }
+ if (hect[ep].rx_first == NULL)
+ hect[ep].rx_last = NULL;
+ w = hect[ep].tx_first;
+ w_previous = NULL;
+ while (w != NULL) {
+ if (w->waiter->task == task) {
+/* if (!shutdown)*/
+ mk_deliver_result(w->waiter, NW_ABORTED);
+ w_next = w->next;
+ if (w_previous == NULL)
+ hect[ep].tx_first = w_next;
+ else
+ w_previous->next = w_next;
+ w->next = nw_free_waiter;
+ nw_free_waiter = w;
+ w = w_next;
+ } else {
+ w_previous = w;
+ w = w->next;
+ }
+ }
+ if (hect[ep].tx_first == NULL)
+ hect[ep].tx_last = NULL;
+ if (hect[ep].pv == NULL) {
+ if (ect[ep].state != NW_UNCONNECTED) {
+ rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->
+ close)) (ep);
+ if (rc == NW_SYNCH) {
+ hect[ep].sig_waiter = current_thread();
+ assert_wait(0, TRUE);
+ simple_unlock(&nw_simple_lock);
+ thread_block((void (*)()) 0);
+ }
+ }
+ rc = nc_endpoint_deallocate(ep);
+ }
+ }
+ }
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+nw_result mk_endpoint_deallocate(nw_ep ep) {
+
+ return mk_endpoint_deallocate_internal(ep, current_task(), FALSE);
+}
+
+
+nw_buffer_t mk_buffer_allocate(nw_ep ep, u_int size) {
+ nw_buffer_t buf;
+ nw_pv_t pv;
+
+ nw_lock();
+ if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
+ buf = NW_BUFFER_ERROR;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ buf = NW_BUFFER_ERROR;
+ } else {
+ buf = nc_buffer_allocate(ep, size);
+ if (buf != NULL) {
+ buf = (nw_buffer_t) ((char *) buf - ect[ep].buf_start + pv->buf_start);
+ }
+ }
+ }
+ nw_unlock();
+ return buf;
+}
+
+
+
+nw_result mk_buffer_deallocate(nw_ep ep, nw_buffer_t buffer) {
+ nw_result rc;
+ nw_pv_t pv;
+
+ nw_lock();
+ if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
+ rc = NW_BAD_EP;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = NW_PROT_VIOLATION;
+ } else {
+ if ((char *) buffer < pv->buf_start ||
+ (char *) buffer + sizeof(nw_buffer_s) > pv->buf_end ||
+ !buffer->buf_used ||
+ (char *) buffer + buffer->buf_length > pv->buf_end) {
+ rc = NW_BAD_BUFFER;
+ } else {
+ buffer = (nw_buffer_t) ((char *) buffer - pv->buf_start +
+ ect[ep].buf_start);
+ rc = nc_buffer_deallocate(ep, buffer);
+ }
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+
+nw_result mk_connection_open_internal(nw_ep local_ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep) {
+ nw_result rc;
+
+ rc = (*devct[NW_DEVICE(rem_addr_1)].entry->open) (local_ep,
+ rem_addr_1, rem_addr_2,
+ remote_ep);
+ if (rc == NW_SYNCH) {
+ hect[local_ep].sig_waiter = current_thread();
+ assert_wait(0, TRUE);
+ simple_unlock(&nw_simple_lock);
+ thread_block((void (*)()) 0);
+ }
+ return rc;
+}
+
+nw_result mk_connection_open(nw_ep local_ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep) {
+ nw_result rc;
+ nw_pv_t pv;
+
+ nw_lock();
+ if (local_ep >= MAX_EP || (pv = hect[local_ep].pv) == NULL) {
+ rc = NW_BAD_EP;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = NW_PROT_VIOLATION;
+ } else {
+ rc = (*(devct[NW_DEVICE(rem_addr_1)].entry->open))
+ (local_ep, rem_addr_1, rem_addr_2, remote_ep);
+ if (rc == NW_SYNCH) {
+ hect[local_ep].sig_waiter = current_thread();
+ assert_wait(0, TRUE);
+ current_thread()->nw_ep_waited = NULL;
+ simple_unlock(&nw_simple_lock);
+ thread_block(mk_return);
+ }
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+
+nw_result mk_connection_accept(nw_ep ep, nw_buffer_t msg,
+ nw_ep_t new_epp) {
+ nw_result rc;
+ nw_pv_t pv;
+
+ nw_lock();
+ if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
+ rc = NW_BAD_EP;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = NW_PROT_VIOLATION;
+ } else if ((char *) msg < pv->buf_start ||
+ (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
+ !msg->buf_used ||
+ (char *) msg + msg->buf_length > pv->buf_end) {
+ rc = NW_BAD_BUFFER;
+ } else if (new_epp != NULL &&
+ (invalid_user_access(current_task()->map, (vm_offset_t) new_epp,
+ (vm_offset_t) new_epp + sizeof(nw_ep) - 1,
+ VM_PROT_READ | VM_PROT_WRITE) ||
+ (*new_epp != 0 && *new_epp != ep))) {
+ rc = NW_INVALID_ARGUMENT;
+ } else {
+ rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->accept))
+ (ep, msg, new_epp);
+ if (rc == NW_SYNCH) {
+ hect[ep].sig_waiter = current_thread();
+ assert_wait(0, TRUE);
+ current_thread()->nw_ep_waited = NULL;
+ simple_unlock(&nw_simple_lock);
+ thread_block(mk_return);
+ }
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+nw_result mk_connection_close(nw_ep ep) {
+ nw_result rc;
+ nw_pv_t pv;
+
+ nw_lock();
+ if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
+ rc = NW_BAD_EP;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = NW_PROT_VIOLATION;
+ } else {
+ rc = (*devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->close)
+ (ep);
+ if (rc == NW_SYNCH) {
+ hect[ep].sig_waiter = current_thread();
+ assert_wait(0, TRUE);
+ current_thread()->nw_ep_waited = NULL;
+ simple_unlock(&nw_simple_lock);
+ thread_block(mk_return);
+ }
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+
+nw_result mk_multicast_add(nw_ep local_ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep) {
+ nw_result rc;
+ nw_pv_t pv;
+
+ nw_lock();
+ if (local_ep >= MAX_EP || (pv = hect[local_ep].pv) == NULL) {
+ rc = NW_BAD_EP;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = NW_PROT_VIOLATION;
+ } else {
+ rc = (*(devct[NW_DEVICE(rem_addr_1)].entry->add))
+ (local_ep, rem_addr_1, rem_addr_2, remote_ep);
+ if (rc == NW_SYNCH) {
+ hect[local_ep].sig_waiter = current_thread();
+ assert_wait(0, TRUE);
+ current_thread()->nw_ep_waited = NULL;
+ simple_unlock(&nw_simple_lock);
+ thread_block(mk_return);
+ }
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+
+nw_result mk_multicast_drop(nw_ep local_ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep) {
+ nw_result rc;
+ nw_pv_t pv;
+
+ nw_lock();
+ if (local_ep >= MAX_EP || (pv = hect[local_ep].pv) == NULL) {
+ rc = NW_BAD_EP;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = NW_PROT_VIOLATION;
+ } else {
+ rc = (*(devct[NW_DEVICE(rem_addr_1)].entry->drop))
+ (local_ep, rem_addr_1, rem_addr_2, remote_ep);
+ if (rc == NW_SYNCH) {
+ hect[local_ep].sig_waiter = current_thread();
+ assert_wait(0, TRUE);
+ current_thread()->nw_ep_waited = NULL;
+ simple_unlock(&nw_simple_lock);
+ thread_block(mk_return);
+ }
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+
+nw_result mk_endpoint_status(nw_ep ep, nw_state_t state,
+ nw_peer_t peer) {
+ nw_result rc;
+ nw_pv_t pv;
+
+ nw_lock();
+ if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
+ rc = NW_BAD_EP;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = NW_PROT_VIOLATION;
+ } else {
+ if (invalid_user_access(current_task()->map, (vm_offset_t) state,
+ (vm_offset_t) state + sizeof(nw_state) - 1,
+ VM_PROT_WRITE) ||
+ invalid_user_access(current_task()->map, (vm_offset_t) peer,
+ (vm_offset_t) peer + sizeof(nw_peer_s) - 1,
+ VM_PROT_WRITE)) {
+ rc = NW_INVALID_ARGUMENT;
+ } else {
+ rc = nc_endpoint_status(ep, state, peer);
+ }
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+
+nw_result mk_send(nw_ep ep, nw_buffer_t msg, nw_options options) {
+ nw_result rc;
+ nw_pv_t pv;
+ nw_ep sender;
+ int dev;
+ nw_ecb_t ecb;
+ nw_tx_header_t header, first_header, previous_header;
+ nw_hecb_t hecb;
+ nw_waiter_t w;
+
+ nw_lock();
+ if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
+ rc = NW_BAD_EP;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = NW_PROT_VIOLATION;
+ } else {
+ ecb = &ect[ep];
+ if (ecb->state == NW_INEXISTENT ||
+ (ecb->protocol == NW_SEQ_PACKET && ecb->conn == NULL)) {
+ rc = NW_BAD_EP;
+ } else {
+ first_header = header = nc_tx_header_allocate();
+ previous_header = NULL;
+ rc = NW_SUCCESS;
+ while (header != NULL) {
+ if ((char *) msg < pv->buf_start ||
+ (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
+ ((int) msg & 0x3) || (msg->block_offset & 0x3) ||
+ (msg->block_length & 0x3) || !msg->buf_used ||
+ (char *) msg + msg->buf_length > pv->buf_end ||
+ msg->block_offset + msg->block_length > msg->buf_length) {
+ rc = NW_BAD_BUFFER;
+ break;
+ } else {
+ if (previous_header == NULL) {
+ if (ecb->protocol == NW_SEQ_PACKET)
+ header->peer = ecb->conn->peer;
+ else
+ header->peer = msg->peer;
+ } else {
+ previous_header->next = header;
+ }
+ header->buffer = (nw_buffer_t) ((char *) msg - pv->buf_start +
+ ecb->buf_start);
+ header->block = (char *) header->buffer + msg->block_offset;
+ if (!msg->block_deallocate)
+ header->buffer = NULL;
+ header->msg_length = 0;
+ header->block_length = msg->block_length;
+ first_header->msg_length += header->block_length;
+ header->next = NULL;
+ if (msg->buf_next == NULL)
+ break;
+ msg = msg->buf_next;
+ previous_header = header;
+ header = nc_tx_header_allocate();
+ }
+ }
+ if (header == NULL) {
+ nc_tx_header_deallocate(first_header);
+ rc = NW_NO_RESOURCES;
+ } else if (rc == NW_SUCCESS) {
+ dev = NW_DEVICE(first_header->peer.rem_addr_1);
+ if (ecb->protocol != NW_DATAGRAM ||
+ devct[dev].type != NW_CONNECTION_ORIENTED) {
+ sender = first_header->peer.local_ep;
+ rc = NW_SUCCESS;
+ } else {
+ sender = nc_line_lookup(&first_header->peer);
+ if (sender == -1) {
+ rc = NW_BAD_ADDRESS;
+ } else if (sender > 0) {
+ rc = NW_SUCCESS;
+ } else {
+ rc = mk_endpoint_allocate_internal(&sender, NW_LINE,
+ NW_AUTO_ACCEPT, 0, TRUE);
+ if (rc == NW_SUCCESS) {
+ rc = mk_connection_open_internal(sender,
+ first_header->peer.rem_addr_1,
+ first_header->peer.rem_addr_2,
+ MASTER_LINE_EP);
+ if (rc == NW_SUCCESS)
+ nc_line_update(&first_header->peer, sender);
+ }
+ }
+ }
+ if (rc == NW_SUCCESS) {
+ first_header->sender = sender;
+ first_header->options = options;
+ rc = (*(devct[dev].entry->send)) (sender, first_header, options);
+ if ((rc == NW_SYNCH || rc == NW_QUEUED) &&
+ nw_free_waiter != NULL) {
+ w = nw_free_waiter;
+ nw_free_waiter = w->next;
+ w->waiter = current_thread();
+ w->next = NULL;
+ hecb = &hect[sender];
+ if (hecb->tx_last == NULL) {
+ hecb->tx_first = hecb->tx_last = w;
+ } else {
+ hecb->tx_last = hecb->tx_last->next = w;
+ }
+ assert_wait(0, TRUE);
+ current_thread()->nw_ep_waited = NULL;
+ simple_unlock(&nw_simple_lock);
+ thread_block(mk_return);
+ }
+ }
+ }
+ }
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+
+nw_buffer_t mk_receive(nw_ep ep, int time_out) {
+ nw_buffer_t rc;
+ nw_pv_t pv;
+ nw_ecb_t ecb;
+ nw_rx_header_t header;
+ nw_hecb_t hecb;
+ nw_waiter_t w;
+ nw_ep_owned_t waited;
+
+ nw_lock();
+ if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
+ rc = NW_BUFFER_ERROR;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = NW_BUFFER_ERROR;
+ } else {
+ ecb = &ect[ep];
+ header = ecb->rx_first;
+ if (header != NULL) {
+ rc = (nw_buffer_t) ((char *) header->buffer - ecb->buf_start +
+ pv->buf_start);
+ ecb->rx_first = header->next;
+ if (ecb->rx_first == NULL)
+ ecb->rx_last = NULL;
+ nc_rx_header_deallocate(header);
+ } else if (time_out != 0 && nw_free_waiter != NULL &&
+ (time_out == -1 || nw_free_waited != NULL)) {
+ w = nw_free_waiter;
+ nw_free_waiter = w->next;
+ w->waiter = current_thread();
+ w->next = NULL;
+ hecb = &hect[ep];
+ if (hecb->rx_last == NULL)
+ hecb->rx_first = hecb->rx_last = w;
+ else
+ hecb->rx_last = hecb->rx_last->next = w;
+ assert_wait(0, TRUE);
+ if (time_out != -1) {
+ waited = nw_free_waited;
+ nw_free_waited = waited->next;
+ waited->ep = ep;
+ waited->next = NULL;
+ current_thread()->nw_ep_waited = waited;
+ current_thread()->wait_result = NULL;
+ if (!current_thread()->timer.set)
+ thread_set_timeout(time_out);
+ } else {
+ current_thread()->nw_ep_waited = NULL;
+ }
+ simple_unlock(&nw_simple_lock);
+ thread_block(mk_return);
+ } else {
+ rc = NULL;
+ }
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+
+nw_buffer_t mk_rpc(nw_ep ep, nw_buffer_t msg, nw_options options,
+ int time_out) {
+ nw_buffer_t rc;
+ nw_result nrc;
+ nw_ep sender;
+ int dev;
+ nw_pv_t pv;
+ nw_ecb_t ecb;
+ nw_tx_header_t header, first_header, previous_header;
+ nw_hecb_t hecb;
+ nw_waiter_t w;
+ nw_ep_owned_t waited;
+
+ nw_lock();
+ if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
+ rc = NW_BUFFER_ERROR;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = NW_BUFFER_ERROR;
+ } else {
+ ecb = &ect[ep];
+ if (ecb->state == NW_INEXISTENT ||
+ (ecb->protocol == NW_SEQ_PACKET && ecb->conn == NULL)) {
+ rc = NW_BUFFER_ERROR;
+ } else {
+ first_header = header = nc_tx_header_allocate();
+ previous_header = NULL;
+ rc = NULL;
+ while (header != NULL) {
+ if ((char *) msg < pv->buf_start ||
+ (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
+ ((int) msg & 0x3) || (msg->block_offset & 0x3) ||
+ (msg->block_length & 0x3) || !msg->buf_used ||
+ (char *) msg + msg->buf_length > pv->buf_end ||
+ msg->block_offset + msg->block_length > msg->buf_length) {
+ rc = NW_BUFFER_ERROR;
+ break;
+ } else {
+ if (previous_header == NULL) {
+ if (ecb->protocol == NW_SEQ_PACKET)
+ header->peer = ecb->conn->peer;
+ else
+ header->peer = msg->peer;
+ } else {
+ previous_header->next = header;
+ }
+ header->buffer = (nw_buffer_t) ((char *) msg - pv->buf_start +
+ ecb->buf_start);
+ header->block = (char *) header->buffer + msg->block_offset;
+ if (!msg->block_deallocate)
+ header->buffer = NULL;
+ header->msg_length = 0;
+ header->block_length = msg->block_length;
+ first_header->msg_length += header->block_length;
+ header->next = NULL;
+ if (msg->buf_next == NULL)
+ break;
+ msg = msg->buf_next;
+ previous_header = header;
+ header = nc_tx_header_allocate();
+ }
+ }
+ if (header == NULL) {
+ nc_tx_header_deallocate(first_header);
+ rc = NW_BUFFER_ERROR;
+ } else if (rc != NW_BUFFER_ERROR) {
+ dev = NW_DEVICE(first_header->peer.rem_addr_1);
+ if (ecb->protocol != NW_DATAGRAM ||
+ devct[dev].type != NW_CONNECTION_ORIENTED) {
+ sender = first_header->peer.local_ep;
+ nrc = NW_SUCCESS;
+ } else {
+ sender = nc_line_lookup(&first_header->peer);
+ if (sender == -1) {
+ nrc = NW_BAD_ADDRESS;
+ } else if (sender > 0) {
+ nrc = NW_SUCCESS;
+ } else {
+ nrc = mk_endpoint_allocate_internal(&sender, NW_LINE,
+ NW_AUTO_ACCEPT, 0, TRUE);
+ if (nrc == NW_SUCCESS) {
+ nrc = mk_connection_open_internal(sender,
+ first_header->peer.rem_addr_1,
+ first_header->peer.rem_addr_2,
+ MASTER_LINE_EP);
+ if (nrc == NW_SUCCESS)
+ nc_line_update(&first_header->peer, sender);
+ }
+ }
+ }
+ if (nrc == NW_SUCCESS) {
+ first_header->sender = sender;
+ first_header->options = options;
+ rc = (*(devct[dev].entry->rpc)) (sender, first_header, options);
+ if (rc != NULL && rc != NW_BUFFER_ERROR) {
+ rc = (nw_buffer_t) ((char *) rc - ecb->buf_start +
+ pv->buf_start);
+ } else if (rc == NULL && time_out != 0 && nw_free_waiter != NULL &&
+ (time_out == -1 || nw_free_waited != NULL)) {
+ w = nw_free_waiter;
+ nw_free_waiter = w->next;
+ w->waiter = current_thread();
+ w->next = NULL;
+ hecb = &hect[ep];
+ if (hecb->rx_last == NULL)
+ hecb->rx_first = hecb->rx_last = w;
+ else
+ hecb->rx_last = hecb->rx_last->next = w;
+ assert_wait(0, TRUE);
+ if (time_out != -1) {
+ waited = nw_free_waited;
+ nw_free_waited = waited->next;
+ waited->ep = ep;
+ waited->next = NULL;
+ current_thread()->nw_ep_waited = waited;
+ current_thread()->wait_result = NULL;
+ if (!current_thread()->timer.set)
+ thread_set_timeout(time_out);
+ } else {
+ current_thread()->nw_ep_waited = NULL;
+ }
+ simple_unlock(&nw_simple_lock);
+ thread_block(mk_return);
+ }
+ }
+ }
+ }
+ }
+ }
+ nw_unlock();
+ return rc;
+}
+
+nw_buffer_t mk_select(u_int nep, nw_ep_t epp, int time_out) {
+ nw_buffer_t rc;
+ nw_pv_t pv;
+ int i;
+ nw_ep ep;
+ nw_ecb_t ecb;
+ nw_rx_header_t header;
+ nw_hecb_t hecb;
+ nw_waiter_t w, w_next;
+ nw_ep_owned_t waited;
+
+ if (invalid_user_access(current_task()->map, (vm_offset_t) epp,
+ (vm_offset_t) epp + nep*sizeof(nw_ep) - 1,
+ VM_PROT_READ)) {
+ rc = NW_BUFFER_ERROR;
+ } else {
+ nw_lock();
+ for (i = 0; i < nep; i++) {
+ ep = epp[i];
+ if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
+ rc = NW_BUFFER_ERROR;
+ break;
+ } else {
+ while (pv != NULL && pv->owner != current_task())
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = NW_BUFFER_ERROR;
+ break;
+ } else {
+ ecb = &ect[ep];
+ header = ecb->rx_first;
+ if (header != NULL) {
+ rc = (nw_buffer_t) ((char *) header->buffer - ecb->buf_start +
+ pv->buf_start);
+ ecb->rx_first = header->next;
+ if (ecb->rx_first == NULL)
+ ecb->rx_last = NULL;
+ nc_rx_header_deallocate(header);
+ break;
+ }
+ }
+ }
+ }
+ if (i == nep) {
+ if (time_out == 0) {
+ rc = NULL;
+ } else {
+ w = nw_free_waiter;
+ waited = nw_free_waited;
+ i = 0;
+ while (i < nep &&
+ nw_free_waiter != NULL && nw_free_waited != NULL) {
+ nw_free_waiter = nw_free_waiter->next;
+ nw_free_waited = nw_free_waited->next;
+ i++;
+ }
+ if (i < nep) {
+ nw_free_waiter = w;
+ nw_free_waited = waited;
+ rc = NW_BUFFER_ERROR;
+ } else {
+ current_thread()->nw_ep_waited = waited;
+ for (i = 0; i < nep; i++) {
+ ep = epp[i];
+ waited->ep = ep;
+ if (i < nep-1)
+ waited = waited->next;
+ else
+ waited->next = NULL;
+ w->waiter = current_thread();
+ w_next = w->next;
+ w->next = NULL;
+ hecb = &hect[ep];
+ if (hecb->rx_last == NULL)
+ hecb->rx_first = hecb->rx_last = w;
+ else
+ hecb->rx_last = hecb->rx_last->next = w;
+ w = w_next;
+ }
+ assert_wait(0, TRUE);
+ if (time_out != -1) {
+ current_thread()->wait_result = NULL;
+ if (!current_thread()->timer.set)
+ thread_set_timeout(time_out);
+ }
+ simple_unlock(&nw_simple_lock);
+ thread_block(mk_return);
+ }
+ }
+ }
+ nw_unlock();
+ }
+ return rc;
+}
+
+
+/*** System-dependent support ***/
+
+void mk_endpoint_collect(task_t task) {
+
+ while (task->nw_ep_owned != NULL) {
+ mk_endpoint_deallocate_internal(task->nw_ep_owned->ep, task, TRUE);
+ }
+}
+
+void mk_waited_collect(thread_t thread) {
+ nw_hecb_t hecb;
+ nw_waiter_t w, w_previous;
+ nw_ep_owned_t waited, waited_previous;
+
+ waited = thread->nw_ep_waited;
+ if (waited != NULL) {
+ while (waited != NULL) {
+ hecb = &hect[waited->ep];
+ w = hecb->rx_first;
+ w_previous = NULL;
+ while (w != NULL && w->waiter != thread) {
+ w_previous = w;
+ w = w->next;
+ }
+ if (w != NULL) {
+ if (w_previous == NULL)
+ hecb->rx_first = w->next;
+ else
+ w_previous->next = w->next;
+ if (w->next == NULL)
+ hecb->rx_last = w_previous;
+ w->next = nw_free_waiter;
+ nw_free_waiter = w;
+ }
+ waited_previous = waited;
+ waited = waited->next;
+ }
+ waited_previous->next = nw_free_waited;
+ nw_free_waited = thread->nw_ep_waited;
+ thread->nw_ep_waited = NULL;
+ }
+}
+
+void mk_return() {
+
+ thread_syscall_return(current_thread()->wait_result);
+}
+
+
+boolean_t mk_deliver_result(thread_t thread, int result) {
+ boolean_t rc;
+ int state, s;
+
+ s = splsched();
+ thread_lock(thread);
+ state = thread->state;
+
+ reset_timeout_check(&thread->timer);
+
+ switch (state & TH_SCHED_STATE) {
+ case TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_WAIT | TH_UNINT:
+ case TH_WAIT:
+ /*
+ * Sleeping and not suspendable - put on run queue.
+ */
+ thread->state = (state &~ TH_WAIT) | TH_RUN;
+ thread->wait_result = (kern_return_t) result;
+ simpler_thread_setrun(thread, TRUE);
+ rc = TRUE;
+ break;
+
+ case TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ /*
+ * Either already running, or suspended.
+ */
+ thread->state = state &~ TH_WAIT;
+ thread->wait_result = (kern_return_t) result;
+ rc = FALSE;
+ break;
+
+ default:
+ /*
+ * Not waiting.
+ */
+ rc = FALSE;
+ break;
+ }
+ thread_unlock(thread);
+ splx(s);
+ return rc;
+}
+
+
+boolean_t nc_deliver_result(nw_ep ep, nw_delivery type, int result) {
+ boolean_t rc;
+ nw_hecb_t hecb;
+ nw_ecb_t ecb;
+ nw_waiter_t w;
+ thread_t thread;
+ task_t task;
+ nw_pv_t pv;
+ nw_buffer_t buf;
+ nw_rx_header_t rx_header;
+ nw_tx_header_t tx_header;
+ nw_ep lep;
+
+ hecb = &hect[ep];
+ ecb = &ect[ep];
+
+ thread = NULL;
+ if (type == NW_RECEIVE || type == NW_RECEIVE_URGENT) {
+ w = hecb->rx_first;
+ if (w != NULL) {
+ thread = w->waiter;
+ hecb->rx_first = w->next;
+ if (hecb->rx_first == NULL)
+ hecb->rx_last = NULL;
+ w->next = nw_free_waiter;
+ nw_free_waiter = w;
+ task = thread->task;
+ pv = hecb->pv;
+ while (pv != NULL && pv->owner != task)
+ pv = pv->next;
+ if (pv == NULL) {
+ rc = FALSE;
+ } else {
+ buf = (nw_buffer_t) ((char *) result - ecb->buf_start + pv->buf_start);
+ rc = mk_deliver_result(thread, (int) buf);
+ }
+ } else {
+ rx_header = nc_rx_header_allocate();
+ if (rx_header == NULL) {
+ rc = FALSE;
+ } else {
+ rx_header->buffer = (nw_buffer_t) result;
+ if (type == NW_RECEIVE) {
+ rx_header->next = NULL;
+ if (ecb->rx_last == NULL)
+ ecb->rx_first = rx_header;
+ else
+ ecb->rx_last->next = rx_header;
+ ecb->rx_last = rx_header;
+ } else {
+ rx_header->next = ecb->rx_first;
+ if (ecb->rx_first == NULL)
+ ecb->rx_last = rx_header;
+ ecb->rx_first = rx_header;
+ }
+ rc = TRUE;
+ }
+ }
+ } else if (type == NW_SEND) {
+ w = hecb->tx_first;
+ if (w == NULL) {
+ rc = FALSE;
+ } else {
+ thread = w->waiter;
+ hecb->tx_first = w->next;
+ if (hecb->tx_first == NULL)
+ hecb->tx_last = NULL;
+ w->next = nw_free_waiter;
+ nw_free_waiter = w;
+ rc = mk_deliver_result(thread, result);
+ }
+ tx_header = ect[ep].tx_initial;
+ if (result == NW_SUCCESS) {
+ lep = tx_header->peer.local_ep;
+ while (tx_header != NULL) {
+ if (tx_header->buffer != NULL)
+ nc_buffer_deallocate(lep, tx_header->buffer);
+ tx_header = tx_header->next;
+ }
+ }
+ nc_tx_header_deallocate(ect[ep].tx_initial);
+ ect[ep].tx_initial = ect[ep].tx_current = NULL;
+ } else if (type == NW_SIGNAL) {
+ thread = hecb->sig_waiter;
+ hecb->sig_waiter = NULL;
+ if (thread == NULL) {
+ rc = FALSE;
+ } else {
+ rc = mk_deliver_result(thread, result);
+ }
+ }
+ return rc;
+}
+
+int mk_fast_sweep() {
+
+ nw_lock();
+ nc_fast_sweep();
+ nw_unlock();
+ return 0;
+}
+
+void h_fast_timer_set() {
+
+#ifdef PRODUCTION
+ if (!nw_fast_timer.set)
+ set_timeout(&nw_fast_timer, 1);
+#endif
+}
+
+void h_fast_timer_reset() {
+
+ if (nw_fast_timer.set)
+ reset_timeout(&nw_fast_timer);
+}
+
+int mk_slow_sweep() {
+
+#ifdef PRODUCTION
+ nw_lock();
+ nc_slow_sweep();
+ nw_unlock();
+ set_timeout(&nw_slow_timer, 2*hz);
+ return 0;
+#endif
+}
+
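A note on the conventions the wrappers above share, since they are easy to miss in the nesting: a time_out of 0 never blocks (mk_receive() simply returns NULL when nothing is queued), -1 blocks without arming a timer, and any other value calls thread_set_timeout() before the thread blocks; when a message is delivered, nc_deliver_result() rebases the buffer pointer from the endpoint's view (ecb->buf_start) into the owning task's mapping (pv->buf_start), so the blocked caller wakes up with a pointer it can use directly. A minimal caller sketch, assuming an already-initialized endpoint ep; do_other_work() and handle_message() are hypothetical placeholders:

	nw_buffer_t b;

	while ((b = mk_receive(ep, 0)) == NULL)		/* poll, never block */
		do_other_work();			/* placeholder */
	if (b != NW_BUFFER_ERROR)
		/* b already points into this task's buffer mapping */
		handle_message(b);			/* placeholder */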
diff --git a/chips/nw_mk.h b/chips/nw_mk.h
new file mode 100644
index 00000000..d7ba503d
--- /dev/null
+++ b/chips/nw_mk.h
@@ -0,0 +1,97 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _NW_MK_H_
+#define _NW_MK_H_ 1
+
+#ifndef STUB
+#include <chips/nw.h>
+#include <kern/thread.h>
+#else
+#include "nw.h"
+#include "stub.h"
+#endif
+
+/*** NETWORK INTERFACE -- WRAPPER FOR MACH KERNEL ***/
+
+/*** User-trappable functions ***/
+
+extern nw_result mk_update(mach_port_t master_port, nw_update_type up_type,
+ int *up_info);
+
+extern nw_result mk_lookup(nw_lookup_type lt, int *look_info);
+
+extern nw_result mk_endpoint_allocate(nw_ep_t epp, nw_protocol protocol,
+ nw_acceptance accept, u_int buffer_size);
+
+extern nw_result mk_endpoint_deallocate(nw_ep ep);
+
+extern nw_buffer_t mk_buffer_allocate(nw_ep ep, u_int size);
+
+extern nw_result mk_buffer_deallocate(nw_ep ep, nw_buffer_t buffer);
+
+extern nw_result mk_connection_open(nw_ep local_ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep);
+
+extern nw_result mk_connection_accept(nw_ep ep, nw_buffer_t msg,
+ nw_ep_t new_epp);
+
+extern nw_result mk_connection_close(nw_ep ep);
+
+extern nw_result mk_multicast_add(nw_ep local_ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep);
+
+extern nw_result mk_multicast_drop(nw_ep local_ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep);
+
+extern nw_result mk_endpoint_status(nw_ep ep, nw_state_t state,
+ nw_peer_t peer);
+
+extern nw_result mk_send(nw_ep ep, nw_buffer_t msg, nw_options options);
+
+extern nw_buffer_t mk_receive(nw_ep ep, int time_out);
+
+extern nw_buffer_t mk_rpc(nw_ep ep, nw_buffer_t send_msg,
+ nw_options options, int time_out);
+
+extern nw_buffer_t mk_select(u_int nep, nw_ep_t epp, int time_out);
+
+
+/*** System-dependent support ***/
+
+extern void mk_endpoint_collect(task_t task);
+
+extern void mk_waited_collect(thread_t thread);
+
+extern void mk_return();
+
+extern boolean_t mk_deliver_result(thread_t thread, int result);
+
+extern int mk_fast_sweep();
+
+extern int mk_slow_sweep();
+
+#endif /* _NW_MK_H_ */
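For orientation, a hedged sketch of how a client of the interface declared above might be used end to end; only entry points from this header appear, the remote address values, option word and buffer sizes are placeholders, and error handling is collapsed into a single check:

	#include <chips/nw_mk.h>

	nw_ep ep;
	nw_buffer_t buf, reply;
	nw_address_1 rem_addr_1 = 0;		/* placeholder */
	nw_address_2 rem_addr_2 = 0;		/* placeholder */
	nw_ep remote_ep = 0;			/* placeholder */
	nw_options options = 0;			/* placeholder */

	if (mk_endpoint_allocate(&ep, NW_SEQ_PACKET, NW_AUTO_ACCEPT, 4096) == NW_SUCCESS &&
	    mk_connection_open(ep, rem_addr_1, rem_addr_2, remote_ep) == NW_SUCCESS) {
		buf = mk_buffer_allocate(ep, 128);
		/* ... fill the block_offset/block_length region of buf ... */
		(void) mk_send(ep, buf, options);
		reply = mk_receive(ep, -1);	/* block until the peer answers */
		(void) mk_connection_close(ep);
		(void) mk_endpoint_deallocate(ep);
	}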
diff --git a/chips/pm_defs.h b/chips/pm_defs.h
new file mode 100644
index 00000000..623f65ec
--- /dev/null
+++ b/chips/pm_defs.h
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: pm_defs.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Definitions specific to the "pm" simple framebuffer driver,
+ * exported across sub-modules. Some other framebuffer drivers
+ * that share code with pm use these defs also.
+ */
+
+/* Hardware state (to be held in the screen descriptor) */
+
+typedef struct {
+ char *cursor_registers; /* opaque, for sharing */
+ unsigned short cursor_state; /* some regs are W-only */
+ short unused; /* padding, free */
+ char *vdac_registers; /* opaque, for sharing */
+ unsigned char *framebuffer;
+ unsigned char *plane_mask;
+} pm_softc_t;
+
+extern pm_softc_t *pm_alloc(/* unit, curs, framebuf, planem */);
+
+/* user mapping sizes */
+#define USER_INFO_SIZE PAGE_SIZE
+#define PMASK_SIZE PAGE_SIZE
+#define BITMAP_SIZE(sc) \
+ ((sc)->frame_height * (((sc)->flags & COLOR_SCREEN) ? \
+ sc->frame_scanline_width : \
+ sc->frame_scanline_width>>3))
+
+#define PM_SIZE(sc) (USER_INFO_SIZE+PMASK_SIZE+BITMAP_SIZE(sc))
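As a concrete check of these macros (assuming the usual 4096-byte PAGE_SIZE on these machines): the color configuration that pm_cold_init() sets up in pm_hdw.c below, frame_scanline_width = 1024 and frame_height = 864 at 8 bits per pixel, gives BITMAP_SIZE = 864 * 1024 = 884736 bytes and PM_SIZE = 4096 + 4096 + 884736 = 892928; the monochrome case (scanline width 2048, 1 bit per pixel) gives 864 * (2048 >> 3) = 221184 and a PM_SIZE of 229376.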
diff --git a/chips/pm_hdw.c b/chips/pm_hdw.c
new file mode 100644
index 00000000..8ac94925
--- /dev/null
+++ b/chips/pm_hdw.c
@@ -0,0 +1,201 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: pm_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Driver for the VFB01/02 Mono/Color framebuffer (pmax)
+ * Hardware-level operations.
+ */
+
+#include <bm.h>
+#if NBM>0
+#include <platforms.h>
+
+#include <machine/machspl.h> /* spl definitions */
+#include <chips/screen_defs.h>
+#include <chips/pm_defs.h>
+#include <chips/busses.h>
+#include <machine/machspl.h>
+
+#ifdef DECSTATION
+#include <mips/mips_cpu.h>
+#include <mips/PMAX/kn01.h>
+
+#define KN01_CSR_ADDR PHYS_TO_K1SEG(KN01_SYS_CSR)
+#define KN01_FBUF_ADDR PHYS_TO_K1SEG(KN01_PHYS_FBUF_START)
+#define KN01_PLM_ADDR PHYS_TO_K1SEG(KN01_PHYS_COLMASK_START)
+#define KN01_BT478_ADDR PHYS_TO_K1SEG(KN01_SYS_VDAC)
+#define KN01_DC503_ADDR PHYS_TO_K1SEG(KN01_SYS_PCC)
+
+#define VRETRACE dc503_vretrace
+#define MONO_FRAME_WIDTH 2048
+#define ISA_MONO ((*(volatile short*)KN01_CSR_ADDR)&KN01_CSR_MONO)
+
+#endif /*DECSTATION*/
+
+#ifdef VAXSTATION
+#include <vax/ka3100.h>
+#define VRETRACE ka42_vretrace
+#define ISA_MONO 1
+#define MONO_FRAME_WIDTH 1024
+#endif /*VAXSTATION*/
+
+/*
+ * Definition of the driver for the auto-configuration program.
+ */
+
+int pm_probe(), pm_intr();
+static void pm_attach();
+
+vm_offset_t pm_std[] = { 0 };
+struct bus_device *pm_info[1];
+struct bus_driver pm_driver =
+ { pm_probe, 0, pm_attach, 0, pm_std, "pm", pm_info, };
+
+/*
+ * Probe/Attach functions
+ */
+
+pm_probe( /* reg, ui */)
+{
+ static probed_once = 0;
+#ifdef DECSTATION
+ if (!isa_pmax())
+ return 0;
+ if (check_memory(KN01_FBUF_ADDR, 0))
+ return 0;
+#endif /*DECSTATION*/
+ if (probed_once++ > 1)
+ printf("[mappable] ");
+ return 1;
+}
+
+static void
+pm_attach(ui)
+ struct bus_device *ui;
+{
+ int isa_mono = ISA_MONO;
+
+ printf(": %s%s",
+ isa_mono ? "monochrome" : "color",
+ " display");
+}
+
+
+/*
+ * Interrupt routine
+ */
+#ifdef DECSTATION
+pm_intr(unit,spllevel)
+ spl_t spllevel;
+{
+ /* this is the vertical retrace one */
+ splx(spllevel);
+ lk201_led(unit);
+}
+#endif /*DECSTATION*/
+
+#ifdef VAXSTATION
+pm_intr(unit)
+{
+ lk201_led(unit);
+}
+#endif /*VAXSTATION*/
+
+/*
+ * Boot time initialization: must make device
+ * usable as console asap.
+ */
+extern int
+ pm_cons_init(), pm_soft_reset(),
+ dc503_video_on(), dc503_video_off(),
+ pm_char_paint(), dc503_pos_cursor(),
+ pm_insert_line(), pm_remove_line(), pm_clear_bitmap(),
+ pm_set_status(), pm_get_status(),
+ VRETRACE(), pm_map_page();
+
+static struct screen_switch pm_sw = {
+ screen_noop, /* graphic_open */
+ pm_soft_reset, /* graphic_close */
+ pm_set_status, /* set_status */
+	pm_get_status,			/* get_status */
+ pm_char_paint, /* char_paint */
+ dc503_pos_cursor, /* pos_cursor */
+ pm_insert_line, /* insert_line */
+ pm_remove_line, /* remove_line */
+ pm_clear_bitmap, /* clear_bitmap */
+ dc503_video_on, /* video_on */
+ dc503_video_off, /* video_off */
+ VRETRACE, /* enable vert retrace intr */
+ pm_map_page /* map_page */
+};
+
+pm_cold_init(unit, up)
+ user_info_t *up;
+{
+ pm_softc_t *pm;
+ screen_softc_t sc = screen(unit);
+ int isa_mono = ISA_MONO;
+
+ bcopy(&pm_sw, &sc->sw, sizeof(sc->sw));
+ if (isa_mono) {
+ sc->flags |= MONO_SCREEN;
+ sc->frame_scanline_width = MONO_FRAME_WIDTH;
+ } else {
+ sc->flags |= COLOR_SCREEN;
+ sc->frame_scanline_width = 1024;
+ }
+ sc->frame_height = 864;
+ sc->frame_visible_width = 1024;
+ sc->frame_visible_height = 864;
+
+ pm_init_screen_params(sc, up);
+ (void) screen_up(unit, up);
+
+#ifdef DECSTATION
+ pm = pm_alloc(unit, KN01_DC503_ADDR, KN01_FBUF_ADDR, KN01_PLM_ADDR);
+ pm->vdac_registers = (char*)KN01_BT478_ADDR;
+#endif /*DECSTATION*/
+#ifdef VAXSTATION
+ pm = pm_alloc(unit, cur_xxx, bm_mem, 0);
+#endif /*VAXSTATION*/
+
+ screen_default_colors(up);
+
+ dc503_init(pm);
+
+ pm_soft_reset(sc);
+
+ /*
+	 * Clearing the screen at boot saves a lot of scrolling
+	 * and speeds up booting quite a bit.
+ */
+ screen_blitc( unit, 'C'-'@');/* clear screen */
+}
+
+#endif /* NBM>0 */
diff --git a/chips/pm_misc.c b/chips/pm_misc.c
new file mode 100644
index 00000000..d423645b
--- /dev/null
+++ b/chips/pm_misc.c
@@ -0,0 +1,594 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: pm_misc.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Driver for the VFB01/02 Mono/Color framebuffer (pmax)
+ * Hardware-independent operations, mostly shared with
+ * the CFB driver (see each individual function header),
+ * and possibly others.
+ */
+
+
+#include <platforms.h>
+
+#include <fb.h>
+
+#if defined(DECSTATION) || defined(FLAMINGO)
+#include <cfb.h>
+#include <mfb.h>
+#include <xcfb.h>
+#include <sfb.h>
+#define NPM (NFB+NCFB+NMFB+NXCFB+NSFB)
+#endif /*DECSTATION||FLAMINGO*/
+
+#ifdef VAXSTATION
+#define NPM (NFB)
+#endif /*VAXSTATION*/
+
+
+#if (NPM > 0)
+
+#include <mach/vm_param.h> /* PAGE_SIZE */
+#include <device/device_types.h>
+#include <vm/vm_map.h> /* kernel_pmap */
+
+#include <chips/screen_defs.h>
+#include <chips/pm_defs.h>
+
+
+#ifdef DECSTATION
+#define machine_btop mips_btop
+#define MONO_BM (256*1024)
+#endif /*DECSTATION*/
+
+#ifdef VAXSTATION
+#define machine_btop vax_btop
+/*
+ For now we use the last page of the frame for
+ the user_info structure.
+*/
+#define MONO_BM (256*1024-PAGE_SIZE)
+#endif /*VAXSTATION*/
+
+#ifdef FLAMINGO
+#define machine_btop alpha_btop
+#define MONO_BM (256*1024)
+#define LOG2_SIZEOF_LONG 3 /* 64bit archies */
+#endif /* FLAMINGO */
+
+#ifndef LOG2_SIZEOF_LONG
+#define LOG2_SIZEOF_LONG 2 /* 32bit archies */
+#endif
+
+
+/* Hardware state */
+pm_softc_t pm_softc_data[NPM];
+
+pm_softc_t*
+pm_alloc(
+ int unit,
+ char *cur,
+ unsigned char *fb,
+ unsigned char *pl)
+{
+ pm_softc_t *pm = &pm_softc_data[unit];
+
+ pm->cursor_registers = cur;
+ pm->framebuffer = fb;
+ pm->plane_mask = pl;
+ pm->vdac_registers = 0; /* later, if ever */
+
+ screen_attach(unit, (char *) pm);
+
+ return pm;
+}
+
+
+/*
+ * Routine to paint a char on a simple framebuffer.
+ * This is common to the pm, fb and cfb drivers.
+ */
+pm_char_paint(
+ screen_softc_t sc,
+ int c,
+ int row,
+ int col)
+{
+ register int incr;
+ int line_size;
+ register unsigned char *font, *bmap;
+ pm_softc_t *pm = (pm_softc_t*)sc->hw_state;
+
+ /*
+ * Here are the magic numbers that drive the loops below:
+ * incr bytes between scanlines of the glyph
+ * line_size bytes in a row, using the system font
+ *
+ * This code has been optimized to avoid multiplications,
+ * and is therefore much less obvious than it could be.
+ */
+ if (sc->flags & MONO_SCREEN) {
+ /*
+ * B&W screen: 1 bit/pixel
+ * incr --> 1 * BytesPerLine, with possible stride
+ */
+ incr = sc->frame_scanline_width >> 3;
+ } else {
+ /*
+ * Color screen: 8 bits/pixel
+ * incr --> 8 * BytesPerLine, with possible stride
+ */
+ incr = sc->frame_scanline_width;
+ col <<= 3;
+ }
+
+ /* not all compilers are smart about multiply by 15 */
+#if (KfontHeight==15)
+# define TIMES_KfontHeight(w) (((w)<<4)-(w))
+#else
+# define TIMES_KfontHeight(w) ((w)*KfontHeight)
+#endif
+ line_size = TIMES_KfontHeight(incr);
+
+ bmap = pm->framebuffer + col + (row * line_size);
+ font = &kfont_7x14[ (int)(c - ' ') * 15];
+ if (sc->flags & MONO_SCREEN) {
+ /*
+ * Unroll simple loops, take note of common cases
+ */
+ if (sc->standout) {
+# define mv() *bmap = ~*font++; bmap += incr;
+ mv();mv();mv();mv();mv();mv();mv();mv();
+ mv();mv();mv();mv();mv();mv();mv();
+# undef mv
+ } else if (c == ' ') {
+# define mv() *bmap = 0; bmap += incr;
+ mv();mv();mv();mv();mv();mv();mv();mv();
+ mv();mv();mv();mv();mv();mv();mv();
+# undef mv
+ } else {
+# define mv() *bmap = *font++; bmap += incr;
+ mv();mv();mv();mv();mv();mv();mv();mv();
+ mv();mv();mv();mv();mv();mv();mv();
+# undef mv
+ }
+ } else {
+ /*
+ * 8 bits per pixel --> paint one byte per each font bit.
+ * In order to spread out the 8 bits of a glyph line over
+ * the 64 bits per scanline use a simple vector multiply,
+ * taking 4 bits at a time to get the two resulting words
+ */
+ static unsigned int spread[16] = {
+ 0x00000000, 0x00000001, 0x00000100, 0x00000101,
+ 0x00010000, 0x00010001, 0x00010100, 0x00010101,
+ 0x01000000, 0x01000001, 0x01000100, 0x01000101,
+ 0x01010000, 0x01010001, 0x01010100, 0x01010101,
+ };
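+		/*
+		 * Illustrative check: for a glyph byte of 0x0f the low
+		 * nibble is all ones and the high nibble all zeroes, so
+		 * the loop below writes spread[0xf] = 0x01010101 followed
+		 * by spread[0x0] = 0, i.e. four foreground pixels and
+		 * four background ones, one framebuffer byte per font bit.
+		 */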
+ register int rev_video = sc->standout;
+ register int j;
+ for (j = 0; j < KfontHeight; j++) {
+ register unsigned char c = *font++;
+ if (rev_video) c = ~c;
+#if (LOG2_SIZEOF_LONG==3)
+ *((long*)bmap) = (long)spread[ c & 0xf ] |
+ ((long)(spread[ (c>>4) & 0xf ]) << 32);
+#else
+ ((int*)bmap)[0] = spread[ c & 0xf ];
+ ((int*)bmap)[1] = spread[ (c>>4) & 0xf ];
+#endif
+ bmap += incr;
+ }
+ }
+}
+
+/*
+ * Delete the line at the given row.
+ * This is common to the pm, fb and cfb drivers.
+ */
+pm_remove_line(
+ screen_softc_t sc,
+ short row)
+{
+ register long *dest, *src;
+ register long *end;
+ register long temp0,temp1,temp2,temp3;
+ register long i, scaninc, blockcnt;
+ long line_size, incr;
+ unsigned char *framebuffer;
+ pm_softc_t *pm = (pm_softc_t*)sc->hw_state;
+ long CharRows, CharCols;
+
+ CharRows = sc->up->max_row;
+ CharCols = sc->up->max_col;
+ framebuffer = pm->framebuffer;
+
+ /* Inner loop works 4 long words at a time (writebuffer deep) */
+# define BlockSizeShift (2+LOG2_SIZEOF_LONG)
+
+ /* To copy one (MONO) line, we need to iterate this many times */
+# define Blocks (CharCols>>BlockSizeShift)
+
+ /* Skip this many bytes to get to the next line */
+# define Slop(w) ((w) - (blockcnt<<BlockSizeShift))
+
+ if (sc->flags & MONO_SCREEN) {
+ blockcnt = Blocks;
+ /* See comments in pm_char_paint() */
+ incr = sc->frame_scanline_width >> 3;
+ } else {
+ blockcnt = Blocks << 3;
+ /* See comments in pm_char_paint() */
+ incr = sc->frame_scanline_width;
+ }
+ line_size = TIMES_KfontHeight(incr);
+
+ scaninc = (Slop(incr)) >> LOG2_SIZEOF_LONG; /* pointers are long* */
+
+ dest = (long *)(framebuffer + row * line_size);
+ src = (long *)((char*)dest + line_size);
+ end = (long *)(framebuffer + CharRows * line_size);
+ while (src < end) {
+ i = 0;
+ do {
+ temp0 = src[0];
+ temp1 = src[1];
+ temp2 = src[2];
+ temp3 = src[3];
+ dest[0] = temp0;
+ dest[1] = temp1;
+ dest[2] = temp2;
+ dest[3] = temp3;
+ dest += 4;
+ src += 4;
+ i++;
+ } while (i < blockcnt);
+ src += scaninc;
+ dest += scaninc;
+ }
+
+ /* Now zero out the last line */
+ bzero(framebuffer + (CharRows - 1) * line_size, line_size);
+
+ ascii_screen_rem_update(sc, row);
+}
+
+
+/*
+ * Open a new blank line at the given row.
+ * This is common to the pm, fb and cfb drivers.
+ */
+pm_insert_line(
+ screen_softc_t sc,
+ short row)
+{
+ register long *dest, *src;
+ register long *end;
+ register long temp0,temp1,temp2,temp3;
+ register long i, scaninc, blockcnt;
+ long line_size, incr;
+ unsigned char *framebuffer;
+ pm_softc_t *pm = (pm_softc_t*)sc->hw_state;
+ long CharRows, CharCols;
+
+ CharRows = sc->up->max_row;
+ CharCols = sc->up->max_col;
+
+ framebuffer = pm->framebuffer;
+
+ /* See above for comments */
+ if (sc->flags & MONO_SCREEN) {
+ blockcnt = Blocks;
+ /* See comments in pm_char_paint() */
+ incr = sc->frame_scanline_width >> 3;
+ } else {
+ blockcnt = Blocks << 3;
+ /* See comments in pm_char_paint() */
+ incr = sc->frame_scanline_width;
+ }
+ line_size = TIMES_KfontHeight(incr);
+
+ scaninc = Slop(incr) + ((2 * blockcnt) << BlockSizeShift);
+ scaninc >>= LOG2_SIZEOF_LONG; /* pointers are long* */
+ dest = (long *)(framebuffer + (CharRows - 1) * line_size);
+ src = (long *)((char*)dest - line_size);
+ end = (long *)(framebuffer + row * line_size);
+ while (src >= end) {
+ i = 0;
+ do {
+ temp0 = src[0];
+ temp1 = src[1];
+ temp2 = src[2];
+ temp3 = src[3];
+ dest[0] = temp0;
+ dest[1] = temp1;
+ dest[2] = temp2;
+ dest[3] = temp3;
+ dest += 4;
+ src += 4;
+ i++;
+ } while (i < blockcnt);
+ src -= scaninc;
+ dest -= scaninc;
+ }
+
+ /* Now zero out the line being opened */
+ bzero(framebuffer + row * line_size, line_size);
+
+ ascii_screen_ins_update(sc, row);
+}
+
+#undef Slop
+
+
+/*
+ * Initialize screen parameters in the
+ * user-mapped descriptor.
+ * This is common to various drivers.
+ */
+pm_init_screen_params(
+ screen_softc_t sc,
+ user_info_t *up)
+{
+ register int vis_x, vis_y;
+
+ up->frame_scanline_width = sc->frame_scanline_width;
+ up->frame_height = sc->frame_height;
+
+ vis_x = sc->frame_visible_width;
+ vis_y = sc->frame_visible_height;
+
+ up->max_x = vis_x;
+ up->max_y = vis_y;
+ up->max_cur_x = vis_x - 1;
+ up->max_cur_y = vis_y - 1;
+ up->min_cur_x = -15;
+ up->min_cur_y = -15;
+ up->max_row = vis_y / KfontHeight;
+ up->max_col = vis_x / KfontWidth;
+
+ up->version = 11;
+
+ up->mouse_threshold = 4;
+ up->mouse_scale = 2;
+
+ up->dev_dep_2.pm.tablet_scale_x = ((vis_x - 1) * 1000) / 2200;
+ up->dev_dep_2.pm.tablet_scale_y = ((vis_y - 1) * 1000) / 2200;
+}
+
+/*
+ * Clear the screen
+ * Used by pm, fb and cfb
+ */
+pm_clear_bitmap(
+ screen_softc_t sc)
+{
+ pm_softc_t *pm = (pm_softc_t *) sc->hw_state;
+ unsigned int screen_size;
+
+ /* Do not touch the non visible part */
+ screen_size = sc->frame_scanline_width * sc->frame_visible_height;
+ blkclr((char *)pm->framebuffer,
+ (sc->flags & MONO_SCREEN) ? (screen_size>>3) : screen_size);
+
+ /* clear ascii screenmap */
+ ascii_screen_fill(sc, ' ');
+}
+
+
+/*
+ * Size of the user-mapped structure
+ * Used by both pm and cfb
+ */
+pm_mem_need()
+{
+ return USER_INFO_SIZE;
+}
+
+/*
+ * Device-specific get status.
+ * Used by fb and cfb also.
+ */
+pm_get_status(
+ screen_softc_t sc,
+ dev_flavor_t flavor,
+ dev_status_t status,
+ natural_t *status_count)
+{
+ if (flavor == SCREEN_GET_OFFSETS) {
+ unsigned *offs = (unsigned *) status;
+
+ offs[0] = PM_SIZE(sc); /* virtual size */
+ offs[1] = 0; /* offset of user_info_t */
+ *status_count = 2;
+ return D_SUCCESS;
+ } else
+ return D_INVALID_OPERATION;
+}
+
+/*
+ * Driver-specific set status
+ * Only partially used by fb and cfb.
+ */
+pm_set_status(
+ screen_softc_t sc,
+ dev_flavor_t flavor,
+ dev_status_t status,
+ natural_t status_count)
+{
+ switch (flavor) {
+ case SCREEN_ADJ_MAPPED_INFO: {
+ unsigned user_addr = *(unsigned *) status;
+ user_info_t *up = sc->up;
+
+ /* Make it point to the event_queue, in user virtual */
+ up->evque.events = (screen_event_t *)(user_addr +
+ ((char*)up->event_queue - (char*)up));
+
+ /* Make it point to the point_track, in user virtual */
+ up->evque.track = (screen_timed_point_t *)(user_addr +
+ ((char*)up->point_track - (char*)up));
+
+ up->dev_dep_1.pm.planemask = (unsigned char *)(user_addr + USER_INFO_SIZE);
+
+ up->dev_dep_1.pm.bitmap = up->dev_dep_1.pm.planemask + PMASK_SIZE;
+
+ break;
+ }
+
+ case SCREEN_LOAD_CURSOR: {
+
+ sc->flags |= SCREEN_BEING_UPDATED;
+ dc503_load_cursor(sc->hw_state, (unsigned short*)status);
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+
+ break;
+ }
+
+#ifdef DECSTATION
+ case SCREEN_SET_CURSOR_COLOR: {
+ pm_softc_t *pm = (pm_softc_t*) sc->hw_state;
+
+ sc->flags |= SCREEN_BEING_UPDATED;
+ bt478_cursor_color (pm->vdac_registers, (cursor_color_t*) status);
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+
+ break;
+ }
+
+ case SCREEN_SET_CMAP_ENTRY: {
+ pm_softc_t *pm = (pm_softc_t*) sc->hw_state;
+ color_map_entry_t *e = (color_map_entry_t*) status;
+
+ if (e->index < 256) {
+ sc->flags |= SCREEN_BEING_UPDATED;
+ bt478_load_colormap_entry( pm->vdac_registers, e->index, &e->value);
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+ }
+
+ break;
+ }
+#endif /*DECSTATION*/
+ default:
+ return D_INVALID_OPERATION;
+ }
+ return D_SUCCESS;
+}
+
+/*
+ * Map pages to user space
+ */
+vm_offset_t pm_map_page_empty = (vm_offset_t) 0;
+
+integer_t
+pm_map_page(
+ screen_softc_t sc,
+ vm_offset_t off,
+ int prot)
+{
+ int bitmapsize;
+ integer_t addr;
+ pm_softc_t *pm = (pm_softc_t *)sc->hw_state;
+ extern vm_offset_t pmap_extract( pmap_t map, vm_offset_t addr);
+
+ bitmapsize = BITMAP_SIZE(sc);
+
+#define OFF0 USER_INFO_SIZE /* user mapped info */
+#define OFF1 OFF0+PMASK_SIZE /* plane mask register */
+#define OFF2 OFF1+bitmapsize /* frame buffer mem */
+
+ if (off < OFF0)
+#ifdef DECSTATION
+ addr = kvtophys(sc->up);
+#else
+ addr = (integer_t) pmap_extract(kernel_pmap,
+ (vm_offset_t)sc->up);
+#endif
+ else
+ if (off < OFF1) {
+#ifdef VAXSTATION
+ if (pm_map_page_empty == 0) {
+ pm_map_page_empty = vm_page_grab_phys_addr();
+ }
+ addr = (integer_t)pmap_extract(kernel_pmap, pm_map_page_empty);
+#else
+ addr = (integer_t) pm->plane_mask;
+#endif
+ off -= OFF0;
+ } else
+ if (off < OFF2) {
+#ifdef DECSTATION
+ addr = (integer_t)pm->framebuffer;
+#else
+ addr = (integer_t)pmap_extract(kernel_pmap,
+ (vm_offset_t)pm->framebuffer);
+#endif
+ off -= OFF1;
+ } else
+ return D_INVALID_SIZE; /* ??? */
+
+ addr = machine_btop(addr + off);
+ return (addr);
+}
+
+
+/*
+ *-----------------------------------------------------------
+ * The rest of this file is strictly pmax/pvax-specific
+ *-----------------------------------------------------------
+ */
+#if (NFB > 0)
+
+/*
+ * Do what's needed when the X server exits
+ */
+pm_soft_reset(
+ screen_softc_t sc)
+{
+ pm_softc_t *pm = (pm_softc_t*) sc->hw_state;
+ user_info_t *up = sc->up;
+
+ /*
+ * Restore params in mapped structure
+ */
+ pm_init_screen_params(sc, up);
+ up->row = up->max_row - 2;
+ dc503_init(pm);
+
+#ifdef DECSTATION
+ if (sc->flags & MONO_SCREEN)
+ bt478_init_bw_map(pm->vdac_registers, pm->plane_mask);
+ else
+ bt478_init_color_map(pm->vdac_registers, pm->plane_mask);
+#endif /*DECSTATION*/
+}
+#endif /* NFB > 0 */
+
+
+#endif /* NPM > 0 */
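A note on the layout that pm_map_page() implements above: user offsets in [0, USER_INFO_SIZE) map the page holding the shared user_info_t, offsets in [USER_INFO_SIZE, USER_INFO_SIZE + PMASK_SIZE) map the plane-mask page, and offsets up to PM_SIZE(sc) map framebuffer pages, with the offset rebased at each boundary. Assuming the usual 4096-byte pages, an offset of 8192 therefore maps the first framebuffer page, which matches the user-space pointers installed by SCREEN_ADJ_MAPPED_INFO above (planemask at user_addr + USER_INFO_SIZE, bitmap immediately after the plane mask).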
diff --git a/chips/scc_8530.h b/chips/scc_8530.h
new file mode 100644
index 00000000..96a7964e
--- /dev/null
+++ b/chips/scc_8530.h
@@ -0,0 +1,355 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scc_8530.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/91
+ *
+ * Definitions for the Zilog Z8530 SCC serial line chip
+ */
+
+#ifndef _SCC_8530_H_
+#define _SCC_8530_H_
+
+/*
+ * Register map; needs a definition of the alignment
+ * used on the specific machine.
+ * #define the 'scc_register_t' data type before
+ * including this header file. For restrictions on
+ * access modes define the set/get_datum macros.
+ * We provide defaults if not.
+ */
+
+#ifndef scc_register_t
+
+typedef struct scc_register {
+ volatile unsigned char datum;
+} scc_register_t;
+
+#endif
+
+
+#define SCC_CHANNEL_A 1
+#define SCC_CHANNEL_B 0
+
+typedef struct {
+ /* Channel B is first, then A */
+ struct {
+ scc_register_t scc_command; /* reg select */
+ scc_register_t scc_data; /* Rx/Tx buffer */
+ } scc_channel[2];
+} scc_regmap_t;
+
+
+#ifndef scc_set_datum
+#define scc_set_datum(d,v) (d) = (v)
+#define scc_get_datum(d,v) (v) = (d)
+#endif
+
+#define scc_init_reg(scc,chan) { \
+ char tmp; \
+ scc_get_datum((scc)->scc_channel[(chan)].scc_command.datum,tmp); \
+ scc_get_datum((scc)->scc_channel[(chan)].scc_command.datum,tmp); \
+ }
+
+#define scc_read_reg(scc,chan,reg,val) { \
+ scc_set_datum((scc)->scc_channel[(chan)].scc_command.datum,reg); \
+ scc_get_datum((scc)->scc_channel[(chan)].scc_command.datum,val); \
+ }
+
+#define scc_read_reg_zero(scc,chan,val) { \
+ scc_get_datum((scc)->scc_channel[(chan)].scc_command.datum,val); \
+ }
+
+#define scc_write_reg(scc,chan,reg,val) { \
+ scc_set_datum((scc)->scc_channel[(chan)].scc_command.datum,reg); \
+ scc_set_datum((scc)->scc_channel[(chan)].scc_command.datum,val); \
+ }
+
+#define scc_write_reg_zero(scc,chan,val) { \
+ scc_set_datum((scc)->scc_channel[(chan)].scc_command.datum,val); \
+ }
+
+#define scc_read_data(scc,chan,val) { \
+ scc_get_datum((scc)->scc_channel[(chan)].scc_data.datum,val); \
+ }
+
+#define scc_write_data(scc,chan,val) { \
+ scc_set_datum((scc)->scc_channel[(chan)].scc_data.datum,val); \
+ }
+
+/*
+ * Addressable registers
+ */
+
+#define SCC_RR0 0 /* status register */
+#define SCC_RR1 1 /* special receive conditions */
+#define SCC_RR2 2 /* (modified) interrupt vector */
+#define SCC_RR3 3 /* interrupts pending (cha A only) */
+#define SCC_RR8 8 /* recv buffer (alias for data) */
+#define SCC_RR10 10 /* sdlc status */
+#define SCC_RR12 12 /* BRG constant, low part */
+#define SCC_RR13 13 /* BRG constant, high part */
+#define SCC_RR15 15 /* interrupts currently enabled */
+
+#define SCC_WR0 0 /* reg select, and commands */
+#define SCC_WR1 1 /* interrupt and DMA enables */
+#define SCC_WR2 2 /* interrupt vector */
+#define SCC_WR3 3 /* receiver params and enables */
+#define SCC_WR4 4 /* clock/char/parity params */
+#define SCC_WR5 5 /* xmit params and enables */
+#define SCC_WR6 6 /* synchr SYNCH/address */
+#define SCC_WR7 7 /* synchr SYNCH/flag */
+#define SCC_WR8 8 /* xmit buffer (alias for data) */
+#define SCC_WR9 9 /* vectoring and resets */
+#define SCC_WR10 10 /* synchr params */
+#define SCC_WR11 11 /* clocking definitions */
+#define SCC_WR12 12 /* BRG constant, low part */
+#define SCC_WR13 13 /* BRG constant, high part */
+#define SCC_WR14 14 /* BRG enables and commands */
+#define SCC_WR15 15 /* interrupt enables */
+
+/*
+ * Read registers defines
+ */
+
+#define SCC_RR0_BREAK 0x80 /* break detected (rings twice), or */
+#define SCC_RR0_ABORT 0x80 /* abort (synchr) */
+#define SCC_RR0_TX_UNDERRUN 0x40 /* xmit buffer empty/end of message */
+#define	SCC_RR0_CTS		0x20	/* clear-to-send pin active (sampled
+					   only on intr and after RESI cmd) */
+#define SCC_RR0_SYNCH 0x10 /* SYNCH found/still hunting */
+#define SCC_RR0_DCD 0x08 /* carrier-detect (same as CTS) */
+#define SCC_RR0_TX_EMPTY 0x04 /* xmit buffer empty */
+#define SCC_RR0_ZERO_COUNT 0x02 /* ? */
+#define SCC_RR0_RX_AVAIL 0x01 /* recv fifo not empty */
+
+#define SCC_RR1_EOF 0x80 /* end-of-frame, SDLC mode */
+#define SCC_RR1_CRC_ERR 0x40 /* incorrect CRC or.. */
+#define SCC_RR1_FRAME_ERR 0x40 /* ..bad frame */
+#define SCC_RR1_RX_OVERRUN 0x20 /* rcv fifo overflow */
+#define SCC_RR1_PARITY_ERR 0x10 /* incorrect parity in data */
+#define SCC_RR1_RESIDUE0 0x08
+#define SCC_RR1_RESIDUE1 0x04
+#define SCC_RR1_RESIDUE2 0x02
+#define SCC_RR1_ALL_SENT 0x01
+
+/* RR2 contains the interrupt vector unmodified (channel A) or
+ modified as follows (channel B, if vector-include-status) */
+
+#define SCC_RR2_STATUS(val) ((val)&0xf)
+
+#define SCC_RR2_B_XMIT_DONE 0x0
+#define SCC_RR2_B_EXT_STATUS 0x2
+#define SCC_RR2_B_RECV_DONE 0x4
+#define SCC_RR2_B_RECV_SPECIAL 0x6
+#define SCC_RR2_A_XMIT_DONE 0x8
+#define SCC_RR2_A_EXT_STATUS 0xa
+#define SCC_RR2_A_RECV_DONE 0xc
+#define SCC_RR2_A_RECV_SPECIAL 0xe
+
+/* Interrupts pending, to be read from channel A only (B raz) */
+#define SCC_RR3_zero 0xc0
+#define SCC_RR3_RX_IP_A 0x20
+#define SCC_RR3_TX_IP_A 0x10
+#define SCC_RR3_EXT_IP_A 0x08
+#define SCC_RR3_RX_IP_B 0x04
+#define SCC_RR3_TX_IP_B 0x02
+#define SCC_RR3_EXT_IP_B 0x01
+
+/* RR8 is the receive data buffer, a 3 deep FIFO */
+#define SCC_RECV_BUFFER SCC_RR8
+#define SCC_RECV_FIFO_DEEP 3
+
+#define SCC_RR10_1CLKS 0x80
+#define SCC_RR10_2CLKS 0x40
+#define SCC_RR10_zero 0x2d
+#define SCC_RR10_LOOP_SND 0x10
+#define SCC_RR10_ON_LOOP 0x02
+
+/* RR12/RR13 hold the timing base, upper byte in RR13 */
+
+#define scc_get_timing_base(scc,chan,val) { \
+ register char tmp; \
+ scc_read_reg(scc,chan,SCC_RR12,val);\
+ scc_read_reg(scc,chan,SCC_RR13,tmp);\
+ (val) = ((val)<<8)|(tmp&0xff);\
+ }
+
+#define SCC_RR15_BREAK_IE 0x80
+#define SCC_RR15_TX_UNDERRUN_IE 0x40
+#define SCC_RR15_CTS_IE 0x20
+#define SCC_RR15_SYNCH_IE 0x10
+#define SCC_RR15_DCD_IE 0x08
+#define SCC_RR15_zero 0x05
+#define SCC_RR15_ZERO_COUNT_IE 0x02
+
+
+/*
+ * Write registers defines
+ */
+
+/* WR0 is used for commands too */
+#define SCC_RESET_TXURUN_LATCH 0xc0
+#define SCC_RESET_TX_CRC 0x80
+#define SCC_RESET_RX_CRC 0x40
+#define SCC_RESET_HIGHEST_IUS 0x38 /* channel A only */
+#define SCC_RESET_ERROR 0x30
+#define SCC_RESET_TX_IP 0x28
+#define SCC_IE_NEXT_CHAR 0x20
+#define SCC_SEND_SDLC_ABORT 0x18
+#define SCC_RESET_EXT_IP 0x10
+
+#define SCC_WR1_DMA_ENABLE 0x80 /* dma control */
+#define SCC_WR1_DMA_MODE 0x40 /* drive ~req for DMA controller */
+#define SCC_WR1_DMA_RECV_DATA 0x20 /* from wire to host memory */
+ /* interrupt enable/conditions */
+#define SCC_WR1_RXI_SPECIAL_O 0x18 /* on special only */
+#define SCC_WR1_RXI_ALL_CHAR 0x10 /* on each char, or special */
+#define SCC_WR1_RXI_FIRST_CHAR 0x08 /* on first char, or special */
+#define SCC_WR1_RXI_DISABLE 0x00 /* never on recv */
+#define SCC_WR1_PARITY_IE 0x04 /* on parity errors */
+#define SCC_WR1_TX_IE 0x02
+#define SCC_WR1_EXT_IE 0x01
+
+/* WR2 is common and contains the interrupt vector (high nibble) */
+
+#define SCC_WR3_RX_8_BITS 0xc0
+#define SCC_WR3_RX_6_BITS 0x80
+#define SCC_WR3_RX_7_BITS 0x40
+#define SCC_WR3_RX_5_BITS 0x00
+#define SCC_WR3_AUTO_ENABLE 0x20
+#define SCC_WR3_HUNT_MODE 0x10
+#define SCC_WR3_RX_CRC_ENABLE 0x08
+#define SCC_WR3_SDLC_SRCH 0x04
+#define SCC_WR3_INHIBIT_SYNCH 0x02
+#define SCC_WR3_RX_ENABLE 0x01
+
+/* Should be re-written after reset */
+#define SCC_WR4_CLK_x64 0xc0 /* clock divide factor */
+#define SCC_WR4_CLK_x32 0x80
+#define SCC_WR4_CLK_x16 0x40
+#define SCC_WR4_CLK_x1 0x00
+#define SCC_WR4_EXT_SYNCH_MODE 0x30 /* synch modes */
+#define SCC_WR4_SDLC_MODE 0x20
+#define SCC_WR4_16BIT_SYNCH 0x10
+#define SCC_WR4_8BIT_SYNCH 0x00
+#define SCC_WR4_2_STOP 0x0c /* asynch modes */
+#define SCC_WR4_1_5_STOP 0x08
+#define SCC_WR4_1_STOP 0x04
+#define SCC_WR4_SYNCH_MODE 0x00
+#define SCC_WR4_EVEN_PARITY 0x02
+#define SCC_WR4_PARITY_ENABLE 0x01
+
+#define SCC_WR5_DTR 0x80 /* drive DTR pin */
+#define SCC_WR5_TX_8_BITS 0x60
+#define SCC_WR5_TX_6_BITS 0x40
+#define SCC_WR5_TX_7_BITS 0x20
+#define SCC_WR5_TX_5_BITS 0x00
+#define SCC_WR5_SEND_BREAK 0x10
+#define SCC_WR5_TX_ENABLE 0x08
+#define SCC_WR5_CRC_16 0x04 /* CRC if non zero, .. */
+#define SCC_WR5_SDLC 0x00 /* ..SDLC otherwise */
+#define SCC_WR5_RTS 0x02 /* drive RTS pin */
+#define SCC_WR5_TX_CRC_ENABLE 0x01
+
+/* Registers WR6 and WR7 are for synch modes data, with among other things: */
+
+#define SCC_WR6_BISYNCH_12 0x0f
+#define SCC_WR6_SDLC_RANGE_MASK 0x0f
+#define SCC_WR7_SDLC_FLAG 0x7e
+
+/* WR8 is the transmit data buffer (no FIFO) */
+#define SCC_XMT_BUFFER SCC_WR8
+
+#define SCC_WR9_HW_RESET 0xc0 /* force hardware reset */
+#define SCC_WR9_RESET_CHA_A 0x80
+#define SCC_WR9_RESET_CHA_B 0x40
+#define SCC_WR9_NON_VECTORED 0x20 /* mbz for Zilog chip */
+#define SCC_WR9_STATUS_HIGH 0x10
+#define SCC_WR9_MASTER_IE 0x08
+#define SCC_WR9_DLC 0x04 /* disable-lower-chain */
+#define SCC_WR9_NV 0x02 /* no vector */
+#define SCC_WR9_VIS 0x01 /* vector-includes-status */
+
+#define SCC_WR10_CRC_PRESET 0x80
+#define SCC_WR10_FM0 0x60
+#define SCC_WR10_FM1 0x40
+#define SCC_WR10_NRZI 0x20
+#define SCC_WR10_NRZ 0x00
+#define SCC_WR10_ACTIVE_ON_POLL 0x10
+#define SCC_WR10_MARK_IDLE 0x08 /* flag if zero */
+#define SCC_WR10_ABORT_ON_URUN 0x04 /* flag if zero */
+#define SCC_WR10_LOOP_MODE 0x02
+#define SCC_WR10_6BIT_SYNCH 0x01
+#define SCC_WR10_8BIT_SYNCH 0x00
+
+#define SCC_WR11_RTxC_XTAL 0x80 /* RTxC pin is input (ext oscill) */
+#define SCC_WR11_RCLK_DPLL 0x60 /* clock received data on dpll */
+#define SCC_WR11_RCLK_BAUDR 0x40 /* .. on BRG */
+#define SCC_WR11_RCLK_TRc_PIN 0x20 /* .. on TRxC pin */
+#define SCC_WR11_RCLK_RTc_PIN 0x00 /* .. on RTxC pin */
+#define SCC_WR11_XTLK_DPLL 0x18
+#define SCC_WR11_XTLK_BAUDR 0x10
+#define SCC_WR11_XTLK_TRc_PIN 0x08
+#define SCC_WR11_XTLK_RTc_PIN 0x00
+#define SCC_WR11_TRc_OUT 0x04 /* drive TRxC pin as output from..*/
+#define SCC_WR11_TRcOUT_DPLL 0x03 /* .. the dpll */
+#define SCC_WR11_TRcOUT_BAUDR 0x02 /* .. the BRG */
+#define SCC_WR11_TRcOUT_XMTCLK 0x01 /* .. the xmit clock */
+#define SCC_WR11_TRcOUT_XTAL 0x00 /* .. the external oscillator */
+
+/* WR12/WR13 are for timing base preset */
+#define scc_set_timing_base(scc,chan,val) { \
+ scc_write_reg(scc,chan,SCC_RR12,val);\
+ scc_write_reg(scc,chan,SCC_RR13,(val)>>8);\
+ }
+
+/* More commands in this register */
+#define SCC_WR14_NRZI_MODE 0xe0 /* synch modulations */
+#define SCC_WR14_FM_MODE 0xc0
+#define SCC_WR14_RTc_SOURCE 0xa0 /* clock is from pin .. */
+#define SCC_WR14_BAUDR_SOURCE 0x80 /* .. or internal BRG */
+#define SCC_WR14_DISABLE_DPLL 0x60
+#define SCC_WR14_RESET_CLKMISS 0x40
+#define SCC_WR14_SEARCH_MODE 0x20
+/* ..and more bitsy */
+#define SCC_WR14_LOCAL_LOOPB 0x10
+#define SCC_WR14_AUTO_ECHO 0x08
+#define SCC_WR14_DTR_REQUEST 0x04
+#define SCC_WR14_BAUDR_SRC 0x02
+#define SCC_WR14_BAUDR_ENABLE 0x01
+
+#define SCC_WR15_BREAK_IE 0x80
+#define SCC_WR15_TX_UNDERRUN_IE 0x40
+#define SCC_WR15_CTS_IE 0x20
+#define SCC_WR15_SYNCHUNT_IE 0x10
+#define SCC_WR15_DCD_IE 0x08
+#define SCC_WR15_zero 0x05
+#define SCC_WR15_ZERO_COUNT_IE 0x02
+
+
+#endif /*_SCC_8530_H_*/
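A hedged sketch of how the register macros above are meant to be used, programming the channel A baud-rate generator for 9600 baud; the fragment assumes regs already points at the chip (it would normally come from autoconfiguration), and the constant 22 is the scc_speeds[] entry computed in scc_8530_hdw.c below:

	scc_regmap_t *regs;		/* chip base address (placeholder) */
	int brg = 22;			/* 9600 baud, Pclk = 7.3728 MHz, x16 clock */

	scc_set_timing_base(regs, SCC_CHANNEL_A, brg);	/* WR13:WR12 = 0:22 */
	scc_write_reg(regs, SCC_CHANNEL_A, SCC_WR14,
		      SCC_WR14_BAUDR_ENABLE | SCC_WR14_BAUDR_SRC);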
diff --git a/chips/scc_8530_hdw.c b/chips/scc_8530_hdw.c
new file mode 100644
index 00000000..68c6e9d2
--- /dev/null
+++ b/chips/scc_8530_hdw.c
@@ -0,0 +1,1145 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scc_8530_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/91
+ *
+ * Hardware-level operations for the SCC Serial Line Driver
+ */
+
+#include <scc.h>
+#if NSCC > 0
+#include <bm.h>
+#include <platforms.h>
+
+#include <mach_kdb.h>
+
+#include <machine/machspl.h> /* spl definitions */
+#include <mach/std_types.h>
+#include <device/io_req.h>
+#include <device/tty.h>
+
+#include <chips/busses.h>
+#include <chips/serial_defs.h>
+#include <chips/screen_defs.h>
+
+/* Alignment and padding */
+#if defined(DECSTATION)
+/*
+ * 3min's padding
+ */
+typedef struct {
+ char pad0;
+ volatile unsigned char datum;
+ char pad1[2];
+} scc_padded1_register_t;
+
+#define scc_register_t scc_padded1_register_t
+#endif
+
+#if defined(FLAMINGO)
+typedef struct {
+ volatile unsigned int datum;
+ unsigned int pad1;
+} scc_padded1_register_t;
+
+#define scc_register_t scc_padded1_register_t
+
+#define scc_set_datum(d,v) (d) = (volatile unsigned int) (v) << 8, wbflush()
+#define scc_get_datum(d,v) (v) = ((d) >> 8) & 0xff
+
+#endif
+
+#include <chips/scc_8530.h> /* needs the above defs */
+
+#define private static
+#define public
+
+/*
+ * Forward decls
+ */
+private check_car( struct tty *, boolean_t );
+
+/*
+ * On the 3min, the keyboard and mouse come in on channel A
+ * of the two units.  The MI code expects them at 'lines'
+ * 0 and 1, respectively, so we map back and forth here.
+ * Note also that the MI code believes unit 0 has four lines.
+ */
+
+#define SCC_KBDUNIT 1
+#define SCC_PTRUNIT 0
+
+mi_to_scc(unitp, linep)
+ int *unitp, *linep;
+{
+ /* only play games on MI 'unit' 0 */
+ if (*unitp) {
+ /* e.g. by mapping the first four lines specially */
+		(*unitp)++;
+ return;
+ }
+
+ /* always get unit=0 (console) and line = 0|1 */
+ if (*linep == SCREEN_LINE_KEYBOARD) {
+ *unitp = SCC_KBDUNIT;
+ *linep = SCC_CHANNEL_A;
+ } else if (*linep == SCREEN_LINE_POINTER) {
+ *unitp = SCC_PTRUNIT;
+ *linep = SCC_CHANNEL_A;
+ } else {
+ *unitp = (*linep & 1);
+ *linep = SCC_CHANNEL_B;
+ }
+/* line 0 is channel B, line 1 is channel A */
+}
+
+#define NSCC_LINE 2 /* 2 ttys per chip */
+
+/* only care for mapping to ttyno */
+scc_to_mi(sccunit, sccline)
+{
+ if (sccunit > 1)
+ return (sccunit * NSCC_LINE + sccline);
+ /* only for console (first pair of SCCs): */
+ if (sccline == SCC_CHANNEL_A)
+ return ((!sccunit) & 1);
+ return 2+sccunit;
+}
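+/*
+ * Illustrative result of the two mappings for the console pair
+ * (scc units 0 and 1):
+ *
+ *	scc (unit 1, channel A)  <->  tty 0	keyboard
+ *	scc (unit 0, channel A)  <->  tty 1	pointer
+ *	scc (unit 0, channel B)  <->  tty 2	comm line
+ *	scc (unit 1, channel B)  <->  tty 3	comm line
+ */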
+
+
+/*
+ * Driver status
+ */
+struct scc_softc {
+ scc_regmap_t *regs;
+
+ /* software copy of some write regs, for reg |= */
+ struct softreg {
+ unsigned char wr1;
+ unsigned char wr4;
+ unsigned char wr5;
+ unsigned char wr14;
+ } softr[2]; /* per channel */
+
+ unsigned char last_rr0[2]; /* for modem signals */
+ unsigned short fake; /* missing rs232 bits, channel A */
+ char polling_mode;
+ char softCAR, osoftCAR;
+ char probed_once;
+
+ boolean_t full_modem;
+ boolean_t isa_console;
+
+} scc_softc_data[NSCC];
+
+typedef struct scc_softc *scc_softc_t;
+
+scc_softc_t scc_softc[NSCC];
+
+scc_softCAR(unit, line, on)
+{
+ mi_to_scc(&unit, &line);
+ if (on)
+ scc_softc[unit]->softCAR |= 1<<line;
+ else
+ scc_softc[unit]->softCAR &= ~(1 << line);
+}
+
+
+/*
+ * BRG formula is:
+ *
+ *	BRGconstant = ClockFrequency / (2 * BaudRate * ClockDivider)  -  2
+ *
+ */
+/* Speed selections with Pclk = 7.3728 MHz, clock x16 */
+static
+short scc_speeds[] =
+ /* 0 50 75 110 134.5 150 200 300 600 1200 1800 2400 */
+ { 0, 4606, 3070, 2093, 1711, 1534, 1150, 766, 382, 190, 126, 94,
+
+ /* 4800 9600 19.2k 38.4k */
+ 46, 22, 10, 4};
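+/*
+ * Illustrative check of the formula above: at 9600 baud,
+ *	7372800 / (2 * 9600 * 16) - 2 = 24 - 2 = 22
+ * which is the scc_speeds[] entry for 9600.
+ */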
+
+/*
+ * Definition of the driver for the auto-configuration program.
+ */
+
+int scc_probe(), scc_intr();
+static void scc_attach();
+
+vm_offset_t scc_std[NSCC] = { 0 };
+struct bus_device *scc_info[NSCC];
+struct bus_driver scc_driver =
+ { scc_probe, 0, scc_attach, 0, scc_std, "scc", scc_info,};
+
+/*
+ * Adapt/Probe/Attach functions
+ */
+boolean_t scc_uses_modem_control = FALSE;/* patch this with adb */
+
+set_scc_address(
+ int sccunit,
+ vm_offset_t regs,
+ boolean_t has_modem,
+ boolean_t isa_console)
+{
+ extern int scc_probe(), scc_param(), scc_start(),
+ scc_putc(), scc_getc(),
+ scc_pollc(), scc_mctl(), scc_softCAR();
+
+ scc_std[sccunit] = regs;
+ scc_softc_data[sccunit].full_modem = has_modem & scc_uses_modem_control;
+ scc_softc_data[sccunit].isa_console = isa_console;
+
+ /* Do this here */
+ console_probe = scc_probe;
+ console_param = scc_param;
+ console_start = scc_start;
+ console_putc = scc_putc;
+ console_getc = scc_getc;
+ console_pollc = scc_pollc;
+ console_mctl = scc_mctl;
+ console_softCAR = scc_softCAR;
+
+}
+
+scc_probe(
+ int xxx,
+ struct bus_device *ui)
+{
+ int sccunit = ui->unit;
+ scc_softc_t scc;
+ register int val;
+ register scc_regmap_t *regs;
+
+ regs = (scc_regmap_t *)scc_std[sccunit];
+ if (regs == 0)
+ return 0;
+
+ /*
+ * See if we are here
+ */
+ if (check_memory(regs, 0)) {
+ /* no rides today */
+ return 0;
+ }
+
+ scc = &scc_softc_data[sccunit];
+
+ if (scc->probed_once++){
+ return 1;
+ }
+ /*
+ * Chip once-only initialization
+ *
+ * NOTE: The wiring we assume is the one on the 3min:
+ *
+ * out A-TxD --> TxD keybd or mouse
+ * in A-RxD --> RxD keybd or mouse
+ * out A-DTR~ --> DTR comm
+ * out A-RTS~ --> RTS comm
+ * in A-CTS~ --> SI comm
+ * in A-DCD~ --> RI comm
+ * in A-SYNCH~--> DSR comm
+ * out B-TxD --> TxD comm
+ * in B-RxD --> RxD comm
+ * in B-RxC --> TRxCB comm
+ * in B-TxC --> RTxCB comm
+ * out B-RTS~ --> SS comm
+ * in B-CTS~ --> CTS comm
+ * in B-DCD~ --> CD comm
+ */
+
+ scc_softc[sccunit] = scc;
+ scc->regs = regs;
+
+ scc->fake = 1<<SCC_CHANNEL_A;
+
+ {
+ register int i;
+ /* We need this in scc_start only, hence the funny
+ value: we need it non-zero and we want to avoid
+ too much overhead in getting to (scc,regs,line) */
+ for (i = 0; i < NSCC_LINE; i++) {
+ register struct tty *tp;
+
+ tp = console_tty[scc_to_mi(sccunit,i)];
+ tp->t_addr = (char*)(0x80000000L + (sccunit<<1) + (i&1));
+ /* do min buffering */
+ tp->t_state |= TS_MIN;
+ }
+ }
+
+ /* make sure reg pointer is in known state */
+ scc_init_reg(regs, SCC_CHANNEL_A);
+ scc_init_reg(regs, SCC_CHANNEL_B);
+
+ /* reset chip, fully */
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_WR9, SCC_WR9_HW_RESET);
+ delay(50000);/*enough ? */
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_WR9, 0);
+
+ /* program the interrupt vector */
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_WR2, 0xf0);
+ scc_write_reg(regs, SCC_CHANNEL_B, SCC_WR2, 0xf0);
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_WR9, SCC_WR9_VIS);
+
+ /* most of the init is in scc_param() */
+
+ /* timing base defaults */
+ scc->softr[SCC_CHANNEL_A].wr4 = SCC_WR4_CLK_x16;
+ scc->softr[SCC_CHANNEL_B].wr4 = SCC_WR4_CLK_x16;
+
+	/* enable DTR, RTS and don't SS */
+#if 0
+	/* According to one book I have, this signal (pin 23, "SS")
+	   is "specified by the provider", meaning the EIA-232-D
+	   standard does not define what it is.  Better to leave
+	   it alone. */
+ scc->softr[SCC_CHANNEL_B].wr5 = SCC_WR5_RTS;
+#else
+ scc->softr[SCC_CHANNEL_B].wr5 = 0;
+#endif
+ scc->softr[SCC_CHANNEL_A].wr5 = SCC_WR5_RTS | SCC_WR5_DTR;
+
+ /* baud rates */
+ val = SCC_WR14_BAUDR_ENABLE|SCC_WR14_BAUDR_SRC;
+ scc->softr[SCC_CHANNEL_B].wr14 = val;
+ scc->softr[SCC_CHANNEL_A].wr14 = val;
+
+ /* interrupt conditions */
+ val = SCC_WR1_RXI_ALL_CHAR | SCC_WR1_PARITY_IE |
+ SCC_WR1_EXT_IE | SCC_WR1_TX_IE;
+ scc->softr[SCC_CHANNEL_A].wr1 = val;
+ scc->softr[SCC_CHANNEL_B].wr1 = val;
+
+ scc_read_reg_zero(regs, SCC_CHANNEL_A, scc->last_rr0[SCC_CHANNEL_A]);
+ scc_read_reg_zero(regs, SCC_CHANNEL_B, scc->last_rr0[SCC_CHANNEL_B]);
+
+ /*
+ * After probing, any line that should be active
+ * (keybd,mouse,rcline) is activated via scc_param().
+ */
+
+ scc_set_modem_control(scc, scc->full_modem);
+
+#if defined(KMIN) || defined (FLAMINGO) || defined(KN03)
+ /*
+	 * Crock: the MI code knows of unit 0 as the console, but we
+	 * need unit 1 as well since the keyboard is there.
+	 * This is acceptable on maxine, which has to call its only
+	 * chip unit 1 so that rconsole is happy.
+ */
+ if (sccunit == 0) {
+ struct bus_device d;
+ d = *ui;
+ d.unit = 1;
+ scc_probe( xxx, &d);
+ }
+#endif
+ return 1;
+}
+
+boolean_t scc_timer_started = FALSE;
+
+static void
+scc_attach(
+ register struct bus_device *ui)
+{
+ int sccunit = ui->unit;
+ extern scc_scan();
+ extern int tty_inq_size;
+ int i;
+
+ /* We only have 4 ttys, but always at 9600
+ * Give em a lot of room (plus dma..)
+ */
+ tty_inq_size = 4096;
+ if (!scc_timer_started) {
+ /* do all of them, before we call scc_scan() */
+ /* harmless if done already */
+ for (i = 0; i < NSCC*NSCC_LINE; i++)
+ ttychars(console_tty[i]);
+
+ scc_timer_started = TRUE;
+ scc_scan();
+ }
+
+#if NBM > 0
+ if (SCREEN_ISA_CONSOLE() && scc_softc[sccunit]->isa_console) {
+ printf("\n sl0: ");
+ if (sccunit && rcline == 3) printf("( rconsole )");
+
+ if (sccunit == SCC_KBDUNIT) {
+ printf("\n sl1: "); lk201_attach(0, sccunit >> 1);
+ } else if (sccunit == SCC_PTRUNIT) {
+ printf("\n sl1: "); mouse_attach(0, sccunit >> 1);
+ }
+ } else
+#endif /*NBM > 0*/
+ {
+ printf("%s", (sccunit == 1) ?
+ "\n sl0: ( alternate console )\n sl1:" :
+ "\n sl0:\n sl1:");
+ }
+}
+
+/*
+ * Would you like to make a phone call ?
+ */
+scc_set_modem_control(
+ scc_softc_t scc,
+ boolean_t on)
+{
+ if (on)
+ /* it is your problem if the hardware is then broken */
+ scc->fake = 0;
+ else
+ scc->fake = 3;
+ scc->full_modem = on;
+ /* user should do an scc_param() if changed */
+}
+
+/*
+ * Polled I/O (debugger)
+ */
+scc_pollc(
+ int unit,
+ boolean_t on)
+{
+ scc_softc_t scc;
+ int line = SCREEN_LINE_KEYBOARD,
+ sccunit = unit;
+
+ mi_to_scc(&sccunit, &line);
+
+ scc = scc_softc[sccunit];
+ if (on) {
+ scc->polling_mode++;
+#if NBM > 0
+ screen_on_off(unit, TRUE);
+#endif /* NBM > 0 */
+ } else
+ scc->polling_mode--;
+}
+
+/*
+ * Interrupt routine
+ */
+int scc_intr_count;
+
+scc_intr(
+ int unit,
+ spl_t spllevel)
+{
+ scc_softc_t scc = scc_softc[unit];
+ register scc_regmap_t *regs = scc->regs;
+ register int rr1, rr2;
+ register int c;
+
+scc_intr_count++;
+
+#if mips
+ splx(spllevel); /* lower priority */
+#endif
+
+ while (1) {
+
+ scc_read_reg(regs, SCC_CHANNEL_B, SCC_RR2, rr2);
+
+ rr2 = SCC_RR2_STATUS(rr2);
+
+ /* are we done yet ? */
+ if (rr2 == 6) { /* strange, distinguished value */
+ register int rr3;
+ scc_read_reg(regs, SCC_CHANNEL_A, SCC_RR3, rr3);
+ if (rr3 == 0)
+ return;
+ }
+
+ if ((rr2 == SCC_RR2_A_XMIT_DONE) || (rr2 == SCC_RR2_B_XMIT_DONE)) {
+
+ register chan = (rr2 == SCC_RR2_A_XMIT_DONE) ?
+ SCC_CHANNEL_A : SCC_CHANNEL_B;
+
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_RR0, SCC_RESET_HIGHEST_IUS);
+ c = cons_simple_tint(scc_to_mi(unit,chan), FALSE);
+
+ if (c == -1) {
+ /* no more data for this line */
+
+ scc_read_reg(regs, chan, SCC_RR15, c);
+ c &= ~SCC_WR15_TX_UNDERRUN_IE;
+ scc_write_reg(regs, chan, SCC_WR15, c);
+
+ c = scc->softr[chan].wr1 & ~SCC_WR1_TX_IE;
+ scc_write_reg(regs, chan, SCC_WR1, c);
+ scc->softr[chan].wr1 = c;
+
+ c = cons_simple_tint(scc_to_mi(unit,chan), TRUE);
+ if (c != -1)
+ /* funny race, scc_start has been called already */
+ scc_write_data(regs, chan, c);
+ } else {
+ scc_write_data(regs, chan, c);
+ /* and leave it enabled */
+ }
+ }
+
+ else if (rr2 == SCC_RR2_A_RECV_DONE) {
+ int err = 0;
+
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_RR0, SCC_RESET_HIGHEST_IUS);
+ if (scc->polling_mode)
+ continue;
+
+ scc_read_data(regs, SCC_CHANNEL_A, c);
+ rr1 = scc_to_mi(unit,SCC_CHANNEL_A);
+ cons_simple_rint (rr1, rr1, c, 0);
+ }
+
+ else if (rr2 == SCC_RR2_B_RECV_DONE) {
+ int err = 0;
+
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_RR0, SCC_RESET_HIGHEST_IUS);
+ if (scc->polling_mode)
+ continue;
+
+ scc_read_data(regs, SCC_CHANNEL_B, c);
+ rr1 = scc_to_mi(unit,SCC_CHANNEL_B);
+ cons_simple_rint (rr1, rr1, c, 0);
+ }
+
+ else if ((rr2 == SCC_RR2_A_EXT_STATUS) || (rr2 == SCC_RR2_B_EXT_STATUS)) {
+ int chan = (rr2 == SCC_RR2_A_EXT_STATUS) ?
+ SCC_CHANNEL_A : SCC_CHANNEL_B;
+ scc_write_reg(regs, chan, SCC_RR0, SCC_RESET_EXT_IP);
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_RR0, SCC_RESET_HIGHEST_IUS);
+ scc_modem_intr(scc, chan, unit);
+ }
+
+ else if ((rr2 == SCC_RR2_A_RECV_SPECIAL) || (rr2 == SCC_RR2_B_RECV_SPECIAL)) {
+ register int chan = (rr2 == SCC_RR2_A_RECV_SPECIAL) ?
+ SCC_CHANNEL_A : SCC_CHANNEL_B;
+
+ scc_read_reg(regs, chan, SCC_RR1, rr1);
+ if (rr1 & (SCC_RR1_PARITY_ERR | SCC_RR1_RX_OVERRUN | SCC_RR1_FRAME_ERR)) {
+ int err;
+ /* map to CONS_ERR_xxx MI error codes */
+ err = ((rr1 & SCC_RR1_PARITY_ERR)<<8) |
+ ((rr1 & SCC_RR1_RX_OVERRUN)<<9) |
+ ((rr1 & SCC_RR1_FRAME_ERR)<<7);
+ scc_write_reg(regs, chan, SCC_RR0, SCC_RESET_ERROR);
+ rr1 = scc_to_mi(unit,chan);
+ cons_simple_rint(rr1, rr1, 0, err);
+ }
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_RR0, SCC_RESET_HIGHEST_IUS);
+ }
+
+ }
+
+}
+
+boolean_t
+scc_start(
+ struct tty *tp)
+{
+ register scc_regmap_t *regs;
+ register int chan, temp;
+ register struct softreg *sr;
+
+ temp = (natural_t)tp->t_addr;
+ chan = (temp & 1); /* channel */
+ temp = (temp >> 1)&0xff;/* sccunit */
+ regs = scc_softc[temp]->regs;
+ sr = &scc_softc[temp]->softr[chan];
+
+ scc_read_reg(regs, chan, SCC_RR15, temp);
+ temp |= SCC_WR15_TX_UNDERRUN_IE;
+ scc_write_reg(regs, chan, SCC_WR15, temp);
+
+ temp = sr->wr1 | SCC_WR1_TX_IE;
+ scc_write_reg(regs, chan, SCC_WR1, temp);
+ sr->wr1 = temp;
+
+ /* but we need a first char out or no cookie */
+ scc_read_reg(regs, chan, SCC_RR0, temp);
+ if (temp & SCC_RR0_TX_EMPTY)
+ {
+ register char c;
+
+ c = getc(&tp->t_outq);
+ scc_write_data(regs, chan, c);
+ }
+}
+
+/*
+ * Get a char from a specific SCC line
+ * [this is only used for console&screen purposes]
+ */
+scc_getc(
+ int unit,
+ int line,
+ boolean_t wait,
+ boolean_t raw)
+{
+ scc_softc_t scc;
+ register scc_regmap_t *regs;
+ unsigned char c;
+ int value, mi_line, rcvalue, from_line;
+
+ mi_line = line;
+ mi_to_scc(&unit, &line);
+
+ scc = scc_softc[unit];
+ regs = scc->regs;
+
+ /*
+ * wait till something available
+ *
+ * NOTE: we know! that rcline==3
+ */
+ if (rcline) rcline = 3;
+again:
+ rcvalue = 0;
+ while (1) {
+ scc_read_reg_zero(regs, line, value);
+ if (rcline && (mi_line == SCREEN_LINE_KEYBOARD)) {
+ scc_read_reg_zero(regs, SCC_CHANNEL_B, rcvalue);
+ value |= rcvalue;
+ }
+ if (((value & SCC_RR0_RX_AVAIL) == 0) && wait)
+ delay(10);
+ else
+ break;
+ }
+
+ /*
+ * if nothing found return -1
+ */
+ from_line = (rcvalue & SCC_RR0_RX_AVAIL) ? SCC_CHANNEL_B : line;
+
+ if (value & SCC_RR0_RX_AVAIL) {
+ scc_read_reg(regs, from_line, SCC_RR1, value);
+ scc_read_data(regs, from_line, c);
+ } else {
+/* splx(s);*/
+ return -1;
+ }
+
+ /*
+ * bad chars not ok
+ */
+ if (value&(SCC_RR1_PARITY_ERR | SCC_RR1_RX_OVERRUN | SCC_RR1_FRAME_ERR)) {
+/* scc_state(unit,from_line); */
+ scc_write_reg(regs, from_line, SCC_RR0, SCC_RESET_ERROR);
+ if (wait) {
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_RR0, SCC_RESET_HIGHEST_IUS);
+ goto again;
+ }
+ }
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_RR0, SCC_RESET_HIGHEST_IUS);
+/* splx(s);*/
+
+
+#if NBM > 0
+ if ((mi_line == SCREEN_LINE_KEYBOARD) && (from_line == SCC_CHANNEL_A) &&
+ !raw && SCREEN_ISA_CONSOLE() && scc->isa_console)
+ return lk201_rint(SCREEN_CONS_UNIT(), c, wait, scc->polling_mode);
+ else
+#endif /* NBM > 0 */
+ return c;
+}
+
+/*
+ * Put a char on a specific SCC line
+ */
+scc_putc(
+ int unit,
+ int line,
+ int c)
+{
+ scc_softc_t scc;
+ register scc_regmap_t *regs;
+ spl_t s = spltty();
+ register int value;
+
+ mi_to_scc(&unit, &line);
+
+ scc = scc_softc[unit];
+ regs = scc->regs;
+
+ do {
+ scc_read_reg(regs, line, SCC_RR0, value);
+ if (value & SCC_RR0_TX_EMPTY)
+ break;
+ delay(100);
+ } while (1);
+
+ scc_write_data(regs, line, c);
+/* wait for it to swallow the char ? */
+
+ splx(s);
+}
+
+scc_param(
+ struct tty *tp,
+ int line)
+{
+ scc_regmap_t *regs;
+ int value, sccline, unit;
+ struct softreg *sr;
+ scc_softc_t scc;
+
+ line = tp->t_dev;
+ /* MI code wants us to handle 4 lines on unit 0 */
+ unit = (line < 4) ? 0 : (line / NSCC_LINE);
+ sccline = line;
+ mi_to_scc(&unit, &sccline);
+
+ if ((scc = scc_softc[unit]) == 0) return; /* sanity */
+ regs = scc->regs;
+
+ sr = &scc->softr[sccline];
+
+ /*
+ * Do not let user fool around with kbd&mouse
+ */
+#if NBM > 0
+ if (screen_captures(line)) {
+ tp->t_ispeed = tp->t_ospeed = B4800;
+ tp->t_flags |= TF_LITOUT;
+ }
+#endif /* NBM > 0 */
+
+ if (tp->t_ispeed == 0) {
+ (void) scc_mctl(tp->t_dev, TM_HUP, DMSET); /* hang up line */
+ return;
+ }
+
+ /* reset line */
+ value = (sccline == SCC_CHANNEL_A) ? SCC_WR9_RESET_CHA_A : SCC_WR9_RESET_CHA_B;
+ scc_write_reg(regs, sccline, SCC_WR9, value);
+ delay(25);
+
+ /* stop bits, normally 1 */
+ value = sr->wr4 & 0xf0;
+ value |= (tp->t_ispeed == B110) ? SCC_WR4_2_STOP : SCC_WR4_1_STOP;
+
+ /* .. and parity */
+ if ((tp->t_flags & (TF_ODDP | TF_EVENP)) == TF_ODDP)
+ value |= SCC_WR4_PARITY_ENABLE;
+
+ /* set it now, remember it must be first after reset */
+ sr->wr4 = value;
+ scc_write_reg(regs, sccline, SCC_WR4, value);
+
+ /* vector again */
+ scc_write_reg(regs, sccline, SCC_WR2, 0xf0);
+
+ /* we only do 8 bits per char */
+ value = SCC_WR3_RX_8_BITS;
+ scc_write_reg(regs, sccline, SCC_WR3, value);
+
+ /* clear break, keep rts dtr */
+ value = sr->wr5 & (SCC_WR5_DTR|SCC_WR5_RTS);
+ value |= SCC_WR5_TX_8_BITS;
+ sr->wr5 = value;
+ scc_write_reg(regs, sccline, SCC_WR5, value);
+ /* some are on the other channel, which might
+ never be used (e.g. maxine has only one line) */
+ {
+ register int otherline = (sccline+1)&1;
+
+ scc_write_reg(regs, otherline, SCC_WR5, scc->softr[otherline].wr5);
+ }
+
+ scc_write_reg(regs, sccline, SCC_WR6, 0);
+ scc_write_reg(regs, sccline, SCC_WR7, 0);
+
+ scc_write_reg(regs, sccline, SCC_WR9, SCC_WR9_VIS);
+
+ scc_write_reg(regs, sccline, SCC_WR10, 0);
+
+ /* clock config */
+ value = SCC_WR11_RCLK_BAUDR | SCC_WR11_XTLK_BAUDR |
+ SCC_WR11_TRc_OUT | SCC_WR11_TRcOUT_BAUDR;
+ scc_write_reg(regs, sccline, SCC_WR11, value);
+
+ value = scc_speeds[tp->t_ispeed];
+ scc_set_timing_base(regs,sccline,value);
+
+ value = sr->wr14;
+ scc_write_reg(regs, sccline, SCC_WR14, value);
+
+#if FLAMINGO
+ if (unit != 1)
+#else
+ if (1)
+#endif
+ {
+ /* Chan-A: CTS==SI DCD==RI DSR=SYNCH */
+ value = SCC_WR15_CTS_IE | SCC_WR15_DCD_IE | SCC_WR15_SYNCHUNT_IE;
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_WR15, value);
+
+ /* Chan-B: CTS==CTS DCD==DCD */
+ value = SCC_WR15_BREAK_IE | SCC_WR15_CTS_IE | SCC_WR15_DCD_IE;
+ scc_write_reg(regs, SCC_CHANNEL_B, SCC_WR15, value);
+ } else {
+ /* Here the modem bits are floating noise, keep quiet */
+ value = SCC_WR15_BREAK_IE;
+ scc_write_reg(regs, sccline, SCC_WR15, value);
+ }
+
+ /* and now the enables */
+ value = SCC_WR3_RX_8_BITS | SCC_WR3_RX_ENABLE;
+ scc_write_reg(regs, sccline, SCC_WR3, value);
+
+ value = sr->wr5 | SCC_WR5_TX_ENABLE;
+ sr->wr5 = value;
+ scc_write_reg(regs, sccline, SCC_WR5, value);
+
+ /* master interrupt enable */
+ scc_write_reg(regs,sccline,SCC_WR9,SCC_WR9_MASTER_IE|SCC_WR9_VIS);
+
+ scc_write_reg(regs, sccline, SCC_WR1, sr->wr1);
+
+}
+
+/*
+ * Modem control functions
+ */
+scc_mctl(
+ int dev,
+ int bits,
+ int how)
+{
+ register scc_regmap_t *regs;
+ struct softreg *sra, *srb, *sr;
+ int unit, sccline;
+ int b = 0;
+ spl_t s;
+ scc_softc_t scc;
+
+ /* MI code wants us to handle 4 lines on unit 0 */
+ unit = (dev < 4) ? 0 : (dev / NSCC_LINE);
+ sccline = dev;
+ mi_to_scc(&unit, &sccline);
+
+ if ((scc = scc_softc[unit]) == 0) return 0; /* sanity */
+ regs = scc->regs;
+
+ sr = &scc->softr[sccline];
+ sra = &scc->softr[SCC_CHANNEL_A];
+ srb = &scc->softr[SCC_CHANNEL_B];
+
+ if (bits == TM_HUP) { /* close line (internal) */
+ bits = TM_DTR | TM_RTS;
+ how = DMBIC;
+ /* xxx interrupts too ?? */
+ }
+
+ if (bits & TM_BRK) {
+ switch (how) {
+ case DMSET:
+ case DMBIS:
+ sr->wr5 |= SCC_WR5_SEND_BREAK;
+ break;
+ case DMBIC:
+ sr->wr5 &= ~SCC_WR5_SEND_BREAK;
+ break;
+ default:
+ goto dontbrk;
+ }
+ s = spltty();
+ scc_write_reg(regs, sccline, SCC_WR5, sr->wr5);
+ splx(s);
+dontbrk:
+ b |= (sr->wr5 & SCC_WR5_SEND_BREAK) ? TM_BRK : 0;
+ }
+
+ /* no modem support on channel A */
+ if (sccline == SCC_CHANNEL_A)
+ return (b | TM_LE | TM_DTR | TM_CTS | TM_CAR | TM_DSR);
+
+ sra = &scc->softr[SCC_CHANNEL_A];
+ srb = &scc->softr[SCC_CHANNEL_B];
+
+#if 0
+ /* do I need to do something on this ? */
+ if (bits & TM_LE) { /* line enable */
+ }
+#endif
+
+ if (bits & (TM_DTR|TM_RTS)) { /* data terminal ready, request to send */
+ register int w = 0;
+
+ if (bits & TM_DTR) w |= SCC_WR5_DTR;
+ if (bits & TM_RTS) w |= SCC_WR5_RTS;
+
+ switch (how) {
+ case DMSET:
+ case DMBIS:
+ sra->wr5 |= w;
+ break;
+ case DMBIC:
+ sra->wr5 &= ~w;
+ break;
+ default:
+ goto dontdtr;
+ }
+ s = spltty();
+ scc_write_reg(regs, SCC_CHANNEL_A, SCC_WR5, sra->wr5);
+ splx(s);
+dontdtr:
+ b |= (sra->wr5 & w) ? (bits & (TM_DTR|TM_RTS)) : 0;
+ }
+
+ s = spltty();
+
+#if 0
+ /* Unsupported */
+ if (bits & TM_ST) { /* secondary transmit */
+ }
+ if (bits & TM_SR) { /* secondary receive */
+ }
+#endif
+
+ if (bits & TM_CTS) { /* clear to send */
+ register int value;
+ scc_read_reg(regs, SCC_CHANNEL_B, SCC_RR0, value);
+ b |= (value & SCC_RR0_CTS) ? TM_CTS : 0;
+ }
+
+ if (bits & TM_CAR) { /* carrier detect */
+ register int value;
+ scc_read_reg(regs, SCC_CHANNEL_B, SCC_RR0, value);
+ b |= (value & SCC_RR0_DCD) ? TM_CAR : 0;
+ }
+
+ if (bits & TM_RNG) { /* ring */
+ register int value;
+ scc_read_reg(regs, SCC_CHANNEL_A, SCC_RR0, value);
+ b |= (value & SCC_RR0_DCD) ? TM_RNG : 0;
+ }
+
+ if (bits & TM_DSR) { /* data set ready */
+ register int value;
+ scc_read_reg(regs, SCC_CHANNEL_A, SCC_RR0, value);
+ b |= (value & SCC_RR0_SYNCH) ? TM_DSR : 0;
+ }
+
+ splx(s);
+
+ return b;
+}
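+
+/*
+ * Illustrative sketch only (editorial addition, not part of the original
+ * driver): a caller such as a tty open/close path would typically drive
+ * scc_mctl as below.  "dev" is whatever minor device the MI code hands us.
+ */
+#if 0
+void
+scc_mctl_example(
+ int dev)
+{
+ /* raise DTR and RTS when the line is opened */
+ (void) scc_mctl(dev, TM_DTR | TM_RTS, DMBIS);
+
+ /* ... and later hang up the line (drops DTR and RTS) */
+ (void) scc_mctl(dev, TM_HUP, DMSET);
+}
+#endif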
+
+#define debug 0
+
+scc_modem_intr(
+ scc_softc_t scc,
+ int chan,
+ int unit)
+{
+ register int value, changed;
+
+ scc_read_reg_zero(scc->regs, chan, value);
+
+ /* See what changed */
+ changed = value ^ scc->last_rr0[chan];
+ scc->last_rr0[chan] = value;
+
+#if debug
+printf("sccmodem: chan %c now %x, changed %x : ",
+ (chan == SCC_CHANNEL_B) ? 'B' : 'A',
+ value, changed);
+#endif
+
+ if (chan == SCC_CHANNEL_A) {
+ if (changed & SCC_RR0_CTS) {
+ /* Speed indicator, ignore XXX */
+#if debug
+printf("%s-speed ", (value & SCC_RR0_CTS) ? "Full" : "Half");
+#endif
+ }
+ if (changed & SCC_RR0_DCD) {
+ /* Ring indicator */
+#if debug
+printf("Ring ");
+#endif
+ }
+ if (changed & SCC_RR0_SYNCH) {
+ /* Data Set Ready */
+#if debug
+printf("DSR ");
+#endif
+ /* If the modem went down then CD will also go down,
+ or it did already.
+ If the modem came up then we have to wait for CD
+ anyway before enabling the line.
+ Either way, there is nothing to do here */
+ }
+ } else {
+ if (changed & SCC_RR0_CTS) {
+ /* Clear To Send */
+#if debug
+printf("CTS ");
+#endif
+ tty_cts(console_tty[scc_to_mi(unit,chan)],
+ value & SCC_RR0_CTS);
+ }
+ if (changed & SCC_RR0_DCD) {
+#if debug
+printf("CD ");
+#endif
+ check_car(console_tty[scc_to_mi(unit,chan)],
+ value & SCC_RR0_DCD);
+ }
+ }
+#if debug
+printf(".\n");
+#endif
+}
+
+private check_car(
+ register struct tty *tp,
+ boolean_t car)
+{
+ if (car) {
+#if notyet
+ /* cancel modem timeout if need to */
+ if (car & (SCC_MSR_CD2 | SCC_MSR_CD3))
+ untimeout(scc_hup, (vm_offset_t)tp);
+#endif
+
+ /* I think this belongs in the MI code */
+ if (tp->t_state & TS_WOPEN)
+ tp->t_state |= TS_ISOPEN;
+ /* carrier present */
+ if ((tp->t_state & TS_CARR_ON) == 0)
+ (void)ttymodem(tp, 1);
+ } else if ((tp->t_state&TS_CARR_ON) && ttymodem(tp, 0) == 0)
+ scc_mctl( tp->t_dev, TM_DTR, DMBIC);
+}
+
+/*
+ * Periodically look at the CD signals:
+ * they do generate interrupts but we
+ * must fake them on channel A. We might
+ * also fake them on channel B.
+ */
+scc_scan()
+{
+ register i;
+ spl_t s = spltty();
+
+ for (i = 0; i < NSCC; i++) {
+ register scc_softc_t scc;
+ register int car;
+ register struct tty **tpp;
+
+ scc = scc_softc[i];
+ if (scc == 0)
+ continue;
+ car = scc->softCAR | scc->fake;
+
+ tpp = &console_tty[i * NSCC_LINE];
+
+ while (car) {
+ if (car & 1)
+ check_car(*tpp, 1);
+ tpp++;
+ car = car>>1;
+ }
+
+ }
+ splx(s);
+ timeout(scc_scan, (vm_offset_t)0, 5*hz);
+}
+
+
+#if debug
+scc_rr0(unit,chan)
+{
+ int val;
+ scc_read_reg_zero(scc_softc[unit]->regs, chan, val);
+ return val;
+}
+
+scc_rreg(unit,chan,n)
+{
+ int val;
+ scc_read_reg(scc_softc[unit]->regs, chan, n, val);
+ return val;
+}
+
+scc_wreg(unit,chan,n,val)
+{
+ scc_write_reg(scc_softc[unit]->regs, chan, n, val);
+}
+
+scc_state(unit,soft)
+{
+ int rr0, rr1, rr3, rr12, rr13, rr15;
+
+ rr0 = scc_rreg(unit, SCC_CHANNEL_A, SCC_RR0);
+ rr1 = scc_rreg(unit, SCC_CHANNEL_A, SCC_RR1);
+ rr3 = scc_rreg(unit, SCC_CHANNEL_A, SCC_RR3);
+ rr12 = scc_rreg(unit, SCC_CHANNEL_A, SCC_RR12);
+ rr13 = scc_rreg(unit, SCC_CHANNEL_A, SCC_RR13);
+ rr15 = scc_rreg(unit, SCC_CHANNEL_A, SCC_RR15);
+ printf("{%d intr, A: R0 %x R1 %x R3 %x baudr %x R15 %x}\n",
+ scc_intr_count, rr0, rr1, rr3,
+ (rr13 << 8) | rr12, rr15);
+
+ rr0 = scc_rreg(unit, SCC_CHANNEL_B, SCC_RR0);
+ rr1 = scc_rreg(unit, SCC_CHANNEL_B, SCC_RR1);
+ rr3 = scc_rreg(unit, SCC_CHANNEL_B, SCC_RR2);
+ rr12 = scc_rreg(unit, SCC_CHANNEL_B, SCC_RR12);
+ rr13 = scc_rreg(unit, SCC_CHANNEL_B, SCC_RR13);
+ rr15 = scc_rreg(unit, SCC_CHANNEL_B, SCC_RR15);
+ printf("{B: R0 %x R1 %x R2 %x baudr %x R15 %x}\n",
+ rr0, rr1, rr3,
+ (rr13 << 8) | rr12, rr15);
+
+ if (soft) {
+ struct softreg *sr;
+ sr = scc_softc[unit]->softr;
+ printf("{B: W1 %x W4 %x W5 %x W14 %x}",
+ sr->wr1, sr->wr4, sr->wr5, sr->wr14);
+ sr++;
+ printf("{A: W1 %x W4 %x W5 %x W14 %x}\n",
+ sr->wr1, sr->wr4, sr->wr5, sr->wr14);
+ }
+}
+
+#endif
+
+#endif /* NSCC > 0 */
diff --git a/chips/screen.c b/chips/screen.c
new file mode 100644
index 00000000..22512815
--- /dev/null
+++ b/chips/screen.c
@@ -0,0 +1,1103 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: screen.c
+ * Author: Alessandro Forin, Robert V. Baron, Joseph S. Barrera,
+ * at Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Generic Screen Driver routines.
+ */
+
+#include <bm.h>
+#if NBM > 0
+#include <dtop.h>
+
+#include <machine/machspl.h> /* spl definitions */
+#include <chips/screen_defs.h>
+
+#include <chips/lk201.h>
+
+#include <mach/std_types.h>
+#include <sys/time.h>
+#include <kern/time_out.h>
+#include <device/io_req.h>
+
+#include <vm/vm_map.h>
+#include <device/ds_routines.h>
+#include <machine/machspl.h>
+
+#define Ctrl(x) ((x)-'@')
+
+#define SCREEN_BLITC_NORMAL 0
+#define SCREEN_BLITC_ROW 1
+#define SCREEN_BLITC_COL 2
+
+#define SCREEN_ASCII_INVALID '\0' /* ascii_screen not valid here */
+
+struct screen_softc screen_softc_data[NBM];
+struct screen_softc *screen_softc[NBM];
+
+short screen_console = 0;
+
+/* Forward decls */
+
+void screen_blitc(
+ int unit,
+ register unsigned char c);
+
+void screen_blitc_at(
+ register screen_softc_t sc,
+ unsigned char c,
+ short row,
+ short col);
+
+
+/*
+ * A "Screen" has a bitmapped display, a keyboard and a mouse.  8-D
+ */
+
+#if NDTOP > 0
+extern int dtop_kbd_probe(), dtop_set_status(), dtop_kbd_reset(),
+ dtop_ring_bell();
+#endif /* NDTOP */
+extern int lk201_probe(), lk201_set_status(), lk201_reset(),
+ lk201_ring_bell();
+
+struct kbd_probe_vector {
+ int (*probe)();
+ int (*set_status)();
+ int (*reset)();
+ int (*beep)();
+} kbd_vector[] = {
+#if NDTOP > 0
+ {dtop_kbd_probe, dtop_set_status, dtop_kbd_reset, dtop_ring_bell},
+#endif
+ {lk201_probe, lk201_set_status, lk201_reset, lk201_ring_bell},
+ {0,}
+};
+
+screen_find_kbd(int unit)
+{
+ struct kbd_probe_vector *p = kbd_vector;
+
+ for (; p->probe; p++) {
+ if ((*p->probe) (unit)) {
+ screen_softc[unit]->kbd_set_status = p->set_status;
+ screen_softc[unit]->kbd_reset = p->reset;
+ screen_softc[unit]->kbd_beep = p->beep;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * The screen probe routine looks for the associated
+ * keyboard and mouse, at the same unit number.
+ */
+screen_probe(int unit)
+{
+ if (unit >= NBM)
+ return 0;
+ screen_softc[unit] = &screen_softc_data[unit];
+ if (!screen_find())
+ return 0;
+ if (!screen_find_kbd(unit))
+ return 0;
+ mouse_probe(unit);
+ return 1;
+}
+
+screen_softc_t
+screen(int unit)
+{
+ return screen_softc[unit];
+}
+
+/*
+ * This is an upcall from the specific display
+ * hardware, to register its descriptor
+ */
+screen_attach(
+ int unit,
+ char **hwp)
+{
+ register screen_softc_t sc = screen_softc[unit];
+
+ sc->hw_state = hwp;
+ sc->blitc_state = SCREEN_BLITC_NORMAL;
+}
+
+/*
+ * This is another upcall (for now) to register
+ * the user-mapped information
+ */
+void
+screen_up(
+ int unit,
+ user_info_t *screen_data)
+{
+ register screen_softc_t sc = screen_softc[unit];
+
+ sc->up = screen_data;
+ mouse_notify_mapped(unit, unit, screen_data);
+ screen_event_init(screen_data);
+ ascii_screen_initialize(sc);
+}
+
+/*
+ * Screen saver
+ */
+#define SSAVER_MIN_TIME (2*60) /* Minimum fade interval */
+long ssaver_last = 0; /* Last tv_sec that the keyboard was touched */
+long ssaver_time = 0; /* Number of seconds before screen is blanked */
+
+void
+ssaver_bump(int unit)
+{
+ register long tnow = time.tv_sec;
+
+ if ((tnow - ssaver_last) > ssaver_time)
+ screen_on_off(unit, TRUE);
+ ssaver_last = tnow;
+}
+
+void
+screen_saver(int unit)
+{
+ register screen_softc_t sc = screen_softc[unit];
+
+ /* wakeup each minute */
+ timeout(screen_saver, unit, hz * 60);
+ if ((time.tv_sec - ssaver_last) >= ssaver_time)
+ /* this does nothing if already off */
+ screen_on_off(unit, FALSE);
+}
+
+/*
+ * Screen open routine. We are also notified
+ * of console operations if our screen is acting
+ * as a console display.
+ */
+screen_open(
+ int unit,
+ boolean_t console_only)
+{
+ register screen_softc_t sc = screen_softc[unit];
+
+ /*
+ * Start screen saver on first (console) open
+ */
+ if (!ssaver_time) {
+ ssaver_time = 10*60; /* 10 minutes to fade */
+ ssaver_bump(unit); /* .. from now */
+ screen_saver(unit); /* Start timer */
+ }
+ /*
+ * Really opening the screen or just notifying ?
+ */
+ if (!console_only) {
+#if 0
+ (*sc->sw.init_colormap)(sc);
+#endif
+ screen_event_init(sc->up);
+ ascii_screen_initialize(sc);
+ (*sc->sw.graphic_open)(sc->hw_state);
+ sc->mapped = TRUE;
+ }
+}
+
+/*
+ * Screen close
+ */
+screen_close(
+ int unit,
+ boolean_t console_only)
+{
+ register screen_softc_t sc = screen_softc[unit];
+
+ /*
+ * Closing of the plain console has no effect
+ */
+ if (!console_only) {
+ user_info_t *up = sc->up;
+
+ screen_default_colors(up);
+ /* mapped info, cursor and colormap resetting */
+ (*sc->sw.graphic_close)(sc);
+
+ /* turn screen on, and blank it */
+ screen_on_off(unit, TRUE);
+ ascii_screen_initialize(sc);
+ (*sc->sw.clear_bitmap)(sc);
+
+ /* position cursor circa page end */
+ up->row = up->max_row - 1;
+ up->col = 0;
+
+ /* set keyboard back our way */
+ (*sc->kbd_reset)(unit);
+ lk201_lights(unit, LED_OFF);
+
+ sc->mapped = FALSE;
+ }
+}
+
+screen_default_colors(
+ user_info_t *up)
+{
+ register int i;
+
+ /* restore bg and fg colors */
+ for (i = 0; i < 3; i++) {
+ up->dev_dep_2.pm.Bg_color[i] = 0x00;
+ up->dev_dep_2.pm.Fg_color[i] = 0xff;
+ }
+}
+
+/*
+ * Write characters to the screen
+ */
+screen_write(
+ int unit,
+ register io_req_t ior)
+{
+ register int count;
+ register unsigned char *data;
+ vm_offset_t addr;
+
+ if (unit == 1) /* no writes to the mouse */
+ return D_INVALID_OPERATION;
+
+ data = (unsigned char*) ior->io_data;
+ count = ior->io_count;
+ if (count == 0)
+ return (D_SUCCESS);
+
+ if (!(ior->io_op & IO_INBAND)) {
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+ kern_return_t kr;
+
+ kr = vm_map_copyout(device_io_map, &addr, copy);
+ if (kr != KERN_SUCCESS)
+ return (kr);
+ data = (unsigned char *) addr;
+ }
+
+ /* Spill chars out, might fault data in */
+ while (count--)
+ screen_blitc(unit, *data++);
+
+ if (!(ior->io_op & IO_INBAND))
+ (void) vm_deallocate(device_io_map, addr, ior->io_count);
+
+ return (D_SUCCESS);
+}
+
+/*
+ * Read from the screen. This really means waiting
+ * for an event, which can be either a keypress on
+ * the keyboard (or pointer) or a mouse movement.
+ * If there are no available events we queue the
+ * request for later.
+ */
+queue_head_t screen_read_queue = { &screen_read_queue, &screen_read_queue };
+boolean_t screen_read_done();
+
+screen_read(
+ int unit,
+ register io_req_t ior)
+{
+ register user_info_t *up = screen_softc[unit]->up;
+ register spl_t s = spltty();
+
+ if (up->evque.q_head != up->evque.q_tail) {
+ splx(s);
+ return (D_SUCCESS);
+ }
+ ior->io_dev_ptr = (char *) up;
+ ior->io_done = screen_read_done;
+ enqueue_tail(&screen_read_queue, (queue_entry_t) ior);
+ splx(s);
+ return (D_IO_QUEUED);
+}
+
+boolean_t
+screen_read_done(
+ register io_req_t ior)
+{
+ register user_info_t *up = (user_info_t *) ior->io_dev_ptr;
+ register spl_t s = spltty();
+
+ if (up->evque.q_head != up->evque.q_tail) {
+ splx(s);
+ (void) ds_read_done(ior);
+ return (TRUE);
+ }
+ enqueue_tail(&screen_read_queue, (queue_entry_t) ior);
+ splx(s);
+ return (FALSE);
+}
+
+static
+screen_event_posted(
+ register user_info_t *up)
+{
+ if (up->evque.q_head != up->evque.q_tail) {
+ register io_req_t ior;
+ while ((ior = (io_req_t)dequeue_head(&screen_read_queue)))
+ iodone(ior);
+ }
+}
+
+boolean_t compress_mouse_events = TRUE;
+
+/*
+ * Upcall from input pointer devices
+ */
+screen_motion_event(
+ int unit,
+ int device,
+ int x,
+ int y)
+{
+ register screen_softc_t sc = screen_softc[unit];
+ register user_info_t *up = sc->up;
+ register unsigned next;
+ unsigned int ev_time;
+
+ /*
+ * Take care of scale/threshold issues
+ */
+ if (device == DEV_MOUSE) {
+ register int scale;
+
+ scale = up->mouse_scale;
+
+ if (scale >= 0) {
+ register int threshold;
+ register boolean_t neg;
+
+ threshold = up->mouse_threshold;
+
+ neg = (x < 0);
+ if (neg) x = -x;
+ if (x >= threshold)
+ x += (x - threshold) * scale;
+ if (neg) x = -x;
+
+ neg = (y < 0);
+ if (neg) y = -y;
+ if (y >= threshold)
+ y += (y - threshold) * scale;
+ if (neg) y = -y;
+
+ }
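+
+ /*
+ * Worked example (editorial note): with mouse_threshold 10 and
+ * mouse_scale 2, a raw delta of 25 becomes 25 + (25 - 10) * 2 = 55,
+ * while deltas below the threshold pass through unchanged.
+ */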
+
+ /* we expect mice in incremental mode */
+ x += up->mouse_loc.x;
+ y += up->mouse_loc.y;
+
+ } else if (device == DEV_TABLET) {
+
+ /* we expect tablets in absolute mode */
+ x = (x * up->dev_dep_2.pm.tablet_scale_x) / 1000;
+ y = ((2200 - y) * up->dev_dep_2.pm.tablet_scale_y) / 1000;
+
+ } /* else who are you */
+
+ /*
+ * Clip if necessary
+ */
+ {
+ register int max;
+
+ if (x > (max = up->max_cur_x))
+ x = max;
+ if (y > (max = up->max_cur_y))
+ y = max;
+ }
+
+ /*
+ * Did it actually move
+ */
+ if ((up->mouse_loc.x == x) &&
+ (up->mouse_loc.y == y))
+ return;
+
+ /*
+ * Update mouse location, and cursor
+ */
+ up->mouse_loc.x = x;
+ up->mouse_loc.y = y;
+
+ screen_set_cursor(sc, x, y);
+
+ /*
+ * Add point to track.
+ */
+ {
+ register screen_timed_point_t *tr;
+
+ /* simply add and overflow if necessary */
+ next = up->evque.t_next;
+ if (next >= MAX_TRACK)
+ next = MAX_TRACK-1;
+ tr = &up->point_track[next++];
+ up->evque.t_next = (next == MAX_TRACK) ? 0 : next;
+
+ ev_time = (unsigned) approx_time_in_msec();
+ tr->time = ev_time;
+ tr->x = x;
+ tr->y = y;
+ }
+
+ /*
+ * Don't post an event if the mouse is within the bounding box.
+ * Note that our y-s are upside down.
+ */
+ if (y < up->mouse_box.bottom &&
+ y >= up->mouse_box.top &&
+ x < up->mouse_box.right &&
+ x >= up->mouse_box.left)
+ return;
+ up->mouse_box.bottom = 0; /* X11 wants it ? */
+
+ /*
+ * Post motion event now
+ */
+#define round(x) ((x) & (MAX_EVENTS - 1))
+ {
+ register unsigned int head = up->evque.q_head;
+ register unsigned int tail = up->evque.q_tail;
+ register screen_event_t *ev;
+
+ if (round(tail + 1) == head) /* queue full, drop it */
+ return;
+
+ /* see if we can avoid queueing too many motion events */
+ next = round(tail - 1);
+ if (compress_mouse_events &&
+ (tail != head) && (next != head)) {
+ ev = & up->event_queue[next];
+ if (ev->type == EVT_PTR_MOTION) {
+ ev->x = x;
+ ev->y = y;
+ ev->time = ev_time;
+ ev->device = device;
+ screen_event_posted(up);
+ return;
+ }
+ }
+ ev = & up->event_queue[tail];
+ ev->type = EVT_PTR_MOTION;
+ ev->time = ev_time;
+ ev->x = x;
+ ev->y = y;
+ ev->device = device;
+
+ /* added to queue */
+ up->evque.q_tail = round(tail + 1);
+ }
+
+ /*
+ * Wakeup any sleepers
+ */
+ screen_event_posted(up);
+}
+
+/*
+ * Upcall from keypress input devices
+ * Returns whether the event was consumed or not.
+ */
+boolean_t
+screen_keypress_event(
+ int unit,
+ int device,
+ int key,
+ int type)
+{
+ register screen_softc_t sc = screen_softc[unit];
+ register user_info_t *up = sc->up;
+ register unsigned int head, tail;
+ register screen_event_t *ev;
+
+ if (!sc->mapped) {
+ int col, row;
+
+ if (device != DEV_MOUSE)
+ return FALSE;
+ /* generate escapes for mouse position */
+ col = up->mouse_loc.x / 8;
+ row = up->mouse_loc.y / 15;
+ mouse_report_position(unit, col, row, key, type);
+ return TRUE;
+ }
+
+ head = up->evque.q_head;
+ tail = up->evque.q_tail;
+
+ if (round(tail + 1) == head) /* queue full */
+ return TRUE;
+
+ ev = & up->event_queue[tail];
+ ev->key = key;
+ ev->type = type;
+ ev->device = device;
+ ev->time = approx_time_in_msec();
+ ev->x = up->mouse_loc.x;
+ ev->y = up->mouse_loc.y;
+
+ up->evque.q_tail = round(tail + 1);
+
+ screen_event_posted(up);
+
+ return TRUE;
+}
+#undef round
+
+/*
+ * Event queue initialization
+ */
+screen_event_init(
+ user_info_t *up)
+{
+ up->evque.q_size = MAX_EVENTS;
+ up->evque.q_head = 0;
+ up->evque.q_tail = 0;
+ up->evque.t_size = MAX_TRACK;
+ up->evque.t_next = 0;
+ up->evque.timestamp = approx_time_in_msec();
+
+}
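+
+/*
+ * Illustrative sketch only (editorial addition, not part of the original
+ * driver): a mapped client such as an X server is expected to drain the
+ * queue along these lines.  The wrap-around arithmetic relies on
+ * MAX_EVENTS being a power of two.
+ */
+#if 0
+static void
+example_drain_events(
+ user_info_t *up)
+{
+ while (up->evque.q_head != up->evque.q_tail) {
+ screen_event_t *ev = &up->event_queue[up->evque.q_head];
+
+ /* ... hand *ev to the application here ... */
+
+ up->evque.q_head = (up->evque.q_head + 1) & (MAX_EVENTS - 1);
+ }
+}
+#endif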
+
+/*
+ * Set/Get status functions.
+ * ...
+ */
+io_return_t
+screen_set_status(
+ int unit,
+ dev_flavor_t flavor,
+ dev_status_t status,
+ natural_t status_count)
+{
+ register screen_softc_t sc = screen_softc[unit];
+ register user_info_t *up = sc->up;
+ io_return_t ret = D_SUCCESS;
+
+/* XXX checks before getting here */
+
+ switch (flavor) {
+
+ case SCREEN_INIT:
+ ascii_screen_initialize(sc);
+ break;
+
+ case SCREEN_ON:
+ screen_on_off(unit, TRUE);
+ break;
+
+ case SCREEN_OFF:
+ screen_on_off(unit, FALSE);
+ break;
+
+ case SCREEN_FADE: {
+ register int tm = * (int *) status;
+
+ untimeout(screen_saver, unit); /* stop everything and */
+ if (tm == -1) /* don't reschedule a fade */
+ break;
+ if (tm < SSAVER_MIN_TIME)
+ tm = SSAVER_MIN_TIME;
+ ssaver_time = tm;
+ ssaver_bump(unit);
+ screen_saver(unit);
+ break;
+ }
+
+ case SCREEN_SET_CURSOR: {
+ screen_point_t *loc = (screen_point_t*) status;
+
+ if (status_count < sizeof(screen_point_t)/sizeof(int))
+ return D_INVALID_SIZE;
+
+ sc->flags |= SCREEN_BEING_UPDATED;
+ up->mouse_loc = *loc;
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+
+ screen_set_cursor(sc, loc->x, loc->y);
+
+ break;
+ }
+
+ /* COMPAT: these codes do nothing, but we understand */
+ case _IO('q', 8): /* KERNLOOP */
+ case _IO('q', 9): /* KERNUNLOOP */
+ case _IO('g', 21): /* KERN_UNLOOP */
+ break;
+
+ /*
+ * Anything else is either device-specific,
+ * or for the keyboard
+ */
+ default:
+ ret = (*sc->sw.set_status)(sc, flavor, status, status_count);
+ if (ret == D_INVALID_OPERATION)
+ ret = (*sc->kbd_set_status)(unit, flavor,
+ status, status_count);
+ break;
+ }
+ return ret;
+}
+
+io_return_t
+screen_get_status(
+ int unit,
+ dev_flavor_t flavor,
+ dev_status_t status,
+ natural_t *count)
+{
+ register screen_softc_t sc = screen_softc[unit];
+
+ if (flavor == SCREEN_STATUS_FLAGS) {
+ *(int *)status = sc->flags;
+ *count = 1;
+ return D_SUCCESS;
+ } else if (flavor == SCREEN_HARDWARE_INFO) {
+ screen_hw_info_t *hinfo;
+
+ hinfo = (screen_hw_info_t*)status;
+ hinfo->frame_width = sc->frame_scanline_width;
+ hinfo->frame_height = sc->frame_height;
+ hinfo->frame_visible_width = sc->frame_visible_width;
+ hinfo->frame_visible_height = sc->frame_visible_height;
+ *count = sizeof(screen_hw_info_t)/sizeof(int);
+ return D_SUCCESS;
+ } else
+
+ return (*sc->sw.get_status)(sc, flavor, status, count);
+}
+
+/*
+ * Routine to handle display and control characters sent to screen
+ */
+void
+screen_blitc(
+ int unit,
+ register unsigned char c)
+{
+ register screen_softc_t sc = screen_softc[unit];
+ register user_info_t *up = sc->up;
+ register unsigned char *ap;
+ register int i;
+
+ /*
+ * Handle cursor positioning sequence
+ */
+ switch (sc->blitc_state) {
+ case SCREEN_BLITC_NORMAL:
+ break;
+
+ case SCREEN_BLITC_ROW:
+ c -= ' ';
+ if (c >= up->max_row) {
+ up->row = up->max_row - 1;
+ } else {
+ up->row = c;
+ }
+ sc->blitc_state = SCREEN_BLITC_COL;
+ return;
+
+ case SCREEN_BLITC_COL:
+ c -= ' ';
+ if (c >= up->max_col) {
+ up->col = up->max_col - 1;
+ } else {
+ up->col = c;
+ }
+ sc->blitc_state = SCREEN_BLITC_NORMAL;
+ goto move_cursor;
+ }
+
+ c &= 0xff;
+
+ /* echo on rconsole line */
+ rcputc(c);
+
+ /* we got something to say, turn on the TV */
+ ssaver_bump(unit);
+
+ switch (c) {
+ /* Locate cursor*/
+ case Ctrl('A'): /* ^A -> cm */
+ sc->blitc_state = SCREEN_BLITC_ROW;
+ return;
+
+ /* Home cursor */
+ case Ctrl('B'): /* ^B -> ho */
+ up->row = 0;
+ up->col = 0;
+ break;
+
+ /* Clear screen */
+ case Ctrl('C'): /* ^C -> cl */
+ up->row = 0;
+ up->col = 0;
+ (*sc->sw.clear_bitmap)(sc);
+ break;
+
+ /* Move forward */
+ case Ctrl('D'): /* ^D -> nd */
+ screen_advance_position(sc);
+ break;
+
+ /* Clear to eol */
+ case Ctrl('E'): /* ^E -> ce */
+ ap = &sc->ascii_screen[up->max_col*up->row + up->col];
+ for (i = up->col; i < up->max_col; i++, ap++) {
+ if (sc->standout || *ap != ' ') {
+ if (sc->standout) {
+ *ap = SCREEN_ASCII_INVALID;
+ } else {
+ *ap = ' ';
+ }
+ screen_blitc_at(sc, ' ', up->row, i);
+ }
+ }
+ return;
+
+ /* Cursor up */
+ case Ctrl('F'): /* ^F -> up */
+ if (up->row != 0) up->row--;
+ break;
+
+ case Ctrl('G'): /* ^G -> bell */
+ (*sc->kbd_beep)(unit);
+ return;
+
+ /* Backspace */
+ case Ctrl('H'): /* ^H -> bs */
+ if (--up->col < 0)
+ up->col = 0;
+ break;
+
+ case Ctrl('I'): /* ^I -> tab */
+ up->col += (8 - (up->col & 0x7));
+ break;
+
+ case Ctrl('J'): /* ^J -> lf */
+ if (up->row+1 >= up->max_row)
+ (*sc->sw.remove_line)(sc, 0);
+ else
+ up->row++;
+ break;
+
+ /* Start rev-video */
+ case Ctrl('K'): /* ^K -> so */
+ sc->standout = 1;
+ return;
+
+ /* End rev-video */
+ case Ctrl('L'): /* ^L -> se */
+ sc->standout = 0;
+ return;
+
+ case Ctrl('M'): /* ^M -> return */
+ up->col = 0;
+ break;
+
+ /* Save cursor position */
+ case Ctrl('N'): /* ^N -> sc */
+ sc->save_col = up->col;
+ sc->save_row = up->row;
+ return;
+
+ /* Restore cursor position */
+ case Ctrl('O'): /* ^O -> rc */
+ up->row = sc->save_row;
+ up->col = sc->save_col;
+ break;
+
+ /* Add blank line */
+ case Ctrl('P'): /* ^P -> al */
+ (*sc->sw.insert_line)(sc, up->row);
+ return;
+
+ /* Delete line */
+ case Ctrl('Q'): /* ^Q -> dl */
+ (*sc->sw.remove_line)(sc, up->row);
+ return;
+
+ default:
+ /*
+ * If the desired character is already there, then don't
+ * bother redrawing it. Always redraw standout-ed chars,
+ * so that we can assume that all cached characters are
+ * un-standout-ed. (This could be fixed.)
+ */
+ ap = &sc->ascii_screen[up->max_col*up->row + up->col];
+ if (sc->standout || c != *ap) {
+ if (sc->standout) {
+ *ap = SCREEN_ASCII_INVALID;
+ } else {
+ *ap = c;
+ }
+ screen_blitc_at(sc, c, up->row, up->col);
+ }
+ screen_advance_position(sc);
+ break;
+ }
+
+move_cursor:
+ screen_set_cursor(sc, up->col*8, up->row*15);
+
+}
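+
+/*
+ * Illustrative sketch only (editorial addition, not part of the original
+ * driver): the cursor-motion protocol above is driven by sending ^A
+ * followed by the row and then the column, each biased by a space.
+ */
+#if 0
+static void
+example_move_cursor(
+ int unit,
+ int row,
+ int col)
+{
+ screen_blitc(unit, Ctrl('A'));
+ screen_blitc(unit, (unsigned char)(row + ' '));
+ screen_blitc(unit, (unsigned char)(col + ' '));
+}
+#endif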
+
+
+/*
+ * Advance current position, wrapping and scrolling when necessary
+ */
+screen_advance_position(
+ register screen_softc_t sc)
+{
+ register user_info_t *up = sc->up;
+
+ if (++up->col >= up->max_col) {
+ up->col = 0 ;
+ if (up->row+1 >= up->max_row) {
+ (*sc->sw.remove_line)(sc, 0);
+ } else {
+ up->row++;
+ }
+ }
+}
+
+
+/*
+ * Routine to display a character at a given position
+ */
+void
+screen_blitc_at(
+ register screen_softc_t sc,
+ unsigned char c,
+ short row,
+ short col)
+{
+ /*
+ * Silently ignore non-printable chars
+ */
+ if (c < ' ' || c > 0xfd)
+ return;
+ (*sc->sw.char_paint)(sc, c, row, col);
+}
+
+/*
+ * Update sc->ascii_screen array after deleting ROW
+ */
+ascii_screen_rem_update(
+ register screen_softc_t sc,
+ int row)
+{
+ register user_info_t *up = sc->up;
+ register unsigned int col_w, row_w;
+ register unsigned char *c, *end;
+
+ /* cache and sanity */
+ col_w = up->max_col;
+ if (col_w > MaxCharCols)
+ col_w = MaxCharCols;
+ row_w = up->max_row;
+ if (row_w > MaxCharRows)
+ row_w = MaxCharRows;
+
+ /* scroll up */
+ c = &sc->ascii_screen[row * col_w];
+ end = &sc->ascii_screen[(row_w-1) * col_w];
+ for (; c < end; c++) /* bcopy ? XXX */
+ *c = *(c + col_w);
+
+ /* blank out the line that entered at the end */
+ c = end;
+ end = &sc->ascii_screen[row_w * col_w];
+ for (; c < end; c++)
+ *c = ' ';
+
+}
+
+/*
+ * Update sc->ascii_screen array after opening new ROW
+ */
+ascii_screen_ins_update(
+ register screen_softc_t sc,
+ int row)
+{
+ register user_info_t *up = sc->up;
+ register unsigned int col_w, row_w;
+ register unsigned char *c, *end;
+
+ /* cache and sanity */
+ col_w = up->max_col;
+ if (col_w > MaxCharCols)
+ col_w = MaxCharCols;
+ row_w = up->max_row;
+ if (row_w > MaxCharRows)
+ row_w = MaxCharRows;
+
+ /* scroll down */
+ c = &sc->ascii_screen[row_w * col_w - 1];
+ end = &sc->ascii_screen[(row + 1) * col_w];
+ for (; c >= end; c--)
+ *c = *(c - col_w);
+
+ /* blank out the line that entered at row */
+ c = end - 1;
+ end = &sc->ascii_screen[row * col_w];
+ for (; c >= end; c--)
+ *c = ' ';
+}
+
+/*
+ * Init charmap
+ */
+ascii_screen_fill(
+ register screen_softc_t sc,
+ char c)
+{
+ register user_info_t *up = sc->up;
+ register int i, to;
+
+ to = up->max_row * up->max_col;
+ for (i = 0; i < to; i++) {
+ sc->ascii_screen[i] = c;
+ }
+}
+
+ascii_screen_initialize(
+ register screen_softc_t sc)
+{
+ ascii_screen_fill(sc, SCREEN_ASCII_INVALID);
+}
+
+/*
+ * Cursor positioning
+ */
+screen_set_cursor(
+ register screen_softc_t sc,
+ register int x,
+ register int y)
+{
+ register user_info_t *up = sc->up;
+
+ /* If we are called from interrupt level.. */
+ if (sc->flags & SCREEN_BEING_UPDATED)
+ return;
+ sc->flags |= SCREEN_BEING_UPDATED;
+ /*
+ * Note that that was not atomic, but this is
+ * a two-party game on the same processor and
+ * not a real parallel program.
+ */
+
+ /* Sanity checks (ignore noise) */
+ if (y < up->min_cur_y || y > up->max_cur_y)
+ y = up->cursor.y;
+ if (x < up->min_cur_x || x > up->max_cur_x)
+ x = up->cursor.x;
+
+ /*
+ * Track cursor position
+ */
+ up->cursor.x = x;
+ up->cursor.y = y;
+
+ (*sc->sw.pos_cursor)(*(sc->hw_state), x, y);
+
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+}
+
+screen_on_off(
+ int unit,
+ boolean_t on)
+{
+ register screen_softc_t sc = screen_softc[unit];
+
+ if (sc->sw.video_on == 0) /* sanity */
+ return;
+
+ if (on)
+ (*sc->sw.video_on)(sc->hw_state, sc->up);
+ else
+ (*sc->sw.video_off)(sc->hw_state, sc->up);
+}
+
+screen_enable_vretrace(
+ int unit,
+ boolean_t on)
+{
+ register screen_softc_t sc = screen_softc[unit];
+ (*sc->sw.intr_enable)(sc->hw_state, on);
+}
+
+/*
+ * For our purposes, time does not need to be
+ * precise but just monotonic and approximate
+ * to about the millisecond. Instead of div/
+ * mul by 1000 we div/mul by 1024 (shifting).
+ *
+ * Well, it almost worked.  The only problem
+ * is that X somehow checks the time against
+ * gettimeofday() and turns the screen off at
+ * startup if we use the approximate time.  So
+ * we are back to precise time, sigh.
+ */
+approx_time_in_msec()
+{
+#if 0
+ return ((time.seconds << 10) + (time.microseconds >> 10));
+#else
+ return ((time.seconds * 1000) + (time.microseconds / 1000));
+#endif
+}
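+
+/*
+ * Editorial note: the shift form above counts 1024 "milliseconds" per
+ * second and divides microseconds by 1024 instead of 1000, so it runs
+ * roughly 2.4% fast relative to gettimeofday(), which is presumably
+ * what upset X.
+ */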
+
+/*
+ * Screen mapping to user space
+ * This is called on a per-page basis
+ */
+screen_mmap(
+ int dev,
+ vm_offset_t off,
+ int prot)
+{
+ /* dev is safe, but it is the mouse's one */
+ register screen_softc_t sc = screen_softc[dev-1];
+
+ (*sc->sw.map_page)(sc, off, prot);
+}
+
+#endif /* NBM > 0 */
diff --git a/chips/screen.h b/chips/screen.h
new file mode 100644
index 00000000..69154d49
--- /dev/null
+++ b/chips/screen.h
@@ -0,0 +1,289 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: screen.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Definitions for the Generic Screen Driver.
+ */
+
+/*
+ * Most of these structures are defined so that the
+ * resulting structure mapped to user space appears
+ * to be compatible with the one used by the DEC X
+ * servers (pm_info..). Keep it that way and the
+ * X servers will keep on running.
+ */
+
+/*
+ * Generic structures and defines
+ */
+
+/* colors */
+typedef struct {
+ unsigned short red;
+ unsigned short green;
+ unsigned short blue;
+} color_map_t;
+
+typedef struct {
+ short unused;
+ unsigned short index;
+ color_map_t value;
+} color_map_entry_t;
+
+typedef struct {
+ unsigned int Bg_rgb[3];
+ unsigned int Fg_rgb[3];
+} cursor_color_t;
+
+/* generic input event */
+typedef struct {
+ short x; /* x position */
+ short y; /* y position */
+ unsigned int time; /* 1 millisecond units */
+
+ unsigned char type; /* button up/down/raw or motion */
+# define EVT_BUTTON_UP 0
+# define EVT_BUTTON_DOWN 1
+# define EVT_BUTTON_RAW 2
+# define EVT_PTR_MOTION 3
+
+ unsigned char key; /* the key (button only) */
+# define KEY_LEFT_BUTTON 1
+# define KEY_MIDDLE_BUTTON 2
+# define KEY_RIGHT_BUTTON 3
+# define KEY_TBL_LEFT_BUTTON 0
+# define KEY_TBL_FRONT_BUTTON 1
+# define KEY_TBL_RIGHT_BUTTON 2
+# define KEY_TBL_BACK_BUTTON 3
+
+ unsigned char index; /* which instance of device */
+
+ unsigned char device; /* which device */
+# define DEV_NULL 0
+# define DEV_MOUSE 1
+# define DEV_KEYBD 2
+# define DEV_TABLET 3
+# define DEV_AUX 4
+# define DEV_CONSOLE 5
+# define DEV_KNOB 8
+# define DEV_JOYSTICK 9
+
+} screen_event_t;
+
+/* timed coordinate info */
+typedef struct {
+ unsigned int time;
+ short x, y;
+} screen_timed_point_t;
+
+/* queue of input events, and ring of mouse motions track */
+typedef struct {
+ screen_event_t *events;
+ unsigned int q_size;
+ unsigned int q_head;
+ unsigned int q_tail;
+ unsigned long timestamp;
+ screen_timed_point_t *track;
+ unsigned int t_size;
+ unsigned int t_next;
+} screen_evque_t;
+
+/* mouse/cursor position */
+typedef struct {
+ short x;
+ short y;
+} screen_point_t;
+
+/* mouse motion bounding boxes */
+typedef struct {
+ short bottom;
+ short right;
+ short left;
+ short top;
+} screen_rect_t;
+
+/*
+ * Here it is, each field is marked as
+ *
+ * Kset : kernel sets it unconditionally
+ * Kuse : kernel uses it, safely
+ * Kdep : kernel might depend on it
+ */
+typedef struct {
+ screen_evque_t evque; /* Kset, Kuse */
+ short mouse_buttons; /* Kset */
+ screen_point_t xx3 /*tablet*/;
+ short xx4 /*tswitches*/;
+ screen_point_t cursor; /* Kset */
+ short row; /* Kdep */
+ short col; /* Kdep */
+ short max_row; /* Kdep */
+ short max_col; /* Kdep */
+ short max_x; /* Kset */
+ short max_y; /* Kset */
+ short max_cur_x; /* Kdep */
+ short max_cur_y; /* Kdep */
+ int version; /* Kset */
+ union {
+ struct {
+ unsigned char * bitmap; /* Kset */
+ short * x16 /*scanmap*/;
+ short * x17 /*cursorbits*/;
+ short * x18 /*pmaddr*/;
+ unsigned char * planemask; /* Kset */
+ } pm;
+ struct {
+ int x15 /* flags */;
+ int * gram /* Kset */;
+ int * rb_addr /* Kset */;
+ int rb_phys /* Kset */;
+ int rb_size /* Kset */;
+ } gx;
+ } dev_dep_1;
+ screen_point_t mouse_loc; /* Kdep */
+ screen_rect_t mouse_box; /* Kdep */
+ short mouse_threshold;/* Kuse */
+ short mouse_scale; /* Kuse */
+ short min_cur_x; /* Kdep */
+ short min_cur_y; /* Kdep */
+ union {
+ struct {
+ int x26 /*dev_type*/;
+ char * x27 /*framebuffer*/;
+ char * x28 /*volatile struct bt459 *bt459*/;
+ int x29 /*slot*/;
+ char cursor_sprite[1024];/* Kset */
+ unsigned char Bg_color[3]; /* Kset */
+ unsigned char Fg_color[3]; /* Kset */
+ int tablet_scale_x; /* Kuse */
+ int tablet_scale_y; /* Kuse */
+ } pm;
+ struct {
+ char * gxo /* Kset */;
+ char stamp_width /* Kset */;
+ char stamp_height /* Kset */;
+ char nplanes /* Kset */;
+ char x27_4 /* n10_present */;
+ char x28_1 /* dplanes */;
+ char zplanes /* Kset */;
+ char zzplanes /* Kset */;
+ unsigned char cursor_sprite[1024] /* Kuse */;
+ char x285_0 /* padding for next, which was int */;
+ unsigned char Fg_color[4] /* Kuse */;
+ unsigned char Bg_color[4] /* Kuse */;
+ unsigned short cmap_index /* Kuse */;
+ unsigned short cmap_count /* Kuse */;
+ unsigned int colormap[256] /* Kuse */;
+ int * stic_dma_rb /* Kset */;
+ int * stic_reg /* Kset */;
+ int ptpt_phys /* Kdep */;
+ int ptpt_size /* Kdep */;
+ int * ptpt_pgin /* Kset */;
+ } gx;
+ } dev_dep_2;
+ short frame_scanline_width; /* in pixels, Kset */
+ short frame_height; /* in scanlines, Kset */
+ /*
+ * Event queues are allocated right after that
+ */
+#define MAX_EVENTS 64
+#define MAX_TRACK 100
+ screen_event_t event_queue[MAX_EVENTS]; /* Kset */
+ screen_timed_point_t point_track[MAX_TRACK]; /* Kset */
+ /*
+ * Some like it hot
+ */
+ unsigned int event_id;
+ int interrupt_info;
+} user_info_t;
+
+
+/*
+ * Screen get_status codes and arguments
+ */
+#include <sys/ioctl.h>
+
+ /* Get size (and offset) of mapped info */
+#define SCREEN_GET_OFFSETS _IOR('q', 6, unsigned **)
+
+ /* Get screen status flags */
+#define SCREEN_STATUS_FLAGS _IOR('q', 22, int *)
+# define MONO_SCREEN 0x01
+# define COLOR_SCREEN 0x02
+# define SCREEN_BEING_UPDATED 0x04
+
+/*
+ * Screen set_status codes and arguments
+ */
+
+ /* start/stop screen saver, control fading interval */
+#define SCREEN_FADE _IOW('q', 114, int) /* fade screen */
+# define NO_FADE -1
+
+ /* Turn video on/off manually */
+#define SCREEN_ON _IO('q', 10)
+#define SCREEN_OFF _IO('q', 11)
+
+ /* Fixup pointers inside mapped info structure */
+#define SCREEN_ADJ_MAPPED_INFO _IOR('q', 1, user_info_t *)
+
+ /* Initialize anything that needs to, hw-wise */
+#define SCREEN_INIT _IO('q', 4)
+
+ /* Position cursor to a specific spot */
+#define SCREEN_SET_CURSOR _IOW('q', 2, screen_point_t)
+
+ /* Load Bg/Fg colors for cursor */
+#define SCREEN_SET_CURSOR_COLOR _IOW('q', 3, cursor_color_t)
+
+ /* Load cursor sprite, small cursor form */
+typedef unsigned short cursor_sprite_t[32];
+
+#define SCREEN_LOAD_CURSOR _IOW('q', 7, cursor_sprite_t)
+
+ /* Load cursor sprite, large 64x64 cursor form */
+typedef char cursor_sprite_long_t[1024];
+
+#define SCREEN_LOAD_CURSOR_LONG _IOW('q', 13, cursor_sprite_long_t)
+
+ /* Modify a given entry in the color map (VDAC) */
+#define SCREEN_SET_CMAP_ENTRY _IOW('q', 12, color_map_entry_t)
+
+ /* Return some other information about hardware (optional) */
+typedef struct {
+ int frame_width;
+ int frame_height;
+ int frame_visible_width;
+ int frame_visible_height;
+} screen_hw_info_t;
+#define SCREEN_HARDWARE_INFO _IOR('q', 23, screen_hw_info_t)
+
+ /* Screen-dependent, unspecified (and despised) */
+#define SCREEN_HARDWARE_DEP _IO('q', 24)
+
diff --git a/chips/screen_defs.h b/chips/screen_defs.h
new file mode 100644
index 00000000..083e11e5
--- /dev/null
+++ b/chips/screen_defs.h
@@ -0,0 +1,97 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: screen_defs.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 11/90
+ *
+ * Definitions for the Generic Screen Driver.
+ */
+
+#include <chips/screen.h>
+#include <chips/screen_switch.h>
+#include <device/device_types.h>
+
+/*
+ * Driver state
+ */
+typedef struct screen_softc {
+ user_info_t *up;
+ char **hw_state; /* semi-opaque */
+
+ struct screen_switch sw;
+
+ /* should also be a switch */
+ io_return_t (*kbd_set_status)();
+ int (*kbd_reset)();
+ int (*kbd_beep)();
+
+ char flags;
+ char mapped;
+ char blitc_state;
+ char standout;
+ short save_row;
+ short save_col;
+ /*
+ * Eventually move here all that is Kdep in the user structure,
+ * to avoid crashing because of a bogus graphic server
+ */
+ short frame_scanline_width; /* in pixels */
+ short frame_height; /* in scanlines */
+ short frame_visible_width; /* in pixels */
+ short frame_visible_height; /* in pixels */
+
+/* This is used by all screens, therefore it is sized maximally */
+# define MaxCharRows 68 /* 2DA screen & PMAG-AA */
+# define MaxCharCols 160 /* PMAG-AA */
+# define MinCharRows 57 /* pmax */
+ unsigned char ascii_screen[MaxCharRows*MaxCharCols];
+
+} *screen_softc_t;
+
+extern screen_softc_t screen(/* int unit */);
+
+/*
+ * This global says if we have a graphic console
+ * and where it is and if it is enabled
+ */
+extern short screen_console;
+#define SCREEN_CONS_ENBL (0x0100)
+#define SCREEN_ISA_CONSOLE() (screen_console & SCREEN_CONS_ENBL)
+#define SCREEN_CONS_UNIT() (screen_console & 0x00ff)
+
+/*
+ * A graphic screen needs a keyboard and a mouse/tablet
+ */
+#define SCREEN_LINE_KEYBOARD 0
+#define SCREEN_LINE_POINTER 1
+#define SCREEN_LINE_OTHER (-1)
+
+/* kernel font */
+#define KfontWidth 8
+#define KfontHeight 15
+extern unsigned char kfont_7x14[];
+
diff --git a/chips/screen_switch.c b/chips/screen_switch.c
new file mode 100644
index 00000000..e216a534
--- /dev/null
+++ b/chips/screen_switch.c
@@ -0,0 +1,154 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: screen_switch.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Autoconfiguration code for the Generic Screen Driver.
+ */
+
+#include <platforms.h>
+
+#if defined(DECSTATION) || defined(FLAMINGO)
+#include <fb.h>
+#include <gx.h>
+#include <cfb.h>
+#include <mfb.h>
+#include <xcfb.h>
+#include <sfb.h>
+#endif
+
+#ifdef VAXSTATION
+#define NGX 0
+#define NCFB 0
+#define NXCFB 0
+#endif
+
+#include <chips/screen_switch.h>
+
+/* Used where nothing needs to be done */
+int screen_noop()
+{
+ return 0;
+}
+
+/*
+ * Vector of graphic interface drivers to probe.
+ * Zero terminate this list.
+ */
+
+
+#if NGX > 0
+extern int gq_probe(), gq_cold_init();
+extern unsigned int gq_mem_need();
+
+extern int ga_probe(), ga_cold_init();
+extern unsigned int ga_mem_need();
+#endif /* NGX > 0 */
+
+#if NCFB > 0
+extern int cfb_probe(), cfb_cold_init();
+extern unsigned int pm_mem_need();
+#endif /* NCFB > 0 */
+
+#if NMFB > 0
+extern int fb_probe(), fb_cold_init();
+extern unsigned int pm_mem_need();
+#endif /* NMFB > 0 */
+
+#if NXCFB > 0
+extern int xcfb_probe(), xcfb_cold_init();
+extern unsigned int pm_mem_need();
+#endif /* NXCFB > 0 */
+
+#if NSFB > 0
+extern int sfb_probe(), sfb_cold_init();
+extern unsigned int pm_mem_need();
+#endif /* NSFB > 0 */
+
+#if NFB > 0
+extern int pm_probe(), pm_cold_init();
+extern unsigned int pm_mem_need();
+#endif /* NFB > 0 */
+
+struct screen_probe_vector screen_probe_vector[] = {
+
+#if NGX > 0
+ gq_probe, gq_mem_need, gq_cold_init, /* 3max 3D color option */
+ ga_probe, ga_mem_need, ga_cold_init, /* 3max 2D color option */
+#endif /* NGX > 0 */
+
+#if NSFB > 0
+ sfb_probe, pm_mem_need, sfb_cold_init, /* Smart frame buffer */
+#endif /* NSFB > 0 */
+
+#if NMFB > 0
+ fb_probe, pm_mem_need, fb_cold_init, /* 3max/3min 1D(?) mono option */
+#endif /* NMFB > 0 */
+
+#if NCFB > 0
+ cfb_probe, pm_mem_need, cfb_cold_init, /* 3max 1D(?) color option */
+#endif /* NCFB > 0 */
+
+#if NXCFB > 0
+ xcfb_probe, pm_mem_need, xcfb_cold_init,/* MAXine frame buffer */
+#endif /* NXCFB > 0 */
+
+#if NFB > 0
+ pm_probe, pm_mem_need, pm_cold_init, /* "pm" mono/color (pmax) */
+#endif
+ 0,
+};
+
+char *screen_data; /* opaque */
+
+int screen_find()
+{
+ struct screen_probe_vector *p = screen_probe_vector;
+ for (;p->probe; p++)
+ if ((*p->probe)()) {
+ (*p->setup)(0/*XXX*/, screen_data);
+ return 1;
+ }
+ return 0;
+}
+
+unsigned int
+screen_memory_alloc(avail)
+ char *avail;
+{
+ struct screen_probe_vector *p = screen_probe_vector;
+ int size;
+ for (; p->probe; p++)
+ if ((*p->probe) ()) {
+ screen_data = avail;
+ size = (*p->alloc) ();
+ bzero(screen_data, size);
+ return size;
+ }
+ return 0;
+
+}
+
diff --git a/chips/screen_switch.h b/chips/screen_switch.h
new file mode 100644
index 00000000..a84b4e7f
--- /dev/null
+++ b/chips/screen_switch.h
@@ -0,0 +1,85 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: screen_switch.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Definitions of things that must be tailored to
+ * specific hardware boards for the Generic Screen Driver.
+ */
+
+#ifndef SCREEN_SWITCH_H
+#define SCREEN_SWITCH_H 1
+
+/*
+ * List of probe routines, scanned at cold-boot time
+ * to see which, if any, graphic display is available.
+ * This is done before autoconf, so that printing on
+ * the console works early on. The alloc routine is
+ * called only on the first device that answers.
+ * Ditto for the setup routine, called later on.
+ */
+struct screen_probe_vector {
+ int (*probe)();
+ unsigned int (*alloc)();
+ int (*setup)();
+};
+
+/*
+ * Low-level operations on the graphic device, used
+ * by the otherwise device-independent interface code
+ */
+struct screen_switch {
+ int (*graphic_open)(); /* when X11 opens */
+ int (*graphic_close)(); /* .. or closes */
+ int (*set_status)(); /* dev-specific ops */
+ int (*get_status)(); /* dev-specific ops */
+ int (*char_paint)(); /* blitc */
+ int (*pos_cursor)(); /* cursor positioning */
+ int (*insert_line)(); /* ..and scroll down */
+ int (*remove_line)(); /* ..and scroll up */
+ int (*clear_bitmap)(); /* blank screen */
+ int (*video_on)(); /* screen saver */
+ int (*video_off)();
+ int (*intr_enable)();
+ int (*map_page)(); /* user-space mapping */
+};
+
+/*
+ * Each graphic device needs page-aligned memory
+ * to be mapped in user space later (for events
+ * and such). Size and content of this memory
+ * is unfortunately device-dependent, even
+ * though it need not be.
+ */
+extern char *screen_data;
+
+extern struct screen_probe_vector screen_probe_vector[];
+
+extern int screen_noop(), screen_find();
+
+#endif /* SCREEN_SWITCH_H */
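For a board that only needs text output, most of the switch can point at screen_noop (declared above); the sfb_sw table in chips/sfb_hdw.c further down in this diff is a complete, real instance. A minimal hypothetical sketch, with invented xfb_* names:

extern int screen_noop();
extern int xfb_blitc(), xfb_pos_cursor(), xfb_clear();

struct screen_switch xfb_sw = {
	screen_noop,		/* graphic_open */
	screen_noop,		/* graphic_close */
	screen_noop,		/* set_status */
	screen_noop,		/* get_status */
	xfb_blitc,		/* char_paint */
	xfb_pos_cursor,		/* pos_cursor */
	screen_noop,		/* insert_line */
	screen_noop,		/* remove_line */
	xfb_clear,		/* clear_bitmap */
	screen_noop,		/* video_on */
	screen_noop,		/* video_off */
	screen_noop,		/* intr_enable */
	screen_noop		/* map_page */
};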
diff --git a/chips/serial_console.c b/chips/serial_console.c
new file mode 100644
index 00000000..7b15dd4c
--- /dev/null
+++ b/chips/serial_console.c
@@ -0,0 +1,694 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: serial_console.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 7/91
+ *
+ * Console driver for serial-line based consoles.
+ */
+
+#include <constty.h>
+#if NCONSTTY > 0
+#include <bm.h>
+#include <platforms.h>
+
+#include <mach_kdb.h>
+
+#include <machine/machspl.h> /* spl definitions */
+#include <device/io_req.h>
+#include <device/tty.h>
+#include <sys/syslog.h>
+
+#include <chips/busses.h>
+#include <chips/screen_defs.h>
+#include <chips/serial_defs.h>
+
+#ifdef DECSTATION
+#include <mips/prom_interface.h>
+#define find_rconsole(p) dec_check_rcline(p)
+#define CONSOLE_SERIAL_LINE_NO 3
+#endif /*DECSTATION*/
+
+#ifdef VAXSTATION
+#define find_rconsole(p)
+#define cnputc ser_putc
+#define cngetc ser_getc
+#define cnpollc ser_pollc
+#define cnmaygetc ser_maygetc
+#define CONSOLE_SERIAL_LINE_NO 3
+#endif /*VAXSTATION*/
+
+#ifdef FLAMINGO
+#define CONSOLE_SERIAL_LINE_NO 3
+#endif
+
+#ifndef CONSOLE_SERIAL_LINE_NO
+#define CONSOLE_SERIAL_LINE_NO 0
+#endif
+
+/* Size this as max possible number of lines in any serial chip we might use */
+static struct tty console_tty_data[NCONSTTY];
+struct tty *console_tty[NCONSTTY]; /* exported */
+
+#define DEFAULT_SPEED B9600
+#define DEFAULT_FLAGS (TF_EVENP|TF_ODDP|TF_ECHO)
+
+
+/*
+ * A machine MUST have a console. In our case
+ * things are a little complicated by the graphic
+ * display: people expect it to be their "console",
+ * but we'd like to be able to live without it.
+ * This is not to be confused with the "rconsole" thing:
+ * that just duplicates the console I/O to
+ * another place (for debugging/logging purposes).
+ *
+ * There is then another historical kludge: if
+ * there is a graphic display it is assumed that
+ * the minor "1" is the mouse, with some more
+ * magic attached to it. And again, one might like to
+ * use the serial line 1 as a regular one.
+ *
+ */
+#define user_console 0
+
+int console = 0;
+
+int (*console_probe)() = 0,
+ (*console_param)() = 0,
+ (*console_start)() = 0,
+ (*console_putc)() = 0,
+ (*console_getc)() = 0,
+ (*console_pollc)() = 0,
+ (*console_mctl)() = 0,
+ (*console_softCAR)() = 0;
+
+/*
+ * Lower-level (internal) interfaces, for printf and gets
+ */
+int cnunit = 0; /* which unit owns the 'console' */
+int cnline = 0; /* which line of that unit */
+int rcline = 3; /* alternate, "remote console" line */
+
+rcoff()
+{
+ spl_t s = splhigh();
+ cnpollc(FALSE);
+ rcline = 0;
+ cnpollc(TRUE);
+ splx(s);
+}
+
+rcputc(c)
+{
+ if (rcline)
+ (*console_putc)( cnunit, rcline, c);
+}
+
+cnputc(c)
+{
+#if NBM > 0
+ if (SCREEN_ISA_CONSOLE()) {
+ /* this does its own rcputc */
+ screen_blitc(SCREEN_CONS_UNIT(), c);
+ } else
+#endif /* NBM > 0 */
+ {
+ rcputc(c);
+ (*console_putc)( cnunit, cnline, c);/* insist on a console still */
+ }
+ if (c == '\n')
+ cnputc('\r');
+}
+
+cngetc()
+{
+ return (*console_getc)( cnunit, cnline, TRUE, FALSE);
+}
+
+cnpollc(bool)
+{
+ (*console_pollc)(cnunit, bool);
+}
+
+
+/* Debugger support */
+cnmaygetc()
+{
+ return (*console_getc)( cnunit, cnline, FALSE, FALSE);
+}
+
+
+#if NBM > 0
+boolean_t
+screen_captures(line)
+ register int line;
+{
+ return (SCREEN_ISA_CONSOLE() &&
+ ((line == SCREEN_LINE_KEYBOARD) ||
+ (line == SCREEN_LINE_POINTER)));
+}
+#endif
+
+/*
+ * Higher level (external) interface, for GP use
+ */
+
+
+/*
+ * This is basically a special form of autoconf,
+ * to get printf() going before true autoconf.
+ */
+cons_find(tube)
+ boolean_t tube;
+{
+ static struct bus_device d;
+ register int i;
+ struct tty *tp;
+
+ for (i = 0; i < NCONSTTY; i++)
+ console_tty[i] = &console_tty_data[i];
+ /* the hardware device will set tp->t_addr for valid ttys */
+
+ d.unit = 0;
+
+ if ((console_probe == 0) ||
+ ((*console_probe)(0, &d) == 0)) {
+ /* we have no console, but maybe that's ok */
+#if defined(DECSTATION) || defined(FLAMINGO)
+ /* no, it is not */
+ dprintf("%s", "no console!\n");
+ halt();
+#endif
+ return 0;
+ }
+
+ /*
+ * Remote console line
+ */
+ find_rconsole(&rcline);
+
+ /*
+ * Console always on unit 0. Fix if you need to
+ */
+ cnunit = 0;
+
+#if NBM > 0
+ if (tube && screen_probe(0)) {
+
+ /* associate screen to console iff */
+ if (console == user_console)
+ screen_console = cnunit | SCREEN_CONS_ENBL;
+ cnline = SCREEN_LINE_KEYBOARD;
+
+ /* mouse and keybd */
+ tp = console_tty[SCREEN_LINE_KEYBOARD];
+ tp->t_ispeed = B4800;
+ tp->t_ospeed = B4800;
+ tp->t_flags = TF_LITOUT|TF_EVENP|TF_ECHO|TF_XTABS|TF_CRMOD;
+ tp->t_dev = SCREEN_LINE_KEYBOARD;
+ (*console_param)(tp, SCREEN_LINE_KEYBOARD);
+
+ tp = console_tty[SCREEN_LINE_POINTER];
+ tp->t_ispeed = B4800;
+ tp->t_ospeed = B4800;
+ tp->t_flags = TF_LITOUT|TF_ODDP;
+ tp->t_dev = SCREEN_LINE_POINTER;
+ (*console_param)(tp, SCREEN_LINE_POINTER);
+ /* console_scan will turn on carrier */
+
+ } else {
+#endif /* NBM > 0 */
+ /* use non-graphic console as console */
+ cnline = CONSOLE_SERIAL_LINE_NO;
+
+ tp = console_tty[cnline];
+ tp->t_ispeed = B9600;
+ tp->t_ospeed = B9600;
+ tp->t_flags = TF_LITOUT|TF_EVENP|TF_ECHO|TF_XTABS|TF_CRMOD;
+ (*console_softCAR)(cnunit, cnline, TRUE);
+ console = cnline;
+ tp->t_dev = console;
+ (*console_param)(tp, SCREEN_LINE_OTHER);
+#if NBM > 0
+ }
+
+ /*
+ * Enable rconsole interrupts for KDB
+ */
+ if (tube && rcline != cnline) {
+ tp = console_tty[rcline];
+ tp->t_ispeed = B9600;
+ tp->t_ospeed = B9600;
+ tp->t_flags = TF_LITOUT|TF_EVENP|TF_ECHO|TF_XTABS|TF_CRMOD;
+ tp->t_dev = rcline;
+ (*console_softCAR)(cnunit, rcline, TRUE);
+ (*console_param)(tp, SCREEN_LINE_OTHER);
+ } else
+ rcline = 0;
+#endif /* NBM > 0 */
+}
+
+/*
+ * Open routine
+ */
+extern int
+ cons_start(struct tty *),
+ cons_stop(struct tty *, int),
+ cons_mctl(struct tty *, int, int);
+
+cons_open(dev, flag, ior)
+ int dev;
+ int flag;
+ io_req_t ior;
+{
+ register struct tty *tp;
+ register int ttyno;
+
+ if (dev == user_console)
+ dev = console;
+
+ ttyno = dev;
+ if (ttyno >= NCONSTTY)
+ return D_NO_SUCH_DEVICE;
+ tp = console_tty[ttyno];
+
+ /* But was it there at probe time */
+ if (tp->t_addr == 0)
+ return D_NO_SUCH_DEVICE;
+
+ tp->t_start = cons_start;
+ tp->t_stop = cons_stop;
+ tp->t_mctl = cons_mctl;
+
+#if NBM > 0
+ if (screen_captures(ttyno))
+ screen_open(SCREEN_CONS_UNIT(), ttyno==SCREEN_LINE_KEYBOARD);
+#endif /* NBM > 0 */
+
+ if ((tp->t_state & TS_ISOPEN) == 0) {
+ if (tp->t_ispeed == 0) {
+ tp->t_ispeed = DEFAULT_SPEED;
+ tp->t_ospeed = DEFAULT_SPEED;
+ tp->t_flags = DEFAULT_FLAGS;
+ }
+ tp->t_dev = dev;
+ (*console_param)(tp, ttyno);
+ }
+
+ return (char_open(dev, tp, flag, ior));
+}
+
+
+/*
+ * Close routine
+ */
+cons_close(dev, flag)
+ int dev;
+{
+ register struct tty *tp;
+ register int ttyno;
+ spl_t s;
+
+ if (dev == user_console)
+ dev = console;
+
+ ttyno = dev;
+
+#if NBM > 0
+ if (screen_captures(ttyno))
+ screen_close(SCREEN_CONS_UNIT(), ttyno==SCREEN_LINE_KEYBOARD);
+#endif /* NBM > 0 */
+
+ tp = console_tty[ttyno];
+
+ s = spltty();
+ simple_lock(&tp->t_lock);
+
+ ttyclose(tp);
+
+ simple_unlock(&tp->t_lock);
+ splx(s);
+}
+
+cons_read(dev, ior)
+ int dev;
+ register io_req_t ior;
+{
+ register struct tty *tp;
+ register ttyno;
+
+ if (dev == user_console)
+ dev = console;
+
+ ttyno = dev;
+#if NBM > 0
+ if (SCREEN_ISA_CONSOLE() && (ttyno == SCREEN_LINE_POINTER))
+ return screen_read(SCREEN_CONS_UNIT(), ior);
+#endif /* NBM > 0 */
+
+ tp = console_tty[ttyno];
+ return char_read(tp, ior);
+}
+
+
+cons_write(dev, ior)
+ int dev;
+ register io_req_t ior;
+{
+ register struct tty *tp;
+ register ttyno;
+
+ if (dev == user_console)
+ dev = console;
+
+ ttyno = dev;
+#if NBM > 0
+ if (screen_captures(ttyno))
+ return screen_write(SCREEN_CONS_UNIT(), ior);
+#endif /* NBM > 0 */
+
+ tp = console_tty[ttyno];
+ return char_write(tp, ior);
+}
+
+/*
+ * Start output on a line
+ */
+cons_start(tp)
+ register struct tty *tp;
+{
+ spl_t s;
+
+ s = spltty();
+ if (tp->t_state & (TS_TIMEOUT|TS_BUSY|TS_TTSTOP))
+ goto out;
+
+ if (tp->t_outq.c_cc == 0)
+ goto out;
+
+ tp->t_state |= TS_BUSY;
+
+ (*console_start)(tp);
+
+out:
+ splx(s);
+}
+
+/*
+ * Stop output on a line.
+ */
+cons_stop(tp, flag)
+ register struct tty *tp;
+{
+ register spl_t s;
+
+ s = spltty();
+ if (tp->t_state & TS_BUSY) {
+ if ((tp->t_state&TS_TTSTOP)==0)
+ tp->t_state |= TS_FLUSH;
+ }
+ splx(s);
+}
+
+
+/*
+ * Modem control
+ */
+cons_mctl(
+ struct tty *tp,
+ int bits,
+ int how)
+{
+ return (*console_mctl)(tp->t_dev, bits, how);
+}
+
+/*
+ * Abnormal close
+ */
+cons_portdeath(dev, port)
+ int dev;
+ mach_port_t port;
+{
+ if (dev == user_console)
+ dev = console;
+ return (tty_portdeath(console_tty[dev], port));
+}
+
+/*
+ * Get/Set status routines
+ */
+io_return_t
+cons_get_status(dev, flavor, data, status_count)
+ int dev;
+ dev_flavor_t flavor;
+ int * data; /* pointer to OUT array */
+ unsigned int *status_count; /* out */
+{
+ register struct tty *tp;
+ register int ttyno;
+
+ if (dev == user_console)
+ dev = console;
+
+ ttyno = dev;
+
+#if NBM > 0
+ if (screen_captures(ttyno) &&
+ (screen_get_status(SCREEN_CONS_UNIT(),
+ flavor, data, status_count) == D_SUCCESS))
+ return D_SUCCESS;
+#endif /* NBM > 0 */
+
+ tp = console_tty[ttyno];
+
+ switch (flavor) {
+ case TTY_MODEM:
+ /* Take all bits */
+ *data = (*console_mctl)(dev, -1, DMGET);
+ *status_count = 1;
+ break;
+ default:
+ return (tty_get_status(tp, flavor, data, status_count));
+ }
+ return (D_SUCCESS);
+}
+
+io_return_t
+cons_set_status(dev, flavor, data, status_count)
+ int dev;
+ dev_flavor_t flavor;
+ int * data;
+ unsigned int status_count;
+{
+ register struct tty *tp;
+ register int ttyno;
+
+ if (dev == user_console)
+ dev = console;
+
+ ttyno = dev;
+
+#if NBM > 0
+ if (screen_captures(ttyno) &&
+ (screen_set_status(SCREEN_CONS_UNIT(),
+ flavor, data, status_count) == D_SUCCESS))
+ return D_SUCCESS;
+#endif /* NBM > 0 */
+
+ tp = console_tty[ttyno];
+
+ switch (flavor) {
+ case TTY_MODEM:
+ if (status_count < TTY_MODEM_COUNT)
+ return (D_INVALID_OPERATION);
+ (void) (*console_mctl)(dev, *data, DMSET);
+ break;
+
+ case TTY_SET_BREAK:
+ (void) (*console_mctl)(dev, TM_BRK, DMBIS);
+ break;
+
+ case TTY_CLEAR_BREAK:
+ (void) (*console_mctl)(dev, TM_BRK, DMBIC);
+ break;
+
+ case TTY_STATUS:
+ {
+ register int error = D_SUCCESS;
+ struct tty_status *tsp;
+
+ /*
+ * Defend from noise. The cshell...
+ */
+ tsp = (struct tty_status *)data;
+ if ((tsp->tt_ispeed != tp->t_ispeed) ||
+ (tsp->tt_ospeed != tp->t_ospeed) ||
+ (tsp->tt_breakc != tp->t_breakc) ||
+ ((tsp->tt_flags & ~TF_HUPCLS) != tp->t_flags)) {
+
+ error = tty_set_status(tp, flavor, data, status_count);
+ if (error == 0) {
+ spl_t s = spltty();
+ tp->t_state &= ~(TS_BUSY|TS_FLUSH);
+ (*console_param)(tp, ttyno);
+ splx(s);
+ }
+ } else
+ if (tsp->tt_flags & TF_HUPCLS)
+ tp->t_state |= TS_HUPCLS;
+ return (error);
+ }
+ default:
+ return (tty_set_status(tp, flavor, data, status_count));
+ }
+ return (D_SUCCESS);
+}
+
+
+/*
+ * A simple scheme to dispatch interrupts.
+ *
+ * This deals with the fairly common case where we get an
+ * interrupt on each rx/tx character. A more elaborate
+ * scheme [someday here too..] would instead handle many
+ * characters per interrupt, perhaps using a DMA controller
+ * or a large SILO. Note that it is also possible to simulate
+ * a DMA chip with 'pseudo-dma' code that runs directly down
+ * in the interrupt routine.
+ */
+
+/*
+ * We just received a character, ship it up for further processing.
+ * Arguments are the tty number for which it is meant, a flag that
+ * indicates a keyboard or mouse is potentially attached to that
+ * tty (-1 if not), the character proper stripped down to 8 bits,
+ * and an indication of any error conditions associated with the
+ * receipt of the character.
+ * We deal here with rconsole input handling and dispatching to
+ * mouse or keyboard translation routines. cons_input() does
+ * the rest.
+ */
+#if MACH_KDB
+int l3break = 0x10; /* dear old ^P, we miss you so bad. */
+#endif /* MACH_KDB */
+
+cons_simple_rint(ttyno, line, c, err)
+ int line;
+ int c;
+{
+ /*
+ * Rconsole. Drop in the debugger on break or ^P.
+ * Otherwise pretend input came from keyboard.
+ */
+ if (rcline && ttyno == rcline) {
+#if MACH_KDB
+ if ((err & CONS_ERR_BREAK) ||
+ ((c & 0x7f) == l3break))
+ return gimmeabreak();
+#endif /* MACH_KDB */
+ ttyno = console;
+ goto process_it;
+ }
+
+#if NBM > 0
+ if (screen_captures(line)) {
+ if (line == SCREEN_LINE_POINTER)
+ return mouse_input(SCREEN_CONS_UNIT(), c);
+ if (line == SCREEN_LINE_KEYBOARD) {
+ c = lk201_rint(SCREEN_CONS_UNIT(), c, FALSE, FALSE);
+ if (c == -1)
+ return; /* shift or bad char */
+ }
+ }
+#endif /* NBM > 0 */
+process_it:
+ cons_input(ttyno, c, err);
+}
+
+/*
+ * Send along a character on a tty. If we were waiting for
+ * this char to complete the open procedure do so; check
+ * for errors; if all is well proceed to ttyinput().
+ */
+cons_input(ttyno, c, err)
+{
+ register struct tty *tp;
+
+ tp = console_tty[ttyno];
+
+ if ((tp->t_state & TS_ISOPEN) == 0) {
+ tt_open_wakeup(tp);
+ return;
+ }
+ if (err) {
+ if (err & CONS_ERR_OVERRUN)
+ log(LOG_WARNING, "sl%d: silo overflow\n", ttyno);
+
+ if (err & CONS_ERR_PARITY)
+ if (((tp->t_flags & (TF_EVENP|TF_ODDP)) == TF_EVENP)
+ || ((tp->t_flags & (TF_EVENP|TF_ODDP)) == TF_ODDP))
+ return;
+ if (err & CONS_ERR_BREAK) /* XXX autobaud XXX */
+ c = tp->t_breakc;
+ }
+ ttyinput(c, tp);
+}
+
+/*
+ * Transmission of a character is complete.
+ * Return the next character or -1 if none.
+ */
+cons_simple_tint(ttyno, all_sent)
+ boolean_t all_sent;
+{
+ register struct tty *tp;
+
+ tp = console_tty[ttyno];
+ if ((tp->t_addr == 0) || /* not probed --> stray */
+ (tp->t_state & TS_TTSTOP))
+ return -1;
+
+ if (all_sent) {
+ tp->t_state &= ~TS_BUSY;
+ if (tp->t_state & TS_FLUSH)
+ tp->t_state &= ~TS_FLUSH;
+
+ cons_start(tp);
+ }
+
+ if (tp->t_outq.c_cc == 0 || (tp->t_state&TS_BUSY)==0)
+ return -1;
+
+ return getc(&tp->t_outq);
+}
+
+
+
+
+
+#endif /*NCONSTTY > 0*/
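To make the dispatch scheme above concrete: a hypothetical one-line UART driver ("xuart", not part of this tree) would feed cons_simple_rint()/cons_simple_tint() from its interrupt handlers roughly as below, using the CONS_ERR_* bits from chips/serial_defs.h. Only the calls into this file are real; the chip accessors are invented.

#include <chips/serial_defs.h>

extern int xuart_read_data(), xuart_write_data();	/* invented accessors */
extern int xuart_saw_break(), xuart_saw_overrun();

xuart_rint(unit)
	int unit;
{
	register int c = xuart_read_data(unit);
	register int err = 0;

	if (xuart_saw_break(unit))
		err |= CONS_ERR_BREAK;
	if (xuart_saw_overrun(unit))
		err |= CONS_ERR_OVERRUN;

	/* tty 0, no keyboard/mouse attached to this line (-1), 8-bit char */
	cons_simple_rint(0, -1, c & 0xff, err);
}

xuart_tint(unit)
	int unit;
{
	register int c;

	/* this imaginary chip interrupts only when all bits are shifted out */
	c = cons_simple_tint(0, TRUE);
	if (c >= 0)
		xuart_write_data(unit, c);
}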
diff --git a/chips/serial_defs.h b/chips/serial_defs.h
new file mode 100644
index 00000000..97027b0b
--- /dev/null
+++ b/chips/serial_defs.h
@@ -0,0 +1,53 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: serial_defs.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 7/91
+ *
+ * Generic console driver for serial-line based consoles.
+ */
+
+
+/*
+ * Common defs
+ */
+
+extern int (*console_probe)(), (*console_param)(), (*console_start)(),
+ (*console_putc)(), (*console_getc)(),
+ (*console_pollc)(), (*console_mctl)(), (*console_softCAR)();
+extern cngetc(), cnmaygetc(), cnputc(), rcputc();
+
+extern struct tty *console_tty[];
+extern int rcline, cnline;
+extern int console;
+
+/* Simple one-char-at-a-time scheme */
+extern cons_simple_tint(), cons_simple_rint();
+
+#define CONS_ERR_PARITY 0x1000
+#define CONS_ERR_BREAK 0x2000
+#define CONS_ERR_OVERRUN 0x4000
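These console_* pointers are what serial_console.c dispatches through; the serial chip driver that wins the console is expected to point them at its own routines before cons_find() runs. A hedged sketch of that wiring, with invented xuart_* names standing in for a real chip driver:

extern int xuart_probe(), xuart_param(), xuart_start(),
	   xuart_putc(), xuart_getc(), xuart_pollc(),
	   xuart_mctl(), xuart_softCAR();

xuart_console_init()
{
	console_probe   = xuart_probe;
	console_param   = xuart_param;
	console_start   = xuart_start;
	console_putc    = xuart_putc;
	console_getc    = xuart_getc;
	console_pollc   = xuart_pollc;
	console_mctl    = xuart_mctl;
	console_softCAR = xuart_softCAR;
}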
diff --git a/chips/sfb_hdw.c b/chips/sfb_hdw.c
new file mode 100644
index 00000000..ff2b1f6d
--- /dev/null
+++ b/chips/sfb_hdw.c
@@ -0,0 +1,253 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: sfb_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 11/92
+ *
+ * Driver for the Smart Color Frame Buffer Display,
+ * hardware-level operations.
+ */
+
+#include <sfb.h>
+#if (NSFB > 0)
+#include <platforms.h>
+
+#include <machine/machspl.h>
+#include <mach/std_types.h>
+#include <chips/busses.h>
+#include <chips/screen_defs.h>
+#include <chips/pm_defs.h>
+#include <machine/machspl.h>
+
+typedef pm_softc_t sfb_softc_t;
+
+#ifdef DECSTATION
+#include <mips/PMAX/pmagb_ba.h>
+#include <mips/PMAX/tc.h>
+#endif
+
+#ifdef FLAMINGO
+#include <mips/PMAX/pmagb_ba.h> /* XXXX fixme */
+#include <alpha/DEC/tc.h>
+#define sparsify(x) ((1L << 28) | (((x) & 0x7ffffff) << 1) | \
+ ((x) & ~0x7ffffffL))
+#endif
+
+#ifndef sparsify
+#define sparsify(x) x
+#endif
+
+/*
+ * Definition of the driver for the auto-configuration program.
+ */
+
+int sfb_probe(), sfb_intr();
+void sfb_attach();
+
+vm_offset_t sfb_std[NSFB] = { 0 };
+struct bus_device *sfb_info[NSFB];
+struct bus_driver sfb_driver =
+ { sfb_probe, 0, sfb_attach, 0, sfb_std, "sfb", sfb_info,
+ 0, 0, BUS_INTR_DISABLED};
+
+/*
+ * Probe/Attach functions
+ */
+
+sfb_probe(
+ vm_offset_t addr,
+ struct bus_device *device)
+{
+ static probed_once = 0;
+
+ /*
+ * Probing was really done sweeping the TC long ago
+ */
+ if (tc_probe("sfb") == 0)
+ return 0;
+ if (probed_once++ > 1) {
+ printf("[mappable] ");
+ device->address = addr;
+ }
+ return 1;
+}
+
+void sfb_attach(
+ struct bus_device *ui)
+{
+ /* ... */
+ printf(": smart frame buffer");
+}
+
+
+/*
+ * Interrupt routine
+ */
+
+sfb_intr(
+ int unit,
+ spl_t spllevel)
+{
+ register volatile char *ack;
+
+ /* acknowledge interrupt */
+ ack = (volatile char *) sfb_info[unit]->address + SFB_OFFSET_ICLR;
+ *ack = 0;
+
+#if mips
+ splx(spllevel);
+#endif
+ lk201_led(unit);
+}
+
+sfb_vretrace(
+ sfb_softc_t *sfb,
+ boolean_t on)
+{
+ sfb_regs *regs;
+
+ regs = (sfb_regs *) ((char *)sfb->framebuffer - SFB_OFFSET_VRAM + SFB_OFFSET_REGS);
+
+ regs->intr_enable = (on) ? 1 : 0;
+}
+
+/*
+ * Boot time initialization: must make device
+ * usable as console asap.
+ */
+#define sfb_set_status cfb_set_status
+
+extern int
+ sfb_soft_reset(), sfb_set_status(),
+ sfb_pos_cursor(), bt459_video_on(),
+ bt459_video_off(), sfb_vretrace(),
+ pm_get_status(), pm_char_paint(),
+ pm_insert_line(), pm_remove_line(),
+ pm_clear_bitmap(), pm_map_page();
+
+static struct screen_switch sfb_sw = {
+ screen_noop, /* graphic_open */
+ sfb_soft_reset, /* graphic_close */
+ sfb_set_status, /* set_status */
+ pm_get_status, /* get_status */
+ pm_char_paint, /* char_paint */
+ sfb_pos_cursor, /* pos_cursor */
+ pm_insert_line, /* insert_line */
+ pm_remove_line, /* remove_line */
+ pm_clear_bitmap, /* clear_bitmap */
+ bt459_video_on, /* video_on */
+ bt459_video_off, /* video_off */
+ sfb_vretrace, /* intr_enable */
+ pm_map_page /* map_page */
+};
+
+sfb_cold_init(
+ int unit,
+ user_info_t *up)
+{
+ sfb_softc_t *sfb;
+ screen_softc_t sc = screen(unit);
+ vm_offset_t base = tc_probe("sfb");
+ int hor_p, ver_p;
+ boolean_t makes_sense;
+
+ bcopy(&sfb_sw, &sc->sw, sizeof(sc->sw));
+ sc->flags |= COLOR_SCREEN;
+
+ /*
+ * I am confused here by the documentation. One document
+ * sez there are three boards:
+ * "PMAGB-BA" can do 1280x1024 @66Hz or @72Hz
+ * "PMAGB-BC" can do 1024x864 @60Hz or 1280x1024 @72Hz
+ * "PMAGB-BE" can do 1024x768 @72Hz or 1280x1024 @72Hz
+ * Another document sez things differently:
+ * "PMAGB-BB" can do 1024x768 @72Hz
+ * "PMAGB-BD" can do 1024x864 @60Hz or 1280x1024 @72Hz
+ *
+ * I would be inclined to believe the first one, which came
+ * with an actual piece of hardware attached (a PMAGB-BA).
+ * But I could swear I got a first board (which blew up
+ * instantly) and it was calling itself PMAGB-BB...
+ *
+ * Since I have not seen any other hardware I will make
+ * this code as hypothetical as I can. Should work :-))
+ */
+
+ makes_sense = FALSE;
+
+ {
+ sfb_regs *regs;
+
+ regs = (sfb_regs *) ((char *)base + SFB_OFFSET_REGS);
+ hor_p = (regs->vhor_setup & 0x1ff) * 4;
+ ver_p = regs->vvert_setup & 0x7ff;
+
+ if (((hor_p == 1280) && (ver_p == 1024)) ||
+ ((hor_p == 1024) && (ver_p == 864)) ||
+ ((hor_p == 1024) && (ver_p == 768)))
+ makes_sense = TRUE;
+ }
+
+ if (makes_sense) {
+ sc->frame_scanline_width = hor_p;
+ sc->frame_height = ver_p;
+ sc->frame_visible_width = hor_p;
+ sc->frame_visible_height = ver_p;
+ } else {
+ sc->frame_scanline_width = 1280;
+ sc->frame_height = 1024;
+ sc->frame_visible_width = 1280;
+ sc->frame_visible_height = 1024;
+ }
+
+ pm_init_screen_params(sc,up);
+ (void) screen_up(unit, up);
+
+ sfb = pm_alloc( unit, sparsify(base + SFB_OFFSET_BT459),
+ base + SFB_OFFSET_VRAM, -1);
+
+ screen_default_colors(up);
+
+ sfb_soft_reset(sc);
+
+ /*
+ * Clearing the screen at boot saves from scrolling
+ * much, and speeds up booting quite a bit.
+ */
+ screen_blitc( unit, 'C'-'@');/* clear screen */
+}
+
+#if 0 /* this is how you find out about a new screen */
+fill(addr,n,c)
+ char *addr;
+{
+ while (n-- > 0) *addr++ = c;
+}
+#endif
+
+
+#endif /* NSFB > 0 */
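The sparsify() macro defined near the top of this file rearranges a dense TURBOchannel address for the Alpha's sparse I/O space: it sets bit 28 and shifts the low 27 bits up by one, leaving the upper bits alone. A small stand-alone worked example, with a made-up input address, just to show the arithmetic:

#include <stdio.h>

/* copied from the FLAMINGO case in sfb_hdw.c above */
#define sparsify(x) ((1L << 28) | (((x) & 0x7ffffff) << 1) | \
		     ((x) & ~0x7ffffffL))

int main()
{
	long dense = 0x01c00140L;	/* made-up dense-space address */

	/* low 27 bits 0x1c00140 doubled -> 0x3800280, plus bit 28 -> 0x13800280 */
	printf("dense %#lx -> sparse %#lx\n", dense, sparsify(dense));
	return 0;
}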
diff --git a/chips/sfb_misc.c b/chips/sfb_misc.c
new file mode 100644
index 00000000..a51e9a43
--- /dev/null
+++ b/chips/sfb_misc.c
@@ -0,0 +1,133 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: sfb_misc.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 11/92
+ *
+ * Driver for the PMAGB-BA smart color framebuffer
+ *
+ */
+
+#include <sfb.h>
+#if (NSFB > 0)
+#include <platforms.h>
+
+/*
+ * NOTE: This driver relies heavily on the pm one, as well as the cfb.
+ */
+
+#include <device/device_types.h>
+#include <chips/screen_defs.h>
+#include <chips/pm_defs.h>
+typedef pm_softc_t sfb_softc_t;
+
+#include <chips/bt459.h>
+#define bt459 cursor_registers
+
+#ifdef DECSTATION
+#include <mips/PMAX/pmagb_ba.h>
+#endif
+
+#ifdef FLAMINGO
+#include <mips/PMAX/pmagb_ba.h> /* XXXX */
+#endif
+
+/*
+ * Initialize color map, for kernel use
+ */
+#define sfb_init_colormap cfb_init_colormap
+
+/*
+ * Position cursor
+ */
+sfb_pos_cursor(
+ bt459_regmap_t *regs,
+ int x,
+ int y)
+{
+ bt459_pos_cursor( regs, x + 368 - 219, y + 37 - 34);
+}
+
+/*
+ * Large viz small cursor
+ */
+#define sfb_small_cursor_to_large cfb_small_cursor_to_large
+
+/*
+ * Device-specific set status
+ */
+#define sfb_set_status cfb_set_status
+
+/*
+ * Hardware initialization
+ */
+sfb_init_screen(
+ sfb_softc_t *sfb)
+{
+ bt459_init( sfb->bt459,
+ sfb->bt459 + (SFB_OFFSET_RESET - SFB_OFFSET_BT459),
+ 4 /* 4:1 MUX */);
+}
+
+/*
+ * Do what's needed when X exits
+ */
+sfb_soft_reset(
+ screen_softc_t sc)
+{
+ sfb_softc_t *sfb = (sfb_softc_t*) sc->hw_state;
+ user_info_t *up = sc->up;
+ extern cursor_sprite_t dc503_default_cursor;
+
+ /*
+ * Restore params in mapped structure
+ */
+ pm_init_screen_params(sc,up);
+ up->row = up->max_row - 1;
+
+ up->dev_dep_2.pm.x26 = 2; /* you do not want to know */
+ up->dev_dep_1.pm.x18 = (short*)2;
+
+ /*
+ * Restore RAMDAC chip to default state
+ */
+ sfb_init_screen(sfb);
+
+ /*
+ * Load kernel's cursor sprite: just use the same pmax one
+ */
+ sfb_small_cursor_to_large(up, dc503_default_cursor);
+ bt459_cursor_sprite(sfb->bt459, up->dev_dep_2.pm.cursor_sprite);
+
+ /*
+ * Color map and cursor color
+ */
+ sfb_init_colormap(sc);
+}
+
+
+#endif /* NSFB > 0 */
diff --git a/chips/spans.c b/chips/spans.c
new file mode 100644
index 00000000..4529f8b6
--- /dev/null
+++ b/chips/spans.c
@@ -0,0 +1,114 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*** SPANS SIGNALING ***/
+
+#ifndef STUB
+#include <chips/spans.h>
+#include <chips/tca100_if.h>
+#else
+#include "spans.h"
+#include "tca100_if.h"
+#endif
+
+nw_result spans_initialize(int dev) {
+
+#if !PERMANENT_VIRTUAL_CONNECTIONS
+#endif
+
+}
+
+
+void spans_input(nw_buffer_t msg) {
+
+#if !PERMANENT_VIRTUAL_CONNECTIONS
+#endif
+
+}
+
+nw_result spans_open(nw_ep ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep) {
+ nw_result rc;
+
+#if PERMANENT_VIRTUAL_CONNECTIONS
+ rc = NW_FAILURE;
+#else
+#endif
+
+ return rc;
+}
+
+nw_result spans_accept(nw_ep ep, nw_buffer_t msg, nw_ep_t new_epp) {
+ nw_result rc;
+
+#if PERMANENT_VIRTUAL_CONNECTIONS
+ rc = NW_FAILURE;
+#else
+#endif
+
+ return rc;
+}
+
+nw_result spans_close(nw_ep ep) {
+ nw_result rc;
+
+ tct[ep].rx_sar_header = 0;
+ rc = NW_SUCCESS;
+ return rc;
+}
+
+nw_result spans_add(nw_ep ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep) {
+ nw_result rc;
+
+#if PERMANENT_VIRTUAL_CONNECTIONS
+ rc = NW_FAILURE;
+#else
+#endif
+
+ return rc;
+}
+
+nw_result spans_drop(nw_ep ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep) {
+ nw_result rc;
+
+#if PERMANENT_VIRTUAL_CONNECTIONS
+ rc = NW_FAILURE;
+#else
+#endif
+
+ return rc;
+}
+
+void spans_timer_sweep() {
+
+#if !PERMANENT_VIRTUAL_CONNECTIONS
+#endif
+
+}
+
+
diff --git a/chips/spans.h b/chips/spans.h
new file mode 100644
index 00000000..7cb35002
--- /dev/null
+++ b/chips/spans.h
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*** SPANS SIGNALING ***/
+
+#ifndef _SPANS_H_
+#define _SPANS_H_ 1
+
+#ifndef STUB
+#include <chips/nc.h>
+#else
+#include "nc.h"
+#endif
+
+extern nw_result spans_initialize(int dev);
+
+extern void spans_input(nw_buffer_t msg);
+
+extern nw_result spans_open(nw_ep ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep);
+
+extern nw_result spans_accept(nw_ep ep, nw_buffer_t msg, nw_ep_t new_epp);
+
+extern nw_result spans_close(nw_ep ep);
+
+extern nw_result spans_add(nw_ep ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep);
+
+extern nw_result spans_drop(nw_ep ep, nw_address_1 rem_addr_1,
+ nw_address_2 rem_addr_2, nw_ep remote_ep);
+
+extern void spans_timer_sweep();
+
+
+#endif /* _SPANS_H_ */
diff --git a/chips/tca100.c b/chips/tca100.c
new file mode 100644
index 00000000..7ad4feca
--- /dev/null
+++ b/chips/tca100.c
@@ -0,0 +1,360 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef STUB
+#include <atm.h>
+#else
+#include "atm.h"
+#endif
+
+#if NATM > 0
+
+#ifndef STUB
+#include <sys/types.h>
+#include <kern/thread.h>
+#include <kern/lock.h>
+#include <kern/eventcount.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <mips/mips_cpu.h>
+#include <vm/vm_kern.h>
+#include <device/io_req.h>
+#include <device/device_types.h>
+#include <device/net_status.h>
+#include <chips/busses.h>
+#include <chips/nc.h>
+#include <chips/tca100.h>
+#include <chips/tca100_if.h>
+
+decl_simple_lock_data(, atm_simple_lock);
+
+#else
+#include "stub.h"
+#include "nc.h"
+#include "tca100_if.h"
+#include "tca100.h"
+
+int atm_simple_lock;
+
+#endif
+
+struct bus_device *atm_info[NATM];
+
+int atm_probe();
+void atm_attach();
+struct bus_driver atm_driver =
+ { atm_probe, 0, atm_attach, 0, /* csr */ 0, "atm", atm_info,
+ "", 0, /* flags */ 0 };
+
+atm_device_t atmp[NATM] = {NULL};
+u_int atm_open_count[NATM];
+u_int atm_mapped[NATM];
+u_int atm_control_mask[NATM];
+struct evc atm_event_counter[NATM];
+
+#define DEVICE(unit) ((unit == 0) ? NW_TCA100_1 : NW_TCA100_2)
+
+void atm_initialize(int unit) {
+
+ atmp[unit]->creg = (CR_RX_RESET | CR_TX_RESET);
+ atmp[unit]->creg = 0;
+ atmp[unit]->rxtimerv = 0;
+ atmp[unit]->rxthresh = 1;
+ atmp[unit]->txthresh = 0;
+ atmp[unit]->sreg = 0;
+ atmp[unit]->creg = atm_control_mask[unit] = (CR_RX_ENABLE | CR_TX_ENABLE);
+ atm_open_count[unit] = 0;
+ atm_mapped[unit] = 0;
+}
+
+/*** Device entry points ***/
+
+int atm_probe(vm_offset_t reg, struct bus_device *ui) {
+ int un;
+
+ un = ui->unit;
+ if (un >= NATM || check_memory(reg, 0)) {
+ return 0;
+ }
+
+ atm_info[un] = ui;
+ atmp[un] = (atm_device_t) reg;
+ nc_initialize();
+ if (nc_device_register(DEVICE(un), NW_CONNECTION_ORIENTED, (char *) reg,
+ &tca100_entry_table) == NW_SUCCESS &&
+ tca100_initialize(DEVICE(un)) == NW_SUCCESS) {
+ atm_initialize(un);
+ evc_init(&atm_event_counter[un]);
+ return 1;
+ } else {
+ atmp[un] = NULL;
+ (void) nc_device_unregister(DEVICE(un), NW_FAILURE);
+ return 0;
+ }
+}
+
+void atm_attach(struct bus_device *ui) {
+ int un;
+
+ un = ui->unit;
+ if (un >= NATM) {
+ printf("atm: stray attach\n");
+ } else {
+ atmp[un]->creg =
+ atm_control_mask[un] = CR_TX_ENABLE | CR_RX_ENABLE | RX_COUNT_INTR;
+ /*Enable ATM interrupts*/
+ }
+}
+
+void atm_intr(int unit, int spl_level) {
+
+ if (unit >= NATM || atmp[unit] == NULL) {
+ printf("atm: stray interrupt\n");
+ } else {
+ atmp[unit]->creg = CR_TX_ENABLE | CR_RX_ENABLE; /*Disable ATM interrupts*/
+ wbflush();
+ if (atm_mapped[unit]) {
+ splx(spl_level);
+ evc_signal(&atm_event_counter[unit]);
+ } else {
+ simple_lock(&atm_simple_lock);
+ tca100_poll(DEVICE(unit));
+ atmp[unit]->creg = atm_control_mask[unit];
+ simple_unlock(&atm_simple_lock);
+ splx(spl_level);
+ }
+ }
+}
+
+io_return_t atm_open(dev_t dev, int mode, io_req_t ior) {
+ int un;
+
+ un = minor(dev);
+ if (un >= NATM || atmp[un] == NULL) {
+ return D_NO_SUCH_DEVICE;
+/*
+ } else if (atm_open_count[un] > 0 && (atm_mapped[un] || (mode & D_WRITE))) {
+ return D_ALREADY_OPEN;
+*/
+ } else {
+ atm_open_count[un]++;
+ atm_mapped[un] = ((mode & D_WRITE) != 0);
+ if (atm_mapped[un])
+ (void) nc_device_unregister(DEVICE(un), NW_NOT_SERVER);
+ return D_SUCCESS;
+ }
+}
+
+io_return_t atm_close(dev_t dev) {
+ int un;
+
+ un = minor(dev);
+ if (un >= NATM || atmp[un] == NULL) {
+ return D_NO_SUCH_DEVICE;
+ } else if (atm_open_count[un] == 0) {
+ return D_INVALID_OPERATION;
+ } else {
+ if (atm_mapped[un]) {
+ (void) nc_device_register(DEVICE(un), NW_CONNECTION_ORIENTED,
+ (char *) atmp[un],
+ &tca100_entry_table);
+ atm_mapped[un] = 0;
+ }
+ atm_open_count[un]--;
+ return D_SUCCESS;
+ }
+}
+
+unsigned int *frc = (unsigned int *) 0xbe801000;
+char data[66000];
+
+io_return_t atm_read(dev_t dev, io_req_t ior) {
+ unsigned int ck1, ck2;
+ int i, j;
+ char c[16];
+
+ ck1 = *frc;
+ device_read_alloc(ior, ior->io_count);
+ for (i = 0, j = 0; i < ior->io_count; i += 4096, j++)
+ c[j] = (ior->io_data)[i];
+ ck2 = *frc;
+ ((int *) ior->io_data)[0] = ck1;
+ ((int *) ior->io_data)[1] = ck2;
+ return D_SUCCESS;
+}
+
+io_return_t atm_write(dev_t dev, io_req_t ior) {
+ int i, j;
+ char c[16];
+ boolean_t wait;
+
+ device_write_get(ior, &wait);
+ for (i = 0, j = 0; i < ior->io_total; i += 4096, j++)
+ c[j] = (ior->io_data)[i];
+ ior->io_residual = ior->io_total - *frc;
+ return D_SUCCESS;
+}
+
+io_return_t atm_get_status(dev_t dev, int flavor, dev_status_t status,
+ u_int *status_count) {
+ int un;
+
+ un = minor(dev);
+ if (un >= NATM || atmp[un] == NULL) {
+ return D_NO_SUCH_DEVICE;
+ } else {
+ switch ((atm_status) flavor) {
+ case ATM_MAP_SIZE:
+ status[0] = sizeof(atm_device_s);
+ *status_count = sizeof(int);
+ return D_SUCCESS;
+ case ATM_MTU_SIZE:
+ status[0] = 65535; /*MTU size*/
+ *status_count = sizeof(int);
+ return D_SUCCESS;
+ case ATM_EVC_ID:
+ status[0] = atm_event_counter[un].ev_id;
+ *status_count = sizeof(int);
+ return D_SUCCESS;
+ case ATM_ASSIGNMENT:
+ status[0] = atm_mapped[un];
+ status[1] = atm_open_count[un];
+ *status_count = 2 * sizeof(int);
+ return D_SUCCESS;
+ default:
+ return D_INVALID_OPERATION;
+ }
+ }
+}
+
+io_return_t atm_set_status(dev_t dev, int flavor, dev_status_t status,
+ u_int status_count) {
+ io_return_t rc;
+ int un, s;
+ nw_pvc_t pvcp;
+ nw_plist_t pel;
+ nw_ep lep;
+
+ un = minor(dev);
+ if (un >= NATM || atmp[un] == NULL) {
+ return D_NO_SUCH_DEVICE;
+ } else switch ((atm_status) flavor) {
+ case ATM_INITIALIZE:
+ if (status_count != 0) {
+ return D_INVALID_OPERATION;
+ } else {
+ s = splsched();
+ if (nc_device_register(DEVICE(un), NW_CONNECTION_ORIENTED,
+ (char *) atmp[un],
+ &tca100_entry_table) == NW_SUCCESS &&
+ tca100_initialize(DEVICE(un)) == NW_SUCCESS) {
+ atm_initialize(un);
+ rc = D_SUCCESS;
+ } else {
+ atmp[un] = NULL;
+ (void) nc_device_unregister(DEVICE(un), NW_FAILURE);
+ rc = D_INVALID_OPERATION;
+ }
+ splx(s);
+ return rc;
+ }
+ break;
+
+#if PERMANENT_VIRTUAL_CONNECTIONS
+ case ATM_PVC_SET:
+ pvcp = (nw_pvc_t) status;
+ if (status_count != sizeof(nw_pvc_s) || pvcp->pvc.local_ep >= MAX_EP) {
+ rc = D_INVALID_OPERATION;
+ } else if ((pel = nc_peer_allocate()) == NULL) {
+ rc = D_INVALID_OPERATION;
+ } else {
+ lep = pvcp->pvc.local_ep;
+ tct[lep].rx_sar_header = SSM | 1;
+ tct[lep].tx_atm_header = pvcp->tx_vp << ATM_VPVC_SHIFT;
+ tct[lep].tx_sar_header = 1;
+ ect[lep].state = NW_DUPLEX_ACCEPTED;
+ pel->peer = pvcp->pvc;
+ pel->next = NULL;
+ ect[lep].conn = pel;
+ if (pvcp->protocol == NW_LINE) {
+ if (nc_line_update(&pel->peer, lep) == NW_SUCCESS) {
+ ect[lep].protocol = pvcp->protocol;
+ if (nw_free_line_last == 0)
+ nw_free_line_first = lep;
+ else
+ ect[nw_free_line_last].next = lep;
+ ect[lep].previous = nw_free_line_last;
+ ect[lep].next = 0;
+ nw_free_line_last = lep;
+ rc = D_SUCCESS;
+ } else {
+ rc = D_INVALID_OPERATION;
+ }
+ } else {
+ rc = D_SUCCESS;
+ }
+ }
+ return rc;
+#endif
+
+ default:
+ return D_INVALID_OPERATION;
+ }
+}
+
+int atm_mmap(dev_t dev, vm_offset_t off, int prot) {
+ int un;
+ vm_offset_t addr;
+
+ un = minor(dev);
+ if (un >= NATM || atmp[un] == NULL || !atm_mapped[un] ||
+ off >= sizeof(atm_device_s)) {
+ return -1;
+ } else {
+ return mips_btop(K1SEG_TO_PHYS( (vm_offset_t) atmp[un] ) + off );
+ }
+}
+
+io_return_t atm_restart(int u) {
+
+ return D_INVALID_OPERATION;
+}
+
+io_return_t atm_setinput(dev_t dev, ipc_port_t receive_port, int priority,
+ filter_array_t *filter, u_int filter_count) {
+
+ return D_INVALID_OPERATION;
+}
+
+int atm_portdeath(dev_t dev, mach_port_t port) {
+
+ return D_INVALID_OPERATION;
+}
+
+
+#endif /* NATM > 0 */
+
+
+
diff --git a/chips/tca100.h b/chips/tca100.h
new file mode 100644
index 00000000..29312259
--- /dev/null
+++ b/chips/tca100.h
@@ -0,0 +1,200 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _TCA100_H_
+#define _TCA100_H_ 1
+
+#ifndef STUB
+#include <chips/nw.h>
+#else
+#include "nw.h"
+#endif
+
+/*** FORE TCA-100 Turbochannel ATM computer interface ***/
+
+/*** HARDWARE REGISTERS ***/
+
+typedef volatile unsigned int vol_u_int;
+
+typedef struct atm_device {
+ unsigned int prom[64 * 1024 / 4];
+ vol_u_int sreg;
+ vol_u_int creg_set;
+ vol_u_int creg_clr;
+ vol_u_int creg;
+ vol_u_int rxtimer;
+ unsigned int pad1;
+ vol_u_int rxtimerv;
+ unsigned int pad2;
+ vol_u_int rxcount;
+ unsigned int pad3;
+ vol_u_int rxthresh;
+ unsigned int pad4;
+ vol_u_int txcount;
+ unsigned int pad5;
+ vol_u_int txthresh;
+ unsigned int pad6[64*1024/4 - 15];
+ vol_u_int rxfifo[14];
+ unsigned int pad7[64*1024/4 - 14];
+ vol_u_int txfifo[14];
+ unsigned int pad8[64*1024/4 - 14];
+} atm_device_s, *atm_device_t;
+
+
+/*** DEFINITION OF BITS IN THE STATUS AND CONTROL REGISTERS ***/
+
+#define RX_COUNT_INTR 0x0001
+#define RX_EOM_INTR 0x0002
+#define RX_TIME_INTR 0x0004
+#define TX_COUNT_INTR 0x0008
+#define RX_CELL_LOST 0x0010
+#define RX_NO_CARRIER 0x0020
+#define CR_RX_ENABLE 0x0040
+#define CR_TX_ENABLE 0x0080
+#define CR_RX_RESET 0x0100
+#define CR_TX_RESET 0x0200
+
+#define RX_COUNTER_MASK 0x03ff
+
+/*** DEFINITION OF FIELDS FOR AAL3/4 WITH THE TCA-100 PADDING ***/
+
+/*Header -- ATM header*/
+
+#define VPI 0x0ff00000
+#define VCI 0x000ffff0
+
+#define ATM_HEADER_RSV_BITS 0x00000004
+
+#define PERMANENT_VIRTUAL_CONNECTIONS 1
+
+#if PERMANENT_VIRTUAL_CONNECTIONS
+#define ATM_VPVC_MASK 0x3ff00000
+#define ATM_VPVC_SHIFT 20
+#else
+#define ATM_VPVC_MASK 0x00003ff0
+#define ATM_VPVC_SHIFT 4
+#endif
+
+
+/*First payload word -- SAR header*/
+
+#define ATM_HEADER_CRC 0xff000000
+#define ATM_HEADER_CRC_SYNDROME 0x00ff0000
+
+#define SEG_TYPE 0x0000c000
+#define BOM 0x00008000
+#define COM 0x00000000
+#define EOM 0x00004000
+#define SSM 0x0000c000
+
+#define BOM_DATA_SIZE 40
+#define COM_DATA_SIZE 44
+#define EOM_DATA_SIZE 40
+#define SSM_DATA_SIZE 36
+
+#define SEQ_NO 0x00003c00
+#define SEQ_INC 0x00000400
+
+#define MID 0x000003ff
+#define MID_INC 0x00000001
+
+#define SAR_HEADER_MASK (ATM_HEADER_CRC_SYNDROME | SEG_TYPE | SEQ_NO | MID)
+
+/*Trailer -- SAR trailer and error flags*/
+
+#define PAYLOAD_LENGTH 0xfc000000
+#define FULL_SEGMENT_TRAILER (44 << 26)
+#define EMPTY_SEGMENT_TRAILER (4 << 26)
+#define SYNCH_SEGMENT_TRAILER (16 << 26)
+
+#define FRAMING_ERROR 0x0001
+#define HEADER_CRC_ERROR 0x0002
+#define PAYLOAD_CRC_ERROR 0x0004
+#define PAD2_ERROR 0x0007
+
+#define SAR_TRAILER_MASK (PAYLOAD_LENGTH | PAD2_ERROR)
+ /*This field should be FULL_SEGMENT_TRAILER IN BOM OR COM*/
+
+
+/*CS header and trailer fields*/
+
+#define CS_PDU_TYPE 0xff000000
+#define BE_TAG 0x00ff0000
+#define BA_SIZE 0x0000ffff
+
+#define CS_PROTOCOL_CONTROL_FIELD 0xff000000
+#define CS_LENGTH 0x0000ffff
+
+/*** DEVICE STATUS ***/
+
+typedef enum { /*"Flavors" for device_get_status and device_set_status*/
+ ATM_MAP_SIZE, /* device_get_status options */
+ ATM_MTU_SIZE,
+ ATM_EVC_ID, /* ID of event counter assigned to device */
+ ATM_ASSIGNMENT, /* Returns two words indicating whether device is mapped
+ and number of tasks with the device open */
+ /* device_set_status options */
+ ATM_INITIALIZE, /* Restarts hardware and low-level driver */
+ ATM_PVC_SET /* Sets up a permanent virtual connection --
+ the status argument array is cast to a nw_pvc_s
+ structure */
+
+} atm_status;
+
+typedef struct {
+ nw_peer_s pvc; /* Permanent virtual connection */
+ u_int tx_vp; /* VPI used for transmissions to permanent virtual
+ connection. The VPI used for reception is the
+ local endpoint number. VCIs are 0 */
+ nw_protocol protocol; /* Protocol of connection (possibly NW_LINE) */
+} nw_pvc_s, *nw_pvc_t;
+
+/*** BYTE ORDER ***/
+
+/*The ATM header and SAR header and trailer are converted to and from
+ host byte order by hardware. CS headers and trailers and
+ signaling messages need byte order conversion in software.
+ Conversion in software is also necessary for application messages
+ if the communicating hosts have different byte orders (e.g. DECstation
+ and SPARCstation). */
+
+#define HTONL(x) \
+ ((x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | ((u_int) x >> 24))
+
+#define NTOHL(x) HTONL(x)
+
+#if 0
+unsigned int htonl(unsigned int x) {
+
+ return ((x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24));
+}
+
+#define ntohl(x) htonl(x)
+
+#endif
+
+#endif /* _TCA100_H_ */
+
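As the byte-order note above says, the board converts the ATM and SAR words but CS headers and signaling data must be swapped in software. A small stand-alone sketch of composing a SAR header from the fields defined in this header and byte-swapping a CS length word; the message id and length values are made up for illustration.

#include <stdio.h>

/* the defines are copied verbatim from tca100.h above; u_int is
   typedef'ed here so the example compiles on its own */
typedef unsigned int u_int;
#define BOM     0x00008000
#define SEQ_NO  0x00003c00
#define SEQ_INC 0x00000400
#define MID     0x000003ff
#define HTONL(x) \
  ((x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | ((u_int) x >> 24))

int main()
{
	u_int mid = 5;			/* 10-bit message id, illustrative */
	u_int sar = BOM | (mid & MID);	/* first cell: BOM, sequence number 0 */
	u_int len = 200;		/* CS_LENGTH value, host byte order */

	/* next cell: segment type becomes COM (0) and the sequence number
	   advances; this is the same update tca100_if.c applies */
	sar = (sar + SEQ_INC) & (SEQ_NO | MID);

	printf("sar header %#x, CS length on the wire %#x\n", sar, HTONL(len));
	return 0;
}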
diff --git a/chips/tca100_if.c b/chips/tca100_if.c
new file mode 100644
index 00000000..5eeec101
--- /dev/null
+++ b/chips/tca100_if.c
@@ -0,0 +1,1377 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*** TCA 100 ATM NETWORK INTERFACE ***/
+
+#ifndef STUB
+#include <chips/tca100_if.h>
+# else
+#include "tca100_if.h"
+#endif
+
+#define SMALL_WINDOW_SIZE (BOM_DATA_SIZE + EOM_DATA_SIZE)
+#define INITIAL_WINDOW_SIZE BOM_DATA_SIZE
+#define CONTINUATION_WINDOW_SIZE (71 * COM_DATA_SIZE)
+#define FINAL_WINDOW_SIZE (70 * COM_DATA_SIZE + EOM_DATA_SIZE)
+#define MAX_LONG_RX 2
+#define MAX_LONG_TX 5
+#define BASE_TIME_OUT 5
+#define DELAYED_TIME_OUT 15
+#define MAX_RETRY 3
+#define POLL_LIMIT 100000
+#define POLL_IDLE_TIME 1
+#define POLL_CELL_TIME 8
+
+#define TCA_SYNCH 0xfc00
+#define TCA_ACK (NW_SUCCESS << 10)
+#define TCA_NAK (NW_FAILURE << 10)
+#define TCA_OVR (NW_OVERRUN << 10)
+#define TCA_SEQ (NW_INCONSISTENCY << 10)
+
+int tca100_verbose = 0;
+
+int tick[MAX_DEV];
+
+nw_control_s nw_tx_control[MAX_DEV][MAX_LONG_TX];
+nw_control_s nw_rx_control[MAX_DEV][MAX_LONG_RX];
+
+int long_tx_count[MAX_DEV], long_rx_count[MAX_DEV];
+
+nw_tx_header_t delayed_tx_first[MAX_DEV], delayed_tx_last[MAX_DEV];
+nw_rx_header_t delayed_rx_first[MAX_DEV], delayed_rx_last[MAX_DEV];
+
+nw_tcb tct[MAX_EP];
+
+u_int MTU[] = {9244, 65528, 65532, 65528};
+u_int MTU_URGENT[] = {32, 28, 32, 28};
+
+nw_dev_entry_s tca100_entry_table = {
+ tca100_initialize, tca100_status, spans_timer_sweep, tca100_timer_sweep,
+ tca100_poll, tca100_send, tca100_rpc, spans_input, spans_open, spans_accept,
+ spans_close, spans_add, spans_drop};
+
+typedef enum {
+ ATM_HEADER,
+ SAR_HEADER,
+ SAR_TRAILER,
+ CS_HEADER,
+ CS_TRAILER,
+ FRAME_ERROR,
+ DELIVERY_ERROR,
+ SYNCH_ERROR,
+ SEQ_ERROR,
+ OVERRUN_ERROR,
+ RX_RETRANSMISSION,
+ TX_RETRANSMISSION
+} tca_error;
+
+int tca_ec[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+nw_result tca100_initialize(int dev) {
+ nw_result rc;
+ int i;
+
+ rc = spans_initialize(dev);
+ if (rc == NW_SUCCESS) {
+ tick[dev] = 0;
+ for (i = 0; i < MAX_LONG_TX; i++)
+ nw_tx_control[dev][i].ep = 0;
+ long_tx_count[dev] = 0;
+ delayed_tx_first[dev] = delayed_tx_last[dev] = NULL;
+ for (i = 0; i < MAX_LONG_RX; i++)
+ nw_rx_control[dev][i].ep = 0;
+ long_rx_count[dev] = 0;
+ delayed_rx_first[dev] = delayed_rx_last[dev] = NULL;
+ for (i = 0; i < MAX_EP; i++) {
+ tct[i].rx_sar_header = 0;
+ tct[i].rx_control = NULL;
+ tct[i].tx_queued_count = 0;
+ tct[i].tx_control = NULL;
+ }
+ rc = NW_SUCCESS;
+ }
+ return rc;
+}
+
+nw_result tca100_status(int dev) {
+ nw_result rc;
+ atm_device_t atmp;
+ u_int status;
+
+ atmp = (atm_device_t) devct[dev].addr;
+ status = atmp->sreg;
+ if (status & RX_NO_CARRIER) {
+ atmp->creg_set = CR_RX_RESET;
+ atmp->creg_clr = ~CR_RX_RESET;
+ atmp->creg_set = CR_RX_ENABLE;
+ atmp->sreg = 0;
+ rc = NW_NO_CARRIER;
+ } else if (status & RX_CELL_LOST) {
+ atmp->sreg = RX_COUNT_INTR;
+ rc = NW_OVERRUN;
+ } else {
+ rc = NW_SUCCESS;
+ }
+ return rc;
+}
+
+
+void tca100_synch_send(int dev, nw_tcb_t tcb, u_int reply) {
+ vol_u_int *tx_fifo = &((atm_device_t) devct[dev].addr)->txfifo[0];
+
+#ifdef TRACING
+ printf("Synch sent %x\n", reply);
+#endif
+
+ tx_fifo[0] = tcb->tx_atm_header;
+ tx_fifo[1] = SSM;
+ tx_fifo[2] = HTONL(8);
+ tx_fifo[3] = HTONL(NW_SYNCHRONIZATION);
+ tx_fifo[4] = htonl(reply);
+ tx_fifo[5] = HTONL(8);
+ tx_fifo[6] = 0;
+ tx_fifo[7] = 0;
+ tx_fifo[8] = 0;
+ tx_fifo[9] = 0;
+ tx_fifo[10] = 0;
+ tx_fifo[11] = 0;
+ tx_fifo[12] = 0;
+ tx_fifo[13] = SYNCH_SEGMENT_TRAILER;
+}
+
+#define broken_cell_mend(length) { \
+ missing = length; \
+ while (missing != 0) { \
+ if (missing > block_count) { \
+ limit = 0; \
+ missing -= block_count; \
+ } else { \
+ limit = block_count - missing; \
+ missing = 0; \
+ } \
+ while (block_count > limit) { \
+ t1 = block[0]; \
+ block++; \
+ tx_fifo[1] = t1; \
+ block_count -= 4; \
+ } \
+ if (block_count == 0) { \
+ ecb->tx_current = tx_header = ecb->tx_current->next; \
+ if (tx_header != NULL) { \
+ block_count = tx_header->block_length; \
+ block = (vol_u_int *) tx_header->block; \
+ } \
+ } \
+ } \
+}
+
+
+nw_result tca100_window_send(int dev, nw_ecb_t ecb, nw_tcb_t tcb,
+ boolean_t initial) {
+ nw_result rc;
+ register vol_u_int *tx_fifo = &((atm_device_t) devct[dev].addr)->txfifo[0];
+ register vol_u_int *block;
+ register u_int block_count, msg_count;
+ register int com_count;
+ int eom_count;
+ register u_int atm_header, sar_header, sar_trailer;
+ u_int cs_header, end_count;
+ int limit, missing;
+ register u_int t1, t2;
+ nw_tx_header_t tx_header;
+ nw_options options;
+
+ atm_header = tcb->tx_atm_header;
+ if (initial) {
+ sar_header = tcb->tx_sar_header & MID;
+ tx_header = ecb->tx_initial;
+ block = (vol_u_int *) tx_header->block;
+ block_count = tx_header->block_length;
+ options = tx_header->options;
+ msg_count = tx_header->msg_length;
+ if (ecb->protocol == NW_LINE)
+ msg_count += 4;
+ if (options == NW_URGENT)
+ msg_count += 4;
+ cs_header = ecb->protocol | (sar_header & 0xff) << 8 |
+ (msg_count & 0xff00) << 8 | msg_count << 24;
+ tcb->tx_cs_header = cs_header;
+
+ if (msg_count <= SSM_DATA_SIZE) { /*Single segment message*/
+ tx_fifo[0] = atm_header;
+ sar_trailer = (msg_count + 8) << 26;
+ tx_fifo[1] = SSM | sar_header; /*Sequence number 0 is implicit*/
+ end_count = msg_count + 4;
+ tx_fifo[1] = cs_header;
+ if (options == NW_URGENT) {
+ tx_fifo[1] = HTONL(NW_URGENT);
+ msg_count -= 4;
+ }
+ if (ecb->protocol == NW_LINE) {
+ tx_fifo[1] = tx_header->peer.local_ep >> 8 |
+ (tx_header->peer.local_ep & 0x00ff) << 8 |
+ (tx_header->peer.remote_ep & 0xff00) << 8 |
+ tx_header->peer.remote_ep << 24;
+ msg_count -= 4;
+ }
+ if (ecb->protocol == NW_SEQ_PACKET) {
+ tcb->tx_synch = 0;
+ } else {
+ tcb->tx_synch = -1;
+ }
+ goto EOM_payload;
+
+ } else { /*Beginning of message*/
+ tx_fifo[0] = atm_header;
+ sar_trailer = FULL_SEGMENT_TRAILER;
+ tx_fifo[1] = BOM | sar_header; /*Sequence number 0 is implicit*/
+ tx_fifo[2] = cs_header;
+ if (block_count < BOM_DATA_SIZE) {
+ if (ecb->protocol == NW_LINE) {
+ t1 = tx_header->peer.local_ep >> 8 |
+ (tx_header->peer.local_ep & 0x00ff) << 8 |
+ (tx_header->peer.remote_ep & 0xff00) << 8 |
+ tx_header->peer.remote_ep << 24;
+ missing = BOM_DATA_SIZE - 4;
+ tx_fifo[3] = t1;
+ } else {
+ missing = BOM_DATA_SIZE;
+ }
+ broken_cell_mend(missing);
+ } else {
+ if (ecb->protocol == NW_LINE) {
+ t1 = tx_header->peer.local_ep >> 8 |
+ (tx_header->peer.local_ep & 0x00ff) << 8 |
+ (tx_header->peer.remote_ep & 0xff00) << 8 |
+ tx_header->peer.remote_ep << 24;
+ } else {
+ t1 = block[0];
+ block_count -= 4;
+ block++;
+ }
+ t2 = block[0];
+ tx_fifo[3] = t1;
+ tx_fifo[4] = t2;
+ t1 = block[1];
+ t2 = block[2];
+ tx_fifo[5] = t1;
+ tx_fifo[6] = t2;
+ t1 = block[3];
+ t2 = block[4];
+ tx_fifo[7] = t1;
+ tx_fifo[8] = t2;
+ t1 = block[5];
+ t2 = block[6];
+ tx_fifo[9] = t1;
+ tx_fifo[10] = t2;
+ t1 = block[7];
+ t2 = block[8];
+ tx_fifo[11] = t1;
+ tx_fifo[12] = t2;
+ block_count -= (BOM_DATA_SIZE - 4);
+ block += 9;
+ }
+ if (ecb->protocol == NW_RAW) {
+ msg_count -= BOM_DATA_SIZE;
+ com_count = msg_count / COM_DATA_SIZE;
+ msg_count = msg_count % COM_DATA_SIZE;
+ eom_count = 1;
+ tcb->tx_synch = -1;
+ } else if (msg_count > SMALL_WINDOW_SIZE) {
+ com_count = eom_count = 0;
+ tcb->tx_synch = msg_count;
+ msg_count -= BOM_DATA_SIZE;
+ } else {
+ com_count = 0;
+ eom_count = 1;
+ if (ecb->protocol == NW_SEQ_PACKET) {
+ tcb->tx_synch = 0;
+ } else {
+ tcb->tx_synch = -1;
+ }
+ msg_count -= BOM_DATA_SIZE;
+ }
+ tx_fifo[13] = sar_trailer;
+ sar_header += SEQ_INC;
+ }
+
+ } else {
+ sar_header = tcb->tx_sar_header;
+ sar_trailer = FULL_SEGMENT_TRAILER;
+ block = (vol_u_int *) tcb->tx_p;
+ block_count = tcb->tx_block_count;
+ msg_count = tcb->tx_msg_count;
+ if (msg_count > FINAL_WINDOW_SIZE) {
+ com_count = (CONTINUATION_WINDOW_SIZE / COM_DATA_SIZE);
+ eom_count = 0;
+ tcb->tx_synch = msg_count;
+ msg_count -= CONTINUATION_WINDOW_SIZE;
+ } else {
+ com_count = msg_count / COM_DATA_SIZE;
+ msg_count = msg_count % COM_DATA_SIZE;
+ eom_count = 1;
+ if (ecb->protocol == NW_SEQ_PACKET) {
+ tcb->tx_synch = 0;
+ } else {
+ tcb->tx_synch = -1;
+ }
+ }
+ }
+
+ while (com_count-- > 0) { /*Continuation of message*/
+ tx_fifo[0] = atm_header;
+ tx_fifo[1] = sar_header; /*COM is 0 and is implicit*/
+ if (block_count >= COM_DATA_SIZE) {
+ t1 = block[0];
+ t2 = block[1];
+ tx_fifo[2] = t1;
+ tx_fifo[3] = t2;
+ t1 = block[2];
+ t2 = block[3];
+ tx_fifo[4] = t1;
+ tx_fifo[5] = t2;
+ t1 = block[4];
+ t2 = block[5];
+ tx_fifo[6] = t1;
+ tx_fifo[7] = t2;
+ t1 = block[6];
+ t2 = block[7];
+ tx_fifo[8] = t1;
+ tx_fifo[9] = t2;
+ t1 = block[8];
+ t2 = block[9];
+ tx_fifo[10] = t1;
+ tx_fifo[11] = t2;
+ t1 = block[10];
+ block_count -= COM_DATA_SIZE;
+ tx_fifo[12] = t1;
+ tx_fifo[13] = sar_trailer;
+ block += 11;
+ sar_header = (sar_header + SEQ_INC) & (SEQ_NO | MID);
+ } else {
+ broken_cell_mend(COM_DATA_SIZE);
+ tx_fifo[13] = sar_trailer;
+ sar_header = (sar_header + SEQ_INC) & (SEQ_NO | MID);
+ }
+ }
+
+ if (eom_count != 0) { /*End of message*/
+ tx_fifo[0] = atm_header;
+ tx_fifo[1] = EOM | sar_header;
+ end_count = msg_count;
+ sar_trailer = (msg_count + 4) << 26;
+
+ EOM_payload:
+ if (block_count >= msg_count) {
+ if (msg_count & 0x4) {
+ t1 = block[0];
+ tx_fifo[1] = t1;
+ }
+ block = (vol_u_int *) ((char *) block + msg_count);
+ switch (msg_count >> 3) {
+ case 5:
+ t1 = block[-10];
+ t2 = block[-9];
+ tx_fifo[1] = t1;
+ tx_fifo[1] = t2;
+ case 4:
+ t1 = block[-8];
+ t2 = block[-7];
+ tx_fifo[1] = t1;
+ tx_fifo[1] = t2;
+ case 3:
+ t1 = block[-6];
+ t2 = block[-5];
+ tx_fifo[1] = t1;
+ tx_fifo[1] = t2;
+ case 2:
+ t1 = block[-4];
+ t2 = block[-3];
+ tx_fifo[1] = t1;
+ tx_fifo[1] = t2;
+ case 1:
+ t1 = block[-2];
+ t2 = block[-1];
+ tx_fifo[1] = t1;
+ tx_fifo[1] = t2;
+ }
+ msg_count = 0;
+ } else {
+ broken_cell_mend(msg_count);
+ msg_count = 0;
+ }
+
+ EOM_cs_trailer:
+ tx_fifo[1] = tcb->tx_cs_header;
+ switch (end_count) {
+ case 0: tx_fifo[1] = 0;
+ case 4: tx_fifo[1] = 0;
+ case 8: tx_fifo[1] = 0;
+ case 12: tx_fifo[1] = 0;
+ case 16: tx_fifo[1] = 0;
+ case 20: tx_fifo[1] = 0;
+ case 24: tx_fifo[1] = 0;
+ case 28: tx_fifo[1] = 0;
+ case 32: tx_fifo[1] = 0;
+ case 36: tx_fifo[1] = 0;
+ }
+ tx_fifo[13] = sar_trailer;
+ }
+
+ if (tcb->tx_synch == -1) {
+
+#ifdef TRACING
+ printf("Final window sent\n");
+#endif
+
+ sar_header = (sar_header + MID_INC) & MID;
+ if (sar_header == 0)
+ sar_header = 1;
+ tcb->tx_sar_header = sar_header;
+ rc = NW_SUCCESS;
+ } else {
+
+#ifdef TRACING
+ printf("Window synch at %x\n", msg_count);
+#endif
+
+ tcb->tx_sar_header = sar_header;
+ tcb->tx_p = (u_int *) block;
+ tcb->tx_block_count = block_count;
+ tcb->tx_msg_count = msg_count;
+ rc = NW_SYNCH;
+ }
+ return rc;
+}
+
+nw_result tca100_send(nw_ep ep, nw_tx_header_t header, nw_options options) {
+ nw_result rc;
+ int i, dev;
+ nw_ecb_t ecb;
+ nw_tcb_t tcb;
+ nw_control_t control;
+ nw_tx_header_t tx_header, tx_previous;
+
+ dev = NW_DEVICE(header->peer.rem_addr_1);
+ ecb = &ect[ep];
+ tcb = &tct[ep];
+ if ((options == NW_URGENT && header->msg_length >
+ MTU_URGENT[ecb->protocol]) || header->msg_length > MTU[ecb->protocol]) {
+ rc = NW_BAD_LENGTH;
+ } else if (tcb->tx_queued_count != 0 ||
+ (ecb->protocol != NW_RAW &&
+ long_tx_count[dev] >= MAX_LONG_TX &&
+ (header->msg_length > SMALL_WINDOW_SIZE ||
+ ecb->protocol == NW_SEQ_PACKET))) {
+ if (options == NW_URGENT && tcb->tx_queued_count != 0) {
+ tx_header = delayed_tx_first[dev];
+ tx_previous = NULL;
+ while (tx_header != NULL && tx_header->sender != ep) {
+ tx_previous = tx_header;
+ tx_header = tx_header->next;
+ }
+ if (tx_previous == NULL)
+ delayed_tx_first[dev] = header;
+ else
+ tx_previous->next = header;
+ while (header->next != NULL)
+ header = header->next;
+ header->next = tx_header;
+ } else {
+ if (delayed_tx_first[dev] == NULL)
+ delayed_tx_first[dev] = header;
+ else
+ delayed_tx_last[dev]->next = header;
+ delayed_tx_last[dev] = header;
+ }
+ tcb->tx_queued_count++;
+ rc = NW_QUEUED;
+
+#ifdef TRACING
+ printf("Send enqueued ep %d\n", ep);
+#endif
+
+ } else {
+
+
+#ifdef TRACING
+ printf("Send ep %d\n", ep);
+#endif
+
+ ecb->tx_initial = ecb->tx_current = header;
+ rc = tca100_window_send(dev, ecb, tcb, TRUE);
+ if (rc == NW_SUCCESS) {
+ while (header != NULL) {
+ if (header->buffer != NULL)
+ nc_buffer_deallocate(ep, header->buffer);
+ header = header->next;
+ }
+ nc_tx_header_deallocate(ecb->tx_initial);
+ ecb->tx_initial = ecb->tx_current = NULL;
+ } else {
+ control = &nw_tx_control[dev][0];
+ while (control->ep != 0)
+ control++;
+ control->ep = ep;
+ control->time_out = tick[dev] + BASE_TIME_OUT;
+ control->retry = 0;
+ tcb->reply = TCA_SYNCH;
+ tcb->tx_control = control;
+ tcb->tx_queued_count++;
+ if (long_tx_count[dev] + long_rx_count[dev] == 0)
+ nc_fast_timer_set(dev);
+ long_tx_count[dev]++;
+ }
+ }
+ return rc;
+}
+
+
+nw_result tx_slot_free(int dev, nw_control_t control) {
+ nw_result rc;
+ nw_tcb_t tcb;
+ nw_ecb_t ecb;
+ nw_tx_header_t tx_header;
+ nw_ep ep;
+
+ tcb = &tct[control->ep];
+ tcb->tx_control = NULL;
+ tcb->tx_queued_count--;
+ do {
+ tx_header = delayed_tx_first[dev];
+ if (tx_header == NULL) {
+ control->ep = 0;
+ long_tx_count[dev]--;
+ rc = NW_FAILURE;
+ } else {
+ ep = tx_header->sender;
+
+#ifdef TRACING
+ printf("Send dequeued ep %d\n", ep);
+#endif
+
+ ecb = &ect[ep];
+ tcb = &tct[ep];
+ ecb->tx_initial = ecb->tx_current = tx_header;
+ while (tx_header->next != NULL &&
+ tx_header->next->msg_length == 0) {
+ tx_header = tx_header->next;
+ }
+ delayed_tx_first[dev] = tx_header->next;
+ if (tx_header->next == NULL)
+ delayed_tx_last[dev] = NULL;
+ tx_header->next = NULL;
+ rc = tca100_window_send(dev, ecb, tcb, TRUE);
+ if (rc == NW_SYNCH) {
+ control->ep = ep;
+ tcb->tx_control = control;
+ tcb->reply = TCA_SYNCH;
+ control->time_out = tick[dev] + BASE_TIME_OUT;
+ control->retry = 0;
+ }
+ }
+ } while (rc == NW_SUCCESS);
+ return rc;
+}
+
+nw_result rx_slot_free(int dev, nw_control_t control) {
+ nw_result rc;
+ nw_rx_header_t rx_header;
+ nw_ep ep;
+ nw_tcb_t tcb;
+
+ if (control == NULL) {
+ rc = NW_SUCCESS;
+ } else {
+ tct[control->ep].rx_control = NULL;
+ while ((rx_header = delayed_rx_first[dev]) != NULL &&
+ tick[dev] >= rx_header->time_stamp) {
+ delayed_rx_first[dev] = rx_header->next;
+ nc_buffer_deallocate(rx_header->buffer->peer.local_ep,
+ rx_header->buffer);
+ ep = rx_header->receiver;
+ tcb = &tct[ep];
+ tcb->rx_sar_header = SSM | (tcb->rx_sar_header & MID);
+ nc_rx_header_deallocate(rx_header);
+ }
+ if (rx_header == NULL) {
+ delayed_rx_last[dev] = NULL;
+ control->ep = 0;
+ long_rx_count[dev]--;
+ rc = NW_FAILURE;
+ } else {
+ delayed_rx_first[dev] = rx_header->next;
+ if (rx_header->next == NULL)
+ delayed_rx_last[dev] = NULL;
+ ep = rx_header->receiver;
+ tcb = &tct[ep];
+ tca100_synch_send(dev, tcb, rx_header->reply);
+ control->ep = ep;
+ control->time_out = tick[dev] + BASE_TIME_OUT;
+ tcb->rx_control = control;
+ nc_rx_header_deallocate(rx_header);
+ }
+ }
+ return rc;
+}
+
+
+int tca100_poll(int dev) {
+ vol_u_int *status = &((atm_device_t) devct[dev].addr)->sreg;
+ vol_u_int *ctl_set = &((atm_device_t) devct[dev].addr)->creg_set;
+ vol_u_int *rx_counter = &((atm_device_t) devct[dev].addr)->rxcount;
+ register vol_u_int *rx_fifo = &((atm_device_t) devct[dev].addr)->rxfifo[0];
+ register u_int rx_cell_count;
+ register u_int predicted_atm_header = 0;
+ register u_int predicted_sar_header;
+ u_int atm_header, sar_header, predicted_sar_trailer,
+ cs_header, end_count, cs_pad, rx_cell_total, reply,
+ block_length, initial_offset;
+ register vol_u_int *msg;
+ register int msg_count;
+ register int next_synch;
+ register u_int t1, t2;
+ nw_ecb_t ecb, tx_ecb;
+ nw_tcb_t new_tcb, tx_tcb;
+ nw_tcb dummy_tcb_s;
+ nw_tcb_t tcb = &dummy_tcb_s;
+ nw_control_t control;
+ nw_buffer_t buffer;
+ nw_protocol protocol;
+ nw_ep lep, rep;
+ nw_delivery delivery_type = NW_RECEIVE;
+ nw_rx_header_t rx_header;
+ nw_tx_header_t tx_header;
+ int i;
+ u_int tx_seqno, rx_seqno, tx_count, rx_count;
+
+ rx_cell_total = 0;
+ while ((rx_cell_count = *rx_counter & RX_COUNTER_MASK) != 0) {
+ rx_cell_total += rx_cell_count;
+ while (rx_cell_count-- > 0) {
+ atm_header = rx_fifo[0]; /*Check ATM header and SAR header*/
+ sar_header = (rx_fifo[1] & SAR_HEADER_MASK);
+ if (atm_header != predicted_atm_header) {
+ /*Must be cell from a different connection*/
+ if (atm_header & ~(ATM_VPVC_MASK | ATM_HEADER_RSV_BITS)) {
+ atm_header_error:
+ tca_ec[ATM_HEADER]++;
+ if (tca100_verbose)
+ printf("ATM header error %x\n", atm_header);
+ discard_cell:
+ *((char *) rx_fifo) = 0;
+ delivery_type = NW_RECEIVE;
+ continue;
+ } else {
+ t1 = (atm_header & ATM_VPVC_MASK) >> ATM_VPVC_SHIFT;
+ new_tcb = &tct[t1];
+ ecb = &ect[t1];
+
+ /*Switch cached connection*/
+ if (new_tcb->rx_sar_header == 0)
+ goto atm_header_error;
+ tcb->rx_sar_header = predicted_sar_header;
+ tcb->rx_p = (u_int *) msg;
+ tcb->rx_count = msg_count;
+ tcb->rx_next_synch = next_synch;
+ predicted_atm_header = atm_header;
+ tcb = new_tcb;
+ predicted_sar_header = tcb->rx_sar_header;
+ msg = tcb->rx_p;
+ msg_count = tcb->rx_count;
+ next_synch = tcb->rx_next_synch;
+ }
+ }
+
+ if (sar_header != predicted_sar_header) {
+ if ((sar_header ^ predicted_sar_header) == EOM &&
+ ((predicted_sar_header & BOM) || msg_count <= EOM_DATA_SIZE)) {
+ /*Difference on end of message bit only*/
+ predicted_sar_header = sar_header;
+ } else if (sar_header == SSM) { /*MID 0*/
+ cs_header = rx_fifo[2];
+ t1 = rx_fifo[3];
+ if (cs_header == HTONL(8) && t1 == HTONL(NW_SYNCHRONIZATION)) {
+ reply = rx_fifo[4]; /*Synch cell*/
+ if (rx_fifo[5] != cs_header)
+ goto cs_header_error;
+ cs_pad = rx_fifo[6];
+ t1 = rx_fifo[7];
+ t2 = rx_fifo[8];
+ cs_pad |= t1;
+ cs_pad |= t2;
+ t1 = rx_fifo[9];
+ t2 = rx_fifo[10];
+ cs_pad |= t1;
+ cs_pad |= t2;
+ t1 = rx_fifo[11];
+ t2 = rx_fifo[12];
+ cs_pad |= t1;
+ cs_pad |= t2;
+ t1 = rx_fifo[13];
+ if (cs_pad)
+ goto cs_trailer_error;
+ if ((t1 & SAR_TRAILER_MASK) != SYNCH_SEGMENT_TRAILER)
+ goto sar_trailer_error;
+ if (tcb->tx_control == NULL) {
+ tca_ec[SYNCH_ERROR]++;
+ if (tca100_verbose)
+ printf("Synch error %x\n", ntohl(reply));
+ } else {
+ tcb->reply = ntohl(reply);
+
+#ifdef TRACING
+ printf("Received synch ep %d %x\n", ecb->id, tcb->reply);
+#endif
+
+ }
+ continue;
+ } else if (t1 == HTONL(NW_URGENT)) { /*Urgent cell*/
+ delivery_type = NW_RECEIVE_URGENT;
+ goto cs_header_check;
+ } else { /*Bad segment*/
+ goto sar_header_error;
+ }
+ } else if (!(sar_header & ATM_HEADER_CRC_SYNDROME) &&
+ (sar_header & BOM) && (sar_header & SEQ_NO) == 0) {
+ if ((sar_header & MID) == (predicted_sar_header & MID)) {
+ /*Retransmission*/
+ if (tcb->rx_control != NULL) {
+ tcb->rx_control->ep = 0;
+ long_rx_count[dev]--;
+ }
+ nc_buffer_deallocate(tcb->rx_buffer->peer.local_ep,
+ tcb->rx_buffer);
+ predicted_sar_header = sar_header;
+ tca_ec[RX_RETRANSMISSION]++;
+ if (tca100_verbose)
+ printf("Receiving retransmission ep %d sar %x\n",
+ ecb->id, sar_header);
+ } else if (predicted_sar_header & BOM) {
+ /*Sequence number error*/
+ if (tca100_verbose)
+ printf("Sequence error ep %d pred %x real %x\n", ecb->id,
+ predicted_sar_header, sar_header);
+ if (ecb->protocol == NW_SEQ_PACKET) {
+ reply = 0xffff0000 | TCA_SEQ | (predicted_sar_header & MID);
+ tca100_synch_send(dev, tcb, reply);
+ tca_ec[SEQ_ERROR]++;
+ goto discard_cell;
+ } else {
+ predicted_sar_header = sar_header;
+ }
+ } else {
+ goto sar_header_error; /*Badly out of synch*/
+ }
+ } else { /*Cell loss*/
+
+ sar_header_error:
+ if (!(predicted_sar_header & BOM)) {
+ rx_slot_free(dev, tcb->rx_control);
+ nc_buffer_deallocate(tcb->rx_buffer->peer.local_ep,
+ tcb->rx_buffer);
+ predicted_sar_header = SSM | (predicted_sar_header & MID);
+ }
+ tca_ec[SAR_HEADER]++;
+ if (tca100_verbose)
+ printf("SAR header error ep %d pred %x real %x\n", ecb->id,
+ predicted_sar_header, sar_header);
+ goto discard_cell;
+ }
+ }
+
+ if ((predicted_sar_header & SEG_TYPE) == COM) {
+ /*Continuation of message*/
+ if (msg_count <= next_synch) {
+ if (msg_count == next_synch &&
+ msg_count >= CONTINUATION_WINDOW_SIZE) {
+ reply = (msg_count << 16) | TCA_ACK | (predicted_sar_header & MID);
+ tca100_synch_send(dev, tcb, reply);
+ if (msg_count > (CONTINUATION_WINDOW_SIZE + FINAL_WINDOW_SIZE)) {
+ next_synch = msg_count - CONTINUATION_WINDOW_SIZE;
+ } else if (ecb->protocol == NW_SEQ_PACKET) {
+ next_synch = 0;
+ } else {
+ next_synch = -1;
+ }
+ tcb->rx_control->time_out = tick[dev] + BASE_TIME_OUT;
+ } else {
+ rx_slot_free(dev, tcb->rx_control);
+ nc_buffer_deallocate(tcb->rx_buffer->peer.local_ep,
+ tcb->rx_buffer);
+ predicted_sar_header = SSM | (predicted_sar_header & MID);
+ tca_ec[FRAME_ERROR]++;
+ if (tca100_verbose)
+ printf("Frame error ep %d\n", ecb->id);
+ goto discard_cell;
+ }
+ }
+ t1 = rx_fifo[2];
+ t2 = rx_fifo[3];
+ msg[0] = t1;
+ msg[1] = t2;
+ t1 = rx_fifo[4];
+ t2 = rx_fifo[5];
+ msg[2] = t1;
+ msg[3] = t2;
+ t1 = rx_fifo[6];
+ t2 = rx_fifo[7];
+ msg[4] = t1;
+ msg[5] = t2;
+ t1 = rx_fifo[8];
+ t2 = rx_fifo[9];
+ msg[6] = t1;
+ msg[7] = t2;
+ t1 = rx_fifo[10];
+ t2 = rx_fifo[11];
+ msg[8] = t1;
+ msg[9] = t2;
+ t1 = rx_fifo[12];
+ t2 = rx_fifo[13];
+ msg[10] = t1;
+ if ((t2 & SAR_TRAILER_MASK) != FULL_SEGMENT_TRAILER) {
+ t1 = t2;
+ goto sar_trailer_error;
+ }
+ predicted_sar_header = (predicted_sar_header + SEQ_INC) &
+ (SEQ_NO | MID);
+ msg_count -= COM_DATA_SIZE;
+ msg += 11;
+
+ } else if ((predicted_sar_header & BOM) != 0) {
+ cs_header = rx_fifo[2];
+
+ cs_header_check:
+ block_length = msg_count = (((cs_header >> 8) & 0xff00) |
+ (cs_header >> 24));
+ protocol = cs_header & 0x00ff;
+ if (protocol == NW_RAW || protocol == NW_SEQ_PACKET) {
+ lep = ecb->conn->peer.local_ep;
+ rep = ecb->conn->peer.remote_ep;
+ if (delivery_type == NW_RECEIVE)
+ initial_offset = 0;
+ else
+ initial_offset = 4;
+ } else {
+ t1 = rx_fifo[3];
+ block_length -= 4;
+ lep = (t1 >> 8) & 0xff00 | t1 >> 24;
+ rep = (t1 & 0xff00) >> 8 | (t1 & 0x00ff) << 8;
+ if (delivery_type == NW_RECEIVE)
+ initial_offset = 4;
+ else
+ initial_offset = 8;
+ }
+ if (protocol != ecb->protocol || (protocol == NW_DATAGRAM) ||
+ (protocol == NW_LINE && ect[lep].protocol != NW_DATAGRAM) ||
+ ((predicted_sar_header & 0x00ff) << 8) != (cs_header & 0xff00) ||
+ ((delivery_type != NW_RECEIVE) &&
+ msg_count - initial_offset > MTU_URGENT[protocol]) ||
+ msg_count > MTU[protocol] || (msg_count & 0x3)) {
+
+ cs_header_error:
+ if ((protocol != NW_RAW && msg_count > SMALL_WINDOW_SIZE) ||
+ protocol == NW_SEQ_PACKET) {
+ reply = 0xffff0000 | TCA_NAK | (predicted_sar_header & MID);
+ tca100_synch_send(dev, tcb, reply);
+ }
+ tca_ec[CS_HEADER]++;
+ if (tca100_verbose)
+ printf("CS header error ep %d sar %x cs %x\n", ecb->id,
+ predicted_sar_header, cs_header);
+ goto discard_cell;
+ }
+ buffer = nc_buffer_allocate(lep, sizeof(nw_buffer_s) + block_length);
+ if (buffer == NULL) {
+ if ((protocol != NW_RAW && msg_count > SMALL_WINDOW_SIZE) ||
+ protocol == NW_SEQ_PACKET) {
+ reply = 0xffff0000 | TCA_OVR | (predicted_sar_header & MID);
+ tca100_synch_send(dev, tcb, reply);
+ }
+ tca_ec[OVERRUN_ERROR]++;
+ if (tca100_verbose)
+ printf("Overrun error ep %d\n", ecb->id);
+ goto discard_cell;
+ }
+ if (protocol == NW_RAW) {
+ next_synch = -1;
+ } else if (msg_count > SMALL_WINDOW_SIZE) {
+ reply = (msg_count << 16) | TCA_ACK | (predicted_sar_header & MID);
+ if (long_rx_count[dev] >= MAX_LONG_RX) {
+ rx_header = nc_rx_header_allocate();
+ if (rx_header == NULL) {
+ nc_buffer_deallocate(lep, buffer);
+ tca_ec[OVERRUN_ERROR]++;
+ goto discard_cell;
+ }
+ rx_header->buffer = buffer;
+ rx_header->receiver = ecb->id;
+ rx_header->reply = reply;
+ rx_header->time_stamp = tick[dev] + DELAYED_TIME_OUT;
+ rx_header->next = NULL;
+ if (delayed_rx_last[dev] == NULL)
+ delayed_rx_first[dev] = rx_header;
+ else
+ delayed_rx_last[dev]->next = rx_header;
+ delayed_rx_last[dev] = rx_header;
+ } else {
+ tca100_synch_send(dev, tcb, reply);
+ control = &nw_rx_control[dev][0];
+ while (control->ep != 0)
+ control++;
+ control->ep = ecb->id;
+ control->time_out = tick[dev] + BASE_TIME_OUT;
+ tcb->rx_control = control;
+ if (long_rx_count[dev] + long_tx_count[dev] == 0)
+ nc_fast_timer_set(dev);
+ long_rx_count[dev]++;
+ }
+ if (msg_count > INITIAL_WINDOW_SIZE + FINAL_WINDOW_SIZE)
+ next_synch = msg_count - INITIAL_WINDOW_SIZE;
+ else if (protocol == NW_SEQ_PACKET)
+ next_synch = 0;
+ else
+ next_synch = -1;
+ } else if (protocol == NW_SEQ_PACKET) {
+ next_synch = 0;
+ } else {
+ next_synch = -1;
+ }
+ msg = (vol_u_int *) ((char *) buffer + sizeof(nw_buffer_s));
+ tcb->rx_cs_header = cs_header;
+ tcb->rx_buffer = buffer;
+ buffer->buf_next = NULL;
+ buffer->msg_seqno = sar_header & MID;
+ buffer->block_offset = sizeof(nw_buffer_s);
+ buffer->block_length = block_length;
+ buffer->peer.rem_addr_1 = ecb->conn->peer.rem_addr_1;
+ buffer->peer.rem_addr_2 = ecb->conn->peer.rem_addr_2;
+ buffer->peer.local_ep = lep;
+ buffer->peer.remote_ep = rep;
+
+ if ((predicted_sar_header & EOM) == 0) { /*BOM*/
+ if (initial_offset == 0) {
+ t1 = rx_fifo[3];
+ t2 = rx_fifo[4];
+ msg[0] = t1;
+ msg[1] = t2;
+ msg += 2;
+ } else {
+ msg[0] = rx_fifo[4];
+ msg++;
+ }
+ t1 = rx_fifo[5];
+ t2 = rx_fifo[6];
+ msg[0] = t1;
+ msg[1] = t2;
+ t1 = rx_fifo[7];
+ t2 = rx_fifo[8];
+ msg[2] = t1;
+ msg[3] = t2;
+ t1 = rx_fifo[9];
+ t2 = rx_fifo[10];
+ msg[4] = t1;
+ msg[5] = t2;
+ t1 = rx_fifo[11];
+ t2 = rx_fifo[12];
+ msg[6] = t1;
+ t1 = rx_fifo[13];
+ msg[7] = t2;
+ if ((t1 & SAR_TRAILER_MASK) != FULL_SEGMENT_TRAILER)
+ goto sar_trailer_error;
+ msg_count -= BOM_DATA_SIZE;
+ msg += 8;
+ predicted_sar_header = (predicted_sar_header + SEQ_INC) &
+ (SEQ_NO | MID);
+
+ } else { /*SSM*/
+ end_count = msg_count + 4;
+ predicted_sar_trailer = (msg_count + 8) << 26;
+ if (delivery_type != NW_RECEIVE) {
+ msg[0] = NW_URGENT;
+ msg++;
+ }
+ msg_count -= initial_offset;
+ goto EOM_payload;
+ }
+ } else { /*EOM*/
+ end_count = msg_count;
+ predicted_sar_trailer = (msg_count + 4) << 26;
+
+ EOM_payload:
+ if (msg_count & 0x4) {
+ msg[0] = rx_fifo[2];
+ }
+ msg = (vol_u_int *) ((char *) msg + msg_count);
+ /*Fall-through between the cases is intentional*/
+ switch (msg_count >> 3) {
+ case 5:
+ t1 = rx_fifo[2];
+ t2 = rx_fifo[2];
+ msg[-10] = t1;
+ msg[-9] = t2;
+ case 4:
+ t1 = rx_fifo[2];
+ t2 = rx_fifo[2];
+ msg[-8] = t1;
+ msg[-7] = t2;
+ case 3:
+ t1 = rx_fifo[2];
+ t2 = rx_fifo[2];
+ msg[-6] = t1;
+ msg[-5] = t2;
+ case 2:
+ t1 = rx_fifo[2];
+ t2 = rx_fifo[2];
+ msg[-4] = t1;
+ msg[-3] = t2;
+ case 1:
+ t1 = rx_fifo[2];
+ t2 = rx_fifo[2];
+ msg[-2] = t1;
+ msg[-1] = t2;
+ }
+
+ /*CS trailer should be equal to the CS header, followed by
+ padding zeros*/
+ cs_pad = (rx_fifo[2] != tcb->rx_cs_header);
+ /*Fall-through between the cases is intentional*/
+ t1 = t2 = 0;
+ switch (end_count) {
+ case 0:
+ t1 = rx_fifo[2];
+ case 4:
+ t2 = rx_fifo[2];
+ cs_pad |= t1;
+ case 8:
+ t1 = rx_fifo[2];
+ cs_pad |= t2;
+ case 12:
+ t2 = rx_fifo[2];
+ cs_pad |= t1;
+ case 16:
+ t1 = rx_fifo[2];
+ cs_pad |= t2;
+ case 20:
+ t2 = rx_fifo[2];
+ cs_pad |= t1;
+ case 24:
+ t1 = rx_fifo[2];
+ cs_pad |= t2;
+ case 28:
+ t2 = rx_fifo[2];
+ cs_pad |= t1;
+ case 32:
+ t1 = rx_fifo[2];
+ cs_pad |= t2;
+ case 36:
+ t2 = rx_fifo[2];
+ cs_pad |= t1;
+ cs_pad |= t2;
+ }
+ t1 = rx_fifo[13];
+ if (cs_pad != 0) {
+ /*Errors in CS trailer or pad*/
+ cs_trailer_error:
+ tca_ec[CS_TRAILER]++;
+ if (tca100_verbose)
+ printf("CS trailer error ep %d hd %x pad %x\n", ecb->id,
+ tcb->rx_cs_header, cs_pad);
+ goto trailer_error;
+
+ } else if ((t1 & SAR_TRAILER_MASK) != predicted_sar_trailer) {
+ /*Error in SAR trailer or framing*/
+ sar_trailer_error:
+ tca_ec[SAR_TRAILER]++;
+ if (tca100_verbose)
+ printf("SAR trailer error ep %d pred %x real %x\n", ecb->id,
+ predicted_sar_trailer, t1);
+ goto trailer_error;
+
+ } else if (!nc_deliver_result(tcb->rx_buffer->peer.local_ep,
+ delivery_type, (int) tcb->rx_buffer)) {
+ tca_ec[DELIVERY_ERROR]++;
+ if (tca100_verbose)
+ printf("Delivery error ep %d\n", ecb->id);
+
+ trailer_error:
+ if (next_synch >= 0 && !(t1 & HEADER_CRC_ERROR)) {
+ reply = (msg_count << 16) | TCA_NAK | (predicted_sar_header & MID);
+ tca100_synch_send(dev, tcb, reply);
+ }
+ rx_slot_free(dev, tcb->rx_control);
+ nc_buffer_deallocate(tcb->rx_buffer->peer.local_ep,
+ tcb->rx_buffer);
+ predicted_sar_header = SSM | (predicted_sar_header & MID);
+ delivery_type = NW_RECEIVE;
+ } else {
+
+#ifdef TRACING
+ printf("Received correctly ep %d\n", ecb->id);
+#endif
+
+ if (next_synch == 0) {
+ reply = TCA_ACK | (predicted_sar_header & MID);
+ tca100_synch_send(dev, tcb, reply);
+ }
+ rx_slot_free(dev, tcb->rx_control);
+ if (delivery_type != NW_RECEIVE) {
+ delivery_type = NW_RECEIVE;
+ predicted_sar_header = SSM | (predicted_sar_header & MID);
+ } else {
+ predicted_sar_header = (predicted_sar_header + MID_INC) & MID;
+ if (predicted_sar_header == 0)
+ predicted_sar_header = 1;
+ predicted_sar_header |= SSM;
+ }
+ }
+ }
+ }
+
+ control = &nw_tx_control[dev][0];
+ for (i = 0; i < MAX_LONG_TX; i++) {
+ if (control->ep != 0 && tct[control->ep].reply != TCA_SYNCH) {
+ tx_ecb = &ect[control->ep];
+ tx_tcb = &tct[control->ep];
+ rx_seqno = tx_tcb->reply & MID;
+ tx_seqno = tx_tcb->tx_sar_header & MID;
+ rx_count = tx_tcb->reply >> 16;
+ tx_count = tx_tcb->tx_synch;
+ reply = tx_tcb->reply & TCA_SYNCH;
+ if (reply == TCA_ACK) {
+ if (rx_seqno == tx_seqno && rx_count == tx_count) {
+ if (rx_count == 0) {
+#ifdef TRACING
+ printf("Received final ack ep %d\n", tx_ecb->id);
+#endif
+
+ tx_seqno = (tx_seqno + MID_INC) & MID;
+ if (tx_seqno == 0)
+ tx_seqno = 1;
+ tx_tcb->tx_sar_header = tx_seqno;
+ tx_slot_free(dev, control);
+ tx_tcb->reply = NW_SUCCESS;
+ nc_deliver_result(tx_ecb->id, NW_SEND, NW_SUCCESS);
+ } else {
+ if (tca100_window_send(dev, tx_ecb, tx_tcb,
+ FALSE) == NW_SUCCESS) {
+ nc_deliver_result(control->ep, NW_SEND, NW_SUCCESS);
+ tx_tcb->reply = NW_SUCCESS;
+ tx_slot_free(dev, control);
+ } else {
+ control->time_out = tick[dev] + BASE_TIME_OUT;
+ tx_tcb->reply = TCA_SYNCH;
+ }
+ }
+ } else {
+ goto synch_error;
+ }
+ } else if (reply == TCA_OVR) {
+ if (rx_seqno == tx_seqno && rx_count == 0xffff &&
+ ((int) tx_ecb->tx_initial->msg_length -
+ (int) tx_tcb->tx_synch) <= (int) SMALL_WINDOW_SIZE) {
+ nc_deliver_result(control->ep, NW_SEND, NW_OVERRUN);
+ tx_tcb->reply = NW_OVERRUN;
+ tx_slot_free(dev, control);
+ } else {
+ goto synch_error;
+ }
+ } else if (reply == TCA_NAK) {
+ if (rx_seqno == tx_seqno &&
+ (rx_count == tx_count || (rx_count == 0xffff &&
+ ((int) tx_ecb->tx_initial->msg_length -
+ (int) tx_tcb->tx_synch) <= (int) SMALL_WINDOW_SIZE))) {
+ if (++control->retry < MAX_RETRY) {
+ if (tca100_verbose)
+ printf("Sending retransmission ep %d\n", tx_ecb->id);
+ if (tca100_window_send(dev, tx_ecb, tx_tcb,
+ TRUE) == NW_SUCCESS) {
+ nc_deliver_result(control->ep, NW_SEND, NW_SUCCESS);
+ tx_tcb->reply = NW_SUCCESS;
+ tx_slot_free(dev, control);
+ } else {
+ control->time_out = tick[dev] + BASE_TIME_OUT;
+ tx_tcb->reply = TCA_SYNCH;
+ }
+ tca_ec[TX_RETRANSMISSION]++;
+ } else {
+ nc_deliver_result(control->ep, NW_SEND, NW_FAILURE);
+ tx_tcb->reply = NW_FAILURE;
+ tx_slot_free(dev, control);
+ }
+ } else {
+ goto synch_error;
+ }
+ } else if (reply == TCA_SEQ) {
+ if (rx_count == 0xffff && tx_ecb->protocol == NW_SEQ_PACKET &&
+ ((int) tx_ecb->tx_initial->msg_length -
+ (int) tx_tcb->tx_synch) <= (int) SMALL_WINDOW_SIZE &&
+ rx_seqno == ((((tx_seqno + MID_INC) & MID) == 0) ?
+ 1 : tx_seqno + MID_INC)) {
+ tx_tcb->tx_sar_header = rx_seqno;
+ if (tca100_window_send(dev, tx_ecb, tx_tcb,
+ TRUE) == NW_SUCCESS) {
+ nc_deliver_result(control->ep, NW_SEND, NW_SUCCESS);
+ tx_tcb->reply = NW_SUCCESS;
+ tx_slot_free(dev, control);
+ } else {
+ control->time_out = tick[dev] + BASE_TIME_OUT;
+ tx_tcb->reply = TCA_SYNCH;
+ }
+ tca_ec[TX_RETRANSMISSION]++;
+ if (tca100_verbose)
+ printf("Sending seq retransmission ep %d\n", tx_ecb->id);
+ } else {
+ goto synch_error;
+ }
+ } else {
+ synch_error:
+ tca_ec[SYNCH_ERROR]++;
+ tx_tcb->reply = NW_FAILURE;
+ if (tca100_verbose)
+ printf("Synch error\n");
+ }
+ }
+ control++;
+ }
+ }
+ *status = ~(RX_COUNT_INTR | RX_EOM_INTR | RX_TIME_INTR);
+ tcb->rx_sar_header = predicted_sar_header;
+ tcb->rx_p = (u_int *) msg;
+ tcb->rx_count = msg_count;
+ tcb->rx_next_synch = next_synch;
+ *ctl_set = RX_COUNT_INTR;
+ return rx_cell_total;
+}
+
+
+
+void tca100_timer_sweep(int dev) {
+ int i, rt;
+ u_int reply;
+ nw_control_t control;
+ nw_ecb_t ecb;
+ nw_tcb_t tcb;
+ nw_tx_header_t tx_header;
+ nw_rx_header_t rx_header;
+
+ tick[dev]++;
+ control = &nw_rx_control[dev][0];
+ for (i = 0; i < MAX_LONG_RX; i++) {
+ if (control->ep != 0 && control->time_out < tick[dev]) {
+ rx_slot_free(dev, control);
+ tcb = &tct[control->ep];
+ nc_buffer_deallocate(tcb->rx_buffer->peer.local_ep, tcb->rx_buffer);
+ tcb->rx_sar_header = SSM | (tcb->rx_sar_header & MID);
+ }
+ control++;
+ }
+ control = &nw_tx_control[dev][0];
+ for (i = 0; i < MAX_LONG_TX; i++) {
+ if (control->ep != 0 && control->time_out < tick[dev]) {
+ ecb = &ect[control->ep];
+ tcb = &tct[control->ep];
+ if (++control->retry < MAX_RETRY) {
+ if (control->retry == 1)
+ rt = ( /* random() */ + devct[dev].local_addr_2) & 0x000f;
+ else
+ rt = ( /* random() */ + devct[dev].local_addr_1
+ + devct[dev].local_addr_2) & 0x00ff;
+ control->time_out = tick[dev] + BASE_TIME_OUT + rt;
+ tca100_window_send(dev, ecb, tcb, TRUE);
+ tca_ec[TX_RETRANSMISSION]++;
+ } else {
+ nc_deliver_result(control->ep, NW_SEND, NW_TIME_OUT);
+ tx_slot_free(dev, control);
+ }
+ }
+ control++;
+ }
+ if (long_tx_count[dev] + long_rx_count[dev] > 0)
+ nc_fast_timer_set(dev);
+ else
+ tick[dev] = 0;
+}
+
+nw_buffer_t tca100_rpc(nw_ep ep, nw_tx_header_t header, nw_options options) {
+ nw_result rc;
+ nw_buffer_t buf;
+ nw_ecb_t ecb;
+ nw_tcb_t tcb;
+ nw_rx_header_t rx_header;
+ int dev, poll_time, ncells;
+
+ tcb = &tct[ep];
+ ecb = &ect[header->peer.local_ep];
+ dev = NW_DEVICE(header->peer.rem_addr_1);
+ if ((rc = tca100_send(ep, header, options)) == NW_BAD_LENGTH) {
+ buf = NW_BUFFER_ERROR;
+ } else if (rc == NW_QUEUED) {
+ buf = NULL;
+ } else {
+ poll_time = 0;
+ if (rc == NW_SYNCH) {
+ while (tcb->reply == TCA_SYNCH && poll_time < POLL_LIMIT) {
+ ncells = tca100_poll(dev);
+ if (ncells == 0)
+ poll_time += POLL_IDLE_TIME;
+ else
+ poll_time += ncells * POLL_CELL_TIME;
+ }
+ }
+ if (tcb->reply != NW_SUCCESS) {
+ buf = NW_BUFFER_ERROR;
+ } else {
+ while (ecb->rx_first == NULL && poll_time < POLL_LIMIT) {
+ ncells = tca100_poll(dev);
+ if (ncells == 0)
+ poll_time += POLL_IDLE_TIME;
+ else
+ poll_time += ncells * POLL_CELL_TIME;
+ }
+ if (ecb->rx_first == NULL) {
+ buf = NULL;
+ } else {
+ rx_header = ecb->rx_first;
+ buf = rx_header->buffer;
+ ecb->rx_first = rx_header->next;
+ if (ecb->rx_first == NULL)
+ ecb->rx_last = NULL;
+ nc_rx_header_deallocate(rx_header);
+ }
+ }
+ }
+
+ return buf;
+}
+
+
+
+
+
+
+
+
+
diff --git a/chips/tca100_if.h b/chips/tca100_if.h
new file mode 100644
index 00000000..41183ea0
--- /dev/null
+++ b/chips/tca100_if.h
@@ -0,0 +1,89 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*** TCA 100 ATM NETWORK INTERFACE ***/
+
+#ifndef STUB
+#include <chips/nc.h>
+#include <chips/spans.h>
+#include <chips/tca100.h>
+#else
+#include "nc.h"
+#include "spans.h"
+#include "tca100.h"
+#endif
+
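+/* One bookkeeping slot per long transfer in progress: owning endpoint,
+   timeout tick and retry count (see the nw_tx_control/nw_rx_control
+   arrays in tca100_if.c). */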
+typedef struct {
+ nw_ep ep;
+ int time_out;
+ int retry;
+} nw_control_s, *nw_control_t;
+
+
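+/* Per-endpoint transmit/receive state, indexed by endpoint number
+   (see tct[] below). */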
+typedef struct {
+ u_int rx_sar_header;
+ u_int rx_cs_header;
+ u_int *rx_p;
+ u_int rx_count;
+ u_int rx_next_synch;
+ nw_buffer_t rx_buffer;
+ nw_control_t rx_control;
+ u_int tx_atm_header;
+ u_int tx_sar_header;
+ u_int tx_cs_header;
+ u_int *tx_p;
+ u_int tx_msg_count;
+ u_int tx_block_count;
+ u_int tx_synch;
+ u_int tx_queued_count;
+ nw_control_t tx_control;
+ u_int reply;
+} nw_tcb, *nw_tcb_t;
+
+extern nw_tcb tct[MAX_EP];
+
+extern nw_dev_entry_s tca100_entry_table;
+
+extern nw_result tca100_initialize(int dev);
+
+extern nw_result tca100_status(int dev);
+
+extern void tca100_timer_sweep(int dev);
+
+extern int tca100_poll(int dev);
+
+extern nw_result tca100_send(nw_ep ep, nw_tx_header_t header,
+ nw_options options);
+
+extern nw_buffer_t tca100_rpc(nw_ep ep, nw_tx_header_t header,
+ nw_options options);
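+
+/*
+ * Illustrative usage sketch (not part of the original interface): a
+ * blocking send built only from the calls declared above, mirroring
+ * the polling loop of tca100_rpc() in tca100_if.c.  The function name
+ * example_send_and_wait is an assumption; TCA_SYNCH and the POLL_*
+ * constants come from the driver headers.
+ */
+#if 0
+nw_result example_send_and_wait(nw_ep ep, nw_tx_header_t header,
+                                nw_options options) {
+  nw_result rc;
+  int dev, poll_time = 0, ncells;
+
+  dev = NW_DEVICE(header->peer.rem_addr_1);
+  rc = tca100_send(ep, header, options);
+  if (rc != NW_SYNCH)
+    return rc;                   /* NW_SUCCESS, NW_QUEUED or NW_BAD_LENGTH */
+  while (tct[ep].reply == TCA_SYNCH && poll_time < POLL_LIMIT) {
+    ncells = tca100_poll(dev);   /* drain the receive FIFO, process acks */
+    if (ncells == 0)
+      poll_time += POLL_IDLE_TIME;
+    else
+      poll_time += ncells * POLL_CELL_TIME;
+  }
+  return (tct[ep].reply == NW_SUCCESS) ? NW_SUCCESS : NW_FAILURE;
+}
+#endif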
+
+
+
+
+
+
+
diff --git a/chips/vs42x_rb.h b/chips/vs42x_rb.h
new file mode 100644
index 00000000..82b89b06
--- /dev/null
+++ b/chips/vs42x_rb.h
@@ -0,0 +1,267 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vs42x_rb.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 5/91
+ *
+ * This file contains definitions for the VS42X-RB Storage
+ * controller, which includes a Disk controller and a
+ * SCSI controller.
+ */
+
+#ifndef _VS42X_RB_H_
+#define _VS42X_RB_H_
+
+/*
+ * Phys addresses for the Vax3100
+ */
+#define VAX3100_STC_BASE 0x200c0000
+#define VAX3100_STC_RAM_COMPAT 0x200d0000
+#define VAX3100_STC_RAM 0x202d0000
+
+#define VAX3100_STC_HDC9224 0x00000000 /* offsets from BASE */
+#define VAX3100_STC_5380_A 0x00000080
+#define VAX3100_STC_5380_B 0x00000180
+#define VAX3100_STC_DMAREG_A 0x000000a0
+#define VAX3100_STC_DMAREG_B 0x000001a0
+#define VAX3100_STC_RAM_MODE 0x000000e0
+
+#define VAX3100_STC_DMAREG_OFF (0xa0-0x80) /* offset from 5380 */
+
+#define SCI_REG_SIZE 512
+
+/*
+ * RAM Buffer for this storage system
+ */
+#define SCI_RAM_SIZE 128*1024
+#define SCI_RAM_COMPATSIZE 32*1024
+#define SCI_RAM_EXPMODE 0x01 /* char-size mode register */
+
+/*
+ * DMA controller for the SCSI subsystem
+ * (Defines for the NCR 5380 are elsewhere)
+ */
+
+typedef struct {
+ unsigned int sci_dma_adr; /* +000000a0 */
+ char pad0[0xc0-0xa0-4];
+ unsigned int sci_dma_count; /* +000000c0 */
+ unsigned int sci_dma_dir; /* +000000c4 */
+ char pad1[0xe0-0xc4-4];
+ unsigned char sci_dma_rammode;/* +000000e0 */
+} *sci_dmaregs_t;
+
+#define SCI_DMADR_PUT(ptr,adr) (ptr)->sci_dma_adr = (unsigned)(adr) & SCI_DMA_COUNT_MASK;
+#define SCI_DMADR_GET(ptr,adr) (adr) = (ptr)->sci_dma_adr;
+
+#define SCI_DMA_COUNT_MASK 0x0001ffff
+#define SCI_TC_GET(ptr,cnt) {\
+ (cnt) = (ptr)->sci_dma_count;\
+ if ((cnt) & 0x00010000) (cnt) |= ~SCI_DMA_COUNT_MASK;\
+ (cnt) = -(cnt);\
+ }
+#define SCI_TC_PUT(ptr,cnt) (ptr)->sci_dma_count = -(cnt);
+
+#define SCI_DMA_DIR_READ 0x00000001
+#define SCI_DMA_DIR_WRITE 0x00000000
+
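+/*
+ * Illustrative sketch (not from the original sources): programming one
+ * DMA transfer with the macros above.  `regs' is assumed to be a
+ * sci_dmaregs_t mapped at VAX3100_STC_BASE + VAX3100_STC_DMAREG_A;
+ * `ram_off' and `count' are assumptions.
+ */
+#if 0
+static void sci_dma_example(sci_dmaregs_t regs, unsigned int ram_off,
+                            unsigned int count)
+{
+ unsigned int readback;
+
+ SCI_DMADR_PUT(regs, ram_off);          /* buffer address, masked to 17 bits */
+ SCI_TC_PUT(regs, count);               /* count register is written negated */
+ regs->sci_dma_dir = SCI_DMA_DIR_READ;  /* select transfer direction */
+ /* ... start the NCR 5380 transfer and wait for it to complete ... */
+ SCI_TC_GET(regs, readback);            /* read the count back (stored negated) */
+}
+#endif
+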
+/*
+ * Disk controller subsystem (ST506/412), uses a
+ * HDC 9224 Universal Disk Controller chip and
+ * addresses up to 4 disks.
+ */
+typedef struct {
+ unsigned char hdc_rap; /* rw: reg address ptr */
+ char pad0[3];
+ unsigned char hdc_cmd; /* w: controller command */
+#define hdc_status hdc_cmd /* r: interrupt status */
+ char pad1[3];
+} *sci_hdcregs_t;
+
+/*
+ * Register Address Pointer
+ */
+#define UDC_DMA7 0 /* rw: DMA address bits 7:0 */
+#define UDC_DMA15 1 /* rw: DMA address bits 15:8 */
+#define UDC_DMA23 2 /* rw: DMA address bits 23:16 */
+#define UDC_DSECT 3 /* rw: desired sector */
+#define UDC_DHEAD 4 /* wo: desired head */
+#define UDC_CHEAD 4 /* ro: current head */
+#define UDC_DCYL 5 /* wo: desired cylinder */
+#define UDC_CCYL 5 /* ro: current cylinder */
+#define UDC_SCNT 6 /* wo: sector count */
+#define UDC_RTCNT 7 /* wo: retry count */
+#define UDC_MODE 8 /* wo: operating mode */
+#define UDC_CSTAT 8 /* ro: chip status */
+#define UDC_TERM 9 /* wo: termination conditions */
+#define UDC_DSTAT 9 /* ro: drive status */
+#define UDC_DATA 10 /* rw: data */
+
+/*
+ * Controller Commands
+ */
+#define HDCC_RESET 0x00
+
+#define HDCC_SET_REGP 0x40 /* low 4 bits is regnum */
+
+#define HDCC_DESELECT 0x01
+
+#define HDCC_SELECT 0x20
+# define HDCC_SELECT_IDMASK 0x03
+# define HDCC_SELECT_DR_HD 0x04
+# define HDCC_SELECT_DR_SD 0x08
+# define HDCC_SELECT_DR_DD 0x0c
+
+#define HDCC_RESTORE_HD 0x03
+
+#define HDCC_RESTORE_RX 0x02
+
+#define HDCC_STEP 0x04
+# define HDCC_STEP_OUT 0x02
+# define HDCC_STEP_SKWAIT 0x01
+
+#define HDCC_POLL 0x10 /* low 4 bits is drive mask */
+
+#define HDCC_SEEK 0x50
+# define HDCC_SEEK_STEP 0x04
+# define HDCC_SEEK_SKWAIT 0x02
+# define HDCC_SEEK_VFY 0x01
+
+#define HDCC_FORMAT 0x60
+# define HDCC_FORMAT_DDMARK 0x10
+
+#define HDCC_READ_T 0x5a
+# define HDCC_READ_XDATA 0x01
+
+#define HDCC_READ_P 0x58
+
+#define HDCC_READ_L 0x5c
+# define HDCC_READ_L_BYPASS 0x02
+
+#define HDCC_WRITE_P 0x80
+# define HDCC_WRITE_BYPASS 0x40
+# define HDCC_WRITE_DDMARK 0x10
+
+#define HDCC_WRITE_L 0xa0
+
+/*
+ * Interrupt Status Register
+ */
+#define HDCI_BADSECT 0x01
+#define HDCI_OVRUN 0x02
+#define HDCI_RDYCHNG 0x04
+#define HDCI_TERMCOD 0x18
+# define HDC_T_SUCCESS 0x00
+# define HDC_T_EREAD_ID 0x08
+# define HDC_T_EVFY 0x10
+# define HDC_T_EXFER 0x18
+#define HDCI_DONE 0x20
+#define HDCI_DMAREQ 0x40
+#define HDCI_INT 0x80 /* interrupt pending */
+
+/*
+ * Desired/Current Head
+ */
+#define UDC_HEAD_HMASK 0x0f /* desired head no */
+#define UDC_HEAD_CMASK 0x70 /* desired cyl 10:8 */
+#define UDC_HEAD_BADSEC 0x80
+
+/*
+ * Sector Count
+ */
+#define HDC_MAXDATA 256*512
+
+/*
+ * Retry Count
+ */
+#define UDC_RTCNT_MASK 0xf0
+#define UDC_RTCNT_RXDIS 0x08 /* mbz */
+#define UDC_RTCNT_INVRDY 0x04
+#define UDC_RTCNT_MOTOR 0x02
+#define UDC_RTCNT_LOSPEED 0x01
+
+/*
+ * Mode
+ */
+#define UDC_MODE_HD 0x80 /* hard disk mode mb1 */
+#define UDC_MODE_CHKCOD 0x60 /* error checking code */
+# define UDC_MODE_CRC 0x00
+# define UDC_MODE_EECC 0x20 /* NA */
+# define UDC_MODE_IECC 0x40 /* hard disks internal 32 ecc */
+# define UDC_MODE_AECC 0x60 /* NA */
+#define UDC_MODE_DENS 0x10 /* mbz */
+#define UDC_MODE_SRATE 0x07
+# define UDC_MODE_RATE_HD 0x00 /* hard disk */
+# define UDC_MODE_RATE_DD 0x01 /* double den rx23 */
+# define UDC_MODE_RATE_SD 0x02 /* single den rz23 */
+# define UDC_MODE_RATE_RD 0x06 /* restore drive */
+
+#define UDC_MODE_RX23_DD 0x81
+#define UDC_MODE_RX23_SD 0x82
+#define UDC_MODE_RDxx 0xc0
+#define UDC_MODE_RD_RESTORE 0xc6
+
+/*
+ * Status
+ */
+#define UDC_CSTAT_RETRIED 0x80
+#define UDC_CSTAT_ECC 0x40
+#define UDC_CSTAT_ECC_ERR 0x20
+#define UDC_CSTAT_DELDATA 0x10
+#define UDC_CSTAT_SYN_ERR 0x08
+#define UDC_CSTAT_COMP_ERR 0x04
+#define UDC_CSTAT_SELMASK 0x03
+# define UDC_CSTAT_SELHD0 0x00
+# define UDC_CSTAT_SELHD1 0x01
+# define UDC_CSTAT_SELRX 0x02
+# define UDC_CSTAT_SELHD2 0x03
+
+/*
+ * Termination
+ */
+#define UDC_TERM_CRCPRE 0x80 /* mb1 */
+#define UDC_TERM_IDONE 0x20
+#define UDC_TERM_DELDAT 0x10
+#define UDC_TERM_STAT3 0x08 /* mbz */
+#define UDC_TERM_WPROT 0x04
+#define UDC_TERM_IRDCHNG 0x02
+#define UDC_TERM_WFLT 0x01
+
+/*
+ * Drive status
+ */
+#define UDC_DSTAT_SELACK 0x80
+#define UDC_DSTAT_INDEX 0x40
+#define UDC_DSTAT_SKCOM 0x20
+#define UDC_DSTAT_TRK0 0x10
+#define UDC_DSTAT_STAT3 0x08 /* mbz */
+#define UDC_DSTAT_WPROT 0x04
+#define UDC_DSTAT_READY 0x02
+#define UDC_DSTAT_WFLT 0x01
+
+
+#endif /* _VS42X_RB_H_ */
diff --git a/chips/xcfb_hdw.c b/chips/xcfb_hdw.c
new file mode 100644
index 00000000..b2c67d67
--- /dev/null
+++ b/chips/xcfb_hdw.c
@@ -0,0 +1,230 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: xcfb_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/92
+ *
+ * Driver for the MAXine Color Frame Buffer Display,
+ * hardware-level operations.
+ */
+
+#include <xcfb.h>
+#if (NXCFB > 0)
+
+#include <platforms.h>
+
+#include <machine/machspl.h>
+#include <mach/std_types.h>
+#include <chips/busses.h>
+
+#include <chips/screen_defs.h>
+#include <chips/pm_defs.h>
+#include <machine/machspl.h>
+
+#ifdef MAXINE
+
+#include <chips/xcfb_monitor.h>
+
+#include <mips/PMAX/pmag_xine.h>
+#include <mips/PMAX/tc.h>
+#define enable_interrupt(s,o,x) (*tc_enable_interrupt)(s,o,x)
+
+#else /* MAXINE */
+You must say the magic words to get the cookies:
+#define enable_interrupt(slot,on,xx)
+#define IMS332_ADDRESS
+#define VRAM_OFFSET
+#define IMS332_RESET_ADDRESS
+#endif /* MAXINE */
+
+typedef pm_softc_t xcfb_softc_t;
+
+
+/*
+ * Definition of the driver for the auto-configuration program.
+ */
+
+int xcfb_probe(), xcfb_intr();
+static void xcfb_attach();
+
+vm_offset_t xcfb_std[NXCFB] = { 0 };
+struct bus_device *xcfb_info[NXCFB];
+struct bus_driver xcfb_driver =
+ { xcfb_probe, 0, xcfb_attach, 0, xcfb_std, "xcfb", xcfb_info,
+ 0, 0, BUS_INTR_DISABLED};
+
+/*
+ * Probe/Attach functions
+ */
+
+xcfb_probe( /* reg, ui */)
+{
+ static probed_once = 0;
+
+ /*
+ * Probing was really done sweeping the TC long ago
+ */
+ if (tc_probe("xcfb") == 0)
+ return 0;
+ if (probed_once++ > 1)
+ printf("[mappable] ");
+ return 1;
+}
+
+static void
+xcfb_attach(ui)
+ struct bus_device *ui;
+{
+ /* ... */
+ printf(": color display");
+}
+
+
+/*
+ * Interrupt routine
+ */
+
+xcfb_intr(unit,spllevel)
+ spl_t spllevel;
+{
+ /* interrupt has been acknowledged already */
+#if 0
+ splx(spllevel);
+ /* XXX make it load the unsafe things */
+#endif
+}
+
+xcfb_vretrace(xcfb, on)
+ xcfb_softc_t *xcfb;
+{
+ int i;
+
+ for (i = 0; i < NXCFB; i++)
+ if (xcfb_info[i]->address == (vm_offset_t)xcfb->framebuffer)
+ break;
+ if (i == NXCFB) return;
+
+ enable_interrupt(xcfb_info[i]->adaptor, on, 0);
+}
+
+/*
+ * Boot time initialization: must make device
+ * usable as console asap.
+ */
+
+/* some of these values are made up using ad hocery */
+static struct xcfb_monitor_type monitors[] = {
+ { "VRM17", 1024, 768, 1024, 1024, 16, 33,
+ 6, 2, 2, 21, 326, 16, 10, 10 },
+ /* XXX Add VRC16 */
+ { "VR262", 1024, 864, 1024, 1024, 16, 35,
+ 5, 3, 3, 37, 330, 16, 10, 10 },
+ { "VR299", 1024, 864, 1024, 1024, 16, 35,
+ 5, 3, 3, 37, 330, 16, 10, 10 },
+ { 0 }};
+
+/* set from prom command line */
+extern unsigned char *monitor_types[4];
+
+xcfb_monitor_type_t xcfb_get_monitor_type()
+{
+ xcfb_monitor_type_t m;
+
+ m = monitors;
+ if (monitor_types[3])
+ while (m->name) {
+ /* xcfb is always on the motherboard (slot 3),
+ fix if you need */
+ if (!strcmp(monitor_types[3], m->name))
+ break;
+ m++;
+ }
+ if (!m->name) /* the first is the default */
+ m = monitors;
+ return m;
+}
+
+
+extern int
+ xcfb_soft_reset(), xcfb_set_status(),
+ ims332_pos_cursor(), ims332_video_on(),
+ ims332_video_off(), xcfb_vretrace(),
+ pm_get_status(), pm_char_paint(),
+ pm_insert_line(), pm_remove_line(),
+ pm_clear_bitmap(), pm_map_page();
+
+static struct screen_switch xcfb_sw = {
+ screen_noop, /* graphic_open */
+ xcfb_soft_reset, /* graphic_close */
+ xcfb_set_status, /* set_status */
+ pm_get_status, /* get_status */
+ pm_char_paint, /* char_paint */
+ ims332_pos_cursor, /* pos_cursor */
+ pm_insert_line, /* insert_line */
+ pm_remove_line, /* remove_line */
+ pm_clear_bitmap, /* clear_bitmap */
+ ims332_video_on, /* video_on */
+ ims332_video_off, /* video_off */
+ xcfb_vretrace, /* intr_enable */
+ pm_map_page /* map_page */
+};
+
+xcfb_cold_init(unit, up)
+ user_info_t *up;
+{
+ xcfb_softc_t *xcfb;
+ screen_softc_t sc = screen(unit);
+ int base = tc_probe("xcfb");
+ xcfb_monitor_type_t m = xcfb_get_monitor_type();
+
+ bcopy(&xcfb_sw, &sc->sw, sizeof(sc->sw));
+
+ sc->flags |= COLOR_SCREEN; /* XXX should come from m->flags? */
+ sc->frame_scanline_width = m->frame_scanline_width;
+ sc->frame_height = m->frame_height;
+ sc->frame_visible_width = m->frame_visible_width;
+ sc->frame_visible_height = m->frame_visible_height;
+
+ pm_init_screen_params(sc, up);
+ (void) screen_up(unit, up);
+
+ xcfb = pm_alloc(unit, IMS332_ADDRESS, base + VRAM_OFFSET, 0);
+ xcfb->vdac_registers = (char *)IMS332_RESET_ADDRESS;
+
+ screen_default_colors(up);
+
+ xcfb_soft_reset(sc);
+
+ /*
+ * Clearing the screen at boot saves a lot of scrolling
+ * and speeds up booting quite a bit.
+ */
+ screen_blitc( unit, 'C'-'@');/* clear screen */
+}
+
+
+#endif /* NXCFB > 0 */
diff --git a/chips/xcfb_misc.c b/chips/xcfb_misc.c
new file mode 100644
index 00000000..e7a08c33
--- /dev/null
+++ b/chips/xcfb_misc.c
@@ -0,0 +1,246 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: xcfb_misc.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/92
+ *
+ * Driver for the MAXine color framebuffer
+ *
+ */
+
+#include <xcfb.h>
+#if (NXCFB > 0)
+
+/*
+ * NOTE: This driver relies heavily on the pm one.
+ */
+
+#include <device/device_types.h>
+
+#include <chips/screen_defs.h>
+
+#include <chips/xcfb_monitor.h>
+#include <chips/pm_defs.h>
+typedef pm_softc_t xcfb_softc_t;
+
+#include <chips/ims332.h>
+#define ims332 cursor_registers
+
+
+/*
+ * Initialize color map, for kernel use
+ */
+xcfb_init_colormap(sc)
+ screen_softc_t sc;
+{
+ xcfb_softc_t *xcfb = (xcfb_softc_t*)sc->hw_state;
+ user_info_t *up = sc->up;
+ color_map_t Bg_Fg[2];
+ register int i;
+
+ ims332_init_colormap( xcfb->ims332 );
+
+ /* init bg/fg colors */
+ for (i = 0; i < 3; i++) {
+ up->dev_dep_2.pm.Bg_color[i] = 0x00;
+ up->dev_dep_2.pm.Fg_color[i] = 0xff;
+ }
+
+ Bg_Fg[0].red = Bg_Fg[0].green = Bg_Fg[0].blue = 0x00;
+ Bg_Fg[1].red = Bg_Fg[1].green = Bg_Fg[1].blue = 0xff;
+ ims332_cursor_color( xcfb->ims332, Bg_Fg);
+}
+
+/*
+ * Convert the small (pmax-style) cursor sprite into the large format
+ */
+xcfb_small_cursor_to_large(up, cursor)
+ user_info_t *up;
+ cursor_sprite_t cursor;
+{
+ unsigned short new_cursor[32];
+ unsigned char *cursor_bytes;
+ char *sprite;
+ register int i;
+
+ /* Clear out old cursor */
+ bzero( up->dev_dep_2.pm.cursor_sprite,
+ sizeof(up->dev_dep_2.pm.cursor_sprite));
+
+ /* small cursor is 32x2 bytes, fg first */
+ cursor_bytes = (unsigned char *) cursor;
+
+ /* use the upper left corner of the large cursor
+ * as a 64x1 cursor, fg&bg alternated */
+ for (i = 0; i < 32; i++) {
+ register short nc = 0;
+ register unsigned char fg, bg;
+ register int j, k;
+
+ fg = cursor_bytes[i];
+ bg = cursor_bytes[i + 32];
+ bg &= ~fg;
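+ /* Interleave the two planes: each bg bit n of this byte lands in bit
+    2n of nc and the corresponding fg bit in bit 2n+1 (for example,
+    fg = 0x01 and bg = 0x02 give nc = 0x06). */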
+ for (j = 1, k = 0; j < 256; j <<= 1) {
+ nc |= (bg & j) << (k++);
+ nc |= (fg & j) << (k);
+ }
+ new_cursor[i] = nc;
+ }
+
+ /* Now stick it in the proper place */
+
+ cursor_bytes = (unsigned char *) new_cursor;
+ sprite = up->dev_dep_2.pm.cursor_sprite;
+ for (i = 0; i < 64; i += 4) {
+ *sprite++ = cursor_bytes[i + 0];
+ *sprite++ = cursor_bytes[i + 1];
+ *sprite++ = cursor_bytes[i + 2];
+ *sprite++ = cursor_bytes[i + 3];
+ sprite += 12; /* skip rest of the line */
+ }
+}
+
+
+/*
+ * Device-specific set status
+ */
+xcfb_set_status(sc, flavor, status, status_count)
+ screen_softc_t sc;
+ int flavor;
+ dev_status_t status;
+ unsigned int status_count;
+{
+ xcfb_softc_t *xcfb = (xcfb_softc_t*) sc->hw_state;
+
+ switch (flavor) {
+
+ case SCREEN_ADJ_MAPPED_INFO:
+ return pm_set_status(sc, flavor, status, status_count);
+
+ case SCREEN_LOAD_CURSOR:
+
+ if (status_count < sizeof(cursor_sprite_t)/sizeof(int))
+ return D_INVALID_SIZE;
+
+ xcfb_small_cursor_to_large(sc->up, (cursor_sprite_t*) status);
+
+ /* Fall through */
+
+ case SCREEN_LOAD_CURSOR_LONG: /* 3max only */
+
+ sc->flags |= SCREEN_BEING_UPDATED;
+ ims332_cursor_sprite(xcfb->ims332, sc->up->dev_dep_2.pm.cursor_sprite);
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+
+ break;
+
+ case SCREEN_SET_CURSOR_COLOR: {
+ color_map_t c[2];
+ register cursor_color_t *cc = (cursor_color_t*) status;
+
+ c[0].red = cc->Bg_rgb[0];
+ c[0].green = cc->Bg_rgb[1];
+ c[0].blue = cc->Bg_rgb[2];
+ c[1].red = cc->Fg_rgb[0];
+ c[1].green = cc->Fg_rgb[1];
+ c[1].blue = cc->Fg_rgb[2];
+
+ sc->flags |= SCREEN_BEING_UPDATED;
+ ims332_cursor_color (xcfb->ims332, c );
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+
+ break;
+ }
+
+ case SCREEN_SET_CMAP_ENTRY: {
+ color_map_entry_t *e = (color_map_entry_t*) status;
+
+ if (e->index < 256) {
+ sc->flags |= SCREEN_BEING_UPDATED;
+ ims332_load_colormap_entry( xcfb->ims332, e->index, &e->value);
+ sc->flags &= ~SCREEN_BEING_UPDATED;
+ }
+ break;
+ }
+
+ default:
+ return D_INVALID_OPERATION;
+ }
+ return D_SUCCESS;
+}
+
+/*
+ * Hardware initialization
+ */
+xcfb_init_screen(xcfb)
+ xcfb_softc_t *xcfb;
+{
+ extern xcfb_monitor_type_t xcfb_get_monitor_type();
+
+ ims332_init( xcfb->ims332, xcfb->vdac_registers, xcfb_get_monitor_type());
+}
+
+/*
+ * Do what's needed when X exits
+ */
+xcfb_soft_reset(sc)
+ screen_softc_t sc;
+{
+ xcfb_softc_t *xcfb = (xcfb_softc_t*) sc->hw_state;
+ user_info_t *up = sc->up;
+ extern cursor_sprite_t dc503_default_cursor;
+
+ /*
+ * Restore params in mapped structure
+ */
+ pm_init_screen_params(sc,up);
+ up->row = up->max_row - 1;
+
+ up->dev_dep_2.pm.x26 = 2; /* you do not want to know */
+ up->dev_dep_1.pm.x18 = (short*)2;
+
+ /*
+ * Restore RAMDAC chip to default state
+ */
+ xcfb_init_screen(xcfb);
+
+ /*
+ * Load kernel's cursor sprite: just use the same pmax one
+ */
+ xcfb_small_cursor_to_large(up, dc503_default_cursor);
+ ims332_cursor_sprite(xcfb->ims332, up->dev_dep_2.pm.cursor_sprite);
+
+ /*
+ * Color map and cursor color
+ */
+ xcfb_init_colormap(sc);
+}
+
+
+
+
+#endif /* NXCFB > 0 */
diff --git a/chips/xcfb_monitor.h b/chips/xcfb_monitor.h
new file mode 100644
index 00000000..50dc9c90
--- /dev/null
+++ b/chips/xcfb_monitor.h
@@ -0,0 +1,56 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * Copyright (c) 1992 Helsinki University of Technology
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND HELSINKI UNIVERSITY OF TECHNOLOGY ALLOW FREE USE
+ * OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON AND
+ * HELSINKI UNIVERSITY OF TECHNOLOGY DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: xcfb_monitor.h
+ * Author: Jukka Partanen, Helsinki University of Technology 1992.
+ *
+ * A type describing the physical properties of a monitor
+ */
+
+#ifndef _XCFB_MONITOR_H_
+#define _XCFB_MONITOR_H_
+
+typedef struct xcfb_monitor_type {
+ char *name;
+ short frame_visible_width; /* pixels */
+ short frame_visible_height;
+ short frame_scanline_width;
+ short frame_height;
+ short half_sync; /* screen units (= 4 pixels) */
+ short back_porch;
+ short v_sync; /* lines */
+ short v_pre_equalize;
+ short v_post_equalize;
+ short v_blank;
+ short line_time; /* screen units */
+ short line_start;
+ short mem_init; /* some units */
+ short xfer_delay;
+} *xcfb_monitor_type_t;
+
+#endif /* _XCFB_MONITOR_H_ */
diff --git a/ddb/db_access.c b/ddb/db_access.c
new file mode 100644
index 00000000..7a391446
--- /dev/null
+++ b/ddb/db_access.c
@@ -0,0 +1,137 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h> /* type definitions */
+#include <machine/setjmp.h>
+#include <kern/task.h>
+#include <ddb/db_access.h>
+
+
+
+/*
+ * Access unaligned data items on aligned (longword)
+ * boundaries.
+ */
+
+extern void db_read_bytes(); /* machine-dependent */
+extern void db_write_bytes(); /* machine-dependent */
+
+int db_access_level = DB_ACCESS_LEVEL;
+
+/*
+ * This table is for sign-extending things.
+ * Therefore its entries are signed, and yes
+ * they are in fact negative numbers.
+ * So don't you put no more Us in it. Or Ls either.
+ * Otherwise there is no point having it, n'est-ce pas ?
+ */
+static int db_extend[sizeof(int)+1] = { /* table for sign-extending */
+ 0,
+ 0xFFFFFF80,
+ 0xFFFF8000,
+ 0xFF800000,
+ 0x80000000
+};
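+
+/*
+ * Worked example: on a little-endian machine, fetching a 2-byte signed
+ * value whose bytes are { 0x00, 0x80 } assembles value = 0x8000 below;
+ * since 0x8000 & db_extend[2] (0xFFFF8000) is non-zero, the table entry
+ * is or-ed in and the result is 0xFFFF8000, i.e. -32768.
+ */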
+
+db_expr_t
+db_get_task_value(addr, size, is_signed, task)
+ db_addr_t addr;
+ register int size;
+ boolean_t is_signed;
+ task_t task;
+{
+ char data[sizeof(db_expr_t)];
+ register db_expr_t value;
+ register int i;
+
+ db_read_bytes((void*)addr, size, data, task);
+
+ value = 0;
+#if BYTE_MSF
+ for (i = 0; i < size; i++)
+#else /* BYTE_LSF */
+ for (i = size - 1; i >= 0; i--)
+#endif
+ {
+ value = (value << 8) + (data[i] & 0xFF);
+ }
+
+ if (size <= sizeof(int)) {
+ if (is_signed && (value & db_extend[size]) != 0)
+ value |= db_extend[size];
+ }
+ return (value);
+}
+
+void
+db_put_task_value(addr, size, value, task)
+ db_addr_t addr;
+ register int size;
+ register db_expr_t value;
+ task_t task;
+{
+ char data[sizeof(db_expr_t)];
+ register int i;
+
+#if BYTE_MSF
+ for (i = size - 1; i >= 0; i--)
+#else /* BYTE_LSF */
+ for (i = 0; i < size; i++)
+#endif
+ {
+ data[i] = value & 0xFF;
+ value >>= 8;
+ }
+
+ db_write_bytes((void*)addr, size, data, task);
+}
+
+db_expr_t
+db_get_value(addr, size, is_signed)
+ db_addr_t addr;
+ int size;
+ boolean_t is_signed;
+{
+ return(db_get_task_value(addr, size, is_signed, TASK_NULL));
+}
+
+void
+db_put_value(addr, size, value)
+ db_addr_t addr;
+ int size;
+ db_expr_t value;
+{
+ db_put_task_value(addr, size, value, TASK_NULL);
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_access.h b/ddb/db_access.h
new file mode 100644
index 00000000..c01b6ce9
--- /dev/null
+++ b/ddb/db_access.h
@@ -0,0 +1,73 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+/*
+ * Data access functions for debugger.
+ */
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_task_thread.h>
+#include "vm_param.h"
+
+/* implementation dependent access capability */
+#define DB_ACCESS_KERNEL 0 /* only kernel space */
+#define DB_ACCESS_CURRENT 1 /* kernel or current task space */
+#define DB_ACCESS_ANY 2 /* any space */
+
+#ifndef DB_ACCESS_LEVEL
+#define DB_ACCESS_LEVEL DB_ACCESS_KERNEL
+#endif /* DB_ACCESS_LEVEL */
+
+#ifndef DB_VALID_KERN_ADDR
+#define DB_VALID_KERN_ADDR(addr) ((addr) >= VM_MIN_KERNEL_ADDRESS \
+ && (addr) < VM_MAX_KERNEL_ADDRESS)
+#define DB_VALID_ADDRESS(addr,user) ((user != 0) ^ DB_VALID_KERN_ADDR(addr))
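+/* i.e. a user-space address is valid iff it is not a kernel address,
+   and a kernel address is valid iff it is */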
+#define DB_PHYS_EQ(task1,addr1,task2,addr2) 0
+#define DB_CHECK_ACCESS(addr,size,task) db_is_current_task(task)
+#endif /* DB_VALID_KERN_ADDR */
+
+extern int db_access_level;
+
+extern db_expr_t db_get_value( db_addr_t addr,
+ int size,
+ boolean_t is_signed );
+
+extern void db_put_value( db_addr_t addr,
+ int size,
+ db_expr_t value );
+
+extern db_expr_t db_get_task_value( db_addr_t addr,
+ int size,
+ boolean_t is_signed,
+ task_t task );
+
+extern void db_put_task_value( db_addr_t addr,
+ int size,
+ db_expr_t value,
+ task_t task );
diff --git a/ddb/db_aout.c b/ddb/db_aout.c
new file mode 100644
index 00000000..6cb8294c
--- /dev/null
+++ b/ddb/db_aout.c
@@ -0,0 +1,507 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+#include "mach_kdb.h"
+#if MACH_KDB
+
+/*
+ * Symbol table routines for a.out format files.
+ */
+
+#include <mach/std_types.h>
+#include <machine/db_machdep.h> /* data types */
+#include <ddb/db_sym.h>
+
+#ifndef DB_NO_AOUT
+
+#include <ddb/nlist.h> /* a.out symbol table */
+#include <ddb/stab.h>
+
+#define private static
+
+/*
+ * An a.out symbol table as loaded into the kernel debugger:
+ *
+ * symtab -> size of symbol entries, in bytes
+ * sp -> first symbol entry
+ * ...
+ * ep -> last symbol entry + 1
+ * strtab == start of string table
+ * size of string table in bytes,
+ * including this word
+ * -> strings
+ */
+
+/*
+ * Find pointers to the start and end of the symbol entries,
+ * given a pointer to the start of the symbol table.
+ */
+#define db_get_aout_symtab(symtab, sp, ep) \
+ (sp = (struct nlist *)((vm_offset_t *)(symtab) + 1), \
+ ep = (struct nlist *)((char *)sp + *((int*)symtab)))
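+/*
+ * For instance, on a 32-bit kernel (sizeof(vm_offset_t) == 4) where
+ * *(int *)symtab is 0x6000, the symbol entries run from symtab + 4 to
+ * symtab + 4 + 0x6000, and the string table, whose first word is its
+ * own size in bytes, starts right after them.
+ */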
+
+boolean_t
+aout_db_sym_init(symtab, esymtab, name, task_addr)
+ char * symtab; /* pointer to start of symbol table */
+ char * esymtab; /* pointer to end of string table,
+ for checking - may be rounded up to
+ integer boundary */
+ char * name;
+ char * task_addr; /* use for this task only */
+{
+ register struct nlist *sym_start, *sym_end;
+ register struct nlist *sp;
+ register char * strtab;
+ register int strlen;
+ char * estrtab;
+
+ db_get_aout_symtab(symtab, sym_start, sym_end);
+
+ strtab = (char *)sym_end;
+ strlen = *(int *)strtab;
+ estrtab = strtab + strlen;
+
+#define round_to_size(x) \
+ (((vm_offset_t)(x) + sizeof(vm_size_t) - 1) & ~(sizeof(vm_size_t) - 1))
+
+ if (round_to_size(estrtab) != round_to_size(esymtab)) {
+ db_printf("[ %s symbol table not valid ]\n", name);
+ return (FALSE);
+ }
+
+#undef round_to_size
+
+ for (sp = sym_start; sp < sym_end; sp++) {
+ register long strx;
+ strx = sp->n_un.n_strx;
+ if (strx != 0) {
+ if (strx > strlen) {
+ db_printf("Bad string table index (%#x)\n", strx);
+ sp->n_un.n_name = 0;
+ continue;
+ }
+ sp->n_un.n_name = strtab + strx;
+ }
+ }
+
+ if (db_add_symbol_table(SYMTAB_AOUT,
+ (char *)sym_start,
+ (char *)sym_end,
+ name,
+ symtab,
+ task_addr))
+ {
+ /* Successfully added symbol table */
+ db_printf("[ preserving %d bytes of %s symbol table ]\n",
+ esymtab - (char *)symtab, name);
+ return TRUE;
+ }
+ else
+ return FALSE;
+}
+
+/*
+ * Check whether the name looks like a file name (i.e. matches the xxxx.x pattern).
+ */
+private boolean_t
+aout_db_is_filename(name)
+ register char *name;
+{
+ while (*name) {
+ if (*name == '.') {
+ if (name[1])
+ return(TRUE);
+ }
+ name++;
+ }
+ return(FALSE);
+}
+
+/*
+ * Special comparison of a name against the name stored in a symbol table entry.
+ */
+private boolean_t
+aout_db_eq_name(sp, name)
+ struct nlist *sp;
+ char *name;
+{
+ register char *s1, *s2;
+
+ s1 = sp->n_un.n_name;
+ s2 = name;
+ if (*s1 == '_' && *s2 && *s2 != '_')
+ s1++;
+ while (*s2) {
+ if (*s1++ != *s2++) {
+ /*
+ * check .c .o file name comparison case
+ */
+ if (*s2 == 0 && sp->n_un.n_name <= s1 - 2
+ && s1[-2] == '.' && s1[-1] == 'o')
+ return(TRUE);
+ return(FALSE);
+ }
+ }
+ /*
+ * do special check for
+ * xxx:yyy for N_FUN
+ * xxx.ttt for N_DATA and N_BSS
+ */
+ return(*s1 == 0 || (*s1 == ':' && sp->n_type == N_FUN) ||
+ (*s1 == '.' && (sp->n_type == N_DATA || sp->n_type == N_BSS)));
+}
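+
+/*
+ * Illustrative matches under the rules above (symbol names invented):
+ * symbol "_printf" matches the name "printf" (leading '_' skipped);
+ * a file symbol "foo.o" matches the name "foo.c" (the .c/.o case);
+ * an N_FUN symbol "_bar:F1" matches "bar"; an N_DATA symbol "_baz.0"
+ * matches "baz".
+ */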
+
+/*
+ * Search a symbol table by name and type.
+ * fp (in/out): last text file-name symbol entry found.
+ */
+private struct nlist *
+aout_db_search_name(sp, ep, name, type, fp)
+ register struct nlist *sp;
+ struct nlist *ep;
+ char *name;
+ int type;
+ struct nlist **fp;
+{
+ struct nlist *file_sp = *fp;
+ struct nlist *found_sp = 0;
+
+ for ( ; sp < ep; sp++) {
+ if (sp->n_type == N_TEXT && aout_db_is_filename(sp->n_un.n_name))
+ *fp = sp;
+ if (type) {
+ if (sp->n_type == type) {
+ if (aout_db_eq_name(sp, name))
+ return(sp);
+ }
+ if (sp->n_type == N_SO)
+ *fp = sp;
+ continue;
+ }
+ if (sp->n_type & N_STAB)
+ continue;
+ if (sp->n_un.n_name && aout_db_eq_name(sp, name)) {
+ /*
+		 * In case of a qualified search by file,
+		 * return it immediately after a quick check.
+		 * Otherwise, prefer an external symbol.
+ */
+ if (file_sp) {
+ if ((file_sp == *fp) || (sp->n_type & N_EXT))
+ return(sp);
+ } else if (sp->n_type & N_EXT)
+ return(sp);
+ else
+ found_sp = sp;
+ }
+ }
+ return(found_sp);
+}
+
+/*
+ * search a symbol with file, func and line qualification
+ */
+private db_sym_t
+aout_db_qualified_search(stab, file, sym, line)
+ db_symtab_t *stab;
+ char *file;
+ char *sym;
+ int line;
+{
+ register struct nlist *sp = (struct nlist *)stab->start;
+ struct nlist *ep = (struct nlist *)stab->end;
+ struct nlist *fp = 0;
+ struct nlist *found_sp;
+ unsigned long func_top;
+ boolean_t in_file;
+
+ if (file == 0 && sym == 0)
+ return(0);
+ if (file) {
+ if ((sp = aout_db_search_name(sp, ep, file, N_TEXT, &fp)) == 0)
+ return(0);
+ }
+ if (sym) {
+ sp = aout_db_search_name(sp, ep, sym, (line > 0)? N_FUN: 0, &fp);
+ if (sp == 0)
+ return(0);
+ }
+ if (line > 0) {
+ if (file && !aout_db_eq_name(fp, file))
+ return(0);
+ found_sp = 0;
+ if (sp->n_type == N_FUN) {
+ /*
+ * qualified by function name
+ * search backward because line number entries
+ * for the function are above it in this case.
+ */
+ func_top = sp->n_value;
+ for (sp--; sp >= (struct nlist *)stab->start; sp--) {
+ if (sp->n_type != N_SLINE)
+ continue;
+ if (sp->n_value < func_top)
+ break;
+ if (sp->n_desc <= line) {
+ if (found_sp == 0 || found_sp->n_desc < sp->n_desc)
+ found_sp = sp;
+ if (sp->n_desc == line)
+ break;
+ }
+ }
+ if (sp->n_type != N_SLINE || sp->n_value < func_top)
+ return(0);
+ } else {
+ /*
+ * qualified by only file name
+ * search forward in this case
+ */
+ in_file = TRUE;
+ for (sp++; sp < ep; sp++) {
+ if (sp->n_type == N_TEXT
+ && aout_db_is_filename(sp->n_un.n_name))
+ break; /* enter into another file */
+ if (sp->n_type == N_SOL) {
+ in_file = aout_db_eq_name(sp, file);
+ continue;
+ }
+ if (!in_file || sp->n_type != N_SLINE)
+ continue;
+ if (sp->n_desc <= line) {
+ if (found_sp == 0 || found_sp->n_desc < sp->n_desc)
+ found_sp = sp;
+ if (sp->n_desc == line)
+ break;
+ }
+ }
+ }
+ sp = found_sp;
+ }
+ return((db_sym_t) sp);
+}
+
+/*
+ * lookup symbol by name
+ */
+db_sym_t
+aout_db_lookup(stab, symstr)
+ db_symtab_t *stab;
+ char * symstr;
+{
+ db_sym_t db_sym_parse_and_lookup();
+
+ return(db_sym_parse_and_lookup(aout_db_qualified_search, stab, symstr));
+}
+
+db_sym_t
+aout_db_search_symbol(symtab, off, strategy, diffp)
+ db_symtab_t * symtab;
+ register
+ db_addr_t off;
+ db_strategy_t strategy;
+ db_expr_t *diffp; /* in/out */
+{
+ register unsigned long diff = *diffp;
+ register struct nlist *symp = 0;
+ register struct nlist *sp, *ep;
+
+ sp = (struct nlist *)symtab->start;
+ ep = (struct nlist *)symtab->end;
+
+ for (; sp < ep; sp++) {
+ if (sp->n_un.n_name == 0)
+ continue;
+ if ((sp->n_type & N_STAB) != 0)
+ continue;
+ if (strategy == DB_STGY_XTRN && (sp->n_type & N_EXT) == 0)
+ continue;
+ if (off >= sp->n_value) {
+
+ unsigned int type = sp->n_type;
+
+ if (type == N_FN) continue;
+ if (off - sp->n_value < diff) {
+ diff = off - sp->n_value;
+ symp = sp;
+ if (diff == 0 && (type & N_EXT))
+ break;
+ }
+ else if (off - sp->n_value == diff) {
+ if (symp == 0)
+ symp = sp;
+ else if ((symp->n_type & N_EXT) == 0 &&
+ (type & N_EXT) != 0)
+ symp = sp; /* pick the external symbol */
+ }
+ }
+ }
+ if (symp == 0) {
+ *diffp = off;
+ }
+ else {
+ *diffp = diff;
+ }
+ return ((db_sym_t)symp);
+}
+
+/*
+ * Return the name and value for a symbol.
+ */
+void
+aout_db_symbol_values(sym, namep, valuep)
+ db_sym_t sym;
+ char **namep;
+ db_expr_t *valuep;
+{
+ register struct nlist *sp;
+
+ sp = (struct nlist *)sym;
+ if (namep)
+ *namep = sp->n_un.n_name;
+ if (valuep)
+ *valuep = sp->n_value;
+}
+
+#define X_DB_MAX_DIFF 8 /* maximum allowable diff at the end of line */
+
+/*
+ * search symbol by value
+ */
+private boolean_t
+aout_db_search_by_addr(stab, addr, file, func, line, diff)
+ db_symtab_t *stab;
+ register vm_offset_t addr;
+ char **file;
+ char **func;
+ int *line;
+ unsigned long *diff;
+{
+ register struct nlist *sp;
+ register struct nlist *line_sp, *func_sp, *file_sp, *line_func;
+ register vm_size_t func_diff, line_diff;
+ boolean_t found_line = FALSE;
+ struct nlist *ep = (struct nlist *)stab->end;
+
+ line_sp = func_sp = file_sp = line_func = 0;
+ *file = *func = 0;
+ *line = 0;
+ func_diff = line_diff = ~0;
+ for (sp = (struct nlist *)stab->start; sp < ep; sp++) {
+ switch(sp->n_type) {
+ case N_SLINE:
+ if (sp->n_value <= addr) {
+ if (line_sp == 0 || line_diff >= addr - sp->n_value) {
+ if (line_func)
+ line_func = 0;
+ line_sp = sp;
+ line_diff = addr - sp->n_value;
+ }
+ }
+ if (sp->n_value >= addr && line_sp)
+ found_line = TRUE;
+ continue;
+ case N_FUN:
+ if ((found_line || (line_sp && line_diff < X_DB_MAX_DIFF))
+ && line_func == 0)
+ line_func = sp;
+ continue;
+ case N_SO:
+ if (sp->n_value > addr)
+ continue;
+ if (file_sp == 0 || file_sp->n_value <= sp->n_value)
+ file_sp = sp;
+ continue;
+ case N_TEXT:
+ if (aout_db_is_filename(sp->n_un.n_name)) {
+ if (sp->n_value > addr)
+ continue;
+ if (file_sp == 0 || file_sp->n_value <= sp->n_value)
+ file_sp = sp;
+ } else if (sp->n_value <= addr &&
+ (func_sp == 0 || func_diff > addr - sp->n_value)) {
+ func_sp = sp;
+ func_diff = addr - sp->n_value;
+ }
+ continue;
+ case N_TEXT|N_EXT:
+ if (sp->n_value <= addr &&
+ (func_sp == 0 || func_diff >= addr - sp->n_value)) {
+ func_sp = sp;
+ func_diff = addr - sp->n_value;
+ if (func_diff == 0 && file_sp && func_sp)
+ break;
+ }
+ default:
+ continue;
+ }
+ break;
+ }
+ if (line_sp) {
+ if (line_func == 0 || func_sp == 0
+ || line_func->n_value != func_sp->n_value)
+ line_sp = 0;
+ }
+ if (file_sp) {
+ *diff = addr - file_sp->n_value;
+ *file = file_sp->n_un.n_name;
+ }
+ if (func_sp) {
+ *diff = addr - func_sp->n_value;
+ *func = (func_sp->n_un.n_name[0] == '_')?
+ func_sp->n_un.n_name + 1: func_sp->n_un.n_name;
+ }
+ if (line_sp) {
+ *diff = addr - line_sp->n_value;
+ *line = line_sp->n_desc;
+ }
+ return(file_sp || func_sp || line_sp);
+}
+
+/*
+ * Find filename and lineno within, given the current pc.
+ */
+boolean_t
+aout_db_line_at_pc(stab, sym, file, line, pc)
+ db_symtab_t *stab;
+ db_sym_t sym;
+ char **file;
+ int *line;
+ db_expr_t pc;
+{
+ char *func;
+ unsigned long diff;
+ boolean_t found;
+
+ found = aout_db_search_by_addr(stab,(vm_offset_t)pc,file,&func,line,&diff);
+ return(found && func && *file);
+}
+
+#endif /* DB_NO_AOUT */
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_break.c b/ddb/db_break.c
new file mode 100644
index 00000000..d0ce1fc2
--- /dev/null
+++ b/ddb/db_break.c
@@ -0,0 +1,733 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+#include "mach_kdb.h"
+#if MACH_KDB
+
+
+/*
+ * Breakpoints.
+ */
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_break.h>
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_command.h>
+#include <ddb/db_task_thread.h>
+
+#define NBREAKPOINTS 100
+#define NTHREAD_LIST (NBREAKPOINTS*3)
+
+struct db_breakpoint db_break_table[NBREAKPOINTS];
+db_breakpoint_t db_next_free_breakpoint = &db_break_table[0];
+db_breakpoint_t db_free_breakpoints = 0;
+db_breakpoint_t db_breakpoint_list = 0;
+
+static struct db_thread_breakpoint db_thread_break_list[NTHREAD_LIST];
+static db_thread_breakpoint_t db_free_thread_break_list = 0;
+static boolean_t db_thread_break_init = FALSE;
+static int db_breakpoint_number = 0;
+
+db_breakpoint_t
+db_breakpoint_alloc()
+{
+ register db_breakpoint_t bkpt;
+
+ if ((bkpt = db_free_breakpoints) != 0) {
+ db_free_breakpoints = bkpt->link;
+ return (bkpt);
+ }
+ if (db_next_free_breakpoint == &db_break_table[NBREAKPOINTS]) {
+ db_printf("All breakpoints used.\n");
+ return (0);
+ }
+ bkpt = db_next_free_breakpoint;
+ db_next_free_breakpoint++;
+
+ return (bkpt);
+}
+
+void
+db_breakpoint_free(bkpt)
+ register db_breakpoint_t bkpt;
+{
+ bkpt->link = db_free_breakpoints;
+ db_free_breakpoints = bkpt;
+}
+
+static int
+db_add_thread_breakpoint(bkpt, task_thd, count, task_bpt)
+ register db_breakpoint_t bkpt;
+ vm_offset_t task_thd;
+	int		count;
+	boolean_t	task_bpt;
+{
+ register db_thread_breakpoint_t tp;
+
+ if (db_thread_break_init == FALSE) {
+ for (tp = db_thread_break_list;
+ tp < &db_thread_break_list[NTHREAD_LIST-1]; tp++)
+ tp->tb_next = tp+1;
+ tp->tb_next = 0;
+ db_free_thread_break_list = db_thread_break_list;
+ db_thread_break_init = TRUE;
+ }
+ if (db_free_thread_break_list == 0)
+ return (-1);
+ tp = db_free_thread_break_list;
+ db_free_thread_break_list = tp->tb_next;
+ tp->tb_is_task = task_bpt;
+ tp->tb_task_thd = task_thd;
+ tp->tb_count = count;
+ tp->tb_init_count = count;
+ tp->tb_cond = 0;
+ tp->tb_number = ++db_breakpoint_number;
+ tp->tb_next = bkpt->threads;
+ bkpt->threads = tp;
+ return(0);
+}
+
+static int
+db_delete_thread_breakpoint(bkpt, task_thd)
+ register db_breakpoint_t bkpt;
+ vm_offset_t task_thd;
+{
+ register db_thread_breakpoint_t tp;
+ register db_thread_breakpoint_t *tpp;
+ void db_cond_free();
+
+ if (task_thd == 0) {
+ /* delete all the thread-breakpoints */
+
+ for (tpp = &bkpt->threads; (tp = *tpp) != 0; tpp = &tp->tb_next)
+ db_cond_free(tp);
+
+ *tpp = db_free_thread_break_list;
+ db_free_thread_break_list = bkpt->threads;
+ bkpt->threads = 0;
+ return 0;
+ } else {
+ /* delete the specified thread-breakpoint */
+
+ for (tpp = &bkpt->threads; (tp = *tpp) != 0; tpp = &tp->tb_next)
+ if (tp->tb_task_thd == task_thd) {
+ db_cond_free(tp);
+ *tpp = tp->tb_next;
+ tp->tb_next = db_free_thread_break_list;
+ db_free_thread_break_list = tp;
+ return 0;
+ }
+
+ return -1; /* not found */
+ }
+}
+
+static db_thread_breakpoint_t
+db_find_thread_breakpoint(bkpt, thread)
+ db_breakpoint_t bkpt;
+ thread_t thread;
+{
+ register db_thread_breakpoint_t tp;
+ register task_t task = (thread == THREAD_NULL)? TASK_NULL: thread->task;
+
+ for (tp = bkpt->threads; tp; tp = tp->tb_next) {
+ if (tp->tb_is_task) {
+ if (tp->tb_task_thd == (vm_offset_t)task)
+ break;
+ continue;
+ }
+ if (tp->tb_task_thd == (vm_offset_t)thread || tp->tb_task_thd == 0)
+ break;
+ }
+ return(tp);
+}
+
+db_thread_breakpoint_t
+db_find_thread_breakpoint_here(task, addr)
+ task_t task;
+ db_addr_t addr;
+{
+ db_breakpoint_t bkpt;
+
+ bkpt = db_find_breakpoint(task, (db_addr_t)addr);
+ if (bkpt == 0)
+ return(0);
+ return(db_find_thread_breakpoint(bkpt, current_thread()));
+}
+
+db_thread_breakpoint_t
+db_find_breakpoint_number(num, bkptp)
+ int num;
+ db_breakpoint_t *bkptp;
+{
+ register db_thread_breakpoint_t tp;
+ register db_breakpoint_t bkpt;
+
+ for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
+ for (tp = bkpt->threads; tp; tp = tp->tb_next) {
+ if (tp->tb_number == num) {
+ if (bkptp)
+ *bkptp = bkpt;
+ return(tp);
+ }
+ }
+ }
+ return(0);
+}
+
+static void
+db_force_delete_breakpoint(bkpt, task_thd, is_task)
+ db_breakpoint_t bkpt;
+ vm_offset_t task_thd;
+ boolean_t is_task;
+{
+ db_printf("deleted a stale breakpoint at ");
+ if (bkpt->task == TASK_NULL || db_lookup_task(bkpt->task) >= 0)
+ db_task_printsym(bkpt->address, DB_STGY_PROC, bkpt->task);
+ else
+ db_printf("%#X", bkpt->address);
+ if (bkpt->task)
+ db_printf(" in task %X", bkpt->task);
+ if (task_thd)
+ db_printf(" for %s %X", (is_task)? "task": "thread", task_thd);
+ db_printf("\n");
+ db_delete_thread_breakpoint(bkpt, task_thd);
+}
+
+void
+db_check_breakpoint_valid()
+{
+ register db_thread_breakpoint_t tbp, tbp_next;
+ register db_breakpoint_t bkpt, *bkptp;
+
+ bkptp = &db_breakpoint_list;
+ for (bkpt = *bkptp; bkpt; bkpt = *bkptp) {
+ if (bkpt->task != TASK_NULL) {
+ if (db_lookup_task(bkpt->task) < 0) {
+ db_force_delete_breakpoint(bkpt, 0, FALSE);
+ *bkptp = bkpt->link;
+ db_breakpoint_free(bkpt);
+ continue;
+ }
+ } else {
+ for (tbp = bkpt->threads; tbp; tbp = tbp_next) {
+ tbp_next = tbp->tb_next;
+ if (tbp->tb_task_thd == 0)
+ continue;
+ if ((tbp->tb_is_task &&
+ db_lookup_task((task_t)(tbp->tb_task_thd)) < 0) ||
+ (!tbp->tb_is_task &&
+ db_lookup_thread((thread_t)(tbp->tb_task_thd)) < 0)) {
+ db_force_delete_breakpoint(bkpt,
+ tbp->tb_task_thd, tbp->tb_is_task);
+ }
+ }
+ if (bkpt->threads == 0) {
+ db_put_task_value(bkpt->address, BKPT_SIZE,
+ bkpt->bkpt_inst, bkpt->task);
+ *bkptp = bkpt->link;
+ db_breakpoint_free(bkpt);
+ continue;
+ }
+ }
+ bkptp = &bkpt->link;
+ }
+}
+
+void
+db_set_breakpoint(task, addr, count, thread, task_bpt)
+ task_t task;
+ db_addr_t addr;
+ int count;
+ thread_t thread;
+ boolean_t task_bpt;
+{
+ register db_breakpoint_t bkpt;
+ db_breakpoint_t alloc_bkpt = 0;
+ vm_offset_t task_thd;
+
+ bkpt = db_find_breakpoint(task, addr);
+ if (bkpt) {
+ if (thread == THREAD_NULL
+ || db_find_thread_breakpoint(bkpt, thread)) {
+ db_printf("Already set.\n");
+ return;
+ }
+ } else {
+ if (!DB_CHECK_ACCESS(addr, BKPT_SIZE, task)) {
+ db_printf("Cannot set break point at %X\n", addr);
+ return;
+ }
+ alloc_bkpt = bkpt = db_breakpoint_alloc();
+ if (bkpt == 0) {
+ db_printf("Too many breakpoints.\n");
+ return;
+ }
+ bkpt->task = task;
+ bkpt->flags = (task && thread == THREAD_NULL)?
+ (BKPT_USR_GLOBAL|BKPT_1ST_SET): 0;
+ bkpt->address = addr;
+ bkpt->threads = 0;
+ }
+ if (db_breakpoint_list == 0)
+ db_breakpoint_number = 0;
+ task_thd = (task_bpt)? (vm_offset_t)(thread->task): (vm_offset_t)thread;
+ if (db_add_thread_breakpoint(bkpt, task_thd, count, task_bpt) < 0) {
+ if (alloc_bkpt)
+ db_breakpoint_free(alloc_bkpt);
+ db_printf("Too many thread_breakpoints.\n");
+ } else {
+ db_printf("set breakpoint #%d\n", db_breakpoint_number);
+ if (alloc_bkpt) {
+ bkpt->link = db_breakpoint_list;
+ db_breakpoint_list = bkpt;
+ }
+ }
+}
+
+void
+db_delete_breakpoint(task, addr, task_thd)
+ task_t task;
+ db_addr_t addr;
+ vm_offset_t task_thd;
+{
+ register db_breakpoint_t bkpt;
+ register db_breakpoint_t *prev;
+
+ for (prev = &db_breakpoint_list; (bkpt = *prev) != 0;
+ prev = &bkpt->link) {
+ if ((bkpt->task == task
+ || (task != TASK_NULL && (bkpt->flags & BKPT_USR_GLOBAL)))
+ && bkpt->address == addr)
+ break;
+ }
+ if (bkpt && (bkpt->flags & BKPT_SET_IN_MEM)) {
+ db_printf("cannot delete it now.\n");
+ return;
+ }
+ if (bkpt == 0
+ || db_delete_thread_breakpoint(bkpt, task_thd) < 0) {
+ db_printf("Not set.\n");
+ return;
+ }
+ if (bkpt->threads == 0) {
+ *prev = bkpt->link;
+ db_breakpoint_free(bkpt);
+ }
+}
+
+db_breakpoint_t
+db_find_breakpoint(task, addr)
+ task_t task;
+ db_addr_t addr;
+{
+ register db_breakpoint_t bkpt;
+
+ for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
+ if ((bkpt->task == task
+ || (task != TASK_NULL && (bkpt->flags & BKPT_USR_GLOBAL)))
+ && bkpt->address == addr)
+ return (bkpt);
+ }
+ return (0);
+}
+
+boolean_t
+db_find_breakpoint_here(task, addr)
+ task_t task;
+ db_addr_t addr;
+{
+ register db_breakpoint_t bkpt;
+
+ for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
+ if ((bkpt->task == task
+ || (task != TASK_NULL && (bkpt->flags & BKPT_USR_GLOBAL)))
+ && bkpt->address == addr)
+ return(TRUE);
+ if ((bkpt->flags & BKPT_USR_GLOBAL) == 0 &&
+ DB_PHYS_EQ(task, (vm_offset_t)addr, bkpt->task, (vm_offset_t)bkpt->address))
+ return (TRUE);
+ }
+ return(FALSE);
+}
+
+boolean_t db_breakpoints_inserted = TRUE;
+
+void
+db_set_breakpoints()
+{
+ register db_breakpoint_t bkpt;
+ register task_t task;
+ db_expr_t inst;
+ task_t cur_task;
+
+ cur_task = (current_thread())? current_thread()->task: TASK_NULL;
+ if (!db_breakpoints_inserted) {
+ for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
+ if (bkpt->flags & BKPT_SET_IN_MEM)
+ continue;
+ task = bkpt->task;
+ if (bkpt->flags & BKPT_USR_GLOBAL) {
+ if ((bkpt->flags & BKPT_1ST_SET) == 0) {
+ if (cur_task == TASK_NULL)
+ continue;
+ task = cur_task;
+ } else
+ bkpt->flags &= ~BKPT_1ST_SET;
+ }
+ if (DB_CHECK_ACCESS(bkpt->address, BKPT_SIZE, task)) {
+ inst = db_get_task_value(bkpt->address, BKPT_SIZE, FALSE,
+ task);
+ if (inst == BKPT_SET(inst))
+ continue;
+ bkpt->bkpt_inst = inst;
+ db_put_task_value(bkpt->address,
+ BKPT_SIZE,
+ BKPT_SET(bkpt->bkpt_inst), task);
+ bkpt->flags |= BKPT_SET_IN_MEM;
+ } else {
+ db_printf("Warning: cannot set breakpoint at %X ",
+ bkpt->address);
+ if (task)
+ db_printf("in task %X\n", task);
+ else
+ db_printf("in kernel space\n");
+ }
+ }
+ db_breakpoints_inserted = TRUE;
+ }
+}
+
+void
+db_clear_breakpoints()
+{
+ register db_breakpoint_t bkpt, *bkptp;
+ register task_t task;
+ task_t cur_task;
+ db_expr_t inst;
+
+ cur_task = (current_thread())? current_thread()->task: TASK_NULL;
+ if (db_breakpoints_inserted) {
+ bkptp = &db_breakpoint_list;
+ for (bkpt = *bkptp; bkpt; bkpt = *bkptp) {
+ task = bkpt->task;
+ if (bkpt->flags & BKPT_USR_GLOBAL) {
+ if (cur_task == TASK_NULL) {
+ bkptp = &bkpt->link;
+ continue;
+ }
+ task = cur_task;
+ }
+ if ((bkpt->flags & BKPT_SET_IN_MEM)
+ && DB_CHECK_ACCESS(bkpt->address, BKPT_SIZE, task)) {
+ inst = db_get_task_value(bkpt->address, BKPT_SIZE, FALSE,
+ task);
+ if (inst != BKPT_SET(inst)) {
+ if (bkpt->flags & BKPT_USR_GLOBAL) {
+ bkptp = &bkpt->link;
+ continue;
+ }
+ db_force_delete_breakpoint(bkpt, 0, FALSE);
+ *bkptp = bkpt->link;
+ db_breakpoint_free(bkpt);
+ continue;
+ }
+ db_put_task_value(bkpt->address, BKPT_SIZE,
+ bkpt->bkpt_inst, task);
+ bkpt->flags &= ~BKPT_SET_IN_MEM;
+ }
+ bkptp = &bkpt->link;
+ }
+ db_breakpoints_inserted = FALSE;
+ }
+}
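+
+/*
+ * Typical call sequence (informal; the ddb trap handling code is the
+ * actual caller): break point instructions are patched into memory only
+ * while the target runs, and removed while the debugger is interactive:
+ *
+ *	db_clear_breakpoints();		on entering the debugger
+ *	... interactive commands ...
+ *	db_set_breakpoints();		before resuming the target
+ */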
+
+/*
+ * Set a temporary breakpoint.
+ * The instruction is changed immediately,
+ * so the breakpoint does not have to be on the breakpoint list.
+ */
+db_breakpoint_t
+db_set_temp_breakpoint(task, addr)
+ task_t task;
+ db_addr_t addr;
+{
+ register db_breakpoint_t bkpt;
+
+ bkpt = db_breakpoint_alloc();
+ if (bkpt == 0) {
+ db_printf("Too many breakpoints.\n");
+ return 0;
+ }
+ bkpt->task = task;
+ bkpt->address = addr;
+ bkpt->flags = BKPT_TEMP;
+ bkpt->threads = 0;
+ if (db_add_thread_breakpoint(bkpt, 0, 1, FALSE) < 0) {
+ if (bkpt)
+ db_breakpoint_free(bkpt);
+ db_printf("Too many thread_breakpoints.\n");
+ return 0;
+ }
+ bkpt->bkpt_inst = db_get_task_value(bkpt->address, BKPT_SIZE,
+ FALSE, task);
+ db_put_task_value(bkpt->address, BKPT_SIZE,
+ BKPT_SET(bkpt->bkpt_inst), task);
+ return bkpt;
+}
+
+void
+db_delete_temp_breakpoint(task, bkpt)
+ task_t task;
+ db_breakpoint_t bkpt;
+{
+ db_put_task_value(bkpt->address, BKPT_SIZE, bkpt->bkpt_inst, task);
+ db_delete_thread_breakpoint(bkpt, 0);
+ db_breakpoint_free(bkpt);
+}
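+
+/*
+ * Sketch of the intended pairing (hypothetical caller such as a
+ * single-step or "until" implementation; `return_pc' is a placeholder):
+ *
+ *	db_breakpoint_t b = db_set_temp_breakpoint(task, return_pc);
+ *	... resume the target and wait for the trap ...
+ *	if (b)
+ *		db_delete_temp_breakpoint(task, b);
+ */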
+
+/*
+ * List breakpoints.
+ */
+void
+db_list_breakpoints()
+{
+ register db_breakpoint_t bkpt;
+
+ if (db_breakpoint_list == 0) {
+ db_printf("No breakpoints set\n");
+ return;
+ }
+
+ db_printf(" No Space Thread Cnt Address(Cond)\n");
+ for (bkpt = db_breakpoint_list;
+ bkpt != 0;
+ bkpt = bkpt->link)
+ {
+ register db_thread_breakpoint_t tp;
+ int task_id;
+ int thread_id;
+
+ if (bkpt->threads) {
+ for (tp = bkpt->threads; tp; tp = tp->tb_next) {
+ db_printf("%3d ", tp->tb_number);
+ if (bkpt->flags & BKPT_USR_GLOBAL)
+ db_printf("user ");
+ else if (bkpt->task == TASK_NULL)
+ db_printf("kernel ");
+ else if ((task_id = db_lookup_task(bkpt->task)) < 0)
+ db_printf("%0*X ", 2*sizeof(vm_offset_t), bkpt->task);
+ else
+ db_printf("task%-3d ", task_id);
+ if (tp->tb_task_thd == 0) {
+ db_printf("all ");
+ } else {
+ if (tp->tb_is_task) {
+ task_id = db_lookup_task((task_t)(tp->tb_task_thd));
+ if (task_id < 0)
+ db_printf("%0*X ", 2*sizeof(vm_offset_t),
+ tp->tb_task_thd);
+ else
+ db_printf("task%03d ", task_id);
+ } else {
+ thread_t thd = (thread_t)(tp->tb_task_thd);
+ task_id = db_lookup_task(thd->task);
+ thread_id = db_lookup_task_thread(thd->task, thd);
+ if (task_id < 0 || thread_id < 0)
+ db_printf("%0*X ", 2*sizeof(vm_offset_t),
+ tp->tb_task_thd);
+ else
+ db_printf("task%03d.%-3d ", task_id, thread_id);
+ }
+ }
+ db_printf("%3d ", tp->tb_init_count);
+ db_task_printsym(bkpt->address, DB_STGY_PROC, bkpt->task);
+ if (tp->tb_cond > 0) {
+ db_printf("(");
+ db_cond_print(tp);
+ db_printf(")");
+ }
+ db_printf("\n");
+ }
+ } else {
+ if (bkpt->task == TASK_NULL)
+ db_printf(" ? kernel ");
+ else
+ db_printf("%*X ", 2*sizeof(vm_offset_t), bkpt->task);
+ db_printf("(?) ");
+ db_task_printsym(bkpt->address, DB_STGY_PROC, bkpt->task);
+ db_printf("\n");
+ }
+ }
+}
+
+/* Delete breakpoint */
+/*ARGSUSED*/
+void
+db_delete_cmd()
+{
+	register int n;
+ thread_t thread;
+ vm_offset_t task_thd;
+ boolean_t user_global = FALSE;
+ boolean_t task_bpt = FALSE;
+ boolean_t user_space = FALSE;
+ boolean_t thd_bpt = FALSE;
+ db_expr_t addr;
+ int t;
+
+ t = db_read_token();
+ if (t == tSLASH) {
+ t = db_read_token();
+ if (t != tIDENT) {
+ db_printf("Bad modifier \"%s\"\n", db_tok_string);
+ db_error(0);
+ }
+ user_global = db_option(db_tok_string, 'U');
+ user_space = (user_global)? TRUE: db_option(db_tok_string, 'u');
+ task_bpt = db_option(db_tok_string, 'T');
+ thd_bpt = db_option(db_tok_string, 't');
+ if (task_bpt && user_global)
+ db_error("Cannot specify both 'T' and 'U' option\n");
+ t = db_read_token();
+ }
+ if (t == tHASH) {
+ db_thread_breakpoint_t tbp;
+ db_breakpoint_t bkpt;
+
+ if (db_read_token() != tNUMBER) {
+ db_printf("Bad break point number #%s\n", db_tok_string);
+ db_error(0);
+ }
+ if ((tbp = db_find_breakpoint_number(db_tok_number, &bkpt)) == 0) {
+ db_printf("No such break point #%d\n", db_tok_number);
+ db_error(0);
+ }
+ db_delete_breakpoint(bkpt->task, bkpt->address, tbp->tb_task_thd);
+ return;
+ }
+ db_unread_token(t);
+ if (!db_expression(&addr)) {
+ /*
+ * We attempt to pick up the user_space indication from db_dot,
+ * so that a plain "d" always works.
+ */
+ addr = (db_expr_t)db_dot;
+ if (!user_space && !DB_VALID_ADDRESS((vm_offset_t)addr, FALSE))
+ user_space = TRUE;
+ }
+ if (!DB_VALID_ADDRESS((vm_offset_t) addr, user_space)) {
+ db_printf("Address %#X is not in %s space\n", addr,
+ (user_space)? "user": "kernel");
+ db_error(0);
+ }
+ if (thd_bpt || task_bpt) {
+ for (n = 0; db_get_next_thread(&thread, n); n++) {
+ if (thread == THREAD_NULL)
+ db_error("No active thread\n");
+ if (task_bpt) {
+ if (thread->task == TASK_NULL)
+ db_error("No task\n");
+ task_thd = (vm_offset_t) (thread->task);
+ } else
+ task_thd = (user_global)? 0: (vm_offset_t) thread;
+ db_delete_breakpoint(db_target_space(thread, user_space),
+ (db_addr_t)addr, task_thd);
+ }
+ } else {
+ db_delete_breakpoint(db_target_space(THREAD_NULL, user_space),
+ (db_addr_t)addr, 0);
+ }
+}
+
+/* Set breakpoint with skip count */
+/*ARGSUSED*/
+void
+db_breakpoint_cmd(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
+{
+	register int n;
+ thread_t thread;
+ boolean_t user_global = db_option(modif, 'U');
+ boolean_t task_bpt = db_option(modif, 'T');
+ boolean_t user_space;
+
+ if (count == -1)
+ count = 1;
+
+ if (!task_bpt && db_option(modif,'t'))
+ task_bpt = TRUE;
+
+ if (task_bpt && user_global)
+ db_error("Cannot specify both 'T' and 'U'\n");
+ user_space = (user_global)? TRUE: db_option(modif, 'u');
+ if (user_space && db_access_level < DB_ACCESS_CURRENT)
+ db_error("User space break point is not supported\n");
+ if (!task_bpt && !DB_VALID_ADDRESS((vm_offset_t)addr, user_space)) {
+ /* if the user has explicitly specified user space,
+ do not insert a breakpoint into the kernel */
+ if (user_space)
+ db_error("Invalid user space address\n");
+ user_space = TRUE;
+ db_printf("%#X is in user space\n", addr);
+ }
+ if (db_option(modif, 't') || task_bpt) {
+ for (n = 0; db_get_next_thread(&thread, n); n++) {
+ if (thread == THREAD_NULL)
+ db_error("No active thread\n");
+ if (task_bpt && thread->task == TASK_NULL)
+ db_error("No task\n");
+ if (db_access_level <= DB_ACCESS_CURRENT && user_space
+ && thread->task != db_current_task())
+ db_error("Cannot set break point in inactive user space\n");
+ db_set_breakpoint(db_target_space(thread, user_space),
+ (db_addr_t)addr, count,
+ (user_global)? THREAD_NULL: thread,
+ task_bpt);
+ }
+ } else {
+ db_set_breakpoint(db_target_space(THREAD_NULL, user_space),
+ (db_addr_t)addr,
+ count, THREAD_NULL, FALSE);
+ }
+}
+
+/* list breakpoints */
+void
+db_listbreak_cmd()
+{
+ db_list_breakpoints();
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_break.h b/ddb/db_break.h
new file mode 100644
index 00000000..4d4bd2cf
--- /dev/null
+++ b/ddb/db_break.h
@@ -0,0 +1,86 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+#ifndef _DDB_DB_BREAK_H_
+#define _DDB_DB_BREAK_H_
+
+#include <machine/db_machdep.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <mach/boolean.h>
+
+/*
+ * thread list at the same breakpoint address
+ */
+struct db_thread_breakpoint {
+ vm_offset_t tb_task_thd; /* target task or thread */
+ boolean_t tb_is_task; /* task qualified */
+ short tb_number; /* breakpoint number */
+ short tb_init_count; /* skip count(initial value) */
+ short tb_count; /* current skip count */
+ short tb_cond; /* break condition */
+ struct db_thread_breakpoint *tb_next; /* next chain */
+};
+
+typedef struct db_thread_breakpoint *db_thread_breakpoint_t;
+
+/*
+ * Breakpoint.
+ */
+
+struct db_breakpoint {
+ task_t task; /* target task */
+ db_addr_t address; /* set here */
+ db_thread_breakpoint_t threads; /* thread */
+ int flags; /* flags: */
+#define BKPT_SINGLE_STEP 0x2 /* to simulate single step */
+#define BKPT_TEMP 0x4 /* temporary */
+#define BKPT_USR_GLOBAL 0x8 /* global user space break point */
+#define BKPT_SET_IN_MEM 0x10 /* break point is set in memory */
+#define BKPT_1ST_SET 0x20 /* 1st time set of user global bkpt */
+ vm_size_t bkpt_inst; /* saved instruction at bkpt */
+ struct db_breakpoint *link; /* link in in-use or free chain */
+};
+
+typedef struct db_breakpoint *db_breakpoint_t;
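+
+/*
+ * Informal picture of the data structure (illustration only): each
+ * db_breakpoint describes one (task, address) pair and carries the saved
+ * instruction, while its `threads' chain lists the per-thread or per-task
+ * qualifiers sharing that address:
+ *
+ *	db_breakpoint_list -> bkpt(addr A) -> bkpt(addr B) -> ...
+ *	                         |
+ *	                         +-- tb(thread 1) -- tb(task 2) -- ...
+ */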
+
+extern db_breakpoint_t db_find_breakpoint( task_t task, db_addr_t addr);
+extern boolean_t db_find_breakpoint_here( task_t task, db_addr_t addr);
+extern void db_set_breakpoints();
+extern void db_clear_breakpoints();
+extern db_thread_breakpoint_t db_find_thread_breakpoint_here
+ ( task_t task, db_addr_t addr );
+extern db_thread_breakpoint_t db_find_breakpoint_number
+ ( int num, db_breakpoint_t *bkptp);
+
+extern db_breakpoint_t db_set_temp_breakpoint( task_t task, db_addr_t addr);
+extern void db_delete_temp_breakpoint
+ ( task_t task, db_breakpoint_t bkpt);
+
+#endif /* _DDB_DB_BREAK_H_ */
diff --git a/ddb/db_command.c b/ddb/db_command.c
new file mode 100644
index 00000000..50a02bd4
--- /dev/null
+++ b/ddb/db_command.c
@@ -0,0 +1,597 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+/*
+ * Command dispatcher.
+ */
+#include <cpus.h>
+#include <norma_ipc.h>
+#include <norma_vm.h>
+
+#include <mach/boolean.h>
+#include <kern/strings.h>
+#include <machine/db_machdep.h>
+
+#include <ddb/db_lex.h>
+#include <ddb/db_output.h>
+#include <ddb/db_command.h>
+#include <ddb/db_task_thread.h>
+
+#include <machine/setjmp.h>
+#include <kern/thread.h>
+#include <ipc/ipc_pset.h> /* 4proto */
+#include <ipc/ipc_port.h> /* 4proto */
+
+
+
+/*
+ * Exported global variables
+ */
+boolean_t db_cmd_loop_done;
+jmp_buf_t *db_recover = 0;
+db_addr_t db_dot;
+db_addr_t db_last_addr;
+db_addr_t db_prev;
+db_addr_t db_next;
+
+/*
+ * if 'ed' style: 'dot' is set at start of last item printed,
+ * and '+' points to next line.
+ * Otherwise: 'dot' points to next item, '..' points to last.
+ */
+boolean_t db_ed_style = TRUE;
+
+/*
+ * Results of command search.
+ */
+#define CMD_UNIQUE 0
+#define CMD_FOUND 1
+#define CMD_NONE 2
+#define CMD_AMBIGUOUS 3
+#define CMD_HELP 4
+
+/*
+ * Search for command prefix.
+ */
+int
+db_cmd_search(name, table, cmdp)
+ char * name;
+ struct db_command *table;
+ struct db_command **cmdp; /* out */
+{
+ struct db_command *cmd;
+ int result = CMD_NONE;
+
+ for (cmd = table; cmd->name != 0; cmd++) {
+ register char *lp;
+ register char *rp;
+ register int c;
+
+ lp = name;
+ rp = cmd->name;
+ while ((c = *lp) == *rp) {
+ if (c == 0) {
+ /* complete match */
+ *cmdp = cmd;
+ return (CMD_UNIQUE);
+ }
+ lp++;
+ rp++;
+ }
+ if (c == 0) {
+ /* end of name, not end of command -
+ partial match */
+ if (result == CMD_FOUND) {
+ result = CMD_AMBIGUOUS;
+ /* but keep looking for a full match -
+ this lets us match single letters */
+ }
+ else {
+ *cmdp = cmd;
+ result = CMD_FOUND;
+ }
+ }
+ }
+ if (result == CMD_NONE) {
+ /* check for 'help' */
+ if (!strncmp(name, "help", strlen(name)))
+ result = CMD_HELP;
+ }
+ return (result);
+}
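+
+/*
+ * Examples against the top-level command table below (illustrative):
+ * "ex" is an unambiguous prefix of "examine" (CMD_FOUND), "x" matches
+ * the entry "x" exactly (CMD_UNIQUE), "ma" is a prefix of both "macro"
+ * and "match" (CMD_AMBIGUOUS), and a word matching no entry yields
+ * CMD_NONE unless it is a prefix of "help" (CMD_HELP).
+ */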
+
+void
+db_cmd_list(table)
+ struct db_command *table;
+{
+ register struct db_command *cmd;
+
+ for (cmd = table; cmd->name != 0; cmd++) {
+ db_printf("%-12s", cmd->name);
+ db_end_line();
+ }
+}
+
+void
+db_command(last_cmdp, cmd_table)
+ struct db_command **last_cmdp; /* IN_OUT */
+ struct db_command *cmd_table;
+{
+ struct db_command *cmd;
+ int t;
+ char modif[TOK_STRING_SIZE];
+ db_expr_t addr, count;
+ boolean_t have_addr = FALSE;
+ int result;
+
+ t = db_read_token();
+ if (t == tEOL || t == tSEMI_COLON) {
+ /* empty line repeats last command, at 'next' */
+ cmd = *last_cmdp;
+ addr = (db_expr_t)db_next;
+ have_addr = FALSE;
+ count = 1;
+ modif[0] = '\0';
+ if (t == tSEMI_COLON)
+ db_unread_token(t);
+ }
+ else if (t == tEXCL) {
+ void db_fncall();
+ db_fncall();
+ return;
+ }
+ else if (t != tIDENT) {
+ db_printf("?\n");
+ db_flush_lex();
+ return;
+ }
+ else {
+ /*
+ * Search for command
+ */
+ while (cmd_table) {
+ result = db_cmd_search(db_tok_string,
+ cmd_table,
+ &cmd);
+ switch (result) {
+ case CMD_NONE:
+ if (db_exec_macro(db_tok_string) == 0)
+ return;
+ db_printf("No such command \"%s\"\n", db_tok_string);
+ db_flush_lex();
+ return;
+ case CMD_AMBIGUOUS:
+ db_printf("Ambiguous\n");
+ db_flush_lex();
+ return;
+ case CMD_HELP:
+ db_cmd_list(cmd_table);
+ db_flush_lex();
+ return;
+ default:
+ break;
+ }
+ if ((cmd_table = cmd->more) != 0) {
+ t = db_read_token();
+ if (t != tIDENT) {
+ db_cmd_list(cmd_table);
+ db_flush_lex();
+ return;
+ }
+ }
+ }
+
+ if ((cmd->flag & CS_OWN) == 0) {
+ /*
+ * Standard syntax:
+ * command [/modifier] [addr] [,count]
+ */
+ t = db_read_token();
+ if (t == tSLASH) {
+ t = db_read_token();
+ if (t != tIDENT) {
+ db_printf("Bad modifier \"/%s\"\n", db_tok_string);
+ db_flush_lex();
+ return;
+ }
+ db_strcpy(modif, db_tok_string);
+ }
+ else {
+ db_unread_token(t);
+ modif[0] = '\0';
+ }
+
+ if (db_expression(&addr)) {
+ db_dot = (db_addr_t) addr;
+ db_last_addr = db_dot;
+ have_addr = TRUE;
+ }
+ else {
+ addr = (db_expr_t) db_dot;
+ have_addr = FALSE;
+ }
+ t = db_read_token();
+ if (t == tCOMMA) {
+ if (!db_expression(&count)) {
+ db_printf("Count missing after ','\n");
+ db_flush_lex();
+ return;
+ }
+ }
+ else {
+ db_unread_token(t);
+ count = -1;
+ }
+ }
+ }
+ *last_cmdp = cmd;
+ if (cmd != 0) {
+ /*
+ * Execute the command.
+ */
+ (*cmd->fcn)(addr, have_addr, count, modif);
+
+ if (cmd->flag & CS_SET_DOT) {
+ /*
+ * If command changes dot, set dot to
+ * previous address displayed (if 'ed' style).
+ */
+ if (db_ed_style) {
+ db_dot = db_prev;
+ }
+ else {
+ db_dot = db_next;
+ }
+ }
+ else {
+ /*
+ * If command does not change dot,
+ * set 'next' location to be the same.
+ */
+ db_next = db_dot;
+ }
+ }
+}
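+
+/*
+ * Concrete examples of the standard syntax parsed above (illustrative):
+ *
+ *	x/xh 0x1000,8	modifier "xh", address 0x1000, count 8
+ *	break/T foo	modifier "T", address of symbol foo, default count
+ *	step		no modifier, address defaults to dot, default count
+ */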
+
+void
+db_command_list(last_cmdp, cmd_table)
+ struct db_command **last_cmdp; /* IN_OUT */
+ struct db_command *cmd_table;
+{
+ void db_skip_to_eol();
+
+ do {
+ db_command(last_cmdp, cmd_table);
+ db_skip_to_eol();
+ } while (db_read_token() == tSEMI_COLON && db_cmd_loop_done == 0);
+}
+
+/*
+ * 'show' commands
+ */
+extern void db_listbreak_cmd();
+extern void db_listwatch_cmd();
+extern void db_show_regs(), db_show_one_thread(), db_show_one_task();
+extern void db_show_all_threads();
+extern void db_show_macro();
+extern void vm_map_print(), vm_object_print(), vm_page_print();
+extern void vm_map_copy_print();
+extern void ipc_port_print(), ipc_pset_print(), db_show_all_slocks();
+extern void ipc_kmsg_print(), ipc_msg_print();
+extern void db_show_port_id();
+void db_show_help();
+#if NORMA_IPC
+extern void netipc_packet_print(), netipc_pcs_print(), db_show_all_uids();
+extern void db_show_all_proxies(), db_show_all_principals();
+extern void db_show_all_uids_verbose();
+#endif /* NORMA_IPC */
+#if NORMA_VM
+extern void xmm_obj_print(), xmm_reply_print();
+#endif /* NORMA_VM */
+
+struct db_command db_show_all_cmds[] = {
+ { "threads", db_show_all_threads, 0, 0 },
+ { "slocks", db_show_all_slocks, 0, 0 },
+#if NORMA_IPC
+ { "uids", db_show_all_uids, 0, 0 },
+ { "proxies", db_show_all_proxies, 0, 0 },
+ { "principals", db_show_all_principals, 0, 0 },
+ { "vuids", db_show_all_uids_verbose, 0, 0 },
+#endif /* NORMA_IPC */
+ { (char *)0 }
+};
+
+struct db_command db_show_cmds[] = {
+ { "all", 0, 0, db_show_all_cmds },
+ { "registers", db_show_regs, 0, 0 },
+ { "breaks", db_listbreak_cmd, 0, 0 },
+ { "watches", db_listwatch_cmd, 0, 0 },
+ { "thread", db_show_one_thread, 0, 0 },
+ { "task", db_show_one_task, 0, 0 },
+ { "macro", db_show_macro, CS_OWN, 0 },
+ { "map", vm_map_print, 0, 0 },
+ { "object", vm_object_print, 0, 0 },
+ { "page", vm_page_print, 0, 0 },
+ { "copy", vm_map_copy_print, 0, 0 },
+ { "port", ipc_port_print, 0, 0 },
+ { "pset", ipc_pset_print, 0, 0 },
+ { "kmsg", ipc_kmsg_print, 0, 0 },
+ { "msg", ipc_msg_print, 0, 0 },
+ { "ipc_port", db_show_port_id, 0, 0 },
+#if NORMA_IPC
+ { "packet", netipc_packet_print, 0, 0 },
+ { "pcs", netipc_pcs_print, 0, 0 },
+#endif /* NORMA_IPC */
+#if NORMA_VM
+ { "xmm_obj", xmm_obj_print, 0, 0 },
+ { "xmm_reply", xmm_reply_print, 0, 0 },
+#endif /* NORMA_VM */
+ { (char *)0, }
+};
+
+extern void db_print_cmd(), db_examine_cmd(), db_set_cmd();
+extern void db_examine_forward(), db_examine_backward();
+extern void db_search_cmd();
+extern void db_write_cmd();
+extern void db_delete_cmd(), db_breakpoint_cmd();
+extern void db_deletewatch_cmd(), db_watchpoint_cmd();
+extern void db_single_step_cmd(), db_trace_until_call_cmd(),
+ db_trace_until_matching_cmd(), db_continue_cmd();
+extern void db_stack_trace_cmd(), db_cond_cmd();
+void db_help_cmd();
+void db_def_macro_cmd(), db_del_macro_cmd();
+void db_fncall();
+extern void db_reset_cpu();
+
+struct db_command db_command_table[] = {
+#ifdef DB_MACHINE_COMMANDS
+ /* this must be the first entry, if it exists */
+ { "machine", 0, 0, 0},
+#endif
+ { "print", db_print_cmd, CS_OWN, 0 },
+ { "examine", db_examine_cmd, CS_MORE|CS_SET_DOT, 0 },
+ { "x", db_examine_cmd, CS_MORE|CS_SET_DOT, 0 },
+ { "xf", db_examine_forward, CS_SET_DOT, 0 },
+ { "xb", db_examine_backward, CS_SET_DOT, 0 },
+ { "search", db_search_cmd, CS_OWN|CS_SET_DOT, 0 },
+ { "set", db_set_cmd, CS_OWN, 0 },
+ { "write", db_write_cmd, CS_MORE|CS_SET_DOT, 0 },
+ { "w", db_write_cmd, CS_MORE|CS_SET_DOT, 0 },
+ { "delete", db_delete_cmd, CS_OWN, 0 },
+ { "d", db_delete_cmd, CS_OWN, 0 },
+ { "break", db_breakpoint_cmd, CS_MORE, 0 },
+ { "dwatch", db_deletewatch_cmd, CS_MORE, 0 },
+ { "watch", db_watchpoint_cmd, CS_MORE, 0 },
+ { "step", db_single_step_cmd, 0, 0 },
+ { "s", db_single_step_cmd, 0, 0 },
+ { "continue", db_continue_cmd, 0, 0 },
+ { "c", db_continue_cmd, 0, 0 },
+ { "until", db_trace_until_call_cmd,0, 0 },
+ { "next", db_trace_until_matching_cmd,0, 0 },
+ { "match", db_trace_until_matching_cmd,0, 0 },
+ { "trace", db_stack_trace_cmd, 0, 0 },
+ { "cond", db_cond_cmd, CS_OWN, 0 },
+ { "call", db_fncall, CS_OWN, 0 },
+ { "macro", db_def_macro_cmd, CS_OWN, 0 },
+ { "dmacro", db_del_macro_cmd, CS_OWN, 0 },
+ { "show", 0, 0, db_show_cmds },
+ { "reset", db_reset_cpu, 0, 0 },
+ { "reboot", db_reset_cpu, 0, 0 },
+ { (char *)0, }
+};
+
+#ifdef DB_MACHINE_COMMANDS
+
+/* Install the machine-dependent commands.
+   This must be called before the debugger is enabled.  */
+void db_machine_commands_install(ptr)
+struct db_command *ptr;
+{
+ db_command_table[0].more = ptr;
+ return;
+}
+
+#endif
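+
+/*
+ * A minimal sketch of the intended use (the table and command names are
+ * hypothetical; a port defines its own in machine-dependent code):
+ *
+ *	struct db_command my_machine_cmds[] = {
+ *		{ "tlb", my_tlb_dump_cmd, 0, 0 },
+ *		{ (char *)0, }
+ *	};
+ *
+ *	db_machine_commands_install(my_machine_cmds);
+ */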
+
+
+struct db_command *db_last_command = 0;
+
+void
+db_help_cmd()
+{
+ struct db_command *cmd = db_command_table;
+
+ while (cmd->name != 0) {
+ db_printf("%-12s", cmd->name);
+ db_end_line();
+ cmd++;
+ }
+}
+
+int (*ddb_display)();
+
+void
+db_command_loop()
+{
+ jmp_buf_t db_jmpbuf;
+ jmp_buf_t *prev = db_recover;
+ extern int db_output_line;
+ extern int db_macro_level;
+#if NORMA_IPC
+ extern int _node_self; /* node_self() may not be callable yet */
+#endif /* NORMA_IPC */
+
+ /*
+ * Initialize 'prev' and 'next' to dot.
+ */
+ db_prev = db_dot;
+ db_next = db_dot;
+
+ if (ddb_display)
+ (*ddb_display)();
+
+ db_cmd_loop_done = 0;
+ while (!db_cmd_loop_done) {
+ (void) _setjmp(db_recover = &db_jmpbuf);
+ db_macro_level = 0;
+ if (db_print_position() != 0)
+ db_printf("\n");
+ db_output_line = 0;
+ db_printf("db%s", (db_default_thread)? "t": "");
+#if NORMA_IPC
+ db_printf("%d", _node_self);
+#endif
+#if NCPUS > 1
+ db_printf("{%d}", cpu_number());
+#endif
+ db_printf("> ");
+
+ (void) db_read_line("!!");
+ db_command_list(&db_last_command, db_command_table);
+ }
+
+ db_recover = prev;
+}
+
+boolean_t
+db_exec_cmd_nest(cmd, size)
+ char *cmd;
+ int size;
+{
+ struct db_lex_context lex_context;
+
+ db_cmd_loop_done = 0;
+ if (cmd) {
+ db_save_lex_context(&lex_context);
+ db_switch_input(cmd, size /**OLD, &lex_context OLD**/);
+ }
+ db_command_list(&db_last_command, db_command_table);
+ if (cmd)
+ db_restore_lex_context(&lex_context);
+ return(db_cmd_loop_done == 0);
+}
+
+#ifdef __GNUC__
+extern __volatile__ void _longjmp();
+#endif
+
+void db_error(s)
+ char *s;
+{
+ extern int db_macro_level;
+
+ db_macro_level = 0;
+ if (db_recover) {
+ if (s)
+ db_printf(s);
+ db_flush_lex();
+ _longjmp(db_recover, 1);
+ }
+ else
+ {
+ if (s)
+ db_printf(s);
+ panic("db_error");
+ }
+}
+
+/*
+ * Call random function:
+ * !expr(arg,arg,arg)
+ */
+void
+db_fncall()
+{
+ db_expr_t fn_addr;
+#define MAXARGS 11
+ db_expr_t args[MAXARGS];
+ int nargs = 0;
+ db_expr_t retval;
+ db_expr_t (*func)();
+ int t;
+
+ if (!db_expression(&fn_addr)) {
+ db_printf("Bad function \"%s\"\n", db_tok_string);
+ db_flush_lex();
+ return;
+ }
+ func = (db_expr_t (*) ()) fn_addr;
+
+ t = db_read_token();
+ if (t == tLPAREN) {
+ if (db_expression(&args[0])) {
+ nargs++;
+ while ((t = db_read_token()) == tCOMMA) {
+ if (nargs == MAXARGS) {
+ db_printf("Too many arguments\n");
+ db_flush_lex();
+ return;
+ }
+ if (!db_expression(&args[nargs])) {
+ db_printf("Argument missing\n");
+ db_flush_lex();
+ return;
+ }
+ nargs++;
+ }
+ db_unread_token(t);
+ }
+ if (db_read_token() != tRPAREN) {
+ db_printf("?\n");
+ db_flush_lex();
+ return;
+ }
+ }
+ while (nargs < MAXARGS) {
+ args[nargs++] = 0;
+ }
+
+ retval = (*func)(args[0], args[1], args[2], args[3], args[4],
+ args[5], args[6], args[7], args[8], args[9] );
+ db_printf(" %#N\n", retval);
+}
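+
+/*
+ * Example of the "!" (or "call") command handled above (illustrative;
+ * any kernel function symbol or address expression will do):
+ *
+ *	!db_printf("%d\n", 42)
+ *
+ * Unused argument slots are zero-filled and the return value is printed
+ * in the debugger's current radix.
+ */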
+
+boolean_t
+db_option(modif, option)
+ char *modif;
+ int option;
+{
+ register char *p;
+
+ for (p = modif; *p; p++)
+ if (*p == option)
+ return(TRUE);
+ return(FALSE);
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_command.h b/ddb/db_command.h
new file mode 100644
index 00000000..60762b21
--- /dev/null
+++ b/ddb/db_command.h
@@ -0,0 +1,70 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+#include "mach_kdb.h"
+#if MACH_KDB
+
+/*
+ * Command loop declarations.
+ */
+
+#include <machine/db_machdep.h>
+#include <machine/setjmp.h>
+
+extern void db_command_loop();
+extern boolean_t db_exec_conditional_cmd();
+extern boolean_t db_option(/* char *, int */);
+
+extern void db_error(/* char * */); /* report error */
+
+extern db_addr_t db_dot; /* current location */
+extern db_addr_t db_last_addr; /* last explicit address typed */
+extern db_addr_t db_prev; /* last address examined
+ or written */
+extern db_addr_t db_next; /* next address to be examined
+ or written */
+extern jmp_buf_t * db_recover; /* error recovery */
+
+/*
+ * Command table
+ */
+struct db_command {
+ char * name; /* command name */
+ void (*fcn)(); /* function to call */
+ int flag; /* extra info: */
+#define CS_OWN 0x1 /* non-standard syntax */
+#define CS_MORE 0x2 /* standard syntax, but may have other
+ words at end */
+#define CS_SET_DOT 0x100 /* set dot after command */
+ struct db_command *more; /* another level of command */
+};
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_cond.c b/ddb/db_cond.c
new file mode 100644
index 00000000..c78f03c1
--- /dev/null
+++ b/ddb/db_cond.c
@@ -0,0 +1,181 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <machine/db_machdep.h>
+#include <machine/setjmp.h>
+
+#include <ddb/db_lex.h>
+#include <ddb/db_break.h>
+#include <ddb/db_command.h>
+
+
+
+#define DB_MAX_COND 10 /* maximum conditions to be set */
+
+int db_ncond_free = DB_MAX_COND; /* free condition */
+struct db_cond {
+ int c_size; /* size of cond */
+ char c_cond_cmd[DB_LEX_LINE_SIZE]; /* cond & cmd */
+} db_cond[DB_MAX_COND];
+
+void
+db_cond_free(bkpt)
+ db_thread_breakpoint_t bkpt;
+{
+ if (bkpt->tb_cond > 0) {
+ db_cond[bkpt->tb_cond-1].c_size = 0;
+ db_ncond_free++;
+ bkpt->tb_cond = 0;
+ }
+}
+
+boolean_t
+db_cond_check(bkpt)
+ db_thread_breakpoint_t bkpt;
+{
+ register struct db_cond *cp;
+ db_expr_t value;
+ int t;
+ jmp_buf_t db_jmpbuf;
+ extern jmp_buf_t *db_recover;
+
+ if (bkpt->tb_cond <= 0) /* no condition */
+ return(TRUE);
+ db_dot = PC_REGS(DDB_REGS);
+ db_prev = db_dot;
+ db_next = db_dot;
+ if (_setjmp(db_recover = &db_jmpbuf)) {
+ /*
+ * in case of error, return true to enter interactive mode
+ */
+ return(TRUE);
+ }
+
+ /*
+	 * switch input and evaluate the condition
+ */
+ cp = &db_cond[bkpt->tb_cond - 1];
+ db_switch_input(cp->c_cond_cmd, cp->c_size);
+ if (!db_expression(&value)) {
+ db_printf("error: condition evaluation error\n");
+ return(TRUE);
+ }
+ if (value == 0 || --(bkpt->tb_count) > 0)
+ return(FALSE);
+
+ /*
+	 * execute the command list, if one exists
+ */
+ bkpt->tb_count = bkpt->tb_init_count;
+ if ((t = db_read_token()) != tEOL) {
+ db_unread_token(t);
+ return(db_exec_cmd_nest(0, 0));
+ }
+ return(TRUE);
+}
+
+void
+db_cond_print(bkpt)
+ db_thread_breakpoint_t bkpt;
+{
+ register char *p, *ep;
+ register struct db_cond *cp;
+
+ if (bkpt->tb_cond <= 0)
+ return;
+ cp = &db_cond[bkpt->tb_cond-1];
+ p = cp->c_cond_cmd;
+ ep = p + cp->c_size;
+ while (p < ep) {
+ if (*p == '\n' || *p == 0)
+ break;
+ db_putchar(*p++);
+ }
+}
+
+void
+db_cond_cmd()
+{
+	register int c;
+ register struct db_cond *cp;
+ register char *p;
+ db_expr_t value;
+ db_thread_breakpoint_t bkpt;
+
+ if (db_read_token() != tHASH || db_read_token() != tNUMBER) {
+ db_printf("#<number> expected instead of \"%s\"\n", db_tok_string);
+ db_error(0);
+ return;
+ }
+ if ((bkpt = db_find_breakpoint_number(db_tok_number, 0)) == 0) {
+ db_printf("No such break point #%d\n", db_tok_number);
+ db_error(0);
+ return;
+ }
+ /*
+ * if the break point already has a condition, free it first
+ */
+ if (bkpt->tb_cond > 0) {
+ cp = &db_cond[bkpt->tb_cond - 1];
+ db_cond_free(bkpt);
+ } else {
+ if (db_ncond_free <= 0) {
+ db_error("Too many conditions\n");
+ return;
+ }
+ for (cp = db_cond; cp < &db_cond[DB_MAX_COND]; cp++)
+ if (cp->c_size == 0)
+ break;
+ if (cp >= &db_cond[DB_MAX_COND])
+ panic("bad db_cond_free");
+ }
+ for (c = db_read_char(); c == ' ' || c == '\t'; c = db_read_char());
+ for (p = cp->c_cond_cmd; c >= 0; c = db_read_char())
+ *p++ = c;
+ /*
+ * switch to saved data and call db_expression to check the condition.
+ * If no condition is supplied, db_expression will return false.
+ * In this case, clear previous condition of the break point.
+ * If condition is supplied, set the condition to the permanent area.
+	 * Note: db_expression will not return here if the condition
+	 *	 expression is malformed.
+ */
+ db_switch_input(cp->c_cond_cmd, p - cp->c_cond_cmd);
+ if (!db_expression(&value)) {
+ /* since condition is already freed, do nothing */
+ db_flush_lex();
+ return;
+ }
+ db_flush_lex();
+ db_ncond_free--;
+ cp->c_size = p - cp->c_cond_cmd;
+ bkpt->tb_cond = (cp - db_cond) + 1;
+}
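+
+/*
+ * Example of the syntax accepted above (illustrative; "$eax" stands for
+ * any machine-dependent debugger variable or expression):
+ *
+ *	cond #2 $eax == 0
+ *
+ * attaches the condition to break point #2, which then stops only when
+ * the expression evaluates non-zero; "cond #2" with no expression clears
+ * a previously set condition.
+ */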
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_examine.c b/ddb/db_examine.c
new file mode 100644
index 00000000..c996fd18
--- /dev/null
+++ b/ddb/db_examine.c
@@ -0,0 +1,506 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_output.h>
+#include <ddb/db_command.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_task_thread.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <mach/vm_param.h>
+
+#define db_thread_to_task(thread) ((thread)? thread->task: TASK_NULL)
+
+char db_examine_format[TOK_STRING_SIZE] = "x";
+int db_examine_count = 1;
+db_addr_t db_examine_prev_addr = 0;
+thread_t db_examine_thread = THREAD_NULL;
+
+extern db_addr_t db_disasm(db_addr_t pc, boolean_t altform, task_t task);
+ /* instruction disassembler */
+
+void db_examine();/*forwards*/
+void db_strcpy();
+
+/*
+ * Examine (print) data.
+ */
+/*ARGSUSED*/
+void
+db_examine_cmd(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ thread_t thread;
+ boolean_t db_option();
+
+ if (modif[0] != '\0')
+ db_strcpy(db_examine_format, modif);
+
+ if (count == -1)
+ count = 1;
+ db_examine_count = count;
+ if (db_option(modif, 't'))
+ {
+ if (!db_get_next_thread(&thread, 0))
+ return;
+ }
+ else
+ if (db_option(modif,'u'))
+ thread = current_thread();
+ else
+ thread = THREAD_NULL;
+
+ db_examine_thread = thread;
+ db_examine((db_addr_t) addr, db_examine_format, count,
+ db_thread_to_task(thread));
+}
+
+/* ARGSUSED */
+void
+db_examine_forward(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ db_examine(db_next, db_examine_format, db_examine_count,
+ db_thread_to_task(db_examine_thread));
+}
+
+/* ARGSUSED */
+void
+db_examine_backward(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
+{
+
+ db_examine(db_examine_prev_addr - (db_next - db_examine_prev_addr),
+ db_examine_format, db_examine_count,
+ db_thread_to_task(db_examine_thread));
+}
+
+void
+db_examine(addr, fmt, count, task)
+ register
+ db_addr_t addr;
+ char * fmt; /* format string */
+ int count; /* repeat count */
+ task_t task;
+{
+ int c;
+ db_expr_t value;
+ int size; /* in bytes */
+ int width;
+ char * fp;
+
+ db_examine_prev_addr = addr;
+ while (--count >= 0) {
+ fp = fmt;
+ size = sizeof(int);
+ width = 4*size;
+ while ((c = *fp++) != 0) {
+ switch (c) {
+ case 'b':
+ size = sizeof(char);
+ width = 4*size;
+ break;
+ case 'h':
+ size = sizeof(short);
+ width = 4*size;
+ break;
+ case 'l':
+ size = sizeof(long);
+ width = 4*size;
+ break;
+ case 'a': /* address */
+ case 'A': /* function address */
+ /* always forces a new line */
+ if (db_print_position() != 0)
+ db_printf("\n");
+ db_prev = addr;
+ db_task_printsym(addr,
+ (c == 'a')?DB_STGY_ANY:DB_STGY_PROC,
+ task);
+ db_printf(":\t");
+ break;
+ case 'm':
+ db_next = db_xcdump(addr, size, count+1, task);
+ return;
+ default:
+ if (db_print_position() == 0) {
+ /* If we hit a new symbol, print it */
+ char * name;
+ db_addr_t off;
+
+ db_find_task_sym_and_offset(addr,&name,&off,task);
+ if (off == 0)
+ db_printf("%s:\t", name);
+ else
+ db_printf("\t\t");
+
+ db_prev = addr;
+ }
+
+ switch (c) {
+ case ',': /* skip one unit w/o printing */
+ addr += size;
+ break;
+
+ case 'r': /* signed, current radix */
+ value = db_get_task_value(addr,size,TRUE,task);
+ addr += size;
+ db_printf("%-*R", width, value);
+ break;
+ case 'x': /* unsigned hex */
+ value = db_get_task_value(addr,size,FALSE,task);
+ addr += size;
+ db_printf("%-*X", width, value);
+ break;
+ case 'z': /* signed hex */
+ value = db_get_task_value(addr,size,TRUE,task);
+ addr += size;
+ db_printf("%-*Z", width, value);
+ break;
+ case 'd': /* signed decimal */
+ value = db_get_task_value(addr,size,TRUE,task);
+ addr += size;
+ db_printf("%-*D", width, value);
+ break;
+ case 'U': /* unsigned decimal */
+ value = db_get_task_value(addr,size,FALSE,task);
+ addr += size;
+ db_printf("%-*U", width, value);
+ break;
+ case 'o': /* unsigned octal */
+ value = db_get_task_value(addr,size,FALSE,task);
+ addr += size;
+ db_printf("%-*O", width, value);
+ break;
+ case 'c': /* character */
+ value = db_get_task_value(addr,1,FALSE,task);
+ addr += 1;
+ if (value >= ' ' && value <= '~')
+ db_printf("%c", value);
+ else
+ db_printf("\\%03o", value);
+ break;
+ case 's': /* null-terminated string */
+ for (;;) {
+ value = db_get_task_value(addr,1,FALSE,task);
+ addr += 1;
+ if (value == 0)
+ break;
+ if (value >= ' ' && value <= '~')
+ db_printf("%c", value);
+ else
+ db_printf("\\%03o", value);
+ }
+ break;
+ case 'i': /* instruction */
+ addr = db_disasm(addr, FALSE, task);
+ break;
+ case 'I': /* instruction, alternate form */
+ addr = db_disasm(addr, TRUE, task);
+ break;
+ default:
+ break;
+ }
+ if (db_print_position() != 0)
+ db_end_line();
+ break;
+ }
+ }
+ }
+ db_next = addr;
+}
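+
+/*
+ * Illustrative examine formats, derived from the cases handled above.
+ * The command name that dispatches to db_examine_cmd and the addr,count
+ * argument parsing live in the command table elsewhere; "x" is assumed
+ * here:
+ *
+ *	x/bx addr,10	sixteen (0x10) bytes, unsigned hex
+ *	x/hd addr	one halfword, signed decimal
+ *	x/s addr	a null-terminated string
+ *	x/i addr,8	eight disassembled instructions
+ *	x/m addr,40	hex and ASCII dump via db_xcdump()
+ */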
+
+/*
+ * Print value.
+ */
+char db_print_format = 'x';
+
+/*ARGSUSED*/
+void
+db_print_cmd()
+{
+ db_expr_t value;
+ int t;
+ task_t task = TASK_NULL;
+
+ if ((t = db_read_token()) == tSLASH) {
+ if (db_read_token() != tIDENT) {
+ db_printf("Bad modifier \"/%s\"\n", db_tok_string);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ if (db_tok_string[0])
+ db_print_format = db_tok_string[0];
+ if (db_option(db_tok_string, 't') && db_default_thread)
+ task = db_default_thread->task;
+ } else
+ db_unread_token(t);
+
+ for ( ; ; ) {
+ t = db_read_token();
+ if (t == tSTRING) {
+ db_printf("%s", db_tok_string);
+ continue;
+ }
+ db_unread_token(t);
+ if (!db_expression(&value))
+ break;
+ switch (db_print_format) {
+ case 'a':
+ db_task_printsym((db_addr_t)value, DB_STGY_ANY, task);
+ break;
+ case 'r':
+ db_printf("%*r", 3+2*sizeof(db_expr_t), value);
+ break;
+ case 'x':
+ db_printf("%*x", 2*sizeof(db_expr_t), value);
+ break;
+ case 'z':
+ db_printf("%*z", 2*sizeof(db_expr_t), value);
+ break;
+ case 'd':
+ db_printf("%*d", 3+2*sizeof(db_expr_t), value);
+ break;
+ case 'u':
+ db_printf("%*u", 3+2*sizeof(db_expr_t), value);
+ break;
+ case 'o':
+	    db_printf("%*o", 4*sizeof(db_expr_t), value);
+ break;
+ case 'c':
+ value = value & 0xFF;
+ if (value >= ' ' && value <= '~')
+ db_printf("%c", value);
+ else
+ db_printf("\\%03o", value);
+ break;
+ default:
+ db_printf("Unknown format %c\n", db_print_format);
+ db_print_format = 'x';
+ db_error(0);
+ }
+ }
+}
+
+void
+db_print_loc_and_inst(loc, task)
+ db_addr_t loc;
+ task_t task;
+{
+ db_task_printsym(loc, DB_STGY_PROC, task);
+ db_printf(":\t");
+ (void) db_disasm(loc, TRUE, task);
+}
+
+void
+db_strcpy(dst, src)
+ register char *dst;
+ register char *src;
+{
+ while (*dst++ = *src++)
+ ;
+}
+
+void db_search(); /*forward*/
+/*
+ * Search for a value in memory.
+ * Syntax: search [/bhl] addr value [mask] [,count] [thread]
+ */
+void
+db_search_cmd()
+{
+ int t;
+ db_addr_t addr;
+ int size = 0;
+ db_expr_t value;
+ db_expr_t mask;
+ db_addr_t count;
+ thread_t thread;
+ boolean_t thread_flag = FALSE;
+ register char *p;
+
+ t = db_read_token();
+ if (t == tSLASH) {
+ t = db_read_token();
+ if (t != tIDENT) {
+ bad_modifier:
+ db_printf("Bad modifier \"/%s\"\n", db_tok_string);
+ db_flush_lex();
+ return;
+ }
+
+ for (p = db_tok_string; *p; p++) {
+ switch(*p) {
+ case 'b':
+ size = sizeof(char);
+ break;
+ case 'h':
+ size = sizeof(short);
+ break;
+ case 'l':
+ size = sizeof(long);
+ break;
+ case 't':
+ thread_flag = TRUE;
+ break;
+ default:
+ goto bad_modifier;
+ }
+ }
+ } else {
+ db_unread_token(t);
+ size = sizeof(int);
+ }
+
+ if (!db_expression(&addr)) {
+ db_printf("Address missing\n");
+ db_flush_lex();
+ return;
+ }
+
+ if (!db_expression(&value)) {
+ db_printf("Value missing\n");
+ db_flush_lex();
+ return;
+ }
+
+ if (!db_expression(&mask))
+ mask = ~0;
+
+ t = db_read_token();
+ if (t == tCOMMA) {
+ if (!db_expression(&count)) {
+ db_printf("Count missing\n");
+ db_flush_lex();
+ return;
+ }
+ } else {
+ db_unread_token(t);
+ count = -1; /* effectively forever */
+ }
+ if (thread_flag) {
+ if (!db_get_next_thread(&thread, 0))
+ return;
+ } else
+ thread = THREAD_NULL;
+
+ db_search(addr, size, value, mask, count, db_thread_to_task(thread));
+}
+
+void
+db_search(addr, size, value, mask, count, task)
+ register
+ db_addr_t addr;
+ int size;
+ db_expr_t value;
+ db_expr_t mask;
+ unsigned int count;
+ task_t task;
+{
+ while (count-- != 0) {
+ db_prev = addr;
+ if ((db_get_task_value(addr,size,FALSE,task) & mask) == value)
+ break;
+ addr += size;
+ }
+ db_next = addr;
+}
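+
+/*
+ * Usage sketch for the search command described above: with the default
+ * radix of 16,
+ *
+ *	search/h 0x80001000 0xbeef,20
+ *
+ * scans up to 0x20 halfwords starting at 0x80001000 for the value 0xbeef
+ * (the mask defaults to ~0); db_next is left at the first match, or just
+ * past the scanned range if nothing matched.
+ */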
+
+#define DB_XCDUMP_NC 16
+
+int
+db_xcdump(addr, size, count, task)
+ db_addr_t addr;
+ int size;
+ int count;
+ task_t task;
+{
+ register i, n;
+ db_expr_t value;
+ int bcount;
+ db_addr_t off;
+ char *name;
+ char data[DB_XCDUMP_NC];
+
+ db_find_task_sym_and_offset(addr, &name, &off, task);
+ for (n = count*size; n > 0; n -= bcount) {
+ db_prev = addr;
+ if (off == 0) {
+ db_printf("%s:\n", name);
+ off = -1;
+ }
+ db_printf("%0*X:%s", 2*sizeof(db_addr_t), addr,
+ (size != 1)? " ": "");
+ bcount = ((n > DB_XCDUMP_NC)? DB_XCDUMP_NC: n);
+ if (trunc_page(addr) != trunc_page(addr+bcount-1)) {
+ db_addr_t next_page_addr = trunc_page(addr+bcount-1);
+ if (!DB_CHECK_ACCESS(next_page_addr, sizeof(int), task))
+ bcount = next_page_addr - addr;
+ }
+ db_read_bytes((char *)addr, bcount, data, task);
+ for (i = 0; i < bcount && off != 0; i += size) {
+ if (i % 4 == 0)
+ db_printf(" ");
+ value = db_get_task_value(addr, size, FALSE, task);
+ db_printf("%0*x ", size*2, value);
+ addr += size;
+ db_find_task_sym_and_offset(addr, &name, &off, task);
+ }
+ db_printf("%*s",
+ ((DB_XCDUMP_NC-i)/size)*(size*2+1)+(DB_XCDUMP_NC-i)/4,
+ "");
+ bcount = i;
+ db_printf("%s*", (size != 1)? " ": "");
+ for (i = 0; i < bcount; i++) {
+ value = data[i];
+ db_printf("%c", (value >= ' ' && value <= '~')? value: '.');
+ }
+ db_printf("*\n");
+ }
+ return(addr);
+}
+
+#endif	/* MACH_KDB */
diff --git a/ddb/db_expr.c b/ddb/db_expr.c
new file mode 100644
index 00000000..b9848bb6
--- /dev/null
+++ b/ddb/db_expr.c
@@ -0,0 +1,391 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <kern/task.h>
+
+
+boolean_t
+db_term(valuep)
+ db_expr_t *valuep;
+{
+ int t;
+
+ switch(t = db_read_token()) {
+ case tIDENT:
+ if (!db_value_of_name(db_tok_string, valuep)) {
+ db_printf("Symbol \"%s\" not found\n", db_tok_string);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ return (TRUE);
+ case tNUMBER:
+ *valuep = db_tok_number;
+ return (TRUE);
+ case tDOT:
+ *valuep = (db_expr_t)db_dot;
+ return (TRUE);
+ case tDOTDOT:
+ *valuep = (db_expr_t)db_prev;
+ return (TRUE);
+ case tPLUS:
+ *valuep = (db_expr_t) db_next;
+ return (TRUE);
+ case tQUOTE:
+ *valuep = (db_expr_t)db_last_addr;
+ return (TRUE);
+ case tDOLLAR:
+ if (!db_get_variable(valuep))
+ return (FALSE);
+ return (TRUE);
+ case tLPAREN:
+ if (!db_expression(valuep)) {
+		db_error("Unmatched ()s\n");
+ /*NOTREACHED*/
+ }
+ t = db_read_token();
+ if (t != tRPAREN) {
+ db_printf("')' expected at \"%s...\"\n", db_tok_string);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ return (TRUE);
+ default:
+ db_unread_token(t);
+ return (FALSE);
+ }
+}
+
+int
+db_size_option(modif, u_option, t_option)
+ char *modif;
+ boolean_t *u_option;
+ boolean_t *t_option;
+{
+ register char *p;
+ int size = sizeof(int);
+
+ *u_option = FALSE;
+ *t_option = FALSE;
+ for (p = modif; *p; p++) {
+ switch(*p) {
+ case 'b':
+ size = sizeof(char);
+ break;
+ case 'h':
+ size = sizeof(short);
+ break;
+ case 'l':
+ size = sizeof(long);
+ break;
+ case 'u':
+ *u_option = TRUE;
+ break;
+ case 't':
+ *t_option = TRUE;
+ break;
+ }
+ }
+ return(size);
+}
+
+boolean_t
+db_unary(valuep)
+ db_expr_t *valuep;
+{
+ int t;
+ int size;
+ boolean_t u_opt, t_opt;
+ task_t task;
+ extern task_t db_default_task;
+
+ t = db_read_token();
+ if (t == tMINUS) {
+ if (!db_unary(valuep)) {
+ db_error("Expression syntax error after '-'\n");
+ /*NOTREACHED*/
+ }
+ *valuep = -*valuep;
+ return (TRUE);
+ }
+ if (t == tSTAR) {
+ /* indirection */
+ if (!db_unary(valuep)) {
+ db_error("Expression syntax error after '*'\n");
+ /*NOTREACHED*/
+ }
+ task = TASK_NULL;
+ size = sizeof(db_addr_t);
+ u_opt = FALSE;
+ t = db_read_token();
+ if (t == tIDENT && db_tok_string[0] == ':') {
+ size = db_size_option(&db_tok_string[1], &u_opt, &t_opt);
+ if (t_opt)
+ task = db_default_task;
+ } else
+ db_unread_token(t);
+ *valuep = db_get_task_value((db_addr_t)*valuep, size, !u_opt, task);
+ return (TRUE);
+ }
+ if (t == tEXCL) {
+ if (!db_unary(valuep)) {
+ db_error("Expression syntax error after '!'\n");
+ /*NOTREACHED*/
+ }
+ *valuep = (!(*valuep));
+ return (TRUE);
+ }
+ db_unread_token(t);
+ return (db_term(valuep));
+}
+
+boolean_t
+db_mult_expr(valuep)
+ db_expr_t *valuep;
+{
+ db_expr_t lhs, rhs;
+ int t;
+ char c;
+
+ if (!db_unary(&lhs))
+ return (FALSE);
+
+ t = db_read_token();
+ while (t == tSTAR || t == tSLASH || t == tPCT || t == tHASH
+ || t == tBIT_AND) {
+ c = db_tok_string[0];
+ if (!db_term(&rhs)) {
+ db_printf("Expression syntax error after '%c'\n", c);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ switch(t) {
+ case tSTAR:
+ lhs *= rhs;
+ break;
+ case tBIT_AND:
+ lhs &= rhs;
+ break;
+ default:
+ if (rhs == 0) {
+ db_error("Divide by 0\n");
+ /*NOTREACHED*/
+ }
+ if (t == tSLASH)
+ lhs /= rhs;
+ else if (t == tPCT)
+ lhs %= rhs;
+ else
+ lhs = ((lhs+rhs-1)/rhs)*rhs;
+ }
+ t = db_read_token();
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+boolean_t
+db_add_expr(valuep)
+ db_expr_t *valuep;
+{
+ db_expr_t lhs, rhs;
+ int t;
+ char c;
+
+ if (!db_mult_expr(&lhs))
+ return (FALSE);
+
+ t = db_read_token();
+ while (t == tPLUS || t == tMINUS || t == tBIT_OR) {
+ c = db_tok_string[0];
+ if (!db_mult_expr(&rhs)) {
+ db_printf("Expression syntax error after '%c'\n", c);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ if (t == tPLUS)
+ lhs += rhs;
+ else if (t == tMINUS)
+ lhs -= rhs;
+ else
+ lhs |= rhs;
+ t = db_read_token();
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+boolean_t
+db_shift_expr(valuep)
+ db_expr_t *valuep;
+{
+ db_expr_t lhs, rhs;
+ int t;
+
+ if (!db_add_expr(&lhs))
+ return (FALSE);
+
+ t = db_read_token();
+ while (t == tSHIFT_L || t == tSHIFT_R) {
+ if (!db_add_expr(&rhs)) {
+ db_printf("Expression syntax error after \"%s\"\n",
+ (t == tSHIFT_L)? "<<": ">>");
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ if (rhs < 0) {
+ db_error("Negative shift amount\n");
+ /*NOTREACHED*/
+ }
+ if (t == tSHIFT_L)
+ lhs <<= rhs;
+ else {
+ /* Shift right is unsigned */
+ lhs = (natural_t) lhs >> rhs;
+ }
+ t = db_read_token();
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+boolean_t
+db_logical_relation_expr(valuep)
+ db_expr_t *valuep;
+{
+ db_expr_t lhs, rhs;
+ int t;
+ char op[3];
+
+ if (!db_shift_expr(&lhs))
+ return(FALSE);
+
+ t = db_read_token();
+ while (t == tLOG_EQ || t == tLOG_NOT_EQ
+ || t == tGREATER || t == tGREATER_EQ
+ || t == tLESS || t == tLESS_EQ) {
+ op[0] = db_tok_string[0];
+ op[1] = db_tok_string[1];
+ op[2] = 0;
+ if (!db_shift_expr(&rhs)) {
+ db_printf("Expression syntax error after \"%s\"\n", op);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ switch(t) {
+ case tLOG_EQ:
+ lhs = (lhs == rhs);
+ break;
+ case tLOG_NOT_EQ:
+ lhs = (lhs != rhs);
+ break;
+ case tGREATER:
+ lhs = (lhs > rhs);
+ break;
+ case tGREATER_EQ:
+ lhs = (lhs >= rhs);
+ break;
+ case tLESS:
+ lhs = (lhs < rhs);
+ break;
+ case tLESS_EQ:
+ lhs = (lhs <= rhs);
+ break;
+ }
+ t = db_read_token();
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+boolean_t
+db_logical_and_expr(valuep)
+ db_expr_t *valuep;
+{
+ db_expr_t lhs, rhs;
+ int t;
+
+ if (!db_logical_relation_expr(&lhs))
+ return(FALSE);
+
+ t = db_read_token();
+ while (t == tLOG_AND) {
+ if (!db_logical_relation_expr(&rhs)) {
+ db_error("Expression syntax error after \"&&\"\n");
+ /*NOTREACHED*/
+ }
+ lhs = (lhs && rhs);
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+boolean_t
+db_logical_or_expr(valuep)
+ db_expr_t *valuep;
+{
+ db_expr_t lhs, rhs;
+ int t;
+
+ if (!db_logical_and_expr(&lhs))
+ return(FALSE);
+
+ t = db_read_token();
+ while (t == tLOG_OR) {
+ if (!db_logical_and_expr(&rhs)) {
+ db_error("Expression syntax error after \"||\"\n");
+ /*NOTREACHED*/
+ }
+ lhs = (lhs || rhs);
+ }
+ db_unread_token(t);
+ *valuep = lhs;
+ return (TRUE);
+}
+
+int
+db_expression(valuep)
+ db_expr_t *valuep;
+{
+ return (db_logical_or_expr(valuep));
+}
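+
+/*
+ * Informal sketch of the grammar accepted by the recursive-descent
+ * parser above, from lowest to highest precedence:
+ *
+ *	expression := or_expr
+ *	or_expr    := and_expr { "||" and_expr }
+ *	and_expr   := rel_expr { "&&" rel_expr }
+ *	rel_expr   := shift { ("=="|"!="|"<"|"<="|">"|">=") shift }
+ *	shift      := add { ("<<"|">>") add }
+ *	add        := mult { ("+"|"-"|"|") mult }
+ *	mult       := unary { ("*"|"/"|"%"|"#"|"&") term }
+ *	unary      := ("-"|"!") unary | "*" unary [":" size_opts] | term
+ *	term       := symbol | number | "." | ".." | "+" | "'" | "$" var
+ *		    | "(" expression ")"
+ */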
+
+#endif	/* MACH_KDB */
diff --git a/ddb/db_expr.h b/ddb/db_expr.h
new file mode 100644
index 00000000..989b66be
--- /dev/null
+++ b/ddb/db_expr.h
@@ -0,0 +1,26 @@
+/*
+ * (c) Copyright 1992, 1993, 1994, 1995 OPEN SOFTWARE FOUNDATION, INC.
+ * ALL RIGHTS RESERVED
+ */
+/*
+ * OSF RI nmk19b2 5/2/95
+ */
+
+#ifndef _DDB_DB_EXPR_H_
+#define _DDB_DB_EXPR_H_
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+
+/* Prototypes for functions exported by this module.
+ */
+
+int db_size_option(
+ char *modif,
+ boolean_t *u_option,
+ boolean_t *t_option);
+
+int db_expression(db_expr_t *valuep);
+
+#endif /* !_DDB_DB_EXPR_H_ */
diff --git a/ddb/db_ext_symtab.c b/ddb/db_ext_symtab.c
new file mode 100644
index 00000000..1a8ea8b6
--- /dev/null
+++ b/ddb/db_ext_symtab.c
@@ -0,0 +1,123 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <mach_debug.h>
+
+#if MACH_DEBUG
+
+#include <mach/mach_types.h> /* vm_address_t */
+#include <mach/std_types.h> /* pointer_t */
+#include <mach/vm_param.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <kern/host.h>
+#include <kern/task.h>
+#include <ddb/db_sym.h>
+
+
+
+/*
+ * Loads a symbol table for an external file into the kernel debugger.
+ * The symbol table data is an array of characters. It is assumed that
+ * the caller and the kernel debugger agree on its format.
+ */
+kern_return_t
+host_load_symbol_table(host, task, name, symtab, symtab_count)
+ host_t host;
+ task_t task;
+ char * name;
+ pointer_t symtab;
+ unsigned int symtab_count;
+{
+ kern_return_t result;
+ vm_offset_t symtab_start;
+ vm_offset_t symtab_end;
+ vm_map_t map;
+ vm_map_copy_t symtab_copy_object;
+
+ if (host == HOST_NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ /*
+ * Copy the symbol table array into the kernel.
+ * We make a copy of the copy object, and clear
+ * the old one, so that returning error will not
+ * deallocate the data twice.
+ */
+ symtab_copy_object = (vm_map_copy_t) symtab;
+ result = vm_map_copyout(
+ kernel_map,
+ &symtab_start,
+ vm_map_copy_copy(symtab_copy_object));
+ if (result != KERN_SUCCESS)
+ return (result);
+
+ symtab_end = symtab_start + symtab_count;
+
+ /*
+ * Add the symbol table.
+ * Do not keep a reference for the task map. XXX
+ */
+ if (task == TASK_NULL)
+ map = VM_MAP_NULL;
+ else
+ map = task->map;
+ if (!X_db_sym_init((char *)symtab_start,
+ (char *)symtab_end,
+ name,
+ (char *)map))
+ {
+ /*
+ * Not enough room for symbol table - failure.
+ */
+ (void) vm_deallocate(kernel_map,
+ symtab_start,
+ symtab_count);
+ return (KERN_FAILURE);
+ }
+
+ /*
+ * Wire down the symbol table
+ */
+ (void) vm_map_pageable(kernel_map,
+ symtab_start,
+ round_page(symtab_end),
+ VM_PROT_READ|VM_PROT_WRITE);
+
+ /*
+ * Discard the original copy object
+ */
+ vm_map_copy_discard(symtab_copy_object);
+
+ return (KERN_SUCCESS);
+}
+
+#endif /* MACH_DEBUG */
+
+#endif	/* MACH_KDB */
diff --git a/ddb/db_input.c b/ddb/db_input.c
new file mode 100644
index 00000000..c175ae15
--- /dev/null
+++ b/ddb/db_input.c
@@ -0,0 +1,378 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_output.h>
+
+
+
+#ifndef DB_HISTORY_SIZE
+#define DB_HISTORY_SIZE 4000
+#endif	/* DB_HISTORY_SIZE */
+
+/*
+ * Character input and editing.
+ */
+
+/*
+ * We don't track output position while editing input,
+ * since input always ends with a new-line. We just
+ * reset the line position at the end.
+ */
+char * db_lbuf_start; /* start of input line buffer */
+char * db_lbuf_end; /* end of input line buffer */
+char * db_lc; /* current character */
+char * db_le; /* one past last character */
+#if DB_HISTORY_SIZE != 0
+char db_history[DB_HISTORY_SIZE]; /* start of history buffer */
+int db_history_size = DB_HISTORY_SIZE;/* size of history buffer */
+char * db_history_curr = db_history; /* start of current line */
+char * db_history_last = db_history; /* start of last line */
+char * db_history_prev = (char *) 0; /* start of previous line */
+#endif
+
+#define CTRL(c) ((c) & 0x1f)
+#define isspace(c) ((c) == ' ' || (c) == '\t')
+#define BLANK ' '
+#define BACKUP '\b'
+
+void
+db_putstring(s, count)
+ char *s;
+ int count;
+{
+ while (--count >= 0)
+ cnputc(*s++);
+}
+
+void
+db_putnchars(c, count)
+ int c;
+ int count;
+{
+ while (--count >= 0)
+ cnputc(c);
+}
+
+/*
+ * Delete N characters, forward or backward
+ */
+#define DEL_FWD 0
+#define DEL_BWD 1
+void
+db_delete(n, bwd)
+ int n;
+ int bwd;
+{
+ register char *p;
+
+ if (bwd) {
+ db_lc -= n;
+ db_putnchars(BACKUP, n);
+ }
+ for (p = db_lc; p < db_le-n; p++) {
+ *p = *(p+n);
+ cnputc(*p);
+ }
+ db_putnchars(BLANK, n);
+ db_putnchars(BACKUP, db_le - db_lc);
+ db_le -= n;
+}
+
+void
+db_delete_line()
+{
+ db_delete(db_le - db_lc, DEL_FWD);
+ db_delete(db_lc - db_lbuf_start, DEL_BWD);
+ db_le = db_lc = db_lbuf_start;
+}
+
+#if DB_HISTORY_SIZE != 0
+#define INC_DB_CURR() \
+ do { \
+ db_history_curr++; \
+ if (db_history_curr > \
+ db_history + db_history_size - 1) \
+ db_history_curr = db_history; \
+ } while (0)
+#define DEC_DB_CURR() \
+ do { \
+ db_history_curr--; \
+ if (db_history_curr < db_history) \
+ db_history_curr = db_history + \
+ db_history_size - 1; \
+ } while (0)
+#endif
+
+/* returns TRUE at end-of-line */
+boolean_t
+db_inputchar(c)
+ int c;
+{
+ switch (c) {
+ case CTRL('b'):
+ /* back up one character */
+ if (db_lc > db_lbuf_start) {
+ cnputc(BACKUP);
+ db_lc--;
+ }
+ break;
+ case CTRL('f'):
+ /* forward one character */
+ if (db_lc < db_le) {
+ cnputc(*db_lc);
+ db_lc++;
+ }
+ break;
+ case CTRL('a'):
+ /* beginning of line */
+ while (db_lc > db_lbuf_start) {
+ cnputc(BACKUP);
+ db_lc--;
+ }
+ break;
+ case CTRL('e'):
+ /* end of line */
+ while (db_lc < db_le) {
+ cnputc(*db_lc);
+ db_lc++;
+ }
+ break;
+ case CTRL('h'):
+ case 0177:
+ /* erase previous character */
+ if (db_lc > db_lbuf_start)
+ db_delete(1, DEL_BWD);
+ break;
+ case CTRL('d'):
+ /* erase next character */
+ if (db_lc < db_le)
+ db_delete(1, DEL_FWD);
+ break;
+ case CTRL('k'):
+ /* delete to end of line */
+ if (db_lc < db_le)
+ db_delete(db_le - db_lc, DEL_FWD);
+ break;
+ case CTRL('u'):
+ /* delete line */
+ db_delete_line();
+ break;
+ case CTRL('t'):
+ /* twiddle last 2 characters */
+ if (db_lc >= db_lbuf_start + 2) {
+ c = db_lc[-2];
+ db_lc[-2] = db_lc[-1];
+ db_lc[-1] = c;
+ cnputc(BACKUP);
+ cnputc(BACKUP);
+ cnputc(db_lc[-2]);
+ cnputc(db_lc[-1]);
+ }
+ break;
+#if DB_HISTORY_SIZE != 0
+ case CTRL('p'):
+ DEC_DB_CURR();
+ while (db_history_curr != db_history_last) {
+ DEC_DB_CURR();
+ if (*db_history_curr == '\0')
+ break;
+ }
+ db_delete_line();
+ if (db_history_curr == db_history_last) {
+ INC_DB_CURR();
+ db_le = db_lc = db_lbuf_start;
+ } else {
+ register char *p;
+ INC_DB_CURR();
+ for (p = db_history_curr, db_le = db_lbuf_start;
+ *p; ) {
+ *db_le++ = *p++;
+ if (p == db_history + db_history_size) {
+ p = db_history;
+ }
+ }
+ db_lc = db_le;
+ }
+ db_putstring(db_lbuf_start, db_le - db_lbuf_start);
+ break;
+ case CTRL('n'):
+ while (db_history_curr != db_history_last) {
+ if (*db_history_curr == '\0')
+ break;
+ INC_DB_CURR();
+ }
+ if (db_history_curr != db_history_last) {
+ INC_DB_CURR();
+ db_delete_line();
+ if (db_history_curr != db_history_last) {
+ register char *p;
+ for (p = db_history_curr,
+ db_le = db_lbuf_start; *p;) {
+ *db_le++ = *p++;
+ if (p == db_history +
+ db_history_size) {
+ p = db_history;
+ }
+ }
+ db_lc = db_le;
+ }
+ db_putstring(db_lbuf_start, db_le - db_lbuf_start);
+ }
+ break;
+#endif
+ case CTRL('r'):
+ db_putstring("^R\n", 3);
+ if (db_le > db_lbuf_start) {
+ db_putstring(db_lbuf_start, db_le - db_lbuf_start);
+ db_putnchars(BACKUP, db_le - db_lc);
+ }
+ break;
+ case '\n':
+ case '\r':
+#if DB_HISTORY_SIZE != 0
+ /*
+ * Check whether current line is the same
+ * as previous saved line. If it is, don`t
+ * save it.
+ */
+ if (db_history_curr == db_history_prev) {
+ register char *pp, *pc;
+
+ /*
+ * Is it the same?
+ */
+ for (pp = db_history_prev, pc = db_lbuf_start;
+ pc != db_le && *pp; ) {
+ if (*pp != *pc)
+ break;
+ if (++pp == db_history + db_history_size) {
+ pp = db_history;
+ }
+ pc++;
+ }
+ if (!*pp && pc == db_le) {
+ /*
+ * Repeated previous line. Don`t save.
+ */
+ db_history_curr = db_history_last;
+ *db_le++ = c;
+ return (TRUE);
+ }
+ }
+ if (db_le != db_lbuf_start) {
+ register char *p;
+ db_history_prev = db_history_last;
+ for (p = db_lbuf_start; p != db_le; p++) {
+ *db_history_last++ = *p;
+ if (db_history_last == db_history +
+ db_history_size) {
+ db_history_last = db_history;
+ }
+ }
+ *db_history_last++ = '\0';
+ }
+ db_history_curr = db_history_last;
+#endif
+ *db_le++ = c;
+ return (TRUE);
+ default:
+ if (db_le == db_lbuf_end) {
+ cnputc('\007');
+ }
+ else if (c >= ' ' && c <= '~') {
+ register char *p;
+
+ for (p = db_le; p > db_lc; p--)
+ *p = *(p-1);
+ *db_lc++ = c;
+ db_le++;
+ cnputc(c);
+ db_putstring(db_lc, db_le - db_lc);
+ db_putnchars(BACKUP, db_le - db_lc);
+ }
+ break;
+ }
+ return (FALSE);
+}
+
+int
+db_readline(lstart, lsize)
+ char * lstart;
+ int lsize;
+{
+ db_force_whitespace(); /* synch output position */
+
+ db_lbuf_start = lstart;
+ db_lbuf_end = lstart + lsize - 1;
+ db_lc = lstart;
+ db_le = lstart;
+
+ while (!db_inputchar(cngetc()))
+ continue;
+
+ db_putchar('\n'); /* synch output position */
+
+ *db_le = 0;
+ return (db_le - db_lbuf_start);
+}
+
+void
+db_check_interrupt()
+{
+ register int c;
+
+ c = cnmaygetc();
+ switch (c) {
+ case -1: /* no character */
+ return;
+
+ case CTRL('c'):
+ db_error((char *)0);
+ /*NOTREACHED*/
+
+ case CTRL('s'):
+ do {
+ c = cnmaygetc();
+ if (c == CTRL('c'))
+ db_error((char *)0);
+ } while (c != CTRL('q'));
+ break;
+
+ default:
+ /* drop on floor */
+ break;
+ }
+}
+
+#endif	/* MACH_KDB */
diff --git a/ddb/db_lex.c b/ddb/db_lex.c
new file mode 100644
index 00000000..617f1237
--- /dev/null
+++ b/ddb/db_lex.c
@@ -0,0 +1,455 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+#include "mach_kdb.h"
+#if MACH_KDB
+
+/*
+ * Lexical analyzer.
+ */
+#include <machine/db_machdep.h>
+#include <kern/strings.h>
+#include <ddb/db_lex.h>
+
+char db_line[DB_LEX_LINE_SIZE];
+char db_last_line[DB_LEX_LINE_SIZE];
+char *db_lp, *db_endlp;
+char *db_last_lp;
+int db_look_char = 0;
+db_expr_t db_look_token = 0;
+
+int
+db_read_line(repeat_last)
+ char *repeat_last;
+{
+ int i;
+
+ i = db_readline(db_line, sizeof(db_line));
+ if (i == 0)
+ return (0); /* EOI */
+ if (repeat_last) {
+ if (strncmp(db_line, repeat_last, strlen(repeat_last)) == 0) {
+ db_strcpy(db_line, db_last_line);
+ db_printf("%s", db_line);
+ i = strlen(db_line);
+ } else if (db_line[0] != '\n' && db_line[0] != 0)
+ db_strcpy(db_last_line, db_line);
+ }
+ db_lp = db_line;
+ db_endlp = db_lp + i;
+ db_last_lp = db_lp;
+ db_look_char = 0;
+ db_look_token = 0;
+ return (i);
+}
+
+void
+db_flush_line()
+{
+ db_lp = db_line;
+ db_last_lp = db_lp;
+ db_endlp = db_line;
+}
+
+void
+db_switch_input(buffer, size)
+ char *buffer;
+ int size;
+{
+ db_lp = buffer;
+ db_last_lp = db_lp;
+ db_endlp = buffer + size;
+ db_look_char = 0;
+ db_look_token = 0;
+}
+
+void
+db_save_lex_context(lp)
+ register struct db_lex_context *lp;
+{
+ lp->l_ptr = db_lp;
+ lp->l_eptr = db_endlp;
+ lp->l_char = db_look_char;
+ lp->l_token = db_look_token;
+}
+
+void
+db_restore_lex_context(lp)
+ register struct db_lex_context *lp;
+{
+ db_lp = lp->l_ptr;
+ db_last_lp = db_lp;
+ db_endlp = lp->l_eptr;
+ db_look_char = lp->l_char;
+ db_look_token = lp->l_token;
+}
+
+int
+db_read_char()
+{
+ int c;
+
+ if (db_look_char != 0) {
+ c = db_look_char;
+ db_look_char = 0;
+ }
+ else if (db_lp >= db_endlp)
+ c = -1;
+ else
+ c = *db_lp++;
+ return (c);
+}
+
+void
+db_unread_char(c)
+ int c;
+{
+ db_look_char = c;
+}
+
+void
+db_unread_token(t)
+ int t;
+{
+ db_look_token = t;
+}
+
+int
+db_read_token()
+{
+ int t;
+
+ if (db_look_token) {
+ t = db_look_token;
+ db_look_token = 0;
+ }
+ else {
+ db_last_lp = db_lp;
+ if (db_look_char)
+ db_last_lp--;
+ t = db_lex();
+ }
+ return (t);
+}
+
+db_expr_t db_tok_number;
+char db_tok_string[TOK_STRING_SIZE];
+db_expr_t db_radix = 16;
+
+void
+db_flush_lex()
+{
+ db_flush_line();
+ db_look_char = 0;
+ db_look_token = 0;
+}
+
+#define DB_DISP_SKIP 40		/* max chars of skipped input to display */
+
+void
+db_skip_to_eol()
+{
+ register skip;
+ register t;
+ register n;
+ register char *p;
+
+ t = db_read_token();
+ p = db_last_lp;
+ for (skip = 0; t != tEOL && t != tSEMI_COLON && t != tEOF; skip++)
+ t = db_read_token();
+ if (t == tSEMI_COLON)
+ db_unread_token(t);
+ if (skip != 0) {
+ while (p < db_last_lp && (*p == ' ' || *p == '\t'))
+ p++;
+ db_printf("Warning: Skipped input data \"");
+ for (n = 0; n < DB_DISP_SKIP && p < db_last_lp; n++)
+ db_printf("%c", *p++);
+ if (n >= DB_DISP_SKIP)
+ db_printf("....");
+ db_printf("\"\n");
+ }
+}
+
+int
+db_lex()
+{
+ register char *cp;
+ register c;
+
+ c = db_read_char();
+ while (c <= ' ' || c > '~') {
+ if (c == '\n' || c == -1)
+ return (tEOL);
+ c = db_read_char();
+ }
+
+ cp = db_tok_string;
+ *cp++ = c;
+
+ if (c >= '0' && c <= '9') {
+ /* number */
+ int r, digit;
+
+ if (c > '0')
+ r = db_radix;
+ else {
+ c = db_read_char();
+ if (c == 'O' || c == 'o')
+ r = 8;
+ else if (c == 'T' || c == 't')
+ r = 10;
+ else if (c == 'X' || c == 'x')
+ r = 16;
+ else {
+ cp--;
+ r = db_radix;
+ db_unread_char(c);
+ }
+ c = db_read_char();
+ *cp++ = c;
+ }
+ db_tok_number = 0;
+ for (;;) {
+ if (c >= '0' && c <= ((r == 8) ? '7' : '9'))
+ digit = c - '0';
+ else if (r == 16 && ((c >= 'A' && c <= 'F') ||
+ (c >= 'a' && c <= 'f'))) {
+ if (c >= 'a')
+ digit = c - 'a' + 10;
+ else
+ digit = c - 'A' + 10;
+ }
+ else
+ break;
+ db_tok_number = db_tok_number * r + digit;
+ c = db_read_char();
+ if (cp < &db_tok_string[sizeof(db_tok_string)-1])
+ *cp++ = c;
+ }
+ cp[-1] = 0;
+ if ((c >= '0' && c <= '9') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') ||
+ (c == '_'))
+ {
+ db_printf("Bad character '%c' after number %s\n",
+ c, db_tok_string);
+ db_error(0);
+ db_flush_lex();
+ return (tEOF);
+ }
+ db_unread_char(c);
+ return (tNUMBER);
+ }
+ if ((c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') ||
+ c == '_' || c == '\\' || c == ':')
+ {
+ /* identifier */
+ if (c == '\\') {
+ c = db_read_char();
+ if (c == '\n' || c == -1)
+ db_error("Bad '\\' at the end of line\n");
+ cp[-1] = c;
+ }
+ while (1) {
+ c = db_read_char();
+ if ((c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') ||
+ (c >= '0' && c <= '9') ||
+ c == '_' || c == '\\' || c == ':' || c == '.')
+ {
+ if (c == '\\') {
+ c = db_read_char();
+ if (c == '\n' || c == -1)
+ db_error("Bad '\\' at the end of line\n");
+ }
+ *cp++ = c;
+ if (cp == db_tok_string+sizeof(db_tok_string)) {
+ db_error("String too long\n");
+ db_flush_lex();
+ return (tEOF);
+ }
+ continue;
+ }
+ else {
+ *cp = '\0';
+ break;
+ }
+ }
+ db_unread_char(c);
+ return (tIDENT);
+ }
+
+ *cp = 0;
+ switch (c) {
+ case '+':
+ return (tPLUS);
+ case '-':
+ return (tMINUS);
+ case '.':
+ c = db_read_char();
+ if (c == '.') {
+ *cp++ = c;
+ *cp = 0;
+ return (tDOTDOT);
+ }
+ db_unread_char(c);
+ return (tDOT);
+ case '*':
+ return (tSTAR);
+ case '/':
+ return (tSLASH);
+ case '=':
+ c = db_read_char();
+ if (c == '=') {
+ *cp++ = c;
+ *cp = 0;
+ return(tLOG_EQ);
+ }
+ db_unread_char(c);
+ return (tEQ);
+ case '%':
+ return (tPCT);
+ case '#':
+ return (tHASH);
+ case '(':
+ return (tLPAREN);
+ case ')':
+ return (tRPAREN);
+ case ',':
+ return (tCOMMA);
+ case '\'':
+ return (tQUOTE);
+ case '"':
+ /* string */
+ cp = db_tok_string;
+ c = db_read_char();
+ while (c != '"' && c > 0 && c != '\n') {
+ if (cp >= &db_tok_string[sizeof(db_tok_string)-1]) {
+		    db_error("String too long\n");
+ db_flush_lex();
+ return (tEOF);
+ }
+ if (c == '\\') {
+ c = db_read_char();
+ switch(c) {
+ case 'n':
+ c = '\n'; break;
+ case 't':
+ c = '\t'; break;
+ case '\\':
+ case '"':
+ break;
+ default:
+ db_printf("Bad escape sequence '\\%c'\n", c);
+ db_error(0);
+ db_flush_lex();
+ return (tEOF);
+ }
+ }
+ *cp++ = c;
+ c = db_read_char();
+ }
+ *cp = 0;
+ if (c != '"') {
+		db_error("Unterminated string constant\n");
+ db_flush_lex();
+ return (tEOF);
+ }
+ return (tSTRING);
+ case '$':
+ return (tDOLLAR);
+ case '!':
+ c = db_read_char();
+ if (c == '=') {
+ *cp++ = c;
+ *cp = 0;
+ return(tLOG_NOT_EQ);
+ }
+ db_unread_char(c);
+ return (tEXCL);
+ case '&':
+ c = db_read_char();
+ if (c == '&') {
+ *cp++ = c;
+ *cp = 0;
+ return(tLOG_AND);
+ }
+ db_unread_char(c);
+ return(tBIT_AND);
+ case '|':
+ c = db_read_char();
+ if (c == '|') {
+ *cp++ = c;
+ *cp = 0;
+ return(tLOG_OR);
+ }
+ db_unread_char(c);
+ return(tBIT_OR);
+ case '<':
+ c = db_read_char();
+ *cp++ = c;
+ *cp = 0;
+ if (c == '<')
+ return (tSHIFT_L);
+ if (c == '=')
+ return (tLESS_EQ);
+ cp[-1] = 0;
+ db_unread_char(c);
+ return(tLESS);
+ break;
+ case '>':
+ c = db_read_char();
+ *cp++ = c;
+ *cp = 0;
+ if (c == '>')
+ return (tSHIFT_R);
+ if (c == '=')
+ return (tGREATER_EQ);
+ cp[-1] = 0;
+ db_unread_char(c);
+ return (tGREATER);
+ break;
+ case ';':
+ return (tSEMI_COLON);
+ case '?':
+ return (tQUESTION);
+ case -1:
+ db_strcpy(db_tok_string, "<EOL>");
+ return (tEOF);
+ }
+ db_printf("Bad character '%c'\n", c);
+ db_flush_lex();
+ return (tEOF);
+}
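+
+/*
+ * Tokenization sketch: with the default radix of 16, the input
+ *
+ *	0t10*(.+4)
+ *
+ * lexes as tNUMBER(10), tSTAR, tLPAREN, tDOT, tPLUS, tNUMBER(4), tRPAREN,
+ * tEOL; the "0t", "0o" and "0x" prefixes force decimal, octal and hex
+ * regardless of db_radix.
+ */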
+
+#endif	/* MACH_KDB */
diff --git a/ddb/db_lex.h b/ddb/db_lex.h
new file mode 100644
index 00000000..c73b93e2
--- /dev/null
+++ b/ddb/db_lex.h
@@ -0,0 +1,93 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+/*
+ * Lexical analyzer.
+ */
+
+#define TOK_STRING_SIZE 64
+#define DB_LEX_LINE_SIZE 256
+
+struct db_lex_context {
+ int l_char; /* peek char */
+ int l_token; /* peek token */
+ char *l_ptr; /* line pointer */
+ char *l_eptr; /* line end pointer */
+};
+
+extern int db_read_line(/* char *rep_str */);
+extern void db_flush_line();
+extern int db_read_char();
+extern void db_unread_char(/* char c */);
+extern int db_read_token();
+extern void db_unread_token(/* int t */);
+extern void db_flush_lex();
+extern void db_switch_input(/* char *, int */);
+extern void db_save_lex_context(/* struct db_lex_context * */);
+extern void db_restore_lex_context(/* struct db_lex_context * */);
+extern void db_skip_to_eol();
+
+extern db_expr_t db_tok_number;
+extern char db_tok_string[TOK_STRING_SIZE];
+extern db_expr_t db_radix;
+
+#define tEOF (-1)
+#define tEOL 1
+#define tNUMBER 2
+#define tIDENT 3
+#define tPLUS 4
+#define tMINUS 5
+#define tDOT 6
+#define tSTAR 7
+#define tSLASH 8
+#define tEQ 9
+#define tLPAREN 10
+#define tRPAREN 11
+#define tPCT 12
+#define tHASH 13
+#define tCOMMA 14
+#define tQUOTE 15
+#define tDOLLAR 16
+#define tEXCL 17
+#define tSHIFT_L 18
+#define tSHIFT_R 19
+#define tDOTDOT 20
+#define tSEMI_COLON 21
+#define tLOG_EQ 22
+#define tLOG_NOT_EQ 23
+#define tLESS 24
+#define tLESS_EQ 25
+#define tGREATER 26
+#define tGREATER_EQ 27
+#define tBIT_AND 28
+#define tBIT_OR 29
+#define tLOG_AND 30
+#define tLOG_OR 31
+#define tSTRING 32
+#define tQUESTION 33
diff --git a/ddb/db_macro.c b/ddb/db_macro.c
new file mode 100644
index 00000000..c186ac6a
--- /dev/null
+++ b/ddb/db_macro.c
@@ -0,0 +1,183 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <kern/thread.h>
+
+#include <machine/db_machdep.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_command.h>
+
+
+
+/*
+ * debugger macro support
+ */
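+
+/*
+ * Usage sketch, assuming the usual "macro"/"dmacro"/"show macro" command
+ * names and the $arg variables registered in the command and variable
+ * tables elsewhere: after
+ *
+ *	macro xd x/4x $arg1
+ *
+ * typing "xd 0x1000" runs the saved line through db_exec_cmd_nest() with
+ * db_macro_args[level][0] set to 0x1000, read back here as $arg1.
+ */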
+
+#define DB_MACRO_LEVEL 5 /* max macro nesting */
+#define DB_NARGS 10 /* max args */
+#define DB_NUSER_MACRO 10 /* max user macros */
+
+int db_macro_free = DB_NUSER_MACRO;
+struct db_user_macro {
+ char m_name[TOK_STRING_SIZE];
+ char m_lbuf[DB_LEX_LINE_SIZE];
+ int m_size;
+} db_user_macro[DB_NUSER_MACRO];
+
+int db_macro_level = 0;
+db_expr_t db_macro_args[DB_MACRO_LEVEL][DB_NARGS];
+
+static struct db_user_macro *
+db_lookup_macro(name)
+ char *name;
+{
+ register struct db_user_macro *mp;
+
+ for (mp = db_user_macro; mp < &db_user_macro[DB_NUSER_MACRO]; mp++) {
+ if (mp->m_name[0] == 0)
+ continue;
+ if (strcmp(mp->m_name, name) == 0)
+ return(mp);
+ }
+ return(0);
+}
+
+void
+db_def_macro_cmd()
+{
+ register char *p;
+ register c;
+ register struct db_user_macro *mp, *ep;
+
+ if (db_read_token() != tIDENT) {
+ db_printf("Bad macro name \"%s\"\n", db_tok_string);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ if ((mp = db_lookup_macro(db_tok_string)) == 0) {
+ if (db_macro_free <= 0)
+ db_error("Too many macros\n");
+ /* NOTREACHED */
+ ep = &db_user_macro[DB_NUSER_MACRO];
+ for (mp = db_user_macro; mp < ep && mp->m_name[0]; mp++);
+ if (mp >= ep)
+ db_error("ddb: internal error(macro)\n");
+ /* NOTREACHED */
+ db_macro_free--;
+ db_strcpy(mp->m_name, db_tok_string);
+ }
+ for (c = db_read_char(); c == ' ' || c == '\t'; c = db_read_char());
+ for (p = mp->m_lbuf; c > 0; c = db_read_char())
+ *p++ = c;
+ *p = 0;
+ mp->m_size = p - mp->m_lbuf;
+}
+
+void
+db_del_macro_cmd()
+{
+ register struct db_user_macro *mp;
+
+ if (db_read_token() != tIDENT
+ || (mp = db_lookup_macro(db_tok_string)) == 0) {
+ db_printf("No such macro \"%s\"\n", db_tok_string);
+ db_error(0);
+ /* NOTREACHED */
+ } else {
+ mp->m_name[0] = 0;
+ db_macro_free++;
+ }
+}
+
+void
+db_show_macro()
+{
+ register struct db_user_macro *mp;
+ int t;
+ char *name = 0;
+
+ if ((t = db_read_token()) == tIDENT)
+ name = db_tok_string;
+ else
+ db_unread_token(t);
+ for (mp = db_user_macro; mp < &db_user_macro[DB_NUSER_MACRO]; mp++) {
+ if (mp->m_name[0] == 0)
+ continue;
+ if (name && strcmp(mp->m_name, name))
+ continue;
+ db_printf("%s: %s", mp->m_name, mp->m_lbuf);
+ }
+}
+
+int
+db_exec_macro(name)
+ char *name;
+{
+ register struct db_user_macro *mp;
+ register n;
+
+ if ((mp = db_lookup_macro(name)) == 0)
+ return(-1);
+ if (db_macro_level+1 >= DB_MACRO_LEVEL) {
+ db_macro_level = 0;
+	    db_error("Macro nesting too deep\n");
+ /* NOTREACHED */
+ }
+ for (n = 0;
+ n < DB_NARGS &&
+ db_expression(&db_macro_args[db_macro_level+1][n]);
+ n++);
+ while (n < DB_NARGS)
+ db_macro_args[db_macro_level+1][n++] = 0;
+ db_macro_level++;
+ db_exec_cmd_nest(mp->m_lbuf, mp->m_size);
+ db_macro_level--;
+ return(0);
+}
+
+int
+/* ARGSUSED */
+db_arg_variable(vp, valuep, flag, ap)
+ struct db_variable *vp;
+ db_expr_t *valuep;
+ int flag;
+ db_var_aux_param_t ap;
+{
+ if (ap->level != 1 || ap->suffix[0] < 1 || ap->suffix[0] > DB_NARGS) {
+ db_error("Bad $arg variable\n");
+ /* NOTREACHED */
+ }
+ if (flag == DB_VAR_GET)
+ *valuep = db_macro_args[db_macro_level][ap->suffix[0]-1];
+ else
+ db_macro_args[db_macro_level][ap->suffix[0]-1] = *valuep;
+ return(0);
+}
+
+#endif MACH_KDB
diff --git a/ddb/db_mp.c b/ddb/db_mp.c
new file mode 100644
index 00000000..607c24db
--- /dev/null
+++ b/ddb/db_mp.c
@@ -0,0 +1,339 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <cpus.h>
+
+#if NCPUS > 1
+
+#include <mach/boolean.h>
+#include <mach/machine.h>
+
+#include <kern/cpu_number.h>
+#include <kern/lock.h>
+
+#include <machine/db_machdep.h>
+
+#include <ddb/db_command.h>
+#include <ddb/db_run.h>
+
+/*
+ * Routines to interlock access to the kernel debugger on
+ * multiprocessors.
+ */
+
+decl_simple_lock_data(,db_lock) /* lock to enter debugger */
+volatile int db_cpu = -1; /* CPU currently in debugger */
+ /* -1 if none */
+int db_active[NCPUS] = { 0 }; /* count recursive entries
+ into debugger */
+int db_slave[NCPUS] = { 0 }; /* nonzero if cpu interrupted
+ by another cpu in debugger */
+
+int db_enter_debug = 0;
+
+void remote_db(); /* forward */
+void lock_db();
+void unlock_db();
+
+
+/*
+ * Called when entering kernel debugger.
+ * Takes db lock. If we were called remotely (slave state) we just
+ * wait for db_cpu to be equal to cpu_number(). Otherwise enter debugger
+ * if not active on another cpu
+ */
+
+boolean_t
+db_enter()
+{
+ int mycpu = cpu_number();
+
+ /*
+ * Count recursive entries to debugger.
+ */
+ db_active[mycpu]++;
+
+ /*
+ * Wait for other CPUS to leave debugger.
+ */
+ lock_db();
+
+ if (db_enter_debug)
+ db_printf(
+ "db_enter: cpu %d[%d], master %d, db_cpu %d, run mode %d\n",
+ mycpu, db_slave[mycpu], master_cpu, db_cpu, db_run_mode);
+
+ /*
+ * If no CPU in debugger, and I am not being stopped,
+ * enter the debugger.
+ */
+ if (db_cpu == -1 && !db_slave[mycpu]) {
+ remote_db(); /* stop other cpus */
+ db_cpu = mycpu;
+ return TRUE;
+ }
+ /*
+ * If I am already in the debugger (recursive entry
+ * or returning from single step), enter debugger.
+ */
+ else if (db_cpu == mycpu)
+ return TRUE;
+ /*
+ * Otherwise, cannot enter debugger.
+ */
+ else
+ return FALSE;
+}
+
+/*
+ * Leave debugger.
+ */
+void
+db_leave()
+{
+ int mycpu = cpu_number();
+
+ /*
+ * If continuing, give up debugger
+ */
+ if (db_run_mode == STEP_CONTINUE)
+ db_cpu = -1;
+
+ /*
+ * If I am a slave, drop my slave count.
+ */
+ if (db_slave[mycpu])
+ db_slave[mycpu]--;
+ if (db_enter_debug)
+ db_printf("db_leave: cpu %d[%d], db_cpu %d, run_mode %d\n",
+ mycpu, db_slave[mycpu], db_cpu, db_run_mode);
+ /*
+ * Unlock debugger.
+ */
+ unlock_db();
+
+ /*
+ * Drop recursive entry count.
+ */
+ db_active[mycpu]--;
+}
+
+
+/*
+ * invoke kernel debugger on slave processors
+ */
+
+void
+remote_db() {
+ int my_cpu = cpu_number();
+ register int i;
+
+ for (i = 0; i < NCPUS; i++) {
+ if (i != my_cpu &&
+ machine_slot[i].is_cpu &&
+ machine_slot[i].running)
+ {
+ cpu_interrupt_to_db(i);
+ }
+ }
+}
+
+/*
+ * Save and restore DB global registers.
+ *
+ * DB_SAVE_CTXT must be at the start of a block, and
+ * DB_RESTORE_CTXT must be in the same block.
+ */
+
+#ifdef __STDC__
+#define DB_SAVE(type, name) extern type name; type name##_save = name
+#define DB_RESTORE(name) name = name##_save
+#else /* __STDC__ */
+#define DB_SAVE(type, name) extern type name; type name/**/_save = name
+#define DB_RESTORE(name) name = name/**/_save
+#endif /* __STDC__ */
+
+#define DB_SAVE_CTXT() \
+ DB_SAVE(int, db_run_mode); \
+ DB_SAVE(boolean_t, db_sstep_print); \
+ DB_SAVE(int, db_loop_count); \
+ DB_SAVE(int, db_call_depth); \
+ DB_SAVE(int, db_inst_count); \
+ DB_SAVE(int, db_last_inst_count); \
+ DB_SAVE(int, db_load_count); \
+ DB_SAVE(int, db_store_count); \
+ DB_SAVE(boolean_t, db_cmd_loop_done); \
+ DB_SAVE(jmp_buf_t *, db_recover); \
+ DB_SAVE(db_addr_t, db_dot); \
+ DB_SAVE(db_addr_t, db_last_addr); \
+ DB_SAVE(db_addr_t, db_prev); \
+ DB_SAVE(db_addr_t, db_next); \
+ SAVE_DDB_REGS
+
+#define DB_RESTORE_CTXT() \
+ DB_RESTORE(db_run_mode); \
+ DB_RESTORE(db_sstep_print); \
+ DB_RESTORE(db_loop_count); \
+ DB_RESTORE(db_call_depth); \
+ DB_RESTORE(db_inst_count); \
+ DB_RESTORE(db_last_inst_count); \
+ DB_RESTORE(db_load_count); \
+ DB_RESTORE(db_store_count); \
+ DB_RESTORE(db_cmd_loop_done); \
+ DB_RESTORE(db_recover); \
+ DB_RESTORE(db_dot); \
+ DB_RESTORE(db_last_addr); \
+ DB_RESTORE(db_prev); \
+ DB_RESTORE(db_next); \
+ RESTORE_DDB_REGS
+
+/*
+ * switch to another cpu
+ */
+void
+db_on(cpu)
+ int cpu;
+{
+ /*
+ * Save ddb global variables
+ */
+ DB_SAVE_CTXT();
+
+ /*
+ * Don`t do if bad CPU number.
+ * CPU must also be spinning in db_entry.
+ */
+ if (cpu < 0 || cpu >= NCPUS || !db_active[cpu])
+ return;
+
+ /*
+ * Give debugger to that CPU
+ */
+ db_cpu = cpu;
+ unlock_db();
+
+ /*
+ * Wait for it to come back again
+ */
+ lock_db();
+
+ /*
+ * Restore ddb globals
+ */
+ DB_RESTORE_CTXT();
+
+ if (db_cpu == -1) /* someone continued */
+ db_continue_cmd(0, 0, 0, "");
+}
+
+/*
+ * Called by interprocessor interrupt when one CPU is
+ * in kernel debugger and wants to stop other CPUs
+ */
+void
+remote_db_enter()
+{
+ db_slave[cpu_number()]++;
+ kdb_kintr();
+}
+
+/*
+ * Acquire kernel debugger.
+ * Conditional code for forwarding characters from slave to console
+ * if console on master only.
+ */
+
+/*
+ * As long as db_cpu is not -1 or cpu_number(), we know that debugger
+ * is active on another cpu.
+ */
+void
+lock_db()
+{
+ int my_cpu = cpu_number();
+
+ for (;;) {
+#if CONSOLE_ON_MASTER
+ if (my_cpu == master_cpu) {
+ db_console();
+ }
+#endif
+ if (db_cpu != -1 && db_cpu != my_cpu)
+ continue;
+
+#if CONSOLE_ON_MASTER
+ if (my_cpu == master_cpu) {
+ if (!simple_lock_try(&db_lock))
+ continue;
+ }
+ else {
+ simple_lock(&db_lock);
+ }
+#else
+ simple_lock(&db_lock);
+#endif
+ if (db_cpu == -1 || db_cpu == my_cpu)
+ break;
+ simple_unlock(&db_lock);
+ }
+}
+
+void
+unlock_db()
+{
+ simple_unlock(&db_lock);
+}
+
+#ifdef sketch
+void
+db_console()
+{
+ if (i_bit(CBUS_PUT_CHAR, my_word)) {
+ volatile u_char c = cbus_ochar;
+ i_bit_clear(CBUS_PUT_CHAR, my_word);
+ cnputc(c);
+ } else if (i_bit(CBUS_GET_CHAR, my_word)) {
+ if (cbus_wait_char)
+ cbus_ichar = cngetc();
+ else
+ cbus_ichar = cnmaygetc();
+ i_bit_clear(CBUS_GET_CHAR, my_word);
+#ifndef notdef
+ } else if (!cnmaygetc()) {
+#else /* notdef */
+ } else if (com_is_char() && !com_getc(TRUE)) {
+#endif /* notdef */
+ simple_unlock(&db_lock);
+ db_cpu = my_cpu;
+ }
+}
+#endif /* sketch */
+
+#endif /* NCPUS > 1 */
+
+#endif	/* MACH_KDB */
diff --git a/ddb/db_output.c b/ddb/db_output.c
new file mode 100644
index 00000000..d7e416d2
--- /dev/null
+++ b/ddb/db_output.c
@@ -0,0 +1,240 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+/*
+ * Printf and character output for debugger.
+ */
+
+#include <mach/boolean.h>
+#include <sys/varargs.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_output.h>
+
+/*
+ * Character output - tracks position in line.
+ * To do this correctly, we should know how wide
+ * the output device is - then we could zero
+ * the line position when the output device wraps
+ * around to the start of the next line.
+ *
+ * Instead, we count the number of spaces printed
+ * since the last printing character so that we
+ * don't print trailing spaces. This avoids most
+ * of the wraparounds.
+ */
+
+#ifndef DB_MAX_LINE
+#define DB_MAX_LINE 24 /* maximum line */
+#define DB_MAX_WIDTH 80 /* maximum width */
+#endif	/* DB_MAX_LINE */
+
+#define DB_MIN_MAX_WIDTH 20 /* minimum max width */
+#define DB_MIN_MAX_LINE 3 /* minimum max line */
+#define CTRL(c) ((c) & 0xff)
+
+int db_output_position = 0; /* output column */
+int db_output_line = 0; /* output line number */
+int db_last_non_space = 0; /* last non-space character */
+int db_tab_stop_width = 8; /* how wide are tab stops? */
+#define NEXT_TAB(i) \
+ ((((i) + db_tab_stop_width) / db_tab_stop_width) * db_tab_stop_width)
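+/*
+ * e.g. with db_tab_stop_width == 8: NEXT_TAB(0) == 8, NEXT_TAB(7) == 8,
+ * NEXT_TAB(8) == 16; the result is always the next stop strictly past
+ * the argument.
+ */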
+int db_max_line = DB_MAX_LINE; /* output max lines */
+int db_max_width = DB_MAX_WIDTH; /* output line width */
+
+extern void db_check_interrupt();
+
+/*
+ * Force pending whitespace.
+ */
+void
+db_force_whitespace()
+{
+ register int last_print, next_tab;
+
+ last_print = db_last_non_space;
+ while (last_print < db_output_position) {
+ next_tab = NEXT_TAB(last_print);
+ if (next_tab <= db_output_position) {
+ cnputc('\t');
+ last_print = next_tab;
+ }
+ else {
+ cnputc(' ');
+ last_print++;
+ }
+ }
+ db_last_non_space = db_output_position;
+}
+
+static void
+db_more()
+{
+ register char *p;
+ boolean_t quit_output = FALSE;
+
+ for (p = "--db_more--"; *p; p++)
+ cnputc(*p);
+ switch(cngetc()) {
+ case ' ':
+ db_output_line = 0;
+ break;
+ case 'q':
+ case CTRL('c'):
+ db_output_line = 0;
+ quit_output = TRUE;
+ break;
+ default:
+ db_output_line--;
+ break;
+ }
+ p = "\b\b\b\b\b\b\b\b\b\b\b \b\b\b\b\b\b\b\b\b\b\b";
+ while (*p)
+ cnputc(*p++);
+ if (quit_output) {
+ db_error(0);
+ /* NOTREACHED */
+ }
+}
+
+/*
+ * Output character. Buffer whitespace.
+ */
+void
+db_putchar(c)
+ int c; /* character to output */
+{
+ if (db_max_line >= DB_MIN_MAX_LINE && db_output_line >= db_max_line-1)
+ db_more();
+ if (c > ' ' && c <= '~') {
+ /*
+ * Printing character.
+ * If we have spaces to print, print them first.
+ * Use tabs if possible.
+ */
+ db_force_whitespace();
+ cnputc(c);
+ db_output_position++;
+ if (db_max_width >= DB_MIN_MAX_WIDTH
+ && db_output_position >= db_max_width-1) {
+ /* auto new line */
+ cnputc('\n');
+ db_output_position = 0;
+ db_last_non_space = 0;
+ db_output_line++;
+ }
+ db_last_non_space = db_output_position;
+ }
+ else if (c == '\n') {
+ /* Return */
+ cnputc(c);
+ db_output_position = 0;
+ db_last_non_space = 0;
+ db_output_line++;
+ db_check_interrupt();
+ }
+ else if (c == '\t') {
+ /* assume tabs every 8 positions */
+ db_output_position = NEXT_TAB(db_output_position);
+ }
+ else if (c == ' ') {
+ /* space */
+ db_output_position++;
+ }
+ else if (c == '\007') {
+ /* bell */
+ cnputc(c);
+ }
+ /* other characters are assumed non-printing */
+}
+
+void
+db_id_putc(char c, vm_offset_t dummy)
+{
+ db_putchar(c);
+}
+
+/*
+ * Return output position
+ */
+int
+db_print_position()
+{
+ return (db_output_position);
+}
+
+/*
+ * End line if too long.
+ */
+void db_end_line()
+{
+ if (db_output_position >= db_max_width-1)
+ db_printf("\n");
+}
+
+/*
+ * Printing
+ */
+extern void _doprnt();
+
+/*VARARGS1*/
+void
+db_printf( fmt, va_alist)
+ char * fmt;
+ va_dcl
+{
+ va_list listp;
+
+#ifdef db_printf_enter
+ db_printf_enter(); /* optional multiP serialization */
+#endif
+ va_start(listp);
+ _doprnt(fmt, &listp, db_id_putc, db_radix, 0);
+ va_end(listp);
+}
+
+/* alternate name */
+
+/*VARARGS1*/
+void
+kdbprintf(fmt, va_alist)
+ char * fmt;
+ va_dcl
+{
+ va_list listp;
+ va_start(listp);
+ _doprnt(fmt, &listp, db_id_putc, db_radix, 0);
+ va_end(listp);
+}
+
+#endif	/* MACH_KDB */
diff --git a/ddb/db_output.h b/ddb/db_output.h
new file mode 100644
index 00000000..a3f0de05
--- /dev/null
+++ b/ddb/db_output.h
@@ -0,0 +1,44 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/90
+ */
+
+/*
+ * Printing routines for kernel debugger.
+ */
+
+extern void db_force_whitespace();
+extern int db_print_position();
+extern void db_end_line();
+#if 1
+extern void db_printf();
+#else
+extern void db_printf( char *fmt, ...);
+#endif
+
+
diff --git a/ddb/db_print.c b/ddb/db_print.c
new file mode 100644
index 00000000..727af233
--- /dev/null
+++ b/ddb/db_print.c
@@ -0,0 +1,511 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+/*
+ * Miscellaneous printing.
+ */
+#include <mach/port.h>
+#include <kern/strings.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/queue.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <machine/db_machdep.h>
+#include <machine/thread.h>
+
+#include <ddb/db_lex.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_task_thread.h>
+
+extern unsigned int db_maxoff;
+
+/* ARGSUSED */
+void
+db_show_regs(addr, have_addr, count, modif)
+ db_expr_t addr;
+ boolean_t have_addr;
+ db_expr_t count;
+ char *modif;
+{
+ register struct db_variable *regp;
+ db_expr_t value;
+ db_addr_t offset;
+ char * name;
+ register i;
+ struct db_var_aux_param aux_param;
+ task_t task = TASK_NULL;
+
+ aux_param.modif = modif;
+ aux_param.thread = THREAD_NULL;
+ if (db_option(modif, 't')) {
+ if (have_addr) {
+ if (!db_check_thread_address_valid((thread_t)addr))
+ return;
+ aux_param.thread = (thread_t)addr;
+ } else
+ aux_param.thread = db_default_thread;
+ if (aux_param.thread != THREAD_NULL)
+ task = aux_param.thread->task;
+ }
+ for (regp = db_regs; regp < db_eregs; regp++) {
+ if (regp->max_level > 1) {
+ db_printf("bad multi-suffixed register %s\n", regp->name);
+ continue;
+ }
+ aux_param.level = regp->max_level;
+ for (i = regp->low; i <= regp->high; i++) {
+ aux_param.suffix[0] = i;
+ db_read_write_variable(regp, &value, DB_VAR_GET, &aux_param);
+ if (regp->max_level > 0)
+ db_printf("%s%d%*s", regp->name, i,
+ 12-strlen(regp->name)-((i<10)?1:2), "");
+ else
+ db_printf("%-12s", regp->name);
+ db_printf("%#*N", 2+2*sizeof(vm_offset_t), value);
+ db_find_xtrn_task_sym_and_offset((db_addr_t)value, &name,
+ &offset, task);
+ if (name != 0 && offset <= db_maxoff && offset != value) {
+ db_printf("\t%s", name);
+ if (offset != 0)
+ db_printf("+%#r", offset);
+ }
+ db_printf("\n");
+ }
+ }
+}
+
+#define OPTION_LONG 0x001 /* long print option */
+#define OPTION_USER 0x002 /* print ps-like stuff */
+#define OPTION_INDENT 0x100 /* print with indent */
+#define OPTION_THREAD_TITLE 0x200 /* print thread title */
+#define OPTION_TASK_TITLE 0x400 /* print task title */
+
+#ifndef DB_TASK_NAME
+#define DB_TASK_NAME(task) /* no task name */
+#define DB_TASK_NAME_TITLE "" /* no task name */
+#endif /* DB_TASK_NAME */
+
+#ifndef db_thread_fp_used
+#define db_thread_fp_used(thread) FALSE
+#endif
+
+char *
+db_thread_stat(thread, status)
+ register thread_t thread;
+ char *status;
+{
+ register char *p = status;
+
+ *p++ = (thread->state & TH_RUN) ? 'R' : '.';
+ *p++ = (thread->state & TH_WAIT) ? 'W' : '.';
+ *p++ = (thread->state & TH_SUSP) ? 'S' : '.';
+ *p++ = (thread->state & TH_SWAPPED) ? 'O' : '.';
+ *p++ = (thread->state & TH_UNINT) ? 'N' : '.';
+ /* show if the FPU has been used */
+ *p++ = db_thread_fp_used(thread) ? 'F' : '.';
+ *p++ = 0;
+ return(status);
+}
+
+void
+db_print_thread(thread, thread_id, flag)
+ thread_t thread;
+ int thread_id;
+ int flag;
+{
+ if (flag & OPTION_USER) {
+ char status[8];
+ char *indent = "";
+
+ if (flag & OPTION_LONG) {
+ if (flag & OPTION_INDENT)
+ indent = " ";
+ if (flag & OPTION_THREAD_TITLE) {
+ db_printf("%s ID: THREAD STAT STACK PCB", indent);
+ db_printf(" SUS PRI CONTINUE,WAIT_FUNC\n");
+ }
+ db_printf("%s%3d%c %0*X %s %0*X %0*X %3d %3d ",
+ indent, thread_id,
+ (thread == current_thread())? '#': ':',
+ 2*sizeof(vm_offset_t), thread,
+ db_thread_stat(thread, status),
+ 2*sizeof(vm_offset_t), thread->kernel_stack,
+ 2*sizeof(vm_offset_t), thread->pcb,
+ thread->suspend_count, thread->sched_pri);
+ if ((thread->state & TH_SWAPPED) && thread->swap_func) {
+ db_task_printsym((db_addr_t)thread->swap_func,
+ DB_STGY_ANY, kernel_task);
+ db_printf(", ");
+ }
+ if (thread->state & TH_WAIT)
+ db_task_printsym((db_addr_t)thread->wait_event,
+ DB_STGY_ANY, kernel_task);
+ db_printf("\n");
+ } else {
+ if (thread_id % 3 == 0) {
+ if (flag & OPTION_INDENT)
+ db_printf("\n ");
+ } else
+ db_printf(" ");
+ db_printf("%3d%c(%0*X,%s)", thread_id,
+ (thread == current_thread())? '#': ':',
+ 2*sizeof(vm_offset_t), thread,
+ db_thread_stat(thread, status));
+ }
+ } else {
+ if (flag & OPTION_INDENT)
+ db_printf(" %3d (%0*X) ", thread_id,
+ 2*sizeof(vm_offset_t), thread);
+ else
+ db_printf("(%0*X) ", 2*sizeof(vm_offset_t), thread);
+ db_printf("%c%c%c%c%c",
+ (thread->state & TH_RUN) ? 'R' : ' ',
+ (thread->state & TH_WAIT) ? 'W' : ' ',
+ (thread->state & TH_SUSP) ? 'S' : ' ',
+ (thread->state & TH_UNINT)? 'N' : ' ',
+ db_thread_fp_used(thread) ? 'F' : ' ');
+ if (thread->state & TH_SWAPPED) {
+ if (thread->swap_func) {
+ db_printf("(");
+ db_task_printsym((db_addr_t)thread->swap_func,
+ DB_STGY_ANY, kernel_task);
+ db_printf(")");
+ } else {
+ db_printf("(swapped)");
+ }
+ }
+ if (thread->state & TH_WAIT) {
+ db_printf(" ");
+ db_task_printsym((db_addr_t)thread->wait_event,
+ DB_STGY_ANY, kernel_task);
+ }
+ db_printf("\n");
+ }
+}
+
+void
+db_print_task(task, task_id, flag)
+ task_t task;
+ int task_id;
+ int flag;
+{
+ thread_t thread;
+ int thread_id;
+
+ if (flag & OPTION_USER) {
+ if (flag & OPTION_TASK_TITLE) {
+ db_printf(" ID: TASK MAP THD SUS PR %s",
+ DB_TASK_NAME_TITLE);
+ if ((flag & OPTION_LONG) == 0)
+ db_printf(" THREADS");
+ db_printf("\n");
+ }
+ db_printf("%3d: %0*X %0*X %3d %3d %2d ",
+ task_id, 2*sizeof(vm_offset_t), task,
+ 2*sizeof(vm_offset_t), task->map, task->thread_count,
+ task->suspend_count, task->priority);
+ DB_TASK_NAME(task);
+ if (flag & OPTION_LONG) {
+ if (flag & OPTION_TASK_TITLE)
+ flag |= OPTION_THREAD_TITLE;
+ db_printf("\n");
+ } else if (task->thread_count <= 1)
+ flag &= ~OPTION_INDENT;
+ thread_id = 0;
+ queue_iterate(&task->thread_list, thread, thread_t, thread_list) {
+ db_print_thread(thread, thread_id, flag);
+ flag &= ~OPTION_THREAD_TITLE;
+ thread_id++;
+ }
+ if ((flag & OPTION_LONG) == 0)
+ db_printf("\n");
+ } else {
+ if (flag & OPTION_TASK_TITLE)
+ db_printf(" TASK THREADS\n");
+ db_printf("%3d (%0*X): ", task_id, 2*sizeof(vm_offset_t), task);
+ if (task->thread_count == 0) {
+ db_printf("no threads\n");
+ } else {
+ if (task->thread_count > 1) {
+ db_printf("%d threads: \n", task->thread_count);
+ flag |= OPTION_INDENT;
+ } else
+ flag &= ~OPTION_INDENT;
+ thread_id = 0;
+ queue_iterate(&task->thread_list, thread,
+ thread_t, thread_list)
+ db_print_thread(thread, thread_id++, flag);
+ }
+ }
+}
+
+/*ARGSUSED*/
+void
+db_show_all_threads(addr, have_addr, count, modif)
+ db_expr_t addr;
+ boolean_t have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ task_t task;
+ int task_id;
+ int flag;
+ processor_set_t pset;
+
+ flag = OPTION_TASK_TITLE|OPTION_INDENT;
+ if (db_option(modif, 'u'))
+ flag |= OPTION_USER;
+ if (db_option(modif, 'l'))
+ flag |= OPTION_LONG;
+
+ task_id = 0;
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ db_print_task(task, task_id, flag);
+ flag &= ~OPTION_TASK_TITLE;
+ task_id++;
+ }
+ }
+}
+
+db_addr_t
+db_task_from_space(
+ ipc_space_t space,
+ int *task_id)
+{
+ task_t task;
+ int tid = 0;
+ processor_set_t pset;
+
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ if (task->itk_space == space) {
+ *task_id = tid;
+ return (db_addr_t)task;
+ }
+ tid++;
+ }
+ }
+ *task_id = 0;
+ return (0);
+}
+
+/*ARGSUSED*/
+void
+db_show_one_thread(addr, have_addr, count, modif)
+ db_expr_t addr;
+ boolean_t have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ int flag;
+ int thread_id;
+ thread_t thread;
+
+ flag = OPTION_THREAD_TITLE;
+ if (db_option(modif, 'u'))
+ flag |= OPTION_USER;
+ if (db_option(modif, 'l'))
+ flag |= OPTION_LONG;
+
+ if (!have_addr) {
+ thread = current_thread();
+ if (thread == THREAD_NULL) {
+ db_error("No thread\n");
+ /*NOTREACHED*/
+ }
+ } else
+ thread = (thread_t) addr;
+
+ if ((thread_id = db_lookup_thread(thread)) < 0) {
+ db_printf("bad thread address %#X\n", addr);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+
+ if (flag & OPTION_USER) {
+ db_printf("TASK%d(%0*X):\n",
+ db_lookup_task(thread->task),
+ 2*sizeof(vm_offset_t), thread->task);
+ db_print_thread(thread, thread_id, flag);
+ } else {
+ db_printf("task %d(%0*X): thread %d",
+ db_lookup_task(thread->task),
+ 2*sizeof(vm_offset_t), thread->task, thread_id);
+ db_print_thread(thread, thread_id, flag);
+ }
+}
+
+/*ARGSUSED*/
+void
+db_show_one_task(addr, have_addr, count, modif)
+ db_expr_t addr;
+ boolean_t have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ int flag;
+ int task_id;
+ task_t task;
+
+ flag = OPTION_TASK_TITLE;
+ if (db_option(modif, 'u'))
+ flag |= OPTION_USER;
+ if (db_option(modif, 'l'))
+ flag |= OPTION_LONG;
+
+ if (!have_addr) {
+ task = db_current_task();
+ if (task == TASK_NULL) {
+ db_error("No task\n");
+ /*NOTREACHED*/
+ }
+ } else
+ task = (task_t) addr;
+
+ if ((task_id = db_lookup_task(task)) < 0) {
+ db_printf("bad task address %#X\n", addr);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+
+ db_print_task(task, task_id, flag);
+}
+
+int
+db_port_iterate(thread, func)
+ thread_t thread;
+ void (*func)();
+{
+ ipc_entry_t entry;
+ int index;
+ int n = 0;
+ int size;
+ ipc_space_t space;
+
+ space = thread->task->itk_space;
+ entry = space->is_table;
+ size = space->is_table_size;
+ for (index = 0; index < size; index++, entry++) {
+ if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
+ (*func)(index, (ipc_port_t) entry->ie_object,
+ entry->ie_bits, n++);
+ }
+ return(n);
+}
+
+ipc_port_t
+db_lookup_port(thread, id)
+ thread_t thread;
+ int id;
+{
+ register ipc_space_t space;
+ register ipc_entry_t entry;
+
+ if (thread == THREAD_NULL)
+ return(0);
+ space = thread->task->itk_space;
+ if (id < 0 || id >= space->is_table_size)
+ return(0);
+ entry = &space->is_table[id];
+ if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
+ return((ipc_port_t)entry->ie_object);
+ return(0);
+}
+
+static void
+db_print_port_id(id, port, bits, n)
+ int id;
+ ipc_port_t port;
+ unsigned bits;
+ int n;
+{
+ if (n != 0 && n % 3 == 0)
+ db_printf("\n");
+ db_printf("\tport%d(%s,%x)", id,
+ (bits & MACH_PORT_TYPE_RECEIVE)? "r":
+ (bits & MACH_PORT_TYPE_SEND)? "s": "S", port);
+}
+
+static void
+db_print_port_id_long(
+ int id,
+ ipc_port_t port,
+ unsigned bits,
+ int n)
+{
+ if (n != 0)
+ db_printf("\n");
+ db_printf("\tport%d(%s, port=0x%x", id,
+ (bits & MACH_PORT_TYPE_RECEIVE)? "r":
+ (bits & MACH_PORT_TYPE_SEND)? "s": "S", port);
+ db_printf(", receiver_name=0x%x)", port->ip_receiver_name);
+}
+
+/* ARGSUSED */
+void
+db_show_port_id(addr, have_addr, count, modif)
+ db_expr_t addr;
+ boolean_t have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ thread_t thread;
+
+ if (!have_addr) {
+ thread = current_thread();
+ if (thread == THREAD_NULL) {
+ db_error("No thread\n");
+ /*NOTREACHED*/
+ }
+ } else
+ thread = (thread_t) addr;
+ if (db_lookup_thread(thread) < 0) {
+ db_printf("Bad thread address %#X\n", addr);
+ db_error(0);
+ /*NOTREACHED*/
+ }
+ if (db_option(modif, 'l'))
+ {
+ if (db_port_iterate(thread, db_print_port_id_long))
+ db_printf("\n");
+ return;
+ }
+ if (db_port_iterate(thread, db_print_port_id))
+ db_printf("\n");
+}
+
+#endif /* MACH_KDB */
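For reference, the six status columns emitted by db_thread_stat() above decode as follows. The legend array is hypothetical, added only as a reading aid, and is not part of the patch.

/* Sketch: one entry per column of the status string built by db_thread_stat(). */
static const char *db_thread_stat_legend[] = {
	"R  runnable (TH_RUN)",
	"W  waiting (TH_WAIT)",
	"S  suspended (TH_SUSP)",
	"O  swapped out (TH_SWAPPED)",
	"N  uninterruptible (TH_UNINT)",
	"F  floating-point unit used",
};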
diff --git a/ddb/db_print.h b/ddb/db_print.h
new file mode 100644
index 00000000..634c5be4
--- /dev/null
+++ b/ddb/db_print.h
@@ -0,0 +1,110 @@
+/*
+ * (c) Copyright 1992, 1993, 1994, 1995 OPEN SOFTWARE FOUNDATION, INC.
+ * ALL RIGHTS RESERVED
+ */
+/*
+ * OSF RI nmk19b2 5/2/95
+ */
+
+#ifndef _DDB_DB_PRINT_H_
+#define _DDB_DB_PRINT_H_
+
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+/* Prototypes for functions exported by this module.
+ */
+void db_show_regs(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char *modif);
+
+void db_show_all_acts(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+void db_show_one_act(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+void db_show_one_task(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+void db_show_shuttle(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+void db_show_port_id(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+void db_show_one_task_vm(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char *modif);
+
+void db_show_all_task_vm(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char *modif);
+
+void db_show_one_space(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+void db_show_all_spaces(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+void db_sys(void);
+
+int db_port_kmsg_count(
+ ipc_port_t port);
+
+db_addr_t db_task_from_space(
+ ipc_space_t space,
+ int *task_id);
+
+void db_show_one_simple_lock(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+void db_show_one_mutex(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+void db_show_subsystem(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+void db_show_runq(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+#endif /* !_DDB_DB_PRINT_H_ */
diff --git a/ddb/db_run.c b/ddb/db_run.c
new file mode 100644
index 00000000..47c39c28
--- /dev/null
+++ b/ddb/db_run.c
@@ -0,0 +1,441 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+/*
+ * Commands to run process.
+ */
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+#include <ddb/db_lex.h>
+#include <ddb/db_break.h>
+#include <ddb/db_access.h>
+#include <ddb/db_run.h>
+#include <ddb/db_task_thread.h>
+
+int db_run_mode;
+
+boolean_t db_sstep_print;
+int db_loop_count;
+int db_call_depth;
+
+int db_inst_count;
+int db_last_inst_count;
+int db_load_count;
+int db_store_count;
+
+#ifndef db_set_single_step
+void db_set_task_single_step(/* db_regs_t *, task_t */);/* forward */
+#else
+#define db_set_task_single_step(regs,task) db_set_single_step(regs)
+#endif
+#ifndef db_clear_single_step
+void db_clear_task_single_step(/* db_regs_t *, task_t */);
+#else
+#define db_clear_task_single_step(regs,task) db_clear_single_step(regs)
+#endif
+
+boolean_t
+db_stop_at_pc(is_breakpoint, task)
+ boolean_t *is_breakpoint;
+ task_t task;
+{
+ register db_addr_t pc;
+ register db_thread_breakpoint_t bkpt;
+ boolean_t db_cond_check();
+
+ db_clear_task_single_step(DDB_REGS, task);
+ db_clear_breakpoints();
+ db_clear_watchpoints();
+ pc = PC_REGS(DDB_REGS);
+
+#ifdef FIXUP_PC_AFTER_BREAK
+ if (*is_breakpoint) {
+ /*
+ * Breakpoint trap. Fix up the PC if the
+ * machine requires it.
+ */
+ FIXUP_PC_AFTER_BREAK
+ pc = PC_REGS(DDB_REGS);
+ }
+#endif
+
+ /*
+ * Now check for a breakpoint at this address.
+ */
+ bkpt = db_find_thread_breakpoint_here(task, pc);
+ if (bkpt) {
+ if (db_cond_check(bkpt)) {
+ *is_breakpoint = TRUE;
+ return (TRUE); /* stop here */
+ }
+ }
+ *is_breakpoint = FALSE;
+
+ if (db_run_mode == STEP_INVISIBLE) {
+ db_run_mode = STEP_CONTINUE;
+ return (FALSE); /* continue */
+ }
+ if (db_run_mode == STEP_COUNT) {
+ return (FALSE); /* continue */
+ }
+ if (db_run_mode == STEP_ONCE) {
+ if (--db_loop_count > 0) {
+ if (db_sstep_print) {
+ db_print_loc_and_inst(pc, task);
+ }
+ return (FALSE); /* continue */
+ }
+ }
+ if (db_run_mode == STEP_RETURN) {
+ /* WARNING: the following assumes an instruction fits in an int */
+ db_expr_t ins = db_get_task_value(pc, sizeof(int), FALSE, task);
+
+ /* continue until matching return */
+
+ if (!inst_trap_return(ins) &&
+ (!inst_return(ins) || --db_call_depth != 0)) {
+ if (db_sstep_print) {
+ if (inst_call(ins) || inst_return(ins)) {
+ register int i;
+
+ db_printf("[after %6d /%4d] ",
+ db_inst_count,
+ db_inst_count - db_last_inst_count);
+ db_last_inst_count = db_inst_count;
+ for (i = db_call_depth; --i > 0; )
+ db_printf(" ");
+ db_print_loc_and_inst(pc, task);
+ db_printf("\n");
+ }
+ }
+ if (inst_call(ins))
+ db_call_depth++;
+ return (FALSE); /* continue */
+ }
+ }
+ if (db_run_mode == STEP_CALLT) {
+ /* WARNING: the following assumes an instruction fits in an int */
+ db_expr_t ins = db_get_task_value(pc, sizeof(int), FALSE, task);
+
+ /* continue until call or return */
+
+ if (!inst_call(ins) &&
+ !inst_return(ins) &&
+ !inst_trap_return(ins)) {
+ return (FALSE); /* continue */
+ }
+ }
+ if (db_find_breakpoint_here(task, pc))
+ return(FALSE);
+ db_run_mode = STEP_NONE;
+ return (TRUE);
+}
+
+void
+db_restart_at_pc(watchpt, task)
+ boolean_t watchpt;
+ task_t task;
+{
+ register db_addr_t pc = PC_REGS(DDB_REGS), brpc;
+
+ if ((db_run_mode == STEP_COUNT) ||
+ (db_run_mode == STEP_RETURN) ||
+ (db_run_mode == STEP_CALLT)) {
+ db_expr_t ins;
+
+ /*
+ * We are about to execute this instruction,
+ * so count it now.
+ */
+
+ ins = db_get_task_value(pc, sizeof(int), FALSE, task);
+ db_inst_count++;
+ db_load_count += inst_load(ins);
+ db_store_count += inst_store(ins);
+#ifdef SOFTWARE_SSTEP
+ /* Account for instructions in delay slots */
+ brpc = next_instr_address(pc,1,task);
+ if ((brpc != pc) && (inst_branch(ins) || inst_call(ins))) {
+ /* Note: this assumes an instruction fits in an int */
+ ins = db_get_task_value(brpc, sizeof(int), FALSE, task);
+ db_inst_count++;
+ db_load_count += inst_load(ins);
+ db_store_count += inst_store(ins);
+ }
+#endif /* SOFTWARE_SSTEP */
+ }
+
+ if (db_run_mode == STEP_CONTINUE) {
+ if (watchpt || db_find_breakpoint_here(task, pc)) {
+ /*
+ * Step over breakpoint/watchpoint.
+ */
+ db_run_mode = STEP_INVISIBLE;
+ db_set_task_single_step(DDB_REGS, task);
+ } else {
+ db_set_breakpoints();
+ db_set_watchpoints();
+ }
+ } else {
+ db_set_task_single_step(DDB_REGS, task);
+ }
+}
+
+void
+db_single_step(regs, task)
+ db_regs_t *regs;
+ task_t task;
+{
+ if (db_run_mode == STEP_CONTINUE) {
+ db_run_mode = STEP_INVISIBLE;
+ db_set_task_single_step(regs, task);
+ }
+}
+
+#ifdef SOFTWARE_SSTEP
+/*
+ * Software implementation of single-stepping.
+ * If your machine does not have a trace mode
+ * similar to the vax or sun ones you can use
+ * this implementation, done for the mips.
+ * Just define the above conditional and provide
+ * the functions/macros defined below.
+ *
+ * extern boolean_t
+ * inst_branch(), returns true if the instruction might branch
+ * extern unsigned
+ * branch_taken(), returns the address the instruction might
+ * branch to
+ * getreg_val(); returns the value of a user register,
+ * as indicated in the hardware instruction
+ * encoding, e.g. 8 for r8
+ *
+ * next_instr_address(pc,bd,task) returns the address of the first
+ * instruction following the one at "pc",
+ * which is either in the taken path of
+ * the branch (bd==1) or not. This is
+ * for machines (mips) with branch delays.
+ *
+ * A single-step may involve at most 2 breakpoints -
+ * one for branch-not-taken and one for branch taken.
+ * If one of these addresses does not already have a breakpoint,
+ * we allocate a breakpoint and save it here.
+ * These breakpoints are deleted on return.
+ */
+db_breakpoint_t db_not_taken_bkpt = 0;
+db_breakpoint_t db_taken_bkpt = 0;
+
+db_breakpoint_t
+db_find_temp_breakpoint(task, addr)
+ task_t task;
+ db_addr_t addr;
+{
+ if (db_taken_bkpt && (db_taken_bkpt->address == addr) &&
+ db_taken_bkpt->task == task)
+ return db_taken_bkpt;
+ if (db_not_taken_bkpt && (db_not_taken_bkpt->address == addr) &&
+ db_not_taken_bkpt->task == task)
+ return db_not_taken_bkpt;
+ return 0;
+}
+
+void
+db_set_task_single_step(regs, task)
+ register db_regs_t *regs;
+ task_t task;
+{
+ db_addr_t pc = PC_REGS(regs), brpc;
+ register unsigned int inst;
+ register boolean_t unconditional;
+
+ /*
+ * User was stopped at pc, e.g. the instruction
+ * at pc was not executed.
+ */
+ inst = db_get_task_value(pc, sizeof(int), FALSE, task);
+ if (inst_branch(inst) || inst_call(inst)) {
+ extern db_expr_t getreg_val();
+
+ brpc = branch_taken(inst, pc, getreg_val, regs);
+ if (brpc != pc) { /* self-branches are hopeless */
+ db_taken_bkpt = db_set_temp_breakpoint(task, brpc);
+ } else
+ db_taken_bkpt = 0;
+ pc = next_instr_address(pc,1,task);
+ }
+
+ /* check if this control flow instruction is an unconditional transfer */
+ unconditional = inst_unconditional_flow_transfer(inst);
+
+ pc = next_instr_address(pc,0,task);
+ /*
+ We only set the sequential breakpoint if the previous instruction was not
+ an unconditional change of flow of control. If the previous instruction
+ is an unconditional change of flow of control, setting a breakpoint in the
+ next sequential location may set a breakpoint in data or in another routine,
+ which could screw up either the program or the debugger.
+ (Consider, for instance, that the next sequential instruction is the
+ start of a routine needed by the debugger.)
+ */
+ if (!unconditional && db_find_breakpoint_here(task, pc) == 0) {
+ db_not_taken_bkpt = db_set_temp_breakpoint(task, pc);
+ }
+ else
+ db_not_taken_bkpt = 0;
+}
+
+void
+db_clear_task_single_step(regs, task)
+ db_regs_t *regs;
+ task_t task;
+{
+ if (db_taken_bkpt != 0) {
+ db_delete_temp_breakpoint(task, db_taken_bkpt);
+ db_taken_bkpt = 0;
+ }
+ if (db_not_taken_bkpt != 0) {
+ db_delete_temp_breakpoint(task, db_not_taken_bkpt);
+ db_not_taken_bkpt = 0;
+ }
+}
+
+#endif /* SOFTWARE_SSTEP */
+
+
+extern int db_cmd_loop_done;
+
+/* single-step */
+/*ARGSUSED*/
+void
+db_single_step_cmd(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ boolean_t print = FALSE;
+
+ if (count == -1)
+ count = 1;
+
+ if (modif[0] == 'p')
+ print = TRUE;
+
+ db_run_mode = STEP_ONCE;
+ db_loop_count = count;
+ db_sstep_print = print;
+ db_inst_count = 0;
+ db_last_inst_count = 0;
+ db_load_count = 0;
+ db_store_count = 0;
+
+ db_cmd_loop_done = 1;
+}
+
+/* trace and print until call/return */
+/*ARGSUSED*/
+void
+db_trace_until_call_cmd(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ boolean_t print = FALSE;
+
+ if (modif[0] == 'p')
+ print = TRUE;
+
+ db_run_mode = STEP_CALLT;
+ db_sstep_print = print;
+ db_inst_count = 0;
+ db_last_inst_count = 0;
+ db_load_count = 0;
+ db_store_count = 0;
+
+ db_cmd_loop_done = 1;
+}
+
+/*ARGSUSED*/
+void
+db_trace_until_matching_cmd(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ boolean_t print = FALSE;
+
+ if (modif[0] == 'p')
+ print = TRUE;
+
+ db_run_mode = STEP_RETURN;
+ db_call_depth = 1;
+ db_sstep_print = print;
+ db_inst_count = 0;
+ db_last_inst_count = 0;
+ db_load_count = 0;
+ db_store_count = 0;
+
+ db_cmd_loop_done = 1;
+}
+
+/* continue */
+/*ARGSUSED*/
+void
+db_continue_cmd(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ if (modif[0] == 'c')
+ db_run_mode = STEP_COUNT;
+ else
+ db_run_mode = STEP_CONTINUE;
+ db_inst_count = 0;
+ db_last_inst_count = 0;
+ db_load_count = 0;
+ db_store_count = 0;
+
+ db_cmd_loop_done = 1;
+}
+
+boolean_t
+db_in_single_step()
+{
+ return(db_run_mode != STEP_NONE && db_run_mode != STEP_CONTINUE);
+}
+
+#endif /* MACH_KDB */
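The SOFTWARE_SSTEP comment block above lists the machine-dependent hooks a port must supply. The declarations below are a sketch only; the types are inferred from the call sites in db_run.c, not taken from any real machine header, and follow the incomplete-prototype style used elsewhere in this patch.

/* Sketch (types inferred from the call sites above, not from a real port): */
extern boolean_t inst_branch(/* unsigned int inst */);
extern boolean_t inst_call(/* unsigned int inst */);
extern boolean_t inst_unconditional_flow_transfer(/* unsigned int inst */);
extern db_addr_t branch_taken(/* unsigned int inst, db_addr_t pc,
				 db_expr_t (*getreg)(), db_regs_t *regs */);
extern db_addr_t next_instr_address(/* db_addr_t pc, int bd, task_t task */);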
diff --git a/ddb/db_run.h b/ddb/db_run.h
new file mode 100644
index 00000000..fcc8ece5
--- /dev/null
+++ b/ddb/db_run.h
@@ -0,0 +1,37 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+extern int db_run_mode;
+
+/* modes the system may be running in */
+
+#define STEP_NONE 0
+#define STEP_ONCE 1
+#define STEP_RETURN 2
+#define STEP_CALLT 3
+#define STEP_CONTINUE 4
+#define STEP_INVISIBLE 5
+#define STEP_COUNT 6
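As a reading aid only (not part of the patch), the run modes above line up with the commands implemented in db_run.c earlier in this diff; the name table below is hypothetical.

/* Sketch: which code in db_run.c selects each db_run_mode value. */
static const char *db_run_mode_names[] = {
	"STEP_NONE",		/* 0: not stepping */
	"STEP_ONCE",		/* 1: set by db_single_step_cmd */
	"STEP_RETURN",		/* 2: set by db_trace_until_matching_cmd */
	"STEP_CALLT",		/* 3: set by db_trace_until_call_cmd */
	"STEP_CONTINUE",	/* 4: set by db_continue_cmd */
	"STEP_INVISIBLE",	/* 5: internal, used to step over a breakpoint */
	"STEP_COUNT",		/* 6: set by db_continue_cmd with the 'c' modifier */
};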
diff --git a/ddb/db_sym.c b/ddb/db_sym.c
new file mode 100644
index 00000000..c48a48fd
--- /dev/null
+++ b/ddb/db_sym.c
@@ -0,0 +1,523 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <mach/std_types.h>
+#include <kern/strings.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_task_thread.h>
+
+#include <vm/vm_map.h> /* vm_map_t */
+
+/*
+ * Multiple symbol tables
+ */
+#define MAXNOSYMTABS 5 /* mach, bootstrap, ux, emulator, 1 spare */
+
+db_symtab_t db_symtabs[MAXNOSYMTABS] = {{0,},};
+int db_nsymtab = 0;
+
+db_symtab_t *db_last_symtab;
+
+db_sym_t db_lookup(); /* forward */
+
+/*
+ * Add symbol table, with given name, to list of symbol tables.
+ */
+boolean_t
+db_add_symbol_table(type, start, end, name, ref, map_pointer)
+ int type;
+ char *start;
+ char *end;
+ char *name;
+ char *ref;
+ char *map_pointer;
+{
+ register db_symtab_t *st;
+ extern vm_map_t kernel_map;
+
+ if (db_nsymtab >= MAXNOSYMTABS)
+ return (FALSE);
+
+ st = &db_symtabs[db_nsymtab];
+ st->type = type;
+ st->start = start;
+ st->end = end;
+ st->private = ref;
+ st->map_pointer = (map_pointer == (char *)kernel_map)? 0: map_pointer;
+ strcpy(st->name, name);
+
+ db_nsymtab++;
+
+ return (TRUE);
+}
+
+/*
+ * db_qualify("vm_map", "ux") returns "ux::vm_map".
+ *
+ * Note: return value points to static data whose content is
+ * overwritten by each call... but in practice this seems okay.
+ */
+static char *
+db_qualify(symname, symtabname)
+ char *symname;
+ register char *symtabname;
+{
+ static char tmp[256];
+ register char *s;
+
+ s = tmp;
+ while (*s++ = *symtabname++) {
+ }
+ s[-1] = ':';
+ *s++ = ':';
+ while (*s++ = *symname++) {
+ }
+ return tmp;
+}
+
+
+boolean_t
+db_eqname( char* src, char* dst, char c )
+{
+ if (!strcmp(src, dst))
+ return (TRUE);
+ if (src[0] == c)
+ return (!strcmp(src+1,dst));
+ return (FALSE);
+}
+
+boolean_t
+db_value_of_name(name, valuep)
+ char *name;
+ db_expr_t *valuep;
+{
+ db_sym_t sym;
+
+ sym = db_lookup(name);
+ if (sym == DB_SYM_NULL)
+ return (FALSE);
+ db_symbol_values(0, sym, &name, valuep);
+ return (TRUE);
+}
+
+/*
+ * Lookup a symbol.
+ * If the symbol has a qualifier (e.g., ux::vm_map),
+ * then only the specified symbol table will be searched;
+ * otherwise, all symbol tables will be searched.
+ */
+db_sym_t
+db_lookup(symstr)
+ char *symstr;
+{
+ db_sym_t sp;
+ register int i;
+ int symtab_start = 0;
+ int symtab_end = db_nsymtab;
+ register char *cp;
+
+ /*
+ * Look for, remove, and remember any symbol table specifier.
+ */
+ for (cp = symstr; *cp; cp++) {
+ if (*cp == ':' && cp[1] == ':') {
+ *cp = '\0';
+ for (i = 0; i < db_nsymtab; i++) {
+ if (! strcmp(symstr, db_symtabs[i].name)) {
+ symtab_start = i;
+ symtab_end = i + 1;
+ break;
+ }
+ }
+ *cp = ':';
+ if (i == db_nsymtab)
+ db_error("Invalid symbol table name\n");
+ symstr = cp+2;
+ }
+ }
+
+ /*
+ * Look in the specified set of symbol tables.
+ * Return on first match.
+ */
+ for (i = symtab_start; i < symtab_end; i++) {
+ if (sp = X_db_lookup(&db_symtabs[i], symstr)) {
+ db_last_symtab = &db_symtabs[i];
+ return sp;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Common utility routine to parse a symbol string into a file
+ * name, a symbol name and line number.
+ * This routine is called from X_db_lookup if the object dependent
+ * handler supports qualified search with a file name or a line number.
+ * It parses the symbol string, and calls an object dependent routine
+ * with the parsed file name, symbol name and line number.
+ */
+db_sym_t
+db_sym_parse_and_lookup(func, symtab, symstr)
+ db_sym_t (*func)();
+ db_symtab_t *symtab;
+ char *symstr;
+{
+ register char *p;
+ register n;
+ int n_name;
+ int line_number;
+ char *file_name = 0;
+ char *sym_name = 0;
+ char *component[3];
+ db_sym_t found = DB_SYM_NULL;
+
+ /*
+ * disassemble the symbol into components:
+ * [file_name:]symbol[:line_number]
+ */
+ component[0] = symstr;
+ component[1] = component[2] = 0;
+ for (p = symstr, n = 1; *p; p++) {
+ if (*p == ':') {
+ if (n >= 3)
+ break;
+ *p = 0;
+ component[n++] = p+1;
+ }
+ }
+ if (*p != 0)
+ goto out;
+ line_number = 0;
+ n_name = n;
+ p = component[n-1];
+ if (*p >= '0' && *p <= '9') {
+ if (n == 1)
+ goto out;
+ for (line_number = 0; *p; p++) {
+ if (*p < '0' || *p > '9')
+ goto out;
+ line_number = line_number*10 + *p - '0';
+ }
+ n_name--;
+ } else if (n >= 3)
+ goto out;
+ if (n_name == 1) {
+ for (p = component[0]; *p && *p != '.'; p++);
+ if (*p == '.') {
+ file_name = component[0];
+ sym_name = 0;
+ } else {
+ file_name = 0;
+ sym_name = component[0];
+ }
+ } else {
+ file_name = component[0];
+ sym_name = component[1];
+ }
+ found = func(symtab, file_name, sym_name, line_number);
+
+out:
+ while (--n >= 1)
+ component[n][-1] = ':';
+ return(found);
+}
+
+/*
+ * Does this symbol name appear in more than one symbol table?
+ * Used by db_symbol_values to decide whether to qualify a symbol.
+ */
+boolean_t db_qualify_ambiguous_names = FALSE;
+
+boolean_t
+db_name_is_ambiguous(sym_name)
+ char *sym_name;
+{
+ register int i;
+ register
+ boolean_t found_once = FALSE;
+
+ if (!db_qualify_ambiguous_names)
+ return FALSE;
+
+ for (i = 0; i < db_nsymtab; i++) {
+ if (X_db_lookup(&db_symtabs[i], sym_name)) {
+ if (found_once)
+ return TRUE;
+ found_once = TRUE;
+ }
+ }
+ return FALSE;
+}
+
+
+db_sym_t db_search_in_task_symbol();
+
+/*
+ * Find the closest symbol to val, and return its name
+ * and the difference between val and the symbol found.
+ *
+ * Logic change. If the task argument is non-NULL and a
+ * matching symbol is found in a symbol table which explicitly
+ * specifies its map to be task->map, that symbol will have
+ * precedence over any symbol from a symbol table with a null
+ * map. This allows overlapping kernel/user maps to work correctly.
+ *
+ */
+db_sym_t
+db_search_task_symbol(val, strategy, offp, task)
+ register db_addr_t val;
+ db_strategy_t strategy;
+ db_addr_t *offp; /* better be unsigned */
+ task_t task;
+{
+ db_sym_t ret;
+
+ if (task != TASK_NULL)
+ ret = db_search_in_task_symbol(val, strategy, offp, task);
+ else
+ {
+ ret = db_search_in_task_symbol(val, strategy, offp, task);
+ /*
+ db_search_in_task_symbol will return success with
+ a very large offset when it should have failed.
+ */
+ if (ret == DB_SYM_NULL || (*offp) > 0x1000000)
+ {
+ task = db_current_task();
+ ret = db_search_in_task_symbol(val, strategy, offp, task);
+ }
+ }
+
+ return ret;
+}
+
+db_sym_t
+db_search_in_task_symbol(val, strategy, offp, task)
+ register db_addr_t val;
+ db_strategy_t strategy;
+ db_addr_t *offp;
+ task_t task;
+{
+ register vm_size_t diff;
+ vm_size_t newdiff;
+ register int i;
+ db_symtab_t *sp;
+ db_sym_t ret = DB_SYM_NULL, sym;
+ vm_map_t map_for_val;
+
+ map_for_val = (task == TASK_NULL)? VM_MAP_NULL: task->map;
+ newdiff = diff = ~0;
+ db_last_symtab = (db_symtab_t *) 0;
+ for (sp = &db_symtabs[0], i = 0; i < db_nsymtab; sp++, i++)
+ {
+ newdiff = ~0;
+ if ((vm_map_t)sp->map_pointer == VM_MAP_NULL ||
+ (vm_map_t)sp->map_pointer == map_for_val)
+ {
+ sym = X_db_search_symbol(sp, val, strategy, (db_expr_t*)&newdiff);
+ if (sym == DB_SYM_NULL)
+ continue;
+ if (db_last_symtab == (db_symtab_t *) 0)
+ { /* first hit */
+ db_last_symtab = sp;
+ diff = newdiff;
+ ret = sym;
+ continue;
+ }
+ if ((vm_map_t) sp->map_pointer == VM_MAP_NULL &&
+ (vm_map_t) db_last_symtab->map_pointer == VM_MAP_NULL &&
+ newdiff < diff )
+ { /* closer null map match */
+ db_last_symtab = sp;
+ diff = newdiff;
+ ret = sym;
+ continue;
+ }
+ if ((vm_map_t) sp->map_pointer != VM_MAP_NULL &&
+ (newdiff < 0x100000) &&
+ ((vm_map_t) db_last_symtab->map_pointer == VM_MAP_NULL ||
+ newdiff < diff ))
+ { /* update if the new symbol is in the matching map and is
+ "close", and the old one has a null map or is in the
+ matching map but is further away
+ */
+ db_last_symtab = sp;
+ diff = newdiff;
+ ret = sym;
+ continue;
+ }
+ }
+ }
+
+ *offp = diff;
+ return ret;
+}
+
+/*
+ * Return name and value of a symbol
+ */
+void
+db_symbol_values(stab, sym, namep, valuep)
+ db_symtab_t *stab;
+ db_sym_t sym;
+ char **namep;
+ db_expr_t *valuep;
+{
+ db_expr_t value;
+ char *name;
+
+ if (sym == DB_SYM_NULL) {
+ *namep = 0;
+ return;
+ }
+ if (stab == 0)
+ stab = db_last_symtab;
+
+ X_db_symbol_values(stab, sym, &name, &value);
+
+ if (db_name_is_ambiguous(name))
+ *namep = db_qualify(name, db_last_symtab->name);
+ else
+ *namep = name;
+ if (valuep)
+ *valuep = value;
+}
+
+
+/*
+ * Print the closest symbol to value
+ *
+ * After matching the symbol according to the given strategy
+ * we print it in the name+offset format, provided the offset
+ * is close enough (e.g. smaller than db_maxoff).
+ * We also attempt to print [filename:linenum] when applicable
+ * (e.g. for procedure names).
+ *
+ * If we could not find a reasonable name+offset representation,
+ * then we just print the value in hex. Small values might get
+ * bogus symbol associations, e.g. 3 might get some absolute
+ * value like _INCLUDE_VERSION or something, therefore we do
+ * not accept symbols whose value is zero (and use plain hex).
+ */
+
+unsigned int db_maxoff = 0x4000;
+
+void
+db_task_printsym(off, strategy, task)
+ db_expr_t off;
+ db_strategy_t strategy;
+ task_t task;
+{
+ db_addr_t d;
+ char *filename;
+ char *name;
+ db_expr_t value;
+ int linenum;
+ db_sym_t cursym;
+
+ cursym = db_search_task_symbol(off, strategy, &d, task);
+ db_symbol_values(0, cursym, &name, &value);
+ if (name == 0 || d >= db_maxoff || value == 0) {
+ db_printf("%#n", off);
+ return;
+ }
+ db_printf("%s", name);
+ if (d)
+ db_printf("+0x%x", d);
+ if (strategy == DB_STGY_PROC) {
+ if (db_line_at_pc(cursym, &filename, &linenum, off)) {
+ db_printf(" [%s", filename);
+ if (linenum > 0)
+ db_printf(":%d", linenum);
+ db_printf("]");
+ }
+ }
+}
+
+void
+db_printsym(off, strategy)
+ db_expr_t off;
+ db_strategy_t strategy;
+{
+ db_task_printsym(off, strategy, TASK_NULL);
+}
+
+boolean_t
+db_line_at_pc( sym, filename, linenum, pc)
+ db_sym_t sym;
+ char **filename;
+ int *linenum;
+ db_expr_t pc;
+{
+ return (db_last_symtab) ?
+ X_db_line_at_pc( db_last_symtab, sym, filename, linenum, pc) :
+ FALSE;
+}
+
+/*
+ * Switch into symbol-table specific routines
+ */
+
+extern boolean_t aout_db_sym_init(), aout_db_line_at_pc();
+extern db_sym_t aout_db_lookup(), aout_db_search_symbol();
+extern void aout_db_symbol_values();
+
+extern boolean_t coff_db_sym_init(), coff_db_line_at_pc();
+extern db_sym_t coff_db_lookup(), coff_db_search_symbol();
+extern void coff_db_symbol_values();
+
+struct db_sym_switch x_db[] = {
+
+ /* BSD a.out format (really, sdb/dbx(1) symtabs) */
+#ifdef DB_NO_AOUT
+ { 0,},
+#else /* DB_NO_AOUT */
+ { aout_db_sym_init, aout_db_lookup, aout_db_search_symbol,
+ aout_db_line_at_pc, aout_db_symbol_values },
+#endif /* DB_NO_AOUT */
+
+#ifdef DB_NO_COFF
+ { 0,},
+#else /* DB_NO_COFF */
+ { coff_db_sym_init, coff_db_lookup, coff_db_search_symbol,
+ coff_db_line_at_pc, coff_db_symbol_values },
+#endif /* DB_NO_COFF */
+
+ /* Machdep, not inited here */
+ { 0,}
+
+};
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_sym.h b/ddb/db_sym.h
new file mode 100644
index 00000000..dd320b7c
--- /dev/null
+++ b/ddb/db_sym.h
@@ -0,0 +1,200 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 8/90
+ */
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+#include <machine/db_machdep.h>
+
+/*
+ * This module can handle multiple symbol tables,
+ * of multiple types, at the same time
+ */
+#define SYMTAB_NAME_LEN 32
+
+typedef struct {
+ int type;
+#define SYMTAB_AOUT 0
+#define SYMTAB_COFF 1
+#define SYMTAB_MACHDEP 2
+ char *start; /* symtab location */
+ char *end;
+ char *private; /* optional machdep pointer */
+ char *map_pointer; /* symbols are for this map only,
+ if not null */
+ char name[SYMTAB_NAME_LEN];
+ /* symtab name */
+} db_symtab_t;
+
+extern db_symtab_t *db_last_symtab; /* where last symbol was found */
+
+/*
+ * Symbol representation is specific to the symtab style:
+ * BSD compilers use dbx' nlist, other compilers might use
+ * a different one
+ */
+typedef char * db_sym_t; /* opaque handle on symbols */
+#define DB_SYM_NULL ((db_sym_t)0)
+
+/*
+ * Non-stripped symbol tables will have duplicates, for instance
+ * the same string could match a parameter name, a local var, a
+ * global var, etc.
+ * We are most concerned with the following matches.
+ */
+typedef int db_strategy_t; /* search strategy */
+
+#define DB_STGY_ANY 0 /* anything goes */
+#define DB_STGY_XTRN 1 /* only external symbols */
+#define DB_STGY_PROC 2 /* only procedures */
+
+extern boolean_t db_qualify_ambiguous_names;
+ /* if TRUE, check across symbol tables
+ * for multiple occurrences of a name.
+ * Might slow down quite a bit
+ * ..but the machine has nothing
+ * else to do, now does it ? */
+
+/*
+ * Functions exported by the symtable module
+ */
+
+/* extend the list of symbol tables */
+
+extern boolean_t db_add_symbol_table( int type,
+ char * start,
+ char * end,
+ char *name,
+ char *ref,
+ char *map_pointer );
+
+/* find symbol value given name */
+
+extern int db_value_of_name( char* name, db_expr_t* valuep);
+
+/* find symbol given value */
+
+extern db_sym_t db_search_task_symbol( db_addr_t val,
+ db_strategy_t strategy,
+ db_addr_t *offp,
+ task_t task );
+
+/* return name and value of symbol */
+
+extern void db_symbol_values( db_symtab_t *stab,
+ db_sym_t sym,
+ char** namep,
+ db_expr_t* valuep);
+
+/* find name&value given approx val */
+
+#define db_find_sym_and_offset(val,namep,offp) \
+ db_symbol_values(0, db_search_symbol(val,DB_STGY_ANY,offp),namep,0)
+
+/* ditto, but no locals */
+#define db_find_xtrn_sym_and_offset(val,namep,offp) \
+ db_symbol_values(0, db_search_symbol(val,DB_STGY_XTRN,offp),namep,0)
+
+/* find name&value given approx val */
+
+#define db_find_task_sym_and_offset(val,namep,offp,task) \
+ db_symbol_values(0, db_search_task_symbol(val,DB_STGY_ANY,offp,task), \
+ namep, 0)
+
+/* ditto, but no locals */
+#define db_find_xtrn_task_sym_and_offset(val,namep,offp,task) \
+ db_symbol_values(0, db_search_task_symbol(val,DB_STGY_XTRN,offp,task), \
+ namep,0)
+
+/* find symbol in current task */
+#define db_search_symbol(val,strgy,offp) \
+ db_search_task_symbol(val,strgy,offp,0)
+
+/* strcmp, modulo leading char */
+extern boolean_t db_eqname( char* src, char* dst, char c );
+
+/* print closest symbol to a value */
+extern void db_task_printsym( db_expr_t off,
+ db_strategy_t strategy,
+ task_t task);
+
+/* print closest symbol to a value */
+extern void db_printsym( db_expr_t off, db_strategy_t strategy);
+
+/*
+ * Symbol table switch, defines the interface
+ * to symbol-table specific routines.
+ * [NOTE: incomplete prototypes because of a broken compiler]
+ */
+
+extern struct db_sym_switch {
+
+ boolean_t (*init)(
+/* char *start,
+ char *end,
+ char *name,
+ char *task_addr
+*/ );
+
+ db_sym_t (*lookup)(
+/* db_symtab_t *stab,
+ char *symstr
+*/ );
+ db_sym_t (*search_symbol)(
+/* db_symtab_t *stab,
+ db_addr_t off,
+ db_strategy_t strategy,
+ db_expr_t *diffp
+*/ );
+
+ boolean_t (*line_at_pc)(
+/* db_symtab_t *stab,
+ db_sym_t sym,
+ char **file,
+ int *line,
+ db_expr_t pc
+*/ );
+
+ void (*symbol_values)(
+/* db_sym_t sym,
+ char **namep,
+ db_expr_t *valuep
+*/ );
+
+} x_db[];
+
+#ifndef symtab_type
+#define symtab_type(s) SYMTAB_AOUT
+#endif
+
+#define X_db_sym_init(s,e,n,t) x_db[symtab_type(s)].init(s,e,n,t)
+#define X_db_lookup(s,n) x_db[(s)->type].lookup(s,n)
+#define X_db_search_symbol(s,o,t,d) x_db[(s)->type].search_symbol(s,o,t,d)
+#define X_db_line_at_pc(s,p,f,l,a) x_db[(s)->type].line_at_pc(s,p,f,l,a)
+#define X_db_symbol_values(s,p,n,v) x_db[(s)->type].symbol_values(p,n,v)
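A minimal usage sketch of the lookup macros declared above. The wrapper name db_example_print_nearest is hypothetical and not part of the patch; db_task_printsym in db_sym.c is the real equivalent.

/* Sketch: print the nearest external symbol to a kernel address. */
extern unsigned int db_maxoff;

void
db_example_print_nearest(addr)
	db_addr_t addr;
{
	char *name;
	db_addr_t offset;

	db_find_xtrn_task_sym_and_offset(addr, &name, &offset, TASK_NULL);
	if (name != 0 && offset <= db_maxoff)
		db_printf("%s+%#r\n", name, offset);
	else
		db_printf("%#X\n", addr);
}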
diff --git a/ddb/db_task_thread.c b/ddb/db_task_thread.c
new file mode 100644
index 00000000..8ab8cde0
--- /dev/null
+++ b/ddb/db_task_thread.c
@@ -0,0 +1,303 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <machine/db_machdep.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_variables.h>
+
+
+
+/*
+ * The following constants prevent an infinite loop in task or
+ * thread searches when a list has been corrupted.
+ */
+#define DB_MAX_TASKID 0x10000 /* max # of tasks */
+#define DB_MAX_THREADID 0x10000 /* max # of threads in a task */
+#define DB_MAX_PSETS 0x10000 /* max # of processor sets */
+
+task_t db_default_task; /* default target task */
+thread_t db_default_thread; /* default target thread */
+
+/*
+ * search the valid task queues, and return the queue position as the task id
+ */
+int
+db_lookup_task(target_task)
+ task_t target_task;
+{
+ register task_t task;
+ register task_id;
+ register processor_set_t pset;
+ register npset = 0;
+
+ task_id = 0;
+ if (queue_first(&all_psets) == 0)
+ return(-1);
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ if (npset++ >= DB_MAX_PSETS)
+ return(-1);
+ if (queue_first(&pset->tasks) == 0)
+ continue;
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ if (target_task == task)
+ return(task_id);
+ if (task_id++ >= DB_MAX_TASKID)
+ return(-1);
+ }
+ }
+ return(-1);
+}
+
+/*
+ * search thread queue of the task, and return the queue position
+ */
+int
+db_lookup_task_thread(task, target_thread)
+ task_t task;
+ thread_t target_thread;
+{
+ register thread_t thread;
+ register thread_id;
+
+ thread_id = 0;
+ if (queue_first(&task->thread_list) == 0)
+ return(-1);
+ queue_iterate(&task->thread_list, thread, thread_t, thread_list) {
+ if (target_thread == thread)
+ return(thread_id);
+ if (thread_id++ >= DB_MAX_THREADID)
+ return(-1);
+ }
+ return(-1);
+}
+
+/*
+ * search thread queue of every valid task, and return the queue position
+ * as the thread id.
+ */
+int
+db_lookup_thread(target_thread)
+ thread_t target_thread;
+{
+ register thread_id;
+ register task_t task;
+ register processor_set_t pset;
+ register ntask = 0;
+ register npset = 0;
+
+ if (queue_first(&all_psets) == 0)
+ return(-1);
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ if (npset++ >= DB_MAX_PSETS)
+ return(-1);
+ if (queue_first(&pset->tasks) == 0)
+ continue;
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ if (ntask++ > DB_MAX_TASKID)
+ return(-1);
+ if (task->thread_count == 0)
+ continue;
+ thread_id = db_lookup_task_thread(task, target_thread);
+ if (thread_id >= 0)
+ return(thread_id);
+ }
+ }
+ return(-1);
+}
+
+/*
+ * check the address is a valid thread address
+ */
+boolean_t
+db_check_thread_address_valid(thread)
+ thread_t thread;
+{
+ if (db_lookup_thread(thread) < 0) {
+ db_printf("Bad thread address 0x%x\n", thread);
+ db_flush_lex();
+ return(FALSE);
+ } else
+ return(TRUE);
+}
+
+/*
+ * convert task_id (queue position) to task address
+ */
+task_t
+db_lookup_task_id(task_id)
+ register task_id;
+{
+ register task_t task;
+ register processor_set_t pset;
+ register npset = 0;
+
+ if (task_id > DB_MAX_TASKID)
+ return(TASK_NULL);
+ if (queue_first(&all_psets) == 0)
+ return(TASK_NULL);
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ if (npset++ >= DB_MAX_PSETS)
+ return(TASK_NULL);
+ if (queue_first(&pset->tasks) == 0)
+ continue;
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ if (task_id-- <= 0)
+ return(task);
+ }
+ }
+ return(TASK_NULL);
+}
+
+/*
+ * convert (task_id, thread_id) pair to thread address
+ */
+static thread_t
+db_lookup_thread_id(task, thread_id)
+ task_t task;
+ register thread_id;
+{
+ register thread_t thread;
+
+
+ if (thread_id > DB_MAX_THREADID)
+ return(THREAD_NULL);
+ if (queue_first(&task->thread_list) == 0)
+ return(THREAD_NULL);
+ queue_iterate(&task->thread_list, thread, thread_t, thread_list) {
+ if (thread_id-- <= 0)
+ return(thread);
+ }
+ return(THREAD_NULL);
+}
+
+/*
+ * get the next parameter from the command line, and check that it is
+ * a valid thread address
+ */
+boolean_t
+db_get_next_thread(threadp, position)
+ thread_t *threadp;
+ int position;
+{
+ db_expr_t value;
+ thread_t thread;
+
+ *threadp = THREAD_NULL;
+ if (db_expression(&value)) {
+ thread = (thread_t) value;
+ if (!db_check_thread_address_valid(thread)) {
+ db_flush_lex();
+ return(FALSE);
+ }
+ } else if (position <= 0) {
+ thread = db_default_thread;
+ } else
+ return(FALSE);
+ *threadp = thread;
+ return(TRUE);
+}
+
+/*
+ * check that the default thread is still valid
+ * (called on entering a DDB session)
+ */
+void
+db_init_default_thread()
+{
+ if (db_lookup_thread(db_default_thread) < 0) {
+ db_default_thread = THREAD_NULL;
+ db_default_task = TASK_NULL;
+ } else
+ db_default_task = db_default_thread->task;
+}
+
+/*
+ * set or get the default thread, which is used when the /t or :t option
+ * is specified on the command line
+ */
+/* ARGSUSED */
+int
+db_set_default_thread(vp, valuep, flag)
+ struct db_variable *vp;
+ db_expr_t *valuep;
+ int flag;
+{
+ thread_t thread;
+
+ if (flag != DB_VAR_SET) {
+ *valuep = (db_expr_t) db_default_thread;
+ return(0);
+ }
+ thread = (thread_t) *valuep;
+ if (thread != THREAD_NULL && !db_check_thread_address_valid(thread))
+ db_error(0);
+ /* NOTREACHED */
+ db_default_thread = thread;
+ if (thread)
+ db_default_task = thread->task;
+ return(0);
+}
+
+/*
+ * convert $taskXXX[.YYY] type DDB variable to task or thread address
+ */
+int
+db_get_task_thread(vp, valuep, flag, ap)
+ struct db_variable *vp;
+ db_expr_t *valuep;
+ int flag;
+ db_var_aux_param_t ap;
+{
+ task_t task;
+ thread_t thread;
+
+ if (flag != DB_VAR_GET) {
+ db_error("Cannot set to $task variable\n");
+ /* NOTREACHED */
+ }
+ if ((task = db_lookup_task_id(ap->suffix[0])) == TASK_NULL) {
+ db_printf("no such task($task%d)\n", ap->suffix[0]);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ if (ap->level <= 1) {
+ *valuep = (db_expr_t) task;
+ return(0);
+ }
+ if ((thread = db_lookup_thread_id(task, ap->suffix[1])) == THREAD_NULL){
+ db_printf("no such thread($task%d.%d)\n",
+ ap->suffix[0], ap->suffix[1]);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ *valuep = (db_expr_t) thread;
+ return(0);
+}
+
+#endif /* MACH_KDB */
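A hedged sketch of the id/address round trip provided by the lookups above; the helper name db_example_show_task_id is hypothetical and not part of the patch.

/* Sketch: ddb task ids are just queue positions, so they round-trip. */
void
db_example_show_task_id(task)
	task_t task;
{
	int id;

	id = db_lookup_task(task);		/* -1 if not on any pset queue */
	if (id >= 0 && db_lookup_task_id(id) == task)
		db_printf("task %d at 0x%x\n", id, task);
}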
diff --git a/ddb/db_task_thread.h b/ddb/db_task_thread.h
new file mode 100644
index 00000000..26a071cf
--- /dev/null
+++ b/ddb/db_task_thread.h
@@ -0,0 +1,51 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _DDB_DB_TASK_THREAD_H_
+#define _DDB_DB_TASK_THREAD_H_
+
+#include <kern/task.h>
+#include <kern/thread.h>
+
+#define db_current_task() \
+ ((current_thread())? current_thread()->task: TASK_NULL)
+#define db_target_space(thread, user_space) \
+ ((!(user_space))? TASK_NULL: \
+ (thread)? (thread)->task: db_current_task())
+#define db_is_current_task(task) \
+ ((task) == TASK_NULL || (task) == db_current_task())
+
+extern task_t db_default_task; /* default target task */
+extern thread_t db_default_thread; /* default target thread */
+
+extern int db_lookup_task(/* task_t */);
+extern int db_lookup_thread(/* thread_t */);
+extern int db_lookup_task_thread(/* task_t, thread_t */);
+extern boolean_t db_check_thread_address_valid(/* db_expr_t */);
+extern boolean_t db_get_next_thread(/* thread_t *, int */);
+extern void db_init_default_thread();
+
+#endif /* _DDB_DB_TASK_THREAD_H_ */
diff --git a/ddb/db_trap.c b/ddb/db_trap.c
new file mode 100644
index 00000000..dbb58920
--- /dev/null
+++ b/ddb/db_trap.c
@@ -0,0 +1,107 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+/*
+ * Trap entry point to kernel debugger.
+ */
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_command.h>
+#include <ddb/db_access.h>
+#include <ddb/db_break.h>
+#include <ddb/db_task_thread.h>
+
+
+
+extern jmp_buf_t *db_recover;
+
+extern void db_restart_at_pc();
+extern boolean_t db_stop_at_pc();
+
+extern int db_inst_count;
+extern int db_load_count;
+extern int db_store_count;
+
+void
+db_task_trap(type, code, user_space)
+ int type, code;
+ boolean_t user_space;
+{
+ jmp_buf_t db_jmpbuf;
+ jmp_buf_t *prev;
+ boolean_t bkpt;
+ boolean_t watchpt;
+ void db_init_default_thread();
+ void db_check_breakpoint_valid();
+ task_t task_space;
+
+ task_space = db_target_space(current_thread(), user_space);
+ bkpt = IS_BREAKPOINT_TRAP(type, code);
+ watchpt = IS_WATCHPOINT_TRAP(type, code);
+
+ db_init_default_thread();
+ db_check_breakpoint_valid();
+ if (db_stop_at_pc(&bkpt, task_space)) {
+ if (db_inst_count) {
+ db_printf("After %d instructions (%d loads, %d stores),\n",
+ db_inst_count, db_load_count, db_store_count);
+ }
+ if (bkpt)
+ db_printf("Breakpoint at ");
+ else if (watchpt)
+ db_printf("Watchpoint at ");
+ else
+ db_printf("Stopped at ");
+ db_dot = PC_REGS(DDB_REGS);
+
+ prev = db_recover;
+ if (_setjmp(db_recover = &db_jmpbuf) == 0)
+ db_print_loc_and_inst(db_dot, task_space);
+ else
+ db_printf("Trouble printing location %#X.\n", db_dot);
+ db_recover = prev;
+
+ db_command_loop();
+ }
+
+ db_restart_at_pc(watchpt, task_space);
+}
+
+void
+db_trap(type, code)
+ int type, code;
+{
+ db_task_trap(type, code, !DB_VALID_KERN_ADDR(PC_REGS(DDB_REGS)));
+}
+
+#endif /* MACH_KDB */
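A sketch, assuming hypothetical port glue named kdb_trap, of how a machine-dependent trap handler would hand control to the entry points above; the real name and return convention are up to each port and are not specified by this patch.

/* Sketch only: hypothetical machine-dependent glue. */
int
kdb_trap(type, code)
	int type, code;
{
	db_trap(type, code);	/* db_trap picks kernel vs. user space from the PC */
	return (1);		/* report the trap as handled */
}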
diff --git a/ddb/db_variables.c b/ddb/db_variables.c
new file mode 100644
index 00000000..541dca1c
--- /dev/null
+++ b/ddb/db_variables.c
@@ -0,0 +1,241 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <machine/db_machdep.h>
+
+#include <ddb/db_lex.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_task_thread.h>
+
+extern unsigned long db_maxoff;
+
+extern db_expr_t db_radix;
+extern db_expr_t db_max_width;
+extern db_expr_t db_tab_stop_width;
+extern db_expr_t db_max_line;
+extern int db_set_default_thread();
+extern int db_get_task_thread();
+extern int db_arg_variable();
+
+#define DB_NWORK	32		/* number of work variables */
+
+db_expr_t	db_work[DB_NWORK];	/* work variables */
+
+struct db_variable db_vars[] = {
+ { "radix", &db_radix, FCN_NULL },
+ { "maxoff", (db_expr_t*)&db_maxoff, FCN_NULL },
+ { "maxwidth", &db_max_width, FCN_NULL },
+ { "tabstops", &db_tab_stop_width, FCN_NULL },
+ { "lines", &db_max_line, FCN_NULL },
+ { "thread", 0, db_set_default_thread },
+ { "task", 0, db_get_task_thread,
+ 1, 2, -1, -1 },
+ { "work", &db_work[0], FCN_NULL,
+ 1, 1, 0, DB_NWORK-1 },
+ { "arg", 0, db_arg_variable,
+ 1, 1, -1, -1 },
+};
+struct db_variable *db_evars = db_vars + sizeof(db_vars)/sizeof(db_vars[0]);
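As a concrete illustration of how this table is consumed (the `db>` prompt and exact
spellings are illustrative; the binding of db_set_cmd() below to a debugger command
lives in db_command.c, not in this file):

	db> set $radix 16		plain variable (FCN_NULL, no suffix): writes db_radix
	db> set $lines 24		likewise for db_max_line
	db> set $work3 = 0x100		one suffix level: db_cmp_variable_name() parses the 3
					and db_read_write_variable() stores into db_work[3]

The `=` is optional; db_set_cmd() simply discards that token when it is present.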
+
+char *
+db_get_suffix(suffix, suffix_value)
+ register char *suffix;
+ short *suffix_value;
+{
+	register int value;
+
+ for (value = 0; *suffix && *suffix != '.' && *suffix != ':'; suffix++) {
+ if (*suffix < '0' || *suffix > '9')
+ return(0);
+ value = value*10 + *suffix - '0';
+ }
+ *suffix_value = value;
+ if (*suffix == '.')
+ suffix++;
+ return(suffix);
+}
+
+static boolean_t
+db_cmp_variable_name(vp, name, ap)
+ struct db_variable *vp;
+ char *name;
+ register db_var_aux_param_t ap;
+{
+ register char *var_np, *np;
+	register int level;
+
+ for (np = name, var_np = vp->name; *var_np; ) {
+ if (*np++ != *var_np++)
+ return(FALSE);
+ }
+ for (level = 0; *np && *np != ':' && level < vp->max_level; level++){
+ if ((np = db_get_suffix(np, &ap->suffix[level])) == 0)
+ return(FALSE);
+ }
+ if ((*np && *np != ':') || level < vp->min_level
+ || (level > 0 && (ap->suffix[0] < vp->low
+ || (vp->high >= 0 && ap->suffix[0] > vp->high))))
+ return(FALSE);
+ db_strcpy(ap->modif, (*np)? np+1: "");
+ ap->thread = (db_option(ap->modif, 't')?db_default_thread: THREAD_NULL);
+ ap->level = level;
+ return(TRUE);
+}
+
+int
+db_find_variable(varp, ap)
+ struct db_variable **varp;
+ db_var_aux_param_t ap;
+{
+ int t;
+ struct db_variable *vp;
+
+ t = db_read_token();
+ if (t == tIDENT) {
+ for (vp = db_vars; vp < db_evars; vp++) {
+ if (db_cmp_variable_name(vp, db_tok_string, ap)) {
+ *varp = vp;
+ return (1);
+ }
+ }
+ for (vp = db_regs; vp < db_eregs; vp++) {
+ if (db_cmp_variable_name(vp, db_tok_string, ap)) {
+ *varp = vp;
+ return (1);
+ }
+ }
+ }
+ db_printf("Unknown variable \"$%s\"\n", db_tok_string);
+ db_error(0);
+ return (0);
+}
+
+
+void db_read_write_variable(); /* forward */
+
+int
+db_get_variable(valuep)
+ db_expr_t *valuep;
+{
+ struct db_variable *vp;
+ struct db_var_aux_param aux_param;
+ char modif[TOK_STRING_SIZE];
+
+ aux_param.modif = modif;
+ if (!db_find_variable(&vp, &aux_param))
+ return (0);
+
+ db_read_write_variable(vp, valuep, DB_VAR_GET, &aux_param);
+
+ return (1);
+}
+
+int
+db_set_variable(value)
+ db_expr_t value;
+{
+ struct db_variable *vp;
+ struct db_var_aux_param aux_param;
+ char modif[TOK_STRING_SIZE];
+
+ aux_param.modif = modif;
+ if (!db_find_variable(&vp, &aux_param))
+ return (0);
+
+ db_read_write_variable(vp, &value, DB_VAR_SET, &aux_param);
+
+ return (1);
+}
+
+void
+db_read_write_variable(vp, valuep, rw_flag, ap)
+ struct db_variable *vp;
+ db_expr_t *valuep;
+ int rw_flag;
+ db_var_aux_param_t ap;
+{
+ int (*func)() = vp->fcn;
+ struct db_var_aux_param aux_param;
+
+ if (ap == 0) {
+ ap = &aux_param;
+ ap->modif = "";
+ ap->level = 0;
+ ap->thread = THREAD_NULL;
+ }
+ if (func == FCN_NULL) {
+ if (rw_flag == DB_VAR_SET)
+ vp->valuep[(ap->level)? (ap->suffix[0] - vp->low): 0] = *valuep;
+ else
+ *valuep = vp->valuep[(ap->level)? (ap->suffix[0] - vp->low): 0];
+ } else
+ (*func)(vp, valuep, rw_flag, ap);
+}
+
+void
+db_set_cmd()
+{
+ db_expr_t value;
+ int t;
+ struct db_variable *vp;
+ struct db_var_aux_param aux_param;
+ char modif[TOK_STRING_SIZE];
+
+ aux_param.modif = modif;
+ t = db_read_token();
+ if (t != tDOLLAR) {
+ db_error("Variable name should be prefixed with $\n");
+ return;
+ }
+ if (!db_find_variable(&vp, &aux_param)) {
+ db_error("Unknown variable\n");
+ return;
+ }
+
+ t = db_read_token();
+ if (t != tEQ)
+ db_unread_token(t);
+
+ if (!db_expression(&value)) {
+ db_error("No value\n");
+ return;
+ }
+ if ((t = db_read_token()) == tSEMI_COLON)
+ db_unread_token(t);
+ else if (t != tEOL)
+ db_error("?\n");
+
+ db_read_write_variable(vp, &value, DB_VAR_SET, &aux_param);
+}
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_variables.h b/ddb/db_variables.h
new file mode 100644
index 00000000..8829aa99
--- /dev/null
+++ b/ddb/db_variables.h
@@ -0,0 +1,78 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#ifndef _DB_VARIABLES_H_
+#define _DB_VARIABLES_H_
+
+#include <kern/thread.h>
+
+/*
+ * Debugger variables.
+ */
+struct db_variable {
+ char *name; /* Name of variable */
+ db_expr_t *valuep; /* pointer to value of variable */
+ /* function to call when reading/writing */
+ int (*fcn)(/* db_variable, db_expr_t, int, db_var_aux_param_t */);
+	short	min_level;	/* minimum number of suffix levels */
+	short	max_level;	/* maximum number of suffix levels */
+ short low; /* low value of level 1 suffix */
+ short high; /* high value of level 1 suffix */
+#define DB_VAR_GET 0
+#define DB_VAR_SET 1
+};
+#define FCN_NULL ((int (*)())0)
+
+#define DB_VAR_LEVEL	3	/* maximum number of suffix levels */
+
+#define db_read_variable(vp, valuep) \
+ db_read_write_variable(vp, valuep, DB_VAR_GET, 0)
+#define db_write_variable(vp, valuep) \
+ db_read_write_variable(vp, valuep, DB_VAR_SET, 0)
+
+/*
+ * auxiliary parameters passed to a variable handler
+ */
+struct db_var_aux_param {
+ char *modif; /* option strings */
+ short level; /* number of levels */
+ short suffix[DB_VAR_LEVEL]; /* suffix */
+	thread_t	thread;			/* target thread */
+};
+
+typedef struct db_var_aux_param *db_var_aux_param_t;
+
+
+extern struct db_variable db_vars[]; /* debugger variables */
+extern struct db_variable *db_evars;
+extern struct db_variable db_regs[]; /* machine registers */
+extern struct db_variable *db_eregs;
+
+#endif /* _DB_VARIABLES_H_ */
diff --git a/ddb/db_watch.c b/ddb/db_watch.c
new file mode 100644
index 00000000..410c0a25
--- /dev/null
+++ b/ddb/db_watch.c
@@ -0,0 +1,318 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: Richard P. Draves, Carnegie Mellon University
+ * Date: 10/90
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <mach/vm_param.h>
+#include <mach/machine/vm_types.h>
+#include <mach/machine/vm_param.h>
+#include <vm/vm_map.h>
+
+#include <machine/db_machdep.h>
+#include <ddb/db_lex.h>
+#include <ddb/db_watch.h>
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_task_thread.h>
+
+
+
+/*
+ * Watchpoints.
+ */
+
+boolean_t db_watchpoints_inserted = TRUE;
+
+#define NWATCHPOINTS 100
+struct db_watchpoint db_watch_table[NWATCHPOINTS];
+db_watchpoint_t db_next_free_watchpoint = &db_watch_table[0];
+db_watchpoint_t db_free_watchpoints = 0;
+db_watchpoint_t db_watchpoint_list = 0;
+
+extern vm_map_t kernel_map;
+
+db_watchpoint_t
+db_watchpoint_alloc()
+{
+ register db_watchpoint_t watch;
+
+ if ((watch = db_free_watchpoints) != 0) {
+ db_free_watchpoints = watch->link;
+ return (watch);
+ }
+ if (db_next_free_watchpoint == &db_watch_table[NWATCHPOINTS]) {
+ db_printf("All watchpoints used.\n");
+ return (0);
+ }
+ watch = db_next_free_watchpoint;
+ db_next_free_watchpoint++;
+
+ return (watch);
+}
+
+void
+db_watchpoint_free(watch)
+ register db_watchpoint_t watch;
+{
+ watch->link = db_free_watchpoints;
+ db_free_watchpoints = watch;
+}
+
+void
+db_set_watchpoint(task, addr, size)
+ task_t task;
+ db_addr_t addr;
+ vm_size_t size;
+{
+ register db_watchpoint_t watch;
+
+ /*
+ * Should we do anything fancy with overlapping regions?
+ */
+
+ for (watch = db_watchpoint_list; watch != 0; watch = watch->link) {
+ if (watch->task == task &&
+ (watch->loaddr == addr) &&
+ (watch->hiaddr == addr+size)) {
+ db_printf("Already set.\n");
+ return;
+ }
+ }
+
+ watch = db_watchpoint_alloc();
+ if (watch == 0) {
+ db_printf("Too many watchpoints.\n");
+ return;
+ }
+
+ watch->task = task;
+ watch->loaddr = addr;
+ watch->hiaddr = addr+size;
+
+ watch->link = db_watchpoint_list;
+ db_watchpoint_list = watch;
+
+ db_watchpoints_inserted = FALSE;
+}
+
+void
+db_delete_watchpoint(task, addr)
+ task_t task;
+ db_addr_t addr;
+{
+ register db_watchpoint_t watch;
+ register db_watchpoint_t *prev;
+
+ for (prev = &db_watchpoint_list; (watch = *prev) != 0;
+ prev = &watch->link) {
+ if (watch->task == task &&
+ (watch->loaddr <= addr) &&
+ (addr < watch->hiaddr)) {
+ *prev = watch->link;
+ db_watchpoint_free(watch);
+ return;
+ }
+ }
+
+ db_printf("Not set.\n");
+}
+
+void
+db_list_watchpoints()
+{
+ register db_watchpoint_t watch;
+ int task_id;
+
+ if (db_watchpoint_list == 0) {
+ db_printf("No watchpoints set\n");
+ return;
+ }
+
+ db_printf("Space Address Size\n");
+ for (watch = db_watchpoint_list; watch != 0; watch = watch->link) {
+ if (watch->task == TASK_NULL)
+ db_printf("kernel ");
+ else {
+ task_id = db_lookup_task(watch->task);
+ if (task_id < 0)
+ db_printf("%*X", 2*sizeof(vm_offset_t), watch->task);
+ else
+ db_printf("task%-3d ", task_id);
+ }
+ db_printf(" %*X %X\n", 2*sizeof(vm_offset_t), watch->loaddr,
+ watch->hiaddr - watch->loaddr);
+ }
+}
+
+static int
+db_get_task(modif, taskp, addr)
+ char *modif;
+ task_t *taskp;
+ db_addr_t addr;
+{
+ task_t task = TASK_NULL;
+ db_expr_t value;
+ boolean_t user_space;
+
+ user_space = db_option(modif, 'T');
+ if (user_space) {
+ if (db_expression(&value)) {
+ task = (task_t)value;
+ if (db_lookup_task(task) < 0) {
+ db_printf("bad task address %X\n", task);
+ return(-1);
+ }
+ } else {
+ task = db_default_task;
+ if (task == TASK_NULL) {
+ if ((task = db_current_task()) == TASK_NULL) {
+ db_printf("no task\n");
+ return(-1);
+ }
+ }
+ }
+ }
+ if (!DB_VALID_ADDRESS(addr, user_space)) {
+ db_printf("Address %#X is not in %s space\n", addr,
+ (user_space)? "user": "kernel");
+ return(-1);
+ }
+ *taskp = task;
+ return(0);
+}
+
+/* Delete watchpoint */
+/*ARGSUSED*/
+void
+db_deletewatch_cmd(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ task_t task;
+
+ if (db_get_task(modif, &task, addr) < 0)
+ return;
+ db_delete_watchpoint(task, addr);
+}
+
+/* Set watchpoint */
+/*ARGSUSED*/
+void
+db_watchpoint_cmd(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ vm_size_t size;
+ db_expr_t value;
+ task_t task;
+ boolean_t db_option();
+
+ if (db_get_task(modif, &task, addr) < 0)
+ return;
+ if (db_expression(&value))
+ size = (vm_size_t) value;
+ else
+ size = sizeof(int);
+ db_set_watchpoint(task, addr, size);
+}
+
+/* list watchpoints */
+void
+db_listwatch_cmd()
+{
+ db_list_watchpoints();
+}
+
+void
+db_set_watchpoints()
+{
+ register db_watchpoint_t watch;
+ vm_map_t map;
+
+ if (!db_watchpoints_inserted) {
+ for (watch = db_watchpoint_list; watch != 0; watch = watch->link) {
+ map = (watch->task)? watch->task->map: kernel_map;
+ pmap_protect(map->pmap,
+ trunc_page(watch->loaddr),
+ round_page(watch->hiaddr),
+ VM_PROT_READ);
+ }
+ db_watchpoints_inserted = TRUE;
+ }
+}
+
+void
+db_clear_watchpoints()
+{
+ db_watchpoints_inserted = FALSE;
+}
+
+boolean_t
+db_find_watchpoint(map, addr, regs)
+ vm_map_t map;
+ db_addr_t addr;
+ db_regs_t *regs;
+{
+ register db_watchpoint_t watch;
+ db_watchpoint_t found = 0;
+ register task_t task_space;
+
+ task_space = (map == kernel_map)? TASK_NULL: db_current_task();
+ for (watch = db_watchpoint_list; watch != 0; watch = watch->link) {
+ if (watch->task == task_space) {
+ if ((watch->loaddr <= addr) && (addr < watch->hiaddr))
+ return (TRUE);
+ else if ((trunc_page(watch->loaddr) <= addr) &&
+ (addr < round_page(watch->hiaddr)))
+ found = watch;
+ }
+ }
+
+ /*
+ * We didn't hit exactly on a watchpoint, but we are
+ * in a protected region. We want to single-step
+ * and then re-protect.
+ */
+
+ if (found) {
+ db_watchpoints_inserted = FALSE;
+ db_single_step(regs, task_space);
+ }
+
+ return (FALSE);
+}
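A worked example of the page-granular scheme above, with a hypothetical 4 KB page size
and made-up addresses:

	db_set_watchpoint(task, 0x5008, 4)	-> loaddr = 0x5008, hiaddr = 0x500c
	db_set_watchpoints()			-> pmap_protect() 0x5000..0x6000 to VM_PROT_READ
	write fault at 0x5ff0	-> inside the protected page but no exact hit: single-step
				   with the protection dropped, re-protect on the next entry
	write fault at 0x5009	-> loaddr <= addr < hiaddr: report the watchpoint hit (TRUE)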
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_watch.h b/ddb/db_watch.h
new file mode 100644
index 00000000..9192bbd4
--- /dev/null
+++ b/ddb/db_watch.h
@@ -0,0 +1,63 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 10/90
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#ifndef _DDB_DB_WATCH_
+#define _DDB_DB_WATCH_
+
+#include <mach/machine/vm_types.h>
+#include <kern/task.h>
+#include <machine/db_machdep.h>
+
+/*
+ * Watchpoint.
+ */
+
+typedef struct db_watchpoint {
+ task_t task; /* in this map */
+ db_addr_t loaddr; /* from this address */
+ db_addr_t hiaddr; /* to this address */
+ struct db_watchpoint *link; /* link in in-use or free chain */
+} *db_watchpoint_t;
+
+extern boolean_t db_find_watchpoint(/* task_t task, db_addr_t addr,
+ db_regs_t *regs */);
+extern void db_set_watchpoints();
+extern void db_clear_watchpoints();
+
+extern void db_set_watchpoint(/* task_t task, db_addr_t addr, vm_size_t size */);
+extern void db_delete_watchpoint(/* task_t task, db_addr_t addr */);
+extern void db_list_watchpoints();
+
+#endif /* _DDB_DB_WATCH_ */
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_write_cmd.c b/ddb/db_write_cmd.c
new file mode 100644
index 00000000..a72102b2
--- /dev/null
+++ b/ddb/db_write_cmd.c
@@ -0,0 +1,109 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+
+#include <machine/db_machdep.h>
+
+#include <ddb/db_lex.h>
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_task_thread.h>
+
+
+
+/*
+ * Write to memory.
+ */
+/*ARGSUSED*/
+void
+db_write_cmd(address, have_addr, count, modif)
+ db_expr_t address;
+ boolean_t have_addr;
+ db_expr_t count;
+ char * modif;
+{
+ register db_addr_t addr;
+ register db_expr_t old_value;
+ db_expr_t new_value;
+ register int size;
+ boolean_t wrote_one = FALSE;
+ boolean_t t_opt, u_opt;
+ thread_t thread;
+ task_t task;
+
+ addr = (db_addr_t) address;
+
+ size = db_size_option(modif, &u_opt, &t_opt);
+ if (t_opt)
+ {
+ if (!db_get_next_thread(&thread, 0))
+ return;
+ task = thread->task;
+ }
+ else
+ task = db_current_task();
+
+ /* if user space is not explicitly specified,
+ look in the kernel */
+ if (!u_opt)
+ task = TASK_NULL;
+
+ if (!DB_VALID_ADDRESS(addr, u_opt)) {
+ db_printf("Bad address %#*X\n", 2*sizeof(vm_offset_t), addr);
+ return;
+ }
+
+ while (db_expression(&new_value)) {
+ old_value = db_get_task_value(addr, size, FALSE, task);
+ db_task_printsym(addr, DB_STGY_ANY, task);
+ db_printf("\t\t%#*N\t=\t%#*N\n",
+ 2*sizeof(db_expr_t), old_value,
+ 2*sizeof(db_expr_t), new_value);
+ db_put_task_value(addr, size, new_value, task);
+ addr += size;
+
+ wrote_one = TRUE;
+ }
+
+ if (!wrote_one)
+ db_error("Nothing written.\n");
+
+ db_next = addr;
+ db_prev = addr - size;
+}
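A hedged usage sketch of the resulting debugger command (the command name and the
b/h/l size letters are the conventional ddb spellings, registered in db_command.c
rather than here, so treat them as illustrative):

	db> write/h 0x1000 0x1234 0x5678	two half-word writes, at 0x1000 and 0x1002
	db> write/u 0x2000 0			/u selects the current task's user space;
						without it the write goes to the kernel map

Each store echoes the symbolic address together with the old and new values before
db_put_task_value() performs it.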
+
+#endif /* MACH_KDB */
diff --git a/ddb/nlist.h b/ddb/nlist.h
new file mode 100644
index 00000000..b948dfd3
--- /dev/null
+++ b/ddb/nlist.h
@@ -0,0 +1,63 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * nlist.h - symbol table entry structure for an a.out file
+ * derived from FSF's a.out.gnu.h
+ *
+ */
+
+#ifndef _DDB_NLIST_H_
+#define _DDB_NLIST_H_
+
+struct nlist {
+ union n_un {
+ char *n_name; /* symbol name */
+ long n_strx; /* index into file string table */
+ } n_un;
+ unsigned char n_type; /* type flag, i.e. N_TEXT etc; see below */
+ unsigned char n_other; /* machdep uses */
+ short n_desc; /* see <stab.h> */
+#if alpha
+ int n_pad; /* alignment, used to carry framesize info */
+#endif
+ vm_offset_t n_value; /* value of this symbol (or sdb offset) */
+};
+
+/*
+ * Simple values for n_type.
+ */
+#define N_UNDF 0 /* undefined */
+#define N_ABS 2 /* absolute */
+#define N_TEXT 4 /* text */
+#define N_DATA 6 /* data */
+#define N_BSS 8 /* bss */
+#define N_FN 0x1f /* file name symbol */
+#define N_EXT 1 /* external bit, or'ed in */
+#define N_TYPE 0x1e /* mask for all the type bits */
+#define N_STAB 0xe0 /* if any of these bits set, a SDB entry */
+
+
+#endif /* _DDB_NLIST_H_ */
diff --git a/ddb/stab.h b/ddb/stab.h
new file mode 100644
index 00000000..3ebc1af8
--- /dev/null
+++ b/ddb/stab.h
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)stab.h 5.2 (Berkeley) 4/4/91
+ */
+
+
+/*
+ * The following are symbols used by various debuggers and by the Pascal
+ * compiler. Each of them must have one (or more) of the bits defined by
+ * the N_STAB mask set.
+ */
+
+#define N_GSYM 0x20 /* global symbol */
+#define N_FNAME 0x22 /* F77 function name */
+#define N_FUN 0x24 /* procedure name */
+#define N_STSYM 0x26 /* data segment variable */
+#define N_LCSYM 0x28 /* bss segment variable */
+#define N_MAIN 0x2a /* main function name */
+#define N_PC 0x30 /* global Pascal symbol */
+#define N_FRAME 0x34 /* stack frame descriptor */
+#define N_RSYM 0x40 /* register variable */
+#define N_SLINE 0x44 /* text segment line number */
+#define N_DSLINE 0x46 /* data segment line number */
+#define N_BSLINE 0x48 /* bss segment line number */
+#define N_SSYM 0x60 /* structure/union element */
+#define N_SO 0x64 /* main source file name */
+#define N_LSYM 0x80 /* stack variable */
+#define N_BINCL 0x82 /* include file beginning */
+#define N_SOL 0x84 /* included source file name */
+#define N_PSYM 0xa0 /* parameter variable */
+#define N_EINCL 0xa2 /* include file end */
+#define N_ENTRY 0xa4 /* alternate entry point */
+#define N_LBRAC 0xc0 /* left bracket */
+#define N_EXCL 0xc2 /* deleted include file */
+#define N_RBRAC 0xe0 /* right bracket */
+#define N_BCOMM 0xe2 /* begin common */
+#define N_ECOMM 0xe4 /* end common */
+#define N_ECOML 0xe8 /* end common (local name) */
+#define N_LENG 0xfe /* length of preceding entry */
diff --git a/ddb/tr.h b/ddb/tr.h
new file mode 100644
index 00000000..1d6145ad
--- /dev/null
+++ b/ddb/tr.h
@@ -0,0 +1,112 @@
+/*
+ * (c) Copyright 1992, 1993, 1994, 1995 OPEN SOFTWARE FOUNDATION, INC.
+ * ALL RIGHTS RESERVED
+ */
+/*
+ * OSF RI nmk19b2 5/2/95
+ */
+
+/*
+ * File: ddb/tr.h
+ * Author: Alan Langerman, Jeffrey Heller
+ * Date: 1992
+ *
+ * Internal trace routines. Like old-style XPRs but
+ * less formatting.
+ */
+
+#include <mach_assert.h>
+#include <mach_tr.h>
+
+/*
+ * Originally, we only wanted tracing when
+ * MACH_TR and MACH_ASSERT were turned on
+ * together. Now, there's no reason why
+ * MACH_TR and MACH_ASSERT can't be completely
+ * orthogonal.
+ */
+#define TRACE_BUFFER (MACH_TR)
+
+/*
+ * Log events in a circular trace buffer for future debugging.
+ * Events are unsigned integers. Each event has a descriptive
+ * message.
+ *
+ * TR_DECL must be used at the beginning of a routine using
+ * one of the tr calls. The macro should be passed the name
+ * of the function surrounded by quotation marks, e.g.,
+ * TR_DECL("netipc_recv_intr");
+ * and should be terminated with a semi-colon. The TR_DECL
+ * must be the *last* declaration in the variable declaration
+ * list, or syntax errors will be introduced when TRACE_BUFFER
+ * is turned off.
+ */
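A minimal C sketch of that convention, assuming TRACE_BUFFER is enabled; the function
name and message strings are hypothetical, only TR_DECL/tr1/tr2/tr_start/tr_stop come
from this header:

	void
	example_recv_intr(int npkts)
	{
		TR_DECL("example_recv_intr");	/* must be the last declaration */

		tr_start();			/* indent entries from nested calls */
		tr2("example_recv_intr: enter", npkts);
		/* ... interrupt work ... */
		tr1("example_recv_intr: exit");
		tr_stop();
	}

With TRACE_BUFFER off, every macro above expands to nothing, which is why TR_DECL has
to come last: the stray semicolon it leaves behind must not sit in front of another
declaration.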
+#ifndef _DDB_TR_H_
+#define _DDB_TR_H_
+
+#if TRACE_BUFFER
+
+#include <machine/db_machdep.h>
+
+#define __ui__ (unsigned int)
+#define TR_INIT() tr_init()
+#define TR_SHOW(a,b,c) show_tr((a),(b),(c))
+#define TR_DECL(funcname) char *__ntr_func_name__ = funcname
+#define tr1(msg) \
+ tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \
+ 0,0,0,0)
+#define tr2(msg,tag1) \
+ tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \
+ __ui__(tag1),0,0,0)
+#define tr3(msg,tag1,tag2) \
+ tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \
+ __ui__(tag1),__ui__(tag2),0,0)
+#define tr4(msg,tag1,tag2,tag3) \
+ tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \
+ __ui__(tag1),__ui__(tag2),__ui__(tag3),0)
+#define tr5(msg,tag1,tag2,tag3,tag4) \
+ tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \
+ __ui__(tag1),__ui__(tag2),__ui__(tag3),__ui__(tag4))
+
+/*
+ * Adjust tr log indentation based on function
+ * call graph; this method is quick-and-dirty
+ * and only works safely on a uniprocessor.
+ */
+extern int tr_indent;
+#define tr_start() tr_indent++
+#define tr_stop() tr_indent--
+
+extern void tr_init(void);
+extern void tr(
+ char *funcname,
+ char *file,
+ unsigned int lineno,
+ char *fmt,
+ unsigned int tag1,
+ unsigned int tag2,
+ unsigned int tag3,
+ unsigned int tag4);
+
+extern void db_show_tr(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char * modif);
+
+#else /* TRACE_BUFFER */
+
+#define TR_INIT()
+#define TR_SHOW(a,b,c)
+#define TR_DECL(funcname)
+#define tr1(msg)
+#define tr2(msg, tag1)
+#define tr3(msg, tag1, tag2)
+#define tr4(msg, tag1, tag2, tag3)
+#define tr5(msg, tag1, tag2, tag3, tag4)
+#define tr_start()
+#define tr_stop()
+
+#endif /* TRACE_BUFFER */
+
+#endif /* _DDB_TR_H_ */
diff --git a/device/blkio.c b/device/blkio.c
new file mode 100644
index 00000000..26d4a75a
--- /dev/null
+++ b/device/blkio.c
@@ -0,0 +1,238 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/89
+ *
+ * Block IO driven from generic kernel IO interface.
+ */
+#include <mach/kern_return.h>
+
+#include <device/param.h>
+#include <device/device_types.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+
+
+
+io_return_t block_io(strat, max_count, ior)
+ void (*strat)();
+ void (*max_count)();
+ register io_req_t ior;
+{
+ register kern_return_t rc;
+ boolean_t wait = FALSE;
+
+ /*
+ * Make sure the size is not too large by letting max_count
+ * change io_count. If we are doing a write, then io_alloc_size
+ * preserves the original io_count.
+ */
+ (*max_count)(ior);
+
+ /*
+ * If reading, allocate memory. If writing, wire
+ * down the incoming memory.
+ */
+ if (ior->io_op & IO_READ)
+ rc = device_read_alloc(ior, (vm_size_t)ior->io_count);
+ else
+ rc = device_write_get(ior, &wait);
+
+ if (rc != KERN_SUCCESS)
+ return (rc);
+
+ /*
+ * Queue the operation for the device.
+ */
+ (*strat)(ior);
+
+ /*
+ * The io is now queued. Wait for it if needed.
+ */
+ if (wait) {
+ iowait(ior);
+ return(D_SUCCESS);
+ }
+
+ return (D_IO_QUEUED);
+}
+
+/*
+ * 'standard' max_count routine. VM continuations mean that this
+ * code can cope with arbitrarily-sized write operations (they won't be
+ * atomic, but any caller that cares will do the op synchronously).
+ */
+#define MAX_PHYS (256 * 1024)
+
+void minphys(ior)
+ register io_req_t ior;
+{
+ if ((ior->io_op & (IO_WRITE | IO_READ | IO_OPEN)) == IO_WRITE)
+ return;
+
+ if (ior->io_count > MAX_PHYS)
+ ior->io_count = MAX_PHYS;
+}
+
+/*
+ * Dummy routine placed in device switch entries to indicate that
+ * block device may be mapped.
+ */
+vm_offset_t block_io_mmap()
+{
+ return (0);
+}
+
+/*
+ * Disk sort routine.
+ *
+ * We order the disk request chain so that the disk head will sweep
+ * back and forth across the disk. The chain is divided into two
+ * pieces, with requests ordered in opposite directions. Assume that
+ * the first part of the chain holds increasing cylinder numbers.
+ * If a new request has a higher cylinder number than the head of
+ * the chain, the disk head has not yet reached it; the new request
+ * can go in the first part of the chain. If the new request has
+ * a lower cylinder number, the disk head has already passed it and
+ * must catch it on the way back; so the new request goes in the
+ * second (descending) part of the chain.
+ * When all of the requests in the ascending portion are filled,
+ * the descending chain becomes the first chain, and requests above
+ * the first now go in the second part of the chain (ascending).
+ */
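A short worked trace of that policy with made-up cylinder numbers; each insertion below
starts from the same queue, and '|' only marks where the ascending run ends:

	queue:   10 -> 30 -> 60 | 50 -> 20
	add 40:  10 -> 30 -> 40 -> 60 | 50 -> 20	above the queue head, so it joins the
							ascending sweep between 30 and 60
	add  5:  10 -> 30 -> 60 | 50 -> 20 -> 5		already passed by the head, so it waits
							at the tail for the sweep back down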
+
+#define io_cylinder io_residual
+ /* Disk drivers put cylinder here */
+#define h_head io_next
+#define h_tail io_prev
+ /* IORs are chained here */
+
+void disksort(head, ior)
+ io_req_t head; /* (sort of) */
+ io_req_t ior;
+{
+ register int cylinder = ior->io_cylinder;
+ register io_req_t next, prev;
+
+ next = head->h_head;
+ if (next == 0) {
+ head->h_head = ior;
+ head->h_tail = ior;
+ ior->io_next = 0;
+ return;
+ }
+
+ do {
+ prev = next;
+ next = prev->io_next;
+ } while (next != 0 && prev->io_cylinder == next->io_cylinder);
+
+ if (next == 0) {
+ prev->io_next = ior;
+ head->h_tail = ior;
+ ior->io_next = 0;
+ return;
+ }
+
+ if (prev->io_cylinder < next->io_cylinder) {
+ /*
+ * Ascending list first.
+ */
+ if (prev->io_cylinder <= cylinder) {
+ /*
+ * Insert in ascending list.
+ */
+ while (next != 0 &&
+ next->io_cylinder <= cylinder &&
+ prev->io_cylinder <= next->io_cylinder)
+ {
+ prev = next;
+ next = prev->io_next;
+ }
+ }
+ else {
+ /*
+ * Insert in descending list
+ */
+ do {
+ prev = next;
+ next = prev->io_next;
+ } while (next != 0 &&
+ prev->io_cylinder <= next->io_cylinder);
+
+ while (next != 0 &&
+ next->io_cylinder >= cylinder)
+ {
+ prev = next;
+ next = prev->io_next;
+ }
+ }
+ }
+ else {
+ /*
+ * Descending first.
+ */
+ if (prev->io_cylinder >= cylinder) {
+ /*
+ * Insert in descending list.
+ */
+ while (next != 0 &&
+ next->io_cylinder >= cylinder &&
+ prev->io_cylinder >= next->io_cylinder)
+ {
+ prev = next;
+ next = prev->io_next;
+ }
+ }
+ else {
+ /*
+ * Insert in ascending list
+ */
+ do {
+ prev = next;
+ next = prev->io_next;
+ } while (next != 0 &&
+ prev->io_cylinder >= next->io_cylinder);
+ while (next != 0 &&
+ next->io_cylinder <= cylinder)
+ {
+ prev = next;
+ next = prev->io_next;
+ }
+ }
+ }
+ /*
+ * Insert between prev and next.
+ */
+ prev->io_next = ior;
+ ior->io_next = next;
+ if (next == 0) {
+ /* At tail of list. */
+ head->h_tail = ior;
+ }
+}
+
diff --git a/device/buf.h b/device/buf.h
new file mode 100644
index 00000000..cf346948
--- /dev/null
+++ b/device/buf.h
@@ -0,0 +1,102 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/90
+ *
+ * Definitions to make new IO structures look like old ones
+ */
+
+/*
+ * io_req and fields
+ */
+#include <device/io_req.h>
+
+#define buf io_req
+
+/*
+ * Redefine fields for drivers using old names
+ */
+#define b_flags io_op
+#define b_bcount io_count
+#define b_error io_error
+#define b_dev io_unit
+#define b_blkno io_recnum
+#define b_resid io_residual
+#define b_un io_un
+#define b_addr data
+#define av_forw io_next
+#define av_back io_prev
+#define b_physblock io_physrec
+#define b_blocktotal io_rectotal
+
+/*
+ * Redefine fields for driver request list heads, using old names.
+ */
+#define b_actf io_next
+#define b_actl io_prev
+#define b_forw io_link
+#define b_back io_rlink
+#define b_active io_count
+#define b_errcnt io_residual
+#define b_bufsize io_alloc_size
+
+/*
+ * Redefine flags
+ */
+#define B_WRITE IO_WRITE
+#define B_READ IO_READ
+#define B_OPEN IO_OPEN
+#define B_DONE IO_DONE
+#define B_ERROR IO_ERROR
+#define B_BUSY IO_BUSY
+#define B_WANTED IO_WANTED
+#define B_BAD IO_BAD
+#define B_CALL IO_CALL
+
+#define B_MD1 IO_SPARE_START
+
+/*
+ * Redefine uio structure
+ */
+#define uio io_req
+
+/*
+ * Redefine physio routine
+ */
+#define physio(strat, xbuf, dev, ops, minphys, ior) \
+ block_io(strat, minphys, ior)
+
+/*
+ * Export standard minphys routine.
+ */
+extern void	minphys();
+
+/*
+ * Alternate name for iodone
+ */
+#define biodone iodone
+#define biowait iowait
diff --git a/device/chario.c b/device/chario.c
new file mode 100644
index 00000000..b3994a58
--- /dev/null
+++ b/device/chario.c
@@ -0,0 +1,1089 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/88
+ *
+ * TTY io.
+ * Compatibility with old TTY device drivers.
+ */
+
+#include <mach/kern_return.h>
+#include <mach/mig_errors.h>
+#include <mach/vm_param.h>
+#include <machine/machspl.h> /* spl definitions */
+
+#include <ipc/ipc_port.h>
+
+#include <kern/lock.h>
+#include <kern/queue.h>
+
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+
+#include <device/device_types.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+#include "device_reply.h"
+
+#include <device/tty.h>
+
+/* If you change these, check that tty_outq_size and tty_inq_size
+ * are greater than the largest tthiwat entry.
+ */
+short tthiwat[16] =
+ { 100,100,100,100,100,100,100,200,200,400,400,400,650,650,1300,2000 };
+short ttlowat[16] =
+ { 30, 30, 30, 30, 30, 30, 30, 50, 50,120,120,120,125,125, 125, 125 };
+
+/*
+ * forward declarations
+ */
+void queue_delayed_reply(
+ queue_t, io_req_t, boolean_t (*)(io_req_t));
+void tty_output(struct tty *);
+void tty_flush(struct tty *, int);
+boolean_t char_open_done(io_req_t);
+boolean_t char_read_done(io_req_t);
+boolean_t char_write_done(io_req_t);
+
+/*
+ * Fake 'line discipline' switch for the benefit of old code
+ * that wants to call through it.
+ */
+struct ldisc_switch linesw[] = {
+ {
+ char_read,
+ char_write,
+ ttyinput,
+ ttymodem,
+ tty_output
+ }
+};
+
+/*
+ * Sizes for input and output circular buffers.
+ */
+int tty_inq_size = 4096; /* big nuf */
+int	tty_outq_size = 2048;	/* Must be bigger than tthiwat */
+int pdma_default = 1; /* turn pseudo dma on by default */
+
+/*
+ * compute pseudo-dma tables
+ */
+
+int pdma_timeouts[NSPEEDS]; /* how many ticks in timeout */
+int pdma_water_mark[NSPEEDS];
+
+
+void chario_init(void)
+{
+	/* The basic idea with the timeouts is to allow enough
+	   time for a character to show up if data is coming in at the full
+	   data rate, plus a little slack; 2 ticks is considered slack.
+	   Below 300 baud we just glob a character at a time. */
+#define _PR(x) ((hz/x) + 2)
+
+ int i;
+
+ for (i = B0; i < B300; i++)
+ pdma_timeouts[i] = 0;
+
+ pdma_timeouts[B300] = _PR(30);
+ pdma_timeouts[B600] = _PR(60);
+ pdma_timeouts[B1200] = _PR(120);
+ pdma_timeouts[B1800] = _PR(180);
+ pdma_timeouts[B2400] = _PR(240);
+ pdma_timeouts[B4800] = _PR(480);
+ pdma_timeouts[B9600] = _PR(960);
+ pdma_timeouts[EXTA] = _PR(1440); /* >14400 baud */
+ pdma_timeouts[EXTB] = _PR(1920); /* >19200 baud */
+
+ for (i = B0; i < B300; i++)
+ pdma_water_mark[i] = 0;
+
+ /* for the slow speeds, we try to buffer 0.02 of the baud rate
+ (20% of the character rate). For the faster lines,
+ we try to buffer 1/2 the input queue size */
+
+#undef _PR
+#define _PR(x) (0.20 * x)
+
+ pdma_water_mark[B300] = _PR(120);
+ pdma_water_mark[B600] = _PR(120);
+ pdma_water_mark[B1200] = _PR(120);
+ pdma_water_mark[B1800] = _PR(180);
+ pdma_water_mark[B2400] = _PR(240);
+ pdma_water_mark[B4800] = _PR(480);
+ i = tty_inq_size/2;
+ pdma_water_mark[B9600] = i;
+ pdma_water_mark[EXTA] = i; /* >14400 baud */
+ pdma_water_mark[EXTB] = i; /* >19200 baud */
+
+ return;
+}
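To make those tables concrete, assume hz = 100 ticks per second (a typical clock rate,
not something this file fixes):

	pdma_timeouts[B300]    = 100/30  + 2 = 5 ticks	(about 1.5 character times at 30 chars/sec)
	pdma_timeouts[B9600]   = 100/960 + 2 = 2 ticks	(the integer division contributes nothing)
	pdma_water_mark[B2400] = 0.20 * 240  = 48 characters
	pdma_water_mark[B9600] = tty_inq_size/2 = 2048 characters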
+
+/*
+ * Open TTY, waiting for CARR_ON.
+ * No locks may be held.
+ * May run on any CPU.
+ */
+io_return_t char_open(
+ int dev,
+ struct tty * tp,
+ dev_mode_t mode,
+ io_req_t ior)
+{
+ spl_t s;
+ io_return_t rc = D_SUCCESS;
+
+ s = spltty();
+ simple_lock(&tp->t_lock);
+
+ tp->t_dev = dev;
+
+ if (tp->t_mctl)
+ (*tp->t_mctl)(tp, TM_DTR, DMSET);
+
+ if (pdma_default)
+ tp->t_state |= TS_MIN;
+
+ if ((tp->t_state & TS_CARR_ON) == 0) {
+ /*
+ * No carrier.
+ */
+ if (mode & D_NODELAY) {
+ tp->t_state |= TS_ONDELAY;
+ }
+ else {
+ /*
+ * Don`t return from open until carrier detected.
+ */
+ tp->t_state |= TS_WOPEN;
+
+ ior->io_dev_ptr = (char *)tp;
+
+ queue_delayed_reply(&tp->t_delayed_open, ior, char_open_done);
+ rc = D_IO_QUEUED;
+ goto out;
+ }
+ }
+ tp->t_state |= TS_ISOPEN;
+ if (tp->t_mctl)
+ (*tp->t_mctl)(tp, TM_RTS, DMBIS);
+out:
+ simple_unlock(&tp->t_lock);
+ splx(s);
+ return rc;
+}
+
+/*
+ * Retry wait for CARR_ON for open.
+ * No locks may be held.
+ * May run on any CPU.
+ */
+boolean_t char_open_done(
+ io_req_t ior)
+{
+ register struct tty *tp = (struct tty *)ior->io_dev_ptr;
+ spl_t s = spltty();
+
+ simple_lock(&tp->t_lock);
+ if ((tp->t_state & TS_ISOPEN) == 0) {
+ queue_delayed_reply(&tp->t_delayed_open, ior, char_open_done);
+ simple_unlock(&tp->t_lock);
+ splx(s);
+ return FALSE;
+ }
+
+ tp->t_state |= TS_ISOPEN;
+ tp->t_state &= ~TS_WOPEN;
+
+ if (tp->t_mctl)
+ (*tp->t_mctl)(tp, TM_RTS, DMBIS);
+
+ simple_unlock(&tp->t_lock);
+ splx(s);
+
+ ior->io_error = D_SUCCESS;
+ (void) ds_open_done(ior);
+ return TRUE;
+}
+
+boolean_t tty_close_open_reply(
+ io_req_t ior)
+{
+ ior->io_error = D_DEVICE_DOWN;
+ (void) ds_open_done(ior);
+ return TRUE;
+}
+
+/*
+ * Write to TTY.
+ * No locks may be held.
+ * Calls device start routine; must already be on master if
+ * device needs to run on master.
+ */
+io_return_t char_write(
+ register struct tty * tp,
+ register io_req_t ior)
+{
+ spl_t s;
+ register int count;
+ register char *data;
+ vm_offset_t addr;
+ io_return_t rc = D_SUCCESS;
+
+ data = ior->io_data;
+ count = ior->io_count;
+ if (count == 0)
+ return rc;
+
+ if (!(ior->io_op & IO_INBAND)) {
+ /*
+ * Copy out-of-line data into kernel address space.
+ * Since data is copied as page list, it will be
+ * accessible.
+ */
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+ kern_return_t kr;
+
+ kr = vm_map_copyout(device_io_map, &addr, copy);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ data = (char *) addr;
+ }
+
+ /*
+ * Check for tty operating.
+ */
+ s = spltty();
+ simple_lock(&tp->t_lock);
+
+ if ((tp->t_state & TS_CARR_ON) == 0) {
+
+ if ((tp->t_state & TS_ONDELAY) == 0) {
+ /*
+ * No delayed writes - tell caller that device is down
+ */
+ rc = D_IO_ERROR;
+ goto out;
+ }
+
+ if (ior->io_mode & D_NOWAIT) {
+ rc = D_WOULD_BLOCK;
+ goto out;
+ }
+ }
+
+ /*
+ * Copy data into the output buffer.
+ * Report the amount not copied.
+ */
+
+ ior->io_residual = b_to_q(data, count, &tp->t_outq);
+
+ /*
+ * Start hardware output.
+ */
+
+ tp->t_state &= ~TS_TTSTOP;
+ tty_output(tp);
+
+ if (tp->t_outq.c_cc > TTHIWAT(tp) ||
+ (tp->t_state & TS_CARR_ON) == 0) {
+
+ /*
+ * Do not send reply until some characters have been sent.
+ */
+ ior->io_dev_ptr = (char *)tp;
+ queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done);
+
+ rc = D_IO_QUEUED;
+ }
+out:
+ simple_unlock(&tp->t_lock);
+ splx(s);
+
+ if (!(ior->io_op & IO_INBAND))
+ (void) vm_deallocate(device_io_map, addr, ior->io_count);
+ return rc;
+}
+
+/*
+ * Retry wait for output queue emptied, for write.
+ * No locks may be held.
+ * May run on any CPU.
+ */
+boolean_t char_write_done(
+ register io_req_t ior)
+{
+ register struct tty *tp = (struct tty *)ior->io_dev_ptr;
+ register spl_t s = spltty();
+
+ simple_lock(&tp->t_lock);
+ if (tp->t_outq.c_cc > TTHIWAT(tp) ||
+ (tp->t_state & TS_CARR_ON) == 0) {
+
+ queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done);
+ simple_unlock(&tp->t_lock);
+ splx(s);
+ return FALSE;
+ }
+ simple_unlock(&tp->t_lock);
+ splx(s);
+
+ if (IP_VALID(ior->io_reply_port)) {
+ (void) (*((ior->io_op & IO_INBAND) ?
+ ds_device_write_reply_inband :
+ ds_device_write_reply))(ior->io_reply_port,
+ ior->io_reply_port_type,
+ ior->io_error,
+ (int) (ior->io_total -
+ ior->io_residual));
+ }
+ mach_device_deallocate(ior->io_device);
+ return TRUE;
+}
+
+boolean_t tty_close_write_reply(
+ register io_req_t ior)
+{
+ ior->io_residual = ior->io_count;
+ ior->io_error = D_DEVICE_DOWN;
+ (void) ds_write_done(ior);
+ return TRUE;
+}
+
+/*
+ * Read from TTY.
+ * No locks may be held.
+ * May run on any CPU - does not talk to device driver.
+ */
+io_return_t char_read(
+ register struct tty *tp,
+ register io_req_t ior)
+{
+ spl_t s;
+ kern_return_t rc;
+
+ /*
+ * Allocate memory for read buffer.
+ */
+ rc = device_read_alloc(ior, (vm_size_t)ior->io_count);
+ if (rc != KERN_SUCCESS)
+ return rc;
+
+ s = spltty();
+ simple_lock(&tp->t_lock);
+ if ((tp->t_state & TS_CARR_ON) == 0) {
+
+ if ((tp->t_state & TS_ONDELAY) == 0) {
+ /*
+		 * No delayed reads - tell caller that device is down
+ */
+ rc = D_IO_ERROR;
+ goto out;
+ }
+
+ if (ior->io_mode & D_NOWAIT) {
+ rc = D_WOULD_BLOCK;
+ goto out;
+ }
+
+ }
+
+ if (tp->t_inq.c_cc <= 0 ||
+ (tp->t_state & TS_CARR_ON) == 0) {
+
+ ior->io_dev_ptr = (char *)tp;
+ queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done);
+ rc = D_IO_QUEUED;
+ goto out;
+ }
+
+ ior->io_residual = ior->io_count - q_to_b(&tp->t_inq,
+ ior->io_data,
+ (int)ior->io_count);
+ if (tp->t_state & TS_RTS_DOWN) {
+ (*tp->t_mctl)(tp, TM_RTS, DMBIS);
+ tp->t_state &= ~TS_RTS_DOWN;
+ }
+
+ out:
+ simple_unlock(&tp->t_lock);
+ splx(s);
+ return rc;
+}
+
+/*
+ * Retry wait for characters, for read.
+ * No locks may be held.
+ * May run on any CPU - does not talk to device driver.
+ */
+boolean_t char_read_done(
+ register io_req_t ior)
+{
+ register struct tty *tp = (struct tty *)ior->io_dev_ptr;
+ register spl_t s = spltty();
+
+ simple_lock(&tp->t_lock);
+
+ if (tp->t_inq.c_cc <= 0 ||
+ (tp->t_state & TS_CARR_ON) == 0) {
+
+ queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done);
+ simple_unlock(&tp->t_lock);
+ splx(s);
+ return FALSE;
+ }
+
+ ior->io_residual = ior->io_count - q_to_b(&tp->t_inq,
+ ior->io_data,
+ (int)ior->io_count);
+ if (tp->t_state & TS_RTS_DOWN) {
+ (*tp->t_mctl)(tp, TM_RTS, DMBIS);
+ tp->t_state &= ~TS_RTS_DOWN;
+ }
+
+ simple_unlock(&tp->t_lock);
+ splx(s);
+
+ (void) ds_read_done(ior);
+ return TRUE;
+}
+
+boolean_t tty_close_read_reply(
+ register io_req_t ior)
+{
+ ior->io_residual = ior->io_count;
+ ior->io_error = D_DEVICE_DOWN;
+ (void) ds_read_done(ior);
+ return TRUE;
+}
+
+/*
+ * Close the tty.
+ * Tty must be locked (at spltty).
+ * Must already be on master CPU if modem control needs to run on master.
+ */
+void ttyclose(
+ register struct tty *tp)
+{
+ register io_req_t ior;
+
+ /*
+ * Flush the read and write queues. Signal
+ * the open queue so that those waiting for open
+ * to complete will see that the tty is closed.
+ */
+ while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_read)) != 0) {
+ ior->io_done = tty_close_read_reply;
+ iodone(ior);
+ }
+ while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_write)) != 0) {
+ ior->io_done = tty_close_write_reply;
+ iodone(ior);
+ }
+ while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_open)) != 0) {
+ ior->io_done = tty_close_open_reply;
+ iodone(ior);
+ }
+
+ /* Close down modem */
+ if (tp->t_mctl) {
+ (*tp->t_mctl)(tp, TM_BRK|TM_RTS, DMBIC);
+ if ((tp->t_state&(TS_HUPCLS|TS_WOPEN)) || (tp->t_state&TS_ISOPEN)==0)
+ (*tp->t_mctl)(tp, TM_HUP, DMSET);
+ }
+
+ /* only save buffering bit, and carrier */
+ tp->t_state = tp->t_state & (TS_MIN|TS_CARR_ON);
+}
+
+/*
+ * Port-death routine to clean up reply messages.
+ */
+boolean_t
+tty_queue_clean(
+ queue_t q,
+ ipc_port_t port,
+ boolean_t (*routine)(io_req_t) )
+{
+ register io_req_t ior;
+
+ ior = (io_req_t)queue_first(q);
+ while (!queue_end(q, (queue_entry_t)ior)) {
+ if (ior->io_reply_port == port) {
+ remqueue(q, (queue_entry_t)ior);
+ ior->io_done = routine;
+ iodone(ior);
+ return TRUE;
+ }
+ ior = ior->io_next;
+ }
+ return FALSE;
+}
+
+/*
+ * Handle port-death (dead reply port) for tty.
+ * No locks may be held.
+ * May run on any CPU.
+ */
+boolean_t
+tty_portdeath(
+ struct tty * tp,
+ ipc_port_t port)
+{
+ register spl_t spl = spltty();
+ register boolean_t result;
+
+ simple_lock(&tp->t_lock);
+
+ /*
+ * The queues may never have been initialized
+ */
+ if (tp->t_delayed_read.next == 0) {
+ result = FALSE;
+ }
+ else {
+ result =
+ tty_queue_clean(&tp->t_delayed_read, port,
+ tty_close_read_reply)
+ || tty_queue_clean(&tp->t_delayed_write, port,
+ tty_close_write_reply)
+ || tty_queue_clean(&tp->t_delayed_open, port,
+ tty_close_open_reply);
+ }
+ simple_unlock(&tp->t_lock);
+ splx(spl);
+
+ return result;
+}
+
+/*
+ * Get TTY status.
+ * No locks may be held.
+ * May run on any CPU.
+ */
+io_return_t tty_get_status(
+ register struct tty *tp,
+ dev_flavor_t flavor,
+ int * data, /* pointer to OUT array */
+ natural_t *count) /* out */
+{
+ spl_t s;
+
+ switch (flavor) {
+ case TTY_STATUS:
+ {
+ register struct tty_status *tsp =
+ (struct tty_status *) data;
+
+ if (*count < TTY_STATUS_COUNT)
+ return (D_INVALID_OPERATION);
+
+ s = spltty();
+ simple_lock(&tp->t_lock);
+
+ tsp->tt_ispeed = tp->t_ispeed;
+ tsp->tt_ospeed = tp->t_ospeed;
+ tsp->tt_breakc = tp->t_breakc;
+ tsp->tt_flags = tp->t_flags;
+ if (tp->t_state & TS_HUPCLS)
+ tsp->tt_flags |= TF_HUPCLS;
+
+ simple_unlock(&tp->t_lock);
+ splx(s);
+
+ *count = TTY_STATUS_COUNT;
+ break;
+
+ }
+ default:
+ return D_INVALID_OPERATION;
+ }
+ return D_SUCCESS;
+}
+
+/*
+ * Set TTY status.
+ * No locks may be held.
+ * Calls device start or stop routines; must already be on master if
+ * device needs to run on master.
+ */
+io_return_t tty_set_status(
+ register struct tty *tp,
+ dev_flavor_t flavor,
+ int * data,
+ natural_t count)
+{
+ int s;
+
+ switch (flavor) {
+ case TTY_FLUSH:
+ {
+ register int flags;
+ if (count < TTY_FLUSH_COUNT)
+ return D_INVALID_OPERATION;
+
+ flags = *data;
+ if (flags == 0)
+ flags = D_READ | D_WRITE;
+
+ s = spltty();
+ simple_lock(&tp->t_lock);
+ tty_flush(tp, flags);
+ simple_unlock(&tp->t_lock);
+ splx(s);
+
+ break;
+ }
+ case TTY_STOP:
+ /* stop output */
+ s = spltty();
+ simple_lock(&tp->t_lock);
+ if ((tp->t_state & TS_TTSTOP) == 0) {
+ tp->t_state |= TS_TTSTOP;
+ (*tp->t_stop)(tp, 0);
+ }
+ simple_unlock(&tp->t_lock);
+ splx(s);
+ break;
+
+ case TTY_START:
+ /* start output */
+ s = spltty();
+ simple_lock(&tp->t_lock);
+ if (tp->t_state & TS_TTSTOP) {
+ tp->t_state &= ~TS_TTSTOP;
+ tty_output(tp);
+ }
+ simple_unlock(&tp->t_lock);
+ splx(s);
+ break;
+
+ case TTY_STATUS:
+ /* set special characters and speed */
+ {
+ register struct tty_status *tsp;
+
+ if (count < TTY_STATUS_COUNT)
+ return D_INVALID_OPERATION;
+
+ tsp = (struct tty_status *)data;
+
+ if (tsp->tt_ispeed < 0 ||
+ tsp->tt_ispeed >= NSPEEDS ||
+ tsp->tt_ospeed < 0 ||
+ tsp->tt_ospeed >= NSPEEDS)
+ {
+ return D_INVALID_OPERATION;
+ }
+
+ s = spltty();
+ simple_lock(&tp->t_lock);
+
+ tp->t_ispeed = tsp->tt_ispeed;
+ tp->t_ospeed = tsp->tt_ospeed;
+ tp->t_breakc = tsp->tt_breakc;
+ tp->t_flags = tsp->tt_flags & ~TF_HUPCLS;
+ if (tsp->tt_flags & TF_HUPCLS)
+ tp->t_state |= TS_HUPCLS;
+
+ simple_unlock(&tp->t_lock);
+ splx(s);
+ break;
+ }
+ default:
+ return D_INVALID_OPERATION;
+ }
+ return D_SUCCESS;
+}
+
+
+/*
+ * [internal]
+ * Queue IOR on reply queue, to wait for TTY operation.
+ * TTY must be locked (at spltty).
+ */
+void queue_delayed_reply(
+ queue_t qh,
+ io_req_t ior,
+ boolean_t (*io_done)(io_req_t) )
+{
+ ior->io_done = io_done;
+ enqueue_tail(qh, (queue_entry_t)ior);
+}
+
+/*
+ * Retry delayed IO operations for TTY.
+ * TTY containing queue must be locked (at spltty).
+ */
+void tty_queue_completion(
+ register queue_t qh)
+{
+ register io_req_t ior;
+
+ while ((ior = (io_req_t)dequeue_head(qh)) != 0) {
+ iodone(ior);
+ }
+}
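Taken together, these two helpers implement the delayed-reply protocol used throughout
this file. Traced for a write that overruns the output queue (a sketch of the flow; the
I/O-completion thread that iodone() hands requests to lives outside this file):

	1. char_write() finds t_outq above TTHIWAT, so it queues the ior on
	   t_delayed_write with io_done = char_write_done and returns D_IO_QUEUED.
	2. Once the device drains below TTLOWAT, ttstart()/tty_output() call
	   tty_queue_completion(&tp->t_delayed_write), which iodone()s each ior.
	3. The completion thread then runs char_write_done(): if the queue is still
	   too full (or carrier is gone) it re-queues the ior and returns FALSE;
	   otherwise it sends the write reply and returns TRUE.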
+
+/*
+ * Set the default special characters.
+ * Since this routine is called whenever a tty has never been opened,
+ * we can initialize the queues here.
+ */
+void ttychars(
+ register struct tty *tp)
+{
+ if ((tp->t_flags & TS_INIT) == 0) {
+ /*
+ * Initialize queues
+ */
+ queue_init(&tp->t_delayed_open);
+ queue_init(&tp->t_delayed_read);
+ queue_init(&tp->t_delayed_write);
+
+ /*
+ * Initialize character buffers
+ */
+ cb_alloc(&tp->t_inq, tty_inq_size);
+
+ /* if we might do modem flow control */
+ if (tp->t_mctl && tp->t_inq.c_hog > 30)
+ tp->t_inq.c_hog -= 30;
+
+ cb_alloc(&tp->t_outq, tty_outq_size);
+
+ /*
+ * Mark initialized
+ */
+ tp->t_state |= TS_INIT;
+ }
+
+ tp->t_breakc = 0;
+}
+
+/*
+ * Flush all TTY queues.
+ * Called at spltty, tty already locked.
+ * Calls device STOP routine; must already be on master if
+ * device needs to run on master.
+ */
+void tty_flush(
+ register struct tty *tp,
+ int rw)
+{
+ if (rw & D_READ) {
+ cb_clear(&tp->t_inq);
+ tty_queue_completion(&tp->t_delayed_read);
+ }
+ if (rw & D_WRITE) {
+ tp->t_state &= ~TS_TTSTOP;
+ (*tp->t_stop)(tp, rw);
+ cb_clear(&tp->t_outq);
+ tty_queue_completion(&tp->t_delayed_write);
+ }
+}
+
+/*
+ * Restart character output after a delay timeout.
+ * Calls device start routine - must be on master CPU.
+ *
+ * Timeout routines are called only on master CPU.
+ * What if device runs on a different CPU?
+ */
+void ttrstrt(
+ register struct tty *tp)
+{
+ register spl_t s;
+
+ s = spltty();
+ simple_lock(&tp->t_lock);
+
+ tp->t_state &= ~TS_TIMEOUT;
+ ttstart (tp);
+
+ simple_unlock(&tp->t_lock);
+ splx(s);
+}
+
+/*
+ * Start output on the typewriter. It is used from the top half
+ * after some characters have been put on the output queue,
+ * from the interrupt routine to transmit the next
+ * character, and after a timeout has finished.
+ *
+ * Called at spltty, tty already locked.
+ * Must be on master CPU if device runs on master.
+ */
+void ttstart(tp)
+ register struct tty *tp;
+{
+ if ((tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) == 0) {
+ /*
+ * Start up the hardware again
+ */
+ (*tp->t_start)(tp);
+
+ /*
+ * Wake up those waiting for write completion.
+ */
+ if (tp->t_outq.c_cc <= TTLOWAT(tp))
+ tty_queue_completion(&tp->t_delayed_write);
+ }
+}
+
+/*
+ * Start character output, if the device is not busy or
+ * stopped or waiting for a timeout.
+ *
+ * Called at spltty, tty already locked.
+ * Must be on master CPU if device runs on master.
+ */
+void tty_output(
+ register struct tty *tp)
+{
+ if ((tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) == 0) {
+ /*
+ * Not busy. Start output.
+ */
+ (*tp->t_start)(tp);
+
+ /*
+ * Wake up those waiting for write completion.
+ */
+ if (tp->t_outq.c_cc <= TTLOWAT(tp))
+ tty_queue_completion(&tp->t_delayed_write);
+ }
+}
+
+/*
+ * Send any buffered recvd chars up to user
+ */
+void ttypush(
+ register struct tty *tp)
+{
+ spl_t s = spltty();
+ register int state;
+
+ simple_lock(&tp->t_lock);
+
+ /*
+ The pdma timeout has gone off.
+ If no character has been received since the timeout
+ was set, push any pending characters up.
+ If any characters were received in the last interval
+ then just reset the timeout and the character received bit.
+ */
+
+ state = tp->t_state;
+
+ if (state & TS_MIN_TO)
+ {
+ if (state & TS_MIN_TO_RCV)
+ { /* a character was received */
+ tp->t_state = state & ~TS_MIN_TO_RCV;
+ timeout(ttypush,tp,pdma_timeouts[tp->t_ispeed]);
+ }
+ else
+ {
+ tp->t_state = state & ~TS_MIN_TO;
+ if (tp->t_inq.c_cc) /* pending characters */
+ tty_queue_completion(&tp->t_delayed_read);
+ }
+ }
+ else
+ {
+ tp->t_state = state & ~TS_MIN_TO_RCV;/* sanity */
+ }
+
+ simple_unlock(&tp->t_lock);
+ splx(s);
+}
+
+/*
+ * Put input character on input queue.
+ *
+ * Called at spltty, tty already locked.
+ */
+void ttyinput(
+ unsigned int c,
+ struct tty *tp)
+{
+ if (tp->t_inq.c_cc >= tp->t_inq.c_hog) {
+ /*
+ * Do not want to overflow input queue
+ */
+ if (tp->t_mctl) {
+ (*tp->t_mctl)(tp, TM_RTS, DMBIC);
+ tp->t_state |= TS_RTS_DOWN;
+ }
+ tty_queue_completion(&tp->t_delayed_read);
+ return;
+
+ }
+
+ c &= 0xff;
+
+ (void) putc(c, &tp->t_inq);
+ if ((tp->t_state & TS_MIN) == 0 ||
+ tp->t_inq.c_cc > pdma_water_mark[tp->t_ispeed])
+ {
+ /*
+ * No input buffering, or input minimum exceeded.
+ * Grab a request from input queue and queue it
+ * to io_done thread.
+ */
+ if (tp->t_state & TS_MIN_TO) {
+ tp->t_state &= ~(TS_MIN_TO|TS_MIN_TO_RCV);
+ untimeout(ttypush, tp);
+ }
+ tty_queue_completion(&tp->t_delayed_read);
+ }
+ else {
+ /*
+ * Not enough characters.
+ * If no timeout is set, initiate the timeout
+ * Otherwise set the character received during timeout interval
+ * flag.
+ * One alternative approach would be just to reset the timeout
+ * into the future, but this involves making a timeout/untimeout
+ * call on every character.
+ */
+ register int ptime = pdma_timeouts[tp->t_ispeed];
+ if (ptime > 0)
+ {
+ if ((tp->t_state & TS_MIN_TO) == 0)
+ {
+ tp->t_state |= TS_MIN_TO;
+ timeout(ttypush, tp, ptime);
+ }
+ else
+ {
+ tp->t_state |= TS_MIN_TO_RCV;
+ }
+ }
+ }
+}
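
Editor's note: ttyinput() is the driver's per-character entry point. Below is a minimal sketch of a receive interrupt feeding it under the locking rules stated in the comment above; mydev_rx_intr(), mydev_rx_ready() and mydev_rx_char() are hypothetical hardware accessors, not part of this tree.

void mydev_rx_intr(struct tty *tp)
{
	spl_t	s = spltty();			/* mask further tty interrupts */

	simple_lock(&tp->t_lock);		/* ttyinput expects the tty locked */
	while (mydev_rx_ready())		/* hypothetical: RX FIFO not empty */
		ttyinput(mydev_rx_char(), tp);	/* hypothetical: fetch one byte */
	simple_unlock(&tp->t_lock);
	splx(s);
}
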
+
+/*
+ * Put many characters on input queue.
+ *
+ * Called at spltty, tty already locked.
+ */
+void ttyinput_many(
+ struct tty *tp,
+ unsigned char *chars,
+ int count)
+{
+ /*
+ * Do not want to overflow input queue
+ */
+ if (tp->t_inq.c_cc < tp->t_inq.c_hog)
+ count -= b_to_q( chars, count, &tp->t_inq);
+
+ tty_queue_completion(&tp->t_delayed_read);
+}
+
+
+/*
+ * Handle modem control transition on a tty.
+ * Flag indicates new state of carrier.
+ * Returns FALSE if the line should be turned off.
+ *
+ * Called at spltty, tty already locked.
+ */
+boolean_t ttymodem(
+ struct tty * tp,
+ boolean_t carrier_up)
+{
+ if ((tp->t_state&TS_WOPEN) == 0 && (tp->t_flags & MDMBUF)) {
+ /*
+ * Flow control by carrier. Carrier down stops
+ * output; carrier up restarts output.
+ */
+ if (carrier_up) {
+ tp->t_state &= ~TS_TTSTOP;
+ tty_output(tp);
+ }
+ else if ((tp->t_state&TS_TTSTOP) == 0) {
+ tp->t_state |= TS_TTSTOP;
+ (*tp->t_stop)(tp, 0);
+ }
+ }
+ else if (carrier_up) {
+ /*
+ * Carrier now on.
+ */
+ tp->t_state |= TS_CARR_ON;
+ tt_open_wakeup(tp);
+ }
+ else {
+ /*
+ * Lost carrier.
+ */
+ tp->t_state &= ~TS_CARR_ON;
+ if (tp->t_state & TS_ISOPEN &&
+ (tp->t_flags & NOHANG) == 0)
+ {
+ /*
+ * Hang up TTY if carrier drops.
+ * Need to alert users, somehow...
+ */
+ tty_flush(tp, D_READ|D_WRITE);
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+/*
+ * Similarly, handle transitions on the ClearToSend
+ * signal. Nowadays, it is used by many modems as
+ * a flow-control device: they turn it down to stop
+ * us from sending more chars. We do the same with
+ * the RequestToSend signal. [Yes, that is exactly
+ * why those signals are defined in the standard.]
+ *
+ * Tty must be locked and on master.
+ */
+void tty_cts(
+ struct tty * tp,
+ boolean_t cts_up)
+{
+ if (tp->t_state & TS_ISOPEN){
+ if (cts_up) {
+ tp->t_state &= ~(TS_TTSTOP|TS_BUSY);
+ tty_output(tp);
+ } else {
+ tp->t_state |= (TS_TTSTOP|TS_BUSY);
+ (*tp->t_stop)(tp, D_WRITE);
+ }
+ }
+}
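
Editor's note: ttymodem() and tty_cts() are meant to be driven from a driver's modem-status interrupt. A sketch under the same locking rules; mydev_modem_intr(), MYDEV_DCD and MYDEV_CTS are hypothetical names, and the hangup action is driver-specific.

void mydev_modem_intr(struct tty *tp, int status)
{
	spl_t	s = spltty();

	simple_lock(&tp->t_lock);
	if (!ttymodem(tp, (status & MYDEV_DCD) != 0)) {
		/* carrier lost and NOHANG clear: the queues are already
		   flushed; a real driver would hang the line up here,
		   e.g. by dropping DTR through tp->t_mctl. */
	}
	tty_cts(tp, (status & MYDEV_CTS) != 0);
	simple_unlock(&tp->t_lock);
	splx(s);
}
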
diff --git a/device/cirbuf.c b/device/cirbuf.c
new file mode 100644
index 00000000..96531687
--- /dev/null
+++ b/device/cirbuf.c
@@ -0,0 +1,298 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ *
+ * Circular buffers for TTY
+ */
+
+#include <device/cirbuf.h>
+#include <kern/kalloc.h>
+
+
+
+/* read at c_cf, write at c_cl */
+/* if c_cf == c_cl, buffer is empty */
+/* if c_cl == c_cf - 1, buffer is full */
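
Editor's note: the invariants above mean a buffer of size N holds at most N-1 characters, and that c_cc is always derivable from the two pointers. A sketch of recomputing the count (hypothetical helper, mirroring cb_check below):

int cb_count(struct cirbuf *cb)
{
	if (cb->c_cf <= cb->c_cl)
		return cb->c_cl - cb->c_cf;				/* no wrap */
	return (cb->c_end - cb->c_cf) + (cb->c_cl - cb->c_start);	/* wrapped */
}
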
+
+#if DEBUG
+int cb_check_enable = 0;
+#define CB_CHECK(cb) if (cb_check_enable) cb_check(cb)
+
+void
+cb_check(register struct cirbuf *cb)
+{
+ if (!(cb->c_cf >= cb->c_start && cb->c_cf < cb->c_end))
+ panic("cf %x out of range [%x..%x)",
+ cb->c_cf, cb->c_start, cb->c_end);
+ if (!(cb->c_cl >= cb->c_start && cb->c_cl < cb->c_end))
+ panic("cl %x out of range [%x..%x)",
+ cb->c_cl, cb->c_start, cb->c_end);
+ if (cb->c_cf <= cb->c_cl) {
+ if (!(cb->c_cc == cb->c_cl - cb->c_cf))
+ panic("cc %x should be %x",
+ cb->c_cc,
+ cb->c_cl - cb->c_cf);
+ }
+ else {
+ if (!(cb->c_cc == cb->c_end - cb->c_cf
+ + cb->c_cl - cb->c_start))
+ panic("cc %x should be %x",
+ cb->c_cc,
+ cb->c_end - cb->c_cf +
+ cb->c_cl - cb->c_start);
+ }
+}
+#else /* DEBUG */
+#define CB_CHECK(cb)
+#endif /* DEBUG */
+
+/*
+ * Put one character in circular buffer.
+ */
+int putc(
+ int c,
+ register struct cirbuf *cb)
+{
+ register char *ow, *nw;
+
+ ow = cb->c_cl;
+ nw = ow+1;
+ if (nw == cb->c_end)
+ nw = cb->c_start;
+ if (nw == cb->c_cf)
+ return 1; /* not entered */
+ *ow = c;
+ cb->c_cl = nw;
+
+ cb->c_cc++;
+
+ CB_CHECK(cb);
+
+ return 0;
+}
+
+/*
+ * Get one character from circular buffer.
+ */
+int getc(register struct cirbuf *cb)
+{
+ register unsigned char *nr;
+ register int c;
+
+ nr = (unsigned char *)cb->c_cf;
+ if (nr == (unsigned char *)cb->c_cl) {
+ CB_CHECK(cb);
+ return -1; /* empty */
+ }
+ c = *nr;
+ nr++;
+ if (nr == (unsigned char *)cb->c_end)
+ nr = (unsigned char *)cb->c_start;
+ cb->c_cf = (char *)nr;
+
+ cb->c_cc--;
+
+ CB_CHECK(cb);
+
+ return c;
+}
+
+/*
+ * Get lots of characters.
+ * Return number moved.
+ */
+int
+q_to_b( register struct cirbuf *cb,
+ register char *cp,
+ register int count)
+{
+ char * ocp = cp;
+ register int i;
+
+ while (count != 0) {
+ if (cb->c_cl == cb->c_cf)
+ break; /* empty */
+ if (cb->c_cl < cb->c_cf)
+ i = cb->c_end - cb->c_cf;
+ else
+ i = cb->c_cl - cb->c_cf;
+ if (i > count)
+ i = count;
+ bcopy(cb->c_cf, cp, i);
+ cp += i;
+ count -= i;
+ cb->c_cf += i;
+ cb->c_cc -= i;
+ if (cb->c_cf == cb->c_end)
+ cb->c_cf = cb->c_start;
+
+ CB_CHECK(cb);
+ }
+ CB_CHECK(cb);
+
+ return cp - ocp;
+}
+
+/*
+ * Add character array to buffer and return number of characters
+ * NOT entered.
+ */
+int
+b_to_q( register char * cp,
+ int count,
+ register struct cirbuf *cb)
+{
+ register int i;
+ register char *lim;
+
+ while (count != 0) {
+ lim = cb->c_cf - 1;
+ if (lim < cb->c_start)
+ lim = cb->c_end - 1;
+
+ if (cb->c_cl == lim)
+ break;
+ if (cb->c_cl < lim)
+ i = lim - cb->c_cl;
+ else
+ i = cb->c_end - cb->c_cl;
+
+ if (i > count)
+ i = count;
+ bcopy(cp, cb->c_cl, i);
+ cp += i;
+ count -= i;
+ cb->c_cc += i;
+ cb->c_cl += i;
+ if (cb->c_cl == cb->c_end)
+ cb->c_cl = cb->c_start;
+
+ CB_CHECK(cb);
+ }
+ CB_CHECK(cb);
+ return count;
+}
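
Editor's note: the two bulk routines use opposite return conventions -- q_to_b() returns the number of characters moved, b_to_q() the number that did NOT fit. A sketch (echo_pending() is a hypothetical helper; the caller is assumed to hold the tty lock at spltty):

void echo_pending(struct tty *tp)
{
	char	buf[64];
	int	moved, left;

	moved = q_to_b(&tp->t_inq, buf, sizeof buf);	/* characters copied out */
	left  = b_to_q(buf, moved, &tp->t_outq);	/* characters NOT entered */
	/* if left > 0, the trailing 'left' characters were dropped: output full */
}
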
+
+/*
+ * Return number of contiguous characters up to a character
+ * that matches the mask.
+ */
+int
+ndqb( register struct cirbuf *cb,
+ register int mask)
+{
+ register char *cp, *lim;
+
+ if (cb->c_cl < cb->c_cf)
+ lim = cb->c_end;
+ else
+ lim = cb->c_cl;
+ if (mask == 0)
+ return (lim - cb->c_cf);
+ cp = cb->c_cf;
+ while (cp < lim) {
+ if (*cp & mask)
+ break;
+ cp++;
+ }
+ return (cp - cb->c_cf);
+}
+
+/*
+ * Flush characters from circular buffer.
+ */
+void
+ndflush(register struct cirbuf *cb,
+ register int count)
+{
+ register int i;
+
+ while (count != 0) {
+ if (cb->c_cl == cb->c_cf)
+ break; /* empty */
+ if (cb->c_cl < cb->c_cf)
+ i = cb->c_end - cb->c_cf;
+ else
+ i = cb->c_cl - cb->c_cf;
+ if (i > count)
+ i = count;
+ count -= i;
+ cb->c_cf += i;
+ cb->c_cc -= i;
+ if (cb->c_cf == cb->c_end)
+ cb->c_cf = cb->c_start;
+ CB_CHECK(cb);
+ }
+
+ CB_CHECK(cb);
+}
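
Editor's note: ndqb() and ndflush() are typically used as a pair by pseudo-DMA output code -- measure a contiguous run starting at c_cf, hand it to the hardware, then flush it from the queue. A sketch; start_tx() and mydev_tx() are hypothetical, and a real driver would normally call ndflush() from its transmit-done path rather than immediately.

void start_tx(struct tty *tp)
{
	int	n;

	n = ndqb(&tp->t_outq, 0);		/* contiguous bytes, no delimiter mask */
	if (n > 0) {
		mydev_tx(tp->t_outq.c_cf, n);	/* hypothetical: start the transfer */
		ndflush(&tp->t_outq, n);	/* consume them from the queue */
	}
}
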
+
+/*
+ * Empty a circular buffer.
+ */
+void cb_clear(struct cirbuf *cb)
+{
+ cb->c_cf = cb->c_start;
+ cb->c_cl = cb->c_start;
+ cb->c_cc = 0;
+}
+
+/*
+ * Allocate character space for a circular buffer.
+ */
+void
+cb_alloc(
+ register struct cirbuf *cb,
+ int buf_size)
+{
+ register char *buf;
+
+ buf = (char *)kalloc(buf_size);
+
+ cb->c_start = buf;
+ cb->c_end = buf + buf_size;
+ cb->c_cf = buf;
+ cb->c_cl = buf;
+ cb->c_cc = 0;
+ cb->c_hog = buf_size - 1;
+
+ CB_CHECK(cb);
+}
+
+/*
+ * Free character space for a circular buffer.
+ */
+void
+cb_free(register struct cirbuf *cb)
+{
+ int size;
+
+ size = cb->c_end - cb->c_start;
+ kfree((vm_offset_t)cb->c_start, size);
+}
+
diff --git a/device/cirbuf.h b/device/cirbuf.h
new file mode 100644
index 00000000..a3f50ce5
--- /dev/null
+++ b/device/cirbuf.h
@@ -0,0 +1,62 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#ifndef _DEVICE_CIRBUF_H_
+#define _DEVICE_CIRBUF_H_
+
+/*
+ * Circular buffers for TTY
+ */
+
+struct cirbuf {
+ char * c_start; /* start of buffer */
+ char * c_end; /* end of buffer + 1*/
+ char * c_cf; /* read pointer */
+ char * c_cl; /* write pointer */
+ short c_cc; /* current number of characters
+ (compatibility) */
+ short c_hog; /* max ever */
+};
+
+/*
+ * Exported routines
+ */
+extern int putc(int, struct cirbuf *);
+extern int getc(struct cirbuf *);
+extern int q_to_b(struct cirbuf *, char *, int);
+extern int b_to_q(char *, int, struct cirbuf *);
+extern int ndqb(struct cirbuf *, int);
+extern void ndflush(struct cirbuf *, int);
+extern void cb_clear(struct cirbuf *);
+
+extern void cb_alloc(struct cirbuf *, int);
+extern void cb_free(struct cirbuf *);
+
+#endif /* _DEVICE_CIRBUF_H_ */
diff --git a/device/conf.h b/device/conf.h
new file mode 100644
index 00000000..7ca5dd67
--- /dev/null
+++ b/device/conf.h
@@ -0,0 +1,114 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/88
+ */
+
+#ifndef _DEVICE_CONF_H_
+#define _DEVICE_CONF_H_
+
+#include <mach/machine/vm_types.h>
+
+/*
+ * Operations list for major device types.
+ */
+struct dev_ops {
+ char * d_name; /* name for major device */
+ int (*d_open)(); /* open device */
+ int (*d_close)(); /* close device */
+ int (*d_read)(); /* read */
+ int (*d_write)(); /* write */
+ int (*d_getstat)(); /* get status/control */
+ int (*d_setstat)(); /* set status/control */
+ vm_offset_t (*d_mmap)(); /* map memory */
+ int (*d_async_in)();/* asynchronous input setup */
+ int (*d_reset)(); /* reset device */
+ int (*d_port_death)();
+ /* clean up reply ports */
+ int d_subdev; /* number of sub-devices per
+ unit */
+ int (*d_dev_info)(); /* driver info for kernel */
+};
+typedef struct dev_ops *dev_ops_t;
+
+/*
+ * Routines for null entries.
+ */
+extern int nulldev(); /* no operation - OK */
+extern int nodev(); /* no operation - error */
+extern vm_offset_t nomap(); /* no operation - error */
+
+/*
+ * Flavor constants for d_dev_info routine
+ */
+#define D_INFO_BLOCK_SIZE 1
+
+/*
+ * Head of list of attached devices
+ */
+extern struct dev_ops dev_name_list[];
+extern int dev_name_count;
+
+/*
+ * Macro to search device list
+ */
+#define dev_search(dp) \
+ for (dp = dev_name_list; \
+ dp < &dev_name_list[dev_name_count]; \
+ dp++)
+
+/*
+ * Indirection vectors for certain devices.
+ */
+struct dev_indirect {
+ char * d_name; /* name for device */
+ dev_ops_t d_ops; /* operations (major device) */
+ int d_unit; /* and unit number */
+};
+typedef struct dev_indirect *dev_indirect_t;
+
+/*
+ * List of indirect devices.
+ */
+extern struct dev_indirect dev_indirect_list[];
+extern int dev_indirect_count;
+
+/*
+ * Macro to search indirect list
+ */
+#define dev_indirect_search(di) \
+ for (di = dev_indirect_list; \
+ di < &dev_indirect_list[dev_indirect_count]; \
+ di++)
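
Editor's note: both search macros expect the caller to declare the cursor variable itself. A sketch of scanning the two tables for a name (find_ops() is a hypothetical helper):

dev_ops_t find_ops(char *name)
{
	dev_ops_t	dp;
	dev_indirect_t	di;

	dev_search(dp)
		if (strcmp(dp->d_name, name) == 0)
			return dp;
	dev_indirect_search(di)
		if (strcmp(di->d_name, name) == 0)
			return di->d_ops;
	return (dev_ops_t)0;
}
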
+
+/*
+ * Exported routine to set indirection.
+ */
+extern void dev_set_indirect();
+
+#endif /* _DEVICE_CONF_H_ */
+
diff --git a/device/cons.c b/device/cons.c
new file mode 100644
index 00000000..954f5276
--- /dev/null
+++ b/device/cons.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 1988-1994, The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Utah $Hdr: cons.c 1.14 94/12/14$
+ */
+
+#ifdef MACH_KERNEL
+#include <sys/types.h>
+#include <device/conf.h>
+#include <mach/boolean.h>
+#include <cons.h>
+#else
+#include <sys/param.h>
+#include <sys/user.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/file.h>
+#include <sys/conf.h>
+#include <hpdev/cons.h>
+#endif
+
+static int cn_inited = 0;
+static struct consdev *cn_tab = 0; /* physical console device info */
+#ifndef MACH_KERNEL
+static struct tty *constty = 0; /* virtual console output device */
+#endif
+
+/*
+ * ROM getc/putc primitives.
+ * On some architectures, the boot ROM provides basic character input/output
+ * routines that can be used before devices are configured or virtual memory
+ * is enabled. This can be useful to debug (or catch panics from) code early
+ * in the bootstrap procedure.
+ */
+int (*romgetc)() = 0;
+void (*romputc)() = 0;
+
+#if CONSBUFSIZE > 0
+/*
+ * Temporary buffer to store console output before a console is selected.
+ * This is statically allocated so it can be used before malloc/kmem_alloc
+ * have been initialized. It is initialized so it won't be clobbered as
+ * part of the zeroing of BSS (on PA/Mach).
+ */
+static char consbuf[CONSBUFSIZE] = { 0 };
+static char *consbp = consbuf;
+static int consbufused = 0;
+#endif
+
+cninit()
+{
+ struct consdev *cp;
+#ifdef MACH_KERNEL
+ dev_ops_t cn_ops;
+ int x;
+#endif
+
+ if (cn_inited)
+ return;
+
+ /*
+ * Collect information about all possible consoles
+ * and find the one with highest priority
+ */
+ for (cp = constab; cp->cn_probe; cp++) {
+ (*cp->cn_probe)(cp);
+ if (cp->cn_pri > CN_DEAD &&
+ (cn_tab == NULL || cp->cn_pri > cn_tab->cn_pri))
+ cn_tab = cp;
+ }
+ /*
+ * Found a console, initialize it.
+ */
+ if (cp = cn_tab) {
+ /*
+ * Initialize as console
+ */
+ (*cp->cn_init)(cp);
+#ifdef MACH_KERNEL
+ /*
+ * Look up its dev_ops pointer in the device table and
+ * place it in the device indirection table.
+ */
+ if (dev_name_lookup(cp->cn_name, &cn_ops, &x) == FALSE)
+ panic("cninit: dev_name_lookup failed");
+ dev_set_indirection("console", cn_ops, minor(cp->cn_dev));
+#endif
+#if CONSBUFSIZE > 0
+ /*
+ * Now that the console is initialized, dump any chars in
+ * the temporary console buffer.
+ */
+ if (consbufused) {
+ char *cbp = consbp;
+ do {
+ if (*cbp)
+ cnputc(*cbp);
+ if (++cbp == &consbuf[CONSBUFSIZE])
+ cbp = consbuf;
+ } while (cbp != consbp);
+ consbufused = 0;
+ }
+#endif
+ cn_inited = 1;
+ return;
+ }
+ /*
+ * No console device found, not a problem for BSD, fatal for Mach
+ */
+#ifdef MACH_KERNEL
+ panic("can't find a console device");
+#endif
+}
+
+#ifndef MACH_KERNEL
+cnopen(dev, flag)
+ dev_t dev;
+{
+ if (cn_tab == NULL)
+ return(0);
+ dev = cn_tab->cn_dev;
+ return ((*cdevsw[major(dev)].d_open)(dev, flag));
+}
+
+cnclose(dev, flag)
+ dev_t dev;
+{
+ if (cn_tab == NULL)
+ return(0);
+ dev = cn_tab->cn_dev;
+ return ((*cdevsw[major(dev)].d_close)(dev, flag));
+}
+
+cnread(dev, uio)
+ dev_t dev;
+ struct uio *uio;
+{
+ if (cn_tab == NULL)
+ return(0);
+ dev = cn_tab->cn_dev;
+ return ((*cdevsw[major(dev)].d_read)(dev, uio));
+}
+
+cnwrite(dev, uio)
+ dev_t dev;
+ struct uio *uio;
+{
+ if (cn_tab == NULL)
+ return(0);
+ dev = cn_tab->cn_dev;
+ return ((*cdevsw[major(dev)].d_write)(dev, uio));
+}
+
+cnioctl(dev, cmd, data, flag)
+ dev_t dev;
+ caddr_t data;
+{
+ if (cn_tab == NULL)
+ return(0);
+ /*
+ * Superuser can always use this to wrest control of console
+ * output from the "virtual" console.
+ */
+ if (cmd == TIOCCONS && constty) {
+ if (!suser())
+ return(EPERM);
+ constty = NULL;
+ return(0);
+ }
+ dev = cn_tab->cn_dev;
+ return ((*cdevsw[major(dev)].d_ioctl)(dev, cmd, data, flag));
+}
+
+cnselect(dev, rw)
+ dev_t dev;
+ int rw;
+{
+ if (cn_tab == NULL)
+ return(1);
+ return(ttselect(cn_tab->cn_dev, rw));
+}
+
+#ifndef hp300
+/*
+ * XXX Should go away when the new CIO MUX driver is in place
+ */
+#define d_control d_mmap
+cncontrol(dev, cmd, data)
+ dev_t dev;
+ int cmd;
+ int data;
+{
+ if (cn_tab == NULL)
+ return(0);
+ dev = cn_tab->cn_dev;
+ return((*cdevsw[major(dev)].d_control)(dev, cmd, data));
+}
+#undef d_control
+#endif
+#endif
+
+cngetc()
+{
+ if (cn_tab)
+ return ((*cn_tab->cn_getc)(cn_tab->cn_dev, 1));
+ if (romgetc)
+ return ((*romgetc)(1));
+ return (0);
+}
+
+#ifdef MACH_KERNEL
+cnmaygetc()
+{
+ if (cn_tab)
+ return((*cn_tab->cn_getc)(cn_tab->cn_dev, 0));
+ if (romgetc)
+ return ((*romgetc)(0));
+ return (0);
+}
+#endif
+
+cnputc(c)
+ int c;
+{
+ if (c == 0)
+ return;
+
+ if (cn_tab) {
+ (*cn_tab->cn_putc)(cn_tab->cn_dev, c);
+ if (c == '\n')
+ (*cn_tab->cn_putc)(cn_tab->cn_dev, '\r');
+ } else if (romputc) {
+ (*romputc)(c);
+ if (c == '\n')
+ (*romputc)('\r');
+ }
+#if CONSBUFSIZE > 0
+ else {
+ if (consbufused == 0) {
+ consbp = consbuf;
+ consbufused = 1;
+ bzero(consbuf, CONSBUFSIZE);
+ }
+ *consbp++ = c;
+ if (consbp >= &consbuf[CONSBUFSIZE])
+ consbp = consbuf;
+ }
+#endif
+}
diff --git a/device/cons.h b/device/cons.h
new file mode 100644
index 00000000..bcaeffc4
--- /dev/null
+++ b/device/cons.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1988-1994, The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Utah $Hdr: cons.h 1.10 94/12/14$
+ */
+
+struct consdev {
+#ifdef MACH_KERNEL
+ char *cn_name; /* name of device in dev_name_list */
+#endif
+ int (*cn_probe)(); /* probe hardware and fill in consdev info */
+ int (*cn_init)(); /* turn on as console */
+ int (*cn_getc)(); /* kernel getchar interface */
+ int (*cn_putc)(); /* kernel putchar interface */
+ dev_t cn_dev; /* major/minor of device */
+ short cn_pri; /* pecking order; the higher the better */
+};
+
+/* values for cn_pri - reflect our policy for console selection */
+#define CN_DEAD 0 /* device doesn't exist */
+#define CN_NORMAL 1 /* device exists but is nothing special */
+#define CN_INTERNAL 2 /* "internal" bit-mapped display */
+#define CN_REMOTE 3 /* serial interface with remote bit set */
+
+/* XXX */
+#define CONSMAJOR 0
+
+#define CONSBUFSIZE 1024
+
+#ifdef KERNEL
+extern struct consdev constab[];
+extern struct consdev *cn_tab;
+#endif
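
Editor's note: each machine-dependent configuration is expected to supply constab[], which cninit() in cons.c walks until it hits a null cn_probe. A sketch of what an entry looks like; the "mycon" driver and its routines are hypothetical, and cn_dev/cn_pri are left for the probe routine to fill in.

extern int mycon_probe(), mycon_init(), mycon_getc(), mycon_putc();

struct consdev constab[] = {
#ifdef MACH_KERNEL
	{ "mycon", mycon_probe, mycon_init, mycon_getc, mycon_putc },
#else
	{ mycon_probe, mycon_init, mycon_getc, mycon_putc },
#endif
	{ 0 }
};
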
diff --git a/device/dev_forward.defs b/device/dev_forward.defs
new file mode 100644
index 00000000..a237bb86
--- /dev/null
+++ b/device/dev_forward.defs
@@ -0,0 +1,44 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: Joseph S. Barrera, Carnegie Mellon University
+ * Date: 12/90
+ */
+
+subsystem KernelUser dev_forward 2800;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <device/device_types.defs>
+
+type reply_port_t = MACH_MSG_TYPE_MOVE_SEND_ONCE | polymorphic
+ ctype: mach_port_t;
+
+simpleroutine forward_device_open_send(
+ master_port : mach_port_t;
+ ureplyport reply_port : reply_port_t;
+ mode : dev_mode_t;
+ name : dev_name_t);
diff --git a/device/dev_hdr.h b/device/dev_hdr.h
new file mode 100644
index 00000000..b976caf9
--- /dev/null
+++ b/device/dev_hdr.h
@@ -0,0 +1,108 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ */
+
+#ifndef _DEVICE_DEV_HDR_H_
+#define _DEVICE_DEV_HDR_H_
+
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+
+#include <device/conf.h>
+
+#ifdef i386
+#include <i386at/dev_hdr.h>
+#else
+#define mach_device device
+#define mach_device_t device_t
+#define MACH_DEVICE_NULL DEVICE_NULL
+#define mach_device_reference device_reference
+#define mach_device_deallocate device_deallocate
+#define mach_convert_device_to_port convert_device_to_port
+#endif
+
+/*
+ * Generic device header. May be allocated with the device,
+ * or built when the device is opened.
+ */
+struct mach_device {
+ decl_simple_lock_data(,ref_lock)/* lock for reference count */
+ int ref_count; /* reference count */
+ decl_simple_lock_data(, lock) /* lock for rest of state */
+ short state; /* state: */
+#define DEV_STATE_INIT 0 /* not open */
+#define DEV_STATE_OPENING 1 /* being opened */
+#define DEV_STATE_OPEN 2 /* open */
+#define DEV_STATE_CLOSING 3 /* being closed */
+ short flag; /* random flags: */
+#define D_EXCL_OPEN 0x0001 /* open only once */
+ short open_count; /* number of times open */
+ short io_in_progress; /* number of IOs in progress */
+ boolean_t io_wait; /* someone waiting for IO to finish */
+
+ struct ipc_port *port; /* open port */
+ queue_chain_t number_chain; /* chain for lookup by number */
+ int dev_number; /* device number */
+ int bsize; /* replacement for DEV_BSIZE */
+ struct dev_ops *dev_ops; /* and operations vector */
+#ifdef i386
+ struct device dev; /* the real device structure */
+#endif
+};
+typedef struct mach_device *mach_device_t;
+#define MACH_DEVICE_NULL ((mach_device_t)0)
+
+/*
+ * To find and remove device entries
+ */
+mach_device_t device_lookup(); /* by name */
+
+void mach_device_reference();
+void mach_device_deallocate();
+
+/*
+ * To find and remove port-to-device mappings
+ */
+device_t dev_port_lookup();
+void dev_port_enter();
+void dev_port_remove();
+
+/*
+ * To call a routine on each device
+ */
+boolean_t dev_map();
+
+/*
+ * To lock and unlock state and open-count
+ */
+#define device_lock(device) simple_lock(&(device)->lock)
+#define device_unlock(device) simple_unlock(&(device)->lock)
+
+#endif /* _DEVICE_DEV_HDR_H_ */
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
new file mode 100644
index 00000000..746c394b
--- /dev/null
+++ b/device/dev_lookup.c
@@ -0,0 +1,409 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ */
+
+#include <mach/port.h>
+#include <mach/vm_param.h>
+
+#include <kern/queue.h>
+#include <kern/zalloc.h>
+
+#include <device/device_types.h>
+#include <device/dev_hdr.h>
+#include <device/conf.h>
+#include <device/param.h> /* DEV_BSIZE, as default */
+
+#include <ipc/ipc_port.h>
+#include <kern/ipc_kobject.h>
+
+#ifdef i386
+#include <i386at/device_emul.h>
+#endif
+
+/*
+ * Device structure routines: reference counting, port->device.
+ */
+
+/*
+ * Lookup/enter by device number.
+ */
+#define NDEVHASH 8
+#define DEV_NUMBER_HASH(dev) ((dev) & (NDEVHASH-1))
+queue_head_t dev_number_hash_table[NDEVHASH];
+
+/*
+ * Lock for device-number to device lookup.
+ * Must be held before device-ref_count lock.
+ */
+decl_simple_lock_data(,
+ dev_number_lock)
+
+zone_t dev_hdr_zone;
+
+/*
+ * Enter device in the number lookup table.
+ * The number table lock must be held.
+ */
+void
+dev_number_enter(device)
+ register mach_device_t device;
+{
+ register queue_t q;
+
+ q = &dev_number_hash_table[DEV_NUMBER_HASH(device->dev_number)];
+ queue_enter(q, device, mach_device_t, number_chain);
+}
+
+/*
+ * Remove device from the device-number lookup table.
+ * The device-number table lock must be held.
+ */
+void
+dev_number_remove(device)
+ register mach_device_t device;
+{
+ register queue_t q;
+
+ q = &dev_number_hash_table[DEV_NUMBER_HASH(device->dev_number)];
+ queue_remove(q, device, mach_device_t, number_chain);
+}
+
+/*
+ * Lookup a device by device operations and minor number.
+ * The number table lock must be held.
+ */
+mach_device_t
+dev_number_lookup(ops, devnum)
+ dev_ops_t ops;
+ int devnum;
+{
+ register queue_t q;
+ register mach_device_t device;
+
+ q = &dev_number_hash_table[DEV_NUMBER_HASH(devnum)];
+ queue_iterate(q, device, mach_device_t, number_chain) {
+ if (device->dev_ops == ops && device->dev_number == devnum) {
+ return (device);
+ }
+ }
+ return (MACH_DEVICE_NULL);
+}
+
+/*
+ * Look up a device by name, and create the device structure
+ * if it does not exist. Enter it in the dev_number lookup
+ * table.
+ */
+mach_device_t
+device_lookup(name)
+ char * name;
+{
+ dev_ops_t dev_ops;
+ int dev_minor;
+ register mach_device_t device;
+ register mach_device_t new_device;
+
+ /*
+ * Get the device and unit number from the name.
+ */
+ if (!dev_name_lookup(name, &dev_ops, &dev_minor))
+ return (MACH_DEVICE_NULL);
+
+ /*
+ * Look up the device in the hash table. If it is
+ * not there, enter it.
+ */
+ new_device = MACH_DEVICE_NULL;
+ simple_lock(&dev_number_lock);
+ while ((device = dev_number_lookup(dev_ops, dev_minor))
+ == MACH_DEVICE_NULL) {
+ /*
+ * Must unlock to allocate the structure. If
+ * the structure has appeared after we have allocated,
+ * release the new structure.
+ */
+ if (new_device != MACH_DEVICE_NULL)
+ break; /* allocated */
+
+ simple_unlock(&dev_number_lock);
+
+ new_device = (mach_device_t) zalloc(dev_hdr_zone);
+ simple_lock_init(&new_device->ref_lock);
+ new_device->ref_count = 1;
+ simple_lock_init(&new_device->lock);
+ new_device->state = DEV_STATE_INIT;
+ new_device->flag = 0;
+ new_device->open_count = 0;
+ new_device->io_in_progress = 0;
+ new_device->io_wait = FALSE;
+ new_device->port = IP_NULL;
+ new_device->dev_ops = dev_ops;
+ new_device->dev_number = dev_minor;
+ new_device->bsize = DEV_BSIZE; /* change later */
+
+ simple_lock(&dev_number_lock);
+ }
+
+ if (device == MACH_DEVICE_NULL) {
+ /*
+ * No existing device structure. Insert the
+ * new one.
+ */
+ assert(new_device != MACH_DEVICE_NULL);
+ device = new_device;
+
+ dev_number_enter(device);
+ simple_unlock(&dev_number_lock);
+ }
+ else {
+ /*
+ * Have existing device.
+ */
+ mach_device_reference(device);
+ simple_unlock(&dev_number_lock);
+
+ if (new_device != MACH_DEVICE_NULL)
+ zfree(dev_hdr_zone, (vm_offset_t)new_device);
+ }
+
+ return (device);
+}
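
Editor's note: the loop above is an instance of a common pattern -- drop the lock to allocate, retake it, re-check whether another thread inserted the entry meanwhile, and discard the spare if it did. In outline (obj_t, table_lock, table_find(), alloc_and_init(), table_insert() and obj_free() are placeholders, not part of this tree):

obj_t lookup_or_create(int key)
{
	obj_t	obj, new_obj = 0;

	simple_lock(&table_lock);
	while ((obj = table_find(key)) == 0) {
		if (new_obj != 0)
			break;			/* second pass: insert ours below */
		simple_unlock(&table_lock);
		new_obj = alloc_and_init(key);	/* may block; lock not held */
		simple_lock(&table_lock);
	}
	if (obj == 0) {
		obj = new_obj;			/* still absent: insert ours */
		table_insert(obj);
		new_obj = 0;
	}
	simple_unlock(&table_lock);
	if (new_obj != 0)
		obj_free(new_obj);		/* lost the race: discard spare */
	return obj;
}
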
+
+/*
+ * Add a reference to the device.
+ */
+void
+mach_device_reference(device)
+ register mach_device_t device;
+{
+ simple_lock(&device->ref_lock);
+ device->ref_count++;
+ simple_unlock(&device->ref_lock);
+}
+
+/*
+ * Remove a reference to the device, and deallocate the
+ * structure if no references are left.
+ */
+void
+mach_device_deallocate(device)
+ register mach_device_t device;
+{
+ simple_lock(&device->ref_lock);
+ if (--device->ref_count > 0) {
+ simple_unlock(&device->ref_lock);
+ return;
+ }
+ device->ref_count = 1;
+ simple_unlock(&device->ref_lock);
+
+ simple_lock(&dev_number_lock);
+ simple_lock(&device->ref_lock);
+ if (--device->ref_count > 0) {
+ simple_unlock(&device->ref_lock);
+ simple_unlock(&dev_number_lock);
+ return;
+ }
+
+ dev_number_remove(device);
+ simple_unlock(&device->ref_lock);
+ simple_unlock(&dev_number_lock);
+
+ zfree(dev_hdr_zone, (vm_offset_t)device);
+}
+
+/*
+
+ */
+/*
+ * port-to-device lookup routines.
+ */
+decl_simple_lock_data(,
+ dev_port_lock)
+
+/*
+ * Enter a port-to-device mapping.
+ */
+void
+dev_port_enter(device)
+ register mach_device_t device;
+{
+ mach_device_reference(device);
+#ifdef i386
+ ipc_kobject_set(device->port,
+ (ipc_kobject_t) &device->dev, IKOT_DEVICE);
+ device->dev.emul_data = device;
+ {
+ extern struct device_emulation_ops mach_device_emulation_ops;
+
+ device->dev.emul_ops = &mach_device_emulation_ops;
+ }
+#else
+ ipc_kobject_set(device->port, (ipc_kobject_t) device, IKOT_DEVICE);
+#endif
+}
+
+/*
+ * Remove a port-to-device mapping.
+ */
+void
+dev_port_remove(device)
+ register mach_device_t device;
+{
+ ipc_kobject_set(device->port, IKO_NULL, IKOT_NONE);
+ mach_device_deallocate(device);
+}
+
+/*
+ * Lookup a device by its port.
+ * Doesn't consume the naked send right; produces a device reference.
+ */
+device_t
+dev_port_lookup(port)
+ ipc_port_t port;
+{
+ register device_t device;
+
+ if (!IP_VALID(port))
+ return (DEVICE_NULL);
+
+ ip_lock(port);
+ if (ip_active(port) && (ip_kotype(port) == IKOT_DEVICE)) {
+ device = (device_t) port->ip_kobject;
+#ifdef i386
+ if (device->emul_ops->reference)
+ (*device->emul_ops->reference)(device->emul_data);
+#else
+ mach_device_reference(device);
+#endif
+ }
+ else
+ device = DEVICE_NULL;
+
+ ip_unlock(port);
+ return (device);
+}
+
+/*
+ * Get the port for a device.
+ * Consumes a device reference; produces a naked send right.
+ */
+ipc_port_t
+convert_device_to_port(device)
+ register device_t device;
+{
+#ifndef i386
+ register ipc_port_t port;
+#endif
+
+ if (device == DEVICE_NULL)
+ return IP_NULL;
+
+#ifdef i386
+ return (*device->emul_ops->dev_to_port) (device->emul_data);
+#else
+ device_lock(device);
+ if (device->state == DEV_STATE_OPEN)
+ port = ipc_port_make_send(device->port);
+ else
+ port = IP_NULL;
+ device_unlock(device);
+
+ mach_device_deallocate(device);
+ return port;
+#endif
+}
+
+/*
+ * Call a supplied routine on each device, passing it
+ * the port as an argument. If the routine returns TRUE,
+ * stop the search and return TRUE. If none returns TRUE,
+ * return FALSE.
+ */
+boolean_t
+dev_map(routine, port)
+ boolean_t (*routine)();
+ mach_port_t port;
+{
+ register int i;
+ register queue_t q;
+ register mach_device_t dev, prev_dev;
+
+ for (i = 0, q = &dev_number_hash_table[0];
+ i < NDEVHASH;
+ i++, q++) {
+ prev_dev = MACH_DEVICE_NULL;
+ simple_lock(&dev_number_lock);
+ queue_iterate(q, dev, mach_device_t, number_chain) {
+ mach_device_reference(dev);
+ simple_unlock(&dev_number_lock);
+ if (prev_dev != MACH_DEVICE_NULL)
+ mach_device_deallocate(prev_dev);
+
+ if ((*routine)(dev, port)) {
+ /*
+ * Done
+ */
+ mach_device_deallocate(dev);
+ return (TRUE);
+ }
+
+ simple_lock(&dev_number_lock);
+ prev_dev = dev;
+ }
+ simple_unlock(&dev_number_lock);
+ if (prev_dev != MACH_DEVICE_NULL)
+ mach_device_deallocate(prev_dev);
+ }
+ return (FALSE);
+}
+
+/*
+ * Initialization
+ */
+#define NDEVICES 256
+
+void
+dev_lookup_init()
+{
+ register int i;
+
+ simple_lock_init(&dev_number_lock);
+
+ for (i = 0; i < NDEVHASH; i++)
+ queue_init(&dev_number_hash_table[i]);
+
+ simple_lock_init(&dev_port_lock);
+
+ dev_hdr_zone = zinit(sizeof(struct mach_device),
+ sizeof(struct mach_device) * NDEVICES,
+ PAGE_SIZE,
+ FALSE,
+ "open device entry");
+}
diff --git a/device/dev_master.h b/device/dev_master.h
new file mode 100644
index 00000000..964ae828
--- /dev/null
+++ b/device/dev_master.h
@@ -0,0 +1,60 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 11/89
+ *
+ * Bind an IO operation to the master CPU.
+ */
+
+#include <cpus.h>
+
+#if NCPUS > 1
+
+#include <kern/macro_help.h>
+#include <kern/cpu_number.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+#include <kern/processor.h>
+
+#define io_grab_master() \
+ MACRO_BEGIN \
+ thread_bind(current_thread(), master_processor); \
+ if (current_processor() != master_processor) \
+ thread_block((void (*)()) 0); \
+ MACRO_END
+
+#define io_release_master() \
+ MACRO_BEGIN \
+ thread_bind(current_thread(), PROCESSOR_NULL); \
+ MACRO_END
+
+#else	/* NCPUS > 1 */
+
+#define io_grab_master()
+#define io_release_master()
+
+#endif	/* NCPUS > 1 */
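
Editor's note: a sketch of the intended use -- bracketing a driver operation that must run on the master processor. mydev_setstat() and mydev_hw_setstat() are hypothetical.

io_return_t mydev_setstat(int dev, int flavor, int *data, unsigned int count)
{
	io_return_t	rc;

	io_grab_master();			/* migrate to the master CPU */
	rc = mydev_hw_setstat(dev, flavor, data, count);
	io_release_master();			/* unbind; may run anywhere again */
	return rc;
}
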
diff --git a/device/dev_name.c b/device/dev_name.c
new file mode 100644
index 00000000..99d9227a
--- /dev/null
+++ b/device/dev_name.c
@@ -0,0 +1,237 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ */
+
+#include <device/device_types.h>
+#include <device/dev_hdr.h>
+#include <device/conf.h>
+
+
+
+/*
+ * Routines placed in empty entries in the device tables
+ */
+int nulldev()
+{
+ return (D_SUCCESS);
+}
+
+int nodev()
+{
+ return (D_INVALID_OPERATION);
+}
+
+vm_offset_t
+nomap()
+{
+ return (D_INVALID_OPERATION);
+}
+
+/*
+ * Name comparison routine.
+ * Compares first 'len' characters of 'src'
+ * with 'target', which is zero-terminated.
+ * Returns TRUE if strings are equal:
+ * src and target are equal in first 'len' characters
+ * next character of target is 0 (end of string).
+ */
+boolean_t
+name_equal(src, len, target)
+ register char * src;
+ register int len;
+ register char * target;
+{
+ while (--len >= 0)
+ if (*src++ != *target++)
+ return FALSE;
+ return *target == 0;
+}
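
Editor's note: an illustration of the comparison rule above (the first len characters of src must match, and target must end exactly there):

/*
 *	name_equal("hd0a", 2, "hd")   == TRUE    first 2 chars match, target ends
 *	name_equal("hd0a", 2, "hdx")  == FALSE   target has a character left over
 *	name_equal("hd0a", 3, "hd")   == FALSE   '0' != '\0' at the third position
 */
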
+
+/*
+ * device name lookup
+ */
+boolean_t dev_name_lookup(name, ops, unit)
+ char * name;
+ dev_ops_t *ops; /* out */
+ int *unit; /* out */
+{
+ /*
+ * Assume that block device names are of the form
+ *
+ * <device_name><unit_number>[[<slice num>]<partition>]
+ *
+ * where
+ * <device_name> is the name in the device table
+ * <unit_number> is an integer
+	 *	<slice num>	is 's' followed by a number (disks only!)
+ * <partition> is a letter in [a-h] (disks only?)
+ */
+
+ register char * cp = name;
+ int len;
+ register int j = 0;
+ register int c;
+ dev_ops_t dev;
+ register boolean_t found;
+
+ int slice_num=0;
+
+#if 0
+ printf("lookup on name %s\n",name);
+#endif	/* 0 */
+
+ /*
+ * Find device type name (characters before digit)
+ */
+ while ((c = *cp) != '\0' &&
+ !(c >= '0' && c <= '9'))
+ cp++;
+
+ len = cp - name;
+ if (c != '\0') {
+ /*
+ * Find unit number
+ */
+ while ((c = *cp) != '\0' &&
+ c >= '0' && c <= '9') {
+ j = j * 10 + (c - '0');
+ cp++;
+ }
+ }
+
+ found = FALSE;
+ dev_search(dev) {
+ if (name_equal(name, len, dev->d_name)) {
+ found = TRUE;
+ break;
+ }
+ }
+ if (!found) {
+ /* name not found - try indirection list */
+ register dev_indirect_t di;
+
+ dev_indirect_search(di) {
+ if (name_equal(name, len, di->d_name)) {
+ /*
+ * Return device and unit from indirect vector.
+ */
+ *ops = di->d_ops;
+ *unit = di->d_unit;
+ return (TRUE);
+ }
+ }
+ /* Not found in either list. */
+ return (FALSE);
+ }
+
+ *ops = dev;
+ *unit = j;
+
+ /*
+ * Find sub-device number
+ */
+
+ j = dev->d_subdev;
+ if (j > 0) {
+ /* if no slice string, slice num = 0 */
+
+ /* <subdev_count>*unit + <slice_number>*16 -- I know it's bad */
+ *unit *= j;
+
+ /* find slice ? */
+ if (c=='s') {
+ cp++;
+ while ((c = *cp) != '\0' &&
+ c >= '0' && c <= '9') {
+ slice_num = slice_num * 10 + (c - '0');
+ cp++;
+ }
+ }
+
+ *unit += (slice_num <<4);
+		/* if slice==0, it is either compatibility or whole device */
+
+ if (c >= 'a' && c < 'a' + j) { /* note: w/o this -> whole slice */
+ /*
+ * Minor number is <subdev_count>*unit + letter.
+ * NOW it is slice result + letter
+ */
+#if 0
+ *unit = *unit * j + (c - 'a' +1); /* +1 to start 'a' at 1 */
+#endif	/* 0 */
+ *unit += (c - 'a' +1);
+ }
+ }
+ return (TRUE);
+}
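
Editor's note: a worked example of the minor-number encoding above, assuming a hypothetical disk entry "hd" with d_subdev = 8:

/*
 *	dev_name_lookup("hd0s1a", &ops, &unit)
 *
 *		device name "hd"        ops  = entry for "hd"
 *		unit digits "0"         unit = 0
 *		unit *= d_subdev (8)    unit = 0
 *		slice "s1"              unit += 1 << 4        -> 16
 *		partition 'a'           unit += 'a' - 'a' + 1 -> 17
 *
 *	so the device is returned with minor number 17.
 */
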
+
+/*
+ * Change an entry in the indirection list.
+ */
+void
+dev_set_indirection(name, ops, unit)
+ char *name;
+ dev_ops_t ops;
+ int unit;
+{
+ register dev_indirect_t di;
+
+ dev_indirect_search(di) {
+ if (!strcmp(di->d_name, name)) {
+ di->d_ops = ops;
+ di->d_unit = unit;
+ break;
+ }
+ }
+}
+
+boolean_t dev_change_indirect(iname, dname, unit)
+char *iname,*dname;
+int unit;
+{
+ struct dev_ops *dp;
+ struct dev_indirect *di;
+ int found = FALSE;
+
+ dev_search(dp) {
+ if (!strcmp(dp->d_name,dname)) {
+ found = TRUE;
+ break;
+ }
+ }
+ if (!found) return FALSE;
+ dev_indirect_search(di) {
+ if (!strcmp(di->d_name,iname)) {
+ di->d_ops = dp;
+ di->d_unit = unit;
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
diff --git a/device/dev_pager.c b/device/dev_pager.c
new file mode 100644
index 00000000..007942d3
--- /dev/null
+++ b/device/dev_pager.c
@@ -0,0 +1,741 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ *
+ * Device pager.
+ */
+#include <norma_vm.h>
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/std_types.h>
+#include <mach/mach_types.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <kern/queue.h>
+#include <kern/zalloc.h>
+#include <kern/kalloc.h>
+
+#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
+
+#include <device/device_types.h>
+#include <device/ds_routines.h>
+#include <device/dev_hdr.h>
+#include <device/io_req.h>
+
+extern vm_offset_t block_io_mmap(); /* dummy routine to allow
+ mmap for block devices */
+
+/*
+ * The device pager routines are called directly from the message
+ * system (via mach_msg), and thus run in the kernel-internal
+ * environment. All ports are in internal form (ipc_port_t),
+ * and must be correctly reference-counted in order to be saved
+ * in other data structures. Kernel routines may be called
+ * directly. Kernel types are used for data objects (tasks,
+ * memory objects, ports). The only IPC routines that may be
+ * called are ones that masquerade as the kernel task (via
+ * msg_send_from_kernel).
+ *
+ * Port rights and references are maintained as follows:
+ * Memory object port:
+ * The device_pager task has all rights.
+ * Memory object control port:
+ * The device_pager task has only send rights.
+ * Memory object name port:
+ * The device_pager task has only send rights.
+ * The name port is not even recorded.
+ * Regardless of how the object is created, the control and name
+ * ports are created by the kernel and passed through the memory
+ * management interface.
+ *
+ * The device_pager assumes that access to its memory objects
+ * will not be propagated to more than one host, and therefore
+ * provides no consistency guarantees beyond those made by the
+ * kernel.
+ *
+ * In the event that more than one host attempts to use a device
+ * memory object, the device_pager will only record the last set
+ * of port names. [This can happen with only one host if a new
+ * mapping is being established while termination of all previous
+ * mappings is taking place.] Currently, the device_pager assumes
+ * that its clients adhere to the initialization and termination
+ * protocols in the memory management interface; otherwise, port
+ * rights or out-of-line memory from erroneous messages may be
+ * allowed to accumulate.
+ *
+ * [The phrase "currently" has been used above to denote aspects of
+ * the implementation that could be altered without changing the rest
+ * of the basic documentation.]
+ */
+
+/*
+ * Basic device pager structure.
+ */
+struct dev_pager {
+ decl_simple_lock_data(, lock) /* lock for reference count */
+ int ref_count; /* reference count */
+ int client_count; /* How many memory_object_create
+ * calls have we received */
+ ipc_port_t pager; /* pager port */
+ ipc_port_t pager_request; /* Known request port */
+ ipc_port_t pager_name; /* Known name port */
+ mach_device_t device; /* Device handle */
+ int type; /* to distinguish */
+#define DEV_PAGER_TYPE 0
+#define CHAR_PAGER_TYPE 1
+ /* char pager specifics */
+ int prot;
+ vm_size_t size;
+};
+typedef struct dev_pager *dev_pager_t;
+#define DEV_PAGER_NULL ((dev_pager_t)0)
+
+
+zone_t dev_pager_zone;
+
+void dev_pager_reference(register dev_pager_t ds)
+{
+ simple_lock(&ds->lock);
+ ds->ref_count++;
+ simple_unlock(&ds->lock);
+}
+
+void dev_pager_deallocate(register dev_pager_t ds)
+{
+ simple_lock(&ds->lock);
+ if (--ds->ref_count > 0) {
+ simple_unlock(&ds->lock);
+ return;
+ }
+
+ simple_unlock(&ds->lock);
+ zfree(dev_pager_zone, (vm_offset_t)ds);
+}
+
+/*
+ * A hash table of ports for device_pager backed objects.
+ */
+
+#define DEV_PAGER_HASH_COUNT 127
+
+struct dev_pager_entry {
+ queue_chain_t links;
+ ipc_port_t name;
+ dev_pager_t pager_rec;
+};
+typedef struct dev_pager_entry *dev_pager_entry_t;
+
+queue_head_t dev_pager_hashtable[DEV_PAGER_HASH_COUNT];
+zone_t dev_pager_hash_zone;
+decl_simple_lock_data(,
+ dev_pager_hash_lock)
+
+#define dev_pager_hash(name_port) \
+ (((natural_t)(name_port) & 0xffffff) % DEV_PAGER_HASH_COUNT)
+
+void dev_pager_hash_init(void)
+{
+ register int i;
+ register vm_size_t size;
+
+ size = sizeof(struct dev_pager_entry);
+ dev_pager_hash_zone = zinit(
+ size,
+ size * 1000,
+ PAGE_SIZE,
+ FALSE,
+ "dev_pager port hash");
+ for (i = 0; i < DEV_PAGER_HASH_COUNT; i++)
+ queue_init(&dev_pager_hashtable[i]);
+ simple_lock_init(&dev_pager_hash_lock);
+}
+
+void dev_pager_hash_insert(
+ ipc_port_t name_port,
+ dev_pager_t rec)
+{
+ register dev_pager_entry_t new_entry;
+
+ new_entry = (dev_pager_entry_t) zalloc(dev_pager_hash_zone);
+ new_entry->name = name_port;
+ new_entry->pager_rec = rec;
+
+ simple_lock(&dev_pager_hash_lock);
+ queue_enter(&dev_pager_hashtable[dev_pager_hash(name_port)],
+ new_entry, dev_pager_entry_t, links);
+ simple_unlock(&dev_pager_hash_lock);
+}
+
+void dev_pager_hash_delete(ipc_port_t name_port)
+{
+ register queue_t bucket;
+ register dev_pager_entry_t entry;
+
+ bucket = &dev_pager_hashtable[dev_pager_hash(name_port)];
+
+ simple_lock(&dev_pager_hash_lock);
+ for (entry = (dev_pager_entry_t)queue_first(bucket);
+ !queue_end(bucket, &entry->links);
+ entry = (dev_pager_entry_t)queue_next(&entry->links)) {
+ if (entry->name == name_port) {
+ queue_remove(bucket, entry, dev_pager_entry_t, links);
+ break;
+ }
+ }
+	if (queue_end(bucket, &entry->links))
+	    entry = 0;		/* name not in table: nothing to free */
+	simple_unlock(&dev_pager_hash_lock);
+	if (entry)
+	    zfree(dev_pager_hash_zone, (vm_offset_t)entry);
+}
+
+dev_pager_t dev_pager_hash_lookup(ipc_port_t name_port)
+{
+ register queue_t bucket;
+ register dev_pager_entry_t entry;
+ register dev_pager_t pager;
+
+ bucket = &dev_pager_hashtable[dev_pager_hash(name_port)];
+
+ simple_lock(&dev_pager_hash_lock);
+ for (entry = (dev_pager_entry_t)queue_first(bucket);
+ !queue_end(bucket, &entry->links);
+ entry = (dev_pager_entry_t)queue_next(&entry->links)) {
+ if (entry->name == name_port) {
+ pager = entry->pager_rec;
+ dev_pager_reference(pager);
+ simple_unlock(&dev_pager_hash_lock);
+ return (pager);
+ }
+ }
+ simple_unlock(&dev_pager_hash_lock);
+ return (DEV_PAGER_NULL);
+}
+
+kern_return_t device_pager_setup(
+ mach_device_t device,
+ int prot,
+ vm_offset_t offset,
+ vm_size_t size,
+ mach_port_t *pager)
+{
+ register dev_pager_t d;
+
+ /*
+ * Verify the device is indeed mappable
+ */
+ if (!device->dev_ops->d_mmap || (device->dev_ops->d_mmap == nomap))
+ return (D_INVALID_OPERATION);
+
+ /*
+ * Allocate a structure to hold the arguments
+ * and port to represent this object.
+ */
+
+ d = dev_pager_hash_lookup((ipc_port_t)device); /* HACK */
+ if (d != DEV_PAGER_NULL) {
+ *pager = (mach_port_t) ipc_port_make_send(d->pager);
+ dev_pager_deallocate(d);
+ return (D_SUCCESS);
+ }
+
+ d = (dev_pager_t) zalloc(dev_pager_zone);
+ if (d == DEV_PAGER_NULL)
+ return (KERN_RESOURCE_SHORTAGE);
+
+ simple_lock_init(&d->lock);
+ d->ref_count = 1;
+
+ /*
+ * Allocate the pager port.
+ */
+ d->pager = ipc_port_alloc_kernel();
+ if (d->pager == IP_NULL) {
+ dev_pager_deallocate(d);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+
+ d->client_count = 0;
+ d->pager_request = IP_NULL;
+ d->pager_name = IP_NULL;
+ d->device = device;
+ mach_device_reference(device);
+ d->prot = prot;
+ d->size = round_page(size);
+ if (device->dev_ops->d_mmap == block_io_mmap) {
+ d->type = DEV_PAGER_TYPE;
+ } else {
+ d->type = CHAR_PAGER_TYPE;
+ }
+
+ dev_pager_hash_insert(d->pager, d);
+ dev_pager_hash_insert((ipc_port_t)device, d); /* HACK */
+
+ *pager = (mach_port_t) ipc_port_make_send(d->pager);
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Routine: device_pager_release
+ * Purpose:
+ * Relinquish any references or rights that were
+ * associated with the result of a call to
+ * device_pager_setup.
+ */
+void device_pager_release(memory_object_t object)
+{
+ if (MACH_PORT_VALID(object))
+ ipc_port_release_send((ipc_port_t) object);
+}
+
+boolean_t device_pager_debug = FALSE;
+
+boolean_t device_pager_data_request_done(); /* forward */
+boolean_t device_pager_data_write_done(); /* forward */
+
+
+kern_return_t device_pager_data_request(
+ ipc_port_t pager,
+ ipc_port_t pager_request,
+ vm_offset_t offset,
+ vm_size_t length,
+ vm_prot_t protection_required)
+{
+ register dev_pager_t ds;
+
+#ifdef lint
+ protection_required++;
+#endif	/* lint */
+
+ if (device_pager_debug)
+ printf("(device_pager)data_request: pager=%d, offset=0x%x, length=0x%x\n",
+ pager, offset, length);
+
+ ds = dev_pager_hash_lookup((ipc_port_t)pager);
+ if (ds == DEV_PAGER_NULL)
+ panic("(device_pager)data_request: lookup failed");
+
+ if (ds->pager_request != pager_request)
+ panic("(device_pager)data_request: bad pager_request");
+
+ if (ds->type == CHAR_PAGER_TYPE) {
+ register vm_object_t object;
+ vm_offset_t device_map_page(void *,vm_offset_t);
+
+#if NORMA_VM
+ object = vm_object_lookup(pager);
+#else	/* NORMA_VM */
+ object = vm_object_lookup(pager_request);
+#endif	/* NORMA_VM */
+ if (object == VM_OBJECT_NULL) {
+ (void) r_memory_object_data_error(pager_request,
+ offset, length,
+ KERN_FAILURE);
+ dev_pager_deallocate(ds);
+ return (KERN_SUCCESS);
+ }
+
+ vm_object_page_map(object,
+ offset, length,
+ device_map_page, (char *)ds);
+
+ vm_object_deallocate(object);
+ }
+ else {
+ register io_req_t ior;
+ register mach_device_t device;
+ io_return_t result;
+
+ panic("(device_pager)data_request: dev pager");
+
+ device = ds->device;
+ mach_device_reference(device);
+ dev_pager_deallocate(ds);
+
+ /*
+ * Package the read for the device driver.
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_READ | IO_CALL;
+ ior->io_mode = 0;
+ ior->io_recnum = offset / device->bsize;
+ ior->io_data = 0; /* driver must allocate */
+ ior->io_count = length;
+ ior->io_alloc_size = 0; /* no data allocated yet */
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = device_pager_data_request_done;
+ ior->io_reply_port = pager_request;
+ ior->io_reply_port_type = MACH_MSG_TYPE_PORT_SEND;
+
+ result = (*device->dev_ops->d_read)(device->dev_number, ior);
+ if (result == D_IO_QUEUED)
+ return (KERN_SUCCESS);
+
+ /*
+ * Return by queuing IOR for io_done thread, to reply in
+ * correct environment (kernel).
+ */
+ ior->io_error = result;
+ iodone(ior);
+ }
+
+ dev_pager_deallocate(ds);
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Always called by io_done thread.
+ */
+boolean_t device_pager_data_request_done(register io_req_t ior)
+{
+ vm_offset_t start_alloc, end_alloc;
+ vm_size_t size_read;
+
+ if (ior->io_error == D_SUCCESS) {
+ size_read = ior->io_count;
+ if (ior->io_residual) {
+ if (device_pager_debug)
+ printf("(device_pager)data_request_done: r: 0x%x\n",ior->io_residual);
+ bzero( (char *) (&ior->io_data[ior->io_count -
+ ior->io_residual]),
+ (unsigned) ior->io_residual);
+ }
+ } else {
+ size_read = ior->io_count - ior->io_residual;
+ }
+
+ start_alloc = trunc_page((vm_offset_t)ior->io_data);
+ end_alloc = start_alloc + round_page(ior->io_alloc_size);
+
+ if (ior->io_error == D_SUCCESS) {
+ vm_map_copy_t copy;
+ kern_return_t kr;
+
+ kr = vm_map_copyin(kernel_map, (vm_offset_t)ior->io_data,
+ size_read, TRUE, &copy);
+ if (kr != KERN_SUCCESS)
+ panic("device_pager_data_request_done");
+
+ (void) r_memory_object_data_provided(
+ ior->io_reply_port,
+ ior->io_recnum * ior->io_device->bsize,
+ (vm_offset_t)copy,
+ size_read,
+ VM_PROT_NONE);
+ }
+ else {
+ (void) r_memory_object_data_error(
+ ior->io_reply_port,
+ ior->io_recnum * ior->io_device->bsize,
+ (vm_size_t)ior->io_count,
+ ior->io_error);
+ }
+
+ (void)vm_deallocate(kernel_map,
+ start_alloc,
+ end_alloc - start_alloc);
+ mach_device_deallocate(ior->io_device);
+ return (TRUE);
+}
+
+kern_return_t device_pager_data_write(
+ ipc_port_t pager,
+ ipc_port_t pager_request,
+ register vm_offset_t offset,
+ register pointer_t addr,
+ vm_size_t data_count)
+{
+ register dev_pager_t ds;
+ register mach_device_t device;
+ register io_req_t ior;
+ kern_return_t result;
+
+ panic("(device_pager)data_write: called");
+
+ ds = dev_pager_hash_lookup((ipc_port_t)pager);
+ if (ds == DEV_PAGER_NULL)
+ panic("(device_pager)data_write: lookup failed");
+
+ if (ds->pager_request != pager_request)
+ panic("(device_pager)data_write: bad pager_request");
+
+ if (ds->type == CHAR_PAGER_TYPE)
+ panic("(device_pager)data_write: char pager");
+
+ device = ds->device;
+ mach_device_reference(device);
+ dev_pager_deallocate(ds);
+
+ /*
+ * Package the write request for the device driver.
+ */
+ io_req_alloc(ior, data_count);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_WRITE | IO_CALL;
+ ior->io_mode = 0;
+ ior->io_recnum = offset / device->bsize;
+ ior->io_data = (io_buf_ptr_t)addr;
+ ior->io_count = data_count;
+ ior->io_alloc_size = data_count; /* amount to deallocate */
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = device_pager_data_write_done;
+ ior->io_reply_port = IP_NULL;
+
+ result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+ if (result != D_IO_QUEUED) {
+ device_write_dealloc(ior);
+ io_req_free((vm_offset_t)ior);
+ mach_device_deallocate(device);
+ }
+
+ return (KERN_SUCCESS);
+}
+
+boolean_t device_pager_data_write_done(ior)
+ register io_req_t ior;
+{
+ device_write_dealloc(ior);
+ mach_device_deallocate(ior->io_device);
+
+ return (TRUE);
+}
+
+kern_return_t device_pager_copy(
+ ipc_port_t pager,
+ ipc_port_t pager_request,
+ register vm_offset_t offset,
+ register vm_size_t length,
+ ipc_port_t new_pager)
+{
+ panic("(device_pager)copy: called");
+}
+
+kern_return_t
+device_pager_supply_completed(
+ ipc_port_t pager,
+ ipc_port_t pager_request,
+ vm_offset_t offset,
+ vm_size_t length,
+ kern_return_t result,
+ vm_offset_t error_offset)
+{
+ panic("(device_pager)supply_completed: called");
+}
+
+kern_return_t
+device_pager_data_return(
+ ipc_port_t pager,
+ ipc_port_t pager_request,
+ vm_offset_t offset,
+ register pointer_t addr,
+ vm_size_t data_cnt,
+ boolean_t dirty,
+ boolean_t kernel_copy)
+{
+ panic("(device_pager)data_return: called");
+}
+
+kern_return_t
+device_pager_change_completed(
+ ipc_port_t pager,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
+{
+ panic("(device_pager)change_completed: called");
+}
+
+/*
+ * The mapping function takes a byte offset, but returns
+ * a machine-dependent page frame number. We convert
+ * that into something that the pmap module will
+ * accept later.
+ */
+vm_offset_t device_map_page(
+ void *dsp,
+ vm_offset_t offset)
+{
+ register dev_pager_t ds = (dev_pager_t) dsp;
+
+ return pmap_phys_address(
+ (*(ds->device->dev_ops->d_mmap))
+ (ds->device->dev_number, offset, ds->prot));
+}
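+
+/*
+ * For illustration: vm_object_page_map() is expected to call the
+ * mapping function handed to it once per page of the request, along
+ * the lines of the sketch below (the name `enter' is hypothetical
+ * and only stands for whatever the VM code does with the returned
+ * physical address):
+ *
+ *	for (o = offset; o < offset + length; o += PAGE_SIZE)
+ *		enter(object, o, device_map_page((void *)ds, o));
+ *
+ * so d_mmap must be prepared to translate every page offset in the
+ * mapped range.
+ */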
+
+kern_return_t device_pager_init_pager(
+ ipc_port_t pager,
+ ipc_port_t pager_request,
+ ipc_port_t pager_name,
+ vm_size_t pager_page_size)
+{
+ register dev_pager_t ds;
+
+ if (device_pager_debug)
+ printf("(device_pager)init: pager=%d, request=%d, name=%d\n",
+ pager, pager_request, pager_name);
+
+ assert(pager_page_size == PAGE_SIZE);
+ assert(IP_VALID(pager_request));
+ assert(IP_VALID(pager_name));
+
+ ds = dev_pager_hash_lookup(pager);
+ assert(ds != DEV_PAGER_NULL);
+
+ assert(ds->client_count == 0);
+ assert(ds->pager_request == IP_NULL);
+ assert(ds->pager_name == IP_NULL);
+
+ ds->client_count = 1;
+
+ /*
+ * We save the send rights for the request and name ports.
+ */
+
+ ds->pager_request = pager_request;
+ ds->pager_name = pager_name;
+
+ if (ds->type == CHAR_PAGER_TYPE) {
+ /*
+ * Reply that the object is ready
+ */
+ (void) r_memory_object_set_attributes(pager_request,
+ TRUE, /* ready */
+ FALSE, /* do not cache */
+ MEMORY_OBJECT_COPY_NONE);
+ } else {
+ (void) r_memory_object_set_attributes(pager_request,
+ TRUE, /* ready */
+ TRUE, /* cache */
+ MEMORY_OBJECT_COPY_DELAY);
+ }
+
+ dev_pager_deallocate(ds);
+ return (KERN_SUCCESS);
+}
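+
+/*
+ * On the attribute choices above: a character-device pager maps
+ * device memory (registers, frame buffers) directly, so those pages
+ * presumably must not be cached or copied by the kernel, hence
+ * MEMORY_OBJECT_COPY_NONE and no caching; a block-device pager backs
+ * ordinary memory with device storage, where caching and COPY_DELAY
+ * are safe.
+ */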
+
+kern_return_t device_pager_terminate(
+ ipc_port_t pager,
+ ipc_port_t pager_request,
+ ipc_port_t pager_name)
+{
+ register dev_pager_t ds;
+
+ assert(IP_VALID(pager_request));
+ assert(IP_VALID(pager_name));
+
+ ds = dev_pager_hash_lookup(pager);
+ assert(ds != DEV_PAGER_NULL);
+
+ assert(ds->client_count == 1);
+ assert(ds->pager_request == pager_request);
+ assert(ds->pager_name == pager_name);
+
+ dev_pager_hash_delete(ds->pager);
+ dev_pager_hash_delete((ipc_port_t)ds->device); /* HACK */
+ mach_device_deallocate(ds->device);
+
+ /* release the send rights we have saved from the init call */
+
+ ipc_port_release_send(pager_request);
+ ipc_port_release_send(pager_name);
+
+ /* release the naked receive rights we just acquired */
+
+ ipc_port_release_receive(pager_request);
+ ipc_port_release_receive(pager_name);
+
+ /* release the kernel's receive right for the pager port */
+
+ ipc_port_dealloc_kernel(pager);
+
+ /* once for ref from lookup, once to make it go away */
+ dev_pager_deallocate(ds);
+ dev_pager_deallocate(ds);
+
+ return (KERN_SUCCESS);
+}
+
+kern_return_t device_pager_data_unlock(
+ ipc_port_t memory_object,
+ ipc_port_t memory_control_port,
+ vm_offset_t offset,
+ vm_size_t length,
+ vm_prot_t desired_access)
+{
+#ifdef lint
+ memory_object++; memory_control_port++; offset++; length++; desired_access++;
+#endif /* lint */
+
+ panic("(device_pager)data_unlock: called");
+ return (KERN_FAILURE);
+}
+
+kern_return_t device_pager_lock_completed(
+ ipc_port_t memory_object,
+ ipc_port_t pager_request_port,
+ vm_offset_t offset,
+ vm_size_t length)
+{
+#ifdef lint
+ memory_object++; pager_request_port++; offset++; length++;
+#endif /* lint */
+
+ panic("(device_pager)lock_completed: called");
+ return (KERN_FAILURE);
+}
+
+void device_pager_init(void)
+{
+ register vm_size_t size;
+
+ /*
+ * Initialize zone of paging structures.
+ */
+ size = sizeof(struct dev_pager);
+ dev_pager_zone = zinit(size,
+ (vm_size_t) size * 1000,
+ PAGE_SIZE,
+ FALSE,
+ "device pager structures");
+
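+ /*
+ * (The zinit arguments, here and in the other calls in this
+ * directory, are: element size, maximum zone size in bytes,
+ * allocation increment, pageable flag and a name -- so the zone
+ * above holds on the order of 1000 wired dev_pager structures.)
+ */
+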
+ /*
+ * Initialize the name port hashing stuff.
+ */
+ dev_pager_hash_init();
+}
diff --git a/device/device.srv b/device/device.srv
new file mode 100644
index 00000000..06aa0be3
--- /dev/null
+++ b/device/device.srv
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
+
+#include <device/device.defs>
diff --git a/device/device_init.c b/device/device_init.c
new file mode 100644
index 00000000..4eef63d6
--- /dev/null
+++ b/device/device_init.c
@@ -0,0 +1,73 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ *
+ * Initialize device service as part of kernel task.
+ */
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <kern/task.h>
+
+#include <device/device_types.h>
+#include <device/device_port.h>
+
+
+
+extern void ds_init();
+extern void dev_lookup_init();
+extern void net_io_init();
+extern void device_pager_init();
+extern void chario_init(void);
+#ifdef FIPC
+extern void fipc_init();
+#endif
+
+extern void io_done_thread();
+extern void net_thread();
+
+ipc_port_t master_device_port;
+
+void
+device_service_create()
+{
+ master_device_port = ipc_port_alloc_kernel();
+ if (master_device_port == IP_NULL)
+ panic("can't allocate master device port");
+
+ ds_init();
+ dev_lookup_init();
+ net_io_init();
+ device_pager_init();
+ chario_init();
+#ifdef FIPC
+ fipc_init();
+#endif
+
+ (void) kernel_thread(kernel_task, io_done_thread, 0);
+ (void) kernel_thread(kernel_task, net_thread, 0);
+}
diff --git a/device/device_pager.srv b/device/device_pager.srv
new file mode 100644
index 00000000..e4e52eac
--- /dev/null
+++ b/device/device_pager.srv
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#define memory_object device_pager
+
+/*
+ * Rename all of the functions in the pager interface, to avoid
+ * confusing them with the kernel interface.
+ */
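+/*
+ * For example, with the renames below a memory_object_data_request
+ * message arriving on a device pager port is dispatched by the
+ * generated server stub to device_pager_data_request() in the device
+ * pager code, rather than to the kernel's own implementation of the
+ * memory_object interface.
+ */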
+#define memory_object_init device_pager_init_pager
+#define memory_object_terminate device_pager_terminate
+#define memory_object_copy device_pager_copy
+#define memory_object_data_request device_pager_data_request
+#define memory_object_data_unlock device_pager_data_unlock
+#define memory_object_data_write device_pager_data_write
+#define memory_object_lock_completed device_pager_lock_completed
+#define memory_object_supply_completed device_pager_supply_completed
+#define memory_object_data_return device_pager_data_return
+#define memory_object_change_completed device_pager_change_completed
+
+#include <mach/memory_object.defs>
diff --git a/device/device_port.h b/device/device_port.h
new file mode 100644
index 00000000..980a7095
--- /dev/null
+++ b/device/device_port.h
@@ -0,0 +1,41 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ */
+
+#ifndef _DEVICE_DEVICE_PORT_H_
+#define _DEVICE_DEVICE_PORT_H_
+
+#include <ipc/ipc_port.h>
+
+/*
+ * Master privileged port for this host's device service
+ */
+extern ipc_port_t master_device_port;
+
+#endif /* _DEVICE_DEVICE_PORT_H_ */
diff --git a/device/device_reply.cli b/device/device_reply.cli
new file mode 100644
index 00000000..956540c4
--- /dev/null
+++ b/device/device_reply.cli
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a client presentation file. */
+
+#define KERNEL_USER 1
+
+#include <device/device_reply.defs>
diff --git a/device/device_types_kernel.h b/device/device_types_kernel.h
new file mode 100644
index 00000000..dd7b63e8
--- /dev/null
+++ b/device/device_types_kernel.h
@@ -0,0 +1,44 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ */
+
+#ifndef _DEVICE_DEVICE_TYPES_KERNEL_H_
+#define _DEVICE_DEVICE_TYPES_KERNEL_H_
+
+/*
+ * Kernel-only type definitions for device server.
+ */
+
+#include <mach/port.h>
+#include <device/dev_hdr.h>
+
+extern device_t dev_port_lookup(/* struct ipc_port * */);
+extern struct ipc_port *convert_device_to_port(/* device_t */);
+
+#endif /* _DEVICE_DEVICE_TYPES_KERNEL_H_ */
diff --git a/device/dk_label.c b/device/dk_label.c
new file mode 100644
index 00000000..8639e338
--- /dev/null
+++ b/device/dk_label.c
@@ -0,0 +1,98 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: dk_label.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <device/device_types.h>
+#include <device/disk_status.h>
+
+/* Checksum a disk label */
+unsigned
+dkcksum(lp)
+ struct disklabel *lp;
+{
+ register unsigned short *start, *end, sum = 0;
+
+ start = (unsigned short *)lp;
+ end = (unsigned short*)&lp->d_partitions[lp->d_npartitions];
+ while (start < end) sum ^= *start++;
+ return sum;
+}
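+
+/*
+ * A label is valid when the XOR of all its 16-bit words, d_checksum
+ * included, is zero.  The usual way to stamp a label before writing
+ * it out is therefore (a sketch, not used here):
+ *
+ *	lp->d_checksum = 0;
+ *	lp->d_checksum = dkcksum(lp);
+ *
+ * after which dkcksum() over the finished label returns 0, which is
+ * exactly what setdisklabel() below checks.
+ */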
+
+/* Perform some checks and then copy a disk label */
+setdisklabel(lp, nlp)
+ struct disklabel *lp, *nlp;
+{
+ if (nlp->d_magic != DISKMAGIC || nlp->d_magic2 != DISKMAGIC ||
+ (dkcksum(nlp) != 0))
+ return D_INVALID_OPERATION;
+ *lp = *nlp;
+ return D_SUCCESS;
+}
+
+dkgetlabel(lp, flavor, data, count)
+ struct disklabel *lp;
+ int flavor;
+ int * data; /* pointer to OUT array */
+ unsigned int *count; /* OUT */
+{
+
+ switch (flavor) {
+ /* get */
+ case DIOCGDINFO:
+ *(struct disklabel *)data = *lp;
+ *count = sizeof(struct disklabel)/sizeof(int);
+ break;
+ case DIOCGDINFO - (0x10<<16):
+ *(struct disklabel *)data = *lp;
+ *count = sizeof(struct disklabel)/sizeof(int) - 4;
+ break;
+ }
+}
+
+print_bsd_label(lp, str)
+ struct disklabel *lp;
+ char *str;
+{
+ int i;
+ printf("%s sectors %d, tracks %d, cylinders %d\n",
+ str, lp->d_nsectors, lp->d_ntracks, lp->d_ncylinders);
+ printf("%s secpercyl %d, secperunit %d, npartitions %d\n",
+ str, lp->d_secpercyl, lp->d_secperunit, lp->d_npartitions);
+
+ for (i = 0; i < lp->d_npartitions; i++) {
+ printf("%s %c: size = %d, offset = %d\n",
+ str, 'a'+i,
+ lp->d_partitions[i].p_size,
+ lp->d_partitions[i].p_offset);
+ }
+}
diff --git a/device/ds_routines.c b/device/ds_routines.c
new file mode 100644
index 00000000..9880f7ec
--- /dev/null
+++ b/device/ds_routines.c
@@ -0,0 +1,1820 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ */
+
+#include <norma_device.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/mig_errors.h>
+#include <mach/port.h>
+#include <mach/vm_param.h>
+#include <mach/notify.h>
+#include <machine/machspl.h> /* spl definitions */
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <kern/ast.h>
+#include <kern/counters.h>
+#include <kern/queue.h>
+#include <kern/zalloc.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/sched_prim.h>
+
+#include <vm/memory_object.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+
+#include <device/device_types.h>
+#include <device/dev_hdr.h>
+#include <device/conf.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+#include <device/net_status.h>
+#include <device/device_port.h>
+#include "device_reply.h"
+
+#include <machine/machspl.h>
+
+#ifdef i386
+#include <i386at/device_emul.h>
+#endif
+
+#ifdef i386
+ipc_port_t
+mach_convert_device_to_port (void *d)
+{
+ ipc_port_t port;
+ mach_device_t device = d;
+
+ if (! device)
+ return IP_NULL;
+ device_lock(device);
+ if (device->state == DEV_STATE_OPEN)
+ port = ipc_port_make_send(device->port);
+ else
+ port = IP_NULL;
+ device_unlock(device);
+ mach_device_deallocate(device);
+ return port;
+}
+#endif /* i386 */
+
+#ifdef i386
+static io_return_t
+device_open(reply_port, reply_port_type, mode, name, device_p)
+#else
+io_return_t
+ds_device_open(open_port, reply_port, reply_port_type,
+ mode, name, device_p)
+ ipc_port_t open_port;
+#endif
+ ipc_port_t reply_port;
+ mach_msg_type_name_t reply_port_type;
+ dev_mode_t mode;
+ char * name;
+ device_t *device_p; /* out */
+{
+ register mach_device_t device;
+ register kern_return_t result;
+ register io_req_t ior;
+ char namebuf[64];
+ ipc_port_t notify;
+
+#ifndef i386
+ /*
+ * Open must be called on the master device port.
+ */
+ if (open_port != master_device_port)
+ return (D_INVALID_OPERATION);
+
+ /*
+ * There must be a reply port.
+ */
+ if (!IP_VALID(reply_port)) {
+ printf("ds_* invalid reply port\n");
+ Debugger("ds_* reply_port");
+ return (MIG_NO_REPLY); /* no sense in doing anything */
+ }
+
+#if NORMA_DEVICE
+ /*
+ * Map global device name to <node> + local device name.
+ */
+ if (name[0] != '<') {
+ extern char *dev_forward_name();
+
+ name = dev_forward_name(name, namebuf, sizeof(namebuf));
+ }
+ /*
+ * Look for explicit node specifier, e.g., <2>sd0a.
+ * If found, then forward request to correct device server.
+ * If not found, then remove '<n>' and process locally.
+ *
+ * XXX should handle send-right reply_port as well as send-once XXX
+ */
+ if (name[0] == '<') {
+ char *n;
+ int node = 0;
+
+ for (n = &name[1]; *n != '>'; n++) {
+ if (*n >= '0' && *n <= '9') {
+ node = 10 * node + (*n - '0');
+ } else {
+ return (D_NO_SUCH_DEVICE);
+ }
+ }
+ if (node == node_self()) {
+ name = &n[1]; /* skip trailing '>' */
+ } else {
+ forward_device_open_send(remote_device(node),
+ reply_port, mode, name);
+ return (MIG_NO_REPLY);
+ }
+ }
+#endif /* NORMA_DEVICE */
+#endif /* ! i386 */
+
+ /*
+ * Find the device.
+ */
+ device = device_lookup(name);
+ if (device == MACH_DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+
+ /*
+ * If the device is being opened or closed,
+ * wait for that operation to finish.
+ */
+ device_lock(device);
+ while (device->state == DEV_STATE_OPENING ||
+ device->state == DEV_STATE_CLOSING) {
+ device->io_wait = TRUE;
+ thread_sleep((event_t)device, simple_lock_addr(device->lock), TRUE);
+ device_lock(device);
+ }
+
+ /*
+ * If the device is already open, increment the open count
+ * and return.
+ */
+ if (device->state == DEV_STATE_OPEN) {
+
+ if (device->flag & D_EXCL_OPEN) {
+ /*
+ * Cannot open a second time.
+ */
+ device_unlock(device);
+ mach_device_deallocate(device);
+ return (D_ALREADY_OPEN);
+ }
+
+ device->open_count++;
+ device_unlock(device);
+#ifdef i386
+ *device_p = &device->dev;
+#else
+ *device_p = device;
+#endif
+ return (D_SUCCESS);
+ /*
+ * Return deallocates device reference while acquiring
+ * port.
+ */
+ }
+
+ /*
+ * Allocate the device port and register the device before
+ * opening it.
+ */
+ device->state = DEV_STATE_OPENING;
+ device_unlock(device);
+
+ /*
+ * Allocate port, keeping a reference for it.
+ */
+ device->port = ipc_port_alloc_kernel();
+ if (device->port == IP_NULL) {
+ device_lock(device);
+ device->state = DEV_STATE_INIT;
+ device->port = IP_NULL;
+ if (device->io_wait) {
+ device->io_wait = FALSE;
+ thread_wakeup((event_t)device);
+ }
+ device_unlock(device);
+ mach_device_deallocate(device);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+
+ dev_port_enter(device);
+
+ /*
+ * Request no-senders notifications on device port.
+ */
+ notify = ipc_port_make_sonce(device->port);
+ ip_lock(device->port);
+ ipc_port_nsrequest(device->port, 1, notify, &notify);
+ assert(notify == IP_NULL);
+
+ /*
+ * Open the device.
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_OPEN | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_error = 0;
+ ior->io_done = ds_open_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ result = (*device->dev_ops->d_open)(device->dev_number, (int)mode, ior);
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Return result via ds_open_done.
+ */
+ ior->io_error = result;
+ (void) ds_open_done(ior);
+
+ io_req_free(ior);
+
+ return (MIG_NO_REPLY); /* reply already sent */
+}
+
+boolean_t
+ds_open_done(ior)
+ register io_req_t ior;
+{
+ kern_return_t result;
+ register mach_device_t device;
+
+ device = ior->io_device;
+ result = ior->io_error;
+
+ if (result != D_SUCCESS) {
+ /*
+ * Open failed. Deallocate port and device.
+ */
+ dev_port_remove(device);
+ ipc_port_dealloc_kernel(device->port);
+ device->port = IP_NULL;
+
+ device_lock(device);
+ device->state = DEV_STATE_INIT;
+ if (device->io_wait) {
+ device->io_wait = FALSE;
+ thread_wakeup((event_t)device);
+ }
+ device_unlock(device);
+
+ mach_device_deallocate(device);
+ device = MACH_DEVICE_NULL;
+ }
+ else {
+ /*
+ * Open succeeded.
+ */
+ device_lock(device);
+ device->state = DEV_STATE_OPEN;
+ device->open_count = 1;
+ if (device->io_wait) {
+ device->io_wait = FALSE;
+ thread_wakeup((event_t)device);
+ }
+ device_unlock(device);
+
+ /* donate device reference to get port */
+ }
+ /*
+ * Must explicitly convert device to port, since
+ * device_reply interface is built as 'user' side
+ * (thus cannot get translation).
+ */
+ if (IP_VALID(ior->io_reply_port)) {
+ (void) ds_device_open_reply(ior->io_reply_port,
+ ior->io_reply_port_type,
+ result,
+#ifdef i386
+ (mach_convert_device_to_port
+ (device)));
+#else
+ convert_device_to_port(device));
+#endif
+ } else
+ mach_device_deallocate(device);
+
+ return (TRUE);
+}
+
+#ifdef i386
+static io_return_t
+device_close(device)
+#else
+io_return_t
+ds_device_close(device)
+#endif
+ register mach_device_t device;
+{
+#ifndef i386
+ if (device == DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+#endif
+
+ device_lock(device);
+
+ /*
+ * If device will remain open, do nothing.
+ */
+ if (--device->open_count > 0) {
+ device_unlock(device);
+ return (D_SUCCESS);
+ }
+
+ /*
+ * If device is being closed, do nothing.
+ */
+ if (device->state == DEV_STATE_CLOSING) {
+ device_unlock(device);
+ return (D_SUCCESS);
+ }
+
+ /*
+ * Mark device as closing, to prevent new IO.
+ * Outstanding IO will still be in progress.
+ */
+ device->state = DEV_STATE_CLOSING;
+ device_unlock(device);
+
+ /*
+ * Should we wait here for outstanding IO to end?
+ * Only if the device wants us to.
+ */
+
+ /*
+ * Remove the device-port association.
+ */
+ dev_port_remove(device);
+ ipc_port_dealloc_kernel(device->port);
+
+ /*
+ * Close the device
+ */
+ (*device->dev_ops->d_close)(device->dev_number);
+
+ /*
+ * Finally mark it closed. If someone else is trying
+ * to open it, the open can now proceed.
+ */
+ device_lock(device);
+ device->state = DEV_STATE_INIT;
+ if (device->io_wait) {
+ device->io_wait = FALSE;
+ thread_wakeup((event_t)device);
+ }
+ device_unlock(device);
+
+ return (D_SUCCESS);
+}
+
+/*
+ * Write to a device.
+ */
+#ifdef i386
+static io_return_t
+device_write(d, reply_port, reply_port_type, mode, recnum,
+ data, data_count, bytes_written)
+ void *d;
+#else
+io_return_t
+ds_device_write(device, reply_port, reply_port_type, mode, recnum,
+ data, data_count, bytes_written)
+ register mach_device_t device;
+#endif
+ ipc_port_t reply_port;
+ mach_msg_type_name_t reply_port_type;
+ dev_mode_t mode;
+ recnum_t recnum;
+ io_buf_ptr_t data;
+ unsigned int data_count;
+ int *bytes_written; /* out */
+{
+#ifdef i386
+ register mach_device_t device = d;
+#endif
+ register io_req_t ior;
+ register io_return_t result;
+
+#ifndef i386
+ /*
+ * Refuse if device is dead or not completely open.
+ */
+ if (device == DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+#endif
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+#ifndef i386
+ if (data == 0)
+ return (D_INVALID_SIZE);
+#endif
+
+ /*
+ * XXX Need logic to reject ridiculously big requests.
+ */
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * Package the write request for the device driver
+ */
+ io_req_alloc(ior, data_count);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_WRITE | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = data;
+ ior->io_count = data_count;
+ ior->io_total = data_count;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_write_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+ ior->io_copy = VM_MAP_COPY_NULL;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * And do the write ...
+ *
+ * device_write_dealloc returns false if there's more
+ * to do; it has updated the ior appropriately and expects
+ * its caller to reinvoke it on the device.
+ */
+
+ do {
+
+ result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Discard the local mapping of the data.
+ */
+
+ } while (!device_write_dealloc(ior));
+
+ /*
+ * Return the number of bytes actually written.
+ */
+ *bytes_written = ior->io_total - ior->io_residual;
+
+ /*
+ * Remove the extra reference.
+ */
+ mach_device_deallocate(device);
+
+ io_req_free(ior);
+ return (result);
+}
+
+/*
+ * Write to a device, but memory is in message.
+ */
+#ifdef i386
+static io_return_t
+device_write_inband(d, reply_port, reply_port_type, mode, recnum,
+ data, data_count, bytes_written)
+ void *d;
+#else
+io_return_t
+ds_device_write_inband(device, reply_port, reply_port_type, mode, recnum,
+ data, data_count, bytes_written)
+ register mach_device_t device;
+#endif
+ ipc_port_t reply_port;
+ mach_msg_type_name_t reply_port_type;
+ dev_mode_t mode;
+ recnum_t recnum;
+ io_buf_ptr_inband_t data;
+ unsigned int data_count;
+ int *bytes_written; /* out */
+{
+#ifdef i386
+ register mach_device_t device = d;
+#endif
+ register io_req_t ior;
+ register io_return_t result;
+
+#ifndef i386
+ /*
+ * Refuse if device is dead or not completely open.
+ */
+ if (device == DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+#endif
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+#ifndef i386
+ if (data == 0)
+ return (D_INVALID_SIZE);
+#endif
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * Package the write request for the device driver.
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_WRITE | IO_CALL | IO_INBAND;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = data;
+ ior->io_count = data_count;
+ ior->io_total = data_count;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_write_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * And do the write.
+ */
+ result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Return the number of bytes actually written.
+ */
+ *bytes_written = ior->io_total - ior->io_residual;
+
+ /*
+ * Remove the extra reference.
+ */
+ mach_device_deallocate(device);
+
+ io_req_free(ior);
+ return (result);
+}
+
+/*
+ * Wire down incoming memory to give to device.
+ */
+kern_return_t
+device_write_get(ior, wait)
+ register io_req_t ior;
+ boolean_t *wait;
+{
+ vm_map_copy_t io_copy;
+ vm_offset_t new_addr;
+ register kern_return_t result;
+ int bsize;
+ vm_size_t min_size;
+
+ /*
+ * By default, caller does not have to wait.
+ */
+ *wait = FALSE;
+
+ /*
+ * Nothing to do if no data.
+ */
+ if (ior->io_count == 0)
+ return (KERN_SUCCESS);
+
+ /*
+ * Loaned iors already have valid data.
+ */
+ if (ior->io_op & IO_LOANED)
+ return (KERN_SUCCESS);
+
+ /*
+ * Inband case.
+ */
+ if (ior->io_op & IO_INBAND) {
+ assert(ior->io_count <= sizeof (io_buf_ptr_inband_t));
+ new_addr = zalloc(io_inband_zone);
+ bcopy((void*)ior->io_data, (void*)new_addr, ior->io_count);
+ ior->io_data = (io_buf_ptr_t)new_addr;
+ ior->io_alloc_size = sizeof (io_buf_ptr_inband_t);
+
+ return (KERN_SUCCESS);
+ }
+
+ /*
+ * Figure out how much data to move this time. If the device
+ * won't return a block size, then we have to do the whole
+ * request in one shot (ditto if this is a block fragment),
+ * otherwise, move at least one block's worth.
+ */
+ result = (*ior->io_device->dev_ops->d_dev_info)(
+ ior->io_device->dev_number,
+ D_INFO_BLOCK_SIZE,
+ &bsize);
+
+ if (result != KERN_SUCCESS || ior->io_count < (vm_size_t) bsize)
+ min_size = (vm_size_t) ior->io_count;
+ else
+ min_size = (vm_size_t) bsize;
+
+ /*
+ * Map the pages from this page list into memory.
+ * io_data records location of data.
+ * io_alloc_size is the vm size of the region to deallocate.
+ */
+ io_copy = (vm_map_copy_t) ior->io_data;
+ result = kmem_io_map_copyout(device_io_map,
+ (vm_offset_t*)&ior->io_data, &new_addr,
+ &ior->io_alloc_size, io_copy, min_size);
+ if (result != KERN_SUCCESS)
+ return (result);
+
+ if ((ior->io_data + ior->io_count) >
+ (((char *)new_addr) + ior->io_alloc_size)) {
+
+ /*
+ * Operation has to be split. Reset io_count for how
+ * much we can do this time.
+ */
+ assert(vm_map_copy_has_cont(io_copy));
+ assert(ior->io_count == io_copy->size);
+ ior->io_count = ior->io_alloc_size -
+ (ior->io_data - ((char *)new_addr));
+
+ /*
+ * Caller must wait synchronously.
+ */
+ ior->io_op &= ~IO_CALL;
+ *wait = TRUE;
+ }
+
+ ior->io_copy = io_copy; /* vm_map_copy to discard */
+ return (KERN_SUCCESS);
+}
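+
+/*
+ * In the split case above, a write too large to map in one piece has
+ * io_count trimmed to the part actually mapped, IO_CALL cleared so
+ * the driver finishes that part synchronously, and *wait set; the
+ * remainder is then picked up when device_write_dealloc() invokes
+ * the vm_map_copy continuation and the caller's loop reissues the
+ * write (see ds_device_write above and ds_write_done below).
+ */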
+
+/*
+ * Clean up memory allocated for IO.
+ */
+boolean_t
+device_write_dealloc(ior)
+ register io_req_t ior;
+{
+ vm_map_copy_t new_copy = VM_MAP_COPY_NULL;
+ register
+ vm_map_copy_t io_copy;
+ kern_return_t result;
+ vm_offset_t size_to_do;
+ int bsize;
+
+ if (ior->io_alloc_size == 0)
+ return (TRUE);
+
+ /*
+ * Inband case.
+ */
+ if (ior->io_op & IO_INBAND) {
+ zfree(io_inband_zone, (vm_offset_t)ior->io_data);
+
+ return (TRUE);
+ }
+
+ if ((io_copy = ior->io_copy) == VM_MAP_COPY_NULL)
+ return (TRUE);
+
+ /*
+ * To prevent a possible deadlock with the default pager,
+ * we have to release space in the device_io_map before
+ * we allocate any memory. (Which vm_map_copy_invoke_cont
+ * might do.) See the discussion in ds_init.
+ */
+
+ kmem_io_map_deallocate(device_io_map,
+ trunc_page(ior->io_data),
+ (vm_size_t) ior->io_alloc_size);
+
+ if (vm_map_copy_has_cont(io_copy)) {
+
+ /*
+ * Remember how much is left, then
+ * invoke or abort the continuation.
+ */
+ size_to_do = io_copy->size - ior->io_count;
+ if (ior->io_error == 0) {
+ vm_map_copy_invoke_cont(io_copy, &new_copy, &result);
+ }
+ else {
+ vm_map_copy_abort_cont(io_copy);
+ result = KERN_FAILURE;
+ }
+
+ if (result == KERN_SUCCESS && new_copy != VM_MAP_COPY_NULL) {
+ register int res;
+
+ /*
+ * We have a new continuation, reset the ior to
+ * represent the remainder of the request. Must
+ * adjust the recnum because drivers assume
+ * that the residual is zero.
+ */
+ ior->io_op &= ~IO_DONE;
+ ior->io_op |= IO_CALL;
+
+ res = (*ior->io_device->dev_ops->d_dev_info)(
+ ior->io_device->dev_number,
+ D_INFO_BLOCK_SIZE,
+ &bsize);
+
+ if (res != D_SUCCESS)
+ panic("device_write_dealloc: No block size");
+
+ ior->io_recnum += ior->io_count/bsize;
+ ior->io_count = new_copy->size;
+ }
+ else {
+
+ /*
+ * No continuation. Add the amount we did not
+ * transfer to the residual.
+ */
+ ior->io_residual += size_to_do;
+ }
+ }
+
+ /*
+ * Clean up the state for the IO that just completed.
+ */
+ vm_map_copy_discard(ior->io_copy);
+ ior->io_copy = VM_MAP_COPY_NULL;
+ ior->io_data = (char *) new_copy;
+
+ /*
+ * Return FALSE if there's more IO to do.
+ */
+
+ return(new_copy == VM_MAP_COPY_NULL);
+}
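+
+/*
+ * A FALSE return above means "more to write": io_data now points at
+ * the new vm_map_copy and io_count/io_recnum describe the remainder.
+ * Callers therefore drive a write with essentially this loop:
+ *
+ *	do {
+ *		result = (*dev->dev_ops->d_write)(dev->dev_number, ior);
+ *		if (result == D_IO_QUEUED)
+ *			return;		(ds_write_done resumes the loop)
+ *	} while (!device_write_dealloc(ior));
+ *
+ * which is the shape of both ds_device_write above and ds_write_done
+ * below.
+ */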
+
+/*
+ * Send write completion message to client, and discard the data.
+ */
+boolean_t
+ds_write_done(ior)
+ register io_req_t ior;
+{
+ /*
+ * device_write_dealloc discards the data that has been
+ * written, but may decide that there is more to write.
+ */
+ while (!device_write_dealloc(ior)) {
+ register io_return_t result;
+ register mach_device_t device;
+
+ /*
+ * More IO to do -- invoke it.
+ */
+ device = ior->io_device;
+ result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, return FALSE -- not done yet.
+ */
+ if (result == D_IO_QUEUED)
+ return (FALSE);
+ }
+
+ /*
+ * Now the write is really complete. Send reply.
+ */
+
+ if (IP_VALID(ior->io_reply_port)) {
+ (void) (*((ior->io_op & IO_INBAND) ?
+ ds_device_write_reply_inband :
+ ds_device_write_reply))(ior->io_reply_port,
+ ior->io_reply_port_type,
+ ior->io_error,
+ (int) (ior->io_total -
+ ior->io_residual));
+ }
+ mach_device_deallocate(ior->io_device);
+
+ return (TRUE);
+}
+
+/*
+ * Read from a device.
+ */
+#ifdef i386
+static io_return_t
+device_read(d, reply_port, reply_port_type, mode, recnum,
+ bytes_wanted, data, data_count)
+ void *d;
+#else
+io_return_t
+ds_device_read(device, reply_port, reply_port_type, mode, recnum,
+ bytes_wanted, data, data_count)
+ register mach_device_t device;
+#endif
+ ipc_port_t reply_port;
+ mach_msg_type_name_t reply_port_type;
+ dev_mode_t mode;
+ recnum_t recnum;
+ int bytes_wanted;
+ io_buf_ptr_t *data; /* out */
+ unsigned int *data_count; /* out */
+{
+#ifdef i386
+ register mach_device_t device = d;
+#endif
+ register io_req_t ior;
+ register io_return_t result;
+
+#ifdef lint
+ *data = *data;
+ *data_count = *data_count;
+#endif /* lint */
+
+#ifndef i386
+ /*
+ * Refuse if device is dead or not completely open.
+ */
+ if (device == DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+#endif
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * There must be a reply port.
+ */
+ if (!IP_VALID(reply_port)) {
+ printf("ds_* invalid reply port\n");
+ Debugger("ds_* reply_port");
+ return (MIG_NO_REPLY); /* no sense in doing anything */
+ }
+
+ /*
+ * Package the read request for the device driver
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_READ | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = 0; /* driver must allocate data */
+ ior->io_count = bytes_wanted;
+ ior->io_alloc_size = 0; /* no data allocated yet */
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_read_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * And do the read.
+ */
+ result = (*device->dev_ops->d_read)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Return result via ds_read_done.
+ */
+ ior->io_error = result;
+ (void) ds_read_done(ior);
+ io_req_free(ior);
+
+ return (MIG_NO_REPLY); /* reply has already been sent. */
+}
+
+/*
+ * Read from a device, but return the data 'inband.'
+ */
+#ifdef i386
+static io_return_t
+device_read_inband(d, reply_port, reply_port_type, mode, recnum,
+ bytes_wanted, data, data_count)
+ void *d;
+#else
+io_return_t
+ds_device_read_inband(device, reply_port, reply_port_type, mode, recnum,
+ bytes_wanted, data, data_count)
+ register mach_device_t device;
+#endif
+ ipc_port_t reply_port;
+ mach_msg_type_name_t reply_port_type;
+ dev_mode_t mode;
+ recnum_t recnum;
+ int bytes_wanted;
+ char *data; /* pointer to OUT array */
+ unsigned int *data_count; /* out */
+{
+#ifdef i386
+ register mach_device_t device = d;
+#endif
+ register io_req_t ior;
+ register io_return_t result;
+
+#ifdef lint
+ *data = *data;
+ *data_count = *data_count;
+#endif /* lint */
+
+#ifndef i386
+ /*
+ * Refuse if device is dead or not completely open.
+ */
+ if (device == DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+#endif
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * There must be a reply port.
+ */
+ if (!IP_VALID(reply_port)) {
+ printf("ds_* invalid reply port\n");
+ Debugger("ds_* reply_port");
+ return (MIG_NO_REPLY); /* no sense in doing anything */
+ }
+
+ /*
+ * Package the read for the device driver
+ */
+ io_req_alloc(ior, 0);
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_READ | IO_CALL | IO_INBAND;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = 0; /* driver must allocate data */
+ ior->io_count =
+ ((bytes_wanted < sizeof(io_buf_ptr_inband_t)) ?
+ bytes_wanted : sizeof(io_buf_ptr_inband_t));
+ ior->io_alloc_size = 0; /* no data allocated yet */
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_read_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * Do the read.
+ */
+ result = (*device->dev_ops->d_read)(device->dev_number, ior);
+
+ /*
+ * If the io was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Return result, via ds_read_done.
+ */
+ ior->io_error = result;
+ (void) ds_read_done(ior);
+ io_req_free(ior);
+
+ return (MIG_NO_REPLY); /* reply has already been sent. */
+}
+
+
+/*
+ * Allocate wired-down memory for device read.
+ */
+kern_return_t device_read_alloc(ior, size)
+ register io_req_t ior;
+ register vm_size_t size;
+{
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ /*
+ * Nothing to do if no data.
+ */
+ if (ior->io_count == 0)
+ return (KERN_SUCCESS);
+
+ if (ior->io_op & IO_INBAND) {
+ ior->io_data = (io_buf_ptr_t) zalloc(io_inband_zone);
+ ior->io_alloc_size = sizeof(io_buf_ptr_inband_t);
+ } else {
+ size = round_page(size);
+ kr = kmem_alloc(kernel_map, &addr, size);
+ if (kr != KERN_SUCCESS)
+ return (kr);
+
+ ior->io_data = (io_buf_ptr_t) addr;
+ ior->io_alloc_size = size;
+ }
+
+ return (KERN_SUCCESS);
+}
+
+boolean_t ds_read_done(ior)
+ io_req_t ior;
+{
+ vm_offset_t start_data, end_data;
+ vm_offset_t start_sent, end_sent;
+ register vm_size_t size_read;
+
+ if (ior->io_error)
+ size_read = 0;
+ else
+ size_read = ior->io_count - ior->io_residual;
+
+ start_data = (vm_offset_t)ior->io_data;
+ end_data = start_data + size_read;
+
+ start_sent = (ior->io_op & IO_INBAND) ? start_data :
+ trunc_page(start_data);
+ end_sent = (ior->io_op & IO_INBAND) ?
+ start_data + ior->io_alloc_size : round_page(end_data);
+
+ /*
+ * Zero memory that the device did not fill.
+ */
+ if (start_sent < start_data)
+ bzero((char *)start_sent, start_data - start_sent);
+ if (end_sent > end_data)
+ bzero((char *)end_data, end_sent - end_data);
+
+
+ /*
+ * Touch the data being returned, to mark it dirty.
+ * If the pages were filled by DMA, the pmap module
+ * may think that they are clean.
+ */
+ {
+ register vm_offset_t touch;
+ register int c;
+
+ for (touch = start_sent; touch < end_sent; touch += PAGE_SIZE) {
+ c = *(char *)touch;
+ *(char *)touch = c;
+ }
+ }
+
+ /*
+ * Send the data to the reply port - this
+ * unwires and deallocates it.
+ */
+ if (ior->io_op & IO_INBAND) {
+ (void)ds_device_read_reply_inband(ior->io_reply_port,
+ ior->io_reply_port_type,
+ ior->io_error,
+ (char *) start_data,
+ size_read);
+ } else {
+ vm_map_copy_t copy;
+ kern_return_t kr;
+
+ kr = vm_map_copyin_page_list(kernel_map, start_data,
+ size_read, TRUE, TRUE,
+ &copy, FALSE);
+
+ if (kr != KERN_SUCCESS)
+ panic("read_done: vm_map_copyin_page_list failed");
+
+ (void)ds_device_read_reply(ior->io_reply_port,
+ ior->io_reply_port_type,
+ ior->io_error,
+ (char *) copy,
+ size_read);
+ }
+
+ /*
+ * Free any memory that was allocated but not sent.
+ */
+ if (ior->io_count != 0) {
+ if (ior->io_op & IO_INBAND) {
+ if (ior->io_alloc_size > 0)
+ zfree(io_inband_zone, (vm_offset_t)ior->io_data);
+ } else {
+ register vm_offset_t end_alloc;
+
+ end_alloc = start_sent + round_page(ior->io_alloc_size);
+ if (end_alloc > end_sent)
+ (void) vm_deallocate(kernel_map,
+ end_sent,
+ end_alloc - end_sent);
+ }
+ }
+
+ mach_device_deallocate(ior->io_device);
+
+ return (TRUE);
+}
+
+#ifdef i386
+static io_return_t
+device_set_status(d, flavor, status, status_count)
+ void *d;
+#else
+io_return_t
+ds_device_set_status(device, flavor, status, status_count)
+ register mach_device_t device;
+#endif
+ dev_flavor_t flavor;
+ dev_status_t status;
+ mach_msg_type_number_t status_count;
+{
+#ifdef i386
+ register mach_device_t device = d;
+#endif
+
+#ifndef i386
+ /*
+ * Refuse if device is dead or not completely open.
+ */
+ if (device == DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+#endif
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ return ((*device->dev_ops->d_setstat)(device->dev_number,
+ flavor,
+ status,
+ status_count));
+}
+
+#ifdef i386
+io_return_t
+mach_device_get_status(d, flavor, status, status_count)
+ void *d;
+#else
+io_return_t
+ds_device_get_status(device, flavor, status, status_count)
+ register mach_device_t device;
+#endif
+ dev_flavor_t flavor;
+ dev_status_t status; /* pointer to OUT array */
+ mach_msg_type_number_t *status_count; /* out */
+{
+#ifdef i386
+ register mach_device_t device = d;
+#endif
+
+#ifndef i386
+ /*
+ * Refuse if device is dead or not completely open.
+ */
+ if (device == DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+#endif
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ return ((*device->dev_ops->d_getstat)(device->dev_number,
+ flavor,
+ status,
+ status_count));
+}
+
+#ifdef i386
+static io_return_t
+device_set_filter(d, receive_port, priority, filter, filter_count)
+ void *d;
+#else
+io_return_t
+ds_device_set_filter(device, receive_port, priority, filter, filter_count)
+ register mach_device_t device;
+#endif
+ ipc_port_t receive_port;
+ int priority;
+ filter_t filter[]; /* pointer to IN array */
+ unsigned int filter_count;
+{
+#ifdef i386
+ register mach_device_t device = d;
+#endif
+
+#ifndef i386
+ /*
+ * Refuse if device is dead or not completely open.
+ */
+ if (device == DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+#endif
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * Request is absurd if no receive port is specified.
+ */
+ if (!IP_VALID(receive_port))
+ return (D_INVALID_OPERATION);
+
+ return ((*device->dev_ops->d_async_in)(device->dev_number,
+ receive_port,
+ priority,
+ filter,
+ filter_count));
+}
+
+#ifdef i386
+static io_return_t
+device_map(d, protection, offset, size, pager, unmap)
+ void *d;
+#else
+io_return_t
+ds_device_map(device, protection, offset, size, pager, unmap)
+ register mach_device_t device;
+#endif
+ vm_prot_t protection;
+ vm_offset_t offset;
+ vm_size_t size;
+ ipc_port_t *pager; /* out */
+ boolean_t unmap; /* ? */
+{
+#ifdef i386
+ register mach_device_t device = d;
+#endif
+
+#ifdef lint
+ unmap = unmap;
+#endif /* lint */
+ if (protection & ~VM_PROT_ALL)
+ return (KERN_INVALID_ARGUMENT);
+
+#ifndef i386
+ /*
+ * Refuse if device is dead or not completely open.
+ */
+ if (device == DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+#endif
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ return (device_pager_setup(device, protection, offset, size,
+ (mach_port_t*)pager));
+}
+
+/*
+ * Doesn't do anything (yet).
+ */
+#ifdef i386
+static void
+#else
+void
+#endif
+ds_no_senders(notification)
+ mach_no_senders_notification_t *notification;
+{
+ printf("ds_no_senders called! device_port=0x%x count=%d\n",
+ notification->not_header.msgh_remote_port,
+ notification->not_count);
+}
+
+#ifndef i386
+boolean_t
+ds_notify(msg)
+ mach_msg_header_t *msg;
+{
+ switch (msg->msgh_id) {
+ case MACH_NOTIFY_NO_SENDERS:
+ ds_no_senders((mach_no_senders_notification_t *) msg);
+ return TRUE;
+
+ default:
+ printf("ds_notify: strange notification %d\n", msg->msgh_id);
+ return FALSE;
+ }
+}
+#endif
+
+queue_head_t io_done_list;
+decl_simple_lock_data(, io_done_list_lock)
+
+#define splio splsched /* XXX must block ALL io devices */
+
+void iodone(ior)
+ register io_req_t ior;
+{
+ register spl_t s;
+
+ /*
+ * If this ior was loaned to us, return it directly.
+ */
+ if (ior->io_op & IO_LOANED) {
+ (*ior->io_done)(ior);
+ return;
+ }
+ /*
+ * If !IO_CALL, some thread is waiting for this. Must lock
+ * structure to interlock correctly with iowait(). Else can
+ * toss on queue for io_done thread to call completion.
+ */
+ s = splio();
+ if ((ior->io_op & IO_CALL) == 0) {
+ ior_lock(ior);
+ ior->io_op |= IO_DONE;
+ ior->io_op &= ~IO_WANTED;
+ ior_unlock(ior);
+ thread_wakeup((event_t)ior);
+ } else {
+ ior->io_op |= IO_DONE;
+ simple_lock(&io_done_list_lock);
+ enqueue_tail(&io_done_list, (queue_entry_t)ior);
+ thread_wakeup((event_t)&io_done_list);
+ simple_unlock(&io_done_list_lock);
+ }
+ splx(s);
+}
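+
+/*
+ * The !IO_CALL branch above pairs with iowait() below: a caller
+ * doing synchronous IO leaves IO_CALL clear and sleeps on the ior,
+ * roughly
+ *
+ *	ior->io_op = IO_READ;		(no IO_CALL)
+ *	result = (*dev->dev_ops->d_read)(dev->dev_number, ior);
+ *	if (result == D_IO_QUEUED)
+ *		iowait(ior);		(woken here by iodone)
+ *
+ * while IO_CALL iors that were not loaned are queued for the
+ * io_done thread, which runs their io_done completion routines from
+ * thread context.
+ */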
+
+void io_done_thread_continue()
+{
+ for (;;) {
+ register spl_t s;
+ register io_req_t ior;
+
+#if defined (i386) && defined (LINUX_DEV)
+ free_skbuffs ();
+#endif
+ s = splio();
+ simple_lock(&io_done_list_lock);
+ while ((ior = (io_req_t)dequeue_head(&io_done_list)) != 0) {
+ simple_unlock(&io_done_list_lock);
+ (void) splx(s);
+
+ if ((*ior->io_done)(ior)) {
+ /*
+ * IO done - free io_req_elt
+ */
+ io_req_free(ior);
+ }
+ /* else routine has re-queued it somewhere */
+
+ s = splio();
+ simple_lock(&io_done_list_lock);
+ }
+
+ assert_wait(&io_done_list, FALSE);
+ simple_unlock(&io_done_list_lock);
+ (void) splx(s);
+ counter(c_io_done_thread_block++);
+ thread_block(io_done_thread_continue);
+ }
+}
+
+void io_done_thread()
+{
+ /*
+ * Set thread privileges and highest priority.
+ */
+ current_thread()->vm_privilege = TRUE;
+ stack_privilege(current_thread());
+ thread_set_own_priority(0);
+
+ io_done_thread_continue();
+ /*NOTREACHED*/
+}
+
+#define DEVICE_IO_MAP_SIZE (2 * 1024 * 1024)
+
+extern void ds_trap_init(void); /* forward */
+
+void ds_init()
+{
+ vm_offset_t device_io_min, device_io_max;
+
+ queue_init(&io_done_list);
+ simple_lock_init(&io_done_list_lock);
+
+ device_io_map = kmem_suballoc(kernel_map,
+ &device_io_min,
+ &device_io_max,
+ DEVICE_IO_MAP_SIZE,
+ FALSE);
+ /*
+ * If the kernel receives many device_write requests, the
+ * device_io_map might run out of space. To prevent
+ * device_write_get from failing in this case, we enable
+ * wait_for_space on the map. This causes kmem_io_map_copyout
+ * to block until there is sufficient space.
+ * (XXX Large writes may be starved by small writes.)
+ *
+ * There is a potential deadlock problem with this solution,
+ * if a device_write from the default pager has to wait
+ * for the completion of a device_write which needs to wait
+ * for memory allocation. Hence, once device_write_get
+ * allocates space in device_io_map, no blocking memory
+ * allocations should happen until device_write_dealloc
+ * frees the space. (XXX A large write might starve
+ * a small write from the default pager.)
+ */
+ device_io_map->wait_for_space = TRUE;
+
+ io_inband_zone = zinit(sizeof(io_buf_ptr_inband_t),
+ 1000 * sizeof(io_buf_ptr_inband_t),
+ 10 * sizeof(io_buf_ptr_inband_t),
+ FALSE,
+ "io inband read buffers");
+
+ ds_trap_init();
+}
+
+void iowait(ior)
+ io_req_t ior;
+{
+ spl_t s;
+
+ s = splio();
+ ior_lock(ior);
+ while ((ior->io_op&IO_DONE)==0) {
+ assert_wait((event_t)ior, FALSE);
+ ior_unlock(ior);
+ thread_block((void (*)()) 0);
+ ior_lock(ior);
+ }
+ ior_unlock(ior);
+ splx(s);
+}
+
+
+/*
+ * Device trap support.
+ */
+
+/*
+ * Memory Management
+ *
+ * This currently has a single pool of 2k wired buffers
+ * since we only handle writes to an ethernet device.
+ * Should be more general.
+ */
+#define IOTRAP_REQSIZE 2048
+
+zone_t io_trap_zone;
+
+/*
+ * Initialization. Called from ds_init().
+ */
+void
+ds_trap_init(void)
+{
+ io_trap_zone = zinit(IOTRAP_REQSIZE,
+ 256 * IOTRAP_REQSIZE,
+ 16 * IOTRAP_REQSIZE,
+ FALSE,
+ "wired device trap buffers");
+}
+
+/*
+ * Allocate an io_req_t.
+ * Currently zalloc's from io_trap_zone.
+ *
+ * Could have lists of different size zones.
+ * Could call a device-specific routine.
+ */
+io_req_t
+ds_trap_req_alloc(mach_device_t device, vm_size_t data_size)
+{
+ return (io_req_t) zalloc(io_trap_zone);
+}
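+
+/*
+ * Note that each loaned buffer is a single IOTRAP_REQSIZE (2K) chunk
+ * holding the struct io_req and, immediately after it, the data that
+ * the write traps below copy in; those traps implicitly assume
+ * data_count <= IOTRAP_REQSIZE - sizeof(struct io_req), consistent
+ * with the ethernet-only use noted above.
+ */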
+
+/*
+ * Called by iodone to release ior.
+ */
+boolean_t
+ds_trap_write_done(io_req_t ior)
+{
+ register mach_device_t dev;
+
+ dev = ior->io_device;
+
+ /*
+ * Should look at reply port and maybe send a message.
+ */
+ zfree(io_trap_zone, ior);
+
+ /*
+ * Give up device reference from ds_write_trap.
+ */
+ mach_device_deallocate(dev);
+ return TRUE;
+}
+
+/*
+ * Like device_write except that data is in user space.
+ */
+#ifdef i386
+static io_return_t
+device_write_trap (void *d, dev_mode_t mode,
+ recnum_t recnum, vm_offset_t data, vm_size_t data_count)
+#else
+io_return_t
+ds_device_write_trap(device_t device,
+ dev_mode_t mode,
+ recnum_t recnum,
+ vm_offset_t data,
+ vm_size_t data_count)
+#endif
+{
+#ifdef i386
+ mach_device_t device = d;
+#endif
+ io_req_t ior;
+ io_return_t result;
+
+#ifndef i386
+ /*
+ * Refuse if device is dead or not completely open.
+ */
+ if (device == DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+#endif
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * Get a buffer to hold the ioreq.
+ */
+ ior = ds_trap_req_alloc(device, data_count);
+
+ /*
+ * Package the write request for the device driver.
+ */
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_WRITE | IO_CALL | IO_LOANED;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = (io_buf_ptr_t)
+ (vm_offset_t)ior + sizeof(struct io_req);
+ ior->io_count = data_count;
+ ior->io_total = data_count;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_trap_write_done;
+ ior->io_reply_port = IP_NULL; /* XXX */
+ ior->io_reply_port_type = 0; /* XXX */
+
+ /*
+ * Copy the data from user space.
+ */
+ if (data_count > 0)
+ copyin((char *)data, (char *)ior->io_data, data_count);
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * And do the write.
+ */
+ result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Remove the extra reference.
+ */
+ mach_device_deallocate(device);
+
+ zfree(io_trap_zone, ior);
+ return (result);
+}
+
+#ifdef i386
+static io_return_t
+device_writev_trap (void *d, dev_mode_t mode,
+ recnum_t recnum, io_buf_vec_t *iovec, vm_size_t iocount)
+#else
+io_return_t
+ds_device_writev_trap(device_t device,
+ dev_mode_t mode,
+ recnum_t recnum,
+ io_buf_vec_t *iovec,
+ vm_size_t iocount)
+#endif
+{
+#ifdef i386
+ mach_device_t device = d;
+#endif
+ io_req_t ior;
+ io_return_t result;
+ io_buf_vec_t stack_iovec[16]; /* XXX */
+ vm_size_t data_count;
+ int i;
+
+#ifndef i386
+ /*
+ * Refuse if device is dead or not completely open.
+ */
+ if (device == DEVICE_NULL)
+ return (D_NO_SUCH_DEVICE);
+#endif
+
+ if (device->state != DEV_STATE_OPEN)
+ return (D_NO_SUCH_DEVICE);
+
+ /* XXX note that a CLOSE may proceed at any point */
+
+ /*
+ * Copyin user addresses.
+ */
+ if (iocount > 16)
+ return KERN_INVALID_VALUE; /* lame */
+ copyin((char *)iovec,
+ (char *)stack_iovec,
+ iocount * sizeof(io_buf_vec_t));
+ for (data_count = 0, i = 0; i < iocount; i++)
+ data_count += stack_iovec[i].count;
+
+ /*
+ * Get a buffer to hold the ioreq.
+ */
+ ior = ds_trap_req_alloc(device, data_count);
+
+ /*
+ * Package the write request for the device driver.
+ */
+
+ ior->io_device = device;
+ ior->io_unit = device->dev_number;
+ ior->io_op = IO_WRITE | IO_CALL | IO_LOANED;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = (io_buf_ptr_t)
+ (vm_offset_t)ior + sizeof(struct io_req);
+ ior->io_count = data_count;
+ ior->io_total = data_count;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_trap_write_done;
+ ior->io_reply_port = IP_NULL; /* XXX */
+ ior->io_reply_port_type = 0; /* XXX */
+
+ /*
+ * Copy the data from user space.
+ */
+ if (data_count > 0) {
+ vm_offset_t p;
+
+ p = (vm_offset_t) ior->io_data;
+ for (i = 0; i < iocount; i++) {
+ copyin((char *) stack_iovec[i].data,
+ (char *) p,
+ stack_iovec[i].count);
+ p += stack_iovec[i].count;
+ }
+ }
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(device);
+
+ /*
+ * And do the write.
+ */
+ result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+ /*
+ * If the IO was queued, delay reply until it is finished.
+ */
+ if (result == D_IO_QUEUED)
+ return (MIG_NO_REPLY);
+
+ /*
+ * Remove the extra reference.
+ */
+ mach_device_deallocate(device);
+
+ zfree(io_trap_zone, ior);
+ return (result);
+}
+
+#ifdef i386
+struct device_emulation_ops mach_device_emulation_ops =
+{
+ mach_device_reference,
+ mach_device_deallocate,
+ mach_convert_device_to_port,
+ device_open,
+ device_close,
+ device_write,
+ device_write_inband,
+ device_read,
+ device_read_inband,
+ device_set_status,
+ mach_device_get_status,
+ device_set_filter,
+ device_map,
+ ds_no_senders,
+ device_write_trap,
+ device_writev_trap
+};
+#endif
diff --git a/device/ds_routines.h b/device/ds_routines.h
new file mode 100644
index 00000000..fff21fbe
--- /dev/null
+++ b/device/ds_routines.h
@@ -0,0 +1,52 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ *
+ * Device service utility routines.
+ */
+
+#ifndef DS_ROUTINES_H
+#define DS_ROUTINES_H
+
+#include <vm/vm_map.h>
+#include <device/device_types.h>
+
+/*
+ * Map for device IO memory.
+ */
+vm_map_t device_io_map;
+
+kern_return_t device_read_alloc();
+kern_return_t device_write_get();
+boolean_t device_write_dealloc();
+
+boolean_t ds_open_done();
+boolean_t ds_read_done();
+boolean_t ds_write_done();
+
+#endif /* DS_ROUTINES_H */
diff --git a/device/errno.h b/device/errno.h
new file mode 100644
index 00000000..35e31e5e
--- /dev/null
+++ b/device/errno.h
@@ -0,0 +1,45 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ *
+ * Old names for new error codes, for compatibility.
+ */
+
+#ifndef _ERRNO_
+#define _ERRNO_
+
+#include <device/device_types.h> /* the real error names */
+
+#define EIO D_IO_ERROR
+#define ENXIO D_NO_SUCH_DEVICE
+#define EINVAL D_INVALID_SIZE /* XXX */
+#define EBUSY D_ALREADY_OPEN
+#define ENOTTY D_INVALID_OPERATION
+#define ENOMEM D_NO_MEMORY
+
+#endif /* _ERRNO_ */
diff --git a/device/if_ether.h b/device/if_ether.h
new file mode 100644
index 00000000..da83fcfe
--- /dev/null
+++ b/device/if_ether.h
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Ethernet definitions.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ */
+
+#ifndef _DEVICE_IF_ETHER_H_
+#define _DEVICE_IF_ETHER_H_
+
+#include <sys/types.h>
+
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+struct ether_header {
+ u_char ether_dhost[6];
+ u_char ether_shost[6];
+ u_short ether_type;
+};
+
+#define ETHERMTU 1500
+#define ETHERMIN (60-14)
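+/*
+ * ETHERMIN is the 60-byte minimum frame (excluding the 4-byte CRC)
+ * less the 14-byte header defined above.  ether_type is carried in
+ * network byte order.
+ */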
+
+#ifdef KERNEL
+u_char etherbroadcastaddr[6];
+
+extern char * ether_sprintf();
+#endif /* KERNEL */
+
+#endif /*_DEVICE_IF_ETHER_H_*/
diff --git a/device/if_hdr.h b/device/if_hdr.h
new file mode 100644
index 00000000..64defb71
--- /dev/null
+++ b/device/if_hdr.h
@@ -0,0 +1,150 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Taken from (bsd)net/if.h. Modified for MACH kernel.
+ */
+/*
+ * Copyright (c) 1982, 1986 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * @(#)if.h 7.3 (Berkeley) 6/27/88
+ */
+
+#ifndef _IF_HDR_
+#define _IF_HDR_
+
+#include <kern/lock.h>
+#include <kern/queue.h>
+
+/*
+ * Queue for network output and filter input.
+ */
+struct ifqueue {
+ queue_head_t ifq_head; /* queue of io_req_t */
+ int ifq_len; /* length of queue */
+ int ifq_maxlen; /* maximum length of queue */
+ int ifq_drops; /* number of packets dropped
+ because queue full */
+ decl_simple_lock_data(,
+ ifq_lock) /* lock for queue and counters */
+};
+
+/*
+ * Header for network interface drivers.
+ */
+struct ifnet {
+ short if_unit; /* unit number */
+ short if_flags; /* up/down, broadcast, etc. */
+ short if_timer; /* time until if_watchdog called */
+ short if_mtu; /* maximum transmission unit */
+ short if_header_size; /* length of header */
+ short if_header_format; /* format of hardware header */
+ short if_address_size; /* length of hardware address */
+ short if_alloc_size; /* size of read buffer to allocate */
+ char *if_address; /* pointer to hardware address */
+ struct ifqueue if_snd; /* output queue */
+ queue_head_t if_rcv_port_list; /* input filter list */
+ decl_simple_lock_data(,
+ if_rcv_port_list_lock) /* lock for filter list */
+/* statistics */
+ int if_ipackets; /* packets received */
+ int if_ierrors; /* input errors */
+ int if_opackets; /* packets sent */
+ int if_oerrors; /* output errors */
+ int if_collisions; /* collisions on csma interfaces */
+ int if_rcvdrops; /* packets received but dropped */
+};
+
+#define IFF_UP 0x0001 /* interface is up */
+#define IFF_BROADCAST 0x0002 /* interface can broadcast */
+#define IFF_DEBUG 0x0004 /* turn on debugging */
+#define IFF_LOOPBACK 0x0008 /* is a loopback net */
+#define IFF_POINTOPOINT 0x0010 /* point-to-point link */
+#define IFF_RUNNING 0x0040 /* resources allocated */
+#define IFF_NOARP 0x0080 /* no address resolution protocol */
+#define IFF_PROMISC 0x0100 /* receive all packets */
+#define IFF_ALLMULTI 0x0200 /* receive all multicast packets */
+#define IFF_BRIDGE 0x0100 /* support token ring routing field */
+#define IFF_SNAP 0x0200 /* support extended sap header */
+
+/* internal flags only: */
+#define IFF_CANTCHANGE (IFF_BROADCAST | IFF_POINTOPOINT | IFF_RUNNING)
+
+/*
+ * Output queues (ifp->if_snd)
+ * have queues of messages stored on ifqueue structures. Entries
+ * are added to and deleted from these structures by these macros, which
+ * should be called with ipl raised to splimp().
+ * XXX locking XXX
+ */
+
+#define IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen)
+#define IF_DROP(ifq) ((ifq)->ifq_drops++)
+#define IF_ENQUEUE(ifq, ior) { \
+ simple_lock(&(ifq)->ifq_lock); \
+ enqueue_tail(&(ifq)->ifq_head, (queue_entry_t)ior); \
+ (ifq)->ifq_len++; \
+ simple_unlock(&(ifq)->ifq_lock); \
+}
+#define IF_PREPEND(ifq, ior) { \
+ simple_lock(&(ifq)->ifq_lock); \
+ enqueue_head(&(ifq)->ifq_head, (queue_entry_t)ior); \
+ (ifq)->ifq_len++; \
+ simple_unlock(&(ifq)->ifq_lock); \
+}
+
+#define IF_DEQUEUE(ifq, ior) { \
+ simple_lock(&(ifq)->ifq_lock); \
+ if (((ior) = (io_req_t)dequeue_head(&(ifq)->ifq_head)) != 0) \
+ (ifq)->ifq_len--; \
+ simple_unlock(&(ifq)->ifq_lock); \
+}
+
+#define IFQ_MAXLEN 50
+
+#define IFQ_INIT(ifq) { \
+ queue_init(&(ifq)->ifq_head); \
+ simple_lock_init(&(ifq)->ifq_lock); \
+ (ifq)->ifq_len = 0; \
+ (ifq)->ifq_maxlen = IFQ_MAXLEN; \
+ (ifq)->ifq_drops = 0; \
+}
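+/*
+ * Example (illustrative only): net_write() in device/net_io.c
+ * enqueues a wired io_req with IF_ENQUEUE(&ifp->if_snd, ior) and then
+ * calls the driver's start routine; a typical start routine drains
+ * the queue at splimp:
+ *
+ *	io_req_t ior;
+ *
+ *	IF_DEQUEUE(&ifp->if_snd, ior);
+ *	if (ior != 0)
+ *		(hand ior->io_data / ior->io_count to the hardware);
+ */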
+
+#define IFNET_SLOWHZ 1 /* granularity is 1 second */
+
+#endif /* _IF_HDR_ */
diff --git a/device/io_req.h b/device/io_req.h
new file mode 100644
index 00000000..76e55b69
--- /dev/null
+++ b/device/io_req.h
@@ -0,0 +1,141 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 10/88
+ */
+
+#ifndef _IO_REQ_
+#define _IO_REQ_
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/vm_param.h>
+#include <kern/kalloc.h>
+#include <kern/lock.h>
+#include <vm/vm_page.h>
+#include <device/device_types.h>
+#include <device/dev_hdr.h>
+
+#include <kern/macro_help.h>
+
+/*
+ * IO request element, queued on device for delayed replies.
+ */
+struct io_req {
+ struct io_req * io_next; /* next, ... */
+ struct io_req * io_prev; /* prev pointers: link in done,
+					   deferred, or in-progress list */
+ mach_device_t io_device; /* pointer to open-device structure */
+ char * io_dev_ptr; /* pointer to driver structure -
+ filled in by driver if necessary */
+ int io_unit; /* unit number ('minor') of device */
+ int io_op; /* IO operation */
+ dev_mode_t io_mode; /* operation mode (wait, truncate) */
+ recnum_t io_recnum; /* starting record number for
+ random-access devices */
+
+ union io_un {
+ io_buf_ptr_t data; /* data, for IO requests */
+ } io_un;
+#define io_data io_un.data
+
+ long io_count; /* amount requested */
+ long io_alloc_size; /* amount allocated */
+ long io_residual; /* amount NOT done */
+ io_return_t io_error; /* error code */
+ boolean_t (*io_done)(); /* call when done - returns TRUE
+ if IO really finished */
+ struct ipc_port *io_reply_port; /* reply port, for asynchronous
+ messages */
+ mach_msg_type_name_t io_reply_port_type;
+ /* send or send-once right? */
+ struct io_req * io_link; /* forward link (for driver header) */
+ struct io_req * io_rlink; /* reverse link (for driver header) */
+ vm_map_copy_t io_copy; /* vm_map_copy obj. for this op. */
+ long io_total; /* total op size, for write */
+ decl_simple_lock_data(,io_req_lock)
+ /* Lock for this structure */
+ long io_physrec; /* mapping to the physical block
+ number */
+ long io_rectotal; /* total number of blocks to move */
+};
+typedef struct io_req * io_req_t;
+
+/*
+ * LOCKING NOTE: Operations on io_req's are in general single threaded by
+ * the invoking code, obviating the need for a lock. The usual IO_CALL
+ * path through the code is: Initiating thread hands io_req to device driver,
+ * driver passes it to io_done thread, io_done thread sends reply message. No
+ * locking is needed in this sequence. Unfortunately, a synchronous wait
+ * for a buffer requires a lock to avoid problems if the wait and interrupt
+ * happen simultaneously on different processors.
+ */
+
+#define ior_lock(ior) simple_lock(&(ior)->io_req_lock)
+#define ior_unlock(ior) simple_unlock(&(ior)->io_req_lock)
+
+/*
+ * Flags and operations
+ */
+
+#define IO_WRITE 0x00000000 /* operation is write */
+#define IO_READ 0x00000001 /* operation is read */
+#define IO_OPEN 0x00000002 /* operation is open */
+#define IO_DONE 0x00000100 /* operation complete */
+#define IO_ERROR 0x00000200 /* error on operation */
+#define IO_BUSY 0x00000400 /* operation in progress */
+#define IO_WANTED 0x00000800 /* wakeup when no longer BUSY */
+#define IO_BAD 0x00001000 /* bad disk block */
+#define IO_CALL 0x00002000 /* call io_done_thread when done */
+#define IO_INBAND 0x00004000 /* mig call was inband */
+#define IO_INTERNAL 0x00008000 /* internal, device-driver specific */
+#define IO_LOANED 0x00010000 /* ior loaned by another module */
+
+#define IO_SPARE_START 0x00020000 /* start of spare flags */
+
+/*
+ * Standard completion routine for io_requests.
+ */
+void iodone(/* io_req_t */);
+
+/*
+ * Macros to allocate and free IORs - will convert to zones later.
+ */
+#define io_req_alloc(ior,size) \
+ MACRO_BEGIN \
+ (ior) = (io_req_t)kalloc(sizeof(struct io_req)); \
+ simple_lock_init(&(ior)->io_req_lock); \
+ MACRO_END
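+/* Note: the size argument is currently ignored; a fixed-size
+   struct io_req is always kalloc'ed. */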
+
+#define io_req_free(ior) \
+ (kfree((vm_offset_t)(ior), sizeof(struct io_req)))
+
+
+zone_t io_inband_zone; /* for inband reads */
+
+#endif /* _IO_REQ_ */
diff --git a/device/memory_object_reply.cli b/device/memory_object_reply.cli
new file mode 100644
index 00000000..f2cd4803
--- /dev/null
+++ b/device/memory_object_reply.cli
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a client presentation file. */
+
+#define KERNEL_USER 1
+
+#include <mach/mach.defs>
diff --git a/device/net_io.c b/device/net_io.c
new file mode 100644
index 00000000..7714ebe3
--- /dev/null
+++ b/device/net_io.c
@@ -0,0 +1,2168 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/98
+ *
+ * Network IO.
+ *
+ * Packet filter code taken from vaxif/enet.c written at
+ * CMU and Stanford.
+ */
+
+/*
+ * Note: don't depend on anything in this file.
+ * It may change a lot real soon. -cmaeda 11 June 1993
+ */
+
+#include <norma_ether.h>
+#include <mach_ttd.h>
+
+#include <sys/types.h>
+#include <device/net_status.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <device/net_io.h>
+#include <device/if_hdr.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+
+#include <mach/boolean.h>
+#include <mach/vm_param.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_mqueue.h>
+
+#include <kern/counters.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+
+#if NORMA_ETHER
+#include <norma/ipc_ether.h>
+#endif /*NORMA_ETHER*/
+
+#include <machine/machspl.h>
+
+#if MACH_TTD
+#include <ttd/ttd_stub.h>
+#endif /* MACH_TTD */
+
+#if MACH_TTD
+int kttd_async_counter= 0;
+#endif /* MACH_TTD */
+
+
+/*
+ * Packet Buffer Management
+ *
+ * This module manages a private pool of kmsg buffers.
+ */
+
+/*
+ * List of net kmsgs queued to be sent to users.
+ * Messages can be high priority or low priority.
+ * The network thread processes high priority messages first.
+ */
+decl_simple_lock_data(,net_queue_lock)
+boolean_t net_thread_awake = FALSE;
+struct ipc_kmsg_queue net_queue_high;
+int net_queue_high_size = 0;
+int net_queue_high_max = 0; /* for debugging */
+struct ipc_kmsg_queue net_queue_low;
+int net_queue_low_size = 0;
+int net_queue_low_max = 0; /* for debugging */
+
+/*
+ * List of net kmsgs that can be touched at interrupt level.
+ * If it is empty, we will also steal low priority messages.
+ */
+decl_simple_lock_data(,net_queue_free_lock)
+struct ipc_kmsg_queue net_queue_free;
+int net_queue_free_size = 0; /* on free list */
+int net_queue_free_max = 0; /* for debugging */
+
+/*
+ * This value is critical to network performance.
+ * At least this many buffers should be sitting in net_queue_free.
+ * If this is set too small, we will drop network packets.
+ * Even a low drop rate (<1%) can cause severe network throughput problems.
+ * We add one to net_queue_free_min for every filter.
+ */
+int net_queue_free_min = 3;
+
+int net_queue_free_hits = 0; /* for debugging */
+int net_queue_free_steals = 0; /* for debugging */
+int net_queue_free_misses = 0; /* for debugging */
+
+int net_kmsg_send_high_hits = 0; /* for debugging */
+int net_kmsg_send_low_hits = 0; /* for debugging */
+int net_kmsg_send_high_misses = 0; /* for debugging */
+int net_kmsg_send_low_misses = 0; /* for debugging */
+
+int net_thread_awaken = 0; /* for debugging */
+int net_ast_taken = 0; /* for debugging */
+
+decl_simple_lock_data(,net_kmsg_total_lock)
+int net_kmsg_total = 0; /* total allocated */
+int net_kmsg_max; /* initialized below */
+
+vm_size_t net_kmsg_size; /* initialized below */
+
+/*
+ * We want more buffers when there aren't enough in the free queue
+ * and the low priority queue. However, we don't want to allocate
+ * more than net_kmsg_max.
+ */
+
+#define net_kmsg_want_more() \
+ (((net_queue_free_size + net_queue_low_size) < net_queue_free_min) && \
+ (net_kmsg_total < net_kmsg_max))
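+/*
+ * With the default net_queue_free_min of 3 and no filters added yet,
+ * this asks for more buffers only while fewer than three sit on the
+ * free and low-priority queues, and never beyond net_kmsg_max.
+ */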
+
+ipc_kmsg_t
+net_kmsg_get(void)
+{
+ register ipc_kmsg_t kmsg;
+ spl_t s;
+
+ /*
+ * First check the list of free buffers.
+ */
+ s = splimp();
+ simple_lock(&net_queue_free_lock);
+ kmsg = ipc_kmsg_queue_first(&net_queue_free);
+ if (kmsg != IKM_NULL) {
+ ipc_kmsg_rmqueue_first_macro(&net_queue_free, kmsg);
+ net_queue_free_size--;
+ net_queue_free_hits++;
+ }
+ simple_unlock(&net_queue_free_lock);
+
+ if (kmsg == IKM_NULL) {
+ /*
+ * Try to steal from the low priority queue.
+ */
+ simple_lock(&net_queue_lock);
+ kmsg = ipc_kmsg_queue_first(&net_queue_low);
+ if (kmsg != IKM_NULL) {
+ ipc_kmsg_rmqueue_first_macro(&net_queue_low, kmsg);
+ net_queue_low_size--;
+ net_queue_free_steals++;
+ }
+ simple_unlock(&net_queue_lock);
+ }
+
+ if (kmsg == IKM_NULL)
+ net_queue_free_misses++;
+ (void) splx(s);
+
+ if (net_kmsg_want_more() || (kmsg == IKM_NULL)) {
+ boolean_t awake;
+
+ s = splimp();
+ simple_lock(&net_queue_lock);
+ awake = net_thread_awake;
+ net_thread_awake = TRUE;
+ simple_unlock(&net_queue_lock);
+ (void) splx(s);
+
+ if (!awake)
+ thread_wakeup((event_t) &net_thread_awake);
+ }
+
+ return kmsg;
+}
+
+void
+net_kmsg_put(register ipc_kmsg_t kmsg)
+{
+ spl_t s;
+
+ s = splimp();
+ simple_lock(&net_queue_free_lock);
+ ipc_kmsg_enqueue_macro(&net_queue_free, kmsg);
+ if (++net_queue_free_size > net_queue_free_max)
+ net_queue_free_max = net_queue_free_size;
+ simple_unlock(&net_queue_free_lock);
+ (void) splx(s);
+}
+
+void
+net_kmsg_collect(void)
+{
+ register ipc_kmsg_t kmsg;
+ spl_t s;
+
+ s = splimp();
+ simple_lock(&net_queue_free_lock);
+ while (net_queue_free_size > net_queue_free_min) {
+ kmsg = ipc_kmsg_dequeue(&net_queue_free);
+ net_queue_free_size--;
+ simple_unlock(&net_queue_free_lock);
+ (void) splx(s);
+
+ net_kmsg_free(kmsg);
+ simple_lock(&net_kmsg_total_lock);
+ net_kmsg_total--;
+ simple_unlock(&net_kmsg_total_lock);
+
+ s = splimp();
+ simple_lock(&net_queue_free_lock);
+ }
+ simple_unlock(&net_queue_free_lock);
+ (void) splx(s);
+}
+
+void
+net_kmsg_more(void)
+{
+ register ipc_kmsg_t kmsg;
+
+ /*
+ * Replenish net kmsg pool if low. We don't have the locks
+ * necessary to look at these variables, but that's OK because
+ * misread values aren't critical. The danger in this code is
+ * that while we allocate buffers, interrupts are happening
+ * which take buffers out of the free list. If we are not
+ * careful, we will sit in the loop and allocate a zillion
+ * buffers while a burst of packets arrives. So we count
+ * buffers in the low priority queue as available, because
+ * net_kmsg_get will make use of them, and we cap the total
+ * number of buffers we are willing to allocate.
+ */
+
+ while (net_kmsg_want_more()) {
+ simple_lock(&net_kmsg_total_lock);
+ net_kmsg_total++;
+ simple_unlock(&net_kmsg_total_lock);
+ kmsg = net_kmsg_alloc();
+ net_kmsg_put(kmsg);
+ }
+}
+
+/*
+ * Packet Filter Data Structures
+ *
+ * Each network interface has a set of packet filters
+ * that are run on incoming packets.
+ *
+ * Each packet filter may represent a single network
+ * session or multiple network sessions. For example,
+ * all application level TCP sessions would be represented
+ * by a single packet filter data structure.
+ *
+ * If a packet filter has a single session, we use a
+ * struct net_rcv_port to represent it. If the packet
+ * filter represents multiple sessions, we use a
+ * struct net_hash_header to represent it.
+ */
+
+/*
+ * Each interface has a write port and a set of read ports.
+ * Each read port has one or more filters to determine what packets
+ * should go to that port.
+ */
+
+/*
+ * Receive port for net, with packet filter.
+ * This data structure by itself represents a packet
+ * filter for a single session.
+ */
+struct net_rcv_port {
+ queue_chain_t chain; /* list of open_descriptors */
+ ipc_port_t rcv_port; /* port to send packet to */
+ int rcv_qlimit; /* port's qlimit */
+ int rcv_count; /* number of packets received */
+ int priority; /* priority for filter */
+ filter_t *filter_end; /* pointer to end of filter */
+ filter_t filter[NET_MAX_FILTER];
+ /* filter operations */
+};
+typedef struct net_rcv_port *net_rcv_port_t;
+
+zone_t net_rcv_zone; /* zone of net_rcv_port structs */
+
+
+#define NET_HASH_SIZE 256
+#define N_NET_HASH 4
+#define N_NET_HASH_KEYS 4
+
+unsigned int bpf_hash (int, unsigned int *);
+
+/*
+ * A single hash entry.
+ */
+struct net_hash_entry {
+ queue_chain_t chain; /* list of entries with same hval */
+#define he_next chain.next
+#define he_prev chain.prev
+ ipc_port_t rcv_port; /* destination port */
+ int rcv_qlimit; /* qlimit for the port */
+ unsigned int keys[N_NET_HASH_KEYS];
+};
+typedef struct net_hash_entry *net_hash_entry_t;
+
+zone_t net_hash_entry_zone;
+
+/*
+ * This structure represents a packet filter with multiple sessions.
+ *
+ * For example, all application level TCP sessions might be
+ * represented by one of these structures. It looks like a
+ * net_rcv_port struct so that both types can live on the
+ * same packet filter queues.
+ */
+struct net_hash_header {
+ struct net_rcv_port rcv;
+ int n_keys; /* zero if not used */
+ int ref_count; /* reference count */
+ net_hash_entry_t table[NET_HASH_SIZE];
+} filter_hash_header[N_NET_HASH];
+
+typedef struct net_hash_header *net_hash_header_t;
+
+decl_simple_lock_data(,net_hash_header_lock)
+
+#define HASH_ITERATE(head, elt) (elt) = (net_hash_entry_t) (head); do {
+#define HASH_ITERATE_END(head, elt) \
+ (elt) = (net_hash_entry_t) queue_next((queue_entry_t) (elt)); \
+ } while ((elt) != (head));
+
+
+#define FILTER_ITERATE(ifp, fp, nextfp) \
+ for ((fp) = (net_rcv_port_t) queue_first(&(ifp)->if_rcv_port_list);\
+ !queue_end(&(ifp)->if_rcv_port_list, (queue_entry_t)(fp)); \
+ (fp) = (nextfp)) { \
+ (nextfp) = (net_rcv_port_t) queue_next(&(fp)->chain);
+#define FILTER_ITERATE_END }
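+/*
+ * Example (illustrative): the iteration macros bracket a loop body,
+ * and nextfp is fetched before the body runs, so the body may safely
+ * unlink fp from the list (as net_filter and net_set_filter do):
+ *
+ *	FILTER_ITERATE(ifp, fp, nextfp)
+ *	{
+ *		... possibly remove fp ...
+ *	}
+ *	FILTER_ITERATE_END
+ */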
+
+/* entry_p must be net_rcv_port_t or net_hash_entry_t */
+#define ENQUEUE_DEAD(dead, entry_p) { \
+ queue_next(&(entry_p)->chain) = (queue_entry_t) (dead); \
+ (dead) = (queue_entry_t)(entry_p); \
+}
+
+extern boolean_t net_do_filter(); /* CSPF */
+extern int bpf_do_filter(); /* BPF */
+
+
+/*
+ * ethernet_priority:
+ *
+ * This function properly belongs in the ethernet interfaces;
+ * it should not be called by this module. (We get packet
+ * priorities as an argument to net_filter.) It is here
+ * to avoid massive code duplication.
+ *
+ * Returns TRUE for high-priority packets.
+ */
+
+boolean_t ethernet_priority(kmsg)
+ ipc_kmsg_t kmsg;
+{
+ register unsigned char *addr =
+ (unsigned char *) net_kmsg(kmsg)->header;
+
+ /*
+ * A simplistic check for broadcast packets.
+ */
+
+ if ((addr[0] == 0xff) && (addr[1] == 0xff) &&
+ (addr[2] == 0xff) && (addr[3] == 0xff) &&
+ (addr[4] == 0xff) && (addr[5] == 0xff))
+ return FALSE;
+ else
+ return TRUE;
+}
+
+mach_msg_type_t header_type = {
+ MACH_MSG_TYPE_BYTE,
+ 8,
+ NET_HDW_HDR_MAX,
+ TRUE,
+ FALSE,
+ FALSE,
+ 0
+};
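+/* (Field order: msgt_name, msgt_size, msgt_number, msgt_inline,
+   msgt_longform, msgt_deallocate, msgt_unused; cf. the annotated
+   packet_type below.) */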
+
+mach_msg_type_t packet_type = {
+ MACH_MSG_TYPE_BYTE, /* name */
+ 8, /* size */
+ 0, /* number */
+ TRUE, /* inline */
+ FALSE, /* longform */
+ FALSE /* deallocate */
+};
+
+/*
+ * net_deliver:
+ *
+ * Called and returns holding net_queue_lock, at splimp.
+ * Dequeues a message and delivers it at spl0.
+ * Returns FALSE if no messages.
+ */
+boolean_t net_deliver(nonblocking)
+ boolean_t nonblocking;
+{
+ register ipc_kmsg_t kmsg;
+ boolean_t high_priority;
+ struct ipc_kmsg_queue send_list;
+
+ /*
+ * Pick up a pending network message and deliver it.
+ * Deliver high priority messages before low priority.
+ */
+
+ if ((kmsg = ipc_kmsg_dequeue(&net_queue_high)) != IKM_NULL) {
+ net_queue_high_size--;
+ high_priority = TRUE;
+ } else if ((kmsg = ipc_kmsg_dequeue(&net_queue_low)) != IKM_NULL) {
+ net_queue_low_size--;
+ high_priority = FALSE;
+ } else
+ return FALSE;
+ simple_unlock(&net_queue_lock);
+ (void) spl0();
+
+ /*
+ * Run the packet through the filters,
+ * getting back a queue of packets to send.
+ */
+ net_filter(kmsg, &send_list);
+
+ if (!nonblocking) {
+ /*
+ * There is a danger of running out of available buffers
+ * because they all get moved into the high priority queue
+ * or a port queue. In particular, we might need to
+ * allocate more buffers as we pull (previously available)
+ * buffers out of the low priority queue. But we can only
+ * allocate if we are allowed to block.
+ */
+ net_kmsg_more();
+ }
+
+ while ((kmsg = ipc_kmsg_dequeue(&send_list)) != IKM_NULL) {
+ int count;
+
+ /*
+ * Fill in the rest of the kmsg.
+ */
+ count = net_kmsg(kmsg)->net_rcv_msg_packet_count;
+
+ ikm_init_special(kmsg, IKM_SIZE_NETWORK);
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0);
+ /* remember message sizes must be rounded up */
+ kmsg->ikm_header.msgh_size =
+ ((mach_msg_size_t) (sizeof(struct net_rcv_msg)
+ - NET_RCV_MAX + count))+3 &~ 3;
+ kmsg->ikm_header.msgh_local_port = MACH_PORT_NULL;
+ kmsg->ikm_header.msgh_kind = MACH_MSGH_KIND_NORMAL;
+ kmsg->ikm_header.msgh_id = NET_RCV_MSG_ID;
+
+ net_kmsg(kmsg)->header_type = header_type;
+ net_kmsg(kmsg)->packet_type = packet_type;
+ net_kmsg(kmsg)->net_rcv_msg_packet_count = count;
+
+ /*
+ * Send the packet to the destination port. Drop it
+ * if the destination port is over its backlog.
+ */
+
+ if (ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT, 0) ==
+ MACH_MSG_SUCCESS) {
+ if (high_priority)
+ net_kmsg_send_high_hits++;
+ else
+ net_kmsg_send_low_hits++;
+ /* the receiver is responsible for the message now */
+ } else {
+ if (high_priority)
+ net_kmsg_send_high_misses++;
+ else
+ net_kmsg_send_low_misses++;
+ ipc_kmsg_destroy(kmsg);
+ }
+ }
+
+ (void) splimp();
+ simple_lock(&net_queue_lock);
+ return TRUE;
+}
+
+/*
+ * We want to deliver packets using ASTs, so we can avoid the
+ * thread_wakeup/thread_block needed to get to the network
+ * thread. However, we can't allocate memory in the AST handler,
+ * because memory allocation might block. Hence we have the
+ * network thread to allocate memory. The network thread also
+ * delivers packets, so it can be allocating and delivering for a
+ * burst. net_thread_awake is protected by net_queue_lock
+ * (instead of net_queue_free_lock) so that net_packet and
+ * net_ast can safely determine if the network thread is running.
+ * This prevents a race that might leave a packet sitting without
+ * being delivered. It is possible for net_kmsg_get to think
+ * the network thread is awake, and so avoid a wakeup, and then
+ * have the network thread sleep without allocating. The next
+ * net_kmsg_get will do a wakeup.
+ */
+
+void net_ast()
+{
+ spl_t s;
+
+ net_ast_taken++;
+
+ /*
+ * If the network thread is awake, then we would
+ * rather deliver messages from it, because
+ * it can also allocate memory.
+ */
+
+ s = splimp();
+ simple_lock(&net_queue_lock);
+ while (!net_thread_awake && net_deliver(TRUE))
+ continue;
+
+ /*
+ * Prevent an unnecessary AST. Either the network
+ * thread will deliver the messages, or there are
+ * no messages left to deliver.
+ */
+
+ simple_unlock(&net_queue_lock);
+ (void) splsched();
+ ast_off(cpu_number(), AST_NETWORK);
+ (void) splx(s);
+}
+
+void net_thread_continue()
+{
+ for (;;) {
+ spl_t s;
+
+ net_thread_awaken++;
+
+ /*
+ * First get more buffers.
+ */
+ net_kmsg_more();
+
+ s = splimp();
+ simple_lock(&net_queue_lock);
+ while (net_deliver(FALSE))
+ continue;
+
+ net_thread_awake = FALSE;
+ assert_wait(&net_thread_awake, FALSE);
+ simple_unlock(&net_queue_lock);
+ (void) splx(s);
+ counter(c_net_thread_block++);
+ thread_block(net_thread_continue);
+ }
+}
+
+void net_thread()
+{
+ spl_t s;
+
+ /*
+ * We should be very high priority.
+ */
+
+ thread_set_own_priority(0);
+
+ /*
+ * We sleep initially, so that we don't allocate any buffers
+ * unless the network is really in use and they are needed.
+ */
+
+ s = splimp();
+ simple_lock(&net_queue_lock);
+ net_thread_awake = FALSE;
+ assert_wait(&net_thread_awake, FALSE);
+ simple_unlock(&net_queue_lock);
+ (void) splx(s);
+ counter(c_net_thread_block++);
+ thread_block(net_thread_continue);
+ net_thread_continue();
+ /*NOTREACHED*/
+}
+
+void
+reorder_queue(first, last)
+ register queue_t first, last;
+{
+ register queue_entry_t prev, next;
+
+ prev = first->prev;
+ next = last->next;
+
+ prev->next = last;
+ next->prev = first;
+
+ last->prev = prev;
+ last->next = first;
+
+ first->next = next;
+ first->prev = last;
+}
+
+/*
+ * Incoming packet. Header has already been moved to proper place.
+ * We are already at splimp.
+ */
+void
+net_packet(ifp, kmsg, count, priority)
+ register struct ifnet *ifp;
+ register ipc_kmsg_t kmsg;
+ unsigned int count;
+ boolean_t priority;
+{
+ boolean_t awake;
+
+#if NORMA_ETHER
+ if (netipc_net_packet(kmsg, count)) {
+ return;
+ }
+#endif /* NORMA_ETHER */
+
+#if MACH_TTD
+ /*
+ * Do a quick check to see if it is a kernel TTD packet.
+ *
+ * Only check if KernelTTD is enabled, ie. the current
+	 * Only check if KernelTTD is enabled, i.e. the current
+	 * device driver supports TTD and the bootp succeeded.
+ if (kttd_enabled && kttd_handle_async(kmsg)) {
+ /*
+ * Packet was a valid ttd packet and
+ * doesn't need to be passed up to filter.
+ * The ttd code put the used kmsg buffer
+ * back onto the free list.
+ */
+ if (kttd_debug)
+ printf("**%x**", kttd_async_counter++);
+ return;
+ }
+#endif /* MACH_TTD */
+
+ kmsg->ikm_header.msgh_remote_port = (mach_port_t) ifp;
+ net_kmsg(kmsg)->net_rcv_msg_packet_count = count;
+
+ simple_lock(&net_queue_lock);
+ if (priority) {
+ ipc_kmsg_enqueue(&net_queue_high, kmsg);
+ if (++net_queue_high_size > net_queue_high_max)
+ net_queue_high_max = net_queue_high_size;
+ } else {
+ ipc_kmsg_enqueue(&net_queue_low, kmsg);
+ if (++net_queue_low_size > net_queue_low_max)
+ net_queue_low_max = net_queue_low_size;
+ }
+ /*
+ * If the network thread is awake, then we don't
+ * need to take an AST, because the thread will
+ * deliver the packet.
+ */
+ awake = net_thread_awake;
+ simple_unlock(&net_queue_lock);
+
+ if (!awake) {
+ spl_t s = splsched();
+ ast_on(cpu_number(), AST_NETWORK);
+ (void) splx(s);
+ }
+}
+
+int net_filter_queue_reorder = 0; /* non-zero to enable reordering */
+
+/*
+ * Run a packet through the filters, returning a list of messages.
+ * We are *not* called at interrupt level.
+ */
+void
+net_filter(kmsg, send_list)
+ register ipc_kmsg_t kmsg;
+ ipc_kmsg_queue_t send_list;
+{
+ register struct ifnet *ifp;
+ register net_rcv_port_t infp, nextfp;
+ register ipc_kmsg_t new_kmsg;
+
+ net_hash_entry_t entp, *hash_headp;
+ ipc_port_t dest;
+ queue_entry_t dead_infp = (queue_entry_t) 0;
+ queue_entry_t dead_entp = (queue_entry_t) 0;
+ unsigned int ret_count;
+
+ int count = net_kmsg(kmsg)->net_rcv_msg_packet_count;
+ ifp = (struct ifnet *) kmsg->ikm_header.msgh_remote_port;
+ ipc_kmsg_queue_init(send_list);
+
+ /*
+ * Unfortunately we can't allocate or deallocate memory
+ * while holding this lock. And we can't drop the lock
+ * while examining the filter list.
+ */
+ simple_lock(&ifp->if_rcv_port_list_lock);
+ FILTER_ITERATE(ifp, infp, nextfp)
+ {
+ entp = (net_hash_entry_t) 0;
+ if (infp->filter[0] == NETF_BPF) {
+ ret_count = bpf_do_filter(infp, net_kmsg(kmsg)->packet, count,
+ net_kmsg(kmsg)->header,
+ &hash_headp, &entp);
+ if (entp == (net_hash_entry_t) 0)
+ dest = infp->rcv_port;
+ else
+ dest = entp->rcv_port;
+ } else {
+ ret_count = net_do_filter(infp, net_kmsg(kmsg)->packet, count,
+ net_kmsg(kmsg)->header);
+ if (ret_count)
+ ret_count = count;
+ dest = infp->rcv_port;
+ }
+
+ if (ret_count) {
+
+ /*
+ * Make a send right for the destination.
+ */
+
+ dest = ipc_port_copy_send(dest);
+ if (!IP_VALID(dest)) {
+ /*
+ * This filter is dead. We remove it from the
+ * filter list and set it aside for deallocation.
+ */
+
+ if (entp == (net_hash_entry_t) 0) {
+ queue_remove(&ifp->if_rcv_port_list, infp,
+ net_rcv_port_t, chain);
+ ENQUEUE_DEAD(dead_infp, infp);
+ continue;
+ } else {
+ hash_ent_remove (ifp,
+ (net_hash_header_t)infp,
+ FALSE, /* no longer used */
+ hash_headp,
+ entp,
+ &dead_entp);
+ continue;
+ }
+ }
+
+ /*
+ * Deliver copy of packet to this channel.
+ */
+ if (ipc_kmsg_queue_empty(send_list)) {
+ /*
+ * Only receiver, so far
+ */
+ new_kmsg = kmsg;
+ } else {
+ /*
+ * Other receivers - must allocate message and copy.
+ */
+ new_kmsg = net_kmsg_get();
+ if (new_kmsg == IKM_NULL) {
+ ipc_port_release_send(dest);
+ break;
+ }
+
+ bcopy(
+ net_kmsg(kmsg)->packet,
+ net_kmsg(new_kmsg)->packet,
+ ret_count);
+ bcopy(
+ net_kmsg(kmsg)->header,
+ net_kmsg(new_kmsg)->header,
+ NET_HDW_HDR_MAX);
+ }
+ net_kmsg(new_kmsg)->net_rcv_msg_packet_count = ret_count;
+ new_kmsg->ikm_header.msgh_remote_port = (mach_port_t) dest;
+ ipc_kmsg_enqueue(send_list, new_kmsg);
+
+ {
+ register net_rcv_port_t prevfp;
+ int rcount = ++infp->rcv_count;
+
+ /*
+ * See if ordering of filters is wrong
+ */
+ if (infp->priority >= NET_HI_PRI) {
+ prevfp = (net_rcv_port_t) queue_prev(&infp->chain);
+ /*
+ * If infp is not the first element on the queue,
+ * and the previous element is at equal priority
+ * but has a lower count, then promote infp to
+ * be in front of prevfp.
+ */
+ if ((queue_t)prevfp != &ifp->if_rcv_port_list &&
+ infp->priority == prevfp->priority) {
+ /*
+ * Threshold difference to prevent thrashing
+ */
+ if (net_filter_queue_reorder
+ && (100 + prevfp->rcv_count < rcount))
+ reorder_queue(&prevfp->chain, &infp->chain);
+ }
+ /*
+ * High-priority filter -> no more deliveries
+ */
+ break;
+ }
+ }
+ }
+ }
+ FILTER_ITERATE_END
+
+ simple_unlock(&ifp->if_rcv_port_list_lock);
+
+ /*
+ * Deallocate dead filters.
+ */
+ if (dead_infp != 0)
+ net_free_dead_infp(dead_infp);
+ if (dead_entp != 0)
+ net_free_dead_entp(dead_entp);
+
+ if (ipc_kmsg_queue_empty(send_list)) {
+ /* Not sent - recycle */
+ net_kmsg_put(kmsg);
+ }
+}
+
+boolean_t
+net_do_filter(infp, data, data_count, header)
+ net_rcv_port_t infp;
+ char * data;
+ unsigned int data_count;
+ char * header;
+{
+ int stack[NET_FILTER_STACK_DEPTH+1];
+ register int *sp;
+ register filter_t *fp, *fpe;
+ register unsigned int op, arg;
+
+ /*
+ * The filter accesses the header and data
+ * as unsigned short words.
+ */
+ data_count /= sizeof(unsigned short);
+
+#define data_word ((unsigned short *)data)
+#define header_word ((unsigned short *)header)
+
+ sp = &stack[NET_FILTER_STACK_DEPTH];
+ fp = &infp->filter[0];
+ fpe = infp->filter_end;
+
+ *sp = TRUE;
+
+ while (fp < fpe) {
+ arg = *fp++;
+ op = NETF_OP(arg);
+ arg = NETF_ARG(arg);
+
+ switch (arg) {
+ case NETF_NOPUSH:
+ arg = *sp++;
+ break;
+ case NETF_PUSHZERO:
+ arg = 0;
+ break;
+ case NETF_PUSHLIT:
+ arg = *fp++;
+ break;
+ case NETF_PUSHIND:
+ arg = *sp++;
+ if (arg >= data_count)
+ return FALSE;
+ arg = data_word[arg];
+ break;
+ case NETF_PUSHHDRIND:
+ arg = *sp++;
+ if (arg >= NET_HDW_HDR_MAX/sizeof(unsigned short))
+ return FALSE;
+ arg = header_word[arg];
+ break;
+ default:
+ if (arg >= NETF_PUSHSTK) {
+ arg = sp[arg - NETF_PUSHSTK];
+ }
+ else if (arg >= NETF_PUSHHDR) {
+ arg = header_word[arg - NETF_PUSHHDR];
+ }
+ else {
+ arg -= NETF_PUSHWORD;
+ if (arg >= data_count)
+ return FALSE;
+ arg = data_word[arg];
+ }
+ break;
+
+ }
+ switch (op) {
+ case NETF_OP(NETF_NOP):
+ *--sp = arg;
+ break;
+ case NETF_OP(NETF_AND):
+ *sp &= arg;
+ break;
+ case NETF_OP(NETF_OR):
+ *sp |= arg;
+ break;
+ case NETF_OP(NETF_XOR):
+ *sp ^= arg;
+ break;
+ case NETF_OP(NETF_EQ):
+ *sp = (*sp == arg);
+ break;
+ case NETF_OP(NETF_NEQ):
+ *sp = (*sp != arg);
+ break;
+ case NETF_OP(NETF_LT):
+ *sp = (*sp < arg);
+ break;
+ case NETF_OP(NETF_LE):
+ *sp = (*sp <= arg);
+ break;
+ case NETF_OP(NETF_GT):
+ *sp = (*sp > arg);
+ break;
+ case NETF_OP(NETF_GE):
+ *sp = (*sp >= arg);
+ break;
+ case NETF_OP(NETF_COR):
+ if (*sp++ == arg)
+ return (TRUE);
+ break;
+ case NETF_OP(NETF_CAND):
+ if (*sp++ != arg)
+ return (FALSE);
+ break;
+ case NETF_OP(NETF_CNOR):
+ if (*sp++ == arg)
+ return (FALSE);
+ break;
+ case NETF_OP(NETF_CNAND):
+ if (*sp++ != arg)
+ return (TRUE);
+ break;
+ case NETF_OP(NETF_LSH):
+ *sp <<= arg;
+ break;
+ case NETF_OP(NETF_RSH):
+ *sp >>= arg;
+ break;
+ case NETF_OP(NETF_ADD):
+ *sp += arg;
+ break;
+ case NETF_OP(NETF_SUB):
+ *sp -= arg;
+ break;
+ }
+ }
+ return ((*sp) ? TRUE : FALSE);
+
+#undef data_word
+#undef header_word
+}
+
+/*
+ * Check filter for invalid operations or stack over/under-flow.
+ */
+boolean_t
+parse_net_filter(filter, count)
+ register filter_t *filter;
+ unsigned int count;
+{
+ register int sp;
+ register filter_t *fpe = &filter[count];
+ register filter_t op, arg;
+
+ sp = NET_FILTER_STACK_DEPTH;
+
+ for (; filter < fpe; filter++) {
+ op = NETF_OP(*filter);
+ arg = NETF_ARG(*filter);
+
+ switch (arg) {
+ case NETF_NOPUSH:
+ break;
+ case NETF_PUSHZERO:
+ sp--;
+ break;
+ case NETF_PUSHLIT:
+ filter++;
+ if (filter >= fpe)
+ return (FALSE); /* literal value not in filter */
+ sp--;
+ break;
+ case NETF_PUSHIND:
+ case NETF_PUSHHDRIND:
+ break;
+ default:
+ if (arg >= NETF_PUSHSTK) {
+ if (arg - NETF_PUSHSTK + sp > NET_FILTER_STACK_DEPTH)
+ return FALSE;
+ }
+ else if (arg >= NETF_PUSHHDR) {
+ if (arg - NETF_PUSHHDR >=
+ NET_HDW_HDR_MAX/sizeof(unsigned short))
+ return FALSE;
+ }
+ /* else... cannot check for packet bounds
+ without packet */
+ sp--;
+ break;
+ }
+ if (sp < 2) {
+ return (FALSE); /* stack overflow */
+ }
+ if (op == NETF_OP(NETF_NOP))
+ continue;
+
+ /*
+ * all non-NOP operators are binary.
+ */
+ if (sp > NET_MAX_FILTER-2)
+ return (FALSE);
+
+ sp++;
+ switch (op) {
+ case NETF_OP(NETF_AND):
+ case NETF_OP(NETF_OR):
+ case NETF_OP(NETF_XOR):
+ case NETF_OP(NETF_EQ):
+ case NETF_OP(NETF_NEQ):
+ case NETF_OP(NETF_LT):
+ case NETF_OP(NETF_LE):
+ case NETF_OP(NETF_GT):
+ case NETF_OP(NETF_GE):
+ case NETF_OP(NETF_COR):
+ case NETF_OP(NETF_CAND):
+ case NETF_OP(NETF_CNOR):
+ case NETF_OP(NETF_CNAND):
+ case NETF_OP(NETF_LSH):
+ case NETF_OP(NETF_RSH):
+ case NETF_OP(NETF_ADD):
+ case NETF_OP(NETF_SUB):
+ break;
+ default:
+ return (FALSE);
+ }
+ }
+ return (TRUE);
+}
+
+/*
+ * Set a filter for a network interface.
+ *
+ * We are given a naked send right for the rcv_port.
+ * If we are successful, we must consume that right.
+ */
+io_return_t
+net_set_filter(ifp, rcv_port, priority, filter, filter_count)
+ struct ifnet *ifp;
+ ipc_port_t rcv_port;
+ int priority;
+ filter_t *filter;
+ unsigned int filter_count;
+{
+ int filter_bytes;
+ bpf_insn_t match;
+ register net_rcv_port_t infp, my_infp;
+ net_rcv_port_t nextfp;
+ net_hash_header_t hhp;
+ register net_hash_entry_t entp, hash_entp;
+ net_hash_entry_t *head, nextentp;
+ queue_entry_t dead_infp, dead_entp;
+ int i;
+ int ret, is_new_infp;
+ io_return_t rval;
+
+ /*
+ * Check the filter syntax.
+ */
+
+ filter_bytes = CSPF_BYTES(filter_count);
+ match = (bpf_insn_t) 0;
+
+ if (filter_count > 0 && filter[0] == NETF_BPF) {
+ ret = bpf_validate((bpf_insn_t)filter, filter_bytes, &match);
+ if (!ret)
+ return (D_INVALID_OPERATION);
+ } else {
+ if (!parse_net_filter(filter, filter_count))
+ return (D_INVALID_OPERATION);
+ }
+
+ rval = D_SUCCESS; /* default return value */
+ dead_infp = dead_entp = 0;
+
+ if (match == (bpf_insn_t) 0) {
+ /*
+ * If there is no match instruction, we allocate
+ * a normal packet filter structure.
+ */
+ my_infp = (net_rcv_port_t) zalloc(net_rcv_zone);
+ my_infp->rcv_port = rcv_port;
+ is_new_infp = TRUE;
+ } else {
+ /*
+	 * If there is a match instruction, we assume there will be
+	 * multiple sessions with a common substructure and allocate
+ * a hash table to deal with them.
+ */
+ my_infp = 0;
+ hash_entp = (net_hash_entry_t) zalloc(net_hash_entry_zone);
+ is_new_infp = FALSE;
+ }
+
+ /*
+ * Look for an existing filter on the same reply port.
+ * Look for filters with dead ports (for GC).
+ * Look for a filter with the same code except KEY insns.
+ */
+
+ simple_lock(&ifp->if_rcv_port_list_lock);
+
+ FILTER_ITERATE(ifp, infp, nextfp)
+ {
+ if (infp->rcv_port == MACH_PORT_NULL) {
+ if (match != 0
+ && infp->priority == priority
+ && my_infp == 0
+ && (infp->filter_end - infp->filter) == filter_count
+ && bpf_eq((bpf_insn_t)infp->filter,
+ filter, filter_bytes))
+ {
+ my_infp = infp;
+ }
+
+ for (i = 0; i < NET_HASH_SIZE; i++) {
+ head = &((net_hash_header_t) infp)->table[i];
+ if (*head == 0)
+ continue;
+
+ /*
+ * Check each hash entry to make sure the
+ * destination port is still valid. Remove
+ * any invalid entries.
+ */
+ entp = *head;
+ do {
+ nextentp = (net_hash_entry_t) entp->he_next;
+
+ /* checked without
+ ip_lock(entp->rcv_port) */
+ if (entp->rcv_port == rcv_port
+ || !IP_VALID(entp->rcv_port)
+ || !ip_active(entp->rcv_port)) {
+
+ ret = hash_ent_remove (ifp,
+ (net_hash_header_t)infp,
+ (my_infp == infp),
+ head,
+ entp,
+ &dead_entp);
+ if (ret)
+ goto hash_loop_end;
+ }
+
+ entp = nextentp;
+				/* The while test checks *head, since
+				   hash_ent_remove might modify it. */
+ } while (*head != 0 && entp != *head);
+ }
+ hash_loop_end:
+ ;
+
+ } else if (infp->rcv_port == rcv_port
+ || !IP_VALID(infp->rcv_port)
+ || !ip_active(infp->rcv_port)) {
+ /* Remove the old filter from list */
+ remqueue(&ifp->if_rcv_port_list, (queue_entry_t)infp);
+ ENQUEUE_DEAD(dead_infp, infp);
+ }
+ }
+ FILTER_ITERATE_END
+
+ if (my_infp == 0) {
+ /* Allocate a dummy infp */
+ simple_lock(&net_hash_header_lock);
+ for (i = 0; i < N_NET_HASH; i++) {
+ if (filter_hash_header[i].n_keys == 0)
+ break;
+ }
+ if (i == N_NET_HASH) {
+ simple_unlock(&net_hash_header_lock);
+ simple_unlock(&ifp->if_rcv_port_list_lock);
+
+ ipc_port_release_send(rcv_port);
+ if (match != 0)
+ zfree (net_hash_entry_zone, (vm_offset_t)hash_entp);
+
+ rval = D_NO_MEMORY;
+ goto clean_and_return;
+ }
+
+ hhp = &filter_hash_header[i];
+ hhp->n_keys = match->jt;
+ simple_unlock(&net_hash_header_lock);
+
+ hhp->ref_count = 0;
+ for (i = 0; i < NET_HASH_SIZE; i++)
+ hhp->table[i] = 0;
+
+ my_infp = (net_rcv_port_t)hhp;
+ my_infp->rcv_port = MACH_PORT_NULL; /* indication of dummy */
+ is_new_infp = TRUE;
+ }
+
+ if (is_new_infp) {
+ my_infp->priority = priority;
+ my_infp->rcv_count = 0;
+
+ /* Copy filter program. */
+ bcopy ((vm_offset_t)filter, (vm_offset_t)my_infp->filter,
+ filter_bytes);
+ my_infp->filter_end =
+ (filter_t *)((char *)my_infp->filter + filter_bytes);
+
+ if (match == 0) {
+ my_infp->rcv_qlimit = net_add_q_info(rcv_port);
+ } else {
+ my_infp->rcv_qlimit = 0;
+ }
+
+ /* Insert my_infp according to priority */
+ queue_iterate(&ifp->if_rcv_port_list, infp, net_rcv_port_t, chain)
+ if (priority > infp->priority)
+ break;
+ enqueue_tail((queue_t)&infp->chain, (queue_entry_t)my_infp);
+ }
+
+ if (match != 0)
+ { /* Insert to hash list */
+ net_hash_entry_t *p;
+ int j;
+
+ hash_entp->rcv_port = rcv_port;
+ for (i = 0; i < match->jt; i++) /* match->jt is n_keys */
+ hash_entp->keys[i] = match[i+1].k;
+ p = &((net_hash_header_t)my_infp)->
+ table[bpf_hash(match->jt, hash_entp->keys)];
+
+ /* Not checking for the same key values */
+ if (*p == 0) {
+ queue_init ((queue_t) hash_entp);
+ *p = hash_entp;
+ } else {
+ enqueue_tail((queue_t)*p, hash_entp);
+ }
+
+ ((net_hash_header_t)my_infp)->ref_count++;
+ hash_entp->rcv_qlimit = net_add_q_info(rcv_port);
+
+ }
+
+ simple_unlock(&ifp->if_rcv_port_list_lock);
+
+clean_and_return:
+ /* No locks are held at this point. */
+
+ if (dead_infp != 0)
+ net_free_dead_infp(dead_infp);
+ if (dead_entp != 0)
+ net_free_dead_entp(dead_entp);
+
+ return (rval);
+}
+
+/*
+ * Other network operations
+ */
+io_return_t
+net_getstat(ifp, flavor, status, count)
+ struct ifnet *ifp;
+ dev_flavor_t flavor;
+ dev_status_t status; /* pointer to OUT array */
+ natural_t *count; /* OUT */
+{
+ switch (flavor) {
+ case NET_STATUS:
+ {
+ register struct net_status *ns = (struct net_status *)status;
+
+ if (*count < NET_STATUS_COUNT)
+ return (D_INVALID_OPERATION);
+
+ ns->min_packet_size = ifp->if_header_size;
+ ns->max_packet_size = ifp->if_header_size + ifp->if_mtu;
+ ns->header_format = ifp->if_header_format;
+ ns->header_size = ifp->if_header_size;
+ ns->address_size = ifp->if_address_size;
+ ns->flags = ifp->if_flags;
+ ns->mapped_size = 0;
+
+ *count = NET_STATUS_COUNT;
+ break;
+ }
+ case NET_ADDRESS:
+ {
+ register int addr_byte_count;
+ register int addr_int_count;
+ register int i;
+
+ addr_byte_count = ifp->if_address_size;
+ addr_int_count = (addr_byte_count + (sizeof(int)-1))
+ / sizeof(int);
+
+ if (*count < addr_int_count)
+ {
+/* XXX debug hack. */
+printf ("net_getstat: count: %d, addr_int_count: %d\n",
+ *count, addr_int_count);
+ return (D_INVALID_OPERATION);
+ }
+
+ bcopy((char *)ifp->if_address,
+ (char *)status,
+ (unsigned) addr_byte_count);
+ if (addr_byte_count < addr_int_count * sizeof(int))
+ bzero((char *)status + addr_byte_count,
+ (unsigned) (addr_int_count * sizeof(int)
+ - addr_byte_count));
+
+ for (i = 0; i < addr_int_count; i++) {
+ register int word;
+
+ word = status[i];
+ status[i] = htonl(word);
+ }
+ *count = addr_int_count;
+ break;
+ }
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+io_return_t
+net_write(ifp, start, ior)
+ register struct ifnet *ifp;
+ int (*start)();
+ io_req_t ior;
+{
+ spl_t s;
+ kern_return_t rc;
+ boolean_t wait;
+
+ /*
+ * Reject the write if the interface is down.
+ */
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
+ return (D_DEVICE_DOWN);
+
+ /*
+ * Reject the write if the packet is too large or too small.
+ */
+ if (ior->io_count < ifp->if_header_size ||
+ ior->io_count > ifp->if_header_size + ifp->if_mtu)
+ return (D_INVALID_SIZE);
+
+ /*
+ * Wire down the memory.
+ */
+
+ rc = device_write_get(ior, &wait);
+ if (rc != KERN_SUCCESS)
+ return (rc);
+
+ /*
+ * Network interfaces can't cope with VM continuations.
+ * If wait is set, just panic.
+ */
+ if (wait) {
+ panic("net_write: VM continuation");
+ }
+
+ /*
+ * Queue the packet on the output queue, and
+ * start the device.
+ */
+ s = splimp();
+ IF_ENQUEUE(&ifp->if_snd, ior);
+ (*start)(ifp->if_unit);
+ splx(s);
+
+ return (D_IO_QUEUED);
+}
+
+#ifdef FIPC
+/* This gets called by nefoutput for dev_ops->d_port_death ... */
+
+io_return_t
+net_fwrite(ifp, start, ior)
+ register struct ifnet *ifp;
+ int (*start)();
+ io_req_t ior;
+{
+ spl_t s;
+ kern_return_t rc;
+ boolean_t wait;
+
+ /*
+ * Reject the write if the interface is down.
+ */
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
+ return (D_DEVICE_DOWN);
+
+ /*
+ * Reject the write if the packet is too large or too small.
+ */
+ if (ior->io_count < ifp->if_header_size ||
+ ior->io_count > ifp->if_header_size + ifp->if_mtu)
+ return (D_INVALID_SIZE);
+
+ /*
+ * DON'T Wire down the memory.
+ */
+#if 0
+ rc = device_write_get(ior, &wait);
+ if (rc != KERN_SUCCESS)
+ return (rc);
+#endif
+ /*
+ * Network interfaces can't cope with VM continuations.
+ * If wait is set, just panic.
+ */
+ /* I'll have to figure out who was setting wait...*/
+#if 0
+ if (wait) {
+ panic("net_write: VM continuation");
+ }
+#endif
+ /*
+ * Queue the packet on the output queue, and
+ * start the device.
+ */
+ s = splimp();
+ IF_ENQUEUE(&ifp->if_snd, ior);
+ (*start)(ifp->if_unit);
+ splx(s);
+
+ return (D_IO_QUEUED);
+}
+#endif /* FIPC */
+
+/*
+ * Initialize the whole package.
+ */
+void
+net_io_init()
+{
+ register vm_size_t size;
+
+ size = sizeof(struct net_rcv_port);
+ net_rcv_zone = zinit(size,
+ size * 1000,
+ PAGE_SIZE,
+ FALSE,
+ "net_rcv_port");
+
+ size = sizeof(struct net_hash_entry);
+ net_hash_entry_zone = zinit(size,
+ size * 100,
+ PAGE_SIZE,
+ FALSE,
+ "net_hash_entry");
+
+ size = ikm_plus_overhead(sizeof(struct net_rcv_msg));
+ net_kmsg_size = round_page(size);
+
+ /*
+ * net_kmsg_max caps the number of buffers
+ * we are willing to allocate. By default,
+ * we allow for net_queue_free_min plus
+ * the queue limit for each filter.
+ * (Added as the filters are added.)
+ */
+
+ simple_lock_init(&net_kmsg_total_lock);
+ if (net_kmsg_max == 0)
+ net_kmsg_max = net_queue_free_min;
+
+ simple_lock_init(&net_queue_free_lock);
+ ipc_kmsg_queue_init(&net_queue_free);
+
+ simple_lock_init(&net_queue_lock);
+ ipc_kmsg_queue_init(&net_queue_high);
+ ipc_kmsg_queue_init(&net_queue_low);
+
+ simple_lock_init(&net_hash_header_lock);
+}
+
+
+/* ======== BPF: Berkeley Packet Filter ======== */
+
+/*-
+ * Copyright (c) 1990-1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf.c 7.5 (Berkeley) 7/15/91
+ */
+
+#if defined(sparc) || defined(mips) || defined(ibm032) || defined(alpha)
+#define BPF_ALIGN
+#endif
+
+#ifndef BPF_ALIGN
+#define EXTRACT_SHORT(p) ((u_short)ntohs(*(u_short *)p))
+#define EXTRACT_LONG(p) (ntohl(*(u_long *)p))
+#else
+#define EXTRACT_SHORT(p)\
+ ((u_short)\
+ ((u_short)*((u_char *)p+0)<<8|\
+ (u_short)*((u_char *)p+1)<<0))
+#define EXTRACT_LONG(p)\
+ ((u_long)*((u_char *)p+0)<<24|\
+ (u_long)*((u_char *)p+1)<<16|\
+ (u_long)*((u_char *)p+2)<<8|\
+ (u_long)*((u_char *)p+3)<<0)
+#endif
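+
+/*
+ * Worked example (illustrative): for the two packet bytes { 0x08, 0x00 },
+ * EXTRACT_SHORT yields 0x0800 regardless of host byte order, and the
+ * BPF_ALIGN variant gives the same result even when the pointer is not
+ * 2-byte aligned, since it assembles the value one byte at a time.
+ */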
+
+/*
+ * Execute the filter program starting at pc on the packet p
+ * wirelen is the length of the original packet
+ * buflen is the amount of data present
+ */
+
+int
+bpf_do_filter(infp, p, wirelen, header, hash_headpp, entpp)
+ net_rcv_port_t infp;
+ char * p; /* packet data */
+ unsigned int wirelen; /* data_count (in bytes) */
+ char * header;
+ net_hash_entry_t **hash_headpp, *entpp; /* out */
+{
+ register bpf_insn_t pc, pc_end;
+ register unsigned int buflen;
+
+ register unsigned long A, X;
+ register int k;
+ long mem[BPF_MEMWORDS];
+
+ pc = ((bpf_insn_t) infp->filter) + 1;
+ /* filter[0].code is BPF_BEGIN */
+ pc_end = (bpf_insn_t)infp->filter_end;
+ buflen = NET_RCV_MAX;
+ *entpp = 0; /* default */
+
+#ifdef lint
+ A = 0;
+ X = 0;
+#endif
+ for (; pc < pc_end; ++pc) {
+ switch (pc->code) {
+
+ default:
+#ifdef KERNEL
+ return 0;
+#else
+ abort();
+#endif
+ case BPF_RET|BPF_K:
+ if (infp->rcv_port == MACH_PORT_NULL &&
+ *entpp == 0) {
+ return 0;
+ }
+ return ((u_int)pc->k <= wirelen) ?
+ pc->k : wirelen;
+
+ case BPF_RET|BPF_A:
+ if (infp->rcv_port == MACH_PORT_NULL &&
+ *entpp == 0) {
+ return 0;
+ }
+ return ((u_int)A <= wirelen) ?
+ A : wirelen;
+
+ case BPF_RET|BPF_MATCH_IMM:
+ if (bpf_match ((net_hash_header_t)infp, pc->jt, mem,
+ hash_headpp, entpp)) {
+ return ((u_int)pc->k <= wirelen) ?
+ pc->k : wirelen;
+ }
+ return 0;
+
+ case BPF_LD|BPF_W|BPF_ABS:
+ k = pc->k;
+ if ((u_int)k + sizeof(long) <= buflen) {
+#ifdef BPF_ALIGN
+ if (((int)(p + k) & 3) != 0)
+ A = EXTRACT_LONG(&p[k]);
+ else
+#endif
+ A = ntohl(*(long *)(p + k));
+ continue;
+ }
+
+ k -= BPF_DLBASE;
+ if ((u_int)k + sizeof(long) <= NET_HDW_HDR_MAX) {
+#ifdef BPF_ALIGN
+ if (((int)(header + k) & 3) != 0)
+ A = EXTRACT_LONG(&header[k]);
+ else
+#endif
+ A = ntohl(*(long *)(header + k));
+ continue;
+ } else {
+ return 0;
+ }
+
+ case BPF_LD|BPF_H|BPF_ABS:
+ k = pc->k;
+ if ((u_int)k + sizeof(short) <= buflen) {
+ A = EXTRACT_SHORT(&p[k]);
+ continue;
+ }
+
+ k -= BPF_DLBASE;
+ if ((u_int)k + sizeof(short) <= NET_HDW_HDR_MAX) {
+ A = EXTRACT_SHORT(&header[k]);
+ continue;
+ } else {
+ return 0;
+ }
+
+ case BPF_LD|BPF_B|BPF_ABS:
+ k = pc->k;
+ if ((u_int)k < buflen) {
+ A = p[k];
+ continue;
+ }
+
+ k -= BPF_DLBASE;
+ if ((u_int)k < NET_HDW_HDR_MAX) {
+ A = header[k];
+ continue;
+ } else {
+ return 0;
+ }
+
+ case BPF_LD|BPF_W|BPF_LEN:
+ A = wirelen;
+ continue;
+
+ case BPF_LDX|BPF_W|BPF_LEN:
+ X = wirelen;
+ continue;
+
+ case BPF_LD|BPF_W|BPF_IND:
+ k = X + pc->k;
+ if (k + sizeof(long) > buflen)
+ return 0;
+#ifdef BPF_ALIGN
+ if (((int)(p + k) & 3) != 0)
+ A = EXTRACT_LONG(&p[k]);
+ else
+#endif
+ A = ntohl(*(long *)(p + k));
+ continue;
+
+ case BPF_LD|BPF_H|BPF_IND:
+ k = X + pc->k;
+ if (k + sizeof(short) > buflen)
+ return 0;
+ A = EXTRACT_SHORT(&p[k]);
+ continue;
+
+ case BPF_LD|BPF_B|BPF_IND:
+ k = X + pc->k;
+ if (k >= buflen)
+ return 0;
+ A = p[k];
+ continue;
+
+ case BPF_LDX|BPF_MSH|BPF_B:
+ k = pc->k;
+ if (k >= buflen)
+ return 0;
+ X = (p[pc->k] & 0xf) << 2;
+ continue;
+
+ case BPF_LD|BPF_IMM:
+ A = pc->k;
+ continue;
+
+ case BPF_LDX|BPF_IMM:
+ X = pc->k;
+ continue;
+
+ case BPF_LD|BPF_MEM:
+ A = mem[pc->k];
+ continue;
+
+ case BPF_LDX|BPF_MEM:
+ X = mem[pc->k];
+ continue;
+
+ case BPF_ST:
+ mem[pc->k] = A;
+ continue;
+
+ case BPF_STX:
+ mem[pc->k] = X;
+ continue;
+
+ case BPF_JMP|BPF_JA:
+ pc += pc->k;
+ continue;
+
+ case BPF_JMP|BPF_JGT|BPF_K:
+ pc += (A > pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGE|BPF_K:
+ pc += (A >= pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JEQ|BPF_K:
+ pc += (A == pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JSET|BPF_K:
+ pc += (A & pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGT|BPF_X:
+ pc += (A > X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGE|BPF_X:
+ pc += (A >= X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JEQ|BPF_X:
+ pc += (A == X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JSET|BPF_X:
+ pc += (A & X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_ALU|BPF_ADD|BPF_X:
+ A += X;
+ continue;
+
+ case BPF_ALU|BPF_SUB|BPF_X:
+ A -= X;
+ continue;
+
+ case BPF_ALU|BPF_MUL|BPF_X:
+ A *= X;
+ continue;
+
+ case BPF_ALU|BPF_DIV|BPF_X:
+ if (X == 0)
+ return 0;
+ A /= X;
+ continue;
+
+ case BPF_ALU|BPF_AND|BPF_X:
+ A &= X;
+ continue;
+
+ case BPF_ALU|BPF_OR|BPF_X:
+ A |= X;
+ continue;
+
+ case BPF_ALU|BPF_LSH|BPF_X:
+ A <<= X;
+ continue;
+
+ case BPF_ALU|BPF_RSH|BPF_X:
+ A >>= X;
+ continue;
+
+ case BPF_ALU|BPF_ADD|BPF_K:
+ A += pc->k;
+ continue;
+
+ case BPF_ALU|BPF_SUB|BPF_K:
+ A -= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_MUL|BPF_K:
+ A *= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_DIV|BPF_K:
+ A /= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_AND|BPF_K:
+ A &= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_OR|BPF_K:
+ A |= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_LSH|BPF_K:
+ A <<= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_RSH|BPF_K:
+ A >>= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_NEG:
+ A = -A;
+ continue;
+
+ case BPF_MISC|BPF_TAX:
+ X = A;
+ continue;
+
+ case BPF_MISC|BPF_TXA:
+ A = X;
+ continue;
+ }
+ }
+
+ return 0;
+}
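+
+/*
+ * Illustrative only: a five-instruction program (each entry written as
+ * {code, jt, jf, k}, assuming that field order for the bpf_insn
+ * structure) that accepts IP packets and rejects everything else,
+ * assuming the Ethernet type field sits at offset 12 of the hardware
+ * header:
+ *
+ *	{ BPF_BEGIN,		 0, 0, 0 },
+ *	{ BPF_LD|BPF_H|BPF_ABS,	 0, 0, BPF_DLBASE + 12 },
+ *	{ BPF_JMP|BPF_JEQ|BPF_K, 0, 1, 0x0800 },
+ *	{ BPF_RET|BPF_K,	 0, 0, 0xffffffff },
+ *	{ BPF_RET|BPF_K,	 0, 0, 0 },
+ *
+ * bpf_do_filter() starts at instruction 1 (instruction 0 is BPF_BEGIN),
+ * loads the 16-bit type field into A, falls through to the first RET
+ * (accept the whole packet) when A == 0x0800, and jumps over it to the
+ * second RET (return 0, reject) otherwise.
+ */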
+
+/*
+ * Return 1 if 'f' is a valid filter program without a MATCH
+ * instruction. Return 2 if it is a valid filter program with a MATCH
+ * instruction. Otherwise, return 0.
+ * The constraints are that each jump be forward and to a valid
+ * code, and that the program terminate with either an accept or a
+ * reject. 'bytes' is the size of the program in bytes; '*match',
+ * which the caller must clear beforehand, is set to point at the
+ * MATCH instruction if there is one.
+ *
+ * The kernel needs to be able to verify an application's filter code.
+ * Otherwise, a bogus program could easily crash the system.
+ */
+int
+bpf_validate(f, bytes, match)
+ bpf_insn_t f;
+ int bytes;
+ bpf_insn_t *match;
+{
+ register int i, j, len;
+ register bpf_insn_t p;
+
+ len = BPF_BYTES2LEN(bytes);
+ /* f[0].code is already checked to be BPF_BEGIN. So skip f[0]. */
+
+ for (i = 1; i < len; ++i) {
+ /*
+		 * Check that the jumps are forward, and within
+ * the code block.
+ */
+ p = &f[i];
+ if (BPF_CLASS(p->code) == BPF_JMP) {
+ register int from = i + 1;
+
+ if (BPF_OP(p->code) == BPF_JA) {
+ if (from + p->k >= len)
+ return 0;
+ }
+ else if (from + p->jt >= len || from + p->jf >= len)
+ return 0;
+ }
+ /*
+ * Check that memory operations use valid addresses.
+ */
+ if ((BPF_CLASS(p->code) == BPF_ST ||
+ (BPF_CLASS(p->code) == BPF_LD &&
+ (p->code & 0xe0) == BPF_MEM)) &&
+ (p->k >= BPF_MEMWORDS || p->k < 0))
+ return 0;
+ /*
+ * Check for constant division by 0.
+ */
+ if (p->code == (BPF_ALU|BPF_DIV|BPF_K) && p->k == 0)
+ return 0;
+ /*
+ * Check for match instruction.
+ * Only one match instruction per filter is allowed.
+ */
+ if (p->code == (BPF_RET|BPF_MATCH_IMM)) {
+ if (*match != 0 ||
+ p->jt == 0 ||
+ p->jt > N_NET_HASH_KEYS)
+ return 0;
+ i += p->jt; /* skip keys */
+ if (i + 1 > len)
+ return 0;
+
+ for (j = 1; j <= p->jt; j++) {
+ if (p[j].code != (BPF_MISC|BPF_KEY))
+ return 0;
+ }
+
+ *match = p;
+ }
+ }
+ if (BPF_CLASS(f[len - 1].code) == BPF_RET)
+ return ((*match == 0) ? 1 : 2);
+ else
+ return 0;
+}
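+
+/*
+ * For example (illustrative), bpf_validate() rejects a program that
+ * does not end in a BPF_RET instruction, one containing
+ *	{ BPF_ALU|BPF_DIV|BPF_K, 0, 0, 0 }
+ * (a constant division by zero), and one whose jumps land beyond the
+ * last instruction, before any of them reach the interpreter above.
+ */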
+
+int
+bpf_eq (f1, f2, bytes)
+ register bpf_insn_t f1, f2;
+ register int bytes;
+{
+ register int count;
+
+ count = BPF_BYTES2LEN(bytes);
+ for (; count--; f1++, f2++) {
+ if (!BPF_INSN_EQ(f1, f2)) {
+ if ( f1->code == (BPF_MISC|BPF_KEY) &&
+ f2->code == (BPF_MISC|BPF_KEY) )
+ continue;
+ return FALSE;
+ }
+ };
+ return TRUE;
+}
+
+unsigned int
+bpf_hash (n, keys)
+ register int n;
+ register unsigned int *keys;
+{
+ register unsigned int hval = 0;
+
+ while (n--) {
+ hval += *keys++;
+ }
+ return (hval % NET_HASH_SIZE);
+}
+
+
+int
+bpf_match (hash, n_keys, keys, hash_headpp, entpp)
+ net_hash_header_t hash;
+ register int n_keys;
+ register unsigned int *keys;
+ net_hash_entry_t **hash_headpp, *entpp;
+{
+ register net_hash_entry_t head, entp;
+ register int i;
+
+ if (n_keys != hash->n_keys)
+ return FALSE;
+
+ *hash_headpp = &hash->table[bpf_hash(n_keys, keys)];
+ head = **hash_headpp;
+
+ if (head == 0)
+ return FALSE;
+
+ HASH_ITERATE (head, entp)
+ {
+ for (i = 0; i < n_keys; i++) {
+ if (keys[i] != entp->keys[i])
+ break;
+ }
+ if (i == n_keys) {
+ *entpp = entp;
+ return TRUE;
+ }
+ }
+ HASH_ITERATE_END (head, entp)
+ return FALSE;
+}
+
+
+/*
+ * Removes a hash entry (ENTP) from its queue (HEAD).
+ * If the reference count of the filter (HP) becomes zero and it is not
+ * USED, HP is removed from ifp->if_rcv_port_list and freed.
+ */
+
+int
+hash_ent_remove (ifp, hp, used, head, entp, dead_p)
+ struct ifnet *ifp;
+ net_hash_header_t hp;
+ int used;
+ net_hash_entry_t *head, entp;
+ queue_entry_t *dead_p;
+{
+ hp->ref_count--;
+
+ if (*head == entp) {
+
+ if (queue_empty((queue_t) entp)) {
+ *head = 0;
+ ENQUEUE_DEAD(*dead_p, entp);
+ if (hp->ref_count == 0 && !used) {
+ remqueue((queue_t) &ifp->if_rcv_port_list,
+ (queue_entry_t)hp);
+ hp->n_keys = 0;
+ return TRUE;
+ }
+ return FALSE;
+ } else {
+ *head = (net_hash_entry_t)queue_next((queue_t) entp);
+ }
+ }
+
+ remqueue((queue_t)*head, (queue_entry_t)entp);
+ ENQUEUE_DEAD(*dead_p, entp);
+ return FALSE;
+}
+
+int
+net_add_q_info (rcv_port)
+ ipc_port_t rcv_port;
+{
+ mach_port_msgcount_t qlimit = 0;
+
+ /*
+ * We use a new port, so increase net_queue_free_min
+ * and net_kmsg_max to allow for more queued messages.
+ */
+
+ if (IP_VALID(rcv_port)) {
+ ip_lock(rcv_port);
+ if (ip_active(rcv_port))
+ qlimit = rcv_port->ip_qlimit;
+ ip_unlock(rcv_port);
+ }
+
+ simple_lock(&net_kmsg_total_lock);
+ net_queue_free_min++;
+ net_kmsg_max += qlimit + 1;
+ simple_unlock(&net_kmsg_total_lock);
+
+ return (int)qlimit;
+}
+
+net_del_q_info (qlimit)
+ int qlimit;
+{
+ simple_lock(&net_kmsg_total_lock);
+ net_queue_free_min--;
+ net_kmsg_max -= qlimit + 1;
+ simple_unlock(&net_kmsg_total_lock);
+}
+
+
+/*
+ * net_free_dead_infp (dead_infp)
+ * queue_entry_t dead_infp; list of dead net_rcv_port_t.
+ *
+ * Deallocates dead net_rcv_port_t.
+ * No locks should be held when called.
+ */
+net_free_dead_infp (dead_infp)
+ queue_entry_t dead_infp;
+{
+ register net_rcv_port_t infp, nextfp;
+
+ for (infp = (net_rcv_port_t) dead_infp; infp != 0; infp = nextfp)
+ {
+ nextfp = (net_rcv_port_t) queue_next(&infp->chain);
+ ipc_port_release_send(infp->rcv_port);
+ net_del_q_info(infp->rcv_qlimit);
+ zfree(net_rcv_zone, (vm_offset_t) infp);
+ }
+}
+
+/*
+ * net_free_dead_entp (dead_entp)
+ * queue_entry_t dead_entp; list of dead net_hash_entry_t.
+ *
+ * Deallocates dead net_hash_entry_t.
+ * No locks should be held when called.
+ */
+net_free_dead_entp (dead_entp)
+ queue_entry_t dead_entp;
+{
+ register net_hash_entry_t entp, nextentp;
+
+ for (entp = (net_hash_entry_t)dead_entp; entp != 0; entp = nextentp)
+ {
+ nextentp = (net_hash_entry_t) queue_next(&entp->chain);
+
+ ipc_port_release_send(entp->rcv_port);
+ net_del_q_info(entp->rcv_qlimit);
+ zfree(net_hash_entry_zone, (vm_offset_t) entp);
+ }
+}
+
diff --git a/device/net_io.h b/device/net_io.h
new file mode 100644
index 00000000..2228e72b
--- /dev/null
+++ b/device/net_io.h
@@ -0,0 +1,80 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ *	Date:	11/89
+ */
+
+#ifndef _DEVICE_NET_IO_H_
+#define _DEVICE_NET_IO_H_
+
+/*
+ * Utilities for playing with network messages.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <ipc/ipc_kmsg.h>
+
+#include <kern/macro_help.h>
+#include <kern/lock.h>
+#include <kern/kalloc.h>
+
+#include <device/net_status.h>
+
+/*
+ * A network packet is wrapped in a kernel message while in
+ * the kernel.
+ */
+
+#define net_kmsg(kmsg) ((net_rcv_msg_t)&(kmsg)->ikm_header)
+
+/*
+ * Interrupt routines may allocate and free net_kmsgs with these
+ * functions. net_kmsg_get may return IKM_NULL.
+ */
+
+extern ipc_kmsg_t net_kmsg_get();
+extern void net_kmsg_put();
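+
+/*
+ * Sketch of the usual receive path in a driver's interrupt handler
+ * (illustrative only; the exact argument list of net_packet() is
+ * defined in net_io.c, not here):
+ *
+ *	kmsg = net_kmsg_get();
+ *	if (kmsg == IKM_NULL)
+ *		drop the frame and count the loss;
+ *	else
+ *		copy the hardware header into net_kmsg(kmsg)->header,
+ *		the payload into net_kmsg(kmsg)->packet, and hand the
+ *		kmsg to the filter machinery with net_packet().
+ */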
+
+/*
+ * Network utility routines.
+ */
+
+extern void net_packet();
+extern void net_filter();
+extern io_return_t net_getstat();
+extern io_return_t net_write();
+
+/*
+ * Non-interrupt code may allocate and free net_kmsgs with these functions.
+ */
+
+extern vm_size_t net_kmsg_size;
+
+#define net_kmsg_alloc() ((ipc_kmsg_t) kalloc(net_kmsg_size))
+#define net_kmsg_free(kmsg) kfree((vm_offset_t) (kmsg), net_kmsg_size)
+
+#endif /* _DEVICE_NET_IO_H_ */
diff --git a/device/param.h b/device/param.h
new file mode 100644
index 00000000..41b4793e
--- /dev/null
+++ b/device/param.h
@@ -0,0 +1,49 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#ifndef _DEVICE_PARAM_H_
+#define _DEVICE_PARAM_H_
+
+/*
+ * Compatibility definitions for disk IO.
+ */
+
+/*
+ * Disk devices do all IO in 512-byte blocks.
+ */
+#define DEV_BSIZE 512
+
+/*
+ * Conversion between bytes and disk blocks.
+ */
+#define btodb(byte_offset) ((byte_offset) >> 9)
+#define dbtob(block_number) ((block_number) << 9)
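+
+/*
+ * For example, an 8 KB transfer spans btodb(8192) == 16 blocks and
+ * dbtob(16) == 8192 bytes; the shift by 9 is exactly a multiply or
+ * divide by DEV_BSIZE (512).
+ */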
+
+#endif /* _DEVICE_PARAM_H_ */
diff --git a/device/subrs.c b/device/subrs.c
new file mode 100644
index 00000000..9d590208
--- /dev/null
+++ b/device/subrs.c
@@ -0,0 +1,140 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Random device subroutines and stubs.
+ */
+
+#include <vm/vm_kern.h>
+#include <device/buf.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+
+
+
+/*
+ * Print out disk name and block number for hard disk errors.
+ */
+void harderr(bp, cp)
+ struct buf *bp;
+ char * cp;
+{
+ printf("%s%d%c: hard error sn%d ",
+ cp,
+ minor(bp->b_dev) >> 3,
+ 'a' + (minor(bp->b_dev) & 0x7),
+ bp->b_blkno);
+}
+
+/*
+ * Ethernet support routines.
+ */
+u_char etherbroadcastaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+/*
+ * Convert Ethernet address to printable (loggable) representation.
+ */
+char *
+ether_sprintf(ap)
+ register u_char *ap;
+{
+	register int i;
+ static char etherbuf[18];
+ register char *cp = etherbuf;
+ static char digits[] = "0123456789abcdef";
+
+ for (i = 0; i < 6; i++) {
+ *cp++ = digits[*ap >> 4];
+ *cp++ = digits[*ap++ & 0xf];
+ *cp++ = ':';
+ }
+ *--cp = 0;
+ return (etherbuf);
+}
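+
+/*
+ * Usage sketch (illustrative, hypothetical `unit' and `addr'): the
+ * result lives in a static buffer, so it must be consumed before the
+ * next call, e.g.
+ *
+ *	printf("se%d: ethernet address %s\n",
+ *	       unit, ether_sprintf(addr));
+ *
+ * For etherbroadcastaddr above this prints "ff:ff:ff:ff:ff:ff".
+ */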
+
+/*
+ * Initialize send and receive queues on an interface.
+ */
+void if_init_queues(ifp)
+ register struct ifnet *ifp;
+{
+ IFQ_INIT(&ifp->if_snd);
+ queue_init(&ifp->if_rcv_port_list);
+ simple_lock_init(&ifp->if_rcv_port_list_lock);
+}
+
+
+/*
+ * Compatibility with BSD device drivers.
+ */
+void sleep(channel, priority)
+ vm_offset_t channel;
+ int priority;
+{
+ assert_wait((event_t) channel, FALSE); /* not interruptible XXX */
+ thread_block((void (*)()) 0);
+}
+
+void wakeup(channel)
+ vm_offset_t channel;
+{
+ thread_wakeup((event_t) channel);
+}
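+
+/*
+ * Illustrative pattern (hypothetical `done' flag): a BSD-style driver
+ * waits for an interrupt with
+ *
+ *	while (!done)
+ *		sleep((vm_offset_t) &done, PRIBIO);
+ *
+ * and its interrupt handler later does
+ *
+ *	done = 1;
+ *	wakeup((vm_offset_t) &done);
+ *
+ * Note that the priority argument is ignored here and the wait is not
+ * interruptible.
+ */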
+
+struct buf *
+geteblk(size)
+ int size;
+{
+ register io_req_t ior;
+
+ io_req_alloc(ior, 0);
+ ior->io_device = (device_t)0;
+ ior->io_unit = 0;
+ ior->io_op = 0;
+ ior->io_mode = 0;
+ ior->io_recnum = 0;
+ ior->io_count = size;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+
+ size = round_page(size);
+ ior->io_alloc_size = size;
+ if (kmem_alloc(kernel_map, (vm_offset_t *)&ior->io_data, size)
+ != KERN_SUCCESS)
+ panic("geteblk");
+
+ return (ior);
+}
+
+void brelse(bp)
+ struct buf *bp;
+{
+ register io_req_t ior = bp;
+
+ (void) vm_deallocate(kernel_map,
+ (vm_offset_t) ior->io_data,
+ (vm_size_t) ior->io_alloc_size);
+ io_req_free(ior);
+}
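+
+/*
+ * Typical (illustrative) pairing: borrow a temporary buffer for a
+ * one-block transfer and release it when the I/O completes.
+ *
+ *	struct buf *bp = geteblk(DEV_BSIZE);
+ *	bp->b_blkno = 0;
+ *	... start the transfer and wait for it ...
+ *	brelse(bp);
+ */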
diff --git a/device/tty.h b/device/tty.h
new file mode 100644
index 00000000..94229962
--- /dev/null
+++ b/device/tty.h
@@ -0,0 +1,203 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ *
+ * Compatibility TTY structure for existing TTY device drivers.
+ */
+
+#ifndef _DEVICE_TTY_H_
+#define _DEVICE_TTY_H_
+
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <mach/port.h>
+
+#include <device/device_types.h>
+#include <device/tty_status.h>
+#include <device/cirbuf.h>
+#include <device/io_req.h>
+
+#ifdef luna88k
+#include <luna88k/jtermio.h>
+#endif
+
+struct tty {
+ decl_simple_lock_data(,t_lock)
+ struct cirbuf t_inq; /* input buffer */
+ struct cirbuf t_outq; /* output buffer */
+ char * t_addr; /* device pointer */
+ int t_dev; /* device number */
+ int (*t_start)(struct tty *);
+ /* routine to start output */
+#define t_oproc t_start
+ int (*t_stop)(struct tty *, int);
+ /* routine to stop output */
+ int (*t_mctl)(struct tty *, int, int);
+ /* (optional) routine to control
+ modem signals */
+ char t_ispeed; /* input speed */
+ char t_ospeed; /* output speed */
+ char t_breakc; /* character to deliver when 'break'
+ condition received */
+ int t_flags; /* mode flags */
+ int t_state; /* current state */
+ int t_line; /* fake line discipline number,
+ for old drivers - always 0 */
+ queue_head_t t_delayed_read; /* pending read requests */
+ queue_head_t t_delayed_write;/* pending write requests */
+ queue_head_t t_delayed_open; /* pending open requests */
+
+/*
+ * Items beyond this point should be removed to device-specific
+ * extension structures.
+ */
+ int (*t_getstat)(); /* routine to get status */
+ int (*t_setstat)(); /* routine to set status */
+ dev_ops_t t_tops; /* another device to possibly
+ push through */
+};
+typedef struct tty *tty_t;
+
+/*
+ * Common TTY service routines
+ */
+extern io_return_t char_open(
+ int dev,
+ struct tty * tp,
+ dev_mode_t mode,
+ io_req_t ior);
+
+extern io_return_t char_read(
+ struct tty * tp,
+ io_req_t ior);
+
+extern io_return_t char_write(
+ struct tty * tp,
+ io_req_t ior);
+
+extern void ttyinput(
+ unsigned int c,
+ struct tty * tp);
+
+extern boolean_t ttymodem(
+ struct tty * tp,
+ boolean_t carrier_up);
+
+extern void tty_queue_completion(
+ queue_t queue);
+#define tt_open_wakeup(tp) \
+ (tty_queue_completion(&(tp)->t_delayed_open))
+#define tt_write_wakeup(tp) \
+ (tty_queue_completion(&(tp)->t_delayed_write))
+
+extern void ttychars(
+ struct tty * tp);
+
+#define TTMINBUF 90
+
+short tthiwat[NSPEEDS], ttlowat[NSPEEDS];
+#define TTHIWAT(tp) tthiwat[(tp)->t_ospeed]
+#define TTLOWAT(tp) ttlowat[(tp)->t_ospeed]
+
+/* internal state bits */
+#define TS_INIT 0x00000001 /* tty structure initialized */
+#define TS_TIMEOUT 0x00000002 /* delay timeout in progress */
+#define TS_WOPEN 0x00000004 /* waiting for open to complete */
+#define TS_ISOPEN 0x00000008 /* device is open */
+#define TS_FLUSH 0x00000010 /* outq has been flushed during DMA */
+#define TS_CARR_ON 0x00000020 /* software copy of carrier-present */
+#define TS_BUSY 0x00000040 /* output in progress */
+#define TS_ASLEEP 0x00000080 /* wakeup when output done */
+
+#define TS_TTSTOP 0x00000100 /* output stopped by ctl-s */
+#define TS_HUPCLS 0x00000200 /* hang up upon last close */
+#define TS_TBLOCK 0x00000400 /* tandem queue blocked */
+
+#define TS_NBIO 0x00001000 /* tty in non-blocking mode */
+#define TS_ONDELAY 0x00002000 /* device is open; software copy of
+ * carrier is not present */
+#define TS_MIN 0x00004000 /* buffer input chars, if possible */
+#define TS_MIN_TO 0x00008000 /* timeout for the above is active */
+
+#define TS_OUT 0x00010000 /* tty in use for dialout only */
+#define TS_RTS_DOWN 0x00020000 /* modem pls stop */
+
+#define TS_TRANSLATE 0x00100000 /* translation device enabled */
+#define TS_KDB 0x00200000 /* should enter kdb on ALT */
+
+#define TS_MIN_TO_RCV	0x00400000 /* character received during
+ receive timeout interval */
+
+/* flags - old names defined in terms of new ones */
+
+#define TANDEM TF_TANDEM
+#define ODDP TF_ODDP
+#define EVENP TF_EVENP
+#define ANYP (ODDP|EVENP)
+#define MDMBUF TF_MDMBUF
+#define LITOUT TF_LITOUT
+#define NOHANG TF_NOHANG
+
+#define ECHO TF_ECHO
+#define CRMOD TF_CRMOD
+#define XTABS TF_XTABS
+
+/* these are here only to let old code compile - they are never set */
+#define RAW LITOUT
+#define PASS8 LITOUT
+
+/*
+ * Hardware bits.
+ * SHOULD NOT BE HERE.
+ */
+#define DONE 0200
+#define IENABLE 0100
+
+/*
+ * Modem control commands.
+ */
+#define DMSET 0
+#define DMBIS 1
+#define DMBIC 2
+#define DMGET 3
+
+/*
+ * Fake 'line discipline' switch, for the benefit of old code
+ * that wants to call through it.
+ */
+struct ldisc_switch {
+ int (*l_read) (struct tty *, io_req_t); /* read */
+ int (*l_write)(struct tty *, io_req_t); /* write */
+ void (*l_rint) (unsigned int, struct tty *); /* character input */
+ boolean_t (*l_modem)(struct tty *, boolean_t); /* modem change */
+ void (*l_start)(struct tty *); /* start output */
+};
+
+extern struct ldisc_switch linesw[];
+
+#endif /* _DEVICE_TTY_H_ */
diff --git a/gensym.awk b/gensym.awk
new file mode 100644
index 00000000..21283214
--- /dev/null
+++ b/gensym.awk
@@ -0,0 +1,78 @@
+#
+# Copyright (c) 1994 The University of Utah and
+# the Computer Systems Laboratory (CSL). All rights reserved.
+#
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+# IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+# ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+#
+# CSL requests users of this software to return to csl-dist@cs.utah.edu any
+# improvements that they make and grant CSL redistribution rights.
+#
+# Author: Bryan Ford, University of Utah CSL
+#
+
+BEGIN {
+ bogus_printed = "no"
+}
+
+# Start the bogus function just before the first sym directive,
+# so that any #includes higher in the file don't get stuffed inside it.
+/^[a-z]/ {
+ if (bogus_printed == "no")
+ {
+ print "void bogus() {";
+ bogus_printed = "yes";
+ }
+}
+
+# Take an arbitrarily complex C symbol or expression and constantize it.
+/^expr/ {
+ print "__asm (\"";
+ if ($3 == "")
+ printf "* %s mAgIc%%0\" : : \"i\" (%s));\n", $2, $2;
+ else
+ printf "* %s mAgIc%%0\" : : \"i\" (%s));\n", $3, $2;
+}
+
+# Output a symbol defining the size of a C structure.
+/^size/ {
+ print "__asm (\"";
+ if ($4 == "")
+ printf "* %s_SIZE mAgIc%%0\" : : \"i\" (sizeof(struct %s)));\n",
+ toupper($3), $2;
+ else
+ printf "* %s mAgIc%%0\" : : \"i\" (sizeof(struct %s)));\n",
+ $4, $2;
+}
+
+# Output a symbol defining the byte offset of an element of a C structure.
+/^offset/ {
+ print "__asm (\"";
+ if ($5 == "")
+ {
+ printf "* %s_%s mAgIc%%0\" : : \"i\" (&((struct %s*)0)->%s));\n",
+ toupper($3), toupper($4), $2, $4;
+ }
+ else
+ {
+ printf "* %s mAgIc%%0\" : : \"i\" (&((struct %s*)0)->%s));\n",
+ toupper($5), $2, $4;
+ }
+}
+
+# Copy through all preprocessor directives.
+/^#/ {
+ print
+}
+
+END {
+ print "}"
+}
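+
+# Example (illustrative): a sym file containing the two directives
+#
+#	size	thread	THREAD
+#	offset	thread	th	pcb
+#
+# is turned into a C file along the lines of
+#
+#	void bogus() {
+#	__asm ("
+#	* THREAD_SIZE mAgIc%0" : : "i" (sizeof(struct thread)));
+#	__asm ("
+#	* TH_PCB mAgIc%0" : : "i" (&((struct thread*)0)->pcb));
+#	}
+#
+# which the build presumably compiles and then postprocesses into
+# assembler definitions of the tagged constants.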
+
diff --git a/i386/Makefrag b/i386/Makefrag
new file mode 100644
index 00000000..23f5e856
--- /dev/null
+++ b/i386/Makefrag
@@ -0,0 +1,111 @@
+# i386 Mach makefile fragment
+# Copyright 1996 Free Software Foundation, Inc.
+# This file is part of GNU Mach. Redistribution terms are not yet decided.
+
+
+# Some of the i386-specific code checks for these.
+DEFINES += -DCONTINUATIONS -DLINUX_DEV -D__ELF__ -Di386
+
+# Source files for any i386 kernel
+i386at-files = autoconf.c blit.c com.c conf.c cons_conf.c fd.c \
+ i386at_ds_routines.c immc.c int_init.c iopl.c kd.c kd_event.c \
+ kd_mouse.c kd_queue.c lpr.c model_dep.c nfd.c nhd.c \
+ phys_mem_grab_page.c pic_isa.c rtc.c
+i386-files = ast_check.c db_disasm.c db_interface.c db_trace.c debug_i386.c \
+ fpe_linkage.c fpu.c gdt.c hardclock.c idt.c io_emulate.c io_map.c \
+ iopb.c ktss.c kttd_interface.c ldt.c loose_ends.c mp_desc.c pcb.c \
+ phys.c pic.c pit.c seg.c trap.c user_ldt.c
+intel-files = pmap.c read_fault.c
+
+# Assembler source
+i386at-Sfiles = boothdr.S interrupt.S kdasm.S
+i386-Sfiles = cswitch.S debug_trace.S idt_inittab.S locore.S spl.S
+
+# Account for them in the image
+objfiles += $(subst .c,.o,$(i386at-files) $(i386-files) $(intel-files)) \
+ $(subst .S,.o,$(i386at-Sfiles) $(i386-Sfiles))
+vpath %.c $(sysdep)/i386at $(sysdep)/i386 $(sysdep)/intel
+vpath %.S $(sysdep)/i386at $(sysdep)/i386
+
+# Files from the generic source that we want
+objfiles += busses.o cirbuf.o
+vpath busses.c $(srcdir)/chips
+
+# FPE emulation
+objfiles += fpe.o
+
+# Mig-generated
+objfiles += mach_i386_server.o
+
+### Linux device drivers (make this Better, Please)
+
+# instead of Mach's KERNEL, Linux uses __KERNEL__. Whee.
+DEFINES += -D__KERNEL__
+
+linux-gen-files = $(addprefix linux_,$(linux-gen-names))
+linux-gen-names = autoirq.c block.c dma.c init.c irq.c kmem.c misc.c net.c \
+ port.c printk.c sched.c soft.c timer.c version.c vsprintf.c
+
+linux-block-files = cmd640.c floppy.c genhd.c ide-cd.c ide.c rz1000.c triton.c
+
+linux-net-files = 3c501.c 3c503.c 3c505.c 3c507.c 3c509.c 3c59x.c \
+ 8390.c Space.c ac3200.c apricot.c at1700.c atp.c \
+ de4x5.c de600.c de620.c depca.c dev.c e2100.c eepro.c eexpress.c \
+ eth16i.c ewrk3.c hp-plus.c hp.c hp100.c lance.c ne.c net_init.c \
+ ni52.c ni65.c seeq8005.c sk_g16.c smc-ultra.c tulip.c \
+ wavelan.c wd.c znet.c
+
+linux-pci-files = bios32.c pci.c
+
+linux-scsi-files = 53c7,8xx.c AM53C974.c BusLogic.c NCR53c406a.c advansys.c \
+ aha152x.c aha1542.c aha1740.c aic7xxx.c constants.c eata.c eata_dma.c \
+ eata_pio.c fdomain.c g_NCR5380.c hosts.c in2000.c pas16.c qlogic.c \
+ scsi.c scsi_debug.c scsi_ioctl.c scsi_proc.c scsicam.c sd.c \
+ sd_ioctl.c seagate.c sr.c sr_ioctl.c t128.c u14-34f.c \
+ ultrastor.c wd7000.c
+
+objfiles += $(subst .c,.o,$(linux-gen-files) $(linux-block-files) \
+ $(linux-net-files) $(linux-pci-files) $(linux-scsi-files))
+vpath %.c $(sysdep)/i386at/gpl/linux $(sysdep)/i386at/gpl/linux/block
+vpath %.c $(sysdep)/i386at/gpl/linux/net $(sysdep)/i386at/gpl/linux/pci
+vpath %.c $(sysdep)/i386at/gpl/linux/scsi
+
+# Because of the use of `extern inline' in some Linux header files without
+# corresponding text-segment definitions, we must always optimize.
+CFLAGS += -O2
+
+### End Linux device drivers grot
+
+
+# XXX Temporary
+i386at_ds_routines.o: device_interface.h
+
+
+
+# Where to find some things
+vpath i386asm.sym $(sysdep)/i386
+vpath mach_i386.srv $(sysdep)/i386
+
+# Make sure boothdr.o is at the very front
+objfiles := boothdr.o $(filter-out boothdr.o,$(objfiles))
+
+# Our include files are here; make sure they PRECEDE the generic ones.
+INCLUDES := -I$(sysdep)/i386at -I$(sysdep)/i386 \
+ -I$(sysdep)/include -I$(sysdep)/include/mach/sa \
+ -I$(sysdep)/bogus -I$(sysdep) \
+ -I$(sysdep)/i386at/gpl/linux -I$(sysdep)/i386at/gpl/linux/include \
+ $(INCLUDES)
+
+
+# arrange suitable load address
+LDFLAGS += -Ttext 100000
+
+# Assemble .S files correctly
+ASFLAGS += -DASSEMBLER
+
+boothdr.o: i386asm.h
+
+# Cheat, cheat, cheat.
+fpe.o: fpe.b_elf
+ uudecode $<
+vpath fpe.b_elf $(sysdep)/i386
diff --git a/i386/bogus/aha.h b/i386/bogus/aha.h
new file mode 100644
index 00000000..370fd1a1
--- /dev/null
+++ b/i386/bogus/aha.h
@@ -0,0 +1 @@
+#define NAHA 1
diff --git a/i386/bogus/asc.h b/i386/bogus/asc.h
new file mode 100644
index 00000000..fcfb5adb
--- /dev/null
+++ b/i386/bogus/asc.h
@@ -0,0 +1 @@
+#define NASC 0
diff --git a/i386/bogus/at3c501.h b/i386/bogus/at3c501.h
new file mode 100644
index 00000000..0e4e4406
--- /dev/null
+++ b/i386/bogus/at3c501.h
@@ -0,0 +1 @@
+#define NAT3C501 1
diff --git a/i386/bogus/blit.h b/i386/bogus/blit.h
new file mode 100644
index 00000000..1471edfe
--- /dev/null
+++ b/i386/bogus/blit.h
@@ -0,0 +1 @@
+#define NBLIT 0
diff --git a/i386/bogus/com.h b/i386/bogus/com.h
new file mode 100644
index 00000000..11832d40
--- /dev/null
+++ b/i386/bogus/com.h
@@ -0,0 +1 @@
+#define NCOM 4
diff --git a/i386/bogus/de6c.h b/i386/bogus/de6c.h
new file mode 100644
index 00000000..5ff16b10
--- /dev/null
+++ b/i386/bogus/de6c.h
@@ -0,0 +1 @@
+#define NDE6C 1
diff --git a/i386/bogus/eaha.h b/i386/bogus/eaha.h
new file mode 100644
index 00000000..c79f7ade
--- /dev/null
+++ b/i386/bogus/eaha.h
@@ -0,0 +1 @@
+#define NEAHA 1
diff --git a/i386/bogus/evc.h b/i386/bogus/evc.h
new file mode 100644
index 00000000..6bb31d14
--- /dev/null
+++ b/i386/bogus/evc.h
@@ -0,0 +1 @@
+#define NEVC 0
diff --git a/i386/bogus/fd.h b/i386/bogus/fd.h
new file mode 100644
index 00000000..e2632b31
--- /dev/null
+++ b/i386/bogus/fd.h
@@ -0,0 +1 @@
+#define NFD 4/*2*/
diff --git a/i386/bogus/fpe.h b/i386/bogus/fpe.h
new file mode 100644
index 00000000..48cc80f0
--- /dev/null
+++ b/i386/bogus/fpe.h
@@ -0,0 +1 @@
+#define FPE 1
diff --git a/i386/bogus/hd.h b/i386/bogus/hd.h
new file mode 100644
index 00000000..fadba180
--- /dev/null
+++ b/i386/bogus/hd.h
@@ -0,0 +1 @@
+#define NHD 4
diff --git a/i386/bogus/hpp.h b/i386/bogus/hpp.h
new file mode 100644
index 00000000..b47b397f
--- /dev/null
+++ b/i386/bogus/hpp.h
@@ -0,0 +1 @@
+#define NHPP 1
diff --git a/i386/bogus/lpr.h b/i386/bogus/lpr.h
new file mode 100644
index 00000000..02a8ebfb
--- /dev/null
+++ b/i386/bogus/lpr.h
@@ -0,0 +1 @@
+#define NLPR 1
diff --git a/i386/bogus/mach_machine_routines.h b/i386/bogus/mach_machine_routines.h
new file mode 100644
index 00000000..bf7bab51
--- /dev/null
+++ b/i386/bogus/mach_machine_routines.h
@@ -0,0 +1 @@
+#define MACH_MACHINE_ROUTINES 1
diff --git a/i386/bogus/ne.h b/i386/bogus/ne.h
new file mode 100644
index 00000000..607585ba
--- /dev/null
+++ b/i386/bogus/ne.h
@@ -0,0 +1 @@
+#define NNE 2
diff --git a/i386/bogus/ns8390.h b/i386/bogus/ns8390.h
new file mode 100644
index 00000000..34d585ae
--- /dev/null
+++ b/i386/bogus/ns8390.h
@@ -0,0 +1 @@
+#define NNS8390 1
diff --git a/i386/bogus/nscsi.h b/i386/bogus/nscsi.h
new file mode 100644
index 00000000..45307ba0
--- /dev/null
+++ b/i386/bogus/nscsi.h
@@ -0,0 +1 @@
+#define NSCSI 4
diff --git a/i386/bogus/par.h b/i386/bogus/par.h
new file mode 100644
index 00000000..e445001c
--- /dev/null
+++ b/i386/bogus/par.h
@@ -0,0 +1 @@
+#define NPAR 0
diff --git a/i386/bogus/pc586.h b/i386/bogus/pc586.h
new file mode 100644
index 00000000..76411f47
--- /dev/null
+++ b/i386/bogus/pc586.h
@@ -0,0 +1 @@
+#define NPC586 1
diff --git a/i386/bogus/platforms.h b/i386/bogus/platforms.h
new file mode 100644
index 00000000..c4d0a24d
--- /dev/null
+++ b/i386/bogus/platforms.h
@@ -0,0 +1 @@
+#define AT386 1
diff --git a/i386/bogus/rc.h b/i386/bogus/rc.h
new file mode 100644
index 00000000..7bbe5986
--- /dev/null
+++ b/i386/bogus/rc.h
@@ -0,0 +1,16 @@
+/*
+ * This controls whether or not we use a serial line for the console
+ * (ie, remote console).
+ */
+
+/*
+ * Values for RCLINE:
+ * -1 = disable
+ * 0 = port 0x3f8/irq 4 (DOS COM1)
+ * 1 = port 0x2f8/irq 3 (DOS COM2)
+ * 2 = port 0x3e8/irq 5 (DOS COM3)
+ * 3 = port 0x2e8/irq 9 (DOS COM4)
+ */
+
+#define RCLINE -1 /* com port for console */
+#define RCADDR 0x3f8 /* where it is */
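+
+/*
+ * For example, a remote console on DOS COM2 would be configured as
+ *	#define RCLINE	1
+ *	#define RCADDR	0x2f8
+ */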
diff --git a/i386/bogus/sbic.h b/i386/bogus/sbic.h
new file mode 100644
index 00000000..a0f322c0
--- /dev/null
+++ b/i386/bogus/sbic.h
@@ -0,0 +1 @@
+#define NSBIC 0
diff --git a/i386/bogus/sci.h b/i386/bogus/sci.h
new file mode 100644
index 00000000..3eb25428
--- /dev/null
+++ b/i386/bogus/sci.h
@@ -0,0 +1 @@
+#define NSCI 0
diff --git a/i386/bogus/sii.h b/i386/bogus/sii.h
new file mode 100644
index 00000000..3a914f55
--- /dev/null
+++ b/i386/bogus/sii.h
@@ -0,0 +1 @@
+#define NSII 0
diff --git a/i386/bogus/siop.h b/i386/bogus/siop.h
new file mode 100644
index 00000000..bffff9d4
--- /dev/null
+++ b/i386/bogus/siop.h
@@ -0,0 +1 @@
+#define NSIOP 0
diff --git a/i386/bogus/ul.h b/i386/bogus/ul.h
new file mode 100644
index 00000000..94f74ab9
--- /dev/null
+++ b/i386/bogus/ul.h
@@ -0,0 +1 @@
+#define NUL 0
diff --git a/i386/bogus/wd.h b/i386/bogus/wd.h
new file mode 100644
index 00000000..6c9b9559
--- /dev/null
+++ b/i386/bogus/wd.h
@@ -0,0 +1 @@
+#define NWD 0
diff --git a/i386/bogus/wt.h b/i386/bogus/wt.h
new file mode 100644
index 00000000..79b5e9c4
--- /dev/null
+++ b/i386/bogus/wt.h
@@ -0,0 +1 @@
+#define NWT 0
diff --git a/i386/dos/dos_buf.c b/i386/dos/dos_buf.c
new file mode 100644
index 00000000..5c7fcd82
--- /dev/null
+++ b/i386/dos/dos_buf.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "dos_io.h"
+
+#ifndef DOS_BUF_DYNAMIC
+char dos_buf[DOS_BUF_SIZE];
+#endif
+
diff --git a/i386/dos/dos_check_err.c b/i386/dos/dos_check_err.c
new file mode 100644
index 00000000..07aa6511
--- /dev/null
+++ b/i386/dos/dos_check_err.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/error.h>
+#include <mach/machine/eflags.h>
+
+#include "dos_io.h"
+
+int dos_check_err(struct real_call_data *rcd)
+{
+ if (rcd->flags & EFL_CF)
+ return err_dos + (rcd->eax & 0xffff);
+ else
+ return 0;
+}
+
diff --git a/i386/dos/dos_close.c b/i386/dos/dos_close.c
new file mode 100644
index 00000000..d3d0d00b
--- /dev/null
+++ b/i386/dos/dos_close.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <unistd.h>
+#include <errno.h>
+
+#include "dos_io.h"
+
+int dos_close(dos_fd_t fd)
+{
+ struct real_call_data real_call_data;
+
+ dos_init_rcd(&real_call_data);
+ real_call_data.eax = 0x3e00;
+ real_call_data.ebx = fd;
+ real_int(0x21, &real_call_data);
+ return dos_check_err(&real_call_data);
+}
+
diff --git a/i386/dos/dos_fstat.c b/i386/dos/dos_fstat.c
new file mode 100644
index 00000000..0c48016e
--- /dev/null
+++ b/i386/dos/dos_fstat.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/stat.h>
+
+#include "dos_io.h"
+
+int dos_fstat(dos_fd_t fd, struct stat *st)
+{
+ int err;
+ int actual = 0;
+ struct real_call_data real_call_data;
+ vm_offset_t old_pos, new_pos;
+
+ dos_init_rcd(&real_call_data);
+
+ bzero(st, sizeof(*st));
+ st->st_nlink = 1;
+ st->st_mode = S_IRWXU | S_IRWXG | S_IRWXO; /* XXX attributes */
+
+ /* Get device information,
+ which will tell us whether this is a character device
+ or a regular file. */
+ real_call_data.eax = 0x4400;
+ real_call_data.ebx = fd;
+ real_int(0x21, &real_call_data);
+ if (err = dos_check_err(&real_call_data))
+ return err;
+ if (real_call_data.edx & (1<<7))
+ st->st_mode |= S_IFCHR;
+ else
+ st->st_mode |= S_IFREG;
+
+ /* XXX get date/time with int 21 5700 */
+
+ /* Get file size by seeking to the end and back. */
+ if (!dos_seek(fd, 0, 1, &old_pos)
+ && !dos_seek(fd, 0, 2, &st->st_size))
+ {
+ if (err = dos_seek(fd, old_pos, 0, &new_pos))
+ return err;
+ if (new_pos != old_pos)
+ return EIO;/*XXX*/
+ }
+
+ /* Always assume 512-byte blocks for now... */
+ st->st_blocks = (st->st_size + 511) / 512;
+ st->st_blksize = 512;
+
+ return 0;
+}
+
diff --git a/i386/dos/dos_gettimeofday.c b/i386/dos/dos_gettimeofday.c
new file mode 100644
index 00000000..3b61bab7
--- /dev/null
+++ b/i386/dos/dos_gettimeofday.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <unistd.h>
+#include <errno.h>
+#include <sys/time.h>
+
+#include "dos_io.h"
+#include "debug.h"
+
+int dos_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ static int daysofmonth[12] = {31, 28, 31, 30, 31, 30, 31,
+ 31, 30, 31, 30, 31};
+
+ struct real_call_data real_call_data;
+ int err;
+
+ dos_init_rcd(&real_call_data);
+
+ if (tv)
+ {
+ int year, month, day, hour, min, sec, hund;
+
+ real_call_data.eax = 0x2a00;
+ real_int(0x21, &real_call_data);
+ year = real_call_data.ecx & 0xffff;
+ month = (real_call_data.edx >> 8) & 0xff;
+ day = real_call_data.edx & 0xff;
+ real_call_data.eax = 0x2c00;
+ real_int(0x21, &real_call_data);
+ if (err = dos_check_err(&real_call_data))
+ return err;
+
+ hour = (real_call_data.ecx >> 8) & 0xff;
+ min = real_call_data.ecx & 0xff;
+ sec = (real_call_data.edx >> 8) & 0xff;
+ hund = real_call_data.edx & 0xff;
+
+ tv->tv_sec = (year - 1970) * (365 * 24 * 60 * 60);
+ tv->tv_sec += (year - 1970) / 4 * (24 * 60 * 60); /* XXX??? */
+ tv->tv_sec += daysofmonth[month-1] * (24 * 60 * 60);
+ if ((((year - 1970) % 4) == 0) && (month > 2)) /* XXX??? */
+ tv->tv_sec += 24 * 60 * 60;
+ tv->tv_sec += day * 24 * 60 * 60;
+ tv->tv_sec += hour * 60 * 60;
+ tv->tv_sec += min * 60;
+ tv->tv_sec += sec;
+ tv->tv_usec = hund * (1000000 / 100);
+ }
+ if (tz)
+ return EINVAL; /*XXX*/
+
+ assert(tz == 0);
+ return 0;
+}
+
diff --git a/i386/dos/dos_io.h b/i386/dos/dos_io.h
new file mode 100644
index 00000000..990d08cd
--- /dev/null
+++ b/i386/dos/dos_io.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _i386_kernel_dos_dos_io_h_
+#define _i386_kernel_dos_dos_io_h_
+
+#include <mach/machine/vm_types.h>
+
+#include "real.h"
+#include "debug.h"
+
+struct stat;
+struct timeval;
+struct timezone;
+struct termios;
+
+typedef int dos_fd_t;
+
+/* Maximum number of bytes we can read or write with one DOS call
+ to or from memory not in the low 1MB accessible to DOS.
+ Must be less than 64KB.
+ Try to keep this size on a sector (512-byte) boundary for performance. */
+#ifndef DOS_BUF_SIZE
+#define DOS_BUF_SIZE 0x1000
+#endif
+
+/* If DOS_BUF_DYNAMIC is set, then dos_buf is a pointer
+ which must be provided and initialized by calling code.
+ Otherwise, the dos_buf is a statically-allocated bss array. */
+#ifdef DOS_BUF_DYNAMIC
+extern char *dos_buf;
+#else
+extern char dos_buf[DOS_BUF_SIZE];
+#endif
+
+int dos_check_err(struct real_call_data *rcd);
+
+int dos_open(const char *s, int flags, int create_mode, dos_fd_t *out_fh);
+int dos_close(dos_fd_t fd);
+int dos_read(dos_fd_t fd, void *buf, vm_size_t size, vm_size_t *out_actual);
+int dos_write(dos_fd_t fd, const void *buf, vm_size_t size, vm_size_t *out_actual);
+int dos_seek(dos_fd_t fd, vm_offset_t offset, int whence, vm_offset_t *out_newpos);
+int dos_fstat(dos_fd_t fd, struct stat *st);
+int dos_tcgetattr(dos_fd_t fd, struct termios *t);
+int dos_rename(const char *oldpath, const char *newpath);
+int dos_unlink(const char *filename);
+
+int dos_gettimeofday(struct timeval *tv, struct timezone *tz);
+
+#define dos_init_rcd(rcd) real_call_data_init(rcd)
+
+#define real_set_ds_dx(ptr) \
+ ({ unsigned ofs = (unsigned)(ptr); \
+ assert(ofs < 0x10000); \
+ real_call_data.ds = real_cs; \
+ real_call_data.edx = ofs; \
+ })
+#define real_set_es_di(ptr) \
+ ({ unsigned ofs = (unsigned)(ptr); \
+ assert(ofs < 0x10000); \
+ real_call_data.es = real_cs; \
+ real_call_data.edi = ofs; \
+ })
+
+
+#endif /* _i386_kernel_dos_dos_io_h_ */
diff --git a/i386/dos/dos_open.c b/i386/dos/dos_open.c
new file mode 100644
index 00000000..7e6ef2bf
--- /dev/null
+++ b/i386/dos/dos_open.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <errno.h>
+
+#include "dos_io.h"
+#include "vm_param.h"
+#include "debug.h"
+
+int dos_open(const char *s, int flags, int mode, dos_fd_t *out_fh)
+{
+ struct real_call_data real_call_data;
+ int err = EINVAL; /*XXX*/
+ vm_offset_t dos_buf_phys = (vm_offset_t)kvtophys(dos_buf);
+
+ assert(dos_buf); assert(dos_buf_phys);
+ assert(dos_buf_phys < 0x100000);
+
+ dos_init_rcd(&real_call_data);
+
+ if (strlen(s)+1 > DOS_BUF_SIZE)
+ return E2BIG;
+ strcpy(dos_buf, s);
+ real_call_data.ds = dos_buf_phys >> 4;
+ real_call_data.edx = dos_buf_phys & 15;
+
+ /* Possible situations:
+
+ - 3d
+ C 3d || 3c
+ T 3d (ensure that it exists), close, 3c
+ CT 3c
+ X 3d
+ C X 3d (ensure that it doesn't exist), 3c
+ TX 3d (ensure that it exists), close, 3c
+ CTX 3d (ensure that it doesn't exist), 3c
+ */
+
+ if ((flags & (O_CREAT | O_EXCL | O_TRUNC)) != (O_CREAT | O_TRUNC))
+ {
+ /* First try opening the file with function 0x3D. */
+ real_call_data.eax = 0x3d40 | (flags & O_ACCMODE);
+ real_call_data.ecx = 0;
+ real_int(0x21, &real_call_data);
+ err = dos_check_err(&real_call_data);
+ if (!err)
+ *out_fh = real_call_data.eax & 0xffff;
+ }
+
+ /* Now based on the result, determine what to do next. */
+ switch (flags & (O_CREAT | O_EXCL | O_TRUNC))
+ {
+ case 0:
+ case 0 | O_EXCL:
+ if (!err)
+ goto success;
+ else
+ return err;
+ case O_CREAT:
+ if (!err)
+ goto success;
+ else
+ break;
+ case O_CREAT | O_EXCL:
+ case O_CREAT | O_TRUNC | O_EXCL:
+ if (!err)
+ {
+ /* The file exists, but wasn't supposed to.
+ Close it and return an error. */
+ dos_close(real_call_data.eax & 0xffff);
+ return EEXIST;
+ }
+ else
+ break;
+ case O_TRUNC:
+ case O_TRUNC | O_EXCL:
+ if (!err)
+ {
+ /* We've verified that the file exists -
+ now close it and reopen it with CREAT
+ so it'll be truncated as requested. */
+ dos_close(real_call_data.eax & 0xffff);
+ break;
+ }
+ else
+ return err;
+ case O_CREAT | O_TRUNC:
+ /* This is the one case in which
+ we didn't try to open the file above at all.
+ Just fall on through and open it with CREAT. */
+ break;
+ }
+
+ /* Now try opening the file with DOS's CREAT call,
+ which truncates the file if it already exists. */
+ real_call_data.eax = 0x3c00;
+ real_call_data.ecx = mode & S_IWUSR ? 0 : 1;
+ real_int(0x21, &real_call_data);
+ if (!(err = dos_check_err(&real_call_data)))
+ {
+ *out_fh = real_call_data.eax & 0xffff;
+
+ /* We don't have to worry about O_APPEND here,
+ because we know the file starts out empty. */
+
+ return 0;
+ }
+
+ return err;
+
+ success:
+
+ /* If the caller requested append access,
+ just seek to the end of the file once on open.
+ We can't implement full POSIX behavior here without help,
+ since DOS file descriptors don't have an append mode.
+ To get full POSIX behavior,
+ the caller must seek to the end of the file before every write.
+ However, seeking to the end only on open
+ is probably enough for most typical uses. */
+ if (flags & O_APPEND)
+ {
+ vm_offset_t newpos;
+ err = dos_seek(*out_fh, 0, SEEK_END, &newpos);
+ if (err)
+ {
+ dos_close(*out_fh);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
diff --git a/i386/dos/dos_read.c b/i386/dos/dos_read.c
new file mode 100644
index 00000000..ae7213fb
--- /dev/null
+++ b/i386/dos/dos_read.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <unistd.h>
+#include <errno.h>
+
+#include "dos_io.h"
+#include "vm_param.h"
+
+int dos_read(dos_fd_t fd, void *buf, vm_size_t size, vm_size_t *out_actual)
+{
+ int err;
+ int actual = 0;
+ struct real_call_data real_call_data;
+ vm_offset_t dos_buf_phys = (vm_offset_t)kvtophys(dos_buf);
+
+ assert(dos_buf); assert(dos_buf_phys);
+ assert(dos_buf_phys < 0x100000);
+
+ dos_init_rcd(&real_call_data);
+
+ while (size > 0)
+ {
+ int little_size = size;
+ int little_actual;
+
+ if (little_size > DOS_BUF_SIZE)
+ little_size = DOS_BUF_SIZE;
+
+ real_call_data.eax = 0x3f00;
+ real_call_data.ebx = fd;
+ real_call_data.ecx = little_size;
+ real_call_data.ds = dos_buf_phys >> 4;
+ real_call_data.edx = dos_buf_phys & 15;
+ real_int(0x21, &real_call_data);
+ if (err = dos_check_err(&real_call_data))
+ return err;
+ little_actual = real_call_data.eax & 0xffff;
+ assert(little_actual <= little_size);
+
+ /* XXX don't copy if buf is <1MB */
+ memcpy(buf, dos_buf, little_actual);
+
+ buf += little_actual;
+ size -= little_actual;
+ actual += little_actual;
+
+ if (little_actual < little_size)
+ break;
+ }
+
+ *out_actual = actual;
+ return 0;
+}
+
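[Annotation, not part of the patch.]  For reference, a possible caller of dos_read(); it relies on the short read at end-of-file that the loop above reports.  The dos_open() argument order (name, flags, mode, out handle) is assumed from its body, and the helper name is invented.

/* Sketch only: total up a file's contents in 512-byte chunks. */
int dos_count_bytes(const char *path, vm_size_t *out_total)
{
        dos_fd_t fd;
        char buf[512];
        vm_size_t actual, total = 0;
        int err;

        err = dos_open(path, O_RDONLY, 0, &fd);
        if (err)
                return err;

        do
        {
                err = dos_read(fd, buf, sizeof(buf), &actual);
                if (err)
                        break;
                total += actual;
        } while (actual == sizeof(buf));        /* short read => EOF */

        dos_close(fd);
        if (!err)
                *out_total = total;
        return err;
}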
diff --git a/i386/dos/dos_rename.c b/i386/dos/dos_rename.c
new file mode 100644
index 00000000..e780ba43
--- /dev/null
+++ b/i386/dos/dos_rename.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <unistd.h>
+#include <errno.h>
+
+#include "dos_io.h"
+#include "vm_param.h"
+
+int dos_rename(const char *oldpath, const char *newpath)
+{
+ /* This function isn't fully atomic like on Unix,
+ but it's as close as I know how to do under DOS. */
+
+ struct real_call_data real_call_data;
+ vm_offset_t dos_buf_phys = (vm_offset_t)kvtophys(dos_buf);
+ int err;
+
+ dos_init_rcd(&real_call_data);
+
+ /* Put the oldpath in the first half of dos_buf,
+ and the newpath in the second half. */
+ if ((strlen(oldpath)+1 > DOS_BUF_SIZE/2)
+ || (strlen(newpath)+1 > DOS_BUF_SIZE/2))
+		return E2BIG;
+ strcpy(dos_buf, oldpath);
+ strcpy(dos_buf+DOS_BUF_SIZE/2, newpath);
+
+ /* Try once to rename the file. */
+ real_call_data.eax = 0x5600;
+ real_call_data.ds = dos_buf_phys >> 4;
+ real_call_data.edx = dos_buf_phys & 15;
+ real_call_data.es = dos_buf_phys >> 4;
+ real_call_data.edi = (dos_buf_phys & 15) + DOS_BUF_SIZE/2;
+ real_int(0x21, &real_call_data);
+
+ /* If that failed, delete newpath and then retry the rename.
+ We _hope_ the failure was because newpath already existed;
+ the DOS error codes I'm getting back seem to be bogus. */
+ if (err = dos_check_err(&real_call_data))
+ {
+ real_call_data.eax = 0x4100;
+ real_call_data.ds = dos_buf_phys >> 4;
+ real_call_data.edx = (dos_buf_phys & 15) + DOS_BUF_SIZE/2;
+ real_int(0x21, &real_call_data);
+ if (err = dos_check_err(&real_call_data))
+ return err;
+
+ real_call_data.eax = 0x5600;
+ real_call_data.ds = dos_buf_phys >> 4;
+ real_call_data.edx = dos_buf_phys & 15;
+ real_call_data.es = dos_buf_phys >> 4;
+ real_call_data.edi = (dos_buf_phys & 15) + DOS_BUF_SIZE/2;
+ real_int(0x21, &real_call_data);
+ err = dos_check_err(&real_call_data);
+ }
+
+ return err;
+}
+
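[Annotation, not part of the patch.]  The write-temporary-then-rename save pattern is the main client of dos_rename().  Because the delete-and-retry above is not atomic, there is a window in which neither name exists, so callers should be able to recover from a missing file.  The temporary name, the assumed dos_open() argument order, and the EIO return for a short write are all illustrative choices.

/* Sketch only: replace `path' with freshly written contents. */
int dos_save_file(const char *path, const void *data, vm_size_t len)
{
        dos_fd_t fd;
        vm_size_t actual;
        int err;

        err = dos_open("SAVE.TMP", O_WRONLY | O_CREAT | O_TRUNC, S_IWUSR, &fd);
        if (err)
                return err;
        err = dos_write(fd, data, len, &actual);
        dos_close(fd);
        if (err)
                return err;
        if (actual < len)
                return EIO;     /* short write, e.g. disk full */

        return dos_rename("SAVE.TMP", path);
}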
diff --git a/i386/dos/dos_seek.c b/i386/dos/dos_seek.c
new file mode 100644
index 00000000..ba61d878
--- /dev/null
+++ b/i386/dos/dos_seek.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <unistd.h>
+#include <errno.h>
+
+#include "dos_io.h"
+
+int dos_seek(dos_fd_t fd, vm_offset_t offset, int whence, vm_offset_t *out_newpos)
+{
+ struct real_call_data real_call_data;
+ int err;
+
+ dos_init_rcd(&real_call_data);
+ real_call_data.eax = 0x4200 | whence;
+ real_call_data.ebx = fd;
+ real_call_data.ecx = (unsigned)offset >> 16;
+ real_call_data.edx = (unsigned)offset & 0xffff;
+ real_int(0x21, &real_call_data);
+ if (err = dos_check_err(&real_call_data))
+ return err;
+ *out_newpos = (real_call_data.edx << 16) | (real_call_data.eax & 0xffff);
+ return 0;
+}
+
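[Annotation, not part of the patch.]  DOS function 42h takes the 32-bit offset split across CX:DX and returns the new position in DX:AX, which dos_seek() reassembles above.  One common use, sketched here with an invented helper name, is measuring a file without reading it.

/* Sketch only: find the file's size, then restore the old position. */
int dos_file_size(dos_fd_t fd, vm_offset_t *out_size)
{
        vm_offset_t old_pos, end_pos;
        int err;

        if ((err = dos_seek(fd, 0, SEEK_CUR, &old_pos)))
                return err;
        if ((err = dos_seek(fd, 0, SEEK_END, &end_pos)))
                return err;
        if ((err = dos_seek(fd, old_pos, SEEK_SET, &old_pos)))
                return err;

        *out_size = end_pos;
        return 0;
}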
diff --git a/i386/dos/dos_tcgetattr.c b/i386/dos/dos_tcgetattr.c
new file mode 100644
index 00000000..8f0d4931
--- /dev/null
+++ b/i386/dos/dos_tcgetattr.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/termios.h>
+
+#include "dos_io.h"
+
+int dos_tcgetattr(dos_fd_t fd, struct termios *t)
+{
+ int err;
+ struct real_call_data real_call_data;
+
+ dos_init_rcd(&real_call_data);
+
+ bzero(t, sizeof(*t));
+
+ /* First make sure this is actually a character device. */
+ real_call_data.eax = 0x4400;
+ real_call_data.ebx = fd;
+ real_int(0x21, &real_call_data);
+ if (err = dos_check_err(&real_call_data))
+ return err;
+ if (!(real_call_data.edx & (1<<7)))
+ return ENOTTY;
+
+ return 0;
+}
+
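[Annotation, not part of the patch.]  dos_tcgetattr() leans on DOS IOCTL function 4400h, whose device-information word has bit 7 set for character devices; anything else gets ENOTTY.  That makes an isatty()-style predicate a one-liner (the name is made up).

/* Sketch only: true for character devices (console, AUX, PRN, ...),
   false for ordinary files. */
int dos_isatty(dos_fd_t fd)
{
        struct termios t;

        return dos_tcgetattr(fd, &t) == 0;
}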
diff --git a/i386/dos/dos_unlink.c b/i386/dos/dos_unlink.c
new file mode 100644
index 00000000..cb169bbd
--- /dev/null
+++ b/i386/dos/dos_unlink.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <unistd.h>
+#include <errno.h>
+
+#include "dos_io.h"
+#include "vm_param.h"
+
+int dos_unlink(const char *filename)
+{
+ struct real_call_data real_call_data;
+ vm_offset_t dos_buf_phys = (vm_offset_t)kvtophys(dos_buf);
+
+ dos_init_rcd(&real_call_data);
+
+ if (strlen(filename)+1 > DOS_BUF_SIZE)
+ return E2BIG;
+ strcpy(dos_buf, filename);
+
+ real_call_data.eax = 0x4100;
+ real_call_data.ds = dos_buf_phys >> 4;
+ real_call_data.edx = dos_buf_phys & 15;
+ real_int(0x21, &real_call_data);
+
+ return dos_check_err(&real_call_data);
+}
+
diff --git a/i386/dos/dos_write.c b/i386/dos/dos_write.c
new file mode 100644
index 00000000..5dab8f90
--- /dev/null
+++ b/i386/dos/dos_write.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <unistd.h>
+#include <errno.h>
+
+#include "dos_io.h"
+#include "vm_param.h"
+
+int dos_write(dos_fd_t fd, const void *buf, vm_size_t size, vm_size_t *out_actual)
+{
+ int err;
+ int actual = 0;
+ struct real_call_data real_call_data;
+ vm_offset_t dos_buf_phys = (vm_offset_t)kvtophys(dos_buf);
+
+ assert(dos_buf); assert(dos_buf_phys);
+ assert(dos_buf_phys < 0x100000);
+
+ dos_init_rcd(&real_call_data);
+
+ while (size > 0)
+ {
+ int little_size = size;
+ int little_actual;
+
+ if (little_size > DOS_BUF_SIZE)
+ little_size = DOS_BUF_SIZE;
+
+ /* XXX don't copy if buf is <1MB */
+ memcpy(dos_buf, buf, little_size);
+
+ real_call_data.eax = 0x4000;
+ real_call_data.ebx = fd;
+ real_call_data.ecx = little_size;
+ real_call_data.ds = dos_buf_phys >> 4;
+ real_call_data.edx = dos_buf_phys & 15;
+ real_int(0x21, &real_call_data);
+ if (err = dos_check_err(&real_call_data))
+ return err;
+ little_actual = real_call_data.eax & 0xffff;
+ assert(little_actual <= little_size);
+
+ buf += little_actual;
+ size -= little_actual;
+ actual += little_actual;
+
+ if (little_actual < little_size)
+ break;
+ }
+
+ *out_actual = actual;
+ return 0;
+}
+
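[Annotation, not part of the patch.]  Like dos_read(), dos_write() can legitimately return success with a short count (a full disk is the usual cause), so callers that need all-or-nothing semantics have to check the count themselves.  A sketch, with ENOSPC as a guess at the most useful error to return:

/* Sketch only: write everything or report an error. */
int dos_write_all(dos_fd_t fd, const void *buf, vm_size_t size)
{
        vm_size_t actual;
        int err;

        err = dos_write(fd, buf, size, &actual);
        if (err)
                return err;
        return (actual == size) ? 0 : ENOSPC;
}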
diff --git a/i386/dos/i16/gdt.h b/i386/dos/i16/gdt.h
new file mode 100644
index 00000000..3dad28b4
--- /dev/null
+++ b/i386/dos/i16/gdt.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_DOS_GDT_
+#define _I386_DOS_GDT_
+
+#define cpu_gdt_init_VCPI_CS(cpu)
+#define cpu_gdt_init_VCPI_2(cpu)
+#define cpu_gdt_init_VCPI_3(cpu)
+
+#include_next "gdt.h"
+
+#endif /* _I386_DOS_GDT_ */
diff --git a/i386/dos/i16/gdt_sels.h b/i386/dos/i16/gdt_sels.h
new file mode 100644
index 00000000..25e7d966
--- /dev/null
+++ b/i386/dos/i16/gdt_sels.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "config.h"
+
+#include_next "gdt_sels.h"
+
+#ifdef ENABLE_VCPI
+
+/* Segment descriptors for use by a VCPI server. */
+gdt_sel(VCPI_CS)
+gdt_sel(VCPI_2)
+gdt_sel(VCPI_3)
+
+#endif /* ENABLE_VCPI */
+
diff --git a/i386/dos/i16/i16_crt0.S b/i386/dos/i16/i16_crt0.S
new file mode 100644
index 00000000..f21c6a6b
--- /dev/null
+++ b/i386/dos/i16/i16_crt0.S
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/asm.h>
+
+#include "config.h"
+#include "i16_crt0.h"
+
+ .code16
+ .text
+
+ENTRY(i16_entry)
+ /* DOS starts us up with our stack pointer pointing
+ to the very top of our BSS segment.
+ ds and es point to the PSP. */
+
+#define DELAY jmp 1f; 1: jmp 1f; 1: jmp 1f; 1:
+
+ /* Check to make sure we're running on a 386 or higher -
+ _without_ using any 32-bit instructions of course.
+ Tricky, since gas doesn't support 16-bit addressing modes. :-)
+ We can't produce any 16-bit relocations either,
+ because ELF doesn't support them.
+ This code is basically straight out of the Pentium manual,
+	   except gassed, of course.  */
+ pushfw
+ DELAY
+ popw %bx
+ movw $0xfff,%ax
+ andw %bx,%ax
+ pushw %ax
+ DELAY
+ popfw
+ DELAY
+ pushfw
+ DELAY
+ popw %ax
+ and $0xf000,%ax
+ cmpw $0xf000,%ax
+
+ je 1f
+ orw $0xf000,%bx
+ pushw %bx
+ DELAY
+ popfw
+ DELAY
+ pushfw
+ DELAY
+ popw %ax
+ andw $0xf000,%ax
+ jnz 4f
+1:
+ /* Gak! We're running on an 8086/8088/80286! */
+ callw 5f
+ .ascii "This program requires a 386 or better.\r\n\0"
+5: popw %si
+ movw %cs,%ax
+ movw %ax,%ds
+ cld
+2: lodsb
+ orb %al,%al
+ jz 3f
+ movb $0x02,%ah
+ movb %al,%dl
+ int $0x21
+ jmp 2b
+3: movw $0x4c02,%ax
+ int $0x21
+4:
+ /* Now we can use 32-bit instructions all we want. */
+
+ /* Save the PSP segment address (dx). */
+ movw %ds,%dx
+
+ /* Find our real-mode code segment (ax). */
+ movw %cs,%ax
+
+#ifdef ENABLE_PAGE_ALIGNED_KERNEL
+ /* Move our code and data so that everything is on a page boundary.
+ Theoretically we _could_ go past the end of available memory,
+ since we're not checking, but it's enormously unlikely. */
+ std
+ movw %ax,%ds
+ addw $0xff,%ax
+ andw $0xff00,%ax
+ movw %ax,%es
+ movl $EXT(edata),%ecx
+ subl $EXT(i16_entry_2),%ecx
+ movl $EXT(edata)-1,%esi
+ movw %si,%di
+ rep
+ movsb
+
+ /* Start running at the new address. */
+ pushl $EXT(i16_entry_2)
+ movw %ax,2(%esp)
+ lretw
+
+ENTRY(i16_entry_2)
+ /* We're now page aligned. */
+#endif /* ENABLE_PAGE_ALIGNED_KERNEL */
+
+ /* Load the data segment registers appropriately. */
+ movw %ax,%es
+ movw %ax,%ss
+
+ /* Start using a real stack. */
+ movl $EXT(crt0_stack)+CRT0_STACK_SIZE,%esp
+
+ /* Clear our BSS segment. */
+ movl $EXT(edata),%edi
+ movl $EXT(end),%ecx
+ subw %di,%cx
+ xorb %al,%al
+ cld
+ rep
+ stosb
+
+ /* Find the size of the environment array (si)
+ and the number of environment variables plus one (bx).
+ The PSP segment is still in dx. */
+ movw %dx,%ds
+ movw 0x2c,%ds
+ xorw %si,%si
+1: lodsb
+ orb %al,%al
+ jnz 1b
+ lodsb
+ orb %al,%al
+ jnz 1b
+
+ /* Allocate space for the environment array on the stack.
+ Also make sure the top 16 bits of ESP are cleared,
+ and that the stack pointer is longword aligned. */
+ subw %si,%sp
+ andl $0x0000fffc,%esp
+
+ /* Copy the environment array to the local stack.
+ We present it backwards, but big deal - shouldn't matter. */
+ xorl %edi,%edi
+ movw %sp,%di
+ xorl %esi,%esi
+ pushl %esi
+ jmp 3f
+2: pushl %edi
+ stosb
+1: lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+3: lodsb
+ orb %al,%al
+ jnz 2b
+ movl %esp,%cs:EXT(environ)
+
+ /* Copy the program name to the local stack;
+ it will be used as argv[0]. */
+ lodsw
+ movw %si,%bx
+1: pushw $0
+ lodsb
+ orb %al,%al
+ jz 2f
+ lodsb
+ orb %al,%al
+ jnz 1b
+2: movw %bx,%si
+ movw %sp,%di
+3: lodsb
+ stosb
+ orb %al,%al
+ jnz 3b
+ movl %esp,%ebp
+
+ /* Build argv[1..n] from the command tail in the PSP.
+ Count the arguments in ebx. */
+ movw %dx,%ds
+ xorl %ecx,%ecx
+ xorl %ebx,%ebx
+ movb 0x80,%cl /* get size of command tail */
+ incw %cx /* plus the return character */
+ movw $0x80,%si
+ addw %cx,%si /* si = ptr to return character */
+ movw %sp,%di
+ decw %di
+ subw %cx,%sp /* allocate space on the stack */
+ andw $0xfffc,%sp
+ pushl %ebx
+ std
+1: xorb %al,%al /* store a null terminator for this arg */
+ stosb
+ incl %ebx
+2: cmpw $0x80,%si
+ je 5f
+ lodsb /* scan backwards for the end of an arg */
+ cmpb $0x20,%al
+ jbe 2b
+3: stosb /* copy the arg */
+ cmpw $0x80,%si
+ je 4f
+ lodsb
+ cmpb $0x20,%al
+ ja 3b
+4: movw %di,%cx /* push an arg pointer */
+ incw %cx
+ pushl %ecx
+ jmp 1b
+5:
+
+ /* Push the argv[0] pointer. */
+ pushl %ebp
+
+	/* Push the argument and environment parameters on the stack. */
+ movl %esp,%eax
+ pushl %cs:EXT(environ)
+ pushl %eax
+ pushl %ebx
+
+ /* Release all conventional memory above the top of our BSS.
+ The PSP segment is still in dx. */
+ movl $EXT(end)+15,%ebx
+ shrw $4,%bx
+ movw %cs,%ax
+ addw %ax,%bx
+ subw %dx,%bx
+ movw %dx,%es
+ movb $0x4a,%ah
+ int $0x21
+
+ /* Load the normal data segment registers. */
+ movw %cs,%ax
+ movw %ax,%ds
+ movw %ax,%es
+
+ /* GCC wants the direction flag cleared at all times. */
+ cld
+
+	/* Run the 16-bit C startup code. */
+ call EXT(i16_main)
+
+ .globl EXT(crt0_stack)
+ .comm EXT(crt0_stack),CRT0_STACK_SIZE
+
+ .globl EXT(environ)
+ .comm EXT(environ),4
+
+
+ .data
+
+ .section .anno,"aw",@progbits
+ P2ALIGN(4)
+ .globl __ANNO_START__
+__ANNO_START__:
+
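[Annotation, not part of the patch.]  The command-tail loop above is easier to follow in C.  The PSP keeps the tail length at offset 0x80 and the text at 0x81; the assembly walks it backwards, NUL-terminating each blank-separated word and pushing an argv pointer as it goes.  The forward-walking sketch below is illustration only, not code from this patch.

/* Sketch only: split a PSP command tail into argv-style words. */
int parse_command_tail(const unsigned char *psp, char *argbuf,
                       char **argv, int max_args)
{
        int len = psp[0x80];
        int i = 0, argc = 0;

        while (i < len && argc < max_args)
        {
                /* Skip blanks and control characters (anything <= 0x20). */
                while (i < len && psp[0x81 + i] <= 0x20)
                        i++;
                if (i >= len)
                        break;
                argv[argc++] = argbuf;
                while (i < len && psp[0x81 + i] > 0x20)
                        *argbuf++ = psp[0x81 + i++];
                *argbuf++ = '\0';
        }
        return argc;
}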
diff --git a/i386/dos/i16/i16_crt0.h b/i386/dos/i16/i16_crt0.h
new file mode 100644
index 00000000..352d1cbb
--- /dev/null
+++ b/i386/dos/i16/i16_crt0.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_DOS_I16_CRT0_H_
+#define _I386_DOS_I16_CRT0_H_
+
+#define CRT0_STACK_SIZE 4096
+
+#ifndef ASSEMBLER
+extern char crt0_stack[CRT0_STACK_SIZE];
+#endif
+
+#endif /* _I386_DOS_I16_CRT0_H_ */
diff --git a/i386/dos/i16/i16_dos.h b/i386/dos/i16/i16_dos.h
new file mode 100644
index 00000000..27b0ca46
--- /dev/null
+++ b/i386/dos/i16/i16_dos.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I16DOS_H_
+#define _I16DOS_H_
+
+#include <mach/inline.h>
+#include <mach/machine/far_ptr.h>
+
+
+/* Returns 16-bit DOS version number:
+ major version number in high byte, minor in low byte. */
+MACH_INLINE unsigned short i16_dos_version(void)
+{
+ unsigned short dos_version_swapped;
+ asm volatile("int $0x21" : "=a" (dos_version_swapped) : "a" (0x3000));
+ return (dos_version_swapped >> 8) | (dos_version_swapped << 8);
+}
+
+MACH_INLINE void i16_dos_putchar(int c)
+{
+ asm volatile("int $0x21" : : "a" (0x0200), "d" (c));
+}
+
+MACH_INLINE void i16_dos_exit(int rc)
+{
+ asm volatile("int $0x21" : : "a" (0x4c00 | (rc & 0xff)));
+}
+
+MACH_INLINE void i16_dos_get_int_vec(int vecnum, struct far_pointer_16 *out_vec)
+{
+ asm volatile("
+ pushw %%es
+ int $0x21
+ movw %%es,%0
+ popw %%es
+ " : "=r" (out_vec->seg), "=b" (out_vec->ofs)
+ : "a" (0x3500 | vecnum));
+}
+
+MACH_INLINE void i16_dos_set_int_vec(int vecnum, struct far_pointer_16 *new_vec)
+{
+ asm volatile("
+ pushw %%ds
+ movw %1,%%ds
+ int $0x21
+ popw %%ds
+ " :
+ : "a" (0x2500 | vecnum),
+ "r" (new_vec->seg), "d" (new_vec->ofs));
+}
+
+/* Open a DOS file and return the new file handle.
+ Returns -1 if an error occurs. */
+MACH_INLINE int i16_dos_open(const char *filename, int access)
+{
+ int fh;
+ asm volatile("
+ int $0x21
+ jnc 1f
+ movl $-1,%%eax
+ 1:
+ " : "=a" (fh) : "a" (0x3d00 | access), "d" (filename));
+ return fh;
+}
+
+MACH_INLINE void i16_dos_close(int fh)
+{
+ asm volatile("int $0x21" : : "a" (0x3e00), "b" (fh));
+}
+
+MACH_INLINE int i16_dos_get_device_info(int fh)
+{
+ int info_word;
+ asm volatile("
+ int $0x21
+ jnc 1f
+ movl $-1,%%edx
+ 1:
+ " : "=d" (info_word) : "a" (0x4400), "b" (fh), "d" (0));
+ return info_word;
+}
+
+MACH_INLINE int i16_dos_get_output_status(int fh)
+{
+ int status;
+ asm volatile("
+ int $0x21
+ movzbl %%al,%%eax
+ jnc 1f
+ movl $-1,%%eax
+ 1:
+ " : "=a" (status) : "a" (0x4407), "b" (fh));
+ return status;
+}
+
+MACH_INLINE int i16_dos_alloc(unsigned short *inout_paras)
+{
+ int seg;
+ asm volatile("
+ int $0x21
+ jnc 1f
+ movl $-1,%%eax
+ 1:
+ " : "=a" (seg), "=b" (*inout_paras)
+ : "a" (0x4800), "b" (*inout_paras));
+ return seg;
+}
+
+MACH_INLINE void i16_dos_free(unsigned short seg)
+{
+ asm volatile("
+ pushw %%es
+ movw %1,%%es
+ int $0x21
+ popw %%es
+ " : : "a" (0x4900), "r" (seg) : "eax");
+}
+
+MACH_INLINE unsigned short i16_dos_get_psp_seg(void)
+{
+ unsigned short psp_seg;
+ asm volatile("int $0x21" : "=b" (psp_seg) : "a" (0x6200));
+ return psp_seg;
+}
+
+#endif /* _I16DOS_H_ */
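[Annotation, not part of the patch.]  i16_dos_version() above swaps DOS's AL=major/AH=minor reply so the major number ends up in the high byte; that is what lets i16_main() later compare against 0x300 for "3.00 or later".  A pair of accessors (invented names) makes the encoding explicit.

/* Sketch only: decode the value returned by i16_dos_version(). */
MACH_INLINE int i16_dos_version_major(void)
{
        return i16_dos_version() >> 8;
}

MACH_INLINE int i16_dos_version_minor(void)
{
        return i16_dos_version() & 0xff;
}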
diff --git a/i386/dos/i16/i16_dos_mem.c b/i386/dos/i16/i16_dos_mem.c
new file mode 100644
index 00000000..e78398db
--- /dev/null
+++ b/i386/dos/i16/i16_dos_mem.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+#include <mach/machine/vm_types.h>
+
+#include "i16_dos.h"
+#include "config.h"
+
+
+
+/* These aren't static because vcpi and dpmi code need to grab DOS memory
+ before we've switched to protected mode and memory has been collected. */
+vm_offset_t dos_mem_phys_free_mem;
+vm_size_t dos_mem_phys_free_size;
+
+
+CODE32
+
+void dos_mem_collect(void)
+{
+ if (dos_mem_phys_free_mem)
+ {
+ phys_mem_add(dos_mem_phys_free_mem, dos_mem_phys_free_size);
+ dos_mem_phys_free_mem = 0;
+ }
+}
+
+CODE16
+
+void i16_dos_mem_check()
+{
+ unsigned short paras = 0xf000;
+ int dos_mem_seg;
+
+ /* Allocate as big a chunk of memory as we can find. */
+ do
+ {
+ if (paras == 0)
+ return;
+ dos_mem_seg = i16_dos_alloc(&paras);
+ }
+ while (dos_mem_seg < 0);
+
+ dos_mem_phys_free_mem = dos_mem_seg << 4;
+ dos_mem_phys_free_size = paras << 4;
+
+#ifdef ENABLE_CODE_CHECK
+ i16_code_copy();
+#endif
+}
+
+
+#ifdef ENABLE_CODE_CHECK
+
+/* Big humongo kludge to help in finding random code-trashing bugs.
+ We copy the entire text segment upon initialization,
+ and then check it later as necessary. */
+
+#include <mach/machine/proc_reg.h>
+#include "vm_param.h"
+
+extern char etext[], i16_entry_2[];
+
+static int code_copy_seg;
+
+static int i16_code_copy()
+{
+ int text_size = (int)etext & ~0xf;
+ int i;
+
+ if (dos_mem_phys_free_size < text_size)
+ return;
+
+ code_copy_seg = dos_mem_phys_free_mem >> 4;
+ dos_mem_phys_free_mem += text_size;
+ dos_mem_phys_free_size -= text_size;
+
+ set_fs(code_copy_seg);
+ for (i = 0; i < text_size; i += 4)
+ asm volatile("
+ movl (%%ebx),%%eax
+ movl %%eax,%%fs:(%%ebx)
+ " : : "b" (i) : "eax");
+}
+
+void i16_code_check(int dummy)
+{
+ int text_size = (int)etext & ~0xf;
+ int i, old, new;
+ int found = 0;
+
+ if (!code_copy_seg)
+ return;
+
+ set_fs(code_copy_seg);
+ for (i = (int)i16_entry_2; i < text_size; i += 4)
+ {
+ asm volatile("
+ movl (%%ebx),%%eax
+ movl %%fs:(%%ebx),%%ecx
+ " : "=a" (new), "=c" (old) : "b" (i));
+ if (old != new)
+ {
+ found = 1;
+ i16_writehexw(i);
+ i16_putchar(' ');
+ i16_writehexl(old);
+ i16_putchar(' ');
+ i16_writehexl(new);
+ i16_putchar('\r');
+ i16_putchar('\n');
+ }
+ }
+ if (found)
+ {
+ code_copy_seg = 0;
+ i16_writehexl((&dummy)[-1]);
+ i16_die(" DOS extender code trashed!");
+ }
+}
+
+CODE32
+
+void code_check(int dummy)
+{
+ int text_size = (int)etext & ~0xf;
+ unsigned char *new = 0, *old = (void*)phystokv(code_copy_seg*16);
+ int found = 0;
+ int i;
+
+ if (!code_copy_seg)
+ return;
+
+ for (i = (int)i16_entry_2; (i < text_size) && (found < 10); i++)
+ {
+ /* In several places we have to self-modify an int instruction,
+ or the segment value in an absolute long jmp instruction,
+ so ignore any changes preceded by those opcodes. */
+ if ((new[i] != old[i])
+ && (old[i-1] != 0xcd)
+ && (old[i-6] != 0xea))
+ {
+ if (!found)
+ {
+ found = 1;
+ about_to_die(1);
+ }
+ printf("%08x addr %04x was %02x now %02x\n",
+ (&dummy)[-1], i, old[i], new[i]);
+ }
+ }
+ if (found)
+ {
+ code_copy_seg = 0;
+ die("%08x DOS extender code trashed!", (&dummy)[-1]);
+ }
+}
+
+CODE16
+
+#endif /* ENABLE_CODE_CHECK */
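[Annotation, not part of the patch.]  DOS function 48h deals in 16-byte paragraphs, which is why i16_dos_mem_check() shifts the segment and size left by 4, and a failed allocation reports the largest available block back in BX, which is what makes the shrinking retry loop converge.  A byte-oriented wrapper might look like the sketch below; the name and the two-attempt simplification are mine, and like the rest of this file it would live in a CODE16 section.

/* Sketch only: allocate conventional memory by byte count. */
static int i16_dos_alloc_bytes(vm_size_t want_bytes,
                               vm_offset_t *out_phys, vm_size_t *out_bytes)
{
        unsigned short paras = (want_bytes + 15) >> 4;
        int seg;

        seg = i16_dos_alloc(&paras);
        if (seg < 0)
        {
                /* paras now holds the largest block DOS can give us. */
                if (paras == 0)
                        return -1;
                seg = i16_dos_alloc(&paras);
                if (seg < 0)
                        return -1;
        }
        *out_phys = (vm_offset_t)seg << 4;
        *out_bytes = (vm_size_t)paras << 4;
        return 0;
}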
diff --git a/i386/dos/i16/i16_exit.c b/i386/dos/i16/i16_exit.c
new file mode 100644
index 00000000..04b943cb
--- /dev/null
+++ b/i386/dos/i16/i16_exit.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+
+#include "config.h"
+#include "i16_dos.h"
+
+
+CODE16
+
+void i16_exit(int rc)
+{
+ /* Clean up properly. */
+ i16_ext_mem_shutdown();
+ i16_xms_shutdown();
+#ifdef ENABLE_VCPI
+ i16_vcpi_shutdown();
+#endif
+#ifdef ENABLE_DPMI
+ i16_dpmi_shutdown();
+#endif
+#ifdef ENABLE_CODE_CHECK
+ i16_code_check();
+#endif
+
+ /* Call the DOS exit function. */
+ i16_dos_exit(rc);
+}
+
diff --git a/i386/dos/i16/i16_main.c b/i386/dos/i16/i16_main.c
new file mode 100644
index 00000000..5558d09a
--- /dev/null
+++ b/i386/dos/i16/i16_main.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+
+#include "config.h"
+#include "i16_dos.h"
+
+
+CODE16
+
+int argc;
+char **argv;
+
+void i16_main(int _argc, char **_argv)
+{
+ argc = _argc;
+ argv = _argv;
+
+ i16_init();
+
+ /* Make sure we're running on a good enough DOS version. */
+ if (i16_dos_version() < 0x300)
+ i16_die("DOS 3.00 or higher required.");
+
+ /* See if we're running in a DPMI or VCPI environment.
+ If either of these are successful, they don't return. */
+ i16_dos_mem_check();
+#ifdef ENABLE_DPMI
+ i16_dpmi_check();
+#endif
+ i16_xms_check();
+ i16_ext_mem_check();
+#ifdef ENABLE_VCPI
+ i16_vcpi_check();
+#endif
+
+ i16_raw_start();
+}
+
diff --git a/i386/dos/i16/i16_putchar.c b/i386/dos/i16/i16_putchar.c
new file mode 100644
index 00000000..911cbe4a
--- /dev/null
+++ b/i386/dos/i16/i16_putchar.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+
+#include "i16_dos.h"
+
+CODE16
+
+void i16_putchar(int ch)
+{
+ if (ch == '\n')
+ i16_dos_putchar('\r');
+ i16_dos_putchar(ch);
+}
+
diff --git a/i386/dos/i16/i16_vcpi.c b/i386/dos/i16/i16_vcpi.c
new file mode 100644
index 00000000..e021d6b2
--- /dev/null
+++ b/i386/dos/i16/i16_vcpi.c
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/boolean.h>
+#include <mach/vm_param.h>
+#include <mach/machine/code16.h>
+#include <mach/machine/vm_types.h>
+#include <mach/machine/paging.h>
+#include <mach/machine/eflags.h>
+#include <mach/machine/proc_reg.h>
+#include <mach/machine/far_ptr.h>
+#include <mach/machine/vcpi.h>
+#include <mach/machine/asm.h>
+
+#include "config.h"
+#include "i16.h"
+#include "i16_dos.h"
+#include "cpu.h"
+#include "real.h"
+#include "debug.h"
+#include "vm_param.h"
+
+#ifdef ENABLE_VCPI
+
+static boolean_t ems_page_allocated;
+static unsigned short ems_handle;
+
+static vm_offset_t vcpi_pdir, vcpi_ptable0;
+
+struct far_pointer_32 vcpi_pmode_entry = {0, VCPI_CS};
+struct vcpi_switch_data vcpi_switch_data;
+
+static struct pseudo_descriptor gdt_pdesc, idt_pdesc;
+
+static boolean_t pic_reprogrammed;
+
+/* Save area for the DOS interrupt vectors
+ that used to be in the place we relocated the master PIC to. */
+static struct far_pointer_16 master_save_vecs[8];
+
+
+#ifdef ENABLE_PAGING
+#define VCPI_PAGING_INIT(pdir_pa, first_unmapped_pa) vcpi_paging_init(pdir_pa, first_unmapped_pa)
+#else
+#define VCPI_PAGING_INIT(pdir_pa, first_unmapped_pa) ((void)0)
+#endif
+
+#ifdef ENABLE_KERNEL_LDT
+#define KERNEL_LDT_INIT() (vcpi_switch_data.ldt_sel = KERNEL_LDT)
+#else
+#define KERNEL_LDT_INIT() ((void)0)
+#endif
+
+
+CODE16
+
+static void i16_vcpi_switch_to_pmode()
+{
+ extern vm_offset_t boot_image_pa;
+
+ i16_cli();
+
+ i16_assert(i16_get_ds() == i16_get_cs());
+ i16_assert(i16_get_es() == i16_get_cs());
+ i16_assert(i16_get_ss() == i16_get_cs());
+
+ /* Make sure the TSS isn't marked busy. */
+ cpu[0].tables.gdt[KERNEL_TSS_IDX].access &= ~ACC_TSS_BUSY;
+
+ /* Ask the VCPI server to switch to protected mode. */
+ asm volatile("
+ movl %%esp,%%edx
+ int $0x67
+ "SEXT(pmode_return)":
+ movl %%edx,%%esp
+ movw %2,%%dx
+ movw %%dx,%%ss
+ movw %%dx,%%ds
+ movw %%dx,%%es
+ xorw %%dx,%%dx
+ movw %%dx,%%fs
+ movw %%dx,%%gs
+ " :
+ : "a" ((unsigned short)0xde0c),
+ "S" (boot_image_pa + (vm_offset_t)&vcpi_switch_data),
+ "i" (KERNEL_DS)
+ : "eax", "edx", "esi");
+
+ /* Make sure the direction flag is still clear. */
+ i16_cld();
+}
+
+static void i16_vcpi_switch_to_real_mode()
+{
+ i16_cli();
+
+ /* As requested by VCPI spec... */
+ i16_clts();
+
+ /* Perform the switch. */
+ asm volatile("
+ movl %%esp,%%edx
+ pushl %1
+ pushl %1
+ pushl %1
+ pushl %1
+ pushl %1
+ pushl %%edx
+ pushl $0
+ pushl %1
+ pushl $1f
+ movw %2,%%ds
+ lcall %%ss:"SEXT(vcpi_pmode_entry)"
+ 1:
+ " :
+ : "a" ((unsigned short)0xde0c),
+ "r" ((unsigned)real_cs),
+ "r" ((unsigned short)LINEAR_DS)
+ : "eax", "edx");
+
+ i16_assert(!(i16_get_eflags() & EFL_IF));
+ i16_assert(i16_get_ds() == i16_get_cs());
+ i16_assert(i16_get_es() == i16_get_cs());
+ i16_assert(i16_get_ss() == i16_get_cs());
+
+ /* Make sure the direction flag is still clear. */
+ i16_cld();
+}
+
+CODE32
+
+static void vcpi_real_int(int intnum, struct real_call_data *rcd)
+{
+ do_16bit(
+ unsigned int eflags;
+
+ i16_vcpi_switch_to_real_mode();
+ i16_real_int(intnum, rcd);
+ i16_vcpi_switch_to_pmode();
+ );
+}
+
+static void vcpi_exit(int rc)
+{
+ do_16bit(
+ i16_vcpi_switch_to_real_mode();
+ i16_exit(rc);
+ while (1);
+ );
+}
+
+CODE16
+
+static inline void
+i16_vcpi_set_int_vecs(unsigned short master, unsigned short slave)
+{
+ unsigned short rc;
+
+ i16_assert(!(get_eflags() & EFL_IF));
+ asm volatile("int $0x67"
+ : "=a" (rc)
+ : "a" ((unsigned short)0xde0b),
+ "b" ((unsigned short)master),
+ "c" ((unsigned short)slave));
+ i16_assert((rc & 0xff00) == 0);
+ i16_assert(!(get_eflags() & EFL_IF));
+}
+
+/* Find a (hopefully) empty set of interrupt vectors
+ to use for the master hardware interrupts.
+ We assume that eight interrupt vectors in a row
+ that all have the same value are unused.
+ If VCPI servers weren't so brain-damaged
+ and took care of this during interrupt reflection
+ (like we do when running in raw mode),
+ this kludgery wouldn't be needed... */
+static int i16_find_free_vec_range()
+{
+ /* i will track the first vector in a range;
+ j will track the last. */
+ int i, j;
+ struct far_pointer_16 iv, jv;
+
+ j = 0xff;
+ i16_dos_get_int_vec(j, &jv);
+
+ for (i = j-1; ; i--)
+ {
+ if (i == 0x50)
+ {
+ /* No completely free sets found.
+ Stop here and just use 0x50-0x57. */
+ break;
+ }
+
+ i16_dos_get_int_vec(i, &iv);
+ if ((iv.ofs != jv.ofs) || (iv.seg != jv.seg))
+ {
+ /* Vector contents changed. */
+ j = i;
+ jv = iv;
+ continue;
+ }
+
+ if ((j-i+1 >= 8) && ((i & 7) == 0))
+ {
+ /* Found a free range. */
+ break;
+ }
+ }
+
+ return i;
+}
+
+void i16_vcpi_check()
+{
+ extern vm_offset_t dos_mem_phys_free_mem;
+ extern vm_offset_t dos_mem_phys_free_size;
+ extern void pmode_return();
+ extern vm_offset_t boot_image_pa;
+ extern void (*i16_switch_to_real_mode)();
+ extern void (*i16_switch_to_pmode)();
+
+ unsigned short rc;
+ unsigned short first_free_pte;
+ unsigned short vcpi_ver;
+
+ i16_assert(boot_image_pa == kvtophys(0));
+
+ /* Check for presence of EMM driver. */
+ {
+ int dev_info, out_status;
+ int fh;
+
+ fh = i16_dos_open("EMMXXXX0", 0);
+ if (fh < 0)
+ return;
+ dev_info = i16_dos_get_device_info(fh);
+ out_status = i16_dos_get_output_status(fh);
+ i16_dos_close(fh);
+ if ((dev_info < 0) || !(dev_info & 0x80)
+ || (out_status != 0xff))
+ return;
+ }
+
+ /* Allocate an EMS page to force the EMM to be turned on.
+ If it fails, keep going anyway -
+ it may simply mean all the EMS pages are allocated. */
+ asm volatile("int $0x67"
+ : "=a" (rc),
+ "=d" (ems_handle)
+ : "a" ((unsigned short)0x4300),
+ "b" ((unsigned short)1));
+ if (!(rc & 0xff00))
+ ems_page_allocated = TRUE;
+
+ /* Check for VCPI. */
+ asm volatile("int $0x67" : "=a" (rc), "=b" (vcpi_ver) : "a" ((unsigned short)0xde00));
+ if (rc & 0xff00)
+ return;
+ i16_assert(vcpi_ver >= 0x0100);
+
+ /* OK, it's there - we're now committed to using VCPI. */
+ i16_switch_to_real_mode = i16_vcpi_switch_to_real_mode;
+ i16_switch_to_pmode = i16_vcpi_switch_to_pmode;
+ real_int = vcpi_real_int;
+ real_exit = vcpi_exit;
+
+ do_debug(i16_puts("VCPI detected"));
+
+ /* Allocate a page directory and page table from low DOS memory. */
+ {
+ vm_offset_t new_dos_mem;
+
+ new_dos_mem = ((dos_mem_phys_free_mem + PAGE_MASK) & ~PAGE_MASK)
+ + PAGE_SIZE*2;
+ if ((!dos_mem_phys_free_mem)
+ || (new_dos_mem - dos_mem_phys_free_mem
+ > dos_mem_phys_free_size))
+ i16_die("not enough low DOS memory available");
+ dos_mem_phys_free_size -= new_dos_mem - dos_mem_phys_free_mem;
+ dos_mem_phys_free_mem = new_dos_mem;
+ vcpi_pdir = new_dos_mem - PAGE_SIZE*2;
+ vcpi_ptable0 = vcpi_pdir + PAGE_SIZE;
+ }
+
+ /* Initialize them. */
+ {
+ int i;
+ pt_entry_t pde0 = vcpi_ptable0
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_USER;
+
+ set_fs(vcpi_pdir >> 4);
+ asm volatile("movl %0,%%fs:(0)" : : "r" (pde0));
+ for (i = 1; i < NPDES + NPTES; i++)
+ asm volatile("movl $0,%%fs:(,%0,4)" : : "r" (i));
+ }
+
+ /* Initialize the protected-mode interface. */
+ asm volatile("
+ pushw %%es
+ movw %4,%%es
+ int $0x67
+ popw %%es
+ "
+ : "=a" (rc),
+ "=b" (vcpi_pmode_entry.ofs),
+ "=D" (first_free_pte)
+ : "a" ((unsigned short)0xde01),
+ "r" ((unsigned short)(vcpi_ptable0 >> 4)),
+ "D" (0),
+ "S" (&cpu[0].tables.gdt[VCPI_CS_IDX]));
+ i16_assert((rc & 0xff00) == 0);
+ i16_assert(get_ds() == get_cs());
+ i16_assert(get_es() == get_cs());
+
+#ifdef DEBUG
+ /* Sanity check: make sure the server did what it was supposed to do. */
+
+	i16_assert((cpu[0].tables.gdt[VCPI_CS_IDX].access & (ACC_P|ACC_CODE)) == (ACC_P|ACC_CODE));
+ if (cpu[0].tables.gdt[VCPI_CS_IDX].granularity & SZ_G)
+ i16_assert(vcpi_pmode_entry.ofs <
+ (((vm_offset_t)cpu[0].tables.gdt[VCPI_CS_IDX].limit_high << 28)
+ | ((vm_offset_t)cpu[0].tables.gdt[VCPI_CS_IDX].limit_low << 12)
+ | (vm_offset_t)0xfff));
+ else
+ i16_assert(vcpi_pmode_entry.ofs <
+ (((vm_offset_t)cpu[0].tables.gdt[VCPI_CS_IDX].limit_high << 16)
+ | (vm_offset_t)cpu[0].tables.gdt[VCPI_CS_IDX].limit_low));
+
+ i16_assert(first_free_pte/sizeof(pt_entry_t) >= 1*1024*1024/PAGE_SIZE);
+ i16_assert(first_free_pte/sizeof(pt_entry_t) <= 4*1024*1024/PAGE_SIZE);
+
+ {
+ int i;
+
+ for (i = 0; i < 1*1024*1024/PAGE_SIZE; i++)
+ {
+ pt_entry_t entry;
+
+ set_ds(vcpi_ptable0 >> 4);
+ entry = ((pt_entry_t*)0)[i];
+ set_ds(get_cs());
+ i16_assert(entry & INTEL_PTE_VALID);
+ if (i < 0xf0000/PAGE_SIZE)
+ i16_assert(entry & INTEL_PTE_WRITE);
+ i16_assert(entry & INTEL_PTE_USER);
+ i16_assert(!(entry & INTEL_PTE_AVAIL));
+ }
+ }
+#endif /* DEBUG */
+
+ /* Find the VCPI server's hardware interrupt vector mappings. */
+ asm volatile("int $0x67"
+ : "=a" (rc),
+ "=b" (irq_master_base),
+ "=c" (irq_slave_base)
+ : "a" ((unsigned short)0xde0a));
+ i16_assert((rc & 0xff00) == 0);
+ irq_master_base &= 0xffff;
+ irq_slave_base &= 0xffff;
+ i16_assert((irq_master_base & 7) == 0);
+ i16_assert((irq_master_base == 0x08) || (irq_master_base >= 0x20));
+ i16_assert((irq_slave_base & 7) == 0);
+ i16_assert(irq_slave_base >= 0x20);
+
+ /* If they're the usual DOS values, change them. */
+ if (irq_master_base == 0x08)
+ {
+ pic_reprogrammed = TRUE;
+
+ i16_cli();
+
+ irq_master_base = i16_find_free_vec_range();
+
+ /* Save the old vectors in that range
+ and set them to a copy of vectors 8-15. */
+ {
+ int i;
+
+ for (i = 0; i < 8; i++)
+ {
+ struct far_pointer_16 hw_vec;
+
+ i16_dos_get_int_vec(irq_master_base+i,
+ &master_save_vecs[i]);
+ i16_dos_get_int_vec(0x08+i, &hw_vec);
+ i16_dos_set_int_vec(irq_master_base+i, &hw_vec);
+ }
+ }
+
+ /* Reprogram the PIC. */
+ i16_pic_set_master(irq_master_base);
+
+ /* Inform the VCPI server. */
+ i16_vcpi_set_int_vecs(irq_master_base, irq_slave_base);
+ }
+
+ /* Initialize the switch-to-pmode data structure. */
+ vcpi_switch_data.phys_pdir = vcpi_pdir;
+ vcpi_switch_data.lin_gdt = boot_image_pa+(vm_offset_t)&gdt_pdesc.limit;
+ vcpi_switch_data.lin_idt = boot_image_pa+(vm_offset_t)&idt_pdesc.limit;
+ vcpi_switch_data.tss_sel = KERNEL_TSS;
+ vcpi_switch_data.entry_eip = (unsigned short)(vm_offset_t)&pmode_return;
+ vcpi_switch_data.entry_cs = KERNEL_16_CS;
+
+ /* Initialize the GDT and IDT pseudo-descriptors. */
+ gdt_pdesc.limit = sizeof(cpu[0].tables.gdt)-1;
+ gdt_pdesc.linear_base = boot_image_pa + (vm_offset_t)&cpu[0].tables.gdt;
+ idt_pdesc.limit = sizeof(cpu[0].tables.idt)-1;
+ idt_pdesc.linear_base = boot_image_pa + (vm_offset_t)&cpu[0].tables.idt;
+
+ /* Set the GDT to temporary settings
+ just for getting into pmode the first time. */
+ i16_gdt_init_temp();
+
+ /* VCPI insists on loading a TSS immediately on entering pmode,
+ so initialize the KERNEL_TSS descriptor in the GDT. */
+ i16_fill_gdt_descriptor(&cpu[0], KERNEL_TSS,
+ boot_image_pa + (vm_offset_t)&cpu[0].tables.tss,
+ sizeof(cpu[0].tables.tss)-1,
+ ACC_PL_K|ACC_TSS, 0);
+ cpu[0].tables.tss.io_bit_map_offset = sizeof(cpu[0].tables.tss);
+
+#if 0
+ /* Dump the various VCPI data structures, for debugging. */
+ {
+ int i;
+
+ i16_puts("Switch data");
+	i16_writehexl(vcpi_switch_data.phys_pdir); i16_putchar(' ');
+	i16_writehexl(vcpi_switch_data.lin_gdt); i16_putchar(' ');
+	i16_writehexl(vcpi_switch_data.lin_idt); i16_putchar(' ');
+	i16_writehexw(vcpi_switch_data.ldt_sel); i16_putchar(' ');
+	i16_writehexw(vcpi_switch_data.tss_sel); i16_putchar(' ');
+	i16_writehexl(vcpi_switch_data.entry_eip); i16_putchar(' ');
+	i16_writehexw(vcpi_switch_data.entry_cs); i16_puts("");
+
+ i16_puts("GDT pdesc");
+ i16_writehexw(gdt_pdesc.limit); i16_putchar(' ');
+ i16_writehexl(gdt_pdesc.linear_base); i16_puts("");
+
+ i16_puts("IDT pdesc");
+ i16_writehexw(idt_pdesc.limit); i16_putchar(' ');
+ i16_writehexl(idt_pdesc.linear_base); i16_puts("");
+
+ i16_puts("GDT");
+ for (i = 0; i < GDTSZ; i++)
+ {
+ i16_writehexw(i*8); i16_putchar(' ');
+ i16_writehexll(*((long long*)&cpu[0].tables.gdt[i]));
+ i16_puts("");
+ }
+ }
+#endif
+
+ /* Switch into pmode briefly to initialize the CPU tables and such. */
+ i16_vcpi_switch_to_pmode();
+ i16_do_32bit(
+
+ /* Note that right now we can only access the first 1MB of memory,
+ because paging is enabled and that's the only memory region that's been mapped.
+ The rest of physical memory won't be mapped until VCPI_PAGING_INIT,
+ but VCPI_PAGING_INIT requires allocating memory for page tables,
+ and we can't call phys_mem_collect() to provide memory to the allocator
+ until all physical memory can be read and written.
+ To get out of this catch-22,
+ we call dos_mem_collect() beforehand here
+ to make low DOS memory available for allocation by VCPI_PAGING_INIT.
+ The call to phys_mem_collect() later will cause dos_mem_collect
+ to be called a second time, but it'll just do nothing then. */
+ dos_mem_collect();
+
+ /* Initialize the basic CPU tables. */
+ cpu_init(&cpu[0]);
+
+ /* Initialize the paging system. */
+ VCPI_PAGING_INIT(vcpi_pdir, (vm_offset_t)first_free_pte / 4 * PAGE_SIZE);
+
+ /* Now that we can access all physical memory,
+ collect the remaining memory regions we discovered while in 16-bit mode
+ and add them to our free memory list. */
+ phys_mem_collect();
+
+ /* Initialize the hardware interrupt vectors in the IDT. */
+ idt_irq_init();
+
+ /* Now that we have an initialized LDT descriptor, start using it. */
+ KERNEL_LDT_INIT();
+
+ /* Switch to real mode and back again once more,
+ to make sure everything's loaded properly. */
+ do_16bit(
+ i16_vcpi_switch_to_real_mode();
+ i16_vcpi_switch_to_pmode();
+ );
+
+ vcpi_start();
+ );
+}
+
+/* Shouldn't be necessary, but just in case the end of the above function,
+ containing the .code16, gets "optimized away"... */
+CODE16
+
+void i16_vcpi_shutdown()
+{
+ if (pic_reprogrammed)
+ {
+ pic_reprogrammed = FALSE;
+
+ i16_cli();
+
+ i16_assert(irq_master_base >= 0x20);
+
+ /* Reprogram the PIC. */
+ i16_pic_set_master(0x08);
+
+ /* Inform the VCPI server. */
+ i16_vcpi_set_int_vecs(0x08, irq_slave_base);
+
+ /* Restore the old interrupt vectors. */
+ {
+ int i;
+
+ for (i = 0; i < 8; i++)
+ {
+ i16_dos_set_int_vec(irq_master_base+i,
+ &master_save_vecs[i]);
+ }
+ }
+
+ i16_sti();
+ }
+
+ if (ems_page_allocated)
+ {
+ ems_page_allocated = 0;
+ asm volatile("int $0x67" : : "a" (0x4500), "d" (ems_handle));
+ }
+}
+
+#endif /* ENABLE_VCPI */
+
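[Annotation, not part of the patch.]  The vector-range scan in i16_find_free_vec_range() is the subtle part of the VCPI setup above; restated without the far-pointer reads, the heuristic is just "eight consecutive, identically-valued vectors starting on a multiple of eight", with 0x50-0x57 as the fallback.  Illustration only, not code from the patch.

/* Sketch only: the same scan over an in-memory copy of the vector table. */
static int find_free_vec_range(const struct far_pointer_16 vec[256])
{
        int i, j = 0xff;

        for (i = j - 1; ; i--)
        {
                if (i == 0x50)
                        break;          /* fallback: just use 0x50-0x57 */
                if (vec[i].ofs != vec[j].ofs || vec[i].seg != vec[j].seg)
                {
                        j = i;          /* run broken; restart it here */
                        continue;
                }
                if (j - i + 1 >= 8 && (i & 7) == 0)
                        break;          /* found an aligned free run */
        }
        return i;
}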
diff --git a/i386/dos/i16/i16_xms.c b/i386/dos/i16/i16_xms.c
new file mode 100644
index 00000000..ba75d5c0
--- /dev/null
+++ b/i386/dos/i16/i16_xms.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+#include <mach/machine/vm_types.h>
+#include <mach/machine/far_ptr.h>
+#include <mach/machine/asm.h>
+
+#include "i16_a20.h"
+#include "phys_mem.h"
+#include "debug.h"
+
+
+struct far_pointer_16 xms_control;
+
+#define CALL_XMS "lcallw "SEXT(xms_control)
+
+
+static vm_offset_t xms_phys_free_mem;
+static vm_size_t xms_phys_free_size;
+
+static short free_handle;
+static char free_handle_allocated;
+static char free_handle_locked;
+
+
+CODE32
+
+void xms_mem_collect(void)
+{
+ if (xms_phys_free_mem)
+ {
+ phys_mem_add(xms_phys_free_mem, xms_phys_free_size);
+ xms_phys_free_mem = 0;
+ }
+}
+
+CODE16
+
+static void i16_xms_enable_a20(void)
+{
+ short success;
+ asm volatile(CALL_XMS : "=a" (success) : "a" (0x0500) : "ebx");
+ if (!success)
+ i16_die("XMS error: can't enable A20 line");
+}
+
+static void i16_xms_disable_a20(void)
+{
+ short success;
+ asm volatile(CALL_XMS : "=a" (success) : "a" (0x0600) : "ebx");
+ if (!success)
+ i16_die("XMS error: can't disable A20 line");
+}
+
+void i16_xms_check()
+{
+ unsigned short rc;
+ unsigned short free_k;
+
+ /* Check for an XMS server. */
+ asm volatile("
+ int $0x2f
+ " : "=a" (rc)
+ : "a" (0x4300));
+ if ((rc & 0xff) != 0x80)
+ return;
+
+ /* Get XMS driver's control function. */
+ asm volatile("
+ pushl %%ds
+ pushl %%es
+ int $0x2f
+ movw %%es,%0
+ popl %%es
+ popl %%ds
+ " : "=r" (xms_control.seg), "=b" (xms_control.ofs)
+ : "a" (0x4310));
+
+ /* See how much memory is available. */
+ asm volatile(CALL_XMS
+ : "=a" (free_k)
+ : "a" (0x0800)
+ : "ebx", "edx");
+ if (free_k * 1024 == 0)
+ return;
+
+ xms_phys_free_size = (unsigned)free_k * 1024;
+
+ /* Grab the biggest memory block we can get. */
+ asm volatile(CALL_XMS
+ : "=a" (rc), "=d" (free_handle)
+ : "a" (0x0900), "d" (free_k)
+ : "ebx");
+ if (!rc)
+ i16_die("XMS error: can't allocate extended memory");
+
+ free_handle_allocated = 1;
+
+ /* Lock it down. */
+ asm volatile(CALL_XMS "
+ shll $16,%%edx
+ movw %%bx,%%dx
+ " : "=a" (rc), "=d" (xms_phys_free_mem)
+ : "a" (0x0c00), "d" (free_handle)
+ : "ebx");
+ if (!rc)
+ i16_die("XMS error: can't lock down extended memory");
+
+ free_handle_locked = 1;
+
+ /* We need to update phys_mem_max here
+ instead of just letting phys_mem_add() do it
+ when the memory is collected with phys_mem_collect(),
+ because VCPI initialization needs to know the top of physical memory
+ before phys_mem_collect() is called.
+ See i16_vcpi.c for the gross details. */
+ if (phys_mem_max < xms_phys_free_mem + xms_phys_free_size)
+ phys_mem_max = xms_phys_free_mem + xms_phys_free_size;
+
+ i16_enable_a20 = i16_xms_enable_a20;
+ i16_disable_a20 = i16_xms_disable_a20;
+
+ do_debug(i16_puts("XMS detected"));
+}
+
+void i16_xms_shutdown()
+{
+ unsigned short rc;
+
+ if (free_handle_locked)
+ {
+ /* Unlock our memory block. */
+ asm volatile(CALL_XMS
+ : "=a" (rc)
+ : "a" (0x0d00), "d" (free_handle)
+ : "ebx");
+ free_handle_locked = 0;
+ if (!rc)
+ i16_die("XMS error: can't unlock extended memory");
+ }
+
+ if (free_handle_allocated)
+ {
+ /* Free the memory block. */
+ asm volatile(CALL_XMS
+ : "=a" (rc)
+ : "a" (0x0a00), "d" (free_handle)
+ : "ebx");
+ free_handle_allocated = 0;
+ if (!rc)
+ i16_die("XMS error: can't free extended memory");
+ }
+}
+
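[Annotation, not part of the patch.]  Two small pieces of arithmetic are buried in the inline asm above: function 08h reports free extended memory in kilobytes (hence the * 1024), and the lock call 0Ch returns the block's 32-bit physical address split across DX:BX.  The same math in plain C, for reference only:

/* Sketch only: how the XMS replies above are decoded. */
static unsigned long xms_free_bytes(unsigned short free_k)
{
        return (unsigned long)free_k * 1024;
}

static unsigned long xms_lock_addr(unsigned short dx, unsigned short bx)
{
        return ((unsigned long)dx << 16) | bx;
}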
diff --git a/i386/dos/i16/idt.h b/i386/dos/i16/idt.h
new file mode 100644
index 00000000..54691966
--- /dev/null
+++ b/i386/dos/i16/idt.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_DOS_IDT_
+#define _I386_DOS_IDT_
+
+#include "config.h"
+
+#ifdef ENABLE_VCPI
+
+/* We need a maximum-size IDT in order to run as a VCPI client,
+ because someone else may already have reprogrammed the PIC
+ to point to any set of vectors. */
+#define IDTSZ 256
+
+#endif /* ENABLE_VCPI */
+
+#include_next "idt.h"
+
+#endif /* _I386_DOS_IDT_ */
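The #include_next above is the layered-header idiom used throughout this tree: a machine- or environment-specific directory earlier on the include path adds its definitions, then falls through to the generic header of the same name. A generic illustration of the idiom, with hypothetical file names:

/* overlay/thing.h -- found first because its directory comes earlier in -I order */
#define THING_EXTRA	1
#include_next "thing.h"	/* resume the search; picks up the generic thing.h */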
diff --git a/i386/dos/i16/phys_mem_sources.h b/i386/dos/i16/phys_mem_sources.h
new file mode 100644
index 00000000..249e14a6
--- /dev/null
+++ b/i386/dos/i16/phys_mem_sources.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+pms(dos_mem)
+pms(xms_mem)
+/*pms(vcpi_mem) XXX */
+
+#include_next "phys_mem_sources.h"
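The pms() lines above form an X-macro list: the file that includes this header defines pms() first, so the same list can expand into declarations, calls, or tables. An illustrative consumer follows; the _probe suffix is hypothetical, and the real hooks live elsewhere in the tree:

/* Illustration only: declare one probe routine per physical-memory source. */
#define pms(name)	extern void name##_probe(void);
#include "phys_mem_sources.h"
#undef pms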
diff --git a/i386/dos/putchar.c b/i386/dos/putchar.c
new file mode 100644
index 00000000..9b5d59bc
--- /dev/null
+++ b/i386/dos/putchar.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/eflags.h>
+
+#include "real.h"
+
+int putchar(int c)
+{
+ struct real_call_data rcd;
+
+ if (c == '\n')
+ putchar('\r');
+
+ real_call_data_init(&rcd);
+ rcd.eax = 0x0200;
+ rcd.edx = c;
+ real_int(0x21, &rcd);
+ return rcd.flags & EFL_CF ? -1 : 0;
+}
+
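putchar() above issues DOS INT 21h function 02h (character output in DL), expanding '\n' to CR/LF first and treating a set carry flag on return as failure. A trivial caller, purely for illustration:

/* Illustration: a minimal puts() layered on the putchar() above. */
int puts(const char *s)
{
	while (*s)
		if (putchar(*s++) < 0)
			return -1;
	return putchar('\n');
}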
diff --git a/i386/i386/ast.h b/i386/i386/ast.h
new file mode 100644
index 00000000..7afaa41a
--- /dev/null
+++ b/i386/i386/ast.h
@@ -0,0 +1,47 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_AST_H_
+#define _I386_AST_H_
+
+/*
+ * Machine-dependent AST file for machines with no hardware AST support.
+ *
+ * For the I386, we define AST_I386_FP to handle delayed
+ * floating-point exceptions.  The FPU may raise its error interrupt
+ * after the offending user thread has stopped running (i.e. while
+ * the kernel or another thread is executing).
+ */
+
+#define AST_I386_FP 0x80000000
+
+#define MACHINE_AST_PER_THREAD AST_I386_FP
+
+
+/* Chain to the machine-independent header. */
+/* #include_next "ast.h" */
+
+
+#endif /* _I386_AST_H_ */
diff --git a/i386/i386/ast_check.c b/i386/i386/ast_check.c
new file mode 100644
index 00000000..faa3b8ed
--- /dev/null
+++ b/i386/i386/ast_check.c
@@ -0,0 +1,61 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <cpus.h>
+
+#if NCPUS > 1
+
+/*
+ * Handle signalling ASTs on other processors.
+ *
+ * Initial i386 implementation does nothing.
+ */
+
+#include <kern/processor.h>
+
+/*
+ * Initialize for remote invocation of ast_check.
+ */
+init_ast_check(processor)
+ processor_t processor;
+{
+#ifdef lint
+ processor++;
+#endif /* lint */
+}
+
+/*
+ * Cause remote invocation of ast_check. Caller is at splsched().
+ */
+cause_ast_check(processor)
+ processor_t processor;
+{
+#ifdef lint
+ processor++;
+#endif /* lint */
+}
+
+#endif /* NCPUS > 1 */
diff --git a/i386/i386/ast_types.h b/i386/i386/ast_types.h
new file mode 100644
index 00000000..89e31825
--- /dev/null
+++ b/i386/i386/ast_types.h
@@ -0,0 +1,36 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_AST_TYPES_H_
+#define _I386_AST_TYPES_H_
+
+/*
+ * Data type for remote ast_check() invocation support. Currently
+ * not implemented. Do this first to avoid include problems.
+ */
+typedef int ast_check_t;
+
+#endif /* _I386_AST_TYPES_H_ */
diff --git a/i386/i386/cpu_number.h b/i386/i386/cpu_number.h
new file mode 100644
index 00000000..e60ac771
--- /dev/null
+++ b/i386/i386/cpu_number.h
@@ -0,0 +1,49 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-dependent definitions for cpu identification.
+ *
+ */
+#ifndef _I386_CPU_NUMBER_H_
+#define _I386_CPU_NUMBER_H_
+
+#if NCPUS > 1
+
+/* More-specific code must define cpu_number() and CPU_NUMBER. */
+#define CX(addr, reg) addr(,reg,4)
+
+#else /* NCPUS == 1 */
+
+#define CPU_NUMBER(reg)
+#define CX(addr,reg) addr
+
+#endif /* NCPUS == 1 */
+
+#ifndef ASSEMBLER
+#include "kern/cpu_number.h"
+#endif
+
+#endif /* _I386_CPU_NUMBER_H_ */
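To make the effect of these macros concrete, here is how a per-CPU array reference expands under each configuration (derived directly from the definitions above; the symbol name is only an example):

/*
 * NCPUS > 1:	CX(active_stacks,%eax)	->  active_stacks(,%eax,4)
 *		(scaled index, one 4-byte slot per CPU; CPU_NUMBER
 *		 must have loaded the CPU number into %eax first)
 * NCPUS == 1:	CX(active_stacks,%eax)	->  active_stacks
 *		(and CPU_NUMBER(reg) expands to nothing)
 */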
diff --git a/i386/i386/cswitch.S b/i386/i386/cswitch.S
new file mode 100644
index 00000000..8187980f
--- /dev/null
+++ b/i386/i386/cswitch.S
@@ -0,0 +1,142 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <cpus.h>
+#include <platforms.h>
+
+#include <mach/machine/asm.h>
+
+#include "proc_reg.h"
+#include "i386asm.h"
+#include "cpu_number.h"
+
+/*
+ * Context switch routines for i386.
+ */
+
+ENTRY(Load_context)
+ movl S_ARG0,%ecx /* get thread */
+ movl TH_KERNEL_STACK(%ecx),%ecx /* get kernel stack */
+ lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%ecx),%edx
+ /* point to stack top */
+ CPU_NUMBER(%eax)
+ movl %ecx,CX(EXT(active_stacks),%eax) /* store stack address */
+ movl %edx,CX(EXT(kernel_stack),%eax) /* store stack top */
+
+ movl KSS_ESP(%ecx),%esp /* switch stacks */
+ movl KSS_ESI(%ecx),%esi /* restore registers */
+ movl KSS_EDI(%ecx),%edi
+ movl KSS_EBP(%ecx),%ebp
+ movl KSS_EBX(%ecx),%ebx
+ xorl %eax,%eax /* return zero (no old thread) */
+ jmp *KSS_EIP(%ecx) /* resume thread */
+
+/*
+ * This really only has to save registers
+ * when there is no explicit continuation.
+ */
+
+ENTRY(Switch_context)
+ CPU_NUMBER(%edx)
+ movl CX(EXT(active_stacks),%edx),%ecx /* get old kernel stack */
+
+ movl %ebx,KSS_EBX(%ecx) /* save registers */
+ movl %ebp,KSS_EBP(%ecx)
+ movl %edi,KSS_EDI(%ecx)
+ movl %esi,KSS_ESI(%ecx)
+ popl KSS_EIP(%ecx) /* save return PC */
+ movl %esp,KSS_ESP(%ecx) /* save SP */
+
+ movl 0(%esp),%eax /* get old thread */
+ movl %ecx,TH_KERNEL_STACK(%eax) /* save old stack */
+ movl 4(%esp),%ebx /* get continuation */
+ movl %ebx,TH_SWAP_FUNC(%eax) /* save continuation */
+
+ movl 8(%esp),%esi /* get new thread */
+
+ movl TH_KERNEL_STACK(%esi),%ecx /* get its kernel stack */
+ lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%ecx),%ebx
+ /* point to stack top */
+
+ movl %esi,CX(EXT(active_threads),%edx) /* new thread is active */
+ movl %ecx,CX(EXT(active_stacks),%edx) /* set current stack */
+ movl %ebx,CX(EXT(kernel_stack),%edx) /* set stack top */
+
+ movl KSS_ESP(%ecx),%esp /* switch stacks */
+ movl KSS_ESI(%ecx),%esi /* restore registers */
+ movl KSS_EDI(%ecx),%edi
+ movl KSS_EBP(%ecx),%ebp
+ movl KSS_EBX(%ecx),%ebx
+ jmp *KSS_EIP(%ecx) /* return old thread */
+
+ENTRY(Thread_continue)
+ pushl %eax /* push the thread argument */
+ xorl %ebp,%ebp /* zero frame pointer */
+ call *%ebx /* call real continuation */
+
+#if NCPUS > 1
+/*
+ * void switch_to_shutdown_context(thread_t thread,
+ * void (*routine)(processor_t),
+ * processor_t processor)
+ *
+ * saves the kernel context of the thread,
+ * switches to the interrupt stack,
+ * hands the old thread to the scheduler (via thread_dispatch),
+ * then runs routine on the interrupt stack.
+ *
+ * Assumes that the thread is a kernel thread (thus
+ * has no FPU state)
+ */
+ENTRY(switch_to_shutdown_context)
+ CPU_NUMBER(%edx)
+ movl EXT(active_stacks)(,%edx,4),%ecx /* get old kernel stack */
+ movl %ebx,KSS_EBX(%ecx) /* save registers */
+ movl %ebp,KSS_EBP(%ecx)
+ movl %edi,KSS_EDI(%ecx)
+ movl %esi,KSS_ESI(%ecx)
+ popl KSS_EIP(%ecx) /* save return PC */
+ movl %esp,KSS_ESP(%ecx) /* save SP */
+
+ movl 0(%esp),%eax /* get old thread */
+ movl %ecx,TH_KERNEL_STACK(%eax) /* save old stack */
+ movl $0,TH_SWAP_FUNC(%eax) /* clear continuation */
+ movl 4(%esp),%ebx /* get routine to run next */
+ movl 8(%esp),%esi /* get its argument */
+
+ movl _interrupt_stack(,%edx,4),%ecx /* point to its interrupt stack */
+ lea INTSTACK_SIZE(%ecx),%esp /* switch to it (top) */
+
+ pushl %eax /* push thread */
+ call EXT(thread_dispatch) /* reschedule thread */
+ addl $4,%esp /* clean stack */
+
+ pushl %esi /* push argument */
+ call *%ebx /* call routine to run */
+ hlt /* (should never return) */
+
+#endif /* NCPUS > 1 */
+
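The KSS_* displacements above index the machine-dependent register save area kept in each kernel stack; only the callee-saved registers plus ESP and EIP need slots, because Switch_context is reached by an ordinary call. A C-level sketch of the layout those offsets imply (illustrative; the authoritative definition lives in the i386 thread-state headers, which are not part of this file):

/* Sketch of the per-stack save area implied by the KSS_* offsets. */
struct i386_kernel_state {
	int	k_ebx;		/* callee-saved registers */
	int	k_esp;		/* kernel stack pointer */
	int	k_ebp;
	int	k_edi;
	int	k_esi;
	int	k_eip;		/* return PC, saved by the popl above */
};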
diff --git a/i386/i386/db_disasm.c b/i386/i386/db_disasm.c
new file mode 100644
index 00000000..dfb85e2c
--- /dev/null
+++ b/i386/i386/db_disasm.c
@@ -0,0 +1,1423 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#include "mach_kdb.h"
+#if MACH_KDB
+
+/*
+ * Instruction disassembler.
+ */
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+
+#include <kern/task.h>
+
+/*
+ * Switch to disassemble 16-bit code.
+ */
+boolean_t db_disasm_16 = FALSE;
+
+/*
+ * Size attributes
+ */
+#define BYTE 0
+#define WORD 1
+#define LONG 2
+#define QUAD 3
+#define SNGL 4
+#define DBLR 5
+#define EXTR 6
+#define SDEP 7
+#define NONE 8
+
+/*
+ * Addressing modes
+ */
+#define E 1 /* general effective address */
+#define Eind 2 /* indirect address (jump, call) */
+#define El 3 /* address, long size */
+#define Ew 4 /* address, word size */
+#define Eb 5 /* address, byte size */
+#define R 6 /* register, in 'reg' field */
+#define Rw 7 /* word register, in 'reg' field */
+#define Ri 8 /* register in instruction */
+#define S 9 /* segment reg, in 'reg' field */
+#define Si 10 /* segment reg, in instruction */
+#define A 11 /* accumulator */
+#define BX 12 /* (bx) */
+#define CL 13 /* cl, for shifts */
+#define DX 14 /* dx, for IO */
+#define SI 15 /* si */
+#define DI 16 /* di */
+#define CR 17 /* control register */
+#define DR 18 /* debug register */
+#define TR 19 /* test register */
+#define I 20 /* immediate, unsigned */
+#define Is 21 /* immediate, signed */
+#define Ib 22 /* byte immediate, unsigned */
+#define Ibs 23 /* byte immediate, signed */
+#define Iw 24 /* word immediate, unsigned */
+#define Il 25 /* long immediate */
+#define O 26 /* direct address */
+#define Db 27 /* byte displacement from EIP */
+#define Dl 28 /* long displacement from EIP */
+#define o1 29 /* constant 1 */
+#define o3 30 /* constant 3 */
+#define OS 31 /* immediate offset/segment */
+#define ST 32 /* FP stack top */
+#define STI 33 /* FP stack */
+#define X 34 /* extended FP op */
+#define XA 35 /* for 'fstcw %ax' */
+
+struct inst {
+ char * i_name; /* name */
+ short i_has_modrm; /* has regmodrm byte */
+ short i_size; /* operand size */
+ int i_mode; /* addressing modes */
+ char * i_extra; /* pointer to extra opcode table */
+};
+
+#define op1(x) (x)
+#define op2(x,y) ((x)|((y)<<8))
+#define op3(x,y,z) ((x)|((y)<<8)|((z)<<16))
+
+struct finst {
+ char * f_name; /* name for memory instruction */
+ int f_size; /* size for memory instruction */
+ int f_rrmode; /* mode for rr instruction */
+ char * f_rrname; /* name for rr instruction
+ (or pointer to table) */
+};
+
+char * db_Grp6[] = {
+ "sldt",
+ "str",
+ "lldt",
+ "ltr",
+ "verr",
+ "verw",
+ "",
+ ""
+};
+
+char * db_Grp7[] = {
+ "sgdt",
+ "sidt",
+ "lgdt",
+ "lidt",
+ "smsw",
+ "",
+ "lmsw",
+ "invlpg"
+};
+
+char * db_Grp8[] = {
+ "",
+ "",
+ "",
+ "",
+ "bt",
+ "bts",
+ "btr",
+ "btc"
+};
+
+struct inst db_inst_0f0x[] = {
+/*00*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp6 },
+/*01*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp7 },
+/*02*/ { "lar", TRUE, LONG, op2(E,R), 0 },
+/*03*/ { "lsl", TRUE, LONG, op2(E,R), 0 },
+/*04*/ { "", FALSE, NONE, 0, 0 },
+/*05*/ { "", FALSE, NONE, 0, 0 },
+/*06*/ { "clts", FALSE, NONE, 0, 0 },
+/*07*/ { "", FALSE, NONE, 0, 0 },
+
+/*08*/ { "invd", FALSE, NONE, 0, 0 },
+/*09*/ { "wbinvd",FALSE, NONE, 0, 0 },
+/*0a*/ { "", FALSE, NONE, 0, 0 },
+/*0b*/ { "", FALSE, NONE, 0, 0 },
+/*0c*/ { "", FALSE, NONE, 0, 0 },
+/*0d*/ { "", FALSE, NONE, 0, 0 },
+/*0e*/ { "", FALSE, NONE, 0, 0 },
+/*0f*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst db_inst_0f2x[] = {
+/*20*/ { "mov", TRUE, LONG, op2(CR,El), 0 }, /* use El for reg */
+/*21*/ { "mov", TRUE, LONG, op2(DR,El), 0 }, /* since mod == 11 */
+/*22*/ { "mov", TRUE, LONG, op2(El,CR), 0 },
+/*23*/ { "mov", TRUE, LONG, op2(El,DR), 0 },
+/*24*/ { "mov", TRUE, LONG, op2(TR,El), 0 },
+/*25*/ { "", FALSE, NONE, 0, 0 },
+/*26*/ { "mov", TRUE, LONG, op2(El,TR), 0 },
+/*27*/ { "", FALSE, NONE, 0, 0 },
+
+/*28*/ { "", FALSE, NONE, 0, 0 },
+/*29*/ { "", FALSE, NONE, 0, 0 },
+/*2a*/ { "", FALSE, NONE, 0, 0 },
+/*2b*/ { "", FALSE, NONE, 0, 0 },
+/*2c*/ { "", FALSE, NONE, 0, 0 },
+/*2d*/ { "", FALSE, NONE, 0, 0 },
+/*2e*/ { "", FALSE, NONE, 0, 0 },
+/*2f*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst db_inst_0f8x[] = {
+/*80*/ { "jo", FALSE, NONE, op1(Dl), 0 },
+/*81*/ { "jno", FALSE, NONE, op1(Dl), 0 },
+/*82*/ { "jb", FALSE, NONE, op1(Dl), 0 },
+/*83*/ { "jnb", FALSE, NONE, op1(Dl), 0 },
+/*84*/ { "jz", FALSE, NONE, op1(Dl), 0 },
+/*85*/ { "jnz", FALSE, NONE, op1(Dl), 0 },
+/*86*/ { "jbe", FALSE, NONE, op1(Dl), 0 },
+/*87*/ { "jnbe", FALSE, NONE, op1(Dl), 0 },
+
+/*88*/ { "js", FALSE, NONE, op1(Dl), 0 },
+/*89*/ { "jns", FALSE, NONE, op1(Dl), 0 },
+/*8a*/ { "jp", FALSE, NONE, op1(Dl), 0 },
+/*8b*/ { "jnp", FALSE, NONE, op1(Dl), 0 },
+/*8c*/ { "jl", FALSE, NONE, op1(Dl), 0 },
+/*8d*/ { "jnl", FALSE, NONE, op1(Dl), 0 },
+/*8e*/ { "jle", FALSE, NONE, op1(Dl), 0 },
+/*8f*/ { "jnle", FALSE, NONE, op1(Dl), 0 },
+};
+
+struct inst db_inst_0f9x[] = {
+/*90*/ { "seto", TRUE, NONE, op1(Eb), 0 },
+/*91*/ { "setno", TRUE, NONE, op1(Eb), 0 },
+/*92*/ { "setb", TRUE, NONE, op1(Eb), 0 },
+/*93*/ { "setnb", TRUE, NONE, op1(Eb), 0 },
+/*94*/ { "setz", TRUE, NONE, op1(Eb), 0 },
+/*95*/ { "setnz", TRUE, NONE, op1(Eb), 0 },
+/*96*/ { "setbe", TRUE, NONE, op1(Eb), 0 },
+/*97*/ { "setnbe",TRUE, NONE, op1(Eb), 0 },
+
+/*98*/ { "sets", TRUE, NONE, op1(Eb), 0 },
+/*99*/ { "setns", TRUE, NONE, op1(Eb), 0 },
+/*9a*/ { "setp", TRUE, NONE, op1(Eb), 0 },
+/*9b*/ { "setnp", TRUE, NONE, op1(Eb), 0 },
+/*9c*/ { "setl", TRUE, NONE, op1(Eb), 0 },
+/*9d*/ { "setnl", TRUE, NONE, op1(Eb), 0 },
+/*9e*/ { "setle", TRUE, NONE, op1(Eb), 0 },
+/*9f*/ { "setnle",TRUE, NONE, op1(Eb), 0 },
+};
+
+struct inst db_inst_0fax[] = {
+/*a0*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*a1*/ { "pop", FALSE, NONE, op1(Si), 0 },
+/*a2*/ { "", FALSE, NONE, 0, 0 },
+/*a3*/ { "bt", TRUE, LONG, op2(R,E), 0 },
+/*a4*/ { "shld", TRUE, LONG, op3(Ib,E,R), 0 },
+/*a5*/ { "shld", TRUE, LONG, op3(CL,E,R), 0 },
+/*a6*/ { "", FALSE, NONE, 0, 0 },
+/*a7*/ { "", FALSE, NONE, 0, 0 },
+
+/*a8*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*a9*/ { "pop", FALSE, NONE, op1(Si), 0 },
+/*aa*/ { "", FALSE, NONE, 0, 0 },
+/*ab*/ { "bts", TRUE, LONG, op2(R,E), 0 },
+/*ac*/ { "shrd", TRUE, LONG, op3(Ib,E,R), 0 },
+/*ad*/ { "shrd", TRUE, LONG, op3(CL,E,R), 0 },
+/*ae*/ { "",      FALSE, NONE,  0,	      0 },
+/*af*/ { "imul",  TRUE,  LONG,  op2(E,R),    0 },
+};
+
+struct inst db_inst_0fbx[] = {
+/*b0*/ { "", FALSE, NONE, 0, 0 },
+/*b1*/ { "", FALSE, NONE, 0, 0 },
+/*b2*/ { "lss", TRUE, LONG, op2(E, R), 0 },
+/*b3*/ { "btr", TRUE, LONG, op2(R, E), 0 },
+/*b4*/ { "lfs", TRUE, LONG, op2(E, R), 0 },
+/*b5*/ { "lgs", TRUE, LONG, op2(E, R), 0 },
+/*b6*/ { "movzb", TRUE, LONG, op2(Eb,R), 0 },
+/*b7*/ { "movzw", TRUE, LONG, op2(Ew,R), 0 },
+
+/*b8*/ { "", FALSE, NONE, 0, 0 },
+/*b9*/ { "", FALSE, NONE, 0, 0 },
+/*ba*/ { "", TRUE, LONG, op2(Ibs,E), (char *)db_Grp8 },
+/*bb*/ { "btc", TRUE, LONG, op2(R, E), 0 },
+/*bc*/ { "bsf", TRUE, LONG, op2(E, R), 0 },
+/*bd*/ { "bsr", TRUE, LONG, op2(E, R), 0 },
+/*be*/ { "movsb", TRUE, LONG, op2(Eb,R), 0 },
+/*bf*/ { "movsw", TRUE, LONG, op2(Ew,R), 0 },
+};
+
+struct inst db_inst_0fcx[] = {
+/*c0*/ { "xadd", TRUE, BYTE, op2(R, E), 0 },
+/*c1*/ { "xadd", TRUE, LONG, op2(R, E), 0 },
+/*c2*/ { "", FALSE, NONE, 0, 0 },
+/*c3*/ { "", FALSE, NONE, 0, 0 },
+/*c4*/ { "", FALSE, NONE, 0, 0 },
+/*c5*/ { "", FALSE, NONE, 0, 0 },
+/*c6*/ { "", FALSE, NONE, 0, 0 },
+/*c7*/ { "", FALSE, NONE, 0, 0 },
+/*c8*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*c9*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*ca*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cb*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cc*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cd*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*ce*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cf*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+};
+
+struct inst db_inst_0fdx[] = {
+/*c0*/ { "cmpxchg",TRUE, BYTE, op2(R, E), 0 },
+/*c1*/ { "cmpxchg",TRUE, LONG, op2(R, E), 0 },
+/*c2*/ { "", FALSE, NONE, 0, 0 },
+/*c3*/ { "", FALSE, NONE, 0, 0 },
+/*c4*/ { "", FALSE, NONE, 0, 0 },
+/*c5*/ { "", FALSE, NONE, 0, 0 },
+/*c6*/ { "", FALSE, NONE, 0, 0 },
+/*c7*/ { "", FALSE, NONE, 0, 0 },
+/*c8*/ { "", FALSE, NONE, 0, 0 },
+/*c9*/ { "", FALSE, NONE, 0, 0 },
+/*ca*/ { "", FALSE, NONE, 0, 0 },
+/*cb*/ { "", FALSE, NONE, 0, 0 },
+/*cc*/ { "", FALSE, NONE, 0, 0 },
+/*cd*/ { "", FALSE, NONE, 0, 0 },
+/*ce*/ { "", FALSE, NONE, 0, 0 },
+/*cf*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst *db_inst_0f[] = {
+ db_inst_0f0x,
+ 0,
+ db_inst_0f2x,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ db_inst_0f8x,
+ db_inst_0f9x,
+ db_inst_0fax,
+ db_inst_0fbx,
+ db_inst_0fcx,
+ db_inst_0fdx,
+ 0,
+ 0
+};
+
+char * db_Esc92[] = {
+ "fnop", "", "", "", "", "", "", ""
+};
+char * db_Esc93[] = {
+ "", "", "", "", "", "", "", ""
+};
+char * db_Esc94[] = {
+ "fchs", "fabs", "", "", "ftst", "fxam", "", ""
+};
+char * db_Esc95[] = {
+ "fld1", "fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz",""
+};
+char * db_Esc96[] = {
+ "f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp",
+ "fincstp"
+};
+char * db_Esc97[] = {
+ "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"
+};
+
+char * db_Esca4[] = {
+ "", "fucompp","", "", "", "", "", ""
+};
+
+char * db_Escb4[] = {
+ "", "", "fnclex","fninit","", "", "", ""
+};
+
+char * db_Esce3[] = {
+ "", "fcompp","", "", "", "", "", ""
+};
+
+char * db_Escf4[] = {
+ "fnstsw","", "", "", "", "", "", ""
+};
+
+struct finst db_Esc8[] = {
+/*0*/ { "fadd", SNGL, op2(STI,ST), 0 },
+/*1*/ { "fmul", SNGL, op2(STI,ST), 0 },
+/*2*/ { "fcom", SNGL, op2(STI,ST), 0 },
+/*3*/ { "fcomp", SNGL, op2(STI,ST), 0 },
+/*4*/ { "fsub", SNGL, op2(STI,ST), 0 },
+/*5*/ { "fsubr", SNGL, op2(STI,ST), 0 },
+/*6*/ { "fdiv", SNGL, op2(STI,ST), 0 },
+/*7*/ { "fdivr", SNGL, op2(STI,ST), 0 },
+};
+
+struct finst db_Esc9[] = {
+/*0*/ { "fld", SNGL, op1(STI), 0 },
+/*1*/ { "", NONE, op1(STI), "fxch" },
+/*2*/ { "fst", SNGL, op1(X), (char *)db_Esc92 },
+/*3*/ { "fstp", SNGL, op1(X), (char *)db_Esc93 },
+/*4*/ { "fldenv", NONE, op1(X), (char *)db_Esc94 },
+/*5*/ { "fldcw", NONE, op1(X), (char *)db_Esc95 },
+/*6*/ { "fnstenv",NONE, op1(X), (char *)db_Esc96 },
+/*7*/ { "fnstcw", NONE, op1(X), (char *)db_Esc97 },
+};
+
+struct finst db_Esca[] = {
+/*0*/ { "fiadd", WORD, 0, 0 },
+/*1*/ { "fimul", WORD, 0, 0 },
+/*2*/ { "ficom", WORD, 0, 0 },
+/*3*/ { "ficomp", WORD, 0, 0 },
+/*4*/ { "fisub", WORD, op1(X), (char *)db_Esca4 },
+/*5*/ { "fisubr", WORD, 0, 0 },
+/*6*/ { "fidiv", WORD, 0, 0 },
+/*7*/ { "fidivr", WORD, 0, 0 }
+};
+
+struct finst db_Escb[] = {
+/*0*/ { "fild", WORD, 0, 0 },
+/*1*/ { "", NONE, 0, 0 },
+/*2*/ { "fist", WORD, 0, 0 },
+/*3*/ { "fistp", WORD, 0, 0 },
+/*4*/ { "", WORD, op1(X), (char *)db_Escb4 },
+/*5*/ { "fld", EXTR, 0, 0 },
+/*6*/ { "", WORD, 0, 0 },
+/*7*/ { "fstp", EXTR, 0, 0 },
+};
+
+struct finst db_Escc[] = {
+/*0*/ { "fadd", DBLR, op2(ST,STI), 0 },
+/*1*/ { "fmul", DBLR, op2(ST,STI), 0 },
+/*2*/ { "fcom", DBLR, op2(ST,STI), 0 },
+/*3*/ { "fcomp", DBLR, op2(ST,STI), 0 },
+/*4*/ { "fsub", DBLR, op2(ST,STI), "fsubr" },
+/*5*/ { "fsubr", DBLR, op2(ST,STI), "fsub" },
+/*6*/ { "fdiv", DBLR, op2(ST,STI), "fdivr" },
+/*7*/ { "fdivr", DBLR, op2(ST,STI), "fdiv" },
+};
+
+struct finst db_Escd[] = {
+/*0*/ { "fld", DBLR, op1(STI), "ffree" },
+/*1*/ { "", NONE, 0, 0 },
+/*2*/ { "fst", DBLR, op1(STI), 0 },
+/*3*/ { "fstp", DBLR, op1(STI), 0 },
+/*4*/ { "frstor", NONE, op1(STI), "fucom" },
+/*5*/ { "", NONE, op1(STI), "fucomp" },
+/*6*/ { "fnsave", NONE, 0, 0 },
+/*7*/ { "fnstsw", NONE, 0, 0 },
+};
+
+struct finst db_Esce[] = {
+/*0*/ { "fiadd", LONG, op2(ST,STI), "faddp" },
+/*1*/ { "fimul", LONG, op2(ST,STI), "fmulp" },
+/*2*/ { "ficom", LONG, 0, 0 },
+/*3*/ { "ficomp", LONG, op1(X), (char *)db_Esce3 },
+/*4*/ { "fisub", LONG, op2(ST,STI), "fsubrp" },
+/*5*/ { "fisubr", LONG, op2(ST,STI), "fsubp" },
+/*6*/ { "fidiv", LONG, op2(ST,STI), "fdivrp" },
+/*7*/ { "fidivr", LONG, op2(ST,STI), "fdivp" },
+};
+
+struct finst db_Escf[] = {
+/*0*/ { "fild", LONG, 0, 0 },
+/*1*/ { "", LONG, 0, 0 },
+/*2*/ { "fist", LONG, 0, 0 },
+/*3*/ { "fistp", LONG, 0, 0 },
+/*4*/ { "fbld", NONE, op1(XA), (char *)db_Escf4 },
+/*5*/ { "fld", QUAD, 0, 0 },
+/*6*/ { "fbstp", NONE, 0, 0 },
+/*7*/ { "fstp", QUAD, 0, 0 },
+};
+
+struct finst *db_Esc_inst[] = {
+ db_Esc8, db_Esc9, db_Esca, db_Escb,
+ db_Escc, db_Escd, db_Esce, db_Escf
+};
+
+char * db_Grp1[] = {
+ "add",
+ "or",
+ "adc",
+ "sbb",
+ "and",
+ "sub",
+ "xor",
+ "cmp"
+};
+
+char * db_Grp2[] = {
+ "rol",
+ "ror",
+ "rcl",
+ "rcr",
+ "shl",
+ "shr",
+ "shl",
+ "sar"
+};
+
+struct inst db_Grp3[] = {
+ { "test", TRUE, NONE, op2(I,E), 0 },
+ { "test", TRUE, NONE, op2(I,E), 0 },
+ { "not", TRUE, NONE, op1(E), 0 },
+ { "neg", TRUE, NONE, op1(E), 0 },
+ { "mul", TRUE, NONE, op2(E,A), 0 },
+ { "imul", TRUE, NONE, op2(E,A), 0 },
+ { "div", TRUE, NONE, op2(E,A), 0 },
+ { "idiv", TRUE, NONE, op2(E,A), 0 },
+};
+
+struct inst db_Grp4[] = {
+ { "inc", TRUE, BYTE, op1(E), 0 },
+ { "dec", TRUE, BYTE, op1(E), 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 },
+ { "", TRUE, NONE, 0, 0 }
+};
+
+struct inst db_Grp5[] = {
+ { "inc", TRUE, LONG, op1(E), 0 },
+ { "dec", TRUE, LONG, op1(E), 0 },
+ { "call", TRUE, NONE, op1(Eind),0 },
+ { "lcall", TRUE, NONE, op1(Eind),0 },
+ { "jmp", TRUE, NONE, op1(Eind),0 },
+ { "ljmp", TRUE, NONE, op1(Eind),0 },
+ { "push", TRUE, LONG, op1(E), 0 },
+ { "", TRUE, NONE, 0, 0 }
+};
+
+struct inst db_inst_table[256] = {
+/*00*/ { "add", TRUE, BYTE, op2(R, E), 0 },
+/*01*/ { "add", TRUE, LONG, op2(R, E), 0 },
+/*02*/ { "add", TRUE, BYTE, op2(E, R), 0 },
+/*03*/ { "add", TRUE, LONG, op2(E, R), 0 },
+/*04*/ { "add", FALSE, BYTE, op2(Is, A), 0 },
+/*05*/ { "add", FALSE, LONG, op2(Is, A), 0 },
+/*06*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*07*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*08*/ { "or", TRUE, BYTE, op2(R, E), 0 },
+/*09*/ { "or", TRUE, LONG, op2(R, E), 0 },
+/*0a*/ { "or", TRUE, BYTE, op2(E, R), 0 },
+/*0b*/ { "or", TRUE, LONG, op2(E, R), 0 },
+/*0c*/ { "or", FALSE, BYTE, op2(I, A), 0 },
+/*0d*/ { "or", FALSE, LONG, op2(I, A), 0 },
+/*0e*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*0f*/ { "", FALSE, NONE, 0, 0 },
+
+/*10*/ { "adc", TRUE, BYTE, op2(R, E), 0 },
+/*11*/ { "adc", TRUE, LONG, op2(R, E), 0 },
+/*12*/ { "adc", TRUE, BYTE, op2(E, R), 0 },
+/*13*/ { "adc", TRUE, LONG, op2(E, R), 0 },
+/*14*/ { "adc", FALSE, BYTE, op2(Is, A), 0 },
+/*15*/ { "adc", FALSE, LONG, op2(Is, A), 0 },
+/*16*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*17*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*18*/ { "sbb", TRUE, BYTE, op2(R, E), 0 },
+/*19*/ { "sbb", TRUE, LONG, op2(R, E), 0 },
+/*1a*/ { "sbb", TRUE, BYTE, op2(E, R), 0 },
+/*1b*/ { "sbb", TRUE, LONG, op2(E, R), 0 },
+/*1c*/ { "sbb", FALSE, BYTE, op2(Is, A), 0 },
+/*1d*/ { "sbb", FALSE, LONG, op2(Is, A), 0 },
+/*1e*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*1f*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*20*/ { "and", TRUE, BYTE, op2(R, E), 0 },
+/*21*/ { "and", TRUE, LONG, op2(R, E), 0 },
+/*22*/ { "and", TRUE, BYTE, op2(E, R), 0 },
+/*23*/ { "and", TRUE, LONG, op2(E, R), 0 },
+/*24*/ { "and", FALSE, BYTE, op2(I, A), 0 },
+/*25*/ { "and", FALSE, LONG, op2(I, A), 0 },
+/*26*/ { "", FALSE, NONE, 0, 0 },
+/*27*/ { "aaa", FALSE, NONE, 0, 0 },
+
+/*28*/ { "sub", TRUE, BYTE, op2(R, E), 0 },
+/*29*/ { "sub", TRUE, LONG, op2(R, E), 0 },
+/*2a*/ { "sub", TRUE, BYTE, op2(E, R), 0 },
+/*2b*/ { "sub", TRUE, LONG, op2(E, R), 0 },
+/*2c*/ { "sub", FALSE, BYTE, op2(Is, A), 0 },
+/*2d*/ { "sub", FALSE, LONG, op2(Is, A), 0 },
+/*2e*/ { "", FALSE, NONE, 0, 0 },
+/*2f*/ { "das", FALSE, NONE, 0, 0 },
+
+/*30*/ { "xor", TRUE, BYTE, op2(R, E), 0 },
+/*31*/ { "xor", TRUE, LONG, op2(R, E), 0 },
+/*32*/ { "xor", TRUE, BYTE, op2(E, R), 0 },
+/*33*/ { "xor", TRUE, LONG, op2(E, R), 0 },
+/*34*/ { "xor", FALSE, BYTE, op2(I, A), 0 },
+/*35*/ { "xor", FALSE, LONG, op2(I, A), 0 },
+/*36*/ { "", FALSE, NONE, 0, 0 },
+/*37*/ { "daa", FALSE, NONE, 0, 0 },
+
+/*38*/ { "cmp", TRUE, BYTE, op2(R, E), 0 },
+/*39*/ { "cmp", TRUE, LONG, op2(R, E), 0 },
+/*3a*/ { "cmp", TRUE, BYTE, op2(E, R), 0 },
+/*3b*/ { "cmp", TRUE, LONG, op2(E, R), 0 },
+/*3c*/ { "cmp", FALSE, BYTE, op2(Is, A), 0 },
+/*3d*/ { "cmp", FALSE, LONG, op2(Is, A), 0 },
+/*3e*/ { "", FALSE, NONE, 0, 0 },
+/*3f*/ { "aas", FALSE, NONE, 0, 0 },
+
+/*40*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*41*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*42*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*43*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*44*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*45*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*46*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*47*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+
+/*48*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*49*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4a*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4b*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4c*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4d*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4e*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4f*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+
+/*50*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*51*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*52*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*53*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*54*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*55*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*56*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*57*/ { "push", FALSE, LONG, op1(Ri), 0 },
+
+/*58*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*59*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5a*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5b*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5c*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5d*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5e*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5f*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+
+/*60*/ { "pusha", FALSE, LONG, 0, 0 },
+/*61*/ { "popa", FALSE, LONG, 0, 0 },
+/*62*/ { "bound", TRUE, LONG, op2(E, R), 0 },
+/*63*/ { "arpl", TRUE, NONE, op2(Ew,Rw), 0 },
+
+/*64*/ { "", FALSE, NONE, 0, 0 },
+/*65*/ { "", FALSE, NONE, 0, 0 },
+/*66*/ { "", FALSE, NONE, 0, 0 },
+/*67*/ { "", FALSE, NONE, 0, 0 },
+
+/*68*/ { "push", FALSE, LONG, op1(I), 0 },
+/*69*/ { "imul", TRUE, LONG, op3(I,E,R), 0 },
+/*6a*/ { "push", FALSE, LONG, op1(Ib), 0 },
+/*6b*/ { "imul", TRUE, LONG, op3(Ibs,E,R),0 },
+/*6c*/ { "ins", FALSE, BYTE, op2(DX, DI), 0 },
+/*6d*/ { "ins", FALSE, LONG, op2(DX, DI), 0 },
+/*6e*/ { "outs", FALSE, BYTE, op2(SI, DX), 0 },
+/*6f*/ { "outs", FALSE, LONG, op2(SI, DX), 0 },
+
+/*70*/ { "jo", FALSE, NONE, op1(Db), 0 },
+/*71*/ { "jno", FALSE, NONE, op1(Db), 0 },
+/*72*/ { "jb", FALSE, NONE, op1(Db), 0 },
+/*73*/ { "jnb", FALSE, NONE, op1(Db), 0 },
+/*74*/ { "jz", FALSE, NONE, op1(Db), 0 },
+/*75*/ { "jnz", FALSE, NONE, op1(Db), 0 },
+/*76*/ { "jbe", FALSE, NONE, op1(Db), 0 },
+/*77*/ { "jnbe", FALSE, NONE, op1(Db), 0 },
+
+/*78*/ { "js", FALSE, NONE, op1(Db), 0 },
+/*79*/ { "jns", FALSE, NONE, op1(Db), 0 },
+/*7a*/ { "jp", FALSE, NONE, op1(Db), 0 },
+/*7b*/ { "jnp", FALSE, NONE, op1(Db), 0 },
+/*7c*/ { "jl", FALSE, NONE, op1(Db), 0 },
+/*7d*/ { "jnl", FALSE, NONE, op1(Db), 0 },
+/*7e*/ { "jle", FALSE, NONE, op1(Db), 0 },
+/*7f*/ { "jnle", FALSE, NONE, op1(Db), 0 },
+
+/*80*/ { "", TRUE, BYTE, op2(I, E), (char *)db_Grp1 },
+/*81*/ { "", TRUE, LONG, op2(I, E), (char *)db_Grp1 },
+/*82*/ { "", TRUE, BYTE, op2(Is,E), (char *)db_Grp1 },
+/*83*/ { "", TRUE, LONG, op2(Ibs,E), (char *)db_Grp1 },
+/*84*/ { "test", TRUE, BYTE, op2(R, E), 0 },
+/*85*/ { "test", TRUE, LONG, op2(R, E), 0 },
+/*86*/ { "xchg", TRUE, BYTE, op2(R, E), 0 },
+/*87*/ { "xchg", TRUE, LONG, op2(R, E), 0 },
+
+/*88*/ { "mov", TRUE, BYTE, op2(R, E), 0 },
+/*89*/ { "mov", TRUE, LONG, op2(R, E), 0 },
+/*8a*/ { "mov", TRUE, BYTE, op2(E, R), 0 },
+/*8b*/ { "mov", TRUE, LONG, op2(E, R), 0 },
+/*8c*/ { "mov", TRUE, NONE, op2(S, Ew), 0 },
+/*8d*/ { "lea", TRUE, LONG, op2(E, R), 0 },
+/*8e*/ { "mov", TRUE, NONE, op2(Ew, S), 0 },
+/*8f*/ { "pop", TRUE, LONG, op1(E), 0 },
+
+/*90*/ { "nop", FALSE, NONE, 0, 0 },
+/*91*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*92*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*93*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*94*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*95*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*96*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*97*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+
+/*98*/ { "cbw", FALSE, SDEP, 0, "cwde" }, /* cbw/cwde */
+/*99*/ { "cwd", FALSE, SDEP, 0, "cdq" }, /* cwd/cdq */
+/*9a*/ { "lcall", FALSE, NONE, op1(OS), 0 },
+/*9b*/ { "wait", FALSE, NONE, 0, 0 },
+/*9c*/ { "pushf", FALSE, LONG, 0, 0 },
+/*9d*/ { "popf", FALSE, LONG, 0, 0 },
+/*9e*/ { "sahf", FALSE, NONE, 0, 0 },
+/*9f*/ { "lahf", FALSE, NONE, 0, 0 },
+
+/*a0*/ { "mov", FALSE, BYTE, op2(O, A), 0 },
+/*a1*/ { "mov", FALSE, LONG, op2(O, A), 0 },
+/*a2*/ { "mov", FALSE, BYTE, op2(A, O), 0 },
+/*a3*/ { "mov", FALSE, LONG, op2(A, O), 0 },
+/*a4*/ { "movs", FALSE, BYTE, op2(SI,DI), 0 },
+/*a5*/ { "movs", FALSE, LONG, op2(SI,DI), 0 },
+/*a6*/ { "cmps", FALSE, BYTE, op2(SI,DI), 0 },
+/*a7*/ { "cmps", FALSE, LONG, op2(SI,DI), 0 },
+
+/*a8*/ { "test", FALSE, BYTE, op2(I, A), 0 },
+/*a9*/ { "test", FALSE, LONG, op2(I, A), 0 },
+/*aa*/ { "stos", FALSE, BYTE, op1(DI), 0 },
+/*ab*/ { "stos", FALSE, LONG, op1(DI), 0 },
+/*ac*/ { "lods", FALSE, BYTE, op1(SI), 0 },
+/*ad*/ { "lods", FALSE, LONG, op1(SI), 0 },
+/*ae*/ { "scas", FALSE, BYTE, op1(DI), 0 },
+/*af*/ { "scas", FALSE, LONG, op1(DI), 0 },
+
+/*b0*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b1*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b2*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b3*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b4*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b5*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b6*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b7*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+
+/*b8*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*b9*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*ba*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bb*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bc*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bd*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*be*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bf*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+
+/*c0*/ { "", TRUE, BYTE, op2(Ib, E), (char *)db_Grp2 },
+/*c1*/ { "", TRUE, LONG, op2(Ib, E), (char *)db_Grp2 },
+/*c2*/ { "ret", FALSE, NONE, op1(Iw), 0 },
+/*c3*/ { "ret", FALSE, NONE, 0, 0 },
+/*c4*/ { "les", TRUE, LONG, op2(E, R), 0 },
+/*c5*/ { "lds", TRUE, LONG, op2(E, R), 0 },
+/*c6*/ { "mov", TRUE, BYTE, op2(I, E), 0 },
+/*c7*/ { "mov", TRUE, LONG, op2(I, E), 0 },
+
+/*c8*/ { "enter", FALSE, NONE, op2(Ib, Iw), 0 },
+/*c9*/ { "leave", FALSE, NONE, 0, 0 },
+/*ca*/ { "lret", FALSE, NONE, op1(Iw), 0 },
+/*cb*/ { "lret", FALSE, NONE, 0, 0 },
+/*cc*/ { "int", FALSE, NONE, op1(o3), 0 },
+/*cd*/ { "int", FALSE, NONE, op1(Ib), 0 },
+/*ce*/ { "into", FALSE, NONE, 0, 0 },
+/*cf*/ { "iret", FALSE, NONE, 0, 0 },
+
+/*d0*/ { "", TRUE, BYTE, op2(o1, E), (char *)db_Grp2 },
+/*d1*/ { "", TRUE, LONG, op2(o1, E), (char *)db_Grp2 },
+/*d2*/ { "", TRUE, BYTE, op2(CL, E), (char *)db_Grp2 },
+/*d3*/ { "", TRUE, LONG, op2(CL, E), (char *)db_Grp2 },
+/*d4*/ { "aam", TRUE, NONE, 0, 0 },
+/*d5*/ { "aad", TRUE, NONE, 0, 0 },
+/*d6*/ { "", FALSE, NONE, 0, 0 },
+/*d7*/ { "xlat", FALSE, BYTE, op1(BX), 0 },
+
+/*d8*/ { "", TRUE, NONE, 0, (char *)db_Esc8 },
+/*d9*/ { "", TRUE, NONE, 0, (char *)db_Esc9 },
+/*da*/ { "", TRUE, NONE, 0, (char *)db_Esca },
+/*db*/ { "", TRUE, NONE, 0, (char *)db_Escb },
+/*dc*/ { "", TRUE, NONE, 0, (char *)db_Escc },
+/*dd*/ { "", TRUE, NONE, 0, (char *)db_Escd },
+/*de*/ { "", TRUE, NONE, 0, (char *)db_Esce },
+/*df*/ { "", TRUE, NONE, 0, (char *)db_Escf },
+
+/*e0*/ { "loopne",FALSE, NONE, op1(Db), 0 },
+/*e1*/ { "loope", FALSE, NONE, op1(Db), 0 },
+/*e2*/ { "loop", FALSE, NONE, op1(Db), 0 },
+/*e3*/ { "jcxz", FALSE, SDEP, op1(Db), "jecxz" },
+/*e4*/ { "in", FALSE, BYTE, op2(Ib, A), 0 },
+/*e5*/ { "in", FALSE, LONG, op2(Ib, A) , 0 },
+/*e6*/ { "out", FALSE, BYTE, op2(A, Ib), 0 },
+/*e7*/ { "out", FALSE, LONG, op2(A, Ib) , 0 },
+
+/*e8*/ { "call", FALSE, NONE, op1(Dl), 0 },
+/*e9*/ { "jmp", FALSE, NONE, op1(Dl), 0 },
+/*ea*/ { "ljmp", FALSE, NONE, op1(OS), 0 },
+/*eb*/ { "jmp", FALSE, NONE, op1(Db), 0 },
+/*ec*/ { "in", FALSE, BYTE, op2(DX, A), 0 },
+/*ed*/ { "in", FALSE, LONG, op2(DX, A) , 0 },
+/*ee*/ { "out", FALSE, BYTE, op2(A, DX), 0 },
+/*ef*/ { "out", FALSE, LONG, op2(A, DX) , 0 },
+
+/*f0*/ { "", FALSE, NONE, 0, 0 },
+/*f1*/ { "", FALSE, NONE, 0, 0 },
+/*f2*/ { "", FALSE, NONE, 0, 0 },
+/*f3*/ { "", FALSE, NONE, 0, 0 },
+/*f4*/ { "hlt", FALSE, NONE, 0, 0 },
+/*f5*/ { "cmc", FALSE, NONE, 0, 0 },
+/*f6*/ { "", TRUE, BYTE, 0, (char *)db_Grp3 },
+/*f7*/ { "", TRUE, LONG, 0, (char *)db_Grp3 },
+
+/*f8*/ { "clc", FALSE, NONE, 0, 0 },
+/*f9*/ { "stc", FALSE, NONE, 0, 0 },
+/*fa*/ { "cli", FALSE, NONE, 0, 0 },
+/*fb*/ { "sti", FALSE, NONE, 0, 0 },
+/*fc*/ { "cld", FALSE, NONE, 0, 0 },
+/*fd*/ { "std", FALSE, NONE, 0, 0 },
+/*fe*/ { "", TRUE, NONE, 0, (char *)db_Grp4 },
+/*ff*/ { "", TRUE, NONE, 0, (char *)db_Grp5 },
+};
+
+struct inst db_bad_inst =
+ { "???", FALSE, NONE, 0, 0 }
+;
+
+#define f_mod(byte) ((byte)>>6)
+#define f_reg(byte) (((byte)>>3)&0x7)
+#define f_rm(byte) ((byte)&0x7)
+
+#define sib_ss(byte) ((byte)>>6)
+#define sib_index(byte) (((byte)>>3)&0x7)
+#define sib_base(byte) ((byte)&0x7)
+
+struct i_addr {
+ int is_reg; /* if reg, reg number is in 'disp' */
+ int disp;
+ char * base;
+ char * index;
+ int ss;
+};
+
+char * db_index_reg_16[8] = {
+ "%bx,%si",
+ "%bx,%di",
+ "%bp,%si",
+ "%bp,%di",
+ "%si",
+ "%di",
+ "%bp",
+ "%bx"
+};
+
+char * db_reg[3][8] = {
+ { "%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh" },
+ { "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di" },
+ { "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi" }
+};
+
+char * db_seg_reg[8] = {
+ "%es", "%cs", "%ss", "%ds", "%fs", "%gs", "", ""
+};
+
+/*
+ * lengths for size attributes
+ */
+int db_lengths[] = {
+ 1, /* BYTE */
+ 2, /* WORD */
+ 4, /* LONG */
+ 8, /* QUAD */
+ 4, /* SNGL */
+ 8, /* DBLR */
+ 10, /* EXTR */
+};
+
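+/*
+ * Fetch `size' bytes at `loc' into `result' and advance `loc' past them.
+ * This expands to two statements, so conditional callers below keep it
+ * inside braces.
+ */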
+#define get_value_inc(result, loc, size, is_signed, task) \
+ result = db_get_task_value((loc), (size), (is_signed), (task)); \
+ (loc) += (size);
+
+/*
+ * Read address at location and return updated location.
+ */
+db_addr_t
+db_read_address(
+ db_addr_t loc,
+ int short_addr,
+ int regmodrm,
+ struct i_addr *addrp, /* out */
+ task_t task)
+{
+ int mod, rm, sib, index, disp;
+
+ mod = f_mod(regmodrm);
+ rm = f_rm(regmodrm);
+
+ if (mod == 3) {
+ addrp->is_reg = TRUE;
+ addrp->disp = rm;
+ return loc;
+ }
+ addrp->is_reg = FALSE;
+ addrp->index = 0;
+
+ if (short_addr) {
+ addrp->index = 0;
+ addrp->ss = 0;
+ switch (mod) {
+ case 0:
+ if (rm == 6) {
+ get_value_inc(disp, loc, 2, TRUE, task);
+ addrp->disp = disp;
+ addrp->base = 0;
+ }
+ else {
+ addrp->disp = 0;
+ addrp->base = db_index_reg_16[rm];
+ }
+ break;
+ case 1:
+ get_value_inc(disp, loc, 1, TRUE, task);
+ addrp->disp = disp;
+ addrp->base = db_index_reg_16[rm];
+ break;
+ case 2:
+ get_value_inc(disp, loc, 2, TRUE, task);
+ addrp->disp = disp;
+ addrp->base = db_index_reg_16[rm];
+ break;
+ }
+ }
+ else {
+ if (mod != 3 && rm == 4) {
+ get_value_inc(sib, loc, 1, FALSE, task);
+ rm = sib_base(sib);
+ index = sib_index(sib);
+ if (index != 4)
+ addrp->index = db_reg[LONG][index];
+ addrp->ss = sib_ss(sib);
+ }
+
+ switch (mod) {
+ case 0:
+ if (rm == 5) {
+ get_value_inc(addrp->disp, loc, 4, FALSE, task);
+ addrp->base = 0;
+ }
+ else {
+ addrp->disp = 0;
+ addrp->base = db_reg[LONG][rm];
+ }
+ break;
+
+ case 1:
+ get_value_inc(disp, loc, 1, TRUE, task);
+ addrp->disp = disp;
+ addrp->base = db_reg[LONG][rm];
+ break;
+
+ case 2:
+ get_value_inc(disp, loc, 4, FALSE, task);
+ addrp->disp = disp;
+ addrp->base = db_reg[LONG][rm];
+ break;
+ }
+ }
+ return loc;
+}
+
+void
+db_print_address(
+ char * seg,
+ int size,
+ struct i_addr *addrp,
+ task_t task)
+{
+ if (addrp->is_reg) {
+ db_printf("%s", db_reg[size][addrp->disp]);
+ return;
+ }
+
+ if (seg) {
+ db_printf("%s:", seg);
+ }
+
+ if (addrp->base != 0 || addrp->index != 0) {
+ db_printf("%#n", addrp->disp);
+ db_printf("(");
+ if (addrp->base)
+ db_printf("%s", addrp->base);
+ if (addrp->index)
+ db_printf(",%s,%d", addrp->index, 1<<addrp->ss);
+ db_printf(")");
+ } else
+ db_task_printsym((db_addr_t)addrp->disp, DB_STGY_ANY, task);
+}
+
+/*
+ * Disassemble floating-point ("escape") instruction
+ * and return updated location.
+ */
+db_addr_t
+db_disasm_esc(
+ db_addr_t loc,
+ int inst,
+ int short_addr,
+ int size,
+ char * seg,
+ task_t task)
+{
+ int regmodrm;
+ struct finst *fp;
+ int mod;
+ struct i_addr address;
+ char * name;
+
+ get_value_inc(regmodrm, loc, 1, FALSE, task);
+ fp = &db_Esc_inst[inst - 0xd8][f_reg(regmodrm)];
+ mod = f_mod(regmodrm);
+ if (mod != 3) {
+ /*
+ * Normal address modes.
+ */
+ loc = db_read_address(loc, short_addr, regmodrm, &address, task);
+ db_printf(fp->f_name);
+ switch(fp->f_size) {
+ case SNGL:
+ db_printf("s");
+ break;
+ case DBLR:
+ db_printf("l");
+ break;
+ case EXTR:
+ db_printf("t");
+ break;
+ case WORD:
+ db_printf("s");
+ break;
+ case LONG:
+ db_printf("l");
+ break;
+ case QUAD:
+ db_printf("q");
+ break;
+ default:
+ break;
+ }
+ db_printf("\t");
+ db_print_address(seg, BYTE, &address, task);
+ }
+ else {
+ /*
+ * 'reg-reg' - special formats
+ */
+ switch (fp->f_rrmode) {
+ case op2(ST,STI):
+ name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+ db_printf("%s\t%%st,%%st(%d)",name,f_rm(regmodrm));
+ break;
+ case op2(STI,ST):
+ name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+ db_printf("%s\t%%st(%d),%%st",name, f_rm(regmodrm));
+ break;
+ case op1(STI):
+ name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+ db_printf("%s\t%%st(%d)",name, f_rm(regmodrm));
+ break;
+ case op1(X):
+ db_printf("%s", ((char **)fp->f_rrname)[f_rm(regmodrm)]);
+ break;
+ case op1(XA):
+ db_printf("%s\t%%ax",
+ ((char **)fp->f_rrname)[f_rm(regmodrm)]);
+ break;
+ default:
+ db_printf("<bad instruction>");
+ break;
+ }
+ }
+
+ return loc;
+}
+
+/*
+ * Disassemble instruction at 'loc'. 'altfmt' specifies an
+ * (optional) alternate format. Return address of start of
+ * next instruction.
+ */
+db_addr_t
+db_disasm(
+ db_addr_t loc,
+ boolean_t altfmt,
+ task_t task)
+{
+ int inst;
+ int size;
+ int short_addr;
+ char * seg;
+ struct inst * ip;
+ char * i_name;
+ int i_size;
+ int i_mode;
+ int regmodrm;
+ boolean_t first;
+ int displ;
+ int prefix;
+ int imm;
+ int imm2;
+ int len;
+ struct i_addr address;
+
+ get_value_inc(inst, loc, 1, FALSE, task);
+ if (db_disasm_16) {
+ short_addr = TRUE;
+ size = WORD;
+ }
+ else {
+ short_addr = FALSE;
+ size = LONG;
+ }
+ seg = 0;
+ regmodrm = 0;
+
+ /*
+ * Get prefixes
+ */
+ prefix = TRUE;
+ do {
+ switch (inst) {
+ case 0x66: /* data16 */
+ if (size == LONG)
+ size = WORD;
+ else
+ size = LONG;
+ break;
+ case 0x67:
+ short_addr = !short_addr;
+ break;
+ case 0x26:
+ seg = "%es";
+ break;
+ case 0x36:
+ seg = "%ss";
+ break;
+ case 0x2e:
+ seg = "%cs";
+ break;
+ case 0x3e:
+ seg = "%ds";
+ break;
+ case 0x64:
+ seg = "%fs";
+ break;
+ case 0x65:
+ seg = "%gs";
+ break;
+ case 0xf0:
+ db_printf("lock ");
+ break;
+ case 0xf2:
+ db_printf("repne ");
+ break;
+ case 0xf3:
+ db_printf("repe "); /* XXX repe VS rep */
+ break;
+ default:
+ prefix = FALSE;
+ break;
+ }
+ if (prefix) {
+ get_value_inc(inst, loc, 1, FALSE, task);
+ }
+ } while (prefix);
+
+ if (inst >= 0xd8 && inst <= 0xdf) {
+ loc = db_disasm_esc(loc, inst, short_addr, size, seg, task);
+ db_printf("\n");
+ return loc;
+ }
+
+ if (inst == 0x0f) {
+ get_value_inc(inst, loc, 1, FALSE, task);
+ ip = db_inst_0f[inst>>4];
+ if (ip == 0) {
+ ip = &db_bad_inst;
+ }
+ else {
+ ip = &ip[inst&0xf];
+ }
+ }
+ else
+ ip = &db_inst_table[inst];
+
+ if (ip->i_has_modrm) {
+ get_value_inc(regmodrm, loc, 1, FALSE, task);
+ loc = db_read_address(loc, short_addr, regmodrm, &address, task);
+ }
+
+ i_name = ip->i_name;
+ i_size = ip->i_size;
+ i_mode = ip->i_mode;
+
+ if (ip->i_extra == (char *)db_Grp1 ||
+ ip->i_extra == (char *)db_Grp2 ||
+ ip->i_extra == (char *)db_Grp6 ||
+ ip->i_extra == (char *)db_Grp7 ||
+ ip->i_extra == (char *)db_Grp8) {
+ i_name = ((char **)ip->i_extra)[f_reg(regmodrm)];
+ }
+ else if (ip->i_extra == (char *)db_Grp3) {
+ ip = (struct inst *)ip->i_extra;
+ ip = &ip[f_reg(regmodrm)];
+ i_name = ip->i_name;
+ i_mode = ip->i_mode;
+ }
+ else if (ip->i_extra == (char *)db_Grp4 ||
+ ip->i_extra == (char *)db_Grp5) {
+ ip = (struct inst *)ip->i_extra;
+ ip = &ip[f_reg(regmodrm)];
+ i_name = ip->i_name;
+ i_mode = ip->i_mode;
+ i_size = ip->i_size;
+ }
+
+ if (i_size == SDEP) {
+ if (size == WORD)
+ db_printf(i_name);
+ else
+ db_printf(ip->i_extra);
+ }
+ else {
+ db_printf(i_name);
+ if (i_size != NONE) {
+ if (i_size == BYTE) {
+ db_printf("b");
+ size = BYTE;
+ }
+ else if (i_size == WORD) {
+ db_printf("w");
+ size = WORD;
+ }
+ else if (size == WORD)
+ db_printf("w");
+ else
+ db_printf("l");
+ }
+ }
+ db_printf("\t");
+ for (first = TRUE;
+ i_mode != 0;
+ i_mode >>= 8, first = FALSE)
+ {
+ if (!first)
+ db_printf(",");
+
+ switch (i_mode & 0xFF) {
+
+ case E:
+ db_print_address(seg, size, &address, task);
+ break;
+
+ case Eind:
+ db_printf("*");
+ db_print_address(seg, size, &address, task);
+ break;
+
+ case El:
+ db_print_address(seg, LONG, &address, task);
+ break;
+
+ case Ew:
+ db_print_address(seg, WORD, &address, task);
+ break;
+
+ case Eb:
+ db_print_address(seg, BYTE, &address, task);
+ break;
+
+ case R:
+ db_printf("%s", db_reg[size][f_reg(regmodrm)]);
+ break;
+
+ case Rw:
+ db_printf("%s", db_reg[WORD][f_reg(regmodrm)]);
+ break;
+
+ case Ri:
+ db_printf("%s", db_reg[size][f_rm(inst)]);
+ break;
+
+ case S:
+ db_printf("%s", db_seg_reg[f_reg(regmodrm)]);
+ break;
+
+ case Si:
+ db_printf("%s", db_seg_reg[f_reg(inst)]);
+ break;
+
+ case A:
+ db_printf("%s", db_reg[size][0]); /* acc */
+ break;
+
+ case BX:
+ if (seg)
+ db_printf("%s:", seg);
+ db_printf("(%s)", short_addr ? "%bx" : "%ebx");
+ break;
+
+ case CL:
+ db_printf("%%cl");
+ break;
+
+ case DX:
+ db_printf("%%dx");
+ break;
+
+ case SI:
+ if (seg)
+ db_printf("%s:", seg);
+ db_printf("(%s)", short_addr ? "%si" : "%esi");
+ break;
+
+ case DI:
+ db_printf("%%es:(%s)", short_addr ? "%di" : "%edi");
+ break;
+
+ case CR:
+ db_printf("%%cr%d", f_reg(regmodrm));
+ break;
+
+ case DR:
+ db_printf("%%dr%d", f_reg(regmodrm));
+ break;
+
+ case TR:
+ db_printf("%%tr%d", f_reg(regmodrm));
+ break;
+
+ case I:
+ len = db_lengths[size];
+ get_value_inc(imm, loc, len, FALSE, task);/* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Is:
+ len = db_lengths[size];
+ get_value_inc(imm, loc, len, TRUE, task); /* signed */
+ db_printf("$%#r", imm);
+ break;
+
+ case Ib:
+ get_value_inc(imm, loc, 1, FALSE, task); /* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Ibs:
+ get_value_inc(imm, loc, 1, TRUE, task); /* signed */
+ db_printf("$%#r", imm);
+ break;
+
+ case Iw:
+ get_value_inc(imm, loc, 2, FALSE, task); /* unsigned */
+ db_printf("$%#n", imm);
+ break;
+
+ case Il:
+ get_value_inc(imm, loc, 4, FALSE, task);
+ db_printf("$%#n", imm);
+ break;
+
+ case O:
+ if (short_addr) {
+ get_value_inc(displ, loc, 2, TRUE, task);
+ }
+ else {
+ get_value_inc(displ, loc, 4, TRUE, task);
+ }
+ if (seg)
+ db_printf("%s:%#r",seg, displ);
+ else
+ db_task_printsym((db_addr_t)displ, DB_STGY_ANY, task);
+ break;
+
+ case Db:
+ get_value_inc(displ, loc, 1, TRUE, task);
+ if (short_addr) {
+ /* offset only affects low 16 bits */
+ displ = (loc & 0xffff0000)
+ | ((loc + displ) & 0xffff);
+ }
+ else
+ displ = displ + loc;
+ db_task_printsym((db_addr_t)displ,DB_STGY_XTRN,task);
+ break;
+
+ case Dl:
+ if (short_addr) {
+ get_value_inc(displ, loc, 2, TRUE, task);
+ /* offset only affects low 16 bits */
+ displ = (loc & 0xffff0000)
+ | ((loc + displ) & 0xffff);
+ }
+ else {
+ get_value_inc(displ, loc, 4, TRUE, task);
+ displ = displ + loc;
+ }
+ db_task_printsym((db_addr_t)displ, DB_STGY_XTRN, task);
+ break;
+
+ case o1:
+ db_printf("$1");
+ break;
+
+ case o3:
+ db_printf("$3");
+ break;
+
+ case OS:
+ if (short_addr) {
+ get_value_inc(imm, loc, 2, FALSE, task); /* offset */
+ }
+ else {
+ get_value_inc(imm, loc, 4, FALSE, task); /* offset */
+ }
+ get_value_inc(imm2, loc, 2, FALSE, task); /* segment */
+ db_printf("$%#n,%#n", imm2, imm);
+ break;
+ }
+ }
+
+ if (altfmt == 0 && !db_disasm_16) {
+ if (inst == 0xe9 || inst == 0xeb) {
+ /*
+ * GAS pads to longword boundary after unconditional jumps.
+ */
+ loc = (loc + (4-1)) & ~(4-1);
+ }
+ }
+ db_printf("\n");
+ return loc;
+}
+
+#endif /* MACH_KDB */
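A worked example of how the tables above drive db_disasm(), easy to check by hand:

/*
 * Bytes 0x89 0xd8:
 *   0x89 -> db_inst_table[0x89] = { "mov", TRUE, LONG, op2(R, E) }
 *   modrm 0xd8: mod = 3, reg = 3, rm = 0, so db_read_address() marks
 *   the E operand as register 0 and the operand loop prints
 *       movl    %ebx,%eax
 */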
diff --git a/i386/i386/db_interface.c b/i386/i386/db_interface.c
new file mode 100644
index 00000000..30b0b0f3
--- /dev/null
+++ b/i386/i386/db_interface.c
@@ -0,0 +1,558 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Interface to new debugger.
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <cpus.h>
+
+#include <sys/reboot.h>
+#include <vm/pmap.h>
+
+#include <i386/thread.h>
+#include <i386/db_machdep.h>
+#include <i386/seg.h>
+#include <i386/trap.h>
+#include <i386/setjmp.h>
+#include <i386/pmap.h>
+#include "gdt.h"
+#include "trap.h"
+
+#include "vm_param.h"
+#include <vm/vm_map.h>
+#include <kern/cpu_number.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <ddb/db_task_thread.h>
+#include <machine/machspl.h>
+
+struct i386_saved_state *i386_last_saved_statep;
+struct i386_saved_state i386_nested_saved_state;
+unsigned i386_last_kdb_sp;
+
+extern thread_t db_default_thread;
+
+/*
+ * Print trap reason.
+ */
+kdbprinttrap(type, code)
+ int type, code;
+{
+ printf("kernel: %s (%d), code=%x\n",
+ trap_name(type), type, code);
+}
+
+/*
+ * kdb_trap - field a TRACE or BPT trap
+ */
+
+extern jmp_buf_t *db_recover;
+spl_t saved_ipl[NCPUS]; /* just to know what was IPL before trap */
+
+boolean_t
+kdb_trap(
+ int type,
+ int code,
+ register struct i386_saved_state *regs)
+{
+ spl_t s;
+
+ s = splhigh();
+ saved_ipl[cpu_number()] = s;
+
+ switch (type) {
+ case T_DEBUG: /* single_step */
+ {
+ extern int dr_addr[];
+ int addr;
+ int status = dr6();
+
+	    if (status & 0xf) {	/* hardware breakpoint hit */
+ addr = status & 0x8 ? dr_addr[3] :
+ status & 0x4 ? dr_addr[2] :
+ status & 0x2 ? dr_addr[1] :
+ dr_addr[0];
+ regs->efl |= EFL_RF;
+ db_single_step_cmd(addr, 0, 1, "p");
+ }
+ }
+ case T_INT3: /* breakpoint */
+ case T_WATCHPOINT: /* watchpoint */
+ case -1: /* keyboard interrupt */
+ break;
+
+ default:
+ if (db_recover) {
+ i386_nested_saved_state = *regs;
+ db_printf("Caught %s (%d), code = %x, pc = %x\n",
+ trap_name(type), type, code, regs->eip);
+ db_error("");
+ /*NOTREACHED*/
+ }
+ kdbprinttrap(type, code);
+ }
+
+#if NCPUS > 1
+ if (db_enter())
+#endif /* NCPUS > 1 */
+ {
+ i386_last_saved_statep = regs;
+ i386_last_kdb_sp = (unsigned) &type;
+
+ /* XXX Should switch to ddb`s own stack here. */
+
+ ddb_regs = *regs;
+ if ((regs->cs & 0x3) == 0) {
+ /*
+ * Kernel mode - esp and ss not saved
+ */
+ ddb_regs.uesp = (int)&regs->uesp; /* kernel stack pointer */
+ ddb_regs.ss = KERNEL_DS;
+ }
+
+ cnpollc(TRUE);
+ db_task_trap(type, code, (regs->cs & 0x3) != 0);
+ cnpollc(FALSE);
+
+ regs->eip = ddb_regs.eip;
+ regs->efl = ddb_regs.efl;
+ regs->eax = ddb_regs.eax;
+ regs->ecx = ddb_regs.ecx;
+ regs->edx = ddb_regs.edx;
+ regs->ebx = ddb_regs.ebx;
+ if (regs->cs & 0x3) {
+ /*
+ * user mode - saved esp and ss valid
+ */
+ regs->uesp = ddb_regs.uesp; /* user stack pointer */
+ regs->ss = ddb_regs.ss & 0xffff; /* user stack segment */
+ }
+ regs->ebp = ddb_regs.ebp;
+ regs->esi = ddb_regs.esi;
+ regs->edi = ddb_regs.edi;
+ regs->es = ddb_regs.es & 0xffff;
+ regs->cs = ddb_regs.cs & 0xffff;
+ regs->ds = ddb_regs.ds & 0xffff;
+ regs->fs = ddb_regs.fs & 0xffff;
+ regs->gs = ddb_regs.gs & 0xffff;
+
+ if ((type == T_INT3) &&
+ (db_get_task_value(regs->eip, BKPT_SIZE, FALSE, TASK_NULL)
+ == BKPT_INST))
+ regs->eip += BKPT_SIZE;
+ }
+#if NCPUS > 1
+ db_leave();
+#endif /* NCPUS > 1 */
+
+ splx(s);
+ return 1;
+}
+
+/*
+ * Enter KDB through a keyboard trap.
+ * We show the registers as of the keyboard interrupt
+ * instead of those at its call to KDB.
+ */
+struct int_regs {
+ int gs;
+ int fs;
+ int edi;
+ int esi;
+ int ebp;
+ int ebx;
+ struct i386_interrupt_state *is;
+};
+
+void
+kdb_kentry(
+ struct int_regs *int_regs)
+{
+ struct i386_interrupt_state *is = int_regs->is;
+ spl_t s = splhigh();
+
+#if NCPUS > 1
+ if (db_enter())
+#endif /* NCPUS > 1 */
+ {
+ if (is->cs & 0x3) {
+ ddb_regs.uesp = ((int *)(is+1))[0];
+ ddb_regs.ss = ((int *)(is+1))[1];
+ }
+ else {
+ ddb_regs.ss = KERNEL_DS;
+ ddb_regs.uesp= (int)(is+1);
+ }
+ ddb_regs.efl = is->efl;
+ ddb_regs.cs = is->cs;
+ ddb_regs.eip = is->eip;
+ ddb_regs.eax = is->eax;
+ ddb_regs.ecx = is->ecx;
+ ddb_regs.edx = is->edx;
+ ddb_regs.ebx = int_regs->ebx;
+ ddb_regs.ebp = int_regs->ebp;
+ ddb_regs.esi = int_regs->esi;
+ ddb_regs.edi = int_regs->edi;
+ ddb_regs.ds = is->ds;
+ ddb_regs.es = is->es;
+ ddb_regs.fs = int_regs->fs;
+ ddb_regs.gs = int_regs->gs;
+
+ cnpollc(TRUE);
+ db_task_trap(-1, 0, (ddb_regs.cs & 0x3) != 0);
+ cnpollc(FALSE);
+
+ if (ddb_regs.cs & 0x3) {
+ ((int *)(is+1))[0] = ddb_regs.uesp;
+ ((int *)(is+1))[1] = ddb_regs.ss & 0xffff;
+ }
+ is->efl = ddb_regs.efl;
+ is->cs = ddb_regs.cs & 0xffff;
+ is->eip = ddb_regs.eip;
+ is->eax = ddb_regs.eax;
+ is->ecx = ddb_regs.ecx;
+ is->edx = ddb_regs.edx;
+ int_regs->ebx = ddb_regs.ebx;
+ int_regs->ebp = ddb_regs.ebp;
+ int_regs->esi = ddb_regs.esi;
+ int_regs->edi = ddb_regs.edi;
+ is->ds = ddb_regs.ds & 0xffff;
+ is->es = ddb_regs.es & 0xffff;
+ int_regs->fs = ddb_regs.fs & 0xffff;
+ int_regs->gs = ddb_regs.gs & 0xffff;
+ }
+#if NCPUS > 1
+ db_leave();
+#endif /* NCPUS > 1 */
+
+ (void) splx(s);
+}
+
+boolean_t db_no_vm_fault = TRUE;
+
+int
+db_user_to_kernel_address(
+ task_t task,
+ vm_offset_t addr,
+ unsigned *kaddr,
+ int flag)
+{
+ register pt_entry_t *ptp;
+ boolean_t faulted = FALSE;
+
+ retry:
+ ptp = pmap_pte(task->map->pmap, addr);
+ if (ptp == PT_ENTRY_NULL || (*ptp & INTEL_PTE_VALID) == 0) {
+ if (!faulted && !db_no_vm_fault) {
+ kern_return_t err;
+
+ faulted = TRUE;
+ err = vm_fault( task->map,
+ trunc_page(addr),
+ VM_PROT_READ,
+ FALSE, FALSE, 0);
+ if (err == KERN_SUCCESS)
+ goto retry;
+ }
+ if (flag) {
+ db_printf("\nno memory is assigned to address %08x\n", addr);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ return(-1);
+ }
+ *kaddr = (unsigned)ptetokv(*ptp) + (addr & (INTEL_PGBYTES-1));
+ return(0);
+}
+
+/*
+ * Read bytes from kernel address space for debugger.
+ */
+
+void
+db_read_bytes(
+ vm_offset_t addr,
+ register int size,
+ register char *data,
+ task_t task)
+{
+ register char *src;
+ register int n;
+ unsigned kern_addr;
+
+ src = (char *)addr;
+ if (addr >= VM_MIN_KERNEL_ADDRESS || task == TASK_NULL) {
+ if (task == TASK_NULL)
+ task = db_current_task();
+ while (--size >= 0) {
+ if (addr++ < VM_MIN_KERNEL_ADDRESS && task == TASK_NULL) {
+ db_printf("\nbad address %x\n", addr);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ *data++ = *src++;
+ }
+ return;
+ }
+ while (size > 0) {
+ if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
+ return;
+ src = (char *)kern_addr;
+ n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
+ if (n > size)
+ n = size;
+ size -= n;
+ addr += n;
+ while (--n >= 0)
+ *data++ = *src++;
+ }
+}
+
+/*
+ * Write bytes to kernel address space for debugger.
+ */
+void
+db_write_bytes(
+ vm_offset_t addr,
+ register int size,
+ register char *data,
+ task_t task)
+{
+ register char *dst;
+
+ register pt_entry_t *ptep0 = 0;
+ pt_entry_t oldmap0 = 0;
+ vm_offset_t addr1;
+ register pt_entry_t *ptep1 = 0;
+ pt_entry_t oldmap1 = 0;
+ extern char etext;
+ void db_write_bytes_user_space();
+
+ if ((addr < VM_MIN_KERNEL_ADDRESS) ^
+ ((addr + size) <= VM_MIN_KERNEL_ADDRESS)) {
+ db_error("\ncannot write data into mixed space\n");
+ /* NOTREACHED */
+ }
+ if (addr < VM_MIN_KERNEL_ADDRESS) {
+ if (task) {
+ db_write_bytes_user_space(addr, size, data, task);
+ return;
+ } else if (db_current_task() == TASK_NULL) {
+ db_printf("\nbad address %x\n", addr);
+ db_error(0);
+ /* NOTREACHED */
+ }
+ }
+
+ if (addr >= VM_MIN_KERNEL_ADDRESS &&
+ addr <= (vm_offset_t)&etext)
+ {
+ ptep0 = pmap_pte(kernel_pmap, addr);
+ oldmap0 = *ptep0;
+ *ptep0 |= INTEL_PTE_WRITE;
+
+ addr1 = i386_trunc_page(addr + size - 1);
+ if (i386_trunc_page(addr) != addr1) {
+ /* data crosses a page boundary */
+
+ ptep1 = pmap_pte(kernel_pmap, addr1);
+ oldmap1 = *ptep1;
+ *ptep1 |= INTEL_PTE_WRITE;
+ }
+ flush_tlb();
+ }
+
+ dst = (char *)addr;
+
+ while (--size >= 0)
+ *dst++ = *data++;
+
+ if (ptep0) {
+ *ptep0 = oldmap0;
+ if (ptep1) {
+ *ptep1 = oldmap1;
+ }
+ flush_tlb();
+ }
+}
+
+void
+db_write_bytes_user_space(
+ vm_offset_t addr,
+ register int size,
+ register char *data,
+ task_t task)
+{
+ register char *dst;
+ register int n;
+ unsigned kern_addr;
+
+ while (size > 0) {
+ if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
+ return;
+ dst = (char *)kern_addr;
+ n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
+ if (n > size)
+ n = size;
+ size -= n;
+ addr += n;
+ while (--n >= 0)
+ *dst++ = *data++;
+ }
+}
+
+boolean_t
+db_check_access(
+ vm_offset_t addr,
+ register int size,
+ task_t task)
+{
+ register n;
+ vm_offset_t kern_addr;
+
+ if (addr >= VM_MIN_KERNEL_ADDRESS) {
+ if (kernel_task == TASK_NULL)
+ return TRUE;
+ task = kernel_task;
+ } else if (task == TASK_NULL) {
+ if (current_thread() == THREAD_NULL)
+ return FALSE;
+ task = current_thread()->task;
+ }
+ while (size > 0) {
+ if (db_user_to_kernel_address(task, addr, &kern_addr, 0) < 0)
+ return FALSE;
+ n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
+ if (n > size)
+ n = size;
+ size -= n;
+ addr += n;
+ }
+ return TRUE;
+}
+
+boolean_t
+db_phys_eq(
+ task_t task1,
+ vm_offset_t addr1,
+ task_t task2,
+ vm_offset_t addr2)
+{
+ vm_offset_t kern_addr1, kern_addr2;
+
+ if (addr1 >= VM_MIN_KERNEL_ADDRESS || addr2 >= VM_MIN_KERNEL_ADDRESS)
+ return FALSE;
+ if ((addr1 & (INTEL_PGBYTES-1)) != (addr2 & (INTEL_PGBYTES-1)))
+ return FALSE;
+ if (task1 == TASK_NULL) {
+ if (current_thread() == THREAD_NULL)
+ return FALSE;
+ task1 = current_thread()->task;
+ }
+ if (db_user_to_kernel_address(task1, addr1, &kern_addr1, 0) < 0
+ || db_user_to_kernel_address(task2, addr2, &kern_addr2, 0) < 0)
+ return FALSE;
+ return(kern_addr1 == kern_addr2);
+}
+
+#define DB_USER_STACK_ADDR (VM_MIN_KERNEL_ADDRESS)
+#define DB_NAME_SEARCH_LIMIT (DB_USER_STACK_ADDR-(INTEL_PGBYTES*3))
+
+static boolean_t
+db_search_null(
+ task_t task,
+ vm_offset_t *svaddr,
+ vm_offset_t evaddr,
+ vm_offset_t *skaddr,
+ int flag)
+{
+ register unsigned vaddr;
+ register unsigned *kaddr;
+
+ kaddr = (unsigned *)*skaddr;
+ for (vaddr = *svaddr; vaddr > evaddr; vaddr -= sizeof(unsigned)) {
+ if (vaddr % INTEL_PGBYTES == 0) {
+ vaddr -= sizeof(unsigned);
+ if (db_user_to_kernel_address(task, vaddr, skaddr, 0) < 0)
+ return FALSE;
+ kaddr = (vm_offset_t *)*skaddr;
+ } else {
+ vaddr -= sizeof(unsigned);
+ kaddr--;
+ }
+ if ((*kaddr == 0) ^ (flag == 0)) {
+ *svaddr = vaddr;
+ *skaddr = (unsigned)kaddr;
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+void
+db_task_name(
+ task_t task)
+{
+ register char *p;
+ register n;
+ unsigned vaddr, kaddr;
+
+ vaddr = DB_USER_STACK_ADDR;
+ kaddr = 0;
+
+ /*
+ * skip nulls at the end
+ */
+ if (!db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 0)) {
+ db_printf(DB_NULL_TASK_NAME);
+ return;
+ }
+ /*
+ * search start of args
+ */
+ if (!db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 1)) {
+ db_printf(DB_NULL_TASK_NAME);
+ return;
+ }
+
+ n = DB_TASK_NAME_LEN-1;
+ p = (char *)kaddr + sizeof(unsigned);
+ for (vaddr += sizeof(int); vaddr < DB_USER_STACK_ADDR && n > 0;
+ vaddr++, p++, n--) {
+ if (vaddr % INTEL_PGBYTES == 0) {
+ (void)db_user_to_kernel_address(task, vaddr, &kaddr, 0);
+ p = (char*)kaddr;
+ }
+ db_printf("%c", (*p < ' ' || *p > '~')? ' ': *p);
+ }
+ while (n-- >= 0) /* compare with >= 0 for one more space */
+ db_printf(" ");
+}
+
+#endif /* MACH_KDB */
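
db_read_bytes() and db_write_bytes_user_space() above never copy across a page boundary in one step: each pass translates the current user page with db_user_to_kernel_address() and clamps the chunk to the bytes remaining on that page. Below is a minimal user-space sketch of just that chunking loop; PAGE_SIZE, chunked_read() and the flat `space` buffer are illustrative stand-ins for INTEL_PGBYTES and the per-page translation, not kernel code.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096u			/* stand-in for INTEL_PGBYTES */

/* Copy `size` bytes starting at offset `addr` of `space`, one
 * page-bounded chunk at a time, mirroring db_read_bytes(). */
static void chunked_read(const char *space, size_t addr, size_t size, char *out)
{
	while (size > 0) {
		/* bytes left on the current page, as in
		 * intel_trunc_page(addr + INTEL_PGBYTES) - addr */
		size_t n = PAGE_SIZE - (addr & (PAGE_SIZE - 1));
		if (n > size)
			n = size;
		/* the kernel would map this page here via
		 * db_user_to_kernel_address(); `space + addr` stands in */
		memcpy(out, space + addr, n);
		out  += n;
		addr += n;
		size -= n;
	}
}

int main(void)
{
	static char src[3 * 4096], dst[3 * 4096];
	size_t i;

	for (i = 0; i < sizeof src; i++)
		src[i] = (char)i;
	chunked_read(src, 123, 9000, dst);
	printf("%d\n", memcmp(src + 123, dst, 9000));	/* prints 0 */
	return 0;
}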
diff --git a/i386/i386/db_machdep.h b/i386/i386/db_machdep.h
new file mode 100644
index 00000000..ee5853a0
--- /dev/null
+++ b/i386/i386/db_machdep.h
@@ -0,0 +1,110 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_DB_MACHDEP_H_
+#define _I386_DB_MACHDEP_H_
+
+/*
+ * Machine-dependent defines for new kernel debugger.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <mach/machine/vm_param.h>
+#include <mach/machine/eflags.h>
+#include <i386/thread.h> /* for thread_status */
+#include <i386/trap.h>
+
+typedef vm_offset_t db_addr_t; /* address - unsigned */
+typedef int db_expr_t; /* expression - signed */
+
+typedef struct i386_saved_state db_regs_t;
+db_regs_t ddb_regs; /* register state */
+#define DDB_REGS (&ddb_regs)
+#define SAVE_DDB_REGS DB_SAVE(db_regs_t, ddb_regs)
+#define RESTORE_DDB_REGS DB_RESTORE(ddb_regs)
+
+#define PC_REGS(regs) ((db_addr_t)(regs)->eip)
+
+#define BKPT_INST 0xcc /* breakpoint instruction */
+#define BKPT_SIZE (1) /* size of breakpoint inst */
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define FIXUP_PC_AFTER_BREAK ddb_regs.eip -= 1;
+
+#define db_clear_single_step(regs) ((regs)->efl &= ~EFL_TF)
+#define db_set_single_step(regs) ((regs)->efl |= EFL_TF)
+
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_INT3)
+#define IS_WATCHPOINT_TRAP(type, code) ((type) == T_WATCHPOINT)
+
+#define I_CALL 0xe8
+#define I_CALLI 0xff
+#define I_RET 0xc3
+#define I_IRET 0xcf
+
+#define inst_trap_return(ins) (((ins)&0xff) == I_IRET)
+#define inst_return(ins) (((ins)&0xff) == I_RET)
+#define inst_call(ins) (((ins)&0xff) == I_CALL || \
+ (((ins)&0xff) == I_CALLI && \
+ ((ins)&0x3800) == 0x1000))
+#define inst_load(ins) 0
+#define inst_store(ins) 0
+
+/* access capability and access macros */
+
+#define DB_ACCESS_LEVEL 2 /* access any space */
+#define DB_CHECK_ACCESS(addr,size,task) \
+ db_check_access(addr,size,task)
+#define DB_PHYS_EQ(task1,addr1,task2,addr2) \
+ db_phys_eq(task1,addr1,task2,addr2)
+#define DB_VALID_KERN_ADDR(addr) \
+ ((addr) >= VM_MIN_KERNEL_ADDRESS && \
+ (addr) < VM_MAX_KERNEL_ADDRESS)
+#define DB_VALID_ADDRESS(addr,user) \
+ ((!(user) && DB_VALID_KERN_ADDR(addr)) || \
+ ((user) && (addr) < VM_MIN_KERNEL_ADDRESS))
+
+boolean_t db_check_access(/* vm_offset_t, int, task_t */);
+boolean_t db_phys_eq(/* task_t, vm_offset_t, task_t, vm_offset_t */);
+
+/* macros for printing OS server dependent task name */
+
+#define DB_TASK_NAME(task) db_task_name(task)
+#define DB_TASK_NAME_TITLE "COMMAND "
+#define DB_TASK_NAME_LEN 23
+#define DB_NULL_TASK_NAME "? "
+
+void db_task_name(/* task_t */);
+
+/* macro for checking if a thread has used floating-point */
+
+#define db_thread_fp_used(thread) ((thread)->pcb->ims.ifps != 0)
+
+/* only a.out symbol tables */
+
+#define DB_NO_COFF 1
+
+#endif /* _I386_DB_MACHDEP_H_ */
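
BKPT_INST, BKPT_SIZE and FIXUP_PC_AFTER_BREAK above encode the usual x86 software-breakpoint protocol: save the first byte of the target instruction, overwrite it with 0xcc (int3), and after the trap back the reported PC up by one byte before using it. The sketch below exercises that bookkeeping over an ordinary byte buffer; struct bkpt and the function names are illustrative, not ddb's own.

#include <stdio.h>
#include <stdint.h>

#define BKPT_INST 0xcc		/* int3 */
#define BKPT_SIZE 1

struct bkpt {
	uint8_t *addr;		/* where the breakpoint lives */
	uint8_t  saved;		/* original byte, restored on clear */
};

static void bkpt_set(struct bkpt *bp, uint8_t *addr)
{
	bp->addr  = addr;
	bp->saved = *addr;
	*addr = BKPT_INST;
}

static void bkpt_clear(struct bkpt *bp)
{
	*bp->addr = bp->saved;
}

/* On an int3 trap the reported PC is one byte past the 0xcc,
 * which is what FIXUP_PC_AFTER_BREAK compensates for. */
static uintptr_t fixup_pc_after_break(uintptr_t pc)
{
	return pc - BKPT_SIZE;
}

int main(void)
{
	uint8_t text[4] = { 0x55, 0x89, 0xe5, 0xc3 };	/* push %ebp; mov %esp,%ebp; ret */
	struct bkpt bp;

	bkpt_set(&bp, &text[0]);
	printf("patched byte:  %02x\n", text[0]);	/* cc */
	printf("fixed-up pc:   %lx\n",
	       (unsigned long)fixup_pc_after_break((uintptr_t)&text[0] + BKPT_SIZE));
	bkpt_clear(&bp);
	printf("restored byte: %02x\n", text[0]);	/* 55 */
	return 0;
}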
diff --git a/i386/i386/db_trace.c b/i386/i386/db_trace.c
new file mode 100644
index 00000000..358d2b10
--- /dev/null
+++ b/i386/i386/db_trace.c
@@ -0,0 +1,674 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include "mach_kdb.h"
+#if MACH_KDB
+
+#include <mach/boolean.h>
+#include <vm/vm_map.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+
+#include <machine/db_machdep.h>
+#include <machine/machspl.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_task_thread.h>
+
+#include "trap.h"
+
+db_i386_reg_value(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ struct db_var_aux_param *ap); /* forward */
+
+/*
+ * Machine register set.
+ */
+struct db_variable db_regs[] = {
+ { "cs", (int *)&ddb_regs.cs, db_i386_reg_value },
+ { "ds", (int *)&ddb_regs.ds, db_i386_reg_value },
+ { "es", (int *)&ddb_regs.es, db_i386_reg_value },
+ { "fs", (int *)&ddb_regs.fs, db_i386_reg_value },
+ { "gs", (int *)&ddb_regs.gs, db_i386_reg_value },
+ { "ss", (int *)&ddb_regs.ss, db_i386_reg_value },
+ { "eax",(int *)&ddb_regs.eax, db_i386_reg_value },
+ { "ecx",(int *)&ddb_regs.ecx, db_i386_reg_value },
+ { "edx",(int *)&ddb_regs.edx, db_i386_reg_value },
+ { "ebx",(int *)&ddb_regs.ebx, db_i386_reg_value },
+ { "esp",(int *)&ddb_regs.uesp,db_i386_reg_value },
+ { "ebp",(int *)&ddb_regs.ebp, db_i386_reg_value },
+ { "esi",(int *)&ddb_regs.esi, db_i386_reg_value },
+ { "edi",(int *)&ddb_regs.edi, db_i386_reg_value },
+ { "eip",(int *)&ddb_regs.eip, db_i386_reg_value },
+ { "efl",(int *)&ddb_regs.efl, db_i386_reg_value },
+};
+struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+/*
+ * Stack trace.
+ */
+#define INKERNEL(va) (((vm_offset_t)(va)) >= VM_MIN_KERNEL_ADDRESS)
+
+struct i386_frame {
+ struct i386_frame *f_frame;
+ int f_retaddr;
+ int f_arg0;
+};
+
+#define TRAP 1
+#define INTERRUPT 2
+#define SYSCALL 3
+
+db_addr_t db_user_trap_symbol_value = 0;
+db_addr_t db_kernel_trap_symbol_value = 0;
+db_addr_t db_interrupt_symbol_value = 0;
+db_addr_t db_return_to_iret_symbol_value = 0;
+db_addr_t db_syscall_symbol_value = 0;
+boolean_t db_trace_symbols_found = FALSE;
+
+struct i386_kregs {
+ char *name;
+ int offset;
+} i386_kregs[] = {
+ { "ebx", (int)(&((struct i386_kernel_state *)0)->k_ebx) },
+ { "esp", (int)(&((struct i386_kernel_state *)0)->k_esp) },
+ { "ebp", (int)(&((struct i386_kernel_state *)0)->k_ebp) },
+ { "edi", (int)(&((struct i386_kernel_state *)0)->k_edi) },
+ { "esi", (int)(&((struct i386_kernel_state *)0)->k_esi) },
+ { "eip", (int)(&((struct i386_kernel_state *)0)->k_eip) },
+ { 0 },
+};
+
+int *
+db_lookup_i386_kreg(
+ char *name,
+ int *kregp)
+{
+ register struct i386_kregs *kp;
+
+ for (kp = i386_kregs; kp->name; kp++) {
+ if (strcmp(name, kp->name) == 0)
+ return (int *)((int)kregp + kp->offset);
+ }
+ return 0;
+}
+
+db_i386_reg_value(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap)
+{
+ int *dp = 0;
+ db_expr_t null_reg = 0;
+ register thread_t thread = ap->thread;
+ extern unsigned int_stack_high;
+
+ if (db_option(ap->modif, 'u')) {
+ if (thread == THREAD_NULL) {
+ if ((thread = current_thread()) == THREAD_NULL)
+ db_error("no user registers\n");
+ }
+ if (thread == current_thread()) {
+ if (ddb_regs.cs & 0x3)
+ dp = vp->valuep;
+ else if (ddb_regs.ebp < int_stack_high)
+ db_error("cannot get/set user registers in nested interrupt\n");
+ }
+ } else {
+ if (thread == THREAD_NULL || thread == current_thread()) {
+ dp = vp->valuep;
+ } else if ((thread->state & TH_SWAPPED) == 0 &&
+ thread->kernel_stack) {
+ dp = db_lookup_i386_kreg(vp->name,
+ (int *)(STACK_IKS(thread->kernel_stack)));
+ if (dp == 0)
+ dp = &null_reg;
+ } else if ((thread->state & TH_SWAPPED) &&
+ thread->swap_func != thread_exception_return) {
+/*.....this breaks t/t $taskN.0...*/
+ /* only EIP is valid */
+ if (vp->valuep == (int *) &ddb_regs.eip) {
+ dp = (int *)(&thread->swap_func);
+ } else {
+ dp = &null_reg;
+ }
+ }
+ }
+ if (dp == 0) {
+ if (thread->pcb == 0)
+ db_error("no pcb\n");
+ dp = (int *)((int)(&thread->pcb->iss) +
+ ((int)vp->valuep - (int)&ddb_regs));
+ }
+ if (flag == DB_VAR_SET)
+ *dp = *valuep;
+ else
+ *valuep = *dp;
+}
+
+void
+db_find_trace_symbols(void)
+{
+ db_expr_t value;
+ if (db_value_of_name("_user_trap", &value))
+ db_user_trap_symbol_value = (db_addr_t) value;
+ if (db_value_of_name("_kernel_trap", &value))
+ db_kernel_trap_symbol_value = (db_addr_t) value;
+ if (db_value_of_name("_interrupt", &value))
+ db_interrupt_symbol_value = (db_addr_t) value;
+ if (db_value_of_name("_return_to_iret", &value))
+ db_return_to_iret_symbol_value = (db_addr_t) value;
+ if (db_value_of_name("_syscall", &value))
+ db_syscall_symbol_value = (db_addr_t) value;
+ db_trace_symbols_found = TRUE;
+}
+
+/*
+ * Figure out how many arguments were passed into the frame at "fp".
+ */
+int db_numargs_default = 5;
+
+int
+db_numargs(
+ struct i386_frame *fp,
+ task_t task)
+{
+ int *argp;
+ int inst;
+ int args;
+ extern char etext[];
+
+ argp = (int *)db_get_task_value((int)&fp->f_retaddr, 4, FALSE, task);
+ if (argp < (int *)VM_MIN_KERNEL_ADDRESS || argp > (int *)etext)
+ args = db_numargs_default;
+ else if (!DB_CHECK_ACCESS((int)argp, 4, task))
+ args = db_numargs_default;
+ else {
+ inst = db_get_task_value((int)argp, 4, FALSE, task);
+ if ((inst & 0xff) == 0x59) /* popl %ecx */
+ args = 1;
+ else if ((inst & 0xffff) == 0xc483) /* addl %n, %esp */
+ args = ((inst >> 16) & 0xff) / 4;
+ else
+ args = db_numargs_default;
+ }
+ return args;
+}
+
+struct interrupt_frame {
+ struct i386_frame *if_frame; /* point to next frame */
+ int if_retaddr; /* return address to _interrupt */
+ int if_unit; /* unit number */
+ spl_t if_spl; /* saved spl */
+ int if_iretaddr; /* _return_to_{iret,iret_i} */
+ int if_edx; /* old sp(iret) or saved edx(iret_i) */
+ int if_ecx; /* saved ecx(iret_i) */
+ int if_eax; /* saved eax(iret_i) */
+ int if_eip; /* saved eip(iret_i) */
+ int if_cs; /* saved cs(iret_i) */
+ int if_efl; /* saved efl(iret_i) */
+};
+
+/*
+ * Figure out the next frame up in the call stack.
+ * For trap(), we print the address of the faulting instruction and
+ * proceed with the calling frame. We return the ip that faulted.
+ * If the trap was caused by jumping through a bogus pointer, then
+ * the next line in the backtrace will list some random function as
+ * being called. It should get the argument list correct, though.
+ * It might be possible to dig out from the next frame up the name
+ * of the function that faulted, but that could get hairy.
+ */
+void
+db_nextframe(
+ struct i386_frame **lfp, /* in/out */
+ struct i386_frame **fp, /* in/out */
+ db_addr_t *ip, /* out */
+ int frame_type, /* in */
+ thread_t thread) /* in */
+{
+ struct i386_saved_state *saved_regs;
+ struct interrupt_frame *ifp;
+ task_t task = (thread != THREAD_NULL)? thread->task: TASK_NULL;
+
+ switch(frame_type) {
+ case TRAP:
+ /*
+ * We know that trap() has 1 argument and we know that
+ * it is an (struct i386_saved_state *).
+ */
+ saved_regs = (struct i386_saved_state *)
+ db_get_task_value((int)&((*fp)->f_arg0),4,FALSE,task);
+ db_printf(">>>>> %s (%d) at ",
+ trap_name(saved_regs->trapno), saved_regs->trapno);
+ db_task_printsym(saved_regs->eip, DB_STGY_PROC, task);
+ db_printf(" <<<<<\n");
+ *fp = (struct i386_frame *)saved_regs->ebp;
+ *ip = (db_addr_t)saved_regs->eip;
+ break;
+ case INTERRUPT:
+ if (*lfp == 0) {
+ db_printf(">>>>> interrupt <<<<<\n");
+ goto miss_frame;
+ }
+ db_printf(">>>>> interrupt at ");
+ ifp = (struct interrupt_frame *)(*lfp);
+ *fp = ifp->if_frame;
+ if (ifp->if_iretaddr == db_return_to_iret_symbol_value)
+ *ip = ((struct i386_interrupt_state *) ifp->if_edx)->eip;
+ else
+ *ip = (db_addr_t) ifp->if_eip;
+ db_task_printsym(*ip, DB_STGY_PROC, task);
+ db_printf(" <<<<<\n");
+ break;
+ case SYSCALL:
+ if (thread != THREAD_NULL && thread->pcb) {
+ *ip = (db_addr_t) thread->pcb->iss.eip;
+ *fp = (struct i386_frame *) thread->pcb->iss.ebp;
+ break;
+ }
+	    /* fall through to the default (unknown) case */
+ default:
+ miss_frame:
+ *ip = (db_addr_t)
+ db_get_task_value((int)&(*fp)->f_retaddr, 4, FALSE, task);
+ *lfp = *fp;
+ *fp = (struct i386_frame *)
+ db_get_task_value((int)&(*fp)->f_frame, 4, FALSE, task);
+ break;
+ }
+}
+
+void
+db_i386_stack_trace(
+ thread_t th,
+ struct i386_frame *frame,
+ db_addr_t callpc,
+ db_expr_t count,
+ int flags); /* forward */
+
+#define F_USER_TRACE 1
+#define F_TRACE_THREAD 2
+
+void
+db_stack_trace_cmd(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char *modif)
+{
+ boolean_t trace_thread = FALSE;
+ struct i386_frame *frame;
+ db_addr_t callpc;
+ int flags = 0;
+ thread_t th;
+
+ {
+ register char *cp = modif;
+ register char c;
+
+ while ((c = *cp++) != 0) {
+ if (c == 't')
+ trace_thread = TRUE;
+ if (c == 'u')
+ flags |= F_USER_TRACE;
+ }
+ }
+
+ if (!have_addr && !trace_thread) {
+ frame = (struct i386_frame *)ddb_regs.ebp;
+ callpc = (db_addr_t)ddb_regs.eip;
+ th = current_thread();
+ } else if (trace_thread) {
+ if (have_addr) {
+ th = (thread_t) addr;
+ if (!db_check_thread_address_valid((db_addr_t)th))
+ return;
+ } else {
+ th = db_default_thread;
+ if (th == THREAD_NULL)
+ th = current_thread();
+ if (th == THREAD_NULL) {
+ db_printf("no active thread\n");
+ return;
+ }
+ }
+ if (th == current_thread()) {
+ frame = (struct i386_frame *)ddb_regs.ebp;
+ callpc = (db_addr_t)ddb_regs.eip;
+ } else {
+ if (th->pcb == 0) {
+ db_printf("thread has no pcb\n");
+ return;
+ }
+ if ((th->state & TH_SWAPPED) || th->kernel_stack == 0) {
+ register struct i386_saved_state *iss = &th->pcb->iss;
+
+ db_printf("Continuation ");
+ db_task_printsym((db_expr_t)th->swap_func,
+ DB_STGY_PROC,
+ th->task);
+ db_printf("\n");
+
+ frame = (struct i386_frame *) (iss->ebp);
+ callpc = (db_addr_t) (iss->eip);
+ } else {
+ register struct i386_kernel_state *iks;
+ iks = STACK_IKS(th->kernel_stack);
+ frame = (struct i386_frame *) (iks->k_ebp);
+ callpc = (db_addr_t) (iks->k_eip);
+ }
+ }
+ } else {
+ frame = (struct i386_frame *)addr;
+ th = (db_default_thread)? db_default_thread: current_thread();
+ callpc = (db_addr_t)db_get_task_value((int)&frame->f_retaddr, 4,
+ FALSE,
+ (th == THREAD_NULL) ? TASK_NULL : th->task);
+ }
+
+ db_i386_stack_trace( th, frame, callpc, count, flags );
+}
+
+
+void
+db_i386_stack_trace(
+ thread_t th,
+ struct i386_frame *frame,
+ db_addr_t callpc,
+ db_expr_t count,
+ int flags)
+{
+ task_t task;
+ boolean_t kernel_only;
+ int *argp;
+ int user_frame = 0;
+ struct i386_frame *lastframe;
+ int frame_type;
+ char *filename;
+ int linenum;
+ extern unsigned int db_maxoff;
+
+ if (count == -1)
+ count = 65535;
+
+ kernel_only = (flags & F_USER_TRACE) == 0;
+
+ task = (th == THREAD_NULL) ? TASK_NULL : th->task;
+
+ if (!db_trace_symbols_found)
+ db_find_trace_symbols();
+
+ if (!INKERNEL((unsigned)callpc) && !INKERNEL((unsigned)frame)) {
+ db_printf(">>>>> user space <<<<<\n");
+ user_frame++;
+ }
+
+ lastframe = 0;
+ while (count-- && frame != 0) {
+ register int narg;
+ char * name;
+ db_expr_t offset;
+
+ if (INKERNEL((unsigned)callpc) && user_frame == 0) {
+ db_addr_t call_func = 0;
+
+ db_symbol_values(0, db_search_task_symbol(callpc,
+ DB_STGY_XTRN, (db_addr_t *)&offset,
+ TASK_NULL),
+ &name, (db_expr_t *)&call_func);
+ if (call_func == db_user_trap_symbol_value ||
+ call_func == db_kernel_trap_symbol_value) {
+ frame_type = TRAP;
+ narg = 1;
+ } else if (call_func == db_interrupt_symbol_value) {
+ frame_type = INTERRUPT;
+ goto next_frame;
+ } else if (call_func == db_syscall_symbol_value) {
+ frame_type = SYSCALL;
+ goto next_frame;
+ } else {
+ frame_type = 0;
+ narg = db_numargs(frame, task);
+ }
+ } else if (INKERNEL((unsigned)callpc) ^ INKERNEL((unsigned)frame)) {
+ frame_type = 0;
+ narg = -1;
+ } else {
+ frame_type = 0;
+ narg = db_numargs(frame, task);
+ }
+
+ db_find_task_sym_and_offset(callpc, &name,
+ (db_addr_t *)&offset, task);
+ if (name == 0 || offset > db_maxoff) {
+ db_printf("0x%x(", callpc);
+ offset = 0;
+ } else
+ db_printf("%s(", name);
+
+ argp = &frame->f_arg0;
+ while (narg > 0) {
+ db_printf("%x", db_get_task_value((int)argp,4,FALSE,task));
+ argp++;
+ if (--narg != 0)
+ db_printf(",");
+ }
+ if (narg < 0)
+ db_printf("...");
+ db_printf(")");
+ if (offset) {
+ db_printf("+%x", offset);
+ }
+ if (db_line_at_pc(0, &filename, &linenum, callpc)) {
+ db_printf(" [%s", filename);
+ if (linenum > 0)
+ db_printf(":%d", linenum);
+ db_printf("]");
+ }
+ db_printf("\n");
+
+ next_frame:
+ db_nextframe(&lastframe, &frame, &callpc, frame_type, th);
+
+ if (frame == 0) {
+ /* end of chain */
+ break;
+ }
+ if (!INKERNEL(lastframe) ||
+ (!INKERNEL((unsigned)callpc) && !INKERNEL((unsigned)frame)))
+ user_frame++;
+ if (user_frame == 1) {
+ db_printf(">>>>> user space <<<<<\n");
+ if (kernel_only)
+ break;
+ }
+ if (frame <= lastframe) {
+ if (INKERNEL(lastframe) && !INKERNEL(frame))
+ continue;
+ db_printf("Bad frame pointer: 0x%x\n", frame);
+ break;
+ }
+ }
+}
+
+#define CTHREADS_SUPPORT 1
+
+#if CTHREADS_SUPPORT
+
+thread_t
+db_find_kthread(
+ vm_offset_t ustack_base,
+ vm_size_t ustack_top,
+ task_t task)
+{
+ thread_t thread;
+
+ queue_iterate(&task->thread_list, thread, thread_t, thread_list) {
+ vm_offset_t usp = thread->pcb->iss.uesp/*ebp works*/;
+ if (usp >= ustack_base && usp < ustack_top)
+ return thread;
+ }
+ return THREAD_NULL;
+}
+
+static void db_cproc_state(
+ int state,
+ char s[4])
+{
+ if (state == 0) {
+ *s++ = 'R';
+ } else {
+ if (state & 1) *s++ = 'S';
+ if (state & 2) *s++ = 'B';
+ if (state & 4) *s++ = 'C';
+ }
+ *s = 0;
+}
+
+/* offsets in a cproc structure */
+int db_cproc_next_offset = 0 * 4;
+int db_cproc_incarnation_offset = 1 * 4;
+int db_cproc_list_offset = 2 * 4;
+int db_cproc_wait_offset = 3 * 4;
+int db_cproc_context_offset = 5 * 4;
+int db_cproc_state_offset = 7 * 4;
+int db_cproc_stack_base_offset = 10 * 4 + sizeof(mach_msg_header_t);
+int db_cproc_stack_size_offset = 11 * 4 + sizeof(mach_msg_header_t);
+
+/* offsets in a cproc_switch context structure */
+int db_cprocsw_framep_offset = 3 * 4;
+int db_cprocsw_pc_offset = 4 * 4;
+
+#include <machine/setjmp.h>
+
+extern jmp_buf_t *db_recover;
+
+void db_trace_cproc(
+ vm_offset_t cproc,
+ thread_t thread)
+{
+ jmp_buf_t db_jmpbuf;
+ jmp_buf_t *prev = db_recover;
+ task_t task;
+ db_addr_t pc, fp;
+
+ task = (thread == THREAD_NULL)? TASK_NULL: thread->task;
+
+ if (!_setjmp(db_recover = &db_jmpbuf)) {
+ char pstate[4];
+ unsigned int s, w, n, c, cth;
+
+ s = db_get_task_value(cproc + db_cproc_state_offset, 4, FALSE, task);
+ w = db_get_task_value(cproc + db_cproc_wait_offset, 4, FALSE, task);
+ n = db_get_task_value(cproc + db_cproc_next_offset, 4, FALSE, task);
+ c = db_get_task_value(cproc + db_cproc_context_offset, 4, FALSE, task);
+ cth = db_get_task_value(cproc + db_cproc_incarnation_offset, 4, FALSE, task);
+
+ db_cproc_state(s, pstate);
+
+ db_printf("CThread %x (cproc %x) %s", cth, cproc, pstate);
+ if (w) db_printf(" awaits %x", w);
+ if (n) db_printf(" next %x", n);
+ db_printf("\n");
+
+ if ((s != 0) && (c != 0)) {
+ pc = db_get_task_value(c + db_cprocsw_pc_offset, 4, FALSE, task);
+ fp = c + db_cprocsw_framep_offset;
+ } else {
+ db_addr_t sb;
+ vm_size_t ss;
+
+ sb = db_get_task_value(cproc + db_cproc_stack_base_offset, sizeof(db_expr_t), FALSE, task);
+ ss = db_get_task_value(cproc + db_cproc_stack_size_offset, sizeof(db_expr_t), FALSE, task);
+ db_printf(" Stack base: %x\n", sb);
+ /*
+ * Lessee now..
+ */
+ thread = db_find_kthread(sb, sb+ss, task);
+ if (thread != THREAD_NULL) {
+ pc = thread->pcb->iss.eip;
+ fp = thread->pcb->iss.ebp;
+ } else
+ fp = -1;
+ }
+
+ if (fp != -1)
+ db_i386_stack_trace(thread, (struct i386_frame*)fp, pc,
+ -1, F_USER_TRACE);
+ }
+
+ db_recover = prev;
+}
+
+void db_all_cprocs(
+ task_t task,
+ db_expr_t cproc_list)
+{
+ jmp_buf_t db_jmpbuf;
+ jmp_buf_t *prev = db_recover;
+ thread_t thread;
+ db_expr_t cproc, next;
+
+
+ if (task != TASK_NULL) {
+ thread = (thread_t) queue_first(&task->thread_list);
+ } else
+ thread = current_thread();
+
+ if (cproc_list != 0)
+ next = cproc_list;
+ else
+ if (!db_value_of_name("unix::cproc_list", &next)) {
+ db_printf("No cprocs.\n");
+ return;
+ }
+
+
+ while (next) {
+ if (_setjmp(db_recover = &db_jmpbuf))
+ break;
+
+ cproc = db_get_task_value(next, 4, FALSE, TASK_NULL);
+ if (cproc == 0) break;
+ next = cproc + db_cproc_list_offset;
+
+ db_trace_cproc(cproc, thread);
+ }
+
+ db_recover = prev;
+}
+
+#endif /* CTHREADS_SUPPORT */
+
+#endif /* MACH_KDB */
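
db_numargs() above guesses a frame's argument count by decoding the instruction at its return address: a cdecl caller cleans up either with popl %ecx (one word popped) or addl $imm8,%esp (imm8/4 words), and anything else falls back to db_numargs_default. A standalone sketch of that decoder, assuming the same little-endian 32-bit fetch that db_get_task_value() provides; the names here are illustrative.

#include <stdio.h>
#include <stdint.h>

#define NUMARGS_DEFAULT 5	/* same fallback as db_numargs_default */

/* Decode the 32-bit little-endian word at the caller's return address
 * and guess how many argument words the call pushed. */
static int guess_numargs(const uint8_t *retaddr)
{
	uint32_t inst = (uint32_t)retaddr[0]
		      | (uint32_t)retaddr[1] << 8
		      | (uint32_t)retaddr[2] << 16
		      | (uint32_t)retaddr[3] << 24;

	if ((inst & 0xff) == 0x59)		/* popl %ecx */
		return 1;
	if ((inst & 0xffff) == 0xc483)		/* addl $imm8, %esp */
		return (int)((inst >> 16) & 0xff) / 4;
	return NUMARGS_DEFAULT;
}

int main(void)
{
	const uint8_t pop_ecx[]   = { 0x59, 0x90, 0x90, 0x90 };	/* popl %ecx */
	const uint8_t add_esp[]   = { 0x83, 0xc4, 0x0c, 0x90 };	/* addl $12, %esp */
	const uint8_t unrelated[] = { 0x89, 0xec, 0x5d, 0xc3 };

	printf("%d %d %d\n",
	       guess_numargs(pop_ecx),		/* 1 */
	       guess_numargs(add_esp),		/* 3 */
	       guess_numargs(unrelated));	/* 5 */
	return 0;
}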
diff --git a/i386/i386/debug.h b/i386/i386/debug.h
new file mode 100644
index 00000000..99108b69
--- /dev/null
+++ b/i386/i386/debug.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_DEBUG_
+#define _I386_DEBUG_
+
+
+#ifdef DEBUG
+
+
+/* Maximum number of entries in a debug trace.
+ If the buffer overflows, the oldest entries are forgotten. */
+#define DEBUG_TRACE_LEN 512
+
+/* Add the caller's current position to the debug trace buffer.
+ Only the kernel stack needs to be valid;
+ the other data segment registers are not needed
+ and all registers are saved. */
+#ifndef ASSEMBLER
+
+/* Dump a saved state.
+ Probably a good idea to have this around
+ even when DEBUG isn't turned on. */
+void dump_ss(struct i386_saved_state *st);
+
+#define DEBUG_TRACE _debug_trace(__FILE__,__LINE__)
+
+/* Reset the debug trace buffer so it contains no valid entries. */
+void debug_trace_reset(void);
+
+/* Dump the contents of the trace buffer to the console.
+ Also clears the trace buffer. */
+void debug_trace_dump(void);
+
+#else /* ASSEMBLER */
+
+#define DEBUG_TRACE \
+ pushl $__LINE__ ;\
+ pushl $9f ;\
+ call __debug_trace ;\
+ addl $8,%esp ;\
+ .data ;\
+9: .ascii __FILE__"\0" ;\
+ .text
+
+#endif /* ASSEMBLER */
+
+
+#endif /* DEBUG */
+
+/* XXX #include_next "debug.h" */
+
+#endif /* _I386_DEBUG_ */
diff --git a/i386/i386/debug_i386.c b/i386/i386/debug_i386.c
new file mode 100644
index 00000000..c8cd5635
--- /dev/null
+++ b/i386/i386/debug_i386.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "thread.h"
+#include "trap.h"
+#include "debug.h"
+
+void dump_ss(struct i386_saved_state *st)
+{
+ printf("Dump of i386_saved_state %08x:\n", st);
+ printf("EAX %08x EBX %08x ECX %08x EDX %08x\n",
+ st->eax, st->ebx, st->ecx, st->edx);
+ printf("ESI %08x EDI %08x EBP %08x ESP %08x\n",
+ st->esi, st->edi, st->ebp, st->uesp);
+ printf("CS %04x SS %04x DS %04x ES %04x FS %04x GS %04x\n",
+ st->cs & 0xffff, st->ss & 0xffff,
+ st->ds & 0xffff, st->es & 0xffff,
+ st->fs & 0xffff, st->gs & 0xffff);
+ printf("v86: DS %04x ES %04x FS %04x GS %04x\n",
+ st->v86_segs.v86_ds & 0xffff, st->v86_segs.v86_es & 0xffff,
+	       st->v86_segs.v86_fs & 0xffff, st->v86_segs.v86_gs & 0xffff);
+ printf("EIP %08x EFLAGS %08x\n", st->eip, st->efl);
+ printf("trapno %d: %s, error %08x\n",
+ st->trapno, trap_name(st->trapno),
+ st->err);
+}
+
+#ifdef DEBUG
+
+struct debug_trace_entry
+{
+ char *filename;
+ int linenum;
+};
+struct debug_trace_entry debug_trace_buf[DEBUG_TRACE_LEN];
+int debug_trace_pos;
+
+
+void
+debug_trace_reset()
+{
+ int s = splhigh();
+ debug_trace_pos = 0;
+ debug_trace_buf[DEBUG_TRACE_LEN-1].filename = 0;
+ splx(s);
+}
+
+static void
+print_entry(int i, int *col)
+{
+ char *fn, *p;
+
+ /* Strip off the path from the filename. */
+ fn = debug_trace_buf[i].filename;
+ for (p = fn; *p; p++)
+ if (*p == '/')
+ fn = p+1;
+
+ printf(" %9s:%-4d", fn, debug_trace_buf[i].linenum);
+ if (++*col == 5)
+ {
+ printf("\n");
+ *col = 0;
+ }
+}
+
+void
+debug_trace_dump()
+{
+ int s = splhigh();
+ int i;
+ int col = 0;
+
+ printf("Debug trace dump ");
+
+ /* If the last entry is nonzero,
+ the trace probably wrapped around.
+ Print out all the entries after the current position
+ before all the entries before it,
+ so we get a total of DEBUG_TRACE_LEN entries
+ in correct time order. */
+ if (debug_trace_buf[DEBUG_TRACE_LEN-1].filename != 0)
+ {
+ printf("(full):\n");
+
+ for (i = debug_trace_pos; i < DEBUG_TRACE_LEN; i++)
+ {
+ print_entry(i, &col);
+ }
+ }
+ else
+ printf("(%d entries):\n", debug_trace_pos);
+
+ /* Print the entries before the current position. */
+ for (i = 0; i < debug_trace_pos; i++)
+ {
+ print_entry(i, &col);
+ }
+
+ if (col != 0)
+ printf("\n");
+
+ debug_trace_reset();
+
+ splx(s);
+}
+
+#include "syscall_sw.h"
+
+int syscall_trace = 0;
+
+int
+syscall_trace_print(int syscallvec, ...)
+{
+ int syscallnum = syscallvec >> 4;
+ int i;
+
+ printf("syscall -%d:", syscallnum);
+ for (i = 0; i < mach_trap_table[syscallnum].mach_trap_arg_count; i++)
+ printf(" %08x", (&syscallvec)[1+i]);
+ printf("\n");
+
+ return syscallvec;
+}
+
+#endif /* DEBUG */
+
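
debug_trace_dump() above recovers time order after the ring wraps: a non-empty last slot means the buffer has gone around at least once, so the entries at and after debug_trace_pos are printed first, then the ones before it. A compact user-space sketch of that ring discipline, with the (file, line) pair reduced to an integer tag and TRACE_LEN standing in for DEBUG_TRACE_LEN:

#include <stdio.h>

#define TRACE_LEN 8		/* stand-in for DEBUG_TRACE_LEN; power of two */

static int trace_buf[TRACE_LEN];	/* 0 means "slot never written" */
static int trace_pos;

static void trace(int tag)
{
	trace_buf[trace_pos] = tag;
	trace_pos = (trace_pos + 1) & (TRACE_LEN - 1);
}

static void trace_dump(void)
{
	int i;

	/* A non-empty last slot means the buffer wrapped: the oldest
	 * entries are the ones at and after the current position. */
	if (trace_buf[TRACE_LEN - 1] != 0)
		for (i = trace_pos; i < TRACE_LEN; i++)
			printf(" %d", trace_buf[i]);
	for (i = 0; i < trace_pos; i++)
		printf(" %d", trace_buf[i]);
	printf("\n");
}

int main(void)
{
	int tag;

	for (tag = 1; tag <= 11; tag++)		/* overflow the 8-entry ring */
		trace(tag);
	trace_dump();				/* prints 4..11 in order */
	return 0;
}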
diff --git a/i386/i386/debug_trace.S b/i386/i386/debug_trace.S
new file mode 100644
index 00000000..a263bcfd
--- /dev/null
+++ b/i386/i386/debug_trace.S
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifdef DEBUG
+
+#include <mach/machine/asm.h>
+
+#include "debug.h"
+
+ .text
+ENTRY(_debug_trace)
+ pushf
+ cli
+ pushl %eax
+ pushl %ebx
+ .byte 0x36 /* SS: bug in gas? */
+ movl %ss:EXT(debug_trace_pos),%eax
+ movl 16(%esp),%ebx
+ movl %ebx,%ss:EXT(debug_trace_buf)(,%eax,8)
+ movl 20(%esp),%ebx
+ movl %ebx,%ss:EXT(debug_trace_buf)+4(,%eax,8)
+ incl %eax
+ andl $DEBUG_TRACE_LEN-1,%eax
+ .byte 0x36 /* SS: bug in gas? */
+ movl %eax,%ss:EXT(debug_trace_pos)
+ popl %ebx
+ popl %eax
+ popf
+ ret
+
+#endif /* DEBUG */
+
+/* XXX gas bug? need at least one symbol... */
+foo:
+
diff --git a/i386/i386/eflags.h b/i386/i386/eflags.h
new file mode 100644
index 00000000..26ce3d91
--- /dev/null
+++ b/i386/i386/eflags.h
@@ -0,0 +1,35 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _KERNEL_I386_EFLAGS_H_
+#define _KERNEL_I386_EFLAGS_H_
+
+#include <mach/machine/eflags.h>
+
+/* Eflags bit combinations used by the Mach kernel. */
+#define EFL_USER_SET (EFL_IF)
+#define EFL_USER_CLEAR (EFL_IOPL|EFL_NT|EFL_RF)
+
+#endif /* _KERNEL_I386_EFLAGS_H_ */
diff --git a/i386/i386/fpe.b b/i386/i386/fpe.b
new file mode 100755
index 00000000..10a8e65d
--- /dev/null
+++ b/i386/i386/fpe.b
@@ -0,0 +1,478 @@
+
+begin 644 fpe.o
+M!P$``+12`````````````#````````````````````!@'@8/H`^HB>7I:@D`
+M`(GL#ZD/H0<?8<\`````5E>+?"00#[1W/(M',#T`````#X27````/0$```!U
+M"(/&(.F(````/0(```!U!8/&).M\/0,```!U!8/&*.MP/04```!U!8/&+.MD
+M/1(```!T)3T4````="0]%0```'0C/18```!T(CT7````=$&+=QADK6:)1P!D
+MK6:)1P1DK6:)1PADK6:)1PQDK8E'$&2MB4<49*V)1QB#Q@1DK8E'(&2MB4<D
+M9*V)1RADK8E'+&2MB4<P9*UFB4<T9*V)1SB)=SQFC&=`7U[+````````````
+M`,#__P````````"`_W\`````````````___________^?T(Q``!",0``V!(`
+M`+(2``!*!```2@0``)4&```K!0``2@0``$H$``"5!@``*P4``&4&``!E!@``
+M"@<``%,&```*!0``"@4``(,&``#?!```2@0``$H$``!^!@``*P4``$H$``!*
+M!```?@8``"L%``!E!@``908```H'``!3!@``"@4```H%``!L!@``WP0``$H$
+M``"S!@``\08``"L%``"S!@``LP8``"$'``"A!@``\08``"$'``#Q!@``PP8`
+M``H%``"A!@``PP8``-\$``!*!```VP8``+,&```K!0``LP8``"$'``"S!@``
+MH08``/$&``#Q!@``(0<``,,&```*!0``VP8``*$&``#?!```ON@```#&1<3_
+MQD7&`HU]O!X.'Q8'N0(```#SI8E-N(L&9HE%R!_#'@'N%A\6!XU]N+D%````
+M\Z4?PV6`#00````(9?8%``````AU/X%MR`!@``"!?<C_?P``?2^`O7#_____
+M#X6D````98`-!````""`O6______#X6/````98`-!0````+I@@```&6`#00`
+M```@Z/(C```\#'0<BD7$Z*`C``!U$F6`#04````"OO(```#I4?___\=%R/Y_
+M``#'1;@`````QT6\_____\=%P/____^`O73___\%?#"`O73___\(?R>R`V4B
+M%0$```"`^@)_&700QT6\`````,=%P`#____K!X%EO`#X___&1<8`PV7V!0``
+M```0=2!E@`T$````$(%%R`!@``"#?<@`#X\G____@6W(`&```+^X____N`$`
+M``#HDT(``.@@(P``L/^_N/___^@J(@``9H.]</___P!T)&6`#00````P@+UO
+M_____W4398`-!0````+V1<.`=`7&1<8`PS'`B47(O[S____H?DP``,9%Q@9U
+M!,9%Q@'#@WP]$`!U+,=$/1`!````]D0]"X!U'0^]3#T(=!>+1#T$@^D?]]D/
+MI40]"--D/00I3#T0PXM$/00/O<B#Z1_WV=/@B40]",=$/00`````@\$@Z]QT
+M'>A5(P``9?8%``````$/A!H!``#H[_W__^F:````98`E!0```/V*18X*1:(/
+MA00!```/MIUT____@.L%9L'C`B[_DQ`!``"_N/___^@G(@``L`#H-B$``+^X
+M____Z&I!``"+1<@]_G\```^/MP```(/X``^,M@```.B7_O__(<!U%+^\____
+MZ)-+```/A9T```#&1<8!@+UP_____W4998`-!````""`O6______=0AE@`T%
+M`````K^X____OGS____I+P$``&6`#00````"9?8%``````)T5;^`____Z,G^
+M__^_E/___^B__O__Z4#___]E@`T$`````F7V!0`````"="J_@/___^B>_O__
+MZ1____]E@`T$`````F7V!0`````"=`GKO.@6_?__ZXK#Z.;]___K@J@0="9E
+M@`T$`````67V!0`````!=.*_@/___^@J&```OGS____IE@```(!]C@)T#(!]
+MH@(/A9````#K#/9%BT!T#(!]H@)U&/9%GT!U$F6`#00````!9?8%``````%T
+MF+Y\____@'V.`G0'OY3____K28!]H@)T![^`____ZSR*18R*9:!0@&6+?X!E
+MGW_&18P`QD6@`,:%=/___P96Z%$K``!>6(A%C+^`____@'W$`'0(B&6@OY3_
+M__^`3#T+P.B1%```Z14B``"*7:**?8[VQP1T`K<#]L,$=`*S`X'C`P,``,#G
+M`@#[,/_!XP*X^T````*%=/____;D`<,N_Z,@`0``PV6`#00````"9?8%````
+M``)T[;Z`____ZRYE@`T$`````F7V!0`````"=-3V5:#K$F6`#00````"9?8%
+M``````)TO;Z4____Z*C[___K9&6`#00````"9?8%``````)TG[[\````Z'#[
+M___&1<8!ZS-E@`T$`````F7V!0`````"#X1Y____ZQ9E@`T$````!&7V!0``
+M```$#X1A____Z-C[___&1<0`BF6,.F6@=`/V5<3IQOW__XIEH("]=/___P9U
+M`O;4.F6,#X1$____98`-!`````%E]@4``````0^$&____^CD^O__Z8_]__\`
+M``!B#@``GPX``+T.``!2#P``DPX``"00``#Q#@``1Q```)<.``"G#@``%0\`
+M`.</``";#@``I0X``"X/```0$```'PT``"T-```V#0``0@T``",-```H#0``
+M.@T``#$-``#^#0```@X```8.```*#@``#@X``!4.```A#@``)0X``"D.```Q
+M#@``.0X``$$.``!)#@``2@X``%(.``!:#@``?1H``$`J```1'P``<C(``#LH
+M```7!```%P0``!<$```7!```@2D``!8=``!3'0``.R@``/8>``!A*0``5QX`
+M`!,Z```3.@``VS\``"A*```3.@``_!4``.$?``!<%```M2,``+D=```[*```
+MMB@``)<I``#0*0``KRD``/03``#T$P``]!,``/03``#T$P``]!,``/03``"7
+M*```DR@``*4I``!R,@``Z1T``"),``#'30``Q$X``.$?``!R,@``!0<#,`8Q
+M"#(`A`$SFXF<G0`9C#,"#82$*2J$A!\@(2(C)"6$$A`3%`\N)B<6$14M%Q@K
+M+`"$`3,``#,SC(RHCHR$A(0`A`$SBX2*GAH9`3,O-(2$&ADS,YZ$A(0```<'
+M!0$%`0<'!P<'!P<'!P`%`04"!0-$1```0$````P,#`P,#`P`1&A'F$=D``!D
+M:$1'1)1$1`,'!04'!0<%``<!`0```0$%!P,#!P4'!0$'```#`P```0`'!P4!
+M!0$'!P<'!P<'!P('!04(!`4%!0<"`@4%"`2![)@```!FC-!FCMAFCL!F+HL5
+M&````&:.Z@\"T/?"``!``'40@>3__P``@>7__P``LP'K`K,`B%T"]T4X```"
+M`'40QD4#`6:)1?J-13R)11SK,L9%`P"+13R)11QFBT5`9HE%^F:+141FB44(
+M9HM%2&:)10QFBT5,9HE%!&:+15!FB44`_+DD````C;UH____,<#SJ_RR!XC1
+M9KL"`H!]`P%U(,5U,,9%^`'&1?D!#P)%-*D``$``=1S&1?@`QD7Y`.L2#[=U
+M-,'F!`-U,,9%^`#&1?D`9HL&1B2'>?B`Q$#0T-#`9KX`.&8AQF;![@MFOQX`
+M9B''9M'G#A\/O_8/O_\N_Y=$!P``@.0'9HF%=/___V:)E7;___]FB8UY____
+M9HFU?/___V:)O7[___^(O7C___^(G7O___^`?0,!=1+%=3")M6[___]FC)UR
+M____ZQN+=3")M6[___\/MTTT9HF-<O___\'A!`'.%A]F,<DQVV:+50QFBP9&
+M(,!X0#QG=0:`=?D!Z^X\9G4&@'7X`>OD9DEFBU4$/&1TVF:+50`\97329HM5
+M"#PF=,IFBU4T/"YTPF:+5?H\-G2ZZ[1&B./V1?D!#X4)`@``#[<&4&:+10QF
+MCMA8@/O`#X.(````T.-R$7@,@.,.@/L,=!-F,<!.3F:8@^,.T>,N_Y.$!P``
+M1D:`?0,!=2=FB95L____B85H____]H5T____@'5)969EB148````9:,4````
+MZS@/M]+!X@0!T&:,TF:)E6S___^)A6C____VA73___^`=1=E9J,4````B<+!
+MZA#!X@QE98D5&````(!]`P%U!8EU_.L,#[=5-,'B!"G6B77\OG;___^_@/__
+M_\=%J`````#'1:P`````QT6P`````,=%M`````#H-`T``&90OGG___^_E/__
+M_^@C#0``9EMF"=B*G73____0TW,5BHUT____@/F;=W&`^8YT;(#YBG1G966*
+M#0````#VT8#A/V4B#00```!T!,T0Z^;VA73___^`=42`?0,!=1E0Q(5N____
+M9:,,````969EC`40````6.LE4`^WA7+____!X`0#A6[___]E9J,,````P>@0
+MP>`,9:,0````6`^_V]'#9@G`+O^3Y`<``&5EB@4`````]M`D?V4B!00```!T
+M(+``BHUT____]L&`=`6`^9MW&&9E@0T$````@("P`NL*9F6!)00```!_?XM-
+M_(E-,")%.70"S1#I[_+__V8#12!F`T44PV8#11##9@-%$&8#12##9@-%%&8#
+M11AGXP?#9@-%$.OR9HM5^L,Q_X#\P',+@.0'@/P$=0.*/D:+!AX/H5!FBT4,
+M9H[86(#[P`^#:_[__X/&!-#C<AAX$(#C#D>`^PH/A/#]__\QP$Z#[@,/OL"`
+MXP^`^PAT$8/C#M'C+O^3I`<``.G._?__B/LA_W00@.<'@/\%=0ADBP:#Q@3K
+M$V:)WX/C!\'C`B[_DZ0'``!FB?N(WX#G.(#_(`^$E_W__XC9P,$"@.$#T.N#
+MXQPN_Y/$!P``Z7[]__\#12S#`T4HPP-%),,#12##`T4<9^,(PP-%&&?C`<-F
+MBU7ZPP-%%,,#11##BWTLT^<!^,.+?2C3YP'XPXM])-/G`?C#BWT@T^<!^,/#
+MBWT8T^<!^,.+?133YP'XPXM]$-/G`?C#L0"U`(J&I`@``&:)UV:Z!0%FB=8\
+M,70-/#)T#3PP=02P`[<!P[`&ZP*P"(;?9H?*P[$#Z\VQ`>O)L0+KQ6:Y!03K
+MP;,!BH:D"```9HG79KH%`6:Y!01FB<[KN8J&K`@``&;1Y@^_]F:+E@`)``!F
+MO@``9HG//`!T##PS=`T\`70-9HG^PV:^!0/#MP&P`<.*AM@(``"*ED@)``"*
+MME`)``!F4HJ6.`D``(JV0`D``&9>Z[^*AN@(``!FT>8/O_9FBY98"0``9KX!
+M`.NFBH;8"```BI9P"0``BK90"0``9E**EF@)``"*MD`)``!F7NN"]L0@=$YF
+MO@`?9B'&9L'N"`^_]HJ&&`D``-#`N00```#`P`)FOP8`9B''#[__9O^W$`D`
+M`.+JBH:X"```/!!T"#P1=`0\%'4"MP%F7V9>9EEF6L-FB<^*AK0(```\`'00
+M/!ET%3PS="`\`70@9HG^PV:Z!01FO@4#PV:Z!0%FN04$9HG.9HG7P[`!MP%F
+MN@4!9KX%!,.*AO`(```\-'4$MP&P+SPO=0]FN04$9HG79HG69KH%`<,\&G6B
+MMP#KGHJ&^`@``#P:=0*S`3R>=>BU!.OD@/PI=16S`;<!L"]FN04$9HG79HG6
+M9KH%`<.PA&:)SF:)S\-FO@`'9B'&9L'N"`^_]HJ&X`@``.OC9K@!`,,``!PD
+M.40<)#Q$OKRZN0$"`@(!`@(#O[C____HYC\``+^`____L0'H834``.C]%0``
+M@/H#=`:*38.(3;H/MMJ_=!```"X/M@P[B(UH____C76$-O]V!/9%`@%T!0^W
+MU.L"B>))=0%!-O\V@^X$XOB)R+]P$```+HH,.XB-;O___U#B_8G3B<>*C6C_
+M__]1-HL3.U6<=0NX_____S:+4_SK%C:+0_SW=9R)1;")5:SK$HM%L$B+5:R)
+M1;`#59QR%HE5K(M%L/=EF#M5K'((=^$V.T/X=]LQP(E%M(J%;O___XG!P>`"
+M]]B)QHM$-:#W9;`V*00S<P%"BT6T-BD$,W,!0HE5M(/&!.+@-BD4,W,D_TVP
+MBHUN____B<C!X`+WV(G&^(M$-:`V$00S@\8$XO,V$0PSBT6PB40]P%E)=`N#
+MZP2#[P3I1/___XG(B<:*C6[____0X68V"T0S_H/N`N+U",0(9;J)SHJ-;O__
+M_S:+1#/\B40UB(/N!.+RBHUH____`HUN____@/D"=0%!P>$"`<S#:&00``#K
+M!6AH$```O[C____H73X``+^4____L0'HV#,``.AT%```#[;:7RZ*!#N(A6[_
+M__\N#[:#;!```(G'BT6`BUV$BU6(#ZS8`@^LTP+!Z@+K#='@T=/1TG(0@$P]
+M`B`K190;79@;59SK!>@W````Z#P```#^C6[___]UUB'2>`>`3#T"(.L%Z!H`
+M``")18")782)58@)V`G0#Z3"$&8)T`C$B&6ZPP-%E!-=F!-5G,/19;C15;S1
+M5<##BT60*T6D!?\_``")1<B*18PR1:"(1<3HJOW__[^X____Z>H^``"+19`#
+M1:0M_C\``(E%R(I%C#)%H(A%Q.@*````O[C____IQ#X``!Z_K/___Q8?C5P]
+M`.A9/0``O[C____H43T``+D(````OY3____H#ST``'4$`<\!R[Z`____BT0U
+M!`M$-0!U!`'.`<M7BWP]`#')BT0U`/?G`0,1T8/^@'0%B4L$ZR:+1#4$]^<!
+MR(/2`#')`4,$$=&+1#4(]^<!R(/2``%#"(/2`(E3#%^#PP2#QP2#_Z!UL[^L
+M____Z+L\``!T!(!-N@$?PP```````````(#_/P!`_HH;S4MXFM0`0`#`N_`7
+M7"D[JKC_/P#`-,)H(:+:#\D`0`"@F/?/^X2:()K]/P#@JWG/T?<7<K'^/P``
+M`````````````-#KN`8```#VXP4L$@``B<;'1;@`````9BZ+!F:)1;HNBT8"
+MB46\+HM&!HE%P"X/MT8*B47(,<")1<2R`[^X____Z&P1``"_N/___^B@,0``
+M98`E!0```/TQP(M%J)^_N/___^E#!@``=!3H$!,``&7V!0`````!=#WI*`8`
+M`&6`)04```#]BD6./`!T8#P&=$`\$G4BOX#____H!0D``&6`#00````!9?8%
+M``````%T!>GP!0``PSP"=`7IY@4``/9%BT!T!>G;!0``@$V+0.O.98`-!```
+M``)E]@4``````G339L=%D`$`QD6.`+@^0```.460?5N_@/___U?H"#$``%^R
+M`[``Z*80``"`O7#_____=1EE@`T$````(("];_____]U"&6`#04````",<"_
+MA/___^@).P``=`^_@/___^BH/```Z5L%``")19#&18X!Z4\%``"_N/___^@3
+M.P``OY3____H"SL``('A_P```/EJ09SK%U&<L0'HTSL``)V<$,D(3;NQ`NC.
+M.P``BD6+),"(19:Q`NC,.P``L,"*9;N=<@K!X!#H3````.L8]M3!X!`!192+
+M1;SWT!%%F(M%P/?0$46<6>*MT-D(3;IFBT6Z_L3!X!#H'````+^8____,<#H
+M6CH```I%EW0$@$VZ0+$(Z5H[``"^N/___[^4____Z;X)``!T$.AP$0``9?8%
+M``````%U4<-E@"4%````_8I%CCP`=0B`?8P`=27K3SP&=0B`?8P`=1GK,CP!
+M="D\$G0//`)U!>ER_O__@'V,`'06Z.L0``!TOK^X____Z#\'``#IBP```.GC
+M_O__Z,(0``!TH[^`____Z%#M__^!;9#_/P``]T60`0```'0%_TV0ZPZQ`3#`
+MOX#____H6"\``(M%D-'X!?\_``")1<CHHO[__S'`B47$O[C____HV`\``.CI
+M#@``O[C____H'2\``("]</____]U&66`#00````@@+UO_____W4(98`-!0``
+M``*_N/___^FL`P```,2=:/___S'`B40]!(A$/0@FBP.)1#T)P?@7B&0]#"7_
+M````QT6H`````'5$BD0]"V8+1#T)=0K&1#T.`8E$/1##QD0]#@;&1:K_9?8%
+M``````)U`</'1#T0@3\```^]3#T(@^D?]]G39#T(*4P]$,,\_W4RQT0]$/]_
+M``#W1#T(____?\9$/0X*=!KV1#T+0'4.QD6H_V7V!0`````!=`7&1#T.`L/&
+M1#T.``6`/P``B40]$(!,/0N`PQ[%G6C____&1#T$`(L#B40]!8M#!!^)1#T)
+MP?@49IF(5#T,)?\'``#'1:@`````=6B+1#T$"T0]"'4%Z3W____&1#T.!L9%
+MJO]E]@4``````G4!P\=$/1`$/```#[U,/0AT%XM$/02#Z1_WV0^E1#T(TV0]
+M!"E,/1##BT0]!`^]R(/I'_?9T^")1#T(QT0]!`````"#P2#KW&8]_P=U.\=$
+M/1#_?P``@60]"/___P^+1#T("T0]!,9$/0X*=#GV1#T+"'4.QD6H_V7V!0``
+M```!="G&1#T.`NL.!0`\``")1#T0QD0]#@"+1#T$#Z1$/0@#P60]!`.`3#T+
+M@,,>Q;5H____%@>#QP17C7P]`+D"````\Z5FBP9?'V:9B%0]""7_?P``B40]
+M#,=%J`````!T+_9$/0>`=02R$NL(9CW_?W0)L@"(5#T*,<##L@J+1#T$)?__
+M_W\+1#T`=.BR`NODBT0]!`M$/0"R`738L@;KU'(9``"#&0``FAD``+`9``"C
+M&0``]!8``*X7```H'```-QP``(H<``"K&```*!P``"@<``"X&0``@'PU``=T
+M(C';B5P]`(I<-0'!XP(N_Z,?&0``BEPU`,'C`B[_DS,9```QP,/H/0T``.@4
+M`@``="&)WAX/J!_I&/___^@F#0``9D#KY>@=#0```H5U____Z]CHXP,``&9`
+MPQ[%M6C___\6!XU]EKD%````\V:E'V:!?8K__W4+OX#____HN0,``,.*19]F
+MF(AEC+^`____Z'8V``"Q$E&Q!.A4-P``@V6?#^A6-P``OH#____H..C__[$"
+MZ#$W``"+1;AF,<`!18"+1;P1182+1<`118@QP(I%GV8!18*P`!%%A!%%B%GB
+MM;^`____Z`HV``!U"(E%D,9%C@'#QT603D```+^`____Z'XW``#&18X`P[\:
+M``!,&P``LQH``+\:``!3&P``=!#H[PP``&7V!0`````!=0'#GW4(98`E!0``
+M`/V_@/___[Y\____#[9<-0'!XP(N_Z-I&@``Z`T,``!F0.F?````G@^%@0``
+M`.C/````=!KHL@P``&7V!0`````!#X2Y````Z+,"``#K8(M%J`G`=%D(P'0=
+M98`-!`````%E]@4``````0^$D0```(!,/0M`ZS@(Y'0498`-!`````1E]@4`
+M````!'1TZR!E@`T$`````F7V!0`````"=&"`O7;___\!=P7&1#T.`.A+#0``
+MZ'0+``#K"^AM"P```H5U____@^`'B<&*1#T.Z/D,``"-=#T$OQP````!S\'A
+M`@'/N0(````/J`<>%A_SI8HF9B4`@&8+1@0?969EB0?#Z"8+``!F2"0'Z"L+
+M```\`\.-=8"-?93H[@X``(!]C@!U%L=%I/\_``"+19`M_S\``+^`____ZVZ`
+M?8X&=23&1:K_9?8%``````)T(+^4____Z-3G__^+1:3'1:3_/P``Z\J`?8X!
+M=`7&18P`P\9%J?]E]@4`````!'3QQT60_W\``,9%BX#'18S_``H`P\2=:/__
+M_V8FBP,/O\#K"<2=:/___R:+`XG",<")1#T`B40]!`G0=1*)1#T(B$0]#,9$
+M/0X!B40]$,.9B%0]#'D"]]@/O<B#Z1_WV=/@B40]"+@>0```*<B)1#T0QD0]
+M#@##'L6U:/___Q8'5XU\/02Y`@```/.E7Q^+1#T(F8A4/0Q"=13W5#T$@T0]
+M!`'W5#T(@U0]"`#K$`M$/01U"HE$/1#&1#T.`</'1#T0/D````^]3#T(=!>+
+M1#T$@^D?]]D/I40]"--D/00I3#T0PXM$/00/O<B#Z1_WV=/@B40]",=$/00`
+M````@\$@Z]SHQ@P``.@%````Z3P,``"Q!>B9"0``'@^H'_;A9KDH`&8IP;X<
+M````5I@!Q@'&#[?)\V:E9HG!7O-FI1_#Z%X+``"Q!>AF"0``'O;AN2@```!F
+M*<&_'````%>8`<<!Q\6U:/___X/&#H!]^`%U`X/&#O-FI6:)P5_S9J4?Z54+
+M``##,<")1#T$QT0]"````,#&1#T,_\=$/1#_?P``QD0]#@+#=`_HLPD``&7V
+M!0`````!=.YE@R4%````_>C#_/__,<"?OY3___^^?O___^F[_/__=!)E@`T%
+M````066`)04```#YZTME@"4%````N(I%CCP2=#P\`G0@/`IT%#P&=0IE@`T%
+M````1.LF]D6+@'0(98`-!0````30R',(98`-!0```$#0R',(98`-!0````'V
+M18P!=`AE@`T%`````L-T(>@5"0``9?8%``````%T1K^4____Z";___\,_Y_I
+M8/___^@;_?__=`7I4?___V6`)04```#]QT6H`````(I%CCP2=1]E]@4`````
+M`74)98`-!`````'#OX#____HW_[__^L8/`)U+?9%BT!U$F7V!0`````!=->`
+M38M`QD6H_XUU@(U]E.C""P``Z+/[___IZ_[__^BZ_/__Z=3^__]T$.AV"```
+M9?8%``````%U!</&18P`Z8'[__]T$.A;"```9?8%``````%U!,/V58SI9_O_
+M_P!0L0'H>S$``+$!Z&LQ``"^E/___[^X____9L=$-0```+D#````N``````;
+M1#4`B40]`$9&1D9'1T='XNGK%%'HZS$``+Z4____]H5H____`70%OKC___^_
+M@/___^@F````T)5H____6>+4]H5H____`74/OI3___^_@/___^@%````Z:@Q
+M``"+1#4`9C'``40]`(M$-0011#T$BT0U"!%$/0C#`>\![AX6'Q8'N0,```#S
+MI1_#GV6`)04```#Y9L>%:/___P``GG0*Z'@'``#IY@$``(I%C@I%H@^%S@$`
+M`(M%D"M%I+^`____?&.#^#]^#F6`#04````$@\@@@^`_*4600.CW_O__,<"_
+MA/___^CV+P``=1EE]@4%````!`^$&@$``(B%:/___^D/`0``OX#____H?#$`
+M`&7V!04````$=`S&A6C___\`Z;@```"?@+UT____+@^%J@```)YU![$!Z+PP
+M``#_3:3_=8S&1:``QD6,`,:%=/___P:+19`[1:2^@/___WX%OI3___^_X/__
+M_^@*____Z'@0``"_N/___^A_+P``CT6,OX#___]U%/:%:/___P%T3?Z%:/__
+M__95C.M"ON#___^`?<0`=`SHS?[__[^`____ZRN+19`[1:1T"K^4____Z+3^
+M___^A6C_____1:2*18R(1:#H$Q```+^X____@WP]$`%]88G^Z`7A__]E_S4`
+M````98`-`0````/&A7#___\`Z-7A__]ECP4`````O[C____K,8E%D,9%C@&`
+MO73___\N=1S&18P`9?8%`0````1T#F7V!0$````(=03&18S_OX#___^^?/__
+M_^C^^/__98`E!0```+R*A6C____`X`9S"&6`#04````!T.!S"&6`#04```!`
+MT.!S"&6`#04````"PZ@0="9E@`T$`````67V!0`````!=.F_@/___^B>^___
+MOGS____I"N3__XI%HHIECH#\`G0&/`)U*>L*]D6+0'0*/`)U&/9%GT!U$F6`
+M#00````!9?8%``````%THNERX___@+UT____&`^$K0```(#\"G20/`%TC(#\
+M!G5C98`-!`````)E]@4``````@^$;/___[^`____/`IU/?9%BX!T$,=%D`$`
+M``#&18X`Z6[___]E]@4`````$`^%8/___^@EX?__@460`&```&6`#00````0
+MZ4?___]0Z`OA__]8/`9U(F6`#00````"9?8%``````(/A`7___^_E/___U#H
+MY>#__UB`_`$/A*[^__\\"@^$IO[__^D6_?__@/P*=6B`?8P`=2X\`0^$U?[_
+M_SP&=1)E@`T$`````F7V!0`````"=%N*1:"(18R_@/___^G_````/`H/A*?^
+M__\\!G4298`-!`````)E]@4``````G0MOY3____H$2T``(E%I,9%H@'IRP``
+M`(#\!G4398`-!`````)E]@4``````G4!PSP&#X6F````98`-!`````)E]@4`
+M`````G3E@/P`=0R_E/___^@@X/__ZR]E]@4`````$'5YZ90```!T"NBW`P``
+MZ27^__]E@"4%````_8I%C@I%H@^%!?[__V7_-0````!E@`T!````#.B+"P``
+M98\%`````'0(@'V,`'0,ZQB+18@]/>```'X'N#W@``#K##T"(/__?06X`B#_
+M_P%%I(%]I/Y_``!_%8-]I`%\(+^4____OGS____I;/;__[Z4____Z`7>___H
+M$][__^LLOI3____H]-W__V7_-0````!E@`T!`````\:%</___P#HQ-[__V6/
+M!0````"_N/___^NRATP]!B')>0+VUH'A____?PM,/0)F"TP]`'0"]M(PY/9$
+M/0H!=!'VU,=$/0(`````9L=$/0```+D"`0``PX9,/0@@R7D"]M:`X7\+3#T$
+M"TP]`'0"]M(PY/9$/0D!=`+VU,=$/00`````QT0]``````"Y`P$``,.'3#T$
+M(<EY`O;6@>'___]_"TP]`'0"]M(PY/9$/0@!=`+VU,=$/0``````N00!``##
+MBTP]!/;%!'0"]M:!X?\#```+3#T`=`+VTC#D]D0]!0AT`O;4QT0]``````!F
+M@60]!`#XN0<(``##ATP]`"')>0+VUH'A____?W0"]M(PY/9$/00!=`+VU+D(
+M`0``P[DD``#S)```)"4``%TE``!\)```"B8```(F````)@``,28``##V#[_R
+MP>8"9C'2,<DN_Y:")0``9B'2=1L\_XC0=0>(A7'____#9HF5</___XB%;___
+M_\,\_W4(B(5Q____ZPEFQX5P_____P!0Z+4````/MMA8+O^CEB4``/;7.'P]
+M#'4SZR>(UPCB//]U&XJ=</___V:!^_\`=0XZI6____]U!F9"=`;K#F9"=`JP
+M`(B%;____^LHQH5O_____XCJ,.V[#````"G+`=\PP`!4/0!'21!$/0!'XOD8
+MP(/O#,.(Q.@_````/`1U!R#D=`LPP,,\"'7Y(.1T]0S_P^@D````/`1T]#P(
+M=/#KX[(#92(5`0```,-E@"4!````_&4(!0$```##966*!0$````D#,-E@"4!
+M````\V4(!0$```##966*!04````D.,#H`\,/ML")P='AB<O!XP(!RX/#'&5F
+MH0@```!FT^@D`SP"=2QE9F6+0PAF)?]_=0.P!L-E98M#!*D```"`=0.P$L,E
+M____?V4+`[`*=`*P`L-E]@4`````$,/HB@```&7V!0`````"P^AS````9?8%
+M``````'#9?8%``````C#9?8%`0```!##9?8%!````"##9?8%!`````'#9?8%
+M!`````+#98`-!````$%E@"4%````_<-E@`T$````066`#04````"PV6`#00`
+M```@PV6`#00````0PV6`#00````(PV6`#00````!PV6`#00````"PV6`#00`
+M```$9?8%``````3#98`-!0````'#98`-!0```$##98`-!0````+#98`-!0``
+M``3#98`E!0```/[#98`E!0```+_#98`E!0```/W#98`E!0```+C#98`E!0``
+M`/O#98`E!````-_#Z`O___]U!^@H____=7J*A7C____H!@```(J%>____SP!
+M=#DT`'4TZ&S^__\"A77___\D!XC!L`/0X69ETPT(````98`E"````/PD`V4(
+M!0@```!F9=,%"````,/H-_[__^C.____L0CK`K$X966*!04````D.&4P!04`
+M````R"0X90@%!0```,,>#Z@'Q;5H____OP````"Y!P```(!]^`%U#?.E'V6`
+M#0````!`ZP]FI4='XOH?98`-`````$"Y"````#';N`H```#WX[X<`````<:X
+M`P```&4C!0@```"#^`-T0F5F98M&"&8E_W]T&67V1@>`=02R`NL<9CW_?W0$
+ML@#K$K("ZPYE98M&!&4+!K(!=`*R`F6`)0@```#\90@5"````&9EP0T(````
+M`D/BD\-FN'\#96:C`````&:X__]E9J,(````9D!E9J,$````P\2=:/___V8F
+MBP-F#4``96:C`````,/H10```&6`#0````!_PV5EQ@4$`````,.*A7K___\\
+M!&5FH00```!T"\2=:/___V8FB0/#9HE%+,-E9J$`````9B5_'V8-0!#KWQX/
+MJ!]F98$E`````'\?9F6!#0````!`$+X`````Q+UH____N0<```"`??@!=03S
+MI1_#9J5&1N+Z'\,`2RP``,\M```++P``Q"\``(DP``"R*@``"R\```LO``#F
+M*@``=`_H+/W__V7V!0`````!=&!E@"4%````_8"]??___P1U"N@S\/__Z<']
+M__\/MIU\____P>,"@_L(?2*`?8X2=1QE@`T$`````67V!0`````!=!Z_@/__
+M_^C[\O__+O^C'"H``!X6'Q8'N04```#SI1_#Q+UH____'A8?C76$N0(```#S
+MI1^*98QF)0"`9@M%D&8FB0?I4_W__P```````$!V.FL+WF7_-0````!E@`T!
+M`````X!]C@%U!>D?`0``@7V0_W\``'4P98`-!`````%E]@4``````74%Z2,!
+M``#'18(`````QT6&````@&;'18K__^GO````@'V.!G4+QT60`0```,9%C@"X
+M/D```#E%D'VUOX#___]7Z)\:``!?L@.P`.@]^O__,<"_A/___^C")```=0F)
+M19#&18X!ZWV+5#T$@?JSMN`-#X=Y____BT0]`'(+/0``9*</@VC____'1#T$
+M`````,=$/0``````@^\"NP#*FCOW\U"Y!````+MD````B=`QTO?SDM0*P.0$
+M"."(1#T`1^+K6`G`B%0]`'094C'2N0H```#W\9)9B,2(R&H`N04```#KTH"]
+M</____]U&66`#00````@@+UO_____W4(98`-!0````**98R`Y("(98MEQ#T4
+M````'A8?C76"N04```#S9J4?Z.K[__]ECP4`````PV@M*```Q+UH____OH$_
+M``"R`(I%CCP`=$0\!G0V/`)U'/9%BT!U%F6`#00````!9?8%``````%T&8!-
+MBT"+38F!X?__?X`/MD60P>`7"<@FB0?#OX#____H'-?__[]^0```Z$(```#$
+MO6C___]T+7@DBT6))?___X"I____`'01)?__?X`/MDV0@,&`P>$7"<@FB0?#
+MN'___W_K!;A_``"`"D6,P<@(Z^A75E*P`+^`____Z*/X___HW!@``%I>7SEU
+MD'Q1#X2(````.7V0#XZ)````98`-!`````AE]@4`````"'4#6%C#98`-!```
+M`"!E98H%`0```"0,M`@XQ'@4BD6,Z!#Y__^<>`AE@`T%`````IW#9?8%````
+M`!!U"F6`#00````0Z[R_@/___XGP5U+H>Q@``%I?L/_H&OC__V:#O7#___\`
+M=`AE@`T$````,/9%BX!U!&;_39!F@[UP____`'0998`-!````""`O6______
+M=0AE@`T%`````C'`0,-H+2@``+X!/```L@**18X\`'1#/`9T-3P!=0AFQT60
+M`#SK4CP"=1WV18M`=1=E@`T$`````67V!0`````!=0'#@$V+0&;'19#_0^LI
+MOX#____HG]7__[_^0P``Z,7^__]T?7AHBD6%9IB_AO___^CZ(0``=#NQ`^AD
+M(@``Q+UH____'A8?C76%N0,```#S9J4?9HM%D&8M`#QFP>`$@&6+#PI%BX!E
+MC(`*98QF)HD'PS'`Q+UH____)HD'9B:)1P2*98R`Y(!F)HE'!L.X_____^C<
+M____9KCO?PIEC.OGZ,S___]F)H%/!O!_P[D.0```*TV0>$6#^1%V!;D1````
+MOX#___\PP.@4%P``OX#___^R!+``Z+WV__\\`'4=9H%]B@"`<@AW$X!]C/]U
+M#;``.D6,=`9F]UV*,,##@'V.`'0&@'V.!G4]Z)____]U-F:#O7#___\`=!EE
+M@`T$````(("];_____]U"&6`#04````"9HM%BL2]:/___V8FB0?IV?C__V8Q
+MP(!]C@%TZ&6`#00````!9K@`@&7V!0`````!==+#N1Y````K39!X18/Y(78%
+MN2$```"_@/___S#`Z%L6``"_@/___[(!L`#H!/;__SP`=1V!?8@```"`<@AW
+M$H!]C/]U#+``.D6,=`7W78@PP,.`?8X`=`:`?8X&=3OHG____W4T9H.]</__
+M_P!T&66`#00````@@+UO_____W4(98`-!0````*+18C$O6C___\FB0?I(OC_
+M_S'`@'V.`73J98`-!`````&X````@&7V!0`````!==/#N3Y````K39!X4X/Y
+M078%N4$```"_@/___S#`Z*05``"_@/___[(#L`#H3?7__SP`=2N!?8@```"`
+M<@YW((-]A`!U&H!]C/]U%+``.D6,=`TQP/==A!M%B(E%B##`PX!]C@!T!H!]
+MC@9U0NB1____=3MF@[UP____`'0998`-!````""`O6______=0AE@`T%````
+M`HM%A(M5B,2]:/___R:)!R:)5P3I5O?__S'`F8!]C@%TY66`#00````!N@``
+M`(!E]@4``````77.PXM%@&8QP`-%E(E%N(M%A!-%F(E%O(M%B!-%G(E%P+``
+MT-##BT6`@664``#__RM%E(E%N(M%A!M%F(E%O(M%B!M%G(E%P+``T-##ON]%
+M``#K!;Y/,@``BTV0*TVD>"F#^4-V!;E#````L`"_E/___U;_UEZ+19")1<B`
+M?8X!=#/V18N`=2WK'??9@_E#=@6Y0P```%&P`+^`_____]:+1:2)1<A9@'VB
+M`70(L`#V19^`=`*P_X"]=/___P9U`_95H(IEC#IEH'4HZ#/___^_N/___^@5
+M%```BD6,B$7$]D7#@'4,O[C____H;AX``'5=PU"*18R(1<3H)____[^X____
+M/`"X`````'0<]U0]`(-$/0`!]U0]!!%$/03W5#T($40]"/95Q.@P'@``=1KH
+M@/3__S0$9DAFF(AEQ%@\_W469L=%R```PU@\_W4*O[C____HE!\``,/H8AX`
+M`(M$/0`E____'PG"=`B!3#T`````((%D/0````#@PW0598`E!0```/UE@`T$
+M````0>DH`0``QT6H`````&6`)04```#Y@+UT____*745QD6@`,9%H@&_E/__
+M_^BU'0``B46DBD6."D6B='8\`71RJ!`/A=\```"*18Z*9:(\`G0'@/P"=2WK
+M#_9%BT`/A,0```"`_`)U"O9%GT`/A+4```"`O73___\O#X3)````Z:,```"`
+M?8X&=1'&1:K_@WV0`'4'QT60`0```(!]H@9U$<9%JO^#?:0`=0?'1:0!````
+MQH5T____!N@"_O__O[C____H"1T```^$A````&6`)04```"^]D7$`70(98`-
+M!0````&#?:@`=07IO/3__X!]J`!T&V7V!0`````!=`7II_3__\.`?:H`=07I
+MF_3__V6`#00````"9?8%``````)TXNF$]/__98`-!`````''1:C_````ZQ"`
+M?8X&=`:`?:(&=03&1:K_98`-!0````7K"&6`)04```#^98`-!0```$#K@```
+M@`````````````"```````````````````#U'YCLP+OP%UPI.ZJX````````
+M``````$```````````#``(1DWODS\P2U````C?01!5^L?XH```````````"`
+M`/________^_`*"?A];[.1K`E0!@\E?<:%["TZ0`(/T\M-[/T0"N`(#FLYA(
+MUK<?LP#`B3GL=ZR;UK4`@%-,D18NM#RW`(#\BT)XMX7RMP#@5KICU6LC3K@`
+MX#$FJU/X'WRX`&#]H*(?NBF3N`#`5QB^RGNQGK@`H'!$_@T5=J2X`&"J&YO2
+MCUBGN`!@E.F:OMC)J+@`@#-R)1>`@JFX`,`TPF@AHMH/R0"`17O:#2LX8^T`
+M8!7K!F3)K]OZ`,`R;GMAU=2M_@#`-D[O9[G=JO\`@$(EL4O=K>K_`."[U93;
+MW:KZ_P``:+G4W:VJ_O\`P$NYW=VJJO__`*!+W=VMJNK__P"@V]W=JJKZ__\`
+MX-W=K:JJ_O__`.#=W:JJJO___P#@W:VJJNK___\`X-VJJJKZ____`."MJJJJ
+M_O___P#,.0``LSD``+`Y``"G.0``%CD``*1!``"J00``03H``#0\```)/```
+M!SL```D\``!`/```"3P``/,[```T/```-#P``$$Z```>/```XCH``!X\``#S
+M.P``LSH``$$Z``#W/```P#P```<[``!;/```)#L``-\\``!;/```KSP``"0[
+M```#/0``F3P``.(Z```-/0``#3T``+,Z``!!.@``.ST``#L]```'.P``63T`
+M`#L]```D.P``0ST``*\\```D.P```ST``)D\``#B.@``)3T``"4]``"S.@``
+M9?\U`````&6!#0```````P``98`-!````"#&1<0`9L>%;/___P``BT6D*T60
+MB46H@_@/?C^#^#]_!>G0````Z!#<___H*`\``(M%R(/X``^/!0(```^,`P(`
+M`+^\____Z'09```/A?,!``#&1<8!Z>H!``#HS1D``.C1&0``Z#KZ__]FT:5L
+M____@'W#`'54Z$8;``"_@/___^B7&0``Z/;Y___H1QL``+[H,P``Z&X;``#H
+M,1L``(%EE````/_H"QL``&;_A6S___^`?<,`=!.Y`0```.AM&0``@664````
+M_^L%Z!0:``#_1:B#?:@/=HRY!P```.@#&@``@66`````X,=%J`\```#HX1D`
+M`.@&V?__Z-<:``"Y"0```.@N&0``@66```#@_^BZ&@``Z!(9``"!990``.#_
+M@66X````X.B4&@``Z$[Y___HD1H``.AY&@``Z&T:``"Y'@```.CL&```QT6`
+M`````+D>````Z(D9``#H)=O__[^X____Z+$8``#&1</`Z%L:``"!990```#@
+M@'WW`+`!N0<```!U![``N0@```!0O^[____H4!D``.@#&@``Z%S8__^!9;@`
+M``#@6-#HN0<```!S!;D&````Z&88``#H$AH``&;1K6S___]S&;@*````]V6H
+M!>(T``")QN@F&@``Z/`9``!F@[UL____`'06N0$```#H/1@``(%E@```X/__
+M3:CKOH!]BP"Y"````'0(N0<```#_3:CHQQ@``,=%D/X_``"+1:@I19"_@/__
+M_^@5&0``Z$09``"+19")1<C&1<8`98\%`````,/'1#T``````,=$/00`````
+MQT0]"`````#'1#T,```!`,=$/1``````P\=$/0``````QT0]!#7":"''1#T(
+MHMH/R<=$/0P`````QT0]$/\_``##BE6,BG6@QD6,`,9%H`"(\X/C"(M%I#M%
+MD'\W?"N+18@[19Q_+7PABT6$.T68?R-\%[^X____Z)S_____3<@)VP^$LP``
+M`.LX4^@7$@``6X/+!%-2Z._\__]:6PG;=2&+1<B#^`!_3WP&@'W&`71G4NBG
+MR?__6F6`#00````0ZWF_@/___^A-____+O^C@C4``,:%=/___P7K"O]%D,:%
+M=/___P92C7V4C76XZ-[P___H=_?__UIE_S4`````98$-```````#``!2Z.L+
+M``!:98\%`````&:#O7#___\`=!EE@`T$````(("];_____]U"&6`#04````"
+MB%7$PW07Z%GM__]E]@4``````74!P^CVQ___ZWAE@"4%````_0^V78X*7:(/
+MA=X````/MIUT____@.L4=%"`^_QT/(%]D/\_```/C\4```!\)H!]C``/A+D`
+M``"!?8@```"`#X6L````@WV$``^%H@```.G4`0``NP@```#K#X!]C``/A8P`
+M``"[!````"[_DY(U``"_N/___[Y\____Z5O+__]E@`T$`````F7V!0`````"
+M#X1;____OX#____H\<C__[^4____Z.?(___I7____V6`#00````"9?8%````
+M``(/A"S___^_@/___^C"R/__Z3K___]E@`T$`````F7V!0`````"#X0'____
+MZ[3VPQ!T&V6`#00````!9?8%``````$/A.K^___IYO[__X!]C@)T"(!]H@)U
+M+^L,]D6+0'0,@'VB`G4<]D6?0'4698`-!`````%E]@4``````0^$K_[__^DT
+MRO__BGV.#[:%=/___RP4=$$\_'0I@7V0_S\``'P9#X3&_O__@7V0_W\``'6`
+M@'V,``^%=O___[B`````ZQ2`?8P`=`F`_P$/A6#___^X0````(I=HO;'!'0"
+MMP/VPP1T`K,#@>,#`P``P.<"`/LP_\'C`@'#+O^CGC4``&6`#00````"9?8%
+M``````(/A!O^__^`?:``=#>_N/___^C4_/___T7(ZS)E@`T$`````F7V!0``
+M```"#X3P_?__O[C____HK_S__^L0@'V@`'7)O[C____H=/S__XI5C(A5Q.E)
+M_O__98`-!`````1E]@4`````!`^$L_W__\=%B````(#'18P```H`QT60_W\`
+M`+^`____BE6@]M*(5#T,Z1#^__]E@`T$`````F7V!0`````"#X1U_?__OX#_
+M__^*5:"(5#T,Z>G]__^!?9#_/P``?SI\-8%]B````(!W+X-]A`!U*>E%_O__
+M98`-!`````1E]@4`````!`^$+_W__^L)@7V0_S\``'T#]E6@OY3____IG/W_
+M_V6`#00````"9?8%``````(/A`']___KVV6`#00````"9?8%``````(/A.G\
+M__^*58PP5:#KP&6`#00````"9?8%``````(/A,O\__^*5:`P58R_@/___^E`
+M_?__6_]T-0#_=#4$_W0U"/]T-0S_=#40_^-;CT0U$(]$-0R/1#4(CT0U!(]$
+M-0#_X\=$-0``````QT0U!`````#'1#4(````@,=$-0P`````QT0U$/\_``##
+M9L>%;/___P``N/\_```K19"#^#]_48E%D(/X#W=^Z,H2``"X"@```/=ED`5"
+M-```B<;HH10``&;1I6S___^`?<,`=1/H110``(%E@```X/]F_X5L_____T60
+M@WV0#W<PZ#83``#KO,9%H`#'1:3_/P``O@8T``#H;10``.@-$P``Z&S4___H
+MA`<``.DN`0``QT60#P```.C[$@``Z)P3``"Q'NA'$@``@V6``+$>Z.H2``#H
+MAM3__^C1$P``@66`````X+[\,P``Z!$4``"+39"#P0KH!!(``(%EN```X/_H
+MB!,``.@#$@``Z)@3``#H9_+__XM-D$'HX!$``(%EN```X/_H?A,``+X&-```
+MZ+83``#H51,``.AJ$P``Z&P2``#HD='__[D(````Z*P1``"!9;@``.#_9M&M
+M;/___W,DZ$@3``#H/!,``(M-D.B3$0``@664``#@_V:!39Z``.C3\?__9H.]
+M;/___P!T$>AG$0``@66X``#@__]-D.NXL0B`?<,`=`6Q!_]-D.CU$0``N/\_
+M```K19")1<C'1<0`````]D7#@'4*L0'HUA$``/]-R.C5$@``Z.T2``##]E6,
+MZ#G^__^-=;B-?93H$NO__[Z`____Z/O]___&18S_QH5T____!KZX____Z+C]
+M___HC/'__^@4!@``C76XC7V4Z-[J__^^@/___^BP_?__Z-W2___H]04``,/H
+M]P8``'4%Z;/:__^!?9#_/P``?_)\&H%]B````(!WYX-]A`!UX8!]C`!TV_]-
+MD.O698`-!````"!E_S4`````98$-```````#``!E@24`````__/__X%]D/X_
+M``!T"(!]C`!U6>L5@7V(````@'=5@WV$`'5/@'V,`'5"Z&3]__^+3<B#^0!^
+M!L9%Q@#K#>C#PO__98`-!````!"`O6______=0AE@`T%`````F6/!0````"_
+MN/___^D&VO__Z.'^___KO(!]C`!U?[Z4____Z.?\___&A73___\&Z(;P___H
+M#@4``(UUN(U]@.C8Z?__Z*[^__^-=;B-?8#HR.G__[Z4____Z+'\___&A73_
+M__\%Z%#P___HV`0``/]%R(UUN(U]@.B?Z?__OI3____HB/S__\:%=/___P;H
+M)_#__^BO!```Z5+___^^E/___^AH_/__QH5T____!>@'\/__Z(\$``"-=;B-
+M?8#H6>G__^AP_/__C76XC7V`Z$GI__^^E/___^@R_/__QH5T____!>C1[___
+MZ%D$``"-=;B-?8#H(^G___]-D+Z4____Z`G\___&A73___\&Z*CO___H,`0`
+M`.G3_O__QD7:`.L$QD7:`67_-0````!E@0T```````,``+_B____Z"40``"*
+M1:*(1>*+1:2)A6C___^*1:"(1<[&1=@`@'W:`'4HZ+\.``"^)#0``.B@$```
+M@'W#`'5<Z5P!``#H[@\``,9%V/_IBP$``(M%D(E%R(!]C`!UYNCE#P``N?\_
+M```K39"#^4AV!;E(````Z&T.``!FBTV6@>'_'P``"<IT!F:!398`((!-GX"!
+M990```#@ZSOH^`X``.B2#P``N0$```#HV0X``+^Z____Z+H-``!T.,=%R/X_
+M``"_N/___^A7#P``Z'</``#HN0\``/^U:/___^C8S?__CX5H____@66X````
+MX.G4````Z/38__^_@O___^AQ#0``=1?&18X!BD7.,$6,C76`C7VXZ,SG___K
+M;X%EX````/_H0`\``(N%:/___XE%I(I%SHA%H.C>S___Z-`"``"+1<@]_G\`
+M`'](@_@`?$I_$K^\____Z!T-``!U/,9%Q@'K!,9%Q@!F@[UP____`'0998`-
+M!````""`O6______=0AE@`T%`````F6/!0````##Z`2____K\>C5O___Z^K_
+M19#HH`X``+X0-```Z!@/``"Y!P```.C,#0``QD78_\=%R/\_``"_NO___^BB
+M#```=`J_N/___^A&#@``Z*8.``!FQX5L____``"X_S\``"M%R(E%J(/X#P^'
+MD````.B+#@``N0<````#3:CHWPP``.C/#```Z![M__^`?<,`=!F!9;@``.#_
+MQD7#`.A7#@``9L>%;/___P$`9M&E;/___^CE#0``BTVHZ*(,``"!38@``(``
+MZ-_L__^`?<,`=!>!9;@``.#_QD7#`.@8#@``9O^%;/___[D!````Z!`-``#_
+M1:B#?:@/=K#H``T``&6`#00````@Z(\-``#HGL[__XM-J(/Y2'8%N4@```#H
+M*PP``(%EN````.#HR0T``.B8[/__Z+@-``"+3:B#^4AV!;E(````Z!4,``"!
+M98````#@Z&$-``"^&C0``.C9#0``Z)D-``#H60T``(M-J$&#^4AV!;E(````
+MZ-D+``"!990```#PZ$+L__^!9;@```#@Z#4-``#H70T``/^U:/___^B#R___
+MCX5H____@66X````X.A'#0``OBXT``#HC@T``+D'````Z((+``#_3:AFT:UL
+M____<R*+=:A.N`H```#WYHG&@<9,-```Z&H-``#H"@T``.BOZ___9H.];/__
+M_P!T"NA#"P``_TVHZ\&X!T```"M%J(E%R(%EN```X/^_N/___^C:"@``=`J_
+MN/___^A=#```@'W:`'4XZ!O6___HL@P``(M%R(E%I,:%=/___P6*1=B(1:#H
+MG.O__^B<#```BD7$B$6,BT7(B460Z?3\__^*1=B(1<3KXL9%N@&R`[^X____
+ML`#HR=___[^X____M`2H`74!P_]$/1"Q`>C""@``(=)T!8!,/0(!PXG!AT0]
+M$"G!@_E#?@6Y0P```##`Z]MT*NA8X?__98`E!0```/ME]@4``````74"6,._
+M@/___[Y^____6)_I9-3__V6`)04```#[Z$W5__]T!#'`Z]S'1:@`````98`E
+M!0```/V*18X\`79M/`9R#G0_OX#____H']?__^L*]D6+0'7.@$V+0&7V!0``
+M```!=0IE@`T$`````>N4C76`C7V4Z`#D___&1:C_6.D<U___9?8%``````)U
+M#66`#00````"Z6C____&1:K_OX#____H\;S__X/(`<-T'^B5X/__98`E!0``
+M`/ME]@4``````74"6,-8Z:+3__]E@"4%````^8I%CCP!?FL\!GPJ=$:`O73_
+M__\2=1,\$G0/@'V,`'32QT60_S\``.O)OX#____H9=;__^L*]D6+0'6W@$V+
+M0&6`#00````!9?8%``````%TG^N?98`-!`````)E]@4``````G2+OX#____H
+M5;S__X/(`<,QP(J%=/___XIEC(E%K,9%C`!FQX5H____``"+19`M_C\``'Q5
+M@_@_?@IE@`T%````!%C#QT68-<)H(<=%G*+:#\G'1:``````QT6D_C\``"E%
+MD$#H8-?__XM%A`M%B'4/B46`B460QT6.`0```.L2OX#____H]0D``&6`#00`
+M```@]H5H____!'0$9O=5K?:%:/___P)T!O95K_95KO:%:/___P%U`</V5:^`
+M?8X!=2>`?:P3=2'&18N`QD6.`,=%D/\_``"^E/___^A4]?__@\0$Z2(#``!E
+M@`T$````(,=%E`````#'19@UPF@AQT6<HMH/R<=%H`````#'1:3^/P``QD6,
+M_\:%=/___P7HO.C__V7_-0````!E@24`````__/__^@R_?__98\%`````(UU
+MN(U]@.CUX?__PV;'A6S___\``(E%D(/X$']8_TV0Z.0'``"X"@```/=ED`7B
+M-```B<;HNPD``&;1I6S___^`?<,`=1/H7PD``(%E@```X/]F_X5L_____T60
+M@WV0#W<'Z%`(``#KO.A%"```QT6``````.C?"```Z-O)__^+39#1X8/!".AM
+M!P``@66X````_^@+"0``OD(T``#H0PD``.C;"```Z%0'``#HV`@``+$)Z%,'
+M``#HV`@``.B2Y___Z#`'``#HT`@``/]-D&;1K6S___]S-NBL"```Z(T(``#H
+M;>?__^BP"```BTV0T>'H#`<``(%EE```X/_H=>?__^B."```@67@````_V:#
+MO6S___\`=!BQ`;_L____Z.P&``"!9>P``.#__TV0ZY^Q"(!]]P!T!;$'_TV0
+MO^[____H=P<``.@J"```N/\_```K19")19"_@/___^C=!P``Z!P(``#H/P<`
+M`,=%I/\_``#'1:``````OY3____HNP<``,/HY_O__W4/OI3____H7O/__^F,
+MT___Z##]__^`?8X!=2^`?:__=>#'18``````QT6$`````,=%B````(#'18P`
+M``H`QT60_W\``,9%J?_KM[C_/P``*T60@_@_?GP]_S\``'Q`BD6MB$6,98`-
+M!````!!E]@4`````$'4)@460`&```.N"OX#___^X`0```.A"^___QT60````
+M`,9%C@;I8____[Z4____Z,'R__^`?:__#X6-````9HM%K8A%C(AEH&7_-0``
+M``!E@0T```````,``/]UK.LP9?\U`````&6!#0```````P``_W6LZ)7]__^/
+M1:QFBT6MB$6,B&6@_W6L@'VO_W4'Z&4```#K!>AC````@+UO_____W4(98`-
+M!0````*-=;B-?8#H2]___[Z4____Z#3R__^/1:QECP4`````BD6M,D6NB$6,
+MZ5?2__^^@/___^CD\?__C764C7V`Z!3?__^^E/___^CF\?__P^C;____9?\U
+M`````&6!#0```````P``Z/O&___H$_K__V6/!0````##OKC____HF_'__^C-
+M____C76XC7V`Z,;>__^^E/___^B8\?__98$-```````/``#&A73___\%Z$/E
+M___HR_G__XUUN(U]E.B5WO__OH#____H?O'___]%D.A_____P^BP^O__=1K&
+M1:PKBD6,B$6MC76`C7VXZ&;>___IG0$``.@K^___@'V.`74+@'VO_W7?Z7T!
+M``"`?:__#X38`0``N/\_```K19"#^!Y\5'\1@7V(```0C7=)<K:#?80`=+`]
+M_S\``'RI98`-!````!!E]@4`````$'4)@460`&```.N.OX#___^X`0```.A*
+M^?__QT60`````,9%C@;I;_____]UK&7_-0````!E@24`````__/__V6!#0``
+M`````P``0.C&^___OH#____H=?#__[Z4____Z&OP___HF/[__[Z4____Z'/P
+M__^^@/___^AI\/__Z*C^__]ECP4`````CT6L98`E!0```/V`O6______#X6E
+M````98`-!0````+IF````(UU@(U]E.A1W?__Z(#%___H<OC__XUUN(U]@.@\
+MW?__OI3____H)?#__\:%=/___P7HQ./__^A,^/__C76XC7V4Z!;=__^^@/__
+M_^C_[____T60Z`#^__^-=;B-?93H^=S__[Z`____Z.+O___&A73___\&Z('C
+M___H"?C__\/H"_G__W4MQD6L+,9%K@"^N/___^BW[___@'VL+'0%BD6MZP.*
+M1:Z(1<2_N/___^FDS/__Z'/Y__^`?8X!=2N`?:__=<K'1;@`````QT6\````
+M`,=%P`````#'1<0```$`QT7(`````.NZ@'VO_P^$*/[__[C_/P``*T60@_@^
+M?!5_C(%]B````(!W"H-]A``/A'G_____=:QE_S4`````98$E`````/_S__]E
+M@0T```````,``$#H+?K__^@3_?__C76XC7V`Z!'<___HL/[__V6/!0````"/
+M1:QE@"4%````_8"];_____\/A2G___]E@`T%`````ND<____Z$OW__]U+#'`
+MBF6,B46LOI3____HNN[__X!]K_]U!>B3_/__9HM%K8A%C(AEH.G3SO__Z'?X
+M__^`?8X!=-&X_S\``"M%D(/X/GQ2?P^!?8@```"`=T>#?80`=4$]_S\``'RL
+M98`-!````!!E]@4`````$'4)@460`&```.N1OX#___^X`0```.BM]O__QT60
+M`````,9%C@;I<O____]UK&7_-0````!E@24`````__/__V6!#0```````P``
+M@_@>?!]_$8%]B```$(UW%'(&@WV$`'4,,=N^@/___^C$[?__G$#H!/G__YUT
+M%+Z`____Z+#M__^^E/___^BF[?__G.C2^___G704OI3____HJNW__[Z`____
+MZ*#M__^^N/___^A_[?__=`><Z-+[__^=OH#____H@^W__W0*OKC____H8.W_
+M_^@Z_?__OH#____H:.W__XUUN(U]E.B!VO__98\%`````(]%K.FK_O__9HM$
+M/0AF"T0]!F8+1#T$9@M$/0)F"T0]`'4!PV:X___#BT0]``M$/00+1#T(=0'#
+MN/_____#,<")1#T`B40]!(E$/0C#9C'`9HE$/0AFB40]!F:)1#T$9HE$/0)F
+MB40]`,.+3:C0X>L9L0&_N/___^L0L0B_E/___^L'L0B_@/___[``,=+0R`^^
+MP%"U`H#Y(')9@/E@<B%T`V8)P@M4/0`+5#T$"U0]"(C@B40]`(E$/02)1#T(
+M6,,>%A\6!P'OB?Z(RX/A8,'I!8C("Q:#Q@3B^6:Y`P`HP8C/\Z6(P8C@\ZN#
+M[PPI[Q]FB=E85U"+1#T`,=L/K<,)VHM$/00/K40]`(/'!/[-=?!8B.`/K40]
+M`%_XP[^Z____ZQ2Q"+^6____ZPNQ".L"L0&_@O___X#Y4'X"L5"(R"1X=#)1
+M5QYFF&;!Z`-F2+D)`````<\/O\`IP0'OB?XIQDX6'Q8'_?.DB`^)P8G^3_.D
+M_!]?68/A!W0;9M%D/0!FT50]`F;15#T$9M%4/09FT50]".+EP_9$/0N`=1A7
+MN0$```"#QP+HS____U__3#T0]D0]"X##Z-O___]T^<._@O___[Z6____ZV*_
+MNO___^L/O^+____H!0```+^6____OH+____K1;^6____ZP6_@O___[[N____
+MZS*_EO___^L%OX+___^^XO___^L?O^+____K$[_N____ZPR_EO___^L%OX+_
+M__^^NO___P'N'A8?%@<![[D%````\V:E'\.^\C,``.@*````O[C____I9?/_
+M_^@E````Z6_>__^_@O___^@;````Z8/>__^_EO___^OOZ`4```#I5<#__[^6
+M____'@[KJ@``!`````4`````````#P````4````8````(`````4````<````
+M+0````4```"T4@``-@```%]F<&5?<W1A<G0`7V9P95]R96=?<V5G;65N=`!?
+59G!E7W)E8V]V97(`7V9P95]E;F0`
+`
+end
diff --git a/i386/i386/fpe.b_elf b/i386/i386/fpe.b_elf
new file mode 100644
index 00000000..c04619e1
--- /dev/null
+++ b/i386/i386/fpe.b_elf
@@ -0,0 +1,576 @@
+begin 775 fpe.o
+M?T5,1@$!`0````````````(``P`!`````````#0```"08P```````#0`(``!
+M`"@`!P`$``$`````$`````````````"T4@``M%(```<`````$```````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M````````````````````````````````````````````````````````````
+M`&`>!@^@#ZB)Y>EJ"0``B>P/J0^A!Q]ASP````!65XM\)!`/M'<\BT<P/0``
+M```/A)<````]`0```'4(@\8@Z8@````]`@```'4%@\8DZWP]`P```'4%@\8H
+MZW`]!0```'4%@\8LZV0]$@```'0E/10```!T)#T5````=",]%@```'0B/1<`
+M``!T08MW&&2M9HE'`&2M9HE'!&2M9HE'"&2M9HE'#&2MB4<09*V)1Q1DK8E'
+M&(/&!&2MB4<@9*V)1R1DK8E'*&2MB4<L9*V)1S!DK6:)1S1DK8E'.(EW/&:,
+M9T!?7LL`````````````P/__`````````(#_?P````````````#_________
+M__Y_0C$``$(Q``#8$@``LA(``$H$``!*!```E08``"L%``!*!```2@0``)4&
+M```K!0``908``&4&```*!P``4P8```H%```*!0``@P8``-\$``!*!```2@0`
+M`'X&```K!0``2@0``$H$``!^!@``*P4``&4&``!E!@``"@<``%,&```*!0``
+M"@4``&P&``#?!```2@0``+,&``#Q!@``*P4``+,&``"S!@``(0<``*$&``#Q
+M!@``(0<``/$&``##!@``"@4``*$&``##!@``WP0``$H$``#;!@``LP8``"L%
+M``"S!@``(0<``+,&``"A!@``\08``/$&```A!P``PP8```H%``#;!@``H08`
+M`-\$``"^Z````,9%Q/_&1<8"C7V\'@X?%@>Y`@```/.EB4VXBP9FB47('\,>
+M`>X6'Q8'C7VXN04```#SI1_#98`-!`````AE]@4`````"'4_@6W(`&```(%]
+MR/]_``!]+X"]</____\/A:0```!E@`T$````(("];_____\/A8\```!E@`T%
+M`````NF"````98`-!````"#H\B,``#P,=!R*1<3HH",``'4298`-!0````*^
+M\@```.E1____QT7(_G\``,=%N`````#'1;S_____QT7`_____X"]=/___P5\
+M,("]=/___PA_)[(#92(5`0```(#Z`G\9=!#'1;P`````QT7``/___^L'@66\
+M`/C__\9%Q@##9?8%`````!!U(&6`#00````0@47(`&```(-]R``/CR?___^!
+M;<@`8```O[C___^X`0```.B30@``Z"`C``"P_[^X____Z"HB``!F@[UP____
+M`'0D98`-!````#"`O6______=1-E@`T%`````O9%PX!T!<9%Q@##,<")1<B_
+MO/___^A^3```QD7&!G4$QD7&`<.#?#T0`'4LQT0]$`$```#V1#T+@'4=#[U,
+M/0AT%XM$/02#Z1_WV0^E1#T(TV0]!"E,/1##BT0]!`^]R(/I'_?9T^")1#T(
+MQT0]!`````"#P2#KW'0=Z%4C``!E]@4``````0^$&@$``.CO_?__Z9H```!E
+M@"4%````_8I%C@I%H@^%!`$```^VG73___^`ZP5FP>,"+O^3$`$``+^X____
+MZ"<B``"P`.@V(0``O[C____H:D$``(M%R#W^?P``#X^W````@_@`#XRV````
+MZ)?^__\AP'44O[S____HDTL```^%G0```,9%Q@&`O7#_____=1EE@`T$````
+M(("];_____]U"&6`#04````"O[C___^^?/___^DO`0``98`-!`````)E]@4`
+M`````G15OX#____HR?[__[^4____Z+_^___I0/___V6`#00````"9?8%````
+M``)T*K^`____Z)[^___I'____V6`#00````"9?8%``````)T">N\Z!;]___K
+MBL/HYOW__^N"J!!T)F6`#00````!9?8%``````%TXK^`____Z"H8``"^?/__
+M_^F6````@'V.`G0,@'VB`@^%D````.L,]D6+0'0,@'VB`G48]D6?0'4298`-
+M!`````%E]@4``````728OGS___^`?8X"=`>_E/___^M)@'VB`G0'OX#____K
+M/(I%C(IEH%"`98M_@&6??\9%C`#&1:``QH5T____!E;H42L``%Y8B$6,OX#_
+M__^`?<0`=`B(9:"_E/___X!,/0O`Z)$4``#I%2(``(I=HHI]CO;'!'0"MP/V
+MPP1T`K,#@>,#`P``P.<"`/LP_\'C`KC[0````H5T____]N0!PR[_HR`!``##
+M98`-!`````)E]@4``````G3MOH#____K+F6`#00````"9?8%``````)TU/95
+MH.L298`-!`````)E]@4``````G2]OI3____HJ/O__^MD98`-!`````)E]@4`
+M`````G2?OOP```#H</O__\9%Q@'K,V6`#00````"9?8%``````(/A'G____K
+M%F6`#00````$9?8%``````0/A&'____HV/O__\9%Q`"*98PZ9:!T`_95Q.G&
+M_?__BF6@@+UT____!G4"]M0Z98P/A$3___]E@`T$`````67V!0`````!#X0;
+M____Z.3Z___IC_W__P```&(.``"?#@``O0X``%(/``"3#@``)!```/$.``!'
+M$```EPX``*<.```5#P``YP\``)L.``"E#@``+@\``!`0```?#0``+0T``#8-
+M``!"#0``(PT``"@-```Z#0``,0T``/X-```"#@``!@X```H.```.#@``%0X`
+M`"$.```E#@``*0X``#$.```Y#@``00X``$D.``!*#@``4@X``%H.``!]&@``
+M0"H``!$?``!R,@``.R@``!<$```7!```%P0``!<$``"!*0``%AT``%,=```[
+M*```]AX``&$I``!7'@``$SH``!,Z``#;/P``*$H``!,Z``#\%0``X1\``%P4
+M``"U(P``N1T``#LH``"V*```ERD``-`I``"O*0``]!,``/03``#T$P``]!,`
+M`/03``#T$P``]!,``)<H``"3*```I2D``'(R``#I'0``(DP``,=-``#$3@``
+MX1\``'(R```%!P,P!C$(,@"$`3.;B9R=`!F,,P(-A(0I*H2$'R`A(B,D)802
+M$!,4#RXF)Q81%2T7&"LL`(0!,P``,S.,C*B.C(2$A`"$`3.+A(J>&AD!,R\T
+MA(0:&3,SGH2$A```!P<%`04!!P<'!P<'!P<'``4!!0(%`T1$``!`0```#`P,
+M#`P,#`!$:$>81V0``&1H1$=$E$1$`P<%!0<%!P4`!P$!```!`04'`P,'!0<%
+M`0<```,#```!``<'!0$%`0<'!P<'!P<'`@<%!0@$!04%!P("!04(!('LF```
+M`&:,T&:.V&:.P&8NBQ48````9H[J#P+0]\(``$``=1"!Y/__``"!Y?__``"S
+M`>L"LP"(70+W13@```(`=1#&10,!9HE%^HU%/(E%'.LRQD4#`(M%/(E%'&:+
+M14!FB47Z9HM%1&:)10AFBT5(9HE%#&:+14QFB44$9HM%4&:)10#\N20```"-
+MO6C___\QP/.K_+('B-%FNP("@'T#`74@Q74PQD7X`<9%^0$/`D4TJ0``0`!U
+M',9%^`#&1?D`ZQ(/MW4TP>8$`W4PQD7X`,9%^0!FBP9&)(=Y^(#$0-#0T,!F
+MO@`X9B'&9L'N"V:_'@!F(<=FT><.'P^_]@^__R[_ET0'``"`Y`=FB85T____
+M9HF5=O___V:)C7G___]FB;5\____9HF]?O___XB]>/___XB=>____X!]`P%U
+M$L5U,(FU;O___V:,G7+____K&XMU,(FU;O___P^W331FB8UR____P>$$`<X6
+M'V8QR3';9HM5#&:+!D8@P'A`/&=U!H!U^0'K[CQF=0:`=?@!Z^1F26:+500\
+M9'3:9HM5`#QE=-)FBU4(/"9TRF:+530\+G3"9HM5^CPV=+KKM$:(X_9%^0$/
+MA0D"```/MP909HM%#&:.V%B`^\`/@X@```#0XW(1>`R`XPZ`^PQT$V8QP$Y.
+M9IB#XP[1XR[_DX0'``!&1H!]`P%U)V:)E6S___^)A6C____VA73___^`=4EE
+M9F6)%1@```!EHQ0```#K.`^WTL'B!`'09HS29HF5;/___XF%:/____:%=/__
+M_X!U%V5FHQ0```")PL'J$,'B#&5EB148````@'T#`74%B77\ZPP/MU4TP>($
+M*=:)=?R^=O___[^`____QT6H`````,=%K`````#'1;``````QT6T`````.@T
+M#0``9E"^>?___[^4____Z",-``!F6V8)V(J==/___]#3<Q6*C73___^`^9MW
+M<8#YCG1L@/F*=&=E98H-`````/;1@.$_92(-!````'0$S1#KYO:%=/___X!U
+M1(!]`P%U&5#$A6[___]EHPP```!E9F6,!1````!8ZR50#[>%<O___\'@!`.%
+M;O___V5FHPP```#!Z!#!X`QEHQ````!8#[_;T<-F"<`N_Y/D!P``966*!0``
+M``#VT"1_92(%!````'0@L`"*C73____VP8!T!8#YFW<89F6!#00```"`@+`"
+MZPIF98$E!````']_BTW\B4TP(D4Y=`+-$.GO\O__9@-%(&8#113#9@-%$,-F
+M`T409@-%(,-F`T449@-%&&?C!\-F`T40Z_)FBU7ZPS'_@/S`<PN`Y`>`_`1U
+M`XH^1HL&'@^A4&:+10QFCMA8@/O`#X-K_O__@\8$T.-R&'@0@.,.1X#["@^$
+M\/W__S'`3H/N`P^^P(#C#X#["'01@^,.T>,N_Y.D!P``Z<[]__^(^R'_=!"`
+MYP>`_P5U"&2+!H/&!.L39HG?@^,'P>,"+O^3I`<``&:)^XC?@.<X@/\@#X27
+M_?__B-G`P0*`X0/0ZX/C'"[_D\0'``#I?OW__P-%+,,#12C#`T4DPP-%(,,#
+M11QGXPC#`T489^,!PV:+5?K#`T44PP-%$,.+?2S3YP'XPXM]*-/G`?C#BWTD
+MT^<!^,.+?2#3YP'XP\.+?1C3YP'XPXM]%-/G`?C#BWT0T^<!^,.Q`+4`BH:D
+M"```9HG79KH%`6:)UCPQ=`T\,G0-/#!U!+`#MP'#L`;K`K`(AM]FA\K#L0/K
+MS;$!Z\FQ`NO%9KD%!.O!LP&*AJ0(``!FB==FN@4!9KD%!&:)SNNYBH:L"```
+M9M'F#[_V9HN6``D``&:^``!FB<\\`'0,/#-T#3P!=`UFB?[#9KX%`\.W`;`!
+MPXJ&V`@``(J62`D``(JV4`D``&92BI8X"0``BK9`"0``9E[KOXJ&Z`@``&;1
+MY@^_]F:+EE@)``!FO@$`ZZ:*AM@(``"*EG`)``"*ME`)``!F4HJ6:`D``(JV
+M0`D``&9>ZX+VQ"!T3F:^`!]F(<9FP>X(#[_VBH88"0``T,"Y!````,#``F:_
+M!@!F(<</O_]F_[<0"0``XNJ*AK@(```\$'0(/!%T!#P4=0*W`69?9EYF669:
+MPV:)SXJ&M`@``#P`=!`\&705/#-T(#P!="!FB?[#9KH%!&:^!0/#9KH%`6:Y
+M!01FB<YFB=?#L`&W`6:Z!0%FO@4$PXJ&\`@``#PT=02W`;`O/"]U#V:Y!01F
+MB==FB=9FN@4!PSP:=:*W`.N>BH;X"```/!IU`K,!/)YUZ+4$Z^2`_"EU%;,!
+MMP&P+V:Y!01FB==FB=9FN@4!P["$9HG.9HG/PV:^``=F(<9FP>X(#[_VBH;@
+M"```Z^-FN`$`PP``'"0Y1!PD/$2^O+JY`0("`@$"`@._N/___^CF/P``OX#_
+M__^Q`>AA-0``Z/T5``"`^@-T!HI-@XA-N@^VVK]T$```+@^V##N(C6C___^-
+M=80V_W8$]D4"`70%#[?4ZP*)XDEU`4$V_S:#[@3B^(G(OW`0```NB@P[B(UN
+M____4.+]B=.)QXJ-:/___U$VBQ,[59QU"[C_____-HM3_.L6-HM#_/=UG(E%
+ML(E5K.L2BT6P2(M5K(E%L`-5G'(6B56LBT6P]V68.U6L<@AWX38[0_AWVS'`
+MB46TBH5N____B<'!X`+WV(G&BT0UH/=EL#8I!#-S`4*+1;0V*00S<P%"B56T
+M@\8$XN`V*10S<R3_3;"*C6[___^)R,'@`O?8B<;XBT0UH#81!#.#Q@3B\S81
+M##.+1;")1#W`64ET"X/K!(/O!.E$____B<B)QHJ-;O___]#A9C8+1#/^@^X"
+MXO4(Q`AENHG.BHUN____-HM$,_R)1#6(@^X$XO**C6C___\"C6[___^`^0)U
+M`4'!X0(!S,-H9!```.L%:&@0``"_N/___^A=/@``OY3___^Q`>C8,P``Z'04
+M```/MMI?+HH$.XB%;O___RX/MH-L$```B<>+18"+782+58@/K-@"#ZS3`L'J
+M`NL-T>#1T]'2<A"`3#T"("M%E!M=F!M5G.L%Z#<```#H/````/Z-;O___W76
+M(=)X!X!,/0(@ZP7H&@```(E%@(E=A(E5B`G8"=`/I,(09@G0",2(9;K#`T64
+M$UV8$U6<P]%EN-%5O-%5P,.+19`K1:0%_S\``(E%R(I%C#)%H(A%Q.BJ_?__
+MO[C____IZCX``(M%D`-%I"W^/P``B47(BD6,,D6@B$7$Z`H```"_N/___^G$
+M/@``'K^L____%A^-7#T`Z%D]``"_N/___^A1/0``N0@```"_E/___^@//0``
+M=00!SP'+OH#___^+1#4$"T0U`'4$`<X!RU>+?#T`,<F+1#4`]^<!`Q'1@_Z`
+M=`6)2P3K)HM$-03WYP'(@](`,<D!0P01T8M$-0CWYP'(@](``4,(@](`B5,,
+M7X/#!(/'!(/_H'6SOZS____HNSP``'0$@$VZ`1_#````````````@/\_`$#^
+MBAO-2WB:U`!``,"[\!=<*3NJN/\_`,`TPF@AHMH/R0!``*"8]\_[A)H@FOT_
+M`."K><_1]Q=RL?X_````````````````T.NX!@```/;C!2P2``")QL=%N```
+M``!F+HL&9HE%NBZ+1@*)1;PNBT8&B47`+@^W1@J)1<@QP(E%Q+(#O[C____H
+M;!$``+^X____Z*`Q``!E@"4%````_3'`BT6HG[^X____Z4,&``!T%.@0$P``
+M9?8%``````%T/>DH!@``98`E!0```/V*18X\`'1@/`9T0#P2=2*_@/___^@%
+M"0``98`-!`````%E]@4``````70%Z?`%``##/`)T!>GF!0``]D6+0'0%Z=L%
+M``"`38M`Z\YE@`T$`````F7V!0`````"=--FQT60`0#&18X`N#Y````Y19!]
+M6[^`____5^@(,0``7[(#L`#HIA```("]</____]U&66`#00````@@+UO____
+M_W4(98`-!0````(QP+^$____Z`D[``!T#[^`____Z*@\``#I6P4``(E%D,9%
+MC@'I3P4``+^X____Z!,[``"_E/___^@+.P``@>'_````^6I!G.L749RQ`>C3
+M.P``G9P0R0A-N[$"Z,X[``"*18LDP(A%EK$"Z,P[``"PP(IENYUR"L'@$.A,
+M````ZQCVU,'@$`%%E(M%O/?0$468BT7`]]`119Q9XJW0V0A-NF:+1;K^Q,'@
+M$.@<````OYC___\QP.A:.@``"D67=`2`3;I`L0CI6CL``+ZX____OY3____I
+MO@D``'00Z'`1``!E]@4``````751PV6`)04```#]BD6./`!U"(!]C`!U)>M/
+M/`9U"(!]C`!U&>LR/`%T*3P2=`\\`G4%Z7+^__^`?8P`=!;HZQ```'2^O[C_
+M___H/P<``.F+````Z>/^___HPA```'2COX#____H4.W__X%MD/\_``#W19`!
+M````=`7_39#K#K$!,,"_@/___^A8+P``BT60T?@%_S\``(E%R.BB_O__,<")
+M1<2_N/___^C8#P``Z.D.``"_N/___^@=+P``@+UP_____W4998`-!````""`
+MO6______=0AE@`T%`````K^X____Z:P#````Q)UH____,<")1#T$B$0]"":+
+M`XE$/0G!^!>(9#T,)?\```#'1:@`````=42*1#T+9@M$/0EU"L9$/0X!B40]
+M$,/&1#T.!L9%JO]E]@4``````G4!P\=$/1"!/P``#[U,/0B#Z1_WV=-D/0@I
+M3#T0PSS_=3+'1#T0_W\``/=$/0C___]_QD0]#@IT&O9$/0M`=0[&1:C_9?8%
+M``````%T!<9$/0X"P\9$/0X`!8`_``")1#T0@$P]"X##'L6=:/___\9$/00`
+MBP.)1#T%BT,$'XE$/0G!^!1FF8A4/0PE_P<``,=%J`````!U:(M$/00+1#T(
+M=07I/?___\9$/0X&QD6J_V7V!0`````"=0'#QT0]$`0\```/O4P]"'07BT0]
+M!(/I'_?9#Z5$/0C39#T$*4P]$,.+1#T$#[W(@^D?]]G3X(E$/0C'1#T$````
+M`(/!(.O<9CW_!W4[QT0]$/]_``"!9#T(____#XM$/0@+1#T$QD0]#@IT.?9$
+M/0L(=0[&1:C_9?8%``````%T*<9$/0X"ZPX%`#P``(E$/1#&1#T.`(M$/00/
+MI$0]"`/!9#T$`X!,/0N`PQ[%M6C___\6!X/'!%>-?#T`N0(```#SI6:+!E\?
+M9IF(5#T()?]_``")1#T,QT6H`````'0O]D0]!X!U!+(2ZPAF/?]_=`FR`(A4
+M/0HQP,.R"HM$/00E____?PM$/0!TZ+("Z^2+1#T$"T0]`+(!=-BR!NO4<AD`
+M`(,9``":&0``L!D``*,9``#T%@``KA<``"@<```W'```BAP``*L8```H'```
+M*!P``+@9``"`?#4`!W0B,=N)7#T`BEPU`<'C`B[_HQ\9``"*7#4`P>,"+O^3
+M,QD``#'`P^@]#0``Z!0"``!T(8G>'@^H'^D8____Z"8-``!F0.OEZ!T-```"
+MA77____KV.CC`P``9D##'L6U:/___Q8'C7V6N04```#S9J4?9H%]BO__=0N_
+M@/___^BY`P``PXI%GV:8B&6,OX#____H=C8``+$24;$$Z%0W``"#99\/Z%8W
+M``"^@/___^@XZ/__L0+H,3<``(M%N&8QP`%%@(M%O!%%A(M%P!%%B#'`BD6?
+M9@%%@K``$46$$46(6>*UOX#____H"C8``'4(B460QD6.`</'19!.0```OX#_
+M___H?C<``,9%C@##OQH``$P;``"S&@``OQH``%,;``!T$.CO#```9?8%````
+M``%U`<.?=0AE@"4%````_;^`____OGS___\/MEPU`<'C`B[_HVD:``#H#0P`
+M`&9`Z9\```">#X6!````Z,\```!T&NBR#```9?8%``````$/A+D```#HLP(`
+M`.M@BT6H"<!T60C`=!UE@`T$`````67V!0`````!#X21````@$P]"T#K.`CD
+M=!1E@`T$````!&7V!0`````$='3K(&6`#00````"9?8%``````)T8("]=O__
+M_P%W!<9$/0X`Z$L-``#H=`L``.L+Z&T+```"A77___^#X`>)P8I$/0[H^0P`
+M`(UT/02_'`````'/P>$"`<^Y`@````^H!QX6'_.EBB9F)0"`9@M&!!]E9F6)
+M!\/H)@L``&9()`?H*PL``#P#PXUU@(U]E.CN#@``@'V.`'46QT6D_S\``(M%
+MD"W_/P``OX#____K;H!]C@9U),9%JO]E]@4``````G0@OY3____HU.?__XM%
+MI,=%I/\_``#KRH!]C@%T!<9%C`##QD6I_V7V!0`````$=/''19#_?P``QD6+
+M@,=%C/\`"@##Q)UH____9B:+`P^_P.L)Q)UH____)HL#B<(QP(E$/0")1#T$
+M"=!U$HE$/0B(1#T,QD0]#@&)1#T0PYF(5#T,>0+WV`^]R(/I'_?9T^")1#T(
+MN!Y````IR(E$/1#&1#T.`,,>Q;5H____%@=7C7P]!+D"````\Z5?'XM$/0B9
+MB%0]#$)U%/=4/02#1#T$`?=4/0B#5#T(`.L0"T0]!'4*B40]$,9$/0X!P\=$
+M/1`^0```#[U,/0AT%XM$/02#Z1_WV0^E1#T(TV0]!"E,/1##BT0]!`^]R(/I
+M'_?9T^")1#T(QT0]!`````"#P2#KW.C&#```Z`4```#I/`P``+$%Z)D)```>
+M#Z@?]N%FN2@`9BG!OAP```!6F`'&`<8/M\GS9J5FB<%>\V:E'\/H7@L``+$%
+MZ&8)```>]N&Y*````&8IP;\<````5Y@!QP''Q;5H____@\8.@'WX`74#@\8.
+M\V:E9HG!7_-FI1_I50L``,,QP(E$/03'1#T(````P,9$/0S_QT0]$/]_``#&
+M1#T.`L-T#^BS"0``9?8%``````%T[F6#)04```#]Z,/\__\QP)^_E/___[Y^
+M____Z;O\__]T$F6`#04```!!98`E!0```/GK2V6`)04```"XBD6./!)T/#P"
+M="`\"G04/`9U"F6`#04```!$ZR;V18N`=`AE@`T%````!-#(<PAE@`T%````
+M0-#(<PAE@`T%`````?9%C`%T"&6`#04````"PW0AZ!4)``!E]@4``````71&
+MOY3____H)O___PS_G^E@____Z!O]__]T!>E1____98`E!0```/W'1:@`````
+MBD6./!)U'V7V!0`````!=0EE@`T$`````<._@/___^C?_O__ZQ@\`G4M]D6+
+M0'429?8%``````%TUX!-BT#&1:C_C76`C7V4Z,(+``#HL_O__^GK_O__Z+K\
+M___IU/[__W00Z'8(``!E]@4``````74%P\9%C`#I@?O__W00Z%L(``!E]@4`
+M`````74$P_95C.EG^___`%"Q`>A[,0``L0'H:S$``+Z4____O[C___]FQT0U
+M````N0,```"X`````!M$-0")1#T`1D9&1D='1T?BZ>L44>CK,0``OI3____V
+MA6C___\!=`6^N/___[^`____Z"8```#0E6C___]9XM3VA6C___\!=0^^E/__
+M_[^`____Z`4```#IJ#$``(M$-0!F,<`!1#T`BT0U!!%$/02+1#4($40]",,!
+M[P'N'A8?%@>Y`P```/.E'\.?98`E!0```/EFQX5H____``">=`KH>`<``.GF
+M`0``BD6."D6B#X7.`0``BT60*T6DOX#___]\8X/X/WX.98`-!0````2#R""#
+MX#\I19!`Z/?^__\QP+^$____Z/8O``!U&67V!04````$#X0:`0``B(5H____
+MZ0\!``"_@/___^A\,0``9?8%!0````1T#,:%:/___P#IN````)^`O73___\N
+M#X6J````GG4'L0'HO#```/]-I/]UC,9%H`#&18P`QH5T____!HM%D#M%I+Z`
+M____?@6^E/___[_@____Z`K____H>!```+^X____Z'\O``"/18R_@/___W44
+M]H5H____`71-_H5H____]E6,ZT*^X/___X!]Q`!T#.C-_O__OX#____K*XM%
+MD#M%I'0*OY3____HM/[___Z%:/____]%I(I%C(A%H.@3$```O[C___^#?#T0
+M`7UAB?[H!>'__V7_-0````!E@`T!`````\:%</___P#HU>'__V6/!0````"_
+MN/___^LQB460QD6.`8"]=/___RYU',9%C`!E]@4!````!'0.9?8%`0````AU
+M!,9%C/^_@/___[Y\____Z/[X__]E@"4%````O(J%:/___\#@!G,(98`-!0``
+M``'0X',(98`-!0```$#0X',(98`-!0````+#J!!T)F6`#00````!9?8%````
+M``%TZ;^`____Z)[[__^^?/___^D*Y/__BD6BBF6.@/P"=`8\`G4IZPKV18M`
+M=`H\`G48]D6?0'4298`-!`````%E]@4``````72BZ7+C__^`O73___\8#X2M
+M````@/P*=)`\`72,@/P&=6-E@`T$`````F7V!0`````"#X1L____OX#___\\
+M"G4]]D6+@'00QT60`0```,9%C@#I;O___V7V!0`````0#X5@____Z"7A__^!
+M19``8```98`-!````!#I1____U#H"^'__U@\!G4B98`-!`````)E]@4`````
+M`@^$!?___[^4____4.CEX/__6(#\`0^$KO[__SP*#X2F_O__Z1;]__^`_`IU
+M:(!]C`!U+CP!#X35_O__/`9U$F6`#00````"9?8%``````)T6XI%H(A%C+^`
+M____Z?\````\"@^$I_[__SP&=1)E@`T$`````F7V!0`````"="V_E/___^@1
+M+0``B46DQD6B`>G+````@/P&=1-E@`T$`````F7V!0`````"=0'#/`8/A:8`
+M``!E@`T$`````F7V!0`````"=.6`_`!U#+^4____Z"#@___K+V7V!0`````0
+M=7GIE````'0*Z+<#``#I)?[__V6`)04```#]BD6."D6B#X4%_O__9?\U````
+M`&6`#0$````,Z(L+``!ECP4`````=`B`?8P`=`SK&(M%B#T]X```?@>X/>``
+M`.L,/0(@__]]!;@"(/__`46D@7VD_G\``'\5@WVD`7P@OY3___^^?/___^EL
+M]O__OI3____H!=[__^@3WO__ZRR^E/___^CTW?__9?\U`````&6`#0$````#
+MQH5P____`.C$WO__98\%`````+^X____Z[*'3#T&(<EY`O;6@>'___]_"TP]
+M`F8+3#T`=`+VTC#D]D0]"@%T$?;4QT0]`@````!FQT0]````N0(!``##ADP]
+M""#)>0+VUH#A?PM,/00+3#T`=`+VTC#D]D0]"0%T`O;4QT0]!`````#'1#T`
+M`````+D#`0``PX=,/00AR7D"]M:!X?___W\+3#T`=`+VTC#D]D0]"`%T`O;4
+MQT0]``````"Y!`$``,.+3#T$]L4$=`+VUH'A_P,```M,/0!T`O;2,.3V1#T%
+M"'0"]M3'1#T``````&:!9#T$`/BY!P@``,.'3#T`(<EY`O;6@>'___]_=`+V
+MTC#D]D0]!`%T`O;4N0@!``##N20``/,D```D)0``724``'PD```*)@```B8`
+M```F```Q)@``,/8/O_+!Y@)F,=(QR2[_EH(E``!F(=)U&SS_B-!U!XB%<?__
+M_\-FB95P____B(5O____PSS_=0B(A7'____K"6;'A7#_____`%#HM0````^V
+MV%@N_Z.6)0``]M<X?#T,=3/K)XC7".(\_W4;BIUP____9H'[_P!U#CJE;___
+M_W4&9D)T!NL.9D)T"K``B(5O____ZRC&A6______B.HP[;L,````*<L!WS#`
+M`%0]`$=)$$0]`$?B^1C`@^\,PXC$Z#\````\!'4'(.1T"S#`PSP(=?D@Y'3U
+M#/_#Z"0````\!'3T/`AT\.OCL@-E(A4!````PV6`)0$```#\90@%`0```,-E
+M98H%`0```"0,PV6`)0$```#S90@%`0```,-E98H%!0```"0XP.@#PP^VP(G!
+MT>&)R\'C`@'+@\,<96:A"````&;3Z"0#/`)U+&5F98M#"&8E_W]U`[`&PV5E
+MBT,$J0```(!U`[`2PR7___]_90L#L`IT`K`"PV7V!0`````0P^B*````9?8%
+M``````+#Z',```!E]@4``````<-E]@4`````",-E]@4!````$,-E]@4$````
+M(,-E]@4$`````<-E]@4$`````L-E@`T$````066`)04```#]PV6`#00```!!
+M98`-!0````+#98`-!````"##98`-!````!##98`-!`````C#98`-!`````'#
+M98`-!`````+#98`-!`````1E]@4`````!,-E@`T%`````<-E@`T%````0,-E
+M@`T%`````L-E@`T%````!,-E@"4%````_L-E@"4%````O\-E@"4%````_<-E
+M@"4%````N,-E@"4%````^\-E@"4$````W\/H"____W4'Z"C___]U>HJ%>/__
+M_^@&````BH5[____/`%T.30`=33H;/[__P*%=?___R0'B,&P`]#A9F73#0@`
+M``!E@"4(````_"0#90@%"````&9ETP4(````P^@W_O__Z,[___^Q".L"L3AE
+M98H%!0```"0X93`%!0````#()#AE"`4%````PQX/J`?%M6C___^_`````+D'
+M````@'WX`74-\Z4?98`-`````$#K#V:E1T?B^A]E@`T`````0+D(````,=NX
+M"@```/?COAP````!QK@#````92,%"````(/X`W1"969EBT8(9B7_?W099?9&
+M!X!U!+("ZQQF/?]_=`2R`.L2L@+K#F5EBT8$90L&L@%T`K("98`E"````/QE
+M"!4(````9F7!#0@````"0^*3PV:X?P-E9J,`````9KC__V5FHP@```!F0&5F
+MHP0```##Q)UH____9B:+`V8-0`!E9J,`````P^A%````98`-`````'_#967&
+M!00`````PXJ%>O___SP$96:A!````'0+Q)UH____9B:)`\-FB44LPV5FH0``
+M``!F)7\?9@U`$.O?'@^H'V9E@24`````?Q]F98$-`````$`0O@````#$O6C_
+M__^Y!P```(!]^`%U!/.E'\-FI49&XOH?PP!++```SRT```LO``#$+P``B3``
+M`+(J```++P``"R\``.8J``!T#^@L_?__9?8%``````%T8&6`)04```#]@+U]
+M____!'4*Z#/P___IP?W__P^VG7S____!XP*#^PA](H!]CA)U'&6`#00````!
+M9?8%``````%T'K^`____Z/OR__\N_Z,<*@``'A8?%@>Y!0```/.E'\/$O6C_
+M__\>%A^-=82Y`@```/.E'XIEC&8E`(!F"T609B:)!^E3_?__````````0'8Z
+M:PO>9?\U`````&6`#0$````#@'V.`74%Z1\!``"!?9#_?P``=3!E@`T$````
+M`67V!0`````!=07I(P$``,=%@@````#'188```"`9L=%BO__Z>\```"`?8X&
+M=0O'19`!````QD6.`+@^0```.460?;6_@/___U?HGQH``%^R`[``Z#WZ__\Q
+MP+^$____Z,(D``!U"8E%D,9%C@'K?8M4/02!^K.VX`T/AWG___^+1#T`<@L]
+M``!DIP^#:/___\=$/00`````QT0]``````"#[P*[`,J:._?S4+D$````NV0`
+M``")T#'2]_.2U`K`Y`0(X(A$/0!'XNM8"<"(5#T`=!E2,=*Y"@```/?QDEF(
+MQ(C(:@"Y!0```.O2@+UP_____W4998`-!````""`O6______=0AE@`T%````
+M`HIEC(#D@(AEBV7$/10````>%A^-=8*Y!0```/-FI1_HZOO__V6/!0````##
+M:"TH``#$O6C___^^@3\``+(`BD6./`!T1#P&=#8\`G4<]D6+0'4698`-!```
+M``%E]@4``````709@$V+0(M-B8'A__]_@`^V19#!X!<)R":)!\._@/___^@<
+MU___OWY```#H0@```,2]:/___W0M>"2+18DE____@*G___\`=!$E__]_@`^V
+M39"`P8#!X1<)R":)!\.X?___?^L%N'\``(`*18S!R`CKZ%=64K``OX#____H
+MH_C__^C<&```6EY?.760?%$/A(@````Y?9`/CHD```!E@`T$````"&7V!0``
+M```(=0-86,-E@`T$````(&5EB@4!````)`RT"#C$>!2*18SH$/G__YQX"&6`
+M#04````"G<-E]@4`````$'4*98`-!````!#KO+^`____B?!74NA[&```6E^P
+M_^@:^/__9H.]</___P!T"&6`#00````P]D6+@'4$9O]-D&:#O7#___\`=!EE
+M@`T$````(("];_____]U"&6`#04````",<!`PV@M*```O@$\``"R`HI%CCP`
+M=$,\!G0U/`%U"&;'19``/.M2/`)U'?9%BT!U%V6`#00````!9?8%``````%U
+M`<.`38M`9L=%D/]#ZRF_@/___^B?U?__O_Y#``#HQ?[__W1]>&B*185FF+^&
+M____Z/HA``!T.[$#Z&0B``#$O6C___\>%A^-=86Y`P```/-FI1]FBT609BT`
+M/&;!X`2`98L/"D6+@&6,@`IEC&8FB0?#,<#$O6C___\FB0=F)HE'!(IEC(#D
+M@&8FB4<&P[C_____Z-S___]FN.]_"F6,Z^?HS/___V8F@4\&\'_#N0Y````K
+M39!X18/Y$78%N1$```"_@/___S#`Z!07``"_@/___[($L`#HO?;__SP`=1UF
+M@7V*`(!R"'<3@'V,_W4-L``Z18QT!F;W78HPP,.`?8X`=`:`?8X&=3WHG___
+M_W4V9H.]</___P!T&66`#00````@@+UO_____W4(98`-!0````)FBT6*Q+UH
+M____9B:)!^G9^/__9C'`@'V.`73H98`-!`````%FN`"`9?8%``````%UTL.Y
+M'D```"M-D'A%@_DA=@6Y(0```+^`____,,#H6Q8``+^`____L@&P`.@$]O__
+M/`!U'8%]B````(!R"'<2@'V,_W4,L``Z18QT!?==B##`PX!]C@!T!H!]C@9U
+M.^B?____=31F@[UP____`'0998`-!````""`O6______=0AE@`T%`````HM%
+MB,2]:/___R:)!^DB^/__,<"`?8X!=.IE@`T$`````;@```"`9?8%``````%U
+MT\.Y/D```"M-D'A3@_E!=@6Y00```+^`____,,#HI!4``+^`____L@.P`.A-
+M]?__/`!U*X%]B````(!R#G<@@WV$`'4:@'V,_W44L``Z18QT#3'`]UV$&T6(
+MB46(,,##@'V.`'0&@'V.!G5"Z)'___]U.V:#O7#___\`=!EE@`T$````(("]
+M;_____]U"&6`#04````"BT6$BU6(Q+UH____)HD')HE7!.E6]___,<"9@'V.
+M`73E98`-!`````&Z````@&7V!0`````!=<[#BT6`9C'``T64B46XBT6$$T68
+MB46\BT6($T6<B47`L`#0T,.+18"!990``/__*T64B46XBT6$&T68B46\BT6(
+M&T6<B47`L`#0T,.^[T4``.L%OD\R``"+39`K3:1X*8/Y0W8%N4,```"P`+^4
+M____5O_67HM%D(E%R(!]C@%T,_9%BX!U+>L=]]F#^4-V!;E#````4;``OX#_
+M____UHM%I(E%R%F`?:(!=`BP`/9%GX!T`K#_@+UT____!G4#]E6@BF6,.F6@
+M=2CH,____[^X____Z!44``"*18R(1<3V1<.`=0R_N/___^AN'@``=5W#4(I%
+MC(A%Q.@G____O[C___\\`+@`````=!SW5#T`@T0]``'W5#T$$40]!/=4/0@1
+M1#T(]E7$Z#`>``!U&NB`]/__-`1F2&:8B&7$6#S_=19FQT7(``##6#S_=0J_
+MN/___^B4'P``P^AB'@``BT0]`"7___\?"<)T"(%,/0`````@@60]`````.##
+M=!5E@"4%````_66`#00```!!Z2@!``#'1:@`````98`E!0```/F`O73___\I
+M=17&1:``QD6B`;^4____Z+4=``")1:2*18X*1:)T=CP!='*H$`^%WP```(I%
+MCHIEHCP"=`>`_`)U+>L/]D6+0`^$Q````(#\`G4*]D6?0`^$M0```("]=/__
+M_R\/A,D```#IHP```(!]C@9U$<9%JO^#?9``=0?'19`!````@'VB!G41QD6J
+M_X-]I`!U!\=%I`$```#&A73___\&Z`+^__^_N/___^@)'0``#X2$````98`E
+M!0```+[V1<0!=`AE@`T%`````8-]J`!U!>F\]/__@'VH`'0;9?8%``````%T
+M!>FG]/__PX!]J@!U!>F;]/__98`-!`````)E]@4``````G3BZ83T__]E@`T$
+M`````<=%J/\```#K$(!]C@9T!H!]H@9U!,9%JO]E@`T%````!>L(98`E!0``
+M`/YE@`T%````0.N```"``````````````(```````````````````/4?F.S`
+MN_`77"D[JK@``````````````0```````````,``A&3>^3/S!+4```"-]!$%
+M7ZQ_B@```````````(``_________[\`H)^'UOLY&L"5`&#R5]QH7L+3I``@
+M_3RTWL_1`*X`@.:SF$C6MQ^S`,").>QWK)O6M0"`4TR1%BZT/+<`@/R+0GBW
+MA?*W`.!6NF/5:R-.N`#@,2:K4_@??+@`8/V@HA^Z*9.X`,!7&+[*>[&>N`"@
+M<$3^#15VI+@`8*H;F]*/6*>X`&"4Z9J^V,FHN`"`,W(E%X""J;@`P#3":"&B
+MV@_)`(!%>]H-*SAC[0!@%>L&9,FOV_H`P#)N>V'5U*W^`,`V3N]GN=VJ_P"`
+M0B6Q2]VMZO\`X+O5E-O=JOK_``!HN=3=K:K^_P#`2[G=W:JJ__\`H$O=W:VJ
+MZO__`*#;W=VJJOK__P#@W=VMJJK^__\`X-W=JJJJ____`.#=K:JJZO___P#@
+MW:JJJOK___\`X*VJJJK^____`,PY``"S.0``L#D``*<Y```6.0``I$$``*I!
+M``!!.@``-#P```D\```'.P``"3P``$`\```)/```\SL``#0\```T/```03H`
+M`!X\``#B.@``'CP``/,[``"S.@``03H``/<\``#`/```!SL``%L\```D.P``
+MWSP``%L\``"O/```)#L```,]``"9/```XCH```T]```-/0``LSH``$$Z```[
+M/0``.ST```<[``!9/0``.ST``"0[``!#/0``KSP``"0[```#/0``F3P``.(Z
+M```E/0``)3T``+,Z``!E_S4`````98$-```````#``!E@`T$````(,9%Q`!F
+MQX5L____``"+1:0K19")1:B#^`]^/X/X/W\%Z=````#H$-S__^@H#P``BT7(
+M@_@`#X\%`@``#XP#`@``O[S____H=!D```^%\P$``,9%Q@'IZ@$``.C-&0``
+MZ-$9``#H.OK__V;1I6S___^`?<,`=53H1AL``+^`____Z)<9``#H]OG__^A'
+M&P``ON@S``#H;AL``.@Q&P``@664````_^@+&P``9O^%;/___X!]PP!T$[D!
+M````Z&T9``"!990```#_ZP7H%!H``/]%J(-]J`]VC+D'````Z`,:``"!98``
+M``#@QT6H#P```.CA&0``Z`;9___HUQH``+D)````Z"X9``"!98```.#_Z+H:
+M``#H$AD``(%EE```X/^!9;@```#@Z)0:``#H3OG__^B1&@``Z'D:``#H;1H`
+M`+D>````Z.P8``#'18``````N1X```#HB1D``.@EV___O[C____HL1@``,9%
+MP\#H6QH``(%EE````."`??<`L`&Y!P```'4'L`"Y"````%"_[O___^A0&0``
+MZ`,:``#H7-C__X%EN````.!8T.BY!P```',%N08```#H9A@``.@2&@``9M&M
+M;/___W,9N`H```#W9:@%XC0``(G&Z"8:``#H\!D``&:#O6S___\`=!:Y`0``
+M`.@]&```@66```#@__]-J.N^@'V+`+D(````=`BY!P```/]-J.C'&```QT60
+M_C\``(M%J"E%D+^`____Z!49``#H1!D``(M%D(E%R,9%Q@!ECP4`````P\=$
+M/0``````QT0]!`````#'1#T(`````,=$/0P```$`QT0]$`````##QT0]````
+M``#'1#T$-<)H(<=$/0BBV@_)QT0]#`````#'1#T0_S\``,.*58R*=:#&18P`
+MQD6@`(CS@^,(BT6D.T60?S=\*XM%B#M%G'\M?"&+180[19A_(WP7O[C____H
+MG/____]-R`G;#X2S````ZSA3Z!<2``!;@\L$4U+H[_S__UI;"=MU(8M%R(/X
+M`']/?`:`?<8!=&=2Z*?)__]:98`-!````!#K>;^`____Z$W___\N_Z."-0``
+MQH5T____!>L*_T60QH5T____!E*-?92-=;CHWO#__^AW]___6F7_-0````!E
+M@0T```````,``%+HZPL``%IECP4`````9H.]</___P!T&66`#00````@@+UO
+M_____W4(98`-!0````*(5<3#=!?H6>W__V7V!0`````!=0'#Z/;'___K>&6`
+M)04```#]#[9=C@I=H@^%W@````^VG73___^`ZQ1T4(#[_'0\@7V0_S\```^/
+MQ0```'PF@'V,``^$N0```(%]B````(`/A:P```"#?80`#X6B````Z=0!``"[
+M"````.L/@'V,``^%C````+L$````+O^3DC4``+^X____OGS____I6\O__V6`
+M#00````"9?8%``````(/A%O___^_@/___^CQR/__OY3____HY\C__^E?____
+M98`-!`````)E]@4``````@^$+/___[^`____Z,+(___I.O___V6`#00````"
+M9?8%``````(/A`?____KM/;#$'0;98`-!`````%E]@4``````0^$ZO[__^GF
+M_O__@'V.`G0(@'VB`G4OZPSV18M`=`R`?:("=1SV19]`=19E@`T$`````67V
+M!0`````!#X2O_O__Z33*__^*?8X/MH5T____+!1T03S\="F!?9#_/P``?!D/
+MA,;^__^!?9#_?P``=8"`?8P`#X5V____N(````#K%(!]C`!T"8#_`0^%8/__
+M_[A`````BEVB]L<$=`*W`_;#!'0"LP.!XP,#``#`YP(`^S#_P>,"`<,N_Z.>
+M-0``98`-!`````)E]@4``````@^$&_[__X!]H`!T-[^X____Z-3\____1<CK
+M,F6`#00````"9?8%``````(/A/#]__^_N/___^BO_/__ZQ"`?:``=<F_N/__
+M_^AT_/__BE6,B%7$Z4G^__]E@`T$````!&7V!0`````$#X2S_?__QT6(````
+M@,=%C```"@#'19#_?P``OX#___^*5:#VTHA4/0SI$/[__V6`#00````"9?8%
+M``````(/A'7]__^_@/___XI5H(A4/0SIZ?W__X%]D/\_``!_.GPU@7V(````
+M@'<O@WV$`'4IZ47^__]E@`T$````!&7V!0`````$#X0O_?__ZPF!?9#_/P``
+M?0/V5:"_E/___^F<_?__98`-!`````)E]@4``````@^$`?W__^O;98`-!```
+M``)E]@4``````@^$Z?S__XI5C#!5H.O`98`-!`````)E]@4``````@^$R_S_
+M_XI5H#!5C+^`____Z4#]__];_W0U`/]T-03_=#4(_W0U#/]T-1#_XUN/1#40
+MCT0U#(]$-0B/1#4$CT0U`/_CQT0U``````#'1#4$`````,=$-0@```"`QT0U
+M#`````#'1#40_S\``,-FQX5L____``"X_S\``"M%D(/X/W]1B460@_@/=W[H
+MRA(``+@*````]V60!4(T``")QNBA%```9M&E;/___X!]PP!U$^A%%```@66`
+M``#@_V;_A6S_____19"#?9`/=S#H-A,``.N\QD6@`,=%I/\_``"^!C0``.AM
+M%```Z`T3``#H;-3__^B$!P``Z2X!``#'19`/````Z/L2``#HG!,``+$>Z$<2
+M``"#98``L1[HZA(``.B&U/__Z-$3``"!98````#@OOPS``#H$10``(M-D(/!
+M"N@$$@``@66X``#@_^B($P``Z`,2``#HF!,``.AG\O__BTV00>C@$0``@66X
+M``#@_^A^$P``O@8T``#HMA,``.A5$P``Z&H3``#H;!(``.B1T?__N0@```#H
+MK!$``(%EN```X/]FT:UL____<R3H2!,``.@\$P``BTV0Z),1``"!990``.#_
+M9H%-GH``Z-/Q__]F@[UL____`'01Z&<1``"!9;@``.#__TV0Z[BQ"(!]PP!T
+M!;$'_TV0Z/41``"X_S\``"M%D(E%R,=%Q`````#V1<.`=0JQ`>C6$0``_TW(
+MZ-42``#H[1(``,/V58SH.?[__XUUN(U]E.@2Z___OH#____H^_W__\9%C/_&
+MA73___\&OKC____HN/W__^B,\?__Z!0&``"-=;B-?93HWNK__[Z`____Z+#]
+M___HW=+__^CU!0``P^CW!@``=07IL]K__X%]D/\_``!_\GP:@7V(````@'?G
+M@WV$`'7A@'V,`'3;_TV0Z]9E@`T$````(&7_-0````!E@0T```````,``&6!
+M)0````#_\___@7V0_C\``'0(@'V,`'59ZQ6!?8@```"`=U6#?80`=4^`?8P`
+M=4+H9/W__XM-R(/Y`'X&QD7&`.L-Z,/"__]E@`T$````$("];_____]U"&6`
+M#04````"98\%`````+^X____Z0;:___HX?[__^N\@'V,`'5_OI3____HY_S_
+M_\:%=/___P;HAO#__^@.!0``C76XC7V`Z-CI___HKO[__XUUN(U]@.C(Z?__
+MOI3____HL?S__\:%=/___P7H4/#__^C8!```_T7(C76XC7V`Z)_I__^^E/__
+M_^B(_/__QH5T____!N@G\/__Z*\$``#I4O___[Z4____Z&C\___&A73___\%
+MZ`?P___HCP0``(UUN(U]@.A9Z?__Z'#\__^-=;B-?8#H2>G__[Z4____Z#+\
+M___&A73___\%Z-'O___H600``(UUN(U]@.@CZ?___TV0OI3____H"?S__\:%
+M=/___P;HJ.___^@P!```Z=/^___&1=H`ZP3&1=H!9?\U`````&6!#0``````
+M`P``O^+____H)1```(I%HHA%XHM%I(F%:/___XI%H(A%SL9%V`"`?=H`=2CH
+MOPX``+XD-```Z*`0``"`?<,`=5SI7`$``.CN#P``QD78_^F+`0``BT60B47(
+M@'V,`'7FZ.4/``"Y_S\``"M-D(/Y2'8%N4@```#H;0X``&:+39:!X?\?```)
+MRG0&9H%-E@`@@$V?@(%EE````.#K.^CX#@``Z)(/``"Y`0```.C9#@``O[K_
+M___HN@T``'0XQT7(_C\``+^X____Z%</``#H=P\``.BY#P``_[5H____Z-C-
+M__^/A6C___^!9;@```#@Z=0```#H]-C__[^"____Z'$-``!U%\9%C@&*1<XP
+M18R-=8"-?;CHS.?__^MO@67@````_^A`#P``BX5H____B46DBD7.B$6@Z-[/
+M___HT`(``(M%R#W^?P``?TB#^`!\2G\2O[S____H'0T``'4\QD7&`>L$QD7&
+M`&:#O7#___\`=!EE@`T$````(("];_____]U"&6`#04````"98\%`````,/H
+M!+___^OQZ-6____KZO]%D.B@#@``OA`T``#H&`\``+D'````Z,P-``#&1=C_
+MQT7(_S\``+^Z____Z*(,``!T"K^X____Z$8.``#HI@X``&;'A6S___\``+C_
+M/P``*T7(B46H@_@/#X>0````Z(L.``"Y!P````--J.C?#```Z,\,``#H'NW_
+M_X!]PP!T&8%EN```X/_&1<,`Z%<.``!FQX5L____`0!FT:5L____Z.4-``"+
+M3:CHH@P``(%-B```@`#HW^S__X!]PP!T%X%EN```X/_&1<,`Z!@.``!F_X5L
+M____N0$```#H$`T``/]%J(-]J`]VL.@`#0``98`-!````"#HCPT``.B>SO__
+MBTVH@_E(=@6Y2````.@K#```@66X````X.C)#0``Z)CL___HN`T``(M-J(/Y
+M2'8%N4@```#H%0P``(%E@````.#H80T``+X:-```Z-D-``#HF0T``.A9#0``
+MBTVH08/Y2'8%N4@```#HV0L``(%EE````/#H0NS__X%EN````.#H-0T``.A=
+M#0``_[5H____Z(/+__^/A6C___^!9;@```#@Z$<-``"^+C0``.B.#0``N0<`
+M``#H@@L``/]-J&;1K6S___]S(HMUJ$ZX"@```/?FB<:!QDPT``#H:@T``.@*
+M#0``Z*_K__]F@[UL____`'0*Z$,+``#_3:CKP;@'0```*T6HB47(@66X``#@
+M_[^X____Z-H*``!T"K^X____Z%T,``"`?=H`=3CH&];__^BR#```BT7(B46D
+MQH5T____!8I%V(A%H.B<Z___Z)P,``"*1<2(18R+1<B)19#I]/S__XI%V(A%
+MQ.OBQD6Z`;(#O[C___^P`.C)W___O[C___^T!*@!=0'#_T0]$+$!Z,(*```A
+MTG0%@$P]`@'#B<&'1#T0*<&#^4-^!;E#````,,#KVW0JZ%CA__]E@"4%````
+M^V7V!0`````!=0)8P[^`____OG[___]8G^EDU/__98`E!0```/OH3=7__W0$
+M,<#KW,=%J`````!E@"4%````_8I%CCP!=FT\!G(.=#^_@/___^@?U___ZPKV
+M18M`=<Z`38M`9?8%``````%U"F6`#00````!ZY2-=8"-?93H`.3__\9%J/]8
+MZ1S7__]E]@4``````G4-98`-!`````+I:/___\9%JO^_@/___^CQO/__@\@!
+MPW0?Z)7@__]E@"4%````^V7V!0`````!=0)8PUCIHM/__V6`)04```#YBD6.
+M/`%^:SP&?"IT1H"]=/___Q)U$SP2=`^`?8P`=-+'19#_/P``Z\F_@/___^AE
+MUO__ZPKV18M`=;>`38M`98`-!`````%E]@4``````72?ZY]E@`T$`````F7V
+M!0`````"=(N_@/___^A5O/__@\@!PS'`BH5T____BF6,B46LQD6,`&;'A6C_
+M__\``(M%D"W^/P``?%6#^#]^"F6`#04````$6,/'19@UPF@AQT6<HMH/R<=%
+MH`````#'1:3^/P``*4600.A@U___BT6$"T6(=0^)18")19#'18X!````ZQ*_
+M@/___^CU"0``98`-!````"#VA6C___\$=`1F]U6M]H5H____`G0&]E6O]E6N
+M]H5H____`74!P_95KX!]C@%U)X!]K!-U(<9%BX#&18X`QT60_S\``+Z4____
+MZ%3U__^#Q`3I(@,``&6`#00````@QT64`````,=%F#7":"''19RBV@_)QT6@
+M`````,=%I/X_``#&18S_QH5T____!>B\Z/__9?\U`````&6!)0````#_\___
+MZ#+]__]ECP4`````C76XC7V`Z/7A___#9L>%;/___P``B460@_@0?UC_39#H
+MY`<``+@*````]V60!>(T``")QNB["0``9M&E;/___X!]PP!U$^A?"0``@66`
+M``#@_V;_A6S_____19"#?9`/=P?H4`@``.N\Z$4(``#'18``````Z-\(``#H
+MV\G__XM-D-'A@\$(Z&T'``"!9;@```#_Z`L)``"^0C0``.A#"0``Z-L(``#H
+M5`<``.C8"```L0GH4P<``.C8"```Z)+G___H,`<``.C0"```_TV09M&M;/__
+M_W,VZ*P(``#HC0@``.AMY___Z+`(``"+39#1X>@,!P``@664``#@_^AUY___
+MZ(X(``"!9>````#_9H.];/___P!T&+$!O^S____H[`8``(%E[```X/__39#K
+MG[$(@'WW`'0%L0?_39"_[O___^AW!P``Z"H(``"X_S\``"M%D(E%D+^`____
+MZ-T'``#H'`@``.@_!P``QT6D_S\``,=%H`````"_E/___^B[!P``P^CG^___
+M=0^^E/___^A>\___Z8S3___H,/W__X!]C@%U+X!]K_]UX,=%@`````#'180`
+M````QT6(````@,=%C```"@#'19#_?P``QD6I_^NWN/\_```K19"#^#]^?#W_
+M/P``?$"*1:V(18QE@`T$````$&7V!0`````0=0F!19``8```ZX*_@/___[@!
+M````Z$+[___'19``````QD6.!NEC____OI3____HP?+__X!]K_\/A8T```!F
+MBT6MB$6,B&6@9?\U`````&6!#0```````P``_W6LZS!E_S4`````98$-````
+M```#``#_=:SHE?W__X]%K&:+1:V(18R(9:#_=:R`?:__=0?H90```.L%Z&,`
+M``"`O6______=0AE@`T%`````HUUN(U]@.A+W___OI3____H-/+__X]%K&6/
+M!0````"*1:TR1:Z(18SI5]+__[Z`____Z.3Q__^-=92-?8#H%-___[Z4____
+MZ.;Q___#Z-O___]E_S4`````98$-```````#``#H^\;__^@3^O__98\%````
+M`,.^N/___^B;\?__Z,W___^-=;B-?8#HQM[__[Z4____Z)CQ__]E@0T`````
+M``\``,:%=/___P7H0^7__^C+^?__C76XC7V4Z)7>__^^@/___^A^\?___T60
+MZ'_____#Z+#Z__]U&L9%K"N*18R(1:V-=8"-?;CH9M[__^F=`0``Z"O[__^`
+M?8X!=0N`?:__==_I?0$``(!]K_\/A-@!``"X_S\``"M%D(/X'GQ4?Q&!?8@`
+M`!"-=TERMH-]A`!TL#W_/P``?*EE@`T$````$&7V!0`````0=0F!19``8```
+MZXZ_@/___[@!````Z$KY___'19``````QD6.!NEO_____W6L9?\U`````&6!
+M)0````#_\___98$-```````#``!`Z,;[__^^@/___^AU\/__OI3____H:_#_
+M_^B8_O__OI3____H<_#__[Z`____Z&GP___HJ/[__V6/!0````"/1:QE@"4%
+M````_8"];_____\/A:4```!E@`T%`````NF8````C76`C7V4Z%'=___H@,7_
+M_^AR^/__C76XC7V`Z#S=__^^E/___^@E\/__QH5T____!>C$X___Z$SX__^-
+M=;B-?93H%MW__[Z`____Z/_O____19#H`/[__XUUN(U]E.CYW/__OH#____H
+MXN___\:%=/___P;H@>/__^@)^/__P^@+^?__=2W&1:PLQD6N`+ZX____Z+?O
+M__^`?:PL=`6*1:WK`XI%KHA%Q+^X____Z:3,___H<_G__X!]C@%U*X!]K_]U
+MRL=%N`````#'1;P`````QT7``````,=%Q````0#'1<@`````Z[J`?:__#X0H
+M_O__N/\_```K19"#^#Y\%7^,@7V(````@'<*@WV$``^$>?____]UK&7_-0``
+M``!E@24`````__/__V6!#0```````P``0.@M^O__Z!/]__^-=;B-?8#H$=S_
+M_^BP_O__98\%`````(]%K&6`)04```#]@+UO_____P^%*?___V6`#04````"
+MZ1S____H2_?__W4L,<"*98R)1:R^E/___^BZ[O__@'VO_W4%Z)/\__]FBT6M
+MB$6,B&6@Z=/.___H=_C__X!]C@%TT;C_/P``*T60@_@^?%)_#X%]B````(!W
+M1X-]A`!U03W_/P``?*QE@`T$````$&7V!0`````0=0F!19``8```ZY&_@/__
+M_[@!````Z*WV___'19``````QD6.!NER_____W6L9?\U`````&6!)0````#_
+M\___98$-```````#``"#^!Y\'W\1@7V(```0C7<4<@:#?80`=0PQV[Z`____
+MZ,3M__^<0.@$^?__G704OH#____HL.W__[Z4____Z*;M__^<Z-+[__^==!2^
+ME/___^BJ[?__OH#____HH.W__[ZX____Z'_M__]T!YSHTOO__YV^@/___^B#
+M[?__=`J^N/___^A@[?__Z#K]__^^@/___^AH[?__C76XC7V4Z(':__]ECP4`
+M````CT6LZ:O^__]FBT0]"&8+1#T&9@M$/01F"T0]`F8+1#T`=0'#9KC__\.+
+M1#T`"T0]!`M$/0AU`<.X_____\,QP(E$/0")1#T$B40]",-F,<!FB40]"&:)
+M1#T&9HE$/01FB40]`F:)1#T`PXM-J-#AZQFQ`;^X____ZQ"Q"+^4____ZP>Q
+M"+^`____L``QTM#(#[[`4+4"@/D@<EF`^6!R(70#9@G""U0]``M4/00+5#T(
+MB.")1#T`B40]!(E$/0A8PQX6'Q8'`>^)_HC+@^%@P>D%B,@+%H/&!.+Y9KD#
+M`"C!B,_SI8C!B.#SJX/O#"GO'V:)V5A74(M$/0`QVP^MPPG:BT0]!`^M1#T`
+M@\<$_LUU\%B(X`^M1#T`7_C#O[K____K%+$(OY;____K"[$(ZP*Q`;^"____
+M@/E0?@*Q4(C()'AT,E%7'F:89L'H`V9(N0D````!SP^_P"G!`>^)_BG&3A8?
+M%@?]\Z2(#XG!B?Y/\Z3\'U]9@^$'=!MFT60]`&;15#T"9M%4/01FT50]!F;1
+M5#T(XN7#]D0]"X!U&%>Y`0```(/'`NC/____7_],/1#V1#T+@,/HV____W3Y
+MP[^"____OI;____K8K^Z____ZP^_XO___^@%````OY;___^^@O___^M%OY;_
+M___K!;^"____ON[____K,K^6____ZP6_@O___[[B____ZQ^_XO___^L3O^[_
+M___K#+^6____ZP6_@O___[ZZ____`>X>%A\6!P'ON04```#S9J4?P[[R,P``
+MZ`H```"_N/___^EE\___Z"4```#I;][__[^"____Z!L```#I@][__[^6____
+MZ^_H!0```.E5P/__OY;___\>#NNJ````+G-Y;71A8@`N<W1R=&%B`"YS:'-T
+M<G1A8@`N=&5X=``N9&%T80`N8G-S````````````````````````````````
+M```````#``$``````+12`````````P`"``````"T4@````````,``P`!````
+M```````````1``$`"P```!@`````````$0`!`!L````<`````````!$``0`G
+M````M%(````````1``$``&9P95]S=&%R=`!F<&5?<F5G7W-E9VUE;G0`9G!E
+M7W)E8V]V97(`9G!E7V5N9```````````````````````````````````````
+M`````````````````!L````!````!P``````````$```M%(`````````````
+M!``````````A`````0````,```"T4@``M&(```````````````````0`````
+M````)P````@````#````M%(``+1B```````````````````$`````````!$`
+M```#``````````````"T8@``+````````````````0`````````!`````@``
+M````````````X&(``(`````&````!`````0````0````"0````,`````````
+<`````&!C```O```````````````!```````````0
+`
+end
diff --git a/i386/i386/fpe_linkage.c b/i386/i386/fpe_linkage.c
new file mode 100644
index 00000000..cac58e0b
--- /dev/null
+++ b/i386/i386/fpe_linkage.c
@@ -0,0 +1,359 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Support routines for FP emulator.
+ */
+
+#include <fpe.h>
+
+#include <cpus.h>
+
+#include <mach/std_types.h>
+#include <mach/exception.h>
+#include <mach/thread_status.h>
+
+#include <kern/cpu_number.h>
+#include <kern/thread.h>
+
+#include <vm/vm_kern.h>
+
+#include <mach/machine/eflags.h>
+#include "vm_param.h"
+#include <i386/pmap.h>
+#include <i386/thread.h>
+#include <i386/fpu.h>
+#include "proc_reg.h"
+#include "seg.h"
+#include "idt.h"
+#include "gdt.h"
+
+#if NCPUS > 1
+#include <i386/mp_desc.h>
+#endif
+
+extern vm_offset_t kvtophys();
+
+/*
+ * Symbols exported from FPE emulator.
+ */
+extern char fpe_start[]; /* start of emulator text;
+ also emulation entry point */
+extern char fpe_end[]; /* end of emulator text */
+extern int fpe_reg_segment;
+ /* word holding segment number for
+ FPE register/status area */
+extern char fpe_recover[]; /* emulation fault recovery entry point */
+
+extern void fix_desc();
+
+#if NCPUS > 1
+#define curr_gdt(mycpu) (mp_gdt[mycpu])
+#define curr_idt(mycpu) (mp_desc_table[mycpu]->idt)
+#else
+#define curr_gdt(mycpu) (gdt)
+#define curr_idt(mycpu) (idt)
+#endif
+
+#define gdt_desc_p(mycpu,sel) \
+ ((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])
+#define idt_desc_p(mycpu,idx) \
+ ((struct real_gate *)&curr_idt(mycpu)[idx])
+
+void set_user_access(); /* forward */
+
+/*
+ * long pointer for calling FPE register recovery routine.
+ */
+struct long_ptr {
+ unsigned long offset;
+ unsigned short segment;
+};
+
+struct long_ptr fpe_recover_ptr;
+
+/*
+ * Initialize descriptors for FP emulator.
+ */
+void
+fpe_init()
+{
+ register struct real_descriptor *gdt_p;
+ register struct real_gate *idt_p;
+
+ /*
+ * Map in the pages for the FP emulator:
+ * read-only, user-accessible.
+ */
+ set_user_access(pmap_kernel(),
+ (vm_offset_t)fpe_start,
+ (vm_offset_t)fpe_end,
+ FALSE);
+
+ /*
+ * Put the USER_FPREGS segment value in the FP emulator.
+ */
+ fpe_reg_segment = USER_FPREGS;
+
+ /*
+ * Change exception 7 gate (coprocessor not present)
+ * to a trap gate to the FPE code segment.
+ */
+ idt_p = idt_desc_p(cpu_number(), 7);
+ idt_p->offset_low = 0; /* offset of FPE entry */
+ idt_p->offset_high = 0;
+ idt_p->selector = FPE_CS; /* FPE code segment */
+ idt_p->word_count = 0;
+ idt_p->access = ACC_P|ACC_PL_K|ACC_TRAP_GATE;
+ /* trap gate */
+ /* kernel privileges only,
+ so INT $7 does not call
+ the emulator */
+
+ /*
+ * Build GDT entry for FP code segment.
+ */
+ gdt_p = gdt_desc_p(cpu_number(), FPE_CS);
+ gdt_p->base_low = ((vm_offset_t) fpe_start) & 0xffff;
+ gdt_p->base_med = (((vm_offset_t) fpe_start) >> 16) & 0xff;
+ gdt_p->base_high = ((vm_offset_t) fpe_start) >> 24;
+ gdt_p->limit_low = (vm_offset_t) fpe_end
+ - (vm_offset_t) fpe_start
+ - 1;
+ gdt_p->limit_high = 0;
+ gdt_p->granularity = SZ_32;
+ gdt_p->access = ACC_P|ACC_PL_K|ACC_CODE_CR;
+ /* conforming segment,
+ usable by kernel */
+
+ /*
+ * Build GDT entry for user FP state area - template,
+ * since each thread has its own.
+ */
+ gdt_p = gdt_desc_p(cpu_number(), USER_FPREGS);
+ /* descriptor starts as 0 */
+ gdt_p->limit_low = sizeof(struct i386_fp_save)
+ + sizeof(struct i386_fp_regs)
+ - 1;
+ gdt_p->limit_high = 0;
+ gdt_p->granularity = 0;
+ gdt_p->access = ACC_PL_U|ACC_DATA_W;
+ /* start as "not present" */
+
+ /*
+ * Set up the recovery routine pointer
+ */
+ fpe_recover_ptr.offset = fpe_recover - fpe_start;
+ fpe_recover_ptr.segment = FPE_CS;
+
+ /*
+ * Set i386 to emulate coprocessor.
+ */
+ set_cr0((get_cr0() & ~CR0_MP) | CR0_EM);
+}
+
+/*
+ * Enable FPE use for a new thread.
+ * Allocates the FP save area.
+ */
+boolean_t
+fp_emul_error(regs)
+ struct i386_saved_state *regs;
+{
+ register struct i386_fpsave_state *ifps;
+ register vm_offset_t start_va;
+
+ if ((regs->err & 0xfffc) != (USER_FPREGS & ~SEL_PL))
+ return FALSE;
+
+ /*
+ * Make the FPU save area user-accessible (by FPE)
+ */
+ ifps = current_thread()->pcb->ims.ifps;
+ if (ifps == 0) {
+ /*
+ * No FP register state yet - allocate it.
+ */
+ fp_state_alloc();
+ ifps = current_thread()->pcb->ims.ifps;
+ }
+
+ panic("fp_emul_error: FP emulation is probably broken because of VM changes; fix! XXX");
+ start_va = (vm_offset_t) &ifps->fp_save_state;
+ set_user_access(current_map()->pmap,
+ start_va,
+ start_va + sizeof(struct i386_fp_save),
+ TRUE);
+
+ /*
+ * Enable FPE use for this thread
+ */
+ enable_fpe(ifps);
+
+ return TRUE;
+}
+
+/*
+ * Enable FPE use. ASSUME that kernel does NOT use FPU
+ * except to handle user exceptions.
+ */
+void
+enable_fpe(ifps)
+ register struct i386_fpsave_state *ifps;
+{
+ struct real_descriptor *dp;
+ vm_offset_t start_va;
+
+ dp = gdt_desc_p(cpu_number(), USER_FPREGS);
+ start_va = (vm_offset_t)&ifps->fp_save_state;
+
+ dp->base_low = start_va & 0xffff;
+ dp->base_med = (start_va >> 16) & 0xff;
+ dp->base_high = start_va >> 24;
+ dp->access |= ACC_P;
+}
+
+void
+disable_fpe()
+{
+ /*
+ * The kernel might be running with fs & gs segments
+ * which refer to USER_FPREGS, if we entered the kernel
+	 * from an FP-using thread. We have to clear these segments
+ * lest we get a Segment Not Present trap. This would happen
+ * if the kernel took an interrupt or fault after clearing
+ * the present bit but before exiting to user space (which
+ * would reset fs & gs from the current user thread).
+ */
+
+ asm volatile("xorl %eax, %eax");
+ asm volatile("movw %ax, %fs");
+ asm volatile("movw %ax, %gs");
+
+ gdt_desc_p(cpu_number(), USER_FPREGS)->access &= ~ACC_P;
+}
+
+void
+set_user_access(pmap, start, end, writable)
+ pmap_t pmap;
+ vm_offset_t start;
+ vm_offset_t end;
+ boolean_t writable;
+{
+ register vm_offset_t va;
+ register pt_entry_t * dirbase = pmap->dirbase;
+ register pt_entry_t * ptep;
+ register pt_entry_t * pdep;
+
+ start = i386_trunc_page(start);
+ end = i386_round_page(end);
+
+ for (va = start; va < end; va += I386_PGBYTES) {
+
+ pdep = &dirbase[lin2pdenum(kvtolin(va))];
+ *pdep |= INTEL_PTE_USER;
+ ptep = (pt_entry_t *)ptetokv(*pdep);
+ ptep = &ptep[ptenum(va)];
+ *ptep |= INTEL_PTE_USER;
+ if (!writable)
+ *ptep &= ~INTEL_PTE_WRITE;
+ }
+}
+
+/*
+ * Route exception through emulator fixup routine if
+ * it occurred within the emulator.
+ */
+extern void exception();
+
+void
+fpe_exception_fixup(exc, code, subcode)
+ int exc, code, subcode;
+{
+ thread_t thread = current_thread();
+ pcb_t pcb = thread->pcb;
+
+ if (pcb->iss.efl & EFL_VM) {
+ /*
+ * The emulator doesn`t handle V86 mode.
+ * If this is a GP fault on the emulator`s
+ * code segment, change it to an FP not present
+ * fault.
+ */
+ if (exc == EXC_BAD_INSTRUCTION
+ && code == EXC_I386_GPFLT
+ && subcode == FPE_CS + 1)
+ {
+ exc = EXC_ARITHMETIC; /* arithmetic error: */
+ code = EXC_I386_NOEXT; /* no FPU */
+ subcode = 0;
+ }
+ }
+ else
+ if ((pcb->iss.cs & 0xfffc) == FPE_CS) {
+ /*
+ * Pass registers to emulator,
+ * to let it fix them up.
+ * The emulator fixup routine knows about
+ * an i386_thread_state.
+ */
+ struct i386_thread_state tstate;
+ unsigned int count;
+
+ count = i386_THREAD_STATE_COUNT;
+ (void) thread_getstatus(thread,
+ i386_REGS_SEGS_STATE,
+ (thread_state_t) &tstate,
+ &count);
+
+ /*
+ * long call to emulator register recovery routine
+ */
+ asm volatile("pushl %0; lcall %1; addl $4,%%esp"
+ :
+ : "r" (&tstate),
+ "m" (*(char *)&fpe_recover_ptr) );
+
+ (void) thread_setstatus(thread,
+ i386_REGS_SEGS_STATE,
+ (thread_state_t) &tstate,
+ count);
+ /*
+ * In addition, check for a GP fault on 'int 16' in
+ * the emulator, since the interrupt gate is protected.
+ * If so, change it to an arithmetic error.
+ */
+ if (exc == EXC_BAD_INSTRUCTION
+ && code == EXC_I386_GPFLT
+ && subcode == 8*16+2) /* idt[16] */
+ {
+ exc = EXC_ARITHMETIC;
+ code = EXC_I386_EXTERR;
+ subcode = pcb->ims.ifps->fp_save_state.fp_status;
+ }
+ }
+ exception(exc, code, subcode);
+}
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c
new file mode 100644
index 00000000..dc49a6a4
--- /dev/null
+++ b/i386/i386/fpu.c
@@ -0,0 +1,750 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992-1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Support for 80387 floating point or FP emulator.
+ */
+#include <cpus.h>
+#include <fpe.h>
+#include <platforms.h>
+
+#include <mach/exception.h>
+#include <mach/machine/thread_status.h>
+#include <mach/machine/fp_reg.h>
+
+#include <machine/machspl.h> /* spls */
+#include <kern/mach_param.h>
+#include <kern/thread.h>
+#include <kern/zalloc.h>
+
+#include <i386/thread.h>
+#include <i386/fpu.h>
+#include "cpu_number.h"
+
+#if 0
+#include <i386/ipl.h>
+extern int curr_ipl;
+#define ASSERT_IPL(L) \
+{ \
+ if (curr_ipl != L) { \
+ printf("IPL is %d, expected %d\n", curr_ipl, L); \
+ panic("fpu: wrong ipl"); \
+ } \
+}
+#else
+#define ASSERT_IPL(L)
+#endif
+
+extern void i386_exception();
+
+int fp_kind = FP_387; /* 80387 present */
+zone_t ifps_zone; /* zone for FPU save area */
+
+#if NCPUS == 1
+volatile thread_t fp_thread = THREAD_NULL;
+ /* thread whose state is in FPU */
+ /* always THREAD_NULL if emulating
+ FPU */
+volatile thread_t fp_intr_thread = THREAD_NULL;
+
+
+#define clear_fpu() \
+ { \
+ set_ts(); \
+ fp_thread = THREAD_NULL; \
+ }
+
+#else /* NCPUS > 1 */
+#define clear_fpu() \
+ { \
+ set_ts(); \
+ }
+
+#endif
+
+
+/*
+ * Look for FPU and initialize it.
+ * Called on each CPU.
+ */
+void
+init_fpu()
+{
+ unsigned short status, control;
+
+ /*
+ * Check for FPU by initializing it,
+ * then trying to read the correct bit patterns from
+ * the control and status registers.
+ */
+ set_cr0(get_cr0() & ~(CR0_EM|CR0_TS)); /* allow use of FPU */
+
+ fninit();
+ status = fnstsw();
+ fnstcw(&control);
+
+ if ((status & 0xff) == 0 &&
+ (control & 0x103f) == 0x3f)
+ {
+ /*
+ * We have a FPU of some sort.
+ * Compare -infinity against +infinity
+ * to check whether we have a 287 or a 387.
+ */
+ volatile double fp_infinity, fp_one, fp_zero;
+ fp_one = 1.0;
+ fp_zero = 0.0;
+ fp_infinity = fp_one / fp_zero;
+ if (fp_infinity == -fp_infinity) {
+ /*
+ * We have an 80287.
+ */
+ fp_kind = FP_287;
+ asm volatile(".byte 0xdb; .byte 0xe4"); /* fnsetpm */
+ }
+ else {
+ /*
+ * We have a 387.
+ */
+ fp_kind = FP_387;
+ }
+ /*
+ * Trap wait instructions. Turn off FPU for now.
+ */
+ set_cr0(get_cr0() | CR0_TS | CR0_MP);
+ }
+ else {
+#if FPE
+ /*
+ * Use the floating-point emulator.
+ */
+ fp_kind = FP_SOFT;
+ fpe_init();
+#else /* no fpe */
+ /*
+ * NO FPU.
+ */
+ fp_kind = FP_NO;
+ set_cr0(get_cr0() | CR0_EM);
+#endif
+ }
+}
+
+/*
+ * Initialize FP handling.
+ */
+void
+fpu_module_init()
+{
+ ifps_zone = zinit(sizeof(struct i386_fpsave_state),
+ THREAD_MAX * sizeof(struct i386_fpsave_state),
+ THREAD_CHUNK * sizeof(struct i386_fpsave_state),
+ 0, "i386 fpsave state");
+}
+
+/*
+ * Free a FPU save area.
+ * Called only when thread terminating - no locking necessary.
+ */
+void
+fp_free(fps)
+ struct i386_fpsave_state *fps;
+{
+ASSERT_IPL(SPL0);
+#if NCPUS == 1
+ if ((fp_thread != THREAD_NULL) && (fp_thread->pcb->ims.ifps == fps)) {
+ /*
+ * Make sure we don't get FPU interrupts later for
+ * this thread
+ */
+ fwait();
+
+ /* Mark it free and disable access */
+ clear_fpu();
+ }
+#endif /* NCPUS == 1 */
+ zfree(ifps_zone, (vm_offset_t) fps);
+}
+
+/*
+ * Set the floating-point state for a thread.
+ * If the thread is not the current thread, it is
+ * not running (held). Locking needed against
+ * concurrent fpu_set_state or fpu_get_state.
+ */
+kern_return_t
+fpu_set_state(thread, state)
+ thread_t thread;
+ struct i386_float_state *state;
+{
+ register pcb_t pcb = thread->pcb;
+ register struct i386_fpsave_state *ifps;
+ register struct i386_fpsave_state *new_ifps;
+
+ASSERT_IPL(SPL0);
+ if (fp_kind == FP_NO)
+ return KERN_FAILURE;
+
+#if NCPUS == 1
+
+ /*
+ * If this thread`s state is in the FPU,
+ * discard it; we are replacing the entire
+ * FPU state.
+ */
+ if (fp_thread == thread) {
+ fwait(); /* wait for possible interrupt */
+ clear_fpu(); /* no state in FPU */
+ }
+#endif
+
+ if (state->initialized == 0) {
+ /*
+ * new FPU state is 'invalid'.
+ * Deallocate the fp state if it exists.
+ */
+ simple_lock(&pcb->lock);
+ ifps = pcb->ims.ifps;
+ pcb->ims.ifps = 0;
+ simple_unlock(&pcb->lock);
+
+ if (ifps != 0) {
+ zfree(ifps_zone, (vm_offset_t) ifps);
+ }
+ }
+ else {
+ /*
+ * Valid state. Allocate the fp state if there is none.
+ */
+ register struct i386_fp_save *user_fp_state;
+ register struct i386_fp_regs *user_fp_regs;
+
+ user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
+ user_fp_regs = (struct i386_fp_regs *)
+ &state->hw_state[sizeof(struct i386_fp_save)];
+
+ new_ifps = 0;
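+	    /*
+	     * The save area is allocated with the pcb lock dropped and
+	     * the lookup retried, since pcb->ims.ifps may have been set
+	     * in the meantime.
+	     */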
+ Retry:
+ simple_lock(&pcb->lock);
+ ifps = pcb->ims.ifps;
+ if (ifps == 0) {
+ if (new_ifps == 0) {
+ simple_unlock(&pcb->lock);
+ new_ifps = (struct i386_fpsave_state *) zalloc(ifps_zone);
+ goto Retry;
+ }
+ ifps = new_ifps;
+ new_ifps = 0;
+ pcb->ims.ifps = ifps;
+ }
+
+ /*
+ * Ensure that reserved parts of the environment are 0.
+ */
+ bzero((char *)&ifps->fp_save_state, sizeof(struct i386_fp_save));
+
+ ifps->fp_save_state.fp_control = user_fp_state->fp_control;
+ ifps->fp_save_state.fp_status = user_fp_state->fp_status;
+ ifps->fp_save_state.fp_tag = user_fp_state->fp_tag;
+ ifps->fp_save_state.fp_eip = user_fp_state->fp_eip;
+ ifps->fp_save_state.fp_cs = user_fp_state->fp_cs;
+ ifps->fp_save_state.fp_opcode = user_fp_state->fp_opcode;
+ ifps->fp_save_state.fp_dp = user_fp_state->fp_dp;
+ ifps->fp_save_state.fp_ds = user_fp_state->fp_ds;
+
+#if FPE
+ if (fp_kind == FP_SOFT) {
+ /*
+ * The emulator stores the registers by physical
+ * register number, not from top-of-stack.
+ * Shuffle the registers into the correct order.
+ */
+ register char *src; /* user regs */
+ register char *dst; /* kernel regs */
+ int i;
+
+ src = (char *)user_fp_regs;
+ dst = (char *)&ifps->fp_regs;
+ i = (ifps->fp_save_state.fp_status & FPS_TOS)
+ >> FPS_TOS_SHIFT; /* physical register
+ for st(0) */
+ if (i == 0)
+ bcopy(src, dst, 8 * 10);
+ else {
+ bcopy(src,
+ dst + 10 * i,
+ 10 * (8 - i));
+ bcopy(src + 10 * (8 - i),
+ dst,
+ 10 * i);
+ }
+ }
+ else
+ ifps->fp_regs = *user_fp_regs;
+#else /* no FPE */
+ ifps->fp_regs = *user_fp_regs;
+#endif /* FPE */
+
+ simple_unlock(&pcb->lock);
+ if (new_ifps != 0)
+ zfree(ifps_zone, (vm_offset_t) ifps);
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Get the floating-point state for a thread.
+ * If the thread is not the current thread, it is
+ * not running (held). Locking needed against
+ * concurrent fpu_set_state or fpu_get_state.
+ */
+kern_return_t
+fpu_get_state(thread, state)
+ thread_t thread;
+ register struct i386_float_state *state;
+{
+ register pcb_t pcb = thread->pcb;
+ register struct i386_fpsave_state *ifps;
+
+ASSERT_IPL(SPL0);
+ if (fp_kind == FP_NO)
+ return KERN_FAILURE;
+
+ simple_lock(&pcb->lock);
+ ifps = pcb->ims.ifps;
+ if (ifps == 0) {
+ /*
+ * No valid floating-point state.
+ */
+ simple_unlock(&pcb->lock);
+ bzero((char *)state, sizeof(struct i386_float_state));
+ return KERN_SUCCESS;
+ }
+
+ /* Make sure we`ve got the latest fp state info */
+ clear_ts();
+ fp_save(thread);
+ clear_fpu();
+
+ state->fpkind = fp_kind;
+ state->exc_status = 0;
+
+ {
+ register struct i386_fp_save *user_fp_state;
+ register struct i386_fp_regs *user_fp_regs;
+
+ state->initialized = ifps->fp_valid;
+
+ user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
+ user_fp_regs = (struct i386_fp_regs *)
+ &state->hw_state[sizeof(struct i386_fp_save)];
+
+ /*
+ * Ensure that reserved parts of the environment are 0.
+ */
+ bzero((char *)user_fp_state, sizeof(struct i386_fp_save));
+
+ user_fp_state->fp_control = ifps->fp_save_state.fp_control;
+ user_fp_state->fp_status = ifps->fp_save_state.fp_status;
+ user_fp_state->fp_tag = ifps->fp_save_state.fp_tag;
+ user_fp_state->fp_eip = ifps->fp_save_state.fp_eip;
+ user_fp_state->fp_cs = ifps->fp_save_state.fp_cs;
+ user_fp_state->fp_opcode = ifps->fp_save_state.fp_opcode;
+ user_fp_state->fp_dp = ifps->fp_save_state.fp_dp;
+ user_fp_state->fp_ds = ifps->fp_save_state.fp_ds;
+
+#if FPE
+ if (fp_kind == FP_SOFT) {
+ /*
+ * The emulator stores the registers by physical
+ * register number, not from top-of-stack.
+ * Shuffle the registers into the correct order.
+ */
+ register char *src; /* kernel regs */
+ register char *dst; /* user regs */
+ int i;
+
+ src = (char *)&ifps->fp_regs;
+ dst = (char *)user_fp_regs;
+ i = (ifps->fp_save_state.fp_status & FPS_TOS)
+ >> FPS_TOS_SHIFT; /* physical register
+ for st(0) */
+ if (i == 0)
+ bcopy(src, dst, 8 * 10);
+ else {
+ bcopy(src + 10 * i,
+ dst,
+ 10 * (8 - i));
+ bcopy(src,
+ dst + 10 * (8 - i),
+ 10 * i);
+ }
+ }
+ else
+ *user_fp_regs = ifps->fp_regs;
+#else /* no FPE */
+ *user_fp_regs = ifps->fp_regs;
+#endif /* FPE */
+ }
+ simple_unlock(&pcb->lock);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Initialize FPU.
+ *
+ * Raise exceptions for:
+ * invalid operation
+ * divide by zero
+ * overflow
+ *
+ * Use 53-bit precision.
+ */
+void fpinit()
+{
+ unsigned short control;
+
+ASSERT_IPL(SPL0);
+ clear_ts();
+ fninit();
+ fnstcw(&control);
+ control &= ~(FPC_PC|FPC_RC); /* Clear precision & rounding control */
+ control |= (FPC_PC_53 | /* Set precision */
+ FPC_RC_RN | /* round-to-nearest */
+ FPC_ZE | /* Suppress zero-divide */
+ FPC_OE | /* and overflow */
+ FPC_UE | /* underflow */
+ FPC_IE | /* Allow NaNQs and +-INF */
+ FPC_DE | /* Allow denorms as operands */
+ FPC_PE); /* No trap for precision loss */
+ fldcw(control);
+}
+
+/*
+ * Coprocessor not present.
+ */
+fpnoextflt()
+{
+ /*
+ * Enable FPU use.
+ */
+ASSERT_IPL(SPL0);
+ clear_ts();
+#if NCPUS == 1
+
+ /*
+ * If this thread`s state is in the FPU, we are done.
+ */
+ if (fp_thread == current_thread())
+ return;
+
+	/* Make sure we don't end up doing fp_save() from fpintr() while
+	 * we are doing fp_save() here, should the current FPU instruction
+	 * generate an error.
+	 */
+ fwait();
+ /*
+ * If another thread`s state is in the FPU, save it.
+ */
+ if (fp_thread != THREAD_NULL) {
+ fp_save(fp_thread);
+ }
+
+ /*
+ * Give this thread the FPU.
+ */
+ fp_thread = current_thread();
+
+#endif /* NCPUS == 1 */
+
+ /*
+ * Load this thread`s state into the FPU.
+ */
+ fp_load(current_thread());
+}
+
+/*
+ * FPU overran end of segment.
+ * Re-initialize FPU. Floating point state is not valid.
+ */
+fpextovrflt()
+{
+ register thread_t thread = current_thread();
+ register pcb_t pcb;
+ register struct i386_fpsave_state *ifps;
+
+#if NCPUS == 1
+
+ /*
+	 * Is the exception for the currently running thread?
+ */
+ if (fp_thread != thread) {
+ /* Uh oh... */
+ panic("fpextovrflt");
+ }
+#endif
+
+ /*
+ * This is a non-recoverable error.
+ * Invalidate the thread`s FPU state.
+ */
+ pcb = thread->pcb;
+ simple_lock(&pcb->lock);
+ ifps = pcb->ims.ifps;
+ pcb->ims.ifps = 0;
+ simple_unlock(&pcb->lock);
+
+ /*
+ * Re-initialize the FPU.
+ */
+ clear_ts();
+ fninit();
+
+ /*
+ * And disable access.
+ */
+ clear_fpu();
+
+ if (ifps)
+ zfree(ifps_zone, (vm_offset_t) ifps);
+
+ /*
+ * Raise exception.
+ */
+ i386_exception(EXC_BAD_ACCESS, VM_PROT_READ|VM_PROT_EXECUTE, 0);
+ /*NOTREACHED*/
+}
+
+/*
+ * FPU error. Called by AST.
+ */
+fpexterrflt()
+{
+ register thread_t thread = current_thread();
+
+ASSERT_IPL(SPL0);
+#if NCPUS == 1
+ /*
+ * Since FPU errors only occur on ESC or WAIT instructions,
+ * the current thread should own the FPU. If it didn`t,
+ * we should have gotten the task-switched interrupt first.
+ */
+ if (fp_thread != THREAD_NULL) {
+ panic("fpexterrflt");
+ return;
+ }
+
+ /*
+	 * Check whether a context switch occurred between the interrupt
+	 * and the AST.  This can happen if the interrupt arrived after
+	 * the FPU AST was checked.  In this case, raise the exception in
+	 * fp_load the next time this thread uses the FPU.  Remember the
+	 * exception condition in fp_valid (extended boolean 2).
+ */
+ if (fp_intr_thread != thread) {
+ if (fp_intr_thread == THREAD_NULL) {
+ panic("fpexterrflt: fp_intr_thread == THREAD_NULL");
+ return;
+ }
+ fp_intr_thread->pcb->ims.ifps->fp_valid = 2;
+ fp_intr_thread = THREAD_NULL;
+ return;
+ }
+ fp_intr_thread = THREAD_NULL;
+#else /* NCPUS == 1 */
+ /*
+ * Save the FPU state and turn off the FPU.
+ */
+ fp_save(thread);
+#endif /* NCPUS == 1 */
+
+ /*
+ * Raise FPU exception.
+ * Locking not needed on pcb->ims.ifps,
+ * since thread is running.
+ */
+ i386_exception(EXC_ARITHMETIC,
+ EXC_I386_EXTERR,
+ thread->pcb->ims.ifps->fp_save_state.fp_status);
+ /*NOTREACHED*/
+}
+
+/*
+ * Save FPU state.
+ *
+ * Locking not needed:
+ * . if called from fpu_get_state, pcb already locked.
+ * . if called from fpnoextflt or fp_intr, we are single-cpu
+ * . otherwise, thread is running.
+ */
+fp_save(thread)
+ register thread_t thread;
+{
+ register pcb_t pcb = thread->pcb;
+ register struct i386_fpsave_state *ifps = pcb->ims.ifps;
+
+ if (ifps != 0 && !ifps->fp_valid) {
+ /* registers are in FPU */
+ ifps->fp_valid = TRUE;
+ fnsave(&ifps->fp_save_state);
+ }
+}
+
+/*
+ * Restore FPU state from PCB.
+ *
+ * Locking not needed; always called on the current thread.
+ */
+fp_load(thread)
+ register thread_t thread;
+{
+ register pcb_t pcb = thread->pcb;
+ register struct i386_fpsave_state *ifps;
+
+ASSERT_IPL(SPL0);
+ ifps = pcb->ims.ifps;
+ if (ifps == 0) {
+ ifps = (struct i386_fpsave_state *) zalloc(ifps_zone);
+ bzero(ifps, sizeof *ifps);
+ pcb->ims.ifps = ifps;
+ fpinit();
+#if 1
+/*
+ * I'm not sure this is needed. Does the fpu regenerate the interrupt in
+ * frstor or not? Without this code we may miss some exceptions, with it
+ * we might send too many exceptions.
+ */
+ } else if (ifps->fp_valid == 2) {
+ /* delayed exception pending */
+
+ ifps->fp_valid = TRUE;
+ clear_fpu();
+ /*
+ * Raise FPU exception.
+ * Locking not needed on pcb->ims.ifps,
+ * since thread is running.
+ */
+ i386_exception(EXC_ARITHMETIC,
+ EXC_I386_EXTERR,
+ thread->pcb->ims.ifps->fp_save_state.fp_status);
+ /*NOTREACHED*/
+#endif
+ } else {
+ frstor(ifps->fp_save_state);
+ }
+ ifps->fp_valid = FALSE; /* in FPU */
+}
+
+/*
+ * Allocate and initialize FP state for current thread.
+ * Don't load state.
+ *
+ * Locking not needed; always called on the current thread.
+ */
+void
+fp_state_alloc()
+{
+ pcb_t pcb = current_thread()->pcb;
+ struct i386_fpsave_state *ifps;
+
+ ifps = (struct i386_fpsave_state *)zalloc(ifps_zone);
+ bzero(ifps, sizeof *ifps);
+ pcb->ims.ifps = ifps;
+
+ ifps->fp_valid = TRUE;
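+	/*
+	 * Start from the x87 default control word (0x037f, everything
+	 * masked), unmask the invalid-operation, zero-divide and
+	 * overflow exceptions, and select 53-bit precision with affine
+	 * infinity.
+	 */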
+ ifps->fp_save_state.fp_control = (0x037f
+ & ~(FPC_IM|FPC_ZM|FPC_OM|FPC_PC))
+ | (FPC_PC_53|FPC_IC_AFF);
+ ifps->fp_save_state.fp_status = 0;
+ ifps->fp_save_state.fp_tag = 0xffff; /* all empty */
+}
+
+#if AT386 || PS2
+/*
+ * Handle a coprocessor error interrupt on the AT386.
+ * This comes in on line 5 of the slave PIC at SPL1.
+ */
+fpintr()
+{
+ spl_t s;
+ thread_t thread = current_thread();
+
+ASSERT_IPL(SPL1);
+ /*
+ * Turn off the extended 'busy' line.
+ */
+ outb(0xf0, 0);
+
+ /*
+ * Save the FPU context to the thread using it.
+ */
+#if NCPUS == 1
+ if (fp_thread == THREAD_NULL) {
+ printf("fpintr: FPU not belonging to anyone!\n");
+ clear_ts();
+ fninit();
+ clear_fpu();
+ return;
+ }
+
+ if (fp_thread != thread) {
+ /*
+		 * The FPU exception is for a different thread.
+		 * When that thread next uses the FPU, an exception will be
+		 * raised in fp_load.  Remember the condition in fp_valid (== 2).
+ */
+ clear_ts();
+ fp_save(fp_thread);
+ fp_thread->pcb->ims.ifps->fp_valid = 2;
+ fninit();
+ clear_fpu();
+ /* leave fp_intr_thread THREAD_NULL */
+ return;
+ }
+ if (fp_intr_thread != THREAD_NULL)
+ panic("fp_intr: already caught intr");
+ fp_intr_thread = thread;
+#endif /* NCPUS == 1 */
+
+ clear_ts();
+ fp_save(thread);
+ fninit();
+ clear_fpu();
+
+ /*
+ * Since we are running on the interrupt stack, we must
+ * signal the thread to take the exception when we return
+ * to user mode. Use an AST to do this.
+ *
+ * Don`t set the thread`s AST field. If the thread is
+ * descheduled before it takes the AST, it will notice
+ * the FPU error when it reloads its FPU state.
+ */
+ s = splsched();
+ ast_on(cpu_number(), AST_I386_FP);
+ splx(s);
+}
+#endif /* AT386 || PS2 */
diff --git a/i386/i386/fpu.h b/i386/i386/fpu.h
new file mode 100644
index 00000000..7db1a8ef
--- /dev/null
+++ b/i386/i386/fpu.h
@@ -0,0 +1,130 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_FPU_H_
+#define _I386_FPU_H_
+
+/*
+ * Macro definitions for routines to manipulate the
+ * floating-point processor.
+ */
+
+#include <cpus.h>
+#include <fpe.h>
+#include <i386/proc_reg.h>
+#include <i386/thread.h>
+
+/*
+ * FPU instructions.
+ */
+#define fninit() \
+ asm volatile("fninit")
+
+#define fnstcw(control) \
+ asm("fnstcw %0" : "=m" (*(unsigned short *)(control)))
+
+#define fldcw(control) \
+ asm volatile("fldcw %0" : : "m" (*(unsigned short *) &(control)) )
+
+#define fnstsw() \
+ ({ \
+ unsigned short _status__; \
+ asm("fnstsw %0" : "=ma" (_status__)); \
+ _status__; \
+ })
+
+#define fnclex() \
+ asm volatile("fnclex")
+
+#define fnsave(state) \
+ asm volatile("fnsave %0" : "=m" (*state))
+
+#define frstor(state) \
+ asm volatile("frstor %0" : : "m" (state))
+
+#define fwait() \
+ asm("fwait");
+
+/*
+ * If floating-point instructions are emulated,
+ * we must load the floating-point register selector
+ * when switching to a new thread.
+ */
+#if FPE
+extern void disable_fpe();
+extern void enable_fpe();
+
+#define fpu_save_context(thread) \
+ { \
+ if (fp_kind == FP_SOFT) \
+ disable_fpe(); \
+ else \
+ set_ts(); \
+ }
+
+#define fpu_load_context(pcb) \
+ { \
+ register struct i386_fpsave_state *ifps; \
+ if (fp_kind == FP_SOFT && (ifps = pcb->ims.ifps) != 0) \
+ enable_fpe(ifps); \
+ }
+
+#else /* no FPE */
+
+#define fpu_load_context(pcb)
+
+/*
+ * Save thread`s FPU context.
+ * If only one CPU, we just set the task-switched bit,
+ * to keep the new thread from using the coprocessor.
+ * If multiple CPUs, we save the entire state.
+ */
+#if NCPUS > 1
+#define fpu_save_context(thread) \
+ { \
+ register struct i386_fpsave_state *ifps; \
+ ifps = (thread)->pcb->ims.ifps; \
+ if (ifps != 0 && !ifps->fp_valid) { \
+ /* registers are in FPU - save to memory */ \
+ ifps->fp_valid = TRUE; \
+ fnsave(&ifps->fp_save_state); \
+ set_ts(); \
+ } \
+ }
+
+#else /* NCPUS == 1 */
+#define fpu_save_context(thread) \
+ { \
+ set_ts(); \
+ }
+
+#endif /* NCPUS == 1 */
+
+#endif /* no FPE */
+
+extern int fp_kind;
+
+#endif /* _I386_FPU_H_ */
diff --git a/i386/i386/gdt.c b/i386/i386/gdt.c
new file mode 100644
index 00000000..d8551110
--- /dev/null
+++ b/i386/i386/gdt.c
@@ -0,0 +1,88 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Global descriptor table.
+ */
+#include <mach/machine/vm_types.h>
+
+#include <platforms.h>
+
+#include "vm_param.h"
+#include "seg.h"
+#include "gdt.h"
+
+#if PS2
+extern unsigned long abios_int_return;
+extern unsigned long abios_th_return;
+extern char intstack[];
+#endif /* PS2 */
+
+struct real_descriptor gdt[GDTSZ];
+
+void
+gdt_init()
+{
+ /* Initialize the kernel code and data segment descriptors. */
+ fill_gdt_descriptor(KERNEL_CS,
+ LINEAR_MIN_KERNEL_ADDRESS,
+ LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS - 1,
+ ACC_PL_K|ACC_CODE_R, SZ_32);
+ fill_gdt_descriptor(KERNEL_DS,
+ LINEAR_MIN_KERNEL_ADDRESS,
+ LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS - 1,
+ ACC_PL_K|ACC_DATA_W, SZ_32);
+
+ /* Load the new GDT. */
+ {
+ struct pseudo_descriptor pdesc;
+
+ pdesc.limit = sizeof(gdt)-1;
+ pdesc.linear_base = kvtolin(&gdt);
+ lgdt(&pdesc);
+ }
+
+ /* Reload all the segment registers from the new GDT.
+ We must load ds and es with 0 before loading them with KERNEL_DS
+ because some processors will "optimize out" the loads
+ if the previous selector values happen to be the same. */
+ asm volatile("
+ ljmp %0,$1f
+ 1:
+ movw %w2,%%ds
+ movw %w2,%%es
+ movw %w2,%%fs
+ movw %w2,%%gs
+
+ movw %w1,%%ds
+ movw %w1,%%es
+ movw %w1,%%ss
+ " : : "i" (KERNEL_CS), "r" (KERNEL_DS), "r" (0));
+}
+
diff --git a/i386/i386/gdt.h b/i386/i386/gdt.h
new file mode 100644
index 00000000..10d47624
--- /dev/null
+++ b/i386/i386/gdt.h
@@ -0,0 +1,72 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON, IBM, AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON, IBM, AND CSL DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_GDT_
+#define _I386_GDT_
+
+#include "seg.h"
+
+/*
+ * Kernel descriptors for Mach - 32-bit flat address space.
+ */
+#define KERNEL_CS 0x08 /* kernel code */
+#define KERNEL_DS 0x10 /* kernel data */
+#define KERNEL_LDT 0x18 /* master LDT */
+#define KERNEL_TSS 0x20 /* master TSS (uniprocessor) */
+#define USER_LDT 0x28 /* place for per-thread LDT */
+#define USER_TSS 0x30 /* place for per-thread TSS
+ that holds IO bitmap */
+#define FPE_CS 0x38 /* floating-point emulator code */
+#define USER_FPREGS 0x40 /* user-mode access to saved
+ floating-point registers */
+
+#ifdef PS2
+#define ABIOS_INT_RET 0x48 /* 16 bit return selector for ABIOS */
+#define ABIOS_TH_RET 0x50 /* 16 bit return selector for ABIOS */
+#define ABIOS_INT_SS 0x58 /* ABIOS interrupt stack selector */
+#define ABIOS_TH_SS 0x60 /* ABIOS current stack selector */
+#define ABIOS_FIRST_AVAIL_SEL \
+ 0x68 /* first selector for ABIOS
+ to allocate */
+#define GDTSZ 0x300 /* size of gdt table */
+#else /* PS2 */
+#define GDTSZ 11
+#endif /* PS2 */
+
+
+extern struct real_descriptor gdt[GDTSZ];
+
+/* Fill a segment descriptor in the GDT. */
+#define fill_gdt_descriptor(segment, base, limit, access, sizebits) \
+ fill_descriptor(&gdt[segment/8], base, limit, access, sizebits)
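+/* A selector such as KERNEL_CS is essentially a byte offset into the
+   GDT (descriptor index << 3, with the TI and RPL bits clear here), so
+   dividing by 8 recovers the descriptor index. */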
+
+#endif _I386_GDT_
diff --git a/i386/i386/hardclock.c b/i386/i386/hardclock.c
new file mode 100644
index 00000000..b4804da3
--- /dev/null
+++ b/i386/i386/hardclock.c
@@ -0,0 +1,99 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Clock interrupt.
+ */
+#include <mach/machine/eflags.h>
+
+#include <platforms.h>
+
+#include <kern/time_out.h>
+#include <i386/thread.h>
+
+#ifdef SYMMETRY
+#include <sqt/intctl.h>
+#endif
+#if defined(AT386) || defined(iPSC386)
+#include <i386/ipl.h>
+#endif
+#ifdef PS2
+#include <i386/pic.h>
+#include <i386/pio.h>
+#endif PS2
+
+extern void clock_interrupt();
+extern char return_to_iret[];
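+/*
+ * ret_addr is the address the low-level interrupt stub will return
+ * through; when it equals return_to_iret the interrupt was apparently
+ * taken from user mode or a thread stack, otherwise it nested on the
+ * interrupt stack (see the test below).
+ */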
+
+void
+#ifdef PS2
+hardclock(iunit, ivect, old_ipl, ret_addr, regs)
+ int iunit; /* 'unit' number */
+ int ivect; /* interrupt number */
+#else /* PS2 */
+hardclock(iunit, old_ipl, ret_addr, regs)
+ int iunit; /* 'unit' number */
+ int old_ipl; /* old interrupt level */
+#endif /* PS2 */
+ char * ret_addr; /* return address in interrupt handler */
+ struct i386_interrupt_state *regs;
+ /* saved registers */
+{
+ if (ret_addr == return_to_iret)
+ /*
+ * Interrupt from user mode or from thread stack.
+ */
+ clock_interrupt(tick, /* usec per tick */
+ (regs->efl & EFL_VM) || /* user mode */
+ ((regs->cs & 0x03) != 0), /* user mode */
+#if defined(PS2) || defined(LINUX_DEV)
+ FALSE /* ignore SPL0 */
+#else /* PS2 */
+ old_ipl == SPL0 /* base priority */
+#endif /* PS2 */
+ );
+ else
+ /*
+ * Interrupt from interrupt stack.
+ */
+ clock_interrupt(tick, /* usec per tick */
+ FALSE, /* kernel mode */
+ FALSE); /* not SPL0 */
+
+#ifdef LINUX_DEV
+ linux_timer_intr();
+#endif
+
+#ifdef PS2
+ /*
+ * Reset the clock interrupt line.
+ */
+ outb(0x61, inb(0x61) | 0x80);
+#endif /* PS2 */
+}
diff --git a/i386/i386/i386asm.sym b/i386/i386/i386asm.sym
new file mode 100644
index 00000000..e38a1bd6
--- /dev/null
+++ b/i386/i386/i386asm.sym
@@ -0,0 +1,139 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <platforms.h>
+#include <cpus.h>
+#include <mach_kdb.h>
+#include <stat_time.h>
+
+/*
+ * Pass field offsets to assembly code.
+ */
+#include <sys/reboot.h>
+
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/syscall_emulation.h>
+#include <i386/thread.h>
+#include <i386/pmap.h>
+#include "vm_param.h"
+#include "seg.h"
+#include "tss.h"
+#include "idt.h"
+#include "gdt.h"
+#include "ldt.h"
+#include "mp_desc.h"
+
+
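+/*
+ * The `offset', `size' and `expr' lines below are presumably expanded
+ * by the build's symbol-generation step into #defines usable from
+ * assembly: `offset' yields a structure field's byte offset, `size' a
+ * structure's size, and `expr' the value of a constant expression,
+ * under the (optional) name in the last column.
+ */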
+offset thread th pcb
+offset thread th task
+offset thread th recover
+offset thread th kernel_stack
+offset thread th swap_func
+
+offset task task eml_dispatch TASK_EMUL
+
+offset eml_dispatch eml disp_min DISP_MIN
+offset eml_dispatch eml disp_count DISP_COUNT
+offset eml_dispatch eml disp_vector DISP_VECTOR
+
+expr &STACK_IKS(0)->k_ebx KSS_EBX
+expr &STACK_IKS(0)->k_esp KSS_ESP
+expr &STACK_IKS(0)->k_ebp KSS_EBP
+expr &STACK_IKS(0)->k_esi KSS_ESI
+expr &STACK_IKS(0)->k_edi KSS_EDI
+expr &STACK_IKS(0)->k_eip KSS_EIP
+size i386_kernel_state iks
+
+size i386_exception_link iel
+
+offset i386_saved_state r cs
+offset i386_saved_state r uesp
+offset i386_saved_state r eax
+offset i386_saved_state r trapno
+offset i386_saved_state r err
+offset i386_saved_state r efl R_EFLAGS
+offset i386_saved_state r eip
+offset i386_saved_state r cr2
+
+offset i386_interrupt_state i eip
+offset i386_interrupt_state i cs
+offset i386_interrupt_state i efl
+
+offset i386_tss tss esp0
+offset i386_tss tss ss0
+
+expr I386_PGBYTES NBPG
+expr VM_MIN_ADDRESS
+expr VM_MAX_ADDRESS
+expr VM_MIN_KERNEL_ADDRESS KERNELBASE
+expr KERNEL_STACK_SIZE
+
+expr PDESHIFT
+expr PTESHIFT
+expr PTEMASK
+
+expr INTEL_PTE_PFN PTE_PFN
+expr INTEL_PTE_VALID PTE_V
+expr INTEL_PTE_WRITE PTE_W
+expr ~INTEL_PTE_VALID PTE_INVALID
+expr NPTES PTES_PER_PAGE
+expr INTEL_PTE_VALID|INTEL_PTE_WRITE INTEL_PTE_KERNEL
+
+expr IDTSZ
+expr GDTSZ
+expr LDTSZ
+
+expr KERNEL_CS
+expr KERNEL_DS
+expr KERNEL_TSS
+expr KERNEL_LDT
+
+expr (VM_MIN_KERNEL_ADDRESS>>PDESHIFT)*sizeof(pt_entry_t) KERNELBASEPDE
+
+#if MACH_KDB
+expr RB_KDB
+#endif MACH_KDB
+
+#if NCPUS > 1
+offset mp_desc_table mp gdt
+offset mp_desc_table mp idt
+#endif NCPUS > 1
+expr INTSTACK_SIZE
+
+#if !STAT_TIME
+offset timer tm low_bits LOW_BITS
+offset timer tm high_bits HIGH_BITS
+offset timer tm high_bits_check HIGH_BITS_CHECK
+expr TIMER_HIGH_UNIT
+offset thread th system_timer
+offset thread th user_timer
+#endif
+
diff --git a/i386/i386/idt-gen.h b/i386/i386/idt-gen.h
new file mode 100644
index 00000000..4663593e
--- /dev/null
+++ b/i386/i386/idt-gen.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifndef _I386_IDT_
+#define _I386_IDT_
+
+#include <mach/vm_param.h>
+
+#include "seg.h"
+
+/*
+ * Interrupt table must always be at least 32 entries long,
+ * to cover the basic i386 exception vectors.
+ * More-specific code will probably define it to be longer,
+ * to allow separate entrypoints for hardware interrupts.
+ */
+#ifndef IDTSZ
+#error you need to define IDTSZ
+#endif
+
+extern struct real_gate idt[IDTSZ];
+
+/* Fill a gate in the IDT. */
+#define fill_idt_gate(int_num, entry, selector, access, dword_count) \
+ fill_gate(&idt[int_num], entry, selector, access, dword_count)
+
+#endif _I386_IDT_
diff --git a/i386/i386/idt.c b/i386/i386/idt.c
new file mode 100644
index 00000000..56688517
--- /dev/null
+++ b/i386/i386/idt.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "vm_param.h"
+#include "seg.h"
+#include "idt.h"
+#include "gdt.h"
+
+struct real_gate idt[IDTSZ];
+
+struct idt_init_entry
+{
+ unsigned entrypoint;
+ unsigned short vector;
+ unsigned short type;
+};
+extern struct idt_init_entry idt_inittab[];
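+/* The table itself lives in idt_inittab.S; it is terminated by an
+   entry whose entrypoint is zero. */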
+
+void idt_init()
+{
+ struct idt_init_entry *iie = idt_inittab;
+
+ /* Initialize the exception vectors from the idt_inittab. */
+ while (iie->entrypoint)
+ {
+ fill_idt_gate(iie->vector, iie->entrypoint, KERNEL_CS, iie->type, 0);
+ iie++;
+ }
+
+ /* Load the IDT pointer into the processor. */
+ {
+ struct pseudo_descriptor pdesc;
+
+ pdesc.limit = sizeof(idt)-1;
+ pdesc.linear_base = kvtolin(&idt);
+ lidt(&pdesc);
+ }
+}
+
diff --git a/i386/i386/idt_inittab.S b/i386/i386/idt_inittab.S
new file mode 100644
index 00000000..77185681
--- /dev/null
+++ b/i386/i386/idt_inittab.S
@@ -0,0 +1,121 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#include <mach/machine/asm.h>
+
+#include "seg.h"
+
+
+/* We'll be using macros to fill in a table in data hunk 2
+ while writing trap entrypoint routines at the same time.
+ Here's the header that comes before everything else. */
+ .data 2
+ENTRY(idt_inittab)
+ .text
+
+/*
+ * Interrupt descriptor table and code vectors for it.
+ */
+#define IDT_ENTRY(n,entry,type) \
+ .data 2 ;\
+ .long entry ;\
+ .word n ;\
+ .word type ;\
+ .text
+
+/*
+ * No error code. Clear error code and push trap number.
+ */
+#define EXCEPTION(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_TRAP_GATE);\
+ENTRY(name) ;\
+ pushl $(0) ;\
+ pushl $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * User-accessible exception. Otherwise, same as above.
+ */
+#define EXCEP_USR(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_U|ACC_TRAP_GATE);\
+ENTRY(name) ;\
+ pushl $(0) ;\
+ pushl $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * Error code has been pushed. Just push trap number.
+ */
+#define EXCEP_ERR(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_INTR_GATE);\
+ENTRY(name) ;\
+ pushl $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * Special interrupt code: dispatches to a unique entrypoint,
+ * not defined automatically here.
+ */
+#define EXCEP_SPC(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_TRAP_GATE)
+
+
+EXCEPTION(0x00,t_zero_div)
+EXCEP_SPC(0x01,t_debug)
+/* skip NMI interrupt - let more specific code figure that out. */
+EXCEP_USR(0x03,t_int3)
+EXCEP_USR(0x04,t_into)
+EXCEP_USR(0x05,t_bounds)
+EXCEPTION(0x06,t_invop)
+EXCEPTION(0x07,t_nofpu)
+EXCEPTION(0x08,a_dbl_fault)
+EXCEPTION(0x09,a_fpu_over)
+EXCEPTION(0x0a,a_inv_tss)
+EXCEP_SPC(0x0b,t_segnp)
+EXCEP_ERR(0x0c,t_stack_fault)
+EXCEP_SPC(0x0d,t_gen_prot)
+EXCEP_SPC(0x0e,t_page_fault)
+EXCEPTION(0x0f,t_trap_0f)
+EXCEPTION(0x10,t_fpu_err)
+EXCEPTION(0x11,t_trap_11)
+EXCEPTION(0x12,t_trap_12)
+EXCEPTION(0x13,t_trap_13)
+EXCEPTION(0x14,t_trap_14)
+EXCEPTION(0x15,t_trap_15)
+EXCEPTION(0x16,t_trap_16)
+EXCEPTION(0x17,t_trap_17)
+EXCEPTION(0x18,t_trap_18)
+EXCEPTION(0x19,t_trap_19)
+EXCEPTION(0x1a,t_trap_1a)
+EXCEPTION(0x1b,t_trap_1b)
+EXCEPTION(0x1c,t_trap_1c)
+EXCEPTION(0x1d,t_trap_1d)
+EXCEPTION(0x1e,t_trap_1e)
+EXCEPTION(0x1f,t_trap_1f)
+
+/* Terminator */
+ .data 2
+ .long 0
+
diff --git a/i386/i386/io_emulate.c b/i386/i386/io_emulate.c
new file mode 100644
index 00000000..1bc5a75b
--- /dev/null
+++ b/i386/i386/io_emulate.c
@@ -0,0 +1,108 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#include <platforms.h>
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_entry.h>
+
+#include <device/dev_hdr.h>
+
+#include <i386/thread.h>
+#include <i386/io_port.h>
+#include <i386/io_emulate.h>
+
+extern ipc_port_t iopl_device_port;
+extern mach_device_t iopl_device;
+
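+/*
+ * Called when a user thread faults on an IN/OUT instruction: platform
+ * code may emulate the access outright; otherwise, if the task holds a
+ * send right to the IOPL device port (and the IOPL port set was not
+ * already mapped), the port set is mapped into the thread and the
+ * instruction is retried.
+ */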
+int
+emulate_io(regs, opcode, io_port)
+ struct i386_saved_state *regs;
+ int opcode;
+ int io_port;
+{
+ thread_t thread = current_thread();
+
+#if AT386
+ if (iopl_emulate(regs, opcode, io_port))
+ return EM_IO_DONE;
+#endif /* AT386 */
+
+ if (iopb_check_mapping(thread, iopl_device))
+ return EM_IO_ERROR;
+
+ /*
+ * Check for send rights to the IOPL device port.
+ */
+ if (iopl_device_port == IP_NULL)
+ return EM_IO_ERROR;
+ {
+ ipc_space_t space = current_space();
+ mach_port_t name;
+ ipc_entry_t entry;
+ boolean_t has_rights = FALSE;
+
+ is_write_lock(space);
+ assert(space->is_active);
+
+ if (ipc_right_reverse(space, (ipc_object_t) iopl_device_port,
+ &name, &entry)) {
+ /* iopl_device_port is locked and active */
+ if (entry->ie_bits & MACH_PORT_TYPE_SEND)
+ has_rights = TRUE;
+ ip_unlock(iopl_device_port);
+ }
+
+ is_write_unlock(space);
+ if (!has_rights) {
+ return EM_IO_ERROR;
+ }
+ }
+
+
+ /*
+ * Map the IOPL port set into the thread.
+ */
+
+ if (i386_io_port_add(thread, iopl_device)
+ != KERN_SUCCESS)
+ return EM_IO_ERROR;
+
+ /*
+ * Make the thread use its IO_TSS to get the IO permissions;
+ * it may not have had one before this.
+ */
+ switch_ktss(thread->pcb);
+
+ return EM_IO_RETRY;
+}
diff --git a/i386/i386/io_emulate.h b/i386/i386/io_emulate.h
new file mode 100644
index 00000000..de0d12df
--- /dev/null
+++ b/i386/i386/io_emulate.h
@@ -0,0 +1,43 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_IO_EMULATE_H_
+#define _I386_IO_EMULATE_H_
+
+/*
+ * Return codes from IO emulation.
+ */
+extern int emulate_io(/*
+ struct i386_saved_state *regs,
+ int opcode,
+ int io_port
+ */);
+
+#define EM_IO_DONE 0 /* IO instruction executed, proceed */
+#define EM_IO_RETRY 1 /* IO port mapped, retry instruction */
+#define EM_IO_ERROR 2 /* IO port not mapped */
+
+#endif /* _I386_IO_EMULATE_H_ */
diff --git a/i386/i386/io_map.c b/i386/i386/io_map.c
new file mode 100644
index 00000000..256a9a08
--- /dev/null
+++ b/i386/i386/io_map.c
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+
+extern vm_offset_t kernel_virtual_start;
+
+/*
+ * Allocate and map memory for devices that may need to be mapped before
+ * Mach VM is running.
+ */
+vm_offset_t
+io_map(phys_addr, size)
+ vm_offset_t phys_addr;
+ vm_size_t size;
+{
+ vm_offset_t start;
+
+ if (kernel_map == VM_MAP_NULL) {
+ /*
+ * VM is not initialized. Grab memory.
+ */
+ start = kernel_virtual_start;
+ kernel_virtual_start += round_page(size);
+ printf("stealing kernel virtual addresses %08x-%08x\n", start, kernel_virtual_start);
+ }
+ else {
+ (void) kmem_alloc_pageable(kernel_map, &start, round_page(size));
+ }
+ (void) pmap_map_bd(start, phys_addr, phys_addr + round_page(size),
+ VM_PROT_READ|VM_PROT_WRITE);
+ return (start);
+}
diff --git a/i386/i386/io_port.h b/i386/i386/io_port.h
new file mode 100644
index 00000000..62022b78
--- /dev/null
+++ b/i386/i386/io_port.h
@@ -0,0 +1,43 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_IO_PORT_H_
+#define _I386_IO_PORT_H_
+/*
+ * IO register definitions.
+ */
+typedef unsigned short io_reg_t;
+
+#define IO_REG_NULL (0x00ff) /* reserved */
+
+/*
+ * Allocate and destroy io port sets for users to map into
+ * threads.
+ */
+extern void io_port_create(/* device_t, io_reg_t * */);
+extern void io_port_destroy(/* device_t */);
+
+#endif /* _I386_IO_PORT_H_ */
diff --git a/i386/i386/iopb.c b/i386/i386/iopb.c
new file mode 100644
index 00000000..d2addac0
--- /dev/null
+++ b/i386/i386/iopb.c
@@ -0,0 +1,615 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Code to manipulate IO permission bitmaps.
+ */
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+
+#include <ipc/ipc_port.h>
+
+#include <kern/kalloc.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <kern/thread.h>
+
+#include <device/dev_hdr.h>
+
+#include "io_port.h"
+#include "iopb.h"
+#include "seg.h"
+#include "gdt.h"
+
+/*
+ * A set of ports for an IO device.
+ */
+struct io_port {
+ mach_device_t device; /* Mach device */
+ queue_chain_t dev_list; /* link in device list */
+ queue_chain_t io_use_list; /* List of threads that use it */
+ io_reg_t *io_port_list; /* list of IO ports that use it */
+ /* list ends with IO_REG_NULL */
+};
+typedef struct io_port *io_port_t;
+
+/*
+ * Lookup table for device -> io_port mapping
+ * (a linked list - I don't expect too many)
+ */
+queue_head_t device_to_io_port_list;
+
+/*
+ * Cross-reference:
+ * all threads that have IO ports mapped
+ * all IO ports that have threads mapped
+ */
+struct io_use {
+ queue_chain_t psq; /* Links from port set */
+ queue_chain_t tsq; /* links from tss */
+ io_port_t ps; /* Port set */
+ iopb_tss_t ts; /* Task segment */
+};
+typedef struct io_use *io_use_t;
+
+/*
+ * Big lock for the whole mess.
+ */
+decl_simple_lock_data(, iopb_lock)
+
+/*
+ * Initialize the package.
+ */
+void
+iopb_init(void)
+{
+ queue_init(&device_to_io_port_list);
+ simple_lock_init(&iopb_lock);
+}
+
+/*
+ * Initialize bitmap (set all bits to OFF == 1)
+ */
+void
+io_bitmap_init(
+ isa_iopb bp,
+ boolean_t on_off)
+{
+ register unsigned char *b = bp;
+ register int s;
+ unsigned char c;
+
+ /*
+ * Disallow access to ports 0x00 .. 0xff
+ */
+ for (s = 0; s < (0xff+1)/8; s++) {
+ *b++ = ~0; /* no access */
+ }
+
+ if (on_off)
+ c = 0;
+ else
+ c = ~0;
+
+ for (; s < sizeof(isa_iopb); s++) {
+ *b++ = c;
+ }
+}
+
+/*
+ * Set selected bits in bitmap to ON == 0
+ */
+void
+io_bitmap_set(
+ isa_iopb bp,
+ io_reg_t *bit_list)
+{
+ io_reg_t io_bit;
+
+ while ((io_bit = *bit_list++) != IO_REG_NULL) {
+ bp[io_bit>>3] &= ~(1 << (io_bit & 0x7));
+ }
+}
+
+/*
+ * Set selected bits in bitmap to OFF == 1
+ */
+void
+io_bitmap_clear(
+ isa_iopb bp,
+ io_reg_t *bit_list)
+{
+ io_reg_t io_bit;
+
+ while ((io_bit = *bit_list++) != IO_REG_NULL) {
+ bp[io_bit>>3] |= (1 << (io_bit & 0x7));
+ }
+}
+
+/*
+ * Lookup an io-port set by device
+ */
+io_port_t
+device_to_io_port_lookup(
+ mach_device_t device)
+{
+ register io_port_t io_port;
+
+ queue_iterate(&device_to_io_port_list, io_port, io_port_t, dev_list) {
+ if (io_port->device == device) {
+ return io_port;
+ }
+ }
+ return 0;
+}
+
+/*
+ * [exported]
+ * Create an io_port set
+ */
+void
+io_port_create(
+ mach_device_t device,
+ io_reg_t *io_port_list)
+{
+ register io_port_t io_port;
+
+ io_port = (io_port_t) kalloc(sizeof(struct io_port));
+
+ simple_lock(&iopb_lock);
+ if (device_to_io_port_lookup(device) != 0) {
+ simple_unlock(&iopb_lock);
+ kfree((vm_offset_t) io_port, sizeof(struct io_port));
+ return;
+ }
+
+ io_port->device = device;
+ queue_init(&io_port->io_use_list);
+ io_port->io_port_list = io_port_list;
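+	/*
+	 * The caller's list (terminated by IO_REG_NULL, e.g. a
+	 * hypothetical { 0x60, 0x64, IO_REG_NULL } for a keyboard
+	 * controller) is kept by reference, so it must remain valid for
+	 * the lifetime of this io_port.
+	 */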
+
+ /*
+ * Enter in lookup list.
+ */
+ queue_enter(&device_to_io_port_list, io_port, io_port_t, dev_list);
+
+ simple_unlock(&iopb_lock);
+}
+
+/*
+ * [exported]
+ * Destroy an io port set, removing any IO mappings.
+ */
+void
+io_port_destroy(
+ mach_device_t device)
+{
+ io_port_t io_port;
+ io_use_t iu;
+
+ simple_lock(&iopb_lock);
+ io_port = device_to_io_port_lookup(device);
+ if (io_port == 0) {
+ simple_unlock(&iopb_lock);
+ return;
+ }
+
+ queue_iterate(&io_port->io_use_list, iu, io_use_t, psq) {
+ iopb_tss_t io_tss;
+ io_tss = iu->ts;
+ io_bitmap_clear(io_tss->bitmap, io_port->io_port_list);
+ queue_remove(&io_tss->io_port_list, iu, io_use_t, tsq);
+ }
+ queue_remove(&device_to_io_port_list, io_port, io_port_t, dev_list);
+ simple_unlock(&iopb_lock);
+
+ while (!queue_empty(&io_port->io_use_list)) {
+ iu = (io_use_t) queue_first(&io_port->io_use_list);
+ queue_remove(&io_port->io_use_list, iu, io_use_t, psq);
+ kfree((vm_offset_t)iu, sizeof(struct io_use));
+ }
+
+ kfree((vm_offset_t)io_port, sizeof(struct io_port));
+}
+
+/*
+ * Initialize an IO TSS.
+ */
+void
+io_tss_init(
+ iopb_tss_t io_tss,
+ boolean_t access_all) /* allow access or not */
+{
+ vm_offset_t addr = (vm_offset_t) io_tss;
+ vm_size_t size = (char *)&io_tss->barrier - (char *)io_tss;
+
+ bzero(&io_tss->tss, sizeof(struct i386_tss));
+ io_tss->tss.io_bit_map_offset
+ = (char *)&io_tss->bitmap - (char *)io_tss;
+ io_tss->tss.ss0 = KERNEL_DS;
+ io_bitmap_init(io_tss->bitmap, access_all);
+ io_tss->barrier = ~0;
+ queue_init(&io_tss->io_port_list);
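+	/*
+	 * Build the two words of the descriptor for this TSS by hand:
+	 * base and limit are scattered across both words in the usual
+	 * i386 descriptor layout, with a present, kernel-privilege TSS
+	 * type in the access byte.
+	 */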
+ io_tss->iopb_desc[0] = ((size-1) & 0xffff)
+ | ((addr & 0xffff) << 16);
+ io_tss->iopb_desc[1] = ((addr & 0x00ff0000) >> 16)
+ | ((ACC_TSS|ACC_PL_K|ACC_P) << 8)
+ | ((size-1) & 0x000f0000)
+ | (addr & 0xff000000);
+}
+
+/*
+ * [exported]
+ * Create an IOPB_TSS
+ */
+iopb_tss_t
+iopb_create(void)
+{
+ register iopb_tss_t ts;
+
+ ts = (iopb_tss_t) kalloc(sizeof (struct iopb_tss));
+ io_tss_init(ts, TRUE); /* XXX */
+ return ts;
+}
+
+/*
+ * [exported]
+ * Destroy an IOPB_TSS
+ */
+void
+iopb_destroy(
+ iopb_tss_t io_tss)
+{
+ io_use_t iu;
+ io_port_t io_port;
+
+ simple_lock(&iopb_lock);
+
+ queue_iterate(&io_tss->io_port_list, iu, io_use_t, tsq) {
+ io_port = iu->ps;
+ /* skip bitmap clear - entire bitmap will vanish */
+ queue_remove(&io_port->io_use_list, iu, io_use_t, psq);
+ }
+
+ simple_unlock(&iopb_lock);
+
+ while (!queue_empty(&io_tss->io_port_list)) {
+ iu = (io_use_t) queue_first(&io_tss->io_port_list);
+ queue_remove(&io_tss->io_port_list, iu, io_use_t, tsq);
+ kfree((vm_offset_t)iu, sizeof(struct io_use));
+ }
+
+ kfree((vm_offset_t)io_tss, sizeof(struct iopb_tss));
+}
+
+/*
+ * Add an IO mapping to a thread.
+ */
+kern_return_t
+i386_io_port_add(
+ thread_t thread,
+ mach_device_t device)
+{
+ pcb_t pcb;
+ iopb_tss_t io_tss, new_io_tss;
+ io_port_t io_port;
+ io_use_t iu, old_iu;
+
+ if (thread == THREAD_NULL
+ || device == DEVICE_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ pcb = thread->pcb;
+
+ new_io_tss = 0;
+ iu = (io_use_t) kalloc(sizeof(struct io_use));
+
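+	/*
+	 * Allocations are made with both locks dropped; we then jump
+	 * back here and redo the lookups, since things may have changed
+	 * while the locks were released.
+	 */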
+ Retry:
+ simple_lock(&iopb_lock);
+
+ /* find the io_port_t for the device */
+ io_port = device_to_io_port_lookup(device);
+ if (io_port == 0) {
+ /*
+ * Device does not have IO ports available.
+ */
+ simple_unlock(&iopb_lock);
+ if (new_io_tss)
+ kfree((vm_offset_t)new_io_tss, sizeof(struct iopb_tss));
+ kfree((vm_offset_t) iu, sizeof(struct io_use));
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /* Have the IO port. */
+
+ /* Make sure the thread has a TSS. */
+
+ simple_lock(&pcb->lock);
+ io_tss = pcb->ims.io_tss;
+ if (io_tss == 0) {
+ if (new_io_tss == 0) {
+ /*
+ * Allocate an IO-tss.
+ */
+ simple_unlock(&pcb->lock);
+ simple_unlock(&iopb_lock);
+
+ new_io_tss = (iopb_tss_t) kalloc(sizeof(struct iopb_tss));
+ io_tss_init(new_io_tss, TRUE); /* XXX */
+
+ goto Retry;
+ }
+ io_tss = new_io_tss;
+ pcb->ims.io_tss = io_tss;
+ new_io_tss = 0;
+ }
+
+ /*
+ * Have io_port and io_tss.
+ * See whether device is already mapped.
+ */
+ queue_iterate(&io_tss->io_port_list, old_iu, io_use_t, tsq) {
+ if (old_iu->ps == io_port) {
+ /*
+ * Already mapped.
+ */
+ simple_unlock(&pcb->lock);
+ simple_unlock(&iopb_lock);
+
+ kfree((vm_offset_t)iu, sizeof(struct io_use));
+ if (new_io_tss)
+ kfree((vm_offset_t)new_io_tss, sizeof(struct iopb_tss));
+ return KERN_SUCCESS;
+ }
+ }
+
+ /*
+ * Add mapping.
+ */
+ iu->ps = io_port;
+ iu->ts = io_tss;
+ queue_enter(&io_port->io_use_list, iu, io_use_t, psq);
+ queue_enter(&io_tss->io_port_list, iu, io_use_t, tsq);
+ io_bitmap_set(io_tss->bitmap, io_port->io_port_list);
+
+ simple_unlock(&pcb->lock);
+ simple_unlock(&iopb_lock);
+
+ if (new_io_tss)
+ kfree((vm_offset_t)new_io_tss, sizeof(struct iopb_tss));
+ return KERN_SUCCESS;
+
+}
+
+/*
+ * Remove an IO mapping from a thread.
+ */
+kern_return_t
+i386_io_port_remove(thread, device)
+ thread_t thread;
+ mach_device_t device;
+{
+ pcb_t pcb;
+ iopb_tss_t io_tss;
+ io_port_t io_port;
+ io_use_t iu;
+
+ if (thread == THREAD_NULL
+ || device == DEVICE_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ pcb = thread->pcb;
+
+ simple_lock(&iopb_lock);
+
+ /* find the io_port_t for the device */
+
+ io_port = device_to_io_port_lookup(device);
+ if (io_port == 0) {
+ /*
+ * Device does not have IO ports available.
+ */
+ simple_unlock(&iopb_lock);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ simple_lock(&pcb->lock);
+ io_tss = pcb->ims.io_tss;
+ if (io_tss == 0) {
+ simple_unlock(&pcb->lock);
+ simple_unlock(&iopb_lock);
+ return KERN_INVALID_ARGUMENT; /* not mapped */
+ }
+
+ /*
+ * Find the mapping.
+ */
+ queue_iterate(&io_tss->io_port_list, iu, io_use_t, tsq) {
+ if (iu->ps == io_port) {
+ /*
+ * Found mapping. Remove it.
+ */
+ io_bitmap_clear(io_tss->bitmap, io_port->io_port_list);
+
+ queue_remove(&io_port->io_use_list, iu, io_use_t, psq);
+ queue_remove(&io_tss->io_port_list, iu, io_use_t, tsq);
+
+ simple_unlock(&pcb->lock);
+ simple_unlock(&iopb_lock);
+
+ kfree((vm_offset_t)iu, sizeof(struct io_use));
+
+ return KERN_SUCCESS;
+ }
+ }
+
+	/*
+	 * No mapping.
+	 */
+	simple_unlock(&pcb->lock);
+	simple_unlock(&iopb_lock);
+	return KERN_INVALID_ARGUMENT;
+}
+
+/*
+ * Return the IO ports mapped into a thread.
+ */
+extern ipc_port_t mach_convert_device_to_port(/* device_t */);
+
+kern_return_t
+i386_io_port_list(thread, list, list_count)
+ thread_t thread;
+ mach_device_t **list;
+ unsigned int *list_count;
+{
+ register pcb_t pcb;
+ register iopb_tss_t io_tss;
+ unsigned int count, alloc_count;
+ mach_device_t *devices;
+ vm_size_t size_needed, size;
+ vm_offset_t addr;
+ int i;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ pcb = thread->pcb;
+
+	alloc_count = 16;		/* a guess */
+	size = 0;			/* no buffer allocated yet */
+
+ do {
+ size_needed = alloc_count * sizeof(ipc_port_t);
+ if (size_needed <= size)
+ break;
+
+ if (size != 0)
+ kfree(addr,size);
+
+ assert(size_needed > 0);
+ size = size_needed;
+
+ addr = kalloc(size);
+ if (addr == 0)
+ return KERN_RESOURCE_SHORTAGE;
+
+ devices = (mach_device_t *)addr;
+ count = 0;
+
+ simple_lock(&iopb_lock);
+ simple_lock(&pcb->lock);
+ io_tss = pcb->ims.io_tss;
+ if (io_tss != 0) {
+ register io_use_t iu;
+
+ queue_iterate(&io_tss->io_port_list, iu, io_use_t, tsq) {
+		if (++count <= alloc_count) {
+ *devices = iu->ps->device;
+ device_reference(*devices);
+ devices++;
+ }
+ }
+ }
+ simple_unlock(&pcb->lock);
+ simple_unlock(&iopb_lock);
+
+	if (count <= alloc_count)
+	    break;			/* everything fitted */
+
+	/*
+	 * Buffer too small: drop the device references taken above
+	 * and retry with a buffer large enough for every port.
+	 */
+	for (i = 0; i < devices - (mach_device_t *)addr; i++)
+	    device_deallocate(((mach_device_t *)addr)[i]);
+	alloc_count = count;
+    } while (TRUE);
+
+ if (count == 0) {
+ /*
+ * No IO ports
+ */
+ *list = 0;
+ *list_count = 0;
+
+ if (size != 0)
+ kfree(addr, size);
+ }
+ else {
+ /*
+ * If we allocated too much, must copy.
+ */
+ size_needed = count * sizeof(ipc_port_t);
+ if (size_needed < size) {
+ vm_offset_t new_addr;
+
+ new_addr = kalloc(size_needed);
+ if (new_addr == 0) {
+ for (i = 0; i < count; i++)
+ device_deallocate(devices[i]);
+ kfree(addr, size);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ bcopy((void *)addr, (void *)new_addr, size_needed);
+ kfree(addr, size);
+ devices = (mach_device_t *)new_addr;
+ }
+
+ for (i = 0; i < count; i++)
+ ((ipc_port_t *)devices)[i] =
+ mach_convert_device_to_port(devices[i]);
+ }
+ *list = devices;
+ *list_count = count;
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Check whether an IO device is mapped to a particular thread.
+ * Used to support the 'iopl' device automatic mapping.
+ */
+boolean_t
+iopb_check_mapping(thread, device)
+ thread_t thread;
+ mach_device_t device;
+{
+ pcb_t pcb;
+ io_port_t io_port;
+ io_use_t iu;
+
+ pcb = thread->pcb;
+
+ simple_lock(&iopb_lock);
+
+ /* Find the io port for the device */
+
+ io_port = device_to_io_port_lookup(device);
+ if (io_port == 0) {
+ simple_unlock(&iopb_lock);
+ return FALSE;
+ }
+
+ /* Look up the mapping in the device`s mapping list. */
+
+ queue_iterate(&io_port->io_use_list, iu, io_use_t, psq) {
+ if (iu->ts == pcb->ims.io_tss) {
+ /*
+ * Device is mapped.
+ */
+ simple_unlock(&iopb_lock);
+ return TRUE;
+ }
+ }
+ simple_unlock(&iopb_lock);
+ return FALSE;
+}
diff --git a/i386/i386/iopb.h b/i386/i386/iopb.h
new file mode 100644
index 00000000..0a3e5745
--- /dev/null
+++ b/i386/i386/iopb.h
@@ -0,0 +1,62 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_IOPB_H_
+#define _I386_IOPB_H_
+
+#include <i386/tss.h>
+#include <kern/queue.h>
+
+/*
+ * IO permission bitmap.
+ *
+ * Covers the full IO port range 0 .. 0xffff; the ISA bus proper only
+ * defines ports 0 .. 0x3ff, but add-in cards use higher ports too.
+ */
+
+#define iopb_howmany(a,b) (((a)+(b)-1)/(b))
+
+#define IOPB_MAX 0xffff /* ISA bus allows ports 0..3ff */
+ /* but accelerator cards are funky */
+#define IOPB_BYTES (iopb_howmany(IOPB_MAX+1,8))
+
+typedef unsigned char isa_iopb[IOPB_BYTES];
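+
+/*
+ * Port n is controlled by bit (n & 7) of bitmap byte (n >> 3):
+ * a clear bit lets user mode access the port, while a set bit makes
+ * IN/OUT on that port raise a general protection fault.
+ */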
+
+/*
+ * An IO permission map is a task segment with an IO permission bitmap.
+ */
+
+struct iopb_tss {
+ struct i386_tss tss; /* task state segment */
+ isa_iopb bitmap; /* bitmap of mapped IO ports */
+ unsigned int barrier; /* bitmap barrier for CPU slop */
+ queue_head_t io_port_list; /* list of mapped IO ports */
+ int iopb_desc[2]; /* descriptor for this TSS */
+};
+
+typedef struct iopb_tss *iopb_tss_t;
+
+#endif /* _I386_IOPB_H_ */
+
diff --git a/i386/i386/ipl.h b/i386/i386/ipl.h
new file mode 100644
index 00000000..06ee58a0
--- /dev/null
+++ b/i386/i386/ipl.h
@@ -0,0 +1,77 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+
+#define SPL0 0
+#define SPL1 1
+#define SPL2 2
+#define SPL3 3
+#define SPL4 4
+#define SPL5 5
+#define SPL6 6
+#define SPL7 7
+
+#define SPLPP 5
+#define SPLTTY 6
+#define SPLNI 6
+#define SPLHI 7
+#define IPLHI SPLHI
+
+#define NSPL (SPL7 + 1)
+
+#ifdef KERNEL
+#ifndef ASSEMBLER
+#include <machine/machspl.h>
+extern int (*ivect[])();
+extern int iunit[];
+extern int intpri[];
+#endif /* ASSEMBLER */
+#endif /* KERNEL */
diff --git a/i386/i386/ktss.c b/i386/i386/ktss.c
new file mode 100644
index 00000000..836a6f6c
--- /dev/null
+++ b/i386/i386/ktss.c
@@ -0,0 +1,61 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Kernel task state segment.
+ *
+ * We don't use the i386 task switch mechanism. We need a TSS
+ * only to hold the kernel stack pointer for the current thread.
+ *
+ * XXX multiprocessor??
+ */
+#include "vm_param.h"
+#include "seg.h"
+#include "gdt.h"
+#include "ktss.h"
+
+void
+ktss_init()
+{
+ /* XXX temporary exception stack */
+	static int exception_stack[1024];
+
+ /* Initialize the master TSS descriptor. */
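+	/*
+	 * The limit covers the TSS itself plus a full 64K-port IO
+	 * bitmap and its terminating byte; descriptor limits are
+	 * inclusive, hence the trailing -1.
+	 */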
+ fill_gdt_descriptor(KERNEL_TSS,
+ kvtolin(&ktss), sizeof(ktss)+65536/8+1-1,
+ ACC_PL_K|ACC_TSS, 0);
+
+ /* Initialize the master TSS. */
+ ktss.ss0 = KERNEL_DS;
+ ktss.esp0 = (unsigned)(exception_stack+1024);
+ ktss.io_bit_map_offset = sizeof(ktss);
+
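+	/*
+	 * The processor requires the byte just past the end of the IO
+	 * bitmap to be all 1's, so that accesses running off the end
+	 * of the map always fault.
+	 */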
+ /* Set the last byte in the I/O bitmap to all 1's. */
+ ((unsigned char*)&ktss)[sizeof(ktss)+65536/8] = 0xff;
+
+ /* Load the TSS. */
+ ltr(KERNEL_TSS);
+}
+
diff --git a/i386/i386/ktss.h b/i386/i386/ktss.h
new file mode 100644
index 00000000..021f47fd
--- /dev/null
+++ b/i386/i386/ktss.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_KTSS_
+#define _I386_KTSS_
+
+#include "tss.h"
+
+extern struct i386_tss ktss;
+
+#endif /* _I386_KTSS_ */
diff --git a/i386/i386/kttd_interface.c b/i386/i386/kttd_interface.c
new file mode 100644
index 00000000..3f2f3900
--- /dev/null
+++ b/i386/i386/kttd_interface.c
@@ -0,0 +1,577 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include "mach_ttd.h"
+
+#if MACH_TTD
+
+#include <mach/machine/eflags.h>
+
+#include <kern/thread.h>
+#include <kern/processor.h>
+#include <mach/thread_status.h>
+#include <mach/vm_param.h>
+#include <i386/seg.h>
+#include <sys/types.h>
+
+#include <ttd/ttd_types.h>
+#include <ttd/ttd_stub.h>
+#include <machine/kttd_machdep.h>
+
+/*
+ * Shamelessly copied from the ddb sources:
+ */
+struct i386_saved_state *kttd_last_saved_statep;
+struct i386_saved_state kttd_nested_saved_state;
+unsigned last_kttd_sp;
+
+struct i386_saved_state kttd_regs; /* was ddb_regs */
+
+extern int kttd_debug;
+extern boolean_t kttd_enabled;
+extern vm_offset_t virtual_end;
+
+#define I386_BREAKPOINT 0xcc
+
+/*
+ * kernel map
+ */
+extern vm_map_t kernel_map;
+
+boolean_t kttd_console_init(void)
+{
+ /*
+ * Get local machine's IP address via bootp.
+ */
+ return(ttd_ip_bootp());
+}
+
+/*
+ * Execute a break instruction that will invoke ttd
+ */
+void kttd_break(void)
+{
+ if (!kttd_enabled)
+ return;
+ asm("int3");
+}
+
+/*
+ * Halt all processors on the 386at (not really applicable).
+ */
+void kttd_halt_processors(void)
+{
+ /* XXX Fix for Sequent!!! */
+ /* Only one on AT386, so ignore for now... */
+}
+
+/*
+ * Determine whether or not the ethernet device driver supports
+ * ttd.
+ */
+boolean_t kttd_supported(void)
+{
+ return ((int)ttd_get_packet != NULL);
+}
+
+/*
+ * Return the ttd machine type for the i386at
+ */
+ttd_machine_type get_ttd_machine_type(void)
+{
+ return TTD_AT386;
+}
+
+void kttd_machine_getregs(struct i386_gdb_register_state *ttd_state)
+{
+ ttd_state->gs = kttd_regs.gs;
+ ttd_state->fs = kttd_regs.fs;
+ ttd_state->es = kttd_regs.es;
+ ttd_state->ds = kttd_regs.ds;
+ ttd_state->edi = kttd_regs.edi;
+ ttd_state->esi = kttd_regs.esi;
+ ttd_state->ebp = kttd_regs.ebp;
+
+ /*
+ * This is set up to point to the right place in
+	 * kttd_trap.
+ */
+ ttd_state->esp = kttd_regs.uesp;
+
+ ttd_state->ebx = kttd_regs.ebx;
+ ttd_state->edx = kttd_regs.edx;
+ ttd_state->ecx = kttd_regs.ecx;
+ ttd_state->eax = kttd_regs.eax;
+ ttd_state->eip = kttd_regs.eip;
+ ttd_state->cs = kttd_regs.cs;
+ ttd_state->efl = kttd_regs.efl;
+ ttd_state->ss = kttd_regs.ss;
+}
+
+void kttd_machine_setregs(struct i386_gdb_register_state *ttd_state)
+{
+ if (kttd_regs.gs != ttd_state->gs) {
+ if (kttd_debug)
+ printf("gs 0x%x:0x%x, ", kttd_regs.gs, ttd_state->gs);
+ kttd_regs.gs = ttd_state->gs;
+ }
+ if (kttd_regs.fs != ttd_state->fs) {
+ if (kttd_debug)
+ printf("fs 0x%x:0x%x, ", kttd_regs.fs, ttd_state->fs);
+ kttd_regs.fs = ttd_state->fs;
+ }
+ if (kttd_regs.es != ttd_state->es) {
+ if (kttd_debug)
+ printf("es 0x%x:0x%x, ", kttd_regs.es, ttd_state->es);
+ kttd_regs.es = ttd_state->es;
+ }
+ if (kttd_regs.ds != ttd_state->ds) {
+ if (kttd_debug)
+ printf("ds 0x%x:0x%x, ", kttd_regs.ds, ttd_state->ds);
+ kttd_regs.ds = ttd_state->ds;
+ }
+ if (kttd_regs.edi != ttd_state->edi) {
+ if (kttd_debug)
+ printf("edi 0x%x:0x%x, ", kttd_regs.edi, ttd_state->edi);
+ kttd_regs.edi = ttd_state->edi;
+ }
+ if (kttd_regs.esi != ttd_state->esi) {
+ if (kttd_debug)
+ printf("esi 0x%x:0x%x, ", kttd_regs.esi, ttd_state->esi);
+ kttd_regs.esi = ttd_state->esi;
+ }
+ if (kttd_regs.ebp != ttd_state->ebp) {
+ if (kttd_debug)
+ printf("ebp 0x%x:0x%x, ", kttd_regs.ebp, ttd_state->ebp);
+ kttd_regs.ebp = ttd_state->ebp;
+ }
+ if (kttd_regs.ebx != ttd_state->ebx) {
+ if (kttd_debug)
+ printf("ebx 0x%x:0x%x, ", kttd_regs.ebx, ttd_state->ebx);
+ kttd_regs.ebx = ttd_state->ebx;
+ }
+ if (kttd_regs.edx != ttd_state->edx) {
+ if (kttd_debug)
+ printf("edx 0x%x:0x%x, ", kttd_regs.edx, ttd_state->edx);
+ kttd_regs.edx = ttd_state->edx;
+ }
+ if (kttd_regs.ecx != ttd_state->ecx) {
+ if (kttd_debug)
+ printf("ecx 0x%x:0x%x, ", kttd_regs.ecx, ttd_state->ecx);
+ kttd_regs.ecx = ttd_state->ecx;
+ }
+ if (kttd_regs.eax != ttd_state->eax) {
+ if (kttd_debug)
+ printf("eax 0x%x:0x%x, ", kttd_regs.eax, ttd_state->eax);
+ kttd_regs.eax = ttd_state->eax;
+ }
+ if (kttd_regs.eip != ttd_state->eip) {
+ if (kttd_debug)
+ printf("eip 0x%x:0x%x, ", kttd_regs.eip, ttd_state->eip);
+ kttd_regs.eip = ttd_state->eip;
+ }
+ if (kttd_regs.cs != ttd_state->cs) {
+ if (kttd_debug)
+ printf("cs 0x%x:0x%x, ", kttd_regs.cs, ttd_state->cs);
+ kttd_regs.cs = ttd_state->cs;
+ }
+ if (kttd_regs.efl != ttd_state->efl) {
+ if (kttd_debug)
+ printf("efl 0x%x:0x%x, ", kttd_regs.efl, ttd_state->efl);
+ kttd_regs.efl = ttd_state->efl;
+ }
+#if 0
+ /*
+ * We probably shouldn't mess with the uesp or the ss? XXX
+ */
+ if (kttd_regs.ss != ttd_state->ss) {
+ if (kttd_debug)
+ printf("ss 0x%x:0x%x, ", kttd_regs.ss, ttd_state->ss);
+ kttd_regs.ss = ttd_state->ss;
+ }
+#endif /* 0 */
+
+}
+
+/*
+ * Enable a page for access, faulting it in if necessary
+ */
+boolean_t kttd_mem_access(vm_offset_t offset, vm_prot_t access)
+{
+ kern_return_t code;
+
+ /*
+	 * VM_MIN_KERNEL_ADDRESS is the beginning of equivalently
+	 * mapped kernel memory and virtual_end is the end; anything
+	 * in between is always accessible.
+ */
+ if (offset >= VM_MIN_KERNEL_ADDRESS && offset < virtual_end)
+ return TRUE;
+
+ if (offset >= virtual_end) {
+ /*
+ * fault in the memory just to make sure we can access it
+ */
+ if (kttd_debug)
+ printf(">>>>>>>>>>Faulting in memory: 0x%x, 0x%x\n",
+ trunc_page(offset), access);
+ code = vm_fault(kernel_map, trunc_page(offset), access, FALSE,
+ FALSE, (void (*)()) 0);
+ }else{
+ /*
+ * Check for user thread
+ */
+#if 1
+ if ((current_thread() != THREAD_NULL) &&
+ (current_thread()->task->map->pmap != kernel_pmap) &&
+ (current_thread()->task->map->pmap != PMAP_NULL)) {
+ code = vm_fault(current_thread()->task->map,
+ trunc_page(offset), access, FALSE,
+ FALSE, (void (*)()) 0);
+ }else{
+ /*
+ * Invalid kernel address (below VM_MIN_KERNEL_ADDRESS)
+ */
+ return FALSE;
+ }
+#else
+ if (kttd_debug)
+ printf("==========Would've tried to map in user area 0x%x\n",
+ trunc_page(offset));
+ return FALSE;
+#endif /* 0 */
+ }
+
+ return (code == KERN_SUCCESS);
+}
+
+/*
+ * See if we modified the kernel text and if so flush the caches.
+ * This routine is never called with a range that crosses a page
+ * boundary.
+ */
+void kttd_flush_cache(vm_offset_t offset, vm_size_t length)
+{
+ /* 386 doesn't need this */
+ return;
+}
+
+/*
+ * Insert a breakpoint into memory.
+ */
+boolean_t kttd_insert_breakpoint(vm_address_t address,
+ ttd_saved_inst *saved_inst)
+{
+ /*
+ * Saved old memory data:
+ */
+ *saved_inst = *(unsigned char *)address;
+
+ /*
+ * Put in a Breakpoint:
+ */
+ *(unsigned char *)address = I386_BREAKPOINT;
+
+ return TRUE;
+}
+
+/*
+ * Remove breakpoint from memory.
+ */
+boolean_t kttd_remove_breakpoint(vm_address_t address,
+ ttd_saved_inst saved_inst)
+{
+ /*
+ * replace it:
+ */
+ *(unsigned char *)address = (saved_inst & 0xff);
+
+ return TRUE;
+}
+
+/*
+ * Set single stepping mode. Assumes that program counter is set
+ * to the location where single stepping is to begin. The 386 is
+ * an easy single stepping machine, ie. built into the processor.
+ */
+boolean_t kttd_set_machine_single_step(void)
+{
+ /* Turn on Single Stepping */
+ kttd_regs.efl |= EFL_TF;
+
+ return TRUE;
+}
+
+/*
+ * Clear single stepping mode.
+ */
+boolean_t kttd_clear_machine_single_step(void)
+{
+ /* Turn off the trace flag */
+ kttd_regs.efl &= ~EFL_TF;
+
+ return TRUE;
+}
+
+
+/*
+ * kttd_type_to_ttdtrap:
+ *
+ * Fills in the task and thread info structures with the reason
+ * for entering the Teledebugger (bp, single step, pg flt, etc.)
+ *
+ */
+void kttd_type_to_ttdtrap(int type)
+{
+ /* XXX Fill this in sometime for i386 */
+}
+
+/*
+ * kttd_trap:
+ *
+ * This routine is called from the trap or interrupt handler when a
+ * breakpoint instruction is encountered or a single step operation
+ * completes. The argument is a pointer to a machine dependent
+ * saved_state structure that was built on the interrupt or kernel stack.
+ *
+ */
+boolean_t kttd_trap(int type, int code, struct i386_saved_state *regs)
+{
+ int s;
+
+ if (kttd_debug)
+ printf("kttd_TRAP, before splhigh()\n");
+
+	/*
+	 * Check that TTD is supported (both by the machine's drivers
+	 * and, if using ethernet, by bootp).  If it isn't, disable TTD
+	 * and return FALSE so the caller can switch off to kdb if it
+	 * is resident, or just hang/panic otherwise.
+	 */
+ if (!kttd_supported()) {
+ kttd_enabled = FALSE;
+ return FALSE;
+ }
+
+ s = splhigh();
+
+ /*
+ * We are already in TTD!
+ */
+ if (++kttd_active > MAX_KTTD_ACTIVE) {
+ printf("kttd_trap: RE-ENTERED!!!\n");
+ }
+
+ if (kttd_debug)
+ printf("kttd_TRAP, after splhigh()\n");
+
+ /* Should switch to kttd's own stack here. */
+
+ kttd_regs = *regs;
+
+ if ((regs->cs & 0x3) == 0) {
+ /*
+ * Kernel mode - esp and ss not saved
+ */
+ kttd_regs.uesp = (int)&regs->uesp; /* kernel stack pointer */
+ kttd_regs.ss = KERNEL_DS;
+ }
+
+ /*
+ * If this was not entered via an interrupt (type != -1)
+ * then we've entered via a bpt, single, etc. and must
+ * set the globals.
+ *
+ * Setup the kttd globals for entry....
+ */
+ if (type != -1) {
+ kttd_current_request = NULL;
+ kttd_current_length = 0;
+ kttd_current_kmsg = NULL;
+ kttd_run_status = FULL_STOP;
+ }else{
+ /*
+ * We know that we can only get here if we did a kttd_intr
+ * since it's the way that we are called with type -1 (via
+ * the trampoline), so we don't have to worry about entering
+ * from Cntl-Alt-D like the mips does.
+ */
+ /*
+ * Perform sanity check!
+ */
+ if ((kttd_current_request == NULL) ||
+ (kttd_current_length == 0) ||
+ (kttd_current_kmsg == NULL) ||
+ (kttd_run_status != ONE_STOP)) {
+
+ printf("kttd_trap: INSANITY!!!\n");
+ }
+ }
+
+ kttd_task_trap(type, code, (regs->cs & 0x3) != 0);
+
+ regs->eip = kttd_regs.eip;
+ regs->efl = kttd_regs.efl;
+ regs->eax = kttd_regs.eax;
+ regs->ecx = kttd_regs.ecx;
+ regs->edx = kttd_regs.edx;
+ regs->ebx = kttd_regs.ebx;
+ if (regs->cs & 0x3) {
+ /*
+ * user mode - saved esp and ss valid
+ */
+ regs->uesp = kttd_regs.uesp; /* user stack pointer */
+ regs->ss = kttd_regs.ss & 0xffff; /* user stack segment */
+ }
+ regs->ebp = kttd_regs.ebp;
+ regs->esi = kttd_regs.esi;
+ regs->edi = kttd_regs.edi;
+ regs->es = kttd_regs.es & 0xffff;
+ regs->cs = kttd_regs.cs & 0xffff;
+ regs->ds = kttd_regs.ds & 0xffff;
+ regs->fs = kttd_regs.fs & 0xffff;
+ regs->gs = kttd_regs.gs & 0xffff;
+
+ if (--kttd_active < MIN_KTTD_ACTIVE)
+ printf("ttd_trap: kttd_active < 0\n");
+
+ if (kttd_debug) {
+ printf("Leaving kttd_trap, kttd_active = %d\n", kttd_active);
+ }
+
+ /*
+ * Only reset this if we entered kttd_trap via an async trampoline.
+ */
+ if (type == -1) {
+ if (kttd_run_status == RUNNING)
+ printf("kttd_trap: $$$$$ run_status already RUNNING! $$$$$\n");
+ kttd_run_status = RUNNING;
+ }
+
+ /* Is this right? XXX */
+ kttd_run_status = RUNNING;
+
+ (void) splx(s);
+
+ /*
+ * Return true, that yes we handled the trap.
+ */
+ return TRUE;
+}
+
+/*
+ * Enter KTTD through a network packet trap.
+ * We show the registers as of the network interrupt
+ * instead of those at its call to KDB.
+ */
+struct int_regs {
+ int gs;
+ int fs;
+ int edi;
+ int esi;
+ int ebp;
+ int ebx;
+ struct i386_interrupt_state *is;
+};
+
+void
+kttd_netentry(int_regs)
+ struct int_regs *int_regs;
+{
+ struct i386_interrupt_state *is = int_regs->is;
+ int s;
+
+ if (kttd_debug)
+		printf("kttd_NETENTRY before splhigh()\n");
+
+ s = splhigh();
+
+ if (kttd_debug)
+		printf("kttd_NETENTRY after splhigh()\n");
+
+ if (is->cs & 0x3) {
+ /*
+ * Interrupted from User Space
+ */
+ kttd_regs.uesp = ((int *)(is+1))[0];
+ kttd_regs.ss = ((int *)(is+1))[1];
+ }
+ else {
+ /*
+ * Interrupted from Kernel Space
+ */
+ kttd_regs.ss = KERNEL_DS;
+ kttd_regs.uesp= (int)(is+1);
+ }
+ kttd_regs.efl = is->efl;
+ kttd_regs.cs = is->cs;
+ kttd_regs.eip = is->eip;
+ kttd_regs.eax = is->eax;
+ kttd_regs.ecx = is->ecx;
+ kttd_regs.edx = is->edx;
+ kttd_regs.ebx = int_regs->ebx;
+ kttd_regs.ebp = int_regs->ebp;
+ kttd_regs.esi = int_regs->esi;
+ kttd_regs.edi = int_regs->edi;
+ kttd_regs.ds = is->ds;
+ kttd_regs.es = is->es;
+ kttd_regs.fs = int_regs->fs;
+ kttd_regs.gs = int_regs->gs;
+
+ kttd_active++;
+ kttd_task_trap(-1, 0, (kttd_regs.cs & 0x3) != 0);
+ kttd_active--;
+
+ if (kttd_regs.cs & 0x3) {
+ ((int *)(is+1))[0] = kttd_regs.uesp;
+ ((int *)(is+1))[1] = kttd_regs.ss & 0xffff;
+ }
+ is->efl = kttd_regs.efl;
+ is->cs = kttd_regs.cs & 0xffff;
+ is->eip = kttd_regs.eip;
+ is->eax = kttd_regs.eax;
+ is->ecx = kttd_regs.ecx;
+ is->edx = kttd_regs.edx;
+ int_regs->ebx = kttd_regs.ebx;
+ int_regs->ebp = kttd_regs.ebp;
+ int_regs->esi = kttd_regs.esi;
+ int_regs->edi = kttd_regs.edi;
+ is->ds = kttd_regs.ds & 0xffff;
+ is->es = kttd_regs.es & 0xffff;
+ int_regs->fs = kttd_regs.fs & 0xffff;
+ int_regs->gs = kttd_regs.gs & 0xffff;
+
+ if (kttd_run_status == RUNNING)
+ printf("kttd_netentry: %%%%% run_status already RUNNING! %%%%%\n");
+ kttd_run_status = RUNNING;
+
+ (void) splx(s);
+}
+
+#endif /* MACH_TTD */
diff --git a/i386/i386/kttd_machdep.h b/i386/i386/kttd_machdep.h
new file mode 100644
index 00000000..8ac7de18
--- /dev/null
+++ b/i386/i386/kttd_machdep.h
@@ -0,0 +1,59 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KTTD_MACHDEP_H_
+#define _KTTD_MACHDEP_H_
+
+#define MAX_KTTD_ACTIVE 2
+#define MIN_KTTD_ACTIVE 0
+
+/*
+ * Register state for gdb
+ */
+struct i386_gdb_register_state {
+ int eax;
+ int ecx;
+ int edx;
+ int ebx;
+ int esp; /* 4 */
+ int ebp; /* 5 */
+ int esi;
+ int edi;
+ int eip; /* 8 */
+ int efl; /* 9 */
+ int cs;
+ int ss;
+ int ds;
+ int es;
+ int fs;
+ int gs;
+};
+
+typedef struct i386_gdb_register_state ttd_machine_state;
+
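+/*
+ * Only the low byte of a saved instruction is significant:
+ * breakpoints overwrite a single byte with int3 (0xcc), as
+ * kttd_insert_breakpoint and kttd_remove_breakpoint show.
+ */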
+typedef unsigned long ttd_saved_inst;
+
+#endif /* _KTTD_MACHDEP_H_ */
diff --git a/i386/i386/ldt.c b/i386/i386/ldt.c
new file mode 100644
index 00000000..a2b125ce
--- /dev/null
+++ b/i386/i386/ldt.c
@@ -0,0 +1,64 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * "Local" descriptor table. At the moment, all tasks use the
+ * same LDT.
+ */
+#include <mach/machine/vm_types.h>
+
+#include "vm_param.h"
+#include "seg.h"
+#include "gdt.h"
+#include "ldt.h"
+
+extern int syscall();
+
+struct real_descriptor ldt[LDTSZ];
+
+void
+ldt_init()
+{
+ /* Initialize the master LDT descriptor in the GDT. */
+ fill_gdt_descriptor(KERNEL_LDT,
+ kvtolin(&ldt), sizeof(ldt)-1,
+ ACC_PL_K|ACC_LDT, 0);
+
+ /* Initialize the LDT descriptors. */
+ fill_ldt_gate(USER_SCALL,
+ (vm_offset_t)&syscall, KERNEL_CS,
+ ACC_PL_U|ACC_CALL_GATE, 0);
+ fill_ldt_descriptor(USER_CS,
+ VM_MIN_ADDRESS, VM_MAX_ADDRESS-VM_MIN_ADDRESS,
+ /* XXX LINEAR_... */
+ ACC_PL_U|ACC_CODE_R, SZ_32);
+ fill_ldt_descriptor(USER_DS,
+ VM_MIN_ADDRESS, VM_MAX_ADDRESS-VM_MIN_ADDRESS,
+ ACC_PL_U|ACC_DATA_W, SZ_32);
+
+ /* Activate the LDT. */
+ lldt(KERNEL_LDT);
+}
+
diff --git a/i386/i386/ldt.h b/i386/i386/ldt.h
new file mode 100644
index 00000000..da0b0af3
--- /dev/null
+++ b/i386/i386/ldt.h
@@ -0,0 +1,66 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON, IBM, AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON, IBM, AND CSL DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * This file describes the standard LDT provided by default
+ * to all user-level Mach tasks.
+ */
+#ifndef _I386_LDT_
+#define _I386_LDT_
+
+#include "seg.h"
+
+/*
+ * User descriptors for Mach - 32-bit flat address space
+ */
+#define USER_SCALL 0x07 /* system call gate */
+#define USER_CS 0x17 /* user code segment */
+#define USER_DS 0x1f /* user data segment */
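+
+/*
+ * In each selector above, bit 2 (TI) selects the LDT and the low two
+ * bits request privilege level 3; the descriptor slot is the selector
+ * value divided by 8, which is how fill_ldt_descriptor() indexes ldt[].
+ */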
+
+#define LDTSZ 4
+
+
+#ifndef ASSEMBLER
+
+extern struct real_descriptor ldt[LDTSZ];
+
+/* Fill a segment descriptor in the LDT. */
+#define fill_ldt_descriptor(selector, base, limit, access, sizebits) \
+ fill_descriptor(&ldt[selector/8], base, limit, access, sizebits)
+
+#define fill_ldt_gate(selector, offset, dest_selector, access, word_count) \
+ fill_gate((struct real_gate*)&ldt[selector/8], \
+ offset, dest_selector, access, word_count)
+
+#endif /* !ASSEMBLER */
+
+#endif /* _I386_LDT_ */
diff --git a/i386/i386/lock.h b/i386/i386/lock.h
new file mode 100644
index 00000000..053a3ea6
--- /dev/null
+++ b/i386/i386/lock.h
@@ -0,0 +1,130 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-dependent simple locks for the i386.
+ */
+#ifndef _I386_LOCK_H_
+#define _I386_LOCK_H_
+
+#if NCPUS > 1
+
+/*
+ * All of the locking routines are built from calls on
+ * a locked-exchange operation. Values of the lock are
+ * 0 for unlocked, 1 for locked.
+ */
+
+#ifdef __GNUC__
+
+/*
+ * The code here depends on the GNU C compiler.
+ */
+
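+/*
+ * _simple_lock_xchg_ atomically exchanges new_val with *lock and
+ * returns the previous value; xchg with a memory operand locks the
+ * bus implicitly, so no explicit lock prefix is needed.
+ */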
+#define _simple_lock_xchg_(lock, new_val) \
+ ({ register int _old_val_; \
+ asm volatile("xchgl %0, %2" \
+ : "=r" (_old_val_) \
+ : "0" (new_val), "m" (*(lock)) \
+ ); \
+ _old_val_; \
+ })
+
+#define simple_lock_init(l) \
+ ((l)->lock_data = 0)
+
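+/*
+ * simple_lock is a test-and-test-and-set spin: the xchg tries to take
+ * the lock, and on failure the inner loop spins on plain reads (which
+ * stay in the cache) until the lock looks free before trying again.
+ */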
+#define simple_lock(l) \
+ ({ \
+ while(_simple_lock_xchg_(l, 1)) \
+ while (*(volatile int *)&(l)->lock_data) \
+ continue; \
+ 0; \
+ })
+
+#define simple_unlock(l) \
+ (_simple_lock_xchg_(l, 0))
+
+#define simple_lock_try(l) \
+ (!_simple_lock_xchg_(l, 1))
+
+/*
+ * General bit-lock routines.
+ */
+#define bit_lock(bit, l) \
+ ({ \
+ asm volatile(" jmp 1f \n\
+ 0: btl %0, %1 \n\
+ jb 0b \n\
+ 1: lock \n\
+ btsl %0, %1 \n\
+ jb 0b" \
+ : \
+ : "r" (bit), "m" (*(volatile int *)(l))); \
+ 0; \
+ })
+
+#define bit_unlock(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btrl %0, %1" \
+ : \
+ : "r" (bit), "m" (*(volatile int *)(l))); \
+ 0; \
+ })
+
+/*
+ * Set or clear individual bits in a long word.
+ * The locked access is needed only to lock access
+ * to the word, not to individual bits.
+ */
+#define i_bit_set(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btsl %0, %1" \
+ : \
+ : "r" (bit), "m" (*(l)) ); \
+ 0; \
+ })
+
+#define i_bit_clear(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btrl %0, %1" \
+ : \
+ : "r" (bit), "m" (*(l)) ); \
+ 0; \
+ })
+
+#endif /* __GNUC__ */
+
+extern void simple_lock_pause();
+
+#endif /* NCPUS > 1 */
+
+
+#include_next "lock.h"
+
+
+#endif /* _I386_LOCK_H_ */
diff --git a/i386/i386/locore.S b/i386/i386/locore.S
new file mode 100644
index 00000000..8bc5d5e0
--- /dev/null
+++ b/i386/i386/locore.S
@@ -0,0 +1,1726 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <cpus.h>
+#include <platforms.h>
+#include <mach_kdb.h>
+#include <mach_ttd.h>
+#include <stat_time.h>
+
+#include <mach/machine/asm.h>
+#include <mach/machine/eflags.h>
+#include "proc_reg.h"
+#include "trap.h"
+#include "seg.h"
+#include "ldt.h"
+#include "i386asm.h"
+#include "cpu_number.h"
+
+/*
+ * Fault recovery.
+ */
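+/*
+ * Each RECOVER() entry records a (faulting PC, recovery PC) pair in a
+ * separate text section; the kernel trap handler searches this table
+ * so that routines which touch user addresses (copyin/copyout and the
+ * like) can resume at their error path instead of panicking.
+ */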
+#define RECOVER_TABLE_START \
+ .text 2 ;\
+DATA(recover_table) ;\
+ .text
+
+#define RECOVER(addr) \
+ .text 2 ;\
+ .long 9f ;\
+ .long addr ;\
+ .text ;\
+9:
+
+#define RECOVER_TABLE_END \
+ .text 2 ;\
+ .globl EXT(recover_table_end) ;\
+LEXT(recover_table_end) ;\
+ .text
+
+/*
+ * Retry table for certain successful faults.
+ */
+#define RETRY_TABLE_START \
+ .text 3 ;\
+DATA(retry_table) ;\
+ .text
+
+#define RETRY(addr) \
+ .text 3 ;\
+ .long 9f ;\
+ .long addr ;\
+ .text ;\
+9:
+
+#define RETRY_TABLE_END \
+ .text 3 ;\
+ .globl EXT(retry_table_end) ;\
+LEXT(retry_table_end) ;\
+ .text
+
+/*
+ * Allocate recovery and retry tables.
+ */
+ RECOVER_TABLE_START
+ RETRY_TABLE_START
+
+/*
+ * Timing routines.
+ */
+#if STAT_TIME
+
+#define TIME_TRAP_UENTRY
+#define TIME_TRAP_SENTRY
+#define TIME_TRAP_UEXIT
+#define TIME_INT_ENTRY
+#define TIME_INT_EXIT
+
+#else /* microsecond timing */
+
+/*
+ * Microsecond timing.
+ * Assumes a free-running microsecond counter.
+ * no TIMER_MAX check needed.
+ */
+
+/*
+ * There is only one current time-stamp per CPU, since only
+ * the time-stamp in the current timer is used.
+ * To save time, we allocate the current time-stamps here.
+ */
+ .comm _current_tstamp, 4*NCPUS
+
+/*
+ * Update time on user trap entry.
+ * 11 instructions (including cli on entry)
+ * Assumes CPU number in %edx.
+ * Uses %eax, %ebx, %ecx.
+ */
+#define TIME_TRAP_UENTRY \
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer value */ ;\
+ movl CX(_current_tstamp,%edx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(_current_tstamp,%edx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(_current_timer,%edx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: addl $(TH_SYS_TIMER-TH_USER_TIMER),%ecx ;\
+ /* switch to sys timer */;\
+ movl %ecx,CX(_current_timer,%edx) /* make it current */ ;\
+ sti /* allow interrupts */
+
+/*
+ * Update time on system call entry.
+ * 11 instructions (including cli on entry)
+ * Assumes CPU number in %edx.
+ * Uses %ebx, %ecx.
+ * Same as TIME_TRAP_UENTRY, but preserves %eax.
+ */
+#define TIME_TRAP_SENTRY \
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer value */ ;\
+ movl CX(_current_tstamp,%edx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(_current_tstamp,%edx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(_current_timer,%edx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ pushl %eax /* save %eax */ ;\
+ call timer_normalize /* normalize timer */ ;\
+ popl %eax /* restore %eax */ ;\
+0: addl $(TH_SYS_TIMER-TH_USER_TIMER),%ecx ;\
+ /* switch to sys timer */;\
+ movl %ecx,CX(_current_timer,%edx) /* make it current */ ;\
+ sti /* allow interrupts */
+
+/*
+ * update time on user trap exit.
+ * 10 instructions.
+ * Assumes CPU number in %edx.
+ * Uses %ebx, %ecx.
+ */
+#define TIME_TRAP_UEXIT \
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer */ ;\
+ movl CX(_current_tstamp,%edx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(_current_tstamp,%edx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(_current_timer,%edx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: addl $(TH_USER_TIMER-TH_SYS_TIMER),%ecx ;\
+ /* switch to user timer */;\
+ movl %ecx,CX(_current_timer,%edx) /* make it current */
+
+/*
+ * update time on interrupt entry.
+ * 9 instructions.
+ * Assumes CPU number in %edx.
+ * Leaves old timer in %ebx.
+ * Uses %ecx.
+ */
+#define TIME_INT_ENTRY \
+ movl VA_ETC,%ecx /* get timer */ ;\
+ movl CX(_current_tstamp,%edx),%ebx /* get old time stamp */;\
+ movl %ecx,CX(_current_tstamp,%edx) /* set new time stamp */;\
+ subl %ebx,%ecx /* elapsed = new-old */ ;\
+ movl CX(_current_timer,%edx),%ebx /* get current timer */ ;\
+ addl %ecx,LOW_BITS(%ebx) /* add to low bits */ ;\
+ leal CX(0,%edx),%ecx /* timer is 16 bytes */ ;\
+ lea CX(_kernel_timer,%edx),%ecx /* get interrupt timer*/;\
+	movl	%ecx,CX(_current_timer,%edx)	/* set timer */
+
+/*
+ * update time on interrupt exit.
+ * 11 instructions
+ * Assumes CPU number in %edx, old timer in %ebx.
+ * Uses %eax, %ecx.
+ */
+#define TIME_INT_EXIT \
+ movl VA_ETC,%eax /* get timer */ ;\
+ movl CX(_current_tstamp,%edx),%ecx /* get old time stamp */;\
+ movl %eax,CX(_current_tstamp,%edx) /* set new time stamp */;\
+ subl %ecx,%eax /* elapsed = new-old */ ;\
+ movl CX(_current_timer,%edx),%ecx /* get current timer */ ;\
+ addl %eax,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: testb $0x80,LOW_BITS+3(%ebx) /* old timer overflow? */;\
+ jz 0f /* if overflow, */ ;\
+ movl %ebx,%ecx /* get old timer */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: movl %ebx,CX(_current_timer,%edx) /* set timer */
+
+
+/*
+ * Normalize timer in ecx.
+ * Preserves edx; clobbers eax.
+ */
+ .align 2
+timer_high_unit:
+ .long TIMER_HIGH_UNIT /* div has no immediate opnd */
+
+timer_normalize:
+ pushl %edx /* save register */
+ xorl %edx,%edx /* clear divisor high */
+ movl LOW_BITS(%ecx),%eax /* get divisor low */
+ divl timer_high_unit,%eax /* quotient in eax */
+ /* remainder in edx */
+ addl %eax,HIGH_BITS_CHECK(%ecx) /* add high_inc to check */
+ movl %edx,LOW_BITS(%ecx) /* remainder to low_bits */
+ addl %eax,HIGH_BITS(%ecx) /* add high_inc to high bits */
+ popl %edx /* restore register */
+ ret
+
+/*
+ * Switch to a new timer.
+ */
+ENTRY(timer_switch)
+ CPU_NUMBER(%edx) /* get this CPU */
+ movl VA_ETC,%ecx /* get timer */
+ movl CX(_current_tstamp,%edx),%eax /* get old time stamp */
+ movl %ecx,CX(_current_tstamp,%edx) /* set new time stamp */
+ subl %ecx,%eax /* elapsed = new - old */
+ movl CX(_current_timer,%edx),%ecx /* get current timer */
+ addl %eax,LOW_BITS(%ecx) /* add to low bits */
+ jns 0f /* if overflow, */
+ call timer_normalize /* normalize timer */
+0:
+ movl S_ARG0,%ecx /* get new timer */
+ movl %ecx,CX(_current_timer,%edx) /* set timer */
+ ret
+
+/*
+ * Initialize the first timer for a CPU.
+ */
+ENTRY(start_timer)
+ CPU_NUMBER(%edx) /* get this CPU */
+ movl VA_ETC,%ecx /* get timer */
+ movl %ecx,CX(_current_tstamp,%edx) /* set initial time stamp */
+ movl S_ARG0,%ecx /* get timer */
+ movl %ecx,CX(_current_timer,%edx) /* set initial timer */
+ ret
+
+#endif /* accurate timing */
+
+/* */
+
+/*
+ * Trap/interrupt entry points.
+ *
+ * All traps must create the following save area on the kernel stack:
+ *
+ * gs
+ * fs
+ * es
+ * ds
+ * edi
+ * esi
+ * ebp
+ * cr2 if page fault - otherwise unused
+ * ebx
+ * edx
+ * ecx
+ * eax
+ * trap number
+ * error code
+ * eip
+ * cs
+ * eflags
+ * user esp - if from user
+ * user ss - if from user
+ * es - if from V86 thread
+ * ds - if from V86 thread
+ * fs - if from V86 thread
+ * gs - if from V86 thread
+ *
+ */
+
+/*
+ * General protection or segment-not-present fault.
+ * Check for a GP/NP fault in the kernel_return
+ * sequence; if there, report it as a GP/NP fault on the user's instruction.
+ *
+ * esp-> 0: trap code (NP or GP)
+ * 4: segment number in error
+ * 8 eip
+ * 12 cs
+ * 16 eflags
+ * 20 old registers (trap is from kernel)
+ */
+ENTRY(t_gen_prot)
+ pushl $(T_GENERAL_PROTECTION) /* indicate fault type */
+ jmp trap_check_kernel_exit /* check for kernel exit sequence */
+
+ENTRY(t_segnp)
+ pushl $(T_SEGMENT_NOT_PRESENT)
+ /* indicate fault type */
+
+trap_check_kernel_exit:
+ testl $(EFL_VM),16(%esp) /* is trap from V86 mode? */
+ jnz EXT(alltraps) /* isn`t kernel trap if so */
+ testl $3,12(%esp) /* is trap from kernel mode? */
+ jne EXT(alltraps) /* if so: */
+ /* check for the kernel exit sequence */
+ cmpl $_kret_iret,8(%esp) /* on IRET? */
+ je fault_iret
+ cmpl $_kret_popl_ds,8(%esp) /* popping DS? */
+ je fault_popl_ds
+ cmpl $_kret_popl_es,8(%esp) /* popping ES? */
+ je fault_popl_es
+ cmpl $_kret_popl_fs,8(%esp) /* popping FS? */
+ je fault_popl_fs
+ cmpl $_kret_popl_gs,8(%esp) /* popping GS? */
+ je fault_popl_gs
+take_fault: /* if none of the above: */
+ jmp EXT(alltraps) /* treat as normal trap. */
+
+/*
+ * GP/NP fault on IRET: CS or SS is in error.
+ * All registers contain the user's values.
+ *
+ * on SP is
+ * 0 trap number
+ * 4 errcode
+ * 8 eip
+ * 12 cs --> trapno
+ * 16 efl --> errcode
+ * 20 user eip
+ * 24 user cs
+ * 28 user eflags
+ * 32 user esp
+ * 36 user ss
+ */
+fault_iret:
+ movl %eax,8(%esp) /* save eax (we don`t need saved eip) */
+ popl %eax /* get trap number */
+ movl %eax,12-4(%esp) /* put in user trap number */
+ popl %eax /* get error code */
+ movl %eax,16-8(%esp) /* put in user errcode */
+ popl %eax /* restore eax */
+ jmp EXT(alltraps) /* take fault */
+
+/*
+ * Fault restoring a segment register. The user's registers are still
+ * saved on the stack. The offending segment register has not been
+ * popped.
+ */
+fault_popl_ds:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_es /* (DS on top of stack) */
+fault_popl_es:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_fs /* (ES on top of stack) */
+fault_popl_fs:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_gs /* (FS on top of stack) */
+fault_popl_gs:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_segregs /* (GS on top of stack) */
+
+push_es:
+ pushl %es /* restore es, */
+push_fs:
+ pushl %fs /* restore fs, */
+push_gs:
+ pushl %gs /* restore gs. */
+push_segregs:
+ movl %eax,R_TRAPNO(%esp) /* set trap number */
+ movl %edx,R_ERR(%esp) /* set error code */
+ jmp trap_set_segs /* take trap */
+
+/*
+ * Debug trap. Check for single-stepping across system call into
+ * kernel. If this is the case, taking the debug trap has turned
+ * off single-stepping - save the flags register with the trace
+ * bit set.
+ */
+ENTRY(t_debug)
+ testl $(EFL_VM),8(%esp) /* is trap from V86 mode? */
+ jnz 0f /* isn`t kernel trap if so */
+ testl $3,4(%esp) /* is trap from kernel mode? */
+ jnz 0f /* if so: */
+ cmpl $syscall_entry,(%esp) /* system call entry? */
+ jne 0f /* if so: */
+ /* flags are sitting where syscall */
+ /* wants them */
+ addl $8,%esp /* remove eip/cs */
+ jmp syscall_entry_2 /* continue system call entry */
+
+0: pushl $0 /* otherwise: */
+ pushl $(T_DEBUG) /* handle as normal */
+ jmp EXT(alltraps) /* debug fault */
+
+/*
+ * Page fault traps save cr2.
+ */
+ENTRY(t_page_fault)
+ pushl $(T_PAGE_FAULT) /* mark a page fault trap */
+ pusha /* save the general registers */
+ movl %cr2,%eax /* get the faulting address */
+ movl %eax,12(%esp) /* save in esp save slot */
+ jmp trap_push_segs /* continue fault */
+
+/*
+ * All 'exceptions' enter here with:
+ * esp-> trap number
+ * error code
+ * old eip
+ * old cs
+ * old eflags
+ * old esp if trapped from user
+ * old ss if trapped from user
+ */
+ENTRY(alltraps)
+ pusha /* save the general registers */
+trap_push_segs:
+ pushl %ds /* and the segment registers */
+ pushl %es
+ pushl %fs
+ pushl %gs
+
+ /* Note that we have to load the segment registers
+ even if this is a trap from the kernel,
+ because the kernel uses user segment registers for copyin/copyout.
+ (XXX Would it be smarter just to use fs or gs for that?) */
+ mov %ss,%ax /* switch to kernel data segment */
+ mov %ax,%ds /* (same as kernel stack segment) */
+ mov %ax,%es
+
+trap_set_segs:
+ cld /* clear direction flag */
+ testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
+ jnz trap_from_user /* user mode trap if so */
+ testb $3,R_CS(%esp) /* user mode trap? */
+ jz trap_from_kernel /* kernel trap if not */
+trap_from_user:
+
+ CPU_NUMBER(%edx)
+ TIME_TRAP_UENTRY
+
+ movl CX(EXT(kernel_stack),%edx),%ebx
+ xchgl %ebx,%esp /* switch to kernel stack */
+ /* user regs pointer already set */
+_take_trap:
+ pushl %ebx /* pass register save area to trap */
+ call EXT(user_trap) /* call user trap routine */
+ movl 4(%esp),%esp /* switch back to PCB stack */
+
+ orl %eax,%eax /* emulated syscall? */
+ jz _return_from_trap /* no, just return */
+ movl R_EAX(%ebx),%eax /* yes, get syscall number */
+ jmp syscall_entry_3 /* and emulate it */
+
+/*
+ * Return from trap or system call, checking for ASTs.
+ * On PCB stack.
+ */
+
+_return_from_trap:
+ CPU_NUMBER(%edx)
+ cmpl $0,CX(EXT(need_ast),%edx)
+ jz _return_to_user /* if we need an AST: */
+
+ movl CX(EXT(kernel_stack),%edx),%esp
+ /* switch to kernel stack */
+ call EXT(i386_astintr) /* take the AST */
+ popl %esp /* switch back to PCB stack */
+ jmp _return_from_trap /* and check again (rare) */
+ /* ASTs after this point will */
+ /* have to wait */
+
+_return_to_user:
+ TIME_TRAP_UEXIT
+
+/*
+ * Return from kernel mode to interrupted thread.
+ */
+
+_return_from_kernel:
+_kret_popl_gs:
+ popl %gs /* restore segment registers */
+_kret_popl_fs:
+ popl %fs
+_kret_popl_es:
+ popl %es
+_kret_popl_ds:
+ popl %ds
+ popa /* restore general registers */
+ addl $8,%esp /* discard trap number and error code */
+_kret_iret:
+ iret /* return from interrupt */
+
+
+/*
+ * Trap from kernel mode. No need to switch stacks.
+ */
+trap_from_kernel:
+#if MACH_KDB || MACH_TTD
+ movl %esp,%ebx /* save current stack */
+
+ cmpl EXT(int_stack_high),%esp /* on an interrupt stack? */
+ jb 1f /* OK if so */
+
+ CPU_NUMBER(%edx) /* get CPU number */
+ cmpl CX(EXT(kernel_stack),%edx),%esp
+ /* already on kernel stack? */
+ ja 0f
+ cmpl CX(EXT(active_stacks),%edx),%esp
+ ja 1f /* switch if not */
+0:
+ movl CX(EXT(kernel_stack),%edx),%esp
+1:
+ pushl %ebx /* save old stack */
+ pushl %ebx /* pass as parameter */
+ call EXT(kernel_trap) /* to kernel trap routine */
+ addl $4,%esp /* pop parameter */
+ popl %esp /* return to old stack */
+#else /* MACH_KDB || MACH_TTD */
+
+ pushl %esp /* pass parameter */
+ call EXT(kernel_trap) /* to kernel trap routine */
+ addl $4,%esp /* pop parameter */
+#endif /* MACH_KDB || MACH_TTD */
+ jmp _return_from_kernel
+
+
+/*
+ * Called as a function, makes the current thread
+ * return from the kernel as if from an exception.
+ */
+
+ENTRY(thread_exception_return)
+ENTRY(thread_bootstrap_return)
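+	/*
+	 * Kernel stacks are aligned on KERNEL_STACK_SIZE boundaries,
+	 * so OR-ing in KERNEL_STACK_SIZE-1 yields the last byte of the
+	 * current stack; the word just below the i386_kernel_state
+	 * kept at the top points back at the thread's PCB save area.
+	 */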
+ movl %esp,%ecx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%ecx
+ movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
+ jmp _return_from_trap
+
+/*
+ * Called as a function, makes the current thread
+ * return from the kernel as if from a syscall.
+ * Takes the syscall's return code as an argument.
+ */
+
+ENTRY(thread_syscall_return)
+ movl S_ARG0,%eax /* get return value */
+ movl %esp,%ecx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%ecx
+ movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
+ movl %eax,R_EAX(%esp) /* save return value */
+ jmp _return_from_trap
+
+ENTRY(call_continuation)
+ movl S_ARG0,%eax /* get continuation */
+ movl %esp,%ecx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%ecx
+ addl $(-3-IKS_SIZE),%ecx
+ movl %ecx,%esp /* pop the stack */
+ xorl %ebp,%ebp /* zero frame pointer */
+ jmp *%eax /* goto continuation */
+
+
+
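+/*
+ * Each INTERRUPT(n) expands to a small stub that saves %eax, loads
+ * the interrupt number into it and jumps to the common all_intrs
+ * path; the stub's address is also appended to int_entry_table so
+ * that the interrupt vector setup code can find it.
+ */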
+#define INTERRUPT(n) \
+ .data 2 ;\
+ .long 0f ;\
+ .text ;\
+ P2ALIGN(TEXT_ALIGN) ;\
+0: ;\
+ pushl %eax ;\
+ movl $(n),%eax ;\
+ jmp EXT(all_intrs)
+
+ .data 2
+DATA(int_entry_table)
+ .text
+INTERRUPT(0)
+INTERRUPT(1)
+INTERRUPT(2)
+INTERRUPT(3)
+INTERRUPT(4)
+INTERRUPT(5)
+INTERRUPT(6)
+INTERRUPT(7)
+INTERRUPT(8)
+INTERRUPT(9)
+INTERRUPT(10)
+INTERRUPT(11)
+INTERRUPT(12)
+INTERRUPT(13)
+INTERRUPT(14)
+INTERRUPT(15)
+
+/* XXX handle NMI - at least print a warning like Linux does. */
+
+/*
+ * All interrupts enter here.
+ * old %eax on stack; interrupt number in %eax.
+ */
+ENTRY(all_intrs)
+ pushl %ecx /* save registers */
+ pushl %edx
+ cld /* clear direction flag */
+
+ cmpl %ss:EXT(int_stack_high),%esp /* on an interrupt stack? */
+ jb int_from_intstack /* if not: */
+
+ pushl %ds /* save segment registers */
+ pushl %es
+ mov %ss,%dx /* switch to kernel segments */
+ mov %dx,%ds
+ mov %dx,%es
+
+ CPU_NUMBER(%edx)
+
+ movl CX(EXT(int_stack_top),%edx),%ecx
+ xchgl %ecx,%esp /* switch to interrupt stack */
+
+#if STAT_TIME
+ pushl %ecx /* save pointer to old stack */
+#else
+ pushl %ebx /* save %ebx - out of the way */
+ /* so stack looks the same */
+ pushl %ecx /* save pointer to old stack */
+ TIME_INT_ENTRY /* do timing */
+#endif
+
+ call EXT(interrupt) /* call generic interrupt routine */
+
+ .globl EXT(return_to_iret)
+LEXT(return_to_iret) /* ( label for kdb_kintr and hardclock) */
+
+ CPU_NUMBER(%edx)
+#if STAT_TIME
+#else
+ TIME_INT_EXIT /* do timing */
+ movl 4(%esp),%ebx /* restore the extra reg we saved */
+#endif
+
+ popl %esp /* switch back to old stack */
+
+ testl $(EFL_VM),I_EFL(%esp) /* if in V86 */
+ jnz 0f /* or */
+ testb $3,I_CS(%esp) /* user mode, */
+ jz 1f /* check for ASTs */
+0:
+ cmpl $0,CX(EXT(need_ast),%edx)
+ jnz ast_from_interrupt /* take it if so */
+1:
+ pop %es /* restore segment regs */
+ pop %ds
+ pop %edx
+ pop %ecx
+ pop %eax
+ iret /* return to caller */
+
+int_from_intstack:
+ call EXT(interrupt) /* call interrupt routine */
+_return_to_iret_i: /* ( label for kdb_kintr) */
+ pop %edx /* must have been on kernel segs */
+ pop %ecx
+ pop %eax /* no ASTs */
+ iret
+
+/*
+ * Take an AST from an interrupt.
+ * On PCB stack.
+ * sp-> es -> edx
+ * ds -> ecx
+ * edx -> eax
+ * ecx -> trapno
+ * eax -> code
+ * eip
+ * cs
+ * efl
+ * esp
+ * ss
+ */
+ast_from_interrupt:
+ pop %es /* restore all registers ... */
+ pop %ds
+ popl %edx
+ popl %ecx
+ popl %eax
+ pushl $0 /* zero code */
+ pushl $0 /* zero trap number */
+ pusha /* save general registers */
+ push %ds /* save segment registers */
+ push %es
+ push %fs
+ push %gs
+ mov %ss,%dx /* switch to kernel segments */
+ mov %dx,%ds
+ mov %dx,%es
+
+ CPU_NUMBER(%edx)
+ TIME_TRAP_UENTRY
+
+ movl CX(EXT(kernel_stack),%edx),%esp
+ /* switch to kernel stack */
+ call EXT(i386_astintr) /* take the AST */
+ popl %esp /* back to PCB stack */
+ jmp _return_from_trap /* return */
+
+#if MACH_KDB
+/*
+ * kdb_kintr: enter kdb from keyboard interrupt.
+ * Chase down the stack frames until we find one whose return
+ * address is the interrupt handler. At that point, we have:
+ *
+ * frame-> saved %ebp
+ * return address in interrupt handler
+ * iunit
+ * [ PS2 - saved interrupt number ]
+ * saved SPL
+ * return address == return_to_iret_i
+ * saved %edx
+ * saved %ecx
+ * saved %eax
+ * saved %eip
+ * saved %cs
+ * saved %efl
+ *
+ * OR:
+ * frame-> saved %ebp
+ * return address in interrupt handler
+ * iunit
+ * [ PS2 - saved interrupt number ]
+ * saved SPL
+ * return address == return_to_iret
+ * pointer to save area on old stack
+ * [ saved %ebx, if accurate timing ]
+ *
+ * old stack: saved %es
+ * saved %ds
+ * saved %edx
+ * saved %ecx
+ * saved %eax
+ * saved %eip
+ * saved %cs
+ * saved %efl
+ *
+ * Call kdb, passing it that register save area.
+ */
+
+#ifdef PS2
+#define RET_OFFSET 20
+#else /* not PS2 */
+#define RET_OFFSET 16
+#endif /* PS2 */
+
+ENTRY(kdb_kintr)
+ movl %ebp,%eax /* save caller`s frame pointer */
+ movl $EXT(return_to_iret),%ecx /* interrupt return address 1 */
+ movl $_return_to_iret_i,%edx /* interrupt return address 2 */
+
+0: cmpl RET_OFFSET(%eax),%ecx /* does this frame return to */
+ /* interrupt handler (1)? */
+ je 1f
+ cmpl RET_OFFSET(%eax),%edx /* interrupt handler (2)? */
+ je 2f /* if not: */
+ movl (%eax),%eax /* try next frame */
+ jmp 0b
+
+1: movl $kdb_from_iret,RET_OFFSET(%eax)
+ ret /* returns to kernel/user stack */
+
+2: movl $kdb_from_iret_i,RET_OFFSET(%eax)
+ /* returns to interrupt stack */
+ ret
+
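The frame chase that kdb_kintr performs above is easier to see in C. Below is a minimal sketch under the generic saved-%ebp frame layout; in the real routine the return slot sits RET_OFFSET bytes up the frame (to skip the handler's iunit and SPL words), and struct frame here is purely illustrative, not a kernel type.

	struct frame {
		struct frame	*prev;	/* saved %ebp of the caller */
		void		*ret;	/* return address pushed by the call */
	};

	/*
	 * Walk the saved-%ebp chain until a frame returns to target;
	 * kdb_kintr then overwrites that return slot with kdb_from_iret
	 * or kdb_from_iret_i.
	 */
	static struct frame *
	find_frame_returning_to(struct frame *fp, void *target)
	{
		while (fp != 0) {
			if (fp->ret == target)
				return fp;
			fp = fp->prev;
		}
		return 0;
	}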
+/*
+ * On return from keyboard interrupt, we will execute
+ * kdb_from_iret_i
+ * if returning to an interrupt on the interrupt stack
+ * kdb_from_iret
+ * if returning to an interrupt on the user or kernel stack
+ */
+kdb_from_iret:
+ /* save regs in known locations */
+#if STAT_TIME
+ pushl %ebx /* caller`s %ebx is in reg */
+#else
+ movl 4(%esp),%eax /* get caller`s %ebx */
+ pushl %eax /* push on stack */
+#endif
+ pushl %ebp
+ pushl %esi
+ pushl %edi
+ push %fs
+ push %gs
+ pushl %esp /* pass regs */
+ call EXT(kdb_kentry) /* to kdb */
+ addl $4,%esp /* pop parameters */
+ pop %gs /* restore registers */
+ pop %fs
+ popl %edi
+ popl %esi
+ popl %ebp
+#if STAT_TIME
+ popl %ebx
+#else
+ popl %eax
+ movl %eax,4(%esp)
+#endif
+ jmp EXT(return_to_iret) /* normal interrupt return */
+
+kdb_from_iret_i: /* on interrupt stack */
+ pop %edx /* restore saved registers */
+ pop %ecx
+ pop %eax
+ pushl $0 /* zero error code */
+ pushl $0 /* zero trap number */
+ pusha /* save general registers */
+ push %ds /* save segment registers */
+ push %es
+ push %fs
+ push %gs
+ pushl %esp /* pass regs, */
+ pushl $0 /* code, */
+ pushl $-1 /* type to kdb */
+ call EXT(kdb_trap)
+ addl $12,%esp /* remove parameters */
+ pop %gs /* restore segment registers */
+ pop %fs
+ pop %es
+ pop %ds
+ popa /* restore general registers */
+ addl $8,%esp
+ iret
+
+#endif /* MACH_KDB */
+
+#if MACH_TTD
+/*
+ * Same code as that above for the keyboard entry into kdb.
+ */
+ENTRY(kttd_intr)
+ movl %ebp,%eax /* save caller`s frame pointer */
+ movl $EXT(return_to_iret),%ecx /* interrupt return address 1 */
+ movl $_return_to_iret_i,%edx /* interrupt return address 2 */
+
+0: cmpl 16(%eax),%ecx /* does this frame return to */
+ /* interrupt handler (1)? */
+ je 1f
+ cmpl 16(%eax),%edx /* interrupt handler (2)? */
+ je 2f /* if not: */
+ movl (%eax),%eax /* try next frame */
+ jmp 0b
+
+1: movl $ttd_from_iret,16(%eax) /* returns to kernel/user stack */
+ ret
+
+2: movl $ttd_from_iret_i,16(%eax)
+ /* returns to interrupt stack */
+ ret
+
+/*
+ * On return from keyboard interrupt, we will execute
+ * ttd_from_iret_i
+ * if returning to an interrupt on the interrupt stack
+ * ttd_from_iret
+ * if returning to an interrupt on the user or kernel stack
+ */
+ttd_from_iret:
+ /* save regs in known locations */
+#if STAT_TIME
+ pushl %ebx /* caller`s %ebx is in reg */
+#else
+ movl 4(%esp),%eax /* get caller`s %ebx */
+ pushl %eax /* push on stack */
+#endif
+ pushl %ebp
+ pushl %esi
+ pushl %edi
+ push %fs
+ push %gs
+ pushl %esp /* pass regs */
+ call _kttd_netentry /* to kdb */
+ addl $4,%esp /* pop parameters */
+ pop %gs /* restore registers */
+ pop %fs
+ popl %edi
+ popl %esi
+ popl %ebp
+#if STAT_TIME
+ popl %ebx
+#else
+ popl %eax
+ movl %eax,4(%esp)
+#endif
+ jmp EXT(return_to_iret) /* normal interrupt return */
+
+ttd_from_iret_i: /* on interrupt stack */
+ pop %edx /* restore saved registers */
+ pop %ecx
+ pop %eax
+ pushl $0 /* zero error code */
+ pushl $0 /* zero trap number */
+ pusha /* save general registers */
+ push %ds /* save segment registers */
+ push %es
+ push %fs
+ push %gs
+ pushl %esp /* pass regs, */
+ pushl $0 /* code, */
+ pushl $-1 /* type to kdb */
+ call _kttd_trap
+ addl $12,%esp /* remove parameters */
+ pop %gs /* restore segment registers */
+ pop %fs
+ pop %es
+ pop %ds
+ popa /* restore general registers */
+ addl $8,%esp
+ iret
+
+#endif /* MACH_TTD */
+
+/*
+ * System call enters through a call gate. Flags are not saved -
+ * we must shuffle stack to look like trap save area.
+ *
+ * esp-> old eip
+ * old cs
+ * old esp
+ * old ss
+ *
+ * eax contains system call number.
+ */
+ENTRY(syscall)
+syscall_entry:
+ pushf /* save flags as soon as possible */
+syscall_entry_2:
+ pushl %eax /* save system call number */
+ pushl $0 /* clear trap number slot */
+
+ pusha /* save the general registers */
+ pushl %ds /* and the segment registers */
+ pushl %es
+ pushl %fs
+ pushl %gs
+
+ mov %ss,%dx /* switch to kernel data segment */
+ mov %dx,%ds
+ mov %dx,%es
+
+/*
+ * Shuffle eflags,eip,cs into proper places
+ */
+
+ movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
+ movl R_CS(%esp),%ecx /* eip is in CS slot */
+ movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
+ movl %ecx,R_EIP(%esp) /* fix eip */
+ movl %edx,R_CS(%esp) /* fix cs */
+ movl %ebx,R_EFLAGS(%esp) /* fix eflags */
+
+ CPU_NUMBER(%edx)
+ TIME_TRAP_SENTRY
+
+ movl CX(EXT(kernel_stack),%edx),%ebx
+ /* get current kernel stack */
+ xchgl %ebx,%esp /* switch stacks - %ebx points to */
+ /* user registers. */
+ /* user regs pointer already set */
+
+/*
+ * Check for MACH or emulated system call
+ */
+syscall_entry_3:
+ movl CX(EXT(active_threads),%edx),%edx
+ /* point to current thread */
+ movl TH_TASK(%edx),%edx /* point to task */
+ movl TASK_EMUL(%edx),%edx /* get emulation vector */
+ orl %edx,%edx /* if none, */
+ je syscall_native /* do native system call */
+ movl %eax,%ecx /* copy system call number */
+ subl DISP_MIN(%edx),%ecx /* get displacement into syscall */
+ /* vector table */
+ jl syscall_native /* too low - native system call */
+ cmpl DISP_COUNT(%edx),%ecx /* check range */
+ jnl syscall_native /* too high - native system call */
+ movl DISP_VECTOR(%edx,%ecx,4),%edx
+ /* get the emulation vector */
+ orl %edx,%edx /* emulated system call if not zero */
+ jnz syscall_emul
+
+/*
+ * Native system call.
+ */
+syscall_native:
+ negl %eax /* get system call number */
+ jl mach_call_range /* out of range if it was positive */
+ cmpl EXT(mach_trap_count),%eax /* check system call table bounds */
+ jg mach_call_range /* error if out of range */
+#if 0 /* debug hack to show the syscall number on the screen */
+ movb %al,%dl
+ shrb $4,%dl
+ orb $0x30,%dl
+ movb $0x0f,%dh
+ movw %dx,0xb800a
+ movb %al,%dl
+ andb $0xf,%dl
+ orb $0x30,%dl
+ movb $0xf,%dh
+ movw %dx,0xb800c
+#endif
+ shll $4,%eax /* manual indexing */
+ movl EXT(mach_trap_table)(%eax),%ecx
+ /* get number of arguments */
+ jecxz mach_call_call /* skip argument copy if none */
+
+ movl R_UESP(%ebx),%esi /* get user stack pointer */
+ lea 4(%esi,%ecx,4),%esi /* skip user return address, */
+ /* and point past last argument */
+ movl $USER_DS,%edx /* use user data segment for accesses */
+ mov %dx,%fs
+ movl %esp,%edx /* save kernel ESP for error recovery */
+
+0: subl $4,%esi
+ RECOVER(mach_call_addr_push)
+ pushl %fs:(%esi) /* push argument on stack */
+ loop 0b /* loop for all arguments */
+
+mach_call_call:
+
+#ifdef DEBUG
+ testb $0xff,EXT(syscall_trace)
+ jz 0f
+ pushl %eax
+ call EXT(syscall_trace_print)
+ /* will return with syscallofs still (or again) in eax */
+ addl $4,%esp
+0:
+#endif /* DEBUG */
+
+ call *EXT(mach_trap_table)+4(%eax)
+ /* call procedure */
+ movl %esp,%ecx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%ecx
+ movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
+ movl %eax,R_EAX(%esp) /* save return value */
+ jmp _return_from_trap /* return to user */
+
+/*
+ * Address out of range. Change to page fault.
+ * %esi holds failing address.
+ */
+mach_call_addr_push:
+ movl %edx,%esp /* clean parameters from stack */
+mach_call_addr:
+ movl %esi,R_CR2(%ebx) /* set fault address */
+ movl $(T_PAGE_FAULT),R_TRAPNO(%ebx)
+ /* set page-fault trap */
+ movl $(T_PF_USER),R_ERR(%ebx)
+ /* set error code - read user space */
+ jmp _take_trap /* treat as a trap */
+
+/*
+ * System call out of range. Treat as invalid-instruction trap.
+ * (? general protection?)
+ */
+mach_call_range:
+ movl $(T_INVALID_OPCODE),R_TRAPNO(%ebx)
+ /* set invalid-operation trap */
+ movl $0,R_ERR(%ebx) /* clear error code */
+ jmp _take_trap /* treat as a trap */
+
+/*
+ * User space emulation of system calls.
+ * edx - user address to handle syscall
+ *
+ * User stack will become:
+ * uesp-> eflags
+ * eip
+ * eax still contains syscall number.
+ */
+syscall_emul:
+ movl $USER_DS,%edi /* use user data segment for accesses */
+ mov %di,%fs
+
+/* XXX what about write-protected pages? */
+ movl R_UESP(%ebx),%edi /* get user stack pointer */
+ subl $8,%edi /* push space for new arguments */
+ movl R_EFLAGS(%ebx),%eax /* move flags */
+ RECOVER(syscall_addr)
+ movl %eax,%fs:0(%edi) /* to user stack */
+ movl R_EIP(%ebx),%eax /* move eip */
+ RECOVER(syscall_addr)
+ movl %eax,%fs:4(%edi) /* to user stack */
+ movl %edi,R_UESP(%ebx) /* set new user stack pointer */
+ movl %edx,R_EIP(%ebx) /* change return address to trap */
+ movl %ebx,%esp /* back to PCB stack */
+ jmp _return_from_trap /* return to user */
+
+/*
+ * Address error - address is in %edi.
+ */
+syscall_addr:
+ movl %edi,R_CR2(%ebx) /* set fault address */
+ movl $(T_PAGE_FAULT),R_TRAPNO(%ebx)
+ /* set page-fault trap */
+ movl $(T_PF_USER),R_ERR(%ebx)
+ /* set error code - read user space */
+ jmp _take_trap /* treat as a trap */
+
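For reference, the emulation-vector lookup done at syscall_entry_3 reads as follows in C. The structure is only a sketch of the DISP_MIN/DISP_COUNT/DISP_VECTOR fields referenced above, not the kernel's real emulation-dispatch layout.

	struct eml_dispatch_sketch {
		int	disp_min;	/* lowest call number covered (DISP_MIN) */
		int	disp_count;	/* number of table entries (DISP_COUNT) */
		void	**disp_vector;	/* per-call user handlers (DISP_VECTOR) */
	};

	/* Return the user-space handler for callno, or 0 to take the native path. */
	static void *
	emulation_handler(struct eml_dispatch_sketch *eml, int callno)
	{
		int idx;

		if (eml == 0)
			return 0;		/* task has no emulation vector */
		idx = callno - eml->disp_min;
		if (idx < 0 || idx >= eml->disp_count)
			return 0;		/* out of range: native call */
		return eml->disp_vector[idx];	/* a zero entry also means native */
	}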
+/* */
+/*
+ * Utility routines.
+ */
+
+/*
+ * Copy from user address space.
+ * arg0: user address
+ * arg1: kernel address
+ * arg2: byte count
+ */
+ENTRY(copyin)
+Entry(copyinmsg)
+ pushl %esi
+ pushl %edi /* save registers */
+
+ movl 8+S_ARG0,%esi /* get user start address */
+ movl 8+S_ARG1,%edi /* get kernel destination address */
+ movl 8+S_ARG2,%edx /* get count */
+
+ movl $USER_DS,%eax /* use user data segment for accesses */
+ mov %ax,%ds
+
+ /*cld*/ /* count up: default mode in all GCC code */
+ movl %edx,%ecx /* move by longwords first */
+ shrl $2,%ecx
+ RECOVER(copyin_fail)
+ rep
+ movsl /* move longwords */
+ movl %edx,%ecx /* now move remaining bytes */
+ andl $3,%ecx
+ RECOVER(copyin_fail)
+ rep
+ movsb
+ xorl %eax,%eax /* return 0 for success */
+
+copyin_ret:
+ mov %ss,%di /* restore DS to kernel segment */
+ mov %di,%ds
+
+ popl %edi /* restore registers */
+ popl %esi
+ ret /* and return */
+
+copyin_fail:
+ movl $1,%eax /* return 1 for failure */
+ jmp copyin_ret /* pop frame and return */
+
+/*
+ * Copy to user address space.
+ * arg0: kernel address
+ * arg1: user address
+ * arg2: byte count
+ */
+ENTRY(copyout)
+Entry(copyoutmsg)
+ pushl %esi
+ pushl %edi /* save registers */
+
+ movl 8+S_ARG0,%esi /* get kernel start address */
+ movl 8+S_ARG1,%edi /* get user start address */
+ movl 8+S_ARG2,%edx /* get count */
+
+ movl $USER_DS,%eax /* use user data segment for accesses */
+ mov %ax,%es
+
+/*
+ * Check whether user address space is writable
+ * before writing to it - hardware is broken.
+ * XXX only have to do this on 386's.
+ */
+copyout_retry:
+ movl %cr3,%ecx /* point to page directory */
+ movl %edi,%eax /* get page directory bits */
+ shrl $(PDESHIFT),%eax /* from user address */
+ movl KERNELBASE(%ecx,%eax,4),%ecx
+ /* get page directory pointer */
+ testl $(PTE_V),%ecx /* present? */
+ jz 0f /* if not, fault is OK */
+ andl $(PTE_PFN),%ecx /* isolate page frame address */
+ movl %edi,%eax /* get page table bits */
+ shrl $(PTESHIFT),%eax
+ andl $(PTEMASK),%eax /* from user address */
+ leal KERNELBASE(%ecx,%eax,4),%ecx
+ /* point to page table entry */
+ movl (%ecx),%eax /* get it */
+ testl $(PTE_V),%eax /* present? */
+ jz 0f /* if not, fault is OK */
+ testl $(PTE_W),%eax /* writable? */
+ jnz 0f /* OK if so */
+/*
+ * Not writable - must fake a fault. Turn off access to the page.
+ */
+ andl $(PTE_INVALID),(%ecx) /* turn off valid bit */
+ movl %cr3,%eax /* invalidate TLB */
+ movl %eax,%cr3
+0:
+
+/*
+ * Copy only what fits on the current destination page.
+ * Check for write-fault again on the next page.
+ */
+ leal NBPG(%edi),%eax /* point to */
+ andl $(-NBPG),%eax /* start of next page */
+ subl %edi,%eax /* get number of bytes to that point */
+ cmpl %edx,%eax /* bigger than count? */
+ jle 1f /* if so, */
+ movl %edx,%eax /* use count */
+1:
+
+ /*cld*/ /* count up: always this way in GCC code */
+ movl %eax,%ecx /* move by longwords first */
+ shrl $2,%ecx
+ RECOVER(copyout_fail)
+ RETRY(copyout_retry)
+ rep
+ movsl
+ movl %eax,%ecx /* now move remaining bytes */
+ andl $3,%ecx
+ RECOVER(copyout_fail)
+ RETRY(copyout_retry)
+ rep
+ movsb /* move */
+ subl %eax,%edx /* decrement count */
+ jg copyout_retry /* restart on next page if not done */
+ xorl %eax,%eax /* return 0 for success */
+
+copyout_ret:
+ mov %ss,%di /* restore ES to kernel segment */
+ mov %di,%es
+
+ popl %edi /* restore registers */
+ popl %esi
+ ret /* and return */
+
+copyout_fail:
+ movl $1,%eax /* return 1 for failure */
+ jmp copyout_ret /* pop frame and return */
+
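Both copyin and copyout move longwords first and then the odd bytes; copyout additionally restarts at copyout_retry for every destination page so the writability check can be repeated. The chunk size computed with leal/andl/subl above is just this, written out in C (PAGE_BYTES stands in for NBPG):

	#include <stddef.h>

	#define PAGE_BYTES	4096	/* stands in for NBPG */

	/* Bytes to copy before the next page boundary, or the rest if smaller. */
	static size_t
	chunk_to_page_end(unsigned long dst, size_t remaining)
	{
		size_t to_boundary = PAGE_BYTES - (dst & (PAGE_BYTES - 1));

		return to_boundary < remaining ? to_boundary : remaining;
	}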
+/* XXX turn the following stubs into inline functions. */
+
+/*
+ * FPU routines.
+ */
+
+/*
+ * Initialize FPU.
+ */
+ENTRY(_fninit)
+ fninit
+ ret
+
+/*
+ * Read control word
+ */
+ENTRY(_fstcw)
+ pushl %eax /* get stack space */
+ fstcw (%esp)
+ popl %eax
+ ret
+
+/*
+ * Set control word
+ */
+ENTRY(_fldcw)
+ fldcw 4(%esp)
+ ret
+
+/*
+ * Read status word
+ */
+ENTRY(_fnstsw)
+ xor %eax,%eax /* clear high 16 bits of eax */
+ fnstsw %ax /* read FP status */
+ ret
+
+/*
+ * Clear FPU exceptions
+ */
+ENTRY(_fnclex)
+ fnclex
+ ret
+
+/*
+ * Clear task-switched flag.
+ */
+ENTRY(_clts)
+ clts
+ ret
+
+/*
+ * Save complete FPU state. Save error for later.
+ */
+ENTRY(_fpsave)
+ movl 4(%esp),%eax /* get save area pointer */
+ fnsave (%eax) /* save complete state, including */
+ /* errors */
+ ret
+
+/*
+ * Restore FPU state.
+ */
+ENTRY(_fprestore)
+ movl 4(%esp),%eax /* get save area pointer */
+ frstor (%eax) /* restore complete state */
+ ret
+
+/*
+ * Set cr3
+ */
+ENTRY(set_cr3)
+ movl 4(%esp),%eax /* get new cr3 value */
+ movl %eax,%cr3 /* load it */
+ ret
+
+/*
+ * Read cr3
+ */
+ENTRY(get_cr3)
+ movl %cr3,%eax
+ ret
+
+/*
+ * Flush TLB
+ */
+ENTRY(flush_tlb)
+ movl %cr3,%eax /* flush tlb by reloading CR3 */
+ movl %eax,%cr3 /* with itself */
+ ret
+
+/*
+ * Read cr2
+ */
+ENTRY(get_cr2)
+ movl %cr2,%eax
+ ret
+
+/*
+ * Read ldtr
+ */
+ENTRY(get_ldt)
+ xorl %eax,%eax
+ sldt %ax
+ ret
+
+/*
+ * Set ldtr
+ */
+ENTRY(set_ldt)
+ lldt 4(%esp)
+ ret
+
+/*
+ * Read task register.
+ */
+ENTRY(get_tr)
+ xorl %eax,%eax
+ str %ax
+ ret
+
+/*
+ * Set task register. Also clears busy bit of task descriptor.
+ */
+ENTRY(set_tr)
+ movl S_ARG0,%eax /* get task segment number */
+ subl $8,%esp /* push space for SGDT */
+ sgdt 2(%esp) /* store GDT limit and base (linear) */
+ movl 4(%esp),%edx /* address GDT */
+ movb $(ACC_P|ACC_PL_K|ACC_TSS),5(%edx,%eax)
+ /* fix access byte in task descriptor */
+ ltr %ax /* load task register */
+ addl $8,%esp /* clear stack */
+ ret /* and return */
+
+/*
+ * Set task-switched flag.
+ */
+ENTRY(_setts)
+ movl %cr0,%eax /* get cr0 */
+ orl $(CR0_TS),%eax /* or in TS bit */
+ movl %eax,%cr0 /* set cr0 */
+ ret
+
+/*
+ * void outb(unsigned char *io_port,
+ * unsigned char byte)
+ *
+ * Output a byte to an IO port.
+ */
+ENTRY(outb)
+ movl S_ARG0,%edx /* IO port address */
+ movl S_ARG1,%eax /* data to output */
+ outb %al,%dx /* send it out */
+#ifdef iPSC386
+ mull %ecx /* Delay a little to make H/W happy */
+#endif /* iPSC386 */
+ ret
+
+/*
+ * unsigned char inb(unsigned char *io_port)
+ *
+ * Input a byte from an IO port.
+ */
+ENTRY(inb)
+ movl S_ARG0,%edx /* IO port address */
+ xor %eax,%eax /* clear high bits of register */
+ inb %dx,%al /* get the byte */
+#ifdef iPSC386
+/* Do a long multiply to delay a little to make H/W happy. Must */
+/* save and restore EAX which is used to hold result of multiply. */
+ pushl %eax
+ mull %ecx
+ popl %eax
+#endif /* iPSC386 */
+ ret
+
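A typical use of these accessors is an index/data register pair. The sketch below reads the CMOS RTC seconds register through the standard PC ports 0x70/0x71; it only illustrates the outb/inb calling convention above and is not code from this change.

	extern void outb();		/* outb(port, byte), as above */
	extern unsigned char inb();	/* inb(port), as above */

	/* Illustration only: read CMOS register 0 (RTC seconds). */
	static unsigned char
	cmos_read_seconds()
	{
		outb(0x70, 0x00);	/* select CMOS register 0 */
		return inb(0x71);	/* read its value */
	}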
+/*
+ * void outw(unsigned short *io_port,
+ * unsigned short word)
+ *
+ * Output a word to an IO port.
+ */
+ENTRY(outw)
+ movl S_ARG0,%edx /* IO port address */
+ movl S_ARG1,%eax /* data to output */
+ outw %ax,%dx /* send it out */
+ ret
+
+/*
+ * unsigned short inw(unsigned short *io_port)
+ *
+ * Input a word from an IO port.
+ */
+ENTRY(inw)
+ movl S_ARG0,%edx /* IO port address */
+ xor %eax,%eax /* clear high bits of register */
+ inw %dx,%ax /* get the word */
+ ret
+
+/*
+ * void outl(unsigned int *io_port,
+ * unsigned int byte)
+ *
+ * Output an int to an IO port.
+ */
+ENTRY(outl)
+ movl S_ARG0,%edx /* IO port address */
+ movl S_ARG1,%eax /* data to output */
+ outl %eax,%dx /* send it out */
+ ret
+
+/*
+ * unsigned int inl(unsigned int *io_port)
+ *
+ * Input an int from an IO port.
+ */
+ENTRY(inl)
+ movl S_ARG0,%edx /* IO port address */
+ inl %dx,%eax /* get the int */
+ ret
+
+/*
+ * void loutb(unsigned char *io_port,
+ * unsigned char *data,
+ * unsigned int count)
+ *
+ * Output an array of bytes to an IO port.
+ */
+ENTRY(loutb)
+ movl %esi,%eax /* save register */
+ movl S_ARG0,%edx /* get io port number */
+ movl S_ARG1,%esi /* get data address */
+ movl S_ARG2,%ecx /* get count */
+
+ cld /* count up */
+
+ rep
+ outsb /* output */
+
+ movl %eax,%esi /* restore register */
+ ret /* exit */
+
+
+/*
+ * void loutw(unsigned short *io_port,
+ * unsigned short *data,
+ * unsigned int count)
+ *
+ * Output an array of shorts to an IO port.
+ */
+ENTRY(loutw)
+ movl %esi,%eax /* save register */
+ movl S_ARG0,%edx /* get io port number */
+ movl S_ARG1,%esi /* get data address */
+ movl S_ARG2,%ecx /* get count */
+
+ cld /* count up */
+
+ rep
+ outsw /* output */
+
+ movl %eax,%esi /* restore register */
+ ret /* exit */
+
+
+/*
+ * void linb(unsigned char *io_port,
+ * unsigned char *data,
+ * unsigned int count)
+ *
+ * Input an array of bytes from an IO port.
+ */
+ENTRY(linb)
+ movl %edi,%eax /* save register */
+ movl S_ARG0,%edx /* get io port number */
+ movl S_ARG1,%edi /* get data address */
+ movl S_ARG2,%ecx /* get count */
+
+ cld /* count up */
+
+ rep
+ insb /* input */
+
+ movl %eax,%edi /* restore register */
+ ret /* exit */
+
+
+/*
+ * void linw(unsigned short *io_port,
+ * unsigned short *data,
+ * unsigned int count)
+ *
+ * Input an array of shorts from an IO port.
+ */
+ENTRY(linw)
+ movl %edi,%eax /* save register */
+ movl S_ARG0,%edx /* get io port number */
+ movl S_ARG1,%edi /* get data address */
+ movl S_ARG2,%ecx /* get count */
+
+ cld /* count up */
+
+ rep
+ insw /* input */
+
+ movl %eax,%edi /* restore register */
+ ret /* exit */
+
+
+/*
+ * int inst_fetch(int eip, int cs);
+ *
+ * Fetch instruction byte. Return -1 if invalid address.
+ */
+ENTRY(inst_fetch)
+ movl S_ARG1, %eax /* get segment */
+ movw %ax,%fs /* into FS */
+ movl S_ARG0, %eax /* get offset */
+ RETRY(EXT(inst_fetch)) /* re-load FS on retry */
+ RECOVER(_inst_fetch_fault)
+ movzbl %fs:(%eax),%eax /* load instruction byte */
+ ret
+
+_inst_fetch_fault:
+ movl $-1,%eax /* return -1 if error */
+ ret
+
+
+/*
+ * Done with recovery and retry tables.
+ */
+ RECOVER_TABLE_END
+ RETRY_TABLE_END
+
+
+
+ENTRY(dr6)
+ movl %db6, %eax
+ ret
+
+/* dr<i>(address, type, len, persistence)
+ */
+ENTRY(dr0)
+ movl S_ARG0, %eax
+ movl %eax,EXT(dr_addr)
+ movl %eax, %db0
+ movl $0, %ecx
+ jmp 0f
+ENTRY(dr1)
+ movl S_ARG0, %eax
+ movl %eax,EXT(dr_addr)+1*4
+ movl %eax, %db1
+ movl $2, %ecx
+ jmp 0f
+ENTRY(dr2)
+ movl S_ARG0, %eax
+ movl %eax,EXT(dr_addr)+2*4
+ movl %eax, %db2
+ movl $4, %ecx
+ jmp 0f
+
+ENTRY(dr3)
+ movl S_ARG0, %eax
+ movl %eax,EXT(dr_addr)+3*4
+ movl %eax, %db3
+ movl $6, %ecx
+
+0:
+ pushl %ebp
+ movl %esp, %ebp
+
+ movl %db7, %edx
+ movl %edx,EXT(dr_addr)+4*4
+ andl dr_msk(,%ecx,2),%edx /* clear out new entry */
+ movl %edx,EXT(dr_addr)+5*4
+ movzbl B_ARG3, %eax
+ andb $3, %al
+ shll %cl, %eax
+ orl %eax, %edx
+
+ movzbl B_ARG1, %eax
+ andb $3, %al
+ addb $0x10, %cl
+ shll %cl, %eax
+ orl %eax, %edx
+
+ movzbl B_ARG2, %eax
+ andb $3, %al
+ addb $0x2, %cl
+ shll %cl, %eax
+ orl %eax, %edx
+
+ movl %edx, %db7
+ movl %edx,EXT(dr_addr)+7*4
+ movl %edx, %eax
+ leave
+ ret
+
+ .data
+dr_msk:
+ .long ~0x000f0003
+ .long ~0x00f0000c
+ .long ~0x0f000030
+ .long ~0xf00000c0
+ENTRY(dr_addr)
+ .long 0,0,0,0
+ .long 0,0,0,0
+ .text
+
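The dr0..dr3 entries above splice one breakpoint slot's enable, type, and length fields into DR7. As a self-contained reference for the register format (L/G enables at bits 2n, R/W and LEN at bits 16+4n and 18+4n), the packing can be written as below; this illustrates the standard i386 layout and is not a drop-in replacement for the assembly.

	#include <stdio.h>

	/* Pack one hardware-breakpoint slot (n = 0..3) into a DR7 image. */
	static unsigned int
	dr7_set(unsigned int dr7, int n,
		unsigned int type,	/* 0 = exec, 1 = write, 3 = read/write */
		unsigned int len,	/* 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes */
		unsigned int enable)	/* 1 = local, 2 = global, 3 = both */
	{
		dr7 &= ~((0x3u << (n * 2)) | (0xfu << (16 + n * 4)));
		dr7 |= (enable & 0x3u) << (n * 2);		/* L/G enable bits */
		dr7 |= (type & 0x3u) << (16 + n * 4);		/* R/W field */
		dr7 |= (len & 0x3u) << (18 + n * 4);		/* LEN field */
		return dr7;
	}

	int
	main()
	{
		/* watch 4-byte writes through slot 1, locally enabled */
		printf("dr7 = %#x\n", dr7_set(0, 1, 1, 3, 1));
		return 0;
	}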
+/*
+ * Waste 10 microseconds.
+ */
+ENTRY(tenmicrosec)
+ movl EXT(microdata),%ecx /* cycle count for 10 microsecond loop */
+tenmicroloop:
+ loop tenmicroloop
+ ret
+
+/*
+ * cpu_shutdown()
+ * Force reboot
+ */
+null_idtr:
+ .word 0
+ .long 0
+
+Entry(cpu_shutdown)
+ lidt null_idtr /* disable the interrupt handler */
+ xor %ecx,%ecx /* generate a divide by zero */
+ div %ecx /* reboot now */
+ ret /* this will "never" be executed */
+
+
+/*
+ * Allocate enough space for a kernel TSS with a complete I/O bitmap,
+ * for making v86-mode BIOS calls. XXX
+ */
+ .data
+ .globl EXT(ktss)
+ .comm EXT(ktss),0x68+65536/8+1
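For reference, the 0x68+65536/8+1 above works out to 104 bytes for the i386 TSS itself, plus 8192 bytes for one permission bit per I/O port (65536 ports), plus the trailing all-ones byte the processor expects at the end of an I/O bitmap: 8297 bytes in total.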
+
diff --git a/i386/i386/loose_ends.c b/i386/i386/loose_ends.c
new file mode 100644
index 00000000..6a10adc3
--- /dev/null
+++ b/i386/i386/loose_ends.c
@@ -0,0 +1,82 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+#include <mach_assert.h>
+
+
+ /*
+ * For now we will always go to single user mode, since there is
+ * no way to pass this request through the boot.
+ */
+int boothowto = 0;
+
+ /*
+ * Should be rewritten in asm anyway.
+ */
+/*
+ * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
+ * them correctly.
+ */
+ovbcopy(from, to, bytes)
+ char *from, *to;
+ int bytes; /* num bytes to copy */
+{
+ /* Assume that bcopy copies left-to-right (low addr first). */
+ if (from + bytes <= to || to + bytes <= from || to == from)
+ bcopy(from, to, bytes); /* non-overlapping or no-op*/
+ else if (from > to)
+ bcopy(from, to, bytes); /* overlapping but OK */
+ else {
+ /* to > from: overlapping, and must copy right-to-left. */
+ from += bytes - 1;
+ to += bytes - 1;
+ while (bytes-- > 0)
+ *to-- = *from--;
+ }
+}
+
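The right-to-left branch is what makes the overlapping, destination-above-source case safe; in hosted C the same contract is provided by memmove. A standalone illustration (not part of this change):

	#include <stdio.h>
	#include <string.h>

	int
	main()
	{
		char buf[16] = "abcdefgh";

		/*
		 * Destination overlaps and lies above the source: this is the
		 * case ovbcopy(buf, buf + 2, 6) handles with its while loop.
		 */
		memmove(buf + 2, buf, 6);
		printf("%s\n", buf);	/* prints "ababcdef" */
		return 0;
	}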
+/* Someone with time should write code to set cpuspeed automagically */
+int cpuspeed = 4;
+#define DELAY(n) { register int N = cpuspeed * (n); while (--N > 0); }
+delay(n)
+{
+ DELAY(n);
+}
+
+#if MACH_ASSERT
+
+/*
+ * Machine-dependent routine to fill in an array with up to callstack_max
+ * levels of return pc information.
+ */
+void machine_callstack(
+ unsigned long *buf,
+ int callstack_max)
+{
+}
+
+#endif /* MACH_ASSERT */
diff --git a/i386/i386/mach_i386.srv b/i386/i386/mach_i386.srv
new file mode 100644
index 00000000..48d16ba4
--- /dev/null
+++ b/i386/i386/mach_i386.srv
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#include <mach/machine/mach_i386.defs>
diff --git a/i386/i386/mach_param.h b/i386/i386/mach_param.h
new file mode 100644
index 00000000..d7d4deee
--- /dev/null
+++ b/i386/i386/mach_param.h
@@ -0,0 +1,31 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-dependent parameters for i386.
+ */
+
+#define HZ (100)
+ /* clock tick every 10 ms. */
diff --git a/i386/i386/machine_routines.h b/i386/i386/machine_routines.h
new file mode 100644
index 00000000..a1fb489e
--- /dev/null
+++ b/i386/i386/machine_routines.h
@@ -0,0 +1,37 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_MACHINE_ROUTINES_H_
+#define _I386_MACHINE_ROUTINES_H_
+
+/*
+ * The i386 has a set of machine-dependent interfaces.
+ */
+#define MACHINE_SERVER mach_i386_server
+#define MACHINE_SERVER_ROUTINE mach_i386_server_routine
+
+#endif
+
diff --git a/i386/i386/machspl.h b/i386/i386/machspl.h
new file mode 100644
index 00000000..bbb26754
--- /dev/null
+++ b/i386/i386/machspl.h
@@ -0,0 +1,29 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/* XXX replaced by... */
+#include <i386/spl.h>
+
diff --git a/i386/i386/mp_desc.c b/i386/i386/mp_desc.c
new file mode 100644
index 00000000..d7b4f61e
--- /dev/null
+++ b/i386/i386/mp_desc.c
@@ -0,0 +1,235 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <cpus.h>
+
+#if NCPUS > 1
+
+#include <kern/cpu_number.h>
+#include <mach/machine.h>
+#include <vm/vm_kern.h>
+
+#include <i386/mp_desc.h>
+#include <i386/lock.h>
+
+/*
+ * The i386 needs an interrupt stack to keep the PCB stack from being
+ * overrun by interrupts. All interrupt stacks MUST lie at lower addresses
+ * than any thread`s kernel stack.
+ */
+
+/*
+ * Addresses of bottom and top of interrupt stacks.
+ */
+vm_offset_t interrupt_stack[NCPUS];
+vm_offset_t int_stack_top[NCPUS];
+
+/*
+ * Barrier address.
+ */
+vm_offset_t int_stack_high;
+
+/*
+ * First cpu`s interrupt stack.
+ */
+extern char intstack[]; /* bottom */
+extern char eintstack[]; /* top */
+
+/*
+ * We allocate interrupt stacks from physical memory.
+ */
+extern
+vm_offset_t avail_start;
+
+/*
+ * Multiprocessor i386/i486 systems use a separate copy of the
+ * GDT, IDT, LDT, and kernel TSS per processor. The first three
+ * are separate to avoid lock contention: the i386 uses locked
+ * memory cycles to access the descriptor tables. The TSS is
+ * separate since each processor needs its own kernel stack,
+ * and since using a TSS marks it busy.
+ */
+
+/*
+ * Allocated descriptor tables.
+ */
+struct mp_desc_table *mp_desc_table[NCPUS] = { 0 };
+
+/*
+ * Pointer to TSS for access in load_context.
+ */
+struct i386_tss *mp_ktss[NCPUS] = { 0 };
+
+/*
+ * Pointer to GDT to reset the KTSS busy bit.
+ */
+struct real_descriptor *mp_gdt[NCPUS] = { 0 };
+
+/*
+ * Boot-time tables, for initialization and master processor.
+ */
+extern struct real_gate idt[IDTSZ];
+extern struct real_descriptor gdt[GDTSZ];
+extern struct real_descriptor ldt[LDTSZ];
+extern struct i386_tss ktss;
+
+/*
+ * Allocate and initialize the per-processor descriptor tables.
+ */
+
+struct mp_desc_table *
+mp_desc_init(mycpu)
+ register int mycpu;
+{
+ register struct mp_desc_table *mpt;
+
+ if (mycpu == master_cpu) {
+ /*
+ * Master CPU uses the tables built at boot time.
+ * Just set the TSS and GDT pointers.
+ */
+ mp_ktss[mycpu] = &ktss;
+ mp_gdt[mycpu] = gdt;
+ return 0;
+ }
+ else {
+ /*
+ * Other CPUs allocate the table from the bottom of
+ * the interrupt stack.
+ */
+ mpt = (struct mp_desc_table *) interrupt_stack[mycpu];
+
+ mp_desc_table[mycpu] = mpt;
+ mp_ktss[mycpu] = &mpt->ktss;
+ mp_gdt[mycpu] = mpt->gdt;
+
+ /*
+ * Copy the tables
+ */
+ bcopy((char *)idt,
+ (char *)mpt->idt,
+ sizeof(idt));
+ bcopy((char *)gdt,
+ (char *)mpt->gdt,
+ sizeof(gdt));
+ bcopy((char *)ldt,
+ (char *)mpt->ldt,
+ sizeof(ldt));
+ bzero((char *)&mpt->ktss,
+ sizeof(struct i386_tss));
+
+ /*
+ * Fix up the entries in the GDT to point to
+ * this LDT and this TSS.
+ */
+ fill_descriptor(&mpt->gdt[sel_idx(KERNEL_LDT)],
+ (unsigned)&mpt->ldt,
+ LDTSZ * sizeof(struct real_descriptor) - 1,
+ ACC_P|ACC_PL_K|ACC_LDT, 0);
+ fill_descriptor(&mpt->gdt[sel_idx(KERNEL_TSS)],
+ (unsigned)&mpt->ktss,
+ sizeof(struct i386_tss) - 1,
+ ACC_P|ACC_PL_K|ACC_TSS, 0);
+
+ mpt->ktss.ss0 = KERNEL_DS;
+ mpt->ktss.io_bit_map_offset = 0x0FFF; /* no IO bitmap */
+
+ return mpt;
+ }
+}
+
+
+/*
+ * Called after all CPUs have been found, but before the VM system
+ * is running. The machine array must show which CPUs exist.
+ */
+void
+interrupt_stack_alloc()
+{
+ register int i;
+ int cpu_count;
+ vm_offset_t stack_start;
+
+ /*
+ * Count the number of CPUs.
+ */
+ cpu_count = 0;
+ for (i = 0; i < NCPUS; i++)
+ if (machine_slot[i].is_cpu)
+ cpu_count++;
+
+ /*
+ * Allocate an interrupt stack for each CPU except for
+ * the master CPU (which uses the bootstrap stack)
+ */
+ if (!init_alloc(INTSTACK_SIZE*(cpu_count-1), &stack_start))
+ panic("not enough memory for interrupt stacks");
+
+ /*
+ * Set up pointers to the top of the interrupt stack.
+ */
+ for (i = 0; i < NCPUS; i++) {
+ if (i == master_cpu) {
+ interrupt_stack[i] = (vm_offset_t) intstack;
+ int_stack_top[i] = (vm_offset_t) eintstack;
+ }
+ else if (machine_slot[i].is_cpu) {
+ interrupt_stack[i] = stack_start;
+ int_stack_top[i] = stack_start + INTSTACK_SIZE;
+
+ stack_start += INTSTACK_SIZE;
+ }
+ }
+
+ /*
+ * Set up the barrier address. All thread stacks MUST
+ * be above this address.
+ */
+ int_stack_high = stack_start;
+}
+
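Keeping every interrupt stack below int_stack_high is what lets all_intrs decide whether it is already on an interrupt stack with a single unsigned compare, roughly:

	/* Sketch of the cmpl EXT(int_stack_high),%esp test in all_intrs. */
	static int
	on_interrupt_stack(unsigned long esp, unsigned long int_stack_high)
	{
		return esp < int_stack_high;	/* thread kernel stacks all lie above */
	}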
+/* XXX should be adjusted per CPU speed */
+int simple_lock_pause_loop = 100;
+
+unsigned int simple_lock_pause_count = 0; /* debugging */
+
+void
+simple_lock_pause()
+{
+ static volatile int dummy;
+ int i;
+
+ simple_lock_pause_count++;
+
+ /*
+ * Used in loops that are trying to acquire locks out-of-order.
+ */
+
+ for (i = 0; i < simple_lock_pause_loop; i++)
+ dummy++; /* keep the compiler from optimizing the loop away */
+}
+
+#endif /* NCPUS > 1 */
diff --git a/i386/i386/mp_desc.h b/i386/i386/mp_desc.h
new file mode 100644
index 00000000..dbc3f5ea
--- /dev/null
+++ b/i386/i386/mp_desc.h
@@ -0,0 +1,84 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_MP_DESC_H_
+#define _I386_MP_DESC_H_
+
+#include <cpus.h>
+
+#if MULTIPROCESSOR
+
+/*
+ * Multiprocessor i386/i486 systems use a separate copy of the
+ * GDT, IDT, LDT, and kernel TSS per processor. The first three
+ * are separate to avoid lock contention: the i386 uses locked
+ * memory cycles to access the descriptor tables. The TSS is
+ * separate since each processor needs its own kernel stack,
+ * and since using a TSS marks it busy.
+ */
+
+#include "seg.h"
+#include "tss.h"
+#include "idt.h"
+#include "gdt.h"
+#include "ldt.h"
+
+/*
+ * The descriptor tables are together in a structure
+ * allocated one per processor (except for the boot processor).
+ */
+struct mp_desc_table {
+ struct real_gate idt[IDTSZ]; /* IDT */
+ struct real_descriptor gdt[GDTSZ]; /* GDT */
+ struct real_descriptor ldt[LDTSZ]; /* LDT */
+ struct i386_tss ktss;
+};
+
+/*
+ * They are pointed to by a per-processor array.
+ */
+extern struct mp_desc_table *mp_desc_table[NCPUS];
+
+/*
+ * The kernel TSS gets its own pointer.
+ */
+extern struct i386_tss *mp_ktss[NCPUS];
+
+/*
+ * So does the GDT.
+ */
+extern struct real_descriptor *mp_gdt[NCPUS];
+
+
+/*
+ * Each CPU calls this routine to set up its descriptor tables.
+ */
+extern struct mp_desc_table * mp_desc_init(/* int */);
+
+
+#endif /* MULTIPROCESSOR */
+
+#endif /* _I386_MP_DESC_H_ */
diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
new file mode 100644
index 00000000..f16f21a6
--- /dev/null
+++ b/i386/i386/pcb.c
@@ -0,0 +1,769 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <cpus.h>
+#include <mach_debug.h>
+
+#include <mach/std_types.h>
+#include <mach/kern_return.h>
+#include <mach/thread_status.h>
+#include <mach/exec/exec.h>
+
+#include "vm_param.h"
+#include <kern/counters.h>
+#include <kern/mach_param.h>
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#include <vm/vm_kern.h>
+#include <vm/pmap.h>
+
+#include <i386/thread.h>
+#include <i386/proc_reg.h>
+#include <i386/seg.h>
+#include <i386/tss.h>
+#include <i386/user_ldt.h>
+#include <i386/fpu.h>
+#include "eflags.h"
+#include "gdt.h"
+#include "ldt.h"
+#include "ktss.h"
+
+#if NCPUS > 1
+#include <i386/mp_desc.h>
+#endif
+
+extern thread_t Switch_context();
+extern void Thread_continue();
+
+extern iopb_tss_t iopb_create();
+extern void iopb_destroy();
+extern void user_ldt_free();
+
+zone_t pcb_zone;
+
+vm_offset_t kernel_stack[NCPUS]; /* top of active_stack */
+
+/*
+ * stack_attach:
+ *
+ * Attach a kernel stack to a thread.
+ */
+
+void stack_attach(thread, stack, continuation)
+ register thread_t thread;
+ register vm_offset_t stack;
+ void (*continuation)();
+{
+ counter(if (++c_stacks_current > c_stacks_max)
+ c_stacks_max = c_stacks_current);
+
+ thread->kernel_stack = stack;
+
+ /*
+ * We want to run continuation, giving it as an argument
+ * the return value from Load_context/Switch_context.
+ * Thread_continue takes care of the mismatch between
+ * the argument-passing/return-value conventions.
+ * This function will not return normally,
+ * so we don`t have to worry about a return address.
+ */
+ STACK_IKS(stack)->k_eip = (int) Thread_continue;
+ STACK_IKS(stack)->k_ebx = (int) continuation;
+ STACK_IKS(stack)->k_esp = (int) STACK_IEL(stack);
+
+ /*
+ * Point top of kernel stack to user`s registers.
+ */
+ STACK_IEL(stack)->saved_state = &thread->pcb->iss;
+}
+
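Conceptually, the three k_* fields above arrange for the first switch onto this stack to land in Thread_continue with the continuation in %ebx and the value returned by Load_context/Switch_context in %eax. Thread_continue itself is assembly and not part of this file; in C terms the effect is roughly:

	/* Rough equivalent of what the primed stack does; illustrative only. */
	static void
	thread_continue_sketch(void *switch_result, void (*continuation)())
	{
		(*continuation)(switch_result);		/* never returns */
	}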
+/*
+ * stack_detach:
+ *
+ * Detaches a kernel stack from a thread, returning the old stack.
+ */
+
+vm_offset_t stack_detach(thread)
+ register thread_t thread;
+{
+ register vm_offset_t stack;
+
+ counter(if (--c_stacks_current < c_stacks_min)
+ c_stacks_min = c_stacks_current);
+
+ stack = thread->kernel_stack;
+ thread->kernel_stack = 0;
+
+ return stack;
+}
+
+#if NCPUS > 1
+#define curr_gdt(mycpu) (mp_gdt[mycpu])
+#define curr_ktss(mycpu) (mp_ktss[mycpu])
+#else
+#define curr_gdt(mycpu) (gdt)
+#define curr_ktss(mycpu) (&ktss)
+#endif
+
+#define gdt_desc_p(mycpu,sel) \
+ ((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])
+
+void switch_ktss(pcb)
+ register pcb_t pcb;
+{
+ int mycpu = cpu_number();
+ {
+ register iopb_tss_t tss = pcb->ims.io_tss;
+ vm_offset_t pcb_stack_top;
+
+ /*
+ * Save a pointer to the top of the "kernel" stack -
+ * actually the place in the PCB where a trap into
+ * kernel mode will push the registers.
+ * The location depends on V8086 mode. If we are
+ * not in V8086 mode, then a trap into the kernel
+ * won`t save the v86 segments, so we leave room.
+ */
+
+ pcb_stack_top = (pcb->iss.efl & EFL_VM)
+ ? (int) (&pcb->iss + 1)
+ : (int) (&pcb->iss.v86_segs);
+
+ if (tss == 0) {
+ /*
+ * No per-thread IO permissions.
+ * Use standard kernel TSS.
+ */
+ if (!(gdt_desc_p(mycpu,KERNEL_TSS)->access & ACC_TSS_BUSY))
+ set_tr(KERNEL_TSS);
+ curr_ktss(mycpu)->esp0 = pcb_stack_top;
+ }
+ else {
+ /*
+ * Set the IO permissions. Use this thread`s TSS.
+ */
+ *gdt_desc_p(mycpu,USER_TSS)
+ = *(struct real_descriptor *)tss->iopb_desc;
+ tss->tss.esp0 = pcb_stack_top;
+ set_tr(USER_TSS);
+ gdt_desc_p(mycpu,KERNEL_TSS)->access &= ~ ACC_TSS_BUSY;
+ }
+ }
+
+ {
+ register user_ldt_t ldt = pcb->ims.ldt;
+ /*
+ * Set the thread`s LDT.
+ */
+ if (ldt == 0) {
+ /*
+ * Use system LDT.
+ */
+ set_ldt(KERNEL_LDT);
+ }
+ else {
+ /*
+ * Thread has its own LDT.
+ */
+ *gdt_desc_p(mycpu,USER_LDT) = ldt->desc;
+ set_ldt(USER_LDT);
+ }
+ }
+ /*
+ * Load the floating-point context, if necessary.
+ */
+ fpu_load_context(pcb);
+
+}
+
+/*
+ * stack_handoff:
+ *
+ * Move the current thread's kernel stack to the new thread.
+ */
+
+void stack_handoff(old, new)
+ register thread_t old;
+ register thread_t new;
+{
+ register int mycpu = cpu_number();
+ register vm_offset_t stack;
+
+ /*
+ * Save FP registers if in use.
+ */
+ fpu_save_context(old);
+
+ /*
+ * Switch address maps if switching tasks.
+ */
+ {
+ task_t old_task, new_task;
+
+ if ((old_task = old->task) != (new_task = new->task)) {
+ PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
+ old, mycpu);
+ PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
+ new, mycpu);
+ }
+ }
+
+ /*
+ * Load the rest of the user state for the new thread
+ */
+ switch_ktss(new->pcb);
+
+ /*
+ * Switch to new thread
+ */
+ stack = current_stack();
+ old->kernel_stack = 0;
+ new->kernel_stack = stack;
+ active_threads[mycpu] = new;
+
+ /*
+ * Switch exception link to point to new
+ * user registers.
+ */
+
+ STACK_IEL(stack)->saved_state = &new->pcb->iss;
+
+}
+
+/*
+ * Switch to the first thread on a CPU.
+ */
+void load_context(new)
+ register thread_t new;
+{
+ switch_ktss(new->pcb);
+ Load_context(new);
+}
+
+/*
+ * Switch to a new thread.
+ * Save the old thread`s kernel state or continuation,
+ * and return it.
+ */
+thread_t switch_context(old, continuation, new)
+ register thread_t old;
+ void (*continuation)();
+ register thread_t new;
+{
+ /*
+ * Save FP registers if in use.
+ */
+ fpu_save_context(old);
+
+ /*
+ * Switch address maps if switching tasks.
+ */
+ {
+ task_t old_task, new_task;
+ int mycpu = cpu_number();
+
+ if ((old_task = old->task) != (new_task = new->task)) {
+ PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
+ old, mycpu);
+ PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
+ new, mycpu);
+ }
+ }
+
+ /*
+ * Load the rest of the user state for the new thread
+ */
+ switch_ktss(new->pcb);
+
+ return Switch_context(old, continuation, new);
+}
+
+void pcb_module_init()
+{
+ pcb_zone = zinit(sizeof(struct pcb),
+ THREAD_MAX * sizeof(struct pcb),
+ THREAD_CHUNK * sizeof(struct pcb),
+ 0, "i386 pcb state");
+
+ fpu_module_init();
+ iopb_init();
+}
+
+void pcb_init(thread)
+ register thread_t thread;
+{
+ register pcb_t pcb;
+
+ pcb = (pcb_t) zalloc(pcb_zone);
+ if (pcb == 0)
+ panic("pcb_init");
+
+ counter(if (++c_threads_current > c_threads_max)
+ c_threads_max = c_threads_current);
+
+ /*
+ * We can't let random values leak out to the user.
+ */
+ bzero((char *) pcb, sizeof *pcb);
+ simple_lock_init(&pcb->lock);
+
+ /*
+ * Guarantee that the bootstrapped thread will be in user
+ * mode.
+ */
+ pcb->iss.cs = USER_CS;
+ pcb->iss.ss = USER_DS;
+ pcb->iss.ds = USER_DS;
+ pcb->iss.es = USER_DS;
+ pcb->iss.fs = USER_DS;
+ pcb->iss.gs = USER_DS;
+ pcb->iss.efl = EFL_USER_SET;
+
+ thread->pcb = pcb;
+}
+
+void pcb_terminate(thread)
+ register thread_t thread;
+{
+ register pcb_t pcb = thread->pcb;
+
+ counter(if (--c_threads_current < c_threads_min)
+ c_threads_min = c_threads_current);
+
+ if (pcb->ims.io_tss != 0)
+ iopb_destroy(pcb->ims.io_tss);
+ if (pcb->ims.ifps != 0)
+ fp_free(pcb->ims.ifps);
+ if (pcb->ims.ldt != 0)
+ user_ldt_free(pcb->ims.ldt);
+ zfree(pcb_zone, (vm_offset_t) pcb);
+ thread->pcb = 0;
+}
+
+/*
+ * pcb_collect:
+ *
+ * Attempt to free excess pcb memory.
+ */
+
+void pcb_collect(thread)
+ thread_t thread;
+{
+}
+
+
+/*
+ * thread_setstatus:
+ *
+ * Set the status of the specified thread.
+ */
+
+kern_return_t thread_setstatus(thread, flavor, tstate, count)
+ thread_t thread;
+ int flavor;
+ thread_state_t tstate;
+ unsigned int count;
+{
+ switch (flavor) {
+ case i386_THREAD_STATE:
+ case i386_REGS_SEGS_STATE:
+ {
+ register struct i386_thread_state *state;
+ register struct i386_saved_state *saved_state;
+
+ if (count < i386_THREAD_STATE_COUNT) {
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ state = (struct i386_thread_state *) tstate;
+
+ if (flavor == i386_REGS_SEGS_STATE) {
+ /*
+ * Code and stack selectors must not be null,
+ * and must have user protection levels.
+ * Only the low 16 bits are valid.
+ */
+ state->cs &= 0xffff;
+ state->ss &= 0xffff;
+ state->ds &= 0xffff;
+ state->es &= 0xffff;
+ state->fs &= 0xffff;
+ state->gs &= 0xffff;
+
+ if (state->cs == 0 || (state->cs & SEL_PL) != SEL_PL_U
+ || state->ss == 0 || (state->ss & SEL_PL) != SEL_PL_U)
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ saved_state = USER_REGS(thread);
+
+ /*
+ * General registers
+ */
+ saved_state->edi = state->edi;
+ saved_state->esi = state->esi;
+ saved_state->ebp = state->ebp;
+ saved_state->uesp = state->uesp;
+ saved_state->ebx = state->ebx;
+ saved_state->edx = state->edx;
+ saved_state->ecx = state->ecx;
+ saved_state->eax = state->eax;
+ saved_state->eip = state->eip;
+ saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
+ | EFL_USER_SET;
+
+ /*
+ * Segment registers. Set differently in V8086 mode.
+ */
+ if (state->efl & EFL_VM) {
+ /*
+ * Set V8086 mode segment registers.
+ */
+ saved_state->cs = state->cs & 0xffff;
+ saved_state->ss = state->ss & 0xffff;
+ saved_state->v86_segs.v86_ds = state->ds & 0xffff;
+ saved_state->v86_segs.v86_es = state->es & 0xffff;
+ saved_state->v86_segs.v86_fs = state->fs & 0xffff;
+ saved_state->v86_segs.v86_gs = state->gs & 0xffff;
+
+ /*
+ * Zero protected mode segment registers.
+ */
+ saved_state->ds = 0;
+ saved_state->es = 0;
+ saved_state->fs = 0;
+ saved_state->gs = 0;
+
+ if (thread->pcb->ims.v86s.int_table) {
+ /*
+ * Hardware assist on.
+ */
+ thread->pcb->ims.v86s.flags =
+ state->efl & (EFL_TF | EFL_IF);
+ }
+ }
+ else if (flavor == i386_THREAD_STATE) {
+ /*
+ * 386 mode. Set segment registers for flat
+ * 32-bit address space.
+ */
+ saved_state->cs = USER_CS;
+ saved_state->ss = USER_DS;
+ saved_state->ds = USER_DS;
+ saved_state->es = USER_DS;
+ saved_state->fs = USER_DS;
+ saved_state->gs = USER_DS;
+ }
+ else {
+ /*
+ * User setting segment registers.
+ * Code and stack selectors have already been
+ * checked. Others will be reset by 'iret'
+ * if they are not valid.
+ */
+ saved_state->cs = state->cs;
+ saved_state->ss = state->ss;
+ saved_state->ds = state->ds;
+ saved_state->es = state->es;
+ saved_state->fs = state->fs;
+ saved_state->gs = state->gs;
+ }
+ break;
+ }
+
+ case i386_FLOAT_STATE: {
+
+ if (count < i386_FLOAT_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ return fpu_set_state(thread,
+ (struct i386_float_state *) tstate);
+ }
+
+ /*
+ * Temporary - replace by i386_io_map
+ */
+ case i386_ISA_PORT_MAP_STATE: {
+ register struct i386_isa_port_map_state *state;
+ register iopb_tss_t tss;
+
+ if (count < i386_ISA_PORT_MAP_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+#if 0
+ /*
+ * If the thread has no ktss yet,
+ * we must allocate one.
+ */
+
+ state = (struct i386_isa_port_map_state *) tstate;
+ tss = thread->pcb->ims.io_tss;
+ if (tss == 0) {
+ tss = iopb_create();
+ thread->pcb->ims.io_tss = tss;
+ }
+
+ bcopy((char *) state->pm,
+ (char *) tss->bitmap,
+ sizeof state->pm);
+#endif
+ break;
+ }
+
+ case i386_V86_ASSIST_STATE:
+ {
+ register struct i386_v86_assist_state *state;
+ vm_offset_t int_table;
+ int int_count;
+
+ if (count < i386_V86_ASSIST_STATE_COUNT)
+ return KERN_INVALID_ARGUMENT;
+
+ state = (struct i386_v86_assist_state *) tstate;
+ int_table = state->int_table;
+ int_count = state->int_count;
+
+ if (int_table >= VM_MAX_ADDRESS ||
+ int_table +
+ int_count * sizeof(struct v86_interrupt_table)
+ > VM_MAX_ADDRESS)
+ return KERN_INVALID_ARGUMENT;
+
+ thread->pcb->ims.v86s.int_table = int_table;
+ thread->pcb->ims.v86s.int_count = int_count;
+
+ thread->pcb->ims.v86s.flags =
+ USER_REGS(thread)->efl & (EFL_TF | EFL_IF);
+ break;
+ }
+
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ return(KERN_SUCCESS);
+}
+
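From user space, the i386_THREAD_STATE case above is normally reached through the thread_get_state/thread_set_state RPCs. A hedged example follows; the mach.h include and the helper name are assumptions, error handling is abbreviated, and in practice the target thread should be suspended around the get/set pair.

	#include <mach.h>			/* assumed user-side Mach header */
	#include <mach/machine/thread_status.h>

	/* Point an existing thread's PC at new_eip, leaving the rest alone. */
	kern_return_t
	set_thread_pc(thread_t thread, unsigned int new_eip)
	{
		struct i386_thread_state st;
		unsigned int count = i386_THREAD_STATE_COUNT;
		kern_return_t kr;

		kr = thread_get_state(thread, i386_THREAD_STATE,
				      (thread_state_t) &st, &count);
		if (kr != KERN_SUCCESS)
			return kr;
		st.eip = new_eip;
		return thread_set_state(thread, i386_THREAD_STATE,
					(thread_state_t) &st,
					i386_THREAD_STATE_COUNT);
	}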
+/*
+ * thread_getstatus:
+ *
+ * Get the status of the specified thread.
+ */
+
+kern_return_t thread_getstatus(thread, flavor, tstate, count)
+ register thread_t thread;
+ int flavor;
+ thread_state_t tstate; /* pointer to OUT array */
+ unsigned int *count; /* IN/OUT */
+{
+ switch (flavor) {
+ case THREAD_STATE_FLAVOR_LIST:
+ if (*count < 4)
+ return (KERN_INVALID_ARGUMENT);
+ tstate[0] = i386_THREAD_STATE;
+ tstate[1] = i386_FLOAT_STATE;
+ tstate[2] = i386_ISA_PORT_MAP_STATE;
+ tstate[3] = i386_V86_ASSIST_STATE;
+ *count = 4;
+ break;
+
+ case i386_THREAD_STATE:
+ case i386_REGS_SEGS_STATE:
+ {
+ register struct i386_thread_state *state;
+ register struct i386_saved_state *saved_state;
+
+ if (*count < i386_THREAD_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ state = (struct i386_thread_state *) tstate;
+ saved_state = USER_REGS(thread);
+
+ /*
+ * General registers.
+ */
+ state->edi = saved_state->edi;
+ state->esi = saved_state->esi;
+ state->ebp = saved_state->ebp;
+ state->ebx = saved_state->ebx;
+ state->edx = saved_state->edx;
+ state->ecx = saved_state->ecx;
+ state->eax = saved_state->eax;
+ state->eip = saved_state->eip;
+ state->efl = saved_state->efl;
+ state->uesp = saved_state->uesp;
+
+ state->cs = saved_state->cs;
+ state->ss = saved_state->ss;
+ if (saved_state->efl & EFL_VM) {
+ /*
+ * V8086 mode.
+ */
+ state->ds = saved_state->v86_segs.v86_ds & 0xffff;
+ state->es = saved_state->v86_segs.v86_es & 0xffff;
+ state->fs = saved_state->v86_segs.v86_fs & 0xffff;
+ state->gs = saved_state->v86_segs.v86_gs & 0xffff;
+
+ if (thread->pcb->ims.v86s.int_table) {
+ /*
+ * Hardware assist on
+ */
+ if ((thread->pcb->ims.v86s.flags &
+ (EFL_IF|V86_IF_PENDING))
+ == 0)
+ state->efl &= ~EFL_IF;
+ }
+ }
+ else {
+ /*
+ * 386 mode.
+ */
+ state->ds = saved_state->ds & 0xffff;
+ state->es = saved_state->es & 0xffff;
+ state->fs = saved_state->fs & 0xffff;
+ state->gs = saved_state->gs & 0xffff;
+ }
+ *count = i386_THREAD_STATE_COUNT;
+ break;
+ }
+
+ case i386_FLOAT_STATE: {
+
+ if (*count < i386_FLOAT_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ *count = i386_FLOAT_STATE_COUNT;
+ return fpu_get_state(thread,
+ (struct i386_float_state *)tstate);
+ }
+
+ /*
+ * Temporary - replace by i386_io_map
+ */
+ case i386_ISA_PORT_MAP_STATE: {
+ register struct i386_isa_port_map_state *state;
+ register iopb_tss_t tss;
+
+ if (*count < i386_ISA_PORT_MAP_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ state = (struct i386_isa_port_map_state *) tstate;
+ tss = thread->pcb->ims.io_tss;
+
+ if (tss == 0) {
+ int i;
+
+ /*
+ * The thread has no ktss, so no IO permissions.
+ */
+
+ for (i = 0; i < sizeof state->pm; i++)
+ state->pm[i] = 0xff;
+ } else {
+ /*
+ * The thread has its own ktss.
+ */
+
+ bcopy((char *) tss->bitmap,
+ (char *) state->pm,
+ sizeof state->pm);
+ }
+
+ *count = i386_ISA_PORT_MAP_STATE_COUNT;
+ break;
+ }
+
+ case i386_V86_ASSIST_STATE:
+ {
+ register struct i386_v86_assist_state *state;
+
+ if (*count < i386_V86_ASSIST_STATE_COUNT)
+ return KERN_INVALID_ARGUMENT;
+
+ state = (struct i386_v86_assist_state *) tstate;
+ state->int_table = thread->pcb->ims.v86s.int_table;
+ state->int_count = thread->pcb->ims.v86s.int_count;
+
+ *count = i386_V86_ASSIST_STATE_COUNT;
+ break;
+ }
+
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Alter the thread`s state so that a following thread_exception_return
+ * will make the thread return 'retval' from a syscall.
+ */
+void
+thread_set_syscall_return(thread, retval)
+ thread_t thread;
+ kern_return_t retval;
+{
+ thread->pcb->iss.eax = retval;
+}
+
+
+/*
+ * Return preferred address of user stack.
+ * Always returns low address. If stack grows up,
+ * the stack grows away from this address;
+ * if stack grows down, the stack grows towards this
+ * address.
+ */
+vm_offset_t
+user_stack_low(stack_size)
+ vm_size_t stack_size;
+{
+ return (VM_MAX_ADDRESS - stack_size);
+}
+
+/*
+ * Allocate argument area and set registers for first user thread.
+ */
+vm_offset_t
+set_user_regs(stack_base, stack_size, exec_info, arg_size)
+ vm_offset_t stack_base; /* low address */
+ vm_offset_t stack_size;
+ struct exec_info *exec_info;
+ vm_size_t arg_size;
+{
+ vm_offset_t arg_addr;
+ register struct i386_saved_state *saved_state;
+
+ arg_size = (arg_size + sizeof(int) - 1) & ~(sizeof(int)-1);
+ arg_addr = stack_base + stack_size - arg_size;
+
+ saved_state = USER_REGS(current_thread());
+ saved_state->uesp = (int)arg_addr;
+ saved_state->eip = exec_info->entry;
+
+ return (arg_addr);
+}
diff --git a/i386/i386/phys.c b/i386/i386/phys.c
new file mode 100644
index 00000000..518812d3
--- /dev/null
+++ b/i386/i386/phys.c
@@ -0,0 +1,102 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/boolean.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <vm/vm_map.h>
+#include "vm_param.h"
+#include <mach/vm_prot.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
+#include <i386/pmap.h>
+#include <mach/machine/vm_param.h>
+
+/*
+ * pmap_zero_page zeros the specified (machine independent) page.
+ */
+pmap_zero_page(p)
+ vm_offset_t p;
+{
+ assert(p != vm_page_fictitious_addr);
+ bzero(phystokv(p), PAGE_SIZE);
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent) pages.
+ */
+pmap_copy_page(src, dst)
+ vm_offset_t src, dst;
+{
+ assert(src != vm_page_fictitious_addr);
+ assert(dst != vm_page_fictitious_addr);
+
+ bcopy(phystokv(src), phystokv(dst), PAGE_SIZE);
+}
+
+/*
+ * copy_to_phys(src_addr_v, dst_addr_p, count)
+ *
+ * Copy virtual memory to physical memory
+ */
+copy_to_phys(src_addr_v, dst_addr_p, count)
+ vm_offset_t src_addr_v, dst_addr_p;
+ int count;
+{
+ assert(dst_addr_p != vm_page_fictitious_addr);
+ bcopy(src_addr_v, phystokv(dst_addr_p), count);
+}
+
+/*
+ * copy_from_phys(src_addr_p, dst_addr_v, count)
+ *
+ * Copy physical memory to virtual memory. The virtual memory
+ * is assumed to be present (e.g. the buffer pool).
+ */
+copy_from_phys(src_addr_p, dst_addr_v, count)
+ vm_offset_t src_addr_p, dst_addr_v;
+ int count;
+{
+ assert(src_addr_p != vm_page_fictitious_addr);
+ bcopy(phystokv(src_addr_p), dst_addr_v, count);
+}
+
+/*
+ * kvtophys(addr)
+ *
+ * Convert a kernel virtual address to a physical address
+ */
+vm_offset_t
+kvtophys(addr)
+vm_offset_t addr;
+{
+ pt_entry_t *pte;
+
+ if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL)
+ return 0;
+ return i386_trunc_page(*pte) | (addr & INTEL_OFFMASK);
+}
diff --git a/i386/i386/pic.c b/i386/i386/pic.c
new file mode 100644
index 00000000..8380db84
--- /dev/null
+++ b/i386/i386/pic.c
@@ -0,0 +1,270 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <platforms.h>
+
+#include <sys/types.h>
+#include <i386/ipl.h>
+#include <i386/pic.h>
+#include <i386/machspl.h>
+
+spl_t curr_ipl;
+int pic_mask[NSPL];
+int curr_pic_mask;
+
+int iunit[NINTR] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+int nintr = NINTR;
+int npics = NPICS;
+
+char *master_icw, *master_ocw, *slaves_icw, *slaves_ocw;
+
+u_short PICM_ICW1, PICM_OCW1, PICS_ICW1, PICS_OCW1 ;
+u_short PICM_ICW2, PICM_OCW2, PICS_ICW2, PICS_OCW2 ;
+u_short PICM_ICW3, PICM_OCW3, PICS_ICW3, PICS_OCW3 ;
+u_short PICM_ICW4, PICS_ICW4 ;
+
+/*
+** picinit() - This routine
+** * Establishes a table of interrupt vectors
+** * Establishes a table of interrupt priority levels
+** * Establishes a table of interrupt masks to be put
+** in the PICs.
+** * Establishes location of PICs in the system
+** * Initialises them
+**
+** At this stage the interrupt functionality of this system should be
+** complete.
+**
+*/
+
+
+/*
+** 1. First we form a table of PIC masks - rather than calling form_pic_mask()
+** each time there is a change of interrupt level, we form a table
+** of pic masks, as there are only 7 interrupt priority levels.
+**
+** 2. The next thing we must do is to determine which of the PIC interrupt
+** request lines have to be masked out; this is done by calling
+** form_pic_mask() with an (int_lev) of zero, which will find all the
+** interrupt lines that have priority 0 (i.e. are to be ignored).
+** Then we split this up for the master/slave PICs.
+**
+** 3. Initialise the PICs, master first, then the slave.
+** All the register field definitions are described in pic_jh.h, along with
+** the settings chosen for these fields in the various registers.
+**
+*/
+
+picinit()
+{
+
+ u_short i;
+
+ asm("cli");
+
+ /*
+ ** 1. Form pic mask table
+ */
+#if 0
+ printf (" Let the console driver screw up this line ! \n");
+#endif
+
+ form_pic_mask();
+
+ /*
+ ** 1a. Select current SPL.
+ */
+
+ curr_ipl = SPLHI;
+ curr_pic_mask = pic_mask[SPLHI];
+
+ /*
+ ** 2. Generate addresses to each PIC port.
+ */
+
+ master_icw = (char *)PIC_MASTER_ICW;
+ master_ocw = (char *)PIC_MASTER_OCW;
+ slaves_icw = (char *)PIC_SLAVE_ICW;
+ slaves_ocw = (char *)PIC_SLAVE_OCW;
+
+#ifdef PS2
+#else /* PS2 */
+ /*
+ ** 3. Select options for each ICW and each OCW for each PIC.
+ */
+
+ PICM_ICW1 =
+ (ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8 | CASCADE_MODE | ICW4__NEEDED);
+
+ PICS_ICW1 =
+ (ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8 | CASCADE_MODE | ICW4__NEEDED);
+
+ PICM_ICW2 = PICM_VECTBASE;
+ PICS_ICW2 = PICS_VECTBASE;
+
+#ifdef AT386
+ PICM_ICW3 = ( SLAVE_ON_IR2 );
+ PICS_ICW3 = ( I_AM_SLAVE_2 );
+#endif AT386
+#ifdef iPSC386
+ PICM_ICW3 = ( SLAVE_ON_IR7 );
+ PICS_ICW3 = ( I_AM_SLAVE_7 );
+#endif iPSC386
+
+#ifdef iPSC386
+ /* Use Buffered mode for iPSC386 */
+ PICM_ICW4 = (SNF_MODE_DIS | BUFFERD_MODE | I_AM_A_MASTR |
+ NRML_EOI_MOD | I8086_EMM_MOD);
+ PICS_ICW4 = (SNF_MODE_DIS | BUFFERD_MODE | I_AM_A_SLAVE |
+ NRML_EOI_MOD | I8086_EMM_MOD);
+#else iPSC386
+ PICM_ICW4 =
+ (SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD | I8086_EMM_MOD);
+ PICS_ICW4 =
+ (SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD | I8086_EMM_MOD);
+#endif iPSC386
+
+ PICM_OCW1 = (curr_pic_mask & 0x00FF);
+ PICS_OCW1 = ((curr_pic_mask & 0xFF00)>>8);
+
+ PICM_OCW2 = NON_SPEC_EOI;
+ PICS_OCW2 = NON_SPEC_EOI;
+
+ PICM_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );
+ PICS_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );
+
+
+ /*
+ ** 4. Initialise master - send commands to master PIC
+ */
+
+ outb ( master_icw, PICM_ICW1 );
+ outb ( master_ocw, PICM_ICW2 );
+ outb ( master_ocw, PICM_ICW3 );
+ outb ( master_ocw, PICM_ICW4 );
+
+ outb ( master_ocw, PICM_MASK );
+ outb ( master_icw, PICM_OCW3 );
+
+ /*
+ ** 5. Initialise slave - send commands to slave PIC
+ */
+
+ outb ( slaves_icw, PICS_ICW1 );
+ outb ( slaves_ocw, PICS_ICW2 );
+ outb ( slaves_ocw, PICS_ICW3 );
+ outb ( slaves_ocw, PICS_ICW4 );
+
+
+ outb ( slaves_ocw, PICS_OCW1 );
+ outb ( slaves_icw, PICS_OCW3 );
+
+ /*
+ ** 6. Initialise interrupts
+ */
+ outb ( master_ocw, PICM_OCW1 );
+
+#endif /* PS2 */
+
+#if 0
+ printf(" spl set to %x \n", curr_pic_mask);
+#endif
+
+}
+
+
+/*
+** form_pic_mask(int_lvl)
+**
+** For a given interrupt priority level (int_lvl), this routine goes out
+** and scans through the interrupt level table, and forms a mask based on the
+** entries it finds there that have the same or lower interrupt priority level
+** as (int_lvl). It returns a 16-bit mask which will have to be split up between
+** the 2 pics.
+**
+*/
+
+#if defined(AT386) || defined(PS2)
+#define SLAVEMASK (0xFFFF ^ SLAVE_ON_IR2)
+#endif /* defined(AT386) || defined(PS2) */
+#ifdef iPSC386
+#define SLAVEMASK (0xFFFF ^ SLAVE_ON_IR7)
+#endif iPSC386
+
+#define SLAVEACTV 0xFF00
+
+form_pic_mask()
+{
+ unsigned int i, j, bit, mask;
+
+ for (i=SPL0; i < NSPL; i++) {
+ for (j=0x00, bit=0x01, mask = 0; j < NINTR; j++, bit<<=1)
+ if (intpri[j] <= i)
+ mask |= bit;
+
+ if ((mask & SLAVEACTV) != SLAVEACTV )
+ mask &= SLAVEMASK;
+
+ pic_mask[i] = mask;
+ }
+}
+
+intnull(unit_dev)
+{
+ printf("intnull(%d)\n", unit_dev);
+}
+
+int prtnull_count = 0;
+prtnull(unit)
+{
+ ++prtnull_count;
+}
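
The mask-table computation in form_pic_mask() is easier to see in isolation.
A standalone sketch, runnable in user space; the intpri[] values are invented
for illustration (the real table is filled in elsewhere, e.g. clkstart() puts
the clock line at SPLHI), and SLAVEMASK/SLAVEACTV use the AT386 values:

    #include <stdio.h>

    #define NINTR     16
    #define NSPL       8                    /* assumed number of spl levels */
    #define SLAVEACTV 0xFF00
    #define SLAVEMASK (0xFFFF ^ 0x0004)     /* clears IRQ2, the cascade line */

    /* invented priorities: clock on IRQ0 at 7, keyboard on IRQ1 at 6, ... */
    int intpri[NINTR] = { 7, 6, 0, 4, 4, 0, 5, 0, 0, 0, 0, 0, 0, 0, 5, 5 };

    int main(void)
    {
        unsigned i, j, bit, mask;

        for (i = 0; i < NSPL; i++) {
            for (j = 0, bit = 1, mask = 0; j < NINTR; j++, bit <<= 1)
                if (intpri[j] <= (int)i)           /* IRQ j blocked at spl i */
                    mask |= bit;

            if ((mask & SLAVEACTV) != SLAVEACTV)   /* a slave line is still live, */
                mask &= SLAVEMASK;                 /* so keep the cascade unmasked */

            printf("pic_mask[%u] = 0x%04x\n", i, mask);
        }
        return 0;
    }
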
diff --git a/i386/i386/pic.h b/i386/i386/pic.h
new file mode 100644
index 00000000..66b92d80
--- /dev/null
+++ b/i386/i386/pic.h
@@ -0,0 +1,197 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _I386_PIC_H_
+#define _I386_PIC_H_
+
+#include <platforms.h>
+
+#define NINTR 0x10
+#define NPICS 0x02
+
+/*
+** The following are definitions used to locate the PICs in the system
+*/
+
+#if defined(AT386) || defined(PS2)
+#define ADDR_PIC_BASE 0x20
+#define OFF_ICW 0x00
+#define OFF_OCW 0x01
+#define SIZE_PIC 0x80
+#endif /* defined(AT386) || defined(PS2) */
+
+#ifdef iPSC386
+#define ADDR_PIC_BASE 0xC0
+#define OFF_ICW 0x00
+#define OFF_OCW 0x02
+#define SIZE_PIC 0x04
+#endif iPSC386
+
+#define PIC_MASTER_ICW (ADDR_PIC_BASE + OFF_ICW)
+#define PIC_MASTER_OCW (ADDR_PIC_BASE + OFF_OCW)
+#define PIC_SLAVE_ICW (PIC_MASTER_ICW + SIZE_PIC)
+#define PIC_SLAVE_OCW (PIC_MASTER_OCW + SIZE_PIC)
+
+/*
+** The following banks of definitions ICW1, ICW2, ICW3, and ICW4 are used
+** to define the fields of the various ICWs for initialisation of the PICs
+*/
+
+/*
+** ICW1
+*/
+
+#define ICW_TEMPLATE 0x10
+
+#define LEVL_TRIGGER 0x08
+#define EDGE_TRIGGER 0x00
+#define ADDR_INTRVL4 0x04
+#define ADDR_INTRVL8 0x00
+#define SINGLE__MODE 0x02
+#define CASCADE_MODE 0x00
+#define ICW4__NEEDED 0x01
+#define NO_ICW4_NEED 0x00
+
+/*
+** ICW2
+*/
+
+#if defined(AT386) || defined(PS2)
+#define PICM_VECTBASE 0x40
+#define PICS_VECTBASE PICM_VECTBASE + 0x08
+#endif /* defined(AT386) || defined(PS2) */
+
+#ifdef iPSC386
+#define PICM_VECTBASE 0x40
+#define PICS_VECTBASE PICM_VECTBASE + 0x08
+#endif iPSC386
+
+/*
+** ICW3
+*/
+
+#define SLAVE_ON_IR0 0x01
+#define SLAVE_ON_IR1 0x02
+#define SLAVE_ON_IR2 0x04
+#define SLAVE_ON_IR3 0x08
+#define SLAVE_ON_IR4 0x10
+#define SLAVE_ON_IR5 0x20
+#define SLAVE_ON_IR6 0x40
+#define SLAVE_ON_IR7 0x80
+
+#define I_AM_SLAVE_0 0x00
+#define I_AM_SLAVE_1 0x01
+#define I_AM_SLAVE_2 0x02
+#define I_AM_SLAVE_3 0x03
+#define I_AM_SLAVE_4 0x04
+#define I_AM_SLAVE_5 0x05
+#define I_AM_SLAVE_6 0x06
+#define I_AM_SLAVE_7 0x07
+
+/*
+** ICW4
+*/
+
+#define SNF_MODE_ENA 0x10
+#define SNF_MODE_DIS 0x00
+#define BUFFERD_MODE 0x08
+#define NONBUFD_MODE 0x00
+#if iPSC386
+#define I_AM_A_SLAVE 0x00
+#define I_AM_A_MASTR 0x04
+#endif iPSC386
+#define AUTO_EOI_MOD 0x02
+#define NRML_EOI_MOD 0x00
+#define I8086_EMM_MOD 0x01
+#define SET_MCS_MODE 0x00
+
+/*
+** OCW1
+*/
+#define PICM_MASK 0xFF
+#define PICS_MASK 0xFF
+/*
+** OCW2
+*/
+
+#define NON_SPEC_EOI 0x20
+#define SPECIFIC_EOI 0x30
+#define ROT_NON_SPEC 0x50
+#define SET_ROT_AEOI 0x40
+#define RSET_ROTAEOI 0x00
+#define ROT_SPEC_EOI 0x70
+#define SET_PRIORITY 0x60
+#define NO_OPERATION 0x20
+
+#define SEND_EOI_IR0 0x00
+#define SEND_EOI_IR1 0x01
+#define SEND_EOI_IR2 0x02
+#define SEND_EOI_IR3 0x03
+#define SEND_EOI_IR4 0x04
+#define SEND_EOI_IR5 0x05
+#define SEND_EOI_IR6 0x06
+#define SEND_EOI_IR7 0x07
+
+/*
+** OCW3
+*/
+
+#define OCW_TEMPLATE 0x08
+#define SPECIAL_MASK 0x40
+#define MASK_MDE_SET 0x20
+#define MASK_MDE_RST 0x00
+#define POLL_COMMAND 0x04
+#define NO_POLL_CMND 0x00
+#define READ_NEXT_RD 0x02
+#define READ_IR_ONRD 0x00
+#define READ_IS_ONRD 0x01
+
+#endif _I386_PIC_H_
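
On AT386/PS2 the locator macros above resolve to the familiar 8259A I/O ports.
A quick, purely illustrative check of the arithmetic:

    #include <stdio.h>

    #define ADDR_PIC_BASE 0x20
    #define OFF_ICW       0x00
    #define OFF_OCW       0x01
    #define SIZE_PIC      0x80

    int main(void)
    {
        printf("master ICW/OCW = 0x%02x/0x%02x\n",
               ADDR_PIC_BASE + OFF_ICW, ADDR_PIC_BASE + OFF_OCW);      /* 0x20/0x21 */
        printf("slave  ICW/OCW = 0x%02x/0x%02x\n",
               ADDR_PIC_BASE + OFF_ICW + SIZE_PIC,
               ADDR_PIC_BASE + OFF_OCW + SIZE_PIC);                    /* 0xa0/0xa1 */
        return 0;
    }
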
diff --git a/i386/i386/pio.h b/i386/i386/pio.h
new file mode 100644
index 00000000..b2427f92
--- /dev/null
+++ b/i386/i386/pio.h
@@ -0,0 +1,61 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_PIO_H_
+#define _I386_PIO_H_
+
+#ifndef __GNUC__
+#error You do not stand a chance. This file is gcc only.
+#endif __GNUC__
+
+#define inl(y) \
+({ unsigned long _tmp__; \
+ asm volatile("inl %1, %0" : "=a" (_tmp__) : "d" ((unsigned short)(y))); \
+ _tmp__; })
+
+#define inw(y) \
+({ unsigned short _tmp__; \
+ asm volatile(".byte 0x66; inl %1, %0" : "=a" (_tmp__) : "d" ((unsigned short)(y))); \
+ _tmp__; })
+
+#define inb(y) \
+({ unsigned char _tmp__; \
+ asm volatile("inb %1, %0" : "=a" (_tmp__) : "d" ((unsigned short)(y))); \
+ _tmp__; })
+
+
+#define outl(x, y) \
+{ asm volatile("outl %0, %1" : : "a" (y) , "d" ((unsigned short)(x))); }
+
+
+#define outw(x, y) \
+{asm volatile(".byte 0x66; outl %0, %1" : : "a" ((unsigned short)(y)) , "d" ((unsigned short)(x))); }
+
+
+#define outb(x, y) \
+{ asm volatile("outb %0, %1" : : "a" ((unsigned char)(y)) , "d" ((unsigned short)(x))); }
+
+#endif /* _I386_PIO_H_ */
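
A hedged usage sketch of the macros above: latch and read back PIT counter 0,
much as findspeed() in pit.c does (findspeed() reads without an explicit latch;
the latch command is added here for illustration).  This assumes the kernel's
<i386/pio.h> and <i386/pit.h> are on the include path and interrupts are off:

    #include <i386/pio.h>
    #include <i386/pit.h>

    unsigned short read_pit_counter0(void)
    {
        unsigned char lo, hi;

        outb(PITCTL_PORT, PIT_C0);      /* counter-latch command for counter 0 */
        lo = inb(PITCTR0_PORT);         /* least significant byte first */
        hi = inb(PITCTR0_PORT);         /* then the most significant byte */
        return (unsigned short)((hi << 8) | lo);
    }
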
diff --git a/i386/i386/pit.c b/i386/i386/pit.c
new file mode 100644
index 00000000..3ae14870
--- /dev/null
+++ b/i386/i386/pit.c
@@ -0,0 +1,236 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <platforms.h>
+#include <kern/time_out.h>
+#include <i386/ipl.h>
+#include <i386/pit.h>
+
+int pitctl_port = PITCTL_PORT; /* For 386/20 Board */
+int pitctr0_port = PITCTR0_PORT; /* For 386/20 Board */
+int pitctr1_port = PITCTR1_PORT; /* For 386/20 Board */
+int pitctr2_port = PITCTR2_PORT; /* For 386/20 Board */
+/* We want PIT 0 in square wave mode */
+
+int pit0_mode = PIT_C0|PIT_SQUAREMODE|PIT_READMODE ;
+
+
+unsigned int delaycount; /* loop count in trying to delay for
+ * 1 millisecond
+ */
+unsigned long microdata=50; /* loop count for 10 microsecond wait.
+ MUST be initialized for those who
+ insist on calling "tenmicrosec"
+ before the clock has been
+ initialized.
+ */
+unsigned int clknumb = CLKNUM; /* interrupt interval for timer 0 */
+
+#ifdef PS2
+extern int clock_int_handler();
+
+#include <sys/types.h>
+#include <i386ps2/abios.h>
+static struct generic_request *clock_request_block;
+static int clock_flags;
+char cqbuf[200]; /*XXX temporary.. should use kmem_alloc or whatever..*/
+#endif /* PS2 */
+
+clkstart()
+{
+ unsigned int flags;
+ unsigned char byte;
+ int s;
+
+ intpri[0] = SPLHI;
+ form_pic_mask();
+
+ findspeed();
+ microfind();
+ s = sploff(); /* disable interrupts */
+
+#ifdef PS2
+ abios_clock_start();
+#endif /* PS2 */
+
+ /* Since we use only timer 0, we program that.
+ * 8254 Manual specifically says you do not need to program
+ * timers you do not use
+ */
+ outb(pitctl_port, pit0_mode);
+ clknumb = CLKNUM/hz;
+ byte = clknumb;
+ outb(pitctr0_port, byte);
+ byte = clknumb>>8;
+ outb(pitctr0_port, byte);
+ splon(s); /* restore interrupt state */
+}
+
+#define COUNT 10000 /* should be a multiple of 1000! */
+
+findspeed()
+{
+ unsigned int flags;
+ unsigned char byte;
+ unsigned int leftover;
+ int i;
+ int j;
+ int s;
+
+ s = sploff(); /* disable interrupts */
+ /* Put counter in count down mode */
+#define PIT_COUNTDOWN PIT_READMODE|PIT_NDIVMODE
+ outb(pitctl_port, PIT_COUNTDOWN);
+ /* output a count of -1 to counter 0 */
+ outb(pitctr0_port, 0xff);
+ outb(pitctr0_port, 0xff);
+ delaycount = COUNT;
+ spinwait(1);
+ /* Read the value left in the counter */
+ byte = inb(pitctr0_port); /* least significant */
+ leftover = inb(pitctr0_port); /* most significant */
+ leftover = (leftover<<8) + byte ;
+ /* Formula for delaycount is :
+ * (loopcount * timer clock speed)/ (counter ticks * 1000)
+ * 1000 is for figuring out milliseconds
+ */
+ /* we arrange calculation so that it doesn't overflow */
+ delaycount = ((COUNT/1000) * CLKNUM) / (0xffff-leftover);
+ printf("findspeed: delaycount=%d (tics=%d)\n",
+ delaycount, (0xffff-leftover));
+ splon(s); /* restore interrupt state */
+}
+
+#ifdef PS2
+
+abios_clock_start()
+{
+ struct generic_request temp_request_block;
+ int rc;
+
+ nmi_enable(); /* has to happen somewhere! */
+ temp_request_block.r_current_req_blck_len = ABIOS_MIN_REQ_SIZE;
+ temp_request_block.r_logical_id = abios_next_LID(SYSTIME_ID,
+ ABIOS_FIRST_LID);
+ temp_request_block.r_unit = 0;
+ temp_request_block.r_function = ABIOS_LOGICAL_PARAMETER;
+ temp_request_block.r_return_code = ABIOS_UNDEFINED;
+
+ abios_common_start(&temp_request_block,0);
+ if (temp_request_block.r_return_code != ABIOS_DONE) {
+ panic("couldn init abios time code!\n");
+ }
+
+ /*
+ * now build the clock request for the hardware system clock
+ */
+ clock_request_block = (struct generic_request *)cqbuf;
+ clock_request_block->r_current_req_blck_len =
+ temp_request_block.r_request_block_length;
+ clock_request_block->r_logical_id = temp_request_block.r_logical_id;
+ clock_request_block->r_unit = 0;
+ clock_request_block->r_function = ABIOS_DEFAULT_INTERRUPT;
+ clock_request_block->r_return_code = ABIOS_UNDEFINED;
+ clock_flags = temp_request_block.r_logical_id_flags;
+}
+
+ackrtclock()
+{
+ if (clock_request_block) {
+ clock_request_block->r_return_code = ABIOS_UNDEFINED;
+ abios_common_interrupt(clock_request_block,clock_flags);
+ }
+ }
+#endif /* PS2 */
+
+
+spinwait(millis)
+ int millis; /* number of milliseconds to delay */
+{
+ int i, j;
+
+ for (i=0;i<millis;i++)
+ for (j=0;j<delaycount;j++)
+ ;
+}
+
+#define MICROCOUNT 1000 /* keep small to prevent overflow */
+microfind()
+{
+ unsigned int flags;
+ unsigned char byte;
+ unsigned short leftover;
+ int s;
+
+
+ s = sploff(); /* disable interrupts */
+
+ /* Put counter in count down mode */
+ outb(pitctl_port, PIT_COUNTDOWN);
+ /* output a count of -1 to counter 0 */
+ outb(pitctr0_port, 0xff);
+ outb(pitctr0_port, 0xff);
+ microdata=MICROCOUNT;
+ tenmicrosec();
+ /* Read the value left in the counter */
+ byte = inb(pitctr0_port); /* least significant */
+ leftover = inb(pitctr0_port); /* most significant */
+ leftover = (leftover<<8) + byte ;
+ /* Formula for microdata is :
+ * (loopcount * timer clock speed) / (counter ticks * 100000)
+ * where the 100000 converts loops per second into loops per ten microseconds
+ */
+ microdata = (MICROCOUNT * CLKNUM) / ((0xffff-leftover)*100000);
+ if (!microdata)
+ microdata++;
+
+ splon(s); /* restore interrupt state */
+}
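
A worked instance of the findspeed() calibration formula, using an invented
counter read-back of 0xd0ff:

    #include <stdio.h>

    #define COUNT  10000
    #define CLKNUM 1193167

    int main(void)
    {
        unsigned leftover   = 0xd0ff;             /* assumed residue in counter 0 */
        unsigned ticks      = 0xffff - leftover;  /* 12032 PIT ticks elapsed */
        unsigned delaycount = ((COUNT / 1000) * CLKNUM) / ticks;

        printf("ticks=%u -> delaycount=%u loops per millisecond\n",
               ticks, delaycount);                /* about 991 */
        return 0;
    }
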
diff --git a/i386/i386/pit.h b/i386/i386/pit.h
new file mode 100644
index 00000000..3cadb30a
--- /dev/null
+++ b/i386/i386/pit.h
@@ -0,0 +1,118 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <platforms.h>
+#if defined(MB1) || defined(MB2) || EXL > 0 || iPSC386
+/* Definitions for 8254 Programmable Interrupt Timer ports on 386/20 */
+#define PITCTR0_PORT 0xD0 /* counter 0 port */
+#define PITCTR1_PORT 0xD2 /* counter 1 port */
+#define PITCTR2_PORT 0xD4 /* counter 2 port */
+#define PITCTL_PORT 0xD6 /* PIT control port */
+#else /* defined(AT386) || defined(PS2) */
+/* Definitions for 8254 Programmable Interrupt Timer ports on AT 386 */
+#define PITCTR0_PORT 0x40 /* counter 0 port */
+#define PITCTR1_PORT 0x41 /* counter 1 port */
+#define PITCTR2_PORT 0x42 /* counter 2 port */
+#define PITCTL_PORT 0x43 /* PIT control port */
+#define PITAUX_PORT 0x61 /* PIT auxiliary port */
+/* bits used in auxiliary control port for timer 2 */
+#define PITAUX_GATE2 0x01 /* aux port, PIT gate 2 input */
+#define PITAUX_OUT2 0x02 /* aux port, PIT clock out 2 enable */
+#endif /* defined(AT386) || defined(PS2) */
+
+/* Following are used for Timer 0 */
+#define PIT_C0 0x00 /* select counter 0 */
+#define PIT_LOADMODE 0x30 /* load least significant byte followed
+ * by most significant byte */
+#define PIT_NDIVMODE 0x04 /*divide by N counter */
+#define PIT_SQUAREMODE 0x06 /* square-wave mode */
+
+/* Used for Timer 1. Used for delay calculations in countdown mode */
+#define PIT_C1 0x40 /* select counter 1 */
+#define PIT_READMODE 0x30 /* read or load least significant byte
+ * followed by most significant byte */
+#define PIT_RATEMODE 0x06 /* square-wave mode for USART */
+
+/*
+ * Clock speed for the timer in hz divided by the constant HZ
+ * (defined in param.h)
+ */
+#if AT386 || PS2
+#define CLKNUM 1193167
+#endif /* AT386 || PS2 */
+#if defined(MB1)
+#define CLKNUM 12300
+#endif
+#if defined(MB2) || EXL > 0
+#define CLKNUM 12500
+#endif
+#if iPSC386
+#define CLKNUM 1000000
+#endif iPSC386
+
+#if EXL
+/* added micro-timer support. --- csy */
+typedef struct time_latch {
+ time_t ticks; /* time in HZ since boot */
+ time_t uticks; /* time in 1.25 MHZ */
+/* don't need these two for now. --- csy */
+/* time_t secs; /* seconds since boot */
+/* time_t epochsecs; /* seconds since epoch */
+ } time_latch;
+/* a couple in-line assembly codes for efficiency. */
+asm int intr_disable()
+{
+ pushfl
+ cli
+}
+
+asm int intr_restore()
+{
+ popfl
+}
+
+#endif EXL
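
To see how clkstart() in pit.c uses CLKNUM: with a clock frequency of hz = 100
(an assumed value here; hz comes from the kernel configuration, not from this
header), the counter 0 reload value is clknumb = CLKNUM / hz = 1193167 / 100 =
11931.  That value is written to PITCTR0_PORT low byte first, then high byte,
giving a timer interrupt roughly every 10 ms.
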
diff --git a/i386/i386/pmap.h b/i386/i386/pmap.h
new file mode 100644
index 00000000..28b8cead
--- /dev/null
+++ b/i386/i386/pmap.h
@@ -0,0 +1,30 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Now using shared pmap module for i386 and i860.
+ */
+
+#include <intel/pmap.h>
diff --git a/i386/i386/proc_reg.h b/i386/i386/proc_reg.h
new file mode 100644
index 00000000..1aa646b8
--- /dev/null
+++ b/i386/i386/proc_reg.h
@@ -0,0 +1,150 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Processor registers for i386 and i486.
+ */
+#ifndef _I386_PROC_REG_H_
+#define _I386_PROC_REG_H_
+
+/*
+ * CR0
+ */
+#define CR0_PG 0x80000000 /* enable paging */
+#define CR0_CD 0x40000000 /* i486: cache disable */
+#define CR0_NW 0x20000000 /* i486: no write-through */
+#define CR0_AM 0x00040000 /* i486: alignment check mask */
+#define CR0_WP 0x00010000 /* i486: write-protect kernel access */
+#define CR0_NE 0x00000020 /* i486: handle numeric exceptions */
+#define CR0_ET 0x00000010 /* extension type is 80387 */
+ /* (not official) */
+#define CR0_TS 0x00000008 /* task switch */
+#define CR0_EM 0x00000004 /* emulate coprocessor */
+#define CR0_MP 0x00000002 /* monitor coprocessor */
+#define CR0_PE 0x00000001 /* enable protected mode */
+
+#ifndef ASSEMBLER
+#ifdef __GNUC__
+
+static inline unsigned
+get_eflags()
+{
+ unsigned eflags;
+ asm volatile("pushfd; popl %0" : "=r" (eflags));
+ return eflags;
+}
+
+static inline void
+set_eflags(unsigned eflags)
+{
+ asm volatile("pushl %0; popfd" : : "r" (eflags));
+}
+
+#define get_esp() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("mov %%esp, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_eflags() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("pushf; popl %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_cr0() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("mov %%cr0, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr0(value) \
+ ({ \
+ register unsigned int _temp__ = (value); \
+ asm volatile("mov %0, %%cr0" : : "r" (_temp__)); \
+ })
+
+#define get_cr2() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("mov %%cr2, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_cr3() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("mov %%cr3, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr3(value) \
+ ({ \
+ register unsigned int _temp__ = (value); \
+ asm volatile("mov %0, %%cr3" : : "r" (_temp__)); \
+ })
+
+#define set_ts() \
+ set_cr0(get_cr0() | CR0_TS)
+
+#define clear_ts() \
+ asm volatile("clts")
+
+#define get_tr() \
+ ({ \
+ unsigned short _seg__; \
+ asm volatile("str %0" : "=rm" (_seg__) ); \
+ _seg__; \
+ })
+
+#define set_tr(seg) \
+ asm volatile("ltr %0" : : "rm" ((unsigned short)(seg)) )
+
+#define get_ldt() \
+ ({ \
+ unsigned short _seg__; \
+ asm volatile("sldt %0" : "=rm" (_seg__) ); \
+ _seg__; \
+ })
+
+#define set_ldt(seg) \
+ asm volatile("lldt %0" : : "rm" ((unsigned short)(seg)) )
+
+/* This doesn't set a processor register,
+ but it's often used immediately after setting one,
+ to flush the instruction queue. */
+#define flush_instr_queue() \
+ asm("
+ jmp 0f
+ 0:
+ ")
+
+#endif /* __GNUC__ */
+#endif /* ASSEMBLER */
+
+#endif /* _I386_PROC_REG_H_ */
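
A hedged usage sketch of the accessors above (illustrative, not code from this
tree): turn on the i486 write-protect bit and set TS so that the next
floating-point instruction traps, the usual lazy-FPU arrangement:

    #include <i386/proc_reg.h>

    void example_cr0_usage(void)
    {
        set_cr0(get_cr0() | CR0_WP);   /* kernel honours page write protection */
        set_ts();                      /* next coprocessor use raises a DNA fault */
    }
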
diff --git a/i386/i386/sched_param.h b/i386/i386/sched_param.h
new file mode 100644
index 00000000..cb372e51
--- /dev/null
+++ b/i386/i386/sched_param.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Scheduler parameters.
+ */
+
+#ifndef _I386_SCHED_PARAM_H_
+#define _I386_SCHED_PARAM_H_
+
+/*
+ * Sequent requires a right shift of 18 bits to convert
+ * microseconds to priorities.
+ */
+
+#define PRI_SHIFT 18
+
+#endif _I386_SCHED_PARAM_H_
diff --git a/i386/i386/seg.c b/i386/i386/seg.c
new file mode 100644
index 00000000..d57c255e
--- /dev/null
+++ b/i386/i386/seg.c
@@ -0,0 +1,5 @@
+
+#define MACH_INLINE
+#include "seg.h"
+#include "tss.h"
+
diff --git a/i386/i386/seg.h b/i386/i386/seg.h
new file mode 100644
index 00000000..b86e967c
--- /dev/null
+++ b/i386/i386/seg.h
@@ -0,0 +1,184 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_SEG_H_
+#define _I386_SEG_H_
+
+#include <mach/inline.h>
+#include <platforms.h>
+
+/*
+ * i386 segmentation.
+ */
+
+#ifndef ASSEMBLER
+
+/*
+ * Real segment descriptor.
+ */
+struct real_descriptor {
+ unsigned int limit_low:16, /* limit 0..15 */
+ base_low:16, /* base 0..15 */
+ base_med:8, /* base 16..23 */
+ access:8, /* access byte */
+ limit_high:4, /* limit 16..19 */
+ granularity:4, /* granularity */
+ base_high:8; /* base 24..31 */
+};
+
+struct real_gate {
+ unsigned int offset_low:16, /* offset 0..15 */
+ selector:16,
+ word_count:8,
+ access:8,
+ offset_high:16; /* offset 16..31 */
+};
+
+#endif !ASSEMBLER
+
+#define SZ_32 0x4 /* 32-bit segment */
+#define SZ_16 0x0 /* 16-bit segment */
+#define SZ_G 0x8 /* 4K limit field */
+
+#define ACC_A 0x01 /* accessed */
+#define ACC_TYPE 0x1e /* type field: */
+
+#define ACC_TYPE_SYSTEM 0x00 /* system descriptors: */
+
+#define ACC_LDT 0x02 /* LDT */
+#define ACC_CALL_GATE_16 0x04 /* 16-bit call gate */
+#define ACC_TASK_GATE 0x05 /* task gate */
+#define ACC_TSS 0x09 /* task segment */
+#define ACC_CALL_GATE 0x0c /* call gate */
+#define ACC_INTR_GATE 0x0e /* interrupt gate */
+#define ACC_TRAP_GATE 0x0f /* trap gate */
+
+#define ACC_TSS_BUSY 0x02 /* task busy */
+
+#define ACC_TYPE_USER 0x10 /* user descriptors */
+
+#define ACC_DATA 0x10 /* data */
+#define ACC_DATA_W 0x12 /* data, writable */
+#define ACC_DATA_E 0x14 /* data, expand-down */
+#define ACC_DATA_EW 0x16 /* data, expand-down,
+ writable */
+#define ACC_CODE 0x18 /* code */
+#define ACC_CODE_R 0x1a /* code, readable */
+#define ACC_CODE_C 0x1c /* code, conforming */
+#define ACC_CODE_CR 0x1e /* code, conforming,
+ readable */
+#define ACC_PL 0x60 /* access rights: */
+#define ACC_PL_K 0x00 /* kernel access only */
+#define ACC_PL_U 0x60 /* user access */
+#define ACC_P 0x80 /* segment present */
+
+/*
+ * Components of a selector
+ */
+#define SEL_LDT 0x04 /* local selector */
+#define SEL_PL 0x03 /* privilege level: */
+#define SEL_PL_K 0x00 /* kernel selector */
+#define SEL_PL_U 0x03 /* user selector */
+
+/*
+ * Convert selector to descriptor table index.
+ */
+#define sel_idx(sel) ((sel)>>3)
+
+
+#ifndef ASSEMBLER
+
+#include <mach/inline.h>
+
+
+/* Format of a "pseudo-descriptor", used for loading the IDT and GDT. */
+struct pseudo_descriptor
+{
+ short pad;
+ unsigned short limit;
+ unsigned long linear_base;
+};
+
+
+/* Load the processor's IDT, GDT, or LDT pointers. */
+MACH_INLINE void lgdt(struct pseudo_descriptor *pdesc)
+{
+ __asm volatile("lgdt %0" : : "m" (pdesc->limit));
+}
+MACH_INLINE void lidt(struct pseudo_descriptor *pdesc)
+{
+ __asm volatile("lidt %0" : : "m" (pdesc->limit));
+}
+MACH_INLINE void lldt(unsigned short ldt_selector)
+{
+ __asm volatile("lldt %w0" : : "r" (ldt_selector));
+}
+
+#ifdef CODE16
+#define i16_lgdt lgdt
+#define i16_lidt lidt
+#define i16_lldt lldt
+#endif
+
+
+/* Fill a segment descriptor. */
+MACH_INLINE void
+fill_descriptor(struct real_descriptor *desc, unsigned base, unsigned limit,
+ unsigned char access, unsigned char sizebits)
+{
+ if (limit > 0xfffff)
+ {
+ limit >>= 12;
+ sizebits |= SZ_G;
+ }
+ desc->limit_low = limit & 0xffff;
+ desc->base_low = base & 0xffff;
+ desc->base_med = (base >> 16) & 0xff;
+ desc->access = access | ACC_P;
+ desc->limit_high = limit >> 16;
+ desc->granularity = sizebits;
+ desc->base_high = base >> 24;
+}
+
+/* Fill a gate with particular values. */
+MACH_INLINE void
+fill_gate(struct real_gate *gate, unsigned offset, unsigned short selector,
+ unsigned char access, unsigned char word_count)
+{
+ gate->offset_low = offset & 0xffff;
+ gate->selector = selector;
+ gate->word_count = word_count;
+ gate->access = access | ACC_P;
+ gate->offset_high = (offset >> 16) & 0xffff;
+}
+
+#endif !ASSEMBLER
+
+#endif /* _I386_SEG_H_ */
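
A hedged sketch of fill_descriptor() in use: building a flat 4 GB, ring-0,
32-bit code segment in an arbitrary GDT slot (the table and slot number are
made up for illustration):

    #include <i386/seg.h>

    struct real_descriptor example_gdt[8];

    void example_build_kernel_code_desc(void)
    {
        fill_descriptor(&example_gdt[2],        /* arbitrary example slot */
                        0x00000000,             /* base 0 */
                        0xffffffff,             /* limit 4 GB - 1 */
                        ACC_PL_K | ACC_CODE_R,  /* ring 0, readable code */
                        SZ_32);                 /* 32-bit default operand size */
        /* limit exceeds 0xfffff, so fill_descriptor() shifts it down by 12
           and sets SZ_G for 4 KB granularity; ACC_P is OR'd in automatically. */
    }
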
diff --git a/i386/i386/setjmp.h b/i386/i386/setjmp.h
new file mode 100644
index 00000000..21c856dc
--- /dev/null
+++ b/i386/i386/setjmp.h
@@ -0,0 +1,36 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Setjmp/longjmp buffer for i386.
+ */
+#ifndef _I386_SETJMP_H_
+#define _I386_SETJMP_H_
+
+typedef struct jmp_buf {
+ int jmp_buf[6]; /* ebx, esi, edi, ebp, esp, eip */
+} jmp_buf_t;
+
+#endif /* _I386_SETJMP_H_ */
diff --git a/i386/i386/spl.S b/i386/i386/spl.S
new file mode 100644
index 00000000..f77b5563
--- /dev/null
+++ b/i386/i386/spl.S
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 1995 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+/*
+ * spl routines for the i386at.
+ */
+
+#include <mach/machine/asm.h>
+#include <i386/ipl.h>
+#include <i386/pic.h>
+
+/*
+ * Set IPL to the specified value.
+ *
+ * NOTE: Normally we would not have to enable interrupts
+ * here. Linux drivers, however, use cli()/sti(), so we must
+ * guard against the case where a Mach routine which
+ * has done an spl() calls a Linux routine that returns
+ * with interrupts disabled. A subsequent splx() can,
+ * potentially, return with interrupts disabled.
+ */
+#define SETIPL(level) \
+ movl $(level),%edx; \
+ cmpl EXT(curr_ipl),%edx; \
+ jne spl; \
+ sti; \
+ movl %edx,%eax; \
+ ret
+
+/*
+ * Program PICs with mask in %eax.
+ */
+#define SETMASK() \
+ cmpl EXT(curr_pic_mask),%eax; \
+ je 9f; \
+ outb %al,$(PIC_MASTER_OCW); \
+ movl %eax,EXT(curr_pic_mask); \
+ movb %ah,%al; \
+ outb %al,$(PIC_SLAVE_OCW); \
+9:
+
+ENTRY(spl0)
+ movl EXT(curr_ipl),%eax /* save current ipl */
+ pushl %eax
+ cli /* disable interrupts */
+#ifdef LINUX_DEV
+ movl EXT(bh_active),%eax
+ /* get pending mask */
+ andl EXT(bh_mask),%eax /* any pending unmasked interrupts? */
+ jz 1f /* no, skip */
+ call EXT(spl1) /* block further interrupts */
+ incl EXT(intr_count) /* set interrupt flag */
+ call EXT(linux_soft_intr) /* go handle interrupt */
+ decl EXT(intr_count) /* decrement interrupt flag */
+ cli /* disable interrupts */
+1:
+#endif
+ cmpl $0,softclkpending /* softclock pending? */
+ je 1f /* no, skip */
+ movl $0,softclkpending /* clear flag */
+ call EXT(spl1) /* block further interrupts */
+#ifdef LINUX_DEV
+ incl EXT(intr_count) /* set interrupt flag */
+#endif
+ call EXT(softclock) /* go handle interrupt */
+#ifdef LINUX_DEV
+ decl EXT(intr_count) /* decrement interrupt flag */
+#endif
+ cli /* disable interrupts */
+1:
+ cmpl $(SPL0),EXT(curr_ipl) /* are we at spl0? */
+ je 1f /* yes, all done */
+ movl $(SPL0),EXT(curr_ipl) /* set ipl */
+ movl EXT(pic_mask)+SPL0*4,%eax
+ /* get PIC mask */
+ SETMASK() /* program PICs with new mask */
+1:
+ sti /* enable interrupts */
+ popl %eax /* return previous mask */
+ ret
+
+Entry(splsoftclock)
+ENTRY(spl1)
+ SETIPL(SPL1)
+
+ENTRY(spl2)
+ SETIPL(SPL2)
+
+ENTRY(spl3)
+ SETIPL(SPL3)
+
+Entry(splnet)
+Entry(splhdw)
+ENTRY(spl4)
+ SETIPL(SPL4)
+
+Entry(splbio)
+Entry(spldcm)
+ENTRY(spl5)
+ SETIPL(SPL5)
+
+Entry(spltty)
+Entry(splimp)
+Entry(splvm)
+ENTRY(spl6)
+ SETIPL(SPL6)
+
+Entry(splclock)
+Entry(splsched)
+Entry(splhigh)
+Entry(splhi)
+ENTRY(spl7)
+ SETIPL(SPL7)
+
+ENTRY(splx)
+ movl 4(%esp),%edx /* get ipl */
+ testl %edx,%edx /* spl0? */
+ jz EXT(spl0) /* yes, handle specially */
+ cmpl EXT(curr_ipl),%edx /* same ipl as current? */
+ jne spl /* no */
+ sti /* ensure interrupts are enabled */
+ movl %edx,%eax /* return previous ipl */
+ ret
+
+/*
+ * Like splx() but returns with interrupts disabled and does
+ * not return the previous ipl. This should only be called
+ * when returning from an interrupt.
+ */
+ .align TEXT_ALIGN
+ .globl splx_cli
+splx_cli:
+ movl 4(%esp),%edx /* get ipl */
+ cli /* disable interrupts */
+ testl %edx,%edx /* spl0? */
+ jnz 2f /* no, skip */
+#ifdef LINUX_DEV
+ movl EXT(bh_active),%eax
+ /* get pending mask */
+ andl EXT(bh_mask),%eax /* any pending unmasked interrupts? */
+ jz 1f /* no, skip */
+ call EXT(spl1) /* block further interrupts */
+ incl EXT(intr_count) /* set interrupt flag */
+ call EXT(linux_soft_intr) /* go handle interrupt */
+ decl EXT(intr_count) /* decrement interrupt flag */
+ cli /* disable interrupts */
+1:
+#endif
+ cmpl $0,softclkpending /* softclock pending? */
+ je 1f /* no, skip */
+ movl $0,softclkpending /* clear flag */
+ call EXT(spl1) /* block further interrupts */
+#ifdef LINUX_DEV
+ incl EXT(intr_count) /* set interrupt flag */
+#endif
+ call EXT(softclock) /* go handle interrupt */
+#ifdef LINUX_DEV
+ decl EXT(intr_count) /* decrement interrupt flag */
+#endif
+ cli /* disable interrupts */
+1:
+ xorl %edx,%edx /* edx = ipl 0 */
+2:
+ cmpl EXT(curr_ipl),%edx /* same ipl as current? */
+ je 1f /* yes, all done */
+ movl %edx,EXT(curr_ipl) /* set ipl */
+ movl EXT(pic_mask)(,%edx,4),%eax
+ /* get PIC mask */
+ SETMASK() /* program PICs with new mask */
+1:
+ ret
+
+/*
+ * NOTE: This routine must *not* use %ecx, otherwise
+ * the interrupt code will break.
+ */
+ .align TEXT_ALIGN
+ .globl spl
+spl:
+ movl EXT(pic_mask)(,%edx,4),%eax
+ /* get PIC mask */
+ cli /* disable interrupts */
+ xchgl EXT(curr_ipl),%edx /* set ipl */
+ SETMASK() /* program PICs with new mask */
+ sti /* enable interrupts */
+ movl %edx,%eax /* return previous ipl */
+ ret
+
+ENTRY(sploff)
+ pushfl
+ popl %eax
+ cli
+ ret
+
+ENTRY(splon)
+ pushl 4(%esp)
+ popfl
+ ret
+
+ .data
+ .align DATA_ALIGN
+softclkpending:
+ .long 0
+ .text
+
+ENTRY(setsoftclock)
+ incl softclkpending
+ ret
diff --git a/i386/i386/spl.h b/i386/i386/spl.h
new file mode 100644
index 00000000..219ee9f2
--- /dev/null
+++ b/i386/i386/spl.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+
+#ifndef _MACHINE_SPL_H_
+#define _MACHINE_SPL_H_
+
+/*
+ * This file defines the interrupt priority levels used by
+ * machine-dependent code.
+ */
+
+typedef int spl_t;
+
+extern spl_t (splhi)(void);
+
+extern spl_t (spl1)(void);
+
+extern spl_t (spl2)(void);
+
+extern spl_t (spl3)(void);
+
+extern spl_t (spl4)(void);
+extern spl_t (splhdw)(void);
+
+extern spl_t (spl5)(void);
+extern spl_t (spldcm)(void);
+
+extern spl_t (spl6)(void);
+
+#endif /* _MACHINE_SPL_H_ */
diff --git a/i386/i386/thread.h b/i386/i386/thread.h
new file mode 100644
index 00000000..922427eb
--- /dev/null
+++ b/i386/i386/thread.h
@@ -0,0 +1,195 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: machine/thread.h
+ *
+ * This file contains the structure definitions for the thread
+ * state as applied to I386 processors.
+ */
+
+#ifndef _I386_THREAD_H_
+#define _I386_THREAD_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+#include <mach/machine/fp_reg.h>
+
+#include <kern/lock.h>
+
+#include <i386/iopb.h>
+#include <i386/tss.h>
+
+/*
+ * i386_saved_state:
+ *
+ * This structure corresponds to the state of user registers
+ * as saved upon kernel entry. It lives in the pcb.
+ * It is also pushed onto the stack for exceptions in the kernel.
+ */
+
+struct i386_saved_state {
+ unsigned int gs;
+ unsigned int fs;
+ unsigned int es;
+ unsigned int ds;
+ unsigned int edi;
+ unsigned int esi;
+ unsigned int ebp;
+ unsigned int cr2; /* kernel esp stored by pusha -
+ we save cr2 here later */
+ unsigned int ebx;
+ unsigned int edx;
+ unsigned int ecx;
+ unsigned int eax;
+ unsigned int trapno;
+ unsigned int err;
+ unsigned int eip;
+ unsigned int cs;
+ unsigned int efl;
+ unsigned int uesp;
+ unsigned int ss;
+ struct v86_segs {
+ unsigned int v86_es; /* virtual 8086 segment registers */
+ unsigned int v86_ds;
+ unsigned int v86_fs;
+ unsigned int v86_gs;
+ } v86_segs;
+};
+
+/*
+ * i386_exception_link:
+ *
+ * This structure lives at the high end of the kernel stack.
+ * It points to the current thread's user registers.
+ */
+struct i386_exception_link {
+ struct i386_saved_state *saved_state;
+};
+
+/*
+ * i386_kernel_state:
+ *
+ * This structure corresponds to the state of kernel registers
+ * as saved in a context-switch. It lives at the base of the stack.
+ */
+
+struct i386_kernel_state {
+ int k_ebx; /* kernel context */
+ int k_esp;
+ int k_ebp;
+ int k_edi;
+ int k_esi;
+ int k_eip;
+};
+
+/*
+ * Save area for user floating-point state.
+ * Allocated only when necessary.
+ */
+
+struct i386_fpsave_state {
+ boolean_t fp_valid;
+ struct i386_fp_save fp_save_state;
+ struct i386_fp_regs fp_regs;
+};
+
+/*
+ * v86_assist_state:
+ *
+ * This structure provides data to simulate 8086 mode
+ * interrupts. It lives in the pcb.
+ */
+
+struct v86_assist_state {
+ vm_offset_t int_table;
+ unsigned short int_count;
+ unsigned short flags; /* 8086 flag bits */
+};
+#define V86_IF_PENDING 0x8000 /* unused bit */
+
+/*
+ * i386_interrupt_state:
+ *
+ * This structure describes the set of registers that must
+ * be pushed on the current ring-0 stack by an interrupt before
+ * we can switch to the interrupt stack.
+ */
+
+struct i386_interrupt_state {
+ int es;
+ int ds;
+ int edx;
+ int ecx;
+ int eax;
+ int eip;
+ int cs;
+ int efl;
+};
+
+/*
+ * i386_machine_state:
+ *
+ * This structure corresponds to special machine state.
+ * It lives in the pcb. It is not saved by default.
+ */
+
+struct i386_machine_state {
+ iopb_tss_t io_tss;
+ struct user_ldt * ldt;
+ struct i386_fpsave_state *ifps;
+ struct v86_assist_state v86s;
+};
+
+typedef struct pcb {
+ struct i386_interrupt_state iis[2]; /* interrupt and NMI */
+ struct i386_saved_state iss;
+ struct i386_machine_state ims;
+ decl_simple_lock_data(, lock)
+} *pcb_t;
+
+/*
+ * On the kernel stack is:
+ * stack: ...
+ * struct i386_exception_link
+ * struct i386_kernel_state
+ * stack+KERNEL_STACK_SIZE
+ */
+
+#define STACK_IKS(stack) \
+ ((struct i386_kernel_state *)((stack) + KERNEL_STACK_SIZE) - 1)
+#define STACK_IEL(stack) \
+ ((struct i386_exception_link *)STACK_IKS(stack) - 1)
+
+#define USER_REGS(thread) (&(thread)->pcb->iss)
+
+
+#define syscall_emulation_sync(task) /* do nothing */
+
+
+/* #include_next "thread.h" */
+
+
+#endif _I386_THREAD_H_
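
A standalone sketch of the STACK_IKS()/STACK_IEL() arithmetic above, runnable
in user space.  KERNEL_STACK_SIZE is defined elsewhere in the kernel, so 4096
is only an assumed value, and the structures are pared down to local copies:

    #include <stdio.h>

    #define KERNEL_STACK_SIZE 4096   /* assumed; the real value lives elsewhere */

    struct i386_kernel_state   { int k_ebx, k_esp, k_ebp, k_edi, k_esi, k_eip; };
    struct i386_exception_link { void *saved_state; };

    #define STACK_IKS(stack) \
        ((struct i386_kernel_state *)((stack) + KERNEL_STACK_SIZE) - 1)
    #define STACK_IEL(stack) \
        ((struct i386_exception_link *)STACK_IKS(stack) - 1)

    int main(void)
    {
        static char stack[KERNEL_STACK_SIZE];

        printf("stack base        %p\n", (void *)stack);
        printf("exception link at %p\n", (void *)STACK_IEL(stack));
        printf("kernel state at   %p\n", (void *)STACK_IKS(stack));
        printf("stack top         %p\n", (void *)(stack + KERNEL_STACK_SIZE));
        return 0;
    }
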
diff --git a/i386/i386/time_stamp.h b/i386/i386/time_stamp.h
new file mode 100644
index 00000000..43bb956b
--- /dev/null
+++ b/i386/i386/time_stamp.h
@@ -0,0 +1,30 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * The i386 timestamp implementation uses the default, so we don't
+ * need to do anything here.
+ */
+
diff --git a/i386/i386/timer.h b/i386/i386/timer.h
new file mode 100644
index 00000000..b74965df
--- /dev/null
+++ b/i386/i386/timer.h
@@ -0,0 +1,71 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_TIMER_H_
+#define _I386_TIMER_H_
+
+/*
+ * Machine dependent timer definitions.
+ */
+
+#include <platforms.h>
+
+#ifdef SYMMETRY
+
+/*
+ * TIMER_MAX is not used on the Sequent because a 32-bit rollover
+ * timer does not need to be adjusted for maximum value.
+ */
+
+/*
+ * TIMER_RATE is the rate of the timer in ticks per second.
+ * It is used to calculate percent cpu usage.
+ */
+
+#define TIMER_RATE 1000000
+
+/*
+ * TIMER_HIGH_UNIT is the unit for high_bits in terms of low_bits.
+ * Setting it to TIMER_RATE makes the high unit seconds.
+ */
+
+#define TIMER_HIGH_UNIT TIMER_RATE
+
+/*
+ * TIMER_ADJUST is used to adjust the value of a timer after
+ * it has been copied into a time_value_t. No adjustment is needed
+ * on Sequent because high_bits is in seconds.
+ */
+
+/*
+ * MACHINE_TIMER_ROUTINES should be defined if the timer routines are
+ * implemented in machine-dependent code (e.g. assembly language).
+ */
+#define MACHINE_TIMER_ROUTINES
+
+#endif
+
+#endif /* _I386_TIMER_H_ */
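+
+/*
+ * Illustrative sketch (placeholder function name): with TIMER_HIGH_UNIT
+ * equal to TIMER_RATE (1000000 ticks per second), high_bits counts whole
+ * seconds and low_bits counts microseconds, so converting a timer reading
+ * into a seconds/microseconds pair needs no TIMER_ADJUST step beyond
+ * normalizing a low_bits overflow.
+ */
+#if 0	/* example only, not compiled */
+static void
+timer_to_sec_usec(unsigned high_bits, unsigned low_bits,
+		  unsigned *secs, unsigned *usecs)
+{
+	if (low_bits >= TIMER_RATE) {		/* carry into seconds */
+		high_bits += low_bits / TIMER_RATE;
+		low_bits  %= TIMER_RATE;
+	}
+	*secs  = high_bits;	/* already whole seconds */
+	*usecs = low_bits;	/* already microseconds */
+}
+#endif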
diff --git a/i386/i386/trap.c b/i386/i386/trap.c
new file mode 100644
index 00000000..6096a39f
--- /dev/null
+++ b/i386/i386/trap.c
@@ -0,0 +1,1139 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Hardware trap/fault handler.
+ */
+
+#include <cpus.h>
+#include <fpe.h>
+#include <mach_kdb.h>
+#include <mach_ttd.h>
+#include <mach_pcsample.h>
+
+#include <sys/types.h>
+#include <mach/machine/eflags.h>
+#include <i386/trap.h>
+#include <machine/machspl.h> /* for spl_t */
+
+#include <mach/exception.h>
+#include <mach/kern_return.h>
+#include "vm_param.h"
+#include <mach/machine/thread_status.h>
+
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+
+#include <kern/ast.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+
+#include <i386/io_emulate.h>
+
+#include "debug.h"
+
+extern void exception();
+extern void thread_exception_return();
+
+extern void i386_exception();
+
+#if MACH_KDB
+boolean_t debug_all_traps_with_kdb = FALSE;
+extern struct db_watchpoint *db_watchpoint_list;
+extern boolean_t db_watchpoints_inserted;
+
+void
+thread_kdb_return()
+{
+ register thread_t thread = current_thread();
+ register struct i386_saved_state *regs = USER_REGS(thread);
+
+ if (kdb_trap(regs->trapno, regs->err, regs)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+}
+#endif /* MACH_KDB */
+
+#if MACH_TTD
+extern boolean_t kttd_enabled;
+boolean_t debug_all_traps_with_kttd = TRUE;
+#endif /* MACH_TTD */
+
+void
+user_page_fault_continue(kr)
+ kern_return_t kr;
+{
+ register thread_t thread = current_thread();
+ register struct i386_saved_state *regs = USER_REGS(thread);
+
+ if (kr == KERN_SUCCESS) {
+#if MACH_KDB
+ if (db_watchpoint_list &&
+ db_watchpoints_inserted &&
+ (regs->err & T_PF_WRITE) &&
+ db_find_watchpoint(thread->task->map,
+ (vm_offset_t)regs->cr2,
+ regs))
+ kdb_trap(T_WATCHPOINT, 0, regs);
+#endif /* MACH_KDB */
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+
+#if MACH_KDB
+ if (debug_all_traps_with_kdb &&
+ kdb_trap(regs->trapno, regs->err, regs)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+#endif /* MACH_KDB */
+
+ i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
+ /*NOTREACHED*/
+}
+
+/*
+ * Fault recovery in copyin/copyout routines.
+ */
+struct recovery {
+ int fault_addr;
+ int recover_addr;
+};
+
+extern struct recovery recover_table[];
+extern struct recovery recover_table_end[];
+
+/*
+ * Recovery from a successful fault in copyout does not
+ * return directly - it retries the pte check, since
+ * the 386 ignores write protection in kernel mode.
+ */
+extern struct recovery retry_table[];
+extern struct recovery retry_table_end[];
+
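+/*
+ * Illustrative sketch (not used below; the handlers scan the tables
+ * inline): each table pairs the address of a faulting instruction in a
+ * copyin/copyout routine with the address to resume at, so recovery is
+ * a linear search on the saved EIP.
+ */
+#if 0	/* example only, not compiled */
+static int
+recovery_lookup(struct recovery *table, struct recovery *table_end,
+		int fault_eip)
+{
+	register struct recovery *rp;
+
+	for (rp = table; rp < table_end; rp++)
+		if (rp->fault_addr == fault_eip)
+			return rp->recover_addr;
+	return 0;			/* no recovery entry */
+}
+#endif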
+
+static char *trap_type[] = {
+ "Divide error",
+ "Debug trap",
+ "NMI",
+ "Breakpoint",
+ "Overflow",
+ "Bounds check",
+ "Invalid opcode",
+ "No coprocessor",
+ "Double fault",
+ "Coprocessor overrun",
+ "Invalid TSS",
+ "Segment not present",
+ "Stack bounds",
+ "General protection",
+ "Page fault",
+ "(reserved)",
+ "Coprocessor error"
+};
+#define TRAP_TYPES (sizeof(trap_type)/sizeof(trap_type[0]))
+
+char *trap_name(unsigned int trapnum)
+{
+ return trapnum < TRAP_TYPES ? trap_type[trapnum] : "(unknown)";
+}
+
+
+boolean_t brb = TRUE;
+
+/*
+ * Trap from kernel mode. Only page-fault errors are recoverable,
+ * and then only in special circumstances. All other errors are
+ * fatal.
+ */
+void kernel_trap(regs)
+ register struct i386_saved_state *regs;
+{
+ int exc;
+ int code;
+ int subcode;
+ register int type;
+ vm_map_t map;
+ kern_return_t result;
+ register thread_t thread;
+ extern char start[], etext[];
+
+ type = regs->trapno;
+ code = regs->err;
+ thread = current_thread();
+
+#if 0
+((short*)0xb8700)[0] = 0x0f00+'K';
+((short*)0xb8700)[1] = 0x0f30+(type / 10);
+((short*)0xb8700)[2] = 0x0f30+(type % 10);
+#endif
+#if 0
+printf("kernel trap %d error %d\n", type, code);
+dump_ss(regs);
+#endif
+
+ switch (type) {
+ case T_NO_FPU:
+ fpnoextflt();
+ return;
+
+ case T_FPU_FAULT:
+ fpextovrflt();
+ return;
+
+ case T_FLOATING_POINT_ERROR:
+ fpexterrflt();
+ return;
+
+ case T_PAGE_FAULT:
+
+ /* Get faulting linear address */
+ subcode = regs->cr2;
+#if 0
+ printf("kernel page fault at linear address %08x\n", subcode);
+#endif
+
+ /* If it's in the kernel linear address region,
+ convert it to a kernel virtual address
+ and use the kernel map to process the fault. */
+ if (subcode >= LINEAR_MIN_KERNEL_ADDRESS) {
+#if 0
+ printf("%08x in kernel linear address range\n", subcode);
+#endif
+ map = kernel_map;
+ subcode = lintokv(subcode);
+#if 0
+ printf("now %08x\n", subcode);
+#endif
+ if (trunc_page(subcode) == 0
+ || (subcode >= (int)start
+ && subcode < (int)etext)) {
+ printf("Kernel page fault at address 0x%x, "
+ "eip = 0x%x\n",
+ subcode, regs->eip);
+ goto badtrap;
+ }
+ } else {
+ assert(thread);
+ map = thread->task->map;
+ if (map == kernel_map) {
+			printf("kernel page fault at %08x:\n", subcode);
+ dump_ss(regs);
+ panic("kernel thread accessed user space!\n");
+ }
+ }
+
+ /*
+ * Since the 386 ignores write protection in
+ * kernel mode, always try for write permission
+ * first. If that fails and the fault was a
+ * read fault, retry with read permission.
+ */
+ result = vm_fault(map,
+ trunc_page((vm_offset_t)subcode),
+ VM_PROT_READ|VM_PROT_WRITE,
+ FALSE,
+ FALSE,
+ (void (*)()) 0);
+#if MACH_KDB
+ if (result == KERN_SUCCESS) {
+ /* Look for watchpoints */
+ if (db_watchpoint_list &&
+ db_watchpoints_inserted &&
+ (code & T_PF_WRITE) &&
+ db_find_watchpoint(map,
+ (vm_offset_t)subcode, regs))
+ kdb_trap(T_WATCHPOINT, 0, regs);
+ }
+ else
+#endif /* MACH_KDB */
+ if ((code & T_PF_WRITE) == 0 &&
+ result == KERN_PROTECTION_FAILURE)
+ {
+ /*
+ * Must expand vm_fault by hand,
+ * so that we can ask for read-only access
+ * but enter a (kernel)writable mapping.
+ */
+ result = intel_read_fault(map,
+ trunc_page((vm_offset_t)subcode));
+ }
+
+ if (result == KERN_SUCCESS) {
+ /*
+ * Certain faults require that we back up
+ * the EIP.
+ */
+ register struct recovery *rp;
+
+ for (rp = retry_table; rp < retry_table_end; rp++) {
+ if (regs->eip == rp->fault_addr) {
+ regs->eip = rp->recover_addr;
+ break;
+ }
+ }
+ return;
+ }
+
+ /*
+ * If there is a failure recovery address
+ * for this fault, go there.
+ */
+ {
+ register struct recovery *rp;
+
+ for (rp = recover_table;
+ rp < recover_table_end;
+ rp++) {
+ if (regs->eip == rp->fault_addr) {
+ regs->eip = rp->recover_addr;
+ return;
+ }
+ }
+ }
+
+ /*
+ * Check thread recovery address also -
+ * v86 assist uses it.
+ */
+ if (thread->recover) {
+ regs->eip = thread->recover;
+ thread->recover = 0;
+ return;
+ }
+
+ /*
+ * Unanticipated page-fault errors in kernel
+ * should not happen.
+ */
+ /* fall through */
+
+ default:
+ badtrap:
+ printf("Kernel ");
+ if (type < TRAP_TYPES)
+ printf("%s trap", trap_type[type]);
+ else
+ printf("trap %d", type);
+ printf(", eip 0x%x\n", regs->eip);
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, code, regs))
+ return;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (kdb_trap(type, code, regs))
+ return;
+#endif /* MACH_KDB */
+ splhigh();
+ printf("kernel trap, type %d, code = %x\n",
+ type, code);
+ dump_ss(regs);
+ panic("trap");
+ return;
+ }
+}
+
+
+/*
+ * Trap from user mode.
+ * Return TRUE if from emulated system call.
+ */
+int user_trap(regs)
+ register struct i386_saved_state *regs;
+{
+ int exc;
+ int code;
+ int subcode;
+ register int type;
+ vm_map_t map;
+ kern_return_t result;
+ register thread_t thread = current_thread();
+ extern vm_offset_t phys_last_addr;
+
+ if ((vm_offset_t)thread < phys_last_addr) {
+ printf("user_trap: bad thread pointer 0x%x\n", thread);
+ printf("trap type %d, code 0x%x, va 0x%x, eip 0x%x\n",
+ regs->trapno, regs->err, regs->cr2, regs->eip);
+ asm volatile ("1: hlt; jmp 1b");
+ }
+#if 0
+printf("user trap %d error %d sub %08x\n", type, code, subcode);
+#endif
+
+ if (regs->efl & EFL_VM) {
+ /*
+ * If hardware assist can handle exception,
+ * continue execution.
+ */
+ if (v86_assist(thread, regs))
+ return 0;
+ }
+
+ type = regs->trapno;
+ code = 0;
+ subcode = 0;
+
+#if 0
+ ((short*)0xb8700)[3] = 0x0f00+'U';
+ ((short*)0xb8700)[4] = 0x0f30+(type / 10);
+ ((short*)0xb8700)[5] = 0x0f30+(type % 10);
+#endif
+#if 0
+ printf("user trap %d error %d\n", type, code);
+ dump_ss(regs);
+#endif
+
+ switch (type) {
+
+ case T_DIVIDE_ERROR:
+ exc = EXC_ARITHMETIC;
+ code = EXC_I386_DIV;
+ break;
+
+ case T_DEBUG:
+#if MACH_TTD
+ if (kttd_enabled && kttd_in_single_step()) {
+ if (kttd_trap(type, regs->err, regs))
+ return 0;
+ }
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (db_in_single_step()) {
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+ }
+#endif
+ exc = EXC_BREAKPOINT;
+ code = EXC_I386_SGL;
+ break;
+
+ case T_INT3:
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, regs->err, regs))
+ return 0;
+ break;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ {
+ boolean_t db_find_breakpoint_here();
+
+ if (db_find_breakpoint_here(
+ (current_thread())? current_thread()->task: TASK_NULL,
+ regs->eip - 1)) {
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+ }
+ }
+#endif
+ exc = EXC_BREAKPOINT;
+ code = EXC_I386_BPT;
+ break;
+
+ case T_OVERFLOW:
+ exc = EXC_ARITHMETIC;
+ code = EXC_I386_INTO;
+ break;
+
+ case T_OUT_OF_BOUNDS:
+ exc = EXC_SOFTWARE;
+ code = EXC_I386_BOUND;
+ break;
+
+ case T_INVALID_OPCODE:
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_INVOP;
+ break;
+
+ case T_NO_FPU:
+ case 32: /* XXX */
+ fpnoextflt();
+ return 0;
+
+ case T_FPU_FAULT:
+ fpextovrflt();
+ return 0;
+
+ case 10: /* invalid TSS == iret with NT flag set */
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_INVTSSFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_SEGMENT_NOT_PRESENT:
+#if FPE
+ if (fp_emul_error(regs))
+ return 0;
+#endif /* FPE */
+
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_SEGNPFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_STACK_FAULT:
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_STKFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_GENERAL_PROTECTION:
+ if (!(regs->efl & EFL_VM)) {
+ if (check_io_fault(regs))
+ return 0;
+ }
+ /* Check for an emulated int80 system call.
+ NetBSD-current and Linux use trap instead of call gate. */
+ if (thread->task->eml_dispatch) {
+ unsigned char opcode, intno;
+
+ opcode = inst_fetch(regs->eip, regs->cs);
+ intno = inst_fetch(regs->eip+1, regs->cs);
+ if (opcode == 0xcd && intno == 0x80) {
+ regs->eip += 2;
+ return 1;
+ }
+ }
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_GPFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_PAGE_FAULT:
+#if 0
+ printf("user page fault at linear address %08x\n", subcode);
+#endif
+	subcode = regs->cr2;
+	assert(subcode < LINEAR_MIN_KERNEL_ADDRESS);
+ (void) vm_fault(thread->task->map,
+ trunc_page((vm_offset_t)subcode),
+ (regs->err & T_PF_WRITE)
+ ? VM_PROT_READ|VM_PROT_WRITE
+ : VM_PROT_READ,
+ FALSE,
+ FALSE,
+ user_page_fault_continue);
+ /*NOTREACHED*/
+ break;
+
+ case T_FLOATING_POINT_ERROR:
+ fpexterrflt();
+ return 0;
+
+ default:
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_KDB */
+ splhigh();
+ printf("user trap, type %d, code = %x\n",
+ type, regs->err);
+ dump_ss(regs);
+ panic("trap");
+ return 0;
+ }
+
+#if MACH_TTD
+ if (debug_all_traps_with_kttd && kttd_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (debug_all_traps_with_kdb &&
+ kdb_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_KDB */
+
+ i386_exception(exc, code, subcode);
+ /*NOTREACHED*/
+}
+
+/*
+ * V86 mode assist for interrupt handling.
+ */
+boolean_t v86_assist_on = TRUE;
+boolean_t v86_unsafe_ok = FALSE;
+boolean_t v86_do_sti_cli = TRUE;
+boolean_t v86_do_sti_immediate = FALSE;
+
+#define V86_IRET_PENDING 0x4000
+
+int cli_count = 0;
+int sti_count = 0;
+
+boolean_t
+v86_assist(thread, regs)
+ thread_t thread;
+ register struct i386_saved_state *regs;
+{
+ register struct v86_assist_state *v86 = &thread->pcb->ims.v86s;
+
+/*
+ * Build an 8086 address. Use only when off is known to be 16 bits.
+ */
+#define Addr8086(seg,off) ((((seg) & 0xffff) << 4) + (off))
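+/*
+ * For example, Addr8086(0xb800, 0x0010) is (0xb800 << 4) + 0x0010
+ * == 0xb8010 - the usual real-mode segment:offset calculation.
+ */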
+
+#define EFL_V86_SAFE ( EFL_OF | EFL_DF | EFL_TF \
+ | EFL_SF | EFL_ZF | EFL_AF \
+ | EFL_PF | EFL_CF )
+ struct iret_32 {
+ int eip;
+ int cs;
+ int eflags;
+ };
+ struct iret_16 {
+ unsigned short ip;
+ unsigned short cs;
+ unsigned short flags;
+ };
+ union iret_struct {
+ struct iret_32 iret_32;
+ struct iret_16 iret_16;
+ };
+
+ struct int_vec {
+ unsigned short ip;
+ unsigned short cs;
+ };
+
+ if (!v86_assist_on)
+ return FALSE;
+
+ /*
+	 * If a delayed STI is pending, enable interrupts.
+	 * Turn off the trace flag if it was set only to delay the STI.
+ */
+ if (v86->flags & V86_IF_PENDING) {
+ v86->flags &= ~V86_IF_PENDING;
+ v86->flags |= EFL_IF;
+ if ((v86->flags & EFL_TF) == 0)
+ regs->efl &= ~EFL_TF;
+ }
+
+ if (regs->trapno == T_DEBUG) {
+
+ if (v86->flags & EFL_TF) {
+ /*
+ * Trace flag was also set - it has priority
+ */
+ return FALSE; /* handle as single-step */
+ }
+ /*
+ * Fall through to check for interrupts.
+ */
+ }
+ else if (regs->trapno == T_GENERAL_PROTECTION) {
+ /*
+ * General protection error - must be an 8086 instruction
+ * to emulate.
+ */
+ register int eip;
+ boolean_t addr_32 = FALSE;
+ boolean_t data_32 = FALSE;
+ int io_port;
+
+ /*
+ * Set up error handler for bad instruction/data
+ * fetches.
+ */
+ asm("movl $(addr_error), %0" : "=m" (thread->recover));
+
+ eip = regs->eip;
+ while (TRUE) {
+ unsigned char opcode;
+
+ if (eip > 0xFFFF) {
+ thread->recover = 0;
+ return FALSE; /* GP fault: IP out of range */
+ }
+
+ opcode = *(unsigned char *)Addr8086(regs->cs,eip);
+ eip++;
+ switch (opcode) {
+ case 0xf0: /* lock */
+ case 0xf2: /* repne */
+ case 0xf3: /* repe */
+ case 0x2e: /* cs */
+ case 0x36: /* ss */
+ case 0x3e: /* ds */
+ case 0x26: /* es */
+ case 0x64: /* fs */
+ case 0x65: /* gs */
+ /* ignore prefix */
+ continue;
+
+ case 0x66: /* data size */
+ data_32 = TRUE;
+ continue;
+
+ case 0x67: /* address size */
+ addr_32 = TRUE;
+ continue;
+
+ case 0xe4: /* inb imm */
+ case 0xe5: /* inw imm */
+ case 0xe6: /* outb imm */
+ case 0xe7: /* outw imm */
+ io_port = *(unsigned char *)Addr8086(regs->cs, eip);
+ eip++;
+ goto do_in_out;
+
+ case 0xec: /* inb dx */
+ case 0xed: /* inw dx */
+ case 0xee: /* outb dx */
+ case 0xef: /* outw dx */
+ case 0x6c: /* insb */
+ case 0x6d: /* insw */
+ case 0x6e: /* outsb */
+ case 0x6f: /* outsw */
+ io_port = regs->edx & 0xffff;
+
+ do_in_out:
+ if (!data_32)
+ opcode |= 0x6600; /* word IO */
+
+ switch (emulate_io(regs, opcode, io_port)) {
+ case EM_IO_DONE:
+ /* instruction executed */
+ break;
+ case EM_IO_RETRY:
+ /* port mapped, retry instruction */
+ thread->recover = 0;
+ return TRUE;
+ case EM_IO_ERROR:
+ /* port not mapped */
+ thread->recover = 0;
+ return FALSE;
+ }
+ break;
+
+ case 0xfa: /* cli */
+ if (!v86_do_sti_cli) {
+ thread->recover = 0;
+ return (FALSE);
+ }
+
+ v86->flags &= ~EFL_IF;
+ /* disable simulated interrupts */
+ cli_count++;
+ break;
+
+ case 0xfb: /* sti */
+ if (!v86_do_sti_cli) {
+ thread->recover = 0;
+ return (FALSE);
+ }
+
+ if ((v86->flags & EFL_IF) == 0) {
+ if (v86_do_sti_immediate) {
+ v86->flags |= EFL_IF;
+ } else {
+ v86->flags |= V86_IF_PENDING;
+ regs->efl |= EFL_TF;
+ }
+ /* single step to set IF next inst. */
+ }
+ sti_count++;
+ break;
+
+ case 0x9c: /* pushf */
+ {
+ int flags;
+ vm_offset_t sp;
+ int size;
+
+ flags = regs->efl;
+ if ((v86->flags & EFL_IF) == 0)
+ flags &= ~EFL_IF;
+
+ if ((v86->flags & EFL_TF) == 0)
+ flags &= ~EFL_TF;
+ else flags |= EFL_TF;
+
+ sp = regs->uesp;
+ if (!addr_32)
+ sp &= 0xffff;
+ else if (sp > 0xffff)
+ goto stack_error;
+ size = (data_32) ? 4 : 2;
+ if (sp < size)
+ goto stack_error;
+ sp -= size;
+ if (copyout((char *)&flags,
+ (char *)Addr8086(regs->ss,sp),
+ size))
+ goto addr_error;
+ if (addr_32)
+ regs->uesp = sp;
+ else
+ regs->uesp = (regs->uesp & 0xffff0000) | sp;
+ break;
+ }
+
+ case 0x9d: /* popf */
+ {
+ vm_offset_t sp;
+ int nflags;
+
+ sp = regs->uesp;
+ if (!addr_32)
+ sp &= 0xffff;
+ else if (sp > 0xffff)
+ goto stack_error;
+
+ if (data_32) {
+ if (sp > 0xffff - sizeof(int))
+ goto stack_error;
+ nflags = *(int *)Addr8086(regs->ss,sp);
+ sp += sizeof(int);
+ }
+ else {
+ if (sp > 0xffff - sizeof(short))
+ goto stack_error;
+ nflags = *(unsigned short *)
+ Addr8086(regs->ss,sp);
+ sp += sizeof(short);
+ }
+ if (addr_32)
+ regs->uesp = sp;
+ else
+ regs->uesp = (regs->uesp & 0xffff0000) | sp;
+
+ if (v86->flags & V86_IRET_PENDING) {
+ v86->flags = nflags & (EFL_TF | EFL_IF);
+ v86->flags |= V86_IRET_PENDING;
+ } else {
+ v86->flags = nflags & (EFL_TF | EFL_IF);
+ }
+ regs->efl = (regs->efl & ~EFL_V86_SAFE)
+ | (nflags & EFL_V86_SAFE);
+ break;
+ }
+ case 0xcf: /* iret */
+ {
+ vm_offset_t sp;
+ int nflags;
+ int size;
+ union iret_struct iret_struct;
+
+ v86->flags &= ~V86_IRET_PENDING;
+ sp = regs->uesp;
+ if (!addr_32)
+ sp &= 0xffff;
+ else if (sp > 0xffff)
+ goto stack_error;
+
+ if (data_32) {
+ if (sp > 0xffff - sizeof(struct iret_32))
+ goto stack_error;
+ iret_struct.iret_32 =
+ *(struct iret_32 *) Addr8086(regs->ss,sp);
+ sp += sizeof(struct iret_32);
+ }
+ else {
+ if (sp > 0xffff - sizeof(struct iret_16))
+ goto stack_error;
+ iret_struct.iret_16 =
+ *(struct iret_16 *) Addr8086(regs->ss,sp);
+ sp += sizeof(struct iret_16);
+ }
+ if (addr_32)
+ regs->uesp = sp;
+ else
+ regs->uesp = (regs->uesp & 0xffff0000) | sp;
+
+ if (data_32) {
+ eip = iret_struct.iret_32.eip;
+ regs->cs = iret_struct.iret_32.cs & 0xffff;
+ nflags = iret_struct.iret_32.eflags;
+ }
+ else {
+ eip = iret_struct.iret_16.ip;
+ regs->cs = iret_struct.iret_16.cs;
+ nflags = iret_struct.iret_16.flags;
+ }
+
+ v86->flags = nflags & (EFL_TF | EFL_IF);
+ regs->efl = (regs->efl & ~EFL_V86_SAFE)
+ | (nflags & EFL_V86_SAFE);
+ break;
+ }
+ default:
+ /*
+ * Instruction not emulated here.
+ */
+ thread->recover = 0;
+ return FALSE;
+ }
+ break; /* exit from 'while TRUE' */
+ }
+	    regs->eip = (regs->eip & 0xffff0000) | eip;
+ }
+ else {
+ /*
+ * Not a trap we handle.
+ */
+ thread->recover = 0;
+ return FALSE;
+ }
+
+ if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING)==0)) {
+
+ struct v86_interrupt_table *int_table;
+ int int_count;
+ int vec;
+ int i;
+
+ int_table = (struct v86_interrupt_table *) v86->int_table;
+ int_count = v86->int_count;
+
+ vec = 0;
+ for (i = 0; i < int_count; int_table++, i++) {
+ if (!int_table->mask && int_table->count > 0) {
+ int_table->count--;
+ vec = int_table->vec;
+ break;
+ }
+ }
+ if (vec != 0) {
+ /*
+ * Take this interrupt
+ */
+ vm_offset_t sp;
+ struct iret_16 iret_16;
+ struct int_vec int_vec;
+
+ sp = regs->uesp & 0xffff;
+ if (sp < sizeof(struct iret_16))
+ goto stack_error;
+ sp -= sizeof(struct iret_16);
+ iret_16.ip = regs->eip;
+ iret_16.cs = regs->cs;
+ iret_16.flags = regs->efl & 0xFFFF;
+ if ((v86->flags & EFL_TF) == 0)
+ iret_16.flags &= ~EFL_TF;
+ else iret_16.flags |= EFL_TF;
+
+#ifdef gcc_1_36_worked
+ int_vec = ((struct int_vec *)0)[vec];
+#else
+ bcopy((char *) (sizeof(struct int_vec) * vec),
+ (char *)&int_vec,
+ sizeof (struct int_vec));
+#endif
+ if (copyout((char *)&iret_16,
+ (char *)Addr8086(regs->ss,sp),
+ sizeof(struct iret_16)))
+ goto addr_error;
+ regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
+ regs->eip = int_vec.ip;
+ regs->cs = int_vec.cs;
+ regs->efl &= ~EFL_TF;
+ v86->flags &= ~(EFL_IF | EFL_TF);
+ v86->flags |= V86_IRET_PENDING;
+ }
+ }
+
+ thread->recover = 0;
+ return TRUE;
+
+ /*
+ * On address error, report a page fault.
+	 * XXX report GP fault - we don't save
+ * the faulting address.
+ */
+ addr_error:
+ asm("addr_error:;");
+ thread->recover = 0;
+ return FALSE;
+
+ /*
+ * On stack address error, return stack fault (12).
+ */
+ stack_error:
+ thread->recover = 0;
+ regs->trapno = T_STACK_FAULT;
+ return FALSE;
+}
+
+/*
+ * Handle AST traps for i386.
+ * Check for delayed floating-point exception from
+ * AT-bus machines.
+ */
+void
+i386_astintr()
+{
+ int mycpu = cpu_number();
+
+ (void) splsched(); /* block interrupts to check reasons */
+ if (need_ast[mycpu] & AST_I386_FP) {
+ /*
+ * AST was for delayed floating-point exception -
+		 * FP interrupt occurred while in kernel.
+ * Turn off this AST reason and handle the FPU error.
+ */
+ ast_off(mycpu, AST_I386_FP);
+ (void) spl0();
+
+ fpexterrflt();
+ }
+ else {
+ /*
+ * Not an FPU trap. Handle the AST.
+ * Interrupts are still blocked.
+ */
+ ast_taken();
+ }
+}
+
+/*
+ * Handle exceptions for i386.
+ *
+ * If we are an AT bus machine, we must turn off the AST for a
+ * delayed floating-point exception.
+ *
+ * If we are providing floating-point emulation, we may have
+ * to retrieve the real register values from the floating point
+ * emulator.
+ */
+void
+i386_exception(exc, code, subcode)
+ int exc;
+ int code;
+ int subcode;
+{
+ spl_t s;
+
+ /*
+ * Turn off delayed FPU error handling.
+ */
+ s = splsched();
+ ast_off(cpu_number(), AST_I386_FP);
+ splx(s);
+
+#if FPE
+ fpe_exception_fixup(exc, code, subcode);
+#else
+ exception(exc, code, subcode);
+#endif
+ /*NOTREACHED*/
+}
+
+boolean_t
+check_io_fault(regs)
+ struct i386_saved_state *regs;
+{
+ int eip, opcode, io_port;
+ boolean_t data_16 = FALSE;
+
+ /*
+ * Get the instruction.
+ */
+ eip = regs->eip;
+
+ for (;;) {
+ opcode = inst_fetch(eip, regs->cs);
+ eip++;
+ switch (opcode) {
+ case 0x66: /* data-size prefix */
+ data_16 = TRUE;
+ continue;
+
+ case 0xf3: /* rep prefix */
+ case 0x26: /* es */
+ case 0x2e: /* cs */
+ case 0x36: /* ss */
+ case 0x3e: /* ds */
+ case 0x64: /* fs */
+ case 0x65: /* gs */
+ continue;
+
+ case 0xE4: /* inb imm */
+ case 0xE5: /* inl imm */
+ case 0xE6: /* outb imm */
+ case 0xE7: /* outl imm */
+ /* port is immediate byte */
+ io_port = inst_fetch(eip, regs->cs);
+ eip++;
+ break;
+
+ case 0xEC: /* inb dx */
+ case 0xED: /* inl dx */
+ case 0xEE: /* outb dx */
+ case 0xEF: /* outl dx */
+ case 0x6C: /* insb */
+ case 0x6D: /* insl */
+ case 0x6E: /* outsb */
+ case 0x6F: /* outsl */
+ /* port is in DX register */
+ io_port = regs->edx & 0xFFFF;
+ break;
+
+ default:
+ return FALSE;
+ }
+ break;
+ }
+
+ if (data_16)
+ opcode |= 0x6600; /* word IO */
+
+ switch (emulate_io(regs, opcode, io_port)) {
+ case EM_IO_DONE:
+ /* instruction executed */
+ regs->eip = eip;
+ return TRUE;
+
+ case EM_IO_RETRY:
+ /* port mapped, retry instruction */
+ return TRUE;
+
+ case EM_IO_ERROR:
+ /* port not mapped */
+ return FALSE;
+	}
+	return FALSE;			/* unexpected emulate_io result */
+}
+
+#if MACH_PCSAMPLE > 0
+/*
+ * return saved state for interrupted user thread
+ */
+unsigned
+interrupted_pc(t)
+ thread_t t;
+{
+ register struct i386_saved_state *iss;
+
+ iss = USER_REGS(t);
+ return iss->eip;
+}
+#endif /* MACH_PCSAMPLE > 0 */
+
diff --git a/i386/i386/trap.h b/i386/i386/trap.h
new file mode 100644
index 00000000..f4dcbd57
--- /dev/null
+++ b/i386/i386/trap.h
@@ -0,0 +1,38 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_TRAP_H_
+#define _I386_TRAP_H_
+
+#include <mach/machine/trap.h>
+
+#ifndef ASSEMBLER
+
+char *trap_name(unsigned int trapnum);
+
+#endif /* !ASSEMBLER */
+
+#endif /* _I386_TRAP_H_ */
diff --git a/i386/i386/tss.h b/i386/i386/tss.h
new file mode 100644
index 00000000..0d02f703
--- /dev/null
+++ b/i386/i386/tss.h
@@ -0,0 +1,76 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_TSS_H_
+#define _I386_TSS_H_
+
+#include <mach/inline.h>
+
+/*
+ * i386 Task State Segment
+ */
+struct i386_tss {
+ int back_link; /* segment number of previous task,
+ if nested */
+ int esp0; /* initial stack pointer ... */
+ int ss0; /* and segment for ring 0 */
+ int esp1; /* initial stack pointer ... */
+ int ss1; /* and segment for ring 1 */
+ int esp2; /* initial stack pointer ... */
+ int ss2; /* and segment for ring 2 */
+ int cr3; /* CR3 - page table directory
+ physical address */
+ int eip;
+ int eflags;
+ int eax;
+ int ecx;
+ int edx;
+ int ebx;
+ int esp; /* current stack pointer */
+ int ebp;
+ int esi;
+ int edi;
+ int es;
+ int cs;
+ int ss; /* current stack segment */
+ int ds;
+ int fs;
+ int gs;
+ int ldt; /* local descriptor table segment */
+ unsigned short trace_trap; /* trap on switch to this task */
+ unsigned short io_bit_map_offset;
+ /* offset to start of IO permission
+ bit map */
+};
+
+/* Load the current task register. */
+MACH_INLINE void
+ltr(unsigned short segment)
+{
+ __asm volatile("ltr %0" : : "r" (segment));
+}
+
+#endif /* _I386_TSS_H_ */
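+
+/*
+ * Illustrative sketch (the names `ktss', `KERNEL_DS' and `KERNEL_TSS'
+ * are placeholders): before ltr() is used, the fields the processor
+ * actually consults on a user-to-kernel transition - the ring-0 stack
+ * segment and pointer - must be filled in, and io_bit_map_offset can be
+ * set past the TSS limit when no I/O permission bitmap is provided.
+ */
+#if 0	/* example only, not compiled */
+struct i386_tss ktss;
+
+static void
+example_tss_setup(int kernel_stack_top)
+{
+	ktss.ss0  = KERNEL_DS;			/* ring-0 stack segment */
+	ktss.esp0 = kernel_stack_top;		/* ring-0 stack pointer */
+	ktss.io_bit_map_offset = sizeof ktss;	/* no I/O bitmap */
+	ltr(KERNEL_TSS);			/* load the task register */
+}
+#endif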
diff --git a/i386/i386/user_ldt.c b/i386/i386/user_ldt.c
new file mode 100644
index 00000000..71ca08da
--- /dev/null
+++ b/i386/i386/user_ldt.c
@@ -0,0 +1,389 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1993,1992,1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * User LDT management.
+ * Each thread in a task may have its own LDT.
+ */
+
+#include <kern/kalloc.h>
+#include <kern/thread.h>
+
+#include <vm/vm_kern.h>
+
+#include <i386/seg.h>
+#include <i386/thread.h>
+#include <i386/user_ldt.h>
+#include "ldt.h"
+
+char acc_type[8][3] = {
+ /* code stack data */
+ { 0, 0, 1 }, /* data */
+ { 0, 1, 1 }, /* data, writable */
+ { 0, 0, 1 }, /* data, expand-down */
+ { 0, 1, 1 }, /* data, writable, expand-down */
+ { 1, 0, 0 }, /* code */
+ { 1, 0, 1 }, /* code, readable */
+ { 1, 0, 0 }, /* code, conforming */
+ { 1, 0, 1 }, /* code, readable, conforming */
+};
+
+boolean_t selector_check(thread, sel, type)
+ thread_t thread;
+ int sel;
+ int type; /* code, stack, data */
+{
+ struct user_ldt *ldt;
+ int access;
+
+ ldt = thread->pcb->ims.ldt;
+ if (ldt == 0) {
+ switch (type) {
+ case S_CODE:
+ return sel == USER_CS;
+ case S_STACK:
+ return sel == USER_DS;
+ case S_DATA:
+ return sel == 0 ||
+ sel == USER_CS ||
+ sel == USER_DS;
+ }
+ }
+
+ if (type != S_DATA && sel == 0)
+ return FALSE;
+ if ((sel & (SEL_LDT|SEL_PL)) != (SEL_LDT|SEL_PL_U)
+ || sel > ldt->desc.limit_low)
+ return FALSE;
+
+ access = ldt->ldt[sel_idx(sel)].access;
+
+ if ((access & (ACC_P|ACC_PL|ACC_TYPE_USER))
+ != (ACC_P|ACC_PL_U|ACC_TYPE_USER))
+ return FALSE;
+ /* present, pl == pl.user, not system */
+
+ return acc_type[(access & 0xe)>>1][type];
+}
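+
+/*
+ * For example, a present user-mode readable code segment has access
+ * byte 0xfa: (0xfa & 0xe) >> 1 == 5 and acc_type[5] is {1, 0, 1}, so
+ * such a selector is accepted for code and data references but
+ * rejected as a stack selector.
+ */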
+
+/*
+ * Add the descriptors to the LDT, starting with
+ * the descriptor for 'first_selector'.
+ */
+kern_return_t
+i386_set_ldt(thread, first_selector, desc_list, count, desc_list_inline)
+ thread_t thread;
+ int first_selector;
+ struct real_descriptor *desc_list;
+ unsigned int count;
+ boolean_t desc_list_inline;
+{
+ user_ldt_t new_ldt, old_ldt, cur_ldt;
+ struct real_descriptor *dp;
+ int i;
+ pcb_t pcb;
+ vm_size_t ldt_size_needed;
+ int first_desc = sel_idx(first_selector);
+ vm_map_copy_t old_copy_object;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+ if (first_desc < 0 || first_desc > 8191)
+ return KERN_INVALID_ARGUMENT;
+ if (first_desc + count >= 8192)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * If desc_list is not inline, it is in copyin form.
+ * We must copy it out to the kernel map, and wire
+ * it down (we touch it while the PCB is locked).
+ *
+ * We make a copy of the copyin object, and clear
+ * out the old one, so that returning KERN_INVALID_ARGUMENT
+ * will not try to deallocate the data twice.
+ */
+ if (!desc_list_inline) {
+ kern_return_t kr;
+ vm_offset_t dst_addr;
+
+ old_copy_object = (vm_map_copy_t) desc_list;
+
+ kr = vm_map_copyout(ipc_kernel_map, &dst_addr,
+ vm_map_copy_copy(old_copy_object));
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ (void) vm_map_pageable(ipc_kernel_map,
+ dst_addr,
+ dst_addr + count * sizeof(struct real_descriptor),
+ VM_PROT_READ|VM_PROT_WRITE);
+ desc_list = (struct real_descriptor *)dst_addr;
+ }
+
+ for (i = 0, dp = desc_list;
+ i < count;
+ i++, dp++)
+ {
+ switch (dp->access & ~ACC_A) {
+ case 0:
+ case ACC_P:
+ /* valid empty descriptor */
+ break;
+ case ACC_P | ACC_CALL_GATE:
+ /* Mach kernel call */
+ *dp = *(struct real_descriptor *)
+ &ldt[sel_idx(USER_SCALL)];
+ break;
+ case ACC_P | ACC_PL_U | ACC_DATA:
+ case ACC_P | ACC_PL_U | ACC_DATA_W:
+ case ACC_P | ACC_PL_U | ACC_DATA_E:
+ case ACC_P | ACC_PL_U | ACC_DATA_EW:
+ case ACC_P | ACC_PL_U | ACC_CODE:
+ case ACC_P | ACC_PL_U | ACC_CODE_R:
+ case ACC_P | ACC_PL_U | ACC_CODE_C:
+ case ACC_P | ACC_PL_U | ACC_CODE_CR:
+ case ACC_P | ACC_PL_U | ACC_CALL_GATE_16:
+ case ACC_P | ACC_PL_U | ACC_CALL_GATE:
+ break;
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+ }
+	ldt_size_needed = sizeof(struct real_descriptor)
+			* (first_desc + count);
+
+ pcb = thread->pcb;
+ old_ldt = 0; /* the one to throw away */
+ new_ldt = 0; /* the one to allocate */
+ Retry:
+ simple_lock(&pcb->lock);
+ cur_ldt = pcb->ims.ldt;
+ if (cur_ldt == 0 ||
+ cur_ldt->desc.limit_low + 1 < ldt_size_needed)
+ {
+ /*
+ * No current LDT, or not big enough
+ */
+ if (new_ldt == 0) {
+ simple_unlock(&pcb->lock);
+
+ new_ldt = (user_ldt_t)
+ kalloc(ldt_size_needed
+ + sizeof(struct real_descriptor));
+ /*
+ * Build a descriptor that describes the
+ * LDT itself
+ */
+ {
+ vm_offset_t ldt_base;
+
+ ldt_base = (vm_offset_t) &new_ldt->ldt[0];
+
+ new_ldt->desc.limit_low = ldt_size_needed - 1;
+ new_ldt->desc.limit_high = 0;
+ new_ldt->desc.base_low = ldt_base & 0xffff;
+ new_ldt->desc.base_med = (ldt_base >> 16) & 0xff;
+ new_ldt->desc.base_high = ldt_base >> 24;
+ new_ldt->desc.access = ACC_P | ACC_LDT;
+ new_ldt->desc.granularity = 0;
+ }
+
+ goto Retry;
+ }
+
+ /*
+ * Have new LDT. Copy descriptors from current to new.
+ */
+ if (cur_ldt)
+ bcopy((char *) &cur_ldt->ldt[0],
+ (char *) &new_ldt->ldt[0],
+ cur_ldt->desc.limit_low + 1);
+
+ old_ldt = cur_ldt; /* discard old LDT */
+ cur_ldt = new_ldt; /* use new LDT from now on */
+ new_ldt = 0; /* keep new LDT */
+
+ pcb->ims.ldt = cur_ldt; /* set LDT for thread */
+ }
+
+ /*
+ * Install new descriptors.
+ */
+ bcopy((char *) desc_list,
+ (char *) &cur_ldt->ldt[first_desc],
+ count * sizeof(struct real_descriptor));
+
+ simple_unlock(&pcb->lock);
+
+ /*
+ * Discard old LDT if it was replaced
+ */
+ if (old_ldt)
+ kfree((vm_offset_t)old_ldt,
+ old_ldt->desc.limit_low + 1
+ + sizeof(struct real_descriptor));
+
+ /*
+ * Discard new LDT if it was not used
+ */
+ if (new_ldt)
+ kfree((vm_offset_t)new_ldt,
+ new_ldt->desc.limit_low + 1
+ + sizeof(struct real_descriptor));
+
+ /*
+ * Free the descriptor list, if it was
+ * out-of-line. Also discard the original
+ * copy object for it.
+ */
+ if (!desc_list_inline) {
+ (void) kmem_free(ipc_kernel_map,
+ (vm_offset_t) desc_list,
+ count * sizeof(struct real_descriptor));
+ vm_map_copy_discard(old_copy_object);
+ }
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+i386_get_ldt(thread, first_selector, selector_count, desc_list, count)
+ thread_t thread;
+ int first_selector;
+ int selector_count; /* number wanted */
+ struct real_descriptor **desc_list; /* in/out */
+ unsigned int *count; /* in/out */
+{
+ struct user_ldt *user_ldt;
+ pcb_t pcb = thread->pcb;
+ int first_desc = sel_idx(first_selector);
+ unsigned int ldt_count;
+ vm_size_t ldt_size;
+ vm_size_t size, size_needed;
+ vm_offset_t addr;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+ if (first_desc < 0 || first_desc > 8191)
+ return KERN_INVALID_ARGUMENT;
+ if (first_desc + selector_count >= 8192)
+ return KERN_INVALID_ARGUMENT;
+
+ addr = 0;
+ size = 0;
+
+ for (;;) {
+ simple_lock(&pcb->lock);
+ user_ldt = pcb->ims.ldt;
+ if (user_ldt == 0) {
+ simple_unlock(&pcb->lock);
+ if (addr)
+ kmem_free(ipc_kernel_map, addr, size);
+ *count = 0;
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * Find how many descriptors we should return.
+ */
+ ldt_count = (user_ldt->desc.limit_low + 1) /
+ sizeof (struct real_descriptor);
+ ldt_count -= first_desc;
+ if (ldt_count > selector_count)
+ ldt_count = selector_count;
+
+ ldt_size = ldt_count * sizeof(struct real_descriptor);
+
+ /*
+ * Do we have the memory we need?
+ */
+ if (ldt_count <= *count)
+ break; /* fits in-line */
+
+ size_needed = round_page(ldt_size);
+ if (size_needed <= size)
+ break;
+
+ /*
+ * Unlock the pcb and allocate more memory
+ */
+ simple_unlock(&pcb->lock);
+
+ if (size != 0)
+ kmem_free(ipc_kernel_map, addr, size);
+
+ size = size_needed;
+
+ if (kmem_alloc(ipc_kernel_map, &addr, size)
+ != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /*
+ * copy out the descriptors
+ */
+	bcopy((char *)&user_ldt->ldt[first_desc],
+	      addr ? (char *)addr : (char *)*desc_list,
+	      ldt_size);
+ *count = ldt_count;
+ simple_unlock(&pcb->lock);
+
+ if (addr) {
+ vm_size_t size_used, size_left;
+ vm_map_copy_t memory;
+
+ /*
+ * Free any unused memory beyond the end of the last page used
+ */
+ size_used = round_page(ldt_size);
+ if (size_used != size)
+ kmem_free(ipc_kernel_map,
+ addr + size_used, size - size_used);
+
+ /*
+ * Zero the remainder of the page being returned.
+ */
+ size_left = size_used - ldt_size;
+ if (size_left > 0)
+ bzero((char *)addr + ldt_size, size_left);
+
+ /*
+ * Make memory into copyin form - this unwires it.
+ */
+ (void) vm_map_copyin(ipc_kernel_map, addr, size_used, TRUE, &memory);
+ *desc_list = (struct real_descriptor *)memory;
+ }
+
+ return KERN_SUCCESS;
+}
+
+void
+user_ldt_free(user_ldt)
+ user_ldt_t user_ldt;
+{
+ kfree((vm_offset_t)user_ldt,
+ user_ldt->desc.limit_low + 1
+ + sizeof(struct real_descriptor));
+}
diff --git a/i386/i386/user_ldt.h b/i386/i386/user_ldt.h
new file mode 100644
index 00000000..9267ac7e
--- /dev/null
+++ b/i386/i386/user_ldt.h
@@ -0,0 +1,55 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_USER_LDT_H_
+#define _I386_USER_LDT_H_
+
+/*
+ * User LDT management.
+ *
+ * Each thread in a task may have its own LDT.
+ */
+
+#include <i386/seg.h>
+
+struct user_ldt {
+ struct real_descriptor desc; /* descriptor for self */
+ struct real_descriptor ldt[1]; /* descriptor table (variable) */
+};
+typedef struct user_ldt * user_ldt_t;
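+
+/*
+ * Illustrative sketch (placeholder macro name): the structure is
+ * allocated as a single block - the self-descriptor `desc' followed by
+ * the n table entries - so the allocation for an n-entry LDT is
+ * (n + 1) * sizeof(struct real_descriptor), with desc.limit_low set to
+ * n * sizeof(struct real_descriptor) - 1.
+ */
+#if 0	/* example only, not compiled */
+#define USER_LDT_ALLOC_SIZE(n) \
+	(((n) + 1) * sizeof(struct real_descriptor))
+#endif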
+
+/*
+ * Check code/stack/data selector values against LDT if present.
+ */
+#define S_CODE 0 /* code segment */
+#define S_STACK 1 /* stack segment */
+#define S_DATA 2 /* data segment */
+
+extern boolean_t selector_check(/* thread_t thread,
+ int sel,
+ int type */);
+
+#endif /* _I386_USER_LDT_H_ */
diff --git a/i386/i386/vm_param.h b/i386/i386/vm_param.h
new file mode 100644
index 00000000..30e9418e
--- /dev/null
+++ b/i386/i386/vm_param.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_KERNEL_I386_VM_PARAM_
+#define _I386_KERNEL_I386_VM_PARAM_
+
+/* XXX use xu/vm_param.h */
+#include <mach/vm_param.h>
+
+/* The kernel address space is 1GB, starting at virtual address 0. */
+#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t) 0x00000000)
+#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) 0x40000000)
+
+/* The kernel virtual address space is actually located
+ at high linear addresses.
+ This is the kernel address range in linear addresses. */
+#define LINEAR_MIN_KERNEL_ADDRESS ((vm_offset_t) 0xc0000000)
+#define LINEAR_MAX_KERNEL_ADDRESS ((vm_offset_t) 0xffffffff)
+
+#define KERNEL_STACK_SIZE (1*I386_PGBYTES)
+#define INTSTACK_SIZE (1*I386_PGBYTES)
+ /* interrupt stack size */
+
+/*
+ * Conversion between 80386 pages and VM pages
+ */
+
+#define trunc_i386_to_vm(p) (atop(trunc_page(i386_ptob(p))))
+#define round_i386_to_vm(p) (atop(round_page(i386_ptob(p))))
+#define vm_to_i386(p) (i386_btop(ptoa(p)))
+
+/*
+ * Physical memory is direct-mapped to virtual memory
+ * starting at virtual address phys_mem_va.
+ */
+extern vm_offset_t phys_mem_va;
+#define phystokv(a) ((vm_offset_t)(a) + phys_mem_va)
+
+/*
+ * Kernel virtual memory is actually at 0xc0000000 in linear addresses.
+ */
+#define kvtolin(a) ((vm_offset_t)(a) + LINEAR_MIN_KERNEL_ADDRESS)
+#define lintokv(a) ((vm_offset_t)(a) - LINEAR_MIN_KERNEL_ADDRESS)
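+
+/*
+ * For example, with LINEAR_MIN_KERNEL_ADDRESS at 0xc0000000, the kernel
+ * virtual address 0x00100000 corresponds to linear address 0xc0100000
+ * under kvtolin(), and lintokv(0xc0100000) gives back 0x00100000.
+ */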
+
+#endif /* _I386_KERNEL_I386_VM_PARAM_ */
diff --git a/i386/i386/vm_tuning.h b/i386/i386/vm_tuning.h
new file mode 100644
index 00000000..a5091fb7
--- /dev/null
+++ b/i386/i386/vm_tuning.h
@@ -0,0 +1,35 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: i386/vm_tuning.h
+ *
+ * VM tuning parameters for the i386 (without reference bits).
+ */
+
+#ifndef _I386_VM_TUNING_H_
+#define _I386_VM_TUNING_H_
+
+#endif /* _I386_VM_TUNING_H_ */
diff --git a/i386/i386/xpr.h b/i386/i386/xpr.h
new file mode 100644
index 00000000..19ef026a
--- /dev/null
+++ b/i386/i386/xpr.h
@@ -0,0 +1,32 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: xpr.h
+ *
+ * Machine dependent module for the XPR tracing facility.
+ */
+
+#define XPR_TIMESTAMP (0)
diff --git a/i386/i386/zalloc.h b/i386/i386/zalloc.h
new file mode 100644
index 00000000..bf7cf6b2
--- /dev/null
+++ b/i386/i386/zalloc.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 1996-1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Utah $Hdr: zalloc.h 1.4 94/12/16$
+ * Author: Bryan Ford
+ */
+
+#ifndef _I386_ZALLOC_H_
+#define _I386_ZALLOC_H_
+
+#include <kern/zalloc.h>
+
+#endif /* _I386_ZALLOC_H_ */
diff --git a/i386/i386at/asm_startup.h b/i386/i386at/asm_startup.h
new file mode 100644
index 00000000..5b35293a
--- /dev/null
+++ b/i386/i386at/asm_startup.h
@@ -0,0 +1,42 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Startup code for an i386 on an AT.
+ * Kernel is loaded starting at 1MB.
+ * Protected mode, paging disabled.
+ *
+ * %esp -> boottype
+ * size of extended memory (K)
+ * size of conventional memory (K)
+ * boothowto
+ *
+ */
+
+ popl _boottype+KVTOPHYS /* get boottype */
+ popl _extmem+KVTOPHYS /* extended memory, in K */
+ popl _cnvmem+KVTOPHYS /* conventional memory, in K */
+ popl _boothowto+KVTOPHYS /* boot flags */
+
diff --git a/i386/i386at/autoconf.c b/i386/i386at/autoconf.c
new file mode 100644
index 00000000..50eec98b
--- /dev/null
+++ b/i386/i386at/autoconf.c
@@ -0,0 +1,484 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifdef MACH_KERNEL
+#include <mach_ttd.h>
+#include <mach/std_types.h>
+#else /* MACH_KERNEL */
+#include <cpus.h>
+#include <platforms.h>
+#include <generic.h>
+#include <sys/param.h>
+#include <mach/machine.h>
+#include <machine/cpu.h>
+#endif /* MACH_KERNEL */
+#ifdef LINUX_DEV
+#include <i386/pic.h>
+#endif
+#include <i386/ipl.h>
+#include <chips/busses.h>
+
+/* initialization typecasts */
+#define SPL_FIVE (vm_offset_t)SPL5
+#define SPL_SIX (vm_offset_t)SPL6
+#define SPL_TTY (vm_offset_t)SPLTTY
+
+
+#include <hd.h>
+#if NHD > 0
+extern struct bus_driver hddriver;
+extern int hdintr();
+#endif /* NHD */
+
+#include <fd.h>
+#if NFD > 0
+extern struct bus_driver fddriver;
+extern int fdintr();
+#endif /* NFD */
+
+#include <aha.h>
+#if NAHA > 0
+extern struct bus_driver aha_driver;
+extern int aha_intr();
+#endif /* NAHA */
+
+#include <eaha.h>
+#if NEAHA > 0
+extern struct bus_driver eaha_driver;
+extern int eaha_intr();
+#endif /* NEAHA */
+
+#include <pc586.h>
+#if NPC586 > 0
+extern struct bus_driver pcdriver;
+extern int pc586intr();
+#endif /* NPC586 */
+
+#include <ne.h>
+#if NNE > 0
+extern struct bus_driver nedriver;
+extern int neintr();
+#endif /* NNE */
+
+#include <ns8390.h>
+#if NNS8390 > 0
+extern struct bus_driver ns8390driver;
+extern int ns8390intr();
+#endif /* NNS8390 */
+
+#include <at3c501.h>
+#if NAT3C501 > 0
+extern struct bus_driver at3c501driver;
+extern int at3c501intr();
+#endif /* NAT3C501 */
+
+#include <ul.h>
+#if NUL > 0
+extern struct bus_driver uldriver;
+extern int ulintr();
+#endif
+
+#include <wd.h>
+#if NWD > 0
+extern struct bus_driver wddriver;
+extern int wdintr();
+#endif
+
+#include <hpp.h>
+#if NHPP > 0
+extern struct bus_driver hppdriver;
+extern int hppintr();
+#endif
+
+#include <com.h>
+#if NCOM > 0
+extern struct bus_driver comdriver;
+extern int comintr();
+#endif /* NCOM */
+
+#include <lpr.h>
+#if NLPR > 0
+extern struct bus_driver lprdriver;
+extern int lprintr();
+#endif /* NLPR */
+
+#include <wt.h>
+#if NWT > 0
+extern struct bus_driver wtdriver;
+extern int wtintr();
+#endif /* NWT */
+
+struct bus_ctlr bus_master_init[] = {
+
+/* driver name unit intr address len phys_address
+ adaptor alive flags spl pic */
+
+#ifndef LINUX_DEV
+#if NHD > 0
+ {&hddriver, "hdc", 0, hdintr, 0x1f0, 8, 0x1f0,
+ '?', 0, 0, SPL_FIVE, 14},
+
+ {&hddriver, "hdc", 1, hdintr, 0x170, 8, 0x170,
+ '?', 0, 0, SPL_FIVE, 15},
+#endif /* NHD > 0 */
+
+#if NAHA > 0
+ {&aha_driver, "ahac", 0, aha_intr, 0x330, 4, 0x330,
+ '?', 0, 0, SPL_FIVE, 11},
+
+#if NAHA > 1
+
+ {&aha_driver, "ahac", 1, aha_intr, 0x234, 4, 0x234,
+ '?', 0, 0, SPL_FIVE, 12},
+ {&aha_driver, "ahac", 1, aha_intr, 0x230, 4, 0x230,
+ '?', 0, 0, SPL_FIVE, 12},
+ {&aha_driver, "ahac", 1, aha_intr, 0x134, 4, 0x134,
+ '?', 0, 0, SPL_FIVE, 12},
+ {&aha_driver, "ahac", 1, aha_intr, 0x130, 4, 0x130,
+ '?', 0, 0, SPL_FIVE, 12},
+
+#else
+
+ {&aha_driver, "ahac", 0, aha_intr, 0x334, 4, 0x334,
+ '?', 0, 0, SPL_FIVE, 11},
+ {&aha_driver, "ahac", 0, aha_intr, 0x234, 4, 0x234,
+ '?', 0, 0, SPL_FIVE, 11},
+ {&aha_driver, "ahac", 0, aha_intr, 0x230, 4, 0x230,
+ '?', 0, 0, SPL_FIVE, 11},
+ {&aha_driver, "ahac", 0, aha_intr, 0x134, 4, 0x134,
+ '?', 0, 0, SPL_FIVE, 11},
+ {&aha_driver, "ahac", 0, aha_intr, 0x130, 4, 0x130,
+ '?', 0, 0, SPL_FIVE, 11},
+
+#endif /* NAHA > 1 */
+#endif /* NAHA > 0*/
+
+#if NEAHA > 0
+{&eaha_driver, "eahac", 0, eaha_intr, 0x0000, 4, 0x0000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0x1000, 4, 0x1000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0x2000, 4, 0x2000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0x3000, 4, 0x3000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0x4000, 4, 0x4000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0x5000, 4, 0x5000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0x6000, 4, 0x6000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0x7000, 4, 0x7000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0x8000, 4, 0x8000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0x9000, 4, 0x9000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0xa000, 4, 0xa000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0xb000, 4, 0xb000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0xc000, 4, 0xc000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0xd000, 4, 0xd000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0xe000, 4, 0xe000,
+ '?', 0, 0, SPL_FIVE, 12},
+{&eaha_driver, "eahac", 0, eaha_intr, 0xf000, 4, 0xf000,
+ '?', 0, 0, SPL_FIVE, 12},
+#endif /* NEAHA > 0 */
+
+#if NFD > 0
+ {&fddriver, "fdc", 0, fdintr, 0x3f2, 6, 0x3f2,
+ '?', 0, 0, SPL_FIVE, 6},
+
+ {&fddriver, "fdc", 1, fdintr, 0x372, 6, 0x372,
+ '?', 0, 0, SPL_FIVE, 10},
+#endif /* NFD > 0 */
+#endif /* ! LINUX_DEV */
+
+ 0
+};
+
+
+struct bus_device bus_device_init[] = {
+
+/* driver name unit intr address am phys_address
+ adaptor alive ctlr slave flags *mi *next sysdep sysdep */
+
+#ifndef LINUX_DEV
+#if NHD > 0
+ {&hddriver, "hd", 0, hdintr, 0x104, 8, 0x1f0,
+ '?', 0, 0, 0, 0, 0, 0, SPL_FIVE, 14},
+ {&hddriver, "hd", 1, hdintr, 0x118, 8, 0x1f0,
+ '?', 0, 0, 1, 0, 0, 0, SPL_FIVE, 14},
+ {&hddriver, "hd", 2, hdintr, 0x104, 8, 0x170, /*??*/
+ '?', 0, 1, 0, 0, 0, 0, SPL_FIVE, 15},
+ {&hddriver, "hd", 3, hdintr, 0x118, 8, 0x170,
+ '?', 0, 1, 1, 0, 0, 0, SPL_FIVE, 15},
+#endif /* NHD > 0 */
+
+#if NAHA > 0
+{ &aha_driver, "rz", 0, 0, 0x0,0, 0, '?', 0, 0, 0, 0, },
+{ &aha_driver, "rz", 1, 0, 0x0,0, 0, '?', 0, 0, 1, 0, },
+{ &aha_driver, "rz", 2, 0, 0x0,0, 0, '?', 0, 0, 2, 0, },
+{ &aha_driver, "rz", 3, 0, 0x0,0, 0, '?', 0, 0, 3, 0, },
+{ &aha_driver, "rz", 4, 0, 0x0,0, 0, '?', 0, 0, 4, 0, },
+{ &aha_driver, "rz", 5, 0, 0x0,0, 0, '?', 0, 0, 5, 0, },
+{ &aha_driver, "rz", 6, 0, 0x0,0, 0, '?', 0, 0, 6, 0, },
+{ &aha_driver, "rz", 7, 0, 0x0,0, 0, '?', 0, 0, 7, 0, },
+
+{ &aha_driver, "tz", 0, 0, 0x0,0, 0, '?', 0, 0, 0, 0, },
+{ &aha_driver, "tz", 1, 0, 0x0,0, 0, '?', 0, 0, 1, 0, },
+{ &aha_driver, "tz", 2, 0, 0x0,0, 0, '?', 0, 0, 2, 0, },
+{ &aha_driver, "tz", 3, 0, 0x0,0, 0, '?', 0, 0, 3, 0, },
+{ &aha_driver, "tz", 4, 0, 0x0,0, 0, '?', 0, 0, 4, 0, },
+{ &aha_driver, "tz", 5, 0, 0x0,0, 0, '?', 0, 0, 5, 0, },
+{ &aha_driver, "tz", 6, 0, 0x0,0, 0, '?', 0, 0, 6, 0, },
+{ &aha_driver, "tz", 7, 0, 0x0,0, 0, '?', 0, 0, 7, 0, },
+
+#if NAHA > 1
+
+{ &aha_driver, "rz", 8, 0, 0x0,0, 0, '?', 0, 1, 0, 0, },
+{ &aha_driver, "rz", 9, 0, 0x0,0, 0, '?', 0, 1, 1, 0, },
+{ &aha_driver, "rz", 10, 0, 0x0,0, 0, '?', 0, 1, 2, 0, },
+{ &aha_driver, "rz", 11, 0, 0x0,0, 0, '?', 0, 1, 3, 0, },
+{ &aha_driver, "rz", 12, 0, 0x0,0, 0, '?', 0, 1, 4, 0, },
+{ &aha_driver, "rz", 13, 0, 0x0,0, 0, '?', 0, 1, 5, 0, },
+{ &aha_driver, "rz", 14, 0, 0x0,0, 0, '?', 0, 1, 6, 0, },
+{ &aha_driver, "rz", 15, 0, 0x0,0, 0, '?', 0, 1, 7, 0, },
+
+{ &aha_driver, "tz", 8, 0, 0x0,0, 0, '?', 0, 1, 0, 0, },
+{ &aha_driver, "tz", 9, 0, 0x0,0, 0, '?', 0, 1, 1, 0, },
+{ &aha_driver, "tz", 10, 0, 0x0,0, 0, '?', 0, 1, 2, 0, },
+{ &aha_driver, "tz", 11, 0, 0x0,0, 0, '?', 0, 1, 3, 0, },
+{ &aha_driver, "tz", 12, 0, 0x0,0, 0, '?', 0, 1, 4, 0, },
+{ &aha_driver, "tz", 13, 0, 0x0,0, 0, '?', 0, 1, 5, 0, },
+{ &aha_driver, "tz", 14, 0, 0x0,0, 0, '?', 0, 1, 6, 0, },
+{ &aha_driver, "tz", 15, 0, 0x0,0, 0, '?', 0, 1, 7, 0, },
+#endif /* NAHA > 1 */
+#endif /* NAHA > 0 */
+
+#if NEAHA > 0
+{ &eaha_driver, "rz", 0, 0, 0x0,0, 0, '?', 0, 0, 0, 0, },
+{ &eaha_driver, "rz", 1, 0, 0x0,0, 0, '?', 0, 0, 1, 0, },
+{ &eaha_driver, "rz", 2, 0, 0x0,0, 0, '?', 0, 0, 2, 0, },
+{ &eaha_driver, "rz", 3, 0, 0x0,0, 0, '?', 0, 0, 3, 0, },
+{ &eaha_driver, "rz", 4, 0, 0x0,0, 0, '?', 0, 0, 4, 0, },
+{ &eaha_driver, "rz", 5, 0, 0x0,0, 0, '?', 0, 0, 5, 0, },
+{ &eaha_driver, "rz", 6, 0, 0x0,0, 0, '?', 0, 0, 6, 0, },
+{ &eaha_driver, "rz", 7, 0, 0x0,0, 0, '?', 0, 0, 7, 0, },
+
+{ &eaha_driver, "tz", 0, 0, 0x0,0, 0, '?', 0, 0, 0, 0, },
+{ &eaha_driver, "tz", 1, 0, 0x0,0, 0, '?', 0, 0, 1, 0, },
+{ &eaha_driver, "tz", 2, 0, 0x0,0, 0, '?', 0, 0, 2, 0, },
+{ &eaha_driver, "tz", 3, 0, 0x0,0, 0, '?', 0, 0, 3, 0, },
+{ &eaha_driver, "tz", 4, 0, 0x0,0, 0, '?', 0, 0, 4, 0, },
+{ &eaha_driver, "tz", 5, 0, 0x0,0, 0, '?', 0, 0, 5, 0, },
+{ &eaha_driver, "tz", 6, 0, 0x0,0, 0, '?', 0, 0, 6, 0, },
+{ &eaha_driver, "tz", 7, 0, 0x0,0, 0, '?', 0, 0, 7, 0, },
+#endif /* NEAHA > 0 */
+
+#if NFD > 0
+ {&fddriver, "fd", 0, fdintr, 0x3f2, 6, 0x3f2,
+ '?', 0, 0, 0, 0, 0, 0, SPL_FIVE, 6},
+ {&fddriver, "fd", 1, fdintr, 0x3f2, 6, 0x3f2,
+ '?', 0, 0, 1, 0, 0, 0, SPL_FIVE, 6},
+
+ {&fddriver, "fd", 2, fdintr, 0x372, 6, 0x372,
+ '?', 0, 1, 0, 0, 0, 0, SPL_FIVE, 10},
+ {&fddriver, "fd", 3, fdintr, 0x372, 6, 0x372,
+ '?', 0, 1, 1, 0, 0, 0, SPL_FIVE, 10},
+#endif /* NFD > 0 */
+
+#if NPC586 > 0
+ /* For MACH Default */
+ {&pcdriver, "pc", 0, pc586intr, 0xd0000, 0, 0xd0000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_FIVE, 9},
+ /* For Factory Default */
+ {&pcdriver, "pc", 0, pc586intr, 0xc0000, 0, 0xc0000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_FIVE, 5},
+ /* For what Intel Ships */
+ {&pcdriver, "pc", 0, pc586intr, 0xf00000, 0, 0xf00000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_FIVE, 12},
+#endif /* NPC586 > 0 */
+
+#if NNE > 0
+{&nedriver, "ne", 0, neintr, 0x280,0x4000,0xd0000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 5},
+{&nedriver, "ne", 1, neintr, 0x300,0x4000,0xd0000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 10},
+#endif /* NNE > 0 */
+
+#if NNS8390 > 0
+ /* "wd" and "el" */
+ {&ns8390driver, "wd", 0, ns8390intr, 0x280,0x2000,0xd0000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 9},
+ {&ns8390driver, "wd", 0, ns8390intr, 0x2a0,0x2000,0xd0000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 9},
+ {&ns8390driver, "wd", 0, ns8390intr, 0x2e0,0x2000,0xd0000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 5},
+ {&ns8390driver, "wd", 0, ns8390intr, 0x300,0x2000,0xd0000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 5},
+ {&ns8390driver, "wd", 0, ns8390intr, 0x250,0x2000,0xd0000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 5},
+ {&ns8390driver, "wd", 0, ns8390intr, 0x350,0x2000,0xd0000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 5},
+ {&ns8390driver, "wd", 0, ns8390intr, 0x240,0x2000,0xd0000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 11},
+ {&ns8390driver, "wd", 1, ns8390intr, 0x340,0x2000,0xe8000,
+ '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 15},
+#endif /* NNS8390 > 0 */
+
+#if NAT3C501 > 0
+ {&at3c501driver, "et", 0, at3c501intr, 0x300, 0,0x300,
+ '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 9},
+#endif /* NAT3C501 > 0 */
+
+#if NUL > 0
+ {&uldriver, "ul", 0, ulintr, 0, 0, 0, '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 0},
+ {&uldriver, "ul", 1, ulintr, 0, 0, 0, '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 0},
+#endif
+
+#if NWD > 0
+ {&wddriver, "wd", 0, wdintr, 0, 0, 0, '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 9},
+ {&wddriver, "wd", 1, wdintr, 0, 0, 0, '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 15},
+#endif
+
+#if NHPP > 0
+ {&hppdriver, "hpp", 0, hppintr, 0, 0, 0, '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 0},
+ {&hppdriver, "hpp", 1, hppintr, 0, 0, 0, '?', 0, -1, -1, 0, 0, 0, SPL_SIX, 0},
+#endif
+#endif /* ! LINUX_DEV */
+
+#if NCOM > 0
+ {&comdriver, "com", 0, comintr, 0x3f8, 8, 0x3f8,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 4},
+ {&comdriver, "com", 1, comintr, 0x2f8, 8, 0x2f8,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 3},
+ {&comdriver, "com", 2, comintr, 0x3e8, 8, 0x3e8,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 5},
+#endif /* NCOM > 0 */
+
+#ifndef LINUX_DEV
+#if NLPR > 0
+ {&lprdriver, "lpr", 0, lprintr, 0x378, 3, 0x378,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 7},
+ {&lprdriver, "lpr", 0, lprintr, 0x278, 3, 0x278,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 7},
+ {&lprdriver, "lpr", 0, lprintr, 0x3bc, 3, 0x3bc,
+ '?', 0, -1, -1, 0, 0, 0, SPL_TTY, 7},
+#endif /* NLPR > 0 */
+
+#if NWT > 0
+ {&wtdriver, "wt", 0, wtintr, 0x300, 2, 0x300,
+ '?', 0, -1, -1, 0, 0, 0, SPL_FIVE, 5},
+ {&wtdriver, "wt", 0, wtintr, 0x288, 2, 0x288,
+ '?', 0, -1, -1, 0, 0, 0, SPL_FIVE, 5},
+ {&wtdriver, "wt", 0, wtintr, 0x388, 2, 0x388,
+ '?', 0, -1, -1, 0, 0, 0, SPL_FIVE, 5},
+#endif /* NWT > 0 */
+#endif /* ! LINUX_DEV */
+
+ 0
+};
+
+/*
+ * probeio:
+ *
+ * Probe and subsequently attach devices out on the AT bus.
+ */
+void probeio(void)
+{
+ register struct bus_device *device;
+ register struct bus_ctlr *master;
+ int i = 0;
+
+ for (master = bus_master_init; master->driver; master++)
+ {
+ if (configure_bus_master(master->name, master->address,
+ master->phys_address, i, "atbus"))
+ i++;
+ }
+
+ for (device = bus_device_init; device->driver; device++)
+ {
+ /* ignore what we (should) have found already */
+ if (device->alive || device->ctlr >= 0)
+ continue;
+ if (configure_bus_device(device->name, device->address,
+ device->phys_address, i, "atbus"))
+ i++;
+ }
+
+#if MACH_TTD
+ /*
+ * Initialize Remote kernel debugger.
+ */
+ ttd_init();
+#endif /* MACH_TTD */
+}
+
+void take_dev_irq(
+ struct bus_device *dev)
+{
+ int pic = (int)dev->sysdep1;
+
+ if (intpri[pic] == 0) {
+ iunit[pic] = dev->unit;
+ ivect[pic] = dev->intr;
+ intpri[pic] = (int)dev->sysdep;
+ form_pic_mask();
+ } else {
+ printf("The device below will clobber IRQ %d.\n", pic);
+ printf("You have two devices at the same IRQ.\n");
+ printf("This won't work. Reconfigure your hardware and try again.\n");
+ printf("%s%d: port = %x, spl = %d, pic = %d.\n",
+ dev->name, dev->unit, dev->address,
+ dev->sysdep, dev->sysdep1);
+ while (1);
+ }
+
+}
+
+void take_ctlr_irq(
+ struct bus_ctlr *ctlr)
+{
+ int pic = ctlr->sysdep1;
+ if (intpri[pic] == 0) {
+ iunit[pic] = ctlr->unit;
+ ivect[pic] = ctlr->intr;
+ intpri[pic] = (int)ctlr->sysdep;
+ form_pic_mask();
+ } else {
+ printf("The device below will clobber IRQ %d.\n", pic);
+ printf("You have two devices at the same IRQ. This won't work.\n");
+ printf("Reconfigure your hardware and try again.\n");
+ while (1);
+ }
+}
diff --git a/i386/i386at/blit.c b/i386/i386at/blit.c
new file mode 100644
index 00000000..e97b0eea
--- /dev/null
+++ b/i386/i386at/blit.c
@@ -0,0 +1,948 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/* **********************************************************************
+ File: blit.c
+ Description: Device Driver for Bell Tech Blit card
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifdef MACH_KERNEL
+#include <sys/types.h>
+#include <device/errno.h>
+#else /* MACH_KERNEL */
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/param.h>
+#include <sys/dir.h>
+#include <sys/signal.h>
+#include <sys/user.h>
+#endif /* MACH_KERNEL */
+#include <vm/vm_kern.h>
+#include <mach/vm_param.h>
+#include <machine/machspl.h>
+
+#include <i386at/blitreg.h>
+#include <i386at/blitvar.h>
+#include <i386at/blituser.h>
+#include <i386at/kd.h>
+#include <i386at/kdsoft.h>
+
+#include <blit.h>
+
+
+/*
+ * This driver really only supports 1 card, though parts of it were
+ * written to support multiple cards. If you want to finish the job
+ * and really support multiple cards, then you'll have to:
+ *
+ * (1) make sure that driver functions pass around a pointer telling
+ * which card they're talking about.
+ *
+ * (2) coordinate things with the kd driver, so that one card is used
+ * for the console and the other is simply an additional display.
+ */
+#define MAXBLITS 1
+
+#if NBLIT > MAXBLITS
+/* oh, no, you don't want to do this...; */
+
+#else
+#if NBLIT > 0
+
+#define AUTOINIT 0
+
+ /*
+ * Forward Declarations
+ */
+static tiledesc();
+static loadall();
+
+#if AUTOINIT
+int blitattach(), blitprobe();
+#endif
+
+int blitioctl(), blitopen(), blitclose(), blitmmap();
+
+
+static void setstatus();
+#define CARD_RESET 0
+#define CARD_MAPPED 1
+#define CARD_MAYBE_PRESENT 2
+#define CARD_PRESENT 3
+#define BIU_INIT 4
+#define UNUSED1 5
+#define DP_INIT 6
+#define UNUSED2 7
+
+
+#if AUTOINIT
+struct mb_device *blitinfo[NBLIT];
+
+struct mb_driver blitdriver = {
+ blitprobe,
+ 0, /* slave routine */
+ blitattach,
+ 0, 0, 0, /* go, done, intr routines */
+ BLIT_MAPPED_SIZE,
+ "blit", blitinfo, /* device info */
+ 0, 0, /* no controller */
+ 0 /* no flags */
+ /* rest zeros */
+};
+#endif /* AUTOINIT */
+
+
+/*
+ * Per-card bookkeeping information for driver.
+ *
+ * "scrstrip" and "dpctlregs" point to data areas that are passed to
+ * the Display Processor. They are allocated out of the spare
+ * graphics memory. "scrstrip" is used to describe an entire screen.
+ * "dpctlregs" contains assorted parameters for the display
+ * controller.
+ *
+ * "firstfree" is an offset into the graphics memory. Memory starting
+ * there can be allocated by users.
+ */
+
+struct blitsoft {
+ struct blitdev *blt; /* ptr to mapped card */
+ caddr_t physaddr; /* start of mapped card */
+ boolean_t open; /* is device open? */
+ struct screen_descrip *scrstrip;
+ DPCONTROLBLK *dpctlregs;
+ int firstfree;
+} blitsoft[NBLIT];
+
+
+/*
+ * The following array contains the initial settings for
+ * the Display Processor Control Block Registers.
+ * The video timing signals in this array are for the
+ * Bell Technologies Blit Express running in 1664 x 1200 x 1 mode.
+ * Please treat as read-only.
+ */
+
+DPCONTROLBLK blit_mparm = {
+ DP_DSP_ON, /* video status */
+ 0x00ff, /* interrupt mask - all disabled */
+ 0x0010, /* trip point */
+ 0x00ff, /* frame interrupt interval */
+ 0x0000, /* reserved */
+ CRTM_NONINTER | CRTM_SUPHIGH_SPEED, /* CRT controller mode */
+ 41, /* horizontal synch stop */
+ 57, /* horiz field start */
+ 265, /* horiz field stop */
+ 265, /* line length */
+ 15, /* vert synch stop */
+ 43, /* vert field start */
+ 1243, /* vert field stop */
+ 1244, /* frame length */
+ 0x0000, 0x0000, /* descriptor pointer */
+ 0x0000, /* reserved */
+ 0x0101, /* x, y zoom factors */
+ 0x0000, /* FldColor */
+ 0x00ff, /* BdrColor */
+ 0x0000, /* 1Bpp Pad */
+ 0x0000, /* 2Bpp Pad */
+ 0x0000, /* 4Bpp Pad */
+ DP_CURSOR_CROSSHAIR, /* cursor style & mode */
+ 0x00A0, 0x0050, /* cursor x & y loc. */
+ /* cursor pattern */
+ 0xfffe, 0xfffc, 0xc018, 0xc030, 0xc060, 0xc0c0, 0xc0c0, 0xc060,
+ 0xc430, 0xce18, 0xdb0c, 0xf186, 0xe0c3, 0xc066, 0x803c, 0x0018
+};
+
+void blitreboot();
+
+/***********
+ *
+ * Initialization.
+ *
+ ***********/
+
+
+/*
+ * Probe - is the board there?
+ *
+ * in: reg = start of mapped Blit memory.
+ *
+ * out: returns size of mapped Blit memory if the board is present,
+ * 0 otherwise.
+ *
+ * effects: if the board is present, it is reset and left visible in
+ * Unix mode.
+ */
+
+#if AUTOINIT
+/*ARGSUSED*/
+int
+blitprobe(reg, unit)
+ caddr_t reg;
+ int unit;
+{
+ struct blitdev *blt = (struct blitdev *)reg;
+
+ if (blit_present())
+ return(BLIT_MAPPED_SIZE); /* go */
+ else
+ return(0); /* no-go */
+}
+#endif /* AUTOINIT */
+
+
+/*
+ * Temporary initialization routine. This will go away when we have
+ * autoconfig.
+ */
+
+blitinit()
+{
+ if (!blit_present())
+ return;
+
+ blit_init();
+}
+
+
+/*
+ * Allocate needed objects from Blit's memory.
+ */
+blit_memory_init(bs)
+ struct blitsoft *bs;
+{
+ struct blitdev *blt = bs->blt;
+ struct blitmem *bm = (struct blitmem *)blt->graphmem;
+ u_char *p = bm->spare;
+
+ if ((int)p % 2 == 1)
+ ++p;
+
+ bs->scrstrip = (struct screen_descrip *)p;
+ p += sizeof(struct screen_descrip);
+ if ((int)p % 2 == 1)
+ ++p;
+
+ bs->dpctlregs = (DPCONTROLBLK *)p;
+ p += sizeof(DPCONTROLBLK);
+ if ((int)p % 2 == 1)
+ ++p;
+
+ /*
+ * Note: if you use the 786 graphics processor for character
+ * processing, you should copy the font from the ROM into
+ * graphics memory and change font_start to point to it.
+ * Otherwise, the 786 will have problems accessing the font.
+ */
+
+ bs->firstfree = p - blt->graphmem;
+}
+
+
+/*
+ * Reset the Blit board and leave it visible.
+ */
+
+blit_reset_board()
+{
+ union blit_config_reg config;
+
+ config.byte = inb(BLIT_CONFIG_ADDR);
+ config.reg.reset = 1;
+ outb(BLIT_CONFIG_ADDR, config.byte);
+ config.reg.reset = 0;
+ config.reg.mode = BLIT_UNIX_MODE;
+ config.reg.invisible = BLIT_VISIBLE;
+ outb(BLIT_CONFIG_ADDR, config.byte);
+ setstatus(CARD_RESET);
+}
+
+
+#if AUTOINIT
+/*
+ * Attach - finish initialization by setting up the 786.
+ */
+
+blitattach(md)
+ struct mb_device *md;
+{
+ struct blitdev *blt = (struct blitdev *)md->md_addr;
+
+ blit_init();
+}
+#endif /* AUTOINIT */
+
+
+/*
+ * Initialize Bus Interface Unit.
+ */
+
+init_biu(blt)
+ struct blitdev *blt;
+{
+ WRITEREG8(blt, INTER_RELOC, 0);
+ WRITEREG8(blt, BIU_CONTROL, BIU_16BIT);
+
+ /* WRITEREG16(blt, DRAM_REFRESH, 0x003f); */
+ WRITEREG16(blt, DRAM_REFRESH, 0x0018); /* refresh rate */
+ WRITEREG16(blt, DRAM_CONTROL,
+ MEMROWS1 | FASTPG_INTERLV | HEIGHT_256K);
+ WRITEREG16(blt, DP_PRIORITY, (7 << 3) | 7); /* max pri */
+ WRITEREG16(blt, GP_PRIORITY, (1 << 3) | 1); /* almost min pri */
+ WRITEREG16(blt, EXT_PRIORITY, 5 << 3);
+
+ /* now freeze the settings */
+ WRITEREG16(blt, BIU_CONTROL, BIU_16BIT | BIU_WP1);
+
+ /* Put graphics processor into Poll state. */
+ WRITEREG16(blt, GP_OPCODE_REG, (OP_LINK|GECL));
+}
+
+
+/*
+ * Initialize the Display Processor.
+ * XXX - assumes only 1 card is installed, assumes monochrome display.
+ */
+
+init_dp(bs)
+ struct blitsoft *bs;
+{
+ struct blitdev *blt = bs->blt;
+ struct blitmem *bm = (struct blitmem *)blt->graphmem;
+
+ /*
+ * Set up strip header and tile descriptor for the whole
+ * screen. It's not clear why the C bit should be turned on,
+ * but it seems to get rid of the nasty flickering you can get
+ * by positioning an xterm window along the top of the screen.
+ */
+ bs->scrstrip->strip.lines = BLIT_MONOHEIGHT - 1;
+ bs->scrstrip->strip.linkl = 0;
+ bs->scrstrip->strip.linkh = 0;
+ bs->scrstrip->strip.tiles = DP_C_BIT | (1 - 1);
+ tiledesc(&bs->scrstrip->tile,
+ 0, 0, /* x, y */
+ BLIT_MONOWIDTH, /* width of strip */
+ BLIT_MONOWIDTH, /* width of bitmap */
+ VM_TO_ADDR786(bm->fb.mono_fb, blt), /* the actual bitmap */
+ 1); /* bits per pixel */
+
+ /* Copy into DP register block. */
+ *(bs->dpctlregs) = blit_mparm;
+ bs->dpctlregs->descl = DP_ADDRLOW(VM_TO_ADDR786(bs->scrstrip, blt));
+ bs->dpctlregs->desch = DP_ADDRHIGH(VM_TO_ADDR786(bs->scrstrip, blt));
+
+ /* Load the DP with the register block */
+ loadall(blt, bs->dpctlregs);
+}
+
+
+/*
+ * Fill in a tile descriptor.
+ */
+
+static
+tiledesc(tile, x, y, w, ww, adx, bpp)
+ TILEDESC *tile; /* pointer to tile descriptor */
+ int x; /* starting x in bitmap */
+ int y; /* starting y in bitmap */
+ int w; /* width of strip (in bits) */
+ int ww; /* actual width of bitmap (bits) */
+ addr786_t adx; /* start of bitmap */
+ int bpp; /* bits per pixel */
+{
+ u_short bm_width;
+ short rghtp;
+ short adr_left, adr_right;
+ addr786_t bmstadr;
+ u_short start_stop_bit;
+
+ bm_width = 2 * (((ww + 1) * bpp) / 16);
+ rghtp = x + w - 1;
+ adr_left = ((x * bpp) / 16) * 2;
+ adr_right = ((rghtp * bpp) / 16) * 2;
+ bmstadr = (ww * y) + adr_left + (int)adx;
+ start_stop_bit = ((((16 - 1) - ((x * bpp) % 16)) << 4) +
+ ((16 - ((rghtp + 1) * bpp) % 16) % 16) +
+ (bpp << 8));
+
+ tile->bitmapw = bm_width;
+ tile->meml = DP_ADDRLOW(bmstadr);
+ tile->memh = DP_ADDRHIGH(bmstadr);
+ tile->bppss = start_stop_bit;
+ tile->fetchcnt = adr_right - adr_left;
+ tile->flags = 0;
+}
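+
+/*
+ * Worked example (illustrative only, not part of the original driver):
+ * for the full-screen tile set up in init_dp -- x = 0, y = 0,
+ * w = ww = BLIT_MONOWIDTH (1664), bpp = 1 -- the arithmetic above gives
+ *
+ * bm_width = 2 * ((1665 * 1) / 16) = 208 bytes per scan line,
+ * adr_left = 0, adr_right = (1663 / 16) * 2 = 206, fetchcnt = 206,
+ * start_stop_bit = (15 << 4) | 0 | (1 << 8) = 0x1f0,
+ *
+ * i.e. the high byte carries bpp, bits 4-7 the start bit (15), and
+ * bits 0-3 the stop bit (0).
+ */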
+
+
+/*
+ * Cause the Display Processor to load its Control Registers from
+ * "vm_addr".
+ */
+
+static
+loadall(blt, vm_addr)
+struct blitdev *blt;
+DPCONTROLBLK *vm_addr;
+{
+ addr786_t blit_addr = VM_TO_ADDR786(vm_addr, blt);
+ int i;
+
+ /* set up dp address */
+ WRITEREG16(blt, DP_PARM1_REG, DP_ADDRLOW(blit_addr));
+ WRITEREG16(blt, DP_PARM2_REG, DP_ADDRHIGH(blit_addr));
+
+ /* set blanking video */
+ WRITEREG16(blt, DEF_VIDEO_REG, 0);
+
+ /* load opcode to start dp */
+ WRITEREG16(blt, DP_OPCODE_REG, DP_LOADALL);
+
+ /* wait for acceptance */
+ for (i = 0; i < DP_RDYTIMEOUT; ++i)
+ if (READREG(blt, DP_OPCODE_REG) & DECL)
+ break;
+
+ if (i >= DP_RDYTIMEOUT) {
+ printf("Blit Display Processor timeout (loading registers)\n");
+ hang:
+ goto hang;
+ }
+
+#ifdef notdef
+ /* wait for acceptance */
+ CDELAY((READREG(blt, DP_OPCODE_REG) & DECL) != 0, DP_RDYTIMEOUT);
+ if ((READREG(blt, DP_OPCODE_REG) & DECL) == 0) {
+ printf("Blit Display Processor timeout (loading registers)\n");
+ hang:
+ goto hang;
+ }
+#endif /* notdef */
+}
+
+
+/*
+ * blit_present: returns YES if Blit is present. For the first call,
+ * the hardware is probed. After that, a flag is used.
+ * Sets blitsoft[0].blt and blitsoft[0].physaddr.
+ */
+
+#define TEST_BYTE 0xa5 /* should not be all 0's or 1's */
+
+boolean_t
+blit_present()
+{
+ static boolean_t present = FALSE;
+ static boolean_t initialized = FALSE;
+ struct blitdev *blt;
+ boolean_t blit_rom_ok();
+ struct blitdev *mapblit();
+ void freeblit();
+
+ /*
+ * We set "initialized" early on so that if the Blit init. code
+ * fails, kdb will still be able to use the EGA or VGA display
+ * (if present).
+ */
+ if (initialized)
+ return(present);
+ initialized = TRUE;
+
+ blit_reset_board();
+ blt = mapblit((caddr_t)BLIT_BASE_ADDR, BLIT_MAPPED_SIZE);
+ setstatus(CARD_MAPPED);
+ if (blt == NULL)
+ panic("blit: can't map display");
+ blt->graphmem[0] = TEST_BYTE;
+ present = FALSE;
+ if (blt->graphmem[0] == TEST_BYTE) {
+ setstatus(CARD_MAYBE_PRESENT);
+ present = blit_rom_ok(blt);
+ }
+ if (present) {
+ blitsoft[0].blt = blt;
+ blitsoft[0].physaddr = (caddr_t)BLIT_BASE_ADDR;
+ setstatus(CARD_PRESENT);
+ }
+ else
+ freeblit((vm_offset_t)blt, BLIT_MAPPED_SIZE);
+ return(present);
+}
+
+#undef TEST_BYTE
+
+
+/*
+ * mapblit: map the card into kernel vm and return the (virtual)
+ * address.
+ */
+struct blitdev *
+mapblit(physaddr, length)
+caddr_t physaddr; /* start of card */
+int length; /* num bytes to map */
+{
+ vm_offset_t vmaddr;
+#ifdef MACH_KERNEL
+ vm_offset_t io_map();
+#else /* MACH_KERNEL */
+ vm_offset_t pmap_map_bd();
+#endif /* MACH_KERNEL */
+
+ if (physaddr != (caddr_t)trunc_page(physaddr))
+ panic("Blit card not on page boundary");
+
+#ifdef MACH_KERNEL
+ vmaddr = io_map((vm_offset_t)physaddr, length);
+ if (vmaddr == 0)
+#else /* MACH_KERNEL */
+ if (kmem_alloc_pageable(kernel_map,
+ &vmaddr, round_page(BLIT_MAPPED_SIZE))
+ != KERN_SUCCESS)
+#endif /* MACH_KERNEL */
+ panic("can't alloc VM for Blit card");
+
+ (void)pmap_map_bd(vmaddr, (vm_offset_t)physaddr,
+ (vm_offset_t)physaddr+length,
+ VM_PROT_READ | VM_PROT_WRITE);
+ return((struct blitdev *)vmaddr);
+}
+
+
+/*
+ * freeblit: free card from memory.
+ * XXX - currently a no-op.
+ */
+void
+freeblit(va, length)
+vm_offset_t va; /* virt addr start of card */
+int length;
+{
+}
+
+
+/*
+ * blit_init: initialize globals & hardware, and set cursor. Could be
+ * called twice, once as part of kd initialization and once as part of
+ * blit initialization. Should not be called before blit_present() is
+ * called.
+ */
+
+void
+blit_init()
+{
+ static boolean_t initialized = FALSE;
+ struct blitmem *gmem; /* start of blit graphics memory */
+ int card;
+ void getfontinfo(), clear_blit();
+
+ if (initialized)
+ return;
+
+ for (card = 0; card < NBLIT; ++card) {
+ if (card > 0) {
+ blitsoft[card].blt = NULL;
+ blitsoft[card].physaddr = NULL;
+ }
+ blitsoft[card].open = FALSE;
+ blitsoft[card].scrstrip = NULL;
+ blitsoft[card].dpctlregs = NULL;
+ blitsoft[card].firstfree = 0;
+ }
+
+ /*
+ * blit_memory_init allocates memory used by the Display Processor,
+ * so it comes before the call to init_dp. blit_memory_init
+ * potentially copies the font from ROM into the graphics memory,
+ * so it comes after the call to getfontinfo.
+ */
+ getfontinfo(blitsoft[0].blt); /* get info & check assumptions */
+ blit_memory_init(&blitsoft[0]);
+
+ /* init 786 */
+ init_biu(blitsoft[0].blt);
+ setstatus(BIU_INIT);
+ init_dp(&blitsoft[0]);
+ setstatus(DP_INIT);
+
+ gmem = (struct blitmem *)blitsoft[0].blt->graphmem;
+ vid_start = gmem->fb.mono_fb;
+ kd_lines = 25;
+ kd_cols = 80;
+ kd_attr = KA_NORMAL;
+
+ /*
+ * Use generic bitmap routines, no 786 assist (see
+ * blit_memory_init).
+ */
+ kd_dput = bmpput;
+ kd_dmvup = bmpmvup;
+ kd_dmvdown = bmpmvdown;
+ kd_dclear = bmpclear;
+ kd_dsetcursor = bmpsetcursor;
+ kd_dreset = blitreboot;
+
+ clear_blit(blitsoft[0].blt);
+ (*kd_dsetcursor)(0);
+
+ initialized = TRUE;
+}
+
+
+/*
+ * blit_rom_ok: make sure we're looking at the ROM for a monochrome
+ * Blit.
+ */
+
+boolean_t
+blit_rom_ok(blt)
+ struct blitdev *blt;
+{
+ short magic;
+ short bpp;
+
+ magic = READROM(blt->eprom, EP_MAGIC1);
+ if (magic != EP_MAGIC1_VAL) {
+#ifdef notdef
+ printf("blit: magic1 bad (0x%x)\n", magic);
+#endif
+ return(FALSE);
+ }
+ magic = READROM(blt->eprom, EP_MAGIC2);
+ if (magic != EP_MAGIC2_VAL) {
+#ifdef notdef
+ printf("blit: magic2 bad (0x%x)\n", magic);
+#endif
+ return(FALSE);
+ }
+ bpp = READROM(blt->eprom, EP_BPP);
+ if (bpp != 1) {
+#ifdef notdef
+ printf("blit: not monochrome board (bpp = 0x%x)\n", bpp);
+#endif
+ return(FALSE);
+ }
+
+ return(TRUE);
+}
+
+
+/*
+ * getfontinfo: get information about the font and make sure that
+ * our simplifying assumptions are valid.
+ */
+
+void
+getfontinfo(blt)
+ struct blitdev *blt;
+{
+ u_char *rom = blt->eprom;
+ short fontoffset;
+ short pick_cursor_height();
+
+ fb_width = BLIT_MONOWIDTH;
+ fb_height = BLIT_MONOHEIGHT;
+ chars_in_font = READROM(rom, EP_NUMCHARS);
+ char_width = READROM(rom, EP_CHARWIDTH);
+ char_height = READROM(rom, EP_CHARHEIGHT);
+ fontoffset = READROM(rom, EP_FONTSTART);
+ xstart = READROM(rom, EP_XSTART);
+ ystart = READROM(rom, EP_YSTART);
+ char_black = BLIT_BLACK_BYTE;
+ char_white = BLIT_WHITE_BYTE;
+
+ font_start = rom + fontoffset;
+
+ /*
+ * Check byte-alignment assumption.
+ * XXX - does it do any good to panic when initializing the
+ * console driver?
+ */
+ if (char_width % 8 != 0)
+ panic("blit: char width not integral num of bytes");
+ if (xstart % 8 != 0) {
+ /* move it to a more convenient location */
+ printf("blit: console corner moved.\n");
+ xstart = 8 * (xstart/8);
+ }
+
+ cursor_height = pick_cursor_height();
+ char_byte_width = char_width / 8;
+ fb_byte_width = BLIT_MONOWIDTH / 8;
+ font_byte_width = char_byte_width * chars_in_font;
+}
+
+
+/*
+ * pick_cursor_height: pick a size for the cursor, based on the font
+ * size.
+ */
+
+short
+pick_cursor_height()
+{
+ int scl_avail; /* scan lines available for console */
+ int scl_per_line; /* scan lines per console line */
+
+ /*
+ * scan lines avail. = total lines - top margin;
+ * no bottom margin (XXX).
+ */
+ scl_avail = BLIT_MONOHEIGHT - ystart;
+
+ scl_per_line = scl_avail / kd_lines;
+ if (scl_per_line < char_height)
+ return(1);
+ else
+ return(scl_per_line - char_height);
+}
+
+
+/*
+ * setstatus: Give a status indication to the user. Ideally, we'd
+ * just set the 3 user-controlled LED's. Unfortunately, that doesn't
+ * seem to work. So, we ring the bell.
+ */
+
+static void
+setstatus(val)
+ int val;
+{
+ union blit_diag_reg diag;
+
+ diag.byte = inb(BLIT_DIAG_ADDR);
+ diag.reg.led0 = (val & 1) ? BLIT_LED_ON : BLIT_LED_OFF;
+ diag.reg.led1 = (val & 2) ? BLIT_LED_ON : BLIT_LED_OFF;
+ diag.reg.led2 = (val & 4) ? BLIT_LED_ON : BLIT_LED_OFF;
+ outb(BLIT_DIAG_ADDR, diag.byte);
+
+#ifdef DEBUG
+ for (val &= 7; val > 0; val--) {
+ feep();
+ pause();
+ }
+ for (val = 0; val < 10; val++) {
+ pause();
+ }
+#endif
+}
+
+
+
+/***********
+ *
+ * Other (non-initialization) routines.
+ *
+ ***********/
+
+
+/*
+ * Open - Verify that minor device is OK and not in use, then clear
+ * the screen.
+ */
+
+/*ARGSUSED*/
+int
+blitopen(dev, flag)
+ dev_t dev;
+ int flag;
+{
+ void clear_blit();
+ int which = minor(dev);
+
+ if (!blit_present() || which >= NBLIT)
+ return(ENXIO);
+ if (blitsoft[which].open)
+ return(EBUSY);
+
+ clear_blit(blitsoft[which].blt);
+ blitsoft[which].open = TRUE;
+ return(0); /* ok */
+}
+
+
+/*
+ * Close - free any kernel memory structures that were allocated while
+ * the device was open (currently none).
+ */
+
+/*ARGSUSED*/
+blitclose(dev, flag)
+ dev_t dev;
+ int flag;
+{
+ int which = minor(dev);
+
+ if (!blitsoft[which].open)
+ panic("blit: closing not-open device??");
+ blitsoft[which].open = FALSE;
+}
+
+
+/*
+ * Mmap.
+ */
+
+/*ARGSUSED*/
+int
+blitmmap(dev, off, prot)
+ dev_t dev;
+ off_t off;
+ int prot;
+{
+ if ((u_int) off >= BLIT_MAPPED_SIZE)
+ return(-1);
+
+ /* Get page frame number for the page to be mapped. */
+ return(i386_btop(blitsoft[minor(dev)].physaddr + off));
+}
+
+
+/*
+ * Ioctl.
+ */
+
+#ifdef MACH_KERNEL
+io_return_t blit_get_stat(dev, flavor, data, count)
+ dev_t dev;
+ int flavor;
+ int *data; /* pointer to OUT array */
+ unsigned int *count; /* OUT */
+{
+ int which = minor(dev);
+
+ switch (flavor) {
+ case BLIT_1ST_UNUSED:
+ if (*count < 1)
+ return (D_INVALID_OPERATION);
+ *data = blitsoft[which].firstfree;
+ *count = 1;
+ break;
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+#else /* MACH_KERNEL */
+/*ARGSUSED*/
+int
+blitioctl(dev, cmd, data, flag)
+ dev_t dev;
+ int cmd;
+ caddr_t data;
+ int flag;
+{
+ int which = minor(dev);
+ int err = 0;
+
+ switch (cmd) {
+ case BLIT_1ST_UNUSED:
+ *(int *)data = blitsoft[which].firstfree;
+ break;
+ default:
+ err = ENOTTY;
+ }
+
+ return(err);
+}
+#endif /* MACH_KERNEL */
+
+/*
+ * clear_blit: clear blit's screen.
+ */
+
+void
+clear_blit(blt)
+ struct blitdev *blt;
+{
+ (*kd_dclear)(0, kd_lines*kd_cols, KA_NORMAL);
+}
+
+/*
+ * Put the board into DOS mode in preparation for rebooting.
+ */
+
+void
+blitreboot()
+{
+ union blit_config_reg config;
+
+ config.byte = inb(BLIT_CONFIG_ADDR);
+ config.reg.mode = BLIT_DOS_MODE;
+ config.reg.invisible = BLIT_VISIBLE;
+ outb(BLIT_CONFIG_ADDR, config.byte);
+}
+
+#endif /* NBLIT > 0 */
+#endif /* NBLIT > MAXBLITS */
diff --git a/i386/i386at/blitreg.h b/i386/i386at/blitreg.h
new file mode 100644
index 00000000..7226aca5
--- /dev/null
+++ b/i386/i386at/blitreg.h
@@ -0,0 +1,404 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: blitreg.h
+ Description: Bell Tech Blit card hardware description
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Some code taken from Bob Glossman's 1987 "minimal Blit Express
+ * driver", copyright unknown. Probably copyright Intel, too.
+ */
+
+
+#ifndef blitreg_DEFINED
+#define blitreg_DEFINED
+
+
+/*
+ * Registers accessible through AT I/O space. These addresses can be
+ * changed by changing bits 4-8 of the Blit's DIP switch.
+ */
+
+#define BLIT_CONFIG_ADDR 0x304
+#define BLIT_DIAG_ADDR 0x306
+
+#if defined(sun386) || defined(i386)
+
+
+/*
+ * Layout of Blit control register.
+ */
+
+union blit_config_reg {
+ struct config_bits {
+ unsigned dos_segment : 4;
+ unsigned reset : 1;
+ unsigned mode : 1;
+#define BLIT_UNIX_MODE 1
+#define BLIT_DOS_MODE 0
+ unsigned invisible : 1;
+#define BLIT_INVISIBLE 1
+#define BLIT_VISIBLE 0
+ unsigned unused : 1;
+ } reg;
+ u_char byte;
+};
+
+
+/*
+ * Blit Diag register.
+ * The UNIX base address is currently hardwired to BLIT_BASE_ADDR.
+ */
+
+#define BLIT_BASE_ADDR 0xd80000 /* base of blit memory (phys addr) */
+
+union blit_diag_reg {
+ struct diag_bits {
+ unsigned unix_base_addr : 5; /* phys addr (ignored) */
+ unsigned led0 : 1;
+ unsigned led1 : 1;
+ unsigned led2 : 1;
+#define BLIT_LED_ON 1
+#define BLIT_LED_OFF 0
+ } reg;
+ u_char byte;
+};
+
+#endif /* sun386 || i386 */
+
+
+/*
+ * Graphics memory, 786 registers, static RAM, and EPROM, all
+ * accessible through mapped memory.
+ */
+
+#define BLIT_MONOWIDTH 1664
+#define BLIT_MONOHEIGHT 1200
+#define BLIT_MONOFBSIZE ((BLIT_MONOWIDTH*BLIT_MONOHEIGHT)/8)
+ /* byte size of monochrome fb */
+
+#define BLIT_MEMSIZE 0x100000 /* num bytes mapped graphics memory */
+
+#define BLIT_REGSIZE 128 /* bytes taken by 786 registers */
+#define BLIT_REGPAD (0x10000 - BLIT_REGSIZE)
+ /* padding between reg's and SRAM */
+
+#define BLIT_SRAMSIZE 0x4000 /* num bytes mapped for SRAM */
+#define BLIT_SRAMPAD (0x10000 - BLIT_SRAMSIZE)
+ /* padding between SRAM and EPROM */
+
+#define BLIT_EPROMSIZE 0x20000 /* num bytes mapped for EPROM */
+
+
+/*
+ * Layout of the Blit's mapped memory. The physical address is (or
+ * will be, eventually) determined by the Diag register (above).
+ */
+
+struct blitdev {
+ u_char graphmem[BLIT_MEMSIZE];
+ u_char reg786[BLIT_REGSIZE];
+ u_char pad1[BLIT_REGPAD];
+ u_char sram[BLIT_SRAMSIZE];
+ u_char pad2[BLIT_SRAMPAD];
+ u_char eprom[BLIT_EPROMSIZE];
+};
+
+#define BLIT_MAPPED_SIZE sizeof(struct blitdev)
+
+
+/*
+ * Offsets for 786 registers (i.e., indices into reg786[]).
+ */
+
+#define INTER_RELOC 0x00 /* Internal Relocation Register */
+#define BIU_CONTROL 0x04 /* BIU Control Register */
+#define DRAM_REFRESH 0x06 /* DRAM Refresh control register */
+#define DRAM_CONTROL 0x08 /* DRAM control register */
+#define DP_PRIORITY 0x0A /* DP priority register */
+#define GP_PRIORITY 0x0C /* GP priority register*/
+#define EXT_PRIORITY 0x0E /* External Priority Register*/
+#define GP_OPCODE_REG 0x20 /* GP opcode register */
+#define GP_PARM1_REG 0x22 /* GP Parameter 1 Register */
+#define GP_PARM2_REG 0x24 /* GP Parameter 2 Register*/
+#define GP_STAT_REG 0x26 /* GP Status Register*/
+#define DP_OPCODE_REG 0x40 /* DP opcode register */
+#define DP_PARM1_REG 0x42 /* DP Parameter 1 Register*/
+#define DP_PARM2_REG 0x44 /* DP Parameter 2 Register*/
+#define DP_PARM3_REG 0x46 /* DP Parameter 3 Register*/
+#define DP_STAT_REG 0x48 /* DP Status Register*/
+#define DEF_VIDEO_REG 0x4A /* DP Default Video Register*/
+
+
+/*
+ * 786 BIU Control Register values.
+ */
+
+#define BIU_WP1 0x02 /* Write Protect One; 1 = on */
+#define BIU_16BIT 0x10 /* access 786 registers as words; 0 = bytes */
+
+
+/*
+ * 786 DRAM/VRAM Control Register values.
+ */
+
+/* RW bits */
+#define MEMROWS1 0
+#define MEMROWS2 0x20
+#define MEMROWS3 0x40
+#define MEMROWS4 0x60
+
+/* DC bits */
+#define PG_NONINTERLV 0
+#define FASTPG_NONINTERLV 0x10
+#define PG_INTERLV 0x08
+#define FASTPG_INTERLV 0x18
+
+/* HT bits */
+#define HEIGHT_8K 0
+#define HEIGHT_16K 0x1
+#define HEIGHT_32K 0x2
+#define HEIGHT_64K 0x3
+#define HEIGHT_128K 0x4
+#define HEIGHT_256K 0x5
+#define HEIGHT_512K 0x6
+#define HEIGHT_1M 0x7
+
+
+/*
+ * 786 Graphics Processor opcodes.
+ */
+
+#define GECL 0x001 /* end of command list */
+#define OP_LINK 0x200 /* LINK - "link next cmd" */
+
+
+/*
+ * 786 Display Processor opcodes.
+ */
+
+#define DECL 1 /* end of list */
+#define DP_LOADALL 0x500
+
+
+/*
+ * Macros for accessing 786 registers (see BIU_16BIT) and EPROM.
+ */
+
+#define WRITEREG8(base,offset,val) \
+ (base)->reg786[(offset)] = (val) & 0xff, \
+ (base)->reg786[(offset)+1] = ((val) & 0xff00) >> 8
+
+#define WRITEREG16(base,offset,val) \
+ (*((u_short *)((base)->reg786+(offset)))) = (val)
+
+#define READREG(base,offset) \
+ (*((u_short *)(((base)->reg786+(offset)))))
+
+#define WRITEROM(romp,offset,val) \
+ (*((u_short *)((romp)+(offset)))) = (val)
+
+#define READROM(romp,offset) \
+ (*((u_short *)(((romp)+(offset)))))
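+
+/*
+ * Illustrative example (0x1234 is an arbitrary value chosen for the
+ * sketch): WRITEREG8(blt, INTER_RELOC, 0x1234) performs two 8-bit
+ * stores, 0x34 at reg786[INTER_RELOC] and 0x12 at reg786[INTER_RELOC+1],
+ * presumably for use while the BIU is still in 8-bit mode; WRITEREG16
+ * makes the same update as a single 16-bit store once BIU_16BIT is set.
+ */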
+
+
+/*
+ * Layout of Display Processor Control Block Registers. This block is
+ * allocated somewhere in the Blit's graphics memory, and a pointer to
+ * it is passed to the Display Processor.
+ *
+ * NOTE: The 786 only sees the memory mapped by the Blit. Thus all
+ * addresses passed to the 786 are relative to the start of the Blit's
+ * mapped memory.
+ */
+
+typedef int addr786_t; /* 0 = start of Blit mapped memory */
+
+typedef struct {
+ u_short vidstat; /* video status */
+ u_short intrmask; /* interrupt mask */
+ u_short trip_point;
+ u_short frame_intr; /* frame interrupt */
+ u_short reserved1;
+ u_short crtmode; /* CRT controller mode */
+ u_short hsyncstop; /* monitor parameters */
+ u_short hfldstart;
+ u_short hfldstop;
+ u_short linelength;
+ u_short vsyncstop;
+ u_short vfldstart;
+ u_short vfldstop;
+ u_short vframelen;
+ u_short descl; /* descriptor pointer low part */
+ u_short desch; /* descriptor pointer high part */
+ u_short reserved2;
+ u_short xyzoom;
+ u_short fldcolor;
+ u_short bordercolor;
+ u_short bpp_pad1;
+ u_short bpp_pad2;
+ u_short bpp_pad4;
+ u_short csrmode; /* & CsrPad */
+ u_short cursorx; /* cursor x location */
+ u_short cursory; /* cursor y location */
+ u_short cursorpat[16]; /* cursor pattern */
+} DPCONTROLBLK;
+
+
+/*
+ * Values for 786 Display Processor Control Block Registers.
+ */
+
+/* video status */
+#define DP_DSP_ON 1 /* display on */
+#define DP_CSR_ON 2 /* cursor on */
+
+/* CRT controller modes */
+#define CRTM_NONINTER 0 /* non-interlaced */
+#define CRTM_INTERLCD 0x40 /* interlaced */
+#define CRTM_INTERSYN 0x60 /* interlaced - sync */
+#define CRTM_WIN_STAT_ENABLE 0x10 /* window status enable */
+#define CRTM_SYNC_SLAVE_MODE 0x08 /* on = operate as slave */
+#define CRTM_BLANK_SLAVE_MODE 0x04 /* on = Blank is input */
+#define CRTM_NORMAL_SPEED 0x00
+#define CRTM_HIGH_SPEED 0x01
+#define CRTM_VRYHIGH_SPEED 0x02
+#define CRTM_SUPHIGH_SPEED 0x03
+
+/* cursor style */
+#define DP_CURSOR_16X16 0x8000 /* off = 8x8 */
+#define DP_CURSOR_CROSSHAIR 0x4000 /* off = block cursor */
+#define DP_CURSOR_TRANSPRNT 0x2000 /* off = cursor is opaque */
+
+
+/*
+ * Types for dealing with 786 Display Processor.
+ */
+
+typedef struct {
+ u_short lines; /* (lines in strip) - 1 */
+ u_short linkl; /* link to next strip low part */
+ u_short linkh; /* link to next strip high part */
+ u_short tiles; /* C bit, (tiles in strip) - 1 */
+} STRIPHEADER;
+
+/*
+ * If the C bit is turned on, the display processor "automatically
+ * displays the background color" for areas not defined by the strips.
+ * See section 3.1.3.2 of the '786 User's Manual.
+ */
+#define DP_C_BIT 0x8000
+
+typedef struct {
+ u_short bitmapw; /* width of bitmap */
+ u_short meml; /* btb mem address low part */
+ u_short memh; /* btb mem address high part */
+ u_short bppss; /* bpp, start and stop fields */
+ u_short fetchcnt; /* fetch count */
+ u_short flags; /* various flags */
+} TILEDESC;
+
+
+/*
+ * Macros for encoding addresses for strip headers & tile descriptors.
+ * addr786 is relative to the start of the Blit's mapped memory.
+ */
+
+#define DP_ADDRLOW(addr786) (((int)(addr786)) & 0xffff)
+#define DP_ADDRHIGH(addr786) ((((int)(addr786)) >> 16) & 0x3f)
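+
+/*
+ * Illustrative example: an object 0x0d1234 bytes into the mapped window
+ * is handed to the DP as DP_ADDRLOW = 0x1234 and DP_ADDRHIGH = 0x0d;
+ * only 22 address bits (0x3f:0xffff) survive the encoding.
+ */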
+
+
+/*
+ * Byte offsets to useful data words within the EPROM.
+ */
+
+#define EP_MAGIC1 0
+#define EP_MAGIC1_VAL 0x7856
+#define EP_MAGIC2 2
+#define EP_MAGIC2_VAL 0x6587
+#define EP_DPSTART 4 /* start of DP ctl block */
+ /* (0 = start of EPROM) */
+#define EP_DPLEN 6 /* byte length of DP control block */
+
+#define EP_FONTSTART 8 /* start of font */
+ /* (0 = start of EPROM) */
+#define EP_FONTLEN 10 /* byte length of font */
+#define EP_CHARWIDTH 12 /* bit width of each char in font */
+#define EP_CHARHEIGHT 14
+#define EP_NUMCHARS 16 /* num chars in font */
+
+/* where in the bitmap the 25x80 console screen starts */
+#define EP_XSTART 18
+#define EP_YSTART 20
+
+#define EP_SCREENWIDTH 22 /* pixels per scan line */
+#define EP_SCREENHEIGHT 24 /* number of scan lines */
+
+#define EP_FIXUP_X 26 /* magic numbers for displaying */
+#define EP_FIXUP_Y 28 /* hardware cursor */
+
+#define EP_BPP 30 /* bits per pixel */
+
+
+/*
+ * Miscellaneous.
+ */
+
+#define BLIT_BLACK_BIT 0 /* try saying that 3 times fast */
+#define BLIT_WHITE_BIT 1
+#define BLIT_BLACK_BYTE 0
+#define BLIT_WHITE_BYTE 0xff
+
+
+#endif /* blitreg_DEFINED */
diff --git a/i386/i386at/blituser.h b/i386/i386at/blituser.h
new file mode 100644
index 00000000..0ebcfffb
--- /dev/null
+++ b/i386/i386at/blituser.h
@@ -0,0 +1,73 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: blituser.h
+ Description: User-program definitions for Bell Tech Blit card
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _BLITUSER_
+#define _BLITUSER_
+
+#include <sys/ioctl.h>
+
+/*
+ * Ioctl's.
+ */
+
+/*
+ * BLIT_1ST_UNUSED returns a byte offset into the Blit graphics
+ * memory. The user is free to allocate and use any graphics memory
+ * starting at that offset.
+ */
+
+#define BLIT_1ST_UNUSED _IOR('b', 1, int)
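+
+/*
+ * Hypothetical user-level usage sketch under the BSD-style ioctl path
+ * (illustration only; the device node name /dev/blit0 is an assumption,
+ * not something this header defines):
+ *
+ *	int fd = open("/dev/blit0", O_RDWR);
+ *	int first_free;
+ *
+ *	if (fd >= 0 && ioctl(fd, BLIT_1ST_UNUSED, &first_free) == 0)
+ *		;	/* graphics memory from first_free onward is free */
+ */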
+
+#endif /* _BLITUSER_ */
diff --git a/i386/i386at/blitvar.h b/i386/i386at/blitvar.h
new file mode 100644
index 00000000..58401f39
--- /dev/null
+++ b/i386/i386at/blitvar.h
@@ -0,0 +1,116 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: blitvar.h
+ Description: Definitions used by Blit driver other than h/w definition.
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <i386at/blitreg.h>
+#include <sys/types.h>
+#include <mach/boolean.h>
+
+
+/*
+ * This is how we use the Blit's graphics memory. The frame buffer
+ * goes at the front, and the rest is used for miscellaneous
+ * allocations. Users can use the "spare" memory, but they should do
+ * an ioctl to find out which part of the memory is really free.
+ */
+
+struct blitmem {
+ union blitfb {
+ u_char mono_fb[BLIT_MONOFBSIZE];
+ u_char color_fb[1]; /* place-holder */
+ } fb;
+ u_char spare[BLIT_MEMSIZE - sizeof(union blitfb)];
+};
+
+
+/*
+ * Macro to get from blitdev pointer to monochrome framebuffer.
+ */
+#define BLIT_MONOFB(blt, fbptr) \
+ { struct blitmem *mymem = (struct blitmem *)((blt)->graphmem); \
+ fbptr = mymem->fb.mono_fb; \
+ }
+
+
+/*
+ * Single-tile description that can be used to describe the entire
+ * screen.
+ */
+
+struct screen_descrip {
+ STRIPHEADER strip;
+ TILEDESC tile;
+};
+
+
+/*
+ * Number of microseconds we're willing to wait for display processor
+ * to load its command block.
+ */
+
+#define DP_RDYTIMEOUT 1000000
+
+
+/*
+ * Conversion macros.
+ */
+
+#define VM_TO_ADDR786(vmaddr, blit_base) \
+ ((int)(vmaddr) - (int)(blit_base))
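+
+/*
+ * Illustrative example: if blt is the kernel virtual address the card
+ * was mapped at, then for any pointer p into blt->graphmem,
+ * VM_TO_ADDR786(p, blt) is p's byte offset from the start of the mapped
+ * window -- the addr786_t form the 786 expects (see blitreg.h).
+ */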
+
+
+extern boolean_t blit_present();
+extern void blit_init();
diff --git a/i386/i386at/boothdr.S b/i386/i386at/boothdr.S
new file mode 100644
index 00000000..de807538
--- /dev/null
+++ b/i386/i386at/boothdr.S
@@ -0,0 +1,62 @@
+
+#include <mach/machine/asm.h>
+
+#include "i386asm.h"
+
+ .text
+
+ /* We should never be entered this way. */
+ .globl start,_start
+start:
+_start:
+ jmp boot_entry
+
+ /* MultiBoot header - see multiboot.h. */
+#define MULTIBOOT_MAGIC 0x1BADB002
+#ifdef __ELF__
+#define MULTIBOOT_FLAGS 0x00000002
+#else /* __ELF__ */
+#define MULTIBOOT_FLAGS 0x00010002
+#endif /* __ELF__ */
+ P2ALIGN(2)
+boot_hdr:
+ .long MULTIBOOT_MAGIC
+ .long MULTIBOOT_FLAGS
+ /*
+ * The next item here is the checksum.
+ * XX this works OK until we need at least the 30th bit.
+ */
+ .long - (MULTIBOOT_MAGIC+MULTIBOOT_FLAGS)
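+ /*
+ * For reference: with MULTIBOOT_FLAGS = 0x00000002 (the __ELF__ case)
+ * the three words assemble to 0x1badb002, 0x00000002 and 0xe4524ffc,
+ * which sum to zero mod 2^32 as the Multiboot specification requires.
+ */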
+#ifndef __ELF__ /* a.out kludge */
+ .long boot_hdr /* header_addr */
+ .long _start /* load_addr */
+ .long _edata /* load_end_addr */
+ .long _end /* bss_end_addr */
+ .long boot_entry /* entry */
+#endif /* __ELF__ */
+
+boot_entry:
+
+ /* Switch to our own interrupt stack. */
+ movl $_intstack+INTSTACK_SIZE,%esp
+
+ /* Reset EFLAGS to a known state. */
+ pushl $0
+ popf
+
+ /* Clear uninitialized data. */
+ lea _edata,%edi
+ lea _end,%ecx
+ subl %edi,%ecx
+ xorl %eax,%eax
+ rep
+ stosb
+
+ /* Push the boot_info pointer to be the second argument. */
+ pushl %ebx
+
+ /* Jump into C code. */
+ call EXT(c_boot_entry)
+
+ .comm _intstack,INTSTACK_SIZE
+
diff --git a/i386/i386at/com.c b/i386/i386at/com.c
new file mode 100644
index 00000000..113387f2
--- /dev/null
+++ b/i386/i386at/com.c
@@ -0,0 +1,891 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1993,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <com.h>
+#if NCOM > 0
+
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <device/conf.h>
+#include <device/errno.h>
+#include <device/tty.h>
+#include <device/io_req.h>
+
+#include <i386/ipl.h>
+#include <i386/pio.h>
+#include <i386/machspl.h>
+#include <chips/busses.h>
+#include <i386at/comreg.h>
+
+#include <rc.h>
+#include <cons.h>
+
+extern void timeout(), ttrstrt();
+
+int comprobe(), comintr(), comstart(), commctl();
+void comattach();
+static void comparam();
+int comstop(), comgetstat(), comsetstat();
+
+static vm_offset_t com_std[NCOM] = { 0 };
+struct bus_device *cominfo[NCOM];
+struct bus_driver comdriver = {
+ comprobe, 0, comattach, 0, com_std, "com", cominfo, 0, 0, 0};
+
+struct tty com_tty[NCOM];
+int commodem[NCOM];
+int comcarrier[NCOM] = {0, 0,};
+boolean_t comfifo[NCOM];
+boolean_t comtimer_active;
+int comtimer_state[NCOM];
+
+#if RCLINE >= 0
+#define RCBAUD B9600
+static struct bus_device *comcndev;
+int comcnprobe(struct consdev *cp);
+int comcninit(struct consdev *cp);
+int comcngetc(dev_t dev, int wait);
+int comcnputc(dev_t dev, int c);
+#endif
+
+#ifndef PORTSELECTOR
+#define ISPEED B9600
+#define IFLAGS (EVENP|ODDP|ECHO|CRMOD)
+#else
+#define ISPEED B4800
+#define IFLAGS (EVENP|ODDP)
+#endif
+
+u_short divisorreg[] = {
+ 0, 2304, 1536, 1047, /* 0, 50, 75, 110*/
+ 857, 768, 576, 384, 192, /* 134.5, 150, 200, 300, 600*/
+ 96, 64, 48, /* 1200, 1800, 2000, 2400 */
+ 24, 12, /* 3600, 4800, 7200, 9600 */
+ 6, 3, 2}; /* 19200, 38400, 56000 */
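+
+/*
+ * These are the standard 8250/16450-style divisors for a 1.8432 MHz
+ * UART clock, i.e. divisor = 115200 / baud rate; for example B9600 uses
+ * 115200 / 9600 = 12 and B50 uses 115200 / 50 = 2304, matching the
+ * entries above.
+ */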
+
+
+/*
+ *
+ * Probes are called during kernel boot: return 1 to mean that
+ * the relevant device is present today.
+ *
+ */
+int
+comprobe_general(struct bus_device *dev, int noisy)
+{
+ u_short addr = dev->address;
+ int unit = dev->unit;
+ int oldctl, oldmsb;
+ char *type = "8250";
+ int i;
+
+ if ((unit < 0) || (unit >= NCOM)) {
+ printf("com %d out of range\n", unit);
+ return(0);
+ }
+ oldctl = inb(LINE_CTL(addr)); /* Save old value of LINE_CTL */
+ oldmsb = inb(BAUD_MSB(addr)); /* Save old value of BAUD_MSB */
+ outb(LINE_CTL(addr), 0); /* Select INTR_ENAB */
+ outb(BAUD_MSB(addr), 0);
+ if (inb(BAUD_MSB(addr)) != 0)
+ {
+ outb(LINE_CTL(addr), oldctl);
+ outb(BAUD_MSB(addr), oldmsb);
+ return 0;
+ }
+ outb(LINE_CTL(addr), iDLAB); /* Select BAUD_MSB */
+ outb(BAUD_MSB(addr), 255);
+ if (inb(BAUD_MSB(addr)) != 255)
+ {
+ outb(LINE_CTL(addr), oldctl);
+ outb(BAUD_MSB(addr), oldmsb);
+ return 0;
+ }
+ outb(LINE_CTL(addr), 0); /* Select INTR_ENAB */
+ if (inb(BAUD_MSB(addr)) != 0) /* Check that it has kept its value*/
+ {
+ outb(LINE_CTL(addr), oldctl);
+ outb(BAUD_MSB(addr), oldmsb);
+ return 0;
+ }
+
+ /* Com port found, now check what chip it has */
+
+ for(i = 0; i < 256; i++) /* Is there Scratch register */
+ {
+ outb(SCR(addr), i);
+ if (inb(SCR(addr)) != i)
+ break;
+ }
+ if (i == 256)
+ { /* Yes == 450 or 460 */
+ outb(SCR(addr), 0);
+ type = "82450 or 16450";
+ outb(FIFO_CTL(addr), iFIFOENA | iFIFO14CH); /* Enable fifo */
+ if ((inb(FIFO_CTL(addr)) & iFIFO14CH) != 0)
+ { /* Was it successful? */
+ /* if both bits are not set then broken xx550 */
+ if ((inb(FIFO_CTL(addr)) & iFIFO14CH) == iFIFO14CH)
+ {
+ type = "82550 or 16550";
+ comfifo[unit] = TRUE;
+ }
+ else
+ {
+ type = "82550 or 16550 with non-working FIFO";
+ }
+ outb(INTR_ID(addr), 0x00); /* Disable fifos */
+ }
+ }
+ if (noisy)
+ printf("com%d: %s chip.\n", unit, type);
+ return 1;
+}
+
+/*
+ * Probe routine for use during kernel startup when it is probing
+ * all of bus_device_init
+ */
+int
+comprobe(int port, struct bus_device *dev)
+{
+ return comprobe_general(dev, /*noisy*/ 1);
+}
+
+#if RCLINE >= 0
+/*
+ * Probe routine for use by the console
+ */
+int
+comcnprobe(struct consdev *cp)
+{
+ struct bus_device *b;
+ int maj, unit, pri;
+
+ maj = 0;
+ unit = -1;
+ pri = CN_DEAD;
+
+ for (b = bus_device_init; b->driver; b++)
+ if (strcmp(b->name, "com") == 0
+ && b->unit == RCLINE
+ && comprobe_general(b, /*quiet*/ 0))
+ {
+ /* Found one */
+ comcndev = b;
+ unit = b->unit;
+ pri = CN_REMOTE;
+ break;
+ }
+
+ cp->cn_dev = makedev(maj, unit);
+ cp->cn_pri = pri;
+}
+#endif
+
+
+/*
+ *
+ * Device Attach's are called during kernel boot, but only if the matching
+ * device Probe returned a 1.
+ *
+ */
+void
+comattach(struct bus_device *dev)
+{
+ u_char unit = dev->unit;
+ u_short addr = dev->address;
+
+ take_dev_irq(dev);
+ printf(", port = %x, spl = %d, pic = %d. (DOS COM%d)",
+ dev->address, dev->sysdep, dev->sysdep1, unit+1);
+
+/* comcarrier[unit] = addr->flags;*/
+ commodem[unit] = 0;
+
+ outb(INTR_ENAB(addr), 0);
+ outb(MODEM_CTL(addr), 0);
+ while (!(inb(INTR_ID(addr))&1)) {
+ (void) inb(LINE_STAT (addr)); /* reset overrun error etc */
+ (void) inb(TXRX (addr)); /* reset data-ready */
+ (void) inb(MODEM_STAT(addr)); /* reset modem status reg */
+ }
+}
+
+#if RCLINE >= 0
+/*
+ * Attach/init routine for console. This isn't called by
+ * configure_bus_device which sets the alive, adaptor, and minfo
+ * fields of the bus_device struct (comattach is), therefore we do
+ * that by hand.
+ */
+int
+comcninit(struct consdev *cp)
+{
+ u_char unit = comcndev->unit;
+ u_short addr = comcndev->address;
+
+ take_dev_irq(comcndev);
+
+ comcndev->alive = 1;
+ comcndev->adaptor = 0;
+ cominfo[minor(cp->cn_dev)] = comcndev;
+
+ outb(LINE_CTL(addr), iDLAB);
+ outb(BAUD_LSB(addr), divisorreg[RCBAUD] & 0xff);
+ outb(BAUD_MSB(addr), divisorreg[RCBAUD] >>8);
+ outb(LINE_CTL(addr), i7BITS|iPEN);
+ outb(INTR_ENAB(addr), 0);
+ outb(MODEM_CTL(addr), iDTR|iRTS|iOUT2);
+
+ {
+ char msg[128];
+ volatile unsigned char *p = (volatile unsigned char *)0xb8000;
+ int i;
+
+ sprintf(msg, " **** using COM port %d for console ****",
+ unit+1);
+ for (i = 0; msg[i]; i++) {
+ p[2*i] = msg[i];
+ p[2*i+1] = (0<<7) /* blink */
+ | (0x0<<4) /* bg */
+ | (1<<3) /* hi-intensity */
+ | 0x4; /* fg */
+ }
+ }
+
+}
+#endif
+
+/*
+ * Probe for COM<dev> after autoconfiguration.
+ * Used to handle PCMCIA modems, which may appear
+ * at any time.
+ */
+boolean_t com_reprobe(
+ int unit)
+{
+ struct bus_device *device;
+
+ /*
+ * Look for COM device <unit> in the device
+ * initialization list. It must not be alive
+ * (otherwise we would have opened it already).
+ */
+ for (device = bus_device_init; device->driver; device++) {
+ if (device->driver == &comdriver && device->unit == unit &&
+ !device->alive && device->ctlr == (char)-1)
+ {
+ /*
+ * Found an entry for com port <unit>.
+ * Probe it.
+ */
+ if (configure_bus_device(device->name,
+ device->address,
+ device->phys_address,
+ 0,
+ "atbus"))
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+io_return_t comopen(
+ int dev,
+ int flag,
+ io_req_t ior)
+{
+ int unit = minor(dev);
+ u_short addr;
+ struct bus_device *isai;
+ struct tty *tp;
+ spl_t s;
+ io_return_t result;
+
+ if (unit >= NCOM)
+ return ENXIO; /* no such device */
+ if ((isai = cominfo[unit]) == 0 || isai->alive == 0) {
+ /*
+ * Try to probe it again
+ */
+ if (!com_reprobe(unit))
+ return ENXIO;
+ }
+ tp = &com_tty[unit];
+
+ if ((tp->t_state & (TS_ISOPEN|TS_WOPEN)) == 0) {
+ ttychars(tp);
+ tp->t_addr = (char *)isai->address;
+ tp->t_dev = dev;
+ tp->t_oproc = comstart;
+ tp->t_stop = comstop;
+ tp->t_mctl = commctl;
+ tp->t_getstat = comgetstat;
+ tp->t_setstat = comsetstat;
+#ifndef PORTSELECTOR
+ if (tp->t_ispeed == 0) {
+#else
+ tp->t_state |= TS_HUPCLS;
+#endif /* PORTSELECTOR */
+ tp->t_ispeed = ISPEED;
+ tp->t_ospeed = ISPEED;
+ tp->t_flags = IFLAGS;
+ tp->t_state &= ~TS_BUSY;
+#ifndef PORTSELECTOR
+ }
+#endif /* PORTSELECTOR */
+ }
+/*rvb tp->t_state |= TS_WOPEN; */
+ if ((tp->t_state & TS_ISOPEN) == 0)
+ comparam(unit);
+ addr = (int)tp->t_addr;
+
+ s = spltty();
+ if (!comcarrier[unit]) /* not originating */
+ tp->t_state |= TS_CARR_ON;
+ else {
+ int modem_stat = inb(MODEM_STAT(addr));
+ if (modem_stat & iRLSD)
+ tp->t_state |= TS_CARR_ON;
+ else
+ tp->t_state &= ~TS_CARR_ON;
+ fix_modem_state(unit, modem_stat);
+ }
+ splx(s);
+
+ result = char_open(dev, tp, flag, ior);
+
+ if (!comtimer_active) {
+ comtimer_active = TRUE;
+ comtimer();
+ }
+
+ s = spltty();
+ while(!(inb(INTR_ID(addr))&1)) { /* while pending interrupts */
+ (void) inb(LINE_STAT (addr)); /* reset overrun error */
+ (void) inb(TXRX (addr)); /* reset data-ready */
+ (void) inb(MODEM_STAT(addr)); /* reset modem status */
+ }
+ splx(s);
+ return result;
+}
+
+io_return_t comclose(dev, flag)
+int dev;
+int flag;
+{
+ struct tty *tp = &com_tty[minor(dev)];
+ u_short addr = (int)tp->t_addr;
+
+ ttyclose(tp);
+ if (tp->t_state&TS_HUPCLS || (tp->t_state&TS_ISOPEN)==0) {
+ outb(INTR_ENAB(addr), 0);
+ outb(MODEM_CTL(addr), 0);
+ tp->t_state &= ~TS_BUSY;
+ commodem[minor(dev)] = 0;
+ if (comfifo[minor(dev)] != 0)
+ outb(INTR_ID(addr), 0x00); /* Disable fifos */
+ }
+ return 0;
+}
+
+io_return_t comread(dev, ior)
+int dev;
+io_req_t ior;
+{
+ return char_read(&com_tty[minor(dev)], ior);
+}
+
+io_return_t comwrite(dev, ior)
+int dev;
+io_req_t ior;
+{
+ return char_write(&com_tty[minor(dev)], ior);
+}
+
+io_return_t comportdeath(dev, port)
+dev_t dev;
+mach_port_t port;
+{
+ return (tty_portdeath(&com_tty[minor(dev)], port));
+}
+
+io_return_t
+comgetstat(dev, flavor, data, count)
+dev_t dev;
+int flavor;
+int *data; /* pointer to OUT array */
+unsigned int *count; /* out */
+{
+ io_return_t result = D_SUCCESS;
+ int unit = minor(dev);
+
+ switch (flavor) {
+ case TTY_MODEM:
+ fix_modem_state(unit, inb(MODEM_STAT(cominfo[unit]->address)));
+ *data = commodem[unit];
+ *count = 1;
+ break;
+ default:
+ result = tty_get_status(&com_tty[unit], flavor, data, count);
+ break;
+ }
+ return (result);
+}
+
+io_return_t
+comsetstat(dev, flavor, data, count)
+dev_t dev;
+int flavor;
+int * data;
+unsigned int count;
+{
+ io_return_t result = D_SUCCESS;
+ int unit = minor(dev);
+ struct tty *tp = &com_tty[unit];
+
+ switch (flavor) {
+ case TTY_SET_BREAK:
+ commctl(tp, TM_BRK, DMBIS);
+ break;
+ case TTY_CLEAR_BREAK:
+ commctl(tp, TM_BRK, DMBIC);
+ break;
+ case TTY_MODEM:
+ commctl(tp, *data, DMSET);
+ break;
+ default:
+ result = tty_set_status(&com_tty[unit], flavor, data, count);
+ if (result == D_SUCCESS && flavor == TTY_STATUS)
+ comparam(unit);
+ return (result);
+ }
+ return (D_SUCCESS);
+}
+
+comintr(unit)
+int unit;
+{
+ register struct tty *tp = &com_tty[unit];
+ u_short addr = cominfo[unit]->address;
+ static char comoverrun = 0;
+ char c, line, intr_id;
+ int line_stat;
+
+ while (! ((intr_id=(inb(INTR_ID(addr))&MASKi)) & 1))
+ switch (intr_id) {
+ case MODi:
+ /* modem change */
+ commodem_intr(unit, inb(MODEM_STAT(addr)));
+ break;
+
+ case TRAi:
+ comtimer_state[unit] = 0;
+ tp->t_state &= ~(TS_BUSY|TS_FLUSH);
+ tt_write_wakeup(tp);
+ (void) comstart(tp);
+ break;
+ case RECi:
+ case CTIi: /* Character timeout indication */
+ if (tp->t_state&TS_ISOPEN) {
+ while ((line = inb(LINE_STAT(addr))) & iDR) {
+ c = inb(TXRX(addr));
+ ttyinput(c, tp);
+ }
+ } else
+ tt_open_wakeup(tp);
+ break;
+ case LINi:
+ line_stat = inb(LINE_STAT(addr));
+
+ if ((line_stat & iPE) &&
+ ((tp->t_flags&(EVENP|ODDP)) == EVENP ||
+ (tp->t_flags&(EVENP|ODDP)) == ODDP)) {
+ /* parity error */;
+ } else if ((line_stat & iOR) && !comoverrun) {
+ printf("com%d: overrun\n", unit);
+ comoverrun = 1;
+ } else if (line_stat & (iFE | iBRKINTR)) {
+ /* framing error or break */
+ ttyinput(tp->t_breakc, tp);
+ }
+ break;
+ }
+}
+
+static void
+comparam(unit)
+register int unit;
+{
+ struct tty *tp = &com_tty[unit];
+ u_short addr = (int)tp->t_addr;
+ spl_t s = spltty();
+ int mode;
+
+ if (tp->t_ispeed == B0) {
+ tp->t_state |= TS_HUPCLS;
+ outb(MODEM_CTL(addr), iOUT2);
+ commodem[unit] = 0;
+ splx(s);
+ return;
+ }
+
+ /* Do input buffering */
+ if (tp->t_ispeed >= B300)
+ tp->t_state |= TS_MIN;
+
+ outb(LINE_CTL(addr), iDLAB);
+ outb(BAUD_LSB(addr), divisorreg[tp->t_ispeed] & 0xff);
+ outb(BAUD_MSB(addr), divisorreg[tp->t_ispeed] >> 8);
+
+ if (tp->t_flags & (RAW|LITOUT|PASS8))
+ mode = i8BITS;
+ else
+ mode = i7BITS | iPEN;
+ if (tp->t_flags & EVENP)
+ mode |= iEPS;
+ if (tp->t_ispeed == B110)
+ /*
+ * 110 baud uses two stop bits -
+ * all other speeds use one
+ */
+ mode |= iSTB;
+
+ outb(LINE_CTL(addr), mode);
+
+ outb(INTR_ENAB(addr), iTX_ENAB|iRX_ENAB|iMODEM_ENAB|iERROR_ENAB);
+ if (comfifo[unit])
+ outb(FIFO_CTL(addr), iFIFOENA|iFIFO14CH);
+ outb(MODEM_CTL(addr), iDTR|iRTS|iOUT2);
+ commodem[unit] |= (TM_DTR|TM_RTS);
+ splx(s);
+}
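+
+/*
+ * Illustrative note, not part of the original driver: with the standard
+ * 1.8432 MHz UART input clock, the divisor latched through BAUD_LSB and
+ * BAUD_MSB works out to 115200 / baud (e.g. 12 for 9600 baud), which is
+ * what the divisorreg[] table indexed by the B* speed codes is assumed
+ * to hold.  A minimal sketch of that relationship:
+ */
+#if 0
+static int example_com_divisor(int baud)
+{
+ return 115200 / baud; /* 1843200 Hz / (16 * baud) */
+}
+#endif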
+
+comparm(int unit, int baud, int intr, int mode, int modem)
+{
+ u_short addr = (u_short)(cominfo[unit]->address);
+ spl_t s = spltty();
+
+ if (unit != 0 && unit != 1) {
+ printf("comparm(unit, baud, mode, intr, modem)\n");
+ splx(s);
+ return;
+ }
+ outb(LINE_CTL(addr), iDLAB);
+ outb(BAUD_LSB(addr), divisorreg[baud] & 0xff);
+ outb(BAUD_MSB(addr), divisorreg[baud] >> 8);
+ outb(LINE_CTL(addr), mode);
+ outb(INTR_ENAB(addr), intr);
+ outb(MODEM_CTL(addr), modem);
+ splx(s);
+}
+
+int comst_1, comst_2, comst_3, comst_4, comst_5 = 14;
+
+int
+comstart(tp)
+struct tty *tp;
+{
+ char nch;
+ int i;
+
+ if (tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) {
+comst_1++;
+ return(0);
+ }
+ if ((!queue_empty(&tp->t_delayed_write)) &&
+ (tp->t_outq.c_cc <= TTLOWAT(tp))) {
+comst_2++;
+ tt_write_wakeup(tp);
+ }
+ if (!tp->t_outq.c_cc) {
+comst_3++;
+ return(0);
+ }
+
+#if 0
+ i = (comfifo[minor(tp->t_dev)]) ? /*14*/comst_5 : 1;
+
+ tp->t_state |= TS_BUSY;
+ while (i-- > 0) {
+ nch = getc(&tp->t_outq);
+ if (nch == -1) break;
+ if ((nch & 0200) && ((tp->t_flags & LITOUT) == 0)) {
+ timeout(ttrstrt, (char *)tp, (nch & 0x7f) + 6);
+ tp->t_state |= TS_TIMEOUT;
+comst_4++;
+ return(0);
+ }
+ outb(TXRX((int)tp->t_addr), nch);
+ }
+#else
+ nch = getc(&tp->t_outq);
+ if ((nch & 0200) && ((tp->t_flags & LITOUT) == 0)) {
+ timeout(ttrstrt, (char *)tp, (nch & 0x7f) + 6);
+ tp->t_state |= TS_TIMEOUT;
+comst_4++;
+ return(0);
+ }
+ outb(TXRX((int)tp->t_addr), nch);
+ tp->t_state |= TS_BUSY;
+#endif
+ return(0);
+}
+
+/* Check for stuck xmitters */
+int comtimer_interval = 5;
+
+comtimer()
+{
+ spl_t s = spltty();
+ struct tty *tp = com_tty;
+ int i, nch;
+
+ for (i = 0; i < NCOM; i++, tp++) {
+ if ((tp->t_state & TS_ISOPEN) == 0)
+ continue;
+ if (!tp->t_outq.c_cc)
+ continue;
+ if (++comtimer_state[i] < 2)
+ continue;
+ /* It's stuck */
+printf("Tty %x was stuck\n", tp);
+ nch = getc(&tp->t_outq);
+ outb(TXRX((int)tp->t_addr), nch);
+ }
+
+ splx(s);
+ timeout(comtimer, 0, comtimer_interval*hz);
+}
+
+/*
+ * Set receive modem state from modem status register.
+ */
+fix_modem_state(unit, modem_stat)
+int unit, modem_stat;
+{
+ int stat = 0;
+
+ if (modem_stat & iCTS)
+ stat |= TM_CTS; /* clear to send */
+ if (modem_stat & iDSR)
+ stat |= TM_DSR; /* data set ready */
+ if (modem_stat & iRI)
+ stat |= TM_RNG; /* ring indicator */
+ if (modem_stat & iRLSD)
+ stat |= TM_CAR; /* carrier? */
+
+ commodem[unit] = (commodem[unit] & ~(TM_CTS|TM_DSR|TM_RNG|TM_CAR))
+ | stat;
+}
+
+/*
+ * Modem change (input signals)
+ */
+commodem_intr(
+ int unit,
+ int stat)
+{
+ int changed;
+
+ changed = commodem[unit];
+ fix_modem_state(unit, stat);
+ stat = commodem[unit];
+
+ /* Assumption: if the other party can handle
+ modem signals then it should handle all
+ the necessary ones. Else fix the cable. */
+
+ changed ^= stat; /* what changed ? */
+
+ if (changed & TM_CTS)
+ tty_cts( &com_tty[unit], stat & TM_CTS );
+
+#if 0
+ if (changed & TM_CAR)
+ ttymodem( &com_tty[unit], stat & TM_CAR );
+#endif
+
+}
+
+/*
+ * Set/get modem bits
+ */
+commctl(
+ register struct tty *tp,
+ int bits,
+ int how)
+{
+ spl_t s;
+ int unit;
+ vm_offset_t dev_addr;
+ register int b;
+
+ unit = minor(tp->t_dev);
+
+ if (bits == TM_HUP) { /* close line (internal) */
+ bits = TM_DTR | TM_RTS;
+ how = DMBIC;
+ }
+
+ if (how == DMGET) return commodem[unit];
+
+ dev_addr = cominfo[unit]->address;
+
+ s = spltty();
+
+ switch (how) {
+ case DMSET:
+ b = bits; break;
+ case DMBIS:
+ b = commodem[unit] | bits; break;
+ case DMBIC:
+ b = commodem[unit] & ~bits; break;
+ }
+ commodem[unit] = b;
+
+ if (bits & TM_BRK) {
+ if (b & TM_BRK) {
+ outb(LINE_CTL(dev_addr), inb(LINE_CTL(dev_addr)) | iSETBREAK);
+ } else {
+ outb(LINE_CTL(dev_addr), inb(LINE_CTL(dev_addr)) & ~iSETBREAK);
+ }
+ }
+
+#if 0
+ /* do I need to do something on this ? */
+ if (bits & TM_LE) { /* line enable */
+ }
+#endif
+#if 0
+ /* Unsupported */
+ if (bits & TM_ST) { /* secondary transmit */
+ }
+ if (bits & TM_SR) { /* secondary receive */
+ }
+#endif
+ if (bits & (TM_DTR|TM_RTS)) { /* data terminal ready, request to send */
+ how = iOUT2;
+ if (b & TM_DTR) how |= iDTR;
+ if (b & TM_RTS) how |= iRTS;
+ outb(MODEM_CTL(dev_addr), how);
+ }
+
+ splx(s);
+
+ /* the rest are inputs */
+ return commodem[unit];
+}
+
+comstop(tp, flags)
+register struct tty *tp;
+int flags;
+{
+ if ((tp->t_state & TS_BUSY) && (tp->t_state & TS_TTSTOP) == 0)
+ tp->t_state |= TS_FLUSH;
+}
+
+/*
+ *
+ * Code to be called from debugger.
+ *
+ */
+void compr_addr(addr)
+{
+ /* The two LINE_STAT prints may show different values, since
+ * merely reading some of the registers changes their contents.
+ */
+ printf("LINE_STAT(%x) %x\n",
+ LINE_STAT(addr), inb(LINE_STAT(addr)));
+
+ printf("TXRX(%x) %x, INTR_ENAB(%x) %x, INTR_ID(%x) %x, LINE_CTL(%x) %x,\n\
+MODEM_CTL(%x) %x, LINE_STAT(%x) %x, MODEM_STAT(%x) %x\n",
+ TXRX(addr), inb(TXRX(addr)),
+ INTR_ENAB(addr), inb(INTR_ENAB(addr)),
+ INTR_ID(addr), inb(INTR_ID(addr)),
+ LINE_CTL(addr), inb(LINE_CTL(addr)),
+ MODEM_CTL(addr), inb(MODEM_CTL(addr)),
+ LINE_STAT(addr), inb(LINE_STAT(addr)),
+ MODEM_STAT(addr),inb(MODEM_STAT(addr)));
+}
+
+int compr(unit)
+{
+ compr_addr(cominfo[unit]->address);
+ return(0);
+}
+
+int
+comgetc(int unit)
+{
+ u_short addr = (u_short)(cominfo[unit]->address);
+ spl_t s = spltty();
+ int c;
+
+ while((inb(LINE_STAT(addr)) & iDR) == 0) ;
+
+ c = inb(TXRX(addr));
+ splx(s);
+ return c;
+}
+
+#if RCLINE >= 0
+/*
+ * Routines for the console
+ */
+int
+comcnputc(dev_t dev, int c)
+{
+ u_short addr = (u_short)(cominfo[minor(dev)]->address);
+
+ /* Wait for transmitter to empty */
+ while((inb(LINE_STAT(addr)) & iTHRE) == 0)
+ continue;
+
+ /* send the char */
+ if (c == '\n')
+ comcnputc(dev, '\r');
+ outb(addr, c);
+}
+
+int
+comcngetc(dev_t dev, int wait)
+{
+ u_short addr = (u_short)(cominfo[minor(dev)]->address);
+ int c;
+
+ while((inb(LINE_STAT(addr)) & iDR) == 0)
+ if (! wait)
+ return 0;
+
+ c = inb(TXRX(addr));
+ return c & 0x7f;
+}
+#endif /* RCLINE */
+
+#endif /* NCOM */
diff --git a/i386/i386at/comreg.h b/i386/i386at/comreg.h
new file mode 100644
index 00000000..12174a1c
--- /dev/null
+++ b/i386/i386at/comreg.h
@@ -0,0 +1,134 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Olivetti serial port driver v1.0
+ * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989
+ * All rights reserved.
+ *
+ */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#define TXRX(addr) (addr + 0)
+#define BAUD_LSB(addr) (addr + 0)
+#define BAUD_MSB(addr) (addr + 1)
+#define INTR_ENAB(addr) (addr + 1)
+#define INTR_ID(addr) (addr + 2)
+#define FIFO_CTL(addr) (addr + 2)
+#define LINE_CTL(addr) (addr + 3)
+#define MODEM_CTL(addr) (addr + 4)
+#define LINE_STAT(addr) (addr + 5)
+#define MODEM_STAT(addr)(addr + 6)
+#define SCR(addr) (addr + 7)
+
+#define MODi 0
+#define TRAi 2
+#define RECi 4
+#define LINi 6
+#define CTIi 0xc
+#define MASKi 0xf
+
+/* line control register */
+#define iWLS0 0x01 /* word length select bit 0 */
+#define iWLS1 0x02 /* word length select bit 1 */
+#define iSTB 0x04 /* number of stop bits */
+#define iPEN 0x08 /* parity enable */
+#define iEPS 0x10 /* even parity select */
+#define iSP 0x20 /* stick parity */
+#define iSETBREAK 0x40 /* break key */
+#define iDLAB 0x80 /* divisor latch access bit */
+#define i5BITS 0x00 /* 5 bits per char */
+#define i6BITS 0x01 /* 6 bits per char */
+#define i7BITS 0x02 /* 7 bits per char */
+#define i8BITS 0x03 /* 8 bits per char */
+
+/* line status register */
+#define iDR 0x01 /* data ready */
+#define iOR 0x02 /* overrun error */
+#define iPE 0x04 /* parity error */
+#define iFE 0x08 /* framing error */
+#define iBRKINTR 0x10 /* a break has arrived */
+#define iTHRE 0x20 /* tx hold reg is now empty */
+#define iTSRE 0x40 /* tx shift reg is now empty */
+
+/* interrupt id register */
+#define iMODEM_INTR 0x01
+#define iTX_INTR 0x02
+#define iRX_INTR 0x04
+#define iERROR_INTR 0x08
+
+/* interrupt enable register */
+#define iRX_ENAB 0x01
+#define iTX_ENAB 0x02
+#define iERROR_ENAB 0x04
+#define iMODEM_ENAB 0x08
+
+/* modem control register */
+#define iDTR 0x01 /* data terminal ready */
+#define iRTS 0x02 /* request to send */
+#define iOUT1 0x04 /* COM aux line -not used */
+#define iOUT2 0x08 /* turns intr to 386 on/off */
+#define iLOOP 0x10 /* loopback for diagnostics */
+
+/* modem status register */
+#define iDCTS 0x01 /* delta clear to send */
+#define iDDSR 0x02 /* delta data set ready */
+#define iTERI 0x04 /* trail edge ring indicator */
+#define iDRLSD 0x08 /* delta rx line sig detect */
+#define iCTS 0x10 /* clear to send */
+#define iDSR 0x20 /* data set ready */
+#define iRI 0x40 /* ring indicator */
+#define iRLSD 0x80 /* rx line sig detect */
+
+/* fifo control register (only in 16550) */
+#define iFIFOENA 0x01 /* Enable fifos */
+#define iCLRRCVRFIFO 0x02 /* Clear receive fifo */
+#define iCLRXMITFIFO 0x04 /* Clear transmit fifo */
+#define iDMAMODE 0x08 /* DMA transfer enable */
+#define iFIFO1CH 0x00 /* Receive fifo trigger level 1 char */
+#define iFIFO4CH 0x40 /* Receive fifo trigger level 4 chars*/
+#define iFIFO8CH 0x80 /* Receive fifo trigger level 8 chars*/
+#define iFIFO14CH 0xc0 /* Receive fifo trigger level 14 chars*/
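+
+/*
+ * Illustrative sketch, not part of the original header: how the register
+ * macros above compose with a UART base port.  The 0x3f8 base (first PC
+ * serial port) and the inb/outb helpers are assumptions for the example.
+ */
+#if 0
+static void example_polled_putc(u_short base, char c) /* e.g. base = 0x3f8 */
+{
+ while ((inb(LINE_STAT(base)) & iTHRE) == 0)
+ ; /* wait for the transmit holding register to empty */
+ outb(TXRX(base), c); /* LINE_STAT(0x3f8) == 0x3fd, TXRX(0x3f8) == 0x3f8 */
+}
+#endif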
diff --git a/i386/i386at/conf.c b/i386/i386at/conf.c
new file mode 100644
index 00000000..2bd2d2af
--- /dev/null
+++ b/i386/i386at/conf.c
@@ -0,0 +1,399 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Device switch for i386 AT bus.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <device/conf.h>
+
+extern vm_offset_t block_io_mmap();
+
+extern int timeopen(), timeclose();
+extern vm_offset_t timemmap();
+#define timename "time"
+
+#include <hd.h>
+#if NHD > 0
+extern int hdopen(), hdclose(), hdread(), hdwrite();
+extern int hdgetstat(), hdsetstat(), hddevinfo();
+#define hdname "hd"
+
+#if 0
+extern int pchdopen(),pchdread(),pchdwrite(),pchdgetstat(),pchdsetstat();
+#define pchdname "pchd"
+#endif
+
+#endif NHD > 0
+
+#include <aha.h>
+#if NAHA > 0
+int rz_open(), rz_close(), rz_read(), rz_write();
+int rz_get_status(), rz_set_status(), rz_devinfo();
+int cd_open(), cd_close(), cd_read(), cd_write();
+#define rzname "sd"
+#define tzname "st"
+#define scname "sc" /* processors */
+#define cdname "cd_audio" /* CD-ROM DA */
+
+#endif /*NAHA > 0*/
+
+#include <fd.h>
+#if NFD > 0
+extern int fdopen(), fdclose(), fdread(), fdwrite();
+extern int fdgetstat(), fdsetstat(), fddevinfo();
+#define fdname "fd"
+#endif NFD > 0
+
+#include <wt.h>
+#if NWT > 0
+extern int wtopen(), wtread(), wtwrite(), wtclose();
+#define wtname "wt"
+#endif NWT > 0
+
+#include <pc586.h>
+#if NPC586 > 0
+extern int pc586open(), pc586output(), pc586getstat(), pc586setstat(),
+ pc586setinput();
+#define pc586name "pc"
+#endif NPC586 > 0
+
+#include <ne.h>
+#if NNE > 0
+extern int neopen(), neoutput(), negetstat(), nesetstat(), nesetinput();
+#ifdef FIPC
+extern int nefoutput();
+#endif /* FIPC */
+#define nename "ne"
+#endif NNE > 0
+
+#include <ns8390.h>
+#if NNS8390 > 0
+extern int wd8003open(), eliiopen();
+extern int ns8390output(), ns8390getstat(), ns8390setstat(),
+ ns8390setinput();
+#define ns8390wdname "wd"
+#define ns8390elname "el"
+#endif NNS8390 > 0
+
+#include <at3c501.h>
+#if NAT3C501 > 0
+extern int at3c501open(), at3c501output(),
+ at3c501getstat(), at3c501setstat(),
+ at3c501setinput();
+#define at3c501name "et"
+#endif NAT3C501 > 0
+
+#include <ul.h>
+#if NUL > 0
+extern int ulopen(), uloutput(), ulgetstat(), ulsetstat(),
+ ulsetinput();
+#define ulname "ul"
+#endif NUL > 0
+
+#include <wd.h>
+#if NWD > 0
+extern int wdopen(), wdoutput(), wdgetstat(), wdsetstat(),
+ wdsetinput();
+#define wdname "wd"
+#endif NWD > 0
+
+#include <hpp.h>
+#if NHPP > 0
+extern int hppopen(), hppoutput(), hppgetstat(), hppsetstat(),
+ hppsetinput();
+#define hppname "hpp"
+#endif /* NHPP > 0 */
+
+#include <par.h>
+#if NPAR > 0
+extern int paropen(), paroutput(), pargetstat(), parsetstat(),
+ parsetinput();
+#define parname "par"
+#endif NPAR > 0
+
+#include <de6c.h>
+#if NDE6C > 0
+extern int de6copen(), de6coutput(), de6cgetstat(), de6csetstat(),
+ de6csetinput();
+#define de6cname "de"
+#endif NDE6C > 0
+
+extern int kdopen(), kdclose(), kdread(), kdwrite();
+extern int kdgetstat(), kdsetstat(), kdportdeath();
+extern vm_offset_t kdmmap();
+#define kdname "kd"
+
+#include <com.h>
+#if NCOM > 0
+extern int comopen(), comclose(), comread(), comwrite();
+extern int comgetstat(), comsetstat(), comportdeath();
+#define comname "com"
+#endif NCOM > 0
+
+#include <lpr.h>
+#if NLPR > 0
+extern int lpropen(), lprclose(), lprread(), lprwrite();
+extern int lprgetstat(), lprsetstat(), lprportdeath();
+#define lprname "lpr"
+#endif NLPR > 0
+
+#include <blit.h>
+#if NBLIT > 0
+extern int blitopen(), blitclose(), blit_get_stat();
+extern vm_offset_t blitmmap();
+#define blitname "blit"
+
+extern int mouseinit(), mouseopen(), mouseclose();
+extern int mouseioctl(), mouseselect(), mouseread();
+#endif
+
+extern int kbdopen(), kbdclose(), kbdread();
+extern int kbdgetstat(), kbdsetstat();
+#define kbdname "kbd"
+
+extern int mouseopen(), mouseclose(), mouseread();
+#define mousename "mouse"
+
+extern int ioplopen(), ioplclose();
+extern vm_offset_t ioplmmap();
+#define ioplname "iopl"
+
+/*
+ * List of devices - console must be at slot 0
+ */
+struct dev_ops dev_name_list[] =
+{
+ /*name, open, close, read,
+ write, getstat, setstat, mmap,
+ async_in, reset, port_death, subdev,
+ dev_info */
+
+ /* We don't assign a console here; when we find one via
+ cninit() we stick something appropriate here through the
+ indirect list */
+ { "cn", nulldev, nulldev, nulldev,
+ nulldev, nulldev, nulldev, nulldev,
+ nodev, nulldev, nulldev, 0,
+ nodev },
+
+ { kdname, kdopen, kdclose, kdread,
+ kdwrite, kdgetstat, kdsetstat, kdmmap,
+ nodev, nulldev, kdportdeath, 0,
+ nodev },
+
+ { timename, timeopen, timeclose, nulldev,
+ nulldev, nulldev, nulldev, timemmap,
+ nodev, nulldev, nulldev, 0,
+ nodev },
+
+#ifndef LINUX_DEV
+#if NHD > 0
+ { hdname, hdopen, hdclose, hdread,
+ hdwrite, hdgetstat, hdsetstat, nomap,
+ nodev, nulldev, nulldev, 1024,
+ hddevinfo },
+#endif NHD > 0
+
+#if NAHA > 0
+ { rzname, rz_open, rz_close, rz_read,
+ rz_write, rz_get_status, rz_set_status, nomap,
+ nodev, nulldev, nulldev, 1024, /* 8 */
+ rz_devinfo },
+
+ { tzname, rz_open, rz_close, rz_read,
+ rz_write, rz_get_status, rz_set_status, nomap,
+ nodev, nulldev, nulldev, 8,
+ nodev },
+
+ { cdname, cd_open, cd_close, cd_read,
+ cd_write, nodev, nodev, nomap,
+ nodev, nulldev, nulldev, 8,
+ nodev },
+
+ { scname, rz_open, rz_close, rz_read,
+ rz_write, rz_get_status, rz_set_status, nomap,
+ nodev, nulldev, nulldev, 8,
+ nodev },
+
+#endif /*NAHA > 0*/
+
+#if NFD > 0
+ { fdname, fdopen, fdclose, fdread,
+ fdwrite, fdgetstat, fdsetstat, nomap,
+ nodev, nulldev, nulldev, 64,
+ fddevinfo },
+#endif NFD > 0
+
+#if NWT > 0
+ { wtname, wtopen, wtclose, wtread,
+ wtwrite, nulldev, nulldev, nomap,
+ nodev, nulldev, nulldev, 0,
+ nodev },
+#endif NWT > 0
+
+#if NPC586 > 0
+ { pc586name, pc586open, nulldev, nulldev,
+ pc586output, pc586getstat, pc586setstat, nomap,
+ pc586setinput,nulldev, nulldev, 0,
+ nodev },
+#endif
+
+#if NNE > 0
+ { nename, neopen, nulldev, nulldev,
+ neoutput, negetstat, nesetstat, nulldev,
+#ifdef FIPC
+ nesetinput, nulldev, nefoutput, 0,
+#else
+ nesetinput, nulldev, nulldev, 0,
+#endif /* FIPC */
+ nodev },
+#endif
+
+#if NAT3C501 > 0
+ { at3c501name, at3c501open, nulldev, nulldev,
+ at3c501output,at3c501getstat, at3c501setstat, nomap,
+ at3c501setinput, nulldev, nulldev, 0,
+ nodev },
+#endif
+
+#if NNS8390 > 0
+ { ns8390wdname, wd8003open, nulldev, nulldev,
+ ns8390output, ns8390getstat, ns8390setstat, nomap,
+ ns8390setinput, nulldev, nulldev, 0,
+ nodev },
+
+ { ns8390elname, eliiopen, nulldev, nulldev,
+ ns8390output, ns8390getstat, ns8390setstat, nomap,
+ ns8390setinput, nulldev, nulldev, 0,
+ nodev },
+#endif
+
+#if NUL > 0
+ { ulname, ulopen, nulldev, nulldev,
+ uloutput, ulgetstat, ulsetstat, nulldev,
+ ulsetinput, nulldev, nulldev, 0,
+ nodev },
+#endif
+
+#if NWD > 0
+ { wdname, wdopen, nulldev, nulldev,
+ wdoutput, wdgetstat, wdsetstat, nulldev,
+ wdsetinput, nulldev, nulldev, 0,
+ nodev },
+#endif
+
+#if NHPP > 0
+ { hppname, hppopen, nulldev, nulldev,
+ hppoutput, hppgetstat, hppsetstat, nulldev,
+ hppsetinput, nulldev, nulldev, 0,
+ nodev },
+#endif
+
+#if NPAR > 0
+ { parname, paropen, nulldev, nulldev,
+ paroutput, pargetstat, parsetstat, nomap,
+ parsetinput, nulldev, nulldev, 0,
+ nodev },
+#endif
+
+#if NDE6C > 0
+ { de6cname, de6copen, nulldev, nulldev,
+ de6coutput, de6cgetstat, de6csetstat, nomap,
+ de6csetinput, nulldev, nulldev, 0,
+ nodev },
+#endif
+#endif /* ! LINUX_DEV */
+
+#if NCOM > 0
+ { comname, comopen, comclose, comread,
+ comwrite, comgetstat, comsetstat, nomap,
+ nodev, nulldev, comportdeath, 0,
+ nodev },
+#endif
+
+#ifndef LINUX_DEV
+#if NLPR > 0
+ { lprname, lpropen, lprclose, lprread,
+ lprwrite, lprgetstat, lprsetstat, nomap,
+ nodev, nulldev, lprportdeath, 0,
+ nodev },
+#endif
+#endif /* ! LINUX_DEV */
+
+#if NBLIT > 0
+ { blitname, blitopen, blitclose, nodev,
+ nodev, blit_get_stat, nodev, blitmmap,
+ nodev, nodev, nodev, 0,
+ nodev },
+#endif
+
+ { mousename, mouseopen, mouseclose, mouseread,
+ nodev, nulldev, nulldev, nomap,
+ nodev, nulldev, nulldev, 0,
+ nodev },
+
+ { kbdname, kbdopen, kbdclose, kbdread,
+ nodev, kbdgetstat, kbdsetstat, nomap,
+ nodev, nulldev, nulldev, 0,
+ nodev },
+
+ { ioplname, ioplopen, ioplclose, nodev,
+ nodev, nodev, nodev, ioplmmap,
+ nodev, nulldev, nulldev, 0,
+ nodev },
+
+#if 0
+#if NHD > 0
+ { pchdname, pchdopen, hdclose, pchdread,
+ pchdwrite, pchdgetstat, pchdsetstat, nomap,
+ nodev, nulldev, nulldev, 16,
+ hddevinfo },
+#endif NHD > 0
+#endif
+
+#if 0
+#if NHD > 0
+ { hdname, hdopen, hdclose, hdread,
+ hdwrite, hdgetstat, hdsetstat, nomap,
+ nodev, nulldev, nulldev, 16,
+ hddevinfo },
+#endif NHD > 0
+#endif 0 /* Kevin doesn't know why this was here. */
+
+};
+int dev_name_count = sizeof(dev_name_list)/sizeof(dev_name_list[0]);
+
+/*
+ * Indirect list.
+ */
+struct dev_indirect dev_indirect_list[] = {
+
+ /* console */
+ { "console", &dev_name_list[0], 0 }
+};
+int dev_indirect_count = sizeof(dev_indirect_list)
+ / sizeof(dev_indirect_list[0]);
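+
+/*
+ * Illustrative sketch, not part of the original file: the switch above is
+ * searched by name at open time.  A minimal lookup over dev_name_list,
+ * assuming the d_name field declared in <device/conf.h>, would look
+ * roughly like this (the helper itself is hypothetical):
+ */
+#if 0
+struct dev_ops *example_dev_lookup(const char *name)
+{
+ int i;
+
+ for (i = 0; i < dev_name_count; i++)
+ if (strcmp(dev_name_list[i].d_name, name) == 0)
+ return &dev_name_list[i];
+ return (struct dev_ops *) 0;
+}
+#endif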
diff --git a/i386/i386at/cons_conf.c b/i386/i386at/cons_conf.c
new file mode 100644
index 00000000..49dc0238
--- /dev/null
+++ b/i386/i386at/cons_conf.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1988-1994, The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Utah $Hdr: cons_conf.c 1.7 94/12/14$
+ */
+
+/*
+ * This entire table could be autoconfig()ed but that would mean that
+ * the kernel's idea of the console would be out of sync with that of
+ * the standalone boot. I think it best that they both use the same
+ * known algorithm unless we see a pressing need otherwise.
+ */
+#include <sys/types.h>
+#include <cons.h>
+#include <com.h>
+#include <rc.h>
+
+extern int kdcnprobe(), kdcninit(), kdcngetc(), kdcnputc();
+#if NCOM > 0 && RCLINE >= 0
+extern int comcnprobe(), comcninit(), comcngetc(), comcnputc();
+#endif
+
+/*
+ * The rest of the consdev fields are filled in by the respective
+ * cnprobe routine.
+ */
+struct consdev constab[] = {
+ {"kd", kdcnprobe, kdcninit, kdcngetc, kdcnputc},
+#if NCOM > 0 && RCLINE >= 0 && 1
+ {"com", comcnprobe, comcninit, comcngetc, comcnputc},
+#endif
+ {0}
+};
diff --git a/i386/i386at/cram.h b/i386/i386at/cram.h
new file mode 100644
index 00000000..8373ce03
--- /dev/null
+++ b/i386/i386at/cram.h
@@ -0,0 +1,75 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * cram.h
+ */
+
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * outb(CMOS_ADDR, addr);
+ * result = inb(CMOS_DATA);
+ *
+ * where "addr" tells what value you want to read (some are listed
+ * below). Interrupts should be disabled while you do this.
+ */
+
+/* I/O ports */
+
+#define CMOS_ADDR 0x70 /* port for CMOS ram address */
+#define CMOS_DATA 0x71 /* port for CMOS ram data */
+
+
+/* Addresses, related masks, and potential results */
+
+#define CMOS_EB 0x14 /* read Equipment Byte */
+#define CM_SCRMSK 0x30 /* mask for EB query to get screen */
+#define CM_EGA_VGA 0x00 /* "not CGA or MONO" */
+#define CM_CGA_40 0x10
+#define CM_CGA_80 0x20
+#define CM_MONO_80 0x30
+
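+/*
+ * Illustrative sketch, not part of the original header: reading the
+ * equipment byte as described above.  The caller is assumed to have
+ * interrupts disabled around the address/data pair of accesses.
+ */
+#if 0
+static int example_cmos_screen_type(void)
+{
+ outb(CMOS_ADDR, CMOS_EB); /* select the equipment byte */
+ return inb(CMOS_DATA) & CM_SCRMSK; /* CM_EGA_VGA, CM_CGA_40, ... */
+}
+#endif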
diff --git a/i386/i386at/dev_hdr.h b/i386/i386at/dev_hdr.h
new file mode 100644
index 00000000..7af644b8
--- /dev/null
+++ b/i386/i386at/dev_hdr.h
@@ -0,0 +1,43 @@
+/*
+ * Mach device definitions (i386at version).
+ *
+ * Copyright (c) 1996 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+#ifndef _I386AT_DEV_HDR_H_
+#define _I386AT_DEV_HDR_H_
+
+struct device_emulation_ops;
+
+/* This structure is associated with each open device port.
+ The port representing the device points to this structure. */
+struct device
+{
+ struct device_emulation_ops *emul_ops;
+ void *emul_data;
+};
+
+typedef struct device *device_t;
+
+#define DEVICE_NULL ((device_t) 0)
+
+#endif /* _I386AT_DEV_HDR_H_ */
diff --git a/i386/i386at/device_emul.h b/i386/i386at/device_emul.h
new file mode 100644
index 00000000..957bd505
--- /dev/null
+++ b/i386/i386at/device_emul.h
@@ -0,0 +1,64 @@
+/*
+ * Mach device emulation definitions (i386at version).
+ *
+ * Copyright (c) 1996 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+#ifndef _I386AT_DEVICE_EMUL_H_
+#define _I386AT_DEVICE_EMUL_H_
+
+#include <mach/notify.h>
+#include <device/net_status.h>
+
+/* Each emulation layer provides these operations. */
+struct device_emulation_ops
+{
+ void (*reference) (void *);
+ void (*dealloc) (void *);
+ ipc_port_t (*dev_to_port) (void *);
+ io_return_t (*open) (ipc_port_t, mach_msg_type_name_t,
+ dev_mode_t, char *, device_t *);
+ io_return_t (*close) (void *);
+ io_return_t (*write) (void *, ipc_port_t, mach_msg_type_name_t,
+ dev_mode_t, recnum_t, io_buf_ptr_t, unsigned, int *);
+ io_return_t (*write_inband) (void *, ipc_port_t, mach_msg_type_name_t,
+ dev_mode_t, recnum_t, io_buf_ptr_inband_t,
+ unsigned, int *);
+ io_return_t (*read) (void *, ipc_port_t, mach_msg_type_name_t,
+ dev_mode_t, recnum_t, int, io_buf_ptr_t *, unsigned *);
+ io_return_t (*read_inband) (void *, ipc_port_t, mach_msg_type_name_t,
+ dev_mode_t, recnum_t, int, char *, unsigned *);
+ io_return_t (*set_status) (void *, dev_flavor_t, dev_status_t,
+ mach_msg_type_number_t);
+ io_return_t (*get_status) (void *, dev_flavor_t, dev_status_t,
+ mach_msg_type_number_t *);
+ io_return_t (*set_filter) (void *, ipc_port_t, int, filter_t [], unsigned);
+ io_return_t (*map) (void *, vm_prot_t, vm_offset_t,
+ vm_size_t, ipc_port_t *, boolean_t);
+ void (*no_senders) (mach_no_senders_notification_t *);
+ io_return_t (*write_trap) (void *, dev_mode_t,
+ recnum_t, vm_offset_t, vm_size_t);
+ io_return_t (*writev_trap) (void *, dev_mode_t,
+ recnum_t, io_buf_vec_t *, vm_size_t);
+};
+
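+/*
+ * Illustrative sketch, not part of the original header: given a struct
+ * device as declared in i386at/dev_hdr.h (emul_ops plus emul_data), a
+ * request is forwarded to the emulation layer roughly like this.
+ */
+#if 0
+io_return_t example_device_close(struct device *dev)
+{
+ return (*dev->emul_ops->close) (dev->emul_data);
+}
+#endif
+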
+#endif /* _I386AT_DEVICE_EMUL_H_ */
diff --git a/i386/i386at/disk.h b/i386/i386at/disk.h
new file mode 100644
index 00000000..e1fe6b98
--- /dev/null
+++ b/i386/i386at/disk.h
@@ -0,0 +1,186 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * disk.h
+ */
+
+/* Grab the public part. */
+#include <mach/machine/disk.h>
+
+
+
+#define MAX_ALTENTS 253 /* Maximum # of slots for alts */
+ /* allowed for in the table. */
+
+#define ALT_SANITY 0xdeadbeef /* magic # to validate alt table */
+
+struct alt_table {
+ u_short alt_used; /* # of alternates already assigned */
+ u_short alt_reserved; /* # of alternates reserved on disk */
+ long alt_base; /* 1st sector (abs) of the alt area */
+ long alt_bad[MAX_ALTENTS]; /* list of bad sectors/tracks */
+};
+
+struct alt_info { /* table length should be multiple of 512 */
+ long alt_sanity; /* to validate correctness */
+ u_short alt_version; /* to corroborate vintage */
+ u_short alt_pad; /* padding for alignment */
+ struct alt_table alt_trk; /* bad track table */
+ struct alt_table alt_sec; /* bad sector table */
+};
+typedef struct alt_info altinfo_t;
+
+#define V_NUMPAR 16 /* maximum number of partitions */
+
+#define VTOC_SANE 0x600DDEEE /* Indicates a sane VTOC */
+#define PDLOCATION 29 /* location of VTOC */
+
+#define BAD_BLK 0x80 /* needed for V_VERIFY */
+/* BAD_BLK moved from old hdreg.h */
+
+
+#define HDPDLOC 29 /* location of pdinfo/vtoc */
+#define LBLLOC 1 /* label block for xxxbsd */
+
+/* Partition permission flags */
+#define V_OPEN 0x100 /* Partition open (for driver use) */
+#define V_VALID 0x200 /* Partition is valid to use */
+
+
+
+/* Sanity word for the physical description area */
+#define VALID_PD 0xCA5E600D
+
+struct localpartition {
+ u_int p_flag; /*permission flags*/
+ long p_start; /*physical start sector no of partition*/
+ long p_size; /*# of physical sectors in partition*/
+};
+typedef struct localpartition localpartition_t;
+
+struct evtoc {
+ u_int fill0[6];
+ u_int cyls; /*number of cylinders per drive*/
+ u_int tracks; /*number tracks per cylinder*/
+ u_int sectors; /*number sectors per track*/
+ u_int fill1[13];
+ u_int version; /*layout version*/
+ u_int alt_ptr; /*byte offset of alternates table*/
+ u_short alt_len; /*byte length of alternates table*/
+ u_int sanity; /*to verify vtoc sanity*/
+ u_int xcyls; /*number of cylinders per drive*/
+ u_int xtracks; /*number tracks per cylinder*/
+ u_int xsectors; /*number sectors per track*/
+ u_short nparts; /*number of partitions*/
+ u_short fill2; /*pad for 286 compiler*/
+ char label[40];
+ struct localpartition part[V_NUMPAR];/*partition headers*/
+ char fill[512-352];
+};
+
+union io_arg {
+ struct {
+ u_short ncyl; /* number of cylinders on drive */
+ u_char nhead; /* number of heads/cyl */
+ u_char nsec; /* number of sectors/track */
+ u_short secsiz; /* number of bytes/sector */
+ } ia_cd; /* used for Configure Drive cmd */
+ struct {
+ u_short flags; /* flags (see below) */
+ long bad_sector; /* absolute sector number */
+ long new_sector; /* RETURNED alternate sect assigned */
+ } ia_abs; /* used for Add Bad Sector cmd */
+ struct {
+ u_short start_trk; /* first track # */
+ u_short num_trks; /* number of tracks to format */
+ u_short intlv; /* interleave factor */
+ } ia_fmt; /* used for Format Tracks cmd */
+ struct {
+ u_short start_trk; /* first track */
+ char *intlv_tbl; /* interleave table */
+ } ia_xfmt; /* used for the V_XFORMAT ioctl */
+};
+
+
+#define BOOTSZ 446 /* size of boot code in master boot block */
+#define FD_NUMPART 4 /* number of 'partitions' in fdisk table */
+#define ACTIVE 128 /* indicator of active partition */
+#define BOOT_MAGIC 0xAA55 /* signature of the boot record */
+#define UNIXOS 99 /* UNIX partition */
+#define BSDOS 165
+#define LINUXSWAP 130
+#define LINUXOS 131
+extern int OS; /* what partition we came from */
+
+/*
+ * structure to hold the fdisk partition table
+ */
+struct ipart {
+ u_char bootid; /* bootable or not */
+ u_char beghead; /* beginning head, sector, cylinder */
+ u_char begsect; /* begcyl is a 10-bit number. High 2 bits */
+ u_char begcyl; /* are in begsect. */
+ u_char systid; /* OS type */
+ u_char endhead; /* ending head, sector, cylinder */
+ u_char endsect; /* endcyl is a 10-bit number. High 2 bits */
+ u_char endcyl; /* are in endsect. */
+ long relsect; /* first sector relative to start of disk */
+ long numsect; /* number of sectors in partition */
+};
+
+/*
+ * structure to hold master boot block in physical sector 0 of the disk.
+ * Note that partitions stuff can't be directly included in the structure
+ * because of lameo '386 compiler alignment design.
+ */
+struct mboot { /* master boot block */
+ char bootinst[BOOTSZ];
+ char parts[FD_NUMPART * sizeof(struct ipart)];
+ u_short signature;
+};
+
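+/*
+ * Illustrative sketch, not part of the original header: because the fdisk
+ * entries in struct mboot are kept as a raw byte array (see the comment
+ * above struct mboot), an entry is best copied out before use rather
+ * than accessed through a cast.
+ */
+#if 0
+static void example_get_ipart(struct mboot *mb, int slot, struct ipart *out)
+{
+ bcopy(&mb->parts[slot * sizeof(struct ipart)],
+ (char *) out, sizeof(struct ipart));
+}
+#endif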
diff --git a/i386/i386at/ds8390.h b/i386/i386at/ds8390.h
new file mode 100644
index 00000000..a91e6427
--- /dev/null
+++ b/i386/i386at/ds8390.h
@@ -0,0 +1,166 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ds8390.h 7.1 (Berkeley) 5/9/91
+ */
+
+/*
+ * Nominal Semidestructor DS8390 Ethernet Chip
+ * Register and bit definitions
+ */
+
+/*
+ * Page register offset values
+ */
+#define ds_cmd 0x00 /* Command register: */
+#define DSCM_STOP 0x01 /* Stop controller */
+#define DSCM_START 0x02 /* Start controller */
+#define DSCM_TRANS 0x04 /* Transmit packet */
+#define DSCM_RREAD 0x08 /* Remote read */
+#define DSCM_RWRITE 0x10 /* Remote write */
+#define DSCM_NODMA 0x20 /* No Remote DMA present */
+#define DSCM_PG0 0x00 /* Select Page 0 */
+#define DSCM_PG1 0x40 /* Select Page 1 */
+#define DSCM_PG2 0x80 /* Select Page 2? */
+
+#define ds0_pstart 0x01 /* Page Start register */
+#define ds0_pstop 0x02 /* Page Stop register */
+#define ds0_bnry 0x03 /* Boundary Pointer */
+#define ds0_bndy ds0_bnry /* Boundary Pointer */
+
+#define ds0_tsr 0x04 /* Transmit Status (read-only) */
+#define DSTS_PTX 0x01 /* Successful packet transmit */
+#define DSTS_COLL 0x04 /* Packet transmit w/ collision*/
+#define DSTS_COLL16 0x04 /* Packet had >16 collisions & fail */
+#define DSTS_ABT 0x08 /* Transmit aborted */
+#define DSTS_CRS 0x10 /* Carrier sense lost/xmit !aborted */
+#define DSTS_UND 0x20 /* FIFO Underrun on transmission*/
+#define DSTS_CDH 0x40 /* CD heartbeat */
+#define DSTS_OWC 0x80 /* Out of window collision - */
+ /* transmit not aborted */
+
+#define ds0_tpsr ds0_tsr /* Transmit Page (write-only) */
+#define ds0_tbcr0 0x05 /* Transmit Byte count, low WO */
+#define ds0_tbcr1 0x06 /* Transmit Byte count, high WO */
+
+#define ds0_isr 0x07 /* Interrupt status register */
+#define DSIS_RX 0x01 /* Successful packet reception */
+#define DSIS_TX 0x02 /* Successful packet transmission */
+#define DSIS_RXE 0x04 /* Packet reception w/error */
+#define DSIS_TXE 0x08 /* Packet transmission w/error*/
+#define DSIS_ROVRN 0x10 /* Receiver overrun in the ring*/
+#define DSIS_CTRS 0x20 /* Diagnostic counters need attn */
+#define DSIS_RDC 0x40 /* Remote DMA Complete */
+#define DSIS_RESET 0x80 /* Reset Complete */
+
+#define ds0_rsar0 0x08 /* Remote start address low WO */
+#define ds0_rsar1 0x09 /* Remote start address high WO */
+#define ds0_rbcr0 0x0A /* Remote byte count low WO */
+#define ds0_rbcr1 0x0B /* Remote byte count high WO */
+
+#define ds0_rsr 0x0C /* Receive status RO */
+#define ds0_cntr0 0x0D /* Tally counter 0 RO */
+#define ds0_cntr1 0x0E /* Tally counter 1 RO */
+#define ds0_cntr2 0x0F /* Tally counter 2 RO */
+#define DSRS_RPC 0x01 /* Received Packet Complete */
+#define DSRS_CRC 0x02 /* CRC error */
+#define DSRS_FAE 0x04 /* Frame alignment error */
+#define DSRS_FO 0x08 /* FIFO Overrun */
+#define DSRS_MPA 0x10 /* Missed packet */
+#define DSRS_PHY 0x20 /* Physical/multicast address */
+#define DSRS_DIS 0x40 /* Receiver disable */
+#define DSRS_DFR 0x80 /* Deferring */
+
+#define ds0_rcr ds0_rsr /* Receive configuration WO */
+#define DSRC_SEP 0x01 /* Save error packets */
+#define DSRC_AR 0x02 /* Accept Runt packets */
+#define DSRC_AB 0x04 /* Accept Broadcast packets */
+#define DSRC_AM 0x08 /* Accept Multicast packets */
+#define DSRC_PRO 0x10 /* Promiscuous physical */
+#define DSRC_MON 0x20 /* Monitor mode */
+
+#define ds0_tcr 0x0D /* Transmit configuration WO */
+#define DSTC_CRC 0x01 /* Inhibit CRC */
+#define DSTC_LB0 0x02 /* Encoded Loopback Control */
+#define DSTC_LB1 0x04 /* Encoded Loopback Control */
+#define DSTC_ATD 0x08 /* Auto Transmit Disable */
+#define DSTC_OFST 0x10 /* Collision Offset Enable */
+
+#define ds0_rcvalctr ds0_tcr /* Receive alignment err ctr RO */
+
+#define ds0_dcr 0x0E /* Data configuration WO */
+#define DSDC_WTS 0x01 /* Word Transfer Select */
+#define DSDC_BOS 0x02 /* Byte Order Select */
+#define DSDC_LAS 0x04 /* Long Address Select */
+#define DSDC_BMS 0x08 /* Burst Mode Select */
+#define DSDC_AR 0x10 /* Autoinitialize Remote */
+#define DSDC_FT0 0x20 /* Fifo Threshold Select */
+#define DSDC_FT1 0x40 /* Fifo Threshold Select */
+
+#define ds0_rcvcrcctr ds0_dcr /* Receive CRC error counter RO */
+
+#define ds0_imr 0x0F /* Interrupt mask register WO */
+#define DSIM_PRXE 0x01 /* Packet received enable */
+#define DSIM_PTXE 0x02 /* Packet transmitted enable */
+#define DSIM_RXEE 0x04 /* Receive error enable */
+#define DSIM_TXEE 0x08 /* Transmit error enable */
+#define DSIM_OVWE 0x10 /* Overwrite warning enable */
+#define DSIM_CNTE 0x20 /* Counter overflow enable */
+#define DSIM_RDCE 0x40 /* Dma complete enable */
+
+
+/* We DON'T enable Counter Overflow and Remote DMA complete. */
+#define IMR_ENABLE (DSIM_PRXE|DSIM_PTXE|DSIM_RXEE|DSIM_TXEE|DSIM_OVWE)
+
+#define ds0_rcvfrmctr ds0_imr /* Receive Frame error cntr RO */
+
+
+#define ds1_par0 ds0_pstart /* Physical address register 0 */
+ /* Physical address registers 1-4 */
+#define ds1_par5 ds0_tbcr1 /* Physical address register 5 */
+#define ds1_curr ds0_isr /* Current page (receive unit) */
+#define ds1_mar0 ds0_rsar0 /* Multicast address register 0 */
+ /* Multicast address registers 1-6 */
+#define ds1_mar7 ds0_imr /* Multicast address register 7 */
+#define ds1_curr ds0_isr /* Current page (receive unit) */
+
+#define DS_PGSIZE 256 /* Size of RAM pages in bytes */
+
+/*
+ * Packet receive header, 1 per each buffer page used in receive packet
+ */
+struct prhdr {
+ u_char pr_status; /* is this a good packet, same as ds0_rsr */
+ u_char pr_nxtpg; /* next page of packet or next packet */
+ u_char pr_sz0;
+ u_char pr_sz1;
+};
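+
+/*
+ * Illustrative sketch, not part of the original header: the two length
+ * bytes in the receive header are low/high order, so the packet length
+ * is recovered as below (an assumption from common DS8390 usage).
+ */
+#if 0
+static unsigned example_prhdr_len(struct prhdr *ph)
+{
+ return ph->pr_sz0 | (ph->pr_sz1 << 8);
+}
+#endif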
diff --git a/i386/i386at/eisa.h b/i386/i386at/eisa.h
new file mode 100644
index 00000000..33629ca0
--- /dev/null
+++ b/i386/i386at/eisa.h
@@ -0,0 +1,110 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Copyright 1992 by Open Software Foundation,
+ * Grenoble, FRANCE
+ *
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OSF or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior
+ * permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Eisa defs
+ */
+
+#ifndef _I386AT_EISA_H_
+#define _I386AT_EISA_H_
+
+#include <mach/boolean.h>
+
+#if EISA
+extern boolean_t is_eisa_bus;
+
+#define EISA_ID_REG(board, byte) (0xc80 | (byte) | ((board) << 12))
+
+#define EISA_ID_REG_0 0x0
+#define EISA_ID_REG_1 0x1
+#define EISA_ID_REG_2 0x2
+#define EISA_ID_REG_3 0x3
+
+#define EISA_SYSTEM_BOARD 0x0
+
+struct std_board_id {
+ unsigned revision: 8, /* Revision number */
+ product: 8; /* Product number */
+};
+
+struct sys_board_id {
+ unsigned bus_vers: 3, /* EISA bus version */
+ reserved: 13; /* Manufacturer reserved */
+};
+
+struct board_id {
+ union {
+ struct sys_board_id sys_id;
+ struct std_board_id std_id;
+ } bd_id;
+ unsigned name_char_2: 5, /* 3rd compressed char */
+ name_char_1: 5, /* 2nd compressed char */
+ name_char_0: 5, /* 1st compressed char */
+ not_eisa: 1; /* 0 if eisa board */
+};
+
+union eisa_board_id {
+ unsigned char byte[4];
+ struct board_id id;
+};
+
+typedef union eisa_board_id eisa_board_id_t;
+
+
+/* Additional DMA registers */
+
+#define DMA0HIPAGE 0x481 /* DMA 0 address: bits 24-31 */
+#define DMA0HICNT 0x405 /* DMA 0 count: bits 16-23 */
+
+
+#else /* EISA */
+#define is_eisa_bus FALSE
+#define probe_eisa()
+#endif /* EISA */
+
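+/*
+ * Illustrative sketch, not part of the original header: each 5-bit
+ * compressed character in the board id encodes 'A'..'Z' as 1..26, so a
+ * field such as name_char_0 decodes as below (this mapping is an
+ * assumption from the usual EISA id convention, not taken from this file).
+ */
+#if 0
+static char example_eisa_id_char(unsigned field)
+{
+ return (char) ('A' + field - 1);
+}
+#endif
+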
+#endif /* _I386AT_EISA_H_ */
diff --git a/i386/i386at/fd.c b/i386/i386at/fd.c
new file mode 100644
index 00000000..773411b4
--- /dev/null
+++ b/i386/i386at/fd.c
@@ -0,0 +1,1701 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/* Copyright (c) 1987, 1988 TOSHIBA Corp. */
+/* All Rights Reserved */
+
+#if 0
+
+#include <fd.h>
+
+#ifdef MACH_KERNEL
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <device/buf.h>
+#include <device/errno.h>
+#else MACH_KERNEL
+#include <sys/buf.h>
+#include <sys/errno.h>
+#include <sys/user.h>
+#include <sys/ioctl.h>
+#endif MACH_KERNEL
+#include <i386/pio.h>
+#include <i386/machspl.h>
+#include <chips/busses.h>
+#include <i386at/fdreg.h>
+#include <i386at/disk.h>
+#include <vm/vm_kern.h>
+
+#ifdef DEBUG
+#define D(x) x
+#define DD(x) x
+#else /* DEBUG */
+#define D(x)
+#define DD(x)
+#endif /* DEBUG */
+
+/*
+ * Floppy Device-Table Definitions (drtabs)
+ *
+ * Cyls,Sec,spc,part,Mtype,RWFpl,FGpl
+ */
+struct fddrtab m765f[] = { /* format table */
+ 80, 18, 1440, 9, 0x88, 0x2a, 0x50, /* [0] 3.50" 720 Kb */
+ 80, 36, 2880, 18, 0x08, 0x1b, 0x6c, /* [1] 3.50" 1.44 Meg */
+ 40, 18, 720, 9, 0xa8, 0x2a, 0x50, /* [2] 5.25" 360 Kb */
+ 80, 30, 2400, 15, 0x08, 0x1b, 0x54 /* [3] 5.25" 1.20 Meg */
+};
+
+/*
+ * The following are static initialization variables
+ * which are based on the configuration.
+ */
+struct ctrl_info ctrl_info[MAXUNIT>>1] = { /* device data table */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } ,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+};
+
+struct unit_info unit_info[MAXUNIT]; /* unit buffer headers */
+
+char *fderr = "FD Error on unit";
+char *fdmsg[] = {
+ "?",
+ "Missing data address mark",
+ "Write protected",
+ "Sector not found",
+ "Data Overrun", /* Over run error */
+ "Uncorrectable data read error", /* CRC Error */
+ "FDC Error",
+ "Illegal format type",
+ "Drive not ready",
+ "diskette not present - please insert",
+ "Illegal interrupt type"
+};
+
+struct buf fdrbuf[MAXUNIT]; /* data transfer buffer structures */
+
+int fdminphys();
+int fdintr(), fdprobe(), fdslave();
+void fdattach();
+int FdDmaEISA = 0;
+int FdDmaThreshold = 16 * 1024 * 1024;
+vm_offset_t FdDmaPage = (vm_offset_t) 0;
+vm_offset_t fd_std[NFD] = { 0 };
+struct bus_device *fd_dinfo[NFD*2];
+struct bus_ctlr *fd_minfo[NFD];
+struct bus_driver fddriver =
+ {fdprobe, fdslave, fdattach, 0, fd_std, "fd", fd_dinfo, "fdc", fd_minfo, 0};
+
+int m765verify[MAXUNIT] = {1,1,1,1}; /* write after read flag */
+ /* 0 != verify mode */
+ /* 0 == not verify mode */
+#ifdef MACH_KERNEL
+extern struct buf *geteblk();
+#endif MACH_KERNEL
+
+#define trfrate(uip, type) outb(VFOREG(uip->addr),(((type)&RATEMASK)>>6))
+#define rbskrate(uip, type) trfrate(uip,(type)&RAPID?RPSEEK:NMSEEK)
+#define getparm(type) ((type<0||type>3)?(struct fddrtab *)ERROR:&m765f[type])
+#define relative(s1,s2) ((s1)>(s2)?(s1)-(s2):(s2)-(s1))
+
+fdprobe(port, ctlr)
+struct bus_ctlr *ctlr;
+{
+ int spot = STSREG((int) ctlr->address);
+ struct ctrl_info *cip = &ctrl_info[ctlr->unit];
+ int i, in;
+
+ outb(spot, DATAOK);
+ for (i = 1000; i--;) {
+ in = inb(spot);
+ if ((in&DATAOK) == DATAOK && !(in&0x0f)) {
+ take_ctlr_irq(ctlr);
+			cip->b_cmd.c_rbmtr = 0;	/* recalibrate/motor flag */
+ cip->b_cmd.c_intr = CMDRST; /* interrupt flag */
+ cip->b_unitf = 0;
+ cip->b_uip = 0;
+ cip->b_rwerr = cip->b_seekerr = cip->b_rberr = 0;
+ cip->usebuf = 0;
+ if (FdDmaPage) {
+ cip->b_pbuf = FdDmaPage + PAGE_SIZE * ctlr->unit;
+ if (kmem_alloc_pageable(kernel_map,
+ (vm_offset_t *)&cip->b_vbuf,
+ PAGE_SIZE) != KERN_SUCCESS) {
+ printf("%s%d: can not kmem_alloc_pageable.\n",
+ ctlr->name, ctlr->unit);
+ return 0;
+ }
+ (void)pmap_map(cip->b_vbuf,
+ (vm_offset_t)cip->b_pbuf,
+ (vm_offset_t)cip->b_pbuf+PAGE_SIZE,
+ VM_PROT_READ | VM_PROT_WRITE);
+ }
+ printf("%s%d: port = %x, spl = %d, pic = %d.\n", ctlr->name,
+ ctlr->unit, ctlr->address, ctlr->sysdep, ctlr->sysdep1);
+ return(1);
+ }
+ }
+ return(0);
+}
+
+fdslave(dev, xxxx)
+struct bus_device *dev;
+{
+ return(1); /* gross hack */
+}
+
+void fdattach(dev)
+struct bus_device *dev;
+{
+ struct unit_info *uip = &unit_info[dev->unit];
+ struct ctrl_info *cip = &ctrl_info[dev->ctlr];
+
+ uip->dev = dev;
+ dev->address = dev->mi->address;
+ uip->addr = dev->address;
+ uip->b_cmd = &cip->b_cmd;
+ uip->b_seekaddr = 0;
+ uip->av_forw = 0;
+ uip->wakeme = 0;
+ if (cip->b_unitf) {
+ uip->b_unitf=cip->b_unitf->b_unitf;
+ cip->b_unitf->b_unitf=uip;
+ } else {
+ uip->b_unitf=uip;
+ cip->b_unitf=uip;
+ }
+ uip->d_drtab.dr_type &= ~OKTYPE;
+
+ printf(", port = %x, spl = %d, pic = %d.",
+ dev->address, dev->sysdep, dev->sysdep1);
+
+ rstout(uip);
+ specify(uip);
+}
+/*****************************************************************************
+ *
+ * TITLE: fdopen
+ *
+ * ABSTRACT: Open a unit.
+ *
+ ****************************************************************************/
+fdopen(dev, flag, otyp)
+dev_t dev;
+int flag; /* not used */
+int otyp; /* not used */
+{
+ struct fddrtab *driv;
+ struct buf *wbp;
+ spl_t x = SPL();
+ int error = 0;
+ int unit = UNIT(dev);
+ struct unit_info *uip = &unit_info[unit];
+ int slave = uip->dev->slave;
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct fdcmd *cmdp = uip->b_cmd;
+ if (unit < MAXUNIT){
+ /* Since all functions that use this are called from open, we only
+ set this once, right here. */
+ rstout(uip);
+ cip->b_wup = uip;
+ openchk(cmdp);
+ cmdp->c_devflag |= FDMCHK;
+ chkbusy(cmdp);
+ cmdp->c_stsflag |= MTRFLAG;
+ mtr_on(uip);
+ if(inb(VFOREG(uip->addr))&OPENBIT ||
+ !(uip->d_drtab.dr_type&OKTYPE)){
+ uip->d_drtab.dr_type &= ~OKTYPE;
+ if(!rbrate(RAPID, uip))
+ fdseek(RAPID, uip, 2);
+ if(inb(VFOREG(uip->addr))&OPENBIT)
+ error = ENXIO;
+ }
+ cmdp->c_stsflag &= ~MTRFLAG;
+ mtr_on(uip);
+ openfre(cmdp);
+ if(!error && !(uip->d_drtab.dr_type & OKTYPE)) {
+ if (MEDIATYPE(dev)>3)
+ goto endopen;
+ driv = &m765f[MEDIATYPE(dev)];
+ wbp = geteblk(BLKSIZE);
+ m765sweep(uip, driv);
+ cmdp->c_rbmtr &= ~(1<<(RBSHIFT+(slave)));
+ ++cip->b_rwerr;
+ wbp->b_dev = dev; wbp->b_error = 0; wbp->b_resid = 0;
+ wbp->b_flags = (B_READ|B_VERIFY); wbp->b_bcount = 512;
+ wbp->b_pfcent = 2*driv->dr_spc + driv->dr_nsec - 1;
+ setqueue(wbp, uip);
+ biowait(wbp);
+ brelse(wbp);
+ error = 0;
+ uip->d_drtab.dr_type |= OKTYPE;
+ }
+ } else
+ error = ENXIO;
+ endopen:
+ splx(x);
+ return(error);
+}
+/*****************************************************************************
+ *
+ * TITLE: fdclose
+ *
+ * ABSTRACT: Close a unit.
+ *
+ * Called on last close; mark the unit closed and not-ready.
+ *
+ * Unix doesn't actually "open" an inode for rootdev, swapdev or pipedev.
+ * If UNIT(swapdev) != UNIT(rootdev), then must add code in init() to
+ * "open" swapdev. These devices should never be closed.
+ *
+ *****************************************************************************/
+fdclose(dev, flag, otyp, offset)
+dev_t dev; /* major, minor numbers */
+int flag; /* not used */
+int otyp; /* not used */
+off_t offset; /* not used */
+{
+ extern dev_t rootdev, swapdev;
+ struct unit_info *uip = &unit_info[UNIT(dev)];
+ spl_t s;
+
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ if ((dev == rootdev) || (dev == swapdev)) /* never close these */
+ return(0);
+#endif MACH_KERNEL
+
+ /* Clear the bit.
+ * If last close of drive insure drtab queue is empty before returning.
+ */
+ s = SPL();
+ while(uip->av_forw != 0) {
+ uip->wakeme = 1;
+ sleep(uip, PRIBIO);
+ }
+ splx(s);
+#ifdef MACH_KERNEL
+ return(0);
+#else MACH_KERNEL
+ close(0);
+#endif MACH_KERNEL
+}
+/*****************************************************************************
+ *
+ * TITLE: fdstrategy
+ *
+ * ABSTRACT: Queue an I/O Request, and start it if not busy already.
+ *
+ * Reject request if unit is not-ready.
+ *
+ *	Note:	the check for not-ready is done here ==> requests could be
+ *		queued before the unit goes not-ready.  Such requests would
+ *		return not-ready status if attempted before a new volume is
+ *		inserted; once a new volume is inserted, they would perform
+ *		good I/O's to the wrong volume.
+ *
+ * CALLS: iodone(),setqueue()
+ *
+ * CALLING ROUTINES: fdread (indirectly, thru physio)
+ * fdwrite (indirectly, thru physio)
+ *
+ ****************************************************************************/
+fdstrategy(bp)
+struct buf *bp; /* buffer header */
+{
+ unsigned bytes_left;
+ daddr_t secno;
+ struct unit_info *uip = &unit_info[UNIT(bp->b_dev)];
+ struct fddrtab *dr = &uip->d_drtab;
+ struct fddrtab *sdr;
+
+ bp->b_error = 0;
+ /* set b_resid to b_bcount because we haven't done anything yet */
+ bp->b_resid = bp->b_bcount;
+ if (!(dr->dr_type & OKTYPE) ||
+ ((sdr = getparm(MEDIATYPE(bp->b_dev)))==(struct fddrtab *)ERROR) ||
+ /* wrong parameters */
+ (sdr->dr_ncyl != dr->dr_ncyl) || (sdr->dr_nsec != dr->dr_nsec) ||
+ ((sdr->dr_type|OKTYPE) != dr->dr_type) ||
+ (sdr->dr_rwgpl != dr->dr_rwgpl) ||
+ (sdr->dr_fgpl != dr->dr_fgpl)) {
+ bp->b_flags |= B_ERROR;
+ bp->b_error = EIO;
+ biodone(bp);
+ return(0);
+ }
+ /*
+ * Figure "secno" from b_blkno. Adjust sector # for partition.
+ *
+ * If reading just past the end of the device, it's
+ * End of File. If not reading, or if read starts further in
+ * than the first sector after the partition, it's an error.
+ *
+ * secno is logical blockno / # of logical blocks per sector */
+ secno = (bp->b_blkno * NBPSCTR) >> 9;
+ if (secno >= dr->p_nsec) {
+ if (!((bp->b_flags & B_READ) && (secno == dr->p_nsec))){
+ /* off the deep end */
+ bp->b_flags |= B_ERROR;
+ bp->b_error = ENXIO;
+ }
+ biodone(bp);
+ return(0);
+ }
+/* At this point, it is no longer possible to directly return from strategy.
+ We now set b_resid to the number of bytes we cannot transfer because
+ they lie beyond the end of the request's partition. This value is 0
+ if the entire request is within the partition. */
+ bytes_left = (dr->p_nsec - secno) << 9;
+ bp->b_resid = ((bp->b_bcount<=bytes_left)?0:(bp->b_bcount-bytes_left));
+ bp->b_pfcent = secno;
+ setqueue(bp, uip);
+ return(0);
+}
+
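+/*
+ * Illustrative worked example for the bounds math in fdstrategy() above
+ * (assuming the 1.44 MB table entry, p_nsec == 2880, NBPSCTR == 512):
+ * a request at b_blkno == 2876 for b_bcount == 4096 bytes gives
+ * secno == 2876 (inside the partition), bytes_left == (2880 - 2876) << 9
+ * == 2048, and so b_resid is set to 4096 - 2048 == 2048 before the
+ * request is queued via setqueue().
+ */
+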
+/***************************************************************************
+ *
+ * set queue to buffer
+ *
+ ***************************************************************************/
+setqueue(bp, uip)
+struct buf *bp;
+struct unit_info *uip;
+{
+ spl_t x = SPL();
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct fdcmd *cmdp = uip->b_cmd;
+
+	openchk(cmdp);			/* opening check */
+ cmdp->c_devflag |= STRCHK;
+ fd_disksort(uip, bp); /* queue the request */
+ /*
+ * If no requests are in progress, start this one up. Else
+ * leave it on the queue, and fdintr will call m765io later.
+ */
+ if(!cip->b_uip)
+ m765io(uip);
+ splx(x);
+}
+/***************************************************************************
+ *
+ * check io_busy routine
+ *
+ ***************************************************************************/
+chkbusy(cmdp)
+struct fdcmd *cmdp;
+{
+ while(cmdp->c_devflag & STRCHK){
+ cmdp->c_devflag |= STRWAIT;
+ sleep(&cmdp->c_devflag,PZERO);
+ }
+}
+/***************************************************************************
+ *
+ * check fdopen() routine
+ *
+ ***************************************************************************/
+openchk(cmdp)
+struct fdcmd *cmdp;
+{
+ while(cmdp->c_devflag & FDMCHK ){
+ cmdp->c_devflag |= FDWAIT;
+ sleep(&cmdp->c_devflag,PZERO);
+ }
+}
+/***************************************************************************
+ *
+ * free fdopen() routine
+ *
+ ***************************************************************************/
+openfre(cmdp)
+struct fdcmd *cmdp;
+{
+ cmdp->c_devflag &= ~FDMCHK;
+ if(cmdp->c_devflag & FDWAIT){
+ cmdp->c_devflag &= ~FDWAIT;
+ wakeup(&cmdp->c_devflag);
+ }
+}
+/*****************************************************************************
+ *
+ * TITLE: m765io
+ *
+ * ABSTRACT: Start handling an I/O request.
+ *
+ ****************************************************************************/
+m765io(uip)
+struct unit_info *uip;
+{
+ extern int(m765iosub)();
+ register struct buf *bp;
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+
+ bp = uip->av_forw; /*move bp to ctrl_info[ctrl].b_buf*/
+ cip->b_buf = bp;
+ cip->b_uip = uip;
+ cip->b_xferaddr = bp->b_un.b_addr;
+ cip->b_xfercount = bp->b_bcount - bp->b_resid;
+ cip->b_sector = bp->b_pfcent;
+ uip->b_cmd->c_stsflag |= MTRFLAG;
+ if(!mtr_start(uip))
+ timeout(m765iosub, uip, HZ);
+ else
+ m765iosub(uip);
+}
+/****************************************************************************
+ *
+ * m765io subroutine
+ *
+ ****************************************************************************/
+m765iosub(uip)
+struct unit_info *uip;
+{
+ struct fddrtab *dr = &uip->d_drtab;
+ int startsec;
+ int slave = uip->dev->slave;
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ rwcmdset(uip);
+ if(cip->b_buf->b_flags&B_FORMAT)
+ goto skipchk;
+ startsec = (cmdp->c_rwdata[3] * dr->dr_nsec) + cmdp->c_rwdata[4];
+ if(startsec+(cip->b_xfercount>>9)-1 > dr->dr_spc)
+ cip->b_xferdma = (dr->dr_spc-startsec+1) << 9;
+ else
+skipchk: cip->b_xferdma = cip->b_xfercount;
+ if(!(cmdp->c_rbmtr & (1<<(RBSHIFT+slave))))
+ cip->b_status = rbirate(uip);
+ else if(uip->b_seekaddr != cmdp->c_saddr)
+ cip->b_status = fdiseek(uip,cmdp->c_saddr);
+ else
+ cip->b_status = outicmd(uip);
+ if(cip->b_status)
+ intrerr0(uip);
+ return;
+}
+/***************************************************************************
+ *
+ * read / write / format / verify command set to command table
+ *
+ ***************************************************************************/
+rwcmdset(uip)
+struct unit_info *uip;
+{
+ short resid;
+ int slave = uip->dev->slave;
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ switch(cip->b_buf->b_flags&(B_FORMAT|B_VERIFY|B_READ|B_WRITE)){
+ case B_VERIFY|B_WRITE: /* VERIFY after WRITE */
+ cmdp->c_rwdata[0] = RDMV;
+ break;
+ case B_FORMAT:
+ cmdp->c_dcount = FMTCNT;
+ cmdp->c_rwdata[0] = FMTM;
+ cmdp->c_saddr = cip->b_sector / uip->d_drtab.dr_spc;
+ resid = cip->b_sector % uip->d_drtab.dr_spc;
+ cmdp->c_rwdata[1] = slave|((resid/uip->d_drtab.dr_nsec)<<2);
+ cmdp->c_rwdata[2] =
+ ((struct fmttbl *)cip->b_buf->b_un.b_addr)->s_type;
+ cmdp->c_rwdata[3] = uip->d_drtab.dr_nsec;
+ cmdp->c_rwdata[4] = uip->d_drtab.dr_fgpl;
+ cmdp->c_rwdata[5] = FMTDATA;
+ break;
+ case B_WRITE:
+ case B_READ:
+ case B_READ|B_VERIFY:
+ cmdp->c_dcount = RWCNT;
+ if(cip->b_buf->b_flags&B_READ)
+ if(cip->b_buf->b_flags&B_VERIFY)
+ cmdp->c_rwdata[0] = RDMV;
+ else
+ cmdp->c_rwdata[0] = RDM;
+ else
+ cmdp->c_rwdata[0] = WTM; /* format or write */
+ resid = cip->b_sector % uip->d_drtab.dr_spc;
+ cmdp->c_rwdata[3] = resid / uip->d_drtab.dr_nsec;
+ cmdp->c_rwdata[1] = slave|(cmdp->c_rwdata[3]<<2);
+ cmdp->c_rwdata[2] = cmdp->c_saddr =
+ cip->b_sector / uip->d_drtab.dr_spc;
+ cmdp->c_rwdata[4] = (resid % uip->d_drtab.dr_nsec) + 1;
+ cmdp->c_rwdata[5] = 2;
+ cmdp->c_rwdata[6] = uip->d_drtab.dr_nsec;
+ cmdp->c_rwdata[7] = uip->d_drtab.dr_rwgpl;
+ cmdp->c_rwdata[8] = DTL;
+ D(printf("SET %x %x C%x H%x S%x %x %x %x %x ",
+ cmdp->c_rwdata[0], cmdp->c_rwdata[1],
+ cmdp->c_rwdata[2], cmdp->c_rwdata[3],
+ cmdp->c_rwdata[4], cmdp->c_rwdata[5],
+ cmdp->c_rwdata[6], cmdp->c_rwdata[7],
+ cmdp->c_rwdata[8]));
+ break;
+ }
+}
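+/*
+ * Illustrative note (interpretation, not from the original source):
+ * c_rwdata[] as filled in above follows the usual uPD765/82077 read/write
+ * command layout -- [0] command byte, [1] head/drive select, [2] cylinder,
+ * [3] head, [4] starting sector, [5] bytes-per-sector code (2 == 512),
+ * [6] end-of-track, [7] gap length, [8] data length (DTL) -- which is how
+ * the "C%x H%x S%x" debug printf above labels fields [2]..[4].
+ */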
+/*****************************************************************************
+ *
+ * TITLE: fdread
+ *
+ * ABSTRACT: "Raw" read. Use physio().
+ *
+ * CALLS: m765breakup (indirectly, thru physio)
+ *
+ ****************************************************************************/
+fdread(dev, uio)
+register dev_t dev;
+struct uio *uio;
+{
+#ifdef MACH_KERNEL
+ /* no need for page-size restriction */
+ return (block_io(fdstrategy, minphys, uio));
+#else MACH_KERNEL
+ return(physio(fdstrategy,&fdrbuf[UNIT(dev)],dev,B_READ,fdminphys,uio));
+#endif MACH_KERNEL
+}
+/*****************************************************************************
+ *
+ * TITLE: fdwrite
+ *
+ * ABSTRACT: "Raw" write. Use physio().
+ *
+ * CALLS: m765breakup (indirectly, thru physio)
+ *
+ ****************************************************************************/
+fdwrite(dev, uio)
+register dev_t dev;
+struct uio *uio;
+{
+#ifdef MACH_KERNEL
+ /* no need for page-size restriction */
+ return (block_io(fdstrategy, minphys, uio));
+#else MACH_KERNEL
+ return(physio(fdstrategy,&fdrbuf[UNIT(dev)],dev,B_WRITE,fdminphys,uio));
+#endif MACH_KERNEL
+}
+/*****************************************************************************
+ *
+ * TITLE: fdminphys
+ *
+ * ABSTRACT: Trim buffer length if buffer-size is bigger than page size
+ *
+ * CALLS: physio
+ *
+ ****************************************************************************/
+fdminphys(bp)
+struct buf *bp;
+{
+ if (bp->b_bcount > PAGESIZ)
+ bp->b_bcount = PAGESIZ;
+}
+#ifdef MACH_KERNEL
+/* IOC_OUT only and not IOC_INOUT */
+io_return_t fdgetstat(dev, flavor, data, count)
+ dev_t dev;
+ int flavor;
+ int * data; /* pointer to OUT array */
+ unsigned int *count; /* OUT */
+{
+ switch (flavor) {
+
+ /* Mandatory flavors */
+
+ case DEV_GET_SIZE: {
+ int ret;
+ struct disk_parms p;
+
+ ret = fd_getparms(dev, &p);
+ if (ret) return ret;
+ data[DEV_GET_SIZE_DEVICE_SIZE] = p.dp_pnumsec * NBPSCTR;
+ data[DEV_GET_SIZE_RECORD_SIZE] = NBPSCTR;
+ *count = DEV_GET_SIZE_COUNT;
+ break;
+ }
+
+ /* Extra flavors */
+
+ case V_GETPARMS:
+ if (*count < sizeof (struct disk_parms)/sizeof (int))
+ return (D_INVALID_OPERATION);
+ *count = sizeof (struct disk_parms)/sizeof(int);
+ return (fd_getparms(dev, data));
+ default:
+ return (D_INVALID_OPERATION);
+ }
+}
+/* IOC_VOID or IOC_IN or IOC_INOUT */
+/*ARGSUSED*/
+io_return_t fdsetstat(dev, flavor, data, count)
+ dev_t dev;
+ int flavor;
+ int * data;
+ unsigned int count;
+{
+ int unit = UNIT(dev);
+ switch (flavor) {
+ case V_SETPARMS: /* Caller wants reset_parameters */
+ return(fd_setparms(unit,*(int *)data));
+ case V_FORMAT:
+ return(fd_format(dev,data));
+ case V_VERIFY: /* cmdarg : 0 == no verify, 0 != verify */
+ m765verify[unit] = *(int *)data;
+ return(D_SUCCESS);
+ default:
+ return(D_INVALID_OPERATION);
+ }
+}
+
+/*
+ * Get block size
+ */
+int
+fddevinfo(dev, flavor, info)
+dev_t dev;
+int flavor;
+char *info;
+{
+ register struct fddrtab *dr;
+ register struct fdpart *p;
+ register int result = D_SUCCESS;
+
+ switch (flavor) {
+ case D_INFO_BLOCK_SIZE:
+ dr = &unit_info[UNIT(dev)].d_drtab;
+
+ if(dr->dr_type & OKTYPE)
+ *((int *) info) = 512;
+ else
+ result = D_INVALID_OPERATION;
+
+ break;
+ default:
+ result = D_INVALID_OPERATION;
+ }
+
+ return(result);
+}
+#else MACH_KERNEL
+/*****************************************************************************
+ *
+ * TITLE: fdioctl
+ *
+ * ABSTRACT: m765 driver special functions.
+ *
+ * CALLING ROUTINES: kernel
+ *
+ ****************************************************************************/
+int
+fdioctl(dev, cmd, cmdarg, flag)
+dev_t dev; /* major, minor numbers */
+int cmd; /* command code */
+int *cmdarg; /* user structure with parameters */
+int flag; /* not used */
+{
+ register unsigned unit = UNIT(dev);
+ switch (cmd) {
+ case V_SETPARMS: /* Caller wants reset_parameters */
+ return(fd_setparms(unit,*cmdarg));
+ case V_GETPARMS: /* Caller wants device parameters */
+ return(fd_getparms(dev,cmdarg));
+ case V_FORMAT:
+ return(fd_format(dev,cmdarg));
+ case V_VERIFY: /* cmdarg : 0 == no verify, 0 != verify */
+ m765verify[unit] = *cmdarg;
+ return(0);
+ }
+ return(EINVAL);
+}
+#endif MACH_KERNEL
+/****************************************************************************
+ *
+ * set fd parameters
+ *
+ ****************************************************************************/
+int
+fd_setparms(unit, cmdarg)
+register unsigned int unit;
+long cmdarg;
+{
+ struct fddrtab *fdparm;
+ spl_t x;
+ struct unit_info *uip = &unit_info[unit];
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ cmdp->c_rbmtr &= ~(1<<(RBSHIFT+uip->dev->slave));
+ if ((fdparm = getparm(MEDIATYPE(cmdarg))) == (struct fddrtab *)ERROR)
+ return(EINVAL);
+ x = SPL();
+ openchk(cmdp);
+ cmdp->c_devflag |= FDMCHK;
+ chkbusy(cmdp);
+ m765sweep(uip, fdparm);
+ uip->d_drtab.dr_type |= OKTYPE;
+ openfre(cmdp);
+ splx(x);
+ return(0);
+}
+/****************************************************************************
+ *
+ * get fd parameters
+ *
+ ****************************************************************************/
+int
+fd_getparms(dev,cmdarg)
+dev_t dev; /* major, minor numbers */
+int *cmdarg;
+{
+ struct disk_parms *diskp = (struct disk_parms *)cmdarg;
+ register struct fddrtab *dr = &unit_info[UNIT(dev)].d_drtab;
+
+ if(dr->dr_type & OKTYPE){
+ diskp->dp_type = DPT_FLOPPY;
+ diskp->dp_heads = 2;
+ diskp->dp_sectors = dr->dr_nsec;
+ diskp->dp_pstartsec = 0;
+ diskp->dp_cyls = dr->dr_ncyl;
+ diskp->dp_pnumsec = dr->p_nsec;
+ return(0);
+ }
+ return(ENXIO);
+}
+/****************************************************************************
+ *
+ * format command
+ *
+ ****************************************************************************/
+fd_format(dev,cmdarg)
+dev_t dev; /* major, minor numbers */
+int *cmdarg;
+
+{
+ register struct buf *bp;
+ register daddr_t track;
+ union io_arg *varg;
+ u_short num_trks;
+ register struct fddrtab *dr = &unit_info[UNIT(dev)].d_drtab;
+
+ if(!(dr->dr_type & OKTYPE))
+ return(EINVAL);
+ varg = (union io_arg *)cmdarg;
+ num_trks = varg->ia_fmt.num_trks;
+ track = (daddr_t)(varg->ia_fmt.start_trk*dr->dr_nsec);
+ if((track + (num_trks*dr->dr_nsec))>dr->p_nsec)
+ return(EINVAL);
+ bp = geteblk(BLKSIZE); /* get struct buf area */
+ while (num_trks>0) {
+ bp->b_flags &= ~B_DONE;
+ bp->b_dev = dev;
+ bp->b_error = 0; bp->b_resid = 0;
+ bp->b_flags = B_FORMAT;
+ bp->b_bcount = dr->dr_nsec * FMTID;
+ bp->b_blkno = (daddr_t)((track << 9) / NBPSCTR);
+ if(makeidtbl(bp->b_un.b_addr,dr,
+ varg->ia_fmt.start_trk++,varg->ia_fmt.intlv))
+ return(EINVAL);
+ fdstrategy(bp);
+ biowait(bp);
+ if(bp->b_error)
+ if((bp->b_error == (char)EBBHARD) ||
+ (bp->b_error == (char)EBBSOFT))
+ return(EIO);
+ else
+ return(bp->b_error);
+ num_trks--;
+ track += dr->dr_nsec;
+ }
+ brelse(bp);
+ return(0);
+}
+/****************************************************************************
+ *
+ * make id table for format
+ *
+ ****************************************************************************/
+makeidtbl(tblpt,dr,track,intlv)
+struct fmttbl *tblpt;
+struct fddrtab *dr;
+unsigned short track;
+unsigned short intlv;
+{
+ register int i,j,secno;
+
+ if(intlv >= dr->dr_nsec)
+ return(1);
+ for(i=0; i<dr->dr_nsec; i++)
+ tblpt[i].sector = 0;
+ for(i=0,j=0,secno=1; i<dr->dr_nsec; i++){
+ tblpt[j].cyl = track >> 1;
+ tblpt[j].head = track & 1;
+ tblpt[j].sector = secno++;
+ tblpt[j].s_type = 2;
+ if((j+=intlv) < dr->dr_nsec)
+ continue;
+ for(j-=dr->dr_nsec; j < dr->dr_nsec ; j++)
+ if(!tblpt[j].sector)
+ break;
+ }
+ return(0);
+}
+/*****************************************************************************
+ *
+ * TITLE: fdintr
+ *
+ * ABSTRACT: Handle interrupt.
+ *
+ * Interrupt procedure for m765 driver. Gets status of last
+ * operation and performs service function according to the
+ * type of interrupt. If it was an operation complete interrupt,
+ * switches on the current driver state and either declares the
+ * operation done, or starts the next operation
+ *
+ ****************************************************************************/
+fdintr(ctrl)
+int ctrl;
+{
+ extern int(m765intrsub)();
+ struct unit_info *uip = ctrl_info[ctrl].b_uip;
+ struct unit_info *wup = ctrl_info[ctrl].b_wup;
+ struct fdcmd *cmdp = &ctrl_info[ctrl].b_cmd;
+ if(cmdp->c_stsflag & INTROUT)
+ untimeout(fdintr, ctrl);
+ cmdp->c_stsflag &= ~INTROUT;
+ switch(cmdp->c_intr){
+ case RWFLAG:
+ rwintr(uip);
+ break;
+ case SKFLAG:
+ case SKEFLAG|SKFLAG:
+ case RBFLAG:
+ timeout(m765intrsub, uip, SEEKWAIT);
+ break;
+ case WUPFLAG:
+ cmdp->c_intr &= ~WUPFLAG;
+ wakeup(wup);
+ }
+ return(0);
+}
+/*****************************************************************************
+ *
+ * interrupt subroutine (seek / recalibrate)
+ *
+ *****************************************************************************/
+m765intrsub(uip)
+struct unit_info *uip;
+{
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+
+ if((cip->b_status = sis(uip))!= ST0OK)
+ switch(uip->b_cmd->c_intr){
+ case SKFLAG:
+ seekintr(uip);
+ break;
+ case SKEFLAG|SKFLAG:
+ seekintre(uip);
+ break;
+ case RBFLAG:
+ rbintr(uip);
+ }
+}
+/*****************************************************************************
+ *
+ * read / write / format / verify interrupt routine
+ *
+ *****************************************************************************/
+rwintr(uip)
+struct unit_info *uip;
+{
+ int rsult[7];
+ register int rtn, count;
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ cmdp->c_intr &= ~RWFLAG;
+ if((cip->b_buf->b_flags&(B_READ|B_VERIFY))!=(B_READ|B_VERIFY))
+ if(inb(VFOREG(uip->addr))&OPENBIT){
+ if(cip->b_buf->b_flags&B_FORMAT){
+ cip->b_status = TIMEOUT;
+ intrerr0(uip);
+ } else {
+ if((inb(STSREG(uip->addr))&ST0OK)!=ST0OK)
+ printf("%s %d : %s\n",
+ fderr,
+ uip-unit_info,
+ fdmsg[DOORERR]);
+ rstout(uip);
+ specify(uip);
+ cmdp->c_rbmtr &= RBRST;
+ cmdp->c_intr |= SKEFLAG;
+ if(cmdp->c_saddr > 2)
+ fdiseek(uip, cmdp->c_saddr-2);
+ else
+ fdiseek(uip, cmdp->c_saddr+2);
+ }
+ return;
+ }
+ for( count = 0 ; count < 7 ; count++ ){
+ if(rtn = fdc_sts(FD_ISTS, uip)) /* status check */
+ goto rwend;
+ rsult[count] = inb(DATAREG(uip->addr));
+ }
+ rtn = 0;
+ if(rsult[0]&0xc0){
+ rtn = cmdp->c_rwdata[0]<<8;
+ if(rsult[0]&0x80){ rtn |= FDCERR; goto rwend; }
+ if(rsult[1]&0x80){ rtn |= NOREC; goto rwend; }
+ if(rsult[1]&0x20){ rtn |= CRCERR; goto rwend; }
+ if(rsult[1]&0x10){ rtn |= OVERRUN; goto rwend; }
+ if(rsult[1]&0x04){ rtn |= NOREC; goto rwend; }
+ if(rsult[1]&0x02){ rtn |= WTPRT; goto rwend; }
+ if(rsult[1]&0x01){ rtn |= ADDRERR; goto rwend; }
+ rtn |= FDCERR;
+rwend: outb(0x0a, 0x06);
+ }
+ if(cip->b_status = rtn) {
+ D(printf("\n->rwierr %x ", rtn));
+ rwierr(uip);
+ } else { /* write command */
+ if(((cip->b_buf->b_flags&(B_FORMAT|B_READ|B_WRITE))==B_WRITE)
+ && !(cip->b_buf->b_flags & B_VERIFY)) {
+ D(printf("->w/v "));
+ cip->b_buf->b_flags |= B_VERIFY;
+ rwcmdset(uip);
+ if(cip->b_status = outicmd(uip))
+ intrerr0(uip);
+ return;
+ }
+ /* clear retry count */
+ if (cip->usebuf) {
+ bcopy(cip->b_vbuf, cip->b_xferaddr, cip->b_xferdma);
+ DD(printf("R(%x, %x, %x)\n",
+ cip->b_vbuf, cip->b_xferaddr, cip->b_xferdma));
+ }
+ cip->b_buf->b_flags &= ~B_VERIFY;
+ cip->b_rwerr = cip->b_seekerr = cip->b_rberr = 0;
+ cip->b_xfercount -= cip->b_xferdma;
+ cip->b_xferaddr += cip->b_xferdma;
+ cip->b_sector = cip->b_sector+(cip->b_xferdma>>9);
+ D(printf("->done%s\n", cip->b_xfercount?"":"." ));
+ /* next address (cyl,head,sec) */
+ if((int)cip->b_xfercount>0)
+ m765iosub(uip);
+ else
+ quechk(uip);
+ }
+}
+/*****************************************************************************
+ *
+ * read / write / format / verify error routine
+ *
+ *****************************************************************************/
+rwierr(uip)
+struct unit_info *uip;
+{
+ short status;
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ D(printf("%x-%x-%x ", cip->b_rwerr&SRMASK, cip->b_rwerr&MRMASK, cip->b_rwerr&LRMASK));
+ if((cip->b_buf->b_flags&(B_READ|B_VERIFY))==(B_READ|B_VERIFY)){
+ if((cip->b_rwerr&SRMASK)<MEDIARD)
+ goto rwrtry;
+ if((cip->b_rwerr&MRMASK)<MEDIASEEK)
+ goto rwseek;
+ goto rwexit;
+ } else
+ if(cip->b_buf->b_flags&B_VERIFY){
+ cip->b_buf->b_flags &= ~B_VERIFY;
+ rwcmdset(uip);
+ }
+rwrtry: status = cip->b_status;
+ if((++cip->b_rwerr&SRMASK)<SRETRY)
+ cip->b_status = outicmd(uip);
+ else {
+rwseek: cip->b_rwerr = (cip->b_rwerr&RMRMASK)+MINC;
+ if((cip->b_rwerr&MRMASK)<MRETRY){
+ cmdp->c_intr |= SKEFLAG;
+ if(cmdp->c_saddr > 2)
+ cip->b_status=fdiseek(uip,cmdp->c_saddr-2);
+ else
+ cip->b_status=fdiseek(uip,cmdp->c_saddr+2);
+ } else {
+ cip->b_rwerr = (cip->b_rwerr&LRMASK)+LINC;
+ if((cip->b_rwerr&LRMASK)<LRETRY)
+ cip->b_status=rbirate(uip);
+ }
+ }
+ if(cip->b_status){
+ D(printf("ERR->intrerr0 "));
+ cip->b_status = status;
+rwexit: intrerr0(uip);
+ }
+}
+/*****************************************************************************
+ *
+ * recalibrate interrupt routine
+ *
+ *****************************************************************************/
+rbintr(uip)
+struct unit_info *uip;
+{
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ cmdp->c_intr &= ~RBFLAG;
+ if(cip->b_status) {
+ if(++cip->b_rberr<SRETRY)
+ cip->b_status = rbirate(uip);
+ } else {
+ cmdp->c_rbmtr |= 1<<(RBSHIFT+uip->dev->slave);
+ uip->b_seekaddr = 0;
+ cip->b_rberr = 0;
+ cip->b_status=fdiseek(uip, cmdp->c_saddr);
+ }
+ if(cip->b_status)
+ intrerr0(uip);
+}
+/******************************************************************************
+ *
+ * seek interrupt routine
+ *
+ *****************************************************************************/
+seekintr(uip)
+struct unit_info *uip;
+{
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ cmdp->c_intr &= ~SKFLAG;
+ if(cip->b_status)
+ seekierr(uip, cmdp->c_saddr);
+ else {
+ uip->b_seekaddr = cmdp->c_saddr;
+ cip->b_status = outicmd(uip);
+ }
+ if(cip->b_status)
+ intrerr0(uip);
+ else
+ cip->b_seekerr = 0;
+}
+/*****************************************************************************
+ *
+ * seek error retry interrupt routine
+ *
+ *****************************************************************************/
+seekintre(uip)
+struct unit_info *uip;
+{
+ register char seekpoint;
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ cmdp->c_intr &= ~(SKEFLAG|SKFLAG);
+ if(cmdp->c_saddr > 2)
+ seekpoint = cmdp->c_saddr-2;
+ else
+ seekpoint = cmdp->c_saddr+2;
+ if(cip->b_status)
+ seekierr(uip, seekpoint);
+ else {
+ uip->b_seekaddr = seekpoint;
+ cip->b_status = fdiseek(uip, cmdp->c_saddr);
+ }
+ if(cip->b_status)
+ intrerr0(uip);
+ else
+ cip->b_seekerr = 0;
+}
+/*****************************************************************************
+ *
+ * seek error routine
+ *
+ *****************************************************************************/
+seekierr(uip, seekpoint)
+struct unit_info *uip;
+register char seekpoint;
+{
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+
+ if((++cip->b_seekerr&SRMASK)<SRETRY)
+ cip->b_status=fdiseek(uip, seekpoint);
+ else {
+ cip->b_seekerr = (cip->b_seekerr&MRMASK) + MINC;
+ if((cip->b_seekerr&MRMASK)<MRETRY)
+ cip->b_status=rbirate(uip);
+ }
+ if(cip->b_status)
+ intrerr0(uip);
+}
+/*****************************************************************************
+ *
+ * TITLE: m765sweep
+ *
+ * ABSTRACT: Perform an initialization sweep.
+ *
+ **************************************************************************/
+m765sweep(uip, cdr)
+struct unit_info *uip;
+register struct fddrtab *cdr; /* device initialization data */
+{
+ register struct fddrtab *dr = &uip->d_drtab;
+
+ dr->dr_ncyl = cdr->dr_ncyl;
+ dr->dr_nsec = cdr->dr_nsec;
+ dr->dr_spc = cdr->dr_spc;
+ dr->p_nsec = cdr->p_nsec;
+ dr->dr_type = cdr->dr_type;
+ dr->dr_rwgpl= cdr->dr_rwgpl;
+ dr->dr_fgpl = cdr->dr_fgpl;
+}
+/*****************************************************************************
+ *
+ *	TITLE:	fd_disksort
+ *
+ *****************************************************************************/
+fd_disksort(uip, bp)
+struct unit_info *uip; /* Pointer to head of active queue */
+register struct buf *bp; /* Pointer to buffer to be inserted */
+{
+ register struct buf *bp2; /* Pointer to next buffer in queue */
+ register struct buf *bp1; /* Pointer where to insert buffer */
+
+ if (!(bp1 = uip->av_forw)) {
+ /* No other buffers to compare against */
+ uip->av_forw = bp;
+ bp->av_forw = 0;
+ return;
+ }
+ bp2 = bp1->av_forw;
+ while(bp2 && (relative(bp1->b_pfcent,bp->b_pfcent) >=
+ relative(bp1->b_pfcent,bp2->b_pfcent))) {
+ bp1 = bp2;
+ bp2 = bp1->av_forw;
+ }
+ bp1->av_forw = bp;
+ bp->av_forw = bp2;
+}
+/*****************************************************************************
+ *
+ * Set Interrupt error and FDC reset
+ *
+ *****************************************************************************/
+intrerr0(uip)
+struct unit_info *uip;
+{
+ struct buf *bp; /* Pointer to next buffer in queue */
+ int resid;
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct fdcmd *cmdp = uip->b_cmd;
+ register struct fddrtab *dr = &uip->d_drtab;
+
+ if((cip->b_buf->b_flags&(B_READ|B_VERIFY))!=(B_READ|B_VERIFY)){
+ resid = cip->b_xfercount = cip->b_xferdma-1-inb(DMACNT)*0x101;
+ resid = (cip->b_sector + (resid>>9)) % dr->dr_spc;
+ printf("%s %d : %s\n",
+ fderr,
+ uip->dev->slave,
+ fdmsg[cip->b_status&BYTEMASK]);
+ printf("cylinder = %d ",cmdp->c_saddr);
+ printf("head = %d sector = %d byte/sec = %d\n",
+ resid / dr->dr_nsec , (resid % dr->dr_nsec)+1 , 512);
+ }
+ cip->b_rwerr = cip->b_seekerr = cip->b_rberr = 0;
+ cmdp->c_intr = CMDRST;
+ if(((cip->b_buf->b_flags&(B_READ|B_VERIFY))!=(B_READ|B_VERIFY)) &&
+ uip->dev->slave)
+ dr->dr_type &= ~OKTYPE;
+ bp = cip->b_buf;
+ bp->b_flags |= B_ERROR;
+ switch(cip->b_status&BYTEMASK){
+ case ADDRERR:
+ case OVERRUN:
+ case FDCERR:
+ case TIMEOUT:
+ bp->b_error = EIO;
+ break;
+ case WTPRT:
+#ifdef MACH_KERNEL
+ bp->b_error = ENXIO;
+#else
+ bp->b_error = ENODEV;
+#endif
+ break;
+ case NOREC:
+ bp->b_error = EBBHARD;
+ break;
+ case CRCERR:
+ bp->b_error = EBBSOFT;
+ }
+ rstout(uip);
+ specify(uip);
+ cmdp->c_rbmtr &= RBRST;
+ quechk(uip);
+}
+/*****************************************************************************
+ *
+ * Next queue check routine
+ *
+ *****************************************************************************/
+quechk(uip)
+struct unit_info *uip;
+{
+ register struct buf *bp = uip->av_forw;
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct unit_info *loop;
+ struct fdcmd *cmdp = uip->b_cmd;
+ /* clear retry count */
+ cip->b_rwerr = cip->b_seekerr = cip->b_rberr = 0;
+ bp->b_resid = bp->b_resid + cip->b_xfercount;
+ uip->av_forw=bp->av_forw;
+ if (!uip->av_forw && uip->wakeme) {
+ uip->wakeme = 0;
+ wakeup(uip);
+ }
+ biodone(bp);
+ loop = uip;
+ do {
+ loop=loop->b_unitf;
+ if (loop->av_forw) {
+ m765io(loop);
+ return;
+ }
+ } while (loop!=uip);
+ cip->b_uip = 0;
+ cmdp->c_stsflag &= ~MTRFLAG;
+ mtr_on(uip);
+ cmdp->c_devflag &= ~STRCHK;
+ if(cmdp->c_devflag & STRWAIT){
+ cmdp->c_devflag &= ~STRWAIT;
+ wakeup(&cmdp->c_devflag);
+ }
+}
+fdprint(dev,str)
+dev_t dev;
+char *str;
+{
+ printf("floppy disk driver: %s on bad dev %d, partition %d\n",
+ str, UNIT(dev), 0);
+}
+fdsize()
+{
+ printf("fdsize() -- not implemented\n");
+}
+fddump()
+{
+ printf("fddump() -- not implemented\n");
+}
+/*****************************************************************************
+ *
+ * fdc reset routine
+ *
+ *****************************************************************************/
+rstout(uip)
+struct unit_info *uip;
+{
+ register int outd;
+
+ outd = ((uip->b_cmd->c_rbmtr&MTRMASK)<<MTR_ON)|uip->dev->slave;
+ outb(CTRLREG(uip->addr), outd);
+ outd |= FDC_RST;
+ outb(CTRLREG(uip->addr), outd);
+ outd |= DMAREQ;
+ outb(CTRLREG(uip->addr), outd);
+}
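+/*
+ * Illustrative note (interpretation, not from the original source): the
+ * three writes above perform the usual digital-output-register reset
+ * sequence -- first with FDC_RST (active low) clear to hold the controller
+ * in reset, then with FDC_RST set to release it, and finally with DMAREQ
+ * set to re-enable the DMA/interrupt gate.  Callers typically follow up
+ * with specify() to restore the timing parameters.
+ */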
+/*****************************************************************************
+ *
+ * specify command routine
+ *
+ *****************************************************************************/
+specify(uip)
+struct unit_info *uip;
+{
+ /* status check */
+ if(fdc_sts(FD_OSTS, uip))
+ return;
+ /* Specify command */
+ outb(DATAREG(uip->addr), SPCCMD);
+ /* status check */
+ if(fdc_sts(FD_OSTS, uip))
+ return;
+ /* Step rate,Head unload time */
+ outb(DATAREG(uip->addr), SRTHUT);
+ /* status check */
+ if(fdc_sts(FD_OSTS, uip))
+ return;
+ /* Head load time,Non DMA Mode*/
+ outb(DATAREG(uip->addr), HLTND);
+ return;
+}
+/****************************************************************************
+ *
+ * recalibrate command routine
+ *
+ ****************************************************************************/
+rbrate(mtype,uip)
+char mtype;
+struct unit_info *uip;
+{
+ register int rtn = 1, rty_flg=2;
+ spl_t x;
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ rbskrate(uip, mtype); /* set transfer rate */
+ while((rty_flg--)&&rtn){
+ if(rtn = fdc_sts(FD_OSTS, uip)) /* status check */
+ break;
+ /*recalibrate command*/
+ outb(DATAREG(uip->addr), RBCMD);
+ if(rtn = fdc_sts(FD_OSTS, uip)) /* status check */
+ break;
+ /* Device to wake up specified in open */
+ cmdp->c_intr |= WUPFLAG;
+ x = SPL();
+ outb(DATAREG(uip->addr), uip->dev->slave);
+ rtn = ERROR;
+ while(rtn) {
+ uip->wakeme = 1;
+ sleep(uip, PZERO);
+ if((rtn = sis(uip)) == ST0OK)
+ /* Device to wake up specified in open */
+ cmdp->c_intr |= WUPFLAG;
+ else
+ break;
+ }
+ splx(x);
+ }
+ return(rtn);
+}
+/*****************************************************************************
+ *
+ * seek command routine
+ *
+ ****************************************************************************/
+fdseek(mtype, uip, cylno)
+register char mtype;
+struct unit_info *uip;
+register int cylno;
+{
+ spl_t x;
+ int rtn;
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ rbskrate(uip, mtype);
+ if(rtn = fdc_sts(FD_OSTS, uip)) /* status check */
+ return(rtn);
+ outb(DATAREG(uip->addr), SEEKCMD); /* seek command */
+ if(rtn = fdc_sts(FD_OSTS, uip)) /* status check */
+ return(rtn);
+ outb(DATAREG(uip->addr), uip->dev->slave); /* drive number */
+ if(rtn = fdc_sts(FD_OSTS, uip)) /* status check */
+ return(rtn);
+ x = SPL();
+ /* Device to wake up specified in open */
+ cmdp->c_intr |= WUPFLAG;
+ outb(DATAREG(uip->addr), cylno); /* seek count */
+ rtn = ERROR;
+ while(rtn){
+ uip->wakeme = 1;
+ sleep(uip, PZERO);
+ if((rtn = sis(uip)) == ST0OK)
+ /* Device to wake up specified in open */
+ cmdp->c_intr |= WUPFLAG;
+ else
+ break;
+ }
+ splx(x);
+ return(rtn);
+}
+/*****************************************************************************
+ *
+ * seek command routine (use interrupt)
+ *
+ *****************************************************************************/
+fdiseek(uip, cylno)
+struct unit_info *uip;
+int cylno;
+{
+ register int rtn;
+
+ D(printf("SK %x ", cylno));
+ rbskrate(uip, uip->d_drtab.dr_type);/* set transfer rate */
+ if(rtn = fdc_sts(FD_OSTS, uip)) /* status check */
+ goto fdiend;
+ outb(DATAREG(uip->addr), SEEKCMD); /* seek command */
+ if(rtn = fdc_sts(FD_OSTS, uip)) /* status check */
+ goto fdiend;
+ outb(DATAREG(uip->addr), uip->dev->slave); /* drive number */
+ if(rtn = fdc_sts(FD_OSTS, uip)) /* status check */
+ goto fdiend;
+ uip->b_seekaddr = cylno;
+ if(uip->d_drtab.dr_type&DOUBLE)
+ cylno = cylno * 2;
+ uip->b_cmd->c_intr |= SKFLAG;
+ outb(DATAREG(uip->addr), cylno); /* seek count */
+fdiend:
+ if(rtn)
+ rtn |= SEEKCMD<<8;
+ return(rtn);
+}
+/*****************************************************************************
+ *
+ * recalibrate command routine(use interrupt)
+ *
+ *****************************************************************************/
+rbirate(uip)
+struct unit_info *uip;
+{
+ register int rtn;
+
+ rbskrate(uip, uip->d_drtab.dr_type);/* set transfer rate */
+ if(!(rtn = fdc_sts(FD_OSTS, uip))) { /* status check */
+ /* recalibrate command */
+ outb(DATAREG(uip->addr), RBCMD);
+ if(!(rtn = fdc_sts(FD_OSTS, uip))) { /* status check */
+ uip->b_cmd->c_intr |= RBFLAG;
+ outb(DATAREG(uip->addr), uip->dev->slave);
+ }
+ }
+ return(rtn ? rtn|RBCMD<<8 : 0);
+}
+/*****************************************************************************
+ *
+ * read / write / format / verify command out routine(use interrupt)
+ *
+ *****************************************************************************/
+outicmd(uip)
+struct unit_info *uip;
+{
+ int rtn;
+ register int *data,cnt0,dmalen;
+ register long address;
+ struct ctrl_info *cip = &ctrl_info[uip->dev->ctlr];
+ struct fdcmd *cmdp = uip->b_cmd;
+ spl_t x = splhi();
+
+ outb(DMACMD1,DMADATA0); /* DMA #1 command register */
+ outb(DMAMSK1,DMADATA1); /* DMA #1 all mask register */
+	/* Perhaps outb(0x0a, 0x02) would work better on the line above? */
+ switch(cmdp->c_rwdata[0]){
+ case RDM:
+ D(printf("RDM"));
+ outb(DMABPFF,DMARD);
+ outb(DMAMODE,DMARD);
+ break;
+ case WTM:
+ case FMTM:
+ D(printf("W"));
+ outb(DMABPFF,DMAWT);
+ outb(DMAMODE,DMAWT);
+ break;
+ case RDMV:
+ D(printf("RDMV"));
+ outb(DMABPFF,DMAVRF);
+ outb(DMAMODE,DMAVRF);
+ }
+ /* get work buffer physical address */
+ address = kvtophys(cip->b_xferaddr);
+ dmalen = i386_trunc_page(address) + I386_PGBYTES - address;
+ if ( (cip->b_rwerr&MRMASK) >= 0x10)
+ dmalen = 0x200;
+ if (dmalen<=cip->b_xferdma)
+ cip->b_xferdma = dmalen;
+ else
+ dmalen = cip->b_xferdma;
+ if (address >= FdDmaThreshold) {
+ DD(printf(">(%x[%x], %x[%x] L%x\n",
+ address, cip->b_pbuf,
+ cip->b_xferaddr, cip->b_vbuf, dmalen));
+ if (!FdDmaEISA) {
+ cip->usebuf = 1;
+ address = (long)cip->b_pbuf;
+ if (cmdp->c_rwdata[0] == WTM || cmdp->c_rwdata[0] == FMTM) {
+ bcopy(cip->b_xferaddr, cip->b_vbuf, dmalen);
+ DD(printf("W(%x, %x, %x)\n",
+ cip->b_xferaddr, cip->b_vbuf, dmalen));
+ }
+ } else
+ cip->usebuf = 0;
+ } else
+ cip->usebuf = 0;
+ D(printf(" %x L%x ", address, dmalen));
+ /* set buffer address */
+ outb(DMAADDR,(int)address&BYTEMASK);
+ outb(DMAADDR,(((int)address>>8)&BYTEMASK));
+ outb(DMAPAGE,(((int)address>>16)&BYTEMASK));
+ if (FdDmaEISA)
+ outb(FdDmaEISA+DMAPAGE-0x80,(((int)address>>24)&BYTEMASK));
+ /* set transfer count */
+ outb(DMACNT,(--dmalen)&BYTEMASK);
+ outb(DMACNT,((dmalen>>8)&BYTEMASK));
+ outb(DMAMSK,CHANNEL2);
+ splx(x);
+ trfrate(uip, uip->d_drtab.dr_type); /* set transfer rate */
+ data = &cmdp->c_rwdata[0];
+ for(cnt0 = 0; cnt0<cmdp->c_dcount; cnt0++,data++){
+ if(rtn = fdc_sts(FD_OSTS, uip)) /*status check*/
+ break;
+ outb(DATAREG(uip->addr), *data);
+ }
+ if(!rtn){
+ cmdp->c_intr |= RWFLAG;
+ cmdp->c_stsflag |= INTROUT;
+ cnt0 = ((cip->b_buf->b_flags&(B_READ|B_VERIFY)) ==
+ (B_READ|B_VERIFY))?TOUT:ITOUT;
+#ifdef MACH_KERNEL
+ timeout(fdintr,uip->dev->ctlr,cnt0);
+#else MACH_KERNEL
+ cmdp->c_timeid = timeout(fdintr,uip->dev->ctlr,cnt0);
+#endif MACH_KERNEL
+ }
+ return(rtn);
+}
+/*****************************************************************************
+ *
+ * sense interrupt status routine
+ *
+ *****************************************************************************/
+sis(uip)
+struct unit_info *uip;
+{
+ register int rtn, st0;
+
+ if(rtn = fdc_sts(FD_OSTS, uip)) /* status check */
+ return(rtn);
+ outb(DATAREG(uip->addr), SISCMD);
+ if(rtn = fdc_sts(FD_ISTS, uip)) /* status check */
+ return(rtn);
+ st0 = inb(DATAREG(uip->addr)) & ST0OK; /* get st0 */
+ if(rtn = fdc_sts(FD_ISTS, uip)) /* status check */
+ return(rtn);
+ inb(DATAREG(uip->addr)); /* get pcn */
+ if (st0&(ST0AT|ST0IC))
+ st0 = FDCERR;
+ return(st0);
+}
+
+/*****************************************************************************
+ *
+ * fdc status get routine
+ *
+ *****************************************************************************/
+fdc_sts(mode, uip)
+register int mode;
+struct unit_info *uip;
+{
+ register int ind;
+ int cnt0 = STSCHKCNT;
+
+ while(cnt0--)
+ if(((ind=inb(STSREG(uip->addr))) & DATAOK) &&
+ ((ind & DTOCPU) == mode))
+ return(0);
+ return(TIMEOUT);
+}
+/*****************************************************************************
+ *
+ * motor on routine
+ *
+ *****************************************************************************/
+mtr_on(uip)
+struct unit_info *uip;
+{
+ extern int(mtr_off)();
+ extern int(wakeup)();
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ if(!(mtr_start(uip))){
+ timeout(wakeup,&cmdp->c_stsflag,HZ);
+ sleep(&cmdp->c_stsflag,PZERO);
+ }
+ cmdp->c_stsflag |= MTROFF;
+#ifdef MACH_KERNEL
+ timeout(mtr_off,uip,MTRSTOP);
+#else MACH_KERNEL
+ cmdp->c_mtrid = timeout(mtr_off,uip,MTRSTOP);
+#endif MACH_KERNEL
+}
+/*****************************************************************************
+ *
+ * motor start routine
+ *
+ *****************************************************************************/
+mtr_start(uip)
+struct unit_info *uip;
+{
+ int status;
+ int (mtr_off)();
+ struct fdcmd *cmdp = uip->b_cmd;
+ int slave = uip->dev->slave;
+ if(cmdp->c_stsflag & MTROFF){
+ untimeout(mtr_off, uip);
+ cmdp->c_stsflag &= ~MTROFF;
+ }
+ status = cmdp->c_rbmtr&(1<<slave);
+ cmdp->c_rbmtr |= (1<<slave);
+ outb(CTRLREG(uip->addr), ((cmdp->c_rbmtr&MTRMASK)<<MTR_ON)|
+ FDC_RST|slave|DMAREQ);
+ return(status);
+}
+/*****************************************************************************
+ *
+ * motor off routine
+ *
+ *****************************************************************************/
+mtr_off(uip)
+struct unit_info *uip;
+{
+ struct fdcmd *cmdp = uip->b_cmd;
+
+ cmdp->c_stsflag &= ~MTROFF;
+ if(!(cmdp->c_stsflag&MTRFLAG)){
+ cmdp->c_rbmtr &= MTRRST;
+ outb(CTRLREG(uip->addr), FDC_RST | DMAREQ);
+ }
+}
+
+#endif
diff --git a/i386/i386at/fdreg.h b/i386/i386at/fdreg.h
new file mode 100644
index 00000000..98d8d007
--- /dev/null
+++ b/i386/i386at/fdreg.h
@@ -0,0 +1,368 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/* Copyright (c) 1987, 1988 TOSHIBA Corp. */
+/* All Rights Reserved */
+
+#ident "@(#)m765.h 1.13 - 88/02/17"
+
+/*******************************************************************
+ *
+ * Toshiba Floppy Driver for UNIX System V R3
+ *
+ * June 21, 1988
+ *
+ * Intended Drive Units:
+ * Worldwide - Model No. ND-356 3.5" unformatted 2MB/1MB
+ * UNIX Media Type Name: 2HD512/2DD512/2D512/1D512.
+ *
+ * In Japan Only - Model No. ND-355 3.5" unformatted 1.6MB/1MB
+ * UNIX Media Type Name: 2HC1024/2HC512/2HC256/2DD512/2D512/1D512.
+ *
+ * Worldwide - Model No. ND-04DT-A 5.25" unformatted 500 KB
+ * UNIX Media Type Name: 2D512/1D512.
+ *
+ * In Japan Only - Model No. ND-08DE 5.25" unformatted 1.6MB/1MB
+ * UNIX Media Type Name: 2HC1024/2HC512/2HC256/2DD512/2D512/1D512.
+ *
+ * Use with other devices may require modification.
+ *
+ * Notes:
+ * For further detail regarding drive units contact
+ * Toshiba America,Inc. Disk Products Division,
+ * Irvine, CA (714) 583-3000.
+ *
+ *******************************************************************/
+
+/*
+ * fdcmd.c_rbmtr
+ *
+ * |--+--+--+--+--+--+--+--|
+ * | | | | | | | | |
+ * |--+--+--+--+--+--+--+--|
+ * ^ ^ ^ ^
+ * | | | |--- unit0 motor on flag
+ * | | |------ unit1 motor on flag
+ * | |--------------- unit0 recalibrate flag
+ * |------------------ unit1 recalibrate flag
+ */
+#define MTRMASK 0x003 /* mask motor_flag for get status */
+#define MTRRST 0x0fc /* reset motor_flag data */
+#define RBSHIFT 0x004 /* shift count for recalibrate data */
+#define RBRST 0x0cf /* reset recalibrate data */
+
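+/*
+ * Illustrative note: with the masks above, fd.c tests the motor-on bit of
+ * a drive with (c_rbmtr & (1 << slave)) and its recalibrated bit with
+ * (c_rbmtr & (1 << (RBSHIFT + slave))); MTRRST and RBRST clear the whole
+ * motor and recalibrate groups respectively.
+ */
+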
+/*
+ * fdcmd.c_intr
+ *
+ * |--+--+--+--+--+--+--+--|
+ * | | | | | | | | |
+ * |--+--+--+--+--+--+--+--|
+ * ^ ^ ^ ^ ^ ^ ^ ^
+ * reserved --+ | | | | | | +--- read/write flag
+ * reserved -----+ | | | | +------ seek flag
+ * reserved --------+ | | +------ seek flag for retry
+ * recalibrate/seek flag(for open) ----------+ +--------- recalibrate flag
+ */
+#define RWFLAG 0x001
+#define SKFLAG 0x002
+#define SKEFLAG 0x004
+#define RBFLAG 0x008
+#define WUPFLAG 0x010
+#define CMDRST 0x000
+
+/*
+ * fddrtab.dr_type
+ *
+ * +---+---+---+---+---+---+---+---+
+ * | | | | | | | | |
+ * +---+---+---+---+---+---+---+---+
+ * ^ ^ ^ ^ ^
+ * | | | | |----------- rapid seek flag
+ * |---| | | 0: normal seek
+ * | | | 1: rapid seek
+ * | | |--------------- detect format
+ * | | 0: no detect
+ * | | 1: format type OK
+ * | |------------------- 40 or 80 cylinder(for 2hc/2dd drive)
+ * | 0: 80 cylinder
+ * | 1: 40 cylinder
+ * |------------------------- transfer rate(for read/write/format)
+ * 00: 500kbps 10: 250kbps
+ * 01: 300kbps 11: reserved
+ */
+#define RPSEEK 0x00 /* rapid seek */
+#define RAPID 0x08 /* rapid seek flag */
+#define OKTYPE 0x10 /* media change flag */
+#define DOUBLE 0x20 /* double/single step change */
+#define NMSEEK 0x80 /* normal seek */
+#define RATEMASK 0xc0 /* transfer parameter mask data */
+
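+/*
+ * Illustrative worked example: trfrate() in fd.c writes
+ * ((type & RATEMASK) >> 6) to the VFO register, so the 720 KB entry
+ * (dr_type 0x88) selects rate code 2 (250 kbps) while the 1.44 MB entry
+ * (dr_type 0x08) selects rate code 0 (500 kbps); both have RAPID set.
+ */
+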
+/*
+ * device number
+ *
+ * 15 10 9 8 7 0
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | 0 0 0 0 0 0 0 1| | 0| 0| 0| 0| |
+ * +-----------+-----+-----+--+--+--+--+-----------+
+ * ^ ^ ^ ^ ^ ^
+ * |____________________| |__| |__|
+ * | | |
+ * | | |- media type
+ * major number | 0: 3.50" 720 KB
+ * |- unit number 1: 3.50" 1.44 Meg
+ * 2: 5.25" 360 KB
+ * 3: 5.25" 1.20 Meg
+ */
+#define UNIT(dev) ((dev & 0xc0)>>6) /* get unit number */
+#define MEDIATYPE(dev) (dev & 0x03) /* get media type */
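+/*
+ * Illustrative worked example: for a minor number of 0x41,
+ * UNIT(0x41) == (0x41 & 0xc0) >> 6 == 1 (the second drive) and
+ * MEDIATYPE(0x41) == 0x41 & 0x03 == 1 (the 3.50" 1.44 MB entry of m765f[]).
+ */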
+/*****************************************************************************
+
+ wait time / timeout count
+
+ *****************************************************************************/
+#define STSCHKCNT 0x2800 /* For check status */
+#define ITOUT HZ*5 /* interrupt timeout count */
+#define TOUT HZ/4 /* media type check timeout count */
+#define MTRSTOP HZ*2 /* motor off time */
+#define SEEKWAIT HZ/100*3 /* head_lock time */
+
+/******************************************************************************
+
+ define for FDC
+
+ ******************************************************************************/
+/* FDC register */
+#define CTRLREG(ADDR)	(ADDR)		/* control register */
+#define STSREG(ADDR) ((ADDR)+2) /* status register */
+#define DATAREG(ADDR) ((ADDR)+3) /* data register */
+#define VFOREG(ADDR) ((ADDR)+5) /* vfo register */
+
+/* CTRLREG flags */
+#define FDC_RST 0x04
+#define MTR_ON 0x04
+#define DMAREQ 0x08
+#define RDY 0x40
+#define BSY 0x80
+
+/* status for command_out */
+#define FD_OSTS 0x00 /* For output check */
+#define FD_ISTS 0x40 /* For input check */
+#define DTOCPU 0x40
+#define DATAOK 0x80
+
+/* Command for FDC */
+#define SPCCMD 0x03 /* Specify command */
+#define RBCMD 0x07 /* Recalibrate command */
+#define SISCMD 0x08 /* Sense interrupt status command */
+#define SEEKCMD 0x0f /* seek command */
+#define RDM 0xe6 /* FDC READ command */
+#define RDMV 0x42e6 /* VERIFY READ command */
+#define WTM 0xc5 /* FDC WRITE command */
+#define FMTM 0x4d /* FDC FORMAT command */
+#define FMTDATA 0x5e /* format data */
+
+/* check value */
+#define OPENBIT 0x80 /* VFO check define */
+#define BYTEMASK 0xff
+
+/* FDC error code define */
+#define ERROR 0xff
+#define EBBHARD 128
+#define EBBSOFT 129
+#define ST0AT 0x40
+#define ST0IC 0x80
+#define ST0OK 0xc0
+#define ADDRERR 0x01
+#define WTPRT 0x02
+#define NOREC 0x03
+#define OVERRUN 0x04
+#define CRCERR 0x05
+#define FDCERR 0x06
+#define TIMEOUT 0x08
+#define DOORERR 0x09
+
+/******************************************************************************
+
+ define for DMA
+
+ *****************************************************************************/
+/* DMA register */
+#define DMACMD1 0x08 /* DMA #1 command register */
+#define DMAMSK1 0x0f /* DMA #1 all mask register */
+#define DMABPFF 0x0c
+#define DMAMODE 0x0b
+#define DMAADDR 0x04
+#define DMAPAGE 0x81
+#define DMACNT 0x05
+#define DMAMSK 0x0a
+
+/* dma set data */
+#define DMARD 0x46 /* DMA read mode */
+#define DMAWT 0x4a /* DMA write mode */
+#define DMAVRF 0x42 /* DMA verify mode */
+
+#define DMADATA0 0x00 /* DMA #2 all mask data */
+#define DMADATA1 0x0b /* DMA #1 all mask data */
+#define CHANNEL2 0x02
+
+#define SRTHUT 0xdf
+#define HLTND 0x02
+#define DTL 0xff
+
+/******************************************************************************
+
+ etc. define
+
+ *****************************************************************************/
+#define SPL spl5 /* Same as in i386at/autoconf.c */
+#define MAXUNIT 4 /* Max unit number */
+#define BLKSIZE 512 /* block size */
+
+/* fdcmd.c_stsflag */
+#define MTRFLAG 0x01
+#define MTROFF 0x02
+#define INTROUT 0x04
+
+/* fdcmd.c_devflag (media check flag . etc.) */
+#define FDMCHK 0x01
+#define FDWAIT 0x02
+#define STRCHK 0x04
+#define STRWAIT 0x08
+
+/* fdcmd.c_dcount */
+#define FDCCNT 9 /* Command table for read/write/format (FDC) */
+#define RWCNT 9 /* Read/Write command count */
+#define FMTCNT 6 /* format command count */
+
+struct fdcmd {
+	int	c_rbmtr;	/* motor & recalibrate flag */
+ int c_intr; /* intr flag */
+	int	c_stsflag;	/* motor flag */
+ int c_mtrid; /* motor off queue id */
+ int c_timeid; /* interrupt timeout id */
+ int c_devflag; /* device status */
+ int c_dcount; /* Read/Write/Format data count */
+ int c_rwdata[FDCCNT]; /* Read/Write/Format cmd (FDC) */
+ int c_saddr; /* cmd seek address */
+};
+
+/* fdmbuf.b_rberr/fdmbuf.b_seekerr/fdmbuf.b_rwerr */
+#define MEDIARD 0x01
+#define MEDIASEEK 0x01
+#define SRETRY 0x03
+#define MRETRY 0x30
+#define LRETRY 0x300
+#define SRMASK 0x0f
+#define MRMASK 0xf0
+#define RMRMASK 0xff0
+#define LRMASK 0xf00
+#define MINC 0x10
+#define LINC 0x100
+
+struct ctrl_info {
+ struct unit_info *b_unitf; /* first buffer for this dev */
+ struct unit_info *b_uip; /* like b_unit */
+ struct unit_info *b_wup; /* unit to wake up when WUPFLAG */
+ short b_rberr; /* rb error count (for recovery) */
+ short b_seekerr; /* seek error count (for recovery) */
+ short b_rwerr; /* r/w error count (for recovery) */
+ short b_status; /* error status */
+ struct buf *b_buf; /* set bp address */
+	caddr_t	b_xferaddr;	/* transfer address */
+ unsigned int b_xfercount; /* total transfer count */
+ unsigned int b_xferdma; /* dma transfer count */
+ int usebuf; /* use private dma page */
+ caddr_t b_vbuf; /* virtual address for dma page */
+ vm_offset_t b_pbuf; /* physical dma page (model_dep.c) */
+ daddr_t b_sector; /* read/write sector */
+ struct fdcmd b_cmd; /* set command table address */
+};
+
+#define FMTID 4
+struct fmttbl {
+ unsigned char cyl;
+ unsigned char head;
+ unsigned char sector;
+ unsigned char s_type;
+};
+
+struct fddrtab {
+ u_short dr_ncyl; /* cylinder count */
+ u_short dr_spc; /* actual sectors/cylinder */
+ daddr_t p_nsec; /* disk length (sector count) */
+ char dr_nsec; /* sector per track */
+ char dr_type; /* media type */
+ char dr_rwgpl; /* Read / Write Gap length */
+ char dr_fgpl; /* Format Gap length */
+};
+
+struct unit_info {
+ struct unit_info *b_unitf; /* next slave */
+ struct buf *av_forw; /* head of I/O queue (b_forw) */
+ int b_seekaddr; /* cylinder address */
+ u_short addr;
+ struct fddrtab d_drtab; /* floppy disk parameter */
+ struct bus_device *dev;
+ struct fdcmd *b_cmd; /* set command table address */
+ char wakeme; /* set if someone wants to be woken */
+};
+
+#define HZ 100 /* 100 ticks/second of the clock */
+#define NBPSCTR 512 /* Bytes per LOGICAL disk sector */
+ /* These should be added to
+ "sys/param.h". */
+#define PAGESIZ 4096
+#ifdef MACH_KERNEL
+#define PZERO 25
+#define PRIBIO 20
+
+#define B_VERIFY IO_SPARE_START
+#define B_FORMAT (IO_SPARE_START << 1)
+
+#define b_pfcent io_mode
+
+#endif MACH_KERNEL
diff --git a/i386/i386at/gpl/if_hpp.c b/i386/i386at/gpl/if_hpp.c
new file mode 100644
index 00000000..c1770301
--- /dev/null
+++ b/i386/i386at/gpl/if_hpp.c
@@ -0,0 +1,690 @@
+/*
+ Written 1994 by Donald Becker.
+
+ This driver is for the Hewlett Packard PC LAN (27***) plus ethercards.
+ These cards are sold under several model numbers, usually 2724*.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ As is often the case, a great deal of credit is owed to Russ Nelson.
+ The Crynwr packet driver was my primary source of HP-specific
+ programming information.
+*/
+
+/*
+ * Ported to mach by Stephen Clawson, sclawson@cs.utah.edu
+ * University of Utah CSL.
+ *
+ * Derived from the Linux driver by Donald Becker.
+ *
+ * Also uses code Shantanu Goel adapted from Donald Becker
+ * for ns8390 support.
+ *
+ */
+
+#include <hpp.h>
+#if NHPP > 0
+
+#include <sys/types.h>
+#include "vm_param.h"
+#include <kern/time_out.h>
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+#include <chips/busses.h>
+#include <i386/ipl.h>
+#include <i386/pio.h>
+#include <i386at/gpl/if_nsreg.h>
+
+
+/*
+ * XXX - This is some gross glue garbage. The io instructions really
+ * should be integrated into pio.h...
+ */
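+/* (A dummy write to port 0x80, the ISA POST diagnostic port, is the traditional short I/O delay.) */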
+#define IO_DELAY __asm__ __volatile__("outb %al,$0x80")
+#define outb_p(p, v) { outb(p, v); IO_DELAY; }
+#define inb_p(p) ({ unsigned char _v; _v = inb(p); IO_DELAY; _v; })
+
+
+static __inline void
+insw(u_short port, void *addr, int cnt)
+{
+ __asm __volatile("cld\n\trepne\n\tinsw" :
+ : "d" (port), "D" (addr), "c" (cnt) : "%edi", "%ecx");
+}
+
+static __inline void
+outsw(u_short port, void *addr, int cnt)
+{
+ __asm __volatile("cld\n\trepne\n\toutsw" :
+ : "d" (port), "S" (addr), "c" (cnt) : "%esi", "%ecx");
+}
+
+
+/*
+ The HP EtherTwist chip implementation is a fairly routine DP8390
+ implementation. It allows both shared memory and programmed-I/O buffer
+ access, using a custom interface for both. The programmed-I/O mode is
+ entirely implemented in the HP EtherTwist chip, bypassing the problem
+ ridden built-in 8390 facilities used on NE2000 designs. The shared
+ memory mode is likewise special, with an offset register used to make
+ packets appear at the shared memory base. Both modes use a base and bounds
+ page register to hide the Rx ring buffer wrap -- a packet that spans the
+ end of physical buffer memory appears continuous to the driver. (c.f. the
+ 3c503 and Cabletron E2100)
+
+ A special note: the internal buffer of the board is only 8 bits wide.
+ This lays several nasty traps for the unaware:
+ - the 8390 must be programmed for byte-wide operations
+ - all I/O and memory operations must work on whole words (the access
+ latches are serially preloaded and have no byte-swapping ability).
+
+ This board is laid out in I/O space much like the earlier HP boards:
+ the first 16 locations are for the board registers, and the second 16 are
+ for the 8390. The board is easy to identify, with both a dedicated 16 bit
+ ID register and a constant 0x530* value in the upper bits of the paging
+ register.
+*/
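The word-only rule above is why the block-transfer routines later in this file round byte counts down to whole words and handle the odd tail byte separately. A minimal standalone sketch of that pattern (copy_words_pad is a hypothetical helper, not part of the driver):

static void
copy_words_pad(unsigned short *dst, const unsigned char *src, int count)
{
	int i;

	for (i = 0; i + 1 < count; i += 2)	/* copy whole 16-bit words */
		*dst++ = src[i] | (src[i + 1] << 8);
	if (count & 1)				/* odd tail byte: the high half is padding */
		*dst = src[count - 1];
}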
+
+#define HP_ID 0x00 /* ID register, always 0x4850. */
+#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */
+#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option.*/
+#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page.*/
+#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page.*/
+#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page.*/
+#define HPP_NIC_OFFSET 0x10 /* Offset to the 8390 registers.*/
+#define HP_IO_EXTENT 32
+
+#define HP_START_PG 0x00 /* First page of TX buffer */
+#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */
+/*#define HP_STOP_PG	0x1f */
+
+/* The register set selected in HP_PAGING. */
+enum PageName {
+ Perf_Page = 0, /* Normal operation. */
+ MAC_Page = 1, /* The ethernet address (+checksum). */
+ HW_Page = 2, /* EEPROM-loaded hw parameters. */
+	LAN_Page = 4,		/* Transceiver type, testing, etc. */
+ ID_Page = 6 };
+
+/* The bit definitions for the HPP_OPTION register. */
+enum HP_Option {
+ NICReset = 1, /* Active low, really UNreset. */
+ ChipReset = 2,
+ EnableIRQ = 4,
+ FakeIntr = 8,
+ BootROMEnb = 0x10,
+ IOEnb = 0x20,
+ MemEnable = 0x40,
+ ZeroWait = 0x80,
+ MemDisable = 0x1000, };
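NICReset and ChipReset are active low, so resetting the board means clearing both bits and then setting them again. A sketch of the pulse that hppopen() and hpp_reset_8390() below perform (hpp_pulse_reset is a hypothetical name, not a driver routine):

static void
hpp_pulse_reset(int ioaddr)
{
	int option = inw(ioaddr + HPP_OPTION);

	outw(ioaddr + HPP_OPTION, option & ~(NICReset + ChipReset));	/* assert reset */
	IO_DELAY; IO_DELAY;						/* let the hardware settle */
	outw(ioaddr + HPP_OPTION, option | (EnableIRQ + NICReset + ChipReset));	/* release it, IRQs on */
}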
+
+
+void hpp_reset_8390(struct nssoftc *ns);
+
+void hpp_mem_block_input(struct nssoftc *ns, int, char *, int);
+int hpp_mem_block_output(struct nssoftc *ns, int, char *, int);
+void hpp_io_block_input(struct nssoftc *ns, int, char *, int);
+int hpp_io_block_output(struct nssoftc *ns, int,char *, int);
+
+
+/*
+ * Watchdog timer.
+ */
+int hppwstart = 0;
+void hppwatch(void);
+
+
+/*
+ * Autoconfig structures.
+ */
+int hpp_std[] = { 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0 };
+struct bus_device *hpp_info[NHPP];
+int hpp_probe();
+void hpp_attach();
+struct bus_driver hppdriver = {
+ hpp_probe, 0, hpp_attach, 0, hpp_std, "hpp", hpp_info, 0, 0, 0
+};
+
+
+/*
+ * ns8390 state.
+ */
+struct nssoftc hppnssoftc[NHPP];
+
+
+/*
+ * hpp state.
+ */
+struct hppsoftc {
+ unsigned long rmem_start; /* shmem "recv" start */
+ unsigned long rmem_end; /* shmem "recv" end */
+ unsigned long mem_start; /* shared mem start */
+ unsigned long mem_end; /* shared mem end */
+} hppsoftc[NHPP];
+
+
+/*
+ * Probe a list of addresses for the card.
+ *
+ */
+int hpp_probe(port, dev)
+ int port;
+ struct bus_device *dev;
+{
+ int unit = dev->unit;
+ char *str = "hp-plus ethernet board %d out of range.\n";
+ caddr_t base = (caddr_t) (dev ? dev->address : 0);
+ int i;
+
+ if ((unit < 0) || (unit >= NHPP)) {
+ printf(str, unit);
+ return(0);
+ }
+
+ /* Check a single specified location. */
+ if (base > (caddr_t) 0x1ff)
+ return hpp_probe1(dev, base);
+ else if (base != 0) /* Don't probe at all. */
+ return 0;
+
+ for (i = 0; hpp_std[i]; i++) {
+ int ioaddr = hpp_std[i];
+
+ if ( ioaddr > 0 && hpp_probe1(dev, ioaddr) ) {
+ dev->address = ioaddr;
+ hpp_std[i] = -1; /* Mark address used */
+ return(1);
+ }
+ }
+
+ return 0;
+}
+
+
+
+/*
+ * Do the interesting part of the probe at a single address.
+ *
+ */
+int hpp_probe1(dev, ioaddr)
+ struct bus_device *dev;
+ int ioaddr;
+{
+ int i;
+ u_char checksum = 0;
+ int mem_start;
+
+ struct hppsoftc *hpp = &hppsoftc[dev->unit];
+ struct nssoftc *ns = &hppnssoftc[dev->unit];
+ struct ifnet *ifp = &ns->sc_if;
+
+ /* Check for the HP+ signature, 50 48 0x 53. */
+ if (inw(ioaddr + HP_ID) != 0x4850
+ || (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300)
+ return 0;
+
+
+ printf("%s%d: HP PClan plus at %#3x,", dev->name, dev->unit, ioaddr);
+ /* Retrieve and checksum the station address. */
+ outw(ioaddr + HP_PAGING, MAC_Page);
+
+ printf("MAC_Page = %d, ioaddr = %x\n", MAC_Page, ioaddr);
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ u_char inval = inb(ioaddr + 8 + i);
+ ns->sc_addr[i] = inval;
+ checksum += inval;
+ printf(" %2.2x", inval);
+ }
+ checksum += inb(ioaddr + 14);
+
+ if (checksum != 0xff) {
+ printf(" bad checksum %2.2x.\n", checksum);
+ return 0;
+ } else {
+ /* Point at the Software Configuration Flags. */
+ outw(ioaddr + HP_PAGING, ID_Page);
+ printf(" ID %4.4x", inw(ioaddr + 12));
+ }
+
+
+ /* Read the IRQ line. */
+ outw(ioaddr + HP_PAGING, HW_Page);
+ {
+ int irq = inb(ioaddr + 13) & 0x0f;
+ int option = inw(ioaddr + HPP_OPTION);
+
+ dev->sysdep1 = irq;
+ take_dev_irq(dev);
+
+ if (option & MemEnable) {
+ mem_start = inw(ioaddr + 9) << 8;
+ printf(", IRQ %d, memory address %#x.\n", irq, mem_start);
+ } else {
+ mem_start = 0;
+ printf(", IRQ %d, programmed-I/O mode.\n", irq);
+ }
+ }
+
+ /* Set the wrap registers for string I/O reads. */
+ outw( ioaddr + 14, (HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8));
+
+ /* Set the base address to point to the NIC, not the "real" base! */
+ ns->sc_port = ioaddr + HPP_NIC_OFFSET;
+
+ ns->sc_name = dev->name;
+ ns->sc_unit = dev->unit;
+ ns->sc_pingpong = 0; /* turn off pingpong mode */
+ ns->sc_word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
+ ns->sc_txstrtpg = HP_START_PG;
+ ns->sc_rxstrtpg = HP_START_PG + TX_2X_PAGES;
+ ns->sc_stoppg = HP_STOP_PG;
+
+
+ ns->sc_reset = hpp_reset_8390;
+ ns->sc_input = hpp_io_block_input;
+ ns->sc_output = hpp_io_block_output;
+
+ /* Check if the memory_enable flag is set in the option register. */
+ if (mem_start) {
+ ns->sc_input = hpp_mem_block_input;
+ ns->sc_output = hpp_mem_block_output;
+ hpp->mem_start = mem_start;
+ hpp->rmem_start = hpp->mem_start + TX_2X_PAGES * 256;
+ hpp->mem_end = hpp->rmem_end
+ = hpp->mem_start + (HP_STOP_PG - HP_START_PG) * 256;
+ }
+
+ outw(ioaddr + HP_PAGING, Perf_Page);
+
+ /* Leave the 8390 and HP chip reset. */
+ outw( ioaddr + HPP_OPTION, inw(ioaddr + HPP_OPTION) & ~EnableIRQ );
+
+ /*
+ * Initialize interface header.
+ */
+ ifp->if_unit = dev->unit;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST;
+ ifp->if_header_size = sizeof(struct ether_header);
+ ifp->if_header_format = HDR_ETHERNET;
+ ifp->if_address_size = ETHER_ADDR_LEN;
+ ifp->if_address = ns->sc_addr;
+ if_init_queues(ifp);
+
+ return (1);
+}
+
+/*
+ * XXX
+ *
+ * this routine really should do the invasive part of the setup.
+ */
+void
+hpp_attach(dev)
+ struct bus_device *dev;
+{
+ /* NULL */
+}
+
+
+
+int
+hppopen(dev, flag)
+ dev_t dev;
+ int flag;
+{
+ int s, unit = minor(dev);
+ struct bus_device *bd;
+ struct hppsoftc *hpp;
+ struct nssoftc *ns = &hppnssoftc[unit];
+
+ int ioaddr = ns->sc_port - HPP_NIC_OFFSET;
+ int option_reg;
+
+ if (unit < 0 || unit >= NHPP ||
+ (bd = hpp_info[unit]) == 0 || !(bd->alive))
+ return ENXIO;
+
+ /*
+ * Start watchdog.
+ */
+ if (!hppwstart) {
+ hppwstart++;
+ timeout(hppwatch, 0, hz);
+ }
+ hpp = &hppsoftc[unit];
+ ns->sc_if.if_flags |= IFF_UP;
+
+ s = splimp();
+
+ /* Reset the 8390 and HP chip. */
+ option_reg = inw(ioaddr + HPP_OPTION);
+ outw( ioaddr + HPP_OPTION, option_reg & ~(NICReset + ChipReset) );
+ IO_DELAY; IO_DELAY;
+
+ /* Unreset the board and enable interrupts. */
+ outw( ioaddr + HPP_OPTION, option_reg | (EnableIRQ + NICReset + ChipReset));
+
+ /* Set the wrap registers for programmed-I/O operation. */
+ outw( ioaddr + HP_PAGING, HW_Page );
+ outw( ioaddr + 14, (HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8) );
+
+ /* Select the operational page. */
+ outw( ioaddr + HP_PAGING, Perf_Page );
+ nsinit(ns);
+
+ splx(s);
+
+ return (0);
+}
+
+/*
+ * needs to be called at splimp()?
+ *
+ */
+void
+hpp_reset_8390(ns)
+ struct nssoftc *ns;
+{
+ int ioaddr = ns->sc_port - HPP_NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw( ioaddr + HPP_OPTION, option_reg & ~(NICReset + ChipReset) );
+ /* Pause a few cycles for the hardware reset to take place. */
+ IO_DELAY;
+ IO_DELAY;
+ ns->sc_txing = 0;
+ outw( ioaddr + HPP_OPTION, option_reg | (EnableIRQ + NICReset + ChipReset) );
+
+ /*
+ * XXX - I'm not sure there needs to be this many IO_DELAY's...
+ */
+ IO_DELAY; IO_DELAY;
+ IO_DELAY; IO_DELAY;
+
+ if ((inb_p(ioaddr + HPP_NIC_OFFSET + EN0_ISR) & ENISR_RESET) == 0)
+		printf("%s: hpp_reset_8390() did not complete.\n", ns->sc_name);
+
+ return;
+}
+
+
+/*
+ * Block input and output, similar to the Crynwr packet driver.
+ * Note that transfer with the EtherTwist+ must be on word boundaries.
+ */
+void
+hpp_io_block_input(ns, count, buf, ring_offset)
+ struct nssoftc *ns;
+ int count;
+ char *buf;
+ int ring_offset;
+{
+ int ioaddr = ns->sc_port - HPP_NIC_OFFSET;
+
+ outw(ioaddr + HPP_IN_ADDR, ring_offset);
+
+ insw(ioaddr + HP_DATAPORT, buf, count >> 1 );
+
+ if (count & 0x01)
+ buf[count-1] = (char) inw(ioaddr + HP_DATAPORT);
+
+}
+
+void
+hpp_mem_block_input(ns, count, buf, ring_offset)
+ struct nssoftc *ns;
+ int count;
+ char *buf;
+ int ring_offset;
+{
+ int ioaddr = ns->sc_port - HPP_NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+ char *mem_start = (char *)phystokv(hppsoftc[ns->sc_unit].mem_start);
+
+ outw(ioaddr + HPP_IN_ADDR, ring_offset);
+ outw(ioaddr + HPP_OPTION, option_reg & ~(MemDisable + BootROMEnb));
+
+ /* copy as much as we can straight through */
+ bcopy16(mem_start, buf, count & ~1);
+
+ /* Now we copy that last byte. */
+ if (count & 0x01) {
+ u_short savebyte[2];
+
+ bcopy16(mem_start + (count & ~1), savebyte, 2);
+ buf[count-1] = savebyte[0];
+ }
+
+ outw(ioaddr + HPP_OPTION, option_reg);
+}
+
+
+/*
+ * output data into NIC buffers.
+ *
+ * NOTE: All transfers must be on word boundaries.
+ */
+int
+hpp_io_block_output(ns, count, buf, start_page)
+ struct nssoftc *ns;
+ int count;
+ char *buf;
+ int start_page;
+{
+ int ioaddr = ns->sc_port - HPP_NIC_OFFSET;
+
+ outw(ioaddr + HPP_OUT_ADDR, start_page << 8) ;
+
+ if (count > 1) {
+ outsw(ioaddr + HP_DATAPORT, buf, count >> 1);
+ }
+
+ if ( (count & 1) == 1 ) {
+ u_char savebyte[2];
+
+ savebyte[1] = 0;
+ savebyte[0] = buf[count - 1];
+ outw(ioaddr + HP_DATAPORT, *(u_short *)savebyte);
+ }
+
+ if (count < (ETHERMIN + sizeof( struct ether_header )))
+ count = ETHERMIN + sizeof( struct ether_header );
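+	/* Unlike hpp_mem_block_output() below, the pad bytes are not zeroed here; the short frame is simply declared longer. */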
+
+
+ return (count) ;
+}
+
+
+/* XXX
+ *
+ * I take great pains not to bcopy past the end of the buffer;
+ * does this matter?  Are the I/O request buffers allocated to the exact byte count?
+ */
+int
+hpp_mem_block_output(ns, count, buf, start_page )
+ struct nssoftc *ns;
+ int count;
+ char *buf;
+ int start_page;
+{
+ int ioaddr = ns->sc_port - HPP_NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+ struct hppsoftc *hpp = &hppsoftc[ns->sc_unit];
+ char *shmem;
+
+ outw(ioaddr + HPP_OUT_ADDR, start_page << 8);
+ outw(ioaddr + HPP_OPTION, option_reg & ~(MemDisable + BootROMEnb));
+
+ shmem = (char *)phystokv(hpp->mem_start);
+ bcopy16(buf, shmem, count & ~1);
+
+ if ( (count & 1) == 1 ) {
+ u_char savebyte[2];
+
+ savebyte[1] = 0;
+ savebyte[0] = buf[count - 1];
+ bcopy16(savebyte, shmem + (count & ~1), 2);
+ }
+
+ while (count < ETHERMIN + sizeof(struct ether_header)) {
+ *(shmem + count) = 0;
+ count++;
+ }
+
+ outw(ioaddr + HPP_OPTION, option_reg);
+
+ return count;
+}
+
+
+int
+hppintr(unit)
+ int unit;
+{
+ nsintr(&hppnssoftc[unit]);
+
+ return(0);
+}
+
+void
+hppstart(unit)
+ int unit;
+{
+ nsstart(&hppnssoftc[unit]);
+}
+
+int hppoutput();
+
+int
+hppoutput(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ int unit = minor(dev);
+ struct bus_device *ui;
+
+ if (unit >= NHPP || (ui = hpp_info[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ return (net_write(&hppnssoftc[unit].sc_if, hppstart, ior));
+}
+
+
+int
+hppsetinput(dev, receive_port, priority, filter, filter_count)
+ dev_t dev;
+ mach_port_t receive_port;
+ int priority;
+ filter_t *filter;
+ unsigned filter_count;
+{
+ int unit = minor(dev);
+ struct bus_device *ui;
+
+ if (unit >= NHPP || (ui = hpp_info[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ return (net_set_filter(&hppnssoftc[unit].sc_if, receive_port,
+ priority, filter, filter_count));
+}
+
+
+int
+hppgetstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status;
+ unsigned *count;
+{
+ int unit = minor(dev);
+ struct bus_device *ui;
+
+ if (unit >= NHPP || (ui = hpp_info[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ return (net_getstat(&hppnssoftc[unit].sc_if, flavor, status, count));
+}
+
+
+int
+hppsetstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status;
+ unsigned count;
+{
+ int unit = minor(dev), oflags, s;
+ struct bus_device *ui;
+ struct ifnet *ifp;
+ struct net_status *ns;
+
+ if (unit >= NHPP || (ui = hpp_info[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ ifp = &hppnssoftc[unit].sc_if;
+
+ switch (flavor) {
+
+ case NET_STATUS:
+ if (count < NET_STATUS_COUNT)
+ return (D_INVALID_SIZE);
+ ns = (struct net_status *)status;
+ oflags = ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC);
+ ifp->if_flags &= ~(IFF_ALLMULTI|IFF_PROMISC);
+ ifp->if_flags |= ns->flags & (IFF_ALLMULTI|IFF_PROMISC);
+ if ((ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) != oflags) {
+ s = splimp();
+ nsinit(&hppnssoftc[unit]);
+ splx(s);
+ }
+ break;
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+/*
+ * Watchdog.
+ * Check for hung transmissions.
+ */
+void
+hppwatch()
+{
+ int unit, s;
+ struct nssoftc *ns;
+
+ timeout(hppwatch, 0, hz);
+
+ s = splimp();
+ for (unit = 0; unit < NHPP; unit++) {
+ if (hpp_info[unit] == 0 || hpp_info[unit]->alive == 0)
+ continue;
+ ns = &hppnssoftc[unit];
+ if (ns->sc_timer && --ns->sc_timer == 0) {
+ printf("hpp%d: transmission timeout\n", unit);
+ (*ns->sc_reset)(ns);
+ nsinit(ns);
+ }
+ }
+ splx(s);
+}
+
+
+#endif /* NHPP > 0 */
+
+
diff --git a/i386/i386at/gpl/if_ns.c b/i386/i386at/gpl/if_ns.c
new file mode 100644
index 00000000..da629cb3
--- /dev/null
+++ b/i386/i386at/gpl/if_ns.c
@@ -0,0 +1,642 @@
+/*
+ * Copyright (c) 1994 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+/*
+ * Written 1992,1993 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may be used and
+ * distributed according to the terms of the GNU Public License,
+ * incorporated herein by reference.
+ *
+ * The Author may be reached as becker@super.org or
+ * C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
+ */
+
+#include <ul.h>
+#include <wd.h>
+#include <hpp.h>
+#if NUL > 0 || NWD > 0 || NHPP > 0
+/*
+ * Generic NS8390 routines.
+ * Derived from the Linux driver by Donald Becker.
+ *
+ * Shantanu Goel (goel@cs.columbia.edu)
+ */
+#include <sys/types.h>
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+#include <chips/busses.h>
+#include <i386/machspl.h>
+#include <i386/pio.h>
+#include <i386at/gpl/if_nsreg.h>
+
+#define IO_DELAY __asm__ __volatile__ ("outb %al,$0x80")
+#define outb_p(p, v) { outb(p, v); IO_DELAY; }
+#define inb_p(p) ({ unsigned char _v; _v = inb(p); IO_DELAY; _v; })
+
+#define NSDEBUG
+#ifdef NSDEBUG
+int nsdebug = 0;
+#define DEBUGF(stmt) { if (nsdebug) stmt; }
+#else
+#define DEBUGF(stmt)
+#endif
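+/* Any non-zero value in nsdebug enables the DEBUGF traces throughout this file. */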
+
+void nsxint(struct nssoftc *);
+void nsrint(struct nssoftc *);
+void nsxmit(struct nssoftc *, unsigned, int);
+void nsrxoverrun(struct nssoftc *);
+
+/*
+ * Initialize the NIC.
+ * Must be called at splimp().
+ */
+void
+nsinit(sc)
+ struct nssoftc *sc;
+{
+ int port = sc->sc_port, i, rxconfig;
+ int endcfg = sc->sc_word16 ? (0x48 | ENDCFG_WTS) : 0x48;
+ struct ifnet *ifp = &sc->sc_if;
+
+ /*
+ * Reset the board.
+ */
+ (*sc->sc_reset)(sc);
+
+ sc->sc_oactive = 0;
+ sc->sc_txing = 0;
+ sc->sc_timer = 0;
+ sc->sc_tx1 = sc->sc_tx2 = 0;
+ sc->sc_curpg = sc->sc_rxstrtpg;
+
+ /*
+ * Follow National Semiconductor's recommendations for
+ * initializing the DP83902.
+ */
+ outb_p(port, E8390_NODMA+E8390_PAGE0+E8390_STOP); /* 0x21 */
+ outb_p(port + EN0_DCFG, endcfg); /* 0x48 or 0x49 */
+
+ /*
+ * Clear remote byte count registers.
+ */
+ outb_p(port + EN0_RCNTLO, 0);
+ outb_p(port + EN0_RCNTHI, 0);
+
+ /*
+ * Set to monitor and loopback mode -- this is vital!
+ */
+ outb_p(port + EN0_RXCR, E8390_RXOFF); /* 0x20 */
+ outb_p(port + EN0_TXCR, E8390_TXOFF); /* 0x02 */
+
+ /*
+ * Set transmit page and receive ring.
+ */
+ outb_p(port + EN0_TPSR, sc->sc_txstrtpg);
+ outb_p(port + EN0_STARTPG, sc->sc_rxstrtpg);
+ outb_p(port + EN0_BOUNDARY, sc->sc_stoppg - 1);
+ outb_p(port + EN0_STOPPG, sc->sc_stoppg);
+
+ /*
+ * Clear pending interrupts and mask.
+ */
+ outb_p(port + EN0_ISR, 0xff);
+
+ /*
+ * Enable the following interrupts: receive/transmit complete,
+ * receive/transmit error, and Receiver OverWrite.
+ *
+ * Counter overflow and Remote DMA complete are *not* enabled.
+ */
+ outb_p(port + EN0_IMR, ENISR_RX | ENISR_TX | ENISR_RX_ERR |
+ ENISR_TX_ERR | ENISR_OVER );
+
+ /*
+ * Copy station address into 8390 registers.
+ */
+ outb_p(port, E8390_NODMA + E8390_PAGE1 + E8390_STOP); /* 0x61 */
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ outb_p(port + EN1_PHYS + i, sc->sc_addr[i]);
+
+ /*
+ * Set up to accept all multicast packets.
+ */
+ for (i = 0; i < 8; i++)
+ outb_p(port + EN1_MULT + i, 0xff);
+
+ /*
+ * Initialize CURRent pointer
+ */
+ outb_p(port + EN1_CURPAG, sc->sc_rxstrtpg);
+
+ /*
+ * Program command register for page 0.
+ */
+ outb_p(port, E8390_NODMA + E8390_PAGE0 + E8390_STOP);
+
+#if 0
+ outb_p(port + EN0_ISR, 0xff);
+ outb_p(port + EN0_IMR, ENISR_ALL);
+#endif
+
+ outb_p(port + E8390_CMD, E8390_NODMA + E8390_PAGE0 + E8390_START);
+ outb_p(port + EN0_TXCR, E8390_TXCONFIG); /* xmit on */
+
+ /* 3c503 TechMan says rxconfig only after the NIC is started. */
+ rxconfig = E8390_RXCONFIG;
+ if (ifp->if_flags & IFF_ALLMULTI)
+ rxconfig |= 0x08;
+ if (ifp->if_flags & IFF_PROMISC)
+ rxconfig |= 0x10;
+ outb_p(port + EN0_RXCR, rxconfig); /* rx on */
+
+ /*
+ * Mark interface as up and start output.
+ */
+ ifp->if_flags |= IFF_RUNNING;
+ nsstart(sc);
+}
+
+/*
+ * Start output on interface.
+ * Must be called at splimp().
+ */
+void
+nsstart(sc)
+ struct nssoftc *sc;
+{
+ io_req_t ior;
+ struct ifnet *ifp = &sc->sc_if;
+
+ /*
+ * Drop packets if interface is down.
+ */
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
+ while (1) {
+ IF_DEQUEUE(&ifp->if_snd, ior);
+ if (ior == 0)
+ return;
+ iodone(ior);
+ }
+ }
+ /*
+ * If transmitter is busy, bail out.
+ */
+ if (sc->sc_oactive)
+ return;
+
+ /*
+ * Dequeue a packet.
+ */
+ IF_DEQUEUE(&ifp->if_snd, ior);
+ if (ior == 0)
+ return;
+
+ /* Mask interrupts from the ethercard. */
+ outb( sc->sc_port + EN0_IMR, 0x00);
+
+ if (sc->sc_pingpong) {
+ int count, output_page;
+
+ if (sc->sc_tx1 == 0) {
+ output_page = sc->sc_txstrtpg;
+ sc->sc_tx1 = count = (*sc->sc_output)(sc,
+ ior->io_count,
+ ior->io_data,
+ sc->sc_txstrtpg);
+ } else if (sc->sc_tx2 == 0) {
+ output_page = sc->sc_txstrtpg + 6;
+ sc->sc_tx2 = count = (*sc->sc_output)(sc,
+ ior->io_count,
+ ior->io_data,
+ output_page);
+ } else {
+ sc->sc_oactive = 1;
+ IF_PREPEND(&ifp->if_snd, ior);
+ return;
+ }
+
+ DEBUGF({
+ struct ether_header *eh;
+
+ eh = (struct ether_header *)ior->io_data;
+ printf("send: %s%d: %x:%x:%x:%x:%x:%x, "
+ "olen %d, len %d\n",
+ sc->sc_name, sc->sc_unit,
+ eh->ether_dhost[0], eh->ether_dhost[1],
+ eh->ether_dhost[2], eh->ether_dhost[3],
+ eh->ether_dhost[4], eh->ether_dhost[5],
+ ior->io_count, count);
+ });
+
+ if (!sc->sc_txing) {
+ nsxmit(sc, count, output_page);
+ if (output_page == sc->sc_txstrtpg)
+ sc->sc_tx1 = -1, sc->sc_lasttx = -1;
+ else
+ sc->sc_tx2 = -1, sc->sc_lasttx = -2;
+ }
+ sc->sc_oactive = (sc->sc_tx1 && sc->sc_tx2);
+ } else {
+ int count;
+
+ count = (*sc->sc_output)(sc, ior->io_count,
+ ior->io_data, sc->sc_txstrtpg);
+
+ DEBUGF({
+ struct ether_header *eh;
+
+ eh = (struct ether_header *)ior->io_data;
+ printf("send: %s%d: %x:%x:%x:%x:%x:%x, "
+ "olen %d, len %d\n",
+ sc->sc_name, sc->sc_unit,
+ eh->ether_dhost[0], eh->ether_dhost[1],
+ eh->ether_dhost[2], eh->ether_dhost[3],
+ eh->ether_dhost[4], eh->ether_dhost[5],
+ ior->io_count, count);
+ });
+
+ nsxmit(sc, count, sc->sc_txstrtpg);
+ sc->sc_oactive = 1;
+ }
+
+ /* reenable 8390 interrupts. */
+ outb_p(sc->sc_port + EN0_IMR, ENISR_ALL);
+
+ iodone(ior);
+}
+
+/*
+ * Interrupt routine.
+ * Called by board level driver.
+ */
+void
+nsintr(sc)
+ struct nssoftc *sc;
+{
+ int port = sc->sc_port;
+ int interrupts, boguscount = 0;
+ struct ifnet *ifp = &sc->sc_if;
+
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
+ DEBUGF(printf("nsintr: %s%d: interface down\n",
+ sc->sc_name, sc->sc_unit));
+ return;
+ }
+
+ /*
+ * Change to page 0 and read intr status reg.
+ */
+ outb_p(port + E8390_CMD, E8390_NODMA+E8390_PAGE0);
+
+ while ((interrupts = inb_p(port + EN0_ISR)) != 0 && ++boguscount < 9) {
+ if (interrupts & ENISR_RDC) {
+ /*
+ * Ack meaningless DMA complete.
+ */
+ outb_p(port + EN0_ISR, ENISR_RDC);
+ }
+
+ if (interrupts & ENISR_OVER)
+ nsrxoverrun(sc);
+ else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
+ nsrint(sc);
+ }
+
+ if (interrupts & ENISR_TX) {
+ nsxint(sc);
+ }
+ else if (interrupts & ENISR_COUNTERS) {
+ /*
+ * XXX - We really should be storing statistics
+ * about the interface. For now we just drop them.
+ */
+
+ /* reading resets the counters! */
+ (void) inb_p(port + EN0_COUNTER0); /* frame */
+ (void) inb_p(port + EN0_COUNTER1); /* crc */
+ (void) inb_p(port + EN0_COUNTER2); /* miss */
+
+ DEBUGF(printf("%s%d: acked counter interrupt.\n",
+ sc->sc_name, sc->sc_unit));
+
+ outb_p(port + EN0_ISR, ENISR_COUNTERS); /* ack intr */
+ }
+
+ if (interrupts & ENISR_TX_ERR) {
+ DEBUGF(printf("acking transmit error\n"));
+ outb_p(port + EN0_ISR, ENISR_TX_ERR); /* ack intr */
+ }
+
+ outb_p(port + E8390_CMD, E8390_NODMA+E8390_PAGE0+E8390_START);
+ }
+
+ DEBUGF({
+ if (interrupts) {
+ printf("%s%d: unknown interrupt 0x%x",
+ sc->sc_name, sc->sc_unit, interrupts);
+ outb_p(port + E8390_CMD,
+ E8390_NODMA+E8390_PAGE0+E8390_START);
+ outb_p(port + EN0_ISR, 0xff); /* ack all intrs */
+ }
+ })
+}
+
+/*
+ * Process a transmit interrupt.
+ */
+void
+nsxint(sc)
+ struct nssoftc *sc;
+{
+ int port = sc->sc_port, status;
+ struct ifnet *ifp = &sc->sc_if;
+
+ status = inb(port + EN0_TSR);
+ outb_p(port + EN0_ISR, ENISR_TX); /* ack intr */
+
+ sc->sc_txing = 0;
+ sc->sc_timer = 0;
+ sc->sc_oactive = 0;
+
+ if (sc->sc_pingpong) {
+ if (sc->sc_tx1 < 0) {
+ if (sc->sc_lasttx != 1 && sc->sc_lasttx != -1)
+ printf("%s%d: bogus last_tx_buffer %d,"
+ "tx1 = %d\n",
+ sc->sc_name, sc->sc_unit,
+ sc->sc_lasttx, sc->sc_tx1);
+ sc->sc_tx1 = 0;
+ if (sc->sc_tx2 > 0) {
+ nsxmit(sc, sc->sc_tx2, sc->sc_txstrtpg + 6);
+ sc->sc_tx2 = -1;
+ sc->sc_lasttx = 2;
+ } else
+ sc->sc_lasttx = 20;
+ } else if (sc->sc_tx2 < 0) {
+ if (sc->sc_lasttx != 2 && sc->sc_lasttx != -2)
+ printf("%s%d: bogus last_tx_buffer %d,"
+ "tx2 = %d\n",
+ sc->sc_name, sc->sc_unit,
+ sc->sc_lasttx, sc->sc_tx2);
+ sc->sc_tx2 = 0;
+ if (sc->sc_tx1 > 0) {
+ nsxmit(sc, sc->sc_tx1, sc->sc_txstrtpg);
+ sc->sc_tx1 = -1;
+ sc->sc_lasttx = 1;
+ } else
+ sc->sc_lasttx = 10;
+ } else
+ printf("%s%d: unexpected TX-done interrupt, "
+ "lasttx = %d\n",
+ sc->sc_name, sc->sc_unit, sc->sc_lasttx);
+ }
+ /*
+ * Update stats.
+ */
+ if (status & ENTSR_COL) {
+ if (status & ENTSR_ABT)
+ ifp->if_collisions += 16;
+ else
+ ifp->if_collisions += inb(port + EN0_NCR);
+ }
+ if (status & ENTSR_PTX) {
+ DEBUGF(printf("sent: %s%d\n", sc->sc_name, sc->sc_unit));
+ ifp->if_opackets++;
+ } else
+ ifp->if_oerrors++;
+
+ /*
+ * Start output on interface.
+ */
+ nsstart(sc);
+}
+
+/*
+ * Process a receive interrupt.
+ */
+void
+nsrint(sc)
+ struct nssoftc *sc;
+{
+ int port = sc->sc_port;
+ int rxing_page, this_frame, next_frame, current_offset;
+ int rx_pkt_count = 0;
+ int num_rx_pages = sc->sc_stoppg - sc->sc_rxstrtpg;
+ struct nspkthdr rx_frame;
+ struct ifnet *ifp = &sc->sc_if;
+
+ while (++rx_pkt_count < 10) {
+ int pkt_len;
+
+ /*
+ * Get the rx page (incoming packet pointer).
+ */
+ outb_p(port + E8390_CMD, E8390_NODMA+E8390_PAGE1);
+ rxing_page = inb_p(port + EN1_CURPAG);
+ outb_p(port + E8390_CMD, E8390_NODMA+E8390_PAGE0);
+
+ /*
+ * Remove one frame from the ring.
+ * Boundary is always a page behind.
+ */
+ this_frame = inb_p(port + EN0_BOUNDARY) + 1;
+ if (this_frame >= sc->sc_stoppg)
+ this_frame = sc->sc_rxstrtpg;
+
+ DEBUGF({
+ if (this_frame != sc->sc_curpg)
+ printf("%s%d: mismatched read page pointers "
+ "%x vs %x\n",
+ sc->sc_name, sc->sc_unit,
+ this_frame, sc->sc_curpg);
+ });
+
+ if (this_frame == rxing_page) {
+ DEBUGF(printf("this_frame = rxing_page!\n"));
+ break;
+ }
+
+ current_offset = this_frame << 8;
+ (*sc->sc_input)(sc, sizeof(rx_frame), (char *)&rx_frame,
+ current_offset);
+
+ pkt_len = rx_frame.count - sizeof(rx_frame);
+
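+		/* rx_frame.count is pkt_len plus the 4-byte ring header; ring pages are 256 bytes, hence the shift by 8. */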
+ next_frame = this_frame + 1 + ((pkt_len + 4) >> 8);
+
+ if (rx_frame.next != next_frame
+ && rx_frame.next != next_frame + 1
+ && rx_frame.next != next_frame - num_rx_pages
+ && rx_frame.next != next_frame + 1 - num_rx_pages) {
+ sc->sc_curpg = rxing_page;
+ outb(port + EN0_BOUNDARY, sc->sc_curpg - 1);
+ ifp->if_ierrors++;
+ DEBUGF(printf("INPUT ERROR?\n"));
+ continue;
+ }
+ if (pkt_len < 60 || pkt_len > 1518) {
+ ifp->if_ierrors++;
+ DEBUGF(printf("%s%d: bad packet length %d\n",
+ sc->sc_name, sc->sc_unit, pkt_len));
+ } else if ((rx_frame.status & 0x0f) == ENRSR_RXOK) {
+ ipc_kmsg_t kmsg;
+
+ kmsg = net_kmsg_get();
+ if (kmsg == 0) {
+ DEBUGF(printf("%s%d: dropped packet\n",
+ sc->sc_name, sc->sc_unit));
+ ifp->if_rcvdrops++;
+ } else {
+ int len, off;
+ struct ether_header *eh;
+ struct packet_header *pkt;
+
+ ifp->if_ipackets++;
+ off = current_offset + sizeof(rx_frame);
+ eh = ((struct ether_header *)
+ (&net_kmsg(kmsg)->header[0]));
+ (*sc->sc_input)(sc,
+ sizeof(struct ether_header),
+ (char *)eh, off);
+ off += sizeof(struct ether_header);
+ len = pkt_len - sizeof(struct ether_header);
+
+ DEBUGF(printf("rcv: %s%d: %x:%x:%x:%x:%x:%x, "
+ "len %d, type 0x%x\n",
+ sc->sc_name, sc->sc_unit,
+ eh->ether_shost[0],
+ eh->ether_shost[1],
+ eh->ether_shost[2],
+ eh->ether_shost[3],
+ eh->ether_shost[4],
+ eh->ether_shost[5],
+ len, eh->ether_type));
+
+ pkt = ((struct packet_header *)
+ (&net_kmsg(kmsg)->packet[0]));
+ (*sc->sc_input)(sc, len, (char *)(pkt+1), off);
+ pkt->type = eh->ether_type;
+ pkt->length = len+sizeof(struct packet_header);
+ net_packet(ifp, kmsg, pkt->length,
+ ethernet_priority(kmsg));
+ }
+ } else {
+ DEBUGF(printf("%s%d: bogus packet: "
+ "status=0x%x nxpg=0x%x size=%d\n",
+ sc->sc_name, sc->sc_unit,
+ rx_frame.status, rx_frame.next,
+ rx_frame.count));
+ ifp->if_ierrors++;
+ }
+ next_frame = rx_frame.next;
+ if (next_frame >= sc->sc_stoppg) {
+ DEBUGF(printf("%s%d: next frame inconsistency, 0x%x\n",
+ sc->sc_name, sc->sc_unit, next_frame));
+ next_frame = sc->sc_rxstrtpg;
+ }
+ sc->sc_curpg = next_frame;
+ outb(port + EN0_BOUNDARY, next_frame - 1);
+ }
+
+ /*
+ * Bug alert! Reset ENISR_OVER to avoid spurious overruns!
+ */
+ outb_p(port + EN0_ISR, ENISR_RX+ENISR_RX_ERR+ENISR_OVER);
+}
+
+/*
+ * Handle a receive overrun condition.
+ *
+ * XXX - this needs to be gone over in light of the NS documentation.
+ */
+void
+nsrxoverrun(sc)
+ struct nssoftc *sc;
+{
+ int port = sc->sc_port, i;
+ extern unsigned delaycount;
+
+ printf("%s%d: receive overrun\n", sc->sc_name, sc->sc_unit);
+
+ /*
+ * We should already be stopped and in page0, but just to be sure...
+ */
+ outb_p(port + E8390_CMD, E8390_NODMA+E8390_PAGE0+E8390_STOP);
+
+ /*
+ * Clear remote byte counter registers.
+ */
+ outb_p(port + EN0_RCNTLO, 0);
+ outb_p(port + EN0_RCNTHI, 0);
+
+ /*
+ * Wait for reset to complete.
+ */
+ for (i = delaycount*2; i && !(inb_p(port+EN0_ISR) & ENISR_RESET); i--)
+ ;
+ if (i == 0) {
+ printf("%s%d: reset did not complete at overrun\n",
+ sc->sc_name, sc->sc_unit);
+ nsinit(sc);
+ return;
+ }
+ /*
+ * Disable transmitter.
+ */
+ outb_p(port + EN0_TXCR, E8390_TXOFF);
+
+ /*
+ * Remove packets.
+ */
+ nsrint(sc);
+
+ outb_p(port + EN0_ISR, 0xff);
+ outb_p(port + E8390_CMD, E8390_NODMA+E8390_PAGE0+E8390_START);
+ outb_p(port + EN0_TXCR, E8390_TXCONFIG);
+}
+
+/*
+ * Trigger a transmit start.
+ */
+void
+nsxmit(sc, length, start_page)
+ struct nssoftc *sc;
+ unsigned length;
+ int start_page;
+{
+ int port = sc->sc_port;
+
+ sc->sc_txing = 1;
+ outb_p(port, E8390_NODMA+E8390_PAGE0);
+ if (inb_p(port) & E8390_TRANS) {
+ printf("%s%d: nsxmit() called with the transmitter busy\n",
+ sc->sc_name, sc->sc_unit);
+ return;
+ }
+ outb_p(port + EN0_TCNTLO, length & 0xff);
+ outb_p(port + EN0_TCNTHI, (length >> 8) & 0xff);
+ outb_p(port + EN0_TPSR, start_page);
+ outb_p(port, E8390_NODMA+E8390_TRANS+E8390_START);
+ sc->sc_timer = 4;
+}
+
+#endif /* NUL > 0 || NWD > 0 || NHPP > 0 */
diff --git a/i386/i386at/gpl/if_nsreg.h b/i386/i386at/gpl/if_nsreg.h
new file mode 100644
index 00000000..89447976
--- /dev/null
+++ b/i386/i386at/gpl/if_nsreg.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 1994 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+/*
+ * Written 1992,1993 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may be used and
+ * distributed according to the terms of the GNU Public License,
+ * incorporated herein by reference.
+ *
+ * The Author may be reached as becker@super.org or
+ * C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
+ */
+
+/*
+ * Generic NS8390 definitions.
+ * Derived from the Linux driver by Donald Becker.
+ */
+
+#define ETHER_ADDR_LEN 6
+
+/*
+ * 8390 state.
+ */
+struct nssoftc {
+ struct ifnet sc_if; /* interface header */
+ u_char sc_addr[ETHER_ADDR_LEN]; /* station address */
+ /*
+ * The following are board specific.
+ * reset() - resets the NIC and board
+ * input() - read data into buffer from supplied offset
+ * output() - write data from buffer into supplied page
+ * the data is padded if necessary and the actual
+ * count is returned.
+ */
+ void (*sc_reset)(struct nssoftc *);
+ void (*sc_input)(struct nssoftc *, int, char *, int);
+ int (*sc_output)(struct nssoftc *, int, char *, int);
+ int sc_word16:1; /* 16 bit (vs 8 bit) board */
+ int sc_txing:1; /* transmit active */
+ int sc_pingpong:1; /* using ping-pong driver */
+ int sc_oactive:1; /* transmitter is active */
+ u_char sc_txstrtpg; /* starting transmit page */
+ u_char sc_rxstrtpg; /* starting receive page */
+ u_char sc_stoppg; /* stop page */
+ u_char sc_curpg; /* current page */
+ short sc_tx1; /* packet lengths for ping-pong transmit */
+ short sc_tx2;
+ short sc_lasttx;
+ u_char sc_timer; /* watchdog */
+ int sc_port; /* I/O port of 8390 */
+ char *sc_name; /* name of board */
+ int sc_unit; /* unit in driver */
+};
+
+#define TX_2X_PAGES 12
+#define TX_1X_PAGES 6
+#define TX_PAGES(ns) ((ns)->sc_pingpong ? TX_2X_PAGES : TX_1X_PAGES)
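Each 8390 buffer page is 256 bytes, so one transmit buffer of TX_1X_PAGES (6 pages, 1536 bytes) holds a maximum-size Ethernet frame, and ping-pong mode reserves two of them. A sketch of the page split the probe routines establish (split_pages is a hypothetical helper, assuming a ping-pong card):

static void
split_pages(int num_pages, int *txstrtpg, int *rxstrtpg, int *stoppg)
{
	*txstrtpg = 0;				/* transmit buffers come first */
	*rxstrtpg = *txstrtpg + TX_2X_PAGES;	/* then the receive ring */
	*stoppg = num_pages;			/* the ring wraps just before this page */
}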
+
+/* Some generic ethernet register configurations. */
+#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */
+#define E8390_RX_IRQ_MASK 0x5
+#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast,errors */
+#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */
+#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */
+#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */
+
+/* Register accessed at EN_CMD, the 8390 base addr. */
+#define E8390_STOP 0x01 /* Stop and reset the chip */
+#define E8390_START 0x02 /* Start the chip, clear reset */
+#define E8390_TRANS 0x04 /* Transmit a frame */
+#define E8390_RREAD 0x08 /* Remote read */
+#define E8390_RWRITE 0x10 /* Remote write */
+#define E8390_NODMA 0x20 /* Remote DMA */
+#define E8390_PAGE0 0x00 /* Select page chip registers */
+#define E8390_PAGE1 0x40 /* using the two high-order bits */
+#define E8390_PAGE2 0x80 /* Page 3 is invalid. */
+
+#define E8390_CMD 0x00 /* The command register (for all pages) */
+/* Page 0 register offsets. */
+#define EN0_CLDALO 0x01 /* Low byte of current local dma addr RD */
+#define EN0_STARTPG 0x01 /* Starting page of ring bfr WR */
+#define EN0_CLDAHI 0x02 /* High byte of current local dma addr RD */
+#define EN0_STOPPG 0x02 /* Ending page +1 of ring bfr WR */
+#define EN0_BOUNDARY 0x03 /* Boundary page of ring bfr RD WR */
+#define EN0_TSR 0x04 /* Transmit status reg RD */
+#define EN0_TPSR 0x04 /* Transmit starting page WR */
+#define EN0_NCR 0x05 /* Number of collision reg RD */
+#define EN0_TCNTLO 0x05 /* Low byte of tx byte count WR */
+#define EN0_FIFO 0x06 /* FIFO RD */
+#define EN0_TCNTHI 0x06 /* High byte of tx byte count WR */
+#define EN0_ISR 0x07 /* Interrupt status reg RD WR */
+#define EN0_CRDALO 0x08 /* low byte of current remote dma address RD */
+#define EN0_RSARLO 0x08 /* Remote start address reg 0 */
+#define EN0_CRDAHI 0x09 /* high byte, current remote dma address RD */
+#define EN0_RSARHI 0x09 /* Remote start address reg 1 */
+#define EN0_RCNTLO 0x0a /* Remote byte count reg WR */
+#define EN0_RCNTHI 0x0b /* Remote byte count reg WR */
+#define EN0_RSR 0x0c /* rx status reg RD */
+#define EN0_RXCR 0x0c /* RX configuration reg WR */
+#define EN0_TXCR 0x0d /* TX configuration reg WR */
+#define EN0_COUNTER0 0x0d /* Rcv alignment error counter RD */
+#define EN0_DCFG 0x0e /* Data configuration reg WR */
+#define EN0_COUNTER1 0x0e /* Rcv CRC error counter RD */
+#define EN0_IMR 0x0f /* Interrupt mask reg WR */
+#define EN0_COUNTER2 0x0f /* Rcv missed frame error counter RD */
+
+/* Bits in EN0_ISR - Interrupt status register */
+#define ENISR_RX 0x01 /* Receiver, no error */
+#define ENISR_TX 0x02 /* Transmitter, no error */
+#define ENISR_RX_ERR 0x04 /* Receiver, with error */
+#define ENISR_TX_ERR 0x08 /* Transmitter, with error */
+#define ENISR_OVER 0x10 /* Receiver overwrote the ring */
+#define ENISR_COUNTERS 0x20 /* Counters need emptying */
+#define ENISR_RDC 0x40 /* remote dma complete */
+#define ENISR_RESET 0x80 /* Reset completed */
+#define ENISR_ALL 0x3f /* Interrupts we will enable */
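+/* ENISR_ALL is every source above except ENISR_RDC and ENISR_RESET. */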
+
+/* Bits in EN0_DCFG - Data config register */
+#define ENDCFG_WTS 0x01 /* word transfer mode selection */
+
+/* Page 1 register offsets. */
+#define EN1_PHYS 0x01 /* This board's physical enet addr RD WR */
+#define EN1_CURPAG 0x07 /* Current memory page RD WR */
+#define EN1_MULT 0x08 /* Multicast filter mask array (8 bytes) RD WR */
+
+/* Bits in received packet status byte and EN0_RSR*/
+#define ENRSR_RXOK 0x01 /* Received a good packet */
+#define ENRSR_CRC 0x02 /* CRC error */
+#define ENRSR_FAE 0x04 /* frame alignment error */
+#define ENRSR_FO 0x08 /* FIFO overrun */
+#define ENRSR_MPA 0x10 /* missed pkt */
+#define ENRSR_PHY	0x20	/* physical/multicast address */
+#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */
+#define ENRSR_DEF 0x80 /* deferring */
+
+/* Transmitted packet status, EN0_TSR. */
+#define ENTSR_PTX 0x01 /* Packet transmitted without error */
+#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */
+#define ENTSR_COL 0x04 /* The transmit collided at least once. */
+#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */
+#define ENTSR_CRS 0x10 /* The carrier sense was lost. */
+#define ENTSR_FU	0x20	/* A "FIFO underrun" occurred during transmit. */
+#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */
+#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */
+
+/* The per-packet-header format. */
+struct nspkthdr {
+ u_char status; /* status */
+ u_char next; /* pointer to next packet. */
+ u_short count; /* header + packet length in bytes */
+};
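For reference, nsrint() in if_ns.c interprets this header as follows: count covers the header itself, and the receive ring is divided into 256-byte pages. A hedged sketch of that calculation (ns_frame_span is a hypothetical name, not a driver routine):

static int
ns_frame_span(const struct nspkthdr *hdr, int this_frame)
{
	int pkt_len = hdr->count - sizeof(*hdr);	/* data bytes, Ethernet header included */

	/* Expected page of the following frame, as nsrint() computes it. */
	return this_frame + 1 + ((pkt_len + sizeof(*hdr)) >> 8);
}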
+
+void nsinit(struct nssoftc *);
+void nsstart(struct nssoftc *);
+void nsintr(struct nssoftc *);
diff --git a/i386/i386at/gpl/if_ul.c b/i386/i386at/gpl/if_ul.c
new file mode 100644
index 00000000..59e74f55
--- /dev/null
+++ b/i386/i386at/gpl/if_ul.c
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 1994 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+/*
+ * Written 1993 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may be used and
+ * distributed according to the terms of the GNU Public License,
+ * incorporated herein by reference.
+ *
+ * The Author may be reached as becker@super.org or
+ * C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
+ */
+
+#include <wd.h>
+#include <ul.h>
+#if NUL > 0
+/*
+ * Driver for SMC Ultra ethernet adaptor.
+ * Derived from the Linux driver by Donald Becker.
+ *
+ * Shantanu Goel (goel@cs.columbia.edu)
+ */
+#include <mach/sa/sys/types.h>
+#include "vm_param.h"
+#include <kern/time_out.h>
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+#include <chips/busses.h>
+#include <i386/machspl.h>
+#include <i386/pio.h>
+#include <i386at/gpl/if_nsreg.h>
+
+#define START_PG 0x00 /* first page of TX buffer */
+#define ULTRA_CMDREG 0 /* offset of ASIC command register */
+#define ULTRA_RESET 0x80 /* board reset in ULTRA_CMDREG */
+#define ULTRA_MEMEN 0x40 /* enable shared memory */
+#define ULTRA_NIC_OFF 16 /* NIC register offset */
+
+#define ulunit(dev) minor(dev)
+
+/*
+ * Autoconfiguration stuff.
+ */
+int ulprobe();
+void ulattach();
+int ulstd[] = { 0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380, 0 };
+struct bus_device *ulinfo[NUL];
+struct bus_driver uldriver = {
+ ulprobe, 0, ulattach, 0, ulstd, "ul", ulinfo, 0, 0, 0
+};
+
+/*
+ * NS8390 state.
+ */
+struct nssoftc ulnssoftc[NUL];
+
+/*
+ * Ultra state.
+ */
+struct ulsoftc {
+ int sc_mstart; /* start of board's RAM */
+ int sc_mend; /* end of board's RAM */
+ int sc_rmstart; /* start of receive RAM */
+ int sc_rmend; /* end of receive RAM */
+} ulsoftc[NUL];
+
+void ulstart(int);
+void ul_reset(struct nssoftc *sc);
+void ul_input(struct nssoftc *sc, int, char *, int);
+int ul_output(struct nssoftc *sc, int, char *, int);
+
+/*
+ * Watchdog.
+ */
+int ulwstart = 0;
+void ulwatch(void);
+
+#define ULDEBUG
+#ifdef ULDEBUG
+int uldebug = 0;
+#define DEBUGF(stmt) { if (uldebug) stmt; }
+#else
+#define DEBUGF(stmt)
+#endif
+
+/*
+ * Probe for the Ultra.
+ * This looks like an 8013 with the station address PROM
+ * at I/O ports <base>+8 to <base>+13, with a checksum following.
+ */
+int
+ulprobe(xxx, ui)
+ int xxx;
+ struct bus_device *ui;
+{
+ int *port;
+
+ if (ui->unit >= NUL) {
+ printf("ul%d: not configured\n", ui->unit);
+ return (0);
+ }
+ for (port = ulstd; *port; port++) {
+ if (*port < 0)
+ continue;
+ /*
+ * Check chip ID nibble.
+ */
+ if ((inb(*port + 7) & 0xf0) != 0x20)
+ continue;
+ if (ulprobe1(*port, ui)) {
+ ui->address = *port;
+ *port = -1;
+#if NWD > 0
+ /*
+ * XXX: The Western Digital/SMC driver can sometimes
+ * probe the Ultra incorrectly. Remove the Ultra's
+			 * port from its list to avoid the problem.
+ */
+ {
+ int i;
+ extern int wdstd[];
+
+ for (i = 0; wdstd[i]; i++) {
+ if (wdstd[i] == ui->address) {
+ wdstd[i] = -1;
+ break;
+ }
+ }
+ }
+#endif
+ return (1);
+ }
+ }
+ return (0);
+}
+
+int
+ulprobe1(port, ui)
+ int port;
+ struct bus_device *ui;
+{
+ u_char num_pages, irqreg, addr, reg4;
+ u_char irqmap[] = { 0, 9, 3, 5, 7, 10, 11, 15 };
+ short num_pages_tbl[4] = { 0x20, 0x40, 0x80, 0xff };
+ int i, irq, checksum = 0;
+ int addr_tbl[4] = { 0x0c0000, 0x0e0000, 0xfc0000, 0xfe0000 };
+ struct ulsoftc *ul = &ulsoftc[ui->unit];
+ struct nssoftc *ns = &ulnssoftc[ui->unit];
+ struct ifnet *ifp = &ns->sc_if;
+
+ /*
+ * Select the station address register set.
+ */
+ reg4 = inb(port + 4) & 0x7f;
+ outb(port + 4, reg4);
+
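+	/* The eight PROM bytes at port+8 through port+15, including the trailing checksum byte, must sum to 0xff (mod 256). */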
+ for (i = 0; i < 8; i++)
+ checksum += inb(port + 8 + i);
+ if ((checksum & 0xff) != 0xff)
+ return (0);
+
+ /*
+ * Use 2 transmit buffers.
+ */
+ ns->sc_pingpong = 1;
+
+ printf("ul%d: SMC Ultra at 0x%03x, ", ui->unit, port);
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ if (i == 0)
+ printf("%02x", ns->sc_addr[i] = inb(port + 8 + i));
+ else
+ printf(":%02x", ns->sc_addr[i] = inb(port + 8 + i));
+ }
+ /*
+ * Switch from station address to alternate register set
+ * and read useful registers there.
+ */
+ outb(port + 4, 0x80 | reg4);
+
+ /*
+ * Enable FINE16 mode to avoid BIOS ROM width mismatches
+ * during reboot.
+ */
+ outb(port + 0x0c, 0x80 | inb(port + 0x0c));
+ irqreg = inb(port + 0x0d);
+ addr = inb(port + 0x0b);
+
+ /*
+ * Switch back to station address register set so the MSDOG
+ * driver can find the card after a warm boot.
+ */
+ outb(port + 4, reg4);
+
+ /*
+ * Determine IRQ. The IRQ bits are split.
+ */
+ irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)];
+ if (irq == 0) {
+ printf(", failed to detect IRQ line.\n");
+ return (0);
+ }
+ ui->sysdep1 = irq;
+ take_dev_irq(ui);
+ printf(", irq %d", irq);
+
+ /*
+ * Determine board's RAM location.
+ */
+ ul->sc_mstart = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3];
+ num_pages = num_pages_tbl[(addr >> 4) & 3];
+ ul->sc_rmstart = ul->sc_mstart + TX_PAGES(ns) * 256;
+ ul->sc_mend = ul->sc_rmend
+ = ul->sc_mstart + (num_pages - START_PG) * 256;
+ printf(", memory 0x%05x-0x%05x\n", ul->sc_mstart, ul->sc_mend);
+
+ /*
+ * Initialize 8390 state.
+ */
+ ns->sc_name = ui->name;
+ ns->sc_unit = ui->unit;
+ ns->sc_port = port + ULTRA_NIC_OFF;
+ ns->sc_word16 = 1;
+ ns->sc_txstrtpg = START_PG;
+ ns->sc_rxstrtpg = START_PG + TX_PAGES(ns);
+ ns->sc_stoppg = num_pages;
+ ns->sc_reset = ul_reset;
+ ns->sc_input = ul_input;
+ ns->sc_output = ul_output;
+
+ DEBUGF(printf("ul%d: txstrtpg %d rxstrtpg %d num_pages %d\n",
+ ui->unit, ns->sc_txstrtpg, ns->sc_rxstrtpg, num_pages));
+
+ /*
+ * Initialize interface header.
+ */
+ ifp->if_unit = ui->unit;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST;
+ ifp->if_header_size = sizeof(struct ether_header);
+ ifp->if_header_format = HDR_ETHERNET;
+ ifp->if_address_size = ETHER_ADDR_LEN;
+ ifp->if_address = ns->sc_addr;
+ if_init_queues(ifp);
+
+ return (1);
+}
+
+void
+ulattach(ui)
+ struct bus_device *ui;
+{
+ /*
+ * void
+ */
+}
+
+int
+ulopen(dev, flag)
+ dev_t dev;
+ int flag;
+{
+ int unit = ulunit(dev), s;
+ struct bus_device *ui;
+
+ if (unit >= NUL || (ui = ulinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ /*
+ * Start watchdog.
+ */
+ if (!ulwstart) {
+ ulwstart++;
+ timeout(ulwatch, 0, hz);
+ }
+ ulnssoftc[unit].sc_if.if_flags |= IFF_UP;
+ s = splimp();
+ outb(ui->address, ULTRA_MEMEN); /* enable memory, 16 bit mode */
+ outb(ui->address + 5, 0x80);
+ outb(ui->address + 6, 0x01); /* enable interrupts and memory */
+ nsinit(&ulnssoftc[unit]);
+ splx(s);
+ return (0);
+}
+
+int
+uloutput(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ int unit = ulunit(dev);
+ struct bus_device *ui;
+
+ if (unit >= NUL || (ui = ulinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ return (net_write(&ulnssoftc[unit].sc_if, ulstart, ior));
+}
+
+int
+ulsetinput(dev, receive_port, priority, filter, filter_count)
+ dev_t dev;
+ mach_port_t receive_port;
+ int priority;
+ filter_t *filter;
+ unsigned filter_count;
+{
+ int unit = ulunit(dev);
+ struct bus_device *ui;
+
+ if (unit >= NUL || (ui = ulinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ return (net_set_filter(&ulnssoftc[unit].sc_if, receive_port,
+ priority, filter, filter_count));
+}
+
+int
+ulgetstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status;
+ unsigned *count;
+{
+ int unit = ulunit(dev);
+ struct bus_device *ui;
+
+ if (unit >= NUL || (ui = ulinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ return (net_getstat(&ulnssoftc[unit].sc_if, flavor, status, count));
+}
+
+int
+ulsetstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status;
+ unsigned count;
+{
+ int unit = ulunit(dev), oflags, s;
+ struct bus_device *ui;
+ struct ifnet *ifp;
+ struct net_status *ns;
+
+ if (unit >= NUL || (ui = ulinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ ifp = &ulnssoftc[unit].sc_if;
+
+ switch (flavor) {
+
+ case NET_STATUS:
+ if (count < NET_STATUS_COUNT)
+ return (D_INVALID_SIZE);
+ ns = (struct net_status *)status;
+ oflags = ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC);
+ ifp->if_flags &= ~(IFF_ALLMULTI|IFF_PROMISC);
+ ifp->if_flags |= ns->flags & (IFF_ALLMULTI|IFF_PROMISC);
+ if ((ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) != oflags) {
+ s = splimp();
+ nsinit(&ulnssoftc[unit]);
+ splx(s);
+ }
+ break;
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+void
+ulintr(unit)
+ int unit;
+{
+ nsintr(&ulnssoftc[unit]);
+}
+
+void
+ulstart(unit)
+ int unit;
+{
+ nsstart(&ulnssoftc[unit]);
+}
+
+void
+ul_reset(ns)
+ struct nssoftc *ns;
+{
+ int port = ns->sc_port - ULTRA_NIC_OFF; /* ASIC base address */
+
+ outb(port, ULTRA_RESET);
+ outb(0x80, 0); /* I/O delay */
+ outb(port, ULTRA_MEMEN);
+}
+
+void
+ul_input(ns, count, buf, ring_offset)
+ struct nssoftc *ns;
+ int count;
+ char *buf;
+ int ring_offset;
+{
+ int xfer_start;
+ struct ulsoftc *ul = &ulsoftc[ns->sc_unit];
+
+ DEBUGF(printf("ul%d: ring_offset = %d\n", ns->sc_unit, ring_offset));
+
+ xfer_start = ul->sc_mstart + ring_offset - (START_PG << 8);
+ if (xfer_start + count > ul->sc_rmend) {
+ int semi_count = ul->sc_rmend - xfer_start;
+
+ /*
+ * Input move must be wrapped.
+ */
+ bcopy((char *)phystokv(xfer_start), buf, semi_count);
+ count -= semi_count;
+ bcopy((char *)phystokv(ul->sc_rmstart), buf+semi_count, count);
+ } else
+ bcopy((char *)phystokv(xfer_start), buf, count);
+}
+
+int
+ul_output(ns, count, buf, start_page)
+ struct nssoftc *ns;
+ int count;
+ char *buf;
+ int start_page;
+{
+ char *shmem;
+ int i;
+ struct ulsoftc *ul = &ulsoftc[ns->sc_unit];
+
+ DEBUGF(printf("ul%d: start_page = %d\n", ns->sc_unit, start_page));
+
+ shmem = (char *)phystokv(ul->sc_mstart + ((start_page-START_PG) << 8));
+ bcopy(buf, shmem, count);
+ while (count < ETHERMIN + sizeof(struct ether_header)) {
+ *(shmem + count) = 0;
+ count++;
+ }
+ return (count);
+}
+
+/*
+ * Watchdog.
+ * Check for hung transmissions.
+ */
+void
+ulwatch()
+{
+ int unit, s;
+ struct nssoftc *ns;
+
+ timeout(ulwatch, 0, hz);
+
+ s = splimp();
+ for (unit = 0; unit < NUL; unit++) {
+ if (ulinfo[unit] == 0 || ulinfo[unit]->alive == 0)
+ continue;
+ ns = &ulnssoftc[unit];
+ if (ns->sc_timer && --ns->sc_timer == 0) {
+ printf("ul%d: transmission timeout\n", unit);
+ nsinit(ns);
+ }
+ }
+ splx(s);
+}
+
+#endif /* NUL > 0 */
diff --git a/i386/i386at/gpl/if_wd.c b/i386/i386at/gpl/if_wd.c
new file mode 100644
index 00000000..c569a3e1
--- /dev/null
+++ b/i386/i386at/gpl/if_wd.c
@@ -0,0 +1,581 @@
+/*
+ * Copyright (c) 1994 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+/*
+ * Written 1993 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may be used and
+ * distributed according to the terms of the GNU Public License,
+ * incorporated herein by reference.
+ *
+ * The Author may be reached as becker@super.org or
+ * C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
+ */
+
+#include <wd.h>
+#if NWD > 0
+/*
+ * Driver for SMC/Western Digital Ethernet adaptors.
+ * Derived from the Linux driver by Donald Becker.
+ *
+ * Shantanu Goel (goel@cs.columbia.edu)
+ */
+#include <mach/sa/sys/types.h>
+#include "vm_param.h"
+#include <kern/time_out.h>
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+#include <chips/busses.h>
+#include <i386/machspl.h>
+#include <i386/pio.h>
+#include <i386at/gpl/if_nsreg.h>
+
+#define WD_START_PG 0x00 /* first page of TX buffer */
+#define WD03_STOP_PG 0x20 /* last page +1 of RX ring */
+#define WD13_STOP_PG 0x40 /* last page +1 of RX ring */
+
+#define WD_CMDREG 0 /* offset of ASIC command register */
+#define WD_RESET 0x80 /* board reset in WDTRA_CMDREG */
+#define WD_MEMEN 0x40 /* enable shared memory */
+#define WD_CMDREG5 5 /* offset of 16-bit-only ASIC register 5 */
+#define ISA16 0x80 /* enable 16 bit access from the ISA bus */
+#define NIC16 0x40 /* enable 16 bit access from the 8390 */
+#define WD_NIC_OFF 16 /* NIC register offset */
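wdopen() further down splits the shared-memory base address across ASIC registers 0 and 5 using these bits. A hedged sketch of that encoding (wd_encode_base is a hypothetical helper):

static void
wd_encode_base(int mstart, int *reg0, int *reg5)
{
	*reg0 = ((mstart >> 13) & 0x3f) | WD_MEMEN;	/* A13..A18 of the base, plus memory enable */
	*reg5 = ((mstart >> 19) & 0x1f) | NIC16;	/* A19..A23 of the base, plus 16-bit NIC access */
}

For a card at 0xd0000, for example, this yields reg0 = 0x28 | WD_MEMEN and reg5 = 0x01 | NIC16.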
+
+#define wdunit(dev) minor(dev)
+
+/*
+ * Autoconfiguration stuff.
+ */
+int wdprobe();
+void wdattach();
+int wdstd[] = { 0x300, 0x280, 0x380, 0x240, 0 };
+struct bus_device *wdinfo[NWD];
+struct bus_driver wddriver = {
+ wdprobe, 0, wdattach, 0, wdstd, "wd", wdinfo, 0, 0, 0
+};
+
+/*
+ * NS8390 state.
+ */
+struct nssoftc wdnssoftc[NWD];
+
+/*
+ * Board state.
+ */
+struct wdsoftc {
+ int sc_mstart; /* start of board's RAM */
+ int sc_mend; /* end of board's RAM */
+ int sc_rmstart; /* start of receive RAM */
+ int sc_rmend; /* end of receive RAM */
+ int sc_reg0; /* copy of register 0 of ASIC */
+ int sc_reg5; /* copy of register 5 of ASIC */
+} wdsoftc[NWD];
+
+void wdstart(int);
+void wd_reset(struct nssoftc *sc);
+void wd_input(struct nssoftc *sc, int, char *, int);
+int wd_output(struct nssoftc *sc, int, char *, int);
+
+/*
+ * Watchdog.
+ */
+int wdwstart = 0;
+void wdwatch(void);
+
+#define WDDEBUG
+#ifdef WDDEBUG
+int wddebug = 0;
+#define DEBUGF(stmt) { if (wddebug) stmt; }
+#else
+#define DEBUGF(stmt)
+#endif
+
+/*
+ * Probe for the WD8003 and WD8013.
+ * These cards have the station address PROM at I/O ports <base>+8
+ * to <base>+13, with a checksum following. A Soundblaster can have
+ * the same checksum as a WD ethercard, so we have an extra exclusionary
+ * check for it.
+ */
+int
+wdprobe(xxx, ui)
+ int xxx;
+ struct bus_device *ui;
+{
+ int *port;
+
+ if (ui->unit >= NWD) {
+ printf("wd%d: not configured\n", ui->unit);
+ return (0);
+ }
+ for (port = wdstd; *port; port++) {
+ if (*port < 0)
+ continue;
+ if (inb(*port + 8) != 0xff
+ && inb(*port + 9) != 0xff
+ && wdprobe1(*port, ui)) {
+ ui->address = *port;
+ *port = -1;
+ return (1);
+ }
+ }
+ return (0);
+}
+
+int
+wdprobe1(port, ui)
+ int port;
+ struct bus_device *ui;
+{
+
+ int i, irq = 0, checksum = 0, ancient = 0, word16 = 0;
+ struct wdsoftc *wd = &wdsoftc[ui->unit];
+ struct nssoftc *ns = &wdnssoftc[ui->unit];
+ struct ifnet *ifp = &ns->sc_if;
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(port + 8 + i);
+ if ((checksum & 0xff) != 0xff)
+ return (0);
+
+ printf("wd%d: WD80x3 at 0x%03x, ", ui->unit, port);
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ if (i == 0)
+ printf("%02x", ns->sc_addr[i] = inb(port + 8 + i));
+ else
+ printf(":%02x", ns->sc_addr[i] = inb(port + 8 + i));
+ }
+ /*
+ * Check for PureData.
+ */
+ if (inb(port) == 'P' && inb(port + 1) == 'D') {
+ u_char reg5 = inb(port + 5);
+
+ switch (inb(port + 2)) {
+
+ case 0x03:
+ case 0x05:
+ word16 = 0;
+ break;
+
+ case 0x0a:
+ word16 = 1;
+ break;
+
+ default:
+ word16 = 0;
+ break;
+ }
+ wd->sc_mstart = ((reg5 & 0x1c) + 0xc0) << 12;
+ irq = (reg5 & 0xe0) == 0xe0 ? 10 : (reg5 >> 5) + 1;
+ } else {
+ /*
+ * Check for 8 bit vs 16 bit card.
+ */
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ if (inb(port + i) != inb(port + 8 + i))
+ break;
+ if (i >= ETHER_ADDR_LEN) {
+ ancient = 1;
+ word16 = 0;
+ } else {
+ int tmp = inb(port + 1);
+
+ /*
+ * Attempt to clear 16bit bit.
+ */
+ outb(port + 1, tmp ^ 0x01);
+ if (((inb(port + 1) & 0x01) == 0x01) /* 16 bit */
+ && (tmp & 0x01) == 0x01) { /* in 16 bit slot */
+ int asic_reg5 = inb(port + WD_CMDREG5);
+
+ /*
+ * Magic to set ASIC to word-wide mode.
+ */
+ outb(port+WD_CMDREG5, NIC16|(asic_reg5&0x1f));
+ outb(port + 1, tmp);
+ word16 = 1;
+ } else
+ word16 = 0;
+ outb(port + 1, tmp);
+ }
+ if (!ancient && (inb(port + 1) & 0x01) != (word16 & 0x01))
+ printf("\nwd%d: bus width conflict, "
+ "%d (probe) != %d (reg report)", ui->unit,
+ word16 ? 16 : 8, (inb(port+1) & 0x01) ? 16 : 8);
+ }
+ /*
+ * Determine board's RAM location.
+ */
+ if (wd->sc_mstart == 0) {
+ int reg0 = inb(port);
+
+ if (reg0 == 0xff || reg0 == 0)
+ wd->sc_mstart = 0xd0000;
+ else {
+ int high_addr_bits = inb(port + WD_CMDREG5) & 0x1f;
+
+ if (high_addr_bits == 0x1f || word16 == 0)
+ high_addr_bits = 0x01;
+ wd->sc_mstart = ((reg0&0x3f)<<13)+(high_addr_bits<<19);
+ }
+ }
+ /*
+ * Determine irq.
+ */
+ if (irq == 0) {
+ int irqmap[] = { 9, 3, 5, 7, 10, 11, 15, 4 };
+ int reg1 = inb(port + 1);
+ int reg4 = inb(port + 4);
+
+ /*
+ * For old card, irq must be supplied.
+ */
+ if (ancient || reg1 == 0xff) {
+ if (ui->sysdep1 == 0) {
+ printf("\nwd%d: must specify IRQ for card\n",
+ ui->unit);
+ return (0);
+ }
+ irq = ui->sysdep1;
+ } else {
+ DEBUGF({
+ int i = ((reg4 >> 5) & 0x03) + (reg1 & 0x04);
+
+ printf("\nwd%d: irq index %d\n", ui->unit, i);
+ printf("wd%d:", ui->unit);
+ })
+ irq = irqmap[((reg4 >> 5) & 0x03) + (reg1 & 0x04)];
+ }
+ } else if (irq == 2)
+ irq = 9;
+ ui->sysdep1 = irq;
+ take_dev_irq(ui);
+ printf(", irq %d", irq);
+
+ /*
+ * Initialize 8390 state.
+ */
+ ns->sc_name = ui->name;
+ ns->sc_unit = ui->unit;
+ ns->sc_port = port + WD_NIC_OFF;
+ ns->sc_reset = wd_reset;
+ ns->sc_input = wd_input;
+ ns->sc_output = wd_output;
+ ns->sc_pingpong = 1;
+ ns->sc_word16 = word16;
+ ns->sc_txstrtpg = WD_START_PG;
+ ns->sc_rxstrtpg = WD_START_PG + TX_PAGES(ns);
+ ns->sc_stoppg = word16 ? WD13_STOP_PG : WD03_STOP_PG;
+
+ wd->sc_rmstart = wd->sc_mstart + TX_PAGES(ns) * 256;
+ wd->sc_mend = wd->sc_rmend
+ = wd->sc_mstart + (ns->sc_stoppg - WD_START_PG) * 256;
+ printf(", memory 0x%05x-0x%05x", wd->sc_mstart, wd->sc_mend);
+
+ if (word16)
+ printf(", 16 bit");
+ printf("\n");
+
+ DEBUGF(printf("wd%d: txstrtpg %d rxstrtpg %d num_pages %d\n",
+ ui->unit, ns->sc_txstrtpg, ns->sc_rxstrtpg,
+ (wd->sc_mend - wd->sc_mstart) / 256));
+
+ /*
+ * Initialize interface header.
+ */
+ ifp->if_unit = ui->unit;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST;
+ ifp->if_header_size = sizeof(struct ether_header);
+ ifp->if_header_format = HDR_ETHERNET;
+ ifp->if_address_size = ETHER_ADDR_LEN;
+ ifp->if_address = ns->sc_addr;
+ if_init_queues(ifp);
+
+ return (1);
+}
+
+void
+wdattach(ui)
+ struct bus_device *ui;
+{
+ /*
+ * void
+ */
+}
+
+int
+wdopen(dev, flag)
+ dev_t dev;
+ int flag;
+{
+ int unit = wdunit(dev), s;
+ struct bus_device *ui;
+ struct wdsoftc *wd;
+ struct nssoftc *ns;
+
+ if (unit >= NWD || (ui = wdinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ /*
+ * Start watchdog.
+ */
+ if (!wdwstart) {
+ wdwstart++;
+ timeout(wdwatch, 0, hz);
+ }
+ wd = &wdsoftc[unit];
+ ns = &wdnssoftc[unit];
+ ns->sc_if.if_flags |= IFF_UP;
+ s = splimp();
+ wd->sc_reg0 = ((wd->sc_mstart >> 13) & 0x3f) | WD_MEMEN;
+ wd->sc_reg5 = ((wd->sc_mstart >> 19) & 0x1f) | NIC16;
+ if (ns->sc_word16)
+ outb(ui->address + WD_CMDREG5, wd->sc_reg5);
+ outb(ui->address, wd->sc_reg0);
+ nsinit(ns);
+ splx(s);
+ return (0);
+}
+
+int
+wdoutput(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ int unit = wdunit(dev);
+ struct bus_device *ui;
+
+ if (unit >= NWD || (ui = wdinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ return (net_write(&wdnssoftc[unit].sc_if, wdstart, ior));
+}
+
+int
+wdsetinput(dev, receive_port, priority, filter, filter_count)
+ dev_t dev;
+ mach_port_t receive_port;
+ int priority;
+ filter_t *filter;
+ unsigned filter_count;
+{
+ int unit = wdunit(dev);
+ struct bus_device *ui;
+
+ if (unit >= NWD || (ui = wdinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ return (net_set_filter(&wdnssoftc[unit].sc_if, receive_port,
+ priority, filter, filter_count));
+}
+
+int
+wdgetstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status;
+ unsigned *count;
+{
+ int unit = wdunit(dev);
+ struct bus_device *ui;
+
+ if (unit >= NWD || (ui = wdinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ return (net_getstat(&wdnssoftc[unit].sc_if, flavor, status, count));
+}
+
+int
+wdsetstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status;
+ unsigned count;
+{
+ int unit = wdunit(dev), oflags, s;
+ struct bus_device *ui;
+ struct ifnet *ifp;
+ struct net_status *ns;
+
+ if (unit >= NWD || (ui = wdinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ ifp = &wdnssoftc[unit].sc_if;
+
+ switch (flavor) {
+
+ case NET_STATUS:
+ if (count < NET_STATUS_COUNT)
+ return (D_INVALID_SIZE);
+ ns = (struct net_status *)status;
+ oflags = ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC);
+ ifp->if_flags &= ~(IFF_ALLMULTI|IFF_PROMISC);
+ ifp->if_flags |= ns->flags & (IFF_ALLMULTI|IFF_PROMISC);
+ if ((ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) != oflags) {
+ s = splimp();
+ nsinit(&wdnssoftc[unit]);
+ splx(s);
+ }
+ break;
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+void
+wdintr(unit)
+ int unit;
+{
+ nsintr(&wdnssoftc[unit]);
+}
+
+void
+wdstart(unit)
+ int unit;
+{
+ nsstart(&wdnssoftc[unit]);
+}
+
+void
+wd_reset(ns)
+ struct nssoftc *ns;
+{
+ int port = ns->sc_port - WD_NIC_OFF; /* ASIC base address */
+ struct wdsoftc *wd = &wdsoftc[ns->sc_unit];
+
+ outb(port, WD_RESET);
+ outb(0x80, 0); /* I/O delay */
+ /*
+ * Set up the ASIC registers, just in case something changed them.
+ */
+ outb(port, ((wd->sc_mstart >> 13) & 0x3f) | WD_MEMEN);
+ if (ns->sc_word16)
+ outb(port + WD_CMDREG5, NIC16 | ((wd->sc_mstart>>19) & 0x1f));
+}
+
+void
+wd_input(ns, count, buf, ring_offset)
+ struct nssoftc *ns;
+ int count;
+ char *buf;
+ int ring_offset;
+{
+ int port = ns->sc_port - WD_NIC_OFF;
+ int xfer_start;
+ struct wdsoftc *wd = &wdsoftc[ns->sc_unit];
+
+ DEBUGF(printf("wd%d: ring_offset = %d\n", ns->sc_unit, ring_offset));
+
+ xfer_start = wd->sc_mstart + ring_offset - (WD_START_PG << 8);
+
+ /*
+	 * The NIC driver calls us three times: once to read the 4 byte
+	 * NIC header, next to read the Ethernet header, and finally to
+	 * read the actual data.  We enable 16 bit mode before the NIC
+	 * header and disable it after the packet body.
+ */
+ if (count == 4) {
+ if (ns->sc_word16)
+ outb(port + WD_CMDREG5, ISA16 | wd->sc_reg5);
+ ((int *)buf)[0] = ((int *)phystokv(xfer_start))[0];
+ return;
+ }
+ if (count == sizeof(struct ether_header)) {
+ xfer_start = (int)phystokv(xfer_start);
+ ((int *)buf)[0] = ((int *)xfer_start)[0];
+ ((int *)buf)[1] = ((int *)xfer_start)[1];
+ ((int *)buf)[2] = ((int *)xfer_start)[2];
+ ((short *)(buf + 12))[0] = ((short *)(xfer_start + 12))[0];
+ return;
+ }
+ if (xfer_start + count > wd->sc_rmend) {
+ int semi_count = wd->sc_rmend - xfer_start;
+
+ /*
+ * Input move must be wrapped.
+ */
+ bcopy((char *)phystokv(xfer_start), buf, semi_count);
+ count -= semi_count;
+ bcopy((char *)phystokv(wd->sc_rmstart),buf+semi_count,count);
+ } else
+ bcopy((char *)phystokv(xfer_start), buf, count);
+ if (ns->sc_word16)
+ outb(port + WD_CMDREG5, wd->sc_reg5);
+}
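+
+/*
+ * Illustration only (the buffer and offset names below are made up, not
+ * taken from this driver): the shared 8390 code described above is assumed
+ * to issue, per received packet,
+ *
+ *	wd_input(ns, 4, nic_hdr, off);				   16 bit mode on
+ *	wd_input(ns, sizeof(struct ether_header), eh, eh_off);
+ *	wd_input(ns, data_len, data_buf, data_off);		   16 bit mode off
+ *
+ * matching the three count-based cases handled above.
+ */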
+
+int
+wd_output(ns, count, buf, start_page)
+ struct nssoftc *ns;
+ int count;
+ char *buf;
+ int start_page;
+{
+ char *shmem;
+ int i, port = ns->sc_port - WD_NIC_OFF;
+ struct wdsoftc *wd = &wdsoftc[ns->sc_unit];
+
+ DEBUGF(printf("wd%d: start_page = %d\n", ns->sc_unit, start_page));
+
+ shmem = (char *)phystokv(wd->sc_mstart+((start_page-WD_START_PG)<<8));
+ if (ns->sc_word16) {
+ outb(port + WD_CMDREG5, ISA16 | wd->sc_reg5);
+ bcopy(buf, shmem, count);
+ outb(port + WD_CMDREG5, wd->sc_reg5);
+ } else
+ bcopy(buf, shmem, count);
+ while (count < ETHERMIN + sizeof(struct ether_header)) {
+ *(shmem + count) = 0;
+ count++;
+ }
+ return (count);
+}
+
+/*
+ * Watchdog.
+ * Check for hung transmissions.
+ */
+void
+wdwatch()
+{
+ int unit, s;
+ struct nssoftc *ns;
+
+ timeout(wdwatch, 0, hz);
+
+ s = splimp();
+ for (unit = 0; unit < NWD; unit++) {
+ if (wdinfo[unit] == 0 || wdinfo[unit]->alive == 0)
+ continue;
+ ns = &wdnssoftc[unit];
+ if (ns->sc_timer && --ns->sc_timer == 0) {
+ printf("wd%d: transmission timeout\n", unit);
+ nsinit(ns);
+ }
+ }
+ splx(s);
+}
+
+#endif /* NWD > 0 */
diff --git a/i386/i386at/gpl/linux/block/cmd640.c b/i386/i386at/gpl/linux/block/cmd640.c
new file mode 100644
index 00000000..99a139dc
--- /dev/null
+++ b/i386/i386at/gpl/linux/block/cmd640.c
@@ -0,0 +1,738 @@
+/*
+ * linux/drivers/block/cmd640.c Version 0.07 Jan 27, 1996
+ *
+ * Copyright (C) 1995-1996 Linus Torvalds & author (see below)
+ */
+
+/*
+ * Principal Author/Maintainer: abramov@cecmow.enet.dec.com (Igor Abramov)
+ *
+ * This file provides support for the advanced features and bugs
+ * of IDE interfaces using the CMD Technologies 0640 IDE interface chip.
+ *
+ * Version 0.01 Initial version, hacked out of ide.c,
+ * and #include'd rather than compiled separately.
+ * This will get cleaned up in a subsequent release.
+ *
+ * Version 0.02 Fixes for vlb initialization code, enable
+ * read-ahead for versions 'B' and 'C' of chip by
+ * default, some code cleanup.
+ *
+ * Version 0.03 Added reset of secondary interface,
+ * and black list for devices which are not compatible
+ * with read ahead mode. Separate function for setting
+ * readahead is added, possibly it will be called some
+ * day from ioctl processing code.
+ *
+ * Version 0.04 Now configs/compiles separate from ide.c -ml
+ *
+ * Version 0.05 Major rewrite of interface timing code.
+ * Added new function cmd640_set_mode to set PIO mode
+ * from ioctl call. New drives added to black list.
+ *
+ * Version 0.06 More code cleanup. Readahead is enabled only for
+ * detected hard drives not included in the readahead
+ * black list.
+ *
+ * Version 0.07 Changed to more conservative drive tuning policy.
+ * Unknown drives, which report PIO < 4 are set to
+ * (reported_PIO - 1) if it is supported, or to PIO0.
+ * List of known drives extended by info provided by
+ * CMD at their ftp site.
+ *
+ * Version 0.08 Added autotune/noautotune support. -ml
+ *
+ */
+
+#undef REALLY_SLOW_IO /* most systems can safely undef this */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <asm/io.h>
+#include "ide.h"
+#include "ide_modes.h"
+
+int cmd640_vlb = 0;
+
+/*
+ * CMD640 specific registers definition.
+ */
+
+#define VID 0x00
+#define DID 0x02
+#define PCMD 0x04
+#define PSTTS 0x06
+#define REVID 0x08
+#define PROGIF 0x09
+#define SUBCL 0x0a
+#define BASCL 0x0b
+#define BaseA0 0x10
+#define BaseA1 0x14
+#define BaseA2 0x18
+#define BaseA3 0x1c
+#define INTLINE 0x3c
+#define INPINE 0x3d
+
+#define CFR 0x50
+#define CFR_DEVREV 0x03
+#define CFR_IDE01INTR 0x04
+#define CFR_DEVID 0x18
+#define CFR_AT_VESA_078h 0x20
+#define CFR_DSA1 0x40
+#define CFR_DSA0 0x80
+
+#define CNTRL 0x51
+#define CNTRL_DIS_RA0 0x40
+#define CNTRL_DIS_RA1 0x80
+#define CNTRL_ENA_2ND 0x08
+
+#define CMDTIM 0x52
+#define ARTTIM0 0x53
+#define DRWTIM0 0x54
+#define ARTTIM1 0x55
+#define DRWTIM1 0x56
+#define ARTTIM23 0x57
+#define DIS_RA2 0x04
+#define DIS_RA3 0x08
+#define DRWTIM23 0x58
+#define BRST 0x59
+
+static ide_tuneproc_t cmd640_tune_drive;
+
+/* Interface to access cmd640x registers */
+static void (*put_cmd640_reg)(int reg_no, int val);
+static byte (*get_cmd640_reg)(int reg_no);
+
+enum { none, vlb, pci1, pci2 };
+static int bus_type = none;
+static int cmd640_chip_version;
+static int cmd640_key;
+static int bus_speed; /* MHz */
+
+/*
+ * For some unknown reason the pcibios functions which read and write
+ * registers do not always work with the cmd640.  We use direct I/O instead.
+ */
+
+/* PCI method 1 access */
+
+static void put_cmd640_reg_pci1(int reg_no, int val)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outl_p((reg_no & 0xfc) | cmd640_key, 0xcf8);
+ outb_p(val, (reg_no & 3) + 0xcfc);
+ restore_flags(flags);
+}
+
+static byte get_cmd640_reg_pci1(int reg_no)
+{
+ byte b;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outl_p((reg_no & 0xfc) | cmd640_key, 0xcf8);
+ b = inb_p(0xcfc + (reg_no & 3));
+ restore_flags(flags);
+ return b;
+}
+
+/* PCI method 2 access (from CMD datasheet) */
+
+static void put_cmd640_reg_pci2(int reg_no, int val)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outb_p(0x10, 0xcf8);
+ outb_p(val, cmd640_key + reg_no);
+ outb_p(0, 0xcf8);
+ restore_flags(flags);
+}
+
+static byte get_cmd640_reg_pci2(int reg_no)
+{
+ byte b;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outb_p(0x10, 0xcf8);
+ b = inb_p(cmd640_key + reg_no);
+ outb_p(0, 0xcf8);
+ restore_flags(flags);
+ return b;
+}
+
+/* VLB access */
+
+static void put_cmd640_reg_vlb(int reg_no, int val)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outb_p(reg_no, cmd640_key + 8);
+ outb_p(val, cmd640_key + 0xc);
+ restore_flags(flags);
+}
+
+static byte get_cmd640_reg_vlb(int reg_no)
+{
+ byte b;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outb_p(reg_no, cmd640_key + 8);
+ b = inb_p(cmd640_key + 0xc);
+ restore_flags(flags);
+ return b;
+}
+
+/*
+ * Probe for CMD640x -- pci method 1
+ */
+
+static int probe_for_cmd640_pci1(void)
+{
+ long id;
+ int k;
+
+ for (k = 0x80000000; k <= 0x8000f800; k += 0x800) {
+ outl(k, 0xcf8);
+ id = inl(0xcfc);
+ if (id != 0x06401095)
+ continue;
+ put_cmd640_reg = put_cmd640_reg_pci1;
+ get_cmd640_reg = get_cmd640_reg_pci1;
+ cmd640_key = k;
+ return 1;
+ }
+ return 0;
+}
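+
+/*
+ * Note (assuming the standard PCI configuration mechanism #1 layout, which
+ * is not spelled out in this file): the CONFIG_ADDRESS value written to
+ * port 0xcf8 above encodes
+ *	bit  31    enable (0x80000000)
+ *	bits 16-23 bus number
+ *	bits 11-15 device number
+ *	bits 8-10  function number
+ *	bits 2-7   register number
+ * so stepping k from 0x80000000 to 0x8000f800 by 0x800 scans every
+ * device/function on bus 0 looking for the 0x1095/0x0640 ID.
+ */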
+
+/*
+ * Probe for CMD640x -- pci method 2
+ */
+
+static int probe_for_cmd640_pci2(void)
+{
+ int i;
+ int v_id;
+ int d_id;
+
+ for (i = 0xc000; i <= 0xcf00; i += 0x100) {
+ outb(0x10, 0xcf8);
+ v_id = inw(i);
+ d_id = inw(i + 2);
+ outb(0, 0xcf8);
+ if (v_id != 0x1095 || d_id != 0x640)
+ continue;
+ put_cmd640_reg = put_cmd640_reg_pci2;
+ get_cmd640_reg = get_cmd640_reg_pci2;
+ cmd640_key = i;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Probe for CMD640x -- vlb
+ */
+
+static int probe_for_cmd640_vlb(void) {
+ byte b;
+
+ outb(CFR, 0x178);
+ b = inb(0x17c);
+ if (b == 0xff || b == 0 || (b & CFR_AT_VESA_078h)) {
+ outb(CFR, 0x78);
+ b = inb(0x7c);
+ if (b == 0xff || b == 0 || !(b & CFR_AT_VESA_078h))
+ return 0;
+ cmd640_key = 0x70;
+ } else {
+ cmd640_key = 0x170;
+ }
+ put_cmd640_reg = put_cmd640_reg_vlb;
+ get_cmd640_reg = get_cmd640_reg_vlb;
+ return 1;
+}
+
+/*
+ * Low-level reset for the controller.  It actually has nothing specific
+ * to the CMD640, but I don't know how to use the standard reset routine
+ * before we have recognized any drives.
+ */
+
+static void cmd640_reset_controller(int iface_no)
+{
+ int retry_count = 600;
+ int base_port = iface_no ? 0x170 : 0x1f0;
+
+ outb_p(4, base_port + 7);
+ udelay(5);
+ outb_p(0, base_port + 7);
+
+ do {
+ udelay(5);
+ retry_count -= 1;
+ } while ((inb_p(base_port + 7) & 0x80) && retry_count);
+
+ if (retry_count == 0)
+ printk("cmd640: failed to reset controller %d\n", iface_no);
+#if 0
+ else
+ printk("cmd640: controller %d reset [%d]\n",
+ iface_no, retry_count);
+#endif
+}
+
+/*
+ * Probe for Cmd640x and initialize it if found
+ */
+
+int ide_probe_for_cmd640x(void)
+{
+ int second_port;
+ byte b;
+
+ if (probe_for_cmd640_pci1()) {
+ bus_type = pci1;
+ } else if (probe_for_cmd640_pci2()) {
+ bus_type = pci2;
+ } else if (cmd640_vlb && probe_for_cmd640_vlb()) {
+		/* Maybe remove cmd640_vlb altogether, and probe in any case */
+ bus_type = vlb;
+ } else {
+ return 0;
+ }
+
+ ide_hwifs[0].serialized = 1; /* ensure this *always* gets set */
+
+#if 0
+ /* Dump initial state of chip registers */
+ for (b = 0; b != 0xff; b++) {
+ printk(" %2x%c", get_cmd640_reg(b),
+ ((b&0xf) == 0xf) ? '\n' : ',');
+ }
+
+#endif
+
+ /*
+ * Undocumented magic. (There is no 0x5b port in specs)
+ */
+
+ put_cmd640_reg(0x5b, 0xbd);
+ if (get_cmd640_reg(0x5b) != 0xbd) {
+ printk("ide: can't initialize cmd640 -- wrong value in 0x5b\n");
+ return 0;
+ }
+ put_cmd640_reg(0x5b, 0);
+
+ /*
+ * Documented magic.
+ */
+
+ cmd640_chip_version = get_cmd640_reg(CFR) & CFR_DEVREV;
+ if (cmd640_chip_version == 0) {
+ printk ("ide: wrong CMD640 version -- 0\n");
+ return 0;
+ }
+
+ /*
+	 * Set up the most conservative timings for all drives.
+ */
+ put_cmd640_reg(ARTTIM0, 0xc0);
+ put_cmd640_reg(ARTTIM1, 0xc0);
+ put_cmd640_reg(ARTTIM23, 0xcc); /* 0xc0? */
+
+ /*
+ * Do not initialize secondary controller for vlbus
+ */
+ second_port = (bus_type != vlb);
+
+ /*
+	 * Set the maximum allowed bus speed (this is safest until we
+	 * find out how to detect the bus speed).
+	 * Normally the PCI bus runs at 33MHz, but it often works overclocked to 40.
+ */
+ bus_speed = (bus_type == vlb) ? 50 : 40;
+
+ /*
+ * Setup Control Register
+ */
+ b = get_cmd640_reg(CNTRL);
+
+ if (second_port)
+ b |= CNTRL_ENA_2ND;
+ else
+ b &= ~CNTRL_ENA_2ND;
+
+ /*
+ * Disable readahead for drives at primary interface
+ */
+ b |= (CNTRL_DIS_RA0 | CNTRL_DIS_RA1);
+
+ put_cmd640_reg(CNTRL, b);
+
+ /*
+ * Note that we assume that the first interface is at 0x1f0,
+ * and that the second interface, if enabled, is at 0x170.
+ */
+ ide_hwifs[0].chipset = ide_cmd640;
+ ide_hwifs[0].tuneproc = &cmd640_tune_drive;
+ if (ide_hwifs[0].drives[0].autotune == 0)
+ ide_hwifs[0].drives[0].autotune = 1;
+ if (ide_hwifs[0].drives[1].autotune == 0)
+ ide_hwifs[0].drives[1].autotune = 1;
+
+ /*
+ * Initialize 2nd IDE port, if required
+ */
+ if (second_port) {
+ ide_hwifs[1].chipset = ide_cmd640;
+ ide_hwifs[1].tuneproc = &cmd640_tune_drive;
+ if (ide_hwifs[1].drives[0].autotune == 0)
+ ide_hwifs[1].drives[0].autotune = 1;
+ if (ide_hwifs[1].drives[1].autotune == 0)
+ ide_hwifs[1].drives[1].autotune = 1;
+ /* We reset timings, and disable read-ahead */
+ put_cmd640_reg(ARTTIM23, (DIS_RA2 | DIS_RA3));
+ put_cmd640_reg(DRWTIM23, 0);
+
+ cmd640_reset_controller(1);
+ }
+
+ printk("ide: buggy CMD640%c interface at ",
+ 'A' - 1 + cmd640_chip_version);
+ switch (bus_type) {
+ case vlb :
+ printk("local bus, port 0x%x", cmd640_key);
+ break;
+ case pci1:
+ printk("pci, (0x%x)", cmd640_key);
+ break;
+ case pci2:
+ printk("pci,(access method 2) (0x%x)", cmd640_key);
+ break;
+ }
+
+ /*
+ * Reset interface timings
+ */
+ put_cmd640_reg(CMDTIM, 0);
+
+ printk("\n ... serialized, secondary interface %s\n",
+ second_port ? "enabled" : "disabled");
+
+ return 1;
+}
+
+int cmd640_off(void) {
+ static int a = 0;
+ byte b;
+
+ if (bus_type == none || a == 1)
+ return 0;
+ a = 1;
+ b = get_cmd640_reg(CNTRL);
+ b &= ~CNTRL_ENA_2ND;
+ put_cmd640_reg(CNTRL, b);
+ return 1;
+}
+
+/*
+ * Sets the readahead mode for a specific drive;
+ * in the future it could be called from ioctl processing.
+ */
+
+static void set_readahead_mode(int mode, int if_num, int dr_num)
+{
+ static int masks[2][2] =
+ {
+ {CNTRL_DIS_RA0, CNTRL_DIS_RA1},
+ {DIS_RA2, DIS_RA3}
+ };
+
+ int port = (if_num == 0) ? CNTRL : ARTTIM23;
+ int mask = masks[if_num][dr_num];
+ byte b;
+
+ b = get_cmd640_reg(port);
+ if (mode)
+ b &= ~mask; /* Enable readahead for specific drive */
+ else
+		b |= mask;	/* Disable readahead for specific drive */
+ put_cmd640_reg(port, b);
+}
+
+static struct readahead_black_list {
+ const char* name;
+ int mode;
+} drives_ra[] = {
+ { "ST3655A", 0 },
+ { "SAMSUNG", 0 }, /* Be conservative */
+ { NULL, 0 }
+};
+
+static int strmatch(const char* pattern, const char* name) {
+ char c1, c2;
+
+ while (1) {
+ c1 = *pattern++;
+ c2 = *name++;
+ if (c1 == 0) {
+ return 0;
+ }
+ if (c1 != c2)
+ return 1;
+ }
+}
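+
+/*
+ * Aside, for clarity: strmatch() is a prefix comparison -- it returns 0
+ * when `pattern' is a prefix of `name' and 1 otherwise, e.g.
+ * strmatch("ST3655A", "ST3655A 9") == 0 but
+ * strmatch("ST3655A", "ST3660A") == 1.
+ */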
+
+static int known_drive_readahead(char* name) {
+ int i;
+
+ for (i = 0; drives_ra[i].name != NULL; i++) {
+ if (strmatch(drives_ra[i].name, name) == 0) {
+ return drives_ra[i].mode;
+ }
+ }
+ return -1;
+}
+
+static int arttim[4] = {2, 2, 2, 2}; /* Address setup count (in clocks) */
+static int a_count[4] = {1, 1, 1, 1}; /* Active count (encoded) */
+static int r_count[4] = {1, 1, 1, 1}; /* Recovery count (encoded) */
+
+/*
+ * Convert address setup count from number of clocks
+ * to representation used by controller
+ */
+
+inline static int pack_arttim(int clocks)
+{
+ if (clocks <= 2) return 0x40;
+ else if (clocks == 3) return 0x80;
+ else if (clocks == 4) return 0x00;
+ else return 0xc0;
+}
+
+/*
+ * Pack active and recovery counts into single byte representation
+ * used by controller
+ */
+
+inline static int pack_counts(int act_count, int rec_count)
+{
+ return ((act_count & 0x0f)<<4) | (rec_count & 0x0f);
+}
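+
+/*
+ * Example (illustration only): an active count of 4 and a recovery count of
+ * 3 pack to pack_counts(4, 3) == 0x43, and a 2-clock address setup packs to
+ * pack_arttim(2) == 0x40.
+ */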
+
+inline int max(int a, int b) { return a > b ? a : b; }
+inline int max4(int *p) { return max(p[0], max(p[1], max(p[2], p[3]))); }
+
+/*
+ * Set timing parameters
+ */
+
+static void cmd640_set_timing(int if_num, int dr_num)
+{
+ int b_reg;
+ int ac, rc, at;
+
+ /*
+	 * Set the address setup count and drive read/write timing registers.
+	 * The primary interface has individual count/timing registers for
+	 * each drive.  The secondary interface has a common set of registers,
+	 * so we should set timings for the slowest drive.
+ */
+
+ if (if_num == 0) {
+ b_reg = dr_num ? ARTTIM1 : ARTTIM0;
+ at = arttim[dr_num];
+ ac = a_count[dr_num];
+ rc = r_count[dr_num];
+ } else {
+ b_reg = ARTTIM23;
+ at = max(arttim[2], arttim[3]);
+ ac = max(a_count[2], a_count[3]);
+ rc = max(r_count[2], r_count[3]);
+ }
+
+ put_cmd640_reg(b_reg, pack_arttim(at));
+ put_cmd640_reg(b_reg + 1, pack_counts(ac, rc));
+
+ /*
+ * Update CMDTIM (IDE Command Block Timing Register)
+ */
+
+ ac = max4(r_count);
+ rc = max4(a_count);
+ put_cmd640_reg(CMDTIM, pack_counts(ac, rc));
+}
+
+/*
+ * Standard timings for PIO modes
+ */
+
+static struct pio_timing {
+ int mc_time; /* Minimal cycle time (ns) */
+ int av_time; /* Address valid to DIOR-/DIOW- setup (ns) */
+ int ds_time; /* DIOR data setup (ns) */
+} pio_timings[6] = {
+ { 70, 165, 600 }, /* PIO Mode 0 */
+ { 50, 125, 383 }, /* PIO Mode 1 */
+ { 30, 100, 240 }, /* PIO Mode 2 */
+ { 30, 80, 180 }, /* PIO Mode 3 */
+ { 25, 70, 125 }, /* PIO Mode 4 -- should be 120, not 125 */
+ { 20, 50, 100 } /* PIO Mode ? (nonstandard) */
+};
+
+static void cmd640_timings_to_clocks(int mc_time, int av_time, int ds_time,
+ int clock_time, int drv_idx)
+{
+ int a, b;
+
+ arttim[drv_idx] = (mc_time + clock_time - 1)/clock_time;
+
+ a = (av_time + clock_time - 1)/clock_time;
+ if (a < 2)
+ a = 2;
+ b = (ds_time + clock_time - 1)/clock_time - a;
+ if (b < 2)
+ b = 2;
+ if (b > 0x11) {
+ a += b - 0x11;
+ b = 0x11;
+ }
+ if (a > 0x10)
+ a = 0x10;
+ if (cmd640_chip_version > 1)
+ b -= 1;
+ if (b > 0x10)
+ b = 0x10;
+
+ a_count[drv_idx] = a;
+ r_count[drv_idx] = b;
+}
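+
+/*
+ * Worked example (illustration only, assuming the 40 MHz PCI bus_speed set
+ * above, i.e. clock_time = 25 ns) for the PIO mode 3 entry above
+ * (30/80/180 ns):
+ *	arttim = ceil(30/25) = 2 clocks
+ *	a      = ceil(80/25) = 4
+ *	b      = ceil(180/25) - a = 8 - 4 = 4, minus 1 on chip revisions > 1
+ * giving a_count = 4 and r_count = 3 for that drive.
+ */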
+
+static void set_pio_mode(int if_num, int drv_num, int mode_num) {
+ int p_base;
+ int i;
+
+ p_base = if_num ? 0x170 : 0x1f0;
+ outb_p(3, p_base + 1);
+ outb_p(mode_num | 8, p_base + 2);
+ outb_p((drv_num | 0xa) << 4, p_base + 6);
+ outb_p(0xef, p_base + 7);
+ for (i = 0; (i < 100) && (inb (p_base + 7) & 0x80); i++)
+ udelay(10000);
+}
+
+/*
+ * Set a specific pio_mode for a drive
+ */
+
+static void cmd640_set_mode(ide_drive_t* drive, int pio_mode) {
+ int interface_number;
+ int drive_number;
+ int clock_time; /* ns */
+ int mc_time, av_time, ds_time;
+
+ interface_number = HWIF(drive)->index;
+ drive_number = drive->select.b.unit;
+ clock_time = 1000/bus_speed;
+
+ mc_time = pio_timings[pio_mode].mc_time;
+ av_time = pio_timings[pio_mode].av_time;
+ ds_time = pio_timings[pio_mode].ds_time;
+
+ cmd640_timings_to_clocks(mc_time, av_time, ds_time, clock_time,
+ interface_number*2 + drive_number);
+ set_pio_mode(interface_number, drive_number, pio_mode);
+ cmd640_set_timing(interface_number, drive_number);
+}
+
+/*
+ * Drive PIO mode "autoconfiguration".
+ * Ideally, this code should *always* call cmd640_set_mode(), but it doesn't.
+ */
+
+static void cmd640_tune_drive(ide_drive_t *drive, byte pio_mode) {
+ int interface_number;
+ int drive_number;
+ int clock_time; /* ns */
+ int max_pio;
+ int mc_time, av_time, ds_time;
+ struct hd_driveid* id;
+ int readahead; /* there is a global named read_ahead */
+
+ if (pio_mode != 255) {
+ cmd640_set_mode(drive, pio_mode);
+ return;
+ }
+
+ interface_number = HWIF(drive)->index;
+ drive_number = drive->select.b.unit;
+ clock_time = 1000/bus_speed;
+ id = drive->id;
+ if ((max_pio = ide_scan_pio_blacklist(id->model)) != -1) {
+ ds_time = pio_timings[max_pio].ds_time;
+ } else {
+ max_pio = id->tPIO;
+ ds_time = pio_timings[max_pio].ds_time;
+ if (id->field_valid & 2) {
+ if ((id->capability & 8) && (id->eide_pio_modes & 7)) {
+ if (id->eide_pio_modes & 4) max_pio = 5;
+ else if (id->eide_pio_modes & 2) max_pio = 4;
+ else max_pio = 3;
+ ds_time = id->eide_pio_iordy;
+ } else {
+ ds_time = id->eide_pio;
+ }
+ if (ds_time == 0)
+ ds_time = pio_timings[max_pio].ds_time;
+ }
+
+ /*
+ * Conservative "downgrade"
+ */
+ if (max_pio < 4 && max_pio != 0) {
+ max_pio -= 1;
+ ds_time = pio_timings[max_pio].ds_time;
+ }
+ }
+ mc_time = pio_timings[max_pio].mc_time;
+ av_time = pio_timings[max_pio].av_time;
+ cmd640_timings_to_clocks(mc_time, av_time, ds_time, clock_time,
+ interface_number*2 + drive_number);
+ set_pio_mode(interface_number, drive_number, max_pio);
+ cmd640_set_timing(interface_number, drive_number);
+
+ /*
+ * Disable (or set) readahead mode
+ */
+
+ readahead = 0;
+ if (cmd640_chip_version > 1) { /* Mmmm.. probably should be > 2 ?? */
+ readahead = known_drive_readahead(id->model);
+ if (readahead == -1)
+			readahead = 1;	/* Mmmm.. probably should be 0 ?? */
+ set_readahead_mode(readahead, interface_number, drive_number);
+ }
+
+ printk ("Mode and Timing set to PIO%d, Readahead is %s\n",
+ max_pio, readahead ? "enabled" : "disabled");
+}
+
diff --git a/i386/i386at/gpl/linux/block/floppy.c b/i386/i386at/gpl/linux/block/floppy.c
new file mode 100644
index 00000000..ee4a8980
--- /dev/null
+++ b/i386/i386at/gpl/linux/block/floppy.c
@@ -0,0 +1,4100 @@
+/*
+ * linux/kernel/floppy.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1993, 1994 Alain Knaff
+ */
+/*
+ * 02.12.91 - Changed to static variables to indicate need for reset
+ * and recalibrate. This makes some things easier (output_byte reset
+ * checking etc), and means less interrupt jumping in case of errors,
+ * so the code is hopefully easier to understand.
+ */
+
+/*
+ * This file is certainly a mess. I've tried my best to get it working,
+ * but I don't like programming floppies, and I have only one anyway.
+ * Urgel. I should check for more errors, and do more graceful error
+ * recovery. Seems there are problems with several drives. I've tried to
+ * correct them. No promises.
+ */
+
+/*
+ * As with hd.c, all routines within this file can (and will) be called
+ * by interrupts, so extreme caution is needed. A hardware interrupt
+ * handler may not sleep, or a kernel panic will happen. Thus I cannot
+ * call "floppy-on" directly, but have to set a special timer interrupt
+ * etc.
+ */
+
+/*
+ * 28.02.92 - made track-buffering routines, based on the routines written
+ * by entropy@wintermute.wpi.edu (Lawrence Foard). Linus.
+ */
+
+/*
+ * Automatic floppy-detection and formatting written by Werner Almesberger
+ * (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with
+ * the floppy-change signal detection.
+ */
+
+/*
+ * 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed
+ * FDC data overrun bug, added some preliminary stuff for vertical
+ * recording support.
+ *
+ * 1992/9/17: Added DMA allocation & DMA functions. -- hhb.
+ *
+ * TODO: Errors are still not counted properly.
+ */
+
+/* 1992/9/20
+ * Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl)
+ * modelled after the freeware MS/DOS program fdformat/88 V1.8 by
+ * Christoph H. Hochst\"atter.
+ * I have fixed the shift values to the ones I always use. Maybe a new
+ * ioctl() should be created to be able to modify them.
+ * There is a bug in the driver that makes it impossible to format a
+ * floppy as the first thing after bootup.
+ */
+
+/*
+ * 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and
+ * this helped the floppy driver as well. Much cleaner, and still seems to
+ * work.
+ */
+
+/* 1994/6/24 --bbroad-- added the floppy table entries and made
+ * minor modifications to allow 2.88 floppies to be run.
+ */
+
+/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more
+ * disk types.
+ */
+
+/*
+ * 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger
+ * format bug fixes, but unfortunately some new bugs too...
+ */
+
+/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write
+ * errors to allow safe writing by specialized programs.
+ */
+
+/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
+ * by defining bit 1 of the "stretch" parameter to mean put sectors on the
+ * opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's
+ * drives are "upside-down").
+ */
+
+/*
+ * 1995/8/26 -- Andreas Busse -- added Mips support.
+ */
+
+/*
+ * 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent
+ * features to asm/floppy.h.
+ */
+
+
+#define FLOPPY_SANITY_CHECK
+#undef FLOPPY_SILENT_DCL_CLEAR
+
+#define REALLY_SLOW_IO
+
+#define DEBUGT 2
+#define DCL_DEBUG /* debug disk change line */
+
+/* do print messages for unexpected interrupts */
+static int print_unex=1;
+#include <linux/utsname.h>
+#include <linux/module.h>
+
+/* the following is the mask of allowed drives. By default units 2 and
+ * 3 of both floppy controllers are disabled, because switching on the
+ * motor of these drives causes system hangs on some PCI computers. drive
+ * 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if
+ * a drive is allowed. */
+static int FLOPPY_IRQ=6;
+static int FLOPPY_DMA=2;
+static int allowed_drive_mask = 0x33;
+
+
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/tqueue.h>
+#define FDPATCHES
+#include <linux/fdreg.h>
+
+
+#include <linux/fd.h>
+
+
+#define OLDFDRAWCMD 0x020d /* send a raw command to the fdc */
+
+struct old_floppy_raw_cmd {
+ void *data;
+ long length;
+
+ unsigned char rate;
+ unsigned char flags;
+ unsigned char cmd_count;
+ unsigned char cmd[9];
+ unsigned char reply_count;
+ unsigned char reply[7];
+ int track;
+};
+
+#include <linux/errno.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include <linux/ioport.h>
+
+#include <asm/dma.h>
+#include <asm/floppy.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/segment.h>
+
+#define MAJOR_NR FLOPPY_MAJOR
+
+#include <linux/blk.h>
+
+
+/* Dma Memory related stuff */
+
+/* Pure 2^n version of get_order */
+static inline int __get_order (int size)
+{
+ int order;
+
+#ifdef _ASM_IO_H2
+ __asm__ __volatile__("bsr %1,%0"
+ : "=r" (order)
+ : "r" (size / PAGE_SIZE));
+#else
+ for (order = 0; order < NR_MEM_LISTS; ++order)
+ if (size <= (PAGE_SIZE << order))
+ return order;
+#endif
+ return NR_MEM_LISTS;
+}
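+
+/*
+ * Example (illustration only, assuming 4 kB pages): a 6 kB request falls
+ * through the loop above until PAGE_SIZE << 1 (8 kB) >= 6 kB, so
+ * __get_order(6*1024) == 1 and dma_mem_alloc() grabs two contiguous pages.
+ */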
+
+static unsigned long dma_mem_alloc(int size)
+{
+ int order = __get_order(size);
+
+ if (order >= NR_MEM_LISTS)
+ return(0);
+ return __get_dma_pages(GFP_KERNEL,order);
+}
+
+/* End dma memory related stuff */
+
+static unsigned int fake_change = 0;
+static int initialising=1;
+
+static inline int TYPE(kdev_t x) {
+ return (MINOR(x)>>2) & 0x1f;
+}
+static inline int DRIVE(kdev_t x) {
+ return (MINOR(x)&0x03) | ((MINOR(x)&0x80) >> 5);
+}
+#define ITYPE(x) (((x)>>2) & 0x1f)
+#define TOMINOR(x) ((x & 3) | ((x & 4) << 5))
+#define UNIT(x) ((x) & 0x03) /* drive on fdc */
+#define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */
+#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2))
+ /* reverse mapping from unit and fdc to drive */
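+
+/*
+ * Minor number layout implied by TYPE()/DRIVE()/TOMINOR() above (a sketch):
+ *	bits 0-1  unit on its controller
+ *	bits 2-6  format type index (ITYPE)
+ *	bit  7    second controller
+ * e.g. minor 0x84 maps to drive 4 (unit 0 on fdc 1) with format type 1.
+ */
+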
+#define DP (&drive_params[current_drive])
+#define DRS (&drive_state[current_drive])
+#define DRWE (&write_errors[current_drive])
+#define FDCS (&fdc_state[fdc])
+#define CLEARF(x) (clear_bit(x##_BIT, &DRS->flags))
+#define SETF(x) (set_bit(x##_BIT, &DRS->flags))
+#define TESTF(x) (test_bit(x##_BIT, &DRS->flags))
+
+#define UDP (&drive_params[drive])
+#define UDRS (&drive_state[drive])
+#define UDRWE (&write_errors[drive])
+#define UFDCS (&fdc_state[FDC(drive)])
+#define UCLEARF(x) (clear_bit(x##_BIT, &UDRS->flags))
+#define USETF(x) (set_bit(x##_BIT, &UDRS->flags))
+#define UTESTF(x) (test_bit(x##_BIT, &UDRS->flags))
+
+#define DPRINT(x) printk(DEVICE_NAME "%d: " x,current_drive)
+
+#define DPRINT1(x,x1) printk(DEVICE_NAME "%d: " x,current_drive,(x1))
+
+#define DPRINT2(x,x1,x2) printk(DEVICE_NAME "%d: " x,current_drive,(x1),(x2))
+
+#define DPRINT3(x,x1,x2,x3) printk(DEVICE_NAME "%d: " x,current_drive,(x1),(x2),(x3))
+
+#define PH_HEAD(floppy,head) (((((floppy)->stretch & 2) >>1) ^ head) << 2)
+#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH)
+
+#define CLEARSTRUCT(x) memset((x), 0, sizeof(*(x)))
+
+/* read/write */
+#define COMMAND raw_cmd->cmd[0]
+#define DR_SELECT raw_cmd->cmd[1]
+#define TRACK raw_cmd->cmd[2]
+#define HEAD raw_cmd->cmd[3]
+#define SECTOR raw_cmd->cmd[4]
+#define SIZECODE raw_cmd->cmd[5]
+#define SECT_PER_TRACK raw_cmd->cmd[6]
+#define GAP raw_cmd->cmd[7]
+#define SIZECODE2 raw_cmd->cmd[8]
+#define NR_RW 9
+
+/* format */
+#define F_SIZECODE raw_cmd->cmd[2]
+#define F_SECT_PER_TRACK raw_cmd->cmd[3]
+#define F_GAP raw_cmd->cmd[4]
+#define F_FILL raw_cmd->cmd[5]
+#define NR_F 6
+
+/*
+ * Maximum disk size (in kilobytes). This default is used whenever the
+ * current disk size is unknown.
+ * [Now it is rather a minimum]
+ */
+#define MAX_DISK_SIZE 2 /* 3984*/
+
+#define K_64 0x10000 /* 64KB */
+
+/*
+ * globals used by 'result()'
+ */
+#define MAX_REPLIES 17
+static unsigned char reply_buffer[MAX_REPLIES];
+static int inr; /* size of reply buffer, when called from interrupt */
+#define ST0 (reply_buffer[0])
+#define ST1 (reply_buffer[1])
+#define ST2 (reply_buffer[2])
+#define ST3 (reply_buffer[0]) /* result of GETSTATUS */
+#define R_TRACK (reply_buffer[3])
+#define R_HEAD (reply_buffer[4])
+#define R_SECTOR (reply_buffer[5])
+#define R_SIZECODE (reply_buffer[6])
+
+#define SEL_DLY (2*HZ/100)
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+/*
+ * this struct defines the different floppy drive types.
+ */
+static struct {
+ struct floppy_drive_params params;
+ const char *name; /* name printed while booting */
+} default_drive_params[]= {
+/* NOTE: the time values in jiffies should be in msec!
+ CMOS drive type
+ | Maximum data rate supported by drive type
+ | | Head load time, msec
+ | | | Head unload time, msec (not used)
+ | | | | Step rate interval, usec
+ | | | | | Time needed for spinup time (jiffies)
+ | | | | | | Timeout for spinning down (jiffies)
+ | | | | | | | Spindown offset (where disk stops)
+ | | | | | | | | Select delay
+ | | | | | | | | | RPS
+ | | | | | | | | | | Max number of tracks
+ | | | | | | | | | | | Interrupt timeout
+ | | | | | | | | | | | | Max nonintlv. sectors
+ | | | | | | | | | | | | | -Max Errors- flags */
+{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" },
+
+{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0,
+ 0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/
+
+{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0,
+ 0, { 2, 5, 6,23,10,20,11, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/
+
+{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/
+
+{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/
+
+{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
+ 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/
+
+{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
+ 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/
+/* | --autodetected formats--- | | |
+ * read_track | | Name printed when booting
+ * | Native format
+ * Frequency of disk change checks */
+};
+
+static struct floppy_drive_params drive_params[N_DRIVE];
+static struct floppy_drive_struct drive_state[N_DRIVE];
+static struct floppy_write_errors write_errors[N_DRIVE];
+static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
+
+/*
+ * This struct defines the different floppy types.
+ *
+ * Bit 0 of 'stretch' tells if the tracks need to be doubled for some
+ * types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch'
+ * tells if the disk is in Commodore 1581 format, which means side 0 sectors
+ * are located on side 1 of the disk but with a side 0 ID, and vice-versa.
+ * This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the
+ * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
+ * side 0 is on physical side 0 (but with the misnamed sector IDs).
+ * 'stretch' should probably be renamed to something more general, like
+ * 'options'. Other parameters should be self-explanatory (see also
+ * setfdprm(8)).
+ */
+static struct floppy_struct floppy_type[32] = {
+ { 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */
+ { 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */
+ { 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */
+ { 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */
+ { 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */
+ { 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */
+ { 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */
+ { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */
+ { 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */
+ { 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"CompaQ"}, /* 9 2.88MB 3.5" */
+
+ { 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */
+ { 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */
+ { 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */
+ { 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */
+ { 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */
+ { 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */
+ { 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */
+ { 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */
+ { 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */
+ { 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */
+
+ { 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */
+ { 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */
+ { 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */
+ { 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */
+ { 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */
+ { 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */
+ { 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */
+ { 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */
+ { 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */
+
+ { 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */
+ { 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */
+ { 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
+};
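+
+/*
+ * Decoding one entry for illustration (field names assumed from struct
+ * floppy_struct in <linux/fd.h>): entry 7, { 2880,18,2,80,0,0x1B,0x00,
+ * 0xCF,0x6C,"H1440" }, is 2880 512-byte sectors (1440 kB) laid out as
+ * 18 sectors x 2 heads x 80 tracks, stretch 0, gap 0x1B, rate 0x00
+ * (500 kb/s), spec1 0xCF, format gap 0x6C.
+ */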
+
+#define NUMBER(x) (sizeof(x) / sizeof(*(x)))
+#define SECTSIZE (_FD_SECTSIZE(*floppy))
+
+/* Auto-detection: Disk type used until the next media change occurs. */
+static struct floppy_struct *current_type[N_DRIVE] = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+/*
+ * User-provided type information. current_type points to
+ * the respective entry of this array.
+ */
+static struct floppy_struct user_params[N_DRIVE];
+
+static int floppy_sizes[256];
+static int floppy_blocksizes[256] = { 0, };
+
+/*
+ * The driver is trying to determine the correct media format
+ * while probing is set. rw_interrupt() clears it after a
+ * successful access.
+ */
+static int probing = 0;
+
+/* Synchronization of FDC access. */
+#define FD_COMMAND_NONE -1
+#define FD_COMMAND_ERROR 2
+#define FD_COMMAND_OKAY 3
+
+static volatile int command_status = FD_COMMAND_NONE, fdc_busy = 0;
+static struct wait_queue *fdc_wait = NULL, *command_done = NULL;
+#ifdef MACH
+extern int issig (void);
+#define NO_SIGNAL (! issig () || ! interruptible)
+#else
+#define NO_SIGNAL (!(current->signal & ~current->blocked) || !interruptible)
+#endif
+#define CALL(x) if ((x) == -EINTR) return -EINTR
+#define ECALL(x) if ((ret = (x))) return ret;
+#define _WAIT(x,i) CALL(ret=wait_til_done((x),i))
+#define WAIT(x) _WAIT((x),interruptible)
+#define IWAIT(x) _WAIT((x),1)
+
+/* Errors during formatting are counted here. */
+static int format_errors;
+
+/* Format request descriptor. */
+static struct format_descr format_req;
+
+/*
+ * Rate is 0 for 500 kb/s, 1 for 300 kb/s, 2 for 250 kb/s
+ * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc),
+ * H is head unload time (1=16ms, 2=32ms, etc)
+ */
+
+/*
+ * Track buffer
+ * Because these are written to by the DMA controller, they must
+ * not contain a 64k byte boundary crossing, or data will be
+ * corrupted/lost. Alignment of these is enforced in boot/head.S.
+ * Note that you must not change the sizes below without updating head.S.
+ */
+static char *floppy_track_buffer=0;
+static int max_buffer_sectors=0;
+
+static int *errors;
+typedef void (*done_f)(int);
+static struct cont_t {
+ void (*interrupt)(void); /* this is called after the interrupt of the
+ * main command */
+ void (*redo)(void); /* this is called to retry the operation */
+ void (*error)(void); /* this is called to tally an error */
+ done_f done; /* this is called to say if the operation has
+ * succeeded/failed */
+} *cont=NULL;
+
+static void floppy_ready(void);
+static void floppy_start(void);
+static void process_fd_request(void);
+static void recalibrate_floppy(void);
+static void floppy_shutdown(void);
+
+static int floppy_grab_irq_and_dma(void);
+static void floppy_release_irq_and_dma(void);
+
+/*
+ * The "reset" variable should be tested whenever an interrupt is scheduled,
+ * after the commands have been sent. This is to ensure that the driver doesn't
+ * get wedged when the interrupt doesn't come because of a failed command.
+ * reset doesn't need to be tested before sending commands, because
+ * output_byte is automatically disabled when reset is set.
+ */
+#define CHECK_RESET { if (FDCS->reset){ reset_fdc(); return; } }
+static void reset_fdc(void);
+
+/*
+ * These are global variables, as that's the easiest way to give
+ * information to interrupts. They are the data used for the current
+ * request.
+ */
+#define NO_TRACK -1
+#define NEED_1_RECAL -2
+#define NEED_2_RECAL -3
+
+/* */
+static int usage_count = 0;
+
+
+/* buffer related variables */
+static int buffer_track = -1;
+static int buffer_drive = -1;
+static int buffer_min = -1;
+static int buffer_max = -1;
+
+/* fdc related variables, should end up in a struct */
+static struct floppy_fdc_state fdc_state[N_FDC];
+static int fdc; /* current fdc */
+
+static struct floppy_struct * floppy = floppy_type;
+static unsigned char current_drive = 0;
+static long current_count_sectors = 0;
+static unsigned char sector_t; /* sector in track */
+
+#ifdef DEBUGT
+static long unsigned debugtimer;
+#endif
+
+/*
+ * Debugging
+ * =========
+ */
+static inline void set_debugt(void)
+{
+#ifdef DEBUGT
+ debugtimer = jiffies;
+#endif
+}
+
+static inline void debugt(const char *message)
+{
+#ifdef DEBUGT
+ if (DP->flags & DEBUGT)
+ printk("%s dtime=%lu\n", message, jiffies-debugtimer);
+#endif
+}
+
+typedef void (*timeout_fn)(unsigned long);
+static struct timer_list fd_timeout ={ NULL, NULL, 0, 0,
+ (timeout_fn) floppy_shutdown };
+
+static const char *timeout_message;
+
+#ifdef FLOPPY_SANITY_CHECK
+static void is_alive(const char *message)
+{
+ /* this routine checks whether the floppy driver is "alive" */
+ if (fdc_busy && command_status < 2 && !fd_timeout.prev){
+ DPRINT1("timeout handler died: %s\n",message);
+ }
+}
+#endif
+
+#ifdef FLOPPY_SANITY_CHECK
+
+#define OLOGSIZE 20
+
+static void (*lasthandler)(void) = NULL;
+static int interruptjiffies=0;
+static int resultjiffies=0;
+static int resultsize=0;
+static int lastredo=0;
+
+static struct output_log {
+ unsigned char data;
+ unsigned char status;
+ unsigned long jiffies;
+} output_log[OLOGSIZE];
+
+static int output_log_pos=0;
+#endif
+
+#define CURRENTD -1
+#define MAXTIMEOUT -2
+
+static void reschedule_timeout(int drive, const char *message, int marg)
+{
+ if (drive == CURRENTD)
+ drive = current_drive;
+ del_timer(&fd_timeout);
+ if (drive < 0 || drive > N_DRIVE) {
+ fd_timeout.expires = jiffies + 20*HZ;
+ drive=0;
+ } else
+ fd_timeout.expires = jiffies + UDP->timeout;
+ add_timer(&fd_timeout);
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("reschedule timeout ");
+ printk(message, marg);
+ printk("\n");
+ }
+ timeout_message = message;
+}
+
+static int maximum(int a, int b)
+{
+ if(a > b)
+ return a;
+ else
+ return b;
+}
+#define INFBOUND(a,b) (a)=maximum((a),(b));
+
+static int minimum(int a, int b)
+{
+ if(a < b)
+ return a;
+ else
+ return b;
+}
+#define SUPBOUND(a,b) (a)=minimum((a),(b));
+
+
+/*
+ * Bottom half floppy driver.
+ * ==========================
+ *
+ * This part of the file contains the code talking directly to the hardware,
+ * and also the main service loop (seek-configure-spinup-command)
+ */
+
+/*
+ * disk change.
+ * This routine is responsible for maintaining the FD_DISK_CHANGE flag,
+ * and the last_checked date.
+ *
+ * last_checked is the date of the last check which showed 'no disk change'
+ * FD_DISK_CHANGE is set under two conditions:
+ * 1. The floppy has been changed after some i/o to that floppy already
+ * took place.
+ * 2. No floppy disk is in the drive. This is done in order to ensure that
+ * requests are quickly flushed in case there is no disk in the drive. It
+ * follows that FD_DISK_CHANGE can only be cleared if there is a disk in
+ * the drive.
+ *
+ * For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet.
+ * For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on
+ * each seek. If a disk is present, the disk change line should also be
+ * cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk
+ * change line is set, this means either that no disk is in the drive, or
+ * that it has been removed since the last seek.
+ *
+ * This means that we really have a third possibility too:
+ * The floppy has been changed after the last seek.
+ */
+
+static int disk_change(int drive)
+{
+ int fdc=FDC(drive);
+#ifdef FLOPPY_SANITY_CHECK
+ if (jiffies < UDP->select_delay + UDRS->select_date)
+ DPRINT("WARNING disk change called early\n");
+ if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
+ (FDCS->dor & 3) != UNIT(drive) ||
+ fdc != FDC(drive)){
+ DPRINT("probing disk change on unselected drive\n");
+ DPRINT3("drive=%d fdc=%d dor=%x\n",drive, FDC(drive),
+ FDCS->dor);
+ }
+#endif
+
+#ifdef DCL_DEBUG
+ if (UDP->flags & FD_DEBUG){
+ DPRINT1("checking disk change line for drive %d\n",drive);
+ DPRINT1("jiffies=%ld\n", jiffies);
+ DPRINT1("disk change line=%x\n",fd_inb(FD_DIR)&0x80);
+ DPRINT1("flags=%x\n",UDRS->flags);
+ }
+#endif
+ if (UDP->flags & FD_BROKEN_DCL)
+ return UTESTF(FD_DISK_CHANGED);
+ if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80){
+ USETF(FD_VERIFY); /* verify write protection */
+ if (UDRS->maxblock){
+ /* mark it changed */
+ USETF(FD_DISK_CHANGED);
+
+ /* invalidate its geometry */
+ if (UDRS->keep_data >= 0) {
+ if ((UDP->flags & FTD_MSG) &&
+ current_type[drive] != NULL)
+ DPRINT("Disk type is undefined after "
+ "disk change\n");
+ current_type[drive] = NULL;
+ floppy_sizes[TOMINOR(current_drive)] = MAX_DISK_SIZE;
+ }
+ }
+ /*USETF(FD_DISK_NEWCHANGE);*/
+ return 1;
+ } else {
+ UDRS->last_checked=jiffies;
+ UCLEARF(FD_DISK_NEWCHANGE);
+ }
+ return 0;
+}
+
+static inline int is_selected(int dor, int unit)
+{
+ return ((dor & (0x10 << unit)) && (dor &3) == unit);
+}
+
+static int set_dor(int fdc, char mask, char data)
+{
+ register unsigned char drive, unit, newdor,olddor;
+
+ if (FDCS->address == -1)
+ return -1;
+
+ olddor = FDCS->dor;
+ newdor = (olddor & mask) | data;
+ if (newdor != olddor){
+ unit = olddor & 0x3;
+ if (is_selected(olddor, unit) && !is_selected(newdor,unit)){
+ drive = REVDRIVE(fdc,unit);
+#ifdef DCL_DEBUG
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("calling disk change from set_dor\n");
+ }
+#endif
+ disk_change(drive);
+ }
+ FDCS->dor = newdor;
+ fd_outb(newdor, FD_DOR);
+
+ unit = newdor & 0x3;
+ if (!is_selected(olddor, unit) && is_selected(newdor,unit)){
+ drive = REVDRIVE(fdc,unit);
+ UDRS->select_date = jiffies;
+ }
+ }
+ if (newdor & 0xf0)
+ floppy_grab_irq_and_dma();
+ if (olddor & 0xf0)
+ floppy_release_irq_and_dma();
+ return olddor;
+}
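+
+/*
+ * For reference (assumed standard i82077 Digital Output Register layout,
+ * not spelled out in this file):
+ *	bits 0-1  drive select (the "dor & 3" tests used here)
+ *	bit  2    controller enable (0 holds the FDC in reset)
+ *	bit  3    DMA/interrupt enable (the 8 OR'ed in by set_fdc() below)
+ *	bits 4-7  motor enable for units 0-3 (the 0x10 << unit masks)
+ */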
+
+static void twaddle(void)
+{
+ if (DP->select_delay)
+ return;
+ fd_outb(FDCS->dor & ~(0x10<<UNIT(current_drive)),FD_DOR);
+ fd_outb(FDCS->dor, FD_DOR);
+ DRS->select_date = jiffies;
+}
+
+/* reset all driver information about the current fdc. This is needed after
+ * a reset, and after a raw command. */
+static void reset_fdc_info(int mode)
+{
+ int drive;
+
+ FDCS->spec1 = FDCS->spec2 = -1;
+ FDCS->need_configure = 1;
+ FDCS->perp_mode = 1;
+ FDCS->rawcmd = 0;
+ for (drive = 0; drive < N_DRIVE; drive++)
+ if (FDC(drive) == fdc &&
+ (mode || UDRS->track != NEED_1_RECAL))
+ UDRS->track = NEED_2_RECAL;
+}
+
+/* selects the fdc and drive, and enables the fdc's input/dma. */
+static void set_fdc(int drive)
+{
+ if (drive >= 0 && drive < N_DRIVE){
+ fdc = FDC(drive);
+ current_drive = drive;
+ }
+ if (fdc != 1 && fdc != 0) {
+ printk("bad fdc value\n");
+ return;
+ }
+ set_dor(fdc,~0,8);
+ set_dor(1-fdc, ~8, 0);
+ if (FDCS->rawcmd == 2)
+ reset_fdc_info(1);
+ if (fd_inb(FD_STATUS) != STATUS_READY)
+ FDCS->reset = 1;
+}
+
+/* locks the driver */
+static int lock_fdc(int drive, int interruptible)
+{
+ if (!usage_count){
+ printk("trying to lock fdc while usage count=0\n");
+ return -1;
+ }
+ floppy_grab_irq_and_dma();
+ cli();
+ while (fdc_busy && NO_SIGNAL)
+ interruptible_sleep_on(&fdc_wait);
+ if (fdc_busy){
+ sti();
+ return -EINTR;
+ }
+ fdc_busy = 1;
+ sti();
+ command_status = FD_COMMAND_NONE;
+ reschedule_timeout(drive, "lock fdc", 0);
+ set_fdc(drive);
+ return 0;
+}
+
+#define LOCK_FDC(drive,interruptible) \
+if (lock_fdc(drive,interruptible)) return -EINTR;
+
+
+/* unlocks the driver */
+static inline void unlock_fdc(void)
+{
+ raw_cmd = 0;
+ if (!fdc_busy)
+ DPRINT("FDC access conflict!\n");
+
+ if (DEVICE_INTR)
+ DPRINT1("device interrupt still active at FDC release: %p!\n",
+ DEVICE_INTR);
+ command_status = FD_COMMAND_NONE;
+ del_timer(&fd_timeout);
+ cont = NULL;
+ fdc_busy = 0;
+ floppy_release_irq_and_dma();
+ wake_up(&fdc_wait);
+}
+
+/* switches the motor off after a given timeout */
+static void motor_off_callback(unsigned long nr)
+{
+ unsigned char mask = ~(0x10 << UNIT(nr));
+
+ set_dor(FDC(nr), mask, 0);
+}
+
+static struct timer_list motor_off_timer[N_DRIVE] = {
+ { NULL, NULL, 0, 0, motor_off_callback },
+ { NULL, NULL, 0, 1, motor_off_callback },
+ { NULL, NULL, 0, 2, motor_off_callback },
+ { NULL, NULL, 0, 3, motor_off_callback },
+ { NULL, NULL, 0, 4, motor_off_callback },
+ { NULL, NULL, 0, 5, motor_off_callback },
+ { NULL, NULL, 0, 6, motor_off_callback },
+ { NULL, NULL, 0, 7, motor_off_callback }
+};
+
+/* schedules motor off */
+static void floppy_off(unsigned int drive)
+{
+ unsigned long volatile delta;
+ register int fdc=FDC(drive);
+
+ if (!(FDCS->dor & (0x10 << UNIT(drive))))
+ return;
+
+ del_timer(motor_off_timer+drive);
+
+ /* make spindle stop in a position which minimizes spinup time
+ * next time */
+ if (UDP->rps){
+ delta = jiffies - UDRS->first_read_date + HZ -
+ UDP->spindown_offset;
+ delta = ((delta * UDP->rps) % HZ) / UDP->rps;
+ motor_off_timer[drive].expires = jiffies + UDP->spindown - delta;
+ }
+ add_timer(motor_off_timer+drive);
+}
+
+/*
+ * cycle through all N_DRIVE floppy drives, for disk change testing,
+ * stopping at the current drive.  This is done before any long operation,
+ * to be sure to have up-to-date disk change information.
+ */
+static void scandrives(void)
+{
+ int i, drive, saved_drive;
+
+ if (DP->select_delay)
+ return;
+
+ saved_drive = current_drive;
+ for (i=0; i < N_DRIVE; i++){
+ drive = (saved_drive + i + 1) % N_DRIVE;
+ if (UDRS->fd_ref == 0 || UDP->select_delay != 0)
+ continue; /* skip closed drives */
+ set_fdc(drive);
+ if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
+ (0x10 << UNIT(drive))))
+ /* switch the motor off again, if it was off to
+ * begin with */
+ set_dor(fdc, ~(0x10 << UNIT(drive)), 0);
+ }
+ set_fdc(saved_drive);
+}
+
+static struct timer_list fd_timer ={ NULL, NULL, 0, 0, 0 };
+
+/* this function makes sure that the disk stays in the drive during the
+ * transfer */
+static void fd_watchdog(void)
+{
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from watchdog\n");
+ }
+#endif
+
+ if (disk_change(current_drive)){
+ DPRINT("disk removed during i/o\n");
+ floppy_shutdown();
+ } else {
+ del_timer(&fd_timer);
+ fd_timer.function = (timeout_fn) fd_watchdog;
+ fd_timer.expires = jiffies + HZ / 10;
+ add_timer(&fd_timer);
+ }
+}
+
+static void main_command_interrupt(void)
+{
+ del_timer(&fd_timer);
+ cont->interrupt();
+}
+
+/* waits for a delay (spinup or select) to pass */
+static int wait_for_completion(int delay, timeout_fn function)
+{
+ if (FDCS->reset){
+		reset_fdc(); /* do the reset during the sleep to save time;
+			      * even if we don't need to sleep, it's a good
+			      * occasion anyway */
+ return 1;
+ }
+
+ if (jiffies < delay){
+ del_timer(&fd_timer);
+ fd_timer.function = function;
+ fd_timer.expires = delay;
+ add_timer(&fd_timer);
+ return 1;
+ }
+ return 0;
+}
+
+static int hlt_disabled=0;
+static void floppy_disable_hlt(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ if (!hlt_disabled){
+ hlt_disabled=1;
+#ifdef HAVE_DISABLE_HLT
+ disable_hlt();
+#endif
+ }
+ restore_flags(flags);
+}
+
+static void floppy_enable_hlt(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ if (hlt_disabled){
+ hlt_disabled=0;
+#ifdef HAVE_DISABLE_HLT
+ enable_hlt();
+#endif
+ }
+ restore_flags(flags);
+}
+
+
+static void setup_DMA(void)
+{
+#ifdef FLOPPY_SANITY_CHECK
+ if (raw_cmd->length == 0){
+ int i;
+
+ printk("zero dma transfer size:");
+ for (i=0; i < raw_cmd->cmd_count; i++)
+ printk("%x,", raw_cmd->cmd[i]);
+ printk("\n");
+ cont->done(0);
+ FDCS->reset = 1;
+ return;
+ }
+ if ((long) raw_cmd->kernel_data % 512){
+ printk("non aligned address: %p\n", raw_cmd->kernel_data);
+ cont->done(0);
+ FDCS->reset=1;
+ return;
+ }
+ if (CROSS_64KB(raw_cmd->kernel_data, raw_cmd->length)) {
+ printk("DMA crossing 64-K boundary %p-%p\n",
+ raw_cmd->kernel_data,
+ raw_cmd->kernel_data + raw_cmd->length);
+ cont->done(0);
+ FDCS->reset=1;
+ return;
+ }
+#endif
+ cli();
+ fd_disable_dma();
+ fd_clear_dma_ff();
+ fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ)?
+ DMA_MODE_READ : DMA_MODE_WRITE);
+ fd_set_dma_addr(virt_to_bus(raw_cmd->kernel_data));
+ fd_set_dma_count(raw_cmd->length);
+ fd_enable_dma();
+ sti();
+ floppy_disable_hlt();
+}
+
+/* sends a command byte to the fdc */
+static int output_byte(char byte)
+{
+ int counter;
+ unsigned char status = 0;
+ unsigned char rstatus;
+
+ if (FDCS->reset)
+ return -1;
+ for (counter = 0; counter < 10000 && !FDCS->reset; counter++) {
+ rstatus = fd_inb(FD_STATUS);
+ status = rstatus &(STATUS_READY|STATUS_DIR|STATUS_DMA);
+ if (!(status & STATUS_READY))
+ continue;
+ if (status == STATUS_READY){
+ fd_outb(byte,FD_DATA);
+
+#ifdef FLOPPY_SANITY_CHECK
+ output_log[output_log_pos].data = byte;
+ output_log[output_log_pos].status = rstatus;
+ output_log[output_log_pos].jiffies = jiffies;
+ output_log_pos = (output_log_pos + 1) % OLOGSIZE;
+#endif
+ return 0;
+ } else
+ break;
+ }
+ FDCS->reset = 1;
+ if (!initialising)
+ DPRINT2("Unable to send byte %x to FDC. Status=%x\n",
+ byte, status);
+ return -1;
+}
+#define LAST_OUT(x) if (output_byte(x)){ reset_fdc();return;}
+
+/* gets the response from the fdc */
+static int result(void)
+{
+ int i = 0, counter, status = 0;
+
+ if (FDCS->reset)
+ return -1;
+ for (counter = 0; counter < 10000 && !FDCS->reset; counter++) {
+ status = fd_inb(FD_STATUS)&
+ (STATUS_DIR|STATUS_READY|STATUS_BUSY|STATUS_DMA);
+ if (!(status & STATUS_READY))
+ continue;
+ if (status == STATUS_READY){
+#ifdef FLOPPY_SANITY_CHECK
+ resultjiffies = jiffies;
+ resultsize = i;
+#endif
+ return i;
+ }
+ if (status & STATUS_DMA)
+ break;
+ if (status == (STATUS_DIR|STATUS_READY|STATUS_BUSY)) {
+ if (i >= MAX_REPLIES) {
+ DPRINT("floppy_stat reply overrun\n");
+ break;
+ }
+ reply_buffer[i++] = fd_inb(FD_DATA);
+ }
+ }
+ FDCS->reset = 1;
+ if (!initialising)
+ DPRINT3("Getstatus times out (%x) on fdc %d [%d]\n",
+ status, fdc, i);
+ return -1;
+}
+
+/* Set perpendicular mode as required, based on data rate, if supported.
+ * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
+ */
+static inline void perpendicular_mode(void)
+{
+ unsigned char perp_mode;
+
+ if (raw_cmd->rate & 0x40){
+ switch(raw_cmd->rate & 3){
+ case 0:
+ perp_mode=2;
+ break;
+ case 3:
+ perp_mode=3;
+ break;
+ default:
+ DPRINT("Invalid data rate for perpendicular mode!\n");
+ cont->done(0);
+ FDCS->reset = 1; /* convenient way to return to
+					  * redo without too much hassle (deep
+					  * stack et al.) */
+ return;
+ }
+ } else
+ perp_mode = 0;
+
+ if (FDCS->perp_mode == perp_mode)
+ return;
+ if (FDCS->version >= FDC_82077_ORIG && FDCS->has_fifo) {
+ output_byte(FD_PERPENDICULAR);
+ output_byte(perp_mode);
+ FDCS->perp_mode = perp_mode;
+ } else if (perp_mode) {
+ DPRINT("perpendicular mode not supported by this FDC.\n");
+ }
+} /* perpendicular_mode */
+
+#define NOMINAL_DTR 500
+
+/* Issue a "SPECIFY" command to set the step rate time, head unload time,
+ * head load time, and DMA disable flag to values needed by floppy.
+ *
+ * The value "dtr" is the data transfer rate in Kbps. It is needed
+ * to account for the data rate-based scaling done by the 82072 and 82077
+ * FDC types. This parameter is ignored for other types of FDCs (i.e.
+ * 8272a).
+ *
+ * Note that changing the data transfer rate has a (probably deleterious)
+ * effect on the parameters subject to scaling for 82072/82077 FDCs, so
+ * fdc_specify is called again after each data transfer rate
+ * change.
+ *
+ * srt: 1000 to 16000 in microseconds
+ * hut: 16 to 240 milliseconds
+ * hlt: 2 to 254 milliseconds
+ *
+ * These values are rounded up to the next highest available delay time.
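+ *
+ * Purely illustrative example (the numbers are made up, not taken from the
+ * drive parameter tables): at the nominal 500 kbps rate, srt = 6000 us gives
+ * a code of 16 - 6 = 10, hut = 16 ms gives 1, and hlt = 20 ms gives 10, so
+ * spec1 = 0xA1 and spec2 = 0x14.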
+ */
+static void fdc_specify(void)
+{
+ unsigned char spec1, spec2;
+ int srt, hlt, hut;
+ unsigned long dtr = NOMINAL_DTR;
+ unsigned long scale_dtr = NOMINAL_DTR;
+ int hlt_max_code = 0x7f;
+ int hut_max_code = 0xf;
+
+ if (FDCS->need_configure && FDCS->has_fifo) {
+ if (FDCS->reset)
+ return;
+ /* Turn on FIFO for 82077-class FDC (improves performance) */
+ /* TODO: lock this in via LOCK during initialization */
+ output_byte(FD_CONFIGURE);
+ output_byte(0);
+ output_byte(0x2A); /* FIFO on, polling off, 10 byte threshold */
+ output_byte(0); /* precompensation from track 0 upwards */
+ if (FDCS->reset){
+ FDCS->has_fifo=0;
+ return;
+ }
+ FDCS->need_configure = 0;
+ /*DPRINT("FIFO enabled\n");*/
+ }
+
+ switch (raw_cmd->rate & 0x03) {
+ case 3:
+ dtr = 1000;
+ break;
+ case 1:
+ dtr = 300;
+ break;
+ case 2:
+ dtr = 250;
+ break;
+ }
+
+ if (FDCS->version >= FDC_82072) {
+ scale_dtr = dtr;
+ hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */
+ hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */
+ }
+
+ /* Convert step rate from microseconds to milliseconds and 4 bits */
+ srt = 16 - (DP->srt*scale_dtr/1000 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ SUPBOUND(srt, 0xf);
+ INFBOUND(srt, 0);
+
+ hlt = (DP->hlt*scale_dtr/2 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ if (hlt < 0x01)
+ hlt = 0x01;
+ else if (hlt > 0x7f)
+ hlt = hlt_max_code;
+
+ hut = (DP->hut*scale_dtr/16 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ if (hut < 0x1)
+ hut = 0x1;
+ else if (hut > 0xf)
+ hut = hut_max_code;
+
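+ /* SPECIFY packs SRT into the high nibble and HUT into the low nibble
+ * of the first byte; HLT occupies bits 7..1 of the second byte, and
+ * bit 0 (the non-DMA flag) is left clear so transfers use DMA. */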
+ spec1 = (srt << 4) | hut;
+ spec2 = (hlt << 1);
+
+ /* If these parameters did not change, just return with success */
+ if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) {
+ /* Go ahead and set spec1 and spec2 */
+ output_byte(FD_SPECIFY);
+ output_byte(FDCS->spec1 = spec1);
+ output_byte(FDCS->spec2 = spec2);
+ }
+} /* fdc_specify */
+
+/* Set the FDC's data transfer rate on behalf of the specified drive.
+ * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue
+ * of the specify command (i.e. using the fdc_specify function).
+ */
+static int fdc_dtr(void)
+{
+ /* If data rate not already set to desired value, set it. */
+ if ((raw_cmd->rate & 3) == FDCS->dtr)
+ return 0;
+
+ /* Set dtr */
+ fd_outb(raw_cmd->rate & 3, FD_DCR);
+
+ /* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
+ * need a stabilization period of several milliseconds to be
+ * enforced after data rate changes before R/W operations.
+ * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
+ */
+ FDCS->dtr = raw_cmd->rate & 3;
+ return(wait_for_completion(jiffies+2*HZ/100,
+ (timeout_fn) floppy_ready));
+} /* fdc_dtr */
+
+static void tell_sector(void)
+{
+ printk(": track %d, head %d, sector %d, size %d",
+ R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE);
+} /* tell_sector */
+
+
+/*
+ * OK, this error-interpreting routine is called after a DMA read/write has
+ * succeeded or failed, so we check the results and copy any buffers.
+ * hhb: Added better error reporting.
+ * ak: Made this into a separate routine.
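+ *
+ * Return value, as used by the callers: 0 if the command terminated
+ * normally, 1 for an error that should be counted and retried, 2 for a
+ * condition that has already been dealt with here (write protect, invalid
+ * command, abnormal termination).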
+ */
+static int interpret_errors(void)
+{
+ char bad;
+
+ if (inr!=7) {
+ DPRINT("-- FDC reply error");
+ FDCS->reset = 1;
+ return 1;
+ }
+
+ /* check IC to find cause of interrupt */
+ switch (ST0 & ST0_INTR) {
+ case 0x40: /* error occurred during command execution */
+ bad = 1;
+ if (ST1 & ST1_WP) {
+ DPRINT("Drive is write protected\n");
+ CLEARF(FD_DISK_WRITABLE);
+ cont->done(0);
+ bad = 2;
+ } else if (ST1 & ST1_ND) {
+ SETF(FD_NEED_TWADDLE);
+ } else if (ST1 & ST1_OR) {
+ if (DP->flags & FTD_MSG)
+ DPRINT("Over/Underrun - retrying\n");
+ bad = 0;
+ }else if (*errors >= DP->max_errors.reporting){
+ DPRINT("");
+ if (ST0 & ST0_ECE) {
+ printk("Recalibrate failed!");
+ } else if (ST2 & ST2_CRC) {
+ printk("data CRC error");
+ tell_sector();
+ } else if (ST1 & ST1_CRC) {
+ printk("CRC error");
+ tell_sector();
+ } else if ((ST1 & (ST1_MAM|ST1_ND)) || (ST2 & ST2_MAM)) {
+ if (!probing) {
+ printk("sector not found");
+ tell_sector();
+ } else
+ printk("probe failed...");
+ } else if (ST2 & ST2_WC) { /* seek error */
+ printk("wrong cylinder");
+ } else if (ST2 & ST2_BC) { /* cylinder marked as bad */
+ printk("bad cylinder");
+ } else {
+ printk("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x", ST0, ST1, ST2);
+ tell_sector();
+ }
+ printk("\n");
+
+ }
+ if (ST2 & ST2_WC || ST2 & ST2_BC)
+ /* wrong cylinder => recal */
+ DRS->track = NEED_2_RECAL;
+ return bad;
+ case 0x80: /* invalid command given */
+ DPRINT("Invalid FDC command given!\n");
+ cont->done(0);
+ return 2;
+ case 0xc0:
+ DPRINT("Abnormal termination caused by polling\n");
+ cont->error();
+ return 2;
+ default: /* (0) Normal command termination */
+ return 0;
+ }
+}
+
+/*
+ * This routine is called when everything should be correctly set up
+ * for the transfer (i.e. the floppy motor is on, the correct floppy is
+ * selected, and the head is sitting on the right track).
+ */
+static void setup_rw_floppy(void)
+{
+ int i,ready_date,r, flags,dflags;
+ timeout_fn function;
+
+ flags = raw_cmd->flags;
+ if (flags & (FD_RAW_READ | FD_RAW_WRITE))
+ flags |= FD_RAW_INTR;
+
+ if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)){
+ ready_date = DRS->spinup_date + DP->spinup;
+ /* If spinup will take a long time, rerun scandrives
+ * just before spinup completion. Beware that
+ * after scandrives, we must again wait for selection.
+ */
+ if (ready_date > jiffies + DP->select_delay){
+ ready_date -= DP->select_delay;
+ function = (timeout_fn) floppy_start;
+ } else
+ function = (timeout_fn) setup_rw_floppy;
+
+ /* wait until the floppy is spinning fast enough */
+ if (wait_for_completion(ready_date,function))
+ return;
+ }
+ dflags = DRS->flags;
+
+ if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
+ setup_DMA();
+
+ if (flags & FD_RAW_INTR)
+ SET_INTR(main_command_interrupt);
+
+ r=0;
+ for (i=0; i< raw_cmd->cmd_count; i++)
+ r|=output_byte(raw_cmd->cmd[i]);
+
+#ifdef DEBUGT
+ debugt("rw_command: ");
+#endif
+ if (r){
+ reset_fdc();
+ return;
+ }
+
+ if (!(flags & FD_RAW_INTR)){
+ inr = result();
+ cont->interrupt();
+ } else if (flags & FD_RAW_NEED_DISK)
+ fd_watchdog();
+}
+
+static int blind_seek;
+
+/*
+ * This is the routine called after every seek (or recalibrate) interrupt
+ * from the floppy controller.
+ */
+static void seek_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("seek interrupt:");
+#endif
+ if (inr != 2 || (ST0 & 0xF8) != 0x20) {
+ DPRINT("seek failed\n");
+ DRS->track = NEED_2_RECAL;
+ cont->error();
+ cont->redo();
+ return;
+ }
+ if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek){
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("clearing NEWCHANGE flag because of effective seek\n");
+ DPRINT1("jiffies=%ld\n", jiffies);
+ }
+#endif
+ CLEARF(FD_DISK_NEWCHANGE); /* effective seek */
+ DRS->select_date = jiffies;
+ }
+ DRS->track = ST1;
+ floppy_ready();
+}
+
+static void check_wp(void)
+{
+ if (TESTF(FD_VERIFY)) {
+ /* check write protection */
+ output_byte(FD_GETSTATUS);
+ output_byte(UNIT(current_drive));
+ if (result() != 1){
+ FDCS->reset = 1;
+ return;
+ }
+ CLEARF(FD_VERIFY);
+ CLEARF(FD_NEED_TWADDLE);
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("checking whether disk is write protected\n");
+ DPRINT1("wp=%x\n",ST3 & 0x40);
+ }
+#endif
+ if (!(ST3 & 0x40))
+ SETF(FD_DISK_WRITABLE);
+ else
+ CLEARF(FD_DISK_WRITABLE);
+ }
+}
+
+static void seek_floppy(void)
+{
+ int track;
+
+ blind_seek=0;
+
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from seek\n");
+ }
+#endif
+
+ if (!TESTF(FD_DISK_NEWCHANGE) &&
+ disk_change(current_drive) &&
+ (raw_cmd->flags & FD_RAW_NEED_DISK)){
+ /* the media changed flag should be cleared after the seek.
+ * If it isn't, this means that there is really no disk in
+ * the drive.
+ */
+ SETF(FD_DISK_CHANGED);
+ cont->done(0);
+ cont->redo();
+ return;
+ }
+ if (DRS->track <= NEED_1_RECAL){
+ recalibrate_floppy();
+ return;
+ } else if (TESTF(FD_DISK_NEWCHANGE) &&
+ (raw_cmd->flags & FD_RAW_NEED_DISK) &&
+ (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) {
+ /* we seek to clear the media-changed condition. Does anybody
+ * know a more elegant way, which works on all drives? */
+ if (raw_cmd->track)
+ track = raw_cmd->track - 1;
+ else {
+ if (DP->flags & FD_SILENT_DCL_CLEAR){
+ set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0);
+ blind_seek = 1;
+ raw_cmd->flags |= FD_RAW_NEED_SEEK;
+ }
+ track = 1;
+ }
+ } else {
+ check_wp();
+ if (raw_cmd->track != DRS->track &&
+ (raw_cmd->flags & FD_RAW_NEED_SEEK))
+ track = raw_cmd->track;
+ else {
+ setup_rw_floppy();
+ return;
+ }
+ }
+
+ SET_INTR(seek_interrupt);
+ output_byte(FD_SEEK);
+ output_byte(UNIT(current_drive));
+ LAST_OUT(track);
+#ifdef DEBUGT
+ debugt("seek command:");
+#endif
+}
+
+static void recal_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("recal interrupt:");
+#endif
+ if (inr !=2)
+ FDCS->reset = 1;
+ else if (ST0 & ST0_ECE) {
+ switch(DRS->track){
+ case NEED_1_RECAL:
+#ifdef DEBUGT
+ debugt("recal interrupt need 1 recal:");
+#endif
+ /* after a second recalibrate, we still haven't
+ * reached track 0. Probably no drive. Raise an
+ * error, as failing immediately might upset
+ * computers possessed by the Devil :-) */
+ cont->error();
+ cont->redo();
+ return;
+ case NEED_2_RECAL:
+#ifdef DEBUGT
+ debugt("recal interrupt need 2 recal:");
+#endif
+ /* If we already did a recalibrate,
+ * and we are not at track 0, this
+ * means we have moved. (The only way
+ * not to move at recalibration is to
+ * be already at track 0.) Clear the
+ * new change flag */
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("clearing NEWCHANGE flag because of second recalibrate\n");
+ }
+#endif
+
+ CLEARF(FD_DISK_NEWCHANGE);
+ DRS->select_date = jiffies;
+ /* fall through */
+ default:
+#ifdef DEBUGT
+ debugt("recal interrupt default:");
+#endif
+ /* Recalibrate moves the head by at
+ * most 80 steps. If after one
+ * recalibrate we still haven't reached
+ * track 0, this might mean that we
+ * started beyond track 80. Try
+ * again. */
+ DRS->track = NEED_1_RECAL;
+ break;
+ }
+ } else
+ DRS->track = ST1;
+ floppy_ready();
+}
+
+/*
+ * Unexpected interrupt - Print as much debugging info as we can...
+ * All bets are off...
+ */
+static void unexpected_floppy_interrupt(void)
+{
+ int i;
+ if (initialising)
+ return;
+ if (print_unex){
+ DPRINT("unexpected interrupt\n");
+ if (inr >= 0)
+ for (i=0; i<inr; i++)
+ printk("%d %x\n", i, reply_buffer[i]);
+ }
+ while(1){
+ output_byte(FD_SENSEI);
+ inr=result();
+ if (inr != 2)
+ break;
+ if (print_unex){
+ printk("sensei\n");
+ for (i=0; i<inr; i++)
+ printk("%d %x\n", i, reply_buffer[i]);
+ }
+ }
+ FDCS->reset = 1;
+}
+
+static struct tq_struct floppy_tq =
+{ 0, 0, (void *) (void *) unexpected_floppy_interrupt, 0 };
+
+/* interrupt handler */
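+/* reads the FDC result (if any) and defers the registered handler to the
+ * timer task queue; an interrupt with no registered handler is treated as
+ * unexpected */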
+static void floppy_interrupt(int irq, struct pt_regs * regs)
+{
+ void (*handler)(void) = DEVICE_INTR;
+
+ lasthandler = handler;
+ interruptjiffies = jiffies;
+
+ floppy_enable_hlt();
+ CLEAR_INTR;
+ if (fdc >= N_FDC || FDCS->address == -1){
+ /* we don't even know which FDC is the culprit */
+ printk("DOR0=%x\n", fdc_state[0].dor);
+ printk("floppy interrupt on bizarre fdc %d\n",fdc);
+ printk("handler=%p\n", handler);
+ is_alive("bizarre fdc");
+ return;
+ }
+ inr = result();
+ if (!handler){
+ unexpected_floppy_interrupt();
+ is_alive("unexpected");
+ return;
+ }
+ if (inr == 0){
+ do {
+ output_byte(FD_SENSEI);
+ inr = result();
+ } while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2);
+ }
+ floppy_tq.routine = (void *)(void *) handler;
+ queue_task_irq(&floppy_tq, &tq_timer);
+ is_alive("normal interrupt end");
+}
+
+static void recalibrate_floppy(void)
+{
+#ifdef DEBUGT
+ debugt("recalibrate floppy:");
+#endif
+ SET_INTR(recal_interrupt);
+ output_byte(FD_RECALIBRATE);
+ LAST_OUT(UNIT(current_drive));
+}
+
+/*
+ * Must do 4 FD_SENSEIs after reset because of ``drive polling''.
+ */
+static void reset_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("reset interrupt:");
+#endif
+ /* fdc_specify(); reprogram fdc */
+ result(); /* get the status ready for set_fdc */
+ if (FDCS->reset) {
+ printk("reset set in interrupt, calling %p\n", cont->error);
+ cont->error(); /* a reset just after a reset. BAD! */
+ }
+ cont->redo();
+}
+
+/*
+ * A reset is done by pulling bit 2 of the DOR low for a while (old FDCs),
+ * or by setting the self-clearing bit 7 of STATUS (newer FDCs).
+ */
+static void reset_fdc(void)
+{
+ SET_INTR(reset_interrupt);
+ FDCS->reset = 0;
+ reset_fdc_info(0);
+ if (FDCS->version >= FDC_82077)
+ fd_outb(0x80 | (FDCS->dtr &3), FD_STATUS);
+ else {
+ fd_outb(FDCS->dor & ~0x04, FD_DOR);
+ udelay(FD_RESET_DELAY);
+ fd_outb(FDCS->dor, FD_DOR);
+ }
+}
+
+static void empty(void)
+{
+}
+
+void show_floppy(void)
+{
+ int i;
+
+ printk("\n");
+ printk("floppy driver state\n");
+ printk("-------------------\n");
+ printk("now=%ld last interrupt=%d last called handler=%p\n",
+ jiffies, interruptjiffies, lasthandler);
+
+
+#ifdef FLOPPY_SANITY_CHECK
+ printk("timeout_message=%s\n", timeout_message);
+ printk("last output bytes:\n");
+ for (i=0; i < OLOGSIZE; i++)
+ printk("%2x %2x %ld\n",
+ output_log[(i+output_log_pos) % OLOGSIZE].data,
+ output_log[(i+output_log_pos) % OLOGSIZE].status,
+ output_log[(i+output_log_pos) % OLOGSIZE].jiffies);
+ printk("last result at %d\n", resultjiffies);
+ printk("last redo_fd_request at %d\n", lastredo);
+ for (i=0; i<resultsize; i++){
+ printk("%2x ", reply_buffer[i]);
+ }
+ printk("\n");
+#endif
+
+ printk("status=%x\n", fd_inb(FD_STATUS));
+ printk("fdc_busy=%d\n", fdc_busy);
+ if (DEVICE_INTR)
+ printk("DEVICE_INTR=%p\n", DEVICE_INTR);
+ if (floppy_tq.sync)
+ printk("floppy_tq.routine=%p\n", floppy_tq.routine);
+ if (fd_timer.prev)
+ printk("fd_timer.function=%p\n", fd_timer.function);
+ if (fd_timeout.prev){
+ printk("timer_table=%p\n",fd_timeout.function);
+ printk("expires=%ld\n",fd_timeout.expires-jiffies);
+ printk("now=%ld\n",jiffies);
+ }
+ printk("cont=%p\n", cont);
+ printk("CURRENT=%p\n", CURRENT);
+ printk("command_status=%d\n", command_status);
+ printk("\n");
+}
+
+static void floppy_shutdown(void)
+{
+ if (!initialising)
+ show_floppy();
+ CLEAR_INTR;
+ floppy_tq.routine = (void *)(void *) empty;
+ del_timer(&fd_timer);
+ sti();
+
+ floppy_enable_hlt();
+ fd_disable_dma();
+ /* avoid dma going to a random drive after shutdown */
+
+ if (!initialising)
+ DPRINT("floppy timeout\n");
+ FDCS->reset = 1;
+ if (cont){
+ cont->done(0);
+ cont->redo(); /* this will recall reset when needed */
+ } else {
+ printk("no cont in shutdown!\n");
+ process_fd_request();
+ }
+ is_alive("floppy shutdown");
+}
+/*typedef void (*timeout_fn)(unsigned long);*/
+
+/* start motor, check media-changed condition and write protection */
+static int start_motor(void (*function)(void) )
+{
+ int mask, data;
+
+ mask = 0xfc;
+ data = UNIT(current_drive);
+ if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)){
+ if (!(FDCS->dor & (0x10 << UNIT(current_drive)))){
+ set_debugt();
+ /* no read since this drive is running */
+ DRS->first_read_date = 0;
+ /* note motor start time if motor is not yet running */
+ DRS->spinup_date = jiffies;
+ data |= (0x10 << UNIT(current_drive));
+ }
+ } else
+ if (FDCS->dor & (0x10 << UNIT(current_drive)))
+ mask &= ~(0x10 << UNIT(current_drive));
+
+ /* starts motor and selects floppy */
+ del_timer(motor_off_timer + current_drive);
+ set_dor(fdc, mask, data);
+
+ /* wait_for_completion also schedules reset if needed. */
+ return(wait_for_completion(DRS->select_date+DP->select_delay,
+ (timeout_fn) function));
+}
+
+static void floppy_ready(void)
+{
+ CHECK_RESET;
+ if (start_motor(floppy_ready)) return;
+ if (fdc_dtr()) return;
+
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from floppy_ready\n");
+ }
+#endif
+
+ if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
+ disk_change(current_drive) &&
+ !DP->select_delay)
+ twaddle(); /* this clears the dcl on certain drive/controller
+ * combinations */
+
+ if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)){
+ perpendicular_mode();
+ fdc_specify(); /* must be done here because of hut, hlt ... */
+ seek_floppy();
+ } else
+ setup_rw_floppy();
+}
+
+static void floppy_start(void)
+{
+ reschedule_timeout(CURRENTD, "floppy start", 0);
+
+ scandrives();
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("setting NEWCHANGE in floppy_start\n");
+ }
+#endif
+ SETF(FD_DISK_NEWCHANGE);
+ floppy_ready();
+}
+
+/*
+ * ========================================================================
+ * here ends the bottom half. Exported routines are:
+ * floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc,
+ * start_motor, reset_fdc, reset_fdc_info, interpret_errors.
+ * Initialisation also uses output_byte, result, set_dor and
+ * floppy_interrupt.
+ * ========================================================================
+ */
+/*
+ * General purpose continuations.
+ * ==============================
+ */
+
+static void do_wakeup(void)
+{
+ reschedule_timeout(MAXTIMEOUT, "do wakeup", 0);
+ cont = 0;
+ command_status += 2;
+ wake_up(&command_done);
+}
+
+static struct cont_t wakeup_cont={
+ empty,
+ do_wakeup,
+ empty,
+ (done_f)empty
+};
+
+static int wait_til_done(void (*handler)(void), int interruptible)
+{
+ int ret;
+
+ floppy_tq.routine = (void *)(void *) handler;
+ queue_task(&floppy_tq, &tq_timer);
+
+ cli();
+ while(command_status < 2 && NO_SIGNAL){
+ is_alive("wait_til_done");
+ if (interruptible)
+ interruptible_sleep_on(&command_done);
+ else
+ sleep_on(&command_done);
+ }
+ if (command_status < 2){
+ floppy_shutdown();
+ sti();
+ process_fd_request();
+ return -EINTR;
+ }
+ sti();
+
+ if (FDCS->reset)
+ command_status = FD_COMMAND_ERROR;
+ if (command_status == FD_COMMAND_OKAY)
+ ret=0;
+ else
+ ret=-EIO;
+ command_status = FD_COMMAND_NONE;
+ return ret;
+}
+
+static void generic_done(int result)
+{
+ command_status = result;
+ cont = &wakeup_cont;
+}
+
+static void generic_success(void)
+{
+ cont->done(1);
+}
+
+static void generic_failure(void)
+{
+ cont->done(0);
+}
+
+static void success_and_wakeup(void)
+{
+ generic_success();
+ cont->redo();
+}
+
+
+/*
+ * formatting and rw support.
+ * ==========================
+ */
+
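+/* find the first usable entry of the autodetection list at or after
+ * DRS->probed_format; returns 0 if one was found (and records it), 1 if the
+ * list is exhausted (probed_format is then reset to 0) */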
+static int next_valid_format(void)
+{
+ int probed_format;
+
+ probed_format = DRS->probed_format;
+ while(1){
+ if (probed_format >= 8 ||
+ !DP->autodetect[probed_format]){
+ DRS->probed_format = 0;
+ return 1;
+ }
+ if (floppy_type[DP->autodetect[probed_format]].sect){
+ DRS->probed_format = probed_format;
+ return 0;
+ }
+ probed_format++;
+ }
+}
+
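+/* called after a failed operation: while probing, simply move on to the next
+ * candidate format; otherwise count the error and escalate (give up, reset
+ * the FDC, or force a recalibration) as the per-drive thresholds are
+ * exceeded */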
+static void bad_flp_intr(void)
+{
+ if (probing){
+ DRS->probed_format++;
+ if (!next_valid_format())
+ return;
+ }
+ (*errors)++;
+ INFBOUND(DRWE->badness, *errors);
+ if (*errors > DP->max_errors.abort)
+ cont->done(0);
+ if (*errors > DP->max_errors.reset)
+ FDCS->reset = 1;
+ else if (*errors > DP->max_errors.recal)
+ DRS->track = NEED_2_RECAL;
+}
+
+static void set_floppy(kdev_t device)
+{
+ if (TYPE(device))
+ floppy = TYPE(device) + floppy_type;
+ else
+ floppy = current_type[ DRIVE(device) ];
+}
+
+/*
+ * formatting and support.
+ * =======================
+ */
+static void format_interrupt(void)
+{
+ switch (interpret_errors()){
+ case 1:
+ cont->error();
+ case 2:
+ break;
+ case 0:
+ cont->done(1);
+ }
+ cont->redo();
+}
+
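+/* CODE2SIZE computes ssize, the physical sector size in 512-byte units
+ * (rounded up to at least 1), from the size code. FM_MODE clears the MFM bit
+ * of a command when the format uses FM (rate bit 7 set); CT sets that bit so
+ * that command types can be compared independently of FM/MFM. */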
+#define CODE2SIZE (ssize = ((1 << SIZECODE) + 3) >> 2)
+#define FM_MODE(x,y) ((y) & ~(((x)->rate & 0x80) >>1))
+#define CT(x) ((x) | 0x40)
+static void setup_format_params(int track)
+{
+ struct fparm {
+ unsigned char track,head,sect,size;
+ } *here = (struct fparm *)floppy_track_buffer;
+ int il,n;
+ int count,head_shift,track_shift;
+
+ raw_cmd = &default_raw_cmd;
+ raw_cmd->track = track;
+
+ raw_cmd->flags = FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN |
+ /*FD_RAW_NEED_DISK |*/ FD_RAW_NEED_SEEK;
+ raw_cmd->rate = floppy->rate & 0x43;
+ raw_cmd->cmd_count = NR_F;
+ COMMAND = FM_MODE(floppy,FD_FORMAT);
+ DR_SELECT = UNIT(current_drive) + PH_HEAD(floppy,format_req.head);
+ F_SIZECODE = FD_SIZECODE(floppy);
+ F_SECT_PER_TRACK = floppy->sect << 2 >> F_SIZECODE;
+ F_GAP = floppy->fmt_gap;
+ F_FILL = FD_FILL_BYTE;
+
+ raw_cmd->kernel_data = floppy_track_buffer;
+ raw_cmd->length = 4 * F_SECT_PER_TRACK;
+
+ /* allow for about 30ms for data transport per track */
+ head_shift = (F_SECT_PER_TRACK + 5) / 6;
+
+ /* a ``cylinder'' is two tracks plus a little stepping time */
+ track_shift = 2 * head_shift + 3;
+
+ /* position of logical sector 1 on this track */
+ n = (track_shift * format_req.track + head_shift * format_req.head)
+ % F_SECT_PER_TRACK;
+
+ /* determine interleave */
+ il = 1;
+ if (floppy->sect > DP->interleave_sect && F_SIZECODE == 2)
+ il++;
+
+ /* initialize field */
+ for (count = 0; count < F_SECT_PER_TRACK; ++count) {
+ here[count].track = format_req.track;
+ here[count].head = format_req.head;
+ here[count].sect = 0;
+ here[count].size = F_SIZECODE;
+ }
+ /* place logical sectors */
+ for (count = 1; count <= F_SECT_PER_TRACK; ++count) {
+ here[n].sect = count;
+ n = (n+il) % F_SECT_PER_TRACK;
+ if (here[n].sect) { /* sector busy, find next free sector */
+ ++n;
+ if (n>= F_SECT_PER_TRACK) {
+ n-=F_SECT_PER_TRACK;
+ while (here[n].sect) ++n;
+ }
+ }
+ }
+}
+
+static void redo_format(void)
+{
+ buffer_track = -1;
+ setup_format_params(format_req.track << STRETCH(floppy));
+ floppy_start();
+#ifdef DEBUGT
+ debugt("queue format request");
+#endif
+}
+
+static struct cont_t format_cont={
+ format_interrupt,
+ redo_format,
+ bad_flp_intr,
+ generic_done };
+
+static int do_format(kdev_t device, struct format_descr *tmp_format_req)
+{
+ int ret;
+ int drive=DRIVE(device);
+
+ LOCK_FDC(drive,1);
+ set_floppy(device);
+ if (!floppy ||
+ floppy->track > DP->tracks ||
+ tmp_format_req->track >= floppy->track ||
+ tmp_format_req->head >= floppy->head ||
+ (floppy->sect << 2) % (1 << FD_SIZECODE(floppy)) ||
+ !floppy->fmt_gap) {
+ process_fd_request();
+ return -EINVAL;
+ }
+ format_req = *tmp_format_req;
+ format_errors = 0;
+ cont = &format_cont;
+ errors = &format_errors;
+ IWAIT(redo_format);
+ process_fd_request();
+ return ret;
+}
+
+/*
+ * Buffer read/write and support
+ * =============================
+ */
+
+/* new request_done. Can handle physical sectors which are smaller than a
+ * logical buffer */
+static void request_done(int uptodate)
+{
+ int block;
+
+ probing = 0;
+ reschedule_timeout(MAXTIMEOUT, "request done %d", uptodate);
+
+ if (!CURRENT){
+ DPRINT("request list destroyed in floppy request done\n");
+ return;
+ }
+ if (uptodate){
+ /* maintain values for invalidation on geometry
+ * change */
+ block = current_count_sectors + CURRENT->sector;
+ INFBOUND(DRS->maxblock, block);
+ if (block > floppy->sect)
+ DRS->maxtrack = 1;
+
+ /* unlock chained buffers */
+ while (current_count_sectors && CURRENT &&
+ current_count_sectors >= CURRENT->current_nr_sectors){
+ current_count_sectors -= CURRENT->current_nr_sectors;
+ CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
+ CURRENT->sector += CURRENT->current_nr_sectors;
+ end_request(1);
+ }
+ if (current_count_sectors && CURRENT){
+ /* "unlock" last subsector */
+ CURRENT->buffer += current_count_sectors <<9;
+ CURRENT->current_nr_sectors -= current_count_sectors;
+ CURRENT->nr_sectors -= current_count_sectors;
+ CURRENT->sector += current_count_sectors;
+ return;
+ }
+
+ if (current_count_sectors && !CURRENT)
+ DPRINT("request list destroyed in floppy request done\n");
+
+ } else {
+ if (CURRENT->cmd == WRITE) {
+ /* record write error information */
+ DRWE->write_errors++;
+ if (DRWE->write_errors == 1) {
+ DRWE->first_error_sector = CURRENT->sector;
+ DRWE->first_error_generation = DRS->generation;
+ }
+ DRWE->last_error_sector = CURRENT->sector;
+ DRWE->last_error_generation = DRS->generation;
+ }
+ end_request(0);
+ }
+}
+
+/* Interrupt handler evaluating the result of the r/w operation */
+static void rw_interrupt(void)
+{
+ int nr_sectors, ssize;
+
+ if (!DRS->first_read_date)
+ DRS->first_read_date = jiffies;
+
+ nr_sectors = 0;
+ CODE2SIZE;
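+ /* how many 512-byte sectors were actually transferred, derived from
+ * where the FDC stopped (R_TRACK/R_HEAD/R_SECTOR) relative to the
+ * start of the request */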
+ nr_sectors = ((R_TRACK-TRACK)*floppy->head+R_HEAD-HEAD) *
+ floppy->sect + ((R_SECTOR-SECTOR) << SIZECODE >> 2) -
+ (sector_t % floppy->sect) % ssize;
+
+#ifdef FLOPPY_SANITY_CHECK
+ if (nr_sectors > current_count_sectors + ssize -
+ (current_count_sectors + sector_t) % ssize +
+ sector_t % ssize){
+ DPRINT2("long rw: %x instead of %lx\n",
+ nr_sectors, current_count_sectors);
+ printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
+ printk("rh=%d h=%d\n", R_HEAD, HEAD);
+ printk("rt=%d t=%d\n", R_TRACK, TRACK);
+ printk("spt=%d st=%d ss=%d\n", SECT_PER_TRACK,
+ sector_t, ssize);
+ }
+#endif
+ INFBOUND(nr_sectors,0);
+ SUPBOUND(current_count_sectors, nr_sectors);
+
+ switch (interpret_errors()){
+ case 2:
+ cont->redo();
+ return;
+ case 1:
+ if (!current_count_sectors){
+ cont->error();
+ cont->redo();
+ return;
+ }
+ break;
+ case 0:
+ if (!current_count_sectors){
+ cont->redo();
+ return;
+ }
+ current_type[current_drive] = floppy;
+ floppy_sizes[TOMINOR(current_drive) ]= floppy->size>>1;
+ break;
+ }
+
+ if (probing) {
+ if (DP->flags & FTD_MSG)
+ DPRINT2("Auto-detected floppy type %s in fd%d\n",
+ floppy->name,current_drive);
+ current_type[current_drive] = floppy;
+ floppy_sizes[TOMINOR(current_drive)] = floppy->size >> 1;
+ probing = 0;
+ }
+
+ if (CT(COMMAND) != FD_READ ||
+ raw_cmd->kernel_data == CURRENT->buffer){
+ /* transfer directly from buffer */
+ cont->done(1);
+ } else if (CT(COMMAND) == FD_READ){
+ buffer_track = raw_cmd->track;
+ buffer_drive = current_drive;
+ INFBOUND(buffer_max, nr_sectors + sector_t);
+ }
+ cont->redo();
+}
+
+/* Compute maximal contiguous buffer size. */
+static int buffer_chain_size(void)
+{
+ struct buffer_head *bh;
+ int size;
+ char *base;
+
+ base = CURRENT->buffer;
+ size = CURRENT->current_nr_sectors << 9;
+ bh = CURRENT->bh;
+
+ if (bh){
+ bh = bh->b_reqnext;
+ while (bh && bh->b_data == base + size){
+ size += bh->b_size;
+ bh = bh->b_reqnext;
+ }
+ }
+ return size >> 9;
+}
+
+/* Compute the maximal transfer size */
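+/* i.e. clip max_sector to the end of this transfer, align it down to a
+ * physical sector boundary and derive current_count_sectors from it */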
+static int transfer_size(int ssize, int max_sector, int max_size)
+{
+ SUPBOUND(max_sector, sector_t + max_size);
+
+ /* alignment */
+ max_sector -= (max_sector % floppy->sect) % ssize;
+
+ /* transfer size, beginning not aligned */
+ current_count_sectors = max_sector - sector_t ;
+
+ return max_sector;
+}
+
+/*
+ * Move data from/to the track buffer to/from the buffer cache.
+ */
+static void copy_buffer(int ssize, int max_sector, int max_sector_2)
+{
+ int remaining; /* number of transferred 512-byte sectors */
+ struct buffer_head *bh;
+ char *buffer, *dma_buffer;
+ int size;
+
+ max_sector = transfer_size(ssize,
+ minimum(max_sector, max_sector_2),
+ CURRENT->nr_sectors);
+
+ if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
+ buffer_max > sector_t + CURRENT->nr_sectors)
+ current_count_sectors = minimum(buffer_max - sector_t,
+ CURRENT->nr_sectors);
+
+ remaining = current_count_sectors << 9;
+#ifdef FLOPPY_SANITY_CHECK
+ if ((remaining >> 9) > CURRENT->nr_sectors &&
+ CT(COMMAND) == FD_WRITE){
+ DPRINT("in copy buffer\n");
+ printk("current_count_sectors=%ld\n", current_count_sectors);
+ printk("remaining=%d\n", remaining >> 9);
+ printk("CURRENT->nr_sectors=%ld\n",CURRENT->nr_sectors);
+ printk("CURRENT->current_nr_sectors=%ld\n",
+ CURRENT->current_nr_sectors);
+ printk("max_sector=%d\n", max_sector);
+ printk("ssize=%d\n", ssize);
+ }
+#endif
+
+ buffer_max = maximum(max_sector, buffer_max);
+
+ dma_buffer = floppy_track_buffer + ((sector_t - buffer_min) << 9);
+
+ bh = CURRENT->bh;
+ size = CURRENT->current_nr_sectors << 9;
+ buffer = CURRENT->buffer;
+
+ while (remaining > 0){
+ SUPBOUND(size, remaining);
+#ifdef FLOPPY_SANITY_CHECK
+ if (dma_buffer + size >
+ floppy_track_buffer + (max_buffer_sectors << 10) ||
+ dma_buffer < floppy_track_buffer){
+ DPRINT1("buffer overrun in copy buffer %d\n",
+ (int) ((floppy_track_buffer - dma_buffer) >>9));
+ printk("sector_t=%d buffer_min=%d\n",
+ sector_t, buffer_min);
+ printk("current_count_sectors=%ld\n",
+ current_count_sectors);
+ if (CT(COMMAND) == FD_READ)
+ printk("read\n");
+ if (CT(COMMAND) == FD_WRITE)
+ printk("write\n");
+ break;
+ }
+ if (((unsigned long)buffer) % 512)
+ DPRINT1("%p buffer not aligned\n", buffer);
+#endif
+ if (CT(COMMAND) == FD_READ) {
+ fd_cacheflush(dma_buffer, size);
+ memcpy(buffer, dma_buffer, size);
+ } else {
+ memcpy(dma_buffer, buffer, size);
+ fd_cacheflush(dma_buffer, size);
+ }
+ remaining -= size;
+ if (!remaining)
+ break;
+
+ dma_buffer += size;
+ bh = bh->b_reqnext;
+#ifdef FLOPPY_SANITY_CHECK
+ if (!bh){
+ DPRINT("bh=null in copy buffer after copy\n");
+ break;
+ }
+#endif
+ size = bh->b_size;
+ buffer = bh->b_data;
+ }
+#ifdef FLOPPY_SANITY_CHECK
+ if (remaining){
+ if (remaining > 0)
+ max_sector -= remaining >> 9;
+ DPRINT1("weirdness: remaining %d\n", remaining>>9);
+ }
+#endif
+}
+
+/*
+ * Formulate a read/write request.
+ * This routine decides where to load the data (directly to buffer, or to
+ * the tmp floppy area), how much data to load (the size of the buffer, the
+ * whole track, or a single sector).
+ * All floppy_track_buffer handling goes in here. If we ever add track buffer
+ * allocation on the fly, it should be done here. No other part should need
+ * modification.
+ */
+
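+/* Returns 0 if the request cannot be carried out, 1 if it could be satisfied
+ * without issuing an FDC command (e.g. the data is already in the track
+ * buffer), and 2 if a command has been set up in raw_cmd and should be
+ * started. */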
+static int make_raw_rw_request(void)
+{
+ int aligned_sector_t;
+ int max_sector, max_size, tracksize, ssize;
+
+ set_fdc(DRIVE(CURRENT->rq_dev));
+
+ raw_cmd = &default_raw_cmd;
+ raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
+ raw_cmd->cmd_count = NR_RW;
+ if (CURRENT->cmd == READ){
+ raw_cmd->flags |= FD_RAW_READ;
+ COMMAND = FM_MODE(floppy,FD_READ);
+ } else if (CURRENT->cmd == WRITE){
+ raw_cmd->flags |= FD_RAW_WRITE;
+ COMMAND = FM_MODE(floppy,FD_WRITE);
+ } else {
+ DPRINT("make_raw_rw_request: unknown command\n");
+ return 0;
+ }
+
+ max_sector = floppy->sect * floppy->head;
+
+ TRACK = CURRENT->sector / max_sector;
+ sector_t = CURRENT->sector % max_sector;
+ if (floppy->track && TRACK >= floppy->track)
+ return 0;
+ HEAD = sector_t / floppy->sect;
+
+ if (((floppy->stretch & FD_SWAPSIDES) || TESTF(FD_NEED_TWADDLE)) &&
+ sector_t < floppy->sect)
+ max_sector = floppy->sect;
+
+ /* 2M disks have phantom sectors on the first track */
+ if ((floppy->rate & FD_2M) && (!TRACK) && (!HEAD)){
+ max_sector = 2 * floppy->sect / 3;
+ if (sector_t >= max_sector){
+ current_count_sectors = minimum(floppy->sect - sector_t,
+ CURRENT->nr_sectors);
+ return 1;
+ }
+ SIZECODE = 2;
+ } else
+ SIZECODE = FD_SIZECODE(floppy);
+ raw_cmd->rate = floppy->rate & 0x43;
+ if ((floppy->rate & FD_2M) &&
+ (TRACK || HEAD) &&
+ raw_cmd->rate == 2)
+ raw_cmd->rate = 1;
+
+ if (SIZECODE)
+ SIZECODE2 = 0xff;
+ else
+ SIZECODE2 = 0x80;
+ raw_cmd->track = TRACK << STRETCH(floppy);
+ DR_SELECT = UNIT(current_drive) + PH_HEAD(floppy,HEAD);
+ GAP = floppy->gap;
+ CODE2SIZE;
+ SECT_PER_TRACK = floppy->sect << 2 >> SIZECODE;
+ SECTOR = ((sector_t % floppy->sect) << 2 >> SIZECODE) + 1;
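+ /* if the track length is not a multiple of the physical sector size,
+ * the tail of the track is covered by additional, smaller sectors */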
+ tracksize = floppy->sect - floppy->sect % ssize;
+ if (tracksize < floppy->sect){
+ SECT_PER_TRACK ++;
+ if (tracksize <= sector_t % floppy->sect)
+ SECTOR--;
+ while (tracksize <= sector_t % floppy->sect){
+ while(tracksize + ssize > floppy->sect){
+ SIZECODE--;
+ ssize >>= 1;
+ }
+ SECTOR++; SECT_PER_TRACK ++;
+ tracksize += ssize;
+ }
+ max_sector = HEAD * floppy->sect + tracksize;
+ } else if (!TRACK && !HEAD && !(floppy->rate & FD_2M) && probing)
+ max_sector = floppy->sect;
+
+ aligned_sector_t = sector_t - (sector_t % floppy->sect) % ssize;
+ max_size = CURRENT->nr_sectors;
+ if ((raw_cmd->track == buffer_track) &&
+ (current_drive == buffer_drive) &&
+ (sector_t >= buffer_min) && (sector_t < buffer_max)) {
+ /* data already in track buffer */
+ if (CT(COMMAND) == FD_READ) {
+ copy_buffer(1, max_sector, buffer_max);
+ return 1;
+ }
+ } else if (aligned_sector_t != sector_t || CURRENT->nr_sectors < ssize){
+ if (CT(COMMAND) == FD_WRITE){
+ if (sector_t + CURRENT->nr_sectors > ssize &&
+ sector_t + CURRENT->nr_sectors < ssize + ssize)
+ max_size = ssize + ssize;
+ else
+ max_size = ssize;
+ }
+ raw_cmd->flags &= ~FD_RAW_WRITE;
+ raw_cmd->flags |= FD_RAW_READ;
+ COMMAND = FM_MODE(floppy,FD_READ);
+ } else if ((unsigned long)CURRENT->buffer < MAX_DMA_ADDRESS) {
+ unsigned long dma_limit;
+ int direct, indirect;
+
+ indirect= transfer_size(ssize,max_sector,max_buffer_sectors*2) -
+ sector_t;
+
+ /*
+ * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide
+ * on a 64 bit machine!
+ */
+ max_size = buffer_chain_size();
+ dma_limit = (MAX_DMA_ADDRESS - ((unsigned long) CURRENT->buffer)) >> 9;
+ if ((unsigned long) max_size > dma_limit) {
+ max_size = dma_limit;
+ }
+ /* 64 kb boundaries */
+ if (CROSS_64KB(CURRENT->buffer, max_size << 9))
+ max_size = (K_64 - ((long) CURRENT->buffer) % K_64)>>9;
+ direct = transfer_size(ssize,max_sector,max_size) - sector_t;
+ /*
+ * We try to read tracks, but if we get too many errors, we
+ * go back to reading just one sector at a time.
+ *
+ * This means we should be able to read a sector even if there
+ * are other bad sectors on this track.
+ */
+ if (!direct ||
+ (indirect * 2 > direct * 3 &&
+ *errors < DP->max_errors.read_track &&
+ /*!TESTF(FD_NEED_TWADDLE) &&*/
+ ((!probing || (DP->read_track&(1<<DRS->probed_format)))))){
+ max_size = CURRENT->nr_sectors;
+ } else {
+ raw_cmd->kernel_data = CURRENT->buffer;
+ raw_cmd->length = current_count_sectors << 9;
+ if (raw_cmd->length == 0){
+ DPRINT("zero dma transfer attempted from make_raw_request\n");
+ DPRINT3("indirect=%d direct=%d sector_t=%d",
+ indirect, direct, sector_t);
+ return 0;
+ }
+ return 2;
+ }
+ }
+
+ if (CT(COMMAND) == FD_READ)
+ max_size = max_sector; /* unbounded */
+
+ /* claim buffer track if needed */
+ if (buffer_track != raw_cmd->track || /* bad track */
+ buffer_drive !=current_drive || /* bad drive */
+ sector_t > buffer_max ||
+ sector_t < buffer_min ||
+ ((CT(COMMAND) == FD_READ ||
+ (aligned_sector_t == sector_t && CURRENT->nr_sectors >= ssize))&&
+ max_sector > 2 * max_buffer_sectors + buffer_min &&
+ max_size + sector_t > 2 * max_buffer_sectors + buffer_min)
+ /* not enough space */){
+ buffer_track = -1;
+ buffer_drive = current_drive;
+ buffer_max = buffer_min = aligned_sector_t;
+ }
+ raw_cmd->kernel_data = floppy_track_buffer +
+ ((aligned_sector_t-buffer_min)<<9);
+
+ if (CT(COMMAND) == FD_WRITE){
+ /* copy write buffer to track buffer.
+ * If we get here, we know that the write
+ * is either aligned or the data is already in the buffer
+ * (the buffer will be overwritten). */
+#ifdef FLOPPY_SANITY_CHECK
+ if (sector_t != aligned_sector_t && buffer_track == -1)
+ DPRINT("internal error offset !=0 on write\n");
+#endif
+ buffer_track = raw_cmd->track;
+ buffer_drive = current_drive;
+ copy_buffer(ssize, max_sector, 2*max_buffer_sectors+buffer_min);
+ } else
+ transfer_size(ssize, max_sector,
+ 2*max_buffer_sectors+buffer_min-aligned_sector_t);
+
+ /* round up current_count_sectors to get dma xfer size */
+ raw_cmd->length = sector_t+current_count_sectors-aligned_sector_t;
+ raw_cmd->length = ((raw_cmd->length -1)|(ssize-1))+1;
+ raw_cmd->length <<= 9;
+#ifdef FLOPPY_SANITY_CHECK
+ if ((raw_cmd->length < current_count_sectors << 9) ||
+ (raw_cmd->kernel_data != CURRENT->buffer &&
+ CT(COMMAND) == FD_WRITE &&
+ (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
+ aligned_sector_t < buffer_min)) ||
+ raw_cmd->length % (128 << SIZECODE) ||
+ raw_cmd->length <= 0 || current_count_sectors <= 0){
+ DPRINT2("fractionary current count b=%lx s=%lx\n",
+ raw_cmd->length, current_count_sectors);
+ if (raw_cmd->kernel_data != CURRENT->buffer)
+ printk("addr=%d, length=%ld\n",
+ (int) ((raw_cmd->kernel_data -
+ floppy_track_buffer) >> 9),
+ current_count_sectors);
+ printk("st=%d ast=%d mse=%d msi=%d\n",
+ sector_t, aligned_sector_t, max_sector, max_size);
+ printk("ssize=%x SIZECODE=%d\n", ssize, SIZECODE);
+ printk("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n",
+ COMMAND, SECTOR, HEAD, TRACK);
+ printk("buffer drive=%d\n", buffer_drive);
+ printk("buffer track=%d\n", buffer_track);
+ printk("buffer_min=%d\n", buffer_min);
+ printk("buffer_max=%d\n", buffer_max);
+ return 0;
+ }
+
+ if (raw_cmd->kernel_data != CURRENT->buffer){
+ if (raw_cmd->kernel_data < floppy_track_buffer ||
+ current_count_sectors < 0 ||
+ raw_cmd->length < 0 ||
+ raw_cmd->kernel_data + raw_cmd->length >
+ floppy_track_buffer + (max_buffer_sectors << 10)){
+ DPRINT("buffer overrun in schedule dma\n");
+ printk("sector_t=%d buffer_min=%d current_count=%ld\n",
+ sector_t, buffer_min,
+ raw_cmd->length >> 9);
+ printk("current_count_sectors=%ld\n",
+ current_count_sectors);
+ if (CT(COMMAND) == FD_READ)
+ printk("read\n");
+ if (CT(COMMAND) == FD_WRITE)
+ printk("write\n");
+ return 0;
+ }
+ } else if (raw_cmd->length > CURRENT->nr_sectors << 9 ||
+ current_count_sectors > CURRENT->nr_sectors){
+ DPRINT("buffer overrun in direct transfer\n");
+ return 0;
+ } else if (raw_cmd->length < current_count_sectors << 9){
+ DPRINT("more sectors than bytes\n");
+ printk("bytes=%ld\n", raw_cmd->length >> 9);
+ printk("sectors=%ld\n", current_count_sectors);
+ }
+ if (raw_cmd->length == 0){
+ DPRINT("zero dma transfer attempted from make_raw_request\n");
+ return 0;
+ }
+#endif
+ return 2;
+}
+
+static void redo_fd_request(void)
+{
+#define REPEAT {request_done(0); continue; }
+ kdev_t device;
+ int tmp;
+
+ lastredo = jiffies;
+ if (current_drive < N_DRIVE)
+ floppy_off(current_drive);
+
+ if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){
+ DPRINT("current not active!\n");
+ return;
+ }
+
+ while(1){
+ if (!CURRENT) {
+ CLEAR_INTR;
+ unlock_fdc();
+ return;
+ }
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
+ panic(DEVICE_NAME ": request list destroyed");
+ if (CURRENT->bh && !buffer_locked(CURRENT->bh))
+ panic(DEVICE_NAME ": block not locked");
+
+ device = CURRENT->rq_dev;
+ set_fdc(DRIVE(device));
+ reschedule_timeout(CURRENTD, "redo fd request", 0);
+
+ set_floppy(device);
+ raw_cmd = & default_raw_cmd;
+ raw_cmd->flags = 0;
+ if (start_motor(redo_fd_request)) return;
+ if (test_bit(current_drive, &fake_change) ||
+ TESTF(FD_DISK_CHANGED)){
+ DPRINT("disk absent or changed during operation\n");
+ REPEAT;
+ }
+ if (!floppy) { /* Autodetection */
+ if (!probing){
+ DRS->probed_format = 0;
+ if (next_valid_format()){
+ DPRINT("no autodetectable formats\n");
+ floppy = NULL;
+ REPEAT;
+ }
+ }
+ probing = 1;
+ floppy = floppy_type+DP->autodetect[DRS->probed_format];
+ } else
+ probing = 0;
+ errors = & (CURRENT->errors);
+ tmp = make_raw_rw_request();
+ if (tmp < 2){
+ request_done(tmp);
+ continue;
+ }
+
+ if (TESTF(FD_NEED_TWADDLE))
+ twaddle();
+ floppy_tq.routine = (void *)(void *) floppy_start;
+ queue_task(&floppy_tq, &tq_timer);
+#ifdef DEBUGT
+ debugt("queue fd request");
+#endif
+ return;
+ }
+#undef REPEAT
+}
+
+static struct cont_t rw_cont={
+ rw_interrupt,
+ redo_fd_request,
+ bad_flp_intr,
+ request_done };
+
+static struct tq_struct request_tq =
+{ 0, 0, (void *) (void *) redo_fd_request, 0 };
+
+static void process_fd_request(void)
+{
+ cont = &rw_cont;
+ queue_task(&request_tq, &tq_timer);
+}
+
+static void do_fd_request(void)
+{
+ if (fdc_busy){
+ /* fdc busy, this new request will be treated when the
+ current one is done */
+ is_alive("do fd request, old request running");
+ return;
+ }
+ lock_fdc(MAXTIMEOUT,0);
+ process_fd_request();
+ is_alive("do fd request");
+}
+
+static struct cont_t poll_cont={
+ success_and_wakeup,
+ floppy_ready,
+ generic_failure,
+ generic_done };
+
+static int poll_drive(int interruptible, int flag)
+{
+ int ret;
+ /* no auto-sense, just clear dcl */
+ raw_cmd = &default_raw_cmd;
+ raw_cmd->flags= flag;
+ raw_cmd->track=0;
+ raw_cmd->cmd_count=0;
+ cont = &poll_cont;
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("setting NEWCHANGE in poll_drive\n");
+ }
+#endif
+ SETF(FD_DISK_NEWCHANGE);
+ WAIT(floppy_ready);
+ return ret;
+}
+
+/*
+ * User triggered reset
+ * ====================
+ */
+
+static void reset_intr(void)
+{
+ printk("weird, reset interrupt called\n");
+}
+
+static struct cont_t reset_cont={
+ reset_intr,
+ success_and_wakeup,
+ generic_failure,
+ generic_done };
+
+static int user_reset_fdc(int drive, int arg, int interruptible)
+{
+ int ret;
+
+ ret=0;
+ LOCK_FDC(drive,interruptible);
+ if (arg == FD_RESET_ALWAYS)
+ FDCS->reset=1;
+ if (FDCS->reset){
+ cont = &reset_cont;
+ WAIT(reset_fdc);
+ }
+ process_fd_request();
+ return ret;
+}
+
+/*
+ * Misc Ioctl's and support
+ * ========================
+ */
+static int fd_copyout(void *param, const void *address, int size)
+{
+ int ret;
+
+ ECALL(verify_area(VERIFY_WRITE,param,size));
+ fd_cacheflush(address, size); /* is this necessary ??? */
+ /* Ralf: Yes; only the L2 cache is completely chipset
+ controlled */
+ memcpy_tofs(param,(void *) address, size);
+ return 0;
+}
+
+static int fd_copyin(void *param, void *address, int size)
+{
+ int ret;
+
+ ECALL(verify_area(VERIFY_READ,param,size));
+ memcpy_fromfs((void *) address, param, size);
+ return 0;
+}
+
+#define COPYOUT(x) ECALL(fd_copyout((void *)param, &(x), sizeof(x)))
+#define COPYIN(x) ECALL(fd_copyin((void *)param, &(x), sizeof(x)))
+
+static inline const char *drive_name(int type, int drive)
+{
+ struct floppy_struct *floppy;
+
+ if (type)
+ floppy = floppy_type + type;
+ else {
+ if (UDP->native_format)
+ floppy = floppy_type + UDP->native_format;
+ else
+ return "(null)";
+ }
+ if (floppy->name)
+ return floppy->name;
+ else
+ return "(null)";
+}
+
+
+/* raw commands */
+static void raw_cmd_done(int flag)
+{
+ int i;
+
+ if (!flag) {
+ raw_cmd->flags = FD_RAW_FAILURE;
+ raw_cmd->flags |= FD_RAW_HARDFAILURE;
+ } else {
+ raw_cmd->reply_count = inr;
+ for (i=0; i< raw_cmd->reply_count; i++)
+ raw_cmd->reply[i] = reply_buffer[i];
+
+ if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE))
+ raw_cmd->length = get_dma_residue(FLOPPY_DMA);
+
+ if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
+ (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0)))
+ raw_cmd->flags |= FD_RAW_FAILURE;
+
+ if (disk_change(current_drive))
+ raw_cmd->flags |= FD_RAW_DISK_CHANGE;
+ else
+ raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
+ if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
+ motor_off_callback(current_drive);
+
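+ /* chain to the next command unless the flags ask us to stop on this
+ * outcome (failure with FD_RAW_STOP_IF_FAILURE, or success with
+ * FD_RAW_STOP_IF_SUCCESS) */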
+ if (raw_cmd->next &&
+ (!(raw_cmd->flags & FD_RAW_FAILURE) ||
+ !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
+ ((raw_cmd->flags & FD_RAW_FAILURE) ||
+ !(raw_cmd->flags &FD_RAW_STOP_IF_SUCCESS))) {
+ raw_cmd = raw_cmd->next;
+ return;
+ }
+ }
+ generic_done(flag);
+}
+
+
+static struct cont_t raw_cmd_cont={
+ success_and_wakeup,
+ floppy_start,
+ generic_failure,
+ raw_cmd_done
+};
+
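+/* copy the reply of each command in the chain back to user space, together
+ * with any data read into the kernel bounce buffer */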
+static inline int raw_cmd_copyout(int cmd, char *param,
+ struct floppy_raw_cmd *ptr)
+{
+ struct old_floppy_raw_cmd old_raw_cmd;
+ int ret;
+
+ while(ptr) {
+ if (cmd == OLDFDRAWCMD) {
+ old_raw_cmd.flags = ptr->flags;
+ old_raw_cmd.data = ptr->data;
+ old_raw_cmd.length = ptr->length;
+ old_raw_cmd.rate = ptr->rate;
+ old_raw_cmd.reply_count = ptr->reply_count;
+ memcpy(old_raw_cmd.reply, ptr->reply, 7);
+ COPYOUT(old_raw_cmd);
+ param += sizeof(old_raw_cmd);
+ } else {
+ COPYOUT(*ptr);
+ param += sizeof(struct floppy_raw_cmd);
+ }
+
+ if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length){
+ if (ptr->length>=0 && ptr->length<=ptr->buffer_length)
+ ECALL(fd_copyout(ptr->data,
+ ptr->kernel_data,
+ ptr->buffer_length -
+ ptr->length));
+ }
+ ptr = ptr->next;
+ }
+ return 0;
+}
+
+
+static void raw_cmd_free(struct floppy_raw_cmd **ptr)
+{
+ struct floppy_raw_cmd *next,*this;
+
+ this = *ptr;
+ *ptr = 0;
+ while(this) {
+ if (this->buffer_length) {
+ free_pages((unsigned long)this->kernel_data,
+ __get_order(this->buffer_length));
+ this->buffer_length = 0;
+ }
+ next = this->next;
+ kfree(this);
+ this = next;
+ }
+}
+
+
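+/* copy a raw command from user space, following FD_RAW_MORE to build a
+ * chain; a DMA-able kernel buffer is allocated for each read or write
+ * transfer */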
+static inline int raw_cmd_copyin(int cmd, char *param,
+ struct floppy_raw_cmd **rcmd)
+{
+ struct floppy_raw_cmd *ptr;
+ struct old_floppy_raw_cmd old_raw_cmd;
+ int ret;
+ int i;
+
+ *rcmd = 0;
+ while(1) {
+ ptr = (struct floppy_raw_cmd *)
+ kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
+ if (!ptr)
+ return -ENOMEM;
+ *rcmd = ptr;
+ if (cmd == OLDFDRAWCMD){
+ COPYIN(old_raw_cmd);
+ ptr->flags = old_raw_cmd.flags;
+ ptr->data = old_raw_cmd.data;
+ ptr->length = old_raw_cmd.length;
+ ptr->rate = old_raw_cmd.rate;
+ ptr->cmd_count = old_raw_cmd.cmd_count;
+ ptr->track = old_raw_cmd.track;
+ ptr->phys_length = 0;
+ ptr->next = 0;
+ ptr->buffer_length = 0;
+ memcpy(ptr->cmd, old_raw_cmd.cmd, 9);
+ param += sizeof(struct old_floppy_raw_cmd);
+ if (ptr->cmd_count > 9)
+ return -EINVAL;
+ } else {
+ COPYIN(*ptr);
+ ptr->next = 0;
+ ptr->buffer_length = 0;
+ param += sizeof(struct floppy_raw_cmd);
+ if (ptr->cmd_count > 16)
+ return -EINVAL;
+ }
+
+ for (i=0; i< 16; i++)
+ ptr->reply[i] = 0;
+ ptr->resultcode = 0;
+ ptr->kernel_data = 0;
+
+ if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
+ if (ptr->length <= 0)
+ return -EINVAL;
+ ptr->kernel_data =(char*)dma_mem_alloc(ptr->length);
+ if (!ptr->kernel_data)
+ return -ENOMEM;
+ ptr->buffer_length = ptr->length;
+ }
+ if ( ptr->flags & FD_RAW_READ )
+ ECALL( verify_area( VERIFY_WRITE, ptr->data,
+ ptr->length ));
+ if (ptr->flags & FD_RAW_WRITE)
+ ECALL(fd_copyin(ptr->data, ptr->kernel_data,
+ ptr->length));
+ rcmd = & (ptr->next);
+ if (!(ptr->flags & FD_RAW_MORE))
+ return 0;
+ ptr->rate &= 0x43;
+ }
+}
+
+
+static int raw_cmd_ioctl(int cmd, void *param)
+{
+ int drive, ret, ret2;
+ struct floppy_raw_cmd *my_raw_cmd;
+
+ if (FDCS->rawcmd <= 1)
+ FDCS->rawcmd = 1;
+ for (drive= 0; drive < N_DRIVE; drive++){
+ if (FDC(drive) != fdc)
+ continue;
+ if (drive == current_drive){
+ if (UDRS->fd_ref > 1){
+ FDCS->rawcmd = 2;
+ break;
+ }
+ } else if (UDRS->fd_ref){
+ FDCS->rawcmd = 2;
+ break;
+ }
+ }
+
+ if (FDCS->reset)
+ return -EIO;
+
+ ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
+ if (ret) {
+ raw_cmd_free(&my_raw_cmd);
+ return ret;
+ }
+
+ raw_cmd = my_raw_cmd;
+ cont = &raw_cmd_cont;
+ ret=wait_til_done(floppy_start,1);
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from raw_cmd ioctl\n");
+ }
+#endif
+
+ if (ret != -EINTR && FDCS->reset)
+ ret = -EIO;
+
+ DRS->track = NO_TRACK;
+
+ ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
+ if (!ret)
+ ret = ret2;
+ raw_cmd_free(&my_raw_cmd);
+ return ret;
+}
+
+static int invalidate_drive(kdev_t rdev)
+{
+ /* invalidate the buffer track to force a reread */
+ set_bit(DRIVE(rdev), &fake_change);
+ process_fd_request();
+ check_disk_change(rdev);
+ return 0;
+}
+
+
+static inline void clear_write_error(int drive)
+{
+ CLEARSTRUCT(UDRWE);
+}
+
+static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
+ int drive, int type, kdev_t device)
+{
+ int cnt;
+
+ /* sanity checking for parameters.*/
+ if (g->sect <= 0 ||
+ g->head <= 0 ||
+ g->track <= 0 ||
+ g->track > UDP->tracks>>STRETCH(g) ||
+ /* check if reserved bits are set */
+ (g->stretch&~(FD_STRETCH|FD_SWAPSIDES)) != 0)
+ return -EINVAL;
+ if (type){
+ if (!suser())
+ return -EPERM;
+ LOCK_FDC(drive,1);
+ for (cnt = 0; cnt < N_DRIVE; cnt++){
+ if (ITYPE(drive_state[cnt].fd_device) == type &&
+ drive_state[cnt].fd_ref)
+ set_bit(drive, &fake_change);
+ }
+ floppy_type[type] = *g;
+ floppy_type[type].name="user format";
+ for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
+ floppy_sizes[cnt]= floppy_sizes[cnt+0x80]=
+ floppy_type[type].size>>1;
+ process_fd_request();
+ for (cnt = 0; cnt < N_DRIVE; cnt++){
+ if (ITYPE(drive_state[cnt].fd_device) == type &&
+ drive_state[cnt].fd_ref)
+ check_disk_change(
+ MKDEV(FLOPPY_MAJOR,
+ drive_state[cnt].fd_device));
+ }
+ } else {
+ LOCK_FDC(drive,1);
+ if (cmd != FDDEFPRM)
+ /* notice a disk change immediately, else
+ * we lose our settings immediately */
+ CALL(poll_drive(1,0));
+ user_params[drive] = *g;
+ if (buffer_drive == drive)
+ SUPBOUND(buffer_max, user_params[drive].sect);
+ current_type[drive] = &user_params[drive];
+ floppy_sizes[drive] = user_params[drive].size >> 1;
+ if (cmd == FDDEFPRM)
+ DRS->keep_data = -1;
+ else
+ DRS->keep_data = 1;
+ /* invalidation. Invalidate only when needed, i.e.
+ * when there are already sectors in the buffer cache
+ * whose number will change. This is useful, because
+ * mtools often changes the geometry of the disk after
+ * looking at the boot block */
+ if (DRS->maxblock > user_params[drive].sect || DRS->maxtrack)
+ invalidate_drive(device);
+ else
+ process_fd_request();
+ }
+ return 0;
+}
+
+/* handle obsolete ioctl's */
+static struct translation_entry {
+ int newcmd;
+ int oldcmd;
+ int oldsize; /* size of 0x00xx-style ioctl. Reflects old structures, thus
+ * use numeric values. NO SIZEOFS */
+} translation_table[]= {
+ {FDCLRPRM, 0, 0},
+ {FDSETPRM, 1, 28},
+ {FDDEFPRM, 2, 28},
+ {FDGETPRM, 3, 28},
+ {FDMSGON, 4, 0},
+ {FDMSGOFF, 5, 0},
+ {FDFMTBEG, 6, 0},
+ {FDFMTTRK, 7, 12},
+ {FDFMTEND, 8, 0},
+ {FDSETEMSGTRESH, 10, 0},
+ {FDFLUSH, 11, 0},
+ {FDSETMAXERRS, 12, 20},
+ {OLDFDRAWCMD, 30, 0},
+ {FDGETMAXERRS, 14, 20},
+ {FDGETDRVTYP, 16, 16},
+ {FDSETDRVPRM, 20, 88},
+ {FDGETDRVPRM, 21, 88},
+ {FDGETDRVSTAT, 22, 52},
+ {FDPOLLDRVSTAT, 23, 52},
+ {FDRESET, 24, 0},
+ {FDGETFDCSTAT, 25, 40},
+ {FDWERRORCLR, 27, 0},
+ {FDWERRORGET, 28, 24},
+ {FDRAWCMD, 0, 0},
+ {FDTWADDLE, 40, 0} };
+
+static inline int normalize_0x02xx_ioctl(int *cmd, int *size)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(translation_table); i++) {
+ if ((*cmd & 0xffff) == (translation_table[i].newcmd & 0xffff)){
+ *size = _IOC_SIZE(*cmd);
+ *cmd = translation_table[i].newcmd;
+ if (*size > _IOC_SIZE(*cmd)) {
+ printk("ioctl not yet supported\n");
+ return -EFAULT;
+ }
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static inline int xlate_0x00xx_ioctl(int *cmd, int *size)
+{
+ int i;
+ /* old ioctls for kernels <= 1.3.33 */
+ /* When the next even release comes around, we'll start
+ * warning against these.
+ * When the next odd release comes around, we'll fail with
+ * -EINVAL */
+ if(strcmp(system_utsname.version, "1.4.0") >= 0)
+ printk("obsolete floppy ioctl %x\n", *cmd);
+ if((system_utsname.version[0] == '1' &&
+ strcmp(system_utsname.version, "1.5.0") >= 0) ||
+ (system_utsname.version[0] >= '2' &&
+ strcmp(system_utsname.version, "2.1.0") >= 0))
+ return -EINVAL;
+ for (i=0; i < ARRAY_SIZE(translation_table); i++) {
+ if (*cmd == translation_table[i].oldcmd) {
+ *size = translation_table[i].oldsize;
+ *cmd = translation_table[i].newcmd;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long param)
+{
+#define IOCTL_MODE_BIT 8
+#define OPEN_WRITE_BIT 16
+#define IOCTL_ALLOWED (filp && (filp->f_mode & IOCTL_MODE_BIT))
+#define OUT(c,x) case c: outparam = (const char *) (x); break
+#define IN(c,x,tag) case c: *(x) = inparam. tag ; return 0
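+/* OUT: ioctls that return a structure to user space; IN: ioctls that take a
+ * structure from user space and store it */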
+
+ int i,drive,type;
+ kdev_t device;
+ int ret;
+ int size;
+ union inparam {
+ struct floppy_struct g; /* geometry */
+ struct format_descr f;
+ struct floppy_max_errors max_errors;
+ struct floppy_drive_params dp;
+ } inparam; /* parameters coming from user space */
+ const char *outparam; /* parameters passed back to user space */
+
+ device = inode->i_rdev;
+ switch (cmd) {
+ RO_IOCTLS(device,param);
+ }
+ type = TYPE(device);
+ drive = DRIVE(device);
+
+ /* convert the old style command into a new style command */
+ if ((cmd & 0xff00) == 0x0200) {
+ ECALL(normalize_0x02xx_ioctl(&cmd, &size));
+ } else if ((cmd & 0xff00) == 0x0000) {
+ ECALL(xlate_0x00xx_ioctl(&cmd, &size));
+ } else
+ return -EINVAL;
+
+ /* permission checks */
+ if (((cmd & 0x80) && !suser()) ||
+ ((cmd & 0x40) && !IOCTL_ALLOWED))
+ return -EPERM;
+
+ /* verify writability of result, and fail early */
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ ECALL(verify_area(VERIFY_WRITE,(void *) param, size));
+
+ /* copyin */
+ CLEARSTRUCT(&inparam);
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ ECALL(fd_copyin((void *)param, &inparam, size))
+
+ switch (cmd) {
+ case FDCLRPRM:
+ LOCK_FDC(drive,1);
+ current_type[drive] = NULL;
+ floppy_sizes[drive] = MAX_DISK_SIZE;
+ UDRS->keep_data = 0;
+ return invalidate_drive(device);
+ case FDSETPRM:
+ case FDDEFPRM:
+ return set_geometry(cmd, & inparam.g,
+ drive, type, device);
+ case FDGETPRM:
+ if (type)
+ outparam = (char *) &floppy_type[type];
+ else
+ outparam = (char *) current_type[drive];
+ if(!outparam)
+ return -ENODEV;
+ break;
+
+ case FDMSGON:
+ UDP->flags |= FTD_MSG;
+ return 0;
+ case FDMSGOFF:
+ UDP->flags &= ~FTD_MSG;
+ return 0;
+
+ case FDFMTBEG:
+ LOCK_FDC(drive,1);
+ CALL(poll_drive(1, FD_RAW_NEED_DISK));
+ ret = UDRS->flags;
+ process_fd_request();
+ if(ret & FD_VERIFY)
+ return -ENODEV;
+ if(!(ret & FD_DISK_WRITABLE))
+ return -EROFS;
+ return 0;
+ case FDFMTTRK:
+ if (UDRS->fd_ref != 1)
+ return -EBUSY;
+ return do_format(device, &inparam.f);
+ case FDFMTEND:
+ case FDFLUSH:
+ LOCK_FDC(drive,1);
+ return invalidate_drive(device);
+
+ case FDSETEMSGTRESH:
+ UDP->max_errors.reporting =
+ (unsigned short) (param & 0x0f);
+ return 0;
+ OUT(FDGETMAXERRS, &UDP->max_errors);
+ IN(FDSETMAXERRS, &UDP->max_errors, max_errors);
+
+ case FDGETDRVTYP:
+ outparam = drive_name(type,drive);
+ SUPBOUND(size,strlen(outparam)+1);
+ break;
+
+ IN(FDSETDRVPRM, UDP, dp);
+ OUT(FDGETDRVPRM, UDP);
+
+ case FDPOLLDRVSTAT:
+ LOCK_FDC(drive,1);
+ CALL(poll_drive(1, FD_RAW_NEED_DISK));
+ process_fd_request();
+ /* fall through */
+ OUT(FDGETDRVSTAT, UDRS);
+
+ case FDRESET:
+ return user_reset_fdc(drive, (int)param, 1);
+
+ OUT(FDGETFDCSTAT,UFDCS);
+
+ case FDWERRORCLR:
+ CLEARSTRUCT(UDRWE);
+ return 0;
+ OUT(FDWERRORGET,UDRWE);
+
+ case OLDFDRAWCMD:
+ case FDRAWCMD:
+ if (type)
+ return -EINVAL;
+ LOCK_FDC(drive,1);
+ set_floppy(device);
+ CALL(i = raw_cmd_ioctl(cmd,(void *) param));
+ process_fd_request();
+ return i;
+
+ case FDTWADDLE:
+ LOCK_FDC(drive,1);
+ twaddle();
+ process_fd_request();
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ return fd_copyout((void *)param, outparam, size);
+ else
+ return 0;
+#undef IOCTL_ALLOWED
+#undef OUT
+#undef IN
+}
+
+static void config_types(void)
+{
+ int first=1;
+ int drive;
+
+ /* read drive info out of physical cmos */
+ drive=0;
+ if (!UDP->cmos)
+ UDP->cmos= FLOPPY0_TYPE;
+ drive=1;
+ if (!UDP->cmos && FLOPPY1_TYPE)
+ UDP->cmos = FLOPPY1_TYPE;
+
+ /* XXX */
+ /* additional physical CMOS drive detection should go here */
+
+ for (drive=0; drive < N_DRIVE; drive++){
+ if (UDP->cmos >= 0 && UDP->cmos <= NUMBER(default_drive_params))
+ memcpy((char *) UDP,
+ (char *) (&default_drive_params[(int)UDP->cmos].params),
+ sizeof(struct floppy_drive_params));
+ if (UDP->cmos){
+ if (first)
+ printk("Floppy drive(s): ");
+ else
+ printk(", ");
+ first=0;
+ if (UDP->cmos > 0){
+ allowed_drive_mask |= 1 << drive;
+ printk("fd%d is %s", drive,
+ default_drive_params[(int)UDP->cmos].name);
+ } else
+ printk("fd%d is unknown type %d",drive,
+ UDP->cmos);
+ }
+ }
+ if (!first)
+ printk("\n");
+}
+
+static int floppy_read(struct inode * inode, struct file * filp,
+ char * buf, int count)
+{
+ int drive = DRIVE(inode->i_rdev);
+
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ return -ENXIO;
+ return block_read(inode, filp, buf, count);
+}
+
+static int floppy_write(struct inode * inode, struct file * filp,
+ const char * buf, int count)
+{
+ int block;
+ int ret;
+ int drive = DRIVE(inode->i_rdev);
+
+ if (!UDRS->maxblock)
+ UDRS->maxblock=1;/* make change detectable */
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ return -ENXIO;
+ if (!UTESTF(FD_DISK_WRITABLE))
+ return -EROFS;
+ block = (filp->f_pos + count) >> 9;
+ INFBOUND(UDRS->maxblock, block);
+ ret= block_write(inode, filp, buf, count);
+ return ret;
+}
+
+static void floppy_release(struct inode * inode, struct file * filp)
+{
+ int drive;
+
+ drive = DRIVE(inode->i_rdev);
+
+ if (!filp || (filp->f_mode & (2 | OPEN_WRITE_BIT)))
+ /* if the file is mounted OR (writable now AND writable at
+ * open time) Linus: Does this cover all cases? */
+ block_fsync(inode,filp);
+
+ if (UDRS->fd_ref < 0)
+ UDRS->fd_ref=0;
+ else if (!UDRS->fd_ref--) {
+ DPRINT("floppy_release with fd_ref == 0");
+ UDRS->fd_ref = 0;
+ }
+ floppy_release_irq_and_dma();
+}
+
+/*
+ * floppy_open checks for aliasing (/dev/fd0 can be the same as
+ * /dev/PS0 etc), and disallows simultaneous access to the same
+ * drive with different device numbers.
+ */
+#define RETERR(x) do{floppy_release(inode,filp); return -(x);}while(0)
+
+static int floppy_open(struct inode * inode, struct file * filp)
+{
+ int drive;
+ int old_dev;
+ int try;
+ char *tmp;
+
+ if (!filp) {
+ DPRINT("Weird, open called with filp=0\n");
+ return -EIO;
+ }
+
+ drive = DRIVE(inode->i_rdev);
+ if (drive >= N_DRIVE ||
+ !(allowed_drive_mask & (1 << drive)) ||
+ fdc_state[FDC(drive)].version == FDC_NONE)
+ return -ENXIO;
+
+ if (TYPE(inode->i_rdev) >= NUMBER(floppy_type))
+ return -ENXIO;
+ old_dev = UDRS->fd_device;
+ if (UDRS->fd_ref && old_dev != MINOR(inode->i_rdev))
+ return -EBUSY;
+
+ if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)){
+ USETF(FD_DISK_CHANGED);
+ USETF(FD_VERIFY);
+ }
+
+ if (UDRS->fd_ref == -1 ||
+ (UDRS->fd_ref && (filp->f_flags & O_EXCL)))
+ return -EBUSY;
+
+ if (floppy_grab_irq_and_dma())
+ return -EBUSY;
+
+ if (filp->f_flags & O_EXCL)
+ UDRS->fd_ref = -1;
+ else
+ UDRS->fd_ref++;
+
+ if (!floppy_track_buffer){
+ /* if opening an ED drive, reserve a big buffer,
+ * else reserve a small one */
+ if ((UDP->cmos == 6) || (UDP->cmos == 5))
+ try = 64; /* Only 48 actually useful */
+ else
+ try = 32; /* Only 24 actually useful */
+
+ tmp=(char *)dma_mem_alloc(1024 * try);
+ if (!tmp) {
+ try >>= 1; /* buffer only one side */
+ INFBOUND(try, 16);
+ tmp= (char *)dma_mem_alloc(1024*try);
+ }
+ if (!tmp) {
+ DPRINT("Unable to allocate DMA memory\n");
+ RETERR(ENXIO);
+ }
+ if (floppy_track_buffer){
+ free_pages((unsigned long)tmp,__get_order(try*1024));
+ }else {
+ buffer_min = buffer_max = -1;
+ floppy_track_buffer = tmp;
+ max_buffer_sectors = try;
+ }
+ }
+
+ UDRS->fd_device = MINOR(inode->i_rdev);
+ if (old_dev != -1 && old_dev != MINOR(inode->i_rdev)) {
+ if (buffer_drive == drive)
+ buffer_track = -1;
+ invalidate_buffers(MKDEV(FLOPPY_MAJOR,old_dev));
+ }
+
+ /* Allow ioctls if we have write-permissions even if read-only open */
+ if ((filp->f_mode & 2) || (permission(inode,2) == 0))
+ filp->f_mode |= IOCTL_MODE_BIT;
+ if (filp->f_mode & 2)
+ filp->f_mode |= OPEN_WRITE_BIT;
+
+ if (UFDCS->rawcmd == 1)
+ UFDCS->rawcmd = 2;
+
+ if (filp->f_flags & O_NDELAY)
+ return 0;
+ if (filp->f_mode & 3) {
+ UDRS->last_checked = 0;
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ RETERR(ENXIO);
+ }
+ if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE)))
+ RETERR(EROFS);
+ return 0;
+#undef RETERR
+}
+
+/*
+ * Check if the disk has been changed or if a change has been faked.
+ */
+static int check_floppy_change(kdev_t dev)
+{
+ int drive = DRIVE(dev);
+
+ if (MAJOR(dev) != MAJOR_NR) {
+ DPRINT("floppy_changed: not a floppy\n");
+ return 0;
+ }
+
+ if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY))
+ return 1;
+
+ if (UDRS->last_checked + UDP->checkfreq < jiffies){
+ lock_fdc(drive,0);
+ poll_drive(0,0);
+ process_fd_request();
+ }
+
+ if (UTESTF(FD_DISK_CHANGED) ||
+ UTESTF(FD_VERIFY) ||
+ test_bit(drive, &fake_change) ||
+ (!TYPE(dev) && !current_type[drive]))
+ return 1;
+ return 0;
+}
+
+/* revalidate the floppy disk, i.e. trigger format autodetection by reading
+ * the bootblock (block 0). "Autodetection" is also needed to check whether
+ * there is a disk in the drive at all... Thus we also do it for fixed
+ * geometry formats */
+static int floppy_revalidate(kdev_t dev)
+{
+#define NO_GEOM (!current_type[drive] && !TYPE(dev))
+ struct buffer_head * bh;
+ int drive=DRIVE(dev);
+ int cf;
+
+ if (UTESTF(FD_DISK_CHANGED) ||
+ UTESTF(FD_VERIFY) ||
+ test_bit(drive, &fake_change) ||
+ NO_GEOM){
+ lock_fdc(drive,0);
+ cf = UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY);
+ if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)){
+ process_fd_request(); /*already done by another thread*/
+ return 0;
+ }
+ UDRS->maxblock = 0;
+ UDRS->maxtrack = 0;
+ if (buffer_drive == drive)
+ buffer_track = -1;
+ clear_bit(drive, &fake_change);
+ UCLEARF(FD_DISK_CHANGED);
+ if (cf)
+ UDRS->generation++;
+ if (NO_GEOM){
+ /* auto-sensing */
+ int size = floppy_blocksizes[MINOR(dev)];
+ if (!size)
+ size = 1024;
+ if (!(bh = getblk(dev,0,size))){
+ process_fd_request();
+ return 1;
+ }
+ if (bh && !buffer_uptodate(bh))
+ ll_rw_block(READ, 1, &bh);
+ process_fd_request();
+ wait_on_buffer(bh);
+ brelse(bh);
+ return 0;
+ }
+ if (cf)
+ poll_drive(0, FD_RAW_NEED_DISK);
+ process_fd_request();
+ }
+ return 0;
+}
+
+static struct file_operations floppy_fops = {
+ NULL, /* lseek - default */
+ floppy_read, /* read - general block-dev read */
+ floppy_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ fd_ioctl, /* ioctl */
+ NULL, /* mmap */
+ floppy_open, /* open */
+ floppy_release, /* release */
+ block_fsync, /* fsync */
+ NULL, /* fasync */
+ check_floppy_change, /* media_change */
+ floppy_revalidate, /* revalidate */
+};
+
+/*
+ * Floppy Driver initialisation
+ * =============================
+ */
+
+/* Determine the floppy disk controller type */
+/* This routine was written by David C. Niemi */
+static char get_fdc_version(void)
+{
+ int r;
+
+ output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */
+ if (FDCS->reset)
+ return FDC_NONE;
+ if ((r = result()) <= 0x00)
+ return FDC_NONE; /* No FDC present ??? */
+ if ((r==1) && (reply_buffer[0] == 0x80)){
+ printk("FDC %d is a 8272A\n",fdc);
+ return FDC_8272A; /* 8272a/765 don't know DUMPREGS */
+ }
+ if (r != 10) {
+ printk("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ output_byte(FD_VERSION);
+ r = result();
+ if ((r == 1) && (reply_buffer[0] == 0x80)){
+ printk("FDC %d is a 82072\n",fdc);
+ return FDC_82072; /* 82072 doesn't know VERSION */
+ }
+ if ((r != 1) || (reply_buffer[0] != 0x90)) {
+ printk("FDC %d init: VERSION: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ output_byte(FD_UNLOCK);
+ r = result();
+ if ((r == 1) && (reply_buffer[0] == 0x80)){
+ printk("FDC %d is a pre-1991 82077\n", fdc);
+ return FDC_82077_ORIG; /* Pre-1991 82077 doesn't know LOCK/UNLOCK */
+ }
+ if ((r != 1) || (reply_buffer[0] != 0x00)) {
+ printk("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ output_byte(FD_PARTID);
+ r = result();
+ if (r != 1) {
+ printk("FDC %d init: PARTID: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ if (reply_buffer[0] == 0x80) {
+ printk("FDC %d is a post-1991 82077\n",fdc);
+ return FDC_82077; /* Revised 82077AA passes all the tests */
+ }
+ switch (reply_buffer[0] >> 5) {
+ case 0x0:
+ output_byte(FD_SAVE);
+ r = result();
+ if (r != 16) {
+ printk("FDC %d init: SAVE: unexpected return of %d bytes.\n", fdc, r);
+ return FDC_UNKNOWN;
+ }
+ if (!(reply_buffer[0] & 0x40)) {
+ printk("FDC %d is a 3Volt 82078SL.\n",fdc);
+ return FDC_82078;
+ }
+ /* Either a 82078-1 or a 82078SL running at 5Volt */
+ printk("FDC %d is a 82078-1.\n",fdc);
+ return FDC_82078_1;
+ case 0x1:
+ printk("FDC %d is a 44pin 82078\n",fdc);
+ return FDC_82078;
+ case 0x2:
+ printk("FDC %d is a S82078B\n", fdc);
+ return FDC_S82078B;
+ case 0x3:
+ printk("FDC %d is a National Semiconductor PC87306\n", fdc);
+ return FDC_87306;
+ default:
+ printk("FDC %d init: 82077 variant with PARTID=%d.\n",
+ fdc, reply_buffer[0] >> 5);
+ return FDC_82077_UNKN;
+ }
+} /* get_fdc_version */
+
+/* lilo configuration */
+
+/* we make the invert_dcl function global. One day, somebody might
+ * want to centralize all thinkpad-related options into one lilo option;
+ * there are just so many thinkpad-related quirks! */
+void floppy_invert_dcl(int *ints,int param)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(default_drive_params); i++){
+ if (param)
+ default_drive_params[i].params.flags |= 0x80;
+ else
+ default_drive_params[i].params.flags &= ~0x80;
+ }
+ DPRINT("Configuring drives for inverted dcl\n");
+}
+
+static void daring(int *ints,int param)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(default_drive_params); i++){
+ if (param){
+ default_drive_params[i].params.select_delay = 0;
+ default_drive_params[i].params.flags |= FD_SILENT_DCL_CLEAR;
+ } else {
+ default_drive_params[i].params.select_delay = 2*HZ/100;
+ default_drive_params[i].params.flags &= ~FD_SILENT_DCL_CLEAR;
+ }
+ }
+ DPRINT1("Assuming %s floppy hardware\n", param ? "standard" : "broken");
+}
+
+static void allow_drives(int *ints, int param)
+{
+ allowed_drive_mask=param;
+ DPRINT1("setting allowed_drive_mask to 0x%x\n", param);
+}
+
+static void fdc2_adr(int *ints, int param)
+{
+ FDC2 = param;
+ if (param)
+ DPRINT1("enabling second fdc at address 0x%3x\n", FDC2);
+ else
+ DPRINT("disabling second fdc\n");
+}
+
+static void unex(int *ints,int param)
+{
+ print_unex = param;
+ DPRINT1("%sprinting messages for unexpected interrupts\n",
+ param ? "" : "not ");
+}
+
+static void set_cmos(int *ints, int dummy)
+{
+ int current_drive=0;
+
+ if (ints[0] != 2){
+		DPRINT("wrong number of parameters for cmos\n");
+ return;
+ }
+ current_drive = ints[1];
+ if (current_drive < 0 || current_drive >= 8){
+ DPRINT("bad drive for set_cmos\n");
+ return;
+ }
+ if (current_drive >= 4 && !FDC2)
+ fdc2_adr(0, 0x370);
+ if (ints[2] <= 0 || ints[2] >= NUMBER(default_drive_params)){
+ DPRINT1("bad cmos code %d\n", ints[2]);
+ return;
+ }
+ DP->cmos = ints[2];
+ DPRINT1("setting cmos code to %d\n", ints[2]);
+}
+
+static struct param_table {
+ const char *name;
+ void (*fn)(int *ints, int param);
+ int def_param;
+} config_params[]={
+ { "allowed_drive_mask", allow_drives, 0xff },
+ { "all_drives", allow_drives, 0xff },
+ { "asus_pci", allow_drives, 0x33 },
+
+ { "daring", daring, 1},
+
+ { "two_fdc", fdc2_adr, 0x370 },
+ { "one_fdc", fdc2_adr, 0 },
+
+ { "thinkpad", floppy_invert_dcl, 1 },
+
+ { "cmos", set_cmos, 0 },
+
+ { "unexpected_interrupts", unex, 1 },
+ { "no_unexpected_interrupts", unex, 0 },
+ { "L40SX", unex, 0 } };
+
+#define FLOPPY_SETUP
+void floppy_setup(char *str, int *ints)
+{
+ int i;
+ int param;
+ if (str)
+ for (i=0; i< ARRAY_SIZE(config_params); i++){
+ if (strcmp(str,config_params[i].name) == 0){
+ if (ints[0])
+ param = ints[1];
+ else
+ param = config_params[i].def_param;
+ config_params[i].fn(ints,param);
+ return;
+ }
+ }
+ if (str) {
+ DPRINT1("unknown floppy option [%s]\n", str);
+
+ DPRINT("allowed options are:");
+ for (i=0; i< ARRAY_SIZE(config_params); i++)
+ printk(" %s",config_params[i].name);
+ printk("\n");
+ } else
+ DPRINT("botched floppy option\n");
+ DPRINT("Read linux/drivers/block/README.fd\n");
+}
+
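For illustration, a hedged sketch of how two entries of the table above would be resolved by floppy_setup(); the mapping from a boot line to (str, ints) assumes the usual get_options() handling of leading numbers and is not shown in this patch.

/* Hypothetical caller, for illustration only. */
static void example_floppy_options(void)
{
	int no_args[11] = { 0 };		/* ints[0] == 0: no numbers given */
	int one_arg[11] = { 1, 0x33 };		/* ints[0] == 1, ints[1] == 0x33  */

	/* "floppy=daring": no numeric argument, so the table's def_param (1)
	 * is used and daring(ints, 1) runs. */
	floppy_setup("daring", no_args);

	/* "floppy=0x33,allowed_drive_mask": the parsed number overrides
	 * def_param, so allow_drives(ints, 0x33) runs. */
	floppy_setup("allowed_drive_mask", one_arg);
}
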
+int floppy_init(void)
+{
+ int i,unit,drive;
+ int have_no_fdc= -EIO;
+
+ raw_cmd = 0;
+
+ sti();
+
+ if (register_blkdev(MAJOR_NR,"fd",&floppy_fops)) {
+ printk("Unable to get major %d for floppy\n",MAJOR_NR);
+ return -EBUSY;
+ }
+
+ for (i=0; i<256; i++)
+ if (ITYPE(i))
+ floppy_sizes[i] = floppy_type[ITYPE(i)].size >> 1;
+ else
+ floppy_sizes[i] = MAX_DISK_SIZE;
+
+ blk_size[MAJOR_NR] = floppy_sizes;
+ blksize_size[MAJOR_NR] = floppy_blocksizes;
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
+ config_types();
+
+ for (i = 0; i < N_FDC; i++) {
+ fdc = i;
+ CLEARSTRUCT(FDCS);
+ FDCS->dtr = -1;
+ FDCS->dor = 0x4;
+ }
+
+ fdc_state[0].address = FDC1;
+#if N_FDC > 1
+ fdc_state[1].address = FDC2;
+#endif
+
+ if (floppy_grab_irq_and_dma()){
+ unregister_blkdev(MAJOR_NR,"fd");
+ return -EBUSY;
+ }
+
+ /* initialise drive state */
+ for (drive = 0; drive < N_DRIVE; drive++) {
+ CLEARSTRUCT(UDRS);
+ CLEARSTRUCT(UDRWE);
+ UDRS->flags = FD_VERIFY | FD_DISK_NEWCHANGE | FD_DISK_CHANGED;
+ UDRS->fd_device = -1;
+ floppy_track_buffer = NULL;
+ max_buffer_sectors = 0;
+ }
+
+ for (i = 0; i < N_FDC; i++) {
+ fdc = i;
+ FDCS->driver_version = FD_DRIVER_VERSION;
+ for (unit=0; unit<4; unit++)
+ FDCS->track[unit] = 0;
+ if (FDCS->address == -1)
+ continue;
+ FDCS->rawcmd = 2;
+ if (user_reset_fdc(-1,FD_RESET_ALWAYS,0)){
+ FDCS->address = -1;
+ continue;
+ }
+ /* Try to determine the floppy controller type */
+ FDCS->version = get_fdc_version();
+ if (FDCS->version == FDC_NONE){
+ FDCS->address = -1;
+ continue;
+ }
+
+ request_region(FDCS->address, 6, "floppy");
+ request_region(FDCS->address+7, 1, "floppy DIR");
+ /* address + 6 is reserved, and may be taken by IDE.
+ * Unfortunately, Adaptec doesn't know this :-(, */
+
+ have_no_fdc = 0;
+ /* Not all FDCs seem to be able to handle the version command
+ * properly, so force a reset for the standard FDC clones,
+ * to avoid interrupt garbage.
+ */
+ FDCS->has_fifo = FDCS->version >= FDC_82077_ORIG;
+ user_reset_fdc(-1,FD_RESET_ALWAYS,0);
+ }
+ fdc=0;
+ del_timer(&fd_timeout);
+ current_drive = 0;
+ floppy_release_irq_and_dma();
+ initialising=0;
+ if (have_no_fdc) {
+ DPRINT("no floppy controllers found\n");
+ unregister_blkdev(MAJOR_NR,"fd");
+ } else
+ virtual_dma_init();
+ return have_no_fdc;
+}
+
+static int floppy_grab_irq_and_dma(void)
+{
+ int i;
+ cli();
+ if (usage_count++){
+ sti();
+ return 0;
+ }
+ sti();
+ MOD_INC_USE_COUNT;
+ for (i=0; i< N_FDC; i++){
+ if (FDCS->address != -1){
+ fdc = i;
+ reset_fdc_info(1);
+ fd_outb(FDCS->dor, FD_DOR);
+ }
+ }
+ set_dor(0, ~0, 8); /* avoid immediate interrupt */
+
+ if (fd_request_irq()) {
+ DPRINT1("Unable to grab IRQ%d for the floppy driver\n",
+ FLOPPY_IRQ);
+ return -1;
+ }
+ if (fd_request_dma()) {
+ DPRINT1("Unable to grab DMA%d for the floppy driver\n",
+ FLOPPY_DMA);
+ fd_free_irq();
+ return -1;
+ }
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (FDCS->address != -1)
+ fd_outb(FDCS->dor, FD_DOR);
+ fdc = 0;
+ fd_enable_irq();
+ return 0;
+}
+
+static void floppy_release_irq_and_dma(void)
+{
+#ifdef FLOPPY_SANITY_CHECK
+ int drive;
+#endif
+ long tmpsize;
+ void *tmpaddr;
+
+ cli();
+ if (--usage_count){
+ sti();
+ return;
+ }
+ sti();
+ MOD_DEC_USE_COUNT;
+ fd_disable_dma();
+ fd_free_dma();
+ fd_disable_irq();
+ fd_free_irq();
+
+ set_dor(0, ~0, 8);
+#if N_FDC > 1
+ set_dor(1, ~8, 0);
+#endif
+ floppy_enable_hlt();
+
+ if (floppy_track_buffer && max_buffer_sectors) {
+ tmpsize = max_buffer_sectors*1024;
+ tmpaddr = (void *)floppy_track_buffer;
+ floppy_track_buffer = 0;
+ max_buffer_sectors = 0;
+ buffer_min = buffer_max = -1;
+ free_pages((unsigned long)tmpaddr, __get_order(tmpsize));
+ }
+
+#ifdef FLOPPY_SANITY_CHECK
+ for (drive=0; drive < N_FDC * 4; drive++)
+ if (motor_off_timer[drive].next)
+ printk("motor off timer %d still active\n", drive);
+
+ if (fd_timeout.next)
+ printk("floppy timer still active:%s\n", timeout_message);
+ if (fd_timer.next)
+ printk("auxiliary floppy timer still active\n");
+ if (floppy_tq.sync)
+ printk("task queue still active\n");
+#endif
+}
+
+
+#ifdef MODULE
+
+extern char *get_options(char *str, int *ints);
+
+static void mod_setup(char *pattern, void (*setup)(char *, int *))
+{
+ int i;
+ char c;
+ int j;
+ int match;
+ char buffer[100];
+ int ints[11];
+ int length = strlen(pattern)+1;
+
+ match=0;
+ j=1;
+
+ for (i=current->mm->env_start; i< current->mm->env_end; i ++){
+ c= get_fs_byte(i);
+ if (match){
+ if (j==99)
+ c='\0';
+ buffer[j] = c;
+ if (!c || c == ' ' || c == '\t'){
+ if (j){
+ buffer[j] = '\0';
+ setup(get_options(buffer,ints),ints);
+ }
+ j=0;
+ } else
+ j++;
+ if (!c)
+ break;
+ continue;
+ }
+ if ((!j && !c) || (j && c == pattern[j-1]))
+ j++;
+ else
+ j=0;
+ if (j==length){
+ match=1;
+ j=0;
+ }
+ }
+}
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+int init_module(void)
+{
+ printk("inserting floppy driver for %s\n", kernel_version);
+
+ mod_setup("floppy=", floppy_setup);
+
+ return floppy_init();
+}
+
+void cleanup_module(void)
+{
+ int fdc;
+
+ for (fdc=0; fdc<2; fdc++)
+ if (FDCS->address != -1){
+ release_region(FDCS->address, 6);
+ release_region(FDCS->address+7, 1);
+ }
+
+ unregister_blkdev(MAJOR_NR, "fd");
+
+ blk_dev[MAJOR_NR].request_fn = 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/i386/i386at/gpl/linux/block/genhd.c b/i386/i386at/gpl/linux/block/genhd.c
new file mode 100644
index 00000000..60cba6c1
--- /dev/null
+++ b/i386/i386at/gpl/linux/block/genhd.c
@@ -0,0 +1,610 @@
+/*
+ * Code extracted from
+ * linux/kernel/hd.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ *
+ * Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * in the early extended-partition checks and added DM partitions
+ *
+ * Support for DiskManager v6.0x added by Mark Lord (mlord@bnr.ca)
+ * with information provided by OnTrack. This now works for linux fdisk
+ * and LILO, as well as loadlin and bootln. Note that disks other than
+ * /dev/hda *must* have a "DOS" type 0x51 partition in the first slot (hda1).
+ *
+ * More flexible handling of extended partitions - aeb, 950831
+ *
+ * Check partition table on IDE disks for common CHS translations
+ */
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+
+#include <asm/system.h>
+
+#ifdef __alpha__
+/*
+ * On the Alpha, we get unaligned access exceptions on
+ * p->nr_sects and p->start_sect, when the partition table
+ * is not on a 4-byte boundary, which is frequently the case.
+ * This code uses unaligned load instructions to prevent
+ * such exceptions.
+ */
+#include <asm/unaligned.h>
+#define NR_SECTS(p) ldl_u(&p->nr_sects)
+#define START_SECT(p) ldl_u(&p->start_sect)
+#else /* __alpha__ */
+#define NR_SECTS(p) p->nr_sects
+#define START_SECT(p) p->start_sect
+#endif /* __alpha__ */
+
+#ifdef MACH
+#include <i386/ipl.h>
+#endif
+
+struct gendisk *gendisk_head = NULL;
+
+static int current_minor = 0;
+extern int *blk_size[];
+extern void rd_load(void);
+
+extern int chr_dev_init(void);
+extern int blk_dev_init(void);
+extern int scsi_dev_init(void);
+extern int net_dev_init(void);
+
+static void print_minor_name (struct gendisk *hd, int minor)
+{
+ unsigned int unit = minor >> hd->minor_shift;
+ unsigned int part = minor & ((1 << hd->minor_shift) - 1);
+
+#ifdef CONFIG_BLK_DEV_IDE
+ /*
+ * IDE devices use multiple major numbers, but the drives
+ * are named as: {hda,hdb}, {hdc,hdd}, {hde,hdf}, {hdg,hdh}..
+ * This requires some creative handling here to find the
+ * correct name to use, with some help from ide.c
+ */
+ if (!strcmp(hd->major_name,"ide")) {
+ char name[16]; /* more than large enough */
+ strcpy(name, hd->real_devices); /* courtesy ide.c */
+ name[strlen(name)-1] += unit;
+ printk(" %s", name);
+ } else
+#endif
+ printk(" %s%c", hd->major_name, 'a' + unit);
+ if (part)
+ printk("%d", part);
+ else
+ printk(":");
+}
+
+static void add_partition (struct gendisk *hd, int minor, int start, int size)
+{
+ hd->part[minor].start_sect = start;
+ hd->part[minor].nr_sects = size;
+ print_minor_name(hd, minor);
+}
+
+static inline int is_extended_partition(struct partition *p)
+{
+ return (p->sys_ind == DOS_EXTENDED_PARTITION ||
+ p->sys_ind == LINUX_EXTENDED_PARTITION);
+}
+
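struct partition itself comes from <linux/genhd.h> and is not shown in this patch. As a hedged orientation aid for the MSDOS code below, which walks four 16-byte slots starting at offset 0x1be and checks the 0xAA55 signature at offset 0x1fe, the on-disk slot layout is essentially the following (the struct name here is hypothetical; the field names mirror the kernel's):

/* Hedged sketch of one MBR partition slot. */
struct mbr_slot_sketch {
	unsigned char boot_ind;		/* 0x80 = active/bootable             */
	unsigned char head;		/* CHS start                          */
	unsigned char sector;		/* bits 0-5 sector, bits 6-7 cyl high */
	unsigned char cyl;		/* cylinder low byte                  */
	unsigned char sys_ind;		/* type, e.g. 0x05/0x85 = extended    */
	unsigned char end_head;		/* CHS end                            */
	unsigned char end_sector;
	unsigned char end_cyl;
	unsigned int  start_sect;	/* first sector (LBA, little-endian)  */
	unsigned int  nr_sects;		/* size in sectors (little-endian)    */
};
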
+#ifdef CONFIG_MSDOS_PARTITION
+/*
+ * Create devices for each logical partition in an extended partition.
+ * The logical partitions form a linked list, with each entry being
+ * a partition table with two entries. The first entry
+ * is the real data partition (with a start relative to the partition
+ * table start). The second is a pointer to the next logical partition
+ * (with a start relative to the entire extended partition).
+ * We do not create a Linux partition for the partition tables, but
+ * only for the actual data partitions.
+ */
+
+static void extended_partition(struct gendisk *hd, kdev_t dev)
+{
+ struct buffer_head *bh;
+ struct partition *p;
+ unsigned long first_sector, first_size, this_sector, this_size;
+ int mask = (1 << hd->minor_shift) - 1;
+ int i;
+
+ first_sector = hd->part[MINOR(dev)].start_sect;
+ first_size = hd->part[MINOR(dev)].nr_sects;
+ this_sector = first_sector;
+
+ while (1) {
+ if ((current_minor & mask) == 0)
+ return;
+ if (!(bh = bread(dev,0,1024)))
+ return;
+ /*
+ * This block is from a device that we're about to stomp on.
+ * So make sure nobody thinks this block is usable.
+ */
+ bh->b_state = 0;
+
+ if (*(unsigned short *) (bh->b_data+510) != 0xAA55)
+ goto done;
+
+ p = (struct partition *) (0x1BE + bh->b_data);
+
+ this_size = hd->part[MINOR(dev)].nr_sects;
+
+ /*
+ * Usually, the first entry is the real data partition,
+ * the 2nd entry is the next extended partition, or empty,
+ * and the 3rd and 4th entries are unused.
+ * However, DRDOS sometimes has the extended partition as
+ * the first entry (when the data partition is empty),
+ * and OS/2 seems to use all four entries.
+ */
+
+ /*
+ * First process the data partition(s)
+ */
+ for (i=0; i<4; i++, p++) {
+ if (!NR_SECTS(p) || is_extended_partition(p))
+ continue;
+
+ /* Check the 3rd and 4th entries -
+ these sometimes contain random garbage */
+ if (i >= 2
+ && START_SECT(p) + NR_SECTS(p) > this_size
+ && (this_sector + START_SECT(p) < first_sector ||
+ this_sector + START_SECT(p) + NR_SECTS(p) >
+ first_sector + first_size))
+ continue;
+
+ add_partition(hd, current_minor, this_sector+START_SECT(p), NR_SECTS(p));
+ current_minor++;
+ if ((current_minor & mask) == 0)
+ goto done;
+ }
+ /*
+ * Next, process the (first) extended partition, if present.
+ * (So far, there seems to be no reason to make
+ * extended_partition() recursive and allow a tree
+ * of extended partitions.)
+ * It should be a link to the next logical partition.
+ * Create a minor for this just long enough to get the next
+ * partition table. The minor will be reused for the next
+ * data partition.
+ */
+ p -= 4;
+ for (i=0; i<4; i++, p++)
+ if(NR_SECTS(p) && is_extended_partition(p))
+ break;
+ if (i == 4)
+ goto done; /* nothing left to do */
+
+ hd->part[current_minor].nr_sects = NR_SECTS(p);
+ hd->part[current_minor].start_sect = first_sector + START_SECT(p);
+ this_sector = first_sector + START_SECT(p);
+ dev = MKDEV(hd->major, current_minor);
+ brelse(bh);
+ }
+done:
+ brelse(bh);
+}
+
+static int msdos_partition(struct gendisk *hd, kdev_t dev, unsigned long first_sector)
+{
+ int i, minor = current_minor;
+ struct buffer_head *bh;
+ struct partition *p;
+ unsigned char *data;
+ int mask = (1 << hd->minor_shift) - 1;
+#ifdef CONFIG_BLK_DEV_IDE
+ int tested_for_xlate = 0;
+
+read_mbr:
+#endif
+ if (!(bh = bread(dev,0,1024))) {
+ printk(" unable to read partition table\n");
+ return -1;
+ }
+ data = bh->b_data;
+ /* In some cases we modify the geometry */
+ /* of the drive (below), so ensure that */
+ /* nobody else tries to re-use this data. */
+ bh->b_state = 0;
+#ifdef CONFIG_BLK_DEV_IDE
+check_table:
+#endif
+ if (*(unsigned short *) (0x1fe + data) != 0xAA55) {
+ brelse(bh);
+ return 0;
+ }
+ p = (struct partition *) (0x1be + data);
+
+#ifdef CONFIG_BLK_DEV_IDE
+ if (!tested_for_xlate++) { /* Do this only once per disk */
+ /*
+ * Look for various forms of IDE disk geometry translation
+ */
+ extern int ide_xlate_1024(kdev_t, int, const char *);
+ unsigned int sig = *(unsigned short *)(data + 2);
+ if (p->sys_ind == EZD_PARTITION) {
+ /*
+ * The remainder of the disk must be accessed using
+ * a translated geometry that reduces the number of
+ * apparent cylinders to less than 1024 if possible.
+ *
+ * ide_xlate_1024() will take care of the necessary
+ * adjustments to fool fdisk/LILO and partition check.
+ */
+ if (ide_xlate_1024(dev, -1, " [EZD]")) {
+ data += 512;
+ goto check_table;
+ }
+ } else if (p->sys_ind == DM6_PARTITION) {
+
+ /*
+ * Everything on the disk is offset by 63 sectors,
+ * including a "new" MBR with its own partition table,
+ * and the remainder of the disk must be accessed using
+ * a translated geometry that reduces the number of
+ * apparent cylinders to less than 1024 if possible.
+ *
+ * ide_xlate_1024() will take care of the necessary
+ * adjustments to fool fdisk/LILO and partition check.
+ */
+ if (ide_xlate_1024(dev, 1, " [DM6:DDO]")) {
+ brelse(bh);
+ goto read_mbr; /* start over with new MBR */
+ }
+ } else if (sig <= 0x1ae && *(unsigned short *)(data + sig) == 0x55AA
+ && (1 & *(unsigned char *)(data + sig + 2)) )
+ {
+ /*
+ * DM6 signature in MBR, courtesy of OnTrack
+ */
+ (void) ide_xlate_1024 (dev, 0, " [DM6:MBR]");
+ } else if (p->sys_ind == DM6_AUX1PARTITION || p->sys_ind == DM6_AUX3PARTITION) {
+ /*
+ * DM6 on other than the first (boot) drive
+ */
+ (void) ide_xlate_1024(dev, 0, " [DM6:AUX]");
+ } else {
+ /*
+ * Examine the partition table for common translations.
+			 * This is necessary for drives in situations where
+ * the translated geometry is unavailable from the BIOS.
+ */
+ for (i = 0; i < 4 ; i++) {
+ struct partition *q = &p[i];
+ if (NR_SECTS(q) && q->sector == 1 && q->end_sector == 63) {
+ unsigned int heads = q->end_head + 1;
+ if (heads == 32 || heads == 64 || heads == 128) {
+
+ (void) ide_xlate_1024(dev, heads, " [PTBL]");
+ break;
+ }
+ }
+ }
+ }
+ }
+#endif /* CONFIG_BLK_DEV_IDE */
+
+ current_minor += 4; /* first "extra" minor (for extended partitions) */
+ for (i=1 ; i<=4 ; minor++,i++,p++) {
+ if (!NR_SECTS(p))
+ continue;
+ add_partition(hd, minor, first_sector+START_SECT(p), NR_SECTS(p));
+ if (is_extended_partition(p)) {
+ printk(" <");
+ /*
+ * If we are rereading the partition table, we need
+ * to set the size of the partition so that we will
+ * be able to bread the block containing the extended
+ * partition info.
+ */
+ hd->sizes[minor] = hd->part[minor].nr_sects
+ >> (BLOCK_SIZE_BITS - 9);
+ extended_partition(hd, MKDEV(hd->major, minor));
+ printk(" >");
+ /* prevent someone doing mkfs or mkswap on an
+ extended partition, but leave room for LILO */
+ if (hd->part[minor].nr_sects > 2)
+ hd->part[minor].nr_sects = 2;
+ }
+ }
+ /*
+ * Check for old-style Disk Manager partition table
+ */
+ if (*(unsigned short *) (data+0xfc) == 0x55AA) {
+ p = (struct partition *) (0x1be + data);
+ for (i = 4 ; i < 16 ; i++, current_minor++) {
+ p--;
+ if ((current_minor & mask) == 0)
+ break;
+ if (!(START_SECT(p) && NR_SECTS(p)))
+ continue;
+ add_partition(hd, current_minor, START_SECT(p), NR_SECTS(p));
+ }
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+}
+
+#endif /* CONFIG_MSDOS_PARTITION */
+
+#ifdef CONFIG_OSF_PARTITION
+
+static int osf_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector)
+{
+ int i;
+ int mask = (1 << hd->minor_shift) - 1;
+ struct buffer_head *bh;
+ struct disklabel {
+ u32 d_magic;
+ u16 d_type,d_subtype;
+ u8 d_typename[16];
+ u8 d_packname[16];
+ u32 d_secsize;
+ u32 d_nsectors;
+ u32 d_ntracks;
+ u32 d_ncylinders;
+ u32 d_secpercyl;
+ u32 d_secprtunit;
+ u16 d_sparespertrack;
+ u16 d_sparespercyl;
+ u32 d_acylinders;
+ u16 d_rpm, d_interleave, d_trackskew, d_cylskew;
+ u32 d_headswitch, d_trkseek, d_flags;
+ u32 d_drivedata[5];
+ u32 d_spare[5];
+ u32 d_magic2;
+ u16 d_checksum;
+ u16 d_npartitions;
+ u32 d_bbsize, d_sbsize;
+ struct d_partition {
+ u32 p_size;
+ u32 p_offset;
+ u32 p_fsize;
+ u8 p_fstype;
+ u8 p_frag;
+ u16 p_cpg;
+ } d_partitions[8];
+ } * label;
+ struct d_partition * partition;
+#define DISKLABELMAGIC (0x82564557UL)
+
+ if (!(bh = bread(dev,0,1024))) {
+ printk("unable to read partition table\n");
+ return -1;
+ }
+ label = (struct disklabel *) (bh->b_data+64);
+ partition = label->d_partitions;
+ if (label->d_magic != DISKLABELMAGIC) {
+ printk("magic: %08x\n", label->d_magic);
+ brelse(bh);
+ return 0;
+ }
+ if (label->d_magic2 != DISKLABELMAGIC) {
+ printk("magic2: %08x\n", label->d_magic2);
+ brelse(bh);
+ return 0;
+ }
+ for (i = 0 ; i < label->d_npartitions; i++, partition++) {
+ if ((current_minor & mask) == 0)
+ break;
+ if (partition->p_size)
+ add_partition(hd, current_minor,
+ first_sector+partition->p_offset,
+ partition->p_size);
+ current_minor++;
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+}
+
+#endif /* CONFIG_OSF_PARTITION */
+
+#ifdef CONFIG_SUN_PARTITION
+
+static int sun_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector)
+{
+ int i, csum;
+ unsigned short *ush;
+ struct buffer_head *bh;
+ struct sun_disklabel {
+ unsigned char info[128]; /* Informative text string */
+ unsigned char spare[292]; /* Boot information etc. */
+ unsigned short rspeed; /* Disk rotational speed */
+ unsigned short pcylcount; /* Physical cylinder count */
+ unsigned short sparecyl; /* extra sects per cylinder */
+ unsigned char spare2[4]; /* More magic... */
+ unsigned short ilfact; /* Interleave factor */
+ unsigned short ncyl; /* Data cylinder count */
+ unsigned short nacyl; /* Alt. cylinder count */
+ unsigned short ntrks; /* Tracks per cylinder */
+ unsigned short nsect; /* Sectors per track */
+ unsigned char spare3[4]; /* Even more magic... */
+ struct sun_partition {
+ unsigned long start_cylinder;
+ unsigned long num_sectors;
+ } partitions[8];
+ unsigned short magic; /* Magic number */
+ unsigned short csum; /* Label xor'd checksum */
+ } * label;
+ struct sun_partition *p;
+ unsigned long spc;
+#define SUN_LABEL_MAGIC 0xDABE
+
+ if(!(bh = bread(dev, 0, 1024))) {
+ printk("Dev %d: unable to read partition table\n", dev);
+ return -1;
+ }
+ label = (struct sun_disklabel *) bh->b_data;
+ p = label->partitions;
+ if(label->magic != SUN_LABEL_MAGIC) {
+ printk("Dev %d Sun disklabel: bad magic %08x\n", dev, label->magic);
+ brelse(bh);
+ return 0;
+ }
+ /* Look at the checksum */
+ ush = ((unsigned short *) (label+1)) - 1;
+ for(csum = 0; ush >= ((unsigned short *) label);)
+ csum ^= *ush--;
+ if(csum) {
+ printk("Dev %d Sun disklabel: Csum bad, label corrupted\n", dev);
+ brelse(bh);
+ return 0;
+ }
+ /* All Sun disks have 8 partition entries */
+ spc = (label->ntrks * label->nsect);
+ for(i=0; i < 8; i++, p++) {
+ unsigned long st_sector;
+
+ /* We register all partitions, even if zero size, so that
+ * the minor numbers end up ok as per SunOS interpretation.
+ */
+ st_sector = first_sector + (p->start_cylinder * spc);
+ add_partition(hd, current_minor, st_sector, p->num_sectors);
+ current_minor++;
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+}
+
+#endif /* CONFIG_SUN_PARTITION */
+
+static void check_partition(struct gendisk *hd, kdev_t dev)
+{
+ static int first_time = 1;
+ unsigned long first_sector;
+
+ if (first_time)
+ printk("Partition check:\n");
+ first_time = 0;
+ first_sector = hd->part[MINOR(dev)].start_sect;
+
+ /*
+ * This is a kludge to allow the partition check to be
+ * skipped for specific drives (e.g. IDE cd-rom drives)
+ */
+ if ((int)first_sector == -1) {
+ hd->part[MINOR(dev)].start_sect = 0;
+ return;
+ }
+
+ printk(" ");
+ print_minor_name(hd, MINOR(dev));
+#ifdef CONFIG_MSDOS_PARTITION
+ if (msdos_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_OSF_PARTITION
+ if (osf_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_SUN_PARTITION
+ if(sun_partition(hd, dev, first_sector))
+ return;
+#endif
+ printk(" unknown partition table\n");
+}
+
+/* This function is used to re-read partition tables for removable disks.
+ Much of the cleanup from the old partition tables should have already been
+ done */
+
+/* This function will re-read the partition tables for a given device,
+and set things back up again. There are some important caveats,
+however. You must ensure that no one is using the device, and no one
+can start using the device while this function is being executed. */
+
+void resetup_one_dev(struct gendisk *dev, int drive)
+{
+ int i;
+ int first_minor = drive << dev->minor_shift;
+ int end_minor = first_minor + dev->max_p;
+
+ blk_size[dev->major] = NULL;
+ current_minor = 1 + first_minor;
+ check_partition(dev, MKDEV(dev->major, first_minor));
+
+ /*
+ * We need to set the sizes array before we will be able to access
+ * any of the partitions on this device.
+ */
+ if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */
+ for (i = first_minor; i < end_minor; i++)
+ dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+ blk_size[dev->major] = dev->sizes;
+ }
+}
+
+static void setup_dev(struct gendisk *dev)
+{
+ int i, drive;
+ int end_minor = dev->max_nr * dev->max_p;
+
+ blk_size[dev->major] = NULL;
+ for (i = 0 ; i < end_minor; i++) {
+ dev->part[i].start_sect = 0;
+ dev->part[i].nr_sects = 0;
+ }
+ dev->init(dev);
+ for (drive = 0 ; drive < dev->nr_real ; drive++) {
+ int first_minor = drive << dev->minor_shift;
+ current_minor = 1 + first_minor;
+ check_partition(dev, MKDEV(dev->major, first_minor));
+ }
+ if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */
+ for (i = 0; i < end_minor; i++)
+ dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+ blk_size[dev->major] = dev->sizes;
+ }
+}
+
+void device_setup(void)
+{
+ extern void console_map_init(void);
+ struct gendisk *p;
+ int nr=0;
+#ifdef MACH
+ extern int linux_intr_pri;
+
+ linux_intr_pri = SPL5;
+#endif
+
+#ifndef MACH
+ chr_dev_init();
+#endif
+ blk_dev_init();
+ sti();
+#ifdef CONFIG_SCSI
+ scsi_dev_init();
+#endif
+#ifdef CONFIG_INET
+#ifdef MACH
+ linux_intr_pri = SPL6;
+#endif
+ net_dev_init();
+#endif
+#ifndef MACH
+ console_map_init();
+#endif
+
+ for (p = gendisk_head ; p ; p=p->next) {
+ setup_dev(p);
+ nr += p->nr_real;
+ }
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_load();
+#endif
+}
diff --git a/i386/i386at/gpl/linux/block/ide-cd.c b/i386/i386at/gpl/linux/block/ide-cd.c
new file mode 100644
index 00000000..6dc93806
--- /dev/null
+++ b/i386/i386at/gpl/linux/block/ide-cd.c
@@ -0,0 +1,2770 @@
+/*
+ * linux/drivers/block/ide-cd.c
+ *
+ * 1.00 Oct 31, 1994 -- Initial version.
+ * 1.01 Nov 2, 1994 -- Fixed problem with starting request in
+ * cdrom_check_status.
+ * 1.03 Nov 25, 1994 -- leaving unmask_intr[] as a user-setting (as for disks)
+ * (from mlord) -- minor changes to cdrom_setup()
+ * -- renamed ide_dev_s to ide_drive_t, enable irq on command
+ * 2.00 Nov 27, 1994 -- Generalize packet command interface;
+ * add audio ioctls.
+ * 2.01 Dec 3, 1994 -- Rework packet command interface to handle devices
+ * which send an interrupt when ready for a command.
+ * 2.02 Dec 11, 1994 -- Cache the TOC in the driver.
+ * Don't use SCMD_PLAYAUDIO_TI; it's not included
+ * in the current version of ATAPI.
+ * Try to use LBA instead of track or MSF addressing
+ * when possible.
+ * Don't wait for READY_STAT.
+ * 2.03 Jan 10, 1995 -- Rewrite block read routines to handle block sizes
+ * other than 2k and to move multiple sectors in a
+ * single transaction.
+ * 2.04 Apr 21, 1995 -- Add work-around for Creative Labs CD220E drives.
+ * Thanks to Nick Saw <cwsaw@pts7.pts.mot.com> for
+ * help in figuring this out. Ditto for Acer and
+ * Aztech drives, which seem to have the same problem.
+ * 2.04b May 30, 1995 -- Fix to match changes in ide.c version 3.16 -ml
+ * 2.05 Jun 8, 1995 -- Don't attempt to retry after an illegal request
+ * or data protect error.
+ * Use HWIF and DEV_HWIF macros as in ide.c.
+ * Always try to do a request_sense after
+ * a failed command.
+ * Include an option to give textual descriptions
+ * of ATAPI errors.
+ * Fix a bug in handling the sector cache which
+ * showed up if the drive returned data in 512 byte
+ * blocks (like Pioneer drives). Thanks to
+ * Richard Hirst <srh@gpt.co.uk> for diagnosing this.
+ * Properly supply the page number field in the
+ * MODE_SELECT command.
+ * PLAYAUDIO12 is broken on the Aztech; work around it.
+ * 2.05x Aug 11, 1995 -- lots of data structure renaming/restructuring in ide.c
+ * (my apologies to Scott, but now ide-cd.c is independent)
+ * 3.00 Aug 22, 1995 -- Implement CDROMMULTISESSION ioctl.
+ * Implement CDROMREADAUDIO ioctl (UNTESTED).
+ * Use input_ide_data() and output_ide_data().
+ * Add door locking.
+ * Fix usage count leak in cdrom_open, which happened
+ * when a read-write mount was attempted.
+ * Try to load the disk on open.
+ * Implement CDROMEJECT_SW ioctl (off by default).
+ * Read total cdrom capacity during open.
+ * Rearrange logic in cdrom_decode_status. Issue
+ * request sense commands for failed packet commands
+ * from here instead of from cdrom_queue_packet_command.
+ * Fix a race condition in retrieving error information.
+ * Suppress printing normal unit attention errors and
+ * some drive not ready errors.
+ * Implement CDROMVOLREAD ioctl.
+ * Implement CDROMREADMODE1/2 ioctls.
+ * Fix race condition in setting up interrupt handlers
+ * when the `serialize' option is used.
+ * 3.01 Sep 2, 1995 -- Fix ordering of reenabling interrupts in
+ * cdrom_queue_request.
+ * Another try at using ide_[input,output]_data.
+ * 3.02 Sep 16, 1995 -- Stick total disk capacity in partition table as well.
+ * Make VERBOSE_IDE_CD_ERRORS dump failed command again.
+ * Dump out more information for ILLEGAL REQUEST errs.
+ *                        Fix handling of errors occurring before the
+ * packet command is transferred.
+ * Fix transfers with odd bytelengths.
+ * 3.03 Oct 27, 1995 -- Some Creative drives have an id of just `CD'.
+ * `DCI-2S10' drives are broken too.
+ * 3.04 Nov 20, 1995 -- So are Vertos drives.
+ * 3.05 Dec 1, 1995 -- Changes to go with overhaul of ide.c and ide-tape.c
+ * 3.06 Dec 16, 1995 -- Add support needed for partitions.
+ * More workarounds for Vertos bugs (based on patches
+ * from Holger Dietze <dietze@aix520.informatik.uni-leipzig.de>).
+ * Try to eliminate byteorder assumptions.
+ * Use atapi_cdrom_subchnl struct definition.
+ * Add STANDARD_ATAPI compilation option.
+ * 3.07 Jan 29, 1996 -- More twiddling for broken drives: Sony 55D,
+ * Vertos 300.
+ * Add NO_DOOR_LOCKING configuration option.
+ * Handle drive_cmd requests w/NULL args (for hdparm -t).
+ * Work around sporadic Sony55e audio play problem.
+ * 3.07a Feb 11, 1996 -- check drive->id for NULL before dereferencing, to fix
+ * problem with "hde=cdrom" with no drive present. -ml
+ *
+ * NOTE: Direct audio reads will only work on some types of drive.
+ * So far, I've received reports of success for Sony and Toshiba drives.
+ *
+ * ATAPI cd-rom driver. To be used with ide.c.
+ *
+ * Copyright (C) 1994, 1995, 1996 scott snyder <snyder@fnald0.fnal.gov>
+ * May be copied or modified under the terms of the GNU General Public License
+ * (../../COPYING).
+ */
+
+
+/***************************************************************************/
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/cdrom.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/segment.h>
+#ifdef __alpha__
+# include <asm/unaligned.h>
+#endif
+
+#include "ide.h"
+
+
+
+/* Turn this on to have the driver print out the meanings of the
+ ATAPI error codes. This will use up additional kernel-space
+ memory, though. */
+
+#ifndef VERBOSE_IDE_CD_ERRORS
+#define VERBOSE_IDE_CD_ERRORS 0
+#endif
+
+
+/* Turning this on will remove code to work around various nonstandard
+ ATAPI implementations. If you know your drive follows the standard,
+ this will give you a slightly smaller kernel. */
+
+#ifndef STANDARD_ATAPI
+#define STANDARD_ATAPI 0
+#endif
+
+
+/* Turning this on will disable the door-locking functionality.
+ This is apparently needed for supermount. */
+
+#ifndef NO_DOOR_LOCKING
+#define NO_DOOR_LOCKING 0
+#endif
+
+
+/************************************************************************/
+
+#define SECTOR_SIZE 512
+#define SECTOR_BITS 9
+#define SECTORS_PER_FRAME (CD_FRAMESIZE / SECTOR_SIZE)
+
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+
+/* special command codes for strategy routine. */
+#define PACKET_COMMAND 4315
+#define REQUEST_SENSE_COMMAND 4316
+#define RESET_DRIVE_COMMAND 4317
+
+/* Some ATAPI command opcodes (just like SCSI).
+ (Some other cdrom-specific codes are in cdrom.h.) */
+#define TEST_UNIT_READY 0x00
+#define REQUEST_SENSE 0x03
+#define START_STOP 0x1b
+#define ALLOW_MEDIUM_REMOVAL 0x1e
+#define READ_CAPACITY 0x25
+#define READ_10 0x28
+#define MODE_SENSE_10 0x5a
+#define MODE_SELECT_10 0x55
+#define READ_CD 0xbe
+
+
+/* ATAPI sense keys (mostly copied from scsi.h). */
+
+#define NO_SENSE 0x00
+#define RECOVERED_ERROR 0x01
+#define NOT_READY 0x02
+#define MEDIUM_ERROR 0x03
+#define HARDWARE_ERROR 0x04
+#define ILLEGAL_REQUEST 0x05
+#define UNIT_ATTENTION 0x06
+#define DATA_PROTECT 0x07
+#define ABORTED_COMMAND 0x0b
+#define MISCOMPARE 0x0e
+
+/* We want some additional flags for cd-rom drives.
+ To save space in the ide_drive_t struct, use some fields which
+   don't make sense for cd-roms -- `bios_sect' and `bios_head'. */
+
+/* Configuration flags. These describe the capabilities of the drive.
+ They generally do not change after initialization, unless we learn
+ more about the drive from stuff failing. */
+struct ide_cd_config_flags {
+ __u8 drq_interrupt : 1; /* Device sends an interrupt when ready
+ for a packet command. */
+ __u8 no_doorlock : 1; /* Drive cannot lock the door. */
+#if ! STANDARD_ATAPI
+ __u8 no_playaudio12: 1; /* The PLAYAUDIO12 command is not supported. */
+
+ __u8 no_lba_toc : 1; /* Drive cannot return TOC info in LBA format. */
+ __u8 playmsf_uses_bcd : 1; /* Drive uses BCD in PLAYAUDIO_MSF. */
+ __u8 old_readcd : 1; /* Drive uses old READ CD opcode. */
+ __u8 vertos_lossage: 1; /* Drive is a Vertos 300,
+ and likes to speak BCD. */
+#endif /* not STANDARD_ATAPI */
+ __u8 reserved : 1;
+};
+#define CDROM_CONFIG_FLAGS(drive) ((struct ide_cd_config_flags *)&((drive)->bios_sect))
+
+
+/* State flags. These give information about the current state of the
+ drive, and will change during normal operation. */
+struct ide_cd_state_flags {
+ __u8 media_changed : 1; /* Driver has noticed a media change. */
+ __u8 toc_valid : 1; /* Saved TOC information is current. */
+ __u8 door_locked : 1; /* We think that the drive door is locked. */
+ __u8 eject_on_close: 1; /* Drive should eject when device is closed. */
+ __u8 reserved : 4;
+};
+#define CDROM_STATE_FLAGS(drive) ((struct ide_cd_state_flags *)&((drive)->bios_head))
+
+
+#define SECTOR_BUFFER_SIZE CD_FRAMESIZE
+
+
+
+/****************************************************************************
+ * Routines to read and write data from/to the drive, using
+ * the routines input_ide_data() and output_ide_data() from ide.c.
+ *
+ * These routines will round up any request for an odd number of bytes,
+ * so if an odd bytecount is specified, be sure that there's at least one
+ * extra byte allocated for the buffer.
+ */
+
+
+static inline
+void cdrom_in_bytes (ide_drive_t *drive, void *buffer, uint bytecount)
+{
+ ++bytecount;
+ ide_input_data (drive, buffer, bytecount / 4);
+ if ((bytecount & 0x03) >= 2)
+ {
+ insw (IDE_DATA_REG, ((byte *)buffer) + (bytecount & ~0x03), 1);
+ }
+}
+
+
+static inline
+void cdrom_out_bytes (ide_drive_t *drive, void *buffer, uint bytecount)
+{
+ ++bytecount;
+ ide_output_data (drive, buffer, bytecount / 4);
+ if ((bytecount & 0x03) >= 2)
+ {
+ outsw (IDE_DATA_REG, ((byte *)buffer) + (bytecount & ~0x03), 1);
+ }
+}
+
+
+
+/****************************************************************************
+ * Descriptions of ATAPI error codes.
+ */
+
+#define ARY_LEN(a) ((sizeof(a) / sizeof(a[0])))
+
+#if VERBOSE_IDE_CD_ERRORS
+
+/* From Table 124 of the ATAPI 1.2 spec. */
+
+char *sense_key_texts[16] = {
+ "No sense data",
+ "Recovered error",
+ "Not ready",
+ "Medium error",
+ "Hardware error",
+ "Illegal request",
+ "Unit attention",
+ "Data protect",
+ "(reserved)",
+ "(reserved)",
+ "(reserved)",
+ "Aborted command",
+ "(reserved)",
+ "(reserved)",
+ "Miscompare",
+ "(reserved)",
+};
+
+
+/* From Table 125 of the ATAPI 1.2 spec. */
+
+struct {
+ short asc_ascq;
+ char *text;
+} sense_data_texts[] = {
+ { 0x0000, "No additional sense information" },
+ { 0x0011, "Audio play operation in progress" },
+ { 0x0012, "Audio play operation paused" },
+ { 0x0013, "Audio play operation successfully completed" },
+ { 0x0014, "Audio play operation stopped due to error" },
+ { 0x0015, "No current audio status to return" },
+
+ { 0x0200, "No seek complete" },
+
+ { 0x0400, "Logical unit not ready - cause not reportable" },
+ { 0x0401, "Logical unit not ready - in progress (sic) of becoming ready" },
+ { 0x0402, "Logical unit not ready - initializing command required" },
+ { 0x0403, "Logical unit not ready - manual intervention required" },
+
+ { 0x0600, "No reference position found" },
+
+ { 0x0900, "Track following error" },
+ { 0x0901, "Tracking servo failure" },
+ { 0x0902, "Focus servo failure" },
+ { 0x0903, "Spindle servo failure" },
+
+ { 0x1100, "Unrecovered read error" },
+ { 0x1106, "CIRC unrecovered error" },
+
+ { 0x1500, "Random positioning error" },
+ { 0x1501, "Mechanical positioning error" },
+ { 0x1502, "Positioning error detected by read of medium" },
+
+ { 0x1700, "Recovered data with no error correction applied" },
+ { 0x1701, "Recovered data with retries" },
+ { 0x1702, "Recovered data with positive head offset" },
+ { 0x1703, "Recovered data with negative head offset" },
+ { 0x1704, "Recovered data with retries and/or CIRC applied" },
+ { 0x1705, "Recovered data using previous sector ID" },
+
+ { 0x1800, "Recovered data with error correction applied" },
+ { 0x1801, "Recovered data with error correction and retries applied" },
+ { 0x1802, "Recovered data - the data was auto-reallocated" },
+ { 0x1803, "Recovered data with CIRC" },
+ { 0x1804, "Recovered data with L-EC" },
+ { 0x1805, "Recovered data - recommend reassignment" },
+ { 0x1806, "Recovered data - recommend rewrite" },
+
+ { 0x1a00, "Parameter list length error" },
+
+ { 0x2000, "Invalid command operation code" },
+
+ { 0x2100, "Logical block address out of range" },
+
+ { 0x2400, "Invalid field in command packet" },
+
+ { 0x2600, "Invalid field in parameter list" },
+ { 0x2601, "Parameter not supported" },
+ { 0x2602, "Parameter value invalid" },
+ { 0x2603, "Threshold parameters not supported" },
+
+ { 0x2800, "Not ready to ready transition, medium may have changed" },
+
+ { 0x2900, "Power on, reset or bus device reset occurred" },
+
+ { 0x2a00, "Parameters changed" },
+ { 0x2a01, "Mode parameters changed" },
+
+ { 0x3000, "Incompatible medium installed" },
+ { 0x3001, "Cannot read medium - unknown format" },
+ { 0x3002, "Cannot read medium - incompatible format" },
+
+ { 0x3700, "Rounded parameter" },
+
+ { 0x3900, "Saving parameters not supported" },
+
+ { 0x3a00, "Medium not present" },
+
+ { 0x3f00, "ATAPI CD-ROM drive operating conditions have changed" },
+ { 0x3f01, "Microcode has been changed" },
+ { 0x3f02, "Changed operating definition" },
+ { 0x3f03, "Inquiry data has changed" },
+
+ { 0x4000, "Diagnostic failure on component (ASCQ)" },
+
+ { 0x4400, "Internal ATAPI CD-ROM drive failure" },
+
+ { 0x4e00, "Overlapped commands attempted" },
+
+ { 0x5300, "Media load or eject failed" },
+ { 0x5302, "Medium removal prevented" },
+
+ { 0x5700, "Unable to recover table of contents" },
+
+ { 0x5a00, "Operator request or state change input (unspecified)" },
+ { 0x5a01, "Operator medium removal request" },
+
+ { 0x5b00, "Threshold condition met" },
+
+ { 0x5c00, "Status change" },
+
+ { 0x6300, "End of user area encountered on this track" },
+
+ { 0x6400, "Illegal mode for this track" },
+
+ { 0xbf00, "Loss of streaming" },
+};
+#endif
+
+
+
+/****************************************************************************
+ * Generic packet command support and error handling routines.
+ */
+
+
+static
+void cdrom_analyze_sense_data (ide_drive_t *drive,
+ struct atapi_request_sense *reqbuf,
+ struct packet_command *failed_command)
+{
+ /* Don't print not ready or unit attention errors for READ_SUBCHANNEL.
+ Workman (and probably other programs) uses this command to poll
+ the drive, and we don't want to fill the syslog with useless errors. */
+ if (failed_command &&
+ failed_command->c[0] == SCMD_READ_SUBCHANNEL &&
+ (reqbuf->sense_key == NOT_READY || reqbuf->sense_key == UNIT_ATTENTION))
+ return;
+
+#if VERBOSE_IDE_CD_ERRORS
+ {
+ int i;
+ char *s;
+ char buf[80];
+
+ printk ("ATAPI device %s:\n", drive->name);
+
+ printk (" Error code: 0x%02x\n", reqbuf->error_code);
+
+ if (reqbuf->sense_key >= 0 &&
+ reqbuf->sense_key < ARY_LEN (sense_key_texts))
+ s = sense_key_texts[reqbuf->sense_key];
+ else
+ s = "(bad sense key)";
+
+ printk (" Sense key: 0x%02x - %s\n", reqbuf->sense_key, s);
+
+ if (reqbuf->asc == 0x40) {
+ sprintf (buf, "Diagnostic failure on component 0x%02x", reqbuf->ascq);
+ s = buf;
+ }
+
+ else {
+ int lo, hi;
+ int key = (reqbuf->asc << 8);
+ if ( ! (reqbuf->ascq >= 0x80 && reqbuf->ascq <= 0xdd) )
+ key |= reqbuf->ascq;
+
+ lo = 0;
+ hi = ARY_LEN (sense_data_texts);
+ s = NULL;
+
+ while (hi > lo) {
+ int mid = (lo + hi) / 2;
+ if (sense_data_texts[mid].asc_ascq == key) {
+ s = sense_data_texts[mid].text;
+ break;
+ }
+ else if (sense_data_texts[mid].asc_ascq > key)
+ hi = mid;
+ else
+ lo = mid+1;
+ }
+ }
+
+ if (s == NULL) {
+ if (reqbuf->asc > 0x80)
+ s = "(vendor-specific error)";
+ else
+ s = "(reserved error code)";
+ }
+
+ printk (" Additional sense data: 0x%02x, 0x%02x - %s\n",
+ reqbuf->asc, reqbuf->ascq, s);
+
+ if (failed_command != NULL) {
+ printk (" Failed packet command: ");
+ for (i=0; i<sizeof (failed_command->c); i++)
+ printk ("%02x ", failed_command->c[i]);
+ printk ("\n");
+ }
+
+ if (reqbuf->sense_key == ILLEGAL_REQUEST &&
+ (reqbuf->sense_key_specific[0] & 0x80) != 0)
+ {
+ printk (" Error in %s byte %d",
+ (reqbuf->sense_key_specific[0] & 0x40) != 0
+ ? "command packet"
+ : "command data",
+ (reqbuf->sense_key_specific[1] << 8) +
+ reqbuf->sense_key_specific[2]);
+
+ if ((reqbuf->sense_key_specific[0] & 0x40) != 0)
+ {
+ printk (" bit %d", reqbuf->sense_key_specific[0] & 0x07);
+ }
+
+ printk ("\n");
+ }
+ }
+
+#else
+
+ /* Suppress printing unit attention and `in progress of becoming ready'
+ errors when we're not being verbose. */
+
+ if (reqbuf->sense_key == UNIT_ATTENTION ||
+ (reqbuf->sense_key == NOT_READY && (reqbuf->asc == 4 ||
+ reqbuf->asc == 0x3a)))
+ return;
+
+ printk ("%s: code: 0x%02x key: 0x%02x asc: 0x%02x ascq: 0x%02x\n",
+ drive->name,
+ reqbuf->error_code, reqbuf->sense_key, reqbuf->asc, reqbuf->ascq);
+#endif
+}
+
+
+/* Fix up a possibly partially-processed request so that we can
+ start it over entirely, or even put it back on the request queue. */
+static void restore_request (struct request *rq)
+{
+ if (rq->buffer != rq->bh->b_data)
+ {
+ int n = (rq->buffer - rq->bh->b_data) / SECTOR_SIZE;
+ rq->buffer = rq->bh->b_data;
+ rq->nr_sectors += n;
+ rq->sector -= n;
+ }
+ rq->current_nr_sectors = rq->bh->b_size >> SECTOR_BITS;
+}
+
+
+static void cdrom_queue_request_sense (ide_drive_t *drive,
+ struct semaphore *sem,
+ struct atapi_request_sense *reqbuf,
+ struct packet_command *failed_command)
+{
+ struct request *rq;
+ struct packet_command *pc;
+ int len;
+
+ /* If the request didn't explicitly specify where to put the sense data,
+ use the statically allocated structure. */
+ if (reqbuf == NULL)
+ reqbuf = &drive->cdrom_info.sense_data;
+
+ /* Make up a new request to retrieve sense information. */
+
+ pc = &HWIF(drive)->request_sense_pc;
+ memset (pc, 0, sizeof (*pc));
+
+ /* The request_sense structure has an odd number of (16-bit) words,
+ which won't work well with 32-bit transfers. However, we don't care
+ about the last two bytes, so just truncate the structure down
+ to an even length. */
+ len = sizeof (*reqbuf) / 4;
+ len *= 4;
+
+ pc->c[0] = REQUEST_SENSE;
+ pc->c[4] = len;
+ pc->buffer = (char *)reqbuf;
+ pc->buflen = len;
+ pc->sense_data = (struct atapi_request_sense *)failed_command;
+
+ /* stuff the sense request in front of our current request */
+
+ rq = &HWIF(drive)->request_sense_request;
+ ide_init_drive_cmd (rq);
+ rq->cmd = REQUEST_SENSE_COMMAND;
+ rq->buffer = (char *)pc;
+ rq->sem = sem;
+ (void) ide_do_drive_cmd (drive, rq, ide_preempt);
+}
+
+
+static void cdrom_end_request (int uptodate, ide_drive_t *drive)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+
+ /* The code in blk.h can screw us up on error recovery if the block
+ size is larger than 1k. Fix that up here. */
+ if (!uptodate && rq->bh != 0)
+ {
+ int adj = rq->current_nr_sectors - 1;
+ rq->current_nr_sectors -= adj;
+ rq->sector += adj;
+ }
+
+ if (rq->cmd == REQUEST_SENSE_COMMAND && uptodate)
+ {
+ struct packet_command *pc = (struct packet_command *)rq->buffer;
+ cdrom_analyze_sense_data (drive,
+ (struct atapi_request_sense *)(pc->buffer - pc->c[4]),
+ (struct packet_command *)pc->sense_data);
+ }
+
+ ide_end_request (uptodate, HWGROUP(drive));
+}
+
+
+/* Mark that we've seen a media change, and invalidate our internal
+ buffers. */
+static void cdrom_saw_media_change (ide_drive_t *drive)
+{
+ CDROM_STATE_FLAGS (drive)->media_changed = 1;
+ CDROM_STATE_FLAGS (drive)->toc_valid = 0;
+ drive->cdrom_info.nsectors_buffered = 0;
+}
+
+
+/* Returns 0 if the request should be continued.
+ Returns 1 if the request was ended. */
+static int cdrom_decode_status (ide_drive_t *drive, int good_stat, int *stat_ret)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ int stat, err, sense_key, cmd;
+
+ /* Check for errors. */
+ stat = GET_STAT();
+ *stat_ret = stat;
+
+ if (OK_STAT (stat, good_stat, BAD_R_STAT))
+ return 0;
+
+ /* Got an error. */
+ err = IN_BYTE (IDE_ERROR_REG);
+ sense_key = err >> 4;
+
+ if (rq == NULL)
+ printk ("%s : missing request in cdrom_decode_status\n", drive->name);
+ else
+ {
+ cmd = rq->cmd;
+
+ if (cmd == REQUEST_SENSE_COMMAND)
+ {
+ /* We got an error trying to get sense info from the drive
+ (probably while trying to recover from a former error).
+ Just give up. */
+
+ struct packet_command *pc = (struct packet_command *)rq->buffer;
+ pc->stat = 1;
+ cdrom_end_request (1, drive);
+ ide_error (drive, "request sense failure", stat);
+ return 1;
+ }
+
+ else if (cmd == PACKET_COMMAND)
+ {
+ /* All other functions, except for READ. */
+
+ struct packet_command *pc = (struct packet_command *)rq->buffer;
+ struct semaphore *sem = NULL;
+
+ /* Check for tray open. */
+ if (sense_key == NOT_READY)
+ {
+ cdrom_saw_media_change (drive);
+
+ /* Print an error message to the syslog.
+ Exception: don't print anything if this is a read subchannel
+ command. This is because workman constantly polls the drive
+ with this command, and we don't want to uselessly fill up
+ the syslog. */
+ if (pc->c[0] != SCMD_READ_SUBCHANNEL)
+ printk ("%s : tray open or drive not ready\n", drive->name);
+ }
+
+ /* Check for media change. */
+ else if (sense_key == UNIT_ATTENTION)
+ {
+ cdrom_saw_media_change (drive);
+ printk ("%s: media changed\n", drive->name);
+ }
+
+ /* Otherwise, print an error. */
+ else
+ {
+ ide_dump_status (drive, "packet command error", stat);
+ }
+
+ /* Set the error flag and complete the request.
+ Then, if we have a CHECK CONDITION status, queue a request
+ sense command. We must be careful, though: we don't want
+ the thread in cdrom_queue_packet_command to wake up until
+ the request sense has completed. We do this by transferring
+ the semaphore from the packet command request to the
+ request sense request. */
+
+ if ((stat & ERR_STAT) != 0)
+ {
+ sem = rq->sem;
+ rq->sem = NULL;
+ }
+
+ pc->stat = 1;
+ cdrom_end_request (1, drive);
+
+ if ((stat & ERR_STAT) != 0)
+ cdrom_queue_request_sense (drive, sem, pc->sense_data, pc);
+ }
+
+ else
+ {
+ /* Handle errors from READ requests. */
+
+ /* Check for tray open. */
+ if (sense_key == NOT_READY)
+ {
+ cdrom_saw_media_change (drive);
+
+ /* Fail the request. */
+ printk ("%s : tray open\n", drive->name);
+ cdrom_end_request (0, drive);
+ }
+
+ /* Check for media change. */
+ else if (sense_key == UNIT_ATTENTION)
+ {
+ cdrom_saw_media_change (drive);
+
+ /* Arrange to retry the request.
+ But be sure to give up if we've retried too many times. */
+ if (++rq->errors > ERROR_MAX)
+ {
+ cdrom_end_request (0, drive);
+ }
+ }
+ /* No point in retrying after an illegal request or
+ data protect error.*/
+ else if (sense_key == ILLEGAL_REQUEST || sense_key == DATA_PROTECT)
+ {
+ ide_dump_status (drive, "command error", stat);
+ cdrom_end_request (0, drive);
+ }
+
+ /* If there were other errors, go to the default handler. */
+ else if ((err & ~ABRT_ERR) != 0)
+ {
+ ide_error (drive, "cdrom_decode_status", stat);
+ return 1;
+ }
+
+ /* Else, abort if we've racked up too many retries. */
+ else if ((++rq->errors > ERROR_MAX))
+ {
+ cdrom_end_request (0, drive);
+ }
+
+ /* If we got a CHECK_CONDITION status, queue a request sense
+ command. */
+ if ((stat & ERR_STAT) != 0)
+ cdrom_queue_request_sense (drive, NULL, NULL, NULL);
+ }
+ }
+
+ /* Retry, or handle the next request. */
+ return 1;
+}
+
+
+/* Set up the device registers for transferring a packet command on DEV,
+ expecting to later transfer XFERLEN bytes. HANDLER is the routine
+ which actually transfers the command to the drive. If this is a
+ drq_interrupt device, this routine will arrange for HANDLER to be
+ called when the interrupt from the drive arrives. Otherwise, HANDLER
+ will be called immediately after the drive is prepared for the transfer. */
+
+static int cdrom_start_packet_command (ide_drive_t *drive, int xferlen,
+ ide_handler_t *handler)
+{
+ /* Wait for the controller to be idle. */
+ if (ide_wait_stat (drive, 0, BUSY_STAT, WAIT_READY)) return 1;
+
+ /* Set up the controller registers. */
+ OUT_BYTE (0, IDE_FEATURE_REG);
+ OUT_BYTE (0, IDE_NSECTOR_REG);
+ OUT_BYTE (0, IDE_SECTOR_REG);
+
+ OUT_BYTE (xferlen & 0xff, IDE_LCYL_REG);
+ OUT_BYTE (xferlen >> 8 , IDE_HCYL_REG);
+ OUT_BYTE (drive->ctl, IDE_CONTROL_REG);
+
+ if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt)
+ {
+ ide_set_handler (drive, handler, WAIT_CMD);
+ OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* packet command */
+ }
+ else
+ {
+ OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* packet command */
+ (*handler) (drive);
+ }
+
+ return 0;
+}
+
+
+/* Send a packet command to DRIVE described by CMD_BUF and CMD_LEN.
+ The device registers must have already been prepared
+ by cdrom_start_packet_command.
+ HANDLER is the interrupt handler to call when the command completes
+ or there's data ready. */
+static int cdrom_transfer_packet_command (ide_drive_t *drive,
+ char *cmd_buf, int cmd_len,
+ ide_handler_t *handler)
+{
+ if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt)
+ {
+ /* Here we should have been called after receiving an interrupt
+ from the device. DRQ should now be set. */
+ int stat_dum;
+
+ /* Check for errors. */
+ if (cdrom_decode_status (drive, DRQ_STAT, &stat_dum)) return 1;
+ }
+ else
+ {
+ /* Otherwise, we must wait for DRQ to get set. */
+ if (ide_wait_stat (drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) return 1;
+ }
+
+ /* Arm the interrupt handler. */
+ ide_set_handler (drive, handler, WAIT_CMD);
+
+ /* Send the command to the device. */
+ cdrom_out_bytes (drive, cmd_buf, cmd_len);
+
+ return 0;
+}
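+
+/* The two routines above are used as a pair (see the read path below):
+ cdrom_start_packet_command programs the task file and issues
+ WIN_PACKETCMD, its HANDLER then calls cdrom_transfer_packet_command
+ to send the command packet itself, and the handler passed to the
+ latter services the data-transfer interrupts which follow. */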
+
+
+
+/****************************************************************************
+ * Block read functions.
+ */
+
+/*
+ * Buffer up to SECTORS_TO_TRANSFER sectors from the drive in our sector
+ * buffer. Once the first sector is added, any subsequent sectors are
+ * assumed to be contiguous (until the buffer is cleared). For the first
+ * sector added, SECTOR is its sector number. (SECTOR is then ignored until
+ * the buffer is cleared.)
+ */
+static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector,
+ int sectors_to_transfer)
+{
+ struct cdrom_info *info = &drive->cdrom_info;
+
+ /* Number of sectors to read into the buffer. */
+ int sectors_to_buffer = MIN (sectors_to_transfer,
+ (SECTOR_BUFFER_SIZE >> SECTOR_BITS) -
+ info->nsectors_buffered);
+
+ char *dest;
+
+ /* If we don't yet have a sector buffer, try to allocate one.
+ If we can't get one atomically, it's not fatal -- we'll just throw
+ the data away rather than caching it. */
+ if (info->sector_buffer == NULL)
+ {
+ info->sector_buffer = (char *) kmalloc (SECTOR_BUFFER_SIZE, GFP_ATOMIC);
+
+ /* If we couldn't get a buffer, don't try to buffer anything... */
+ if (info->sector_buffer == NULL)
+ sectors_to_buffer = 0;
+ }
+
+ /* If this is the first sector in the buffer, remember its number. */
+ if (info->nsectors_buffered == 0)
+ info->sector_buffered = sector;
+
+ /* Read the data into the buffer. */
+ dest = info->sector_buffer + info->nsectors_buffered * SECTOR_SIZE;
+ while (sectors_to_buffer > 0)
+ {
+ cdrom_in_bytes (drive, dest, SECTOR_SIZE);
+ --sectors_to_buffer;
+ --sectors_to_transfer;
+ ++info->nsectors_buffered;
+ dest += SECTOR_SIZE;
+ }
+
+ /* Throw away any remaining data. */
+ while (sectors_to_transfer > 0)
+ {
+ char dum[SECTOR_SIZE];
+ cdrom_in_bytes (drive, dum, sizeof (dum));
+ --sectors_to_transfer;
+ }
+}
+
+
+/*
+ * Check the contents of the interrupt reason register from the cdrom
+ * and attempt to recover if there are problems. Returns 0 if everything's
+ * ok; nonzero if the request has been terminated.
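+ *
+ * In the low two bits of the interrupt reason register, bit 0 is CoD
+ * and bit 1 is IO; a value of 2 thus means "data for the host" (what a
+ * read expects), while 0 means the drive expects data from the host.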
+ */
+static inline
+int cdrom_read_check_ireason (ide_drive_t *drive, int len, int ireason)
+{
+ ireason &= 3;
+ if (ireason == 2) return 0;
+
+ if (ireason == 0)
+ {
+ /* Whoops... The drive is expecting to receive data from us! */
+ printk ("%s: cdrom_read_intr: "
+ "Drive wants to transfer data the wrong way!\n",
+ drive->name);
+
+ /* Throw some data at the drive so it doesn't hang
+ and then quit this request. */
+ while (len > 0)
+ {
+ int dum = 0;
+ cdrom_out_bytes (drive, &dum, sizeof (dum));
+ len -= sizeof (dum);
+ }
+ }
+
+ else
+ {
+ /* Drive wants a command packet, or invalid ireason... */
+ printk ("%s: cdrom_read_intr: bad interrupt reason %d\n",
+ drive->name, ireason);
+ }
+
+ cdrom_end_request (0, drive);
+ return -1;
+}
+
+
+/*
+ * Interrupt routine. Called when a read request has completed.
+ */
+static void cdrom_read_intr (ide_drive_t *drive)
+{
+ int stat;
+ int ireason, len, sectors_to_transfer, nskip;
+
+ struct request *rq = HWGROUP(drive)->rq;
+
+ /* Check for errors. */
+ if (cdrom_decode_status (drive, 0, &stat)) return;
+
+ /* Read the interrupt reason and the transfer length. */
+ ireason = IN_BYTE (IDE_NSECTOR_REG);
+ len = IN_BYTE (IDE_LCYL_REG) + 256 * IN_BYTE (IDE_HCYL_REG);
+
+ /* If DRQ is clear, the command has completed. */
+ if ((stat & DRQ_STAT) == 0)
+ {
+ /* If we're not done filling the current buffer, complain.
+ Otherwise, complete the command normally. */
+ if (rq->current_nr_sectors > 0)
+ {
+ printk ("%s: cdrom_read_intr: data underrun (%ld blocks)\n",
+ drive->name, rq->current_nr_sectors);
+ cdrom_end_request (0, drive);
+ }
+ else
+ cdrom_end_request (1, drive);
+
+ return;
+ }
+
+ /* Check that the drive is expecting to do the same thing that we are. */
+ if (cdrom_read_check_ireason (drive, len, ireason)) return;
+
+ /* Assume that the drive will always provide data in multiples of at least
+ SECTOR_SIZE, as it gets hairy to keep track of the transfers otherwise. */
+ if ((len % SECTOR_SIZE) != 0)
+ {
+ printk ("%s: cdrom_read_intr: Bad transfer size %d\n",
+ drive->name, len);
+ printk (" This drive is not supported by this version of the driver\n");
+ cdrom_end_request (0, drive);
+ return;
+ }
+
+ /* The number of sectors we need to read from the drive. */
+ sectors_to_transfer = len / SECTOR_SIZE;
+
+ /* First, figure out if we need to bit-bucket any of the leading sectors. */
+ nskip = MIN ((int)(rq->current_nr_sectors - (rq->bh->b_size >> SECTOR_BITS)),
+ sectors_to_transfer);
+
+ while (nskip > 0)
+ {
+ /* We need to throw away a sector. */
+ char dum[SECTOR_SIZE];
+ cdrom_in_bytes (drive, dum, sizeof (dum));
+
+ --rq->current_nr_sectors;
+ --nskip;
+ --sectors_to_transfer;
+ }
+
+ /* Now loop while we still have data to read from the drive. */
+ while (sectors_to_transfer > 0)
+ {
+ int this_transfer;
+
+ /* If we've filled the present buffer but there's another chained
+ buffer after it, move on. */
+ if (rq->current_nr_sectors == 0 &&
+ rq->nr_sectors > 0)
+ cdrom_end_request (1, drive);
+
+ /* If the buffers are full, cache the rest of the data in our
+ internal buffer. */
+ if (rq->current_nr_sectors == 0)
+ {
+ cdrom_buffer_sectors (drive, rq->sector, sectors_to_transfer);
+ sectors_to_transfer = 0;
+ }
+ else
+ {
+ /* Transfer data to the buffers.
+ Figure out how many sectors we can transfer
+ to the current buffer. */
+ this_transfer = MIN (sectors_to_transfer,
+ rq->current_nr_sectors);
+
+ /* Read this_transfer sectors into the current buffer. */
+ while (this_transfer > 0)
+ {
+ cdrom_in_bytes (drive, rq->buffer, SECTOR_SIZE);
+ rq->buffer += SECTOR_SIZE;
+ --rq->nr_sectors;
+ --rq->current_nr_sectors;
+ ++rq->sector;
+ --this_transfer;
+ --sectors_to_transfer;
+ }
+ }
+ }
+
+ /* Done moving data!
+ Wait for another interrupt. */
+ ide_set_handler (drive, &cdrom_read_intr, WAIT_CMD);
+}
+
+
+/*
+ * Try to satisfy some of the current read request from our cached data.
+ * Returns nonzero if the request has been completed, zero otherwise.
+ */
+static int cdrom_read_from_buffer (ide_drive_t *drive)
+{
+ struct cdrom_info *info = &drive->cdrom_info;
+ struct request *rq = HWGROUP(drive)->rq;
+
+ /* Can't do anything if there's no buffer. */
+ if (info->sector_buffer == NULL) return 0;
+
+ /* Loop while this request needs data and the next block is present
+ in our cache. */
+ while (rq->nr_sectors > 0 &&
+ rq->sector >= info->sector_buffered &&
+ rq->sector < info->sector_buffered + info->nsectors_buffered)
+ {
+ if (rq->current_nr_sectors == 0)
+ cdrom_end_request (1, drive);
+
+ memcpy (rq->buffer,
+ info->sector_buffer +
+ (rq->sector - info->sector_buffered) * SECTOR_SIZE,
+ SECTOR_SIZE);
+ rq->buffer += SECTOR_SIZE;
+ --rq->current_nr_sectors;
+ --rq->nr_sectors;
+ ++rq->sector;
+ }
+
+ /* If we've satisfied the current request, terminate it successfully. */
+ if (rq->nr_sectors == 0)
+ {
+ cdrom_end_request (1, drive);
+ return -1;
+ }
+
+ /* Move on to the next buffer if needed. */
+ if (rq->current_nr_sectors == 0)
+ cdrom_end_request (1, drive);
+
+ /* If this condition does not hold, then the kluge I use to
+ represent the number of sectors to skip at the start of a transfer
+ will fail. I think that this will never happen, but let's be
+ paranoid and check. */
+ if (rq->current_nr_sectors < (rq->bh->b_size >> SECTOR_BITS) &&
+ (rq->sector % SECTORS_PER_FRAME) != 0)
+ {
+ printk ("%s: cdrom_read_from_buffer: buffer botch (%ld)\n",
+ drive->name, rq->sector);
+ cdrom_end_request (0, drive);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+
+/*
+ * Routine to send a read packet command to the drive.
+ * This is usually called directly from cdrom_start_read.
+ * However, for drq_interrupt devices, it is called from an interrupt
+ * when the drive is ready to accept the command.
+ */
+static void cdrom_start_read_continuation (ide_drive_t *drive)
+{
+ struct packet_command pc;
+ struct request *rq = HWGROUP(drive)->rq;
+
+ int nsect, sector, nframes, frame, nskip;
+
+ /* Number of sectors to transfer. */
+ nsect = rq->nr_sectors;
+
+ /* Starting sector. */
+ sector = rq->sector;
+
+ /* If the requested sector doesn't start on a cdrom block boundary,
+ we must adjust the start of the transfer so that it does,
+ and remember to skip the first few sectors. If the CURRENT_NR_SECTORS
+ field is larger than the size of the buffer, the excess is the number
+ of sectors to skip at the start of the transfer. */
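+
+ /* For example, assuming 512-byte sectors and 2048-byte cdrom frames
+ (so SECTORS_PER_FRAME is 4), a request starting at sector 6 gives
+ nskip = 2: the read is started at sector 4, and the first two sectors
+ transferred are thrown away. */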
+ nskip = (sector % SECTORS_PER_FRAME);
+ if (nskip > 0)
+ {
+ /* Sanity check... */
+ if (rq->current_nr_sectors != (rq->bh->b_size >> SECTOR_BITS))
+ {
+ printk ("%s: cdrom_start_read_continuation: buffer botch (%ld)\n",
+ drive->name, rq->current_nr_sectors);
+ cdrom_end_request (0, drive);
+ return;
+ }
+
+ sector -= nskip;
+ nsect += nskip;
+ rq->current_nr_sectors += nskip;
+ }
+
+ /* Convert from sectors to cdrom blocks, rounding up the transfer
+ length if needed. */
+ nframes = (nsect + SECTORS_PER_FRAME-1) / SECTORS_PER_FRAME;
+ frame = sector / SECTORS_PER_FRAME;
+
+ /* Largest number of frames we can transfer at once is 64k-1. */
+ nframes = MIN (nframes, 65535);
+
+ /* Set up the command */
+ memset (&pc.c, 0, sizeof (pc.c));
+ pc.c[0] = READ_10;
+ pc.c[7] = (nframes >> 8);
+ pc.c[8] = (nframes & 0xff);
+#ifdef __alpha__
+ stl_u (htonl (frame), (unsigned int *) &pc.c[2]);
+#else
+ *(int *)(&pc.c[2]) = htonl (frame);
+#endif
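+
+ /* The packet built above is a standard READ(10) CDB: the opcode in c[0],
+ the starting frame as a big-endian 32-bit value in c[2..5] (stl_u is
+ used on the Alpha because &pc.c[2] is not 4-byte aligned), and the
+ transfer length in frames in c[7..8]. */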
+
+ /* Send the command to the drive and return. */
+ (void) cdrom_transfer_packet_command (drive, pc.c, sizeof (pc.c),
+ &cdrom_read_intr);
+}
+
+
+/*
+ * Start a read request from the CD-ROM.
+ */
+static void cdrom_start_read (ide_drive_t *drive, unsigned int block)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ int minor = MINOR (rq->rq_dev);
+
+ /* If the request is relative to a partition, fix it up to refer to the
+ absolute address. */
+ if ((minor & PARTN_MASK) != 0) {
+ rq->sector = block;
+ minor &= ~PARTN_MASK;
+ rq->rq_dev = MKDEV (MAJOR(rq->rq_dev), minor);
+ }
+
+ /* We may be retrying this request after an error.
+ Fix up any weirdness which might be present in the request packet. */
+ restore_request (rq);
+
+ /* Satisfy whatever we can of this request from our cached sector. */
+ if (cdrom_read_from_buffer (drive))
+ return;
+
+ /* Clear the local sector buffer. */
+ drive->cdrom_info.nsectors_buffered = 0;
+
+ /* Start sending the read request to the drive. */
+ cdrom_start_packet_command (drive, 32768, cdrom_start_read_continuation);
+}
+
+
+
+
+/****************************************************************************
+ * Execute all other packet commands.
+ */
+
+/* Forward declarations. */
+static int
+cdrom_lockdoor (ide_drive_t *drive, int lockflag,
+ struct atapi_request_sense *reqbuf);
+
+
+
+/* Interrupt routine for packet command completion. */
+static void cdrom_pc_intr (ide_drive_t *drive)
+{
+ int ireason, len, stat, thislen;
+ struct request *rq = HWGROUP(drive)->rq;
+ struct packet_command *pc = (struct packet_command *)rq->buffer;
+
+ /* Check for errors. */
+ if (cdrom_decode_status (drive, 0, &stat)) return;
+
+ /* Read the interrupt reason and the transfer length. */
+ ireason = IN_BYTE (IDE_NSECTOR_REG);
+ len = IN_BYTE (IDE_LCYL_REG) + 256 * IN_BYTE (IDE_HCYL_REG);
+
+ /* If DRQ is clear, the command has completed.
+ Complain if we still have data left to transfer. */
+ if ((stat & DRQ_STAT) == 0)
+ {
+ /* Some of the trailing request sense fields are optional, and
+ some drives don't send them. Sigh. */
+ if (pc->c[0] == REQUEST_SENSE && pc->buflen > 0 && pc->buflen <= 5) {
+ while (pc->buflen > 0) {
+ *pc->buffer++ = 0;
+ --pc->buflen;
+ }
+ }
+
+ if (pc->buflen == 0)
+ cdrom_end_request (1, drive);
+ else
+ {
+ printk ("%s: cdrom_pc_intr: data underrun %d\n",
+ drive->name, pc->buflen);
+ pc->stat = 1;
+ cdrom_end_request (1, drive);
+ }
+ return;
+ }
+
+ /* Figure out how much data to transfer. */
+ thislen = pc->buflen;
+ if (thislen < 0) thislen = -thislen;
+ if (thislen > len) thislen = len;
+
+ /* The drive wants to be written to. */
+ if ((ireason & 3) == 0)
+ {
+ /* Check that we want to write. */
+ if (pc->buflen > 0)
+ {
+ printk ("%s: cdrom_pc_intr: Drive wants to transfer data the wrong way!\n",
+ drive->name);
+ pc->stat = 1;
+ thislen = 0;
+ }
+
+ /* Transfer the data. */
+ cdrom_out_bytes (drive, pc->buffer, thislen);
+
+ /* If we haven't moved enough data to satisfy the drive,
+ add some padding. */
+ while (len > thislen)
+ {
+ int dum = 0;
+ cdrom_out_bytes (drive, &dum, sizeof (dum));
+ len -= sizeof (dum);
+ }
+
+ /* Keep count of how much data we've moved. */
+ pc->buffer += thislen;
+ pc->buflen += thislen;
+ }
+
+ /* Same drill for reading. */
+ else if ((ireason & 3) == 2)
+ {
+ /* Check that we want to read. */
+ if (pc->buflen < 0)
+ {
+ printk ("%s: cdrom_pc_intr: Drive wants to transfer data the wrong way!\n",
+ drive->name);
+ pc->stat = 1;
+ thislen = 0;
+ }
+
+ /* Transfer the data. */
+ cdrom_in_bytes (drive, pc->buffer, thislen);
+
+ /* If we haven't moved enough data to satisfy the drive,
+ add some padding. */
+ while (len > thislen)
+ {
+ int dum = 0;
+ cdrom_in_bytes (drive, &dum, sizeof (dum));
+ len -= sizeof (dum);
+ }
+
+ /* Keep count of how much data we've moved. */
+ pc->buffer += thislen;
+ pc->buflen -= thislen;
+ }
+
+ else
+ {
+ printk ("%s: cdrom_pc_intr: The drive appears confused (ireason = 0x%2x)\n",
+ drive->name, ireason);
+ pc->stat = 1;
+ }
+
+ /* Now we wait for another interrupt. */
+ ide_set_handler (drive, &cdrom_pc_intr, WAIT_CMD);
+}
+
+
+static void cdrom_do_pc_continuation (ide_drive_t *drive)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ struct packet_command *pc = (struct packet_command *)rq->buffer;
+
+ /* Send the command to the drive and return. */
+ cdrom_transfer_packet_command (drive, pc->c, sizeof (pc->c), &cdrom_pc_intr);
+}
+
+
+static void cdrom_do_packet_command (ide_drive_t *drive)
+{
+ int len;
+ struct request *rq = HWGROUP(drive)->rq;
+ struct packet_command *pc = (struct packet_command *)rq->buffer;
+
+ len = pc->buflen;
+ if (len < 0) len = -len;
+
+ pc->stat = 0;
+
+ /* Start sending the command to the drive. */
+ cdrom_start_packet_command (drive, len, cdrom_do_pc_continuation);
+}
+
+#ifndef MACH
+/* Sleep for TIME jiffies.
+ Not to be called from an interrupt handler. */
+static
+void cdrom_sleep (int time)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ current->timeout = jiffies + time;
+ schedule ();
+}
+#endif
+
+static
+int cdrom_queue_packet_command (ide_drive_t *drive, struct packet_command *pc)
+{
+ struct atapi_request_sense my_reqbuf;
+ int retries = 10;
+ struct request req;
+
+ /* If our caller has not provided a place to stick any sense data,
+ use our own area. */
+ if (pc->sense_data == NULL)
+ pc->sense_data = &my_reqbuf;
+ pc->sense_data->sense_key = 0;
+
+ /* Start of retry loop. */
+ do {
+ ide_init_drive_cmd (&req);
+ req.cmd = PACKET_COMMAND;
+ req.buffer = (char *)pc;
+ (void) ide_do_drive_cmd (drive, &req, ide_wait);
+
+ if (pc->stat != 0)
+ {
+ /* The request failed. Retry if it was due to a unit attention status
+ (usually means media was changed). */
+ struct atapi_request_sense *reqbuf = pc->sense_data;
+
+ if (reqbuf->sense_key == UNIT_ATTENTION)
+ ;
+
+ /* Also retry if the drive is in the process of loading a disk.
+ This time, however, wait a little between retries to give
+ the drive time to finish loading. */
+ else if (reqbuf->sense_key == NOT_READY && reqbuf->asc == 4)
+ {
+ cdrom_sleep (HZ);
+ }
+
+ /* Otherwise, don't retry. */
+ else
+ retries = 0;
+
+ --retries;
+ }
+
+ /* End of retry loop. */
+ } while (pc->stat != 0 && retries >= 0);
+
+
+ /* Return an error if the command failed. */
+ if (pc->stat != 0)
+ return -EIO;
+
+ else
+ {
+ /* The command succeeded. If it was anything other than a request sense,
+ eject, or door lock command, and we think that the door is presently
+ unlocked, lock it again. (The door was probably unlocked via
+ an explicit CDROMEJECT ioctl.) */
+ if (CDROM_STATE_FLAGS (drive)->door_locked == 0 &&
+ (pc->c[0] != REQUEST_SENSE &&
+ pc->c[0] != ALLOW_MEDIUM_REMOVAL &&
+ pc->c[0] != START_STOP))
+ {
+ (void) cdrom_lockdoor (drive, 1, NULL);
+ }
+ return 0;
+ }
+}
+
+
+
+/****************************************************************************
+ * drive_cmd handling.
+ *
+ * Most of the functions accessed via drive_cmd are not valid for ATAPI
+ * devices. Only attempt to execute those which actually should be valid.
+ */
+
+static
+void cdrom_do_drive_cmd (ide_drive_t *drive)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ byte *args = rq->buffer;
+
+ if (args)
+ {
+#if 0 /* This bit isn't done yet... */
+ if (args[0] == WIN_SETFEATURES &&
+ (args[2] == 0x66 || args[2] == 0xcc || args[2] == 0x02 ||
+ args[2] == 0xdd || args[2] == 0x5d))
+ {
+ OUT_BYTE (args[2], io_base + IDE_FEATURE_OFFSET);
+ <send cmd>
+ }
+ else
+#endif
+ {
+ printk ("%s: Unsupported drive command %02x %02x %02x\n",
+ drive->name, args[0], args[1], args[2]);
+ rq->errors = 1;
+ }
+ }
+
+ cdrom_end_request (1, drive);
+}
+
+
+
+/****************************************************************************
+ * cdrom driver request routine.
+ */
+
+void ide_do_rw_cdrom (ide_drive_t *drive, unsigned long block)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+
+ if (rq -> cmd == PACKET_COMMAND || rq -> cmd == REQUEST_SENSE_COMMAND)
+ cdrom_do_packet_command (drive);
+
+ else if (rq -> cmd == RESET_DRIVE_COMMAND)
+ {
+ cdrom_end_request (1, drive);
+ ide_do_reset (drive);
+ return;
+ }
+
+ else if (rq -> cmd == IDE_DRIVE_CMD)
+ cdrom_do_drive_cmd (drive);
+
+ else if (rq -> cmd != READ)
+ {
+ printk ("ide-cd: bad cmd %d\n", rq -> cmd);
+ cdrom_end_request (0, drive);
+ }
+ else
+ cdrom_start_read (drive, block);
+}
+
+
+
+/****************************************************************************
+ * Ioctl handling.
+ *
+ * Routines which queue packet commands take as a final argument a pointer
+ * to an atapi_request_sense struct. If execution of the command results
+ * in an error with a CHECK CONDITION status, this structure will be filled
+ * with the results of the subsequent request sense command. The pointer
+ * can also be NULL, in which case no sense information is returned.
+ */
+
+#if ! STANDARD_ATAPI
+static
+int bin2bcd (int x)
+{
+ return (x%10) | ((x/10) << 4);
+}
+
+
+static
+int bcd2bin (int x)
+{
+ return (x >> 4) * 10 + (x & 0x0f);
+}
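+
+/* For example, bin2bcd (42) yields 0x42 and bcd2bin (0x42) yields 42;
+ each decimal digit occupies its own nibble. */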
+#endif /* not STANDARD_ATAPI */
+
+
+static inline
+void lba_to_msf (int lba, byte *m, byte *s, byte *f)
+{
+ lba += CD_BLOCK_OFFSET;
+ lba &= 0xffffff; /* negative lbas use only 24 bits */
+ *m = lba / (CD_SECS * CD_FRAMES);
+ lba %= (CD_SECS * CD_FRAMES);
+ *s = lba / CD_FRAMES;
+ *f = lba % CD_FRAMES;
+}
+
+
+static inline
+int msf_to_lba (byte m, byte s, byte f)
+{
+ return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_BLOCK_OFFSET;
+}
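+
+/* Worked example (assuming the usual cdrom.h values CD_SECS = 60,
+ CD_FRAMES = 75 and CD_BLOCK_OFFSET = 150): lba_to_msf (16, ...) gives
+ minute 0, second 2, frame 16, since 16 + 150 = 166 = 2*75 + 16; and
+ msf_to_lba (0, 2, 16) = (0*60 + 2)*75 + 16 - 150 = 16 again. */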
+
+
+static int
+cdrom_check_status (ide_drive_t *drive,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+
+ pc.sense_data = reqbuf;
+ pc.c[0] = TEST_UNIT_READY;
+
+ return cdrom_queue_packet_command (drive, &pc);
+}
+
+
+/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
+static int
+cdrom_lockdoor (ide_drive_t *drive, int lockflag,
+ struct atapi_request_sense *reqbuf)
+{
+ struct atapi_request_sense my_reqbuf;
+ int stat;
+ struct packet_command pc;
+
+ if (reqbuf == NULL)
+ reqbuf = &my_reqbuf;
+
+ /* If the drive cannot lock the door, just pretend. */
+ if (CDROM_CONFIG_FLAGS (drive)->no_doorlock)
+ stat = 0;
+ else
+ {
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = ALLOW_MEDIUM_REMOVAL;
+ pc.c[4] = (lockflag != 0);
+ stat = cdrom_queue_packet_command (drive, &pc);
+ }
+
+ if (stat == 0)
+ CDROM_STATE_FLAGS (drive)->door_locked = lockflag;
+ else
+ {
+ /* If we got an illegal field error, the drive
+ probably cannot lock the door. */
+ if (reqbuf->sense_key == ILLEGAL_REQUEST && reqbuf->asc == 0x24)
+ {
+ printk ("%s: door locking not supported\n", drive->name);
+ CDROM_CONFIG_FLAGS (drive)->no_doorlock = 1;
+ stat = 0;
+ CDROM_STATE_FLAGS (drive)->door_locked = lockflag;
+ }
+ }
+ return stat;
+}
+
+
+/* Eject the disk if EJECTFLAG is 0.
+ If EJECTFLAG is 1, try to reload the disk. */
+static int
+cdrom_eject (ide_drive_t *drive, int ejectflag,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = START_STOP;
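+ /* In byte 4 of START/STOP UNIT, LoEj is bit 1 and Start is bit 0:
+ 2 ejects the disk, 3 (LoEj + Start) reloads it. */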
+ pc.c[4] = 2 + (ejectflag != 0);
+ return cdrom_queue_packet_command (drive, &pc);
+}
+
+
+static int
+cdrom_pause (ide_drive_t *drive, int pauseflag,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = SCMD_PAUSE_RESUME;
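+ /* Byte 8 is the resume bit: 0 pauses the audio, 1 resumes it. */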
+ pc.c[8] = !pauseflag;
+ return cdrom_queue_packet_command (drive, &pc);
+}
+
+
+static int
+cdrom_startstop (ide_drive_t *drive, int startflag,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = START_STOP;
+ pc.c[1] = 1;
+ pc.c[4] = startflag;
+ return cdrom_queue_packet_command (drive, &pc);
+}
+
+
+static int
+cdrom_read_capacity (ide_drive_t *drive, unsigned *capacity,
+ struct atapi_request_sense *reqbuf)
+{
+ struct {
+ unsigned lba;
+ unsigned blocklen;
+ } capbuf;
+
+ int stat;
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = READ_CAPACITY;
+ pc.buffer = (char *)&capbuf;
+ pc.buflen = sizeof (capbuf);
+
+ stat = cdrom_queue_packet_command (drive, &pc);
+ if (stat == 0)
+ {
+ *capacity = ntohl (capbuf.lba);
+ }
+
+ return stat;
+}
+
+
+static int
+cdrom_read_tocentry (ide_drive_t *drive, int trackno, int msf_flag,
+ int format, char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = buf;
+ pc.buflen = buflen;
+ pc.c[0] = SCMD_READ_TOC;
+ pc.c[6] = trackno;
+ pc.c[7] = (buflen >> 8);
+ pc.c[8] = (buflen & 0xff);
+ pc.c[9] = (format << 6);
+ if (msf_flag) pc.c[1] = 2;
+ return cdrom_queue_packet_command (drive, &pc);
+}
+
+
+/* Try to read the entire TOC for the disk into our internal buffer. */
+static int
+cdrom_read_toc (ide_drive_t *drive,
+ struct atapi_request_sense *reqbuf)
+{
+ int msf_flag;
+ int stat, ntracks, i;
+ struct atapi_toc *toc = drive->cdrom_info.toc;
+ struct {
+ struct atapi_toc_header hdr;
+ struct atapi_toc_entry ent;
+ } ms_tmp;
+
+ if (toc == NULL)
+ {
+ /* Try to allocate space. */
+ toc = (struct atapi_toc *) kmalloc (sizeof (struct atapi_toc),
+ GFP_KERNEL);
+ drive->cdrom_info.toc = toc;
+ }
+
+ if (toc == NULL)
+ {
+ printk ("%s: No cdrom TOC buffer!\n", drive->name);
+ return -EIO;
+ }
+
+ /* Check to see if the existing data is still valid.
+ If it is, just return. */
+ if (CDROM_STATE_FLAGS (drive)->toc_valid)
+ (void) cdrom_check_status (drive, NULL);
+
+ if (CDROM_STATE_FLAGS (drive)->toc_valid) return 0;
+
+#if STANDARD_ATAPI
+ msf_flag = 0;
+#else /* not STANDARD_ATAPI */
+ /* Some drives can't return TOC data in LBA format. */
+ msf_flag = (CDROM_CONFIG_FLAGS (drive)->no_lba_toc);
+#endif /* not STANDARD_ATAPI */
+
+ /* First read just the header, so we know how long the TOC is. */
+ stat = cdrom_read_tocentry (drive, 0, msf_flag, 0, (char *)&toc->hdr,
+ sizeof (struct atapi_toc_header) +
+ sizeof (struct atapi_toc_entry),
+ reqbuf);
+ if (stat) return stat;
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->vertos_lossage)
+ {
+ toc->hdr.first_track = bcd2bin (toc->hdr.first_track);
+ toc->hdr.last_track = bcd2bin (toc->hdr.last_track);
+ /* hopefully the length is not BCD, too ;-| */
+ }
+#endif /* not STANDARD_ATAPI */
+
+ ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
+ if (ntracks <= 0) return -EIO;
+ if (ntracks > MAX_TRACKS) ntracks = MAX_TRACKS;
+
+ /* Now read the whole schmeer. */
+ stat = cdrom_read_tocentry (drive, 0, msf_flag, 0, (char *)&toc->hdr,
+ sizeof (struct atapi_toc_header) +
+ (ntracks+1) * sizeof (struct atapi_toc_entry),
+ reqbuf);
+ if (stat) return stat;
+ toc->hdr.toc_length = ntohs (toc->hdr.toc_length);
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->vertos_lossage)
+ {
+ toc->hdr.first_track = bcd2bin (toc->hdr.first_track);
+ toc->hdr.last_track = bcd2bin (toc->hdr.last_track);
+ /* hopefully the length is not BCD, too ;-| */
+ }
+#endif /* not STANDARD_ATAPI */
+
+ for (i=0; i<=ntracks; i++)
+ {
+#if ! STANDARD_ATAPI
+ if (msf_flag)
+ {
+ if (CDROM_CONFIG_FLAGS (drive)->vertos_lossage)
+ {
+ toc->ent[i].track = bcd2bin (toc->ent[i].track);
+ toc->ent[i].addr.msf.m = bcd2bin (toc->ent[i].addr.msf.m);
+ toc->ent[i].addr.msf.s = bcd2bin (toc->ent[i].addr.msf.s);
+ toc->ent[i].addr.msf.f = bcd2bin (toc->ent[i].addr.msf.f);
+ }
+ toc->ent[i].addr.lba = msf_to_lba (toc->ent[i].addr.msf.m,
+ toc->ent[i].addr.msf.s,
+ toc->ent[i].addr.msf.f);
+ }
+ else
+#endif /* not STANDARD_ATAPI */
+ toc->ent[i].addr.lba = ntohl (toc->ent[i].addr.lba);
+ }
+
+ /* Read the multisession information. */
+ stat = cdrom_read_tocentry (drive, 0, msf_flag, 1,
+ (char *)&ms_tmp, sizeof (ms_tmp),
+ reqbuf);
+ if (stat) return stat;
+#if ! STANDARD_ATAPI
+ if (msf_flag)
+ toc->last_session_lba = msf_to_lba (ms_tmp.ent.addr.msf.m,
+ ms_tmp.ent.addr.msf.s,
+ ms_tmp.ent.addr.msf.f);
+ else
+#endif /* not STANDARD_ATAPI */
+ toc->last_session_lba = ntohl (ms_tmp.ent.addr.lba);
+
+ toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track);
+
+ /* Now try to get the total cdrom capacity. */
+ stat = cdrom_read_capacity (drive, &toc->capacity, reqbuf);
+ if (stat) toc->capacity = 0x1fffff;
+
+ HWIF(drive)->gd->sizes[drive->select.b.unit << PARTN_BITS]
+ = toc->capacity * SECTORS_PER_FRAME;
+ drive->part[0].nr_sects = toc->capacity * SECTORS_PER_FRAME;
+
+ /* Remember that we've read this stuff. */
+ CDROM_STATE_FLAGS (drive)->toc_valid = 1;
+
+ return 0;
+}
+
+
+static int
+cdrom_read_subchannel (ide_drive_t *drive,
+ char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = buf;
+ pc.buflen = buflen;
+ pc.c[0] = SCMD_READ_SUBCHANNEL;
+ pc.c[2] = 0x40; /* request subQ data */
+ pc.c[3] = 0x01; /* Format 1: current position */
+ pc.c[7] = (buflen >> 8);
+ pc.c[8] = (buflen & 0xff);
+ return cdrom_queue_packet_command (drive, &pc);
+}
+
+
+/* modeflag: 0 = current, 1 = changeable mask, 2 = default, 3 = saved */
+static int
+cdrom_mode_sense (ide_drive_t *drive, int pageno, int modeflag,
+ char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = buf;
+ pc.buflen = buflen;
+ pc.c[0] = MODE_SENSE_10;
+ pc.c[2] = pageno | (modeflag << 6);
+ pc.c[7] = (buflen >> 8);
+ pc.c[8] = (buflen & 0xff);
+ return cdrom_queue_packet_command (drive, &pc);
+}
+
+
+static int
+cdrom_mode_select (ide_drive_t *drive, int pageno, char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = buf;
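+ /* A negative buflen marks this as a transfer from host to drive;
+ cdrom_pc_intr checks the sign of buflen to pick the direction. */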
+ pc.buflen = - buflen;
+ pc.c[0] = MODE_SELECT_10;
+ pc.c[1] = 0x10;
+ pc.c[2] = pageno;
+ pc.c[7] = (buflen >> 8);
+ pc.c[8] = (buflen & 0xff);
+ return cdrom_queue_packet_command (drive, &pc);
+}
+
+
+static int
+cdrom_play_lba_range_play12 (ide_drive_t *drive, int lba_start, int lba_end,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = SCMD_PLAYAUDIO12;
+#ifdef __alpha__
+ stq_u(((long) htonl (lba_end - lba_start) << 32) | htonl(lba_start),
+ (unsigned long *) &pc.c[2]);
+#else
+ *(int *)(&pc.c[2]) = htonl (lba_start);
+ *(int *)(&pc.c[6]) = htonl (lba_end - lba_start);
+#endif
+
+ return cdrom_queue_packet_command (drive, &pc);
+}
+
+
+#if ! STANDARD_ATAPI
+static int
+cdrom_play_lba_range_msf (ide_drive_t *drive, int lba_start, int lba_end,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = SCMD_PLAYAUDIO_MSF;
+ lba_to_msf (lba_start, &pc.c[3], &pc.c[4], &pc.c[5]);
+ lba_to_msf (lba_end-1, &pc.c[6], &pc.c[7], &pc.c[8]);
+
+ if (CDROM_CONFIG_FLAGS (drive)->playmsf_uses_bcd)
+ {
+ pc.c[3] = bin2bcd (pc.c[3]);
+ pc.c[4] = bin2bcd (pc.c[4]);
+ pc.c[5] = bin2bcd (pc.c[5]);
+ pc.c[6] = bin2bcd (pc.c[6]);
+ pc.c[7] = bin2bcd (pc.c[7]);
+ pc.c[8] = bin2bcd (pc.c[8]);
+ }
+
+ return cdrom_queue_packet_command (drive, &pc);
+}
+#endif /* not STANDARD_ATAPI */
+
+
+static int
+cdrom_play_lba_range_1 (ide_drive_t *drive, int lba_start, int lba_end,
+ struct atapi_request_sense *reqbuf)
+{
+ /* This is rather annoying.
+ My NEC-260 won't recognize group 5 commands such as PLAYAUDIO12;
+ the only way to get it to play more than 64k of blocks at once
+ seems to be the PLAYAUDIO_MSF command. However, the parameters
+ the NEC 260 wants for the PLAYMSF command are incompatible with
+ the new version of the spec.
+
+ So what I'll try is this. First, try PLAYAUDIO12. If it works,
+ great. Otherwise, if the drive reports an illegal command code,
+ try PLAYAUDIO_MSF using the NEC 260-style bcd parameters. */
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->no_playaudio12)
+ return cdrom_play_lba_range_msf (drive, lba_start, lba_end, reqbuf);
+ else
+#endif /* not STANDARD_ATAPI */
+ {
+ int stat;
+ struct atapi_request_sense my_reqbuf;
+
+ if (reqbuf == NULL)
+ reqbuf = &my_reqbuf;
+
+ stat = cdrom_play_lba_range_play12 (drive, lba_start, lba_end, reqbuf);
+ if (stat == 0) return 0;
+
+#if ! STANDARD_ATAPI
+ /* It failed. Try to find out why. */
+ if (reqbuf->sense_key == ILLEGAL_REQUEST && reqbuf->asc == 0x20)
+ {
+ /* The drive didn't recognize the command.
+ Retry with the MSF variant. */
+ printk ("%s: Drive does not support PLAYAUDIO12; "
+ "trying PLAYAUDIO_MSF\n", drive->name);
+ CDROM_CONFIG_FLAGS (drive)->no_playaudio12 = 1;
+ CDROM_CONFIG_FLAGS (drive)->playmsf_uses_bcd = 1;
+ return cdrom_play_lba_range_msf (drive, lba_start, lba_end, reqbuf);
+ }
+#endif /* not STANDARD_ATAPI */
+
+ /* Failed for some other reason. Give up. */
+ return stat;
+ }
+}
+
+
+/* Play audio starting at LBA LBA_START and finishing with the
+ LBA before LBA_END. */
+static int
+cdrom_play_lba_range (ide_drive_t *drive, int lba_start, int lba_end,
+ struct atapi_request_sense *reqbuf)
+{
+ int i, stat;
+ struct atapi_request_sense my_reqbuf;
+
+ if (reqbuf == NULL)
+ reqbuf = &my_reqbuf;
+
+ /* Some drives will, for certain audio CDs, give an error if you ask
+ them to play the entire CD using the values returned in the TOC.
+ The play will succeed, however, if the ending address is adjusted
+ downwards by a few frames. */
+ for (i=0; i<75; i++)
+ {
+ stat = cdrom_play_lba_range_1 (drive, lba_start, lba_end, reqbuf);
+
+ if (stat == 0 ||
+ !(reqbuf->sense_key == ILLEGAL_REQUEST && reqbuf->asc == 0x24))
+ return stat;
+
+ --lba_end;
+ if (lba_end <= lba_start) break;
+ }
+
+ return stat;
+}
+
+
+static
+int cdrom_get_toc_entry (ide_drive_t *drive, int track,
+ struct atapi_toc_entry **ent,
+ struct atapi_request_sense *reqbuf)
+{
+ int stat, ntracks;
+ struct atapi_toc *toc;
+
+ /* Make sure our saved TOC is valid. */
+ stat = cdrom_read_toc (drive, reqbuf);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ /* Check validity of requested track number. */
+ ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
+ if (track == CDROM_LEADOUT)
+ *ent = &toc->ent[ntracks];
+ else if (track < toc->hdr.first_track ||
+ track > toc->hdr.last_track)
+ return -EINVAL;
+ else
+ *ent = &toc->ent[track - toc->hdr.first_track];
+
+ return 0;
+}
+
+
+static int
+cdrom_read_block (ide_drive_t *drive, int format, int lba,
+ char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+ struct atapi_request_sense my_reqbuf;
+ int stat;
+
+ if (reqbuf == NULL)
+ reqbuf = &my_reqbuf;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = buf;
+ pc.buflen = buflen;
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->old_readcd)
+ pc.c[0] = 0xd4;
+ else
+#endif /* not STANDARD_ATAPI */
+ pc.c[0] = READ_CD;
+
+ pc.c[1] = (format << 2);
+#ifdef __alpha__
+ stl_u(htonl (lba), (unsigned int *) &pc.c[2]);
+#else
+ *(int *)(&pc.c[2]) = htonl (lba);
+#endif
+ pc.c[8] = 1; /* one block */
+ pc.c[9] = 0x10;
+
+ stat = cdrom_queue_packet_command (drive, &pc);
+
+#if ! STANDARD_ATAPI
+ /* If the drive doesn't recognize the READ CD opcode, retry the command
+ with an older opcode for that command. */
+ if (stat && reqbuf->sense_key == ILLEGAL_REQUEST && reqbuf->asc == 0x20 &&
+ CDROM_CONFIG_FLAGS (drive)->old_readcd == 0)
+ {
+ printk ("%s: Drive does not recognize READ_CD; trying opcode 0xd4\n",
+ drive->name);
+ CDROM_CONFIG_FLAGS (drive)->old_readcd = 1;
+ return cdrom_read_block (drive, format, lba, buf, buflen, reqbuf);
+ }
+#endif /* not STANDARD_ATAPI */
+
+ return stat;
+}
+
+
+int ide_cdrom_ioctl (ide_drive_t *drive, struct inode *inode,
+ struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd)
+ {
+ case CDROMEJECT:
+ {
+ int stat;
+
+ if (drive->usage > 1)
+ return -EBUSY;
+
+ stat = cdrom_lockdoor (drive, 0, NULL);
+ if (stat) return stat;
+
+ return cdrom_eject (drive, 0, NULL);
+ }
+
+ case CDROMEJECT_SW:
+ {
+ CDROM_STATE_FLAGS (drive)->eject_on_close = arg;
+ return 0;
+ }
+
+ case CDROMPAUSE:
+ return cdrom_pause (drive, 1, NULL);
+
+ case CDROMRESUME:
+ return cdrom_pause (drive, 0, NULL);
+
+ case CDROMSTART:
+ return cdrom_startstop (drive, 1, NULL);
+
+ case CDROMSTOP:
+ {
+ int stat;
+
+ stat = cdrom_startstop (drive, 0, NULL);
+ if (stat) return stat;
+ /* pit says the Dolphin needs this. */
+ return cdrom_eject (drive, 1, NULL);
+ }
+
+ case CDROMPLAYMSF:
+ {
+ struct cdrom_msf msf;
+ int stat, lba_start, lba_end;
+
+ stat = verify_area (VERIFY_READ, (void *)arg, sizeof (msf));
+ if (stat) return stat;
+
+ memcpy_fromfs (&msf, (void *) arg, sizeof(msf));
+
+ lba_start = msf_to_lba (msf.cdmsf_min0, msf.cdmsf_sec0,
+ msf.cdmsf_frame0);
+ lba_end = msf_to_lba (msf.cdmsf_min1, msf.cdmsf_sec1,
+ msf.cdmsf_frame1) + 1;
+
+ if (lba_end <= lba_start) return -EINVAL;
+
+ return cdrom_play_lba_range (drive, lba_start, lba_end, NULL);
+ }
+
+ /* Like just about every other Linux cdrom driver, we ignore the
+ index part of the request here. */
+ case CDROMPLAYTRKIND:
+ {
+ int stat, lba_start, lba_end;
+ struct cdrom_ti ti;
+ struct atapi_toc_entry *first_toc, *last_toc;
+
+ stat = verify_area (VERIFY_READ, (void *)arg, sizeof (ti));
+ if (stat) return stat;
+
+ memcpy_fromfs (&ti, (void *) arg, sizeof(ti));
+
+ stat = cdrom_get_toc_entry (drive, ti.cdti_trk0, &first_toc, NULL);
+ if (stat) return stat;
+ stat = cdrom_get_toc_entry (drive, ti.cdti_trk1, &last_toc, NULL);
+ if (stat) return stat;
+
+ if (ti.cdti_trk1 != CDROM_LEADOUT) ++last_toc;
+ lba_start = first_toc->addr.lba;
+ lba_end = last_toc->addr.lba;
+
+ if (lba_end <= lba_start) return -EINVAL;
+
+ return cdrom_play_lba_range (drive, lba_start, lba_end, NULL);
+ }
+
+ case CDROMREADTOCHDR:
+ {
+ int stat;
+ struct cdrom_tochdr tochdr;
+ struct atapi_toc *toc;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg, sizeof (tochdr));
+ if (stat) return stat;
+
+ /* Make sure our saved TOC is valid. */
+ stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+ tochdr.cdth_trk0 = toc->hdr.first_track;
+ tochdr.cdth_trk1 = toc->hdr.last_track;
+
+ memcpy_tofs ((void *) arg, &tochdr, sizeof (tochdr));
+
+ return stat;
+ }
+
+ case CDROMREADTOCENTRY:
+ {
+ int stat;
+ struct cdrom_tocentry tocentry;
+ struct atapi_toc_entry *toce;
+
+ stat = verify_area (VERIFY_READ, (void *) arg, sizeof (tocentry));
+ if (stat) return stat;
+ stat = verify_area (VERIFY_WRITE, (void *) arg, sizeof (tocentry));
+ if (stat) return stat;
+
+ memcpy_fromfs (&tocentry, (void *) arg, sizeof (tocentry));
+
+ stat = cdrom_get_toc_entry (drive, tocentry.cdte_track, &toce, NULL);
+ if (stat) return stat;
+
+ tocentry.cdte_ctrl = toce->control;
+ tocentry.cdte_adr = toce->adr;
+
+ if (tocentry.cdte_format == CDROM_MSF)
+ {
+ /* convert to MSF */
+ lba_to_msf (toce->addr.lba,
+ &tocentry.cdte_addr.msf.minute,
+ &tocentry.cdte_addr.msf.second,
+ &tocentry.cdte_addr.msf.frame);
+ }
+ else
+ tocentry.cdte_addr.lba = toce->addr.lba;
+
+ memcpy_tofs ((void *) arg, &tocentry, sizeof (tocentry));
+
+ return stat;
+ }
+
+ case CDROMSUBCHNL:
+ {
+ struct atapi_cdrom_subchnl scbuf;
+ int stat, abs_lba, rel_lba;
+ struct cdrom_subchnl subchnl;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg, sizeof (subchnl));
+ if (stat) return stat;
+ stat = verify_area (VERIFY_READ, (void *) arg, sizeof (subchnl));
+ if (stat) return stat;
+
+ memcpy_fromfs (&subchnl, (void *) arg, sizeof (subchnl));
+
+ stat = cdrom_read_subchannel (drive, (char *)&scbuf, sizeof (scbuf),
+ NULL);
+ if (stat) return stat;
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->vertos_lossage)
+ {
+ abs_lba = msf_to_lba (bcd2bin (scbuf.acdsc_absaddr.msf.minute),
+ bcd2bin (scbuf.acdsc_absaddr.msf.second),
+ bcd2bin (scbuf.acdsc_absaddr.msf.frame));
+ rel_lba = msf_to_lba (bcd2bin (scbuf.acdsc_reladdr.msf.minute),
+ bcd2bin (scbuf.acdsc_reladdr.msf.second),
+ bcd2bin (scbuf.acdsc_reladdr.msf.frame));
+ scbuf.acdsc_trk = bcd2bin (scbuf.acdsc_trk);
+ }
+ else
+#endif /* not STANDARD_ATAPI */
+ {
+ abs_lba = ntohl (scbuf.acdsc_absaddr.lba);
+ rel_lba = ntohl (scbuf.acdsc_reladdr.lba);
+ }
+
+ if (subchnl.cdsc_format == CDROM_MSF)
+ {
+ lba_to_msf (abs_lba,
+ &subchnl.cdsc_absaddr.msf.minute,
+ &subchnl.cdsc_absaddr.msf.second,
+ &subchnl.cdsc_absaddr.msf.frame);
+ lba_to_msf (rel_lba,
+ &subchnl.cdsc_reladdr.msf.minute,
+ &subchnl.cdsc_reladdr.msf.second,
+ &subchnl.cdsc_reladdr.msf.frame);
+ }
+ else
+ {
+ subchnl.cdsc_absaddr.lba = abs_lba;
+ subchnl.cdsc_reladdr.lba = rel_lba;
+ }
+
+ subchnl.cdsc_audiostatus = scbuf.acdsc_audiostatus;
+ subchnl.cdsc_ctrl = scbuf.acdsc_ctrl;
+ subchnl.cdsc_trk = scbuf.acdsc_trk;
+ subchnl.cdsc_ind = scbuf.acdsc_ind;
+
+ memcpy_tofs ((void *) arg, &subchnl, sizeof (subchnl));
+
+ return stat;
+ }
+
+ case CDROMVOLCTRL:
+ {
+ struct cdrom_volctrl volctrl;
+ char buffer[24], mask[24];
+ int stat;
+
+ stat = verify_area (VERIFY_READ, (void *) arg, sizeof (volctrl));
+ if (stat) return stat;
+ memcpy_fromfs (&volctrl, (void *) arg, sizeof (volctrl));
+
+ stat = cdrom_mode_sense (drive, 0x0e, 0, buffer, sizeof (buffer),NULL);
+ if (stat) return stat;
+ stat = cdrom_mode_sense (drive, 0x0e, 1, mask , sizeof (buffer),NULL);
+ if (stat) return stat;
+
+ buffer[1] = buffer[2] = 0;
+
+ buffer[17] = volctrl.channel0 & mask[17];
+ buffer[19] = volctrl.channel1 & mask[19];
+ buffer[21] = volctrl.channel2 & mask[21];
+ buffer[23] = volctrl.channel3 & mask[23];
+
+ return cdrom_mode_select (drive, 0x0e, buffer, sizeof (buffer), NULL);
+ }
+
+ case CDROMVOLREAD:
+ {
+ struct cdrom_volctrl volctrl;
+ char buffer[24];
+ int stat;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg, sizeof (volctrl));
+ if (stat) return stat;
+
+ stat = cdrom_mode_sense (drive, 0x0e, 0, buffer, sizeof (buffer), NULL);
+ if (stat) return stat;
+
+ volctrl.channel0 = buffer[17];
+ volctrl.channel1 = buffer[19];
+ volctrl.channel2 = buffer[21];
+ volctrl.channel3 = buffer[23];
+
+ memcpy_tofs ((void *) arg, &volctrl, sizeof (volctrl));
+
+ return 0;
+ }
+
+ case CDROMMULTISESSION:
+ {
+ struct cdrom_multisession ms_info;
+ struct atapi_toc *toc;
+ int stat;
+
+ stat = verify_area (VERIFY_READ, (void *)arg, sizeof (ms_info));
+ if (stat) return stat;
+ stat = verify_area (VERIFY_WRITE, (void *)arg, sizeof (ms_info));
+ if (stat) return stat;
+
+ memcpy_fromfs (&ms_info, (void *)arg, sizeof (ms_info));
+
+ /* Make sure the TOC information is valid. */
+ stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ if (ms_info.addr_format == CDROM_MSF)
+ lba_to_msf (toc->last_session_lba,
+ &ms_info.addr.msf.minute,
+ &ms_info.addr.msf.second,
+ &ms_info.addr.msf.frame);
+
+ else if (ms_info.addr_format == CDROM_LBA)
+ ms_info.addr.lba = toc->last_session_lba;
+
+ else
+ return -EINVAL;
+
+ ms_info.xa_flag = toc->xa_flag;
+
+ memcpy_tofs ((void *)arg, &ms_info, sizeof (ms_info));
+
+ return 0;
+ }
+
+ /* Read 2352 byte blocks from audio tracks. */
+ case CDROMREADAUDIO:
+ {
+ int stat, lba;
+ struct atapi_toc *toc;
+ struct cdrom_read_audio ra;
+ char buf[CD_FRAMESIZE_RAW];
+
+ /* Make sure the TOC is up to date. */
+ stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ stat = verify_area (VERIFY_READ, (char *)arg, sizeof (ra));
+ if (stat) return stat;
+
+ memcpy_fromfs (&ra, (void *)arg, sizeof (ra));
+
+ if (ra.nframes < 0 || ra.nframes > toc->capacity)
+ return -EINVAL;
+ else if (ra.nframes == 0)
+ return 0;
+
+ stat = verify_area (VERIFY_WRITE, (char *)ra.buf,
+ ra.nframes * CD_FRAMESIZE_RAW);
+ if (stat) return stat;
+
+ if (ra.addr_format == CDROM_MSF)
+ lba = msf_to_lba (ra.addr.msf.minute, ra.addr.msf.second,
+ ra.addr.msf.frame);
+
+ else if (ra.addr_format == CDROM_LBA)
+ lba = ra.addr.lba;
+
+ else
+ return -EINVAL;
+
+ if (lba < 0 || lba >= toc->capacity)
+ return -EINVAL;
+
+ while (ra.nframes > 0)
+ {
+ stat = cdrom_read_block (drive, 1, lba, buf,
+ CD_FRAMESIZE_RAW, NULL);
+ if (stat) return stat;
+ memcpy_tofs (ra.buf, buf, CD_FRAMESIZE_RAW);
+ ra.buf += CD_FRAMESIZE_RAW;
+ --ra.nframes;
+ ++lba;
+ }
+
+ return 0;
+ }
+
+ case CDROMREADMODE1:
+ case CDROMREADMODE2:
+ {
+ struct cdrom_msf msf;
+ int blocksize, format, stat, lba;
+ struct atapi_toc *toc;
+ char buf[CD_FRAMESIZE_RAW0];
+
+ if (cmd == CDROMREADMODE1)
+ {
+ blocksize = CD_FRAMESIZE;
+ format = 2;
+ }
+ else
+ {
+ blocksize = CD_FRAMESIZE_RAW0;
+ format = 3;
+ }
+
+ stat = verify_area (VERIFY_READ, (char *)arg, sizeof (msf));
+ if (stat) return stat;
+ stat = verify_area (VERIFY_WRITE, (char *)arg, blocksize);
+ if (stat) return stat;
+
+ memcpy_fromfs (&msf, (void *)arg, sizeof (msf));
+
+ lba = msf_to_lba (msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0);
+
+ /* Make sure the TOC is up to date. */
+ stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ if (lba < 0 || lba >= toc->capacity)
+ return -EINVAL;
+
+ stat = cdrom_read_block (drive, format, lba, buf, blocksize, NULL);
+ if (stat) return stat;
+
+ memcpy_tofs ((char *)arg, buf, blocksize);
+ return 0;
+ }
+
+#if 0 /* Doesn't work reliably yet. */
+ case CDROMRESET:
+ {
+ struct request req;
+ ide_init_drive_cmd (&req);
+ req.cmd = RESET_DRIVE_COMMAND;
+ return ide_do_drive_cmd (drive, &req, ide_wait);
+ }
+#endif
+
+
+#ifdef TEST
+ case 0x1234:
+ {
+ int stat;
+ struct packet_command pc;
+ int len, lena;
+
+ memset (&pc, 0, sizeof (pc));
+
+ stat = verify_area (VERIFY_READ, (void *) arg, sizeof (pc.c));
+ if (stat) return stat;
+ memcpy_fromfs (&pc.c, (void *) arg, sizeof (pc.c));
+ arg += sizeof (pc.c);
+
+ stat = verify_area (VERIFY_READ, (void *) arg, sizeof (len));
+ if (stat) return stat;
+ memcpy_fromfs (&len, (void *) arg , sizeof (len));
+ arg += sizeof (len);
+
+ if (len > 0) {
+ stat = verify_area (VERIFY_WRITE, (void *) arg, len);
+ if (stat) return stat;
+ }
+
+ lena = len;
+ if (lena < 0) lena = 0;
+
+ {
+ char buf[lena];
+ if (len > 0) {
+ pc.buflen = len;
+ pc.buffer = buf;
+ }
+
+ stat = cdrom_queue_packet_command (drive, &pc);
+
+ if (len > 0)
+ memcpy_tofs ((void *)arg, buf, len);
+ }
+
+ return stat;
+ }
+#endif
+
+ default:
+ return -EPERM;
+ }
+
+}
+
+
+
+/****************************************************************************
+ * Other driver requests (open, close, check media change).
+ */
+
+int ide_cdrom_check_media_change (ide_drive_t *drive)
+{
+ int retval;
+
+ (void) cdrom_check_status (drive, NULL);
+
+ retval = CDROM_STATE_FLAGS (drive)->media_changed;
+ CDROM_STATE_FLAGS (drive)->media_changed = 0;
+
+ return retval;
+}
+
+
+int ide_cdrom_open (struct inode *ip, struct file *fp, ide_drive_t *drive)
+{
+ /* no write access */
+ if (fp->f_mode & 2)
+ {
+ --drive->usage;
+ return -EROFS;
+ }
+
+ /* If this is the first open, check the drive status. */
+ if (drive->usage == 1)
+ {
+ int stat;
+ struct atapi_request_sense my_reqbuf;
+ my_reqbuf.sense_key = 0;
+
+ /* Get the drive status. */
+ stat = cdrom_check_status (drive, &my_reqbuf);
+
+ /* If the tray is open, try to close it. */
+ if (stat && my_reqbuf.sense_key == NOT_READY)
+ {
+ cdrom_eject (drive, 1, &my_reqbuf);
+ stat = cdrom_check_status (drive, &my_reqbuf);
+ }
+
+ /* Return an error if there are still problems. */
+ if (stat && my_reqbuf.sense_key != UNIT_ATTENTION)
+ {
+ --drive->usage;
+ return -ENXIO;
+ }
+
+ /* Now lock the door. */
+ (void) cdrom_lockdoor (drive, 1, &my_reqbuf);
+
+ /* And try to read the TOC information now. */
+ (void) cdrom_read_toc (drive, &my_reqbuf);
+ }
+
+ return 0;
+}
+
+
+/*
+ * Close down the device. Invalidate all cached blocks.
+ */
+
+void ide_cdrom_release (struct inode *inode, struct file *file, ide_drive_t *drive)
+{
+ if (drive->usage == 0)
+ {
+ invalidate_buffers (inode->i_rdev);
+
+ /* Unlock the door. */
+ (void) cdrom_lockdoor (drive, 0, NULL);
+
+ /* Do an eject if we were requested to do so. */
+ if (CDROM_STATE_FLAGS (drive)->eject_on_close)
+ (void) cdrom_eject (drive, 0, NULL);
+ }
+}
+
+
+
+/****************************************************************************
+ * Device initialization.
+ */
+
+void ide_cdrom_setup (ide_drive_t *drive)
+{
+ blksize_size[HWIF(drive)->major][drive->select.b.unit << PARTN_BITS] = CD_FRAMESIZE;
+
+ drive->special.all = 0;
+ drive->ready_stat = 0;
+
+ CDROM_STATE_FLAGS (drive)->media_changed = 0;
+ CDROM_STATE_FLAGS (drive)->toc_valid = 0;
+ CDROM_STATE_FLAGS (drive)->door_locked = 0;
+
+ /* Turn this off by default, since many people don't like it. */
+ CDROM_STATE_FLAGS (drive)->eject_on_close= 0;
+
+#if NO_DOOR_LOCKING
+ CDROM_CONFIG_FLAGS (drive)->no_doorlock = 1;
+#else
+ CDROM_CONFIG_FLAGS (drive)->no_doorlock = 0;
+#endif
+
+ if (drive->id != NULL) {
+ CDROM_CONFIG_FLAGS (drive)->drq_interrupt =
+ ((drive->id->config & 0x0060) == 0x20);
+ } else {
+ CDROM_CONFIG_FLAGS (drive)->drq_interrupt = 0;
+ }
+
+#if ! STANDARD_ATAPI
+ CDROM_CONFIG_FLAGS (drive)->no_playaudio12 = 0;
+ CDROM_CONFIG_FLAGS (drive)->old_readcd = 0;
+ CDROM_CONFIG_FLAGS (drive)->no_lba_toc = 0;
+ CDROM_CONFIG_FLAGS (drive)->playmsf_uses_bcd = 0;
+ CDROM_CONFIG_FLAGS (drive)->vertos_lossage = 0;
+
+ if (drive->id != NULL) {
+ /* Accommodate some broken drives... */
+ if (strcmp (drive->id->model, "CD220E") == 0 ||
+ strcmp (drive->id->model, "CD") == 0) /* Creative Labs */
+ CDROM_CONFIG_FLAGS (drive)->no_lba_toc = 1;
+
+ else if (strcmp (drive->id->model, "TO-ICSLYAL") == 0 || /* Acer CD525E */
+ strcmp (drive->id->model, "OTI-SCYLLA") == 0)
+ CDROM_CONFIG_FLAGS (drive)->no_lba_toc = 1;
+
+ /* I don't know who makes this.
+ Francesco Messineo <sidera@ccii.unipi.it> says this one's broken too. */
+ else if (strcmp (drive->id->model, "DCI-2S10") == 0)
+ CDROM_CONFIG_FLAGS (drive)->no_lba_toc = 1;
+
+ else if (strcmp (drive->id->model, "CDA26803I SE") == 0) /* Aztech */
+ {
+ CDROM_CONFIG_FLAGS (drive)->no_lba_toc = 1;
+
+ /* This drive _also_ does not implement PLAYAUDIO12 correctly. */
+ CDROM_CONFIG_FLAGS (drive)->no_playaudio12 = 1;
+ }
+
+ /* Vertos 300.
+ There seem to be at least two different, incompatible versions
+ of this drive floating around. Luckily, they appear to return their
+ id strings with different byte orderings. */
+ else if (strcmp (drive->id->model, "V003S0DS") == 0)
+ {
+ CDROM_CONFIG_FLAGS (drive)->vertos_lossage = 1;
+ CDROM_CONFIG_FLAGS (drive)->playmsf_uses_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->no_lba_toc = 1;
+ }
+ else if (strcmp (drive->id->model, "0V300SSD") == 0 ||
+ strcmp (drive->id->model, "V003M0DP") == 0)
+ CDROM_CONFIG_FLAGS (drive)->no_lba_toc = 1;
+
+ /* Vertos 400. */
+ else if (strcmp (drive->id->model, "V004E0DT") == 0 ||
+ strcmp (drive->id->model, "0V400ETD") == 0)
+ CDROM_CONFIG_FLAGS (drive)->no_lba_toc = 1;
+
+ else if ( strcmp (drive->id->model, "CD-ROM CDU55D") == 0) /*sony cdu55d */
+ CDROM_CONFIG_FLAGS (drive)->no_playaudio12 = 1;
+
+ else if (strcmp (drive->id->model, "CD-ROM CDU55E") == 0)
+ CDROM_CONFIG_FLAGS (drive)->no_playaudio12 = 1;
+ } /* drive-id != NULL */
+#endif /* not STANDARD_ATAPI */
+
+ drive->cdrom_info.toc = NULL;
+ drive->cdrom_info.sector_buffer = NULL;
+ drive->cdrom_info.sector_buffered = 0;
+ drive->cdrom_info.nsectors_buffered = 0;
+}
+
+
+
+/*
+ * TODO:
+ * CDROM_GET_UPC
+ * CDROMRESET
+ * Lock the door when a read request completes successfully and the
+ * door is not already locked. Also try to reorganize to reduce
+ * duplicated functionality between read and ioctl paths?
+ * Establish interfaces for an IDE port driver, and break out the cdrom
+ * code into a loadable module.
+ * Support changers.
+ * Write some real documentation.
+ */
diff --git a/i386/i386at/gpl/linux/block/ide.c b/i386/i386at/gpl/linux/block/ide.c
new file mode 100644
index 00000000..4b7c5e95
--- /dev/null
+++ b/i386/i386at/gpl/linux/block/ide.c
@@ -0,0 +1,3087 @@
+/*
+ * linux/drivers/block/ide.c Version 5.28 Feb 11, 1996
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors (see below)
+ */
+#define _IDE_C /* needed by <linux/blk.h> */
+
+/*
+ * This is the multiple IDE interface driver, as evolved from hd.c.
+ * It supports up to four IDE interfaces, on one or more IRQs (usually 14 & 15).
+ * There can be up to two drives per interface, as per the ATA-2 spec.
+ *
+ * Primary i/f: ide0: major=3; (hda) minor=0; (hdb) minor=64
+ * Secondary i/f: ide1: major=22; (hdc or hd1a) minor=0; (hdd or hd1b) minor=64
+ * Tertiary i/f: ide2: major=33; (hde) minor=0; (hdf) minor=64
+ * Quaternary i/f: ide3: major=34; (hdg) minor=0; (hdh) minor=64
+ *
+ * It is easy to extend ide.c to handle more than four interfaces:
+ *
+ * Change the MAX_HWIFS constant in ide.h.
+ *
+ * Define some new major numbers (in major.h), and insert them into
+ * the ide_hwif_to_major table in ide.c.
+ *
+ * Fill in the extra values for the new interfaces into the two tables
+ * inside ide.c: default_io_base[] and default_irqs[].
+ *
+ * Create the new request handlers by cloning "do_ide3_request()"
+ * for each new interface, and add them to the switch statement
+ * in the ide_init() function in ide.c.
+ *
+ * Recompile, create the new /dev/ entries, and it will probably work.
+ *
+ * From hd.c:
+ * |
+ * | It traverses the request-list, using interrupts to jump between functions.
+ * | As nearly all functions can be called within interrupts, we may not sleep.
+ * | Special care is recommended. Have Fun!
+ * |
+ * | modified by Drew Eckhardt to check nr of hd's from the CMOS.
+ * |
+ * | Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * | in the early extended-partition checks and added DM partitions.
+ * |
+ * | Early work on error handling by Mika Liljeberg (liljeber@cs.Helsinki.FI).
+ * |
+ * | IRQ-unmask, drive-id, multiple-mode, support for ">16 heads",
+ * | and general streamlining by Mark Lord (mlord@bnr.ca).
+ *
+ * October, 1994 -- Complete line-by-line overhaul for linux 1.1.x, by:
+ *
+ * Mark Lord (mlord@bnr.ca) (IDE Perf.Pkg)
+ * Delman Lee (delman@mipg.upenn.edu) ("Mr. atdisk2")
+ * Petri Mattila (ptjmatti@kruuna.helsinki.fi) (EIDE stuff)
+ * Scott Snyder (snyder@fnald0.fnal.gov) (ATAPI IDE cd-rom)
+ *
+ * Maintained by Mark Lord (mlord@bnr.ca): ide.c, ide.h, triton.c, hd.c, ..
+ *
+ * This was a rewrite of just about everything from hd.c, though some original
+ * code is still sprinkled about. Think of it as a major evolution, with
+ * inspiration from lots of linux users, esp. hamish@zot.apana.org.au
+ *
+ * Version 1.0 ALPHA initial code, primary i/f working okay
+ * Version 1.3 BETA dual i/f on shared irq tested & working!
+ * Version 1.4 BETA added auto probing for irq(s)
+ * Version 1.5 BETA added ALPHA (untested) support for IDE cd-roms,
+ * ...
+ * Version 3.5 correct the bios_cyl field if it's too small
+ * (linux 1.1.76) (to help fdisk with brain-dead BIOSs)
+ * Version 3.6 cosmetic corrections to comments and stuff
+ * (linux 1.1.77) reorganise probing code to make it understandable
+ * added halfway retry to probing for drive identification
+ * added "hdx=noprobe" command line option
+ * allow setting multmode even when identification fails
+ * Version 3.7 move set_geometry=1 from do_identify() to ide_init()
+ * increase DRQ_WAIT to eliminate nuisance messages
+ * wait for DRQ_STAT instead of DATA_READY during probing
+ * (courtesy of Gary Thomas gary@efland.UU.NET)
+ * Version 3.8 fixed byte-swapping for confused Mitsumi cdrom drives
+ * update of ide-cd.c from Scott, allows blocksize=1024
+ * cdrom probe fixes, inspired by jprang@uni-duisburg.de
+ * Version 3.9 don't use LBA if lba_capacity looks funny
+ * correct the drive capacity calculations
+ * fix probing for old Seagates without IDE_ALTSTATUS_REG
+ * fix byte-ordering for some NEC cdrom drives
+ * Version 3.10 disable multiple mode by default; was causing trouble
+ * Version 3.11 fix mis-identification of old WD disks as cdroms
+ * Version 3.12 simplify logic for selecting initial mult_count
+ * (fixes problems with buggy WD drives)
+ * Version 3.13 remove excess "multiple mode disabled" messages
+ * Version 3.14 fix ide_error() handling of BUSY_STAT
+ * fix byte-swapped cdrom strings (again.. arghh!)
+ * ignore INDEX bit when checking the ALTSTATUS reg
+ * Version 3.15 add SINGLE_THREADED flag for use with dual-CMD i/f
+ * ignore WRERR_STAT for non-write operations
+ * added vlb_sync support for DC-2000A & others,
+ * (incl. some Promise chips), courtesy of Frank Gockel
+ * Version 3.16 convert vlb_32bit and vlb_sync into runtime flags
+ * add ioctls to get/set VLB flags (HDIO_[SG]ET_CHIPSET)
+ * rename SINGLE_THREADED to SUPPORT_SERIALIZE,
+ * add boot flag to "serialize" operation for CMD i/f
+ * add optional support for DTC2278 interfaces,
+ * courtesy of andy@cercle.cts.com (Dyan Wile).
+ * add boot flag to enable "dtc2278" probe
+ * add probe to avoid EATA (SCSI) interfaces,
+ * courtesy of neuffer@goofy.zdv.uni-mainz.de.
+ * Version 4.00 tidy up verify_area() calls - heiko@colossus.escape.de
+ * add flag to ignore WRERR_STAT for some drives
+ * courtesy of David.H.West@um.cc.umich.edu
+ * assembly syntax tweak to vlb_sync
+ * removeable drive support from scuba@cs.tu-berlin.de
+ * add transparent support for DiskManager-6.0x "Dynamic
+ * Disk Overlay" (DDO), most of this is in genhd.c
+ * eliminate "multiple mode turned off" message at boot
+ * Version 4.10 fix bug in ioctl for "hdparm -c3"
+ * fix DM6:DDO support -- now works with LILO, fdisk, ...
+ * don't treat some naughty WD drives as removeable
+ * Version 4.11 updated DM6 support using info provided by OnTrack
+ * Version 5.00 major overhaul, multmode setting fixed, vlb_sync fixed
+ * added support for 3rd/4th/alternative IDE ports
+ * created ide.h; ide-cd.c now compiles separate from ide.c
+ * hopefully fixed infinite "unexpected_intr" from cdroms
+ * zillions of other changes and restructuring
+ * somehow reduced overall memory usage by several kB
+ * probably slowed things down slightly, but worth it
+ * Version 5.01 AT LAST!! Finally understood why "unexpected_intr"
+ * was happening at various times/places: whenever the
+ * ide-interface's ctl_port was used to "mask" the irq,
+ * it also would trigger an edge in the process of masking
+ * which would result in a self-inflicted interrupt!!
+ * (such a stupid way to build a hardware interrupt mask).
+ * This is now fixed (after a year of head-scratching).
+ * Version 5.02 got rid of need for {enable,disable}_irq_list()
+ * Version 5.03 tune-ups, comments, remove "busy wait" from drive resets
+ * removed PROBE_FOR_IRQS option -- no longer needed
+ * OOOPS! fixed "bad access" bug for 2nd drive on an i/f
+ * Version 5.04 changed "ira %d" to "irq %d" in DEBUG message
+ * added more comments, cleaned up unexpected_intr()
+ * OOOPS! fixed null pointer problem in ide reset code
+ * added autodetect for Triton chipset -- no effect yet
+ * Version 5.05 OOOPS! fixed bug in revalidate_disk()
+ * OOOPS! fixed bug in ide_do_request()
+ * added ATAPI reset sequence for cdroms
+ * Version 5.10 added Bus-Mastered DMA support for Triton Chipset
+ * some (mostly) cosmetic changes
+ * Version 5.11 added ht6560b support by malafoss@snakemail.hut.fi
+ * reworked PCI scanning code
+ * added automatic RZ1000 detection/support
+ * added automatic PCI CMD640 detection/support
+ * added option for VLB CMD640 support
+ * tweaked probe to find cdrom on hdb with disks on hda,hdc
+ * Version 5.12 some performance tuning
+ * added message to alert user to bad /dev/hd[cd] entries
+ * OOOPS! fixed bug in atapi reset
+ * driver now forces "serialize" again for all cmd640 chips
+ * noticed REALLY_SLOW_IO had no effect, moved it to ide.c
+ * made do_drive_cmd() into public ide_do_drive_cmd()
+ * Version 5.13 fixed typo ('B'), thanks to houston@boyd.geog.mcgill.ca
+ * fixed ht6560b support
+ * Version 5.13b (sss) fix problem in calling ide_cdrom_setup()
+ * don't bother invalidating nonexistent partitions
+ * Version 5.14 fixes to cmd640 support.. maybe it works now(?)
+ * added & tested full EZ-DRIVE support -- don't use LILO!
+ * don't enable 2nd CMD640 PCI port during init - conflict
+ * Version 5.15 bug fix in init_cmd640_vlb()
+ * bug fix in interrupt sharing code
+ * Version 5.16 ugh.. fix "serialize" support, broken in 5.15
+ * remove "Huh?" from cmd640 code
+ * added qd6580 interface speed select from Colten Edwards
+ * Version 5.17 kludge around bug in BIOS32 on Intel triton motherboards
+ * Version 5.18 new CMD640 code, moved to cmd640.c, #include'd for now
+ * new UMC8672 code, moved to umc8672.c, #include'd for now
+ * disallow turning on DMA when h/w not capable of DMA
+ * Version 5.19 fix potential infinite timeout on resets
+ * extend reset poll into a general purpose polling scheme
+ * add atapi tape drive support from Gadi Oxman
+ * simplify exit from _intr routines -- no IDE_DO_REQUEST
+ * Version 5.20 leave current rq on blkdev request list during I/O
+ * generalized ide_do_drive_cmd() for tape/cdrom driver use
+ * Version 5.21 fix nasty cdrom/tape bug (ide_preempt was messed up)
+ * Version 5.22 fix ide_xlate_1024() to work with/without drive->id
+ * Version 5.23 miscellaneous touch-ups
+ * Version 5.24 fix #if's for SUPPORT_CMD640
+ * Version 5.25 more touch-ups, fix cdrom resets, ...
+ * cmd640.c now configs/compiles separate from ide.c
+ * Version 5.26 keep_settings now maintains the using_dma flag
+ * fix [EZD] remap message to only output at boot time
+ * fix "bad /dev/ entry" message to say hdc, not hdc0
+ * fix ide_xlate_1024() to respect user specified CHS
+ * use CHS from partn table if it looks translated
+ * re-merged flags chipset,vlb_32bit,vlb_sync into io_32bit
+ * keep track of interface chipset type, when known
+ * add generic PIO mode "tuneproc" mechanism
+ * fix cmd640_vlb option
+ * fix ht6560b support (was completely broken)
+ * umc8672.c now configures/compiles separate from ide.c
+ * move dtc2278 support to dtc2278.c
+ * move ht6560b support to ht6560b.c
+ * move qd6580 support to qd6580.c
+ * add ali14xx support in ali14xx.c
+ * Version 5.27 add [no]autotune parameters to help cmd640
+ * move rz1000 support to rz1000.c
+ * Version 5.28 #include "ide_modes.h"
+ * fix disallow_unmask: now per-interface "no_unmask" bit
+ * force io_32bit to be the same on drive pairs of dtc2278
+ * improved IDE tape error handling, and tape DMA support
+ * bugfix in ide_do_drive_cmd() for cdroms + serialize
+ *
+ * Some additional driver compile-time options are in ide.h
+ *
+ * To do, in likely order of completion:
+ * - add Promise DC4030VL support from peterd@pnd-pc.demon.co.uk
+ * - modify kernel to obtain BIOS geometry for drives on 2nd/3rd/4th i/f
+*/
+
+#if defined (MACH) && !defined (LINUX_IDE_DEBUG)
+#undef DEBUG
+#endif
+
+#undef REALLY_SLOW_IO /* most systems can safely undef this */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/malloc.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/segment.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_PCI
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#endif /* CONFIG_PCI */
+
+#include "ide.h"
+#include "ide_modes.h"
+
+static ide_hwgroup_t *irq_to_hwgroup [NR_IRQS];
+static const byte ide_hwif_to_major[MAX_HWIFS] = {IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR};
+
+static const unsigned short default_io_base[MAX_HWIFS] = {0x1f0, 0x170, 0x1e8, 0x168};
+static const byte default_irqs[MAX_HWIFS] = {14, 15, 11, 10};
+
+#if (DISK_RECOVERY_TIME > 0)
+/*
+ * For really screwy hardware (hey, at least it *can* be used with Linux)
+ * we can enforce a minimum delay time between successive operations.
+ */
+static unsigned long read_timer(void)
+{
+ unsigned long t, flags;
+ int i;
+
+ save_flags(flags);
+ cli();
+ t = jiffies * 11932;
+ outb_p(0, 0x43);
+ i = inb_p(0x40);
+ i |= inb(0x40) << 8;
+ restore_flags(flags);
+ return (t - i);
+}
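+
+/*
+ * For illustration: 11932 is roughly the 8254 PIT input clock (1193180 Hz)
+ * divided by HZ (100), i.e. the number of timer ticks per jiffy.  The
+ * outb_p(0, 0x43) latches counter 0, the two inb()s fetch its remaining
+ * count, so (t - i) advances *within* a jiffy as the counter runs down,
+ * giving read_timer() the sub-jiffy resolution needed for DISK_RECOVERY_TIME.
+ */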
+
+static void set_recovery_timer (ide_hwif_t *hwif)
+{
+ hwif->last_time = read_timer();
+}
+#define SET_RECOVERY_TIMER(drive) set_recovery_timer (drive)
+
+#else
+
+#define SET_RECOVERY_TIMER(drive)
+
+#endif /* DISK_RECOVERY_TIME */
+
+/*
+ * init_ide_data() sets reasonable default values into all fields
+ * of all instances of the hwifs and drives, but only on the first call.
+ * Subsequent calls have no effect (they don't wipe out anything).
+ *
+ * This routine is normally called at driver initialization time,
+ * but may also be called MUCH earlier during kernel "command-line"
+ * parameter processing. As such, we cannot depend on any other parts
+ * of the kernel (such as memory allocation) to be functioning yet.
+ *
+ * This is too bad, as otherwise we could dynamically allocate the
+ * ide_drive_t structs as needed, rather than always consuming memory
+ * for the max possible number (MAX_HWIFS * MAX_DRIVES) of them.
+ */
+#define MAGIC_COOKIE 0x12345678
+static void init_ide_data (void)
+{
+ byte *p;
+ unsigned int h, unit;
+ static unsigned long magic_cookie = MAGIC_COOKIE;
+
+ if (magic_cookie != MAGIC_COOKIE)
+ return; /* already initialized */
+ magic_cookie = 0;
+
+ for (h = 0; h < NR_IRQS; ++h)
+ irq_to_hwgroup[h] = NULL;
+
+ /* bulk initialize hwif & drive info with zeros */
+ p = ((byte *) ide_hwifs) + sizeof(ide_hwifs);
+ do {
+ *--p = 0;
+ } while (p > (byte *) ide_hwifs);
+
+ /* fill in any non-zero initial values */
+ for (h = 0; h < MAX_HWIFS; ++h) {
+ ide_hwif_t *hwif = &ide_hwifs[h];
+
+ hwif->index = h;
+ hwif->noprobe = (h > 1);
+ hwif->io_base = default_io_base[h];
+ hwif->ctl_port = hwif->io_base ? hwif->io_base+0x206 : 0x000;
+#ifdef CONFIG_BLK_DEV_HD
+ if (hwif->io_base == HD_DATA)
+ hwif->noprobe = 1; /* may be overridden by ide_setup() */
+#endif /* CONFIG_BLK_DEV_HD */
+ hwif->major = ide_hwif_to_major[h];
+ hwif->name[0] = 'i';
+ hwif->name[1] = 'd';
+ hwif->name[2] = 'e';
+ hwif->name[3] = '0' + h;
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ hwif->tape_drive = NULL;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+
+ drive->select.all = (unit<<4)|0xa0;
+ drive->hwif = hwif;
+ drive->ctl = 0x08;
+ drive->ready_stat = READY_STAT;
+ drive->bad_wstat = BAD_W_STAT;
+ drive->special.b.recalibrate = 1;
+ drive->special.b.set_geometry = 1;
+ drive->name[0] = 'h';
+ drive->name[1] = 'd';
+ drive->name[2] = 'a' + (h * MAX_DRIVES) + unit;
+ }
+ }
+}
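+
+/*
+ * For illustration: the (unit<<4)|0xa0 value above is the ATA drive/head
+ * select byte -- 0xa0 selects the master and 0xb0 the slave of an interface
+ * (bit 4 is the drive bit; bits 7 and 5 are fixed at 1 on these older
+ * drives).  Likewise the names "hda".."hdh" fall out of 'a' + h*MAX_DRIVES
+ * + unit, with MAX_DRIVES == 2 drives per interface and up to four interfaces.
+ */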
+
+#if SUPPORT_VLB_SYNC
+/*
+ * Some localbus EIDE interfaces require a special access sequence
+ * when using 32-bit I/O instructions to transfer data. We call this
+ * the "vlb_sync" sequence, which consists of three successive reads
+ * of the sector count register location, with interrupts disabled
+ * to ensure that the reads all happen together.
+ */
+static inline void do_vlb_sync (unsigned short port) {
+ (void) inb (port);
+ (void) inb (port);
+ (void) inb (port);
+}
+#endif /* SUPPORT_VLB_SYNC */
+
+/*
+ * This is used for most PIO data transfers *from* the IDE interface
+ */
+void ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
+{
+ unsigned short io_base = HWIF(drive)->io_base;
+ unsigned short data_reg = io_base+IDE_DATA_OFFSET;
+ byte io_32bit = drive->io_32bit;
+
+ if (io_32bit) {
+#if SUPPORT_VLB_SYNC
+ if (io_32bit & 2) {
+ cli();
+ do_vlb_sync(io_base+IDE_NSECTOR_OFFSET);
+ insl(data_reg, buffer, wcount);
+ if (drive->unmask)
+ sti();
+ } else
+#endif /* SUPPORT_VLB_SYNC */
+ insl(data_reg, buffer, wcount);
+ } else
+ insw(data_reg, buffer, wcount<<1);
+}
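+
+/*
+ * Note: wcount here and in ide_output_data() below is a count of 32-bit
+ * longwords (SECTOR_WORDS in ide.h is 512/4 == 128 longwords per sector),
+ * which is why the 16-bit insw()/outsw() paths use wcount<<1 while the
+ * insl()/outsl() paths use wcount directly.  E.g. a one-sector PIO
+ * transfer passes SECTOR_WORDS (128).
+ */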
+
+/*
+ * This is used for most PIO data transfers *to* the IDE interface
+ */
+void ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
+{
+ unsigned short io_base = HWIF(drive)->io_base;
+ unsigned short data_reg = io_base+IDE_DATA_OFFSET;
+ byte io_32bit = drive->io_32bit;
+
+ if (io_32bit) {
+#if SUPPORT_VLB_SYNC
+ if (io_32bit & 2) {
+ cli();
+ do_vlb_sync(io_base+IDE_NSECTOR_OFFSET);
+ outsl(data_reg, buffer, wcount);
+ if (drive->unmask)
+ sti();
+ } else
+#endif /* SUPPORT_VLB_SYNC */
+ outsl(data_reg, buffer, wcount);
+ } else
+ outsw(data_reg, buffer, wcount<<1);
+}
+
+/*
+ * This should get invoked any time we exit the driver to
+ * wait for an interrupt response from a drive. handler() points
+ * at the appropriate code to handle the next interrupt, and a
+ * timer is started to prevent us from waiting forever in case
+ * something goes wrong (see the timer_expiry() handler later on).
+ */
+void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout)
+{
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+#ifdef DEBUG
+ if (hwgroup->handler != NULL) {
+ printk("%s: ide_set_handler: handler not null; old=%p, new=%p\n",
+ drive->name, hwgroup->handler, handler);
+ }
+#endif
+ hwgroup->handler = handler;
+ hwgroup->timer.expires = jiffies + timeout;
+ add_timer(&(hwgroup->timer));
+}
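+
+/*
+ * The timeout argument is in jiffies: the reset code below polls with
+ * HZ/20 (50ms), while normal command completion handlers use the much
+ * larger WAIT_CMD.  If the expected interrupt never arrives, timer_expiry()
+ * (further down) runs instead and either re-polls or reports "irq timeout".
+ */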
+
+/*
+ * lba_capacity_is_ok() performs a sanity check on the claimed "lba_capacity"
+ * value for this drive (from its reported identification information).
+ *
+ * Returns: 1 if lba_capacity looks sensible
+ * 0 otherwise
+ */
+static int lba_capacity_is_ok (struct hd_driveid *id)
+{
+ unsigned long lba_sects = id->lba_capacity;
+ unsigned long chs_sects = id->cyls * id->heads * id->sectors;
+ unsigned long _10_percent = chs_sects / 10;
+
+ /* perform a rough sanity check on lba_sects: within 10% is "okay" */
+ if ((lba_sects - chs_sects) < _10_percent)
+ return 1; /* lba_capacity is good */
+
+ /* some drives have the word order reversed */
+ lba_sects = (lba_sects << 16) | (lba_sects >> 16);
+ if ((lba_sects - chs_sects) < _10_percent) {
+ id->lba_capacity = lba_sects; /* fix it */
+ return 1; /* lba_capacity is (now) good */
+ }
+ return 0; /* lba_capacity value is bad */
+}
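+
+/*
+ * Worked example for the word-swap case above: a drive whose true
+ * lba_capacity is 0x0001f800 (129024 sectors) but which stores the two
+ * 16-bit halves in the wrong order reports 0xf8000001.  That fails the
+ * first 10% test (assuming the C*H*S product is in the same ballpark),
+ * but (lba<<16)|(lba>>16) recovers 0x0001f800, which passes, so
+ * id->lba_capacity is patched in place.
+ */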
+
+/*
+ * current_capacity() returns the capacity (in sectors) of a drive
+ * according to its current geometry/LBA settings.
+ */
+static unsigned long current_capacity (ide_drive_t *drive)
+{
+ struct hd_driveid *id = drive->id;
+ unsigned long capacity;
+
+ if (!drive->present)
+ return 0;
+ if (drive->media != ide_disk)
+ return 0x7fffffff; /* cdrom or tape */
+ /* Determine capacity, and use LBA if the drive properly supports it */
+ if (id != NULL && (id->capability & 2) && lba_capacity_is_ok(id)) {
+ drive->select.b.lba = 1;
+ capacity = id->lba_capacity;
+ } else {
+ drive->select.b.lba = 0;
+ capacity = drive->cyl * drive->head * drive->sect;
+ }
+ return (capacity - drive->sect0);
+}
+
+/*
+ * ide_geninit() is called exactly *once* for each major, from genhd.c,
+ * at the beginning of the initial partition check for the drives.
+ */
+static void ide_geninit (struct gendisk *gd)
+{
+ unsigned int unit;
+ ide_hwif_t *hwif = gd->real_devices;
+
+ for (unit = 0; unit < gd->nr_real; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->present && drive->media == ide_cdrom)
+ ide_cdrom_setup(drive);
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->present && drive->media == ide_tape)
+ idetape_setup(drive);
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ drive->part[0].nr_sects = current_capacity(drive);
+ if (!drive->present || drive->media != ide_disk) {
+ drive->part[0].start_sect = -1; /* skip partition check */
+ }
+ }
+ /*
+ * The partition check in genhd.c needs this string to identify
+ * our minor devices by name for display purposes.
+ * Note that doing this will prevent us from working correctly
+ * if ever called a second time for this major (never happens).
+ */
+ gd->real_devices = hwif->drives[0].name; /* name of first drive */
+}
+
+/*
+ * init_gendisk() (as opposed to ide_geninit) is called for each major device,
+ * after probing for drives, to allocate partition tables and other data
+ * structures needed for the routines in genhd.c. ide_geninit() gets called
+ * somewhat later, during the partition check.
+ */
+static void init_gendisk (ide_hwif_t *hwif)
+{
+ struct gendisk *gd;
+ unsigned int unit, units, minors;
+ int *bs;
+
+ /* figure out maximum drive number on the interface */
+ for (units = MAX_DRIVES; units > 0; --units) {
+ if (hwif->drives[units-1].present)
+ break;
+ }
+ minors = units * (1<<PARTN_BITS);
+ gd = kmalloc (sizeof(struct gendisk), GFP_KERNEL);
+ gd->sizes = kmalloc (minors * sizeof(int), GFP_KERNEL);
+ gd->part = kmalloc (minors * sizeof(struct hd_struct), GFP_KERNEL);
+ bs = kmalloc (minors*sizeof(int), GFP_KERNEL);
+
+ /* cdroms and msdos f/s are examples of non-1024 blocksizes */
+ blksize_size[hwif->major] = bs;
+ for (unit = 0; unit < minors; ++unit)
+ *bs++ = BLOCK_SIZE;
+
+ for (unit = 0; unit < units; ++unit)
+ hwif->drives[unit].part = &gd->part[unit << PARTN_BITS];
+
+ gd->major = hwif->major; /* our major device number */
+ gd->major_name = IDE_MAJOR_NAME; /* treated special in genhd.c */
+ gd->minor_shift = PARTN_BITS; /* num bits for partitions */
+ gd->max_p = 1<<PARTN_BITS; /* 1 + max partitions / drive */
+ gd->max_nr = units; /* max num real drives */
+ gd->nr_real = units; /* current num real drives */
+ gd->init = ide_geninit; /* initialization function */
+ gd->real_devices= hwif; /* ptr to internal data */
+
+ gd->next = gendisk_head; /* link new major into list */
+ hwif->gd = gendisk_head = gd;
+}
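+
+/*
+ * Sizing example: with PARTN_BITS == 6 (the ide.h value: 64 minors per
+ * unit), an interface with both drives present gets minors = 2*64 = 128,
+ * so gd->sizes, gd->part and blksize_size[] above are each allocated with
+ * 128 entries, and hdb's partitions start at minor 64 (1<<PARTN_BITS).
+ */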
+
+static void do_reset1 (ide_drive_t *, int); /* needed below */
+
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+/*
+ * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an atapi drive reset operation. If the drive has not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.
+ */
+static void atapi_reset_pollfunc (ide_drive_t *drive)
+{
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+ byte stat;
+
+ OUT_BYTE (drive->select.all, IDE_SELECT_REG);
+ udelay (10);
+
+ if (OK_STAT(stat=GET_STAT(), 0, BUSY_STAT)) {
+ printk("%s: ATAPI reset complete\n", drive->name);
+ } else {
+ if (jiffies < hwgroup->poll_timeout) {
+ ide_set_handler (drive, &atapi_reset_pollfunc, HZ/20);
+ return; /* continue polling */
+ }
+ hwgroup->poll_timeout = 0; /* end of polling */
+ printk("%s: ATAPI reset timed-out, status=0x%02x\n", drive->name, stat);
+ do_reset1 (drive, 1); /* do it the old fashioned way */
+ }
+ hwgroup->poll_timeout = 0; /* done polling */
+}
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+
+/*
+ * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an ide reset operation. If the drives have not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.
+ */
+static void reset_pollfunc (ide_drive_t *drive)
+{
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+ ide_hwif_t *hwif = HWIF(drive);
+ byte tmp;
+
+ if (!OK_STAT(tmp=GET_STAT(), 0, BUSY_STAT)) {
+ if (jiffies < hwgroup->poll_timeout) {
+ ide_set_handler (drive, &reset_pollfunc, HZ/20);
+ return; /* continue polling */
+ }
+ printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
+ } else {
+ printk("%s: reset: ", hwif->name);
+ if ((tmp = GET_ERR()) == 1)
+ printk("success\n");
+ else {
+ printk("master: ");
+ switch (tmp & 0x7f) {
+ case 1: printk("passed");
+ break;
+ case 2: printk("formatter device error");
+ break;
+ case 3: printk("sector buffer error");
+ break;
+ case 4: printk("ECC circuitry error");
+ break;
+ case 5: printk("controlling MPU error");
+ break;
+ default:printk("error (0x%02x?)", tmp);
+ }
+ if (tmp & 0x80)
+ printk("; slave: failed");
+ printk("\n");
+ }
+ }
+ hwgroup->poll_timeout = 0; /* done polling */
+}
+
+/*
+ * do_reset1() attempts to recover a confused drive by resetting it.
+ * Unfortunately, resetting a disk drive actually resets all devices on
+ * the same interface, so it can really be thought of as resetting the
+ * interface rather than resetting the drive.
+ *
+ * ATAPI devices have their own reset mechanism which allows them to be
+ * individually reset without clobbering other devices on the same interface.
+ *
+ * Unfortunately, the IDE interface does not generate an interrupt to let
+ * us know when the reset operation has finished, so we must poll for this.
+ * Equally poor, though, is the fact that this may take a very long time to
+ * complete (up to 30 seconds worst case). So, instead of busy-waiting here for it,
+ * we set a timer to poll at 50ms intervals.
+ */
+static void do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
+{
+ unsigned int unit;
+ unsigned long flags;
+ ide_hwif_t *hwif = HWIF(drive);
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+
+ save_flags(flags);
+ cli(); /* Why ? */
+
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ /* For an ATAPI device, first try an ATAPI SRST. */
+ if (drive->media != ide_disk) {
+ if (!do_not_try_atapi) {
+ if (!drive->keep_settings)
+ drive->unmask = 0;
+ OUT_BYTE (drive->select.all, IDE_SELECT_REG);
+ udelay (20);
+ OUT_BYTE (WIN_SRST, IDE_COMMAND_REG);
+ hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
+ ide_set_handler (drive, &atapi_reset_pollfunc, HZ/20);
+ restore_flags (flags);
+ return;
+ }
+ }
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+
+ /*
+ * First, reset any device state data we were maintaining
+ * for any of the drives on this interface.
+ */
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *rdrive = &hwif->drives[unit];
+ rdrive->special.all = 0;
+ rdrive->special.b.set_geometry = 1;
+ rdrive->special.b.recalibrate = 1;
+ if (OK_TO_RESET_CONTROLLER)
+ rdrive->mult_count = 0;
+ if (!rdrive->keep_settings) {
+ rdrive->using_dma = 0;
+ rdrive->mult_req = 0;
+ rdrive->unmask = 0;
+ }
+ if (rdrive->mult_req != rdrive->mult_count)
+ rdrive->special.b.set_multmode = 1;
+ }
+
+#if OK_TO_RESET_CONTROLLER
+ /*
+ * Note that we also set nIEN while resetting the device,
+ * to mask unwanted interrupts from the interface during the reset.
+ * However, due to the design of PC hardware, this will cause an
+ * immediate interrupt due to the edge transition it produces.
+ * This single interrupt gives us a "fast poll" for drives that
+ * recover from reset very quickly, saving us the first 50ms wait time.
+ */
+ OUT_BYTE(drive->ctl|6,IDE_CONTROL_REG); /* set SRST and nIEN */
+ udelay(5); /* more than enough time */
+ OUT_BYTE(drive->ctl|2,IDE_CONTROL_REG); /* clear SRST, leave nIEN */
+ hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
+ ide_set_handler (drive, &reset_pollfunc, HZ/20);
+#endif /* OK_TO_RESET_CONTROLLER */
+
+ restore_flags (flags);
+}
+
+/*
+ * ide_do_reset() is the entry point to the drive/interface reset code.
+ */
+void ide_do_reset (ide_drive_t *drive)
+{
+ do_reset1 (drive, 0);
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape)
+ drive->tape.reset_issued=1;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+}
+
+/*
+ * Clean up after success/failure of an explicit drive cmd
+ */
+void ide_end_drive_cmd (ide_drive_t *drive, byte stat, byte err)
+{
+ unsigned long flags;
+ struct request *rq = HWGROUP(drive)->rq;
+
+ if (rq->cmd == IDE_DRIVE_CMD) {
+ byte *args = (byte *) rq->buffer;
+ rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
+ if (args) {
+ args[0] = stat;
+ args[1] = err;
+ args[2] = IN_BYTE(IDE_NSECTOR_REG);
+ }
+ }
+ save_flags(flags);
+ cli();
+ blk_dev[MAJOR(rq->rq_dev)].current_request = rq->next;
+ HWGROUP(drive)->rq = NULL;
+ rq->rq_status = RQ_INACTIVE;
+ if (rq->sem != NULL)
+ up(rq->sem);
+ restore_flags(flags);
+}
+
+/*
+ * Error reporting, in human readable form (luxurious, but a memory hog).
+ */
+byte ide_dump_status (ide_drive_t *drive, const char *msg, byte stat)
+{
+ unsigned long flags;
+ byte err = 0;
+
+ save_flags (flags);
+ sti();
+ printk("%s: %s: status=0x%02x", drive->name, msg, stat);
+#if FANCY_STATUS_DUMPS
+ if (drive->media == ide_disk) {
+ printk(" { ");
+ if (stat & BUSY_STAT)
+ printk("Busy ");
+ else {
+ if (stat & READY_STAT) printk("DriveReady ");
+ if (stat & WRERR_STAT) printk("DeviceFault ");
+ if (stat & SEEK_STAT) printk("SeekComplete ");
+ if (stat & DRQ_STAT) printk("DataRequest ");
+ if (stat & ECC_STAT) printk("CorrectedError ");
+ if (stat & INDEX_STAT) printk("Index ");
+ if (stat & ERR_STAT) printk("Error ");
+ }
+ printk("}");
+ }
+#endif /* FANCY_STATUS_DUMPS */
+ printk("\n");
+ if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
+ err = GET_ERR();
+ printk("%s: %s: error=0x%02x", drive->name, msg, err);
+#if FANCY_STATUS_DUMPS
+ if (drive->media == ide_disk) {
+ printk(" { ");
+ if (err & BBD_ERR) printk("BadSector ");
+ if (err & ECC_ERR) printk("UncorrectableError ");
+ if (err & ID_ERR) printk("SectorIdNotFound ");
+ if (err & ABRT_ERR) printk("DriveStatusError ");
+ if (err & TRK0_ERR) printk("TrackZeroNotFound ");
+ if (err & MARK_ERR) printk("AddrMarkNotFound ");
+ printk("}");
+ if (err & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
+ byte cur = IN_BYTE(IDE_SELECT_REG);
+ if (cur & 0x40) { /* using LBA? */
+ printk(", LBAsect=%ld", (unsigned long)
+ ((cur&0xf)<<24)
+ |(IN_BYTE(IDE_HCYL_REG)<<16)
+ |(IN_BYTE(IDE_LCYL_REG)<<8)
+ | IN_BYTE(IDE_SECTOR_REG));
+ } else {
+ printk(", CHS=%d/%d/%d",
+ (IN_BYTE(IDE_HCYL_REG)<<8) +
+ IN_BYTE(IDE_LCYL_REG),
+ cur & 0xf,
+ IN_BYTE(IDE_SECTOR_REG));
+ }
+ if (HWGROUP(drive)->rq)
+ printk(", sector=%ld", HWGROUP(drive)->rq->sector);
+ }
+ }
+#endif /* FANCY_STATUS_DUMPS */
+ printk("\n");
+ }
+ restore_flags (flags);
+ return err;
+}
+
+/*
+ * try_to_flush_leftover_data() is invoked in response to a drive
+ * unexpectedly having its DRQ_STAT bit set. As an alternative to
+ * resetting the drive, this routine tries to clear the condition
+ * by reading a sector's worth of data from the drive. Of course,
+ * this may not help if the drive is *waiting* for data from *us*.
+ */
+static void try_to_flush_leftover_data (ide_drive_t *drive)
+{
+ int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;
+
+ while (i > 0) {
+ unsigned long buffer[16];
+ unsigned int wcount = (i > 16) ? 16 : i;
+ i -= wcount;
+ ide_input_data (drive, buffer, wcount);
+ }
+}
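+
+/*
+ * E.g. with mult_count == 4 this drains 4*SECTOR_WORDS == 512 longwords
+ * (2KB) in 32 chunks of 16 longwords, using only the small on-stack
+ * buffer[], so nothing needs to be allocated while the request queue is
+ * in a bad state.
+ */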
+
+/*
+ * ide_error() takes action based on the error returned by the controller.
+ */
+void ide_error (ide_drive_t *drive, const char *msg, byte stat)
+{
+ struct request *rq;
+ byte err;
+
+ err = ide_dump_status(drive, msg, stat);
+ if ((rq = HWGROUP(drive)->rq) == NULL || drive == NULL)
+ return;
+ /* retry only "normal" I/O: */
+ if (rq->cmd == IDE_DRIVE_CMD || (rq->cmd != READ && rq->cmd != WRITE && drive->media == ide_disk))
+ {
+ rq->errors = 1;
+ ide_end_drive_cmd(drive, stat, err);
+ return;
+ }
+ if (stat & BUSY_STAT) { /* other bits are useless when BUSY */
+ rq->errors |= ERROR_RESET;
+ } else {
+ if (drive->media == ide_disk && (stat & ERR_STAT)) {
+ /* err has different meaning on cdrom and tape */
+ if (err & (BBD_ERR | ECC_ERR)) /* retries won't help these */
+ rq->errors = ERROR_MAX;
+ else if (err & TRK0_ERR) /* help it find track zero */
+ rq->errors |= ERROR_RECAL;
+ }
+ if ((stat & DRQ_STAT) && rq->cmd != WRITE)
+ try_to_flush_leftover_data(drive);
+ }
+ if (GET_STAT() & (BUSY_STAT|DRQ_STAT))
+ rq->errors |= ERROR_RESET; /* Mmmm.. timing problem */
+
+ if (rq->errors >= ERROR_MAX) {
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape) {
+ rq->errors = 0;
+ idetape_end_request(0, HWGROUP(drive));
+ }
+ else
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ ide_end_request(0, HWGROUP(drive));
+ }
+ else {
+ if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
+ ++rq->errors;
+ ide_do_reset(drive);
+ return;
+ } else if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
+ drive->special.b.recalibrate = 1;
+ ++rq->errors;
+ }
+}
+
+/*
+ * read_intr() is the handler for disk read/multread interrupts
+ */
+static void read_intr (ide_drive_t *drive)
+{
+ byte stat;
+ int i;
+ unsigned int msect, nsect;
+ struct request *rq;
+
+ if (!OK_STAT(stat=GET_STAT(),DATA_READY,BAD_R_STAT)) {
+ ide_error(drive, "read_intr", stat);
+ return;
+ }
+ msect = drive->mult_count;
+read_next:
+ rq = HWGROUP(drive)->rq;
+ if (msect) {
+ if ((nsect = rq->current_nr_sectors) > msect)
+ nsect = msect;
+ msect -= nsect;
+ } else
+ nsect = 1;
+ ide_input_data(drive, rq->buffer, nsect * SECTOR_WORDS);
+#ifdef DEBUG
+ printk("%s: read: sectors(%ld-%ld), buffer=0x%08lx, remaining=%ld\n",
+ drive->name, rq->sector, rq->sector+nsect-1,
+ (unsigned long) rq->buffer+(nsect<<9), rq->nr_sectors-nsect);
+#endif
+ rq->sector += nsect;
+ rq->buffer += nsect<<9;
+ rq->errors = 0;
+ i = (rq->nr_sectors -= nsect);
+ if ((rq->current_nr_sectors -= nsect) <= 0)
+ ide_end_request(1, HWGROUP(drive));
+ if (i > 0) {
+ if (msect)
+ goto read_next;
+ ide_set_handler (drive, &read_intr, WAIT_CMD);
+ }
+}
+
+/*
+ * write_intr() is the handler for disk write interrupts
+ */
+static void write_intr (ide_drive_t *drive)
+{
+ byte stat;
+ int i;
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+ struct request *rq = hwgroup->rq;
+
+ if (OK_STAT(stat=GET_STAT(),DRIVE_READY,drive->bad_wstat)) {
+#ifdef DEBUG
+ printk("%s: write: sector %ld, buffer=0x%08lx, remaining=%ld\n",
+ drive->name, rq->sector, (unsigned long) rq->buffer,
+ rq->nr_sectors-1);
+#endif
+ if ((rq->nr_sectors == 1) ^ ((stat & DRQ_STAT) != 0)) {
+ rq->sector++;
+ rq->buffer += 512;
+ rq->errors = 0;
+ i = --rq->nr_sectors;
+ --rq->current_nr_sectors;
+ if (rq->current_nr_sectors <= 0)
+ ide_end_request(1, hwgroup);
+ if (i > 0) {
+ ide_output_data (drive, rq->buffer, SECTOR_WORDS);
+ ide_set_handler (drive, &write_intr, WAIT_CMD);
+ }
+ return;
+ }
+ }
+ ide_error(drive, "write_intr", stat);
+}
+
+/*
+ * multwrite() transfers a block of one or more sectors of data to a drive
+ * as part of a disk multwrite operation.
+ */
+static void multwrite (ide_drive_t *drive)
+{
+ struct request *rq = &HWGROUP(drive)->wrq;
+ unsigned int mcount = drive->mult_count;
+
+ do {
+ unsigned int nsect = rq->current_nr_sectors;
+ if (nsect > mcount)
+ nsect = mcount;
+ mcount -= nsect;
+
+ ide_output_data(drive, rq->buffer, nsect<<7);
+#ifdef DEBUG
+ printk("%s: multwrite: sector %ld, buffer=0x%08lx, count=%d, remaining=%ld\n",
+ drive->name, rq->sector, (unsigned long) rq->buffer,
+ nsect, rq->nr_sectors - nsect);
+#endif
+ if ((rq->nr_sectors -= nsect) <= 0)
+ break;
+ if ((rq->current_nr_sectors -= nsect) == 0) {
+ if ((rq->bh = rq->bh->b_reqnext) != NULL) {
+ rq->current_nr_sectors = rq->bh->b_size>>9;
+ rq->buffer = rq->bh->b_data;
+ } else {
+ panic("%s: buffer list corrupted\n", drive->name);
+ break;
+ }
+ } else {
+ rq->buffer += nsect << 9;
+ }
+ } while (mcount);
+}
+
+/*
+ * multwrite_intr() is the handler for disk multwrite interrupts
+ */
+static void multwrite_intr (ide_drive_t *drive)
+{
+ byte stat;
+ int i;
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+ struct request *rq = &hwgroup->wrq;
+
+ if (OK_STAT(stat=GET_STAT(),DRIVE_READY,drive->bad_wstat)) {
+ if (stat & DRQ_STAT) {
+ if (rq->nr_sectors) {
+ multwrite(drive);
+ ide_set_handler (drive, &multwrite_intr, WAIT_CMD);
+ return;
+ }
+ } else {
+ if (!rq->nr_sectors) { /* all done? */
+ rq = hwgroup->rq;
+ for (i = rq->nr_sectors; i > 0;){
+ i -= rq->current_nr_sectors;
+ ide_end_request(1, hwgroup);
+ }
+ return;
+ }
+ }
+ }
+ ide_error(drive, "multwrite_intr", stat);
+}
+
+/*
+ * Issue a simple drive command
+ * The drive must be selected beforehand.
+ */
+static void ide_cmd(ide_drive_t *drive, byte cmd, byte nsect, ide_handler_t *handler)
+{
+ ide_set_handler (drive, handler, WAIT_CMD);
+ OUT_BYTE(drive->ctl,IDE_CONTROL_REG);
+ OUT_BYTE(nsect,IDE_NSECTOR_REG);
+ OUT_BYTE(cmd,IDE_COMMAND_REG);
+}
+
+/*
+ * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
+ */
+static void set_multmode_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (OK_STAT(stat,READY_STAT,BAD_STAT)) {
+ drive->mult_count = drive->mult_req;
+ } else {
+ drive->mult_req = drive->mult_count = 0;
+ drive->special.b.recalibrate = 1;
+ (void) ide_dump_status(drive, "set_multmode", stat);
+ }
+}
+
+/*
+ * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
+ */
+static void set_geometry_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (!OK_STAT(stat,READY_STAT,BAD_STAT))
+ ide_error(drive, "set_geometry_intr", stat);
+}
+
+/*
+ * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
+ */
+static void recal_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (!OK_STAT(stat,READY_STAT,BAD_STAT))
+ ide_error(drive, "recal_intr", stat);
+}
+
+/*
+ * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
+ */
+static void drive_cmd_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (OK_STAT(stat,READY_STAT,BAD_STAT))
+ ide_end_drive_cmd (drive, stat, GET_ERR());
+ else
+ ide_error(drive, "drive_cmd", stat); /* calls ide_end_drive_cmd */
+}
+
+/*
+ * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
+ * commands to a drive. It used to do much more, but has been scaled back.
+ */
+static inline void do_special (ide_drive_t *drive)
+{
+ special_t *s = &drive->special;
+next:
+#ifdef DEBUG
+ printk("%s: do_special: 0x%02x\n", drive->name, s->all);
+#endif
+ if (s->b.set_geometry) {
+ s->b.set_geometry = 0;
+ if (drive->media == ide_disk) {
+ OUT_BYTE(drive->sect,IDE_SECTOR_REG);
+ OUT_BYTE(drive->cyl,IDE_LCYL_REG);
+ OUT_BYTE(drive->cyl>>8,IDE_HCYL_REG);
+ OUT_BYTE(((drive->head-1)|drive->select.all)&0xBF,IDE_SELECT_REG);
+ ide_cmd(drive, WIN_SPECIFY, drive->sect, &set_geometry_intr);
+ }
+ } else if (s->b.recalibrate) {
+ s->b.recalibrate = 0;
+ if (drive->media == ide_disk) {
+ ide_cmd(drive, WIN_RESTORE, drive->sect, &recal_intr);
+ }
+ } else if (s->b.set_pio) {
+ ide_tuneproc_t *tuneproc = HWIF(drive)->tuneproc;
+ s->b.set_pio = 0;
+ if (tuneproc != NULL)
+ tuneproc(drive, drive->pio_req);
+ goto next;
+ } else if (s->b.set_multmode) {
+ s->b.set_multmode = 0;
+ if (drive->media == ide_disk) {
+ if (drive->id && drive->mult_req > drive->id->max_multsect)
+ drive->mult_req = drive->id->max_multsect;
+ ide_cmd(drive, WIN_SETMULT, drive->mult_req, &set_multmode_intr);
+ } else
+ drive->mult_req = 0;
+ } else if (s->all) {
+ s->all = 0;
+ printk("%s: bad special flag: 0x%02x\n", drive->name, s->all);
+ }
+}
+
+/*
+ * This routine busy-waits for the drive status to be not "busy".
+ * It then checks the status for all of the "good" bits and none
+ * of the "bad" bits, and if all is okay it returns 0. All other
+ * cases return 1 after invoking ide_error() -- caller should just return.
+ *
+ * This routine should get fixed to not hog the cpu during extra long waits..
+ * That could be done by busy-waiting for the first jiffy or two, and then
+ * setting a timer to wake up at half-second intervals thereafter,
+ * until the timeout is reached.
+ */
+int ide_wait_stat (ide_drive_t *drive, byte good, byte bad, unsigned long timeout)
+{
+ byte stat;
+ unsigned long flags;
+
+test:
+ udelay(1); /* spec allows drive 400ns to change "BUSY" */
+ if (OK_STAT((stat = GET_STAT()), good, bad))
+ return 0; /* fast exit for most frequent case */
+ if (!(stat & BUSY_STAT)) {
+ ide_error(drive, "status error", stat);
+ return 1;
+ }
+
+ save_flags(flags);
+ sti();
+ timeout += jiffies;
+ do {
+ if (!((stat = GET_STAT()) & BUSY_STAT)) {
+ restore_flags(flags);
+ goto test;
+ }
+ } while (jiffies <= timeout);
+
+ restore_flags(flags);
+ ide_error(drive, "status timeout", GET_STAT());
+ return 1;
+}
+
+/*
+ * do_rw_disk() issues WIN_{MULT}READ and WIN_{MULT}WRITE commands to a disk,
+ * using LBA if supported, or CHS otherwise, to address sectors. It also takes
+ * care of issuing special DRIVE_CMDs.
+ */
+static inline void do_rw_disk (ide_drive_t *drive, struct request *rq, unsigned long block)
+{
+ unsigned short io_base = HWIF(drive)->io_base;
+
+ OUT_BYTE(drive->ctl,IDE_CONTROL_REG);
+ OUT_BYTE(rq->nr_sectors,io_base+IDE_NSECTOR_OFFSET);
+ if (drive->select.b.lba) {
+#ifdef DEBUG
+ printk("%s: %sing: LBAsect=%ld, sectors=%ld, buffer=0x%08lx\n",
+ drive->name, (rq->cmd==READ)?"read":"writ",
+ block, rq->nr_sectors, (unsigned long) rq->buffer);
+#endif
+ OUT_BYTE(block,io_base+IDE_SECTOR_OFFSET);
+ OUT_BYTE(block>>=8,io_base+IDE_LCYL_OFFSET);
+ OUT_BYTE(block>>=8,io_base+IDE_HCYL_OFFSET);
+ OUT_BYTE(((block>>8)&0x0f)|drive->select.all,io_base+IDE_SELECT_OFFSET);
+ } else {
+ unsigned int sect,head,cyl,track;
+ track = block / drive->sect;
+ sect = block % drive->sect + 1;
+ OUT_BYTE(sect,io_base+IDE_SECTOR_OFFSET);
+ head = track % drive->head;
+ cyl = track / drive->head;
+ OUT_BYTE(cyl,io_base+IDE_LCYL_OFFSET);
+ OUT_BYTE(cyl>>8,io_base+IDE_HCYL_OFFSET);
+ OUT_BYTE(head|drive->select.all,io_base+IDE_SELECT_OFFSET);
+#ifdef DEBUG
+ printk("%s: %sing: CHS=%d/%d/%d, sectors=%ld, buffer=0x%08lx\n",
+ drive->name, (rq->cmd==READ)?"read":"writ", cyl,
+ head, sect, rq->nr_sectors, (unsigned long) rq->buffer);
+#endif
+ }
+ if (rq->cmd == READ) {
+#ifdef CONFIG_BLK_DEV_TRITON
+ if (drive->using_dma && !(HWIF(drive)->dmaproc(ide_dma_read, drive)))
+ return;
+#endif /* CONFIG_BLK_DEV_TRITON */
+ ide_set_handler(drive, &read_intr, WAIT_CMD);
+ OUT_BYTE(drive->mult_count ? WIN_MULTREAD : WIN_READ, io_base+IDE_COMMAND_OFFSET);
+ return;
+ }
+ if (rq->cmd == WRITE) {
+#ifdef CONFIG_BLK_DEV_TRITON
+ if (drive->using_dma && !(HWIF(drive)->dmaproc(ide_dma_write, drive)))
+ return;
+#endif /* CONFIG_BLK_DEV_TRITON */
+ OUT_BYTE(drive->mult_count ? WIN_MULTWRITE : WIN_WRITE, io_base+IDE_COMMAND_OFFSET);
+ if (ide_wait_stat(drive, DATA_READY, drive->bad_wstat, WAIT_DRQ)) {
+ printk("%s: no DRQ after issuing %s\n", drive->name,
+ drive->mult_count ? "MULTWRITE" : "WRITE");
+ return;
+ }
+ if (!drive->unmask)
+ cli();
+ if (drive->mult_count) {
+ HWGROUP(drive)->wrq = *rq; /* scratchpad */
+ ide_set_handler (drive, &multwrite_intr, WAIT_CMD);
+ multwrite(drive);
+ } else {
+ ide_set_handler (drive, &write_intr, WAIT_CMD);
+ ide_output_data(drive, rq->buffer, SECTOR_WORDS);
+ }
+ return;
+ }
+ if (rq->cmd == IDE_DRIVE_CMD) {
+ byte *args = rq->buffer;
+ if (args) {
+#ifdef DEBUG
+ printk("%s: DRIVE_CMD cmd=0x%02x sc=0x%02x fr=0x%02x\n",
+ drive->name, args[0], args[1], args[2]);
+#endif
+ OUT_BYTE(args[2],io_base+IDE_FEATURE_OFFSET);
+ ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
+ return;
+ } else {
+ /*
+ * NULL is actually a valid way of waiting for
+ * all current requests to be flushed from the queue.
+ */
+#ifdef DEBUG
+ printk("%s: DRIVE_CMD (null)\n", drive->name);
+#endif
+ ide_end_drive_cmd(drive, GET_STAT(), GET_ERR());
+ return;
+ }
+ }
+ printk("%s: bad command: %d\n", drive->name, rq->cmd);
+ ide_end_request(0, HWGROUP(drive));
+}
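+
+/*
+ * Register packing example for the LBA branch above: block 0x01abcdef is
+ * issued as SECTOR=0xef, LCYL=0xcd, HCYL=0xab, with the low nibble 0x1
+ * OR'd into the drive-select byte, i.e. the standard 28-bit LBA split
+ * across the four task-file registers.
+ */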
+
+/*
+ * do_request() initiates handling of a new I/O request
+ */
+static inline void do_request (ide_hwif_t *hwif, struct request *rq)
+{
+ unsigned int minor, unit;
+ unsigned long block, blockend;
+ ide_drive_t *drive;
+
+ sti();
+#ifdef DEBUG
+ printk("%s: do_request: current=0x%08lx\n", hwif->name, (unsigned long) rq);
+#endif
+ minor = MINOR(rq->rq_dev);
+ unit = minor >> PARTN_BITS;
+ if (MAJOR(rq->rq_dev) != hwif->major || unit >= MAX_DRIVES) {
+ printk("%s: bad device number: %s\n",
+ hwif->name, kdevname(rq->rq_dev));
+ goto kill_rq;
+ }
+ drive = &hwif->drives[unit];
+#ifdef DEBUG
+ if (rq->bh && !buffer_locked(rq->bh)) {
+ printk("%s: block not locked\n", drive->name);
+ goto kill_rq;
+ }
+#endif
+ block = rq->sector;
+ blockend = block + rq->nr_sectors;
+ if ((blockend < block) || (blockend > drive->part[minor&PARTN_MASK].nr_sects)) {
+ printk("%s%c: bad access: block=%ld, count=%ld\n", drive->name,
+ (minor&PARTN_MASK)?'0'+(minor&PARTN_MASK):' ', block, rq->nr_sectors);
+ goto kill_rq;
+ }
+ block += drive->part[minor&PARTN_MASK].start_sect + drive->sect0;
+#if FAKE_FDISK_FOR_EZDRIVE
+ if (block == 0 && drive->remap_0_to_1)
+ block = 1; /* redirect MBR access to EZ-Drive partn table */
+#endif /* FAKE_FDISK_FOR_EZDRIVE */
+ ((ide_hwgroup_t *)hwif->hwgroup)->drive = drive;
+#ifdef CONFIG_BLK_DEV_HT6560B
+ if (hwif->selectproc)
+ hwif->selectproc (drive);
+#endif /* CONFIG_BLK_DEV_HT6560B */
+#if (DISK_RECOVERY_TIME > 0)
+ while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME);
+#endif
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ POLL_HWIF_TAPE_DRIVE; /* macro from ide-tape.h */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+ OUT_BYTE(drive->select.all,IDE_SELECT_REG);
+ if (ide_wait_stat(drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
+ printk("%s: drive not ready for command\n", drive->name);
+ return;
+ }
+
+ if (!drive->special.all) {
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ switch (drive->media) {
+ case ide_disk:
+ do_rw_disk (drive, rq, block);
+ return;
+#ifdef CONFIG_BLK_DEV_IDECD
+ case ide_cdrom:
+ ide_do_rw_cdrom (drive, block);
+ return;
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ case ide_tape:
+ if (rq->cmd == IDE_DRIVE_CMD) {
+ byte *args = (byte *) rq->buffer;
+ OUT_BYTE(args[2],IDE_FEATURE_REG);
+ ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
+ return;
+ }
+ idetape_do_request (drive, rq, block);
+ return;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+ default:
+ printk("%s: media type %d not supported\n",
+ drive->name, drive->media);
+ goto kill_rq;
+ }
+#else
+ do_rw_disk (drive, rq, block); /* simpler and faster */
+ return;
+#endif /* CONFIG_BLK_DEV_IDEATAPI */;
+ }
+ do_special(drive);
+ return;
+kill_rq:
+ ide_end_request(0, hwif->hwgroup);
+}
+
+/*
+ * The driver enables interrupts as much as possible. In order to do this,
+ * (a) the device-interrupt is always masked before entry, and
+ * (b) the timeout-interrupt is always disabled before entry.
+ *
+ * If we enter here from, say irq14, and then start a new request for irq15,
+ * (possible with "serialize" option) then we cannot ensure that we exit
+ * before the irq15 hits us. So, we must be careful not to let this bother us.
+ *
+ * Interrupts are still masked (by default) whenever we are exchanging
+ * data/cmds with a drive, because some drives seem to have very poor
+ * tolerance for latency during I/O. For devices which don't suffer from
+ * this problem (most don't), the unmask flag can be set using the "hdparm"
+ * utility, to permit other interrupts during data/cmd transfers.
+ */
+void ide_do_request (ide_hwgroup_t *hwgroup)
+{
+ cli(); /* paranoia */
+ if (hwgroup->handler != NULL) {
+ printk("%s: EEeekk!! handler not NULL in ide_do_request()\n", hwgroup->hwif->name);
+ return;
+ }
+ do {
+ ide_hwif_t *hwif = hwgroup->hwif;
+ struct request *rq;
+ if ((rq = hwgroup->rq) == NULL) {
+ do {
+ rq = blk_dev[hwif->major].current_request;
+ if (rq != NULL && rq->rq_status != RQ_INACTIVE)
+ goto got_rq;
+ } while ((hwif = hwif->next) != hwgroup->hwif);
+ return; /* no work left for this hwgroup */
+ }
+ got_rq:
+ do_request(hwgroup->hwif = hwif, hwgroup->rq = rq);
+ cli();
+ } while (hwgroup->handler == NULL);
+}
+
+/*
+ * do_hwgroup_request() invokes ide_do_request() after first masking
+ * all possible interrupts for the current hwgroup. This prevents race
+ * conditions in the event that an unexpected interrupt occurs while
+ * we are in the driver.
+ *
+ * Note that when an interrupt is used to reenter the driver, the first level
+ * handler will already have masked the irq that triggered, but any other ones
+ * for the hwgroup will still be unmasked. The driver tries to be careful
+ * about such things.
+ */
+static void do_hwgroup_request (ide_hwgroup_t *hwgroup)
+{
+ if (hwgroup->handler == NULL) {
+ ide_hwif_t *hgif = hwgroup->hwif;
+ ide_hwif_t *hwif = hgif;
+ do {
+ disable_irq(hwif->irq);
+ } while ((hwif = hwif->next) != hgif);
+ ide_do_request (hwgroup);
+ do {
+ enable_irq(hwif->irq);
+ } while ((hwif = hwif->next) != hgif);
+ }
+}
+
+static void do_ide0_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[0].hwgroup);
+}
+
+static void do_ide1_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[1].hwgroup);
+}
+
+static void do_ide2_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[2].hwgroup);
+}
+
+static void do_ide3_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[3].hwgroup);
+}
+
+static void timer_expiry (unsigned long data)
+{
+ ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
+ ide_drive_t *drive = hwgroup->drive;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ if (hwgroup->poll_timeout != 0) { /* polling in progress? */
+ ide_handler_t *handler = hwgroup->handler;
+ hwgroup->handler = NULL;
+ handler(drive);
+ } else if (hwgroup->handler == NULL) { /* not waiting for anything? */
+ sti(); /* drive must have responded just as the timer expired */
+ printk("%s: marginal timeout\n", drive->name);
+ } else {
+ hwgroup->handler = NULL; /* abort the operation */
+ if (hwgroup->hwif->dmaproc)
+ (void) hwgroup->hwif->dmaproc (ide_dma_abort, drive);
+ ide_error(drive, "irq timeout", GET_STAT());
+ }
+ if (hwgroup->handler == NULL)
+ do_hwgroup_request (hwgroup);
+ restore_flags(flags);
+}
+
+/*
+ * There's nothing really useful we can do with an unexpected interrupt,
+ * other than reading the status register (to clear it), and logging it.
+ * There should be no way that an irq can happen before we're ready for it,
+ * so we needn't worry much about losing an "important" interrupt here.
+ *
+ * On laptops (and "green" PCs), an unexpected interrupt occurs whenever the
+ * drive enters "idle", "standby", or "sleep" mode, so if the status looks
+ * "good", we just ignore the interrupt completely.
+ *
+ * This routine assumes cli() is in effect when called.
+ *
+ * If an unexpected interrupt happens on irq15 while we are handling irq14
+ * and if the two interfaces are "serialized" (CMD640B), then it looks like
+ * we could screw up by interfering with a new request being set up for irq15.
+ *
+ * In reality, this is a non-issue. The new command is not sent unless the
+ * drive is ready to accept one, in which case we know the drive is not
+ * trying to interrupt us. And ide_set_handler() is always invoked before
+ * completing the issuance of any new drive command, so we will not be
+ * accidentally invoked as a result of any valid command completion interrupt.
+ *
+ */
+static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
+{
+ byte stat;
+ unsigned int unit;
+ ide_hwif_t *hwif = hwgroup->hwif;
+
+ /*
+ * handle the unexpected interrupt
+ */
+ do {
+ if (hwif->irq == irq) {
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if (!drive->present)
+ continue;
+#ifdef CONFIG_BLK_DEV_HT6560B
+ if (hwif->selectproc)
+ hwif->selectproc (drive);
+#endif /* CONFIG_BLK_DEV_HT6560B */
+ if (!OK_STAT(stat=GET_STAT(), drive->ready_stat, BAD_STAT))
+ (void) ide_dump_status(drive, "unexpected_intr", stat);
+ if ((stat & DRQ_STAT))
+ try_to_flush_leftover_data(drive);
+ }
+ }
+ } while ((hwif = hwif->next) != hwgroup->hwif);
+#ifdef CONFIG_BLK_DEV_HT6560B
+ if (hwif->selectproc)
+ hwif->selectproc (hwgroup->drive);
+#endif /* CONFIG_BLK_DEV_HT6560B */
+}
+
+/*
+ * entry point for all interrupts, caller does cli() for us
+ */
+static void ide_intr (int irq, struct pt_regs *regs)
+{
+ ide_hwgroup_t *hwgroup = irq_to_hwgroup[irq];
+ ide_handler_t *handler;
+
+ if (irq == hwgroup->hwif->irq && (handler = hwgroup->handler) != NULL) {
+ ide_drive_t *drive = hwgroup->drive;
+ hwgroup->handler = NULL;
+ del_timer(&(hwgroup->timer));
+ if (drive->unmask)
+ sti();
+ handler(drive);
+ cli(); /* this is necessary, as next rq may be different irq */
+ if (hwgroup->handler == NULL) {
+ SET_RECOVERY_TIMER(HWIF(drive));
+ ide_do_request(hwgroup);
+ }
+ } else {
+ unexpected_intr(irq, hwgroup);
+ }
+ cli();
+}
+
+/*
+ * get_info_ptr() returns the (ide_drive_t *) for a given device number.
+ * It returns NULL if the given device number does not match any present drives.
+ */
+static ide_drive_t *get_info_ptr (kdev_t i_rdev)
+{
+ int major = MAJOR(i_rdev);
+ unsigned int h;
+
+ for (h = 0; h < MAX_HWIFS; ++h) {
+ ide_hwif_t *hwif = &ide_hwifs[h];
+ if (hwif->present && major == hwif->major) {
+ unsigned unit = DEVICE_NR(i_rdev);
+ if (unit < MAX_DRIVES) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if (drive->present)
+ return drive;
+ } else if (major == IDE0_MAJOR && unit < 4) {
+ printk("ide: probable bad entry for /dev/hd%c\n", 'a'+unit);
+ printk("ide: to fix it, run: /usr/src/linux/drivers/block/MAKEDEV.ide\n");
+ }
+ break;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * This function is intended to be used prior to invoking ide_do_drive_cmd().
+ */
+void ide_init_drive_cmd (struct request *rq)
+{
+ rq->buffer = NULL;
+ rq->cmd = IDE_DRIVE_CMD;
+ rq->sector = 0;
+ rq->nr_sectors = 0;
+ rq->current_nr_sectors = 0;
+ rq->sem = NULL;
+ rq->bh = NULL;
+ rq->bhtail = NULL;
+ rq->next = NULL;
+
+#if 0 /* these are done each time through ide_do_drive_cmd() */
+ rq->errors = 0;
+ rq->rq_status = RQ_ACTIVE;
+ rq->rq_dev = ????;
+#endif
+}
+
+/*
+ * This function issues a special IDE device request
+ * onto the request queue.
+ *
+ * If action is ide_wait, then the rq is queued at the end of
+ * the request queue, and the function sleeps until it has been
+ * processed. This is for use when invoked from an ioctl handler.
+ *
+ * If action is ide_preempt, then the rq is queued at the head of
+ * the request queue, displacing the currently-being-processed
+ * request and this function returns immediately without waiting
+ * for the new rq to be completed. This is VERY DANGEROUS, and is
+ * intended for careful use by the ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_next, then the rq is queued immediately after
+ * the currently-being-processed-request (if any), and the function
+ * returns without waiting for the new rq to be completed. As above,
+ * this is VERY DANGEROUS, and is intended for careful use by the
+ * ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_end, then the rq is queued at the end of the
+ * request queue, and the function returns immediately without waiting
+ * for the new rq to be completed. This is again intended for careful
+ * use by the ATAPI tape/cdrom driver code. (Currently used by ide-tape.c,
+ * when operating in the pipelined operation mode).
+ */
+int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
+{
+ unsigned long flags;
+ unsigned int major = HWIF(drive)->major;
+ struct request *cur_rq;
+ struct blk_dev_struct *bdev = &blk_dev[major];
+ struct semaphore sem = MUTEX_LOCKED;
+
+ rq->errors = 0;
+ rq->rq_status = RQ_ACTIVE;
+ rq->rq_dev = MKDEV(major,(drive->select.b.unit)<<PARTN_BITS);
+ if (action == ide_wait)
+ rq->sem = &sem;
+
+ save_flags(flags);
+ cli();
+ cur_rq = bdev->current_request;
+
+ if (cur_rq == NULL || action == ide_preempt) {
+ rq->next = cur_rq;
+ bdev->current_request = rq;
+ if (action == ide_preempt) {
+ HWGROUP(drive)->rq = NULL;
+ } else
+ if (HWGROUP(drive)->rq == NULL) { /* is this necessary (?) */
+ bdev->request_fn();
+ cli();
+ }
+ } else {
+ if (action == ide_wait || action == ide_end) {
+ while (cur_rq->next != NULL) /* find end of list */
+ cur_rq = cur_rq->next;
+ }
+ rq->next = cur_rq->next;
+ cur_rq->next = rq;
+ }
+ if (action == ide_wait && rq->rq_status != RQ_INACTIVE)
+ down(&sem); /* wait for it to be serviced */
+ restore_flags(flags);
+ return rq->errors ? -EIO : 0; /* return -EIO if errors */
+}
+
+static int ide_open(struct inode * inode, struct file * filp)
+{
+ ide_drive_t *drive;
+ unsigned long flags;
+
+ if ((drive = get_info_ptr(inode->i_rdev)) == NULL)
+ return -ENODEV;
+ save_flags(flags);
+ cli();
+ while (drive->busy)
+ sleep_on(&drive->wqueue);
+ drive->usage++;
+ restore_flags(flags);
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->media == ide_cdrom)
+ return ide_cdrom_open (inode, filp, drive);
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape)
+ return idetape_blkdev_open (inode, filp, drive);
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ if (drive->removeable) {
+ byte door_lock[] = {WIN_DOORLOCK,0,0,0};
+ struct request rq;
+ check_disk_change(inode->i_rdev);
+ ide_init_drive_cmd (&rq);
+ rq.buffer = door_lock;
+ /*
+ * Ignore the return code from door_lock,
+ * since the open() has already succeeded,
+ * and the door_lock is irrelevant at this point.
+ */
+ (void) ide_do_drive_cmd(drive, &rq, ide_wait);
+ }
+ return 0;
+}
+
+/*
+ * Releasing a block device means we sync() it, so that it can safely
+ * be forgotten about...
+ */
+static void ide_release(struct inode * inode, struct file * file)
+{
+ ide_drive_t *drive;
+
+ if ((drive = get_info_ptr(inode->i_rdev)) != NULL) {
+ sync_dev(inode->i_rdev);
+ drive->usage--;
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->media == ide_cdrom) {
+ ide_cdrom_release (inode, file, drive);
+ return;
+ }
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape) {
+ idetape_blkdev_release (inode, file, drive);
+ return;
+ }
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ if (drive->removeable) {
+ byte door_unlock[] = {WIN_DOORUNLOCK,0,0,0};
+ struct request rq;
+ invalidate_buffers(inode->i_rdev);
+ ide_init_drive_cmd (&rq);
+ rq.buffer = door_unlock;
+ (void) ide_do_drive_cmd(drive, &rq, ide_wait);
+ }
+ }
+}
+
+/*
+ * This routine is called to flush all partitions and partition tables
+ * for a changed disk, and then re-read the new partition table.
+ * If we are revalidating a disk because of a media change, then we
+ * enter with usage == 0. If we are using an ioctl, we automatically have
+ * usage == 1 (we need an open channel to use an ioctl :-), so this
+ * is our limit.
+ */
+static int revalidate_disk(kdev_t i_rdev)
+{
+ ide_drive_t *drive;
+ unsigned int p, major, minor;
+ long flags;
+
+ if ((drive = get_info_ptr(i_rdev)) == NULL)
+ return -ENODEV;
+
+ major = MAJOR(i_rdev);
+ minor = drive->select.b.unit << PARTN_BITS;
+ save_flags(flags);
+ cli();
+ if (drive->busy || (drive->usage > 1)) {
+ restore_flags(flags);
+ return -EBUSY;
+ };
+ drive->busy = 1;
+ restore_flags(flags);
+
+ for (p = 0; p < (1<<PARTN_BITS); ++p) {
+ if (drive->part[p].nr_sects > 0) {
+ kdev_t devp = MKDEV(major, minor+p);
+ sync_dev (devp);
+ invalidate_inodes (devp);
+ invalidate_buffers (devp);
+ }
+ drive->part[p].start_sect = 0;
+ drive->part[p].nr_sects = 0;
+ };
+
+ drive->part[0].nr_sects = current_capacity(drive);
+ if (drive->media == ide_disk)
+ resetup_one_dev(HWIF(drive)->gd, drive->select.b.unit);
+
+ drive->busy = 0;
+ wake_up(&drive->wqueue);
+ return 0;
+}
+
+static int write_fs_long (unsigned long useraddr, long value)
+{
+ int err;
+
+ if (NULL == (long *)useraddr)
+ return -EINVAL;
+ if ((err = verify_area(VERIFY_WRITE, (long *)useraddr, sizeof(long))))
+ return err;
+ put_user((unsigned)value, (long *) useraddr);
+ return 0;
+}
+
+static int ide_ioctl (struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct hd_geometry *loc = (struct hd_geometry *) arg;
+ int err;
+ ide_drive_t *drive;
+ unsigned long flags;
+ struct request rq;
+
+ ide_init_drive_cmd (&rq);
+ if (!inode || !(inode->i_rdev))
+ return -EINVAL;
+ if ((drive = get_info_ptr(inode->i_rdev)) == NULL)
+ return -ENODEV;
+ switch (cmd) {
+ case HDIO_GETGEO:
+ if (!loc || drive->media != ide_disk) return -EINVAL;
+ err = verify_area(VERIFY_WRITE, loc, sizeof(*loc));
+ if (err) return err;
+ put_user(drive->bios_head, (byte *) &loc->heads);
+ put_user(drive->bios_sect, (byte *) &loc->sectors);
+ put_user(drive->bios_cyl, (unsigned short *) &loc->cylinders);
+ put_user((unsigned)drive->part[MINOR(inode->i_rdev)&PARTN_MASK].start_sect,
+ (unsigned long *) &loc->start);
+ return 0;
+
+ case BLKFLSBUF:
+ if(!suser()) return -EACCES;
+ fsync_dev(inode->i_rdev);
+ invalidate_buffers(inode->i_rdev);
+ return 0;
+
+ case BLKRASET:
+ if(!suser()) return -EACCES;
+ if(arg > 0xff) return -EINVAL;
+ read_ahead[MAJOR(inode->i_rdev)] = arg;
+ return 0;
+
+ case BLKRAGET:
+ return write_fs_long(arg, read_ahead[MAJOR(inode->i_rdev)]);
+
+ case BLKGETSIZE: /* Return device size */
+ return write_fs_long(arg, drive->part[MINOR(inode->i_rdev)&PARTN_MASK].nr_sects);
+ case BLKRRPART: /* Re-read partition tables */
+ return revalidate_disk(inode->i_rdev);
+
+ case HDIO_GET_KEEPSETTINGS:
+ return write_fs_long(arg, drive->keep_settings);
+
+ case HDIO_GET_UNMASKINTR:
+ return write_fs_long(arg, drive->unmask);
+
+ case HDIO_GET_DMA:
+ return write_fs_long(arg, drive->using_dma);
+
+ case HDIO_GET_32BIT:
+ return write_fs_long(arg, drive->io_32bit);
+
+ case HDIO_GET_MULTCOUNT:
+ return write_fs_long(arg, drive->mult_count);
+
+ case HDIO_GET_IDENTITY:
+ if (!arg || (MINOR(inode->i_rdev) & PARTN_MASK))
+ return -EINVAL;
+ if (drive->id == NULL)
+ return -ENOMSG;
+ err = verify_area(VERIFY_WRITE, (char *)arg, sizeof(*drive->id));
+ if (!err)
+ memcpy_tofs((char *)arg, (char *)drive->id, sizeof(*drive->id));
+ return err;
+
+ case HDIO_GET_NOWERR:
+ return write_fs_long(arg, drive->bad_wstat == BAD_R_STAT);
+
+ case HDIO_SET_DMA:
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->media == ide_cdrom)
+ return -EPERM;
+#endif /* CONFIG_BLK_DEV_IDECD */
+ if (!drive->id || !(drive->id->capability & 1) || !HWIF(drive)->dmaproc)
+ return -EPERM;
+ case HDIO_SET_KEEPSETTINGS:
+ case HDIO_SET_UNMASKINTR:
+ case HDIO_SET_NOWERR:
+ if (arg > 1)
+ return -EINVAL;
+ case HDIO_SET_32BIT:
+ if (!suser())
+ return -EACCES;
+ if ((MINOR(inode->i_rdev) & PARTN_MASK))
+ return -EINVAL;
+ save_flags(flags);
+ cli();
+ switch (cmd) {
+ case HDIO_SET_DMA:
+ if (!(HWIF(drive)->dmaproc)) {
+ restore_flags(flags);
+ return -EPERM;
+ }
+ drive->using_dma = arg;
+ break;
+ case HDIO_SET_KEEPSETTINGS:
+ drive->keep_settings = arg;
+ break;
+ case HDIO_SET_UNMASKINTR:
+ if (arg && HWIF(drive)->no_unmask) {
+ restore_flags(flags);
+ return -EPERM;
+ }
+ drive->unmask = arg;
+ break;
+ case HDIO_SET_NOWERR:
+ drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;
+ break;
+ case HDIO_SET_32BIT:
+ if (arg > (1 + (SUPPORT_VLB_SYNC<<1)))
+ return -EINVAL;
+ drive->io_32bit = arg;
+#ifdef CONFIG_BLK_DEV_DTC2278
+ if (HWIF(drive)->chipset == ide_dtc2278)
+ HWIF(drive)->drives[!drive->select.b.unit].io_32bit = arg;
+#endif /* CONFIG_BLK_DEV_DTC2278 */
+ break;
+ }
+ restore_flags(flags);
+ return 0;
+
+ case HDIO_SET_MULTCOUNT:
+ if (!suser())
+ return -EACCES;
+ if (MINOR(inode->i_rdev) & PARTN_MASK)
+ return -EINVAL;
+ if (drive->id && arg > drive->id->max_multsect)
+ return -EINVAL;
+ save_flags(flags);
+ cli();
+ if (drive->special.b.set_multmode) {
+ restore_flags(flags);
+ return -EBUSY;
+ }
+ drive->mult_req = arg;
+ drive->special.b.set_multmode = 1;
+ restore_flags(flags);
+ (void) ide_do_drive_cmd (drive, &rq, ide_wait);
+ return (drive->mult_count == arg) ? 0 : -EIO;
+
+ case HDIO_DRIVE_CMD:
+ {
+ unsigned long args;
+
+ if (NULL == (long *) arg)
+ err = ide_do_drive_cmd(drive, &rq, ide_wait);
+ else {
+ if (!(err = verify_area(VERIFY_READ,(long *)arg,sizeof(long))))
+ {
+ args = get_user((long *)arg);
+ if (!(err = verify_area(VERIFY_WRITE,(long *)arg,sizeof(long)))) {
+ rq.buffer = (char *) &args;
+ err = ide_do_drive_cmd(drive, &rq, ide_wait);
+ put_user(args,(long *)arg);
+ }
+ }
+ }
+ return err;
+ }
+ case HDIO_SET_PIO_MODE:
+ if (!suser())
+ return -EACCES;
+ if (MINOR(inode->i_rdev) & PARTN_MASK)
+ return -EINVAL;
+ if (!HWIF(drive)->tuneproc)
+ return -ENOSYS;
+ save_flags(flags);
+ cli();
+ drive->pio_req = (int) arg;
+ drive->special.b.set_pio = 1;
+ restore_flags(flags);
+ return 0;
+
+ RO_IOCTLS(inode->i_rdev, arg);
+
+ default:
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->media == ide_cdrom)
+ return ide_cdrom_ioctl(drive, inode, file, cmd, arg);
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape)
+ return idetape_blkdev_ioctl(drive, inode, file, cmd, arg);
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ return -EPERM;
+ }
+}
+
+static int ide_check_media_change (kdev_t i_rdev)
+{
+ ide_drive_t *drive;
+
+ if ((drive = get_info_ptr(i_rdev)) == NULL)
+ return -ENODEV;
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->media == ide_cdrom)
+ return ide_cdrom_check_media_change (drive);
+#endif /* CONFIG_BLK_DEV_IDECD */
+ if (drive->removeable) /* for disks */
+ return 1; /* always assume it was changed */
+ return 0;
+}
+
+void ide_fixstring (byte *s, const int bytecount, const int byteswap)
+{
+ byte *p = s, *end = &s[bytecount & ~1]; /* bytecount must be even */
+
+ if (byteswap) {
+ /* convert from big-endian to host byte order */
+ for (p = end ; p != s;) {
+ unsigned short *pp = (unsigned short *) (p -= 2);
+ *pp = ntohs(*pp);
+ }
+ }
+
+ /* strip leading blanks */
+ while (s != end && *s == ' ')
+ ++s;
+
+ /* compress internal blanks and strip trailing blanks */
+ while (s != end && *s) {
+ if (*s++ != ' ' || (s != end && *s && *s != ' '))
+ *p++ = *(s-1);
+ }
+
+ /* wipe out trailing garbage */
+ while (p != end)
+ *p++ = '\0';
+}
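+
+/*
+ * Worked example (illustrative, not part of the original driver): the
+ * IDENTIFY model field arrives as big-endian 16-bit words, so the raw
+ * bytes "aMtxro7 43 5TA  " become "Maxtor 7345 AT  " after the byteswap
+ * pass above, and the blank handling then trims the tail, leaving
+ * "Maxtor 7345 AT" padded with NULs.
+ */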
+
+static inline void do_identify (ide_drive_t *drive, byte cmd)
+{
+ int bswap;
+ struct hd_driveid *id;
+ unsigned long capacity, check;
+
+ id = drive->id = kmalloc (SECTOR_WORDS*4, GFP_KERNEL);
+ ide_input_data(drive, id, SECTOR_WORDS); /* read 512 bytes of id info */
+ sti();
+
+ /*
+ * EATA SCSI controllers do a hardware ATA emulation: ignore them
+ */
+ if ((id->model[0] == 'P' && id->model[1] == 'M')
+ || (id->model[0] == 'S' && id->model[1] == 'K')) {
+ printk("%s: EATA SCSI HBA %.10s\n", drive->name, id->model);
+ drive->present = 0;
+ return;
+ }
+
+ /*
+ * WIN_IDENTIFY returns little-endian info,
+ * WIN_PIDENTIFY *usually* returns little-endian info.
+ */
+ bswap = 1;
+ if (cmd == WIN_PIDENTIFY) {
+ if ((id->model[0] == 'N' && id->model[1] == 'E') /* NEC */
+ || (id->model[0] == 'F' && id->model[1] == 'X') /* Mitsumi */
+ || (id->model[0] == 'P' && id->model[1] == 'i'))/* Pioneer */
+ bswap = 0; /* Vertos drives may still be weird */
+ }
+ ide_fixstring (id->model, sizeof(id->model), bswap);
+ ide_fixstring (id->fw_rev, sizeof(id->fw_rev), bswap);
+ ide_fixstring (id->serial_no, sizeof(id->serial_no), bswap);
+
+ /*
+ * Check for an ATAPI device
+ */
+
+ if (cmd == WIN_PIDENTIFY) {
+ byte type = (id->config >> 8) & 0x1f;
+ printk("%s: %s, ATAPI ", drive->name, id->model);
+ switch (type) {
+ case 0: /* Early cdrom models used zero */
+ case 5:
+#ifdef CONFIG_BLK_DEV_IDECD
+ printk ("CDROM drive\n");
+ drive->media = ide_cdrom;
+ drive->present = 1;
+ drive->removeable = 1;
+ return;
+#else
+ printk ("CDROM ");
+ break;
+#endif /* CONFIG_BLK_DEV_IDECD */
+ case 1:
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ printk ("TAPE drive");
+ if (idetape_identify_device (drive,id)) {
+ drive->media = ide_tape;
+ drive->present = 1;
+ drive->removeable = 1;
+ if (HWIF(drive)->dmaproc != NULL &&
+ !HWIF(drive)->dmaproc(ide_dma_check, drive))
+ printk(", DMA");
+ printk("\n");
+ }
+ else {
+ drive->present = 0;
+ printk ("\nide-tape: the tape is not supported by this version of the driver\n");
+ }
+ return;
+#else
+ printk ("TAPE ");
+ break;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ default:
+ drive->present = 0;
+ printk("Type %d - Unknown device\n", type);
+ return;
+ }
+ drive->present = 0;
+ printk("- not supported by this kernel\n");
+ return;
+ }
+
+ /* check for removeable disks (eg. SYQUEST), ignore 'WD' drives */
+ if (id->config & (1<<7)) { /* removeable disk ? */
+ if (id->model[0] != 'W' || id->model[1] != 'D')
+ drive->removeable = 1;
+ }
+
+ drive->media = ide_disk;
+ /* Extract geometry if we did not already have one for the drive */
+ if (!drive->present) {
+ drive->present = 1;
+ drive->cyl = drive->bios_cyl = id->cyls;
+ drive->head = drive->bios_head = id->heads;
+ drive->sect = drive->bios_sect = id->sectors;
+ }
+ /* Handle logical geometry translation by the drive */
+ if ((id->field_valid & 1) && id->cur_cyls && id->cur_heads
+ && (id->cur_heads <= 16) && id->cur_sectors)
+ {
+ /*
+ * Extract the physical drive geometry for our use.
+ * Note that we purposely do *not* update the bios info.
+ * This way, programs that use it (like fdisk) will
+ * still have the same logical view as the BIOS does,
+ * which keeps the partition table from being screwed.
+ *
+ * An exception to this is the cylinder count,
+ * which we reexamine later on to correct for 1024 limitations.
+ */
+ drive->cyl = id->cur_cyls;
+ drive->head = id->cur_heads;
+ drive->sect = id->cur_sectors;
+
+ /* check for word-swapped "capacity" field in id information */
+ capacity = drive->cyl * drive->head * drive->sect;
+ check = (id->cur_capacity0 << 16) | id->cur_capacity1;
+ if (check == capacity) { /* was it swapped? */
+ /* yes, bring it into little-endian order: */
+ id->cur_capacity0 = (capacity >> 0) & 0xffff;
+ id->cur_capacity1 = (capacity >> 16) & 0xffff;
+ }
+ }
+ /* Use physical geometry if what we have still makes no sense */
+ if ((!drive->head || drive->head > 16) && id->heads && id->heads <= 16) {
+ drive->cyl = id->cyls;
+ drive->head = id->heads;
+ drive->sect = id->sectors;
+ }
+ /* Correct the number of cyls if the bios value is too small */
+ if (drive->sect == drive->bios_sect && drive->head == drive->bios_head) {
+ if (drive->cyl > drive->bios_cyl)
+ drive->bios_cyl = drive->cyl;
+ }
+
+ (void) current_capacity (drive); /* initialize LBA selection */
+
+ printk ("%s: %.40s, %ldMB w/%dKB Cache, %sCHS=%d/%d/%d",
+ drive->name, id->model, current_capacity(drive)/2048L, id->buf_size/2,
+ drive->select.b.lba ? "LBA, " : "",
+ drive->bios_cyl, drive->bios_head, drive->bios_sect);
+
+ drive->mult_count = 0;
+ if (id->max_multsect) {
+ drive->mult_req = INITIAL_MULT_COUNT;
+ if (drive->mult_req > id->max_multsect)
+ drive->mult_req = id->max_multsect;
+ if (drive->mult_req || ((id->multsect_valid & 1) && id->multsect))
+ drive->special.b.set_multmode = 1;
+ }
+ if (HWIF(drive)->dmaproc != NULL) { /* hwif supports DMA? */
+ if (!(HWIF(drive)->dmaproc(ide_dma_check, drive)))
+ printk(", DMA");
+ }
+ printk("\n");
+}
+
+/*
+ * Delay for *at least* 10ms. As we don't know how much time is left
+ * until the next tick occurs, we wait an extra tick to be safe.
+ * This is used only during the probing/polling for drives at boot time.
+ */
+static void delay_10ms (void)
+{
+ unsigned long timer = jiffies + (HZ + 99)/100 + 1;
+ while (timer > jiffies);
+}
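+
+/*
+ * Worked example (illustrative, not part of the original driver): with
+ * the usual HZ == 100, (HZ + 99)/100 + 1 evaluates to 2, so the loop
+ * busy-waits until jiffies has advanced by two ticks -- somewhere
+ * between 10ms and 20ms, depending on where in the current tick we start.
+ */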
+
+/*
+ * try_to_identify() sends an ATA(PI) IDENTIFY request to a drive
+ * and waits for a response. It also monitors irqs while this is
+ * happening, in the hope of automatically determining which one is
+ * being used by the interface.
+ *
+ * Returns: 0 device was identified
+ * 1 device timed-out (no response to identify request)
+ * 2 device aborted the command (refused to identify itself)
+ */
+static int try_to_identify (ide_drive_t *drive, byte cmd)
+{
+ int hd_status, rc;
+ unsigned long timeout;
+ int irqs = 0;
+
+ if (!HWIF(drive)->irq) { /* already got an IRQ? */
+ probe_irq_off(probe_irq_on()); /* clear dangling irqs */
+ irqs = probe_irq_on(); /* start monitoring irqs */
+ OUT_BYTE(drive->ctl,IDE_CONTROL_REG); /* enable device irq */
+ }
+
+ delay_10ms(); /* take a deep breath */
+ if ((IN_BYTE(IDE_ALTSTATUS_REG) ^ IN_BYTE(IDE_STATUS_REG)) & ~INDEX_STAT) {
+ printk("%s: probing with STATUS instead of ALTSTATUS\n", drive->name);
+ hd_status = IDE_STATUS_REG; /* ancient Seagate drives */
+ } else
+ hd_status = IDE_ALTSTATUS_REG; /* use non-intrusive polling */
+
+ OUT_BYTE(cmd,IDE_COMMAND_REG); /* ask drive for ID */
+ timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
+ timeout += jiffies;
+ do {
+ if (jiffies > timeout) {
+ if (!HWIF(drive)->irq)
+ (void) probe_irq_off(irqs);
+ return 1; /* drive timed-out */
+ }
+ delay_10ms(); /* give drive a breather */
+ } while (IN_BYTE(hd_status) & BUSY_STAT);
+
+ delay_10ms(); /* wait for IRQ and DRQ_STAT */
+ if (OK_STAT(GET_STAT(),DRQ_STAT,BAD_R_STAT)) {
+ cli(); /* some systems need this */
+ do_identify(drive, cmd); /* drive returned ID */
+ if (drive->present && drive->media != ide_tape) {
+ ide_tuneproc_t *tuneproc = HWIF(drive)->tuneproc;
+ if (tuneproc != NULL && drive->autotune == 1)
+ tuneproc(drive, 255); /* auto-tune PIO mode */
+ }
+ rc = 0; /* drive responded with ID */
+ } else
+ rc = 2; /* drive refused ID */
+ if (!HWIF(drive)->irq) {
+ irqs = probe_irq_off(irqs); /* get irq number */
+ if (irqs > 0)
+ HWIF(drive)->irq = irqs;
+ else /* Mmmm.. multiple IRQs */
+ printk("%s: IRQ probe failed (%d)\n", drive->name, irqs);
+ }
+ return rc;
+}
+
+/*
+ * do_probe() has the difficult job of finding a drive if it exists,
+ * without getting hung up if it doesn't exist, without trampling on
+ * ethernet cards, and without leaving any IRQs dangling to haunt us later.
+ *
+ * If a drive is "known" to exist (from CMOS or kernel parameters),
+ * but does not respond right away, the probe will "hang in there"
+ * for the maximum wait time (about 30 seconds), otherwise it will
+ * exit much more quickly.
+ *
+ * Returns: 0 device was identified
+ * 1 device timed-out (no response to identify request)
+ * 2 device aborted the command (refused to identify itself)
+ * 3 bad status from device (possible for ATAPI drives)
+ * 4 probe was not attempted because failure was obvious
+ */
+static int do_probe (ide_drive_t *drive, byte cmd)
+{
+ int rc;
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ if (drive->present) { /* avoid waiting for inappropriate probes */
+ if ((drive->media != ide_disk) && (cmd == WIN_IDENTIFY))
+ return 4;
+ }
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+#ifdef DEBUG
+ printk("probing for %s: present=%d, media=%d, probetype=%s\n",
+ drive->name, drive->present, drive->media,
+ (cmd == WIN_IDENTIFY) ? "ATA" : "ATAPI");
+#endif
+#ifdef CONFIG_BLK_DEV_HT6560B
+ if (HWIF(drive)->selectproc)
+ HWIF(drive)->selectproc (drive);
+#endif /* CONFIG_BLK_DEV_HT6560B */
+ OUT_BYTE(drive->select.all,IDE_SELECT_REG); /* select target drive */
+ delay_10ms(); /* wait for BUSY_STAT */
+ if (IN_BYTE(IDE_SELECT_REG) != drive->select.all && !drive->present) {
+ OUT_BYTE(0xa0,IDE_SELECT_REG); /* exit with drive0 selected */
+ return 3; /* no i/f present: avoid killing ethernet cards */
+ }
+
+ if (OK_STAT(GET_STAT(),READY_STAT,BUSY_STAT)
+ || drive->present || cmd == WIN_PIDENTIFY)
+ {
+ if ((rc = try_to_identify(drive,cmd))) /* send cmd and wait */
+ rc = try_to_identify(drive,cmd); /* failed: try again */
+ if (rc == 1)
+ printk("%s: no response (status = 0x%02x)\n", drive->name, GET_STAT());
+ (void) GET_STAT(); /* ensure drive irq is clear */
+ } else {
+ rc = 3; /* not present or maybe ATAPI */
+ }
+ if (drive->select.b.unit != 0) {
+ OUT_BYTE(0xa0,IDE_SELECT_REG); /* exit with drive0 selected */
+ delay_10ms();
+ (void) GET_STAT(); /* ensure drive irq is clear */
+ }
+ return rc;
+}
+
+/*
+ * probe_for_drive() tests for the existence of a given drive using do_probe().
+ *
+ * Returns: 0 no device was found
+ * 1 device was found (note: drive->present might still be 0)
+ */
+static inline byte probe_for_drive (ide_drive_t *drive)
+{
+ if (drive->noprobe) /* skip probing? */
+ return drive->present;
+ if (do_probe(drive, WIN_IDENTIFY) >= 2) { /* if !(success||timed-out) */
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ (void) do_probe(drive, WIN_PIDENTIFY); /* look for ATAPI device */
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+ }
+ if (!drive->present)
+ return 0; /* drive not found */
+ if (drive->id == NULL) { /* identification failed? */
+ if (drive->media == ide_disk) {
+ printk ("%s: non-IDE drive, CHS=%d/%d/%d\n",
+ drive->name, drive->cyl, drive->head, drive->sect);
+ }
+#ifdef CONFIG_BLK_DEV_IDECD
+ else if (drive->media == ide_cdrom) {
+ printk("%s: ATAPI cdrom (?)\n", drive->name);
+ }
+#endif /* CONFIG_BLK_DEV_IDECD */
+ else {
+ drive->present = 0; /* nuke it */
+ return 1; /* drive was found */
+ }
+ }
+ if (drive->media == ide_disk && !drive->select.b.lba) {
+ if (!drive->head || drive->head > 16) {
+ printk("%s: INVALID GEOMETRY: %d PHYSICAL HEADS?\n",
+ drive->name, drive->head);
+ drive->present = 0;
+ }
+ }
+ return 1; /* drive was found */
+}
+
+/*
+ * This routine only knows how to look for drive units 0 and 1
+ * on an interface, so any setting of MAX_DRIVES > 2 won't work here.
+ */
+static void probe_for_drives (ide_hwif_t *hwif)
+{
+ unsigned int unit;
+
+ if (check_region(hwif->io_base,8) || check_region(hwif->ctl_port,1)) {
+ int msgout = 0;
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if (drive->present) {
+ drive->present = 0;
+ printk("%s: ERROR, PORTS ALREADY IN USE\n", drive->name);
+ msgout = 1;
+ }
+ }
+ if (!msgout)
+ printk("%s: ports already in use, skipping probe\n", hwif->name);
+ } else {
+ unsigned long flags;
+ save_flags(flags);
+
+#if (MAX_DRIVES > 2)
+ printk("%s: probing for first 2 of %d possible drives\n", hwif->name, MAX_DRIVES);
+#endif
+ sti(); /* needed for jiffies and irq probing */
+ /*
+ * Second drive should only exist if first drive was found,
+ * but a lot of cdrom drives seem to be configured as slave-only
+ */
+ for (unit = 0; unit < 2; ++unit) { /* note the hardcoded '2' */
+ ide_drive_t *drive = &hwif->drives[unit];
+ (void) probe_for_drive (drive);
+ }
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if (drive->present) {
+ hwif->present = 1;
+ request_region(hwif->io_base, 8, hwif->name);
+ request_region(hwif->ctl_port, 1, hwif->name);
+ break;
+ }
+ }
+ restore_flags(flags);
+ }
+}
+
+/*
+ * stridx() returns the offset of c within s,
+ * or -1 if c is '\0' or not found within s.
+ */
+static int stridx (const char *s, char c)
+{
+ char *i = strchr(s, c);
+ return (i && c) ? i - s : -1;
+}
+
+/*
+ * match_parm() does parsing for ide_setup():
+ *
+ * 1. the first char of s must be '='.
+ * 2. if the remainder matches one of the supplied keywords,
+ * the index (1 based) of the keyword is negated and returned.
+ * 3. if the remainder is a series of no more than max_vals numbers
+ * separated by commas, the numbers are saved in vals[] and a
+ * count of how many were saved is returned. Base10 is assumed,
+ * and base16 is allowed when prefixed with "0x".
+ * 4. otherwise, zero is returned.
+ */
+static int match_parm (char *s, const char *keywords[], int vals[], int max_vals)
+{
+ static const char *decimal = "0123456789";
+ static const char *hex = "0123456789abcdef";
+ int i, n;
+
+ if (*s++ == '=') {
+ /*
+ * Try matching against the supplied keywords,
+ * and return -(index+1) if we match one
+ */
+ for (i = 0; *keywords != NULL; ++i) {
+ if (!strcmp(s, *keywords++))
+ return -(i+1);
+ }
+ /*
+ * Look for a series of no more than "max_vals"
+ * numeric values separated by commas, in base10,
+ * or base16 when prefixed with "0x".
+ * Return a count of how many were found.
+ */
+ for (n = 0; (i = stridx(decimal, *s)) >= 0;) {
+ vals[n] = i;
+ while ((i = stridx(decimal, *++s)) >= 0)
+ vals[n] = (vals[n] * 10) + i;
+ if (*s == 'x' && !vals[n]) {
+ while ((i = stridx(hex, *++s)) >= 0)
+ vals[n] = (vals[n] * 0x10) + i;
+ }
+ if (++n == max_vals)
+ break;
+ if (*s == ',')
+ ++s;
+ }
+ if (!*s)
+ return n;
+ }
+ return 0; /* zero = nothing matched */
+}
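+
+/*
+ * Illustrative sketch (hypothetical, not part of the original driver):
+ * a few example inputs and what match_parm() returns for them.  The
+ * guard macro is made up and never defined; the code is only meant
+ * to be read, not compiled into the kernel.
+ */
+#ifdef IDE_SETUP_EXAMPLES
+static void match_parm_examples (void)
+{
+	static const char *kw[] = {"noprobe", "serialize", NULL};
+	int vals[3];
+
+	(void) match_parm("=serialize", kw, vals, 3);	   /* -2: keyword #2 matched      */
+	(void) match_parm("=0x1f0,0x3f6,14", kw, vals, 3); /* 3: vals={0x1f0,0x3f6,14}    */
+	(void) match_parm("=bogus", kw, vals, 3);	   /* 0: nothing matched          */
+}
+#endif /* IDE_SETUP_EXAMPLES */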
+
+/*
+ * ide_setup() gets called VERY EARLY during initialization,
+ * to handle kernel "command line" strings beginning with "hdx="
+ * or "ide". Here is the complete set currently supported:
+ *
+ * "hdx=" is recognized for all "x" from "a" to "h", such as "hdc".
+ * "idex=" is recognized for all "x" from "0" to "3", such as "ide1".
+ *
+ * "hdx=noprobe" : drive may be present, but do not probe for it
+ * "hdx=nowerr" : ignore the WRERR_STAT bit on this drive
+ * "hdx=cdrom" : drive is present, and is a cdrom drive
+ * "hdx=cyl,head,sect" : disk drive is present, with specified geometry
+ * "hdx=autotune" : driver will attempt to tune interface speed
+ * to the fastest PIO mode supported,
+ * if possible for this drive only.
+ * Not fully supported by all chipset types,
+ * and quite likely to cause trouble with
+ * older/odd IDE drives.
+ *
+ * "idex=noprobe" : do not attempt to access/use this interface
+ * "idex=base" : probe for an interface at the addr specified,
+ * where "base" is usually 0x1f0 or 0x170
+ * and "ctl" is assumed to be "base"+0x206
+ * "idex=base,ctl" : specify both base and ctl
+ * "idex=base,ctl,irq" : specify base, ctl, and irq number
+ * "idex=autotune" : driver will attempt to tune interface speed
+ * to the fastest PIO mode supported,
+ * for all drives on this interface.
+ * Not fully supported by all chipset types,
+ * and quite likely to cause trouble with
+ * older/odd IDE drives.
+ * "idex=noautotune" : driver will NOT attempt to tune interface speed
+ * This is the default for most chipsets,
+ * except the cmd640.
+ *
+ * The following two are valid ONLY on ide0,
+ * and the defaults for the base,ctl ports must not be altered.
+ *
+ * "ide0=serialize" : do not overlap operations on ide0 and ide1.
+ * "ide0=dtc2278" : probe/support DTC2278 interface
+ * "ide0=ht6560b" : probe/support HT6560B interface
+ * "ide0=cmd640_vlb" : *REQUIRED* for VLB cards with the CMD640 chip
+ * (not for PCI -- automatically detected)
+ * "ide0=qd6580" : probe/support qd6580 interface
+ * "ide0=ali14xx" : probe/support ali14xx chipsets (ALI M1439, M1443, M1445)
+ * "ide0=umc8672" : probe/support umc8672 chipsets
+ */
+void ide_setup (char *s)
+{
+ int i, vals[3];
+ ide_hwif_t *hwif;
+ ide_drive_t *drive;
+ unsigned int hw, unit;
+ const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1);
+ const char max_hwif = '0' + (MAX_HWIFS - 1);
+
+ printk("ide_setup: %s", s);
+ init_ide_data ();
+
+ /*
+ * Look for drive options: "hdx="
+ */
+ if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
+ const char *hd_words[] = {"noprobe", "nowerr", "cdrom", "serialize",
+ "autotune", "noautotune", NULL};
+ unit = s[2] - 'a';
+ hw = unit / MAX_DRIVES;
+ unit = unit % MAX_DRIVES;
+ hwif = &ide_hwifs[hw];
+ drive = &hwif->drives[unit];
+ switch (match_parm(&s[3], hd_words, vals, 3)) {
+ case -1: /* "noprobe" */
+ drive->noprobe = 1;
+ goto done;
+ case -2: /* "nowerr" */
+ drive->bad_wstat = BAD_R_STAT;
+ hwif->noprobe = 0;
+ goto done;
+ case -3: /* "cdrom" */
+ drive->present = 1;
+ drive->media = ide_cdrom;
+ hwif->noprobe = 0;
+ goto done;
+ case -4: /* "serialize" */
+ printk(" -- USE \"ide%c=serialize\" INSTEAD", '0'+hw);
+ goto do_serialize;
+ case -5: /* "autotune" */
+ drive->autotune = 1;
+ goto done;
+ case -6: /* "noautotune" */
+ drive->autotune = 2;
+ goto done;
+ case 3: /* cyl,head,sect */
+ drive->media = ide_disk;
+ drive->cyl = drive->bios_cyl = vals[0];
+ drive->head = drive->bios_head = vals[1];
+ drive->sect = drive->bios_sect = vals[2];
+ drive->present = 1;
+ drive->forced_geom = 1;
+ hwif->noprobe = 0;
+ goto done;
+ default:
+ goto bad_option;
+ }
+ }
+ /*
+ * Look for interface options: "idex="
+ */
+ if (s[0] == 'i' && s[1] == 'd' && s[2] == 'e' && s[3] >= '0' && s[3] <= max_hwif) {
+ /*
+ * Be VERY CAREFUL changing this: note hardcoded indexes below
+ */
+ const char *ide_words[] = {"noprobe", "serialize", "autotune", "noautotune",
+ "qd6580", "ht6560b", "cmd640_vlb", "dtc2278", "umc8672", "ali14xx", NULL};
+ hw = s[3] - '0';
+ hwif = &ide_hwifs[hw];
+ i = match_parm(&s[4], ide_words, vals, 3);
+
+ /*
+ * Cryptic check to ensure chipset not already set for hwif:
+ */
+ if (i != -1 && i != -2) {
+ if (hwif->chipset != ide_unknown)
+ goto bad_option;
+ if (i < 0 && ide_hwifs[1].chipset != ide_unknown)
+ goto bad_option;
+ }
+ /*
+ * Interface keywords work only for ide0:
+ */
+ if (i <= -6 && hw != 0)
+ goto bad_hwif;
+
+ switch (i) {
+#ifdef CONFIG_BLK_DEV_ALI14XX
+ case -10: /* "ali14xx" */
+ {
+ extern void init_ali14xx (void);
+ init_ali14xx();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_ALI14XX */
+#ifdef CONFIG_BLK_DEV_UMC8672
+ case -9: /* "umc8672" */
+ {
+ extern void init_umc8672 (void);
+ init_umc8672();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_UMC8672 */
+#ifdef CONFIG_BLK_DEV_DTC2278
+ case -8: /* "dtc2278" */
+ {
+ extern void init_dtc2278 (void);
+ init_dtc2278();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_DTC2278 */
+#ifdef CONFIG_BLK_DEV_CMD640
+ case -7: /* "cmd640_vlb" */
+ {
+ extern int cmd640_vlb; /* flag for cmd640.c */
+ cmd640_vlb = 1;
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_CMD640 */
+#ifdef CONFIG_BLK_DEV_HT6560B
+ case -6: /* "ht6560b" */
+ {
+ extern void init_ht6560b (void);
+ init_ht6560b();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_HT6560B */
+#if CONFIG_BLK_DEV_QD6580
+ case -5: /* "qd6580" (no secondary i/f) */
+ {
+ extern void init_qd6580 (void);
+ init_qd6580();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_QD6580 */
+ case -4: /* "noautotune" */
+ hwif->drives[0].autotune = 2;
+ hwif->drives[1].autotune = 2;
+ goto done;
+ case -3: /* "autotune" */
+ hwif->drives[0].autotune = 1;
+ hwif->drives[1].autotune = 1;
+ goto done;
+ case -2: /* "serialize" */
+ do_serialize:
+ if (hw > 1) goto bad_hwif;
+ ide_hwifs[0].serialized = 1;
+ goto done;
+
+ case -1: /* "noprobe" */
+ hwif->noprobe = 1;
+ goto done;
+
+ case 1: /* base */
+ vals[1] = vals[0] + 0x206; /* default ctl */
+ case 2: /* base,ctl */
+ vals[2] = 0; /* default irq = probe for it */
+ case 3: /* base,ctl,irq */
+ hwif->io_base = vals[0];
+ hwif->ctl_port = vals[1];
+ hwif->irq = vals[2];
+ hwif->noprobe = 0;
+ hwif->chipset = ide_generic;
+ goto done;
+
+ case 0: goto bad_option;
+ default:
+ printk(" -- SUPPORT NOT CONFIGURED IN THIS KERNEL\n");
+ return;
+ }
+ }
+bad_option:
+ printk(" -- BAD OPTION\n");
+ return;
+bad_hwif:
+ printk("-- NOT SUPPORTED ON ide%d", hw);
+done:
+ printk("\n");
+}
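+
+/*
+ * Illustrative sketch (not part of the original driver): how the boot
+ * options documented above look by the time they reach ide_setup().
+ * In a real boot the strings come from the kernel command line parser;
+ * the explicit calls below are examples only and are never compiled.
+ */
+#if 0
+	ide_setup("hdc=cdrom");			/* hdc present, ATAPI cdrom            */
+	ide_setup("hda=1050,16,63");		/* force CHS geometry for hda          */
+	ide_setup("ide1=0x170,0x376,15");	/* second i/f: base, ctl, irq          */
+	ide_setup("ide0=serialize");		/* don't overlap ide0/ide1 operations  */
+#endif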
+
+/*
+ * This routine is called from the partition-table code in genhd.c
+ * to "convert" a drive to a logical geometry with fewer than 1024 cyls.
+ *
+ * The second parameter, "xparm", determines exactly how the translation
+ * will be handled:
+ * 0 = convert to CHS with fewer than 1024 cyls
+ * using the same method as Ontrack DiskManager.
+ * 1 = same as "0", plus offset everything by 63 sectors.
+ * -1 = similar to "0", plus redirect sector 0 to sector 1.
+ * >1 = convert to a CHS geometry with "xparm" heads.
+ *
+ * Returns 0 if the translation was not possible, if the device was not
+ * an IDE disk drive, or if a geometry was "forced" on the commandline.
+ * Returns 1 if the geometry translation was successful.
+ */
+int ide_xlate_1024 (kdev_t i_rdev, int xparm, const char *msg)
+{
+ ide_drive_t *drive;
+ static const byte head_vals[] = {4, 8, 16, 32, 64, 128, 255, 0};
+ const byte *heads = head_vals;
+ unsigned long tracks;
+
+ if ((drive = get_info_ptr(i_rdev)) == NULL || drive->forced_geom)
+ return 0;
+
+ if (xparm > 1 && xparm <= drive->bios_head && drive->bios_sect == 63)
+ return 0; /* we already have a translation */
+
+ printk("%s ", msg);
+
+ if (drive->id) {
+ drive->cyl = drive->id->cyls;
+ drive->head = drive->id->heads;
+ drive->sect = drive->id->sectors;
+ }
+ drive->bios_cyl = drive->cyl;
+ drive->bios_head = drive->head;
+ drive->bios_sect = drive->sect;
+ drive->special.b.set_geometry = 1;
+
+ tracks = drive->bios_cyl * drive->bios_head * drive->bios_sect / 63;
+ drive->bios_sect = 63;
+ if (xparm > 1) {
+ drive->bios_head = xparm;
+ drive->bios_cyl = tracks / drive->bios_head;
+ } else {
+ while (drive->bios_cyl >= 1024) {
+ drive->bios_head = *heads;
+ drive->bios_cyl = tracks / drive->bios_head;
+ if (0 == *++heads)
+ break;
+ }
+#if FAKE_FDISK_FOR_EZDRIVE
+ if (xparm == -1) {
+ drive->remap_0_to_1 = 1;
+ msg = "0->1";
+ } else
+#endif /* FAKE_FDISK_FOR_EZDRIVE */
+ if (xparm == 1) {
+ drive->sect0 = 63;
+ drive->bios_cyl = (tracks - 1) / drive->bios_head;
+ msg = "+63";
+ }
+ printk("[remap %s] ", msg);
+ }
+ drive->part[0].nr_sects = current_capacity(drive);
+ printk("[%d/%d/%d]", drive->bios_cyl, drive->bios_head, drive->bios_sect);
+ return 1;
+}
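+
+/*
+ * Worked example (illustrative, not part of the original driver): a
+ * drive reporting 2100/16/63 has 2100*16*63/63 = 33600 tracks.  With
+ * xparm == 0, the head_vals[] loop above settles on 64 heads
+ * (33600/64 = 525 cyls), so fdisk sees [525/64/63] -- safely under
+ * the 1024-cylinder limit.
+ */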
+
+/*
+ * We query CMOS about hard disks: it could be that we have a SCSI/ESDI/etc
+ * controller that is BIOS compatible with ST-506, and thus showing up in our
+ * BIOS table, but not register compatible, and therefore not present in CMOS.
+ *
+ * Furthermore, we will assume that our ST-506 drives <if any> are the primary
+ * drives in the system -- the ones reflected as drive 1 or 2. The first
+ * drive is stored in the high nibble of CMOS byte 0x12, the second in the low
+ * nibble.  This will be either a 4 bit drive type, or 0xf to indicate that
+ * CMOS byte 0x19 (for drive 1) or 0x1a (for drive 2) holds an 8 bit type.
+ * A non-zero value means we have an AT controller hard disk for that drive.
+ *
+ * Of course, there is no guarantee that either drive is actually on the
+ * "primary" IDE interface, but we don't bother trying to sort that out here.
+ * If a drive is not actually on the primary interface, then these parameters
+ * will be ignored. This results in the user having to supply the logical
+ * drive geometry as a boot parameter for each drive not on the primary i/f.
+ *
+ * The only "perfect" way to handle this would be to modify the setup.[cS] code
+ * to do BIOS calls Int13h/Fn08h and Int13h/Fn48h to get all of the drive info
+ * for us during initialization. I have the necessary docs -- any takers? -ml
+ */
+
+static void probe_cmos_for_drives (ide_hwif_t *hwif)
+{
+#ifdef __i386__
+ extern struct drive_info_struct drive_info;
+ byte cmos_disks, *BIOS = (byte *) &drive_info;
+ int unit;
+
+ outb_p(0x12,0x70); /* specify CMOS address 0x12 */
+ cmos_disks = inb_p(0x71); /* read the data from 0x12 */
+ /* Extract drive geometry from CMOS+BIOS if not already setup */
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if ((cmos_disks & (0xf0 >> (unit*4))) && !drive->present) {
+ drive->cyl = drive->bios_cyl = *(unsigned short *)BIOS;
+ drive->head = drive->bios_head = *(BIOS+2);
+ drive->sect = drive->bios_sect = *(BIOS+14);
+ drive->ctl = *(BIOS+8);
+ drive->present = 1;
+ }
+ BIOS += 16;
+ }
+#endif
+}
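+
+/*
+ * Worked example (illustrative, not part of the original driver): if
+ * CMOS byte 0x12 reads back as 0xf5, both nibbles are non-zero, so
+ * both drives on the primary interface (when not already configured)
+ * pick up their CHS geometry and ctl byte from the BIOS drive_info
+ * table, at offsets 0 and 16 respectively.
+ */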
+
+/*
+ * This routine sets up the irq for an ide interface, and creates a new
+ * hwgroup for the irq/hwif if none was previously assigned.
+ *
+ * The SA_INTERRUPT in sa_flags means ide_intr() is always entered with
+ * interrupts completely disabled. This can be bad for interrupt latency,
+ * but anything else has led to problems on some machines. We re-enable
+ * interrupts as much as we can safely do in most places.
+ */
+static int init_irq (ide_hwif_t *hwif)
+{
+ unsigned long flags;
+ int irq = hwif->irq;
+ ide_hwgroup_t *hwgroup = irq_to_hwgroup[irq];
+
+ save_flags(flags);
+ cli();
+
+ /*
+ * Grab the irq if we don't already have it from a previous hwif
+ */
+ if (hwgroup == NULL) {
+ if (request_irq(irq, ide_intr, SA_INTERRUPT|SA_SAMPLE_RANDOM, hwif->name)) {
+ restore_flags(flags);
+ printk(" -- FAILED!");
+ return 1;
+ }
+ }
+ /*
+ * Check for serialization with ide1.
+ * This code depends on us having already taken care of ide1.
+ */
+ if (hwif->serialized && hwif->name[3] == '0' && ide_hwifs[1].present)
+ hwgroup = ide_hwifs[1].hwgroup;
+ /*
+ * If this is the first interface in a group,
+ * then we need to create the hwgroup structure
+ */
+ if (hwgroup == NULL) {
+ hwgroup = kmalloc (sizeof(ide_hwgroup_t), GFP_KERNEL);
+ hwgroup->hwif = hwif->next = hwif;
+ hwgroup->rq = NULL;
+ hwgroup->handler = NULL;
+ hwgroup->drive = &hwif->drives[0];
+ hwgroup->poll_timeout = 0;
+ init_timer(&hwgroup->timer);
+ hwgroup->timer.function = &timer_expiry;
+ hwgroup->timer.data = (unsigned long) hwgroup;
+ } else {
+ hwif->next = hwgroup->hwif->next;
+ hwgroup->hwif->next = hwif;
+ }
+ hwif->hwgroup = hwgroup;
+ irq_to_hwgroup[irq] = hwgroup;
+
+ restore_flags(flags); /* safe now that hwif->hwgroup is set up */
+
+ printk("%s at 0x%03x-0x%03x,0x%03x on irq %d", hwif->name,
+ hwif->io_base, hwif->io_base+7, hwif->ctl_port, irq);
+ if (hwgroup->hwif != hwif)
+ printk(" (serialized with %s)", hwgroup->hwif->name);
+ printk("\n");
+ return 0;
+}
+
+static struct file_operations ide_fops = {
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ ide_ioctl, /* ioctl */
+ NULL, /* mmap */
+ ide_open, /* open */
+ ide_release, /* release */
+ block_fsync /* fsync */
+ ,NULL, /* fasync */
+ ide_check_media_change, /* check_media_change */
+ revalidate_disk /* revalidate */
+};
+
+#ifdef CONFIG_PCI
+#if defined(CONFIG_BLK_DEV_RZ1000) || defined(CONFIG_BLK_DEV_TRITON)
+
+typedef void (ide_pci_init_proc_t)(byte, byte);
+
+/*
+ * ide_probe_pci() scans PCI for a specific vendor/device function,
+ * and invokes the supplied init routine for each instance detected.
+ */
+static void ide_probe_pci (unsigned short vendor, unsigned short device, ide_pci_init_proc_t *init, int func_adj)
+{
+ unsigned long flags;
+ unsigned index;
+ byte fn, bus;
+
+ save_flags(flags);
+ cli();
+ for (index = 0; !pcibios_find_device (vendor, device, index, &bus, &fn); ++index) {
+ init (bus, fn + func_adj);
+ }
+ restore_flags(flags);
+}
+
+#endif /* defined(CONFIG_BLK_DEV_RZ1000) || defined(CONFIG_BLK_DEV_TRITON) */
+#endif /* CONFIG_PCI */
+
+/*
+ * probe_for_hwifs() finds/initializes the "known" PCI IDE interfaces
+ *
+ * This routine should ideally be using pcibios_find_class() to find
+ * all IDE interfaces, but that function causes some systems to "go weird".
+ */
+static void probe_for_hwifs (void)
+{
+#ifdef CONFIG_PCI
+ /*
+ * Find/initialize PCI IDE interfaces
+ */
+ if (pcibios_present()) {
+#ifdef CONFIG_BLK_DEV_RZ1000
+ ide_pci_init_proc_t init_rz1000;
+ ide_probe_pci (PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000, &init_rz1000, 0);
+#endif /* CONFIG_BLK_DEV_RZ1000 */
+#ifdef CONFIG_BLK_DEV_TRITON
+ /*
+ * Apparently the BIOS32 services on Intel motherboards are
+ * buggy and won't find the PCI_DEVICE_ID_INTEL_82371_1 for us.
+ * So instead, we search for PCI_DEVICE_ID_INTEL_82371_0,
+ * and then add 1.
+ */
+ ide_probe_pci (PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371_0, &ide_init_triton, 1);
+#endif /* CONFIG_BLK_DEV_TRITON */
+ }
+#endif /* CONFIG_PCI */
+#ifdef CONFIG_BLK_DEV_CMD640
+ {
+ extern void ide_probe_for_cmd640x (void);
+ ide_probe_for_cmd640x();
+ }
+#endif
+}
+
+/*
+ * This gets invoked once during initialization, to set *everything* up
+ */
+int ide_init (void)
+{
+ int h;
+
+ init_ide_data ();
+ /*
+ * Probe for special "known" interface chipsets
+ */
+ probe_for_hwifs ();
+
+ /*
+ * Probe for drives in the usual way.. CMOS/BIOS, then poke at ports
+ */
+ for (h = 0; h < MAX_HWIFS; ++h) {
+ ide_hwif_t *hwif = &ide_hwifs[h];
+ if (!hwif->noprobe) {
+ if (hwif->io_base == HD_DATA)
+ probe_cmos_for_drives (hwif);
+ probe_for_drives (hwif);
+ }
+ if (hwif->present) {
+ if (!hwif->irq) {
+ if (!(hwif->irq = default_irqs[h])) {
+ printk("%s: DISABLED, NO IRQ\n", hwif->name);
+ hwif->present = 0;
+ continue;
+ }
+ }
+#ifdef CONFIG_BLK_DEV_HD
+ if (hwif->irq == HD_IRQ && hwif->io_base != HD_DATA) {
+ printk("%s: CANNOT SHARE IRQ WITH OLD HARDDISK DRIVER (hd.c)\n", hwif->name);
+ hwif->present = 0;
+ }
+#endif /* CONFIG_BLK_DEV_HD */
+ }
+ }
+
+ /*
+ * Now we try to set up irqs and major devices for what was found
+ */
+ for (h = MAX_HWIFS-1; h >= 0; --h) {
+ void (*rfn)(void);
+ ide_hwif_t *hwif = &ide_hwifs[h];
+ if (!hwif->present)
+ continue;
+ hwif->present = 0; /* we set it back to 1 if all is ok below */
+ switch (hwif->major) {
+ case IDE0_MAJOR: rfn = &do_ide0_request; break;
+ case IDE1_MAJOR: rfn = &do_ide1_request; break;
+ case IDE2_MAJOR: rfn = &do_ide2_request; break;
+ case IDE3_MAJOR: rfn = &do_ide3_request; break;
+ default:
+ printk("%s: request_fn NOT DEFINED\n", hwif->name);
+ continue;
+ }
+ if (register_blkdev (hwif->major, hwif->name, &ide_fops)) {
+ printk("%s: UNABLE TO GET MAJOR NUMBER %d\n", hwif->name, hwif->major);
+ } else if (init_irq (hwif)) {
+ printk("%s: UNABLE TO GET IRQ %d\n", hwif->name, hwif->irq);
+ (void) unregister_blkdev (hwif->major, hwif->name);
+ } else {
+ init_gendisk(hwif);
+ blk_dev[hwif->major].request_fn = rfn;
+ read_ahead[hwif->major] = 8; /* (4kB) */
+ hwif->present = 1; /* success */
+ }
+ }
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ idetape_register_chrdev(); /* Register character device interface to the ide tape */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+ return 0;
+}
diff --git a/i386/i386at/gpl/linux/block/ide.h b/i386/i386at/gpl/linux/block/ide.h
new file mode 100644
index 00000000..b1ebc4c3
--- /dev/null
+++ b/i386/i386at/gpl/linux/block/ide.h
@@ -0,0 +1,655 @@
+/*
+ * linux/drivers/block/ide.h
+ *
+ * Copyright (C) 1994, 1995 Linus Torvalds & authors
+ */
+
+#include <linux/config.h>
+
+/*
+ * This is the multiple IDE interface driver, as evolved from hd.c.
+ * It supports up to four IDE interfaces, on one or more IRQs (usually 14 & 15).
+ * There can be up to two drives per interface, as per the ATA-2 spec.
+ *
+ * Primary i/f: ide0: major=3; (hda) minor=0; (hdb) minor=64
+ * Secondary i/f: ide1: major=22; (hdc or hd1a) minor=0; (hdd or hd1b) minor=64
+ * Tertiary i/f: ide2: major=33; (hde) minor=0; (hdf) minor=64
+ * Quaternary i/f: ide3: major=34; (hdg) minor=0; (hdh) minor=64
+ */
+
+/******************************************************************************
+ * IDE driver configuration options (play with these as desired):
+ *
+ * REALLY_SLOW_IO can be defined in ide.c and ide-cd.c, if necessary
+ */
+#undef REALLY_FAST_IO /* define if ide ports are perfect */
+#define INITIAL_MULT_COUNT 0 /* off=0; on=2,4,8,16,32, etc.. */
+
+#ifndef SUPPORT_VLB_SYNC /* 1 to support weird 32-bit chips */
+#define SUPPORT_VLB_SYNC 1 /* 0 to reduce kernel size */
+#endif
+#ifndef DISK_RECOVERY_TIME /* off=0; on=access_delay_time */
+#define DISK_RECOVERY_TIME 0 /* for hardware that needs it */
+#endif
+#ifndef OK_TO_RESET_CONTROLLER /* 1 needed for good error recovery */
+#define OK_TO_RESET_CONTROLLER 1 /* 0 for use with AH2372A/B interface */
+#endif
+#ifndef FAKE_FDISK_FOR_EZDRIVE /* 1 to help linux fdisk with EZDRIVE */
+#define FAKE_FDISK_FOR_EZDRIVE 1 /* 0 to reduce kernel size */
+#endif
+#ifndef FANCY_STATUS_DUMPS /* 1 for human-readable drive errors */
+#define FANCY_STATUS_DUMPS 1 /* 0 to reduce kernel size */
+#endif
+
+#if defined(CONFIG_BLK_DEV_IDECD) || defined(CONFIG_BLK_DEV_IDETAPE)
+#define CONFIG_BLK_DEV_IDEATAPI 1
+#endif
+
+/*
+ * IDE_DRIVE_CMD is used to implement many features of the hdparm utility
+ */
+#define IDE_DRIVE_CMD 99 /* (magic) undef to reduce kernel size*/
+
+/*
+ * "No user-serviceable parts" beyond this point :)
+ *****************************************************************************/
+
+typedef unsigned char byte; /* used everywhere */
+
+/*
+ * Probably not wise to fiddle with these
+ */
+#define ERROR_MAX 8 /* Max read/write errors per sector */
+#define ERROR_RESET 3 /* Reset controller every 4th retry */
+#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
+
+/*
+ * Ensure that various configuration flags have compatible settings
+ */
+#ifdef REALLY_SLOW_IO
+#undef REALLY_FAST_IO
+#endif
+
+/*
+ * Definitions for accessing IDE controller registers
+ */
+
+#define HWIF(drive) ((ide_hwif_t *)drive->hwif)
+#define HWGROUP(drive) ((ide_hwgroup_t *)(HWIF(drive)->hwgroup))
+
+#define IDE_DATA_OFFSET (0)
+#define IDE_ERROR_OFFSET (1)
+#define IDE_NSECTOR_OFFSET (2)
+#define IDE_SECTOR_OFFSET (3)
+#define IDE_LCYL_OFFSET (4)
+#define IDE_HCYL_OFFSET (5)
+#define IDE_SELECT_OFFSET (6)
+#define IDE_STATUS_OFFSET (7)
+#define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET
+#define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET
+
+#define IDE_DATA_REG (HWIF(drive)->io_base+IDE_DATA_OFFSET)
+#define IDE_ERROR_REG (HWIF(drive)->io_base+IDE_ERROR_OFFSET)
+#define IDE_NSECTOR_REG (HWIF(drive)->io_base+IDE_NSECTOR_OFFSET)
+#define IDE_SECTOR_REG (HWIF(drive)->io_base+IDE_SECTOR_OFFSET)
+#define IDE_LCYL_REG (HWIF(drive)->io_base+IDE_LCYL_OFFSET)
+#define IDE_HCYL_REG (HWIF(drive)->io_base+IDE_HCYL_OFFSET)
+#define IDE_SELECT_REG (HWIF(drive)->io_base+IDE_SELECT_OFFSET)
+#define IDE_STATUS_REG (HWIF(drive)->io_base+IDE_STATUS_OFFSET)
+#define IDE_CONTROL_REG (HWIF(drive)->ctl_port)
+#define IDE_FEATURE_REG IDE_ERROR_REG
+#define IDE_COMMAND_REG IDE_STATUS_REG
+#define IDE_ALTSTATUS_REG IDE_CONTROL_REG
+
+#ifdef REALLY_FAST_IO
+#define OUT_BYTE(b,p) outb((b),p)
+#define IN_BYTE(p) (byte)inb(p)
+#else
+#define OUT_BYTE(b,p) outb_p((b),p)
+#define IN_BYTE(p) (byte)inb_p(p)
+#endif /* REALLY_FAST_IO */
+
+#define GET_ERR() IN_BYTE(IDE_ERROR_REG)
+#define GET_STAT() IN_BYTE(IDE_STATUS_REG)
+#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
+#define BAD_R_STAT (BUSY_STAT | ERR_STAT)
+#define BAD_W_STAT (BAD_R_STAT | WRERR_STAT)
+#define BAD_STAT (BAD_R_STAT | DRQ_STAT)
+#define DRIVE_READY (READY_STAT | SEEK_STAT)
+#define DATA_READY (DRIVE_READY | DRQ_STAT)
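+
+/*
+ * Worked example (illustrative, not part of the original header): with
+ * the usual <linux/hdreg.h> status bits (BUSY 0x80, READY 0x40, SEEK
+ * 0x10, DRQ 0x08, ERR 0x01), OK_STAT(0x58, DRQ_STAT, BAD_R_STAT) masks
+ * 0x58 with 0x89 and gets exactly DRQ_STAT back: data ready, not busy,
+ * no error -- the check the probe code makes before reading IDENTIFY data.
+ */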
+
+/*
+ * Some more useful definitions
+ */
+#define IDE_MAJOR_NAME "ide" /* the same for all i/f; see also genhd.c */
+#define MAJOR_NAME IDE_MAJOR_NAME
+#define PARTN_BITS 6 /* number of minor dev bits for partitions */
+#define PARTN_MASK ((1<<PARTN_BITS)-1) /* a useful bit mask */
+#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
+#define MAX_HWIFS 4 /* an arbitrary, but realistic limit */
+#define SECTOR_WORDS (512 / 4) /* number of 32bit words per sector */
+
+/*
+ * Timeouts for various operations:
+ */
+#define WAIT_DRQ (5*HZ/100) /* 50msec - spec allows up to 20ms */
+#define WAIT_READY (3*HZ/100) /* 30msec - should be instantaneous */
+#define WAIT_PIDENTIFY (1*HZ) /* 1sec - should be less than 3ms (?) */
+#define WAIT_WORSTCASE (30*HZ) /* 30sec - worst case when spinning up */
+#define WAIT_CMD (10*HZ) /* 10sec - maximum wait for an IRQ to happen */
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+#include "ide-tape.h"
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+#ifdef CONFIG_BLK_DEV_IDECD
+
+struct atapi_request_sense {
+ unsigned char error_code : 7;
+ unsigned char valid : 1;
+ byte reserved1;
+ unsigned char sense_key : 4;
+ unsigned char reserved2 : 1;
+ unsigned char ili : 1;
+ unsigned char reserved3 : 2;
+ byte info[4];
+ byte sense_len;
+ byte command_info[4];
+ byte asc;
+ byte ascq;
+ byte fru;
+ byte sense_key_specific[3];
+};
+
+struct packet_command {
+ char *buffer;
+ int buflen;
+ int stat;
+ struct atapi_request_sense *sense_data;
+ unsigned char c[12];
+};
+
+/* Space to hold the disk TOC. */
+
+#define MAX_TRACKS 99
+struct atapi_toc_header {
+ unsigned short toc_length;
+ byte first_track;
+ byte last_track;
+};
+
+struct atapi_toc_entry {
+ byte reserved1;
+ unsigned control : 4;
+ unsigned adr : 4;
+ byte track;
+ byte reserved2;
+ union {
+ unsigned lba;
+ struct {
+ byte reserved3;
+ byte m;
+ byte s;
+ byte f;
+ } msf;
+ } addr;
+};
+
+struct atapi_toc {
+ int last_session_lba;
+ int xa_flag;
+ unsigned capacity;
+ struct atapi_toc_header hdr;
+ struct atapi_toc_entry ent[MAX_TRACKS+1]; /* One extra for the leadout. */
+};
+
+
+/* This structure is annoyingly close to, but not identical with,
+ the cdrom_subchnl structure from cdrom.h. */
+struct atapi_cdrom_subchnl
+{
+ u_char acdsc_reserved;
+ u_char acdsc_audiostatus;
+ u_short acdsc_length;
+ u_char acdsc_format;
+
+ u_char acdsc_adr: 4;
+ u_char acdsc_ctrl: 4;
+ u_char acdsc_trk;
+ u_char acdsc_ind;
+ union
+ {
+ struct
+ {
+ u_char minute;
+ u_char second;
+ u_char frame;
+ } msf;
+ int lba;
+ } acdsc_absaddr;
+ union
+ {
+ struct
+ {
+ u_char minute;
+ u_char second;
+ u_char frame;
+ } msf;
+ int lba;
+ } acdsc_reladdr;
+};
+
+
+/* Extra per-device info for cdrom drives. */
+struct cdrom_info {
+
+ /* Buffer for table of contents. NULL if we haven't allocated
+ a TOC buffer for this device yet. */
+
+ struct atapi_toc *toc;
+
+ /* Sector buffer. If a read request wants only the first part of a cdrom
+ block, we cache the rest of the block here, in the expectation that that
+ data is going to be wanted soon. SECTOR_BUFFERED is the number of the
+ first buffered sector, and NSECTORS_BUFFERED is the number of sectors
+ in the buffer. Before the buffer is allocated, we should have
+ SECTOR_BUFFER == NULL and NSECTORS_BUFFERED == 0. */
+
+ unsigned long sector_buffered;
+ unsigned long nsectors_buffered;
+ char *sector_buffer;
+
+ /* The result of the last successful request sense command
+ on this device. */
+ struct atapi_request_sense sense_data;
+};
+
+#endif /* CONFIG_BLK_DEV_IDECD */
+
+/*
+ * Now for the data we need to maintain per-drive: ide_drive_t
+ */
+
+typedef enum {ide_disk, ide_cdrom, ide_tape} ide_media_t;
+
+typedef union {
+ unsigned all : 8; /* all of the bits together */
+ struct {
+ unsigned set_geometry : 1; /* respecify drive geometry */
+ unsigned recalibrate : 1; /* seek to cyl 0 */
+ unsigned set_multmode : 1; /* set multmode count */
+ unsigned set_pio : 1; /* set pio mode */
+ unsigned reserved : 4; /* unused */
+ } b;
+ } special_t;
+
+typedef union {
+ unsigned all : 8; /* all of the bits together */
+ struct {
+ unsigned head : 4; /* always zeros here */
+ unsigned unit : 1; /* drive select number, 0 or 1 */
+ unsigned bit5 : 1; /* always 1 */
+ unsigned lba : 1; /* using LBA instead of CHS */
+ unsigned bit7 : 1; /* always 1 */
+ } b;
+ } select_t;
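+
+/*
+ * Worked example (illustrative, not part of the original header): with
+ * the bit layout above, select.all == 0xa0 means "unit 0, CHS" (only
+ * the two always-1 bits set), 0xb0 selects unit 1, and 0xe0 selects
+ * unit 0 with LBA addressing -- which is why the probe code writes
+ * 0xa0 to the select register to leave drive 0 selected.
+ */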
+
+typedef struct ide_drive_s {
+ special_t special; /* special action flags */
+ unsigned present : 1; /* drive is physically present */
+ unsigned noprobe : 1; /* from: hdx=noprobe */
+ unsigned keep_settings : 1; /* restore settings after drive reset */
+ unsigned busy : 1; /* currently doing revalidate_disk() */
+ unsigned removeable : 1; /* 1 if need to do check_media_change */
+ unsigned using_dma : 1; /* disk is using dma for read/write */
+ unsigned forced_geom : 1; /* 1 if hdx=c,h,s was given at boot */
+ unsigned unmask : 1; /* flag: okay to unmask other irqs */
+ unsigned autotune : 2; /* 1=autotune, 2=noautotune, 0=default */
+#if FAKE_FDISK_FOR_EZDRIVE
+ unsigned remap_0_to_1 : 1; /* flag: partitioned with ezdrive */
+#endif /* FAKE_FDISK_FOR_EZDRIVE */
+ ide_media_t media; /* disk, cdrom, tape */
+ select_t select; /* basic drive/head select reg value */
+ byte ctl; /* "normal" value for IDE_CONTROL_REG */
+ byte ready_stat; /* min status value for drive ready */
+ byte mult_count; /* current multiple sector setting */
+ byte mult_req; /* requested multiple sector setting */
+	byte		pio_req;	/* requested PIO mode setting */
+ byte io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
+ byte bad_wstat; /* used for ignoring WRERR_STAT */
+ byte sect0; /* offset of first sector for DM6:DDO */
+ byte usage; /* current "open()" count for drive */
+ byte head; /* "real" number of heads */
+ byte sect; /* "real" sectors per track */
+ byte bios_head; /* BIOS/fdisk/LILO number of heads */
+ byte bios_sect; /* BIOS/fdisk/LILO sectors per track */
+ unsigned short bios_cyl; /* BIOS/fdisk/LILO number of cyls */
+ unsigned short cyl; /* "real" number of cyls */
+ void *hwif; /* actually (ide_hwif_t *) */
+ struct wait_queue *wqueue; /* used to wait for drive in open() */
+ struct hd_driveid *id; /* drive model identification info */
+ struct hd_struct *part; /* drive partition table */
+ char name[4]; /* drive name, such as "hda" */
+#ifdef CONFIG_BLK_DEV_IDECD
+ struct cdrom_info cdrom_info; /* for ide-cd.c */
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ idetape_tape_t tape; /* for ide-tape.c */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+ } ide_drive_t;
+
+/*
+ * An ide_dmaproc_t() initiates/aborts DMA read/write operations on a drive.
+ *
+ * The caller is assumed to have selected the drive and programmed the drive's
+ * sector address using CHS or LBA. All that remains is to prepare for DMA
+ * and then issue the actual read/write DMA/PIO command to the drive.
+ *
+ * Returns 0 if all went well.
+ * Returns 1 if DMA read/write could not be started, in which case the caller
+ * should either try again later, or revert to PIO for the current request.
+ */
+typedef enum { ide_dma_read = 0, ide_dma_write = 1,
+ ide_dma_abort = 2, ide_dma_check = 3,
+ ide_dma_status_bad = 4, ide_dma_transferred = 5,
+ ide_dma_begin = 6 }
+ ide_dma_action_t;
+
+typedef int (ide_dmaproc_t)(ide_dma_action_t, ide_drive_t *);
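+
+/*
+ * Illustrative sketch (hypothetical, not part of the original header):
+ * the general shape of a chipset dmaproc.  A real implementation
+ * (triton.c, for example) programs the chipset's DMA engine here; this
+ * stub only shows the switch on ide_dma_action_t and the 0/1 returns.
+ */
+#if 0
+static int example_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
+{
+	switch (func) {
+		case ide_dma_read:
+		case ide_dma_write:
+			/* build the PRD table, start the engine, ... */
+			return 0;	/* 0: DMA successfully started */
+		case ide_dma_check:
+			return 1;	/* 1: don't use DMA for this drive */
+		default:
+			return 1;	/* unsupported action: caller falls back to PIO */
+	}
+}
+#endif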
+
+
+/*
+ * An ide_tuneproc_t() is used to set the speed of an IDE interface
+ * to a particular PIO mode. The "byte" parameter is used
+ * to select the PIO mode by number (0,1,2,3,4,5), and a value of 255
+ * indicates that the interface driver should "auto-tune" the PIO mode
+ * according to the drive capabilities in drive->id;
+ *
+ * Not all interface types support tuning, and not all of those
+ * support all possible PIO settings. They may silently ignore
+ * or round values as they see fit.
+ */
+typedef void (ide_tuneproc_t)(ide_drive_t *, byte);
+
+/*
+ * This is used to provide HT6560B interface support.
+ * It will probably also be used by the DC4030VL driver.
+ */
+typedef void (ide_selectproc_t) (ide_drive_t *);
+
+/*
+ * hwif_chipset_t is used to keep track of the specific hardware
+ * chipset used by each IDE interface, if known.
+ */
+typedef enum { ide_unknown, ide_generic, ide_triton,
+ ide_cmd640, ide_dtc2278, ide_ali14xx,
+ ide_qd6580, ide_umc8672, ide_ht6560b }
+ hwif_chipset_t;
+
+typedef struct hwif_s {
+ struct hwif_s *next; /* for linked-list in ide_hwgroup_t */
+ void *hwgroup; /* actually (ide_hwgroup_t *) */
+ unsigned short io_base; /* base io port addr */
+ unsigned short ctl_port; /* usually io_base+0x206 */
+ ide_drive_t drives[MAX_DRIVES]; /* drive info */
+ struct gendisk *gd; /* gendisk structure */
+ ide_tuneproc_t *tuneproc; /* routine to tune PIO mode for drives */
+#ifdef CONFIG_BLK_DEV_HT6560B
+ ide_selectproc_t *selectproc; /* tweaks hardware to select drive */
+#endif /* CONFIG_BLK_DEV_HT6560B */
+ ide_dmaproc_t *dmaproc; /* dma read/write/abort routine */
+ unsigned long *dmatable; /* dma physical region descriptor table */
+ unsigned short dma_base; /* base addr for dma ports (triton) */
+ byte irq; /* our irq number */
+ byte major; /* our major number */
+ char name[5]; /* name of interface, eg. "ide0" */
+ byte index; /* 0 for ide0; 1 for ide1; ... */
+ hwif_chipset_t chipset; /* sub-module for tuning.. */
+ unsigned noprobe : 1; /* don't probe for this interface */
+ unsigned present : 1; /* this interface exists */
+ unsigned serialized : 1; /* valid only for ide_hwifs[0] */
+ unsigned no_unmask : 1; /* disallow setting unmask bits */
+#if (DISK_RECOVERY_TIME > 0)
+ unsigned long last_time; /* time when previous rq was done */
+#endif
+#ifdef CONFIG_BLK_DEV_IDECD
+ struct request request_sense_request; /* from ide-cd.c */
+ struct packet_command request_sense_pc; /* from ide-cd.c */
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ ide_drive_t *tape_drive; /* Pointer to the tape on this interface */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ } ide_hwif_t;
+
+/*
+ * internal ide interrupt handler type
+ */
+typedef void (ide_handler_t)(ide_drive_t *);
+
+typedef struct hwgroup_s {
+ ide_handler_t *handler;/* irq handler, if active */
+ ide_drive_t *drive; /* current drive */
+ ide_hwif_t *hwif; /* ptr to current hwif in linked-list */
+ struct request *rq; /* current request */
+ struct timer_list timer; /* failsafe timer */
+ struct request wrq; /* local copy of current write rq */
+ unsigned long poll_timeout; /* timeout value during long polls */
+ } ide_hwgroup_t;
+
+/*
+ * ide_hwifs[] is the master data structure used to keep track
+ * of just about everything in ide.c. Whenever possible, routines
+ * should be using pointers to a drive (ide_drive_t *) or
+ * pointers to a hwif (ide_hwif_t *), rather than indexing this
+ * structure directly (the allocation/layout may change!).
+ */
+#ifdef _IDE_C
+ ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
+#else
+extern ide_hwif_t ide_hwifs[];
+#endif
+
+/*
+ * One final include file, which references some of the data/defns from above
+ */
+#define IDE_DRIVER /* "parameter" for blk.h */
+#include <linux/blk.h>
+
+#if (DISK_RECOVERY_TIME > 0)
+void ide_set_recovery_timer (ide_hwif_t *);
+#define SET_RECOVERY_TIMER(drive) ide_set_recovery_timer (drive)
+#else
+#define SET_RECOVERY_TIMER(drive)
+#endif
+
+/*
+ * This is used for (nearly) all data transfers from the IDE interface
+ */
+void ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount);
+
+/*
+ * This is used for (nearly) all data transfers to the IDE interface
+ */
+void ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount);
+
+/*
+ * This is used on exit from the driver, to designate the next irq handler
+ * and also to start the safety timer.
+ */
+void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout);
+
+/*
+ * Error reporting, in human readable form (luxurious, but a memory hog).
+ */
+byte ide_dump_status (ide_drive_t *drive, const char *msg, byte stat);
+
+/*
+ * ide_error() takes action based on the error returned by the controller.
+ * The calling function must return afterwards, to restart the request.
+ */
+void ide_error (ide_drive_t *drive, const char *msg, byte stat);
+
+/*
+ * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
+ * removing leading/trailing blanks and compressing internal blanks.
+ * It is primarily used to tidy up the model name/number fields as
+ * returned by the WIN_[P]IDENTIFY commands.
+ */
+void ide_fixstring (byte *s, const int bytecount, const int byteswap);
+
+/*
+ * This routine busy-waits for the drive status to be not "busy".
+ * It then checks the status for all of the "good" bits and none
+ * of the "bad" bits, and if all is okay it returns 0. All other
+ * cases return 1 after invoking ide_error() -- caller should return.
+ *
+ */
+int ide_wait_stat (ide_drive_t *drive, byte good, byte bad, unsigned long timeout);
+
+/*
+ * This routine is called from the partition-table code in genhd.c
+ * to "convert" a drive to a logical geometry with fewer than 1024 cyls.
+ *
+ * The second parameter, "xparm", determines exactly how the translation
+ * will be handled:
+ * 0 = convert to CHS with fewer than 1024 cyls
+ * using the same method as Ontrack DiskManager.
+ * 1 = same as "0", plus offset everything by 63 sectors.
+ * -1 = similar to "0", plus redirect sector 0 to sector 1.
+ * >1 = convert to a CHS geometry with "xparm" heads.
+ *
+ * Returns 0 if the translation was not possible, if the device was not
+ * an IDE disk drive, or if a geometry was "forced" on the commandline.
+ * Returns 1 if the geometry translation was successful.
+ */
+int ide_xlate_1024 (kdev_t, int, const char *);
+
+/*
+ * Start a reset operation for an IDE interface.
+ * The caller should return immediately after invoking this.
+ */
+void ide_do_reset (ide_drive_t *);
+
+/*
+ * This function is intended to be used prior to invoking ide_do_drive_cmd().
+ */
+void ide_init_drive_cmd (struct request *rq);
+
+/*
+ * "action" parameter type for ide_do_drive_cmd() below.
+ */
+typedef enum
+ {ide_wait, /* insert rq at end of list, and wait for it */
+ ide_next, /* insert rq immediately after current request */
+ ide_preempt, /* insert rq in front of current request */
+ ide_end} /* insert rq at end of list, but don't wait for it */
+ ide_action_t;
+
+/*
+ * This function issues a special IDE device request
+ * onto the request queue.
+ *
+ * If action is ide_wait, then the rq is queued at the end of
+ * the request queue, and the function sleeps until it has been
+ * processed. This is for use when invoked from an ioctl handler.
+ *
+ * If action is ide_preempt, then the rq is queued at the head of
+ * the request queue, displacing the currently-being-processed
+ * request and this function returns immediately without waiting
+ * for the new rq to be completed. This is VERY DANGEROUS, and is
+ * intended for careful use by the ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_next, then the rq is queued immediately after
+ * the currently-being-processed-request (if any), and the function
+ * returns without waiting for the new rq to be completed. As above,
+ * this is VERY DANGEROUS, and is intended for careful use by the
+ * ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_end, then the rq is queued at the end of the
+ * request queue, and the function returns immediately without waiting
+ * for the new rq to be completed. This is again intended for careful
+ * use by the ATAPI tape/cdrom driver code. (Currently used by ide-tape.c,
+ * when operating in the pipelined operation mode).
+ */
+int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action);
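+
+/*
+ * Illustrative sketch (not part of the original header): typical ide_wait
+ * usage from process context, modelled on the HDIO_SET_MULTCOUNT ioctl
+ * handler in ide.c.  The caller fills a request with ide_init_drive_cmd(),
+ * flags the work on the drive, and sleeps until the request completes.
+ */
+#if 0
+	struct request rq;
+
+	ide_init_drive_cmd(&rq);
+	drive->mult_req = arg;			/* e.g. new multiple sector count */
+	drive->special.b.set_multmode = 1;
+	(void) ide_do_drive_cmd(drive, &rq, ide_wait);	/* sleeps until done */
+#endif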
+
+/*
+ * Clean up after success/failure of an explicit drive cmd.
+ * stat/err are used only when (HWGROUP(drive)->rq->cmd == IDE_DRIVE_CMD).
+ */
+void ide_end_drive_cmd (ide_drive_t *drive, byte stat, byte err);
+
+#ifdef CONFIG_BLK_DEV_IDECD
+/*
+ * These are routines in ide-cd.c invoked from ide.c
+ */
+void ide_do_rw_cdrom (ide_drive_t *, unsigned long);
+int ide_cdrom_ioctl (ide_drive_t *, struct inode *, struct file *, unsigned int, unsigned long);
+int ide_cdrom_check_media_change (ide_drive_t *);
+int ide_cdrom_open (struct inode *, struct file *, ide_drive_t *);
+void ide_cdrom_release (struct inode *, struct file *, ide_drive_t *);
+void ide_cdrom_setup (ide_drive_t *);
+#endif /* CONFIG_BLK_DEV_IDECD */
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+
+/*
+ * Functions in ide-tape.c which are invoked from ide.c:
+ */
+
+/*
+ * idetape_identify_device is called during the device probing stage to
+ * probe for an ide atapi tape drive and to initialize global variables
+ * in ide-tape.c which provide the link between the character device
+ * and the corresponding block device.
+ *
+ * Returns 1 if an ide tape was detected and is supported.
+ * Returns 0 otherwise.
+ */
+
+int idetape_identify_device (ide_drive_t *drive,struct hd_driveid *id);
+
+/*
+ * idetape_setup is called a bit later than idetape_identify_device,
+ * during the search for disk partitions, to initialize various tape
+ * state variables in ide_drive_t *drive.
+ */
+
+void idetape_setup (ide_drive_t *drive);
+
+/*
+ * idetape_do_request is our request function. It is called by ide.c
+ * to process a new request.
+ */
+
+void idetape_do_request (ide_drive_t *drive, struct request *rq, unsigned long block);
+
+/*
+ * idetape_end_request is used to finish servicing a request, and to
+ * insert a pending pipeline request into the main device queue.
+ */
+
+void idetape_end_request (byte uptodate, ide_hwgroup_t *hwgroup);
+
+/*
+ * Block device interface functions.
+ */
+
+int idetape_blkdev_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+int idetape_blkdev_open (struct inode *inode, struct file *filp, ide_drive_t *drive);
+void idetape_blkdev_release (struct inode *inode, struct file *filp, ide_drive_t *drive);
+
+/*
+ * idetape_register_chrdev initializes the character device interface to
+ * the ide tape drive.
+ */
+
+void idetape_register_chrdev (void);
+
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+#ifdef CONFIG_BLK_DEV_TRITON
+void ide_init_triton (byte, byte);
+#endif /* CONFIG_BLK_DEV_TRITON */
diff --git a/i386/i386at/gpl/linux/block/ide_modes.h b/i386/i386at/gpl/linux/block/ide_modes.h
new file mode 100644
index 00000000..e174d5dc
--- /dev/null
+++ b/i386/i386at/gpl/linux/block/ide_modes.h
@@ -0,0 +1,142 @@
+#ifndef _IDE_MODES_H
+#define _IDE_MODES_H
+/*
+ * linux/drivers/block/ide_modes.h
+ *
+ * Copyright (C) 1996 Linus Torvalds, Igor Abramov, and Mark Lord
+ */
+
+/*
+ * Shared data/functions for determining best PIO mode for an IDE drive.
+ * Most of this stuff originally lived in cmd640.c, and changes to the
+ * ide_pio_blacklist[] table should be made with EXTREME CAUTION to avoid
+ * breaking the fragile cmd640.c support.
+ */
+
+#if defined(CONFIG_BLK_DEV_CMD640) || defined(CONFIG_IDE_CHIPSETS)
+
+#ifndef _IDE_C
+
+int ide_scan_pio_blacklist (char *model);
+unsigned int ide_get_best_pio_mode (ide_drive_t *drive);
+
+#else /* _IDE_C */
+
+/*
+ * Black list. Some drives incorrectly report their maximal PIO mode,
+ * at least with respect to the CMD640.  Here we keep info on some known drives.
+ */
+static struct ide_pio_info {
+ const char *name;
+ int pio;
+} ide_pio_blacklist [] = {
+/* { "Conner Peripherals 1275MB - CFS1275A", 4 }, */
+
+ { "WDC AC2700", 3 },
+ { "WDC AC2540", 3 },
+ { "WDC AC2420", 3 },
+ { "WDC AC2340", 3 },
+ { "WDC AC2250", 0 },
+ { "WDC AC2200", 0 },
+ { "WDC AC2120", 0 },
+ { "WDC AC2850", 3 },
+ { "WDC AC1270", 3 },
+ { "WDC AC1170", 3 },
+ { "WDC AC1210", 1 },
+ { "WDC AC280", 0 },
+/* { "WDC AC21000", 4 }, */
+ { "WDC AC31000", 3 },
+/* { "WDC AC21200", 4 }, */
+ { "WDC AC31200", 3 },
+/* { "WDC AC31600", 4 }, */
+
+ { "Maxtor 7131 AT", 1 },
+ { "Maxtor 7171 AT", 1 },
+ { "Maxtor 7213 AT", 1 },
+ { "Maxtor 7245 AT", 1 },
+ { "Maxtor 7345 AT", 1 },
+ { "Maxtor 7546 AT", 3 },
+ { "Maxtor 7540 AV", 3 },
+
+ { "SAMSUNG SHD-3121A", 1 },
+ { "SAMSUNG SHD-3122A", 1 },
+ { "SAMSUNG SHD-3172A", 1 },
+
+/* { "ST51080A", 4 },
+ * { "ST51270A", 4 },
+ * { "ST31220A", 4 },
+ * { "ST31640A", 4 },
+ * { "ST32140A", 4 },
+ * { "ST3780A", 4 },
+ */
+ { "ST5660A", 3 },
+ { "ST3660A", 3 },
+ { "ST3630A", 3 },
+ { "ST3655A", 3 },
+ { "ST3391A", 3 },
+ { "ST3390A", 1 },
+ { "ST3600A", 1 },
+ { "ST3290A", 0 },
+ { "ST3144A", 0 },
+
+ { "QUANTUM ELS127A", 0 },
+ { "QUANTUM ELS170A", 0 },
+ { "QUANTUM LPS240A", 0 },
+ { "QUANTUM LPS210A", 3 },
+ { "QUANTUM LPS270A", 3 },
+ { "QUANTUM LPS365A", 3 },
+ { "QUANTUM LPS540A", 3 },
+ { "QUANTUM FIREBALL", 3 }, /* For models 540/640/1080/1280 */
+ /* 1080A works fine in mode4 with triton */
+ { NULL, 0 }
+};
+
+/*
+ * This routine searches the ide_pio_blacklist for an entry
+ * matching the start/whole of the supplied model name.
+ *
+ * Returns -1 if no match found.
+ * Otherwise returns the recommended PIO mode from ide_pio_blacklist[].
+ */
+int ide_scan_pio_blacklist (char *model)
+{
+ struct ide_pio_info *p;
+
+ for (p = ide_pio_blacklist; p->name != NULL; p++) {
+ if (strncmp(p->name, model, strlen(p->name)) == 0)
+ return p->pio;
+ }
+ return -1;
+}
+
+/*
+ * This routine returns the recommended PIO mode for a given drive,
+ * based on the drive->id information and the ide_pio_blacklist[].
+ * This is used by most chipset support modules when "auto-tuning".
+ */
+unsigned int ide_get_best_pio_mode (ide_drive_t *drive)
+{
+ unsigned int pio = 0;
+ struct hd_driveid *id = drive->id;
+
+ if (id != NULL) {
+ if (HWIF(drive)->chipset != ide_cmd640 && !strcmp("QUANTUM FIREBALL1080A", id->model))
+ pio = 4;
+ else
+ pio = ide_scan_pio_blacklist(id->model);
+ if (pio == -1) {
+ pio = (id->tPIO < 2) ? id->tPIO : 2;
+ if (id->field_valid & 2) {
+ byte modes = id->eide_pio_modes;
+ if (modes & 4) pio = 5;
+ else if (modes & 2) pio = 4;
+ else if (modes & 1) pio = 3;
+ }
+ }
+ }
+ return pio;
+}
+
+#endif /* _IDE_C */
+#endif /* defined(CONFIG_BLK_DEV_CMD640) || defined(CONFIG_IDE_CHIPSETS) */
+#endif /* _IDE_MODES_H */
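
The selection order above is easier to follow end-to-end in a small standalone sketch: try the blacklist first, otherwise trust id->tPIO no further than mode 2, then upgrade from the EIDE capability bits when field_valid marks them as meaningful. This is an illustration only, not part of the patch; best_pio(), its tpio/field_valid/eide_pio_modes parameters and the two-entry table are made-up stand-ins for the hd_driveid fields, and the cmd640/FIREBALL special case is left out.

#include <stdio.h>
#include <string.h>

struct pio_info { const char *name; int pio; };

static struct pio_info blacklist[] = {
	{ "WDC AC2540", 3 },
	{ "ST3600A",    1 },
	{ NULL, 0 }
};

/* same prefix match as ide_scan_pio_blacklist() */
static int scan_blacklist(const char *model)
{
	struct pio_info *p;

	for (p = blacklist; p->name != NULL; p++)
		if (strncmp(p->name, model, strlen(p->name)) == 0)
			return p->pio;
	return -1;
}

/* tpio, field_valid and eide_pio_modes stand in for the hd_driveid fields */
static int best_pio(const char *model, int tpio, int field_valid, int eide_pio_modes)
{
	int pio = scan_blacklist(model);

	if (pio == -1) {
		pio = (tpio < 2) ? tpio : 2;	/* never trust more than mode 2 ... */
		if (field_valid & 2) {		/* ... unless the EIDE words are valid */
			if (eide_pio_modes & 4)      pio = 5;
			else if (eide_pio_modes & 2) pio = 4;
			else if (eide_pio_modes & 1) pio = 3;
		}
	}
	return pio;
}

int main(void)
{
	printf("%d\n", best_pio("ST3600A 12345", 2, 0, 0));	/* 1: blacklisted */
	printf("%d\n", best_pio("NO-NAME DRIVE", 4, 0, 0));	/* 2: capped at mode 2 */
	printf("%d\n", best_pio("NO-NAME DRIVE", 2, 2, 3));	/* 4: EIDE PIO words valid */
	return 0;
}

Built as an ordinary user program it prints 1, 2 and 4 for the three calls, matching the comments.
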
diff --git a/i386/i386at/gpl/linux/block/rz1000.c b/i386/i386at/gpl/linux/block/rz1000.c
new file mode 100644
index 00000000..11f1dbd5
--- /dev/null
+++ b/i386/i386at/gpl/linux/block/rz1000.c
@@ -0,0 +1,56 @@
+/*
+ * linux/drivers/block/rz1000.c Version 0.02 Feb 08, 1996
+ *
+ * Copyright (C) 1995-1996 Linus Torvalds & author (see below)
+ */
+
+/*
+ * Principal Author/Maintainer: mlord@bnr.ca (Mark Lord)
+ *
+ * This file provides support for disabling the buggy read-ahead
+ * mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
+ */
+
+#undef REALLY_SLOW_IO /* most systems can safely undef this */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <asm/io.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include "ide.h"
+
+static void ide_pci_access_error (int rc)
+{
+ printk("ide: pcibios access failed - %s\n", pcibios_strerror(rc));
+}
+
+void init_rz1000 (byte bus, byte fn)
+{
+ int rc;
+ unsigned short reg;
+
+ printk("ide: buggy RZ1000 interface: ");
+ if ((rc = pcibios_read_config_word (bus, fn, PCI_COMMAND, &reg))) {
+ ide_pci_access_error (rc);
+ } else if (!(reg & 1)) {
+ printk("not enabled\n");
+ } else {
+ if ((rc = pcibios_read_config_word(bus, fn, 0x40, &reg))
+ || (rc = pcibios_write_config_word(bus, fn, 0x40, reg & 0xdfff)))
+ {
+ ide_hwifs[0].no_unmask = 1;
+ ide_hwifs[1].no_unmask = 1;
+ ide_hwifs[0].serialized = 1;
+ ide_pci_access_error (rc);
+ printk("serialized, disabled unmasking\n");
+ } else
+ printk("disabled read-ahead\n");
+ }
+}
diff --git a/i386/i386at/gpl/linux/block/triton.c b/i386/i386at/gpl/linux/block/triton.c
new file mode 100644
index 00000000..58296611
--- /dev/null
+++ b/i386/i386at/gpl/linux/block/triton.c
@@ -0,0 +1,459 @@
+/*
+ * linux/drivers/block/triton.c Version 1.06 Feb 6, 1996
+ *
+ * Copyright (c) 1995-1996 Mark Lord
+ * May be copied or modified under the terms of the GNU General Public License
+ */
+
+/*
+ * This module provides support for the Bus Master IDE DMA function
+ * of the Intel PCI Triton chipset (82371FB).
+ *
+ * DMA is currently supported only for hard disk drives (not cdroms).
+ *
+ * Support for cdroms will likely be added at a later date,
+ * after broader experience has been obtained with hard disks.
+ *
+ * Up to four drives may be enabled for DMA, and the Triton chipset will
+ * (hopefully) arbitrate the PCI bus among them. Note that the 82371FB chip
+ * provides a single "line buffer" for the BM IDE function, so performance of
+ * multiple (two) drives doing DMA simultaneously will suffer somewhat,
+ * as they contend for that shared resource. This is handled transparently
+ * inside the 82371FB chip.
+ *
+ * By default, DMA support is prepared for use, but is currently enabled only
+ * for drives which support multi-word DMA mode2 (mword2), or which are
+ * recognized as "good" (see table below). Drives with only mode0 or mode1
+ * (single or multi) DMA should also work with this chipset/driver (eg. MC2112A)
+ * but are not enabled by default. Use "hdparm -i" to view modes supported
+ * by a given drive.
+ *
+ * The hdparm-2.4 (or later) utility can be used for manually enabling/disabling
+ * DMA support, but must be (re-)compiled against this kernel version or later.
+ *
+ * To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting.
+ * If problems arise, ide.c will disable DMA operation after a few retries.
+ * This error recovery mechanism works and has been extremely well exercised.
+ *
+ * IDE drives, depending on their vintage, may support several different modes
+ * of DMA operation. The boot-time modes are indicated with a "*" in
+ * the "hdparm -i" listing, and can be changed with *knowledgeable* use of
+ * the "hdparm -X" feature. There is seldom a need to do this, as drives
+ * normally power-up with their "best" PIO/DMA modes enabled.
+ *
+ * Testing was done with an ASUS P55TP4XE/100 system and the following drives:
+ *
+ * Quantum Fireball 1080A (1Gig w/83kB buffer), DMA mode2, PIO mode4.
+ * - DMA mode2 works well (7.4MB/sec), despite the tiny on-drive buffer.
+ * - This drive also does PIO mode4, at about the same speed as DMA mode2.
+ * An awesome drive for the price!
+ *
+ * Fujitsu M1606TA (1Gig w/256kB buffer), DMA mode2, PIO mode4.
+ * - DMA mode2 gives horrible performance (1.6MB/sec), despite the good
+ * size of the on-drive buffer and a boasted 10ms average access time.
+ * - PIO mode4 was better, but peaked at a mere 4.5MB/sec.
+ *
+ * Micropolis MC2112A (1Gig w/508kB buffer), drive pre-dates EIDE and ATA2.
+ * - DMA works fine (2.2MB/sec), probably due to the large on-drive buffer.
+ * - This older drive can also be tweaked for fastPIO (3.7MB/sec) by using
+ * maximum clock settings (5,4) and setting all flags except prefetch.
+ *
+ * Western Digital AC31000H (1Gig w/128kB buffer), DMA mode1, PIO mode3.
+ * - DMA does not work reliably. The drive appears to be somewhat tardy
+ * in deasserting DMARQ at the end of a sector. This is evident in
+ * the observation that WRITEs work most of the time, depending on
+ * cache-buffer occupancy, but multi-sector reads seldom work.
+ *
+ * Testing was done with a Gigabyte GA-586 ATE system and the following drive:
+ * (Uwe Bonnes - bon@elektron.ikp.physik.th-darmstadt.de)
+ *
+ * Western Digital AC31600H (1.6Gig w/128kB buffer), DMA mode2, PIO mode4.
+ * - much better than its 1Gig cousin, this drive is reported to work
+ * very well with DMA (7.3MB/sec).
+ *
+ * Other drives:
+ *
+ * Maxtor 7540AV (515Meg w/32kB buffer), DMA modes mword0/sword2, PIO mode3.
+ * - a budget drive, with budget performance, around 3MB/sec.
+ *
+ * Western Digital AC2850F (814Meg w/64kB buffer), DMA mode1, PIO mode3.
+ * - another "caviar" drive, similar to the AC31000, except that this one
+ * worked with DMA in at least one system. Throughput is about 3.8MB/sec
+ * for both DMA and PIO.
+ *
+ * Conner CFS850A (812Meg w/64kB buffer), DMA mode2, PIO mode4.
+ * - like most Conner models, this drive proves that even a fast interface
+ * cannot improve slow media. Both DMA and PIO peak around 3.5MB/sec.
+ *
+ * If you have any drive models to add, email your results to: mlord@bnr.ca
+ * Keep an eye on /var/adm/messages for "DMA disabled" messages.
+ *
+ * Some people have reported trouble with Intel Zappa motherboards.
+ * This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0,
+ * available from ftp://ftp.intel.com/pub/bios/10004bs0.exe
+ * (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this).
+ *
+ * And, yes, Intel Zappa boards really *do* use the Triton IDE ports.
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "ide.h"
+
+/*
+ * good_dma_drives() lists the model names (from "hdparm -i")
+ * of drives which do not support mword2 DMA but which are
+ * known to work fine with this interface under Linux.
+ */
+const char *good_dma_drives[] = {"Micropolis 2112A",
+				 "CONNER CTMA 4000", NULL};
+
+/*
+ * Our Physical Region Descriptor (PRD) table should be large enough
+ * to handle the biggest I/O request we are likely to see. Since requests
+ * can have no more than 256 sectors, and since the typical blocksize is
+ * two sectors, we could get by with a limit of 128 entries here for the
+ * usual worst case. Most requests seem to include some contiguous blocks,
+ * further reducing the number of table entries required.
+ *
+ * The driver reverts to PIO mode for individual requests that exceed
+ * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
+ * 100% of all crazy scenarios here is not necessary.
+ *
+ * As it turns out though, we must allocate a full 4KB page for this,
+ * so the two PRD tables (ide0 & ide1) will each get half of that,
+ * allowing each to have about 256 entries (8 bytes each) from this.
+ */
+#define PRD_BYTES 8
+#define PRD_ENTRIES (PAGE_SIZE / (2 * PRD_BYTES))
+
+/*
+ * dma_intr() is the handler for disk read/write DMA interrupts
+ */
+static void dma_intr (ide_drive_t *drive)
+{
+ byte stat, dma_stat;
+ int i;
+ struct request *rq = HWGROUP(drive)->rq;
+ unsigned short dma_base = HWIF(drive)->dma_base;
+
+ dma_stat = inb(dma_base+2); /* get DMA status */
+ outb(inb(dma_base)&~1, dma_base); /* stop DMA operation */
+ stat = GET_STAT(); /* get drive status */
+ if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
+ if ((dma_stat & 7) == 4) { /* verify good DMA status */
+ rq = HWGROUP(drive)->rq;
+ for (i = rq->nr_sectors; i > 0;) {
+ i -= rq->current_nr_sectors;
+ ide_end_request(1, HWGROUP(drive));
+ }
+ return;
+ }
+ printk("%s: bad DMA status: 0x%02x\n", drive->name, dma_stat);
+ }
+ sti();
+ ide_error(drive, "dma_intr", stat);
+}
+
+/*
+ * build_dmatable() prepares a dma request.
+ * Returns 0 if all went okay, returns 1 otherwise.
+ */
+static int build_dmatable (ide_drive_t *drive)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ struct buffer_head *bh = rq->bh;
+ unsigned long size, addr, *table = HWIF(drive)->dmatable;
+ unsigned int count = 0;
+
+ do {
+ /*
+ * Determine addr and size of next buffer area. We assume that
+ * individual virtual buffers are always composed linearly in
+ * physical memory. For example, we assume that any 8kB buffer
+ * is always composed of two adjacent physical 4kB pages rather
+ * than two possibly non-adjacent physical 4kB pages.
+ */
+ if (bh == NULL) { /* paging and tape requests have (rq->bh == NULL) */
+ addr = virt_to_bus (rq->buffer);
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape)
+ size = drive->tape.pc->request_transfer;
+ else
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ size = rq->nr_sectors << 9;
+ } else {
+ /* group sequential buffers into one large buffer */
+ addr = virt_to_bus (bh->b_data);
+ size = bh->b_size;
+ while ((bh = bh->b_reqnext) != NULL) {
+ if ((addr + size) != virt_to_bus (bh->b_data))
+ break;
+ size += bh->b_size;
+ }
+ }
+
+ /*
+ * Fill in the dma table, without crossing any 64kB boundaries.
+ * We assume 16-bit alignment of all blocks.
+ */
+ while (size) {
+ if (++count >= PRD_ENTRIES) {
+ printk("%s: DMA table too small\n", drive->name);
+ return 1; /* revert to PIO for this request */
+ } else {
+ unsigned long bcount = 0x10000 - (addr & 0xffff);
+ if (bcount > size)
+ bcount = size;
+ *table++ = addr;
+ *table++ = bcount;
+ addr += bcount;
+ size -= bcount;
+ }
+ }
+ } while (bh != NULL);
+ if (count) {
+ *--table |= 0x80000000; /* set End-Of-Table (EOT) bit */
+ return 0;
+ }
+ printk("%s: empty DMA table?\n", drive->name);
+ return 1; /* let the PIO routines handle this weirdness */
+}
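
The 64 kB splitting rule in the inner loop above is easiest to see with concrete numbers. The sketch below (illustration only, not part of the patch; the bus address and length are made up) repeats the same arithmetic over one contiguous buffer and prints the PRD entries that would be emitted.

/* Standalone sketch of the 64 kB boundary splitting done in build_dmatable(). */
#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x1fff00;	/* hypothetical bus address of the buffer */
	unsigned long size = 0x0800;	/* 2 kB transfer: crosses one 64 kB boundary */

	while (size) {
		unsigned long bcount = 0x10000 - (addr & 0xffff);
		if (bcount > size)
			bcount = size;
		printf("PRD entry: addr=0x%06lx count=0x%04lx\n", addr, bcount);
		addr += bcount;
		size -= bcount;
	}
	return 0;
}

It prints two entries: 0x100 bytes up to the 64 kB boundary at 0x200000, then the remaining 0x700 bytes, which is how the table above gets populated for a buffer that straddles a boundary.
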
+
+static int config_drive_for_dma (ide_drive_t *drive)
+{
+ const char **list;
+
+ struct hd_driveid *id = drive->id;
+ if (id && (id->capability & 1)) {
+ /* Enable DMA on any drive that supports mword2 DMA */
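+		/* (IDENTIFY word 63: bit 2 = mword2 supported, bit 10 = mword2 currently selected) */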
+ if ((id->field_valid & 2) && (id->dma_mword & 0x404) == 0x404) {
+ drive->using_dma = 1;
+ return 0; /* DMA enabled */
+ }
+ /* Consult the list of known "good" drives */
+ list = good_dma_drives;
+ while (*list) {
+ if (!strcmp(*list++,id->model)) {
+ drive->using_dma = 1;
+ return 0; /* DMA enabled */
+ }
+ }
+ }
+ return 1; /* DMA not enabled */
+}
+
+/*
+ * triton_dmaproc() initiates/aborts DMA read/write operations on a drive.
+ *
+ * The caller is assumed to have selected the drive and programmed the drive's
+ * sector address using CHS or LBA. All that remains is to prepare for DMA
+ * and then issue the actual read/write DMA/PIO command to the drive.
+ *
+ * For ATAPI devices, we just prepare for DMA and return. The caller should
+ * then issue the packet command to the drive and call us again with
+ * ide_dma_begin afterwards.
+ *
+ * Returns 0 if all went well.
+ * Returns 1 if DMA read/write could not be started, in which case
+ * the caller should revert to PIO for the current request.
+ */
+static int triton_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
+{
+ unsigned long dma_base = HWIF(drive)->dma_base;
+ unsigned int reading = (1 << 3);
+
+ switch (func) {
+ case ide_dma_abort:
+ outb(inb(dma_base)&~1, dma_base); /* stop DMA */
+ return 0;
+ case ide_dma_check:
+ return config_drive_for_dma (drive);
+ case ide_dma_write:
+ reading = 0;
+ case ide_dma_read:
+ break;
+ case ide_dma_status_bad:
+ return ((inb(dma_base+2) & 7) != 4); /* verify good DMA status */
+ case ide_dma_transferred:
+#if 0
+ return (number of bytes actually transferred);
+#else
+ return (0);
+#endif
+ case ide_dma_begin:
+ outb(inb(dma_base)|1, dma_base); /* begin DMA */
+ return 0;
+ default:
+ printk("triton_dmaproc: unsupported func: %d\n", func);
+ return 1;
+ }
+ if (build_dmatable (drive))
+ return 1;
+ outl(virt_to_bus (HWIF(drive)->dmatable), dma_base + 4); /* PRD table */
+ outb(reading, dma_base); /* specify r/w */
+ outb(0x26, dma_base+2); /* clear status bits */
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ if (drive->media != ide_disk)
+ return 0;
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+ ide_set_handler(drive, &dma_intr, WAIT_CMD); /* issue cmd to drive */
+ OUT_BYTE(reading ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
+ outb(inb(dma_base)|1, dma_base); /* begin DMA */
+ return 0;
+}
+
+/*
+ * print_triton_drive_flags() displays the currently programmed options
+ * in the Triton chipset for a given drive.
+ *
+ * If fastDMA is "no", then slow ISA timings are used for DMA data xfers.
+ * If fastPIO is "no", then slow ISA timings are used for PIO data xfers.
+ * If IORDY is "no", then IORDY is assumed to always be asserted.
+ * If PreFetch is "no", then data prefetch/posting is not used.
+ *
+ * When "fastPIO" and/or "fastDMA" are "yes", then faster PCI timings and
+ * back-to-back 16-bit data transfers are enabled, using the sample_CLKs
+ * and recovery_CLKs (PCI clock cycles) timing parameters for that interface.
+ */
+static void print_triton_drive_flags (unsigned int unit, byte flags)
+{
+ printk(" %s ", unit ? "slave :" : "master:");
+ printk( "fastDMA=%s", (flags&9) ? "on " : "off");
+ printk(" PreFetch=%s", (flags&4) ? "on " : "off");
+ printk(" IORDY=%s", (flags&2) ? "on " : "off");
+ printk(" fastPIO=%s\n", ((flags&9)==1) ? "on " : "off");
+}
+
+static void init_triton_dma (ide_hwif_t *hwif, unsigned short base)
+{
+ static unsigned long dmatable = 0;
+
+ printk(" %s: BusMaster DMA at 0x%04x-0x%04x", hwif->name, base, base+7);
+ if (check_region(base, 8)) {
+ printk(" -- ERROR, PORTS ALREADY IN USE");
+ } else {
+ request_region(base, 8, "triton DMA");
+ hwif->dma_base = base;
+ if (!dmatable) {
+ /*
+ * Since we know we are on a PCI bus, we could
+ * actually use __get_free_pages() here instead
+ * of __get_dma_pages() -- no ISA limitations.
+ */
+ dmatable = __get_dma_pages(GFP_KERNEL, 0);
+ }
+ if (dmatable) {
+ hwif->dmatable = (unsigned long *) dmatable;
+ dmatable += (PRD_ENTRIES * PRD_BYTES);
+ outl(virt_to_bus(hwif->dmatable), base + 4);
+ hwif->dmaproc = &triton_dmaproc;
+ }
+ }
+ printk("\n");
+}
+
+/*
+ * calc_mode() returns the ATA PIO mode number, based on the number
+ * of cycle clks passed in. Assumes 33MHz bus operation (30ns per clk).
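+ * For example, sample_CLKs=3 plus recovery_CLKs=2 gives 5 clks = 150ns, which maps to PIO mode 3.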
+ */
+byte calc_mode (byte clks)
+{
+ if (clks == 3) return 5;
+ if (clks == 4) return 4;
+ if (clks < 6) return 3;
+ if (clks < 8) return 2;
+ if (clks < 13) return 1;
+ return 0;
+}
+
+/*
+ * ide_init_triton() prepares the IDE driver for DMA operation.
+ * This routine is called once, from ide.c during driver initialization,
+ * for each triton chipset which is found (unlikely to be more than one).
+ */
+void ide_init_triton (byte bus, byte fn)
+{
+ int rc = 0, h;
+ int dma_enabled = 0;
+ unsigned short bmiba, pcicmd;
+ unsigned int timings;
+
+ printk("ide: Triton BM-IDE on PCI bus %d function %d\n", bus, fn);
+ /*
+ * See if IDE and BM-DMA features are enabled:
+ */
+ if ((rc = pcibios_read_config_word(bus, fn, 0x04, &pcicmd)))
+ goto quit;
+ if ((pcicmd & 1) == 0) {
+ printk("ide: Triton IDE ports are not enabled\n");
+ goto quit;
+ }
+ if ((pcicmd & 4) == 0) {
+ printk("ide: Triton BM-DMA feature is not enabled -- upgrade your BIOS\n");
+ } else {
+ /*
+ * Get the bmiba base address
+ */
+ if ((rc = pcibios_read_config_word(bus, fn, 0x20, &bmiba)))
+ goto quit;
+ bmiba &= 0xfff0; /* extract port base address */
+ dma_enabled = 1;
+ }
+
+ /*
+ * See if ide port(s) are enabled
+ */
+ if ((rc = pcibios_read_config_dword(bus, fn, 0x40, &timings)))
+ goto quit;
+ if (!(timings & 0x80008000)) {
+ printk("ide: neither Triton IDE port is enabled\n");
+ goto quit;
+ }
+
+ /*
+ * Save the dma_base port addr for each interface
+ */
+ for (h = 0; h < MAX_HWIFS; ++h) {
+ byte s_clks, r_clks;
+ ide_hwif_t *hwif = &ide_hwifs[h];
+ unsigned short time;
+ if (hwif->io_base == 0x1f0) {
+ time = timings & 0xffff;
+ if ((timings & 0x8000) == 0) /* interface enabled? */
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ } else if (hwif->io_base == 0x170) {
+ time = timings >> 16;
+			if ((time & 0x8000) == 0)	/* interface enabled? */
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ } else
+ continue;
+ s_clks = ((~time >> 12) & 3) + 2;
+ r_clks = ((~time >> 8) & 3) + 1;
+ printk(" %s timing: (0x%04x) sample_CLKs=%d, recovery_CLKs=%d (PIO mode%d)\n",
+ hwif->name, time, s_clks, r_clks, calc_mode(s_clks+r_clks));
+ print_triton_drive_flags (0, time & 0xf);
+ print_triton_drive_flags (1, (time >> 4) & 0xf);
+ }
+
+quit: if (rc) printk("ide: pcibios access failed - %s\n", pcibios_strerror(rc));
+}
+
diff --git a/i386/i386at/gpl/linux/include/asm/bitops.h b/i386/i386at/gpl/linux/include/asm/bitops.h
new file mode 100644
index 00000000..5387f31d
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/bitops.h
@@ -0,0 +1,137 @@
+#ifndef _I386_BITOPS_H
+#define _I386_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+#ifdef __SMP__
+#define LOCK_PREFIX "lock ; "
+#else
+#define LOCK_PREFIX ""
+#endif
+
+/*
+ * Some hacks to defeat gcc over-optimizations..
+ */
+struct __dummy { unsigned long a[100]; };
+#define ADDR (*(struct __dummy *) addr)
+#define CONST_ADDR (*(const struct __dummy *) addr)
+
+extern __inline__ int set_bit(int nr, void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btsl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+extern __inline__ int clear_bit(int nr, void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+extern __inline__ int change_bit(int nr, void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btcl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+extern __inline__ int test_bit(int nr, const void * addr)
+{
+ return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+}
+
+/*
+ * Find-bit routines..
+ */
+extern __inline__ int find_first_zero_bit(void * addr, unsigned size)
+{
+ int res;
+
+ if (!size)
+ return 0;
+ __asm__("
+ cld
+ movl $-1,%%eax
+ xorl %%edx,%%edx
+ repe; scasl
+ je 1f
+ xorl -4(%%edi),%%eax
+ subl $4,%%edi
+ bsfl %%eax,%%edx
+1: subl %%ebx,%%edi
+ shll $3,%%edi
+ addl %%edi,%%edx"
+ :"=d" (res)
+ :"c" ((size + 31) >> 5), "D" (addr), "b" (addr)
+ :"ax", "cx", "di");
+ return res;
+}
+
+extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
+{
+ unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
+ int set = 0, bit = offset & 31, res;
+
+ if (bit) {
+ /*
+		 * Look for a zero bit in the first longword
+ */
+ __asm__("
+ bsfl %1,%0
+ jne 1f
+ movl $32, %0
+1: "
+ : "=r" (set)
+ : "r" (~(*p >> bit)));
+ if (set < (32 - bit))
+ return set + offset;
+ set = 32 - bit;
+ p++;
+ }
+ /*
+	 * No zero bit yet, search the remaining full longwords for one
+ */
+ res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
+ return (offset + set + res);
+}
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+extern __inline__ unsigned long ffz(unsigned long word)
+{
+ __asm__("bsfl %1,%0"
+ :"=r" (word)
+ :"r" (~word));
+ return word;
+}
+
+#endif /* _I386_BITOPS_H */
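
The numbering convention stated at the top of this file (bit 0 is the LSB of the first 32-bit word, bit 32 the LSB of the next) is easy to get backwards. Here is a portable sketch of the index arithmetic used by test_bit(), illustration only, with none of the atomicity or inline asm of the real routines:

/* Portable sketch of the test_bit() index arithmetic (no atomicity, no asm). */
#include <stdio.h>

static int test_bit_sketch(int nr, const unsigned int *addr)
{
	return 1U & (addr[nr >> 5] >> (nr & 31));	/* word nr/32, bit nr%32 */
}

int main(void)
{
	unsigned int map[2] = { 0x00000001, 0x00000002 };

	printf("%d\n", test_bit_sketch(0, map));	/* 1: LSB of map[0] */
	printf("%d\n", test_bit_sketch(32, map));	/* 0: LSB of map[1] is clear */
	printf("%d\n", test_bit_sketch(33, map));	/* 1: bit 1 of map[1] */
	return 0;
}
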
diff --git a/i386/i386at/gpl/linux/include/asm/byteorder.h b/i386/i386at/gpl/linux/include/asm/byteorder.h
new file mode 100644
index 00000000..3f40767f
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/byteorder.h
@@ -0,0 +1,90 @@
+#ifndef _I386_BYTEORDER_H
+#define _I386_BYTEORDER_H
+
+#undef ntohl
+#undef ntohs
+#undef htonl
+#undef htons
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN 1234
+#endif
+
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+/* For avoiding bswap on i386 */
+#ifdef __KERNEL__
+#include <linux/config.h>
+#endif
+
+extern unsigned long int ntohl(unsigned long int);
+extern unsigned short int ntohs(unsigned short int);
+extern unsigned long int htonl(unsigned long int);
+extern unsigned short int htons(unsigned short int);
+
+extern __inline__ unsigned long int __ntohl(unsigned long int);
+extern __inline__ unsigned short int __ntohs(unsigned short int);
+extern __inline__ unsigned long int __constant_ntohl(unsigned long int);
+extern __inline__ unsigned short int __constant_ntohs(unsigned short int);
+
+extern __inline__ unsigned long int
+__ntohl(unsigned long int x)
+{
+#if defined(__KERNEL__) && !defined(CONFIG_M386)
+ __asm__("bswap %0" : "=r" (x) : "0" (x));
+#else
+ __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
+ "rorl $16,%0\n\t" /* swap words */
+ "xchgb %b0,%h0" /* swap higher bytes */
+ :"=q" (x)
+ : "0" (x));
+#endif
+ return x;
+}
+
+#define __constant_ntohl(x) \
+ ((unsigned long int)((((unsigned long int)(x) & 0x000000ffU) << 24) | \
+ (((unsigned long int)(x) & 0x0000ff00U) << 8) | \
+ (((unsigned long int)(x) & 0x00ff0000U) >> 8) | \
+ (((unsigned long int)(x) & 0xff000000U) >> 24)))
+
+extern __inline__ unsigned short int
+__ntohs(unsigned short int x)
+{
+ __asm__("xchgb %b0,%h0" /* swap bytes */
+ : "=q" (x)
+ : "0" (x));
+ return x;
+}
+
+#define __constant_ntohs(x) \
+ ((unsigned short int)((((unsigned short int)(x) & 0x00ff) << 8) | \
+ (((unsigned short int)(x) & 0xff00) >> 8))) \
+
+#define __htonl(x) __ntohl(x)
+#define __htons(x) __ntohs(x)
+#define __constant_htonl(x) __constant_ntohl(x)
+#define __constant_htons(x) __constant_ntohs(x)
+
+#ifdef __OPTIMIZE__
+# define ntohl(x) \
+(__builtin_constant_p((long)(x)) ? \
+ __constant_ntohl((x)) : \
+ __ntohl((x)))
+# define ntohs(x) \
+(__builtin_constant_p((short)(x)) ? \
+ __constant_ntohs((x)) : \
+ __ntohs((x)))
+# define htonl(x) \
+(__builtin_constant_p((long)(x)) ? \
+ __constant_htonl((x)) : \
+ __htonl((x)))
+# define htons(x) \
+(__builtin_constant_p((short)(x)) ? \
+ __constant_htons((x)) : \
+ __htons((x)))
+#endif
+
+#endif
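
A quick standalone check of the shift-and-mask form used by __constant_ntohl(); the swab32() helper below is a made-up name used only for illustration, and the asm variants above produce the same result:

/* Standalone check of the 32-bit byte swap performed by __constant_ntohl(). */
#include <stdio.h>

static unsigned long swab32(unsigned long x)
{
	return ((x & 0x000000ffUL) << 24) |
	       ((x & 0x0000ff00UL) <<  8) |
	       ((x & 0x00ff0000UL) >>  8) |
	       ((x & 0xff000000UL) >> 24);
}

int main(void)
{
	/* 0x0a0b0c0d in network (big-endian) order becomes 0x0d0c0b0a on i386 */
	printf("0x%08lx\n", swab32(0x0a0b0c0dUL));	/* prints 0x0d0c0b0a */
	return 0;
}
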
diff --git a/i386/i386at/gpl/linux/include/asm/delay.h b/i386/i386at/gpl/linux/include/asm/delay.h
new file mode 100644
index 00000000..e22e8d6b
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/delay.h
@@ -0,0 +1,59 @@
+#ifndef _I386_DELAY_H
+#define _I386_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines, using a pre-computed "loops_per_second" value.
+ */
+
+#ifdef __SMP__
+#include <asm/smp.h>
+#endif
+
+extern __inline__ void __delay(int loops)
+{
+ __asm__ __volatile__(
+ ".align 2,0x90\n1:\tdecl %0\n\tjns 1b"
+ :/* no outputs */
+ :"a" (loops)
+ :"ax");
+}
+
+/*
+ * division by multiplication: you don't have to worry about
+ * loss of precision.
+ *
+ * Use only for very small delays ( < 1 msec). Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays. This is a "reasonable" implementation, though (and the
+ * first constant multiplications gets optimized away if the delay is
+ * a constant)
+ */
+extern __inline__ void udelay(unsigned long usecs)
+{
+ usecs *= 0x000010c6; /* 2**32 / 1000000 */
+ __asm__("mull %0"
+ :"=d" (usecs)
+#ifdef __SMP__
+ :"a" (usecs),"0" (cpu_data[smp_processor_id()].udelay_val)
+#else
+ :"a" (usecs),"0" (loops_per_sec)
+#endif
+ :"ax");
+
+ __delay(usecs);
+}
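
The constant 0x000010c6 is 2^32/10^6 rounded down, so the single mull leaves approximately usecs*loops_per_sec/10^6 -- the desired loop count -- in the high half of the 64-bit product, with no division. A standalone sketch of that fixed-point step, using a made-up loops_per_sec calibration value:

/* Fixed-point step behind udelay(): loops ~= usecs * loops_per_sec / 1e6,
 * computed by keeping only the high 32 bits of a 32x32->64 multiply,
 * mirroring what the inline mull above computes. */
#include <stdio.h>

int main(void)
{
	unsigned long long loops_per_sec = 400000000ULL;	/* hypothetical calibration */
	unsigned long usecs = 50;

	unsigned long scaled = usecs * 0x10c6UL;		/* usecs * 2^32/1e6 */
	unsigned long loops  = (unsigned long)((scaled * loops_per_sec) >> 32);

	printf("%lu loops (exact would be %llu)\n",
	       loops, usecs * loops_per_sec / 1000000ULL);
	return 0;
}

The result comes out slightly low (19995 instead of 20000 here) because 0x10c6 is truncated; for the sub-millisecond delays udelay() is meant for, that error is negligible.
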
+
+extern __inline__ unsigned long muldiv(unsigned long a, unsigned long b, unsigned long c)
+{
+ __asm__("mull %1 ; divl %2"
+ :"=a" (a)
+ :"d" (b),
+ "r" (c),
+ "0" (a)
+ :"dx");
+ return a;
+}
+
+#endif /* defined(_I386_DELAY_H) */
diff --git a/i386/i386at/gpl/linux/include/asm/dma.h b/i386/i386at/gpl/linux/include/asm/dma.h
new file mode 100644
index 00000000..b739ed7d
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/dma.h
@@ -0,0 +1,271 @@
+/* $Id: dma.h,v 1.1.1.1 1997/02/25 21:27:24 thomas Exp $
+ * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <asm/io.h> /* need byte IO */
+
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb outb_p
+#else
+#define dma_outb outb
+#endif
+
+#define dma_inb inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ * controller 1: channels 0-3, byte operations, ports 00-1F
+ * controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ * - ALL registers are 8 bits only, regardless of transfer size
+ * - channel 4 is not used - cascades 1 into 2.
+ * - channels 0-3 are byte - addresses/counts are for physical bytes
+ * - channels 5-7 are word - addresses/counts are for physical words
+ * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ * - transfer count loaded to registers is 1 less than actual count
+ * - controller 2 offsets are all even (2x offsets for controller 1)
+ * - page registers for 5-7 don't use data bit 0, represent 128K pages
+ * - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * DMA transfers are limited to the lower 16MB of _physical_ memory.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ * Address mapping for channels 0-3:
+ *
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Address mapping for channels 5-7:
+ *
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
+
+#define MAX_DMA_CHANNELS 8
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+#define MAX_DMA_ADDRESS 0x1000000
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG 0x08 /* command register (w) */
+#define DMA1_STAT_REG 0x08 /* status register (r) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
+#define DMA1_MODE_REG 0x0B /* mode register (w) */
+#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+
+#define DMA2_CMD_REG 0xD0 /* command register (w) */
+#define DMA2_STAT_REG 0xD0 /* status register (r) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
+#define DMA2_MODE_REG 0xD6 /* mode register (w) */
+#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_PAGE_0 0x87 /* DMA page registers */
+#define DMA_PAGE_1 0x83
+#define DMA_PAGE_2 0x81
+#define DMA_PAGE_3 0x82
+#define DMA_PAGE_5 0x8B
+#define DMA_PAGE_6 0x89
+#define DMA_PAGE_7 0x8A
+
+#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr, DMA1_MASK_REG);
+ else
+ dma_outb(dmanr & 3, DMA2_MASK_REG);
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr | 4, DMA1_MASK_REG);
+ else
+ dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(0, DMA1_CLEAR_FF_REG);
+ else
+ dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+ if (dmanr<=3)
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
+ else
+ dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
+{
+ switch(dmanr) {
+ case 0:
+ dma_outb(pagenr, DMA_PAGE_0);
+ break;
+ case 1:
+ dma_outb(pagenr, DMA_PAGE_1);
+ break;
+ case 2:
+ dma_outb(pagenr, DMA_PAGE_2);
+ break;
+ case 3:
+ dma_outb(pagenr, DMA_PAGE_3);
+ break;
+ case 5:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_5);
+ break;
+ case 6:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_6);
+ break;
+ case 7:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_7);
+ break;
+ }
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+ set_dma_page(dmanr, a>>16);
+ if (dmanr <= 3) {
+ dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ } else {
+ dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+ dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+ }
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ count--;
+ if (dmanr <= 3) {
+ dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+ dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+ } else {
+ dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+ dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+ }
+}
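
To make the word-channel encoding concrete: for a hypothetical word-aligned buffer at physical address 0x123456 and a 0x2000-byte transfer on channel 5, the routines above end up writing the register values computed by this standalone sketch (illustration only, not part of the patch):

/* Register values programmed for a 16-bit DMA channel (5-7): the page
 * register holds A23-A17, the address registers hold the word address
 * A16-A1, and the count register holds words - 1. */
#include <stdio.h>

int main(void)
{
	unsigned int a     = 0x123456;	/* hypothetical physical address (word aligned) */
	unsigned int count = 0x2000;	/* 8 kB transfer */

	printf("page  = 0x%02x\n", (a >> 16) & 0xfe);		/* 0x12 */
	printf("addr  = 0x%02x 0x%02x\n",
	       (a >> 1) & 0xff, (a >> 9) & 0xff);		/* 0x2b 0x1a */
	count--;
	printf("count = 0x%02x 0x%02x\n",
	       (count >> 1) & 0xff, (count >> 9) & 0xff);	/* 0xff 0x0f -> 0x1000 words */
	return 0;
}
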
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+ unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
+ : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
+
+ /* using short to get 16-bit wrap around */
+ unsigned short count;
+
+ count = 1 + dma_inb(io_port);
+ count += dma_inb(io_port) << 8;
+
+ return (dmanr<=3)? count : (count<<1);
+}
+
+
+/* These are in kernel/dma.c: */
+extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr); /* release it again */
+
+
+#endif /* _ASM_DMA_H */
diff --git a/i386/i386at/gpl/linux/include/asm/errno.h b/i386/i386at/gpl/linux/include/asm/errno.h
new file mode 100644
index 00000000..f5c37cad
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/errno.h
@@ -0,0 +1,252 @@
+#ifndef _I386_ERRNO_H
+#define _I386_ERRNO_H
+
+#ifdef MACH_INCLUDE
+#define LINUX_EPERM 1 /* Operation not permitted */
+#define LINUX_ENOENT 2 /* No such file or directory */
+#define LINUX_ESRCH 3 /* No such process */
+#define LINUX_EINTR 4 /* Interrupted system call */
+#define LINUX_EIO 5 /* I/O error */
+#define LINUX_ENXIO 6 /* No such device or address */
+#define LINUX_E2BIG 7 /* Arg list too long */
+#define LINUX_ENOEXEC 8 /* Exec format error */
+#define LINUX_EBADF 9 /* Bad file number */
+#define LINUX_ECHILD 10 /* No child processes */
+#define LINUX_EAGAIN 11 /* Try again */
+#define LINUX_ENOMEM 12 /* Out of memory */
+#define LINUX_EACCES 13 /* Permission denied */
+#define LINUX_EFAULT 14 /* Bad address */
+#define LINUX_ENOTBLK 15 /* Block device required */
+#define LINUX_EBUSY 16 /* Device or resource busy */
+#define LINUX_EEXIST 17 /* File exists */
+#define LINUX_EXDEV 18 /* Cross-device link */
+#define LINUX_ENODEV 19 /* No such device */
+#define LINUX_ENOTDIR 20 /* Not a directory */
+#define LINUX_EISDIR 21 /* Is a directory */
+#define LINUX_EINVAL 22 /* Invalid argument */
+#define LINUX_ENFILE 23 /* File table overflow */
+#define LINUX_EMFILE 24 /* Too many open files */
+#define LINUX_ENOTTY 25 /* Not a typewriter */
+#define LINUX_ETXTBSY 26 /* Text file busy */
+#define LINUX_EFBIG 27 /* File too large */
+#define LINUX_ENOSPC 28 /* No space left on device */
+#define LINUX_ESPIPE 29 /* Illegal seek */
+#define LINUX_EROFS 30 /* Read-only file system */
+#define LINUX_EMLINK 31 /* Too many links */
+#define LINUX_EPIPE 32 /* Broken pipe */
+#define LINUX_EDOM 33 /* Math argument out of domain of func */
+#define LINUX_ERANGE 34 /* Math result not representable */
+#define LINUX_EDEADLK 35 /* Resource deadlock would occur */
+#define LINUX_ENAMETOOLONG 36 /* File name too long */
+#define LINUX_ENOLCK 37 /* No record locks available */
+#define LINUX_ENOSYS 38 /* Function not implemented */
+#define LINUX_ENOTEMPTY 39 /* Directory not empty */
+#define LINUX_ELOOP 40 /* Too many symbolic links encountered */
+#define LINUX_EWOULDBLOCK LINUX_EAGAIN /* Operation would block */
+#define LINUX_ENOMSG 42 /* No message of desired type */
+#define LINUX_EIDRM 43 /* Identifier removed */
+#define LINUX_ECHRNG 44 /* Channel number out of range */
+#define LINUX_EL2NSYNC 45 /* Level 2 not synchronized */
+#define LINUX_EL3HLT 46 /* Level 3 halted */
+#define LINUX_EL3RST 47 /* Level 3 reset */
+#define LINUX_ELNRNG 48 /* Link number out of range */
+#define LINUX_EUNATCH 49 /* Protocol driver not attached */
+#define LINUX_ENOCSI 50 /* No CSI structure available */
+#define LINUX_EL2HLT 51 /* Level 2 halted */
+#define LINUX_EBADE 52 /* Invalid exchange */
+#define LINUX_EBADR 53 /* Invalid request descriptor */
+#define LINUX_EXFULL 54 /* Exchange full */
+#define LINUX_ENOANO 55 /* No anode */
+#define LINUX_EBADRQC 56 /* Invalid request code */
+#define LINUX_EBADSLT 57 /* Invalid slot */
+#define LINUX_EDEADLOCK 58 /* File locking deadlock error */
+#define LINUX_EBFONT 59 /* Bad font file format */
+#define LINUX_ENOSTR 60 /* Device not a stream */
+#define LINUX_ENODATA 61 /* No data available */
+#define LINUX_ETIME 62 /* Timer expired */
+#define LINUX_ENOSR 63 /* Out of streams resources */
+#define LINUX_ENONET 64 /* Machine is not on the network */
+#define LINUX_ENOPKG 65 /* Package not installed */
+#define LINUX_EREMOTE 66 /* Object is remote */
+#define LINUX_ENOLINK 67 /* Link has been severed */
+#define LINUX_EADV 68 /* Advertise error */
+#define LINUX_ESRMNT 69 /* Srmount error */
+#define LINUX_ECOMM 70 /* Communication error on send */
+#define LINUX_EPROTO 71 /* Protocol error */
+#define LINUX_EMULTIHOP 72 /* Multihop attempted */
+#define LINUX_EDOTDOT 73 /* RFS specific error */
+#define LINUX_EBADMSG 74 /* Not a data message */
+#define LINUX_EOVERFLOW 75 /* Value too large for defined data type */
+#define LINUX_ENOTUNIQ 76 /* Name not unique on network */
+#define LINUX_EBADFD 77 /* File descriptor in bad state */
+#define LINUX_EREMCHG 78 /* Remote address changed */
+#define LINUX_ELIBACC 79 /* Can not access a needed shared library */
+#define LINUX_ELIBBAD 80 /* Accessing a corrupted shared library */
+#define LINUX_ELIBSCN 81 /* .lib section in a.out corrupted */
+#define LINUX_ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define LINUX_ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define LINUX_EILSEQ 84 /* Illegal byte sequence */
+#define LINUX_ERESTART 85 /* Interrupted system call should be restarted */
+#define LINUX_ESTRPIPE 86 /* Streams pipe error */
+#define LINUX_EUSERS 87 /* Too many users */
+#define LINUX_ENOTSOCK 88 /* Socket operation on non-socket */
+#define LINUX_EDESTADDRREQ 89 /* Destination address required */
+#define LINUX_EMSGSIZE 90 /* Message too long */
+#define LINUX_EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define LINUX_ENOPROTOOPT 92 /* Protocol not available */
+#define LINUX_EPROTONOSUPPORT 93 /* Protocol not supported */
+#define LINUX_ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define LINUX_EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define LINUX_EPFNOSUPPORT 96 /* Protocol family not supported */
+#define LINUX_EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define LINUX_EADDRINUSE 98 /* Address already in use */
+#define LINUX_EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define LINUX_ENETDOWN 100 /* Network is down */
+#define LINUX_ENETUNREACH 101 /* Network is unreachable */
+#define LINUX_ENETRESET 102 /* Network dropped connection because of reset */
+#define LINUX_ECONNABORTED 103 /* Software caused connection abort */
+#define LINUX_ECONNRESET 104 /* Connection reset by peer */
+#define LINUX_ENOBUFS 105 /* No buffer space available */
+#define LINUX_EISCONN 106 /* Transport endpoint is already connected */
+#define LINUX_ENOTCONN 107 /* Transport endpoint is not connected */
+#define LINUX_ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define LINUX_ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define LINUX_ETIMEDOUT 110 /* Connection timed out */
+#define LINUX_ECONNREFUSED 111 /* Connection refused */
+#define LINUX_EHOSTDOWN 112 /* Host is down */
+#define LINUX_EHOSTUNREACH 113 /* No route to host */
+#define LINUX_EALREADY 114 /* Operation already in progress */
+#define LINUX_EINPROGRESS 115 /* Operation now in progress */
+#define LINUX_ESTALE 116 /* Stale NFS file handle */
+#define LINUX_EUCLEAN 117 /* Structure needs cleaning */
+#define LINUX_ENOTNAM 118 /* Not a XENIX named type file */
+#define LINUX_ENAVAIL 119 /* No XENIX semaphores available */
+#define LINUX_EISNAM 120 /* Is a named type file */
+#define LINUX_EREMOTEIO 121 /* Remote I/O error */
+#define LINUX_EDQUOT 122 /* Quota exceeded */
+#else /* ! MACH_INCLUDE */
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Arg list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+#define EDEADLOCK 58 /* File locking deadlock error */
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+#endif /* ! MACH_INCLUDE */
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/asm/fcntl.h b/i386/i386at/gpl/linux/include/asm/fcntl.h
new file mode 100644
index 00000000..0cb8fcdb
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/fcntl.h
@@ -0,0 +1,64 @@
+#ifndef _I386_FCNTL_H
+#define _I386_FCNTL_H
+
+/* open/fcntl - O_SYNC is only implemented on block devices and on files
+ located on an ext2 file system */
+#define O_ACCMODE 0003
+#define O_RDONLY 00
+#define O_WRONLY 01
+#define O_RDWR 02
+#define O_CREAT 0100 /* not fcntl */
+#define O_EXCL 0200 /* not fcntl */
+#define O_NOCTTY 0400 /* not fcntl */
+#define O_TRUNC 01000 /* not fcntl */
+#define O_APPEND 02000
+#define O_NONBLOCK 04000
+#define O_NDELAY O_NONBLOCK
+#define O_SYNC 010000
+#define FASYNC 020000 /* fcntl, for BSD compatibility */
+
+#define F_DUPFD 0 /* dup */
+#define F_GETFD 1 /* get f_flags */
+#define F_SETFD 2 /* set f_flags */
+#define F_GETFL 3 /* more flags (cloexec) */
+#define F_SETFL 4
+#define F_GETLK 5
+#define F_SETLK 6
+#define F_SETLKW 7
+
+#define F_SETOWN 8 /* for sockets. */
+#define F_GETOWN 9 /* for sockets. */
+
+/* for F_[GET|SET]FD */
+#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK 0
+#define F_WRLCK 1
+#define F_UNLCK 2
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK 4 /* or 3 */
+#define F_SHLCK 8 /* or 4 */
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH 1 /* shared lock */
+#define LOCK_EX 2 /* exclusive lock */
+#define LOCK_NB 4 /* or'd with one of the above to prevent
+ blocking */
+#define LOCK_UN 8 /* remove lock */
+
+#ifdef __KERNEL__
+#define F_POSIX 1
+#define F_FLOCK 2
+#endif /* __KERNEL__ */
+
+struct flock {
+ short l_type;
+ short l_whence;
+ off_t l_start;
+ off_t l_len;
+ pid_t l_pid;
+};
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/asm/floppy.h b/i386/i386at/gpl/linux/include/asm/floppy.h
new file mode 100644
index 00000000..93c58fea
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/floppy.h
@@ -0,0 +1,56 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ */
+#ifndef __ASM_I386_FLOPPY_H
+#define __ASM_I386_FLOPPY_H
+
+#define fd_inb(port) inb_p(port)
+#define fd_outb(port,value)	outb_p(value,port)
+
+#define fd_enable_dma() enable_dma(FLOPPY_DMA)
+#define fd_disable_dma() disable_dma(FLOPPY_DMA)
+#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy")
+#define fd_free_dma() free_dma(FLOPPY_DMA)
+#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
+#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode)
+#define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA,addr)
+#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+#define fd_cacheflush(addr,size) /* nothing */
+#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt, \
+ SA_INTERRUPT|SA_SAMPLE_RANDOM, \
+ "floppy")
+#define fd_free_irq() free_irq(FLOPPY_IRQ);
+
+__inline__ void virtual_dma_init(void)
+{
+ /* Nothing to do on an i386 */
+}
+
+static int FDC1 = 0x3f0;
+static int FDC2 = -1;
+
+#define FLOPPY0_TYPE ((CMOS_READ(0x10) >> 4) & 15)
+#define FLOPPY1_TYPE (CMOS_READ(0x10) & 15)
+
+#define N_FDC 2
+#define N_DRIVE 8
+
+/*
+ * The DMA channel used by the floppy controller cannot access data at
+ * addresses >= 16MB
+ *
+ * Went back to the 1MB limit, as some people had problems with the floppy
+ * driver otherwise. It doesn't matter much for performance anyway, as most
+ * floppy accesses go through the track buffer.
+ */
+#define CROSS_64KB(a,s) ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)
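+/* e.g. CROSS_64KB(0xfff0, 0x20) is true: the last byte of that span lies in the next 64kB page */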
+
+#endif /* __ASM_I386_FLOPPY_H */
diff --git a/i386/i386at/gpl/linux/include/asm/io.h b/i386/i386at/gpl/linux/include/asm/io.h
new file mode 100644
index 00000000..98e32ce6
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/io.h
@@ -0,0 +1,213 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ * Linus
+ */
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO __asm__ __volatile__("jmp 1f\n1:\tjmp 1f\n1:")
+#else
+#define __SLOW_DOWN_IO __asm__ __volatile__("outb %al,$0x80")
+#endif
+
+#ifdef REALLY_SLOW_IO
+#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
+#else
+#define SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+
+/*
+ * Change virtual addresses to physical addresses and vv.
+ * These are trivial on the 1:1 Linux/i386 mapping (but if we ever
+ * make the kernel segment mapped at 0, we need to do translation
+ * on the i386 as well)
+ */
+extern inline unsigned long virt_to_phys(volatile void * address)
+{
+ return (unsigned long) address;
+}
+
+extern inline void * phys_to_virt(unsigned long address)
+{
+ return (void *) address;
+}
+
+/*
+ * IO bus memory addresses are also 1:1 with the physical address
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#define readl(addr) (*(volatile unsigned int *) (addr))
+
+#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
+#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
+#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))
+
+#define memset_io(a,b,c) memset((void *)(a),(b),(c))
+#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
+#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+
+/*
+ * Again, the i386 does not need any memory-mapped-I/O-specific functions.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
+
+/*
+ * Talk about misusing macros..
+ */
+
+#define __OUT1(s,x) \
+extern inline void __out##s(unsigned x value, unsigned short port) {
+
+#define __OUT2(s,s1,s2) \
+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+
+#define __OUT(s,s1,x) \
+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); } \
+__OUT1(s##c,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); } \
+__OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); SLOW_DOWN_IO; } \
+__OUT1(s##c_p,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); SLOW_DOWN_IO; }
+
+#define __IN1(s) \
+extern inline RETURN_TYPE __in##s(unsigned short port) { RETURN_TYPE _v;
+
+#define __IN2(s,s1,s2) \
+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+
+#define __IN(s,s1,i...) \
+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); return _v; } \
+__IN1(s##c) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); return _v; } \
+__IN1(s##_p) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); SLOW_DOWN_IO; return _v; } \
+__IN1(s##c_p) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); SLOW_DOWN_IO; return _v; }
+
+#define __INS(s) \
+extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("cld ; rep ; ins" #s \
+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define __OUTS(s) \
+extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("cld ; rep ; outs" #s \
+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define RETURN_TYPE unsigned char
+/* __IN(b,"b","0" (0)) */
+__IN(b,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned short
+/* __IN(w,"w","0" (0)) */
+__IN(w,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned int
+__IN(l,"")
+#undef RETURN_TYPE
+
+__OUT(b,"b",char)
+__OUT(w,"w",short)
+__OUT(l,,int)
+
+__INS(b)
+__INS(w)
+__INS(l)
+
+__OUTS(b)
+__OUTS(w)
+__OUTS(l)
+
+/*
+ * Note that due to the way __builtin_constant_p() works, you
+ * - can't use it inside an inline function (it will never be true)
+ * - don't have to worry about side effects within the __builtin..
+ */
+#define outb(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outbc((val),(port)) : \
+ __outb((val),(port)))
+
+#define inb(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inbc(port) : \
+ __inb(port))
+
+#define outb_p(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outbc_p((val),(port)) : \
+ __outb_p((val),(port)))
+
+#define inb_p(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inbc_p(port) : \
+ __inb_p(port))
+
+#define outw(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outwc((val),(port)) : \
+ __outw((val),(port)))
+
+#define inw(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inwc(port) : \
+ __inw(port))
+
+#define outw_p(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outwc_p((val),(port)) : \
+ __outw_p((val),(port)))
+
+#define inw_p(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inwc_p(port) : \
+ __inw_p(port))
+
+#define outl(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outlc((val),(port)) : \
+ __outl((val),(port)))
+
+#define inl(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inlc(port) : \
+ __inl(port))
+
+#define outl_p(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outlc_p((val),(port)) : \
+ __outl_p((val),(port)))
+
+#define inl_p(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inlc_p(port) : \
+ __inl_p(port))
+
+#endif
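A hedged usage sketch for the helpers in this header (illustration only, not part of the patch): the port numbers and the MMIO address below are made up, and the 1.x calling convention puts the value first and the port second.

#define EX_STATUS_PORT  0x1f7    /* hypothetical ISA status port */

static void example_io(void)
{
        unsigned char status;

        /* 0x1f7 is a compile-time constant but not < 256, so inb() expands
         * to the __inb() variant that passes the port in %dx. */
        status = inb(EX_STATUS_PORT);

        /* 0x70 is constant and < 256, so outb_p() picks the "immediate
         * port" __outbc_p() form and then executes SLOW_DOWN_IO. */
        outb_p(0x08, 0x70);

        /* Memory-mapped registers are plain dereferences on the i386, and
         * virt_to_bus()/bus_to_virt() are identity mappings here. */
        writeb(status, 0xa0000);  /* hypothetical MMIO location */
}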
diff --git a/i386/i386at/gpl/linux/include/asm/ioctl.h b/i386/i386at/gpl/linux/include/asm/ioctl.h
new file mode 100644
index 00000000..cc94e3b9
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/ioctl.h
@@ -0,0 +1,75 @@
+/* $Id: ioctl.h,v 1.1.1.1 1997/02/25 21:27:24 thomas Exp $
+ *
+ * linux/ioctl.h for Linux by H.H. Bergman.
+ */
+
+#ifndef _ASMI386_IOCTL_H
+#define _ASMI386_IOCTL_H
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the maximum parameter size to 16KB - 1!
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms. The i386 ioctl numbering scheme doesn't really enforce
+ * a type field. De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here. Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 14
+#define _IOC_DIRBITS 2
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE 0U
+#define _IOC_WRITE 1U
+#define _IOC_READ 2U
+
+#define _IOC(dir,type,nr,size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+
+#endif /* _ASMI386_IOCTL_H */
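A short sketch of the encoding in practice (illustration only; the type letter 'E' and the parameter struct are hypothetical). _IOR() packs the direction, type, number and sizeof() into one 32-bit request, and the _IOC_*() macros recover the fields on the driver side:

struct ex_params { int rate; int depth; };        /* hypothetical */

#define EX_GET_PARAMS  _IOR('E', 1, struct ex_params)

/* Decoding the request number gives back each field:
 *   _IOC_DIR(EX_GET_PARAMS)  == _IOC_READ
 *   _IOC_TYPE(EX_GET_PARAMS) == 'E'
 *   _IOC_NR(EX_GET_PARAMS)   == 1
 *   _IOC_SIZE(EX_GET_PARAMS) == sizeof(struct ex_params)   (8 on the i386)
 */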
diff --git a/i386/i386at/gpl/linux/include/asm/irq.h b/i386/i386at/gpl/linux/include/asm/irq.h
new file mode 100644
index 00000000..8fd44b48
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/irq.h
@@ -0,0 +1,346 @@
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+/*
+ * linux/include/asm/irq.h
+ *
+ * (C) 1992, 1993 Linus Torvalds
+ *
+ * IRQ/IPI changes taken from work by Thomas Radke <tomsoft@informatik.tu-chemnitz.de>
+ */
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+
+#define NR_IRQS 16
+
+extern void disable_irq(unsigned int);
+extern void enable_irq(unsigned int);
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+#define SAVE_ALL \
+ "cld\n\t" \
+ "push %gs\n\t" \
+ "push %fs\n\t" \
+ "push %es\n\t" \
+ "push %ds\n\t" \
+ "pushl %eax\n\t" \
+ "pushl %ebp\n\t" \
+ "pushl %edi\n\t" \
+ "pushl %esi\n\t" \
+ "pushl %edx\n\t" \
+ "pushl %ecx\n\t" \
+ "pushl %ebx\n\t" \
+ "movl $" STR(KERNEL_DS) ",%edx\n\t" \
+ "mov %dx,%ds\n\t" \
+ "mov %dx,%es\n\t" \
+ "movl $" STR(USER_DS) ",%edx\n\t" \
+ "mov %dx,%fs\n\t" \
+ "movl $0,%edx\n\t" \
+ "movl %edx,%db7\n\t"
+
+/*
+ * SAVE_MOST/RESTORE_MOST is used for the faster version of IRQ handlers,
+ * installed by using the SA_INTERRUPT flag. These kinds of IRQs don't
+ * call the routines that do signal handling etc. on return, and can have
+ * more relaxed register-saving etc. They are also atomic, and are thus
+ * suited for small, fast interrupts like the serial lines or the hard disk
+ * drivers, which don't actually need signal handling etc.
+ *
+ * Also note that we actually save only those registers that are used in
+ * C subroutines (%eax, %edx and %ecx), so if you do something weird,
+ * you're on your own. The only segments that are saved (not counting the
+ * automatic stack and code segment handling) are %ds and %es, and they
+ * point to kernel space. No messing around with %fs here.
+ */
+#define SAVE_MOST \
+ "cld\n\t" \
+ "push %es\n\t" \
+ "push %ds\n\t" \
+ "pushl %eax\n\t" \
+ "pushl %edx\n\t" \
+ "pushl %ecx\n\t" \
+ "movl $" STR(KERNEL_DS) ",%edx\n\t" \
+ "mov %dx,%ds\n\t" \
+ "mov %dx,%es\n\t"
+
+#define RESTORE_MOST \
+ "popl %ecx\n\t" \
+ "popl %edx\n\t" \
+ "popl %eax\n\t" \
+ "pop %ds\n\t" \
+ "pop %es\n\t" \
+ "iret"
+
+/*
+ * The "inb" instructions are not needed, but seem to change the timings
+ * a bit - without them it seems that the harddisk driver won't work on
+ * all hardware. Arghh.
+ */
+#define ACK_FIRST(mask) \
+ "inb $0x21,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\torb $" #mask ","SYMBOL_NAME_STR(cache_21)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_21)",%al\n\t" \
+ "outb %al,$0x21\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tmovb $0x20,%al\n\t" \
+ "outb %al,$0x20\n\t"
+
+#define ACK_SECOND(mask) \
+ "inb $0xA1,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\torb $" #mask ","SYMBOL_NAME_STR(cache_A1)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_A1)",%al\n\t" \
+ "outb %al,$0xA1\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tmovb $0x20,%al\n\t" \
+ "outb %al,$0xA0\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\toutb %al,$0x20\n\t"
+
+#define UNBLK_FIRST(mask) \
+ "inb $0x21,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tandb $~(" #mask "),"SYMBOL_NAME_STR(cache_21)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_21)",%al\n\t" \
+ "outb %al,$0x21\n\t"
+
+#define UNBLK_SECOND(mask) \
+ "inb $0xA1,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tandb $~(" #mask "),"SYMBOL_NAME_STR(cache_A1)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_A1)",%al\n\t" \
+ "outb %al,$0xA1\n\t"
+
+#define IRQ_NAME2(nr) nr##_interrupt(void)
+#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
+#define FAST_IRQ_NAME(nr) IRQ_NAME2(fast_IRQ##nr)
+#define BAD_IRQ_NAME(nr) IRQ_NAME2(bad_IRQ##nr)
+
+#ifdef __SMP__
+
+#ifndef __SMP_PROF__
+#define SMP_PROF_INT_SPINS
+#define SMP_PROF_IPI_CNT
+#else
+#define SMP_PROF_INT_SPINS "incl "SYMBOL_NAME_STR(smp_spins)"(,%eax,4)\n\t"
+#define SMP_PROF_IPI_CNT "incl "SYMBOL_NAME_STR(ipi_count)"\n\t"
+#endif
+
+#define GET_PROCESSOR_ID \
+ "movl "SYMBOL_NAME_STR(apic_reg)", %edx\n\t" \
+ "movl 32(%edx), %eax\n\t" \
+ "shrl $24,%eax\n\t" \
+ "andb $0x0F,%al\n"
+
+#define ENTER_KERNEL \
+ "pushl %eax\n\t" \
+ "pushl %edx\n\t" \
+ "pushfl\n\t" \
+ "cli\n\t" \
+ GET_PROCESSOR_ID \
+ "1: " \
+ "lock\n\t" \
+ "btsl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
+ "jnc 3f\n\t" \
+ "cmpb "SYMBOL_NAME_STR(active_kernel_processor)", %al\n\t" \
+ "je 4f\n\t" \
+ "2: " \
+ SMP_PROF_INT_SPINS \
+ "btl %al, "SYMBOL_NAME_STR(smp_invalidate_needed)"\n\t" \
+ "jnc 5f\n\t" \
+ "lock\n\t" \
+ "btrl %al, "SYMBOL_NAME_STR(smp_invalidate_needed)"\n\t" \
+ "jnc 5f\n\t" \
+ "movl %cr3,%edx\n\t" \
+ "movl %edx,%cr3\n" \
+ "5: btl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
+ "jc 2b\n\t" \
+ "jmp 1b\n\t" \
+ "3: " \
+ "movb %al, "SYMBOL_NAME_STR(active_kernel_processor)"\n\t" \
+ "4: " \
+ "incl "SYMBOL_NAME_STR(kernel_counter)"\n\t" \
+ "popfl\n\t" \
+ "popl %edx\n\t" \
+ "popl %eax\n\t"
+
+#define LEAVE_KERNEL \
+ "pushfl\n\t" \
+ "cli\n\t" \
+ "decl "SYMBOL_NAME_STR(kernel_counter)"\n\t" \
+ "jnz 1f\n\t" \
+ "movb $" STR (NO_PROC_ID) ", "SYMBOL_NAME_STR(active_kernel_processor)"\n\t" \
+ "lock\n\t" \
+ "btrl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
+ "1: " \
+ "popfl\n\t"
+
+
+/*
+ * The syscall count increment is a gross hack, because ret_from_syscall
+ * is used by both the IRQ and syscall return paths (urghh).
+ */
+
+#define BUILD_IRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ENTER_KERNEL \
+ ACK_##chip(mask) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "sti\n\t" \
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
+ "jmp ret_from_sys_call\n" \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ENTER_KERNEL \
+ ACK_##chip(mask) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
+ "addl $4,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ LEAVE_KERNEL \
+ RESTORE_MOST \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ENTER_KERNEL \
+ ACK_##chip(mask) \
+ LEAVE_KERNEL \
+ RESTORE_MOST);
+
+
+/*
+ * Message pass must be a fast IRQ..
+ */
+
+#define BUILD_MSGIRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ENTER_KERNEL \
+ ACK_##chip(mask) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "sti\n\t" \
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
+ "jmp ret_from_sys_call\n" \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ACK_##chip(mask) \
+ SMP_PROF_IPI_CNT \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
+ "addl $4,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ RESTORE_MOST \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ACK_##chip(mask) \
+ RESTORE_MOST);
+
+#define BUILD_RESCHEDIRQ(nr) \
+asmlinkage void IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ENTER_KERNEL \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "sti\n\t" \
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(smp_reschedule_irq)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
+ "jmp ret_from_sys_call\n");
+#else
+
+#define BUILD_IRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ACK_##chip(mask) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "sti\n\t" \
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "jmp ret_from_sys_call\n" \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ACK_##chip(mask) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
+ "addl $4,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ RESTORE_MOST \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ACK_##chip(mask) \
+ RESTORE_MOST);
+
+#endif
+#endif
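A hedged sketch of how the macro family above is instantiated (illustration only; the real call sites live in the corresponding irq.c, which is outside this excerpt). One BUILD_IRQ() invocation emits three entry points per line, named through IRQ_NAME(), FAST_IRQ_NAME() and BAD_IRQ_NAME():

BUILD_IRQ(FIRST, 0, 0x01)     /* IRQ 0 on the first 8259, mask bit 0 */

/* After expansion this declares and defines:
 *   IRQ0_interrupt()       - full SAVE_ALL path, calls do_IRQ(0, regs)
 *   fast_IRQ0_interrupt()  - SAVE_MOST path used for SA_INTERRUPT
 *                            handlers, calls do_fast_IRQ(0)
 *   bad_IRQ0_interrupt()   - acknowledges the interrupt controller
 *                            and returns without dispatching
 */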
diff --git a/i386/i386at/gpl/linux/include/asm/page.h b/i386/i386at/gpl/linux/include/asm/page.h
new file mode 100644
index 00000000..2bb6837e
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/page.h
@@ -0,0 +1,64 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+#ifndef MACH_INCLUDE
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#endif
+
+#ifdef __KERNEL__
+
+#define STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* This handles the memory map.. */
+#define PAGE_OFFSET 0
+#define MAP_NR(addr) (((unsigned long)(addr)) >> PAGE_SHIFT)
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
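A worked example of the alignment arithmetic above (illustration only): with PAGE_SHIFT = 12, PAGE_SIZE is 0x1000 and PAGE_MASK is 0xfffff000, so

/*   PAGE_ALIGN(0x1234) == (0x1234 + 0xfff) & 0xfffff000 == 0x2000
 *   PAGE_ALIGN(0x2000) == (0x2000 + 0xfff) & 0xfffff000 == 0x2000
 *   MAP_NR(0x2345)     == 0x2345 >> 12                  == 2
 *
 * Addresses already on a page boundary are left unchanged, and MAP_NR()
 * yields the page-frame index; since PAGE_OFFSET is 0 on this port,
 * kernel-virtual and physical addresses coincide.
 */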
diff --git a/i386/i386at/gpl/linux/include/asm/param.h b/i386/i386at/gpl/linux/include/asm/param.h
new file mode 100644
index 00000000..f821b864
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/param.h
@@ -0,0 +1,20 @@
+#ifndef _ASMi386_PARAM_H
+#define _ASMi386_PARAM_H
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/asm/processor.h b/i386/i386at/gpl/linux/include/asm/processor.h
new file mode 100644
index 00000000..dea7827b
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/processor.h
@@ -0,0 +1,146 @@
+/*
+ * include/asm-i386/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_I386_PROCESSOR_H
+#define __ASM_I386_PROCESSOR_H
+
+/*
+ * System setup and hardware bug flags..
+ */
+extern char hard_math;
+extern char x86; /* lower 4 bits */
+extern char x86_vendor_id[13];
+extern char x86_model; /* lower 4 bits */
+extern char x86_mask; /* lower 4 bits */
+extern int x86_capability; /* field of flags */
+extern int fdiv_bug;
+extern char ignore_irq13;
+extern char wp_works_ok; /* doesn't work on a 386 */
+extern char hlt_works_ok; /* problems on some 486Dx4's and old 386's */
+
+/*
+ * Bus types (default is ISA, but people can check others with these..)
+ * MCA_bus hardcoded to 0 for now.
+ */
+extern int EISA_bus;
+#define MCA_bus 0
+#define MCA_bus__is_a_macro /* for versions in ksyms.c */
+
+/*
+ * User space process size: 3GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+#define TASK_SIZE (0xC0000000UL)
+
+/*
+ * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
+ */
+#define IO_BITMAP_SIZE 32
+
+struct i387_hard_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+};
+
+struct i387_soft_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long top;
+ struct fpu_reg regs[8]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ unsigned char lookahead;
+ struct info *info;
+ unsigned long entry_eip;
+};
+
+union i387_union {
+ struct i387_hard_struct hard;
+ struct i387_soft_struct soft;
+};
+
+struct thread_struct {
+ unsigned short back_link,__blh;
+ unsigned long esp0;
+ unsigned short ss0,__ss0h;
+ unsigned long esp1;
+ unsigned short ss1,__ss1h;
+ unsigned long esp2;
+ unsigned short ss2,__ss2h;
+ unsigned long cr3;
+ unsigned long eip;
+ unsigned long eflags;
+ unsigned long eax,ecx,edx,ebx;
+ unsigned long esp;
+ unsigned long ebp;
+ unsigned long esi;
+ unsigned long edi;
+ unsigned short es, __esh;
+ unsigned short cs, __csh;
+ unsigned short ss, __ssh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+ unsigned short ldt, __ldth;
+ unsigned short trace, bitmap;
+ unsigned long io_bitmap[IO_BITMAP_SIZE+1];
+ unsigned long tr;
+ unsigned long cr2, trap_no, error_code;
+/* floating point info */
+ union i387_union i387;
+/* virtual 86 mode info */
+ struct vm86_struct * vm86_info;
+ unsigned long screen_bitmap;
+ unsigned long v86flags, v86mask, v86mode;
+};
+
+#define INIT_MMAP { &init_mm, 0, 0x40000000, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC }
+
+#define INIT_TSS { \
+ 0,0, \
+ sizeof(init_kernel_stack) + (long) &init_kernel_stack, \
+ KERNEL_DS, 0, \
+ 0,0,0,0,0,0, \
+ (long) &swapper_pg_dir, \
+ 0,0,0,0,0,0,0,0,0,0, \
+ USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0, \
+ _LDT(0),0, \
+ 0, 0x8000, \
+ {~0, }, /* ioperm */ \
+ _TSS(0), 0, 0,0, \
+ { { 0, }, }, /* 387 state */ \
+ NULL, 0, 0, 0, 0 /* vm86_info */ \
+}
+
+#define alloc_kernel_stack() get_free_page(GFP_KERNEL)
+#define free_kernel_stack(page) free_page((page))
+
+static inline void start_thread(struct pt_regs * regs, unsigned long eip, unsigned long esp)
+{
+ regs->cs = USER_CS;
+ regs->ds = regs->es = regs->ss = regs->fs = regs->gs = USER_DS;
+ regs->eip = eip;
+ regs->esp = esp;
+}
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+extern inline unsigned long thread_saved_pc(struct thread_struct *t)
+{
+ return ((unsigned long *)t->esp)[3];
+}
+
+#endif /* __ASM_I386_PROCESSOR_H */
diff --git a/i386/i386at/gpl/linux/include/asm/ptrace.h b/i386/i386at/gpl/linux/include/asm/ptrace.h
new file mode 100644
index 00000000..8e4aa52f
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/ptrace.h
@@ -0,0 +1,52 @@
+#ifndef _I386_PTRACE_H
+#define _I386_PTRACE_H
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS 13
+#define EFL 14
+#define UESP 15
+#define SS 16
+
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+struct pt_regs {
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ unsigned short ds, __dsu;
+ unsigned short es, __esu;
+ unsigned short fs, __fsu;
+ unsigned short gs, __gsu;
+ long orig_eax;
+ long eip;
+ unsigned short cs, __csu;
+ long eflags;
+ long esp;
+ unsigned short ss, __ssu;
+};
+
+#ifdef __KERNEL__
+#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->cs))
+#define instruction_pointer(regs) ((regs)->eip)
+extern void show_regs(struct pt_regs *);
+#endif
+
+#endif
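As an illustration of the user_mode() predicate (not part of the patch): a trap frame came from user space either when the CPU was in virtual-8086 mode (the VM flag in EFLAGS; VM_MASK is defined in the vm86 header, outside this excerpt) or when the saved CS selector carries a non-zero requested privilege level in its low two bits. With the MACH selector values from the segment header in this same patch:

/*   regs->cs == KERNEL_CS (0x08)  ->  0x08 & 3 == 0  ->  kernel mode
 *   regs->cs == USER_CS   (0x17)  ->  0x17 & 3 == 3  ->  user mode
 */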
diff --git a/i386/i386at/gpl/linux/include/asm/resource.h b/i386/i386at/gpl/linux/include/asm/resource.h
new file mode 100644
index 00000000..83940d16
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/resource.h
@@ -0,0 +1,37 @@
+#ifndef _I386_RESOURCE_H
+#define _I386_RESOURCE_H
+
+/*
+ * Resource limits
+ */
+
+#define RLIMIT_CPU 0 /* CPU time in ms */
+#define RLIMIT_FSIZE 1 /* Maximum filesize */
+#define RLIMIT_DATA 2 /* max data size */
+#define RLIMIT_STACK 3 /* max stack size */
+#define RLIMIT_CORE 4 /* max core file size */
+#define RLIMIT_RSS 5 /* max resident set size */
+#define RLIMIT_NPROC 6 /* max number of processes */
+#define RLIMIT_NOFILE 7 /* max number of open files */
+#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
+
+#define RLIM_NLIMITS 9
+
+#ifdef __KERNEL__
+
+#define INIT_RLIMITS \
+{ \
+ { LONG_MAX, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { _STK_LIM, _STK_LIM }, \
+ { 0, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { MAX_TASKS_PER_USER, MAX_TASKS_PER_USER }, \
+ { NR_OPEN, NR_OPEN }, \
+ { LONG_MAX, LONG_MAX }, \
+}
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/asm/segment.h b/i386/i386at/gpl/linux/include/asm/segment.h
new file mode 100644
index 00000000..7cfafa4b
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/segment.h
@@ -0,0 +1,347 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+#ifdef MACH
+#define KERNEL_CS 0x08
+#define KERNEL_DS 0x10
+
+#define USER_CS 0x17
+#define USER_DS 0x1f
+#else
+#define KERNEL_CS 0x10
+#define KERNEL_DS 0x18
+
+#define USER_CS 0x23
+#define USER_DS 0x2B
+#endif
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Uh, these should become the main single-value transfer routines..
+ * They automatically use the right size if we just have the right
+ * pointer type..
+ */
+#define put_user(x,ptr) __put_user((unsigned long)(x),(ptr),sizeof(*(ptr)))
+#define get_user(ptr) ((__typeof__(*(ptr)))__get_user((ptr),sizeof(*(ptr))))
+
+/*
+ * This is a silly but good way to make sure that
+ * the __put_user function is indeed always optimized,
+ * and that we use the correct sizes..
+ */
+extern int bad_user_access_length(void);
+
+/*
+ * dummy pointer type structure.. gcc won't try to do something strange
+ * this way..
+ */
+struct __segment_dummy { unsigned long a[100]; };
+#define __sd(x) ((struct __segment_dummy *) (x))
+#define __const_sd(x) ((const struct __segment_dummy *) (x))
+
+static inline void __put_user(unsigned long x, void * y, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ ("movb %b1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"iq" ((unsigned char) x), "m" (*__sd(y)));
+ break;
+ case 2:
+ __asm__ ("movw %w1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"ir" ((unsigned short) x), "m" (*__sd(y)));
+ break;
+ case 4:
+ __asm__ ("movl %1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"ir" (x), "m" (*__sd(y)));
+ break;
+ default:
+ bad_user_access_length();
+ }
+}
+
+static inline unsigned long __get_user(const void * y, int size)
+{
+ unsigned long result;
+
+ switch (size) {
+ case 1:
+ __asm__ ("movb %%fs:%1,%b0"
+ :"=q" (result)
+ :"m" (*__const_sd(y)));
+ return (unsigned char) result;
+ case 2:
+ __asm__ ("movw %%fs:%1,%w0"
+ :"=r" (result)
+ :"m" (*__const_sd(y)));
+ return (unsigned short) result;
+ case 4:
+ __asm__ ("movl %%fs:%1,%0"
+ :"=r" (result)
+ :"m" (*__const_sd(y)));
+ return result;
+ default:
+ return bad_user_access_length();
+ }
+}
+
+static inline void __generic_memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ __asm__ volatile
+ (" cld
+ push %%es
+ movw %%fs,%%cx
+ movw %%cx,%%es
+ cmpl $3,%0
+ jbe 1f
+ movl %%edi,%%ecx
+ negl %%ecx
+ andl $3,%%ecx
+ subl %%ecx,%0
+ rep; movsb
+ movl %0,%%ecx
+ shrl $2,%%ecx
+ rep; movsl
+ andl $3,%0
+ 1: movl %0,%%ecx
+ rep; movsb
+ pop %%es"
+ :"=abd" (n)
+ :"0" (n),"D" ((long) to),"S" ((long) from)
+ :"cx","di","si");
+}
+
+static inline void __constant_memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ switch (n) {
+ case 0:
+ return;
+ case 1:
+ __put_user(*(const char *) from, (char *) to, 1);
+ return;
+ case 2:
+ __put_user(*(const short *) from, (short *) to, 2);
+ return;
+ case 3:
+ __put_user(*(const short *) from, (short *) to, 2);
+ __put_user(*(2+(const char *) from), 2+(char *) to, 1);
+ return;
+ case 4:
+ __put_user(*(const int *) from, (int *) to, 4);
+ return;
+ case 8:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ return;
+ case 12:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ __put_user(*(2+(const int *) from), 2+(int *) to, 4);
+ return;
+ case 16:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ __put_user(*(2+(const int *) from), 2+(int *) to, 4);
+ __put_user(*(3+(const int *) from), 3+(int *) to, 4);
+ return;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "push %%es\n\t" \
+ "push %%fs\n\t" \
+ "pop %%es\n\t" \
+ "rep ; movsl\n\t" \
+ x \
+ "pop %%es" \
+ : /* no outputs */ \
+ :"c" (n/4),"D" ((long) to),"S" ((long) from) \
+ :"cx","di","si")
+
+ switch (n % 4) {
+ case 0:
+ COMMON("");
+ return;
+ case 1:
+ COMMON("movsb\n\t");
+ return;
+ case 2:
+ COMMON("movsw\n\t");
+ return;
+ case 3:
+ COMMON("movsw\n\tmovsb\n\t");
+ return;
+ }
+#undef COMMON
+}
+
+static inline void __generic_memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ __asm__ volatile
+ (" cld
+ cmpl $3,%0
+ jbe 1f
+ movl %%edi,%%ecx
+ negl %%ecx
+ andl $3,%%ecx
+ subl %%ecx,%0
+ fs; rep; movsb
+ movl %0,%%ecx
+ shrl $2,%%ecx
+ fs; rep; movsl
+ andl $3,%0
+ 1: movl %0,%%ecx
+ fs; rep; movsb"
+ :"=abd" (n)
+ :"0" (n),"D" ((long) to),"S" ((long) from)
+ :"cx","di","si", "memory");
+}
+
+static inline void __constant_memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ switch (n) {
+ case 0:
+ return;
+ case 1:
+ *(char *)to = __get_user((const char *) from, 1);
+ return;
+ case 2:
+ *(short *)to = __get_user((const short *) from, 2);
+ return;
+ case 3:
+ *(short *) to = __get_user((const short *) from, 2);
+ *((char *) to + 2) = __get_user(2+(const char *) from, 1);
+ return;
+ case 4:
+ *(int *) to = __get_user((const int *) from, 4);
+ return;
+ case 8:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ return;
+ case 12:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ *(2+(int *) to) = __get_user(2+(const int *) from, 4);
+ return;
+ case 16:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ *(2+(int *) to) = __get_user(2+(const int *) from, 4);
+ *(3+(int *) to) = __get_user(3+(const int *) from, 4);
+ return;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "rep ; fs ; movsl\n\t" \
+ x \
+ : /* no outputs */ \
+ :"c" (n/4),"D" ((long) to),"S" ((long) from) \
+ :"cx","di","si","memory")
+
+ switch (n % 4) {
+ case 0:
+ COMMON("");
+ return;
+ case 1:
+ COMMON("fs ; movsb");
+ return;
+ case 2:
+ COMMON("fs ; movsw");
+ return;
+ case 3:
+ COMMON("fs ; movsw\n\tfs ; movsb");
+ return;
+ }
+#undef COMMON
+}
+
+#define memcpy_fromfs(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy_fromfs((to),(from),(n)) : \
+ __generic_memcpy_fromfs((to),(from),(n)))
+
+#define memcpy_tofs(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy_tofs((to),(from),(n)) : \
+ __generic_memcpy_tofs((to),(from),(n)))
+
+/*
+ * These are deprecated..
+ *
+ * Use "put_user()" and "get_user()" with the proper pointer types instead.
+ */
+
+#define get_fs_byte(addr) __get_user((const unsigned char *)(addr),1)
+#define get_fs_word(addr) __get_user((const unsigned short *)(addr),2)
+#define get_fs_long(addr) __get_user((const unsigned int *)(addr),4)
+
+#define put_fs_byte(x,addr) __put_user((x),(unsigned char *)(addr),1)
+#define put_fs_word(x,addr) __put_user((x),(unsigned short *)(addr),2)
+#define put_fs_long(x,addr) __put_user((x),(unsigned int *)(addr),4)
+
+#ifdef WE_REALLY_WANT_TO_USE_A_BROKEN_INTERFACE
+
+static inline unsigned short get_user_word(const short *addr)
+{
+ return __get_user(addr, 2);
+}
+
+static inline unsigned char get_user_byte(const char * addr)
+{
+ return __get_user(addr,1);
+}
+
+static inline unsigned long get_user_long(const int *addr)
+{
+ return __get_user(addr, 4);
+}
+
+static inline void put_user_byte(char val,char *addr)
+{
+ __put_user(val, addr, 1);
+}
+
+static inline void put_user_word(short val,short * addr)
+{
+ __put_user(val, addr, 2);
+}
+
+static inline void put_user_long(unsigned long val,int * addr)
+{
+ __put_user(val, addr, 4);
+}
+
+#endif
+
+/*
+ * Someone who knows GNU asm better than I should double check the following.
+ * It seems to work, but I don't know if I'm doing something subtly wrong.
+ * --- TYT, 11/24/91
+ * [ nothing wrong here, Linus: I just changed the ax to be any reg ]
+ */
+
+static inline unsigned long get_fs(void)
+{
+ unsigned long _v;
+ __asm__("mov %%fs,%w0":"=r" (_v):"0" (0));
+ return _v;
+}
+
+static inline unsigned long get_ds(void)
+{
+ unsigned long _v;
+ __asm__("mov %%ds,%w0":"=r" (_v):"0" (0));
+ return _v;
+}
+
+static inline void set_fs(unsigned long val)
+{
+ __asm__ __volatile__("mov %w0,%%fs": /* no output */ :"r" (val));
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SEGMENT_H */
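A hedged usage sketch (illustration only, not part of the patch): the transfer size is derived from the pointer type, so the same two macros cover bytes, words and longs; the user-space pointer below is hypothetical.

static int example_copy(int *uaddr)
{
        int value = 42;

        /* stores 4 bytes through %fs (the user data segment) */
        put_user(value, uaddr);

        /* reads them back; the cast inside get_user() restores the type */
        return get_user(uaddr) == 42;
}

For whole structures, memcpy_tofs()/memcpy_fromfs() choose the unrolled __constant_* variant when the length is a compile-time constant and the generic rep/movs loop otherwise.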
diff --git a/i386/i386at/gpl/linux/include/asm/sigcontext.h b/i386/i386at/gpl/linux/include/asm/sigcontext.h
new file mode 100644
index 00000000..5b84694f
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/sigcontext.h
@@ -0,0 +1,29 @@
+#ifndef _ASMi386_SIGCONTEXT_H
+#define _ASMi386_SIGCONTEXT_H
+
+struct sigcontext_struct {
+ unsigned short gs, __gsh;
+ unsigned short fs, __fsh;
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned long edi;
+ unsigned long esi;
+ unsigned long ebp;
+ unsigned long esp;
+ unsigned long ebx;
+ unsigned long edx;
+ unsigned long ecx;
+ unsigned long eax;
+ unsigned long trapno;
+ unsigned long err;
+ unsigned long eip;
+ unsigned short cs, __csh;
+ unsigned long eflags;
+ unsigned long esp_at_signal;
+ unsigned short ss, __ssh;
+ unsigned long i387;
+ unsigned long oldmask;
+ unsigned long cr2;
+};
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/asm/signal.h b/i386/i386at/gpl/linux/include/asm/signal.h
new file mode 100644
index 00000000..b37613fe
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/signal.h
@@ -0,0 +1,95 @@
+#ifndef _ASMi386_SIGNAL_H
+#define _ASMi386_SIGNAL_H
+
+typedef unsigned long sigset_t; /* at least 32 bits */
+
+#define _NSIG 32
+#define NSIG _NSIG
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGUNUSED 31
+
+/*
+ * sa_flags values: SA_STACK is not currently supported, but will allow the
+ * usage of signal stacks by using the (now obsolete) sa_restorer field in
+ * the sigaction structure as a stack pointer. This is now possible due to
+ * the changes in signal handling. LBT 010493.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ */
+#define SA_NOCLDSTOP 1
+#define SA_STACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_INTERRUPT 0x20000000
+#define SA_NOMASK 0x40000000
+#define SA_ONESHOT 0x80000000
+
+#ifdef __KERNEL__
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ */
+#define SA_PROBE SA_ONESHOT
+#define SA_SAMPLE_RANDOM SA_RESTART
+#endif
+
+
+#define SIG_BLOCK 0 /* for blocking signals */
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#define SIG_SETMASK 2 /* for setting the signal mask */
+
+/* Type of a signal handler. */
+typedef void (*__sighandler_t)(int);
+
+#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#ifdef __KERNEL__
+#include <asm/sigcontext.h>
+#endif
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/asm/socket.h b/i386/i386at/gpl/linux/include/asm/socket.h
new file mode 100644
index 00000000..dc923006
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/socket.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp */
+
+/* For setsockoptions(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add :#define SO_REUSEPORT 15 */
+
+#endif /* _ASM_SOCKET_H */
diff --git a/i386/i386at/gpl/linux/include/asm/stat.h b/i386/i386at/gpl/linux/include/asm/stat.h
new file mode 100644
index 00000000..b4c64869
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/stat.h
@@ -0,0 +1,41 @@
+#ifndef _I386_STAT_H
+#define _I386_STAT_H
+
+struct old_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+
+struct new_stat {
+ unsigned short st_dev;
+ unsigned short __pad1;
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned short __pad2;
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long __unused1;
+ unsigned long st_mtime;
+ unsigned long __unused2;
+ unsigned long st_ctime;
+ unsigned long __unused3;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/asm/statfs.h b/i386/i386at/gpl/linux/include/asm/statfs.h
new file mode 100644
index 00000000..6efb7411
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/statfs.h
@@ -0,0 +1,21 @@
+#ifndef _I386_STATFS_H
+#define _I386_STATFS_H
+
+typedef struct {
+ long val[2];
+} fsid_t;
+
+struct statfs {
+ long f_type;
+ long f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+ fsid_t f_fsid;
+ long f_namelen;
+ long f_spare[6];
+};
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/asm/string.h b/i386/i386at/gpl/linux/include/asm/string.h
new file mode 100644
index 00000000..d93a2a77
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/string.h
@@ -0,0 +1,593 @@
+#ifndef _I386_STRING_H_
+#define _I386_STRING_H_
+
+/*
+ * This string-include defines all string functions as inline
+ * functions. Use gcc. It also assumes ds=es=data space, which should be
+ * normal. Most of the string functions are rather heavily hand-optimized;
+ * see especially strtok, strstr and str[c]spn. They should work, but are not
+ * very easy to understand. Everything is done entirely within the register
+ * set, making the functions fast and clean. String instructions have been
+ * used throughout, making for "slightly" unclear code :-)
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#define __HAVE_ARCH_STRCPY
+extern inline char * strcpy(char * dest,const char *src)
+{
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : /* no output */
+ :"S" (src),"D" (dest):"si","di","ax","memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+extern inline char * strncpy(char * dest,const char *src,size_t count)
+{
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %2\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "rep\n\t"
+ "stosb\n"
+ "2:"
+ : /* no output */
+ :"S" (src),"D" (dest),"c" (count):"si","di","ax","cx","memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCAT
+extern inline char * strcat(char * dest,const char * src)
+{
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : /* no output */
+ :"S" (src),"D" (dest),"a" (0),"c" (0xffffffff):"si","di","ax","cx");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCAT
+extern inline char * strncat(char * dest,const char * src,size_t count)
+{
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n\t"
+ "movl %4,%3\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %2,%2\n\t"
+ "stosb"
+ : /* no output */
+ :"S" (src),"D" (dest),"a" (0),"c" (0xffffffff),"g" (count)
+ :"si","di","ax","cx","memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCMP
+extern inline int strcmp(const char * cs,const char * ct)
+{
+register int __res;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "scasb\n\t"
+ "jne 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "jmp 3f\n"
+ "2:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%eax\n"
+ "3:"
+ :"=a" (__res):"S" (cs),"D" (ct):"si","di");
+return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+extern inline int strncmp(const char * cs,const char * ct,size_t count)
+{
+register int __res;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "scasb\n\t"
+ "jne 3f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %%eax,%%eax\n\t"
+ "jmp 4f\n"
+ "3:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%al\n"
+ "4:"
+ :"=a" (__res):"S" (cs),"D" (ct),"c" (count):"si","di","cx");
+return __res;
+}
+
+#define __HAVE_ARCH_STRCHR
+extern inline char * strchr(const char * s, int c)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "je 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "movl $1,%1\n"
+ "2:\tmovl %1,%0\n\t"
+ "decl %0"
+ :"=a" (__res):"S" (s),"0" (c):"si");
+return __res;
+}
+
+#define __HAVE_ARCH_STRRCHR
+extern inline char * strrchr(const char * s, int c)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "jne 2f\n\t"
+ "leal -1(%%esi),%0\n"
+ "2:\ttestb %%al,%%al\n\t"
+ "jne 1b"
+ :"=d" (__res):"0" (0),"S" (s),"a" (c):"ax","si");
+return __res;
+}
+
+#define __HAVE_ARCH_STRSPN
+extern inline size_t strspn(const char * cs, const char * ct)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movl %4,%%edi\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %%ecx\n\t"
+ "decl %%ecx\n\t"
+ "movl %%ecx,%%edx\n"
+ "1:\tlodsb\n\t"
+ "testb %%al,%%al\n\t"
+ "je 2f\n\t"
+ "movl %4,%%edi\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "je 1b\n"
+ "2:\tdecl %0"
+ :"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
+ :"ax","cx","dx","di");
+return __res-cs;
+}
+
+#define __HAVE_ARCH_STRCSPN
+extern inline size_t strcspn(const char * cs, const char * ct)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movl %4,%%edi\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %%ecx\n\t"
+ "decl %%ecx\n\t"
+ "movl %%ecx,%%edx\n"
+ "1:\tlodsb\n\t"
+ "testb %%al,%%al\n\t"
+ "je 2f\n\t"
+ "movl %4,%%edi\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "jne 1b\n"
+ "2:\tdecl %0"
+ :"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
+ :"ax","cx","dx","di");
+return __res-cs;
+}
+
+#define __HAVE_ARCH_STRPBRK
+extern inline char * strpbrk(const char * cs,const char * ct)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movl %4,%%edi\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %%ecx\n\t"
+ "decl %%ecx\n\t"
+ "movl %%ecx,%%edx\n"
+ "1:\tlodsb\n\t"
+ "testb %%al,%%al\n\t"
+ "je 2f\n\t"
+ "movl %4,%%edi\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "jne 1b\n\t"
+ "decl %0\n\t"
+ "jmp 3f\n"
+ "2:\txorl %0,%0\n"
+ "3:"
+ :"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
+ :"ax","cx","dx","di");
+return __res;
+}
+
+#define __HAVE_ARCH_STRSTR
+extern inline char * strstr(const char * cs,const char * ct)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t" \
+ "movl %4,%%edi\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %%ecx\n\t"
+ "decl %%ecx\n\t" /* NOTE! This also sets Z if searchstring='' */
+ "movl %%ecx,%%edx\n"
+ "1:\tmovl %4,%%edi\n\t"
+ "movl %%esi,%%eax\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repe\n\t"
+ "cmpsb\n\t"
+ "je 2f\n\t" /* also works for empty string, see above */
+ "xchgl %%eax,%%esi\n\t"
+ "incl %%esi\n\t"
+ "cmpb $0,-1(%%eax)\n\t"
+ "jne 1b\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "2:"
+ :"=a" (__res):"0" (0),"c" (0xffffffff),"S" (cs),"g" (ct)
+ :"cx","dx","di","si");
+return __res;
+}
+
+#define __HAVE_ARCH_STRLEN
+extern inline size_t strlen(const char * s)
+{
+register int __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %0\n\t"
+ "decl %0"
+ :"=c" (__res):"D" (s),"a" (0),"0" (0xffffffff):"di");
+return __res;
+}
+
+#define __HAVE_ARCH_STRTOK
+extern inline char * strtok(char * s,const char * ct)
+{
+register char * __res;
+__asm__ __volatile__(
+ "testl %1,%1\n\t"
+ "jne 1f\n\t"
+ "testl %0,%0\n\t"
+ "je 8f\n\t"
+ "movl %0,%1\n"
+ "1:\txorl %0,%0\n\t"
+ "movl $-1,%%ecx\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "cld\n\t"
+ "movl %4,%%edi\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %%ecx\n\t"
+ "decl %%ecx\n\t"
+ "je 7f\n\t" /* empty delimiter-string */
+ "movl %%ecx,%%edx\n"
+ "2:\tlodsb\n\t"
+ "testb %%al,%%al\n\t"
+ "je 7f\n\t"
+ "movl %4,%%edi\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "je 2b\n\t"
+ "decl %1\n\t"
+ "cmpb $0,(%1)\n\t"
+ "je 7f\n\t"
+ "movl %1,%0\n"
+ "3:\tlodsb\n\t"
+ "testb %%al,%%al\n\t"
+ "je 5f\n\t"
+ "movl %4,%%edi\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "jne 3b\n\t"
+ "decl %1\n\t"
+ "cmpb $0,(%1)\n\t"
+ "je 5f\n\t"
+ "movb $0,(%1)\n\t"
+ "incl %1\n\t"
+ "jmp 6f\n"
+ "5:\txorl %1,%1\n"
+ "6:\tcmpb $0,(%0)\n\t"
+ "jne 7f\n\t"
+ "xorl %0,%0\n"
+ "7:\ttestl %0,%0\n\t"
+ "jne 8f\n\t"
+ "movl %0,%1\n"
+ "8:"
+ :"=b" (__res),"=S" (___strtok)
+ :"0" (___strtok),"1" (s),"g" (ct)
+ :"ax","cx","dx","di","memory");
+return __res;
+}
+
+extern inline void * __memcpy(void * to, const void * from, size_t n)
+{
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; movsl\n\t"
+ "testb $2,%b1\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b1\n\t"
+ "je 2f\n\t"
+ "movsb\n"
+ "2:"
+ : /* no output */
+ :"c" (n/4), "q" (n),"D" ((long) to),"S" ((long) from)
+ : "cx","di","si","memory");
+return (to);
+}
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as the count is constant.
+ */
+extern inline void * __constant_memcpy(void * to, const void * from, size_t n)
+{
+ switch (n) {
+ case 0:
+ return to;
+ case 1:
+ *(unsigned char *)to = *(const unsigned char *)from;
+ return to;
+ case 2:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ return to;
+ case 3:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ *(2+(unsigned char *)to) = *(2+(const unsigned char *)from);
+ return to;
+ case 4:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ return to;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "rep ; movsl" \
+ x \
+ : /* no outputs */ \
+ : "c" (n/4),"D" ((long) to),"S" ((long) from) \
+ : "cx","di","si","memory");
+
+ switch (n % 4) {
+ case 0: COMMON(""); return to;
+ case 1: COMMON("\n\tmovsb"); return to;
+ case 2: COMMON("\n\tmovsw"); return to;
+ case 3: COMMON("\n\tmovsw\n\tmovsb"); return to;
+ }
+#undef COMMON
+}
+
+#define __HAVE_ARCH_MEMCPY
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy((t),(f),(n)) : \
+ __memcpy((t),(f),(n)))
+
+#define __HAVE_ARCH_MEMMOVE
+extern inline void * memmove(void * dest,const void * src, size_t n)
+{
+if (dest<src)
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "movsb"
+ : /* no output */
+ :"c" (n),"S" (src),"D" (dest)
+ :"cx","si","di");
+else
+__asm__ __volatile__(
+ "std\n\t"
+ "rep\n\t"
+ "movsb\n\t"
+ "cld"
+ : /* no output */
+ :"c" (n),
+ "S" (n-1+(const char *)src),
+ "D" (n-1+(char *)dest)
+ :"cx","si","di","memory");
+return dest;
+}
+
+#define memcmp __builtin_memcmp
+
+#define __HAVE_ARCH_MEMCHR
+extern inline void * memchr(const void * cs,int c,size_t count)
+{
+register void * __res;
+if (!count)
+ return NULL;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "je 1f\n\t"
+ "movl $1,%0\n"
+ "1:\tdecl %0"
+ :"=D" (__res):"a" (c),"D" (cs),"c" (count)
+ :"cx");
+return __res;
+}
+
+extern inline void * __memset_generic(void * s, char c,size_t count)
+{
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "stosb"
+ : /* no output */
+ :"a" (c),"D" (s),"c" (count)
+ :"cx","di","memory");
+return s;
+}
+
+/* we might want to write optimized versions of these later */
+#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
+
+/*
+ * memset(x,0,y) is a reasonably common thing to do, so we want to fill
+ * things 32 bits at a time even when we don't know the size of the
+ * area at compile-time..
+ */
+extern inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+{
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; stosl\n\t"
+ "testb $2,%b1\n\t"
+ "je 1f\n\t"
+ "stosw\n"
+ "1:\ttestb $1,%b1\n\t"
+ "je 2f\n\t"
+ "stosb\n"
+ "2:"
+ : /* no output */
+ :"a" (c), "q" (count), "c" (count/4), "D" ((long) s)
+ :"cx","di","memory");
+return (s);
+}
+
+/* Added by Gertjan van Wingerde to make minix and sysv module work */
+#define __HAVE_ARCH_STRNLEN
+extern inline size_t strnlen(const char * s, size_t count)
+{
+register int __res;
+__asm__ __volatile__(
+ "movl %1,%0\n\t"
+ "jmp 2f\n"
+ "1:\tcmpb $0,(%0)\n\t"
+ "je 3f\n\t"
+ "incl %0\n"
+ "2:\tdecl %2\n\t"
+ "cmpl $-1,%2\n\t"
+ "jne 1b\n"
+ "3:\tsubl %1,%0"
+ :"=a" (__res):"c" (s),"d" (count));
+return __res;
+}
+/* end of additional stuff */
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as by now we know that both pattern and count are constant..
+ */
+extern inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+{
+ switch (count) {
+ case 0:
+ return s;
+ case 1:
+ *(unsigned char *)s = pattern;
+ return s;
+ case 2:
+ *(unsigned short *)s = pattern;
+ return s;
+ case 3:
+ *(unsigned short *)s = pattern;
+ *(2+(unsigned char *)s) = pattern;
+ return s;
+ case 4:
+ *(unsigned long *)s = pattern;
+ return s;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "rep ; stosl" \
+ x \
+ : /* no outputs */ \
+ : "a" (pattern),"c" (count/4),"D" ((long) s) \
+ : "cx","di","memory")
+
+ switch (count % 4) {
+ case 0: COMMON(""); return s;
+ case 1: COMMON("\n\tstosb"); return s;
+ case 2: COMMON("\n\tstosw"); return s;
+ case 3: COMMON("\n\tstosw\n\tstosb"); return s;
+ }
+#undef COMMON
+}
+
+#define __constant_c_x_memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_c_and_count_memset((s),(c),(count)) : \
+ __constant_c_memset((s),(c),(count)))
+
+#define __memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_count_memset((s),(c),(count)) : \
+ __memset_generic((s),(c),(count)))
+
+#define __HAVE_ARCH_MEMSET
+#define memset(s, c, count) \
+(__builtin_constant_p(c) ? \
+ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)c),(count)) : \
+ __memset((s),(c),(count)))
+
+/*
+ * find the first occurrence of byte 'c', or 1 past the area if none
+ */
+#define __HAVE_ARCH_MEMSCAN
+extern inline void * memscan(void * addr, int c, size_t size)
+{
+ if (!size)
+ return addr;
+ __asm__("cld
+ repnz; scasb
+ jnz 1f
+ dec %%edi
+1: "
+ : "=D" (addr), "=c" (size)
+ : "0" (addr), "1" (size), "a" (c));
+ return addr;
+}
+
+#endif
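A worked example of the constant-fill path above (illustration only): for a constant fill byte the memset() macro first replicates it into a 32-bit pattern so the bulk of the fill can run as rep stosl, with the remaining one to three bytes finished by stosw/stosb:

/*   memset(p, 0xAB, n)
 *     -> 0x01010101UL * (unsigned char)0xAB == 0xABABABAB
 *     -> __constant_c_x_memset(p, 0xABABABAB, n)
 *        stores the long n/4 times, then handles the n%4 tail bytes
 */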
diff --git a/i386/i386at/gpl/linux/include/asm/system.h b/i386/i386at/gpl/linux/include/asm/system.h
new file mode 100644
index 00000000..9c6f862a
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/system.h
@@ -0,0 +1,301 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <asm/segment.h>
+
+/*
+ * Entry into gdt where to find first TSS. GDT layout:
+ * 0 - nul
+ * 1 - kernel code segment
+ * 2 - kernel data segment
+ * 3 - user code segment
+ * 4 - user data segment
+ * ...
+ * 8 - TSS #0
+ * 9 - LDT #0
+ * 10 - TSS #1
+ * 11 - LDT #1
+ */
+#define FIRST_TSS_ENTRY 8
+#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
+#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
+#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+#define load_TR(n) __asm__("ltr %%ax": /* no output */ :"a" (_TSS(n)))
+#define load_ldt(n) __asm__("lldt %%ax": /* no output */ :"a" (_LDT(n)))
+#define store_TR(n) \
+__asm__("str %%ax\n\t" \
+ "subl %2,%%eax\n\t" \
+ "shrl $4,%%eax" \
+ :"=a" (n) \
+ :"0" (0),"i" (FIRST_TSS_ENTRY<<3))
+
+/* This special macro can be used to load a debugging register */
+
+#define loaddebug(register) \
+ __asm__("movl %0,%%edx\n\t" \
+ "movl %%edx,%%db" #register "\n\t" \
+ : /* no output */ \
+ :"m" (current->debugreg[register]) \
+ :"dx");
+
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ * This also clears the TS-flag if the task we switched to has used
+ * the math co-processor latest.
+ *
+ * It also reloads the debug regs if necessary..
+ */
+
+
+#ifdef __SMP__
+ /*
+ * Keep the lock depth straight. If we switch on an interrupt from
+ * kernel->user task we need to lose a depth, and if we switch the
+ * other way we need to gain a depth. Same layer switches come out
+ * the same.
+ *
+ * We spot a switch in user mode because the kernel counter is the
+ * same as the interrupt counter depth. (We never switch during the
+ * message/invalidate IPI).
+ *
+ * We fsave/fwait so that an exception goes off at the right time
+ * (as a call from the fsave or fwait in effect) rather than to
+ * the wrong process.
+ */
+
+#define switch_to(tsk) do { \
+ cli();\
+ if(current->flags&PF_USEDFPU) \
+ { \
+ __asm__ __volatile__("fnsave %0":"=m" (current->tss.i387.hard)); \
+ __asm__ __volatile__("fwait"); \
+ current->flags&=~PF_USEDFPU; \
+ } \
+ current->lock_depth=syscall_count; \
+ kernel_counter+=next->lock_depth-current->lock_depth; \
+ syscall_count=next->lock_depth; \
+__asm__("pushl %%edx\n\t" \
+ "movl "SYMBOL_NAME_STR(apic_reg)",%%edx\n\t" \
+ "movl 0x20(%%edx), %%edx\n\t" \
+ "shrl $22,%%edx\n\t" \
+ "and $0x3C,%%edx\n\t" \
+ "xchgl %%ecx,"SYMBOL_NAME_STR(current_set)"(,%%edx)\n\t" \
+ "popl %%edx\n\t" \
+ "ljmp %0\n\t" \
+ "sti\n\t" \
+ : /* no output */ \
+ :"m" (*(((char *)&tsk->tss.tr)-4)), \
+ "c" (tsk) \
+ :"cx"); \
+ /* Now maybe reload the debug registers */ \
+ if(current->debugreg[7]){ \
+ loaddebug(0); \
+ loaddebug(1); \
+ loaddebug(2); \
+ loaddebug(3); \
+ loaddebug(6); \
+ } \
+} while (0)
+
+#else
+#define switch_to(tsk) do { \
+__asm__("cli\n\t" \
+ "xchgl %%ecx,"SYMBOL_NAME_STR(current_set)"\n\t" \
+ "ljmp %0\n\t" \
+ "sti\n\t" \
+ "cmpl %%ecx,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \
+ "jne 1f\n\t" \
+ "clts\n" \
+ "1:" \
+ : /* no output */ \
+ :"m" (*(((char *)&tsk->tss.tr)-4)), \
+ "c" (tsk) \
+ :"cx"); \
+ /* Now maybe reload the debug registers */ \
+ if(current->debugreg[7]){ \
+ loaddebug(0); \
+ loaddebug(1); \
+ loaddebug(2); \
+ loaddebug(3); \
+ loaddebug(6); \
+ } \
+} while (0)
+#endif
+
+#define _set_base(addr,base) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%1\n\t" \
+ "movb %%dh,%2" \
+ : /* no output */ \
+ :"m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "d" (base) \
+ :"dx")
+
+#define _set_limit(addr,limit) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %1,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%1" \
+ : /* no output */ \
+ :"m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "d" (limit) \
+ :"dx")
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , base )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , (limit-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+ unsigned long __base;
+ __asm__("movb %3,%%dh\n\t"
+ "movb %2,%%dl\n\t"
+ "shll $16,%%edx\n\t"
+ "movw %1,%%dx"
+ :"=&d" (__base)
+ :"m" (*((addr)+2)),
+ "m" (*((addr)+4)),
+ "m" (*((addr)+7)));
+ return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
+#define stts() \
+__asm__ __volatile__ ( \
+ "movl %%cr0,%%eax\n\t" \
+ "orl $8,%%eax\n\t" \
+ "movl %%eax,%%cr0" \
+ : /* no outputs */ \
+ : /* no inputs */ \
+ :"ax")
+
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__("xchgb %b0,%1"
+ :"=q" (x), "=m" (*__xg(ptr))
+ :"0" (x), "m" (*__xg(ptr)));
+ break;
+ case 2:
+ __asm__("xchgw %w0,%1"
+ :"=r" (x), "=m" (*__xg(ptr))
+ :"0" (x), "m" (*__xg(ptr)));
+ break;
+ case 4:
+ __asm__("xchgl %0,%1"
+ :"=r" (x), "=m" (*__xg(ptr))
+ :"0" (x), "m" (*__xg(ptr)));
+ break;
+ }
+ return x;
+}
+
+#define mb() __asm__ __volatile__ ("" : : :"memory")
+#define sti() __asm__ __volatile__ ("sti": : :"memory")
+#define cli() __asm__ __volatile__ ("cli": : :"memory")
+
+#define save_flags(x) \
+__asm__ __volatile__("pushfl ; popl %0":"=r" (x): /* no input */ :"memory")
+
+#define restore_flags(x) \
+__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"r" (x):"memory")
+
+#define iret() __asm__ __volatile__ ("iret": : :"memory")
+
+#define _set_gate(gate_addr,type,dpl,addr) \
+__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
+ "movw %2,%%dx\n\t" \
+ "movl %%eax,%0\n\t" \
+ "movl %%edx,%1" \
+ :"=m" (*((long *) (gate_addr))), \
+ "=m" (*(1+(long *) (gate_addr))) \
+ :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
+ "d" ((char *) (addr)),"a" (KERNEL_CS << 16) \
+ :"ax","dx")
+
+#define set_intr_gate(n,addr) \
+ _set_gate(&idt[n],14,0,addr)
+
+#define set_trap_gate(n,addr) \
+ _set_gate(&idt[n],15,0,addr)
+
+#define set_system_gate(n,addr) \
+ _set_gate(&idt[n],15,3,addr)
+
+#define set_call_gate(a,addr) \
+ _set_gate(a,12,3,addr)
+
+#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\
+ *((gate_addr)+1) = ((base) & 0xff000000) | \
+ (((base) & 0x00ff0000)>>16) | \
+ ((limit) & 0xf0000) | \
+ ((dpl)<<13) | \
+ (0x00408000) | \
+ ((type)<<8); \
+ *(gate_addr) = (((base) & 0x0000ffff)<<16) | \
+ ((limit) & 0x0ffff); }
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw $" #limit ",%1\n\t" \
+ "movw %%ax,%2\n\t" \
+ "rorl $16,%%eax\n\t" \
+ "movb %%al,%3\n\t" \
+ "movb $" type ",%4\n\t" \
+ "movb $0x00,%5\n\t" \
+ "movb %%ah,%6\n\t" \
+ "rorl $16,%%eax" \
+ : /* no output */ \
+ :"a" (addr+0xc0000000), "m" (*(n)), "m" (*(n+2)), "m" (*(n+4)), \
+ "m" (*(n+5)), "m" (*(n+6)), "m" (*(n+7)) \
+ )
+
+#define set_tss_desc(n,addr) _set_tssldt_desc(((char *) (n)),((int)(addr)),235,"0x89")
+#define set_ldt_desc(n,addr,size) \
+ _set_tssldt_desc(((char *) (n)),((int)(addr)),((size << 3) - 1),"0x82")
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt;
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#ifndef MACH
+#define HAVE_DISABLE_HLT
+#endif
+void disable_hlt(void);
+void enable_hlt(void);
+
+#endif
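The xchg()/tas() exchange primitives and the save_flags()/cli()/restore_flags() interrupt-flag helpers defined in this header are the low-level building blocks the drivers in this import rely on for mutual exclusion. A minimal sketch of the usual patterns, assuming a hypothetical lock word of our own (none of these names come from the header):

    static volatile int example_lock = 0;           /* hypothetical lock word */

    static inline void example_acquire(void)
    {
            /* tas() atomically stores 1 and hands back the old value,
             * so we spin until the old value was 0 (lock was free). */
            while (tas(&example_lock))
                    /* busy wait */ ;
    }

    static inline void example_release(void)
    {
            example_lock = 0;
    }

    static void example_critical_section(void)
    {
            unsigned long flags;

            save_flags(flags);      /* remember the current interrupt state */
            cli();                  /* block interrupts for the critical region */
            /* ... touch data shared with an interrupt handler ... */
            restore_flags(flags);   /* put the interrupt flag back as it was */
    }

The same save_flags()/cli()/restore_flags() pattern appears verbatim in the MACH branch of end_request() in linux/blk.h later in this change.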
diff --git a/i386/i386at/gpl/linux/include/asm/termios.h b/i386/i386at/gpl/linux/include/asm/termios.h
new file mode 100644
index 00000000..e9cbf14e
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/termios.h
@@ -0,0 +1,304 @@
+#ifndef _I386_TERMIOS_H
+#define _I386_TERMIOS_H
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+#define TCGETA 0x5405
+#define TCSETA 0x5406
+#define TCSETAW 0x5407
+#define TCSETAF 0x5408
+#define TCSBRK 0x5409
+#define TCXONC 0x540A
+#define TCFLSH 0x540B
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+#define TIOCGPGRP 0x540F
+#define TIOCSPGRP 0x5410
+#define TIOCOUTQ 0x5411
+#define TIOCSTI 0x5412
+#define TIOCGWINSZ 0x5413
+#define TIOCSWINSZ 0x5414
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define FIONREAD 0x541B
+#define TIOCINQ FIONREAD
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+#define FIONBIO 0x5421
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define FIOCLEX 0x5451
+#define FIOASYNC 0x5452
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+#ifdef __KERNEL__
+/* intr=^C quit=^| erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+#endif
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define CIBAUD 002003600000 /* input baud rate (not used) */
+#define CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+/* line disciplines */
+#define N_TTY 0
+#define N_SLIP 1
+#define N_MOUSE 2
+#define N_PPP 3
+
+#ifdef __KERNEL__
+
+#include <linux/string.h>
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+extern inline void trans_from_termio(struct termio * termio,
+ struct termios * termios)
+{
+#define SET_LOW_BITS(x,y) ((x) = (0xffff0000 & (x)) | (y))
+ SET_LOW_BITS(termios->c_iflag, termio->c_iflag);
+ SET_LOW_BITS(termios->c_oflag, termio->c_oflag);
+ SET_LOW_BITS(termios->c_cflag, termio->c_cflag);
+ SET_LOW_BITS(termios->c_lflag, termio->c_lflag);
+#undef SET_LOW_BITS
+ memcpy(termios->c_cc, termio->c_cc, NCC);
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+extern inline void trans_to_termio(struct termios * termios,
+ struct termio * termio)
+{
+ termio->c_iflag = termios->c_iflag;
+ termio->c_oflag = termios->c_oflag;
+ termio->c_cflag = termios->c_cflag;
+ termio->c_lflag = termios->c_lflag;
+ termio->c_line = termios->c_line;
+ memcpy(termio->c_cc, termios->c_cc, NCC);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_TERMIOS_H */
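trans_from_termio() and trans_to_termio() above are the helpers the tty layer is expected to call when servicing the older struct termio ioctls (TCSETA/TCGETA) against the kernel's struct termios. A hedged sketch of the call pattern, assuming kernel context where this header is already visible:

    /* Illustrative only: round-trip between the two structures. */
    static void example_termio_roundtrip(struct termio *tio, struct termios *tios)
    {
            trans_from_termio(tio, tios);   /* TCSETA direction: termio -> termios */
            trans_to_termio(tios, tio);     /* TCGETA direction: termios -> termio */
    }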
diff --git a/i386/i386at/gpl/linux/include/asm/types.h b/i386/i386at/gpl/linux/include/asm/types.h
new file mode 100644
index 00000000..1b82bef4
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/types.h
@@ -0,0 +1,109 @@
+#ifndef _I386_TYPES_H
+#define _I386_TYPES_H
+
+#ifndef MACH_INCLUDE
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef unsigned int size_t;
+#endif
+
+#ifndef _SSIZE_T
+#define _SSIZE_T
+typedef int ssize_t;
+#endif
+
+#ifndef _PTRDIFF_T
+#define _PTRDIFF_T
+typedef int ptrdiff_t;
+#endif
+
+#ifndef _TIME_T
+#define _TIME_T
+typedef long time_t;
+#endif
+
+#ifndef _CLOCK_T
+#define _CLOCK_T
+typedef long clock_t;
+#endif
+#endif /* ! MACH_INCLUDE */
+
+typedef int pid_t;
+#ifndef MACH_INCLUDE
+typedef unsigned short uid_t;
+typedef unsigned short gid_t;
+typedef unsigned short dev_t;
+typedef unsigned long ino_t;
+typedef unsigned short mode_t;
+#endif
+typedef unsigned short umode_t;
+#ifndef MACH_INCLUDE
+typedef unsigned short nlink_t;
+typedef int daddr_t;
+typedef long off_t;
+#endif
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+#endif /* __KERNEL__ */
+
+#undef __FD_SET
+#define __FD_SET(fd,fdsetp) \
+ __asm__ __volatile__("btsl %1,%0": \
+ "=m" (*(fd_set *) (fdsetp)):"r" ((int) (fd)))
+
+#undef __FD_CLR
+#define __FD_CLR(fd,fdsetp) \
+ __asm__ __volatile__("btrl %1,%0": \
+ "=m" (*(fd_set *) (fdsetp)):"r" ((int) (fd)))
+
+#undef __FD_ISSET
+#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
+ unsigned char __result; \
+ __asm__ __volatile__("btl %1,%2 ; setb %0" \
+ :"=q" (__result) :"r" ((int) (fd)), \
+ "m" (*(fd_set *) (fdsetp))); \
+ __result; }))
+
+#undef __FD_ZERO
+#define __FD_ZERO(fdsetp) \
+ __asm__ __volatile__("cld ; rep ; stosl" \
+ :"=m" (*(fd_set *) (fdsetp)) \
+ :"a" (0), "c" (__FDSET_INTS), \
+ "D" ((fd_set *) (fdsetp)) :"cx","di")
+
+#endif
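The __FD_* macros above are the x86 bit-twiddling backends behind FD_SET/FD_CLR/FD_ISSET/FD_ZERO as used by select(). A small sketch of what they do, assuming fd_set and __FDSET_INTS are provided by the generic <linux/types.h> in this tree:

    static int example_fd_bits(void)
    {
            fd_set set;
            int fd = 3;                     /* arbitrary descriptor for the example */

            __FD_ZERO(&set);                /* rep stosl: clear the whole bitmap */
            __FD_SET(fd, &set);             /* btsl: set bit fd */
            __FD_CLR(fd, &set);             /* btrl: clear it again */
            return __FD_ISSET(fd, &set);    /* btl + setb: evaluates to 0 here */
    }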
diff --git a/i386/i386at/gpl/linux/include/asm/unistd.h b/i386/i386at/gpl/linux/include/asm/unistd.h
new file mode 100644
index 00000000..1837f210
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/asm/unistd.h
@@ -0,0 +1,322 @@
+#ifndef _ASM_I386_UNISTD_H_
+#define _ASM_I386_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_setup 0 /* used only by init, to get system going */
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_chown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_phys 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+#define __NR_oldolduname 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_profil 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_olduname 109
+#define __NR_iopl 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_vm86 113
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_modify_ldt 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+
+/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name)); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1))); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1,type2 arg2) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2))); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1,type2 arg2,type3 arg3) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3))); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3)),"S" ((long)(arg4))); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
+ type5,arg5) \
+type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5))); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#ifdef __KERNEL_SYSCALLS__
+
+/*
+ * we need this inline - forking from kernel space will result
+ * in NO COPY ON WRITE (!!!), until an execve is executed. This
+ * is no problem, except for the stack. This is handled by not letting
+ * main() use the stack at all after fork(). Thus, no function
+ * calls - which means inline code for fork too, as otherwise we
+ * would use the stack upon exit from 'fork()'.
+ *
+ * Actually only pause and fork are needed inline, so that there
+ * won't be any messing with the stack from main(), but we define
+ * some others too.
+ */
+#define __NR__exit __NR_exit
+static inline _syscall0(int,idle)
+static inline _syscall0(int,fork)
+static inline _syscall2(int,clone,unsigned long,flags,char *,esp)
+static inline _syscall0(int,pause)
+static inline _syscall0(int,setup)
+static inline _syscall0(int,sync)
+static inline _syscall0(pid_t,setsid)
+static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
+static inline _syscall1(int,dup,int,fd)
+static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
+static inline _syscall1(int,close,int,fd)
+static inline _syscall1(int,_exit,int,exitcode)
+static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
+
+static inline pid_t wait(int * wait_stat)
+{
+ return waitpid(-1,wait_stat,0);
+}
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
+ * who haven't done an "execve()") should use this: it will work within
+ * a system call from a "real" process, but the process memory space will
+ * not be free'd until both the parent and the child have exited.
+ */
+static inline pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ long retval;
+
+ __asm__ __volatile__(
+ "movl %%esp,%%esi\n\t"
+ "int $0x80\n\t" /* Linux/i386 system call */
+ "cmpl %%esp,%%esi\n\t" /* child or parent? */
+ "je 1f\n\t" /* parent - jump */
+ "pushl %3\n\t" /* push argument */
+ "call *%4\n\t" /* call fn */
+ "movl %2,%0\n\t" /* exit */
+ "int $0x80\n"
+ "1:\t"
+ :"=a" (retval)
+ :"0" (__NR_clone), "i" (__NR_exit),
+ "r" (arg), "r" (fn),
+ "b" (flags | CLONE_VM)
+ :"si");
+ return retval;
+}
+
+#endif
+
+#endif /* _ASM_I386_UNISTD_H_ */
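The _syscallN() macros generate the int $0x80 stubs that the __KERNEL_SYSCALLS__ block above instantiates. For illustration, _syscall1(int,close,int,fd) expands to roughly the following (paraphrased; errno is assumed to be declared in the including file):

    int close(int fd)
    {
            long __res;
            __asm__ volatile ("int $0x80"           /* trap into the kernel */
                    : "=a" (__res)                  /* result returned in %eax */
                    : "0" (__NR_close),             /* syscall number goes in %eax */
                      "b" ((long)(fd)));            /* first argument goes in %ebx */
            if (__res >= 0)
                    return (int) __res;
            errno = -__res;                         /* negative result encodes -errno */
            return -1;
    }

kernel_thread() above is used in the same spirit: kernel_thread(fn, arg, 0) clones a kernel-only context that runs fn(arg) and then issues __NR_exit with its return value.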
diff --git a/i386/i386at/gpl/linux/include/linux/autoconf.h b/i386/i386at/gpl/linux/include/linux/autoconf.h
new file mode 100644
index 00000000..1588347c
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/autoconf.h
@@ -0,0 +1,210 @@
+/*
+ * Automatically generated C config: don't edit
+ */
+
+/*
+ * Loadable module support
+ */
+#undef CONFIG_MODULES
+#undef CONFIG_MODVERSIONS
+#undef CONFIG_KERNELD
+
+/*
+ * General setup
+ */
+#undef CONFIG_MATH_EMULATION
+#undef CONFIG_NET
+#undef CONFIG_MAX_16M
+#define CONFIG_PCI
+#define CONFIG_SYSVIPC 1
+#define CONFIG_BINFMT_AOUT 1
+#define CONFIG_BINFMT_ELF 1
+#undef CONFIG_KERNEL_ELF
+#undef CONFIG_M386
+#define CONFIG_M486 1
+#undef CONFIG_M586
+#undef CONFIG_M686
+
+/*
+ * Floppy, IDE, and other block devices
+ */
+#define CONFIG_BLK_DEV_FD 1
+#define CONFIG_BLK_DEV_IDE 1
+
+/*
+ * Please see drivers/block/README.ide for help/info on IDE drives
+ */
+#define CONFIG_BLK_DEV_HD_IDE 1
+#define CONFIG_BLK_DEV_IDEATAPI 1
+#define CONFIG_BLK_DEV_IDECD 1
+#undef CONFIG_BLK_DEV_IDETAPE
+#define CONFIG_BLK_DEV_CMD640 1
+#define CONFIG_BLK_DEV_RZ1000 1
+#define CONFIG_BLK_DEV_TRITON 1
+#define CONFIG_IDE_CHIPSETS 1
+#undef CONFIG_BLK_DEV_RAM
+#undef CONFIG_BLK_DEV_LOOP
+#undef CONFIG_BLK_DEV_XD
+
+/*
+ * Networking options
+ */
+#undef CONFIG_FIREWALL
+#undef CONFIG_NET_ALIAS
+#define CONFIG_INET 1
+#undef CONFIG_IP_FORWARD
+#undef CONFIG_IP_MULTICAST
+#undef CONFIG_IP_ACCT
+
+/*
+ * (it is safe to leave these untouched)
+ */
+#undef CONFIG_INET_PCTCP
+#undef CONFIG_INET_RARP
+#undef CONFIG_NO_PATH_MTU_DISCOVERY
+#undef CONFIG_TCP_NAGLE_OFF
+#define CONFIG_IP_NOSR 1
+#define CONFIG_SKB_LARGE 1
+
+/*
+ *
+ */
+#undef CONFIG_IPX
+#undef CONFIG_ATALK
+#undef CONFIG_AX25
+#undef CONFIG_NETLINK
+
+/*
+ * SCSI support
+ */
+#define CONFIG_SCSI 1
+
+/*
+ * SCSI support type (disk, tape, CDrom)
+ */
+#define CONFIG_BLK_DEV_SD 1
+#undef CONFIG_CHR_DEV_ST
+#define CONFIG_BLK_DEV_SR 1
+#undef CONFIG_CHR_DEV_SG
+
+/*
+ * Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+ */
+#undef CONFIG_SCSI_MULTI_LUN
+#undef CONFIG_SCSI_CONSTANTS
+
+/*
+ * SCSI low-level drivers
+ */
+#undef CONFIG_SCSI_ADVANSYS
+#define CONFIG_SCSI_AHA152X 1
+#define CONFIG_SCSI_AHA1542 1
+#define CONFIG_SCSI_AHA1740 1
+#define CONFIG_SCSI_AIC7XXX 1
+#undef CONFIG_SCSI_BUSLOGIC
+#undef CONFIG_SCSI_EATA_DMA
+#undef CONFIG_SCSI_EATA_PIO
+#define CONFIG_SCSI_U14_34F 1
+#undef CONFIG_SCSI_FUTURE_DOMAIN
+#undef CONFIG_SCSI_GENERIC_NCR5380
+#undef CONFIG_SCSI_IN2000
+#undef CONFIG_SCSI_PAS16
+#undef CONFIG_SCSI_QLOGIC
+#define CONFIG_SCSI_SEAGATE 1
+#undef CONFIG_SCSI_T128
+#undef CONFIG_SCSI_ULTRASTOR
+#undef CONFIG_SCSI_7000FASST
+#undef CONFIG_SCSI_EATA
+#undef CONFIG_SCSI_NCR53C406A
+#undef CONFIG_SCSI_AM53C974
+#define CONFIG_SCSI_NCR53C7xx 1
+
+/*
+ * Network device support
+ */
+#undef CONFIG_NETDEVICES
+#undef CONFIG_DUMMY
+#undef CONFIG_SLIP
+#undef CONFIG_SLIP_COMPRESSED
+#undef CONFIG_SLIP_SMART
+#undef CONFIG_PPP
+
+/*
+ * CCP compressors for PPP are only built as modules.
+ */
+#undef CONFIG_SCC
+#undef CONFIG_PLIP
+#undef CONFIG_EQUALIZER
+#undef CONFIG_NET_ALPHA
+#define CONFIG_NET_VENDOR_SMC 1
+#undef CONFIG_LANCE
+#undef CONFIG_NET_VENDOR_3COM
+#undef CONFIG_EL1
+#undef CONFIG_EL2
+#define CONFIG_EL3 1
+#undef CONFIG_VORTEX
+#define CONFIG_NET_ISA 1
+#undef CONFIG_E2100
+#undef CONFIG_DEPCA
+#undef CONFIG_EWRK3
+#define CONFIG_HPLAN_PLUS 1
+#undef CONFIG_HPLAN
+#undef CONFIG_HP100
+#define CONFIG_NE2000 1
+#undef CONFIG_SK_G16
+#undef CONFIG_NET_EISA
+#undef CONFIG_NET_POCKET
+#undef CONFIG_TR
+#undef CONFIG_ARCNET
+#define CONFIG_DE4X5 1
+#define CONFIG_ULTRA 1
+#define CONFIG_WD80x3 1
+
+/*
+ * CD-ROM drivers (not for SCSI or IDE/ATAPI drives)
+ */
+#undef CONFIG_CD_NO_IDESCSI
+
+/*
+ * Filesystems
+ */
+#undef CONFIG_QUOTA
+#define CONFIG_MINIX_FS 1
+#undef CONFIG_EXT_FS
+#define CONFIG_EXT2_FS 1
+#undef CONFIG_XIA_FS
+#define CONFIG_FAT_FS 1
+#define CONFIG_MSDOS_FS 1
+#undef CONFIG_VFAT_FS
+#undef CONFIG_UMSDOS_FS
+#define CONFIG_PROC_FS 1
+#define CONFIG_NFS_FS 1
+#undef CONFIG_ROOT_NFS
+#undef CONFIG_SMB_FS
+#define CONFIG_ISO9660_FS 1
+#undef CONFIG_HPFS_FS
+#undef CONFIG_SYSV_FS
+
+/*
+ * Character devices
+ */
+#undef CONFIG_CYCLADES
+#undef CONFIG_STALDRV
+#define CONFIG_PRINTER 1
+#undef CONFIG_BUSMOUSE
+#undef CONFIG_PSMOUSE
+#undef CONFIG_MS_BUSMOUSE
+#undef CONFIG_ATIXL_BUSMOUSE
+#undef CONFIG_QIC02_TAPE
+#undef CONFIG_APM
+#undef CONFIG_WATCHDOG
+
+/*
+ * Sound
+ */
+#undef CONFIG_SOUND
+
+/*
+ * Kernel hacking
+ */
+#undef CONFIG_PROFILE
diff --git a/i386/i386at/gpl/linux/include/linux/binfmts.h b/i386/i386at/gpl/linux/include/linux/binfmts.h
new file mode 100644
index 00000000..0d1c403a
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/binfmts.h
@@ -0,0 +1,60 @@
+#ifndef _LINUX_BINFMTS_H
+#define _LINUX_BINFMTS_H
+
+#include <linux/ptrace.h>
+
+/*
+ * MAX_ARG_PAGES defines the number of pages allocated for arguments
+ * and envelope for the new program. 32 should suffice, this gives
+ * a maximum env+arg of 128kB w/4KB pages!
+ */
+#define MAX_ARG_PAGES 32
+
+/*
+ * This structure holds the arguments that are used when loading binaries.
+ */
+struct linux_binprm{
+ char buf[128];
+ unsigned long page[MAX_ARG_PAGES];
+ unsigned long p;
+ int sh_bang;
+ struct inode * inode;
+ int e_uid, e_gid;
+ int argc, envc;
+ char * filename; /* Name of binary */
+ unsigned long loader, exec;
+};
+
+/*
+ * This structure defines the functions that are used to load the binary
+ * formats that Linux accepts.
+ */
+struct linux_binfmt {
+ struct linux_binfmt * next;
+ int *use_count;
+ int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
+ int (*load_shlib)(int fd);
+ int (*core_dump)(long signr, struct pt_regs * regs);
+};
+
+extern int register_binfmt(struct linux_binfmt *);
+extern int unregister_binfmt(struct linux_binfmt *);
+
+extern int read_exec(struct inode *inode, unsigned long offset,
+ char * addr, unsigned long count, int to_kmem);
+
+extern int open_inode(struct inode * inode, int mode);
+
+extern int init_elf_binfmt(void);
+extern int init_aout_binfmt(void);
+
+extern void flush_old_exec(struct linux_binprm * bprm);
+extern unsigned long setup_arg_pages(unsigned long text_size,unsigned long * page);
+extern unsigned long * create_tables(char * p,struct linux_binprm * bprm,int ibcs);
+extern unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
+ unsigned long p, int from_kmem);
+
+/* this eventually goes away */
+#define change_ldt(a,b) setup_arg_pages(a,b)
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/bios32.h b/i386/i386at/gpl/linux/include/linux/bios32.h
new file mode 100644
index 00000000..f57398e5
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/bios32.h
@@ -0,0 +1,61 @@
+/*
+ * BIOS32, PCI BIOS functions and defines
+ * Copyright 1994, Drew Eckhardt
+ *
+ * For more information, please consult
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * M/S HF3-15A
+ * 5200 N.E. Elam Young Parkway
+ * Hillsboro, Oregon 97124-6497
+ * +1 (503) 696-2000
+ * +1 (800) 433-5177
+ *
+ * Manuals are $25 each or $50 for all three, plus $7 shipping
+ * within the United States, $35 abroad.
+ */
+
+#ifndef BIOS32_H
+#define BIOS32_H
+
+/*
+ * Error values that may be returned by the PCI bios. Use
+ * pcibios_strerror() to convert to a printable string.
+ */
+#define PCIBIOS_SUCCESSFUL 0x00
+#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
+#define PCIBIOS_BAD_VENDOR_ID 0x83
+#define PCIBIOS_DEVICE_NOT_FOUND 0x86
+#define PCIBIOS_BAD_REGISTER_NUMBER 0x87
+#define PCIBIOS_SET_FAILED 0x88
+#define PCIBIOS_BUFFER_TOO_SMALL 0x89
+
+extern int pcibios_present (void);
+extern unsigned long pcibios_init (unsigned long memory_start,
+ unsigned long memory_end);
+extern unsigned long pcibios_fixup (unsigned long memory_start,
+ unsigned long memory_end);
+extern int pcibios_find_class (unsigned int class_code, unsigned short index,
+ unsigned char *bus, unsigned char *dev_fn);
+extern int pcibios_find_device (unsigned short vendor, unsigned short dev_id,
+ unsigned short index, unsigned char *bus,
+ unsigned char *dev_fn);
+extern int pcibios_read_config_byte (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned char *val);
+extern int pcibios_read_config_word (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned short *val);
+extern int pcibios_read_config_dword (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned int *val);
+extern int pcibios_write_config_byte (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned char val);
+extern int pcibios_write_config_word (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned short val);
+extern int pcibios_write_config_dword (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned int val);
+extern const char *pcibios_strerror (int error);
+
+#endif /* BIOS32_H */
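The pcibios_* entry points declared here are the probe-time interface used by the PCI-aware drivers in this import. A hedged sketch of a typical probe loop (the vendor/device IDs and the config-space offset are placeholders, not values taken from this header):

    #include <linux/kernel.h>       /* printk() */
    #include <linux/bios32.h>

    static void example_pci_probe(void)
    {
            unsigned char bus, dev_fn;
            unsigned int bar0;
            unsigned short index;

            if (!pcibios_present())
                    return;
            for (index = 0; ; index++) {
                    /* 0x8086/0x1230 are placeholder vendor/device numbers. */
                    if (pcibios_find_device(0x8086, 0x1230, index,
                                            &bus, &dev_fn) != PCIBIOS_SUCCESSFUL)
                            break;
                    /* offset 0x10 is the first base address register */
                    if (pcibios_read_config_dword(bus, dev_fn, 0x10,
                                                  &bar0) != PCIBIOS_SUCCESSFUL)
                            continue;
                    printk("example: device at bus %d devfn %d, BAR0 %08x\n",
                           bus, dev_fn, bar0);
            }
    }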
diff --git a/i386/i386at/gpl/linux/include/linux/blk.h b/i386/i386at/gpl/linux/include/linux/blk.h
new file mode 100644
index 00000000..d7801dde
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/blk.h
@@ -0,0 +1,424 @@
+#ifndef _BLK_H
+#define _BLK_H
+
+#include <linux/blkdev.h>
+#include <linux/locks.h>
+#include <linux/config.h>
+
+/*
+ * This is used in the elevator algorithm. We don't prioritise reads
+ * over writes any more --- although reads are more time-critical than
+ * writes, by treating them equally we increase filesystem throughput.
+ * This turns out to give better overall performance. -- sct
+ */
+#define IN_ORDER(s1,s2) \
+((s1)->rq_dev < (s2)->rq_dev || (((s1)->rq_dev == (s2)->rq_dev && \
+(s1)->sector < (s2)->sector)))
+
+/*
+ * These will have to be changed to be aware of different buffer
+ * sizes etc.. It actually needs a major cleanup.
+ */
+#ifdef IDE_DRIVER
+#define SECTOR_MASK ((BLOCK_SIZE >> 9) - 1)
+#else
+#define SECTOR_MASK (blksize_size[MAJOR_NR] && \
+ blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] ? \
+ ((blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] >> 9) - 1) : \
+ ((BLOCK_SIZE >> 9) - 1))
+#endif /* IDE_DRIVER */
+
+#define SUBSECTOR(block) (CURRENT->current_nr_sectors > 0)
+
+#ifdef CONFIG_CDU31A
+extern int cdu31a_init(void);
+#endif /* CONFIG_CDU31A */
+#ifdef CONFIG_MCD
+extern int mcd_init(void);
+#endif /* CONFIG_MCD */
+#ifdef CONFIG_MCDX
+extern int mcdx_init(void);
+#endif /* CONFIG_MCDX */
+#ifdef CONFIG_SBPCD
+extern int sbpcd_init(void);
+#endif /* CONFIG_SBPCD */
+#ifdef CONFIG_AZTCD
+extern int aztcd_init(void);
+#endif /* CONFIG_AZTCD */
+#ifdef CONFIG_CDU535
+extern int sony535_init(void);
+#endif /* CONFIG_CDU535 */
+#ifdef CONFIG_GSCD
+extern int gscd_init(void);
+#endif /* CONFIG_GSCD */
+#ifdef CONFIG_CM206
+extern int cm206_init(void);
+#endif /* CONFIG_CM206 */
+#ifdef CONFIG_OPTCD
+extern int optcd_init(void);
+#endif /* CONFIG_OPTCD */
+#ifdef CONFIG_SJCD
+extern int sjcd_init(void);
+#endif /* CONFIG_SJCD */
+#ifdef CONFIG_CDI_INIT
+extern int cdi_init(void);
+#endif /* CONFIG_CDI_INIT */
+#ifdef CONFIG_BLK_DEV_HD
+extern int hd_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_IDE
+extern int ide_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_XD
+extern int xd_init(void);
+#endif
+
+extern void set_device_ro(kdev_t dev,int flag);
+void add_blkdev_randomness(int major);
+
+extern int floppy_init(void);
+extern void rd_load(void);
+extern int rd_init(void);
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+
+#define RO_IOCTLS(dev,where) \
+ case BLKROSET: if (!suser()) return -EACCES; \
+ set_device_ro((dev),get_fs_long((long *) (where))); return 0; \
+ case BLKROGET: { int __err = verify_area(VERIFY_WRITE, (void *) (where), sizeof(long)); \
+ if (!__err) put_fs_long(0!=is_read_only(dev),(long *) (where)); return __err; }
+
+#if defined(MAJOR_NR) || defined(IDE_DRIVER)
+
+/*
+ * Add entries as needed.
+ */
+
+#ifdef IDE_DRIVER
+
+#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS)
+#define DEVICE_ON(device) /* nothing */
+#define DEVICE_OFF(device) /* nothing */
+
+#elif (MAJOR_NR == RAMDISK_MAJOR)
+
+/* ram disk */
+#define DEVICE_NAME "ramdisk"
+#define DEVICE_REQUEST rd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+#define DEVICE_NO_RANDOM
+
+#elif (MAJOR_NR == FLOPPY_MAJOR)
+
+static void floppy_off(unsigned int nr);
+
+#define DEVICE_NAME "floppy"
+#define DEVICE_INTR do_floppy
+#define DEVICE_REQUEST do_fd_request
+#define DEVICE_NR(device) ( (MINOR(device) & 3) | ((MINOR(device) & 0x80 ) >> 5 ))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device) floppy_off(DEVICE_NR(device))
+
+#elif (MAJOR_NR == HD_MAJOR)
+
+/* harddisk: timeout is 6 seconds.. */
+#define DEVICE_NAME "harddisk"
+#define DEVICE_INTR do_hd
+#define DEVICE_TIMEOUT HD_TIMER
+#define TIMEOUT_VALUE (6*HZ)
+#define DEVICE_REQUEST do_hd_request
+#define DEVICE_NR(device) (MINOR(device)>>6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_DISK_MAJOR)
+
+#define DEVICE_NAME "scsidisk"
+#define DEVICE_INTR do_sd
+#define TIMEOUT_VALUE (2*HZ)
+#define DEVICE_REQUEST do_sd_request
+#define DEVICE_NR(device) (MINOR(device) >> 4)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_TAPE_MAJOR)
+
+#define DEVICE_NAME "scsitape"
+#define DEVICE_INTR do_st
+#define DEVICE_NR(device) (MINOR(device) & 0x7f)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_CDROM_MAJOR)
+
+#define DEVICE_NAME "CD-ROM"
+#define DEVICE_INTR do_sr
+#define DEVICE_REQUEST do_sr_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == XT_DISK_MAJOR)
+
+#define DEVICE_NAME "xt disk"
+#define DEVICE_REQUEST do_xd_request
+#define DEVICE_NR(device) (MINOR(device) >> 6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU31A_CDROM_MAJOR)
+
+#define DEVICE_NAME "CDU31A"
+#define DEVICE_REQUEST do_cdu31a_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcd */
+#define DEVICE_REQUEST do_mcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_X_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcdx */
+#define DEVICE_REQUEST do_mcdx_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #1"
+#define DEVICE_REQUEST do_sbpcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM2_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #2"
+#define DEVICE_REQUEST do_sbpcd2_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM3_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #3"
+#define DEVICE_REQUEST do_sbpcd3_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM4_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #4"
+#define DEVICE_REQUEST do_sbpcd4_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == AZTECH_CDROM_MAJOR)
+
+#define DEVICE_NAME "Aztech CD-ROM"
+#define DEVICE_REQUEST do_aztcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU535_CDROM_MAJOR)
+
+#define DEVICE_NAME "SONY-CDU535"
+#define DEVICE_INTR do_cdu535
+#define DEVICE_REQUEST do_cdu535_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == GOLDSTAR_CDROM_MAJOR)
+
+#define DEVICE_NAME "Goldstar R420"
+#define DEVICE_REQUEST do_gscd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CM206_CDROM_MAJOR)
+
+#define DEVICE_NAME "Philips/LMS cd-rom cm206"
+#define DEVICE_REQUEST do_cm206_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == OPTICS_CDROM_MAJOR)
+
+#define DEVICE_NAME "DOLPHIN 8000AT CD-ROM"
+#define DEVICE_REQUEST do_optcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SANYO_CDROM_MAJOR)
+
+#define DEVICE_NAME "Sanyo H94A CD-ROM"
+#define DEVICE_REQUEST do_sjcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#endif /* MAJOR_NR == whatever */
+
+#if (MAJOR_NR != SCSI_TAPE_MAJOR) && !defined(IDE_DRIVER)
+
+#ifndef CURRENT
+#define CURRENT (blk_dev[MAJOR_NR].current_request)
+#endif
+
+#define CURRENT_DEV DEVICE_NR(CURRENT->rq_dev)
+
+#ifdef DEVICE_INTR
+void (*DEVICE_INTR)(void) = NULL;
+#endif
+#ifdef DEVICE_TIMEOUT
+
+#define SET_TIMER \
+((timer_table[DEVICE_TIMEOUT].expires = jiffies + TIMEOUT_VALUE), \
+(timer_active |= 1<<DEVICE_TIMEOUT))
+
+#define CLEAR_TIMER \
+timer_active &= ~(1<<DEVICE_TIMEOUT)
+
+#define SET_INTR(x) \
+if ((DEVICE_INTR = (x)) != NULL) \
+ SET_TIMER; \
+else \
+ CLEAR_TIMER;
+
+#else
+
+#define SET_INTR(x) (DEVICE_INTR = (x))
+
+#endif /* DEVICE_TIMEOUT */
+
+static void (DEVICE_REQUEST)(void);
+
+#ifdef DEVICE_INTR
+#define CLEAR_INTR SET_INTR(NULL)
+#else
+#define CLEAR_INTR
+#endif
+
+#define INIT_REQUEST \
+ if (!CURRENT) {\
+ CLEAR_INTR; \
+ return; \
+ } \
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
+ panic(DEVICE_NAME ": request list destroyed"); \
+ if (CURRENT->bh) { \
+ if (!buffer_locked(CURRENT->bh)) \
+ panic(DEVICE_NAME ": block not locked"); \
+ }
+
+#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) && !defined(IDE_DRIVER) */
+
+/* end_request() - SCSI devices have their own version */
+/* - IDE drivers have their own copy too */
+
+#if ! SCSI_MAJOR(MAJOR_NR)
+
+#if defined(IDE_DRIVER) && !defined(_IDE_C) /* shared copy for IDE modules */
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup);
+#else
+
+#ifdef IDE_DRIVER
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup) {
+ struct request *req = hwgroup->rq;
+#else
+static void end_request(int uptodate) {
+ struct request *req = CURRENT;
+#endif /* IDE_DRIVER */
+ struct buffer_head * bh;
+
+ if (!uptodate) {
+ printk("end_request: I/O error, dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
+#ifdef MACH
+ req->errors = 1;
+ while (req->bh) {
+ bh = req->bh;
+ req->bh = bh->b_reqnext;
+ mark_buffer_uptodate(bh, 0);
+ unlock_buffer(bh);
+ }
+ goto done;
+#else
+ req->nr_sectors--;
+ req->nr_sectors &= ~SECTOR_MASK;
+ req->sector += (BLOCK_SIZE / 512);
+ req->sector &= ~SECTOR_MASK;
+#endif
+ }
+
+ if ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+ if ((bh = req->bh) != NULL) {
+ req->current_nr_sectors = bh->b_size >> 9;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ req->nr_sectors = req->current_nr_sectors;
+ printk("end_request: buffer-list destroyed\n");
+ }
+ req->buffer = bh->b_data;
+ return;
+ }
+ }
+#ifdef MACH
+ req->errors = 0;
+
+done:
+#endif
+#ifndef DEVICE_NO_RANDOM
+ add_blkdev_randomness(MAJOR(req->rq_dev));
+#endif
+#ifdef IDE_DRIVER
+ blk_dev[MAJOR(req->rq_dev)].current_request = req->next;
+ hwgroup->rq = NULL;
+#else
+ DEVICE_OFF(req->rq_dev);
+ CURRENT = req->next;
+#endif /* IDE_DRIVER */
+ if (req->sem != NULL)
+ up(req->sem);
+ req->rq_status = RQ_INACTIVE;
+#ifndef MACH
+ wake_up(&wait_for_request);
+#endif
+#ifdef MACH
+ {
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ (*blk_dev[MAJOR(req->rq_dev)].request_fn)();
+ restore_flags(flags);
+ }
+#endif
+}
+#endif /* defined(IDE_DRIVER) && !defined(_IDE_C) */
+#endif /* ! SCSI_MAJOR(MAJOR_NR) */
+
+#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
+
+#endif /* _BLK_H */
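blk.h is meant to be included by exactly one source file per block driver, after that file defines MAJOR_NR; the #elif chain above then selects the matching DEVICE_* macros, and the shared end_request()/INIT_REQUEST helpers are compiled with the driver's names. A hedged sketch of the expected inclusion pattern, using the ramdisk branch as the example:

    /* In the driver's single .c file (RAMDISK_MAJOR comes from <linux/major.h>). */
    #define MAJOR_NR RAMDISK_MAJOR
    #include <linux/blk.h>

    static void rd_request(void)            /* name fixed by DEVICE_REQUEST above */
    {
            INIT_REQUEST;                   /* return if the queue is empty, sanity-check it */
            /* ... satisfy the transfer described by CURRENT ... */
            end_request(1);                 /* success: complete the current buffer head */
    }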
diff --git a/i386/i386at/gpl/linux/include/linux/blkdev.h b/i386/i386at/gpl/linux/include/linux/blkdev.h
new file mode 100644
index 00000000..ba4d08af
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/blkdev.h
@@ -0,0 +1,56 @@
+#ifndef _LINUX_BLKDEV_H
+#define _LINUX_BLKDEV_H
+
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/genhd.h>
+
+/*
+ * Ok, this is an expanded form so that we can use the same
+ * request for paging requests when that is implemented. In
+ * paging, 'bh' is NULL, and the semaphore is used to wait
+ * for read/write completion.
+ */
+struct request {
+ volatile int rq_status; /* should split this into a few status bits */
+#define RQ_INACTIVE (-1)
+#define RQ_ACTIVE 1
+#define RQ_SCSI_BUSY 0xffff
+#define RQ_SCSI_DONE 0xfffe
+#define RQ_SCSI_DISCONNECTING 0xffe0
+
+ kdev_t rq_dev;
+ int cmd; /* READ or WRITE */
+ int errors;
+ unsigned long sector;
+ unsigned long nr_sectors;
+ unsigned long current_nr_sectors;
+ char * buffer;
+ struct semaphore * sem;
+ struct buffer_head * bh;
+ struct buffer_head * bhtail;
+ struct request * next;
+};
+
+struct blk_dev_struct {
+ void (*request_fn)(void);
+ struct request * current_request;
+};
+
+struct sec_size {
+ unsigned block_size;
+ unsigned block_size_bits;
+};
+
+extern struct sec_size * blk_sec[MAX_BLKDEV];
+extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
+extern struct wait_queue * wait_for_request;
+extern void resetup_one_dev(struct gendisk *dev, int drive);
+
+extern int * blk_size[MAX_BLKDEV];
+
+extern int * blksize_size[MAX_BLKDEV];
+
+extern int * hardsect_size[MAX_BLKDEV];
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/cdrom.h b/i386/i386at/gpl/linux/include/linux/cdrom.h
new file mode 100644
index 00000000..5811ff0f
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/cdrom.h
@@ -0,0 +1,465 @@
+/*
+ * -- <linux/cdrom.h>
+ * general (not only SCSI) header library for linux CDROM drivers
+ * (C) 1992 David Giller rafetmad@oxy.edu
+ * 1994, 1995 Eberhard Moenkeberg emoenke@gwdg.de
+ *
+ */
+
+#ifndef _LINUX_CDROM_H
+#define _LINUX_CDROM_H
+
+/*
+ * some fix numbers
+ */
+#define CD_MINS 74 /* max. minutes per CD, not really a limit */
+#define CD_SECS 60 /* seconds per minute */
+#define CD_FRAMES 75 /* frames per second */
+
+#define CD_SYNC_SIZE 12 /* 12 sync bytes per raw data frame, not transferred by the drive */
+#define CD_HEAD_SIZE 4 /* header (address) bytes per raw data frame */
+#define CD_SUBHEAD_SIZE 8 /* subheader bytes per raw XA data frame */
+#define CD_XA_HEAD (CD_HEAD_SIZE+CD_SUBHEAD_SIZE) /* "before data" part of raw XA frame */
+#define CD_XA_SYNC_HEAD (CD_SYNC_SIZE+CD_XA_HEAD)/* sync bytes + header of XA frame */
+
+#define CD_FRAMESIZE 2048 /* bytes per frame, "cooked" mode */
+#define CD_FRAMESIZE_RAW 2352 /* bytes per frame, "raw" mode */
+/* most drives don't deliver everything: */
+#define CD_FRAMESIZE_RAW1 (CD_FRAMESIZE_RAW-CD_SYNC_SIZE) /* 2340 */
+#define CD_FRAMESIZE_RAW0 (CD_FRAMESIZE_RAW-CD_SYNC_SIZE-CD_HEAD_SIZE) /* 2336 */
+
+#define CD_EDC_SIZE 4 /* bytes EDC per most raw data frame types */
+#define CD_ZERO_SIZE 8 /* bytes zero per yellow book mode 1 frame */
+#define CD_ECC_SIZE 276 /* bytes ECC per most raw data frame types */
+#define CD_XA_TAIL (CD_EDC_SIZE+CD_ECC_SIZE) /* "after data" part of raw XA frame */
+
+#define CD_FRAMESIZE_SUB 96 /* subchannel data "frame" size */
+#define CD_MSF_OFFSET 150 /* MSF numbering offset of first frame */
+
+#define CD_CHUNK_SIZE 24 /* lowest-level "data bytes piece" */
+#define CD_NUM_OF_CHUNKS 98 /* chunks per frame */
+
+#define CD_FRAMESIZE_XA CD_FRAMESIZE_RAW1 /* obsolete name */
+#define CD_BLOCK_OFFSET CD_MSF_OFFSET /* obsolete name */
+
+/*
+ * the raw frame layout:
+ *
+ * - audio (red): | audio_sample_bytes |
+ * | 2352 |
+ *
+ * - data (yellow, mode1): | sync - head - data - EDC - zero - ECC |
+ * | 12 - 4 - 2048 - 4 - 8 - 276 |
+ *
+ * - data (yellow, mode2): | sync - head - data |
+ * | 12 - 4 - 2336 |
+ *
+ * - XA data (green, mode2 form1): | sync - head - sub - data - EDC - ECC |
+ * | 12 - 4 - 8 - 2048 - 4 - 276 |
+ *
+ * - XA data (green, mode2 form2): | sync - head - sub - data - EDC |
+ * | 12 - 4 - 8 - 2324 - 4 |
+ */
+
+/*
+ * CDROM IOCTL structures
+ */
+
+struct cdrom_blk
+{
+ unsigned from;
+ unsigned short len;
+};
+
+
+struct cdrom_msf
+{
+ u_char cdmsf_min0; /* start minute */
+ u_char cdmsf_sec0; /* start second */
+ u_char cdmsf_frame0; /* start frame */
+ u_char cdmsf_min1; /* end minute */
+ u_char cdmsf_sec1; /* end second */
+ u_char cdmsf_frame1; /* end frame */
+};
+
+struct cdrom_ti
+{
+ u_char cdti_trk0; /* start track */
+ u_char cdti_ind0; /* start index */
+ u_char cdti_trk1; /* end track */
+ u_char cdti_ind1; /* end index */
+};
+
+struct cdrom_tochdr
+{
+ u_char cdth_trk0; /* start track */
+ u_char cdth_trk1; /* end track */
+};
+
+struct cdrom_tocentry
+{
+ u_char cdte_track;
+ u_char cdte_adr :4;
+ u_char cdte_ctrl :4;
+ u_char cdte_format;
+ union
+ {
+ struct
+ {
+ u_char minute;
+ u_char second;
+ u_char frame;
+ } msf;
+ int lba;
+ } cdte_addr;
+ u_char cdte_datamode;
+};
+
+/*
+ * CD-ROM address types (cdrom_tocentry.cdte_format)
+ */
+#define CDROM_LBA 0x01 /* "logical block": first frame is #0 */
+#define CDROM_MSF 0x02 /* "minute-second-frame": binary, not bcd here! */
+
+/*
+ * bit to tell whether track is data or audio (cdrom_tocentry.cdte_ctrl)
+ */
+#define CDROM_DATA_TRACK 0x04
+
+/*
+ * The leadout track is always 0xAA, regardless of # of tracks on disc
+ */
+#define CDROM_LEADOUT 0xAA
+
+struct cdrom_subchnl
+{
+ u_char cdsc_format;
+ u_char cdsc_audiostatus;
+ u_char cdsc_adr: 4;
+ u_char cdsc_ctrl: 4;
+ u_char cdsc_trk;
+ u_char cdsc_ind;
+ union
+ {
+ struct
+ {
+ u_char minute;
+ u_char second;
+ u_char frame;
+ } msf;
+ int lba;
+ } cdsc_absaddr;
+ union
+ {
+ struct
+ {
+ u_char minute;
+ u_char second;
+ u_char frame;
+ } msf;
+ int lba;
+ } cdsc_reladdr;
+};
+
+/*
+ * audio states (from SCSI-2, but seen with other drives, too)
+ */
+#define CDROM_AUDIO_INVALID 0x00 /* audio status not supported */
+#define CDROM_AUDIO_PLAY 0x11 /* audio play operation in progress */
+#define CDROM_AUDIO_PAUSED 0x12 /* audio play operation paused */
+#define CDROM_AUDIO_COMPLETED 0x13 /* audio play successfully completed */
+#define CDROM_AUDIO_ERROR 0x14 /* audio play stopped due to error */
+#define CDROM_AUDIO_NO_STATUS 0x15 /* no current audio status to return */
+
+struct cdrom_volctrl
+{
+ u_char channel0;
+ u_char channel1;
+ u_char channel2;
+ u_char channel3;
+};
+
+struct cdrom_read
+{
+ int cdread_lba;
+ caddr_t cdread_bufaddr;
+ int cdread_buflen;
+};
+
+/*
+ * extensions for transferring audio frames
+ * currently used by sbpcd.c, cdu31a.c, ide-cd.c
+ */
+struct cdrom_read_audio
+{
+ union
+ {
+ struct
+ {
+ u_char minute;
+ u_char second;
+ u_char frame;
+ } msf;
+ int lba;
+ } addr; /* frame address */
+ u_char addr_format; /* CDROM_LBA or CDROM_MSF */
+ int nframes; /* number of 2352-byte-frames to read at once, limited by the drivers */
+ u_char *buf; /* frame buffer (size: nframes*2352 bytes) */
+};
+
+/*
+ * this has to be the "arg" of the CDROMMULTISESSION ioctl
+ * for obtaining multi session info.
+ * The returned "addr" is valid only if "xa_flag" is true.
+ */
+struct cdrom_multisession
+{
+ union
+ {
+ struct
+ {
+ u_char minute;
+ u_char second;
+ u_char frame;
+ } msf;
+ int lba;
+ } addr; /* frame address: start-of-last-session (not the new "frame 16"!)*/
+ u_char xa_flag; /* 1: "is XA disk" */
+ u_char addr_format; /* CDROM_LBA or CDROM_MSF */
+};
+
+#ifdef FIVETWELVE
+#define CDROM_MODE1_SIZE 512
+#else
+#define CDROM_MODE1_SIZE 2048
+#endif /* FIVETWELVE */
+#define CDROM_MODE2_SIZE 2336
+
+/*
+ * CD-ROM IOCTL commands
+ * For IOCTL calls, we will commandeer byte 0x53, or 'S'.
+ */
+
+#define CDROMPAUSE 0x5301
+#define CDROMRESUME 0x5302
+#define CDROMPLAYMSF 0x5303 /* (struct cdrom_msf) */
+#define CDROMPLAYTRKIND 0x5304 /* (struct cdrom_ti) */
+
+#define CDROMREADTOCHDR 0x5305 /* (struct cdrom_tochdr) */
+#define CDROMREADTOCENTRY 0x5306 /* (struct cdrom_tocentry) */
+
+#define CDROMSTOP 0x5307 /* stop the drive motor */
+#define CDROMSTART 0x5308 /* turn the motor on */
+
+#define CDROMEJECT 0x5309 /* eject CD-ROM media */
+
+#define CDROMVOLCTRL 0x530a /* (struct cdrom_volctrl) */
+
+#define CDROMSUBCHNL 0x530b /* (struct cdrom_subchnl) */
+
+#define CDROMREADMODE2 0x530c /* (struct cdrom_read) */
+ /* read type-2 data */
+
+#define CDROMREADMODE1 0x530d /* (struct cdrom_read) */
+ /* read type-1 data */
+
+#define CDROMREADAUDIO 0x530e /* (struct cdrom_read_audio) */
+
+/*
+ * enable (1) / disable (0) auto-ejecting
+ */
+#define CDROMEJECT_SW 0x530f /* arg: 0 or 1 */
+
+/*
+ * obtain the start-of-last-session address of multi session disks
+ */
+#define CDROMMULTISESSION 0x5310 /* (struct cdrom_multisession) */
+
+/*
+ * obtain the "universal product code" number
+ * (only some data disks have it coded)
+ */
+#define CDROM_GET_UPC 0x5311 /* 8 bytes returned */
+
+#define CDROMRESET 0x5312 /* hard-reset the drive */
+#define CDROMVOLREAD 0x5313 /* let the drive tell its volume setting */
+ /* (struct cdrom_volctrl) */
+
+/*
+ * these ioctls are used in aztcd.c
+ */
+#define CDROMREADRAW 0x5314 /* read data in raw mode */
+#define CDROMREADCOOKED 0x5315 /* read data in cooked mode */
+#define CDROMSEEK 0x5316 /* seek msf address */
+
+/*
+ * for playing audio in logical block addressing mode
+ */
+#define CDROMPLAYBLK 0x5317 /* (struct cdrom_blk) */
+
+
+/*
+ * CD-ROM-specific SCSI command opcodes
+ */
+
+/*
+ * Group 2 (10-byte). All of these are called 'optional' by SCSI-II.
+ */
+#define SCMD_READ_TOC 0x43 /* read table of contents */
+#define SCMD_PLAYAUDIO_MSF 0x47 /* play data at time offset */
+#define SCMD_PLAYAUDIO_TI 0x48 /* play data at track/index */
+#define SCMD_PAUSE_RESUME 0x4B /* pause/resume audio */
+#define SCMD_READ_SUBCHANNEL 0x42 /* read SC info on playing disc */
+#define SCMD_PLAYAUDIO10 0x45 /* play data at logical block */
+#define SCMD_READ_HEADER 0x44 /* read TOC header */
+
+/*
+ * Group 5
+ */
+#define SCMD_PLAYAUDIO12 0xA5 /* play data at logical block */
+#define SCMD_PLAYTRACK_REL12 0xA9 /* play track at relative offset */
+
+/*
+ * Group 6 Commands
+ */
+#define SCMD_CD_PLAYBACK_CONTROL 0xC9 /* Sony vendor-specific audio */
+#define SCMD_CD_PLAYBACK_STATUS 0xC4 /* control opcodes */
+
+/*
+ * CD-ROM capacity structure.
+ */
+struct scsi_capacity
+{
+ u_long capacity;
+ u_long lbasize;
+};
+
+/*
+ * CD-ROM MODE_SENSE/MODE_SELECT parameters
+ */
+#define ERR_RECOVERY_PARMS 0x01
+#define DISCO_RECO_PARMS 0x02
+#define FORMAT_PARMS 0x03
+#define GEOMETRY_PARMS 0x04
+#define CERTIFICATION_PARMS 0x06
+#define CACHE_PARMS 0x38
+
+/*
+ * standard mode-select header prepended to all mode-select commands
+ */
+struct ccs_modesel_head
+{
+ u_char _r1; /* reserved */
+ u_char medium; /* device-specific medium type */
+ u_char _r2; /* reserved */
+ u_char block_desc_length; /* block descriptor length */
+ u_char density; /* device-specific density code */
+ u_char number_blocks_hi; /* number of blocks in this block desc */
+ u_char number_blocks_med;
+ u_char number_blocks_lo;
+ u_char _r3;
+ u_char block_length_hi; /* block length for blocks in this desc */
+ u_short block_length;
+};
+
+/*
+ * error recovery parameters
+ */
+struct ccs_err_recovery
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char awre : 1; /* auto write realloc enabled */
+ u_char arre : 1; /* auto read realloc enabled */
+ u_char tb : 1; /* transfer block */
+ u_char rc : 1; /* read continuous */
+ u_char eec : 1; /* enable early correction */
+ u_char per : 1; /* post error */
+ u_char dte : 1; /* disable transfer on error */
+ u_char dcr : 1; /* disable correction */
+ u_char retry_count; /* error retry count */
+ u_char correction_span; /* largest recov. to be attempted, bits */
+ u_char head_offset_count; /* head offset (2's C) for each retry */
+ u_char strobe_offset_count; /* data strobe */
+ u_char recovery_time_limit; /* time limit on recovery attempts */
+};
+
+/*
+ * disco/reco parameters
+ */
+struct ccs_disco_reco
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char buffer_full_ratio; /* write buffer reconnect threshold */
+ u_char buffer_empty_ratio; /* read */
+ u_short bus_inactivity_limit; /* limit on bus inactivity time */
+ u_short disconnect_time_limit; /* minimum disconnect time */
+ u_short connect_time_limit; /* minimum connect time */
+ u_short _r2; /* reserved */
+};
+
+/*
+ * drive geometry parameters
+ */
+struct ccs_geometry
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char cyl_ub; /* #cyls */
+ u_char cyl_mb;
+ u_char cyl_lb;
+ u_char heads; /* #heads */
+ u_char precomp_cyl_ub; /* precomp start */
+ u_char precomp_cyl_mb;
+ u_char precomp_cyl_lb;
+ u_char current_cyl_ub; /* reduced current start */
+ u_char current_cyl_mb;
+ u_char current_cyl_lb;
+ u_short step_rate; /* stepping motor rate */
+ u_char landing_cyl_ub; /* landing zone */
+ u_char landing_cyl_mb;
+ u_char landing_cyl_lb;
+ u_char _r2;
+ u_char _r3;
+ u_char _r4;
+};
+
+/*
+ * cache parameters
+ */
+struct ccs_cache
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char mode; /* cache control byte */
+ u_char threshold; /* prefetch threshold */
+ u_char max_prefetch; /* maximum prefetch size */
+ u_char max_multiplier; /* maximum prefetch multiplier */
+ u_char min_prefetch; /* minimum prefetch size */
+ u_char min_multiplier; /* minimum prefetch multiplier */
+ u_char _r2[8];
+};
+
+#endif /* _LINUX_CDROM_H */
+/*==========================================================================*/
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 8
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -8
+ * c-argdecl-indent: 8
+ * c-label-offset: -8
+ * c-continued-statement-offset: 8
+ * c-continued-brace-offset: 0
+ * End:
+ */
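The structures above follow the SCSI CCS MODE SELECT layout: a four-byte parameter-list header, an optional eight-byte block descriptor, then one or more pages identified by page_code. As a rough sketch of how a caller could assemble a cache-page parameter list from them (the helper name is made up, and the memcpy-based packing assumes the compiler lays out the bit-fields in the order drawn above, which is not guaranteed):

#include <linux/string.h>

/* Illustrative sketch: build a MODE SELECT parameter list carrying the
 * cache page.  A production driver would pack the byte stream explicitly
 * rather than trust the bit-field layout. */
static int build_cache_modesel(unsigned char *buf, unsigned char threshold)
{
	struct ccs_modesel_head h;
	struct ccs_cache c;

	memset(&h, 0, sizeof(h));
	memset(&c, 0, sizeof(c));

	h.block_desc_length = 0;		/* no block descriptor follows */
	c.page_code = CACHE_PARMS;		/* 0x38 */
	c.page_length = sizeof(c) - 2;		/* bytes after the page_length field */
	c.threshold = threshold;		/* prefetch threshold */

	memcpy(buf, &h, 4);			/* four-byte mode-select header only */
	memcpy(buf + 4, &c, sizeof(c));
	return 4 + sizeof(c);			/* parameter list length */
}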
diff --git a/i386/i386at/gpl/linux/include/linux/config.h b/i386/i386at/gpl/linux/include/linux/config.h
new file mode 100644
index 00000000..a54cdff2
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/config.h
@@ -0,0 +1,41 @@
+#ifndef _LINUX_CONFIG_H
+#define _LINUX_CONFIG_H
+
+#include <linux/autoconf.h>
+
+/*
+ * Defines for what uname() should return
+ */
+#ifndef UTS_SYSNAME
+#define UTS_SYSNAME "Linux"
+#endif
+
+#ifndef UTS_MACHINE
+#define UTS_MACHINE "unknown"
+#endif
+
+#ifndef UTS_NODENAME
+#define UTS_NODENAME "(none)" /* set by sethostname() */
+#endif
+
+#ifndef UTS_DOMAINNAME
+#define UTS_DOMAINNAME "(none)" /* set by setdomainname() */
+#endif
+
+/*
+ * The definitions for UTS_RELEASE and UTS_VERSION are now defined
+ * in linux/version.h, and should only be used by linux/version.c
+ */
+
+/* Don't touch these, unless you really know what you're doing. */
+#define DEF_INITSEG 0x9000
+#define DEF_SYSSEG 0x1000
+#define DEF_SETUPSEG 0x9020
+#define DEF_SYSSIZE 0x7F00
+
+/* internal svga startup constants */
+#define NORMAL_VGA 0xffff /* 80x25 mode */
+#define EXTENDED_VGA 0xfffe /* 80x50 mode */
+#define ASK_VGA 0xfffd /* ask for it at bootup */
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/delay.h b/i386/i386at/gpl/linux/include/linux/delay.h
new file mode 100644
index 00000000..50b5d0b1
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/delay.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_DELAY_H
+#define _LINUX_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines, using a pre-computed "loops_per_second" value.
+ */
+
+extern unsigned long loops_per_sec;
+
+#include <asm/delay.h>
+
+#endif /* defined(_LINUX_DELAY_H) */
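loops_per_sec is the calibrated busy-loop rate that the architecture code in <asm/delay.h> turns into its microsecond delay; the idea is simply to spin for usecs * loops_per_sec / 1000000 iterations. A crude sketch of that arithmetic (the real routine is architecture-specific assembly and avoids the divide; the function name here is hypothetical):

#include <linux/delay.h>

/* Crude sketch of the idea behind the calibrated delay: convert
 * microseconds into iterations of a busy loop the compiler cannot
 * optimize away.  The real version lives in <asm/delay.h>. */
static void crude_udelay(unsigned long usecs)
{
	unsigned long loops = (loops_per_sec / 1000000) * usecs;

	while (loops--)
		__asm__ __volatile__("" : : : "memory");
}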
diff --git a/i386/i386at/gpl/linux/include/linux/errno.h b/i386/i386at/gpl/linux/include/linux/errno.h
new file mode 100644
index 00000000..ac212844
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/errno.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_ERRNO_H
+#define _LINUX_ERRNO_H
+
+#include <asm/errno.h>
+
+#ifdef __KERNEL__
+
+/* Should never be seen by user programs */
+#define ERESTARTSYS 512
+#define ERESTARTNOINTR 513
+#define ERESTARTNOHAND 514 /* restart if no handler.. */
+#define ENOIOCTLCMD 515 /* No ioctl command */
+
+#endif
+
+#endif
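ERESTARTSYS and its relatives never reach user programs: a driver returns one of them when a sleep is interrupted by a signal, and the system-call exit path either restarts the call or turns the value into EINTR. A sketch of the usual pattern, assuming the era's wait-queue and signal fields from <linux/sched.h> (the function and its ready() callback are hypothetical):

#include <linux/sched.h>
#include <linux/errno.h>

/* Sketch: sleep until ready() is true, backing out with -ERESTARTSYS
 * if a signal arrives so the syscall layer can restart the call or
 * return EINTR on the driver's behalf. */
static int wait_until_ready(struct wait_queue **q, int (*ready)(void))
{
	while (!ready()) {
		interruptible_sleep_on(q);
		if (current->signal & ~current->blocked)
			return -ERESTARTSYS;
	}
	return 0;
}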
diff --git a/i386/i386at/gpl/linux/include/linux/etherdevice.h b/i386/i386at/gpl/linux/include/linux/etherdevice.h
new file mode 100644
index 00000000..b29b76b9
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/etherdevice.h
@@ -0,0 +1,55 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Ethernet handlers.
+ *
+ * Version: @(#)eth.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Relocated to include/linux where it belongs by Alan Cox
+ * <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * WARNING: This move may well be temporary. This file will get merged with others RSN.
+ *
+ */
+#ifndef _LINUX_ETHERDEVICE_H
+#define _LINUX_ETHERDEVICE_H
+
+
+#include <linux/if_ether.h>
+
+#ifdef __KERNEL__
+extern int eth_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len);
+extern int eth_rebuild_header(void *buff, struct device *dev,
+ unsigned long dst, struct sk_buff *skb);
+#ifdef MACH
+#define eth_type_trans(skb, dev) 0
+#else
+extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev);
+#endif
+extern void eth_header_cache_bind(struct hh_cache ** hhp, struct device *dev,
+ unsigned short htype, __u32 daddr);
+extern void eth_header_cache_update(struct hh_cache *hh, struct device *dev, unsigned char * haddr);
+#ifdef MACH
+#define eth_copy_and_sum(skb, src, length, base) \
+ memcpy ((skb)->data, src, length)
+#else
+extern void eth_copy_and_sum(struct sk_buff *dest,
+ unsigned char *src, int length, int base);
+#endif
+extern struct device * init_etherdev(struct device *, int);
+
+#endif
+
+#endif /* _LINUX_ETHERDEVICE_H */
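Under MACH the two receive helpers collapse: eth_copy_and_sum() becomes a plain memcpy into the sk_buff data area and eth_type_trans() is stubbed to 0. A sketch of a driver receive path written against this interface, using the 1.3-era sk_buff calls (the function, device and ring-buffer names are illustrative, and the allocation helpers may differ in the Mach glue):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* Sketch of a receive path that compiles against either definition of
 * eth_copy_and_sum()/eth_type_trans() above. */
static void example_rx(struct device *dev, unsigned char *ring_buf, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (skb == NULL)
		return;				/* drop on allocation failure */

	skb->dev = dev;
	skb_reserve(skb, 2);			/* longword-align the IP header */
	skb_put(skb, len);
	eth_copy_and_sum(skb, ring_buf, len, 0);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* hand the frame to the stack */
}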
diff --git a/i386/i386at/gpl/linux/include/linux/fcntl.h b/i386/i386at/gpl/linux/include/linux/fcntl.h
new file mode 100644
index 00000000..9de3512e
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/fcntl.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_FCNTL_H
+#define _LINUX_FCNTL_H
+
+#include <asm/fcntl.h>
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/fd.h b/i386/i386at/gpl/linux/include/linux/fd.h
new file mode 100644
index 00000000..87a837b6
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/fd.h
@@ -0,0 +1,368 @@
+#ifndef _LINUX_FD_H
+#define _LINUX_FD_H
+
+#include <linux/ioctl.h>
+
+/* New file layout: Now the ioctl definitions immediately follow the
+ * definitions of the structures that they use */
+
+/*
+ * Geometry
+ */
+struct floppy_struct {
+ unsigned int size, /* nr of sectors total */
+ sect, /* sectors per track */
+ head, /* nr of heads */
+ track, /* nr of tracks */
+ stretch; /* !=0 means double track steps */
+#define FD_STRETCH 1
+#define FD_SWAPSIDES 2
+
+ unsigned char gap, /* gap1 size */
+
+ rate, /* data rate. |= 0x40 for perpendicular */
+#define FD_2M 0x4
+#define FD_SIZECODEMASK 0x38
+#define FD_SIZECODE(floppy) (((((floppy)->rate&FD_SIZECODEMASK)>> 3)+ 2) %8)
+#define FD_SECTSIZE(floppy) ( (floppy)->rate & FD_2M ? \
+ 512 : 128 << FD_SIZECODE(floppy) )
+#define FD_PERP 0x40
+
+ spec1, /* stepping rate, head unload time */
+ fmt_gap; /* gap2 size */
+ const char * name; /* used only for predefined formats */
+};
+
+
+/* commands needing write access have 0x40 set */
+/* commands needing super user access have 0x80 set */
+
+#define FDCLRPRM _IO(2, 0x41)
+/* clear user-defined parameters */
+
+#define FDSETPRM _IOW(2, 0x42, struct floppy_struct)
+#define FDSETMEDIAPRM FDSETPRM
+/* set user-defined parameters for current media */
+
+#define FDDEFPRM _IOW(2, 0x43, struct floppy_struct)
+#define FDGETPRM _IOR(2, 0x04, struct floppy_struct)
+#define FDDEFMEDIAPRM FDDEFPRM
+#define FDGETMEDIAPRM FDGETPRM
+/* set/get disk parameters */
+
+
+#define FDMSGON _IO(2,0x45)
+#define FDMSGOFF _IO(2,0x46)
+/* issue/don't issue kernel messages on media type change */
+
+
+/*
+ * Formatting (obsolete)
+ */
+#define FD_FILL_BYTE 0xF6 /* format fill byte. */
+
+struct format_descr {
+ unsigned int device,head,track;
+};
+
+#define FDFMTBEG _IO(2,0x47)
+/* begin formatting a disk */
+#define FDFMTTRK _IOW(2,0x48, struct format_descr)
+/* format the specified track */
+#define FDFMTEND _IO(2,0x49)
+/* end formatting a disk */
+
+
+/*
+ * Error thresholds
+ */
+struct floppy_max_errors {
+ unsigned int
+ abort, /* number of errors to be reached before aborting */
+ read_track, /* maximal number of errors permitted to read an
+ * entire track at once */
+ reset, /* maximal number of errors before a reset is tried */
+ recal, /* maximal number of errors before a recalibrate is
+ * tried */
+
+ /*
+ * Threshold for reporting FDC errors to the console.
+ * Setting this to zero may flood your screen when using
+ * ultra cheap floppies ;-)
+ */
+ reporting;
+
+};
+
+#define FDSETEMSGTRESH _IO(2,0x4a)
+/* set fdc error reporting threshold */
+
+#define FDFLUSH _IO(2,0x4b)
+/* flush buffers for media; either for verifying media, or for
+ * handling a media change without closing the file descriptor */
+
+#define FDSETMAXERRS _IOW(2, 0x4c, struct floppy_max_errors)
+#define FDGETMAXERRS _IOR(2, 0x0e, struct floppy_max_errors)
+/* set/get the abort and read_track error thresholds. See also the
+ * floppy_drive_params structure */
+
+
+typedef char floppy_drive_name[16];
+#define FDGETDRVTYP _IOR(2, 0x0f, floppy_drive_name)
+/* get drive type: 5 1/4 or 3 1/2 */
+
+
+/*
+ * Drive parameters (user modifiable)
+ */
+struct floppy_drive_params {
+ char cmos; /* cmos type */
+
+ /* Spec2 is (HLD<<1 | ND), where HLD is head load time (1=2ms, 2=4 ms
+ * etc) and ND is set means no DMA. Hardcoded to 6 (HLD=6ms, use DMA).
+ */
+ unsigned long max_dtr; /* Step rate, usec */
+ unsigned long hlt; /* Head load/settle time, msec */
+ unsigned long hut; /* Head unload time (remnant of
+ * 8" drives) */
+ unsigned long srt; /* Step rate, usec */
+
+ unsigned long spinup; /* time needed for spinup (expressed
+ * in jiffies) */
+ unsigned long spindown; /* timeout needed for spindown */
+ unsigned char spindown_offset; /* decides in which position the disk
+ * will stop */
+ unsigned char select_delay; /* delay to wait after select */
+ unsigned char rps; /* rotations per second */
+ unsigned char tracks; /* maximum number of tracks */
+ unsigned long timeout; /* timeout for interrupt requests */
+
+ unsigned char interleave_sect; /* if there are more sectors, use
+ * interleave */
+
+ struct floppy_max_errors max_errors;
+
+ char flags; /* various flags, including ftd_msg */
+/*
+ * Announce successful media type detection and media information loss after
+ * disk changes.
+ * Also used to enable/disable printing of overrun warnings.
+ */
+
+#define FTD_MSG 0x10
+#define FD_BROKEN_DCL 0x20
+#define FD_DEBUG 0x02
+#define FD_SILENT_DCL_CLEAR 0x4
+#define FD_INVERTED_DCL 0x80
+
+ char read_track; /* use readtrack during probing? */
+
+/*
+ * Auto-detection. Each drive type has eight formats which are
+ * used in succession to try to read the disk. If the FDC cannot lock onto
+ * the disk, the next format is tried. This uses the variable 'probing'.
+ */
+ short autodetect[8]; /* autodetected formats */
+
+ int checkfreq; /* how often should the drive be checked for disk
+ * changes */
+ int native_format; /* native format of this drive */
+};
+
+enum {
+ FD_NEED_TWADDLE_BIT, /* more magic */
+ FD_VERIFY_BIT, /* inquire for write protection */
+ FD_DISK_NEWCHANGE_BIT, /* change detected, and no action undertaken yet
+ * to clear media change status */
+ FD_UNUSED_BIT,
+ FD_DISK_CHANGED_BIT, /* disk has been changed since last i/o */
+ FD_DISK_WRITABLE_BIT /* disk is writable */
+};
+
+#define FDSETDRVPRM _IOW(2, 0x90, struct floppy_drive_params)
+#define FDGETDRVPRM _IOR(2, 0x11, struct floppy_drive_params)
+/* set/get drive parameters */
+
+
+/*
+ * Current drive state (not directly modifiable by the user, read-only)
+ */
+struct floppy_drive_struct {
+ signed char flags;
+/* values for these flags */
+#define FD_NEED_TWADDLE (1 << FD_NEED_TWADDLE_BIT)
+#define FD_VERIFY (1 << FD_VERIFY_BIT)
+#define FD_DISK_NEWCHANGE (1 << FD_DISK_NEWCHANGE_BIT)
+#define FD_DISK_CHANGED (1 << FD_DISK_CHANGED_BIT)
+#define FD_DISK_WRITABLE (1 << FD_DISK_WRITABLE_BIT)
+
+ unsigned long spinup_date;
+ unsigned long select_date;
+ unsigned long first_read_date;
+ short probed_format;
+ short track; /* current track */
+ short maxblock; /* id of highest block read */
+ short maxtrack; /* id of highest half track read */
+ int generation; /* how many diskchanges? */
+
+/*
+ * (User-provided) media information is _not_ discarded after a media change
+ * if the corresponding keep_data flag is non-zero. Positive values are
+ * decremented after each probe.
+ */
+ int keep_data;
+
+ /* Prevent "aliased" accesses. */
+ int fd_ref;
+ int fd_device;
+ int last_checked; /* when was the drive last checked for a disk
+ * change? */
+
+ char *dmabuf;
+ int bufblocks;
+};
+
+#define FDGETDRVSTAT _IOR(2, 0x12, struct floppy_drive_struct)
+#define FDPOLLDRVSTAT _IOR(2, 0x13, struct floppy_drive_struct)
+/* get drive state: GET returns the cached state, POLL polls for new state */
+
+
+/*
+ * reset FDC
+ */
+enum reset_mode {
+ FD_RESET_IF_NEEDED, /* reset only if the reset flag is set */
+ FD_RESET_IF_RAWCMD, /* obsolete */
+ FD_RESET_ALWAYS /* reset always */
+};
+#define FDRESET _IO(2, 0x54)
+
+
+/*
+ * FDC state
+ */
+struct floppy_fdc_state {
+ int spec1; /* spec1 value last used */
+ int spec2; /* spec2 value last used */
+ int dtr;
+ unsigned char version; /* FDC version code */
+ unsigned char dor;
+ int address; /* io address */
+ unsigned int rawcmd:2;
+ unsigned int reset:1;
+ unsigned int need_configure:1;
+ unsigned int perp_mode:2;
+ unsigned int has_fifo:1;
+ unsigned int driver_version; /* version code for floppy driver */
+#define FD_DRIVER_VERSION 0x100
+/* user programs using the floppy API should use floppy_fdc_state to
+ * get the version number of the floppy driver that they are running
+ * on. If this version number is bigger than the one compiled into the
+ * user program (the FD_DRIVER_VERSION define), it should be prepared
+ * to handle bigger structures
+ */
+
+ unsigned char track[4];
+ /* Position of the heads of the 4 units attached to this FDC,
+ * as stored on the FDC. In the future, the position as stored
+ * on the FDC might not agree with the actual physical
+ * position of these drive heads. By allowing such
+ * disagreement, it will be possible to reset the FDC without
+ * incurring the expensive cost of repositioning all heads.
+ * Right now, these positions are hard wired to 0. */
+
+};
+
+#define FDGETFDCSTAT _IOR(2, 0x15, struct floppy_fdc_state)
+
+
+/*
+ * Asynchronous Write error tracking
+ */
+struct floppy_write_errors {
+ /* Write error logging.
+ *
+ * These fields can be cleared with the FDWERRORCLR ioctl.
+ * Only writes that were attempted but failed due to a physical media
+ * error are logged. write(2) calls that fail and return an error code
+ * to the user process are not counted.
+ */
+
+ unsigned int write_errors; /* number of physical write errors
+ * encountered */
+
+ /* position of first and last write errors */
+ unsigned long first_error_sector;
+ int first_error_generation;
+ unsigned long last_error_sector;
+ int last_error_generation;
+
+ unsigned int badness; /* highest retry count for a read or write
+ * operation */
+};
+
+#define FDWERRORCLR _IO(2, 0x56)
+/* clear write error and badness information */
+#define FDWERRORGET _IOR(2, 0x17, struct floppy_write_errors)
+/* get write error and badness information */
+
+
+/*
+ * Raw commands
+ */
+/* new interface flag: now we can do them in batches */
+#define FDHAVEBATCHEDRAWCMD
+
+struct floppy_raw_cmd {
+ unsigned int flags;
+#define FD_RAW_READ 1
+#define FD_RAW_WRITE 2
+#define FD_RAW_NO_MOTOR 4
+#define FD_RAW_DISK_CHANGE 4 /* out: disk change flag was set */
+#define FD_RAW_INTR 8 /* wait for an interrupt */
+#define FD_RAW_SPIN 0x10 /* spin up the disk for this command */
+#define FD_RAW_NO_MOTOR_AFTER 0x20 /* switch the motor off after command
+ * completion */
+#define FD_RAW_NEED_DISK 0x40 /* this command needs a disk to be present */
+#define FD_RAW_NEED_SEEK 0x80 /* this command uses an implied seek (soft) */
+
+/* more "in" flags */
+#define FD_RAW_MORE 0x100 /* more records follow */
+#define FD_RAW_STOP_IF_FAILURE 0x200 /* stop if we encounter a failure */
+#define FD_RAW_STOP_IF_SUCCESS 0x400 /* stop if command successful */
+#define FD_RAW_SOFTFAILURE 0x800 /* consider the return value for failure
+ * detection too */
+
+/* more "out" flags */
+#define FD_RAW_FAILURE 0x10000 /* command sent to fdc, fdc returned error */
+#define FD_RAW_HARDFAILURE 0x20000 /* fdc had to be reset, or timed out */
+
+ void *data;
+ char *kernel_data; /* location of data buffer in the kernel */
+ struct floppy_raw_cmd *next; /* used for chaining of raw cmd's
+ * within the kernel */
+ long length; /* in: length of dma transfer. out: remaining bytes */
+ long phys_length; /* physical length, if different from dma length */
+ int buffer_length; /* length of allocated buffer */
+
+ unsigned char rate;
+ unsigned char cmd_count;
+ unsigned char cmd[16];
+ unsigned char reply_count;
+ unsigned char reply[16];
+ int track;
+ int resultcode;
+
+ int reserved1;
+ int reserved2;
+};
+
+#define FDRAWCMD _IO(2, 0x58)
+/* send a raw command to the fdc. Structure size not included, because of
+ * batches */
+
+#define FDTWADDLE _IO(2, 0x59)
+/* flicker motor-on bit before reading a sector. Experimental */
+
+
+#endif
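All of these ioctls are issued on an open floppy device node; FDGETPRM, for instance, returns the struct floppy_struct currently in effect. A small user-space sketch (device path and error handling kept minimal; O_NDELAY avoids waiting for media to be present):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fd.h>

int main(void)
{
	struct floppy_struct g;
	int fd = open("/dev/fd0", O_RDONLY | O_NDELAY);

	if (fd < 0 || ioctl(fd, FDGETPRM, &g) < 0) {
		perror("FDGETPRM");
		return 1;
	}
	printf("%u sectors, %u sect/track, %u heads, %u tracks\n",
	       g.size, g.sect, g.head, g.track);
	close(fd);
	return 0;
}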
diff --git a/i386/i386at/gpl/linux/include/linux/fdreg.h b/i386/i386at/gpl/linux/include/linux/fdreg.h
new file mode 100644
index 00000000..03d8893d
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/fdreg.h
@@ -0,0 +1,127 @@
+#ifndef _LINUX_FDREG_H
+#define _LINUX_FDREG_H
+/*
+ * This file contains some defines for the floppy disk controller.
+ * Various sources. Mostly "IBM Microcomputers: A Programmers
+ * Handbook", Sanches and Canton.
+ */
+
+#ifdef FDPATCHES
+
+#define FD_IOPORT fdc_state[fdc].address
+
+/* Fd controller regs. S&C, about page 340 */
+#define FD_STATUS (4 + FD_IOPORT )
+#define FD_DATA (5 + FD_IOPORT )
+
+/* Digital Output Register */
+#define FD_DOR (2 + FD_IOPORT )
+
+/* Digital Input Register (read) */
+#define FD_DIR (7 + FD_IOPORT )
+
+/* Diskette Control Register (write)*/
+#define FD_DCR (7 + FD_IOPORT )
+
+#else
+
+#define FD_STATUS 0x3f4
+#define FD_DATA 0x3f5
+#define FD_DOR 0x3f2 /* Digital Output Register */
+#define FD_DIR 0x3f7 /* Digital Input Register (read) */
+#define FD_DCR 0x3f7 /* Diskette Control Register (write)*/
+
+#endif
+
+/* Bits of main status register */
+#define STATUS_BUSYMASK 0x0F /* drive busy mask */
+#define STATUS_BUSY 0x10 /* FDC busy */
+#define STATUS_DMA 0x20 /* 0- DMA mode */
+#define STATUS_DIR 0x40 /* 0- cpu->fdc */
+#define STATUS_READY 0x80 /* Data reg ready */
+
+/* Bits of FD_ST0 */
+#define ST0_DS 0x03 /* drive select mask */
+#define ST0_HA 0x04 /* Head (Address) */
+#define ST0_NR 0x08 /* Not Ready */
+#define ST0_ECE 0x10 /* Equipment check error */
+#define ST0_SE 0x20 /* Seek end */
+#define ST0_INTR 0xC0 /* Interrupt code mask */
+
+/* Bits of FD_ST1 */
+#define ST1_MAM 0x01 /* Missing Address Mark */
+#define ST1_WP 0x02 /* Write Protect */
+#define ST1_ND 0x04 /* No Data - unreadable */
+#define ST1_OR 0x10 /* OverRun */
+#define ST1_CRC 0x20 /* CRC error in data or addr */
+#define ST1_EOC 0x80 /* End Of Cylinder */
+
+/* Bits of FD_ST2 */
+#define ST2_MAM 0x01 /* Missing Address Mark (again) */
+#define ST2_BC 0x02 /* Bad Cylinder */
+#define ST2_SNS 0x04 /* Scan Not Satisfied */
+#define ST2_SEH 0x08 /* Scan Equal Hit */
+#define ST2_WC 0x10 /* Wrong Cylinder */
+#define ST2_CRC 0x20 /* CRC error in data field */
+#define ST2_CM 0x40 /* Control Mark = deleted */
+
+/* Bits of FD_ST3 */
+#define ST3_HA 0x04 /* Head (Address) */
+#define ST3_DS 0x08 /* drive is double-sided */
+#define ST3_TZ 0x10 /* Track Zero signal (1=track 0) */
+#define ST3_RY 0x20 /* drive is ready */
+#define ST3_WP 0x40 /* Write Protect */
+#define ST3_FT 0x80 /* Drive Fault */
+
+/* Values for FD_COMMAND */
+#define FD_RECALIBRATE 0x07 /* move to track 0 */
+#define FD_SEEK 0x0F /* seek track */
+#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
+#define FD_WRITE 0xC5 /* write with MT, MFM */
+#define FD_SENSEI 0x08 /* Sense Interrupt Status */
+#define FD_SPECIFY 0x03 /* specify HUT etc */
+#define FD_FORMAT 0x4D /* format one track */
+#define FD_VERSION 0x10 /* get version code */
+#define FD_CONFIGURE 0x13 /* configure FIFO operation */
+#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
+#define FD_GETSTATUS 0x04 /* read ST3 */
+#define FD_DUMPREGS 0x0E /* dump the contents of the fdc regs */
+#define FD_READID 0xEA /* prints the header of a sector */
+#define FD_UNLOCK 0x14 /* Fifo config unlock */
+#define FD_LOCK 0x94 /* Fifo config lock */
+#define FD_RSEEK_OUT 0x8f /* seek out (i.e. to lower tracks) */
+#define FD_RSEEK_IN 0xcf /* seek in (i.e. to higher tracks) */
+#define FD_PARTID 0x18 /* part id ("extended" version cmd) */
+#define FD_SAVE 0x2e /* save fdc regs for later restore */
+
+/* DMA commands */
+#define DMA_READ 0x46
+#define DMA_WRITE 0x4A
+
+/* FDC version return types */
+#define FDC_NONE 0x00
+#define FDC_UNKNOWN 0x10 /* DO NOT USE THIS TYPE EXCEPT IF IDENTIFICATION
+ FAILS EARLY */
+#define FDC_8272A 0x20 /* Intel 8272a, NEC 765 */
+#define FDC_765ED 0x30 /* Non-Intel 1MB-compatible FDC, can't detect */
+#define FDC_82072 0x40 /* Intel 82072; 8272a + FIFO + DUMPREGS */
+#define FDC_82077_ORIG 0x50 /* Original version of 82077AA, sans LOCK */
+#define FDC_82077 0x52 /* 82077AA-1 */
+#define FDC_82077_UNKN 0x53 /* Unknown 82077 variant */
+#define FDC_82078 0x60 /* 44pin 82078 or 64pin 82078SL */
+#define FDC_82078_1 0x61 /* 82078-1 (2Mbps fdc) */
+#define FDC_S82078B 0x62 /* S82078B (first seen on Adaptec AVA-2825 VLB
+ * SCSI/EIDE/Floppy controller) */
+#define FDC_87306 0x63 /* National Semiconductor PC 87306 */
+
+/*
+ * Beware: the fdc type list is roughly sorted by increasing features.
+ * Presence of features is tested by comparing the FDC version id with the
+ * "oldest" version that has the needed feature.
+ * If during FDC detection, an obscure test fails late in the sequence, don't
+ * assign FDC_UNKNOWN. Else the FDC will be treated as a dumb 8272a, or worse.
+ * This is especially true if the tests are unneeded.
+ */
+
+#define FD_RESET_DELAY 20
+#endif
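Every byte moved through FD_DATA is gated by the main status register: STATUS_READY must be set and STATUS_DIR indicates the transfer direction (clear means cpu->fdc). A sketch of the canonical output routine built from these bits (inb_p/outb_p are the usual x86 port helpers; the retry bound and function name are arbitrary):

#include <linux/fdreg.h>
#include <asm/io.h>

/* Sketch: write one command byte to the FDC data register once the
 * main status register shows READY with the DIR and DMA bits clear,
 * i.e. the controller expects a byte from the CPU. */
static int fdc_out(unsigned char byte)
{
	int tries;

	for (tries = 0; tries < 10000; tries++) {
		unsigned char status = inb_p(FD_STATUS);

		if ((status & (STATUS_READY | STATUS_DIR | STATUS_DMA)) ==
		    STATUS_READY) {
			outb_p(byte, FD_DATA);
			return 0;
		}
	}
	return -1;			/* controller never became ready */
}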
diff --git a/i386/i386at/gpl/linux/include/linux/fs.h b/i386/i386at/gpl/linux/include/linux/fs.h
new file mode 100644
index 00000000..d5bc62b1
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/fs.h
@@ -0,0 +1,720 @@
+#ifndef _LINUX_FS_H
+#define _LINUX_FS_H
+
+/*
+ * This file has definitions for some important file table
+ * structures etc.
+ */
+
+#include <linux/linkage.h>
+#include <linux/limits.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/vfs.h>
+#include <linux/net.h>
+#include <linux/kdev_t.h>
+#include <linux/ioctl.h>
+
+/*
+ * It's silly to have NR_OPEN bigger than NR_FILE, but I'll fix
+ * that later. Anyway, now the file code is no longer dependent
+ * on bitmaps in unsigned longs, but uses the new fd_set structure..
+ *
+ * Some programs (notably those using select()) may have to be
+ * recompiled to take full advantage of the new limits..
+ */
+
+/* Fixed constants first: */
+#undef NR_OPEN
+#define NR_OPEN 256
+
+#define NR_SUPER 64
+#define NR_IHASH 131
+#define BLOCK_SIZE 1024
+#define BLOCK_SIZE_BITS 10
+
+/* And dynamically-tunable limits and defaults: */
+extern int max_inodes, nr_inodes;
+extern int max_files, nr_files;
+#define NR_INODE 2048 /* this should be bigger than NR_FILE */
+#define NR_FILE 1024 /* this can well be larger on a larger system */
+
+#define MAY_EXEC 1
+#define MAY_WRITE 2
+#define MAY_READ 4
+
+#define FMODE_READ 1
+#define FMODE_WRITE 2
+
+#define READ 0
+#define WRITE 1
+#define READA 2 /* read-ahead - don't pause */
+#define WRITEA 3 /* "write-ahead" - silly, but somewhat useful */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#define NIL_FILP ((struct file *)0)
+#define SEL_IN 1
+#define SEL_OUT 2
+#define SEL_EX 4
+
+/*
+ * These are the fs-independent mount-flags: up to 16 flags are supported
+ */
+#define MS_RDONLY 1 /* Mount read-only */
+#define MS_NOSUID 2 /* Ignore suid and sgid bits */
+#define MS_NODEV 4 /* Disallow access to device special files */
+#define MS_NOEXEC 8 /* Disallow program execution */
+#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
+#define S_WRITE 128 /* Write on file/directory/symlink */
+#define S_APPEND 256 /* Append-only file */
+#define S_IMMUTABLE 512 /* Immutable file */
+
+/*
+ * Flags that can be altered by MS_REMOUNT
+ */
+#define MS_RMT_MASK (MS_RDONLY)
+
+/*
+ * Magic mount flag number. Has to be or-ed to the flag values.
+ */
+#define MS_MGC_VAL 0xC0ED0000 /* magic flag number to indicate "new" flags */
+#define MS_MGC_MSK 0xffff0000 /* magic flag number mask */
+
+/*
+ * Note that read-only etc flags are inode-specific: setting some file-system
+ * flags just means all the inodes inherit those flags by default. It might be
+ * possible to override it selectively if you really wanted to with some
+ * ioctl() that is not currently implemented.
+ *
+ * Exception: MS_RDONLY is always applied to the entire file system.
+ */
+#define IS_RDONLY(inode) (((inode)->i_sb) && ((inode)->i_sb->s_flags & MS_RDONLY))
+#define IS_NOSUID(inode) ((inode)->i_flags & MS_NOSUID)
+#define IS_NODEV(inode) ((inode)->i_flags & MS_NODEV)
+#define IS_NOEXEC(inode) ((inode)->i_flags & MS_NOEXEC)
+#define IS_SYNC(inode) ((inode)->i_flags & MS_SYNCHRONOUS)
+
+#define IS_WRITABLE(inode) ((inode)->i_flags & S_WRITE)
+#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
+#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+
+/* the read-only stuff doesn't really belong here, but any other place is
+ probably as bad and I don't want to create yet another include file. */
+
+#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */
+#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */
+#define BLKRRPART _IO(0x12,95) /* re-read partition table */
+#define BLKGETSIZE _IO(0x12,96) /* return device size */
+#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */
+#define BLKRASET _IO(0x12,98) /* Set read ahead for block device */
+#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */
+
+#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
+#define FIBMAP _IO(0x00,1) /* bmap access */
+#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */
+
+#ifdef __KERNEL__
+
+#include <asm/bitops.h>
+
+extern void buffer_init(void);
+extern unsigned long inode_init(unsigned long start, unsigned long end);
+extern unsigned long file_table_init(unsigned long start, unsigned long end);
+extern unsigned long name_cache_init(unsigned long start, unsigned long end);
+
+typedef char buffer_block[BLOCK_SIZE];
+
+/* bh state bits */
+#define BH_Uptodate 0 /* 1 if the buffer contains valid data */
+#define BH_Dirty 1 /* 1 if the buffer is dirty */
+#define BH_Lock 2 /* 1 if the buffer is locked */
+#define BH_Req 3 /* 0 if the buffer has been invalidated */
+#define BH_Touched 4 /* 1 if the buffer has been touched (aging) */
+#define BH_Has_aged 5 /* 1 if the buffer has been aged (aging) */
+#define BH_Protected 6 /* 1 if the buffer is protected */
+#define BH_FreeOnIO 7 /* 1 to discard the buffer_head after IO */
+
+/*
+ * Try to keep the most commonly used fields in single cache lines (16
+ * bytes) to improve performance. This ordering should be
+ * particularly beneficial on 32-bit processors.
+ *
+ * We use the first 16 bytes for the data which is used in searches
+ * over the block hash lists (ie. getblk(), find_buffer() and
+ * friends).
+ *
+ * The second 16 bytes we use for lru buffer scans, as used by
+ * sync_buffers() and refill_freelist(). -- sct
+ */
+#ifdef MACH
+struct buffer_head
+{
+ unsigned long b_blocknr;
+ kdev_t b_dev;
+ unsigned long b_state;
+ unsigned long b_size;
+ char *b_data;
+ struct wait_queue *b_wait;
+ struct buffer_head *b_reqnext;
+ void *b_page_list;
+ int b_index;
+ int b_off;
+ int b_usrcnt;
+ struct request *b_request;
+ struct semaphore *b_sem;
+};
+#else /* ! MACH */
+struct buffer_head {
+ /* First cache line: */
+ unsigned long b_blocknr; /* block number */
+ kdev_t b_dev; /* device (B_FREE = free) */
+ struct buffer_head * b_next; /* Hash queue list */
+ struct buffer_head * b_this_page; /* circular list of buffers in one page */
+
+ /* Second cache line: */
+ unsigned long b_state; /* buffer state bitmap (see above) */
+ struct buffer_head * b_next_free;
+ unsigned int b_count; /* users using this block */
+ unsigned long b_size; /* block size */
+
+ /* Non-performance-critical data follows. */
+ char * b_data; /* pointer to data block (1024 bytes) */
+ unsigned int b_list; /* List that this buffer appears on */
+ unsigned long b_flushtime; /* Time when this (dirty) buffer
+ * should be written */
+ unsigned long b_lru_time; /* Time when this buffer was
+ * last used. */
+ struct wait_queue * b_wait;
+ struct buffer_head * b_prev; /* doubly linked list of hash-queue */
+ struct buffer_head * b_prev_free; /* doubly linked list of buffers */
+ struct buffer_head * b_reqnext; /* request queue */
+ char *b_usrbuf;
+ struct request *b_request;
+ struct semaphore *b_sem;
+};
+#endif /* ! MACH */
+
+static inline int buffer_uptodate(struct buffer_head * bh)
+{
+ return test_bit(BH_Uptodate, &bh->b_state);
+}
+
+static inline int buffer_dirty(struct buffer_head * bh)
+{
+ return test_bit(BH_Dirty, &bh->b_state);
+}
+
+static inline int buffer_locked(struct buffer_head * bh)
+{
+ return test_bit(BH_Lock, &bh->b_state);
+}
+
+static inline int buffer_req(struct buffer_head * bh)
+{
+ return test_bit(BH_Req, &bh->b_state);
+}
+
+static inline int buffer_touched(struct buffer_head * bh)
+{
+ return test_bit(BH_Touched, &bh->b_state);
+}
+
+static inline int buffer_has_aged(struct buffer_head * bh)
+{
+ return test_bit(BH_Has_aged, &bh->b_state);
+}
+
+static inline int buffer_protected(struct buffer_head * bh)
+{
+ return test_bit(BH_Protected, &bh->b_state);
+}
+
+#ifndef MACH
+#include <linux/pipe_fs_i.h>
+#include <linux/minix_fs_i.h>
+#include <linux/ext_fs_i.h>
+#include <linux/ext2_fs_i.h>
+#include <linux/hpfs_fs_i.h>
+#include <linux/msdos_fs_i.h>
+#include <linux/umsdos_fs_i.h>
+#include <linux/iso_fs_i.h>
+#include <linux/nfs_fs_i.h>
+#include <linux/xia_fs_i.h>
+#include <linux/sysv_fs_i.h>
+#endif
+
+/*
+ * Attribute flags. These should be or-ed together to figure out what
+ * has been changed!
+ */
+#define ATTR_MODE 1
+#define ATTR_UID 2
+#define ATTR_GID 4
+#define ATTR_SIZE 8
+#define ATTR_ATIME 16
+#define ATTR_MTIME 32
+#define ATTR_CTIME 64
+#define ATTR_ATIME_SET 128
+#define ATTR_MTIME_SET 256
+#define ATTR_FORCE 512 /* Not a change, but force it */
+
+/*
+ * This is the Inode Attributes structure, used for notify_change(). It
+ * uses the above definitions as flags, to know which values have changed.
+ * Also, in this manner, a Filesystem can look at only the values it cares
+ * about. Basically, these are the attributes that the VFS layer can
+ * request to change from the FS layer.
+ *
+ * Derek Atkins <warlord@MIT.EDU> 94-10-20
+ */
+struct iattr {
+ unsigned int ia_valid;
+ umode_t ia_mode;
+ uid_t ia_uid;
+ gid_t ia_gid;
+ off_t ia_size;
+ time_t ia_atime;
+ time_t ia_mtime;
+ time_t ia_ctime;
+};
+
+#include <linux/quota.h>
+
+#ifdef MACH
+struct inode
+{
+ umode_t i_mode;
+ kdev_t i_rdev;
+};
+
+struct file
+{
+ mode_t f_mode;
+ loff_t f_pos;
+ unsigned short f_flags;
+ int f_resid;
+ void *f_object;
+ void *f_np;
+};
+
+struct vm_area_struct;
+struct page;
+#else /* ! MACH */
+struct inode {
+ kdev_t i_dev;
+ unsigned long i_ino;
+ umode_t i_mode;
+ nlink_t i_nlink;
+ uid_t i_uid;
+ gid_t i_gid;
+ kdev_t i_rdev;
+ off_t i_size;
+ time_t i_atime;
+ time_t i_mtime;
+ time_t i_ctime;
+ unsigned long i_blksize;
+ unsigned long i_blocks;
+ unsigned long i_version;
+ unsigned long i_nrpages;
+ struct semaphore i_sem;
+ struct inode_operations *i_op;
+ struct super_block *i_sb;
+ struct wait_queue *i_wait;
+ struct file_lock *i_flock;
+ struct vm_area_struct *i_mmap;
+ struct page *i_pages;
+ struct dquot *i_dquot[MAXQUOTAS];
+ struct inode *i_next, *i_prev;
+ struct inode *i_hash_next, *i_hash_prev;
+ struct inode *i_bound_to, *i_bound_by;
+ struct inode *i_mount;
+ unsigned short i_count;
+ unsigned short i_flags;
+ unsigned char i_lock;
+ unsigned char i_dirt;
+ unsigned char i_pipe;
+ unsigned char i_sock;
+ unsigned char i_seek;
+ unsigned char i_update;
+ unsigned short i_writecount;
+ union {
+ struct pipe_inode_info pipe_i;
+ struct minix_inode_info minix_i;
+ struct ext_inode_info ext_i;
+ struct ext2_inode_info ext2_i;
+ struct hpfs_inode_info hpfs_i;
+ struct msdos_inode_info msdos_i;
+ struct umsdos_inode_info umsdos_i;
+ struct iso_inode_info isofs_i;
+ struct nfs_inode_info nfs_i;
+ struct xiafs_inode_info xiafs_i;
+ struct sysv_inode_info sysv_i;
+ struct socket socket_i;
+ void * generic_ip;
+ } u;
+};
+
+struct file {
+ mode_t f_mode;
+ loff_t f_pos;
+ unsigned short f_flags;
+ unsigned short f_count;
+ off_t f_reada;
+ struct file *f_next, *f_prev;
+ int f_owner; /* pid or -pgrp where SIGIO should be sent */
+ struct inode * f_inode;
+ struct file_operations * f_op;
+ unsigned long f_version;
+ void *private_data; /* needed for tty driver, and maybe others */
+};
+#endif /* ! MACH */
+
+struct file_lock {
+ struct file_lock *fl_next; /* singly linked list for this inode */
+ struct file_lock *fl_nextlink; /* doubly linked list of all locks */
+ struct file_lock *fl_prevlink; /* used to simplify lock removal */
+ struct file_lock *fl_block;
+ struct task_struct *fl_owner;
+ struct wait_queue *fl_wait;
+ struct file *fl_file;
+ char fl_flags;
+ char fl_type;
+ off_t fl_start;
+ off_t fl_end;
+};
+
+struct fasync_struct {
+ int magic;
+ struct fasync_struct *fa_next; /* singly linked list */
+ struct file *fa_file;
+};
+
+#define FASYNC_MAGIC 0x4601
+
+extern int fasync_helper(struct inode *, struct file *, int, struct fasync_struct **);
+
+#ifndef MACH
+#include <linux/minix_fs_sb.h>
+#include <linux/ext_fs_sb.h>
+#include <linux/ext2_fs_sb.h>
+#include <linux/hpfs_fs_sb.h>
+#include <linux/msdos_fs_sb.h>
+#include <linux/iso_fs_sb.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/xia_fs_sb.h>
+#include <linux/sysv_fs_sb.h>
+
+struct super_block {
+ kdev_t s_dev;
+ unsigned long s_blocksize;
+ unsigned char s_blocksize_bits;
+ unsigned char s_lock;
+ unsigned char s_rd_only;
+ unsigned char s_dirt;
+ struct file_system_type *s_type;
+ struct super_operations *s_op;
+ struct dquot_operations *dq_op;
+ unsigned long s_flags;
+ unsigned long s_magic;
+ unsigned long s_time;
+ struct inode * s_covered;
+ struct inode * s_mounted;
+ struct wait_queue * s_wait;
+ union {
+ struct minix_sb_info minix_sb;
+ struct ext_sb_info ext_sb;
+ struct ext2_sb_info ext2_sb;
+ struct hpfs_sb_info hpfs_sb;
+ struct msdos_sb_info msdos_sb;
+ struct isofs_sb_info isofs_sb;
+ struct nfs_sb_info nfs_sb;
+ struct xiafs_sb_info xiafs_sb;
+ struct sysv_sb_info sysv_sb;
+ void *generic_sbp;
+ } u;
+};
+#endif /* ! MACH */
+
+/*
+ * This is the "filldir" function type, used by readdir() to let
+ * the kernel specify what kind of dirent layout it wants to have.
+ * This allows the kernel to read directories into kernel space or
+ * to have different dirent layouts depending on the binary type.
+ */
+typedef int (*filldir_t)(void *, const char *, int, off_t, ino_t);
+
+struct file_operations {
+ int (*lseek) (struct inode *, struct file *, off_t, int);
+ int (*read) (struct inode *, struct file *, char *, int);
+ int (*write) (struct inode *, struct file *, const char *, int);
+ int (*readdir) (struct inode *, struct file *, void *, filldir_t);
+ int (*select) (struct inode *, struct file *, int, select_table *);
+ int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
+ int (*mmap) (struct inode *, struct file *, struct vm_area_struct *);
+ int (*open) (struct inode *, struct file *);
+ void (*release) (struct inode *, struct file *);
+ int (*fsync) (struct inode *, struct file *);
+ int (*fasync) (struct inode *, struct file *, int);
+ int (*check_media_change) (kdev_t dev);
+ int (*revalidate) (kdev_t dev);
+};
+
+struct inode_operations {
+ struct file_operations * default_file_ops;
+ int (*create) (struct inode *,const char *,int,int,struct inode **);
+ int (*lookup) (struct inode *,const char *,int,struct inode **);
+ int (*link) (struct inode *,struct inode *,const char *,int);
+ int (*unlink) (struct inode *,const char *,int);
+ int (*symlink) (struct inode *,const char *,int,const char *);
+ int (*mkdir) (struct inode *,const char *,int,int);
+ int (*rmdir) (struct inode *,const char *,int);
+ int (*mknod) (struct inode *,const char *,int,int,int);
+ int (*rename) (struct inode *,const char *,int,struct inode *,const char *,int);
+ int (*readlink) (struct inode *,char *,int);
+ int (*follow_link) (struct inode *,struct inode *,int,int,struct inode **);
+ int (*readpage) (struct inode *, struct page *);
+ int (*writepage) (struct inode *, struct page *);
+ int (*bmap) (struct inode *,int);
+ void (*truncate) (struct inode *);
+ int (*permission) (struct inode *, int);
+ int (*smap) (struct inode *,int);
+};
+
+struct super_operations {
+ void (*read_inode) (struct inode *);
+ int (*notify_change) (struct inode *, struct iattr *);
+ void (*write_inode) (struct inode *);
+ void (*put_inode) (struct inode *);
+ void (*put_super) (struct super_block *);
+ void (*write_super) (struct super_block *);
+ void (*statfs) (struct super_block *, struct statfs *, int);
+ int (*remount_fs) (struct super_block *, int *, char *);
+};
+
+struct dquot_operations {
+ void (*initialize) (struct inode *, short);
+ void (*drop) (struct inode *);
+ int (*alloc_block) (const struct inode *, unsigned long);
+ int (*alloc_inode) (const struct inode *, unsigned long);
+ void (*free_block) (const struct inode *, unsigned long);
+ void (*free_inode) (const struct inode *, unsigned long);
+ int (*transfer) (struct inode *, struct iattr *, char);
+};
+
+struct file_system_type {
+ struct super_block *(*read_super) (struct super_block *, void *, int);
+ const char *name;
+ int requires_dev;
+ struct file_system_type * next;
+};
+
+extern int register_filesystem(struct file_system_type *);
+extern int unregister_filesystem(struct file_system_type *);
+
+asmlinkage int sys_open(const char *, int, int);
+asmlinkage int sys_close(unsigned int); /* yes, it's really unsigned */
+
+extern void kill_fasync(struct fasync_struct *fa, int sig);
+
+extern int getname(const char * filename, char **result);
+extern void putname(char * name);
+extern int do_truncate(struct inode *, unsigned long);
+extern int register_blkdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_blkdev(unsigned int major, const char * name);
+extern int blkdev_open(struct inode * inode, struct file * filp);
+extern struct file_operations def_blk_fops;
+extern struct inode_operations blkdev_inode_operations;
+
+extern int register_chrdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_chrdev(unsigned int major, const char * name);
+extern int chrdev_open(struct inode * inode, struct file * filp);
+extern struct file_operations def_chr_fops;
+extern struct inode_operations chrdev_inode_operations;
+
+extern void init_fifo(struct inode * inode);
+
+extern struct file_operations connecting_fifo_fops;
+extern struct file_operations read_fifo_fops;
+extern struct file_operations write_fifo_fops;
+extern struct file_operations rdwr_fifo_fops;
+extern struct file_operations read_pipe_fops;
+extern struct file_operations write_pipe_fops;
+extern struct file_operations rdwr_pipe_fops;
+
+extern struct file_system_type *get_fs_type(const char *name);
+
+extern int fs_may_mount(kdev_t dev);
+extern int fs_may_umount(kdev_t dev, struct inode * mount_root);
+extern int fs_may_remount_ro(kdev_t dev);
+
+extern struct file *first_file;
+extern struct super_block super_blocks[NR_SUPER];
+
+extern void refile_buffer(struct buffer_head * buf);
+extern void set_writetime(struct buffer_head * buf, int flag);
+extern void refill_freelist(int size);
+extern int try_to_free_buffer(struct buffer_head*, struct buffer_head**, int);
+
+extern struct buffer_head ** buffer_pages;
+extern int nr_buffers;
+extern int buffermem;
+extern int nr_buffer_heads;
+
+#define BUF_CLEAN 0
+#define BUF_UNSHARED 1 /* Buffers that were shared but are not any more */
+#define BUF_LOCKED 2 /* Buffers scheduled for write */
+#define BUF_LOCKED1 3 /* Supers, inodes */
+#define BUF_DIRTY 4 /* Dirty buffers, not yet scheduled for write */
+#define BUF_SHARED 5 /* Buffers shared */
+#define NR_LIST 6
+
+#ifdef MACH
+extern inline void
+mark_buffer_uptodate (struct buffer_head *bh, int on)
+{
+ if (on)
+ set_bit (BH_Uptodate, &bh->b_state);
+ else
+ clear_bit (BH_Uptodate, &bh->b_state);
+}
+#else
+void mark_buffer_uptodate(struct buffer_head * bh, int on);
+#endif
+
+extern inline void mark_buffer_clean(struct buffer_head * bh)
+{
+#ifdef MACH
+ clear_bit(BH_Dirty, &bh->b_state);
+#else
+ if (clear_bit(BH_Dirty, &bh->b_state)) {
+ if (bh->b_list == BUF_DIRTY)
+ refile_buffer(bh);
+ }
+#endif
+}
+
+extern inline void mark_buffer_dirty(struct buffer_head * bh, int flag)
+{
+#ifdef MACH
+ set_bit(BH_Dirty, &bh->b_state);
+#else
+ if (!set_bit(BH_Dirty, &bh->b_state)) {
+ set_writetime(bh, flag);
+ if (bh->b_list != BUF_DIRTY)
+ refile_buffer(bh);
+ }
+#endif
+}
+
+extern int check_disk_change(kdev_t dev);
+#ifdef MACH
+#define invalidate_inodes(dev)
+#else
+extern void invalidate_inodes(kdev_t dev);
+#endif
+extern void invalidate_inode_pages(struct inode *, unsigned long);
+#ifdef MACH
+#define invalidate_buffers(dev)
+#else
+extern void invalidate_buffers(kdev_t dev);
+#endif
+extern int floppy_is_wp(int minor);
+extern void sync_inodes(kdev_t dev);
+#ifdef MACH
+#define sync_dev(dev)
+#define fsync_dev(dev)
+#else
+extern void sync_dev(kdev_t dev);
+extern int fsync_dev(kdev_t dev);
+#endif
+extern void sync_supers(kdev_t dev);
+extern int bmap(struct inode * inode,int block);
+extern int notify_change(struct inode *, struct iattr *);
+extern int namei(const char * pathname, struct inode ** res_inode);
+extern int lnamei(const char * pathname, struct inode ** res_inode);
+#ifdef MACH
+#define permission(i, m) 0
+#else
+extern int permission(struct inode * inode,int mask);
+#endif
+extern int get_write_access(struct inode *inode);
+extern void put_write_access(struct inode *inode);
+extern int open_namei(const char * pathname, int flag, int mode,
+ struct inode ** res_inode, struct inode * base);
+extern int do_mknod(const char * filename, int mode, dev_t dev);
+extern int do_pipe(int *);
+extern void iput(struct inode * inode);
+extern struct inode * __iget(struct super_block * sb,int nr,int crsmnt);
+extern struct inode * get_empty_inode(void);
+extern void insert_inode_hash(struct inode *);
+extern void clear_inode(struct inode *);
+extern struct inode * get_pipe_inode(void);
+extern struct file * get_empty_filp(void);
+extern int close_fp(struct file *filp);
+extern struct buffer_head * get_hash_table(kdev_t dev, int block, int size);
+extern struct buffer_head * getblk(kdev_t dev, int block, int size);
+extern void ll_rw_block(int rw, int nr, struct buffer_head * bh[]);
+extern void ll_rw_page(int rw, kdev_t dev, unsigned long nr, char * buffer);
+extern void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buffer);
+extern int is_read_only(kdev_t dev);
+extern void __brelse(struct buffer_head *buf);
+extern inline void brelse(struct buffer_head *buf)
+{
+ if (buf)
+ __brelse(buf);
+}
+extern void __bforget(struct buffer_head *buf);
+extern inline void bforget(struct buffer_head *buf)
+{
+ if (buf)
+ __bforget(buf);
+}
+extern void set_blocksize(kdev_t dev, int size);
+extern struct buffer_head * bread(kdev_t dev, int block, int size);
+extern struct buffer_head * breada(kdev_t dev,int block, int size,
+ unsigned int pos, unsigned int filesize);
+
+extern int generic_readpage(struct inode *, struct page *);
+extern int generic_file_read(struct inode *, struct file *, char *, int);
+extern int generic_mmap(struct inode *, struct file *, struct vm_area_struct *);
+extern int brw_page(int, unsigned long, kdev_t, int [], int, int);
+
+extern void put_super(kdev_t dev);
+unsigned long generate_cluster(kdev_t dev, int b[], int size);
+extern kdev_t ROOT_DEV;
+
+extern void show_buffers(void);
+extern void mount_root(void);
+
+extern int char_read(struct inode *, struct file *, char *, int);
+extern int block_read(struct inode *, struct file *, char *, int);
+extern int read_ahead[];
+
+extern int char_write(struct inode *, struct file *, const char *, int);
+extern int block_write(struct inode *, struct file *, const char *, int);
+
+extern int block_fsync(struct inode *, struct file *);
+extern int file_fsync(struct inode *, struct file *);
+
+extern void dcache_add(struct inode *, const char *, int, unsigned long);
+extern int dcache_lookup(struct inode *, const char *, int, unsigned long *);
+
+extern int inode_change_ok(struct inode *, struct iattr *);
+extern void inode_setattr(struct inode *, struct iattr *);
+
+extern inline struct inode * iget(struct super_block * sb,int nr)
+{
+ return __iget(sb, nr, 1);
+}
+
+/* kludge to get SCSI modules working */
+#include <linux/minix_fs.h>
+#include <linux/minix_fs_sb.h>
+
+#endif /* __KERNEL__ */
+
+#endif
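struct file_operations is the dispatch table the kernel consults for every operation on a device, and register_chrdev()/register_blkdev() bind such a table to a major number. A sketch of the usual registration pattern for a character device, in the positional-initializer style of the era (the major number, names and handlers are placeholders):

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static int example_open(struct inode *inode, struct file *filp)
{
	return 0;				/* nothing to set up */
}

static int example_read(struct inode *inode, struct file *filp,
			char *buf, int count)
{
	return 0;				/* always at end of file */
}

static struct file_operations example_fops = {
	NULL,					/* lseek: default */
	example_read,				/* read */
	NULL,					/* write */
	NULL,					/* readdir */
	NULL,					/* select */
	NULL,					/* ioctl */
	NULL,					/* mmap */
	example_open,				/* open */
	NULL,					/* release */
						/* remaining handlers default to NULL */
};

int example_init(void)
{
	if (register_chrdev(42, "example", &example_fops) < 0) {
		printk("example: cannot get major 42\n");
		return -EIO;
	}
	return 0;
}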
diff --git a/i386/i386at/gpl/linux/include/linux/genhd.h b/i386/i386at/gpl/linux/include/linux/genhd.h
new file mode 100644
index 00000000..e1c5888d
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/genhd.h
@@ -0,0 +1,73 @@
+#ifndef _LINUX_GENHD_H
+#define _LINUX_GENHD_H
+
+/*
+ * genhd.h Copyright (C) 1992 Drew Eckhardt
+ * Generic hard disk header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ */
+
+#define CONFIG_MSDOS_PARTITION 1
+
+#ifdef __alpha__
+#define CONFIG_OSF_PARTITION 1
+#endif
+
+#ifdef __sparc__
+#define CONFIG_SUN_PARTITION 1
+#endif
+
+/* These two have identical behaviour; use the second one if DOS fdisk gets
+ confused about extended/logical partitions starting past cylinder 1023. */
+#define DOS_EXTENDED_PARTITION 5
+#define LINUX_EXTENDED_PARTITION 0x85
+
+#define DM6_PARTITION 0x54 /* has DDO: use xlated geom & offset */
+#define EZD_PARTITION 0x55 /* EZ-DRIVE: same as DM6 (we think) */
+#define DM6_AUX1PARTITION 0x51 /* no DDO: use xlated geom */
+#define DM6_AUX3PARTITION 0x53 /* no DDO: use xlated geom */
+
+#ifdef MACH_INCLUDE
+struct linux_partition {
+#else
+struct partition {
+#endif
+ unsigned char boot_ind; /* 0x80 - active */
+ unsigned char head; /* starting head */
+ unsigned char sector; /* starting sector */
+ unsigned char cyl; /* starting cylinder */
+ unsigned char sys_ind; /* What partition type */
+ unsigned char end_head; /* end head */
+ unsigned char end_sector; /* end sector */
+ unsigned char end_cyl; /* end cylinder */
+ unsigned int start_sect; /* starting sector counting from 0 */
+ unsigned int nr_sects; /* nr of sectors in partition */
+};
+
+struct hd_struct {
+ long start_sect;
+ long nr_sects;
+};
+
+struct gendisk {
+ int major; /* major number of driver */
+ const char *major_name; /* name of major driver */
+ int minor_shift; /* number of times minor is shifted to
+ get real minor */
+ int max_p; /* maximum partitions per device */
+ int max_nr; /* maximum number of real devices */
+
+ void (*init)(struct gendisk *); /* Initialization called before we do our thing */
+ struct hd_struct *part; /* partition table */
+ int *sizes; /* device size in blocks, copied to blk_size[] */
+ int nr_real; /* number of real devices */
+
+ void *real_devices; /* internal use */
+ struct gendisk *next;
+};
+
+extern struct gendisk *gendisk_head; /* linked list of disks */
+
+#endif
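Each driver links a struct gendisk onto gendisk_head, and since minor_shift is "the number of times minor is shifted to get real minor", a partition's minor number is (unit << minor_shift) + partition index into part[]. A sketch that walks the list and prints the non-empty partitions (the function name is made up):

#include <linux/genhd.h>
#include <linux/kernel.h>

/* Sketch: enumerate registered disks and their non-empty partitions. */
static void list_partitions(void)
{
	struct gendisk *gd;
	int unit, p;

	for (gd = gendisk_head; gd != NULL; gd = gd->next) {
		for (unit = 0; unit < gd->nr_real; unit++) {
			for (p = 0; p < gd->max_p; p++) {
				int minor = (unit << gd->minor_shift) + p;
				struct hd_struct *hd = &gd->part[minor];

				if (hd->nr_sects == 0)
					continue;
				printk("%s%d partition %d: start %ld, %ld sectors\n",
				       gd->major_name, unit, p,
				       hd->start_sect, hd->nr_sects);
			}
		}
	}
}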
diff --git a/i386/i386at/gpl/linux/include/linux/hdreg.h b/i386/i386at/gpl/linux/include/linux/hdreg.h
new file mode 100644
index 00000000..58a9a5df
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/hdreg.h
@@ -0,0 +1,171 @@
+#ifndef _LINUX_HDREG_H
+#define _LINUX_HDREG_H
+
+/*
+ * This file contains some defines for the AT-hd-controller.
+ * Various sources.
+ */
+
+#define HD_IRQ 14 /* the standard disk interrupt */
+
+/* ide.c has its own port definitions in "ide.h" */
+
+/* Hd controller regs. Ref: IBM AT Bios-listing */
+#define HD_DATA 0x1f0 /* _CTL when writing */
+#define HD_ERROR 0x1f1 /* see err-bits */
+#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */
+#define HD_SECTOR 0x1f3 /* starting sector */
+#define HD_LCYL 0x1f4 /* starting cylinder */
+#define HD_HCYL 0x1f5 /* high byte of starting cyl */
+#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */
+#define HD_STATUS 0x1f7 /* see status-bits */
+#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */
+#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */
+#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */
+
+#define HD_CMD 0x3f6 /* used for resets */
+#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */
+
+/* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */
+
+/* Bits of HD_STATUS */
+#define ERR_STAT 0x01
+#define INDEX_STAT 0x02
+#define ECC_STAT 0x04 /* Corrected error */
+#define DRQ_STAT 0x08
+#define SEEK_STAT 0x10
+#define WRERR_STAT 0x20
+#define READY_STAT 0x40
+#define BUSY_STAT 0x80
+
+/* Values for HD_COMMAND */
+#define WIN_RESTORE 0x10
+#define WIN_READ 0x20
+#define WIN_WRITE 0x30
+#define WIN_VERIFY 0x40
+#define WIN_FORMAT 0x50
+#define WIN_INIT 0x60
+#define WIN_SEEK 0x70
+#define WIN_DIAGNOSE 0x90
+#define WIN_SPECIFY 0x91 /* set drive geometry translation */
+#define WIN_SETIDLE1 0xE3
+#define WIN_SETIDLE2 0x97
+
+#define WIN_DOORLOCK 0xde /* lock door on removable drives */
+#define WIN_DOORUNLOCK 0xdf /* unlock door on removable drives */
+
+#define WIN_MULTREAD 0xC4 /* read sectors using multiple mode */
+#define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */
+#define WIN_SETMULT 0xC6 /* enable/disable multiple mode */
+#define WIN_IDENTIFY 0xEC /* ask drive to identify itself */
+#define WIN_SETFEATURES 0xEF /* set special drive features */
+#define WIN_READDMA 0xc8 /* read sectors using DMA transfers */
+#define WIN_WRITEDMA 0xca /* write sectors using DMA transfers */
+
+/* Additional drive command codes used by ATAPI devices. */
+#define WIN_PIDENTIFY 0xA1 /* identify ATAPI device */
+#define WIN_SRST 0x08 /* ATAPI soft reset command */
+#define WIN_PACKETCMD 0xa0 /* Send a packet command. */
+
+/* Bits for HD_ERROR */
+#define MARK_ERR 0x01 /* Bad address mark */
+#define TRK0_ERR 0x02 /* couldn't find track 0 */
+#define ABRT_ERR 0x04 /* Command aborted */
+#define ID_ERR 0x10 /* ID field not found */
+#define ECC_ERR 0x40 /* Uncorrectable ECC error */
+#define BBD_ERR 0x80 /* block marked bad */
+
+struct hd_geometry {
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+ unsigned long start;
+};
+
+/* hd/ide ctl's that pass (arg) ptrs to user space are numbered 0x030n/0x031n */
+#define HDIO_GETGEO 0x0301 /* get device geometry */
+#define HDIO_GET_UNMASKINTR 0x0302 /* get current unmask setting */
+#define HDIO_GET_MULTCOUNT 0x0304 /* get current IDE blockmode setting */
+#define HDIO_GET_IDENTITY 0x0307 /* get IDE identification info */
+#define HDIO_GET_KEEPSETTINGS 0x0308 /* get keep-settings-on-reset flag */
+#define HDIO_GET_32BIT 0x0309 /* get current io_32bit setting */
+#define HDIO_GET_NOWERR 0x030a /* get ignore-write-error flag */
+#define HDIO_GET_DMA 0x030b /* get use-dma flag */
+#define HDIO_DRIVE_CMD 0x031f /* execute a special drive command */
+
+/* hd/ide ctl's that pass (arg) non-ptr values are numbered 0x032n/0x033n */
+#define HDIO_SET_MULTCOUNT 0x0321 /* change IDE blockmode */
+#define HDIO_SET_UNMASKINTR 0x0322 /* permit other irqs during I/O */
+#define HDIO_SET_KEEPSETTINGS 0x0323 /* keep ioctl settings on reset */
+#define HDIO_SET_32BIT 0x0324 /* change io_32bit flags */
+#define HDIO_SET_NOWERR 0x0325 /* change ignore-write-error flag */
+#define HDIO_SET_DMA 0x0326 /* change use-dma flag */
+#define HDIO_SET_PIO_MODE 0x0327 /* reconfig interface to new speed */
+
+/* structure returned by HDIO_GET_IDENTITY, as per ANSI ATA2 rev.2f spec */
+struct hd_driveid {
+ unsigned short config; /* lots of obsolete bit flags */
+ unsigned short cyls; /* "physical" cyls */
+ unsigned short reserved2; /* reserved (word 2) */
+ unsigned short heads; /* "physical" heads */
+ unsigned short track_bytes; /* unformatted bytes per track */
+ unsigned short sector_bytes; /* unformatted bytes per sector */
+ unsigned short sectors; /* "physical" sectors per track */
+ unsigned short vendor0; /* vendor unique */
+ unsigned short vendor1; /* vendor unique */
+ unsigned short vendor2; /* vendor unique */
+ unsigned char serial_no[20]; /* 0 = not_specified */
+ unsigned short buf_type;
+ unsigned short buf_size; /* 512 byte increments; 0 = not_specified */
+ unsigned short ecc_bytes; /* for r/w long cmds; 0 = not_specified */
+ unsigned char fw_rev[8]; /* 0 = not_specified */
+ unsigned char model[40]; /* 0 = not_specified */
+ unsigned char max_multsect; /* 0=not_implemented */
+ unsigned char vendor3; /* vendor unique */
+ unsigned short dword_io; /* 0=not_implemented; 1=implemented */
+ unsigned char vendor4; /* vendor unique */
+ unsigned char capability; /* bits 0:DMA 1:LBA 2:IORDYsw 3:IORDYsup*/
+ unsigned short reserved50; /* reserved (word 50) */
+ unsigned char vendor5; /* vendor unique */
+ unsigned char tPIO; /* 0=slow, 1=medium, 2=fast */
+ unsigned char vendor6; /* vendor unique */
+ unsigned char tDMA; /* 0=slow, 1=medium, 2=fast */
+ unsigned short field_valid; /* bits 0:cur_ok 1:eide_ok */
+ unsigned short cur_cyls; /* logical cylinders */
+ unsigned short cur_heads; /* logical heads */
+ unsigned short cur_sectors; /* logical sectors per track */
+ unsigned short cur_capacity0; /* logical total sectors on drive */
+ unsigned short cur_capacity1; /* (2 words, misaligned int) */
+ unsigned char multsect; /* current multiple sector count */
+ unsigned char multsect_valid; /* when (bit0==1) multsect is ok */
+ unsigned int lba_capacity; /* total number of sectors */
+ unsigned short dma_1word; /* single-word dma info */
+ unsigned short dma_mword; /* multiple-word dma info */
+ unsigned short eide_pio_modes; /* bits 0:mode3 1:mode4 */
+ unsigned short eide_dma_min; /* min mword dma cycle time (ns) */
+ unsigned short eide_dma_time; /* recommended mword dma cycle time (ns) */
+ unsigned short eide_pio; /* min cycle time (ns), no IORDY */
+ unsigned short eide_pio_iordy; /* min cycle time (ns), with IORDY */
+ unsigned short reserved69; /* reserved (word 69) */
+ unsigned short reserved70; /* reserved (word 70) */
+ /* unsigned short reservedxx[57];*/ /* reserved (words 71-127) */
+ /* unsigned short vendor7 [32];*/ /* vendor unique (words 128-159) */
+ /* unsigned short reservedyy[96];*/ /* reserved (words 160-255) */
+};
+
+#ifdef __KERNEL__
+/*
+ * These routines are used for kernel command line parameters from main.c:
+ */
+#include <linux/config.h>
+
+#ifdef CONFIG_BLK_DEV_HD
+void hd_setup(char *, int *);
+#endif /* CONFIG_BLK_DEV_HD */
+#ifdef CONFIG_BLK_DEV_IDE
+void ide_setup(char *);
+#endif /* CONFIG_BLK_DEV_IDE */
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_HDREG_H */
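HDIO_GETGEO is the call fdisk-style tools use to learn the translated geometry and the starting sector of the opened partition. A small user-space sketch (device path and error handling kept minimal):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/hda", O_RDONLY);

	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0) {
		perror("HDIO_GETGEO");
		return 1;
	}
	printf("%u heads, %u sectors/track, %u cylinders, start %lu\n",
	       geo.heads, geo.sectors, geo.cylinders, geo.start);
	close(fd);
	return 0;
}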
diff --git a/i386/i386at/gpl/linux/include/linux/head.h b/i386/i386at/gpl/linux/include/linux/head.h
new file mode 100644
index 00000000..3829b1c3
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/head.h
@@ -0,0 +1,19 @@
+#ifndef _LINUX_HEAD_H
+#define _LINUX_HEAD_H
+
+typedef struct desc_struct {
+ unsigned long a,b;
+} desc_table[256];
+
+extern desc_table idt,gdt;
+
+#define GDT_NUL 0
+#define GDT_CODE 1
+#define GDT_DATA 2
+#define GDT_TMP 3
+
+#define LDT_NUL 0
+#define LDT_CODE 1
+#define LDT_DATA 2
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/if.h b/i386/i386at/gpl/linux/include/linux/if.h
new file mode 100644
index 00000000..73ef7feb
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/if.h
@@ -0,0 +1,167 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the INET interface module.
+ *
+ * Version: @(#)if.h 1.0.2 04/18/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1982-1988
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_H
+#define _LINUX_IF_H
+
+#include <linux/types.h> /* for "caddr_t" et al */
+#include <linux/socket.h> /* for "struct sockaddr" et al */
+
+/* Standard interface flags. */
+#define LINUX_IFF_UP 0x1 /* interface is up */
+#define LINUX_IFF_BROADCAST 0x2 /* broadcast address valid */
+#define LINUX_IFF_DEBUG 0x4 /* turn on debugging */
+#define LINUX_IFF_LOOPBACK 0x8 /* is a loopback net */
+#define LINUX_IFF_POINTOPOINT 0x10 /* interface has a point-to-point link */
+#define LINUX_IFF_NOTRAILERS 0x20 /* avoid use of trailers */
+#define LINUX_IFF_RUNNING 0x40 /* resources allocated */
+#define LINUX_IFF_NOARP 0x80 /* no ARP protocol */
+#define LINUX_IFF_PROMISC 0x100 /* receive all packets */
+/* Not supported */
+#define LINUX_IFF_ALLMULTI 0x200 /* receive all multicast packets*/
+
+#define LINUX_IFF_MASTER 0x400 /* master of a load balancer */
+#define LINUX_IFF_SLAVE 0x800 /* slave of a load balancer */
+
+#define LINUX_IFF_MULTICAST 0x1000 /* Supports multicast */
+
+#ifdef MACH
+#ifndef MACH_INCLUDE
+#define IFF_UP LINUX_IFF_UP
+#define IFF_BROADCAST LINUX_IFF_BROADCAST
+#define IFF_DEBUG LINUX_IFF_DEBUG
+#define IFF_LOOPBACK LINUX_IFF_LOOPBACK
+#define IFF_POINTOPOINT LINUX_IFF_POINTOPOINT
+#define IFF_NOTRAILERS LINUX_IFF_NOTRAILERS
+#define IFF_RUNNING LINUX_IFF_RUNNING
+#define IFF_NOARP LINUX_IFF_NOARP
+#define IFF_PROMISC LINUX_IFF_PROMISC
+#define IFF_ALLMULTI LINUX_IFF_ALLMULTI
+#define IFF_MASTER LINUX_IFF_MASTER
+#define IFF_SLAVE LINUX_IFF_SLAVE
+#define IFF_MULTICAST LINUX_IFF_MULTICAST
+#endif
+#endif
+
+/*
+ * The ifaddr structure contains information about one address
+ * of an interface. They are maintained by the different address
+ * families, are allocated and attached when an address is set,
+ * and are linked together so all addresses for an interface can
+ * be located.
+ */
+
+struct ifaddr
+{
+ struct sockaddr ifa_addr; /* address of interface */
+ union {
+ struct sockaddr ifu_broadaddr;
+ struct sockaddr ifu_dstaddr;
+ } ifa_ifu;
+ struct iface *ifa_ifp; /* back-pointer to interface */
+ struct ifaddr *ifa_next; /* next address for interface */
+};
+
+#define ifa_broadaddr ifa_ifu.ifu_broadaddr /* broadcast address */
+#define ifa_dstaddr ifa_ifu.ifu_dstaddr /* other end of link */
+
+/*
+ * Device mapping structure. I'd just gone off and designed a
+ * beautiful scheme using only loadable modules with arguments
+ * for driver options and along come the PCMCIA people 8)
+ *
+ * Ah well. The get() side of this is good for WDSETUP, and it'll
+ * be handy for debugging things. The set side is fine for now and
+ * being very small might be worth keeping for clean configuration.
+ */
+
+struct ifmap
+{
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned short base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+ /* 3 bytes spare */
+};
+
+/*
+ * Interface request structure used for socket
+ * ioctl's. All interface ioctl's must have parameter
+ * definitions which begin with ifr_name. The
+ * remainder may be interface specific.
+ */
+
+struct ifreq
+{
+#define IFHWADDRLEN 6
+#define IFNAMSIZ 16
+ union
+ {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ int ifru_metric;
+ int ifru_mtu;
+ struct ifmap ifru_map;
+ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
+ caddr_t ifru_data;
+ } ifr_ifru;
+};
+
+#define ifr_name ifr_ifrn.ifrn_name /* interface name */
+#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
+#define ifr_addr ifr_ifru.ifru_addr /* address */
+#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-p lnk */
+#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */
+#define ifr_netmask ifr_ifru.ifru_netmask /* interface net mask */
+#define ifr_flags ifr_ifru.ifru_flags /* flags */
+#define ifr_metric ifr_ifru.ifru_metric /* metric */
+#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */
+#define ifr_map ifr_ifru.ifru_map /* device map */
+#define ifr_slave ifr_ifru.ifru_slave /* slave device */
+#define ifr_data ifr_ifru.ifru_data /* for use by interface */
+
+/*
+ * Structure used in SIOCGIFCONF request.
+ * Used to retrieve interface configuration
+ * for the machine (useful for programs which
+ * must know all accessible networks).
+ */
+
+struct ifconf
+{
+ int ifc_len; /* size of buffer */
+ union
+ {
+ caddr_t ifcu_buf;
+ struct ifreq *ifcu_req;
+ } ifc_ifcu;
+};
+#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
+#define ifc_req ifc_ifcu.ifcu_req /* array of structures */
+
+#endif /* _LINUX_IF_H */
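The structures above are normally exercised from user space through socket ioctls. As a hedged sketch (not part of this commit): on a Linux-style system whose libc headers provide the same struct ifreq layout plus SIOCGIFFLAGS, querying an interface's flags looks roughly like this; the interface name "eth0" is only an example.

/* Sketch: read interface flags via struct ifreq (user-space illustration;
 * socket(), ioctl() and SIOCGIFFLAGS come from system headers, not this file). */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);        /* any socket will do      */

    if (fd < 0)
        return 1;
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* name selects the device */
    if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)         /* kernel fills ifr_flags  */
        printf("eth0 is %s\n", (ifr.ifr_flags & IFF_UP) ? "up" : "down");
    return 0;
}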
diff --git a/i386/i386at/gpl/linux/include/linux/if_arp.h b/i386/i386at/gpl/linux/include/linux/if_arp.h
new file mode 100644
index 00000000..fa350688
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/if_arp.h
@@ -0,0 +1,103 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the ARP (RFC 826) protocol.
+ *
+ * Version: @(#)if_arp.h 1.0.1 04/16/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988
+ * Portions taken from the KA9Q/NOS (v2.00m PA0GRI) source.
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Florian La Roche.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_ARP_H
+#define _LINUX_IF_ARP_H
+
+/* ARP protocol HARDWARE identifiers. */
+#define ARPHRD_NETROM 0 /* from KA9Q: NET/ROM pseudo */
+#define ARPHRD_ETHER 1 /* Ethernet 10Mbps */
+#define ARPHRD_EETHER 2 /* Experimental Ethernet */
+#define ARPHRD_AX25 3 /* AX.25 Level 2 */
+#define ARPHRD_PRONET 4 /* PROnet token ring */
+#define ARPHRD_CHAOS 5 /* Chaosnet */
+#define ARPHRD_IEEE802 6 /* IEEE 802.2 Ethernet/TR/TB */
+#define ARPHRD_ARCNET 7 /* ARCnet */
+#define ARPHRD_APPLETLK 8 /* APPLEtalk */
+/* Dummy types for non ARP hardware */
+#define ARPHRD_SLIP 256
+#define ARPHRD_CSLIP 257
+#define ARPHRD_SLIP6 258
+#define ARPHRD_CSLIP6 259
+#define ARPHRD_RSRVD 260 /* Notional KISS type */
+#define ARPHRD_ADAPT 264
+#define ARPHRD_PPP 512
+#define ARPHRD_TUNNEL 768 /* IPIP tunnel */
+#define ARPHRD_TUNNEL6 769 /* IPIP6 tunnel */
+#define ARPHRD_FRAD 770 /* Frame Relay */
+#define ARPHRD_SKIP 771 /* SKIP vif */
+#define ARPHRD_LOOPBACK 772 /* Loopback device */
+
+/* ARP protocol opcodes. */
+#define ARPOP_REQUEST 1 /* ARP request */
+#define ARPOP_REPLY 2 /* ARP reply */
+#define ARPOP_RREQUEST 3 /* RARP request */
+#define ARPOP_RREPLY 4 /* RARP reply */
+
+
+/* ARP ioctl request. */
+struct arpreq {
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+ char arp_dev[16];
+};
+
+struct arpreq_old {
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+};
+
+/* ARP Flag values. */
+#define ATF_COM 0x02 /* completed entry (ha valid) */
+#define ATF_PERM 0x04 /* permanent entry */
+#define ATF_PUBL 0x08 /* publish entry */
+#define ATF_USETRAILERS 0x10 /* has requested trailers */
+#define ATF_NETMASK 0x20 /* want to use a netmask (only
+ for proxy entries) */
+
+/*
+ * This structure defines an ethernet arp header.
+ */
+
+struct arphdr
+{
+ unsigned short ar_hrd; /* format of hardware address */
+ unsigned short ar_pro; /* format of protocol address */
+ unsigned char ar_hln; /* length of hardware address */
+ unsigned char ar_pln; /* length of protocol address */
+ unsigned short ar_op; /* ARP opcode (command) */
+
+#if 0
+ /*
+	 *	 Ethernet looks like this; this part is variable-sized, however...
+ */
+ unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
+ unsigned char ar_sip[4]; /* sender IP address */
+ unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
+ unsigned char ar_tip[4]; /* target IP address */
+#endif
+
+};
+
+#endif /* _LINUX_IF_ARP_H */
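Since the address fields of struct arphdr are conditionally compiled out above (their size depends on ar_hln/ar_pln), a sender fills the fixed 8-byte header and appends the addresses by hand. A rough sketch for the Ethernet/IPv4 case follows; the 0x0800 protocol type, the htons()/memcpy() helpers, and the function name are assumptions for illustration only.

/* Sketch: build an ARP request for Ethernet/IPv4 -- fixed header first, then
 * the variable-length sender/target addresses appended manually. */
#include <string.h>
#include <arpa/inet.h>

static unsigned int build_arp_request(unsigned char *buf,
                                      const unsigned char *my_mac,
                                      const unsigned char *my_ip,
                                      const unsigned char *target_ip)
{
    struct arphdr *arp = (struct arphdr *)buf;
    unsigned char *p = buf + sizeof(*arp);          /* 8 fixed bytes            */
    static const unsigned char zero_mac[6] = { 0 };

    arp->ar_hrd = htons(ARPHRD_ETHER);   /* hardware type: Ethernet             */
    arp->ar_pro = htons(0x0800);         /* protocol type: IPv4                 */
    arp->ar_hln = 6;                     /* MAC address length                  */
    arp->ar_pln = 4;                     /* IPv4 address length                 */
    arp->ar_op  = htons(ARPOP_REQUEST);  /* this is a request                   */

    memcpy(p, my_mac, 6);     p += 6;    /* sender hardware address             */
    memcpy(p, my_ip, 4);      p += 4;    /* sender protocol address             */
    memcpy(p, zero_mac, 6);   p += 6;    /* target MAC still unknown            */
    memcpy(p, target_ip, 4);  p += 4;    /* target protocol address             */
    return (unsigned int)(p - buf);      /* total ARP payload length            */
}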
diff --git a/i386/i386at/gpl/linux/include/linux/if_ether.h b/i386/i386at/gpl/linux/include/linux/if_ether.h
new file mode 100644
index 00000000..14078472
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/if_ether.h
@@ -0,0 +1,96 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the Ethernet IEEE 802.3 interface.
+ *
+ * Version: @(#)if_ether.h 1.0.1a 02/08/94
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Alan Cox, <alan@cymru.net>
+ * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_ETHER_H
+#define _LINUX_IF_ETHER_H
+
+/* IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
+ and FCS/CRC (frame check sequence). */
+#define ETH_ALEN 6 /* Octets in one ethernet addr */
+#define ETH_HLEN 14 /* Total octets in header. */
+#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
+#define ETH_DATA_LEN 1500 /* Max. octets in payload */
+#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
+
+
+/* These are the defined Ethernet Protocol ID's. */
+#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
+#define ETH_P_ECHO 0x0200 /* Ethernet Echo packet */
+#define ETH_P_PUP 0x0400 /* Xerox PUP packet */
+#define ETH_P_IP 0x0800 /* Internet Protocol packet */
+#define ETH_P_X25 0x0805 /* CCITT X.25 */
+#define ETH_P_ARP 0x0806 /* Address Resolution packet */
+#define ETH_P_BPQ 0x08FF /* G8BPQ AX.25 Ethernet Packet [ NOT AN OFFICIALLY REGISTERED ID ] */
+#define ETH_P_DEC 0x6000 /* DEC Assigned proto */
+#define ETH_P_DNA_DL 0x6001 /* DEC DNA Dump/Load */
+#define ETH_P_DNA_RC 0x6002 /* DEC DNA Remote Console */
+#define ETH_P_DNA_RT 0x6003 /* DEC DNA Routing */
+#define ETH_P_LAT 0x6004 /* DEC LAT */
+#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */
+#define ETH_P_CUST 0x6006 /* DEC Customer use */
+#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */
+#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
+#define ETH_P_ATALK 0x809B /* Appletalk DDP */
+#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
+#define ETH_P_IPX 0x8137 /* IPX over DIX */
+#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
+#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */
+#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */
+#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */
+#define ETH_P_802_2 0x0004 /* 802.2 frames */
+#define ETH_P_SNAP 0x0005 /* Internal only */
+#define ETH_P_DDCMP 0x0006 /* DEC DDCMP: Internal only */
+#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/
+#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */
+
+/* This is an Ethernet frame header. */
+struct ethhdr {
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+ unsigned char h_source[ETH_ALEN]; /* source ether addr */
+ unsigned short h_proto; /* packet type ID field */
+};
+
+/* Ethernet statistics collection data. */
+struct enet_statistics{
+ int rx_packets; /* total packets received */
+ int tx_packets; /* total packets transmitted */
+ int rx_errors; /* bad packets received */
+ int tx_errors; /* packet transmit problems */
+ int rx_dropped; /* no space in linux buffers */
+ int tx_dropped; /* no space available in linux */
+ int multicast; /* multicast packets received */
+ int collisions;
+
+ /* detailed rx_errors: */
+ int rx_length_errors;
+ int rx_over_errors; /* receiver ring buff overflow */
+ int rx_crc_errors; /* recved pkt with crc error */
+ int rx_frame_errors; /* recv'd frame alignment error */
+ int rx_fifo_errors; /* recv'r fifo overrun */
+ int rx_missed_errors; /* receiver missed packet */
+
+ /* detailed tx_errors */
+ int tx_aborted_errors;
+ int tx_carrier_errors;
+ int tx_fifo_errors;
+ int tx_heartbeat_errors;
+ int tx_window_errors;
+};
+
+#endif /* _LINUX_IF_ETHER_H */
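A minimal sketch of how a consumer of this header dispatches on the 16-bit type field: h_proto travels in network byte order, so it is compared against the ETH_P_* values after ntohs(). The frame pointer, its length, and the printf() reporting are illustrative only.

/* Sketch: classify a raw Ethernet frame using struct ethhdr and ETH_P_*. */
#include <stdio.h>
#include <arpa/inet.h>

static void classify_frame(const unsigned char *frame, unsigned int len)
{
    const struct ethhdr *eth = (const struct ethhdr *)frame;

    if (len < ETH_HLEN)                  /* too short to hold a header   */
        return;

    switch (ntohs(eth->h_proto)) {       /* h_proto is stored big-endian */
    case ETH_P_IP:
        printf("IPv4 packet, %u payload bytes\n", len - ETH_HLEN);
        break;
    case ETH_P_ARP:
        printf("ARP packet\n");
        break;
    case ETH_P_RARP:
        printf("RARP packet\n");
        break;
    default:
        printf("protocol 0x%04x\n", ntohs(eth->h_proto));
        break;
    }
}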
diff --git a/i386/i386at/gpl/linux/include/linux/if_tr.h b/i386/i386at/gpl/linux/include/linux/if_tr.h
new file mode 100644
index 00000000..61629332
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/if_tr.h
@@ -0,0 +1,104 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the Token-Ring IEEE 802.5 interface.
+ *
+ * Version: @(#)if_tr.h 0.0 07/11/94
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_TR_H
+#define _LINUX_IF_TR_H
+
+
+/* IEEE 802.5 Token-Ring magic constants. The frame sizes omit the preamble
+ and FCS/CRC (frame check sequence). */
+#define TR_ALEN	6		/* Octets in one token-ring addr	*/
+#define TR_HLEN (sizeof(struct trh_hdr)+sizeof(struct trllc))
+#define AC 0x10
+#define LLC_FRAME 0x40
+#if 0
+#define ETH_HLEN 14 /* Total octets in header. */
+#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
+#define ETH_DATA_LEN 1500 /* Max. octets in payload */
+#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
+#endif
+
+
+/* These are some defined Ethernet Protocol ID's. */
+#define ETH_P_IP 0x0800 /* Internet Protocol packet */
+#define ETH_P_ARP 0x0806 /* Address Resolution packet */
+#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
+
+/* LLC and SNAP constants */
+#define EXTENDED_SAP 0xAA
+#define UI_CMD 0x03
+
+/* This is a Token-Ring frame header. */
+struct trh_hdr {
+ unsigned char ac; /* access control field */
+ unsigned char fc; /* frame control field */
+ unsigned char daddr[TR_ALEN]; /* destination address */
+ unsigned char saddr[TR_ALEN]; /* source address */
+ unsigned short rcf; /* route control field */
+ unsigned short rseg[8];/* routing registers */
+};
+
+/* This is a Token-Ring LLC structure. */
+struct trllc {
+ unsigned char dsap; /* destination SAP */
+ unsigned char ssap; /* source SAP */
+ unsigned char llc; /* LLC control field */
+ unsigned char protid[3]; /* protocol id */
+ unsigned short ethertype; /* ether type field */
+};
+
+
+/* Token-Ring statistics collection data. */
+struct tr_statistics{
+ int rx_packets; /* total packets received */
+ int tx_packets; /* total packets transmitted */
+ int rx_errors; /* bad packets received */
+ int tx_errors; /* packet transmit problems */
+ int rx_dropped; /* no space in linux buffers */
+ int tx_dropped; /* no space available in linux */
+ int multicast; /* multicast packets received */
+ int transmit_collision;
+
+ /* detailed Token-Ring errors. See IBM Token-Ring Network Architecture
+ for more info */
+
+ int line_errors;
+ int internal_errors;
+ int burst_errors;
+ int A_C_errors;
+ int abort_delimiters;
+ int lost_frames;
+ int recv_congest_count;
+ int frame_copied_errors;
+ int frequency_errors;
+ int token_errors;
+ int dummy1;
+
+};
+
+/* source routing stuff */
+
+#define TR_RII 0x80
+#define TR_RCF_DIR_BIT 0x80
+#define TR_RCF_LEN_MASK 0x1f00
+#define TR_RCF_BROADCAST 0x8000
+#define TR_RCF_LIMITED_BROADCAST 0xA000
+#define TR_RCF_FRAME2K 0x20
+#define TR_RCF_BROADCAST_MASK 0xC000
+
+#endif /* _LINUX_IF_TR_H */
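A hedged sketch of the source-routing constants in use: by 802.5 convention the routing-information indicator (TR_RII) rides in the top bit of the first source-address octet, and the routing-field length is encoded in rcf. Everything beyond what this header defines (ntohs() for byte order, the exact driver flow) is an assumption.

/* Sketch: does a received Token-Ring header carry source-routing information,
 * and if so, how long is the routing field? */
#include <arpa/inet.h>

static unsigned int tr_routing_bytes(const struct trh_hdr *trh)
{
    if (!(trh->saddr[0] & TR_RII))            /* no routing information field */
        return 0;
    /* length of the routing information field, in bytes */
    return (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
}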
diff --git a/i386/i386at/gpl/linux/include/linux/igmp.h b/i386/i386at/gpl/linux/include/linux/igmp.h
new file mode 100644
index 00000000..161528f1
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/igmp.h
@@ -0,0 +1,117 @@
+/*
+ *	Linux NET3:	Internet Group Management Protocol [IGMP]
+ *
+ * Authors:
+ * Alan Cox <Alan.Cox@linux.org>
+ *
+ * Extended to talk the BSD extended IGMP protocol of mrouted 3.6
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_IGMP_H
+#define _LINUX_IGMP_H
+
+/*
+ * IGMP protocol structures
+ */
+
+/*
+ *	Header in on-the-wire (cable) format
+ */
+
+struct igmphdr
+{
+ __u8 type;
+ __u8 code; /* For newer IGMP */
+ __u16 csum;
+ __u32 group;
+};
+
+#define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* From RFC1112 */
+#define IGMP_HOST_MEMBERSHIP_REPORT 0x12 /* Ditto */
+#define IGMP_DVMRP 0x13 /* DVMRP routing */
+#define IGMP_PIM 0x14 /* PIM routing */
+#define IGMP_HOST_NEW_MEMBERSHIP_REPORT 0x16 /* New version of 0x11 */
+#define IGMP_HOST_LEAVE_MESSAGE 0x17 /* An extra BSD seems to send */
+
+#define IGMP_MTRACE_RESP 0x1e
+#define IGMP_MTRACE 0x1f
+
+
+/*
+ * Use the BSD names for these for compatibility
+ */
+
+#define IGMP_DELAYING_MEMBER 0x01
+#define IGMP_IDLE_MEMBER 0x02
+#define IGMP_LAZY_MEMBER 0x03
+#define IGMP_SLEEPING_MEMBER 0x04
+#define IGMP_AWAKENING_MEMBER 0x05
+
+#define IGMP_OLD_ROUTER 0x00
+#define IGMP_NEW_ROUTER 0x01
+
+#define IGMP_MINLEN 8
+
+#define IGMP_MAX_HOST_REPORT_DELAY 10 /* max delay for response to */
+ /* query (in seconds) */
+
+#define IGMP_TIMER_SCALE 10 /* denotes that the igmphdr->timer field */
+ /* specifies time in 10th of seconds */
+
+#define IGMP_AGE_THRESHOLD 540 /* If this host doesn't hear any IGMP V1 */
+ /* message in this period of time, */
+ /* revert to IGMP v2 router. */
+
+#define IGMP_ALL_HOSTS htonl(0xE0000001L)
+#define IGMP_ALL_ROUTER htonl(0xE0000002L)
+#define IGMP_LOCAL_GROUP htonl(0xE0000000L)
+#define IGMP_LOCAL_GROUP_MASK htonl(0xFFFFFF00L)
+
+/*
+ * struct for keeping the multicast list in
+ */
+
+#ifdef __KERNEL__
+struct ip_mc_socklist
+{
+ unsigned long multiaddr[IP_MAX_MEMBERSHIPS]; /* This is a speed trade off */
+ struct device *multidev[IP_MAX_MEMBERSHIPS];
+};
+
+struct ip_mc_list
+{
+ struct device *interface;
+ unsigned long multiaddr;
+ struct ip_mc_list *next;
+ struct timer_list timer;
+ int tm_running;
+ int users;
+};
+
+struct ip_router_info
+{
+ struct device *dev;
+ int type; /* type of router which is querier on this interface */
+ int time; /* # of slow timeouts since last old query */
+ struct timer_list timer;
+ struct ip_router_info *next;
+};
+
+extern struct ip_mc_list *ip_mc_head;
+
+
+extern int igmp_rcv(struct sk_buff *, struct device *, struct options *, __u32, unsigned short,
+ __u32, int , struct inet_protocol *);
+extern void ip_mc_drop_device(struct device *dev);
+extern int ip_mc_join_group(struct sock *sk, struct device *dev, unsigned long addr);
+extern int ip_mc_leave_group(struct sock *sk, struct device *dev,unsigned long addr);
+extern void ip_mc_drop_socket(struct sock *sk);
+extern void ip_mr_init(void);
+#endif
+#endif
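For illustration, here is a membership report built against struct igmphdr, with the standard 16-bit ones'-complement Internet checksum computed over the 8-byte message. This is a sketch of the on-wire layout only, not the kernel's own igmp.c transmit path; the helper names are invented and htonl()/htons() are assumed from the usual headers.

/* Sketch: fill an IGMP membership report and compute its checksum. */
#include <string.h>
#include <arpa/inet.h>

static unsigned short igmp_checksum(const void *data, unsigned int len)
{
    const unsigned short *p = data;
    unsigned long sum = 0;

    while (len > 1) {                 /* sum 16-bit words          */
        sum += *p++;
        len -= 2;
    }
    if (len)                          /* possible trailing byte    */
        sum += *(const unsigned char *)p;
    while (sum >> 16)                 /* fold the carries back in  */
        sum = (sum & 0xffff) + (sum >> 16);
    return (unsigned short)~sum;
}

static void fill_report(struct igmphdr *ih, unsigned long group_be)
{
    memset(ih, 0, sizeof(*ih));
    ih->type  = IGMP_HOST_MEMBERSHIP_REPORT;   /* 0x12, RFC 1112 style          */
    ih->group = group_be;                      /* group address, network order  */
    ih->csum  = 0;                             /* checksum over csum == 0       */
    ih->csum  = igmp_checksum(ih, sizeof(*ih));
}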
diff --git a/i386/i386at/gpl/linux/include/linux/in.h b/i386/i386at/gpl/linux/include/linux/in.h
new file mode 100644
index 00000000..c8e156e8
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/in.h
@@ -0,0 +1,149 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions of the Internet Protocol.
+ *
+ * Version: @(#)in.h 1.0.1 04/21/93
+ *
+ * Authors: Original taken from the GNU Project <netinet/in.h> file.
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IN_H
+#define _LINUX_IN_H
+
+#include <linux/types.h>
+
+/* Standard well-defined IP protocols. */
+enum {
+ IPPROTO_IP = 0, /* Dummy protocol for TCP */
+ IPPROTO_ICMP = 1, /* Internet Control Message Protocol */
+  IPPROTO_IGMP = 2, /* Internet Group Management Protocol */
+ IPPROTO_IPIP = 4, /* IPIP tunnels (older KA9Q tunnels use 94) */
+ IPPROTO_TCP = 6, /* Transmission Control Protocol */
+ IPPROTO_EGP = 8, /* Exterior Gateway Protocol */
+ IPPROTO_PUP = 12, /* PUP protocol */
+ IPPROTO_UDP = 17, /* User Datagram Protocol */
+ IPPROTO_IDP = 22, /* XNS IDP protocol */
+
+ IPPROTO_RAW = 255, /* Raw IP packets */
+ IPPROTO_MAX
+};
+
+
+/* Internet address. */
+struct in_addr {
+ __u32 s_addr;
+};
+
+/* Request struct for multicast socket ops */
+
+struct ip_mreq
+{
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_interface; /* local IP address of interface */
+};
+
+
+/* Structure describing an Internet (IP) socket address. */
+#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
+struct sockaddr_in {
+ short int sin_family; /* Address family */
+ unsigned short int sin_port; /* Port number */
+ struct in_addr sin_addr; /* Internet address */
+
+ /* Pad to size of `struct sockaddr'. */
+ unsigned char __pad[__SOCK_SIZE__ - sizeof(short int) -
+ sizeof(unsigned short int) - sizeof(struct in_addr)];
+};
+#define sin_zero __pad /* for BSD UNIX comp. -FvK */
+
+
+/*
+ * Definitions of the bits in an Internet address integer.
+ * On subnets, host and network parts are found according
+ * to the subnet mask, not these masks.
+ */
+#define IN_CLASSA(a) ((((long int) (a)) & 0x80000000) == 0)
+#define IN_CLASSA_NET 0xff000000
+#define IN_CLASSA_NSHIFT 24
+#define IN_CLASSA_HOST (0xffffffff & ~IN_CLASSA_NET)
+#define IN_CLASSA_MAX 128
+
+#define IN_CLASSB(a) ((((long int) (a)) & 0xc0000000) == 0x80000000)
+#define IN_CLASSB_NET 0xffff0000
+#define IN_CLASSB_NSHIFT 16
+#define IN_CLASSB_HOST (0xffffffff & ~IN_CLASSB_NET)
+#define IN_CLASSB_MAX 65536
+
+#define IN_CLASSC(a) ((((long int) (a)) & 0xe0000000) == 0xc0000000)
+#define IN_CLASSC_NET 0xffffff00
+#define IN_CLASSC_NSHIFT 8
+#define IN_CLASSC_HOST (0xffffffff & ~IN_CLASSC_NET)
+
+#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
+#define IN_MULTICAST(a) IN_CLASSD(a)
+#define IN_MULTICAST_NET 0xF0000000
+
+#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xe0000000) == 0xe0000000)
+#define IN_BADCLASS(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+
+/* Address to accept any incoming messages. */
+#define INADDR_ANY ((unsigned long int) 0x00000000)
+
+/* Address to send to all hosts. */
+#define INADDR_BROADCAST ((unsigned long int) 0xffffffff)
+
+/* Address indicating an error return. */
+#define INADDR_NONE 0xffffffff
+
+/* Network number for local host loopback. */
+#define IN_LOOPBACKNET 127
+
+/* Address to loopback in software to local host. */
+#define INADDR_LOOPBACK 0x7f000001 /* 127.0.0.1 */
+#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000)
+
+/* Defines for Multicast INADDR */
+#define INADDR_UNSPEC_GROUP 0xe0000000 /* 224.0.0.0 */
+#define INADDR_ALLHOSTS_GROUP 0xe0000001 /* 224.0.0.1 */
+#define INADDR_MAX_LOCAL_GROUP 0xe00000ff /* 224.0.0.255 */
+
+/* <asm/byteorder.h> contains the htonl type stuff.. */
+
+#include <asm/byteorder.h>
+
+/* Some random defines to make it easier in the kernel.. */
+#ifdef __KERNEL__
+
+#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
+#define MULTICAST(x) (((x) & htonl(0xf0000000)) == htonl(0xe0000000))
+
+#endif
+
+/*
+ * IPv6 definitions as we start to include them. This is just
+ *	a beginning, don't get excited 8)
+ */
+
+struct in_addr6
+{
+ unsigned char s6_addr[16];
+};
+
+struct sockaddr_in6
+{
+ unsigned short sin6_family;
+ unsigned short sin6_port;
+ unsigned long sin6_flowinfo;
+ struct in_addr6 sin6_addr;
+};
+
+
+#endif /* _LINUX_IN_H */
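A small user-space illustration of the definitions above, compiled against a system whose headers provide the same names (AF_INET, htons()/htonl(), the sockaddr_in layout and the class macros): fill a sockaddr_in for an example port and test an address with the class/multicast macros, which take host-order values.

/* Sketch: sockaddr_in setup plus the address-class macros. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>

int main(void)
{
    struct sockaddr_in sin;
    unsigned long addr = 0xe0000001UL;              /* 224.0.0.1, host order */

    memset(&sin, 0, sizeof(sin));                   /* also clears the pad   */
    sin.sin_family      = AF_INET;
    sin.sin_port        = htons(5000);              /* ports are big-endian  */
    sin.sin_addr.s_addr = htonl(addr);

    printf("class A?   %s\n", IN_CLASSA(addr)    ? "yes" : "no");
    printf("class D?   %s\n", IN_CLASSD(addr)    ? "yes" : "no");
    printf("multicast? %s\n", IN_MULTICAST(addr) ? "yes" : "no");
    return 0;
}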
diff --git a/i386/i386at/gpl/linux/include/linux/inet.h b/i386/i386at/gpl/linux/include/linux/inet.h
new file mode 100644
index 00000000..9ecc9cb3
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/inet.h
@@ -0,0 +1,52 @@
+/*
+ * Swansea University Computer Society NET3
+ *
+ * This work is derived from NET2Debugged, which is in turn derived
+ * from NET2D which was written by:
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This work was derived from Ross Biro's inspirational work
+ * for the LINUX operating system. His version numbers were:
+ *
+ *		$Id: inet.h,v 1.1.1.1 1997/02/25 21:27:28 thomas Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_INET_H
+#define _LINUX_INET_H
+
+#ifdef __KERNEL__
+
+extern void inet_proto_init(struct net_proto *pro);
+extern char *in_ntoa(unsigned long in);
+extern unsigned long in_aton(const char *str);
+
+#endif
+#endif /* _LINUX_INET_H */
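As a rough idea of what the declared helpers do, here is a minimal, illustrative in_aton()-style converter for well-formed dotted-quad strings; it is written from the prototype alone and is not the kernel's implementation. htonl() is assumed available (e.g. from <arpa/inet.h> in user space), and no error handling is attempted.

/* Sketch: dotted quad -> address in network byte order. */
#include <arpa/inet.h>

static unsigned long my_in_aton(const char *str)
{
    unsigned long result = 0;
    int octet, i;

    for (i = 0; i < 4; i++) {
        octet = 0;
        while (*str >= '0' && *str <= '9')      /* parse one decimal octet */
            octet = octet * 10 + (*str++ - '0');
        result = (result << 8) | (octet & 0xff);
        if (*str == '.')                        /* skip the separator      */
            str++;
    }
    return htonl(result);                       /* network byte order      */
}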
diff --git a/i386/i386at/gpl/linux/include/linux/interrupt.h b/i386/i386at/gpl/linux/include/linux/interrupt.h
new file mode 100644
index 00000000..a20cbe8e
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/interrupt.h
@@ -0,0 +1,91 @@
+/* interrupt.h */
+#ifndef _LINUX_INTERRUPT_H
+#define _LINUX_INTERRUPT_H
+
+#include <linux/kernel.h>
+#include <asm/bitops.h>
+
+struct bh_struct {
+ void (*routine)(void *);
+ void *data;
+};
+
+extern unsigned long bh_active;
+extern unsigned long bh_mask;
+extern struct bh_struct bh_base[32];
+
+asmlinkage void do_bottom_half(void);
+
+/* Who gets which entry in bh_base. Things which will occur most often
+ should come first - in which case NET should be up the top with SERIAL/TQUEUE! */
+
+enum {
+ TIMER_BH = 0,
+ CONSOLE_BH,
+ TQUEUE_BH,
+ SERIAL_BH,
+ NET_BH,
+ IMMEDIATE_BH,
+ KEYBOARD_BH,
+ CYCLADES_BH,
+ CM206_BH
+};
+
+extern inline void mark_bh(int nr)
+{
+ set_bit(nr, &bh_active);
+}
+
+extern inline void disable_bh(int nr)
+{
+ clear_bit(nr, &bh_mask);
+}
+
+extern inline void enable_bh(int nr)
+{
+ set_bit(nr, &bh_mask);
+}
+
+extern inline void start_bh_atomic(void)
+{
+ intr_count++;
+ barrier();
+}
+
+extern inline void end_bh_atomic(void)
+{
+ barrier();
+ intr_count--;
+}
+
+/*
+ * Autoprobing for irqs:
+ *
+ * probe_irq_on() and probe_irq_off() provide robust primitives
+ * for accurate IRQ probing during kernel initialization. They are
+ * reasonably simple to use, are not "fooled" by spurious interrupts,
+ * and, unlike other attempts at IRQ probing, they do not get hung on
+ * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
+ *
+ * For reasonably foolproof probing, use them as follows:
+ *
+ * 1. clear and/or mask the device's internal interrupt.
+ * 2. sti();
+ * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs
+ * 4. enable the device and cause it to trigger an interrupt.
+ * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
+ * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple
+ * 7. service the device to clear its pending interrupt.
+ * 8. loop again if paranoia is required.
+ *
+ * probe_irq_on() returns a mask of allocated irq's.
+ *
+ * probe_irq_off() takes the mask as a parameter,
+ * and returns the irq number which occurred,
+ * or zero if none occurred, or a negative irq number
+ * if more than one irq occurred.
+ */
+extern unsigned long probe_irq_on(void); /* returns 0 on failure */
+extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */
+
+#endif
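The numbered recipe in the comment above maps onto code roughly as follows. The dev_*() helpers are made-up stand-ins for device-specific register pokes, and sti()/udelay() are assumed from the usual asm headers; only probe_irq_on()/probe_irq_off() come from this interface.

/* Sketch: IRQ autoprobe in a driver's probe routine, following steps 1-8. */
static int probe_device_irq(void)
{
    unsigned long irqs;
    int irq;

    dev_mask_irq();                 /* 1. quiesce the device            */
    sti();                          /* 2. allow interrupts              */
    irqs = probe_irq_on();          /* 3. claim all unassigned IRQs     */
    dev_trigger_irq();              /* 4. make the device interrupt     */
    udelay(1000);                   /* 5. give it time to fire          */
    irq = probe_irq_off(irqs);      /* 6. 0 = none, <0 = several        */
    dev_ack_irq();                  /* 7. clear the pending interrupt   */

    if (irq <= 0)                   /* 8. caller may loop and retry     */
        return -1;
    return irq;
}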
diff --git a/i386/i386at/gpl/linux/include/linux/ioctl.h b/i386/i386at/gpl/linux/include/linux/ioctl.h
new file mode 100644
index 00000000..aa91eb39
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/ioctl.h
@@ -0,0 +1,7 @@
+#ifndef _LINUX_IOCTL_H
+#define _LINUX_IOCTL_H
+
+#include <asm/ioctl.h>
+
+#endif /* _LINUX_IOCTL_H */
+
diff --git a/i386/i386at/gpl/linux/include/linux/ioport.h b/i386/i386at/gpl/linux/include/linux/ioport.h
new file mode 100644
index 00000000..335e3b65
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/ioport.h
@@ -0,0 +1,31 @@
+/*
+ * portio.h Definitions of routines for detecting, reserving and
+ * allocating system resources.
+ *
+ * Version: 0.01 8/30/93
+ *
+ * Author: Donald Becker (becker@super.org)
+ */
+
+#ifndef _LINUX_PORTIO_H
+#define _LINUX_PORTIO_H
+
+#define HAVE_PORTRESERVE
+/*
+ * Call check_region() before probing for your hardware.
+ * Once you have found you hardware, register it with request_region().
+ * Once you have found your hardware, register it with request_region().
+ */
+extern void reserve_setup(char *str, int *ints);
+extern int check_region(unsigned int from, unsigned int extent);
+extern void request_region(unsigned int from, unsigned int extent,const char *name);
+extern void release_region(unsigned int from, unsigned int extent);
+extern int get_ioport_list(char *);
+
+
+#define HAVE_AUTOIRQ
+extern void *irq2dev_map[16]; /* Use only if you own the IRQ. */
+extern int autoirq_setup(int waittime);
+extern int autoirq_report(int waittime);
+
+#endif /* _LINUX_PORTIO_H */
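The check/request/release sequence described in the comment, as it might appear in a driver; the base address, extent, and device name below are hypothetical.

/* Sketch: reserve an I/O port range at probe time, release it on unload. */
#define MYDEV_BASE   0x300          /* hypothetical I/O base  */
#define MYDEV_EXTENT 8              /* hypothetical port span */

static int mydev_probe(void)
{
    if (check_region(MYDEV_BASE, MYDEV_EXTENT))          /* ports already taken? */
        return -1;
    /* ... poke the hardware here to confirm it is really present ... */
    request_region(MYDEV_BASE, MYDEV_EXTENT, "mydev");   /* claim the ports      */
    return 0;
}

static void mydev_unload(void)
{
    release_region(MYDEV_BASE, MYDEV_EXTENT);            /* give the ports back  */
}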
diff --git a/i386/i386at/gpl/linux/include/linux/ip.h b/i386/i386at/gpl/linux/include/linux/ip.h
new file mode 100644
index 00000000..4d5d70c1
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/ip.h
@@ -0,0 +1,113 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP protocol.
+ *
+ * Version: @(#)ip.h 1.0.2 04/28/93
+ *
+ * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IP_H
+#define _LINUX_IP_H
+#include <asm/byteorder.h>
+
+#define IPOPT_END 0
+#define IPOPT_NOOP 1
+#define IPOPT_SEC 130
+#define IPOPT_LSRR 131
+#define IPOPT_SSRR 137
+#define IPOPT_RR 7
+#define IPOPT_SID 136
+#define IPOPT_TIMESTAMP 68
+
+
+#define MAXTTL 255
+
+struct timestamp {
+ __u8 len;
+ __u8 ptr;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 flags:4,
+ overflow:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 overflow:4,
+ flags:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u32 data[9];
+};
+
+
+#define MAX_ROUTE 16
+
+struct route {
+ char route_size;
+ char pointer;
+ unsigned long route[MAX_ROUTE];
+};
+
+#define IPOPT_OPTVAL 0
+#define IPOPT_OLEN 1
+#define IPOPT_OFFSET 2
+#define IPOPT_MINOFF 4
+#define MAX_IPOPTLEN 40
+#define IPOPT_NOP IPOPT_NOOP
+#define IPOPT_EOL IPOPT_END
+#define IPOPT_TS IPOPT_TIMESTAMP
+
+#define IPOPT_TS_TSONLY 0 /* timestamps only */
+#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */
+#define IPOPT_TS_PRESPEC 2 /* specified modules only */
+
+struct options {
+ __u32 faddr; /* Saved first hop address */
+ unsigned char optlen;
+ unsigned char srr;
+ unsigned char rr;
+ unsigned char ts;
+ unsigned char is_setbyuser:1, /* Set by setsockopt? */
+ is_data:1, /* Options in __data, rather than skb */
+ is_strictroute:1, /* Strict source route */
+			srr_is_hit:1,			/* Packet destination addr was ours	*/
+			is_changed:1,			/* IP checksum no longer valid		*/
+ rr_needaddr:1, /* Need to record addr of outgoing dev */
+ ts_needtime:1, /* Need to record timestamp */
+ ts_needaddr:1; /* Need to record addr of outgoing dev */
+ unsigned char __pad1;
+ unsigned char __pad2;
+ unsigned char __pad3;
+ unsigned char __data[0];
+};
+
+struct iphdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 ihl:4,
+ version:4;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u8 version:4,
+ ihl:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 tos;
+ __u16 tot_len;
+ __u16 id;
+ __u16 frag_off;
+ __u8 ttl;
+ __u8 protocol;
+ __u16 check;
+ __u32 saddr;
+ __u32 daddr;
+ /*The options start here. */
+};
+
+
+#endif /* _LINUX_IP_H */
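A sketch of consuming struct iphdr on receive: basic length/version checks plus the standard ones'-complement header checksum, which folds to zero over a header whose stored check field is correct. ntohs() is assumed from the byte-order headers; a real stack validates considerably more than this.

/* Sketch: sanity-check a received IPv4 header. */
#include <arpa/inet.h>

static unsigned short ip_hdr_checksum(const struct iphdr *iph)
{
    const unsigned short *p = (const unsigned short *)iph;
    unsigned long sum = 0;
    int words = iph->ihl * 2;            /* ihl counts 32-bit units          */

    while (words--)
        sum += *p++;
    while (sum >> 16)                    /* fold carries                     */
        sum = (sum & 0xffff) + (sum >> 16);
    return (unsigned short)~sum;         /* 0 when the stored check is valid */
}

static int ip_header_ok(const struct iphdr *iph, unsigned int len)
{
    if (len < sizeof(struct iphdr))      /* not even a minimal header        */
        return 0;
    if (iph->version != 4)               /* only IPv4 here                   */
        return 0;
    if (iph->ihl < 5)                    /* header must be >= 20 bytes       */
        return 0;
    if (ntohs(iph->tot_len) > len)       /* truncated packet                 */
        return 0;
    return ip_hdr_checksum(iph) == 0;
}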
diff --git a/i386/i386at/gpl/linux/include/linux/ipc.h b/i386/i386at/gpl/linux/include/linux/ipc.h
new file mode 100644
index 00000000..3878e020
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/ipc.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_IPC_H
+#define _LINUX_IPC_H
+#include <linux/types.h>
+
+typedef int key_t; /* should go in <types.h> type for IPC key */
+#define IPC_PRIVATE ((key_t) 0)
+
+struct ipc_perm
+{
+ key_t key;
+ ushort uid; /* owner euid and egid */
+ ushort gid;
+ ushort cuid; /* creator euid and egid */
+ ushort cgid;
+ ushort mode; /* access modes see mode flags below */
+ ushort seq; /* sequence number */
+};
+
+
+/* resource get request flags */
+#define IPC_CREAT 00001000 /* create if key is nonexistent */
+#define IPC_EXCL 00002000 /* fail if key exists */
+#define IPC_NOWAIT 00004000 /* return error on wait */
+
+
+/*
+ * Control commands used with semctl, msgctl and shmctl
+ * see also specific commands in sem.h, msg.h and shm.h
+ */
+#define IPC_RMID 0 /* remove resource */
+#define IPC_SET 1 /* set ipc_perm options */
+#define IPC_STAT 2 /* get ipc_perm options */
+#define IPC_INFO 3 /* see ipcs */
+
+#ifdef __KERNEL__
+
+/* special shmsegs[id], msgque[id] or semary[id] values */
+#define IPC_UNUSED ((void *) -1)
+#define IPC_NOID ((void *) -2) /* being allocated/destroyed */
+
+/*
+ * These are used to wrap system calls. See ipc/util.c.
+ */
+struct ipc_kludge {
+ struct msgbuf *msgp;
+ long msgtyp;
+};
+
+#define SEMOP 1
+#define SEMGET 2
+#define SEMCTL 3
+#define MSGSND 11
+#define MSGRCV 12
+#define MSGGET 13
+#define MSGCTL 14
+#define SHMAT 21
+#define SHMDT 22
+#define SHMGET 23
+#define SHMCTL 24
+
+#define IPCCALL(version,op) ((version)<<16 | (op))
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_IPC_H */
+
+
diff --git a/i386/i386at/gpl/linux/include/linux/kdev_t.h b/i386/i386at/gpl/linux/include/linux/kdev_t.h
new file mode 100644
index 00000000..0497ea8c
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/kdev_t.h
@@ -0,0 +1,114 @@
+#ifndef _LINUX_KDEV_T_H
+#define _LINUX_KDEV_T_H
+#ifdef __KERNEL__
+/*
+As a preparation for the introduction of larger device numbers,
+we introduce a type kdev_t to hold them. No information about
+this type is known outside of this include file.
+
+Objects of type kdev_t designate a device. Outside of the kernel
+the corresponding things are objects of type dev_t - usually an
+integral type with the device major and minor in the high and low
+bits, respectively. Conversion is done by
+
+extern kdev_t to_kdev_t(int);
+
+It is up to the various file systems to decide how objects of type
+dev_t are stored on disk.
+The only other point of contact between kernel and outside world
+are the system calls stat and mknod, new versions of which will
+eventually have to be used in libc.
+
+[Unfortunately, the floppy control ioctls fail to hide the internal
+kernel structures, and the fd_device field of a struct floppy_drive_struct
+is user-visible. So, it remains a dev_t for the moment, with some ugly
+conversions in floppy.c.]
+
+Inside the kernel, we aim for a kdev_t type that is a pointer
+to a structure with information about the device (like major,
+minor, size, blocksize, sectorsize, name, read-only flag,
+struct file_operations etc.).
+
+However, for the time being we let kdev_t be almost the same as dev_t:
+
+typedef struct { unsigned short major, minor; } kdev_t;
+
+Admissible operations on an object of type kdev_t:
+- passing it along
+- comparing it for equality with another such object
+- storing it in ROOT_DEV, inode->i_dev, inode->i_rdev, sb->s_dev,
+ bh->b_dev, req->rq_dev, de->dc_dev, tty->device
+- using its bit pattern as argument in a hash function
+- finding its major and minor
+- complaining about it
+
+An object of type kdev_t is created only by the function MKDEV(),
+with the single exception of the constant 0 (no device).
+
+Right now the other information mentioned above is usually found
+in static arrays indexed by major or major,minor.
+
+An obstacle to immediately using
+ typedef struct { ... (* lots of information *) } *kdev_t
+is the case of mknod used to create a block device that the
+kernel doesn't know about at present (but first learns about
+when some module is inserted).
+
+aeb - 950811
+*/
+
+/* Since MINOR(dev) is used as index in static arrays,
+ the kernel is not quite ready yet for larger minors.
+ However, everything runs fine with an arbitrary kdev_t type. */
+
+#define MINORBITS 8
+#define MINORMASK ((1<<MINORBITS) - 1)
+
+typedef unsigned short kdev_t;
+
+#define MAJOR(dev) ((dev) >> MINORBITS)
+#define MINOR(dev) ((dev) & MINORMASK)
+#define HASHDEV(dev) (dev)
+#define NODEV 0
+#define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi))
+#define B_FREE 0xffff /* yuk */
+
+extern char * kdevname(kdev_t); /* note: returns pointer to static data! */
+
+/*
+As long as device numbers in the outside world have 16 bits only,
+we use these conversions.
+*/
+
+static inline unsigned int kdev_t_to_nr(kdev_t dev) {
+ return (MAJOR(dev)<<8) | MINOR(dev);
+}
+
+static inline kdev_t to_kdev_t(int dev)
+{
+ int major, minor;
+#if 0
+ major = (dev >> 16);
+ if (!major) {
+ major = (dev >> 8);
+ minor = (dev & 0xff);
+ } else
+ minor = (dev & 0xffff);
+#else
+ major = (dev >> 8);
+ minor = (dev & 0xff);
+#endif
+ return MKDEV(major, minor);
+}
+
+#else /* __KERNEL__ */
+
+/*
+Some programs want their definitions of MAJOR and MINOR and MKDEV
+from the kernel sources. These must be the externally visible ones.
+*/
+#define MAJOR(dev) ((dev)>>8)
+#define MINOR(dev) ((dev) & 0xff)
+#define MKDEV(ma,mi) ((ma)<<8 | (mi))
+#endif /* __KERNEL__ */
+#endif
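A short illustration of the conversions described above, assuming printk() from <linux/kernel.h>; 0x0341 is an arbitrary external device number (major 3, minor 65).

/* Sketch: round trip between the external 16-bit dev_t form and kdev_t. */
static void kdev_demo(void)
{
    kdev_t dev = to_kdev_t(0x0341);           /* from an on-disk/user dev_t   */

    printk("major %d, minor %d\n", MAJOR(dev), MINOR(dev));
    if (kdev_t_to_nr(dev) == 0x0341)          /* back to the external form    */
        printk("round trip preserved\n");

    dev = MKDEV(3, 65);                       /* the only sanctioned way to
                                                 build a kdev_t directly      */
    (void)dev;
}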
diff --git a/i386/i386at/gpl/linux/include/linux/kernel.h b/i386/i386at/gpl/linux/include/linux/kernel.h
new file mode 100644
index 00000000..d4985576
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/kernel.h
@@ -0,0 +1,94 @@
+#ifndef _LINUX_KERNEL_H
+#define _LINUX_KERNEL_H
+
+/*
+ * 'kernel.h' contains some often-used function prototypes etc
+ */
+
+#ifdef __KERNEL__
+
+#include <stdarg.h>
+#include <linux/linkage.h>
+
+#define INT_MAX ((int)(~0U>>1))
+#define UINT_MAX (~0U)
+#define LONG_MAX ((long)(~0UL>>1))
+#define ULONG_MAX (~0UL)
+
+#define STACK_MAGIC 0xdeadbeef
+
+#define KERN_EMERG "<0>" /* system is unusable */
+#define KERN_ALERT "<1>" /* action must be taken immediately */
+#define KERN_CRIT "<2>" /* critical conditions */
+#define KERN_ERR "<3>" /* error conditions */
+#define KERN_WARNING "<4>" /* warning conditions */
+#define KERN_NOTICE "<5>" /* normal but significant condition */
+#define KERN_INFO "<6>" /* informational */
+#define KERN_DEBUG "<7>" /* debug-level messages */
+
+#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 5)
+# define NORET_TYPE __volatile__
+# define ATTRIB_NORET /**/
+# define NORET_AND /**/
+#else
+# define NORET_TYPE /**/
+# define ATTRIB_NORET __attribute__((noreturn))
+# define NORET_AND noreturn,
+#endif
+
+extern void math_error(void);
+NORET_TYPE void panic(const char * fmt, ...)
+ __attribute__ ((NORET_AND format (printf, 1, 2)));
+NORET_TYPE void do_exit(long error_code)
+ ATTRIB_NORET;
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern int linux_sprintf(char * buf, const char * fmt, ...);
+extern int linux_vsprintf(char *buf, const char *, va_list);
+#ifndef MACH_INCLUDE
+#define sprintf linux_sprintf
+#define vsprintf linux_vsprintf
+#endif
+
+extern int session_of_pgrp(int pgrp);
+
+extern int kill_proc(int pid, int sig, int priv);
+extern int kill_pg(int pgrp, int sig, int priv);
+extern int kill_sl(int sess, int sig, int priv);
+
+asmlinkage int printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+/*
+ * This is defined as a macro, but at some point this might become a
+ * real subroutine that sets a flag if it returns true (to do
+ * BSD-style accounting where the process is flagged if it uses root
+ * privs). The implication of this is that you should do normal
+ * permissions checks first, and check suser() last.
+ *
+ * "suser()" checks against the effective user id, while "fsuser()"
+ * is used for file permission checking and checks against the fsuid..
+ */
+#ifdef MACH
+#define suser() 1
+#else
+#define suser() (current->euid == 0)
+#endif
+#define fsuser() (current->fsuid == 0)
+
+#endif /* __KERNEL__ */
+
+#define SI_LOAD_SHIFT 16
+struct sysinfo {
+ long uptime; /* Seconds since boot */
+ unsigned long loads[3]; /* 1, 5, and 15 minute load averages */
+ unsigned long totalram; /* Total usable main memory size */
+ unsigned long freeram; /* Available memory size */
+ unsigned long sharedram; /* Amount of shared memory */
+ unsigned long bufferram; /* Memory used by buffers */
+ unsigned long totalswap; /* Total swap space size */
+ unsigned long freeswap; /* swap space still available */
+ unsigned short procs; /* Number of current processes */
+ char _f[22]; /* Pads structure to 64 bytes */
+};
+
+#endif
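The KERN_* prefixes are plain string literals meant to be concatenated with the format string, so printk() callers tag each message with a level. A small sketch (driver name and values invented):

/* Sketch: log-level prefixes ride along in the first bytes of the message. */
static void report_status(int irq, long usec)
{
    printk(KERN_INFO "mydrv: initialised on irq %d\n", irq);
    if (usec > 1000)
        printk(KERN_WARNING "mydrv: slow response (%ld usec)\n", usec);
}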
diff --git a/i386/i386at/gpl/linux/include/linux/kernel_stat.h b/i386/i386at/gpl/linux/include/linux/kernel_stat.h
new file mode 100644
index 00000000..1966490a
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/kernel_stat.h
@@ -0,0 +1,32 @@
+#ifndef _LINUX_KERNEL_STAT_H
+#define _LINUX_KERNEL_STAT_H
+
+#include <asm/irq.h>
+
+/*
+ * 'kernel_stat.h' contains the definitions needed for doing
+ * some kernel statistics (cpu usage, context switches ...),
+ * used by rstatd/perfmeter
+ */
+
+#define DK_NDRIVE 4
+
+struct kernel_stat {
+ unsigned int cpu_user, cpu_nice, cpu_system;
+ unsigned int dk_drive[DK_NDRIVE];
+ unsigned int dk_drive_rio[DK_NDRIVE];
+ unsigned int dk_drive_wio[DK_NDRIVE];
+ unsigned int dk_drive_rblk[DK_NDRIVE];
+ unsigned int dk_drive_wblk[DK_NDRIVE];
+ unsigned int pgpgin, pgpgout;
+ unsigned int pswpin, pswpout;
+ unsigned int interrupts[NR_IRQS];
+ unsigned int ipackets, opackets;
+ unsigned int ierrors, oerrors;
+ unsigned int collisions;
+ unsigned int context_swtch;
+};
+
+extern struct kernel_stat kstat;
+
+#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/i386/i386at/gpl/linux/include/linux/limits.h b/i386/i386at/gpl/linux/include/linux/limits.h
new file mode 100644
index 00000000..d0f300c4
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/limits.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_LIMITS_H
+#define _LINUX_LIMITS_H
+
+#define NR_OPEN 256
+
+#define NGROUPS_MAX 32 /* supplemental group IDs are available */
+#define ARG_MAX 131072 /* # bytes of args + environ for exec() */
+#define CHILD_MAX 999 /* no limit :-) */
+#define OPEN_MAX 256 /* # open files a process may have */
+#define LINK_MAX 127 /* # links a file may have */
+#define MAX_CANON 255 /* size of the canonical input queue */
+#define MAX_INPUT 255 /* size of the type-ahead buffer */
+#define NAME_MAX 255 /* # chars in a file name */
+#define PATH_MAX 1024 /* # chars in a path name */
+#define PIPE_BUF 4096 /* # bytes in atomic write to a pipe */
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/linkage.h b/i386/i386at/gpl/linux/include/linux/linkage.h
new file mode 100644
index 00000000..c8a7a491
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/linkage.h
@@ -0,0 +1,59 @@
+#ifndef _LINUX_LINKAGE_H
+#define _LINUX_LINKAGE_H
+
+#ifdef __cplusplus
+#define asmlinkage extern "C"
+#else
+#define asmlinkage
+#endif
+
+#ifdef __ELF__
+#define SYMBOL_NAME_STR(X) #X
+#define SYMBOL_NAME(X) X
+#ifdef __STDC__
+#define SYMBOL_NAME_LABEL(X) X##:
+#else
+#define SYMBOL_NAME_LABEL(X) X/**/:
+#endif
+#else
+#define SYMBOL_NAME_STR(X) "_"#X
+#ifdef __STDC__
+#define SYMBOL_NAME(X) _##X
+#define SYMBOL_NAME_LABEL(X) _##X##:
+#else
+#define SYMBOL_NAME(X) _/**/X
+#define SYMBOL_NAME_LABEL(X) _/**/X/**/:
+#endif
+#endif
+
+#if !defined(__i486__) && !defined(__i586__)
+#ifdef __ELF__
+#define __ALIGN .align 4,0x90
+#define __ALIGN_STR ".align 4,0x90"
+#else /* __ELF__ */
+#define __ALIGN .align 2,0x90
+#define __ALIGN_STR ".align 2,0x90"
+#endif /* __ELF__ */
+#else /* __i486__/__i586__ */
+#ifdef __ELF__
+#define __ALIGN .align 16,0x90
+#define __ALIGN_STR ".align 16,0x90"
+#else /* __ELF__ */
+#define __ALIGN .align 4,0x90
+#define __ALIGN_STR ".align 4,0x90"
+#endif /* __ELF__ */
+#endif /* __i486__/__i586__ */
+
+#ifdef __ASSEMBLY__
+
+#define ALIGN __ALIGN
+#define ALIGN_STRING __ALIGN_STRING
+
+#define ENTRY(name) \
+ .globl SYMBOL_NAME(name); \
+ ALIGN; \
+ SYMBOL_NAME_LABEL(name)
+
+#endif
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/locks.h b/i386/i386at/gpl/linux/include/linux/locks.h
new file mode 100644
index 00000000..c3202b08
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/locks.h
@@ -0,0 +1,66 @@
+#ifndef _LINUX_LOCKS_H
+#define _LINUX_LOCKS_H
+
+#ifndef _LINUX_MM_H
+#include <linux/mm.h>
+#endif
+#ifndef _LINUX_PAGEMAP_H
+#include <linux/pagemap.h>
+#endif
+
+/*
+ * Unlocked, temporary IO buffer_heads get moved to the reuse_list
+ * once their page becomes unlocked.
+ */
+extern struct buffer_head *reuse_list;
+
+/*
+ * Buffer cache locking - note that interrupts may only unlock, not
+ * lock buffers.
+ */
+extern void __wait_on_buffer(struct buffer_head *);
+
+extern inline void wait_on_buffer(struct buffer_head * bh)
+{
+ if (test_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+extern inline void lock_buffer(struct buffer_head * bh)
+{
+ if (set_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+void unlock_buffer(struct buffer_head *);
+
+#ifndef MACH
+/*
+ * super-block locking. Again, interrupts may only unlock
+ * a super-block (although even this isn't done right now.
+ * nfs may need it).
+ */
+extern void __wait_on_super(struct super_block *);
+
+extern inline void wait_on_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+}
+
+extern inline void lock_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+ sb->s_lock = 1;
+}
+
+extern inline void unlock_super(struct super_block * sb)
+{
+ sb->s_lock = 0;
+ wake_up(&sb->s_wait);
+}
+#endif /* ! MACH */
+
+#endif /* _LINUX_LOCKS_H */
+
diff --git a/i386/i386at/gpl/linux/include/linux/major.h b/i386/i386at/gpl/linux/include/linux/major.h
new file mode 100644
index 00000000..c1b2dcf0
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/major.h
@@ -0,0 +1,119 @@
+#ifndef _LINUX_MAJOR_H
+#define _LINUX_MAJOR_H
+
+/*
+ * This file has definitions for major device numbers
+ */
+
+/* limits */
+
+#define MAX_CHRDEV 64
+#define MAX_BLKDEV 64
+
+/*
+ * assignments
+ *
+ * devices are as follows (same as minix, so we can use the minix fs):
+ *
+ * character block comments
+ * -------------------- -------------------- --------------------
+ * 0 - unnamed unnamed minor 0 = true nodev
+ * 1 - /dev/mem ramdisk
+ * 2 - /dev/ptyp* floppy
+ * 3 - /dev/ttyp* ide0 or hd
+ * 4 - /dev/tty*
+ * 5 - /dev/tty; /dev/cua*
+ * 6 - lp
+ * 7 - /dev/vcs*
+ * 8 - scsi disk
+ * 9 - scsi tape
+ * 10 - mice
+ * 11 - scsi cdrom
+ * 12 - qic02 tape
+ * 13 - xt disk
+ * 14 - sound card
+ * 15 - cdu31a cdrom
+ * 16 - sockets goldstar cdrom
+ * 17 - af_unix optics cdrom
+ * 18 - af_inet sanyo cdrom
+ * 19 - cyclades /dev/ttyC*
+ * 20 - cyclades /dev/cub* mitsumi (mcdx) cdrom
+ * 21 - scsi generic
+ * 22 - ide1
+ * 23 - mitsumi cdrom
+ * 24 - sony535 cdrom
+ * 25 - matsushita cdrom minors 0..3
+ * 26 - matsushita cdrom 2 minors 0..3
+ * 27 - qic117 tape matsushita cdrom 3 minors 0..3
+ * 28 - matsushita cdrom 4 minors 0..3
+ * 29 - aztech/orchid/okano/wearnes cdrom
+ * 32 - philips/lms cm206 cdrom
+ * 33 - ide2
+ * 34 - z8530 driver ide3
+ * 36 - netlink
+ */
+
+#define UNNAMED_MAJOR 0
+#define MEM_MAJOR 1
+#define RAMDISK_MAJOR 1
+#define FLOPPY_MAJOR 2
+#define PTY_MASTER_MAJOR 2
+#define IDE0_MAJOR 3
+#define PTY_SLAVE_MAJOR 3
+#define HD_MAJOR IDE0_MAJOR
+#define TTY_MAJOR 4
+#define TTYAUX_MAJOR 5
+#define LP_MAJOR 6
+#define VCS_MAJOR 7
+#define SCSI_DISK_MAJOR 8
+#define SCSI_TAPE_MAJOR 9
+#define MOUSE_MAJOR 10
+#define SCSI_CDROM_MAJOR 11
+#define QIC02_TAPE_MAJOR 12
+#define XT_DISK_MAJOR 13
+#define SOUND_MAJOR 14
+#define CDU31A_CDROM_MAJOR 15
+#define SOCKET_MAJOR 16
+#define GOLDSTAR_CDROM_MAJOR 16
+#define AF_UNIX_MAJOR 17
+#define OPTICS_CDROM_MAJOR 17
+#define AF_INET_MAJOR 18
+#define SANYO_CDROM_MAJOR 18
+#define CYCLADES_MAJOR 19
+#define CYCLADESAUX_MAJOR 20
+#define MITSUMI_X_CDROM_MAJOR 20
+#define SCSI_GENERIC_MAJOR 21
+#define Z8530_MAJOR 34
+#define IDE1_MAJOR 22
+#define MITSUMI_CDROM_MAJOR 23
+#define CDU535_CDROM_MAJOR 24
+#define STL_SERIALMAJOR 24
+#define MATSUSHITA_CDROM_MAJOR 25
+#define STL_CALLOUTMAJOR 25
+#define MATSUSHITA_CDROM2_MAJOR 26
+#define QIC117_TAPE_MAJOR 27
+#define MATSUSHITA_CDROM3_MAJOR 27
+#define MATSUSHITA_CDROM4_MAJOR 28
+#define STL_SIOMEMMAJOR 28
+#define AZTECH_CDROM_MAJOR 29
+#define CM206_CDROM_MAJOR 32
+#define IDE2_MAJOR 33
+#define IDE3_MAJOR 34
+#define NETLINK_MAJOR 36
+#define IDETAPE_MAJOR 37
+
+/*
+ * Tests for SCSI devices.
+ */
+
+#define SCSI_MAJOR(M) \
+ ((M) == SCSI_DISK_MAJOR \
+ || (M) == SCSI_TAPE_MAJOR \
+ || (M) == SCSI_CDROM_MAJOR \
+ || (M) == SCSI_GENERIC_MAJOR)
+
+static inline int scsi_major(int m) {
+ return SCSI_MAJOR(m);
+}
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/malloc.h b/i386/i386at/gpl/linux/include/linux/malloc.h
new file mode 100644
index 00000000..847383ad
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/malloc.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_MALLOC_H
+#define _LINUX_MALLOC_H
+
+#include <linux/mm.h>
+
+#ifndef MACH_INCLUDE
+#define kmalloc linux_kmalloc
+#define kfree linux_kfree
+#endif
+
+void *linux_kmalloc(unsigned int size, int priority);
+void linux_kfree(void * obj);
+
+#define kfree_s(a,b) linux_kfree(a)
+
+#endif /* _LINUX_MALLOC_H */
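A hedged usage sketch: GFP_KERNEL is assumed to come from <linux/mm.h> (already included here), and since this tree may build with MACH_INCLUDE defined, the unmapped linux_kmalloc()/linux_kfree() names are used directly.

/* Sketch: allocate and free a driver buffer through the wrappers above. */
static char *probe_buffer;

static int alloc_probe_buffer(void)
{
    probe_buffer = linux_kmalloc(4096, GFP_KERNEL);  /* may sleep                 */
    if (!probe_buffer)
        return -1;                                   /* out of memory             */
    return 0;
}

static void free_probe_buffer(void)
{
    if (probe_buffer) {
        linux_kfree(probe_buffer);                   /* same as kfree_s(buf, 4096) */
        probe_buffer = NULL;
    }
}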
diff --git a/i386/i386at/gpl/linux/include/linux/math_emu.h b/i386/i386at/gpl/linux/include/linux/math_emu.h
new file mode 100644
index 00000000..0d9606d9
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/math_emu.h
@@ -0,0 +1,43 @@
+#ifndef _LINUX_MATH_EMU_H
+#define _LINUX_MATH_EMU_H
+
+struct fpu_reg {
+ char sign;
+ char tag;
+ long exp;
+ unsigned sigl;
+ unsigned sigh;
+};
+
+
+/* This structure matches the layout of the data saved to the stack
+ following a device-not-present interrupt, part of it saved
+ automatically by the 80386/80486.
+ */
+struct info {
+ long ___orig_eip;
+ long ___ret_from_system_call;
+ long ___ebx;
+ long ___ecx;
+ long ___edx;
+ long ___esi;
+ long ___edi;
+ long ___ebp;
+ long ___eax;
+ long ___ds;
+ long ___es;
+ long ___fs;
+ long ___gs;
+ long ___orig_eax;
+ long ___eip;
+ long ___cs;
+ long ___eflags;
+ long ___esp;
+ long ___ss;
+ long ___vm86_es; /* This and the following only in vm86 mode */
+ long ___vm86_ds;
+ long ___vm86_fs;
+ long ___vm86_gs;
+};
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/mc146818rtc.h b/i386/i386at/gpl/linux/include/linux/mc146818rtc.h
new file mode 100644
index 00000000..d2e709a1
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/mc146818rtc.h
@@ -0,0 +1,109 @@
+/* mc146818rtc.h - register definitions for the Real-Time-Clock / CMOS RAM
+ * Copyright Torsten Duwe <duwe@informatik.uni-erlangen.de> 1993
+ * derived from Data Sheet, Copyright Motorola 1984 (!).
+ * It was written to be part of the Linux operating system.
+ */
+/* permission is hereby granted to copy, modify and redistribute this code
+ * in terms of the GNU Library General Public License, Version 2 or later,
+ * at your option.
+ */
+
+#ifndef _MC146818RTC_H
+#define _MC146818RTC_H
+#include <asm/io.h>
+
+#ifndef RTC_PORT
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ALWAYS_BCD 1
+#endif
+
+#define CMOS_READ(addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+inb_p(RTC_PORT(1)); \
+})
+#define CMOS_WRITE(val, addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+outb_p((val),RTC_PORT(1)); \
+})
+
+/**********************************************************************
+ * register summary
+ **********************************************************************/
+#define RTC_SECONDS 0
+#define RTC_SECONDS_ALARM 1
+#define RTC_MINUTES 2
+#define RTC_MINUTES_ALARM 3
+#define RTC_HOURS 4
+#define RTC_HOURS_ALARM 5
+/* RTC_*_alarm is always true if 2 MSBs are set */
+# define RTC_ALARM_DONT_CARE 0xC0
+
+#define RTC_DAY_OF_WEEK 6
+#define RTC_DAY_OF_MONTH 7
+#define RTC_MONTH 8
+#define RTC_YEAR 9
+
+/* control registers - Moto names
+ */
+#define RTC_REG_A 10
+#define RTC_REG_B 11
+#define RTC_REG_C 12
+#define RTC_REG_D 13
+
+/**********************************************************************
+ * register details
+ **********************************************************************/
+#define RTC_FREQ_SELECT RTC_REG_A
+
+/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus,
+ * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete,
+ * totalling to a max high interval of 2.228 ms.
+ */
+# define RTC_UIP 0x80
+# define RTC_DIV_CTL 0x70
+ /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */
+# define RTC_REF_CLCK_4MHZ 0x00
+# define RTC_REF_CLCK_1MHZ 0x10
+# define RTC_REF_CLCK_32KHZ 0x20
+ /* 2 values for divider stage reset, others for "testing purposes only" */
+# define RTC_DIV_RESET1 0x60
+# define RTC_DIV_RESET2 0x70
+ /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
+# define RTC_RATE_SELECT 0x0F
+
+/**********************************************************************/
+#define RTC_CONTROL RTC_REG_B
+# define RTC_SET 0x80 /* disable updates for clock setting */
+# define RTC_PIE 0x40 /* periodic interrupt enable */
+# define RTC_AIE 0x20 /* alarm interrupt enable */
+# define RTC_UIE 0x10 /* update-finished interrupt enable */
+# define RTC_SQWE 0x08 /* enable square-wave output */
+# define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
+# define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
+# define RTC_DST_EN 0x01 /* auto switch DST - works for USA only */
+
+/**********************************************************************/
+#define RTC_INTR_FLAGS RTC_REG_C
+/* caution - cleared by read */
+# define RTC_IRQF 0x80 /* any of the following 3 is active */
+# define RTC_PF 0x40
+# define RTC_AF 0x20
+# define RTC_UF 0x10
+
+/**********************************************************************/
+#define RTC_VALID RTC_REG_D
+# define RTC_VRT 0x80 /* valid RAM and time */
+/**********************************************************************/
+
+/* example: !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
+ * determines if the following two #defines are needed
+ */
+#ifndef BCD_TO_BIN
+#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
+#endif
+
+#ifndef BIN_TO_BCD
+#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
+#endif
+
+#endif /* _MC146818RTC_H */
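A simplified sketch of reading the time-of-day registers with the accessors above: wait out an update cycle, read the registers, and undo BCD encoding unless the chip reports binary mode. The usual read-twice-until-stable loop and interrupt locking are omitted for brevity.

/* Sketch: pull hours/minutes/seconds out of the MC146818. */
static void read_rtc_time(int *hour, int *min, int *sec)
{
    while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
        ;                                     /* update in progress: wait   */

    *sec  = CMOS_READ(RTC_SECONDS);
    *min  = CMOS_READ(RTC_MINUTES);
    *hour = CMOS_READ(RTC_HOURS);

    if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
        BCD_TO_BIN(*sec);                     /* registers hold BCD values  */
        BCD_TO_BIN(*min);
        BCD_TO_BIN(*hour);
    }
}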
diff --git a/i386/i386at/gpl/linux/include/linux/minix_fs.h b/i386/i386at/gpl/linux/include/linux/minix_fs.h
new file mode 100644
index 00000000..f0ecdea0
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/minix_fs.h
@@ -0,0 +1,135 @@
+#ifndef _LINUX_MINIX_FS_H
+#define _LINUX_MINIX_FS_H
+
+/*
+ * The minix filesystem constants/structures
+ */
+
+/*
+ * Thanks to Kees J Bot for sending me the definitions of the new
+ * minix filesystem (aka V2) with bigger inodes and 32-bit block
+ * pointers.
+ */
+
+#define MINIX_ROOT_INO 1
+
+/* Not the same as the bogus LINK_MAX in <linux/limits.h>. Oh well. */
+#define MINIX_LINK_MAX 250
+
+#define MINIX_I_MAP_SLOTS 8
+#define MINIX_Z_MAP_SLOTS 64
+#define MINIX_SUPER_MAGIC 0x137F /* original minix fs */
+#define MINIX_SUPER_MAGIC2 0x138F /* minix fs, 30 char names */
+#define MINIX2_SUPER_MAGIC 0x2468 /* minix V2 fs */
+#define MINIX2_SUPER_MAGIC2 0x2478 /* minix V2 fs, 30 char names */
+#define MINIX_VALID_FS 0x0001 /* Clean fs. */
+#define MINIX_ERROR_FS 0x0002 /* fs has errors. */
+
+#define MINIX_INODES_PER_BLOCK ((BLOCK_SIZE)/(sizeof (struct minix_inode)))
+#define MINIX2_INODES_PER_BLOCK ((BLOCK_SIZE)/(sizeof (struct minix2_inode)))
+
+#define MINIX_V1 0x0001 /* original minix fs */
+#define MINIX_V2 0x0002 /* minix V2 fs */
+
+#define INODE_VERSION(inode) inode->i_sb->u.minix_sb.s_version
+
+/*
+ * This is the original minix inode layout on disk.
+ * Note the 8-bit gid and atime and ctime.
+ */
+struct minix_inode {
+ __u16 i_mode;
+ __u16 i_uid;
+ __u32 i_size;
+ __u32 i_time;
+ __u8 i_gid;
+ __u8 i_nlinks;
+ __u16 i_zone[9];
+};
+
+/*
+ * The new minix inode has all the time entries, as well as
+ * long block numbers and a third indirect block (7+1+1+1
+ * instead of 7+1+1). Also, some previously 8-bit values are
+ * now 16-bit. The inode is now 64 bytes instead of 32.
+ */
+struct minix2_inode {
+ __u16 i_mode;
+ __u16 i_nlinks;
+ __u16 i_uid;
+ __u16 i_gid;
+ __u32 i_size;
+ __u32 i_atime;
+ __u32 i_mtime;
+ __u32 i_ctime;
+ __u32 i_zone[10];
+};
+
+/*
+ * minix super-block data on disk
+ */
+struct minix_super_block {
+ __u16 s_ninodes;
+ __u16 s_nzones;
+ __u16 s_imap_blocks;
+ __u16 s_zmap_blocks;
+ __u16 s_firstdatazone;
+ __u16 s_log_zone_size;
+ __u32 s_max_size;
+ __u16 s_magic;
+ __u16 s_state;
+ __u32 s_zones;
+};
+
+struct minix_dir_entry {
+ __u16 inode;
+ char name[0];
+};
+
+#ifdef __KERNEL__
+
+extern int minix_lookup(struct inode * dir,const char * name, int len,
+ struct inode ** result);
+extern int minix_create(struct inode * dir,const char * name, int len, int mode,
+ struct inode ** result);
+extern int minix_mkdir(struct inode * dir, const char * name, int len, int mode);
+extern int minix_rmdir(struct inode * dir, const char * name, int len);
+extern int minix_unlink(struct inode * dir, const char * name, int len);
+extern int minix_symlink(struct inode * inode, const char * name, int len,
+ const char * symname);
+extern int minix_link(struct inode * oldinode, struct inode * dir, const char * name, int len);
+extern int minix_mknod(struct inode * dir, const char * name, int len, int mode, int rdev);
+extern int minix_rename(struct inode * old_dir, const char * old_name, int old_len,
+ struct inode * new_dir, const char * new_name, int new_len);
+extern struct inode * minix_new_inode(const struct inode * dir);
+extern void minix_free_inode(struct inode * inode);
+extern unsigned long minix_count_free_inodes(struct super_block *sb);
+extern int minix_new_block(struct super_block * sb);
+extern void minix_free_block(struct super_block * sb, int block);
+extern unsigned long minix_count_free_blocks(struct super_block *sb);
+
+extern int minix_bmap(struct inode *,int);
+
+extern struct buffer_head * minix_getblk(struct inode *, int, int);
+extern struct buffer_head * minix_bread(struct inode *, int, int);
+
+extern void minix_truncate(struct inode *);
+extern void minix_put_super(struct super_block *);
+extern struct super_block *minix_read_super(struct super_block *,void *,int);
+extern int init_minix_fs(void);
+extern void minix_write_super(struct super_block *);
+extern int minix_remount (struct super_block * sb, int * flags, char * data);
+extern void minix_read_inode(struct inode *);
+extern void minix_write_inode(struct inode *);
+extern void minix_put_inode(struct inode *);
+extern void minix_statfs(struct super_block *, struct statfs *, int);
+extern int minix_sync_inode(struct inode *);
+extern int minix_sync_file(struct inode *, struct file *);
+
+extern struct inode_operations minix_file_inode_operations;
+extern struct inode_operations minix_dir_inode_operations;
+extern struct inode_operations minix_symlink_inode_operations;
+
+#endif /* __KERNEL__ */
+
+#endif
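For illustration, a sketch of how an inode's on-disk location follows from the constants above, assuming the classic Minix layout (boot block, superblock, inode map, zone map, then the inode table) and the BLOCK_SIZE constant from <linux/fs.h>; the helper name is hypothetical.

/* Hypothetical helper: which disk block holds V1 inode `ino`?
 * Layout assumption: block 0 = boot block, block 1 = superblock,
 * then s_imap_blocks + s_zmap_blocks bitmap blocks, then the inode
 * table. Inode numbers start at 1 (MINIX_ROOT_INO). A V2 filesystem
 * would use MINIX2_INODES_PER_BLOCK instead. */
static unsigned long minix_inode_block(struct minix_super_block *ms,
				       unsigned long ino)
{
	unsigned long inode_table = 2 + ms->s_imap_blocks + ms->s_zmap_blocks;

	return inode_table + (ino - 1) / MINIX_INODES_PER_BLOCK;
}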
diff --git a/i386/i386at/gpl/linux/include/linux/minix_fs_sb.h b/i386/i386at/gpl/linux/include/linux/minix_fs_sb.h
new file mode 100644
index 00000000..e77b4efc
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/minix_fs_sb.h
@@ -0,0 +1,25 @@
+#ifndef _MINIX_FS_SB
+#define _MINIX_FS_SB
+
+/*
+ * minix super-block data in memory
+ */
+struct minix_sb_info {
+ unsigned long s_ninodes;
+ unsigned long s_nzones;
+ unsigned long s_imap_blocks;
+ unsigned long s_zmap_blocks;
+ unsigned long s_firstdatazone;
+ unsigned long s_log_zone_size;
+ unsigned long s_max_size;
+ struct buffer_head * s_imap[8];
+ struct buffer_head * s_zmap[64];
+ unsigned long s_dirsize;
+ unsigned long s_namelen;
+ struct buffer_head * s_sbh;
+ struct minix_super_block * s_ms;
+ unsigned short s_mount_state;
+ unsigned short s_version;
+};
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/mm.h b/i386/i386at/gpl/linux/include/linux/mm.h
new file mode 100644
index 00000000..f8bbb9ba
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/mm.h
@@ -0,0 +1,297 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+extern unsigned long high_memory;
+
+#include <asm/page.h>
+
+#ifdef __KERNEL__
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+extern int verify_area(int, const void *, unsigned long);
+
+/*
+ * Linux kernel virtual memory manager primitives.
+ * The idea being to have a "virtual" mm in the same way
+ * we have a virtual fs - giving a cleaner interface to the
+ * mm details, and allowing different kinds of memory mappings
+ * (from shared memory to executable loading to arbitrary
+ * mmap() functions).
+ */
+
+/*
+ * This struct defines a VMM memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
+ * space that has a special rule for the page-fault handlers (ie a shared
+ * library, the executable area etc).
+ */
+struct vm_area_struct {
+ struct mm_struct * vm_mm; /* VM area parameters */
+ unsigned long vm_start;
+ unsigned long vm_end;
+ pgprot_t vm_page_prot;
+ unsigned short vm_flags;
+/* AVL tree of VM areas per task, sorted by address */
+ short vm_avl_height;
+ struct vm_area_struct * vm_avl_left;
+ struct vm_area_struct * vm_avl_right;
+/* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct * vm_next;
+/* for areas with inode, the circular list inode->i_mmap */
+/* for shm areas, the circular list of attaches */
+/* otherwise unused */
+ struct vm_area_struct * vm_next_share;
+ struct vm_area_struct * vm_prev_share;
+/* more */
+ struct vm_operations_struct * vm_ops;
+ unsigned long vm_offset;
+ struct inode * vm_inode;
+ unsigned long vm_pte; /* shared mem */
+};
+
+/*
+ * vm_flags..
+ */
+#define VM_READ 0x0001 /* currently active flags */
+#define VM_WRITE 0x0002
+#define VM_EXEC 0x0004
+#define VM_SHARED 0x0008
+
+#define VM_MAYREAD 0x0010 /* limits for mprotect() etc */
+#define VM_MAYWRITE 0x0020
+#define VM_MAYEXEC 0x0040
+#define VM_MAYSHARE 0x0080
+
+#define VM_GROWSDOWN 0x0100 /* general info on the segment */
+#define VM_GROWSUP 0x0200
+#define VM_SHM 0x0400 /* shared memory area, don't swap out */
+#define VM_DENYWRITE 0x0800 /* ETXTBSY on write attempts.. */
+
+#define VM_EXECUTABLE 0x1000
+#define VM_LOCKED 0x2000
+
+#define VM_STACK_FLAGS 0x0177
+
+/*
+ * mapping from the currently active vm_flags protection bits (the
+ * low four bits) to a page protection mask..
+ */
+extern pgprot_t protection_map[16];
+
+
+/*
+ * These are the virtual MM functions - opening of an area, closing and
+ * unmapping it (needed to keep files on disk up-to-date etc), pointer
+ * to the functions called when a no-page or a wp-page exception occurs.
+ */
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
+ void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
+ int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
+ void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
+ unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
+ unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
+ unsigned long page);
+ int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
+ pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
+};
+
+/*
+ * Try to keep the most commonly accessed fields in single cache lines
+ * here (16 bytes or greater). This ordering should be particularly
+ * beneficial on 32-bit processors.
+ *
+ * The first line is data used in linear searches (eg. clock algorithm
+ * scans). The second line is data used in page searches through the
+ * page-cache. -- sct
+ */
+typedef struct page {
+ unsigned int count;
+ unsigned dirty:16,
+ age:8,
+ uptodate:1,
+ error:1,
+ referenced:1,
+ locked:1,
+ free_after:1,
+ unused:2,
+ reserved:1;
+ struct wait_queue *wait;
+ struct page *next;
+
+ struct page *next_hash;
+ unsigned long offset;
+ struct inode *inode;
+ struct page *write_list;
+
+ struct page *prev;
+ struct page *prev_hash;
+} mem_map_t;
+
+extern mem_map_t * mem_map;
+
+/*
+ * Free area management
+ */
+
+#define NR_MEM_LISTS 6
+
+struct mem_list {
+ struct mem_list * next;
+ struct mem_list * prev;
+};
+
+extern struct mem_list free_area_list[NR_MEM_LISTS];
+extern unsigned int * free_area_map[NR_MEM_LISTS];
+
+/*
+ * This is timing-critical - most of the time in getting a new page
+ * goes to clearing the page. If you want a page without the clearing
+ * overhead, just use __get_free_page() directly..
+ */
+#define __get_free_page(priority) __get_free_pages((priority),0,~0UL)
+#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),MAX_DMA_ADDRESS)
+extern unsigned long __get_free_pages(int priority, unsigned long gfporder, unsigned long max_addr);
+
+extern inline unsigned long get_free_page(int priority)
+{
+ unsigned long page;
+
+ page = __get_free_page(priority);
+ if (page)
+ memset((void *) page, 0, PAGE_SIZE);
+ return page;
+}
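For illustration, the allocation pattern the comment above implies, using the GFP_KERNEL priority and the free_page() macro defined a few lines further down in this header; the caller is hypothetical.

/* Hypothetical caller: get_free_page() hands back a zeroed page; code that
 * will overwrite the whole page anyway can call __get_free_page() instead
 * and skip the memset. */
void example_page_use(void)
{
	unsigned long page = get_free_page(GFP_KERNEL);

	if (!page)
		return;			/* out of memory */
	/* ... use the (already cleared) page ... */
	free_page(page);		/* i.e. free_pages(page, 0) */
}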
+
+/* memory.c & swap.c*/
+
+#define free_page(addr) free_pages((addr),0)
+extern void free_pages(unsigned long addr, unsigned long order);
+
+extern void show_free_areas(void);
+extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
+ unsigned long address);
+
+extern void free_page_tables(struct task_struct * tsk);
+extern void clear_page_tables(struct task_struct * tsk);
+extern int new_page_tables(struct task_struct * tsk);
+extern int copy_page_tables(struct task_struct * to);
+
+extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
+extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
+extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
+extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
+
+extern void vmtruncate(struct inode * inode, unsigned long offset);
+extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
+extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+
+extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
+extern void mem_init(unsigned long start_mem, unsigned long end_mem);
+extern void show_mem(void);
+extern void oom(struct task_struct * tsk);
+extern void si_meminfo(struct sysinfo * val);
+
+/* vmalloc.c */
+
+extern void * vmalloc(unsigned long size);
+extern void * vremap(unsigned long offset, unsigned long size);
+extern void vfree(void * addr);
+extern int vread(char *buf, char *addr, int count);
+
+/* mmap.c */
+extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long off);
+extern void merge_segments(struct task_struct *, unsigned long, unsigned long);
+extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
+extern void remove_shared_vm_struct(struct vm_area_struct *);
+extern void build_mmap_avl(struct mm_struct *);
+extern void exit_mmap(struct mm_struct *);
+extern int do_munmap(unsigned long, size_t);
+extern unsigned long get_unmapped_area(unsigned long, unsigned long);
+
+/* filemap.c */
+extern unsigned long page_unuse(unsigned long);
+extern int shrink_mmap(int, unsigned long);
+extern void truncate_inode_pages(struct inode *, unsigned long);
+
+#define GFP_BUFFER 0x00
+#define GFP_ATOMIC 0x01
+#define GFP_USER 0x02
+#define GFP_KERNEL 0x03
+#define GFP_NOBUFFER 0x04
+#define GFP_NFS 0x05
+
+/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
+ platforms, used as appropriate on others */
+
+#define GFP_DMA 0x80
+
+#define GFP_LEVEL_MASK 0xf
+
+#define avl_empty (struct vm_area_struct *) NULL
+
+#ifndef MACH
+static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+ unsigned long grow;
+
+ address &= PAGE_MASK;
+ if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
+ return -ENOMEM;
+ grow = vma->vm_start - address;
+ vma->vm_start = address;
+ vma->vm_offset -= grow;
+ vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
+ return 0;
+}
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+static inline struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
+{
+ struct vm_area_struct * result = NULL;
+ struct vm_area_struct * tree;
+
+ if (!task->mm)
+ return NULL;
+ for (tree = task->mm->mmap_avl ; ; ) {
+ if (tree == avl_empty)
+ return result;
+ if (tree->vm_end > addr) {
+ if (tree->vm_start <= addr)
+ return tree;
+ result = tree;
+ tree = tree->vm_avl_left;
+ } else
+ tree = tree->vm_avl_right;
+ }
+}
+
+/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+static inline struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
+{
+ struct vm_area_struct * vma;
+
+ vma = find_vma(task,start_addr);
+ if (!vma || end_addr <= vma->vm_start)
+ return NULL;
+ return vma;
+}
+#endif /* ! MACH */
+
+#endif /* __KERNEL__ */
+
+#endif
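For illustration, the usual fault-time lookup built on the non-MACH find_vma() above; the function and its caller are hypothetical.

/* Hypothetical check: does `address` fall inside (or just below) a VMA?
 * This mirrors the pattern a page-fault handler would use. */
int example_address_mapped(struct task_struct *tsk, unsigned long address)
{
	struct vm_area_struct *vma = find_vma(tsk, address);

	if (!vma)
		return 0;			/* above every mapping */
	if (vma->vm_start <= address)
		return 1;			/* inside this VMA */
	if (vma->vm_flags & VM_GROWSDOWN)
		return 1;			/* stack area: expand_stack() territory */
	return 0;				/* hole between mappings */
}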
diff --git a/i386/i386at/gpl/linux/include/linux/module.h b/i386/i386at/gpl/linux/include/linux/module.h
new file mode 100644
index 00000000..a91ad19b
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/module.h
@@ -0,0 +1,115 @@
+/*
+ * Dynamic loading of modules into the kernel.
+ *
+ * Modified by Bjorn Ekwall <bj0rn@blox.se>
+ */
+
+#ifndef _LINUX_MODULE_H
+#define _LINUX_MODULE_H
+
+#ifdef __GENKSYMS__
+# define _set_ver(sym,vers) sym
+# undef MODVERSIONS
+# define MODVERSIONS
+#else /* ! __GENKSYMS__ */
+# if defined(MODVERSIONS) && !defined(MODULE) && defined(EXPORT_SYMTAB)
+# define _set_ver(sym,vers) sym
+# include <linux/modversions.h>
+# endif
+#endif /* __GENKSYMS__ */
+
+/* values of module.state */
+#define MOD_UNINITIALIZED 0
+#define MOD_RUNNING 1
+#define MOD_DELETED 2
+
+/* maximum length of module name */
+#define MOD_MAX_NAME 64
+
+/* magic marker for modules inserted from kerneld, to be auto-reaped */
+#define MOD_AUTOCLEAN 0x40000000 /* big enough, but no sign problems... */
+
+/* maximum length of symbol name */
+#define SYM_MAX_NAME 60
+
+struct kernel_sym { /* sent to "insmod" */
+ unsigned long value; /* value of symbol */
+ char name[SYM_MAX_NAME]; /* name of symbol */
+};
+
+struct module_ref {
+ struct module *module;
+ struct module_ref *next;
+};
+
+struct internal_symbol {
+ void *addr;
+ const char *name;
+ };
+
+struct symbol_table { /* received from "insmod" */
+ int size; /* total, including string table!!! */
+ int n_symbols;
+ int n_refs;
+ struct internal_symbol symbol[0]; /* actual size defined by n_symbols */
+ struct module_ref ref[0]; /* actual size defined by n_refs */
+};
+/*
+ * Note: The string table follows immediately after the symbol table in memory!
+ */
+
+struct module {
+ struct module *next;
+ struct module_ref *ref; /* the list of modules that refer to me */
+ struct symbol_table *symtab;
+ const char *name;
+ int size; /* size of module in pages */
+ void* addr; /* address of module */
+ int state;
+ void (*cleanup)(void); /* cleanup routine */
+};
+
+struct mod_routines {
+ int (*init)(void); /* initialization routine */
+ void (*cleanup)(void); /* cleanup routine */
+};
+
+/* rename_module_symbol(old_name, new_name) WOW! */
+extern int rename_module_symbol(char *, char *);
+
+/* insert new symbol table */
+extern int register_symtab(struct symbol_table *);
+
+/*
+ * The first word of the module contains the use count.
+ */
+#define GET_USE_COUNT(module) (* (long *) (module)->addr)
+/*
+ * define the count variable, and usage macros.
+ */
+
+#ifdef MODULE
+
+extern long mod_use_count_;
+#define MOD_INC_USE_COUNT mod_use_count_++
+#define MOD_DEC_USE_COUNT mod_use_count_--
+#define MOD_IN_USE ((mod_use_count_ & ~MOD_AUTOCLEAN) != 0)
+
+#ifndef __NO_VERSION__
+#include <linux/version.h>
+char kernel_version[]=UTS_RELEASE;
+#endif
+
+#if defined(MODVERSIONS) && !defined(__GENKSYMS__)
+int Using_Versions; /* gcc will handle this global (used as a flag) correctly */
+#endif
+
+#else
+
+#define MOD_INC_USE_COUNT do { } while (0)
+#define MOD_DEC_USE_COUNT do { } while (0)
+#define MOD_IN_USE 1
+
+#endif
+
+#endif
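For illustration, the skeleton these macros are built around: init_module()/cleanup_module() are the classic entry points "insmod" binds to in kernels of this vintage, and the use-count macros keep the module pinned while something holds it open. The example_* routines are hypothetical.

int init_module(void)
{
	/* register the driver, filesystem, ... this module provides */
	return 0;			/* non-zero aborts loading */
}

void cleanup_module(void)
{
	/* undo init_module(); only reached once MOD_IN_USE is false */
}

static int example_device_open(void)	/* hypothetical open routine */
{
	MOD_INC_USE_COUNT;		/* pin the module while in use */
	return 0;
}

static void example_device_release(void)
{
	MOD_DEC_USE_COUNT;
}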
diff --git a/i386/i386at/gpl/linux/include/linux/mount.h b/i386/i386at/gpl/linux/include/linux/mount.h
new file mode 100644
index 00000000..357c7ae6
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/mount.h
@@ -0,0 +1,30 @@
+/*
+ *
+ * Definitions for the mount interface. This describes the in-kernel
+ * linked list of mounted filesystems.
+ *
+ * Author: Marco van Wieringen <mvw@mcs.ow.nl> <mvw@tnix.net> <mvw@cistron.nl>
+ *
+ * Version: $Id: mount.h,v 1.1.1.1 1997/02/25 21:27:29 thomas Exp $
+ *
+ */
+#ifndef _LINUX_MOUNT_H
+#define _LINUX_MOUNT_H
+
+struct vfsmount
+{
+ kdev_t mnt_dev; /* Device this applies to */
+ char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
+ char *mnt_dirname; /* Name of directory mounted on */
+ unsigned int mnt_flags; /* Flags of this device */
+ struct semaphore mnt_sem; /* lock device while I/O in progress */
+ struct super_block *mnt_sb; /* pointer to superblock */
+ struct file *mnt_quotas[MAXQUOTAS]; /* fp's to quotafiles */
+ time_t mnt_iexp[MAXQUOTAS]; /* expiretime for inodes */
+ time_t mnt_bexp[MAXQUOTAS]; /* expiretime for blocks */
+ struct vfsmount *mnt_next; /* pointer to next in linkedlist */
+};
+
+struct vfsmount *lookup_vfsmnt(kdev_t dev);
+
+#endif /* _LINUX_MOUNT_H */
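For illustration, a small consumer of lookup_vfsmnt(); MS_RDONLY is assumed to come from <linux/fs.h>, and the helper name is hypothetical.

/* Hypothetical helper: is the filesystem on `dev` mounted read-only? */
int example_is_mounted_readonly(kdev_t dev)
{
	struct vfsmount *mnt = lookup_vfsmnt(dev);

	if (!mnt)
		return 0;			/* not mounted at all */
	return (mnt->mnt_flags & MS_RDONLY) != 0;
}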
diff --git a/i386/i386at/gpl/linux/include/linux/net.h b/i386/i386at/gpl/linux/include/linux/net.h
new file mode 100644
index 00000000..1fbe98a0
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/net.h
@@ -0,0 +1,132 @@
+/*
+ * NET An implementation of the SOCKET network access protocol.
+ * This is the master header file for the Linux NET layer,
+ * or, in plain English: the networking handling part of the
+ * kernel.
+ *
+ * Version: @(#)net.h 1.0.3 05/25/93
+ *
+ * Authors: Orest Zborowski, <obz@Kodak.COM>
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_NET_H
+#define _LINUX_NET_H
+
+
+#include <linux/wait.h>
+#include <linux/socket.h>
+
+
+#define NSOCKETS 2000 /* Dynamic, this is MAX LIMIT */
+#define NSOCKETS_UNIX 128 /* unix domain static limit */
+#define NPROTO 16 /* should be enough for now.. */
+
+
+#define SYS_SOCKET 1 /* sys_socket(2) */
+#define SYS_BIND 2 /* sys_bind(2) */
+#define SYS_CONNECT 3 /* sys_connect(2) */
+#define SYS_LISTEN 4 /* sys_listen(2) */
+#define SYS_ACCEPT 5 /* sys_accept(2) */
+#define SYS_GETSOCKNAME 6 /* sys_getsockname(2) */
+#define SYS_GETPEERNAME 7 /* sys_getpeername(2) */
+#define SYS_SOCKETPAIR 8 /* sys_socketpair(2) */
+#define SYS_SEND 9 /* sys_send(2) */
+#define SYS_RECV 10 /* sys_recv(2) */
+#define SYS_SENDTO 11 /* sys_sendto(2) */
+#define SYS_RECVFROM 12 /* sys_recvfrom(2) */
+#define SYS_SHUTDOWN 13 /* sys_shutdown(2) */
+#define SYS_SETSOCKOPT 14 /* sys_setsockopt(2) */
+#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */
+#define SYS_SENDMSG 16 /* sys_sendmsg(2) */
+#define SYS_RECVMSG 17 /* sys_recvmsg(2) */
+
+
+typedef enum {
+ SS_FREE = 0, /* not allocated */
+ SS_UNCONNECTED, /* unconnected to any socket */
+ SS_CONNECTING, /* in process of connecting */
+ SS_CONNECTED, /* connected to socket */
+ SS_DISCONNECTING /* in process of disconnecting */
+} socket_state;
+
+#define SO_ACCEPTCON (1<<16) /* performed a listen */
+#define SO_WAITDATA (1<<17) /* wait data to read */
+#define SO_NOSPACE (1<<18) /* no space to write */
+
+#ifdef __KERNEL__
+/*
+ * Internal representation of a socket. Not all the fields are used by
+ * all configurations:
+ *
+ * server client
+ * conn client connected to server connected to
+ * iconn list of clients -unused-
+ * awaiting connections
+ * wait sleep for clients, sleep for connection,
+ * sleep for i/o sleep for i/o
+ */
+struct socket {
+ short type; /* SOCK_STREAM, ... */
+ socket_state state;
+ long flags;
+ struct proto_ops *ops; /* protocols do most everything */
+ void *data; /* protocol data */
+ struct socket *conn; /* server socket connected to */
+ struct socket *iconn; /* incomplete client conn.s */
+ struct socket *next;
+ struct wait_queue **wait; /* ptr to place to wait on */
+ struct inode *inode;
+ struct fasync_struct *fasync_list; /* Asynchronous wake up list */
+};
+
+#define SOCK_INODE(S) ((S)->inode)
+
+struct proto_ops {
+ int family;
+
+ int (*create) (struct socket *sock, int protocol);
+ int (*dup) (struct socket *newsock, struct socket *oldsock);
+ int (*release) (struct socket *sock, struct socket *peer);
+ int (*bind) (struct socket *sock, struct sockaddr *umyaddr,
+ int sockaddr_len);
+ int (*connect) (struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags);
+ int (*socketpair) (struct socket *sock1, struct socket *sock2);
+ int (*accept) (struct socket *sock, struct socket *newsock,
+ int flags);
+ int (*getname) (struct socket *sock, struct sockaddr *uaddr,
+ int *usockaddr_len, int peer);
+ int (*select) (struct socket *sock, int sel_type,
+ select_table *wait);
+ int (*ioctl) (struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+ int (*listen) (struct socket *sock, int len);
+ int (*shutdown) (struct socket *sock, int flags);
+ int (*setsockopt) (struct socket *sock, int level, int optname,
+ char *optval, int optlen);
+ int (*getsockopt) (struct socket *sock, int level, int optname,
+ char *optval, int *optlen);
+ int (*fcntl) (struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+ int (*sendmsg) (struct socket *sock, struct msghdr *m, int total_len, int nonblock, int flags);
+ int (*recvmsg) (struct socket *sock, struct msghdr *m, int total_len, int nonblock, int flags, int *addr_len);
+};
+
+struct net_proto {
+ const char *name; /* Protocol name */
+ void (*init_func)(struct net_proto *); /* Bootstrap */
+};
+
+extern int sock_wake_async(struct socket *sock, int how);
+extern int sock_register(int family, struct proto_ops *ops);
+extern int sock_unregister(int family);
+extern struct socket *sock_alloc(void);
+extern void sock_release(struct socket *sock);
+#endif /* __KERNEL__ */
+#endif /* _LINUX_NET_H */
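For illustration, how a protocol family would plug into the socket layer through the proto_ops table and sock_register() declared above; only the family slot is filled in here, and all example_* names are hypothetical.

static struct proto_ops example_proto_ops = {
	AF_UNIX,	/* family slot only; a real protocol fills in the
			 * create/dup/release/bind/... function pointers
			 * with routines matching the prototypes above */
};

/* Matches net_proto.init_func, so it could sit in a protocols[] table. */
void example_proto_init(struct net_proto *pro)
{
	(void) pro;
	(void) sock_register(example_proto_ops.family, &example_proto_ops);
}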
diff --git a/i386/i386at/gpl/linux/include/linux/netdevice.h b/i386/i386at/gpl/linux/include/linux/netdevice.h
new file mode 100644
index 00000000..9e1143be
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/netdevice.h
@@ -0,0 +1,332 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Interfaces handler.
+ *
+ * Version: @(#)dev.h 1.0.10 08/12/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Donald J. Becker, <becker@super.org>
+ * Alan Cox, <A.Cox@swansea.ac.uk>
+ * Bjorn Ekwall. <bj0rn@blox.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Moved to /usr/include/linux for NET3
+ */
+#ifndef _LINUX_NETDEVICE_H
+#define _LINUX_NETDEVICE_H
+
+#include <linux/config.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+
+/* for future expansion when we will have different priorities. */
+#define DEV_NUMBUFFS 3
+#define MAX_ADDR_LEN 7
+#ifndef CONFIG_AX25
+#ifndef CONFIG_TR
+#ifndef CONFIG_NET_IPIP
+#define MAX_HEADER 32 /* We really need about 18 worst case .. so 32 is aligned */
+#else
+#define MAX_HEADER 48 /* We need to allow for having tunnel headers */
+#endif /* IPIP */
+#else
+#define MAX_HEADER 48 /* Token Ring header needs 40 bytes ... 48 is aligned */
+#endif /* TR */
+#else
+#define MAX_HEADER 96 /* AX.25 + NetROM */
+#endif /* AX25 */
+
+#define IS_MYADDR 1 /* address is (one of) our own */
+#define IS_LOOPBACK 2 /* address is for LOOPBACK */
+#define IS_BROADCAST 3 /* address is a valid broadcast */
+#define IS_INVBCAST 4 /* Wrong netmask bcast not for us (unused)*/
+#define IS_MULTICAST 5 /* Multicast IP address */
+
+/*
+ * We tag multicasts with these structures.
+ */
+
+struct dev_mc_list
+{
+ struct dev_mc_list *next;
+ char dmi_addr[MAX_ADDR_LEN];
+ unsigned short dmi_addrlen;
+ unsigned short dmi_users;
+};
+
+struct hh_cache
+{
+ struct hh_cache *hh_next;
+ void *hh_arp; /* Opaque pointer, used by
+ * any address resolution module,
+ * not only ARP.
+ */
+ unsigned int hh_refcnt; /* number of users */
+ unsigned short hh_type; /* protocol identifier, e.g. ETH_P_IP */
+ char hh_uptodate; /* hh_data is valid */
+ char hh_data[16]; /* cached hardware header */
+};
+
+/*
+ * The DEVICE structure.
+ * Actually, this whole structure is a big mistake. It mixes I/O
+ * data with strictly "high-level" data, and it has to know about
+ * almost every data structure used in the INET module.
+ */
+#ifdef MACH
+#ifndef MACH_INCLUDE
+#define device linux_device
+#endif
+struct linux_device
+#else
+struct device
+#endif
+{
+
+ /*
+ * This is the first field of the "visible" part of this structure
+ * (i.e. as seen by users in the "Space.c" file). It is the name
+ * the interface.
+ */
+ char *name;
+
+ /* I/O specific fields - FIXME: Merge these and struct ifmap into one */
+ unsigned long rmem_end; /* shmem "recv" end */
+ unsigned long rmem_start; /* shmem "recv" start */
+ unsigned long mem_end; /* shared mem end */
+ unsigned long mem_start; /* shared mem start */
+ unsigned long base_addr; /* device I/O address */
+ unsigned char irq; /* device IRQ number */
+
+ /* Low-level status flags. */
+ volatile unsigned char start, /* start an operation */
+ interrupt; /* interrupt arrived */
+ unsigned long tbusy; /* transmitter busy must be long for bitops */
+
+ struct linux_device *next;
+
+ /* The device initialization function. Called only once. */
+ int (*init)(struct linux_device *dev);
+
+ /* Some hardware also needs these fields, but they are not part of the
+ usual set specified in Space.c. */
+ unsigned char if_port; /* Selectable AUI, TP,..*/
+ unsigned char dma; /* DMA channel */
+
+ struct enet_statistics* (*get_stats)(struct linux_device *dev);
+
+ /*
+ * This marks the end of the "visible" part of the structure. All
+ * fields hereafter are internal to the system, and may change at
+ * will (read: may be cleaned up at will).
+ */
+
+ /* These may be needed for future network-power-down code. */
+ unsigned long trans_start; /* Time (in jiffies) of last Tx */
+ unsigned long last_rx; /* Time of last Rx */
+
+ unsigned short flags; /* interface flags (a la BSD) */
+ unsigned short family; /* address family ID (AF_INET) */
+ unsigned short metric; /* routing metric (not used) */
+ unsigned short mtu; /* interface MTU value */
+ unsigned short type; /* interface hardware type */
+ unsigned short hard_header_len; /* hardware hdr length */
+ void *priv; /* pointer to private data */
+
+ /* Interface address info. */
+ unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+ unsigned char pad; /* make dev_addr aligned to 8 bytes */
+ unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */
+ unsigned char addr_len; /* hardware address length */
+ unsigned long pa_addr; /* protocol address */
+ unsigned long pa_brdaddr; /* protocol broadcast addr */
+ unsigned long pa_dstaddr; /* protocol P-P other side addr */
+ unsigned long pa_mask; /* protocol netmask */
+ unsigned short pa_alen; /* protocol address length */
+
+ struct dev_mc_list *mc_list; /* Multicast mac addresses */
+ int mc_count; /* Number of installed mcasts */
+
+ struct ip_mc_list *ip_mc_list; /* IP multicast filter chain */
+ __u32 tx_queue_len; /* Max frames per queue allowed */
+
+ /* For load balancing driver pair support */
+
+ unsigned long pkt_queue; /* Packets queued */
+ struct linux_device *slave; /* Slave device */
+ struct net_alias_info *alias_info; /* main dev alias info */
+ struct net_alias *my_alias; /* alias devs */
+
+ /* Pointer to the interface buffers. */
+ struct sk_buff_head buffs[DEV_NUMBUFFS];
+
+ /* Pointers to interface service routines. */
+ int (*open)(struct linux_device *dev);
+ int (*stop)(struct linux_device *dev);
+ int (*hard_start_xmit) (struct sk_buff *skb,
+ struct linux_device *dev);
+ int (*hard_header) (struct sk_buff *skb,
+ struct linux_device *dev,
+ unsigned short type,
+ void *daddr,
+ void *saddr,
+ unsigned len);
+ int (*rebuild_header)(void *eth,
+ struct linux_device *dev,
+ unsigned long raddr, struct sk_buff *skb);
+#define HAVE_MULTICAST
+ void (*set_multicast_list)(struct linux_device *dev);
+#define HAVE_SET_MAC_ADDR
+ int (*set_mac_address)(struct linux_device *dev,
+ void *addr);
+#define HAVE_PRIVATE_IOCTL
+ int (*do_ioctl)(struct linux_device *dev,
+ struct ifreq *ifr, int cmd);
+#define HAVE_SET_CONFIG
+ int (*set_config)(struct linux_device *dev,
+ struct ifmap *map);
+#define HAVE_HEADER_CACHE
+ void (*header_cache_bind)(struct hh_cache **hhp,
+ struct linux_device *dev,
+ unsigned short htype,
+ __u32 daddr);
+ void (*header_cache_update)(struct hh_cache *hh,
+ struct linux_device *dev,
+ unsigned char * haddr);
+#ifdef MACH
+#ifdef MACH_INCLUDE
+ struct net_data *net_data;
+#else
+ void *net_data;
+#endif
+#endif
+};
+
+
+struct packet_type {
+ unsigned short type; /* This is really htons(ether_type). */
+ struct linux_device * dev;
+ int (*func) (struct sk_buff *, struct linux_device *,
+ struct packet_type *);
+ void *data;
+ struct packet_type *next;
+};
+
+
+#ifdef __KERNEL__
+
+#include <linux/notifier.h>
+
+/* Used by dev_rint */
+#define IN_SKBUFF 1
+
+extern volatile unsigned long in_bh;
+
+extern struct linux_device loopback_dev;
+extern struct linux_device *dev_base;
+extern struct packet_type *ptype_base[16];
+
+
+extern int ip_addr_match(unsigned long addr1, unsigned long addr2);
+extern int ip_chk_addr(unsigned long addr);
+extern struct linux_device *ip_dev_check(unsigned long daddr);
+extern unsigned long ip_my_addr(void);
+extern unsigned long ip_get_mask(unsigned long addr);
+extern struct linux_device *ip_dev_find(unsigned long addr);
+extern struct linux_device *dev_getbytype(unsigned short type);
+
+extern void dev_add_pack(struct packet_type *pt);
+extern void dev_remove_pack(struct packet_type *pt);
+extern struct linux_device *dev_get(const char *name);
+extern int dev_open(struct linux_device *dev);
+extern int dev_close(struct linux_device *dev);
+extern void dev_queue_xmit(struct sk_buff *skb,
+ struct linux_device *dev,
+ int pri);
+#define HAVE_NETIF_RX 1
+extern void netif_rx(struct sk_buff *skb);
+extern void dev_transmit(void);
+extern int in_net_bh(void);
+extern void net_bh(void *tmp);
+#ifdef MACH
+#define dev_tint(dev)
+#else
+extern void dev_tint(struct linux_device *dev);
+#endif
+extern int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
+extern int dev_ioctl(unsigned int cmd, void *);
+
+extern void dev_init(void);
+
+/* Locking protection for page faults during outputs to devices unloaded during the fault */
+
+extern int dev_lockct;
+
+/*
+ * These two don't currently need to be interrupt safe,
+ * but they may do soon. Do it properly anyway.
+ */
+
+extern __inline__ void dev_lock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct++;
+ restore_flags(flags);
+}
+
+extern __inline__ void dev_unlock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct--;
+ restore_flags(flags);
+}
+
+/*
+ * This almost never occurs, isn't in performance-critical paths,
+ * and we can thus be relaxed about it.
+ */
+
+extern __inline__ void dev_lock_wait(void)
+{
+ while(dev_lockct)
+ schedule();
+}
+
+
+/* These functions live elsewhere (drivers/net/net_init.c, but related) */
+
+extern void ether_setup(struct linux_device *dev);
+extern void tr_setup(struct linux_device *dev);
+extern int ether_config(struct linux_device *dev,
+ struct ifmap *map);
+/* Support for loadable net-drivers */
+extern int register_netdev(struct linux_device *dev);
+extern void unregister_netdev(struct linux_device *dev);
+extern int register_netdevice_notifier(struct notifier_block *nb);
+extern int unregister_netdevice_notifier(struct notifier_block *nb);
+/* Functions used for multicast support */
+extern void dev_mc_upload(struct linux_device *dev);
+extern void dev_mc_delete(struct linux_device *dev,
+ void *addr, int alen, int all);
+extern void dev_mc_add(struct linux_device *dev,
+ void *addr, int alen, int newonly);
+extern void dev_mc_discard(struct linux_device *dev);
+/* This is the wrong place but it'll do for the moment */
+extern void ip_mc_allhost(struct linux_device *dev);
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DEV_H */
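For illustration, the minimal driver-side sequence the service-routine pointers above imply: let ether_setup() supply the generic Ethernet fields, hook up open/stop/hard_start_xmit, and register the device. Everything prefixed example_ is hypothetical, and name/base_addr/irq are assumed to be set elsewhere.

static int example_net_open(struct linux_device *dev)
{
	dev->tbusy = 0;
	dev->start = 1;
	return 0;
}

static int example_net_stop(struct linux_device *dev)
{
	dev->start = 0;
	return 0;
}

static int example_net_xmit(struct sk_buff *skb, struct linux_device *dev)
{
	/* hand skb to the hardware; free it when transmission completes */
	return 0;
}

static struct linux_device example_dev;	/* name, base_addr, irq set elsewhere */

int example_net_probe(void)
{
	ether_setup(&example_dev);	/* type, mtu, hard_header, broadcast... */
	example_dev.open = example_net_open;
	example_dev.stop = example_net_stop;
	example_dev.hard_start_xmit = example_net_xmit;
	return register_netdev(&example_dev);
}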
diff --git a/i386/i386at/gpl/linux/include/linux/nfs.h b/i386/i386at/gpl/linux/include/linux/nfs.h
new file mode 100644
index 00000000..ceb0cd1b
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/nfs.h
@@ -0,0 +1,172 @@
+#ifndef _LINUX_NFS_H
+#define _LINUX_NFS_H
+
+#ifndef MACH_INCLUDE
+#define NFS_PORT 2049
+#define NFS_MAXDATA 8192
+#define NFS_MAXPATHLEN 1024
+#define NFS_MAXNAMLEN 255
+#define NFS_MAXGROUPS 16
+#define NFS_FHSIZE 32
+#define NFS_COOKIESIZE 4
+#define NFS_FIFO_DEV (-1)
+#define NFSMODE_FMT 0170000
+#define NFSMODE_DIR 0040000
+#define NFSMODE_CHR 0020000
+#define NFSMODE_BLK 0060000
+#define NFSMODE_REG 0100000
+#define NFSMODE_LNK 0120000
+#define NFSMODE_SOCK 0140000
+#define NFSMODE_FIFO 0010000
+
+#ifdef __KERNEL__ /* user programs should get these from the rpc header files */
+
+#define RPC_VERSION 2
+
+enum rpc_auth_flavor {
+ RPC_AUTH_NULL = 0,
+ RPC_AUTH_UNIX = 1,
+ RPC_AUTH_SHORT = 2
+};
+
+enum rpc_msg_type {
+ RPC_CALL = 0,
+ RPC_REPLY = 1
+};
+
+enum rpc_reply_stat {
+ RPC_MSG_ACCEPTED = 0,
+ RPC_MSG_DENIED = 1
+};
+
+enum rpc_accept_stat {
+ RPC_SUCCESS = 0,
+ RPC_PROG_UNAVAIL = 1,
+ RPC_PROG_MISMATCH = 2,
+ RPC_PROC_UNAVAIL = 3,
+ RPC_GARBAGE_ARGS = 4
+};
+
+enum rpc_reject_stat {
+ RPC_MISMATCH = 0,
+ RPC_AUTH_ERROR = 1
+};
+
+enum rpc_auth_stat {
+ RPC_AUTH_BADCRED = 1,
+ RPC_AUTH_REJECTEDCRED = 2,
+ RPC_AUTH_BADVERF = 3,
+ RPC_AUTH_REJECTEDVERF = 4,
+ RPC_AUTH_TOOWEAK = 5
+};
+
+#endif /* __KERNEL__ */
+
+enum nfs_stat {
+ NFS_OK = 0,
+ NFSERR_PERM = 1,
+ NFSERR_NOENT = 2,
+ NFSERR_IO = 5,
+ NFSERR_NXIO = 6,
+ NFSERR_EAGAIN = 11,
+ NFSERR_ACCES = 13,
+ NFSERR_EXIST = 17,
+ NFSERR_NODEV = 19,
+ NFSERR_NOTDIR = 20,
+ NFSERR_ISDIR = 21,
+ NFSERR_INVAL = 22, /* that Sun forgot */
+ NFSERR_FBIG = 27,
+ NFSERR_NOSPC = 28,
+ NFSERR_ROFS = 30,
+ NFSERR_NAMETOOLONG = 63,
+ NFSERR_NOTEMPTY = 66,
+ NFSERR_DQUOT = 69,
+ NFSERR_STALE = 70,
+ NFSERR_WFLUSH = 99
+};
+
+enum nfs_ftype {
+ NFNON = 0,
+ NFREG = 1,
+ NFDIR = 2,
+ NFBLK = 3,
+ NFCHR = 4,
+ NFLNK = 5,
+ NFSOCK = 6,
+ NFBAD = 7,
+ NFFIFO = 8
+};
+
+#define NFS_PROGRAM 100003
+#define NFS_VERSION 2
+#define NFSPROC_NULL 0
+#define NFSPROC_GETATTR 1
+#define NFSPROC_SETATTR 2
+#define NFSPROC_ROOT 3
+#define NFSPROC_LOOKUP 4
+#define NFSPROC_READLINK 5
+#define NFSPROC_READ 6
+#define NFSPROC_WRITECACHE 7
+#define NFSPROC_WRITE 8
+#define NFSPROC_CREATE 9
+#define NFSPROC_REMOVE 10
+#define NFSPROC_RENAME 11
+#define NFSPROC_LINK 12
+#define NFSPROC_SYMLINK 13
+#define NFSPROC_MKDIR 14
+#define NFSPROC_RMDIR 15
+#define NFSPROC_READDIR 16
+#define NFSPROC_STATFS 17
+
+struct nfs_fh {
+ char data[NFS_FHSIZE];
+};
+
+struct nfs_time {
+ u_int seconds;
+ u_int useconds;
+};
+
+struct nfs_fattr {
+ enum nfs_ftype type;
+ u_int mode;
+ u_int nlink;
+ u_int uid;
+ u_int gid;
+ u_int size;
+ u_int blocksize;
+ u_int rdev;
+ u_int blocks;
+ u_int fsid;
+ u_int fileid;
+ struct nfs_time atime;
+ struct nfs_time mtime;
+ struct nfs_time ctime;
+};
+
+struct nfs_sattr {
+ u_int mode;
+ u_int uid;
+ u_int gid;
+ u_int size;
+ struct nfs_time atime;
+ struct nfs_time mtime;
+};
+
+struct nfs_entry {
+ u_int fileid;
+ char *name;
+ int cookie;
+ int eof;
+};
+
+struct nfs_fsinfo {
+ u_int tsize;
+ u_int bsize;
+ u_int blocks;
+ u_int bfree;
+ u_int bavail;
+};
+
+#endif /* ! MACH_INCLUDE */
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/notifier.h b/i386/i386at/gpl/linux/include/linux/notifier.h
new file mode 100644
index 00000000..3de4d976
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/notifier.h
@@ -0,0 +1,100 @@
+/*
+ * Routines to manage notifier chains for passing status changes to any
+ * interested routines. We need this instead of hard coded call lists so
+ * that modules can poke their nose into the innards. The network devices
+ * needed them so here they are for the rest of you.
+ *
+ * Alan Cox <Alan.Cox@linux.org>
+ */
+
+#ifndef _LINUX_NOTIFIER_H
+#define _LINUX_NOTIFIER_H
+#include <linux/errno.h>
+
+struct notifier_block
+{
+ int (*notifier_call)(struct notifier_block *this, unsigned long, void *);
+ struct notifier_block *next;
+ int priority;
+};
+
+
+#ifdef __KERNEL__
+
+#define NOTIFY_DONE 0x0000 /* Don't care */
+#define NOTIFY_OK 0x0001 /* Suits me */
+#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
+#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */
+
+extern __inline__ int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
+{
+ while(*list)
+ {
+ if(n->priority > (*list)->priority)
+ break;
+ list= &((*list)->next);
+ }
+ n->next = *list;
+ *list=n;
+ return 0;
+}
+
+/*
+ * Warning to any non GPL module writers out there.. these functions are
+ * GPL'd
+ */
+
+extern __inline__ int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
+{
+ while((*nl)!=NULL)
+ {
+ if((*nl)==n)
+ {
+ *nl=n->next;
+ return 0;
+ }
+ nl=&((*nl)->next);
+ }
+#ifdef MACH_INCLUDE
+ return -LINUX_ENOENT;
+#else
+ return -ENOENT;
+#endif
+}
+
+/*
+ * This is one of these things that is generally shorter inline
+ */
+
+extern __inline__ int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+{
+ int ret=NOTIFY_DONE;
+ struct notifier_block *nb = *n;
+ while(nb)
+ {
+ ret=nb->notifier_call(nb,val,v);
+ if(ret&NOTIFY_STOP_MASK)
+ return ret;
+ nb=nb->next;
+ }
+ return ret;
+}
+
+
+/*
+ * Declared notifiers so far. I can imagine quite a few more chains
+ * over time (eg laptop power reset chains, reboot chain (to clean
+ * device units up), device [un]mount chain, module load/unload chain,
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
+ */
+
+/* netdevice notifier chain */
+#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
+#define NETDEV_DOWN 0x0002
+#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
+ detected a hardware crash and restarted
+ - we can use this eg to kick tcp sessions
+ once done */
+#endif
+#endif
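For illustration, the three inline helpers above in use: a subsystem keeps a chain head, interested code registers a notifier_block on it, and the owner fires notifier_call_chain() when something happens. The names are hypothetical, and NETDEV_UP is borrowed only as a sample event code.

static struct notifier_block *example_chain = NULL;	/* chain head */

static int example_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	/* react to `event`; returning NOTIFY_BAD would stop the chain */
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = { example_event, NULL, 0 };

void example_use_chain(void)
{
	notifier_chain_register(&example_chain, &example_notifier);
	notifier_call_chain(&example_chain, NETDEV_UP, NULL);
	notifier_chain_unregister(&example_chain, &example_notifier);
}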
diff --git a/i386/i386at/gpl/linux/include/linux/pagemap.h b/i386/i386at/gpl/linux/include/linux/pagemap.h
new file mode 100644
index 00000000..6de993b3
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/pagemap.h
@@ -0,0 +1,131 @@
+#ifndef _LINUX_PAGEMAP_H
+#define _LINUX_PAGEMAP_H
+
+#include <asm/system.h>
+
+/*
+ * Page-mapping primitive inline functions
+ *
+ * Copyright 1995 Linus Torvalds
+ */
+
+#ifndef MACH
+static inline unsigned long page_address(struct page * page)
+{
+ return PAGE_OFFSET + PAGE_SIZE*(page - mem_map);
+}
+
+#define PAGE_HASH_BITS 10
+#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)
+
+#define PAGE_AGE_VALUE 16
+
+extern unsigned long page_cache_size;
+extern struct page * page_hash_table[PAGE_HASH_SIZE];
+
+/*
+ * We use a power-of-two hash table to avoid a modulus,
+ * and get a reasonable hash by knowing roughly how the
+ * inode pointer and offsets are distributed (ie, we
+ * roughly know which bits are "significant")
+ */
+static inline unsigned long _page_hashfn(struct inode * inode, unsigned long offset)
+{
+#define i (((unsigned long) inode)/sizeof(unsigned long))
+#define o (offset >> PAGE_SHIFT)
+#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
+ return s(i+o) & (PAGE_HASH_SIZE-1);
+#undef i
+#undef o
+#undef s
+}
+
+#define page_hash(inode,offset) page_hash_table[_page_hashfn(inode,offset)]
+
+static inline struct page * find_page(struct inode * inode, unsigned long offset)
+{
+ struct page *page;
+ unsigned long flags;
+
+ for (page = page_hash(inode, offset); page ; page = page->next_hash) {
+ if (page->inode != inode)
+ continue;
+ if (page->offset != offset)
+ continue;
+ save_flags(flags);
+ cli();
+ page->referenced = 1;
+ page->count++;
+ restore_flags(flags);
+ break;
+ }
+ return page;
+}
+
+static inline void remove_page_from_hash_queue(struct page * page)
+{
+ struct page **p = &page_hash(page->inode,page->offset);
+
+ page_cache_size--;
+ if (page->next_hash)
+ page->next_hash->prev_hash = page->prev_hash;
+ if (page->prev_hash)
+ page->prev_hash->next_hash = page->next_hash;
+ if (*p == page)
+ *p = page->next_hash;
+ page->next_hash = page->prev_hash = NULL;
+}
+
+static inline void add_page_to_hash_queue(struct inode * inode, struct page * page)
+{
+ struct page **p = &page_hash(inode,page->offset);
+
+ page_cache_size++;
+ page->referenced = 1;
+ page->age = PAGE_AGE_VALUE;
+ page->prev_hash = NULL;
+ if ((page->next_hash = *p) != NULL)
+ page->next_hash->prev_hash = page;
+ *p = page;
+}
+
+static inline void remove_page_from_inode_queue(struct page * page)
+{
+ struct inode * inode = page->inode;
+
+ page->inode = NULL;
+ inode->i_nrpages--;
+ if (inode->i_pages == page)
+ inode->i_pages = page->next;
+ if (page->next)
+ page->next->prev = page->prev;
+ if (page->prev)
+ page->prev->next = page->next;
+ page->next = NULL;
+ page->prev = NULL;
+}
+
+static inline void add_page_to_inode_queue(struct inode * inode, struct page * page)
+{
+ struct page **p = &inode->i_pages;
+
+ inode->i_nrpages++;
+ page->inode = inode;
+ page->prev = NULL;
+ if ((page->next = *p) != NULL)
+ page->next->prev = page;
+ *p = page;
+}
+
+extern void __wait_on_page(struct page *);
+static inline void wait_on_page(struct page * page)
+{
+ if (page->locked)
+ __wait_on_page(page);
+}
+
+extern void update_vm_cache(struct inode *, unsigned long, const char *, int);
+
+#endif /* ! MACH */
+
+#endif
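For illustration, how the queue helpers above pair up when a page enters or leaves the page cache (the pattern filemap.c relies on). The helper names are hypothetical; locking and the page->count reference are left to the caller, and removal must hit the hash queue first because the inode-queue helper clears page->inode.

static inline void example_cache_page(struct inode *inode, struct page *page,
				      unsigned long offset)
{
	page->offset = offset;			/* hash bucket depends on this */
	add_page_to_inode_queue(inode, page);	/* links into inode->i_pages */
	add_page_to_hash_queue(inode, page);	/* global lookup hash */
}

static inline void example_uncache_page(struct page *page)
{
	remove_page_from_hash_queue(page);	/* still needs page->inode */
	remove_page_from_inode_queue(page);	/* clears page->inode */
}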
diff --git a/i386/i386at/gpl/linux/include/linux/param.h b/i386/i386at/gpl/linux/include/linux/param.h
new file mode 100644
index 00000000..092e92f6
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/param.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_PARAM_H
+#define _LINUX_PARAM_H
+
+#include <asm/param.h>
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/pci.h b/i386/i386at/gpl/linux/include/linux/pci.h
new file mode 100644
index 00000000..9e059501
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/pci.h
@@ -0,0 +1,618 @@
+/*
+ * PCI defines and function prototypes
+ * Copyright 1994, Drew Eckhardt
+ *
+ * For more information, please consult
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * M/S HF3-15A
+ * 5200 N.E. Elam Young Parkway
+ * Hillsboro, Oregon 97124-6497
+ * +1 (503) 696-2000
+ * +1 (800) 433-5177
+ *
+ * Manuals are $25 each or $50 for all three, plus $7 shipping
+ * within the United States, $35 abroad.
+ */
+
+
+
+/* PROCEDURE TO REPORT NEW PCI DEVICES
+ * We are trying to collect information on new PCI devices, using
+ * the standard PCI identification procedure. If some warning is
+ * displayed at boot time, please report
+ * - /proc/pci
+ * - your exact hardware description. Try to find out
+ * which device is unknown. It may be your mainboard chipset,
+ * PCI-CPU bridge, or PCI-ISA bridge.
+ * - If you can't find the actual information in your hardware
+ * booklet, try to read the references of the chip on the board.
+ * - Send all that, with the word PCIPROBE in the subject,
+ * to frederic@cao-vlsi.ibp.fr, and I'll add your device to
+ * the list as soon as possible
+ * fred.
+ */
+
+
+
+#ifndef PCI_H
+#define PCI_H
+
+/*
+ * Under PCI, each device has 256 bytes of configuration address space,
+ * of which the first 64 bytes are standardized as follows:
+ */
+#define PCI_VENDOR_ID 0x00 /* 16 bits */
+#define PCI_DEVICE_ID 0x02 /* 16 bits */
+#define PCI_COMMAND 0x04 /* 16 bits */
+#define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */
+#define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */
+#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
+#define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */
+#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
+#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
+#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
+#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
+#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
+#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
+
+#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_66MHZ 0x20 /* Support 66 Mhz PCI 2.1 bus */
+#define PCI_STATUS_UDF 0x40 /* Support User Definable Features */
+
+#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
+#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
+#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
+#define PCI_STATUS_DEVSEL_FAST 0x000
+#define PCI_STATUS_DEVSEL_MEDIUM 0x200
+#define PCI_STATUS_DEVSEL_SLOW 0x400
+#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
+#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */
+#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
+#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */
+#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */
+
+#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8
+ revision */
+#define PCI_REVISION_ID 0x08 /* Revision ID */
+#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */
+#define PCI_CLASS_DEVICE 0x0a /* Device class */
+
+#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
+#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
+#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_BIST 0x0f /* 8 bits */
+#define PCI_BIST_CODE_MASK 0x0f /* Return result */
+#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */
+#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */
+
+/*
+ * Base addresses specify locations in memory or I/O space.
+ * Decoded size can be determined by writing a value of
+ * 0xffffffff to the register, and reading it back. Only
+ * 1 bits are decoded.
+ */
+#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
+#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits */
+#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits */
+#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */
+#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */
+#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */
+#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */
+#define PCI_BASE_ADDRESS_SPACE_IO 0x01
+#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
+#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
+#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */
+#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M */
+#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
+#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
+#define PCI_BASE_ADDRESS_MEM_MASK (~0x0f)
+#define PCI_BASE_ADDRESS_IO_MASK (~0x03)
+/* bit 1 is reserved if address_space = 1 */
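For illustration, the sizing rule described in the comment above the base-address registers: write all ones, read back, and the writable bits reveal the decoded size. The sketch assumes the pcibios_read/write_config_dword() helpers that kernels of this vintage declare in <linux/bios32.h>; the function name is hypothetical.

unsigned long example_bar_size(unsigned char bus, unsigned char dev_fn, int bar)
{
	unsigned char reg = PCI_BASE_ADDRESS_0 + 4 * bar;
	unsigned int old, mask;

	pcibios_read_config_dword(bus, dev_fn, reg, &old);
	pcibios_write_config_dword(bus, dev_fn, reg, 0xffffffff);
	pcibios_read_config_dword(bus, dev_fn, reg, &mask);
	pcibios_write_config_dword(bus, dev_fn, reg, old);	/* restore */

	if (old & PCI_BASE_ADDRESS_SPACE_IO)
		mask &= PCI_BASE_ADDRESS_IO_MASK;
	else
		mask &= PCI_BASE_ADDRESS_MEM_MASK;

	return mask ? ~mask + 1 : 0;	/* lowest writable bit = region size */
}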
+
+#define PCI_CARDBUS_CIS 0x28
+#define PCI_SUBSYSTEM_ID 0x2c
+#define PCI_SUBSYSTEM_VENDOR_ID 0x2e
+#define PCI_ROM_ADDRESS 0x30 /* 32 bits */
+#define PCI_ROM_ADDRESS_ENABLE 0x01 /* Write 1 to enable ROM,
+ bits 31..11 are address,
+ 10..2 are reserved */
+/* 0x34-0x3b are reserved */
+#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
+#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */
+#define PCI_MIN_GNT 0x3e /* 8 bits */
+#define PCI_MAX_LAT 0x3f /* 8 bits */
+
+#define PCI_CLASS_NOT_DEFINED 0x0000
+#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
+
+#define PCI_BASE_CLASS_STORAGE 0x01
+#define PCI_CLASS_STORAGE_SCSI 0x0100
+#define PCI_CLASS_STORAGE_IDE 0x0101
+#define PCI_CLASS_STORAGE_FLOPPY 0x0102
+#define PCI_CLASS_STORAGE_IPI 0x0103
+#define PCI_CLASS_STORAGE_RAID 0x0104
+#define PCI_CLASS_STORAGE_OTHER 0x0180
+
+#define PCI_BASE_CLASS_NETWORK 0x02
+#define PCI_CLASS_NETWORK_ETHERNET 0x0200
+#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
+#define PCI_CLASS_NETWORK_FDDI 0x0202
+#define PCI_CLASS_NETWORK_ATM 0x0203
+#define PCI_CLASS_NETWORK_OTHER 0x0280
+
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_CLASS_DISPLAY_VGA 0x0300
+#define PCI_CLASS_DISPLAY_XGA 0x0301
+#define PCI_CLASS_DISPLAY_OTHER 0x0380
+
+#define PCI_BASE_CLASS_MULTIMEDIA 0x04
+#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
+#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
+#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
+
+#define PCI_BASE_CLASS_MEMORY 0x05
+#define PCI_CLASS_MEMORY_RAM 0x0500
+#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_OTHER 0x0580
+
+#define PCI_BASE_CLASS_BRIDGE 0x06
+#define PCI_CLASS_BRIDGE_HOST 0x0600
+#define PCI_CLASS_BRIDGE_ISA 0x0601
+#define PCI_CLASS_BRIDGE_EISA 0x0602
+#define PCI_CLASS_BRIDGE_MC 0x0603
+#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
+#define PCI_CLASS_BRIDGE_NUBUS 0x0606
+#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
+#define PCI_CLASS_BRIDGE_OTHER 0x0680
+
+
+#define PCI_BASE_CLASS_COMMUNICATION 0x07
+#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
+#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
+#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+
+#define PCI_BASE_CLASS_SYSTEM 0x08
+#define PCI_CLASS_SYSTEM_PIC 0x0800
+#define PCI_CLASS_SYSTEM_DMA 0x0801
+#define PCI_CLASS_SYSTEM_TIMER 0x0802
+#define PCI_CLASS_SYSTEM_RTC 0x0803
+#define PCI_CLASS_SYSTEM_OTHER 0x0880
+
+#define PCI_BASE_CLASS_INPUT 0x09
+#define PCI_CLASS_INPUT_KEYBOARD 0x0900
+#define PCI_CLASS_INPUT_PEN 0x0901
+#define PCI_CLASS_INPUT_MOUSE 0x0902
+#define PCI_CLASS_INPUT_OTHER 0x0980
+
+#define PCI_BASE_CLASS_DOCKING 0x0a
+#define PCI_CLASS_DOCKING_GENERIC 0x0a00
+#define PCI_CLASS_DOCKING_OTHER 0x0a01
+
+#define PCI_BASE_CLASS_PROCESSOR 0x0b
+#define PCI_CLASS_PROCESSOR_386 0x0b00
+#define PCI_CLASS_PROCESSOR_486 0x0b01
+#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
+#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10
+#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
+#define PCI_CLASS_PROCESSOR_CO 0x0b40
+
+#define PCI_BASE_CLASS_SERIAL 0x0c
+#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
+#define PCI_CLASS_SERIAL_ACCESS 0x0c01
+#define PCI_CLASS_SERIAL_SSA 0x0c02
+#define PCI_CLASS_SERIAL_USB 0x0c03
+#define PCI_CLASS_SERIAL_FIBER 0x0c04
+
+#define PCI_CLASS_OTHERS 0xff
+
+/*
+ * Vendor and card ID's: sort these numerically according to vendor
+ * (and according to card ID within vendor)
+ */
+#define PCI_VENDOR_ID_COMPAQ 0x0e11
+#define PCI_DEVICE_ID_COMPAQ_1280 0x3033
+#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130
+
+#define PCI_VENDOR_ID_NCR 0x1000
+#define PCI_DEVICE_ID_NCR_53C810 0x0001
+#define PCI_DEVICE_ID_NCR_53C820 0x0002
+#define PCI_DEVICE_ID_NCR_53C825 0x0003
+#define PCI_DEVICE_ID_NCR_53C815 0x0004
+
+#define PCI_VENDOR_ID_ATI 0x1002
+#define PCI_DEVICE_ID_ATI_68800 0x4158
+#define PCI_DEVICE_ID_ATI_215CT222 0x4354
+#define PCI_DEVICE_ID_ATI_210888CX 0x4358
+#define PCI_DEVICE_ID_ATI_210888GX 0x4758
+
+#define PCI_VENDOR_ID_VLSI 0x1004
+#define PCI_DEVICE_ID_VLSI_82C592 0x0005
+#define PCI_DEVICE_ID_VLSI_82C593 0x0006
+#define PCI_DEVICE_ID_VLSI_82C594 0x0007
+#define PCI_DEVICE_ID_VLSI_82C597 0x0009
+
+#define PCI_VENDOR_ID_ADL 0x1005
+#define PCI_DEVICE_ID_ADL_2301 0x2301
+
+#define PCI_VENDOR_ID_NS 0x100b
+#define PCI_DEVICE_ID_NS_87410 0xd001
+
+#define PCI_VENDOR_ID_TSENG 0x100c
+#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
+#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205
+#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206
+#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207
+
+#define PCI_VENDOR_ID_WEITEK 0x100e
+#define PCI_DEVICE_ID_WEITEK_P9000 0x9001
+#define PCI_DEVICE_ID_WEITEK_P9100 0x9100
+
+#define PCI_VENDOR_ID_DEC 0x1011
+#define PCI_DEVICE_ID_DEC_BRD 0x0001
+#define PCI_DEVICE_ID_DEC_TULIP 0x0002
+#define PCI_DEVICE_ID_DEC_TGA 0x0004
+#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009
+#define PCI_DEVICE_ID_DEC_FDDI 0x000F
+#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014
+
+#define PCI_VENDOR_ID_CIRRUS 0x1013
+#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0
+#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4
+#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8
+#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac
+#define PCI_DEVICE_ID_CIRRUS_6205 0x0205
+#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
+#define PCI_DEVICE_ID_CIRRUS_7542 0x1200
+#define PCI_DEVICE_ID_CIRRUS_7543 0x1202
+
+#define PCI_VENDOR_ID_IBM 0x1014
+#define PCI_DEVICE_ID_IBM_82G2675 0x001d
+
+#define PCI_VENDOR_ID_WD 0x101c
+#define PCI_DEVICE_ID_WD_7197 0x3296
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_LANCE 0x2000
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+
+#define PCI_VENDOR_ID_TRIDENT 0x1023
+#define PCI_DEVICE_ID_TRIDENT_9420 0x9420
+#define PCI_DEVICE_ID_TRIDENT_9440 0x9440
+#define PCI_DEVICE_ID_TRIDENT_9660 0x9660
+
+#define PCI_VENDOR_ID_AI 0x1025
+#define PCI_DEVICE_ID_AI_M1435 0x1435
+
+#define PCI_VENDOR_ID_MATROX 0x102B
+#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
+#define PCI_DEVICE_ID_MATROX_MIL 0x0519
+#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10
+
+#define PCI_VENDOR_ID_CT 0x102c
+#define PCI_DEVICE_ID_CT_65545 0x00d8
+
+#define PCI_VENDOR_ID_FD 0x1036
+#define PCI_DEVICE_ID_FD_36C70 0x0000
+
+#define PCI_VENDOR_ID_SI 0x1039
+#define PCI_DEVICE_ID_SI_6201 0x0001
+#define PCI_DEVICE_ID_SI_6202 0x0002
+#define PCI_DEVICE_ID_SI_503 0x0008
+#define PCI_DEVICE_ID_SI_501 0x0406
+#define PCI_DEVICE_ID_SI_496 0x0496
+#define PCI_DEVICE_ID_SI_601 0x0601
+#define PCI_DEVICE_ID_SI_5511 0x5511
+#define PCI_DEVICE_ID_SI_5513 0x5513
+
+#define PCI_VENDOR_ID_HP 0x103c
+#define PCI_DEVICE_ID_HP_J2585A 0x1030
+
+#define PCI_VENDOR_ID_PCTECH 0x1042
+#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000
+
+#define PCI_VENDOR_ID_DPT 0x1044
+#define PCI_DEVICE_ID_DPT 0xa400
+
+#define PCI_VENDOR_ID_OPTI 0x1045
+#define PCI_DEVICE_ID_OPTI_92C178 0xc178
+#define PCI_DEVICE_ID_OPTI_82C557 0xc557
+#define PCI_DEVICE_ID_OPTI_82C558 0xc558
+#define PCI_DEVICE_ID_OPTI_82C621 0xc621
+#define PCI_DEVICE_ID_OPTI_82C822 0xc822
+
+#define PCI_VENDOR_ID_SGS 0x104a
+#define PCI_DEVICE_ID_SGS_2000 0x0008
+#define PCI_DEVICE_ID_SGS_1764 0x0009
+
+#define PCI_VENDOR_ID_BUSLOGIC 0x104B
+#define PCI_DEVICE_ID_BUSLOGIC_946C_2 0x0140
+#define PCI_DEVICE_ID_BUSLOGIC_946C 0x1040
+#define PCI_DEVICE_ID_BUSLOGIC_930 0x8130
+
+#define PCI_VENDOR_ID_OAK 0x104e
+#define PCI_DEVICE_ID_OAK_OTI107 0x0107
+
+#define PCI_VENDOR_ID_PROMISE 0x105a
+#define PCI_DEVICE_ID_PROMISE_5300 0x5300
+
+#define PCI_VENDOR_ID_N9 0x105d
+#define PCI_DEVICE_ID_N9_I128 0x2309
+#define PCI_DEVICE_ID_N9_I128_2 0x2339
+
+#define PCI_VENDOR_ID_UMC 0x1060
+#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
+#define PCI_DEVICE_ID_UMC_UM8891A 0x0891
+#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
+#define PCI_DEVICE_ID_UMC_UM8886A 0x886a
+#define PCI_DEVICE_ID_UMC_UM8881F 0x8881
+#define PCI_DEVICE_ID_UMC_UM8886F 0x8886
+#define PCI_DEVICE_ID_UMC_UM9017F 0x9017
+#define PCI_DEVICE_ID_UMC_UM8886N 0xe886
+#define PCI_DEVICE_ID_UMC_UM8891N 0xe891
+
+#define PCI_VENDOR_ID_X 0x1061
+#define PCI_DEVICE_ID_X_AGX016 0x0001
+
+#define PCI_VENDOR_ID_NEXGEN 0x1074
+#define PCI_DEVICE_ID_NEXGEN_82C501 0x4e78
+
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020
+#define PCI_DEVICE_ID_QLOGIC_ISP1022 0x1022
+
+#define PCI_VENDOR_ID_LEADTEK 0x107d
+#define PCI_DEVICE_ID_LEADTEK_805 0x0000
+
+#define PCI_VENDOR_ID_CONTAQ 0x1080
+#define PCI_DEVICE_ID_CONTAQ_82C599 0x0600
+
+#define PCI_VENDOR_ID_FOREX 0x1083
+
+#define PCI_VENDOR_ID_OLICOM 0x108d
+
+#define PCI_VENDOR_ID_CMD 0x1095
+#define PCI_DEVICE_ID_CMD_640 0x0640
+#define PCI_DEVICE_ID_CMD_646 0x0646
+
+#define PCI_VENDOR_ID_VISION 0x1098
+#define PCI_DEVICE_ID_VISION_QD8500 0x0001
+#define PCI_DEVICE_ID_VISION_QD8580 0x0002
+
+#define PCI_VENDOR_ID_SIERRA 0x10a8
+#define PCI_DEVICE_ID_SIERRA_STB 0x0000
+
+#define PCI_VENDOR_ID_ACC 0x10aa
+#define PCI_DEVICE_ID_ACC_2056 0x0000
+
+#define PCI_VENDOR_ID_WINBOND 0x10ad
+#define PCI_DEVICE_ID_WINBOND_83769 0x0001
+#define PCI_DEVICE_ID_WINBOND_82C105 0x0105
+
+#define PCI_VENDOR_ID_3COM 0x10b7
+#define PCI_DEVICE_ID_3COM_3C590 0x5900
+#define PCI_DEVICE_ID_3COM_3C595TX 0x5950
+#define PCI_DEVICE_ID_3COM_3C595T4 0x5951
+#define PCI_DEVICE_ID_3COM_3C595MII 0x5952
+
+#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1445 0x1445
+#define PCI_DEVICE_ID_AL_M1449 0x1449
+#define PCI_DEVICE_ID_AL_M1451 0x1451
+#define PCI_DEVICE_ID_AL_M1461 0x1461
+#define PCI_DEVICE_ID_AL_M1489 0x1489
+#define PCI_DEVICE_ID_AL_M1511 0x1511
+#define PCI_DEVICE_ID_AL_M1513 0x1513
+#define PCI_DEVICE_ID_AL_M4803 0x5215
+
+#define PCI_VENDOR_ID_ASP 0x10cd
+#define PCI_DEVICE_ID_ASP_ABP940 0x1200
+
+#define PCI_VENDOR_ID_IMS 0x10e0
+#define PCI_DEVICE_ID_IMS_8849 0x8849
+
+#define PCI_VENDOR_ID_TEKRAM2 0x10e1
+#define PCI_DEVICE_ID_TEKRAM2_690c 0x690c
+
+#define PCI_VENDOR_ID_AMCC 0x10e8
+#define PCI_DEVICE_ID_AMCC_MYRINET 0x8043
+
+#define PCI_VENDOR_ID_INTERG 0x10ea
+#define PCI_DEVICE_ID_INTERG_1680 0x1680
+
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+#define PCI_DEVICE_ID_REALTEK_8029 0x8029
+
+#define PCI_VENDOR_ID_INIT 0x1101
+#define PCI_DEVICE_ID_INIT_320P 0x9100
+
+#define PCI_VENDOR_ID_VIA 0x1106
+#define PCI_DEVICE_ID_VIA_82C505 0x0505
+#define PCI_DEVICE_ID_VIA_82C561 0x0561
+#define PCI_DEVICE_ID_VIA_82C576 0x0576
+#define PCI_DEVICE_ID_VIA_82C416 0x1571
+
+#define PCI_VENDOR_ID_VORTEX 0x1119
+#define PCI_DEVICE_ID_VORTEX_GDT 0x0001
+
+#define PCI_VENDOR_ID_EF 0x111a
+#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000
+#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002
+
+#define PCI_VENDOR_ID_FORE 0x1127
+#define PCI_DEVICE_ID_FORE_PCA200PC 0x0210
+
+#define PCI_VENDOR_ID_IMAGINGTECH 0x112f
+#define PCI_DEVICE_ID_IMAGINGTECH_ICPCI 0x0000
+
+#define PCI_VENDOR_ID_PLX 0x113c
+#define PCI_DEVICE_ID_PLX_9060 0x0001
+
+#define PCI_VENDOR_ID_ALLIANCE 0x1142
+#define PCI_DEVICE_ID_ALLIANCE_PROMOTIO 0x3210
+#define PCI_DEVICE_ID_ALLIANCE_PROVIDEO 0x6422
+
+#define PCI_VENDOR_ID_MUTECH 0x1159
+#define PCI_DEVICE_ID_MUTECH_MV1000 0x0001
+
+#define PCI_VENDOR_ID_ZEITNET 0x1193
+#define PCI_DEVICE_ID_ZEITNET_1221 0x0001
+#define PCI_DEVICE_ID_ZEITNET_1225 0x0002
+
+#define PCI_VENDOR_ID_SPECIALIX 0x11cb
+#define PCI_DEVICE_ID_SPECIALIX_XIO 0x4000
+#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000
+
+#define PCI_VENDOR_ID_RP 0x11fe
+#define PCI_DEVICE_ID_RP8OCTA 0x0001
+#define PCI_DEVICE_ID_RP8INTF 0x0002
+#define PCI_DEVICE_ID_RP16INTF 0x0003
+#define PCI_DEVICE_ID_RP32INTF 0x0004
+
+#define PCI_VENDOR_ID_CYCLADES 0x120e
+#define PCI_DEVICE_ID_CYCLADES_Y 0x0100
+
+#define PCI_VENDOR_ID_SYMPHONY 0x1c1c
+#define PCI_DEVICE_ID_SYMPHONY_101 0x0001
+
+#define PCI_VENDOR_ID_TEKRAM 0x1de1
+#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+
+#define PCI_VENDOR_ID_AVANCE 0x4005
+#define PCI_DEVICE_ID_AVANCE_2302 0x2302
+
+#define PCI_VENDOR_ID_S3 0x5333
+#define PCI_DEVICE_ID_S3_811 0x8811
+#define PCI_DEVICE_ID_S3_868 0x8880
+#define PCI_DEVICE_ID_S3_928 0x88b0
+#define PCI_DEVICE_ID_S3_864_1 0x88c0
+#define PCI_DEVICE_ID_S3_864_2 0x88c1
+#define PCI_DEVICE_ID_S3_964_1 0x88d0
+#define PCI_DEVICE_ID_S3_964_2 0x88d1
+#define PCI_DEVICE_ID_S3_968 0x88f0
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL_82375 0x0482
+#define PCI_DEVICE_ID_INTEL_82424 0x0483
+#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_82430 0x0486
+#define PCI_DEVICE_ID_INTEL_82434 0x04a3
+#define PCI_DEVICE_ID_INTEL_7116 0x1223
+#define PCI_DEVICE_ID_INTEL_82596 0x1226
+#define PCI_DEVICE_ID_INTEL_82865 0x1227
+#define PCI_DEVICE_ID_INTEL_82557 0x1229
+#define PCI_DEVICE_ID_INTEL_82437 0x122d
+#define PCI_DEVICE_ID_INTEL_82371_0 0x122e
+#define PCI_DEVICE_ID_INTEL_82371_1 0x1230
+#define PCI_DEVICE_ID_INTEL_P6 0x84c4
+
+#define PCI_VENDOR_ID_ADAPTEC 0x9004
+#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078
+#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078
+#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178
+#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278
+#define PCI_DEVICE_ID_ADAPTEC_7873 0x7378
+#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478
+#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078
+#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178
+#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278
+#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378
+#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478
+
+#define PCI_VENDOR_ID_ATRONICS 0x907f
+#define PCI_DEVICE_ID_ATRONICS_2015 0x2015
+
+#define PCI_VENDOR_ID_HER 0xedd8
+#define PCI_DEVICE_ID_HER_STING 0xa091
+#define PCI_DEVICE_ID_HER_STINGARK 0xa099
+
+/*
+ * The PCI interface treats multi-function devices as independent
+ * devices. The slot/function address of each device is encoded
+ * in a single byte as follows:
+ *
+ * 7:3 = slot
+ * 2:0 = function
+ */
+#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
+
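Editor's illustration, not part of the patch: a minimal C sketch of how the three macros above round-trip a slot/function pair (the function name is made up; printk is assumed from <linux/kernel.h>).

/* Hypothetical: encode slot 3, function 1 and decode it again. */
static void pci_devfn_example(void)
{
        unsigned int devfn = PCI_DEVFN(3, 1);   /* (3 << 3) | 1 == 0x19 */

        printk("devfn %02x = slot %d, func %d\n",
               devfn, PCI_SLOT(devfn), PCI_FUNC(devfn));
}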
+/*
+ * There is one pci_dev structure for each slot-number/function-number
+ * combination:
+ */
+struct pci_dev {
+ struct pci_bus *bus; /* bus this device is on */
+ struct pci_dev *sibling; /* next device on this bus */
+ struct pci_dev *next; /* chain of all devices */
+
+ void *sysdata; /* hook for sys-specific extension */
+
+ unsigned int devfn; /* encoded device & function index */
+ unsigned short vendor;
+ unsigned short device;
+ unsigned int class; /* 3 bytes: (base,sub,prog-if) */
+ unsigned int master : 1; /* set if device is master capable */
+ /*
+ * In theory, the irq level can be read from configuration
+ * space and all would be fine. However, old PCI chips don't
+ * support these registers and return 0 instead. For example,
+ * the Vision864-P rev 0 chip can use INTA, but returns 0 in
+ * the interrupt line and pin registers. pci_init()
+ * initializes this field with the value at PCI_INTERRUPT_LINE
+ * and it is the job of pcibios_fixup() to change it if
+ * necessary. The field must not be 0 unless the device
+ * cannot generate interrupts at all.
+ */
+ unsigned char irq; /* irq generated by this device */
+};
+
+struct pci_bus {
+ struct pci_bus *parent; /* parent bus this bridge is on */
+ struct pci_bus *children; /* chain of P2P bridges on this bus */
+ struct pci_bus *next; /* chain of all PCI buses */
+
+ struct pci_dev *self; /* bridge device as seen by parent */
+ struct pci_dev *devices; /* devices behind this bridge */
+
+ void *sysdata; /* hook for sys-specific extension */
+
+ unsigned char number; /* bus number */
+ unsigned char primary; /* number of primary bridge */
+ unsigned char secondary; /* number of secondary bridge */
+ unsigned char subordinate; /* max number of subordinate buses */
+};
+
+/*
+ * This is used to map a vendor-id/device-id pair into device-specific
+ * information.
+ */
+struct pci_dev_info {
+ unsigned short vendor; /* vendor id */
+ unsigned short device; /* device id */
+
+ const char *name; /* device name */
+ unsigned char bridge_type; /* bridge type or 0xff */
+};
+
+extern struct pci_bus pci_root; /* root bus */
+extern struct pci_dev *pci_devices; /* list of all devices */
+
+
+extern unsigned long pci_init (unsigned long mem_start, unsigned long mem_end);
+
+extern struct pci_dev_info *pci_lookup_dev (unsigned int vendor,
+ unsigned int dev);
+extern const char *pci_strclass (unsigned int class);
+extern const char *pci_strvendor (unsigned int vendor);
+extern const char *pci_strdev (unsigned int vendor, unsigned int device);
+
+extern int get_pci_list (char *buf);
+
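An illustrative sketch (editor's addition, outside the patch): with the declarations above, a driver of this era could walk the global device list and print each device via the name-lookup helpers. The function name and printk output are assumptions.

static void pci_dump_devices(void)
{
        struct pci_dev *dev;

        /* pci_devices chains every probed device through dev->next. */
        for (dev = pci_devices; dev; dev = dev->next)
                printk("pci bus %d slot %d func %d: %s %s (irq %d)\n",
                       dev->bus->number,
                       PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
                       pci_strvendor(dev->vendor),
                       pci_strdev(dev->vendor, dev->device),
                       dev->irq);
}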
+#endif /* PCI_H */
diff --git a/i386/i386at/gpl/linux/include/linux/personality.h b/i386/i386at/gpl/linux/include/linux/personality.h
new file mode 100644
index 00000000..3e465eaa
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/personality.h
@@ -0,0 +1,51 @@
+#ifndef _PERSONALITY_H
+#define _PERSONALITY_H
+
+#include <linux/linkage.h>
+#include <linux/ptrace.h>
+
+
+/* Flags for bug emulation. These occupy the top three bytes. */
+#define STICKY_TIMEOUTS 0x4000000
+#define WHOLE_SECONDS 0x2000000
+
+/* Personality types. These go in the low byte. Avoid using the top bit,
+ * it will conflict with error returns.
+ */
+#define PER_MASK (0x00ff)
+#define PER_LINUX (0x0000)
+#define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
+#define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
+#define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
+#define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
+#define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
+#define PER_BSD (0x0006)
+#define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
+
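Editorial sketch, not part of the patch: because the bug-emulation flags live in the upper bytes and the personality type in the low byte, tests usually mask with PER_MASK or check a single flag bit. The helper names below are hypothetical.

/* Does this personality want "sticky" select() timeouts? */
static int wants_sticky_timeouts(unsigned long personality)
{
        return (personality & STICKY_TIMEOUTS) != 0;
}

/* Comparing base types must ignore the emulation flags. */
static int is_svr4(unsigned long personality)
{
        return (personality & PER_MASK) == (PER_SVR4 & PER_MASK);
}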
+/* Prototype for an lcall7 syscall handler. */
+typedef asmlinkage void (*lcall7_func)(struct pt_regs *);
+
+
+/* Description of an execution domain - personality range supported,
+ * lcall7 syscall handler, start up / shut down functions etc.
+ * N.B. The name and lcall7 handler must be where they are since the
+ * offset of the handler is hard coded in kernel/sys_call.S.
+ */
+struct exec_domain {
+ const char *name;
+ lcall7_func handler;
+ unsigned char pers_low, pers_high;
+ unsigned long * signal_map;
+ unsigned long * signal_invmap;
+ int *use_count;
+ struct exec_domain *next;
+};
+
+extern struct exec_domain default_exec_domain;
+
+extern struct exec_domain *lookup_exec_domain(unsigned long personality);
+extern int register_exec_domain(struct exec_domain *it);
+extern int unregister_exec_domain(struct exec_domain *it);
+extern asmlinkage int sys_personality(unsigned long personality);
+
+#endif /* _PERSONALITY_H */
diff --git a/i386/i386at/gpl/linux/include/linux/proc_fs.h b/i386/i386at/gpl/linux/include/linux/proc_fs.h
new file mode 100644
index 00000000..cc674c9b
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/proc_fs.h
@@ -0,0 +1,269 @@
+#ifndef _LINUX_PROC_FS_H
+#define _LINUX_PROC_FS_H
+
+#include <linux/fs.h>
+#include <linux/malloc.h>
+
+/*
+ * The proc filesystem constants/structures
+ */
+
+/*
+ * We always define these enumerators
+ */
+
+enum root_directory_inos {
+ PROC_ROOT_INO = 1,
+ PROC_LOADAVG,
+ PROC_UPTIME,
+ PROC_MEMINFO,
+ PROC_KMSG,
+ PROC_VERSION,
+ PROC_CPUINFO,
+ PROC_PCI,
+ PROC_SELF, /* will change inode # */
+ PROC_NET,
+ PROC_SCSI,
+ PROC_MALLOC,
+ PROC_KCORE,
+ PROC_MODULES,
+ PROC_STAT,
+ PROC_DEVICES,
+ PROC_INTERRUPTS,
+ PROC_FILESYSTEMS,
+ PROC_KSYMS,
+ PROC_DMA,
+ PROC_IOPORTS,
+ PROC_APM,
+#ifdef __SMP_PROF__
+ PROC_SMP_PROF,
+#endif
+ PROC_PROFILE, /* whether enabled or not */
+ PROC_CMDLINE,
+ PROC_SYS,
+ PROC_MTAB
+};
+
+enum pid_directory_inos {
+ PROC_PID_INO = 2,
+ PROC_PID_STATUS,
+ PROC_PID_MEM,
+ PROC_PID_CWD,
+ PROC_PID_ROOT,
+ PROC_PID_EXE,
+ PROC_PID_FD,
+ PROC_PID_ENVIRON,
+ PROC_PID_CMDLINE,
+ PROC_PID_STAT,
+ PROC_PID_STATM,
+ PROC_PID_MAPS
+};
+
+enum pid_subdirectory_inos {
+ PROC_PID_FD_DIR = 1
+};
+
+enum net_directory_inos {
+ PROC_NET_UNIX = 128,
+ PROC_NET_ARP,
+ PROC_NET_ROUTE,
+ PROC_NET_DEV,
+ PROC_NET_RAW,
+ PROC_NET_TCP,
+ PROC_NET_UDP,
+ PROC_NET_SNMP,
+ PROC_NET_RARP,
+ PROC_NET_IGMP,
+ PROC_NET_IPMR_VIF,
+ PROC_NET_IPMR_MFC,
+ PROC_NET_IPFWFWD,
+ PROC_NET_IPFWIN,
+ PROC_NET_IPFWOUT,
+ PROC_NET_IPACCT,
+ PROC_NET_IPMSQHST,
+ PROC_NET_WAVELAN,
+ PROC_NET_IPX_INTERFACE,
+ PROC_NET_IPX_ROUTE,
+ PROC_NET_IPX,
+ PROC_NET_ATALK,
+ PROC_NET_AT_ROUTE,
+ PROC_NET_ATIF,
+ PROC_NET_AX25_ROUTE,
+ PROC_NET_AX25,
+ PROC_NET_AX25_CALLS,
+ PROC_NET_NR_NODES,
+ PROC_NET_NR_NEIGH,
+ PROC_NET_NR,
+ PROC_NET_SOCKSTAT,
+ PROC_NET_RTCACHE,
+ PROC_NET_AX25_BPQETHER,
+ PROC_NET_ALIAS_TYPES,
+ PROC_NET_ALIASES,
+ PROC_NET_LAST
+};
+
+enum scsi_directory_inos {
+ PROC_SCSI_SCSI = 256,
+ PROC_SCSI_ADVANSYS,
+ PROC_SCSI_EATA,
+ PROC_SCSI_EATA_PIO,
+ PROC_SCSI_AHA152X,
+ PROC_SCSI_AHA1542,
+ PROC_SCSI_AHA1740,
+ PROC_SCSI_AIC7XXX,
+ PROC_SCSI_BUSLOGIC,
+ PROC_SCSI_U14_34F,
+ PROC_SCSI_FDOMAIN,
+ PROC_SCSI_GENERIC_NCR5380,
+ PROC_SCSI_IN2000,
+ PROC_SCSI_PAS16,
+ PROC_SCSI_QLOGIC,
+ PROC_SCSI_SEAGATE,
+ PROC_SCSI_T128,
+ PROC_SCSI_NCR53C7xx,
+ PROC_SCSI_ULTRASTOR,
+ PROC_SCSI_7000FASST,
+ PROC_SCSI_EATA2X,
+ PROC_SCSI_AM53C974,
+ PROC_SCSI_SSC,
+ PROC_SCSI_NCR53C406A,
+ PROC_SCSI_SCSI_DEBUG,
+ PROC_SCSI_NOT_PRESENT,
+ PROC_SCSI_FILE, /* I'm assuming here that we */
+ PROC_SCSI_LAST = (PROC_SCSI_FILE + 16) /* won't ever see more than */
+}; /* 16 HBAs in one machine */
+
+/* Finally, the dynamically allocatable proc entries are reserved: */
+
+#define PROC_DYNAMIC_FIRST 4096
+#define PROC_NDYNAMIC 4096
+
+#define PROC_SUPER_MAGIC 0x9fa0
+
+/*
+ * This is not completely implemented yet. The idea is to
+ * create an in-memory tree (like the actual /proc filesystem
+ * tree) of these proc_dir_entries, so that we can dynamically
+ * add new files to /proc.
+ *
+ * The "next" pointer creates a linked list of one /proc directory,
+ * while parent/subdir create the directory structure (every
+ * /proc file has a parent, but "subdir" is NULL for all
+ * non-directory entries).
+ *
+ * "get_info" is called at "read", while "fill_inode" is used to
+ * fill in file type/protection/owner information specific to the
+ * particular /proc file.
+ */
+struct proc_dir_entry {
+ unsigned short low_ino;
+ unsigned short namelen;
+ const char *name;
+ mode_t mode;
+ nlink_t nlink;
+ uid_t uid;
+ gid_t gid;
+ unsigned long size;
+ struct inode_operations * ops;
+ int (*get_info)(char *, char **, off_t, int, int);
+ void (*fill_inode)(struct inode *);
+ struct proc_dir_entry *next, *parent, *subdir;
+ void *data;
+};
+
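For illustration only (editor's sketch, not in the patch): how a driver might hang a read-only file off /proc using this structure and proc_register_dynamic(), which is declared further down. The entry name, handler, and the S_* mode bits (from <linux/stat.h>) are assumptions.

/* get_info handler: fill the caller's page with our text. */
static int foo_get_info(char *page, char **start, off_t off,
                        int count, int dummy)
{
        return sprintf(page, "hello from foo\n");
}

static struct proc_dir_entry foo_proc_entry = {
        0, 3, "foo",                    /* low_ino (dynamic), namelen, name */
        S_IFREG | S_IRUGO, 1, 0, 0,     /* mode, nlink, uid, gid */
        0, NULL,                        /* size, ops (use defaults) */
        foo_get_info,                   /* read handler */
};

/* In the driver's init path:
 *      proc_register_dynamic(&proc_root, &foo_proc_entry);
 */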
+extern int (* dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+
+extern struct proc_dir_entry proc_root;
+extern struct proc_dir_entry proc_net;
+extern struct proc_dir_entry proc_scsi;
+extern struct proc_dir_entry proc_sys;
+extern struct proc_dir_entry proc_pid;
+extern struct proc_dir_entry proc_pid_fd;
+
+extern struct inode_operations proc_scsi_inode_operations;
+
+extern void proc_root_init(void);
+extern void proc_base_init(void);
+extern void proc_net_init(void);
+
+extern int proc_register(struct proc_dir_entry *, struct proc_dir_entry *);
+extern int proc_register_dynamic(struct proc_dir_entry *,
+ struct proc_dir_entry *);
+extern int proc_unregister(struct proc_dir_entry *, int);
+
+static inline int proc_net_register(struct proc_dir_entry * x)
+{
+ return proc_register(&proc_net, x);
+}
+
+static inline int proc_net_unregister(int x)
+{
+ return proc_unregister(&proc_net, x);
+}
+
+static inline int proc_scsi_register(struct proc_dir_entry *driver,
+ struct proc_dir_entry *x)
+{
+ x->ops = &proc_scsi_inode_operations;
+ if(x->low_ino < PROC_SCSI_FILE){
+ return(proc_register(&proc_scsi, x));
+ }else{
+ return(proc_register(driver, x));
+ }
+}
+
+static inline int proc_scsi_unregister(struct proc_dir_entry *driver, int x)
+{
+ extern void scsi_init_free(char *ptr, unsigned int size);
+
+ if(x <= PROC_SCSI_FILE)
+ return(proc_unregister(&proc_scsi, x));
+ else {
+ struct proc_dir_entry **p = &driver->subdir, *dp;
+ int ret;
+
+ while ((dp = *p) != NULL) {
+ if (dp->low_ino == x)
+ break;
+ p = &dp->next;
+ }
+ ret = proc_unregister(driver, x);
+ scsi_init_free((char *) dp, sizeof(struct proc_dir_entry) + 4);
+ return(ret);
+ }
+}
+
+extern struct super_block *proc_read_super(struct super_block *,void *,int);
+extern int init_proc_fs(void);
+extern struct inode * proc_get_inode(struct super_block *, int, struct proc_dir_entry *);
+extern void proc_statfs(struct super_block *, struct statfs *, int);
+extern void proc_read_inode(struct inode *);
+extern void proc_write_inode(struct inode *);
+extern int proc_match(int, const char *, struct proc_dir_entry *);
+
+/*
+ * These are generic /proc routines that use the internal
+ * "struct proc_dir_entry" tree to traverse the filesystem.
+ *
+ * The /proc root directory has extended versions to take care
+ * of the /proc/<pid> subdirectories.
+ */
+extern int proc_readdir(struct inode *, struct file *, void *, filldir_t);
+extern int proc_lookup(struct inode *, const char *, int, struct inode **);
+
+extern struct inode_operations proc_dir_inode_operations;
+extern struct inode_operations proc_net_inode_operations;
+extern struct inode_operations proc_netdir_inode_operations;
+extern struct inode_operations proc_scsi_inode_operations;
+extern struct inode_operations proc_mem_inode_operations;
+extern struct inode_operations proc_sys_inode_operations;
+extern struct inode_operations proc_array_inode_operations;
+extern struct inode_operations proc_arraylong_inode_operations;
+extern struct inode_operations proc_kcore_inode_operations;
+extern struct inode_operations proc_profile_inode_operations;
+extern struct inode_operations proc_kmsg_inode_operations;
+extern struct inode_operations proc_link_inode_operations;
+extern struct inode_operations proc_fd_inode_operations;
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/ptrace.h b/i386/i386at/gpl/linux/include/linux/ptrace.h
new file mode 100644
index 00000000..0a02879d
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/ptrace.h
@@ -0,0 +1,26 @@
+#ifndef _LINUX_PTRACE_H
+#define _LINUX_PTRACE_H
+/* ptrace.h */
+/* structs and defines to help the user use the ptrace system call. */
+
+/* has the defines to get at the registers. */
+
+#define PTRACE_TRACEME 0
+#define PTRACE_PEEKTEXT 1
+#define PTRACE_PEEKDATA 2
+#define PTRACE_PEEKUSR 3
+#define PTRACE_POKETEXT 4
+#define PTRACE_POKEDATA 5
+#define PTRACE_POKEUSR 6
+#define PTRACE_CONT 7
+#define PTRACE_KILL 8
+#define PTRACE_SINGLESTEP 9
+
+#define PTRACE_ATTACH 0x10
+#define PTRACE_DETACH 0x11
+
+#define PTRACE_SYSCALL 24
+
+#include <asm/ptrace.h>
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/quota.h b/i386/i386at/gpl/linux/include/linux/quota.h
new file mode 100644
index 00000000..59b86fe8
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/quota.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 1982, 1986 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Robert Elz at The University of Melbourne.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Version: $Id: quota.h,v 1.1.1.1 1997/02/25 21:27:30 thomas Exp $
+ */
+
+#ifndef _LINUX_QUOTA_
+#define _LINUX_QUOTA_
+
+#include <linux/errno.h>
+
+/*
+ * Convert diskblocks to blocks and the other way around.
+ * currently only to fool the BSD source. :-)
+ */
+#define dbtob(num) (num << 10)
+#define btodb(num) (num >> 10)
+
+/*
+ * Convert count of filesystem blocks to diskquota blocks, meant
+ * for filesystems where i_blksize != BLOCK_SIZE
+ */
+#define fs_to_dq_blocks(num, blksize) (((num) * (blksize)) / BLOCK_SIZE)
+
+/*
+ * Definitions for disk quotas imposed on the average user
+ * (big brother finally hits Linux).
+ *
+ * The following constants define the amount of time given a user
+ * before the soft limits are treated as hard limits (usually resulting
+ * in an allocation failure). The timer is started when the user crosses
+ * their soft limit, it is reset when they go below their soft limit.
+ */
+#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */
+#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */
+
+#define MAXQUOTAS 2
+#define USRQUOTA 0 /* element used for user quotas */
+#define GRPQUOTA 1 /* element used for group quotas */
+
+#include <linux/mount.h>
+
+/*
+ * Definitions for the default names of the quotas files.
+ */
+#define INITQFNAMES { \
+ "user", /* USRQUOTA */ \
+ "group", /* GRPQUOTA */ \
+ "undefined", \
+};
+
+#define QUOTAFILENAME "quota"
+#define QUOTAGROUP "staff"
+
+#define NR_DQHASH 43 /* Just an arbitrary number; any suggestions? */
+#define NR_DQUOTS 256 /* Number of quotas active at one time */
+
+/*
+ * Command definitions for the 'quotactl' system call.
+ * The commands are broken into a main command defined below
+ * and a subcommand that is used to convey the type of
+ * quota that is being manipulated (see above).
+ */
+#define SUBCMDMASK 0x00ff
+#define SUBCMDSHIFT 8
+#define QCMD(cmd, type) (((cmd) << SUBCMDSHIFT) | ((type) & SUBCMDMASK))
+
+#define Q_QUOTAON 0x0100 /* enable quotas */
+#define Q_QUOTAOFF 0x0200 /* disable quotas */
+#define Q_GETQUOTA 0x0300 /* get limits and usage */
+#define Q_SETQUOTA 0x0400 /* set limits and usage */
+#define Q_SETUSE 0x0500 /* set usage */
+#define Q_SYNC 0x0600 /* sync disk copy of a filesystem's quotas */
+#define Q_SETQLIM 0x0700 /* set limits */
+#define Q_GETSTATS 0x0800 /* get collected stats */
+
+/*
+ * The following structure defines the format of the disk quota file
+ * (as it appears on disk) - the file is an array of these structures
+ * indexed by user or group number.
+ */
+struct dqblk {
+ __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */
+ __u32 dqb_bsoftlimit; /* preferred limit on disk blks */
+ __u32 dqb_curblocks; /* current block count */
+ __u32 dqb_ihardlimit; /* maximum # allocated inodes */
+ __u32 dqb_isoftlimit; /* preferred inode limit */
+ __u32 dqb_curinodes; /* current # allocated inodes */
+ time_t dqb_btime; /* time limit for excessive disk use */
+ time_t dqb_itime; /* time limit for excessive files */
+};
+
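Editor's sketch, outside the patch: from user space the command word is built with QCMD() and handed to the quotactl(2) wrapper declared at the bottom of this header. The device path and uid are illustrative.

#include <stdio.h>
#include <sys/types.h>
#include <linux/quota.h>

/* Fetch uid 1000's usage and limits for the filesystem on /dev/hda1. */
static void show_user_quota(void)
{
        struct dqblk dq;

        /* Q_GETQUOTA goes in the high bits, the quota type in the low byte. */
        if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/hda1", 1000,
                     (caddr_t) &dq) == 0)
                printf("blocks in use: %u, soft limit: %u\n",
                       dq.dqb_curblocks, dq.dqb_bsoftlimit);
}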
+/*
+ * Shorthand notation.
+ */
+#define dq_bhardlimit dq_dqb.dqb_bhardlimit
+#define dq_bsoftlimit dq_dqb.dqb_bsoftlimit
+#define dq_curblocks dq_dqb.dqb_curblocks
+#define dq_ihardlimit dq_dqb.dqb_ihardlimit
+#define dq_isoftlimit dq_dqb.dqb_isoftlimit
+#define dq_curinodes dq_dqb.dqb_curinodes
+#define dq_btime dq_dqb.dqb_btime
+#define dq_itime dq_dqb.dqb_itime
+
+#define dqoff(UID) ((off_t)((UID) * sizeof (struct dqblk)))
+
+struct dqstats {
+ __u32 lookups;
+ __u32 drops;
+ __u32 reads;
+ __u32 writes;
+ __u32 cache_hits;
+ __u32 pages_allocated;
+ __u32 allocated_dquots;
+ __u32 free_dquots;
+ __u32 syncs;
+};
+
+#ifdef __KERNEL__
+
+/*
+ * Maximum length of a message generated in the quota system,
+ * that needs to be kicked onto the tty.
+ */
+#define MAX_QUOTA_MESSAGE 75
+
+#define DQ_LOCKED 0x01 /* locked for update */
+#define DQ_WANT 0x02 /* wanted for update */
+#define DQ_MOD 0x04 /* dquot modified since read */
+#define DQ_BLKS 0x10 /* uid/gid has been warned about blk limit */
+#define DQ_INODES 0x20 /* uid/gid has been warned about inode limit */
+#define DQ_FAKE 0x40 /* no limits only usage */
+
+struct dquot {
+ unsigned int dq_id; /* id this applies to (uid, gid) */
+ short dq_type; /* type of quota */
+ kdev_t dq_dev; /* Device this applies to */
+ short dq_flags; /* see DQ_* */
+ short dq_count; /* reference count */
+ struct vfsmount *dq_mnt; /* vfsmountpoint this applies to */
+ struct dqblk dq_dqb; /* diskquota usage */
+ struct wait_queue *dq_wait; /* pointer to waitqueue */
+ struct dquot *dq_prev; /* pointer to prev dquot */
+ struct dquot *dq_next; /* pointer to next dquot */
+ struct dquot *dq_hash_prev; /* pointer to prev dquot */
+ struct dquot *dq_hash_next; /* pointer to next dquot */
+};
+
+#define NODQUOT (struct dquot *)NULL
+
+/*
+ * Flags used for set_dqblk.
+ */
+#define QUOTA_SYSCALL 0x01
+#define SET_QUOTA 0x02
+#define SET_USE 0x04
+#define SET_QLIMIT 0x08
+
+#define QUOTA_OK 0
+#define NO_QUOTA 1
+
+/*
+ * declaration of quota_function calls in kernel.
+ */
+
+extern void dquot_initialize(struct inode *inode, short type);
+extern void dquot_drop(struct inode *inode);
+extern int dquot_alloc_block(const struct inode *inode, unsigned long number);
+extern int dquot_alloc_inode(const struct inode *inode, unsigned long number);
+extern void dquot_free_block(const struct inode *inode, unsigned long number);
+extern void dquot_free_inode(const struct inode *inode, unsigned long number);
+extern int dquot_transfer(struct inode *inode, struct iattr *iattr, char direction);
+
+extern void invalidate_dquots(kdev_t dev, short type);
+extern int quota_off(kdev_t dev, short type);
+extern int sync_dquots(kdev_t dev, short type);
+
+#else
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+int quotactl __P ((int, const char *, int, caddr_t));
+__END_DECLS
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_QUOTA_ */
diff --git a/i386/i386at/gpl/linux/include/linux/resource.h b/i386/i386at/gpl/linux/include/linux/resource.h
new file mode 100644
index 00000000..f3bffbd7
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/resource.h
@@ -0,0 +1,60 @@
+#ifndef _LINUX_RESOURCE_H
+#define _LINUX_RESOURCE_H
+
+#include <linux/time.h>
+
+/*
+ * Resource control/accounting header file for linux
+ */
+
+/*
+ * Definition of struct rusage taken from BSD 4.3 Reno
+ *
+ * We don't support all of these yet, but we might as well have them....
+ * Otherwise, each time we add new items, programs which depend on this
+ * structure will lose. This reduces the chances of that happening.
+ */
+#define RUSAGE_SELF 0
+#define RUSAGE_CHILDREN (-1)
+#define RUSAGE_BOTH (-2) /* sys_wait4() uses this */
+
+struct rusage {
+ struct timeval ru_utime; /* user time used */
+ struct timeval ru_stime; /* system time used */
+ long ru_maxrss; /* maximum resident set size */
+ long ru_ixrss; /* integral shared memory size */
+ long ru_idrss; /* integral unshared data size */
+ long ru_isrss; /* integral unshared stack size */
+ long ru_minflt; /* page reclaims */
+ long ru_majflt; /* page faults */
+ long ru_nswap; /* swaps */
+ long ru_inblock; /* block input operations */
+ long ru_oublock; /* block output operations */
+ long ru_msgsnd; /* messages sent */
+ long ru_msgrcv; /* messages received */
+ long ru_nsignals; /* signals received */
+ long ru_nvcsw; /* voluntary context switches */
+ long ru_nivcsw; /* involuntary " */
+};
+
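An editor's usage note, not part of the patch: user space reads the same structure back with getrusage(2); the headers below are the usual ones.

#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

/* Print how much CPU time this process has consumed so far. */
static void print_cpu_time(void)
{
        struct rusage ru;

        if (getrusage(RUSAGE_SELF, &ru) == 0)
                printf("user %ld.%06lds, system %ld.%06lds\n",
                       (long) ru.ru_utime.tv_sec, (long) ru.ru_utime.tv_usec,
                       (long) ru.ru_stime.tv_sec, (long) ru.ru_stime.tv_usec);
}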
+#define RLIM_INFINITY ((long)(~0UL>>1))
+
+struct rlimit {
+ long rlim_cur;
+ long rlim_max;
+};
+
+#define PRIO_MIN (-20)
+#define PRIO_MAX 20
+
+#define PRIO_PROCESS 0
+#define PRIO_PGRP 1
+#define PRIO_USER 2
+
+/*
+ * Due to binary compatibility, the actual resource numbers
+ * may be different for different linux versions..
+ */
+#include <asm/resource.h>
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/route.h b/i386/i386at/gpl/linux/include/linux/route.h
new file mode 100644
index 00000000..5be4853e
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/route.h
@@ -0,0 +1,78 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the IP router interface.
+ *
+ * Version: @(#)route.h 1.0.3 05/27/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988
+ * for the purposes of compatibility only.
+ *
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_ROUTE_H
+#define _LINUX_ROUTE_H
+
+#include <linux/if.h>
+
+
+/* This structure gets passed by the SIOCADDRT and SIOCDELRT calls. */
+struct rtentry
+{
+ unsigned long rt_hash; /* hash key for lookups */
+ struct sockaddr rt_dst; /* target address */
+ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
+ struct sockaddr rt_genmask; /* target network mask (IP) */
+ short rt_flags;
+ short rt_refcnt;
+ unsigned long rt_use;
+ struct ifnet *rt_ifp;
+ short rt_metric; /* +1 for binary compatibility! */
+ char *rt_dev; /* forcing the device at add */
+ unsigned long rt_mss; /* per route MTU/Window */
+ unsigned long rt_window; /* Window clamping */
+ unsigned short rt_irtt; /* Initial RTT */
+};
+
+
+#define RTF_UP 0x0001 /* route usable */
+#define RTF_GATEWAY 0x0002 /* destination is a gateway */
+#define RTF_HOST 0x0004 /* host entry (net otherwise) */
+#define RTF_REINSTATE 0x0008 /* reinstate route after tmout */
+#define RTF_DYNAMIC 0x0010 /* created dyn. (by redirect) */
+#define RTF_MODIFIED 0x0020 /* modified dyn. (by redirect) */
+#define RTF_MSS 0x0040 /* specific MSS for this route */
+#define RTF_WINDOW 0x0080 /* per route window clamping */
+#define RTF_IRTT 0x0100 /* Initial round trip time */
+#define RTF_REJECT 0x0200 /* Reject route */
+
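Editorial sketch (not in the patch): user space fills this structure and passes it through the SIOCADDRT ioctl, which comes from <linux/sockios.h> via <sys/ioctl.h>, on an AF_INET socket. Addresses and error handling are illustrative.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/route.h>

/* Add a default route through gateway *gw on an open AF_INET socket fd. */
static int add_default_route(int fd, const struct sockaddr_in *gw)
{
        struct rtentry rt;

        memset(&rt, 0, sizeof(rt));
        memcpy(&rt.rt_gateway, gw, sizeof(*gw));
        ((struct sockaddr_in *) &rt.rt_dst)->sin_family = AF_INET;
        ((struct sockaddr_in *) &rt.rt_genmask)->sin_family = AF_INET;
        rt.rt_flags = RTF_UP | RTF_GATEWAY;     /* usable, via a gateway */

        return ioctl(fd, SIOCADDRT, &rt);
}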
+/*
+ * This structure is passed from the kernel to user space by netlink
+ * routing/device announcements
+ */
+
+struct netlink_rtinfo
+{
+ unsigned long rtmsg_type;
+ struct sockaddr rtmsg_dst;
+ struct sockaddr rtmsg_gateway;
+ struct sockaddr rtmsg_genmask;
+ short rtmsg_flags;
+ short rtmsg_metric;
+ char rtmsg_device[16];
+};
+
+#define RTMSG_NEWROUTE 0x01
+#define RTMSG_DELROUTE 0x02
+#define RTMSG_NEWDEVICE 0x11
+#define RTMSG_DELDEVICE 0x12
+
+#endif /* _LINUX_ROUTE_H */
+
diff --git a/i386/i386at/gpl/linux/include/linux/sched.h b/i386/i386at/gpl/linux/include/linux/sched.h
new file mode 100644
index 00000000..28fe7ef0
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/sched.h
@@ -0,0 +1,492 @@
+#ifndef _LINUX_SCHED_H
+#define _LINUX_SCHED_H
+
+/*
+ * define DEBUG if you want the wait-queues to have some extra
+ * debugging code. It's not normally used, but might catch some
+ * wait-queue coding errors.
+ *
+ * #define DEBUG
+ */
+
+#include <asm/param.h> /* for HZ */
+
+extern unsigned long intr_count;
+extern unsigned long event;
+
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/tasks.h>
+#include <linux/kernel.h>
+#include <asm/system.h>
+#include <asm/page.h>
+
+#include <linux/smp.h>
+#include <linux/tty.h>
+#include <linux/sem.h>
+
+/*
+ * cloning flags:
+ */
+#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
+#define CLONE_VM 0x00000100 /* set if VM shared between processes */
+#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
+#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
+#define CLONE_SIGHAND 0x00000800 /* set if signal handlers shared */
+#define CLONE_PID 0x00001000 /* set if pid shared */
+
+/*
+ * These are the constants used to fake the fixed-point load-average
+ * counting. Some notes:
+ * - 11 bit fractions expand to 22 bits by the multiplies: this gives
+ * a load-average precision of 10 bits integer + 11 bits fractional
+ * - if you want to count load-averages more often, you need more
+ * precision, or rounding will get you. With 2-second counting freq,
+ * the EXP_n values would be 1981, 2034 and 2043 if still using only
+ * 11 bit fractions.
+ */
+extern unsigned long avenrun[]; /* Load averages */
+
+#define FSHIFT 11 /* nr of bits of precision */
+#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
+#define LOAD_FREQ (5*HZ) /* 5 sec intervals */
+#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
+#define EXP_5 2014 /* 1/exp(5sec/5min) */
+#define EXP_15 2037 /* 1/exp(5sec/15min) */
+
+#define CALC_LOAD(load,exp,n) \
+ load *= exp; \
+ load += n*(FIXED_1-exp); \
+ load >>= FSHIFT;
+
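Editor's worked example, not part of the patch: with FIXED_1 = 2048 and EXP_1 = 1884, each LOAD_FREQ tick moves the 1-minute average a fraction (2048-1884)/2048, roughly 8%, of the way toward the current runnable-task count, which is the exponential decay the comment above describes. A sketch:

/* One update step of the 1-minute load average; n_active is the
 * number of runnable tasks, load is kept scaled by FIXED_1. */
static unsigned long update_one_minute(unsigned long load,
                                       unsigned long n_active)
{
        unsigned long active = n_active * FIXED_1;

        CALC_LOAD(load, EXP_1, active);
        return load;    /* shift right by FSHIFT for the integer part */
}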
+#define CT_TO_SECS(x) ((x) / HZ)
+#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
+
+extern int nr_running, nr_tasks;
+
+#define FIRST_TASK task[0]
+#define LAST_TASK task[NR_TASKS-1]
+
+#include <linux/head.h>
+#include <linux/fs.h>
+#include <linux/signal.h>
+#include <linux/time.h>
+#include <linux/param.h>
+#include <linux/resource.h>
+#include <linux/vm86.h>
+#include <linux/math_emu.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+
+#include <asm/processor.h>
+
+#define TASK_RUNNING 0
+#define TASK_INTERRUPTIBLE 1
+#define TASK_UNINTERRUPTIBLE 2
+#define TASK_ZOMBIE 3
+#define TASK_STOPPED 4
+#define TASK_SWAPPING 5
+
+/*
+ * Scheduling policies
+ */
+#define SCHED_OTHER 0
+#define SCHED_FIFO 1
+#define SCHED_RR 2
+
+struct sched_param {
+ int sched_priority;
+};
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#ifdef __KERNEL__
+
+#define barrier() __asm__("": : :"memory")
+
+extern void sched_init(void);
+extern void show_state(void);
+extern void trap_init(void);
+
+asmlinkage void schedule(void);
+
+struct files_struct {
+ int count;
+ fd_set close_on_exec;
+ struct file * fd[NR_OPEN];
+};
+
+#define INIT_FILES { \
+ 1, \
+ { { 0, } }, \
+ { NULL, } \
+}
+
+struct fs_struct {
+ int count;
+ unsigned short umask;
+ struct inode * root, * pwd;
+};
+
+#define INIT_FS { \
+ 1, \
+ 0022, \
+ NULL, NULL \
+}
+
+struct mm_struct {
+ int count;
+ pgd_t * pgd;
+ unsigned long context;
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack, start_mmap;
+ unsigned long arg_start, arg_end, env_start, env_end;
+ unsigned long rss, total_vm, locked_vm;
+ unsigned long def_flags;
+ struct vm_area_struct * mmap;
+ struct vm_area_struct * mmap_avl;
+};
+
+#define INIT_MM { \
+ 1, \
+ swapper_pg_dir, \
+ 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, \
+ 0, \
+ &init_mmap, &init_mmap }
+
+struct signal_struct {
+ int count;
+ struct sigaction action[32];
+};
+
+#define INIT_SIGNALS { \
+ 1, \
+ { {0,}, } }
+
+struct task_struct {
+/* these are hardcoded - don't touch */
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ long counter;
+ long priority;
+ unsigned long signal;
+ unsigned long blocked; /* bitmap of masked signals */
+ unsigned long flags; /* per process flags, defined below */
+ int errno;
+ long debugreg[8]; /* Hardware debugging registers */
+ struct exec_domain *exec_domain;
+/* various fields */
+ struct linux_binfmt *binfmt;
+ struct task_struct *next_task, *prev_task;
+ struct task_struct *next_run, *prev_run;
+ unsigned long saved_kernel_stack;
+ unsigned long kernel_stack_page;
+ int exit_code, exit_signal;
+ unsigned long personality;
+ int dumpable:1;
+ int did_exec:1;
+ int pid,pgrp,tty_old_pgrp,session,leader;
+ int groups[NGROUPS];
+ /*
+ * pointers to (original) parent process, youngest child, younger sibling,
+ * older sibling, respectively. (p->father can be replaced with
+ * p->p_pptr->pid)
+ */
+ struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
+ struct wait_queue *wait_chldexit; /* for wait4() */
+ unsigned short uid,euid,suid,fsuid;
+ unsigned short gid,egid,sgid,fsgid;
+ unsigned long timeout, policy, rt_priority;
+ unsigned long it_real_value, it_prof_value, it_virt_value;
+ unsigned long it_real_incr, it_prof_incr, it_virt_incr;
+ struct timer_list real_timer;
+ long utime, stime, cutime, cstime, start_time;
+/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
+ unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
+ int swappable:1;
+ unsigned long swap_address;
+ unsigned long old_maj_flt; /* old value of maj_flt */
+ unsigned long dec_flt; /* page fault count of the last time */
+ unsigned long swap_cnt; /* number of pages to swap on next pass */
+/* limits */
+ struct rlimit rlim[RLIM_NLIMITS];
+ unsigned short used_math;
+ char comm[16];
+/* file system info */
+ int link_count;
+ struct tty_struct *tty; /* NULL if no tty */
+/* ipc stuff */
+ struct sem_undo *semundo;
+ struct sem_queue *semsleeping;
+/* ldt for this task - used by Wine. If NULL, default_ldt is used */
+ struct desc_struct *ldt;
+/* tss for this task */
+ struct thread_struct tss;
+/* filesystem information */
+ struct fs_struct *fs;
+/* open file information */
+ struct files_struct *files;
+/* memory management info */
+ struct mm_struct *mm;
+/* signal handlers */
+ struct signal_struct *sig;
+#ifdef __SMP__
+ int processor;
+ int last_processor;
+ int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
+#endif
+};
+
+/*
+ * Per process flags
+ */
+#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
+ /* Not implemented yet, only for 486*/
+#define PF_PTRACED 0x00000010 /* set if ptrace (0) has been called. */
+#define PF_TRACESYS 0x00000020 /* tracing system calls */
+
+#define PF_STARTING 0x00000100 /* being created */
+#define PF_EXITING 0x00000200 /* getting shut down */
+
+#define PF_USEDFPU 0x00100000 /* Process used the FPU this quantum (SMP only) */
+
+/*
+ * Limit the stack to some sane default: root can always
+ * increase this limit if needed.. 8MB seems reasonable.
+ */
+#define _STK_LIM (8*1024*1024)
+
+#define DEF_PRIORITY (20*HZ/100) /* 200 ms time slices */
+
+/*
+ * INIT_TASK is used to set up the first task table; touch at
+ * your own risk! Base=0, limit=0x1fffff (=2MB)
+ */
+#define INIT_TASK \
+/* state etc */ { 0,DEF_PRIORITY,DEF_PRIORITY,0,0,0,0, \
+/* debugregs */ { 0, }, \
+/* exec domain */&default_exec_domain, \
+/* binfmt */ NULL, \
+/* schedlink */ &init_task,&init_task, &init_task, &init_task, \
+/* stack */ 0,(unsigned long) &init_kernel_stack, \
+/* ec,brk... */ 0,0,0,0,0, \
+/* pid etc.. */ 0,0,0,0,0, \
+/* suppl grps*/ {NOGROUP,}, \
+/* proc links*/ &init_task,&init_task,NULL,NULL,NULL,NULL, \
+/* uid etc */ 0,0,0,0,0,0,0,0, \
+/* timeout */ 0,SCHED_OTHER,0,0,0,0,0,0,0, \
+/* timer */ { NULL, NULL, 0, 0, it_real_fn }, \
+/* utime */ 0,0,0,0,0, \
+/* flt */ 0,0,0,0,0,0, \
+/* swp */ 0,0,0,0,0, \
+/* rlimits */ INIT_RLIMITS, \
+/* math */ 0, \
+/* comm */ "swapper", \
+/* fs info */ 0,NULL, \
+/* ipc */ NULL, NULL, \
+/* ldt */ NULL, \
+/* tss */ INIT_TSS, \
+/* fs */ &init_fs, \
+/* files */ &init_files, \
+/* mm */ &init_mm, \
+/* signals */ &init_signals, \
+}
+
+extern struct mm_struct init_mm;
+extern struct task_struct init_task;
+extern struct task_struct *task[NR_TASKS];
+extern struct task_struct *last_task_used_math;
+extern struct task_struct *current_set[NR_CPUS];
+/*
+ * On a single processor system this comes out as current_set[0] when cpp
+ * has finished with it, which gcc will optimise away.
+ */
+#define current (0+current_set[smp_processor_id()]) /* Current on this processor */
+extern unsigned long volatile jiffies;
+extern unsigned long itimer_ticks;
+extern unsigned long itimer_next;
+extern struct timeval xtime;
+extern int need_resched;
+extern void do_timer(struct pt_regs *);
+
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+
+extern int securelevel; /* system security level */
+
+#define CURRENT_TIME (xtime.tv_sec)
+
+extern void sleep_on(struct wait_queue ** p);
+extern void interruptible_sleep_on(struct wait_queue ** p);
+extern void wake_up(struct wait_queue ** p);
+extern void wake_up_interruptible(struct wait_queue ** p);
+extern void wake_up_process(struct task_struct * tsk);
+
+extern void notify_parent(struct task_struct * tsk);
+extern int send_sig(unsigned long sig,struct task_struct * p,int priv);
+extern int in_group_p(gid_t grp);
+
+extern int request_irq(unsigned int irq,void (*handler)(int, struct pt_regs *),
+ unsigned long flags, const char *device);
+extern void free_irq(unsigned int irq);
+
+extern void copy_thread(int, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern void flush_thread(void);
+extern void exit_thread(void);
+
+extern void exit_fs(struct task_struct *);
+extern void exit_files(struct task_struct *);
+extern void exit_sighand(struct task_struct *);
+extern void release_thread(struct task_struct *);
+
+extern int do_execve(char *, char **, char **, struct pt_regs *);
+extern int do_fork(unsigned long, unsigned long, struct pt_regs *);
+
+#ifdef MACH
+extern void add_wait_queue(struct wait_queue **, struct wait_queue *);
+extern void remove_wait_queue(struct wait_queue **, struct wait_queue *);
+#else /* ! MACH */
+/*
+ * The wait-queues are circular lists, and you have to be *very* sure
+ * to keep them correct. Use only these two functions to add/remove
+ * entries in the queues.
+ */
+extern inline void add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+
+#ifdef DEBUG
+ if (wait->next) {
+ __label__ here;
+ unsigned long pc;
+ pc = (unsigned long) &&here;
+ here:
+ printk("add_wait_queue (%08lx): wait->next = %08lx\n",pc,(unsigned long) wait->next);
+ }
+#endif
+ save_flags(flags);
+ cli();
+ if (!*p) {
+ wait->next = wait;
+ *p = wait;
+ } else {
+ wait->next = (*p)->next;
+ (*p)->next = wait;
+ }
+ restore_flags(flags);
+}
+
+extern inline void remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+ struct wait_queue * tmp;
+#ifdef DEBUG
+ unsigned long ok = 0;
+#endif
+
+ save_flags(flags);
+ cli();
+ if ((*p == wait) &&
+#ifdef DEBUG
+ (ok = 1) &&
+#endif
+ ((*p = wait->next) == wait)) {
+ *p = NULL;
+ } else {
+ tmp = wait;
+ while (tmp->next != wait) {
+ tmp = tmp->next;
+#ifdef DEBUG
+ if (tmp == *p)
+ ok = 1;
+#endif
+ }
+ tmp->next = wait->next;
+ }
+ wait->next = NULL;
+ restore_flags(flags);
+#ifdef DEBUG
+ if (!ok) {
+ __label__ here;
+ ok = (unsigned long) &&here;
+ printk("removed wait_queue not on list.\n");
+ printk("list = %08lx, queue = %08lx\n",(unsigned long) p, (unsigned long) wait);
+ here:
+ printk("eip = %08lx\n",ok);
+ }
+#endif
+}
+
+extern inline void select_wait(struct wait_queue ** wait_address, select_table * p)
+{
+ struct select_table_entry * entry;
+
+ if (!p || !wait_address)
+ return;
+ if (p->nr >= __MAX_SELECT_TABLE_ENTRIES)
+ return;
+ entry = p->entry + p->nr;
+ entry->wait_address = wait_address;
+ entry->wait.task = current;
+ entry->wait.next = NULL;
+ add_wait_queue(wait_address,&entry->wait);
+ p->nr++;
+}
+#endif /* ! MACH */
+
+extern void __down(struct semaphore * sem);
+
+/*
+ * These are not yet interrupt-safe
+ */
+extern inline void down(struct semaphore * sem)
+{
+ if (sem->count <= 0)
+ __down(sem);
+ sem->count--;
+}
+
+extern inline void up(struct semaphore * sem)
+{
+ sem->count++;
+ wake_up(&sem->wait);
+}
+
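Editorial sketch, outside the patch: typical use of these primitives to guard a critical section. The MUTEX initializer is assumed to come from the architecture's <asm/semaphore.h> of this era; all names are illustrative.

/* Serialise access to some driver-private state. */
static struct semaphore foo_sem = MUTEX;        /* count starts at 1 */

static void foo_critical(void)
{
        down(&foo_sem);         /* sleeps in __down() if already held */
        /* ... touch the shared state ... */
        up(&foo_sem);           /* wakes one waiter, if any */
}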
+#define REMOVE_LINKS(p) do { unsigned long flags; \
+ save_flags(flags) ; cli(); \
+ (p)->next_task->prev_task = (p)->prev_task; \
+ (p)->prev_task->next_task = (p)->next_task; \
+ restore_flags(flags); \
+ if ((p)->p_osptr) \
+ (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
+ if ((p)->p_ysptr) \
+ (p)->p_ysptr->p_osptr = (p)->p_osptr; \
+ else \
+ (p)->p_pptr->p_cptr = (p)->p_osptr; \
+ } while (0)
+
+#define SET_LINKS(p) do { unsigned long flags; \
+ save_flags(flags); cli(); \
+ (p)->next_task = &init_task; \
+ (p)->prev_task = init_task.prev_task; \
+ init_task.prev_task->next_task = (p); \
+ init_task.prev_task = (p); \
+ restore_flags(flags); \
+ (p)->p_ysptr = NULL; \
+ if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
+ (p)->p_osptr->p_ysptr = p; \
+ (p)->p_pptr->p_cptr = p; \
+ } while (0)
+
+#define for_each_task(p) \
+ for (p = &init_task ; (p = p->next_task) != &init_task ; )
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/scsi.h b/i386/i386at/gpl/linux/include/linux/scsi.h
new file mode 100644
index 00000000..a05072cf
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/scsi.h
@@ -0,0 +1,198 @@
+#ifndef _LINUX_SCSI_H
+#define _LINUX_SCSI_H
+
+/*
+ * This header file contains public constants and structures used by
+ * the scsi code for linux.
+ */
+
+/*
+ $Header: cvs/gnumach/i386/i386at/gpl/linux/include/linux/Attic/scsi.h,v 1.1.1.1 1997/02/25 21:27:31 thomas Exp $
+
+ For documentation on the OPCODES, MESSAGES, and SENSE values,
+ please consult the SCSI standard.
+
+*/
+
+/*
+ * SCSI opcodes
+ */
+
+#define TEST_UNIT_READY 0x00
+#define REZERO_UNIT 0x01
+#define REQUEST_SENSE 0x03
+#define FORMAT_UNIT 0x04
+#define READ_BLOCK_LIMITS 0x05
+#define REASSIGN_BLOCKS 0x07
+#define READ_6 0x08
+#define WRITE_6 0x0a
+#define SEEK_6 0x0b
+#define READ_REVERSE 0x0f
+#define WRITE_FILEMARKS 0x10
+#define SPACE 0x11
+#define INQUIRY 0x12
+#define RECOVER_BUFFERED_DATA 0x14
+#define MODE_SELECT 0x15
+#define RESERVE 0x16
+#define RELEASE 0x17
+#define COPY 0x18
+#define ERASE 0x19
+#define MODE_SENSE 0x1a
+#define START_STOP 0x1b
+#define RECEIVE_DIAGNOSTIC 0x1c
+#define SEND_DIAGNOSTIC 0x1d
+#define ALLOW_MEDIUM_REMOVAL 0x1e
+
+#define SET_WINDOW 0x24
+#define READ_CAPACITY 0x25
+#define READ_10 0x28
+#define WRITE_10 0x2a
+#define SEEK_10 0x2b
+#define WRITE_VERIFY 0x2e
+#define VERIFY 0x2f
+#define SEARCH_HIGH 0x30
+#define SEARCH_EQUAL 0x31
+#define SEARCH_LOW 0x32
+#define SET_LIMITS 0x33
+#define PRE_FETCH 0x34
+#define READ_POSITION 0x34
+#define SYNCHRONIZE_CACHE 0x35
+#define LOCK_UNLOCK_CACHE 0x36
+#define READ_DEFECT_DATA 0x37
+#define MEDIUM_SCAN 0x38
+#define COMPARE 0x39
+#define COPY_VERIFY 0x3a
+#define WRITE_BUFFER 0x3b
+#define READ_BUFFER 0x3c
+#define UPDATE_BLOCK 0x3d
+#define READ_LONG 0x3e
+#define WRITE_LONG 0x3f
+#define CHANGE_DEFINITION 0x40
+#define WRITE_SAME 0x41
+#define LOG_SELECT 0x4c
+#define LOG_SENSE 0x4d
+#define MODE_SELECT_10 0x55
+#define MODE_SENSE_10 0x5a
+#define READ_12 0xa8
+#define WRITE_12 0xaa
+#define WRITE_VERIFY_12 0xae
+#define SEARCH_HIGH_12 0xb0
+#define SEARCH_EQUAL_12 0xb1
+#define SEARCH_LOW_12 0xb2
+#define SEND_VOLUME_TAG 0xb6
+#define WRITE_LONG_2 0xea
+
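Editor's illustration, not part of the patch: the opcodes above form the first byte of a command descriptor block. For instance, a 6-byte INQUIRY CDB requesting 36 bytes of standard data looks like this (field layout per the SCSI-2 draft, not this header):

static unsigned char inquiry_cmd[6] = {
        INQUIRY,        /* opcode 0x12 */
        0,              /* LUN 0 in bits 7..5, EVPD = 0 */
        0,              /* page code, unused while EVPD = 0 */
        0,              /* reserved */
        36,             /* allocation length */
        0               /* control byte */
};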
+/*
+ * Status codes
+ */
+
+#define GOOD 0x00
+#define CHECK_CONDITION 0x01
+#define CONDITION_GOOD 0x02
+#define BUSY 0x04
+#define INTERMEDIATE_GOOD 0x08
+#define INTERMEDIATE_C_GOOD 0x0a
+#define RESERVATION_CONFLICT 0x0c
+#define QUEUE_FULL 0x1a
+
+#define STATUS_MASK 0x1e
+
+/*
+ * SENSE KEYS
+ */
+
+#define NO_SENSE 0x00
+#define RECOVERED_ERROR 0x01
+#define NOT_READY 0x02
+#define MEDIUM_ERROR 0x03
+#define HARDWARE_ERROR 0x04
+#define ILLEGAL_REQUEST 0x05
+#define UNIT_ATTENTION 0x06
+#define DATA_PROTECT 0x07
+#define BLANK_CHECK 0x08
+#define COPY_ABORTED 0x0a
+#define ABORTED_COMMAND 0x0b
+#define VOLUME_OVERFLOW 0x0d
+#define MISCOMPARE 0x0e
+
+
+/*
+ * DEVICE TYPES
+ */
+
+#define TYPE_DISK 0x00
+#define TYPE_TAPE 0x01
+#define TYPE_PROCESSOR 0x03 /* HP scanners use this */
+#define TYPE_WORM 0x04 /* Treated as ROM by our system */
+#define TYPE_ROM 0x05
+#define TYPE_SCANNER 0x06
+#define TYPE_MOD 0x07 /* Magneto-optical disk -
+ * - treated as TYPE_DISK */
+#define TYPE_NO_LUN 0x7f
+
+
+/*
+ * MESSAGE CODES
+ */
+
+#define COMMAND_COMPLETE 0x00
+#define EXTENDED_MESSAGE 0x01
+#define EXTENDED_MODIFY_DATA_POINTER 0x00
+#define EXTENDED_SDTR 0x01
+#define EXTENDED_EXTENDED_IDENTIFY 0x02 /* SCSI-I only */
+#define EXTENDED_WDTR 0x03
+#define SAVE_POINTERS 0x02
+#define RESTORE_POINTERS 0x03
+#define DISCONNECT 0x04
+#define INITIATOR_ERROR 0x05
+#define ABORT 0x06
+#define MESSAGE_REJECT 0x07
+#define NOP 0x08
+#define MSG_PARITY_ERROR 0x09
+#define LINKED_CMD_COMPLETE 0x0a
+#define LINKED_FLG_CMD_COMPLETE 0x0b
+#define BUS_DEVICE_RESET 0x0c
+
+#define INITIATE_RECOVERY 0x0f /* SCSI-II only */
+#define RELEASE_RECOVERY 0x10 /* SCSI-II only */
+
+#define SIMPLE_QUEUE_TAG 0x20
+#define HEAD_OF_QUEUE_TAG 0x21
+#define ORDERED_QUEUE_TAG 0x22
+
+/*
+ * Here are some scsi specific ioctl commands which are sometimes useful.
+ */
+/* These are a few other constants only used by scsi devices */
+
+#define SCSI_IOCTL_GET_IDLUN 0x5382
+
+/* Used to turn on and off tagged queuing for scsi devices */
+
+#define SCSI_IOCTL_TAGGED_ENABLE 0x5383
+#define SCSI_IOCTL_TAGGED_DISABLE 0x5384
+
+/* Used to obtain the host number of a device. */
+#define SCSI_IOCTL_PROBE_HOST 0x5385
+
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/scsicam.h b/i386/i386at/gpl/linux/include/linux/scsicam.h
new file mode 100644
index 00000000..954e1407
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/scsicam.h
@@ -0,0 +1,17 @@
+/*
+ * scsicam.h - SCSI CAM support functions, use for HDIO_GETGEO, etc.
+ *
+ * Copyright 1993, 1994 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@Colorado.EDU
+ * +1 (303) 786-7975
+ *
+ * For more information, please consult the SCSI-CAM draft.
+ */
+
+#ifndef SCSICAM_H
+#define SCSICAM_H
+#include <linux/kdev_t.h>
+extern int scsicam_bios_param (Disk *disk, kdev_t dev, int *ip);
+#endif /* def SCSICAM_H */
diff --git a/i386/i386at/gpl/linux/include/linux/sem.h b/i386/i386at/gpl/linux/include/linux/sem.h
new file mode 100644
index 00000000..0eb1d024
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/sem.h
@@ -0,0 +1,112 @@
+#ifndef _LINUX_SEM_H
+#define _LINUX_SEM_H
+#include <linux/ipc.h>
+
+/* semop flags */
+#define SEM_UNDO 0x1000 /* undo the operation on exit */
+
+/* semctl Command Definitions. */
+#define GETPID 11 /* get sempid */
+#define GETVAL 12 /* get semval */
+#define GETALL 13 /* get all semval's */
+#define GETNCNT 14 /* get semncnt */
+#define GETZCNT 15 /* get semzcnt */
+#define SETVAL 16 /* set semval */
+#define SETALL 17 /* set all semval's */
+
+/* One semid data structure for each set of semaphores in the system. */
+struct semid_ds {
+ struct ipc_perm sem_perm; /* permissions .. see ipc.h */
+ time_t sem_otime; /* last semop time */
+ time_t sem_ctime; /* last change time */
+ struct sem *sem_base; /* ptr to first semaphore in array */
+ struct sem_queue *sem_pending; /* pending operations to be processed */
+ struct sem_queue **sem_pending_last; /* last pending operation */
+ struct sem_undo *undo; /* undo requests on this array */
+ ushort sem_nsems; /* no. of semaphores in array */
+};
+
+/* The semop system call takes an array of these. */
+struct sembuf {
+ ushort sem_num; /* semaphore index in array */
+ short sem_op; /* semaphore operation */
+ short sem_flg; /* operation flags */
+};
+
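A user-space sketch (editor's addition, not in the patch) of the classic P ("wait") operation using this structure, assuming the semop(2) wrapper from <sys/sem.h>.

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* Decrement semaphore 0 of the set semid, blocking until it is
 * positive; SEM_UNDO reverses the operation if the process exits. */
static int sem_wait_undo(int semid)
{
        struct sembuf op = { 0, -1, SEM_UNDO };

        return semop(semid, &op, 1);
}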
+/* arg for semctl system calls. */
+union semun {
+ int val; /* value for SETVAL */
+ struct semid_ds *buf; /* buffer for IPC_STAT & IPC_SET */
+ ushort *array; /* array for GETALL & SETALL */
+ struct seminfo *__buf; /* buffer for IPC_INFO */
+ void *__pad;
+};
+
+struct seminfo {
+ int semmap;
+ int semmni;
+ int semmns;
+ int semmnu;
+ int semmsl;
+ int semopm;
+ int semume;
+ int semusz;
+ int semvmx;
+ int semaem;
+};
+
+#define SEMMNI 128 /* ? max # of semaphore identifiers */
+#define SEMMSL 32 /* <= 512 max num of semaphores per id */
+#define SEMMNS (SEMMNI*SEMMSL) /* ? max # of semaphores in system */
+#define SEMOPM 32 /* ~ 100 max num of ops per semop call */
+#define SEMVMX 32767 /* semaphore maximum value */
+
+/* unused */
+#define SEMUME SEMOPM /* max num of undo entries per process */
+#define SEMMNU SEMMNS /* num of undo structures system wide */
+#define SEMAEM (SEMVMX >> 1) /* adjust on exit max value */
+#define SEMMAP SEMMNS /* # of entries in semaphore map */
+#define SEMUSZ 20 /* sizeof struct sem_undo */
+
+#ifdef __KERNEL__
+
+/* One semaphore structure for each semaphore in the system. */
+struct sem {
+ short semval; /* current value */
+ short sempid; /* pid of last operation */
+};
+
+/* ipcs ctl cmds */
+#define SEM_STAT 18
+#define SEM_INFO 19
+
+/* One queue for each semaphore set in the system. */
+struct sem_queue {
+ struct sem_queue * next; /* next entry in the queue */
+ struct sem_queue ** prev; /* previous entry in the queue, *(q->prev) == q */
+ struct wait_queue * sleeper; /* sleeping process */
+ struct sem_undo * undo; /* undo structure */
+ int pid; /* process id of requesting process */
+ int status; /* completion status of operation */
+ struct semid_ds * sma; /* semaphore array for operations */
+ struct sembuf * sops; /* array of pending operations */
+ int nsops; /* number of operations */
+};
+
+/* Each task has a list of undo requests. They are executed automatically
+ * when the process exits.
+ */
+struct sem_undo {
+ struct sem_undo * proc_next; /* next entry on this process */
+ struct sem_undo * id_next; /* next entry on this semaphore set */
+ int semid; /* semaphore set identifier */
+ short * semadj; /* array of adjustments, one per semaphore */
+};
+
+asmlinkage int sys_semget (key_t key, int nsems, int semflg);
+asmlinkage int sys_semop (int semid, struct sembuf *sops, unsigned nsops);
+asmlinkage int sys_semctl (int semid, int semnum, int cmd, union semun arg);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_SEM_H */
diff --git a/i386/i386at/gpl/linux/include/linux/signal.h b/i386/i386at/gpl/linux/include/linux/signal.h
new file mode 100644
index 00000000..9d1afa91
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/signal.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SIGNAL_H
+#define _LINUX_SIGNAL_H
+
+#include <asm/signal.h>
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/skbuff.h b/i386/i386at/gpl/linux/include/linux/skbuff.h
new file mode 100644
index 00000000..65418168
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/skbuff.h
@@ -0,0 +1,474 @@
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+#include <linux/malloc.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/config.h>
+
+#define CONFIG_SKB_CHECK 0
+
+#define HAVE_ALLOC_SKB /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
+
+
+#define FREE_READ 1
+#define FREE_WRITE 0
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+struct sk_buff_head
+{
+ struct sk_buff * volatile next;
+ struct sk_buff * volatile prev;
+ __u32 qlen; /* Must be same length as a pointer
+ for using debugging */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+};
+
+
+struct sk_buff
+{
+ struct sk_buff * volatile next; /* Next buffer in list */
+ struct sk_buff * volatile prev; /* Previous buffer in list */
+ struct sk_buff_head * list; /* List we are on */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+ struct sk_buff * volatile link3; /* Link for IP protocol level buffer chains */
+ struct sock *sk; /* Socket we are owned by */
+ unsigned long when; /* used to compute rtt's */
+ struct timeval stamp; /* Time we arrived */
+ struct linux_device *dev; /* Device we arrived on/are leaving by */
+ union
+ {
+ struct tcphdr *th;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ unsigned char *raw;
+ /* for passing an fd in a unix domain socket */
+ struct file *filp;
+ } h;
+
+ union
+ {
+ /* As yet incomplete physical layer views */
+ unsigned char *raw;
+ struct ethhdr *ethernet;
+ } mac;
+
+ struct iphdr *ip_hdr; /* For IPPROTO_RAW */
+ unsigned long len; /* Length of actual data */
+ unsigned long csum; /* Checksum */
+ __u32 saddr; /* IP source address */
+ __u32 daddr; /* IP target address */
+ __u32 raddr; /* IP next hop address */
+ __u32 seq; /* TCP sequence number */
+ __u32 end_seq; /* seq [+ fin] [+ syn] + datalen */
+ __u32 ack_seq; /* TCP ack sequence number */
+ unsigned char proto_priv[16]; /* Protocol private data */
+ volatile char acked, /* Are we acked ? */
+ used, /* Are we in use ? */
+ free, /* How to free this buffer */
+ arp; /* Has IP/ARP resolution finished */
+ unsigned char tries, /* Times tried */
+ lock, /* Are we locked ? */
+ localroute, /* Local routing asserted for this frame */
+ pkt_type, /* Packet class */
+ ip_summed; /* Driver fed us an IP checksum */
+#define PACKET_HOST 0 /* To us */
+#define PACKET_BROADCAST 1 /* To all */
+#define PACKET_MULTICAST 2 /* To group */
+#define PACKET_OTHERHOST 3 /* To someone else */
+ unsigned short users; /* User count - see datagram.c,tcp.c */
+ unsigned short protocol; /* Packet protocol from driver. */
+ unsigned short truesize; /* Buffer size */
+
+ int count; /* reference count */
+ struct sk_buff *data_skb; /* Link to the actual data skb */
+ unsigned char *head; /* Head of buffer */
+ unsigned char *data; /* Data head pointer */
+ unsigned char *tail; /* Tail pointer */
+ unsigned char *end; /* End pointer */
+ void (*destructor)(struct sk_buff *this); /* Destruct function */
+#ifdef MACH
+#ifdef MACH_INCLUDE
+ ipc_port_t reply;
+ mach_msg_type_name_t reply_type;
+ vm_map_copy_t copy;
+#else
+ void *reply;
+ unsigned reply_type;
+ void *copy;
+#endif
+#endif
+};
+
+#ifdef CONFIG_SKB_LARGE
+#define SK_WMEM_MAX 65535
+#define SK_RMEM_MAX 65535
+#else
+#define SK_WMEM_MAX 32767
+#define SK_RMEM_MAX 32767
+#endif
+
+#if CONFIG_SKB_CHECK
+#define SK_FREED_SKB 0x0DE2C0DE
+#define SK_GOOD_SKB 0xDEC0DED1
+#define SK_HEAD_SKB 0x12231298
+#endif
+
+#ifdef __KERNEL__
+/*
+ * Handling routines are only of interest to the kernel
+ */
+
+#include <asm/system.h>
+
+#if 0
+extern void print_skb(struct sk_buff *);
+#endif
+extern void kfree_skb(struct sk_buff *skb, int rw);
+extern void skb_queue_head_init(struct sk_buff_head *list);
+extern void skb_queue_head(struct sk_buff_head *list,struct sk_buff *buf);
+extern void skb_queue_tail(struct sk_buff_head *list,struct sk_buff *buf);
+extern struct sk_buff * skb_dequeue(struct sk_buff_head *list);
+extern void skb_insert(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_append(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_unlink(struct sk_buff *buf);
+extern __u32 skb_queue_len(struct sk_buff_head *list);
+extern struct sk_buff * skb_peek_copy(struct sk_buff_head *list);
+extern struct sk_buff * alloc_skb(unsigned int size, int priority);
+extern struct sk_buff * dev_alloc_skb(unsigned int size);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff * skb_copy(struct sk_buff *skb, int priority);
+extern void skb_device_lock(struct sk_buff *skb);
+extern void skb_device_unlock(struct sk_buff *skb);
+extern void dev_kfree_skb(struct sk_buff *skb, int mode);
+extern int skb_device_locked(struct sk_buff *skb);
+#ifdef MACH
+#define skb_put(skb, len) ((skb)->data)
+#else
+extern unsigned char * skb_put(struct sk_buff *skb, int len);
+#endif
+extern unsigned char * skb_push(struct sk_buff *skb, int len);
+extern unsigned char * skb_pull(struct sk_buff *skb, int len);
+extern int skb_headroom(struct sk_buff *skb);
+extern int skb_tailroom(struct sk_buff *skb);
+#ifdef MACH
+#define skb_reserve(skb, len)
+#else
+extern void skb_reserve(struct sk_buff *skb, int len);
+#endif
+extern void skb_trim(struct sk_buff *skb, int len);
+
+/*
+ * Peek an sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. On an interrupt-
+ * driven system, cli(), peek the buffer, copy the data, then sti().
+ */
+extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/*
+ * Return the length of an sk_buff queue
+ */
+
+extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
+{
+ return(list_->qlen);
+}
+
+#if CONFIG_SKB_CHECK
+extern int skb_check(struct sk_buff *skb,int,int, char *);
+#define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__)
+#define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__)
+#else
+#define IS_SKB(skb)
+#define IS_SKB_HEAD(skb)
+
+extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = (struct sk_buff *)list;
+ list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
+/*
+ * Insert an sk_buff at the start of a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+
+extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ prev = (struct sk_buff *)list;
+ next = prev->next;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_head(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Insert an sk_buff at the end of a list.
+ */
+
+extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ next = (struct sk_buff *)list;
+ prev = next->prev;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_tail(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Remove an sk_buff from a list.
+ */
+
+extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev, *result;
+
+ prev = (struct sk_buff *) list;
+ next = prev->next;
+ result = NULL;
+ if (next != prev) {
+ result = next;
+ next = next->next;
+ list->qlen--;
+ next->prev = prev;
+ prev->next = next;
+ result->next = NULL;
+ result->prev = NULL;
+ result->list = NULL;
+ }
+ return result;
+}
+
+extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+ long flags;
+ struct sk_buff *result;
+
+ save_flags(flags);
+ cli();
+ result = __skb_dequeue(list);
+ restore_flags(flags);
+ return result;
+}
+
+/*
+ * Insert a packet before another one in a list.
+ */
+
+extern __inline__ void __skb_insert(struct sk_buff *next, struct sk_buff *newsk)
+{
+ struct sk_buff * prev = next->prev;
+
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+ newsk->list = next->list;
+ newsk->list->qlen++;
+}
+
+extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_insert(old, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Place a packet after a given packet in a list.
+ */
+
+extern __inline__ void __skb_append(struct sk_buff *prev, struct sk_buff *newsk)
+{
+ struct sk_buff * next = prev->next;
+
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+ newsk->list = prev->list;
+ newsk->list->qlen++;
+}
+
+extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_append(old, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ struct sk_buff * next, * prev;
+
+ list->qlen--;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->list = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+/*
+ * Remove an sk_buff from its list. Works even without knowing the list it
+ * is sitting on, which can be handy at times. It also means that THE LIST
+ * MUST EXIST when you unlink. Thus a list must have its contents unlinked
+ * _FIRST_.
+ */
+
+extern __inline__ void skb_unlink(struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ if(skb->list)
+ __skb_unlink(skb, skb->list);
+ restore_flags(flags);
+}
+
+#ifndef MACH
+/*
+ * Add data to an sk_buff
+ */
+
+extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len)
+{
+ unsigned char *tmp=skb->tail;
+ skb->tail+=len;
+ skb->len+=len;
+ if(skb->tail>skb->end)
+ panic("skput:over: %p:%d", __builtin_return_address(0),len);
+ return tmp;
+}
+#endif
+
+extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ if(skb->data<skb->head)
+ panic("skpush:under: %p:%d", __builtin_return_address(0),len);
+ return skb->data;
+}
+
+extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len)
+{
+ if(len > skb->len)
+ return NULL;
+ skb->data+=len;
+ skb->len-=len;
+ return skb->data;
+}
+
+extern __inline__ int skb_headroom(struct sk_buff *skb)
+{
+ return skb->data-skb->head;
+}
+
+extern __inline__ int skb_tailroom(struct sk_buff *skb)
+{
+ return skb->end-skb->tail;
+}
+
+#ifndef MACH
+extern __inline__ void skb_reserve(struct sk_buff *skb, int len)
+{
+ skb->data+=len;
+ skb->tail+=len;
+}
+#endif
+
+extern __inline__ void skb_trim(struct sk_buff *skb, int len)
+{
+ if(skb->len>len)
+ {
+ skb->len=len;
+ skb->tail=skb->data+len;
+ }
+}
+
+#endif
+
+extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
+extern int datagram_select(struct sock *sk, int sel_type, select_table *wait);
+extern void skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size);
+extern void skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size);
+extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
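To show how a network driver is expected to use the queue primitives declared above, here is a hedged receive-path sketch. The function and variable names are invented, and dev_alloc_skb() is used so no allocation-priority constant from other headers is needed.

#include <linux/skbuff.h>
#include <linux/string.h>

/* Hedged sketch: queue a received frame, drain the queue later. */
static struct sk_buff_head rx_queue;

static void rx_example_init(void)
{
	skb_queue_head_init(&rx_queue);		/* empty list, qlen = 0 */
}

static void rx_example_irq(unsigned char *frame, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (skb == NULL)
		return;				/* drop the frame on memory pressure */
	/* skb_put() returns the data area (and is a stub macro under MACH). */
	memcpy(skb_put(skb, len), frame, len);
	skb_queue_tail(&rx_queue, skb);		/* interrupt-safe wrapper */
}

static void rx_example_drain(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&rx_queue)) != NULL) {
		/* ... hand the data off to the protocol layer ... */
		kfree_skb(skb, FREE_READ);
	}
}
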
diff --git a/i386/i386at/gpl/linux/include/linux/smp.h b/i386/i386at/gpl/linux/include/linux/smp.h
new file mode 100644
index 00000000..72984f15
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/smp.h
@@ -0,0 +1,54 @@
+#ifndef __LINUX_SMP_H
+#define __LINUX_SMP_H
+
+/*
+ * Generic SMP support
+ * Alan Cox. <alan@cymru.net>
+ */
+
+#ifdef __SMP__
+#include <asm/smp.h>
+
+extern void smp_message_pass(int target, int msg, unsigned long data, int wait);
+extern void smp_boot_cpus(void); /* Boot processor call to load the other CPU's */
+extern void smp_callin(void); /* Processor call in. Must hold processors until .. */
+extern void smp_commence(void); /* Multiprocessors may now schedule */
+extern int smp_num_cpus;
+extern int smp_threads_ready; /* True once the per process idle is forked */
+#ifdef __SMP_PROF__
+extern volatile unsigned long smp_spins[NR_CPUS]; /* count of interrupt spins */
+extern volatile unsigned long smp_spins_sys_idle[]; /* count of idle spins */
+extern volatile unsigned long smp_spins_syscall[]; /* count of syscall spins */
+extern volatile unsigned long smp_spins_syscall_cur[]; /* count of syscall spins for the current
+ call */
+extern volatile unsigned long smp_idle_count[1+NR_CPUS];/* count idle ticks */
+extern volatile unsigned long smp_idle_map; /* map with idle cpus */
+#else
+extern volatile unsigned long smp_spins;
+#endif
+
+
+extern volatile unsigned long smp_msg_data;
+extern volatile int smp_src_cpu;
+extern volatile int smp_msg_id;
+
+#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
+#define MSG_ALL 0x8001
+
+#define MSG_INVALIDATE_TLB 0x0001 /* Remote processor TLB invalidate */
+#define MSG_STOP_CPU 0x0002 /* Sent to shut down slave CPU's when rebooting */
+#define MSG_RESCHEDULE 0x0003 /* Reschedule request from master CPU */
+
+#else
+
+/*
+ * These macros fold the SMP functionality into a single CPU system
+ */
+
+#define smp_num_cpus 1
+#define smp_processor_id() 0
+#define smp_message_pass(t,m,d,w)
+#define smp_threads_ready 1
+#define kernel_lock()
+#endif
+#endif
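The point of the single-CPU fold-down macros in the #else branch is that generic code compiles unchanged on both configurations. A small hedged sketch follows; printk() is assumed from <linux/kernel.h>, which this header does not pull in.

#include <linux/kernel.h>	/* printk(), assumed */
#include <linux/smp.h>

/* Hedged sketch: the same source works on UP and SMP builds. */
static void report_cpus(void)
{
	int me = smp_processor_id();	/* constant 0 on UP builds */
	int n  = smp_num_cpus;		/* constant 1 on UP builds */

	printk("cpu %d of %d online\n", me, n);
}
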
diff --git a/i386/i386at/gpl/linux/include/linux/socket.h b/i386/i386at/gpl/linux/include/linux/socket.h
new file mode 100644
index 00000000..bf2991ad
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/socket.h
@@ -0,0 +1,126 @@
+#ifndef _LINUX_SOCKET_H
+#define _LINUX_SOCKET_H
+
+#include <asm/socket.h> /* arch-dependent defines */
+#include <linux/sockios.h> /* the SIOCxxx I/O controls */
+#include <linux/uio.h> /* iovec support */
+
+struct sockaddr {
+ unsigned short sa_family; /* address family, AF_xxx */
+ char sa_data[14]; /* 14 bytes of protocol address */
+};
+
+struct linger {
+ int l_onoff; /* Linger active */
+ int l_linger; /* How long to linger for */
+};
+
+struct msghdr
+{
+ void * msg_name; /* Socket name */
+ int msg_namelen; /* Length of name */
+ struct iovec * msg_iov; /* Data blocks */
+ int msg_iovlen; /* Number of blocks */
+ void * msg_accrights; /* Per protocol magic (eg BSD file descriptor passing) */
+ int msg_accrightslen; /* Length of rights list */
+};
+
+/* Socket types. */
+#define SOCK_STREAM 1 /* stream (connection) socket */
+#define SOCK_DGRAM 2 /* datagram (conn.less) socket */
+#define SOCK_RAW 3 /* raw socket */
+#define SOCK_RDM 4 /* reliably-delivered message */
+#define SOCK_SEQPACKET 5 /* sequential packet socket */
+#define SOCK_PACKET 10 /* linux specific way of */
+ /* getting packets at the dev */
+ /* level. For writing rarp and */
+ /* other similar things on the */
+ /* user level. */
+
+/* Supported address families. */
+#define AF_UNSPEC 0
+#define AF_UNIX 1 /* Unix domain sockets */
+#define AF_INET 2 /* Internet IP Protocol */
+#define AF_AX25 3 /* Amateur Radio AX.25 */
+#define AF_IPX 4 /* Novell IPX */
+#define AF_APPLETALK 5 /* Appletalk DDP */
+#define AF_NETROM 6 /* Amateur radio NetROM */
+#define AF_BRIDGE 7 /* Multiprotocol bridge */
+#define AF_AAL5 8 /* Reserved for Werner's ATM */
+#define AF_X25 9 /* Reserved for X.25 project */
+#define AF_INET6 10 /* IP version 6 */
+#define AF_MAX 12 /* For now.. */
+
+/* Protocol families, same as address families. */
+#define PF_UNSPEC AF_UNSPEC
+#define PF_UNIX AF_UNIX
+#define PF_INET AF_INET
+#define PF_AX25 AF_AX25
+#define PF_IPX AF_IPX
+#define PF_APPLETALK AF_APPLETALK
+#define PF_NETROM AF_NETROM
+#define PF_BRIDGE AF_BRIDGE
+#define PF_AAL5 AF_AAL5
+#define PF_X25 AF_X25
+#define PF_INET6 AF_INET6
+
+#define PF_MAX AF_MAX
+
+/* Maximum queue length specifiable by listen. */
+#define SOMAXCONN 128
+
+/* Flags we can use with send and recv. */
+#define MSG_OOB 1
+#define MSG_PEEK 2
+#define MSG_DONTROUTE 4
+
+/* setsockopt(2) level. Thanks to BSD these must match IPPROTO_xxx */
+#define SOL_IP 0
+#define SOL_IPX 256
+#define SOL_AX25 257
+#define SOL_ATALK 258
+#define SOL_NETROM 259
+#define SOL_TCP 6
+#define SOL_UDP 17
+
+/* IP options */
+#define IP_TOS 1
+#define IPTOS_LOWDELAY 0x10
+#define IPTOS_THROUGHPUT 0x08
+#define IPTOS_RELIABILITY 0x04
+#define IP_TTL 2
+#define IP_HDRINCL 3
+#define IP_OPTIONS 4
+
+#define IP_MULTICAST_IF 32
+#define IP_MULTICAST_TTL 33
+#define IP_MULTICAST_LOOP 34
+#define IP_ADD_MEMBERSHIP 35
+#define IP_DROP_MEMBERSHIP 36
+
+
+/* These need to appear somewhere around here */
+#define IP_DEFAULT_MULTICAST_TTL 1
+#define IP_DEFAULT_MULTICAST_LOOP 1
+#define IP_MAX_MEMBERSHIPS 20
+
+/* IPX options */
+#define IPX_TYPE 1
+
+/* TCP options - this way around because someone left a set in the c library includes */
+#define TCP_NODELAY 1
+#define TCP_MAXSEG 2
+
+/* The various priorities. */
+#define SOPRI_INTERACTIVE 0
+#define SOPRI_NORMAL 1
+#define SOPRI_BACKGROUND 2
+
+#ifdef __KERNEL__
+extern void memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
+extern int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode);
+extern void memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
+extern int move_addr_to_user(void *kaddr, int klen, void *uaddr, int *ulen);
+extern int move_addr_to_kernel(void *uaddr, int ulen, void *kaddr);
+#endif
+#endif /* _LINUX_SOCKET_H */
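As an illustration of how the msghdr/iovec pair and the kernel helpers declared at the end fit together, a hedged sketch follows. The buffer bound is arbitrary; note that memcpy_fromiovec() advances the iovec as it copies.

#include <linux/socket.h>

/* Hedged sketch: gather a request's iovec into one kernel buffer. */
static int gather_example(struct msghdr *msg, unsigned char *kbuf, int kmax)
{
	int i, total = 0;

	for (i = 0; i < msg->msg_iovlen; i++)
		total += msg->msg_iov[i].iov_len;
	if (total > kmax)
		return -1;

	/* Copies 'total' bytes out of the iovec, advancing it as it copies. */
	memcpy_fromiovec(kbuf, msg->msg_iov, total);
	return total;
}
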
diff --git a/i386/i386at/gpl/linux/include/linux/sockios.h b/i386/i386at/gpl/linux/include/linux/sockios.h
new file mode 100644
index 00000000..ee20a0b1
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/sockios.h
@@ -0,0 +1,91 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions of the socket-level I/O control calls.
+ *
+ * Version: @(#)sockios.h 1.0.2 03/09/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_SOCKIOS_H
+#define _LINUX_SOCKIOS_H
+
+/* Routing table calls. */
+#define SIOCADDRT 0x890B /* add routing table entry */
+#define SIOCDELRT 0x890C /* delete routing table entry */
+
+/* Socket configuration controls. */
+#define SIOCGIFNAME 0x8910 /* get iface name */
+#define SIOCSIFLINK 0x8911 /* set iface channel */
+#define SIOCGIFCONF 0x8912 /* get iface list */
+#define SIOCGIFFLAGS 0x8913 /* get flags */
+#define SIOCSIFFLAGS 0x8914 /* set flags */
+#define SIOCGIFADDR 0x8915 /* get PA address */
+#define SIOCSIFADDR 0x8916 /* set PA address */
+#define SIOCGIFDSTADDR 0x8917 /* get remote PA address */
+#define SIOCSIFDSTADDR 0x8918 /* set remote PA address */
+#define SIOCGIFBRDADDR 0x8919 /* get broadcast PA address */
+#define SIOCSIFBRDADDR 0x891a /* set broadcast PA address */
+#define SIOCGIFNETMASK 0x891b /* get network PA mask */
+#define SIOCSIFNETMASK 0x891c /* set network PA mask */
+#define SIOCGIFMETRIC 0x891d /* get metric */
+#define SIOCSIFMETRIC 0x891e /* set metric */
+#define SIOCGIFMEM 0x891f /* get memory address (BSD) */
+#define SIOCSIFMEM 0x8920 /* set memory address (BSD) */
+#define SIOCGIFMTU 0x8921 /* get MTU size */
+#define SIOCSIFMTU 0x8922 /* set MTU size */
+#define SIOCSIFHWADDR 0x8924 /* set hardware address (NI) */
+#define SIOCGIFENCAP 0x8925 /* get/set slip encapsulation */
+#define SIOCSIFENCAP 0x8926
+#define SIOCGIFHWADDR 0x8927 /* Get hardware address */
+#define SIOCGIFSLAVE 0x8929 /* Driver slaving support */
+#define SIOCSIFSLAVE 0x8930
+/* begin multicast support change */
+#define SIOCADDMULTI 0x8931
+#define SIOCDELMULTI 0x8932
+/* end multicast support change */
+
+/* ARP cache control calls. */
+#define OLD_SIOCDARP 0x8950 /* old delete ARP table entry */
+#define OLD_SIOCGARP 0x8951 /* old get ARP table entry */
+#define OLD_SIOCSARP 0x8952 /* old set ARP table entry */
+#define SIOCDARP 0x8953 /* delete ARP table entry */
+#define SIOCGARP 0x8954 /* get ARP table entry */
+#define SIOCSARP 0x8955 /* set ARP table entry */
+
+/* RARP cache control calls. */
+#define SIOCDRARP 0x8960 /* delete RARP table entry */
+#define SIOCGRARP 0x8961 /* get RARP table entry */
+#define SIOCSRARP 0x8962 /* set RARP table entry */
+
+/* Driver configuration calls */
+
+#define SIOCGIFMAP 0x8970 /* Get device parameters */
+#define SIOCSIFMAP 0x8971 /* Set device parameters */
+
+
+/* Device private ioctl calls */
+
+/*
+ * These 16 ioctls are available to devices via the do_ioctl() device
+ * vector. Each device should include this file and redefine these names
+ * as their own. Because these are device dependent it is a good idea
+ * _NOT_ to issue them to random objects and hope.
+ */
+
+#define SIOCDEVPRIVATE 0x89F0 /* to 89FF */
+
+/*
+ * These 16 ioctl calls are protocol private
+ */
+
+#define SIOCPROTOPRIVATE 0x89E0 /* to 89EF */
+#endif /* _LINUX_SOCKIOS_H */
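For orientation, the interface ioctls above are normally issued from user space against any socket descriptor. A hedged sketch follows; struct ifreq lives in <linux/if.h>, which is not part of this header, and the interface name is arbitrary.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/if.h>		/* struct ifreq -- not part of this header */
#include <linux/sockios.h>

/* Hedged sketch: fetch the protocol (IP) address bound to "eth0". */
int get_eth0_addr(struct sockaddr *out)
{
	struct ifreq ifr;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	strcpy(ifr.ifr_name, "eth0");
	ret = ioctl(fd, SIOCGIFADDR, &ifr);	/* fills ifr.ifr_addr */
	if (ret == 0)
		*out = ifr.ifr_addr;
	close(fd);
	return ret;
}
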
diff --git a/i386/i386at/gpl/linux/include/linux/stat.h b/i386/i386at/gpl/linux/include/linux/stat.h
new file mode 100644
index 00000000..d86b1646
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/stat.h
@@ -0,0 +1,53 @@
+#ifndef _LINUX_STAT_H
+#define _LINUX_STAT_H
+
+#ifdef __KERNEL__
+
+#include <asm/stat.h>
+
+#endif
+
+#define S_IFMT 00170000
+#define S_IFSOCK 0140000
+#define S_IFLNK 0120000
+#define S_IFREG 0100000
+#define S_IFBLK 0060000
+#define S_IFDIR 0040000
+#define S_IFCHR 0020000
+#define S_IFIFO 0010000
+#define S_ISUID 0004000
+#define S_ISGID 0002000
+#define S_ISVTX 0001000
+
+#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
+#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK)
+#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO)
+#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK)
+
+#define S_IRWXU 00700
+#define S_IRUSR 00400
+#define S_IWUSR 00200
+#define S_IXUSR 00100
+
+#define S_IRWXG 00070
+#define S_IRGRP 00040
+#define S_IWGRP 00020
+#define S_IXGRP 00010
+
+#define S_IRWXO 00007
+#define S_IROTH 00004
+#define S_IWOTH 00002
+#define S_IXOTH 00001
+
+#ifdef __KERNEL__
+#define S_IRWXUGO (S_IRWXU|S_IRWXG|S_IRWXO)
+#define S_IALLUGO (S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO)
+#define S_IRUGO (S_IRUSR|S_IRGRP|S_IROTH)
+#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH)
+#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH)
+#endif
+
+#endif
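A small, hedged illustration of how the S_IF* format bits and the S_IS*() predicates above are meant to be combined:

#include <linux/stat.h>

/* Hedged sketch: classify a mode word and test a permission bit. */
static const char *mode_kind(unsigned short mode)
{
	if (S_ISREG(mode))  return "regular file";
	if (S_ISDIR(mode))  return "directory";
	if (S_ISLNK(mode))  return "symlink";
	if (S_ISCHR(mode))  return "character device";
	if (S_ISBLK(mode))  return "block device";
	if (S_ISFIFO(mode)) return "fifo";
	if (S_ISSOCK(mode)) return "socket";
	return "unknown";
}

/* e.g. mode_kind(S_IFDIR | S_IRWXU) == "directory",
 *      and (mode & S_IWOTH) tests world write permission. */
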
diff --git a/i386/i386at/gpl/linux/include/linux/stddef.h b/i386/i386at/gpl/linux/include/linux/stddef.h
new file mode 100644
index 00000000..c6221e71
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/stddef.h
@@ -0,0 +1,15 @@
+#ifndef _LINUX_STDDEF_H
+#define _LINUX_STDDEF_H
+
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef unsigned int size_t;
+#endif
+
+#undef NULL
+#define NULL ((void *)0)
+
+#undef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+#endif
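The offsetof() definition above is the usual null-pointer cast trick; a short hedged example of what it evaluates to:

#include <linux/stddef.h>

struct example { char tag; long value; };

/* Hedged sketch: offsetof() gives the byte offset of a member. */
static size_t example_value_offset(void)
{
	/* On i386 this is 4: 'tag' plus padding up to the long's alignment. */
	return offsetof(struct example, value);
}
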
diff --git a/i386/i386at/gpl/linux/include/linux/string.h b/i386/i386at/gpl/linux/include/linux/string.h
new file mode 100644
index 00000000..e9162b38
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/string.h
@@ -0,0 +1,44 @@
+#ifndef _LINUX_STRING_H_
+#define _LINUX_STRING_H_
+
+#include <linux/types.h> /* for size_t */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern char * ___strtok;
+extern char * strcpy(char *,const char *);
+extern char * strncpy(char *,const char *,size_t);
+extern char * strcat(char *, const char *);
+extern char * strncat(char *, const char *, size_t);
+extern char * strchr(const char *,int);
+extern char * strpbrk(const char *,const char *);
+extern char * strtok(char *,const char *);
+extern char * strstr(const char *,const char *);
+extern size_t strlen(const char *);
+extern size_t strnlen(const char *,size_t);
+extern size_t strspn(const char *,const char *);
+extern int strcmp(const char *,const char *);
+extern int strncmp(const char *,const char *,size_t);
+
+extern void * memset(void *,int,size_t);
+extern void * memcpy(void *,const void *,size_t);
+extern void * memmove(void *,const void *,size_t);
+extern void * memscan(void *,int,size_t);
+extern int memcmp(const void *,const void *,size_t);
+
+/*
+ * Include machine specific inline routines
+ */
+#include <asm/string.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LINUX_STRING_H_ */
diff --git a/i386/i386at/gpl/linux/include/linux/tasks.h b/i386/i386at/gpl/linux/include/linux/tasks.h
new file mode 100644
index 00000000..4540e34f
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/tasks.h
@@ -0,0 +1,19 @@
+#ifndef _LINUX_TASKS_H
+#define _LINUX_TASKS_H
+
+/*
+ * This is the maximum nr of tasks - change it if you need to
+ */
+
+#ifdef __SMP__
+#define NR_CPUS 32 /* Max processors that can be running in SMP */
+#else
+#define NR_CPUS 1
+#endif
+
+#define NR_TASKS 512
+
+#define MAX_TASKS_PER_USER (NR_TASKS/2)
+#define MIN_TASKS_LEFT_FOR_ROOT 4
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/tcp.h b/i386/i386at/gpl/linux/include/linux/tcp.h
new file mode 100644
index 00000000..ae6a063e
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/tcp.h
@@ -0,0 +1,71 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP protocol.
+ *
+ * Version: @(#)tcp.h 1.0.2 04/28/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_TCP_H
+#define _LINUX_TCP_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+struct tcphdr {
+ __u16 source;
+ __u16 dest;
+ __u32 seq;
+ __u32 ack_seq;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 res1:4,
+ doff:4,
+ fin:1,
+ syn:1,
+ rst:1,
+ psh:1,
+ ack:1,
+ urg:1,
+ res2:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u16 doff:4,
+ res1:4,
+ res2:2,
+ urg:1,
+ ack:1,
+ psh:1,
+ rst:1,
+ syn:1,
+ fin:1;
+#else
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+ __u16 window;
+ __u16 check;
+ __u16 urg_ptr;
+};
+
+
+enum {
+ TCP_ESTABLISHED = 1,
+ TCP_SYN_SENT,
+ TCP_SYN_RECV,
+ TCP_FIN_WAIT1,
+ TCP_FIN_WAIT2,
+ TCP_TIME_WAIT,
+ TCP_CLOSE,
+ TCP_CLOSE_WAIT,
+ TCP_LAST_ACK,
+ TCP_LISTEN,
+ TCP_CLOSING /* now a valid state */
+};
+
+#endif /* _LINUX_TCP_H */
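Because the flag bits above are laid out per <asm/byteorder.h>, they can be tested directly by name regardless of endianness. A hedged sketch of typical use (the helper name is invented):

#include <linux/tcp.h>

/* Hedged sketch: is this segment the start of a new connection? */
static int is_connection_request(struct tcphdr *th)
{
	/* A pure SYN: SYN set, ACK/RST/FIN clear. */
	return th->syn && !th->ack && !th->rst && !th->fin;
}

/* The port and sequence fields stay in network byte order in the header
 * itself, so callers convert with ntohs()/ntohl() when comparing. */
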
diff --git a/i386/i386at/gpl/linux/include/linux/termios.h b/i386/i386at/gpl/linux/include/linux/termios.h
new file mode 100644
index 00000000..47866288
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/termios.h
@@ -0,0 +1,7 @@
+#ifndef _LINUX_TERMIOS_H
+#define _LINUX_TERMIOS_H
+
+#include <linux/types.h>
+#include <asm/termios.h>
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/time.h b/i386/i386at/gpl/linux/include/linux/time.h
new file mode 100644
index 00000000..269e9dc6
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/time.h
@@ -0,0 +1,50 @@
+#ifndef _LINUX_TIME_H
+#define _LINUX_TIME_H
+
+struct timespec {
+ long tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+
+struct timeval {
+ int tv_sec; /* seconds */
+ int tv_usec; /* microseconds */
+};
+
+struct timezone {
+ int tz_minuteswest; /* minutes west of Greenwich */
+ int tz_dsttime; /* type of dst correction */
+};
+
+#define NFDBITS __NFDBITS
+
+#ifdef __KERNEL__
+void do_gettimeofday(struct timeval *tv);
+void do_settimeofday(struct timeval *tv);
+#endif
+
+#define FD_SETSIZE __FD_SETSIZE
+#define FD_SET(fd,fdsetp) __FD_SET(fd,fdsetp)
+#define FD_CLR(fd,fdsetp) __FD_CLR(fd,fdsetp)
+#define FD_ISSET(fd,fdsetp) __FD_ISSET(fd,fdsetp)
+#define FD_ZERO(fdsetp) __FD_ZERO(fdsetp)
+
+/*
+ * Names of the interval timers, and structure
+ * defining a timer setting.
+ */
+#define ITIMER_REAL 0
+#define ITIMER_VIRTUAL 1
+#define ITIMER_PROF 2
+
+struct itimerspec {
+ struct timespec it_interval; /* timer period */
+ struct timespec it_value; /* timer expiration */
+};
+
+struct itimerval {
+ struct timeval it_interval; /* timer interval */
+ struct timeval it_value; /* current value */
+};
+
+#endif
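A hedged sketch of the two most common uses of this header inside the kernel: reading the wall clock via do_gettimeofday() and describing a periodic interval timer with itimerval. The 10 ms period is arbitrary.

#include <linux/time.h>

/* Hedged sketch: timestamp an event and build a 10 ms periodic itimer. */
static void time_example(void)
{
	struct timeval now;
	struct itimerval it;

	do_gettimeofday(&now);		/* seconds + microseconds since the epoch */

	it.it_value.tv_sec     = 0;	/* first expiry in 10 ms */
	it.it_value.tv_usec    = 10000;
	it.it_interval.tv_sec  = 0;	/* then every 10 ms */
	it.it_interval.tv_usec = 10000;
	/* ... hand 'it' to the ITIMER_REAL machinery (setitimer at user level) ... */
	(void) it;
}
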
diff --git a/i386/i386at/gpl/linux/include/linux/timer.h b/i386/i386at/gpl/linux/include/linux/timer.h
new file mode 100644
index 00000000..c54e8c5e
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/timer.h
@@ -0,0 +1,101 @@
+#ifndef _LINUX_TIMER_H
+#define _LINUX_TIMER_H
+
+/*
+ * DON'T CHANGE THESE!! Most of them are hardcoded into some assembly language
+ * as well as being defined here.
+ */
+
+/*
+ * The timers are:
+ *
+ * BLANK_TIMER console screen-saver timer
+ *
+ * BEEP_TIMER console beep timer
+ *
+ * RS_TIMER timer for the RS-232 ports
+ *
+ * SWAP_TIMER timer for the background pageout daemon
+ *
+ * HD_TIMER harddisk timer
+ *
+ * HD_TIMER2 (atdisk2 patches)
+ *
+ * FLOPPY_TIMER floppy disk timer (not used right now)
+ *
+ * SCSI_TIMER scsi.c timeout timer
+ *
+ * NET_TIMER tcp/ip timeout timer
+ *
+ * COPRO_TIMER 387 timeout for buggy hardware..
+ *
+ * QIC02_TAPE_TIMER timer for QIC-02 tape driver (it's not hardcoded)
+ *
+ * MCD_TIMER Mitsumi CD-ROM Timer
+ *
+ * GSCD_TIMER Goldstar CD-ROM Timer
+ *
+ * OPTCD_TIMER Optics Storage CD-ROM Timer
+ *
+ */
+
+#define BLANK_TIMER 0
+#define BEEP_TIMER 1
+#define RS_TIMER 2
+#define SWAP_TIMER 3
+
+#define HD_TIMER 16
+#define FLOPPY_TIMER 17
+#define SCSI_TIMER 18
+#define NET_TIMER 19
+#define SOUND_TIMER 20
+#define COPRO_TIMER 21
+
+#define QIC02_TAPE_TIMER 22 /* hhb */
+#define MCD_TIMER 23
+
+#define HD_TIMER2 24
+#define GSCD_TIMER 25
+#define OPTCD_TIMER 26
+
+struct timer_struct {
+ unsigned long expires;
+ void (*fn)(void);
+};
+
+extern unsigned long timer_active;
+extern struct timer_struct timer_table[32];
+
+/*
+ * This is completely separate from the above, and is the
+ * "new and improved" way of handling timers more dynamically.
+ * Hopefully efficient and general enough for most things.
+ *
+ * The "hardcoded" timers above are still useful for well-
+ * defined problems, but the timer-list is probably better
+ * when you need multiple outstanding timers or similar.
+ *
+ * The "data" field is in case you want to use the same
+ * timeout function for several timeouts. You can use this
+ * to distinguish between the different invocations.
+ */
+struct timer_list {
+ struct timer_list *next;
+ struct timer_list *prev;
+ unsigned long expires;
+ unsigned long data;
+ void (*function)(unsigned long);
+};
+
+extern void add_timer(struct timer_list * timer);
+extern int del_timer(struct timer_list * timer);
+
+extern void it_real_fn(unsigned long);
+
+extern inline void init_timer(struct timer_list * timer)
+{
+ timer->next = NULL;
+ timer->prev = NULL;
+}
+
+#endif
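To make the dynamic timer-list comment concrete, a hedged sketch follows. jiffies and HZ come from <linux/sched.h> and <asm/param.h>, not from this header, and the callback name is invented.

#include <linux/timer.h>

extern unsigned long volatile jiffies;	/* tick counter, from <linux/sched.h> */

static void my_timeout(unsigned long data)
{
	/* 'data' is whatever was stored in the timer below. */
}

static struct timer_list my_timer;

static void arm_example(void *cookie)
{
	init_timer(&my_timer);			/* next/prev = NULL */
	my_timer.expires  = jiffies + HZ;	/* about one second from now (HZ is in <asm/param.h>) */
	my_timer.data     = (unsigned long) cookie;
	my_timer.function = my_timeout;
	add_timer(&my_timer);			/* del_timer(&my_timer) cancels it */
}
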
diff --git a/i386/i386at/gpl/linux/include/linux/tqueue.h b/i386/i386at/gpl/linux/include/linux/tqueue.h
new file mode 100644
index 00000000..d483a155
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/tqueue.h
@@ -0,0 +1,163 @@
+/*
+ * tqueue.h --- task queue handling for Linux.
+ *
+ * Mostly based on a proposed bottom-half replacement code written by
+ * Kai Petzke, wpp@marie.physik.tu-berlin.de.
+ *
+ * Modified for use in the Linux kernel by Theodore Ts'o,
+ * tytso@mit.edu. Any bugs are my fault, not Kai's.
+ *
+ * The original comment follows below.
+ */
+
+#ifndef _LINUX_TQUEUE_H
+#define _LINUX_TQUEUE_H
+
+#include <asm/bitops.h>
+#include <asm/system.h>
+
+#ifdef INCLUDE_INLINE_FUNCS
+#define _INLINE_ extern
+#else
+#define _INLINE_ extern __inline__
+#endif
+
+/*
+ * New proposed "bottom half" handlers:
+ * (C) 1994 Kai Petzke, wpp@marie.physik.tu-berlin.de
+ *
+ * Advantages:
+ * - Bottom halves are implemented as a linked list. You can have as many
+ * of them as you want.
+ * - No more scanning of a bit field is required upon call of a bottom half.
+ * - Support for chained bottom half lists. The run_task_queue() function can be
+ * used as a bottom half handler. This is useful, for example, for bottom
+ * halves that want to be delayed until the next clock tick.
+ *
+ * Problems:
+ * - The queue_task_irq() inline function is only atomic with respect to itself.
+ * Problems can occur when queue_task_irq() is called from a normal system
+ * call and an interrupt comes in. No problems occur when queue_task_irq()
+ * is called from an interrupt or bottom half and then interrupted, as run_task_queue()
+ * will not be executed/continued before the last interrupt returns. If in
+ * doubt, use queue_task(), not queue_task_irq().
+ * - Bottom halves are called in the reverse order that they were linked into
+ * the list.
+ */
+
+struct tq_struct {
+ struct tq_struct *next; /* linked list of active bh's */
+ int sync; /* must be initialized to zero */
+ void (*routine)(void *); /* function to call */
+ void *data; /* argument to function */
+};
+
+typedef struct tq_struct * task_queue;
+
+#define DECLARE_TASK_QUEUE(q) task_queue q = &tq_last
+
+extern struct tq_struct tq_last;
+extern task_queue tq_timer, tq_immediate, tq_scheduler;
+
+#ifdef INCLUDE_INLINE_FUNCS
+struct tq_struct tq_last = {
+ &tq_last, 0, 0, 0
+};
+#endif
+
+/*
+ * To implement your own list of active bottom halves, use the following
+ * two definitions:
+ *
+ * struct tq_struct *my_bh = &tq_last;
+ * struct tq_struct run_my_bh = {
+ * 0, 0, (void *)(void *) run_task_queue, &my_bh
+ * };
+ *
+ * To activate a bottom half on your list, use:
+ *
+ * queue_task(tq_pointer, &my_bh);
+ *
+ * To run the bottom halves on your list, put them on the immediate list by:
+ *
+ * queue_task(&run_my_bh, &tq_immediate);
+ *
+ * This allows you to do deferred processing. For example, you could
+ * have a bottom half list tq_timer, which is marked active by the timer
+ * interrupt.
+ */
+
+/*
+ * queue_task_irq: put the bottom half handler "bh_pointer" on the list
+ * "bh_list". You may call this function only from an interrupt
+ * handler or a bottom half handler.
+ */
+_INLINE_ void queue_task_irq(struct tq_struct *bh_pointer,
+ task_queue *bh_list)
+{
+ if (!set_bit(0,&bh_pointer->sync)) {
+ bh_pointer->next = *bh_list;
+ *bh_list = bh_pointer;
+ }
+}
+
+/*
+ * queue_task_irq_off: put the bottom half handler "bh_pointer" on the list
+ * "bh_list". You may call this function only when interrupts are off.
+ */
+_INLINE_ void queue_task_irq_off(struct tq_struct *bh_pointer,
+ task_queue *bh_list)
+{
+ if (!(bh_pointer->sync & 1)) {
+ bh_pointer->sync = 1;
+ bh_pointer->next = *bh_list;
+ *bh_list = bh_pointer;
+ }
+}
+
+
+/*
+ * queue_task: as queue_task_irq, but can be called from anywhere.
+ */
+_INLINE_ void queue_task(struct tq_struct *bh_pointer,
+ task_queue *bh_list)
+{
+ if (!set_bit(0,&bh_pointer->sync)) {
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ bh_pointer->next = *bh_list;
+ *bh_list = bh_pointer;
+ restore_flags(flags);
+ }
+}
+
+/*
+ * Call all "bottom halfs" on a given list.
+ */
+_INLINE_ void run_task_queue(task_queue *list)
+{
+ register struct tq_struct *save_p;
+ register struct tq_struct *p;
+ void *arg;
+ void (*f) (void *);
+
+ while(1) {
+ p = xchg(list,&tq_last);
+ if(p == &tq_last)
+ break;
+
+ do {
+ arg = p -> data;
+ f = p -> routine;
+ save_p = p -> next;
+ p -> sync = 0;
+ (*f)(arg);
+ p = save_p;
+ } while(p != &tq_last);
+ }
+}
+
+#undef _INLINE_
+
+#endif /* _LINUX_TQUEUE_H */
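Following the recipe in the comment block above, a hedged end-to-end sketch of a private task queue (the routine and payload names are invented):

#include <linux/tqueue.h>

static void my_routine(void *data)
{
	/* runs later, when the queue is drained */
}

static int my_payload;
static struct tq_struct my_task = { 0, 0, my_routine, &my_payload };

DECLARE_TASK_QUEUE(my_queue);		/* task_queue my_queue = &tq_last */

static void producer(void)
{
	queue_task(&my_task, &my_queue);	/* callable from any context */
}

static void consumer(void)
{
	run_task_queue(&my_queue);		/* calls my_routine(&my_payload) */
}
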
diff --git a/i386/i386at/gpl/linux/include/linux/trdevice.h b/i386/i386at/gpl/linux/include/linux/trdevice.h
new file mode 100644
index 00000000..96801763
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/trdevice.h
@@ -0,0 +1,40 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Token Ring handlers.
+ *
+ * Version: @(#)eth.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Relocated to include/linux where it belongs by Alan Cox
+ * <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * WARNING: This move may well be temporary. This file will get merged with others RSN.
+ *
+ */
+#ifndef _LINUX_TRDEVICE_H
+#define _LINUX_TRDEVICE_H
+
+
+#include <linux/if_tr.h>
+
+#ifdef __KERNEL__
+extern int tr_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len);
+extern int tr_rebuild_header(void *buff, struct device *dev,
+ unsigned long raddr, struct sk_buff *skb);
+extern unsigned short tr_type_trans(struct sk_buff *skb, struct device *dev);
+
+#endif
+
+#endif /* _LINUX_TRDEVICE_H */
diff --git a/i386/i386at/gpl/linux/include/linux/tty.h b/i386/i386at/gpl/linux/include/linux/tty.h
new file mode 100644
index 00000000..fe139511
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/tty.h
@@ -0,0 +1,340 @@
+#ifndef _LINUX_TTY_H
+#define _LINUX_TTY_H
+
+/*
+ * 'tty.h' defines some structures used by tty_io.c and some defines.
+ */
+
+#ifdef __KERNEL__
+#include <linux/fs.h>
+#include <linux/termios.h>
+#include <linux/tqueue.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_ldisc.h>
+
+#include <asm/system.h>
+
+
+/*
+ * Note: don't mess with NR_PTYS until you understand the tty minor
+ * number allocation game...
+ * (Note: the *_driver.minor_start values 1, 64, 128, 192 are
+ * hardcoded at present.)
+ */
+#define MIN_NR_CONSOLES 1 /* must be at least 1 */
+#define MAX_NR_CONSOLES 63 /* serial lines start at 64 */
+#define MAX_NR_USER_CONSOLES 63 /* must be root to allocate above this */
+ /* Note: the ioctl VT_GETSTATE does not work for
+ consoles 16 and higher (since it returns a short) */
+#define NR_PTYS 256
+#define NR_LDISCS 16
+
+/*
+ * These are set up by the setup-routine at boot-time:
+ */
+
+struct screen_info {
+ unsigned char orig_x;
+ unsigned char orig_y;
+ unsigned char unused1[2];
+ unsigned short orig_video_page;
+ unsigned char orig_video_mode;
+ unsigned char orig_video_cols;
+ unsigned short unused2;
+ unsigned short orig_video_ega_bx;
+ unsigned short unused3;
+ unsigned char orig_video_lines;
+ unsigned char orig_video_isVGA;
+ unsigned short orig_video_points;
+};
+
+extern struct screen_info screen_info;
+
+#define ORIG_X (screen_info.orig_x)
+#define ORIG_Y (screen_info.orig_y)
+#define ORIG_VIDEO_PAGE (screen_info.orig_video_page)
+#define ORIG_VIDEO_MODE (screen_info.orig_video_mode)
+#define ORIG_VIDEO_COLS (screen_info.orig_video_cols)
+#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx)
+#define ORIG_VIDEO_LINES (screen_info.orig_video_lines)
+#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA)
+#define ORIG_VIDEO_POINTS (screen_info.orig_video_points)
+
+#define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
+#define VIDEO_TYPE_CGA 0x11 /* CGA Display */
+#define VIDEO_TYPE_EGAM 0x20 /* EGA/VGA in Monochrome Mode */
+#define VIDEO_TYPE_EGAC 0x21 /* EGA in Color Mode */
+#define VIDEO_TYPE_VGAC 0x22 /* VGA+ in Color Mode */
+
+#define VIDEO_TYPE_TGAC 0x40 /* DEC TGA */
+
+/*
+ * This character is the same as _POSIX_VDISABLE: it cannot be used as
+ * a c_cc[] character, but indicates that a particular special character
+ * isn't in use (eg VINTR has no character etc)
+ */
+#define __DISABLED_CHAR '\0'
+
+/*
+ * This is the flip buffer used for the tty driver. The buffer is
+ * located in the tty structure, and is used as a high speed interface
+ * between the tty driver and the tty line discipline.
+ */
+#define TTY_FLIPBUF_SIZE 512
+
+struct tty_flip_buffer {
+ struct tq_struct tqueue;
+ unsigned char char_buf[2*TTY_FLIPBUF_SIZE];
+ char flag_buf[2*TTY_FLIPBUF_SIZE];
+ char *char_buf_ptr;
+ unsigned char *flag_buf_ptr;
+ int count;
+ int buf_num;
+};
+
+/*
+ * When a break, frame error, or parity error happens, these codes are
+ * stuffed into the flags buffer.
+ */
+#define TTY_NORMAL 0
+#define TTY_BREAK 1
+#define TTY_FRAME 2
+#define TTY_PARITY 3
+#define TTY_OVERRUN 4
+
+#define INTR_CHAR(tty) ((tty)->termios->c_cc[VINTR])
+#define QUIT_CHAR(tty) ((tty)->termios->c_cc[VQUIT])
+#define ERASE_CHAR(tty) ((tty)->termios->c_cc[VERASE])
+#define KILL_CHAR(tty) ((tty)->termios->c_cc[VKILL])
+#define EOF_CHAR(tty) ((tty)->termios->c_cc[VEOF])
+#define TIME_CHAR(tty) ((tty)->termios->c_cc[VTIME])
+#define MIN_CHAR(tty) ((tty)->termios->c_cc[VMIN])
+#define SWTC_CHAR(tty) ((tty)->termios->c_cc[VSWTC])
+#define START_CHAR(tty) ((tty)->termios->c_cc[VSTART])
+#define STOP_CHAR(tty) ((tty)->termios->c_cc[VSTOP])
+#define SUSP_CHAR(tty) ((tty)->termios->c_cc[VSUSP])
+#define EOL_CHAR(tty) ((tty)->termios->c_cc[VEOL])
+#define REPRINT_CHAR(tty) ((tty)->termios->c_cc[VREPRINT])
+#define DISCARD_CHAR(tty) ((tty)->termios->c_cc[VDISCARD])
+#define WERASE_CHAR(tty) ((tty)->termios->c_cc[VWERASE])
+#define LNEXT_CHAR(tty) ((tty)->termios->c_cc[VLNEXT])
+#define EOL2_CHAR(tty) ((tty)->termios->c_cc[VEOL2])
+
+#define _I_FLAG(tty,f) ((tty)->termios->c_iflag & (f))
+#define _O_FLAG(tty,f) ((tty)->termios->c_oflag & (f))
+#define _C_FLAG(tty,f) ((tty)->termios->c_cflag & (f))
+#define _L_FLAG(tty,f) ((tty)->termios->c_lflag & (f))
+
+#define I_IGNBRK(tty) _I_FLAG((tty),IGNBRK)
+#define I_BRKINT(tty) _I_FLAG((tty),BRKINT)
+#define I_IGNPAR(tty) _I_FLAG((tty),IGNPAR)
+#define I_PARMRK(tty) _I_FLAG((tty),PARMRK)
+#define I_INPCK(tty) _I_FLAG((tty),INPCK)
+#define I_ISTRIP(tty) _I_FLAG((tty),ISTRIP)
+#define I_INLCR(tty) _I_FLAG((tty),INLCR)
+#define I_IGNCR(tty) _I_FLAG((tty),IGNCR)
+#define I_ICRNL(tty) _I_FLAG((tty),ICRNL)
+#define I_IUCLC(tty) _I_FLAG((tty),IUCLC)
+#define I_IXON(tty) _I_FLAG((tty),IXON)
+#define I_IXANY(tty) _I_FLAG((tty),IXANY)
+#define I_IXOFF(tty) _I_FLAG((tty),IXOFF)
+#define I_IMAXBEL(tty) _I_FLAG((tty),IMAXBEL)
+
+#define O_OPOST(tty) _O_FLAG((tty),OPOST)
+#define O_OLCUC(tty) _O_FLAG((tty),OLCUC)
+#define O_ONLCR(tty) _O_FLAG((tty),ONLCR)
+#define O_OCRNL(tty) _O_FLAG((tty),OCRNL)
+#define O_ONOCR(tty) _O_FLAG((tty),ONOCR)
+#define O_ONLRET(tty) _O_FLAG((tty),ONLRET)
+#define O_OFILL(tty) _O_FLAG((tty),OFILL)
+#define O_OFDEL(tty) _O_FLAG((tty),OFDEL)
+#define O_NLDLY(tty) _O_FLAG((tty),NLDLY)
+#define O_CRDLY(tty) _O_FLAG((tty),CRDLY)
+#define O_TABDLY(tty) _O_FLAG((tty),TABDLY)
+#define O_BSDLY(tty) _O_FLAG((tty),BSDLY)
+#define O_VTDLY(tty) _O_FLAG((tty),VTDLY)
+#define O_FFDLY(tty) _O_FLAG((tty),FFDLY)
+
+#define C_BAUD(tty) _C_FLAG((tty),CBAUD)
+#define C_CSIZE(tty) _C_FLAG((tty),CSIZE)
+#define C_CSTOPB(tty) _C_FLAG((tty),CSTOPB)
+#define C_CREAD(tty) _C_FLAG((tty),CREAD)
+#define C_PARENB(tty) _C_FLAG((tty),PARENB)
+#define C_PARODD(tty) _C_FLAG((tty),PARODD)
+#define C_HUPCL(tty) _C_FLAG((tty),HUPCL)
+#define C_CLOCAL(tty) _C_FLAG((tty),CLOCAL)
+#define C_CIBAUD(tty) _C_FLAG((tty),CIBAUD)
+#define C_CRTSCTS(tty) _C_FLAG((tty),CRTSCTS)
+
+#define L_ISIG(tty) _L_FLAG((tty),ISIG)
+#define L_ICANON(tty) _L_FLAG((tty),ICANON)
+#define L_XCASE(tty) _L_FLAG((tty),XCASE)
+#define L_ECHO(tty) _L_FLAG((tty),ECHO)
+#define L_ECHOE(tty) _L_FLAG((tty),ECHOE)
+#define L_ECHOK(tty) _L_FLAG((tty),ECHOK)
+#define L_ECHONL(tty) _L_FLAG((tty),ECHONL)
+#define L_NOFLSH(tty) _L_FLAG((tty),NOFLSH)
+#define L_TOSTOP(tty) _L_FLAG((tty),TOSTOP)
+#define L_ECHOCTL(tty) _L_FLAG((tty),ECHOCTL)
+#define L_ECHOPRT(tty) _L_FLAG((tty),ECHOPRT)
+#define L_ECHOKE(tty) _L_FLAG((tty),ECHOKE)
+#define L_FLUSHO(tty) _L_FLAG((tty),FLUSHO)
+#define L_PENDIN(tty) _L_FLAG((tty),PENDIN)
+#define L_IEXTEN(tty) _L_FLAG((tty),IEXTEN)
+
+/*
+ * Where all of the state associated with a tty is kept while the tty
+ * is open. Since the termios state should be kept even if the tty
+ * has been closed --- for things like the baud rate, etc --- it is
+ * not stored here, but rather a pointer to the real state is stored
+ * here. Possibly the winsize structure should have the same
+ * treatment, but (1) the default 80x24 is usually right and (2) it's
+ * most often used by a windowing system, which will set the correct
+ * size each time the window is created or resized anyway.
+ * IMPORTANT: since this structure is dynamically allocated, it must
+ * be no larger than 4096 bytes. Changing TTY_BUF_SIZE will change
+ * the size of this structure, and it needs to be done with care.
+ * - TYT, 9/14/92
+ */
+struct tty_struct {
+ int magic;
+ struct tty_driver driver;
+ struct tty_ldisc ldisc;
+ struct termios *termios, *termios_locked;
+ int pgrp;
+ int session;
+ kdev_t device;
+ unsigned long flags;
+ int count;
+ struct winsize winsize;
+ unsigned char stopped:1, hw_stopped:1, packet:1;
+ unsigned char ctrl_status;
+
+ struct tty_struct *link;
+ struct fasync_struct *fasync;
+ struct tty_flip_buffer flip;
+ int max_flip_cnt;
+ struct wait_queue *write_wait;
+ struct wait_queue *read_wait;
+ void *disc_data;
+ void *driver_data;
+
+#define N_TTY_BUF_SIZE 4096
+
+ /*
+ * The following is data for the N_TTY line discipline. For
+ * historical reasons, this is included in the tty structure.
+ */
+ unsigned int column;
+ unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1;
+ unsigned char closing:1;
+ unsigned short minimum_to_wake;
+ unsigned overrun_time;
+ int num_overrun;
+ unsigned long process_char_map[256/(8*sizeof(unsigned long))];
+ char *read_buf;
+ int read_head;
+ int read_tail;
+ int read_cnt;
+ unsigned long read_flags[N_TTY_BUF_SIZE/(8*sizeof(unsigned long))];
+ int canon_data;
+ unsigned long canon_head;
+ unsigned int canon_column;
+};
+
+/* tty magic number */
+#define TTY_MAGIC 0x5401
+
+/*
+ * These bits are used in the flags field of the tty structure.
+ *
+ * So that interrupts won't be able to mess up the queues,
+ * copy_to_cooked must be atomic with respect to itself, as must
+ * tty->write. Thus, you must use the inline functions set_bit() and
+ * clear_bit() to make things atomic.
+ */
+#define TTY_THROTTLED 0
+#define TTY_IO_ERROR 1
+#define TTY_SLAVE_CLOSED 2
+#define TTY_EXCLUSIVE 3
+#define TTY_DEBUG 4
+#define TTY_DO_WRITE_WAKEUP 5
+#define TTY_PUSH 6
+#define TTY_CLOSING 7
+
+#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
+
+extern void tty_write_flush(struct tty_struct *);
+
+extern struct termios tty_std_termios;
+extern struct tty_struct * redirect;
+extern struct tty_ldisc ldiscs[];
+extern int fg_console, last_console, want_console;
+
+extern int kmsg_redirect;
+extern struct wait_queue * keypress_wait;
+
+extern unsigned long con_init(unsigned long);
+
+extern int rs_init(void);
+extern int lp_init(void);
+extern int pty_init(void);
+extern int tty_init(void);
+extern int vcs_init(void);
+extern int cy_init(void);
+extern int stl_init(void);
+extern int stli_init(void);
+
+extern int tty_paranoia_check(struct tty_struct *tty, kdev_t device,
+ const char *routine);
+extern char *_tty_name(struct tty_struct *tty, char *buf);
+extern char *tty_name(struct tty_struct *tty);
+extern void tty_wait_until_sent(struct tty_struct * tty, int timeout);
+extern int tty_check_change(struct tty_struct * tty);
+extern void stop_tty(struct tty_struct * tty);
+extern void start_tty(struct tty_struct * tty);
+extern int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc);
+extern int tty_register_driver(struct tty_driver *driver);
+extern int tty_unregister_driver(struct tty_driver *driver);
+extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
+ int buflen);
+extern void tty_write_message(struct tty_struct *tty, char *msg);
+
+extern int is_orphaned_pgrp(int pgrp);
+extern int is_ignored(int sig);
+extern int tty_signal(int sig, struct tty_struct *tty);
+extern void tty_hangup(struct tty_struct * tty);
+extern void tty_vhangup(struct tty_struct * tty);
+extern void tty_unhangup(struct file *filp);
+extern int tty_hung_up_p(struct file * filp);
+extern void do_SAK(struct tty_struct *tty);
+extern void disassociate_ctty(int priv);
+
+/* n_tty.c */
+extern struct tty_ldisc tty_ldisc_N_TTY;
+
+/* tty_ioctl.c */
+extern int n_tty_ioctl(struct tty_struct * tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+
+/* serial.c */
+
+extern int rs_open(struct tty_struct * tty, struct file * filp);
+
+/* pty.c */
+
+extern int pty_open(struct tty_struct * tty, struct file * filp);
+
+/* console.c */
+
+extern int con_open(struct tty_struct * tty, struct file * filp);
+extern void update_screen(int new_console);
+extern void console_print(const char *);
+
+/* vt.c */
+
+extern int vt_ioctl(struct tty_struct *tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/tty_driver.h b/i386/i386at/gpl/linux/include/linux/tty_driver.h
new file mode 100644
index 00000000..3468fa2d
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/tty_driver.h
@@ -0,0 +1,189 @@
+#ifndef _LINUX_TTY_DRIVER_H
+#define _LINUX_TTY_DRIVER_H
+
+/*
+ * This structure defines the interface between the low-level tty
+ * driver and the tty routines. The following routines can be
+ * defined; unless noted otherwise, they are optional, and can be
+ * filled in with a null pointer.
+ *
+ * int (*open)(struct tty_struct * tty, struct file * filp);
+ *
+ * This routine is called when a particular tty device is opened.
+ * This routine is mandatory; if this routine is not filled in,
+ * the attempted open will fail with ENODEV.
+ *
+ * void (*close)(struct tty_struct * tty, struct file * filp);
+ *
+ * This routine is called when a particular tty device is closed.
+ *
+ * int (*write)(struct tty_struct * tty, int from_user,
+ * const unsigned char *buf, int count);
+ *
+ * This routine is called by the kernel to write a series of
+ * characters to the tty device. The characters may come from
+ * user space or kernel space. This routine will return the
+ * number of characters actually accepted for writing. This
+ * routine is mandatory.
+ *
+ * void (*put_char)(struct tty_struct *tty, unsigned char ch);
+ *
+ * This routine is called by the kernel to write a single
+ * character to the tty device. If the kernel uses this routine,
+ * it must call the flush_chars() routine (if defined) when it is
+ * done stuffing characters into the driver. If there is no room
+ * in the queue, the character is ignored.
+ *
+ * void (*flush_chars)(struct tty_struct *tty);
+ *
+ * This routine is called by the kernel after it has written a
+ * series of characters to the tty device using put_char().
+ *
+ * int (*write_room)(struct tty_struct *tty);
+ *
+ * This routine returns the number of characters the tty driver
+ * will accept for queuing to be written. This number is subject
+ * to change as output buffers get emptied, or if output flow
+ * control is asserted.
+ *
+ * int (*ioctl)(struct tty_struct *tty, struct file * file,
+ * unsigned int cmd, unsigned long arg);
+ *
+ * This routine allows the tty driver to implement
+ * device-specific ioctl's. If the ioctl number passed in cmd
+ * is not recognized by the driver, it should return ENOIOCTLCMD.
+ *
+ * void (*set_termios)(struct tty_struct *tty, struct termios * old);
+ *
+ * This routine allows the tty driver to be notified when
+ * device's termios settings have changed. Note that a
+ * well-designed tty driver should be prepared to accept the case
+ * where old == NULL, and try to do something rational.
+ *
+ * void (*set_ldisc)(struct tty_struct *tty);
+ *
+ * This routine allows the tty driver to be notified when the
+ * device's line discipline is changed.
+ *
+ * void (*throttle)(struct tty_struct * tty);
+ *
+ * This routine notifies the tty driver that input buffers for
+ * the line discipline are close to full, and it should somehow
+ * signal that no more characters should be sent to the tty.
+ *
+ * void (*unthrottle)(struct tty_struct * tty);
+ *
+ * This routine notifies the tty driver that it should signal
+ * that characters can now be sent to the tty without fear of
+ * overrunning the input buffers of the line disciplines.
+ *
+ * void (*stop)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should stop
+ * outputting characters to the tty device.
+ *
+ * void (*start)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should resume sending
+ * characters to the tty device.
+ *
+ * void (*hangup)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should hangup the
+ * tty device.
+ *
+ */
+
+#include <linux/fs.h>
+
+struct tty_driver {
+ int magic; /* magic number for this structure */
+ const char *name;
+ int name_base; /* offset of printed name */
+ short major; /* major device number */
+ short minor_start; /* start of minor device number*/
+ short num; /* number of devices */
+ short type; /* type of tty driver */
+ short subtype; /* subtype of tty driver */
+ struct termios init_termios; /* Initial termios */
+ int flags; /* tty driver flags */
+ int *refcount; /* for loadable tty drivers */
+ struct tty_driver *other; /* only used for the PTY driver */
+
+ /*
+ * Pointer to the tty data structures
+ */
+ struct tty_struct **table;
+ struct termios **termios;
+ struct termios **termios_locked;
+
+ /*
+ * Interface routines from the upper tty layer to the tty
+ * driver.
+ */
+ int (*open)(struct tty_struct * tty, struct file * filp);
+ void (*close)(struct tty_struct * tty, struct file * filp);
+ int (*write)(struct tty_struct * tty, int from_user,
+ const unsigned char *buf, int count);
+ void (*put_char)(struct tty_struct *tty, unsigned char ch);
+ void (*flush_chars)(struct tty_struct *tty);
+ int (*write_room)(struct tty_struct *tty);
+ int (*chars_in_buffer)(struct tty_struct *tty);
+ int (*ioctl)(struct tty_struct *tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+ void (*set_termios)(struct tty_struct *tty, struct termios * old);
+ void (*throttle)(struct tty_struct * tty);
+ void (*unthrottle)(struct tty_struct * tty);
+ void (*stop)(struct tty_struct *tty);
+ void (*start)(struct tty_struct *tty);
+ void (*hangup)(struct tty_struct *tty);
+ void (*flush_buffer)(struct tty_struct *tty);
+ void (*set_ldisc)(struct tty_struct *tty);
+
+ /*
+ * linked list pointers
+ */
+ struct tty_driver *next;
+ struct tty_driver *prev;
+};
+
+/* tty driver magic number */
+#define TTY_DRIVER_MAGIC 0x5402
+
+/*
+ * tty driver flags
+ *
+ * TTY_DRIVER_RESET_TERMIOS --- requests the tty layer to reset the
+ * termios setting when the last process has closed the device.
+ * Used for PTY's, in particular.
+ *
+ * TTY_DRIVER_REAL_RAW --- if set, indicates that the driver will
+ * guarantee never to set any special character handling
+ * flags if ((IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR ||
+ * !INPCK)). That is, if there is no reason for the driver to
+ * send notifications of parity and break characters up to the
+ * line driver, it won't do so. This allows the line driver to
+ * optimize for this case if this flag is set. (Note that there
+ * is also a promise, if the above case is true, not to signal
+ * overruns, either.)
+ */
+#define TTY_DRIVER_INSTALLED 0x0001
+#define TTY_DRIVER_RESET_TERMIOS 0x0002
+#define TTY_DRIVER_REAL_RAW 0x0004
+
+/* tty driver types */
+#define TTY_DRIVER_TYPE_SYSTEM 0x0001
+#define TTY_DRIVER_TYPE_CONSOLE 0x0002
+#define TTY_DRIVER_TYPE_SERIAL 0x0003
+#define TTY_DRIVER_TYPE_PTY 0x0004
+#define TTY_DRIVER_TYPE_SCC 0x0005 /* scc driver */
+
+/* system subtypes (magic, used by tty_io.c) */
+#define SYSTEM_TYPE_TTY 0x0001
+#define SYSTEM_TYPE_CONSOLE 0x0002
+
+/* pty subtypes (magic, used by tty_io.c) */
+#define PTY_TYPE_MASTER 0x0001
+#define PTY_TYPE_SLAVE 0x0002
+
+#endif /* #ifdef _LINUX_TTY_DRIVER_H */
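Pulling the mandatory parts of the interface description together, a hedged registration skeleton follows. The callbacks are stubs, the major number and port count are invented, and tty_std_termios/tty_register_driver come from <linux/tty.h>.

#include <linux/tty.h>
#include <linux/tty_driver.h>

#define EX_NR_PORTS 4

static struct tty_struct *ex_table[EX_NR_PORTS];
static struct termios *ex_termios[EX_NR_PORTS];
static struct termios *ex_termios_locked[EX_NR_PORTS];
static int ex_refcount;

static int ex_open(struct tty_struct *tty, struct file *filp)
{
	return 0;		/* mandatory: otherwise open fails with ENODEV */
}

static int ex_write(struct tty_struct *tty, int from_user,
		    const unsigned char *buf, int count)
{
	return count;		/* mandatory: pretend everything was accepted */
}

static struct tty_driver ex_driver;

static int ex_init(void)
{
	ex_driver.magic          = TTY_DRIVER_MAGIC;
	ex_driver.name           = "ttyX";
	ex_driver.major          = 60;		/* invented, local/experimental range */
	ex_driver.minor_start    = 0;
	ex_driver.num            = EX_NR_PORTS;
	ex_driver.type           = TTY_DRIVER_TYPE_SERIAL;
	ex_driver.init_termios   = tty_std_termios;
	ex_driver.flags          = TTY_DRIVER_REAL_RAW;
	ex_driver.refcount       = &ex_refcount;
	ex_driver.table          = ex_table;
	ex_driver.termios        = ex_termios;
	ex_driver.termios_locked = ex_termios_locked;
	ex_driver.open           = ex_open;
	ex_driver.write          = ex_write;
	return tty_register_driver(&ex_driver);
}
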
diff --git a/i386/i386at/gpl/linux/include/linux/tty_ldisc.h b/i386/i386at/gpl/linux/include/linux/tty_ldisc.h
new file mode 100644
index 00000000..87b54ca3
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/tty_ldisc.h
@@ -0,0 +1,46 @@
+#ifndef _LINUX_TTY_LDISC_H
+#define _LINUX_TTY_LDISC_H
+
+/*
+ * Definitions for the tty line discipline
+ */
+
+#include <linux/fs.h>
+#include <linux/wait.h>
+
+struct tty_ldisc {
+ int magic;
+ int num;
+ int flags;
+ /*
+ * The following routines are called from above.
+ */
+ int (*open)(struct tty_struct *);
+ void (*close)(struct tty_struct *);
+ void (*flush_buffer)(struct tty_struct *tty);
+ int (*chars_in_buffer)(struct tty_struct *tty);
+ int (*read)(struct tty_struct * tty, struct file * file,
+ unsigned char * buf, unsigned int nr);
+ int (*write)(struct tty_struct * tty, struct file * file,
+ const unsigned char * buf, unsigned int nr);
+ int (*ioctl)(struct tty_struct * tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+ void (*set_termios)(struct tty_struct *tty, struct termios * old);
+ int (*select)(struct tty_struct * tty, struct inode * inode,
+ struct file * file, int sel_type,
+ struct select_table_struct *wait);
+
+ /*
+ * The following routines are called from below.
+ */
+ void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
+ char *fp, int count);
+ int (*receive_room)(struct tty_struct *);
+ void (*write_wakeup)(struct tty_struct *);
+};
+
+#define TTY_LDISC_MAGIC 0x5403
+
+#define LDISC_FLAG_DEFINED 0x00000001
+
+#endif /* _LINUX_TTY_LDISC_H */
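
To show how the fields above line up in practice, a minimal line-discipline skeleton; the handler bodies are placeholders, and the registration call in the trailing comment (tty_register_ldisc, as used by contemporary drivers such as SLIP) is an assumption of this sketch, as is the N_SKETCH discipline number:

    static int  sketch_ldisc_open(struct tty_struct *tty)  { return 0; }
    static void sketch_ldisc_close(struct tty_struct *tty) { }

    static struct tty_ldisc sketch_ldisc = {
            TTY_LDISC_MAGIC,        /* magic */
            0,                      /* num, assigned when the discipline is registered */
            0,                      /* flags */
            sketch_ldisc_open,      /* open */
            sketch_ldisc_close,     /* close */
            /* remaining hooks left NULL in this sketch */
    };

    /* e.g.  tty_register_ldisc(N_SKETCH, &sketch_ldisc);  */
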
diff --git a/i386/i386at/gpl/linux/include/linux/types.h b/i386/i386at/gpl/linux/include/linux/types.h
new file mode 100644
index 00000000..376d3ac3
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/types.h
@@ -0,0 +1,72 @@
+#ifndef _LINUX_TYPES_H
+#define _LINUX_TYPES_H
+
+/*
+ * This allows for 256 file descriptors: if NR_OPEN is ever grown beyond that
+ * you'll have to change this too. But 256 fd's seem to be enough even for such
+ * "real" unices like SunOS, so hopefully this is one limit that doesn't have
+ * to be changed.
+ *
+ * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in <sys/time.h>
+ * (and thus <linux/time.h>) - but this is a more logical place for them. Solved
+ * by having dummy defines in <sys/time.h>.
+ */
+
+/*
+ * Those macros may have been defined in <gnu/types.h>. But we always
+ * use the ones here.
+ */
+#undef __NFDBITS
+#define __NFDBITS (8 * sizeof(unsigned int))
+
+#undef __FD_SETSIZE
+#define __FD_SETSIZE 256
+
+#undef __FDSET_INTS
+#define __FDSET_INTS (__FD_SETSIZE/__NFDBITS)
+
+typedef struct fd_set {
+ unsigned int fds_bits [__FDSET_INTS];
+} fd_set;
+
+#include <asm/types.h>
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+#define _LOFF_T
+typedef long long loff_t;
+#endif
+
+#ifndef MACH_INCLUDE
+/* bsd */
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+#endif
+
+/* sysv */
+typedef unsigned char unchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+
+#ifndef MACH_INCLUDE
+typedef char *caddr_t;
+#endif
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+struct ustat {
+ daddr_t f_tfree;
+ ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
+#endif
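
The fd_set defined above is simply a bit array: __FD_SETSIZE (256) bits packed into __FDSET_INTS unsigned ints of __NFDBITS bits each (8 x 32 on i386). A sketch of the corresponding bit operations; the real FD_SET/FD_ISSET macros come from the asm headers, and the my_* names are illustrative:

    static inline void my_fd_set(int fd, fd_set *set)
    {
            set->fds_bits[fd / __NFDBITS] |= 1U << (fd % __NFDBITS);
    }

    static inline int my_fd_isset(int fd, const fd_set *set)
    {
            return (set->fds_bits[fd / __NFDBITS] >> (fd % __NFDBITS)) & 1;
    }
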
diff --git a/i386/i386at/gpl/linux/include/linux/uio.h b/i386/i386at/gpl/linux/include/linux/uio.h
new file mode 100644
index 00000000..8051b3d0
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/uio.h
@@ -0,0 +1,25 @@
+#ifndef __LINUX_UIO_H
+#define __LINUX_UIO_H
+
+/*
+ * Berkeley style UIO structures - Alan Cox 1994.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+/* A word of warning: Our uio structure will clash with the C library one (which is now obsolete). Remove the C
+ library one from sys/uio.h */
+
+struct iovec
+{
+ void *iov_base; /* BSD uses caddr_t (same thing in effect) */
+ int iov_len;
+};
+
+#define MAX_IOVEC 8 /* Maximum iovec's in one operation */
+
+#endif
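
A small usage sketch for the structure above: summing the byte count of a scatter/gather request, bounded by MAX_IOVEC (the function name is illustrative):

    static int iovec_total_len(const struct iovec *iov, int count)
    {
            int i, total = 0;

            if (count > MAX_IOVEC)
                    return -1;      /* more segments than one operation allows */
            for (i = 0; i < count; i++)
                    total += iov[i].iov_len;
            return total;
    }
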
diff --git a/i386/i386at/gpl/linux/include/linux/unistd.h b/i386/i386at/gpl/linux/include/linux/unistd.h
new file mode 100644
index 00000000..10ed9834
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/unistd.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_UNISTD_H_
+#define _LINUX_UNISTD_H_
+
+extern int errno;
+
+/*
+ * Include machine specific syscallX macros
+ */
+#include <asm/unistd.h>
+
+#endif /* _LINUX_UNISTD_H_ */
diff --git a/i386/i386at/gpl/linux/include/linux/utsname.h b/i386/i386at/gpl/linux/include/linux/utsname.h
new file mode 100644
index 00000000..7aef28fc
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/utsname.h
@@ -0,0 +1,35 @@
+#ifndef _LINUX_UTSNAME_H
+#define _LINUX_UTSNAME_H
+
+#define __OLD_UTS_LEN 8
+
+struct oldold_utsname {
+ char sysname[9];
+ char nodename[9];
+ char release[9];
+ char version[9];
+ char machine[9];
+};
+
+#define __NEW_UTS_LEN 64
+
+struct old_utsname {
+ char sysname[65];
+ char nodename[65];
+ char release[65];
+ char version[65];
+ char machine[65];
+};
+
+struct new_utsname {
+ char sysname[65];
+ char nodename[65];
+ char release[65];
+ char version[65];
+ char machine[65];
+ char domainname[65];
+};
+
+extern struct new_utsname system_utsname;
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/version.h b/i386/i386at/gpl/linux/include/linux/version.h
new file mode 100644
index 00000000..39c1b599
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/version.h
@@ -0,0 +1,8 @@
+#define UTS_RELEASE "1.3.68"
+#define UTS_VERSION "#1 Thu Feb 29 16:37:10 MST 1996"
+#define LINUX_COMPILE_TIME "09:03:52"
+#define LINUX_COMPILE_BY "goel"
+#define LINUX_COMPILE_HOST "stamp.cs.utah.edu"
+#define LINUX_COMPILE_DOMAIN "cs.utah.edu"
+#define LINUX_COMPILER "gcc version 2.7.2"
+#define LINUX_VERSION_CODE (65536 + 4 * 256)
diff --git a/i386/i386at/gpl/linux/include/linux/vfs.h b/i386/i386at/gpl/linux/include/linux/vfs.h
new file mode 100644
index 00000000..b3a58657
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/vfs.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_VFS_H
+#define _LINUX_VFS_H
+
+#include <asm/statfs.h>
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/linux/vm86.h b/i386/i386at/gpl/linux/include/linux/vm86.h
new file mode 100644
index 00000000..ceb10358
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/vm86.h
@@ -0,0 +1,109 @@
+#ifndef _LINUX_VM86_H
+#define _LINUX_VM86_H
+
+/*
+ * I'm guessing at the VIF/VIP flag usage, but hope that this is how
+ * the Pentium uses them. Linux will return from vm86 mode when both
+ * VIF and VIP is set.
+ *
+ * On a Pentium, we could probably optimize the virtual flags directly
+ * in the eflags register instead of doing it "by hand" in vflags...
+ *
+ * Linus
+ */
+
+#define TF_MASK 0x00000100
+#define IF_MASK 0x00000200
+#define IOPL_MASK 0x00003000
+#define NT_MASK 0x00004000
+#define VM_MASK 0x00020000
+#define AC_MASK 0x00040000
+#define VIF_MASK 0x00080000 /* virtual interrupt flag */
+#define VIP_MASK 0x00100000 /* virtual interrupt pending */
+#define ID_MASK 0x00200000
+
+#define BIOSSEG 0x0f000
+
+#define CPU_086 0
+#define CPU_186 1
+#define CPU_286 2
+#define CPU_386 3
+#define CPU_486 4
+#define CPU_586 5
+
+/*
+ * Return values for the 'vm86()' system call
+ */
+#define VM86_TYPE(retval) ((retval) & 0xff)
+#define VM86_ARG(retval) ((retval) >> 8)
+
+#define VM86_SIGNAL 0 /* return due to signal */
+#define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */
+#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
+#define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */
+
+/*
+ * This is the stack-layout when we have done a "SAVE_ALL" from vm86
+ * mode - the main change is that the old segment descriptors aren't
+ * useful any more and are forced to be zero by the kernel (and the
+ * hardware when a trap occurs), and the real segment descriptors are
+ * at the end of the structure. Look at ptrace.h to see the "normal"
+ * setup.
+ */
+
+struct vm86_regs {
+/*
+ * normal regs, with special meaning for the segment descriptors..
+ */
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ long __null_ds;
+ long __null_es;
+ long __null_fs;
+ long __null_gs;
+ long orig_eax;
+ long eip;
+ unsigned short cs, __csh;
+ long eflags;
+ long esp;
+ unsigned short ss, __ssh;
+/*
+ * these are specific to v86 mode:
+ */
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+};
+
+struct revectored_struct {
+ unsigned long __map[8]; /* 256 bits */
+};
+
+struct vm86_struct {
+ struct vm86_regs regs;
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+};
+
+/*
+ * flags masks
+ */
+#define VM86_SCREEN_BITMAP 0x0001
+
+#ifdef __KERNEL__
+
+void handle_vm86_fault(struct vm86_regs *, long);
+void handle_vm86_debug(struct vm86_regs *, long);
+
+#endif
+
+#endif
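
The return-value macros above encode the reason for leaving vm86 mode in the low byte and an argument in the rest. A sketch of the decode a vm86 monitor would perform; the function name and comments are illustrative:

    static void decode_vm86_return(int ret)
    {
            switch (VM86_TYPE(ret)) {
            case VM86_SIGNAL:       /* a signal forced the return */
                    break;
            case VM86_UNKNOWN:      /* unhandled GP fault: emulate or give up */
                    break;
            case VM86_INTx:         /* INT n executed, n == VM86_ARG(ret) */
                    break;
            case VM86_STI:          /* virtual interrupts became enabled */
                    break;
            }
    }
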
diff --git a/i386/i386at/gpl/linux/include/linux/wait.h b/i386/i386at/gpl/linux/include/linux/wait.h
new file mode 100644
index 00000000..90ffe7b3
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/linux/wait.h
@@ -0,0 +1,38 @@
+#ifndef _LINUX_WAIT_H
+#define _LINUX_WAIT_H
+
+#define WNOHANG 0x00000001
+#define WUNTRACED 0x00000002
+
+#define __WCLONE 0x80000000
+
+#ifdef __KERNEL__
+
+struct wait_queue {
+ struct task_struct * task;
+ struct wait_queue * next;
+};
+
+struct semaphore {
+ int count;
+ struct wait_queue * wait;
+};
+
+#define MUTEX ((struct semaphore) { 1, NULL })
+#define MUTEX_LOCKED ((struct semaphore) { 0, NULL })
+
+struct select_table_entry {
+ struct wait_queue wait;
+ struct wait_queue ** wait_address;
+};
+
+typedef struct select_table_struct {
+ int nr;
+ struct select_table_entry * entry;
+} select_table;
+
+#define __MAX_SELECT_TABLE_ENTRIES (4096 / sizeof (struct select_table_entry))
+
+#endif /* __KERNEL__ */
+
+#endif
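
The MUTEX/MUTEX_LOCKED initializers above pair with the down()/up() primitives from the asm headers; a minimal sketch of that pattern (the function name is illustrative, and the availability of down()/up() in this tree is an assumption):

    static struct semaphore sketch_lock = MUTEX;    /* count 1, no waiters */

    static void sketch_critical_section(void)
    {
            down(&sketch_lock);     /* sleeps on sketch_lock.wait while count <= 0 */
            /* ... work protected by the semaphore ... */
            up(&sketch_lock);
    }
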
diff --git a/i386/i386at/gpl/linux/include/net/af_unix.h b/i386/i386at/gpl/linux/include/net/af_unix.h
new file mode 100644
index 00000000..dc4a48d6
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/af_unix.h
@@ -0,0 +1,4 @@
+extern void unix_proto_init(struct net_proto *pro);
+
+typedef struct sock unix_socket;
+
diff --git a/i386/i386at/gpl/linux/include/net/arp.h b/i386/i386at/gpl/linux/include/net/arp.h
new file mode 100644
index 00000000..db7a29c3
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/arp.h
@@ -0,0 +1,17 @@
+/* linux/net/inet/arp.h */
+#ifndef _ARP_H
+#define _ARP_H
+
+extern void arp_init(void);
+extern int arp_rcv(struct sk_buff *skb, struct device *dev,
+ struct packet_type *pt);
+extern int arp_query(unsigned char *haddr, u32 paddr, struct device *dev);
+extern int arp_find(unsigned char *haddr, u32 paddr,
+ struct device *dev, u32 saddr, struct sk_buff *skb);
+extern int arp_ioctl(unsigned int cmd, void *arg);
+extern void arp_send(int type, int ptype, u32 dest_ip,
+ struct device *dev, u32 src_ip,
+ unsigned char *dest_hw, unsigned char *src_hw, unsigned char *th);
+extern int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short type, __u32 daddr);
+extern int arp_update_cache(struct hh_cache * hh);
+#endif /* _ARP_H */
diff --git a/i386/i386at/gpl/linux/include/net/atalkcall.h b/i386/i386at/gpl/linux/include/net/atalkcall.h
new file mode 100644
index 00000000..726e33cd
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/atalkcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void atalk_proto_init(struct net_proto *pro);
diff --git a/i386/i386at/gpl/linux/include/net/ax25.h b/i386/i386at/gpl/linux/include/net/ax25.h
new file mode 100644
index 00000000..45967cb1
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/ax25.h
@@ -0,0 +1,246 @@
+/*
+ * Declarations of AX.25 type objects.
+ *
+ * Alan Cox (GW4PTS) 10/11/93
+ */
+
+#ifndef _AX25_H
+#define _AX25_H
+#include <linux/ax25.h>
+
+#define PR_SLOWHZ 10 /* Run timing at 1/10 second - gives us better resolution for 56kbit links */
+
+#define AX25_T1CLAMPLO (1 * PR_SLOWHZ) /* If defined, clamp at 1 second **/
+#define AX25_T1CLAMPHI (30 * PR_SLOWHZ) /* If defined, clamp at 30 seconds **/
+
+#define AX25_BROKEN_NETMAC
+
+#define AX25_BPQ_HEADER_LEN 16
+#define AX25_KISS_HEADER_LEN 1
+
+#define AX25_HEADER_LEN 17
+#define AX25_ADDR_LEN 7
+#define AX25_DIGI_HEADER_LEN (AX25_MAX_DIGIS * AX25_ADDR_LEN)
+#define AX25_MAX_HEADER_LEN (AX25_HEADER_LEN + AX25_DIGI_HEADER_LEN)
+
+#define AX25_P_IP 0xCC
+#define AX25_P_ARP 0xCD
+#define AX25_P_TEXT 0xF0
+#define AX25_P_NETROM 0xCF
+#define AX25_P_SEGMENT 0x08
+
+#define SEG_REM 0x7F
+#define SEG_FIRST 0x80
+
+#define LAPB_UI 0x03
+#define LAPB_C 0x80
+#define LAPB_E 0x01
+
+#define SSSID_SPARE 0x60 /* Unused bits in SSID for standard AX.25 */
+#define ESSID_SPARE 0x20 /* Unused bits in SSID for extended AX.25 */
+#define DAMA_FLAG		0x40	/* Well, it is *NOT* unused! (dl1bke 951121) */
+
+#define AX25_REPEATED 0x80
+
+#define ACK_PENDING_CONDITION 0x01
+#define REJECT_CONDITION 0x02
+#define PEER_RX_BUSY_CONDITION 0x04
+#define OWN_RX_BUSY_CONDITION 0x08
+
+#ifndef _LINUX_NETDEVICE_H
+#include <linux/netdevice.h>
+#endif
+
+/*
+ * These headers are taken from the KA9Q package by Phil Karn. These specific
+ * files have been placed under the GPL (not the whole package) by Phil.
+ *
+ *
+ * Copyright 1991 Phil Karn, KA9Q
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 dated June, 1991.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave., Cambridge, MA 02139, USA.
+ */
+
+/* Upper sub-layer (LAPB) definitions */
+
+/* Control field templates */
+#define I 0x00 /* Information frames */
+#define S 0x01 /* Supervisory frames */
+#define RR 0x01 /* Receiver ready */
+#define RNR 0x05 /* Receiver not ready */
+#define REJ 0x09 /* Reject */
+#define U 0x03 /* Unnumbered frames */
+#define SABM 0x2f /* Set Asynchronous Balanced Mode */
+#define SABME 0x6f /* Set Asynchronous Balanced Mode Extended */
+#define DISC 0x43 /* Disconnect */
+#define DM 0x0f /* Disconnected mode */
+#define UA 0x63 /* Unnumbered acknowledge */
+#define FRMR 0x87 /* Frame reject */
+#define UI 0x03 /* Unnumbered information */
+#define PF 0x10 /* Poll/final bit for standard AX.25 */
+#define EPF 0x01 /* Poll/final bit for extended AX.25 */
+
+#define ILLEGAL 0x100 /* Impossible to be a real frame type */
+
+#define POLLOFF 0
+#define POLLON 1
+
+/* AX25 L2 C-bit */
+
+#define C_COMMAND	1	/* C_ prefix, otherwise it clashes with the de600 defines (sigh) */
+#define C_RESPONSE 2
+
+/* Define Link State constants. */
+
+#define AX25_STATE_0 0
+#define AX25_STATE_1 1
+#define AX25_STATE_2 2
+#define AX25_STATE_3 3
+#define AX25_STATE_4 4
+
+#define MODULUS 8 /* Standard AX.25 modulus */
+#define EMODULUS 128 /* Extended AX.25 modulus */
+
+#define AX25_DEF_IPDEFMODE 'D'
+#define AX25_DEF_AXDEFMODE 8
+#define AX25_DEF_NETROM 1
+#define AX25_DEF_TEXT 1
+#define AX25_DEF_BACKOFF 'E'
+#define AX25_DEF_CONMODE 1
+#define AX25_DEF_WINDOW 2
+#define AX25_DEF_EWINDOW 32
+#define AX25_DEF_T1 10
+#define AX25_DEF_T2 3
+#define AX25_DEF_T3 300
+#define AX25_DEF_N2 10
+#define AX25_DEF_DIGI (AX25_DIGI_INBAND|AX25_DIGI_XBAND)
+
+typedef struct ax25_uid_assoc {
+ struct ax25_uid_assoc *next;
+ uid_t uid;
+ ax25_address call;
+} ax25_uid_assoc;
+
+typedef struct {
+ ax25_address calls[AX25_MAX_DIGIS];
+ unsigned char repeated[AX25_MAX_DIGIS];
+ unsigned char ndigi;
+ char lastrepeat;
+} ax25_digi;
+
+typedef struct ax25_cb {
+ struct ax25_cb *next;
+ ax25_address source_addr, dest_addr;
+ struct device *device;
+ unsigned char dama_slave; /* dl1bke 951121 */
+ unsigned char state, modulus, hdrincl;
+ unsigned short vs, vr, va;
+ unsigned char condition, backoff;
+ unsigned char n2, n2count;
+ unsigned short t1, t2, t3, rtt;
+ unsigned short t1timer, t2timer, t3timer;
+ unsigned short fragno, fraglen;
+ ax25_digi *digipeat;
+ struct sk_buff_head write_queue;
+ struct sk_buff_head reseq_queue;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head frag_queue;
+ unsigned char window;
+ struct timer_list timer;
+ struct sock *sk; /* Backlink to socket */
+} ax25_cb;
+
+/* af_ax25.c */
+extern ax25_address null_ax25_address;
+extern char *ax2asc(ax25_address *);
+extern int ax25cmp(ax25_address *, ax25_address *);
+extern int ax25_send_frame(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *, struct device *);
+extern void ax25_destroy_socket(ax25_cb *);
+extern struct device *ax25rtr_get_dev(ax25_address *);
+extern int ax25_encapsulate(struct sk_buff *, struct device *, unsigned short,
+ void *, void *, unsigned int);
+extern int ax25_rebuild_header(unsigned char *, struct device *, unsigned long, struct sk_buff *);
+extern ax25_uid_assoc *ax25_uid_list;
+extern int ax25_uid_policy;
+extern ax25_address *ax25_findbyuid(uid_t);
+extern void ax25_queue_xmit(struct sk_buff *, struct device *, int);
+extern int ax25_dev_is_dama_slave(struct device *); /* dl1bke 951121 */
+
+#include <net/ax25call.h>
+
+/* ax25_in.c */
+extern int ax25_process_rx_frame(ax25_cb *, struct sk_buff *, int, int);
+
+/* ax25_out.c */
+extern void ax25_output(ax25_cb *, struct sk_buff *);
+extern void ax25_kick(ax25_cb *);
+extern void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
+extern void ax25_nr_error_recovery(ax25_cb *);
+extern void ax25_establish_data_link(ax25_cb *);
+extern void ax25_transmit_enquiry(ax25_cb *);
+extern void ax25_enquiry_response(ax25_cb *);
+extern void ax25_timeout_response(ax25_cb *);
+extern void ax25_check_iframes_acked(ax25_cb *, unsigned short);
+extern void ax25_check_need_response(ax25_cb *, int, int);
+extern void dama_enquiry_response(ax25_cb *); /* dl1bke 960114 */
+extern void dama_check_need_response(ax25_cb *, int, int); /* dl1bke 960114 */
+extern void dama_establish_data_link(ax25_cb *);
+
+/* ax25_route.c */
+extern void ax25_rt_rx_frame(ax25_address *, struct device *, ax25_digi *);
+extern int ax25_rt_get_info(char *, char **, off_t, int, int);
+extern int ax25_cs_get_info(char *, char **, off_t, int, int);
+extern int ax25_rt_autobind(ax25_cb *, ax25_address *);
+extern void ax25_rt_build_path(ax25_cb *, ax25_address *);
+extern void ax25_dg_build_path(struct sk_buff *, ax25_address *, struct device *);
+extern void ax25_rt_device_down(struct device *);
+extern int ax25_rt_ioctl(unsigned int, void *);
+extern void ax25_ip_mode_set(ax25_address *, struct device *, char);
+extern char ax25_ip_mode_get(ax25_address *, struct device *);
+extern unsigned short ax25_dev_get_value(struct device *, int);
+extern void ax25_dev_device_up(struct device *);
+extern void ax25_dev_device_down(struct device *);
+extern int ax25_dev_ioctl(unsigned int, void *);
+extern int ax25_bpq_get_info(char *, char **, off_t, int, int);
+extern ax25_address *ax25_bpq_get_addr(struct device *);
+extern int ax25_bpq_ioctl(unsigned int, void *);
+
+/* ax25_subr.c */
+extern void ax25_clear_queues(ax25_cb *);
+extern void ax25_frames_acked(ax25_cb *, unsigned short);
+extern void ax25_requeue_frames(ax25_cb *);
+extern int ax25_validate_nr(ax25_cb *, unsigned short);
+extern int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
+extern void ax25_send_control(ax25_cb *, int, int, int);
+extern unsigned short ax25_calculate_t1(ax25_cb *);
+extern void ax25_calculate_rtt(ax25_cb *);
+extern unsigned char *ax25_parse_addr(unsigned char *, int, ax25_address *,
+ ax25_address *, ax25_digi *, int *, int *); /* dl1bke 951121 */
+extern int build_ax25_addr(unsigned char *, ax25_address *, ax25_address *,
+ ax25_digi *, int, int);
+extern int size_ax25_addr(ax25_digi *);
+extern void ax25_digi_invert(ax25_digi *, ax25_digi *);
+extern void ax25_return_dm(struct device *, ax25_address *, ax25_address *, ax25_digi *);
+extern void ax25_dama_on(ax25_cb *); /* dl1bke 951121 */
+extern void ax25_dama_off(ax25_cb *); /* dl1bke 951121 */
+
+/* ax25_timer */
+extern void ax25_set_timer(ax25_cb *);
+extern void ax25_t1_timeout(ax25_cb *);
+
+/* ... */
+
+extern ax25_cb * volatile ax25_list;
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/net/ax25call.h b/i386/i386at/gpl/linux/include/net/ax25call.h
new file mode 100644
index 00000000..89569656
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/ax25call.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void ax25_proto_init(struct net_proto *pro);
diff --git a/i386/i386at/gpl/linux/include/net/checksum.h b/i386/i386at/gpl/linux/include/net/checksum.h
new file mode 100644
index 00000000..aee4fd47
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/checksum.h
@@ -0,0 +1,25 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Checksumming functions for IP, TCP, UDP and so on
+ *
+ * Authors: Jorge Cwik, <jorge@laser.satlink.net>
+ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ * Borrows very liberally from tcp.c and ip.c, see those
+ * files for more names.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _CHECKSUM_H
+#define _CHECKSUM_H
+
+#include <asm/byteorder.h>
+#include <net/ip.h>
+#include <asm/checksum.h>
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/net/datalink.h b/i386/i386at/gpl/linux/include/net/datalink.h
new file mode 100644
index 00000000..44e56990
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/datalink.h
@@ -0,0 +1,16 @@
+#ifndef _NET_INET_DATALINK_H_
+#define _NET_INET_DATALINK_H_
+
+struct datalink_proto {
+ unsigned short type_len;
+ unsigned char type[8];
+ const char *string_name;
+ unsigned short header_length;
+ int (*rcvfunc)(struct sk_buff *, struct device *,
+ struct packet_type *);
+ void (*datalink_header)(struct datalink_proto *, struct sk_buff *,
+ unsigned char *);
+ struct datalink_proto *next;
+};
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/net/icmp.h b/i386/i386at/gpl/linux/include/net/icmp.h
new file mode 100644
index 00000000..e4ae8213
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/icmp.h
@@ -0,0 +1,40 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the ICMP module.
+ *
+ * Version: @(#)icmp.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ICMP_H
+#define _ICMP_H
+
+#include <linux/icmp.h>
+#include <linux/skbuff.h>
+
+#include <net/sock.h>
+#include <net/protocol.h>
+
+extern struct icmp_err icmp_err_convert[];
+extern struct icmp_mib icmp_statistics;
+
+extern void icmp_send(struct sk_buff *skb_in, int type, int code,
+ unsigned long info, struct device *dev);
+extern int icmp_rcv(struct sk_buff *skb1, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr,
+ int redo, struct inet_protocol *protocol);
+extern int icmp_ioctl(struct sock *sk, int cmd,
+ unsigned long arg);
+extern void icmp_init(struct proto_ops *ops);
+
+#endif /* _ICMP_H */
diff --git a/i386/i386at/gpl/linux/include/net/ip.h b/i386/i386at/gpl/linux/include/net/ip.h
new file mode 100644
index 00000000..c7bd9987
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/ip.h
@@ -0,0 +1,154 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP module.
+ *
+ * Version: @(#)ip.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _IP_H
+#define _IP_H
+
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/route.h>
+
+#ifndef _SNMP_H
+#include <net/snmp.h>
+#endif
+
+#include <net/sock.h> /* struct sock */
+
+/* IP flags. */
+#define IP_CE 0x8000 /* Flag: "Congestion" */
+#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
+#define IP_MF 0x2000 /* Flag: "More Fragments" */
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+
+#define IP_FRAG_TIME (30 * HZ) /* fragment lifetime */
+
+#ifdef CONFIG_IP_MULTICAST
+extern void ip_mc_dropsocket(struct sock *);
+extern void ip_mc_dropdevice(struct device *dev);
+extern int ip_mc_procinfo(char *, char **, off_t, int, int);
+#endif
+
+#include <net/ip_forward.h>
+
+/* Describe an IP fragment. */
+struct ipfrag
+{
+ int offset; /* offset of fragment in IP datagram */
+ int end; /* last byte of data in datagram */
+ int len; /* length of this fragment */
+ struct sk_buff *skb; /* complete received fragment */
+ unsigned char *ptr; /* pointer into real fragment data */
+ struct ipfrag *next; /* linked list pointers */
+ struct ipfrag *prev;
+};
+
+/*
+ * Describe an entry in the "incomplete datagrams" queue.
+ */
+
+struct ipq
+{
+ unsigned char *mac; /* pointer to MAC header */
+ struct iphdr *iph; /* pointer to IP header */
+ int len; /* total length of original datagram */
+ short ihlen; /* length of the IP header */
+ short maclen; /* length of the MAC header */
+ struct timer_list timer; /* when will this queue expire? */
+ struct ipfrag *fragments; /* linked list of received fragments */
+ struct ipq *next; /* linked list pointers */
+ struct ipq *prev;
+ struct device *dev; /* Device - for icmp replies */
+};
+
+/*
+ * Functions provided by ip.c
+ */
+
+extern void ip_print(const struct iphdr *ip);
+extern int ip_ioctl(struct sock *sk, int cmd, unsigned long arg);
+extern void ip_route_check(__u32 daddr);
+extern int ip_send(struct rtable *rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr);
+extern int ip_build_header(struct sk_buff *skb,
+ __u32 saddr,
+ __u32 daddr,
+ struct device **dev, int type,
+ struct options *opt, int len,
+ int tos,int ttl,struct rtable **rp);
+extern int ip_rcv(struct sk_buff *skb, struct device *dev,
+ struct packet_type *pt);
+extern int ip_options_echo(struct options * dopt, struct options * sopt,
+ __u32 daddr, __u32 saddr,
+ struct sk_buff * skb);
+extern int ip_options_compile(struct options * opt, struct sk_buff * skb);
+extern void ip_send_check(struct iphdr *ip);
+extern int ip_id_count;
+extern void ip_queue_xmit(struct sock *sk,
+ struct device *dev, struct sk_buff *skb,
+ int free);
+extern void ip_init(void);
+extern int ip_build_xmit(struct sock *sk,
+ void getfrag (const void *,
+ __u32,
+ char *,
+ unsigned int,
+ unsigned int),
+ const void *frag,
+ unsigned short int length,
+ __u32 daddr,
+ __u32 saddr,
+ struct options * opt,
+ int flags,
+ int type,
+ int noblock);
+
+extern struct ip_mib ip_statistics;
+
+/*
+ * Functions provided by ip_fragment.o
+ */
+
+struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev);
+void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag);
+
+/*
+ * Functions provided by ip_forward.c
+ */
+
+extern int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, __u32 target_addr);
+
+/*
+ * Functions provided by ip_options.c
+ */
+
+extern void ip_options_build(struct sk_buff *skb, struct options *opt, __u32 daddr, __u32 saddr, int is_frag);
+extern int ip_options_echo(struct options *dopt, struct options *sopt, __u32 daddr, __u32 saddr, struct sk_buff *skb);
+extern void ip_options_fragment(struct sk_buff *skb);
+extern int ip_options_compile(struct options *opt, struct sk_buff *skb);
+
+/*
+ * Functions provided by ip_sockglue.c
+ */
+
+extern int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen);
+extern int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen);
+
+#endif /* _IP_H */
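
The IP_DF/IP_MF/IP_OFFSET masks above apply to the frag_off field of struct iphdr, which is carried in network byte order; a sketch of the test the reassembly path relies on (the function name is illustrative):

    static int iph_is_fragment(const struct iphdr *iph)
    {
            unsigned short off = ntohs(iph->frag_off);

            /* A fragment has either More-Fragments set or a non-zero
             * offset (the offset is counted in 8-byte units). */
            return (off & IP_MF) || (off & IP_OFFSET);
    }
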
diff --git a/i386/i386at/gpl/linux/include/net/ip_alias.h b/i386/i386at/gpl/linux/include/net/ip_alias.h
new file mode 100644
index 00000000..683a0427
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/ip_alias.h
@@ -0,0 +1,23 @@
+/*
+ * IP_ALIAS (AF_INET) aliasing definitions.
+ *
+ *
+ * Version: @(#)ip_alias.h 0.43 12/20/95
+ *
+ * Author: Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _IP_ALIAS_H
+#define _IP_ALIAS_H
+
+extern int ip_alias_init(void);
+extern int ip_alias_done(void);
+
+#endif /* _IP_ALIAS_H */
diff --git a/i386/i386at/gpl/linux/include/net/ip_forward.h b/i386/i386at/gpl/linux/include/net/ip_forward.h
new file mode 100644
index 00000000..b8596500
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/ip_forward.h
@@ -0,0 +1,10 @@
+#ifndef __NET_IP_FORWARD_H
+#define __NET_IP_FORWARD_H
+
+#define IPFWD_FRAGMENT 1
+#define IPFWD_LASTFRAG 2
+#define IPFWD_MASQUERADED 4
+#define IPFWD_MULTICASTING 8
+#define IPFWD_MULTITUNNEL 16
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/net/ipip.h b/i386/i386at/gpl/linux/include/net/ipip.h
new file mode 100644
index 00000000..bba1492e
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/ipip.h
@@ -0,0 +1,4 @@
+extern int ipip_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
+ __u32 daddr, unsigned short len, __u32 saddr,
+ int redo, struct inet_protocol *protocol);
+
diff --git a/i386/i386at/gpl/linux/include/net/ipx.h b/i386/i386at/gpl/linux/include/net/ipx.h
new file mode 100644
index 00000000..96c62405
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/ipx.h
@@ -0,0 +1,85 @@
+
+/*
+ * The following information is in its entirety obtained from:
+ *
+ * Novell 'IPX Router Specification' Version 1.10
+ * Part No. 107-000029-001
+ *
+ * Which is available from ftp.novell.com
+ */
+
+#ifndef _NET_INET_IPX_H_
+#define _NET_INET_IPX_H_
+
+#include <linux/skbuff.h>
+#include <net/datalink.h>
+#include <linux/ipx.h>
+
+typedef struct
+{
+ unsigned long net;
+ unsigned char node[IPX_NODE_LEN];
+ unsigned short sock;
+} ipx_address;
+
+#define ipx_broadcast_node "\377\377\377\377\377\377"
+#define ipx_this_node "\0\0\0\0\0\0"
+
+typedef struct ipx_packet
+{
+ unsigned short ipx_checksum;
+#define IPX_NO_CHECKSUM 0xFFFF
+ unsigned short ipx_pktsize;
+ unsigned char ipx_tctrl;
+ unsigned char ipx_type;
+#define IPX_TYPE_UNKNOWN 0x00
+#define IPX_TYPE_RIP 0x01 /* may also be 0 */
+#define IPX_TYPE_SAP 0x04 /* may also be 0 */
+#define IPX_TYPE_SPX 0x05 /* Not yet implemented */
+#define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */
+#define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast [Not supported] */
+ ipx_address ipx_dest __attribute__ ((packed));
+ ipx_address ipx_source __attribute__ ((packed));
+} ipx_packet;
+
+
+typedef struct sock ipx_socket;
+
+#include <net/ipxcall.h>
+extern int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt);
+extern void ipxrtr_device_down(struct device *dev);
+
+typedef struct ipx_interface {
+ /* IPX address */
+ unsigned long if_netnum;
+ unsigned char if_node[IPX_NODE_LEN];
+
+ /* physical device info */
+ struct device *if_dev;
+ struct datalink_proto *if_dlink;
+ unsigned short if_dlink_type;
+
+ /* socket support */
+ unsigned short if_sknum;
+ ipx_socket *if_sklist;
+
+ /* administrative overhead */
+ int if_ipx_offset;
+ unsigned char if_internal;
+ unsigned char if_primary;
+
+ struct ipx_interface *if_next;
+} ipx_interface;
+
+typedef struct ipx_route {
+ unsigned long ir_net;
+ ipx_interface *ir_intrfc;
+ unsigned char ir_routed;
+ unsigned char ir_router_node[IPX_NODE_LEN];
+ struct ipx_route *ir_next;
+} ipx_route;
+
+#define IPX_MIN_EPHEMERAL_SOCKET 0x4000
+#define IPX_MAX_EPHEMERAL_SOCKET 0x7fff
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/net/ipxcall.h b/i386/i386at/gpl/linux/include/net/ipxcall.h
new file mode 100644
index 00000000..eb5bd2bd
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/ipxcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void ipx_proto_init(struct net_proto *pro);
diff --git a/i386/i386at/gpl/linux/include/net/netlink.h b/i386/i386at/gpl/linux/include/net/netlink.h
new file mode 100644
index 00000000..e32af15b
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/netlink.h
@@ -0,0 +1,26 @@
+#ifndef __NET_NETLINK_H
+#define __NET_NETLINK_H
+
+#define NET_MAJOR 36		/* Major 36 is reserved for networking */
+#define MAX_LINKS 4		/* 36,0 for route updates, 36,1 for SKIP, 36,2 debug tap, 36,3 PPP reserved */
+#define MAX_QBYTES 32768 /* Maximum bytes in the queue */
+
+#include <linux/config.h>
+
+extern int netlink_attach(int unit, int (*function)(struct sk_buff *skb));
+extern int netlink_donothing(struct sk_buff *skb);
+extern void netlink_detach(int unit);
+extern int netlink_post(int unit, struct sk_buff *skb);
+extern int init_netlink(void);
+
+#define NETLINK_ROUTE 0 /* Routing/device hook */
+#define NETLINK_SKIP 1 /* Reserved for ENskip */
+#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
+#define NETLINK_FIREWALL 3 /* Firewalling hook */
+
+#ifdef CONFIG_RTNETLINK
+extern void ip_netlink_msg(unsigned long, __u32, __u32, __u32, short, short, char *);
+#else
+#define ip_netlink_msg(a,b,c,d,e,f,g)
+#endif
+#endif
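
A usage sketch for the attach interface above; the callback name and body are illustrative, and the two-argument kfree_skb() of this kernel vintage is assumed:

    static int sketch_netlink_rcv(struct sk_buff *skb)
    {
            /* consume the message; returning 0 tells the core it was handled */
            kfree_skb(skb, FREE_READ);
            return 0;
    }

    static void sketch_netlink_setup(void)
    {
            if (netlink_attach(NETLINK_ROUTE, sketch_netlink_rcv) != 0)
                    printk("netlink sketch: unit %d already taken\n", NETLINK_ROUTE);
    }
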
diff --git a/i386/i386at/gpl/linux/include/net/netrom.h b/i386/i386at/gpl/linux/include/net/netrom.h
new file mode 100644
index 00000000..5e343bbc
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/netrom.h
@@ -0,0 +1,139 @@
+/*
+ * Declarations of NET/ROM type objects.
+ *
+ * Jonathan Naylor G4KLX 9/4/95
+ */
+
+#ifndef _NETROM_H
+#define _NETROM_H
+#include <linux/netrom.h>
+
+#define NR_T1CLAMPLO (1 * PR_SLOWHZ) /* If defined, clamp at 1 second **/
+#define NR_T1CLAMPHI 	(300 * PR_SLOWHZ)	/* If defined, clamp at 300 seconds **/
+
+#define NR_NETWORK_LEN 15
+#define NR_TRANSPORT_LEN 5
+
+#define NR_PROTO_IP 0x0C
+
+#define NR_PROTOEXT 0x00
+#define NR_CONNREQ 0x01
+#define NR_CONNACK 0x02
+#define NR_DISCREQ 0x03
+#define NR_DISCACK 0x04
+#define NR_INFO 0x05
+#define NR_INFOACK 0x06
+
+#define NR_CHOKE_FLAG 0x80
+#define NR_NAK_FLAG 0x40
+#define NR_MORE_FLAG 0x20
+
+/* Define Link State constants. */
+
+#define NR_STATE_0 0
+#define NR_STATE_1 1
+#define NR_STATE_2 2
+#define NR_STATE_3 3
+
+#define NR_DEFAULT_T1 (120 * PR_SLOWHZ) /* Outstanding frames - 120 seconds */
+#define NR_DEFAULT_T2 (5 * PR_SLOWHZ) /* Response delay - 5 seconds */
+#define NR_DEFAULT_N2 3 /* Number of Retries */
+#define NR_DEFAULT_T4 (180 * PR_SLOWHZ) /* Transport Busy Delay */
+#define NR_DEFAULT_WINDOW 4 /* Default Window Size */
+#define NR_DEFAULT_OBS			6			/* Default Obsolescence Count */
+#define NR_DEFAULT_QUAL 10 /* Default Neighbour Quality */
+#define NR_DEFAULT_TTL 16 /* Default Time To Live */
+#define NR_MODULUS 256
+#define NR_MAX_WINDOW_SIZE 127 /* Maximum Window Allowable */
+
+typedef struct {
+ ax25_address user_addr, source_addr, dest_addr;
+ struct device *device;
+ unsigned char my_index, my_id;
+ unsigned char your_index, your_id;
+ unsigned char state, condition, bpqext, hdrincl;
+ unsigned short vs, vr, va, vl;
+ unsigned char n2, n2count;
+ unsigned short t1, t2, rtt;
+ unsigned short t1timer, t2timer, t4timer;
+ unsigned short fraglen;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head reseq_queue;
+ struct sk_buff_head frag_queue;
+ struct sock *sk; /* Backlink to socket */
+} nr_cb;
+
+struct nr_route {
+ unsigned char quality;
+ unsigned char obs_count;
+ unsigned short neighbour;
+};
+
+struct nr_node {
+ struct nr_node *next;
+ ax25_address callsign;
+ char mnemonic[7];
+ unsigned char which;
+ unsigned char count;
+ struct nr_route routes[3];
+};
+
+struct nr_neigh {
+ struct nr_neigh *next;
+ ax25_address callsign;
+ ax25_digi *digipeat;
+ struct device *dev;
+ unsigned char quality;
+ unsigned char locked;
+ unsigned short count;
+ unsigned short number;
+};
+
+/* af_netrom.c */
+extern struct nr_parms_struct nr_default;
+extern int nr_rx_frame(struct sk_buff *, struct device *);
+extern void nr_destroy_socket(struct sock *);
+
+/* nr_dev.c */
+extern int nr_rx_ip(struct sk_buff *, struct device *);
+extern int nr_init(struct device *);
+
+#include <net/nrcall.h>
+
+/* nr_in.c */
+extern int nr_process_rx_frame(struct sock *, struct sk_buff *);
+
+/* nr_out.c */
+extern void nr_output(struct sock *, struct sk_buff *);
+extern void nr_send_nak_frame(struct sock *);
+extern void nr_kick(struct sock *);
+extern void nr_transmit_buffer(struct sock *, struct sk_buff *);
+extern void nr_establish_data_link(struct sock *);
+extern void nr_enquiry_response(struct sock *);
+extern void nr_check_iframes_acked(struct sock *, unsigned short);
+
+/* nr_route.c */
+extern void nr_rt_device_down(struct device *);
+extern struct device *nr_dev_first(void);
+extern struct device *nr_dev_get(ax25_address *);
+extern int nr_rt_ioctl(unsigned int, void *);
+extern void nr_link_failed(ax25_address *, struct device *);
+extern int nr_route_frame(struct sk_buff *, ax25_cb *);
+extern int nr_nodes_get_info(char *, char **, off_t, int, int);
+extern int nr_neigh_get_info(char *, char **, off_t, int, int);
+
+/* nr_subr.c */
+extern void nr_clear_queues(struct sock *);
+extern void nr_frames_acked(struct sock *, unsigned short);
+extern void nr_requeue_frames(struct sock *);
+extern int nr_validate_nr(struct sock *, unsigned short);
+extern int nr_in_rx_window(struct sock *, unsigned short);
+extern void nr_write_internal(struct sock *, int);
+extern void nr_transmit_dm(struct sk_buff *);
+extern unsigned short nr_calculate_t1(struct sock *);
+extern void nr_calculate_rtt(struct sock *);
+
+/* ax25_timer */
+extern void nr_set_timer(struct sock *);
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/net/nrcall.h b/i386/i386at/gpl/linux/include/net/nrcall.h
new file mode 100644
index 00000000..f58c2d4f
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/nrcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void nr_proto_init(struct net_proto *pro);
diff --git a/i386/i386at/gpl/linux/include/net/p8022.h b/i386/i386at/gpl/linux/include/net/p8022.h
new file mode 100644
index 00000000..52c676be
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/p8022.h
@@ -0,0 +1,2 @@
+struct datalink_proto *register_8022_client(unsigned char type, int (*rcvfunc)(struct sk_buff *, struct device *, struct packet_type *));
+
diff --git a/i386/i386at/gpl/linux/include/net/p8022call.h b/i386/i386at/gpl/linux/include/net/p8022call.h
new file mode 100644
index 00000000..14f0c2ce
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/p8022call.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of Space.c simpler */
+extern void p8022_proto_init(struct net_proto *);
diff --git a/i386/i386at/gpl/linux/include/net/protocol.h b/i386/i386at/gpl/linux/include/net/protocol.h
new file mode 100644
index 00000000..ae328b69
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/protocol.h
@@ -0,0 +1,55 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the protocol dispatcher.
+ *
+ * Version: @(#)protocol.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Changes:
+ * Alan Cox : Added a name field and a frag handler
+ * field for later.
+ * Alan Cox : Cleaned up, and sorted types.
+ */
+
+#ifndef _PROTOCOL_H
+#define _PROTOCOL_H
+
+#define MAX_INET_PROTOS 32 /* Must be a power of 2 */
+
+
+/* This is used to register protocols. */
+struct inet_protocol {
+ int (*handler)(struct sk_buff *skb, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr,
+ int redo, struct inet_protocol *protocol);
+ void (*err_handler)(int type, int code, unsigned char *buff,
+ __u32 daddr,
+ __u32 saddr,
+ struct inet_protocol *protocol);
+ struct inet_protocol *next;
+ unsigned char protocol;
+ unsigned char copy:1;
+ void *data;
+ const char *name;
+};
+
+
+extern struct inet_protocol *inet_protocol_base;
+extern struct inet_protocol *inet_protos[MAX_INET_PROTOS];
+
+
+extern void inet_add_protocol(struct inet_protocol *prot);
+extern int inet_del_protocol(struct inet_protocol *prot);
+
+
+#endif /* _PROTOCOL_H */
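
Registration with the dispatcher declared above means filling one struct inet_protocol and handing it to inet_add_protocol(). A sketch; the handler body, the name string and the protocol number (an arbitrary unassigned value) are illustrative:

    static int sketch_proto_rcv(struct sk_buff *skb, struct device *dev,
                                struct options *opt, __u32 daddr,
                                unsigned short len, __u32 saddr,
                                int redo, struct inet_protocol *protocol)
    {
            kfree_skb(skb, FREE_READ);      /* just swallow the packet */
            return 0;
    }

    static struct inet_protocol sketch_proto = {
            sketch_proto_rcv,       /* handler */
            NULL,                   /* err_handler */
            NULL,                   /* next, maintained by inet_add_protocol */
            222,                    /* protocol number, arbitrary for the sketch */
            0,                      /* copy */
            NULL,                   /* data */
            "SKETCH"                /* name */
    };

    /* in some init path:  inet_add_protocol(&sketch_proto);  */
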
diff --git a/i386/i386at/gpl/linux/include/net/psnap.h b/i386/i386at/gpl/linux/include/net/psnap.h
new file mode 100644
index 00000000..b69859db
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/psnap.h
@@ -0,0 +1,2 @@
+struct datalink_proto *register_snap_client(unsigned char *desc, int (*rcvfunc)(struct sk_buff *, struct device *, struct packet_type *));
+
diff --git a/i386/i386at/gpl/linux/include/net/psnapcall.h b/i386/i386at/gpl/linux/include/net/psnapcall.h
new file mode 100644
index 00000000..9da5763c
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/psnapcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of Space.c simpler */
+extern void snap_proto_init(struct net_proto *);
diff --git a/i386/i386at/gpl/linux/include/net/rarp.h b/i386/i386at/gpl/linux/include/net/rarp.h
new file mode 100644
index 00000000..7bfb08ef
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/rarp.h
@@ -0,0 +1,12 @@
+/* linux/net/inet/rarp.h */
+#ifndef _RARP_H
+#define _RARP_H
+
+extern int rarp_ioctl(unsigned int cmd, void *arg);
+extern int rarp_get_info(char *buffer,
+ char **start,
+ off_t offset,
+ int length,
+ int dummy);
+#endif /* _RARP_H */
+
diff --git a/i386/i386at/gpl/linux/include/net/raw.h b/i386/i386at/gpl/linux/include/net/raw.h
new file mode 100644
index 00000000..4b424879
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/raw.h
@@ -0,0 +1,34 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the RAW-IP module.
+ *
+ * Version: @(#)raw.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _RAW_H
+#define _RAW_H
+
+
+extern struct proto raw_prot;
+
+
+extern void raw_err(int type, int code, unsigned char *header, __u32 daddr,
+ __u32 saddr, struct inet_protocol *protocol);
+extern int raw_recvfrom(struct sock *sk, unsigned char *to,
+ int len, int noblock, unsigned flags,
+ struct sockaddr_in *sin, int *addr_len);
+extern int raw_read(struct sock *sk, unsigned char *buff,
+ int len, int noblock, unsigned flags);
+extern int raw_rcv(struct sock *, struct sk_buff *, struct device *,
+ __u32, __u32);
+
+#endif /* _RAW_H */
diff --git a/i386/i386at/gpl/linux/include/net/route.h b/i386/i386at/gpl/linux/include/net/route.h
new file mode 100644
index 00000000..8ce67383
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/route.h
@@ -0,0 +1,280 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP router.
+ *
+ * Version: @(#)route.h 1.0.4 05/27/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Fixes:
+ * Alan Cox : Reformatted. Added ip_rt_local()
+ * Alan Cox : Support for TCP parameters.
+ * Alexey Kuznetsov: Major changes for new routing code.
+ *
+ * FIXME:
+ * Modules stuff is broken at the moment.
+ * Make atomic ops more generic and hide them in asm/...
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ROUTE_H
+#define _ROUTE_H
+
+#include <linux/config.h>
+
+/*
+ * 0 - no debugging messages
+ * 1 - rare events and bug situations (default)
+ * 2 - trace mode.
+ */
+#define RT_CACHE_DEBUG 1
+
+#define RT_HASH_DIVISOR 256
+#define RT_CACHE_SIZE_MAX 256
+
+#define RTZ_HASH_DIVISOR 256
+
+#if RT_CACHE_DEBUG >= 2
+#define RTZ_HASHING_LIMIT 0
+#else
+#define RTZ_HASHING_LIMIT 16
+#endif
+
+/*
+ * Maximal time to live for unused entry.
+ */
+#define RT_CACHE_TIMEOUT (HZ*300)
+
+/*
+ * Prevents LRU thrashing: entries are considered equivalent
+ * if the difference between their last-use times is less than this number.
+ */
+#define RT_CACHE_BUBBLE_THRESHOULD (HZ*5)
+
+#include <linux/route.h>
+
+#ifdef __KERNEL__
+#define RTF_LOCAL 0x8000
+#endif
+
+/*
+ * Semaphores.
+ */
+#if defined(__alpha__)
+
+static __inline__ void ATOMIC_INCR(unsigned int * addr)
+{
+ unsigned tmp;
+
+ __asm__ __volatile__(
+ "1:\n\
+ ldl_l %1,%2\n\
+ addl %1,1,%1\n\
+ stl_c %1,%0\n\
+ beq %1,1b\n"
+ : "m=" (*addr), "r=&" (tmp)
+ : "m"(*addr));
+}
+
+static __inline__ void ATOMIC_DECR(unsigned int * addr)
+{
+ unsigned tmp;
+
+ __asm__ __volatile__(
+ "1:\n\
+ ldl_l %1,%2\n\
+ subl %1,1,%1\n\
+ stl_c %1,%0\n\
+ beq %1,1b\n"
+ : "m=" (*addr), "r=&" (tmp)
+ : "m"(*addr));
+}
+
+static __inline__ int ATOMIC_DECR_AND_CHECK (unsigned int * addr)
+{
+ unsigned tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1:\n\
+ ldl_l %1,%3\n\
+ subl %1,1,%1\n\
+ mov %1,%2\n\
+ stl_c %1,%0\n\
+ beq %1,1b\n"
+ : "m=" (*addr), "r=&" (tmp), "r=&"(result)
+ : "m"(*addr));
+ return result;
+}
+
+#elif defined(__i386__)
+#include <asm/bitops.h>
+
+extern __inline__ void ATOMIC_INCR(void * addr)
+{
+ __asm__ __volatile__(
+ "incl %0"
+ :"=m" (ADDR));
+}
+
+extern __inline__ void ATOMIC_DECR(void * addr)
+{
+ __asm__ __volatile__(
+ "decl %0"
+ :"=m" (ADDR));
+}
+
+/*
+ * It is DECR that is ATOMIC, not CHECK!
+ * If you want to do atomic checks, use cli()/sti(). --ANK
+ */
+
+extern __inline__ unsigned long ATOMIC_DECR_AND_CHECK(void * addr)
+{
+ unsigned long retval;
+ __asm__ __volatile__(
+ "decl %0\nmovl %0,%1"
+ : "=m" (ADDR), "=r"(retval));
+ return retval;
+}
+
+
+#else
+
+static __inline__ void ATOMIC_INCR(unsigned int * addr)
+{
+ (*(__volatile__ unsigned int*)addr)++;
+}
+
+static __inline__ void ATOMIC_DECR(unsigned int * addr)
+{
+ (*(__volatile__ unsigned int*)addr)--;
+}
+
+static __inline__ int ATOMIC_DECR_AND_CHECK (unsigned int * addr)
+{
+ ATOMIC_DECR(addr);
+ return *(volatile unsigned int*)addr;
+}
+
+#endif
+
+
+
+struct rtable
+{
+ struct rtable *rt_next;
+ __u32 rt_dst;
+ __u32 rt_src;
+ __u32 rt_gateway;
+ unsigned rt_refcnt;
+ unsigned rt_use;
+ unsigned long rt_window;
+ unsigned long rt_lastuse;
+ struct hh_cache *rt_hh;
+ struct device *rt_dev;
+ unsigned short rt_flags;
+ unsigned short rt_mtu;
+ unsigned short rt_irtt;
+ unsigned char rt_tos;
+};
+
+extern void ip_rt_flush(struct device *dev);
+extern void ip_rt_redirect(__u32 src, __u32 dst, __u32 gw, struct device *dev);
+extern struct rtable *ip_rt_slow_route(__u32 daddr, int local);
+extern int rt_get_info(char * buffer, char **start, off_t offset, int length, int dummy);
+extern int rt_cache_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
+extern int ip_rt_ioctl(unsigned int cmd, void *arg);
+extern int ip_rt_new(struct rtentry *rt);
+extern void ip_rt_check_expire(void);
+extern void ip_rt_advice(struct rtable **rp, int advice);
+
+extern void ip_rt_run_bh(void);
+extern int ip_rt_lock;
+extern unsigned ip_rt_bh_mask;
+extern struct rtable *ip_rt_hash_table[RT_HASH_DIVISOR];
+
+extern __inline__ void ip_rt_fast_lock(void)
+{
+ ATOMIC_INCR(&ip_rt_lock);
+}
+
+extern __inline__ void ip_rt_fast_unlock(void)
+{
+ ATOMIC_DECR(&ip_rt_lock);
+}
+
+extern __inline__ void ip_rt_unlock(void)
+{
+ if (!ATOMIC_DECR_AND_CHECK(&ip_rt_lock) && ip_rt_bh_mask)
+ ip_rt_run_bh();
+}
+
+extern __inline__ unsigned ip_rt_hash_code(__u32 addr)
+{
+ unsigned tmp = addr + (addr>>16);
+ return (tmp + (tmp>>8)) & 0xFF;
+}
+
+
+extern __inline__ void ip_rt_put(struct rtable * rt)
+#ifndef MODULE
+{
+ if (rt)
+ ATOMIC_DECR(&rt->rt_refcnt);
+}
+#else
+;
+#endif
+
+#ifdef CONFIG_KERNELD
+extern struct rtable * ip_rt_route(__u32 daddr, int local);
+#else
+extern __inline__ struct rtable * ip_rt_route(__u32 daddr, int local)
+#ifndef MODULE
+{
+ struct rtable * rth;
+
+ ip_rt_fast_lock();
+
+ for (rth=ip_rt_hash_table[ip_rt_hash_code(daddr)^local]; rth; rth=rth->rt_next)
+ {
+ if (rth->rt_dst == daddr)
+ {
+ rth->rt_lastuse = jiffies;
+ ATOMIC_INCR(&rth->rt_use);
+ ATOMIC_INCR(&rth->rt_refcnt);
+ ip_rt_unlock();
+ return rth;
+ }
+ }
+ return ip_rt_slow_route (daddr, local);
+}
+#else
+;
+#endif
+#endif
+
+extern __inline__ struct rtable * ip_check_route(struct rtable ** rp,
+ __u32 daddr, int local)
+{
+ struct rtable * rt = *rp;
+
+ if (!rt || rt->rt_dst != daddr || !(rt->rt_flags&RTF_UP)
+ || ((local==1)^((rt->rt_flags&RTF_LOCAL) != 0)))
+ {
+ ip_rt_put(rt);
+ rt = ip_rt_route(daddr, local);
+ *rp = rt;
+ }
+ return rt;
+}
+
+
+#endif /* _ROUTE_H */
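
The lookup/release discipline implied by the inlines above, gathered in one place; the function name is illustrative and daddr is an IPv4 destination in network byte order:

    static struct device *sketch_route_dev(__u32 daddr)
    {
            struct rtable *rt = ip_rt_route(daddr, 0);      /* 0: not a local lookup */
            struct device *dev = NULL;

            if (rt) {
                    dev = rt->rt_dev;
                    ip_rt_put(rt);          /* drop the reference the lookup took */
            }
            return dev;
    }
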
diff --git a/i386/i386at/gpl/linux/include/net/slhc.h b/i386/i386at/gpl/linux/include/net/slhc.h
new file mode 100644
index 00000000..c7b39db5
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/slhc.h
@@ -0,0 +1,6 @@
+#ifndef __NET_SLHC_H
+#define __NET_SLHC_H
+
+extern void slhc_install(void);
+
+#endif
diff --git a/i386/i386at/gpl/linux/include/net/snmp.h b/i386/i386at/gpl/linux/include/net/snmp.h
new file mode 100644
index 00000000..552292be
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/snmp.h
@@ -0,0 +1,107 @@
+/*
+ *
+ * SNMP MIB entries for the IP subsystem.
+ *
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ *
+ * We don't choose to implement SNMP in the kernel (this would
+ * be silly as SNMP is a pain in the backside in places). We do
+ * however need to collect the MIB statistics and export them
+ * out of /proc (eventually)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _SNMP_H
+#define _SNMP_H
+
+/*
+ * We use all unsigned longs. Linux will soon be so reliable that even these
+ * will rapidly get too small 8-). Seriously consider the IpInReceives count
+ * on the 20Gb/s + networks people expect in a few years time!
+ */
+
+struct ip_mib
+{
+ unsigned long IpForwarding;
+ unsigned long IpDefaultTTL;
+ unsigned long IpInReceives;
+ unsigned long IpInHdrErrors;
+ unsigned long IpInAddrErrors;
+ unsigned long IpForwDatagrams;
+ unsigned long IpInUnknownProtos;
+ unsigned long IpInDiscards;
+ unsigned long IpInDelivers;
+ unsigned long IpOutRequests;
+ unsigned long IpOutDiscards;
+ unsigned long IpOutNoRoutes;
+ unsigned long IpReasmTimeout;
+ unsigned long IpReasmReqds;
+ unsigned long IpReasmOKs;
+ unsigned long IpReasmFails;
+ unsigned long IpFragOKs;
+ unsigned long IpFragFails;
+ unsigned long IpFragCreates;
+};
+
+
+struct icmp_mib
+{
+ unsigned long IcmpInMsgs;
+ unsigned long IcmpInErrors;
+ unsigned long IcmpInDestUnreachs;
+ unsigned long IcmpInTimeExcds;
+ unsigned long IcmpInParmProbs;
+ unsigned long IcmpInSrcQuenchs;
+ unsigned long IcmpInRedirects;
+ unsigned long IcmpInEchos;
+ unsigned long IcmpInEchoReps;
+ unsigned long IcmpInTimestamps;
+ unsigned long IcmpInTimestampReps;
+ unsigned long IcmpInAddrMasks;
+ unsigned long IcmpInAddrMaskReps;
+ unsigned long IcmpOutMsgs;
+ unsigned long IcmpOutErrors;
+ unsigned long IcmpOutDestUnreachs;
+ unsigned long IcmpOutTimeExcds;
+ unsigned long IcmpOutParmProbs;
+ unsigned long IcmpOutSrcQuenchs;
+ unsigned long IcmpOutRedirects;
+ unsigned long IcmpOutEchos;
+ unsigned long IcmpOutEchoReps;
+ unsigned long IcmpOutTimestamps;
+ unsigned long IcmpOutTimestampReps;
+ unsigned long IcmpOutAddrMasks;
+ unsigned long IcmpOutAddrMaskReps;
+};
+
+struct tcp_mib
+{
+ unsigned long TcpRtoAlgorithm;
+ unsigned long TcpRtoMin;
+ unsigned long TcpRtoMax;
+ unsigned long TcpMaxConn;
+ unsigned long TcpActiveOpens;
+ unsigned long TcpPassiveOpens;
+ unsigned long TcpAttemptFails;
+ unsigned long TcpEstabResets;
+ unsigned long TcpCurrEstab;
+ unsigned long TcpInSegs;
+ unsigned long TcpOutSegs;
+ unsigned long TcpRetransSegs;
+};
+
+struct udp_mib
+{
+ unsigned long UdpInDatagrams;
+ unsigned long UdpNoPorts;
+ unsigned long UdpInErrors;
+ unsigned long UdpOutDatagrams;
+};
+
+
+#endif
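
These MIB structures are plain counter blocks; protocol code simply increments fields of the per-protocol instances (ip_statistics, icmp_statistics, ...) declared in the other net headers. A sketch with an illustrative helper:

    static void sketch_count_rx(struct ip_mib *mib, int header_bad)
    {
            mib->IpInReceives++;
            if (header_bad)
                    mib->IpInHdrErrors++;
    }
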
diff --git a/i386/i386at/gpl/linux/include/net/sock.h b/i386/i386at/gpl/linux/include/net/sock.h
new file mode 100644
index 00000000..dc7a4a90
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/sock.h
@@ -0,0 +1,486 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the AF_INET socket handler.
+ *
+ * Version: @(#)sock.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Florian La Roche <flla@stud.uni-sb.de>
+ *
+ * Fixes:
+ * Alan Cox : Volatiles in skbuff pointers. See
+ * skbuff comments. May be overdone,
+ * better to prove they can be removed
+ * than the reverse.
+ * Alan Cox : Added a zapped field for tcp to note
+ * a socket is reset and must stay shut up
+ * Alan Cox : New fields for options
+ * Pauline Middelink : identd support
+ * Alan Cox : Eliminate low level recv/recvfrom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _SOCK_H
+#define _SOCK_H
+
+#include <linux/timer.h>
+#include <linux/ip.h> /* struct options */
+#include <linux/in.h> /* struct sockaddr_in */
+#include <linux/tcp.h> /* struct tcphdr */
+#include <linux/config.h>
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h> /* struct sk_buff */
+#include <net/protocol.h> /* struct inet_protocol */
+#ifdef CONFIG_AX25
+#include <net/ax25.h>
+#ifdef CONFIG_NETROM
+#include <net/netrom.h>
+#endif
+#endif
+#ifdef CONFIG_IPX
+#include <net/ipx.h>
+#endif
+#ifdef CONFIG_ATALK
+#include <linux/atalk.h>
+#endif
+
+#include <linux/igmp.h>
+
+/* Think big (also on some systems a byte is faster) */
+#define SOCK_ARRAY_SIZE 256
+
+
+/*
+ * The AF_UNIX specific socket options
+ */
+
+struct unix_opt
+{
+ int family;
+ char * name;
+ int locks;
+ struct inode * inode;
+ struct semaphore readsem;
+ struct sock * other;
+};
+
+/*
+ * IP packet socket options
+ */
+
+struct inet_packet_opt
+{
+ struct notifier_block notifier; /* Used when bound */
+ struct device *bound_dev;
+ unsigned long dev_stamp;
+ struct packet_type *prot_hook;
+ char device_name[15];
+};
+
+
+/*
+ * This structure really needs to be cleaned up.
+ * Most of it is for TCP, and not used by any of
+ * the other protocols.
+ */
+struct sock
+{
+ struct options *opt;
+ volatile unsigned long wmem_alloc;
+ volatile unsigned long rmem_alloc;
+ unsigned long allocation; /* Allocation mode */
+ __u32 write_seq;
+ __u32 sent_seq;
+ __u32 acked_seq;
+ __u32 copied_seq;
+ __u32 rcv_ack_seq;
+ __u32 window_seq;
+ __u32 fin_seq;
+ __u32 urg_seq;
+ __u32 urg_data;
+ int users; /* user count */
+ /*
+ * Not all are volatile, but some are, so we
+ * might as well say they all are.
+ */
+ volatile char dead,
+ urginline,
+ intr,
+ blog,
+ done,
+ reuse,
+ keepopen,
+ linger,
+ delay_acks,
+ destroy,
+ ack_timed,
+ no_check,
+ zapped, /* In ax25 & ipx means not linked */
+ broadcast,
+ nonagle,
+ bsdism;
+ unsigned long lingertime;
+ int proc;
+ struct sock *next;
+ struct sock *prev; /* Doubly linked chain.. */
+ struct sock *pair;
+ struct sk_buff * volatile send_head;
+ struct sk_buff * volatile send_tail;
+ struct sk_buff_head back_log;
+ struct sk_buff *partial;
+ struct timer_list partial_timer;
+ long retransmits;
+ struct sk_buff_head write_queue,
+ receive_queue;
+ struct proto *prot;
+ struct wait_queue **sleep;
+ __u32 daddr;
+ __u32 saddr; /* Sending source */
+ __u32 rcv_saddr; /* Bound address */
+ unsigned short max_unacked;
+ unsigned short window;
+ __u32 lastwin_seq; /* sequence number when we last updated the window we offer */
+ volatile unsigned long ato; /* ack timeout */
+ volatile unsigned long lrcvtime; /* jiffies at last rcv */
+ unsigned short bytes_rcv;
+/*
+ * mss is min(mtu, max_window)
+ */
+ unsigned short mtu; /* mss negotiated in the syn's */
+ volatile unsigned short mss; /* current eff. mss - can change */
+ volatile unsigned short user_mss; /* mss requested by user in ioctl */
+ volatile unsigned short max_window;
+ unsigned long window_clamp;
+ unsigned short num;
+ volatile unsigned short cong_window;
+ volatile unsigned short cong_count;
+ volatile unsigned short ssthresh;
+ volatile unsigned short packets_out;
+ volatile unsigned short shutdown;
+ volatile unsigned long rtt;
+ volatile unsigned long mdev;
+ volatile unsigned long rto;
+
+/*
+ * currently backoff isn't used, but I'm maintaining it in case
+ * we want to go back to a backoff formula that needs it
+ */
+
+ volatile unsigned short backoff;
+ volatile int err, err_soft; /* Soft holds errors that don't
+ cause failure but are the cause
+ of a persistent failure not just
+ 'timed out' */
+ unsigned char protocol;
+ volatile unsigned char state;
+ volatile unsigned char ack_backlog;
+ unsigned char max_ack_backlog;
+ unsigned char priority;
+ unsigned char debug;
+ unsigned short rcvbuf;
+ unsigned short sndbuf;
+ unsigned short type;
+ unsigned char localroute; /* Route locally only */
+#ifdef CONFIG_IPX
+/*
+ * Once the IPX ncpd patches are in these are going into protinfo
+ */
+ ipx_address ipx_dest_addr;
+ ipx_interface *ipx_intrfc;
+ unsigned short ipx_port;
+
+/* To handle asynchronous messages from the NetWare server, we have to
+ * know the connection this socket belongs to. Sorry to blow up this
+ * structure even more. */
+ struct ncp_server *ipx_ncp_server;
+
+#ifdef CONFIG_IPX_INTERN
+ unsigned char ipx_node[IPX_NODE_LEN];
+#endif
+ unsigned short ipx_type;
+#endif
+#ifdef CONFIG_AX25
+ ax25_cb *ax25;
+#ifdef CONFIG_NETROM
+ nr_cb *nr;
+#endif
+#endif
+
+/*
+ * This is where all the private (optional) areas that don't
+ * overlap will eventually live.
+ */
+
+ union
+ {
+ struct unix_opt af_unix;
+#ifdef CONFIG_ATALK
+ struct atalk_sock af_at;
+#endif
+#ifdef CONFIG_INET
+ struct inet_packet_opt af_packet;
+#endif
+ } protinfo;
+
+/*
+ * IP 'private area', or what will eventually become one
+ */
+ int ip_ttl; /* TTL setting */
+ int ip_tos; /* TOS */
+ struct tcphdr dummy_th;
+ struct timer_list keepalive_timer; /* TCP keepalive hack */
+ struct timer_list retransmit_timer; /* TCP retransmit timer */
+ struct timer_list ack_timer; /* TCP delayed ack timer */
+ int ip_xmit_timeout; /* Why the timeout is running */
+ struct rtable *ip_route_cache; /* Cached output route */
+ unsigned char ip_hdrincl; /* Include headers ? */
+#ifdef CONFIG_IP_MULTICAST
+ int ip_mc_ttl; /* Multicasting TTL */
+ int ip_mc_loop; /* Loopback */
+ char ip_mc_name[MAX_ADDR_LEN];/* Multicast device name */
+ struct ip_mc_socklist *ip_mc_list; /* Group array */
+#endif
+
+/*
+ * This part is used for the timeout functions (timer.c).
+ */
+
+ int timeout; /* What are we waiting for? */
+ struct timer_list timer; /* This is the TIME_WAIT/receive timer
+ * when we are doing IP
+ */
+ struct timeval stamp;
+
+ /*
+ * Identd
+ */
+
+ struct socket *socket;
+
+ /*
+ * Callbacks
+ */
+
+ void (*state_change)(struct sock *sk);
+ void (*data_ready)(struct sock *sk,int bytes);
+ void (*write_space)(struct sock *sk);
+ void (*error_report)(struct sock *sk);
+
+};
+
+/*
+ * IP protocol blocks we attach to sockets.
+ */
+
+struct proto
+{
+ void (*close)(struct sock *sk, unsigned long timeout);
+ int (*build_header)(struct sk_buff *skb,
+ __u32 saddr,
+ __u32 daddr,
+ struct device **dev, int type,
+ struct options *opt, int len,
+ int tos, int ttl, struct rtable ** rp);
+ int (*connect)(struct sock *sk,
+ struct sockaddr_in *usin, int addr_len);
+ struct sock * (*accept) (struct sock *sk, int flags);
+ void (*queue_xmit)(struct sock *sk,
+ struct device *dev, struct sk_buff *skb,
+ int free);
+ void (*retransmit)(struct sock *sk, int all);
+ void (*write_wakeup)(struct sock *sk);
+ void (*read_wakeup)(struct sock *sk);
+ int (*rcv)(struct sk_buff *buff, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr,
+ int redo, struct inet_protocol *protocol);
+ int (*select)(struct sock *sk, int which,
+ select_table *wait);
+ int (*ioctl)(struct sock *sk, int cmd,
+ unsigned long arg);
+ int (*init)(struct sock *sk);
+ void (*shutdown)(struct sock *sk, int how);
+ int (*setsockopt)(struct sock *sk, int level, int optname,
+ char *optval, int optlen);
+ int (*getsockopt)(struct sock *sk, int level, int optname,
+ char *optval, int *option);
+ int (*sendmsg)(struct sock *sk, struct msghdr *msg, int len,
+ int noblock, int flags);
+ int (*recvmsg)(struct sock *sk, struct msghdr *msg, int len,
+ int noblock, int flags, int *addr_len);
+ int (*bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+ unsigned short max_header;
+ unsigned long retransmits;
+ char name[32];
+ int inuse, highestinuse;
+ struct sock * sock_array[SOCK_ARRAY_SIZE];
+};
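
Purely for orientation (not part of the patch), the generic socket layer reaches a protocol through this vector; a connect request, for instance, boils down to roughly the sketch below, where sk and addr are assumed to come from the caller:

/* Illustrative sketch: dispatch through the per-protocol ops vector. */
static int do_connect(struct sock *sk, struct sockaddr_in *addr)
{
	if (sk->prot->connect == NULL)
		return -EOPNOTSUPP;	/* protocol has no connect operation */
	return sk->prot->connect(sk, addr, sizeof(*addr));
}
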
+
+#define TIME_WRITE 1
+#define TIME_CLOSE 2
+#define TIME_KEEPOPEN 3
+#define TIME_DESTROY 4
+#define TIME_DONE 5 /* Used to absorb those last few packets */
+#define TIME_PROBE0 6
+/*
+ * About 10 seconds
+ */
+#define SOCK_DESTROY_TIME (10*HZ)
+
+
+/*
+ * Sockets 0-1023 can't be bound to unless you are superuser
+ */
+
+#define PROT_SOCK 1024
+
+
+#define SHUTDOWN_MASK 3
+#define RCV_SHUTDOWN 1
+#define SEND_SHUTDOWN 2
+
+/*
+ * Used by processes to "lock" a socket state, so that
+ * interrupts and bottom half handlers won't change it
+ * from under us. It essentially blocks any incoming
+ * packets, so that we won't get any new data or any
+ * packets that change the state of the socket.
+ *
+ * Note the 'barrier()' calls: gcc may not move a lock
+ * "downwards" or a unlock "upwards" when optimizing.
+ */
+extern void __release_sock(struct sock *sk);
+
+static inline void lock_sock(struct sock *sk)
+{
+#if 1
+/* debugging code: the test isn't even 100% correct, but it can catch bugs */
+/* Note that a double lock is ok in theory - it's just _usually_ a bug */
+ if (sk->users) {
+ __label__ here;
+ printk("double lock on socket at %p\n", &&here);
+here:	;	/* empty statement: a label must be attached to a statement */
+ }
+#endif
+ sk->users++;
+ barrier();
+}
+
+static inline void release_sock(struct sock *sk)
+{
+ barrier();
+#if 1
+/* debugging code: remove me when ok */
+ if (sk->users == 0) {
+ __label__ here;
+ sk->users = 1;
+ printk("trying to unlock unlocked socket at %p\n", &&here);
+here:	;	/* empty statement: a label must be attached to a statement */
+ }
+#endif
+ if (!--sk->users)
+ __release_sock(sk);
+}
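
As a usage illustration (not part of the header), process-context code that manipulates socket state would bracket its work with these helpers; handle_frame() is a hypothetical caller:

/* Sketch: while sk->users is non-zero, interrupt-time code is expected to
 * park packets on sk->back_log instead of touching the socket, and
 * __release_sock() drains that backlog when the last user lets go. */
static void handle_frame(struct sock *sk)
{
	lock_sock(sk);		/* sk->users++, with a compiler barrier */
	/* ... inspect and update socket state safely here ... */
	release_sock(sk);	/* calls __release_sock() once users drops to 0 */
}
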
+
+
+extern void destroy_sock(struct sock *sk);
+extern unsigned short get_new_socknum(struct proto *,
+ unsigned short);
+extern void put_sock(unsigned short, struct sock *);
+extern struct sock *get_sock(struct proto *, unsigned short,
+ unsigned long, unsigned short,
+ unsigned long);
+extern struct sock *get_sock_mcast(struct sock *, unsigned short,
+ unsigned long, unsigned short,
+ unsigned long);
+extern struct sock *get_sock_raw(struct sock *, unsigned short,
+ unsigned long, unsigned long);
+
+extern struct sk_buff *sock_wmalloc(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+extern struct sk_buff *sock_rmalloc(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+extern void sock_wfree(struct sock *sk,
+ struct sk_buff *skb);
+extern void sock_rfree(struct sock *sk,
+ struct sk_buff *skb);
+extern unsigned long sock_rspace(struct sock *sk);
+extern unsigned long sock_wspace(struct sock *sk);
+
+extern int sock_setsockopt(struct sock *sk, int level,
+ int op, char *optval,
+ int optlen);
+
+extern int sock_getsockopt(struct sock *sk, int level,
+ int op, char *optval,
+ int *optlen);
+extern struct sk_buff *sock_alloc_send_skb(struct sock *skb,
+ unsigned long size,
+ unsigned long fallback,
+ int noblock,
+ int *errcode);
+
+/*
+ * Queue a received datagram if it will fit. Stream and sequenced
+ * protocols can't normally use this as they need to fit buffers in
+ * and play with them.
+ *
+ * Inlined as it's very short and called for pretty much every
+ * packet ever received.
+ */
+
+extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ unsigned long flags;
+ if(sk->rmem_alloc + skb->truesize >= sk->rcvbuf)
+ return -ENOMEM;
+ save_flags(flags);
+ cli();
+ sk->rmem_alloc+=skb->truesize;
+ skb->sk=sk;
+ restore_flags(flags);
+ skb_queue_tail(&sk->receive_queue,skb);
+ if(!sk->dead)
+ sk->data_ready(sk,skb->len);
+ return 0;
+}
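
A hedged usage sketch (not from the patch): a datagram receive path would hand the skb to this helper and dispose of it itself when the socket's receive allocation is exhausted; my_proto_rcv() is a hypothetical name:

/* Sketch: queue for the owning socket, or drop when sk->rcvbuf is full. */
static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		skb->sk = NULL;
		kfree_skb(skb, FREE_READ);	/* nobody queued it; free it here */
	}
	return 0;
}
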
+
+/*
+ * Recover an error report and clear atomically
+ */
+
+extern __inline__ int sock_error(struct sock *sk)
+{
+ int err=xchg(&sk->err,0);
+ return -err;
+}
+
+/*
+ * Declarations from timer.c
+ */
+
+extern struct sock *timer_base;
+
+extern void delete_timer (struct sock *);
+extern void reset_timer (struct sock *, int, unsigned long);
+extern void net_timer (unsigned long);
+
+
+/*
+ * Enable debug/info messages
+ */
+
+#define NETDEBUG(x) x
+
+#endif /* _SOCK_H */
diff --git a/i386/i386at/gpl/linux/include/net/tcp.h b/i386/i386at/gpl/linux/include/net/tcp.h
new file mode 100644
index 00000000..3c7eb7de
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/tcp.h
@@ -0,0 +1,329 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP module.
+ *
+ * Version: @(#)tcp.h 1.0.5 05/23/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _TCP_H
+#define _TCP_H
+
+#include <linux/tcp.h>
+#include <net/checksum.h>
+
+#define MAX_SYN_SIZE	(44 + MAX_HEADER + 15)
+#define MAX_FIN_SIZE	(40 + MAX_HEADER + 15)
+#define MAX_ACK_SIZE	(40 + MAX_HEADER + 15)
+#define MAX_RESET_SIZE	(40 + MAX_HEADER + 15)
+#define MAX_WINDOW 32767 /* Never offer a window over 32767 without using
+ window scaling (not yet supported). Some poor
+ stacks do signed 16bit maths! */
+#define MIN_WINDOW 2048
+#define MAX_ACK_BACKLOG 2
+#define MIN_WRITE_SPACE 2048
+#define TCP_WINDOW_DIFF 2048
+
+/* urg_data states */
+#define URG_VALID 0x0100
+#define URG_NOTYET 0x0200
+#define URG_READ 0x0400
+
+#define TCP_RETR1 7 /*
+ * This is how many retries it does before it
+ * tries to figure out if the gateway is
+ * down.
+ */
+
+#define TCP_RETR2 15 /*
+ * This should take at least
+ * 90 minutes to time out.
+ */
+
+#define TCP_TIMEOUT_LEN (15*60*HZ) /* should be about 15 mins */
+#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to successfully
+ * close the socket, about 60 seconds */
+#define TCP_FIN_TIMEOUT (3*60*HZ) /* BSD style FIN_WAIT2 deadlock breaker */
+#define TCP_ACK_TIME (3*HZ) /* time to delay before sending an ACK */
+#define TCP_DONE_TIME (5*HZ/2)/* maximum time to wait before actually
+ * destroying a socket */
+#define TCP_WRITE_TIME (30*HZ) /* initial time to wait for an ACK,
+ * after last transmit */
+#define TCP_TIMEOUT_INIT (3*HZ) /* RFC 1122 initial timeout value */
+#define TCP_SYN_RETRIES 10 /* number of times to retry opening a
+ * connection (TCP_RETR2-....) */
+#define TCP_PROBEWAIT_LEN (1*HZ)/* time to wait between probes when
+ * I've got something to write and
+ * there is no window */
+
+#define TCP_NO_CHECK 0 /* turn to one if you want the default
+ * to be no checksum */
+
+
+/*
+ * TCP option
+ */
+
+#define TCPOPT_NOP 1 /* Padding */
+#define TCPOPT_EOL 0 /* End of options */
+#define TCPOPT_MSS 2 /* Segment size negotiating */
+/*
+ * We don't use these yet, but they are for PAWS and big windows
+ */
+#define TCPOPT_WINDOW 3 /* Window scaling */
+#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
+
+
+/*
+ * The next routines deal with comparing 32 bit unsigned ints
+ * and worry about wraparound (automatic with unsigned arithmetic).
+ */
+
+extern __inline int before(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq1-seq2) < 0;
+}
+
+extern __inline int after(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq2-seq1) < 0;
+}
+
+
+/* is s2<=s1<=s3 ? */
+extern __inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
+{
+ return (after(seq1+1, seq2) && before(seq1, seq3+1));
+}
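
A small worked example of the wraparound property (values invented for illustration):

/* Sketch: 0xfffffff0 still counts as "before" 0x00000010 because the
 * signed difference (__s32)(0xfffffff0 - 0x10) is negative, so the
 * comparisons stay correct across the 32-bit wrap. */
static void seq_wrap_example(void)
{
	__u32 old_seq = 0xfffffff0, new_seq = 0x00000010;

	if (before(old_seq, new_seq) && between(old_seq + 8, old_seq, new_seq))
		;	/* both hold: the wrapped range behaves like a linear one */
}
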
+
+static __inline__ int min(unsigned int a, unsigned int b)
+{
+ if (a < b)
+ return(a);
+ return(b);
+}
+
+extern struct proto tcp_prot;
+extern struct tcp_mib tcp_statistics;
+extern struct wait_queue *master_select_wakeup;
+
+extern void tcp_err(int type, int code, unsigned char *header, __u32 daddr,
+ __u32, struct inet_protocol *protocol);
+extern void tcp_shutdown (struct sock *sk, int how);
+extern int tcp_rcv(struct sk_buff *skb, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr, int redo,
+ struct inet_protocol *protocol);
+
+extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+
+extern void tcp_read_wakeup(struct sock *);
+extern void tcp_write_xmit(struct sock *);
+extern void tcp_time_wait(struct sock *);
+extern void tcp_retransmit(struct sock *, int);
+extern void tcp_do_retransmit(struct sock *, int);
+extern void tcp_send_check(struct tcphdr *th, unsigned long saddr,
+ unsigned long daddr, int len, struct sk_buff *skb);
+
+/* tcp_output.c */
+
+extern void tcp_send_probe0(struct sock *);
+extern void tcp_send_partial(struct sock *);
+extern void tcp_write_wakeup(struct sock *);
+extern void tcp_send_fin(struct sock *sk);
+extern void tcp_send_synack(struct sock *, struct sock *, struct sk_buff *);
+extern void tcp_send_skb(struct sock *, struct sk_buff *);
+extern void tcp_send_ack(u32, u32, struct sock *sk, struct tcphdr *th, u32);
+extern void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
+ struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl);
+
+extern void tcp_enqueue_partial(struct sk_buff *, struct sock *);
+extern struct sk_buff * tcp_dequeue_partial(struct sock *);
+
+/* tcp_input.c */
+extern void tcp_cache_zap(void);
+
+/* tcp_timer.c */
+#define tcp_reset_msl_timer(x,y,z) reset_timer(x,y,z)
+extern void tcp_reset_xmit_timer(struct sock *, int, unsigned long);
+extern void tcp_retransmit_timer(unsigned long);
+
+/*
+ * Default sequence number picking algorithm.
+ * As close as possible to RFC 793, which
+ * suggests using a 250kHz clock.
+ * Further reading shows this assumes 2MB/s networks.
+ * For 10MB/s ethernet, a 1MHz clock is appropriate.
+ * That's funny, Linux has one built in! Use it!
+ */
+
+static inline u32 tcp_init_seq(void)
+{
+ struct timeval tv;
+ do_gettimeofday(&tv);
+ return tv.tv_usec+tv.tv_sec*1000000;
+}
+
+/*
+ * This function returns the amount that we can raise the
+ * usable window based on the following constraints
+ *
+ * 1. The window can never be shrunk once it is offered (RFC 793)
+ * 2. We limit memory per socket
+ */
+
+static __inline__ unsigned short tcp_raise_window(struct sock *sk)
+{
+ long free_space = sock_rspace(sk);
+ long window;
+
+ if (free_space > 1024)
+ free_space &= ~0x3FF; /* make free space a multiple of 1024 */
+
+ if(sk->window_clamp)
+ free_space = min(sk->window_clamp, free_space);
+
+ /*
+ * compute the actual window i.e.
+ * old_window - received_bytes_on_that_win
+ */
+
+ window = sk->window - (sk->acked_seq - sk->lastwin_seq);
+
+ if (sk->mss == 0)
+ sk->mss = sk->mtu;
+
+ if ( window < 0 ) {
+ window = 0;
+ printk(KERN_DEBUG "TRW: win < 0 w=%d 1=%u 2=%u\n",
+ sk->window, sk->acked_seq, sk->lastwin_seq);
+ }
+
+ if ( (free_space - window) >= min(sk->mss, MAX_WINDOW/2) )
+ return ((free_space - window) / sk->mss) * sk->mss;
+
+ return 0;
+}
+
+static __inline__ unsigned short tcp_select_window(struct sock *sk)
+{
+ long free_space = sock_rspace(sk);
+ long window;
+
+ if (free_space > 1024)
+ free_space &= ~0x3FF; /* make free space a multiple of 1024 */
+
+ if (sk->window_clamp)
+ free_space = min(sk->window_clamp, free_space);
+
+ /*
+ * compute the actual window i.e.
+ * old_window - received_bytes_on_that_win
+ */
+
+ if (sk->mss == 0)
+ sk->mss = sk->mtu;
+
+ window = sk->window - (sk->acked_seq - sk->lastwin_seq);
+
+ if ( window < 0 ) {
+ window = 0;
+ printk(KERN_DEBUG "TSW: win < 0 w=%d 1=%u 2=%u\n",
+ sk->window, sk->acked_seq, sk->lastwin_seq);
+ }
+
+ /*
+ * RFC 1122:
+ * "the suggested [SWS] avoidance algoritm for the receiver is to keep
+ * RECV.NEXT + RCV.WIN fixed until:
+ * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
+ *
+ * i.e. don't raise the right edge of the window until you can't raise
+ * it MSS bytes
+ */
+
+ if ( (free_space - window) >= min(sk->mss, MAX_WINDOW/2) )
+ window += ((free_space - window) / sk->mss) * sk->mss;
+
+ sk->window = window;
+ sk->lastwin_seq = sk->acked_seq;
+
+ return sk->window;
+}
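
A quick numeric illustration of the rule above (assumed values, not from the source):

/* Sketch: with mss = 1460, window = 1000 and free_space = 8192,
 * free_space - window = 7192 >= min(1460, MAX_WINDOW/2), so the window
 * is advanced by (7192 / 1460) * 1460 = 5840 bytes, i.e. to 6840.
 * Had the slack been smaller than one MSS, the right edge would not move. */
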
+
+/*
+ * List all states of a TCP socket that can be viewed as a "connected"
+ * state. This now includes TCP_SYN_RECV, although I am not yet fully
+ * convinced that this is the solution for the 'getpeername(2)'
+ * problem. Thanks to Stephen A. Wood <saw@cebaf.gov> -FvK
+ */
+
+extern __inline const int tcp_connected(const int state)
+{
+ return(state == TCP_ESTABLISHED || state == TCP_CLOSE_WAIT ||
+ state == TCP_FIN_WAIT1 || state == TCP_FIN_WAIT2 ||
+ state == TCP_SYN_RECV);
+}
+
+/*
+ * Calculate(/check) TCP checksum
+ */
+static __inline__ u16 tcp_check(struct tcphdr *th, int len,
+ unsigned long saddr, unsigned long daddr, unsigned long base)
+{
+ return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
+}
+
+#undef STATE_TRACE
+
+#ifdef STATE_TRACE
+static char *statename[]={
+ "Unused","Established","Syn Sent","Syn Recv",
+ "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
+ "Close Wait","Last ACK","Listen","Closing"
+};
+#endif
+
+static __inline__ void tcp_set_state(struct sock *sk, int state)
+{
+ int oldstate = sk->state;
+
+ sk->state = state;
+
+#ifdef STATE_TRACE
+ if(sk->debug)
+ printk("TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
+#endif
+
+ switch (state) {
+ case TCP_ESTABLISHED:
+ if (oldstate != TCP_ESTABLISHED) {
+ tcp_statistics.TcpCurrEstab++;
+ /* This is a hack but it doesn't occur often and it's going to
+ be a real pain to fix nicely */
+ if (oldstate == TCP_SYN_RECV)
+ wake_up_interruptible(&master_select_wakeup);
+ }
+ break;
+
+ case TCP_CLOSE:
+ tcp_cache_zap();
+ /* Should be about 2 rtt's */
+ reset_timer(sk, TIME_DONE, min(sk->rtt * 2, TCP_DONE_TIME));
+ /* fall through */
+ default:
+ if (oldstate==TCP_ESTABLISHED)
+ tcp_statistics.TcpCurrEstab--;
+ }
+}
+
+#endif /* _TCP_H */
diff --git a/i386/i386at/gpl/linux/include/net/udp.h b/i386/i386at/gpl/linux/include/net/udp.h
new file mode 100644
index 00000000..13735d17
--- /dev/null
+++ b/i386/i386at/gpl/linux/include/net/udp.h
@@ -0,0 +1,52 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the UDP module.
+ *
+ * Version: @(#)udp.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Alan Cox : Turned on udp checksums. I don't want to
+ * chase 'memory corruption' bugs that aren't!
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UDP_H
+#define _UDP_H
+
+#include <linux/udp.h>
+
+
+#define UDP_NO_CHECK 0
+
+
+extern struct proto udp_prot;
+
+
+extern void udp_err(int type, int code, unsigned char *header, __u32 daddr,
+ __u32 saddr, struct inet_protocol *protocol);
+extern void udp_send_check(struct udphdr *uh, __u32 saddr,
+ __u32 daddr, int len, struct sock *sk);
+extern int udp_recvfrom(struct sock *sk, unsigned char *to,
+ int len, int noblock, unsigned flags,
+ struct sockaddr_in *sin, int *addr_len);
+extern int udp_read(struct sock *sk, unsigned char *buff,
+ int len, int noblock, unsigned flags);
+extern int udp_connect(struct sock *sk,
+ struct sockaddr_in *usin, int addr_len);
+extern int udp_rcv(struct sk_buff *skb, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr, int redo,
+ struct inet_protocol *protocol);
+extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+extern void udp_cache_zap(void); /* Remove udp last socket cache */
+
+#endif /* _UDP_H */
diff --git a/i386/i386at/gpl/linux/linux_autoirq.c b/i386/i386at/gpl/linux/linux_autoirq.c
new file mode 100644
index 00000000..2e3d4e61
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_autoirq.c
@@ -0,0 +1,161 @@
+/*
+ * Linux auto-irq support.
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Written 1994 by Donald Becker.
+ *
+ * The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ * Center of Excellence in Space Data and Information Sciences
+ * Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+ *
+ * This code is a general-purpose IRQ line detector for devices with
+ * jumpered IRQ lines. If you can make the device raise an IRQ (and
+ * that IRQ line isn't already being used), these routines will tell
+ * you what IRQ line it's using -- perfect for those oh-so-cool boot-time
+ * device probes!
+ *
+ * To use this, first call autoirq_setup(timeout). TIMEOUT is how many
+ * 'jiffies' (1/100 sec.) to detect other devices that have active IRQ lines,
+ * and can usually be zero at boot. 'autoirq_setup()' returns the bit
+ * vector of nominally-available IRQ lines (lines may be physically in-use,
+ * but not yet registered to a device).
+ * Next, set up your device to trigger an interrupt.
+ * Finally call autoirq_report(TIMEOUT) to find out which IRQ line was
+ * most recently active. The TIMEOUT should usually be zero, but may
+ * be set to the number of jiffies to wait for a slow device to raise an IRQ.
+ *
+ * The idea of using the setup timeout to filter out bogus IRQs came from
+ * the serial driver.
+ */
+
+#include <i386/pic.h>
+#include <i386/ipl.h>
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+
+#include <asm/bitops.h>
+#include <asm/system.h>
+
+/*
+ * IRQ to network device map.
+ */
+void *irq2dev_map[16];
+
+/*
+ * Set of fixed IRQs
+ * (fpu, rtc, com1, PIC slave cascade, keyboard, timer).
+ */
+int irqs_busy = 0x2147;
+
+static volatile int irq_number; /* latest irq found */
+static volatile int irq_bitmap; /* bitmap of IRQs found */
+static int irq_handled; /* irq lines we have a handler on */
+
+extern unsigned long loops_per_sec;
+
+/*
+ * Interrupt handler when probing an IRQ.
+ */
+static void
+autoirq_probe(irq)
+ int irq;
+{
+ /*
+ * Mark this IRQ as the last one
+ * that interrupted and disable it.
+ */
+ irq_number = irq;
+ set_bit(irq, (void *)&irq_bitmap);
+ disable_irq(irq);
+}
+
+/*
+ * Set up for auto-irq.
+ */
+int
+autoirq_setup(waittime)
+ int waittime;
+{
+ int i, mask;
+ int timeout = jiffies + waittime;
+ int boguscount = (waittime * loops_per_sec) / 100;
+
+ /*
+ * Allocate all possible IRQs.
+ */
+ irq_handled = 0;
+ for (i = 0; i < 16; i++) {
+ if (test_bit(i, (void *)&irqs_busy) == 0
+ && request_irq(i, autoirq_probe, 0, 0) == 0)
+ set_bit(i, (void *)&irq_handled);
+ }
+
+ irq_number = 0;
+ irq_bitmap = 0;
+
+ /*
+ * Hang out at least <waittime>
+ * jiffies waiting for bogus IRQ hits.
+ */
+ while (timeout > jiffies && --boguscount > 0)
+ ;
+
+ /*
+ * Free IRQs that caused bogus hits.
+ */
+ for (i = 0, mask = 0x01; i < 16; i++, mask <<= 1) {
+ if (irq_bitmap & irq_handled & mask) {
+ irq_handled &= ~mask;
+ free_irq(i);
+ }
+ }
+
+ return (irq_handled);
+}
+
+/*
+ * Return the last IRQ that caused an interrupt.
+ */
+int
+autoirq_report(waittime)
+ int waittime;
+{
+ int i;
+ int timeout = jiffies + waittime;
+ int boguscount = (waittime * loops_per_sec) / 100;
+
+ /*
+ * Hang out at least <waittime>
+ * jiffies waiting for the IRQ.
+ */
+ while (timeout > jiffies && --boguscount > 0)
+ if (irq_number)
+ break;
+
+ /*
+ * Retract the IRQ handlers that we handled.
+ */
+ for (i = 0; i < 16; i++) {
+ if (test_bit(i, (void *)&irq_handled))
+ free_irq(i);
+ }
+
+ return (irq_number);
+}
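
Purely as an illustration of the setup/trigger/report sequence described in the header comment (not part of the patch); trigger_test_irq() stands in for whatever device-specific poke makes the board interrupt:

extern void trigger_test_irq(void);	/* hypothetical device poke */

/* Sketch: detect which IRQ line a jumpered board is using. */
static int probe_board_irq(void)
{
	int irq;

	if (autoirq_setup(0) == 0)
		return 0;		/* no free IRQ lines left to test */
	trigger_test_irq();		/* make the board raise its interrupt */
	irq = autoirq_report(4);	/* wait up to 4 jiffies for the line to fire */
	if (irq == 0)
		printk("autoirq: board did not interrupt\n");
	return irq;
}
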
diff --git a/i386/i386at/gpl/linux/linux_block.c b/i386/i386at/gpl/linux/linux_block.c
new file mode 100644
index 00000000..e06cc403
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_block.c
@@ -0,0 +1,2579 @@
+/*
+ * Linux block driver support.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/drivers/block/ll_rw_blk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
+ */
+
+/*
+ * linux/fs/block_dev.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * linux/fs/buffer.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+#include <mach/mach_types.h>
+#include <mach/kern_return.h>
+#include <mach/mig_errors.h>
+#include <mach/port.h>
+#include <mach/vm_param.h>
+#include <mach/notify.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/disk_status.h>
+#include "device_reply.h"
+
+#include <i386at/dev_hdr.h>
+#include <i386at/device_emul.h>
+#include <i386at/disk.h>
+
+#include <i386at/gpl/linux/linux_emul.h>
+
+#define MACH_INCLUDE
+#include <linux/fs.h>
+#include <linux/blk.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/major.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+#include <linux/malloc.h>
+
+/* Location of VTOC in units of sectors (512 bytes). */
+#define PDLOCATION 29
+
+/* Linux kernel variables. */
+
+/* One of these exists for each
+ driver associated with a major number. */
+struct device_struct
+{
+ const char *name; /* device name */
+ struct file_operations *fops; /* operations vector */
+ int busy:1; /* driver is being opened/closed */
+ int want:1; /* someone wants to open/close driver */
+ struct gendisk *gd; /* DOS partition information */
+ int *default_slice; /* what slice to use when none is given */
+ struct disklabel **label; /* disklabels for each DOS partition */
+};
+
+/* An entry in the Mach name to Linux major number conversion table. */
+struct name_map
+{
+ const char *name; /* Mach name for device */
+ unsigned major; /* Linux major number */
+ unsigned unit; /* Linux unit number */
+ int read_only; /* 1 if device is read only */
+};
+
+/* Driver operation table. */
+static struct device_struct blkdevs[MAX_BLKDEV];
+
+/* Driver request function table. */
+struct blk_dev_struct blk_dev[MAX_BLKDEV] =
+{
+ { NULL, NULL }, /* 0 no_dev */
+ { NULL, NULL }, /* 1 dev mem */
+ { NULL, NULL }, /* 2 dev fd */
+ { NULL, NULL }, /* 3 dev ide0 or hd */
+ { NULL, NULL }, /* 4 dev ttyx */
+ { NULL, NULL }, /* 5 dev tty */
+ { NULL, NULL }, /* 6 dev lp */
+ { NULL, NULL }, /* 7 dev pipes */
+ { NULL, NULL }, /* 8 dev sd */
+ { NULL, NULL }, /* 9 dev st */
+ { NULL, NULL }, /* 10 */
+ { NULL, NULL }, /* 11 */
+ { NULL, NULL }, /* 12 */
+ { NULL, NULL }, /* 13 */
+ { NULL, NULL }, /* 14 */
+ { NULL, NULL }, /* 15 */
+ { NULL, NULL }, /* 16 */
+ { NULL, NULL }, /* 17 */
+ { NULL, NULL }, /* 18 */
+ { NULL, NULL }, /* 19 */
+ { NULL, NULL }, /* 20 */
+ { NULL, NULL }, /* 21 */
+ { NULL, NULL } /* 22 dev ide1 */
+};
+
+/*
+ * blk_size contains the size of all block-devices in units of 1024 byte
+ * sectors:
+ *
+ * blk_size[MAJOR][MINOR]
+ *
+ * if (!blk_size[MAJOR]) then no minor size checking is done.
+ */
+int *blk_size[MAX_BLKDEV] = { NULL, NULL, };
+
+/*
+ * blksize_size contains the size of all block-devices:
+ *
+ * blksize_size[MAJOR][MINOR]
+ *
+ * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
+ */
+int *blksize_size[MAX_BLKDEV] = { NULL, NULL, };
+
+/*
+ * hardsect_size contains the size of the hardware sector of a device.
+ *
+ * hardsect_size[MAJOR][MINOR]
+ *
+ * if (!hardsect_size[MAJOR])
+ * then 512 bytes is assumed.
+ * else
+ * sector_size is hardsect_size[MAJOR][MINOR]
+ * This is currently set by some scsi devices and read by the msdos fs driver.
+ * This might have some uses later.
+ */
+int *hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
+
+/* This specifies how many sectors to read ahead on the disk.
+ This is unused in Mach. It is here to make drivers compile. */
+int read_ahead[MAX_BLKDEV] = {0, };
+
+/* Use to wait on when there are no free requests.
+ This is unused in Mach. It is here to make drivers compile. */
+struct wait_queue *wait_for_request = NULL;
+
+/* Initialize block drivers. */
+void
+blk_dev_init ()
+{
+#ifdef CONFIG_BLK_DEV_IDE
+ ide_init ();
+#endif
+#ifdef CONFIG_BLK_DEV_FD
+ floppy_init ();
+#endif
+}
+
+/* Return 1 if major number MAJOR corresponds to a disk device. */
+static inline int
+disk_major (int major)
+{
+ return (major == IDE0_MAJOR
+ || major == IDE1_MAJOR
+ || major == IDE2_MAJOR
+ || major == IDE3_MAJOR
+ || major == SCSI_DISK_MAJOR);
+}
+
+/* Linux kernel block support routines. */
+
+/* Register a driver for major number MAJOR,
+ with name NAME, and operations vector FOPS. */
+int
+register_blkdev (unsigned major, const char *name,
+ struct file_operations *fops)
+{
+ int err = 0;
+
+ if (major == 0)
+ {
+ for (major = MAX_BLKDEV - 1; major > 0; major--)
+ if (blkdevs[major].fops == NULL)
+ goto out;
+ return -LINUX_EBUSY;
+ }
+ if (major >= MAX_BLKDEV)
+ return -LINUX_EINVAL;
+ if (blkdevs[major].fops && blkdevs[major].fops != fops)
+ return -LINUX_EBUSY;
+
+out:
+ blkdevs[major].name = name;
+ blkdevs[major].fops = fops;
+ blkdevs[major].busy = 0;
+ blkdevs[major].want = 0;
+ blkdevs[major].gd = NULL;
+ blkdevs[major].default_slice = NULL;
+ blkdevs[major].label = NULL;
+ return 0;
+}
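
A hedged sketch of how a driver hooks itself in (MYDEV_MAJOR, mydev_fops and mydev_request are invented for the example):

#define MYDEV_MAJOR 42				/* hypothetical major number */

static struct file_operations mydev_fops;	/* open/release/etc. filled in elsewhere */
static void mydev_request (void);		/* hypothetical strategy routine */

/* Sketch: claim the major number and install the request routine. */
static void
mydev_init (void)
{
  if (register_blkdev (MYDEV_MAJOR, "mydev", &mydev_fops) < 0)
    printf ("mydev: unable to get major %d\n", MYDEV_MAJOR);
  else
    blk_dev[MYDEV_MAJOR].request_fn = mydev_request;
}
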
+
+/* Unregister the driver associated with
+ major number MAJOR and having the name NAME. */
+int
+unregister_blkdev (unsigned major, const char *name)
+{
+ int err;
+
+ if (major >= MAX_BLKDEV)
+ return -LINUX_EINVAL;
+ if (! blkdevs[major].fops || strcmp (blkdevs[major].name, name))
+ return -LINUX_EINVAL;
+ blkdevs[major].fops = NULL;
+ if (blkdevs[major].default_slice)
+ {
+ assert (blkdevs[major].gd);
+ kfree ((vm_offset_t) blkdevs[major].default_slice,
+ sizeof (int) * blkdevs[major].gd->max_nr);
+ }
+ if (blkdevs[major].label)
+ {
+ assert (blkdevs[major].gd);
+ kfree ((vm_offset_t) blkdevs[major].label,
+ (sizeof (struct disklabel *)
+ * blkdevs[major].gd->max_p * blkdevs[major].gd->max_nr));
+ }
+ return 0;
+}
+
+/* One of these is associated with
+ each page allocated by the buffer management routines. */
+struct pagehdr
+{
+ unsigned char busy; /* page header is in use */
+ unsigned char avail; /* number of blocks available in page */
+ unsigned short bitmap; /* free space bitmap */
+ void *blks; /* the actual page */
+ struct pagehdr *next; /* next header in list */
+};
+
+/* This structure describes the different block sizes. */
+struct bufsize
+{
+ unsigned short size; /* size of block */
+ unsigned short avail; /* # available blocks */
+ struct pagehdr *pages; /* page list */
+};
+
+/* List of supported block sizes. */
+static struct bufsize bufsizes[] =
+{
+ { 512, 0, NULL },
+ { 1024, 0, NULL },
+ { 2048, 0, NULL },
+ { 4096, 0, NULL },
+};
+
+/* Page headers. */
+static struct pagehdr pagehdrs[50]; /* XXX: needs to be dynamic */
+
+/* Find the block size that is greater than or equal to SIZE. */
+static struct bufsize *
+get_bufsize (int size)
+{
+ struct bufsize *bs, *ebs;
+
+ bs = &bufsizes[0];
+ ebs = &bufsizes[sizeof (bufsizes) / sizeof (bufsizes[0])];
+ while (bs < ebs)
+ {
+ if (bs->size >= size)
+ return bs;
+ bs++;
+ }
+
+ panic ("%s:%d: alloc_buffer: bad buffer size %d", __FILE__, __LINE__, size);
+}
+
+/* Free all pages that are not in use.
+ Called by __get_free_pages when pages are running low. */
+void
+collect_buffer_pages ()
+{
+ struct bufsize *bs, *ebs;
+ struct pagehdr *ph, **prev_ph;
+
+ bs = &bufsizes[0];
+ ebs = &bufsizes[sizeof (bufsizes) / sizeof (bufsizes[0])];
+ while (bs < ebs)
+ {
+ if (bs->avail >= PAGE_SIZE / bs->size)
+ {
+ ph = bs->pages;
+ prev_ph = &bs->pages;
+ while (ph)
+ if (ph->avail == PAGE_SIZE / bs->size)
+ {
+ bs->avail -= ph->avail;
+ ph->busy = 0;
+ *prev_ph = ph->next;
+ free_pages ((unsigned long) ph->blks, 0);
+ ph = *prev_ph;
+ }
+ else
+ {
+ prev_ph = &ph->next;
+ ph = ph->next;
+ }
+ }
+ bs++;
+ }
+}
+
+/* Allocate a buffer of at least SIZE bytes. */
+static void *
+alloc_buffer (int size)
+{
+ int i;
+ unsigned flags;
+ struct bufsize *bs;
+ struct pagehdr *ph, *eph;
+
+ bs = get_bufsize (size);
+ save_flags (flags);
+ cli ();
+ if (bs->avail == 0)
+ {
+ ph = &pagehdrs[0];
+ eph = &pagehdrs[sizeof (pagehdrs) / sizeof (pagehdrs[0])];
+ while (ph < eph && ph->busy)
+ ph++;
+ if (ph == eph)
+ {
+ restore_flags (flags);
+ printf ("%s:%d: alloc_buffer: ran out of page headers\n",
+ __FILE__, __LINE__);
+ return NULL;
+ }
+ ph->blks = (void *) __get_free_pages (GFP_KERNEL, 0, ~0UL);
+ if (! ph->blks)
+ {
+ restore_flags (flags);
+ return NULL;
+ }
+ ph->busy = 1;
+ ph->avail = PAGE_SIZE / bs->size;
+ ph->bitmap = 0;
+ ph->next = bs->pages;
+ bs->pages = ph;
+ bs->avail += ph->avail;
+ }
+ for (ph = bs->pages; ph; ph = ph->next)
+ if (ph->avail)
+ for (i = 0; i < PAGE_SIZE / bs->size; i++)
+ if ((ph->bitmap & (1 << i)) == 0)
+ {
+ bs->avail--;
+ ph->avail--;
+ ph->bitmap |= 1 << i;
+ restore_flags (flags);
+ return ph->blks + i * bs->size;
+ }
+
+ panic ("%s:%d: alloc_buffer: list destroyed", __FILE__, __LINE__);
+}
+
+/* Free buffer P of SIZE bytes previously allocated by alloc_buffer. */
+static void
+free_buffer (void *p, int size)
+{
+ int i;
+ unsigned flags;
+ struct bufsize *bs;
+ struct pagehdr *ph;
+
+ bs = get_bufsize (size);
+ save_flags (flags);
+ cli ();
+ for (ph = bs->pages; ph; ph = ph->next)
+ if (p >= ph->blks && p < ph->blks + PAGE_SIZE)
+ break;
+ assert (ph);
+ i = (int) (p - ph->blks) / bs->size;
+ assert (ph->bitmap & (1 << i));
+ ph->bitmap &= ~(1 << i);
+ ph->avail++;
+ bs->avail++;
+ restore_flags (flags);
+}
+
+/* Allocate a buffer of SIZE bytes and
+ associate it with block number BLOCK of device DEV. */
+struct buffer_head *
+getblk (kdev_t dev, int block, int size)
+{
+ struct buffer_head *bh;
+
+ assert (size <= PAGE_SIZE);
+
+ bh = linux_kmalloc (sizeof (struct buffer_head), GFP_KERNEL);
+ if (! bh)
+ return NULL;
+ bh->b_data = alloc_buffer (size);
+ if (! bh->b_data)
+ {
+ linux_kfree (bh);
+ return NULL;
+ }
+ bh->b_dev = dev;
+ bh->b_size = size;
+ bh->b_state = 1 << BH_Lock;
+ bh->b_blocknr = block;
+ bh->b_page_list = NULL;
+ bh->b_request = NULL;
+ bh->b_reqnext = NULL;
+ bh->b_wait = NULL;
+ bh->b_sem = NULL;
+ return bh;
+}
+
+/* Release buffer BH previously allocated by getblk. */
+void
+__brelse (struct buffer_head *bh)
+{
+ if (bh->b_request)
+ linux_kfree (bh->b_request);
+ free_buffer (bh->b_data, bh->b_size);
+ linux_kfree (bh);
+}
+
+/* Check for I/O errors upon completion of I/O operation RW
+ on the buffer list BH. The number of buffers is NBUF.
+ Copy any data from bounce buffers and free them. */
+static int
+check_for_error (int rw, int nbuf, struct buffer_head **bh)
+{
+ int err;
+ struct request *req;
+
+ req = bh[0]->b_request;
+ if (! req)
+ {
+ while (--nbuf >= 0)
+ if (bh[nbuf]->b_page_list)
+ {
+ bh[nbuf]->b_page_list = NULL;
+ free_buffer (bh[nbuf]->b_data, bh[nbuf]->b_size);
+ }
+ return -LINUX_ENOMEM;
+ }
+
+ bh[0]->b_request = NULL;
+ err = 0;
+
+ while (--nbuf >= 0)
+ {
+ struct buffer_head *bhp = bh[nbuf];
+
+ if (bhp->b_page_list)
+ {
+ if (rw == READ && buffer_uptodate (bhp))
+ {
+ int amt;
+ vm_page_t *pages = bhp->b_page_list;
+
+ amt = PAGE_SIZE - bhp->b_off;
+ if (amt > bhp->b_usrcnt)
+ amt = bhp->b_usrcnt;
+ memcpy ((void *) pages[bhp->b_index]->phys_addr + bhp->b_off,
+ bhp->b_data, amt);
+ if (amt < bhp->b_usrcnt)
+ memcpy ((void *) pages[bhp->b_index + 1]->phys_addr,
+ bhp->b_data + amt, bhp->b_usrcnt - amt);
+ }
+ bhp->b_page_list = NULL;
+ free_buffer (bhp->b_data, bhp->b_size);
+ }
+ if (! buffer_uptodate (bhp))
+ err = -LINUX_EIO;
+ }
+
+ linux_kfree (req);
+ return err;
+}
+
+/* Allocate a buffer of SIZE bytes and fill it with data
+ from device DEV starting at block number BLOCK. */
+struct buffer_head *
+bread (kdev_t dev, int block, int size)
+{
+ int err;
+ struct buffer_head *bh;
+
+ bh = getblk (dev, block, size);
+ if (! bh)
+ return NULL;
+ ll_rw_block (READ, 1, &bh);
+ wait_on_buffer (bh);
+ err = check_for_error (READ, 1, &bh);
+ if (err)
+ {
+ __brelse (bh);
+ return NULL;
+ }
+ return bh;
+}
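
A small usage sketch (not part of the patch): read one 1 KB block from a device and release it; read_first_block() is a hypothetical caller:

/* Sketch: bread() allocates the buffer, issues the read and waits for it;
   __brelse() returns the buffer and its request structure. */
static int
read_first_block (kdev_t dev)
{
  struct buffer_head *bh = bread (dev, 0, 1024);

  if (! bh)
    return -LINUX_EIO;		/* I/O error or out of buffers */
  /* ... inspect bh->b_data (1024 bytes) ... */
  __brelse (bh);
  return 0;
}
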
+
+/* Return the block size for device DEV in *BSIZE and
+ log2(block size) in *BSHIFT. */
+static inline void
+get_block_size (kdev_t dev, int *bsize, int *bshift)
+{
+ int i, size, shift;
+
+ size = BLOCK_SIZE;
+ if (blksize_size[MAJOR (dev)]
+ && blksize_size[MAJOR (dev)][MINOR (dev)])
+ size = blksize_size[MAJOR (dev)][MINOR (dev)];
+ for (i = size, shift = 0; i != 1; shift++, i >>= 1)
+ ;
+ *bsize = size;
+ *bshift = shift;
+}
+
+/* Enqueue request REQ on a driver's queue. */
+static inline void
+enqueue_request (struct request *req)
+{
+ struct request *tmp;
+ struct blk_dev_struct *dev;
+
+ dev = blk_dev + MAJOR (req->rq_dev);
+ cli ();
+ tmp = dev->current_request;
+ if (! tmp)
+ {
+ dev->current_request = req;
+ (*dev->request_fn) ();
+ sti ();
+ return;
+ }
+ while (tmp->next)
+ {
+ if ((IN_ORDER (tmp, req) || ! IN_ORDER (tmp, tmp->next))
+ && IN_ORDER (req, tmp->next))
+ break;
+ tmp = tmp->next;
+ }
+ req->next = tmp->next;
+ tmp->next = req;
+ if (scsi_major (MAJOR (req->rq_dev)))
+ (*dev->request_fn) ();
+ sti ();
+}
+
+/* Perform the I/O operation RW on the buffer list BH
+ containing NR buffers. */
+void
+ll_rw_block (int rw, int nr, struct buffer_head **bh)
+{
+ int i, bsize, bshift;
+ unsigned major;
+ struct request *r;
+
+ r = (struct request *) linux_kmalloc (sizeof (struct request), GFP_KERNEL);
+ if (! r)
+ {
+ bh[0]->b_request = NULL;
+ return;
+ }
+ bh[0]->b_request = r;
+
+ major = MAJOR (bh[0]->b_dev);
+ assert (major < MAX_BLKDEV);
+
+ get_block_size (bh[0]->b_dev, &bsize, &bshift);
+ assert (bsize <= PAGE_SIZE);
+
+ for (i = 0, r->nr_sectors = 0; i < nr - 1; i++)
+ {
+ r->nr_sectors += bh[i]->b_size >> 9;
+ bh[i]->b_reqnext = bh[i + 1];
+ }
+ r->nr_sectors += bh[i]->b_size >> 9;
+ bh[i]->b_reqnext = NULL;
+
+ r->rq_status = RQ_ACTIVE;
+ r->rq_dev = bh[0]->b_dev;
+ r->cmd = rw;
+ r->errors = 0;
+ r->sector = bh[0]->b_blocknr << (bshift - 9);
+ r->current_nr_sectors = bh[0]->b_size >> 9;
+ r->buffer = bh[0]->b_data;
+ r->sem = bh[0]->b_sem;
+ r->bh = bh[0];
+ r->bhtail = bh[nr - 1];
+ r->next = NULL;
+
+ enqueue_request (r);
+}
+
+/* Maximum amount of data to write per invocation of the driver. */
+#define WRITE_MAXPHYS (VM_MAP_COPY_PAGE_LIST_MAX << PAGE_SHIFT)
+#define WRITE_MAXPHYSPG (WRITE_MAXPHYS >> PAGE_SHIFT)
+
+int linux_block_write_trace = 0;
+
+/* Write COUNT bytes of data from user buffer BUF
+ to device specified by INODE at location specified by FILP. */
+int
+block_write (struct inode *inode, struct file *filp,
+ const char *buf, int count)
+{
+ char *p;
+ int i, bsize, bmask, bshift;
+ int err = 0, have_page_list = 1;
+ int resid = count, unaligned;
+ int page_index, pages, amt, cnt, nbuf;
+ unsigned blk;
+ vm_map_copy_t copy;
+ struct request req;
+ struct semaphore sem;
+ struct name_map *np = filp->f_np;
+ struct buffer_head *bh, *bhp, **bhlist;
+
+ /* Compute device block size. */
+ get_block_size (inode->i_rdev, &bsize, &bshift);
+ assert (bsize <= PAGE_SIZE);
+ bmask = bsize - 1;
+
+ copy = (vm_map_copy_t) buf;
+ assert (copy);
+ assert (copy->type == VM_MAP_COPY_PAGE_LIST);
+
+ p = (char *) copy->offset;
+ pages = copy->cpy_npages;
+ blk = (filp->f_pos + bmask) >> bshift;
+
+ if (linux_block_write_trace)
+ printf ("block_write: at %d: f_pos 0x%x, count %d, blk 0x%x, p 0x%x\n",
+ __LINE__, (unsigned) filp->f_pos, count, blk, p);
+
+ /* Allocate buffer headers. */
+ nbuf = ((round_page ((vm_offset_t) p + resid) - trunc_page ((vm_offset_t) p))
+ >> PAGE_SHIFT);
+ if (nbuf > WRITE_MAXPHYSPG)
+ nbuf = WRITE_MAXPHYSPG;
+ if ((filp->f_pos & bmask) || ((int) p & PAGE_MASK))
+ nbuf *= 2;
+ bh = (struct buffer_head *) kalloc ((sizeof (*bh) + sizeof (*bhlist))
+ * nbuf);
+ if (! bh)
+ {
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+ bhlist = (struct buffer_head **) (bh + nbuf);
+
+ /* Write any partial block. */
+ if (filp->f_pos & bmask)
+ {
+ char *b, *q;
+ int use_req;
+
+ use_req = (disk_major (MAJOR (inode->i_rdev)) && ! np->read_only);
+
+ amt = bsize - (filp->f_pos & bmask);
+ if (amt > resid)
+ amt = resid;
+
+ if (linux_block_write_trace)
+ printf ("block_write: at %d: amt %d, resid %d\n",
+ __LINE__, amt, resid);
+
+ if (use_req)
+ {
+ i = (amt + 511) & ~511;
+ req.buffer = b = alloc_buffer (i);
+ if (! b)
+ {
+ printf ("%s:%d: block_write: ran out of buffers\n",
+ __FILE__, __LINE__);
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+ req.sector = filp->f_pos >> 9;
+ req.nr_sectors = i >> 9;
+ req.current_nr_sectors = i >> 9;
+ req.rq_status = RQ_ACTIVE;
+ req.rq_dev = inode->i_rdev;
+ req.cmd = READ;
+ req.errors = 0;
+ req.sem = &sem;
+ req.bh = NULL;
+ req.bhtail = NULL;
+ req.next = NULL;
+
+ sem.count = 0;
+ sem.wait = NULL;
+
+ enqueue_request (&req);
+ __down (&sem);
+
+ if (req.errors)
+ {
+ free_buffer (b, i);
+ err = -LINUX_EIO;
+ goto out;
+ }
+ q = b + (filp->f_pos & 511);
+ }
+ else
+ {
+ i = bsize;
+ bhp = bh;
+ bhp->b_data = b = alloc_buffer (i);
+ if (! b)
+ {
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+ bhp->b_blocknr = filp->f_pos >> bshift;
+ bhp->b_dev = inode->i_rdev;
+ bhp->b_size = bsize;
+ bhp->b_state = 1 << BH_Lock;
+ bhp->b_page_list = NULL;
+ bhp->b_request = NULL;
+ bhp->b_reqnext = NULL;
+ bhp->b_wait = NULL;
+ bhp->b_sem = NULL;
+
+ ll_rw_block (READ, 1, &bhp);
+ wait_on_buffer (bhp);
+ err = check_for_error (READ, 1, &bhp);
+ if (err)
+ {
+ free_buffer (b, i);
+ goto out;
+ }
+ q = b + (filp->f_pos & bmask);
+ }
+
+ cnt = PAGE_SIZE - ((int) p & PAGE_MASK);
+ if (cnt > amt)
+ cnt = amt;
+ memcpy (q, ((void *) copy->cpy_page_list[0]->phys_addr
+ + ((int) p & PAGE_MASK)),
+ cnt);
+ if (cnt < amt)
+ {
+ assert (copy->cpy_npages >= 2);
+ memcpy (q + cnt,
+ (void *) copy->cpy_page_list[1]->phys_addr, amt - cnt);
+ }
+ else
+ assert (copy->cpy_npages >= 1);
+
+ if (use_req)
+ {
+ req.buffer = b;
+ req.sector = filp->f_pos >> 9;
+ req.nr_sectors = i >> 9;
+ req.current_nr_sectors = i >> 9;
+ req.rq_status = RQ_ACTIVE;
+ req.rq_dev = inode->i_rdev;
+ req.cmd = WRITE;
+ req.errors = 0;
+ req.sem = &sem;
+ req.bh = NULL;
+ req.bhtail = NULL;
+ req.next = NULL;
+
+ sem.count = 0;
+ sem.wait = NULL;
+
+ enqueue_request (&req);
+ __down (&sem);
+
+ if (req.errors)
+ err = -LINUX_EIO;
+ }
+ else
+ {
+ bhp->b_state = (1 << BH_Dirty) | (1 << BH_Lock);
+ ll_rw_block (WRITE, 1, &bhp);
+ err = check_for_error (WRITE, 1, &bhp);
+ }
+ free_buffer (b, i);
+ if (err)
+ {
+ if (linux_block_write_trace)
+ printf ("block_write: at %d\n", __LINE__);
+
+ goto out;
+ }
+ resid -= amt;
+ if (resid == 0)
+ goto out;
+ p += amt;
+ }
+
+ unaligned = (int) p & 511;
+
+ /* Write full blocks. */
+ while (resid > bsize)
+ {
+ assert (have_page_list == 1);
+
+ /* Construct buffer list. */
+ for (i = 0, bhp = bh; resid > bsize && i < nbuf; i++, bhp++)
+ {
+ page_index = ((trunc_page ((vm_offset_t) p)
+ - trunc_page (copy->offset))
+ >> PAGE_SHIFT);
+
+ if (page_index == pages)
+ break;
+
+ bhlist[i] = bhp;
+ bhp->b_dev = inode->i_rdev;
+ bhp->b_state = (1 << BH_Dirty) | (1 << BH_Lock);
+ bhp->b_blocknr = blk;
+ bhp->b_wait = NULL;
+ bhp->b_page_list = NULL;
+ bhp->b_sem = &sem;
+
+ cnt = PAGE_SIZE - ((int) p & PAGE_MASK);
+ if (! unaligned && cnt >= bsize)
+ {
+ if (cnt > resid)
+ cnt = resid;
+ bhp->b_size = cnt & ~bmask;
+ bhp->b_data = (((char *)
+ copy->cpy_page_list[page_index]->phys_addr)
+ + ((int) p & PAGE_MASK));
+ }
+ else
+ {
+ if (cnt < bsize)
+ {
+ if (page_index == pages - 1)
+ break;
+ bhp->b_size = bsize;
+ }
+ else
+ {
+ bhp->b_size = cnt;
+ if (bhp->b_size > resid)
+ bhp->b_size = resid;
+ bhp->b_size &= ~bmask;
+ }
+ bhp->b_data = alloc_buffer (bhp->b_size);
+ if (! bhp->b_data)
+ {
+ printf ("%s:%d: block_write: ran out of buffers\n",
+ __FILE__, __LINE__);
+ while (--i >= 0)
+ if (bhlist[i]->b_page_list)
+ free_buffer (bhlist[i]->b_data, bhlist[i]->b_size);
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+ bhp->b_page_list = (void *) 1;
+ if (cnt > bhp->b_size)
+ cnt = bhp->b_size;
+ memcpy (bhp->b_data,
+ ((void *) copy->cpy_page_list[page_index]->phys_addr
+ + ((int) p & PAGE_MASK)),
+ cnt);
+ if (cnt < bhp->b_size)
+ memcpy (bhp->b_data + cnt,
+ ((void *)
+ copy->cpy_page_list[page_index + 1]->phys_addr),
+ bhp->b_size - cnt);
+ }
+
+ p += bhp->b_size;
+ resid -= bhp->b_size;
+ blk += bhp->b_size >> bshift;
+ }
+
+ assert (i > 0);
+
+ sem.count = 0;
+ sem.wait = NULL;
+
+ /* Do the write. */
+ ll_rw_block (WRITE, i, bhlist);
+ __down (&sem);
+ err = check_for_error (WRITE, i, bhlist);
+ if (err || resid == 0)
+ goto out;
+
+ /* Discard current page list. */
+ vm_map_copy_discard (copy);
+ have_page_list = 0;
+
+ /* Compute # pages to wire down. */
+ pages = ((round_page ((vm_offset_t) p + resid)
+ - trunc_page ((vm_offset_t) p))
+ >> PAGE_SHIFT);
+ if (pages > WRITE_MAXPHYSPG)
+ pages = WRITE_MAXPHYSPG;
+
+ /* Wire down user pages and get page list. */
+ err = vm_map_copyin_page_list (current_map (),
+ trunc_page ((vm_offset_t) p),
+ pages << PAGE_SHIFT, FALSE,
+ FALSE, &copy, FALSE);
+ if (err)
+ {
+ if (err == KERN_INVALID_ADDRESS || err == KERN_PROTECTION_FAILURE)
+ err = -LINUX_EINVAL;
+ else
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+
+ assert (pages == copy->cpy_npages);
+ assert (! vm_map_copy_has_cont (copy));
+
+ have_page_list = 1;
+ }
+
+ /* Write any partial count. */
+ if (resid > 0)
+ {
+ char *b;
+ int use_req;
+
+ assert (have_page_list);
+ assert (pages >= 1);
+
+ use_req = (disk_major (MAJOR (inode->i_rdev)) && ! np->read_only);
+
+ if (linux_block_write_trace)
+ printf ("block_write: at %d: resid %d\n", __LINE__, resid);
+
+ if (use_req)
+ {
+ i = (resid + 511) & ~511;
+ req.buffer = b = alloc_buffer (i);
+ if (! b)
+ {
+ printf ("%s:%d: block_write: ran out of buffers\n",
+ __FILE__, __LINE__);
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+ req.sector = blk << (bshift - 9);
+ req.nr_sectors = i >> 9;
+ req.current_nr_sectors = i >> 9;
+ req.rq_status = RQ_ACTIVE;
+ req.rq_dev = inode->i_rdev;
+ req.cmd = READ;
+ req.errors = 0;
+ req.sem = &sem;
+ req.bh = NULL;
+ req.bhtail = NULL;
+ req.next = NULL;
+
+ sem.count = 0;
+ sem.wait = NULL;
+
+ enqueue_request (&req);
+ __down (&sem);
+
+ if (req.errors)
+ {
+ free_buffer (b, i);
+ err = -LINUX_EIO;
+ goto out;
+ }
+ }
+ else
+ {
+ i = bsize;
+ bhp = bh;
+ bhp->b_data = b = alloc_buffer (i);
+ if (! b)
+ {
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+ bhp->b_blocknr = blk;
+ bhp->b_dev = inode->i_rdev;
+ bhp->b_size = bsize;
+ bhp->b_state = 1 << BH_Lock;
+ bhp->b_page_list = NULL;
+ bhp->b_request = NULL;
+ bhp->b_reqnext = NULL;
+ bhp->b_wait = NULL;
+ bhp->b_sem = NULL;
+
+ ll_rw_block (READ, 1, &bhp);
+ wait_on_buffer (bhp);
+ err = check_for_error (READ, 1, &bhp);
+ if (err)
+ {
+ free_buffer (b, i);
+ goto out;
+ }
+ }
+
+ page_index = ((trunc_page ((vm_offset_t) p) - trunc_page (copy->offset))
+ >> PAGE_SHIFT);
+ cnt = PAGE_SIZE - ((int) p & PAGE_MASK);
+ if (cnt > resid)
+ cnt = resid;
+ memcpy (b, ((void *) copy->cpy_page_list[page_index]->phys_addr
+ + ((int) p & PAGE_MASK)),
+ cnt);
+ if (cnt < resid)
+ {
+ assert (copy->cpy_npages >= 2);
+ memcpy (b + cnt,
+ (void *) copy->cpy_page_list[page_index + 1]->phys_addr,
+ resid - cnt);
+ }
+ else
+ assert (copy->cpy_npages >= 1);
+
+ if (use_req)
+ {
+ req.buffer = b;
+ req.sector = blk << (bshift - 9);
+ req.nr_sectors = i >> 9;
+ req.current_nr_sectors = i >> 9;
+ req.rq_status = RQ_ACTIVE;
+ req.rq_dev = inode->i_rdev;
+ req.cmd = WRITE;
+ req.errors = 0;
+ req.sem = &sem;
+ req.bh = NULL;
+ req.bhtail = NULL;
+ req.next = NULL;
+
+ sem.count = 0;
+ sem.wait = NULL;
+
+ enqueue_request (&req);
+ __down (&sem);
+
+ if (req.errors)
+ err = -LINUX_EIO;
+ }
+ else
+ {
+ bhp->b_state = (1 << BH_Dirty) | (1 << BH_Lock);
+ ll_rw_block (WRITE, 1, &bhp);
+ err = check_for_error (WRITE, 1, &bhp);
+ }
+ free_buffer (b, i);
+ if (! err)
+ resid = 0;
+ }
+
+out:
+ if (have_page_list)
+ vm_map_copy_discard (copy);
+ if (bh)
+ kfree ((vm_offset_t) bh,
+ (sizeof (*bh) + sizeof (*bhlist)) * nbuf);
+ filp->f_resid = resid;
+ return err;
+}
+
+int linux_block_read_trace = 0;
+#define LINUX_BLOCK_READ_TRACE (linux_block_read_trace == -1 \
+ || linux_block_read_trace == inode->i_rdev)
+
+/* Maximum amount of data to read per driver invocation. */
+#define READ_MAXPHYS (64*1024)
+#define READ_MAXPHYSPG (READ_MAXPHYS >> PAGE_SHIFT)
+
+/* Read COUNT bytes of data into user buffer BUF
+ from device specified by INODE from location specified by FILP. */
+int
+block_read (struct inode *inode, struct file *filp, char *buf, int count)
+{
+ int err = 0, resid = count;
+ int i, bsize, bmask, bshift;
+ int pages, amt, unaligned;
+ int page_index, nbuf;
+ int have_page_list = 0;
+ unsigned blk;
+ vm_offset_t off, wire_offset, offset;
+ vm_object_t object;
+ vm_page_t *page_list;
+ struct request req;
+ struct semaphore sem;
+ struct name_map *np = filp->f_np;
+ struct buffer_head *bh, *bhp, **bhlist;
+
+ /* Get device block size. */
+ get_block_size (inode->i_rdev, &bsize, &bshift);
+ assert (bsize <= PAGE_SIZE);
+ bmask = bsize - 1;
+
+ off = 0;
+ blk = (filp->f_pos + bmask) >> bshift;
+
+ /* Allocate buffer headers. */
+ nbuf = round_page (count) >> PAGE_SHIFT;
+ if (nbuf > READ_MAXPHYSPG)
+ nbuf = READ_MAXPHYSPG;
+ if (filp->f_pos & bmask)
+ nbuf *= 2;
+ bh = (struct buffer_head *) kalloc ((sizeof (*bh) + sizeof (*bhlist)) * nbuf
+ + sizeof (*page_list) * READ_MAXPHYSPG);
+ if (! bh)
+ return -LINUX_ENOMEM;
+ bhlist = (struct buffer_head **) (bh + nbuf);
+ page_list = (vm_page_t *) (bhlist + nbuf);
+
+ /* Allocate an object to hold the data. */
+ object = vm_object_allocate (round_page (count));
+ if (! object)
+ {
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+
+ /* Compute number of pages to be wired at a time. */
+ pages = round_page (count) >> PAGE_SHIFT;
+ if (pages > READ_MAXPHYSPG)
+ pages = READ_MAXPHYSPG;
+
+ /* Allocate and wire down pages in the object. */
+ for (i = 0, wire_offset = offset = 0; i < pages; i++, offset += PAGE_SIZE)
+ {
+ while (1)
+ {
+ page_list[i] = vm_page_grab ();
+ if (page_list[i])
+ {
+ assert (page_list[i]->busy);
+ assert (! page_list[i]->wanted);
+ break;
+ }
+ vm_page_wait (NULL);
+ }
+ vm_object_lock (object);
+ vm_page_lock_queues ();
+ assert (! vm_page_lookup (object, offset));
+ vm_page_insert (page_list[i], object, offset);
+ assert (page_list[i]->wire_count == 0);
+ vm_page_wire (page_list[i]);
+ vm_page_unlock_queues ();
+ vm_object_unlock (object);
+ }
+ have_page_list = 1;
+
+ /* Read any partial block. */
+ if (filp->f_pos & bmask)
+ {
+ char *b, *q;
+ int use_req;
+
+ use_req = (disk_major (MAJOR (inode->i_rdev)) && ! np->read_only);
+
+ amt = bsize - (filp->f_pos & bmask);
+ if (amt > resid)
+ amt = resid;
+
+ if (LINUX_BLOCK_READ_TRACE)
+ printf ("block_read: at %d: amt %d, resid %d\n",
+ __LINE__, amt, resid);
+
+ if (use_req)
+ {
+ i = (amt + 511) & ~511;
+ req.buffer = b = alloc_buffer (i);
+ if (! b)
+ {
+ printf ("%s:%d: block_read: ran out of buffers\n",
+ __FILE__, __LINE__);
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+ req.sector = filp->f_pos >> 9;
+ req.nr_sectors = i >> 9;
+ req.current_nr_sectors = i >> 9;
+ req.rq_status = RQ_ACTIVE;
+ req.rq_dev = inode->i_rdev;
+ req.cmd = READ;
+ req.errors = 0;
+ req.sem = &sem;
+ req.bh = NULL;
+ req.bhtail = NULL;
+ req.next = NULL;
+
+ sem.count = 0;
+ sem.wait = NULL;
+
+ enqueue_request (&req);
+ __down (&sem);
+
+ if (req.errors)
+ {
+ free_buffer (b, i);
+ err = -LINUX_EIO;
+ goto out;
+ }
+ q = b + (filp->f_pos & 511);
+ }
+ else
+ {
+ i = bsize;
+ bhp = bh;
+ bhp->b_data = b = alloc_buffer (i);
+ if (! b)
+ {
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+ bhp->b_blocknr = filp->f_pos >> bshift;
+ bhp->b_dev = inode->i_rdev;
+ bhp->b_size = bsize;
+ bhp->b_state = 1 << BH_Lock;
+ bhp->b_page_list = NULL;
+ bhp->b_request = NULL;
+ bhp->b_reqnext = NULL;
+ bhp->b_wait = NULL;
+ bhp->b_sem = NULL;
+
+ ll_rw_block (READ, 1, &bhp);
+ wait_on_buffer (bhp);
+ err = check_for_error (READ, 1, &bhp);
+ if (err)
+ {
+ free_buffer (b, i);
+ goto out;
+ }
+ q = b + (filp->f_pos & bmask);
+ }
+
+ memcpy ((void *) page_list[0]->phys_addr, q, amt);
+
+ free_buffer (b, i);
+ resid -= amt;
+ if (resid == 0)
+ {
+ if (LINUX_BLOCK_READ_TRACE)
+ printf ("block_read: at %d\n", __LINE__);
+
+ assert (pages == 1);
+ goto out;
+ }
+ off += amt;
+ }
+
+ unaligned = off & 511;
+
+ /* Read full blocks. */
+ while (resid > bsize)
+ {
+ /* Construct buffer list to hand to the driver. */
+ for (i = 0, bhp = bh; resid > bsize && i < nbuf; bhp++, i++)
+ {
+ if (off == wire_offset + (pages << PAGE_SHIFT))
+ break;
+
+ bhlist[i] = bhp;
+ bhp->b_dev = inode->i_rdev;
+ bhp->b_state = 1 << BH_Lock;
+ bhp->b_blocknr = blk;
+ bhp->b_wait = NULL;
+ bhp->b_sem = &sem;
+
+ page_index = (trunc_page (off) - wire_offset) >> PAGE_SHIFT;
+ amt = PAGE_SIZE - (off & PAGE_MASK);
+ if (! unaligned && amt >= bsize)
+ {
+ if (amt > resid)
+ amt = resid;
+ bhp->b_size = amt & ~bmask;
+ bhp->b_data = ((char *) page_list[page_index]->phys_addr
+ + (off & PAGE_MASK));
+ bhp->b_page_list = NULL;
+ }
+ else
+ {
+ if (amt < bsize)
+ {
+ if (page_index == pages - 1)
+ {
+ assert (round_page (count) - off >= resid);
+ break;
+ }
+ bhp->b_size = bsize;
+ }
+ else
+ {
+ if (amt > resid)
+ amt = resid;
+ bhp->b_size = amt & ~bmask;
+ }
+ bhp->b_data = alloc_buffer (bhp->b_size);
+ if (! bhp->b_data)
+ {
+ printf ("%s:%d: block_read: ran out of buffers\n",
+ __FILE__, __LINE__);
+
+ while (--i >= 0)
+ if (bhp->b_page_list)
+ free_buffer (bhp->b_data, bhp->b_size);
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+ bhp->b_page_list = page_list;
+ bhp->b_index = page_index;
+ bhp->b_off = off & PAGE_MASK;
+ bhp->b_usrcnt = bhp->b_size;
+ }
+
+ resid -= bhp->b_size;
+ off += bhp->b_size;
+ blk += bhp->b_size >> bshift;
+ }
+
+ assert (i > 0);
+
+ sem.count = 0;
+ sem.wait = NULL;
+
+ /* Do the read. */
+ ll_rw_block (READ, i, bhlist);
+ __down (&sem);
+ err = check_for_error (READ, i, bhlist);
+ if (err || resid == 0)
+ goto out;
+
+ /* Unwire the pages and mark them dirty. */
+ offset = trunc_page (off);
+ for (i = 0; wire_offset < offset; i++, wire_offset += PAGE_SIZE)
+ {
+ vm_object_lock (object);
+ vm_page_lock_queues ();
+ assert (vm_page_lookup (object, wire_offset) == page_list[i]);
+ assert (page_list[i]->wire_count == 1);
+ assert (! page_list[i]->active && ! page_list[i]->inactive);
+ assert (! page_list[i]->reference);
+ page_list[i]->dirty = TRUE;
+ page_list[i]->reference = TRUE;
+ page_list[i]->busy = FALSE;
+ vm_page_unwire (page_list[i]);
+ vm_page_unlock_queues ();
+ vm_object_unlock (object);
+ }
+
+ assert (i <= pages);
+
+ /* Wire down the next chunk of the object. */
+ if (i == pages)
+ {
+ i = 0;
+ offset = wire_offset;
+ have_page_list = 0;
+ }
+ else
+ {
+ int j;
+
+ for (j = 0; i < pages; page_list[j++] = page_list[i++])
+ offset += PAGE_SIZE;
+ i = j;
+ }
+ pages = (round_page (count) - wire_offset) >> PAGE_SHIFT;
+ if (pages > READ_MAXPHYSPG)
+ pages = READ_MAXPHYSPG;
+ while (i < pages)
+ {
+ while (1)
+ {
+ page_list[i] = vm_page_grab ();
+ if (page_list[i])
+ {
+ assert (page_list[i]->busy);
+ assert (! page_list[i]->wanted);
+ break;
+ }
+ vm_page_wait (NULL);
+ }
+ vm_object_lock (object);
+ vm_page_lock_queues ();
+ assert (! vm_page_lookup (object, offset));
+ vm_page_insert (page_list[i], object, offset);
+ assert (page_list[i]->wire_count == 0);
+ vm_page_wire (page_list[i]);
+ vm_page_unlock_queues ();
+ vm_object_unlock (object);
+ i++;
+ offset += PAGE_SIZE;
+ }
+ have_page_list = 1;
+ }
+
+ /* Read any partial count. */
+ if (resid > 0)
+ {
+ char *b;
+ int use_req;
+
+ assert (have_page_list);
+ assert (pages >= 1);
+
+ use_req = (disk_major (MAJOR (inode->i_rdev)) && ! np->read_only);
+
+ amt = bsize - (filp->f_pos & bmask);
+ if (amt > resid)
+ amt = resid;
+
+ if (LINUX_BLOCK_READ_TRACE)
+	printf ("block_read: at %d: amt %d, resid %d\n", __LINE__, amt, resid);
+
+ if (use_req)
+ {
+ i = (resid + 511) & ~511;
+ req.buffer = b = alloc_buffer (i);
+ if (! b)
+ {
+ printf ("%s:%d: block_read: ran out of buffers\n",
+ __FILE__, __LINE__);
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+ req.sector = blk << (bshift - 9);
+ req.nr_sectors = i >> 9;
+ req.current_nr_sectors = i >> 9;
+ req.rq_status = RQ_ACTIVE;
+ req.rq_dev = inode->i_rdev;
+ req.cmd = READ;
+ req.errors = 0;
+ req.sem = &sem;
+ req.bh = NULL;
+ req.bhtail = NULL;
+ req.next = NULL;
+
+ sem.count = 0;
+ sem.wait = NULL;
+
+ enqueue_request (&req);
+ __down (&sem);
+
+ if (req.errors)
+ {
+ free_buffer (b, i);
+ err = -LINUX_EIO;
+ goto out;
+ }
+ }
+ else
+ {
+ i = bsize;
+ bhp = bh;
+ bhp->b_data = b = alloc_buffer (i);
+ if (! b)
+ {
+ err = -LINUX_ENOMEM;
+ goto out;
+ }
+ bhp->b_blocknr = blk;
+ bhp->b_dev = inode->i_rdev;
+ bhp->b_size = bsize;
+ bhp->b_state = 1 << BH_Lock;
+ bhp->b_page_list = NULL;
+ bhp->b_request = NULL;
+ bhp->b_reqnext = NULL;
+ bhp->b_wait = NULL;
+ bhp->b_sem = NULL;
+
+ ll_rw_block (READ, 1, &bhp);
+ wait_on_buffer (bhp);
+ err = check_for_error (READ, 1, &bhp);
+ if (err)
+ {
+ free_buffer (b, i);
+ goto out;
+ }
+ }
+
+ page_index = (trunc_page (off) - wire_offset) >> PAGE_SHIFT;
+ amt = PAGE_SIZE - (off & PAGE_MASK);
+ if (amt > resid)
+ amt = resid;
+ memcpy (((void *) page_list[page_index]->phys_addr
+ + (off & PAGE_MASK)),
+ b, amt);
+ if (amt < resid)
+ {
+ assert (pages >= 2);
+ memcpy ((void *) page_list[page_index + 1]->phys_addr,
+ b + amt, resid - amt);
+ }
+ else
+ assert (pages >= 1);
+
+ free_buffer (b, i);
+ }
+
+out:
+ if (have_page_list)
+ {
+ for (i = 0; i < pages; i++, wire_offset += PAGE_SIZE)
+ {
+ vm_object_lock (object);
+ vm_page_lock_queues ();
+ assert (vm_page_lookup (object, wire_offset) == page_list[i]);
+ assert (page_list[i]->wire_count == 1);
+ assert (! page_list[i]->active && ! page_list[i]->inactive);
+ assert (! page_list[i]->reference);
+ page_list[i]->dirty = TRUE;
+ page_list[i]->reference = TRUE;
+ page_list[i]->busy = FALSE;
+ vm_page_unwire (page_list[i]);
+ vm_page_unlock_queues ();
+ vm_object_unlock (object);
+ }
+ }
+ kfree ((vm_offset_t) bh,
+ ((sizeof (*bh) + sizeof (*bhlist)) * nbuf
+ + sizeof (*page_list) * READ_MAXPHYSPG));
+ if (err)
+ {
+ if (object)
+ {
+ assert (object->ref_count == 1);
+ vm_object_deallocate (object);
+ }
+ }
+ else
+ {
+ assert (object);
+ assert (object->ref_count == 1);
+
+ filp->f_resid = 0;
+ filp->f_object = object;
+ }
+
+ if (LINUX_BLOCK_READ_TRACE)
+ printf ("block_read: at %d: err %d\n", __LINE__, err);
+
+ return err;
+}
+
+/*
+ * This routine checks whether a removable medium has been changed,
+ * and invalidates all buffer-cache-entries in that case. This
+ * is a relatively slow routine, so we have to try to minimize using
+ * it. Thus it is called only upon a 'mount' or 'open'. This
+ * is the best way of combining speed and utility, I think.
+ * People changing diskettes in the middle of an operation deserve
+ * to lose :-)
+ */
+int
+check_disk_change (kdev_t dev)
+{
+ unsigned i;
+ struct file_operations * fops;
+
+ i = MAJOR(dev);
+ if (i >= MAX_BLKDEV || (fops = blkdevs[i].fops) == NULL)
+ return 0;
+ if (fops->check_media_change == NULL)
+ return 0;
+ if (! (*fops->check_media_change) (dev))
+ return 0;
+
+ printf ("Disk change detected on device %s\n", kdevname(dev));
+
+ if (fops->revalidate)
+ (*fops->revalidate) (dev);
+
+ return 1;
+}
+
+/* Mach device interface routines. */
+
+/* Mach name to Linux major/minor number mapping table. */
+static struct name_map name_to_major[] =
+{
+ /* IDE disks */
+ { "hd0", IDE0_MAJOR, 0, 0 },
+ { "hd1", IDE0_MAJOR, 1, 0 },
+ { "hd2", IDE1_MAJOR, 0, 0 },
+ { "hd3", IDE1_MAJOR, 1, 0 },
+ { "hd4", IDE2_MAJOR, 0, 0 },
+ { "hd5", IDE2_MAJOR, 1, 0 },
+ { "hd6", IDE3_MAJOR, 0, 0 },
+ { "hd7", IDE3_MAJOR, 1, 0 },
+
+ /* IDE CDROMs */
+ { "wcd0", IDE0_MAJOR, 0, 1 },
+ { "wcd1", IDE0_MAJOR, 1, 1 },
+ { "wcd2", IDE1_MAJOR, 0, 1 },
+ { "wcd3", IDE1_MAJOR, 1, 1 },
+ { "wcd4", IDE2_MAJOR, 0, 1 },
+ { "wcd5", IDE2_MAJOR, 1, 1 },
+ { "wcd6", IDE3_MAJOR, 0, 1 },
+ { "wcd7", IDE3_MAJOR, 1, 1 },
+
+ /* SCSI disks */
+ { "sd0", SCSI_DISK_MAJOR, 0, 0 },
+ { "sd1", SCSI_DISK_MAJOR, 1, 0 },
+ { "sd2", SCSI_DISK_MAJOR, 2, 0 },
+ { "sd3", SCSI_DISK_MAJOR, 3, 0 },
+ { "sd4", SCSI_DISK_MAJOR, 4, 0 },
+ { "sd5", SCSI_DISK_MAJOR, 5, 0 },
+ { "sd6", SCSI_DISK_MAJOR, 6, 0 },
+ { "sd7", SCSI_DISK_MAJOR, 7, 0 },
+
+ /* SCSI CDROMs */
+ { "cd0", SCSI_CDROM_MAJOR, 0, 1 },
+ { "cd1", SCSI_CDROM_MAJOR, 1, 1 },
+
+ /* Floppy disks */
+ { "fd0", FLOPPY_MAJOR, 0, 0 },
+ { "fd1", FLOPPY_MAJOR, 1, 0 },
+};
+
+#define NUM_NAMES (sizeof (name_to_major) / sizeof (name_to_major[0]))
+
+/* One of these is associated with each open instance of a device. */
+struct block_data
+{
+ const char *name; /* Mach name for device */
+ int want:1; /* someone is waiting for I/O to complete */
+ int open_count; /* number of opens */
+ int iocount; /* number of pending I/O operations */
+ int part; /* BSD partition number (-1 if none) */
+ ipc_port_t port; /* port representing device */
+ struct device_struct *ds; /* driver operation table entry */
+ struct device device; /* generic device header */
+ struct file file; /* Linux file structure */
+ struct inode inode; /* Linux inode structure */
+ struct name_map *np; /* name to inode map */
+ struct block_data *next; /* forward link */
+};
+
+/* List of open devices. */
+static struct block_data *open_list;
+
+/* Forward declarations. */
+
+extern struct device_emulation_ops linux_block_emulation_ops;
+
+static io_return_t device_close (void *);
+
+/* Return a send right for block device BD. */
+static ipc_port_t
+dev_to_port (void *bd)
+{
+ return (bd
+ ? ipc_port_make_send (((struct block_data *) bd)->port)
+ : IP_NULL);
+}
+
+/* Return 1 if C is a letter of the alphabet. */
+static inline int
+isalpha (int c)
+{
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+}
+
+/* Return 1 if C is a digit. */
+static inline int
+isdigit (int c)
+{
+ return c >= '0' && c <= '9';
+}
+
+int linux_device_open_trace = 0;
+
+static io_return_t
+device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode, char *name, device_t *devp)
+{
+ char *p;
+ int i, part = -1, slice = 0, err = 0;
+ unsigned major, minor;
+ kdev_t dev;
+ ipc_port_t notify;
+ struct file file;
+ struct inode inode;
+ struct name_map *np;
+ struct device_struct *ds;
+ struct block_data *bd = NULL, *bdp;
+
+ if (linux_device_open_trace)
+ printf ("device_open: at %d: name %s\n", __LINE__, name);
+
+ /* Parse name into name, unit, DOS partition (slice) and partition. */
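+  /* For example (illustrative only): "sd0s2a" names SCSI disk 0, DOS
+     partition (slice) 2, BSD partition 'a'; "sd0a" uses the default
+     slice detected further below; a bare "sd0" addresses the whole
+     disk.  */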
+ for (p = name; isalpha (*p); p++)
+ ;
+ if (p == name || ! isdigit (*p))
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ return D_NO_SUCH_DEVICE;
+ }
+ do
+ p++;
+ while (isdigit (*p));
+ if (*p)
+ {
+ char *q = p;
+
+ if (! isalpha (*q))
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ return D_NO_SUCH_DEVICE;
+ }
+ if (*q == 's' && isdigit (*(q + 1)))
+ {
+ q++;
+ slice = 0;
+ do
+ slice = slice * 10 + *q++ - '0';
+ while (isdigit (*q));
+ if (! *q)
+ goto find_major;
+ if (! isalpha (*q))
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ return D_NO_SUCH_DEVICE;
+ }
+ }
+ if (*(q + 1))
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ return D_NO_SUCH_DEVICE;
+ }
+ part = *q - 'a';
+ }
+ else
+ slice = -1;
+
+find_major:
+ /* Convert name to major number. */
+ for (i = 0, np = name_to_major; i < NUM_NAMES; i++, np++)
+ {
+ int len = strlen (np->name);
+
+ if (len == p - name && ! strncmp (np->name, name, len))
+ break;
+ }
+ if (i == NUM_NAMES)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ return D_NO_SUCH_DEVICE;
+ }
+
+ major = np->major;
+ ds = &blkdevs[major];
+
+ /* Check that driver exists. */
+ if (! ds->fops)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ return D_NO_SUCH_DEVICE;
+ }
+
+ /* Slice and partition numbers are only used by disk drives.
+ The test for read-only is for IDE CDROMs. */
+ if (! disk_major (major) || np->read_only)
+ {
+ slice = -1;
+ part = -1;
+ }
+
+ /* Wait for any other open/close calls to finish. */
+ ds = &blkdevs[major];
+ while (ds->busy)
+ {
+ ds->want = 1;
+ assert_wait ((event_t) ds, FALSE);
+ thread_block (0);
+ }
+ ds->busy = 1;
+
+ /* Compute minor number. */
+ if (disk_major (major) && ! ds->gd)
+ {
+ struct gendisk *gd;
+
+ for (gd = gendisk_head; gd && gd->major != major; gd = gd->next)
+ ;
+ assert (gd);
+ ds->gd = gd;
+ }
+ minor = np->unit;
+ if (ds->gd)
+ minor <<= ds->gd->minor_shift;
+ dev = MKDEV (major, minor);
+
+ /* If no DOS partition is specified, find one we can handle. */
+ if (slice == 0 && (! ds->default_slice || ds->default_slice[np->unit] == 0))
+ {
+ int sysid, bsize, bshift;
+ struct mboot *mp;
+ struct ipart *pp;
+ struct buffer_head *bhp;
+
+ /* Open partition 0. */
+ inode.i_rdev = dev;
+ file.f_mode = O_RDONLY;
+ file.f_flags = 0;
+ if (ds->fops->open)
+ {
+ linux_intr_pri = SPL5;
+ err = (*ds->fops->open) (&inode, &file);
+ if (err)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ err = linux_to_mach_error (err);
+ goto out;
+ }
+ }
+
+ /* Allocate a buffer for I/O. */
+ get_block_size (inode.i_rdev, &bsize, &bshift);
+ assert (bsize <= PAGE_SIZE);
+ bhp = getblk (inode.i_rdev, 0, bsize);
+ if (! bhp)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ err = D_NO_MEMORY;
+ goto slice_done;
+ }
+
+ /* Read DOS partition table. */
+ ll_rw_block (READ, 1, &bhp);
+ wait_on_buffer (bhp);
+ err = check_for_error (READ, 1, &bhp);
+ if (err)
+ {
+ printf ("%s: error reading boot sector\n", np->name);
+ err = linux_to_mach_error (err);
+ goto slice_done;
+ }
+
+ /* Check for valid partition table. */
+ mp = (struct mboot *) bhp->b_data;
+ if (mp->signature != BOOT_MAGIC)
+ {
+ printf ("%s: invalid partition table\n", np->name);
+ err = D_NO_SUCH_DEVICE;
+ goto slice_done;
+ }
+
+ /* Search for a Mach, BSD or Linux partition. */
+ sysid = 0;
+ pp = (struct ipart *) mp->parts;
+ for (i = 0; i < FD_NUMPART; i++, pp++)
+ {
+ if ((pp->systid == UNIXOS
+ || pp->systid == BSDOS
+ || pp->systid == LINUXOS)
+ && (! sysid || pp->bootid == ACTIVE))
+ {
+ sysid = pp->systid;
+ slice = i + 1;
+ }
+ }
+ if (! sysid)
+ {
+ printf ("%s: No Mach, BSD or Linux partition found\n", np->name);
+ err = D_NO_SUCH_DEVICE;
+ goto slice_done;
+ }
+
+ printf ("%s: default slice %d: %s OS\n", np->name, slice,
+ (sysid == UNIXOS ? "Mach" : (sysid == BSDOS ? "BSD" : "LINUX")));
+
+ slice_done:
+ if (ds->fops->release)
+ (*ds->fops->release) (&inode, &file);
+ __brelse (bhp);
+ if (err)
+ goto out;
+ if (! ds->default_slice)
+ {
+ ds->default_slice = (int *) kalloc (sizeof (int) * ds->gd->max_nr);
+ if (! ds->default_slice)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ err = D_NO_MEMORY;
+ goto out;
+ }
+ memset (ds->default_slice, 0, sizeof (int) * ds->gd->max_nr);
+ }
+ ds->default_slice[np->unit] = slice;
+ }
+
+ /* Add slice to minor number. */
+ if (slice == 0)
+ slice = ds->default_slice[np->unit];
+ if (slice > 0)
+ {
+ if (slice >= ds->gd->max_p)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ err = D_NO_SUCH_DEVICE;
+ goto out;
+ }
+ minor |= slice;
+
+ if (linux_device_open_trace)
+ printf ("device_open: at %d: start_sect 0x%x, nr_sects %d\n",
+ __LINE__, ds->gd->part[minor].start_sect,
+ ds->gd->part[minor].nr_sects);
+ }
+ dev = MKDEV (major, minor);
+
+ /* Initialize file structure. */
+ file.f_mode = (mode == D_READ || np->read_only) ? O_RDONLY : O_RDWR;
+ file.f_flags = (mode & D_NODELAY) ? O_NDELAY : 0;
+
+ /* Check if the device is currently open. */
+ for (bdp = open_list; bdp; bdp = bdp->next)
+ if (bdp->inode.i_rdev == dev
+ && bdp->part == part
+ && bdp->file.f_mode == file.f_mode
+ && bdp->file.f_flags == file.f_flags)
+ {
+ bd = bdp;
+ goto out;
+ }
+
+ /* Open the device. */
+ if (ds->fops->open)
+ {
+ inode.i_rdev = dev;
+ linux_intr_pri = SPL5;
+ err = (*ds->fops->open) (&inode, &file);
+ if (err)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ err = linux_to_mach_error (err);
+ goto out;
+ }
+ }
+
+ /* Read disklabel. */
+ if (part >= 0 && (! ds->label || ! ds->label[minor]))
+ {
+ int bsize, bshift;
+ struct evtoc *evp;
+ struct disklabel *lp, *dlp;
+ struct buffer_head *bhp;
+
+ assert (disk_major (major));
+
+ /* Allocate a disklabel. */
+ lp = (struct disklabel *) kalloc (sizeof (struct disklabel));
+ if (! lp)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ err = D_NO_MEMORY;
+ goto bad;
+ }
+
+ /* Allocate a buffer for I/O. */
+ get_block_size (dev, &bsize, &bshift);
+ assert (bsize <= PAGE_SIZE);
+ bhp = getblk (dev, LBLLOC >> (bshift - 9), bsize);
+ if (! bhp)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ err = D_NO_MEMORY;
+ goto label_done;
+ }
+
+ /* Set up 'c' partition to span the entire DOS partition. */
+ lp->d_npartitions = PART_DISK + 1;
+ memset (lp->d_partitions, 0, MAXPARTITIONS * sizeof (struct partition));
+ lp->d_partitions[PART_DISK].p_offset = ds->gd->part[minor].start_sect;
+ lp->d_partitions[PART_DISK].p_size = ds->gd->part[minor].nr_sects;
+
+ /* Try reading a BSD disklabel. */
+ ll_rw_block (READ, 1, &bhp);
+ wait_on_buffer (bhp);
+ err = check_for_error (READ, 1, &bhp);
+ if (err)
+ {
+ printf ("%s: error reading BSD label\n", np->name);
+ err = 0;
+ goto vtoc;
+ }
+ dlp = (struct disklabel *) (bhp->b_data + ((LBLLOC << 9) & (bsize - 1)));
+ if (dlp->d_magic != DISKMAGIC || dlp->d_magic2 != DISKMAGIC)
+ goto vtoc;
+ printf ("%s: BSD LABEL\n", np->name);
+ lp->d_npartitions = dlp->d_npartitions;
+ memcpy (lp->d_partitions, dlp->d_partitions,
+ MAXPARTITIONS * sizeof (struct partition));
+
+ /* Check for NetBSD DOS partition bogosity. */
+ for (i = 0; i < lp->d_npartitions; i++)
+ if (lp->d_partitions[i].p_size > ds->gd->part[minor].nr_sects)
+ ds->gd->part[minor].nr_sects = lp->d_partitions[i].p_size;
+ goto label_done;
+
+ vtoc:
+ /* Try reading VTOC. */
+ bhp->b_blocknr = PDLOCATION >> (bshift - 9);
+ bhp->b_state = 1 << BH_Lock;
+ ll_rw_block (READ, 1, &bhp);
+ wait_on_buffer (bhp);
+ err = check_for_error (READ, 1, &bhp);
+ if (err)
+ {
+ printf ("%s: error reading evtoc\n", np->name);
+ err = linux_to_mach_error (err);
+ goto label_done;
+ }
+ evp = (struct evtoc *) (bhp->b_data + ((PDLOCATION << 9) & (bsize - 1)));
+ if (evp->sanity != VTOC_SANE)
+ {
+ printf ("%s: No BSD or Mach label found\n", np->name);
+ err = D_NO_SUCH_DEVICE;
+ goto label_done;
+ }
+ printf ("%s: LOCAL LABEL\n", np->name);
+ lp->d_npartitions = (evp->nparts > MAXPARTITIONS
+ ? MAXPARTITIONS : evp->nparts);
+ for (i = 0; i < lp->d_npartitions; i++)
+ {
+ lp->d_partitions[i].p_size = evp->part[i].p_size;
+ lp->d_partitions[i].p_offset = evp->part[i].p_start;
+ lp->d_partitions[i].p_fstype = FS_BSDFFS;
+ }
+
+ label_done:
+ if (bhp)
+ __brelse (bhp);
+ if (err)
+ {
+ kfree ((vm_offset_t) lp, sizeof (struct disklabel));
+ goto bad;
+ }
+ if (! ds->label)
+ {
+ ds->label = (struct disklabel **) kalloc (sizeof (struct disklabel *)
+ * ds->gd->max_p
+ * ds->gd->max_nr);
+ if (! ds->label)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ kfree ((vm_offset_t) lp, sizeof (struct disklabel));
+ err = D_NO_MEMORY;
+ goto bad;
+ }
+ memset (ds->label, 0,
+ (sizeof (struct disklabel *)
+ * ds->gd->max_p * ds->gd->max_nr));
+ }
+ ds->label[minor] = lp;
+ }
+
+ /* Check partition number. */
+ if (part >= 0
+ && (part >= ds->label[minor]->d_npartitions
+ || ds->label[minor]->d_partitions[part].p_size == 0))
+ {
+ err = D_NO_SUCH_DEVICE;
+ goto bad;
+ }
+
+ /* Allocate and initialize device data. */
+ bd = (struct block_data *) kalloc (sizeof (struct block_data));
+ if (! bd)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ err = D_NO_MEMORY;
+ goto bad;
+ }
+ bd->want = 0;
+ bd->open_count = 0;
+ bd->iocount = 0;
+ bd->part = part;
+ bd->ds = ds;
+ bd->device.emul_data = bd;
+ bd->device.emul_ops = &linux_block_emulation_ops;
+ bd->inode.i_rdev = dev;
+ bd->file.f_mode = file.f_mode;
+ bd->file.f_np = np;
+ bd->file.f_flags = file.f_flags;
+ bd->port = ipc_port_alloc_kernel ();
+ if (bd->port == IP_NULL)
+ {
+ if (linux_device_open_trace)
+ printf ("device_open: at %d\n", __LINE__);
+
+ err = KERN_RESOURCE_SHORTAGE;
+ goto bad;
+ }
+ ipc_kobject_set (bd->port, (ipc_kobject_t) &bd->device, IKOT_DEVICE);
+ notify = ipc_port_make_sonce (bd->port);
+ ip_lock (bd->port);
+ ipc_port_nsrequest (bd->port, 1, notify, &notify);
+ assert (notify == IP_NULL);
+
+ goto out;
+
+bad:
+ if (ds->fops->release)
+ (*ds->fops->release) (&inode, &file);
+
+out:
+ ds->busy = 0;
+ if (ds->want)
+ {
+ ds->want = 0;
+ thread_wakeup ((event_t) ds);
+ }
+
+ if (bd && bd->open_count > 0)
+ {
+ if (err)
+ *devp = NULL;
+ else
+ {
+ *devp = &bd->device;
+ bd->open_count++;
+ }
+ return err;
+ }
+
+ if (err)
+ {
+ if (bd)
+ {
+ if (bd->port != IP_NULL)
+ {
+ ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel (bd->port);
+ }
+ kfree ((vm_offset_t) bd, sizeof (struct block_data));
+ bd = NULL;
+ }
+ }
+ else
+ {
+ bd->open_count = 1;
+ bd->next = open_list;
+ open_list = bd;
+ }
+
+ if (IP_VALID (reply_port))
+ ds_device_open_reply (reply_port, reply_port_type, err, dev_to_port (bd));
+ else if (! err)
+ device_close (bd);
+
+ return MIG_NO_REPLY;
+}
+
+static io_return_t
+device_close (void *d)
+{
+ struct block_data *bd = d, *bdp, **prev;
+ struct device_struct *ds = bd->ds;
+
+ /* Wait for any other open/close to complete. */
+ while (ds->busy)
+ {
+ ds->want = 1;
+ assert_wait ((event_t) ds, FALSE);
+ thread_block (0);
+ }
+ ds->busy = 1;
+
+ if (--bd->open_count == 0)
+ {
+ /* Wait for pending I/O to complete. */
+ while (bd->iocount > 0)
+ {
+ bd->want = 1;
+ assert_wait ((event_t) bd, FALSE);
+ thread_block (0);
+ }
+
+ /* Remove device from open list. */
+ prev = &open_list;
+ bdp = open_list;
+ while (bdp)
+ {
+ if (bdp == bd)
+ {
+ *prev = bdp->next;
+ break;
+ }
+ prev = &bdp->next;
+ bdp = bdp->next;
+ }
+
+ assert (bdp == bd);
+
+ if (ds->fops->release)
+ (*ds->fops->release) (&bd->inode, &bd->file);
+
+ ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel (bd->port);
+ kfree ((vm_offset_t) bd, sizeof (struct block_data));
+ }
+
+ ds->busy = 0;
+ if (ds->want)
+ {
+ ds->want = 0;
+ thread_wakeup ((event_t) ds);
+ }
+ return D_SUCCESS;
+}
+
+/* XXX: Assumes all drivers use block_write. */
+static io_return_t
+device_write (void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t bn, io_buf_ptr_t data, unsigned int count,
+ int *bytes_written)
+{
+ int major, minor;
+ unsigned sz, maxsz, off;
+ io_return_t err = 0;
+ struct block_data *bd = d;
+
+ if (! bd->ds->fops->write)
+ {
+ printf ("device_write: at %d\n", __LINE__);
+ return D_INVALID_OPERATION;
+ }
+
+ if ((int) count <= 0)
+ {
+ printf ("device_write: at %d\n", __LINE__);
+ return D_INVALID_SIZE;
+ }
+
+ major = MAJOR (bd->inode.i_rdev);
+ minor = MINOR (bd->inode.i_rdev);
+
+ if (disk_major (major))
+ {
+ assert (bd->ds->gd);
+
+ if (bd->part >= 0)
+ {
+ struct disklabel *lp;
+
+ assert (bd->ds->label);
+ lp = bd->ds->label[minor];
+ assert (lp);
+ maxsz = lp->d_partitions[bd->part].p_size;
+ off = (lp->d_partitions[bd->part].p_offset
+ - bd->ds->gd->part[minor].start_sect);
+
+ if (linux_block_write_trace)
+ printf ("device_write: at %d: dev %s, part %d, "
+ "offset 0x%x (%u), start_sect 0x%x (%u), "
+ "maxsz 0x%x (%u)\n",
+ __LINE__,
+ kdevname (bd->inode.i_rdev),
+ bd->part,
+ lp->d_partitions[bd->part].p_offset,
+ lp->d_partitions[bd->part].p_offset,
+ bd->ds->gd->part[minor].start_sect,
+ bd->ds->gd->part[minor].start_sect,
+ maxsz, maxsz);
+
+ assert (off < bd->ds->gd->part[minor].nr_sects);
+ }
+ else
+ {
+ maxsz = bd->ds->gd->part[minor].nr_sects;
+ off = 0;
+ }
+ }
+ else
+ {
+ assert (blk_size[major]);
+ maxsz = blk_size[major][minor] << (BLOCK_SIZE_BITS - 9);
+ off = 0;
+ }
+
+ if (bn >= maxsz)
+ {
+ if (linux_block_write_trace)
+ printf ("device_write: at %d\n", __LINE__);
+
+ return D_INVALID_SIZE;
+ }
+
+ bd->iocount++;
+
+ sz = (count + 511) >> 9;
+ if (sz > maxsz - bn)
+ {
+ sz = maxsz - bn;
+ if (count > (sz << 9))
+ count = sz << 9;
+ }
+
+ bd->file.f_pos = (loff_t) (bn + off) << 9;
+
+ err = (*bd->ds->fops->write) (&bd->inode, &bd->file, (char *) data, count);
+ if (err)
+ err = linux_to_mach_error (err);
+
+ if (linux_block_write_trace)
+ printf ("device_write: at %d: err %d\n", __LINE__, err);
+
+ if (IP_VALID (reply_port))
+ ds_device_write_reply (reply_port, reply_port_type,
+ err, count - bd->file.f_resid);
+
+ if (--bd->iocount == 0 && bd->want)
+ {
+ bd->want = 0;
+ thread_wakeup ((event_t) bd);
+ }
+ return MIG_NO_REPLY;
+}
+
+/* XXX: Assumes all drivers use block_read. */
+static io_return_t
+device_read (void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t bn, int count, io_buf_ptr_t *data,
+ unsigned *bytes_read)
+{
+ int major, minor;
+ unsigned sz, maxsz, off;
+ io_return_t err = 0;
+ vm_offset_t addr;
+ vm_object_t object;
+ vm_map_copy_t copy;
+ struct block_data *bd = d;
+ struct inode *inode = &bd->inode;
+
+ *data = 0;
+ *bytes_read = 0;
+
+ if (! bd->ds->fops->read)
+ return D_INVALID_OPERATION;
+
+ if (count <= 0)
+ return D_INVALID_SIZE;
+
+ major = MAJOR (bd->inode.i_rdev);
+ minor = MINOR (bd->inode.i_rdev);
+
+ if (LINUX_BLOCK_READ_TRACE)
+ printf ("device_read: at %d: major %d, minor %d, count %d, recnum %u\n",
+ __LINE__, major, minor, count, bn);
+
+ if (disk_major (major))
+ {
+ assert (bd->ds->gd);
+
+ if (bd->part >= 0)
+ {
+ struct disklabel *lp;
+
+ assert (bd->ds->label);
+ lp = bd->ds->label[minor];
+ assert (lp);
+ maxsz = lp->d_partitions[bd->part].p_size;
+ off = (lp->d_partitions[bd->part].p_offset
+ - bd->ds->gd->part[minor].start_sect);
+
+ if (LINUX_BLOCK_READ_TRACE)
+ printf ("device_read: at %d: dev %s, part %d, offset 0x%x, "
+ "size %d, start_sect 0x%x, nr_sects %d\n",
+		__LINE__, kdevname (bd->inode.i_rdev), bd->part, off, maxsz,
+ bd->ds->gd->part[minor].start_sect,
+ bd->ds->gd->part[minor].nr_sects);
+
+ assert (off < bd->ds->gd->part[minor].nr_sects);
+ }
+ else
+ {
+ maxsz = bd->ds->gd->part[minor].nr_sects;
+ off = 0;
+ }
+ }
+ else
+ {
+ assert (blk_size[major]);
+ maxsz = blk_size[major][minor] << (BLOCK_SIZE_BITS - 9);
+ off = 0;
+ }
+
+ if (bn > maxsz)
+ return D_INVALID_SIZE;
+
+ /* Be backward compatible with Unix. */
+ if (bn == maxsz)
+ {
+ if (LINUX_BLOCK_READ_TRACE)
+ printf ("device_read: at %d\n", __LINE__);
+ return 0;
+ }
+
+ sz = (count + 511) >> 9;
+ if (sz > maxsz - bn)
+ {
+ sz = maxsz - bn;
+ if (count > (sz << 9))
+ count = sz << 9;
+ }
+
+ bd->file.f_pos = (loff_t) (bn + off) << 9;
+ bd->file.f_object = NULL;
+
+ if (LINUX_BLOCK_READ_TRACE)
+ printf ("device_read: at %d: f_pos 0x%x\n",
+ __LINE__, (unsigned) bd->file.f_pos);
+
+ bd->iocount++;
+
+ err = (*bd->ds->fops->read) (&bd->inode, &bd->file, (char *) data, count);
+ if (err)
+ err = linux_to_mach_error (err);
+ else
+ {
+ object = bd->file.f_object;
+ assert (object);
+ assert (object->ref_count == 1);
+ err = vm_map_copyin_object (object, 0, round_page (count), &copy);
+ assert (object->ref_count == 1);
+ if (err)
+ vm_object_deallocate (object);
+ else
+ {
+ assert (copy->cpy_object->ref_count == 1);
+ *data = (io_buf_ptr_t) copy;
+ *bytes_read = count - bd->file.f_resid;
+ }
+ }
+ if (--bd->iocount == 0 && bd->want)
+ {
+ bd->want = 0;
+ thread_wakeup ((event_t) bd);
+ }
+ return err;
+}
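+
+/* Note on the read path above: block_read returns its data in
+   bd->file.f_object, a VM object it populates page by page;
+   device_read then wraps that object in a vm_map_copy via
+   vm_map_copyin_object and hands it back through *data and
+   *bytes_read.  */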
+
+static io_return_t
+device_get_status (void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t *status_count)
+{
+ struct block_data *bd = d;
+
+ switch (flavor)
+ {
+ case DEV_GET_SIZE:
+ if (*status_count != DEV_GET_SIZE_COUNT)
+ return D_INVALID_SIZE;
+ if (disk_major (MAJOR (bd->inode.i_rdev)))
+ {
+ assert (bd->ds->gd);
+
+ if (bd->part >= 0)
+ {
+ struct disklabel *lp;
+
+ assert (bd->ds->label);
+ lp = bd->ds->label[MINOR (bd->inode.i_rdev)];
+ assert (lp);
+ (status[DEV_GET_SIZE_DEVICE_SIZE]
+ = lp->d_partitions[bd->part].p_size << 9);
+ }
+ else
+ (status[DEV_GET_SIZE_DEVICE_SIZE]
+ = bd->ds->gd->part[MINOR (bd->inode.i_rdev)].nr_sects << 9);
+ }
+ else
+ {
+ assert (blk_size[MAJOR (bd->inode.i_rdev)]);
+ (status[DEV_GET_SIZE_DEVICE_SIZE]
+ = (blk_size[MAJOR (bd->inode.i_rdev)][MINOR (bd->inode.i_rdev)]
+ << BLOCK_SIZE_BITS));
+ }
+ /* It would be nice to return the block size as reported by
+ the driver, but a lot of user level code assumes the sector
+ size to be 512. */
+ status[DEV_GET_SIZE_RECORD_SIZE] = 512;
+ break;
+
+ default:
+ return D_INVALID_OPERATION;
+ }
+
+ return D_SUCCESS;
+}
+
+struct device_emulation_ops linux_block_emulation_ops =
+{
+ NULL,
+ NULL,
+ dev_to_port,
+ device_open,
+ device_close,
+ device_write,
+ NULL,
+ device_read,
+ NULL,
+ NULL,
+ device_get_status,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
diff --git a/i386/i386at/gpl/linux/linux_dma.c b/i386/i386at/gpl/linux/linux_dma.c
new file mode 100644
index 00000000..aab0fa8e
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_dma.c
@@ -0,0 +1,52 @@
+/*
+ * Linux DMA channel management.
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define MACH_INCLUDE
+#include <linux/errno.h>
+#include <asm/dma.h>
+
+/*
+ * Bitmap of allocated/free DMA channels.
+ */
+static int dma_busy = 0x10;
+
+/*
+ * Allocate a DMA channel.
+ */
+int
+request_dma(unsigned int drq, const char *name)
+{
+ if (drq > 7)
+ panic("request_dma: bad DRQ number");
+ if (dma_busy & (1 << drq))
+ return (-LINUX_EBUSY);
+ dma_busy |= 1 << drq;
+ return (0);
+}
+
+/*
+ * Free a DMA channel.
+ */
+void
+free_dma(unsigned int drq)
+{
+ if (drq > 7)
+ panic("free_dma: bad DRQ number");
+ dma_busy &= ~(1 << drq);
+}
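+
+/*
+ * Illustrative usage (not lifted from any particular driver): a driver
+ * claims its channel before programming the controller and releases it
+ * when the transfer is torn down:
+ *
+ *	if (request_dma(2, "fdc") == 0) {
+ *		... program the DMA controller ...
+ *		free_dma(2);
+ *	}
+ *
+ * Channel 4 (the cascade channel) is marked busy from the start via
+ * the 0x10 initializer above, so it can never be handed out.
+ */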
diff --git a/i386/i386at/gpl/linux/linux_emul.h b/i386/i386at/gpl/linux/linux_emul.h
new file mode 100644
index 00000000..dc338020
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_emul.h
@@ -0,0 +1,32 @@
+/*
+ * Definitions for Linux driver emulation.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+#include <i386/ipl.h>
+
+extern int linux_auto_config;
+extern int linux_intr_pri;
+
+int linux_to_mach_error (int);
+void *alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *);
+void free_contig_mem (vm_page_t);
+void collect_buffer_pages (void);
diff --git a/i386/i386at/gpl/linux/linux_init.c b/i386/i386at/gpl/linux/linux_init.c
new file mode 100644
index 00000000..d2abae28
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_init.c
@@ -0,0 +1,412 @@
+/*
+ * Linux initialization.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/init/main.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#include <mach/machine.h>
+
+#include <vm/vm_page.h>
+
+#include <i386/ipl.h>
+#include <i386/pic.h>
+#include <i386/pit.h>
+#include <i386/machspl.h>
+#include <i386/pmap.h>
+#include <i386/vm_param.h>
+
+#include <i386at/gpl/linux/linux_emul.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+
+#include <asm/system.h>
+
+/*
+ * Set if the machine has an EISA bus.
+ */
+int EISA_bus = 0;
+
+/*
+ * Timing loop count.
+ */
+unsigned long loops_per_sec = 1;
+
+/*
+ * End of physical memory.
+ */
+unsigned long high_memory;
+
+/*
+ * Flag to indicate auto-configuration is in progress.
+ */
+int linux_auto_config = 1;
+
+/*
+ * Hard drive parameters obtained from the BIOS.
+ */
+struct drive_info_struct {
+ char dummy[32];
+} drive_info;
+
+/*
+ * Forward declarations.
+ */
+static void calibrate_delay(void);
+
+extern int hz;
+extern vm_offset_t phys_last_addr;
+
+extern void timer_bh(void *);
+extern void tqueue_bh(void *);
+extern void startrtclock(void);
+extern void linux_version_init(void);
+extern void linux_kmem_init(void);
+extern unsigned long pci_init(unsigned long, unsigned long);
+extern void linux_net_emulation_init (void);
+extern void device_setup(void);
+extern void linux_printk(char *, ...);
+extern int linux_timer_intr();
+
+/*
+ * Amount of contiguous memory to allocate for initialization.
+ */
+#define CONTIG_ALLOC (512 * 1024)
+
+/*
+ * Initialize Linux drivers.
+ */
+void
+linux_init()
+{
+ char *p;
+ int i, addr;
+ int (*old_clock_handler)(), old_clock_pri;
+ unsigned memory_start, memory_end;
+ vm_page_t pages;
+
+ /*
+ * Initialize memory size.
+ */
+ high_memory = phys_last_addr;
+
+ /*
+ * Ensure interrupts are disabled.
+ */
+ (void) splhigh();
+
+ /*
+ * Program counter 0 of 8253 to interrupt hz times per second.
+ */
+ outb(PITCTL_PORT, PIT_C0|PIT_SQUAREMODE|PIT_READMODE);
+ outb(PITCTR0_PORT, CLKNUM / hz);
+ outb(PITCTR0_PORT, (CLKNUM / hz) >> 8);
+
+ /*
+ * Install our clock interrupt handler.
+ */
+ old_clock_handler = ivect[0];
+ old_clock_pri = intpri[0];
+ ivect[0] = linux_timer_intr;
+ intpri[0] = SPLHI;
+ form_pic_mask();
+
+ /*
+ * Enable interrupts.
+ */
+ (void) spl0();
+
+ /*
+ * Set Linux version.
+ */
+ linux_version_init();
+
+ /*
+ * Check if the machine has an EISA bus.
+ */
+ p = (char *)0x0FFFD9;
+ if (*p++ == 'E' && *p++ == 'I' && *p++ == 'S' && *p == 'A')
+ EISA_bus = 1;
+
+ /*
+ * Permanently allocate standard device ports.
+ */
+ request_region(0x00, 0x20, "dma1");
+ request_region(0x40, 0x20, "timer");
+ request_region(0x70, 0x10, "rtc");
+ request_region(0x80, 0x20, "dma page reg");
+ request_region(0xc0, 0x20, "dma2");
+ request_region(0xf0, 0x02, "fpu");
+ request_region(0xf8, 0x08, "fpu");
+
+ /*
+ * Install software interrupt handlers.
+ */
+ bh_base[TIMER_BH].routine = timer_bh;
+ bh_base[TIMER_BH].data = 0;
+ enable_bh(TIMER_BH);
+ bh_base[TQUEUE_BH].routine = tqueue_bh;
+ bh_base[TQUEUE_BH].data = 0;
+ enable_bh(TQUEUE_BH);
+
+ /*
+ * Set loop count.
+ */
+ calibrate_delay();
+
+ /*
+ * Initialize drive info.
+ */
+ addr = *((unsigned *)phystokv(0x104));
+ memcpy (&drive_info,
+ (void *)((addr & 0xffff) + ((addr >> 12) & 0xffff0)), 16);
+ addr = *((unsigned *)phystokv(0x118));
+ memcpy ((char *)&drive_info + 16,
+ (void *)((addr & 0xffff) + ((addr >> 12) & 0xffff0)), 16);
+
+ /*
+ * Initialize Linux memory allocator.
+ */
+ linux_kmem_init();
+
+ /*
+ * Allocate contiguous memory below 16 MB.
+ */
+ memory_start = (unsigned long)alloc_contig_mem(CONTIG_ALLOC,
+ 16 * 1024 * 1024,
+ 0, &pages);
+ if (memory_start == 0)
+ panic("linux_init: alloc_contig_mem failed");
+ memory_end = memory_start + CONTIG_ALLOC;
+
+ /*
+ * Initialize PCI bus.
+ */
+ memory_start = pci_init(memory_start, memory_end);
+
+ if (memory_start > memory_end)
+		panic("linux_init: ran out of memory");
+
+ /*
+ * Free unused memory.
+ */
+ while (pages && pages->phys_addr < round_page(memory_start))
+ pages = (vm_page_t)pages->pageq.next;
+ if (pages)
+ free_contig_mem(pages);
+
+ /*
+ * Initialize devices.
+ */
+ linux_net_emulation_init();
+ cli();
+ device_setup();
+
+ /*
+ * Disable interrupts.
+ */
+ (void) splhigh();
+
+ /*
+ * Restore clock interrupt handler.
+ */
+ ivect[0] = old_clock_handler;
+ intpri[0] = old_clock_pri;
+ form_pic_mask();
+
+ linux_auto_config = 0;
+}
+
+#ifndef NBPW
+#define NBPW 32
+#endif
+
+/*
+ * Allocate contiguous memory with the given constraints.
+ * This routine is horribly inefficient but it is presently
+ * only used during initialization so it's not that bad.
+ */
+void *
+alloc_contig_mem(unsigned size, unsigned limit,
+ unsigned mask, vm_page_t *pages)
+{
+ int i, j, bits_len;
+ unsigned *bits, len;
+ void *m;
+ vm_page_t p, page_list, tail, prev;
+ vm_offset_t addr, max_addr;
+
+ if (size == 0)
+ return (NULL);
+ size = round_page(size);
+ if ((size >> PAGE_SHIFT) > vm_page_free_count)
+ return (NULL);
+
+ /* Allocate bit array. */
+ max_addr = phys_last_addr;
+ if (max_addr > limit)
+ max_addr = limit;
+ bits_len = ((((max_addr >> PAGE_SHIFT) + NBPW - 1) / NBPW)
+ * sizeof(unsigned));
+ bits = (unsigned *)kalloc(bits_len);
+ if (!bits)
+ return (NULL);
+ memset (bits, 0, bits_len);
+
+ /*
+ * Walk the page free list and set a bit for every usable page.
+ */
+ simple_lock(&vm_page_queue_free_lock);
+ p = vm_page_queue_free;
+ while (p) {
+ if (p->phys_addr < limit)
+ (bits[(p->phys_addr >> PAGE_SHIFT) / NBPW]
+ |= 1 << ((p->phys_addr >> PAGE_SHIFT) % NBPW));
+ p = (vm_page_t)p->pageq.next;
+ }
+
+ /*
+ * Scan bit array for contiguous pages.
+ */
+ len = 0;
+ m = NULL;
+ for (i = 0; len < size && i < bits_len / sizeof (unsigned); i++)
+ for (j = 0; len < size && j < NBPW; j++)
+ if (!(bits[i] & (1 << j))) {
+ len = 0;
+ m = NULL;
+ } else {
+ if (len == 0) {
+ addr = ((vm_offset_t)(i * NBPW + j)
+ << PAGE_SHIFT);
+ if ((addr & mask) == 0) {
+ len += PAGE_SIZE;
+ m = (void *) addr;
+ }
+ } else
+ len += PAGE_SIZE;
+ }
+
+ if (len != size) {
+ simple_unlock(&vm_page_queue_free_lock);
+ kfree ((vm_offset_t)bits, bits_len);
+ return (NULL);
+ }
+
+ /*
+ * Remove pages from free list
+ * and construct list to return to caller.
+ */
+ page_list = NULL;
+ for (len = 0; len < size; len += PAGE_SIZE, addr += PAGE_SIZE) {
+ prev = NULL;
+ for (p = vm_page_queue_free; p; p = (vm_page_t)p->pageq.next) {
+ if (p->phys_addr == addr)
+ break;
+ prev = p;
+ }
+ if (!p)
+ panic("alloc_contig_mem: page not on free list");
+ if (prev)
+ prev->pageq.next = p->pageq.next;
+ else
+ vm_page_queue_free = (vm_page_t)p->pageq.next;
+ p->free = FALSE;
+ p->pageq.next = NULL;
+ if (!page_list)
+ page_list = tail = p;
+ else {
+ tail->pageq.next = (queue_entry_t)p;
+ tail = p;
+ }
+ vm_page_free_count--;
+ }
+
+ simple_unlock(&vm_page_queue_free_lock);
+ kfree((vm_offset_t)bits, bits_len);
+ if (pages)
+ *pages = page_list;
+ return (m);
+}
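+
+/*
+ * For reference: linux_kmem_init() (in linux_kmem.c) calls this as
+ * alloc_contig_mem(64 * 1024, 16 * 1024 * 1024, 0xffff, &pages) to get
+ * 64K chunks that sit below 16 MB and start on a 64K boundary, i.e.
+ * chunks an ISA DMA transfer can address without crossing a 64K
+ * boundary.
+ */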
+
+/*
+ * Free memory allocated by alloc_contig_mem.
+ */
+void
+free_contig_mem(vm_page_t pages)
+{
+ int i;
+ vm_page_t p;
+
+ for (p = pages, i = 0; p->pageq.next; p = (vm_page_t)p->pageq.next, i++)
+ p->free = TRUE;
+ p->free = TRUE;
+ simple_lock(&vm_page_queue_free_lock);
+ vm_page_free_count += i + 1;
+ p->pageq.next = (queue_entry_t)vm_page_queue_free;
+ vm_page_queue_free = pages;
+ simple_unlock(&vm_page_queue_free_lock);
+}
+
+/*
+ * Calibrate delay loop.
+ * Lifted straight from Linux.
+ */
+static void
+calibrate_delay()
+{
+ int ticks;
+
+ printk("Calibrating delay loop.. ");
+ while (loops_per_sec <<= 1) {
+ /* Wait for "start of" clock tick. */
+ ticks = jiffies;
+ while (ticks == jiffies)
+ /* nothing */;
+ /* Go .. */
+ ticks = jiffies;
+ __delay(loops_per_sec);
+ ticks = jiffies - ticks;
+ if (ticks >= hz) {
+ loops_per_sec = muldiv(loops_per_sec,
+ hz, ticks);
+ printk("ok - %lu.%02lu BogoMips\n",
+ loops_per_sec / 500000,
+ (loops_per_sec / 5000) % 100);
+ return;
+ }
+ }
+ printk("failed\n");
+}
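+
+/*
+ * The arithmetic above: loops_per_sec is doubled until __delay() takes
+ * at least one second (hz clock ticks); muldiv(loops_per_sec, hz, ticks)
+ * then rescales the measured count to delay-loop iterations per second,
+ * and the reported BogoMips value is simply loops_per_sec / 500000,
+ * printed with two decimal places.
+ */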
diff --git a/i386/i386at/gpl/linux/linux_irq.c b/i386/i386at/gpl/linux/linux_irq.c
new file mode 100644
index 00000000..d04e0531
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_irq.c
@@ -0,0 +1,246 @@
+/*
+ * Linux IRQ management.
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * linux/arch/i386/kernel/irq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+
+#include <kern/assert.h>
+
+#include <i386/machspl.h>
+#include <i386/ipl.h>
+#include <i386/pic.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+
+/*
+ * Priority at which a Linux handler should be called.
+ * This is used at the time of an IRQ allocation. It is
+ * set by emulation routines for each class of device.
+ */
+spl_t linux_intr_pri;
+
+/*
+ * Flag indicating an interrupt is being handled.
+ */
+unsigned long intr_count = 0;
+
+/*
+ * List of Linux interrupt handlers.
+ */
+static void (*linux_handlers[16])(int, struct pt_regs *);
+
+extern spl_t curr_ipl;
+extern int curr_pic_mask;
+extern int pic_mask[];
+
+extern int intnull(), prtnull();
+
+/*
+ * Generic interrupt handler for Linux devices.
+ * Set up a fake `struct pt_regs' then call the real handler.
+ */
+static int
+linux_intr(irq)
+ int irq;
+{
+ struct pt_regs regs;
+
+ kstat.interrupts[irq]++;
+ intr_count++;
+ (*linux_handlers[irq])(irq, &regs);
+ intr_count--;
+}
+
+/*
+ * Mask an IRQ.
+ */
+void
+disable_irq(irq)
+ unsigned int irq;
+{
+ int i, flags;
+
+ assert (irq < NR_IRQS);
+
+ save_flags(flags);
+ cli();
+ for (i = 0; i < intpri[irq]; i++)
+ pic_mask[i] |= 1 << irq;
+ if (curr_pic_mask != pic_mask[curr_ipl]) {
+ curr_pic_mask = pic_mask[curr_ipl];
+ outb(PIC_MASTER_OCW, curr_pic_mask);
+ outb(PIC_SLAVE_OCW, curr_pic_mask >> 8);
+ }
+ restore_flags(flags);
+}
+
+/*
+ * Unmask an IRQ.
+ */
+void
+enable_irq(irq)
+ unsigned int irq;
+{
+ int mask, i, flags;
+
+ assert (irq < NR_IRQS);
+
+ mask = 1 << irq;
+ if (irq >= 8)
+ mask |= 1 << 2;
+ save_flags(flags);
+ cli();
+ for (i = 0; i < intpri[irq]; i++)
+ pic_mask[i] &= ~mask;
+ if (curr_pic_mask != pic_mask[curr_ipl]) {
+ curr_pic_mask = pic_mask[curr_ipl];
+ outb(PIC_MASTER_OCW, curr_pic_mask);
+ outb(PIC_SLAVE_OCW, curr_pic_mask >> 8);
+ }
+ restore_flags(flags);
+}
+
+/*
+ * Attach a handler to an IRQ.
+ */
+int
+request_irq(unsigned int irq, void (*handler)(int, struct pt_regs *),
+ unsigned long flags, const char *device)
+{
+ assert(irq < 16);
+
+ if (ivect[irq] == intnull || ivect[irq] == prtnull) {
+ if (!handler)
+ return (-LINUX_EINVAL);
+ linux_handlers[irq] = handler;
+ ivect[irq] = linux_intr;
+ iunit[irq] = irq;
+ intpri[irq] = linux_intr_pri;
+ enable_irq(irq);
+ return (0);
+ }
+ return (-LINUX_EBUSY);
+}
+
+/*
+ * Deallocate an irq.
+ */
+void
+free_irq(unsigned int irq)
+{
+ if (irq > 15)
+ panic("free_irq: bad irq number");
+
+ disable_irq(irq);
+ ivect[irq] = (irq == 7) ? prtnull : intnull;
+ iunit[irq] = irq;
+ intpri[irq] = SPL0;
+}
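+
+/*
+ * Illustrative usage (the handler name is hypothetical): a driver sets
+ * linux_intr_pri for its device class and then claims its line.
+ *
+ *	linux_intr_pri = SPL5;
+ *	if (request_irq(14, my_intr_handler, 0, "hd"))
+ *		printk("hd: IRQ 14 already in use\n");
+ *	...
+ *	free_irq(14);
+ */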
+
+/*
+ * IRQ probe interrupt handler.
+ */
+void
+probe_intr(irq)
+ int irq;
+{
+ disable_irq(irq);
+}
+
+/*
+ * Set up for an IRQ probe.
+ */
+unsigned long
+probe_irq_on()
+{
+ unsigned i, irqs = 0;
+ unsigned long delay;
+
+ assert (curr_ipl == 0);
+
+ /*
+ * Allocate all available IRQs.
+ */
+ for (i = 15; i > 0; i--)
+ if (request_irq(i, probe_intr, 0, "probe") == 0)
+ irqs |= 1 << i;
+
+ /*
+ * Wait for spurious interrupts to mask themselves out.
+ */
+ for (delay = jiffies + 2; delay > jiffies; )
+ ;
+
+ /*
+ * Free IRQs that caused spurious interrupts.
+ */
+ for (i = 15; i > 0; i--) {
+ if (irqs & (1 << i) & pic_mask[0]) {
+ irqs ^= 1 << i;
+ free_irq(i);
+ }
+ }
+
+ return (irqs);
+}
+
+/*
+ * Return the result of an irq probe.
+ */
+int
+probe_irq_off(unsigned long irqs)
+{
+ unsigned i, irqs_save = irqs;
+
+ assert (curr_ipl == 0);
+
+ irqs &= pic_mask[0];
+
+ /*
+ * Deallocate IRQs.
+ */
+ for (i = 15; i > 0; i--)
+ if (irqs_save & (1 << i))
+ free_irq(i);
+
+ /*
+ * Return IRQ number.
+ */
+ if (!irqs)
+ return (0);
+ i = ffz(~irqs);
+ if (irqs != (irqs & (1 << i)))
+ i = -i;
+ return (i);
+}
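+
+/*
+ * Probing protocol (mirroring the Linux interface): a driver calls
+ * probe_irq_on(), coaxes its hardware into raising an interrupt, and
+ * passes the returned mask to probe_irq_off(), which yields the IRQ
+ * number that fired, 0 if none did, or a negative value if more than
+ * one did.
+ */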
diff --git a/i386/i386at/gpl/linux/linux_kmem.c b/i386/i386at/gpl/linux/linux_kmem.c
new file mode 100644
index 00000000..fe8de194
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_kmem.c
@@ -0,0 +1,481 @@
+/*
+ * Linux memory allocation.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ *
+ */
+
+#include <sys/types.h>
+
+#include <mach/mach_types.h>
+#include <mach/vm_param.h>
+
+#include <kern/assert.h>
+#include <kern/kalloc.h>
+
+#include <vm/vm_page.h>
+
+#include <i386at/gpl/linux/linux_emul.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/malloc.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+
+/* Amount of memory to reserve for Linux memory allocator.
+ We reserve 64K chunks to stay within DMA limits.
+ Increase MEM_CHUNKS if the kernel is running out of memory. */
+#define MEM_CHUNK_SIZE (64 * 1024)
+#define MEM_CHUNKS 3
+
+/* Minimum amount that linux_kmalloc will allocate. */
+#define MIN_ALLOC 12
+
+#ifndef NBPW
+#define NBPW 32
+#endif
+
+/* Memory block header. */
+struct blkhdr
+{
+ unsigned short free; /* 1 if block is free */
+ unsigned short size; /* size of block */
+};
+
+/* This structure heads a page allocated by linux_kmalloc. */
+struct pagehdr
+{
+ unsigned size; /* size (multiple of PAGE_SIZE) */
+ struct pagehdr *next; /* next header in list */
+};
+
+/* This structure describes a memory chunk. */
+struct chunkhdr
+{
+ unsigned long start; /* start address */
+ unsigned long end; /* end address */
+ unsigned long bitmap; /* busy/free bitmap of pages */
+};
+
+/* Chunks from which pages are allocated. */
+static struct chunkhdr pages_free[MEM_CHUNKS];
+
+/* Memory list maintained by linux_kmalloc. */
+static struct pagehdr *memlist;
+
+/* Some statistics. */
+int num_block_coalesce = 0;
+int num_page_collect = 0;
+int linux_mem_avail;
+
+/* Initialize the Linux memory allocator. */
+void
+linux_kmem_init ()
+{
+ int i, j;
+ vm_page_t p, pages;
+
+ for (i = 0; i < MEM_CHUNKS; i++)
+ {
+ /* Allocate memory. */
+ pages_free[i].start = (unsigned long) alloc_contig_mem (MEM_CHUNK_SIZE,
+ 16 * 1024 * 1024,
+ 0xffff, &pages);
+
+ assert (pages_free[i].start);
+ assert ((pages_free[i].start & 0xffff) == 0);
+
+ /* Sanity check: ensure pages are contiguous and within DMA limits. */
+ for (p = pages, j = 0; j < MEM_CHUNK_SIZE - PAGE_SIZE; j += PAGE_SIZE)
+ {
+ assert (p->phys_addr < 16 * 1024 * 1024);
+ assert (p->phys_addr + PAGE_SIZE
+ == ((vm_page_t) p->pageq.next)->phys_addr);
+
+ p = (vm_page_t) p->pageq.next;
+ }
+
+ pages_free[i].end = pages_free[i].start + MEM_CHUNK_SIZE;
+
+ /* Initialize free page bitmap. */
+ pages_free[i].bitmap = 0;
+ j = MEM_CHUNK_SIZE >> PAGE_SHIFT;
+ while (--j >= 0)
+ pages_free[i].bitmap |= 1 << j;
+ }
+
+ linux_mem_avail = (MEM_CHUNKS * MEM_CHUNK_SIZE) >> PAGE_SHIFT;
+}
+
+/* Return the number by which the page size should be
+ shifted such that the resulting value is >= SIZE. */
+static unsigned long
+get_page_order (int size)
+{
+ unsigned long order;
+
+ for (order = 0; (PAGE_SIZE << order) < size; order++)
+ ;
+ return order;
+}
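+
+/* For example, with 4K pages get_page_order (4096) is 0,
+   get_page_order (4097) is 1 and get_page_order (16384) is 2: the
+   smallest ORDER with (PAGE_SIZE << ORDER) >= SIZE.  */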
+
+#ifdef LINUX_DEV_DEBUG
+static void
+check_page_list (int line)
+{
+ unsigned size;
+ struct pagehdr *ph;
+ struct blkhdr *bh;
+
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ if ((int) ph & PAGE_MASK)
+ panic ("%s:%d: page header not aligned", __FILE__, line);
+
+ size = 0;
+ bh = (struct blkhdr *) (ph + 1);
+ while (bh < (struct blkhdr *) ((void *) ph + ph->size))
+ {
+ size += bh->size + sizeof (struct blkhdr);
+ bh = (void *) (bh + 1) + bh->size;
+ }
+
+ if (size + sizeof (struct pagehdr) != ph->size)
+ panic ("%s:%d: memory list destroyed", __FILE__, line);
+ }
+}
+#else
+#define check_page_list(line)
+#endif
+
+/* Merge adjacent free blocks in the memory list. */
+static void
+coalesce_blocks ()
+{
+ struct pagehdr *ph;
+ struct blkhdr *bh, *bhp, *ebh;
+
+ num_block_coalesce++;
+
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ bh = (struct blkhdr *) (ph + 1);
+ ebh = (struct blkhdr *) ((void *) ph + ph->size);
+ while (1)
+ {
+ /* Skip busy blocks. */
+ while (bh < ebh && ! bh->free)
+ bh = (struct blkhdr *) ((void *) (bh + 1) + bh->size);
+ if (bh == ebh)
+ break;
+
+ /* Merge adjacent free blocks. */
+ while (1)
+ {
+ bhp = (struct blkhdr *) ((void *) (bh + 1) + bh->size);
+ if (bhp == ebh)
+ {
+ bh = bhp;
+ break;
+ }
+ if (! bhp->free)
+ {
+ bh = (struct blkhdr *) ((void *) (bhp + 1) + bhp->size);
+ break;
+ }
+ bh->size += bhp->size + sizeof (struct blkhdr);
+ }
+ }
+ }
+}
+
+/* Allocate SIZE bytes of memory.
+ The PRIORITY parameter specifies various flags
+ such as DMA, atomicity, etc. It is not used by Mach. */
+void *
+linux_kmalloc (unsigned int size, int priority)
+{
+ int order, coalesced = 0;
+ unsigned flags;
+ struct pagehdr *ph;
+ struct blkhdr *bh, *new_bh;
+
+ if (size < MIN_ALLOC)
+ size = MIN_ALLOC;
+ else
+ size = (size + sizeof (int) - 1) & ~(sizeof (int) - 1);
+
+ assert (size <= (MEM_CHUNK_SIZE
+ - sizeof (struct pagehdr)
+ - sizeof (struct blkhdr)));
+
+ save_flags (flags);
+ cli ();
+
+again:
+ check_page_list (__LINE__);
+
+ /* Walk the page list and find the first free block with size
+ greater than or equal to the one required. */
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ bh = (struct blkhdr *) (ph + 1);
+ while (bh < (struct blkhdr *) ((void *) ph + ph->size))
+ {
+ if (bh->free && bh->size >= size)
+ {
+ bh->free = 0;
+ if (bh->size - size >= MIN_ALLOC + sizeof (struct blkhdr))
+ {
+ /* Split the current block and create a new free block. */
+ new_bh = (void *) (bh + 1) + size;
+ new_bh->free = 1;
+ new_bh->size = bh->size - size - sizeof (struct blkhdr);
+ bh->size = size;
+ }
+
+ check_page_list (__LINE__);
+
+ restore_flags (flags);
+ return bh + 1;
+ }
+ bh = (void *) (bh + 1) + bh->size;
+ }
+ }
+
+ check_page_list (__LINE__);
+
+ /* Allocation failed; coalesce free blocks and try again. */
+ if (! coalesced)
+ {
+ coalesce_blocks ();
+ coalesced = 1;
+ goto again;
+ }
+
+ /* Allocate more pages. */
+ order = get_page_order (size
+ + sizeof (struct pagehdr)
+ + sizeof (struct blkhdr));
+ ph = (struct pagehdr *) __get_free_pages (GFP_KERNEL, order, ~0UL);
+ if (! ph)
+ {
+ restore_flags (flags);
+ return NULL;
+ }
+
+ ph->size = PAGE_SIZE << order;
+ ph->next = memlist;
+ memlist = ph;
+ bh = (struct blkhdr *) (ph + 1);
+ bh->free = 0;
+ bh->size = ph->size - sizeof (struct pagehdr) - sizeof (struct blkhdr);
+ if (bh->size - size >= MIN_ALLOC + sizeof (struct blkhdr))
+ {
+ new_bh = (void *) (bh + 1) + size;
+ new_bh->free = 1;
+ new_bh->size = bh->size - size - sizeof (struct blkhdr);
+ bh->size = size;
+ }
+
+ check_page_list (__LINE__);
+
+ restore_flags (flags);
+ return bh + 1;
+}
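+
+/* Layout note: each run of pages handed out above starts with a
+   struct pagehdr, followed by (struct blkhdr, data) pairs that exactly
+   fill the run; a block is split only when the leftover could hold at
+   least MIN_ALLOC bytes plus another header.  check_page_list verifies
+   this invariant when LINUX_DEV_DEBUG is defined.  */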
+
+/* Free memory P previously allocated by linux_kmalloc. */
+void
+linux_kfree (void *p)
+{
+ unsigned flags;
+ struct blkhdr *bh, *bhp;
+ struct pagehdr *ph;
+
+ assert (((int) p & (sizeof (int) - 1)) == 0);
+
+ save_flags (flags);
+ cli ();
+
+ check_page_list (__LINE__);
+
+ for (ph = memlist; ph; ph = ph->next)
+ if (p >= (void *) ph && p < (void *) ph + ph->size)
+ break;
+
+ assert (ph);
+
+ bh = (struct blkhdr *) p - 1;
+
+ assert (! bh->free);
+ assert (bh->size >= MIN_ALLOC);
+ assert ((bh->size & (sizeof (int) - 1)) == 0);
+
+ bh->free = 1;
+
+ check_page_list (__LINE__);
+
+ restore_flags (flags);
+}
+
+/* Free any pages that are not in use.
+ Called by __get_free_pages when pages are running low. */
+static void
+collect_kmalloc_pages ()
+{
+ struct blkhdr *bh;
+ struct pagehdr *ph, **prev_ph;
+
+ check_page_list (__LINE__);
+
+ coalesce_blocks ();
+
+ check_page_list (__LINE__);
+
+ ph = memlist;
+ prev_ph = &memlist;
+ while (ph)
+ {
+ bh = (struct blkhdr *) (ph + 1);
+ if (bh->free && (void *) (bh + 1) + bh->size == (void *) ph + ph->size)
+ {
+ *prev_ph = ph->next;
+ free_pages ((unsigned long) ph, get_page_order (ph->size));
+ ph = *prev_ph;
+ }
+ else
+ {
+ prev_ph = &ph->next;
+ ph = ph->next;
+ }
+ }
+
+ check_page_list (__LINE__);
+}
+
+/* Allocate ORDER + 1 number of physically contiguous pages.
+ PRIORITY and MAX_ADDR are not used in Mach.
+
+ XXX: This needs to be dynamic. To do that we need to make
+ the Mach page manipulation routines interrupt safe and they
+   must provide machine-dependent hooks. */
+unsigned long
+__get_free_pages (int priority, unsigned long order, unsigned long max_addr)
+{
+ int i, pages_collected = 0;
+ unsigned flags, bits, off, j, len;
+
+ assert ((PAGE_SIZE << order) <= MEM_CHUNK_SIZE);
+
+ /* Construct bitmap of contiguous pages. */
+ bits = 0;
+ j = 0;
+ len = 0;
+ while (len < (PAGE_SIZE << order))
+ {
+ bits |= 1 << j++;
+ len += PAGE_SIZE;
+ }
+
+again:
+ save_flags (flags);
+ cli ();
+
+ /* Search each chunk for the required number of contiguous pages. */
+ for (i = 0; i < MEM_CHUNKS; i++)
+ {
+ off = 0;
+ j = bits;
+ while (MEM_CHUNK_SIZE - off >= (PAGE_SIZE << order))
+ {
+ if ((pages_free[i].bitmap & j) == j)
+ {
+ pages_free[i].bitmap &= ~j;
+ linux_mem_avail -= order + 1;
+ restore_flags (flags);
+ return pages_free[i].start + off;
+ }
+ j <<= 1;
+ off += PAGE_SIZE;
+ }
+ }
+
+ /* Allocation failed; collect kmalloc and buffer pages
+ and try again. */
+ if (! pages_collected)
+ {
+ num_page_collect++;
+ collect_kmalloc_pages ();
+ collect_buffer_pages ();
+ pages_collected = 1;
+ goto again;
+ }
+
+ printf ("%s:%d: __get_free_pages: ran out of pages\n", __FILE__, __LINE__);
+
+ restore_flags (flags);
+ return 0;
+}
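+
+/* For example, with 4K pages an order-1 request builds the two-bit
+   pattern 0x3 and slides it across each chunk's 16-page bitmap until
+   it finds two adjacent free pages; the matching bits are then cleared
+   to mark those pages busy.  */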
+
+/* Free ORDER + 1 number of physically
+ contiguous pages starting at address ADDR. */
+void
+free_pages (unsigned long addr, unsigned long order)
+{
+ int i;
+ unsigned flags, bits, len, j;
+
+ assert ((addr & PAGE_MASK) == 0);
+
+ for (i = 0; i < MEM_CHUNKS; i++)
+ if (addr >= pages_free[i].start && addr < pages_free[i].end)
+ break;
+
+ assert (i < MEM_CHUNKS);
+
+  /* Construct bitmap of contiguous pages. */
+ len = 0;
+ j = 0;
+ bits = 0;
+ while (len < (PAGE_SIZE << order))
+ {
+ bits |= 1 << j++;
+ len += PAGE_SIZE;
+ }
+ bits <<= (addr - pages_free[i].start) >> PAGE_SHIFT;
+
+ save_flags (flags);
+ cli ();
+
+ assert ((pages_free[i].bitmap & bits) == 0);
+
+ pages_free[i].bitmap |= bits;
+ linux_mem_avail += order + 1;
+ restore_flags (flags);
+}
+
+/* Allocate SIZE bytes of memory. The pages need not be contiguous. */
+void *
+vmalloc (unsigned long size)
+{
+ return (void *) __get_free_pages (GFP_KERNEL, get_page_order (size), ~0UL);
+}
diff --git a/i386/i386at/gpl/linux/linux_misc.c b/i386/i386at/gpl/linux/linux_misc.c
new file mode 100644
index 00000000..6e7b33b9
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_misc.c
@@ -0,0 +1,303 @@
+/*
+ * Miscellaneous routines and data for Linux emulation.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/fs/proc/scsi.c
+ * (c) 1995 Michael Neuffer neuffer@goofy.zdv.uni-mainz.de
+ *
+ * The original version was derived from linux/fs/proc/net.c,
+ * which is Copyright (C) 1991, 1992 Linus Torvalds.
+ * Much has been rewritten, but some of the code still remains.
+ *
+ * /proc/scsi directory handling functions
+ *
+ * last change: 95/07/04
+ *
+ * Initial version: March '95
+ * 95/05/15 Added subdirectories for each driver and show every
+ * registered HBA as a single file.
+ * 95/05/30 Added rudimentary write support for parameter passing
+ * 95/07/04 Fixed bugs in directory handling
+ * 95/09/13 Update to support the new proc-dir tree
+ *
+ * TODO: Improve support to write to the driver files
+ * Add some more comments
+ */
+
+/*
+ * linux/fs/buffer.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+#include <mach/vm_param.h>
+#include <kern/thread.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <device/device_types.h>
+#include <i386at/gpl/linux/linux_emul.h>
+
+#define MACH_INCLUDE
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/blk.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel_stat.h>
+
+int (*dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
+ off_t offset, int length, int inout) = 0;
+
+struct kernel_stat kstat;
+
+int
+linux_to_mach_error (int err)
+{
+ switch (err)
+ {
+ case 0:
+ return D_SUCCESS;
+
+ case -LINUX_EPERM:
+ return D_INVALID_OPERATION;
+
+ case -LINUX_EIO:
+ return D_IO_ERROR;
+
+ case -LINUX_ENXIO:
+ return D_NO_SUCH_DEVICE;
+
+ case -LINUX_EACCES:
+ return D_INVALID_OPERATION;
+
+ case -LINUX_EFAULT:
+ return D_INVALID_SIZE;
+
+ case -LINUX_EBUSY:
+ return D_ALREADY_OPEN;
+
+ case -LINUX_EINVAL:
+ return D_INVALID_SIZE;
+
+ case -LINUX_EROFS:
+ return D_READ_ONLY;
+
+ case -LINUX_EWOULDBLOCK:
+ return D_WOULD_BLOCK;
+
+ default:
+ printf ("linux_to_mach_error: unknown code %d\n", err);
+ return D_IO_ERROR;
+ }
+}
+
+int
+issig ()
+{
+ return current_thread ()->wait_result != THREAD_AWAKENED;
+}
+
+int
+block_fsync (struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+int
+verify_area (int rw, const void *p, unsigned long size)
+{
+ vm_prot_t prot = (rw == VERIFY_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
+ vm_offset_t addr = trunc_page ((vm_offset_t) p);
+ vm_size_t len = round_page ((vm_size_t) size);
+ vm_map_entry_t entry;
+
+ vm_map_lock_read (current_map ());
+
+ while (1)
+ {
+ if (! vm_map_lookup_entry (current_map (), addr, &entry)
+ || (entry->protection & prot) != prot)
+ {
+ vm_map_unlock_read (current_map ());
+ return -LINUX_EFAULT;
+ }
+ if (entry->vme_end - entry->vme_start >= len)
+ break;
+ len -= entry->vme_end - entry->vme_start;
+ addr += entry->vme_end - entry->vme_start;
+ }
+
+ vm_map_unlock_read (current_map ());
+ return 0;
+}
+
+/*
+ * Print device name (in decimal, hexadecimal or symbolic) -
+ * at present hexadecimal only.
+ * Note: returns pointer to static data!
+ */
+char *
+kdevname(kdev_t dev)
+{
+ static char buffer[32];
+ sprintf(buffer, "%02x:%02x", MAJOR(dev), MINOR(dev));
+ return buffer;
+}
+
+/* RO fail safe mechanism */
+
+static long ro_bits[MAX_BLKDEV][8];
+
+int
+is_read_only(kdev_t dev)
+{
+ int minor,major;
+
+ major = MAJOR(dev);
+ minor = MINOR(dev);
+ if (major < 0 || major >= MAX_BLKDEV) return 0;
+ return ro_bits[major][minor >> 5] & (1 << (minor & 31));
+}
+
+void
+set_device_ro(kdev_t dev,int flag)
+{
+ int minor,major;
+
+ major = MAJOR(dev);
+ minor = MINOR(dev);
+ if (major < 0 || major >= MAX_BLKDEV) return;
+ if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
+ else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
+}
+
+/*
+ * linux/lib/string.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * stupid library routines.. The optimized versions should generally be found
+ * as inline code in <asm-xx/string.h>
+ *
+ * These are buggy as well..
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+char * ___strtok = NULL;
+
+#ifndef __HAVE_ARCH_STRSPN
+size_t strspn(const char *s, const char *accept)
+{
+ const char *p;
+ const char *a;
+ size_t count = 0;
+
+ for (p = s; *p != '\0'; ++p) {
+ for (a = accept; *a != '\0'; ++a) {
+ if (*p == *a)
+ break;
+ }
+ if (*a == '\0')
+ return count;
+ ++count;
+ }
+
+ return count;
+}
+#endif
+
+#ifndef __HAVE_ARCH_STRPBRK
+char * strpbrk(const char * cs,const char * ct)
+{
+ const char *sc1,*sc2;
+
+ for( sc1 = cs; *sc1 != '\0'; ++sc1) {
+ for( sc2 = ct; *sc2 != '\0'; ++sc2) {
+ if (*sc1 == *sc2)
+ return (char *) sc1;
+ }
+ }
+ return NULL;
+}
+#endif
+
+#ifndef __HAVE_ARCH_STRTOK
+char * strtok(char * s,const char * ct)
+{
+ char *sbegin, *send;
+
+ sbegin = s ? s : ___strtok;
+ if (!sbegin) {
+ return NULL;
+ }
+ sbegin += strspn(sbegin,ct);
+ if (*sbegin == '\0') {
+ ___strtok = NULL;
+ return( NULL );
+ }
+ send = strpbrk( sbegin, ct);
+ if (send && *send != '\0')
+ *send++ = '\0';
+ ___strtok = send;
+ return (sbegin);
+}
+#endif
+
+struct proc_dir_entry proc_scsi;
+struct inode_operations proc_scsi_inode_operations;
+struct proc_dir_entry proc_net;
+struct inode_operations proc_net_inode_operations;
+
+int
+proc_register (struct proc_dir_entry *xxx1, struct proc_dir_entry *xxx2)
+{
+ return 0;
+}
+
+int
+proc_unregister (struct proc_dir_entry *xxx1, int xxx2)
+{
+ return 0;
+}
+
+void
+add_blkdev_randomness (int major)
+{
+}
+
+void
+do_gettimeofday (struct timeval *tv)
+{
+ host_get_time (1, tv);
+}
+
+int
+dev_get_info (char *buffer, char **start, off_t offset, int length, int dummy)
+{
+ return 0;
+}
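
verify_area() and linux_to_mach_error() above are the usual glue on a
driver's I/O path: the driver validates a user buffer the Linux way and the
result is translated into a Mach device return code.  A hypothetical helper
showing that pattern (the function and its arguments are illustrative, not
part of this file):

/* Check that BUF/SIZE is writable before a driver fills it in, and
   translate the Linux-style result into a Mach device code.  */
static io_return_t
check_user_buffer (const void *buf, unsigned long size)
{
  int err = verify_area (VERIFY_WRITE, buf, size); /* 0 or -LINUX_EFAULT */

  return linux_to_mach_error (err); /* D_SUCCESS or D_INVALID_SIZE */
}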
diff --git a/i386/i386at/gpl/linux/linux_net.c b/i386/i386at/gpl/linux/linux_net.c
new file mode 100644
index 00000000..6a83a98f
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_net.c
@@ -0,0 +1,520 @@
+/*
+ * Linux network driver support.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Ethernet-type device handling.
+ *
+ * Version: @(#)eth.c 1.0.7 05/25/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Fixes:
+ * Mr Linux : Arp problems
+ * Alan Cox : Generic queue tidyup (very tiny here)
+ * Alan Cox : eth_header ntohs should be htons
+ * Alan Cox : eth_rebuild_header missing an htons and
+ * minor other things.
+ * Tegge : Arp bug fixes.
+ * Florian : Removed many unnecessary functions, code cleanup
+ * and changes for new arp and skbuff.
+ * Alan Cox : Redid header building to reflect new format.
+ * Alan Cox : ARP only when compiled with CONFIG_INET
+ * Greg Page : 802.2 and SNAP stuff.
+ * Alan Cox : MAC layer pointers/new format.
+ * Paul Gortmaker : eth_copy_and_sum shouldn't csum padding.
+ * Alan Cox : Protect against forwarding explosions with
+ * older network drivers and IFF_ALLMULTI
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <sys/types.h>
+
+#include <mach/mach_types.h>
+#include <mach/kern_return.h>
+#include <mach/mig_errors.h>
+#include <mach/port.h>
+#include <mach/vm_param.h>
+#include <mach/notify.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/if_hdr.h>
+#include <device/net_io.h>
+#include "device_reply.h"
+
+#include <i386at/dev_hdr.h>
+#include <i386at/device_emul.h>
+
+#include <i386at/gpl/linux/linux_emul.h>
+
+#define MACH_INCLUDE
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+/* One of these is associated with each instance of a device. */
+struct net_data
+{
+ ipc_port_t port; /* device port */
+ struct ifnet ifnet; /* Mach ifnet structure (needed for filters) */
+ struct device device; /* generic device structure */
+ struct linux_device *dev; /* Linux network device structure */
+};
+
+/* List of sk_buffs waiting to be freed. */
+static struct sk_buff_head skb_done_list;
+
+/* Forward declarations. */
+
+extern struct device_emulation_ops linux_net_emulation_ops;
+
+static int print_packet_size = 0;
+
+/* Linux kernel network support routines. */
+
+/* Requeue packet SKB for transmission after the interface DEV
+ has timed out. The priority of the packet is PRI.
+ In Mach, we simply drop the packet like the native drivers. */
+void
+dev_queue_xmit (struct sk_buff *skb, struct linux_device *dev, int pri)
+{
+ dev_kfree_skb (skb, FREE_WRITE);
+}
+
+/* Close the device DEV. */
+int
+dev_close (struct linux_device *dev)
+{
+ return 0;
+}
+
+/* Network software interrupt handler. */
+void
+net_bh (void *xxx)
+{
+ int len;
+ struct sk_buff *skb;
+ struct linux_device *dev;
+
+ /* Start transmission on interfaces. */
+ for (dev = dev_base; dev; dev = dev->next)
+ {
+ if (dev->base_addr && dev->base_addr != 0xffe0)
+ while (1)
+ {
+ skb = skb_dequeue (&dev->buffs[0]);
+ if (skb)
+ {
+ len = skb->len;
+ if ((*dev->hard_start_xmit) (skb, dev))
+ {
+ skb_queue_head (&dev->buffs[0], skb);
+ mark_bh (NET_BH);
+ break;
+ }
+ else if (print_packet_size)
+ printf ("net_bh: length %d\n", len);
+ }
+ else
+ break;
+ }
+ }
+}
+
+/* Free all sk_buffs on the done list.
+ This routine is called by the iodone thread in ds_routines.c. */
+void
+free_skbuffs ()
+{
+ struct sk_buff *skb;
+
+ while (1)
+ {
+ skb = skb_dequeue (&skb_done_list);
+ if (skb)
+ {
+ if (skb->copy)
+ {
+ vm_map_copy_discard (skb->copy);
+ skb->copy = NULL;
+ }
+ if (IP_VALID (skb->reply))
+ {
+ ds_device_write_reply (skb->reply, skb->reply_type, 0, skb->len);
+ skb->reply = IP_NULL;
+ }
+ dev_kfree_skb (skb, FREE_WRITE);
+ }
+ else
+ break;
+ }
+}
+
+/* Allocate an sk_buff with SIZE bytes of data space. */
+struct sk_buff *
+alloc_skb (unsigned int size, int priority)
+{
+ return dev_alloc_skb (size);
+}
+
+/* Free SKB. */
+void
+kfree_skb (struct sk_buff *skb, int priority)
+{
+ dev_kfree_skb (skb, priority);
+}
+
+/* Allocate an sk_buff with SIZE bytes of data space. */
+struct sk_buff *
+dev_alloc_skb (unsigned int size)
+{
+ struct sk_buff *skb;
+
+ skb = linux_kmalloc (sizeof (struct sk_buff) + size, GFP_KERNEL);
+ if (skb)
+ {
+ skb->dev = NULL;
+ skb->reply = IP_NULL;
+ skb->copy = NULL;
+ skb->len = size;
+ skb->prev = skb->next = NULL;
+ skb->list = NULL;
+ if (size)
+ {
+ skb->data = (unsigned char *) (skb + 1);
+ skb->tail = skb->data + size;
+ }
+ else
+ skb->data = skb->tail = NULL;
+ skb->head = skb->data;
+ }
+ return skb;
+}
+
+/* Free the sk_buff SKB. */
+void
+dev_kfree_skb (struct sk_buff *skb, int mode)
+{
+ unsigned flags;
+ extern void *io_done_list;
+
+ /* Queue sk_buff on done list if there is a
+ page list attached or we need to send a reply.
+ Wake up the iodone thread to process the list. */
+ if (skb->copy || IP_VALID (skb->reply))
+ {
+ skb_queue_tail (&skb_done_list, skb);
+ save_flags (flags);
+ thread_wakeup ((event_t) &io_done_list);
+ restore_flags (flags);
+ return;
+ }
+ linux_kfree (skb);
+}
+
+/* Accept packet SKB received on an interface. */
+void
+netif_rx (struct sk_buff *skb)
+{
+ ipc_kmsg_t kmsg;
+ struct ether_header *eh;
+ struct packet_header *ph;
+ struct linux_device *dev = skb->dev;
+
+ assert (skb != NULL);
+
+ if (print_packet_size)
+ printf ("netif_rx: length %d\n", skb->len);
+
+ /* Allocate a kernel message buffer. */
+ kmsg = net_kmsg_get ();
+ if (! kmsg)
+ {
+ dev_kfree_skb (skb, FREE_READ);
+ return;
+ }
+
+ /* Copy packet into message buffer. */
+ eh = (struct ether_header *) (net_kmsg (kmsg)->header);
+ ph = (struct packet_header *) (net_kmsg (kmsg)->packet);
+ memcpy (eh, skb->data, sizeof (struct ether_header));
+ memcpy (ph + 1, skb->data + sizeof (struct ether_header),
+ skb->len - sizeof (struct ether_header));
+ ph->type = eh->ether_type;
+ ph->length = (skb->len - sizeof (struct ether_header)
+ + sizeof (struct packet_header));
+
+ dev_kfree_skb (skb, FREE_READ);
+
+ /* Pass packet up to the microkernel. */
+ net_packet (&dev->net_data->ifnet, kmsg,
+ ph->length, ethernet_priority (kmsg));
+}
+
+/* Mach device interface routines. */
+
+/* Return a send right associated with network device ND. */
+static ipc_port_t
+dev_to_port (void *nd)
+{
+ return (nd
+ ? ipc_port_make_send (((struct net_data *) nd)->port)
+ : IP_NULL);
+}
+
+static io_return_t
+device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode, char *name, device_t *devp)
+{
+ io_return_t err = D_SUCCESS;
+ ipc_port_t notify;
+ struct ifnet *ifp;
+ struct linux_device *dev;
+ struct net_data *nd;
+
+ /* Search for the device. */
+ for (dev = dev_base; dev; dev = dev->next)
+ if (dev->base_addr
+ && dev->base_addr != 0xffe0
+ && ! strcmp (name, dev->name))
+ break;
+ if (! dev)
+ return D_NO_SUCH_DEVICE;
+
+ /* Allocate and initialize device data if this is the first open. */
+ nd = dev->net_data;
+ if (! nd)
+ {
+ dev->net_data = nd = ((struct net_data *)
+ kalloc (sizeof (struct net_data)));
+ if (! nd)
+ {
+ err = D_NO_MEMORY;
+ goto out;
+ }
+ nd->dev = dev;
+ nd->device.emul_data = nd;
+ nd->device.emul_ops = &linux_net_emulation_ops;
+ nd->port = ipc_port_alloc_kernel ();
+ if (nd->port == IP_NULL)
+ {
+ err = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ ipc_kobject_set (nd->port, (ipc_kobject_t) &nd->device, IKOT_DEVICE);
+ notify = ipc_port_make_sonce (nd->port);
+ ip_lock (nd->port);
+ ipc_port_nsrequest (nd->port, 1, notify, &notify);
+ assert (notify == IP_NULL);
+
+ ifp = &nd->ifnet;
+ ifp->if_unit = dev->name[strlen (dev->name) - 1] - '0';
+ ifp->if_flags = IFF_UP|IFF_RUNNING;
+ ifp->if_mtu = dev->mtu;
+ ifp->if_header_size = dev->hard_header_len;
+ ifp->if_header_format = dev->type;
+ ifp->if_address_size = dev->addr_len;
+ ifp->if_address = dev->dev_addr;
+ if_init_queues (ifp);
+
+ if (dev->open)
+ {
+ linux_intr_pri = SPL6;
+ if ((*dev->open) (dev))
+ err = D_NO_SUCH_DEVICE;
+ }
+
+ out:
+ if (err)
+ {
+ if (nd)
+ {
+ if (nd->port != IP_NULL)
+ {
+ ipc_kobject_set (nd->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel (nd->port);
+ }
+ kfree ((vm_offset_t) nd, sizeof (struct net_data));
+ nd = NULL;
+ dev->net_data = NULL;
+ }
+ }
+ else
+ {
+ dev->flags |= LINUX_IFF_UP|LINUX_IFF_RUNNING;
+ skb_queue_head_init (&dev->buffs[0]);
+ }
+ if (IP_VALID (reply_port))
+ ds_device_open_reply (reply_port, reply_port_type,
+ err, dev_to_port (nd));
+ return MIG_NO_REPLY;
+ }
+
+ *devp = &nd->device;
+ return D_SUCCESS;
+}
+
+static io_return_t
+device_write (void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t bn, io_buf_ptr_t data, unsigned int count,
+ int *bytes_written)
+{
+ unsigned char *p;
+ int i, amt, skblen, s;
+ io_return_t err = 0;
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+ struct net_data *nd = d;
+ struct linux_device *dev = nd->dev;
+ struct sk_buff *skb;
+
+ if (count == 0 || count > dev->mtu + dev->hard_header_len)
+ return D_INVALID_SIZE;
+
+ /* Allocate a sk_buff. */
+ amt = PAGE_SIZE - (copy->offset & PAGE_MASK);
+ skblen = (amt >= count) ? 0 : count;
+ skb = dev_alloc_skb (skblen);
+ if (! skb)
+ return D_NO_MEMORY;
+
+ /* Copy user data. This is only required if it spans multiple pages. */
+ if (skblen == 0)
+ {
+ assert (copy->cpy_npages == 1);
+
+ skb->copy = copy;
+ skb->data = ((void *) copy->cpy_page_list[0]->phys_addr
+ + (copy->offset & PAGE_MASK));
+ skb->len = count;
+ skb->head = skb->data;
+ skb->tail = skb->data + skb->len;
+ }
+ else
+ {
+ memcpy (skb->data,
+ ((void *) copy->cpy_page_list[0]->phys_addr
+ + (copy->offset & PAGE_MASK)),
+ amt);
+ count -= amt;
+ p = skb->data + amt;
+ for (i = 1; count > 0 && i < copy->cpy_npages; i++)
+ {
+ amt = PAGE_SIZE;
+ if (amt > count)
+ amt = count;
+ memcpy (p, (void *) copy->cpy_page_list[i]->phys_addr, amt);
+ count -= amt;
+ p += amt;
+ }
+
+ assert (count == 0);
+
+ vm_map_copy_discard (copy);
+ }
+
+ skb->dev = dev;
+ skb->reply = reply_port;
+ skb->reply_type = reply_port_type;
+
+ /* Queue packet for transmission and schedule a software interrupt. */
+ s = splimp ();
+ if (dev->buffs[0].next != (struct sk_buff *) &dev->buffs[0]
+ || (*dev->hard_start_xmit) (skb, dev))
+ {
+ __skb_queue_tail (&dev->buffs[0], skb);
+ mark_bh (NET_BH);
+ }
+ splx (s);
+
+ return MIG_NO_REPLY;
+}
+
+static io_return_t
+device_get_status (void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t *count)
+{
+ return net_getstat (&((struct net_data *) d)->ifnet, flavor, status, count);
+}
+
+static io_return_t
+device_set_filter (void *d, ipc_port_t port, int priority,
+ filter_t *filter, unsigned filter_count)
+{
+ return net_set_filter (&((struct net_data *) d)->ifnet,
+ port, priority, filter, filter_count);
+}
+
+struct device_emulation_ops linux_net_emulation_ops =
+{
+ NULL,
+ NULL,
+ dev_to_port,
+ device_open,
+ NULL,
+ device_write,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ device_get_status,
+ device_set_filter,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+/* Do any initialization required for network devices. */
+void
+linux_net_emulation_init ()
+{
+ skb_queue_head_init (&skb_done_list);
+}
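
The single-page fast path in device_write() above is decided purely by page
arithmetic: when the whole packet fits in the first page of the incoming
vm_map_copy, the sk_buff borrows that page (skblen == 0) and free_skbuffs()
discards the copy later; otherwise the data is gathered into a freshly
allocated sk_buff.  A minimal sketch of that decision, written as a
standalone function for illustration only:

/* Return non-zero when COUNT bytes starting at byte OFFSET of a page list
   lie entirely within the first page, so the page can be mapped into the
   sk_buff instead of copied.  */
static int
write_fits_first_page (unsigned long offset, unsigned int count)
{
  unsigned int room = PAGE_SIZE - (offset % PAGE_SIZE); /* left in 1st page */

  return room >= count;
}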
diff --git a/i386/i386at/gpl/linux/linux_port.c b/i386/i386at/gpl/linux/linux_port.c
new file mode 100644
index 00000000..4a79c499
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_port.c
@@ -0,0 +1,79 @@
+/*
+ * Linux I/O port management.
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/ioport.h>
+
+#define NPORTS 65536
+#define BITS_PER_WORD 32
+#define NWORDS (NPORTS / BITS_PER_WORD)
+
+/*
+ * This bitmap keeps track of all allocated ports.
+ * A bit is set if the port has been allocated.
+ */
+static unsigned port_bitmap[NWORDS];
+
+void snarf_region(unsigned, unsigned);
+
+/*
+ * Check if a region is available for use.
+ */
+int
+check_region(unsigned port, unsigned size)
+{
+ unsigned i;
+
+ for (i = port; i < port + size; i++)
+ if (port_bitmap[i/BITS_PER_WORD] & (1 << (i%BITS_PER_WORD)))
+ return (1);
+ return (0);
+}
+
+/*
+ * Allocate a region.
+ */
+void
+request_region(unsigned port, unsigned size, const char *name)
+{
+ unsigned i;
+
+ for (i = port; i < port + size; i++)
+ port_bitmap[i / BITS_PER_WORD] |= 1 << (i % BITS_PER_WORD);
+}
+
+/*
+ * For compatibility with older kernels.
+ */
+void
+snarf_region(unsigned port, unsigned size)
+{
+ request_region(port, size, 0);
+}
+
+/*
+ * Deallocate a region.
+ */
+void
+release_region(unsigned port, unsigned size)
+{
+ unsigned i;
+
+ for (i = port; i < port + size; i++)
+ port_bitmap[i / BITS_PER_WORD] &= ~(1 << (i % BITS_PER_WORD));
+}
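
A driver is expected to call check_region() at probe time before touching
any ports and request_region() once the hardware has been identified, which
is exactly what the 3c501 probe later in this patch does; release_region()
is the matching cleanup.  A minimal probe-shaped sketch (the port base,
extent and name are placeholders):

/* Claim 16 I/O ports at IOADDR if nobody else owns them. */
static int
example_probe(unsigned ioaddr)
{
    if (check_region(ioaddr, 16))   /* some other driver got here first */
        return -1;

    /* ... poke the hardware to make sure it is really there ... */

    request_region(ioaddr, 16, "example");
    return 0;
}

release_region(ioaddr, 16) would give the ports back when the driver shuts
down.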
diff --git a/i386/i386at/gpl/linux/linux_printk.c b/i386/i386at/gpl/linux/linux_printk.c
new file mode 100644
index 00000000..c4e489d2
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_printk.c
@@ -0,0 +1,47 @@
+/*
+ * Linux kernel print routine.
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * linux/kernel/printk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <stdarg.h>
+#include <asm/system.h>
+
+static char buf[2048];
+
+void
+printk(char *fmt, ...)
+{
+ va_list args;
+ int i, n, flags;
+ extern void cnputc();
+ extern int linux_vsprintf(char *buf, char *fmt, ...);
+
+ save_flags(flags);
+ cli();
+ va_start(args, fmt);
+ n = linux_vsprintf(buf, fmt, args);
+ va_end(args);
+ for (i = 0; i < n; i++)
+ cnputc(buf[i]);
+ restore_flags(flags);
+}
diff --git a/i386/i386at/gpl/linux/linux_sched.c b/i386/i386at/gpl/linux/linux_sched.c
new file mode 100644
index 00000000..fdb0f693
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_sched.c
@@ -0,0 +1,237 @@
+/*
+ * Linux scheduling support.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/kernel/sched.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+
+#include <mach/boolean.h>
+
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+
+#include <i386at/gpl/linux/linux_emul.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+
+#include <asm/system.h>
+
+struct tq_struct tq_last =
+{
+ &tq_last, 0, 0, 0
+};
+
+DECLARE_TASK_QUEUE(tq_timer);
+
+static struct wait_queue **auto_config_queue;
+
+void
+tqueue_bh (void *unused)
+{
+ run_task_queue(&tq_timer);
+}
+
+void
+add_wait_queue (struct wait_queue **q, struct wait_queue *wait)
+{
+ unsigned long flags;
+
+ if (! linux_auto_config)
+ {
+ save_flags (flags);
+ cli ();
+ assert_wait ((event_t) q, FALSE);
+ restore_flags (flags);
+ return;
+ }
+
+ if (auto_config_queue)
+ printf ("add_wait_queue: queue not empty\n");
+ auto_config_queue = q;
+}
+
+void
+remove_wait_queue (struct wait_queue **q, struct wait_queue *wait)
+{
+ unsigned long flags;
+
+ if (! linux_auto_config)
+ {
+ save_flags (flags);
+ thread_wakeup ((event_t) q);
+ restore_flags (flags);
+ return;
+ }
+
+ auto_config_queue = NULL;
+}
+
+void
+__down (struct semaphore *sem)
+{
+ int s;
+ unsigned long flags;
+
+ if (! linux_auto_config)
+ {
+ save_flags (flags);
+ s = splhigh ();
+ while (sem->count <= 0)
+ {
+ assert_wait ((event_t) &sem->wait, FALSE);
+ splx (s);
+ thread_block (0);
+ s = splhigh ();
+ }
+ splx (s);
+ restore_flags (flags);
+ return;
+ }
+
+ while (sem->count <= 0)
+ barrier ();
+}
+
+void
+__sleep_on (struct wait_queue **q, int interruptible)
+{
+ unsigned long flags;
+
+ if (! q)
+ return;
+ save_flags (flags);
+ if (! linux_auto_config)
+ {
+ assert_wait ((event_t) q, interruptible);
+ sti ();
+ thread_block (0);
+ restore_flags (flags);
+ return;
+ }
+
+ add_wait_queue (q, NULL);
+ sti ();
+ while (auto_config_queue)
+ barrier ();
+ restore_flags (flags);
+}
+
+void
+sleep_on (struct wait_queue **q)
+{
+ __sleep_on (q, FALSE);
+}
+
+void
+interruptible_sleep_on (struct wait_queue **q)
+{
+ __sleep_on (q, TRUE);
+}
+
+void
+wake_up (struct wait_queue **q)
+{
+ unsigned long flags;
+
+ if (! linux_auto_config)
+ {
+ if (q != &wait_for_request)
+ {
+ save_flags (flags);
+ thread_wakeup ((event_t) q);
+ restore_flags (flags);
+ }
+ return;
+ }
+
+ if (auto_config_queue == q)
+ auto_config_queue = NULL;
+}
+
+void
+__wait_on_buffer (struct buffer_head *bh)
+{
+ unsigned long flags;
+
+ save_flags (flags);
+ if (! linux_auto_config)
+ {
+ while (1)
+ {
+ cli ();
+ if (! buffer_locked (bh))
+ break;
+ bh->b_wait = (struct wait_queue *) 1;
+ assert_wait ((event_t) bh, FALSE);
+ sti ();
+ thread_block (0);
+ }
+ restore_flags (flags);
+ return;
+ }
+
+ sti ();
+ while (buffer_locked (bh))
+ barrier ();
+ restore_flags (flags);
+}
+
+void
+unlock_buffer (struct buffer_head *bh)
+{
+ unsigned long flags;
+
+ save_flags (flags);
+ cli ();
+ clear_bit (BH_Lock, &bh->b_state);
+ if (bh->b_wait && ! linux_auto_config)
+ {
+ bh->b_wait = NULL;
+ thread_wakeup ((event_t) bh);
+ }
+ restore_flags (flags);
+}
+
+void
+schedule ()
+{
+ if (! linux_auto_config)
+ thread_block (0);
+}
+
+void
+cdrom_sleep (int t)
+{
+ int xxx;
+
+ assert_wait ((event_t) &xxx, TRUE);
+ thread_set_timeout (t);
+ thread_block (0);
+}
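
The routines above exist to support the classic Linux driver idiom of
sleeping on a wait queue until an interrupt handler signals completion.
During autoconfiguration (linux_auto_config) they degenerate into polling
with interrupts enabled; afterwards they map onto Mach's assert_wait(),
thread_block() and thread_wakeup().  A minimal sketch of the idiom (the
flag, queue and function names are placeholders):

static struct wait_queue *example_wait = NULL;
static volatile int example_done = 0;

/* Interrupt handler side: record completion and wake the sleeper.  */
static void
example_intr (void)
{
  example_done = 1;
  wake_up (&example_wait);
}

/* Process side: block until the interrupt handler says we are done.  */
static void
example_wait_for_completion (void)
{
  unsigned long flags;

  save_flags (flags);
  cli ();                       /* guard the initial test against the interrupt */
  while (! example_done)
    sleep_on (&example_wait);   /* re-enables interrupts while waiting */
  restore_flags (flags);
}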
diff --git a/i386/i386at/gpl/linux/linux_soft.c b/i386/i386at/gpl/linux/linux_soft.c
new file mode 100644
index 00000000..efcae987
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_soft.c
@@ -0,0 +1,74 @@
+/*
+ * Linux software interrupts.
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * linux/kernel/softirq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ */
+
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+/*
+ * Mask of pending interrupts.
+ */
+unsigned long bh_active = 0;
+
+/*
+ * Mask of enabled interrupts.
+ */
+unsigned long bh_mask = 0;
+
+/*
+ * List of software interrupt handlers.
+ */
+struct bh_struct bh_base[32];
+
+
+/*
+ * Software interrupt handler.
+ */
+void
+linux_soft_intr()
+{
+ unsigned long active;
+ unsigned long mask, left;
+ struct bh_struct *bh;
+
+ bh = bh_base;
+ active = bh_active & bh_mask;
+ for (mask = 1, left = ~0;
+ left & active; bh++, mask += mask, left += left) {
+ if (mask & active) {
+ void (*fn)(void *);
+
+ bh_active &= ~mask;
+ fn = bh->routine;
+ if (fn == 0)
+ goto bad_bh;
+ (*fn)(bh->data);
+ }
+ }
+ return;
+ bad_bh:
+ printf("linux_soft_intr: bad interrupt handler entry 0x%08lx\n", mask);
+}
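
A bottom half is installed by filling in its bh_base[] slot and setting the
corresponding bit in bh_mask; a hardware interrupt handler then calls
mark_bh(), which flags the slot in bh_active, and linux_soft_intr() runs the
routine the next time software interrupts are processed.  This is how the
NET_BH and TIMER_BH handlers defined elsewhere in this patch get run.  A
minimal sketch, assuming slot NR is otherwise unused:

/* Deferred work, run outside the hardware interrupt.  */
static void
example_bh(void *data)
{
    /* ... the bulk of the interrupt processing goes here ... */
}

static void
example_bh_init(int nr)
{
    bh_base[nr].routine = example_bh;
    bh_base[nr].data = NULL;
    bh_mask |= 1UL << nr;       /* enable the handler */
}

/* From the hardware interrupt handler: mark_bh(nr); the routine then runs
   from linux_soft_intr(). */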
diff --git a/i386/i386at/gpl/linux/linux_timer.c b/i386/i386at/gpl/linux/linux_timer.c
new file mode 100644
index 00000000..c1575323
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_timer.c
@@ -0,0 +1,190 @@
+/*
+ * Linux timers.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/kernel/sched.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <asm/system.h>
+
+unsigned long volatile jiffies = 0;
+
+/*
+ * Mask of active timers.
+ */
+unsigned long timer_active = 0;
+
+/*
+ * List of timeout routines.
+ */
+struct timer_struct timer_table[32];
+
+/*
+ * The head for the timer-list has an "expires" field of MAX_UINT,
+ * and the sorting routine counts on this.
+ */
+static struct timer_list timer_head =
+{
+ &timer_head, &timer_head, ~0, 0, NULL
+};
+
+#define SLOW_BUT_DEBUGGING_TIMERS 0
+
+void
+add_timer(struct timer_list *timer)
+{
+ unsigned long flags;
+ struct timer_list *p;
+
+#if SLOW_BUT_DEBUGGING_TIMERS
+ if (timer->next || timer->prev) {
+ printk("add_timer() called with non-zero list from %p\n",
+ __builtin_return_address(0));
+ return;
+ }
+#endif
+ p = &timer_head;
+ save_flags(flags);
+ cli();
+ do {
+ p = p->next;
+ } while (timer->expires > p->expires);
+ timer->next = p;
+ timer->prev = p->prev;
+ p->prev = timer;
+ timer->prev->next = timer;
+ restore_flags(flags);
+}
+
+int
+del_timer(struct timer_list *timer)
+{
+ unsigned long flags;
+#if SLOW_BUT_DEBUGGING_TIMERS
+ struct timer_list * p;
+
+ p = &timer_head;
+ save_flags(flags);
+ cli();
+ while ((p = p->next) != &timer_head) {
+ if (p == timer) {
+ timer->next->prev = timer->prev;
+ timer->prev->next = timer->next;
+ timer->next = timer->prev = NULL;
+ restore_flags(flags);
+ return 1;
+ }
+ }
+ if (timer->next || timer->prev)
+ printk("del_timer() called from %p with timer not initialized\n",
+ __builtin_return_address(0));
+ restore_flags(flags);
+ return 0;
+#else
+ struct timer_list * next;
+ int ret = 0;
+ save_flags(flags);
+ cli();
+ if ((next = timer->next) != NULL) {
+ (next->prev = timer->prev)->next = next;
+ timer->next = timer->prev = NULL;
+ ret = 1;
+ }
+ restore_flags(flags);
+ return ret;
+#endif
+}
+
+/*
+ * Timer software interrupt handler.
+ */
+void
+timer_bh()
+{
+ unsigned long mask;
+ struct timer_struct *tp;
+ struct timer_list * timer;
+
+ cli();
+ while ((timer = timer_head.next) != &timer_head
+ && timer->expires <= jiffies) {
+ void (*fn)(unsigned long) = timer->function;
+ unsigned long data = timer->data;
+
+ timer->next->prev = timer->prev;
+ timer->prev->next = timer->next;
+ timer->next = timer->prev = NULL;
+ sti();
+ fn(data);
+ cli();
+ }
+ sti();
+
+ for (mask = 1, tp = timer_table; mask; tp++, mask <<= 1) {
+ if (mask > timer_active)
+ break;
+ if ((mask & timer_active)
+ && tp->expires > jiffies) {
+ timer_active &= ~mask;
+ (*tp->fn)();
+ sti();
+ }
+ }
+}
+
+int linux_timer_print = 0;
+
+/*
+ * Timer interrupt handler.
+ */
+void
+linux_timer_intr()
+{
+ unsigned long mask;
+ struct timer_struct *tp;
+ extern int pic_mask[];
+
+ jiffies++;
+
+ for (mask = 1, tp = timer_table; mask; tp++, mask += mask) {
+ if (mask > timer_active)
+ break;
+ if (!(mask & timer_active))
+ continue;
+ if (tp->expires > jiffies)
+ continue;
+ mark_bh(TIMER_BH);
+ }
+ if (timer_head.next->expires <= jiffies)
+ mark_bh(TIMER_BH);
+ if (tq_timer != &tq_last)
+ mark_bh(TQUEUE_BH);
+ if (linux_timer_print)
+ printf ("linux_timer_intr: pic_mask[0] %x\n", pic_mask[0]);
+}
+
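
A dynamic timer is armed by filling in a timer_list (the field order is the
one used by the timer_head initializer above: next, prev, expires, data,
function) and calling add_timer(); timer_bh() then fires every entry whose
expiry is at or before the current jiffies, and del_timer() cancels one that
has not fired yet.  A minimal one-shot sketch (HZ is assumed to be the usual
clock-tick constant; the callback and its argument are placeholders):

static void
example_timeout(unsigned long data)
{
    printk("example timeout, data %lu\n", data);
}

static struct timer_list example_timer;

static void
example_arm(void)
{
    example_timer.next = example_timer.prev = NULL;
    example_timer.expires = jiffies + HZ;   /* roughly one second away */
    example_timer.data = 42;
    example_timer.function = example_timeout;
    add_timer(&example_timer);
}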
diff --git a/i386/i386at/gpl/linux/linux_version.c b/i386/i386at/gpl/linux/linux_version.c
new file mode 100644
index 00000000..0195c42f
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_version.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 1996 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+#include <linux/utsname.h>
+#include <linux/string.h>
+
+struct new_utsname system_utsname;
+
+void
+linux_version_init ()
+{
+ strcpy (system_utsname.version, "1.3.68");
+}
diff --git a/i386/i386at/gpl/linux/linux_vsprintf.c b/i386/i386at/gpl/linux/linux_vsprintf.c
new file mode 100644
index 00000000..236d38da
--- /dev/null
+++ b/i386/i386at/gpl/linux/linux_vsprintf.c
@@ -0,0 +1,341 @@
+/*
+ * linux/kernel/vsprintf.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
+/*
+ * Wirzenius wrote this portably, Torvalds fucked it up :-)
+ */
+
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+static inline
+isxdigit(c)
+ char c;
+{
+ return ((c >= '0' && c <= '9')
+ || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'));
+}
+
+static inline
+islower(c)
+ char c;
+{
+ return (c >= 'a' && c <= 'z');
+}
+
+static inline
+toupper(c)
+ char c;
+{
+ return (islower(c) ? c - 'a' + 'A' : c);
+}
+
+static inline
+isdigit(c)
+ char c;
+{
+ return (c >= '0' && c <= '9');
+}
+
+unsigned long
+simple_strtoul(const char *cp,char **endp,unsigned int base)
+{
+ unsigned long result = 0,value;
+
+ if (!base) {
+ base = 10;
+ if (*cp == '0') {
+ base = 8;
+ cp++;
+ if ((*cp == 'x') && isxdigit(cp[1])) {
+ cp++;
+ base = 16;
+ }
+ }
+ }
+ while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp)
+ ? toupper(*cp) : *cp)-'A'+10) < base) {
+ result = result*base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *)cp;
+ return result;
+}
+
+/* we use this so that we can do without the ctype library */
+#define is_digit(c) ((c) >= '0' && (c) <= '9')
+
+static int
+skip_atoi(const char **s)
+{
+ int i=0;
+
+ while (is_digit(**s))
+ i = i*10 + *((*s)++) - '0';
+ return i;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SPECIAL 32 /* 0x */
+#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
+#define do_div(n,base) ({ \
+int __res; \
+__res = ((unsigned long) n) % (unsigned) base; \
+n = ((unsigned long) n) / (unsigned) base; \
+__res; })
+
+static char *
+number(char * str, long num, int base, int size, int precision, int type)
+{
+ char c,sign,tmp[36];
+ const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
+ int i;
+
+ if (type & LARGE)
+ digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ if (type & LEFT)
+ type &= ~ZEROPAD;
+ if (base < 2 || base > 36)
+ return 0;
+ c = (type & ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & SIGN) {
+ if (num < 0) {
+ sign = '-';
+ num = -num;
+ size--;
+ } else if (type & PLUS) {
+ sign = '+';
+ size--;
+ } else if (type & SPACE) {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & SPECIAL) {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++]='0';
+ else while (num != 0)
+ tmp[i++] = digits[do_div(num,base)];
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type&(ZEROPAD+LEFT)))
+ while(size-->0)
+ *str++ = ' ';
+ if (sign)
+ *str++ = sign;
+ if (type & SPECIAL)
+ if (base==8)
+ *str++ = '0';
+ else if (base==16) {
+ *str++ = '0';
+ *str++ = digits[33];
+ }
+ if (!(type & LEFT))
+ while (size-- > 0)
+ *str++ = c;
+ while (i < precision--)
+ *str++ = '0';
+ while (i-- > 0)
+ *str++ = tmp[i];
+ while (size-- > 0)
+ *str++ = ' ';
+ return str;
+}
+
+int
+linux_vsprintf(char *buf, const char *fmt, va_list args)
+{
+ int len;
+ unsigned long num;
+ int i, base;
+ char * str;
+ char *s;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+ number of chars for from string */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+
+ for (str=buf ; *fmt ; ++fmt) {
+ if (*fmt != '%') {
+ *str++ = *fmt;
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+ repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt) {
+ case '-': flags |= LEFT; goto repeat;
+ case '+': flags |= PLUS; goto repeat;
+ case ' ': flags |= SPACE; goto repeat;
+ case '#': flags |= SPECIAL; goto repeat;
+ case '0': flags |= ZEROPAD; goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (is_digit(*fmt))
+ field_width = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg(args, int);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if (is_digit(*fmt))
+ precision = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg(args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L') {
+ qualifier = *fmt;
+ ++fmt;
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt) {
+ case 'c':
+ if (!(flags & LEFT))
+ while (--field_width > 0)
+ *str++ = ' ';
+ *str++ = (unsigned char) va_arg(args, int);
+ while (--field_width > 0)
+ *str++ = ' ';
+ continue;
+
+ case 's':
+ s = va_arg(args, char *);
+ if (!s)
+ s = "<NULL>";
+ len = strlen(s);
+ if (precision < 0)
+ precision = len;
+ else if (len > precision)
+ len = precision;
+
+ if (!(flags & LEFT))
+ while (len < field_width--)
+ *str++ = ' ';
+ for (i = 0; i < len; ++i)
+ *str++ = *s++;
+ while (len < field_width--)
+ *str++ = ' ';
+ continue;
+
+ case 'p':
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ flags |= ZEROPAD;
+ }
+ str = number(str,
+ (unsigned long) va_arg(args, void *), 16,
+ field_width, precision, flags);
+ continue;
+
+
+ case 'n':
+ if (qualifier == 'l') {
+ long * ip = va_arg(args, long *);
+ *ip = (str - buf);
+ } else {
+ int * ip = va_arg(args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'X':
+ flags |= LARGE;
+ case 'x':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ case 'u':
+ break;
+
+ default:
+ if (*fmt != '%')
+ *str++ = '%';
+ if (*fmt)
+ *str++ = *fmt;
+ else
+ --fmt;
+ continue;
+ }
+ if (qualifier == 'l')
+ num = va_arg(args, unsigned long);
+ else if (qualifier == 'h')
+ if (flags & SIGN)
+ num = va_arg(args, short);
+ else
+ num = va_arg(args, unsigned short);
+ else if (flags & SIGN)
+ num = va_arg(args, int);
+ else
+ num = va_arg(args, unsigned int);
+ str = number(str, num, base, field_width, precision, flags);
+ }
+ *str = '\0';
+ return str-buf;
+}
+
+int
+linux_sprintf(char * buf, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = linux_vsprintf(buf, fmt, args);
+ va_end(args);
+ return i;
+}
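
linux_vsprintf() above handles the usual printf subset (d, i, u, o, x, X, c,
s, p and n, plus width, precision and the flag characters mapped onto the
ZEROPAD/SIGN/PLUS/SPACE/LEFT/SPECIAL/LARGE bits), and printk() simply runs
its output through it before writing each byte to the console.  A small
usage sketch (buffer size and message are arbitrary):

static int
example_format(void)
{
    char buf[64];

    /* "%#06x" exercises the SPECIAL and ZEROPAD flags handled in number();
       the result is "irq 5 at 0x0300 (3c501)".  */
    return linux_sprintf(buf, "irq %d at %#06x (%s)", 5, 0x300, "3c501");
}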
diff --git a/i386/i386at/gpl/linux/net/3c501.c b/i386/i386at/gpl/linux/net/3c501.c
new file mode 100644
index 00000000..6f8dceb6
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/3c501.c
@@ -0,0 +1,860 @@
+/* 3c501.c: A 3Com 3c501 ethernet driver for linux. */
+/*
+ Written 1992,1993,1994 Donald Becker
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ This is a device driver for the 3Com Etherlink 3c501.
+ Do not purchase this card, even as a joke. Its performance is horrible,
+ and it breaks in many ways.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Fixed (again!) the missing interrupt locking on TX/RX shifting.
+ Alan Cox <Alan.Cox@linux.org>
+
+ Removed calls to init_etherdev since they are no longer needed, and
+ cleaned up modularization just a bit. The driver still allows only
+ the default address for cards when loaded as a module, but that's
+ really less braindead than anyone using a 3c501 board. :)
+ 19950208 (invid@msen.com)
+
+ Added traps for interrupts hitting the window as we clear and TX load
+ the board. Now getting 150K/second FTP with a 3c501 card. Still playing
+ with a TX-TX optimisation to see if we can touch 180-200K/second, which seems
+ to be the theoretical maximum.
+ 19950402 Alan Cox <Alan.Cox@linux.org>
+
+ Some notes on this thing if you have to hack it. [Alan]
+
+ 1] Some documentation is available from 3Com. Due to the board's age,
+ standard responses when you ask for this will range from 'be serious'
+ to 'give it to a museum'. The documentation is incomplete and mostly
+ of historical interest anyway.
+
+ 2] The basic system is a single buffer which can be used to receive or
+ transmit a packet. A third command mode exists when you are setting
+ things up.
+
+ 3] If it's transmitting it's not receiving and vice versa. In fact the
+ time to get the board back into useful state after an operation is
+ quite large.
+
+ 4] The driver works by keeping the board in receive mode waiting for a
+ packet to arrive. When one arrives it is copied out of the buffer
+ and delivered to the kernel. The card is reloaded and off we go.
+
+ 5] When transmitting dev->tbusy is set and the card is reset (from
+ receive mode) [possibly losing a packet just received] to command
+ mode. A packet is loaded and transmit mode triggered. The interrupt
+ handler runs different code for transmit interrupts and can handle
+ returning to receive mode or retransmissions (yes you have to help
+ out with those too).
+
+ Problems:
+ There are a wide variety of undocumented error returns from the card
+ and you basically have to kick the board and pray if they turn up. Most
+ only occur under extreme load or if you do something the board doesn't
+ like (eg touching a register at the wrong time).
+
+ The driver is less efficient than it could be. It switches through
+ receive mode even if more transmits are queued. If this worries you buy
+ a real ethernet card.
+
+ The combination of slow receive restart and no real multicast
+ filter makes the board unusable with a kernel compiled for IP
+ multicasting in a real multicast environment. That's down to the board,
+ but even with no multicast programs running a multicast IP kernel is
+ in group 224.0.0.1 and you will therefore be listening to all multicasts.
+ One nv conference running over that ethernet and you can give up.
+
+*/
+
+static const char *version =
+ "3c501.c: 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov).\n";
+
+/*
+ * Braindamage remaining:
+ * The 3c501 board.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/config.h> /* for CONFIG_IP_MULTICAST */
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#define BLOCKOUT_2
+
+/* A zero-terminated list of I/O addresses to be probed.
+ The 3c501 can be at many locations, but here are the popular ones. */
+static unsigned int netcard_portlist[] =
+ { 0x280, 0x300, 0};
+
+
+/*
+ * Index to functions.
+ */
+
+int el1_probe(struct device *dev);
+static int el1_probe1(struct device *dev, int ioaddr);
+static int el_open(struct device *dev);
+static int el_start_xmit(struct sk_buff *skb, struct device *dev);
+static void el_interrupt(int irq, struct pt_regs *regs);
+static void el_receive(struct device *dev);
+static void el_reset(struct device *dev);
+static int el1_close(struct device *dev);
+static struct enet_statistics *el1_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+#define EL1_IO_EXTENT 16
+
+#ifndef EL_DEBUG
+#define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */
+#endif /* Anything above 5 is wordy death! */
+static int el_debug = EL_DEBUG;
+
+/*
+ * Board-specific info in dev->priv.
+ */
+
+struct net_local
+{
+ struct enet_statistics stats;
+ int tx_pkt_start; /* The length of the current Tx packet. */
+ int collisions; /* Tx collisions this packet */
+ int loading; /* Spot buffer load collisions */
+};
+
+
+#define RX_STATUS (ioaddr + 0x06)
+#define RX_CMD RX_STATUS
+#define TX_STATUS (ioaddr + 0x07)
+#define TX_CMD TX_STATUS
+#define GP_LOW (ioaddr + 0x08)
+#define GP_HIGH (ioaddr + 0x09)
+#define RX_BUF_CLR (ioaddr + 0x0A)
+#define RX_LOW (ioaddr + 0x0A)
+#define RX_HIGH (ioaddr + 0x0B)
+#define SAPROM (ioaddr + 0x0C)
+#define AX_STATUS (ioaddr + 0x0E)
+#define AX_CMD AX_STATUS
+#define DATAPORT (ioaddr + 0x0F)
+#define TX_RDY 0x08 /* In TX_STATUS */
+
+#define EL1_DATAPTR 0x08
+#define EL1_RXPTR 0x0A
+#define EL1_SAPROM 0x0C
+#define EL1_DATAPORT 0x0f
+
+/*
+ * Writes to the ax command register.
+ */
+
+#define AX_OFF 0x00 /* Irq off, buffer access on */
+#define AX_SYS 0x40 /* Load the buffer */
+#define AX_XMIT 0x44 /* Transmit a packet */
+#define AX_RX 0x48 /* Receive a packet */
+#define AX_LOOP 0x0C /* Loopback mode */
+#define AX_RESET 0x80
+
+/*
+ * Normal receive mode written to RX_STATUS. We must intr on short packets
+ * to avoid bogus rx lockups.
+ */
+
+#define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */
+#define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */
+#define RX_MULT 0xE8 /* Accept multicast packets. */
+#define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */
+
+/*
+ * TX_STATUS register.
+ */
+
+#define TX_COLLISION 0x02
+#define TX_16COLLISIONS 0x04
+#define TX_READY 0x08
+
+#define RX_RUNT 0x08
+#define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */
+#define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */
+
+
+/*
+ * The boilerplate probe code.
+ */
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry el1_drv = {"3c501", el1_probe1, EL1_IO_EXTENT, netcard_portlist};
+#else
+
+int el1_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el1_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; netcard_portlist[i]; i++)
+ {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, EL1_IO_EXTENT))
+ continue;
+ if (el1_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/*
+ * The actual probe.
+ */
+
+static int el1_probe1(struct device *dev, int ioaddr)
+{
+#ifndef MODULE
+
+ const char *mname; /* Vendor name */
+ unsigned char station_addr[6];
+ int autoirq = 0;
+ int i;
+
+ /*
+ * Read the station address PROM data from the special port.
+ */
+
+ for (i = 0; i < 6; i++)
+ {
+ outw(i, ioaddr + EL1_DATAPTR);
+ station_addr[i] = inb(ioaddr + EL1_SAPROM);
+ }
+ /*
+ * Check the first three octets of the S.A. for 3Com's prefix, or
+ * for the Sager NP943 prefix.
+ */
+
+ if (station_addr[0] == 0x02 && station_addr[1] == 0x60
+ && station_addr[2] == 0x8c)
+ {
+ mname = "3c501";
+ } else if (station_addr[0] == 0x00 && station_addr[1] == 0x80
+ && station_addr[2] == 0xC8)
+ {
+ mname = "NP943";
+ }
+ else
+ return ENODEV;
+
+ /*
+ * Grab the region so we can find another board if autoIRQ fails.
+ */
+
+ request_region(ioaddr, EL1_IO_EXTENT,"3c501");
+
+ /*
+ * We auto-IRQ by shutting off the interrupt line and letting it float
+ * high.
+ */
+
+ if (dev->irq < 2)
+ {
+ autoirq_setup(2);
+ inb(RX_STATUS); /* Clear pending interrupts. */
+ inb(TX_STATUS);
+ outb(AX_LOOP + 1, AX_CMD);
+
+ outb(0x00, AX_CMD);
+
+ autoirq = autoirq_report(1);
+
+ if (autoirq == 0)
+ {
+ printk("%s probe at %#x failed to detect IRQ line.\n",
+ mname, ioaddr);
+ return EAGAIN;
+ }
+ }
+
+ outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. */
+ dev->base_addr = ioaddr;
+ memcpy(dev->dev_addr, station_addr, ETH_ALEN);
+
+ if (dev->mem_start & 0xf)
+ el_debug = dev->mem_start & 0x7;
+ if (autoirq)
+ dev->irq = autoirq;
+
+ printk("%s: %s EtherLink at %#lx, using %sIRQ %d.\n", dev->name, mname, dev->base_addr,
+ autoirq ? "auto":"assigned ", dev->irq);
+
+#ifdef CONFIG_IP_MULTICAST
+ printk("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
+#endif
+
+ if (el_debug)
+ printk("%s", version);
+
+ /*
+ * Initialize the device structure.
+ */
+
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ /*
+ * The EL1-specific entries in the device structure.
+ */
+
+ dev->open = &el_open;
+ dev->hard_start_xmit = &el_start_xmit;
+ dev->stop = &el1_close;
+ dev->get_stats = &el1_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /*
+ * Setup the generic properties
+ */
+
+ ether_setup(dev);
+
+#endif /* !MODULE */
+
+ return 0;
+}
+
+/*
+ * Open/initialize the board.
+ */
+
+static int el_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el_debug > 2)
+ printk("%s: Doing el_open()...", dev->name);
+
+ if (request_irq(dev->irq, &el_interrupt, 0, "3c501"))
+ return -EAGAIN;
+
+ irq2dev_map[dev->irq] = dev;
+ el_reset(dev);
+
+ dev->start = 1;
+
+ outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int el_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ if(dev->interrupt) /* May be unloading, don't stamp on */
+ return 1; /* the packet buffer this time */
+
+ if (dev->tbusy)
+ {
+ if (jiffies - dev->trans_start < 20)
+ {
+ if (el_debug > 2)
+ printk(" transmitter busy, deferred.\n");
+ return 1;
+ }
+ if (el_debug)
+ printk ("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
+ dev->name, inb(TX_STATUS), inb(AX_STATUS), inb(RX_STATUS));
+ lp->stats.tx_errors++;
+ outb(TX_NORM, TX_CMD);
+ outb(RX_NORM, RX_CMD);
+ outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */
+ outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+ }
+
+ if (skb == NULL)
+ {
+ dev_tint(dev);
+ return 0;
+ }
+
+ save_flags(flags);
+
+ /*
+ * Avoid incoming interrupts between us flipping tbusy and flipping
+ * mode as the driver assumes tbusy is a faithful indicator of card
+ * state
+ */
+
+ cli();
+
+ /*
+ * Avoid timer-based retransmission conflicts.
+ */
+
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ {
+ restore_flags(flags);
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ }
+ else
+ {
+ int gp_start = 0x800 - (ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
+ unsigned char *buf = skb->data;
+
+load_it_again_sam:
+ lp->tx_pkt_start = gp_start;
+ lp->collisions = 0;
+
+ /*
+ * Command mode with status cleared should [in theory]
+ * mean no more interrupts can be pending on the card.
+ */
+
+#ifdef BLOCKOUT_1
+ disable_irq(dev->irq);
+#endif
+ outb_p(AX_SYS, AX_CMD);
+ inb_p(RX_STATUS);
+ inb_p(TX_STATUS);
+
+ lp->loading=1;
+
+ /*
+ * Turn interrupts back on while we spend a pleasant afternoon
+ * loading bytes into the board
+ */
+
+ restore_flags(flags);
+ outw(0x00, RX_BUF_CLR); /* Set rx packet area to 0. */
+ outw(gp_start, GP_LOW); /* aim - packet will be loaded into buffer start */
+ outsb(DATAPORT,buf,skb->len); /* load buffer (usual thing each byte increments the pointer) */
+ outw(gp_start, GP_LOW); /* the board reuses the same register */
+#ifndef BLOCKOUT_1
+ if(lp->loading==2) /* A receive upset our load, despite our best efforts */
+ {
+ if(el_debug>2)
+ printk("%s: burped during tx load.\n", dev->name);
+ goto load_it_again_sam; /* Sigh... */
+ }
+#endif
+ outb(AX_XMIT, AX_CMD); /* fire ... Trigger xmit. */
+ lp->loading=0;
+#ifdef BLOCKOUT_1
+ enable_irq(dev->irq);
+#endif
+ dev->trans_start = jiffies;
+ }
+
+ if (el_debug > 2)
+ printk(" queued xmit.\n");
+ dev_kfree_skb (skb, FREE_WRITE);
+ return 0;
+}
+
+
+/*
+ * The typical workload of the driver:
+ * Handle the ether interface interrupts.
+ */
+
+static void el_interrupt(int irq, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr;
+ int axsr; /* Aux. status reg. */
+
+ if (dev == NULL || dev->irq != irq)
+ {
+ printk ("3c501 driver: irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+
+ /*
+ * What happened ?
+ */
+
+ axsr = inb(AX_STATUS);
+
+ /*
+ * Log it
+ */
+
+ if (el_debug > 3)
+ printk("%s: el_interrupt() aux=%#02x", dev->name, axsr);
+ if (dev->interrupt)
+ printk("%s: Reentering the interrupt driver!\n", dev->name);
+ dev->interrupt = 1;
+#ifndef BLOCKOUT_1
+ if(lp->loading==1 && !dev->tbusy)
+ printk("%s: Inconsistent state loading while not in tx\n",
+ dev->name);
+#endif
+#ifdef BLOCKOUT_3
+ lp->loading=2; /* So we can spot loading interruptions */
+#endif
+
+ if (dev->tbusy)
+ {
+
+ /*
+ * Board in transmit mode. May be loading. If we are
+ * loading we shouldn't have got this.
+ */
+
+ int txsr = inb(TX_STATUS);
+#ifdef BLOCKOUT_2
+ if(lp->loading==1)
+ {
+ if(el_debug > 2)
+ {
+ printk("%s: Interrupt while loading [", dev->name);
+ printk(" txsr=%02x gp=%04x rp=%04x]\n", txsr, inw(GP_LOW),inw(RX_LOW));
+ }
+ lp->loading=2; /* Force a reload */
+ dev->interrupt = 0;
+ return;
+ }
+#endif
+ if (el_debug > 6)
+ printk(" txsr=%02x gp=%04x rp=%04x", txsr, inw(GP_LOW),inw(RX_LOW));
+
+ if ((axsr & 0x80) && (txsr & TX_READY) == 0)
+ {
+ /*
+ * FIXME: is there a logic to whether to keep on trying or
+ * reset immediately ?
+ */
+ printk("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x"
+ " gp=%03x rp=%03x.\n", dev->name, txsr, axsr,
+ inw(ioaddr + EL1_DATAPTR), inw(ioaddr + EL1_RXPTR));
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ else if (txsr & TX_16COLLISIONS)
+ {
+ /*
+ * Timed out
+ */
+ if (el_debug)
+ printk("%s: Transmit failed 16 times, ethernet jammed?\n",dev->name);
+ outb(AX_SYS, AX_CMD);
+ lp->stats.tx_aborted_errors++;
+ }
+ else if (txsr & TX_COLLISION)
+ {
+ /*
+ * Retrigger xmit.
+ */
+
+ if (el_debug > 6)
+ printk(" retransmitting after a collision.\n");
+ /*
+ * Poor little chip can't reset its own start pointer
+ */
+
+ outb(AX_SYS, AX_CMD);
+ outw(lp->tx_pkt_start, GP_LOW);
+ outb(AX_XMIT, AX_CMD);
+ lp->stats.collisions++;
+ dev->interrupt = 0;
+ return;
+ }
+ else
+ {
+ /*
+ * It worked.. we will now fall through and receive
+ */
+ lp->stats.tx_packets++;
+ if (el_debug > 6)
+ printk(" Tx succeeded %s\n",
+ (txsr & TX_RDY) ? "." : "but tx is busy!");
+ /*
+ * This is safe; the interrupt is atomic WRT itself.
+ */
+
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* In case more to transmit */
+ }
+ }
+ else
+ {
+ /*
+ * In receive mode.
+ */
+
+ int rxsr = inb(RX_STATUS);
+ if (el_debug > 5)
+ printk(" rxsr=%02x txsr=%02x rp=%04x", rxsr, inb(TX_STATUS),inw(RX_LOW));
+ /*
+ * Just reading rx_status fixes most errors.
+ */
+ if (rxsr & RX_MISSED)
+ lp->stats.rx_missed_errors++;
+ else if (rxsr & RX_RUNT)
+ { /* Handled to avoid board lock-up. */
+ lp->stats.rx_length_errors++;
+ if (el_debug > 5)
+ printk(" runt.\n");
+ }
+ else if (rxsr & RX_GOOD)
+ {
+ /*
+ * Receive worked.
+ */
+ el_receive(dev);
+ }
+ else
+ {
+ /*
+ * Nothing? Something is broken!
+ */
+ if (el_debug > 2)
+ printk("%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
+ dev->name, rxsr);
+ el_reset(dev);
+ }
+ if (el_debug > 3)
+ printk(".\n");
+ }
+
+ /*
+ * Move into receive mode
+ */
+
+ outb(AX_RX, AX_CMD);
+ outw(0x00, RX_BUF_CLR);
+ inb(RX_STATUS); /* Be certain that interrupts are cleared. */
+ inb(TX_STATUS);
+ dev->interrupt = 0;
+ return;
+}
+
+
+/*
+ * We have a good packet. Well, not really "good", just mostly not broken.
+ * We must check everything to see if it is good.
+ */
+
+static void el_receive(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int pkt_len;
+ struct sk_buff *skb;
+
+ pkt_len = inw(RX_LOW);
+
+ if (el_debug > 4)
+ printk(" el_receive %d.\n", pkt_len);
+
+ if ((pkt_len < 60) || (pkt_len > 1536))
+ {
+ if (el_debug)
+ printk("%s: bogus packet, length=%d\n", dev->name, pkt_len);
+ lp->stats.rx_over_errors++;
+ return;
+ }
+
+ /*
+ * Command mode so we can empty the buffer
+ */
+
+ outb(AX_SYS, AX_CMD);
+ skb = dev_alloc_skb(pkt_len+2);
+
+ /*
+ * Start of frame
+ */
+
+ outw(0x00, GP_LOW);
+ if (skb == NULL)
+ {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ return;
+ }
+ else
+ {
+ skb_reserve(skb,2); /* Force 16 byte alignment */
+ skb->dev = dev;
+ /*
+ * The read increments through the bytes. The interrupt
+ * handler will fix the pointer when it returns to
+ * receive mode.
+ */
+ insb(DATAPORT, skb_put(skb,pkt_len), pkt_len);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ return;
+}
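+
+/*
+ * A note on the buffer arithmetic above (illustrative): dev_alloc_skb() is
+ * asked for pkt_len+2 bytes and skb_reserve(skb,2) then skips the first two,
+ * so the IP header behind the 14-byte Ethernet header starts on an aligned
+ * boundary (the "16 byte alignment" the comment refers to). A full 1514-byte
+ * frame therefore needs a 1516-byte allocation.
+ */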
+
+static void el_reset(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el_debug> 2)
+ printk("3c501 reset...");
+ outb(AX_RESET, AX_CMD); /* Reset the chip */
+ outb(AX_LOOP, AX_CMD); /* Aux control, irq and loopback enabled */
+ {
+ int i;
+ for (i = 0; i < 6; i++) /* Set the station address. */
+ outb(dev->dev_addr[i], ioaddr + i);
+ }
+
+ outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */
+ cli(); /* Avoid glitch on writes to CMD regs */
+ outb(TX_NORM, TX_CMD); /* tx irq on done, collision */
+ outb(RX_NORM, RX_CMD); /* Set Rx commands. */
+ inb(RX_STATUS); /* Clear status. */
+ inb(TX_STATUS);
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ sti();
+}
+
+static int el1_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el_debug > 2)
+ printk("%s: Shutting down ethercard at %#x.\n", dev->name, ioaddr);
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /*
+ * Free and disable the IRQ.
+ */
+
+ free_irq(dev->irq);
+ outb(AX_RESET, AX_CMD); /* Reset the chip */
+ irq2dev_map[dev->irq] = 0;
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static struct enet_statistics *el1_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ *	This is best-effort filtering.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if(dev->flags&IFF_PROMISC)
+ {
+ outb(RX_PROM, RX_CMD);
+ inb(RX_STATUS);
+ }
+ else if (dev->mc_list || dev->flags&IFF_ALLMULTI)
+ {
+ outb(RX_MULT, RX_CMD); /* Multicast or all multicast is the same */
+ inb(RX_STATUS); /* Clear status. */
+ }
+ else
+ {
+ outb(RX_NORM, RX_CMD);
+ inb(RX_STATUS);
+ }
+}
+
+#ifdef MODULE
+
+static char devicename[9] = { 0, };
+
+static struct device dev_3c501 =
+{
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x280, 5,
+ 0, 0, 0, NULL, el1_probe
+};
+
+static int io=0x280;
+static int irq=5;
+
+int init_module(void)
+{
+ dev_3c501.irq=irq;
+ dev_3c501.base_addr=io;
+ if (register_netdev(&dev_3c501) != 0)
+ return -EIO;
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ /*
+ * No need to check MOD_IN_USE, as sys_delete_module() checks.
+ */
+
+ unregister_netdev(&dev_3c501);
+
+ /*
+ * Free up the private structure, or leak memory :-)
+ */
+
+ kfree(dev_3c501.priv);
+ dev_3c501.priv = NULL; /* gets re-allocated by el1_probe1 */
+
+ /*
+ * If we don't do this, we can't re-insmod it later.
+ */
+ release_region(dev_3c501.base_addr, EL1_IO_EXTENT);
+}
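+
+/*
+ * Loading example (a sketch, assuming the insmod of the day sets the integer
+ * options above by name; adjust to match the card's jumpers):
+ *
+ *	insmod 3c501.o io=0x300 irq=5
+ *
+ * With no options the defaults above (io=0x280, irq=5) are used.
+ */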
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -m486 -c -o 3c501.o 3c501.c"
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/3c503.c b/i386/i386at/gpl/linux/net/3c503.c
new file mode 100644
index 00000000..9a0c0b9e
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/3c503.c
@@ -0,0 +1,627 @@
+/* 3c503.c: A shared-memory NS8390 ethernet driver for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This driver should work with the 3c503 and 3c503/16. It should be used
+ in shared memory mode for best performance, although it may also work
+ in programmed-I/O mode.
+
+ Sources:
+ EtherLink II Technical Reference Manual,
+ EtherLink II/16 Technical Reference Manual Supplement,
+ 3Com Corporation, 5400 Bayfront Plaza, Santa Clara CA 95052-8145
+
+ The Crynwr 3c503 packet driver.
+
+ Changelog:
+
+ Paul Gortmaker : add support for the 2nd 8kB of RAM on 16 bit cards.
+ Paul Gortmaker : multiple card support for module users.
+
+*/
+
+static const char *version =
+ "3c503.c:v1.10 9/23/93 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+#include "3c503.h"
+
+
+int el2_probe(struct device *dev);
+int el2_pio_probe(struct device *dev);
+int el2_probe1(struct device *dev, int ioaddr);
+
+/* A zero-terminated list of I/O addresses to be probed in PIO mode. */
+static unsigned int netcard_portlist[] =
+ { 0x300,0x310,0x330,0x350,0x250,0x280,0x2a0,0x2e0,0};
+
+#define EL2_IO_EXTENT 16
+
+#ifdef HAVE_DEVLIST
+/* The 3c503 uses two entries, one for the safe memory-mapped probe and
+ the other for the typical I/O probe. */
+struct netdev_entry el2_drv =
+{"3c503", el2_probe, EL1_IO_EXTENT, 0};
+struct netdev_entry el2pio_drv =
+{"3c503pio", el2_pioprobe1, EL1_IO_EXTENT, netcard_portlist};
+#endif
+
+static int el2_open(struct device *dev);
+static int el2_close(struct device *dev);
+static void el2_reset_8390(struct device *dev);
+static void el2_init_card(struct device *dev);
+static void el2_block_output(struct device *dev, int count,
+ const unsigned char *buf, const start_page);
+static void el2_block_input(struct device *dev, int count, struct sk_buff *skb,
+ int ring_offset);
+static void el2_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+
+/* This routine probes for a memory-mapped 3c503 board by looking for
+ the "location register" at the end of the jumpered boot PROM space.
+ This works even if a PROM isn't there.
+
+   If the ethercard isn't found, there is an optional probe for an
+   ethercard jumpered to programmed-I/O mode.
+ */
+int
+el2_probe(struct device *dev)
+{
+ int *addr, addrs[] = { 0xddffe, 0xd9ffe, 0xcdffe, 0xc9ffe, 0};
+ int base_addr = dev->base_addr;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el2_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (addr = addrs; *addr; addr++) {
+ int i;
+ unsigned int base_bits = readb(*addr);
+ /* Find first set bit. */
+ for(i = 7; i >= 0; i--, base_bits >>= 1)
+ if (base_bits & 0x1)
+ break;
+ if (base_bits != 1)
+ continue;
+ if (check_region(netcard_portlist[i], EL2_IO_EXTENT))
+ continue;
+ if (el2_probe1(dev, netcard_portlist[i]) == 0)
+ return 0;
+ }
+#if ! defined(no_probe_nonshared_memory) && ! defined (HAVE_DEVLIST)
+ return el2_pio_probe(dev);
+#else
+ return ENODEV;
+#endif
+}
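+
+/*
+ * Worked example of the location-register decoding above: a register value
+ * of 0x80 (only bit 7 set) leaves i == 0 when the scan breaks out, selecting
+ * netcard_portlist[0] == 0x300, while 0x01 leaves i == 7 and selects
+ * netcard_portlist[7] == 0x2e0. Readings with zero or several bits set fail
+ * the "base_bits != 1" test and are skipped.
+ */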
+
+#ifndef HAVE_DEVLIST
+/* Try all of the locations that aren't obviously empty. This touches
+ a lot of locations, and is much riskier than the code above. */
+int
+el2_pio_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el2_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; netcard_portlist[i]; i++) {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, EL2_IO_EXTENT))
+ continue;
+ if (el2_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* Probe for the Etherlink II card at I/O port base IOADDR,
+   returning zero on success. If found, set the station
+   address and memory parameters in DEVICE. */
+int
+el2_probe1(struct device *dev, int ioaddr)
+{
+ int i, iobase_reg, membase_reg, saved_406, wordlength;
+ static unsigned version_printed = 0;
+ unsigned long vendor_id;
+
+ /* Reset and/or avoid any lurking NE2000 */
+ if (inb(ioaddr + 0x408) == 0xff) {
+ udelay(1000);
+ return ENODEV;
+ }
+
+ /* We verify that it's a 3C503 board by checking the first three octets
+ of its ethernet address. */
+ iobase_reg = inb(ioaddr+0x403);
+ membase_reg = inb(ioaddr+0x404);
+ /* ASIC location registers should be 0 or have only a single bit set. */
+ if ( (iobase_reg & (iobase_reg - 1))
+ || (membase_reg & (membase_reg - 1))) {
+ return ENODEV;
+ }
+ saved_406 = inb_p(ioaddr + 0x406);
+ outb_p(ECNTRL_RESET|ECNTRL_THIN, ioaddr + 0x406); /* Reset it... */
+ outb_p(ECNTRL_THIN, ioaddr + 0x406);
+ /* Map the station addr PROM into the lower I/O ports. We now check
+ for both the old and new 3Com prefix */
+ outb(ECNTRL_SAPROM|ECNTRL_THIN, ioaddr + 0x406);
+ vendor_id = inb(ioaddr)*0x10000 + inb(ioaddr + 1)*0x100 + inb(ioaddr + 2);
+ if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID)) {
+ /* Restore the register we frobbed. */
+ outb(saved_406, ioaddr + 0x406);
+ return ENODEV;
+ }
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("3c503.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ dev->base_addr = ioaddr;
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk ("3c503: unable to allocate memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ printk("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr);
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ /* Map the 8390 back into the window. */
+ outb(ECNTRL_THIN, ioaddr + 0x406);
+
+ /* Check for EL2/16 as described in tech. man. */
+ outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
+ outb_p(0, ioaddr + EN0_DCFG);
+ outb_p(E8390_PAGE2, ioaddr + E8390_CMD);
+ wordlength = inb_p(ioaddr + EN0_DCFG) & ENDCFG_WTS;
+ outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
+
+ /* Probe for, turn on and clear the board's shared memory. */
+ if (ei_debug > 2) printk(" memory jumpers %2.2x ", membase_reg);
+ outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */
+
+ /* This should be probed for (or set via an ioctl()) at run-time.
+ Right now we use a sleazy hack to pass in the interface number
+ at boot-time via the low bits of the mem_end field. That value is
+ unused, and the low bits would be discarded even if it was used. */
+#if defined(EI8390_THICK) || defined(EL2_AUI)
+ ei_status.interface_num = 1;
+#else
+ ei_status.interface_num = dev->mem_end & 0xf;
+#endif
+ printk(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex");
+
+ if ((membase_reg & 0xf0) == 0) {
+ dev->mem_start = 0;
+ ei_status.name = "3c503-PIO";
+ } else {
+ dev->mem_start = ((membase_reg & 0xc0) ? 0xD8000 : 0xC8000) +
+ ((membase_reg & 0xA0) ? 0x4000 : 0);
+
+#define EL2_MEMSIZE (EL2_MB1_STOP_PG - EL2_MB1_START_PG)*256
+#ifdef EL2MEMTEST
+ /* This has never found an error, but someone might care.
+ Note that it only tests the 2nd 8kB on 16kB 3c503/16
+ cards between card addr. 0x2000 and 0x3fff. */
+ { /* Check the card's memory. */
+ unsigned long mem_base = dev->mem_start;
+ unsigned int test_val = 0xbbadf00d;
+ writel(0xba5eba5e, mem_base);
+ for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) {
+ writel(test_val, mem_base + i);
+ if (readl(mem_base) != 0xba5eba5e
+ || readl(mem_base + i) != test_val) {
+ printk("3c503.c: memory failure or memory address conflict.\n");
+ dev->mem_start = 0;
+ ei_status.name = "3c503-PIO";
+ break;
+ }
+ test_val += 0x55555555;
+ writel(0, mem_base + i);
+ }
+ }
+#endif /* EL2MEMTEST */
+
+ dev->mem_end = dev->rmem_end = dev->mem_start + EL2_MEMSIZE;
+
+ if (wordlength) { /* No Tx pages to skip over to get to Rx */
+ dev->rmem_start = dev->mem_start;
+ ei_status.name = "3c503/16";
+ } else {
+ dev->rmem_start = TX_PAGES*256 + dev->mem_start;
+ ei_status.name = "3c503";
+ }
+ }
+
+ /*
+ Divide up the memory on the card. This is the same regardless of
+ whether shared-mem or PIO is used. For 16 bit cards (16kB RAM),
+ we use the entire 8k of bank1 for an Rx ring. We only use 3k
+ of the bank0 for 2 full size Tx packet slots. For 8 bit cards,
+ (8kB RAM) we use 3kB of bank1 for two Tx slots, and the remaining
+ 5kB for an Rx ring. */
+
+ if (wordlength) {
+ ei_status.tx_start_page = EL2_MB0_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG;
+ } else {
+ ei_status.tx_start_page = EL2_MB1_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
+ }
+
+ /* Finish setting the board's parameters. */
+ ei_status.stop_page = EL2_MB1_STOP_PG;
+ ei_status.word16 = wordlength;
+ ei_status.reset_8390 = &el2_reset_8390;
+ ei_status.get_8390_hdr = &el2_get_8390_hdr;
+ ei_status.block_input = &el2_block_input;
+ ei_status.block_output = &el2_block_output;
+
+ request_region(ioaddr, EL2_IO_EXTENT, ei_status.name);
+
+ if (dev->irq == 2)
+ dev->irq = 9;
+ else if (dev->irq > 5 && dev->irq != 9) {
+ printk("3c503: configured interrupt %d invalid, will use autoIRQ.\n",
+ dev->irq);
+ dev->irq = 0;
+ }
+
+ ei_status.saved_irq = dev->irq;
+
+ dev->start = 0;
+ dev->open = &el2_open;
+ dev->stop = &el2_close;
+
+ if (dev->mem_start)
+ printk("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n",
+ dev->name, ei_status.name, (wordlength+1)<<3,
+ dev->mem_start, dev->mem_end-1);
+
+ else
+ printk("\n%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n",
+ dev->name, ei_status.name, (wordlength+1)<<3);
+
+ return 0;
+}
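+
+/*
+ * Worked example of the window arithmetic above: a membase_reg jumper
+ * reading of 0x10 maps the board at 0xC8000, 0x20 at 0xCC000, 0x40 at
+ * 0xD8000 and 0x80 at 0xDC000. EL2_MEMSIZE works out to
+ * (0x40 - 0x20) * 256 = 0x2000 bytes, i.e. the 8kB bank 1 window.
+ */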
+
+static int
+el2_open(struct device *dev)
+{
+
+ if (dev->irq < 2) {
+ int irqlist[] = {5, 9, 3, 4, 0};
+ int *irqp = irqlist;
+
+ outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
+ do {
+ if (request_irq (*irqp, NULL, 0, "bogus") != -EBUSY) {
+ /* Twinkle the interrupt, and check if it's seen. */
+ autoirq_setup(0);
+ outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
+ outb_p(0x00, E33G_IDCFR);
+ if (*irqp == autoirq_report(0) /* It's a good IRQ line! */
+ && request_irq (dev->irq = *irqp, &ei_interrupt, 0, ei_status.name) == 0)
+ break;
+ }
+ } while (*++irqp);
+ if (*irqp == 0) {
+ outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
+ return -EAGAIN;
+ }
+ } else {
+ if (request_irq(dev->irq, &ei_interrupt, 0, ei_status.name)) {
+ return -EAGAIN;
+ }
+ }
+
+ el2_init_card(dev);
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int
+el2_close(struct device *dev)
+{
+ free_irq(dev->irq);
+ dev->irq = ei_status.saved_irq;
+ irq2dev_map[dev->irq] = NULL;
+ outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
+
+ ei_close(dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/* This is called whenever we have a unrecoverable failure:
+ transmit timeout
+ Bad ring buffer packet header
+ */
+static void
+el2_reset_8390(struct device *dev)
+{
+ if (ei_debug > 1) {
+ printk("%s: Resetting the 3c503 board...", dev->name);
+ printk("%#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR),
+ E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR));
+ }
+ outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL);
+ ei_status.txing = 0;
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ el2_init_card(dev);
+ if (ei_debug > 1) printk("done\n");
+}
+
+/* Initialize the 3c503 GA registers after a reset. */
+static void
+el2_init_card(struct device *dev)
+{
+ /* Unmap the station PROM and select the DIX or BNC connector. */
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+
+ /* Set ASIC copy of rx's first and last+1 buffer pages */
+ /* These must be the same as in the 8390. */
+ outb(ei_status.rx_start_page, E33G_STARTPG);
+ outb(ei_status.stop_page, E33G_STOPPG);
+
+ /* Point the vector pointer registers somewhere ?harmless?. */
+ outb(0xff, E33G_VP2); /* Point at the ROM restart location 0xffff0 */
+ outb(0xff, E33G_VP1);
+ outb(0x00, E33G_VP0);
+ /* Turn off all interrupts until we're opened. */
+ outb_p(0x00, dev->base_addr + EN0_IMR);
+ /* Enable IRQs iff started. */
+ outb(EGACFR_NORM, E33G_GACFR);
+
+ /* Set the interrupt line. */
+ outb_p((0x04 << (dev->irq == 9 ? 2 : dev->irq)), E33G_IDCFR);
+ outb_p(8, E33G_DRQCNT); /* Set burst size to 8 */
+ outb_p(0x20, E33G_DMAAH); /* Put a valid addr in the GA DMA */
+ outb_p(0x00, E33G_DMAAL);
+ return; /* We always succeed */
+}
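+
+/*
+ * The E33G_IDCFR write above encodes the interrupt line as a one-hot mask,
+ * 0x04 << irq (IRQ 9 being the IRQ 2 alias): irq 3 -> 0x20, irq 4 -> 0x40,
+ * irq 5 -> 0x80, irq 9 -> 0x10.
+ */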
+
+/* Either use the shared memory (if enabled on the board) or put the packet
+ out through the ASIC FIFO. The latter is probably much slower. */
+static void
+el2_block_output(struct device *dev, int count,
+ const unsigned char *buf, const start_page)
+{
+ int i; /* Buffer index */
+ int boguscount = 0; /* timeout counter */
+
+ if (ei_status.word16) /* Tx packets go into bank 0 on EL2/16 card */
+ outb(EGACFR_RSEL|EGACFR_TCM, E33G_GACFR);
+ else
+ outb(EGACFR_NORM, E33G_GACFR);
+
+ if (dev->mem_start) { /* Shared memory transfer */
+ unsigned long dest_addr = dev->mem_start +
+ ((start_page - ei_status.tx_start_page) << 8);
+ memcpy_toio(dest_addr, buf, count);
+ outb(EGACFR_NORM, E33G_GACFR); /* Back to bank1 in case on bank0 */
+ return;
+ }
+ /* No shared memory, put the packet out the slow way. */
+ /* Set up then start the internal memory transfer to Tx Start Page */
+ outb(0x00, E33G_DMAAL);
+ outb_p(start_page, E33G_DMAAH);
+ outb_p((ei_status.interface_num ? ECNTRL_AUI : ECNTRL_THIN ) | ECNTRL_OUTPUT
+ | ECNTRL_START, E33G_CNTRL);
+
+ /* This is the byte copy loop: it should probably be tuned for
+ speed once everything is working. I think it is possible
+ to output 8 bytes between each check of the status bit. */
+ for(i = 0; i < count; i++) {
+ if (i % 8 == 0)
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ if (++boguscount > (i<<3) + 32) {
+ printk("%s: FIFO blocked in el2_block_output (at %d of %d, bc=%d).\n",
+ dev->name, i, count, boguscount);
+ outb(EGACFR_NORM, E33G_GACFR); /* To MB1 for EL2/16 */
+ return;
+ }
+ outb(buf[i], E33G_FIFOH);
+ }
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ outb(EGACFR_NORM, E33G_GACFR); /* Back to bank1 in case on bank0 */
+ return;
+}
+
+/* Read the 4-byte, page-aligned, 8390-specific header. */
+static void
+el2_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ unsigned int i;
+ unsigned long hdr_start = dev->mem_start + ((ring_page - EL2_MB1_START_PG)<<8);
+ unsigned long fifo_watchdog;
+
+ if (dev->mem_start) { /* Use the shared memory. */
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+ return;
+ }
+
+ /* No shared memory, use programmed I/O. Ugh. */
+ outb(0, E33G_DMAAL);
+ outb_p(ring_page & 0xff, E33G_DMAAH);
+ outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
+ | ECNTRL_START, E33G_CNTRL);
+
+ /* Header is < 8 bytes, so only check the FIFO at the beginning. */
+ fifo_watchdog = jiffies;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) {
+ if (jiffies - fifo_watchdog > 2*HZ/100) {
+ printk("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name);
+ break;
+ }
+ }
+
+ for(i = 0; i < sizeof(struct e8390_pkt_hdr); i++)
+ ((char *)(hdr))[i] = inb_p(E33G_FIFOH);
+
+ outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+}
+
+/* Copy a received packet out of the 8390 ring buffer, via shared memory or PIO. */
+static void
+el2_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int boguscount = 0;
+ int end_of_ring = dev->rmem_end;
+ unsigned int i;
+
+    /* Maybe enable shared memory just to be safe... nahh. */
+ if (dev->mem_start) { /* Use the shared memory. */
+ ring_offset -= (EL2_MB1_START_PG<<8);
+ if (dev->mem_start + ring_offset + count > end_of_ring) {
+ /* We must wrap the input move. */
+ int semi_count = end_of_ring - (dev->mem_start + ring_offset);
+ memcpy_fromio(skb->data, dev->mem_start + ring_offset, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, dev->mem_start + ring_offset, count, 0);
+ }
+ return;
+ }
+ /* No shared memory, use programmed I/O. */
+ outb(ring_offset & 0xff, E33G_DMAAL);
+ outb_p((ring_offset >> 8) & 0xff, E33G_DMAAH);
+ outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
+ | ECNTRL_START, E33G_CNTRL);
+
+ /* This is the byte copy loop: it should probably be tuned for
+ speed once everything is working. */
+ for(i = 0; i < count; i++) {
+ if (i % 8 == 0)
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ if (++boguscount > (i<<3) + 32) {
+ printk("%s: FIFO blocked in el2_block_input() (at %d of %d, bc=%d).\n",
+ dev->name, i, count, boguscount);
+ boguscount = 0;
+ break;
+ }
+ (skb->data)[i] = inb_p(E33G_FIFOH);
+ }
+ outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+}
+
+
+#ifdef MODULE
+#define MAX_EL2_CARDS 4 /* Max number of EL2 cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_EL2_CARDS] = { 0, };
+static struct device dev_el2[MAX_EL2_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_EL2_CARDS] = { 0, };
+static int irq[MAX_EL2_CARDS] = { 0, };
+static int xcvr[MAX_EL2_CARDS] = { 0, }; /* choose int. or ext. xcvr */
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
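+
+/* Loading example (a sketch; the option names match the arrays above and
+   assume the module loader's usual comma-separated list handling):
+
+	insmod 3c503.o io=0x280,0x300 irq=5,9 xcvr=0,1
+
+   xcvr=0 selects the internal (thinnet) transceiver, xcvr=1 the external
+   (AUI) one, via the low bits of dev->mem_end as read by el2_probe1(). */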
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
+ struct device *dev = &dev_el2[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
+ dev->init = el2_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "3c503.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
+ struct device *dev = &dev_el2[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: el2_close() handles free_irq + irq2dev map */
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(dev->base_addr, EL2_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/3c503.h b/i386/i386at/gpl/linux/net/3c503.h
new file mode 100644
index 00000000..b9f8a46f
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/3c503.h
@@ -0,0 +1,91 @@
+/* Definitions for the 3Com 3c503 Etherlink 2. */
+/* This file is distributed under the GPL.
+ Many of these names and comments are directly from the Crynwr packet
+ drivers, which are released under the GPL. */
+
+#define EL2H (dev->base_addr + 0x400)
+#define EL2L (dev->base_addr)
+
+/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran
+ out of available addresses on the first one... */
+
+#define OLD_3COM_ID 0x02608c
+#define NEW_3COM_ID 0x0020af
+
+/* Shared memory management parameters. NB: The 8 bit cards have only
+ one bank (MB1) which serves both Tx and Rx packet space. The 16bit
+ cards have 2 banks, MB0 for Tx packets, and MB1 for Rx packets.
+ You choose which bank appears in the sh. mem window with EGACFR_MBSn */
+
+#define EL2_MB0_START_PG (0x00) /* EL2/16 Tx packets go in bank 0 */
+#define EL2_MB1_START_PG (0x20) /* First page of bank 1 */
+#define EL2_MB1_STOP_PG (0x40) /* Last page +1 of bank 1 */
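+
+/* I.e. bank 1 spans card addresses 0x20*256 = 0x2000 up to (but not
+   including) 0x40*256 = 0x4000: an 8kB, 32 page window. Bank 0 starts at
+   card address 0x0000. */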
+
+/* 3Com 3c503 ASIC registers */
+#define E33G_STARTPG (EL2H+0) /* Start page, matching EN0_STARTPG */
+#define E33G_STOPPG (EL2H+1) /* Stop page, must match EN0_STOPPG */
+#define E33G_DRQCNT (EL2H+2) /* DMA burst count */
+#define E33G_IOBASE (EL2H+3) /* Read of I/O base jumpers. */
+ /* (non-useful, but it also appears at the end of EPROM space) */
+#define E33G_ROMBASE (EL2H+4) /* Read of memory base jumpers. */
+#define E33G_GACFR (EL2H+5) /* Config/setup bits for the ASIC GA */
+#define E33G_CNTRL (EL2H+6) /* Board's main control register */
+#define E33G_STATUS (EL2H+7) /* Status on completions. */
+#define E33G_IDCFR (EL2H+8) /* Interrupt/DMA config register */
+ /* (Which IRQ to assert, DMA chan to use) */
+#define E33G_DMAAH (EL2H+9) /* High byte of DMA address reg */
+#define E33G_DMAAL (EL2H+10) /* Low byte of DMA address reg */
+/* "Vector pointer" - if this address matches a read, the EPROM (rather than
+ shared RAM) is mapped into memory space. */
+#define E33G_VP2 (EL2H+11)
+#define E33G_VP1 (EL2H+12)
+#define E33G_VP0 (EL2H+13)
+#define E33G_FIFOH (EL2H+14) /* FIFO for programmed I/O moves */
+#define E33G_FIFOL (EL2H+15) /* ... low byte of above. */
+
+/* Bits in E33G_CNTRL register: */
+
+#define ECNTRL_RESET (0x01) /* Software reset of the ASIC and 8390 */
+#define ECNTRL_THIN (0x02) /* Onboard xcvr enable, AUI disable */
+#define ECNTRL_AUI (0x00) /* Onboard xcvr disable, AUI enable */
+#define ECNTRL_SAPROM (0x04) /* Map the station address prom */
+#define ECNTRL_DBLBFR (0x20) /* FIFO configuration bit */
+#define ECNTRL_OUTPUT (0x40) /* PC-to-3C503 direction if 1 */
+#define ECNTRL_INPUT (0x00) /* 3C503-to-PC direction if 0 */
+#define ECNTRL_START (0x80) /* Start the DMA logic */
+
+/* Bits in E33G_STATUS register: */
+
+#define ESTAT_DPRDY (0x80) /* Data port (of FIFO) ready */
+#define ESTAT_UFLW (0x40) /* Tried to read FIFO when it was empty */
+#define ESTAT_OFLW (0x20) /* Tried to write FIFO when it was full */
+#define ESTAT_DTC (0x10) /* Terminal Count from PC bus DMA logic */
+#define ESTAT_DIP (0x08) /* DMA In Progress */
+
+/* Bits in E33G_GACFR register: */
+
+#define EGACFR_NIM (0x80) /* NIC interrupt mask */
+#define EGACFR_TCM (0x40) /* DMA term. count interrupt mask */
+#define EGACFR_RSEL (0x08) /* Map a bank of card mem into system mem */
+#define EGACFR_MBS2 (0x04) /* Memory bank select, bit 2. */
+#define EGACFR_MBS1 (0x02) /* Memory bank select, bit 1. */
+#define EGACFR_MBS0 (0x01) /* Memory bank select, bit 0. */
+
+#define EGACFR_NORM (0x49) /* TCM | RSEL | MBS0 */
+#define EGACFR_IRQOFF (0xc9) /* TCM | RSEL | MBS0 | NIM */
+
+/*
+	MBS2	MBS1	MBS0	Sh. mem window shows card mem at:
+ ---- ---- ---- -----------------------------
+ 0 0 0 0x0000 -- bank 0
+ 0 0 1 0x2000 -- bank 1 (only choice for 8bit card)
+ 0 1 0 0x4000 -- bank 2, not used
+ 0 1 1 0x6000 -- bank 3, not used
+
+There was going to be a 32k card that used bank 2 and 3, but it
+never got produced.
+
+*/
+
+
+/* End of 3C503 parameter definitions */
diff --git a/i386/i386at/gpl/linux/net/3c505.c b/i386/i386at/gpl/linux/net/3c505.c
new file mode 100644
index 00000000..63ccc9cf
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/3c505.c
@@ -0,0 +1,1518 @@
+/*
+ * Linux ethernet device driver for the 3Com Etherlink Plus (3C505)
+ * By Craig Southeren and Juha Laiho
+ *
+ * 3c505.c This module implements an interface to the 3Com
+ * Etherlink Plus (3c505) ethernet card. Linux device
+ * driver interface reverse engineered from the Linux 3C509
+ * device drivers. Some 3C505 information gleaned from
+ *		the Crynwr packet driver. Still, this driver would not
+ *		be here without the 3C505 technical reference provided by
+ * 3Com.
+ *
+ * Version: @(#)3c505.c 0.8.4 17-Dec-95
+ *
+ * Authors: Linux 3c505 device driver by
+ * Craig Southeren, <craigs@ineluki.apana.org.au>
+ * Final debugging by
+ * Andrew Tridgell, <tridge@nimbus.anu.edu.au>
+ * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by
+ * Juha Laiho, <jlaiho@ichaos.nullnet.fi>
+ * Linux 3C509 driver by
+ * Donald Becker, <becker@super.org>
+ * Crynwr packet driver by
+ * Krishnan Gopalan and Gregg Stefancik,
+ * Clemson University Engineering Computer Operations.
+ * Portions of the code have been adapted from the 3c505
+ * driver for NCSA Telnet by Bruce Orchard and later
+ * modified by Warren Van Houten and krus@diku.dk.
+ * 3C505 technical information provided by
+ * Terry Murphy, of 3Com Network Adapter Division
+ * Linux 1.3.0 changes by
+ * Alan Cox <Alan.Cox@linux.org>
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "3c505.h"
+
+/*********************************************************
+ *
+ * define debug messages here as common strings to reduce space
+ *
+ *********************************************************/
+
+static const char * filename = __FILE__;
+
+static const char * null_msg = "*** NULL at %s:%s (line %d) ***\n";
+#define CHECK_NULL(p) \
+ if (!p) printk(null_msg, filename,__FUNCTION__,__LINE__)
+
+static const char * timeout_msg = "*** timeout at %s:%s (line %d) ***\n";
+#define TIMEOUT_MSG(lineno) \
+ printk(timeout_msg, filename,__FUNCTION__,(lineno))
+
+static const char * invalid_pcb_msg =
+ "*** invalid pcb length %d at %s:%s (line %d) ***\n";
+#define INVALID_PCB_MSG(len) \
+ printk(invalid_pcb_msg, (len),filename,__FUNCTION__,__LINE__)
+
+static const char * search_msg = "%s: Looking for 3c505 adapter at address %#x...";
+
+static const char * stilllooking_msg = "still looking...";
+
+static const char * found_msg = "found.\n";
+
+static const char * notfound_msg = "not found (reason = %d)\n";
+
+static const char * couldnot_msg = "%s: 3c505 not found\n";
+
+/*********************************************************
+ *
+ * various other debug stuff
+ *
+ *********************************************************/
+
+#ifdef ELP_DEBUG
+static int elp_debug = ELP_DEBUG;
+#else
+static int elp_debug = 0;
+#endif
+
+/*
+ * 0 = no messages (well, some)
+ * 1 = messages when high level commands performed
+ * 2 = messages when low level commands performed
+ * 3 = messages when interrupts received
+ */
+
+#define ELP_VERSION "0.8.4"
+
+#ifdef MACH
+#define ELP_NEED_HARD_RESET 0
+#endif
+
+/*****************************************************************
+ *
+ * useful macros
+ *
+ *****************************************************************/
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+
+/*****************************************************************
+ *
+ * List of I/O-addresses we try to auto-sense
+ * Last element MUST BE 0!
+ *****************************************************************/
+
+const int addr_list[]={0x300,0x280,0x310,0};
+
+/*****************************************************************
+ *
+ * Functions for I/O (note the inline !)
+ *
+ *****************************************************************/
+
+static inline unsigned char
+inb_status (unsigned int base_addr)
+{
+ return inb(base_addr+PORT_STATUS);
+}
+
+static inline unsigned char
+inb_control (unsigned int base_addr)
+{
+ return inb(base_addr+PORT_CONTROL);
+}
+
+static inline int
+inb_command (unsigned int base_addr)
+{
+ return inb(base_addr+PORT_COMMAND);
+}
+
+static inline void
+outb_control (unsigned char val, unsigned int base_addr)
+{
+ outb(val, base_addr+PORT_CONTROL);
+}
+
+static inline void
+outb_command (unsigned char val, unsigned int base_addr)
+{
+ outb(val, base_addr+PORT_COMMAND);
+}
+
+static inline unsigned int
+inw_data (unsigned int base_addr)
+{
+ return inw(base_addr+PORT_DATA);
+}
+
+static inline void
+outw_data (unsigned int val, unsigned int base_addr)
+{
+ outw(val, base_addr+PORT_DATA);
+}
+
+
+/*****************************************************************
+ *
+ * structure to hold context information for adapter
+ *
+ *****************************************************************/
+
+typedef struct {
+ volatile short got[NUM_TRANSMIT_CMDS]; /* flags for command completion */
+ pcb_struct tx_pcb; /* PCB for foreground sending */
+ pcb_struct rx_pcb; /* PCB for foreground receiving */
+ pcb_struct itx_pcb; /* PCB for background sending */
+ pcb_struct irx_pcb; /* PCB for background receiving */
+ struct enet_statistics stats;
+} elp_device;
+
+static int reset_count=0;
+
+/*****************************************************************
+ *
+ * useful functions for accessing the adapter
+ *
+ *****************************************************************/
+
+/*
+ * use this routine when accessing the ASF bits as they are
+ * changed asynchronously by the adapter
+ */
+
+/* get adapter PCB status */
+#define GET_ASF(addr) \
+ (get_status(addr)&ASF_PCB_MASK)
+
+static inline int
+get_status (unsigned int base_addr)
+{
+ int timeout = jiffies + 10;
+ register int stat1;
+ do {
+ stat1 = inb_status(base_addr);
+ } while (stat1 != inb_status(base_addr) && jiffies < timeout);
+ if (jiffies >= timeout)
+ TIMEOUT_MSG(__LINE__);
+ return stat1;
+}
+
+static inline void
+set_hsf (unsigned int base_addr, int hsf)
+{
+ cli();
+ outb_control((inb_control(base_addr)&~HSF_PCB_MASK)|hsf, base_addr);
+ sti();
+}
+
+#define WAIT_HCRE(addr,toval) wait_hcre((addr),(toval),__LINE__)
+static inline int
+wait_hcre (unsigned int base_addr, int toval, int lineno)
+{
+ int timeout = jiffies + toval;
+ while (((inb_status(base_addr)&HCRE)==0) && (jiffies <= timeout))
+ ;
+ if (jiffies >= timeout) {
+ TIMEOUT_MSG(lineno);
+ return FALSE;
+ }
+ return TRUE;
+}
+
+static inline int
+wait_fast_hcre (unsigned int base_addr, int toval, int lineno)
+{
+ int timeout = 0;
+ while (((inb_status(base_addr)&HCRE)==0) && (timeout++ < toval))
+ ;
+ if (timeout >= toval) {
+ sti();
+ TIMEOUT_MSG(lineno);
+ return FALSE;
+ }
+ return TRUE;
+}
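+
+/* The wait_hcre() timeout is measured in clock ticks, so a toval of 5 is
+   roughly 50 ms with the i386 HZ of 100. wait_fast_hcre() instead spins for
+   a bounded number of port reads, since it is called with interrupts
+   disabled and jiffies cannot advance there; on timeout it does the sti()
+   itself. */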
+
+static int start_receive (struct device *, pcb_struct *);
+static void adapter_hard_reset (struct device *);
+
+inline static void
+adapter_reset (struct device * dev)
+{
+ int timeout;
+ unsigned char orig_hcr=inb_control(dev->base_addr);
+
+ elp_device * adapter=dev->priv;
+
+ outb_control(0,dev->base_addr);
+
+ if (inb_status(dev->base_addr)&ACRF) {
+ do {
+ inb_command(dev->base_addr);
+ timeout=jiffies+2;
+ while ((jiffies<=timeout) && !(inb_status(dev->base_addr)&ACRF))
+ ;
+ } while (inb_status(dev->base_addr)&ACRF);
+ set_hsf(dev->base_addr,HSF_PCB_NAK);
+ }
+
+ outb_control(inb_control(dev->base_addr)|ATTN|DIR,dev->base_addr);
+ timeout=jiffies+1;
+ while (jiffies<=timeout)
+ ;
+ outb_control(inb_control(dev->base_addr)&~ATTN,dev->base_addr);
+ timeout=jiffies+1;
+ while (jiffies<=timeout)
+ ;
+ outb_control(inb_control(dev->base_addr)|FLSH,dev->base_addr);
+ timeout=jiffies+1;
+ while (jiffies<=timeout)
+ ;
+ outb_control(inb_control(dev->base_addr)&~FLSH,dev->base_addr);
+ timeout=jiffies+1;
+ while (jiffies<=timeout)
+ ;
+
+ outb_control(orig_hcr, dev->base_addr);
+ if (!start_receive(dev, &adapter->tx_pcb))
+ printk("%s: start receive command failed \n", dev->name);
+}
+
+/*****************************************************************
+ *
+ * send_pcb
+ * Send a PCB to the adapter.
+ *
+ * output byte to command reg --<--+
+ * wait until HCRE is non zero |
+ * loop until all bytes sent -->--+
+ * set HSF1 and HSF2 to 1
+ * output pcb length
+ * wait until ASF give ACK or NAK
+ * set HSF1 and HSF2 to 0
+ *
+ *****************************************************************/
+
+static int
+send_pcb (struct device * dev, pcb_struct * pcb)
+{
+ int i;
+ int timeout;
+ int cont;
+
+ /*
+ * load each byte into the command register and
+ * wait for the HCRE bit to indicate the adapter
+	 * has read the byte
+ */
+ set_hsf(dev->base_addr,0);
+ if ((cont = WAIT_HCRE(dev->base_addr,5))) {
+ cli();
+ if (pcb->command==CMD_TRANSMIT_PACKET)
+ outb_control(inb_control(dev->base_addr)&~DIR,dev->base_addr);
+ outb_command(pcb->command, dev->base_addr);
+ sti();
+ cont = WAIT_HCRE(dev->base_addr,5);
+ }
+
+ if (cont) {
+ outb_command(pcb->length, dev->base_addr);
+ cont = WAIT_HCRE(dev->base_addr,5);
+ }
+
+ cli();
+ for (i = 0; cont && (i < pcb->length); i++) {
+ outb_command(pcb->data.raw[i], dev->base_addr);
+ cont = wait_fast_hcre(dev->base_addr,20000,__LINE__);
+	} /* if wait_fast_hcre() failed, it has already done sti() */
+
+ /* set the host status bits to indicate end of PCB */
+ /* send the total packet length as well */
+ /* wait for the adapter to indicate that it has read the PCB */
+ if (cont) {
+ set_hsf(dev->base_addr,HSF_PCB_END);
+ outb_command(2+pcb->length, dev->base_addr);
+ sti();
+ timeout = jiffies + 7;
+ while (jiffies < timeout) {
+ i = GET_ASF(dev->base_addr);
+ if ((i == ASF_PCB_ACK) || (i == ASF_PCB_NAK))
+ break;
+ }
+
+ if (i == ASF_PCB_ACK) {
+ reset_count=0;
+ return TRUE;
+ }
+ else if (i == ASF_PCB_NAK) {
+ printk("%s: PCB send was NAKed\n", dev->name);
+ } else {
+ printk("%s: timeout after sending PCB\n", dev->name);
+ }
+ } else {
+ sti();
+ printk("%s: timeout in middle of sending PCB\n", dev->name);
+ }
+
+ adapter_reset(dev);
+ return FALSE;
+}
+
+/*****************************************************************
+ *
+ * receive_pcb
+ *	Read a PCB from the adapter
+ *
+ * wait for ACRF to be non-zero ---<---+
+ * input a byte |
+ * if ASF1 and ASF2 were not both one |
+ * before byte was read, loop --->---+
+ * set HSF1 and HSF2 for ack
+ *
+ *****************************************************************/
+
+static int
+receive_pcb (struct device * dev, pcb_struct * pcb)
+{
+ int i, j;
+ int total_length;
+ int stat;
+ int timeout;
+
+ CHECK_NULL(pcb);
+ CHECK_NULL(dev);
+
+ set_hsf(dev->base_addr,0);
+
+ /* get the command code */
+ timeout = jiffies + 2;
+ while (((stat = get_status(dev->base_addr))&ACRF) == 0 && jiffies < timeout)
+ ;
+ if (jiffies >= timeout) {
+ TIMEOUT_MSG(__LINE__);
+ return FALSE;
+ }
+
+ pcb->command = inb_command(dev->base_addr);
+
+ /* read the data length */
+ timeout = jiffies + 3;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && jiffies < timeout)
+ ;
+ if (jiffies >= timeout) {
+ TIMEOUT_MSG(__LINE__);
+ return FALSE;
+ }
+ pcb->length = inb_command(dev->base_addr);
+
+ if (pcb->length > MAX_PCB_DATA) {
+ INVALID_PCB_MSG(pcb->length);
+ adapter_reset(dev);
+ return FALSE;
+ }
+
+ /* read the data */
+ cli();
+ i = 0;
+ do {
+ j = 0;
+ while (((stat = get_status(dev->base_addr))&ACRF) == 0 && j++ < 20000)
+ ;
+ pcb->data.raw[i++] = inb_command(dev->base_addr);
+ if (i > MAX_PCB_DATA)
+ INVALID_PCB_MSG(i);
+ } while ((stat & ASF_PCB_MASK) != ASF_PCB_END && j < 20000);
+ sti();
+ if (j >= 20000) {
+ TIMEOUT_MSG(__LINE__);
+ return FALSE;
+ }
+
+ /* woops, the last "data" byte was really the length! */
+ total_length = pcb->data.raw[--i];
+
+ /* safety check total length vs data length */
+ if (total_length != (pcb->length + 2)) {
+ if (elp_debug >= 2)
+ printk("%s: mangled PCB received\n", dev->name);
+ set_hsf(dev->base_addr,HSF_PCB_NAK);
+ return FALSE;
+ }
+
+ set_hsf(dev->base_addr,HSF_PCB_ACK);
+ reset_count=0;
+ return TRUE;
+}
+
+static void
+adapter_hard_reset (struct device * dev)
+{
+ int timeout;
+ long flags;
+
+ CHECK_NULL(dev);
+
+ save_flags(flags);
+ sti();
+
+ if (elp_debug > 0)
+ printk("%s: Resetting the adapter, please wait (approx 20 s)\n",
+ dev->name);
+ /*
+ * take FLSH and ATTN high
+ */
+ outb_control(ATTN|FLSH, dev->base_addr);
+
+ /*
+ * wait for a little bit
+ */
+ for (timeout = jiffies + 20; jiffies <= timeout; )
+ ;
+
+ /*
+ * now take them low
+ */
+ outb_control(0, dev->base_addr);
+
+ /*
+ * wait for a little bit
+ */
+ for (timeout = jiffies + 20; jiffies <= timeout; )
+ ;
+
+ /*
+	 * now hang around until the board gets its act together
+ */
+ for (timeout = jiffies + (100 * 15); jiffies <= timeout; )
+ if (GET_ASF(dev->base_addr) != ASF_PCB_END)
+ break;
+ restore_flags(flags);
+}
+
+/******************************************************
+ *
+ * queue a receive command on the adapter so we will get an
+ * interrupt when a packet is received.
+ *
+ ******************************************************/
+
+static int
+start_receive (struct device * dev, pcb_struct * tx_pcb)
+{
+ CHECK_NULL(dev);
+ CHECK_NULL(tx_pcb);
+
+ if (elp_debug >= 3)
+ printk("%s: restarting receiver\n", dev->name);
+ tx_pcb->command = CMD_RECEIVE_PACKET;
+ tx_pcb->length = sizeof(struct Rcv_pkt);
+ tx_pcb->data.rcv_pkt.buf_seg
+ = tx_pcb->data.rcv_pkt.buf_ofs = 0; /* Unused */
+ tx_pcb->data.rcv_pkt.buf_len = 1600;
+ tx_pcb->data.rcv_pkt.timeout = 0; /* set timeout to zero */
+ return send_pcb(dev, tx_pcb);
+}
+
+/******************************************************
+ *
+ * extract a packet from the adapter
+ * this routine is only called from within the interrupt
+ * service routine, so no cli/sti calls are needed
+ * note that the length is always assumed to be even
+ *
+ ******************************************************/
+
+static void
+receive_packet (struct device * dev, int len)
+{
+ register int i;
+ unsigned short * ptr;
+ int timeout;
+ int rlen;
+ struct sk_buff *skb;
+ elp_device * adapter;
+
+ CHECK_NULL(dev);
+ adapter=dev->priv;
+
+ if (len <= 0 || ((len & ~1) != len))
+ if (elp_debug >= 3) {
+ sti();
+ printk("*** bad packet len %d at %s(%d)\n",len,filename,__LINE__);
+ cli();
+ }
+
+ rlen = (len+1) & ~1;
+
+ skb = dev_alloc_skb(rlen+2);
+
+ /*
+ * make sure the data register is going the right way
+ */
+
+ outb_control(inb_control(dev->base_addr)|DIR, dev->base_addr);
+
+ /*
+ * if buffer could not be allocated, swallow it
+ */
+ if (skb == NULL) {
+ for (i = 0; i < (rlen/2); i++) {
+ timeout = 0;
+ while ((inb_status(dev->base_addr)&HRDY) == 0 && timeout++ < 20000)
+ ;
+ if (timeout >= 20000) {
+ sti();
+ TIMEOUT_MSG(__LINE__);
+ break;
+ }
+
+ inw_data(dev->base_addr);
+ }
+ adapter->stats.rx_dropped++;
+
+ } else {
+ skb_reserve(skb,2); /* 16 byte alignment */
+ skb->dev = dev;
+
+ /*
+ * now read the data from the adapter
+ */
+ ptr = (unsigned short *)skb_put(skb,len);
+ for (i = 0; i < (rlen/2); i++) {
+ timeout = 0;
+ while ((inb_status(dev->base_addr)&HRDY) == 0 && timeout++ < 20000)
+ ;
+ if (timeout >= 20000) {
+ sti();
+ printk("*** timeout at %s(%d) reading word %d of %d ***\n",
+ filename,__LINE__, i, rlen/2);
+ kfree_skb(skb, FREE_WRITE);
+ return;
+ }
+
+ *ptr = inw_data(dev->base_addr);
+ ptr++;
+ }
+
+ sti();
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ }
+
+ outb_control(inb_control(dev->base_addr)&~DIR, dev->base_addr);
+}
+
+
+/******************************************************
+ *
+ * interrupt handler
+ *
+ ******************************************************/
+
+static void
+elp_interrupt (int irq, struct pt_regs *reg_ptr)
+{
+ int len;
+ int dlen;
+ struct device *dev;
+ elp_device * adapter;
+ int timeout;
+
+ if (irq < 0 || irq > 15) {
+ printk ("elp_interrupt(): illegal IRQ number found in interrupt routine (%i)\n", irq);
+ return;
+ }
+
+ dev = irq2dev_map[irq];
+
+ if (dev == NULL) {
+ printk ("elp_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ adapter = (elp_device *) dev->priv;
+
+ CHECK_NULL(adapter);
+
+ if (dev->interrupt)
+ if (elp_debug >= 2)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+ dev->interrupt = 1;
+
+ /*
+ * allow interrupts (we need timers!)
+ */
+ sti();
+
+ /*
+ * receive a PCB from the adapter
+ */
+ timeout = jiffies + 3;
+ while ((inb_status(dev->base_addr)&ACRF) != 0 && jiffies < timeout) {
+
+ if (receive_pcb(dev, &adapter->irx_pcb)) {
+
+ switch (adapter->irx_pcb.command) {
+
+ /*
+ * received a packet - this must be handled fast
+ */
+ case CMD_RECEIVE_PACKET_COMPLETE:
+ /* if the device isn't open, don't pass packets up the stack */
+ if (dev->start == 0)
+ break;
+ cli();
+ /* Set direction of adapter FIFO */
+ outb_control(inb_control(dev->base_addr)|DIR,
+ dev->base_addr);
+ len = adapter->irx_pcb.data.rcv_resp.pkt_len;
+ dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
+ if (adapter->irx_pcb.data.rcv_resp.timeout != 0) {
+ printk("%s: interrupt - packet not received correctly\n", dev->name);
+ sti();
+ } else {
+ if (elp_debug >= 3) {
+ sti();
+ printk("%s: interrupt - packet received of length %i (%i)\n", dev->name, len, dlen);
+ cli();
+ }
+ receive_packet(dev, dlen);
+ sti();
+ if (elp_debug >= 3)
+ printk("%s: packet received\n", dev->name);
+ }
+ if (dev->start && !start_receive(dev, &adapter->itx_pcb))
+ if (elp_debug >= 2)
+ printk("%s: interrupt - failed to send receive start PCB\n", dev->name);
+ if (elp_debug >= 3)
+ printk("%s: receive procedure complete\n", dev->name);
+
+ break;
+
+ /*
+ * 82586 configured correctly
+ */
+ case CMD_CONFIGURE_82586_RESPONSE:
+ adapter->got[CMD_CONFIGURE_82586] = 1;
+ if (elp_debug >= 3)
+ printk("%s: interrupt - configure response received\n", dev->name);
+ break;
+
+ /*
+ * Adapter memory configuration
+ */
+ case CMD_CONFIGURE_ADAPTER_RESPONSE:
+ adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1;
+ if (elp_debug >= 3)
+ printk("%s: Adapter memory configuration %s.\n",dev->name,
+ adapter->irx_pcb.data.failed?"failed":"succeeded");
+ break;
+
+ /*
+ * Multicast list loading
+ */
+ case CMD_LOAD_MULTICAST_RESPONSE:
+ adapter->got[CMD_LOAD_MULTICAST_LIST] = 1;
+ if (elp_debug >= 3)
+ printk("%s: Multicast address list loading %s.\n",dev->name,
+ adapter->irx_pcb.data.failed?"failed":"succeeded");
+ break;
+
+ /*
+ * Station address setting
+ */
+ case CMD_SET_ADDRESS_RESPONSE:
+ adapter->got[CMD_SET_STATION_ADDRESS] = 1;
+ if (elp_debug >= 3)
+ printk("%s: Ethernet address setting %s.\n",dev->name,
+ adapter->irx_pcb.data.failed?"failed":"succeeded");
+ break;
+
+
+ /*
+ * received board statistics
+ */
+ case CMD_NETWORK_STATISTICS_RESPONSE:
+ adapter->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
+ adapter->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
+ adapter->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
+ adapter->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
+ adapter->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
+ adapter->got[CMD_NETWORK_STATISTICS] = 1;
+ if (elp_debug >= 3)
+ printk("%s: interrupt - statistics response received\n", dev->name);
+ break;
+
+ /*
+ * sent a packet
+ */
+ case CMD_TRANSMIT_PACKET_COMPLETE:
+ if (elp_debug >= 3)
+ printk("%s: interrupt - packet sent\n", dev->name);
+ if (dev->start == 0)
+ break;
+ if (adapter->irx_pcb.data.xmit_resp.c_stat != 0)
+ if (elp_debug >= 2)
+ printk("%s: interrupt - error sending packet %4.4x\n",
+ dev->name, adapter->irx_pcb.data.xmit_resp.c_stat);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ break;
+
+ /*
+ * some unknown PCB
+ */
+ default:
+ printk("%s: unknown PCB received - %2.2x\n", dev->name, adapter->irx_pcb.command);
+ break;
+ }
+ } else {
+ printk("%s: failed to read PCB on interrupt\n", dev->name);
+ adapter_reset(dev);
+ }
+ }
+
+ /*
+ * indicate no longer in interrupt routine
+ */
+ dev->interrupt = 0;
+}
+
+
+/******************************************************
+ *
+ * open the board
+ *
+ ******************************************************/
+
+static int
+elp_open (struct device *dev)
+{
+ elp_device * adapter;
+
+ CHECK_NULL(dev);
+
+ adapter = dev->priv;
+
+ if (elp_debug >= 3)
+ printk("%s: request to open device\n", dev->name);
+
+ /*
+ * make sure we actually found the device
+ */
+ if (adapter == NULL) {
+ printk("%s: Opening a non-existent physical device\n", dev->name);
+ return -EAGAIN;
+ }
+
+ /*
+ * disable interrupts on the board
+ */
+ outb_control(0x00, dev->base_addr);
+
+ /*
+ * clear any pending interrupts
+ */
+ inb_command(dev->base_addr);
+ adapter_reset(dev);
+
+ /*
+ * interrupt routine not entered
+ */
+ dev->interrupt = 0;
+
+ /*
+ * transmitter not busy
+ */
+ dev->tbusy = 0;
+
+ /*
+ * make sure we can find the device header given the interrupt number
+ */
+ irq2dev_map[dev->irq] = dev;
+
+ /*
+ * install our interrupt service routine
+ */
+ if (request_irq(dev->irq, &elp_interrupt, 0, "3c505")) {
+ irq2dev_map[dev->irq] = NULL;
+ return -EAGAIN;
+ }
+
+ /*
+ * enable interrupts on the board
+ */
+ outb_control(CMDE, dev->base_addr);
+
+ /*
+ * device is now officially open!
+ */
+ dev->start = 1;
+
+ /*
+ * configure adapter memory: we need 10 multicast addresses, default==0
+ */
+ if (elp_debug >= 3)
+ printk("%s: sending 3c505 memory configuration command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
+ adapter->tx_pcb.data.memconf.cmd_q = 10;
+ adapter->tx_pcb.data.memconf.rcv_q = 20;
+ adapter->tx_pcb.data.memconf.mcast = 10;
+ adapter->tx_pcb.data.memconf.frame = 20;
+ adapter->tx_pcb.data.memconf.rcv_b = 20;
+ adapter->tx_pcb.data.memconf.progs = 0;
+ adapter->tx_pcb.length = sizeof(struct Memconf);
+ adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk("%s: couldn't send memory configuration command\n", dev->name);
+ else {
+ int timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && jiffies < timeout)
+ ;
+ if (jiffies >= timeout)
+ TIMEOUT_MSG(__LINE__);
+ }
+
+
+ /*
+ * configure adapter to receive broadcast messages and wait for response
+ */
+ if (elp_debug >= 3)
+ printk("%s: sending 82586 configure command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_82586;
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
+ adapter->tx_pcb.length = 2;
+ adapter->got[CMD_CONFIGURE_82586] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk("%s: couldn't send 82586 configure command\n", dev->name);
+ else {
+ int timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_CONFIGURE_82586] == 0 && jiffies < timeout)
+ ;
+ if (jiffies >= timeout)
+ TIMEOUT_MSG(__LINE__);
+ }
+
+ /*
+ * queue receive commands to provide buffering
+ */
+ if (!start_receive(dev, &adapter->tx_pcb))
+ printk("%s: start receive command failed \n", dev->name);
+ if (elp_debug >= 3)
+ printk("%s: start receive command sent\n", dev->name);
+
+ MOD_INC_USE_COUNT;
+
+ return 0; /* Always succeed */
+}
+
+
+/******************************************************
+ *
+ * send a packet to the adapter
+ *
+ ******************************************************/
+
+static int
+send_packet (struct device * dev, unsigned char * ptr, int len)
+{
+ int i;
+ int timeout = 0;
+ elp_device * adapter;
+
+ /*
+ * make sure the length is even and no shorter than 60 bytes
+ */
+ unsigned int nlen = (((len < 60) ? 60 : len) + 1) & (~1);
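+	/* e.g. len 41 is padded up to nlen 60, len 61 rounds up to 62, and an
+	   even len >= 60 passes through unchanged. */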
+
+ CHECK_NULL(dev);
+ CHECK_NULL(ptr);
+
+ adapter = dev->priv;
+
+ if (nlen < len)
+ printk("Warning, bad length nlen=%d len=%d %s(%d)\n",nlen,len,filename,__LINE__);
+
+ /*
+ * send the adapter a transmit packet command. Ignore segment and offset
+ * and make sure the length is even
+ */
+ adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
+ adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
+ adapter->tx_pcb.data.xmit_pkt.buf_ofs
+ = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0; /* Unused */
+ adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;
+ if (!send_pcb(dev, &adapter->tx_pcb)) {
+ return FALSE;
+ }
+
+ /*
+ * write data to the adapter
+ */
+ cli();
+ for (i = 0; i < (nlen/2);i++) {
+ while (((inb_status(dev->base_addr)&HRDY) == 0)
+ && (timeout++ < 20000))
+ ;
+ if (timeout >= 20000) {
+ sti();
+ printk("%s: timeout at %s(%d) writing word %d of %d ***\n",
+ dev->name,filename,__LINE__, i, nlen/2);
+ return FALSE;
+ }
+
+ outw_data(*(short *)ptr, dev->base_addr);
+ ptr +=2;
+ }
+ sti();
+
+ return TRUE;
+}
+
+/******************************************************
+ *
+ * start the transmitter
+ * return 0 if sent OK, else return 1
+ *
+ ******************************************************/
+
+static int
+elp_start_xmit (struct sk_buff *skb, struct device *dev)
+{
+ CHECK_NULL(dev);
+
+ /*
+ * not sure what this does, but the 3c509 driver does it, so...
+ */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /*
+ * if we ended up with a munged length, don't send it
+ */
+ if (skb->len <= 0)
+ return 0;
+
+ if (elp_debug >= 3)
+ printk("%s: request to send packet of length %d\n", dev->name, (int)skb->len);
+
+ /*
+ * if the transmitter is still busy, we have a transmit timeout...
+ */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ int stat;
+ if (tickssofar < 50) /* was 500, AJT */
+ return 1;
+ printk("%s: transmit timed out, not resetting adapter\n", dev->name);
+ if (((stat=inb_status(dev->base_addr))&ACRF) != 0)
+ printk("%s: hmmm...seemed to have missed an interrupt!\n", dev->name);
+ printk("%s: status %#02x\n", dev->name, stat);
+ dev->trans_start = jiffies;
+ dev->tbusy = 0;
+ }
+
+ /*
+ * send the packet at skb->data for skb->len
+ */
+ if (!send_packet(dev, skb->data, skb->len)) {
+ printk("%s: send packet PCB failed\n", dev->name);
+ return 1;
+ }
+
+ if (elp_debug >= 3)
+ printk("%s: packet of length %d sent\n", dev->name, (int)skb->len);
+
+
+ /*
+ * start the transmit timeout
+ */
+ dev->trans_start = jiffies;
+
+ /*
+ * the transmitter is now busy
+ */
+ dev->tbusy = 1;
+
+ /*
+ * free the buffer
+ */
+ dev_kfree_skb(skb, FREE_WRITE);
+
+ return 0;
+}
+
+/******************************************************
+ *
+ * return statistics on the board
+ *
+ ******************************************************/
+
+static struct enet_statistics *
+elp_get_stats (struct device *dev)
+{
+ elp_device *adapter = (elp_device *) dev->priv;
+
+ if (elp_debug >= 3)
+ printk("%s: request for stats\n", dev->name);
+
+	/* If the device is closed, just return the latest stats we have;
+	   we cannot query the adapter without interrupts. */
+ if (!dev->start)
+ return &adapter->stats;
+
+ /* send a get statistics command to the board */
+ adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
+ adapter->tx_pcb.length = 0;
+ adapter->got[CMD_NETWORK_STATISTICS] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk("%s: couldn't send get statistics command\n", dev->name);
+ else {
+ int timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && jiffies < timeout)
+ ;
+ if (jiffies >= timeout) {
+ TIMEOUT_MSG(__LINE__);
+ return &adapter->stats;
+ }
+ }
+
+ /* statistics are now up to date */
+ return &adapter->stats;
+}
+
+/******************************************************
+ *
+ * close the board
+ *
+ ******************************************************/
+
+static int
+elp_close (struct device *dev)
+{
+ elp_device * adapter;
+
+ CHECK_NULL(dev);
+ adapter = dev->priv;
+ CHECK_NULL(adapter);
+
+ if (elp_debug >= 3)
+ printk("%s: request to close device\n", dev->name);
+
+ /* Someone may request the device statistic information even when
+ * the interface is closed. The following will update the statistics
+ * structure in the driver, so we'll be able to give current statistics.
+ */
+ (void) elp_get_stats(dev);
+
+ /*
+ * disable interrupts on the board
+ */
+ outb_control(0x00, dev->base_addr);
+
+ /*
+ * flag transmitter as busy (i.e. not available)
+ */
+ dev->tbusy = 1;
+
+ /*
+ * indicate device is closed
+ */
+ dev->start = 0;
+
+ /*
+ * release the IRQ
+ */
+ free_irq(dev->irq);
+
+ /*
+ * and we no longer have to map irq to dev either
+ */
+ irq2dev_map[dev->irq] = 0;
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+/************************************************************
+ *
+ * Set multicast list and/or promiscuous mode
+ * dev->mc_count==0: clear mc_list
+ * IFF_PROMISC or IFF_ALLMULTI set: enable promiscuous reception
+ * dev->mc_count>0: load mc_list (at most 10 addresses per command)
+ *
+ ************************************************************/
+
+static void
+elp_set_mc_list (struct device *dev)
+{
+ elp_device *adapter = (elp_device *) dev->priv;
+ struct dev_mc_list *dmi=dev->mc_list;
+ int i;
+
+ if (elp_debug >= 3)
+ printk("%s: request to set multicast list\n", dev->name);
+
+ if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
+ {
+ /* send a "load multicast list" command to the board, max 10 addrs/cmd */
+ /* if num_addrs==0 the list will be cleared */
+ adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
+ adapter->tx_pcb.length = 6*dev->mc_count;
+ for (i=0;i<dev->mc_count;i++)
+ {
+ memcpy(adapter->tx_pcb.data.multicast[i], dmi->dmi_addr,6);
+ dmi=dmi->next;
+ }
+ adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk("%s: couldn't send set_multicast command\n", dev->name);
+ else {
+ int timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && jiffies < timeout)
+ ;
+ if (jiffies >= timeout) {
+ TIMEOUT_MSG(__LINE__);
+ }
+ }
+ if (dev->mc_count)
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
+ else /* num_addrs == 0 */
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
+ }
+ else
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC;
+ /*
+ * configure adapter to receive messages (as specified above)
+ * and wait for response
+ */
+ if (elp_debug >= 3)
+ printk("%s: sending 82586 configure command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_82586;
+ adapter->tx_pcb.length = 2;
+ adapter->got[CMD_CONFIGURE_82586] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk("%s: couldn't send 82586 configure command\n", dev->name);
+ else {
+ int timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_CONFIGURE_82586] == 0 && jiffies < timeout)
+ ;
+ if (jiffies >= timeout)
+ TIMEOUT_MSG(__LINE__);
+ }
+}
+
+/******************************************************
+ *
+ * initialise Etherlink Plus board
+ *
+ ******************************************************/
+
+static void
+elp_init (struct device *dev)
+{
+ elp_device * adapter;
+
+ CHECK_NULL(dev);
+
+ /*
+ * set ptrs to various functions
+ */
+ dev->open = elp_open; /* local */
+ dev->stop = elp_close; /* local */
+ dev->get_stats = elp_get_stats; /* local */
+ dev->hard_start_xmit = elp_start_xmit; /* local */
+ dev->set_multicast_list = elp_set_mc_list; /* local */
+
+ /* Setup the generic properties */
+ ether_setup(dev);
+
+ /*
+ * setup ptr to adapter specific information
+ */
+ adapter = (elp_device *)(dev->priv = kmalloc(sizeof(elp_device), GFP_KERNEL));
+ CHECK_NULL(adapter);
+ if (adapter == NULL)
+ return;
+ memset(&(adapter->stats), 0, sizeof(struct enet_statistics));
+
+ /*
+ * memory information
+ */
+ dev->mem_start = dev->mem_end = dev->rmem_end = dev->rmem_start = 0;
+}
+
+/************************************************************
+ *
+ * A couple of tests to see if there's a 3C505 or not
+ * Called only by elp_autodetect
+ ************************************************************/
+
+static int
+elp_sense (struct device * dev)
+{
+ int timeout;
+ int addr=dev->base_addr;
+ const char *name=dev->name;
+ long flags;
+ byte orig_HCR, orig_HSR;
+
+ if (check_region(addr, 0xf))
+ return -1;
+
+ orig_HCR=inb_control(addr);
+ orig_HSR=inb_status(addr);
+
+ if (elp_debug > 0)
+ printk(search_msg, name, addr);
+
+ if (((orig_HCR==0xff) && (orig_HSR==0xff)) ||
+ ((orig_HCR & DIR) != (orig_HSR & DIR))) {
+ if (elp_debug > 0)
+ printk(notfound_msg, 1);
+ return -1; /* It can't be 3c505 if HCR.DIR != HSR.DIR */
+ }
+
+ /* Enable interrupts - we need timers! */
+ save_flags(flags);
+ sti();
+
+ /* Wait for a while; the adapter may still be booting up */
+ if (elp_debug > 0)
+ printk(stilllooking_msg);
+ for (timeout = jiffies + (100 * 15); jiffies <= timeout; )
+ if (GET_ASF(addr) != ASF_PCB_END)
+ break;
+
+ if (orig_HCR & DIR) {
+ /* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */
+ outb_control(orig_HCR & ~DIR,addr);
+ timeout = jiffies+30;
+ while (jiffies < timeout)
+ ;
+ restore_flags(flags);
+ if (inb_status(addr) & DIR) {
+ outb_control(orig_HCR,addr);
+ if (elp_debug > 0)
+ printk(notfound_msg, 2);
+ return -1;
+ }
+ } else {
+ /* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */
+ outb_control(orig_HCR | DIR,addr);
+ timeout = jiffies+300;
+ while (jiffies < timeout)
+ ;
+ restore_flags(flags);
+ if (!(inb_status(addr) & DIR)) {
+ outb_control(orig_HCR,addr);
+ if (elp_debug > 0)
+ printk(notfound_msg, 3);
+ return -1;
+ }
+ }
+ /*
+ * It certainly looks like a 3c505. If it has DMA enabled, it needs
+ * a hard reset. Also, do a hard reset if selected at compile time.
+ */
+ if (elp_debug > 0)
+ printk(found_msg);
+
+ if (((orig_HCR==0x35) && (orig_HSR==0x5b)) || ELP_NEED_HARD_RESET)
+ adapter_hard_reset(dev);
+ return 0;
+}
+
+/*************************************************************
+ *
+ * Search through addr_list[] and try to find a 3C505
+ * Called only by elplus_probe
+ *************************************************************/
+
+static int
+elp_autodetect (struct device * dev)
+{
+ int idx=0;
+
+ /* if base address set, then only check that address
+ otherwise, run through the table */
+ if (dev->base_addr != 0) { /* dev->base_addr == 0 ==> plain autodetect */
+ if (elp_sense(dev) == 0)
+ return dev->base_addr;
+ } else while ( (dev->base_addr=addr_list[idx++]) ) {
+ if (elp_sense(dev) == 0)
+ return dev->base_addr;
+ }
+
+ /* could not find an adapter */
+ if (elp_debug > 0)
+ printk(couldnot_msg, dev->name);
+
+ return 0; /* Because of this, the layer above will return -ENODEV */
+}
+
+/******************************************************
+ *
+ * probe for an Etherlink Plus board at the specified address
+ *
+ ******************************************************/
+
+int
+elplus_probe (struct device *dev)
+{
+ elp_device adapter;
+ int i;
+
+ CHECK_NULL(dev);
+
+ /*
+ * setup adapter structure
+ */
+
+ dev->base_addr = elp_autodetect(dev);
+ if ( !(dev->base_addr) )
+ return -ENODEV;
+
+ /*
+ * As we enter here from bootup, the adapter should have IRQs enabled,
+ * but we might as well enable them anyway.
+ */
+ outb_control(inb_control(dev->base_addr) | CMDE, dev->base_addr);
+ autoirq_setup(0);
+
+ /*
+ * use ethernet address command to probe for board in polled mode
+ * (this also raises the IRQ that we need for automatic detection)
+ */
+ adapter.tx_pcb.command = CMD_STATION_ADDRESS;
+ adapter.tx_pcb.length = 0;
+ if (!send_pcb (dev, &adapter.tx_pcb) ||
+ !receive_pcb(dev, &adapter.rx_pcb) ||
+ (adapter.rx_pcb.command != CMD_ADDRESS_RESPONSE) ||
+ (adapter.rx_pcb.length != 6)) {
+ printk("%s: not responding to first PCB\n", dev->name);
+ return -ENODEV;
+ }
+
+ if (dev->irq) { /* Is there a preset IRQ? */
+ if (dev->irq != autoirq_report(0)) {
+ printk("%s: Detected IRQ doesn't match user-defined one.\n",dev->name);
+ return -ENODEV;
+ }
+ /* if dev->irq == autoirq_report(0), all is well */
+ } else /* No preset IRQ; just use what we can detect */
+ dev->irq=autoirq_report(0);
+ switch (dev->irq) { /* Legal, sane? */
+ case 0:
+ printk("%s: No IRQ reported by autoirq_report().\n",dev->name);
+ printk("%s: Check the jumpers of your 3c505 board.\n",dev->name);
+ return -ENODEV;
+ case 1:
+ case 6:
+ case 8:
+ case 13:
+ printk("%s: Impossible IRQ %d reported by autoirq_report().\n",
+ dev->name, dev->irq);
+ return -ENODEV;
+ }
+ /*
+ * Now we have the IRQ number so we can disable the interrupts from
+ * the board until the board is opened.
+ */
+ outb_control(inb_control(dev->base_addr) & ~CMDE, dev->base_addr);
+
+ /*
+ * copy ethernet address into structure
+ */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = adapter.rx_pcb.data.eth_addr[i];
+
+ /*
+ * print remainder of startup message
+ */
+ printk("%s: 3c505 card found at I/O %#lx using IRQ%d"
+ " has address %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, dev->base_addr, dev->irq,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ /*
+ * and reserve the address region
+ */
+ request_region(dev->base_addr, ELP_IO_EXTENT, "3c505");
+
+ /*
+ * initialise the device
+ */
+ elp_init(dev);
+ return 0;
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_3c505 = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, elplus_probe };
+
+int io = 0x300;
+int irq = 0;
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("3c505: You should not use auto-probing with insmod!\n");
+ dev_3c505.base_addr = io;
+ dev_3c505.irq = irq;
+ if (register_netdev(&dev_3c505) != 0) {
+ printk("3c505: register_netdev() returned non-zero.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_3c505);
+ kfree(dev_3c505.priv);
+ dev_3c505.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_3c505.base_addr, ELP_IO_EXTENT);
+}
+#endif /* MODULE */
diff --git a/i386/i386at/gpl/linux/net/3c505.h b/i386/i386at/gpl/linux/net/3c505.h
new file mode 100644
index 00000000..f7d28368
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/3c505.h
@@ -0,0 +1,245 @@
+/*****************************************************************
+ *
+ * defines for 3Com Etherlink Plus adapter
+ *
+ *****************************************************************/
+
+/*
+ * I/O register offsets
+ */
+#define PORT_COMMAND 0x00 /* read/write, 8-bit */
+#define PORT_STATUS 0x02 /* read only, 8-bit */
+#define PORT_AUXDMA 0x02 /* write only, 8-bit */
+#define PORT_DATA 0x04 /* read/write, 16-bit */
+#define PORT_CONTROL 0x06 /* read/write, 8-bit */
+
+#define ELP_IO_EXTENT 0x10 /* size of used IO registers */
+
+/*
+ * host control registers bits
+ */
+#define ATTN 0x80 /* attention */
+#define FLSH 0x40 /* flush data register */
+#define DMAE 0x20 /* DMA enable */
+#define DIR 0x10 /* direction */
+#define TCEN 0x08 /* terminal count interrupt enable */
+#define CMDE 0x04 /* command register interrupt enable */
+#define HSF2 0x02 /* host status flag 2 */
+#define HSF1 0x01 /* host status flag 1 */
+
+/*
+ * combinations of HSF flags used for PCB transmission
+ */
+#define HSF_PCB_ACK HSF1
+#define HSF_PCB_NAK HSF2
+#define HSF_PCB_END (HSF2|HSF1)
+#define HSF_PCB_MASK (HSF2|HSF1)
+
+/*
+ * host status register bits
+ */
+#define HRDY 0x80 /* data register ready */
+#define HCRE 0x40 /* command register empty */
+#define ACRF 0x20 /* adapter command register full */
+/* #define DIR 0x10 direction - same as in control register */
+#define DONE 0x08 /* DMA done */
+#define ASF3 0x04 /* adapter status flag 3 */
+#define ASF2 0x02 /* adapter status flag 2 */
+#define ASF1 0x01 /* adapter status flag 1 */
+
+/*
+ * combinations of ASF flags used for PCB reception
+ */
+#define ASF_PCB_ACK ASF1
+#define ASF_PCB_NAK ASF2
+#define ASF_PCB_END (ASF2|ASF1)
+#define ASF_PCB_MASK (ASF2|ASF1)
+
+/*
+ * host aux DMA register bits
+ */
+#define DMA_BRST 0x01 /* DMA burst */
+
+/*
+ * maximum amount of data allowed in a PCB
+ */
+#define MAX_PCB_DATA 62
+
+/*****************************************************************
+ *
+ * timeout value
+ * this is a rough value used for loops to stop them from
+ * locking up the whole machine in the case of failure or
+ * error conditions
+ *
+ *****************************************************************/
+
+#define TIMEOUT 300
+
+/*****************************************************************
+ *
+ * PCB commands
+ *
+ *****************************************************************/
+
+enum {
+ /*
+ * host PCB commands
+ */
+ CMD_CONFIGURE_ADAPTER_MEMORY = 0x01,
+ CMD_CONFIGURE_82586 = 0x02,
+ CMD_STATION_ADDRESS = 0x03,
+ CMD_DMA_DOWNLOAD = 0x04,
+ CMD_DMA_UPLOAD = 0x05,
+ CMD_PIO_DOWNLOAD = 0x06,
+ CMD_PIO_UPLOAD = 0x07,
+ CMD_RECEIVE_PACKET = 0x08,
+ CMD_TRANSMIT_PACKET = 0x09,
+ CMD_NETWORK_STATISTICS = 0x0a,
+ CMD_LOAD_MULTICAST_LIST = 0x0b,
+ CMD_CLEAR_PROGRAM = 0x0c,
+ CMD_DOWNLOAD_PROGRAM = 0x0d,
+ CMD_EXECUTE_PROGRAM = 0x0e,
+ CMD_SELF_TEST = 0x0f,
+ CMD_SET_STATION_ADDRESS = 0x10,
+ CMD_ADAPTER_INFO = 0x11,
+ NUM_TRANSMIT_CMDS,
+
+ /*
+ * adapter PCB commands
+ */
+ CMD_CONFIGURE_ADAPTER_RESPONSE = 0x31,
+ CMD_CONFIGURE_82586_RESPONSE = 0x32,
+ CMD_ADDRESS_RESPONSE = 0x33,
+ CMD_DOWNLOAD_DATA_REQUEST = 0x34,
+ CMD_UPLOAD_DATA_REQUEST = 0x35,
+ CMD_RECEIVE_PACKET_COMPLETE = 0x38,
+ CMD_TRANSMIT_PACKET_COMPLETE = 0x39,
+ CMD_NETWORK_STATISTICS_RESPONSE = 0x3a,
+ CMD_LOAD_MULTICAST_RESPONSE = 0x3b,
+ CMD_CLEAR_PROGRAM_RESPONSE = 0x3c,
+ CMD_DOWNLOAD_PROGRAM_RESPONSE = 0x3d,
+ CMD_EXECUTE_RESPONSE = 0x3e,
+ CMD_SELF_TEST_RESPONSE = 0x3f,
+ CMD_SET_ADDRESS_RESPONSE = 0x40,
+ CMD_ADAPTER_INFO_RESPONSE = 0x41
+};
+
+/* Definitions for the PCB data structure */
+
+/* Data units */
+typedef unsigned char byte;
+typedef unsigned short int word;
+typedef unsigned long int dword;
+
+/* Data structures */
+struct Memconf {
+ word cmd_q,
+ rcv_q,
+ mcast,
+ frame,
+ rcv_b,
+ progs;
+};
+
+struct Rcv_pkt {
+ word buf_ofs,
+ buf_seg,
+ buf_len,
+ timeout;
+};
+
+struct Xmit_pkt {
+ word buf_ofs,
+ buf_seg,
+ pkt_len;
+};
+
+struct Rcv_resp {
+ word buf_ofs,
+ buf_seg,
+ buf_len,
+ pkt_len,
+ timeout,
+ status;
+ dword timetag;
+};
+
+struct Xmit_resp {
+ word buf_ofs,
+ buf_seg,
+ c_stat,
+ status;
+};
+
+
+struct Netstat {
+ dword tot_recv,
+ tot_xmit;
+ word err_CRC,
+ err_align,
+ err_res,
+ err_ovrrun;
+};
+
+
+struct Selftest {
+ word error;
+ union {
+ word ROM_cksum;
+ struct {
+ word ofs, seg;
+ } RAM;
+ word i82586;
+ } failure;
+};
+
+struct Info {
+ byte minor_vers,
+ major_vers;
+ word ROM_cksum,
+ RAM_sz,
+ free_ofs,
+ free_seg;
+};
+
+struct Memdump {
+ word size,
+ off,
+ seg;
+};
+
+/*
+Primary Command Block. The most important data structure. All communication
+between the host and the adapter is done with these. (Except for the actual
+ethernet data, which has different packaging.)
+*/
+typedef struct {
+ byte command;
+ byte length;
+ union {
+ struct Memconf memconf;
+ word configure;
+ struct Rcv_pkt rcv_pkt;
+ struct Xmit_pkt xmit_pkt;
+ byte multicast[10][6];
+ byte eth_addr[6];
+ byte failed;
+ struct Rcv_resp rcv_resp;
+ struct Xmit_resp xmit_resp;
+ struct Netstat netstat;
+ struct Selftest selftest;
+ struct Info info;
+ struct Memdump memdump;
+ byte raw[62];
+ } data;
+} pcb_struct;
+
+/* These defines for 'configure' */
+#define RECV_STATION 0x00
+#define RECV_BROAD 0x01
+#define RECV_MULTI 0x02
+#define RECV_PROMISC 0x04
+#define NO_LOOPBACK 0x00
+#define INT_LOOPBACK 0x08
+#define EXT_LOOPBACK 0x10
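+
+/*
+ * Illustrative sketch only -- this block is not part of the original driver.
+ * It shows the PCB request/response pattern that 3c505.c uses for the
+ * commands above (compare elp_get_stats()): fill in tx_pcb, clear the got[]
+ * flag for the expected response, push the PCB with send_pcb() and busy-wait
+ * until the interrupt handler marks the reply as received or TIMEOUT jiffies
+ * elapse.  send_pcb() and the elp_device type live in 3c505.c, so this would
+ * not compile from the header; it is kept under #if 0 purely as a reading aid.
+ */
+#if 0
+static void example_request_stats(struct device *dev)
+{
+	elp_device *adapter = (elp_device *) dev->priv;
+	int timeout;
+
+	adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
+	adapter->tx_pcb.length = 0;
+	adapter->got[CMD_NETWORK_STATISTICS] = 0;
+	if (!send_pcb(dev, &adapter->tx_pcb))
+		return;				/* command never reached the board */
+	timeout = jiffies + TIMEOUT;
+	while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && jiffies < timeout)
+		;				/* interrupt handler sets got[] */
+}
+#endif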
diff --git a/i386/i386at/gpl/linux/net/3c507.c b/i386/i386at/gpl/linux/net/3c507.c
new file mode 100644
index 00000000..f18bc0a3
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/3c507.c
@@ -0,0 +1,923 @@
+/* 3c507.c: An EtherLink16 device driver for Linux. */
+/*
+ Written 1993,1994 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Thanks go to jennings@Montrouge.SMR.slb.com ( Patrick Jennings)
+ and jrs@world.std.com (Rick Sladkey) for testing and bugfixes.
+ Mark Salazar <leslie@access.digex.net> made the changes for cards with
+ only 16K packet buffers.
+
+ Things remaining to do:
+ Verify that the tx and rx buffers don't have fencepost errors.
+ Move the theory of operation and memory map documentation.
+ The statistics need to be updated correctly.
+*/
+
+static const char *version =
+ "3c507.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+
+/*
+ Sources:
+ This driver wouldn't have been written with the availability of the
+ Crynwr driver source code. It provided a known-working implementation
+ that filled in the gaping holes of the Intel documentation. Three cheers
+ for Russ Nelson.
+
+ Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough
+ info that the casual reader might think that it documents the i82586 :-<.
+*/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/malloc.h>
+
+
+/* use 0 for production, 1 for verification, 2..7 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* A zero-terminated list of common I/O addresses to be probed. */
+static unsigned int netcard_portlist[] =
+ { 0x300, 0x320, 0x340, 0x280, 0};
+
+/*
+ Details of the i82586.
+
+ You'll really need the databook to understand the details of this part,
+ but the outline is that the i82586 has two separate processing units.
+ Both are started from a list of three configuration tables, of which only
+ the last, the System Control Block (SCB), is used after reset-time. The SCB
+ has the following fields:
+ Status word
+ Command word
+ Tx/Command block addr.
+ Rx block addr.
+ The command word accepts the following controls for the Tx and Rx units:
+ */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+
+/* The Rx unit uses a list of frame descriptors and a list of data buffer
+ descriptors. We use full-sized (1518 byte) data buffers, so there is
+ a one-to-one pairing of frame descriptors to buffer descriptors.
+
+ The Tx ("command") unit executes a list of commands that look like:
+ Status word Written by the 82586 when the command is done.
+ Command word Command in lower 3 bits, post-command action in upper 3
+ Link word The address of the next command.
+ Parameters (as needed).
+
+ Some definitions related to the Command Word are:
+ */
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
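+
+/*
+ * Reading aid only -- these structs are not in the original source.  They
+ * spell out the SCB and action-command layouts described in the comments
+ * above; the driver itself never declares them and instead pokes the shared
+ * memory through 16-bit offsets such as iSCB_STATUS and iSCB_CMD below.
+ */
+#if 0
+struct i82586_scb {		/* System Control Block, at offset 0x0008 */
+	ushort status;		/* written by the 82586 */
+	ushort command;		/* CUC_xxx / RX_xxx controls above */
+	ushort cbl_offset;	/* Tx/command block list */
+	ushort rfa_offset;	/* Rx frame area */
+	ushort crc_errs, align_errs, buf_errs, ovrn_errs;
+};
+
+struct i82586_cmd {		/* generic action command block */
+	ushort status;		/* written by the 82586 when the command is done */
+	ushort command;		/* command in low 3 bits, CMD_xxx flags in high bits */
+	ushort link;		/* offset of the next command */
+	/* parameters follow, as needed */
+};
+#endif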
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct enet_statistics stats;
+ int last_restart;
+ ushort rx_head;
+ ushort rx_tail;
+ ushort tx_head;
+ ushort tx_cmd_link;
+ ushort tx_reap;
+};
+
+/*
+ Details of the EtherLink16 Implementation
+ The 3c507 is a generic shared-memory i82586 implementation.
+ The host can map 16K, 32K, 48K, or 64K of the 64K memory into
+ 0x0[CD][08]0000, or all 64K into 0xF[02468]0000.
+ */
+
+/* Offsets from the base I/O address. */
+#define SA_DATA 0 /* Station address data, or 3Com signature. */
+#define MISC_CTRL 6 /* Switch the SA_DATA banks, and bus config bits. */
+#define RESET_IRQ 10 /* Reset the latched IRQ line. */
+#define SIGNAL_CA 11 /* Frob the 82586 Channel Attention line. */
+#define ROM_CONFIG 13
+#define MEM_CONFIG 14
+#define IRQ_CONFIG 15
+#define EL16_IO_EXTENT 16
+
+/* The ID port is used at boot-time to locate the ethercard. */
+#define ID_PORT 0x100
+
+/* Offsets to registers in the mailbox (SCB). */
+#define iSCB_STATUS 0x8
+#define iSCB_CMD 0xA
+#define iSCB_CBL 0xC /* Command BLock offset. */
+#define iSCB_RFA 0xE /* Rx Frame Area offset. */
+
+/* Since the 3c507 maps the shared memory window so that the last byte is
+ at 82586 address FFFF, the first byte is at 82586 address 0, 16K, 32K, or
+ 48K corresponding to window sizes of 64K, 48K, 32K and 16K respectively.
+ We can account for this by setting the 'SCB Base' entry in the ISCP table
+ below for all the 16 bit offset addresses, and also adding the 'SCB Base'
+ value to all 24 bit physical addresses (in the SCP table and the TX and RX
+ Buffer Descriptors).
+ -Mark
+ */
+#define SCB_BASE ((unsigned)64*1024 - (dev->mem_end - dev->mem_start))
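+
+/*
+ * Worked example (added as a note, not in the original source): with a 16K
+ * window, dev->mem_end - dev->mem_start == 0x4000, so SCB_BASE is
+ * 0x10000 - 0x4000 = 0xC000 and the first host byte (dev->mem_start) sits at
+ * 82586 address 0xC000.  A buffer at host offset 'off' from dev->mem_start is
+ * therefore at 82586 address off + SCB_BASE, which is how the buffer
+ * descriptors below form their 24-bit addresses (e.g. "cur_rxbuf + 0x20 +
+ * SCB_base" in init_rx_bufs()).
+ */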
+
+/*
+ What follows in 'init_words[]' is the "program" that is downloaded to the
+ 82586 memory. It's mostly tables and command blocks, and starts at the
+ reset address 0xfffff6. This is designed to be similar to the EtherExpress,
+ thus the unusual location of the SCB at 0x0008.
+
+ Even with the additional "don't care" values, doing it this way takes less
+ program space than initializing the individual tables, and I feel it's much
+ cleaner.
+
+ The databook is particularly useless for the first two structures, I had
+ to use the Crynwr driver as an example.
+
+ The memory setup is as follows:
+ */
+
+#define CONFIG_CMD 0x0018
+#define SET_SA_CMD 0x0024
+#define SA_OFFSET 0x002A
+#define IDLELOOP 0x30
+#define TDR_CMD 0x38
+#define TDR_TIME 0x3C
+#define DUMP_CMD 0x40
+#define DIAG_CMD 0x48
+#define SET_MC_CMD 0x4E
+#define DUMP_DATA 0x56 /* A 170 byte buffer for dump and Set-MC into. */
+
+#define TX_BUF_START 0x0100
+#define NUM_TX_BUFS 4
+#define TX_BUF_SIZE (1518+14+20+16) /* packet+header+TBD */
+
+#define RX_BUF_START 0x2000
+#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
+#define RX_BUF_END (dev->mem_end - dev->mem_start)
+
+/*
+ That's it: only 86 bytes to set up the beast, including every extra
+ command available. The 170 byte buffer at DUMP_DATA is shared between the
+ Dump command (called only by the diagnostic program) and the SetMulticastList
+ command.
+
+ To complete the memory setup you only have to write the station address at
+ SA_OFFSET and create the Tx & Rx buffer lists.
+
+ The Tx command chain and buffer list is setup as follows:
+ A Tx command table, with the data buffer pointing to...
+ A Tx data buffer descriptor. The packet is in a single buffer, rather than
+ chaining together several smaller buffers.
+ A NoOp command, which initially points to itself,
+ And the packet data.
+
+ A transmit is done by filling in the Tx command table and data buffer,
+ re-writing the NoOp command, and finally changing the offset of the last
+ command to point to the current Tx command. When the Tx command is finished,
+ it jumps to the NoOp, when it loops until the next Tx command changes the
+ "link offset" in the NoOp. This way the 82586 never has to go through the
+ slow restart sequence.
+
+ The Rx buffer list is set up in the obvious ring structure. We have enough
+ memory (and low enough interrupt latency) that we can avoid the complicated
+ Rx buffer linked lists by always associating a full-size Rx data buffer with
+ each Rx data frame.
+
+ I currently use four transmit buffers starting at TX_BUF_START (0x0100), and
+ use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers.
+
+ */
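+
+/*
+ * Illustration (not in the original source) of the re-link step described
+ * above, as performed at the end of hardware_send_packet() below: the NoOp
+ * the 82586 is currently spinning on gets its link offset pointed at the
+ * freshly written Tx command, and tx_cmd_link is advanced to the new block's
+ * own NoOp link word (tx_block + 20) so the next transmit can repeat the
+ * trick without ever restarting the command unit:
+ *
+ *	*(ushort *)(dev->mem_start + lp->tx_cmd_link) = tx_block;
+ *	lp->tx_cmd_link = tx_block + 20;
+ */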
+
+unsigned short init_words[] = {
+ /* System Configuration Pointer (SCP). */
+ 0x0000, /* Set bus size to 16 bits. */
+ 0,0, /* pad words. */
+ 0x0000,0x0000, /* ISCP phys addr, set in init_82586_mem(). */
+
+ /* Intermediate System Configuration Pointer (ISCP). */
+ 0x0001, /* Status word that's cleared when init is done. */
+ 0x0008,0,0, /* SCB offset, (skip, skip) */
+
+ /* System Control Block (SCB). */
+ 0,0xf000|RX_START|CUC_START, /* SCB status and cmd. */
+ CONFIG_CMD, /* Command list pointer, points to Configure. */
+ RX_BUF_START, /* Rx block list. */
+ 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */
+
+ /* 0x0018: Configure command. Change to put MAC data with packet. */
+ 0, CmdConfigure, /* Status, command. */
+ SET_SA_CMD, /* Next command is Set Station Addr. */
+ 0x0804, /* "4" bytes of config data, 8 byte FIFO. */
+ 0x2e40, /* Magic values, including MAC data location. */
+ 0, /* Unused pad word. */
+
+ /* 0x0024: Setup station address command. */
+ 0, CmdSASetup,
+ SET_MC_CMD, /* Next command. */
+ 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */
+
+ /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */
+ 0, CmdNOp, IDLELOOP, 0 /* pad */,
+
+ /* 0x0038: An unused Time-Domain Reflectometer command. */
+ 0, CmdTDR, IDLELOOP, 0,
+
+ /* 0x0040: An unused Dump State command. */
+ 0, CmdDump, IDLELOOP, DUMP_DATA,
+
+ /* 0x0048: An unused Diagnose command. */
+ 0, CmdDiagnose, IDLELOOP,
+
+ /* 0x004E: An empty set-multicast-list command. */
+ 0, CmdMulticastList, IDLELOOP, 0,
+};
+
+/* Index to functions, as function prototypes. */
+
+extern int el16_probe(struct device *dev); /* Called from Space.c */
+
+static int el16_probe1(struct device *dev, int ioaddr);
+static int el16_open(struct device *dev);
+static int el16_send_packet(struct sk_buff *skb, struct device *dev);
+static void el16_interrupt(int irq, struct pt_regs *regs);
+static void el16_rx(struct device *dev);
+static int el16_close(struct device *dev);
+static struct enet_statistics *el16_get_stats(struct device *dev);
+
+static void hardware_send_packet(struct device *dev, void *buf, short length);
+void init_82586_mem(struct device *dev);
+
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry netcard_drv =
+{"3c507", el16_probe1, EL16_IO_EXTENT, netcard_portlist};
+#endif
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, (detachable devices only) allocate space for the
+ device and return success.
+ */
+int
+el16_probe(struct device *dev)
+{
+ int base_addr = dev ? dev->base_addr : 0;
+ int i;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el16_probe1(dev, base_addr);
+ else if (base_addr != 0)
+ return ENXIO; /* Don't probe at all. */
+
+ for (i = 0; netcard_portlist[i]; i++) {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, EL16_IO_EXTENT))
+ continue;
+ if (el16_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+int el16_probe1(struct device *dev, int ioaddr)
+{
+ static unsigned char init_ID_done = 0, version_printed = 0;
+ int i, irq, irqval;
+
+ if (init_ID_done == 0) {
+ ushort lrs_state = 0xff;
+ /* Send the ID sequence to the ID_PORT to enable the board(s). */
+ outb(0x00, ID_PORT);
+ for(i = 0; i < 255; i++) {
+ outb(lrs_state, ID_PORT);
+ lrs_state <<= 1;
+ if (lrs_state & 0x100)
+ lrs_state ^= 0xe7;
+ }
+ outb(0x00, ID_PORT);
+ init_ID_done = 1;
+ }
+
+ if (inb(ioaddr) == '*' && inb(ioaddr+1) == '3'
+ && inb(ioaddr+2) == 'C' && inb(ioaddr+3) == 'O')
+ ;
+ else
+ return ENODEV;
+
+ /* Allocate a new 'dev' if needed. */
+ if (dev == NULL)
+ dev = init_etherdev(0, sizeof(struct net_local));
+
+ if (net_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: 3c507 at %#x,", dev->name, ioaddr);
+
+ /* We should make a few more checks here, like the first three octets of
+ the S.A. for the manufacturer's code. */
+
+ irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+ irqval = request_irq(irq, &el16_interrupt, 0, "3c507");
+ if (irqval) {
+ printk ("unable to get IRQ %d (irqval=%d).\n", irq, irqval);
+ return EAGAIN;
+ }
+
+ /* We've committed to using the board, and can start filling in *dev. */
+ request_region(ioaddr, EL16_IO_EXTENT, "3c507");
+ dev->base_addr = ioaddr;
+
+ outb(0x01, ioaddr + MISC_CTRL);
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = inb(ioaddr + i);
+ printk(" %02x", dev->dev_addr[i]);
+ }
+
+ if ((dev->mem_start & 0xf) > 0)
+ net_debug = dev->mem_start & 7;
+
+#ifdef MEM_BASE
+ dev->mem_start = MEM_BASE;
+ dev->mem_end = dev->mem_start + 0x10000;
+#else
+ {
+ int base;
+ int size;
+ char mem_config = inb(ioaddr + MEM_CONFIG);
+ if (mem_config & 0x20) {
+ size = 64*1024;
+ base = 0xf00000 + (mem_config & 0x08 ? 0x080000
+ : ((mem_config & 3) << 17));
+ } else {
+ size = ((mem_config & 3) + 1) << 14;
+ base = 0x0c0000 + ( (mem_config & 0x18) << 12);
+ }
+ dev->mem_start = base;
+ dev->mem_end = base + size;
+ }
+#endif
+
+ dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0;
+ dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+ printk(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq,
+ dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1);
+
+ if (net_debug)
+ printk(version);
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = el16_open;
+ dev->stop = el16_close;
+ dev->hard_start_xmit = el16_send_packet;
+ dev->get_stats = el16_get_stats;
+
+ ether_setup(dev); /* Generic ethernet behaviour */
+
+ dev->flags&=~IFF_MULTICAST; /* Multicast doesn't work */
+
+ return 0;
+}
+
+
+
+static int
+el16_open(struct device *dev)
+{
+ irq2dev_map[dev->irq] = dev;
+
+ /* Initialize the 82586 memory and start it. */
+ init_82586_mem(dev);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int
+el16_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ short *shmem = (short*)dev->mem_start;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ if (net_debug > 1)
+ printk("%s: transmit timed out, %s? ", dev->name,
+ shmem[iSCB_STATUS>>1] & 0x8000 ? "IRQ conflict" :
+ "network cable problem");
+ /* Try to restart the adaptor. */
+ if (lp->last_restart == lp->stats.tx_packets) {
+ if (net_debug > 1) printk("Resetting board.\n");
+ /* Completely reset the adaptor. */
+ init_82586_mem(dev);
+ } else {
+ /* Issue the channel attention signal and hope it "gets better". */
+ if (net_debug > 1) printk("Kicking board.\n");
+ shmem[iSCB_CMD>>1] = 0xf000|CUC_START|RX_START;
+ outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
+ lp->last_restart = lp->stats.tx_packets;
+ }
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+
+ /* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+ hardware_send_packet(dev, buf, length);
+ dev->trans_start = jiffies;
+ /* Enable the 82586 interrupt input. */
+ outb(0x84, ioaddr + MISC_CTRL);
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ /* You might need to clean up and record Tx statistics here. */
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+el16_interrupt(int irq, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 0;
+ ushort ack_cmd = 0;
+ ushort *shmem;
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+ shmem = ((ushort*)dev->mem_start);
+
+ status = shmem[iSCB_STATUS>>1];
+
+ if (net_debug > 4) {
+ printk("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status);
+ }
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+
+ /* Reap the Tx packet buffers. */
+ while (lp->tx_reap != lp->tx_head) {
+ unsigned short tx_status = shmem[lp->tx_reap>>1];
+
+ if (tx_status == 0) {
+ if (net_debug > 5) printk("Couldn't reap %#x.\n", lp->tx_reap);
+ break;
+ }
+ if (tx_status & 0x2000) {
+ lp->stats.tx_packets++;
+ lp->stats.collisions += tx_status & 0xf;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ } else {
+ lp->stats.tx_errors++;
+ if (tx_status & 0x0600) lp->stats.tx_carrier_errors++;
+ if (tx_status & 0x0100) lp->stats.tx_fifo_errors++;
+ if (!(tx_status & 0x0040)) lp->stats.tx_heartbeat_errors++;
+ if (tx_status & 0x0020) lp->stats.tx_aborted_errors++;
+ }
+ if (net_debug > 5)
+ printk("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
+ lp->tx_reap += TX_BUF_SIZE;
+ if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE)
+ lp->tx_reap = TX_BUF_START;
+ if (++boguscount > 4)
+ break;
+ }
+
+ if (status & 0x4000) { /* Packet received. */
+ if (net_debug > 5)
+ printk("Received packet, rx_head %04x.\n", lp->rx_head);
+ el16_rx(dev);
+ }
+
+ /* Acknowledge the interrupt sources. */
+ ack_cmd = status & 0xf000;
+
+ if ((status & 0x0700) != 0x0200 && dev->start) {
+ if (net_debug)
+ printk("%s: Command unit stopped, status %04x, restarting.\n",
+ dev->name, status);
+ /* If this ever occurs we should really re-write the idle loop, reset
+ the Tx list, and do a complete restart of the command unit.
+ For now we rely on the Tx timeout if the resume doesn't work. */
+ ack_cmd |= CUC_RESUME;
+ }
+
+ if ((status & 0x0070) != 0x0040 && dev->start) {
+ static void init_rx_bufs(struct device *);
+ /* The Rx unit is not ready, it must be hung. Restart the receiver by
+ initializing the rx buffers, and issuing an Rx start command. */
+ if (net_debug)
+ printk("%s: Rx unit stopped, status %04x, restarting.\n",
+ dev->name, status);
+ init_rx_bufs(dev);
+ shmem[iSCB_RFA >> 1] = RX_BUF_START;
+ ack_cmd |= RX_START;
+ }
+
+ shmem[iSCB_CMD>>1] = ack_cmd;
+ outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
+
+ /* Clear the latched interrupt. */
+ outb(0, ioaddr + RESET_IRQ);
+
+ /* Enable the 82586's interrupt input. */
+ outb(0x84, ioaddr + MISC_CTRL);
+
+ return;
+}
+
+static int
+el16_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ ushort *shmem = (short*)dev->mem_start;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Flush the Tx and disable Rx. */
+ shmem[iSCB_CMD >> 1] = RX_SUSPEND | CUC_SUSPEND;
+ outb(0, ioaddr + SIGNAL_CA);
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+
+ /* We always physically use the IRQ line, so we don't do free_irq().
+ We do remove ourselves from the map. */
+
+ irq2dev_map[dev->irq] = 0;
+
+ /* Update the statistics here. */
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+el16_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ /* ToDo: decide if there are any useful statistics from the SCB. */
+
+ return &lp->stats;
+}
+
+/* Initialize the Rx-block list. */
+static void
+init_rx_bufs(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short *write_ptr;
+ unsigned short SCB_base = SCB_BASE;
+
+ int cur_rxbuf = lp->rx_head = RX_BUF_START;
+
+ /* Initialize each Rx frame + data buffer. */
+ do { /* While there is room for one more. */
+
+ write_ptr = (unsigned short *)(dev->mem_start + cur_rxbuf);
+
+ *write_ptr++ = 0x0000; /* Status */
+ *write_ptr++ = 0x0000; /* Command */
+ *write_ptr++ = cur_rxbuf + RX_BUF_SIZE; /* Link */
+ *write_ptr++ = cur_rxbuf + 22; /* Buffer offset */
+ *write_ptr++ = 0x0000; /* Pad for dest addr. */
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000; /* Pad for source addr. */
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000; /* Pad for protocol. */
+
+ *write_ptr++ = 0x0000; /* Buffer: Actual count */
+ *write_ptr++ = -1; /* Buffer: Next (none). */
+ *write_ptr++ = cur_rxbuf + 0x20 + SCB_base; /* Buffer: Address low */
+ *write_ptr++ = 0x0000;
+ /* Finally, the number of bytes in the buffer. */
+ *write_ptr++ = 0x8000 + RX_BUF_SIZE-0x20;
+
+ lp->rx_tail = cur_rxbuf;
+ cur_rxbuf += RX_BUF_SIZE;
+ } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE);
+
+ /* Terminate the list by setting the EOL bit, and wrap the pointer to make
+ the list a ring. */
+ write_ptr = (unsigned short *)
+ (dev->mem_start + lp->rx_tail + 2);
+ *write_ptr++ = 0xC000; /* Command, mark as last. */
+ *write_ptr++ = lp->rx_head; /* Link */
+
+}
+
+void
+init_82586_mem(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ ushort *shmem = (short*)dev->mem_start;
+
+ /* Enable loopback to protect the wire while starting up,
+ and hold the 586 in reset during the memory initialization. */
+ outb(0x20, ioaddr + MISC_CTRL);
+
+ /* Fix the ISCP address and base. */
+ init_words[3] = SCB_BASE;
+ init_words[7] = SCB_BASE;
+
+ /* Write the words at 0xfff6 (address-aliased to 0xfffff6). */
+ memcpy((void*)dev->mem_end-10, init_words, 10);
+
+ /* Write the words at 0x0000. */
+ memcpy((char*)dev->mem_start, init_words + 5, sizeof(init_words) - 10);
+
+ /* Fill in the station address. */
+ memcpy((char*)dev->mem_start+SA_OFFSET, dev->dev_addr,
+ sizeof(dev->dev_addr));
+
+ /* The Tx-block list is written as needed. We just set up the values. */
+ lp->tx_cmd_link = IDLELOOP + 4;
+ lp->tx_head = lp->tx_reap = TX_BUF_START;
+
+ init_rx_bufs(dev);
+
+ /* Start the 586 by releasing the reset line, but leave loopback. */
+ outb(0xA0, ioaddr + MISC_CTRL);
+
+ /* This was time consuming to track down: you need to give two channel
+ attention signals to reliably start up the i82586. */
+ outb(0, ioaddr + SIGNAL_CA);
+
+ {
+ int boguscnt = 50;
+ while (shmem[iSCB_STATUS>>1] == 0)
+ if (--boguscnt == 0) {
+ printk("%s: i82586 initialization timed out with status %04x,"
+ "cmd %04x.\n", dev->name,
+ shmem[iSCB_STATUS>>1], shmem[iSCB_CMD>>1]);
+ break;
+ }
+ /* Issue the second channel-attention -- without it the 82586 won't start. */
+ outb(0, ioaddr + SIGNAL_CA);
+ }
+
+ /* Disable loopback and enable interrupts. */
+ outb(0x84, ioaddr + MISC_CTRL);
+ if (net_debug > 4)
+ printk("%s: Initialized 82586, status %04x.\n", dev->name,
+ shmem[iSCB_STATUS>>1]);
+ return;
+}
+
+static void
+hardware_send_packet(struct device *dev, void *buf, short length)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ ushort tx_block = lp->tx_head;
+ ushort *write_ptr = (ushort *)(dev->mem_start + tx_block);
+
+ /* Set the write pointer to the Tx block, and put out the header. */
+ *write_ptr++ = 0x0000; /* Tx status */
+ *write_ptr++ = CMD_INTR|CmdTx; /* Tx command */
+ *write_ptr++ = tx_block+16; /* Next command is a NoOp. */
+ *write_ptr++ = tx_block+8; /* Data Buffer offset. */
+
+ /* Output the data buffer descriptor. */
+ *write_ptr++ = length | 0x8000; /* Byte count parameter. */
+ *write_ptr++ = -1; /* No next data buffer. */
+ *write_ptr++ = tx_block+22+SCB_BASE;/* Buffer follows the NoOp command. */
+ *write_ptr++ = 0x0000; /* Buffer address high bits (always zero). */
+
+ /* Output the Loop-back NoOp command. */
+ *write_ptr++ = 0x0000; /* Tx status */
+ *write_ptr++ = CmdNOp; /* Tx command */
+ *write_ptr++ = tx_block+16; /* Next is myself. */
+
+ /* Output the packet at the write pointer. */
+ memcpy(write_ptr, buf, length);
+
+ /* Set the old command link pointing to this send packet. */
+ *(ushort*)(dev->mem_start + lp->tx_cmd_link) = tx_block;
+ lp->tx_cmd_link = tx_block + 20;
+
+ /* Set the next free tx region. */
+ lp->tx_head = tx_block + TX_BUF_SIZE;
+ if (lp->tx_head > RX_BUF_START - TX_BUF_SIZE)
+ lp->tx_head = TX_BUF_START;
+
+ if (net_debug > 4) {
+ printk("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n",
+ dev->name, ioaddr, length, tx_block, lp->tx_head);
+ }
+
+ if (lp->tx_head != lp->tx_reap)
+ dev->tbusy = 0;
+}
+
+static void
+el16_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short *shmem = (short*)dev->mem_start;
+ ushort rx_head = lp->rx_head;
+ ushort rx_tail = lp->rx_tail;
+ ushort boguscount = 10;
+ short frame_status;
+
+ while ((frame_status = shmem[rx_head>>1]) < 0) { /* Command complete */
+ ushort *read_frame = (short *)(dev->mem_start + rx_head);
+ ushort rfd_cmd = read_frame[1];
+ ushort next_rx_frame = read_frame[2];
+ ushort data_buffer_addr = read_frame[3];
+ ushort *data_frame = (short *)(dev->mem_start + data_buffer_addr);
+ ushort pkt_len = data_frame[0];
+
+ if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22
+ || (pkt_len & 0xC000) != 0xC000) {
+ printk("%s: Rx frame at %#x corrupted, status %04x cmd %04x"
+ "next %04x data-buf @%04x %04x.\n", dev->name, rx_head,
+ frame_status, rfd_cmd, next_rx_frame, data_buffer_addr,
+ pkt_len);
+ } else if ((frame_status & 0x2000) == 0) {
+ /* Frame Rxed, but with error. */
+ lp->stats.rx_errors++;
+ if (frame_status & 0x0800) lp->stats.rx_crc_errors++;
+ if (frame_status & 0x0400) lp->stats.rx_frame_errors++;
+ if (frame_status & 0x0200) lp->stats.rx_fifo_errors++;
+ if (frame_status & 0x0100) lp->stats.rx_over_errors++;
+ if (frame_status & 0x0080) lp->stats.rx_length_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ pkt_len &= 0x3fff;
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb_reserve(skb,2);
+ skb->dev = dev;
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb,pkt_len), data_frame + 5, pkt_len);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+
+ /* Clear the status word and set End-of-List on the rx frame. */
+ read_frame[0] = 0;
+ read_frame[1] = 0xC000;
+ /* Clear the end-of-list on the prev. RFD. */
+ *(short*)(dev->mem_start + rx_tail + 2) = 0x0000;
+
+ rx_tail = rx_head;
+ rx_head = next_rx_frame;
+ if (--boguscount == 0)
+ break;
+ }
+
+ lp->rx_head = rx_head;
+ lp->rx_tail = rx_tail;
+}
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_3c507 = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, el16_probe
+};
+
+static int io = 0x300;
+static int irq = 0;
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("3c507: You should not use auto-probing with insmod!\n");
+ dev_3c507.base_addr = io;
+ dev_3c507.irq = irq;
+ if (register_netdev(&dev_3c507) != 0) {
+ printk("3c507: register_netdev() returned non-zero.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_3c507);
+ kfree(dev_3c507.priv);
+ dev_3c507.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ free_irq(dev_3c507.irq);
+ release_region(dev_3c507.base_addr, EL16_IO_EXTENT);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -I/usr/src/linux/drivers/net -Wall -Wstrict-prototypes -O6 -m486 -c 3c507.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/3c509.c b/i386/i386at/gpl/linux/net/3c509.c
new file mode 100644
index 00000000..5e7dce4f
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/3c509.c
@@ -0,0 +1,739 @@
+/* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */
+/*
+ Written 1993,1994 by Donald Becker.
+
+ Copyright 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ This driver is for the 3Com EtherLinkIII series.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov or
+ C/O Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Known limitations:
+ Because of the way 3c509 ISA detection works it's difficult to predict
+ a priori which of several ISA-mode cards will be detected first.
+
+ This driver does not use predictive interrupt mode, resulting in higher
+ packet latency but lower overhead. If interrupts are disabled for an
+ unusually long time it could also result in missed packets, but in
+ practice this rarely happens.
+*/
+
+static const char *version = "3c509.c:1.03 10/8/94 becker@cesdis.gsfc.nasa.gov\n";
+
+#include <linux/module.h>
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/config.h> /* for CONFIG_MCA */
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+
+#ifdef EL3_DEBUG
+int el3_debug = EL3_DEBUG;
+#else
+int el3_debug = 2;
+#endif
+
+/* To minimize the size of the driver source I only define operating
+ constants if they are used several times. You'll need the manual
+ if you want to understand driver details. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+#define ID_PORT 0x100
+#define EEPROM_READ 0x80
+
+#define EL3_IO_EXTENT 16
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum c509cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrMask = 14<<11,
+ SetReadZero = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
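+
+/* Worked example (illustration only, not from the original source): a command
+   word is (command << 11) | parameter, so the receive filter set up in
+   el3_open(), SetRxFilter | RxStation | RxBroadcast, goes out on the wire as
+   (16 << 11) | (1 | 4) = 0x8000 | 0x0005 = 0x8005. */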
+
+/* Register window 1 offsets, the window used in normal operation. */
+#define TX_FIFO 0x00
+#define RX_FIFO 0x00
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
+#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
+
+#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
+#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+
+struct el3_private {
+ struct enet_statistics stats;
+};
+
+static ushort id_read_eeprom(int index);
+static ushort read_eeprom(short ioaddr, int index);
+static int el3_open(struct device *dev);
+static int el3_start_xmit(struct sk_buff *skb, struct device *dev);
+static void el3_interrupt(int irq, struct pt_regs *regs);
+static void update_stats(int addr, struct device *dev);
+static struct enet_statistics *el3_get_stats(struct device *dev);
+static int el3_rx(struct device *dev);
+static int el3_close(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+
+
+int el3_probe(struct device *dev)
+{
+ short lrs_state = 0xff, i;
+ ushort ioaddr, irq, if_port;
+ short *phys_addr = (short *)dev->dev_addr;
+ static int current_tag = 0;
+
+ /* First check all slots of the EISA bus. The next slot address to
+ probe is kept in 'eisa_addr' to support multiple probe() calls. */
+ if (EISA_bus) {
+ static int eisa_addr = 0x1000;
+ while (eisa_addr < 0x9000) {
+ ioaddr = eisa_addr;
+ eisa_addr += 0x1000;
+
+ /* Check the standard EISA ID register for an encoded '3Com'. */
+ if (inw(ioaddr + 0xC80) != 0x6d50)
+ continue;
+
+ /* Change the register set to the configuration window 0. */
+ outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD);
+
+ irq = inw(ioaddr + WN0_IRQ) >> 12;
+ if_port = inw(ioaddr + 6)>>14;
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+
+ /* Restore the "Product ID" to the EEPROM read register. */
+ read_eeprom(ioaddr, 3);
+
+ /* Was the EISA code an add-on hack? Nahhhhh... */
+ goto found;
+ }
+ }
+
+#ifdef CONFIG_MCA
+ if (MCA_bus) {
+ mca_adaptor_select_mode(1);
+ for (i = 0; i < 8; i++)
+ if ((mca_adaptor_id(i) | 1) == 0x627c) {
+ ioaddr = mca_pos_base_addr(i);
+ irq = inw(ioaddr + WN0_IRQ) >> 12;
+ if_port = inw(ioaddr + 6)>>14;
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+
+ mca_adaptor_select_mode(0);
+ goto found;
+ }
+ mca_adaptor_select_mode(0);
+
+ }
+#endif
+
+ /* Next check for all ISA bus boards by sending the ID sequence to the
+ ID_PORT. We find cards past the first by setting the 'current_tag'
+ on cards as they are found. Cards with their tag set will not
+ respond to subsequent ID sequences. */
+
+ if (check_region(ID_PORT,1)) {
+ static int once = 1;
+ if (once) printk("3c509: Somebody has reserved 0x%x, can't do ID_PORT lookup, nor card auto-probing\n",ID_PORT);
+ once = 0;
+ return -ENODEV;
+ }
+
+ outb(0x00, ID_PORT);
+ outb(0x00, ID_PORT);
+ for(i = 0; i < 255; i++) {
+ outb(lrs_state, ID_PORT);
+ lrs_state <<= 1;
+ lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
+ }
+
+ /* For the first probe, clear all board's tag registers. */
+ if (current_tag == 0)
+ outb(0xd0, ID_PORT);
+ else /* Otherwise kill off already-found boards. */
+ outb(0xd8, ID_PORT);
+
+ if (id_read_eeprom(7) != 0x6d50) {
+ return -ENODEV;
+ }
+
+ /* Read in EEPROM data, which does contention-select.
+ Only the lowest address board will stay "on-line".
+ 3Com got the byte order backwards. */
+ for (i = 0; i < 3; i++) {
+ phys_addr[i] = htons(id_read_eeprom(i));
+ }
+
+ {
+ unsigned short iobase = id_read_eeprom(8);
+ if_port = iobase >> 14;
+ ioaddr = 0x200 + ((iobase & 0x1f) << 4);
+ }
+ irq = id_read_eeprom(9) >> 12;
+
+ if (dev->base_addr != 0
+ && dev->base_addr != (unsigned short)ioaddr) {
+ return -ENODEV;
+ }
+
+ /* Set the adaptor tag so that the next card can be found. */
+ outb(0xd0 + ++current_tag, ID_PORT);
+
+ /* Activate the adaptor at the EEPROM location. */
+ outb(0xff, ID_PORT);
+
+ EL3WINDOW(0);
+ if (inw(ioaddr) != 0x6d50)
+ return -ENODEV;
+
+ /* Free the interrupt so that some other card can use it. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+ found:
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->if_port = if_port;
+ request_region(dev->base_addr, EL3_IO_EXTENT, "3c509");
+
+ {
+ const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"};
+ printk("%s: 3c509 at %#3.3lx tag %d, %s port, address ",
+ dev->name, dev->base_addr, current_tag, if_names[dev->if_port]);
+ }
+
+ /* Read in the station address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i]);
+ printk(", IRQ %d.\n", dev->irq);
+
+ /* Make up an EL3-specific data structure. */
+ dev->priv = kmalloc(sizeof(struct el3_private), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct el3_private));
+
+ if (el3_debug > 0)
+ printk(version);
+
+ /* The EL3-specific entries in the device structure. */
+ dev->open = &el3_open;
+ dev->hard_start_xmit = &el3_start_xmit;
+ dev->stop = &el3_close;
+ dev->get_stats = &el3_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the generic fields of the device structure. */
+ ether_setup(dev);
+ return 0;
+}
+
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+ */
+static ushort read_eeprom(short ioaddr, int index)
+{
+ int timer;
+
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Pause for at least 162 us. for the read to take place. */
+ for (timer = 0; timer < 162*4 + 400; timer++)
+ SLOW_DOWN_IO;
+ return inw(ioaddr + 12);
+}
+
+/* Read a word from the EEPROM when in the ISA ID probe state. */
+static ushort id_read_eeprom(int index)
+{
+ int timer, bit, word = 0;
+
+ /* Issue read command, and pause for at least 162 us. for it to complete.
+ Assume an extra-fast 16 MHz bus. */
+ outb(EEPROM_READ + index, ID_PORT);
+
+ /* This should really be done by looking at one of the timer channels. */
+ for (timer = 0; timer < 162*4 + 400; timer++)
+ SLOW_DOWN_IO;
+
+ for (bit = 15; bit >= 0; bit--)
+ word = (word << 1) + (inb(ID_PORT) & 0x01);
+
+ if (el3_debug > 3)
+ printk(" 3c509 EEPROM word %d %#4.4x.\n", index, word);
+
+ return word;
+}
+
+
+
+static int
+el3_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int i;
+
+ outw(TxReset, ioaddr + EL3_CMD);
+ outw(RxReset, ioaddr + EL3_CMD);
+ outw(SetReadZero | 0x00, ioaddr + EL3_CMD);
+
+ if (request_irq(dev->irq, &el3_interrupt, 0, "3c509")) {
+ return -EAGAIN;
+ }
+
+ EL3WINDOW(0);
+ if (el3_debug > 3)
+ printk("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name,
+ dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS));
+
+ /* Activate board: this is probably unnecessary. */
+ outw(0x0001, ioaddr + 4);
+
+ irq2dev_map[dev->irq] = dev;
+
+ /* Set the IRQ line. */
+ outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ);
+
+ /* Set the station address in window 2 each time opened. */
+ EL3WINDOW(2);
+
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ if (dev->if_port == 3)
+ /* Start the thinnet transceiver. We should really wait 50ms...*/
+ outw(StartCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 0) {
+ /* 10baseT interface, enabled link beat and jabber check. */
+ EL3WINDOW(4);
+ outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA);
+ }
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr + i);
+ inb(ioaddr + 10);
+ inb(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ /* Accept broadcast and station (physical) address only. */
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ dev->start = 1;
+
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetReadZero | 0xff, ioaddr + EL3_CMD);
+ outw(AckIntr | 0x69, ioaddr + EL3_CMD); /* Ack IRQ */
+ outw(SetIntrMask | 0x98, ioaddr + EL3_CMD); /* Set interrupt mask. */
+
+ if (el3_debug > 3)
+ printk("%s: Opened 3c509 IRQ %d status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + EL3_STATUS));
+
+ MOD_INC_USE_COUNT;
+ return 0; /* Always succeed */
+}
+
+static int
+el3_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 10)
+ return 1;
+ printk("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS));
+ dev->trans_start = jiffies;
+ /* Issue TX_RESET and TX_START commands. */
+ outw(TxReset, ioaddr + EL3_CMD);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->tbusy = 0;
+ }
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (skb->len <= 0)
+ return 0;
+
+ if (el3_debug > 4) {
+ printk("%s: el3_start_xmit(length = %ld) called, status %4.4x.\n",
+ dev->name, skb->len, inw(ioaddr + EL3_STATUS));
+ }
+#ifndef final_version
+ { /* Error-checking code, delete for 1.30. */
+ ushort status = inw(ioaddr + EL3_STATUS);
+ if (status & 0x0001 /* IRQ line active, missed one. */
+ && inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */
+ printk("%s: Missed interrupt, status then %04x now %04x"
+ " Tx %2.2x Rx %4.4x.\n", dev->name, status,
+ inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
+ inw(ioaddr + RX_STATUS));
+ /* Fake interrupt trigger by masking, acknowledge interrupts. */
+ outw(SetReadZero | 0x00, ioaddr + EL3_CMD);
+ outw(AckIntr | 0x69, ioaddr + EL3_CMD); /* Ack IRQ */
+ outw(SetReadZero | 0xff, ioaddr + EL3_CMD);
+ }
+ }
+#endif
+
+ /* Avoid timer-based retransmission conflicts. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+
+ dev->trans_start = jiffies;
+ if (inw(ioaddr + TX_FREE) > 1536) {
+ dev->tbusy = 0;
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ /* Clear the Tx status stack. */
+ {
+ short tx_status;
+ int i = 4;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
+ if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
+ if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+ }
+ }
+ return 0;
+}
+
+/* The EL3 interrupt handler. */
+static void
+el3_interrupt(int irq, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ int ioaddr, status;
+ int i = 0;
+
+ if (dev == NULL) {
+ printk ("el3_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (el3_debug > 4)
+ printk("%s: interrupt, status %4.4x.\n", dev->name, status);
+
+ while ((status = inw(ioaddr + EL3_STATUS)) & 0x91) {
+
+ if (status & 0x10)
+ el3_rx(dev);
+
+ if (status & 0x08) {
+ if (el3_debug > 5)
+ printk(" TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | 0x08, ioaddr + EL3_CMD);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ if (status & 0x80) /* Statistics full. */
+ update_stats(ioaddr, dev);
+
+ if (++i > 10) {
+ printk("%s: Infinite loop in interrupt, status %4.4x.\n",
+ dev->name, status);
+ /* Clear all interrupts. */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | 0x41, ioaddr + EL3_CMD); /* Ack IRQ */
+
+ }
+
+ if (el3_debug > 4) {
+ printk("%s: exiting interrupt, status %4.4x.\n", dev->name,
+ inw(ioaddr + EL3_STATUS));
+ }
+
+ dev->interrupt = 0;
+ return;
+}
+
+
+static struct enet_statistics *
+el3_get_stats(struct device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ update_stats(dev->base_addr, dev);
+ restore_flags(flags);
+ return &lp->stats;
+}
+
+/* Update statistics. We change to register window 6, so this should be run
+ single-threaded if the device is active. This is expected to be a rare
+ operation, and it's simpler for the rest of the driver to assume that
+ window 1 is always valid rather than use a special window-state variable.
+ */
+static void update_stats(int ioaddr, struct device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+
+ if (el3_debug > 5)
+ printk(" Updating the statistics.\n");
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ lp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ lp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ lp->stats.collisions += inb(ioaddr + 3);
+ lp->stats.tx_window_errors += inb(ioaddr + 4);
+ lp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ lp->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ inw(ioaddr + 10); /* Total Rx and Tx octets. */
+ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+ return;
+}
+
+static int
+el3_rx(struct device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ short rx_status;
+
+ if (el3_debug > 5)
+ printk(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+ while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ lp->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: lp->stats.rx_over_errors++; break;
+ case 0x0800: lp->stats.rx_length_errors++; break;
+ case 0x1000: lp->stats.rx_frame_errors++; break;
+ case 0x1800: lp->stats.rx_length_errors++; break;
+ case 0x2000: lp->stats.rx_frame_errors++; break;
+ case 0x2800: lp->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+5);
+ if (el3_debug > 4)
+ printk("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align IP on 16 byte boundaries */
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ insl(ioaddr+RX_FIFO, skb_put(skb,pkt_len),
+ (pkt_len + 3) >> 2);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ lp->stats.rx_packets++;
+ continue;
+ } else if (el3_debug)
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ }
+ lp->stats.rx_dropped++;
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ while (inw(ioaddr + EL3_STATUS) & 0x1000)
+ printk(" Waiting for 3c509 to discard packet, status %x.\n",
+ inw(ioaddr + EL3_STATUS) );
+ }
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ if (el3_debug > 1) {
+ static int old = 0;
+ if (old != dev->mc_count) {
+ old = dev->mc_count;
+ printk("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
+ }
+ }
+ if (dev->flags&IFF_PROMISC)
+ {
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
+ ioaddr + EL3_CMD);
+ }
+ else if (dev->mc_count || (dev->flags&IFF_ALLMULTI))
+ {
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ }
+ else
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+}
+
+static int
+el3_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el3_debug > 2)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 3)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 0) {
+ /* Disable link beat and jabber, if_port may change ere next open(). */
+ EL3WINDOW(4);
+ outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA);
+ }
+
+ free_irq(dev->irq);
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ /* But we explicitly zero the IRQ line select anyway. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+
+
+ irq2dev_map[dev->irq] = 0;
+
+ update_stats(ioaddr, dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_3c509 = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, el3_probe };
+
+static int io = 0;
+static int irq = 0;
+
+int
+init_module(void)
+{
+ dev_3c509.base_addr = io;
+ dev_3c509.irq = irq;
+ if (!EISA_bus) {
+ printk("3c509: WARNING! Module load-time probing works reliably only for EISA-bus!\n");
+ }
+ if (register_netdev(&dev_3c509) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_3c509);
+ kfree_s(dev_3c509.priv,sizeof(struct el3_private));
+ dev_3c509.priv=NULL;
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_3c509.base_addr, EL3_IO_EXTENT);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 3c509.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/3c59x.c b/i386/i386at/gpl/linux/net/3c59x.c
new file mode 100644
index 00000000..b5c4d5b7
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/3c59x.c
@@ -0,0 +1,1066 @@
+/* 3c59x.c: A 3Com 3c590/3c595 "Vortex" ethernet driver for linux. */
+/*
+ Written 1995 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ This driver is for the 3Com "Vortex" series ethercards. Members of
+ the series include the 3c590 PCI EtherLink III and 3c595-Tx PCI Fast
+ EtherLink. It also works with the 10Mbs-only 3c590 PCI EtherLink III.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+*/
+
+static char *version = "3c59x.c:v0.13 2/13/96 becker@cesdis.gsfc.nasa.gov\n";
+
+/* "Knobs" that turn on special features. */
+/* Allow the use of bus master transfers instead of programmed-I/O for the
+ Tx process. Bus master transfers are always disabled by default, but
+ iff this is set they may be turned on using 'options'. */
+#define VORTEX_BUS_MASTER
+
+/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */
+#define VORTEX_DEBUG 1
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <linux/timer.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#ifdef HAVE_SHARED_IRQ
+#define USE_SHARED_IRQ
+#include <linux/shared_irq.h>
+#endif
+
+/* The total size is twice that of the original EtherLinkIII series: the
+ runtime register window, window 1, is now always mapped in. */
+#define VORTEX_TOTAL_SIZE 0x20
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry tc59x_drv =
+{"Vortex", vortex_pci_probe, VORTEX_TOTAL_SIZE, NULL};
+#endif
+
+#ifdef VORTEX_DEBUG
+int vortex_debug = VORTEX_DEBUG;
+#else
+int vortex_debug = 1;
+#endif
+
+static int product_ids[] = {0x5900, 0x5950, 0x5951, 0x5952, 0, 0};
+static const char *product_names[] = {
+ "3c590 Vortex 10Mbps",
+ "3c595 Vortex 100baseTX",
+ "3c595 Vortex 100baseT4",
+ "3c595 Vortex 100base-MII",
+ "EISA Vortex 3c597",
+};
+#define DEMON_INDEX 4 /* Caution! Must be consistent with above! */
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com FastEtherLink, 3Com's PCI to
+10/100baseT adapter. It also works with the 3c590, a similar product
+with only a 10Mbs interface.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line. While it's
+physically possible to shared PCI interrupt lines, the 1.2.0 kernel doesn't
+support it.
+
+III. Driver operation
+
+The 3c59x series use an interface that's very similar to the previous 3c5x9
+series. The primary interface is two programmed-I/O FIFOs, with an
+alternate single-contiguous-region bus-master transfer (see next).
+
+One extension that is advertised in a very large font is that the adapters
+are capable of being bus masters. Unfortunately this capability is only for
+a single contiguous region making it less useful than the list of transfer
+regions available with the DEC Tulip or AMD PCnet. Given the significant
+performance impact of taking an extra interrupt for each transfer, using
+DMA transfers is a win only with large blocks.
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+IV. Notes
+
+Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing both
+3c590 and 3c595 boards.
+The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
+the not-yet-released (3/95) EISA version is called "Demon". According to
+Terry these names come from rides at the local amusement park.
+
+The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
+This driver only supports ethernet packets because of the skbuff allocation
+limit of 4K.
+*/
+
+#define TCOM_VENDOR_ID 0x10B7 /* 3Com's manufacturer's ID. */
+
+/* Operational definitions.
+ These are not used by other compilation units and thus are not
+ exported in a ".h" file.
+
+ First the windows. There are eight register windows, with the command
+ and status registers available in each.
+ */
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable.
+ Note that 11 parameter bits were fine for ethernet, but the new chip
+ can handle FDDI-length frames (~4500 octets), and now parameters count
+ 32-bit 'Dwords' rather than octets. */
+
+enum vortex_cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11,
+ StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,};
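+
+/* Illustration only -- not part of the original source.  The enum values
+   above are already pre-shifted opcodes, so a command word is just the
+   opcode in the top five bits with its parameter in the low eleven bits.
+   A hypothetical helper that builds one from a raw opcode number: */
+#if 0
+static inline unsigned short el3_mkcmd(unsigned short opcode, unsigned short param)
+{
+ /* e.g. el3_mkcmd(18, 1536) == SetTxThreshold + 1536, exactly as written
+ to ioaddr + EL3_CMD elsewhere in this driver. */
+ return (opcode << 11) | (param & 0x07ff);
+}
+#endif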
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
+
+/* Bits in the general status register. */
+enum vortex_status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, DMADone = 1<<8,
+ DMAInProgress = 1<<11, /* DMA controller is still busy.*/
+ CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the Vortex this window is always mapped at offsets 0x10-0x1f. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
+ TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
+};
+enum Window0 {
+ Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */
+};
+enum Win0_EEPROM_bits {
+ EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
+ EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+};
+/* EEPROM locations. */
+enum eeprom_offset {
+ PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
+ EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
+ NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
+ DriverTune=13, Checksum=15};
+
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+union wn3_config {
+ int i;
+ struct w3_config_fields {
+ unsigned int ram_size:3, ram_width:1, ram_speed:2, rom_size:2;
+ int pad8:8;
+ unsigned int ram_split:2, pad18:2, xcvr:3, pad21:1, autoselect:1;
+ int pad24:8;
+ } u;
+};
+
+enum Window4 {
+ Wn4_Media = 0x0A, /* Window 4: Various transcvr/media bits. */
+};
+enum Win4_Media_bits {
+ Media_TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
+};
+enum Window7 { /* Window 7: Bus Master control. */
+ Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
+};
+
+struct vortex_private {
+ char devname[8]; /* "ethN" string, also for kernel debug. */
+ const char *product_name;
+ struct device *next_module;
+ struct enet_statistics stats;
+#ifdef VORTEX_BUS_MASTER
+ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
+#endif
+ struct timer_list timer; /* Media selection timer. */
+ int options; /* User-settable driver options (none yet). */
+ unsigned int media_override:3, full_duplex:1, bus_master:1, autoselect:1;
+};
+
+static char *if_names[] = {
+ "10baseT", "10Mbs AUI", "undefined", "10base2",
+ "100baseTX", "100baseFX", "MII", "undefined"};
+
+static int vortex_scan(struct device *dev);
+static int vortex_found_device(struct device *dev, int ioaddr, int irq,
+ int product_index, int options);
+static int vortex_probe1(struct device *dev);
+static int vortex_open(struct device *dev);
+static void vortex_timer(unsigned long arg);
+static int vortex_start_xmit(struct sk_buff *skb, struct device *dev);
+static int vortex_rx(struct device *dev);
+static void vortex_interrupt(int irq, struct pt_regs *regs);
+static int vortex_close(struct device *dev);
+static void update_stats(int addr, struct device *dev);
+static struct enet_statistics *vortex_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+
+/* Unlike the other PCI cards the 59x cards don't need a large contiguous
+ memory region, so making the driver a loadable module is feasible.
+
+ Unfortunately, maximizing the shared code between the integrated and
+ module versions of the driver results in a complicated set of initialization
+ procedures.
+ init_module() -- modules / tc59x_init() -- built-in
+ The wrappers for vortex_scan()
+ vortex_scan() The common routine that scans for PCI and EISA cards
+ vortex_found_device() Allocate a device structure when we find a card.
+ Different versions exist for modules and built-in.
+ vortex_probe1() Fill in the device structure -- this is separated
+ so that the module code can put it in dev->init.
+*/
+/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
+/* Note: this is the only limit on the number of cards supported!! */
+int options[8] = { -1, -1, -1, -1, -1, -1, -1, -1,};
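+/* Worked example, for illustration only (the decoding lives in
+   vortex_found_device() below):
+     options[0] = 0x0C  -> force 100baseTX (low three bits = 4) and
+                           full duplex (bit 0x08) on the first card;
+     options[0] = 0x1C  -> the same, plus bus-master Tx (bit 0x10) when
+                           VORTEX_BUS_MASTER is compiled in;
+     options[0] = -1    -> no override (media_override stays 7). */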
+
+#ifdef MODULE
+static int debug = -1;
+/* A list of all installed Vortex devices, for removing the driver module. */
+static struct device *root_vortex_dev = NULL;
+
+int
+init_module(void)
+{
+ int cards_found;
+
+ if (debug >= 0)
+ vortex_debug = debug;
+ if (vortex_debug)
+ printk(version);
+
+ root_vortex_dev = NULL;
+ cards_found = vortex_scan(0);
+ return cards_found < 0 ? cards_found : 0;
+}
+
+#else
+unsigned long tc59x_probe(struct device *dev)
+{
+ int cards_found = 0;
+
+ cards_found = vortex_scan(dev);
+
+ if (vortex_debug > 0 && cards_found)
+ printk(version);
+
+ return cards_found ? 0 : -ENODEV;
+}
+#endif /* not MODULE */
+
+static int vortex_scan(struct device *dev)
+{
+ int cards_found = 0;
+
+ if (pcibios_present()) {
+ static int pci_index = 0;
+ for (; pci_index < 8; pci_index++) {
+ unsigned char pci_bus, pci_device_fn, pci_irq_line, pci_latency;
+ unsigned int pci_ioaddr;
+ unsigned short pci_command;
+ int index;
+
+ for (index = 0; product_ids[index]; index++) {
+ if ( ! pcibios_find_device(TCOM_VENDOR_ID, product_ids[index],
+ pci_index, &pci_bus,
+ &pci_device_fn))
+ break;
+ }
+ if ( ! product_ids[index])
+ break;
+
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &pci_irq_line);
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ /* Remove I/O space marker in bit 0. */
+ pci_ioaddr &= ~3;
+
+#ifdef VORTEX_BUS_MASTER
+ /* Get and check the bus-master and latency values.
+ Some PCI BIOSes fail to set the master-enable bit, and
+ the latency timer must be set to the maximum value to avoid
+ data corruption that occurs when the timer expires during
+ a transfer. Yes, it's a bug. */
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
+ if ( ! (pci_command & PCI_COMMAND_MASTER)) {
+ printk(" PCI Master Bit has not been set! Setting...\n");
+ pci_command |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, pci_command);
+ }
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency != 255) {
+ printk(" Overriding PCI latency timer (CFLT) setting of %d, new value is 255.\n", pci_latency);
+ pcibios_write_config_byte(pci_bus, pci_device_fn,
+ PCI_LATENCY_TIMER, 255);
+ }
+#endif /* VORTEX_BUS_MASTER */
+ vortex_found_device(dev, pci_ioaddr, pci_irq_line, index,
+ dev && dev->mem_start ? dev->mem_start
+ : options[cards_found]);
+ dev = 0;
+ cards_found++;
+ }
+ }
+
+ /* Now check all slots of the EISA bus. */
+ if (EISA_bus) {
+ static int ioaddr = 0x1000;
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+ /* Check the standard EISA ID register for an encoded '3Com'. */
+ if (inw(ioaddr + 0xC80) != 0x6d50)
+ continue;
+ /* Check for a product that we support. */
+ if ((inw(ioaddr + 0xC82) & 0xFFF0) != 0x5970
+ && (inw(ioaddr + 0xC82) & 0xFFF0) != 0x5920)
+ continue;
+ vortex_found_device(dev, ioaddr, inw(ioaddr + 0xC88) >> 12,
+ DEMON_INDEX, dev && dev->mem_start
+ ? dev->mem_start : options[cards_found]);
+ dev = 0;
+ cards_found++;
+ }
+ }
+
+ return cards_found;
+}
+
+static int vortex_found_device(struct device *dev, int ioaddr, int irq,
+ int product_index, int options)
+{
+ struct vortex_private *vp;
+
+#ifdef MODULE
+ /* Allocate and fill new device structure. */
+ int dev_size = sizeof(struct device) +
+ sizeof(struct vortex_private);
+
+ dev = (struct device *) kmalloc(dev_size, GFP_KERNEL);
+ memset(dev, 0, dev_size);
+ dev->priv = ((void *)dev) + sizeof(struct device);
+ vp = (struct vortex_private *)dev->priv;
+ dev->name = vp->devname; /* An empty string. */
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->init = vortex_probe1;
+ vp->product_name = product_names[product_index];
+ vp->options = options;
+ if (options >= 0) {
+ vp->media_override = ((options & 7) == 2) ? 0 : options & 7;
+ vp->full_duplex = (options & 8) ? 1 : 0;
+ vp->bus_master = (options & 16) ? 1 : 0;
+ } else {
+ vp->media_override = 7;
+ vp->full_duplex = 0;
+ vp->bus_master = 0;
+ }
+ ether_setup(dev);
+ vp->next_module = root_vortex_dev;
+ root_vortex_dev = dev;
+ if (register_netdev(dev) != 0)
+ return -EIO;
+#else /* not a MODULE */
+ if (dev) {
+ dev->priv = kmalloc(sizeof (struct vortex_private), GFP_KERNEL);
+ memset(dev->priv, 0, sizeof (struct vortex_private));
+ }
+ dev = init_etherdev(dev, sizeof(struct vortex_private));
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ vp = (struct vortex_private *)dev->priv;
+ vp->product_name = product_names[product_index];
+ vp->options = options;
+ if (options >= 0) {
+ vp->media_override = ((options & 7) == 2) ? 0 : options & 7;
+ vp->full_duplex = (options & 8) ? 1 : 0;
+ vp->bus_master = (options & 16) ? 1 : 0;
+ } else {
+ vp->media_override = 7;
+ vp->full_duplex = 0;
+ vp->bus_master = 0;
+ }
+
+ vortex_probe1(dev);
+#endif /* MODULE */
+ return 0;
+}
+
+static int vortex_probe1(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int i;
+
+ printk("%s: 3Com %s at %#3x,", dev->name,
+ vp->product_name, ioaddr);
+
+ /* Read the station address from the EEPROM. */
+ EL3WINDOW(0);
+ for (i = 0; i < 3; i++) {
+ short *phys_addr = (short *)dev->dev_addr;
+ int timer;
+ outw(EEPROM_Read + PhysAddr01 + i, ioaddr + Wn0EepromCmd);
+ /* Pause for at least 162 us for the read to take place. */
+ for (timer = 0; timer < 162*4 + 400; timer++) {
+ SLOW_DOWN_IO;
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+ break;
+ }
+ phys_addr[i] = htons(inw(ioaddr + 12));
+ }
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
+ printk(", IRQ %d\n", dev->irq);
+ /* Tell them about an invalid IRQ. */
+ if (vortex_debug && (dev->irq <= 0 || dev->irq > 15))
+ printk(" *** Warning: this IRQ is unlikely to work!\n");
+
+ {
+ char *ram_split[] = {"5:3", "3:1", "1:1", "invalid"};
+ union wn3_config config;
+ EL3WINDOW(3);
+ config.i = inl(ioaddr + Wn3_Config);
+ if (vortex_debug > 1)
+ printk(" Internal config register is %4.4x, transceivers %#x.\n",
+ config.i, inw(ioaddr + Wn3_Options));
+ printk(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+ 8 << config.u.ram_size,
+ config.u.ram_width ? "word" : "byte",
+ ram_split[config.u.ram_split],
+ config.u.autoselect ? "autoselect/" : "",
+ if_names[config.u.xcvr]);
+ dev->if_port = config.u.xcvr;
+ vp->autoselect = config.u.autoselect;
+ }
+
+ /* We do a request_region() only to register /proc/ioports info. */
+ request_region(ioaddr, VORTEX_TOTAL_SIZE, vp->product_name);
+
+ /* The 3c59x-specific entries in the device structure. */
+ dev->open = &vortex_open;
+ dev->hard_start_xmit = &vortex_start_xmit;
+ dev->stop = &vortex_close;
+ dev->get_stats = &vortex_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+#if defined (HAVE_SET_MAC_ADDR) && 0
+ dev->set_mac_address = &set_mac_address;
+#endif
+
+ return 0;
+}
+
+
+static int
+vortex_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ union wn3_config config;
+ int i;
+
+ /* Before initializing select the active media port. */
+ EL3WINDOW(3);
+ if (vp->full_duplex)
+ outb(0x20, ioaddr + Wn3_MAC_Ctrl); /* Set the full-duplex bit. */
+ config.i = inl(ioaddr + Wn3_Config);
+
+ if (vp->media_override != 7) {
+ if (vortex_debug > 1)
+ printk("%s: Media override to transceiver %d (%s).\n",
+ dev->name, vp->media_override, if_names[vp->media_override]);
+ config.u.xcvr = vp->media_override;
+ dev->if_port = vp->media_override;
+ outl(config.i, ioaddr + Wn3_Config);
+ }
+
+ if (vortex_debug > 1) {
+ printk("%s: vortex_open() InternalConfig %8.8x.\n",
+ dev->name, config.i);
+ }
+
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (i = 20; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Wait a few ticks for the RxReset command to complete. */
+ for (i = 20; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+#ifdef USE_SHARED_IRQ
+ i = request_shared_irq(dev->irq, &vortex_interrupt, dev, vp->product_name);
+ if (i) /* Error */
+ return i;
+#else
+ if (dev->irq == 0 || irq2dev_map[dev->irq] != NULL)
+ return -EAGAIN;
+ irq2dev_map[dev->irq] = dev;
+ if (request_irq(dev->irq, &vortex_interrupt, 0, vp->product_name)) {
+ irq2dev_map[dev->irq] = NULL;
+ return -EAGAIN;
+ }
+#endif
+
+ if (vortex_debug > 1) {
+ EL3WINDOW(4);
+ printk("%s: vortex_open() irq %d media status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + Wn4_Media));
+ }
+
+ /* Set the station address and mask in window 2 each time opened. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+ for (; i < 12; i+=2)
+ outw(0, ioaddr + i);
+
+ if (dev->if_port == 3)
+ /* Start the thinnet transceiver. We should really wait 50ms...*/
+ outw(StartCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 0) {
+ /* 10baseT interface, enable link beat and jabber check. */
+ EL3WINDOW(4);
+ outw(inw(ioaddr + Wn4_Media) | Media_TP, ioaddr + Wn4_Media);
+ }
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 10; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
+
+ /* Accept broadcast and phys addr only. */
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | DMADone, ioaddr + EL3_CMD);
+
+#ifdef MODULE
+ MOD_INC_USE_COUNT;
+#endif
+
+ if (vp->autoselect) {
+ init_timer(&vp->timer);
+ vp->timer.expires = (14*HZ)/10; /* 1.4 sec. */
+ vp->timer.data = (unsigned long)dev;
+ vp->timer.function = &vortex_timer; /* timer handler */
+ add_timer(&vp->timer);
+ }
+ return 0;
+}
+
+static void vortex_timer(unsigned long data)
+{
+ struct device *dev = (struct device *)data;
+ if (vortex_debug > 2)
+ printk("%s: Media selection timer tick happened.\n", dev->name);
+ /* ToDo: active media selection here! */
+}
+
+static int
+vortex_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 40)
+ return 1;
+ printk("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ dev->name, inb(ioaddr + TxStatus), inw(ioaddr + EL3_STATUS));
+ vp->stats.tx_errors++;
+ /* Issue TX_RESET and TX_START commands. */
+ outw(TxReset, ioaddr + EL3_CMD);
+ {
+ int i;
+ for (i = 20; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->trans_start = jiffies;
+ dev->tbusy = 0;
+ return 0;
+ }
+
+ if (skb == NULL || skb->len <= 0) {
+ printk("%s: Obsolete driver layer request made: skbuff==NULL.\n",
+ dev->name);
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ If this ever occurs the queue layer is doing something evil! */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+
+ /* Put out the doubleword header... */
+ outl(skb->len, ioaddr + TX_FIFO);
+#ifdef VORTEX_BUS_MASTER
+ if (vp->bus_master) {
+ /* Set the bus-master controller to transfer the packet. */
+ outl((int)(skb->data), ioaddr + Wn7_MasterAddr);
+ outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ vp->tx_skb = skb;
+ outw(StartDMADown, ioaddr + EL3_CMD);
+ } else {
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb (skb, FREE_WRITE);
+ if (inw(ioaddr + TxFree) > 1536) {
+ dev->tbusy = 0;
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+ }
+#else
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb (skb, FREE_WRITE);
+ if (inw(ioaddr + TxFree) > 1536) {
+ dev->tbusy = 0;
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+#endif /* bus master */
+
+ dev->trans_start = jiffies;
+
+ /* Clear the Tx status stack. */
+ {
+ short tx_status;
+ int i = 4;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
+ if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
+ if (vortex_debug > 2)
+ printk("%s: Tx error, status %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) {
+ int j;
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 20; j >= 0 ; j--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void vortex_interrupt(int irq, struct pt_regs *regs)
+{
+#ifdef USE_SHARED_IRQ
+ struct device *dev = (struct device *)(irq == 0 ? regs : irq2dev_map[irq]);
+#else
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+#endif
+ struct vortex_private *lp;
+ int ioaddr, status;
+ int latency;
+ int i = 0;
+
+ if (dev == NULL) {
+ printk ("vortex_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ latency = inb(ioaddr + Timer);
+ lp = (struct vortex_private *)dev->priv;
+
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (vortex_debug > 4)
+ printk("%s: interrupt, status %4.4x, timer %d.\n", dev->name,
+ status, latency);
+ if ((status & 0xE000) != 0xE000) {
+ static int donedidthis=0;
+ /* Some interrupt controllers store a bogus interrupt from boot-time.
+ Ignore a single early interrupt, but don't hang the machine for
+ other interrupt problems. */
+ if (donedidthis++ > 1) {
+ printk("%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n",
+ dev->name, status, dev->start);
+ free_irq(dev->irq);
+ }
+ }
+
+ do {
+ if (vortex_debug > 5)
+ printk("%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & RxComplete)
+ vortex_rx(dev);
+
+ if (status & TxAvailable) {
+ if (vortex_debug > 5)
+ printk(" TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+#ifdef VORTEX_BUS_MASTER
+ if (status & DMADone) {
+ outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+#endif
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts at once. */
+ if (status & RxEarly) { /* Rx early is unused. */
+ vortex_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & StatsFull) { /* Empty statistics. */
+ static int DoneDidThat = 0;
+ if (vortex_debug > 4)
+ printk("%s: Updating stats.\n", dev->name);
+ update_stats(ioaddr, dev);
+ /* DEBUG HACK: Disable statistics as an interrupt source. */
+ /* This occurs when we have the wrong media type! */
+ if (DoneDidThat == 0 &&
+ inw(ioaddr + EL3_STATUS) & StatsFull) {
+ int win, reg;
+ printk("%s: Updating stats failed, disabling stats as an"
+ " interrupt source.\n", dev->name);
+ for (win = 0; win < 8; win++) {
+ EL3WINDOW(win);
+ printk("\n Vortex window %d:", win);
+ for (reg = 0; reg < 16; reg++)
+ printk(" %2.2x", inb(ioaddr+reg));
+ }
+ EL3WINDOW(7);
+ outw(SetIntrEnb | 0x18, ioaddr + EL3_CMD);
+ DoneDidThat++;
+ }
+ }
+ if (status & AdapterFailure) {
+ /* Adapter failure requires Rx reset and reinit. */
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Set the Rx filter to the current state. */
+ outw(SetRxFilter | RxStation | RxBroadcast
+ | (dev->flags & IFF_ALLMULTI ? RxMulticast : 0)
+ | (dev->flags & IFF_PROMISC ? RxProm : 0),
+ ioaddr + EL3_CMD);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (++i > 10) {
+ printk("%s: Infinite loop in interrupt, status %4.4x. "
+ "Disabling functions (%4.4x).\n",
+ dev->name, status, SetStatusEnb | ((~status) & 0xFE));
+ /* Disable all pending interrupts. */
+ outw(SetStatusEnb | ((~status) & 0xFE), ioaddr + EL3_CMD);
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+
+ } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+ if (vortex_debug > 4)
+ printk("%s: exiting interrupt, status %4.4x.\n", dev->name, status);
+
+ dev->interrupt = 0;
+ return;
+}
+
+static int
+vortex_rx(struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+ short rx_status;
+
+ if (vortex_debug > 5)
+ printk(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ unsigned char rx_error = inb(ioaddr + RxErrors);
+ if (vortex_debug > 4)
+ printk(" Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01) vp->stats.rx_over_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ } else {
+ /* The packet length: up to 4.5K! */
+ short pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 5);
+ if (vortex_debug > 4)
+ printk("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
+ (pkt_len + 3) >> 2);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ /* Wait a limited time to go to next packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ vp->stats.rx_packets++;
+ continue;
+ } else if (vortex_debug)
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ }
+ vp->stats.rx_dropped++;
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ /* Wait a limited time to skip this packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+
+ return 0;
+}
+
+static int
+vortex_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (vortex_debug > 1)
+ printk("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
+
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 3)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 0) {
+ /* Disable link beat and jabber, if_port may change ere next open(). */
+ EL3WINDOW(4);
+ outw(inw(ioaddr + Wn4_Media) & ~Media_TP, ioaddr + Wn4_Media);
+ }
+
+#ifdef USE_SHARED_IRQ
+ free_shared_irq(dev->irq, dev);
+#else
+ free_irq(dev->irq);
+ /* Mmmm, we should disable all interrupt sources here. */
+ irq2dev_map[dev->irq] = 0;
+#endif
+
+ update_stats(ioaddr, dev);
+#ifdef MODULE
+ MOD_DEC_USE_COUNT;
+#endif
+
+ return 0;
+}
+
+static struct enet_statistics *
+vortex_get_stats(struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ update_stats(dev->base_addr, dev);
+ restore_flags(flags);
+ return &vp->stats;
+}
+
+/* Update statistics.
+ Unlike with the EL3 we need not worry about interrupts changing
+ the window setting from underneath us, but we must still guard
+ against a race condition with a StatsUpdate interrupt updating the
+ table. This is done by checking that the ASM (!) code generated uses
+ atomic updates with '+='.
+ */
+static void update_stats(int ioaddr, struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+
+ /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ vp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ vp->stats.collisions += inb(ioaddr + 3);
+ vp->stats.tx_window_errors += inb(ioaddr + 4);
+ vp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ vp->stats.tx_packets += inb(ioaddr + 6);
+ vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7); /* Must read to clear */
+ /* Tx deferrals */ inb(ioaddr + 8);
+ /* Don't bother with register 9, an extension of registers 6&7.
+ If we do use the 6&7 values the atomic update assumption above
+ is invalid. */
+ inw(ioaddr + 10); /* Total Rx and Tx octets. */
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+
+ /* We change back to window 7 (not 1) with the Vortex. */
+ EL3WINDOW(7);
+ return;
+}
+
+/* There are two versions of set_multicast_list() to support both v1.2 and
+ v1.4 kernels. */
+static void
+set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ if (vortex_debug > 3) {
+ printk("%s: Setting Rx multicast mode, %d addresses.\n",
+ dev->name, dev->mc_count);
+ }
+ } else if (dev->flags & IFF_PROMISC) {
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
+ ioaddr + EL3_CMD);
+ } else
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+}
+
+
+#ifdef MODULE
+void
+cleanup_module(void)
+{
+ struct device *next_dev;
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_vortex_dev) {
+ next_dev = ((struct vortex_private *)root_vortex_dev->priv)->next_module;
+ unregister_netdev(root_vortex_dev);
+ release_region(root_vortex_dev->base_addr, VORTEX_TOTAL_SIZE);
+ kfree(root_vortex_dev);
+ root_vortex_dev = next_dev;
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 3c59x.c -o 3c59x.o"
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/8390.c b/i386/i386at/gpl/linux/net/8390.c
new file mode 100644
index 00000000..05ea32f6
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/8390.c
@@ -0,0 +1,727 @@
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is the chip-specific code for many 8390-based ethernet adaptors.
+ This is not a complete driver, it must be combined with board-specific
+ code such as ne.c, wd.c, 3c503.c, etc.
+
+ Changelog:
+
+ Paul Gortmaker : remove set_bit lock, other cleanups.
+ Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
+ ei_block_input() for eth_io_copy_and_sum().
+
+ */
+
+static const char *version =
+ "8390.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+/*
+ Braindamage remaining:
+ Much of this code should have been cleaned up, but every attempt
+ has broken some clone part.
+
+ Sources:
+ The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "8390.h"
+
+/* These are the operational function interfaces to board-specific
+ routines.
+ void reset_8390(struct device *dev)
+ Resets the board associated with DEV, including a hardware reset of
+ the 8390. This is only called when there is a transmit timeout, and
+ it is always followed by 8390_init().
+ void block_output(struct device *dev, int count, const unsigned char *buf,
+ int start_page)
+ Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
+ "page" value uses the 8390's 256-byte pages.
+ void get_8390_hdr(struct device *dev, struct e8390_hdr *hdr, int ring_page)
+ Read the 4 byte, page aligned 8390 header. *If* there is a
+ subsequent read, it will be of the rest of the packet.
+ void block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+ Read COUNT bytes from the packet buffer into the skb data area. Start
+ reading from RING_OFFSET, the address as the 8390 sees it. This will always
+ follow the read of the 8390 header.
+*/
+#define ei_reset_8390 (ei_local->reset_8390)
+#define ei_block_output (ei_local->block_output)
+#define ei_block_input (ei_local->block_input)
+#define ei_get_8390_hdr (ei_local->get_8390_hdr)
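+
+/* Illustration only -- not part of the original source.  A minimal sketch
+   of how a board-specific driver is expected to hook its routines into
+   this core, assuming ethdev_init() has already attached a struct
+   ei_device to dev->priv.  All my_* names are hypothetical. */
+#if 0
+static int my_board_probe1(struct device *dev)
+{
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /* Point the 8390 core at this board's I/O routines... */
+ ei_local->reset_8390 = &my_reset_8390;
+ ei_local->get_8390_hdr = &my_get_8390_hdr;
+ ei_local->block_input = &my_block_input;
+ ei_local->block_output = &my_block_output;
+ /* ...and use the generic open/close entry points from this file. */
+ dev->open = &ei_open;
+ dev->stop = &ei_close;
+ NS8390_init(dev, 0);
+ return 0;
+}
+#endif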
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifdef EI_DEBUG
+int ei_debug = EI_DEBUG;
+#else
+int ei_debug = 1;
+#endif
+#ifdef EI_PINGPONG
+static int ei_pingpong = 1;
+#else
+static int ei_pingpong = 0;
+#endif
+
+/* Max number of packets received at one Intr.
+ Currently this may only be examined by a kernel debugger. */
+static int high_water_mark = 0;
+
+/* Index to functions. */
+static void ei_tx_intr(struct device *dev);
+static void ei_receive(struct device *dev);
+static void ei_rx_overrun(struct device *dev);
+
+/* Routines generic to NS8390-based boards. */
+static void NS8390_trigger_send(struct device *dev, unsigned int length,
+ int start_page);
+static void set_multicast_list(struct device *dev);
+
+
+/* Open/initialize the board. This routine goes all-out, setting everything
+ up anew at each open, even though many of these registers should only
+ need to be set once at boot.
+ */
+int ei_open(struct device *dev)
+{
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /* This can't happen unless somebody forgot to call ethdev_init(). */
+ if (ei_local == NULL) {
+ printk(KERN_EMERG "%s: ei_open passed a non-existent device!\n", dev->name);
+ return -ENXIO;
+ }
+
+ irq2dev_map[dev->irq] = dev;
+ NS8390_init(dev, 1);
+ dev->start = 1;
+ ei_local->irqlock = 0;
+ return 0;
+}
+
+/* Opposite of above. Only used when "ifconfig <devname> down" is done. */
+int ei_close(struct device *dev)
+{
+ NS8390_init(dev, 0);
+ dev->start = 0;
+ return 0;
+}
+
+static int ei_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int length, send_length;
+
+/*
+ * We normally shouldn't be called if dev->tbusy is set, but the
+ * existing code does anyway. If it has been too long since the
+ * last Tx, we assume the board has died and kick it.
+ */
+
+ if (dev->tbusy) { /* Do timeouts, just like the 8003 driver. */
+ int txsr = inb(e8390_base+EN0_TSR), isr;
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < TX_TIMEOUT || (tickssofar < (TX_TIMEOUT+5) && ! (txsr & ENTSR_PTX))) {
+ return 1;
+ }
+ isr = inb(e8390_base+EN0_ISR);
+ if (dev->start == 0) {
+ printk("%s: xmit on stopped card\n", dev->name);
+ return 1;
+ }
+
+ printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+
+ if (!isr && !ei_local->stat.tx_packets) {
+ /* The 8390 probably hasn't gotten on the cable yet. */
+ ei_local->interface_num ^= 1; /* Try a different xcvr. */
+ }
+
+ /* Try to restart the card. Perhaps the user has fixed something. */
+ ei_reset_8390(dev);
+ NS8390_init(dev, 1);
+ dev->trans_start = jiffies;
+ }
+
+ /* Sending a NULL skb means some higher layer thinks we've missed a
+ tx-done interrupt. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ length = skb->len;
+ if (skb->len <= 0)
+ return 0;
+
+ /* Mask interrupts from the ethercard. */
+ outb_p(0x00, e8390_base + EN0_IMR);
+ if (dev->interrupt) {
+ printk("%s: Tx request while isr active.\n",dev->name);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ return 1;
+ }
+ ei_local->irqlock = 1;
+
+ send_length = ETH_ZLEN < length ? length : ETH_ZLEN;
+
+ if (ei_local->pingpong) {
+ int output_page;
+ if (ei_local->tx1 == 0) {
+ output_page = ei_local->tx_start_page;
+ ei_local->tx1 = send_length;
+ if (ei_debug && ei_local->tx2 > 0)
+ printk("%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx2, ei_local->lasttx,
+ ei_local->txing);
+ } else if (ei_local->tx2 == 0) {
+ output_page = ei_local->tx_start_page + 6;
+ ei_local->tx2 = send_length;
+ if (ei_debug && ei_local->tx1 > 0)
+ printk("%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx1, ei_local->lasttx,
+ ei_local->txing);
+ } else { /* We should never get here. */
+ if (ei_debug)
+ printk("%s: No Tx buffers free. irq=%d tx1=%d tx2=%d last=%d\n",
+ dev->name, dev->interrupt, ei_local->tx1,
+ ei_local->tx2, ei_local->lasttx);
+ ei_local->irqlock = 0;
+ dev->tbusy = 1;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ return 1;
+ }
+ ei_block_output(dev, length, skb->data, output_page);
+ if (! ei_local->txing) {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, output_page);
+ dev->trans_start = jiffies;
+ if (output_page == ei_local->tx_start_page)
+ ei_local->tx1 = -1, ei_local->lasttx = -1;
+ else
+ ei_local->tx2 = -1, ei_local->lasttx = -2;
+ } else
+ ei_local->txqueue++;
+
+ dev->tbusy = (ei_local->tx1 && ei_local->tx2);
+ } else { /* No pingpong, just a single Tx buffer. */
+ ei_block_output(dev, length, skb->data, ei_local->tx_start_page);
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ dev->tbusy = 1;
+ }
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the ether interface interrupts. */
+void ei_interrupt(int irq, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ int e8390_base;
+ int interrupts, nr_serviced = 0;
+ struct ei_device *ei_local;
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ e8390_base = dev->base_addr;
+ ei_local = (struct ei_device *) dev->priv;
+ if (dev->interrupt || ei_local->irqlock) {
+ /* The "irqlock" check is only for testing. */
+ printk(ei_local->irqlock
+ ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
+ : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ dev->name, inb_p(e8390_base + EN0_ISR),
+ inb_p(e8390_base + EN0_IMR));
+ return;
+ }
+
+ dev->interrupt = 1;
+
+ /* Change to page 0 and read the intr status reg. */
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+ if (ei_debug > 3)
+ printk("%s: interrupt(isr=%#2.2x).\n", dev->name,
+ inb_p(e8390_base + EN0_ISR));
+
+ /* !!Assumption!! -- we stay in page 0. Don't break this. */
+ while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
+ && ++nr_serviced < MAX_SERVICE) {
+ if (dev->start == 0) {
+ printk("%s: interrupt from stopped card\n", dev->name);
+ interrupts = 0;
+ break;
+ }
+ if (interrupts & ENISR_OVER) {
+ ei_rx_overrun(dev);
+ } else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
+ /* Got a good (?) packet. */
+ ei_receive(dev);
+ }
+ /* Push the next to-transmit packet through. */
+ if (interrupts & ENISR_TX) {
+ ei_tx_intr(dev);
+ } else if (interrupts & ENISR_COUNTERS) {
+ ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
+ outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
+ }
+
+ /* Ignore the transmit errs and reset intr for now. */
+ if (interrupts & ENISR_TX_ERR) {
+ outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
+ }
+
+ /* Ignore any RDC interrupts that make it back to here. */
+ if (interrupts & ENISR_RDC) {
+ outb_p(ENISR_RDC, e8390_base + EN0_ISR);
+ }
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ }
+
+ if (interrupts && ei_debug) {
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ if (nr_serviced >= MAX_SERVICE) {
+ printk("%s: Too much work at interrupt, status %#2.2x\n",
+ dev->name, interrupts);
+ outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
+ } else {
+ printk("%s: unknown interrupt %#2x\n", dev->name, interrupts);
+ outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
+ }
+ }
+ dev->interrupt = 0;
+ return;
+}
+
+/* We have finished a transmit: check for errors and then trigger the next
+ packet to be sent. */
+static void ei_tx_intr(struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ int status = inb(e8390_base + EN0_TSR);
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
+
+ if (ei_local->pingpong) {
+ ei_local->txqueue--;
+ if (ei_local->tx1 < 0) {
+ if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
+ printk("%s: bogus last_tx_buffer %d, tx1=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx1);
+ ei_local->tx1 = 0;
+ dev->tbusy = 0;
+ if (ei_local->tx2 > 0) {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
+ dev->trans_start = jiffies;
+ ei_local->tx2 = -1,
+ ei_local->lasttx = 2;
+ } else
+ ei_local->lasttx = 20, ei_local->txing = 0;
+ } else if (ei_local->tx2 < 0) {
+ if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
+ printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx2);
+ ei_local->tx2 = 0;
+ dev->tbusy = 0;
+ if (ei_local->tx1 > 0) {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ ei_local->tx1 = -1;
+ ei_local->lasttx = 1;
+ } else
+ ei_local->lasttx = 10, ei_local->txing = 0;
+ } else
+ printk("%s: unexpected TX-done interrupt, lasttx=%d.\n",
+ dev->name, ei_local->lasttx);
+ } else {
+ ei_local->txing = 0;
+ dev->tbusy = 0;
+ }
+
+ /* Minimize Tx latency: update the statistics after we restart TXing. */
+ if (status & ENTSR_COL) ei_local->stat.collisions++;
+ if (status & ENTSR_PTX)
+ ei_local->stat.tx_packets++;
+ else {
+ ei_local->stat.tx_errors++;
+ if (status & ENTSR_ABT) ei_local->stat.tx_aborted_errors++;
+ if (status & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
+ if (status & ENTSR_FU) ei_local->stat.tx_fifo_errors++;
+ if (status & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
+ if (status & ENTSR_OWC) ei_local->stat.tx_window_errors++;
+ }
+
+ mark_bh (NET_BH);
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+
+static void ei_receive(struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int rxing_page, this_frame, next_frame, current_offset;
+ int rx_pkt_count = 0;
+ struct e8390_pkt_hdr rx_frame;
+ int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
+
+ while (++rx_pkt_count < 10) {
+ int pkt_len;
+
+ /* Get the rx page (incoming packet pointer). */
+ outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
+ rxing_page = inb_p(e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+
+ /* Remove one frame from the ring. Boundary is always a page behind. */
+ this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
+ if (this_frame >= ei_local->stop_page)
+ this_frame = ei_local->rx_start_page;
+
+ /* Someday we'll omit the previous, iff we never get this message.
+ (There is at least one clone claimed to have a problem.) */
+ if (ei_debug > 0 && this_frame != ei_local->current_page)
+ printk("%s: mismatched read page pointers %2x vs %2x.\n",
+ dev->name, this_frame, ei_local->current_page);
+
+ if (this_frame == rxing_page) /* Read all the frames? */
+ break; /* Done for now */
+
+ current_offset = this_frame << 8;
+ ei_get_8390_hdr(dev, &rx_frame, this_frame);
+
+ pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
+
+ next_frame = this_frame + 1 + ((pkt_len+4)>>8);
+
+ /* Check for bogosity warned by 3c503 book: the status byte is never
+ written. This happened a lot during testing! This code should be
+ cleaned up someday. */
+ if (rx_frame.next != next_frame
+ && rx_frame.next != next_frame + 1
+ && rx_frame.next != next_frame - num_rx_pages
+ && rx_frame.next != next_frame + 1 - num_rx_pages) {
+ ei_local->current_page = rxing_page;
+ outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
+ ei_local->stat.rx_errors++;
+ continue;
+ }
+
+ if (pkt_len < 60 || pkt_len > 1518) {
+ if (ei_debug)
+ printk("%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
+ dev->name, rx_frame.count, rx_frame.status,
+ rx_frame.next);
+ ei_local->stat.rx_errors++;
+ } else if ((rx_frame.status & 0x0F) == ENRSR_RXOK) {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ if (ei_debug > 1)
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ ei_local->stat.rx_dropped++;
+ break;
+ } else {
+ skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
+ skb->dev = dev;
+ skb_put(skb, pkt_len); /* Make room */
+ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ ei_local->stat.rx_packets++;
+ }
+ } else {
+ int errs = rx_frame.status;
+ if (ei_debug)
+ printk("%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ dev->name, rx_frame.status, rx_frame.next,
+ rx_frame.count);
+ if (errs & ENRSR_FO)
+ ei_local->stat.rx_fifo_errors++;
+ }
+ next_frame = rx_frame.next;
+
+ /* This _should_ never happen: it's here to guard against bad clones. */
+ if (next_frame >= ei_local->stop_page) {
+ printk("%s: next frame inconsistency, %#2x\n", dev->name,
+ next_frame);
+ next_frame = ei_local->rx_start_page;
+ }
+ ei_local->current_page = next_frame;
+ outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
+ }
+ /* If any worth-while packets have been received, netif_rx()
+ has done a mark_bh(NET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+
+ /* Record the maximum Rx packet queue. */
+ if (rx_pkt_count > high_water_mark)
+ high_water_mark = rx_pkt_count;
+
+ /* We used to also ack ENISR_OVER here, but that would sometimes mask
+ a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
+ outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
+ return;
+}
+
+/* We have a receiver overrun: we have to kick the 8390 to get it started
+ again.*/
+static void ei_rx_overrun(struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ int reset_start_time = jiffies;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /* We should already be stopped and in page0. Remove after testing. */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ if (ei_debug > 1)
+ printk("%s: Receiver overrun.\n", dev->name);
+ ei_local->stat.rx_over_errors++;
+
+ /* The old Biro driver does dummy = inb_p( RBCR[01] ); at this point.
+    It might mean something -- magic to speed up a reset? An 8390 bug? */
+
+ /* Wait for the reset to complete. This should happen almost instantly,
+ but could take up to 1.5msec in certain rare instances. There is no
+ easy way of timing something in that range, so we use 'jiffies' as
+ a sanity check. */
+ while ((inb_p(e8390_base+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk("%s: reset did not complete at ei_rx_overrun.\n",
+ dev->name);
+ NS8390_init(dev, 1);
+ return;
+ }
+
+ /* Remove packets right away. */
+ ei_receive(dev);
+
+ outb_p(ENISR_OVER, e8390_base+EN0_ISR);
+ /* Generic 8390 insns to start up again, same as in open_8390(). */
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
+ outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
+}
+
+static struct enet_statistics *get_stats(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /* If the card is stopped, just return the present stats. */
+ if (dev->start == 0) return &ei_local->stat;
+
+ /* Read the counter registers, assuming we are in page 0. */
+ ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
+
+ return &ei_local->stat;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if(dev->flags&IFF_PROMISC)
+ {
+ outb_p(E8390_RXCONFIG | 0x18, ioaddr + EN0_RXCR);
+ }
+ else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
+ {
+ /* The multicast-accept list is initialized to accept-all, and we
+ rely on higher-level filtering for now. */
+ outb_p(E8390_RXCONFIG | 0x08, ioaddr + EN0_RXCR);
+ }
+ else
+ outb_p(E8390_RXCONFIG, ioaddr + EN0_RXCR);
+}
+
+/* Initialize the rest of the 8390 device structure. */
+int ethdev_init(struct device *dev)
+{
+ if (ei_debug > 1)
+ printk(version);
+
+ if (dev->priv == NULL) {
+ struct ei_device *ei_local;
+
+ dev->priv = kmalloc(sizeof(struct ei_device), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct ei_device));
+ ei_local = (struct ei_device *)dev->priv;
+ ei_local->pingpong = ei_pingpong;
+ }
+
+ dev->hard_start_xmit = &ei_start_xmit;
+ dev->get_stats = get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ ether_setup(dev);
+
+ return 0;
+}
+
+
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+void NS8390_init(struct device *dev, int startp)
+{
+ int e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int i;
+ int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
+ unsigned long flags;
+
+ /* Follow National Semi's recommendations for initing the DP83902. */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base); /* 0x21 */
+ outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
+ /* Clear the remote byte count registers. */
+ outb_p(0x00, e8390_base + EN0_RCNTLO);
+ outb_p(0x00, e8390_base + EN0_RCNTHI);
+ /* Set to monitor and loopback mode -- this is vital! */
+ outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
+ /* Set the transmit page and receive ring. */
+ outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
+ outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f, NS says 0x26 */
+ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
+ outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
+ /* Clear the pending interrupts and mask. */
+ outb_p(0xFF, e8390_base + EN0_ISR);
+ outb_p(0x00, e8390_base + EN0_IMR);
+
+ /* Copy the station address into the DS8390 registers,
+ and set the multicast hash bitmap to receive all multicasts. */
+ save_flags(flags);
+ cli();
+ outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base); /* 0x61 */
+ for(i = 0; i < 6; i++) {
+ outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS + i);
+ }
+ /* Initialize the multicast list to accept-all. If we enable multicast
+ the higher levels can do the filtering. */
+ for(i = 0; i < 8; i++)
+ outb_p(0xff, e8390_base + EN1_MULT + i);
+
+ outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base);
+ restore_flags(flags);
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_local->txing = 0;
+ if (startp) {
+ outb_p(0xff, e8390_base + EN0_ISR);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base);
+ outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
+ /* The 3c503 technical manual says to set rxconfig only after the NIC is started. */
+ outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
+ dev->set_multicast_list(dev); /* Get the multicast status right if this
+ was a reset. */
+ }
+ return;
+}
+
+/* Trigger a transmit start, assuming the length is valid. */
+static void NS8390_trigger_send(struct device *dev, unsigned int length,
+ int start_page)
+{
+ int e8390_base = dev->base_addr;
+
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base);
+
+ if (inb_p(e8390_base) & E8390_TRANS) {
+ printk("%s: trigger_send() called with the transmitter busy.\n",
+ dev->name);
+ return;
+ }
+ outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
+ outb_p(length >> 8, e8390_base + EN0_TCNTHI);
+ outb_p(start_page, e8390_base + EN0_TPSR);
+ outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base);
+ return;
+}
+
+#ifdef MODULE
+
+int init_module(void)
+{
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 8390.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/8390.h b/i386/i386at/gpl/linux/net/8390.h
new file mode 100644
index 00000000..17b8cdb5
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/8390.h
@@ -0,0 +1,168 @@
+/* Generic NS8390 register definitions. */
+/* This file is part of Donald Becker's 8390 drivers, and is distributed
+ under the same license.
+ Some of these names and comments originated from the Crynwr
+ packet drivers, which are distributed under the GPL. */
+
+#ifndef _8390_h
+#define _8390_h
+
+#include <linux/if_ether.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+
+#define TX_2X_PAGES 12
+#define TX_1X_PAGES 6
+#define TX_PAGES (ei_status.pingpong ? TX_2X_PAGES : TX_1X_PAGES)
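+/* Each Tx buffer needs 6 pages (6 * 256 = 1536 bytes), enough for a
+   maximum-size Ethernet frame; ping-pong mode keeps two such buffers. */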
+
+#define ETHER_ADDR_LEN 6
+
+/* The 8390 specific per-packet-header format. */
+struct e8390_pkt_hdr {
+ unsigned char status; /* status */
+ unsigned char next; /* pointer to next packet. */
+ unsigned short count; /* header + packet length in bytes */
+};
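+/* Note that 'count' includes these four header bytes; 8390.c subtracts
+   sizeof(struct e8390_pkt_hdr) to recover the payload length. */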
+
+/* From 8390.c */
+extern int ei_debug;
+extern struct sigaction ei_sigaction;
+
+extern int ethif_init(struct device *dev);
+extern int ethdev_init(struct device *dev);
+extern void NS8390_init(struct device *dev, int startp);
+extern int ei_open(struct device *dev);
+extern int ei_close(struct device *dev);
+extern void ei_interrupt(int irq, struct pt_regs *regs);
+
+#ifndef HAVE_AUTOIRQ
+/* From auto_irq.c */
+extern struct device *irq2dev_map[16];
+extern int autoirq_setup(int waittime);
+extern int autoirq_report(int waittime);
+#endif
+
+/* Most of these entries should be in 'struct device' (or most of the
+ things in there should be here!) */
+/* You have one of these per-board */
+struct ei_device {
+ const char *name;
+ void (*reset_8390)(struct device *);
+ void (*get_8390_hdr)(struct device *, struct e8390_pkt_hdr *, int);
+ void (*block_output)(struct device *, int, const unsigned char *, int);
+ void (*block_input)(struct device *, int, struct sk_buff *, int);
+ unsigned open:1;
+ unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */
+ unsigned txing:1; /* Transmit Active */
+ unsigned irqlock:1; /* 8390's intrs disabled when '1'. */
+ unsigned dmaing:1; /* Remote DMA Active */
+ unsigned pingpong:1; /* Using the ping-pong driver */
+ unsigned char tx_start_page, rx_start_page, stop_page;
+ unsigned char current_page; /* Read pointer in buffer */
+ unsigned char interface_num; /* Net port (AUI, 10bT.) to use. */
+ unsigned char txqueue; /* Tx Packet buffer queue length. */
+ short tx1, tx2; /* Packet lengths for ping-pong tx. */
+ short lasttx; /* Alpha version consistency check. */
+ unsigned char reg0; /* Register '0' in a WD8013 */
+ unsigned char reg5; /* Register '5' in a WD8013 */
+ unsigned char saved_irq; /* Original dev->irq value. */
+ /* The new statistics table. */
+ struct enet_statistics stat;
+};
+
+/* The maximum number of 8390 interrupt service routines called per IRQ. */
+#define MAX_SERVICE 12
+
+/* The maximum time waited (in jiffies) before assuming a Tx failed. (20ms) */
+#define TX_TIMEOUT (20*HZ/100)
+
+#define ei_status (*(struct ei_device *)(dev->priv))
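+/* Convenience accessor for the per-board data; it assumes a local
+   'struct device *dev' is in scope at the point of use. */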
+
+/* Some generic ethernet register configurations. */
+#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */
+#define E8390_RX_IRQ_MASK 0x5
+#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast, errors */
+#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */
+#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */
+#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */
+
+/* Register accessed at EN_CMD, the 8390 base addr. */
+#define E8390_STOP 0x01 /* Stop and reset the chip */
+#define E8390_START 0x02 /* Start the chip, clear reset */
+#define E8390_TRANS 0x04 /* Transmit a frame */
+#define E8390_RREAD 0x08 /* Remote read */
+#define E8390_RWRITE 0x10 /* Remote write */
+#define E8390_NODMA 0x20 /* Abort/complete remote DMA */
+#define E8390_PAGE0 0x00 /* Select page chip registers */
+#define E8390_PAGE1 0x40 /* using the two high-order bits */
+#define E8390_PAGE2 0x80 /* Page 3 is invalid. */
+
+#define E8390_CMD 0x00 /* The command register (for all pages) */
+/* Page 0 register offsets. */
+#define EN0_CLDALO 0x01 /* Low byte of current local dma addr RD */
+#define EN0_STARTPG 0x01 /* Starting page of ring bfr WR */
+#define EN0_CLDAHI 0x02 /* High byte of current local dma addr RD */
+#define EN0_STOPPG 0x02 /* Ending page +1 of ring bfr WR */
+#define EN0_BOUNDARY 0x03 /* Boundary page of ring bfr RD WR */
+#define EN0_TSR 0x04 /* Transmit status reg RD */
+#define EN0_TPSR 0x04 /* Transmit starting page WR */
+#define EN0_NCR 0x05 /* Number of collision reg RD */
+#define EN0_TCNTLO 0x05 /* Low byte of tx byte count WR */
+#define EN0_FIFO 0x06 /* FIFO RD */
+#define EN0_TCNTHI 0x06 /* High byte of tx byte count WR */
+#define EN0_ISR 0x07 /* Interrupt status reg RD WR */
+#define EN0_CRDALO 0x08 /* low byte of current remote dma address RD */
+#define EN0_RSARLO 0x08 /* Remote start address reg 0 */
+#define EN0_CRDAHI 0x09 /* high byte, current remote dma address RD */
+#define EN0_RSARHI 0x09 /* Remote start address reg 1 */
+#define EN0_RCNTLO 0x0a /* Remote byte count reg WR */
+#define EN0_RCNTHI 0x0b /* Remote byte count reg WR */
+#define EN0_RSR 0x0c /* rx status reg RD */
+#define EN0_RXCR 0x0c /* RX configuration reg WR */
+#define EN0_TXCR 0x0d /* TX configuration reg WR */
+#define EN0_COUNTER0 0x0d /* Rcv alignment error counter RD */
+#define EN0_DCFG 0x0e /* Data configuration reg WR */
+#define EN0_COUNTER1 0x0e /* Rcv CRC error counter RD */
+#define EN0_IMR 0x0f /* Interrupt mask reg WR */
+#define EN0_COUNTER2 0x0f /* Rcv missed frame error counter RD */
+
+/* Bits in EN0_ISR - Interrupt status register */
+#define ENISR_RX 0x01 /* Receiver, no error */
+#define ENISR_TX 0x02 /* Transmitter, no error */
+#define ENISR_RX_ERR 0x04 /* Receiver, with error */
+#define ENISR_TX_ERR 0x08 /* Transmitter, with error */
+#define ENISR_OVER 0x10 /* Receiver overwrote the ring */
+#define ENISR_COUNTERS 0x20 /* Counters need emptying */
+#define ENISR_RDC 0x40 /* remote dma complete */
+#define ENISR_RESET 0x80 /* Reset completed */
+#define ENISR_ALL 0x3f /* Interrupts we will enable */
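+/* ENISR_ALL is the low six bits above; remote-DMA-complete and reset-complete
+   are typically polled rather than interrupt driven. */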
+
+/* Bits in EN0_DCFG - Data config register */
+#define ENDCFG_WTS 0x01 /* word transfer mode selection */
+
+/* Page 1 register offsets. */
+#define EN1_PHYS 0x01 /* This board's physical enet addr RD WR */
+#define EN1_CURPAG 0x07 /* Current memory page RD WR */
+#define EN1_MULT 0x08 /* Multicast filter mask array (8 bytes) RD WR */
+
+/* Bits in received packet status byte and EN0_RSR*/
+#define ENRSR_RXOK 0x01 /* Received a good packet */
+#define ENRSR_CRC 0x02 /* CRC error */
+#define ENRSR_FAE 0x04 /* frame alignment error */
+#define ENRSR_FO 0x08 /* FIFO overrun */
+#define ENRSR_MPA 0x10 /* missed pkt */
+#define ENRSR_PHY 0x20 /* physical/multicast address */
+#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */
+#define ENRSR_DEF 0x80 /* deferring */
+
+/* Transmitted packet status, EN0_TSR. */
+#define ENTSR_PTX 0x01 /* Packet transmitted without error */
+#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */
+#define ENTSR_COL 0x04 /* The transmit collided at least once. */
+#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */
+#define ENTSR_CRS 0x10 /* The carrier sense was lost. */
+#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */
+#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */
+#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */
+
+#endif /* _8390_h */
diff --git a/i386/i386at/gpl/linux/net/Space.c b/i386/i386at/gpl/linux/net/Space.c
new file mode 100644
index 00000000..a05507d3
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/Space.c
@@ -0,0 +1,400 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Holds initial configuration information for devices.
+ *
+ * NOTE: This file is a nice idea, but its current format does not work
+ * well for drivers that support multiple units, like the SLIP
+ * driver. We should actually have only one pointer to a driver
+ * here, with the driver knowing how many units it supports.
+ * Currently, the SLIP driver abuses the "base_addr" integer
+ * field of the 'device' structure to store the unit number...
+ * -FvK
+ *
+ * Version: @(#)Space.c 1.0.7 08/12/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald J. Becker, <becker@super.org>
+ *
+ * FIXME:
+ * Sort the device chain fastest first.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+
+#define NEXT_DEV NULL
+
+
+/* A unified ethernet device probe. This is the easiest way to have every
+ ethernet adaptor have the name "eth[0123...]".
+ */
+
+extern int hp100_probe(struct device *dev);
+extern int ultra_probe(struct device *dev);
+extern int wd_probe(struct device *dev);
+extern int el2_probe(struct device *dev);
+extern int ne_probe(struct device *dev);
+extern int hp_probe(struct device *dev);
+extern int hp_plus_probe(struct device *dev);
+extern int znet_probe(struct device *);
+extern int express_probe(struct device *);
+extern int eepro_probe(struct device *);
+extern int el3_probe(struct device *);
+extern int at1500_probe(struct device *);
+extern int at1700_probe(struct device *);
+extern int eth16i_probe(struct device *);
+extern int depca_probe(struct device *);
+extern int apricot_probe(struct device *);
+extern int ewrk3_probe(struct device *);
+extern int de4x5_probe(struct device *);
+extern int el1_probe(struct device *);
+#if defined(CONFIG_WAVELAN)
+extern int wavelan_probe(struct device *);
+#endif /* defined(CONFIG_WAVELAN) */
+extern int el16_probe(struct device *);
+extern int elplus_probe(struct device *);
+extern int ac3200_probe(struct device *);
+extern int e2100_probe(struct device *);
+extern int ni52_probe(struct device *);
+extern int ni65_probe(struct device *);
+extern int SK_init(struct device *);
+extern int seeq8005_probe(struct device *);
+extern int tc59x_probe(struct device *);
+
+/* Detachable devices ("pocket adaptors") */
+extern int atp_init(struct device *);
+extern int de600_probe(struct device *);
+extern int de620_probe(struct device *);
+
+static int
+ethif_probe(struct device *dev)
+{
+ u_long base_addr = dev->base_addr;
+
+ if ((base_addr == 0xffe0) || (base_addr == 1))
+ return 1; /* ENXIO */
+
+ if (1
+#if defined(CONFIG_VORTEX)
+ && tc59x_probe(dev)
+#endif
+#if defined(CONFIG_SEEQ8005)
+ && seeq8005_probe(dev)
+#endif
+#if defined(CONFIG_HP100)
+ && hp100_probe(dev)
+#endif
+#if defined(CONFIG_ULTRA)
+ && ultra_probe(dev)
+#endif
+#if defined(CONFIG_WD80x3) || defined(WD80x3)
+
+ && wd_probe(dev)
+#endif
+#if defined(CONFIG_EL2) || defined(EL2) /* 3c503 */
+ && el2_probe(dev)
+#endif
+#if defined(CONFIG_HPLAN) || defined(HPLAN)
+ && hp_probe(dev)
+#endif
+#if 0
+#if defined(CONFIG_HPLAN_PLUS)
+ && hp_plus_probe(dev)
+#endif
+#endif
+#ifdef CONFIG_AC3200 /* Ansel Communications EISA 3200. */
+ && ac3200_probe(dev)
+#endif
+#ifdef CONFIG_E2100 /* Cabletron E21xx series. */
+ && e2100_probe(dev)
+#endif
+#if defined(CONFIG_NE2000) || defined(NE2000)
+ && ne_probe(dev)
+#endif
+#ifdef CONFIG_AT1500
+ && at1500_probe(dev)
+#endif
+#ifdef CONFIG_AT1700
+ && at1700_probe(dev)
+#endif
+#ifdef CONFIG_ETH16I
+ && eth16i_probe(dev) /* ICL EtherTeam 16i/32 */
+#endif
+#ifdef CONFIG_EL3 /* 3c509 */
+ && el3_probe(dev)
+#endif
+#ifdef CONFIG_ZNET /* Zenith Z-Note and some IBM Thinkpads. */
+ && znet_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS /* Intel EtherExpress */
+ && express_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS_PRO /* Intel EtherExpress Pro/10 */
+ && eepro_probe(dev)
+#endif
+#ifdef CONFIG_DEPCA /* DEC DEPCA */
+ && depca_probe(dev)
+#endif
+#ifdef CONFIG_EWRK3 /* DEC EtherWORKS 3 */
+ && ewrk3_probe(dev)
+#endif
+#ifdef CONFIG_DE4X5 /* DEC DE425, DE434, DE435 adapters */
+ && de4x5_probe(dev)
+#endif
+#ifdef CONFIG_APRICOT /* Apricot I82596 */
+ && apricot_probe(dev)
+#endif
+#ifdef CONFIG_EL1 /* 3c501 */
+ && el1_probe(dev)
+#endif
+#if defined(CONFIG_WAVELAN) /* WaveLAN */
+ && wavelan_probe(dev)
+#endif /* defined(CONFIG_WAVELAN) */
+#ifdef CONFIG_EL16 /* 3c507 */
+ && el16_probe(dev)
+#endif
+#ifdef CONFIG_ELPLUS /* 3c505 */
+ && elplus_probe(dev)
+#endif
+#ifdef CONFIG_DE600 /* D-Link DE-600 adapter */
+ && de600_probe(dev)
+#endif
+#ifdef CONFIG_DE620 /* D-Link DE-620 adapter */
+ && de620_probe(dev)
+#endif
+#if defined(CONFIG_SK_G16)
+ && SK_init(dev)
+#endif
+#ifdef CONFIG_NI52
+ && ni52_probe(dev)
+#endif
+#ifdef CONFIG_NI65
+ && ni65_probe(dev)
+#endif
+ && 1 ) {
+ return 1; /* -ENODEV or -EAGAIN would be more accurate. */
+ }
+ return 0;
+}
+
+
+#ifdef CONFIG_NETROM
+ extern int nr_init(struct device *);
+
+ static struct device nr3_dev = { "nr3", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, nr_init, };
+ static struct device nr2_dev = { "nr2", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr3_dev, nr_init, };
+ static struct device nr1_dev = { "nr1", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr2_dev, nr_init, };
+ static struct device nr0_dev = { "nr0", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr1_dev, nr_init, };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&nr0_dev)
+#endif
+
+/* Run-time ATtachable (Pocket) devices have a different (not "eth#") name. */
+#ifdef CONFIG_ATP /* AT-LAN-TEC (RealTek) pocket adaptor. */
+static struct device atp_dev = {
+ "atp0", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, atp_init, /* ... */ };
+# undef NEXT_DEV
+# define NEXT_DEV (&atp_dev)
+#endif
+
+#ifdef CONFIG_ARCNET
+ extern int arcnet_probe(struct device *dev);
+ static struct device arcnet_dev = {
+ "arc0", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, arcnet_probe, };
+# undef NEXT_DEV
+# define NEXT_DEV (&arcnet_dev)
+#endif
+
+/* In Mach, by default allow at least 2 interfaces. */
+#ifdef MACH
+#ifndef ETH1_ADDR
+# define ETH1_ADDR 0
+#endif
+#ifndef ETH1_IRQ
+# define ETH1_IRQ 0
+#endif
+#endif
+
+/* The first device defaults to I/O base '0', which means autoprobe. */
+#ifndef ETH0_ADDR
+# define ETH0_ADDR 0
+#endif
+#ifndef ETH0_IRQ
+# define ETH0_IRQ 0
+#endif
+/* "eth0" defaults to autoprobe (== 0), other use a base of 0xffe0 (== -0x20),
+ which means "don't probe". These entries exist to only to provide empty
+ slots which may be enabled at boot-time. */
+
+static struct device eth3_dev = {
+ "eth3", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, NEXT_DEV, ethif_probe };
+static struct device eth2_dev = {
+ "eth2", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth3_dev, ethif_probe };
+#ifdef MACH
+static struct device eth1_dev = {
+ "eth1", 0,0,0,0,ETH1_ADDR, ETH1_IRQ,0,0,0, &eth2_dev, ethif_probe };
+#else
+static struct device eth1_dev = {
+ "eth1", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth2_dev, ethif_probe };
+#endif
+static struct device eth0_dev = {
+ "eth0", 0, 0, 0, 0, ETH0_ADDR, ETH0_IRQ, 0, 0, 0, &eth1_dev, ethif_probe };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&eth0_dev)
+
+#if defined(PLIP) || defined(CONFIG_PLIP)
+ extern int plip_init(struct device *);
+ static struct device plip2_dev = {
+ "plip2", 0, 0, 0, 0, 0x278, 2, 0, 0, 0, NEXT_DEV, plip_init, };
+ static struct device plip1_dev = {
+ "plip1", 0, 0, 0, 0, 0x378, 7, 0, 0, 0, &plip2_dev, plip_init, };
+ static struct device plip0_dev = {
+ "plip0", 0, 0, 0, 0, 0x3BC, 5, 0, 0, 0, &plip1_dev, plip_init, };
+# undef NEXT_DEV
+# define NEXT_DEV (&plip0_dev)
+#endif /* PLIP */
+
+#if defined(SLIP) || defined(CONFIG_SLIP)
+ /* To be exact, this node just hooks the initialization
+ routines to the device structures. */
+extern int slip_init_ctrl_dev(struct device *);
+static struct device slip_bootstrap = {
+ "slip_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, slip_init_ctrl_dev, };
+#undef NEXT_DEV
+#define NEXT_DEV (&slip_bootstrap)
+#endif /* SLIP */
+
+#if defined(CONFIG_PPP)
+extern int ppp_init(struct device *);
+static struct device ppp_bootstrap = {
+ "ppp_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, ppp_init, };
+#undef NEXT_DEV
+#define NEXT_DEV (&ppp_bootstrap)
+#endif /* PPP */
+
+#ifdef CONFIG_DUMMY
+ extern int dummy_init(struct device *dev);
+ static struct device dummy_dev = {
+ "dummy", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, dummy_init, };
+# undef NEXT_DEV
+# define NEXT_DEV (&dummy_dev)
+#endif
+
+#ifdef CONFIG_EQUALIZER
+extern int eql_init(struct device *dev);
+struct device eql_dev = {
+ "eql", /* Master device for IP traffic load
+ balancing */
+ 0x0, 0x0, 0x0, 0x0, /* recv end/start; mem end/start */
+ 0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ eql_init /* set up the rest */
+};
+# undef NEXT_DEV
+# define NEXT_DEV (&eql_dev)
+#endif
+
+#ifdef CONFIG_IBMTR
+
+ extern int tok_probe(struct device *dev);
+ static struct device ibmtr_dev1 = {
+ "tr1", /* IBM Token Ring (Non-DMA) Interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0xa24, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tok_probe /* ??? Token_init should set up the rest */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&ibmtr_dev1)
+
+
+ static struct device ibmtr_dev0 = {
+ "tr0", /* IBM Token Ring (Non-DMA) Interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0xa20, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tok_probe /* ??? Token_init should set up the rest */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&ibmtr_dev0)
+
+#endif
+#ifdef CONFIG_NET_IPIP
+#ifdef CONFIG_IP_FORWARD
+ extern int tunnel_init(struct device *);
+
+ static struct device tunnel_dev1 =
+ {
+ "tunl1", /* IPIP tunnel */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0x0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tunnel_init /* Fill in the details */
+ };
+
+ static struct device tunnel_dev0 =
+ {
+ "tunl0", /* IPIP tunnel */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0x0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ &tunnel_dev1, /* next device */
+ tunnel_init /* Fill in the details */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&tunnel_dev0)
+
+#endif
+#endif
+
+#ifdef MACH
+struct device *dev_base = &eth0_dev;
+#else
+extern int loopback_init(struct device *dev);
+struct device loopback_dev = {
+ "lo", /* Software Loopback interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ loopback_init /* loopback_init should set up the rest */
+};
+
+struct device *dev_base = &loopback_dev;
+#endif
diff --git a/i386/i386at/gpl/linux/net/ac3200.c b/i386/i386at/gpl/linux/net/ac3200.c
new file mode 100644
index 00000000..054af13a
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/ac3200.c
@@ -0,0 +1,385 @@
+/* ac3200.c: A driver for the Ansel Communications EISA ethernet adaptor. */
+/*
+ Written 1993, 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov, or
+ C/O Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is a driver for the Ansel Communications Model 3200 EISA Ethernet LAN
+ Adapter. The programming information is from the user's manual, as related
+ by glee@ardnassak.math.clemson.edu.
+ */
+
+static const char *version =
+ "ac3200.c:v1.01 7/1/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+/* Offsets from the base address. */
+#define AC_NIC_BASE 0x00
+#define AC_SA_PROM 0x16 /* The station address PROM. */
+#define AC_ADDR0 0x00 /* Prefix station address values. */
+#define AC_ADDR1 0x40 /* !!!!These are just guesses!!!! */
+#define AC_ADDR2 0x90
+#define AC_ID_PORT 0xC80
+#define AC_EISA_ID 0x0110d305
+#define AC_RESET_PORT 0xC84
+#define AC_RESET 0x00
+#define AC_ENABLE 0x01
+#define AC_CONFIG 0xC90 /* The configuration port. */
+
+#define AC_IO_EXTENT 0x10 /* IS THIS REALLY TRUE ??? */
+ /* Actually accessed is:
+ * AC_NIC_BASE (0-15)
+ * AC_SA_PROM (0-5)
+ * AC_ID_PORT (0-3)
+ * AC_RESET_PORT
+ * AC_CONFIG
+ */
+
+/* Decoding of the configuration register. */
+static unsigned char config2irqmap[8] = {15, 12, 11, 10, 9, 7, 5, 3};
+static int addrmap[8] =
+{0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0 };
+static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2"};
+
+#define config2irq(configval) config2irqmap[((configval) >> 3) & 7]
+#define config2mem(configval) addrmap[(configval) & 7]
+#define config2name(configval) port_name[((configval) >> 6) & 3]
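+/* In other words, bits 0-2 of the configuration byte select the shared-memory
+   address, bits 3-5 the IRQ, and bits 6-7 the transceiver port. */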
+
+/* First and last 8390 pages. */
+#define AC_START_PG 0x00 /* First page of 8390 TX buffer */
+#define AC_STOP_PG 0x80 /* Last page +1 of the 8390 RX ring */
+
+int ac3200_probe(struct device *dev);
+static int ac_probe1(int ioaddr, struct device *dev);
+
+static int ac_open(struct device *dev);
+static void ac_reset_8390(struct device *dev);
+static void ac_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ac_block_output(struct device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+static void ac_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+static int ac_close_card(struct device *dev);
+
+
+/* Probe for the AC3200.
+
+ The AC3200 can be identified by either the EISA configuration registers,
+ or the unique value in the station address PROM.
+ */
+
+int ac3200_probe(struct device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+
+ if (ioaddr > 0x1ff) /* Check a single specified location. */
+ return ac_probe1(ioaddr, dev);
+ else if (ioaddr > 0) /* Don't probe at all. */
+ return ENXIO;
+
+ /* If you have a pre 0.99pl15 machine you should delete this line. */
+ if ( ! EISA_bus)
+ return ENXIO;
+
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+ if (check_region(ioaddr, AC_IO_EXTENT))
+ continue;
+ if (ac_probe1(ioaddr, dev) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+static int ac_probe1(int ioaddr, struct device *dev)
+{
+ int i;
+
+#ifndef final_version
+ printk("AC3200 ethercard probe at %#3x:", ioaddr);
+
+ for(i = 0; i < 6; i++)
+ printk(" %02x", inb(ioaddr + AC_SA_PROM + i));
+#endif
+
+ /* !!!!The values of AC_ADDRn (see above) should be corrected when we
+ find out the correct station address prefix!!!! */
+ if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0
+ || inb(ioaddr + AC_SA_PROM + 1) != AC_ADDR1
+ || inb(ioaddr + AC_SA_PROM + 2) != AC_ADDR2 ) {
+#ifndef final_version
+ printk(" not found (invalid prefix).\n");
+#endif
+ return ENODEV;
+ }
+
+ /* The correct probe method is to check the EISA ID. */
+ for (i = 0; i < 4; i++)
+ if (inl(ioaddr + AC_ID_PORT) != AC_EISA_ID) {
+ printk("EISA ID mismatch, %8x vs %8x.\n",
+ inl(ioaddr + AC_ID_PORT), AC_EISA_ID);
+ return ENODEV;
+ }
+
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("ac3200.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i);
+
+#ifndef final_version
+ printk("\nAC3200 ethercard configuration register is %#02x,"
+ " EISA ID %02x %02x %02x %02x.\n", inb(ioaddr + AC_CONFIG),
+ inb(ioaddr + AC_ID_PORT + 0), inb(ioaddr + AC_ID_PORT + 1),
+ inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3));
+#endif
+
+ /* Assign and allocate the interrupt now. */
+ if (dev->irq == 0)
+ dev->irq = config2irq(inb(ioaddr + AC_CONFIG));
+ else if (dev->irq == 2)
+ dev->irq = 9;
+
+ if (request_irq(dev->irq, ei_interrupt, 0, "ac3200")) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return EAGAIN;
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to allocate memory for dev->priv.\n");
+ free_irq(dev->irq);
+ return -ENOMEM;
+ }
+
+ request_region(ioaddr, AC_IO_EXTENT, "ac3200");
+
+ dev->base_addr = ioaddr;
+
+#ifdef notyet
+ if (dev->mem_start) { /* Override the value from the board. */
+ for (i = 0; i < 7; i++)
+ if (addrmap[i] == dev->mem_start)
+ break;
+ if (i >= 7)
+ i = 0;
+ outb((inb(ioaddr + AC_CONFIG) & ~7) | i, ioaddr + AC_CONFIG);
+ }
+#endif
+
+ dev->if_port = inb(ioaddr + AC_CONFIG) >> 6;
+ dev->mem_start = config2mem(inb(ioaddr + AC_CONFIG));
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = dev->rmem_end = dev->mem_start
+ + (AC_STOP_PG - AC_START_PG)*256;
+
+ ei_status.name = "AC3200";
+ ei_status.tx_start_page = AC_START_PG;
+ ei_status.rx_start_page = AC_START_PG + TX_PAGES;
+ ei_status.stop_page = AC_STOP_PG;
+ ei_status.word16 = 1;
+
+ printk("\n%s: AC3200 at %#x, IRQ %d, %s port, shared memory %#lx-%#lx.\n",
+ dev->name, ioaddr, dev->irq, port_name[dev->if_port],
+ dev->mem_start, dev->mem_end-1);
+
+ if (ei_debug > 0)
+ printk(version);
+
+ ei_status.reset_8390 = &ac_reset_8390;
+ ei_status.block_input = &ac_block_input;
+ ei_status.block_output = &ac_block_output;
+ ei_status.get_8390_hdr = &ac_get_8390_hdr;
+
+ dev->open = &ac_open;
+ dev->stop = &ac_close_card;
+ NS8390_init(dev, 0);
+ return 0;
+}
+
+static int ac_open(struct device *dev)
+{
+#ifdef notyet
+ /* Someday we may enable the IRQ and shared memory here. */
+ int ioaddr = dev->base_addr;
+
+ if (request_irq(dev->irq, ei_interrupt, 0, "ac3200"))
+ return -EAGAIN;
+#endif
+
+ ei_open(dev);
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static void ac_reset_8390(struct device *dev)
+{
+ ushort ioaddr = dev->base_addr;
+
+ outb(AC_RESET, ioaddr + AC_RESET_PORT);
+ if (ei_debug > 1) printk("resetting AC3200, t=%ld...", jiffies);
+
+ ei_status.txing = 0;
+ outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
+ if (ei_debug > 1) printk("reset done\n");
+
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ac_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ unsigned long hdr_start = dev->mem_start + ((ring_page - AC_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps. */
+
+static void ac_block_input(struct device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ unsigned long xfer_start = dev->mem_start + ring_offset - (AC_START_PG<<8);
+
+ if (xfer_start + count > dev->rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = dev->rmem_end - xfer_start;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+}
+
+static void ac_block_output(struct device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ unsigned long shmem = dev->mem_start + ((start_page - AC_START_PG)<<8);
+
+ memcpy_toio(shmem, buf, count);
+}
+
+static int ac_close_card(struct device *dev)
+{
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+#ifdef notyet
+ /* We should someday disable shared memory and interrupts. */
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = 0;
+#endif
+
+ ei_close(dev);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+#ifdef MODULE
+#define MAX_AC32_CARDS 4 /* Max number of AC32 cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_AC32_CARDS] = { 0, };
+static struct device dev_ac32[MAX_AC32_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_AC32_CARDS] = { 0, };
+static int irq[MAX_AC32_CARDS] = { 0, };
+static int mem[MAX_AC32_CARDS] = { 0, };
+
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
+ struct device *dev = &dev_ac32[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev]; /* Currently ignored by driver */
+ dev->init = ac3200_probe;
+ /* Default is to only install one card. */
+ if (io[this_dev] == 0 && this_dev != 0) break;
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "ac3200.c: No ac3200 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
+ struct device *dev = &dev_ac32[this_dev];
+ if (dev->priv != NULL) {
+ kfree(dev->priv);
+ dev->priv = NULL;
+ /* Someday free_irq + irq2dev may be in ac_close_card() */
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = NULL;
+ release_region(dev->base_addr, AC_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c ac3200.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/apricot.c b/i386/i386at/gpl/linux/net/apricot.c
new file mode 100644
index 00000000..130d7759
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/apricot.c
@@ -0,0 +1,1046 @@
+/* apricot.c: An Apricot 82596 ethernet driver for linux. */
+/*
+ Apricot
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@super.org or
+ C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
+
+
+*/
+
+static const char *version = "apricot.c:v0.2 05/12/94\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#ifndef HAVE_PORTRESERVE
+#define check_region(addr, size) 0
+#define request_region(addr, size,name) do ; while(0)
+#endif
+
+#ifndef HAVE_ALLOC_SKB
+#define alloc_skb(size, priority) (struct sk_buff *) kmalloc(size,priority)
+#define kfree_skbmem(buff, size) kfree_s(buff,size)
+#endif
+
+#define APRICOT_DEBUG 1
+
+#ifdef APRICOT_DEBUG
+int i596_debug = APRICOT_DEBUG;
+#else
+int i596_debug = 1;
+#endif
+
+#define APRICOT_TOTAL_SIZE 17
+
+#define I596_NULL -1
+
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+#define CMD_FLEX 0x0008 /* Enable flexible memory model */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
+
+#define STAT_C 0x8000 /* Set to 0 after execution */
+#define STAT_B 0x4000 /* Command being executed */
+#define STAT_OK 0x2000 /* Command executed ok */
+#define STAT_A 0x1000 /* Command aborted */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define CUC_ABORT 0x0400
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define RX_ABORT 0x0040
+
+struct i596_cmd {
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *next;
+};
+
+#define EOF 0x8000
+#define SIZE_MASK 0x3fff
+
+struct i596_tbd {
+ unsigned short size;
+ unsigned short pad;
+ struct i596_tbd *next;
+ char *data;
+};
+
+struct tx_cmd {
+ struct i596_cmd cmd;
+ struct i596_tbd *tbd;
+ unsigned short size;
+ unsigned short pad;
+};
+
+struct i596_rfd {
+ unsigned short stat;
+ unsigned short cmd;
+ struct i596_rfd *next;
+ long rbd;
+ unsigned short count;
+ unsigned short size;
+ char data[1532];
+};
+
+#define RX_RING_SIZE 8
+
+struct i596_scb {
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ unsigned long crc_err;
+ unsigned long align_err;
+ unsigned long resource_err;
+ unsigned long over_err;
+ unsigned long rcvdt_err;
+ unsigned long short_err;
+ unsigned short t_on;
+ unsigned short t_off;
+};
+
+struct i596_iscp {
+ unsigned long stat;
+ struct i596_scb *scb;
+};
+
+struct i596_scp {
+ unsigned long sysbus;
+ unsigned long pad;
+ struct i596_iscp *iscp;
+};
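+/* Start-up linkage: the driver hands the 82596 the address of the SCP, the
+   SCP points at the ISCP, and the ISCP points at the SCB through which all
+   later commands and receive frames are handled (see init_i596_mem()). */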
+
+struct i596_private {
+ struct i596_scp scp;
+ struct i596_iscp iscp;
+ struct i596_scb scb;
+ struct i596_cmd set_add;
+ char eth_addr[8];
+ struct i596_cmd set_conf;
+ char i596_config[16];
+ struct i596_cmd tdr;
+ unsigned long stat;
+ int last_restart;
+ struct i596_rfd *rx_tail;
+ struct i596_cmd *cmd_tail;
+ struct i596_cmd *cmd_head;
+ int cmd_backlog;
+ unsigned long last_cmd;
+ struct enet_statistics stats;
+};
+
+char init_setup[] = {
+ 0x8E, /* length, prefetch on */
+ 0xC8, /* fifo to 8, monitor off */
+ 0x80, /* don't save bad frames */
+ 0x2E, /* No source address insertion, 8 byte preamble */
+ 0x00, /* priority and backoff defaults */
+ 0x60, /* interframe spacing */
+ 0x00, /* slot time LSB */
+ 0xf2, /* slot time and retries */
+ 0x00, /* promiscuous mode */
+ 0x00, /* collision detect */
+ 0x40, /* minimum frame length */
+ 0xff,
+ 0x00,
+ 0x7f /* *multi IA */ };
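+/* Fourteen of these bytes are copied into the CmdConfigure parameter block
+   in init_i596_mem(). */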
+
+static int i596_open(struct device *dev);
+static int i596_start_xmit(struct sk_buff *skb, struct device *dev);
+static void i596_interrupt(int irq, struct pt_regs *regs);
+static int i596_close(struct device *dev);
+static struct enet_statistics *i596_get_stats(struct device *dev);
+static void i596_add_cmd(struct device *dev, struct i596_cmd *cmd);
+static void print_eth(char *);
+static void set_multicast_list(struct device *dev);
+
+
+static inline int
+init_rx_bufs(struct device *dev, int num)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int i;
+ struct i596_rfd *rfd;
+
+ lp->scb.rfd = (struct i596_rfd *)I596_NULL;
+
+ if (i596_debug > 1) printk ("%s: init_rx_bufs %d.\n", dev->name, num);
+
+ for (i = 0; i < num; i++)
+ {
+ if (!(rfd = (struct i596_rfd *)kmalloc(sizeof(struct i596_rfd), GFP_KERNEL)))
+ break;
+
+ rfd->stat = 0x0000;
+ rfd->rbd = I596_NULL;
+ rfd->count = 0;
+ rfd->size = 1532;
+ if (i == 0)
+ {
+ rfd->cmd = CMD_EOL;
+ lp->rx_tail = rfd;
+ }
+ else
+ rfd->cmd = 0x0000;
+
+ rfd->next = lp->scb.rfd;
+ lp->scb.rfd = rfd;
+ }
+
+ if (i != 0)
+ lp->rx_tail->next = lp->scb.rfd;
+
+ return (i);
+}
+
+static inline void
+remove_rx_bufs(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ struct i596_rfd *rfd = lp->scb.rfd;
+
+ lp->rx_tail->next = (struct i596_rfd *)I596_NULL;
+
+ do
+ {
+ lp->scb.rfd = rfd->next;
+ kfree_s(rfd, sizeof(struct i596_rfd));
+ rfd = lp->scb.rfd;
+ }
+ while (rfd != lp->rx_tail);
+}
+
+static inline void
+init_i596_mem(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ short ioaddr = dev->base_addr;
+ int boguscnt = 100;
+
+ /* change the scp address */
+ outw(0, ioaddr);
+ outw(0, ioaddr);
+ outb(4, ioaddr+0xf);
+ outw(((((int)&lp->scp) & 0xffff) | 2), ioaddr);
+ outw((((int)&lp->scp)>>16) & 0xffff, ioaddr);
+
+ lp->last_cmd = jiffies;
+
+ lp->scp.sysbus = 0x00440000;
+ lp->scp.iscp = &(lp->iscp);
+ lp->iscp.scb = &(lp->scb);
+ lp->iscp.stat = 0x0001;
+ lp->cmd_backlog = 0;
+
+ lp->cmd_head = lp->scb.cmd = (struct i596_cmd *) I596_NULL;
+
+ if (i596_debug > 2) printk("%s: starting i82596.\n", dev->name);
+
+ (void) inb (ioaddr+0x10);
+ outb(4, ioaddr+0xf);
+ outw(0, ioaddr+4);
+
+ while (lp->iscp.stat)
+ if (--boguscnt == 0)
+ {
+ printk("%s: i82596 initialization timed out with status %4.4x, cmd %4.4x.\n",
+ dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.command = 0;
+
+ memcpy (lp->i596_config, init_setup, 14);
+ lp->set_conf.command = CmdConfigure;
+ i596_add_cmd(dev, &lp->set_conf);
+
+ memcpy (lp->eth_addr, dev->dev_addr, 6);
+ lp->set_add.command = CmdSASetup;
+ i596_add_cmd(dev, &lp->set_add);
+
+ lp->tdr.command = CmdTDR;
+ i596_add_cmd(dev, &lp->tdr);
+
+ boguscnt = 200;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: receive unit start timed out with status %4.4x, cmd %4.4x.\n",
+ dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.command = RX_START;
+ outw(0, ioaddr+4);
+
+ boguscnt = 200;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i82596 init timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ return;
+}
+
+static inline int
+i596_rx(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int frames = 0;
+
+ if (i596_debug > 3) printk ("i596_rx()\n");
+
+ while ((lp->scb.rfd->stat) & STAT_C)
+ {
+ if (i596_debug >2) print_eth(lp->scb.rfd->data);
+
+ if ((lp->scb.rfd->stat) & STAT_OK)
+ {
+ /* a good frame */
+ int pkt_len = lp->scb.rfd->count & 0x3fff;
+ struct sk_buff *skb = dev_alloc_skb(pkt_len);
+
+ frames++;
+
+ if (skb == NULL)
+ {
+ printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb->dev = dev;
+ memcpy(skb_put(skb,pkt_len), lp->scb.rfd->data, pkt_len);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+
+ if (i596_debug > 4) print_eth(skb->data);
+ }
+ else
+ {
+ lp->stats.rx_errors++;
+ if ((lp->scb.rfd->stat) & 0x0001) lp->stats.collisions++;
+ if ((lp->scb.rfd->stat) & 0x0080) lp->stats.rx_length_errors++;
+ if ((lp->scb.rfd->stat) & 0x0100) lp->stats.rx_over_errors++;
+ if ((lp->scb.rfd->stat) & 0x0200) lp->stats.rx_fifo_errors++;
+ if ((lp->scb.rfd->stat) & 0x0400) lp->stats.rx_frame_errors++;
+ if ((lp->scb.rfd->stat) & 0x0800) lp->stats.rx_crc_errors++;
+ if ((lp->scb.rfd->stat) & 0x1000) lp->stats.rx_length_errors++;
+ }
+
+ lp->scb.rfd->stat = 0;
+ lp->rx_tail->cmd = 0;
+ lp->rx_tail = lp->scb.rfd;
+ lp->scb.rfd = lp->scb.rfd->next;
+ lp->rx_tail->count = 0;
+ lp->rx_tail->cmd = CMD_EOL;
+
+ }
+
+ if (i596_debug > 3) printk ("frames %d\n", frames);
+
+ return 0;
+}
+
+static inline void
+i596_cleanup_cmd(struct i596_private *lp)
+{
+ struct i596_cmd *ptr;
+ int boguscnt = 100;
+
+ if (i596_debug > 4) printk ("i596_cleanup_cmd\n");
+
+ while (lp->cmd_head != (struct i596_cmd *) I596_NULL)
+ {
+ ptr = lp->cmd_head;
+
+ lp->cmd_head = lp->cmd_head->next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7)
+ {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1;
+
+ dev_kfree_skb(skb, FREE_WRITE);
+
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)tx_cmd, (sizeof (struct tx_cmd) + sizeof (struct i596_tbd)));
+ break;
+ }
+ case CmdMulticastList:
+ {
+ unsigned short count = *((unsigned short *) (ptr + 1));
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)ptr, (sizeof (struct i596_cmd) + count + 2));
+ break;
+ }
+ default:
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ }
+ }
+
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_cleanup_cmd timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.cmd = lp->cmd_head;
+}
+
+static inline void
+i596_reset(struct device *dev, struct i596_private *lp, int ioaddr)
+{
+ int boguscnt = 100;
+
+ if (i596_debug > 4) printk ("i596_reset\n");
+
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_reset timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ lp->scb.command = CUC_ABORT|RX_ABORT;
+ outw(0, ioaddr+4);
+
+ /* wait for shutdown */
+ boguscnt = 400;
+
+ while ((lp->scb.status, lp->scb.command) || lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_reset 2 timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ i596_cleanup_cmd(lp);
+ i596_rx(dev);
+
+ dev->start = 1;
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ init_i596_mem(dev);
+}
+
+static void i596_add_cmd(struct device *dev, struct i596_cmd *cmd)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+ int boguscnt = 100;
+
+ if (i596_debug > 4) printk ("i596_add_cmd\n");
+
+ cmd->status = 0;
+ cmd->command |= (CMD_EOL|CMD_INTR);
+ cmd->next = (struct i596_cmd *) I596_NULL;
+
+ save_flags(flags);
+ cli();
+ if (lp->cmd_head != (struct i596_cmd *) I596_NULL)
+ lp->cmd_tail->next = cmd;
+ else
+ {
+ lp->cmd_head = cmd;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_add_cmd timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.cmd = cmd;
+ lp->scb.command = CUC_START;
+ outw (0, ioaddr+4);
+ }
+ lp->cmd_tail = cmd;
+ lp->cmd_backlog++;
+
+ lp->cmd_head = lp->scb.cmd;
+ restore_flags(flags);
+
+ if (lp->cmd_backlog > 16)
+ {
+ int tickssofar = jiffies - lp->last_cmd;
+
+ if (tickssofar < 25) return;
+
+ printk("%s: command unit timed out, status resetting.\n", dev->name);
+
+ i596_reset(dev, lp, ioaddr);
+ }
+}
+
+static int
+i596_open(struct device *dev)
+{
+ int i;
+
+ if (i596_debug > 1)
+ printk("%s: i596_open() irq %d.\n", dev->name, dev->irq);
+
+ if (request_irq(dev->irq, &i596_interrupt, 0, "apricot"))
+ return -EAGAIN;
+
+ irq2dev_map[dev->irq] = dev;
+
+ /* Allocate the receive ring once; warn if we got fewer buffers than asked for. */
+ if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
+ printk("%s: only able to allocate %d receive buffers\n", dev->name, i);
+
+ if (i < 4)
+ {
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = 0;
+ return -EAGAIN;
+ }
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ MOD_INC_USE_COUNT;
+
+ /* Initialize the 82596 memory */
+ init_i596_mem(dev);
+
+ return 0; /* Always succeed */
+}
+
+static int
+i596_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ struct tx_cmd *tx_cmd;
+
+ if (i596_debug > 2) printk ("%s: Apricot start xmit\n", dev->name);
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ printk("%s: transmit timed out, status resetting.\n",
+ dev->name);
+ lp->stats.tx_errors++;
+ /* Try to restart the adaptor */
+ if (lp->last_restart == lp->stats.tx_packets) {
+ if (i596_debug > 1) printk ("Resetting board.\n");
+
+ /* Shutdown and restart */
+ i596_reset(dev,lp, ioaddr);
+ } else {
+ /* Issue a channel attention signal */
+ if (i596_debug > 1) printk ("Kicking board.\n");
+
+ lp->scb.command = CUC_START|RX_START;
+ outw(0, ioaddr+4);
+
+ lp->last_restart = lp->stats.tx_packets;
+ }
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+ }
+
+ /* If some higher level thinks we've missed a tx-done interrupt,
+    we are passed NULL. N.B. dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* shouldn't happen */
+ if (skb->len <= 0) return 0;
+
+ if (i596_debug > 3) printk("%s: i596_start_xmit() called\n", dev->name);
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else
+ {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ dev->trans_start = jiffies;
+
+ tx_cmd = (struct tx_cmd *) kmalloc ((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
+ if (tx_cmd == NULL)
+ {
+ printk ("%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.tx_dropped++;
+
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ else
+ {
+ tx_cmd->tbd = (struct i596_tbd *) (tx_cmd + 1);
+ tx_cmd->tbd->next = (struct i596_tbd *) I596_NULL;
+
+ tx_cmd->cmd.command = CMD_FLEX|CmdTx;
+
+ tx_cmd->pad = 0;
+ tx_cmd->size = 0;
+ tx_cmd->tbd->pad = 0;
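+ /* EOF marks this as the last (and only) buffer descriptor for the
+    frame; the low 14 bits (SIZE_MASK) carry the byte count. */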
+ tx_cmd->tbd->size = EOF | length;
+
+ tx_cmd->tbd->data = skb->data;
+
+ if (i596_debug > 3) print_eth(skb->data);
+
+ i596_add_cmd(dev, (struct i596_cmd *)tx_cmd);
+
+ lp->stats.tx_packets++;
+ }
+ }
+
+ dev->tbusy = 0;
+
+ return 0;
+}
+
+
+static void print_eth(char *add)
+{
+ int i;
+
+ printk ("Dest ");
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", (unsigned char)add[i]);
+ printk ("\n");
+
+ printk ("Source");
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", (unsigned char)add[i+6]);
+ printk ("\n");
+ printk ("type %2.2X%2.2X\n", (unsigned char)add[12], (unsigned char)add[13]);
+}
+
+int apricot_probe(struct device *dev)
+{
+ int i;
+ struct i596_private *lp;
+ int checksum = 0;
+ int ioaddr = 0x300;
+ char eth_addr[6];
+
+ /* This is easy: the ethernet interface can only be at 0x300. */
+ /* First check that nothing is already registered here. */
+
+ if (check_region(ioaddr, APRICOT_TOTAL_SIZE))
+ return ENODEV;
+
+ for (i = 0; i < 8; i++)
+ {
+ eth_addr[i] = inb(ioaddr+8+i);
+ checksum += eth_addr[i];
+ }
+
+ /* The checksum is a multiple of 0x100; we got this wrong the first time:
+    some machines have 0x100, some 0x200. The DOS driver doesn't
+    even bother with the checksum. */
+
+ if (checksum % 0x100) return ENODEV;
+
+ /* Some other boards trip the checksum.. but then appear as ether
+ address 0. Trap these - AC */
+
+ if(memcmp(eth_addr,"\x00\x00\x49",3)!= 0)
+ return ENODEV;
+
+ request_region(ioaddr, APRICOT_TOTAL_SIZE, "apricot");
+
+ dev->base_addr = ioaddr;
+ ether_setup(dev);
+ printk("%s: Apricot 82596 at %#3x,", dev->name, ioaddr);
+
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]);
+
+ dev->base_addr = ioaddr;
+ dev->irq = 10;
+ printk(" IRQ %d.\n", dev->irq);
+
+ if (i596_debug > 0) printk(version);
+
+ /* The APRICOT-specific entries in the device structure. */
+ dev->open = &i596_open;
+ dev->stop = &i596_close;
+ dev->hard_start_xmit = &i596_start_xmit;
+ dev->get_stats = &i596_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ dev->mem_start = (int)kmalloc(sizeof(struct i596_private)+ 0x0f, GFP_KERNEL);
+ /* align for scp */
+ dev->priv = (void *)((dev->mem_start + 0xf) & 0xfffffff0);
+
+ lp = (struct i596_private *)dev->priv;
+ memset((void *)lp, 0, sizeof(struct i596_private));
+ lp->scb.command = 0;
+ lp->scb.cmd = (struct i596_cmd *) I596_NULL;
+ lp->scb.rfd = (struct i596_rfd *)I596_NULL;
+
+ return 0;
+}
+
+static void
+i596_interrupt(int irq, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct i596_private *lp;
+ short ioaddr;
+ int boguscnt = 200;
+ unsigned short status, ack_cmd = 0;
+
+ if (dev == NULL) {
+ printk ("i596_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ if (i596_debug > 3) printk ("%s: i596_interrupt(): irq %d\n",dev->name, irq);
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+
+ lp = (struct i596_private *)dev->priv;
+
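+ /* Spin until the 82596 releases the SCB command word; the comma expression
+ reads the status word but the loop only tests the command field, and
+ boguscnt bounds the wait. */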
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: i596 interrupt, timeout status %4.4x command %4.4x.\n", dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+ status = lp->scb.status;
+
+ if (i596_debug > 4)
+ printk("%s: i596 interrupt, status %4.4x.\n", dev->name, status);
+
+ ack_cmd = status & 0xf000;
+
+ if ((status & 0x8000) || (status & 0x2000))
+ {
+ struct i596_cmd *ptr;
+
+ if ((i596_debug > 4) && (status & 0x8000))
+ printk("%s: i596 interrupt completed command.\n", dev->name);
+ if ((i596_debug > 4) && (status & 0x2000))
+ printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700);
+
+ while ((lp->cmd_head != (struct i596_cmd *) I596_NULL) && (lp->cmd_head->status & STAT_C))
+ {
+ ptr = lp->cmd_head;
+
+ lp->cmd_head = lp->cmd_head->next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7)
+ {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
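+ /* tbd->data was set to skb->data at transmit time; stepping back one
+ sk_buff assumes the data area immediately follows the sk_buff header,
+ which recovers the original skb for freeing. */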
+ struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1;
+
+ dev_kfree_skb(skb, FREE_WRITE);
+
+ if ((ptr->status) & STAT_OK)
+ {
+ if (i596_debug >2) print_eth(skb->data);
+ }
+ else
+ {
+ lp->stats.tx_errors++;
+ if ((ptr->status) & 0x0020) lp->stats.collisions++;
+ if (!((ptr->status) & 0x0040)) lp->stats.tx_heartbeat_errors++;
+ if ((ptr->status) & 0x0400) lp->stats.tx_carrier_errors++;
+ if ((ptr->status) & 0x0800) lp->stats.collisions++;
+ if ((ptr->status) & 0x1000) lp->stats.tx_aborted_errors++;
+ }
+
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)tx_cmd, (sizeof (struct tx_cmd) + sizeof (struct i596_tbd)));
+ break;
+ }
+ case CmdMulticastList:
+ {
+ unsigned short count = *((unsigned short *) (ptr + 1));
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)ptr, (sizeof (struct i596_cmd) + count + 2));
+ break;
+ }
+ case CmdTDR:
+ {
+ unsigned long status = *((unsigned long *) (ptr + 1));
+
+ if (status & 0x8000)
+ {
+ if (i596_debug > 3)
+ printk("%s: link ok.\n", dev->name);
+ }
+ else
+ {
+ if (status & 0x4000)
+ printk("%s: Transceiver problem.\n", dev->name);
+ if (status & 0x2000)
+ printk("%s: Termination problem.\n", dev->name);
+ if (status & 0x1000)
+ printk("%s: Short circuit.\n", dev->name);
+
+ printk("%s: Time %ld.\n", dev->name, status & 0x07ff);
+ }
+ }
+ default:
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+
+ lp->last_cmd = jiffies;
+ }
+ }
+
+ ptr = lp->cmd_head;
+ while ((ptr != (struct i596_cmd *) I596_NULL) && (ptr != lp->cmd_tail))
+ {
+ ptr->command &= 0x1fff;
+ ptr = ptr->next;
+ }
+
+ if ((lp->cmd_head != (struct i596_cmd *) I596_NULL) && (dev->start)) ack_cmd |= CUC_START;
+ lp->scb.cmd = lp->cmd_head;
+ }
+
+ if ((status & 0x1000) || (status & 0x4000))
+ {
+ if ((i596_debug > 4) && (status & 0x4000))
+ printk("%s: i596 interrupt received a frame.\n", dev->name);
+ if ((i596_debug > 4) && (status & 0x1000))
+ printk("%s: i596 interrupt receive unit inactive %x.\n", dev->name, status & 0x0070);
+
+ i596_rx(dev);
+
+ if (dev->start) ack_cmd |= RX_START;
+ }
+
+ /* acknowledge the interrupt */
+
+/*
+ if ((lp->scb.cmd != (struct i596_cmd *) I596_NULL) && (dev->start)) ack_cmd | = CUC_START;
+*/
+ boguscnt = 100;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: i596 interrupt, timeout status %4.4x command %4.4x.\n", dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+ lp->scb.command = ack_cmd;
+
+ (void) inb (ioaddr+0x10);
+ outb (4, ioaddr+0xf);
+ outw (0, ioaddr+4);
+
+ if (i596_debug > 4)
+ printk("%s: exiting interrupt.\n", dev->name);
+
+ dev->interrupt = 0;
+ return;
+}
+
+static int
+i596_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int boguscnt = 200;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (i596_debug > 1)
+ printk("%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, lp->scb.status);
+
+ lp->scb.command = CUC_ABORT|RX_ABORT;
+ outw(0, ioaddr+4);
+
+ i596_cleanup_cmd(lp);
+
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: close timed timed out with status %4.4x, cmd %4.4x.\n",
+ dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = 0;
+ remove_rx_bufs(dev);
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct enet_statistics *
+i596_get_stats(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ struct i596_cmd *cmd;
+
+ if (i596_debug > 1)
+ printk ("%s: set multicast list %d\n", dev->name, dev->mc_count);
+
+ if (dev->mc_count > 0)
+ {
+ struct dev_mc_list *dmi;
+ char *cp;
+ cmd = (struct i596_cmd *) kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
+ if (cmd == NULL)
+ {
+ printk ("%s: set_multicast Memory squeeze.\n", dev->name);
+ return;
+ }
+ cmd->command = CmdMulticastList;
+ *((unsigned short *) (cmd + 1)) = dev->mc_count * 6;
+ cp=((char *)(cmd + 1))+2;
+ for(dmi=dev->mc_list;dmi!=NULL;dmi=dmi->next)
+ {
+ memcpy(cp, dmi->dmi_addr, 6);
+ cp+=6;
+ }
+ print_eth (((char *)(cmd + 1)) + 2);
+ i596_add_cmd(dev, cmd);
+ }
+ else
+ {
+ if (lp->set_conf.next != (struct i596_cmd * ) I596_NULL)
+ return;
+ if (dev->mc_count == 0 && !(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
+ {
+ if(dev->flags&IFF_ALLMULTI)
+ dev->flags|=IFF_PROMISC;
+ lp->i596_config[8] &= ~0x01;
+ }
+ else
+ lp->i596_config[8] |= 0x01;
+
+ i596_add_cmd(dev, &lp->set_conf);
+ }
+}
+
+#ifdef HAVE_DEVLIST
+static unsigned int apricot_portlist[] = {0x300, 0};
+struct netdev_entry apricot_drv =
+{"apricot", apricot_probe, APRICOT_TOTAL_SIZE, apricot_portlist};
+#endif
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_apricot = {
+ devicename, /* device name inserted by /linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x300, 10,
+ 0, 0, 0, NULL, apricot_probe };
+
+static int io = 0x300;
+static int irq = 10;
+
+int
+init_module(void)
+{
+ dev_apricot.base_addr = io;
+ dev_apricot.irq = irq;
+ if (register_netdev(&dev_apricot) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_apricot);
+ kfree_s((void *)dev_apricot.mem_start, sizeof(struct i596_private) + 0xf);
+ dev_apricot.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_apricot.base_addr, APRICOT_TOTAL_SIZE);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c apricot.c"
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/at1700.c b/i386/i386at/gpl/linux/net/at1700.c
new file mode 100644
index 00000000..3d684c0a
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/at1700.c
@@ -0,0 +1,677 @@
+/* at1700.c: A network device driver for the Allied Telesis AT1700.
+
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is a device driver for the Allied Telesis AT1700, which is a
+ straight-forward Fujitsu MB86965 implementation.
+
+ Sources:
+ The Fujitsu MB86965 datasheet.
+
+ After the initial version of this driver was written Gerry Sawkins of
+ ATI provided their EEPROM configuration code header file.
+ Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes.
+
+ Bugs:
+ The MB86965 has a design flaw that makes all probes unreliable. Not
+ only is it difficult to detect, it also moves around in I/O space in
+ response to inb()s from other device probes!
+*/
+
+static const char *version =
+ "at1700.c:v1.12 1/18/95 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* This unusual address order is used to verify the CONFIG register. */
+static int at1700_probe_list[] =
+{0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0};
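+/* The low three bits of IOCONFIG index this table: at1700_probe1() below
+ checks that at1700_probe_list[inb(ioaddr + IOCONFIG) & 7] maps back to the
+ probed address, so, for example, a board at 0x340 must report 4. */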
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+typedef unsigned char uchar;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct enet_statistics stats;
+ uint tx_started:1; /* A transmit is in progress. */
+ uchar tx_queue; /* Number of packets on the Tx queue. */
+ ushort tx_queue_len; /* Current length of the Tx queue. */
+};
+
+
+/* Offsets from the base address. */
+#define STATUS 0
+#define TX_STATUS 0
+#define RX_STATUS 1
+#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
+#define RX_INTR 3
+#define TX_MODE 4
+#define RX_MODE 5
+#define CONFIG_0 6 /* Misc. configuration settings. */
+#define CONFIG_1 7
+/* Run-time register bank 2 definitions. */
+#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
+#define TX_START 10
+#define MODE13 13
+#define EEPROM_Ctrl 16
+#define EEPROM_Data 17
+#define IOCONFIG 19
+#define RESET 31 /* Write to reset some parts of the chip. */
+#define AT1700_IO_EXTENT 32
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x40 /* EEPROM shift clock, in reg. 16. */
+#define EE_CS 0x20 /* EEPROM chip select, in reg. 16. */
+#define EE_DATA_WRITE 0x80 /* EEPROM chip data in, in reg. 17. */
+#define EE_DATA_READ 0x80 /* EEPROM chip data out, in reg. 17. */
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay() do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ_CMD (6 << 6)
+#define EE_ERASE_CMD (7 << 6)
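+
+/* read_eeprom() below shifts a 10-bit read command out MSB first: a leading
+ zero, the always-set start bit, the "10" read opcode and a 6-bit word
+ address. Reading word 4, for example, clocks out 0 1 10 000100. */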
+
+
+/* Index to functions, as function prototypes. */
+
+extern int at1700_probe(struct device *dev);
+
+static int at1700_probe1(struct device *dev, short ioaddr);
+static int read_eeprom(int ioaddr, int location);
+static int net_open(struct device *dev);
+static int net_send_packet(struct sk_buff *skb, struct device *dev);
+static void net_interrupt(int irq, struct pt_regs *regs);
+static void net_rx(struct device *dev);
+static int net_close(struct device *dev);
+static struct enet_statistics *net_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+#ifdef HAVE_DEVLIST
+/* Support for an alternate probe manager, which will eliminate the
+ boilerplate below. */
+struct netdev_entry at1700_drv =
+{"at1700", at1700_probe1, AT1700_IO_EXTENT, at1700_probe_list};
+#else
+int
+at1700_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return at1700_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; at1700_probe_list[i]; i++) {
+ int ioaddr = at1700_probe_list[i];
+ if (check_region(ioaddr, AT1700_IO_EXTENT))
+ continue;
+ if (at1700_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
+ "signature", the default bit pattern after a reset. This *doesn't* work --
+ there is no way to reset the bus interface without a complete power-cycle!
+
+ It turns out that ATI came to the same conclusion I did: the only thing
+ that can be done is checking a few bits and then diving right into an
+ EEPROM read. */
+
+int at1700_probe1(struct device *dev, short ioaddr)
+{
+ char irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
+ unsigned int i, irq;
+
+ /* Resetting the chip doesn't reset the ISA interface, so don't bother.
+ That means we have to be careful with the register values we probe for.
+ */
+#ifdef notdef
+ printk("at1700 probe at %#x, eeprom is %4.4x %4.4x %4.4x ctrl %4.4x.\n",
+ ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5),
+ read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl));
+#endif
+ if (at1700_probe_list[inb(ioaddr + IOCONFIG) & 0x07] != ioaddr
+ || read_eeprom(ioaddr, 4) != 0x0000
+ || (read_eeprom(ioaddr, 5) & 0xff00) != 0xF400)
+ return -ENODEV;
+
+ /* Reset the internal state machines. */
+ outb(0, ioaddr + RESET);
+
+ irq = irqmap[(read_eeprom(ioaddr, 12)&0x04)
+ | (read_eeprom(ioaddr, 0)>>14)];
+
+ /* Snarf the interrupt vector now. */
+ if (request_irq(irq, &net_interrupt, 0, "at1700")) {
+ printk ("AT1700 found at %#3x, but it's unusable due to a conflict on"
+ "IRQ %d.\n", ioaddr, irq);
+ return EAGAIN;
+ }
+
+ /* Allocate a new 'dev' if needed. */
+ if (dev == NULL)
+ dev = init_etherdev(0, sizeof(struct net_local));
+
+ /* Grab the region so that we can find another board if the IRQ request
+ fails. */
+ request_region(ioaddr, AT1700_IO_EXTENT, "at1700");
+
+ printk("%s: AT1700 found at %#3x, IRQ %d, address ", dev->name,
+ ioaddr, irq);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ irq2dev_map[irq] = dev;
+
+ for(i = 0; i < 3; i++) {
+ unsigned short eeprom_val = read_eeprom(ioaddr, 4+i);
+ printk("%04x", eeprom_val);
+ ((unsigned short *)dev->dev_addr)[i] = ntohs(eeprom_val);
+ }
+
+ /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
+ rather than 150 ohm shielded twisted pair compensation.
+ 0x0000 == auto-sense the interface
+ 0x0800 == use TP interface
+ 0x1800 == use coax interface
+ */
+ {
+ const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2"};
+ ushort setup_value = read_eeprom(ioaddr, 12);
+
+ dev->if_port = setup_value >> 8;
+ printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
+ }
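+ /* For example, an EEPROM word 12 of 0x0800 yields if_port = 0x08, and
+ (0x08 >> 3) & 3 == 1 selects the "10baseT" entry above. */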
+
+ /* Set the station address in bank zero. */
+ outb(0xe0, ioaddr + 7);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + 8 + i);
+
+ /* Switch to bank 1 and set the multicast table to accept none. */
+ outb(0xe4, ioaddr + 7);
+ for (i = 0; i < 8; i++)
+ outb(0x00, ioaddr + 8 + i);
+
+ /* Set the configuration register 0 to 32K 100ns. byte-wide memory, 16 bit
+ bus access, two 4K Tx queues, and disabled Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Switch to bank 2 and lock our I/O address. */
+ outb(0xe8, ioaddr + 7);
+ outb(dev->if_port, ioaddr + MODE13);
+
+ /* Power-down the chip. Aren't we green! */
+ outb(0x00, ioaddr + CONFIG_1);
+
+ if (net_debug)
+ printk(version);
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the fields of 'dev' with ethernet-generic values. */
+
+ ether_setup(dev);
+ return 0;
+}
+
+static int read_eeprom(int ioaddr, int location)
+{
+ int i;
+ unsigned short retval = 0;
+ short ee_addr = ioaddr + EEPROM_Ctrl;
+ short ee_daddr = ioaddr + EEPROM_Data;
+ int read_cmd = location | EE_READ_CMD;
+ short ctrl_val = EE_CS;
+
+ outb(ctrl_val, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 9; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outb(dataval, ee_daddr);
+ outb(EE_CS | EE_SHIFT_CLK, ee_addr); /* EEPROM clock tick. */
+ eeprom_delay();
+ outb(EE_CS, ee_addr); /* Finish EEPROM a clock tick. */
+ eeprom_delay();
+ }
+ outb(EE_CS, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb(EE_CS | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_daddr) & EE_DATA_READ) ? 1 : 0);
+ outb(EE_CS, ee_addr);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ ctrl_val &= ~EE_CS;
+ outb(ctrl_val | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ outb(ctrl_val, ee_addr);
+ eeprom_delay();
+ return retval;
+}
+
+
+
+static int net_open(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ /* Powerup the chip, initialize config register 1, and select bank 0. */
+ outb(0xe0, ioaddr + CONFIG_1);
+
+ /* Set the station address in bank zero. */
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + 8 + i);
+
+ /* Switch to bank 1 and set the multicast table to accept none. */
+ outb(0xe4, ioaddr + 7);
+ for (i = 0; i < 8; i++)
+ outb(0x00, ioaddr + 8 + i);
+
+ /* Set the configuration register 0 to 32K 100ns. byte-wide memory, 16 bit
+ bus access, and two 4K Tx queues. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Same config 0, except enable the Rx and Tx. */
+ outb(0x5a, ioaddr + CONFIG_0);
+ /* Switch to register bank 2 for the run-time registers. */
+ outb(0xe8, ioaddr + CONFIG_1);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Turn on Rx interrupts, leave Tx interrupts off until packet Tx. */
+ outb(0x00, ioaddr + TX_INTR);
+ outb(0x81, ioaddr + RX_INTR);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int
+net_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 10)
+ return 1;
+ printk("%s: transmit timed out with status %04x, %s?\n", dev->name,
+ inw(ioaddr + STATUS), inb(ioaddr + TX_STATUS) & 0x80
+ ? "IRQ conflict" : "network cable problem");
+ printk("%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
+ dev->name, inw(ioaddr + 0), inw(ioaddr + 2), inw(ioaddr + 4),
+ inw(ioaddr + 6), inw(ioaddr + 8), inw(ioaddr + 10),
+ inw(ioaddr + 12), inw(ioaddr + 14));
+ lp->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ outw(0xffff, ioaddr + 24);
+ outw(0xffff, ioaddr + TX_STATUS);
+ outw(0xe85a, ioaddr + CONFIG_0);
+ outw(0x8100, ioaddr + TX_INTR);
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ }
+
+ /* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ /* Turn off the possible Tx interrupts. */
+ outb(0x00, ioaddr + TX_INTR);
+
+ outw(length, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ dev->tbusy = 0;
+ } else if (lp->tx_queue_len < 4096 - 1502)
+ /* Yes, there is room for one more packet. */
+ dev->tbusy = 0;
+
+ /* Turn Tx interrupts back on. */
+ outb(0x82, ioaddr + TX_INTR);
+ }
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+net_interrupt(int irq, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr, status;
+
+ if (dev == NULL) {
+ printk ("at1700_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+ status = inw(ioaddr + TX_STATUS);
+ outw(status, ioaddr + TX_STATUS);
+
+ if (net_debug > 4)
+ printk("%s: Interrupt with status %04x.\n", dev->name, status);
+ if (status & 0xff00
+ || (inb(ioaddr + RX_MODE) & 0x40) == 0) { /* Got a packet(s). */
+ net_rx(dev);
+ }
+ if (status & 0x00ff) {
+ if (status & 0x80) {
+ lp->stats.tx_packets++;
+ if (lp->tx_queue) {
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ } else {
+ lp->tx_started = 0;
+ /* Turn Tx interrupts off. */
+ outb(0x00, ioaddr + TX_INTR);
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+ }
+ }
+
+ dev->interrupt = 0;
+ return;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = 5;
+
+ while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
+ ushort status = inw(ioaddr + DATAPORT);
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+
+ if (net_debug > 4)
+ printk("%s: Rxing packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(0x05, ioaddr + 14);
+ break;
+ }
+#endif
+
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & 0x08) lp->stats.rx_length_errors++;
+ if (status & 0x04) lp->stats.rx_frame_errors++;
+ if (status & 0x02) lp->stats.rx_crc_errors++;
+ if (status & 0x01) lp->stats.rx_over_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ printk("%s: The AT1700 claimed a very large packet, size %d.\n",
+ dev->name, pkt_len);
+ /* Prime the FIFO and then flush the packet. */
+ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+3);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet (len %d).\n",
+ dev->name, pkt_len);
+ /* Prime the FIFO and then flush the packet. */
+ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
+ skb->protocol=eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(NET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
+ break;
+ inw(ioaddr + DATAPORT); /* dummy status read */
+ outb(0x05, ioaddr + 14);
+ }
+
+ if (net_debug > 5)
+ printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
+ dev->name, inb(ioaddr + RX_MODE), i);
+ }
+ return;
+}
+
+/* The inverse routine to net_open(). */
+static int net_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Update the statistics -- ToDo. */
+
+ /* Power-down the chip. Green, green, green! */
+ outb(0x00, ioaddr + CONFIG_1);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+net_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ cli();
+ /* ToDo: Update the statistics from the device registers. */
+ sti();
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void
+set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ if (dev->mc_count || dev->flags&(IFF_PROMISC|IFF_ALLMULTI))
+ {
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. - AC
+ */
+ dev->flags|=IFF_PROMISC;
+
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ }
+ else
+ outb(2, ioaddr + RX_MODE); /* Disable promiscuous, use normal mode */
+}
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_at1700 = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, at1700_probe };
+
+static int io = 0x260;
+static int irq = 0;
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("at1700: You should not use auto-probing with insmod!\n");
+ dev_at1700.base_addr = io;
+ dev_at1700.irq = irq;
+ if (register_netdev(&dev_at1700) != 0) {
+ printk("at1700: register_netdev() returned non-zero.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_at1700);
+ kfree(dev_at1700.priv);
+ dev_at1700.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ free_irq(dev_at1700.irq);
+ irq2dev_map[dev_at1700.irq] = NULL;
+ release_region(dev_at1700.base_addr, AT1700_IO_EXTENT);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c at1700.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/atp.c b/i386/i386at/gpl/linux/net/atp.c
new file mode 100644
index 00000000..62aa04ef
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/atp.c
@@ -0,0 +1,787 @@
+/* atp.c: Attached (pocket) ethernet adapter driver for linux. */
+/*
+ This is a driver for a commonly OEMed pocket (parallel port)
+ ethernet adapter.
+
+ Written 1993,1994,1995 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ The timer-based reset code was written by Bill Carlson, wwc@super.org.
+*/
+
+static const char *version =
+ "atp.c:v1.01 1/18/95 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+/*
+ This file is a device driver for the RealTek (aka AT-Lan-Tec) pocket
+ ethernet adapter. This is a common low-cost OEM pocket ethernet
+ adapter, sold under many names.
+
+ Sources:
+ This driver was written from the packet driver assembly code provided by
+ Vincent Bono of AT-Lan-Tec. Ever try to figure out how a complicated
+ device works just from the assembly code? It ain't pretty. The following
+ description is written based on guesses and writing lots of special-purpose
+ code to test my theorized operation.
+
+ Theory of Operation
+
+ The RTL8002 adapter seems to be built around a custom spin of the SEEQ
+ controller core. It probably has a 16K or 64K internal packet buffer, of
+ which the first 4K is devoted to transmit and the rest to receive.
+ The controller maintains the queue of received packets and the packet buffer
+ access pointer internally, with only 'reset to beginning' and 'skip to next
+ packet' commands visible. The transmit packet queue holds two (or more?)
+ packets: both 'retransmit this packet' (due to collision) and 'transmit next
+ packet' commands must be started by hand.
+
+ The station address is stored in a standard bit-serial EEPROM which must be
+ read (ughh) by the device driver. (Provisions have been made for
+ substituting a 74S288 PROM, but I haven't gotten reports of any models
+ using it.) Unlike built-in devices, a pocket adapter can temporarily lose
+ power without indication to the device driver. The major effect is that
+ the station address, receive filter (promiscuous, etc.) and transceiver
+ must be reset.
+
+ The controller itself has 16 registers, some of which use only the lower
+ bits. The registers are read and written 4 bits at a time. The four bit
+ register address is presented on the data lines along with a few additional
+ timing and control bits. The data is then read from status port or written
+ to the data port.
+
+ Since the bulk data transfer of the actual packets through the slow
+ parallel port dominates the driver's running time, four distinct data
+ (non-register) transfer modes are provided by the adapter, two in each
+ direction. In the first mode timing for the nibble transfers is
+ provided through the data port. In the second mode the same timing is
+ provided through the control port. In either case the data is read from
+ the status port and written to the data port, just as it is accessing
+ registers.
+
+ In addition to the basic data transfer methods, several more modes are
+ created by adding some delay: multiple reads of the data are done to allow
+ it to stabilize. This delay seems to be needed on most machines.
+
+ The data transfer mode is stored in the 'dev->if_port' field. Its default
+ value is '4'. It may be overridden at boot-time using the third parameter
+ to the "ether=..." initialization.
+
+ The header file <atp.h> provides inline functions that encapsulate the
+ register and data access methods. These functions are hand-tuned to
+ generate reasonable object code. This header file also documents my
+ interpretations of the device registers.
+*/
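+
+/* For example, with the usual "ether=irq,io,param,...,name" boot syntax, a
+ line such as ether=7,0x378,5,eth0 leaves 5 in dev->mem_start; atp_probe1()
+ below then selects data transfer mode 5 via dev->if_port = dev->mem_start & 7. */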
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "atp.h"
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* The number of low I/O ports used by the ethercard. */
+#define ETHERCARD_TOTAL_SIZE 3
+
+/* This code, written by wwc@super.org, resets the adapter every
+ TIMED_CHECKER ticks. This recovers from an unknown error which
+ hangs the device. */
+#define TIMED_CHECKER (HZ/4)
+#ifdef TIMED_CHECKER
+#include <linux/timer.h>
+static void atp_timed_checker(unsigned long ignored);
+static struct device *atp_timed_dev;
+static struct timer_list atp_timer = {NULL, NULL, 0, 0, atp_timed_checker};
+#endif
+
+/* Index to functions, as function prototypes. */
+
+extern int atp_probe(struct device *dev);
+
+static int atp_probe1(struct device *dev, short ioaddr);
+static void get_node_ID(struct device *dev);
+static unsigned short eeprom_op(short ioaddr, unsigned int cmd);
+static int net_open(struct device *dev);
+static void hardware_init(struct device *dev);
+static void write_packet(short ioaddr, int length, unsigned char *packet, int mode);
+static void trigger_send(short ioaddr, int length);
+static int net_send_packet(struct sk_buff *skb, struct device *dev);
+static void net_interrupt(int irq, struct pt_regs *regs);
+static void net_rx(struct device *dev);
+static void read_block(short ioaddr, int length, unsigned char *buffer, int data_mode);
+static int net_close(struct device *dev);
+static struct enet_statistics *net_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+
+/* Check for a network adapter of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+int
+atp_init(struct device *dev)
+{
+ int *port, ports[] = {0x378, 0x278, 0x3bc, 0};
+ int base_addr = dev->base_addr;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return atp_probe1(dev, base_addr);
+ else if (base_addr == 1) /* Don't probe at all. */
+ return ENXIO;
+
+ for (port = ports; *port; port++) {
+ int ioaddr = *port;
+ outb(0x57, ioaddr + PAR_DATA);
+ if (inb(ioaddr + PAR_DATA) != 0x57)
+ continue;
+ if (atp_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+static int atp_probe1(struct device *dev, short ioaddr)
+{
+ int saved_ctrl_reg, status;
+
+ outb(0xff, ioaddr + PAR_DATA);
+ /* Save the original value of the Control register, in case we guessed
+ wrong. */
+ saved_ctrl_reg = inb(ioaddr + PAR_CONTROL);
+ /* IRQEN=0, SLCTB=high INITB=high, AUTOFDB=high, STBB=high. */
+ outb(0x04, ioaddr + PAR_CONTROL);
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+ eeprom_delay(2048);
+ status = read_nibble(ioaddr, CMR1);
+
+ if ((status & 0x78) != 0x08) {
+ /* The pocket adapter probe failed, restore the control register. */
+ outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
+ return 1;
+ }
+ status = read_nibble(ioaddr, CMR2_h);
+ if ((status & 0x78) != 0x10) {
+ outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
+ return 1;
+ }
+ /* Find the IRQ used by triggering an interrupt. */
+ write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable Tx and Rx. */
+
+ /* Omit autoIRQ routine for now. Use "table lookup" instead. Uhgggh. */
+ if (ioaddr == 0x378)
+ dev->irq = 7;
+ else
+ dev->irq = 5;
+ write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF); /* Disable Tx and Rx units. */
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+
+ dev->base_addr = ioaddr;
+
+ /* Read the station address PROM. */
+ get_node_ID(dev);
+
+ printk("%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM "
+ "%02X:%02X:%02X:%02X:%02X:%02X.\n", dev->name, dev->base_addr,
+ dev->irq, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ /* Leave the hardware in a reset state. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+
+ if (net_debug)
+ printk(version);
+
+ /* Initialize the device structure. */
+ ether_setup(dev);
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+
+ {
+ struct net_local *lp = (struct net_local *)dev->priv;
+ lp->addr_mode = CMR2h_Normal;
+ }
+
+ /* For the ATP adapter the "if_port" is really the data transfer mode. */
+ dev->if_port = (dev->mem_start & 0xf) ? dev->mem_start & 0x7 : 4;
+ if (dev->mem_end & 0xf)
+ net_debug = dev->mem_end & 7;
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+#ifdef TIMED_CHECKER
+ del_timer(&atp_timer);
+ atp_timer.expires = jiffies + TIMED_CHECKER;
+ atp_timed_dev = dev;
+ add_timer(&atp_timer);
+#endif
+ return 0;
+}
+
+/* Read the station address PROM, usually a word-wide EEPROM. */
+static void get_node_ID(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ int sa_offset = 0;
+ int i;
+
+ write_reg(ioaddr, CMR2, CMR2_EEPROM); /* Point to the EEPROM control registers. */
+
+ /* Some adapters have the station address at offset 15 instead of offset
+ zero. Check for it, and fix it if needed. */
+ if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff)
+ sa_offset = 15;
+
+ for (i = 0; i < 3; i++)
+ ((unsigned short *)dev->dev_addr)[i] =
+ ntohs(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
+
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+}
+
+/*
+ An EEPROM read command starts by shifting out 0x60+address, and then
+ shifting in the serial data. See the NatSemi databook for details.
+ * ________________
+ * CS : __|
+ * ___ ___
+ * CLK: ______| |___| |
+ * __ _______ _______
+ * DI : __X_______X_______X
+ * DO : _________X_______X
+ */
+
+static unsigned short eeprom_op(short ioaddr, unsigned int cmd)
+{
+ unsigned eedata_out = 0;
+ int num_bits = EE_CMD_SIZE;
+
+ while (--num_bits >= 0) {
+ char outval = test_bit(num_bits, &cmd) ? EE_DATA_WRITE : 0;
+ write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW);
+ eeprom_delay(5);
+ write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH);
+ eedata_out <<= 1;
+ if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ)
+ eedata_out++;
+ eeprom_delay(5);
+ }
+ write_reg_high(ioaddr, PROM_CMD, EE_CLK_LOW & ~EE_CS);
+ return eedata_out;
+}
+
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine sets everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+ there is a non-reboot way to recover if something goes wrong.
+
+ This is an attachable device: if there is no dev->priv entry then it wasn't
+ probed for at boot-time, and we need to probe for it again.
+ */
+static int net_open(struct device *dev)
+{
+
+ /* The interrupt line is turned off (tri-stated) when the device isn't in
+ use. That's especially important for "attached" interfaces where the
+ port or interrupt may be shared. */
+ if (irq2dev_map[dev->irq] != 0
+ || (irq2dev_map[dev->irq] = dev) == 0
+ || request_irq(dev->irq, &net_interrupt, 0, "ATP")) {
+ return -EAGAIN;
+ }
+
+ hardware_init(dev);
+ dev->start = 1;
+ return 0;
+}
+
+/* This routine resets the hardware. We initialize everything, assuming that
+ the hardware may have been temporarily detached. */
+static void hardware_init(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+
+ if (net_debug > 2) {
+ printk("%s: Reset: current Rx mode %d.\n", dev->name,
+ (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
+ }
+
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
+
+ /* Enable the interrupt line from the serial port. */
+ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+
+ /* Unmask the interesting interrupts. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
+
+ lp->tx_unit_busy = 0;
+ lp->pac_cnt_in_tx_buf = 0;
+ lp->saved_tx_size = 0;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+}
+
+static void trigger_send(short ioaddr, int length)
+{
+ write_reg_byte(ioaddr, TxCNT0, length & 0xff);
+ write_reg(ioaddr, TxCNT1, length >> 8);
+ write_reg(ioaddr, CMR1, CMR1_Xmit);
+}
+
+static void write_packet(short ioaddr, int length, unsigned char *packet, int data_mode)
+{
+ length = (length + 1) & ~1; /* Round up to word length. */
+ outb(EOC+MAR, ioaddr + PAR_DATA);
+ if ((data_mode & 1) == 0) {
+ /* Write the packet out, starting with the write addr. */
+ outb(WrAddr+MAR, ioaddr + PAR_DATA);
+ do {
+ write_byte_mode0(ioaddr, *packet++);
+ } while (--length > 0) ;
+ } else {
+ /* Write the packet out in slow mode. */
+ unsigned char outbyte = *packet++;
+
+ outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ outb(WrAddr+MAR, ioaddr + PAR_DATA);
+
+ outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA);
+ outb(outbyte & 0x0f, ioaddr + PAR_DATA);
+ outbyte >>= 4;
+ outb(outbyte & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ while (--length > 0)
+ write_byte_mode1(ioaddr, *packet++);
+ }
+ /* Terminate the Tx frame. End of write: ECB. */
+ outb(0xff, ioaddr + PAR_DATA);
+ outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+}
+
+static int
+net_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ printk("%s: transmit timed out, %s?\n", dev->name,
+ inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem"
+ : "IRQ conflict");
+ lp->stats.tx_errors++;
+ /* Try to restart the adapter. */
+ hardware_init(dev);
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+
+ /* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+ int flags;
+
+ /* Disable interrupts by writing 0x00 to the Interrupt Mask Register.
+ This sequence must not be interrupted by an incoming packet. */
+ save_flags(flags);
+ cli();
+ write_reg(ioaddr, IMR, 0);
+ write_reg_high(ioaddr, IMR, 0);
+ restore_flags(flags);
+
+ write_packet(ioaddr, length, buf, dev->if_port);
+
+ lp->pac_cnt_in_tx_buf++;
+ if (lp->tx_unit_busy == 0) {
+ trigger_send(ioaddr, length);
+ lp->saved_tx_size = 0; /* Redundant */
+ lp->re_tx = 0;
+ lp->tx_unit_busy = 1;
+ } else
+ lp->saved_tx_size = length;
+
+ dev->trans_start = jiffies;
+ /* Re-enable the LPT interrupts. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+net_interrupt(int irq, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 20;
+ static int num_tx_since_rx = 0;
+
+ if (dev == NULL) {
+ printk ("ATP_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+
+ /* Disable additional spurious interrupts. */
+ outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
+
+ /* The adapter's output is currently the IRQ line, switch it to data. */
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+ write_reg(ioaddr, IMR, 0);
+
+ if (net_debug > 5) printk("%s: In interrupt ", dev->name);
+ while (--boguscount > 0) {
+ status = read_nibble(ioaddr, ISR);
+ if (net_debug > 5) printk("loop status %02x..", status);
+
+ if (status & (ISR_RxOK<<3)) {
+ write_reg(ioaddr, ISR, ISR_RxOK); /* Clear the Rx interrupt. */
+ do {
+ int read_status = read_nibble(ioaddr, CMR1);
+ if (net_debug > 6)
+ printk("handling Rx packet %02x..", read_status);
+ /* We acknowledged the normal Rx interrupt, so if the interrupt
+ is still outstanding we must have a Rx error. */
+ if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */
+ lp->stats.rx_over_errors++;
+ /* Set to no-accept mode long enough to remove a packet. */
+ write_reg_high(ioaddr, CMR2, CMR2h_OFF);
+ net_rx(dev);
+ /* Clear the interrupt and return to normal Rx mode. */
+ write_reg_high(ioaddr, ISR, ISRh_RxErr);
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+ } else if ((read_status & (CMR1_BufEnb << 3)) == 0) {
+ net_rx(dev);
+ dev->last_rx = jiffies;
+ num_tx_since_rx = 0;
+ } else
+ break;
+ } while (--boguscount > 0);
+ } else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
+ if (net_debug > 6) printk("handling Tx done..");
+ /* Clear the Tx interrupt. We should check for too many failures
+ and reinitialize the adapter. */
+ write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
+ if (status & (ISR_TxErr<<3)) {
+ lp->stats.collisions++;
+ if (++lp->re_tx > 15) {
+ lp->stats.tx_aborted_errors++;
+ hardware_init(dev);
+ break;
+ }
+ /* Attempt to retransmit. */
+ if (net_debug > 6) printk("attempting to ReTx");
+ write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit);
+ } else {
+ /* Finish up the transmit. */
+ lp->stats.tx_packets++;
+ lp->pac_cnt_in_tx_buf--;
+ if ( lp->saved_tx_size) {
+ trigger_send(ioaddr, lp->saved_tx_size);
+ lp->saved_tx_size = 0;
+ lp->re_tx = 0;
+ } else
+ lp->tx_unit_busy = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+ num_tx_since_rx++;
+ } else if (num_tx_since_rx > 8
+ && jiffies > dev->last_rx + 100) {
+ if (net_debug > 2)
+ printk("%s: Missed packet? No Rx after %d Tx and %ld jiffies"
+ " status %02x CMR1 %02x.\n", dev->name,
+ num_tx_since_rx, jiffies - dev->last_rx, status,
+ (read_nibble(ioaddr, CMR1) >> 3) & 15);
+ lp->stats.rx_missed_errors++;
+ hardware_init(dev);
+ num_tx_since_rx = 0;
+ break;
+ } else
+ break;
+ }
+
+ /* The following code fixes a rare (and very difficult to track down)
+ problem where the adapter forgets its ethernet address. */
+ {
+ int i;
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+#ifdef TIMED_CHECKER
+ del_timer(&atp_timer);
+ atp_timer.expires = jiffies + TIMED_CHECKER;
+ add_timer(&atp_timer);
+#endif
+ }
+
+ /* Tell the adapter that it can go back to using the output line as IRQ. */
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ /* Enable the physical interrupt line, which is sure to be low until.. */
+ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ /* .. we enable the interrupt sources. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr); /* Hmmm, really needed? */
+
+ if (net_debug > 5) printk("exiting interrupt.\n");
+
+ dev->interrupt = 0;
+
+ return;
+}
+
+#ifdef TIMED_CHECKER
+/* The following code fixes a rare (and very difficult to track down)
+ problem where the adapter forgets its ethernet address. */
+static void atp_timed_checker(unsigned long ignored)
+{
+ int i;
+ int ioaddr = atp_timed_dev->base_addr;
+
+ if (!atp_timed_dev->interrupt)
+ {
+ for (i = 0; i < 6; i++)
+#if 0
+ if (read_cmd_byte(ioaddr, PAR0 + i) != atp_timed_dev->dev_addr[i])
+ {
+ struct net_local *lp = (struct net_local *)atp_timed_dev->priv;
+ write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]);
+ if (i == 2)
+ lp->stats.tx_errors++;
+ else if (i == 3)
+ lp->stats.tx_dropped++;
+ else if (i == 4)
+ lp->stats.collisions++;
+ else
+ lp->stats.rx_errors++;
+ }
+#else
+ write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]);
+#endif
+ }
+ del_timer(&atp_timer);
+ atp_timer.expires = jiffies + TIMED_CHECKER;
+ add_timer(&atp_timer);
+}
+#endif
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void net_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+#ifdef notdef
+ ushort header[4];
+#else
+ struct rx_header rx_head;
+#endif
+
+ /* Process the received packet. */
+ outb(EOC+MAR, ioaddr + PAR_DATA);
+ read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port);
+ if (net_debug > 5)
+ printk(" rx_count %04x %04x %04x %04x..", rx_head.pad,
+ rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr);
+ if ((rx_head.rx_status & 0x77) != 0x01) {
+ lp->stats.rx_errors++;
+ /* Ackkk! I don't have any documentation on what the error bits mean!
+ The best I can do is slap the device around a bit. */
+ if (net_debug > 3) printk("%s: Unknown ATP Rx error %04x.\n",
+ dev->name, rx_head.rx_status);
+ hardware_init(dev);
+ return;
+ } else {
+ /* Malloc up new buffer. */
+ int pkt_len = (rx_head.rx_count & 0x7ff) - 4; /* The "-4" omits the FCS (CRC). */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ goto done;
+ }
+ skb->dev = dev;
+
+ read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
+
+ if (net_debug > 6) {
+ unsigned char *data = skb->data;
+ printk(" data %02x%02x%02x %02x%02x%02x %02x%02x%02x"
+ "%02x%02x%02x %02x%02x..",
+ data[0], data[1], data[2], data[3], data[4], data[5],
+ data[6], data[7], data[8], data[9], data[10], data[11],
+ data[12], data[13]);
+ }
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ done:
+ write_reg(ioaddr, CMR1, CMR1_NextPkt);
+ return;
+}
+
+static void read_block(short ioaddr, int length, unsigned char *p, int data_mode)
+{
+
+ if (data_mode <= 3) { /* Modes 0 through 3. */
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR,
+ ioaddr + PAR_DATA);
+ if (data_mode <= 1) { /* Mode 0 or 1 */
+ do *p++ = read_byte_mode0(ioaddr); while (--length > 0);
+ } else /* Mode 2 or 3 */
+ do *p++ = read_byte_mode2(ioaddr); while (--length > 0);
+ } else if (data_mode <= 5)
+ do *p++ = read_byte_mode4(ioaddr); while (--length > 0);
+ else
+ do *p++ = read_byte_mode6(ioaddr); while (--length > 0);
+
+ outb(EOC+HNib+MAR, ioaddr + PAR_DATA);
+ outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
+}
+
+/* The inverse routine to net_open(). */
+static int
+net_close(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Flush the Tx and disable Rx here. */
+ lp->addr_mode = CMR2h_OFF;
+ write_reg_high(ioaddr, CMR2, CMR2h_OFF);
+
+ /* Free the IRQ line. */
+ outb(0x00, ioaddr + PAR_CONTROL);
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = 0;
+
+ /* Leave the hardware in a reset state. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+net_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ int num_addrs = dev->mc_count;
+
+ if(dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ num_addrs=1;
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. - AC
+ */
+ if(num_addrs)
+ dev->flags|=IFF_PROMISC;
+ lp->addr_mode = num_addrs ? CMR2h_PROMISC : CMR2h_Normal;
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+}
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c atp.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/atp.h b/i386/i386at/gpl/linux/net/atp.h
new file mode 100644
index 00000000..e58f8c10
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/atp.h
@@ -0,0 +1,264 @@
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+struct net_local {
+#ifdef __KERNEL__
+ struct enet_statistics stats;
+#endif
+ ushort saved_tx_size;
+ unsigned char
+ re_tx, /* Number of packet retransmissions. */
+ tx_unit_busy,
+ addr_mode, /* Current Rx filter e.g. promiscuous, etc. */
+ pac_cnt_in_tx_buf;
+};
+
+struct rx_header {
+ ushort pad; /* The first read is always corrupted. */
+ ushort rx_count;
+ ushort rx_status; /* Unknown bit assignments :-<. */
+ ushort cur_addr; /* Apparently the current buffer address(?) */
+};
+
+#define PAR_DATA 0
+#define PAR_STATUS 1
+#define PAR_CONTROL 2
+
+#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
+#define Ctrl_HNibRead 0
+#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
+#define Ctrl_HNibWrite 0
+#define Ctrl_SelData 0x04 /* LP_PINITP */
+#define Ctrl_IRQEN 0x10 /* LP_PINTEN */
+
+#define EOW 0xE0
+#define EOC 0xE0
+#define WrAddr 0x40 /* Set address of EPLC read, write register. */
+#define RdAddr 0xC0
+#define HNib 0x10
+
+enum page0_regs
+{
+ /* The first six registers hold the ethernet physical station address. */
+ PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
+ TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */
+ TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */
+ ISR = 10, IMR = 11, /* Interrupt status and mask. */
+ CMR1 = 12, /* Command register 1. */
+ CMR2 = 13, /* Command register 2. */
+ MAR = 14, /* Memory address register. */
+ CMR2_h = 0x1d, };
+
+enum eepage_regs
+{ PROM_CMD = 6, PROM_DATA = 7 }; /* Note that PROM_CMD is in the "high" bits. */
+
+
+#define ISR_TxOK 0x01
+#define ISR_RxOK 0x04
+#define ISR_TxErr 0x02
+#define ISRh_RxErr 0x11 /* ISR, high nibble */
+
+#define CMR1h_RESET 0x04 /* Reset. */
+#define CMR1h_RxENABLE 0x02 /* Rx unit enable. */
+#define CMR1h_TxENABLE 0x01 /* Tx unit enable. */
+#define CMR1h_TxRxOFF 0x00
+#define CMR1_ReXmit 0x08 /* Trigger a retransmit. */
+#define CMR1_Xmit 0x04 /* Trigger a transmit. */
+#define CMR1_IRQ 0x02 /* Interrupt active. */
+#define CMR1_BufEnb 0x01 /* Enable the buffer(?). */
+#define CMR1_NextPkt 0x01 /* Skip to the next packet(?). */
+
+#define CMR2_NULL 8
+#define CMR2_IRQOUT 9
+#define CMR2_RAMTEST 10
+#define CMR2_EEPROM 12 /* Set to page 1, for reading the EEPROM. */
+
+#define CMR2h_OFF 0 /* No accept mode. */
+#define CMR2h_Physical 1 /* Accept a physical address match only. */
+#define CMR2h_Normal 2 /* Accept physical and broadcast address. */
+#define CMR2h_PROMISC 3 /* Promiscuous mode. */
+
+/* An inline function used below: it differs from inb() by explicitly returning
+ an unsigned char, saving a truncation. */
+extern inline unsigned char inbyte(unsigned short port)
+{
+ unsigned char _v;
+ __asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port));
+ return _v;
+}
+
+/* Read register OFFSET.
+ This command should always be terminated with read_end(). */
+extern inline unsigned char read_nibble(short port, unsigned char offset)
+{
+ unsigned char retval;
+ outb(EOC+offset, port + PAR_DATA);
+ outb(RdAddr+offset, port + PAR_DATA);
+ inbyte(port + PAR_STATUS); /* Settling time delay */
+ retval = inbyte(port + PAR_STATUS);
+ outb(EOC+offset, port + PAR_DATA);
+
+ return retval;
+}
+
+/* Functions for bulk data read. The interrupt line is always disabled. */
+/* Get a byte using read mode 0, reading data from the control lines. */
+extern inline unsigned char read_byte_mode0(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* The same as read_byte_mode0(), but does multiple inb()s for stability. */
+extern inline unsigned char read_byte_mode2(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* Read a byte through the data register. */
+extern inline unsigned char read_byte_mode4(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* Read a byte through the data register, double reading to allow settling. */
+extern inline unsigned char read_byte_mode6(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+extern inline void
+write_reg(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval;
+ outb(EOC | reg, port + PAR_DATA);
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval &= 0xf0;
+ outval |= value;
+ outb(outval, port + PAR_DATA);
+ outval &= 0x1f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | outval, port + PAR_DATA);
+}
+
+extern inline void
+write_reg_high(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval = EOC | HNib | reg;
+
+ outb(outval, port + PAR_DATA);
+ outval &= WrAddr | HNib | 0x0f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval = WrAddr | HNib | value;
+ outb(outval, port + PAR_DATA);
+ outval &= HNib | 0x0f; /* HNib | value */
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | HNib | outval, port + PAR_DATA);
+}
+
+/* Write a byte out using nibble mode. The low nibble is written first. */
+extern inline void
+write_reg_byte(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval;
+ outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
+ outb(value & 0x0f, port + PAR_DATA);
+ value >>= 4;
+ outb(value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+
+ outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
+}
+
+/*
+ * Bulk data writes to the packet buffer. The interrupt line remains enabled.
+ * The first, faster method uses only the dataport (data modes 0, 2 & 4).
+ * The second (backup) method uses data and control regs (modes 1, 3 & 5).
+ * It should only be needed when there is skew between the individual data
+ * lines.
+ */
+extern inline void write_byte_mode0(short ioaddr, unsigned char value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+}
+
+extern inline void write_byte_mode1(short ioaddr, unsigned char value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
+}
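+
+/* Illustrative usage sketch (hypothetical helper, not part of the original
+ header): streaming a packet buffer with the fast data-mode-0 method is a
+ plain byte loop over write_byte_mode0(). */
+extern inline void write_block_mode0(short ioaddr, unsigned char *buf, int len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ write_byte_mode0(ioaddr, buf[i]);
+}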
+
+/* Write 16bit VALUE to the packet buffer: the same as above just doubled. */
+extern inline void write_word_mode0(short ioaddr, unsigned short value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+}
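+/* Worked example (illustration only): write_word_mode0(ioaddr, 0xBEEF) emits
+ the nibbles low-to-high with the 0x10 strobe bit set on every second write,
+ i.e. the outb() values 0x0F, 0x1E, 0x0E, 0x1B to PAR_DATA. */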
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_CLK_HIGH 0x12
+#define EE_CLK_LOW 0x16
+#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
+#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay(ticks) \
+do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
+#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
+#define EE_ERASE(offset) (((7 << 6) + (offset)) << 17)
+#define EE_CMD_SIZE 27 /* The command+address+data size. */
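+
+/* Illustrative sketch (hypothetical, not part of the original header): how
+ the command words above decompose. EE_READ(0x03) == ((6 << 6) + 3) << 17
+ == 0x3060000, i.e. the always-set leading bit, the read opcode and the
+ 6-bit address land in bits 25..17, and the top EE_CMD_SIZE bits of the
+ word would be clocked out MSB-first. The helper below just extracts the
+ EE_DATA_WRITE level to present for one such bit time. */
+extern inline int ee_cmd_bit(unsigned int cmd, int bitnum)
+{
+ return (cmd & (1 << bitnum)) ? EE_DATA_WRITE : 0;
+}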
diff --git a/i386/i386at/gpl/linux/net/de4x5.c b/i386/i386at/gpl/linux/net/de4x5.c
new file mode 100644
index 00000000..249887a6
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/de4x5.c
@@ -0,0 +1,2788 @@
+/* de4x5.c: A DIGITAL DE425/DE434/DE435/DE500 ethernet driver for Linux.
+
+ Copyright 1994, 1995 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of
+ the GNU Public License, incorporated herein by reference.
+
+ This driver is written for the Digital Equipment Corporation series
+ of EtherWORKS ethernet cards:
+
+ DE425 TP/COAX EISA
+ DE434 TP PCI
+ DE435 TP/COAX/AUI PCI
+ DE500 10/100 PCI Fasternet
+
+ The driver has been tested on a relatively busy network using the DE425,
+ DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
+ 16M of data to a DECstation 5000/200 as follows:
+
+ TCP UDP
+ TX RX TX RX
+ DE425 1030k 997k 1170k 1128k
+ DE434 1063k 995k 1170k 1125k
+ DE435 1063k 995k 1170k 1125k
+ DE500 1063k 998k 1170k 1125k in 10Mb/s mode
+
+ All values are typical (in kBytes/sec) from a sample of 4 for each
+ measurement. Their error is +/-20k on a quiet (private) network and also
+ depends on what load the CPU has.
+
+ The author may be reached as davies@wanton.lkg.dec.com or Digital
+ Equipment Corporation, 550 King Street, Littleton MA 01460.
+
+ =========================================================================
+ This driver has been written substantially from scratch, although its
+ inheritance of style and stack interface from 'ewrk3.c' and in turn from
+ Donald Becker's 'lance.c' should be obvious.
+
+ Up to 15 EISA cards can be supported under this driver, limited primarily
+ by the available IRQ lines. I have checked different configurations of
+ multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
+ problem yet (provided you have at least depca.c v0.38) ...
+
+ PCI support has been added to allow the driver to work with the DE434
+ and DE435 cards. The I/O accesses are a bit of a kludge due to the
+ differences in the EISA and PCI CSR address offsets from the base
+ address.
+
+ The ability to load this driver as a loadable module has been included
+ and used extensively during the driver development (to save those long
+ reboot sequences). Loadable module support under PCI has been achieved
+ by letting any I/O address less than 0x1000 be assigned as:
+
+ 0xghh
+
+ where g is the bus number (usually 0 until the BIOSes get fixed)
+ hh is the device number (max is 32 per bus).
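+
+ For example, io=0x112 would select device 0x12 (18 decimal) on bus 1.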
+
+ Essentially, the I/O address and IRQ information are ignored and filled
+ in later by the PCI BIOS during the PCI probe. Note that the board
+ should be in the system at boot time so that its I/O address and IRQ are
+ allocated by the PCI BIOS automatically. The special case of device 0 on
+ bus 0 is not allowed as the probe will think you're autoprobing a
+ module.
+
+ To utilise this ability, you have to do 8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) edit the source code near line 2762 to reflect the I/O address and
+ IRQ you're using, or assign these when loading by:
+
+ insmod de4x5.o irq=x io=y
+
+ 3) compile de4x5.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you want to add a new card, go to 5. Otherwise, recompile a
+ kernel with the de4x5 configuration turned off and reboot.
+ 5) insmod de4x5.o
+ 6) run the net startup bits for your new eth?? interface manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ Note that autoprobing is not allowed in loadable modules - the system is
+ already up and running and you're messing with interrupts.
+
+ To unload a module, turn off the associated interface
+ 'ifconfig eth?? down' then 'rmmod de4x5'.
+
+ Automedia detection is included so that in principle you can disconnect
+ from, e.g. TP, reconnect to BNC and things will still work (after a
+ pause whilst the driver figures out where its media went). My tests
+ using ping showed that it appears to work....
+
+ A compile time switch to allow Znyx recognition has been added. This
+ "feature" is in no way supported nor tested in this driver and the user
+ may use it at his/her sole discretion. I have had 2 conflicting reports
+ that my driver will or won't work with Znyx. Try Donald Becker's
+ 'tulip.c' if this driver doesn't work for you. I will not be supporting
+ Znyx cards since I have no information on them and can't test them in a
+ system.
+
+ TO DO:
+ ------
+
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 17-Nov-94 Initial writing. ALPHA code release.
+ 0.2 13-Jan-95 Added PCI support for DE435's.
+ 0.21 19-Jan-95 Added auto media detection.
+ 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>.
+ Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ Add request/release_region code.
+ Add loadable modules support for PCI.
+ Clean up loadable modules support.
+ 0.23 28-Feb-95 Added DC21041 and DC21140 support.
+ Fix missed frame counter value and initialisation.
+ Fixed EISA probe.
+ 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
+ Change TX_BUFFS_AVAIL macro.
+ Change media autodetection to allow manual setting.
+ Completed DE500 (DC21140) support.
+ 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm.
+ 0.242 10-May-95 Minor changes
+ 0.30 12-Jun-95 Timer fix for DC21140
+ Portability changes.
+ Add ALPHA changes from <jestabro@ant.tay1.dec.com>.
+ Add DE500 semi automatic autosense.
+ Add Link Fail interrupt TP failure detection.
+ Add timer based link change detection.
+ Plugged a memory leak in de4x5_queue_pkt().
+ 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1
+ 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from
+ suggestion by <heiko@colossus.escape.de>
+
+ =========================================================================
+*/
+
+static const char *version = "de4x5.c:v0.32 6/26/95 davies@wanton.lkg.dec.com\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/segment.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+
+#include "de4x5.h"
+
+#ifdef DE4X5_DEBUG
+static int de4x5_debug = DE4X5_DEBUG;
+#else
+static int de4x5_debug = 1;
+#endif
+
+#ifdef DE4X5_AUTOSENSE /* Should be done on a per adapter basis */
+static int de4x5_autosense = DE4X5_AUTOSENSE;
+#else
+static int de4x5_autosense = AUTO; /* Do auto media/mode sensing */
+#endif
+
+#ifdef DE4X5_FULL_DUPLEX /* Should be done on a per adapter basis */
+static s32 de4x5_full_duplex = 1;
+#else
+static s32 de4x5_full_duplex = 0;
+#endif
+
+#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */
+
+/*
+** Ethernet PROM defines
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** Ethernet Info
+*/
+#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */
+#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */
+#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */
+#define MIN_DAT_SZ 1 /* Minimum ethernet data length */
+#define PKT_HDR_LEN 14 /* Addresses and data length info */
+#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
+#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */
+
+
+#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */
+#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
+
+/*
+** EISA bus defines
+*/
+#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#define DE4X5_EISA_TOTAL_SIZE 0xfff /* I/O address extent */
+
+#define MAX_EISA_SLOTS 16
+#define EISA_SLOT_INC 0x1000
+
+#define DE4X5_SIGNATURE {"DE425",""}
+#define DE4X5_NAME_LENGTH 8
+
+/*
+** PCI Bus defines
+*/
+#define PCI_MAX_BUS_NUM 8
+#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */
+#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */
+
+/*
+** Memory Alignment. Each descriptor is 4 longwords long. To force a
+** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
+** DESC_ALIGN. ALIGN aligns the start address of the private memory area
+** and hence the RX descriptor ring's first entry.
+*/
+#define ALIGN4 ((u_long)4 - 1) /* 1 longword align */
+#define ALIGN8 ((u_long)8 - 1) /* 2 longword align */
+#define ALIGN16 ((u_long)16 - 1) /* 4 longword align */
+#define ALIGN32 ((u_long)32 - 1) /* 8 longword align */
+#define ALIGN64 ((u_long)64 - 1) /* 16 longword align */
+#define ALIGN128 ((u_long)128 - 1) /* 32 longword align */
+
+#define ALIGN ALIGN32 /* Keep the DC21040 happy... */
+#define CACHE_ALIGN CAL_16LONG
+#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */
+/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */
+#define DESC_ALIGN
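+
+/*
+** For example (illustration only): with ALIGN == ALIGN32 == 31, the private
+** area pointer is later rounded up as ((u_long)p + ALIGN) & ~ALIGN, so a
+** kmalloc() return of 0x1004 would become 0x1020 -- a 32-byte (8 longword)
+** boundary which is also the start of the RX descriptor ring.
+*/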
+
+#ifdef MACH
+#define IS_NOT_DEC
+#endif
+
+#ifndef IS_NOT_DEC /* See README.de4x5 for using this */
+static int is_not_dec = 0;
+#else
+static int is_not_dec = 1;
+#endif
+
+/*
+** DE4X5 IRQ ENABLE/DISABLE
+*/
+#define ENABLE_IRQs { \
+ imr |= lp->irq_en;\
+ outl(imr, DE4X5_IMR); /* Enable the IRQs */\
+}
+
+#define DISABLE_IRQs {\
+ imr = inl(DE4X5_IMR);\
+ imr &= ~lp->irq_en;\
+ outl(imr, DE4X5_IMR); /* Disable the IRQs */\
+}
+
+#define UNMASK_IRQs {\
+ imr |= lp->irq_mask;\
+ outl(imr, DE4X5_IMR); /* Unmask the IRQs */\
+}
+
+#define MASK_IRQs {\
+ imr = inl(DE4X5_IMR);\
+ imr &= ~lp->irq_mask;\
+ outl(imr, DE4X5_IMR); /* Mask the IRQs */\
+}
+
+/*
+** DE4X5 START/STOP
+*/
+#define START_DE4X5 {\
+ omr = inl(DE4X5_OMR);\
+ omr |= OMR_ST | OMR_SR;\
+ outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\
+}
+
+#define STOP_DE4X5 {\
+ omr = inl(DE4X5_OMR);\
+ omr &= ~(OMR_ST|OMR_SR);\
+ outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \
+}
+
+/*
+** DE4X5 SIA RESET
+*/
+#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */
+
+/*
+** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS)
+*/
+#define DE4X5_AUTOSENSE_MS 250
+
+/*
+** SROM Structure
+*/
+struct de4x5_srom {
+ char reserved[18];
+ char version;
+ char num_adapters;
+ char ieee_addr[6];
+ char info[100];
+ short chksum;
+};
+
+/*
+** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous
+** and have sizes of both a power of 2 and a multiple of 4.
+** A size of 256 bytes for each buffer could be chosen because over 90% of
+** all packets in our network are <256 bytes long and 64 longword alignment
+** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX
+** descriptors are needed for machines with an ALPHA CPU.
+*/
+#define NUM_RX_DESC 8 /* Number of RX descriptors */
+#define NUM_TX_DESC 32 /* Number of TX descriptors */
+#define BUFF_ALLOC_RETRIES 10 /* In case of memory shortage */
+#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */
+ /* Multiple of 4 for DC21040 */
+struct de4x5_desc {
+ volatile s32 status;
+ u32 des1;
+ u32 buf;
+ u32 next;
+ DESC_ALIGN
+};
+
+/*
+** The DE4X5 private structure
+*/
+#define DE4X5_PKT_STAT_SZ 16
+#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase DE4X5_PKT_STAT_SZ */
+
+struct de4x5_private {
+ char adapter_name[80]; /* Adapter name */
+ struct de4x5_desc rx_ring[NUM_RX_DESC]; /* RX descriptor ring */
+ struct de4x5_desc tx_ring[NUM_TX_DESC]; /* TX descriptor ring */
+ struct sk_buff *skb[NUM_TX_DESC]; /* TX skb for freeing when sent */
+ int rx_new, rx_old; /* RX descriptor ring pointers */
+ int tx_new, tx_old; /* TX descriptor ring pointers */
+ char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */
+ struct enet_statistics stats; /* Public stats */
+ struct {
+ u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */
+ u_int unicast;
+ u_int multicast;
+ u_int broadcast;
+ u_int excessive_collisions;
+ u_int tx_underruns;
+ u_int excessive_underruns;
+ } pktStats;
+ char rxRingSize;
+ char txRingSize;
+ int bus; /* EISA or PCI */
+ int bus_num; /* PCI Bus number */
+ int chipset; /* DC21040, DC21041 or DC21140 */
+ s32 irq_mask; /* Interrupt Mask (Enable) bits */
+ s32 irq_en; /* Summary interrupt bits */
+ int media; /* Media (eg TP), mode (eg 100B)*/
+ int linkProb; /* Possible Link Problem */
+ int autosense; /* Allow/disallow autosensing */
+ int tx_enable; /* Enable descriptor polling */
+ int lostMedia; /* Possibly lost media */
+ int setup_f; /* Setup frame filtering type */
+};
+
+
+/*
+** The transmit ring full condition is described by the tx_old and tx_new
+** pointers by:
+** tx_old = tx_new Empty ring
+** tx_old = tx_new+1 Full ring
+** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition)
+*/
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->txRingSize-lp->tx_new-1:\
+ lp->tx_old -lp->tx_new-1)
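+
+/*
+** Worked example (illustration only): with txRingSize = 32, an empty ring
+** (tx_old == tx_new) gives 32 - 1 = 31 free buffers; a full ring
+** (tx_old == tx_new + 1) gives tx_old - tx_new - 1 = 0; and the wrapped full
+** case (tx_old + 32 == tx_new + 1) also evaluates to 0 via the first branch.
+*/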
+
+/*
+** Public Functions
+*/
+static int de4x5_open(struct device *dev);
+static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
+static void de4x5_interrupt(int irq, struct pt_regs *regs);
+static int de4x5_close(struct device *dev);
+static struct enet_statistics *de4x5_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+
+/*
+** Private functions
+*/
+static int de4x5_hw_init(struct device *dev, u_long iobase);
+static int de4x5_init(struct device *dev);
+static int de4x5_rx(struct device *dev);
+static int de4x5_tx(struct device *dev);
+static int de4x5_ast(struct device *dev);
+
+static int autoconf_media(struct device *dev);
+static void create_packet(struct device *dev, char *frame, int len);
+static void dce_us_delay(u32 usec);
+static void dce_ms_delay(u32 msec);
+static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
+static void dc21040_autoconf(struct device *dev);
+static void dc21041_autoconf(struct device *dev);
+static void dc21140_autoconf(struct device *dev);
+static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
+/*static int test_sym_link(struct device *dev, u32 msec);*/
+static int ping_media(struct device *dev);
+static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
+static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
+static void load_ms_timer(struct device *dev, u32 msec);
+static int EISA_signature(char *name, s32 eisa_id);
+static int DevicePresent(u_long iobase);
+static short srom_rd(u_long address, u_char offset);
+static void srom_latch(u_int command, u_long address);
+static void srom_command(u_int command, u_long address);
+static void srom_address(u_int command, u_long address, u_char offset);
+static short srom_data(u_int command, u_long address);
+/*static void srom_busy(u_int command, u_long address);*/
+static void sendto_srom(u_int command, u_long addr);
+static int getfrom_srom(u_long addr);
+static void SetMulticastFilter(struct device *dev);
+static int get_hw_addr(struct device *dev);
+
+static void eisa_probe(struct device *dev, u_long iobase);
+static void pci_probe(struct device *dev, u_long iobase);
+static struct device *alloc_device(struct device *dev, u_long iobase);
+static char *build_setup_frame(struct device *dev, int mode);
+static void disable_ast(struct device *dev);
+static void enable_ast(struct device *dev, u32 time_out);
+static void kick_tx(struct device *dev);
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static int autoprobed = 1, loading_module = 1;
+# else
+static unsigned char de4x5_irq[] = {5,9,10,11};
+static int autoprobed = 0, loading_module = 0;
+#endif /* MODULE */
+
+static char name[DE4X5_NAME_LENGTH + 1];
+static int num_de4x5s = 0, num_eth = 0;
+
+/*
+** Kludge to get around the fact that the CSR addresses have different
+** offsets in the PCI and EISA boards. Also note that the ethernet address
+** PROM is accessed differently.
+*/
+static struct bus_type {
+ int bus;
+ int bus_num;
+ int device;
+ int chipset;
+ struct de4x5_srom srom;
+ int autosense;
+} bus;
+
+/*
+** Miscellaneous defines...
+*/
+#define RESET_DE4X5 {\
+ int i;\
+ i=inl(DE4X5_BMR);\
+ dce_ms_delay(1);\
+ outl(i | BMR_SWR, DE4X5_BMR);\
+ dce_ms_delay(1);\
+ outl(i, DE4X5_BMR);\
+ dce_ms_delay(1);\
+ for (i=0;i<5;i++) {inl(DE4X5_BMR); dce_ms_delay(1);}\
+ dce_ms_delay(1);\
+}
+
+
+
+int de4x5_probe(struct device *dev)
+{
+ int tmp = num_de4x5s, status = -ENODEV;
+ u_long iobase = dev->base_addr;
+
+ if ((iobase == 0) && loading_module){
+ printk("Autoprobing is not supported when loading a module based driver.\n");
+ status = -EIO;
+ } else {
+ eisa_probe(dev, iobase);
+ pci_probe(dev, iobase);
+
+ if ((tmp == num_de4x5s) && (iobase != 0) && loading_module) {
+ printk("%s: de4x5_probe() cannot find device at 0x%04lx.\n", dev->name,
+ iobase);
+ }
+
+ /*
+ ** Walk the device list to check that at least one device
+ ** initialised OK
+ */
+ for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
+
+ if (dev->priv) status = 0;
+ if (iobase == 0) autoprobed = 1;
+ }
+
+ return status;
+}
+
+static int
+de4x5_hw_init(struct device *dev, u_long iobase)
+{
+ struct bus_type *lp = &bus;
+ int tmpbus, tmpchs, i, j, status=0;
+ char *tmp;
+
+ /* Ensure we're not sleeping */
+ if (lp->chipset == DC21041) {
+ outl(0, PCI_CFDA);
+ dce_ms_delay(10);
+ }
+
+ RESET_DE4X5;
+
+ if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) == 0) {
+ /*
+ ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
+ */
+ if (lp->bus == PCI) {
+ if (!is_not_dec) {
+ if ((lp->chipset == DC21040) || (lp->chipset == DC21041)) {
+ strcpy(name, "DE435");
+ } else if (lp->chipset == DC21140) {
+ strcpy(name, "DE500"); /* Must read the SROM here! */
+ }
+ } else {
+ strcpy(name, "UNKNOWN");
+ }
+ } else {
+ EISA_signature(name, EISA_ID0);
+ }
+
+ if (*name != '\0') { /* found a board signature */
+ dev->base_addr = iobase;
+ if (lp->bus == EISA) {
+ printk("%s: %s at %04lx (EISA slot %ld)",
+ dev->name, name, iobase, ((iobase>>12)&0x0f));
+ } else { /* PCI port address */
+ printk("%s: %s at %04lx (PCI bus %d, device %d)", dev->name, name,
+ iobase, lp->bus_num, lp->device);
+ }
+
+ printk(", h/w address ");
+ status = get_hw_addr(dev);
+ for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x,\n", dev->dev_addr[i]);
+
+ tmpbus = lp->bus;
+ tmpchs = lp->chipset;
+
+ if (status == 0) {
+ struct de4x5_private *lp;
+
+ /*
+ ** Reserve a section of kernel memory for the adapter
+ ** private area and the TX/RX descriptor rings.
+ */
+ dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
+ GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ /*
+ ** Align to a longword boundary
+ */
+ dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
+ lp = (struct de4x5_private *)dev->priv;
+ memset(dev->priv, 0, sizeof(struct de4x5_private));
+ lp->bus = tmpbus;
+ lp->chipset = tmpchs;
+
+ /*
+ ** Choose autosensing
+ */
+ if (de4x5_autosense & AUTO) {
+ lp->autosense = AUTO;
+ } else {
+ if (lp->chipset != DC21140) {
+ if ((lp->chipset == DC21040) && (de4x5_autosense & TP_NW)) {
+ de4x5_autosense = TP;
+ }
+ if ((lp->chipset == DC21041) && (de4x5_autosense & BNC_AUI)) {
+ de4x5_autosense = BNC;
+ }
+ lp->autosense = de4x5_autosense & 0x001f;
+ } else {
+ lp->autosense = de4x5_autosense & 0x00c0;
+ }
+ }
+
+ sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
+ request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
+ DE4X5_EISA_TOTAL_SIZE),
+ lp->adapter_name);
+
+ /*
+ ** Allocate contiguous receive buffers, long word aligned.
+ ** This could be a possible memory leak if the private area
+ ** is ever hosed.
+ */
+ for (tmp=NULL, j=0; (j<BUFF_ALLOC_RETRIES) && (tmp==NULL); j++) {
+ if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
+ GFP_KERNEL)) != NULL) {
+ tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
+ for (i=0; i<NUM_RX_DESC; i++) {
+ lp->rx_ring[i].status = 0;
+ lp->rx_ring[i].des1 = RX_BUFF_SZ;
+ lp->rx_ring[i].buf = virt_to_bus(tmp + i * RX_BUFF_SZ);
+ lp->rx_ring[i].next = (u32)NULL;
+ }
+ barrier();
+ }
+ }
+
+ if (tmp != NULL) {
+ lp->rxRingSize = NUM_RX_DESC;
+ lp->txRingSize = NUM_TX_DESC;
+
+ /* Write the end of list marker to the descriptor lists */
+ lp->rx_ring[lp->rxRingSize - 1].des1 |= RD_RER;
+ lp->tx_ring[lp->txRingSize - 1].des1 |= TD_TER;
+
+ /* Tell the adapter where the TX/RX rings are located. */
+ outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
+ outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
+
+ /* Initialise the IRQ mask and Enable/Disable */
+ lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM ;
+ lp->irq_en = IMR_NIM | IMR_AIM;
+
+ lp->tx_enable = TRUE;
+
+ if (dev->irq < 2) {
+#ifndef MODULE
+ unsigned char irqnum;
+ s32 omr;
+ autoirq_setup(0);
+
+ omr = inl(DE4X5_OMR);
+ outl(IMR_AIM|IMR_RUM, DE4X5_IMR); /* Unmask RUM interrupt */
+ outl(OMR_SR | omr, DE4X5_OMR); /* Start RX w/no descriptors */
+
+ irqnum = autoirq_report(1);
+ if (!irqnum) {
+ printk(" and failed to detect IRQ line.\n");
+ status = -ENXIO;
+ } else {
+ for (dev->irq=0,i=0; (i<sizeof(de4x5_irq)) && (!dev->irq); i++) {
+ if (irqnum == de4x5_irq[i]) {
+ dev->irq = irqnum;
+ printk(" and uses IRQ%d.\n", dev->irq);
+ }
+ }
+
+ if (!dev->irq) {
+ printk(" but incorrect IRQ line detected.\n");
+ status = -ENXIO;
+ }
+ }
+
+ outl(0, DE4X5_IMR); /* Re-mask RUM interrupt */
+
+#endif /* MODULE */
+ } else {
+ printk(" and requires IRQ%d (not probed).\n", dev->irq);
+ }
+ } else {
+ printk("%s: Kernel could not allocate RX buffer memory.\n",
+ dev->name);
+ status = -ENXIO;
+ }
+ if (status) release_region(iobase, (lp->bus == PCI ?
+ DE4X5_PCI_TOTAL_SIZE :
+ DE4X5_EISA_TOTAL_SIZE));
+ } else {
+ printk(" which has an Ethernet PROM CRC error.\n");
+ status = -ENXIO;
+ }
+ } else {
+ status = -ENXIO;
+ }
+ } else {
+ status = -ENXIO;
+ }
+
+ if (!status) {
+ if (de4x5_debug > 0) {
+ printk(version);
+ }
+
+ /* The DE4X5-specific entries in the device structure. */
+ dev->open = &de4x5_open;
+ dev->hard_start_xmit = &de4x5_queue_pkt;
+ dev->stop = &de4x5_close;
+ dev->get_stats = &de4x5_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &de4x5_ioctl;
+
+ dev->mem_start = 0;
+
+ /* Fill in the generic field of the device structure. */
+ ether_setup(dev);
+
+ /* Let the adapter sleep to save power */
+ if (lp->chipset == DC21041) {
+ outl(0, DE4X5_SICR);
+ outl(CFDA_PSM, PCI_CFDA);
+ }
+ } else { /* Incorrectly initialised hardware */
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ if (lp) {
+ kfree_s(bus_to_virt(lp->rx_ring[0].buf),
+ RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
+ }
+ if (dev->priv) {
+ kfree_s(dev->priv, sizeof(struct de4x5_private) + ALIGN);
+ dev->priv = NULL;
+ }
+ }
+
+ return status;
+}
+
+
+static int
+de4x5_open(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ s32 imr, omr, sts;
+
+ /*
+ ** Wake up the adapter
+ */
+ if (lp->chipset == DC21041) {
+ outl(0, PCI_CFDA);
+ dce_ms_delay(10);
+ }
+
+ if (request_irq(dev->irq, (void *)de4x5_interrupt, 0, lp->adapter_name)) {
+ printk("de4x5_open(): Requested IRQ%d is busy\n",dev->irq);
+ status = -EAGAIN;
+ } else {
+
+ irq2dev_map[dev->irq] = dev;
+ /*
+ ** Re-initialize the DE4X5...
+ */
+ status = de4x5_init(dev);
+
+ if (de4x5_debug > 1){
+ printk("%s: de4x5 open with irq %d\n",dev->name,dev->irq);
+ printk("\tphysical address: ");
+ for (i=0;i<6;i++){
+ printk("%2.2x:",(short)dev->dev_addr[i]);
+ }
+ printk("\n");
+ printk("Descriptor head addresses:\n");
+ printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
+ printk("Descriptor addresses:\nRX: ");
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
+ }
+ }
+ printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
+ printk("TX: ");
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
+ }
+ }
+ printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
+ printk("Descriptor buffers:\nRX: ");
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8x ",lp->rx_ring[i].buf);
+ }
+ }
+ printk("...0x%8.8x\n",lp->rx_ring[i].buf);
+ printk("TX: ");
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8x ", lp->tx_ring[i].buf);
+ }
+ }
+ printk("...0x%8.8x\n", lp->tx_ring[i].buf);
+ printk("Ring size: \nRX: %d\nTX: %d\n",
+ (short)lp->rxRingSize,
+ (short)lp->txRingSize);
+ printk("\tstatus: %d\n", status);
+ }
+
+ if (!status) {
+ dev->tbusy = 0;
+ dev->start = 1;
+ dev->interrupt = UNMASK_INTERRUPTS;
+ dev->trans_start = jiffies;
+
+ START_DE4X5;
+
+ /* Unmask and enable DE4X5 board interrupts */
+ imr = 0;
+ UNMASK_IRQs;
+
+ /* Reset any pending (stale) interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+
+ ENABLE_IRQs;
+ }
+ if (de4x5_debug > 1) {
+ printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
+ printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
+ printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
+ printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
+ printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
+ printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
+ printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
+ printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
+ }
+ }
+
+ MOD_INC_USE_COUNT;
+
+ return status;
+}
+
+/*
+** Initialize the DE4X5 operating conditions. NB: a chip problem with the
+** DC21140 requires using perfect filtering mode for that chip. Since I can't
+** see why I'd want > 14 multicast addresses, I may change all chips to use
+** the perfect filtering mode. Keep the DMA burst length at 8: there seem
+** to be data corruption problems if it is larger (UDP errors seen from a
+** ttcp source).
+*/
+static int
+de4x5_init(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ s32 bmr, omr;
+
+ /* Lock out other processes whilst setting up the hardware */
+ set_bit(0, (void *)&dev->tbusy);
+
+ RESET_DE4X5;
+
+ bmr = inl(DE4X5_BMR);
+ bmr |= PBL_8 | DESC_SKIP_LEN | CACHE_ALIGN;
+ outl(bmr, DE4X5_BMR);
+
+ if (lp->chipset != DC21140) {
+ omr = TR_96;
+ lp->setup_f = HASH_PERF;
+ } else {
+ omr = OMR_SDP | OMR_SF;
+ lp->setup_f = PERFECT;
+ }
+ outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
+ outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
+
+ lp->rx_new = lp->rx_old = 0;
+ lp->tx_new = lp->tx_old = 0;
+
+ for (i = 0; i < lp->rxRingSize; i++) {
+ lp->rx_ring[i].status = R_OWN;
+ }
+
+ for (i = 0; i < lp->txRingSize; i++) {
+ lp->tx_ring[i].status = 0;
+ }
+
+ barrier();
+
+ /* Build the setup frame depending on filtering mode */
+ SetMulticastFilter(dev);
+
+ if (lp->chipset != DC21140) {
+ load_packet(dev, lp->setup_frame, HASH_F|TD_SET|SETUP_FRAME_LEN, NULL);
+ } else {
+ load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
+ }
+ outl(omr|OMR_ST, DE4X5_OMR);
+
+ /* Poll for completion of setup frame (interrupts are disabled for now) */
+ for (j=0, i=jiffies;(i<=jiffies+HZ/100) && (j==0);) {
+ if (lp->tx_ring[lp->tx_new].status >= 0) j=1;
+ }
+ outl(omr, DE4X5_OMR); /* Stop everything! */
+
+ if (j == 0) {
+ printk("%s: Setup frame timed out, status %08x\n", dev->name,
+ inl(DE4X5_STS));
+ status = -EIO;
+ }
+
+ lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_old = lp->tx_new;
+
+ /* Autoconfigure the connected port */
+ if (autoconf_media(dev) == 0) {
+ status = -EIO;
+ }
+
+ return 0;
+}
+
+/*
+** Writes a socket buffer address to the next available transmit descriptor
+*/
+static int
+de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ s32 imr, omr, sts;
+
+ /*
+ ** Clean out the TX ring asynchronously to interrupts - sometimes the
+ ** interrupts are lost by delayed descriptor status updates relative to
+ ** the irq assertion, especially with a busy PCI bus.
+ */
+ if (set_bit(0, (void*)&dev->tbusy) == 0) {
+ cli();
+ de4x5_tx(dev);
+ dev->tbusy = 0;
+ sti();
+ }
+
+ /*
+ ** Transmitter timeout, possibly serious problems.
+ ** The 'lostMedia' threshold accounts for transient errors that
+ ** were noticed when switching media.
+ */
+ if (dev->tbusy || (lp->lostMedia > LOST_MEDIA_THRESHOLD)) {
+ u_long tickssofar = jiffies - dev->trans_start;
+ if ((tickssofar < QUEUE_PKT_TIMEOUT) &&
+ (lp->lostMedia <= LOST_MEDIA_THRESHOLD)) {
+ status = -1;
+ } else {
+ if (de4x5_debug >= 1) {
+ printk("%s: transmit timed out, status %08x, tbusy:%ld, lostMedia:%d tickssofar:%ld, resetting.\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, tickssofar);
+ }
+
+ /* Stop and reset the TX and RX... */
+ STOP_DE4X5;
+
+ /* Re-queue any skb's. */
+ for (i=lp->tx_old; i!=lp->tx_new; i=(++i)%lp->txRingSize) {
+ if (lp->skb[i] != NULL) {
+ if (lp->skb[i]->len != FAKE_FRAME_LEN) {
+ if (lp->tx_ring[i].status == T_OWN) {
+ dev_queue_xmit(lp->skb[i], dev, SOPRI_NORMAL);
+ } else { /* already sent */
+ dev_kfree_skb(lp->skb[i], FREE_WRITE);
+ }
+ } else {
+ dev_kfree_skb(lp->skb[i], FREE_WRITE);
+ }
+ lp->skb[i] = NULL;
+ }
+ }
+ if (skb->len != FAKE_FRAME_LEN) {
+ dev_queue_xmit(skb, dev, SOPRI_NORMAL);
+ } else {
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+
+ /* Initialise the hardware */
+ status = de4x5_init(dev);
+
+ /* Unmask DE4X5 board interrupts */
+ if (!status) {
+ /* Start here to clean stale interrupts later */
+ dev->interrupt = UNMASK_INTERRUPTS;
+ dev->start = 1;
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+
+ START_DE4X5;
+
+ /* Unmask DE4X5 board interrupts */
+ imr = 0;
+ UNMASK_IRQs;
+
+ /* Clear any pending (stale) interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+
+ ENABLE_IRQs;
+ } else {
+ printk("%s: hardware initialisation failure, status %08x.\n",
+ dev->name, inl(DE4X5_STS));
+ }
+ }
+ } else if (skb == NULL) {
+ dev_tint(dev);
+ } else if (skb->len == FAKE_FRAME_LEN) { /* Don't TX a fake frame! */
+ dev_kfree_skb(skb, FREE_WRITE);
+ } else if (skb->len > 0) {
+ /* Enforce 1 process per h/w access */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ status = -1; /* Re-queue packet */
+ } else {
+ cli();
+ if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */
+ load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
+ if (lp->tx_enable) {
+ outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
+ }
+
+ lp->tx_new = (++lp->tx_new) % lp->txRingSize; /* Ensure a wrap */
+ dev->trans_start = jiffies;
+
+ if (TX_BUFFS_AVAIL) {
+ dev->tbusy = 0; /* Another pkt may be queued */
+ }
+ } else { /* Ring full - re-queue */
+ status = -1;
+ }
+ sti();
+ }
+ }
+
+ return status;
+}
+
+/*
+** The DE4X5 interrupt handler.
+**
+** I/O Read/Writes through intermediate PCI bridges are never 'posted',
+** so that the asserted interrupt always has some real data to work with -
+** if these I/O accesses are ever changed to memory accesses, ensure the
+** STS write is read immediately to complete the transaction if the adapter
+** is not on bus 0. Lost interrupts can still occur when the PCI bus load
+** is high and descriptor status bits cannot be set before the associated
+** interrupt is asserted and this routine entered.
+*/
+static void
+de4x5_interrupt(int irq, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct de4x5_private *lp;
+ s32 imr, omr, sts;
+ u_long iobase;
+
+ if (dev == NULL) {
+ printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
+ } else {
+ lp = (struct de4x5_private *)dev->priv;
+ iobase = dev->base_addr;
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ DISABLE_IRQs; /* Ensure non re-entrancy */
+ dev->interrupt = MASK_INTERRUPTS;
+
+ while ((sts = inl(DE4X5_STS)) & lp->irq_mask) { /* Read IRQ status */
+ outl(sts, DE4X5_STS); /* Reset the board interrupts */
+
+ if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
+ de4x5_rx(dev);
+
+ if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
+ de4x5_tx(dev);
+
+ if (sts & STS_TM) /* Autosense tick */
+ de4x5_ast(dev);
+
+ if (sts & STS_LNF) { /* TP Link has failed */
+ lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
+ lp->irq_mask &= ~IMR_LFM;
+ kick_tx(dev);
+ }
+
+ if (sts & STS_SE) { /* Bus Error */
+ STOP_DE4X5;
+ printk("%s: Fatal bus error occured, sts=%#8x, device stopped.\n",
+ dev->name, sts);
+ }
+ }
+
+ if (TX_BUFFS_AVAIL && dev->tbusy) {/* Any resources available? */
+ dev->tbusy = 0; /* Clear TX busy flag */
+ mark_bh(NET_BH);
+ }
+
+ dev->interrupt = UNMASK_INTERRUPTS;
+ ENABLE_IRQs;
+ }
+
+ return;
+}
+
+static int
+de4x5_rx(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i, entry;
+ s32 status;
+ char *buf;
+
+ for (entry = lp->rx_new; lp->rx_ring[entry].status >= 0;entry = lp->rx_new) {
+ status = lp->rx_ring[entry].status;
+
+ if (status & RD_FS) { /* Remember the start of frame */
+ lp->rx_old = entry;
+ }
+
+ if (status & RD_LS) { /* Valid frame status */
+ if (status & RD_ES) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
+ if (status & RD_CE) lp->stats.rx_crc_errors++;
+ if (status & RD_OF) lp->stats.rx_fifo_errors++;
+ } else { /* A valid frame received */
+ struct sk_buff *skb;
+ short pkt_len = (short)(lp->rx_ring[entry].status >> 16) - 4;
+
+ if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) {
+ skb->dev = dev;
+
+ skb_reserve(skb,2); /* Align */
+ if (entry < lp->rx_old) { /* Wrapped buffer */
+ short len = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
+ memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len);
+ memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len);
+ } else { /* Linear buffer */
+ memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len);
+ }
+
+ /* Push up the protocol stack */
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+
+ /* Update stats */
+ lp->stats.rx_packets++;
+ for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
+ if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
+ lp->pktStats.bins[i]++;
+ i = DE4X5_PKT_STAT_SZ;
+ }
+ }
+ buf = skb->data; /* Look at the dest addr */
+ if (buf[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
+ (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+ } else {
+ printk("%s: Insufficient memory; nuking packet.\n", dev->name);
+ lp->stats.rx_dropped++; /* Really, deferred. */
+ break;
+ }
+ }
+
+ /* Change buffer ownership for this last frame, back to the adapter */
+ for (; lp->rx_old!=entry; lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
+ lp->rx_ring[lp->rx_old].status = R_OWN;
+ barrier();
+ }
+ lp->rx_ring[entry].status = R_OWN;
+ barrier();
+ }
+
+ /*
+ ** Update entry information
+ */
+ lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
+ }
+
+ return 0;
+}
+
+/*
+** Buffer sent - check for TX buffer errors.
+*/
+static int
+de4x5_tx(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int entry;
+ s32 status;
+
+ for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
+ status = lp->tx_ring[entry].status;
+ if (status < 0) { /* Buffer not sent yet */
+ break;
+ } else if (status & TD_ES) { /* An error happened */
+ lp->stats.tx_errors++;
+ if (status & TD_NC) lp->stats.tx_carrier_errors++;
+ if (status & TD_LC) lp->stats.tx_window_errors++;
+ if (status & TD_UF) lp->stats.tx_fifo_errors++;
+ if (status & TD_LC) lp->stats.collisions++;
+ if (status & TD_EC) lp->pktStats.excessive_collisions++;
+ if (status & TD_DE) lp->stats.tx_aborted_errors++;
+
+ if ((status != 0x7fffffff) && /* Not setup frame */
+ (status & (TD_LO | TD_NC | TD_EC | TD_LF))) {
+ lp->lostMedia++;
+ if (lp->lostMedia > LOST_MEDIA_THRESHOLD) { /* Trip autosense */
+ kick_tx(dev);
+ }
+ } else {
+ outl(POLL_DEMAND, DE4X5_TPD); /* Restart a stalled TX */
+ }
+ } else { /* Packet sent */
+ lp->stats.tx_packets++;
+ lp->lostMedia = 0; /* Remove transient problem */
+ }
+ /* Free the buffer if it's not a setup frame. */
+ if (lp->skb[entry] != NULL) {
+ dev_kfree_skb(lp->skb[entry], FREE_WRITE);
+ lp->skb[entry] = NULL;
+ }
+
+ /* Update all the pointers */
+ lp->tx_old = (++lp->tx_old) % lp->txRingSize;
+ }
+
+ return 0;
+}
+
+static int
+de4x5_ast(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 gep;
+
+ disable_ast(dev);
+
+ if (lp->chipset == DC21140) {
+ gep = inl(DE4X5_GEP);
+ if (((lp->media == _100Mb) && (gep & GEP_SLNK)) ||
+ ((lp->media == _10Mb) && (gep & GEP_LNP)) ||
+ ((lp->media == _10Mb) && !(gep & GEP_SLNK)) ||
+ (lp->media == NC)) {
+ if (lp->linkProb || ((lp->media == NC) && (!(gep & GEP_LNP)))) {
+ lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
+ lp->linkProb = 0;
+ kick_tx(dev);
+ } else {
+ switch(lp->media) {
+ case NC:
+ lp->linkProb = 0;
+ enable_ast(dev, DE4X5_AUTOSENSE_MS);
+ break;
+
+ case _10Mb:
+ lp->linkProb = 1; /* Flag a potential problem */
+ enable_ast(dev, 1500);
+ break;
+
+ case _100Mb:
+ lp->linkProb = 1; /* Flag a potential problem */
+ enable_ast(dev, 4000);
+ break;
+ }
+ }
+ } else {
+ lp->linkProb = 0; /* Link OK */
+ enable_ast(dev, DE4X5_AUTOSENSE_MS);
+ }
+ }
+
+ return 0;
+}
+
+static int
+de4x5_close(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 imr, omr;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (de4x5_debug > 1) {
+ printk("%s: Shutting down ethercard, status was %8.8x.\n",
+ dev->name, inl(DE4X5_STS));
+ }
+
+ /*
+ ** We stop the DE4X5 here... mask interrupts and stop TX & RX
+ */
+ DISABLE_IRQs;
+
+ STOP_DE4X5;
+
+ /*
+ ** Free the associated irq
+ */
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = 0;
+
+ MOD_DEC_USE_COUNT;
+
+ /* Put the adapter to sleep to save power */
+ if (lp->chipset == DC21041) {
+ outl(0, DE4X5_SICR);
+ outl(CFDA_PSM, PCI_CFDA);
+ }
+
+ return 0;
+}
+
+static struct enet_statistics *
+de4x5_get_stats(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ lp->stats.rx_missed_errors = (int) (inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
+
+ return &lp->stats;
+}
+
+static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ lp->tx_ring[lp->tx_new].buf = virt_to_bus(buf);
+ lp->tx_ring[lp->tx_new].des1 &= TD_TER;
+ lp->tx_ring[lp->tx_new].des1 |= flags;
+ lp->skb[lp->tx_new] = skb;
+ barrier();
+ lp->tx_ring[lp->tx_new].status = T_OWN;
+ barrier();
+
+ return;
+}
+/*
+** Set or clear the multicast filter for this adaptor.
+** num_addrs == -1 Promiscuous mode, receive all packets - now supported.
+** Can also use the ioctls.
+** num_addrs == 0 Normal mode, clear multicast list
+** num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+** best-effort filtering.
+** num_addrs == HASH_TABLE_LEN
+** Set all multicast bits (pass all multicasts).
+*/
+static void
+set_multicast_list(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ /* First, double check that the adapter is open */
+ if (irq2dev_map[dev->irq] != NULL) {
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ u32 omr;
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PR;
+ outl(omr, DE4X5_OMR);
+ } else {
+ SetMulticastFilter(dev);
+ if (lp->setup_f == HASH_PERF) {
+ load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
+ SETUP_FRAME_LEN, NULL);
+ } else {
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ SETUP_FRAME_LEN, NULL);
+ }
+
+ lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
+ dev->trans_start = jiffies;
+ }
+ }
+
+ return;
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Little endian crc one liner from Matt Thomas, DEC.
+*/
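+/*
+** Worked example (illustration only): if the CRC of a multicast address gives
+** hashcode 27 (binary 000011011), then byte = 27 >> 3 = 3 and
+** bit = 1 << (27 & 7) = 0x08; the offset adjustment below (byte <<= 1 gives
+** 6, and since 6 & 0x02 is set, byte -= 1 gives 5) means bit 3 of
+** lp->setup_frame[5] is set for this address.
+*/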
+static void SetMulticastFilter(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct dev_mc_list *dmi=dev->mc_list;
+ u_long iobase = dev->base_addr;
+ int i, j, bit, byte;
+ u16 hashcode;
+ u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
+ char *pa;
+ unsigned char *addrs;
+
+ omr = inl(DE4X5_OMR);
+ omr &= ~OMR_PR;
+ pa = build_setup_frame(dev, ALL); /* Build the basic frame */
+
+ if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
+ omr |= OMR_PM; /* Pass all multicasts */
+ } else if (lp->setup_f == HASH_PERF) {
+ /* Now update the MCA table */
+ for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
+ addrs=dmi->dmi_addr;
+ dmi=dmi->next;
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
+ crc = 0xffffffff; /* init CRC for each address */
+ for (byte=0;byte<ETH_ALEN;byte++) { /* for each address byte */
+ /* process each address bit */
+ for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
+ crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
+ }
+ }
+ hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
+
+ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+
+ byte <<= 1; /* calc offset into setup frame */
+ if (byte & 0x02) {
+ byte -= 1;
+ }
+ lp->setup_frame[byte] |= bit;
+ }
+ }
+ } else { /* Perfect filtering */
+ for (j=0; j<dev->mc_count; j++) {
+ addrs=dmi->dmi_addr;
+ dmi=dmi->next;
+ for (i=0; i<ETH_ALEN; i++) {
+ *(pa + (i&1)) = *addrs++;
+ if (i & 0x01) pa += 4;
+ }
+ }
+ }
+ outl(omr, DE4X5_OMR);
+
+ return;
+}
+
+/*
+** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+** the motherboard. Up to 15 EISA devices are supported.
+*/
+static void eisa_probe(struct device *dev, u_long ioaddr)
+{
+ int i, maxSlots, status;
+ u_short vendor, device;
+ s32 cfid;
+ u_long iobase;
+ struct bus_type *lp = &bus;
+ char name[DE4X5_STRLEN];
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+ if ((ioaddr < 0x1000) && (ioaddr > 0)) return; /* PCI MODULE special */
+
+ lp->bus = EISA;
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EISA_SLOT_INC; /* Get the first slot address */
+ i = 1;
+ maxSlots = MAX_EISA_SLOTS;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ i = (ioaddr >> 12);
+ maxSlots = i + 1;
+ }
+
+ for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
+ if (EISA_signature(name, EISA_ID)) {
+ cfid = inl(PCI_CFID);
+ device = (u_short)(cfid >> 16);
+ vendor = (u_short) cfid;
+
+ lp->bus = EISA;
+ lp->chipset = device;
+ if (DevicePresent(EISA_APROM) == 0) {
+ /* Write the PCI Configuration Registers */
+ outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
+ outl(0x00004000, PCI_CFLT);
+ outl(iobase, PCI_CBIO);
+
+ if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
+ if ((dev = alloc_device(dev, iobase)) != NULL) {
+ if ((status = de4x5_hw_init(dev, iobase)) == 0) {
+ num_de4x5s++;
+ }
+ num_eth++;
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
+ }
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** PCI bus I/O device probe
+** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not
+** the driver. Some PCI BIOSes, pre V2.1, need the slot + features to be
+** enabled by the user first in the set up utility. Hence we just check for
+** enabled features and silently ignore the card if they're not.
+**
+** STOP PRESS: Some BIOSes __require__ the driver to enable the bus mastering
+** bit. Here, check for I/O accesses and then set BM. If you put the card in
+** a non BM slot, you're on your own (and complain to the PC vendor that your
+** PC doesn't conform to the PCI standard)!
+*/
+#define PCI_DEVICE (dev_num << 3)
+#define PCI_LAST_DEV 32
+
+static void pci_probe(struct device *dev, u_long ioaddr)
+{
+ u_char irq;
+ u_char pb, pbus, dev_num, dnum, dev_fn;
+ u_short vendor, device, index, status;
+ u_int class = DE4X5_CLASS_CODE;
+ u_int iobase;
+ struct bus_type *lp = &bus;
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+
+ if (pcibios_present()) {
+ lp->bus = PCI;
+
+ if (ioaddr < 0x1000) {
+ pbus = (u_short)(ioaddr >> 8);
+ dnum = (u_short)(ioaddr & 0xff);
+ } else {
+ pbus = 0;
+ dnum = 0;
+ }
+
+ for (index=0;
+ (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
+ index++) {
+ dev_num = PCI_SLOT(dev_fn);
+
+ if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &device);
+ if (is_DC21040 || is_DC21041 || is_DC21140) {
+ /* Set the device number information */
+ lp->device = dev_num;
+ lp->bus_num = pb;
+
+ /* Set the chipset information */
+ lp->chipset = device;
+
+ /* Get the board I/O address */
+ pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &iobase);
+ iobase &= CBIO_MASK;
+
+ /* Fetch the IRQ to be used */
+ pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &irq);
+
+ /* Check if I/O accesses and Bus Mastering are enabled */
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
+ if (status & PCI_COMMAND_IO) {
+ if (!(status & PCI_COMMAND_MASTER)) {
+ status |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
+ }
+ if (status & PCI_COMMAND_MASTER) {
+ if ((DevicePresent(DE4X5_APROM) == 0) || is_not_dec) {
+ if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
+ if ((dev = alloc_device(dev, iobase)) != NULL) {
+ dev->irq = irq;
+ if ((status = de4x5_hw_init(dev, iobase)) == 0) {
+ num_de4x5s++;
+ }
+ num_eth++;
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04x.\n", dev->name, (u_short)iobase);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** Allocate the device by pointing to the next available space in the
+** device structure. Should one not be available, it is created.
+*/
+static struct device *alloc_device(struct device *dev, u_long iobase)
+{
+ int addAutoProbe = 0;
+ struct device *tmp = NULL, *ret;
+ int (*init)(struct device *) = NULL;
+
+ /*
+ ** Check the device structures for an end of list or unused device
+ */
+ if (!loading_module) {
+ while (dev->next != NULL) {
+ if ((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0)) break;
+ dev = dev->next; /* walk through eth device list */
+ num_eth++; /* increment eth device number */
+ }
+
+ /*
+ ** If an autoprobe is requested for another device, we must re-insert
+ ** the request later in the list. Remember the current position first.
+ */
+ if ((dev->base_addr == 0) && (num_de4x5s > 0)) {
+ addAutoProbe++;
+ tmp = dev->next; /* point to the next device */
+ init = dev->init; /* remember the probe function */
+ }
+
+ /*
+ ** If at end of list and can't use current entry, malloc one up.
+ ** If memory could not be allocated, print an error message.
+ */
+ if ((dev->next == NULL) &&
+ !((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0))){
+ dev->next = (struct device *)kmalloc(sizeof(struct device) + 8,
+ GFP_KERNEL);
+
+ dev = dev->next; /* point to the new device */
+ if (dev == NULL) {
+ printk("eth%d: Device not initialised, insufficient memory\n",
+ num_eth);
+ } else {
+ /*
+ ** If the memory was allocated, point to the new memory area
+ ** and initialize it (name, I/O address, next device (NULL) and
+ ** initialisation probe routine).
+ */
+ dev->name = (char *)(dev + sizeof(struct device));
+ if (num_eth > 9999) {
+ sprintf(dev->name,"eth????"); /* New device name */
+ } else {
+ sprintf(dev->name,"eth%d", num_eth);/* New device name */
+ }
+ dev->base_addr = iobase; /* assign the io address */
+ dev->next = NULL; /* mark the end of list */
+ dev->init = &de4x5_probe; /* initialisation routine */
+ num_de4x5s++;
+ }
+ }
+ ret = dev; /* return current struct, or NULL */
+
+ /*
+ ** Now figure out what to do with the autoprobe that has to be inserted.
+ ** Firstly, search the (possibly altered) list for an empty space.
+ */
+ if (ret != NULL) {
+ if (addAutoProbe) {
+ for (; (tmp->next!=NULL) && (tmp->base_addr!=DE4X5_NDA); tmp=tmp->next);
+
+ /*
+ ** If no more device structures and can't use the current one, malloc
+ ** one up. If memory could not be allocated, print an error message.
+ */
+ if ((tmp->next == NULL) && !(tmp->base_addr == DE4X5_NDA)) {
+ tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
+ GFP_KERNEL);
+ tmp = tmp->next; /* point to the new device */
+ if (tmp == NULL) {
+ printk("%s: Insufficient memory to extend the device list.\n",
+ dev->name);
+ } else {
+ /*
+ ** If the memory was allocated, point to the new memory area
+ ** and initialize it (name, I/O address, next device (NULL) and
+ ** initialisation probe routine).
+ */
+ tmp->name = (char *)(tmp + sizeof(struct device));
+ if (num_eth > 9999) {
+ sprintf(tmp->name,"eth????"); /* New device name */
+ } else {
+ sprintf(tmp->name,"eth%d", num_eth);/* New device name */
+ }
+ tmp->base_addr = 0; /* re-insert the io address */
+ tmp->next = NULL; /* mark the end of list */
+ tmp->init = init; /* initialisation routine */
+ }
+ } else { /* structure already exists */
+ tmp->base_addr = 0; /* re-insert the io address */
+ }
+ }
+ }
+ } else {
+ ret = dev;
+ }
+
+ return ret;
+}
+
+/*
+** Auto configure the media here rather than setting the port at compile
+** time. This routine is called by de4x5_init() when a loss of media is
+** detected (excessive collisions, loss of carrier, no carrier or link fail
+** [TP]) to check whether the user has been sneaky and changed the port on us.
+*/
+static int autoconf_media(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ lp->tx_enable = YES;
+ if (de4x5_debug > 0 ) {
+ if (lp->chipset != DC21140) {
+ printk("%s: Searching for media... ",dev->name);
+ } else {
+ printk("%s: Searching for mode... ",dev->name);
+ }
+ }
+
+ if (lp->chipset == DC21040) {
+ lp->media = (lp->autosense == AUTO ? TP : lp->autosense);
+ dc21040_autoconf(dev);
+ } else if (lp->chipset == DC21041) {
+ lp->media = (lp->autosense == AUTO ? TP_NW : lp->autosense);
+ dc21041_autoconf(dev);
+ } else if (lp->chipset == DC21140) {
+ disable_ast(dev);
+ lp->media = (lp->autosense == AUTO ? _10Mb : lp->autosense);
+ dc21140_autoconf(dev);
+ }
+
+ if (de4x5_debug > 0 ) {
+ if (lp->chipset != DC21140) {
+ printk("media is %s\n", (lp->media == NC ? "unconnected!" :
+ (lp->media == TP ? "TP." :
+ (lp->media == ANS ? "TP/Nway." :
+ (lp->media == BNC ? "BNC." :
+ (lp->media == AUI ? "AUI." :
+ "BNC/AUI."
+ ))))));
+ } else {
+ printk("mode is %s\n",(lp->media == NC ? "link down.":
+ (lp->media == _100Mb ? "100Mb/s." :
+ (lp->media == _10Mb ? "10Mb/s." :
+ "\?\?\?"
+ ))));
+ }
+ }
+
+ if (lp->media) {
+ lp->lostMedia = 0;
+ inl(DE4X5_MFC); /* Zero the lost frames counter */
+ if ((lp->media == TP) || (lp->media == ANS)) {
+ lp->irq_mask |= IMR_LFM;
+ }
+ }
+ dce_ms_delay(10);
+
+ return (lp->media);
+}
+
+static void dc21040_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, linkBad;
+ s32 sisr = 0, t_3s = 3000;
+
+ switch (lp->media) {
+ case TP:
+ reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
+ for (linkBad=1,i=0;(i<t_3s) && linkBad && !(sisr & SISR_NCR);i++) {
+ if (((sisr = inl(DE4X5_SISR)) & SISR_LKF) == 0) linkBad = 0;
+ dce_ms_delay(1);
+ }
+ if (linkBad && (lp->autosense == AUTO)) {
+ lp->media = BNC_AUI;
+ dc21040_autoconf(dev);
+ }
+ break;
+
+ case BNC:
+ case AUI:
+ case BNC_AUI:
+ reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
+ dce_ms_delay(500);
+ linkBad = ping_media(dev);
+ if (linkBad && (lp->autosense == AUTO)) {
+ lp->media = EXT_SIA;
+ dc21040_autoconf(dev);
+ }
+ break;
+
+ case EXT_SIA:
+ reset_init_sia(dev, 0x3041, 0x0000, 0x0006);
+ dce_ms_delay(500);
+ linkBad = ping_media(dev);
+ if (linkBad && (lp->autosense == AUTO)) {
+ lp->media = NC;
+ dc21040_autoconf(dev);
+ }
+ break;
+
+ case NC:
+#ifndef __alpha__
+ reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
+ break;
+#else
+ /* JAE: for Alpha, default to BNC/AUI, *not* TP */
+ reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
+#endif /* __alpha__ */
+ }
+
+ return;
+}
+
+/*
+** Autoconfigure the media when using the DC21041. AUI needs to be tested
+** before BNC, because the BNC port will indicate activity if it's not
+** terminated correctly. The only way to test for that is to place a loopback
+** packet onto the network and watch for errors.
+*/
+static void dc21041_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 sts, irqs, irq_mask, omr;
+
+ switch (lp->media) {
+ case TP_NW:
+ omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
+ outl(omr | OMR_FD, DE4X5_OMR);
+ irqs = STS_LNF | STS_LNP;
+ irq_mask = IMR_LFM | IMR_LPM;
+ sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
+ if (sts & STS_LNP) {
+ lp->media = ANS;
+ } else {
+ lp->media = AUI;
+ }
+ dc21041_autoconf(dev);
+ break;
+
+ case ANS:
+ irqs = STS_LNP;
+ irq_mask = IMR_LPM;
+ sts = test_ans(dev, irqs, irq_mask, 3000);
+ if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
+ lp->media = TP;
+ dc21041_autoconf(dev);
+ }
+ break;
+
+ case TP:
+ omr = inl(DE4X5_OMR); /* Set up half duplex for TP */
+ outl(omr & ~OMR_FD, DE4X5_OMR);
+ irqs = STS_LNF | STS_LNP;
+ irq_mask = IMR_LFM | IMR_LPM;
+ sts = test_media(dev, irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
+ if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
+ if (inl(DE4X5_SISR) & SISR_NRA) { /* Non selected port activity */
+ lp->media = AUI;
+ } else {
+ lp->media = BNC;
+ }
+ dc21041_autoconf(dev);
+ }
+ break;
+
+ case AUI:
+ omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
+ outl(omr & ~OMR_FD, DE4X5_OMR);
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x000e, 1000);
+ if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
+ lp->media = BNC;
+ dc21041_autoconf(dev);
+ }
+ break;
+
+ case BNC:
+ omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
+ outl(omr & ~OMR_FD, DE4X5_OMR);
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x0006, 1000);
+ if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
+ lp->media = NC;
+ } else { /* Ensure media connected */
+ if (ping_media(dev)) lp->media = NC;
+ }
+ break;
+
+ case NC:
+ omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
+ outl(omr | OMR_FD, DE4X5_OMR);
+ reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */
+ break;
+ }
+
+ return;
+}
+
+/*
+** Reduced feature version (temporary I hope)
+*/
+static void dc21140_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 omr;
+
+ switch(lp->media) {
+ case _100Mb: /* Set 100Mb/s, MII Port with PCS Function and Scrambler */
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
+ omr |= (de4x5_full_duplex ? OMR_FD : 0); /* Set up Full Duplex */
+ outl(omr | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);
+ outl(GEP_FDXD | GEP_MODE, DE4X5_GEP);
+ break;
+
+ case _10Mb: /* Set conventional 10Mb/s ENDEC interface */
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
+ omr |= (de4x5_full_duplex ? OMR_FD : 0); /* Set up Full Duplex */
+ outl(omr | OMR_TTM, DE4X5_OMR);
+ outl(GEP_FDXD, DE4X5_GEP);
+ break;
+ }
+
+ return;
+}
+
+static int
+test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 sts, time, csr12;
+
+ reset_init_sia(dev, csr13, csr14, csr15);
+
+ /* Set link_fail_inhibit_timer */
+ load_ms_timer(dev, msec);
+
+ /* clear all pending interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+
+ /* clear csr12 NRA and SRA bits */
+ csr12 = inl(DE4X5_SISR);
+ outl(csr12, DE4X5_SISR);
+
+ /* Poll for timeout - timer interrupt doesn't work correctly */
+ do {
+ time = inl(DE4X5_GPT) & GPT_VAL;
+ sts = inl(DE4X5_STS);
+ } while ((time != 0) && !(sts & irqs));
+
+ sts = inl(DE4X5_STS);
+
+ return sts;
+}
+/*
+static int test_sym_link(struct device *dev, u32 msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ u32 gep, time;
+
+ / * Set link_fail_inhibit_timer * /
+ load_ms_timer(dev, msec);
+
+ / * Poll for timeout or SYM_LINK=0 * /
+ do {
+ time = inl(DE4X5_GPT) & GPT_VAL;
+ gep = inl(DE4X5_GEP) & (GEP_SLNK | GEP_LNP);
+ } while ((time > 0) && (gep & GEP_SLNK));
+
+ return gep;
+}
+*/
+/*
+** Send a packet onto the media and watch for send errors that indicate the
+** media is bad or unconnected.
+*/
+static int ping_media(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, entry, linkBad;
+ s32 omr, t_3s = 4000;
+ char frame[64];
+
+ create_packet(dev, frame, sizeof(frame));
+
+ entry = lp->tx_new; /* Remember the ring position */
+ load_packet(dev, frame, TD_LS | TD_FS | sizeof(frame),NULL);
+
+ omr = inl(DE4X5_OMR);
+ outl(omr|OMR_ST, DE4X5_OMR);
+
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ lp->tx_old = lp->tx_new;
+
+ /* Poll for completion of frame (interrupts are disabled for now)... */
+ for (linkBad=1,i=0;(i<t_3s) && linkBad;i++) {
+ if (inl(DE4X5_SISR) & SISR_NCR) break; /* network connection error */
+ if (lp->tx_ring[entry].status >= 0) linkBad=0;
+ dce_ms_delay(1);
+ }
+ outl(omr, DE4X5_OMR);
+
+ return ((linkBad || (lp->tx_ring[entry].status & TD_ES)) ? 1 : 0);
+}
+
+/*
+** Check the Auto Negotiation State. Return OK when a link pass interrupt
+** is received and the auto-negotiation status is NWAY OK.
+*/
+static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 sts, ans;
+
+ outl(irq_mask, DE4X5_IMR);
+
+ /* Set timeout limit */
+ load_ms_timer(dev, msec);
+
+ /* clear all pending interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+
+ /* Poll for interrupts */
+ do {
+ ans = inl(DE4X5_SISR) & SISR_ANS;
+ sts = inl(DE4X5_STS);
+ } while (!(sts & irqs) && (ans ^ ANS_NWOK) != 0);
+
+ return ((sts & STS_LNP) && ((ans ^ ANS_NWOK) == 0) ? STS_LNP : 0);
+}
+
+/*
+** Reset the SIA and load new values into the connectivity (CSR13), TX/RX
+** (CSR14) and general (CSR15) registers.
+*/
+static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ RESET_SIA;
+ outl(sigr, DE4X5_SIGR);
+ outl(strr, DE4X5_STRR);
+ outl(sicr, DE4X5_SICR);
+
+ return;
+}
+
+/*
+** Load the timer on the DC21041 and 21140. Max time is 13.42 secs.
+*/
+static void load_ms_timer(struct device *dev, u32 msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 i = 2048, j;
+
+ if (lp->chipset == DC21140) {
+ j = inl(DE4X5_OMR);
+ if ((j & OMR_TTM) && (j & OMR_PS)) { /* 10Mb/s MII */
+ i = 8192;
+ } else if ((~j & OMR_TTM) && (j & OMR_PS)) { /* 100Mb/s MII */
+ i = 819;
+ }
+ }
+
+ outl((s32)(msec * 10000)/i, DE4X5_GPT);
+
+ return;
+}
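+
+/*
+** [Annotation, not part of the original driver] A quick check of the scaling
+** used in load_ms_timer() above: the divisor i corresponds to an assumed GPT
+** tick of i/10 microseconds, and GPT_VAL is a 16 bit field, so with the
+** default divisor of 2048 the longest programmable interval is
+**
+** 0xffff * 204.8us = 65535 * 0.0002048s ~= 13.42s
+**
+** which is where the "Max time is 13.42 secs" figure above comes from. The
+** helper below (illustrative only, never called) inverts the calculation.
+*/
+#if 0
+static u32 gpt_max_msec(s32 divisor)
+{
+ /* Longest interval, in ms, for a given divisor (2048, 8192 or 819) */
+ return ((u32)0xffff * divisor) / 10000; /* 2048 -> 13421ms, 819 -> 5367ms */
+}
+#endif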
+
+/*
+** Create an Ethernet packet with an invalid CRC
+*/
+static void create_packet(struct device *dev, char *frame, int len)
+{
+ int i;
+ char *buf = frame;
+
+ for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
+ *buf++ = dev->dev_addr[i];
+ }
+ for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
+ *buf++ = dev->dev_addr[i];
+ }
+
+ *buf++ = 0; /* Packet length (2 bytes) */
+ *buf++ = 1;
+
+ return;
+}
+
+/*
+** Known delay in microseconds
+*/
+static void dce_us_delay(u32 usec)
+{
+ udelay(usec);
+
+ return;
+}
+
+/*
+** Known delay in milliseconds, in millisecond steps.
+*/
+static void dce_ms_delay(u32 msec)
+{
+ u_int i;
+
+ for (i=0; i<msec; i++) {
+ dce_us_delay(1000);
+ }
+
+ return;
+}
+
+
+/*
+** Look for a particular board name in the EISA configuration space
+*/
+static int EISA_signature(char *name, s32 eisa_id)
+{
+ u_int i;
+ const char *signatures[] = DE4X5_SIGNATURE;
+ char ManCode[DE4X5_STRLEN];
+ union {
+ s32 ID;
+ char Id[4];
+ } Eisa;
+ int status = 0;
+
+ *name = '\0';
+ Eisa.ID = inl(eisa_id);
+
+ ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
+ ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
+ ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
+ ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
+ ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
+ ManCode[5]='\0';
+
+ for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
+ if (strstr(ManCode, signatures[i]) != NULL) {
+ strcpy(name,ManCode);
+ status = 1;
+ }
+ }
+
+ return status; /* 1 if a board name was matched, 0 otherwise */
+}
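+
+/*
+** [Annotation, not part of the original driver] Worked example of the decode
+** above, assuming the usual EISA packing of a seven character identifier
+** (three 5 bit compressed letters followed by four hex digits). For an ID
+** such as "DEC4250" the four ID bytes would read 0x10 0xa3 0x42 0x50 and the
+** code recovers the board name "DE425" from the first two letters and the
+** first three digits; the third letter and the revision digit are ignored.
+*/
+#if 0
+static void eisa_id_decode_example(void)
+{
+ u_char Id[4] = {0x10, 0xa3, 0x42, 0x50}; /* sample "DEC4250" encoding */
+ char ManCode[6];
+
+ ManCode[0] = ((Id[0]>>2)&0x1f)+0x40; /* 'D' */
+ ManCode[1] = ((Id[1]&0xe0)>>5)+((Id[0]&0x03)<<3)+0x40; /* 'E' */
+ ManCode[2] = ((Id[2]>>4)&0x0f)+0x30; /* '4' */
+ ManCode[3] = (Id[2]&0x0f)+0x30; /* '2' */
+ ManCode[4] = ((Id[3]>>4)&0x0f)+0x30; /* '5' */
+ ManCode[5] = '\0';
+
+ printk("decoded EISA ID: %s\n", ManCode); /* prints "DE425" */
+}
+#endif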
+
+/*
+** Look for a special sequence in the Ethernet station address PROM that
+** is common across all DIGITAL network adapter products.
+**
+** Search the Ethernet address ROM for the signature. Since the ROM address
+** counter can start at an arbitrary point, the search must include the entire
+** probe sequence length plus the (length_of_the_signature - 1).
+** Stop the search IMMEDIATELY after the signature is found so that the
+** PROM address counter is correctly positioned at the start of the
+** ethernet address for later read out.
+*/
+
+static int DevicePresent(u_long aprom_addr)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ } dev;
+ char data;
+ int i, j, tmp, status = 0;
+ short sigLength;
+ struct bus_type *lp = &bus;
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ if (lp->chipset == DC21040) {
+ for (i=0,j=0;(j<sigLength) && (i<PROBE_LENGTH+sigLength-1);i++) {
+ if (lp->bus == PCI) {
+ while ((tmp = inl(aprom_addr)) < 0);
+ data = (char)tmp;
+ } else {
+ data = inb(aprom_addr);
+ }
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) {
+ j=1;
+ } else {
+ j=0;
+ }
+ }
+ }
+
+ if (j!=sigLength) {
+ status = -ENODEV; /* search failed */
+ }
+
+ } else { /* use new srom */
+ short *p = (short *)&lp->srom;
+ for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
+ *p++ = srom_rd(aprom_addr, i);
+ }
+ }
+
+ return status;
+}
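+
+/*
+** [Annotation, not part of the original driver] The DC21040 branch above is
+** a rolling match: j counts consecutive signature bytes seen so far and is
+** reset to 1 or 0 on a mismatch, depending on whether the mismatching byte
+** could itself start the signature. Because the PROM address counter may
+** already be part way through the signature when reading starts, up to
+** PROBE_LENGTH + sigLength - 1 bytes have to be examined before giving up.
+** The same matcher over a plain byte array (names are illustrative only):
+*/
+#if 0
+static int find_sig(u_char *stream, int len, u_char *sig, int siglen)
+{
+ int i, j;
+
+ for (i=0, j=0; (j<siglen) && (i<len); i++) {
+ if (stream[i] == sig[j]) { /* track signature */
+ j++;
+ } else { /* lost it; maybe restart mid-match */
+ j = (stream[i] == sig[0]) ? 1 : 0;
+ }
+ }
+
+ return (j == siglen) ? i : -1; /* offset just past the match, or -1 */
+}
+#endif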
+
+static int get_hw_addr(struct device *dev)
+{
+ u_long iobase = dev->base_addr;
+ int i, k, tmp, status = 0;
+ u_short j,chksum;
+ struct bus_type *lp = &bus;
+
+ for (i=0,k=0,j=0;j<3;j++) {
+ k <<= 1 ;
+ if (k > 0xffff) k-=0xffff;
+
+ if (lp->bus == PCI) {
+ if (lp->chipset == DC21040) {
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ k += (u_char) tmp;
+ dev->dev_addr[i++] = (u_char) tmp;
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ k += (u_short) (tmp << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+ } else {
+ dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ }
+ } else {
+ k += (u_char) (tmp = inb(EISA_APROM));
+ dev->dev_addr[i++] = (u_char) tmp;
+ k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+ }
+
+ if (k > 0xffff) k-=0xffff;
+ }
+ if (k == 0xffff) k=0;
+
+ if (lp->bus == PCI) {
+ if (lp->chipset == DC21040) {
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ chksum = (u_char) tmp;
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ chksum |= (u_short) (tmp << 8);
+ if (k != chksum) status = -1;
+ }
+ } else {
+ chksum = (u_char) inb(EISA_APROM);
+ chksum |= (u_short) (inb(EISA_APROM) << 8);
+ if (k != chksum) status = -1;
+ }
+
+
+ return status;
+}
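+
+/*
+** [Annotation, not part of the original driver] The loop above implements a
+** rotate-and-add checksum over the three 16 bit words of the station
+** address, wrapping at 0xffff, and compares it with the checksum word that
+** follows the address in the PROM. The same calculation on an address that
+** is already in memory (illustrative helper, never called):
+*/
+#if 0
+static u_short de4x5_addr_checksum(u_char *addr)
+{
+ u_int k = 0;
+ int i;
+
+ for (i=0; i<6; i+=2) {
+ k <<= 1;
+ if (k > 0xffff) k -= 0xffff;
+ k += addr[i] | ((u_short)addr[i+1] << 8); /* little endian word */
+ if (k > 0xffff) k -= 0xffff;
+ }
+ if (k == 0xffff) k = 0;
+
+ return (u_short)k;
+}
+#endif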
+
+/*
+** SROM Read
+*/
+static short srom_rd(u_long addr, u_char offset)
+{
+ sendto_srom(SROM_RD | SROM_SR, addr);
+
+ srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
+ srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
+ srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
+
+ return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
+}
+
+static void srom_latch(u_int command, u_long addr)
+{
+ sendto_srom(command, addr);
+ sendto_srom(command | DT_CLK, addr);
+ sendto_srom(command, addr);
+
+ return;
+}
+
+static void srom_command(u_int command, u_long addr)
+{
+ srom_latch(command, addr);
+ srom_latch(command, addr);
+ srom_latch((command & 0x0000ff00) | DT_CS, addr);
+
+ return;
+}
+
+static void srom_address(u_int command, u_long addr, u_char offset)
+{
+ int i;
+ char a;
+
+ a = (char)(offset << 2);
+ for (i=0; i<6; i++, a <<= 1) {
+ srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
+ }
+ dce_us_delay(1);
+
+ i = (getfrom_srom(addr) >> 3) & 0x01;
+ if (i != 0) {
+ printk("Bad SROM address phase.....\n");
+/* printk(".");*/
+ }
+
+ return;
+}
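+
+/*
+** [Annotation, not part of the original driver] The (a < 0) test above is a
+** compact way of clocking the 6 bit word address out MSB first: offset << 2
+** left justifies the address in a signed byte, so on each pass the sign bit
+** is the next address bit to present on DT_IN. For example (illustrative
+** only), offset 0x12 (binary 010010) is shifted out as 0,1,0,0,1,0:
+*/
+#if 0
+static void srom_address_bits_example(void)
+{
+ char a = (char)(0x12 << 2); /* 0x48: the 6 address bits, left aligned */
+ int i;
+
+ for (i=0; i<6; i++, a <<= 1) {
+ printk("%d", (a < 0) ? 1 : 0); /* prints 010010 */
+ }
+ printk("\n");
+}
+#endif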
+
+static short srom_data(u_int command, u_long addr)
+{
+ int i;
+ short word = 0;
+ s32 tmp;
+
+ for (i=0; i<16; i++) {
+ sendto_srom(command | DT_CLK, addr);
+ tmp = getfrom_srom(addr);
+ sendto_srom(command, addr);
+
+ word = (word << 1) | ((tmp >> 3) & 0x01);
+ }
+
+ sendto_srom(command & 0x0000ff00, addr);
+
+ return word;
+}
+
+/*
+static void srom_busy(u_int command, u_long addr)
+{
+ sendto_srom((command & 0x0000ff00) | DT_CS, addr);
+
+ while (!((getfrom_srom(addr) >> 3) & 0x01)) {
+ dce_ms_delay(1);
+ }
+
+ sendto_srom(command & 0x0000ff00, addr);
+
+ return;
+}
+*/
+
+static void sendto_srom(u_int command, u_long addr)
+{
+ outl(command, addr);
+ dce_us_delay(1);
+
+ return;
+}
+
+static int getfrom_srom(u_long addr)
+{
+ s32 tmp;
+
+ tmp = inl(addr);
+ dce_us_delay(1);
+
+ return tmp;
+}
+
+static char *build_setup_frame(struct device *dev, int mode)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i;
+ char *pa = lp->setup_frame;
+
+ /* Initialise the setup frame */
+ if (mode == ALL) {
+ memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
+ }
+
+ if (lp->setup_f == HASH_PERF) {
+ for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
+ *(pa + i) = dev->dev_addr[i]; /* Host address */
+ if (i & 0x01) pa += 2;
+ }
+ *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80; /* B'cast address */
+ } else {
+ for (i=0; i<ETH_ALEN; i++) { /* Host address */
+ *(pa + (i&1)) = dev->dev_addr[i];
+ if (i & 0x01) pa += 4;
+ }
+ for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */
+ *(pa + (i&1)) = (char) 0xff;
+ if (i & 0x01) pa += 4;
+ }
+ }
+
+ return pa; /* Points to the next entry */
+}
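+
+/*
+** [Annotation, not part of the original driver] In the perfect filtering
+** branch above each 6 byte address is spread over three longwords, two bytes
+** at the start of each, so one table entry occupies 12 bytes and 16 entries
+** fill the 192 byte setup frame (SETUP_FRAME_LEN). Layout of a single entry,
+** with offsets relative to the start of that entry:
+**
+** offset 0: addr[0] offset 1: addr[1] (bytes 2-3 unused)
+** offset 4: addr[2] offset 5: addr[3] (bytes 6-7 unused)
+** offset 8: addr[4] offset 9: addr[5] (bytes 10-11 unused)
+*/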
+
+static void enable_ast(struct device *dev, u32 time_out)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ lp->irq_mask |= IMR_TMM;
+ outl(lp->irq_mask, DE4X5_IMR);
+ load_ms_timer(dev, time_out);
+
+ return;
+}
+
+static void disable_ast(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ lp->irq_mask &= ~IMR_TMM;
+ outl(lp->irq_mask, DE4X5_IMR);
+ load_ms_timer(dev, 0);
+
+ return;
+}
+
+static void kick_tx(struct device *dev)
+{
+ struct sk_buff *skb;
+
+ if ((skb = alloc_skb(0, GFP_ATOMIC)) != NULL) {
+ skb->len= FAKE_FRAME_LEN;
+ skb->arp=1;
+ skb->dev=dev;
+ dev_queue_xmit(skb, dev, SOPRI_NORMAL);
+ }
+
+ return;
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases.
+*/
+static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ s32 omr;
+ union {
+ u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
+ u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
+ u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
+ } tmp;
+
+ switch(ioc->cmd) {
+ case DE4X5_GET_HWADDR: /* Get the hardware address */
+ ioc->len = ETH_ALEN;
+ status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
+ if (status)
+ break;
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+
+ break;
+ case DE4X5_SET_HWADDR: /* Set the hardware address */
+ status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
+ if (status)
+ break;
+ status = -EPERM;
+ if (!suser())
+ break;
+ status = 0;
+ memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN);
+ for (i=0; i<ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ }
+ build_setup_frame(dev, PHYS_ADDR_ONLY);
+ /* Set up the descriptor and give ownership to the card */
+ while (set_bit(0, (void *)&dev->tbusy) != 0);/* Wait for lock to free*/
+ if (lp->setup_f == HASH_PERF) {
+ load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
+ SETUP_FRAME_LEN, NULL);
+ } else {
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ SETUP_FRAME_LEN, NULL);
+ }
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
+ dev->tbusy = 0; /* Unlock the TX ring */
+
+ break;
+ case DE4X5_SET_PROM: /* Set Promiscuous Mode */
+ if (suser()) {
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PR;
+ outl(omr, DE4X5_OMR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_CLR_PROM: /* Clear Promiscuous Mode */
+ if (suser()) {
+ omr = inl(DE4X5_OMR);
+ omr &= ~OMR_PR;
+ outl(omr, DE4X5_OMR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ printk("%s: Boo!\n", dev->name);
+
+ break;
+ case DE4X5_GET_MCA: /* Get the multicast address table */
+ ioc->len = (HASH_TABLE_LEN >> 3);
+ status = verify_area(VERIFY_WRITE, ioc->data, ioc->len);
+ if (status)
+ break;
+ memcpy_tofs(ioc->data, lp->setup_frame, ioc->len);
+
+ break;
+ case DE4X5_SET_MCA: /* Set a multicast address */
+ if (suser()) {
+ if (ioc->len != HASH_TABLE_LEN) { /* MCA changes */
+ if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN * ioc->len))) {
+ memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
+ set_multicast_list(dev);
+ }
+ } else {
+ set_multicast_list(dev);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_CLR_MCA: /* Clear all multicast addresses */
+ if (suser()) {
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_MCA_EN: /* Enable pass all multicast addressing */
+ if (suser()) {
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PM;
+ outl(omr, DE4X5_OMR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_GET_STATS: /* Get the driver statistics */
+ ioc->len = sizeof(lp->pktStats);
+ status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
+ if (status)
+ break;
+
+ cli();
+ memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
+ sti();
+
+ break;
+ case DE4X5_CLR_STATS: /* Zero out the driver statistics */
+ if (suser()) {
+ cli();
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ sti();
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_GET_OMR: /* Get the OMR Register contents */
+ tmp.addr[0] = inl(DE4X5_OMR);
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
+ memcpy_tofs(ioc->data, tmp.addr, 1);
+ }
+
+ break;
+ case DE4X5_SET_OMR: /* Set the OMR Register contents */
+ if (suser()) {
+ if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
+ memcpy_fromfs(tmp.addr, ioc->data, 1);
+ outl(tmp.addr[0], DE4X5_OMR);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_GET_REG: /* Get the DE4X5 Registers */
+ j = 0;
+ tmp.lval[0] = inl(DE4X5_STS); j+=4;
+ tmp.lval[1] = inl(DE4X5_BMR); j+=4;
+ tmp.lval[2] = inl(DE4X5_IMR); j+=4;
+ tmp.lval[3] = inl(DE4X5_OMR); j+=4;
+ tmp.lval[4] = inl(DE4X5_SISR); j+=4;
+ tmp.lval[5] = inl(DE4X5_SICR); j+=4;
+ tmp.lval[6] = inl(DE4X5_STRR); j+=4;
+ tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
+ ioc->len = j;
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+ break;
+
+#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
+
+ case DE4X5_DUMP:
+ j = 0;
+ tmp.addr[j++] = dev->irq;
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[j++] = dev->dev_addr[i];
+ }
+ tmp.addr[j++] = lp->rxRingSize;
+ tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
+ tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
+
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
+
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
+
+ for (i=0;i<lp->rxRingSize;i++){
+ tmp.lval[j>>2] = lp->rx_ring[i].status; j+=4;
+ }
+ for (i=0;i<lp->txRingSize;i++){
+ tmp.lval[j>>2] = lp->tx_ring[i].status; j+=4;
+ }
+
+ tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
+
+ tmp.addr[j++] = lp->txRingSize;
+ tmp.addr[j++] = dev->tbusy;
+
+ ioc->len = j;
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ default:
+ status = -EOPNOTSUPP;
+ }
+
+ return status;
+}
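+
+/*
+** [Annotation, not part of the original driver] Sketch of how the private
+** ioctls above would be driven from user space on the Linux side: a
+** struct de4x5_ioctl (see de4x5.h) is laid over the ifr_ifru union of a
+** struct ifreq and issued with the SIOCDEVPRIVATE command. The interface
+** name, the assumption that de4x5.h is includable from user space, and the
+** error handling are all illustrative only.
+*/
+#if 0 /* user space example, not kernel code */
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include "de4x5.h"
+
+int main(void)
+{
+ struct ifreq ifr;
+ struct de4x5_ioctl *ioc = (struct de4x5_ioctl *)&ifr.ifr_data;
+ unsigned char hwaddr[6];
+ int s = socket(AF_INET, SOCK_DGRAM, 0);
+
+ strcpy(ifr.ifr_name, "eth0"); /* illustrative interface name */
+ ioc->cmd = DE4X5_GET_HWADDR;
+ ioc->len = sizeof(hwaddr);
+ ioc->data = hwaddr;
+ if (ioctl(s, DE4X5IOCTL, &ifr) == 0) {
+ printf("%02x:%02x:%02x:%02x:%02x:%02x\n", hwaddr[0], hwaddr[1],
+ hwaddr[2], hwaddr[3], hwaddr[4], hwaddr[5]);
+ }
+ return 0;
+}
+#endif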
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device thisDE4X5 = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x2000, 10, /* I/O address, IRQ */
+ 0, 0, 0, NULL, de4x5_probe };
+
+static int io=0x000b; /* EDIT THESE LINES FOR YOUR CONFIGURATION */
+static int irq=10; /* or use the insmod io= irq= options */
+
+int
+init_module(void)
+{
+ thisDE4X5.base_addr=io;
+ thisDE4X5.irq=irq;
+ if (register_netdev(&thisDE4X5) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ struct de4x5_private *lp = (struct de4x5_private *) thisDE4X5.priv;
+
+ if (lp) { /* read lp->bus before the private area is freed */
+ release_region(thisDE4X5.base_addr, (lp->bus == PCI ?
+ DE4X5_PCI_TOTAL_SIZE :
+ DE4X5_EISA_TOTAL_SIZE));
+ kfree_s(bus_to_virt(lp->rx_ring[0].buf), RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
+ kfree_s(thisDE4X5.priv, sizeof(struct de4x5_private) + ALIGN);
+ thisDE4X5.priv = NULL;
+ }
+ unregister_netdev(&thisDE4X5);
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * kernel-compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de4x5.c"
+ *
+ * module-compile-command: "gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de4x5.c"
+ * End:
+ */
+
+
diff --git a/i386/i386at/gpl/linux/net/de4x5.h b/i386/i386at/gpl/linux/net/de4x5.h
new file mode 100644
index 00000000..b0ee43ea
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/de4x5.h
@@ -0,0 +1,645 @@
+/*
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of the
+ GNU Public License, incorporated herein by reference.
+
+ The author may be reached as davies@wanton.lkg.dec.com or Digital
+ Equipment Corporation, 550 King Street, Littleton MA 01460.
+
+ =========================================================================
+*/
+
+/*
+** DC21040 CSR<0..15> Register Address Map
+*/
+#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */
+#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */
+#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */
+#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */
+#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */
+#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */
+#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */
+#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */
+#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */
+#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */
+#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */
+#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */
+#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */
+#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */
+#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/
+#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */
+#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */
+#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */
+#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */
+#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */
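+
+/*
+** [Annotation, not part of the original header] lp->bus is PCI (0) or
+** EISA (1), defined further down, so the "<< lp->bus" term doubles the CSR
+** spacing on EISA boards: the status register, for example, is reached at
+** iobase + 0x28 on PCI (0x028 << 0) and at iobase + 0x50 on EISA
+** (0x028 << 1).
+*/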
+
+/*
+** EISA Register Address Map
+*/
+#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
+#define EISA_CR iobase+0x0c84 /* EISA Control Register */
+#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
+#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
+#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */
+#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */
+#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */
+
+/*
+** PCI/EISA Configuration Registers Address Map
+*/
+#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */
+#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */
+#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */
+#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */
+#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */
+#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */
+#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. */
+#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */
+#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */
+
+/*
+** EISA Configuration Register 0 bit definitions
+*/
+#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */
+#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */
+#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */
+#define ER0_ISTS 0x10 /* Interrupt Status (X) */
+#define ER0_LI 0x08 /* Latch Interrupts */
+#define ER0_INTL 0x06 /* INTerrupt Level */
+#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */
+
+/*
+** EISA Configuration Register 1 bit definitions
+*/
+#define ER1_IAM 0xe0 /* ISA Address Mode */
+#define ER1_IAE 0x10 /* ISA Addressing Enable */
+#define ER1_UPIN 0x0f /* User Pins */
+
+/*
+** EISA Configuration Register 2 bit definitions
+*/
+#define ER2_BRS 0xc0 /* Boot ROM Size */
+#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */
+
+/*
+** EISA Configuration Register 3 bit definitions
+*/
+#define ER3_BWE 0x40 /* Burst Write Enable */
+#define ER3_BRE 0x04 /* Burst Read Enable */
+#define ER3_LSR 0x02 /* Local Software Reset */
+
+/*
+** PCI Configuration ID Register (PCI_CFID)
+*/
+#define CFID_DID 0xff00 /* Device ID */
+#define CFID_VID 0x00ff /* Vendor ID */
+#define DC21040_DID 0x0002 /* Unique Device ID # */
+#define DC21040_VID 0x1011 /* DC21040 Manufacturer */
+#define DC21041_DID 0x0014 /* Unique Device ID # */
+#define DC21041_VID 0x1011 /* DC21041 Manufacturer */
+#define DC21140_DID 0x0009 /* Unique Device ID # */
+#define DC21140_VID 0x1011 /* DC21140 Manufacturer */
+
+/*
+** Chipset defines
+*/
+#define DC21040 DC21040_DID
+#define DC21041 DC21041_DID
+#define DC21140 DC21140_DID
+
+#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID))
+#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID))
+#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID))
+
+/*
+** PCI Configuration Command/Status Register (PCI_CFCS)
+*/
+#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */
+#define CFCS_SSE 0x40000000 /* Signal System Error (S) */
+#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */
+#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */
+#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */
+#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */
+#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */
+#define CFCS_SLE 0x00000100 /* System Error Enable (C) */
+#define CFCS_PER 0x00000040 /* Parity Error Response (C) */
+#define CFCS_MO 0x00000004 /* Master Operation (C) */
+#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */
+#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */
+
+/*
+** PCI Configuration Revision Register (PCI_CFRV)
+*/
+#define CFRV_BC 0xff000000 /* Base Class */
+#define CFRV_SC 0x00ff0000 /* Subclass */
+#define CFRV_SN 0x000000f0 /* Step Number */
+#define CFRV_RN 0x0000000f /* Revision Number */
+#define BASE_CLASS 0x02000000 /* Indicates Network Controller */
+#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */
+#define STEP_NUMBER 0x00000020 /* Increments for future chips */
+#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */
+#define CFRV_MASK 0xffff0000 /* Register mask */
+
+/*
+** PCI Configuration Latency Timer Register (PCI_CFLT)
+*/
+#define CFLT_BC 0x0000ff00 /* Latency Timer bits */
+
+/*
+** PCI Configuration Base I/O Address Register (PCI_CBIO)
+*/
+#define CBIO_MASK 0xffffff80 /* Base I/O Address Mask */
+#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */
+
+/*
+** PCI Configuration Expansion ROM Base Address Register (PCI_CBER)
+*/
+#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */
+#define CBER_ROME 0x00000001 /* ROM Enable */
+
+/*
+** PCI Configuration Driver Area Register (PCI_CFDA)
+*/
+#define CFDA_PSM 0x80000000 /* Power Saving Mode */
+
+/*
+** DC21040 Bus Mode Register (DE4X5_BMR)
+*/
+#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */
+#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */
+#define BMR_DAS 0x00010000 /* Diagnostic Address Space */
+#define BMR_CAL 0x0000c000 /* Cache Alignment */
+#define BMR_PBL 0x00003f00 /* Programmable Burst Length */
+#define BMR_BLE 0x00000080 /* Big/Little Endian */
+#define BMR_DSL 0x0000007c /* Descriptor Skip Length */
+#define BMR_BAR 0x00000002 /* Bus ARbitration */
+#define BMR_SWR 0x00000001 /* Software Reset */
+
+#define TAP_NOPOLL 0x00000000 /* No automatic polling */
+#define TAP_200US 0x00020000 /* TX automatic polling every 200us */
+#define TAP_800US 0x00040000 /* TX automatic polling every 800us */
+#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */
+#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */
+#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */
+#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */
+#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */
+
+#define CAL_NOUSE 0x00000000 /* Not used */
+#define CAL_8LONG 0x00004000 /* 8-longword alignment */
+#define CAL_16LONG 0x00008000 /* 16-longword alignment */
+#define CAL_32LONG 0x0000c000 /* 32-longword alignment */
+
+#define PBL_0 0x00000000 /* DMA burst length = amount in RX FIFO */
+#define PBL_1 0x00000100 /* 1 longword DMA burst length */
+#define PBL_2 0x00000200 /* 2 longwords DMA burst length */
+#define PBL_4 0x00000400 /* 4 longwords DMA burst length */
+#define PBL_8 0x00000800 /* 8 longwords DMA burst length */
+#define PBL_16 0x00001000 /* 16 longwords DMA burst length */
+#define PBL_32 0x00002000 /* 32 longwords DMA burst length */
+
+#define DSL_0 0x00000000 /* 0 longword / descriptor */
+#define DSL_1 0x00000004 /* 1 longword / descriptor */
+#define DSL_2 0x00000008 /* 2 longwords / descriptor */
+#define DSL_4 0x00000010 /* 4 longwords / descriptor */
+#define DSL_8 0x00000020 /* 8 longwords / descriptor */
+#define DSL_16 0x00000040 /* 16 longwords / descriptor */
+#define DSL_32 0x00000080 /* 32 longwords / descriptor */
+
+/*
+** DC21040 Transmit Poll Demand Register (DE4X5_TPD)
+*/
+#define TPD 0x00000001 /* Transmit Poll Demand */
+
+/*
+** DC21040 Receive Poll Demand Register (DE4X5_RPD)
+*/
+#define RPD 0x00000001 /* Receive Poll Demand */
+
+/*
+** DC21040 Receive Ring Base Address Register (DE4X5_RRBA)
+*/
+#define RRBA 0xfffffffc /* RX Descriptor List Start Address */
+
+/*
+** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA)
+*/
+#define TRBA 0xfffffffc /* TX Descriptor List Start Address */
+
+/*
+** DC21040 Status Register (DE4X5_STS)
+*/
+#define STS_BE 0x03800000 /* Bus Error Bits */
+#define STS_TS 0x00700000 /* Transmit Process State */
+#define STS_RS 0x000e0000 /* Receive Process State */
+#define STS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define STS_ER 0x00004000 /* Early Receive */
+#define STS_SE 0x00002000 /* System Error */
+#define STS_LNF 0x00001000 /* Link Fail */
+#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */
+#define STS_TM 0x00000800 /* Timer Expired (DC21041) */
+#define STS_AT 0x00000400 /* AUI/TP Pin */
+#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */
+#define STS_RPS 0x00000100 /* Receive Process Stopped */
+#define STS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define STS_RI 0x00000040 /* Receive Interrupt */
+#define STS_UNF 0x00000020 /* Transmit Underflow */
+#define STS_LNP 0x00000010 /* Link Pass */
+#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */
+#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define STS_TPS 0x00000002 /* Transmit Process Stopped */
+#define STS_TI 0x00000001 /* Transmit Interrupt */
+
+#define EB_PAR 0x00000000 /* Parity Error */
+#define EB_MA 0x00800000 /* Master Abort */
+#define EB_TA 0x01000000 /* Target Abort */
+#define EB_RES0 0x01800000 /* Reserved */
+#define EB_RES1 0x02000000 /* Reserved */
+
+#define TS_STOP 0x00000000 /* Stopped */
+#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */
+#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */
+#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */
+#define TS_RES 0x00400000 /* Reserved */
+#define TS_SPKT 0x00500000 /* Setup Packet */
+#define TS_SUSP 0x00600000 /* Suspended */
+#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */
+
+#define RS_STOP 0x00000000 /* Stopped */
+#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */
+#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */
+#define RS_WFRP 0x00060000 /* Wait for Receive Packet */
+#define RS_SUSP 0x00080000 /* Suspended */
+#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */
+#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */
+#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */
+
+#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */
+
+/*
+** DC21040 Operation Mode Register (DE4X5_OMR)
+*/
+#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */
+#define OMR_SCR 0x01000000 /* Scrambler Mode */
+#define OMR_PCS 0x00800000 /* PCS Function */
+#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */
+#define OMR_SF 0x00200000 /* Store and Forward */
+#define OMR_HBD 0x00080000 /* HeartBeat Disable */
+#define OMR_PS 0x00040000 /* Port Select */
+#define OMR_CA 0x00020000 /* Capture Effect Enable */
+#define OMR_BP 0x00010000 /* Back Pressure */
+#define OMR_TR 0x0000c000 /* Threshold Control Bits */
+#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */
+#define OMR_FC 0x00001000 /* Force Collision Mode */
+#define OMR_OM 0x00000c00 /* Operating Mode */
+#define OMR_FD 0x00000200 /* Full Duplex Mode */
+#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */
+#define OMR_PM 0x00000080 /* Pass All Multicast */
+#define OMR_PR 0x00000040 /* Promiscuous Mode */
+#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */
+#define OMR_IF 0x00000010 /* Inverse Filtering */
+#define OMR_PB 0x00000008 /* Pass Bad Frames */
+#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */
+#define OMR_SR 0x00000002 /* Start/Stop Receive */
+#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */
+
+#define TR_72 0x00000000 /* Threshold set to 72 bytes */
+#define TR_96 0x00004000 /* Threshold set to 96 bytes */
+#define TR_128 0x00008000 /* Threshold set to 128 bytes */
+#define TR_160 0x0000c000 /* Threshold set to 160 bytes */
+
+/*
+** DC21040 Interrupt Mask Register (DE4X5_IMR)
+*/
+#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */
+#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */
+#define IMR_ERM 0x00004000 /* Early Receive Mask */
+#define IMR_SEM 0x00002000 /* System Error Mask */
+#define IMR_LFM 0x00001000 /* Link Fail Mask */
+#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */
+#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */
+#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */
+#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */
+#define IMR_RSM 0x00000100 /* Receive Stopped Mask */
+#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */
+#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */
+#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */
+#define IMR_LPM 0x00000010 /* Link Pass */
+#define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */
+#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */
+#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */
+#define IMR_TIM 0x00000001 /* Transmit Interrupt Mask */
+
+/*
+** DC21040 Missed Frame Counter (DE4X5_MFC)
+*/
+#define MFC_OVFL 0x00010000 /* Counter Overflow Bit */
+#define MFC_CNTR 0x0000ffff /* Counter Bits */
+
+/*
+** DC21040 Ethernet Address PROM (DE4X5_APROM)
+*/
+#define APROM_DN 0x80000000 /* Data Not Valid */
+#define APROM_DT 0x000000ff /* Address Byte */
+
+/*
+** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM)
+*/
+#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
+#define BROM_RD 0x00004000 /* Read from Boot ROM */
+#define BROM_WR 0x00002000 /* Write to Boot ROM */
+#define BROM_BR 0x00001000 /* Select Boot ROM when set */
+#define BROM_SR 0x00000800 /* Select Serial ROM when set */
+#define BROM_REG 0x00000400 /* External Register Select */
+#define BROM_DT 0x000000ff /* Data Byte */
+
+/*
+** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM)
+*/
+#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
+#define SROM_RD 0x00004000 /* Read from Boot ROM */
+#define SROM_WR 0x00002000 /* Write to Boot ROM */
+#define SROM_BR 0x00001000 /* Select Boot ROM when set */
+#define SROM_SR 0x00000800 /* Select Serial ROM when set */
+#define SROM_REG 0x00000400 /* External Register Select */
+#define SROM_DT 0x000000ff /* Data Byte */
+
+#define DT_OUT 0x00000008 /* Serial Data Out */
+#define DT_IN 0x00000004 /* Serial Data In */
+#define DT_CLK 0x00000002 /* Serial ROM Clock */
+#define DT_CS 0x00000001 /* Serial ROM Chip Select */
+
+/*
+** DC21040 Full Duplex Register (DE4X5_FDR)
+*/
+#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */
+
+/*
+** DC21041 General Purpose Timer Register (DE4X5_GPT)
+*/
+#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */
+#define GPT_VAL 0x0000ffff /* Timer Value */
+
+/*
+** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits)
+*/
+/* Valid ONLY for DE500 hardware */
+#define GEP_LNP 0x00000080 /* Link Pass (input) */
+#define GEP_SLNK 0x00000040 /* SYM LINK (input) */
+#define GEP_SDET 0x00000020 /* Signal Detect (input) */
+#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */
+#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */
+#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */
+#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */
+#define GEP_INIT 0x0000010f /* Setup inputs (0) and outputs (1) */
+
+
+/*
+** DC21040 SIA Status Register (DE4X5_SISR)
+*/
+#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */
+#define SISR_LPN 0x00008000 /* Link Partner Negotiable */
+#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */
+#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected */
+#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/
+#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */
+#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */
+#define SISR_DAO 0x00000080 /* PLL All One */
+#define SISR_DAZ 0x00000040 /* PLL All Zero */
+#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */
+#define SISR_DSD 0x00000010 /* PLL Self-Test Done */
+#define SISR_APS 0x00000008 /* Auto Polarity State */
+#define SISR_LKF 0x00000004 /* Link Fail Status */
+#define SISR_NCR 0x00000002 /* Network Connection Error */
+#define SISR_PAUI 0x00000001 /* AUI_TP Indication */
+#define SIA_RESET 0x00000000 /* SIA Reset */
+
+#define ANS_NDIS 0x00000000 /* Nway disable */
+#define ANS_TDIS 0x00001000 /* Transmit Disable */
+#define ANS_ADET 0x00002000 /* Ability Detect */
+#define ANS_ACK 0x00003000 /* Acknowledge */
+#define ANS_CACK 0x00004000 /* Complete Acknowledge */
+#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */
+#define ANS_LCHK 0x00006000 /* Link Check */
+
+/*
+** DC21040 SIA Connectivity Register (DE4X5_SICR)
+*/
+#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */
+#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */
+#define SICR_OE24 0x00004000 /* Output Enable 2 4 */
+#define SICR_OE13 0x00002000 /* Output Enable 1 3 */
+#define SICR_IE 0x00001000 /* Input Enable */
+#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */
+#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */
+#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs*/
+#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - DPLL Sigs*/
+#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */
+#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */
+#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test*/
+#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */
+#define SICR_ASE 0x00000080 /* APLL Start Enable*/
+#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */
+#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */
+#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */
+#define SICR_AUI 0x00000008 /* 10Base-T or AUI */
+#define SICR_CAC 0x00000004 /* CSR Auto Configuration */
+#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */
+#define SICR_SRL 0x00000001 /* SIA Reset */
+#define SICR_RESET 0xffff0000 /* Reset value for SICR */
+
+/*
+** DC21040 SIA Transmit and Receive Register (DE4X5_STRR)
+*/
+#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */
+#define STRR_SPP 0x00004000 /* Set Polarity Plus */
+#define STRR_APE 0x00002000 /* Auto Polarity Enable */
+#define STRR_LTE 0x00001000 /* Link Test Enable */
+#define STRR_SQE 0x00000800 /* Signal Quality Enable */
+#define STRR_CLD 0x00000400 /* Collision Detect Enable */
+#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */
+#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */
+#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */
+#define STRR_HDE 0x00000040 /* Half Duplex Enable */
+#define STRR_CPEN 0x00000030 /* Compensation Enable */
+#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */
+#define STRR_DREN 0x00000004 /* Driver Enable */
+#define STRR_LBK 0x00000002 /* Loopback Enable */
+#define STRR_ECEN 0x00000001 /* Encoder Enable */
+#define STRR_RESET 0xffffffff /* Reset value for STRR */
+
+/*
+** DC21040 SIA General Register (DE4X5_SIGR)
+*/
+#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */
+#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */
+#define SIGR_FRL 0x00002000 /* Force Receiver Low */
+#define SIGR_DPST 0x00001000 /* PLL Self Test Start */
+#define SIGR_LSD 0x00000800 /* LED Stretch Disable */
+#define SIGR_FLF 0x00000400 /* Force Link Fail */
+#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */
+#define SIGR_TSCK 0x00000100 /* Test Clock */
+#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */
+#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */
+#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */
+#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */
+#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */
+#define SIGR_JCK 0x00000004 /* Jabber Clock */
+#define SIGR_HUJ 0x00000002 /* Host Unjab */
+#define SIGR_JBD 0x00000001 /* Jabber Disable */
+#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */
+
+/*
+** Receive Descriptor Bit Summary
+*/
+#define R_OWN 0x80000000 /* Own Bit */
+#define RD_FL 0x7fff0000 /* Frame Length */
+#define RD_ES 0x00008000 /* Error Summary */
+#define RD_LE 0x00004000 /* Length Error */
+#define RD_DT 0x00003000 /* Data Type */
+#define RD_RF 0x00000800 /* Runt Frame */
+#define RD_MF 0x00000400 /* Multicast Frame */
+#define RD_FS 0x00000200 /* First Descriptor */
+#define RD_LS 0x00000100 /* Last Descriptor */
+#define RD_TL 0x00000080 /* Frame Too Long */
+#define RD_CS 0x00000040 /* Collision Seen */
+#define RD_FT 0x00000020 /* Frame Type */
+#define RD_RJ 0x00000010 /* Receive Watchdog */
+#define RD_DB 0x00000004 /* Dribbling Bit */
+#define RD_CE 0x00000002 /* CRC Error */
+#define RD_OF 0x00000001 /* Overflow */
+
+#define RD_RER 0x02000000 /* Receive End Of Ring */
+#define RD_RCH 0x01000000 /* Second Address Chained */
+#define RD_RBS2 0x003ff800 /* Buffer 2 Size */
+#define RD_RBS1 0x000007ff /* Buffer 1 Size */
+
+/*
+** Transmit Descriptor Bit Summary
+*/
+#define T_OWN 0x80000000 /* Own Bit */
+#define TD_ES 0x00008000 /* Error Summary */
+#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */
+#define TD_LO 0x00000800 /* Loss Of Carrier */
+#define TD_NC 0x00000400 /* No Carrier */
+#define TD_LC 0x00000200 /* Late Collision */
+#define TD_EC 0x00000100 /* Excessive Collisions */
+#define TD_HF 0x00000080 /* Heartbeat Fail */
+#define TD_CC 0x00000078 /* Collision Counter */
+#define TD_LF 0x00000004 /* Link Fail */
+#define TD_UF 0x00000002 /* Underflow Error */
+#define TD_DE 0x00000001 /* Deferred */
+
+#define TD_IC 0x80000000 /* Interrupt On Completion */
+#define TD_LS 0x40000000 /* Last Segment */
+#define TD_FS 0x20000000 /* First Segment */
+#define TD_FT1 0x10000000 /* Filtering Type */
+#define TD_SET 0x08000000 /* Setup Packet */
+#define TD_AC 0x04000000 /* Add CRC Disable */
+#define TD_TER 0x02000000 /* Transmit End Of Ring */
+#define TD_TCH 0x01000000 /* Second Address Chained */
+#define TD_DPD 0x00800000 /* Disabled Padding */
+#define TD_FT0 0x00400000 /* Filtering Type */
+#define TD_RBS2 0x003ff800 /* Buffer 2 Size */
+#define TD_RBS1 0x000007ff /* Buffer 1 Size */
+
+#define PERFECT_F 0x00000000
+#define HASH_F TD_FT0
+#define INVERSE_F TD_FT1
+#define HASH_O_F TD_FT1 | TD_FT0
+
+/*
+** Media / mode state machine definitions
+*/
+#define NC 0x0000 /* No Connection */
+#define TP 0x0001 /* 10Base-T */
+#define TP_NW 0x0002 /* 10Base-T with Nway */
+#define BNC 0x0004 /* Thinwire */
+#define AUI 0x0008 /* Thickwire */
+#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */
+#define ANS 0x0020 /* Intermediate AutoNegotiation State */
+#define EXT_SIA 0x0400 /* external SIA (as on DEC MULTIA) */
+
+#define _10Mb 0x0040 /* 10Mb/s Ethernet */
+#define _100Mb 0x0080 /* 100Mb/s Ethernet */
+#define SYM_WAIT 0x0100 /* Wait for SYM_LINK */
+#define INIT 0x0200 /* Initial state */
+
+#define AUTO 0x4000 /* Auto sense the media or speed */
+
+/*
+** Miscellaneous
+*/
+#define PCI 0
+#define EISA 1
+
+#define HASH_TABLE_LEN 512 /* Bits */
+#define HASH_BITS 0x01ff /* 9 LS bits */
+
+#define SETUP_FRAME_LEN 192 /* Bytes */
+#define IMPERF_PA_OFFSET 156 /* Bytes */
+
+#define POLL_DEMAND 1
+
+#define LOST_MEDIA_THRESHOLD 3
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define DE4X5_STRLEN 8
+
+/*
+** Address Filtering Modes
+*/
+#define PERFECT 0 /* 16 perfect physical addresses */
+#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */
+#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */
+#define ALL_HASH 3 /* Hashes all physical & multicast addrs */
+
+#define ALL 0 /* Clear out all the setup frame */
+#define PHYS_ADDR_ONLY 1 /* Update the physical address only */
+
+/*
+** Booleans
+*/
+#define NO 0
+#define FALSE 0
+
+#define YES !0
+#define TRUE !0
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define DE4X5IOCTL SIOCDEVPRIVATE
+
+struct de4x5_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
+#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
+#define DE4X5_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define DE4X5_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define DE4X5_GET_MCA 0x06 /* Get the multicast address table */
+#define DE4X5_SET_MCA 0x07 /* Set a multicast address */
+#define DE4X5_CLR_MCA 0x08 /* Clear all multicast addresses */
+#define DE4X5_MCA_EN 0x09 /* Enable pass all multicast addressing */
+#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */
+#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */
+#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */
+#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */
diff --git a/i386/i386at/gpl/linux/net/de600.c b/i386/i386at/gpl/linux/net/de600.c
new file mode 100644
index 00000000..256759df
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/de600.c
@@ -0,0 +1,853 @@
+static const char *version =
+ "de600.c: $Revision: 1.1.1.1 $, Bjorn Ekwall (bj0rn@blox.se)\n";
+/*
+ * de600.c
+ *
+ * Linux driver for the D-Link DE-600 Ethernet pocket adapter.
+ *
+ * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall
+ * The Author may be reached as bj0rn@blox.se
+ *
+ * Based on adapter information gathered from DE600.ASM by D-Link Inc.,
+ * as included on disk C in the v.2.11 of PC/TCP from FTP Software.
+ * For DE600.asm:
+ * Portions (C) Copyright 1990 D-Link, Inc.
+ * Copyright, 1988-1992, Russell Nelson, Crynwr Software
+ *
+ * Adapted to the sample network driver core for linux,
+ * written by: Donald Becker <becker@super.org>
+ * C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
+ *
+ * compile-command:
+ * "gcc -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer \
+ * -m486 -c de600.c"
+ *
+ **************************************************************/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ **************************************************************/
+/* Add another "; SLOW_DOWN_IO" here if your adapter won't work OK: */
+#define DE600_SLOW_DOWN SLOW_DOWN_IO; SLOW_DOWN_IO; SLOW_DOWN_IO
+
+ /*
+ * If you still have trouble reading/writing to the adapter,
+ * modify the following "#define": (see <asm/io.h> for more info)
+#define REALLY_SLOW_IO
+ */
+#define SLOW_IO_BY_JUMPING /* Looks "better" than dummy write to port 0x80 :-) */
+
+/*
+ * If you want to enable automatic continuous checking for the DE600,
+ * keep this #define enabled.
+ * It doesn't cost much per packet, so I think it is worth it!
+ * If you disagree, comment away the #define, and live with it...
+ *
+ */
+#define CHECK_LOST_DE600
+
+/*
+ * Enable this #define if you want the adapter to do a "ifconfig down" on
+ * itself when we have detected that something is possibly wrong with it.
+ * The default behaviour is to retry with "adapter_init()" until success.
+ * This should be used for debugging purposes only.
+ * (Depends on the CHECK_LOST_DE600 above)
+ *
+ */
+#define SHUTDOWN_WHEN_LOST
+
+/*
+ * See comment at "de600_rspace()"!
+ * This is an *ugly* hack, but for now it achieves its goal of
+ * faking a TCP flow-control that will not flood the poor DE600.
+ *
+ * Tricks TCP to announce a small max window (max 2 fast packets please :-)
+ *
+ * Comment away at your own risk!
+ *
+ * Update: Use the more general per-device maxwindow parameter instead.
+ */
+#undef FAKE_SMALL_MAX
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifdef DE600_DEBUG
+#define PRINTK(x) if (de600_debug >= 2) printk x
+#else
+#define DE600_DEBUG 0
+#define PRINTK(x) /**/
+#endif
+unsigned int de600_debug = DE600_DEBUG;
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <linux/in.h>
+#include <linux/ptrace.h>
+#include <asm/system.h>
+#include <linux/errno.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#ifdef FAKE_SMALL_MAX
+static unsigned long de600_rspace(struct sock *sk);
+#include <net/sock.h>
+#endif
+
+#define netstats enet_statistics
+typedef unsigned char byte;
+
+/**************************************************
+ * *
+ * Definition of D-Link Ethernet Pocket adapter *
+ * *
+ **************************************************/
+/*
+ * D-Link Ethernet pocket adapter ports
+ */
+/*
+ * OK, so I'm cheating, but there are an awful lot of
+ * reads and writes in order to get anything in and out
+ * of the DE-600 with 4 bits at a time in the parallel port,
+ * so every saved instruction really helps :-)
+ *
+ * That is, I don't care what the device struct says
+ * but hope that Space.c will keep the rest of the drivers happy.
+ */
+#ifndef DE600_IO
+#define DE600_IO 0x378
+#endif
+
+#define DATA_PORT (DE600_IO)
+#define STATUS_PORT (DE600_IO + 1)
+#define COMMAND_PORT (DE600_IO + 2)
+
+#ifndef DE600_IRQ
+#define DE600_IRQ 7
+#endif
+/*
+ * It really should look like this, and autoprobing as well...
+ *
+#define DATA_PORT (dev->base_addr + 0)
+#define STATUS_PORT (dev->base_addr + 1)
+#define COMMAND_PORT (dev->base_addr + 2)
+#define DE600_IRQ dev->irq
+ */
+
+/*
+ * D-Link COMMAND_PORT commands
+ */
+#define SELECT_NIC 0x04 /* select Network Interface Card */
+#define SELECT_PRN 0x1c /* select Printer */
+#define NML_PRN 0xec /* normal Printer situation */
+#define IRQEN 0x10 /* enable IRQ line */
+
+/*
+ * D-Link STATUS_PORT
+ */
+#define RX_BUSY 0x80
+#define RX_GOOD 0x40
+#define TX_FAILED16 0x10
+#define TX_BUSY 0x08
+
+/*
+ * D-Link DATA_PORT commands
+ * command in low 4 bits
+ * data in high 4 bits
+ * select current data nibble with HI_NIBBLE bit
+ */
+#define WRITE_DATA 0x00 /* write memory */
+#define READ_DATA 0x01 /* read memory */
+#define STATUS 0x02 /* read status register */
+#define COMMAND 0x03 /* write command register (see COMMAND below) */
+#define NULL_COMMAND 0x04 /* null command */
+#define RX_LEN 0x05 /* read received packet length */
+#define TX_ADDR 0x06 /* set adapter transmit memory address */
+#define RW_ADDR 0x07 /* set adapter read/write memory address */
+#define HI_NIBBLE 0x08 /* read/write the high nibble of data,
+ or-ed with rest of command */
+
+/*
+ * command register, accessed through DATA_PORT with low bits = COMMAND
+ */
+#define RX_ALL 0x01 /* PROMISCUOUS */
+#define RX_BP 0x02 /* default: BROADCAST & PHYSICAL ADDRESS */
+#define RX_MBP 0x03 /* MULTICAST, BROADCAST & PHYSICAL ADDRESS */
+
+#define TX_ENABLE 0x04 /* bit 2 */
+#define RX_ENABLE 0x08 /* bit 3 */
+
+#define RESET 0x80 /* set bit 7 high */
+#define STOP_RESET 0x00 /* set bit 7 low */
+
+/*
+ * data to command register
+ * (high 4 bits in write to DATA_PORT)
+ */
+#define RX_PAGE2_SELECT 0x10 /* bit 4, only 2 pages to select */
+#define RX_BASE_PAGE 0x20 /* bit 5, always set when specifying RX_ADDR */
+#define FLIP_IRQ 0x40 /* bit 6 */
+
+/*
+ * D-Link adapter internal memory:
+ *
+ * 0-2K 1:st transmit page (send from pointer up to 2K)
+ * 2-4K 2:nd transmit page (send from pointer up to 4K)
+ *
+ * 4-6K 1:st receive page (data from 4K upwards)
+ * 6-8K 2:nd receive page (data from 6K upwards)
+ *
+ * 8K+ Adapter ROM (contains magic code and last 3 bytes of Ethernet address)
+ */
+#define MEM_2K 0x0800 /* 2048 */
+#define MEM_4K 0x1000 /* 4096 */
+#define MEM_6K 0x1800 /* 6144 */
+#define NODE_ADDRESS 0x2000 /* 8192 */
+
+#define RUNT 60 /* Too small Ethernet packet */
+
+/**************************************************
+ * *
+ * End of definition *
+ * *
+ **************************************************/
+
+/*
+ * Index to functions, as function prototypes.
+ */
+/* Routines used internally. (See "convenience macros") */
+static byte de600_read_status(struct device *dev);
+static byte de600_read_byte(unsigned char type, struct device *dev);
+
+/* Put in the device structure. */
+static int de600_open(struct device *dev);
+static int de600_close(struct device *dev);
+static struct netstats *get_stats(struct device *dev);
+static int de600_start_xmit(struct sk_buff *skb, struct device *dev);
+
+/* Dispatch from interrupts. */
+static void de600_interrupt(int irq, struct pt_regs *regs);
+static int de600_tx_intr(struct device *dev, int irq_status);
+static void de600_rx_intr(struct device *dev);
+
+/* Initialization */
+static void trigger_interrupt(struct device *dev);
+int de600_probe(struct device *dev);
+static int adapter_init(struct device *dev);
+
+/*
+ * D-Link driver variables:
+ */
+static volatile int rx_page = 0;
+
+#define TX_PAGES 2
+static volatile int tx_fifo[TX_PAGES];
+static volatile int tx_fifo_in = 0;
+static volatile int tx_fifo_out = 0;
+static volatile int free_tx_pages = TX_PAGES;
+static int was_down = 0;
+
+/*
+ * Convenience macros/functions for D-Link adapter
+ */
+
+#define select_prn() outb_p(SELECT_PRN, COMMAND_PORT); DE600_SLOW_DOWN
+#define select_nic() outb_p(SELECT_NIC, COMMAND_PORT); DE600_SLOW_DOWN
+
+/* Thanks for hints from Mark Burton <markb@ordern.demon.co.uk> */
+#define de600_put_byte(data) ( \
+ outb_p(((data) << 4) | WRITE_DATA , DATA_PORT), \
+ outb_p(((data) & 0xf0) | WRITE_DATA | HI_NIBBLE, DATA_PORT))
+
+/*
+ * The first two outb_p()'s below could perhaps be deleted if there
+ * were more delay in the last two. Not certain about it yet...
+ */
+#define de600_put_command(cmd) ( \
+ outb_p(( rx_page << 4) | COMMAND , DATA_PORT), \
+ outb_p(( rx_page & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT), \
+ outb_p(((rx_page | cmd) << 4) | COMMAND , DATA_PORT), \
+ outb_p(((rx_page | cmd) & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT))
+
+#define de600_setup_address(addr,type) ( \
+ outb_p((((addr) << 4) & 0xf0) | type , DATA_PORT), \
+ outb_p(( (addr) & 0xf0) | type | HI_NIBBLE, DATA_PORT), \
+ outb_p((((addr) >> 4) & 0xf0) | type , DATA_PORT), \
+ outb_p((((addr) >> 8) & 0xf0) | type | HI_NIBBLE, DATA_PORT))
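+
+/*
+ * Illustration (added comment, not from the original source): every
+ * value crosses the printer port as two 4-bit halves. A write such as
+ * de600_setup_address(MEM_6K, RW_ADDR) sends the 16-bit address 0x1800
+ * as four nibbles, lowest first, each in the high 4 bits of the port
+ * byte with the command type in the low 4 bits:
+ *
+ *   ((0x1800 << 4) & 0xf0) | 0x07        = 0x07   (address bits  3-0)
+ *   ( 0x1800       & 0xf0) | 0x07 | 0x08 = 0x0f   (address bits  7-4)
+ *   ((0x1800 >> 4) & 0xf0) | 0x07        = 0x87   (address bits 11-8)
+ *   ((0x1800 >> 8) & 0xf0) | 0x07 | 0x08 = 0x1f   (address bits 15-12)
+ *
+ * Reads work the same way in de600_read_byte() below: the low nibble
+ * arrives in bits 7-4 of STATUS_PORT and is shifted down, then the
+ * HI_NIBBLE access supplies the upper half in place.
+ */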
+
+#define rx_page_adr() ((rx_page & RX_PAGE2_SELECT)?(MEM_6K):(MEM_4K))
+
+/* Flip bit, only 2 pages */
+#define next_rx_page() (rx_page ^= RX_PAGE2_SELECT)
+
+#define tx_page_adr(a) (((a) + 1) * MEM_2K)
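+
+/*
+ * Illustration (added comment, not from the original source): with
+ * MEM_2K = 0x0800, tx_page_adr(0) = 0x0800 and tx_page_adr(1) = 0x1000,
+ * i.e. the end of each 2K transmit page. de600_start_xmit() below
+ * copies a len-byte packet to tx_page_adr(page) - len, so the data ends
+ * exactly at the page boundary (a 100-byte frame on page 0 starts at
+ * 0x0800 - 100 = 0x079c). rx_page_adr() resolves to MEM_4K or MEM_6K
+ * depending on the RX_PAGE2_SELECT bit in rx_page.
+ */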
+
+static inline byte
+de600_read_status(struct device *dev)
+{
+ byte status;
+
+ outb_p(STATUS, DATA_PORT);
+ status = inb(STATUS_PORT);
+ outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT);
+
+ return status;
+}
+
+static inline byte
+de600_read_byte(unsigned char type, struct device *dev) { /* dev used by macros */
+ byte lo;
+
+ (void)outb_p((type), DATA_PORT);
+ lo = ((unsigned char)inb(STATUS_PORT)) >> 4;
+ (void)outb_p((type) | HI_NIBBLE, DATA_PORT);
+ return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1).
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+de600_open(struct device *dev)
+{
+ if (request_irq(DE600_IRQ, de600_interrupt, 0, "de600")) {
+ printk ("%s: unable to get IRQ %d\n", dev->name, DE600_IRQ);
+ return 1;
+ }
+ irq2dev_map[DE600_IRQ] = dev;
+
+ MOD_INC_USE_COUNT;
+ dev->start = 1;
+ if (adapter_init(dev)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * The inverse routine to de600_open().
+ */
+static int
+de600_close(struct device *dev)
+{
+ select_nic();
+ rx_page = 0;
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+ de600_put_command(0);
+ select_prn();
+
+ if (dev->start) {
+ free_irq(DE600_IRQ);
+ irq2dev_map[DE600_IRQ] = NULL;
+ dev->start = 0;
+ MOD_DEC_USE_COUNT;
+ }
+ return 0;
+}
+
+static struct netstats *
+get_stats(struct device *dev)
+{
+ return (struct netstats *)(dev->priv);
+}
+
+static inline void
+trigger_interrupt(struct device *dev)
+{
+ de600_put_command(FLIP_IRQ);
+ select_prn();
+ DE600_SLOW_DOWN;
+ select_nic();
+ de600_put_command(0);
+}
+
+/*
+ * Copy a buffer to the adapter transmit page memory.
+ * Start sending.
+ */
+static int
+de600_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ int transmit_from;
+ int len;
+ int tickssofar;
+ byte *buffer = skb->data;
+
+ /*
+ * If some higher layer thinks we've missed a
+ * tx-done interrupt we are passed NULL.
+ * Caution: dev_tint() handles the cli()/sti() itself.
+ */
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
+ tickssofar = jiffies - dev->trans_start;
+
+ if (tickssofar < 5)
+ return 1;
+
+ /* else */
+ printk("%s: transmit timed out (%d), %s?\n",
+ dev->name,
+ tickssofar,
+ "network cable problem"
+ );
+ /* Restart the adapter. */
+ if (adapter_init(dev)) {
+ return 1;
+ }
+ }
+
+ /* Start real output */
+ PRINTK(("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages));
+
+ if ((len = skb->len) < RUNT)
+ len = RUNT;
+
+ cli();
+ select_nic();
+ tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len;
+ tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */
+
+#ifdef CHECK_LOST_DE600
+ /* This costs about 40 instructions per packet... */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ de600_read_byte(READ_DATA, dev);
+ if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
+ if (adapter_init(dev)) {
+ sti();
+ return 1;
+ }
+ }
+#endif
+
+ de600_setup_address(transmit_from, RW_ADDR);
+ for ( ; len > 0; --len, ++buffer)
+ de600_put_byte(*buffer);
+
+ if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
+ dev->trans_start = jiffies;
+ dev->tbusy = 0; /* allow more packets into adapter */
+ /* Send page and generate a faked interrupt */
+ de600_setup_address(transmit_from, TX_ADDR);
+ de600_put_command(TX_ENABLE);
+ }
+ else {
+ dev->tbusy = !free_tx_pages;
+ select_prn();
+ }
+
+ sti(); /* interrupts back on */
+
+#ifdef FAKE_SMALL_MAX
+ /* This will "patch" the socket TCP proto at an early moment */
+ if (skb->sk && (skb->sk->protocol == IPPROTO_TCP) &&
+ (skb->sk->prot->rspace != &de600_rspace))
+ skb->sk->prot->rspace = de600_rspace; /* Ugh! */
+#endif
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+static void
+de600_interrupt(int irq, struct pt_regs * regs)
+{
+ struct device *dev = irq2dev_map[irq];
+ byte irq_status;
+ int retrig = 0;
+ int boguscount = 0;
+
+ /* This might just as well be deleted now, no crummy drivers present :-) */
+ if ((dev == NULL) || (dev->start == 0) || (DE600_IRQ != irq)) {
+ printk("%s: bogus interrupt %d\n", dev?dev->name:"DE-600", irq);
+ return;
+ }
+
+ dev->interrupt = 1;
+ select_nic();
+ irq_status = de600_read_status(dev);
+
+ do {
+ PRINTK(("de600_interrupt (%02X)\n", irq_status));
+
+ if (irq_status & RX_GOOD)
+ de600_rx_intr(dev);
+ else if (!(irq_status & RX_BUSY))
+ de600_put_command(RX_ENABLE);
+
+ /* Any transmission in progress? */
+ if (free_tx_pages < TX_PAGES)
+ retrig = de600_tx_intr(dev, irq_status);
+ else
+ retrig = 0;
+
+ irq_status = de600_read_status(dev);
+ } while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) );
+ /*
+ * Yeah, it _looks_ like busy waiting, smells like busy waiting
+ * and I know it's not PC, but please, it will only occur once
+ * in a while and then only for a loop or so (< 1ms for sure!)
+ */
+
+ /* Enable adapter interrupts */
+ dev->interrupt = 0;
+ select_prn();
+
+ if (retrig)
+ trigger_interrupt(dev);
+
+ sti();
+ return;
+}
+
+static int
+de600_tx_intr(struct device *dev, int irq_status)
+{
+ /*
+ * Returns 1 if tx still not done
+ */
+
+ mark_bh(NET_BH);
+ /* Check if current transmission is done yet */
+ if (irq_status & TX_BUSY)
+ return 1; /* tx not done, try again */
+
+ /* else */
+ /* If last transmission OK then bump fifo index */
+ if (!(irq_status & TX_FAILED16)) {
+ tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES;
+ ++free_tx_pages;
+ ((struct netstats *)(dev->priv))->tx_packets++;
+ dev->tbusy = 0;
+ }
+
+ /* More to send, or resend last packet? */
+ if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) {
+ dev->trans_start = jiffies;
+ de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR);
+ de600_put_command(TX_ENABLE);
+ return 1;
+ }
+ /* else */
+
+ return 0;
+}
+
+/*
+ * We have a good packet, get it out of the adapter.
+ */
+static void
+de600_rx_intr(struct device *dev)
+{
+ struct sk_buff *skb;
+ int i;
+ int read_from;
+ int size;
+ register unsigned char *buffer;
+
+ cli();
+ /* Get size of received packet */
+ size = de600_read_byte(RX_LEN, dev); /* low byte */
+ size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */
+ size -= 4; /* Ignore trailing 4 CRC-bytes */
+
+ /* Tell adapter where to store next incoming packet, enable receiver */
+ read_from = rx_page_adr();
+ next_rx_page();
+ de600_put_command(RX_ENABLE);
+ sti();
+
+ if ((size < 32) || (size > 1535)) {
+ printk("%s: Bogus packet size %d.\n", dev->name, size);
+ if (size > 10000)
+ adapter_init(dev);
+ return;
+ }
+
+ skb = dev_alloc_skb(size+2);
+ sti();
+ if (skb == NULL) {
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, size);
+ return;
+ }
+ /* else */
+
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align */
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ buffer = skb_put(skb,size);
+
+ /* copy the packet into the buffer */
+ de600_setup_address(read_from, RW_ADDR);
+ for (i = size; i > 0; --i, ++buffer)
+ *buffer = de600_read_byte(READ_DATA, dev);
+
+ ((struct netstats *)(dev->priv))->rx_packets++; /* count all receives */
+
+ skb->protocol=eth_type_trans(skb,dev);
+
+ netif_rx(skb);
+ /*
+ * If any worth-while packets have been received, netif_rx()
+ * has done a mark_bh(INET_BH) for us and will work on them
+ * when we get to the bottom-half routine.
+ */
+}
+
+int
+de600_probe(struct device *dev)
+{
+ int i;
+ static struct netstats de600_netstats;
+ /*dev->priv = kmalloc(sizeof(struct netstats), GFP_KERNEL);*/
+
+ printk("%s: D-Link DE-600 pocket adapter", dev->name);
+ /* Alpha testers must have the version number to report bugs. */
+ if (de600_debug > 1)
+ printk(version);
+
+ /* probe for adapter */
+ rx_page = 0;
+ select_nic();
+ (void)de600_read_status(dev);
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+ if (de600_read_status(dev) & 0xf0) {
+ printk(": not at I/O %#3x.\n", DATA_PORT);
+ return ENODEV;
+ }
+
+ /*
+ * Maybe we found one,
+ * have to check if it is a D-Link DE-600 adapter...
+ */
+
+ /* Get the adapter ethernet address from the ROM */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[i] = de600_read_byte(READ_DATA, dev);
+ dev->broadcast[i] = 0xff;
+ }
+
+ /* Check magic code */
+ if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) {
+ /* OK, install real address */
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x80;
+ dev->dev_addr[2] = 0xc8;
+ dev->dev_addr[3] &= 0x0f;
+ dev->dev_addr[3] |= 0x70;
+ } else {
+ printk(" not identified in the printer port\n");
+ return ENODEV;
+ }
+
+#if 0 /* Not yet */
+ if (check_region(DE600_IO, 3)) {
+ printk(", port 0x%x busy\n", DE600_IO);
+ return EBUSY;
+ }
+#endif
+ request_region(DE600_IO, 3, "de600");
+
+ printk(", Ethernet Address: %02X", dev->dev_addr[0]);
+ for (i = 1; i < ETH_ALEN; i++)
+ printk(":%02X",dev->dev_addr[i]);
+ printk("\n");
+
+ /* Initialize the device structure. */
+ /*dev->priv = kmalloc(sizeof(struct netstats), GFP_KERNEL);*/
+ dev->priv = &de600_netstats;
+
+ memset(dev->priv, 0, sizeof(struct netstats));
+ dev->get_stats = get_stats;
+
+ dev->open = de600_open;
+ dev->stop = de600_close;
+ dev->hard_start_xmit = &de600_start_xmit;
+
+ ether_setup(dev);
+
+ dev->flags&=~IFF_MULTICAST;
+
+ select_prn();
+ return 0;
+}
+
+static int
+adapter_init(struct device *dev)
+{
+ int i;
+ long flags;
+
+ save_flags(flags);
+ cli();
+
+ select_nic();
+ rx_page = 0; /* used by RESET */
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+#ifdef CHECK_LOST_DE600
+ /* Check if it is still there... */
+ /* Get some bytes of the adapter ethernet address from the ROM */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ de600_read_byte(READ_DATA, dev);
+ if ((de600_read_byte(READ_DATA, dev) != 0xde) ||
+ (de600_read_byte(READ_DATA, dev) != 0x15)) {
+ /* was: if (de600_read_status(dev) & 0xf0) { */
+ printk("Something has happened to the DE-600! Please check it"
+#ifdef SHUTDOWN_WHEN_LOST
+ " and do a new ifconfig"
+#endif /* SHUTDOWN_WHEN_LOST */
+ "!\n");
+#ifdef SHUTDOWN_WHEN_LOST
+ /* Goodbye, cruel world... */
+ dev->flags &= ~IFF_UP;
+ de600_close(dev);
+#endif /* SHUTDOWN_WHEN_LOST */
+ was_down = 1;
+ dev->tbusy = 1; /* Transmit busy... */
+ restore_flags(flags);
+ return 1; /* failed */
+ }
+#endif /* CHECK_LOST_DE600 */
+ if (was_down) {
+ printk("Thanks, I feel much better now!\n");
+ was_down = 0;
+ }
+
+ dev->tbusy = 0; /* Transmit busy... */
+ dev->interrupt = 0;
+ tx_fifo_in = 0;
+ tx_fifo_out = 0;
+ free_tx_pages = TX_PAGES;
+
+ /* set the ether address. */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ for (i = 0; i < ETH_ALEN; i++)
+ de600_put_byte(dev->dev_addr[i]);
+
+ /* where to start saving incoming packets */
+ rx_page = RX_BP | RX_BASE_PAGE;
+ de600_setup_address(MEM_4K, RW_ADDR);
+ /* Enable receiver */
+ de600_put_command(RX_ENABLE);
+ select_prn();
+ restore_flags(flags);
+
+ return 0; /* OK */
+}
+
+#ifdef FAKE_SMALL_MAX
+/*
+ * The new router code (coming soon 8-) ) will fix this properly.
+ */
+#define DE600_MIN_WINDOW 1024
+#define DE600_MAX_WINDOW 2048
+#define DE600_TCP_WINDOW_DIFF 1024
+/*
+ * Copied from "net/inet/sock.c"
+ *
+ * Sets a lower max receive window in order to achieve <= 2
+ * packets arriving at the adapter in fast succession.
+ * (No way that a DE-600 can keep up with a net saturated
+ * with packets homing in on it :-( )
+ *
+ * Since there are only 2 receive buffers in the DE-600
+ * and it takes some time to copy from the adapter,
+ * this is absolutely necessary for any TCP performance whatsoever!
+ *
+ * Note that the returned window info will never be smaller than
+ * DE600_MIN_WINDOW, i.e. 1024
+ * This differs from the standard function, that can return an
+ * arbitrarily small window!
+ */
+#define min(a,b) ((a)<(b)?(a):(b))
+static unsigned long
+de600_rspace(struct sock *sk)
+{
+ int amt;
+
+ if (sk != NULL) {
+/*
+ * Hack! You might want to play with commenting away the following line,
+ * if you know what you are doing!
+ sk->max_unacked = DE600_MAX_WINDOW - DE600_TCP_WINDOW_DIFF;
+ */
+
+ if (sk->rmem_alloc >= sk->rcvbuf-2*DE600_MIN_WINDOW) return(0);
+ amt = min((sk->rcvbuf-sk->rmem_alloc)/2/*-DE600_MIN_WINDOW*/, DE600_MAX_WINDOW);
+ if (amt < 0) return(0);
+ return(amt);
+ }
+ return(0);
+}
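+
+/*
+ * Worked example (illustrative numbers, not from the original source):
+ * with sk->rcvbuf = 32768 and sk->rmem_alloc = 0, de600_rspace()
+ * returns min((32768 - 0) / 2, DE600_MAX_WINDOW) = min(16384, 2048)
+ * = 2048. Once rmem_alloc reaches rcvbuf - 2*DE600_MIN_WINDOW = 30720
+ * it returns 0, throttling the sender until the receive buffers drain.
+ */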
+#endif
+
+#ifdef MODULE
+static char nullname[8];
+static struct device de600_dev = {
+ nullname, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, de600_probe };
+
+int
+init_module(void)
+{
+ if (register_netdev(&de600_dev) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&de600_dev);
+ release_region(DE600_IO, 3);
+}
+#endif /* MODULE */
+/*
+ * Local variables:
+ * kernel-compile-command: "gcc -D__KERNEL__ -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de600.c"
+ * module-compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de600.c"
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de600.c"
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/de620.c b/i386/i386at/gpl/linux/net/de620.c
new file mode 100644
index 00000000..2b17c390
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/de620.c
@@ -0,0 +1,1045 @@
+/*
+ * de620.c $Revision: 1.1.1.1 $ BETA
+ *
+ *
+ * Linux driver for the D-Link DE-620 Ethernet pocket adapter.
+ *
+ * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall <bj0rn@blox.se>
+ *
+ * Based on adapter information gathered from DOS packetdriver
+ * sources from D-Link Inc: (Special thanks to Henry Ngai of D-Link.)
+ * Portions (C) Copyright D-Link SYSTEM Inc. 1991, 1992
+ * Copyright, 1988, Russell Nelson, Crynwr Software
+ *
+ * Adapted to the sample network driver core for linux,
+ * written by: Donald Becker <becker@super.org>
+ * (Now at <becker@cesdis.gsfc.nasa.gov>)
+ *
+ * Valuable assistance from:
+ * J. Joshua Kopper <kopper@rtsg.mot.com>
+ * Olav Kvittem <Olav.Kvittem@uninett.no>
+ * Germano Caronni <caronni@nessie.cs.id.ethz.ch>
+ * Jeremy Fitzhardinge <jeremy@suite.sw.oz.au>
+ *
+ *****************************************************************************/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+static const char *version =
+ "de620.c: $Revision: 1.1.1.1 $, Bjorn Ekwall <bj0rn@blox.se>\n";
+
+/***********************************************************************
+ *
+ * "Tuning" section.
+ *
+ * Compile-time options: (see below for descriptions)
+ * -DDE620_IO=0x378 (lpt1)
+ * -DDE620_IRQ=7 (lpt1)
+ * -DDE620_DEBUG=...
+ * -DSHUTDOWN_WHEN_LOST
+ * -DCOUNT_LOOPS
+ * -DLOWSPEED
+ * -DREAD_DELAY
+ * -DWRITE_DELAY
+ */
+
+/*
+ * This driver assumes that the printer port is a "normal",
+ * dumb, uni-directional port!
+ * If your port is "fancy" in any way, please try to set it to "normal"
+ * with your BIOS setup. I have no access to machines with bi-directional
+ * ports, so I can't test such a driver :-(
+ * (Yes, I _know_ it is possible to use DE620 with bidirectional ports...)
+ *
+ * There are some clones of DE620 out there, with different names.
+ * If the current driver does not recognize a clone, try to change
+ * the following #define to:
+ *
+ * #define DE620_CLONE 1
+ */
+#define DE620_CLONE 0
+
+/*
+ * If the adapter has problems with high speeds, enable this #define;
+ * otherwise full printer-port speed will be attempted.
+ *
+ * You can tune the READ_DELAY/WRITE_DELAY below if you enable LOWSPEED
+ *
+#define LOWSPEED
+ */
+
+#ifndef READ_DELAY
+#define READ_DELAY 100 /* adapter internal read delay in 100ns units */
+#endif
+
+#ifndef WRITE_DELAY
+#define WRITE_DELAY 100 /* adapter internal write delay in 100ns units */
+#endif
+
+/*
+ * Enable this #define if you want the adapter to do a "ifconfig down" on
+ * itself when we have detected that something is possibly wrong with it.
+ * The default behaviour is to retry with "adapter_init()" until success.
+ * This should be used for debugging purposes only.
+ *
+#define SHUTDOWN_WHEN_LOST
+ */
+
+/*
+ * Enable debugging by "-DDE620_DEBUG=3" when compiling,
+ * OR in "./CONFIG"
+ * OR by enabling the following #define
+ *
+ * use 0 for production, 1 for verification, >2 for debug
+ *
+#define DE620_DEBUG 3
+ */
+
+#ifdef LOWSPEED
+/*
+ * Enable this #define if you want to see debugging output that shows how long
+ * we have to wait before the DE-620 is ready for the next read/write/command.
+ *
+#define COUNT_LOOPS
+ */
+#endif
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <linux/in.h>
+#include <linux/ptrace.h>
+#include <asm/system.h>
+#include <linux/errno.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* Constant definitions for the DE-620 registers, commands and bits */
+#include "de620.h"
+
+#define netstats enet_statistics
+typedef unsigned char byte;
+
+/*******************************************************
+ * *
+ * Definition of D-Link DE-620 Ethernet Pocket adapter *
+ * See also "de620.h" *
+ * *
+ *******************************************************/
+#ifndef DE620_IO /* Compile-time configurable */
+#define DE620_IO 0x378
+#endif
+
+#ifndef DE620_IRQ /* Compile-time configurable */
+#define DE620_IRQ 7
+#endif
+
+#define DATA_PORT (dev->base_addr)
+#define STATUS_PORT (dev->base_addr + 1)
+#define COMMAND_PORT (dev->base_addr + 2)
+
+#define RUNT 60 /* Too small Ethernet packet */
+#define GIANT 1514 /* largest legal size packet, no fcs */
+
+#ifdef DE620_DEBUG /* Compile-time configurable */
+#define PRINTK(x) if (de620_debug >= 2) printk x
+#else
+#define DE620_DEBUG 0
+#define PRINTK(x) /**/
+#endif
+
+
+/*
+ * Force media with insmod:
+ * insmod de620.o bnc=1
+ * or
+ * insmod de620.o utp=1
+ *
+ * Force io and/or irq with insmod:
+ * insmod de620.o io=0x378 irq=7
+ *
+ * Make a clone skip the Ethernet-address range check:
+ * insmod de620.o clone=1
+ */
+static int bnc = 0;
+static int utp = 0;
+static int io = DE620_IO;
+static int irq = DE620_IRQ;
+static int clone = DE620_CLONE;
+
+static unsigned int de620_debug = DE620_DEBUG;
+
+/***********************************************
+ * *
+ * Index to functions, as function prototypes. *
+ * *
+ ***********************************************/
+
+/*
+ * Routines used internally. (See also "convenience macros.. below")
+ */
+
+/* Put in the device structure. */
+static int de620_open(struct device *);
+static int de620_close(struct device *);
+static struct netstats *get_stats(struct device *);
+static void de620_set_multicast_list(struct device *);
+static int de620_start_xmit(struct sk_buff *, struct device *);
+
+/* Dispatch from interrupts. */
+static void de620_interrupt(int, struct pt_regs *);
+static int de620_rx_intr(struct device *);
+
+/* Initialization */
+static int adapter_init(struct device *);
+int de620_probe(struct device *);
+static int read_eeprom(struct device *);
+
+
+/*
+ * D-Link driver variables:
+ */
+#define SCR_DEF NIBBLEMODE |INTON | SLEEP | AUTOTX
+#define TCR_DEF RXPB /* not used: | TXSUCINT | T16INT */
+#define DE620_RX_START_PAGE 12 /* 12 pages (=3k) reserved for tx */
+#define DEF_NIC_CMD IRQEN | ICEN | DS1
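+
+/*
+ * For reference (added comment; values taken from de620.h): with the
+ * bit definitions there, SCR_DEF = 0x00|0x04|0x40|0x01 = 0x45,
+ * TCR_DEF = RXPB = 0x02 (receive physical + broadcast) and
+ * DEF_NIC_CMD = 0x10|0x04|0x01 = 0x15. The 12 reserved pages are
+ * 12 * 256 = 3072 bytes: two 6-page transmit buffers of 1536 bytes,
+ * each large enough for a maximum 1514-byte (GIANT) frame.
+ */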
+
+static volatile byte NIC_Cmd;
+static volatile byte next_rx_page;
+static byte first_rx_page;
+static byte last_rx_page;
+static byte EIPRegister;
+
+static struct nic {
+ byte NodeID[6];
+ byte RAM_Size;
+ byte Model;
+ byte Media;
+ byte SCR;
+} nic_data;
+
+/**********************************************************
+ * *
+ * Convenience macros/functions for D-Link DE-620 adapter *
+ * *
+ **********************************************************/
+#define de620_tx_buffs(dd) (inb(STATUS_PORT) & (TXBF0 | TXBF1))
+#define de620_flip_ds(dd) NIC_Cmd ^= DS0 | DS1; outb(NIC_Cmd, COMMAND_PORT);
+
+/* Check for ready-status, and return a nibble (high 4 bits) for data input */
+#ifdef COUNT_LOOPS
+static int tot_cnt;
+#endif
+static inline byte
+de620_ready(struct device *dev)
+{
+ byte value;
+ register short int cnt = 0;
+
+ while ((((value = inb(STATUS_PORT)) & READY) == 0) && (cnt <= 1000))
+ ++cnt;
+
+#ifdef COUNT_LOOPS
+ tot_cnt += cnt;
+#endif
+ return value & 0xf0; /* nibble */
+}
+
+static inline void
+de620_send_command(struct device *dev, byte cmd)
+{
+ de620_ready(dev);
+ if (cmd == W_DUMMY)
+ outb(NIC_Cmd, COMMAND_PORT);
+
+ outb(cmd, DATA_PORT);
+
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+ de620_ready(dev);
+ outb(NIC_Cmd, COMMAND_PORT);
+}
+
+static inline void
+de620_put_byte(struct device *dev, byte value)
+{
+ /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
+ de620_ready(dev);
+ outb(value, DATA_PORT);
+ de620_flip_ds(dev);
+}
+
+static inline byte
+de620_read_byte(struct device *dev)
+{
+ byte value;
+
+ /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
+ value = de620_ready(dev); /* High nibble */
+ de620_flip_ds(dev);
+ value |= de620_ready(dev) >> 4; /* Low nibble */
+ return value;
+}
+
+static inline void
+de620_write_block(struct device *dev, byte *buffer, int count)
+{
+#ifndef LOWSPEED
+ byte uflip = NIC_Cmd ^ (DS0 | DS1);
+ byte dflip = NIC_Cmd;
+#else /* LOWSPEED */
+#ifdef COUNT_LOOPS
+ int bytes = count;
+#endif /* COUNT_LOOPS */
+#endif /* LOWSPEED */
+
+#ifdef LOWSPEED
+#ifdef COUNT_LOOPS
+ tot_cnt = 0;
+#endif /* COUNT_LOOPS */
+ /* No further optimization useful, the limit is in the adapter. */
+ for ( ; count > 0; --count, ++buffer) {
+ de620_put_byte(dev,*buffer);
+ }
+ de620_send_command(dev,W_DUMMY);
+#ifdef COUNT_LOOPS
+ /* trial debug output: loops per byte in de620_ready() */
+ printk("WRITE(%d)\n", tot_cnt/((bytes?bytes:1)));
+#endif /* COUNT_LOOPS */
+#else /* not LOWSPEED */
+ for ( ; count > 0; count -=2) {
+ outb(*buffer++, DATA_PORT);
+ outb(uflip, COMMAND_PORT);
+ outb(*buffer++, DATA_PORT);
+ outb(dflip, COMMAND_PORT);
+ }
+ de620_send_command(dev,W_DUMMY);
+#endif /* LOWSPEED */
+}
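+
+/*
+ * Note (added for clarity, not in the original source): the
+ * non-LOWSPEED loop above consumes two bytes of 'buffer' per iteration
+ * while toggling the DS0|DS1 data strobes, so it assumes an even
+ * 'count'; de620_start_xmit() guarantees this by padding odd-length
+ * frames with one extra byte before calling de620_write_block().
+ */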
+
+static inline void
+de620_read_block(struct device *dev, byte *data, int count)
+{
+#ifndef LOWSPEED
+ byte value;
+ byte uflip = NIC_Cmd ^ (DS0 | DS1);
+ byte dflip = NIC_Cmd;
+#else /* LOWSPEED */
+#ifdef COUNT_LOOPS
+ int bytes = count;
+
+ tot_cnt = 0;
+#endif /* COUNT_LOOPS */
+#endif /* LOWSPEED */
+
+#ifdef LOWSPEED
+ /* No further optimization useful, the limit is in the adapter. */
+ while (count-- > 0) {
+ *data++ = de620_read_byte(dev);
+ de620_flip_ds(dev);
+ }
+#ifdef COUNT_LOOPS
+ /* trial debug output: loops per byte in de620_ready() */
+ printk("READ(%d)\n", tot_cnt/(2*(bytes?bytes:1)));
+#endif /* COUNT_LOOPS */
+#else /* not LOWSPEED */
+ while (count-- > 0) {
+ value = inb(STATUS_PORT) & 0xf0; /* High nibble */
+ outb(uflip, COMMAND_PORT);
+ *data++ = value | inb(STATUS_PORT) >> 4; /* Low nibble */
+ outb(dflip , COMMAND_PORT);
+ }
+#endif /* LOWSPEED */
+}
+
+static inline void
+de620_set_delay(struct device *dev)
+{
+ de620_ready(dev);
+ outb(W_DFR, DATA_PORT);
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+
+ de620_ready(dev);
+#ifdef LOWSPEED
+ outb(WRITE_DELAY, DATA_PORT);
+#else
+ outb(0, DATA_PORT);
+#endif
+ de620_flip_ds(dev);
+
+ de620_ready(dev);
+#ifdef LOWSPEED
+ outb(READ_DELAY, DATA_PORT);
+#else
+ outb(0, DATA_PORT);
+#endif
+ de620_flip_ds(dev);
+}
+
+static inline void
+de620_set_register(struct device *dev, byte reg, byte value)
+{
+ de620_ready(dev);
+ outb(reg, DATA_PORT);
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+
+ de620_put_byte(dev, value);
+}
+
+static inline byte
+de620_get_register(struct device *dev, byte reg)
+{
+ byte value;
+
+ de620_send_command(dev,reg);
+ value = de620_read_byte(dev);
+ de620_send_command(dev,W_DUMMY);
+
+ return value;
+}
+
+/*********************************************************************
+ *
+ * Open/initialize the board.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ *
+ */
+static int
+de620_open(struct device *dev)
+{
+ if (request_irq(dev->irq, de620_interrupt, 0, "de620")) {
+ printk ("%s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return 1;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ MOD_INC_USE_COUNT;
+ if (adapter_init(dev)) {
+ return 1;
+ }
+ dev->start = 1;
+ return 0;
+}
+
+/************************************************
+ *
+ * The inverse routine to de620_open().
+ *
+ */
+static int
+de620_close(struct device *dev)
+{
+ /* disable recv */
+ de620_set_register(dev, W_TCR, RXOFF);
+
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = NULL;
+
+ dev->start = 0;
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*********************************************
+ *
+ * Return current statistics
+ *
+ */
+static struct netstats *
+get_stats(struct device *dev)
+{
+ return (struct netstats *)(dev->priv);
+}
+
+/*********************************************
+ *
+ * Set or clear the multicast filter for this adaptor.
+ * (no real multicast implemented for the DE-620, but she can be promiscuous...)
+ *
+ */
+
+static void de620_set_multicast_list(struct device *dev)
+{
+ if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ { /* Enable promiscuous mode */
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. - AC
+ */
+ dev->flags|=IFF_PROMISC;
+
+ de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
+ }
+ else
+ { /* Disable promiscuous mode, use normal mode */
+ de620_set_register(dev, W_TCR, TCR_DEF);
+ }
+}
+
+/*******************************************************
+ *
+ * Copy a buffer to the adapter transmit page memory.
+ * Start sending.
+ */
+static int
+de620_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ unsigned long flags;
+ int len;
+ int tickssofar;
+ byte *buffer = skb->data;
+ byte using_txbuf;
+
+ /*
+ * If some higher layer thinks we've missed a
+ * tx-done interrupt we are passed NULL.
+ * Caution: dev_tint() handles the cli()/sti() itself.
+ */
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ using_txbuf = de620_tx_buffs(dev); /* Peek at the adapter */
+ dev->tbusy = (using_txbuf == (TXBF0 | TXBF1)); /* Boolean! */
+
+ if (dev->tbusy) { /* Do timeouts, to avoid hangs. */
+ tickssofar = jiffies - dev->trans_start;
+
+ if (tickssofar < 5)
+ return 1;
+
+ /* else */
+ printk("%s: transmit timed out (%d), %s?\n",
+ dev->name,
+ tickssofar,
+ "network cable problem"
+ );
+ /* Restart the adapter. */
+ if (adapter_init(dev)) /* maybe close it */
+ return 1;
+ }
+
+ if ((len = skb->len) < RUNT)
+ len = RUNT;
+ if (len & 1) /* send an even number of bytes */
+ ++len;
+
+ /* Start real output */
+ save_flags(flags);
+ cli();
+
+ PRINTK(("de620_start_xmit: len=%d, bufs 0x%02x\n",
+ (int)skb->len, using_txbuf));
+
+ /* select a free tx buffer. if there is one... */
+ switch (using_txbuf) {
+ default: /* both are free: use TXBF0 */
+ case TXBF1: /* use TXBF0 */
+ de620_send_command(dev,W_CR | RW0);
+ using_txbuf |= TXBF0;
+ break;
+
+ case TXBF0: /* use TXBF1 */
+ de620_send_command(dev,W_CR | RW1);
+ using_txbuf |= TXBF1;
+ break;
+
+ case (TXBF0 | TXBF1): /* NONE!!! */
+ printk("de620: Ouch! No tx-buffer available!\n");
+ restore_flags(flags);
+ return 1;
+ break;
+ }
+ de620_write_block(dev, buffer, len);
+
+ dev->trans_start = jiffies;
+ dev->tbusy = (using_txbuf == (TXBF0 | TXBF1)); /* Boolean! */
+
+ ((struct netstats *)(dev->priv))->tx_packets++;
+
+ restore_flags(flags); /* interrupts maybe back on */
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/*****************************************************
+ *
+ * Handle the network interface interrupts.
+ *
+ */
+static void
+de620_interrupt(int irq_in, struct pt_regs *regs)
+{
+ struct device *dev = irq2dev_map[irq_in];
+ byte irq_status;
+ int bogus_count = 0;
+ int again = 0;
+
+ /* This might be deleted now, no crummy drivers present :-) Or..? */
+ if ((dev == NULL) || (irq != irq_in)) {
+ printk("%s: bogus interrupt %d\n", dev?dev->name:"de620", irq_in);
+ return;
+ }
+
+ cli();
+ dev->interrupt = 1;
+
+ /* Read the status register (_not_ the status port) */
+ irq_status = de620_get_register(dev, R_STS);
+
+ PRINTK(("de620_interrupt (%2.2X)\n", irq_status));
+
+ if (irq_status & RXGOOD) {
+ do {
+ again = de620_rx_intr(dev);
+ PRINTK(("again=%d\n", again));
+ }
+ while (again && (++bogus_count < 100));
+ }
+
+ dev->tbusy = (de620_tx_buffs(dev) == (TXBF0 | TXBF1)); /* Boolean! */
+
+ dev->interrupt = 0;
+ sti();
+ return;
+}
+
+/**************************************
+ *
+ * Get a packet from the adapter
+ *
+ * Send it "upstairs"
+ *
+ */
+static int
+de620_rx_intr(struct device *dev)
+{
+ struct header_buf {
+ byte status;
+ byte Rx_NextPage;
+ unsigned short Rx_ByteCount;
+ } header_buf;
+ struct sk_buff *skb;
+ int size;
+ byte *buffer;
+ byte pagelink;
+ byte curr_page;
+
+ PRINTK(("de620_rx_intr: next_rx_page = %d\n", next_rx_page));
+
+ /* Tell the adapter that we are going to read data, and from where */
+ de620_send_command(dev, W_CR | RRN);
+ de620_set_register(dev, W_RSA1, next_rx_page);
+ de620_set_register(dev, W_RSA0, 0);
+
+ /* Deep breath, and away we goooooo */
+ de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf));
+ PRINTK(("page status=0x%02x, nextpage=%d, packetsize=%d\n",
+ header_buf.status, header_buf.Rx_NextPage, header_buf.Rx_ByteCount));
+
+ /* Plausible page header? */
+ pagelink = header_buf.Rx_NextPage;
+ if ((pagelink < first_rx_page) || (last_rx_page < pagelink)) {
+ /* Ouch... Forget it! Skip all and start afresh... */
+ printk("%s: Ring overrun? Restoring...\n", dev->name);
+ /* You win some, you lose some. And sometimes plenty... */
+ adapter_init(dev);
+ ((struct netstats *)(dev->priv))->rx_over_errors++;
+ return 0;
+ }
+
+ /* OK, this looks good so far. Let's see if it's consistent... */
+ /* Let's compute the start of the next packet, based on where we are */
+ pagelink = next_rx_page +
+ ((header_buf.Rx_ByteCount + (4 - 1 + 0x100)) >> 8);
+
+ /* Are we going to wrap around the page counter? */
+ if (pagelink > last_rx_page)
+ pagelink -= (last_rx_page - first_rx_page + 1);
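+
+ /*
+ * Worked example (illustrative numbers, not from the original source):
+ * a packet with Rx_ByteCount = 1000 occupies
+ * (1000 + 4 - 1 + 0x100) >> 8 = 4 pages of 256 bytes (the 4-byte page
+ * header included, rounded up), so from next_rx_page = 12 the computed
+ * link is 16; with first_rx_page = 12 and last_rx_page = 255, starting
+ * from next_rx_page = 254 the raw 258 wraps to 258 - 244 = 14.
+ */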
+
+ /* Is the _computed_ next page number equal to what the adapter says? */
+ if (pagelink != header_buf.Rx_NextPage) {
+ /* Naah, we'll skip this packet. Probably bogus data as well */
+ printk("%s: Page link out of sync! Restoring...\n", dev->name);
+ next_rx_page = header_buf.Rx_NextPage; /* at least a try... */
+ de620_send_command(dev, W_DUMMY);
+ de620_set_register(dev, W_NPRF, next_rx_page);
+ ((struct netstats *)(dev->priv))->rx_over_errors++;
+ return 0;
+ }
+ next_rx_page = pagelink;
+
+ size = header_buf.Rx_ByteCount - 4;
+ if ((size < RUNT) || (GIANT < size)) {
+ printk("%s: Illegal packet size: %d!\n", dev->name, size);
+ }
+ else { /* Good packet? */
+ skb = dev_alloc_skb(size+2);
+ if (skb == NULL) { /* Yeah, but no place to put it... */
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, size);
+ ((struct netstats *)(dev->priv))->rx_dropped++;
+ }
+ else { /* Yep! Go get it! */
+ skb_reserve(skb,2); /* Align */
+ skb->dev = dev;
+ skb->free = 1;
+ /* skb->data points to the start of sk_buff data area */
+ buffer = skb_put(skb,size);
+ /* copy the packet into the buffer */
+ de620_read_block(dev, buffer, size);
+ PRINTK(("Read %d bytes\n", size));
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb); /* deliver it "upstairs" */
+ /* count all receives */
+ ((struct netstats *)(dev->priv))->rx_packets++;
+ }
+ }
+
+ /* Let's peek ahead to see if we have read the last current packet */
+ /* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */
+ curr_page = de620_get_register(dev, R_CPR);
+ de620_set_register(dev, W_NPRF, next_rx_page);
+ PRINTK(("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page));
+
+ return (next_rx_page != curr_page); /* That was slightly tricky... */
+}
+
+/*********************************************
+ *
+ * Reset the adapter to a known state
+ *
+ */
+static int
+adapter_init(struct device *dev)
+{
+ int i;
+ static int was_down = 0;
+
+ if ((nic_data.Model == 3) || (nic_data.Model == 0)) { /* CT */
+ EIPRegister = NCTL0;
+ if (nic_data.Media != 1)
+ EIPRegister |= NIS0; /* not BNC */
+ }
+ else if (nic_data.Model == 2) { /* UTP */
+ EIPRegister = NCTL0 | NIS0;
+ }
+
+ if (utp)
+ EIPRegister = NCTL0 | NIS0;
+ if (bnc)
+ EIPRegister = NCTL0;
+
+ de620_send_command(dev, W_CR | RNOP | CLEAR);
+ de620_send_command(dev, W_CR | RNOP);
+
+ de620_set_register(dev, W_SCR, SCR_DEF);
+ /* disable recv to wait init */
+ de620_set_register(dev, W_TCR, RXOFF);
+
+ /* Set the node ID in the adapter */
+ for (i = 0; i < 6; ++i) { /* W_PARn = 0xaa + n */
+ de620_set_register(dev, W_PAR0 + i, dev->dev_addr[i]);
+ }
+
+ de620_set_register(dev, W_EIP, EIPRegister);
+
+ next_rx_page = first_rx_page = DE620_RX_START_PAGE;
+ if (nic_data.RAM_Size)
+ last_rx_page = nic_data.RAM_Size - 1;
+ else /* 64k RAM */
+ last_rx_page = 255;
+
+ de620_set_register(dev, W_SPR, first_rx_page); /* Start Page Register*/
+ de620_set_register(dev, W_EPR, last_rx_page); /* End Page Register */
+ de620_set_register(dev, W_CPR, first_rx_page);/*Current Page Register*/
+ de620_send_command(dev, W_NPR | first_rx_page); /* Next Page Register*/
+ de620_send_command(dev, W_DUMMY);
+ de620_set_delay(dev);
+
+ /* Final sanity check: Anybody out there? */
+ /* Let's hope some bits from the status register make a good check */
+#define CHECK_MASK ( 0 | TXSUC | T16 | 0 | RXCRC | RXSHORT | 0 | 0 )
+#define CHECK_OK ( 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 )
+ /* success: X 0 0 X 0 0 X X */
+ /* ignore: EEDI RXGOOD COLS LNKS*/
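+ /*
+ * In numbers (added for clarity): CHECK_MASK = 0x40|0x20|0x08|0x04
+ * = 0x6c and CHECK_OK = 0x00, so the test below fails whenever a
+ * freshly reset adapter already reports a tx result or an rx error;
+ * EEDI, RXGOOD, COLS and LNKS are deliberately ignored.
+ */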
+
+ if (((i = de620_get_register(dev, R_STS)) & CHECK_MASK) != CHECK_OK) {
+ printk("Something has happened to the DE-620! Please check it"
+#ifdef SHUTDOWN_WHEN_LOST
+ " and do a new ifconfig"
+#endif
+ "! (%02x)\n", i);
+#ifdef SHUTDOWN_WHEN_LOST
+ /* Goodbye, cruel world... */
+ dev->flags &= ~IFF_UP;
+ de620_close(dev);
+#endif
+ was_down = 1;
+ return 1; /* failed */
+ }
+ if (was_down) {
+ printk("Thanks, I feel much better now!\n");
+ was_down = 0;
+ }
+
+ /* All OK, go ahead... */
+ de620_set_register(dev, W_TCR, TCR_DEF);
+
+ return 0; /* all ok */
+}
+
+/******************************************************************************
+ *
+ * Only start-up code below
+ *
+ */
+/****************************************
+ *
+ * Check if there is a DE-620 connected
+ */
+int
+de620_probe(struct device *dev)
+{
+ static struct netstats de620_netstats;
+ int i;
+ byte checkbyte = 0xa5;
+
+ /*
+ * This is where the base_addr and irq gets set.
+ * Tunable at compile-time and insmod-time
+ */
+ dev->base_addr = io;
+ dev->irq = irq;
+
+ if (de620_debug)
+ printk(version);
+
+ printk("D-Link DE-620 pocket adapter");
+
+ /* Initially, configure basic nibble mode, so we can read the EEPROM */
+ NIC_Cmd = DEF_NIC_CMD;
+ de620_set_register(dev, W_EIP, EIPRegister);
+
+ /* Anybody out there? */
+ de620_set_register(dev, W_CPR, checkbyte);
+ checkbyte = de620_get_register(dev, R_CPR);
+
+ if ((checkbyte != 0xa5) || (read_eeprom(dev) != 0)) {
+ printk(" not identified in the printer port\n");
+ return ENODEV;
+ }
+
+#if 0 /* Not yet */
+ if (check_region(dev->base_addr, 3)) {
+ printk(", port 0x%x busy\n", dev->base_addr);
+ return EBUSY;
+ }
+#endif
+ request_region(dev->base_addr, 3, "de620");
+
+ /* else, got it! */
+ printk(", Ethernet Address: %2.2X",
+ dev->dev_addr[0] = nic_data.NodeID[0]);
+ for (i = 1; i < ETH_ALEN; i++) {
+ printk(":%2.2X", dev->dev_addr[i] = nic_data.NodeID[i]);
+ dev->broadcast[i] = 0xff;
+ }
+
+ printk(" (%dk RAM,",
+ (nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64);
+
+ if (nic_data.Media == 1)
+ printk(" BNC)\n");
+ else
+ printk(" UTP)\n");
+
+ /* Initialize the device structure. */
+ /*dev->priv = kmalloc(sizeof(struct netstats), GFP_KERNEL);*/
+ dev->priv = &de620_netstats;
+
+ memset(dev->priv, 0, sizeof(struct netstats));
+ dev->get_stats = get_stats;
+ dev->open = de620_open;
+ dev->stop = de620_close;
+ dev->hard_start_xmit = &de620_start_xmit;
+ dev->set_multicast_list = &de620_set_multicast_list;
+ /* base_addr and irq are already set, see above! */
+
+ ether_setup(dev);
+
+ /* dump eeprom */
+ if (de620_debug) {
+ printk("\nEEPROM contents:\n");
+ printk("RAM_Size = 0x%02X\n", nic_data.RAM_Size);
+ printk("NodeID = %02X:%02X:%02X:%02X:%02X:%02X\n",
+ nic_data.NodeID[0], nic_data.NodeID[1],
+ nic_data.NodeID[2], nic_data.NodeID[3],
+ nic_data.NodeID[4], nic_data.NodeID[5]);
+ printk("Model = %d\n", nic_data.Model);
+ printk("Media = %d\n", nic_data.Media);
+ printk("SCR = 0x%02x\n", nic_data.SCR);
+ }
+
+ return 0;
+}
+
+/**********************************
+ *
+ * Read info from on-board EEPROM
+ *
+ * Note: Bitwise serial I/O to/from the EEPROM via the status _register_!
+ */
+#define sendit(dev,data) de620_set_register(dev, W_EIP, data | EIPRegister);
+
+static unsigned short
+ReadAWord(struct device *dev, int from)
+{
+ unsigned short data;
+ int nbits;
+
+ /* cs [__~~] SET SEND STATE */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 0); sendit(dev, 1); sendit(dev, 5); sendit(dev, 4);
+
+ /* Send the 9-bit address from where we want to read the 16-bit word */
+ for (nbits = 9; nbits > 0; --nbits, from <<= 1) {
+ if (from & 0x0100) { /* bit set? */
+ /* cs [~~~~] SEND 1 */
+ /* di [~~~~] */
+ /* sck [_~~_] */
+ sendit(dev, 6); sendit(dev, 7); sendit(dev, 7); sendit(dev, 6);
+ }
+ else {
+ /* cs [~~~~] SEND 0 */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
+ }
+ }
+
+ /* Shift in the 16-bit word. The bits appear serially in EEDI (=0x80) */
+ for (data = 0, nbits = 16; nbits > 0; --nbits) {
+ /* cs [~~~~] SEND 0 */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
+ data = (data << 1) | ((de620_get_register(dev, R_STS) & EEDI) >> 7);
+ }
+ /* cs [____] RESET SEND STATE */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 0); sendit(dev, 1); sendit(dev, 1); sendit(dev, 0);
+
+ return data;
+}
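+
+/*
+ * Illustration (added comment, not from the original source): to fetch
+ * the EEPROM word holding NodeID bytes 0-1, read_eeprom() below calls
+ * ReadAWord(dev, 0x1aa). The 9-bit address 0x1aa = 1 1010 1010 is
+ * clocked out MSB first -- each set bit as the sendit(6,7,7,6) pattern,
+ * each clear bit as sendit(4,5,5,4) -- and the 16 data bits are then
+ * shifted in one per clock through the EEDI bit (0x80) of the status
+ * register.
+ */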
+
+static int
+read_eeprom(struct device *dev)
+{
+ unsigned short wrd;
+
+ /* D-Link Ethernet addresses are in the series 00:80:c8:7X:XX:XX */
+ wrd = ReadAWord(dev, 0x1aa); /* bytes 0 + 1 of NodeID */
+ if (!clone && (wrd != htons(0x0080))) /* Valid D-Link ether sequence? */
+ return -1; /* Nope, not a DE-620 */
+ nic_data.NodeID[0] = wrd & 0xff;
+ nic_data.NodeID[1] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ab); /* bytes 2 + 3 of NodeID */
+ if (!clone && ((wrd & 0xff) != 0xc8)) /* Valid D-Link ether sequence? */
+ return -1; /* Nope, not a DE-620 */
+ nic_data.NodeID[2] = wrd & 0xff;
+ nic_data.NodeID[3] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ac); /* bytes 4 + 5 of NodeID */
+ nic_data.NodeID[4] = wrd & 0xff;
+ nic_data.NodeID[5] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ad); /* RAM size in pages (256 bytes). 0 = 64k */
+ nic_data.RAM_Size = (wrd >> 8);
+
+ wrd = ReadAWord(dev, 0x1ae); /* hardware model (CT = 3) */
+ nic_data.Model = (wrd & 0xff);
+
+ wrd = ReadAWord(dev, 0x1af); /* media (indicates BNC/UTP) */
+ nic_data.Media = (wrd & 0xff);
+
+ wrd = ReadAWord(dev, 0x1a8); /* System Configuration Register */
+ nic_data.SCR = (wrd >> 8);
+
+ return 0; /* no errors */
+}
+
+/******************************************************************************
+ *
+ * Loadable module skeleton
+ *
+ */
+#ifdef MODULE
+static char nullname[8] = "";
+static struct device de620_dev = {
+ nullname, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, de620_probe };
+
+int
+init_module(void)
+{
+ if (register_netdev(&de620_dev) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&de620_dev);
+ release_region(de620_dev.base_addr, 3);
+}
+#endif /* MODULE */
+
+/*
+ * (add '-DMODULE' when compiling as loadable module)
+ *
+ * compile-command:
+ * gcc -D__KERNEL__ -Wall -Wstrict-prototypes -O2 \
+ * -fomit-frame-pointer -m486 \
+ * -I/usr/src/linux/include -I../../net/inet -c de620.c
+*/
+/*
+ * Local variables:
+ * kernel-compile-command: "gcc -D__KERNEL__ -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * module-compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/de620.h b/i386/i386at/gpl/linux/net/de620.h
new file mode 100644
index 00000000..e8d9a88f
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/de620.h
@@ -0,0 +1,117 @@
+/*********************************************************
+ * *
+ * Definition of D-Link DE-620 Ethernet Pocket adapter *
+ * *
+ *********************************************************/
+
+/* DE-620's CMD port Command */
+#define CS0 0x08 /* 1->0 command strobe */
+#define ICEN 0x04 /* 0=enable DL3520 host interface */
+#define DS0 0x02 /* 1->0 data strobe 0 */
+#define DS1 0x01 /* 1->0 data strobe 1 */
+
+#define WDIR 0x20 /* general 0=read 1=write */
+#define RDIR 0x00 /* (not 100% confirmed) */
+#define PS2WDIR 0x00 /* ps/2 mode 1=read, 0=write */
+#define PS2RDIR 0x20
+
+#define IRQEN 0x10 /* 1 = enable printer IRQ line */
+#define SELECTIN 0x08 /* 1 = select printer */
+#define INITP 0x04 /* 0 = initial printer */
+#define AUTOFEED 0x02 /* 1 = printer auto form feed */
+#define STROBE 0x01 /* 0->1 data strobe */
+
+#define RESET 0x08
+#define NIS0 0x20 /* 0 = BNC, 1 = UTP */
+#define NCTL0 0x10
+
+/* DE-620 DIC Command */
+#define W_DUMMY 0x00 /* DIC reserved command */
+#define W_CR 0x20 /* DIC write command register */
+#define W_NPR 0x40 /* DIC write Next Page Register */
+#define W_TBR 0x60 /* DIC write Tx Byte Count 1 reg */
+#define W_RSA 0x80 /* DIC write Remote Start Addr 1 */
+
+/* DE-620's STAT port bits 7-4 */
+#define EMPTY 0x80 /* 1 = receive buffer empty */
+#define INTLEVEL 0x40 /* 1 = interrupt level is high */
+#define TXBF1 0x20 /* 1 = transmit buffer 1 is in use */
+#define TXBF0 0x10 /* 1 = transmit buffer 0 is in use */
+#define READY 0x08 /* 1 = h/w ready to accept cmd/data */
+
+/* IDC 1 Command */
+#define W_RSA1 0xa0 /* write remote start address 1 */
+#define W_RSA0 0xa1 /* write remote start address 0 */
+#define W_NPRF 0xa2 /* write next page register NPR15-NPR8 */
+#define W_DFR 0xa3 /* write delay factor register */
+#define W_CPR 0xa4 /* write current page register */
+#define W_SPR 0xa5 /* write start page register */
+#define W_EPR 0xa6 /* write end page register */
+#define W_SCR 0xa7 /* write system configuration register */
+#define W_TCR 0xa8 /* write Transceiver Configuration reg */
+#define W_EIP 0xa9 /* write EEPM Interface port */
+#define W_PAR0 0xaa /* write physical address register 0 */
+#define W_PAR1 0xab /* write physical address register 1 */
+#define W_PAR2 0xac /* write physical address register 2 */
+#define W_PAR3 0xad /* write physical address register 3 */
+#define W_PAR4 0xae /* write physical address register 4 */
+#define W_PAR5 0xaf /* write physical address register 5 */
+
+/* IDC 2 Command */
+#define R_STS 0xc0 /* read status register */
+#define R_CPR 0xc1 /* read current page register */
+#define R_BPR 0xc2 /* read boundary page register */
+#define R_TDR 0xc3 /* read time domain reflectometry reg */
+
+/* STATUS Register */
+#define EEDI 0x80 /* EEPM DO pin */
+#define TXSUC 0x40 /* tx success */
+#define T16 0x20 /* tx fail 16 times */
+#define TS1 0x40 /* 0=Tx success, 1=T16 */
+#define TS0 0x20 /* 0=Tx success, 1=T16 */
+#define RXGOOD 0x10 /* rx a good packet */
+#define RXCRC 0x08 /* rx a CRC error packet */
+#define RXSHORT 0x04 /* rx a short packet */
+#define COLS 0x02 /* coaxial collision status */
+#define LNKS 0x01 /* UTP link status */
+
+/* Command Register */
+#define CLEAR 0x10 /* reset part of hardware */
+#define NOPER 0x08 /* No Operation */
+#define RNOP 0x08
+#define RRA 0x06 /* After RR then auto-advance NPR & BPR(=NPR-1) */
+#define RRN 0x04 /* Normal Remote Read mode */
+#define RW1 0x02 /* Remote Write tx buffer 1 ( page 6 - 11 ) */
+#define RW0 0x00 /* Remote Write tx buffer 0 ( page 0 - 5 ) */
+#define TXEN 0x01 /* 0->1 tx enable */
+
+/* System Configuration Register */
+#define TESTON 0x80 /* test host data transfer reliability */
+#define SLEEP 0x40 /* sleep mode */
+#if 0
+#define FASTMODE 0x04 /* fast mode for intel 82360SL fast mode */
+#define BYTEMODE 0x02 /* byte mode */
+#else
+#define FASTMODE 0x20 /* fast mode for intel 82360SL fast mode */
+#define BYTEMODE 0x10 /* byte mode */
+#endif
+#define NIBBLEMODE 0x00 /* nibble mode */
+#define IRQINV 0x08 /* turn off IRQ line inverter */
+#define IRQNML 0x00 /* turn on IRQ line inverter */
+#define INTON 0x04
+#define AUTOFFSET 0x02 /* auto shift address to TPR+12 */
+#define AUTOTX 0x01 /* auto tx when leave RW mode */
+
+/* Transceiver Configuration Register */
+#define JABBER 0x80 /* generate jabber condition */
+#define TXSUCINT 0x40 /* enable tx success interrupt */
+#define T16INT 0x20 /* enable T16 interrupt */
+#define RXERRPKT 0x10 /* accept CRC error or short packet */
+#define EXTERNALB2 0x0C /* external loopback 2 */
+#define EXTERNALB1 0x08 /* external loopback 1 */
+#define INTERNALB 0x04 /* internal loopback */
+#define NMLOPERATE 0x00 /* normal operation */
+#define RXPBM 0x03 /* rx physical, broadcast, multicast */
+#define RXPB 0x02 /* rx physical, broadcast */
+#define RXALL 0x01 /* rx all packet */
+#define RXOFF 0x00 /* rx disable */
diff --git a/i386/i386at/gpl/linux/net/depca.c b/i386/i386at/gpl/linux/net/depca.c
new file mode 100644
index 00000000..ae9a8ca3
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/depca.c
@@ -0,0 +1,1901 @@
+/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux.
+
+ Written 1994, 1995 by David C. Davies.
+
+
+ Copyright 1994 David C. Davies
+ and
+ United States Government
+ (as represented by the Director, National Security Agency).
+
+ Copyright 1995 Digital Equipment Corporation.
+
+
+ This software may be used and distributed according to the terms of
+ the GNU Public License, incorporated herein by reference.
+
+ This driver is written for the Digital Equipment Corporation series
+ of DEPCA and EtherWORKS ethernet cards:
+
+ DEPCA (the original)
+ DE100
+ DE101
+ DE200 Turbo
+ DE201 Turbo
+ DE202 Turbo (TP BNC)
+ DE210
+ DE422 (EISA)
+
+ The driver has been tested on DE100, DE200 and DE202 cards in a
+ relatively busy network. The DE422 has been tested a little.
+
+ This driver will NOT work for the DE203, DE204 and DE205 series of
+ cards, since they have a new custom ASIC in place of the AMD LANCE
+ chip. See the 'ewrk3.c' driver in the Linux source tree for running
+ those cards.
+
+ I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from)
+ a DECstation 5000/200.
+
+ The author may be reached at davies@wanton.lkg.dec.com or
+ davies@maniac.ultranet.com or Digital Equipment Corporation, 550 King
+ Street, Littleton MA 01460.
+
+ =========================================================================
+
+ The driver was originally based on the 'lance.c' driver from Donald
+ Becker which is included with the standard driver distribution for
+ linux. V0.4 is a complete re-write with only the kernel interface
+ remaining from the original code.
+
+ 1) Lance.c code in /linux/drivers/net/
+ 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook",
+ AMD, 1992 [(800) 222-9323].
+ 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)",
+ AMD, Pub. #17881, May 1993.
+ 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA",
+ AMD, Pub. #16907, May 1992
+ 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003
+ 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003
+ 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR
+ Digital Equipment Corporation, 1989
+ 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001
+
+
+ Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this
+ driver.
+
+ The original DEPCA card requires that the ethernet ROM address counter
+ be enabled to count and has an 8 bit NICSR. The ROM counter enabling is
+ only done when a 0x08 is read as the first address octet (to minimise
+ the chances of writing over some other hardware's I/O register). The
+ NICSR accesses have been changed to byte accesses for all the cards
+ supported by this driver, since there is only one useful bit in the MSB
+ (remote boot timeout) and it is not used. Also, there is a maximum of
+ only 48kB network RAM for this card. My thanks to Torbjorn Lindh for
+ help debugging all this (and holding my feet to the fire until I got it
+ right).
+
+ The DE200 series boards have on-board 64kB RAM for use as a shared
+ memory network buffer. Only the DE100 cards make use of a 2kB buffer
+ mode which has not been implemented in this driver (only the 32kB and
+ 64kB modes are supported [16kB/48kB for the original DEPCA]).
+
+ At the most only 2 DEPCA cards can be supported on the ISA bus because
+ there is only provision for two I/O base addresses on each card (0x300
+ and 0x200). The I/O address is detected by searching for a byte sequence
+ in the Ethernet station address PROM at the expected I/O address for the
+ Ethernet PROM. The shared memory base address is 'autoprobed' by
+ looking for the self test PROM and detecting the card name. When a
+ second DEPCA is detected, information is placed in the base_addr
+ variable of the next device structure (which is created if necessary),
+ thus enabling ethif_probe initialization for the device. More than 2
+ EISA cards can be supported, but care will be needed assigning the
+ shared memory to ensure that each slot has the correct IRQ, I/O address
+ and shared memory address assigned.
+
+ ************************************************************************
+
+ NOTE: If you are using two ISA DEPCAs, it is important that you assign
+ the base memory addresses correctly. The driver autoprobes I/O 0x300
+ then 0x200. The base memory address for the first device must be less
+ than that of the second so that the auto probe will correctly assign the
+ I/O and memory addresses on the same card. I can't think of a way to do
+ this unambiguously at the moment, since there is nothing on the cards to
+ tie I/O and memory information together.
+
+ I am unable to test 2 cards together for now, so this code is
+ unchecked. All reports, good or bad, are welcome.
+
+ ************************************************************************
+
+ The board IRQ setting must be at an unused IRQ which is auto-probed
+ using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are
+ {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is
+ really IRQ9 in machines with 16 IRQ lines.
+
+ No 16MB memory limitation should exist with this driver as DMA is not
+ used and the common memory area is in low memory on the network card (my
+ current system has 20MB and I've not had problems yet).
+
+ The ability to load this driver as a loadable module has been added. To
+ utilise this ability, you have to do <8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy depca.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) if you wish, edit the source code near line 1530 to reflect the I/O
+ address and IRQ you're using (see also 5).
+ 3) compile depca.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
+ kernel with the depca configuration turned off and reboot.
+ 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100]
+ [Alan Cox: Changed the code to allow command line irq/io assignments]
+ [Dave Davies: Changed the code to allow command line mem/name
+ assignments]
+ 6) run the net startup bits for your eth?? interface manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ Note that autoprobing is not allowed in loadable modules - the system is
+ already up and running and you're messing with interrupts.
+
+ To unload a module, turn off the associated interface
+ 'ifconfig eth?? down' then 'rmmod depca'.
+
+ To assign a base memory address for the shared memory when running as a
+ loadable module, see 5 above. To include the adapter name (if you have
+ no PROM but know the card name) also see 5 above. Note that this last
+ option will not work with kernel built-in depca's.
+
+ The shared memory assignment for a loadable module makes sense to avoid
+ the 'memory autoprobe' picking the wrong shared memory (for the case of
+ 2 depca's in a PC).
+
+
+ TO DO:
+ ------
+
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 25-jan-94 Initial writing.
+ 0.2 27-jan-94 Added LANCE TX hardware buffer chaining.
+ 0.3 1-feb-94 Added multiple DEPCA support.
+ 0.31 4-feb-94 Added DE202 recognition.
+ 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support.
+ 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable.
+ Add jabber packet fix from murf@perftech.com
+ and becker@super.org
+ 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access.
+ 0.35 8-mar-94 Added DE201 recognition. Tidied up.
+ 0.351 30-apr-94 Added EISA support. Added DE422 recognition.
+ 0.36 16-may-94 DE422 fix released.
+ 0.37 22-jul-94 Added MODULE support
+ 0.38 15-aug-94 Added DBR ROM switch in depca_close().
+ Multi DEPCA bug fix.
+ 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0.
+ 0.381 12-dec-94 Added DE101 recognition, fix multicast bug.
+ 0.382 9-feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by
+ <stromain@alf.dec.com>
+ 0.384 17-mar-95 Fix a ring full bug reported by <bkm@star.rl.ac.uk>
+ 0.385 3-apr-95 Fix a recognition bug reported by
+ <ryan.niemi@lastfrontier.com>
+ 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility
+ 0.40 25-May-95 Rewrite for portability & updated.
+ ALPHA support from <jestabro@amt.tay1.dec.com>
+ 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from
+ suggestion by <heiko@colossus.escape.de>
+ 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable
+ modules.
+ Add 'adapter_name' for loadable modules when no PROM.
+ Both above from a suggestion by
+ <pchen@woodruffs121.residence.gatech.edu>.
+ Add new multicasting code.
+
+ =========================================================================
+*/
+
+static const char *version = "depca.c:v0.42 95/12/27 davies@wanton.lkg.dec.com\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/segment.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+
+#include "depca.h"
+
+#ifdef DEPCA_DEBUG
+static int depca_debug = DEPCA_DEBUG;
+#else
+static int depca_debug = 1;
+#endif
+
+#define DEPCA_NDA 0xffe0 /* No Device Address */
+
+/*
+** Ethernet PROM defines
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** Set the number of Tx and Rx buffers. Ensure that the memory requested
+** here is <= to the amount of shared memory set up by the board switches.
+** The number of descriptors MUST BE A POWER OF 2.
+**
+** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ)
+*/
+#define NUM_RX_DESC 8 /* Number of RX descriptors */
+#define NUM_TX_DESC 8 /* Number of TX descriptors */
+#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */
+#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */
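+/*
+** For example, with the default values above the formula gives
+** total_memory = 8*(8+1536) + 8*(8+1536) = 24704 bytes (just over 24kB),
+** which fits comfortably within even the smallest (48kB) DEPCA network RAM.
+*/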
+
+#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */
+#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
+
+/*
+** EISA bus defines
+*/
+#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#define MAX_EISA_SLOTS 16
+#define EISA_SLOT_INC 0x1000
+
+/*
+** ISA Bus defines
+*/
+#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000}
+#define DEPCA_IO_PORTS {0x300, 0x200, 0}
+#define DEPCA_TOTAL_SIZE 0x10
+static short mem_chkd = 0;
+
+/*
+** Name <-> Adapter mapping
+*/
+#define DEPCA_SIGNATURE {"DEPCA",\
+ "DE100","DE101",\
+ "DE200","DE201","DE202",\
+ "DE210",\
+ "DE422",\
+ ""}
+static enum {DEPCA, de100, de101, de200, de201, de202, de210, de422, unknown} adapter;
+
+/*
+** Miscellaneous info...
+*/
+#define DEPCA_STRLEN 16
+#define MAX_NUM_DEPCAS 2
+
+/*
+** Memory Alignment. Each descriptor is 4 longwords long. To force a
+** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
+** DESC_ALIGN. ALIGN aligns the start address of the private memory area
+** and hence the RX descriptor ring's first entry.
+*/
+#define ALIGN4 ((u_long)4 - 1) /* 1 longword align */
+#define ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */
+#define ALIGN ALIGN8 /* Keep the LANCE happy... */
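+
+/*
+** For example, (mem_start + ALIGN) & ~ALIGN, as used below, rounds an
+** address up to the next quadword (8 byte) boundary: 0xd0016 becomes 0xd0018.
+*/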
+
+/*
+** The DEPCA Rx and Tx ring descriptors.
+*/
+struct depca_rx_desc {
+ volatile s32 base;
+ s16 buf_length; /* This length is negative 2's complement! */
+ s16 msg_length; /* This length is "normal". */
+};
+
+struct depca_tx_desc {
+ volatile s32 base;
+ s16 length; /* This length is negative 2's complement! */
+ s16 misc; /* Errors and TDR info */
+};
+
+#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM
+ to LANCE memory address space */
+
+/*
+** The Lance initialization block, described in databook, in common memory.
+*/
+struct depca_init {
+ u16 mode; /* Mode register */
+ u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */
+ u8 mcast_table[8]; /* Multicast Hash Table. */
+ u32 rx_ring; /* Rx ring base pointer & ring length */
+ u32 tx_ring; /* Tx ring base pointer & ring length */
+};
+
+#define DEPCA_PKT_STAT_SZ 16
+#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase DEPCA_PKT_STAT_SZ */
+struct depca_private {
+ char devname[DEPCA_STRLEN]; /* Device Product String */
+ char adapter_name[DEPCA_STRLEN];/* /proc/ioports string */
+ char adapter; /* Adapter type */
+ struct depca_rx_desc *rx_ring; /* Pointer to start of RX descriptor ring */
+ struct depca_tx_desc *tx_ring; /* Pointer to start of TX descriptor ring */
+ struct depca_init init_block;/* Shadow Initialization block */
+ char *rx_memcpy[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */
+ char *tx_memcpy[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */
+ u_long bus_offset; /* (E)ISA bus address offset vs LANCE */
+ u_long sh_mem; /* Physical start addr of shared mem area */
+ u_long dma_buffs; /* LANCE Rx and Tx buffers start address. */
+ int rx_new, tx_new; /* The next free ring entry */
+ int rx_old, tx_old; /* The ring entries to be free()ed. */
+ struct enet_statistics stats;
+ struct { /* Private stats counters */
+ u32 bins[DEPCA_PKT_STAT_SZ];
+ u32 unicast;
+ u32 multicast;
+ u32 broadcast;
+ u32 excessive_collisions;
+ u32 tx_underruns;
+ u32 excessive_underruns;
+ } pktStats;
+ int txRingMask; /* TX ring mask */
+ int rxRingMask; /* RX ring mask */
+ s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */
+ s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */
+};
+
+/*
+** The transmit ring full condition is described by the tx_old and tx_new
+** pointers by:
+** tx_old = tx_new Empty ring
+** tx_old = tx_new+1 Full ring
+** tx_old+txRingMask = tx_new Full ring (wrapped condition)
+*/
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->txRingMask-lp->tx_new:\
+ lp->tx_old -lp->tx_new-1)
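+
+/*
+** For example, with an 8 entry ring (txRingMask = 7): an empty ring
+** (tx_old == tx_new) gives 0+7-0 = 7 free buffers (one entry is always
+** held back), while tx_old = 3, tx_new = 2 gives 3-2-1 = 0, i.e. a full ring.
+*/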
+
+/*
+** Public Functions
+*/
+static int depca_open(struct device *dev);
+static int depca_start_xmit(struct sk_buff *skb, struct device *dev);
+static void depca_interrupt(int irq, struct pt_regs * regs);
+static int depca_close(struct device *dev);
+static int depca_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+static struct enet_statistics *depca_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+/*
+** Private functions
+*/
+static int depca_hw_init(struct device *dev, u_long ioaddr);
+static void depca_init_ring(struct device *dev);
+static int depca_rx(struct device *dev);
+static int depca_tx(struct device *dev);
+
+static void LoadCSRs(struct device *dev);
+static int InitRestartDepca(struct device *dev);
+static void DepcaSignature(char *name, u_long paddr);
+static int DevicePresent(u_long ioaddr);
+static int get_hw_addr(struct device *dev);
+static int EISA_signature(char *name, s32 eisa_id);
+static void SetMulticastFilter(struct device *dev);
+static void isa_probe(struct device *dev, u_long iobase);
+static void eisa_probe(struct device *dev, u_long iobase);
+static struct device *alloc_device(struct device *dev, u_long iobase);
+static int load_packet(struct device *dev, struct sk_buff *skb);
+static void depca_dbg_open(struct device *dev);
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static int autoprobed = 1, loading_module = 1;
+# else
+static u_char de1xx_irq[] = {2,3,4,5,7,0};
+static u_char de2xx_irq[] = {5,9,10,11,15,0};
+static u_char de422_irq[] = {5,9,10,11,0};
+static u_char *depca_irq;
+static int autoprobed = 0, loading_module = 0;
+#endif /* MODULE */
+
+static char name[DEPCA_STRLEN];
+static int num_depcas = 0, num_eth = 0;
+static int mem=0; /* For loadable module assignment
+ use insmod mem=0x????? .... */
+static char *adapter_name = '\0'; /* If no PROM when loadable module
+ use insmod adapter_name=DE??? ...
+ */
+/*
+** Miscellaneous defines...
+*/
+#define STOP_DEPCA \
+ outw(CSR0, DEPCA_ADDR);\
+ outw(STOP, DEPCA_DATA)
+
+
+
+int depca_probe(struct device *dev)
+{
+ int tmp = num_depcas, status = -ENODEV;
+ u_long iobase = dev->base_addr;
+
+ if ((iobase == 0) && loading_module){
+ printk("Autoprobing is not supported when loading a module based driver.\n");
+ status = -EIO;
+ } else {
+ isa_probe(dev, iobase);
+ eisa_probe(dev, iobase);
+
+ if ((tmp == num_depcas) && (iobase != 0) && loading_module) {
+ printk("%s: depca_probe() cannot find device at 0x%04lx.\n", dev->name,
+ iobase);
+ }
+
+ /*
+ ** Walk the device list to check that at least one device
+ ** initialised OK
+ */
+ for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
+
+ if (dev->priv) status = 0;
+ if (iobase == 0) autoprobed = 1;
+ }
+
+ return status;
+}
+
+static int
+depca_hw_init(struct device *dev, u_long ioaddr)
+{
+ struct depca_private *lp;
+ int i, j, offset, netRAM, mem_len, status=0;
+ s16 nicsr;
+ u_long mem_start=0, mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
+
+ STOP_DEPCA;
+
+ nicsr = inb(DEPCA_NICSR);
+ nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM);
+ outb(nicsr, DEPCA_NICSR);
+
+ if (inw(DEPCA_DATA) == STOP) {
+ if (mem == 0) {
+ for (; mem_base[mem_chkd]; mem_chkd++) {
+ mem_start = mem_base[mem_chkd];
+ DepcaSignature(name, mem_start);
+ if (*name != '\0') break;
+ }
+ } else {
+ mem_start = mem;
+ if (adapter_name) {
+ strcpy(name, adapter_name);
+ } else{
+ DepcaSignature(name, mem_start);
+ }
+ }
+
+ if ((*name != '\0') && mem_start) { /* found a DEPCA device */
+ dev->base_addr = ioaddr;
+
+ if ((ioaddr&0x0fff)==DEPCA_EISA_IO_PORTS) {/* EISA slot address */
+ printk("%s: %s at 0x%04lx (EISA slot %d)",
+ dev->name, name, ioaddr, (int)((ioaddr>>12)&0x0f));
+ } else { /* ISA port address */
+ printk("%s: %s at 0x%04lx", dev->name, name, ioaddr);
+ }
+
+ printk(", h/w address ");
+ status = get_hw_addr(dev);
+ for (i=0; i<ETH_ALEN - 1; i++) { /* get the ethernet address */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x", dev->dev_addr[i]);
+
+ if (status == 0) {
+ /* Set up the maximum amount of network RAM(kB) */
+ netRAM = ((adapter != DEPCA) ? 64 : 48);
+ if ((nicsr & _128KB) && (adapter == de422)) netRAM = 128;
+ offset = 0x0000;
+
+ /* Shared Memory Base Address */
+ if (nicsr & BUF) {
+ offset = 0x8000; /* 32kbyte RAM offset*/
+ nicsr &= ~BS; /* DEPCA RAM in top 32k */
+ netRAM -= 32;
+ }
+ mem_start += offset; /* (E)ISA start address */
+ if ((mem_len = (NUM_RX_DESC*(sizeof(struct depca_rx_desc)+RX_BUFF_SZ) +
+ NUM_TX_DESC*(sizeof(struct depca_tx_desc)+TX_BUFF_SZ) +
+ sizeof(struct depca_init))) <=
+ (netRAM<<10)) {
+ printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start);
+
+ /* Enable the shadow RAM. */
+ if (adapter != DEPCA) {
+ nicsr |= SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /* Define the device private memory */
+ dev->priv = (void *) kmalloc(sizeof(struct depca_private), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ lp = (struct depca_private *)dev->priv;
+ memset((char *)dev->priv, 0, sizeof(struct depca_private));
+ lp->adapter = adapter;
+ sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
+ request_region(ioaddr, DEPCA_TOTAL_SIZE, lp->adapter_name);
+
+ /* Initialisation Block */
+ lp->sh_mem = mem_start;
+ mem_start += sizeof(struct depca_init);
+
+ /* Tx & Rx descriptors (aligned to a quadword boundary) */
+ mem_start = (mem_start + ALIGN) & ~ALIGN;
+ lp->rx_ring = (struct depca_rx_desc *)mem_start;
+
+ mem_start += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
+ lp->tx_ring = (struct depca_tx_desc *)mem_start;
+
+ mem_start += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
+ lp->bus_offset = mem_start & 0x00ff0000;
+ mem_start &= LA_MASK; /* LANCE re-mapped start address */
+
+ lp->dma_buffs = mem_start;
+
+ /* Finish initialising the ring information. */
+ lp->rxRingMask = NUM_RX_DESC - 1;
+ lp->txRingMask = NUM_TX_DESC - 1;
+
+ /* Calculate Tx/Rx RLEN size for the descriptors. */
+ for (i=0, j = lp->rxRingMask; j>0; i++) {
+ j >>= 1;
+ }
+ lp->rx_rlen = (s32)(i << 29);
+ for (i=0, j = lp->txRingMask; j>0; i++) {
+ j >>= 1;
+ }
+ lp->tx_rlen = (s32)(i << 29);
+
+ /* Load the initialisation block */
+ depca_init_ring(dev);
+
+ /* Initialise the control and status registers */
+ LoadCSRs(dev);
+
+ /* Enable DEPCA board interrupts for autoprobing */
+ nicsr = ((nicsr & ~IM)|IEN);
+ outb(nicsr, DEPCA_NICSR);
+
+ /* To auto-IRQ we enable the initialization-done and DMA err,
+ interrupts. For now we will always get a DMA error. */
+ if (dev->irq < 2) {
+#ifndef MODULE
+ unsigned char irqnum;
+ autoirq_setup(0);
+
+ /* Assign the correct irq list */
+ switch (lp->adapter) {
+ case DEPCA:
+ case de100:
+ case de101:
+ depca_irq = de1xx_irq;
+ break;
+ case de200:
+ case de201:
+ case de202:
+ case de210:
+ depca_irq = de2xx_irq;
+ break;
+ case de422:
+ depca_irq = de422_irq;
+ break;
+ }
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(INEA | INIT, DEPCA_DATA);
+
+ irqnum = autoirq_report(1);
+ if (!irqnum) {
+ printk(" and failed to detect IRQ line.\n");
+ status = -ENXIO;
+ } else {
+ for (dev->irq=0,i=0; (depca_irq[i]) && (!dev->irq); i++) {
+ if (irqnum == depca_irq[i]) {
+ dev->irq = irqnum;
+ printk(" and uses IRQ%d.\n", dev->irq);
+ }
+ }
+
+ if (!dev->irq) {
+ printk(" but incorrect IRQ line detected.\n");
+ status = -ENXIO;
+ }
+ }
+#endif /* MODULE */
+ } else {
+ printk(" and assigned IRQ%d.\n", dev->irq);
+ }
+ if (status) release_region(ioaddr, DEPCA_TOTAL_SIZE);
+ } else {
+ printk(",\n requests %dkB RAM: only %dkB is available!\n",
+ (mem_len>>10), netRAM);
+ status = -ENXIO;
+ }
+ } else {
+ printk(" which has an Ethernet PROM CRC error.\n");
+ status = -ENXIO;
+ }
+ }
+ if (!status) {
+ if (depca_debug > 0) {
+ printk(version);
+ }
+
+ /* The DEPCA-specific entries in the device structure. */
+ dev->open = &depca_open;
+ dev->hard_start_xmit = &depca_start_xmit;
+ dev->stop = &depca_close;
+ dev->get_stats = &depca_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &depca_ioctl;
+
+ dev->mem_start = 0;
+
+ /* Fill in the generic field of the device structure. */
+ ether_setup(dev);
+ } else { /* Incorrectly initialised hardware */
+ if (dev->priv) {
+ kfree_s(dev->priv, sizeof(struct depca_private));
+ dev->priv = NULL;
+ }
+ }
+ } else {
+ status = -ENXIO;
+ }
+
+ return status;
+}
+
+
+static int
+depca_open(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ s16 nicsr;
+ int status = 0;
+
+ irq2dev_map[dev->irq] = dev;
+ STOP_DEPCA;
+ nicsr = inb(DEPCA_NICSR);
+
+ /* Make sure the shadow RAM is enabled */
+ if (adapter != DEPCA) {
+ nicsr |= SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /* Re-initialize the DEPCA... */
+ depca_init_ring(dev);
+ LoadCSRs(dev);
+
+ depca_dbg_open(dev);
+
+ if (request_irq(dev->irq, &depca_interrupt, 0, lp->adapter_name)) {
+ printk("depca_open(): Requested IRQ%d is busy\n",dev->irq);
+ status = -EAGAIN;
+ } else {
+
+ /* Enable DEPCA board interrupts and turn off LED */
+ nicsr = ((nicsr & ~IM & ~LED)|IEN);
+ outb(nicsr, DEPCA_NICSR);
+ outw(CSR0,DEPCA_ADDR);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ status = InitRestartDepca(dev);
+
+ if (depca_debug > 1){
+ printk("CSR0: 0x%4.4x\n",inw(DEPCA_DATA));
+ printk("nicsr: 0x%02x\n",inb(DEPCA_NICSR));
+ }
+ }
+
+ MOD_INC_USE_COUNT;
+
+ return status;
+}
+
+/* Initialize the lance Rx and Tx descriptor rings. */
+static void
+depca_init_ring(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_int i;
+ u_long p;
+
+ /* Lock out other processes whilst setting up the hardware */
+ set_bit(0, (void *)&dev->tbusy);
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Initialize the base addresses and length of each buffer in the ring */
+ for (i = 0; i <= lp->rxRingMask; i++) {
+ writel((p=lp->dma_buffs+i*RX_BUFF_SZ) | R_OWN, &lp->rx_ring[i].base);
+ writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length);
+ lp->rx_memcpy[i]=(char *)(p+lp->bus_offset);
+ }
+ for (i = 0; i <= lp->txRingMask; i++) {
+ writel((p=lp->dma_buffs+(i+lp->txRingMask+1)*TX_BUFF_SZ) & 0x00ffffff,
+ &lp->tx_ring[i].base);
+ lp->tx_memcpy[i]=(char *)(p+lp->bus_offset);
+ }
+
+ /* Set up the initialization block */
+ lp->init_block.rx_ring = ((u32)((u_long)lp->rx_ring)&LA_MASK) | lp->rx_rlen;
+ lp->init_block.tx_ring = ((u32)((u_long)lp->tx_ring)&LA_MASK) | lp->tx_rlen;
+
+ SetMulticastFilter(dev);
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ }
+
+ lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */
+
+ return;
+}
+
+/*
+** Writes a socket buffer to TX descriptor ring and starts transmission
+*/
+static int
+depca_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ int status = 0;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 1*HZ) {
+ status = -1;
+ } else {
+ printk("%s: transmit timed out, status %04x, resetting.\n",
+ dev->name, inw(DEPCA_DATA));
+
+ STOP_DEPCA;
+ depca_init_ring(dev);
+ LoadCSRs(dev);
+ dev->interrupt = UNMASK_INTERRUPTS;
+ dev->start = 1;
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ InitRestartDepca(dev);
+ }
+ return status;
+ } else if (skb == NULL) {
+ dev_tint(dev);
+ } else if (skb->len > 0) {
+ /* Enforce 1 process per h/w access */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ status = -1;
+ } else {
+ if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */
+ status = load_packet(dev, skb);
+
+ if (!status) {
+ /* Trigger an immediate send demand. */
+ outw(CSR0, DEPCA_ADDR);
+ outw(INEA | TDMD, DEPCA_DATA);
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ if (TX_BUFFS_AVAIL) {
+ dev->tbusy=0;
+ }
+ } else {
+ status = -1;
+ }
+ }
+ }
+
+ return status;
+}
+
+/*
+** The DEPCA interrupt handler.
+*/
+static void
+depca_interrupt(int irq, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct depca_private *lp;
+ s16 csr0, nicsr;
+ u_long ioaddr;
+
+ if (dev == NULL) {
+ printk ("depca_interrupt(): irq %d for unknown device.\n", irq);
+ } else {
+ lp = (struct depca_private *)dev->priv;
+ ioaddr = dev->base_addr;
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = MASK_INTERRUPTS;
+
+ /* mask the DEPCA board interrupts and turn on the LED */
+ nicsr = inb(DEPCA_NICSR);
+ nicsr |= (IM|LED);
+ outb(nicsr, DEPCA_NICSR);
+
+ outw(CSR0, DEPCA_ADDR);
+ csr0 = inw(DEPCA_DATA);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & INTE, DEPCA_DATA);
+
+ if (csr0 & RINT) /* Rx interrupt (packet arrived) */
+ depca_rx(dev);
+
+ if (csr0 & TINT) /* Tx interrupt (packet sent) */
+ depca_tx(dev);
+
+ if ((TX_BUFFS_AVAIL >= 0) && dev->tbusy) { /* any resources available? */
+ dev->tbusy = 0; /* clear TX busy flag */
+ mark_bh(NET_BH);
+ }
+
+ /* Unmask the DEPCA board interrupts and turn off the LED */
+ nicsr = (nicsr & ~IM & ~LED);
+ outb(nicsr, DEPCA_NICSR);
+
+ dev->interrupt = UNMASK_INTERRUPTS;
+ }
+
+ return;
+}
+
+static int
+depca_rx(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ int i, entry;
+ s32 status;
+
+ for (entry=lp->rx_new;
+ !(readl(&lp->rx_ring[entry].base) & R_OWN);
+ entry=lp->rx_new){
+ status = readl(&lp->rx_ring[entry].base) >> 16 ;
+ if (status & R_STP) { /* Remember start of frame */
+ lp->rx_old = entry;
+ }
+ if (status & R_ENP) { /* Valid frame status */
+ if (status & R_ERR) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (status & R_FRAM) lp->stats.rx_frame_errors++;
+ if (status & R_OFLO) lp->stats.rx_over_errors++;
+ if (status & R_CRC) lp->stats.rx_crc_errors++;
+ if (status & R_BUFF) lp->stats.rx_fifo_errors++;
+ } else {
+ short len, pkt_len = readw(&lp->rx_ring[entry].msg_length);
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb != NULL) {
+ unsigned char *buf;
+ skb_reserve(skb,2); /* 16 byte align the IP header */
+ buf = skb_put(skb,pkt_len);
+ skb->dev = dev;
+ if (entry < lp->rx_old) { /* Wrapped buffer */
+ len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
+ memcpy_fromio(buf, lp->rx_memcpy[lp->rx_old], len);
+ memcpy_fromio(buf + len, lp->rx_memcpy[0], pkt_len-len);
+ } else { /* Linear buffer */
+ memcpy_fromio(buf, lp->rx_memcpy[lp->rx_old], pkt_len);
+ }
+
+ /*
+ ** Notify the upper protocol layers that there is another
+ ** packet to handle
+ */
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+
+ /*
+ ** Update stats
+ */
+ lp->stats.rx_packets++;
+ for (i=1; i<DEPCA_PKT_STAT_SZ-1; i++) {
+ if (pkt_len < (i*DEPCA_PKT_BIN_SZ)) {
+ lp->pktStats.bins[i]++;
+ i = DEPCA_PKT_STAT_SZ;
+ }
+ }
+ if (buf[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s16 *)&buf[0] == -1) &&
+ (*(s16 *)&buf[2] == -1) &&
+ (*(s16 *)&buf[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s16 *)&buf[0] == *(s16 *)&dev->dev_addr[0]) &&
+ (*(s16 *)&buf[2] == *(s16 *)&dev->dev_addr[2]) &&
+ (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+ } else {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ lp->stats.rx_dropped++; /* Really, deferred. */
+ break;
+ }
+ }
+ /* Change buffer ownership for this last frame, back to the adapter */
+ for (; lp->rx_old!=entry; lp->rx_old=(++lp->rx_old)&lp->rxRingMask) {
+ writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN,
+ &lp->rx_ring[lp->rx_old].base);
+ }
+ writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
+ }
+
+ /*
+ ** Update entry information
+ */
+ lp->rx_new = (++lp->rx_new) & lp->rxRingMask;
+ }
+
+ return 0;
+}
+
+/*
+** Buffer sent - check for buffer errors.
+*/
+static int
+depca_tx(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ int entry;
+ s32 status;
+ u_long ioaddr = dev->base_addr;
+
+ for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
+ status = readl(&lp->tx_ring[entry].base) >> 16 ;
+
+ if (status < 0) { /* Packet not yet sent! */
+ break;
+ } else if (status & T_ERR) { /* An error occurred. */
+ status = readl(&lp->tx_ring[entry].misc);
+ lp->stats.tx_errors++;
+ if (status & TMD3_RTRY) lp->stats.tx_aborted_errors++;
+ if (status & TMD3_LCAR) lp->stats.tx_carrier_errors++;
+ if (status & TMD3_LCOL) lp->stats.tx_window_errors++;
+ if (status & TMD3_UFLO) lp->stats.tx_fifo_errors++;
+ if (status & (TMD3_BUFF | TMD3_UFLO)) {
+ /* Trigger an immediate send demand. */
+ outw(CSR0, DEPCA_ADDR);
+ outw(INEA | TDMD, DEPCA_DATA);
+ }
+ } else if (status & (T_MORE | T_ONE)) {
+ lp->stats.collisions++;
+ } else {
+ lp->stats.tx_packets++;
+ }
+
+ /* Update all the pointers */
+ lp->tx_old = (++lp->tx_old) & lp->txRingMask;
+ }
+
+ return 0;
+}
+
+static int
+depca_close(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ s16 nicsr;
+ u_long ioaddr = dev->base_addr;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ outw(CSR0, DEPCA_ADDR);
+
+ if (depca_debug > 1) {
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(DEPCA_DATA));
+ }
+
+ /*
+ ** We stop the DEPCA here -- it occasionally polls
+ ** memory if we don't.
+ */
+ outw(STOP, DEPCA_DATA);
+
+ /*
+ ** Give back the ROM in case the user wants to go to DOS
+ */
+ if (lp->adapter != DEPCA) {
+ nicsr = inb(DEPCA_NICSR);
+ nicsr &= ~SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /*
+ ** Free the associated irq
+ */
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = NULL;
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static void LoadCSRs(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+
+ outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */
+ outw((u16)(lp->sh_mem & LA_MASK), DEPCA_DATA);
+ outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */
+ outw((u16)((lp->sh_mem & LA_MASK) >> 16), DEPCA_DATA);
+ outw(CSR3, DEPCA_ADDR); /* ALE control */
+ outw(ACON, DEPCA_DATA);
+
+ outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
+
+ return;
+}
+
+static int InitRestartDepca(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ int i, status=0;
+
+ /* Copy the shadow init_block to shared memory */
+ memcpy_toio((char *)lp->sh_mem, &lp->init_block, sizeof(struct depca_init));
+
+ outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */
+ outw(INIT, DEPCA_DATA); /* initialize DEPCA */
+
+ /* wait for lance to complete initialisation */
+ for (i=0;(i<100) && !(inw(DEPCA_DATA) & IDON); i++);
+
+ if (i!=100) {
+ /* clear IDON by writing a "1", enable interrupts and start lance */
+ outw(IDON | INEA | STRT, DEPCA_DATA);
+ if (depca_debug > 2) {
+ printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n",
+ dev->name, i, lp->sh_mem, inw(DEPCA_DATA));
+ }
+ } else {
+ printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n",
+ dev->name, i, lp->sh_mem, inw(DEPCA_DATA));
+ status = -1;
+ }
+
+ return status;
+}
+
+static struct enet_statistics *
+depca_get_stats(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+
+ /* Null body since there is no framing error counter */
+
+ return &lp->stats;
+}
+
+/*
+** Set or clear the multicast filter for this adaptor.
+*/
+static void
+set_multicast_list(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+
+ if (irq2dev_map[dev->irq] != NULL) {
+ while(dev->tbusy); /* Stop ring access */
+ set_bit(0, (void*)&dev->tbusy);
+ while(lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */
+ lp->init_block.mode |= PROM;
+ } else {
+ SetMulticastFilter(dev);
+ lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */
+ }
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ dev->tbusy = 0; /* Unlock the TX ring */
+ }
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Big endian crc one liner is mine, all mine, ha ha ha ha!
+** LANCE calculates its hash codes big endian.
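+**
+** For illustration (hypothetical hash value): a 6-bit hash of 44 (binary
+** 101100) selects mcast_table[44>>3] = mcast_table[5] and sets bit
+** 1<<(44&7), i.e. bit 4, within that byte.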
+*/
+static void SetMulticastFilter(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ struct dev_mc_list *dmi=dev->mc_list;
+ char *addrs;
+ int i, j, bit, byte;
+ u16 hashcode;
+ s32 crc, poly = CRC_POLYNOMIAL_BE;
+
+ if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */
+ for (i=0; i<(HASH_TABLE_LEN>>3); i++) {
+ lp->init_block.mcast_table[i] = (char)0xff;
+ }
+ } else {
+ for (i=0; i<(HASH_TABLE_LEN>>3); i++){ /* Clear the multicast table */
+ lp->init_block.mcast_table[i]=0;
+ }
+ /* Add multicast addresses */
+ for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
+ addrs=dmi->dmi_addr;
+ dmi=dmi->next;
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
+ crc = 0xffffffff; /* init CRC for each address */
+ for (byte=0;byte<ETH_ALEN;byte++) {/* for each address byte */
+ /* process each address bit */
+ for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
+ crc = (crc << 1) ^ ((((crc<0?1:0) ^ bit) & 0x01) ? poly : 0);
+ }
+ }
+ hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
+ for (j=0;j<5;j++) { /* ... in reverse order. */
+ hashcode = (hashcode << 1) | ((crc>>=1) & 1);
+ }
+
+ byte = hashcode >> 3; /* bit[3-5] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+ lp->init_block.mcast_table[byte] |= bit;
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** ISA bus I/O device probe
+*/
+static void isa_probe(struct device *dev, u_long ioaddr)
+{
+ int i = num_depcas, maxSlots;
+ s32 ports[] = DEPCA_IO_PORTS;
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+ if (ioaddr > 0x400) return; /* EISA Address */
+ if (i >= MAX_NUM_DEPCAS) return; /* Too many ISA adapters */
+
+ if (ioaddr == 0) { /* Autoprobing */
+ maxSlots = MAX_NUM_DEPCAS;
+ } else { /* Probe a specific location */
+ ports[i] = ioaddr;
+ maxSlots = i + 1;
+ }
+
+ for (; (i<maxSlots) && (dev!=NULL) && ports[i]; i++) {
+ if (DevicePresent(ports[i]) == 0) {
+ if (check_region(ports[i], DEPCA_TOTAL_SIZE) == 0) {
+ if ((dev = alloc_device(dev, ports[i])) != NULL) {
+ if (depca_hw_init(dev, ports[i]) == 0) {
+ num_depcas++;
+ }
+ num_eth++;
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04x.\n", dev->name,ports[i]);
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+** the motherboard. Up to 15 EISA devices are supported.
+*/
+static void eisa_probe(struct device *dev, u_long ioaddr)
+{
+ int i, maxSlots;
+ u_long iobase;
+ char name[DEPCA_STRLEN];
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+ if ((ioaddr < 0x400) && (ioaddr > 0)) return; /* ISA Address */
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EISA_SLOT_INC; /* Get the first slot address */
+ i = 1;
+ maxSlots = MAX_EISA_SLOTS;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ i = (ioaddr >> 12);
+ maxSlots = i + 1;
+ }
+ if ((iobase & 0x0fff) == 0) iobase += DEPCA_EISA_IO_PORTS;
+
+ for (; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
+ if (EISA_signature(name, EISA_ID)) {
+ if (DevicePresent(iobase) == 0) {
+ if (check_region(iobase, DEPCA_TOTAL_SIZE) == 0) {
+ if ((dev = alloc_device(dev, iobase)) != NULL) {
+ if (depca_hw_init(dev, iobase) == 0) {
+ num_depcas++;
+ }
+ num_eth++;
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04lx.\n",dev->name,iobase);
+ }
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** Allocate the device by pointing to the next available space in the
+** device structure. Should one not be available, it is created.
+*/
+static struct device *alloc_device(struct device *dev, u_long iobase)
+{
+ int addAutoProbe = 0;
+ struct device *tmp = NULL, *ret;
+ int (*init)(struct device *) = NULL;
+
+ /*
+ ** Check the device structures for an end of list or unused device
+ */
+ if (!loading_module) {
+ while (dev->next != NULL) {
+ if ((dev->base_addr == DEPCA_NDA) || (dev->base_addr == 0)) break;
+ dev = dev->next; /* walk through eth device list */
+ num_eth++; /* increment eth device number */
+ }
+
+ /*
+ ** If an autoprobe is requested for another device, we must re-insert
+ ** the request later in the list. Remember the current information.
+ */
+ if ((dev->base_addr == 0) && (num_depcas > 0)) {
+ addAutoProbe++;
+ tmp = dev->next; /* point to the next device */
+ init = dev->init; /* remember the probe function */
+ }
+
+ /*
+ ** If at end of list and can't use current entry, malloc one up.
+ ** If memory could not be allocated, print an error message.
+ */
+ if ((dev->next == NULL) &&
+ !((dev->base_addr == DEPCA_NDA) || (dev->base_addr == 0))){
+ dev->next = (struct device *)kmalloc(sizeof(struct device) + 8,
+ GFP_KERNEL);
+
+ dev = dev->next; /* point to the new device */
+ if (dev == NULL) {
+ printk("eth%d: Device not initialised, insufficient memory\n",
+ num_eth);
+ } else {
+ /*
+ ** If the memory was allocated, point to the new memory area
+ ** and initialize it (name, I/O address, next device (NULL) and
+ ** initialisation probe routine).
+ */
+ dev->name = (char *)(dev + sizeof(struct device));
+ if (num_eth > 9999) {
+ sprintf(dev->name,"eth????"); /* New device name */
+ } else {
+ sprintf(dev->name,"eth%d", num_eth);/* New device name */
+ }
+ dev->base_addr = iobase; /* assign the io address */
+ dev->next = NULL; /* mark the end of list */
+ dev->init = &depca_probe; /* initialisation routine */
+ num_depcas++;
+ }
+ }
+ ret = dev; /* return current struct, or NULL */
+
+ /*
+ ** Now figure out what to do with the autoprobe that has to be inserted.
+ ** Firstly, search the (possibly altered) list for an empty space.
+ */
+ if (ret != NULL) {
+ if (addAutoProbe) {
+ for (;(tmp->next!=NULL) && (tmp->base_addr!=DEPCA_NDA); tmp=tmp->next);
+
+ /*
+ ** If no more device structures and can't use the current one, malloc
+ ** one up. If memory could not be allocated, print an error message.
+ */
+ if ((tmp->next == NULL) && !(tmp->base_addr == DEPCA_NDA)) {
+ tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
+ GFP_KERNEL);
+ tmp = tmp->next; /* point to the new device */
+ if (tmp == NULL) {
+ printk("%s: Insufficient memory to extend the device list.\n",
+ dev->name);
+ } else {
+ /*
+ ** If the memory was allocated, point to the new memory area
+ ** and initialize it (name, I/O address, next device (NULL) and
+ ** initialisation probe routine).
+ */
+ tmp->name = (char *)(tmp + sizeof(struct device));
+ if (num_eth > 9999) {
+ sprintf(tmp->name,"eth????"); /* New device name */
+ } else {
+ sprintf(tmp->name,"eth%d", num_eth);/* New device name */
+ }
+ tmp->base_addr = 0; /* re-insert the io address */
+ tmp->next = NULL; /* mark the end of list */
+ tmp->init = init; /* initialisation routine */
+ }
+ } else { /* structure already exists */
+ tmp->base_addr = 0; /* re-insert the io address */
+ }
+ }
+ }
+ } else {
+ ret = dev;
+ }
+
+ return ret;
+}
+
+/*
+** Look for a particular board name in the on-board Remote Diagnostics
+** and Boot (readb) ROM. This will also give us a clue to the network RAM
+** base address.
+*/
+static void DepcaSignature(char *name, u_long paddr)
+{
+ u_int i,j,k;
+ const char *signatures[] = DEPCA_SIGNATURE;
+ char tmpstr[16];
+
+ for (i=0;i<16;i++) { /* copy the first 16 bytes of ROM to */
+ tmpstr[i] = readb(paddr+0xc000+i); /* a temporary string */
+ }
+
+ strcpy(name,"");
+ for (i=0;*signatures[i]!='\0' && *name=='\0';i++) {
+ for (j=0,k=0;j<16 && k<strlen(signatures[i]);j++) {
+ if (signatures[i][k] == tmpstr[j]) { /* track signature */
+ k++;
+ } else { /* lost signature; begin search again */
+ k=0;
+ }
+ }
+ if (k == strlen(signatures[i])) {
+ strcpy(name,signatures[i]);
+ }
+ }
+
+ adapter = i - 1;
+
+ return;
+}
+
+/*
+** Look for a special sequence in the Ethernet station address PROM that
+** is common across all DEPCA products. Note that the original DEPCA needs
+** its ROM address counter to be initialized and enabled. Only enable
+** if the first address octet is a 0x08 - this minimises the chances of
+** messing around with some other hardware, but it assumes that this DEPCA
+** card initialized itself correctly.
+**
+** Search the Ethernet address ROM for the signature. Since the ROM address
+** counter can start at an arbitrary point, the search must include the entire
+** probe sequence length plus the (length_of_the_signature - 1).
+** Stop the search IMMEDIATELY after the signature is found so that the
+** PROM address counter is correctly positioned at the start of the
+** ethernet address for later read out.
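+**
+** On the little-endian i386, the two copies of ETH_PROM_SIG (0xAA5500FF)
+** written into dev.Sig[] below appear as the byte sequence
+** FF 00 55 AA FF 00 55 AA, which is what is matched against successive
+** PROM reads.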
+*/
+static int DevicePresent(u_long ioaddr)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ } dev;
+ short sigLength=0;
+ s8 data;
+ s16 nicsr;
+ int i, j, status = 0;
+
+ data = inb(DEPCA_PROM); /* clear counter on DEPCA */
+ data = inb(DEPCA_PROM); /* read data */
+
+ if (data == 0x08) { /* Enable counter on DEPCA */
+ nicsr = inb(DEPCA_NICSR);
+ nicsr |= AAC;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
+ data = inb(DEPCA_PROM);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) { /* rare case.... */
+ j=1;
+ } else {
+ j=0;
+ }
+ }
+ }
+
+ if (j!=sigLength) {
+ status = -ENODEV; /* search failed */
+ }
+
+ return status;
+}
+
+/*
+** The DE100 and DE101 PROM accesses were made non-standard for some bizarre
+** reason: access the upper half of the PROM with x=0; access the lower half
+** with x=1.
+*/
+static int get_hw_addr(struct device *dev)
+{
+ u_long ioaddr = dev->base_addr;
+ int i, k, tmp, status = 0;
+ u_short j, x, chksum;
+
+ x = (((adapter == de100) || (adapter == de101)) ? 1 : 0);
+
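+ /* The checksum is a 16-bit rotating sum: rotate the accumulator left one
+ bit (folding the carry back in), add each of the three address words
+ (again folding the carry), then compare against the checksum word
+ stored in the PROM. */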
+ for (i=0,k=0,j=0;j<3;j++) {
+ k <<= 1 ;
+ if (k > 0xffff) k-=0xffff;
+
+ k += (u_char) (tmp = inb(DEPCA_PROM + x));
+ dev->dev_addr[i++] = (u_char) tmp;
+ k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+
+ if (k > 0xffff) k-=0xffff;
+ }
+ if (k == 0xffff) k=0;
+
+ chksum = (u_char) inb(DEPCA_PROM + x);
+ chksum |= (u_short) (inb(DEPCA_PROM + x) << 8);
+ if (k != chksum) status = -1;
+
+ return status;
+}
+
+/*
+** Load a packet into the shared memory
+*/
+static int load_packet(struct device *dev, struct sk_buff *skb)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ int i, entry, end, len, status = 0;
+
+ entry = lp->tx_new; /* Ring around buffer number. */
+ end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask;
+ if (!(readl(&lp->tx_ring[end].base) & T_OWN)) {/* Enough room? */
+ /*
+ ** Caution: the write order is important here... don't set up the
+ ** ownership rights until all the other information is in place.
+ */
+ if (end < entry) { /* wrapped buffer */
+ len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ;
+ memcpy_toio(lp->tx_memcpy[entry], skb->data, len);
+ memcpy_toio(lp->tx_memcpy[0], skb->data + len, skb->len - len);
+ } else { /* linear buffer */
+ memcpy_toio(lp->tx_memcpy[entry], skb->data, skb->len);
+ }
+
+ /* set up the buffer descriptors */
+ len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+ for (i = entry; i != end; i = (++i) & lp->txRingMask) {
+ /* clean out flags */
+ writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base);
+ writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */
+ writew(-TX_BUFF_SZ, &lp->tx_ring[i].length);/* packet length in buffer */
+ len -= TX_BUFF_SZ;
+ }
+ /* clean out flags */
+ writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base);
+ writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */
+ writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */
+
+ /* start of packet */
+ writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base);
+ /* end of packet */
+ writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base);
+
+ for (i=end; i!=entry; --i) {
+ /* ownership of packet */
+ writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base);
+ if (i == 0) i=lp->txRingMask+1;
+ }
+ writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base);
+
+ lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
+ } else {
+ status = -1;
+ }
+
+ return status;
+}
+
+/*
+** Look for a particular board name in the EISA configuration space
+*/
+static int EISA_signature(char *name, s32 eisa_id)
+{
+ u_int i;
+ const char *signatures[] = DEPCA_SIGNATURE;
+ char ManCode[DEPCA_STRLEN];
+ union {
+ s32 ID;
+ char Id[4];
+ } Eisa;
+ int status = 0;
+
+ *name = '\0';
+ Eisa.ID = inl(eisa_id);
+
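+ /* The EISA ID is decoded into a 5 character string: two compressed 5-bit
+ characters ('A' = 1) from the first ID word, followed by three digits
+ of the product number, e.g. "DE422" for a DE422 board. */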
+ ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
+ ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
+ ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
+ ManCode[3]=(( Eisa.Id[2]&0x0f)+0x30);
+ ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
+ ManCode[5]='\0';
+
+ for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
+ if (strstr(ManCode, signatures[i]) != NULL) {
+ strcpy(name,ManCode);
+ status = 1;
+ }
+ }
+
+ return status;
+}
+
+static void depca_dbg_open(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ struct depca_init *p = (struct depca_init *)lp->sh_mem;
+ int i;
+
+ if (depca_debug > 1){
+ /* Copy the shadow init_block to shared memory */
+ memcpy_toio((char *)lp->sh_mem,&lp->init_block,sizeof(struct depca_init));
+
+ printk("%s: depca open with irq %d\n",dev->name,dev->irq);
+ printk("Descriptor head addresses:\n");
+ printk("\t0x%lx 0x%lx\n",(u_long)lp->rx_ring, (u_long)lp->tx_ring);
+ printk("Descriptor addresses:\nRX: ");
+ for (i=0;i<lp->rxRingMask;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ", (long) &lp->rx_ring[i].base);
+ }
+ }
+ printk("...0x%8.8lx\n", (long) &lp->rx_ring[i].base);
+ printk("TX: ");
+ for (i=0;i<lp->txRingMask;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ", (long) &lp->tx_ring[i].base);
+ }
+ }
+ printk("...0x%8.8lx\n", (long) &lp->tx_ring[i].base);
+ printk("\nDescriptor buffers:\nRX: ");
+ for (i=0;i<lp->rxRingMask;i++){
+ if (i < 3) {
+ printk("0x%8.8x ", (u32) readl(&lp->rx_ring[i].base));
+ }
+ }
+ printk("...0x%8.8x\n", (u32) readl(&lp->rx_ring[i].base));
+ printk("TX: ");
+ for (i=0;i<lp->txRingMask;i++){
+ if (i < 3) {
+ printk("0x%8.8x ", (u32) readl(&lp->tx_ring[i].base));
+ }
+ }
+ printk("...0x%8.8x\n", (u32) readl(&lp->tx_ring[i].base));
+ printk("Initialisation block at 0x%8.8lx\n",lp->sh_mem);
+ printk("\tmode: 0x%4.4x\n", (u16) readw(&p->mode));
+ printk("\tphysical address: ");
+ for (i=0;i<ETH_ALEN-1;i++){
+ printk("%2.2x:",(u_char)readb(&p->phys_addr[i]));
+ }
+ printk("%2.2x\n",(u_char)readb(&p->phys_addr[i]));
+ printk("\tmulticast hash table: ");
+ for (i=0;i<(HASH_TABLE_LEN >> 3)-1;i++){
+ printk("%2.2x:",(u_char)readb(&p->mcast_table[i]));
+ }
+ printk("%2.2x\n",(u_char)readb(&p->mcast_table[i]));
+ printk("\trx_ring at: 0x%8.8x\n", (u32) readl(&p->rx_ring));
+ printk("\ttx_ring at: 0x%8.8x\n", (u32) readl(&p->tx_ring));
+ printk("dma_buffs: 0x%8.8lx\n",lp->dma_buffs);
+ printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n",
+ (int)lp->rxRingMask + 1,
+ lp->rx_rlen);
+ printk("TX: %d Log2(txRingMask): 0x%8.8x\n",
+ (int)lp->txRingMask + 1,
+ lp->tx_rlen);
+ outw(CSR2,DEPCA_ADDR);
+ printk("CSR2&1: 0x%4.4x",inw(DEPCA_DATA));
+ outw(CSR1,DEPCA_ADDR);
+ printk("%4.4x\n",inw(DEPCA_DATA));
+ outw(CSR3,DEPCA_ADDR);
+ printk("CSR3: 0x%4.4x\n",inw(DEPCA_DATA));
+ }
+
+ return;
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases.
+** The MCA IOCTLs will not work here; they are for testing purposes only.
+*/
+static int depca_ioctl(struct device *dev, struct ifreq *rq, int cmd)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_data;
+ int i, status = 0;
+ u_long ioaddr = dev->base_addr;
+ union {
+ u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
+ u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
+ u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
+ } tmp;
+
+ switch(ioc->cmd) {
+ case DEPCA_GET_HWADDR: /* Get the hardware address */
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ ioc->len = ETH_ALEN;
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ case DEPCA_SET_HWADDR: /* Set the hardware address */
+ if (suser()) {
+ if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN))) {
+ memcpy_fromfs(tmp.addr,ioc->data,ETH_ALEN);
+ for (i=0; i<ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ }
+ while(dev->tbusy); /* Stop ring access */
+ set_bit(0, (void*)&dev->tbusy);
+ while(lp->tx_old != lp->tx_new);/* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ dev->tbusy = 0; /* Unlock the TX ring */
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_SET_PROM: /* Set Promiscuous Mode */
+ if (suser()) {
+ while(dev->tbusy); /* Stop ring access */
+ set_bit(0, (void*)&dev->tbusy);
+ while(lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ lp->init_block.mode |= PROM; /* Set promiscuous mode */
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ dev->tbusy = 0; /* Unlock the TX ring */
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */
+ if (suser()) {
+ while(dev->tbusy); /* Stop ring access */
+ set_bit(0, (void*)&dev->tbusy);
+ while(lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ dev->tbusy = 0; /* Unlock the TX ring */
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ printk("%s: Boo!\n", dev->name);
+
+ break;
+ case DEPCA_GET_MCA: /* Get the multicast address table */
+ ioc->len = (HASH_TABLE_LEN >> 3);
+ if (!(status = verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, lp->init_block.mcast_table, ioc->len);
+ }
+
+ break;
+ case DEPCA_SET_MCA: /* Set a multicast address */
+ if (suser()) {
+ if (!(status=verify_area(VERIFY_READ, ioc->data, ETH_ALEN*ioc->len))) {
+ memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
+ set_multicast_list(dev);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_CLR_MCA: /* Clear all multicast addresses */
+ if (suser()) {
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_MCA_EN: /* Enable pass all multicast addressing */
+ if (suser()) {
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_GET_STATS: /* Get the driver statistics */
+ cli();
+ ioc->len = sizeof(lp->pktStats);
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
+ }
+ sti();
+
+ break;
+ case DEPCA_CLR_STATS: /* Zero out the driver statistics */
+ if (suser()) {
+ cli();
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ sti();
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_GET_REG: /* Get the DEPCA Registers */
+ i=0;
+ tmp.sval[i++] = inw(DEPCA_NICSR);
+ outw(CSR0, DEPCA_ADDR); /* status register */
+ tmp.sval[i++] = inw(DEPCA_DATA);
+ memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init));
+ ioc->len = i+sizeof(struct depca_init);
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ default:
+ status = -EOPNOTSUPP;
+ }
+
+ return status;
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device thisDepca = {
+ devicename, /* device name is inserted by /linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x200, 7, /* I/O address, IRQ */
+ 0, 0, 0, NULL, depca_probe };
+
+static int irq=7; /* EDIT THESE LINE FOR YOUR CONFIGURATION */
+static int io=0x200; /* Or use the irq= io= options to insmod */
+
+/* See depca_probe() for autoprobe messages when a module */
+int
+init_module(void)
+{
+ thisDepca.irq=irq;
+ thisDepca.base_addr=io;
+
+ if (register_netdev(&thisDepca) != 0)
+ return -EIO;
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ release_region(thisDepca.base_addr, DEPCA_TOTAL_SIZE);
+ if (thisDepca.priv) {
+ kfree(thisDepca.priv);
+ thisDepca.priv = NULL;
+ }
+ thisDepca.irq=0;
+
+ unregister_netdev(&thisDepca);
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * kernel-compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O2 -m486 -c depca.c"
+ *
+ * module-compile-command: "gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O2 -m486 -c depca.c"
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/depca.h b/i386/i386at/gpl/linux/net/depca.h
new file mode 100644
index 00000000..012f7399
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/depca.h
@@ -0,0 +1,185 @@
+/*
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 David C. Davies. This software may be used and distributed
+ according to the terms of the GNU Public License, incorporated herein by
+ reference.
+*/
+
+/*
+** I/O addresses. Note that the 2k buffer option is not supported in
+** this driver.
+*/
+#define DEPCA_NICSR ioaddr+0x00 /* Network interface CSR */
+#define DEPCA_RBI ioaddr+0x02 /* RAM buffer index (2k buffer mode) */
+#define DEPCA_DATA ioaddr+0x04 /* LANCE registers' data port */
+#define DEPCA_ADDR ioaddr+0x06 /* LANCE registers' address port */
+#define DEPCA_HBASE ioaddr+0x08 /* EISA high memory base address reg. */
+#define DEPCA_PROM ioaddr+0x0c /* Ethernet address ROM data port */
+#define DEPCA_CNFG ioaddr+0x0c /* EISA Configuration port */
+#define DEPCA_RBSA ioaddr+0x0e /* RAM buffer starting address (2k buff.) */
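+
+/*
+** A LANCE CSR is accessed by writing its number to the address port and
+** then reading or writing the data port, as done throughout depca.c:
+** outw(CSR0, DEPCA_ADDR); csr0 = inw(DEPCA_DATA);
+*/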
+
+/*
+** These are LANCE registers addressable through DEPCA_ADDR
+*/
+#define CSR0 0
+#define CSR1 1
+#define CSR2 2
+#define CSR3 3
+
+/*
+** NETWORK INTERFACE CSR (NI_CSR) bit definitions
+*/
+
+#define TO 0x0100 /* Time Out for remote boot */
+#define SHE 0x0080 /* SHadow memory Enable */
+#define BS 0x0040 /* Bank Select */
+#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */
+#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */
+#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */
+#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */
+#define IM 0x0004 /* Interrupt Mask (1->mask) */
+#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */
+#define LED 0x0001 /* LED control */
+
+/*
+** Control and Status Register 0 (CSR0) bit definitions
+*/
+
+#define ERR 0x8000 /* Error summary */
+#define BABL 0x4000 /* Babble transmitter timeout error */
+#define CERR 0x2000 /* Collision Error */
+#define MISS 0x1000 /* Missed packet */
+#define MERR 0x0800 /* Memory Error */
+#define RINT 0x0400 /* Receiver Interrupt */
+#define TINT 0x0200 /* Transmit Interrupt */
+#define IDON 0x0100 /* Initialization Done */
+#define INTR 0x0080 /* Interrupt Flag */
+#define INEA 0x0040 /* Interrupt Enable */
+#define RXON 0x0020 /* Receiver on */
+#define TXON 0x0010 /* Transmitter on */
+#define TDMD 0x0008 /* Transmit Demand */
+#define STOP 0x0004 /* Stop */
+#define STRT 0x0002 /* Start */
+#define INIT 0x0001 /* Initialize */
+#define INTM 0xff00 /* Interrupt Mask */
+#define INTE 0xfff0 /* Interrupt Enable */
+
+/*
+** CONTROL AND STATUS REGISTER 3 (CSR3)
+*/
+
+#define BSWP 0x0004 /* Byte SWaP */
+#define ACON 0x0002 /* ALE control */
+#define BCON 0x0001 /* Byte CONtrol */
+
+/*
+** Initialization Block Mode Register
+*/
+
+#define PROM 0x8000 /* Promiscuous Mode */
+#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */
+#define INTL 0x0040 /* Internal Loopback */
+#define DRTY 0x0020 /* Disable Retry */
+#define COLL 0x0010 /* Force Collision */
+#define DTCR 0x0008 /* Disable Transmit CRC */
+#define LOOP 0x0004 /* Loopback */
+#define DTX 0x0002 /* Disable the Transmitter */
+#define DRX 0x0001 /* Disable the Receiver */
+
+/*
+** Receive Message Descriptor 1 (RMD1) bit definitions.
+*/
+
+#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
+#define R_ERR 0x4000 /* Error Summary */
+#define R_FRAM 0x2000 /* Framing Error */
+#define R_OFLO 0x1000 /* Overflow Error */
+#define R_CRC 0x0800 /* CRC Error */
+#define R_BUFF 0x0400 /* Buffer Error */
+#define R_STP 0x0200 /* Start of Packet */
+#define R_ENP 0x0100 /* End of Packet */
+
+/*
+** Transmit Message Descriptor 1 (TMD1) bit definitions.
+*/
+
+#define T_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
+#define T_ERR 0x4000 /* Error Summary */
+#define T_ADD_FCS 0x2000 /* Append an FCS to the Tx frame */
+#define T_MORE 0x1000 /* >1 retry to transmit packet */
+#define T_ONE 0x0800 /* 1 try needed to transmit the packet */
+#define T_DEF 0x0400 /* Deferred */
+#define T_STP 0x02000000 /* Start of Packet */
+#define T_ENP 0x01000000 /* End of Packet */
+#define T_FLAGS 0xff000000 /* TX Flags Field */
+
+/*
+** Transmit Message Descriptor 3 (TMD3) bit definitions.
+*/
+
+#define TMD3_BUFF 0x8000 /* BUFFer error */
+#define TMD3_UFLO 0x4000 /* UnderFLOw error */
+#define TMD3_RES 0x2000 /* REServed */
+#define TMD3_LCOL 0x1000 /* Late COLlision */
+#define TMD3_LCAR 0x0800 /* Loss of CARrier */
+#define TMD3_RTRY 0x0400 /* ReTRY error */
+
+/*
+** EISA configuration Register (CNFG) bit definitions
+*/
+
+#define TIMEOUT 0x0100 /* 0:2.5 mins, 1: 30 secs */
+#define REMOTE 0x0080 /* Remote Boot Enable -> 1 */
+#define IRQ11 0x0040 /* Enable -> 1 */
+#define IRQ10 0x0020 /* Enable -> 1 */
+#define IRQ9 0x0010 /* Enable -> 1 */
+#define IRQ5 0x0008 /* Enable -> 1 */
+#define BUFF 0x0004 /* 0: 64kB or 128kB, 1: 32kB */
+#define PADR16 0x0002 /* RAM on 64kB boundary */
+#define PADR17 0x0001 /* RAM on 128kB boundary */
+
+/*
+** Miscellaneous
+*/
+#define HASH_TABLE_LEN 64 /* Bits */
+#define HASH_BITS 0x003f /* 6 LS bits */
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define EISA_EN 0x0001 /* Enable EISA bus buffers */
+#define EISA_ID iobase+0x0080 /* ID long word for EISA card */
+#define EISA_CTRL iobase+0x0084 /* Control word for EISA card */
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define DEPCAIOCTL SIOCDEVPRIVATE
+
+struct depca_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define DEPCA_GET_HWADDR 0x01 /* Get the hardware address */
+#define DEPCA_SET_HWADDR 0x02 /* Set the hardware address */
+#define DEPCA_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define DEPCA_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define DEPCA_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define DEPCA_GET_MCA 0x06 /* Get the multicast address table */
+#define DEPCA_SET_MCA 0x07 /* Set a multicast address */
+#define DEPCA_CLR_MCA 0x08 /* Clear all multicast addresses */
+#define DEPCA_MCA_EN 0x09 /* Enable pass all multicast addressing */
+#define DEPCA_GET_STATS 0x0a /* Get the driver statistics */
+#define DEPCA_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define DEPCA_GET_REG 0x0c /* Get the Register contents */
+#define DEPCA_SET_REG 0x0d /* Set the Register contents */
+#define DEPCA_DUMP 0x0f /* Dump the DEPCA Status */
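+
+/*
+** A minimal user-space sketch of this private ioctl interface (illustrative
+** only - not part of the driver; error handling is omitted and the "eth0"
+** interface name is an assumption):
+**
+** struct ifreq ifr;
+** struct depca_ioctl *ioc = (struct depca_ioctl *)&ifr.ifr_data;
+** unsigned char hwaddr[6];
+** int s = socket(AF_INET, SOCK_DGRAM, 0);
+**
+** strcpy(ifr.ifr_name, "eth0");
+** ioc->cmd = DEPCA_GET_HWADDR;
+** ioc->len = 6;
+** ioc->data = hwaddr;
+** ioctl(s, DEPCAIOCTL, &ifr);
+**
+** On return, hwaddr holds the station address read from the PROM.
+*/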
+
diff --git a/i386/i386at/gpl/linux/net/dev.c b/i386/i386at/gpl/linux/net/dev.c
new file mode 100644
index 00000000..69d576ff
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/dev.c
@@ -0,0 +1,1413 @@
+/*
+ * NET3 Protocol independent device support routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from the non IP parts of dev.c 1.0.19
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *
+ * Additional Authors:
+ * Florian la Roche <rzsfl@rz.uni-sb.de>
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ * David Hinds <dhinds@allegro.stanford.edu>
+ *
+ * Changes:
+ * Alan Cox : device private ioctl copies fields back.
+ * Alan Cox : Transmit queue code does relevant stunts to
+ * keep the queue safe.
+ * Alan Cox : Fixed double lock.
+ * Alan Cox : Fixed promisc NULL pointer trap
+ * ???????? : Support the full private ioctl range
+ * Alan Cox : Moved ioctl permission check into drivers
+ * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
+ * Alan Cox : 100 backlog just doesn't cut it when
+ * you start doing multicast video 8)
+ * Alan Cox : Rewrote net_bh and list manager.
+ * Alan Cox : Fix ETH_P_ALL echoback lengths.
+ * Alan Cox : Took out transmit every packet pass
+ * Saved a few bytes in the ioctl handler
+ * Alan Cox : Network driver sets packet type before calling netif_rx. Saves
+ * a function call a packet.
+ * Alan Cox : Hashed net_bh()
+ * Richard Kooijman: Timestamp fixes.
+ * Alan Cox : Wrong field in SIOCGIFDSTADDR
+ * Alan Cox : Device lock protection.
+ * Alan Cox : Fixed nasty side effect of device close changes.
+ * Rudi Cilibrasi : Pass the right thing to set_mac_address()
+ * Dave Miller : 32bit quantity for the device lock to make it work out
+ * on a Sparc.
+ * Bjorn Ekwall : Added KERNELD hack.
+ * Alan Cox : Cleaned up the backlog initialise.
+ * Craig Metz : SIOCGIFCONF fix if space for under
+ * 1 device.
+ *
+ */
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/arp.h>
+#include <net/slhc.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#ifdef CONFIG_NET_ALIAS
+#include <linux/net_alias.h>
+#endif
+#ifdef CONFIG_KERNELD
+#include <linux/kerneld.h>
+#endif
+
+#ifndef MACH
+/*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+ */
+
+struct packet_type *ptype_base[16];
+struct packet_type *ptype_all = NULL; /* Taps */
+
+/*
+ * Device list lock
+ */
+
+int dev_lockct=0;
+
+/*
+ * Our notifier list
+ */
+
+struct notifier_block *netdev_chain=NULL;
+
+/*
+ * Device drivers call our routines to queue packets here. We empty the
+ * queue in the bottom half handler.
+ */
+
+static struct sk_buff_head backlog;
+
+/*
+ * We don't overdo the queue or we will thrash memory badly.
+ */
+
+static int backlog_size = 0;
+
+/*
+ * Return the lesser of the two values.
+ */
+
+static __inline__ unsigned long min(unsigned long a, unsigned long b)
+{
+ return (a < b)? a : b;
+}
+
+
+/******************************************************************************************
+
+ Protocol management and registration routines
+
+*******************************************************************************************/
+
+/*
+ * For efficiency
+ */
+
+static int dev_nit=0;
+
+/*
+ * Add a protocol ID to the list. Now that the input handler is
+ * smarter we can dispense with all the messy stuff that used to be
+ * here.
+ */
+
+void dev_add_pack(struct packet_type *pt)
+{
+ int hash;
+ if(pt->type==htons(ETH_P_ALL))
+ {
+ dev_nit++;
+ pt->next=ptype_all;
+ ptype_all=pt;
+ }
+ else
+ {
+ hash=ntohs(pt->type)&15;
+ pt->next = ptype_base[hash];
+ ptype_base[hash] = pt;
+ }
+}
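A hedged sketch of the caller's side of dev_add_pack(): the struct packet_type field order shown is inferred from how the fields are used in this file (type, dev, func, data, next); <linux/netdevice.h> is the authoritative definition.

/* Hypothetical protocol registration, for illustration only.  Field
 * order of struct packet_type is an assumption inferred from its use
 * in this file. */
static int my_proto_rcv(struct sk_buff *skb, struct device *dev,
			struct packet_type *pt)
{
	/* ... hand the frame to the protocol ... */
	kfree_skb(skb, FREE_READ);
	return 0;
}

static struct packet_type my_proto_packet_type = {
	0,		/* type: set to htons(ETH_P_xxx) before registering */
	NULL,		/* dev: NULL means "match any device" */
	my_proto_rcv,	/* func: called from net_bh() for matching frames */
	NULL,		/* data: private cookie (a struct sock * for taps) */
	NULL		/* next: maintained by dev_add_pack() */
};

void my_proto_init(void)
{
	my_proto_packet_type.type = htons(ETH_P_IP);
	dev_add_pack(&my_proto_packet_type);
}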
+
+
+/*
+ * Remove a protocol ID from the list.
+ */
+
+void dev_remove_pack(struct packet_type *pt)
+{
+ struct packet_type **pt1;
+ if(pt->type==htons(ETH_P_ALL))
+ {
+ dev_nit--;
+ pt1=&ptype_all;
+ }
+ else
+ pt1=&ptype_base[ntohs(pt->type)&15];
+ for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
+ {
+ if(pt==(*pt1))
+ {
+ *pt1=pt->next;
+ return;
+ }
+ }
+ printk("dev_remove_pack: %p not found.\n", pt);
+}
+
+/*****************************************************************************************
+
+ Device Interface Subroutines
+
+******************************************************************************************/
+
+/*
+ * Find an interface by name.
+ */
+
+struct device *dev_get(const char *name)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (strcmp(dev->name, name) == 0)
+ return(dev);
+ }
+ return NULL;
+}
+
+/*
+ * Find and possibly load an interface.
+ */
+
+#ifdef CONFIG_KERNELD
+
+extern __inline__ void dev_load(const char *name)
+{
+ char *sptr;
+
+ if(!dev_get(name)) {
+#ifdef CONFIG_NET_ALIAS
+ for (sptr=name ; *sptr ; sptr++) if(*sptr==':') break;
+ if (!(*sptr && *(sptr+1)))
+#endif
+ request_module(name);
+ }
+}
+
+#endif
+
+/*
+ * Prepare an interface for use.
+ */
+
+int dev_open(struct device *dev)
+{
+ int ret = 0;
+
+ /*
+ * Call device private open method
+ */
+ if (dev->open)
+ ret = dev->open(dev);
+
+ /*
+ * If it went open OK then set the flags
+ */
+
+ if (ret == 0)
+ {
+ dev->flags |= (IFF_UP | IFF_RUNNING);
+ /*
+ * Initialise multicasting status
+ */
+ dev_mc_upload(dev);
+ notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ }
+ return(ret);
+}
+
+
+/*
+ * Completely shutdown an interface.
+ */
+
+int dev_close(struct device *dev)
+{
+ int ct=0;
+
+ /*
+ * Call the device specific close. This cannot fail.
+ * Only if device is UP
+ */
+
+ if ((dev->flags & IFF_UP) && dev->stop)
+ dev->stop(dev);
+
+ /*
+ * Device is now down.
+ */
+
+ dev->flags&=~(IFF_UP|IFF_RUNNING);
+
+ /*
+ * Tell people we are going down
+ */
+ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+ /*
+ * Flush the multicast chain
+ */
+ dev_mc_discard(dev);
+ /*
+ * Blank the IP addresses
+ */
+ dev->pa_addr = 0;
+ dev->pa_dstaddr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ /*
+ * Purge any queued packets when we down the link
+ */
+ while(ct<DEV_NUMBUFFS)
+ {
+ struct sk_buff *skb;
+ while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
+ if(skb->free)
+ kfree_skb(skb,FREE_WRITE);
+ ct++;
+ }
+ return(0);
+}
+
+
+/*
+ * Device change register/unregister. These are not inline or static
+ * as we export them to the world.
+ */
+
+int register_netdevice_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_register(&netdev_chain, nb);
+}
+
+int unregister_netdevice_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_unregister(&netdev_chain,nb);
+}
+
+/*
+ * Send (or queue for sending) a packet.
+ *
+ *	IMPORTANT: When this is called to resend frames, the caller MUST
+ * already have locked the sk_buff. Apart from that we do the
+ * rest of the magic.
+ */
+
+void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ unsigned long flags;
+ struct sk_buff_head *list;
+ int retransmission = 0; /* used to say if the packet should go */
+ /* at the front or the back of the */
+ /* queue - front is a retransmit try */
+
+ if(pri>=0 && !skb_device_locked(skb))
+ skb_device_lock(skb); /* Shove a lock on the frame */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb->dev = dev;
+
+ /*
+ * Negative priority is used to flag a frame that is being pulled from the
+ * queue front as a retransmit attempt. It therefore goes back on the queue
+ * start on a failure.
+ */
+
+ if (pri < 0)
+ {
+ pri = -pri-1;
+ retransmission = 1;
+ }
+
+#ifdef CONFIG_NET_DEBUG
+ if (pri >= DEV_NUMBUFFS)
+ {
+ printk("bad priority in dev_queue_xmit.\n");
+ pri = 1;
+ }
+#endif
+
+ /*
+	 *	If the address has not been resolved, call the device header rebuilder.
+ * This can cover all protocols and technically not just ARP either.
+ */
+
+ if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
+ return;
+ }
+
+ /*
+ *
+ * If dev is an alias, switch to its main device.
+ * "arp" resolution has been made with alias device, so
+ * arp entries refer to alias, not main.
+ *
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ skb->dev = dev = net_alias_main_dev(dev);
+#endif
+ list = dev->buffs + pri;
+
+ save_flags(flags);
+ /* if this isn't a retransmission, use the first packet instead... */
+ if (!retransmission) {
+ if (skb_queue_len(list)) {
+ /* avoid overrunning the device queue.. */
+ if (skb_queue_len(list) > dev->tx_queue_len) {
+ dev_kfree_skb(skb, FREE_WRITE);
+ return;
+ }
+ cli();
+ skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */
+ __skb_queue_tail(list, skb);
+ skb = __skb_dequeue(list);
+ skb_device_lock(skb); /* New buffer needs locking down */
+ restore_flags(flags);
+ }
+
+ /* copy outgoing packets to any sniffer packet handlers */
+ if (dev_nit) {
+ struct packet_type *ptype;
+ skb->stamp=xtime;
+ for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
+ {
+ /* Never send packets back to the socket
+ * they originated from - MvS (miquels@drinkel.ow.org)
+ */
+ if ((ptype->dev == dev || !ptype->dev) &&
+ ((struct sock *)ptype->data != skb->sk))
+ {
+ struct sk_buff *skb2;
+ if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+ break;
+ skb2->h.raw = skb2->data + dev->hard_header_len;
+ skb2->mac.raw = skb2->data;
+ ptype->func(skb2, skb->dev, ptype);
+ }
+ }
+ }
+ }
+ start_bh_atomic();
+ if (dev->hard_start_xmit(skb, dev) == 0) {
+ /*
+ * Packet is now solely the responsibility of the driver
+ */
+ end_bh_atomic();
+ return;
+ }
+ end_bh_atomic();
+
+ /*
+ * Transmission failed, put skb back into a list. Once on the list it's safe and
+ * no longer device locked (it can be freed safely from the device queue)
+ */
+ cli();
+ skb_device_unlock(skb);
+ __skb_queue_head(list,skb);
+ restore_flags(flags);
+}
+
+/*
+ * Receive a packet from a device driver and queue it for the upper
+ * (protocol) levels. It always succeeds. This is the recommended
+ * interface to use.
+ */
+
+void netif_rx(struct sk_buff *skb)
+{
+ static int dropping = 0;
+
+ /*
+ * Any received buffers are un-owned and should be discarded
+ * when freed. These will be updated later as the frames get
+ * owners.
+ */
+
+ skb->sk = NULL;
+ skb->free = 1;
+ if(skb->stamp.tv_sec==0)
+ skb->stamp = xtime;
+
+ /*
+ * Check that we aren't overdoing things.
+ */
+
+ if (!backlog_size)
+ dropping = 0;
+ else if (backlog_size > 300)
+ dropping = 1;
+
+ if (dropping)
+ {
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * Add it to the "backlog" queue.
+ */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb_queue_tail(&backlog,skb);
+ backlog_size++;
+
+ /*
+ * If any packet arrived, mark it for processing after the
+ * hardware interrupt returns.
+ */
+
+#ifdef CONFIG_NET_RUNONIRQ /* Dont enable yet, needs some driver mods */
+ net_bh();
+#else
+ mark_bh(NET_BH);
+#endif
+ return;
+}
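For the driver side of this contract, a rough sketch follows, assuming the sk_buff helpers dev_alloc_skb(), skb_reserve(), skb_put() and eth_type_trans() used elsewhere in this tree are available; it shows an Ethernet receive path feeding netif_rx().

/* Hedged sketch of an interrupt-time receive handler; the helper names
 * are assumed to match the sk_buff API used by the other drivers here. */
static void my_card_rx(struct device *dev, unsigned char *frame, int pkt_len)
{
	struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);

	if (skb == NULL)
		return;					/* drop under memory pressure */
	skb_reserve(skb, 2);				/* longword-align the IP header */
	skb->dev = dev;
	memcpy(skb_put(skb, pkt_len), frame, pkt_len);
	skb->protocol = eth_type_trans(skb, dev);	/* net_bh() dispatches on this */
	netif_rx(skb);					/* queue it; the bottom half runs later */
}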
+
+/*
+ * This routine causes all interfaces to try to send some data.
+ */
+
+void dev_transmit(void)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (dev->flags != 0 && !dev->tbusy) {
+ /*
+ * Kick the device
+ */
+ dev_tint(dev);
+ }
+ }
+}
+
+
+/**********************************************************************************
+
+ Receive Queue Processor
+
+***********************************************************************************/
+
+/*
+ * This is a single non-reentrant routine which takes the received packet
+ * queue and throws it at the networking layers in the hope that something
+ * useful will emerge.
+ */
+
+volatile unsigned long in_bh = 0; /* Non-reentrant remember */
+
+int in_net_bh() /* Used by timer.c */
+{
+ return(in_bh==0?0:1);
+}
+
+/*
+ * When we are called the queue is ready to grab, the interrupts are
+ *	on and hardware can interrupt and queue to the receive queue as we
+ * run with no problems.
+ * This is run as a bottom half after an interrupt handler that does
+ * mark_bh(NET_BH);
+ */
+
+void net_bh(void *tmp)
+{
+ struct sk_buff *skb;
+ struct packet_type *ptype;
+ struct packet_type *pt_prev;
+ unsigned short type;
+
+ /*
+ * Atomically check and mark our BUSY state.
+ */
+
+ if (set_bit(1, (void*)&in_bh))
+ return;
+
+ /*
+ * Can we send anything now? We want to clear the
+ * decks for any more sends that get done as we
+ * process the input. This also minimises the
+ * latency on a transmit interrupt bh.
+ */
+
+ dev_transmit();
+
+ /*
+	 *	Any data left to process? This may occur because a
+	 *	mark_bh() is done after we empty the queue, including
+	 *	one from a device that does a mark_bh() just after we have emptied it.
+ */
+
+ cli();
+
+ /*
+ * While the queue is not empty
+ */
+
+ while((skb=__skb_dequeue(&backlog))!=NULL)
+ {
+ /*
+ * We have a packet. Therefore the queue has shrunk
+ */
+ backlog_size--;
+
+ sti();
+
+ /*
+ * Bump the pointer to the next structure.
+ *
+		 *	On entry to the protocol layer, skb->data and
+		 *	skb->h.raw point to the MAC header and the encapsulated data.
+ */
+
+ skb->h.raw = skb->data;
+
+ /*
+ * Fetch the packet protocol ID.
+ */
+
+ type = skb->protocol;
+
+ /*
+ * We got a packet ID. Now loop over the "known protocols"
+ * list. There are two lists. The ptype_all list of taps (normally empty)
+ * and the main protocol list which is hashed perfectly for normal protocols.
+ */
+ pt_prev = NULL;
+ for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
+ {
+ if(pt_prev)
+ {
+ struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
+ if(skb2)
+ pt_prev->func(skb2,skb->dev, pt_prev);
+ }
+ pt_prev=ptype;
+ }
+
+ for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
+ {
+ if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
+ {
+ /*
+ * We already have a match queued. Deliver
+ * to it and then remember the new match
+ */
+ if(pt_prev)
+ {
+ struct sk_buff *skb2;
+
+ skb2=skb_clone(skb, GFP_ATOMIC);
+
+ /*
+ * Kick the protocol handler. This should be fast
+ * and efficient code.
+ */
+
+ if(skb2)
+ pt_prev->func(skb2, skb->dev, pt_prev);
+ }
+ /* Remember the current last to do */
+ pt_prev=ptype;
+ }
+ } /* End of protocol list loop */
+
+ /*
+ * Is there a last item to send to ?
+ */
+
+ if(pt_prev)
+ pt_prev->func(skb, skb->dev, pt_prev);
+ /*
+		 *	Has an unknown packet been received?
+ */
+
+ else
+ kfree_skb(skb, FREE_WRITE);
+
+ /*
+ * Again, see if we can transmit anything now.
+ * [Ought to take this out judging by tests it slows
+ * us down not speeds us up]
+ */
+#ifdef XMIT_EVERY
+ dev_transmit();
+#endif
+ cli();
+ } /* End of queue loop */
+
+ /*
+ * We have emptied the queue
+ */
+
+ in_bh = 0;
+ sti();
+
+ /*
+ * One last output flush.
+ */
+
+#ifdef XMIT_AFTER
+ dev_transmit();
+#endif
+}
+
+
+/*
+ *	This routine is called when a device driver (i.e. an
+ * interface) is ready to transmit a packet.
+ */
+
+void dev_tint(struct device *dev)
+{
+ int i;
+ unsigned long flags;
+ struct sk_buff_head * head;
+
+ /*
+ * aliases do not transmit (for now :) )
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev)) return;
+#endif
+ head = dev->buffs;
+ save_flags(flags);
+ cli();
+
+ /*
+ * Work the queues in priority order
+ */
+ for(i = 0;i < DEV_NUMBUFFS; i++,head++)
+ {
+ struct sk_buff *skb = skb_peek(head);
+
+ if (skb) {
+ __skb_unlink(skb, head);
+ /*
+ * Stop anyone freeing the buffer while we retransmit it
+ */
+ skb_device_lock(skb);
+ restore_flags(flags);
+ /*
+ * Feed them to the output stage and if it fails
+ * indicate they re-queue at the front.
+ */
+ dev_queue_xmit(skb,dev,-i - 1);
+ /*
+ * If we can take no more then stop here.
+ */
+ if (dev->tbusy)
+ return;
+ cli();
+ }
+ }
+ restore_flags(flags);
+}
+
+
+/*
+ * Perform a SIOCGIFCONF call. This structure will change
+ * size shortly, and there is nothing I can do about it.
+ * Thus we will need a 'compatibility mode'.
+ */
+
+static int dev_ifconf(char *arg)
+{
+ struct ifconf ifc;
+ struct ifreq ifr;
+ struct device *dev;
+ char *pos;
+ int len;
+ int err;
+
+ /*
+ * Fetch the caller's info block.
+ */
+
+ err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
+ if(err)
+ return err;
+ memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
+ len = ifc.ifc_len;
+ pos = ifc.ifc_buf;
+
+ /*
+ * We now walk the device list filling each active device
+ * into the array.
+ */
+
+ err=verify_area(VERIFY_WRITE,pos,len);
+ if(err)
+ return err;
+
+ /*
+ * Loop over the interfaces, and write an info block for each.
+ */
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if(!(dev->flags & IFF_UP)) /* Downed devices don't count */
+ continue;
+ /*
+ * Have we run out of space here ?
+ */
+
+ if (len < sizeof(struct ifreq))
+ break;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+ strcpy(ifr.ifr_name, dev->name);
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+
+
+ /*
+ * Write this block to the caller's space.
+ */
+
+ memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
+ pos += sizeof(struct ifreq);
+ len -= sizeof(struct ifreq);
+ }
+
+ /*
+ * All done. Write the updated control block back to the caller.
+ */
+
+ ifc.ifc_len = (pos - ifc.ifc_buf);
+ ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
+ memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
+
+ /*
+ * Report how much was filled in
+ */
+
+ return(pos - arg);
+}
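A short user-space illustration of the call that lands in dev_ifconf() above; note that only interfaces which are UP are reported.

/* Illustration only: list the interfaces reported via SIOCGIFCONF. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int list_up_interfaces(void)
{
	struct ifreq reqs[16];
	struct ifconf ifc;
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	int i, n;

	if (s < 0)
		return -1;
	memset(&ifc, 0, sizeof(ifc));
	ifc.ifc_len = sizeof(reqs);
	ifc.ifc_req = reqs;
	if (ioctl(s, SIOCGIFCONF, &ifc) < 0) {	/* handled by dev_ifconf() */
		close(s);
		return -1;
	}
	n = ifc.ifc_len / sizeof(struct ifreq);	/* kernel wrote back the used length */
	for (i = 0; i < n; i++)
		printf("%s\n", reqs[i].ifr_name);
	close(s);
	return 0;
}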
+
+
+/*
+ * This is invoked by the /proc filesystem handler to display a device
+ * in detail.
+ */
+
+#ifdef CONFIG_PROC_FS
+static int sprintf_stats(char *buffer, struct device *dev)
+{
+ struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
+ int size;
+
+ if (stats)
+ size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
+ dev->name,
+ stats->rx_packets, stats->rx_errors,
+ stats->rx_dropped + stats->rx_missed_errors,
+ stats->rx_fifo_errors,
+ stats->rx_length_errors + stats->rx_over_errors
+ + stats->rx_crc_errors + stats->rx_frame_errors,
+ stats->tx_packets, stats->tx_errors, stats->tx_dropped,
+ stats->tx_fifo_errors, stats->collisions,
+ stats->tx_carrier_errors + stats->tx_aborted_errors
+ + stats->tx_window_errors + stats->tx_heartbeat_errors);
+ else
+ size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
+
+ return size;
+}
+
+/*
+ * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
+ * to create /proc/net/dev
+ */
+
+int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
+{
+ int len=0;
+ off_t begin=0;
+ off_t pos=0;
+ int size;
+
+ struct device *dev;
+
+
+ size = sprintf(buffer, "Inter-| Receive | Transmit\n"
+ " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len=length; /* Ending slop */
+ return len;
+}
+#endif /* CONFIG_PROC_FS */
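For orientation, the header and per-device lines produced by the two sprintf() calls above give /proc/net/dev roughly this shape (figures invented for illustration):

Inter-|   Receive                  |  Transmit
 face |packets errs drop fifo frame|packets errs drop fifo colls carrier
    lo:    124    0    0    0    0      124    0    0    0     0    0
  eth0:   8310    2    0    0    1     6211    0    0    0    17    0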
+
+
+/*
+ * This checks bitmasks for the ioctl calls for devices.
+ */
+
+static inline int bad_mask(unsigned long mask, unsigned long addr)
+{
+ if (addr & (mask = ~mask))
+ return 1;
+ mask = ntohl(mask);
+ if (mask & (mask+1))
+ return 1;
+ return 0;
+}
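The check above rejects a mask whose one-bits are not contiguous from the most significant end, and an address that has bits set outside the mask. A stand-alone illustration of the same logic (uint32_t keeps the complement 32 bits wide outside the kernel):

/* Stand-alone illustration of the bad_mask() logic above. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static int bad_mask_demo(uint32_t mask, uint32_t addr)
{
	if (addr & (mask = ~mask))
		return 1;		/* address has bits outside the mask */
	mask = ntohl(mask);
	if (mask & (mask + 1))
		return 1;		/* inverted mask is not 2^n - 1: non-contiguous */
	return 0;
}

int main(void)
{
	printf("%d\n", bad_mask_demo(htonl(0xffffff00), 0));	/* 255.255.255.0 -> 0 (good) */
	printf("%d\n", bad_mask_demo(htonl(0xffff00ff), 0));	/* 255.255.0.255 -> 1 (bad)  */
	return 0;
}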
+
+/*
+ * Perform the SIOCxIFxxx calls.
+ *
+ *	The socket layer has seen an ioctl that the address family thinks is
+ *	for the device. At this point we get invoked to make a decision.
+ */
+
+static int dev_ifsioc(void *arg, unsigned int getset)
+{
+ struct ifreq ifr;
+ struct device *dev;
+ int ret;
+
+ /*
+ * Fetch the caller's info block into kernel space
+ */
+
+ int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
+ if(err)
+ return err;
+
+ memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
+
+ /*
+ * See which interface the caller is talking about.
+ */
+
+ /*
+ *
+ * net_alias_dev_get(): dev_get() with added alias naming magic.
+ * only allow alias creation/deletion if (getset==SIOCSIFADDR)
+ *
+ */
+
+#ifdef CONFIG_KERNELD
+ dev_load(ifr.ifr_name);
+#endif
+
+#ifdef CONFIG_NET_ALIAS
+ if ((dev = net_alias_dev_get(ifr.ifr_name, getset == SIOCSIFADDR, &err, NULL, NULL)) == NULL)
+ return(err);
+#else
+ if ((dev = dev_get(ifr.ifr_name)) == NULL)
+ return(-ENODEV);
+#endif
+ switch(getset)
+ {
+ case SIOCGIFFLAGS: /* Get interface flags */
+ ifr.ifr_flags = dev->flags;
+ goto rarok;
+
+ case SIOCSIFFLAGS: /* Set interface flags */
+ {
+ int old_flags = dev->flags;
+
+ /*
+ * We are not allowed to potentially close/unload
+ * a device until we get this lock.
+ */
+
+ dev_lock_wait();
+
+ /*
+ * Set the flags on our device.
+ */
+
+ dev->flags = (ifr.ifr_flags & (
+ IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
+ IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
+ IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
+ | IFF_MULTICAST)) | (dev->flags & IFF_UP);
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+
+ /*
+			 *	Have we downed the interface? We handle IFF_UP ourselves
+ * according to user attempts to set it, rather than blindly
+ * setting it.
+ */
+
+ if ((old_flags^ifr.ifr_flags)&IFF_UP) /* Bit is different ? */
+ {
+ if(old_flags&IFF_UP) /* Gone down */
+ ret=dev_close(dev);
+ else /* Come up */
+ {
+ ret=dev_open(dev);
+ if(ret<0)
+ dev->flags&=~IFF_UP; /* Open failed */
+ }
+ }
+ else
+ ret=0;
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+ }
+ break;
+
+ case SIOCGIFADDR: /* Get interface address (and family) */
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+ }
+ else
+ {
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_port = 0;
+ }
+ goto rarok;
+
+ case SIOCSIFADDR: /* Set interface address (and family) */
+
+ /*
+ * BSDism. SIOCSIFADDR family=AF_UNSPEC sets the
+ * physical address. We can cope with this now.
+ */
+
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ ret=dev->set_mac_address(dev,&ifr.ifr_addr);
+ }
+ else
+ {
+
+ /*
+ * if dev is an alias, must rehash to update
+ * address change
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ net_alias_dev_rehash(dev ,&ifr.ifr_addr);
+#endif
+ dev->pa_addr = (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr;
+ dev->family = ifr.ifr_addr.sa_family;
+
+#ifdef CONFIG_INET
+ /* This is naughty. When net-032e comes out It wants moving into the net032
+ code not the kernel. Till then it can sit here (SIGH) */
+ dev->pa_mask = ip_get_mask(dev->pa_addr);
+#endif
+ dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFBRDADDR: /* Get the broadcast address */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFBRDADDR: /* Set the broadcast address */
+ dev->pa_brdaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFDSTADDR: /* Get the destination address (for point-to-point links) */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFDSTADDR: /* Set the destination address (for point-to-point links) */
+ dev->pa_dstaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFNETMASK: /* Get the netmask for the interface */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFNETMASK: /* Set the netmask for the interface */
+ {
+ unsigned long mask = (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr;
+ ret = -EINVAL;
+ /*
+ * The mask we set must be legal.
+ */
+ if (bad_mask(mask,0))
+ break;
+ dev->pa_mask = mask;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */
+
+ ifr.ifr_metric = dev->metric;
+ goto rarok;
+
+ case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */
+ dev->metric = ifr.ifr_metric;
+ ret=0;
+ break;
+
+ case SIOCGIFMTU: /* Get the MTU of a device */
+ ifr.ifr_mtu = dev->mtu;
+ goto rarok;
+
+ case SIOCSIFMTU: /* Set the MTU of a device */
+
+ /*
+ * MTU must be positive.
+ */
+
+ if(ifr.ifr_mtu<68)
+ return -EINVAL;
+ dev->mtu = ifr.ifr_mtu;
+ ret = 0;
+ break;
+
+ case SIOCGIFMEM: /* Get the per device memory space. We can add this but currently
+ do not support it */
+ ret = -EINVAL;
+ break;
+
+ case SIOCSIFMEM: /* Set the per device memory buffer space. Not applicable in our case */
+ ret = -EINVAL;
+ break;
+
+ case SIOCGIFHWADDR:
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+
+ case SIOCSIFHWADDR:
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(ifr.ifr_hwaddr.sa_family!=dev->type)
+ return -EINVAL;
+ ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr);
+ break;
+
+ case SIOCGIFMAP:
+ ifr.ifr_map.mem_start=dev->mem_start;
+ ifr.ifr_map.mem_end=dev->mem_end;
+ ifr.ifr_map.base_addr=dev->base_addr;
+ ifr.ifr_map.irq=dev->irq;
+ ifr.ifr_map.dma=dev->dma;
+ ifr.ifr_map.port=dev->if_port;
+ goto rarok;
+
+ case SIOCSIFMAP:
+ if(dev->set_config==NULL)
+ return -EOPNOTSUPP;
+ return dev->set_config(dev,&ifr.ifr_map);
+
+ case SIOCADDMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
+ return 0;
+
+ case SIOCDELMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
+ return 0;
+ /*
+ * Unknown or private ioctl
+ */
+
+ default:
+ if((getset >= SIOCDEVPRIVATE) &&
+ (getset <= (SIOCDEVPRIVATE + 15))) {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
+ break;
+ }
+
+ ret = -EINVAL;
+ }
+ return(ret);
+/*
+ * The load of calls that return an ifreq and ok (saves memory).
+ */
+rarok:
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ return 0;
+}
+
+
+/*
+ * This function handles all "interface"-type I/O control requests. The actual
+ * 'doing' part of this is dev_ifsioc above.
+ */
+
+int dev_ioctl(unsigned int cmd, void *arg)
+{
+ switch(cmd)
+ {
+ case SIOCGIFCONF:
+ (void) dev_ifconf((char *) arg);
+ return 0;
+
+ /*
+ * Ioctl calls that can be done by all.
+ */
+
+ case SIOCGIFFLAGS:
+ case SIOCGIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCSIFHWADDR:
+ case SIOCGIFSLAVE:
+ case SIOCGIFMAP:
+ return dev_ifsioc(arg, cmd);
+
+ /*
+ * Ioctl calls requiring the power of a superuser
+ */
+
+ case SIOCSIFFLAGS:
+ case SIOCSIFADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFNETMASK:
+ case SIOCSIFMETRIC:
+ case SIOCSIFMTU:
+ case SIOCSIFMEM:
+ case SIOCSIFMAP:
+ case SIOCSIFSLAVE:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (!suser())
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+
+ case SIOCSIFLINK:
+ return -EINVAL;
+
+ /*
+ * Unknown or private ioctl.
+ */
+
+ default:
+ if((cmd >= SIOCDEVPRIVATE) &&
+ (cmd <= (SIOCDEVPRIVATE + 15))) {
+ return dev_ifsioc(arg, cmd);
+ }
+ return -EINVAL;
+ }
+}
+#endif /* ! MACH */
+
+/*
+ * Initialize the DEV module. At boot time this walks the device list and
+ * unhooks any devices that fail to initialise (normally hardware not
+ * present) and leaves us with a valid list of present and active devices.
+ *
+ */
+extern int lance_init(void);
+extern int pi_init(void);
+extern int dec21040_init(void);
+
+int net_dev_init(void)
+{
+ struct device *dev, **dp;
+
+#ifndef MACH
+ /*
+ * Initialise the packet receive queue.
+ */
+
+ skb_queue_head_init(&backlog);
+#endif
+
+ /*
+ * This is VeryUgly(tm).
+ *
+	 *	Some devices want to be initialized early..
+ */
+#if defined(CONFIG_LANCE)
+ lance_init();
+#endif
+#if defined(CONFIG_PI)
+ pi_init();
+#endif
+#if defined(CONFIG_PT)
+ pt_init();
+#endif
+#if defined(CONFIG_DEC_ELCP)
+ dec21040_init();
+#endif
+ /*
+ * SLHC if present needs attaching so other people see it
+ * even if not opened.
+ */
+#if (defined(CONFIG_SLIP_COMPRESSED) || defined(CONFIG_PPP)) && defined(CONFIG_SLHC_BUILTIN)
+ slhc_install();
+#endif
+
+ /*
+ * Add the devices.
+ * If the call to dev->init fails, the dev is removed
+ * from the chain disconnecting the device until the
+ * next reboot.
+ */
+
+ dp = &dev_base;
+ while ((dev = *dp) != NULL)
+ {
+ int i;
+ for (i = 0; i < DEV_NUMBUFFS; i++) {
+ skb_queue_head_init(dev->buffs + i);
+ }
+
+ if (dev->init && dev->init(dev))
+ {
+ /*
+ * It failed to come up. Unhook it.
+ */
+ *dp = dev->next;
+ }
+ else
+ {
+ dp = &dev->next;
+ }
+ }
+
+#ifdef CONFIG_PROC_FS
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_DEV, 3, "dev",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ dev_get_info
+ });
+#endif
+
+ /*
+ * Initialise net_alias engine
+ *
+ * - register net_alias device notifier
+ * - register proc entries: /proc/net/alias_types
+ * /proc/net/aliases
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ net_alias_init();
+#endif
+
+ bh_base[NET_BH].routine = net_bh;
+ enable_bh(NET_BH);
+ return 0;
+}
diff --git a/i386/i386at/gpl/linux/net/e2100.c b/i386/i386at/gpl/linux/net/e2100.c
new file mode 100644
index 00000000..fb0f1de6
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/e2100.c
@@ -0,0 +1,456 @@
+/* e2100.c: A Cabletron E2100 series ethernet driver for linux. */
+/*
+ Written 1993-1994 by Donald Becker.
+
+ Copyright 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ This is a driver for the Cabletron E2100 series ethercards.
+
+ The Author may be reached as becker@cesdis.gsfc.nasa.gov, or
+ C/O Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ The E2100 series ethercard is a fairly generic shared memory 8390
+ implementation. The only unusual aspect is the way the shared memory
+ registers are set: first you do an inb() in what is normally the
+ station address region, and the low three bits of next outb() *address*
+ is used as the write value for that register. Either someone wasn't
+ too used to dem bit en bites, or they were trying to obfuscate the
+ programming interface.
+
+ There is an additional complication when setting the window on the packet
+ buffer. You must first do a read into the packet buffer region with the
+	low 8 address bits of the read address setting the page for the start of the packet
+ buffer window, and then do the above operation. See mem_on() for details.
+
+ One bug on the chip is that even a hard reset won't disable the memory
+ window, usually resulting in a hung machine if mem_off() isn't called.
+ If this happens, you must power down the machine for about 30 seconds.
+*/
+
+static const char *version =
+ "e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0};
+
+/* Offsets from the base_addr.
+ Read from the ASIC register, and the low three bits of the next outb()
+   address are used to set the corresponding register. */
+#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */
+#define E21_ASIC 0x10
+#define E21_MEM_ENABLE 0x10
+#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */
+#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */
+#define E21_MEM_BASE 0x11
+#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */
+#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */
+#define E21_MEDIA 0x14 /* (alias). */
+#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */
+#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */
+#define E21_SAPROM 0x10 /* Offset to station address data. */
+#define E21_IO_EXTENT 0x20
+
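The mem_on()/mem_off() pair below, and the e21_open()/e21_close() code further down, all follow the register convention described in the comment at the top of the file. A generalised helper is sketched here purely to illustrate that convention; the driver itself does not define it.

/* Hypothetical helper, not used by the driver: write a 3-bit value to the
 * ASIC register selected by reading 'select_offset'; the value travels in
 * the low three bits of the outb() address, as described above. */
extern inline void e21_asic_write(short port, int select_offset, int value)
{
	inb(port + select_offset);		/* select the target register */
	outb(0, port + E21_ASIC + (value & 7));	/* value rides on the address */
}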
+extern inline void mem_on(short port, volatile char *mem_base,
+ unsigned char start_page )
+{
+ /* This is a little weird: set the shared memory window by doing a
+ read. The low address bits specify the starting page. */
+ mem_base[start_page];
+ inb(port + E21_MEM_ENABLE);
+ outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON);
+}
+
+extern inline void mem_off(short port)
+{
+ inb(port + E21_MEM_ENABLE);
+ outb(0x00, port + E21_MEM_ENABLE);
+}
+
+/* In other drivers I put the TX pages first, but the E2100 window circuitry
+ is designed to have a 4K Tx region last. The windowing circuitry wraps the
+ window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring
+ appear contiguously in the window. */
+#define E21_RX_START_PG 0x00 /* First page of RX buffer */
+#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
+#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */
+#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */
+
+int e2100_probe(struct device *dev);
+int e21_probe1(struct device *dev, int ioaddr);
+
+static int e21_open(struct device *dev);
+static void e21_reset_8390(struct device *dev);
+static void e21_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void e21_block_output(struct device *dev, int count,
+			     const unsigned char *buf, int start_page);
+static void e21_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+static int e21_close(struct device *dev);
+
+
+/* Probe for the E2100 series ethercards. These cards have an 8390 at the
+ base address and the station address at both offset 0x10 and 0x18. I read
+ the station address from offset 0x18 to avoid the dataport of NE2000
+ ethercards, and look for Ctron's unique ID (first three octets of the
+ station address).
+ */
+
+int e2100_probe(struct device *dev)
+{
+ int *port;
+ int base_addr = dev->base_addr;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return e21_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (port = e21_probe_list; *port; port++) {
+ if (check_region(*port, E21_IO_EXTENT))
+ continue;
+ if (e21_probe1(dev, *port) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+int e21_probe1(struct device *dev, int ioaddr)
+{
+ int i, status;
+ unsigned char *station_addr = dev->dev_addr;
+ static unsigned version_printed = 0;
+
+ /* First check the station address for the Ctron prefix. */
+ if (inb(ioaddr + E21_SAPROM + 0) != 0x00
+ || inb(ioaddr + E21_SAPROM + 1) != 0x00
+ || inb(ioaddr + E21_SAPROM + 2) != 0x1d)
+ return ENODEV;
+
+	/* Verify by making certain that there is an 8390 there. */
+ outb(E8390_NODMA + E8390_STOP, ioaddr);
+ SLOW_DOWN_IO;
+ status = inb(ioaddr);
+ if (status != 0x21 && status != 0x23)
+ return ENODEV;
+
+ /* Read the station address PROM. */
+ for (i = 0; i < 6; i++)
+ station_addr[i] = inb(ioaddr + E21_SAPROM + i);
+
+ inb(ioaddr + E21_MEDIA); /* Point to media selection. */
+ outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("e2100.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ printk("%s: E21** at %#3x,", dev->name, ioaddr);
+ for (i = 0; i < 6; i++)
+ printk(" %02X", station_addr[i]);
+
+ if (dev->irq < 2) {
+ int irqlist[] = {15,11,10,12,5,9,3,4}, i;
+ for (i = 0; i < 8; i++)
+ if (request_irq (irqlist[i], NULL, 0, "bogus") != -EBUSY) {
+ dev->irq = irqlist[i];
+ break;
+ }
+ if (i >= 8) {
+ printk(" unable to get IRQ %d.\n", dev->irq);
+ return EAGAIN;
+ }
+ } else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */
+ dev->irq = 9;
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to get memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ /* Grab the region so we can find a different board if IRQ select fails. */
+ request_region(ioaddr, E21_IO_EXTENT, "e2100");
+
+ /* The 8390 is at the base address. */
+ dev->base_addr = ioaddr;
+
+ ei_status.name = "E2100";
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = E21_TX_START_PG;
+ ei_status.rx_start_page = E21_RX_START_PG;
+ ei_status.stop_page = E21_RX_STOP_PG;
+ ei_status.saved_irq = dev->irq;
+
+ /* Check the media port used. The port can be passed in on the
+ low mem_end bits. */
+ if (dev->mem_end & 15)
+ dev->if_port = dev->mem_end & 7;
+ else {
+ dev->if_port = 0;
+ inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */
+ for(i = 0; i < 6; i++)
+ if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) {
+ dev->if_port = 1;
+ break;
+ }
+ }
+
+ /* Never map in the E21 shared memory unless you are actively using it.
+	   Also, the shared memory effectively has only one setting -- spread all
+ over the 128K region! */
+ if (dev->mem_start == 0)
+ dev->mem_start = 0xd0000;
+
+#ifdef notdef
+ /* These values are unused. The E2100 has a 2K window into the packet
+ buffer. The window can be set to start on any page boundary. */
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = dev->rmem_end = dev->mem_start + 2*1024;
+#endif
+
+ printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq,
+ dev->if_port ? "secondary" : "primary", dev->mem_start);
+
+ ei_status.reset_8390 = &e21_reset_8390;
+ ei_status.block_input = &e21_block_input;
+ ei_status.block_output = &e21_block_output;
+ ei_status.get_8390_hdr = &e21_get_8390_hdr;
+ dev->open = &e21_open;
+ dev->stop = &e21_close;
+ NS8390_init(dev, 0);
+
+ return 0;
+}
+
+static int
+e21_open(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if (request_irq(dev->irq, ei_interrupt, 0, "e2100")) {
+ return EBUSY;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ /* Set the interrupt line and memory base on the hardware. */
+ inb(ioaddr + E21_IRQ_LOW);
+ outb(0, ioaddr + E21_ASIC + (dev->irq & 7));
+ inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
+ outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 1:0)
+ + (dev->if_port ? E21_ALT_IFPORT : 0));
+ inb(ioaddr + E21_MEM_BASE);
+ outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7));
+
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static void
+e21_reset_8390(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outb(0x01, ioaddr);
+ if (ei_debug > 1) printk("resetting the E2180x3 t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ /* Set up the ASIC registers, just in case something changed them. */
+
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. We put the 2k window so the header page
+ appears at the start of the shared memory. */
+
+static void
+e21_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ short ioaddr = dev->base_addr;
+ char *shared_mem = (char *)dev->mem_start;
+
+ mem_on(ioaddr, shared_mem, ring_page);
+
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(shared_mem);
+#endif
+
+ /* Turn off memory access: we would need to reprogram the window anyway. */
+ mem_off(ioaddr);
+
+}
+
+/* Block input and output are easy on shared memory ethercards.
+ The E21xx makes block_input() especially easy by wrapping the top
+ ring buffer to the bottom automatically. */
+static void
+e21_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ short ioaddr = dev->base_addr;
+ char *shared_mem = (char *)dev->mem_start;
+
+ mem_on(ioaddr, shared_mem, (ring_offset>>8));
+
+ /* Packet is always in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, dev->mem_start + (ring_offset & 0xff), count, 0);
+
+ mem_off(ioaddr);
+}
+
+static void
+e21_block_output(struct device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ short ioaddr = dev->base_addr;
+ volatile char *shared_mem = (char *)dev->mem_start;
+
+ /* Set the shared memory window start by doing a read, with the low address
+ bits specifying the starting page. */
+ readb(shared_mem + start_page);
+ mem_on(ioaddr, shared_mem, start_page);
+
+ memcpy_toio(shared_mem, buf, count);
+ mem_off(ioaddr);
+}
+
+static int
+e21_close(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ free_irq(dev->irq);
+ dev->irq = ei_status.saved_irq;
+
+ /* Shut off the interrupt line and secondary interface. */
+ inb(ioaddr + E21_IRQ_LOW);
+ outb(0, ioaddr + E21_ASIC);
+ inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
+ outb(0, ioaddr + E21_ASIC);
+
+ irq2dev_map[dev->irq] = NULL;
+
+ ei_close(dev);
+
+ /* Double-check that the memory has been turned off, because really
+ really bad things happen if it isn't. */
+ mem_off(ioaddr);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry e21_drv =
+{"e21", e21_probe1, E21_IO_EXTENT, e21_probe_list};
+#endif
+
+
+#ifdef MODULE
+#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_E21_CARDS] = { 0, };
+static struct device dev_e21[MAX_E21_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_E21_CARDS] = { 0, };
+static int irq[MAX_E21_CARDS] = { 0, };
+static int mem[MAX_E21_CARDS] = { 0, };
+static int xcvr[MAX_E21_CARDS] = { 0, }; /* choose int. or ext. xcvr */
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
+ struct device *dev = &dev_e21[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
+ dev->init = e2100_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
+ struct device *dev = &dev_e21[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: e21_close() handles free_irq + irq2dev map */
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(dev->base_addr, E21_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c e2100.c"
+ * version-control: t
+ * tab-width: 4
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/eepro.c b/i386/i386at/gpl/linux/net/eepro.c
new file mode 100644
index 00000000..2aa2bd14
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/eepro.c
@@ -0,0 +1,1169 @@
+/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */
+/*
+ Written 1994, 1995 by Bao C. Ha.
+
+ Copyright (C) 1994, 1995 by Bao C. Ha.
+
+ This software may be used and distributed
+ according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ The author may be reached at bao@saigon.async.com
+ or 418 Hastings Place, Martinez, GA 30907.
+
+ Things remaining to do:
+ Better record keeping of errors.
+ Eliminate transmit interrupt to reduce overhead.
+ Implement "concurrent processing". I won't be doing it!
+ Allow changes to the partition of the transmit and receive
+	buffers; currently the receive to transmit buffer ratio is 3:1.
+
+ Bugs:
+
+ If you have a problem of not detecting the 82595 during a
+	reboot (warm reset), disabling the FLASH memory should fix it.
+	This is a hardware compatibility problem.
+
+ Versions:
+
+ 0.07a Fix a stat report which counts every packet as a
+ heart-beat failure. (BCH, 6/3/95)
+
+ 0.07 Modified to support all other 82595-based lan cards.
+ The IRQ vector of the EtherExpress Pro will be set
+ according to the value saved in the EEPROM. For other
+ cards, I will do autoirq_request() to grab the next
+ available interrupt vector. (BCH, 3/17/95)
+
+ 0.06a,b Interim released. Minor changes in the comments and
+ print out format. (BCH, 3/9/95 and 3/14/95)
+
+ 0.06 First stable release that I am comfortable with. (BCH,
+ 3/2/95)
+
+ 0.05 Complete testing of multicast. (BCH, 2/23/95)
+
+ 0.04 Adding multicast support. (BCH, 2/14/95)
+
+ 0.03 First widely alpha release for public testing.
+ (BCH, 2/14/95)
+
+*/
+
+static const char *version =
+ "eepro.c: v0.07a 6/5/95 Bao C. Ha (bao@saigon.async.com)\n";
+
+#include <linux/module.h>
+
+/*
+ Sources:
+
+ This driver wouldn't have been written without the availability
+ of the Crynwr's Lan595 driver source code. It helps me to
+ familiarize with the 82595 chipset while waiting for the Intel
+ documentation. I also learned how to detect the 82595 using
+ the packet driver's technique.
+
+ This driver is written by cutting and pasting the skeleton.c driver
+ provided by Donald Becker. I also borrowed the EEPROM routine from
+ Donald Becker's 82586 driver.
+
+ Datasheet for the Intel 82595. It provides just enough info that
+ the casual reader might think that it documents the i82595.
+
+ The User Manual for the 82595. It provides a lot of the missing
+ information.
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+
+/* First, a few definitions that the brave might change. */
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int eepro_portlist[] =
+ { 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0x360, 0};
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 2
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* The number of low I/O ports used by the ethercard. */
+#define EEPRO_IO_EXTENT 16
+
+/* Information that needs to be kept for each board. */
+struct eepro_local {
+ struct enet_statistics stats;
+ unsigned rx_start;
+ unsigned tx_start; /* start of the transmit chain */
+ int tx_last; /* pointer to last packet in the transmit chain */
+ unsigned tx_end; /* end of the transmit chain (plus 1) */
+ int eepro; /* a flag, TRUE=1 for the EtherExpress Pro/10,
+ FALSE = 0 for other 82595-based lan cards. */
+};
+
+/* The station (ethernet) address prefix, used for IDing the board. */
+#define SA_ADDR0 0x00
+#define SA_ADDR1 0xaa
+#define SA_ADDR2 0x00
+
+/* Index to functions, as function prototypes. */
+
+extern int eepro_probe(struct device *dev);
+
+static int eepro_probe1(struct device *dev, short ioaddr);
+static int eepro_open(struct device *dev);
+static int eepro_send_packet(struct sk_buff *skb, struct device *dev);
+static void eepro_interrupt(int irq, struct pt_regs *regs);
+static void eepro_rx(struct device *dev);
+static void eepro_transmit_interrupt(struct device *dev);
+static int eepro_close(struct device *dev);
+static struct enet_statistics *eepro_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+static int read_eeprom(int ioaddr, int location);
+static void hardware_send_packet(struct device *dev, void *buf, short length);
+static int eepro_grab_irq(struct device *dev);
+
+/*
+ Details of the i82595.
+
+You will need either the datasheet or the user manual to understand what
+is going on here. The 82595 is very different from the 82586, 82593.
+
+The receive algorithm in eepro_rx() is just an implementation of the
+RCV ring structure that the Intel 82595 imposes at the hardware level.
+The receive buffer is set at 24K, and the transmit buffer is 8K. I
+am assuming that the total buffer memory is 32K, which is true for the
+Intel EtherExpress Pro/10. If it is less than that on a generic card,
+the driver will be broken.
+
+The transmit algorithm in the hardware_send_packet() is similar to the
+one in the eepro_rx(). The transmit buffer is a ring linked list.
+I just queue the next available packet to the end of the list. In my
+system, the 82595 is so fast that the list seems to always contain a
+single packet. In other systems with faster computers and more congested
+network traffic, the ring linked list should improve performance by
+allowing up to 8K worth of packets to be queued.
+
+*/
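To connect the 24K/8K split described above with the limit-register values defined just below (the limits are expressed in 256-byte pages, as the >> 8 shifts indicate):

    RCV_RAM = 0x6000 (24 KB)            ->  RCV_UPPER_LIMIT = (0x6000 - 2) >> 8 = 0x5f
    XMT_RAM = 0x8000 - 0x6000 = 0x2000  ->  XMT_LOWER_LIMIT =  0x6000      >> 8 = 0x60
              (8 KB)                        XMT_UPPER_LIMIT = (0x8000 - 2) >> 8 = 0x7f

The byte addresses quoted in the comments below (0x5ffe, 0x7ffe) are the last even offsets inside each region; the macros themselves evaluate to page numbers.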
+#define RAM_SIZE 0x8000
+#define RCV_HEADER 8
+#define RCV_RAM 0x6000 /* 24KB for RCV buffer */
+#define RCV_LOWER_LIMIT 0x00 /* 0x0000 */
+#define RCV_UPPER_LIMIT ((RCV_RAM - 2) >> 8) /* 0x5ffe */
+#define XMT_RAM (RAM_SIZE - RCV_RAM) /* 8KB for XMT buffer */
+#define XMT_LOWER_LIMIT (RCV_RAM >> 8) /* 0x6000 */
+#define XMT_UPPER_LIMIT ((RAM_SIZE - 2) >> 8) /* 0x7ffe */
+#define XMT_HEADER 8
+
+#define RCV_DONE 0x0008
+#define RX_OK 0x2000
+#define RX_ERROR 0x0d81
+
+#define TX_DONE_BIT 0x0080
+#define CHAIN_BIT 0x8000
+#define XMT_STATUS 0x02
+#define XMT_CHAIN 0x04
+#define XMT_COUNT 0x06
+
+#define BANK0_SELECT 0x00
+#define BANK1_SELECT 0x40
+#define BANK2_SELECT 0x80
+
+/* Bank 0 registers */
+#define COMMAND_REG 0x00 /* Register 0 */
+#define MC_SETUP 0x03
+#define XMT_CMD 0x04
+#define DIAGNOSE_CMD 0x07
+#define RCV_ENABLE_CMD 0x08
+#define RCV_DISABLE_CMD 0x0a
+#define STOP_RCV_CMD 0x0b
+#define RESET_CMD 0x0e
+#define POWER_DOWN_CMD 0x18
+#define RESUME_XMT_CMD 0x1c
+#define SEL_RESET_CMD 0x1e
+#define STATUS_REG 0x01 /* Register 1 */
+#define RX_INT 0x02
+#define TX_INT 0x04
+#define EXEC_STATUS 0x30
+#define ID_REG 0x02 /* Register 2 */
+#define R_ROBIN_BITS 0xc0 /* round robin counter */
+#define ID_REG_MASK 0x2c
+#define ID_REG_SIG 0x24
+#define AUTO_ENABLE 0x10
+#define INT_MASK_REG 0x03 /* Register 3 */
+#define RX_STOP_MASK 0x01
+#define RX_MASK 0x02
+#define TX_MASK 0x04
+#define EXEC_MASK 0x08
+#define ALL_MASK 0x0f
+#define RCV_BAR 0x04 /* The following are word (16-bit) registers */
+#define RCV_STOP 0x06
+#define XMT_BAR 0x0a
+#define HOST_ADDRESS_REG 0x0c
+#define IO_PORT 0x0e
+
+/* Bank 1 registers */
+#define REG1 0x01
+#define WORD_WIDTH 0x02
+#define INT_ENABLE 0x80
+#define INT_NO_REG 0x02
+#define RCV_LOWER_LIMIT_REG 0x08
+#define RCV_UPPER_LIMIT_REG 0x09
+#define XMT_LOWER_LIMIT_REG 0x0a
+#define XMT_UPPER_LIMIT_REG 0x0b
+
+/* Bank 2 registers */
+#define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */
+#define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */
+#define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */
+#define REG2 0x02
+#define PRMSC_Mode 0x01
+#define Multi_IA 0x20
+#define REG3 0x03
+#define TPE_BIT 0x04
+#define BNC_BIT 0x20
+
+#define I_ADD_REG0 0x04
+#define I_ADD_REG1 0x05
+#define I_ADD_REG2 0x06
+#define I_ADD_REG3 0x07
+#define I_ADD_REG4 0x08
+#define I_ADD_REG5 0x09
+
+#define EEPROM_REG 0x0a
+#define EESK 0x01
+#define EECS 0x02
+#define EEDI 0x04
+#define EEDO 0x08
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+#ifdef HAVE_DEVLIST
+/* Support for an alternate probe manager, which will eliminate the
+ boilerplate below. */
+struct netdev_entry netcard_drv =
+{"eepro", eepro_probe1, EEPRO_IO_EXTENT, eepro_portlist};
+#else
+int
+eepro_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return eepro_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; eepro_portlist[i]; i++) {
+ int ioaddr = eepro_portlist[i];
+ if (check_region(ioaddr, EEPRO_IO_EXTENT))
+ continue;
+ if (eepro_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* This is the real probe routine. Linux has a history of friendly device
+   probes on the ISA bus. A good device probe avoids doing writes, and
+ verifies that the correct device exists and functions. */
+
+int eepro_probe1(struct device *dev, short ioaddr)
+{
+ unsigned short station_addr[6], id, counter;
+ int i;
+ int eepro; /* a flag, TRUE=1 for the EtherExpress Pro/10,
+ FALSE = 0 for other 82595-based lan cards. */
+ const char *ifmap[] = {"AUI", "10Base2", "10BaseT"};
+ enum iftype { AUI=0, BNC=1, TPE=2 };
+
+ /* Now, we are going to check for the signature of the
+ ID_REG (register 2 of bank 0) */
+
+ if (((id=inb(ioaddr + ID_REG)) & ID_REG_MASK) == ID_REG_SIG) {
+
+ /* We seem to have the 82595 signature, let's
+ play with its counter (last 2 bits of
+ register 2 of bank 0) to be sure. */
+
+ counter = (id & R_ROBIN_BITS);
+ if (((id=inb(ioaddr+ID_REG)) & R_ROBIN_BITS) ==
+ (counter + 0x40)) {
+
+ /* Yes, the 82595 has been found */
+
+ /* Now, get the ethernet hardware address from
+ the EEPROM */
+
+ station_addr[0] = read_eeprom(ioaddr, 2);
+ station_addr[1] = read_eeprom(ioaddr, 3);
+ station_addr[2] = read_eeprom(ioaddr, 4);
+
+ /* Check the station address for the manufacturer's code */
+
+ if (station_addr[2] != 0x00aa || (station_addr[1] & 0xff00) != 0x0000) {
+ eepro = 0;
+ printk("%s: Intel 82595-based lan card at %#x,",
+ dev->name, ioaddr);
+ }
+ else {
+ eepro = 1;
+ printk("%s: Intel EtherExpress Pro/10 at %#x,",
+ dev->name, ioaddr);
+ }
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+
+ for (i=0; i < 6; i++) {
+ dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
+ printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
+ }
+
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ id = inb(ioaddr + REG3);
+ if (id & TPE_BIT)
+ dev->if_port = TPE;
+ else dev->if_port = BNC;
+
+ if (dev->irq < 2 && eepro) {
+ i = read_eeprom(ioaddr, 1);
+ switch (i & 0x07) {
+ case 0: dev->irq = 9; break;
+ case 1: dev->irq = 3; break;
+ case 2: dev->irq = 5; break;
+ case 3: dev->irq = 10; break;
+ case 4: dev->irq = 11; break;
+ default: /* should never get here !!!!! */
+ printk(" illegal interrupt vector stored in EEPROM.\n");
+ return ENODEV;
+ }
+ }
+ else if (dev->irq == 2)
+ dev->irq = 9;
+
+ if (dev->irq > 2) {
+ printk(", IRQ %d, %s.\n", dev->irq,
+ ifmap[dev->if_port]);
+ if (request_irq(dev->irq, &eepro_interrupt, 0, "eepro")) {
+ printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ }
+ else printk(", %s.\n", ifmap[dev->if_port]);
+
+ if ((dev->mem_start & 0xf) > 0)
+ net_debug = dev->mem_start & 7;
+
+ if (net_debug > 3) {
+ i = read_eeprom(ioaddr, 5);
+ if (i & 0x2000) /* bit 13 of EEPROM word 5 */
+ printk("%s: Concurrent Processing is enabled but not used!\n",
+ dev->name);
+ }
+
+ if (net_debug)
+ printk(version);
+
+ /* Grab the region so we can find another board if autoIRQ fails. */
+ request_region(ioaddr, EEPRO_IO_EXTENT, "eepro");
+
+ /* Initialize the device structure */
+ dev->priv = kmalloc(sizeof(struct eepro_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct eepro_local));
+
+ dev->open = eepro_open;
+ dev->stop = eepro_close;
+ dev->hard_start_xmit = eepro_send_packet;
+ dev->get_stats = eepro_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the fields of the device structure with
+ ethernet generic values */
+
+ ether_setup(dev);
+
+ outb(RESET_CMD, ioaddr); /* RESET the 82595 */
+
+ return 0;
+ }
+ else return ENODEV;
+ }
+ else if (net_debug > 3)
+ printk ("EtherExpress Pro probed failed!\n");
+ return ENODEV;
+}
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+ there is a non-reboot way to recover if something goes wrong.
+ */
+
+static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
+static int eepro_grab_irq(struct device *dev)
+{
+ int irqlist[] = { 5, 9, 10, 11, 4, 3, 0};
+ int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr;
+
+ outb(BANK1_SELECT, ioaddr); /* be CAREFUL, BANK 1 now */
+
+ /* Enable the interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg | INT_ENABLE, ioaddr + REG1);
+
+ outb(BANK0_SELECT, ioaddr); /* be CAREFUL, BANK 0 now */
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+ /* Let the EXEC event interrupt */
+ outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG);
+
+ do {
+ outb(BANK1_SELECT, ioaddr); /* be CAREFUL, BANK 1 now */
+
+ temp_reg = inb(ioaddr + INT_NO_REG);
+ outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);
+
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+
+ if (request_irq (*irqp, NULL, 0, "bogus") != EBUSY) {
+ /* Twinkle the interrupt, and check if it's seen */
+ autoirq_setup(0);
+
+ outb(DIAGNOSE_CMD, ioaddr); /* RESET the 82595 */
+
+ if (*irqp == autoirq_report(2) && /* It's a good IRQ line */
+ (request_irq(dev->irq = *irqp, &eepro_interrupt, 0, "eepro") == 0))
+ break;
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+ }
+ } while (*++irqp);
+
+ outb(BANK1_SELECT, ioaddr); /* Switch back to Bank 1 */
+
+ /* Disable the physical interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg & 0x7f, ioaddr + REG1);
+
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+
+ /* Mask all the interrupts. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+
+ return dev->irq;
+}
+
+static int
+eepro_open(struct device *dev)
+{
+ unsigned short temp_reg;
+ int i, ioaddr = dev->base_addr;
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+
+ if (net_debug > 3)
+ printk("eepro: entering eepro_open routine.\n");
+
+ if (dev->dev_addr[0] == SA_ADDR0 &&
+ dev->dev_addr[1] == SA_ADDR1 &&
+ dev->dev_addr[2] == SA_ADDR2)
+ lp->eepro = 1; /* Yes, an Intel EtherExpress Pro/10 */
+ else lp->eepro = 0; /* No, it is a generic 82595-based lan card */
+
+ /* Get the interrupt vector for the 82595 */
+ if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
+ printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+ if (irq2dev_map[dev->irq] != 0
+ || (irq2dev_map[dev->irq] = dev) == 0)
+ return -EAGAIN;
+
+ /* Initialize the 82595. */
+
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ temp_reg = inb(ioaddr + EEPROM_REG);
+ if (temp_reg & 0x10) /* Check the TurnOff Enable bit */
+ outb(temp_reg & 0xef, ioaddr + EEPROM_REG);
+ for (i=0; i < 6; i++)
+ outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i);
+
+ temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */
+ outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */
+ | RCV_Discard_BadFrame, ioaddr + REG1);
+
+ temp_reg = inb(ioaddr + REG2); /* Match broadcast */
+ outb(temp_reg | 0x14, ioaddr + REG2);
+
+ temp_reg = inb(ioaddr + REG3);
+ outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */
+
+ /* Set the receiving mode */
+ outb(BANK1_SELECT, ioaddr); /* be CAREFUL, BANK 1 now */
+
+ temp_reg = inb(ioaddr + INT_NO_REG);
+ outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
+
+ /* Initialize the RCV and XMT upper and lower limits */
+ outb(RCV_LOWER_LIMIT, ioaddr + RCV_LOWER_LIMIT_REG);
+ outb(RCV_UPPER_LIMIT, ioaddr + RCV_UPPER_LIMIT_REG);
+ outb(XMT_LOWER_LIMIT, ioaddr + XMT_LOWER_LIMIT_REG);
+ outb(XMT_UPPER_LIMIT, ioaddr + XMT_UPPER_LIMIT_REG);
+
+ /* Enable the interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg | INT_ENABLE, ioaddr + REG1);
+
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+
+ /* Let RX and TX events interrupt */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+
+ /* Initialize RCV */
+ outw(RCV_LOWER_LIMIT << 8, ioaddr + RCV_BAR);
+ lp->rx_start = (RCV_LOWER_LIMIT << 8) ;
+ outw((RCV_UPPER_LIMIT << 8) | 0xfe, ioaddr + RCV_STOP);
+
+ /* Initialize XMT */
+ outw(XMT_LOWER_LIMIT << 8, ioaddr + XMT_BAR);
+
+ outb(SEL_RESET_CMD, ioaddr);
+ /* We are supposed to wait for 2 us after a SEL_RESET */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ lp->tx_start = lp->tx_end = XMT_LOWER_LIMIT << 8; /* or = RCV_RAM */
+ lp->tx_last = 0;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ if (net_debug > 3)
+ printk("eepro: exiting eepro_open routine.\n");
+
+ outb(RCV_ENABLE_CMD, ioaddr);
+
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int
+eepro_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if (net_debug > 5)
+ printk("eepro: entering eepro_send_packet routine.\n");
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ if (net_debug > 1)
+ printk("%s: transmit timed out, %s?\n", dev->name,
+ "network cable problem");
+ lp->stats.tx_errors++;
+ /* Try to restart the adaptor. */
+ outb(SEL_RESET_CMD, ioaddr);
+ /* We are supposed to wait for 2 us after a SEL_RESET */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ /* Do I also need to flush the transmit buffers here? YES? */
+ lp->tx_start = lp->tx_end = RCV_RAM;
+ lp->tx_last = 0;
+
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+
+ outb(RCV_ENABLE_CMD, ioaddr);
+
+ }
+
+ /* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ hardware_send_packet(dev, buf, length);
+ dev->trans_start = jiffies;
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ /* You might need to clean up and record Tx statistics here. */
+ /* lp->stats.tx_aborted_errors++; */
+
+ if (net_debug > 5)
+ printk("eepro: exiting eepro_send_packet routine.\n");
+
+ return 0;
+}
+
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+eepro_interrupt(int irq, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ int ioaddr, status, boguscount = 0;
+
+ if (net_debug > 5)
+ printk("eepro: entering eepro_interrupt routine.\n");
+
+ if (dev == NULL) {
+ printk ("eepro_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+
+ do {
+ status = inb(ioaddr + STATUS_REG);
+
+ if (status & RX_INT) {
+ if (net_debug > 4)
+ printk("eepro: packet received interrupt.\n");
+
+ /* Acknowledge the RX_INT */
+ outb(RX_INT, ioaddr + STATUS_REG);
+
+ /* Get the received packets */
+ eepro_rx(dev);
+ }
+ else if (status & TX_INT) {
+ if (net_debug > 4)
+ printk("eepro: packet transmit interrupt.\n");
+
+ /* Acknowledge the TX_INT */
+ outb(TX_INT, ioaddr + STATUS_REG);
+
+ /* Process the status of transmitted packets */
+ eepro_transmit_interrupt(dev);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ } while ((++boguscount < 10) && (status & 0x06));
+
+ dev->interrupt = 0;
+ if (net_debug > 5)
+ printk("eepro: exiting eepro_interrupt routine.\n");
+
+ return;
+}
+
+static int
+eepro_close(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ short temp_reg;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ outb(BANK1_SELECT, ioaddr); /* Switch back to Bank 1 */
+
+ /* Disable the physical interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg & 0x7f, ioaddr + REG1);
+
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+
+ /* Flush the Tx and disable Rx. */
+ outb(STOP_RCV_CMD, ioaddr);
+ lp->tx_start = lp->tx_end = RCV_RAM ;
+ lp->tx_last = 0;
+
+ /* Mask all the interrupts. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+
+ /* Reset the 82595 */
+ outb(RESET_CMD, ioaddr);
+
+ /* release the interrupt */
+ free_irq(dev->irq);
+
+ irq2dev_map[dev->irq] = 0;
+
+ /* Update the statistics here. What statistics? */
+
+ /* We are supposed to wait for 200 us after a RESET */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO; /* May not be enough? */
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+eepro_get_stats(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+static void
+set_multicast_list(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ unsigned short mode;
+ struct dev_mc_list *dmi=dev->mc_list;
+
+ if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
+ {
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start an all-out war on
+ * the cable. If it was a promisc request the
+ * flag is already set. If not we assert it.
+ */
+ dev->flags|=IFF_PROMISC;
+
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode | PRMSC_Mode, ioaddr + REG2);
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ outb(BANK0_SELECT, ioaddr); /* Return to BANK 0 now */
+ printk("%s: promiscuous mode enabled.\n", dev->name);
+ }
+ else if (dev->mc_count==0 )
+ {
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ outb(BANK0_SELECT, ioaddr); /* Return to BANK 0 now */
+ }
+ else
+ {
+ unsigned short status, *eaddrs;
+ int i, boguscount = 0;
+
+ /* Disable RX and TX interrupts. Necessary to avoid
+ corruption of the HOST_ADDRESS_REG by interrupt
+ service routines. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode | Multi_IA, ioaddr + REG2);
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ outb(BANK0_SELECT, ioaddr); /* Return to BANK 0 now */
+ outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
+ outw(MC_SETUP, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(6*(dev->mc_count + 1), ioaddr + IO_PORT);
+ for (i = 0; i < dev->mc_count; i++)
+ {
+ eaddrs=(unsigned short *)dmi->dmi_addr;
+ dmi=dmi->next;
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ }
+ eaddrs = (unsigned short *) dev->dev_addr;
+ outw(eaddrs[0], ioaddr + IO_PORT);
+ outw(eaddrs[1], ioaddr + IO_PORT);
+ outw(eaddrs[2], ioaddr + IO_PORT);
+ outw(lp->tx_end, ioaddr + XMT_BAR);
+ outb(MC_SETUP, ioaddr);
+
+ /* Update the transmit queue */
+ i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1);
+ if (lp->tx_start != lp->tx_end)
+ {
+ /* update the next address and the chain bit in the
+ last packet */
+ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
+ outw(i, ioaddr + IO_PORT);
+ outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
+ status = inw(ioaddr + IO_PORT);
+ outw(status | CHAIN_BIT, ioaddr + IO_PORT);
+ lp->tx_end = i ;
+ }
+ else lp->tx_start = lp->tx_end = i ;
+
+ /* Acknowledge that the MC setup is done */
+ do { /* We should be doing this in the eepro_interrupt()! */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ if (inb(ioaddr + STATUS_REG) & 0x08)
+ {
+ i = inb(ioaddr);
+ outb(0x08, ioaddr + STATUS_REG);
+ if (i & 0x20) { /* command ABORTed */
+ printk("%s: multicast setup failed.\n",
+ dev->name);
+ break;
+ } else if ((i & 0x0f) == 0x03) { /* MC-Done */
+ printk("%s: set Rx mode to %d addresses.\n",
+ dev->name, dev->mc_count);
+ break;
+ }
+ }
+ } while (++boguscount < 100);
+
+ /* Re-enable RX and TX interrupts */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+
+ }
+ outb(RCV_ENABLE_CMD, ioaddr);
+}
+
+/* The horrible routine to read a word from the serial EEPROM. */
+/* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */
+
+/* The delay between EEPROM clock transitions. */
+#define eeprom_delay() { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }}
+#define EE_READ_CMD (6 << 6)
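+/* EE_READ_CMD places the start bit and the read opcode ("110") in bits 8-6;
+   OR-ing in the 6-bit word address gives the 9-bit frame that read_eeprom()
+   below shifts out MSB first -- the usual framing for a small 93C46-style
+   serial EEPROM. */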
+
+int
+read_eeprom(int ioaddr, int location)
+{
+ int i;
+ unsigned short retval = 0;
+ short ee_addr = ioaddr + EEPROM_REG;
+ int read_cmd = location | EE_READ_CMD;
+ short ctrl_val = EECS ;
+
+ outb(BANK2_SELECT, ioaddr);
+ outb(ctrl_val, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 8; i >= 0; i--) {
+ short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
+ : ctrl_val;
+ outb(outval, ee_addr);
+ outb(outval | EESK, ee_addr); /* EEPROM clock tick. */
+ eeprom_delay();
+ outb(outval, ee_addr); /* Finish EEPROM a clock tick. */
+ eeprom_delay();
+ }
+ outb(ctrl_val, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb(ctrl_val | EESK, ee_addr); eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
+ outb(ctrl_val, ee_addr); eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ ctrl_val &= ~EECS;
+ outb(ctrl_val | EESK, ee_addr);
+ eeprom_delay();
+ outb(ctrl_val, ee_addr);
+ eeprom_delay();
+ outb(BANK0_SELECT, ioaddr);
+ return retval;
+}
+
+static void
+hardware_send_packet(struct device *dev, void *buf, short length)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ unsigned status, tx_available, last, end, boguscount = 10;
+
+ if (net_debug > 5)
+ printk("eepro: entering hardware_send_packet routine.\n");
+
+ while (boguscount-- > 0) {
+
+ /* determine how much of the transmit buffer space is available */
+ if (lp->tx_end > lp->tx_start)
+ tx_available = XMT_RAM - (lp->tx_end - lp->tx_start);
+ else if (lp->tx_end < lp->tx_start)
+ tx_available = lp->tx_start - lp->tx_end;
+ else tx_available = XMT_RAM;
+
+ /* Disable RX and TX interrupts. Necessary to avoid
+ corruption of the HOST_ADDRESS_REG by interrupt
+ service routines. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+
+ if (((((length + 1) >> 1) << 1) + 2*XMT_HEADER)
+ >= tx_available) /* No space available ??? */
+ continue;
+
+ last = lp->tx_end;
+ end = last + (((length + 1) >> 1) << 1) + XMT_HEADER;
+
+ if (end >= RAM_SIZE) { /* the transmit buffer is wrapped around */
+ if ((RAM_SIZE - last) <= XMT_HEADER) {
+ /* Arrrr!!! Must keep the xmt header together;
+ several days were lost to chase this one down. */
+ last = RCV_RAM;
+ end = last + (((length + 1) >> 1) << 1) + XMT_HEADER;
+ }
+ else end = RCV_RAM + (end - RAM_SIZE);
+ }
+
+ outw(last, ioaddr + HOST_ADDRESS_REG);
+ outw(XMT_CMD, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(end, ioaddr + IO_PORT);
+ outw(length, ioaddr + IO_PORT);
+ outsw(ioaddr + IO_PORT, buf, (length + 1) >> 1);
+
+ if (lp->tx_start != lp->tx_end) {
+ /* update the next address and the chain bit in the
+ last packet */
+ if (lp->tx_end != last) {
+ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
+ outw(last, ioaddr + IO_PORT);
+ }
+ outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
+ status = inw(ioaddr + IO_PORT);
+ outw(status | CHAIN_BIT, ioaddr + IO_PORT);
+ }
+
+ /* A dummy read to flush the DRAM write pipeline */
+ status = inw(ioaddr + IO_PORT);
+
+ /* Enable RX and TX interrupts */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+
+ if (lp->tx_start == lp->tx_end) {
+ outw(last, ioaddr + XMT_BAR);
+ outb(XMT_CMD, ioaddr);
+ lp->tx_start = last; /* I don't like to change tx_start here */
+ }
+ else outb(RESUME_XMT_CMD, ioaddr);
+
+ lp->tx_last = last;
+ lp->tx_end = end;
+
+ if (dev->tbusy) {
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+
+ if (net_debug > 5)
+ printk("eepro: exiting hardware_send_packet routine.\n");
+ return;
+ }
+ dev->tbusy = 1;
+ if (net_debug > 5)
+ printk("eepro: exiting hardware_send_packet routine.\n");
+}
+
+static void
+eepro_rx(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ short boguscount = 20;
+ short rcv_car = lp->rx_start;
+ unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;
+
+ if (net_debug > 5)
+ printk("eepro: entering eepro_rx routine.\n");
+
+ /* Set the read pointer to the start of the RCV */
+ outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
+ rcv_event = inw(ioaddr + IO_PORT);
+
+ while (rcv_event == RCV_DONE) {
+ rcv_status = inw(ioaddr + IO_PORT);
+ rcv_next_frame = inw(ioaddr + IO_PORT);
+ rcv_size = inw(ioaddr + IO_PORT);
+
+ if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ rcv_size &= 0x3fff;
+ skb = dev_alloc_skb(rcv_size+2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 1) >> 1);
+
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ else { /* Not sure we will ever reach here,
+ I set the 595 to discard bad received frames */
+ lp->stats.rx_errors++;
+ if (rcv_status & 0x0100)
+ lp->stats.rx_over_errors++;
+ else if (rcv_status & 0x0400)
+ lp->stats.rx_frame_errors++;
+ else if (rcv_status & 0x0800)
+ lp->stats.rx_crc_errors++;
+ printk("%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
+ dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
+ }
+ if (rcv_status & 0x1000)
+ lp->stats.rx_length_errors++;
+ if (--boguscount == 0)
+ break;
+
+ rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
+ lp->rx_start = rcv_next_frame;
+ outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
+ rcv_event = inw(ioaddr + IO_PORT);
+
+ }
+ if (rcv_car == 0)
+ rcv_car = (RCV_UPPER_LIMIT << 8) | 0xff;
+ outw(rcv_car - 1, ioaddr + RCV_STOP);
+
+ if (net_debug > 5)
+ printk("eepro: exiting eepro_rx routine.\n");
+}
+
+static void
+eepro_transmit_interrupt(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ short boguscount = 10;
+ short xmt_status;
+
+ while (lp->tx_start != lp->tx_end) {
+
+ outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
+ xmt_status = inw(ioaddr+IO_PORT);
+ if ((xmt_status & TX_DONE_BIT) == 0) break;
+ xmt_status = inw(ioaddr+IO_PORT);
+ lp->tx_start = inw(ioaddr+IO_PORT);
+
+ if (dev->tbusy) {
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+
+ if (xmt_status & 0x2000)
+ lp->stats.tx_packets++;
+ else {
+ lp->stats.tx_errors++;
+ if (xmt_status & 0x0400)
+ lp->stats.tx_carrier_errors++;
+ printk("%s: XMT status = %#x\n",
+ dev->name, xmt_status);
+ }
+ if (xmt_status & 0x000f)
+ lp->stats.collisions += (xmt_status & 0x000f);
+ if ((xmt_status & 0x0040) == 0x0)
+ lp->stats.tx_heartbeat_errors++;
+
+ if (--boguscount == 0)
+ break;
+ }
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_eepro = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, eepro_probe };
+
+static int io = 0x200;
+static int irq = 0;
+
+int
+init_module(void)
+{
+ if (io == 0)
+ printk("eepro: You should not use auto-probing with insmod!\n");
+ dev_eepro.base_addr = io;
+ dev_eepro.irq = irq;
+
+ if (register_netdev(&dev_eepro) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_eepro);
+ kfree_s(dev_eepro.priv,sizeof(struct eepro_local));
+ dev_eepro.priv=NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_eepro.base_addr, EEPRO_IO_EXTENT);
+}
+#endif /* MODULE */
diff --git a/i386/i386at/gpl/linux/net/eexpress.c b/i386/i386at/gpl/linux/net/eexpress.c
new file mode 100644
index 00000000..2f641d68
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/eexpress.c
@@ -0,0 +1,1034 @@
+/* eexpress.c: Intel EtherExpress device driver for Linux. */
+/*
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@super.org or
+ C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
+
+ Things remaining to do:
+ Check that the 586 and ASIC are reset/unreset at the right times.
+ Check tx and rx buffer setup.
+ The current Tx is single-buffer-only.
+ Move the theory of operation and memory map documentation.
+ Rework the board error reset
+ The statistics need to be updated correctly.
+
+ Modularized by Pauline Middelink <middelin@polyware.iaf.nl>
+ Changed to support io= irq= by Alan Cox <Alan.Cox@linux.org>
+*/
+
+static const char *version =
+ "eexpress.c:v0.07 1/19/94 Donald Becker (becker@super.org)\n";
+
+/*
+ Sources:
+ This driver wouldn't have been written with the availability of the
+ Crynwr driver source code. It provided a known-working implementation
+ that filled in the gaping holes of the Intel documentation. Three cheers
+ for Russ Nelson.
+
+ Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough
+ info that the casual reader might think that it documents the i82586.
+*/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/malloc.h>
+
+/* use 0 for production, 1 for verification, 2..7 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 2
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/*
+ Details of the i82586.
+
+ You'll really need the databook to understand the details of this part,
+ but the outline is that the i82586 has two separate processing units.
+
+ The Rx unit uses a list of frame descriptors and a list of data buffer
+ descriptors. We use full-sized (1518 byte) data buffers, so there is
+ a one-to-one pairing of frame descriptors to buffer descriptors.
+
+ The Tx ("command") unit executes a list of commands that look like:
+ Status word Written by the 82586 when the command is done.
+ Command word Command in lower 3 bits, post-command action in upper 3
+ Link word The address of the next command.
+ Parameters (as needed).
+
+ Some definitions related to the Command Word are:
+ */
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
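+
+/* Purely illustrative sketch (not used by this driver, which reads and
+   writes board memory a word at a time through the dataport): one entry of
+   the command list described above, laid out as a C structure.  The type
+   and member names are invented here for clarity. */
+#if 0
+struct i586_cmd_block {
+     unsigned short status;    /* Written by the 82586 when the command is done. */
+     unsigned short command;   /* Command code in the low 3 bits; CMD_EOL,
+                                  CMD_SUSP and CMD_INTR in the upper bits. */
+     unsigned short link;      /* Offset of the next command block. */
+     unsigned short params[4]; /* Command-specific parameters, as needed. */
+};
+#endif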
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct enet_statistics stats;
+ int last_restart;
+ short rx_head;
+ short rx_tail;
+ short tx_head;
+ short tx_cmd_link;
+ short tx_reap;
+};
+
+/*
+ Details of the EtherExpress Implementation
+ The EtherExpress takes an unusual approach to host access to packet buffer
+ memory. The host can use either the Dataport, with independent
+ autoincrementing read and write pointers, or it can I/O map 32 bytes of the
+ memory using the "Shadow Memory Pointer" (SMB) as follows:
+ ioaddr Normal EtherExpress registers
+ ioaddr+0x4000...0x400f Buffer Memory at SMB...SMB+15
+ ioaddr+0x8000...0x800f Buffer Memory at SMB+16...SMB+31
+ ioaddr+0xC000...0xC007 "" SMB+16...SMB+23 (hardware flaw?)
+ ioaddr+0xC008...0xC00f Buffer Memory at 0x0008...0x000f
+ The last I/O map set is useful if you put the i82586 System Command Block
+ (the command mailbox) exactly at 0x0008. (There seems to be some
+ undocumented init structure at 0x0000-7, so I had to use the Crynwr memory
+ setup verbatim for those four words anyway.)
+
+ A problem with using either one of these mechanisms is that you must run
+ single-threaded, or the interrupt handler must restore a changed value of
+ the read, write, or SMB pointers.
+
+ Unlike the Crynwr driver, my driver mostly ignores the I/O mapped "feature"
+ and relies heavily on the dataport for buffer memory access. To minimize
+ switching, the read_pointer is dedicated to the Rx interrupt handler, and
+ the write_pointer is used by the send_packet() routine (it's carefully saved
+ and restored when it's needed by the interrupt handler).
+ */
+
+/* Offsets from the base I/O address. */
+#define DATAPORT 0 /* Data Transfer Register. */
+#define WRITE_PTR 2 /* Write Address Pointer. */
+#define READ_PTR 4 /* Read Address Pointer. */
+#define SIGNAL_CA 6 /* Frob the 82586 Channel Attention line. */
+#define SET_IRQ 7 /* IRQ Select. */
+#define SHADOW_PTR 8 /* Shadow Memory Bank Pointer. */
+#define MEM_Ctrl 11
+#define MEM_Page_Ctrl 12
+#define Config 13
+#define EEPROM_Ctrl 14
+#define ID_PORT 15
+
+#define EEXPRESS_IO_EXTENT 16
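+
+/* Minimal sketch, not part of the original driver, of how buffer memory is
+   reached through the autoincrementing dataport described above: point the
+   read (or write) pointer at an offset in the 82586 memory, then stream
+   words through the dataport.  The routines below do exactly this inline;
+   the helper names here are invented for illustration only. */
+#if 0
+static unsigned short eexp_read_mem_word(int ioaddr, unsigned short offset)
+{
+     outw(offset, ioaddr + READ_PTR);   /* Aim the read pointer... */
+     return inw(ioaddr + DATAPORT);     /* ...each read auto-increments it. */
+}
+
+static void eexp_write_mem_word(int ioaddr, unsigned short offset, unsigned short value)
+{
+     outw(offset, ioaddr + WRITE_PTR);  /* Aim the write pointer... */
+     outw(value, ioaddr + DATAPORT);    /* ...each write auto-increments it. */
+}
+#endif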
+
+/* EEPROM_Ctrl bits. */
+
+#define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
+#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
+#define EE_CTRL_BITS (EE_SHIFT_CLK | EE_CS | EE_DATA_WRITE | EE_DATA_READ)
+#define ASIC_RESET 0x40
+#define _586_RESET 0x80
+
+/* Offsets to elements of the System Control Block structure. */
+#define SCB_STATUS 0xc008
+#define SCB_CMD 0xc00A
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define SCB_CBL 0xc00C /* Command BLock offset. */
+#define SCB_RFA 0xc00E /* Rx Frame Area offset. */
+
+/*
+ What follows in 'init_words[]' is the "program" that is downloaded to the
+ 82586 memory. It's mostly tables and command blocks, and starts at the
+ reset address 0xfffff6.
+
+ Even with the additional "don't care" values, doing it this way takes less
+ program space than initializing the individual tables, and I feel it's much
+ cleaner.
+
+ The databook is particularly useless for the first two structures; they are
+ completely undocumented. I had to use the Crynwr driver as an example.
+
+ The memory setup is as follows:
+ */
+
+#define CONFIG_CMD 0x0018
+#define SET_SA_CMD 0x0024
+#define SA_OFFSET 0x002A
+#define IDLELOOP 0x30
+#define TDR_CMD 0x38
+#define TDR_TIME 0x3C
+#define DUMP_CMD 0x40
+#define DIAG_CMD 0x48
+#define SET_MC_CMD 0x4E
+#define DUMP_DATA 0x56 /* A 170 byte buffer for dump and Set-MC into. */
+
+#define TX_BUF_START 0x0100
+#define NUM_TX_BUFS 4
+#define TX_BUF_SIZE 0x0680 /* packet+header+TBD+extra (1518+14+20+16) */
+#define TX_BUF_END 0x2000
+
+#define RX_BUF_START 0x2000
+#define RX_BUF_SIZE (0x640) /* packet+header+RBD+extra */
+#define RX_BUF_END 0x4000
+
+/*
+ That's it: only 86 bytes to set up the beast, including every extra
+ command available. The 170 byte buffer at DUMP_DATA is shared between the
+ Dump command (called only by the diagnostic program) and the SetMulticastList
+ command.
+
+ To complete the memory setup you only have to write the station address at
+ SA_OFFSET and create the Tx & Rx buffer lists.
+
+ The Tx command chain and buffer list is setup as follows:
+ A Tx command table, with the data buffer pointing to...
+ A Tx data buffer descriptor. The packet is in a single buffer, rather than
+ chaining together several smaller buffers.
+ A NoOp command, which initially points to itself,
+ And the packet data.
+
+ A transmit is done by filling in the Tx command table and data buffer,
+ re-writing the NoOp command, and finally changing the offset of the last
+ command to point to the current Tx command. When the Tx command is finished,
+ it jumps to the NoOp, where it loops until the next Tx command changes the
+ "link offset" in the NoOp. This way the 82586 never has to go through the
+ slow restart sequence.
+
+ The Rx buffer list is set up in the obvious ring structure. We have enough
+ memory (and low enough interrupt latency) that we can avoid the complicated
+ Rx buffer linked lists by always associating a full-size Rx data buffer with
+ each Rx data frame.
+
+ I currently use four transmit buffers starting at TX_BUF_START (0x0100), and
+ use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers.
+
+ */
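+
+/* For reference only: the per-transmit block that hardware_send_packet()
+   builds further below ends up laid out like this (byte offsets from the
+   start of the block, reconstructed from that routine rather than taken
+   from the Intel documentation):
+
+     +0   Tx command status (written by the 82586)
+     +2   CMD_INTR|CmdTx
+     +4   link to the NoOp command at +16
+     +6   offset of the data buffer descriptor at +8
+     +8   byte count parameter with the 0x8000 end-of-frame flag set
+          (the packet is in a single buffer)
+     +10  -1, i.e. no further data buffer descriptor
+     +12  buffer address (low) = block + 22
+     +14  buffer address (high), always zero
+     +16  NoOp status
+     +18  CmdNOp
+     +20  NoOp link, initially pointing back at +16
+     +22  the packet data itself
+
+   The previous command's link word (lp->tx_cmd_link) is then rewritten to
+   point at this block, as described above. */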
+
+static short init_words[] = {
+ 0x0000, /* Set bus size to 16 bits. */
+ 0x0000,0x0000, /* Set control mailbox (SCB) addr. */
+ 0,0, /* pad to 0x000000. */
+ 0x0001, /* Status word that's cleared when init is done. */
+ 0x0008,0,0, /* SCB offset, (skip, skip) */
+
+ 0,0xf000|RX_START|CUC_START, /* SCB status and cmd. */
+ CONFIG_CMD, /* Command list pointer, points to Configure. */
+ RX_BUF_START, /* Rx block list. */
+ 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */
+
+ /* 0x0018: Configure command. Change to put MAC data with packet. */
+ 0, CmdConfigure, /* Status, command. */
+ SET_SA_CMD, /* Next command is Set Station Addr. */
+ 0x0804, /* "4" bytes of config data, 8 byte FIFO. */
+ 0x2e40, /* Magic values, including MAC data location. */
+ 0, /* Unused pad word. */
+
+ /* 0x0024: Setup station address command. */
+ 0, CmdSASetup,
+ SET_MC_CMD, /* Next command. */
+ 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */
+
+ /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */
+ 0, CmdNOp, IDLELOOP, 0 /* pad */,
+
+ /* 0x0038: An unused Time-Domain Reflectometer command. */
+ 0, CmdTDR, IDLELOOP, 0,
+
+ /* 0x0040: An unused Dump State command. */
+ 0, CmdDump, IDLELOOP, DUMP_DATA,
+
+ /* 0x0048: An unused Diagnose command. */
+ 0, CmdDiagnose, IDLELOOP,
+
+ /* 0x004E: An empty set-multicast-list command. */
+#ifdef initial_text_tx
+ 0, CmdMulticastList, DUMP_DATA, 0,
+#else
+ 0, CmdMulticastList, IDLELOOP, 0,
+#endif
+
+ /* 0x0056: A continuous transmit command, only here for testing. */
+ 0, CmdTx, DUMP_DATA, DUMP_DATA+8, 0x83ff, -1, DUMP_DATA, 0,
+};
+
+/* Index to functions, as function prototypes. */
+
+extern int express_probe(struct device *dev); /* Called from Space.c */
+
+static int eexp_probe1(struct device *dev, short ioaddr);
+static int eexp_open(struct device *dev);
+static int eexp_send_packet(struct sk_buff *skb, struct device *dev);
+static void eexp_interrupt(int irq, struct pt_regs *regs);
+static void eexp_rx(struct device *dev);
+static int eexp_close(struct device *dev);
+static struct enet_statistics *eexp_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+static int read_eeprom(int ioaddr, int location);
+static void hardware_send_packet(struct device *dev, void *buf, short length);
+static void init_82586_mem(struct device *dev);
+static void init_rx_bufs(struct device *dev);
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, (detachable devices only) allocate space for the
+ device and return success.
+ */
+int
+express_probe(struct device *dev)
+{
+ /* Don't probe all settable addresses, 0x[23][0-7]0, just common ones. */
+ int *port, ports[] = {0x300, 0x270, 0x320, 0x340, 0};
+ int base_addr = dev->base_addr;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return eexp_probe1(dev, base_addr);
+ else if (base_addr > 0)
+ return ENXIO; /* Don't probe at all. */
+
+ for (port = &ports[0]; *port; port++) {
+ short id_addr = *port + ID_PORT;
+ unsigned short sum = 0;
+ int i;
+#ifdef notdef
+ for (i = 16; i > 0; i--)
+ sum += inb(id_addr);
+ printk("EtherExpress ID checksum is %04x.\n", sum);
+#else
+ for (i = 4; i > 0; i--) {
+ short id_val = inb(id_addr);
+ sum |= (id_val >> 4) << ((id_val & 3) << 2);
+ }
+#endif
+ if (sum == 0xbaba
+ && eexp_probe1(dev, *port) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+int eexp_probe1(struct device *dev, short ioaddr)
+{
+ unsigned short station_addr[3];
+ int i;
+
+ printk("%s: EtherExpress at %#x,", dev->name, ioaddr);
+
+ /* The station address is stored !backwards! in the EEPROM, reverse
+ after reading. (Hmmm, a little brain-damage there at Intel, eh?) */
+ station_addr[0] = read_eeprom(ioaddr, 2);
+ station_addr[1] = read_eeprom(ioaddr, 3);
+ station_addr[2] = read_eeprom(ioaddr, 4);
+
+ /* Check the first three octets of the S.A. for the manufacturer's code. */
+ if (station_addr[2] != 0x00aa || (station_addr[1] & 0xff00) != 0x0000) {
+ printk(" rejected (invalid address %04x%04x%04x).\n",
+ station_addr[2], station_addr[1], station_addr[0]);
+ return ENODEV;
+ }
+
+ /* We've committed to using the board, and can start filling in *dev. */
+ request_region(ioaddr, EEXPRESS_IO_EXTENT, "eexpress");
+ dev->base_addr = ioaddr;
+
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = ((unsigned char*)station_addr)[5-i];
+ printk(" %02x", dev->dev_addr[i]);
+ }
+
+ /* There is no reason for the driver to care, but I print out the
+ interface to minimize bogus bug reports. */
+ {
+ char irqmap[] = {0, 9, 3, 4, 5, 10, 11, 0};
+ const char *ifmap[] = {"AUI", "BNC", "10baseT"};
+ enum iftype {AUI=0, BNC=1, TP=2};
+ unsigned short setupval = read_eeprom(ioaddr, 0);
+
+ dev->irq = irqmap[setupval >> 13];
+ dev->if_port = (setupval & 0x1000) == 0 ? AUI :
+ read_eeprom(ioaddr, 5) & 0x1 ? TP : BNC;
+ printk(", IRQ %d, Interface %s.\n", dev->irq, ifmap[dev->if_port]);
+ /* Release the IRQ line so that it can be shared if we don't use the
+ ethercard. */
+ outb(0x00, ioaddr + SET_IRQ);
+ }
+
+ /* It's now OK to leave the board in reset, pending the open(). */
+ outb(ASIC_RESET, ioaddr + EEPROM_Ctrl);
+
+ if ((dev->mem_start & 0xf) > 0)
+ net_debug = dev->mem_start & 7;
+
+ if (net_debug)
+ printk(version);
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = eexp_open;
+ dev->stop = eexp_close;
+ dev->hard_start_xmit = eexp_send_packet;
+ dev->get_stats = eexp_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the fields of the device structure with ethernet-generic values. */
+
+ ether_setup(dev);
+
+ dev->flags&=~IFF_MULTICAST;
+
+ return 0;
+}
+
+
+/* Reverse IRQ map: the value to put in the SET_IRQ reg. for IRQ<index>. */
+static char irqrmap[]={0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0};
+
+static int
+eexp_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (dev->irq == 0 || irqrmap[dev->irq] == 0)
+ return -ENXIO;
+
+ if (irq2dev_map[dev->irq] != 0
+ /* This is always true, but avoid the false IRQ. */
+ || (irq2dev_map[dev->irq] = dev) == 0
+ || request_irq(dev->irq, &eexp_interrupt, 0, "EExpress")) {
+ return -EAGAIN;
+ }
+
+ /* Initialize the 82586 memory and start it. */
+ init_82586_mem(dev);
+
+ /* Enable the interrupt line. */
+ outb(irqrmap[dev->irq] | 0x08, ioaddr + SET_IRQ);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int
+eexp_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ if (net_debug > 1)
+ printk("%s: transmit timed out, %s? ", dev->name,
+ inw(ioaddr+SCB_STATUS) & 0x8000 ? "IRQ conflict" :
+ "network cable problem");
+ lp->stats.tx_errors++;
+ /* Try to restart the adaptor. */
+ if (lp->last_restart == lp->stats.tx_packets) {
+ if (net_debug > 1) printk("Resetting board.\n");
+ /* Completely reset the adaptor. */
+ init_82586_mem(dev);
+ } else {
+ /* Issue the channel attention signal and hope it "gets better". */
+ if (net_debug > 1) printk("Kicking board.\n");
+ outw(0xf000|CUC_START|RX_START, ioaddr + SCB_CMD);
+ outb(0, ioaddr + SIGNAL_CA);
+ lp->last_restart = lp->stats.tx_packets;
+ }
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+
+ /* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(irqrmap[dev->irq], ioaddr + SET_IRQ);
+ hardware_send_packet(dev, buf, length);
+ dev->trans_start = jiffies;
+ /* Enable the 82586 interrupt input. */
+ outb(0x08 | irqrmap[dev->irq], ioaddr + SET_IRQ);
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ /* You might need to clean up and record Tx statistics here. */
+ lp->stats.tx_aborted_errors++;
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+eexp_interrupt(int irq, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 0;
+ short ack_cmd;
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+
+ status = inw(ioaddr + SCB_STATUS);
+
+ if (net_debug > 4) {
+ printk("%s: EExp interrupt, status %4.4x.\n", dev->name, status);
+ }
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(irqrmap[dev->irq], ioaddr + SET_IRQ);
+
+ /* Reap the Tx packet buffers. */
+ while (lp->tx_reap != lp->tx_head) { /* if (status & 0x8000) */
+ unsigned short tx_status;
+ outw(lp->tx_reap, ioaddr + READ_PTR);
+ tx_status = inw(ioaddr);
+ if (tx_status == 0) {
+ if (net_debug > 5) printk("Couldn't reap %#x.\n", lp->tx_reap);
+ break;
+ }
+ if (tx_status & 0x2000) {
+ lp->stats.tx_packets++;
+ lp->stats.collisions += tx_status & 0xf;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ } else {
+ lp->stats.tx_errors++;
+ if (tx_status & 0x0600) lp->stats.tx_carrier_errors++;
+ if (tx_status & 0x0100) lp->stats.tx_fifo_errors++;
+ if (!(tx_status & 0x0040)) lp->stats.tx_heartbeat_errors++;
+ if (tx_status & 0x0020) lp->stats.tx_aborted_errors++;
+ }
+ if (net_debug > 5)
+ printk("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
+ lp->tx_reap += TX_BUF_SIZE;
+ if (lp->tx_reap > TX_BUF_END - TX_BUF_SIZE)
+ lp->tx_reap = TX_BUF_START;
+ if (++boguscount > 4)
+ break;
+ }
+
+ if (status & 0x4000) { /* Packet received. */
+ if (net_debug > 5)
+ printk("Received packet, rx_head %04x.\n", lp->rx_head);
+ eexp_rx(dev);
+ }
+
+ /* Acknowledge the interrupt sources. */
+ ack_cmd = status & 0xf000;
+
+ if ((status & 0x0700) != 0x0200 && dev->start) {
+ short saved_write_ptr = inw(ioaddr + WRITE_PTR);
+ if (net_debug > 1)
+ printk("%s: Command unit stopped, status %04x, restarting.\n",
+ dev->name, status);
+ /* If this ever occurs we must re-write the idle loop, reset
+ the Tx list, and do a complete restart of the command unit. */
+ outw(IDLELOOP, ioaddr + WRITE_PTR);
+ outw(0, ioaddr);
+ outw(CmdNOp, ioaddr);
+ outw(IDLELOOP, ioaddr);
+ outw(IDLELOOP, ioaddr + SCB_CBL);
+ lp->tx_cmd_link = IDLELOOP + 4;
+ lp->tx_head = lp->tx_reap = TX_BUF_START;
+ /* Restore the saved write pointer. */
+ outw(saved_write_ptr, ioaddr + WRITE_PTR);
+ ack_cmd |= CUC_START;
+ }
+
+ if ((status & 0x0070) != 0x0040 && dev->start) {
+ short saved_write_ptr = inw(ioaddr + WRITE_PTR);
+ /* The Rx unit is not ready, it must be hung. Restart the receiver by
+ initializing the rx buffers, and issuing an Rx start command. */
+ lp->stats.rx_errors++;
+ if (net_debug > 1) {
+ int cur_rxbuf = RX_BUF_START;
+ printk("%s: Rx unit stopped status %04x rx head %04x tail %04x.\n",
+ dev->name, status, lp->rx_head, lp->rx_tail);
+ while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE) {
+ int i;
+ printk(" Rx buf at %04x:", cur_rxbuf);
+ outw(cur_rxbuf, ioaddr + READ_PTR);
+ for (i = 0; i < 0x20; i += 2)
+ printk(" %04x", inw(ioaddr));
+ printk(".\n");
+ cur_rxbuf += RX_BUF_SIZE;
+ }
+ }
+ init_rx_bufs(dev);
+ outw(RX_BUF_START, ioaddr + SCB_RFA);
+ outw(saved_write_ptr, ioaddr + WRITE_PTR);
+ ack_cmd |= RX_START;
+ }
+
+ outw(ack_cmd, ioaddr + SCB_CMD);
+ outb(0, ioaddr + SIGNAL_CA);
+
+ if (net_debug > 5) {
+ printk("%s: EExp exiting interrupt, status %4.4x.\n", dev->name,
+ inw(ioaddr + SCB_CMD));
+ }
+ /* Enable the 82586's input to the interrupt line. */
+ outb(irqrmap[dev->irq] | 0x08, ioaddr + SET_IRQ);
+ return;
+}
+
+static int
+eexp_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Flush the Tx and disable Rx. */
+ outw(RX_SUSPEND | CUC_SUSPEND, ioaddr + SCB_CMD);
+ outb(0, ioaddr + SIGNAL_CA);
+
+ /* Disable the physical interrupt line. */
+ outb(0, ioaddr + SET_IRQ);
+
+ free_irq(dev->irq);
+
+ irq2dev_map[dev->irq] = 0;
+
+ /* Update the statistics here. */
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+eexp_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ /* ToDo: decide if there are any useful statistics from the SCB. */
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+static void
+set_multicast_list(struct device *dev)
+{
+/* This doesn't work yet */
+#if 0
+ short ioaddr = dev->base_addr;
+ if (num_addrs < 0) {
+ /* Not written yet, this requires expanding the init_words config
+ cmd. */
+ } else if (num_addrs > 0) {
+ /* Fill in the SET_MC_CMD with the number of address bytes, followed
+ by the list of multicast addresses to be accepted. */
+ outw(SET_MC_CMD + 6, ioaddr + WRITE_PTR);
+ outw(num_addrs * 6, ioaddr);
+ outsw(ioaddr, addrs, num_addrs*3); /* 3 = addr len in words */
+ /* We must trigger a whole 586 reset due to a bug. */
+ } else {
+ /* Not written yet, this requires expanding the init_words config
+ cmd. */
+ outw(99, ioaddr); /* Disable promiscuous mode, use normal mode */
+ }
+#endif
+}
+
+/* The horrible routine to read a word from the serial EEPROM. */
+
+/* The delay between EEPROM clock transitions. */
+#define eeprom_delay() { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }}
+#define EE_READ_CMD (6 << 6)
+
+int
+read_eeprom(int ioaddr, int location)
+{
+ int i;
+ unsigned short retval = 0;
+ short ee_addr = ioaddr + EEPROM_Ctrl;
+ int read_cmd = location | EE_READ_CMD;
+ short ctrl_val = EE_CS | _586_RESET;
+
+ outb(ctrl_val, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 8; i >= 0; i--) {
+ short outval = (read_cmd & (1 << i)) ? ctrl_val | EE_DATA_WRITE
+ : ctrl_val;
+ outb(outval, ee_addr);
+ outb(outval | EE_SHIFT_CLK, ee_addr); /* EEPROM clock tick. */
+ eeprom_delay();
+ outb(outval, ee_addr); /* Finish EEPROM a clock tick. */
+ eeprom_delay();
+ }
+ outb(ctrl_val, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb(ctrl_val | EE_SHIFT_CLK, ee_addr); eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outb(ctrl_val, ee_addr); eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ ctrl_val &= ~EE_CS;
+ outb(ctrl_val | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ outb(ctrl_val, ee_addr);
+ eeprom_delay();
+ return retval;
+}
+
+static void
+init_82586_mem(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+
+ /* Enable loopback to protect the wire while starting up.
+ This is Superstition From Crynwr. */
+ outb(inb(ioaddr + Config) | 0x02, ioaddr + Config);
+
+ /* Hold the 586 in reset during the memory initialization. */
+ outb(_586_RESET, ioaddr + EEPROM_Ctrl);
+
+ /* Place the write pointer at 0xfff6 (address-aliased to 0xfffff6). */
+ outw(0xfff6, ioaddr + WRITE_PTR);
+ outsw(ioaddr, init_words, sizeof(init_words)>>1);
+
+ /* Fill in the station address. */
+ outw(SA_OFFSET, ioaddr + WRITE_PTR);
+ outsw(ioaddr, dev->dev_addr, 3);
+
+ /* The Tx-block list is written as needed. We just set up the values. */
+#ifdef initial_text_tx
+ lp->tx_cmd_link = DUMP_DATA + 4;
+#else
+ lp->tx_cmd_link = IDLELOOP + 4;
+#endif
+ lp->tx_head = lp->tx_reap = TX_BUF_START;
+
+ init_rx_bufs(dev);
+
+ /* Start the 586 by releasing the reset line. */
+ outb(0x00, ioaddr + EEPROM_Ctrl);
+
+ /* This was time consuming to track down: you need to give two channel
+ attention signals to reliably start up the i82586. */
+ outb(0, ioaddr + SIGNAL_CA);
+
+ {
+ int boguscnt = 50;
+ while (inw(ioaddr + SCB_STATUS) == 0)
+ if (--boguscnt == 0) {
+ printk("%s: i82586 initialization timed out with status %04x, cmd %04x.\n",
+ dev->name, inw(ioaddr + SCB_STATUS), inw(ioaddr + SCB_CMD));
+ break;
+ }
+ /* Issue channel-attn -- the 82586 won't start without it. */
+ outb(0, ioaddr + SIGNAL_CA);
+ }
+
+ /* Disable loopback. */
+ outb(inb(ioaddr + Config) & ~0x02, ioaddr + Config);
+ if (net_debug > 4)
+ printk("%s: Initialized 82586, status %04x.\n", dev->name,
+ inw(ioaddr + SCB_STATUS));
+ return;
+}
+
+/* Initialize the Rx-block list. */
+static void init_rx_bufs(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+
+ int cur_rxbuf = lp->rx_head = RX_BUF_START;
+
+ /* Initialize each Rx frame + data buffer. */
+ do { /* While there is room for one more. */
+ outw(cur_rxbuf, ioaddr + WRITE_PTR);
+ outw(0x0000, ioaddr); /* Status */
+ outw(0x0000, ioaddr); /* Command */
+ outw(cur_rxbuf + RX_BUF_SIZE, ioaddr); /* Link */
+ outw(cur_rxbuf + 22, ioaddr); /* Buffer offset */
+ outw(0xFeed, ioaddr); /* Pad for dest addr. */
+ outw(0xF00d, ioaddr);
+ outw(0xF001, ioaddr);
+ outw(0x0505, ioaddr); /* Pad for source addr. */
+ outw(0x2424, ioaddr);
+ outw(0x6565, ioaddr);
+ outw(0xdeaf, ioaddr); /* Pad for protocol. */
+
+ outw(0x0000, ioaddr); /* Buffer: Actual count */
+ outw(-1, ioaddr); /* Buffer: Next (none). */
+ outw(cur_rxbuf + 0x20, ioaddr); /* Buffer: Address low */
+ outw(0x0000, ioaddr);
+ /* Finally, the number of bytes in the buffer. */
+ outw(0x8000 + RX_BUF_SIZE-0x20, ioaddr);
+
+ lp->rx_tail = cur_rxbuf;
+ cur_rxbuf += RX_BUF_SIZE;
+ } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE);
+
+ /* Terminate the list by setting the EOL bit, and wrap the pointer to make
+ the list a ring. */
+ outw(lp->rx_tail + 2, ioaddr + WRITE_PTR);
+ outw(0xC000, ioaddr); /* Command, mark as last. */
+ outw(lp->rx_head, ioaddr); /* Link */
+}
+
+static void
+hardware_send_packet(struct device *dev, void *buf, short length)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ short tx_block = lp->tx_head;
+
+ /* Set the write pointer to the Tx block, and put out the header. */
+ outw(tx_block, ioaddr + WRITE_PTR);
+ outw(0x0000, ioaddr); /* Tx status */
+ outw(CMD_INTR|CmdTx, ioaddr); /* Tx command */
+ outw(tx_block+16, ioaddr); /* Next command is a NoOp. */
+ outw(tx_block+8, ioaddr); /* Data Buffer offset. */
+
+ /* Output the data buffer descriptor. */
+ outw(length | 0x8000, ioaddr); /* Byte count parameter. */
+ outw(-1, ioaddr); /* No next data buffer. */
+ outw(tx_block+22, ioaddr); /* Buffer follows the NoOp command. */
+ outw(0x0000, ioaddr); /* Buffer address high bits (always zero). */
+
+ /* Output the Loop-back NoOp command. */
+ outw(0x0000, ioaddr); /* Tx status */
+ outw(CmdNOp, ioaddr); /* Tx command */
+ outw(tx_block+16, ioaddr); /* Next is myself. */
+
+ /* Output the packet using the write pointer.
+ Hmmm, it feels a little like a 3c501! */
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+
+ /* Set the old command link pointing to this send packet. */
+ outw(lp->tx_cmd_link, ioaddr + WRITE_PTR);
+ outw(tx_block, ioaddr);
+ lp->tx_cmd_link = tx_block + 20;
+
+ /* Set the next free tx region. */
+ lp->tx_head = tx_block + TX_BUF_SIZE;
+ if (lp->tx_head > TX_BUF_END - TX_BUF_SIZE)
+ lp->tx_head = TX_BUF_START;
+
+ if (net_debug > 4) {
+ printk("%s: EExp @%x send length = %d, tx_block %3x, next %3x, "
+ "reap %4x status %4.4x.\n", dev->name, ioaddr, length,
+ tx_block, lp->tx_head, lp->tx_reap, inw(ioaddr + SCB_STATUS));
+ }
+
+ if (lp->tx_head != lp->tx_reap)
+ dev->tbusy = 0;
+}
+
+static void
+eexp_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ short saved_write_ptr = inw(ioaddr + WRITE_PTR);
+ short rx_head = lp->rx_head;
+ short rx_tail = lp->rx_tail;
+ short boguscount = 10;
+ short frame_status;
+
+ /* Set the read pointer to the Rx frame. */
+ outw(rx_head, ioaddr + READ_PTR);
+ while ((frame_status = inw(ioaddr)) < 0) { /* Command complete */
+ short rfd_cmd = inw(ioaddr);
+ short next_rx_frame = inw(ioaddr);
+ short data_buffer_addr = inw(ioaddr);
+ short pkt_len;
+
+ /* Set the read pointer the data buffer. */
+ outw(data_buffer_addr, ioaddr + READ_PTR);
+ pkt_len = inw(ioaddr);
+
+ if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22
+ || (pkt_len & 0xC000) != 0xC000) {
+ printk("%s: Rx frame at %#x corrupted, status %04x cmd %04x"
+ "next %04x data-buf @%04x %04x.\n", dev->name, rx_head,
+ frame_status, rfd_cmd, next_rx_frame, data_buffer_addr,
+ pkt_len);
+ } else if ((frame_status & 0x2000) == 0) {
+ /* Frame Rxed, but with error. */
+ lp->stats.rx_errors++;
+ if (frame_status & 0x0800) lp->stats.rx_crc_errors++;
+ if (frame_status & 0x0400) lp->stats.rx_frame_errors++;
+ if (frame_status & 0x0200) lp->stats.rx_fifo_errors++;
+ if (frame_status & 0x0100) lp->stats.rx_over_errors++;
+ if (frame_status & 0x0080) lp->stats.rx_length_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ pkt_len &= 0x3fff;
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ outw(data_buffer_addr + 10, ioaddr + READ_PTR);
+
+ insw(ioaddr, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+
+ /* Clear the status word and set End-of-List on the rx frame. */
+ outw(rx_head, ioaddr + WRITE_PTR);
+ outw(0x0000, ioaddr);
+ outw(0xC000, ioaddr);
+#ifndef final_version
+ if (next_rx_frame != rx_head + RX_BUF_SIZE
+ && next_rx_frame != RX_BUF_START) {
+ printk("%s: Rx next frame at %#x is %#x instead of %#x.\n", dev->name,
+ rx_head, next_rx_frame, rx_head + RX_BUF_SIZE);
+ next_rx_frame = rx_head + RX_BUF_SIZE;
+ if (next_rx_frame >= RX_BUF_END - RX_BUF_SIZE)
+ next_rx_frame = RX_BUF_START;
+ }
+#endif
+ outw(rx_tail+2, ioaddr + WRITE_PTR);
+ outw(0x0000, ioaddr); /* Clear the end-of-list on the prev. RFD. */
+
+#ifndef final_version
+ outw(rx_tail+4, ioaddr + READ_PTR);
+ if (inw(ioaddr) != rx_head) {
+ printk("%s: Rx buf link mismatch, at %04x link %04x instead of %04x.\n",
+ dev->name, rx_tail, (outw(rx_tail+4, ioaddr + READ_PTR),inw(ioaddr)),
+ rx_head);
+ outw(rx_head, ioaddr);
+ }
+#endif
+
+ rx_tail = rx_head;
+ rx_head = next_rx_frame;
+ if (--boguscount == 0)
+ break;
+ outw(rx_head, ioaddr + READ_PTR);
+ }
+
+ lp->rx_head = rx_head;
+ lp->rx_tail = rx_tail;
+
+ /* Restore the original write pointer. */
+ outw(saved_write_ptr, ioaddr + WRITE_PTR);
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_eexpress = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, express_probe };
+
+
+static int io=0x300;
+static int irq=0;
+
+int
+init_module(void)
+{
+ if (io == 0)
+ printk("eexpress: You should not use auto-probing with insmod!\n");
+ dev_eexpress.base_addr=io;
+ dev_eexpress.irq=irq;
+ if (register_netdev(&dev_eexpress) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_eexpress);
+ kfree_s(dev_eexpress.priv,sizeof(struct net_local));
+ dev_eexpress.priv=NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_eexpress.base_addr, EEXPRESS_IO_EXTENT);
+}
+#endif /* MODULE */
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -I/usr/src/linux/drivers/net -Wall -Wstrict-prototypes -O6 -m486 -c eexpress.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/eth16i.c b/i386/i386at/gpl/linux/net/eth16i.c
new file mode 100644
index 00000000..b21e4f33
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/eth16i.c
@@ -0,0 +1,1214 @@
+/* eth16i.c An ICL EtherTeam 16i and 32 EISA ethernet driver for Linux
+
+ Written 1994-95 by Mika Kuoppala
+
+ Copyright (C) 1994, 1995 by Mika Kuoppala
+ Based on skeleton.c and at1700.c by Donald Becker
+
+ This software may be used and distributed according to the terms
+ of the GNU Public Licence, incorporated herein by reference.
+
+ The author may be reached as miku@elt.icl.fi
+
+ This driver supports the following cards:
+ - ICL EtherTeam 16i
+ - ICL EtherTeam 32 EISA
+
+ Sources:
+ - skeleton.c a sample network driver core for linux,
+ written by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
+ - at1700.c a driver for Allied Telesis AT1700, written
+ by Donald Becker.
+ - e16iSRV.asm a Netware 3.X Server Driver for ICL EtherTeam16i
+ written by Markku Viima
+ - The Fujitsu MB86965 databook.
+
+ Valuable assistance from:
+ Markku Viima (ICL)
+ Ari Valve (ICL)
+
+ Revision history:
+
+ Version Date Description
+
+ 0.01 15.12-94 Initial version (card detection)
+ 0.02 23.01-95 Interrupt is now hooked correctly
+ 0.03 01.02-95 Rewrote initialization part
+ 0.04 07.02-95 Base skeleton done...
+ Made a few changes to signature checking
+ to make it a bit reliable.
+ - fixed bug in tx_buf mapping
+ - fixed bug in initialization (DLC_EN
+ wasn't enabled when initialization
+ was done.)
+ 0.05 08.02-95 If there were more than one packet to send,
+ transmit was jammed due to invalid
+ register write...now fixed
+ 0.06 19.02-95 Rewrote interrupt handling
+ 0.07 13.04-95 Wrote EEPROM read routines
+ Card configuration now set according to
+ data read from EEPROM
+ 0.08 23.06-95 Wrote part that tries to probe used interface
+ port if AUTO is selected
+
+ 0.09 01.09-95 Added module support
+
+ 0.10 04.09-95 Fixed receive packet allocation to work
+ with kernels > 1.3.x
+
+ 0.20 20.09-95 Added support for EtherTeam32 EISA
+
+ 0.21 17.10-95 Removed the unnecessary extern
+ init_etherdev() declaration. Some
+ other cleanups.
+ Bugs:
+ In some cases the interface autoprobing code doesn't find
+ the correct interface type. In this case you can
+ manually choose the interface type in DOS with E16IC.EXE, which is
+ the configuration software for EtherTeam16i and EtherTeam32 cards.
+
+ To do:
+ - Real multicast support
+*/
+
+static char *version =
+ "eth16i.c: v0.21 17-10-95 Mika Kuoppala (miku@elt.icl.fi)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+/* Few macros */
+#define BIT(a) ( (1 << (a)) )
+#define BITSET(ioaddr, bnum) ((outb(((inb(ioaddr)) | (bnum)), ioaddr)))
+#define BITCLR(ioaddr, bnum) ((outb(((inb(ioaddr)) & (~(bnum))), ioaddr)))
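+/* BITSET/BITCLR do a read-modify-write of a byte-wide register: for
+   instance, BITSET(ioaddr + CONFIG_REG_0, DLC_EN) would read DLCR6, OR in
+   the DLC_EN bit and write the result back (an illustrative use only; the
+   register definitions appear further below). */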
+
+/* This is the I/O address space for Etherteam 16i adapter. */
+#define ETH16I_IO_EXTENT 32
+
+/* Ticks before deciding that transmit has timed out */
+#define TIMEOUT_TICKS 30
+
+/* Maximum loop count when receiving packets */
+#define MAX_RX_LOOP 40
+
+/* Some interrupt masks */
+#define ETH16I_INTR_ON 0x8f82
+#define ETH16I_INTR_OFF 0x0000
+
+/* Buffers header status byte meanings */
+#define PKT_GOOD BIT(5)
+#define PKT_GOOD_RMT BIT(4)
+#define PKT_SHORT BIT(3)
+#define PKT_ALIGN_ERR BIT(2)
+#define PKT_CRC_ERR BIT(1)
+#define PKT_RX_BUF_OVERFLOW BIT(0)
+
+/* Transmit status register (DLCR0) */
+#define TX_STATUS_REG 0
+#define TX_DONE BIT(7)
+#define NET_BUSY BIT(6)
+#define TX_PKT_RCD BIT(5)
+#define CR_LOST BIT(4)
+#define COLLISION BIT(2)
+#define COLLISIONS_16 BIT(1)
+
+/* Receive status register (DLCR1) */
+#define RX_STATUS_REG 1
+#define RX_PKT BIT(7) /* Packet received */
+#define BUS_RD_ERR BIT(6)
+#define SHORT_PKT_ERR BIT(3)
+#define ALIGN_ERR BIT(2)
+#define CRC_ERR BIT(1)
+#define RX_BUF_OVERFLOW BIT(0)
+
+/* Transmit Interrupt Enable Register (DLCR2) */
+#define TX_INTR_REG 2
+#define TX_INTR_DONE BIT(7)
+#define TX_INTR_COL BIT(2)
+#define TX_INTR_16_COL BIT(1)
+
+/* Receive Interrupt Enable Register (DLCR3) */
+#define RX_INTR_REG 3
+#define RX_INTR_RECEIVE BIT(7)
+#define RX_INTR_SHORT_PKT BIT(3)
+#define RX_INTR_CRC_ERR BIT(1)
+#define RX_INTR_BUF_OVERFLOW BIT(0)
+
+/* Transmit Mode Register (DLCR4) */
+#define TRANSMIT_MODE_REG 4
+#define LOOPBACK_CONTROL BIT(1)
+#define CONTROL_OUTPUT BIT(2)
+
+/* Receive Mode Register (DLCR5) */
+#define RECEIVE_MODE_REG 5
+#define RX_BUFFER_EMPTY BIT(6)
+#define ACCEPT_BAD_PACKETS BIT(5)
+#define RECEIVE_SHORT_ADDR BIT(4)
+#define ACCEPT_SHORT_PACKETS BIT(3)
+#define REMOTE_RESET BIT(2)
+
+#define ADDRESS_FILTER_MODE BIT(1) | BIT(0)
+#define REJECT_ALL 0
+#define ACCEPT_ALL 3
+#define MODE_1 1 /* NODE ID, BC, MC, 2-24th bit */
+#define MODE_2 2 /* NODE ID, BC, MC, Hash Table */
+
+/* Configuration Register 0 (DLCR6) */
+#define CONFIG_REG_0 6
+#define DLC_EN BIT(7)
+#define SRAM_CYCLE_TIME_100NS BIT(6)
+#define SYSTEM_BUS_WIDTH_8 BIT(5) /* 1 = 8bit, 0 = 16bit */
+#define BUFFER_WIDTH_8 BIT(4) /* 1 = 8bit, 0 = 16bit */
+#define TBS1 BIT(3)
+#define TBS0 BIT(2)
+#define BS1 BIT(1) /* 00=8kb, 01=16kb */
+#define BS0 BIT(0) /* 10=32kb, 11=64kb */
+
+#ifndef ETH16I_TX_BUF_SIZE /* 0 = 2kb, 1 = 4kb */
+#define ETH16I_TX_BUF_SIZE 2 /* 2 = 8kb, 3 = 16kb */
+#endif
+#define TX_BUF_1x2048 0
+#define TX_BUF_2x2048 1
+#define TX_BUF_2x4098 2
+#define TX_BUF_2x8192 3
+
+/* Configuration Register 1 (DLCR7) */
+#define CONFIG_REG_1 7
+#define POWERUP BIT(5)
+
+/* Transmit start register */
+#define TRANSMIT_START_REG 10
+#define TRANSMIT_START_RB 2
+#define TX_START BIT(7) /* Rest of register bit indicate*/
+ /* number of packets in tx buffer*/
+/* Node ID registers (DLCR8-13) */
+#define NODE_ID_0 8
+#define NODE_ID_RB 0
+
+/* Hash Table registers (HT8-15) */
+#define HASH_TABLE_0 8
+#define HASH_TABLE_RB 1
+
+/* Buffer memory ports */
+#define BUFFER_MEM_PORT_LB 8
+#define DATAPORT BUFFER_MEM_PORT_LB
+#define BUFFER_MEM_PORT_HB 9
+
+/* 16 Collision control register (BMPR11) */
+#define COL_16_REG 11
+#define HALT_ON_16 0x00
+#define RETRANS_AND_HALT_ON_16 0x02
+
+/* DMA Burst and Transceiver Mode Register (BMPR13) */
+#define TRANSCEIVER_MODE_REG 13
+#define TRANSCEIVER_MODE_RB 2
+#define IO_BASE_UNLOCK BIT(7)
+#define LOWER_SQUELCH_TRESH BIT(6)
+#define LINK_TEST_DISABLE BIT(5)
+#define AUI_SELECT BIT(4)
+#define DIS_AUTO_PORT_SEL BIT(3)
+
+/* Filter Self Receive Register (BMPR14) */
+#define FILTER_SELF_RX_REG 14
+#define SKIP_RECEIVE_PACKET BIT(2)
+#define FILTER_SELF_RECEIVE BIT(0)
+#define RX_BUF_SKIP_PACKET SKIP_RECEIVE_PACKET | FILTER_SELF_RECEIVE
+
+/* EEPROM Control Register (BMPR 16) */
+#define EEPROM_CTRL_REG 16
+
+/* EEPROM Data Register (BMPR 17) */
+#define EEPROM_DATA_REG 17
+
+/* NMC93CSx6 EEPROM Control Bits */
+#define CS_0 0x00
+#define CS_1 0x20
+#define SK_0 0x00
+#define SK_1 0x40
+#define DI_0 0x00
+#define DI_1 0x80
+
+/* NMC93CSx6 EEPROM Instructions */
+#define EEPROM_READ 0x80
+
+/* NMC93CSx6 EEPROM Addresses */
+#define E_NODEID_0 0x02
+#define E_NODEID_1 0x03
+#define E_NODEID_2 0x04
+#define E_PORT_SELECT 0x14
+ #define E_PORT_BNC 0
+ #define E_PORT_DIX 1
+ #define E_PORT_TP 2
+ #define E_PORT_AUTO 3
+#define E_PRODUCT_CFG 0x30
+
+
+/* Macro to slow down io between EEPROM clock transitions */
+#define eeprom_slow_io() do { int _i = 40; while(--_i > 0) { __SLOW_DOWN_IO; }}while(0)
+
+/* Jumperless Configuration Register (BMPR19) */
+#define JUMPERLESS_CONFIG 19
+
+/* ID ROM registers, writing to them also resets some parts of chip */
+#define ID_ROM_0 24
+#define ID_ROM_7 31
+#define RESET ID_ROM_0
+
+/* This is the I/O address list to be probed when seeking the card */
+static unsigned int eth16i_portlist[] =
+ { 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300, 0 };
+
+static unsigned int eth32i_portlist[] =
+ { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000, 0x7000, 0x8000,
+ 0x9000, 0xA000, 0xB000, 0xC000, 0xD000, 0xE000, 0xF000, 0 };
+
+/* This is the Interrupt lookup table for Eth16i card */
+static unsigned int eth16i_irqmap[] = { 9, 10, 5, 15 };
+
+/* This is the Interrupt lookup table for Eth32i card */
+static unsigned int eth32i_irqmap[] = { 3, 5, 7, 9, 10, 11, 12, 15 };
+#define EISA_IRQ_REG 0xc89
+
+static unsigned int eth16i_tx_buf_map[] = { 2048, 2048, 4096, 8192 };
+unsigned int boot = 1;
+
+/* Use 0 for production, 1 for verification, >2 for debug */
+#ifndef ETH16I_DEBUG
+#define ETH16I_DEBUG 0
+#endif
+static unsigned int eth16i_debug = ETH16I_DEBUG;
+
+/* Information for each board */
+struct eth16i_local {
+ struct enet_statistics stats;
+ unsigned int tx_started:1;
+ unsigned char tx_queue; /* Number of packets in transmit buffer */
+ unsigned short tx_queue_len;
+ unsigned int tx_buf_size;
+ unsigned long open_time;
+};
+
+/* Function prototypes */
+
+extern int eth16i_probe(struct device *dev);
+
+static int eth16i_probe1(struct device *dev, short ioaddr);
+static int eth16i_check_signature(short ioaddr);
+static int eth16i_probe_port(short ioaddr);
+static void eth16i_set_port(short ioaddr, int porttype);
+static int eth16i_send_probe_packet(short ioaddr, unsigned char *b, int l);
+static int eth16i_receive_probe_packet(short ioaddr);
+static int eth16i_get_irq(short ioaddr);
+static int eth16i_read_eeprom(int ioaddr, int offset);
+static int eth16i_read_eeprom_word(int ioaddr);
+static void eth16i_eeprom_cmd(int ioaddr, unsigned char command);
+static int eth16i_open(struct device *dev);
+static int eth16i_close(struct device *dev);
+static int eth16i_tx(struct sk_buff *skb, struct device *dev);
+static void eth16i_rx(struct device *dev);
+static void eth16i_interrupt(int irq, struct pt_regs *regs);
+static void eth16i_multicast(struct device *dev, int num_addrs, void *addrs);
+static void eth16i_select_regbank(unsigned char regbank, short ioaddr);
+static void eth16i_initialize(struct device *dev);
+static struct enet_statistics *eth16i_get_stats(struct device *dev);
+
+static char *cardname = "ICL EtherTeam 16i/32";
+
+#ifdef HAVE_DEVLIST
+/* Support for alternate probe manager */
+struct netdev_entry eth16i_drv =
+ {"eth16i", eth16i_probe1, ETH16I_IO_EXTENT, eth16i_probe_list};
+
+#else /* Not HAVE_DEVLIST */
+int eth16i_probe(struct device *dev)
+{
+ int i;
+ int ioaddr;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if(eth16i_debug > 4)
+ printk("Probing started for %s\n", cardname);
+
+ if(base_addr > 0x1ff) /* Check only single location */
+ return eth16i_probe1(dev, base_addr);
+ else if(base_addr != 0) /* Don't probe at all */
+ return ENXIO;
+
+ /* Seek card from the ISA io address space */
+ for(i = 0; (ioaddr = eth16i_portlist[i]) ; i++) {
+ if(check_region(ioaddr, ETH16I_IO_EXTENT))
+ continue;
+ if(eth16i_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ /* Seek card from the EISA io address space */
+ for(i = 0; (ioaddr = eth32i_portlist[i]) ; i++) {
+ if(check_region(ioaddr, ETH16I_IO_EXTENT))
+ continue;
+ if(eth16i_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif /* Not HAVE_DEVLIST */
+
+static int eth16i_probe1(struct device *dev, short ioaddr)
+{
+ static unsigned version_printed = 0;
+ unsigned int irq = 0;
+	boot = 1;	/* To inform initialization that we are in boot probe */
+
+	/*
+	  The MB86985 chip has one register which holds the io address the
+	  chip is configured for. First read this register and compare it to
+	  our current io address; if they match, this could be our chip.
+	*/
+
+ if(ioaddr < 0x1000) {
+ if(eth16i_portlist[(inb(ioaddr + JUMPERLESS_CONFIG) & 0x07)] != ioaddr)
+ return -ENODEV;
+ }
+
+ /* Now we will go a bit deeper and try to find the chip's signature */
+
+ if(eth16i_check_signature(ioaddr) != 0) /* Can we find the signature here */
+ return -ENODEV;
+
+	/*
+	   Now it seems that we have found an ethernet chip at this particular
+	   ioaddr. The MB86985 chip has the feature that when you read a
+	   certain register it will increase its io base address to the next
+	   configurable slot. Now that we have found the chip, the first thing
+	   to do is to make sure that the chip's ioaddr stays put.
+	*/
+
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(0x00, ioaddr + TRANSCEIVER_MODE_REG);
+
+ outb(0x00, ioaddr + RESET); /* Will reset some parts of chip */
+ BITSET(ioaddr + CONFIG_REG_0, BIT(7)); /* This will disable the data link */
+
+ if(dev == NULL)
+ dev = init_etherdev(0, sizeof(struct eth16i_local));
+
+	if( eth16i_debug && (version_printed++ == 0) )
+ printk(version);
+
+ dev->base_addr = ioaddr;
+
+ irq = eth16i_get_irq(ioaddr);
+ dev->irq = irq;
+
+ /* Try to obtain interrupt vector */
+ if(request_irq(dev->irq, &eth16i_interrupt, 0, "eth16i")) {
+		printk("%s: %s at %#3x, but is unusable due to a conflict on IRQ %d.\n",
+		       dev->name, cardname, ioaddr, irq);
+ return EAGAIN;
+ }
+
+ printk("%s: %s at %#3x, IRQ %d, ",
+ dev->name, cardname, ioaddr, dev->irq);
+
+ /* Let's grab the region */
+ request_region(ioaddr, ETH16I_IO_EXTENT, "eth16i");
+
+ /* Now we will have to lock the chip's io address */
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(0x38, ioaddr + TRANSCEIVER_MODE_REG);
+
+ eth16i_initialize(dev); /* Initialize rest of the chip's registers */
+
+	/* Now let's save some energy by shutting down the chip ;) */
+ BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
+
+ /* Initialize the device structure */
+ if(dev->priv == NULL)
+ dev->priv = kmalloc(sizeof(struct eth16i_local), GFP_KERNEL);
+ memset(dev->priv, 0, sizeof(struct eth16i_local));
+
+ dev->open = eth16i_open;
+ dev->stop = eth16i_close;
+ dev->hard_start_xmit = eth16i_tx;
+ dev->get_stats = eth16i_get_stats;
+ dev->set_multicast_list = &eth16i_multicast;
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ boot = 0;
+
+ return 0;
+}
+
+
+static void eth16i_initialize(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ int i, node_w = 0;
+ unsigned char node_byte = 0;
+
+ /* Setup station address */
+ eth16i_select_regbank(NODE_ID_RB, ioaddr);
+ for(i = 0 ; i < 3 ; i++) {
+ unsigned short node_val = eth16i_read_eeprom(ioaddr, E_NODEID_0 + i);
+ ((unsigned short *)dev->dev_addr)[i] = ntohs(node_val);
+ }
+
+ for(i = 0; i < 6; i++) {
+ outb( ((unsigned char *)dev->dev_addr)[i], ioaddr + NODE_ID_0 + i);
+ if(boot) {
+ printk("%02x", inb(ioaddr + NODE_ID_0 + i));
+ if(i != 5)
+ printk(":");
+ }
+ }
+
+ /* Now we will set multicast addresses to accept none */
+ eth16i_select_regbank(HASH_TABLE_RB, ioaddr);
+ for(i = 0; i < 8; i++)
+ outb(0x00, ioaddr + HASH_TABLE_0 + i);
+
+ /*
+ Now let's disable the transmitter and receiver, set the buffer ram
+ cycle time, bus width and buffer data path width. Also we shall
+ set transmit buffer size and total buffer size.
+ */
+
+ eth16i_select_regbank(2, ioaddr);
+
+ node_byte = 0;
+ node_w = eth16i_read_eeprom(ioaddr, E_PRODUCT_CFG);
+
+ if( (node_w & 0xFF00) == 0x0800)
+ node_byte |= BUFFER_WIDTH_8;
+
+ node_byte |= BS1;
+
+ if( (node_w & 0x00FF) == 64)
+ node_byte |= BS0;
+
+ node_byte |= DLC_EN | SRAM_CYCLE_TIME_100NS | (ETH16I_TX_BUF_SIZE << 2);
+
+ outb(node_byte, ioaddr + CONFIG_REG_0);
+
+ /* We shall halt the transmitting, if 16 collisions are detected */
+ outb(RETRANS_AND_HALT_ON_16, ioaddr + COL_16_REG);
+
+ if(boot) /* Now set port type */
+ {
+ char *porttype[] = {"BNC", "DIX", "TP", "AUTO"};
+
+ ushort ptype = eth16i_read_eeprom(ioaddr, E_PORT_SELECT);
+ dev->if_port = (ptype & 0x00FF);
+
+ printk(" %s interface.\n", porttype[dev->if_port]);
+
+ if(ptype == E_PORT_AUTO)
+ ptype = eth16i_probe_port(ioaddr);
+
+ eth16i_set_port(ioaddr, ptype);
+ }
+
+ /* Set Receive Mode to normal operation */
+ outb(MODE_2, ioaddr + RECEIVE_MODE_REG);
+}
+
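+/*
+   Probe strategy: for each interface type (BNC, DIX, TP) the routine below
+   re-cycles the DLC, selects the port, transmits a 64 byte dummy frame
+   addressed to the card itself and then checks whether the frame shows up
+   as "transmit packet received" or in the receive buffer.  The first port
+   that echoes the frame back is returned; if none does, BNC is used as the
+   default.
+*/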
+static int eth16i_probe_port(short ioaddr)
+{
+ int i;
+ int retcode;
+ unsigned char dummy_packet[64] = { 0 };
+
+ /* Powerup the chip */
+ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
+
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ eth16i_select_regbank(NODE_ID_RB, ioaddr);
+
+ for(i = 0; i < 6; i++) {
+ dummy_packet[i] = inb(ioaddr + NODE_ID_0 + i);
+ dummy_packet[i+6] = inb(ioaddr + NODE_ID_0 + i);
+ }
+
+ dummy_packet[12] = 0x00;
+ dummy_packet[13] = 0x04;
+
+ eth16i_select_regbank(2, ioaddr);
+
+ for(i = 0; i < 3; i++) {
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+ eth16i_set_port(ioaddr, i);
+
+ if(eth16i_debug > 1)
+ printk("Set port number %d\n", i);
+
+ retcode = eth16i_send_probe_packet(ioaddr, dummy_packet, 64);
+ if(retcode == 0) {
+ retcode = eth16i_receive_probe_packet(ioaddr);
+ if(retcode != -1) {
+ if(eth16i_debug > 1)
+ printk("Eth16i interface port found at %d\n", i);
+ return i;
+ }
+ }
+ else {
+ if(eth16i_debug > 1)
+ printk("TRANSMIT_DONE timeout\n");
+ }
+ }
+
+ if( eth16i_debug > 1)
+ printk("Using default port\n");
+
+ return E_PORT_BNC;
+}
+
+static void eth16i_set_port(short ioaddr, int porttype)
+{
+ unsigned short temp = 0;
+
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(LOOPBACK_CONTROL, ioaddr + TRANSMIT_MODE_REG);
+
+ temp |= DIS_AUTO_PORT_SEL;
+
+ switch(porttype) {
+
+ case E_PORT_BNC :
+ temp |= AUI_SELECT;
+ break;
+
+ case E_PORT_TP :
+ break;
+
+ case E_PORT_DIX :
+ temp |= AUI_SELECT;
+ BITSET(ioaddr + TRANSMIT_MODE_REG, CONTROL_OUTPUT);
+ break;
+ }
+ outb(temp, ioaddr + TRANSCEIVER_MODE_REG);
+
+ if(eth16i_debug > 1) {
+ printk("TRANSMIT_MODE_REG = %x\n", inb(ioaddr + TRANSMIT_MODE_REG));
+ printk("TRANSCEIVER_MODE_REG = %x\n", inb(ioaddr+TRANSCEIVER_MODE_REG));
+ }
+}
+
+static int eth16i_send_probe_packet(short ioaddr, unsigned char *b, int l)
+{
+ int starttime;
+
+ outb(0xff, ioaddr + TX_STATUS_REG);
+
+ outw(l, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, (unsigned short *)b, (l + 1) >> 1);
+
+ starttime = jiffies;
+ outb(TX_START | 1, ioaddr + TRANSMIT_START_REG);
+
+ while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) {
+ if( (jiffies - starttime) > TIMEOUT_TICKS) {
+ break;
+ }
+ }
+
+ return(0);
+}
+
+static int eth16i_receive_probe_packet(short ioaddr)
+{
+ int starttime;
+
+ starttime = jiffies;
+
+ while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) {
+ if( (jiffies - starttime) > TIMEOUT_TICKS) {
+
+ if(eth16i_debug > 1)
+				printk("Timeout occurred waiting for transmit packet to be received\n");
+ starttime = jiffies;
+ while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) {
+ if( (jiffies - starttime) > TIMEOUT_TICKS) {
+ if(eth16i_debug > 1)
+					printk("Timeout occurred waiting for receive packet\n");
+ return -1;
+ }
+ }
+
+ if(eth16i_debug > 1)
+ printk("RECEIVE_PACKET\n");
+ return(0); /* Found receive packet */
+ }
+ }
+
+ if(eth16i_debug > 1) {
+ printk("TRANSMIT_PACKET_RECEIVED %x\n", inb(ioaddr + TX_STATUS_REG));
+ printk("RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG));
+ }
+
+ return(0); /* Return success */
+}
+
+static int eth16i_get_irq(short ioaddr)
+{
+ unsigned char cbyte;
+
+ if( ioaddr < 0x1000) {
+ cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
+ return( eth16i_irqmap[ ((cbyte & 0xC0) >> 6) ] );
+	} else {	/* Oh..the card is EISA, so the method of getting the IRQ is different */
+ unsigned short index = 0;
+ cbyte = inb(ioaddr + EISA_IRQ_REG);
+ while( (cbyte & 0x01) == 0) {
+ cbyte = cbyte >> 1;
+ index++;
+ }
+ return( eth32i_irqmap[ index ] );
+ }
+}
+
+static int eth16i_check_signature(short ioaddr)
+{
+ int i;
+ unsigned char creg[4] = { 0 };
+
+ for(i = 0; i < 4 ; i++) {
+
+ creg[i] = inb(ioaddr + TRANSMIT_MODE_REG + i);
+
+ if(eth16i_debug > 1)
+ printk("eth16i: read signature byte %x at %x\n", creg[i],
+ ioaddr + TRANSMIT_MODE_REG + i);
+ }
+
+ creg[0] &= 0x0F; /* Mask collision cnr */
+ creg[2] &= 0x7F; /* Mask DCLEN bit */
+
+#if 0
+/*
+	This was removed because the card was sometimes left in a state
+	from which it couldn't be found anymore. If a stricter check is
+	needed, this still has to be fixed.
+*/
+ if( !( (creg[0] == 0x06) && (creg[1] == 0x41)) ) {
+ if(creg[1] != 0x42)
+ return -1;
+ }
+#endif
+
+ if( !( (creg[2] == 0x36) && (creg[3] == 0xE0)) ) {
+ creg[2] &= 0x42;
+ creg[3] &= 0x03;
+
+ if( !( (creg[2] == 0x42) && (creg[3] == 0x00)) )
+ return -1;
+ }
+
+ if(eth16i_read_eeprom(ioaddr, E_NODEID_0) != 0)
+ return -1;
+ if((eth16i_read_eeprom(ioaddr, E_NODEID_1) & 0xFF00) != 0x4B00)
+ return -1;
+
+ return 0;
+}
+
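+/*
+   EEPROM access (NMC93CSx6 serial EEPROM): eth16i_eeprom_cmd() bit-bangs the
+   start bit and the 8 bit READ opcode/address onto the DI line using the
+   CS_x/SK_x/DI_x bits defined above, eth16i_read_eeprom_word() then clocks
+   in the 16 data bits, and eth16i_read_eeprom() wraps the two and deselects
+   the chip afterwards.
+*/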
+static int eth16i_read_eeprom(int ioaddr, int offset)
+{
+ int data = 0;
+
+ eth16i_eeprom_cmd(ioaddr, EEPROM_READ | offset);
+ outb(CS_1, ioaddr + EEPROM_CTRL_REG);
+ data = eth16i_read_eeprom_word(ioaddr);
+ outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
+
+ return(data);
+}
+
+static int eth16i_read_eeprom_word(int ioaddr)
+{
+ int i;
+ int data = 0;
+
+ for(i = 16; i > 0; i--) {
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ data = (data << 1) | ((inb(ioaddr + EEPROM_DATA_REG) & DI_1) ? 1 : 0);
+ eeprom_slow_io();
+ }
+
+ return(data);
+}
+
+static void eth16i_eeprom_cmd(int ioaddr, unsigned char command)
+{
+ int i;
+
+ outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ outb(DI_0, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ outb(DI_1, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+
+ for(i = 7; i >= 0; i--) {
+ short cmd = ( (command & (1 << i)) ? DI_1 : DI_0 );
+ outb(cmd, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ }
+}
+
+static int eth16i_open(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ irq2dev_map[dev->irq] = dev;
+
+ /* Powerup the chip */
+ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
+
+ /* Initialize the chip */
+ eth16i_initialize(dev);
+
+ /* Set the transmit buffer size */
+ lp->tx_buf_size = eth16i_tx_buf_map[ETH16I_TX_BUF_SIZE & 0x03];
+
+ if(eth16i_debug > 3)
+ printk("%s: transmit buffer size %d\n", dev->name, lp->tx_buf_size);
+
+ /* Now enable Transmitter and Receiver sections */
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ /* Now switch to register bank 2, for run time operation */
+ eth16i_select_regbank(2, ioaddr);
+
+ lp->open_time = jiffies;
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Turn on interrupts*/
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+#ifdef MODULE
+ MOD_INC_USE_COUNT;
+#endif
+
+ return 0;
+}
+
+static int eth16i_close(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ lp->open_time = 0;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Disable transmit and receive */
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ /* Reset the chip */
+ outb(0xff, ioaddr + RESET);
+
+ /* Save some energy by switching off power */
+ BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
+
+#ifdef MODULE
+ MOD_DEC_USE_COUNT;
+#endif
+
+ return 0;
+}
+
+static int eth16i_tx(struct sk_buff *skb, struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if(dev->tbusy) {
+ /*
+ If we get here, some higher level has decided that we are broken.
+ There should really be a "kick me" function call instead.
+ */
+
+ int tickssofar = jiffies - dev->trans_start;
+ if(tickssofar < TIMEOUT_TICKS) /* Let's not rush with our timeout, */
+ return 1; /* wait a couple of ticks first */
+
+ printk("%s: transmit timed out with status %04x, %s ?\n", dev->name,
+ inw(ioaddr + TX_STATUS_REG),
+ (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
+ "IRQ conflict" : "network cable problem");
+
+ /* Let's dump all registers */
+ if(eth16i_debug > 0) {
+ printk("%s: timeout regs: %02x %02x %02x %02x %02x %02x %02x %02x.\n",
+ dev->name, inb(ioaddr + 0), inb(ioaddr + 1), inb(ioaddr + 2),
+ inb(ioaddr + 3), inb(ioaddr + 4), inb(ioaddr + 5),
+ inb(ioaddr + 6), inb(ioaddr + 7));
+
+
+ printk("lp->tx_queue = %d\n", lp->tx_queue);
+ printk("lp->tx_queue_len = %d\n", lp->tx_queue_len);
+ printk("lp->tx_started = %d\n", lp->tx_started);
+
+ }
+
+ lp->stats.tx_errors++;
+
+ /* Now let's try to restart the adaptor */
+
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+ outw(0xffff, ioaddr + RESET);
+ eth16i_initialize(dev);
+ outw(0xffff, ioaddr + TX_STATUS_REG);
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+ }
+
+ /*
+	   If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself
+ */
+ if(skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer based transmitter from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+
+ /* Turn off TX interrupts */
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ if(set_bit(0, (void *)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ outw(length, ioaddr + DATAPORT);
+
+ if( ioaddr < 0x1000 )
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+ else {
+ unsigned char frag = length % 4;
+
+ outsl(ioaddr + DATAPORT, buf, length >> 2);
+
+ if( frag != 0 ) {
+ outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC)), 1);
+ if( frag == 3 )
+ outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC) + 2), 1);
+ }
+ }
+
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+
+ if(lp->tx_started == 0) {
+ /* If the transmitter is idle..always trigger a transmit */
+ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ dev->tbusy = 0;
+ }
+ else if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
+ /* There is still more room for one more packet in tx buffer */
+ dev->tbusy = 0;
+ }
+
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ /* Turn TX interrupts back on */
+ /* outb(TX_INTR_DONE | TX_INTR_16_COL, ioaddr + TX_INTR_REG); */
+ }
+ dev_kfree_skb(skb, FREE_WRITE);
+
+ return 0;
+}
+
+static void eth16i_rx(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = MAX_RX_LOOP;
+
+ /* Loop until all packets have been read */
+ while( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) {
+
+ /* Read status byte from receive buffer */
+ ushort status = inw(ioaddr + DATAPORT);
+
+ if(eth16i_debug > 4)
+ printk("%s: Receiving packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RECEIVE_MODE_REG), status);
+
+ if( !(status & PKT_GOOD) ) {
+			/* Hmm..something went wrong. Let's check what error occurred */
+ lp->stats.rx_errors++;
+ if( status & PKT_SHORT ) lp->stats.rx_length_errors++;
+ if( status & PKT_ALIGN_ERR ) lp->stats.rx_frame_errors++;
+ if( status & PKT_CRC_ERR ) lp->stats.rx_crc_errors++;
+ if( status & PKT_RX_BUF_OVERFLOW) lp->stats.rx_over_errors++;
+ }
+ else { /* Ok so now we should have a good packet */
+ struct sk_buff *skb;
+
+ /* Get the size of the packet from receive buffer */
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+
+ if(pkt_len > ETH_FRAME_LEN) {
+ printk("%s: %s claimed a very large packet, size of %d bytes.\n",
+ dev->name, cardname, pkt_len);
+ outb(RX_BUF_SKIP_PACKET, ioaddr + FILTER_SELF_RX_REG);
+ lp->stats.rx_dropped++;
+   This driver supports the following cards:
+ }
+
+ skb = dev_alloc_skb(pkt_len + 3);
+ if( skb == NULL ) {
+				printk("%s: Couldn't allocate memory for packet (len %d)\n",
+ dev->name, pkt_len);
+ outb(RX_BUF_SKIP_PACKET, ioaddr + FILTER_SELF_RX_REG);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb->dev = dev;
+ skb_reserve(skb,2);
+			/*
+			   Now let's get the packet out of the buffer.
+			   The size is (pkt_len + 1) >> 1 because we are now reading
+			   words and it has to be even-aligned.
+			*/
+
+ if( ioaddr < 0x1000)
+ insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), (pkt_len + 1) >> 1);
+ else {
+ unsigned char *buf = skb_put(skb, pkt_len);
+ unsigned char frag = pkt_len % 4;
+
+ insl(ioaddr + DATAPORT, buf, pkt_len >> 2);
+
+ if(frag != 0) {
+ unsigned short rest[2];
+ rest[0] = inw( ioaddr + DATAPORT );
+ if(frag == 3)
+ rest[1] = inw( ioaddr + DATAPORT );
+
+ memcpy(buf + (pkt_len & 0xfffc), (char *)rest, frag);
+ }
+ }
+
+ skb->protocol=eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+
+ if( eth16i_debug > 5 ) {
+ int i;
+ printk("%s: Received packet of length %d.\n", dev->name, pkt_len);
+ for(i = 0; i < 14; i++)
+ printk(" %02x", skb->data[i]);
+ printk(".\n");
+ }
+
+ } /* else */
+
+ if(--boguscount <= 0)
+ break;
+
+ } /* while */
+
+#if 0
+ {
+ int i;
+
+ for(i = 0; i < 20; i++) {
+ if( (inb(ioaddr+RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == RX_BUFFER_EMPTY)
+ break;
+ inw(ioaddr + DATAPORT);
+ outb(RX_BUF_SKIP_PACKET, ioaddr + FILTER_SELF_RX_REG);
+ }
+
+ if(eth16i_debug > 1)
+ printk("%s: Flushed receive buffer.\n", dev->name);
+ }
+#endif
+
+ return;
+}
+
+static void eth16i_interrupt(int irq, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct eth16i_local *lp;
+ int ioaddr = 0,
+ status;
+
+ if(dev == NULL) {
+ printk("eth16i_interrupt(): irq %d for unknown device. \n", irq);
+ return;
+ }
+
+	ioaddr = dev->base_addr;
+	lp = (struct eth16i_local *)dev->priv;
+
+	/* Turn off all interrupts from adapter (ioaddr must be set up first) */
+	outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+	dev->interrupt = 1;
+ status = inw(ioaddr + TX_STATUS_REG); /* Get the status */
+ outw(status, ioaddr + TX_STATUS_REG); /* Clear status bits */
+
+ if(eth16i_debug > 3)
+ printk("%s: Interrupt with status %04x.\n", dev->name, status);
+
+ if( status & 0x00ff ) { /* Let's check the transmit status reg */
+
+ if(status & TX_DONE) { /* The transmit has been done */
+ lp->stats.tx_packets++;
+
+ if(lp->tx_queue) { /* Is there still packets ? */
+				/* There were packet(s) queued, so start transmitting and also
+				   write how many packets there are to be sent */
+ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ else {
+ lp->tx_started = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ }
+ }
+
+ if( ( status & 0xff00 ) ||
+ ( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) ) {
+ eth16i_rx(dev); /* We have packet in receive buffer */
+ }
+
+ dev->interrupt = 0;
+
+ /* Turn interrupts back on */
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ return;
+}
+
+static void eth16i_multicast(struct device *dev, int num_addrs, void *addrs)
+{
+ short ioaddr = dev->base_addr;
+
+ if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ {
+ dev->flags|=IFF_PROMISC; /* Must do this */
+ outb(3, ioaddr + RECEIVE_MODE_REG);
+ } else {
+ outb(2, ioaddr + RECEIVE_MODE_REG);
+ }
+}
+
+static struct enet_statistics *eth16i_get_stats(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+
+ return &lp->stats;
+}
+
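+/* Select one of the chip's four register banks by writing the bank number
+   into bits 2-3 of Configuration Register 1, leaving the other bits alone. */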
+static void eth16i_select_regbank(unsigned char banknbr, short ioaddr)
+{
+ unsigned char data;
+
+ data = inb(ioaddr + CONFIG_REG_1);
+ outb( ((data & 0xF3) | ( (banknbr & 0x03) << 2)), ioaddr + CONFIG_REG_1);
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_eth16i = {
+ devicename,
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, eth16i_probe };
+
+int io = 0x2a0;
+int irq = 0;
+
+int init_module(void)
+{
+ if(io == 0)
+ printk("eth16i: You should not use auto-probing with insmod!\n");
+
+ dev_eth16i.base_addr = io;
+ dev_eth16i.irq = irq;
+ if( register_netdev( &dev_eth16i ) != 0 ) {
+ printk("eth16i: register_netdev() returned non-zero.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev( &dev_eth16i );
+ free_irq( dev_eth16i.irq );
+ irq2dev_map[ dev_eth16i.irq ] = NULL;
+ release_region( dev_eth16i.base_addr, ETH16I_IO_EXTENT );
+}
+
+#endif /* MODULE */
+
+
diff --git a/i386/i386at/gpl/linux/net/ewrk3.c b/i386/i386at/gpl/linux/net/ewrk3.c
new file mode 100644
index 00000000..90e3b932
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/ewrk3.c
@@ -0,0 +1,1933 @@
+/* ewrk3.c: A DIGITAL EtherWORKS 3 ethernet driver for Linux.
+
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of
+ the GNU Public License, incorporated herein by reference.
+
+ This driver is written for the Digital Equipment Corporation series
+ of EtherWORKS ethernet cards:
+
+ DE203 Turbo (BNC)
+ DE204 Turbo (TP)
+ DE205 Turbo (TP BNC)
+
+ The driver has been tested on a relatively busy network using the DE205
+ card and benchmarked with 'ttcp': it transferred 16M of data at 975kB/s
+ (7.8Mb/s) to a DECstation 5000/200.
+
+ The author may be reached at davies@wanton.lkg.dec.com or
+ davies@maniac.ultranet.com or Digital Equipment Corporation, 550 King
+ Street, Littleton MA 01460.
+
+ =========================================================================
+ This driver has been written substantially from scratch, although its
+ inheritance of style and stack interface from 'depca.c' and in turn from
+ Donald Becker's 'lance.c' should be obvious.
+
+ The DE203/4/5 boards all use a new proprietary chip in place of the
+ LANCE chip used in prior cards (DEPCA, DE100, DE200/1/2, DE210, DE422).
+ Use the depca.c driver in the standard distribution for the LANCE based
+ cards from DIGITAL; this driver will not work with them.
+
+ The DE203/4/5 cards have 2 main modes: shared memory and I/O only. I/O
+ only makes all the card accesses through I/O transactions and no high
+ (shared) memory is used. This mode provides a >48% performance penalty
+ and is deprecated in this driver, although allowed to provide initial
+ setup when hardstrapped.
+
+ The shared memory mode comes in 3 flavours: 2kB, 32kB and 64kB. There is
+ no point in using any mode other than the 2kB mode - their performances
+ are virtually identical, although the driver has been tested in the 2kB
+ and 32kB modes. I would suggest you uncomment the line:
+
+ FORCE_2K_MODE;
+
+ to allow the driver to configure the card as a 2kB card at your current
+ base address, thus leaving more room to clutter your system box with
+ other memory hungry boards.
+
+ As many ISA and EISA cards can be supported under this driver as you
+ wish, limited primarily by the available IRQ lines, rather than by the
+ available I/O addresses (24 ISA, 16 EISA). I have checked different
+ configurations of multiple depca cards and ewrk3 cards and have not
+ found a problem yet (provided you have at least depca.c v0.38) ...
+
+ The board IRQ setting must be at an unused IRQ which is auto-probed
+ using Donald Becker's autoprobe routines. All these cards are at
+ {5,10,11,15}.
+
+ No 16MB memory limitation should exist with this driver as DMA is not
+ used and the common memory area is in low memory on the network card (my
+ current system has 20MB and I've not had problems yet).
+
+ The ability to load this driver as a loadable module has been included
+ and used extensively during the driver development (to save those long
+ reboot sequences). To utilise this ability, you have to do 8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy ewrk3.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) edit the source code near line 1880 to reflect the I/O address and
+ IRQ you're using.
+ 3) compile ewrk3.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
+ kernel with the ewrk3 configuration turned off and reboot.
+ 5) insmod ewrk3.o
+ [Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y]
+ 6) run the net startup bits for your new eth?? interface manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ Note that autoprobing is not allowed in loadable modules - the system is
+ already up and running and you're messing with interrupts.
+
+ To unload a module, turn off the associated interface
+ 'ifconfig eth?? down' then 'rmmod ewrk3'.
+
+ Promiscuous mode has been turned off in this driver, but all the
+ multicast address bits have been turned on. This improved the send
+ performance on a busy network by about 13%.
+
+ Ioctl's have now been provided (primarily because I wanted to grab some
+ packet size statistics). They are patterned after 'plipconfig.c' from a
+ suggestion by Alan Cox. Using these ioctls, you can enable promiscuous
+ mode, add/delete multicast addresses, change the hardware address, get
+ packet size distribution statistics and muck around with the control and
+ status register. I'll add others if and when the need arises.
+
+ TO DO:
+ ------
+
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 26-aug-94 Initial writing. ALPHA code release.
+ 0.11 31-aug-94 Fixed: 2k mode memory base calc.,
+ LeMAC version calc.,
+ IRQ vector assignments during autoprobe.
+ 0.12 31-aug-94 Tested working on LeMAC2 (DE20[345]-AC) card.
+ Fixed up MCA hash table algorithm.
+ 0.20 4-sep-94 Added IOCTL functionality.
+ 0.21 14-sep-94 Added I/O mode.
+ 0.21axp 15-sep-94 Special version for ALPHA AXP Linux V1.0.
+ 0.22 16-sep-94 Added more IOCTLs & tidied up.
+ 0.23 21-sep-94 Added transmit cut through.
+ 0.24 31-oct-94 Added uid checks in some ioctls.
+ 0.30 1-nov-94 BETA code release.
+ 0.31 5-dec-94 Added check/allocate region code.
+ 0.32 16-jan-95 Broadcast packet fix.
+ 0.33 10-Feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ 0.40 27-Dec-95 Rationalise MODULE and autoprobe code.
+ Rewrite for portability & updated.
+ ALPHA support from <jestabro@amt.tay1.dec.com>
+ Added verify_area() calls in depca_ioctl() from
+ suggestion by <heiko@colossus.escape.de>.
+ Add new multicasting code.
+
+ =========================================================================
+*/
+
+static const char *version = "ewrk3.c:v0.40 95/12/27 davies@wanton.lkg.dec.com\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/segment.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+
+#include "ewrk3.h"
+
+#ifdef EWRK3_DEBUG
+static int ewrk3_debug = EWRK3_DEBUG;
+#else
+static int ewrk3_debug = 1;
+#endif
+
+#define EWRK3_NDA 0xffe0 /* No Device Address */
+
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+#ifndef EWRK3_SIGNATURE
+#define EWRK3_SIGNATURE {"DE203","DE204","DE205",""}
+#define EWRK3_STRLEN 8
+#endif
+
+#ifndef EWRK3_RAM_BASE_ADDRESSES
+#define EWRK3_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0x00000}
+#endif
+
+/*
+** Sets up the I/O area for the autoprobe.
+*/
+#define EWRK3_IO_BASE 0x100 /* Start address for probe search */
+#define EWRK3_IOP_INC 0x20 /* I/O address increment */
+#define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */
+
+#ifndef MAX_NUM_EWRK3S
+#define MAX_NUM_EWRK3S 21
+#endif
+
+#ifndef EWRK3_EISA_IO_PORTS
+#define EWRK3_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#endif
+
+#ifndef MAX_EISA_SLOTS
+#define MAX_EISA_SLOTS 16
+#define EISA_SLOT_INC 0x1000
+#endif
+
+#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */
+#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
+
+#define QUEUE_PKT_TIMEOUT (100) /* Jiffies */
+
+/*
+** EtherWORKS 3 shared memory window sizes
+*/
+#define IO_ONLY 0x00
+#define SHMEM_2K 0x800
+#define SHMEM_32K 0x8000
+#define SHMEM_64K 0x10000
+
+/*
+** EtherWORKS 3 IRQ ENABLE/DISABLE
+*/
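+/* Note: these macros (like START_EWRK3/STOP_EWRK3 below) expect the local
+   variables 'icr' resp. 'csr' and 'lp' to exist in the calling function and
+   rely on the EWRK3_* register address macros from ewrk3.h, which appear to
+   use the caller's 'iobase'. */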
+#define ENABLE_IRQs { \
+ icr |= lp->irq_mask;\
+ outb(icr, EWRK3_ICR); /* Enable the IRQs */\
+}
+
+#define DISABLE_IRQs { \
+ icr = inb(EWRK3_ICR);\
+ icr &= ~lp->irq_mask;\
+ outb(icr, EWRK3_ICR); /* Disable the IRQs */\
+}
+
+/*
+** EtherWORKS 3 START/STOP
+*/
+#define START_EWRK3 { \
+ csr = inb(EWRK3_CSR);\
+ csr &= ~(CSR_TXD|CSR_RXD);\
+ outb(csr, EWRK3_CSR); /* Enable the TX and/or RX */\
+}
+
+#define STOP_EWRK3 { \
+ csr = (CSR_TXD|CSR_RXD);\
+ outb(csr, EWRK3_CSR); /* Disable the TX and/or RX */\
+}
+
+/*
+** The EtherWORKS 3 private structure
+*/
+#define EWRK3_PKT_STAT_SZ 16
+#define EWRK3_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase EWRK3_PKT_STAT_SZ */
+
+struct ewrk3_private {
+ char adapter_name[80]; /* Name exported to /proc/ioports */
+ u_long shmem_base; /* Shared memory start address */
+ u_long shmem_length; /* Shared memory window length */
+ struct enet_statistics stats; /* Public stats */
+ struct {
+ u32 bins[EWRK3_PKT_STAT_SZ]; /* Private stats counters */
+ u32 unicast;
+ u32 multicast;
+ u32 broadcast;
+ u32 excessive_collisions;
+ u32 tx_underruns;
+ u32 excessive_underruns;
+ } pktStats;
+ u_char irq_mask; /* Adapter IRQ mask bits */
+ u_char mPage; /* Maximum 2kB Page number */
+ u_char lemac; /* Chip rev. level */
+ u_char hard_strapped; /* Don't allow a full open */
+ u_char lock; /* Lock the page register */
+ u_char txc; /* Transmit cut through */
+ u_char *mctbl; /* Pointer to the multicast table */
+};
+
+/*
+** Force the EtherWORKS 3 card to be in 2kB MODE
+*/
+#define FORCE_2K_MODE { \
+ shmem_length = SHMEM_2K;\
+ outb(((mem_start - 0x80000) >> 11), EWRK3_MBR);\
+}
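+/* ((mem_start - 0x80000) >> 11) is the inverse of the 2kB decode done in
+   ewrk3_hw_init() (mem_start = MBR * SHMEM_2K + 0x80000), i.e. it converts
+   the RAM base address back into the memory base register value. */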
+
+/*
+** Public Functions
+*/
+static int ewrk3_open(struct device *dev);
+static int ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev);
+static void ewrk3_interrupt(int irq, struct pt_regs *regs);
+static int ewrk3_close(struct device *dev);
+static struct enet_statistics *ewrk3_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+static int ewrk3_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+
+/*
+** Private functions
+*/
+static int ewrk3_hw_init(struct device *dev, u_long iobase);
+static void ewrk3_init(struct device *dev);
+static int ewrk3_rx(struct device *dev);
+static int ewrk3_tx(struct device *dev);
+
+static void EthwrkSignature(char * name, char *eeprom_image);
+static int DevicePresent(u_long iobase);
+static void SetMulticastFilter(struct device *dev);
+static int EISA_signature(char *name, s32 eisa_id);
+
+static int Read_EEPROM(u_long iobase, u_char eaddr);
+static int Write_EEPROM(short data, u_long iobase, u_char eaddr);
+static u_char get_hw_addr (struct device *dev, u_char *eeprom_image, char chipType);
+
+static void isa_probe(struct device *dev, u_long iobase);
+static void eisa_probe(struct device *dev, u_long iobase);
+static struct device *alloc_device(struct device *dev, u_long iobase);
+
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static int autoprobed = 1, loading_module = 1;
+
+# else
+static u_char irq[] = {5,0,10,3,11,9,15,12};
+static int autoprobed = 0, loading_module = 0;
+
+#endif /* MODULE */
+
+static char name[EWRK3_STRLEN + 1];
+static int num_ewrk3s = 0, num_eth = 0;
+
+/*
+** Miscellaneous defines...
+*/
+#define INIT_EWRK3 {\
+ outb(EEPROM_INIT, EWRK3_IOPR);\
+ udelay(1000);\
+}
+
+
+
+
+int ewrk3_probe(struct device *dev)
+{
+ int tmp = num_ewrk3s, status = -ENODEV;
+ u_long iobase = dev->base_addr;
+
+ if ((iobase == 0) && loading_module){
+ printk("Autoprobing is not supported when loading a module based driver.\n");
+ status = -EIO;
+ } else { /* First probe for the Ethernet */
+ /* Address PROM pattern */
+ isa_probe(dev, iobase);
+ eisa_probe(dev, iobase);
+
+ if ((tmp == num_ewrk3s) && (iobase != 0) && loading_module) {
+ printk("%s: ewrk3_probe() cannot find device at 0x%04lx.\n", dev->name,
+ iobase);
+ }
+
+ /*
+ ** Walk the device list to check that at least one device
+ ** initialised OK
+ */
+ for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
+
+ if (dev->priv) status = 0;
+ if (iobase == 0) autoprobed = 1;
+ }
+
+ return status;
+}
+
+static int
+ewrk3_hw_init(struct device *dev, u_long iobase)
+{
+ struct ewrk3_private *lp;
+ int i, status=0;
+ u_long mem_start, shmem_length;
+ u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0;
+ u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0;
+
+ /*
+ ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot.
+ ** This also disables the EISA_ENABLE bit in the EISA Control Register.
+ */
+ if (iobase > 0x400) eisa_cr = inb(EISA_CR);
+ INIT_EWRK3;
+
+ nicsr = inb(EWRK3_CSR);
+
+ icr = inb(EWRK3_ICR);
+ icr |= 0xf0;
+ outb(icr, EWRK3_ICR); /* Disable all the IRQs */
+
+  if (nicsr == (CSR_TXD|CSR_RXD)) {
+
+ /* Check that the EEPROM is alive and well and not living on Pluto... */
+ for (chksum=0, i=0; i<EEPROM_MAX; i+=2) {
+ union {
+ short val;
+ char c[2];
+ } tmp;
+
+ tmp.val = (short)Read_EEPROM(iobase, (i>>1));
+ eeprom_image[i] = tmp.c[0];
+ eeprom_image[i+1] = tmp.c[1];
+ chksum += eeprom_image[i] + eeprom_image[i+1];
+ }
+
+ if (chksum != 0) { /* Bad EEPROM Data! */
+ printk("%s: Device has a bad on-board EEPROM.\n", dev->name);
+ status = -ENXIO;
+ } else {
+ EthwrkSignature(name, eeprom_image);
+ if (*name != '\0') { /* found a EWRK3 device */
+ dev->base_addr = iobase;
+
+ if (iobase > 0x400) {
+ outb(eisa_cr, EISA_CR); /* Rewrite the EISA CR */
+ }
+
+ lemac = eeprom_image[EEPROM_CHIPVER];
+ cmr = inb(EWRK3_CMR);
+
+ if (((lemac == LeMAC) && ((cmr & CMR_NO_EEPROM) != CMR_NO_EEPROM)) ||
+ ((lemac == LeMAC2) && !(cmr & CMR_HS))) {
+ printk("%s: %s at %#4lx", dev->name, name, iobase);
+ hard_strapped = 1;
+ } else if ((iobase&0x0fff)==EWRK3_EISA_IO_PORTS) {
+ /* EISA slot address */
+ printk("%s: %s at %#4lx (EISA slot %ld)",
+ dev->name, name, iobase, ((iobase>>12)&0x0f));
+ } else { /* ISA port address */
+ printk("%s: %s at %#4lx", dev->name, name, iobase);
+ }
+
+ if (!status) {
+ printk(", h/w address ");
+ if (lemac!=LeMAC2) DevicePresent(iobase);/* need after EWRK3_INIT */
+ status = get_hw_addr(dev, eeprom_image, lemac);
+ for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x,\n", dev->dev_addr[i]);
+
+ if (status) {
+ printk(" which has an EEPROM CRC error.\n");
+ status = -ENXIO;
+ } else {
+ if (lemac == LeMAC2) { /* Special LeMAC2 CMR things */
+ cmr &= ~(CMR_RA | CMR_WB | CMR_LINK | CMR_POLARITY | CMR_0WS);
+ if (eeprom_image[EEPROM_MISC0] & READ_AHEAD) cmr |= CMR_RA;
+ if (eeprom_image[EEPROM_MISC0] & WRITE_BEHIND) cmr |= CMR_WB;
+ if (eeprom_image[EEPROM_NETMAN0] & NETMAN_POL) cmr |= CMR_POLARITY;
+ if (eeprom_image[EEPROM_NETMAN0] & NETMAN_LINK) cmr |= CMR_LINK;
+ if (eeprom_image[EEPROM_MISC0] & _0WS_ENA) cmr |= CMR_0WS;
+ }
+ if (eeprom_image[EEPROM_SETUP] & SETUP_DRAM) cmr |= CMR_DRAM;
+ outb(cmr, EWRK3_CMR);
+
+ cr = inb(EWRK3_CR); /* Set up the Control Register */
+ cr |= eeprom_image[EEPROM_SETUP] & SETUP_APD;
+ if (cr & SETUP_APD) cr |= eeprom_image[EEPROM_SETUP] & SETUP_PS;
+ cr |= eeprom_image[EEPROM_MISC0] & FAST_BUS;
+ cr |= eeprom_image[EEPROM_MISC0] & ENA_16;
+ outb(cr, EWRK3_CR);
+
+ /*
+ ** Determine the base address and window length for the EWRK3
+ ** RAM from the memory base register.
+ */
+ mem_start = inb(EWRK3_MBR);
+ shmem_length = 0;
+ if (mem_start != 0) {
+ if ((mem_start >= 0x0a) && (mem_start <= 0x0f)) {
+ mem_start *= SHMEM_64K;
+ shmem_length = SHMEM_64K;
+ } else if ((mem_start >= 0x14) && (mem_start <= 0x1f)) {
+ mem_start *= SHMEM_32K;
+ shmem_length = SHMEM_32K;
+ } else if ((mem_start >= 0x40) && (mem_start <= 0xff)) {
+ mem_start = mem_start * SHMEM_2K + 0x80000;
+ shmem_length = SHMEM_2K;
+ } else {
+ status = -ENXIO;
+ }
+ }
+
+ /*
+ ** See the top of this source code for comments about
+ ** uncommenting this line.
+ */
+/* FORCE_2K_MODE;*/
+
+ if (!status) {
+ if (hard_strapped) {
+ printk(" is hard strapped.\n");
+ } else if (mem_start) {
+ printk(" has a %dk RAM window", (int)(shmem_length >> 10));
+ printk(" at 0x%.5lx", mem_start);
+ } else {
+ printk(" is in I/O only mode");
+ }
+
+ /* private area & initialise */
+ dev->priv = (void *) kmalloc(sizeof(struct ewrk3_private),
+ GFP_KERNEL);
+ if (dev->priv == NULL) {
+ return -ENOMEM;
+ }
+ lp = (struct ewrk3_private *)dev->priv;
+ memset(dev->priv, 0, sizeof(struct ewrk3_private));
+ lp->shmem_base = mem_start;
+ lp->shmem_length = shmem_length;
+ lp->lemac = lemac;
+ lp->hard_strapped = hard_strapped;
+
+ lp->mPage = 64;
+ if (cmr & CMR_DRAM) lp->mPage <<= 1 ;/* 2 DRAMS on module */
+
+ sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
+ request_region(iobase, EWRK3_TOTAL_SIZE, lp->adapter_name);
+
+ lp->irq_mask = ICR_TNEM|ICR_TXDM|ICR_RNEM|ICR_RXDM;
+
+ if (!hard_strapped) {
+ /*
+ ** Enable EWRK3 board interrupts for autoprobing
+ */
+ icr |= ICR_IE; /* Enable interrupts */
+ outb(icr, EWRK3_ICR);
+
+ /* The DMA channel may be passed in on this parameter. */
+ dev->dma = 0;
+
+ /* To auto-IRQ we enable the initialization-done and DMA err,
+ interrupts. For now we will always get a DMA error. */
+ if (dev->irq < 2) {
+#ifndef MODULE
+ u_char irqnum;
+
+ autoirq_setup(0);
+
+ /*
+ ** Trigger a TNE interrupt.
+ */
+ icr |=ICR_TNEM;
+ outb(1,EWRK3_TDQ); /* Write to the TX done queue */
+ outb(icr, EWRK3_ICR); /* Unmask the TXD interrupt */
+
+ irqnum = irq[((icr & IRQ_SEL) >> 4)];
+
+ dev->irq = autoirq_report(1);
+ if ((dev->irq) && (irqnum == dev->irq)) {
+ printk(" and uses IRQ%d.\n", dev->irq);
+ } else {
+ if (!dev->irq) {
+ printk(" and failed to detect IRQ line.\n");
+ } else if ((irqnum == 1) && (lemac == LeMAC2)) {
+ printk(" and an illegal IRQ line detected.\n");
+ } else {
+ printk(", but incorrect IRQ line detected.\n");
+ }
+ status = -ENXIO;
+ }
+
+ DISABLE_IRQs; /* Mask all interrupts */
+
+#endif /* MODULE */
+ } else {
+ printk(" and requires IRQ%d.\n", dev->irq);
+ }
+ }
+ if (status) release_region(iobase, EWRK3_TOTAL_SIZE);
+ } else {
+ status = -ENXIO;
+ }
+ }
+ }
+ } else {
+ status = -ENXIO;
+ }
+ }
+
+ if (!status) {
+ if (ewrk3_debug > 0) {
+ printk(version);
+ }
+
+ /* The EWRK3-specific entries in the device structure. */
+ dev->open = &ewrk3_open;
+ dev->hard_start_xmit = &ewrk3_queue_pkt;
+ dev->stop = &ewrk3_close;
+ dev->get_stats = &ewrk3_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &ewrk3_ioctl;
+
+ dev->mem_start = 0;
+
+ /* Fill in the generic field of the device structure. */
+ ether_setup(dev);
+ }
+ } else {
+ status = -ENXIO;
+ }
+
+ return status;
+}
+
+
+static int
+ewrk3_open(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ u_char icr, csr;
+
+ /*
+ ** Stop the TX and RX...
+ */
+ STOP_EWRK3;
+
+ if (!lp->hard_strapped) {
+ irq2dev_map[dev->irq] = dev; /* For latched interrupts */
+
+ if (request_irq(dev->irq, (void *)ewrk3_interrupt, 0, "ewrk3")) {
+ printk("ewrk3_open(): Requested IRQ%d is busy\n",dev->irq);
+ status = -EAGAIN;
+ } else {
+
+ /*
+ ** Re-initialize the EWRK3...
+ */
+ ewrk3_init(dev);
+
+ if (ewrk3_debug > 1){
+ printk("%s: ewrk3 open with irq %d\n",dev->name,dev->irq);
+ printk(" physical address: ");
+ for (i=0;i<5;i++){
+ printk("%2.2x:",(u_char)dev->dev_addr[i]);
+ }
+ printk("%2.2x\n",(u_char)dev->dev_addr[i]);
+ if (lp->shmem_length == 0) {
+ printk(" no shared memory, I/O only mode\n");
+ } else {
+ printk(" start of shared memory: 0x%08lx\n",lp->shmem_base);
+ printk(" window length: 0x%04lx\n",lp->shmem_length);
+ }
+ printk(" # of DRAMS: %d\n",((inb(EWRK3_CMR) & 0x02) ? 2 : 1));
+ printk(" csr: 0x%02x\n", inb(EWRK3_CSR));
+ printk(" cr: 0x%02x\n", inb(EWRK3_CR));
+ printk(" icr: 0x%02x\n", inb(EWRK3_ICR));
+ printk(" cmr: 0x%02x\n", inb(EWRK3_CMR));
+ printk(" fmqc: 0x%02x\n", inb(EWRK3_FMQC));
+ }
+
+ dev->tbusy = 0;
+ dev->start = 1;
+ dev->interrupt = UNMASK_INTERRUPTS;
+
+ /*
+ ** Unmask EWRK3 board interrupts
+ */
+ icr = inb(EWRK3_ICR);
+ ENABLE_IRQs;
+
+ }
+ } else {
+ dev->start = 0;
+ dev->tbusy = 1;
+ printk("%s: ewrk3 available for hard strapped set up only.\n", dev->name);
+ printk(" Run the 'ewrk3setup' utility or remove the hard straps.\n");
+ }
+
+ MOD_INC_USE_COUNT;
+
+ return status;
+}
+
+/*
+** Initialize the EtherWORKS 3 operating conditions
+*/
+static void
+ewrk3_init(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_char csr, page;
+ u_long iobase = dev->base_addr;
+
+ /*
+ ** Enable any multicasts
+ */
+ set_multicast_list(dev);
+
+ /*
+ ** Clean out any remaining entries in all the queues here
+ */
+ while (inb(EWRK3_TQ));
+ while (inb(EWRK3_TDQ));
+ while (inb(EWRK3_RQ));
+ while (inb(EWRK3_FMQ));
+
+ /*
+ ** Write a clean free memory queue
+ */
+ for (page=1;page<lp->mPage;page++) { /* Write the free page numbers */
+ outb(page, EWRK3_FMQ); /* to the Free Memory Queue */
+ }
+
+ lp->lock = 0; /* Ensure there are no locks */
+
+ START_EWRK3; /* Enable the TX and/or RX */
+}
+
+/*
+** Writes a socket buffer to the free page queue
+*/
+static int
+ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int status = 0;
+ u_char icr, csr;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy || lp->lock) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < QUEUE_PKT_TIMEOUT) {
+ status = -1;
+ } else if (!lp->hard_strapped) {
+ printk("%s: transmit timed/locked out, status %04x, resetting.\n",
+ dev->name, inb(EWRK3_CSR));
+
+ /*
+ ** Mask all board interrupts
+ */
+ DISABLE_IRQs;
+
+ /*
+ ** Stop the TX and RX...
+ */
+ STOP_EWRK3;
+
+ ewrk3_init(dev);
+
+ /*
+ ** Unmask EWRK3 board interrupts
+ */
+ ENABLE_IRQs;
+
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+ } else if (skb == NULL) {
+ dev_tint(dev);
+ } else if (skb->len > 0) {
+
+ /*
+ ** Block a timer-based transmit from overlapping. This could better be
+ ** done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+
+ DISABLE_IRQs; /* So that the page # remains correct */
+
+ /*
+ ** Get a free page from the FMQ when resources are available
+ */
+ if (inb(EWRK3_FMQC) > 0) {
+ u_long buf = 0;
+ u_char page;
+
+ if ((page = inb(EWRK3_FMQ)) < lp->mPage) {
+ /*
+ ** Set up shared memory window and pointer into the window
+ */
+ while (set_bit(0, (void *)&lp->lock) != 0); /* Wait for lock to free */
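+	  /* Each buffer page is 2kB.  In the 32kB/64kB shared memory modes the
+	     low bits of the page number (page << 11) form the offset inside the
+	     mapped window, while the high bits written to EWRK3_MPR select which
+	     part of the on-board RAM the window maps. */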
+ if (lp->shmem_length == IO_ONLY) {
+ outb(page, EWRK3_IOPR);
+ } else if (lp->shmem_length == SHMEM_2K) {
+ buf = lp->shmem_base;
+ outb(page, EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_32K) {
+ buf = ((((short)page << 11) & 0x7800) + lp->shmem_base);
+ outb((page >> 4), EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_64K) {
+ buf = ((((short)page << 11) & 0xf800) + lp->shmem_base);
+ outb((page >> 5), EWRK3_MPR);
+ } else {
+ status = -1;
+ printk("%s: Oops - your private data area is hosed!\n",dev->name);
+ }
+
+ if (!status) {
+
+ /*
+ ** Set up the buffer control structures and copy the data from
+ ** the socket buffer to the shared memory .
+ */
+
+ if (lp->shmem_length == IO_ONLY) {
+ int i;
+ u_char *p = skb->data;
+
+ outb((char)(TCR_QMODE | TCR_PAD | TCR_IFC), EWRK3_DATA);
+ outb((char)(skb->len & 0xff), EWRK3_DATA);
+ outb((char)((skb->len >> 8) & 0xff), EWRK3_DATA);
+ outb((char)0x04, EWRK3_DATA);
+ for (i=0; i<skb->len; i++) {
+ outb(*p++, EWRK3_DATA);
+ }
+ outb(page, EWRK3_TQ); /* Start sending pkt */
+ } else {
+ writeb((char)(TCR_QMODE|TCR_PAD|TCR_IFC), (char *)buf);/* ctrl byte*/
+ buf+=1;
+ writeb((char)(skb->len & 0xff), (char *)buf);/* length (16 bit xfer)*/
+ buf+=1;
+ if (lp->txc) {
+ writeb((char)(((skb->len >> 8) & 0xff) | XCT), (char *)buf);
+ buf+=1;
+ writeb(0x04, (char *)buf); /* index byte */
+ buf+=1;
+ writeb(0x00, (char *)(buf + skb->len)); /* Write the XCT flag */
+ memcpy_toio(buf, skb->data, PRELOAD);/* Write PRELOAD bytes*/
+ outb(page, EWRK3_TQ); /* Start sending pkt */
+ memcpy_toio(buf+PRELOAD, skb->data+PRELOAD, skb->len-PRELOAD);
+ writeb(0xff, (char *)(buf + skb->len)); /* Write the XCT flag */
+ } else {
+ writeb((char)((skb->len >> 8) & 0xff), (char *)buf);
+ buf+=1;
+ writeb(0x04, (char *)buf); /* index byte */
+ buf+=1;
+ memcpy_toio((char *)buf, skb->data, skb->len);/* Write data bytes */
+ outb(page, EWRK3_TQ); /* Start sending pkt */
+ }
+ }
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ } else { /* return unused page to the free memory queue */
+ outb(page, EWRK3_FMQ);
+ }
+ lp->lock = 0; /* unlock the page register */
+ } else {
+ printk("ewrk3_queue_pkt(): Invalid free memory page (%d).\n",
+ (u_char) page);
+ }
+ } else {
+ printk("ewrk3_queue_pkt(): No free resources...\n");
+ printk("ewrk3_queue_pkt(): CSR: %02x ICR: %02x FMQC: %02x\n",inb(EWRK3_CSR),inb(EWRK3_ICR),inb(EWRK3_FMQC));
+ }
+
+ /* Check for free resources: clear 'tbusy' if there are some */
+ if (inb(EWRK3_FMQC) > 0) {
+ dev->tbusy = 0;
+ }
+
+ ENABLE_IRQs;
+ }
+
+ return status;
+}
+
+/*
+** The EWRK3 interrupt handler.
+*/
+static void
+ewrk3_interrupt(int irq, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct ewrk3_private *lp;
+ u_long iobase;
+ u_char icr, cr, csr;
+
+ if (dev == NULL) {
+ printk ("ewrk3_interrupt(): irq %d for unknown device.\n", irq);
+ } else {
+ lp = (struct ewrk3_private *)dev->priv;
+ iobase = dev->base_addr;
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = MASK_INTERRUPTS;
+
+ /* get the interrupt information */
+ csr = inb(EWRK3_CSR);
+
+ /*
+ ** Mask the EWRK3 board interrupts and turn on the LED
+ */
+ DISABLE_IRQs;
+
+ cr = inb(EWRK3_CR);
+ cr |= CR_LED;
+ outb(cr, EWRK3_CR);
+
+ if (csr & CSR_RNE) /* Rx interrupt (packet[s] arrived) */
+ ewrk3_rx(dev);
+
+ if (csr & CSR_TNE) /* Tx interrupt (packet sent) */
+ ewrk3_tx(dev);
+
+ /*
+ ** Now deal with the TX/RX disable flags. These are set when there
+ ** are no more resources. If resources free up then enable these
+ ** interrupts, otherwise mask them - failure to do this will result
+ ** in the system hanging in an interrupt loop.
+ */
+ if (inb(EWRK3_FMQC)) { /* any resources available? */
+ lp->irq_mask |= ICR_TXDM|ICR_RXDM;/* enable the interrupt source */
+ csr &= ~(CSR_TXD|CSR_RXD);/* ensure restart of a stalled TX or RX */
+ outb(csr, EWRK3_CSR);
+ dev->tbusy = 0; /* clear TX busy flag */
+ mark_bh(NET_BH);
+ } else {
+ lp->irq_mask &= ~(ICR_TXDM|ICR_RXDM);/* disable the interrupt source */
+ }
+
+ /* Unmask the EWRK3 board interrupts and turn off the LED */
+ cr &= ~CR_LED;
+ outb(cr, EWRK3_CR);
+
+ dev->interrupt = UNMASK_INTERRUPTS;
+ ENABLE_IRQs;
+ }
+
+ return;
+}
+
+static int
+ewrk3_rx(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ u_char page, tmpPage = 0, tmpLock = 0;
+ u_long buf = 0;
+
+ while (inb(EWRK3_RQC) && !status) { /* Whilst there's incoming data */
+ if ((page = inb(EWRK3_RQ)) < lp->mPage) {/* Get next entry's buffer page */
+ /*
+ ** Preempt any process using the current page register. Check for
+ ** an existing lock to reduce time taken in I/O transactions.
+ */
+ if ((tmpLock = set_bit(0, (void *)&lp->lock)) == 1) { /* Assert lock */
+ if (lp->shmem_length == IO_ONLY) { /* Get existing page */
+ tmpPage = inb(EWRK3_IOPR);
+ } else {
+ tmpPage = inb(EWRK3_MPR);
+ }
+ }
+
+ /*
+ ** Set up shared memory window and pointer into the window
+ */
+ if (lp->shmem_length == IO_ONLY) {
+ outb(page, EWRK3_IOPR);
+ } else if (lp->shmem_length == SHMEM_2K) {
+ buf = lp->shmem_base;
+ outb(page, EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_32K) {
+ buf = ((((short)page << 11) & 0x7800) + lp->shmem_base);
+ outb((page >> 4), EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_64K) {
+ buf = ((((short)page << 11) & 0xf800) + lp->shmem_base);
+ outb((page >> 5), EWRK3_MPR);
+ } else {
+ status = -1;
+ printk("%s: Oops - your private data area is hosed!\n",dev->name);
+ }
+
+ if (!status) {
+ char rx_status;
+ int pkt_len;
+
+ if (lp->shmem_length == IO_ONLY) {
+ rx_status = inb(EWRK3_DATA);
+ pkt_len = inb(EWRK3_DATA);
+ pkt_len |= ((u_short)inb(EWRK3_DATA) << 8);
+ } else {
+ rx_status = readb(buf);
+ buf+=1;
+ pkt_len = readw(buf);
+ buf+=3;
+ }
+
+ if (!(rx_status & R_ROK)) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (rx_status & R_DBE) lp->stats.rx_frame_errors++;
+ if (rx_status & R_CRC) lp->stats.rx_crc_errors++;
+ if (rx_status & R_PLL) lp->stats.rx_fifo_errors++;
+ } else {
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) {
+ unsigned char *p;
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align IP header on a 16 byte boundary */
+ p = skb_put(skb,pkt_len);
+
+ if (lp->shmem_length == IO_ONLY) {
+ *p = inb(EWRK3_DATA); /* dummy read */
+ for (i=0; i<pkt_len; i++) {
+ *p++ = inb(EWRK3_DATA);
+ }
+ } else {
+ memcpy_fromio(p, buf, pkt_len);
+ }
+
+ /*
+ ** Notify the upper protocol layers that there is another
+ ** packet to handle
+ */
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+
+ /*
+ ** Update stats
+ */
+ lp->stats.rx_packets++;
+ for (i=1; i<EWRK3_PKT_STAT_SZ-1; i++) {
+ if (pkt_len < i*EWRK3_PKT_BIN_SZ) {
+ lp->pktStats.bins[i]++;
+ i = EWRK3_PKT_STAT_SZ;
+ }
+ }
+ p = skb->data; /* Look at the dest addr */
+ if (p[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s32 *)&p[0] == -1) && (*(s16 *)&p[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s32 *)&p[0] == *(s32 *)&dev->dev_addr[0]) &&
+ (*(s16 *)&p[4] == *(s16 *)&dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+ } else {
+ printk("%s: Insufficient memory; nuking packet.\n", dev->name);
+ lp->stats.rx_dropped++; /* Really, deferred. */
+ break;
+ }
+ }
+ }
+ /*
+ ** Return the received buffer to the free memory queue
+ */
+ outb(page, EWRK3_FMQ);
+
+ if (tmpLock) { /* If a lock was preempted */
+ if (lp->shmem_length == IO_ONLY) { /* Replace old page */
+ outb(tmpPage, EWRK3_IOPR);
+ } else {
+ outb(tmpPage, EWRK3_MPR);
+ }
+ }
+ lp->lock = 0; /* Unlock the page register */
+ } else {
+ printk("ewrk3_rx(): Illegal page number, page %d\n",page);
+ printk("ewrk3_rx(): CSR: %02x ICR: %02x FMQC: %02x\n",inb(EWRK3_CSR),inb(EWRK3_ICR),inb(EWRK3_FMQC));
+ }
+ }
+ return status;
+}
+
+/*
+** Buffer sent - check for TX buffer errors.
+*/
+static int
+ewrk3_tx(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ u_char tx_status;
+
+ while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */
+ if (tx_status & T_VSTS) { /* The status is valid */
+ if (tx_status & T_TXE) {
+ lp->stats.tx_errors++;
+ if (tx_status & T_NCL) lp->stats.tx_carrier_errors++;
+ if (tx_status & T_LCL) lp->stats.tx_window_errors++;
+ if (tx_status & T_CTU) {
+ if ((tx_status & T_COLL) ^ T_XUR) {
+ lp->pktStats.tx_underruns++;
+ } else {
+ lp->pktStats.excessive_underruns++;
+ }
+ } else if (tx_status & T_COLL) {
+ if ((tx_status & T_COLL) ^ T_XCOLL) {
+ lp->stats.collisions++;
+ } else {
+ lp->pktStats.excessive_collisions++;
+ }
+ }
+ } else {
+ lp->stats.tx_packets++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+ewrk3_close(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ u_char icr, csr;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (ewrk3_debug > 1) {
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inb(EWRK3_CSR));
+ }
+
+ /*
+ ** We stop the EWRK3 here... mask interrupts and stop TX & RX
+ */
+ DISABLE_IRQs;
+
+ STOP_EWRK3;
+
+ /*
+ ** Clean out the TX and RX queues here (note that one entry
+ ** may get added to either the TXD or RX queues if the TX or RX
+ ** just starts processing a packet before the STOP_EWRK3 command
+ ** is received. This will be flushed in the ewrk3_open() call).
+ */
+ while (inb(EWRK3_TQ));
+ while (inb(EWRK3_TDQ));
+ while (inb(EWRK3_RQ));
+
+ if (!lp->hard_strapped) {
+ free_irq(dev->irq);
+
+ irq2dev_map[dev->irq] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct enet_statistics *
+ewrk3_get_stats(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+
+ /* Null body since there is no framing error counter */
+
+ return &lp->stats;
+}
+
+/*
+** Set or clear the multicast filter for this adaptor.
+*/
+static void
+set_multicast_list(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ u_char csr;
+
+ if (irq2dev_map[dev->irq] != NULL) {
+ csr = inb(EWRK3_CSR);
+
+ if (lp->shmem_length == IO_ONLY) {
+ lp->mctbl = (char *) PAGE0_HTE;
+ } else {
+ lp->mctbl = (char *)(lp->shmem_base + PAGE0_HTE);
+ }
+
+ csr &= ~(CSR_PME | CSR_MCE);
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ csr |= CSR_PME;
+ outb(csr, EWRK3_CSR);
+ } else {
+ SetMulticastFilter(dev);
+ csr |= CSR_MCE;
+ outb(csr, EWRK3_CSR);
+ }
+ }
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Little endian crc one liner from Matt Thomas, DEC.
+**
+** Note that when clearing the table, the broadcast bit must remain asserted
+** to receive broadcast messages.
+*/
+static void SetMulticastFilter(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ struct dev_mc_list *dmi=dev->mc_list;
+ u_long iobase = dev->base_addr;
+ int i;
+ char *addrs, j, bit, byte;
+ short *p = (short *) lp->mctbl;
+ u16 hashcode;
+ s32 crc, poly = CRC_POLYNOMIAL_LE;
+
+ while (set_bit(0, (void *)&lp->lock) != 0); /* Wait for lock to free */
+
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0, EWRK3_IOPR);
+ outw(EEPROM_OFFSET(lp->mctbl), EWRK3_PIR1);
+ } else {
+ outb(0, EWRK3_MPR);
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i=0; i<(HASH_TABLE_LEN >> 3); i++) {
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0xff, EWRK3_DATA);
+ } else { /* memset didn't work here */
+ writew(0xffff, p);
+ p++; i++;
+ }
+ }
+ } else {
+ /* Clear table except for broadcast bit */
+ if (lp->shmem_length == IO_ONLY) {
+ for (i=0; i<(HASH_TABLE_LEN >> 4) - 1; i++) {
+ outb(0x00, EWRK3_DATA);
+ }
+ outb(0x80, EWRK3_DATA); i++; /* insert the broadcast bit */
+ for (; i<(HASH_TABLE_LEN >> 3); i++) {
+ outb(0x00, EWRK3_DATA);
+ }
+ } else {
+ memset_io(lp->mctbl, 0, (HASH_TABLE_LEN >> 3));
+ writeb(0x80, (char *)(lp->mctbl + (HASH_TABLE_LEN >> 4) - 1));
+ }
+
+ /* Update table */
+ for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
+ addrs=dmi->dmi_addr;
+ dmi=dmi->next;
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
+ crc = 0xffffffff; /* init CRC for each address */
+ for (byte=0;byte<ETH_ALEN;byte++) { /* for each address byte */
+ /* process each address bit */
+ for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
+ crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
+ }
+ }
+ hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
+
+ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+
+ if (lp->shmem_length == IO_ONLY) {
+ u_char tmp;
+
+ outw((short)((long)lp->mctbl) + byte, EWRK3_PIR1);
+ tmp = inb(EWRK3_DATA);
+ tmp |= bit;
+ outw((short)((long)lp->mctbl) + byte, EWRK3_PIR1);
+ outb(tmp, EWRK3_DATA);
+ } else {
+ writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte);
+ }
+ }
+ }
+ }
+
+ lp->lock = 0; /* Unlock the page register */
+
+ return;
+}
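+
+/*
+** Editor's illustrative sketch -- not part of the original driver: the
+** 9-bit hash computed above, factored into a standalone helper. It only
+** assumes ETH_ALEN and CRC_POLYNOMIAL_LE as already used in this file;
+** the helper name is hypothetical.
+*/
+static u16 ewrk3_hash_sketch(u_char *addr)
+{
+ s32 crc = 0xffffffff; /* same seed as SetMulticastFilter() */
+ u_char cur;
+ int byte, j;
+
+ for (byte = 0; byte < ETH_ALEN; byte++) { /* LSB of each byte first */
+ for (cur = addr[byte], j = 0; j < 8; j++, cur >>= 1) {
+ crc = (crc >> 1) ^ (((crc ^ cur) & 0x01) ? CRC_POLYNOMIAL_LE : 0);
+ }
+ }
+ return (u16)(crc & ((1 << 9) - 1)); /* 9 LSbs of the CRC select the filter bit */
+}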
+
+/*
+** ISA bus I/O device probe
+*/
+static void isa_probe(struct device *dev, u_long ioaddr)
+{
+ int i = num_ewrk3s, maxSlots;
+ u_long iobase;
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+ if (ioaddr >= 0x400) return; /* Not ISA */
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EWRK3_IO_BASE; /* Get the first slot address */
+ maxSlots = 24;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ maxSlots = i + 1;
+ }
+
+ for (; (i<maxSlots) && (dev!=NULL);iobase+=EWRK3_IOP_INC, i++) {
+ if (!check_region(iobase, EWRK3_TOTAL_SIZE)) {
+ if (DevicePresent(iobase) == 0) {
+ if ((dev = alloc_device(dev, iobase)) != NULL) {
+ if (ewrk3_hw_init(dev, iobase) == 0) {
+ num_ewrk3s++;
+ }
+ num_eth++;
+ }
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
+ }
+ }
+
+ return;
+}
+
+/*
+** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+** the motherboard.
+*/
+static void eisa_probe(struct device *dev, u_long ioaddr)
+{
+ int i, maxSlots;
+ u_long iobase;
+ char name[EWRK3_STRLEN];
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+ if (ioaddr < 0x1000) return; /* Not EISA */
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EISA_SLOT_INC; /* Get the first slot address */
+ i = 1;
+ maxSlots = MAX_EISA_SLOTS;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ i = (ioaddr >> 12);
+ maxSlots = i + 1;
+ }
+
+ for (; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) { /* i, iobase set above */
+ if (EISA_signature(name, EISA_ID) == 0) {
+ if (!check_region(iobase, EWRK3_TOTAL_SIZE)) {
+ if (DevicePresent(iobase) == 0) {
+ if ((dev = alloc_device(dev, iobase)) != NULL) {
+ if (ewrk3_hw_init(dev, iobase) == 0) {
+ num_ewrk3s++;
+ }
+ num_eth++;
+ }
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** Allocate the device by pointing to the next available space in the
+** device structure. Should one not be available, it is created.
+*/
+static struct device *alloc_device(struct device *dev, u_long iobase)
+{
+ int addAutoProbe = 0;
+ struct device *tmp = NULL, *ret;
+ int (*init)(struct device *) = NULL;
+
+ /*
+ ** Check the device structures for an end of list or unused device
+ */
+ if (!loading_module) {
+ while (dev->next != NULL) {
+ if ((dev->base_addr == EWRK3_NDA) || (dev->base_addr == 0)) break;
+ dev = dev->next; /* walk through eth device list */
+ num_eth++; /* increment eth device number */
+ }
+
+ /*
+ ** If an autoprobe is requested for another device, we must re-insert
+ ** the request later in the list. Remember the current position first.
+ */
+ if ((dev->base_addr == 0) && (num_ewrk3s > 0)) {
+ addAutoProbe++;
+ tmp = dev->next; /* point to the next device */
+ init = dev->init; /* remember the probe function */
+ }
+
+ /*
+ ** If at end of list and can't use current entry, malloc one up.
+ ** If memory could not be allocated, print an error message.
+ */
+ if ((dev->next == NULL) &&
+ !((dev->base_addr == EWRK3_NDA) || (dev->base_addr == 0))){
+ dev->next = (struct device *)kmalloc(sizeof(struct device) + 8,
+ GFP_KERNEL);
+
+ dev = dev->next; /* point to the new device */
+ if (dev == NULL) {
+ printk("eth%d: Device not initialised, insufficient memory\n",
+ num_eth);
+ } else {
+ /*
+ ** If the memory was allocated, point to the new memory area
+ ** and initialize it (name, I/O address, next device (NULL) and
+ ** initialisation probe routine).
+ */
+ dev->name = (char *)(dev + 1); /* name storage follows the struct */
+ if (num_eth > 9999) {
+ sprintf(dev->name,"eth????"); /* New device name */
+ } else {
+ sprintf(dev->name,"eth%d", num_eth);/* New device name */
+ }
+ dev->base_addr = iobase; /* assign the io address */
+ dev->next = NULL; /* mark the end of list */
+ dev->init = &ewrk3_probe; /* initialisation routine */
+ num_ewrk3s++;
+ }
+ }
+ ret = dev; /* return current struct, or NULL */
+
+ /*
+ ** Now figure out what to do with the autoprobe that has to be inserted.
+ ** Firstly, search the (possibly altered) list for an empty space.
+ */
+ if (ret != NULL) {
+ if (addAutoProbe) {
+ for (;(tmp->next!=NULL) && (tmp->base_addr!=EWRK3_NDA); tmp=tmp->next);
+
+ /*
+ ** If no more device structures and can't use the current one, malloc
+ ** one up. If memory could not be allocated, print an error message.
+ */
+ if ((tmp->next == NULL) && !(tmp->base_addr == EWRK3_NDA)) {
+ tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
+ GFP_KERNEL);
+ tmp = tmp->next; /* point to the new device */
+ if (tmp == NULL) {
+ printk("%s: Insufficient memory to extend the device list.\n",
+ dev->name);
+ } else {
+ /*
+ ** If the memory was allocated, point to the new memory area
+ ** and initialize it (name, I/O address, next device (NULL) and
+ ** initialisation probe routine).
+ */
+ tmp->name = (char *)(tmp + 1); /* name storage follows the struct */
+ if (num_eth > 9999) {
+ sprintf(tmp->name,"eth????"); /* New device name */
+ } else {
+ sprintf(tmp->name,"eth%d", num_eth);/* New device name */
+ }
+ tmp->base_addr = 0; /* re-insert the io address */
+ tmp->next = NULL; /* mark the end of list */
+ tmp->init = init; /* initialisation routine */
+ }
+ } else { /* structure already exists */
+ tmp->base_addr = 0; /* re-insert the io address */
+ }
+ }
+ }
+ } else {
+ ret = dev;
+ }
+
+ return ret;
+}
+
+/*
+** Read the EWRK3 EEPROM using this routine
+*/
+static int Read_EEPROM(u_long iobase, u_char eaddr)
+{
+ int i;
+
+ outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
+ outb(EEPROM_RD, EWRK3_IOPR); /* issue read command */
+ for (i=0;i<5000;i++) inb(EWRK3_CSR); /* wait 1msec */
+
+ return inw(EWRK3_EPROM1); /* 16 bits data return */
+}
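+
+/*
+** Editor's usage sketch -- not part of the original driver: the
+** EWRK3_GET_EEPROM ioctl below reads the whole EEPROM_MAX byte image a
+** word at a time, essentially
+**
+** for (i = 0; i < (EEPROM_MAX >> 1); i++)
+** image.val[i] = (u_short)Read_EEPROM(iobase, i);
+**
+** and an image laid out this way (low byte first on the i386) is what
+** EthwrkSignature() and get_hw_addr() later index by byte offset.
+*/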
+
+/*
+** Write the EWRK3 EEPROM using this routine
+*/
+static int Write_EEPROM(short data, u_long iobase, u_char eaddr)
+{
+ int i;
+
+ outb(EEPROM_WR_EN, EWRK3_IOPR); /* issue write enable command */
+ for (i=0;i<5000;i++) inb(EWRK3_CSR); /* wait 1msec */
+ outw(data, EWRK3_EPROM1); /* write data to register */
+ outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
+ outb(EEPROM_WR, EWRK3_IOPR); /* issue write command */
+ for (i=0;i<75000;i++) inb(EWRK3_CSR); /* wait 15msec */
+ outb(EEPROM_WR_DIS, EWRK3_IOPR); /* issue write disable command */
+ for (i=0;i<5000;i++) inb(EWRK3_CSR); /* wait 1msec */
+
+ return 0;
+}
+
+/*
+** Look for a particular board name in the on-board EEPROM.
+*/
+static void EthwrkSignature(char *name, char *eeprom_image)
+{
+ u_long i,j,k;
+ char *signatures[] = EWRK3_SIGNATURE;
+
+ strcpy(name, "");
+ for (i=0;*signatures[i] != '\0' && *name == '\0';i++) {
+ for (j=EEPROM_PNAME7,k=0;j<=EEPROM_PNAME0 && k<strlen(signatures[i]);j++) {
+ if (signatures[i][k] == eeprom_image[j]) { /* track signature */
+ k++;
+ } else { /* lost signature; begin search again */
+ k=0;
+ }
+ }
+ if (k == strlen(signatures[i])) {
+ for (k=0; k<EWRK3_STRLEN; k++) {
+ name[k] = eeprom_image[EEPROM_PNAME7 + k];
+ name[EWRK3_STRLEN] = '\0';
+ }
+ }
+ }
+
+ return; /* name[] now holds the device name string (or "") */
+}
+
+/*
+** Look for a special sequence in the Ethernet station address PROM that
+** is common across all EWRK3 products.
+**
+** Search the Ethernet address ROM for the signature. Since the ROM address
+** counter can start at an arbitrary point, the search must include the entire
+** probe sequence length plus the (length_of_the_signature - 1).
+** Stop the search IMMEDIATELY after the signature is found so that the
+** PROM address counter is correctly positioned at the start of the
+** ethernet address for later read out.
+*/
+
+static int DevicePresent(u_long iobase)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ } dev;
+ short sigLength;
+ char data;
+ int i, j, status = 0;
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
+ data = inb(EWRK3_APROM);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) {
+ j=1;
+ } else {
+ j=0;
+ }
+ }
+ }
+
+ if (j!=sigLength) {
+ status = -ENODEV; /* search failed */
+ }
+
+ return status;
+}
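+
+/*
+** Editor's illustrative sketch -- not part of the original driver: the same
+** rolling match that DevicePresent() runs against the address PROM port,
+** shown over an in-memory byte stream. Returns 0 when the signature is
+** found, -1 otherwise. Names are hypothetical.
+*/
+static int find_signature_sketch(const char *stream, int len,
+ const char *sig, int siglen)
+{
+ int i, j;
+
+ for (i = 0, j = 0; j < siglen && i < len; i++) {
+ if (stream[i] == sig[j]) { /* track signature */
+ j++;
+ } else { /* lost it; the current byte may restart the match */
+ j = (stream[i] == sig[0]) ? 1 : 0;
+ }
+ }
+
+ return (j == siglen) ? 0 : -1;
+}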
+
+static u_char get_hw_addr(struct device *dev, u_char *eeprom_image, char chipType)
+{
+ int i, j, k;
+ u_short chksum;
+ u_char crc, lfsr, sd, status = 0;
+ u_long iobase = dev->base_addr;
+ u16 tmp;
+
+ if (chipType == LeMAC2) {
+ for (crc=0x6a, j=0; j<ETH_ALEN; j++) {
+ sd = dev->dev_addr[j] = eeprom_image[EEPROM_PADDR0 + j];
+ outb(dev->dev_addr[j], EWRK3_PAR0 + j);
+ for (k=0; k<8; k++, sd >>= 1) {
+ lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7;
+ crc = (crc >> 1) + lfsr;
+ }
+ }
+ if (crc != eeprom_image[EEPROM_PA_CRC]) status = -1;
+ } else {
+ for (i=0,k=0;i<ETH_ALEN;) {
+ k <<= 1 ;
+ if (k > 0xffff) k-=0xffff;
+
+ k += (u_char) (tmp = inb(EWRK3_APROM));
+ dev->dev_addr[i] = (u_char) tmp;
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+ i++;
+ k += (u_short) ((tmp = inb(EWRK3_APROM)) << 8);
+ dev->dev_addr[i] = (u_char) tmp;
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+ i++;
+
+ if (k > 0xffff) k-=0xffff;
+ }
+ if (k == 0xffff) k=0;
+ chksum = inb(EWRK3_APROM);
+ chksum |= (inb(EWRK3_APROM)<<8);
+ if (k != chksum) status = -1;
+ }
+
+ return status;
+}
+
+/*
+** Look for a particular board name in the EISA configuration space
+*/
+static int EISA_signature(char *name, s32 eisa_id)
+{
+ u_long i;
+ char *signatures[] = EWRK3_SIGNATURE;
+ char ManCode[EWRK3_STRLEN];
+ union {
+ s32 ID;
+ char Id[4];
+ } Eisa;
+ int status = 0;
+
+ *name = '\0';
+ for (i=0; i<4; i++) {
+ Eisa.Id[i] = inb(eisa_id + i);
+ }
+
+ ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
+ ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
+ ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
+ ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
+ ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
+ ManCode[5]='\0';
+
+ for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
+ if (strstr(ManCode, signatures[i]) != NULL) {
+ strcpy(name,ManCode);
+ status = 1;
+ }
+ }
+
+ return status; /* non-zero => name[] holds a recognised device name */
+}
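+
+/*
+** Editor's illustrative sketch -- not part of the original driver: the
+** conventional EISA ID packing stores three 5-bit letters in the first two
+** ID bytes; the routine above keeps only the first two letters and folds in
+** three product digits before comparing against the EWRK3_SIGNATURE strings.
+** Decoding all three letters would look roughly like this (hypothetical
+** helper):
+*/
+static void eisa_mfr_code_sketch(const char *id, char mfr[4])
+{
+ mfr[0] = ((id[0] >> 2) & 0x1f) + 0x40; /* byte 0, bits <6:2> */
+ mfr[1] = (((id[0] & 0x03) << 3) | ((id[1] & 0xe0) >> 5)) + 0x40; /* split field */
+ mfr[2] = (id[1] & 0x1f) + 0x40; /* byte 1, bits <4:0> */
+ mfr[3] = '\0';
+}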
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases.
+*/
+static int ewrk3_ioctl(struct device *dev, struct ifreq *rq, int cmd)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *) &rq->ifr_data;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ u_char csr;
+ union {
+ u_char addr[HASH_TABLE_LEN * ETH_ALEN];
+ u_short val[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
+ } tmp;
+
+ switch(ioc->cmd) {
+ case EWRK3_GET_HWADDR: /* Get the hardware address */
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ ioc->len = ETH_ALEN;
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ case EWRK3_SET_HWADDR: /* Set the hardware address */
+ if (suser()) {
+ if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN))) {
+ csr = inb(EWRK3_CSR);
+ csr |= (CSR_TXD|CSR_RXD);
+ outb(csr, EWRK3_CSR); /* Disable the TX and RX */
+
+ memcpy_fromfs(tmp.addr,ioc->data,ETH_ALEN);
+ for (i=0; i<ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ outb(tmp.addr[i], EWRK3_PAR0 + i);
+ }
+
+ csr &= ~(CSR_TXD|CSR_RXD); /* Enable the TX and RX */
+ outb(csr, EWRK3_CSR);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SET_PROM: /* Set Promiscuous Mode */
+ if (suser()) {
+ csr = inb(EWRK3_CSR);
+ csr |= CSR_PME;
+ csr &= ~CSR_MCE;
+ outb(csr, EWRK3_CSR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */
+ if (suser()) {
+ csr = inb(EWRK3_CSR);
+ csr &= ~CSR_PME;
+ outb(csr, EWRK3_CSR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ printk("%s: Boo!\n", dev->name);
+
+ break;
+ case EWRK3_GET_MCA: /* Get the multicast address table */
+ if (!(status = verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ while (set_bit(0, (void *)&lp->lock) != 0); /* Wait for lock to free */
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0, EWRK3_IOPR);
+ outw(PAGE0_HTE, EWRK3_PIR1);
+ for (i=0; i<(HASH_TABLE_LEN >> 3); i++) {
+ tmp.addr[i] = inb(EWRK3_DATA);
+ }
+ } else {
+ outb(0, EWRK3_MPR);
+ memcpy_fromio(tmp.addr, (char *)(lp->shmem_base + PAGE0_HTE), (HASH_TABLE_LEN >> 3));
+ }
+ ioc->len = (HASH_TABLE_LEN >> 3);
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+ lp->lock = 0; /* Unlock the page register */
+
+ break;
+ case EWRK3_SET_MCA: /* Set a multicast address */
+ if (suser()) {
+ if (!(status=verify_area(VERIFY_READ, ioc->data, ETH_ALEN*ioc->len))) {
+ memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
+ set_multicast_list(dev);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_MCA: /* Clear all multicast addresses */
+ if (suser()) {
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_MCA_EN: /* Enable multicast addressing */
+ if (suser()) {
+ csr = inb(EWRK3_CSR);
+ csr |= CSR_MCE;
+ csr &= ~CSR_PME;
+ outb(csr, EWRK3_CSR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_STATS: /* Get the driver statistics */
+ cli();
+ ioc->len = sizeof(lp->pktStats);
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
+ }
+ sti();
+
+ break;
+ case EWRK3_CLR_STATS: /* Zero out the driver statistics */
+ if (suser()) {
+ cli();
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ sti();
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_CSR: /* Get the CSR Register contents */
+ tmp.addr[0] = inb(EWRK3_CSR);
+ ioc->len = 1;
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ case EWRK3_SET_CSR: /* Set the CSR Register contents */
+ if (suser()) {
+ if (!(status=verify_area(VERIFY_READ, ioc->data, 1))) {
+ memcpy_fromfs(tmp.addr, ioc->data, 1);
+ outb(tmp.addr[0], EWRK3_CSR);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_EEPROM: /* Get the EEPROM contents */
+ if (suser()) {
+ for (i=0; i<(EEPROM_MAX>>1); i++) {
+ tmp.val[i] = (short)Read_EEPROM(iobase, i);
+ }
+ i = EEPROM_MAX;
+ tmp.addr[i++] = inb(EWRK3_CMR); /* Config/Management Reg. */
+ for (j=0;j<ETH_ALEN;j++) {
+ tmp.addr[i++] = inb(EWRK3_PAR0 + j);
+ }
+ ioc->len = EEPROM_MAX + 1 + ETH_ALEN;
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SET_EEPROM: /* Set the EEPROM contents */
+ if (suser()) {
+ if (!(status=verify_area(VERIFY_READ, ioc->data, EEPROM_MAX))) {
+ memcpy_fromfs(tmp.addr, ioc->data, EEPROM_MAX);
+ for (i=0; i<(EEPROM_MAX>>1); i++) {
+ Write_EEPROM(tmp.val[i], iobase, i);
+ }
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_CMR: /* Get the CMR Register contents */
+ tmp.addr[0] = inb(EWRK3_CMR);
+ ioc->len = 1;
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ case EWRK3_SET_TX_CUT_THRU: /* Set TX cut through mode */
+ if (suser()) {
+ lp->txc = 1;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_TX_CUT_THRU: /* Clear TX cut through mode */
+ if (suser()) {
+ lp->txc = 0;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ default:
+ status = -EOPNOTSUPP;
+ }
+
+ return status;
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device thisEthwrk = {
+ devicename, /* device name is inserted by /linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x300, 5, /* I/O address, IRQ */
+ 0, 0, 0, NULL, ewrk3_probe };
+
+static int io=0x300; /* <--- EDIT THESE LINES FOR YOUR CONFIGURATION */
+static int irq=5; /* or use the insmod io= irq= options */
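+
+/*
+** Editor's note (illustrative): with the defaults above, or overriding them
+** on the command line as the comment suggests, a typical load would be
+**
+** insmod ewrk3.o io=0x300 irq=5
+**
+** (values shown are examples only).
+*/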
+
+int
+init_module(void)
+{
+ thisEthwrk.base_addr=io;
+ thisEthwrk.irq=irq;
+ if (register_netdev(&thisEthwrk) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ release_region(thisEthwrk.base_addr, EWRK3_TOTAL_SIZE);
+
+ if (thisEthwrk.priv) {
+ kfree(thisEthwrk.priv);
+ thisEthwrk.priv = NULL;
+ }
+ thisEthwrk.irq = 0;
+
+ unregister_netdev(&thisEthwrk);
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * kernel-compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O2 -m486 -c ewrk3.c"
+ *
+ * module-compile-command: "gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O2 -m486 -c ewrk3.c"
+ * End:
+ */
+
diff --git a/i386/i386at/gpl/linux/net/ewrk3.h b/i386/i386at/gpl/linux/net/ewrk3.h
new file mode 100644
index 00000000..b37abf46
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/ewrk3.h
@@ -0,0 +1,322 @@
+/*
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of the
+ GNU Public License, incorporated herein by reference.
+
+ The author may be reached as davies@wanton.lkg.dec.com or Digital
+ Equipment Corporation, 550 King Street, Littleton MA 01460.
+
+ =========================================================================
+*/
+
+/*
+** I/O Address Register Map
+*/
+#define EWRK3_CSR iobase+0x00 /* Control and Status Register */
+#define EWRK3_CR iobase+0x01 /* Control Register */
+#define EWRK3_ICR iobase+0x02 /* Interrupt Control Register */
+#define EWRK3_TSR iobase+0x03 /* Transmit Status Register */
+#define EWRK3_RSVD1 iobase+0x04 /* RESERVED */
+#define EWRK3_RSVD2 iobase+0x05 /* RESERVED */
+#define EWRK3_FMQ iobase+0x06 /* Free Memory Queue */
+#define EWRK3_FMQC iobase+0x07 /* Free Memory Queue Counter */
+#define EWRK3_RQ iobase+0x08 /* Receive Queue */
+#define EWRK3_RQC iobase+0x09 /* Receive Queue Counter */
+#define EWRK3_TQ iobase+0x0a /* Transmit Queue */
+#define EWRK3_TQC iobase+0x0b /* Transmit Queue Counter */
+#define EWRK3_TDQ iobase+0x0c /* Transmit Done Queue */
+#define EWRK3_TDQC iobase+0x0d /* Transmit Done Queue Counter */
+#define EWRK3_PIR1 iobase+0x0e /* Page Index Register 1 */
+#define EWRK3_PIR2 iobase+0x0f /* Page Index Register 2 */
+#define EWRK3_DATA iobase+0x10 /* Data Register */
+#define EWRK3_IOPR iobase+0x11 /* I/O Page Register */
+#define EWRK3_IOBR iobase+0x12 /* I/O Base Register */
+#define EWRK3_MPR iobase+0x13 /* Memory Page Register */
+#define EWRK3_MBR iobase+0x14 /* Memory Base Register */
+#define EWRK3_APROM iobase+0x15 /* Address PROM */
+#define EWRK3_EPROM1 iobase+0x16 /* EEPROM Data Register 1 */
+#define EWRK3_EPROM2 iobase+0x17 /* EEPROM Data Register 2 */
+#define EWRK3_PAR0 iobase+0x18 /* Physical Address Register 0 */
+#define EWRK3_PAR1 iobase+0x19 /* Physical Address Register 1 */
+#define EWRK3_PAR2 iobase+0x1a /* Physical Address Register 2 */
+#define EWRK3_PAR3 iobase+0x1b /* Physical Address Register 3 */
+#define EWRK3_PAR4 iobase+0x1c /* Physical Address Register 4 */
+#define EWRK3_PAR5 iobase+0x1d /* Physical Address Register 5 */
+#define EWRK3_CMR iobase+0x1e /* Configuration/Management Register */
+
+/*
+** Control Page Map
+*/
+#define PAGE0_FMQ 0x000 /* Free Memory Queue */
+#define PAGE0_RQ 0x080 /* Receive Queue */
+#define PAGE0_TQ 0x100 /* Transmit Queue */
+#define PAGE0_TDQ 0x180 /* Transmit Done Queue */
+#define PAGE0_HTE 0x200 /* Hash Table Entries */
+#define PAGE0_RSVD 0x240 /* RESERVED */
+#define PAGE0_USRD 0x600 /* User Data */
+
+/*
+** Control and Status Register bit definitions (EWRK3_CSR)
+*/
+#define CSR_RA 0x80 /* Runt Accept */
+#define CSR_PME 0x40 /* Promiscuous Mode Enable */
+#define CSR_MCE 0x20 /* Multicast Enable */
+#define CSR_TNE 0x08 /* TX Done Queue Not Empty */
+#define CSR_RNE 0x04 /* RX Queue Not Empty */
+#define CSR_TXD 0x02 /* TX Disable */
+#define CSR_RXD 0x01 /* RX Disable */
+
+/*
+** Control Register bit definitions (EWRK3_CR)
+*/
+#define CR_APD 0x80 /* Auto Port Disable */
+#define CR_PSEL 0x40 /* Port Select (0->TP port) */
+#define CR_LBCK 0x20 /* LoopBaCK enable */
+#define CR_FDUP 0x10 /* Full DUPlex enable */
+#define CR_FBUS 0x08 /* Fast BUS enable (ISA clk > 8.33MHz) */
+#define CR_EN_16 0x04 /* ENable 16 bit memory accesses */
+#define CR_LED 0x02 /* LED (1-> turn on) */
+
+/*
+** Interrupt Control Register bit definitions (EWRK3_ICR)
+*/
+#define ICR_IE 0x80 /* Interrupt Enable */
+#define ICR_IS 0x60 /* Interrupt Selected */
+#define ICR_TNEM 0x08 /* TNE Mask (0->mask) */
+#define ICR_RNEM 0x04 /* RNE Mask (0->mask) */
+#define ICR_TXDM 0x02 /* TXD Mask (0->mask) */
+#define ICR_RXDM 0x01 /* RXD Mask (0->mask) */
+
+/*
+** Transmit Status Register bit definitions (EWRK3_TSR)
+*/
+#define TSR_NCL 0x80 /* No Carrier Loopback */
+#define TSR_ID 0x40 /* Initially Deferred */
+#define TSR_LCL 0x20 /* Late CoLlision */
+#define TSR_ECL 0x10 /* Excessive CoLlisions */
+#define TSR_RCNTR 0x0f /* Retries CouNTeR */
+
+/*
+** I/O Page Register bit definitions (EWRK3_IOPR)
+*/
+#define EEPROM_INIT 0xc0 /* EEPROM INIT command */
+#define EEPROM_WR_EN 0xc8 /* EEPROM WRITE ENABLE command */
+#define EEPROM_WR 0xd0 /* EEPROM WRITE command */
+#define EEPROM_WR_DIS 0xd8 /* EEPROM WRITE DISABLE command */
+#define EEPROM_RD 0xe0 /* EEPROM READ command */
+
+/*
+** I/O Base Register bit definitions (EWRK3_IOBR)
+*/
+#define EISA_REGS_EN 0x20 /* Enable EISA ID and Control Registers */
+#define EISA_IOB 0x1f /* Compare bits for I/O Base Address */
+
+/*
+** I/O Configuration/Management Register bit definitions (EWRK3_CMR)
+*/
+#define CMR_RA 0x80 /* Read Ahead */
+#define CMR_WB 0x40 /* Write Behind */
+#define CMR_LINK 0x20 /* 0->TP */
+#define CMR_POLARITY 0x10 /* Informational */
+#define CMR_NO_EEPROM 0x0c /* NO_EEPROM<1:0> pin status */
+#define CMR_HS 0x08 /* Hard Strapped pin status (LeMAC2) */
+#define CMR_PNP 0x04 /* Plug 'n Play */
+#define CMR_DRAM 0x02 /* 0-> 1DRAM, 1-> 2 DRAM on board */
+#define CMR_0WS 0x01 /* Zero Wait State */
+
+/*
+** MAC Receive Status Register bit definitions
+*/
+
+#define R_ROK 0x80 /* Receive OK summary */
+#define R_IAM 0x10 /* Individual Address Match */
+#define R_MCM 0x08 /* MultiCast Match */
+#define R_DBE 0x04 /* Dribble Bit Error */
+#define R_CRC 0x02 /* CRC error */
+#define R_PLL 0x01 /* Phase Lock Lost */
+
+/*
+** MAC Transmit Control Register bit definitions
+*/
+
+#define TCR_SQEE 0x40 /* SQE Enable - look for heartbeat */
+#define TCR_SED 0x20 /* Stop when Error Detected */
+#define TCR_QMODE 0x10 /* Q_MODE */
+#define TCR_LAB 0x08 /* Less Aggressive Backoff */
+#define TCR_PAD 0x04 /* PAD Runt Packets */
+#define TCR_IFC 0x02 /* Insert Frame Check */
+#define TCR_ISA 0x01 /* Insert Source Address */
+
+/*
+** MAC Transmit Status Register bit definitions
+*/
+
+#define T_VSTS 0x80 /* Valid STatuS */
+#define T_CTU 0x40 /* Cut Through Used */
+#define T_SQE 0x20 /* Signal Quality Error */
+#define T_NCL 0x10 /* No Carrier Loopback */
+#define T_LCL 0x08 /* Late Collision */
+#define T_ID 0x04 /* Initially Deferred */
+#define T_COLL 0x03 /* COLLision status */
+#define T_XCOLL 0x03 /* Excessive Collisions */
+#define T_MCOLL 0x02 /* Multiple Collisions */
+#define T_OCOLL 0x01 /* One Collision */
+#define T_NOCOLL 0x00 /* No Collisions */
+#define T_XUR 0x03 /* Excessive Underruns */
+#define T_TXE 0x7f /* TX Errors */
+
+/*
+** EISA Configuration Register bit definitions
+*/
+
+#define EISA_ID iobase + 0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase + 0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase + 0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase + 0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase + 0x0c83 /* EISA ID Register 3 */
+#define EISA_CR iobase + 0x0c84 /* EISA Control Register */
+
+/*
+** EEPROM BYTES
+*/
+#define EEPROM_MEMB 0x00
+#define EEPROM_IOB 0x01
+#define EEPROM_EISA_ID0 0x02
+#define EEPROM_EISA_ID1 0x03
+#define EEPROM_EISA_ID2 0x04
+#define EEPROM_EISA_ID3 0x05
+#define EEPROM_MISC0 0x06
+#define EEPROM_MISC1 0x07
+#define EEPROM_PNAME7 0x08
+#define EEPROM_PNAME6 0x09
+#define EEPROM_PNAME5 0x0a
+#define EEPROM_PNAME4 0x0b
+#define EEPROM_PNAME3 0x0c
+#define EEPROM_PNAME2 0x0d
+#define EEPROM_PNAME1 0x0e
+#define EEPROM_PNAME0 0x0f
+#define EEPROM_SWFLAGS 0x10
+#define EEPROM_HWCAT 0x11
+#define EEPROM_NETMAN2 0x12
+#define EEPROM_REVLVL 0x13
+#define EEPROM_NETMAN0 0x14
+#define EEPROM_NETMAN1 0x15
+#define EEPROM_CHIPVER 0x16
+#define EEPROM_SETUP 0x17
+#define EEPROM_PADDR0 0x18
+#define EEPROM_PADDR1 0x19
+#define EEPROM_PADDR2 0x1a
+#define EEPROM_PADDR3 0x1b
+#define EEPROM_PADDR4 0x1c
+#define EEPROM_PADDR5 0x1d
+#define EEPROM_PA_CRC 0x1e
+#define EEPROM_CHKSUM 0x1f
+
+/*
+** EEPROM bytes for checksumming
+*/
+#define EEPROM_MAX 32 /* bytes */
+
+/*
+** EEPROM MISCELLANEOUS FLAGS
+*/
+#define RBE_SHADOW 0x0100 /* Remote Boot Enable Shadow */
+#define READ_AHEAD 0x0080 /* Read Ahead feature */
+#define IRQ_SEL2 0x0070 /* IRQ line selection (LeMAC2) */
+#define IRQ_SEL 0x0060 /* IRQ line selection */
+#define FAST_BUS 0x0008 /* ISA Bus speeds > 8.33MHz */
+#define ENA_16 0x0004 /* Enables 16 bit memory transfers */
+#define WRITE_BEHIND 0x0002 /* Write Behind feature */
+#define _0WS_ENA 0x0001 /* Zero Wait State Enable */
+
+/*
+** EEPROM NETWORK MANAGEMENT FLAGS
+*/
+#define NETMAN_POL 0x04 /* Polarity defeat */
+#define NETMAN_LINK 0x02 /* Link defeat */
+#define NETMAN_CCE 0x01 /* Custom Counters Enable */
+
+/*
+** EEPROM SW FLAGS
+*/
+#define SW_SQE 0x10 /* Signal Quality Error */
+#define SW_LAB 0x08 /* Less Aggressive Backoff */
+#define SW_INIT 0x04 /* Initialized */
+#define SW_TIMEOUT 0x02 /* 0:2.5 mins, 1: 30 secs */
+#define SW_REMOTE 0x01 /* Remote Boot Enable -> 1 */
+
+/*
+** EEPROM SETUP FLAGS
+*/
+#define SETUP_APD 0x80 /* AutoPort Disable */
+#define SETUP_PS 0x40 /* Port Select */
+#define SETUP_MP 0x20 /* MultiPort */
+#define SETUP_1TP 0x10 /* 1 port, TP */
+#define SETUP_1COAX 0x00 /* 1 port, Coax */
+#define SETUP_DRAM 0x02 /* Number of DRAMS on board */
+
+/*
+** EEPROM MANAGEMENT FLAGS
+*/
+#define MGMT_CCE 0x01 /* Custom Counters Enable */
+
+/*
+** EEPROM VERSIONS
+*/
+#define LeMAC 0x11
+#define LeMAC2 0x12
+
+/*
+** Miscellaneous
+*/
+
+#define EEPROM_WAIT_TIME 1000 /* Number of microseconds */
+#define EISA_EN 0x0001 /* Enable EISA bus buffers */
+
+#define HASH_TABLE_LEN 512 /* Bits */
+
+#define XCT 0x80 /* Transmit Cut Through */
+#define PRELOAD 16 /* 4 long words */
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define EEPROM_OFFSET(a) ((u_short)((u_long)(a)))
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define EWRK3IOCTL SIOCDEVPRIVATE
+
+struct ewrk3_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define EWRK3_GET_HWADDR 0x01 /* Get the hardware address */
+#define EWRK3_SET_HWADDR 0x02 /* Set the hardware address */
+#define EWRK3_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define EWRK3_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define EWRK3_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define EWRK3_GET_MCA 0x06 /* Get the multicast address table */
+#define EWRK3_SET_MCA 0x07 /* Set a multicast address */
+#define EWRK3_CLR_MCA 0x08 /* Clear all multicast addresses */
+#define EWRK3_MCA_EN 0x09 /* Enable multicast addressing */
+#define EWRK3_GET_STATS 0x0a /* Get the driver statistics */
+#define EWRK3_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define EWRK3_GET_CSR 0x0c /* Get the CSR Register contents */
+#define EWRK3_SET_CSR 0x0d /* Set the CSR Register contents */
+#define EWRK3_GET_EEPROM 0x0e /* Get the EEPROM contents */
+#define EWRK3_SET_EEPROM 0x0f /* Set the EEPROM contents */
+#define EWRK3_GET_CMR 0x10 /* Get the CMR Register contents */
+#define EWRK3_CLR_TX_CUT_THRU 0x11 /* Clear the TX cut through mode */
+#define EWRK3_SET_TX_CUT_THRU 0x12 /* Set the TX cut through mode */
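+
+/*
+** Editor's usage sketch -- not part of the original header: how a user-space
+** tool might issue one of the commands above. ewrk3_ioctl() in the driver
+** casts &rq->ifr_data, so the ewrk3_ioctl block is embedded in the ifreq
+** itself rather than pointed to by ifr_data. The interface name, socket
+** setup and (omitted) error handling are illustrative only.
+**
+** struct ifreq ifr;
+** struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *)&ifr.ifr_data;
+** unsigned char hwaddr[6];
+** int fd = socket(AF_INET, SOCK_DGRAM, 0);
+**
+** strcpy(ifr.ifr_name, "eth0");
+** ioc->cmd = EWRK3_GET_HWADDR;
+** ioc->len = sizeof(hwaddr);
+** ioc->data = hwaddr;
+** ioctl(fd, EWRK3IOCTL, &ifr);
+*/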
diff --git a/i386/i386at/gpl/linux/net/hp-plus.c b/i386/i386at/gpl/linux/net/hp-plus.c
new file mode 100644
index 00000000..aed7ee01
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/hp-plus.c
@@ -0,0 +1,483 @@
+/* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */
+/*
+ Written 1994 by Donald Becker.
+
+ This driver is for the Hewlett Packard PC LAN (27***) plus ethercards.
+ These cards are sold under several model numbers, usually 2724*.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ As is often the case, a great deal of credit is owed to Russ Nelson.
+ The Crynwr packet driver was my primary source of HP-specific
+ programming information.
+*/
+
+static const char *version =
+"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/string.h> /* Important -- this inlines word moves. */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int hpplus_portlist[] =
+{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
+
+/*
+ The HP EtherTwist chip implementation is a fairly routine DP8390
+ implementation. It allows both shared memory and programmed-I/O buffer
+ access, using a custom interface for both. The programmed-I/O mode is
+ entirely implemented in the HP EtherTwist chip, bypassing the problem
+ ridden built-in 8390 facilities used on NE2000 designs. The shared
+ memory mode is likewise special, with an offset register used to make
+ packets appear at the shared memory base. Both modes use a base and bounds
+ page register to hide the Rx ring buffer wrap -- a packet that spans the
+ end of physical buffer memory appears continuous to the driver. (c.f. the
+ 3c503 and Cabletron E2100)
+
+ A special note: the internal buffer of the board is only 8 bits wide.
+ This lays several nasty traps for the unaware:
+ - the 8390 must be programmed for byte-wide operations
+ - all I/O and memory operations must work on whole words (the access
+ latches are serially preloaded and have no byte-swapping ability).
+
+ This board is laid out in I/O space much like the earlier HP boards:
+ the first 16 locations are for the board registers, and the second 16 are
+ for the 8390. The board is easy to identify, with both a dedicated 16 bit
+ ID register and a constant 0x530* value in the upper bits of the paging
+ register.
+*/
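+
+/*
+ Editor's note (illustrative, not part of the original source): the
+ word-only rule above is what drives the rounding in the block I/O
+ routines further down in this file, e.g. for programmed-I/O input
+
+ insw(ioaddr + HP_DATAPORT, buf, count>>1);
+ if (count & 0x01)
+ buf[count-1] = inw(ioaddr + HP_DATAPORT);
+
+ and a (count+3)>>2 longword transfer on output.
+*/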
+
+#define HP_ID 0x00 /* ID register, always 0x4850. */
+#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */
+#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */
+#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */
+#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */
+#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. */
+#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */
+#define HP_IO_EXTENT 32
+
+#define HP_START_PG 0x00 /* First page of TX buffer */
+#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* The register set selected in HP_PAGING. */
+enum PageName {
+ Perf_Page = 0, /* Normal operation. */
+ MAC_Page = 1, /* The ethernet address (+checksum). */
+ HW_Page = 2, /* EEPROM-loaded hardware parameters. */
+ LAN_Page = 4, /* Transceiver selection, testing, etc. */
+ ID_Page = 6 };
+
+/* The bit definitions for the HPP_OPTION register. */
+enum HP_Option {
+ NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */
+ EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20,
+ MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, };
+
+int hp_plus_probe(struct device *dev);
+int hpp_probe1(struct device *dev, int ioaddr);
+
+static void hpp_reset_8390(struct device *dev);
+static int hpp_open(struct device *dev);
+static int hpp_close(struct device *dev);
+static void hpp_mem_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hpp_mem_block_output(struct device *dev, int count,
+ const unsigned char *buf, const start_page);
+static void hpp_mem_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void hpp_io_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hpp_io_block_output(struct device *dev, int count,
+ const unsigned char *buf, const start_page);
+static void hpp_io_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+
+/* Probe a list of addresses for an HP LAN+ adaptor.
+ This routine is almost boilerplate. */
+#ifdef HAVE_DEVLIST
+/* Support for an alternate probe manager, which will eliminate the
+ boilerplate below. */
+struct netdev_entry hpplus_drv =
+{"hpplus", hpp_probe1, HP_IO_EXTENT, hpplus_portlist};
+#else
+
+int hp_plus_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return hpp_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; hpplus_portlist[i]; i++) {
+ int ioaddr = hpplus_portlist[i];
+ if (check_region(ioaddr, HP_IO_EXTENT))
+ continue;
+ if (hpp_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* Do the interesting part of the probe at a single address. */
+int hpp_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ unsigned char checksum = 0;
+ const char *name = "HP-PC-LAN+";
+ int mem_start;
+ static unsigned version_printed = 0;
+
+ /* Check for the HP+ signature, 50 48 0x 53. */
+ if (inw(ioaddr + HP_ID) != 0x4850
+ || (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300)
+ return ENODEV;
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("hp-plus.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: %s at %#3x,", dev->name, name, ioaddr);
+
+ /* Retrieve and checksum the station address. */
+ outw(MAC_Page, ioaddr + HP_PAGING);
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ unsigned char inval = inb(ioaddr + 8 + i);
+ dev->dev_addr[i] = inval;
+ checksum += inval;
+ printk(" %2.2x", inval);
+ }
+ checksum += inb(ioaddr + 14);
+
+ if (checksum != 0xff) {
+ printk(" bad checksum %2.2x.\n", checksum);
+ return ENODEV;
+ } else {
+ /* Point at the Software Configuration Flags. */
+ outw(ID_Page, ioaddr + HP_PAGING);
+ printk(" ID %4.4x", inw(ioaddr + 12));
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk ("hp-plus.c: unable to allocate memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ /* Grab the region so we can find another board if something fails. */
+ request_region(ioaddr, HP_IO_EXTENT,"hp-plus");
+
+ /* Read the IRQ line. */
+ outw(HW_Page, ioaddr + HP_PAGING);
+ {
+ int irq = inb(ioaddr + 13) & 0x0f;
+ int option = inw(ioaddr + HPP_OPTION);
+
+ dev->irq = irq;
+ if (option & MemEnable) {
+ mem_start = inw(ioaddr + 9) << 8;
+ printk(", IRQ %d, memory address %#x.\n", irq, mem_start);
+ } else {
+ mem_start = 0;
+ printk(", IRQ %d, programmed-I/O mode.\n", irq);
+ }
+ }
+
+ /* Set the wrap registers for string I/O reads. */
+ outw((HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+
+ /* Set the base address to point to the NIC, not the "real" base! */
+ dev->base_addr = ioaddr + NIC_OFFSET;
+
+ dev->open = &hpp_open;
+ dev->stop = &hpp_close;
+
+ ei_status.name = name;
+ ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
+ ei_status.tx_start_page = HP_START_PG;
+ ei_status.rx_start_page = HP_START_PG + TX_2X_PAGES;
+ ei_status.stop_page = HP_STOP_PG;
+
+ ei_status.reset_8390 = &hpp_reset_8390;
+ ei_status.block_input = &hpp_io_block_input;
+ ei_status.block_output = &hpp_io_block_output;
+ ei_status.get_8390_hdr = &hpp_io_get_8390_hdr;
+
+ /* Check if the memory_enable flag is set in the option register. */
+ if (mem_start) {
+ ei_status.block_input = &hpp_mem_block_input;
+ ei_status.block_output = &hpp_mem_block_output;
+ ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr;
+ dev->mem_start = mem_start;
+ dev->rmem_start = dev->mem_start + TX_2X_PAGES*256;
+ dev->mem_end = dev->rmem_end
+ = dev->mem_start + (HP_STOP_PG - HP_START_PG)*256;
+ }
+
+ outw(Perf_Page, ioaddr + HP_PAGING);
+ NS8390_init(dev, 0);
+ /* Leave the 8390 and HP chip reset. */
+ outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION);
+
+ return 0;
+}
+
+static int
+hpp_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg;
+
+ if (request_irq(dev->irq, &ei_interrupt, 0, "hp-plus")) {
+ return -EAGAIN;
+ }
+
+ /* Reset the 8390 and HP chip. */
+ option_reg = inw(ioaddr + HPP_OPTION);
+ outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
+ SLOW_DOWN_IO; SLOW_DOWN_IO;
+ /* Unreset the board and enable interrupts. */
+ outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
+
+ /* Set the wrap registers for programmed-I/O operation. */
+ outw(HW_Page, ioaddr + HP_PAGING);
+ outw((HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+
+ /* Select the operational page. */
+ outw(Perf_Page, ioaddr + HP_PAGING);
+
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int
+hpp_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = NULL;
+ ei_close(dev);
+ outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset,
+ ioaddr + HPP_OPTION);
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static void
+hpp_reset_8390(struct device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
+
+ outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
+ /* Pause a few cycles for the hardware reset to take place. */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ ei_status.txing = 0;
+ outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
+
+ SLOW_DOWN_IO; SLOW_DOWN_IO;
+
+
+ if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
+ printk("%s: hp_reset_8390() did not complete.\n", dev->name);
+
+ if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
+ return;
+}
+
+/* The programmed-I/O version of reading the 4 byte 8390 specific header.
+ Note that transfer with the EtherTwist+ must be on word boundaries. */
+
+static void
+hpp_io_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+
+ outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
+ insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. */
+
+static void
+hpp_io_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ char *buf = skb->data;
+
+ outw(ring_offset, ioaddr + HPP_IN_ADDR);
+ insw(ioaddr + HP_DATAPORT, buf, count>>1);
+ if (count & 0x01)
+ buf[count-1] = inw(ioaddr + HP_DATAPORT);
+}
+
+/* The corresponding shared memory versions of the above 2 functions. */
+
+static void
+hpp_mem_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+ memcpy_fromio(hdr, dev->mem_start, sizeof(struct e8390_pkt_hdr));
+ outw(option_reg, ioaddr + HPP_OPTION);
+ hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
+}
+
+static void
+hpp_mem_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw(ring_offset, ioaddr + HPP_IN_ADDR);
+
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+
+ /* Caution: this relies on get_8390_hdr() rounding up count!
+ Also note that we *can't* use eth_io_copy_and_sum() because
+ it will not always copy "count" bytes (e.g. padded IP). */
+
+ memcpy_fromio(skb->data, dev->mem_start, count);
+ outw(option_reg, ioaddr + HPP_OPTION);
+}
+
+/* A special note: we *must* always transfer >=16 bit words.
+ It's always safe to round up, so we do. */
+static void
+hpp_io_block_output(struct device *dev, int count,
+ const unsigned char *buf, const start_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
+ outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
+ return;
+}
+
+static void
+hpp_mem_block_output(struct device *dev, int count,
+ const unsigned char *buf, const start_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+ memcpy_toio(dev->mem_start, buf, (count + 3) & ~3);
+ outw(option_reg, ioaddr + HPP_OPTION);
+
+ return;
+}
+
+
+#ifdef MODULE
+#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_HPP_CARDS] = { 0, };
+static struct device dev_hpp[MAX_HPP_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_HPP_CARDS] = { 0, };
+static int irq[MAX_HPP_CARDS] = { 0, };
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
+ struct device *dev = &dev_hpp[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->init = hp_plus_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
+ struct device *dev = &dev_hpp[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: hpp_close() handles free_irq + irq2dev map */
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(ioaddr, HP_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c hp-plus.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/hp.c b/i386/i386at/gpl/linux/net/hp.c
new file mode 100644
index 00000000..d0443a7d
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/hp.c
@@ -0,0 +1,451 @@
+/* hp.c: A HP LAN ethernet driver for linux. */
+/*
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is a driver for the HP PC-LAN adaptors.
+
+ Sources:
+ The Crynwr packet driver.
+*/
+
+static const char *version =
+ "hp.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int hppclan_portlist[] =
+{ 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240, 0};
+
+#define HP_IO_EXTENT 32
+
+#define HP_DATAPORT 0x0c /* "Remote DMA" data port. */
+#define HP_ID 0x07
+#define HP_CONFIGURE 0x08 /* Configuration register. */
+#define HP_RUN 0x01 /* 1 == Run, 0 == reset. */
+#define HP_IRQ 0x0E /* Mask for software-configured IRQ line. */
+#define HP_DATAON 0x10 /* Turn on dataport */
+#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */
+
+#define HP_START_PG 0x00 /* First page of TX buffer */
+#define HP_8BSTOP_PG 0x80 /* Last page +1 of RX ring */
+#define HP_16BSTOP_PG 0xFF /* Same, for 16 bit cards. */
+
+int hp_probe(struct device *dev);
+int hp_probe1(struct device *dev, int ioaddr);
+
+static int hp_open(struct device *dev);
+static int hp_close(struct device *dev);
+static void hp_reset_8390(struct device *dev);
+static void hp_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void hp_block_input(struct device *dev, int count,
+ struct sk_buff *skb , int ring_offset);
+static void hp_block_output(struct device *dev, int count,
+ const unsigned char *buf, const start_page);
+
+static void hp_init_card(struct device *dev);
+
+/* The map from IRQ number to HP_CONFIGURE register setting. */
+/* My default is IRQ5 0 1 2 3 4 5 6 7 8 9 10 11 */
+static char irqmap[16] = { 0, 0, 4, 6, 8,10, 0,14, 0, 4, 2,12,0,0,0,0};
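+
+/* Editor's note (illustrative): e.g. IRQ 5 maps to the value 10 above, so
+ the probe below programs it with
+ outb_p(irqmap[5] | HP_RUN, ioaddr + HP_CONFIGURE); */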
+
+
+/* Probe for an HP LAN adaptor.
+ Also initialize the card and fill in STATION_ADDR with the station
+ address. */
+#ifdef HAVE_DEVLIST
+struct netdev_entry netcard_drv =
+{"hp", hp_probe1, HP_IO_EXTENT, hppclan_portlist};
+#else
+
+int hp_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return hp_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; hppclan_portlist[i]; i++) {
+ int ioaddr = hppclan_portlist[i];
+ if (check_region(ioaddr, HP_IO_EXTENT))
+ continue;
+ if (hp_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+int hp_probe1(struct device *dev, int ioaddr)
+{
+ int i, board_id, wordmode;
+ const char *name;
+ static unsigned version_printed = 0;
+
+ /* Check for the HP physical address, 08 00 09 xx xx xx. */
+ /* This really isn't good enough: we may pick up HP LANCE boards
+ also! Avoid the lance 0x5757 signature. */
+ if (inb(ioaddr) != 0x08
+ || inb(ioaddr+1) != 0x00
+ || inb(ioaddr+2) != 0x09
+ || inb(ioaddr+14) == 0x57)
+ return ENODEV;
+
+ /* Set up the parameters based on the board ID.
+ If you have additional mappings, please mail them to me -djb. */
+ if ((board_id = inb(ioaddr + HP_ID)) & 0x80) {
+ name = "HP27247";
+ wordmode = 1;
+ } else {
+ name = "HP27250";
+ wordmode = 0;
+ }
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("hp.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ /* Snarf the interrupt now. Someday this could be moved to open(). */
+ if (dev->irq < 2) {
+ int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
+ int irq_8list[] = { 7, 5, 3, 4, 9, 0};
+ int *irqp = wordmode ? irq_16list : irq_8list;
+ do {
+ int irq = *irqp;
+ if (request_irq (irq, NULL, 0, "bogus") != -EBUSY) {
+ autoirq_setup(0);
+ /* Twinkle the interrupt, and check if it's seen. */
+ outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE);
+ outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE);
+ if (irq == autoirq_report(0) /* It's a good IRQ line! */
+ && request_irq (irq, &ei_interrupt, 0, "hp") == 0) {
+ printk(" selecting IRQ %d.\n", irq);
+ dev->irq = *irqp;
+ break;
+ }
+ }
+ } while (*++irqp);
+ if (*irqp == 0) {
+ printk(" no free IRQ lines.\n");
+ return EBUSY;
+ }
+ } else {
+ if (dev->irq == 2)
+ dev->irq = 9;
+ if (request_irq(dev->irq, ei_interrupt, 0, "hp")) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return EBUSY;
+ }
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to get memory for dev->priv.\n");
+ free_irq(dev->irq);
+ return -ENOMEM;
+ }
+
+ /* Grab the region so we can find another board if something fails. */
+ request_region(ioaddr, HP_IO_EXTENT,"hp");
+
+ /* Set the base address to point to the NIC, not the "real" base! */
+ dev->base_addr = ioaddr + NIC_OFFSET;
+ dev->open = &hp_open;
+ dev->stop = &hp_close;
+
+ ei_status.name = name;
+ ei_status.word16 = wordmode;
+ ei_status.tx_start_page = HP_START_PG;
+ ei_status.rx_start_page = HP_START_PG + TX_PAGES;
+ ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
+
+ ei_status.reset_8390 = &hp_reset_8390;
+ ei_status.get_8390_hdr = &hp_get_8390_hdr;
+ ei_status.block_input = &hp_block_input;
+ ei_status.block_output = &hp_block_output;
+ hp_init_card(dev);
+
+ return 0;
+}
+
+static int
+hp_open(struct device *dev)
+{
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int
+hp_close(struct device *dev)
+{
+ ei_close(dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static void
+hp_reset_8390(struct device *dev)
+{
+ int hp_base = dev->base_addr - NIC_OFFSET;
+ int saved_config = inb_p(hp_base + HP_CONFIGURE);
+
+ if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
+ outb_p(0x00, hp_base + HP_CONFIGURE);
+ ei_status.txing = 0;
+ /* Pause just a few cycles for the hardware reset to take place. */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ outb_p(saved_config, hp_base + HP_CONFIGURE);
+ SLOW_DOWN_IO; SLOW_DOWN_IO;
+
+ if ((inb_p(hp_base+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
+ printk("%s: hp_reset_8390() did not complete.\n", dev->name);
+
+ if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
+ return;
+}
+
+static void
+hp_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base);
+
+ if (ei_status.word16)
+ insw(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ else
+ insb(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you are
+ porting to a new ethercard look at the packet driver source for hints.
+ The HP LAN doesn't use shared memory -- we put the packet
+ out through the "remote DMA" dataport. */
+
+static void
+hp_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+ int xfer_count = count;
+ char *buf = skb->data;
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base);
+ if (ei_status.word16) {
+ insw(nic_base - NIC_OFFSET + HP_DATAPORT,buf,count>>1);
+ if (count & 0x01)
+ buf[count-1] = inb(nic_base - NIC_OFFSET + HP_DATAPORT), xfer_count++;
+ } else {
+ insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
+ }
+ /* This is for the ALPHA version only, remove for later releases. */
+ if (ei_debug > 0) { /* DMA termination address check... */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ int addr = (high << 8) + low;
+ /* Check only the lower 8 bits so we can ignore ring wrap. */
+ if (((ring_offset + xfer_count) & 0xff) != (addr & 0xff))
+ printk("%s: RX transfer address mismatch, %#4.4x vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+}
+
+static void
+hp_block_output(struct device *dev, int count,
+		const unsigned char *buf, const int start_page)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base);
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work. */
+ outb_p(0x42, nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0xff, nic_base + EN0_RSARLO);
+ outb_p(0x00, nic_base + EN0_RSARHI);
+#define NE_CMD 0x00
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ inb_p(0x61);
+ inb_p(0x61);
+#endif
+
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base);
+ if (ei_status.word16) {
+ /* Use the 'rep' sequence for 16 bit boards. */
+ outsw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count>>1);
+ } else {
+ outsb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
+ }
+
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken! */
+
+ /* This is for the ALPHA version only, remove for later releases. */
+ if (ei_debug > 0) { /* DMA termination address check... */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ int addr = (high << 8) + low;
+ if ((start_page << 8) + count != addr)
+ printk("%s: TX Transfer address mismatch, %#4.4x vs. %#4.4x.\n",
+ dev->name, (start_page << 8) + count, addr);
+ }
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+ return;
+}
+
+/* This function resets the ethercard if something screws up. */
+static void
+hp_init_card(struct device *dev)
+{
+ int irq = dev->irq;
+ NS8390_init(dev, 0);
+ outb_p(irqmap[irq&0x0f] | HP_RUN,
+ dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
+ return;
+}
+
+#ifdef MODULE
+#define MAX_HP_CARDS 4 /* Max number of HP cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_HP_CARDS] = { 0, };
+static struct device dev_hp[MAX_HP_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_HP_CARDS] = { 0, };
+static int irq[MAX_HP_CARDS] = { 0, };
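+
+/* Editor's note (illustrative, not from the original author): the io[] and
+   irq[] arrays above are what insmod patches at load time on kernels of this
+   vintage, e.g. something like
+
+	insmod hp.o io=0x300 irq=10
+
+   (the exact syntax depends on the modutils version in use). */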
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
+ struct device *dev = &dev_hp[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->init = hp_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "hp.c: No HP card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
+ struct device *dev = &dev_hp[this_dev];
+ if (dev->priv != NULL) {
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ kfree(dev->priv);
+ dev->priv = NULL;
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = NULL;
+ release_region(ioaddr, HP_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c hp.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/hp100.c b/i386/i386at/gpl/linux/net/hp100.c
new file mode 100644
index 00000000..d8186bf1
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/hp100.c
@@ -0,0 +1,1144 @@
+/*
+ * hp100.c: Hewlett Packard HP10/100VG ANY LAN ethernet driver for Linux.
+ *
+ * Author: Jaroslav Kysela, <perex@pf.jcu.cz>
+ *
+ * Supports only the following Hewlett Packard cards:
+ *
+ * HP J2577 10/100 EISA card with REVA Cascade chip
+ * HP J2573 10/100 ISA card with REVA Cascade chip
+ * HP 27248B 10 only EISA card with Cascade chip
+ * HP J2577 10/100 EISA card with Cascade chip
+ * HP J2573 10/100 ISA card with Cascade chip
+ * HP J2585 10/100 PCI card
+ *
+ * Other ATT2MD01 chip-based boards might be supported in the future
+ * (some minor changes would be needed).
+ *
+ * This driver is based on the 'hpfepkt' crynwr packet driver.
+ *
+ * This source/code is public free; you can distribute it and/or modify
+ * it under terms of the GNU General Public License (published by the
+ * Free Software Foundation) either version two of this License, or any
+ * later version.
+ * ----------------------------------------------------------------------------
+ *
+ * Note: Some routines (interrupt handling, transmit) assume that
+ *       the PERFORMANCE page is selected...
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * If you are going to use the module version of this driver, you may
+ * change these values at module load ("insmod") time:
+ *
+ * Variable Description
+ *
+ *	hp100_rx_ratio		Range 1-99 - percentage of onboard memory
+ *				used for RX packets.
+ *	hp100_priority_tx	If this variable is nonzero, all outgoing
+ *				packets will be transmitted as priority.
+ *	hp100_port		Adapter port (for example 0x380).
+ *
+ * ----------------------------------------------------------------------------
+ * MY BEST REGARDS GO TO:
+ *
+ *	IPEX s.r.o., which lent me two HP J2573 cards and
+ *	the HP AdvanceStack 100VG Hub-15 for debugging.
+ *
+ * Russel Nellson <nelson@crynwr.com> for help with obtaining sources
+ * of the 'hpfepkt' packet driver.
+ *
+ *	Also thanks to Abacus Electric s.r.o., which let me use their
+ *	motherboard for my second computer.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * TO DO:
+ * ======
+ * - ioctl handling - some runtime setup things
+ * - 100Mb/s Voice Grade AnyLAN network adapter/hub services support
+ * - 802.5 frames
+ * - promiscuous mode
+ * - bridge mode
+ * - cascaded repeater mode
+ * - 100Mbit MAC
+ *
+ * Revision history:
+ * =================
+ *
+ * Version Date Description
+ *
+ * 0.1 14-May-95 Initial writing. ALPHA code was released.
+ * Only HP J2573 on 10Mb/s (two machines) tested.
+ * 0.11 14-Jun-95 Reset interface bug fixed?
+ * Little bug in hp100_close function fixed.
+ * 100Mb/s connection debugged.
+ * 0.12 14-Jul-95 Link down is now handled better.
+ * 0.20 01-Aug-95 Added PCI support for HP J2585A card.
+ * Statistics bug fixed.
+ * 0.21 04-Aug-95 Memory mapped access support for PCI card.
+ * Added priority transmit support for 100Mb/s
+ * Voice Grade AnyLAN network.
+ *
+ */
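+
+/* Editor's note (illustrative, not from the original author): hp100_rx_ratio,
+   hp100_priority_tx and hp100_port are plain integer variables in the module
+   object file, so on a kernel of this vintage they would typically be set at
+   load time with something like
+
+	insmod hp100.o hp100_port=0x380 hp100_rx_ratio=70 hp100_priority_tx=1
+
+   (whether a particular variable can be set this way depends on the modutils
+   version in use). */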
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/types.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+
+#include "hp100.h"
+
+/*
+ * defines
+ */
+
+#define HP100_BUS_ISA 0
+#define HP100_BUS_EISA 1
+#define HP100_BUS_PCI 2
+
+#define HP100_REGION_SIZE 0x20
+
+#define HP100_MAX_PACKET_SIZE (1536+4)
+#define HP100_MIN_PACKET_SIZE 60
+
+#ifndef HP100_DEFAULT_RX_RATIO
+/* default - 65% of the card's onboard memory is used for RX packets */
+#define HP100_DEFAULT_RX_RATIO 65
+#endif
+
+#ifndef HP100_DEFAULT_PRIORITY_TX
+/* default - don't transmit outgoing packets as priority */
+#define HP100_DEFAULT_PRIORITY_TX 0
+#endif
+
+#ifdef MACH
+#define HP100_IO_MAPPED
+#endif
+
+/*
+ * structures
+ */
+
+struct hp100_eisa_id {
+ u_int id;
+ const char *name;
+ u_char bus;
+};
+
+struct hp100_private {
+ struct hp100_eisa_id *id;
+ u_short soft_model;
+ u_int memory_size;
+ u_short rx_ratio; /* 1 - 99 */
+ u_short priority_tx; /* != 0 - priority tx */
+ short mem_mapped; /* memory mapped access */
+ u_char *mem_ptr_virt; /* virtual memory mapped area, maybe NULL */
+ u_char *mem_ptr_phys; /* physical memory mapped area */
+ short lan_type; /* 10Mb/s, 100Mb/s or -1 (error) */
+  int hub_status;		/* login to hub was successful? */
+ u_char mac1_mode;
+ u_char mac2_mode;
+ struct enet_statistics stats;
+};
+
+/*
+ * variables
+ */
+
+static struct hp100_eisa_id hp100_eisa_ids[] = {
+
+ /* 10/100 EISA card with REVA Cascade chip */
+ { 0x080F1F022, "HP J2577 rev A", HP100_BUS_EISA },
+
+ /* 10/100 ISA card with REVA Cascade chip */
+ { 0x050F1F022, "HP J2573 rev A", HP100_BUS_ISA },
+
+ /* 10 only EISA card with Cascade chip */
+ { 0x02019F022, "HP 27248B", HP100_BUS_EISA },
+
+ /* 10/100 EISA card with Cascade chip */
+ { 0x04019F022, "HP J2577", HP100_BUS_EISA },
+
+ /* 10/100 ISA card with Cascade chip */
+ { 0x05019F022, "HP J2573", HP100_BUS_ISA },
+
+ /* 10/100 PCI card */
+  /* Note: the ID for this card is the same as its PCI vendor/device numbers. */
+ { 0x01030103c, "HP J2585", HP100_BUS_PCI },
+};
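+
+/* Editor's note (illustrative): hp100_probe1() matches this table with the
+   revision nibble masked out,
+
+	( hp100_eisa_ids[ i ].id & 0xf0ffffff ) == ( eisa_id & 0xf0ffffff )
+
+   and then rejects cards whose revision field (bits 27:24) is newer than the
+   revision listed here. */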
+
+int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
+int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
+
+/*
+ * prototypes
+ */
+
+static int hp100_probe1( struct device *dev, int ioaddr, int bus );
+static int hp100_open( struct device *dev );
+static int hp100_close( struct device *dev );
+static int hp100_start_xmit( struct sk_buff *skb, struct device *dev );
+static void hp100_rx( struct device *dev );
+static struct enet_statistics *hp100_get_stats( struct device *dev );
+static void hp100_update_stats( struct device *dev );
+static void hp100_clear_stats( int ioaddr );
+static void hp100_set_multicast_list( struct device *dev);
+static void hp100_interrupt( int irq, struct pt_regs *regs );
+
+static void hp100_start_interface( struct device *dev );
+static void hp100_stop_interface( struct device *dev );
+static void hp100_load_eeprom( struct device *dev );
+static int hp100_sense_lan( struct device *dev );
+static int hp100_login_to_vg_hub( struct device *dev );
+static int hp100_down_vg_link( struct device *dev );
+
+/*
+ * probe functions
+ */
+
+int hp100_probe( struct device *dev )
+{
+ int base_addr = dev ? dev -> base_addr : 0;
+ int ioaddr;
+#ifdef CONFIG_PCI
+ int pci_start_index = 0;
+#endif
+
+ if ( base_addr > 0xff ) /* Check a single specified location. */
+ {
+ if ( check_region( base_addr, HP100_REGION_SIZE ) ) return -EINVAL;
+ if ( base_addr < 0x400 )
+ return hp100_probe1( dev, base_addr, HP100_BUS_ISA );
+ else
+ return hp100_probe1( dev, base_addr, HP100_BUS_EISA );
+ }
+ else
+#ifdef CONFIG_PCI
+ if ( base_addr > 0 && base_addr < 8 + 1 )
+ pci_start_index = 0x100 | ( base_addr - 1 );
+ else
+#endif
+ if ( base_addr != 0 ) return -ENXIO;
+
+  /* First, scan the PCI bus(es). */
+
+#ifdef CONFIG_PCI
+ if ( pcibios_present() )
+ {
+ int pci_index;
+
+#ifdef HP100_DEBUG_PCI
+ printk( "hp100: PCI BIOS is present, checking for devices..\n" );
+#endif
+ for ( pci_index = pci_start_index & 7; pci_index < 8; pci_index++ )
+ {
+ u_char pci_bus, pci_device_fn;
+ u_short pci_command;
+
+ if ( pcibios_find_device( PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A,
+ pci_index, &pci_bus,
+ &pci_device_fn ) != 0 ) break;
+ pcibios_read_config_dword( pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &ioaddr );
+
+ ioaddr &= ~3; /* remove I/O space marker in bit 0. */
+
+ if ( check_region( ioaddr, HP100_REGION_SIZE ) ) continue;
+
+ pcibios_read_config_word( pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command );
+ if ( !( pci_command & PCI_COMMAND_MASTER ) )
+ {
+#ifdef HP100_DEBUG_PCI
+ printk( "hp100: PCI Master Bit has not been set. Setting...\n" );
+#endif
+ pci_command |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word( pci_bus, pci_device_fn,
+ PCI_COMMAND, pci_command );
+ }
+#ifdef HP100_DEBUG_PCI
+ printk( "hp100: PCI adapter found at 0x%x\n", ioaddr );
+#endif
+ if ( hp100_probe1( dev, ioaddr, HP100_BUS_PCI ) == 0 ) return 0;
+ }
+ }
+ if ( pci_start_index > 0 ) return -ENODEV;
+#endif /* CONFIG_PCI */
+
+  /* Second, probe all possible EISA port regions (if an EISA bus is present). */
+
+ for ( ioaddr = 0x1c38; EISA_bus && ioaddr < 0x10000; ioaddr += 0x400 )
+ {
+ if ( check_region( ioaddr, HP100_REGION_SIZE ) ) continue;
+ if ( hp100_probe1( dev, ioaddr, HP100_BUS_EISA ) == 0 ) return 0;
+ }
+
+  /* Third, probe all possible ISA port regions. */
+
+ for ( ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x20 )
+ {
+ if ( check_region( ioaddr, HP100_REGION_SIZE ) ) continue;
+ if ( hp100_probe1( dev, ioaddr, HP100_BUS_ISA ) == 0 ) return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int hp100_probe1( struct device *dev, int ioaddr, int bus )
+{
+ int i;
+ u_char uc, uc_1;
+ u_int eisa_id;
+ short mem_mapped;
+ u_char *mem_ptr_phys, *mem_ptr_virt;
+ struct hp100_private *lp;
+ struct hp100_eisa_id *eid;
+
+ if ( dev == NULL )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100_probe1: dev == NULL ?\n" );
+#endif
+ return EIO;
+ }
+
+ if ( bus != HP100_BUS_PCI ) /* don't check PCI cards again */
+ if ( inb( ioaddr + 0 ) != HP100_HW_ID_0 ||
+ inb( ioaddr + 1 ) != HP100_HW_ID_1 ||
+ ( inb( ioaddr + 2 ) & 0xf0 ) != HP100_HW_ID_2_REVA ||
+ inb( ioaddr + 3 ) != HP100_HW_ID_3 )
+ return -ENODEV;
+
+ dev -> base_addr = ioaddr;
+
+#ifdef HP100_DEBUG_PROBE1
+ printk( "hp100_probe1: card found at port 0x%x\n", ioaddr );
+#endif
+
+ hp100_page( ID_MAC_ADDR );
+ for ( i = uc = eisa_id = 0; i < 4; i++ )
+ {
+ eisa_id >>= 8;
+ uc_1 = hp100_inb( BOARD_ID + i );
+ eisa_id |= uc_1 << 24;
+ uc += uc_1;
+ }
+ uc += hp100_inb( BOARD_ID + 4 );
+
+#ifdef HP100_DEBUG_PROBE1
+ printk( "hp100_probe1: EISA ID = 0x%08x checksum = 0x%02x\n", eisa_id, uc );
+#endif
+
+ if ( uc != 0xff ) /* bad checksum? */
+ {
+ printk( "hp100_probe: bad EISA ID checksum at base port 0x%x\n", ioaddr );
+ return -ENODEV;
+ }
+
+ for ( i = 0; i < sizeof( hp100_eisa_ids ) / sizeof( struct hp100_eisa_id ); i++ )
+ if ( ( hp100_eisa_ids[ i ].id & 0xf0ffffff ) == ( eisa_id & 0xf0ffffff ) )
+ break;
+ if ( i >= sizeof( hp100_eisa_ids ) / sizeof( struct hp100_eisa_id ) )
+ {
+ printk( "hp100_probe1: card at port 0x%x isn't known (id = 0x%x)\n", ioaddr, eisa_id );
+ return -ENODEV;
+ }
+ eid = &hp100_eisa_ids[ i ];
+ if ( ( eid -> id & 0x0f000000 ) < ( eisa_id & 0x0f000000 ) )
+ {
+ printk( "hp100_probe1: newer version of card %s at port 0x%x - unsupported\n",
+ eid -> name, ioaddr );
+ return -ENODEV;
+ }
+
+ for ( i = uc = 0; i < 7; i++ )
+ uc += hp100_inb( LAN_ADDR + i );
+ if ( uc != 0xff )
+ {
+ printk( "hp100_probe1: bad lan address checksum (card %s at port 0x%x)\n",
+ eid -> name, ioaddr );
+ return -EIO;
+ }
+
+#ifndef HP100_IO_MAPPED
+ hp100_page( HW_MAP );
+ mem_mapped = ( hp100_inw( OPTION_LSW ) &
+ ( HP100_MEM_EN | HP100_BM_WRITE | HP100_BM_READ ) ) != 0;
+ mem_ptr_phys = mem_ptr_virt = NULL;
+ if ( mem_mapped )
+ {
+ mem_ptr_phys = (u_char *)( hp100_inw( MEM_MAP_LSW ) |
+ ( hp100_inw( MEM_MAP_MSW ) << 16 ) );
+      (u_int)mem_ptr_phys &= ~0x1fff;	/* 8k alignment */
+ if ( bus == HP100_BUS_ISA && ( (u_long)mem_ptr_phys & ~0xfffff ) != 0 )
+ {
+ mem_ptr_phys = NULL;
+ mem_mapped = 0;
+ }
+ if ( mem_mapped && bus == HP100_BUS_PCI )
+ {
+ if ( ( mem_ptr_virt = vremap( (u_long)mem_ptr_phys, 0x2000 ) ) == NULL )
+ {
+ printk( "hp100: vremap for high PCI memory at 0x%lx failed\n", (u_long)mem_ptr_phys );
+ mem_ptr_phys = NULL;
+ mem_mapped = 0;
+ }
+ }
+ }
+#else
+ mem_mapped = 0;
+ mem_ptr_phys = mem_ptr_virt = NULL;
+#endif
+
+ if ( ( dev -> priv = kmalloc( sizeof( struct hp100_private ), GFP_KERNEL ) ) == NULL )
+ return -ENOMEM;
+ memset( dev -> priv, 0, sizeof( struct hp100_private ) );
+
+ lp = (struct hp100_private *)dev -> priv;
+ lp -> id = eid;
+ lp -> mem_mapped = mem_mapped;
+ lp -> mem_ptr_phys = mem_ptr_phys;
+ lp -> mem_ptr_virt = mem_ptr_virt;
+ hp100_page( ID_MAC_ADDR );
+ lp -> soft_model = hp100_inb( SOFT_MODEL );
+ lp -> mac1_mode = HP100_MAC1MODE3;
+ lp -> mac2_mode = HP100_MAC2MODE3;
+
+ dev -> base_addr = ioaddr;
+ hp100_page( HW_MAP );
+ dev -> irq = hp100_inb( IRQ_CHANNEL ) & HP100_IRQ_MASK;
+ if ( dev -> irq == 2 ) dev -> irq = 9;
+ lp -> memory_size = 0x200 << ( ( hp100_inb( SRAM ) & 0xe0 ) >> 5 );
+ lp -> rx_ratio = hp100_rx_ratio;
+
+ dev -> open = hp100_open;
+ dev -> stop = hp100_close;
+ dev -> hard_start_xmit = hp100_start_xmit;
+ dev -> get_stats = hp100_get_stats;
+ dev -> set_multicast_list = &hp100_set_multicast_list;
+
+ request_region( dev -> base_addr, HP100_REGION_SIZE, eid -> name );
+
+ hp100_page( ID_MAC_ADDR );
+ for ( i = uc = 0; i < 6; i++ )
+ dev -> dev_addr[ i ] = hp100_inb( LAN_ADDR + i );
+
+ hp100_clear_stats( ioaddr );
+
+ ether_setup( dev );
+
+ lp -> lan_type = hp100_sense_lan( dev );
+
+ printk( "%s: %s at 0x%x, IRQ %d, ",
+ dev -> name, lp -> id -> name, ioaddr, dev -> irq );
+ switch ( bus ) {
+ case HP100_BUS_EISA: printk( "EISA" ); break;
+ case HP100_BUS_PCI: printk( "PCI" ); break;
+ default: printk( "ISA" ); break;
+ }
+ printk( " bus, %dk SRAM (rx/tx %d%%).\n",
+ lp -> memory_size >> ( 10 - 4 ), lp -> rx_ratio );
+ if ( mem_mapped )
+ {
+ printk( "%s: Memory area at 0x%lx-0x%lx",
+ dev -> name, (u_long)mem_ptr_phys, (u_long)mem_ptr_phys + 0x1fff );
+ if ( mem_ptr_virt )
+ printk( " (virtual base 0x%lx)", (u_long)mem_ptr_virt );
+ printk( ".\n" );
+ }
+ printk( "%s: ", dev -> name );
+ if ( lp -> lan_type != HP100_LAN_ERR )
+ printk( "Adapter is attached to " );
+ switch ( lp -> lan_type ) {
+ case HP100_LAN_100:
+ printk( "100Mb/s Voice Grade AnyLAN network.\n" );
+ break;
+ case HP100_LAN_10:
+ printk( "10Mb/s network.\n" );
+ break;
+ default:
+ printk( "Warning! Link down.\n" );
+ }
+
+ hp100_stop_interface( dev );
+
+ return 0;
+}
+
+/*
+ * open/close functions
+ */
+
+static int hp100_open( struct device *dev )
+{
+ int i;
+ int ioaddr = dev -> base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev -> priv;
+
+ if ( request_irq( dev -> irq, hp100_interrupt, SA_INTERRUPT, lp -> id -> name ) )
+ {
+ printk( "%s: unable to get IRQ %d\n", dev -> name, dev -> irq );
+ return -EAGAIN;
+ }
+ irq2dev_map[ dev -> irq ] = dev;
+
+ MOD_INC_USE_COUNT;
+
+ dev -> tbusy = 0;
+ dev -> trans_start = jiffies;
+ dev -> interrupt = 0;
+ dev -> start = 1;
+
+ lp -> lan_type = hp100_sense_lan( dev );
+ lp -> mac1_mode = HP100_MAC1MODE3;
+ lp -> mac2_mode = HP100_MAC2MODE3;
+
+ hp100_page( MAC_CTRL );
+ hp100_orw( HP100_LINK_BEAT_DIS | HP100_RESET_LB, LAN_CFG_10 );
+
+ hp100_stop_interface( dev );
+ hp100_load_eeprom( dev );
+
+ hp100_outw( HP100_MMAP_DIS | HP100_SET_HB |
+ HP100_IO_EN | HP100_SET_LB, OPTION_LSW );
+ hp100_outw( HP100_DEBUG_EN | HP100_RX_HDR | HP100_EE_EN | HP100_RESET_HB |
+ HP100_FAKE_INT | HP100_RESET_LB, OPTION_LSW );
+ hp100_outw( HP100_ADV_NXT_PKT | HP100_TX_CMD | HP100_RESET_LB |
+ HP100_PRIORITY_TX | ( hp100_priority_tx ? HP100_SET_HB : HP100_RESET_HB ),
+ OPTION_MSW );
+
+ hp100_page( MAC_ADDRESS );
+ for ( i = 0; i < 6; i++ )
+ hp100_outb( dev -> dev_addr[ i ], MAC_ADDR + i );
+ for ( i = 0; i < 8; i++ ) /* setup multicast filter to receive all */
+ hp100_outb( 0xff, HASH_BYTE0 + i );
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* ack IRQ */
+ hp100_outw( (HP100_RX_PACKET | HP100_RX_ERROR | HP100_SET_HB) |
+ (HP100_TX_ERROR | HP100_SET_LB ), IRQ_MASK );
+  /* and enable a few */
+ hp100_reset_card();
+ hp100_page( MMU_CFG );
+ hp100_outw( ( lp -> memory_size * lp -> rx_ratio ) / 100, RX_MEM_STOP );
+ hp100_outw( lp -> memory_size - 1, TX_MEM_STOP );
+ hp100_unreset_card();
+
+ if ( lp -> lan_type == HP100_LAN_100 )
+ lp -> hub_status = hp100_login_to_vg_hub( dev );
+
+ hp100_start_interface( dev );
+
+ return 0;
+}
+
+static int hp100_close( struct device *dev )
+{
+ int ioaddr = dev -> base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev -> priv;
+
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all IRQs */
+
+ hp100_stop_interface( dev );
+
+ if ( lp -> lan_type == HP100_LAN_100 ) /* relogin */
+ hp100_login_to_vg_hub( dev );
+
+ dev -> tbusy = 1;
+ dev -> start = 0;
+
+ free_irq( dev -> irq );
+ irq2dev_map[ dev -> irq ] = NULL;
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * transmit
+ */
+
+static int hp100_start_xmit( struct sk_buff *skb, struct device *dev )
+{
+ int i, ok_flag;
+ int ioaddr = dev -> base_addr;
+ u_short val;
+ struct hp100_private *lp = (struct hp100_private *)dev -> priv;
+
+ if ( lp -> lan_type < 0 )
+ {
+ hp100_stop_interface( dev );
+ if ( ( lp -> lan_type = hp100_sense_lan( dev ) ) < 0 )
+ {
+ printk( "%s: no connection found - check wire\n", dev -> name );
+      hp100_start_interface( dev );	/* 10Mb/s RX packets may still be handled */
+ return -EIO;
+ }
+ if ( lp -> lan_type == HP100_LAN_100 )
+ lp -> hub_status = hp100_login_to_vg_hub( dev );
+ hp100_start_interface( dev );
+ }
+
+ if ( ( i = ( hp100_inl( TX_MEM_FREE ) & ~0x7fffffff ) ) < skb -> len + 16 )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100_start_xmit: rx free mem = 0x%x\n", i );
+#endif
+ if ( jiffies - dev -> trans_start < 2 * HZ ) return -EAGAIN;
+ if ( lp -> lan_type == HP100_LAN_100 && lp -> hub_status < 0 )
+ /* 100Mb/s adapter isn't connected to hub */
+ {
+ printk( "%s: login to 100Mb/s hub retry\n", dev -> name );
+ hp100_stop_interface( dev );
+ lp -> hub_status = hp100_login_to_vg_hub( dev );
+ hp100_start_interface( dev );
+ }
+ else
+ {
+ hp100_ints_off();
+ i = hp100_sense_lan( dev );
+ hp100_page( PERFORMANCE );
+ hp100_ints_on();
+ if ( i == HP100_LAN_ERR )
+ printk( "%s: link down detected\n", dev -> name );
+ else
+ if ( lp -> lan_type != i )
+ {
+	  /* it's very heavy - all network settings must be changed!!! */
+ printk( "%s: cable change 10Mb/s <-> 100Mb/s detected\n", dev -> name );
+ lp -> lan_type = i;
+ hp100_stop_interface( dev );
+ if ( lp -> lan_type == HP100_LAN_100 )
+ lp -> hub_status = hp100_login_to_vg_hub( dev );
+ hp100_start_interface( dev );
+ }
+ else
+ {
+ printk( "%s: interface reset\n", dev -> name );
+ hp100_stop_interface( dev );
+ hp100_start_interface( dev );
+ }
+ }
+ dev -> trans_start = jiffies;
+ return -EAGAIN;
+ }
+
+ if ( skb == NULL )
+ {
+ dev_tint( dev );
+ return 0;
+ }
+
+ if ( skb -> len <= 0 ) return 0;
+
+ for ( i = 0; i < 6000 && ( hp100_inw( OPTION_MSW ) & HP100_TX_CMD ); i++ )
+ {
+#ifdef HP100_DEBUG_TX
+ printk( "hp100_start_xmit: busy\n" );
+#endif
+ }
+
+ hp100_ints_off();
+ val = hp100_inw( IRQ_STATUS );
+ hp100_outw( val & HP100_TX_COMPLETE, IRQ_STATUS );
+#ifdef HP100_DEBUG_TX
+ printk( "hp100_start_xmit: irq_status = 0x%x, len = %d\n", val, (int)skb -> len );
+#endif
+ ok_flag = skb -> len >= HP100_MIN_PACKET_SIZE;
+ i = ok_flag ? skb -> len : HP100_MIN_PACKET_SIZE;
+ hp100_outw( i, DATA32 ); /* length to memory manager */
+ hp100_outw( i, FRAGMENT_LEN );
+ if ( lp -> mem_mapped )
+ {
+ if ( lp -> mem_ptr_virt )
+ {
+ memcpy( lp -> mem_ptr_virt, skb -> data, skb -> len );
+ if ( !ok_flag )
+ memset( lp -> mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb -> len );
+ }
+ else
+ {
+ memcpy_toio( lp -> mem_ptr_phys, skb -> data, skb -> len );
+ if ( !ok_flag )
+ memset_io( lp -> mem_ptr_phys, 0, HP100_MIN_PACKET_SIZE - skb -> len );
+ }
+ }
+ else
+ {
+ outsl( ioaddr + HP100_REG_DATA32, skb -> data, ( skb -> len + 3 ) >> 2 );
+ if ( !ok_flag )
+ for ( i = ( skb -> len + 3 ) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4 )
+ hp100_outl( 0, DATA32 );
+ }
+ hp100_outw( HP100_TX_CMD | HP100_SET_LB, OPTION_MSW ); /* send packet */
+ lp -> stats.tx_packets++;
+ dev -> trans_start = jiffies;
+ hp100_ints_on();
+
+ dev_kfree_skb( skb, FREE_WRITE );
+
+#ifdef HP100_DEBUG_TX
+ printk( "hp100_start_xmit: end\n" );
+#endif
+
+ return 0;
+}
+
+/*
+ * receive - called from interrupt handler
+ */
+
+static void hp100_rx( struct device *dev )
+{
+ int packets, pkt_len;
+ int ioaddr = dev -> base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev -> priv;
+ u_int header;
+ struct sk_buff *skb;
+
+#if 0
+ if ( lp -> lan_type < 0 )
+ {
+ if ( ( lp -> lan_type = hp100_sense_lan( dev ) ) == HP100_LAN_100 )
+ lp -> hub_status = hp100_login_to_vg_hub( dev );
+ hp100_page( PERFORMANCE );
+ }
+#endif
+
+ packets = hp100_inb( RX_PKT_CNT );
+#ifdef HP100_DEBUG
+ if ( packets > 1 )
+ printk( "hp100_rx: waiting packets = %d\n", packets );
+#endif
+ while ( packets-- > 0 )
+ {
+ for ( pkt_len = 0; pkt_len < 6000 && ( hp100_inw( OPTION_MSW ) & HP100_ADV_NXT_PKT ); pkt_len++ )
+ {
+#ifdef HP100_DEBUG_TX
+ printk( "hp100_rx: busy, remaining packets = %d\n", packets );
+#endif
+ }
+ if ( lp -> mem_mapped )
+ {
+ if ( lp -> mem_ptr_virt )
+ header = *(__u32 *)lp -> mem_ptr_virt;
+ else
+ header = readl( lp -> mem_ptr_phys );
+ }
+ else
+ header = hp100_inl( DATA32 );
+ pkt_len = header & HP100_PKT_LEN_MASK;
+#ifdef HP100_DEBUG_RX
+ printk( "hp100_rx: new packet - length = %d, errors = 0x%x, dest = 0x%x\n",
+ header & HP100_PKT_LEN_MASK, ( header >> 16 ) & 0xfff8, ( header >> 16 ) & 7 );
+#endif
+ /*
+ * NOTE! This (and the skb_put() below) depends on the skb-functions
+ * allocating more than asked (notably, aligning the request up to
+ * the next 16-byte length).
+ */
+ skb = dev_alloc_skb( pkt_len );
+ if ( skb == NULL )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100_rx: couldn't allocate a sk_buff of size %d\n", pkt_len );
+#endif
+ lp -> stats.rx_dropped++;
+ }
+ else
+ {
+ u_char *ptr;
+
+ skb -> dev = dev;
+ ptr = (u_char *)skb_put( skb, pkt_len );
+ if ( lp -> mem_mapped )
+ {
+ if ( lp -> mem_ptr_virt )
+ memcpy( ptr, lp -> mem_ptr_virt, ( pkt_len + 3 ) & ~3 );
+ else
+ memcpy_fromio( ptr, lp -> mem_ptr_phys, ( pkt_len + 3 ) & ~3 );
+ }
+ else
+ insl( ioaddr + HP100_REG_DATA32, ptr, ( pkt_len + 3 ) >> 2 );
+ skb -> protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ lp -> stats.rx_packets++;
+#ifdef HP100_DEBUG_RX
+ printk( "rx: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ ptr[ 0 ], ptr[ 1 ], ptr[ 2 ], ptr[ 3 ], ptr[ 4 ], ptr[ 5 ],
+ ptr[ 6 ], ptr[ 7 ], ptr[ 8 ], ptr[ 9 ], ptr[ 10 ], ptr[ 11 ] );
+#endif
+ }
+ hp100_outw( HP100_ADV_NXT_PKT | HP100_SET_LB, OPTION_MSW );
+ switch ( header & 0x00070000 ) {
+ case (HP100_MULTI_ADDR_HASH<<16):
+ case (HP100_MULTI_ADDR_NO_HASH<<16):
+ lp -> stats.multicast++; break;
+ }
+ }
+#ifdef HP100_DEBUG_RX
+ printk( "hp100_rx: end\n" );
+#endif
+}
+
+/*
+ * statistics
+ */
+
+static struct enet_statistics *hp100_get_stats( struct device *dev )
+{
+ int ioaddr = dev -> base_addr;
+
+ hp100_ints_off();
+ hp100_update_stats( dev );
+ hp100_ints_on();
+ return &((struct hp100_private *)dev -> priv) -> stats;
+}
+
+static void hp100_update_stats( struct device *dev )
+{
+ int ioaddr = dev -> base_addr;
+ u_short val;
+ struct hp100_private *lp = (struct hp100_private *)dev -> priv;
+
+ hp100_page( MAC_CTRL ); /* get all statistics bytes */
+ val = hp100_inw( DROPPED ) & 0x0fff;
+ lp -> stats.rx_errors += val;
+ lp -> stats.rx_over_errors += val;
+ val = hp100_inb( CRC );
+ lp -> stats.rx_errors += val;
+ lp -> stats.rx_crc_errors += val;
+ val = hp100_inb( ABORT );
+ lp -> stats.tx_errors += val;
+ lp -> stats.tx_aborted_errors += val;
+ hp100_page( PERFORMANCE );
+}
+
+static void hp100_clear_stats( int ioaddr )
+{
+ cli();
+ hp100_page( MAC_CTRL ); /* get all statistics bytes */
+ hp100_inw( DROPPED );
+ hp100_inb( CRC );
+ hp100_inb( ABORT );
+ hp100_page( PERFORMANCE );
+ sti();
+}
+
+/*
+ * multicast setup
+ */
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ */
+
+static void hp100_set_multicast_list( struct device *dev)
+{
+ int ioaddr = dev -> base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev -> priv;
+
+#ifdef HP100_DEBUG_MULTI
+ printk( "hp100_set_multicast_list: num_addrs = %d\n", dev->mc_count);
+#endif
+ cli();
+ hp100_ints_off();
+ hp100_page( MAC_CTRL );
+ hp100_andb( ~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1 ); /* stop rx/tx */
+
+ if ( dev->flags&IFF_PROMISC)
+ {
+ lp -> mac2_mode = HP100_MAC2MODE6; /* promiscuous mode, all good */
+ lp -> mac1_mode = HP100_MAC1MODE6; /* packets on the net */
+ }
+ else
+ if ( dev->mc_count || dev->flags&IFF_ALLMULTI )
+ {
+ lp -> mac2_mode = HP100_MAC2MODE5; /* multicast mode, packets for me */
+ lp -> mac1_mode = HP100_MAC1MODE5; /* broadcasts and all multicasts */
+ }
+ else
+ {
+ lp -> mac2_mode = HP100_MAC2MODE3; /* normal mode, packets for me */
+ lp -> mac1_mode = HP100_MAC1MODE3; /* and broadcasts */
+ }
+
+ hp100_outb( lp -> mac2_mode, MAC_CFG_2 );
+ hp100_andb( HP100_MAC1MODEMASK, MAC_CFG_1 );
+ hp100_orb( lp -> mac1_mode |
+ HP100_RX_EN | HP100_RX_IDLE | /* enable rx */
+ HP100_TX_EN | HP100_TX_IDLE, MAC_CFG_1 ); /* enable tx */
+ hp100_page( PERFORMANCE );
+ hp100_ints_on();
+ sti();
+}
+
+/*
+ * hardware interrupt handling
+ */
+
+static void hp100_interrupt( int irq, struct pt_regs *regs )
+{
+ struct device *dev = (struct device *)irq2dev_map[ irq ];
+ struct hp100_private *lp;
+ int ioaddr;
+ u_short val;
+
+ if ( dev == NULL ) return;
+ ioaddr = dev -> base_addr;
+ if ( dev -> interrupt )
+ printk( "%s: re-entering the interrupt handler\n", dev -> name );
+ hp100_ints_off();
+ dev -> interrupt = 1;
+ hp100_page( PERFORMANCE );
+ val = hp100_inw( IRQ_STATUS );
+#ifdef HP100_DEBUG_IRQ
+ printk( "hp100_interrupt: irq_status = 0x%x\n", val );
+#endif
+ if ( val & HP100_RX_PACKET )
+ {
+ hp100_rx( dev );
+ hp100_outw( HP100_RX_PACKET, IRQ_STATUS );
+ }
+ if ( val & (HP100_TX_SPACE_AVAIL | HP100_TX_COMPLETE) )
+ {
+ hp100_outw( val & (HP100_TX_SPACE_AVAIL | HP100_TX_COMPLETE), IRQ_STATUS );
+ }
+ if ( val & ( HP100_TX_ERROR | HP100_RX_ERROR ) )
+ {
+ lp = (struct hp100_private *)dev -> priv;
+ hp100_update_stats( dev );
+ hp100_outw( val & (HP100_TX_ERROR | HP100_RX_ERROR), IRQ_STATUS );
+ }
+#ifdef HP100_DEBUG_IRQ
+ printk( "hp100_interrupt: end\n" );
+#endif
+ dev -> interrupt = 0;
+ hp100_ints_on();
+}
+
+/*
+ * some misc functions
+ */
+
+static void hp100_start_interface( struct device *dev )
+{
+ int ioaddr = dev -> base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev -> priv;
+
+ cli();
+ hp100_unreset_card();
+ hp100_page( MAC_CTRL );
+ hp100_outb( lp -> mac2_mode, MAC_CFG_2 );
+ hp100_andb( HP100_MAC1MODEMASK, MAC_CFG_1 );
+ hp100_orb( lp -> mac1_mode |
+ HP100_RX_EN | HP100_RX_IDLE |
+ HP100_TX_EN | HP100_TX_IDLE, MAC_CFG_1 );
+ hp100_page( PERFORMANCE );
+ hp100_outw( HP100_INT_EN | HP100_SET_LB, OPTION_LSW );
+ hp100_outw( HP100_TRI_INT | HP100_RESET_HB, OPTION_LSW );
+ if ( lp -> mem_mapped )
+ {
+ /* enable memory mapping */
+ hp100_outw( HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW );
+ }
+ sti();
+}
+
+static void hp100_stop_interface( struct device *dev )
+{
+ int ioaddr = dev -> base_addr;
+ u_short val;
+
+ hp100_outw( HP100_INT_EN | HP100_RESET_LB |
+ HP100_TRI_INT | HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW );
+ val = hp100_inw( OPTION_LSW );
+ hp100_page( HW_MAP );
+ hp100_andb( HP100_BM_SLAVE, BM );
+ hp100_page( MAC_CTRL );
+ hp100_andb( ~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1 );
+ if ( !(val & HP100_HW_RST) ) return;
+ for ( val = 0; val < 6000; val++ )
+ if ( ( hp100_inb( MAC_CFG_1 ) & (HP100_TX_IDLE | HP100_RX_IDLE) ) ==
+ (HP100_TX_IDLE | HP100_RX_IDLE) )
+ return;
+ printk( "%s: hp100_stop_interface - timeout\n", dev -> name );
+}
+
+static void hp100_load_eeprom( struct device *dev )
+{
+ int i;
+ int ioaddr = dev -> base_addr;
+
+ hp100_page( EEPROM_CTRL );
+ hp100_andw( ~HP100_EEPROM_LOAD, EEPROM_CTRL );
+ hp100_orw( HP100_EEPROM_LOAD, EEPROM_CTRL );
+ for ( i = 0; i < 6000; i++ )
+ if ( !( hp100_inw( OPTION_MSW ) & HP100_EE_LOAD ) ) return;
+ printk( "%s: hp100_load_eeprom - timeout\n", dev -> name );
+}
+
+/* return values: LAN_10, LAN_100 or LAN_ERR (not connected or hub is down)... */
+
+static int hp100_sense_lan( struct device *dev )
+{
+ int i;
+ int ioaddr = dev -> base_addr;
+ u_short val_VG, val_10;
+ struct hp100_private *lp = (struct hp100_private *)dev -> priv;
+
+ hp100_page( MAC_CTRL );
+ hp100_orw( HP100_VG_RESET, LAN_CFG_VG );
+ val_10 = hp100_inw( LAN_CFG_10 );
+ val_VG = hp100_inw( LAN_CFG_VG );
+#ifdef HP100_DEBUG_SENSE
+ printk( "hp100_sense_lan: val_VG = 0x%04x, val_10 = 0x%04x\n", val_VG, val_10 );
+#endif
+ if ( val_10 & HP100_LINK_BEAT_ST ) return HP100_LAN_10;
+  if ( lp -> id -> id == 0x02019F022 )	/* HP 27248B doesn't have a 100Mb/s interface */
+ return HP100_LAN_ERR;
+ for ( i = 0; i < 2500; i++ )
+ {
+ val_VG = hp100_inw( LAN_CFG_VG );
+ if ( val_VG & HP100_LINK_CABLE_ST ) return HP100_LAN_100;
+ }
+ return HP100_LAN_ERR;
+}
+
+static int hp100_down_vg_link( struct device *dev )
+{
+ int ioaddr = dev -> base_addr;
+ unsigned long time;
+ int i;
+
+ hp100_page( MAC_CTRL );
+ for ( i = 2500; i > 0; i-- )
+ if ( hp100_inw( LAN_CFG_VG ) & HP100_LINK_CABLE_ST ) break;
+  if ( i <= 0 )			/* no signal - no need to log out */
+ return 0;
+ hp100_andw( ~HP100_LINK_CMD, LAN_CFG_VG );
+ time = jiffies + 10;
+ while ( time > jiffies )
+ if ( !( hp100_inw( LAN_CFG_VG ) & ( HP100_LINK_UP_ST |
+ HP100_LINK_CABLE_ST |
+ HP100_LINK_GOOD_ST ) ) )
+ return 0;
+#ifdef HP100_DEBUG
+ printk( "hp100_down_vg_link: timeout\n" );
+#endif
+ return -EIO;
+}
+
+static int hp100_login_to_vg_hub( struct device *dev )
+{
+ int i;
+ int ioaddr = dev -> base_addr;
+ u_short val;
+ unsigned long time;
+
+ hp100_page( MAC_CTRL );
+ hp100_orw( HP100_VG_RESET, LAN_CFG_VG );
+ time = jiffies + ( HZ / 2 );
+ do {
+ if ( hp100_inw( LAN_CFG_VG ) & HP100_LINK_CABLE_ST ) break;
+ } while ( time > jiffies );
+ if ( time <= jiffies )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100_login_to_vg_hub: timeout for link\n" );
+#endif
+ return -EIO;
+ }
+
+  if ( hp100_down_vg_link( dev ) < 0 )	/* if that fails, try resetting the VG link */
+ {
+ hp100_andw( ~HP100_VG_RESET, LAN_CFG_VG );
+ hp100_orw( HP100_VG_RESET, LAN_CFG_VG );
+ }
+ /* bring up link */
+ hp100_orw( HP100_LOAD_ADDR | HP100_LINK_CMD, LAN_CFG_VG );
+ for ( i = 2500; i > 0; i-- )
+ if ( hp100_inw( LAN_CFG_VG ) & HP100_LINK_CABLE_ST ) break;
+ if ( i <= 0 )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100_login_to_vg_hub: timeout for link (bring up)\n" );
+#endif
+ goto down_link;
+ }
+
+ time = jiffies + ( HZ / 2 );
+ do {
+ val = hp100_inw( LAN_CFG_VG );
+ if ( ( val & ( HP100_LINK_UP_ST | HP100_LINK_GOOD_ST ) ) ==
+ ( HP100_LINK_UP_ST | HP100_LINK_GOOD_ST ) )
+ return 0; /* success */
+ } while ( time > jiffies );
+ if ( val & HP100_LINK_GOOD_ST )
+ printk( "%s: 100Mb cable training failed, check cable.\n", dev -> name );
+ else
+ printk( "%s: 100Mb node not accepted by hub, check frame type or security.\n", dev -> name );
+
+down_link:
+ hp100_down_vg_link( dev );
+ hp100_page( MAC_CTRL );
+ hp100_andw( ~( HP100_LOAD_ADDR | HP100_PROM_MODE ), LAN_CFG_VG );
+ hp100_orw( HP100_LINK_CMD, LAN_CFG_VG );
+ return -EIO;
+}
+
+/*
+ * module section
+ */
+
+#ifdef MODULE
+
+static int hp100_port = -1;
+
+static char devicename[9] = { 0, };
+static struct device dev_hp100 = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, hp100_probe
+};
+
+int init_module( void )
+{
+ if (hp100_port == 0 && !EISA_bus)
+ printk("HP100: You should not use auto-probing with insmod!\n");
+ if ( hp100_port > 0 )
+ dev_hp100.base_addr = hp100_port;
+ if ( register_netdev( &dev_hp100 ) != 0 )
+ return -EIO;
+ return 0;
+}
+
+void cleanup_module( void )
+{
+ unregister_netdev( &dev_hp100 );
+ release_region( dev_hp100.base_addr, HP100_REGION_SIZE );
+ if ( ((struct hp100_private *)dev_hp100.priv) -> mem_ptr_virt )
+ vfree( ((struct hp100_private *)dev_hp100.priv) -> mem_ptr_virt );
+ kfree_s( dev_hp100.priv, sizeof( struct hp100_private ) );
+ dev_hp100.priv = NULL;
+}
+
+#endif
diff --git a/i386/i386at/gpl/linux/net/hp100.h b/i386/i386at/gpl/linux/net/hp100.h
new file mode 100644
index 00000000..9f14f95a
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/hp100.h
@@ -0,0 +1,374 @@
+/*
+ * hp100.h: Hewlett Packard HP10/100VG ANY LAN ethernet driver for Linux.
+ *
+ * Author: Jaroslav Kysela, <perex@pf.jcu.cz>
+ *
+ * Header file...
+ *
+ * This driver is based on the 'hpfepkt' crynwr packet driver.
+ *
+ * This source/code is public free; you can distribute it and/or modify
+ * it under terms of the GNU General Public License (published by the
+ * Free Software Foundation) either version two of this License, or any
+ * later version.
+ */
+
+/****************************************************************************
+ * Hardware Constants
+ ****************************************************************************/
+
+/*
+ * ATT2MD01 Register Page Constants
+ */
+
+#define HP100_PAGE_PERFORMANCE 0x0 /* Page 0 */
+#define HP100_PAGE_MAC_ADDRESS 0x1 /* Page 1 */
+#define HP100_PAGE_HW_MAP 0x2 /* Page 2 */
+#define HP100_PAGE_EEPROM_CTRL 0x3 /* Page 3 */
+#define HP100_PAGE_MAC_CTRL 0x4 /* Page 4 */
+#define HP100_PAGE_MMU_CFG 0x5 /* Page 5 */
+#define HP100_PAGE_ID_MAC_ADDR 0x6 /* Page 6 */
+#define HP100_PAGE_MMU_POINTER 0x7 /* Page 7 */
+
+/*
+ * ATT2MD01 Register Addresses
+ */
+
+/* Present on all pages */
+
+#define HP100_REG_HW_ID 0x00 /* R: (16) Unique card ID */
+#define HP100_REG_TRACE 0x00 /* W: (16) Used for debug output */
+#define HP100_REG_PAGING 0x02 /* R: (16),15:4 Card ID */
+ /* W: (16),3:0 Switch pages */
+#define HP100_REG_OPTION_LSW 0x04 /* RW: (16) Select card functions */
+#define HP100_REG_OPTION_MSW 0x06 /* RW: (16) Select card functions */
+
+/* Page 0 - Performance */
+
+#define HP100_REG_IRQ_STATUS 0x08 /* RW: (16) Which ints are pending */
+#define HP100_REG_IRQ_MASK 0x0a /* RW: (16) Select ints to allow */
+#define HP100_REG_FRAGMENT_LEN 0x0c /* RW: (16)12:0 Current fragment len */
+#define HP100_REG_OFFSET 0x0e /* RW: (16)12:0 Offset to start read */
+#define HP100_REG_DATA32 0x10 /* RW: (32) I/O mode data port */
+#define HP100_REG_DATA16 0x12 /* RW: WORDs must be read from here */
+#define HP100_REG_TX_MEM_FREE 0x14 /* RD: (32) Amount of free Tx mem */
+#define HP100_REG_RX_PKT_CNT 0x18 /* RD: (8) Rx count of pkts on card */
+#define HP100_REG_TX_PKT_CNT 0x19 /* RD: (8) Tx count of pkts on card */
+
+/* Page 1 - MAC Address/Hash Table */
+
+#define HP100_REG_MAC_ADDR 0x08 /* RW: (8) Cards MAC address */
+#define HP100_REG_HASH_BYTE0 0x10 /* RW: (8) Cards multicast filter */
+
+/* Page 2 - Hardware Mapping */
+
+#define HP100_REG_MEM_MAP_LSW 0x08 /* RW: (16) LSW of cards mem addr */
+#define HP100_REG_MEM_MAP_MSW 0x0a /* RW: (16) MSW of cards mem addr */
+#define HP100_REG_IO_MAP 0x0c /* RW: (8) Cards I/O address */
+#define HP100_REG_IRQ_CHANNEL 0x0d /* RW: (8) IRQ and edge/level int */
+#define HP100_REG_SRAM 0x0e /* RW: (8) How much RAM on card */
+#define HP100_REG_BM 0x0f /* RW: (8) Controls BM functions */
+
+/* Page 3 - EEPROM/Boot ROM */
+
+#define HP100_REG_EEPROM_CTRL 0x08 /* RW: (16) Used to load EEPROM */
+
+/* Page 4 - LAN Configuration */
+
+#define HP100_REG_LAN_CFG_10 0x08 /* RW: (16) Set 10M XCVR functions */
+#define HP100_REG_LAN_CFG_VG 0x0a /* RW: (16) Set 100M XCVR functions */
+#define HP100_REG_MAC_CFG_1 0x0c /* RW: (8) Types of pkts to accept */
+#define HP100_REG_MAC_CFG_2 0x0d /* RW: (8) Misc MAC functions */
+/* The following registers clear when read: */
+#define HP100_REG_DROPPED	0x10	/* R: (16),11:0 Pkts that couldn't fit in mem */
+#define HP100_REG_CRC		0x12	/* R: (8) Pkts with CRC errors */
+#define HP100_REG_ABORT 0x13 /* R: (8) Aborted Tx pkts */
+
+/* Page 5 - MMU */
+
+#define HP100_REG_RX_MEM_STOP 0x0c /* RW: (16) End of Rx ring addr */
+#define HP100_REG_TX_MEM_STOP 0x0e /* RW: (16) End of Tx ring addr */
+
+/* Page 6 - Card ID/Physical LAN Address */
+
+#define HP100_REG_BOARD_ID 0x08 /* R: (8) EISA/ISA card ID */
+#define HP100_REG_BOARD_IO_CHCK 0x0c /* R: (8) Added to ID to get FFh */
+#define HP100_REG_SOFT_MODEL 0x0d /* R: (8) Config program defined */
+#define HP100_REG_LAN_ADDR 0x10 /* R: (8) MAC addr of card */
+#define HP100_REG_LAN_ADDR_CHCK 0x16 /* R: (8) Added to addr to get FFh */
+
+/* Page 7 - MMU Current Pointers */
+
+#define HP100_REG_RX_MEM_BR 0x08 /* R: (16) Current begin of Rx ring */
+#define HP100_REG_RX_MEM_ER 0x0a /* R: (16) Current end of Rx ring */
+#define HP100_REG_TX_MEM_BR 0x0c /* R: (16) Current begin of Tx ring */
+#define HP100_REG_TX_MEM_ER 0x0e /* R: (16) Current end of Rx ring */
+#define HP100_REG_MEM_DEBUG 0x1a /* RW: (16) Used for memory tests */
+
+/*
+ * HardwareIDReg bits/masks
+ */
+
+#define HP100_HW_ID_0 0x50 /* Hardware ID bytes. */
+#define HP100_HW_ID_1 0x48
+#define HP100_HW_ID_2_REVA 0x50 /* Rev. A ID. NOTE: lower nibble not used */
+#define HP100_HW_ID_3 0x53
+
+/*
+ * OptionLSWReg bits/masks
+ */
+
+#define HP100_DEBUG_EN 0x8000 /* 0:Disable, 1:Enable Debug Dump Pointer */
+#define HP100_RX_HDR 0x4000 /* 0:Disable, 1:Enable putting pkt into */
+ /* system memory before Rx interrupt */
+#define HP100_MMAP_DIS 0x2000 /* 0:Enable, 1:Disable memory mapping. */
+ /* MMAP_DIS must be 0 and MEM_EN must */
+ /* be 1 for memory-mapped mode to be */
+ /* enabled */
+#define HP100_EE_EN 0x1000 /* 0:Disable,1:Enable EEPROM writing */
+#define HP100_BM_WRITE 0x0800 /* 0:Slave, 1:Bus Master for Tx data */
+#define HP100_BM_READ 0x0400 /* 0:Slave, 1:Bus Master for Rx data */
+#define HP100_TRI_INT		0x0200	/* 0:Don't, 1:Do tri-state the int */
+#define HP100_MEM_EN 0x0040 /* Config program set this to */
+ /* 0:Disable, 1:Enable mem map. */
+ /* See MMAP_DIS. */
+#define HP100_IO_EN 0x0020 /* 0:Disable, 1:Enable I/O transfers */
+#define HP100_BOOT_EN 0x0010 /* 0:Disable, 1:Enable boot ROM access */
+#define HP100_FAKE_INT 0x0008 /* 0:No int, 1:int */
+#define HP100_INT_EN 0x0004 /* 0:Disable, 1:Enable ints from card */
+#define HP100_HW_RST 0x0002 /* 0:Reset, 1:Out of reset */
+
+/*
+ * OptionMSWReg bits/masks
+ */
+#define HP100_PRIORITY_TX 0x0080 /* 0:Don't, 1:Do all Tx pkts as priority */
+#define HP100_EE_LOAD 0x0040 /* 1:EEPROM loading, 0 when done */
+#define HP100_ADV_NXT_PKT 0x0004 /* 1:Advance to next pkt in Rx queue, */
+ /* h/w will set to 0 when done */
+#define HP100_TX_CMD 0x0002 /* 1:Tell h/w download done, h/w will set */
+ /* to 0 when done */
+
+/*
+ * InterruptStatusReg/InterruptMaskReg bits/masks.  These bits clear to 0 when
+ * a 1 is written to them.
+ */
+#define HP100_RX_PACKET 0x0400 /* 0:No, 1:Yes pkt has been Rx */
+#define HP100_RX_ERROR 0x0200 /* 0:No, 1:Yes Rx pkt had error */
+#define HP100_TX_SPACE_AVAIL 0x0010 /* 0:<8192, 1:>=8192 Tx free bytes */
+#define HP100_TX_COMPLETE 0x0008 /* 0:No, 1:Yes a Tx has completed */
+#define HP100_TX_ERROR 0x0002 /* 0:No, 1:Yes Tx pkt had error */
+
+/*
+ * TxMemoryFreeCountReg bits/masks.
+ */
+#define HP100_AUTO_COMPARE 0x8000 /* Says at least 8k is available for Tx. */
+ /* NOTE: This mask is for the upper */
+ /* word of the register. */
+
+/*
+ * IRQChannelReg bits/masks.
+ */
+#define HP100_ZERO_WAIT_EN	0x80	/* 0:No, 1:Yes asserts the NOWS signal */
+#define HP100_LEVEL_IRQ 0x10 /* 0:Edge, 1:Level type interrupts. */
+ /* Only valid on EISA cards. */
+#define HP100_IRQ_MASK 0x0F /* Isolate the IRQ bits */
+
+/*
+ * SRAMReg bits/masks.
+ */
+#define HP100_RAM_SIZE_MASK 0xe0 /* AND to get SRAM size index */
+#define HP100_RAM_SIZE_SHIFT 0x05 /* Shift count to put index in lower bits */
+
+/*
+ * BMReg bits/masks.
+ */
+#define HP100_BM_SLAVE 0x04 /* 0:Slave, 1:BM mode */
+
+/*
+ * EEPROMControlReg bits/masks.
+ */
+#define HP100_EEPROM_LOAD 0x0001 /* 0->1 loads the EEPROM into registers. */
+ /* When it goes back to 0, load is */
+ /* complete. This should take ~600us. */
+
+/*
+ * LANCntrCfg10Reg bits/masks.
+ */
+#define HP100_SQU_ST 0x0100 /* 0:No, 1:Yes collision signal sent */
+ /* after Tx. Only used for AUI. */
+#define HP100_MAC10_SEL 0x00c0 /* Get bits to indicate MAC */
+#define HP100_AUI_SEL 0x0020 /* Status of AUI selection */
+#define HP100_LOW_TH 0x0010 /* 0:No, 1:Yes allow better cabling */
+#define HP100_LINK_BEAT_DIS 0x0008 /* 0:Enable, 1:Disable link beat */
+#define HP100_LINK_BEAT_ST 0x0004 /* 0:No, 1:Yes link beat being Rx */
+#define HP100_R_ROL_ST 0x0002 /* 0:No, 1:Yes Rx twisted pair has been */
+ /* reversed */
+#define HP100_AUI_ST 0x0001 /* 0:No, 1:Yes use AUI on TP card */
+
+/* MAC Selection, use with MAC10_SEL bits */
+#define HP100_AUTO_SEL_10 0x0 /* Auto select */
+#define HP100_XCVR_LXT901_10 0x1 /* LXT901 10BaseT transceiver */
+#define HP100_XCVR_7213 0x2 /* 7213 transceiver */
+#define HP100_XCVR_82503 0x3 /* 82503 transceiver */
+
+
+/*
+ * LANCntrCfgVGReg bits/masks.
+ */
+#define HP100_FRAME_FORMAT 0x0800 /* 0:802.3, 1:802.5 frames */
+#define HP100_BRIDGE 0x0400 /* 0:No, 1:Yes tell hub it's a bridge */
+#define HP100_PROM_MODE 0x0200 /* 0:No, 1:Yes tell hub card is */
+ /* promiscuous */
+#define HP100_REPEATER 0x0100 /* 0:No, 1:Yes tell hub MAC wants to be */
+ /* a cascaded repeater */
+#define HP100_MAC100_SEL 0x0080 /* 0:No, 1:Yes use 100 Mbit MAC */
+#define HP100_LINK_UP_ST 0x0040 /* 0:No, 1:Yes endnode logged in */
+#define HP100_LINK_CABLE_ST 0x0020 /* 0:No, 1:Yes cable can hear tones from */
+ /* hub */
+#define HP100_LOAD_ADDR 0x0010 /* 0->1 card addr will be sent to hub. */
+ /* 100ms later the link status bits are */
+ /* valid */
+#define HP100_LINK_CMD 0x0008 /* 0->1 link will attempt to log in. */
+ /* 100ms later the link status bits are */
+ /* valid */
+#define HP100_LINK_GOOD_ST 0x0002 /* 0:No, 1:Yes cable passed training */
+#define HP100_VG_RESET 0x0001 /* 0:Yes, 1:No reset the 100VG MAC */
+
+
+/*
+ * MACConfiguration1Reg bits/masks.
+ */
+#define HP100_RX_IDLE 0x80 /* 0:Yes, 1:No currently receiving pkts */
+#define HP100_TX_IDLE 0x40 /* 0:Yes, 1:No currently Txing pkts */
+#define HP100_RX_EN 0x20 /* 0:No, 1:Yes allow receiving of pkts */
+#define HP100_TX_EN		0x10	/* 0:No, 1:Yes allow transmitting of pkts */
+#define HP100_ACC_ERRORED 0x08 /* 0:No, 1:Yes allow Rx of errored pkts */
+#define HP100_ACC_MC 0x04 /* 0:No, 1:Yes allow Rx of multicast pkts */
+#define HP100_ACC_BC 0x02 /* 0:No, 1:Yes allow Rx of broadcast pkts */
+#define HP100_ACC_PHY 0x01 /* 0:No, 1:Yes allow Rx of ALL physical pkts */
+
+#define HP100_MAC1MODEMASK 0xf0 /* Hide ACC bits */
+#define HP100_MAC1MODE1 0x00 /* Receive nothing, must also disable RX */
+#define HP100_MAC1MODE2 0x00
+#define HP100_MAC1MODE3	(HP100_MAC1MODE2 | HP100_ACC_BC)
+#define HP100_MAC1MODE4	(HP100_MAC1MODE3 | HP100_ACC_MC)
+#define HP100_MAC1MODE5	HP100_MAC1MODE4 /* set mc hash to all ones also */
+#define HP100_MAC1MODE6	(HP100_MAC1MODE5 | HP100_ACC_PHY) /* Promiscuous */
+
+/* Note MODE6 will receive all GOOD packets on the LAN. This really needs
+ a mode 7 defined to be LAN Analyzer mode, which will receive errored and
+ runt packets, and keep the CRC bytes. */
+
+#define HP100_MAC1MODE7	(HP100_MAC1MODE6 | HP100_ACC_ERRORED)
+
+/*
+ * MACConfiguration2Reg bits/masks.
+ */
+#define HP100_TR_MODE 0x80 /* 0:No, 1:Yes support Token Ring formats */
+#define HP100_TX_SAME 0x40 /* 0:No, 1:Yes Tx same packet continuous */
+#define HP100_LBK_XCVR 0x20 /* 0:No, 1:Yes loopback through MAC & */
+ /* transceiver */
+#define HP100_LBK_MAC 0x10 /* 0:No, 1:Yes loopback through MAC */
+#define HP100_CRC_I 0x08 /* 0:No, 1:Yes inhibit CRC on Tx packets */
+#define HP100_KEEP_CRC 0x02 /* 0:No, 1:Yes keep CRC on Rx packets. */
+ /* The length will reflect this. */
+
+#define HP100_MAC2MODEMASK 0x02
+#define HP100_MAC2MODE1 0x00
+#define HP100_MAC2MODE2 0x00
+#define HP100_MAC2MODE3 0x00
+#define HP100_MAC2MODE4 0x00
+#define HP100_MAC2MODE5 0x00
+#define HP100_MAC2MODE6 0x00
+#define HP100_MAC2MODE7	HP100_KEEP_CRC
+
+/*
+ * Set/Reset bits
+ */
+#define HP100_SET_HB		0x0100	/* Set the masked bits in the high byte */
+#define HP100_SET_LB		0x0001	/* Set the masked bits in the low byte */
+#define HP100_RESET_HB		0x0000	/* For readability when clearing high-byte bits */
+#define HP100_RESET_LB		0x0000	/* For readability when clearing low-byte bits */
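+
+/* Editor's note (illustrative): these act as set/clear selectors when a mask
+   is written to one of the OPTION registers.  For example,
+
+	hp100_outw( HP100_INT_EN | HP100_SET_LB, OPTION_LSW )
+
+   enables card interrupts, while HP100_INT_EN | HP100_RESET_LB disables them;
+   see the hp100_ints_on()/hp100_ints_off() macros at the end of this file. */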
+
+/*
+ * Misc. Constants
+ */
+#define HP100_LAN_100 100 /* lan_type value for VG */
+#define HP100_LAN_10 10 /* lan_type value for 10BaseT */
+#define HP100_LAN_ERR (-1) /* lan_type value for link down */
+
+/*
+ * Receive Header Definition.
+ */
+
+struct hp100_rx_header {
+ u_short rx_length; /* Pkt length is bits 12:0 */
+ u_short rx_status; /* status of the packet */
+};
+
+#define HP100_PKT_LEN_MASK 0x1FFF /* AND with RxLength to get length bits */
+
+/* Receive Packet Status. Note, the error bits are only valid if ACC_ERRORED
+ bit in the MAC Configuration Register 1 is set. */
+
+#define HP100_RX_PRI 0x8000 /* 0:No, 1:Yes packet is priority */
+#define HP100_SDF_ERR 0x4000 /* 0:No, 1:Yes start of frame error */
+#define HP100_SKEW_ERR 0x2000 /* 0:No, 1:Yes skew out of range */
+#define HP100_BAD_SYMBOL_ERR 0x1000 /* 0:No, 1:Yes invalid symbol received */
+#define HP100_RCV_IPM_ERR 0x0800 /* 0:No, 1:Yes pkt had an invalid packet */
+ /* marker */
+#define HP100_SYMBOL_BAL_ERR 0x0400 /* 0:No, 1:Yes symbol balance error */
+#define HP100_VG_ALN_ERR 0x0200 /* 0:No, 1:Yes non-octet received */
+#define HP100_TRUNC_ERR 0x0100 /* 0:No, 1:Yes the packet was truncated */
+#define HP100_RUNT_ERR 0x0040 /* 0:No, 1:Yes pkt length < Min Pkt */
+ /* Length Reg. */
+#define HP100_ALN_ERR 0x0010 /* 0:No, 1:Yes align error. */
+#define HP100_CRC_ERR		0x0008	/* 0:No, 1:Yes CRC error occurred. */
+
+/* The last three bits indicate the type of destination address */
+
+#define HP100_MULTI_ADDR_HASH 0x0006 /* 110: Addr multicast, matched hash */
+#define HP100_BROADCAST_ADDR 0x0003 /* x11: Addr broadcast */
+#define HP100_MULTI_ADDR_NO_HASH 0x0002 /* 010: Addr multicast, didn't match hash */
+#define HP100_PHYS_ADDR_MATCH 0x0001 /* x01: Addr was physical and mine */
+#define HP100_PHYS_ADDR_NO_MATCH 0x0000 /* x00: Addr was physical but not mine */
+
+/*
+ * macros
+ */
+
+#define hp100_inb( reg ) \
+ inb( ioaddr + HP100_REG_##reg )
+#define hp100_inw( reg ) \
+ inw( ioaddr + HP100_REG_##reg )
+#define hp100_inl( reg ) \
+ inl( ioaddr + HP100_REG_##reg )
+#define hp100_outb( data, reg ) \
+ outb( data, ioaddr + HP100_REG_##reg )
+#define hp100_outw( data, reg ) \
+ outw( data, ioaddr + HP100_REG_##reg )
+#define hp100_outl( data, reg ) \
+ outl( data, ioaddr + HP100_REG_##reg )
+#define hp100_orb( data, reg ) \
+ outb( inb( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_orw( data, reg ) \
+ outw( inw( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_andb( data, reg ) \
+ outb( inb( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+#define hp100_andw( data, reg ) \
+ outw( inw( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+
+#define hp100_page( page ) \
+ outw( HP100_PAGE_##page, ioaddr + HP100_REG_PAGING )
+#define hp100_ints_off() \
+ outw( HP100_INT_EN | HP100_RESET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_ints_on() \
+ outw( HP100_INT_EN | HP100_SET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_enable() \
+ outw( HP100_MMAP_DIS | HP100_RESET_HB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_disable() \
+ outw( HP100_MMAP_DIS | HP100_SET_HB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_reset_card() \
+ outw( HP100_HW_RST | HP100_RESET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_unreset_card() \
+ outw( HP100_HW_RST | HP100_SET_LB, ioaddr + HP100_REG_OPTION_LSW )
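+
+/* Editor's illustrative sketch, not part of the original header: the macros
+   above all expect a local variable named ioaddr holding the card's base I/O
+   address, e.g.
+
+	int ioaddr = dev -> base_addr;
+
+	hp100_page( MAC_CTRL );			  select register page 4
+	hp100_orw( HP100_VG_RESET, LAN_CFG_VG );  read-modify-write LAN_CFG_VG
+	hp100_page( PERFORMANCE );		  return to the performance page
+*/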
diff --git a/i386/i386at/gpl/linux/net/i82586.h b/i386/i386at/gpl/linux/net/i82586.h
new file mode 100644
index 00000000..ff229e98
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/i82586.h
@@ -0,0 +1,408 @@
+/*
+ * Intel 82586 IEEE 802.3 Ethernet LAN Coprocessor.
+ *
+ * See:
+ * Intel Microcommunications 1991
+ * p1-1 to p1-37
+ * Intel order No. 231658
+ * ISBN 1-55512-119-5
+ *
+ * Unfortunately, the above chapter mentions neither
+ * the System Configuration Pointer (SCP) nor the
+ * Intermediate System Configuration Pointer (ISCP),
+ * so we probably need to look elsewhere for the
+ * whole story -- some recommend the "Intel LAN
+ * Components manual" but I have neither a copy
+ * nor a full reference. But "elsewhere" may be
+ * in the same publication...
+ * The description of a later device, the
+ * "82596CA High-Performance 32-Bit Local Area Network
+ * Coprocessor", (ibid. p1-38 to p1-109) does mention
+ * the SCP and ISCP and also has an i82586 compatibility
+ * mode. Even more useful is "AP-235 An 82586 Data Link
+ * Driver" (ibid. p1-337 to p1-417).
+ */
+
+#define I82586_MEMZ (64 * 1024)
+
+#define I82586_SCP_ADDR (I82586_MEMZ - sizeof(scp_t))
+
+#define ADDR_LEN 6
+#define I82586NULL 0xFFFF
+
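+/*
+ * toff(t, p, f) evaluates to p + offsetof(t, f) as an unsigned short, i.e.
+ * the 16-bit offset of field `f' when a structure of type `t' is placed at
+ * offset `p' within the 82586's shared memory window.
+ */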
+#define toff(t,p,f) (unsigned short)((void *)(&((t *)((void *)0 + (p)))->f) - (void *)0)
+
+/*
+ * System Configuration Pointer (SCP).
+ */
+typedef struct scp_t scp_t;
+struct scp_t
+{
+ unsigned short scp_sysbus; /* 82586 bus width: */
+#define SCP_SY_16BBUS (0x0 << 0) /* 16 bits */
+#define SCP_SY_8BBUS (0x1 << 0) /* 8 bits. */
+ unsigned short scp_junk[2]; /* Unused */
+ unsigned short scp_iscpl; /* lower 16 bits of ISCP_ADDR */
+ unsigned short scp_iscph; /* upper 16 bits of ISCP_ADDR */
+};
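+
+/* A minimal sketch of filling in the SCP, assuming `iscp_addr' is the
+   linear address chosen for the ISCP: */
+#if 0
+	scp->scp_sysbus = SCP_SY_16BBUS;		/* 16-bit system bus */
+	scp->scp_iscpl  = iscp_addr & 0xFFFF;		/* lower 16 bits */
+	scp->scp_iscph  = (iscp_addr >> 16) & 0xFFFF;	/* upper 16 bits */
+#endif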
+
+/*
+ * Intermediate System Configuration Pointer (ISCP).
+ */
+typedef struct iscp_t iscp_t;
+struct iscp_t
+{
+ unsigned short iscp_busy; /* set by CPU before first CA, */
+ /* cleared by 82586 after read. */
+ unsigned short iscp_offset; /* offset of SCB */
+ unsigned short iscp_basel; /* base of SCB */
+ unsigned short iscp_baseh; /* " */
+};
+
+/*
+ * System Control Block (SCB).
+ * The 82586 writes its status to scb_status and then
+ * raises an interrupt to alert the CPU.
+ * The CPU writes a command to scb_command and
+ * then issues a Channel Attention (CA) to alert the 82586.
+ */
+typedef struct scb_t scb_t;
+struct scb_t
+{
+ unsigned short scb_status; /* Status of 82586 */
+#define SCB_ST_INT (0xF << 12) /* Some of: */
+#define SCB_ST_CX (0x1 << 15) /* Cmd completed */
+#define SCB_ST_FR (0x1 << 14) /* Frame received */
+#define SCB_ST_CNA (0x1 << 13) /* Cmd unit not active */
+#define SCB_ST_RNR (0x1 << 12) /* Rcv unit not ready */
+#define SCB_ST_JUNK0 (0x1 << 11) /* 0 */
+#define SCB_ST_CUS (0x7 << 8) /* Cmd unit status */
+#define SCB_ST_CUS_IDLE (0 << 8) /* Idle */
+#define SCB_ST_CUS_SUSP (1 << 8) /* Suspended */
+#define SCB_ST_CUS_ACTV (2 << 8) /* Active */
+#define SCB_ST_JUNK1 (0x1 << 7) /* 0 */
+#define SCB_ST_RUS (0x7 << 4) /* Rcv unit status */
+#define SCB_ST_RUS_IDLE (0 << 4) /* Idle */
+#define SCB_ST_RUS_SUSP (1 << 4) /* Suspended */
+#define SCB_ST_RUS_NRES (2 << 4) /* No resources */
+#define SCB_ST_RUS_RDY (4 << 4) /* Ready */
+ unsigned short scb_command; /* Next command */
+#define SCB_CMD_ACK_CX (0x1 << 15) /* Ack cmd completion */
+#define SCB_CMD_ACK_FR (0x1 << 14) /* Ack frame received */
+#define SCB_CMD_ACK_CNA (0x1 << 13) /* Ack CU not active */
+#define SCB_CMD_ACK_RNR (0x1 << 12) /* Ack RU not ready */
+#define SCB_CMD_JUNKX (0x1 << 11) /* Unused */
+#define SCB_CMD_CUC (0x7 << 8) /* Command Unit command */
+#define SCB_CMD_CUC_NOP (0 << 8) /* Nop */
+#define SCB_CMD_CUC_GO (1 << 8) /* Start cbl_offset */
+#define SCB_CMD_CUC_RES (2 << 8) /* Resume execution */
+#define SCB_CMD_CUC_SUS (3 << 8) /* Suspend " */
+#define SCB_CMD_CUC_ABT (4 << 8) /* Abort " */
+#define SCB_CMD_RESET (0x1 << 7) /* Reset chip (hardware) */
+#define SCB_CMD_RUC (0x7 << 4) /* Receive Unit command */
+#define SCB_CMD_RUC_NOP (0 << 4) /* Nop */
+#define SCB_CMD_RUC_GO (1 << 4) /* Start rfa_offset */
+#define SCB_CMD_RUC_RES (2 << 4) /* Resume reception */
+#define SCB_CMD_RUC_SUS (3 << 4) /* Suspend " */
+#define SCB_CMD_RUC_ABT (4 << 4) /* Abort " */
+ unsigned short scb_cbl_offset; /* Offset of first command unit */
+ /* Action Command */
+ unsigned short scb_rfa_offset; /* Offset of first Receive */
+ /* Frame Descriptor in the */
+ /* Receive Frame Area */
+ unsigned short scb_crcerrs; /* Properly aligned frames */
+ /* received with a CRC error */
+ unsigned short scb_alnerrs; /* Misaligned frames received */
+ /* with a CRC error */
+ unsigned short scb_rscerrs; /* Frames lost due to no space */
+ unsigned short scb_ovrnerrs; /* Frames lost due to slow bus */
+};
+
+#define scboff(p,f) toff(scb_t, p, f)
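+
+/* Illustrative sketch of the CPU side of the handshake described above,
+   assuming `scb' points at the SCB in shared memory and `chan_attn()' is a
+   hypothetical routine that pulses the 82586's Channel Attention line: */
+#if 0
+	unsigned short events = scb->scb_status & SCB_ST_INT;
+
+	scb->scb_command = events;	/* ack exactly the reported events */
+	chan_attn();			/* alert the 82586 to the new command */
+#endif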
+
+/*
+ * The eight Action Commands.
+ */
+typedef enum acmd_e acmd_e;
+enum acmd_e
+{
+ acmd_nop = 0, /* Do nothing */
+ acmd_ia_setup = 1, /* Load an (ethernet) address into the */
+ /* 82586 */
+ acmd_configure = 2, /* Update the 82586 operating parameters */
+ acmd_mc_setup = 3, /* Load a list of (ethernet) multicast */
+ /* addresses into the 82586 */
+ acmd_transmit = 4, /* Transmit a frame */
+ acmd_tdr = 5, /* Perform a Time Domain Reflectometer */
+ /* test on the serial link */
+ acmd_dump = 6, /* Copy 82586 registers to memory */
+ acmd_diagnose = 7, /* Run an internal self test */
+};
+
+/*
+ * Generic Action Command header.
+ */
+typedef struct ach_t ach_t;
+struct ach_t
+{
+ unsigned short ac_status; /* Command status: */
+#define AC_SFLD_C (0x1 << 15) /* Command completed */
+#define AC_SFLD_B (0x1 << 14) /* Busy executing */
+#define AC_SFLD_OK (0x1 << 13) /* Completed error free */
+#define AC_SFLD_A (0x1 << 12) /* Command aborted */
+#define AC_SFLD_FAIL (0x1 << 11) /* Selftest failed */
+#define AC_SFLD_S10 (0x1 << 10) /* No carrier sense */
+ /* during transmission */
+#define AC_SFLD_S9 (0x1 << 9) /* Tx unsuccessful: */
+ /* (stopped) lost CTS */
+#define AC_SFLD_S8 (0x1 << 8) /* Tx unsuccessful: */
+ /* (stopped) slow DMA */
+#define AC_SFLD_S7 (0x1 << 7) /* Tx deferred: */
+ /* other link traffic */
+#define AC_SFLD_S6 (0x1 << 6) /* Heart Beat: collision */
+ /* detect after last tx */
+#define AC_SFLD_S5 (0x1 << 5) /* Tx stopped: */
+ /* excessive collisions */
+#define AC_SFLD_MAXCOL (0xF << 0) /* Collision count */
+ unsigned short ac_command; /* Command specifier: */
+#define AC_CFLD_EL (0x1 << 15) /* End of command list */
+#define AC_CFLD_S (0x1 << 14) /* Suspend on completion */
+#define AC_CFLD_I (0x1 << 13) /* Interrupt on completion */
+#define AC_CFLD_CMD (0x7 << 0) /* acmd_e */
+ unsigned short ac_link; /* Next Action Command */
+};
+
+#define acoff(p,f) toff(ach_t, p, f)
+
+/*
+ * The Nop Action Command.
+ */
+typedef struct ac_nop_t ac_nop_t;
+struct ac_nop_t
+{
+ ach_t nop_h;
+};
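+
+/* Illustrative sketch, assuming `nop' points at an ac_nop_t in shared
+   memory: a no-op command that interrupts on completion and terminates
+   the command list. */
+#if 0
+	nop->nop_h.ac_status  = 0;
+	nop->nop_h.ac_command = AC_CFLD_EL | AC_CFLD_I | acmd_nop;
+	nop->nop_h.ac_link    = I82586NULL;
+#endif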
+
+/*
+ * The IA-Setup Action Command.
+ */
+typedef struct ac_ias_t ac_ias_t;
+struct ac_ias_t
+{
+ ach_t ias_h;
+ unsigned char ias_addr[ADDR_LEN]; /* The (ethernet) address */
+};
+
+/*
+ * The Configure Action Command.
+ */
+typedef struct ac_cfg_t ac_cfg_t;
+struct ac_cfg_t
+{
+ ach_t cfg_h;
+ unsigned char cfg_byte_cnt; /* Size foll data: 4-12 */
+#define AC_CFG_BYTE_CNT(v) (((v) & 0xF) << 0)
+ unsigned char cfg_fifolim; /* FIFO threshold */
+#define AC_CFG_FIFOLIM(v) (((v) & 0xF) << 0)
+ unsigned char cfg_byte8;
+#define AC_CFG_SAV_BF(v) (((v) & 0x1) << 7) /* Save rxd bad frames */
+#define AC_CFG_SRDY(v) (((v) & 0x1) << 6) /* SRDY/ARDY pin means */
+ /* external sync. */
+ unsigned char cfg_byte9;
+#define AC_CFG_ELPBCK(v) (((v) & 0x1) << 7) /* External loopback */
+#define AC_CFG_ILPBCK(v) (((v) & 0x1) << 6) /* Internal loopback */
+#define AC_CFG_PRELEN(v) (((v) & 0x3) << 4) /* Preamble length */
+#define AC_CFG_PLEN_2 0 /* 2 bytes */
+#define AC_CFG_PLEN_4 1 /* 4 bytes */
+#define AC_CFG_PLEN_8 2 /* 8 bytes */
+#define AC_CFG_PLEN_16 3 /* 16 bytes */
+#define AC_CFG_ALOC(v) (((v) & 0x1) << 3) /* Addr/len data is */
+ /* explicit in buffers */
+#define AC_CFG_ADDRLEN(v) (((v) & 0x7) << 0) /* Bytes per address */
+ unsigned char cfg_byte10;
+#define AC_CFG_BOFMET(v) (((v) & 0x1) << 7) /* Use alternate expo. */
+ /* backoff method */
+#define AC_CFG_ACR(v) (((v) & 0x7) << 4) /* Accelerated cont. res. */
+#define AC_CFG_LINPRIO(v) (((v) & 0x7) << 0) /* Linear priority */
+ unsigned char cfg_ifs; /* Interframe spacing */
+ unsigned char cfg_slotl; /* Slot time (low byte) */
+ unsigned char cfg_byte13;
+#define AC_CFG_RETRYNUM(v) (((v) & 0xF) << 4) /* Max. collision retry */
+#define AC_CFG_SLTTMHI(v) (((v) & 0x7) << 0) /* Slot time (high bits) */
+ unsigned char cfg_byte14;
+#define AC_CFG_FLGPAD(v) (((v) & 0x1) << 7) /* Pad with HDLC flags */
+#define AC_CFG_BTSTF(v) (((v) & 0x1) << 6) /* Do HDLC bitstuffing */
+#define AC_CFG_CRC16(v) (((v) & 0x1) << 5) /* 16 bit CCITT CRC */
+#define AC_CFG_NCRC(v) (((v) & 0x1) << 4) /* Insert no CRC */
+#define AC_CFG_TNCRS(v) (((v) & 0x1) << 3) /* Tx even if no carrier */
+#define AC_CFG_MANCH(v) (((v) & 0x1) << 2) /* Manchester coding */
+#define AC_CFG_BCDIS(v) (((v) & 0x1) << 1) /* Disable broadcast */
+#define AC_CFG_PRM(v) (((v) & 0x1) << 0) /* Promiscuous mode */
+ unsigned char cfg_byte15;
+#define AC_CFG_ICDS(v) (((v) & 0x1) << 7) /* Internal collision */
+ /* detect source */
+#define AC_CFG_CDTF(v) (((v) & 0x7) << 4) /* Collision detect */
+ /* filter in bit times */
+#define AC_CFG_ICSS(v) (((v) & 0x1) << 3) /* Internal carrier */
+ /* sense source */
+#define AC_CFG_CSTF(v) (((v) & 0x7) << 0) /* Carrier sense */
+ /* filter in bit times */
+ unsigned short cfg_min_frm_len;
+#define AC_CFG_MNFRM(v) (((v) & 0xFF) << 0) /* Min. bytes/frame (<= 255) */
+};
+
+/*
+ * The MC-Setup Action Command.
+ */
+typedef struct ac_mcs_t ac_mcs_t;
+struct ac_mcs_t
+{
+ ach_t mcs_h;
+ unsigned short mcs_cnt; /* No. of bytes of MC addresses */
+ unsigned short mcs_data[3]; /* The first MC address .. */
+};
+
+/*
+ * The Transmit Action Command.
+ */
+typedef struct ac_tx_t ac_tx_t;
+struct ac_tx_t
+{
+ ach_t tx_h;
+ unsigned short tx_tbd_offset; /* Address of list of buffers. */
+#if 0
+Linux packets are passed down with the destination MAC address
+and length/type field already prepended to the data,
+so we do not need to insert it. Consistent with this
+we must also set the AC_CFG_ALOC(..) flag during the
+ac_cfg_t action command.
+ unsigned char tx_addr[ADDR_LEN]; /* The frame dest. address */
+ unsigned short tx_length; /* The frame length */
+#endif /* 0 */
+};
+
+/*
+ * The Time Domain Reflectometer Action Command.
+ */
+typedef struct ac_tdr_t ac_tdr_t;
+struct ac_tdr_t
+{
+ ach_t tdr_h;
+ unsigned short tdr_result; /* Result. */
+#define AC_TDR_LNK_OK (0x1 << 15) /* No link problem */
+#define AC_TDR_XCVR_PRB (0x1 << 14) /* Txcvr cable problem */
+#define AC_TDR_ET_OPN (0x1 << 13) /* Open on the link */
+#define AC_TDR_ET_SRT (0x1 << 12) /* Short on the link */
+#define AC_TDR_TIME (0x7FF << 0) /* Distance to problem */
+ /* site in transmit */
+ /* clock cycles */
+};
+
+/*
+ * The Dump Action Command.
+ */
+typedef struct ac_dmp_t ac_dmp_t;
+struct ac_dmp_t
+{
+ ach_t dmp_h;
+ unsigned short dmp_offset; /* Result. */
+};
+
+/*
+ * Size of the result of the dump command.
+ */
+#define DUMPBYTES 170
+
+/*
+ * The Diagnose Action Command.
+ */
+typedef struct ac_dgn_t ac_dgn_t;
+struct ac_dgn_t
+{
+ ach_t dgn_h;
+};
+
+/*
+ * Transmit Buffer Descriptor (TBD).
+ */
+typedef struct tbd_t tbd_t;
+struct tbd_t
+{
+ unsigned short tbd_status; /* Written by the CPU */
+#define TBD_STATUS_EOF (0x1 << 15) /* This TBD is the */
+ /* last for this frame */
+#define TBD_STATUS_ACNT (0x3FFF << 0) /* Actual count of data */
+ /* bytes in this buffer */
+ unsigned short tbd_next_bd_offset; /* Next in list */
+ unsigned short tbd_bufl; /* Buffer address (low) */
+ unsigned short tbd_bufh; /* " " (high) */
+};
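+
+/* Illustrative sketch, assuming `tbd' describes a single buffer of `len'
+   data bytes at 24-bit address `buf' and is the only descriptor of the
+   frame: */
+#if 0
+	tbd->tbd_status         = TBD_STATUS_EOF | (len & TBD_STATUS_ACNT);
+	tbd->tbd_next_bd_offset = I82586NULL;
+	tbd->tbd_bufl           = buf & 0xFFFF;
+	tbd->tbd_bufh           = (buf >> 16) & 0xFFFF;
+#endif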
+
+/*
+ * Receive Buffer Descriptor (RBD).
+ */
+typedef struct rbd_t rbd_t;
+struct rbd_t
+{
+ unsigned short rbd_status; /* Written by the 82586 */
+#define RBD_STATUS_EOF (0x1 << 15) /* This RBD is the */
+ /* last for this frame */
+#define RBD_STATUS_F (0x1 << 14) /* ACNT field is valid */
+#define RBD_STATUS_ACNT (0x3FFF << 0) /* Actual no. of data */
+ /* bytes in this buffer */
+ unsigned short rbd_next_rbd_offset; /* Next rbd in list */
+ unsigned short rbd_bufl; /* Data pointer (low) */
+ unsigned short rbd_bufh; /* " " (high) */
+ unsigned short rbd_el_size; /* EL+Data buf. size */
+#define RBD_EL (0x1 << 15) /* This BD is the */
+ /* last in the list */
+#define RBD_SIZE (0x3FFF << 0) /* No. of bytes the */
+ /* buffer can hold */
+};
+
+#define rbdoff(p,f) toff(rbd_t, p, f)
+
+/*
+ * Frame Descriptor (FD).
+ */
+typedef struct fd_t fd_t;
+struct fd_t
+{
+ unsigned short fd_status; /* Written by the 82586 */
+#define FD_STATUS_C (0x1 << 15) /* Completed storing frame */
+#define FD_STATUS_B (0x1 << 14) /* FD was consumed by RU */
+#define FD_STATUS_OK (0x1 << 13) /* Frame rxd successfully */
+#define FD_STATUS_S11 (0x1 << 11) /* CRC error */
+#define FD_STATUS_S10 (0x1 << 10) /* Alignment error */
+#define FD_STATUS_S9 (0x1 << 9) /* Ran out of resources */
+#define FD_STATUS_S8 (0x1 << 8) /* Rx DMA overrun */
+#define FD_STATUS_S7 (0x1 << 7) /* Frame too short */
+#define FD_STATUS_S6 (0x1 << 6) /* No EOF flag */
+ unsigned short fd_command; /* Command */
+#define FD_COMMAND_EL (0x1 << 15) /* Last FD in list */
+#define FD_COMMAND_S (0x1 << 14) /* Suspend RU after rx */
+ unsigned short fd_link_offset; /* Next FD */
+ unsigned short fd_rbd_offset; /* First RBD (data) */
+ /* Prepared by CPU, */
+ /* updated by 82586 */
+#if 0
+I think the rest is unused since we
+have set AC_CFG_ALOC(..). However, just
+in case, we leave the space.
+#endif /* 0 */
+ unsigned char fd_dest[ADDR_LEN]; /* Destination address */
+ /* Written by 82586 */
+ unsigned char fd_src[ADDR_LEN]; /* Source address */
+ /* Written by 82586 */
+ unsigned short fd_length; /* Frame length or type */
+ /* Written by 82586 */
+};
+
+#define fdoff(p,f) toff(fd_t, p, f)
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU Public License.
+ *
+ * For more details, see wavelan.c.
+ */
diff --git a/i386/i386at/gpl/linux/net/iow.h b/i386/i386at/gpl/linux/net/iow.h
new file mode 100644
index 00000000..6e15688f
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/iow.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_IOW_H
+#define _ASM_IOW_H
+
+/* no longer used */
+
+#endif
diff --git a/i386/i386at/gpl/linux/net/lance.c b/i386/i386at/gpl/linux/net/lance.c
new file mode 100644
index 00000000..4a388f77
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/lance.c
@@ -0,0 +1,1129 @@
+/* lance.c: An AMD LANCE ethernet driver for linux. */
+/*
+ Written 1993,1994,1995 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
+ with most other LANCE-based bus-master (NE2100 clone) ethercards.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+*/
+
+static const char *version = "lance.c:v1.08 4/10/95 dplatt@3do.com\n";
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+static unsigned int lance_portlist[] = {0x300, 0x320, 0x340, 0x360, 0};
+void lance_probe1(int ioaddr);
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry lance_drv =
+{"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
+#endif
+
+#ifdef LANCE_DEBUG
+int lance_debug = LANCE_DEBUG;
+#else
+int lance_debug = 1;
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the AMD 79C960, the "PCnet-ISA
+single-chip ethernet controller for ISA". This chip is used in a wide
+variety of boards from vendors such as Allied Telesis, HP, Kingston,
+and Boca. This driver is also intended to work with older AMD 7990
+designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
+I use the name LANCE to refer to all of the AMD chips, even though it properly
+refers only to the original 7990.
+
+II. Board-specific settings
+
+The driver is designed to work with boards that use the faster
+bus-master mode, rather than the shared memory mode. (Only older designs
+have on-board buffer memory needed to support the slower shared memory mode.)
+
+Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
+channel. This driver probes the likely base addresses:
+{0x300, 0x320, 0x340, 0x360}.
+After the board is found it generates a DMA-timeout interrupt and uses
+autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
+of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
+probed for by enabling each free DMA channel in turn and checking if
+initialization succeeds.
+
+The HP-J2405A board is an exception: with this board it's easy to read the
+EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
+_know_ the base address -- that field is for writing the EEPROM.)
+
+III. Driver operation
+
+IIIa. Ring buffers
+The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
+the base and length of the data buffer, along with status bits. The length
+of these rings is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
+the ring length (rather than the ring length itself) for implementation
+ease. The values used here are 4 (Tx) and 4 (Rx), which leads to
+ring sizes of 16 (Tx) and 16 (Rx). Increasing the number of ring entries
+needlessly uses extra space and reduces the chance that an upper layer will
+be able to reorder queued Tx packets based on priority. Decreasing the number
+of entries makes it more difficult to achieve back-to-back packet transmission
+and increases the chance that Rx ring will overflow. (Consider the worst case
+of receiving back-to-back minimum-sized packets.)
+
+The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
+statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
+avoid the administrative overhead. For the Rx side this avoids dynamically
+allocating full-sized buffers "just in case", at the expense of a
+memory-to-memory data copy for each packet received. For most systems this
+is a good tradeoff: the Rx buffer will always be in low memory, the copy
+is inexpensive, and it primes the cache for later packet processing. For Tx
+the buffers are only used when needed as low-memory bounce buffers.
+
+IIIB. 16M memory limitations.
+For the ISA bus master mode all structures used directly by the LANCE,
+the initialization block, Rx and Tx rings, and data buffers, must be
+accessible from the ISA bus, i.e. in the lower 16M of real memory.
+This is a problem for current Linux kernels on >16M machines. The network
+devices are initialized after memory initialization, and the kernel doles out
+memory from the top of memory downward. The current solution is to have a
+special network initialization routine that's called before memory
+initialization; this will eventually be generalized for all network devices.
+As mentioned before, low-memory "bounce-buffers" are used when needed.
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+we can't avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+*/
+
+/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
+   The defaults below are 16 Tx buffers and 16 Rx buffers.
+   That translates to a log value of 4 for each (16 == 2^4). */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
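+
+/* The << 29 above places the log2 ring size in the top three bits of the
+   32-bit word that also carries the ring base address, which is how the
+   LANCE initialization block (struct lance_init_block below) encodes the
+   ring lengths. */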
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+#define LANCE_DATA 0x10
+#define LANCE_ADDR 0x12
+#define LANCE_RESET 0x14
+#define LANCE_BUS_IF 0x16
+#define LANCE_TOTAL_SIZE 0x18
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ int base;
+ short buf_length; /* This length is 2s complement (negative)! */
+ short msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ int base;
+ short length; /* Length is 2s complement (negative)! */
+ short misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode (reg. 15) */
+ unsigned char phys_addr[6]; /* Physical ethernet address */
+ unsigned filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with extra bits. */
+ unsigned rx_ring; /* Tx and Rx ring base pointers */
+ unsigned tx_ring;
+};
+
+struct lance_private {
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
+ This is always true for kmalloc'ed memory */
+ struct lance_rx_head rx_ring[RX_RING_SIZE];
+ struct lance_tx_head tx_ring[TX_RING_SIZE];
+ struct lance_init_block init_block;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ long rx_buffs; /* Address of Rx and Tx buffers. */
+ /* Tx low-memory "bounce buffer" address. */
+ char (*tx_bounce_buffs)[PKT_BUF_SZ];
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ int dma;
+ struct enet_statistics stats;
+ unsigned char chip_version; /* See lance_chip_type. */
+ char tx_full;
+ char lock;
+};
+
+#define LANCE_MUST_PAD 0x00000001
+#define LANCE_ENABLE_AUTOSELECT 0x00000002
+#define LANCE_MUST_REINIT_RING 0x00000004
+#define LANCE_MUST_UNRESET 0x00000008
+#define LANCE_HAS_MISSED_FRAME 0x00000010
+
+/* A mapping from the chip ID number to the part number and features.
+ These are from the datasheets -- in real life the '970 version
+ reportedly has the same ID as the '965. */
+static struct lance_chip_type {
+ int id_number;
+ const char *name;
+ int flags;
+} chip_table[] = {
+ {0x0000, "LANCE 7990", /* Ancient lance chip. */
+ LANCE_MUST_PAD + LANCE_MUST_UNRESET},
+ {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
+ it the PCnet32. */
+ {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x0, "PCnet (unknown)",
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+};
+
+enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, LANCE_UNKNOWN=5};
+
+/* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
+static unsigned char pci_irq_line = 0;
+
+/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
+ Assume yes until we know the memory size. */
+static unsigned char lance_need_isa_bounce_buffers = 1;
+
+static int lance_open(struct device *dev);
+static void lance_init_ring(struct device *dev);
+static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
+static int lance_rx(struct device *dev);
+static void lance_interrupt(int irq, struct pt_regs *regs);
+static int lance_close(struct device *dev);
+static struct enet_statistics *lance_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+
+
+/* This lance probe is unlike the other board probes in 1.0.*. The LANCE may
+ have to allocate a contiguous low-memory region for bounce buffers.
+ This requirement is satisfied by having the lance initialization occur
+ before the memory management system is started, and thus well before the
+ other probes. */
+
+int lance_init(void)
+{
+ int *port;
+
+ if (high_memory <= 16*1024*1024)
+ lance_need_isa_bounce_buffers = 0;
+
+#if defined(CONFIG_PCI)
+ if (pcibios_present()) {
+ int pci_index;
+ printk("lance.c: PCI bios is present, checking for devices...\n");
+ for (pci_index = 0; pci_index < 8; pci_index++) {
+ unsigned char pci_bus, pci_device_fn;
+ unsigned int pci_ioaddr;
+ unsigned short pci_command;
+
+ if (pcibios_find_device (PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_LANCE, pci_index,
+ &pci_bus, &pci_device_fn) != 0)
+ break;
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &pci_irq_line);
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ /* Remove I/O space marker in bit 0. */
+ pci_ioaddr &= ~3;
+ /* PCI Spec 2.1 states that it is either the driver or PCI card's
+ * responsibility to set the PCI Master Enable Bit if needed.
+ * (From Mark Stockton <marks@schooner.sys.hou.compaq.com>)
+ */
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
+ if ( ! (pci_command & PCI_COMMAND_MASTER)) {
+ printk("PCI Master Bit has not been set. Setting...\n");
+ pci_command |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, pci_command);
+ }
+ printk("Found PCnet/PCI at %#x, irq %d.\n",
+ pci_ioaddr, pci_irq_line);
+ lance_probe1(pci_ioaddr);
+ pci_irq_line = 0;
+ }
+ }
+#endif /* defined(CONFIG_PCI) */
+
+ for (port = lance_portlist; *port; port++) {
+ int ioaddr = *port;
+
+ if ( check_region(ioaddr, LANCE_TOTAL_SIZE) == 0) {
+ /* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
+ signatures w/ minimal I/O reads */
+ char offset15, offset14 = inb(ioaddr + 14);
+
+ if ((offset14 == 0x52 || offset14 == 0x57) &&
+ ((offset15 = inb(ioaddr + 15)) == 0x57 || offset15 == 0x44))
+ lance_probe1(ioaddr);
+ }
+ }
+
+ return 0;
+}
+
+void lance_probe1(int ioaddr)
+{
+ struct device *dev;
+ struct lance_private *lp;
+ short dma_channels; /* Mark spuriously-busy DMA channels */
+ int i, reset_val, lance_version;
+ const char *chipname;
+ /* Flags for specific chips or boards. */
+ unsigned char hpJ2405A = 0; /* HP ISA adaptor */
+ int hp_builtin = 0; /* HP on-board ethernet. */
+ static int did_version = 0; /* Already printed version info. */
+
+ /* First we look for special cases.
+ Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
+ There are two HP versions, check the BIOS for the configuration port.
+ This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
+ */
+ if ( *((unsigned short *) 0x000f0102) == 0x5048) {
+ static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
+ int hp_port = ( *((unsigned char *) 0x000f00f1) & 1) ? 0x499 : 0x99;
+ /* We can have boards other than the built-in! Verify this is on-board. */
+ if ((inb(hp_port) & 0xc0) == 0x80
+ && ioaddr_table[inb(hp_port) & 3] == ioaddr)
+ hp_builtin = hp_port;
+ }
+ /* We also recognize the HP Vectra on-board here, but check below. */
+ hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
+ && inb(ioaddr+2) == 0x09);
+
+ /* Reset the LANCE. */
+ reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
+
+ /* The Un-Reset is only needed for the real NE2100, and will
+ confuse the HP board. */
+ if (!hpJ2405A)
+ outw(reset_val, ioaddr+LANCE_RESET);
+
+ outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
+ if (inw(ioaddr+LANCE_DATA) != 0x0004)
+ return;
+
+ /* Get the version of the chip. */
+ outw(88, ioaddr+LANCE_ADDR);
+ if (inw(ioaddr+LANCE_ADDR) != 88) {
+ lance_version = 0;
+ } else { /* Good, it's a newer chip. */
+ int chip_version = inw(ioaddr+LANCE_DATA);
+ outw(89, ioaddr+LANCE_ADDR);
+ chip_version |= inw(ioaddr+LANCE_DATA) << 16;
+ if (lance_debug > 2)
+ printk(" LANCE chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003)
+ return;
+ chip_version = (chip_version >> 12) & 0xffff;
+ for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
+ if (chip_table[lance_version].id_number == chip_version)
+ break;
+ }
+ }
+
+ dev = init_etherdev(0, 0);
+ chipname = chip_table[lance_version].name;
+ printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
+
+ /* There is a 16 byte station address PROM at the base address.
+ The first six bytes are the station address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ dev->base_addr = ioaddr;
+ request_region(ioaddr, LANCE_TOTAL_SIZE, chip_table[lance_version].name);
+
+ /* Make certain the data structures used by the LANCE are aligned and DMAble. */
+ lp = (struct lance_private *) kmalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
+ memset(lp, 0, sizeof(*lp));
+ dev->priv = lp;
+ lp->name = chipname;
+ lp->rx_buffs = (unsigned long) kmalloc(PKT_BUF_SZ*RX_RING_SIZE, GFP_DMA | GFP_KERNEL);
+ lp->tx_bounce_buffs = NULL;
+ if (lance_need_isa_bounce_buffers)
+ lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE, GFP_DMA | GFP_KERNEL);
+
+ lp->chip_version = lance_version;
+
+ lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
+
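+ /* Point CSR1 and CSR2 at the init block (the low and high 16 bits of
+    its address) so the chip can find it when initialization starts. */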
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+
+ if (pci_irq_line) {
+ dev->dma = 4; /* Native bus-master, no DMA channel needed. */
+ dev->irq = pci_irq_line;
+ } else if (hp_builtin) {
+ static const char dma_tbl[4] = {3, 5, 6, 0};
+ static const char irq_tbl[4] = {3, 4, 5, 9};
+ unsigned char port_val = inb(hp_builtin);
+ dev->dma = dma_tbl[(port_val >> 4) & 3];
+ dev->irq = irq_tbl[(port_val >> 2) & 3];
+ printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (hpJ2405A) {
+ static const char dma_tbl[4] = {3, 5, 6, 7};
+ static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
+ short reset_val = inw(ioaddr+LANCE_RESET);
+ dev->dma = dma_tbl[(reset_val >> 2) & 3];
+ dev->irq = irq_tbl[(reset_val >> 4) & 7];
+ printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
+ short bus_info;
+ outw(8, ioaddr+LANCE_ADDR);
+ bus_info = inw(ioaddr+LANCE_BUS_IF);
+ dev->dma = bus_info & 0x07;
+ dev->irq = (bus_info >> 4) & 0x0F;
+ } else {
+ /* The DMA channel may be passed in PARAM1. */
+ if (dev->mem_start & 0x07)
+ dev->dma = dev->mem_start & 0x07;
+ }
+
+ if (dev->dma == 0) {
+ /* Read the DMA channel status register, so that we can avoid
+ stuck DMA channels in the DMA detection below. */
+ dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
+ (inb(DMA2_STAT_REG) & 0xf0);
+ }
+ if (dev->irq >= 2)
+ printk(" assigned IRQ %d", dev->irq);
+ else {
+ /* To auto-IRQ we enable the initialization-done and DMA error
+ interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ boards will work. */
+ autoirq_setup(0);
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ dev->irq = autoirq_report(1);
+ if (dev->irq)
+ printk(", probed IRQ %d", dev->irq);
+ else {
+ printk(", failed to detect IRQ line.\n");
+ return;
+ }
+
+ /* Check for the initialization done bit, 0x0100, which means
+ that we don't need a DMA channel. */
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ dev->dma = 4;
+ }
+
+ if (dev->dma == 4) {
+ printk(", no DMA needed.\n");
+ } else if (dev->dma) {
+ if (request_dma(dev->dma, chipname)) {
+ printk("DMA %d allocation failed.\n", dev->dma);
+ return;
+ } else
+ printk(", assigned DMA %d.\n", dev->dma);
+ } else { /* OK, we have to auto-DMA. */
+ for (i = 0; i < 4; i++) {
+ static const char dmas[] = { 5, 6, 7, 3 };
+ int dma = dmas[i];
+ int boguscnt;
+
+ /* Don't enable a permanently busy DMA channel, or the machine
+ will hang. */
+ if (test_bit(dma, &dma_channels))
+ continue;
+ outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
+ if (request_dma(dma, chipname))
+ continue;
+ set_dma_mode(dma, DMA_MODE_CASCADE);
+ enable_dma(dma);
+
+ /* Trigger an initialization. */
+ outw(0x0001, ioaddr+LANCE_DATA);
+ for (boguscnt = 100; boguscnt > 0; --boguscnt)
+ if (inw(ioaddr+LANCE_DATA) & 0x0900)
+ break;
+ if (inw(ioaddr+LANCE_DATA) & 0x0100) {
+ dev->dma = dma;
+ printk(", DMA %d.\n", dev->dma);
+ break;
+ } else {
+ disable_dma(dma);
+ free_dma(dma);
+ }
+ }
+ if (i == 4) { /* Failure: bail. */
+ printk("DMA detection failed.\n");
+ return;
+ }
+ }
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* Turn on auto-select of media (10baseT or BNC) so that the user
+ can watch the LEDs even if the board isn't opened. */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ outw(0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 0 && did_version++ == 0)
+ printk(version);
+
+ /* The LANCE-specific entries in the device structure. */
+ dev->open = &lance_open;
+ dev->hard_start_xmit = &lance_start_xmit;
+ dev->stop = &lance_close;
+ dev->get_stats = &lance_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ return;
+}
+
+
+static int
+lance_open(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, &lance_interrupt, 0, lp->name)) {
+ return -EAGAIN;
+ }
+
+ /* We used to allocate DMA here, but that was silly.
+ DMA lines can't be shared! We now permanently allocate them. */
+
+ irq2dev_map[dev->irq] = dev;
+
+ /* Reset the LANCE */
+ inw(ioaddr+LANCE_RESET);
+
+ /* The DMA controller is used as a no-operation slave, "cascade mode". */
+ if (dev->dma != 4) {
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+ }
+
+ /* Un-Reset the LANCE, needed only for the NE2100. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
+ outw(0, ioaddr+LANCE_RESET);
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ outw(0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 1)
+ printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
+ (int) &lp->init_block);
+
+ lance_init_ring(dev);
+ /* Re-initialize the LANCE, and start it when done. */
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
+
+ outw(0x0004, ioaddr+LANCE_ADDR);
+ outw(0x0915, ioaddr+LANCE_DATA);
+
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0001, ioaddr+LANCE_DATA);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ i = 0;
+ while (i++ < 100)
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ outw(0x0042, ioaddr+LANCE_DATA);
+
+ if (lance_debug > 2)
+ printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));
+
+ return 0; /* Always succeed */
+}
+
+/* The LANCE has been halted for one reason or another (busmaster memory
+ arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ etc.). Modern LANCE variants always reload their ring-buffer
+ configuration when restarted, so we must reinitialize our ring
+ context before restarting. As part of this reinitialization,
+ find all packets still on the Tx ring and pretend that they had been
+ sent (in effect, drop the packets on the floor) - the higher-level
+ protocols will time out and retransmit. It'd be better to shuffle
+ these skbs to a temp list and then actually re-Tx them after
+ restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+*/
+
+static void
+lance_purge_tx_ring(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i]) {
+ dev_kfree_skb(lp->tx_skbuff[i],FREE_WRITE);
+ lp->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+static void
+lance_init_ring(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int i;
+
+ lp->lock = 0, lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
+ lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ the upper ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_ring[i].base = 0;
+ }
+
+ lp->init_block.mode = 0x0000;
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
+}
+
+static void
+lance_restart(struct device *dev, unsigned int csr0_bits, int must_reinit)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+
+ if (must_reinit ||
+ (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
+ lance_purge_tx_ring(dev);
+ lance_init_ring(dev);
+ }
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(csr0_bits, dev->base_addr + LANCE_DATA);
+}
+
+static int
+lance_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int entry;
+ unsigned long flags;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 20)
+ return 1;
+ outw(0, ioaddr+LANCE_ADDR);
+ printk("%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, inw(ioaddr+LANCE_DATA));
+ outw(0x0004, ioaddr+LANCE_DATA);
+ lp->stats.tx_errors++;
+#ifndef final_version
+ {
+ int i;
+ printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0 ; i < RX_RING_SIZE; i++)
+ printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
+ lp->rx_ring[i].msg_length);
+ for (i = 0 ; i < TX_RING_SIZE; i++)
+ printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->tx_ring[i].base, -lp->tx_ring[i].length,
+ lp->tx_ring[i].misc);
+ printk("\n");
+ }
+#endif
+ lance_restart(dev, 0x0043, 1);
+
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+
+ return 0;
+ }
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (skb->len <= 0)
+ return 0;
+
+ if (lance_debug > 3) {
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
+ inw(ioaddr+LANCE_DATA));
+ outw(0x0000, ioaddr+LANCE_DATA);
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+
+ if (set_bit(0, (void*)&lp->lock) != 0) {
+ if (lance_debug > 0)
+ printk("%s: tx queue lock!.\n", dev->name);
+ /* don't clear dev->tbusy flag. */
+ return 1;
+ }
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+ /* The old LANCE chips don't automatically pad buffers to min. size. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
+ lp->tx_ring[entry].length =
+ -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
+ } else
+ lp->tx_ring[entry].length = -skb->len;
+
+ lp->tx_ring[entry].misc = 0x0000;
+
+ /* If any part of this buffer is >16M we must copy it to a low-memory
+ buffer. */
+ if ((int)(skb->data) + skb->len > 0x01000000) {
+ if (lance_debug > 5)
+ printk("%s: bouncing a high-memory packet (%#x).\n",
+ dev->name, (int)(skb->data));
+ memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
+ lp->tx_ring[entry].base =
+ (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
+ dev_kfree_skb (skb, FREE_WRITE);
+ } else {
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000;
+ }
+ lp->cur_tx++;
+
+ /* Trigger an immediate send poll. */
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0048, ioaddr+LANCE_DATA);
+
+ dev->trans_start = jiffies;
+
+ save_flags(flags);
+ cli();
+ lp->lock = 0;
+ if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
+ dev->tbusy=0;
+ else
+ lp->tx_full = 1;
+ restore_flags(flags);
+
+ return 0;
+}
+
+/* The LANCE interrupt handler. */
+static void
+lance_interrupt(int irq, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct lance_private *lp;
+ int csr0, ioaddr, boguscnt=10;
+ int must_restart;
+
+ if (dev == NULL) {
+ printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (struct lance_private *)dev->priv;
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = 1;
+
+ outw(0x00, dev->base_addr + LANCE_ADDR);
+ while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
+ && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
+
+ must_restart = 0;
+
+ if (lance_debug > 5)
+ printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ lance_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = lp->tx_ring[entry].base;
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x40000000) {
+ /* There was a major error; log it. */
+ int err_status = lp->tx_ring[entry].misc;
+ lp->stats.tx_errors++;
+ if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
+ if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
+ if (err_status & 0x1000) lp->stats.tx_window_errors++;
+ if (err_status & 0x4000) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ lp->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ printk("%s: Tx FIFO error! Status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+ } else {
+ if (status & 0x18000000)
+ lp->stats.collisions++;
+ lp->stats.tx_packets++;
+ }
+
+ /* We must free the original skb if it's not a data-only copy
+ in the bounce buffer. */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb(lp->tx_skbuff[entry],FREE_WRITE);
+ lp->tx_skbuff[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && dev->tbusy
+ && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & 0x0800) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+
+ if (must_restart) {
+ /* stop the chip to clear the error condition, then restart */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x0004, dev->base_addr + LANCE_DATA);
+ lance_restart(dev, 0x0002, 0);
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x7940, dev->base_addr + LANCE_DATA);
+
+ if (lance_debug > 4)
+ printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
+ dev->name, inw(ioaddr + LANCE_ADDR),
+ inw(dev->base_addr + LANCE_DATA));
+
+ dev->interrupt = 0;
+ return;
+}
+
+static int
+lance_rx(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (lp->rx_ring[entry].base >= 0) {
+ int status = lp->rx_ring[entry].base >> 24;
+
+ if (status != 0x03) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & 0x01) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x20) lp->stats.rx_frame_errors++;
+ if (status & 0x10) lp->stats.rx_over_errors++;
+ if (status & 0x08) lp->stats.rx_crc_errors++;
+ if (status & 0x04) lp->stats.rx_fifo_errors++;
+ lp->rx_ring[entry].base &= 0x03ffffff;
+ }
+ else
+ {
+ /* Malloc up new buffer, compatible with net-2e. */
+ short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
+ struct sk_buff *skb;
+
+ if(pkt_len<60)
+ {
+ printk("%s: Runt packet!\n",dev->name);
+ lp->stats.rx_errors++;
+ }
+ else
+ {
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ for (i=0; i < RX_RING_SIZE; i++)
+ if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
+ break;
+
+ if (i > RX_RING_SIZE -2)
+ {
+ lp->stats.rx_dropped++;
+ lp->rx_ring[entry].base |= 0x80000000;
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
+ pkt_len,0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ }
+ /* The docs say that the buffer length isn't touched, but Andrew Boyd
+ of QNX reports that some revs of the 79C965 clear it. */
+ lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
+ lp->rx_ring[entry].base |= 0x80000000;
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+static int
+lance_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ outw(112, ioaddr+LANCE_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ }
+ outw(0, ioaddr+LANCE_ADDR);
+
+ if (lance_debug > 1)
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(ioaddr+LANCE_DATA));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ outw(0x0004, ioaddr+LANCE_DATA);
+
+ if (dev->dma != 4)
+ disable_dma(dev->dma);
+
+ free_irq(dev->irq);
+
+ irq2dev_map[dev->irq] = 0;
+
+ return 0;
+}
+
+static struct enet_statistics *
+lance_get_stats(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ short ioaddr = dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ save_flags(flags);
+ cli();
+ saved_addr = inw(ioaddr+LANCE_ADDR);
+ outw(112, ioaddr+LANCE_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ outw(saved_addr, ioaddr+LANCE_ADDR);
+ restore_flags(flags);
+ }
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outw(0, ioaddr+LANCE_ADDR);
+ outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
+
+ if (dev->flags&IFF_PROMISC) {
+ /* Log any net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int i;
+ int num_addrs=dev->mc_count;
+ if(dev->flags&IFF_ALLMULTI)
+ num_addrs=1;
+ /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
+ outw(8 + i, ioaddr+LANCE_ADDR);
+ outw(multicast_table[i], ioaddr+LANCE_DATA);
+ }
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
+ }
+
+ lance_restart(dev, 0x0142, 0); /* Resume normal operation */
+
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/ne.c b/i386/i386at/gpl/linux/net/ne.c
new file mode 100644
index 00000000..4da4efb7
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/ne.c
@@ -0,0 +1,733 @@
+/* ne.c: A general non-shared-memory NS8390 ethernet driver for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This driver should work with many programmed-I/O 8390-based ethernet
+ boards. Currently it supports the NE1000, NE2000, many clones,
+ and some Cabletron products.
+
+ Changelog:
+
+ Paul Gortmaker : use ENISR_RDC to monitor Tx PIO uploads, made
+ sanity checks and bad clone support optional.
+ Paul Gortmaker : new reset code, reset card after probe at boot.
+ Paul Gortmaker : multiple card support for module users.
+ Paul Gortmaker : Support for PCI ne2k clones, similar to lance.c
+
+*/
+
+/* Routines for the NatSemi-based designs (NE[12]000). */
+
+static const char *version =
+ "ne.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do we support clones that don't adhere to 14,15 of the SAprom ? */
+#define SUPPORT_NE_BAD_CLONES
+
+/* Do we perform extra sanity checks on stuff ? */
+/* #define NE_SANITY_CHECK */
+
+/* Do we implement the read before write bugfix ? */
+/* #define NE_RW_BUGFIX */
+
+/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+/* ---- No user-serviceable parts below ---- */
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int netcard_portlist[] =
+{ 0x300, 0x280, 0x320, 0x340, 0x360, 0};
+
+#ifdef SUPPORT_NE_BAD_CLONES
+/* A list of bad clones that we none-the-less recognize. */
+static struct { const char *name8, *name16; unsigned char SAprefix[4];}
+bad_clone_list[] = {
+ {"DE100", "DE200", {0x00, 0xDE, 0x01,}},
+ {"DE120", "DE220", {0x00, 0x80, 0xc8,}},
+ {"DFI1000", "DFI2000", {'D', 'F', 'I',}}, /* Original, eh? */
+ {"EtherNext UTP8", "EtherNext UTP16", {0x00, 0x00, 0x79}},
+ {"NE1000","NE2000-invalid", {0x00, 0x00, 0xd8}}, /* Ancient real NE1000. */
+ {"NN1000", "NN2000", {0x08, 0x03, 0x08}}, /* Outlaw no-name clone. */
+ {"4-DIM8","4-DIM16", {0x00,0x00,0x4d,}}, /* Outlaw 4-Dimension cards. */
+ {"Con-Intl_8", "Con-Intl_16", {0x00, 0x00, 0x24}}, /* Connect Int'nl */
+ {0,}
+};
+#endif
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x20
+
+#define NE1SM_START_PG 0x20 /* First page of TX buffer */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
+static unsigned char pci_irq_line = 0;
+
+int ne_probe(struct device *dev);
+static int ne_probe1(struct device *dev, int ioaddr);
+
+static int ne_open(struct device *dev);
+static int ne_close(struct device *dev);
+
+static void ne_reset_8390(struct device *dev);
+static void ne_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne_block_output(struct device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+/* Probe for various non-shared-memory ethercards.
+
+ NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
+ buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
+ the SAPROM, while other supposed NE2000 clones must be detected by their
+ SA prefix.
+
+ Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+ mode results in doubled values, which can be detected and compensated for.
+
+ The probe is also responsible for initializing the card and filling
+ in the 'dev' and 'ei_status' structures.
+
+ We use the minimum memory size for some ethercard product lines, iff we can't
+ distinguish models. You can increase the packet buffer size by setting
+ PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are:
+ E1010 starts at 0x100 and ends at 0x2000.
+ E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
+ E2010 starts at 0x100 and ends at 0x4000.
+ E2010-x starts at 0x100 and ends at 0xffff. */
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry netcard_drv =
+{"ne", ne_probe1, NE_IO_EXTENT, netcard_portlist};
+#else
+
+/* Note that this probe only picks up one card at a time, even for multiple
+ PCI ne2k cards. Use "ether=0,0,eth1" if you have a second PCI ne2k card.
+ This keeps things consistent regardless of the bus type of the card. */
+
+int ne_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ /* First check any supplied i/o locations. User knows best. <cough> */
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ne_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ /* Then look for any installed PCI clones */
+#if defined(CONFIG_PCI)
+ if (pcibios_present()) {
+ int pci_index;
+ for (pci_index = 0; pci_index < 8; pci_index++) {
+ unsigned char pci_bus, pci_device_fn;
+ unsigned int pci_ioaddr;
+
+ /* Currently only Realtek are making PCI ne2k clones. */
+ if (pcibios_find_device (PCI_VENDOR_ID_REALTEK,
+ PCI_DEVICE_ID_REALTEK_8029, pci_index,
+ &pci_bus, &pci_device_fn) != 0)
+ break; /* OK, now try to probe for std. ISA card */
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &pci_irq_line);
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ /* Strip the I/O address out of the returned value */
+ pci_ioaddr &= PCI_BASE_ADDRESS_IO_MASK;
+ /* Avoid already found cards from previous ne_probe() calls */
+ if (check_region(pci_ioaddr, NE_IO_EXTENT))
+ continue;
+ printk("ne.c: PCI BIOS reports ne2000 clone at i/o %#x, irq %d.\n",
+ pci_ioaddr, pci_irq_line);
+ if (ne_probe1(dev, pci_ioaddr) != 0) { /* Shouldn't happen. */
+ printk(KERN_ERR "ne.c: Probe of PCI card at %#x failed.\n", pci_ioaddr);
+ break; /* Hrmm, try to probe for ISA card... */
+ }
+ pci_irq_line = 0;
+ return 0;
+ }
+ }
+#endif /* defined(CONFIG_PCI) */
+
+ /* Last resort. The semi-risky ISA auto-probe. */
+ for (i = 0; netcard_portlist[i]; i++) {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, NE_IO_EXTENT))
+ continue;
+ if (ne_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+static int ne_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ unsigned char SA_prom[32];
+ int wordlength = 2;
+ const char *name = NULL;
+ int start_page, stop_page;
+ int neX000, ctron;
+ int reg0 = inb_p(ioaddr);
+ static unsigned version_printed = 0;
+
+ if (reg0 == 0xFF)
+ return ENODEV;
+
+ /* Do a preliminary verification that we have a 8390. */
+ { int regd;
+ outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ regd = inb_p(ioaddr + 0x0d);
+ outb_p(0xff, ioaddr + 0x0d);
+ outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
+ outb_p(reg0, ioaddr);
+ outb_p(regd, ioaddr + 0x0d); /* Restore the old values. */
+ return ENODEV;
+ }
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("NE*000 ethercard probe at %#3x:", ioaddr);
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ { unsigned long reset_start_time = jiffies;
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk(" not found (no reset ack).\n");
+ return ENODEV;
+ }
+
+ outb_p(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
+ }
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned char value, offset; } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ }
+ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) {
+ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+ SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
+ if (SA_prom[i] != SA_prom[i+1])
+ wordlength = 1;
+ }
+
+ if (wordlength == 2) {
+ /* We must set the 8390 for word mode. */
+ outb_p(0x49, ioaddr + EN0_DCFG);
+ /* We used to reset the ethercard here, but it doesn't seem
+ to be necessary. */
+ /* Un-double the SA_prom values. */
+ for (i = 0; i < 16; i++)
+ SA_prom[i] = SA_prom[i+i];
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+ } else {
+ start_page = NE1SM_START_PG;
+ stop_page = NE1SM_STOP_PG;
+ }
+
+ neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
+ ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);
+
+ /* Set up the rest of the parameters. */
+ if (neX000) {
+ name = (wordlength == 2) ? "NE2000" : "NE1000";
+ } else if (ctron) {
+ name = (wordlength == 2) ? "Ctron-8" : "Ctron-16";
+ start_page = 0x01;
+ stop_page = (wordlength == 2) ? 0x40 : 0x20;
+ } else {
+#ifdef SUPPORT_NE_BAD_CLONES
+ /* Ack! Well, there might be a *bad* NE*000 clone there.
+ Check for total bogus addresses. */
+ for (i = 0; bad_clone_list[i].name8; i++) {
+ if (SA_prom[0] == bad_clone_list[i].SAprefix[0] &&
+ SA_prom[1] == bad_clone_list[i].SAprefix[1] &&
+ SA_prom[2] == bad_clone_list[i].SAprefix[2]) {
+ if (wordlength == 2) {
+ name = bad_clone_list[i].name16;
+ } else {
+ name = bad_clone_list[i].name8;
+ }
+ break;
+ }
+ }
+ if (bad_clone_list[i].name8 == NULL) {
+ printk(" not found (invalid signature %2.2x %2.2x).\n",
+ SA_prom[14], SA_prom[15]);
+ return ENXIO;
+ }
+#else
+ printk(" not found.\n");
+ return ENXIO;
+#endif
+
+ }
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("ne.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (pci_irq_line) {
+ dev->irq = pci_irq_line;
+ }
+
+ if (dev->irq < 2) {
+ autoirq_setup(0);
+ outb_p(0x50, ioaddr + EN0_IMR); /* Enable one interrupt. */
+ outb_p(0x00, ioaddr + EN0_RCNTLO);
+ outb_p(0x00, ioaddr + EN0_RCNTHI);
+ outb_p(E8390_RREAD+E8390_START, ioaddr); /* Trigger it... */
+ outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
+ dev->irq = autoirq_report(0);
+ if (ei_debug > 2)
+ printk(" autoirq is %d\n", dev->irq);
+ } else if (dev->irq == 2)
+ /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+ or don't know which one to set. */
+ dev->irq = 9;
+
+ if (! dev->irq) {
+ printk(" failed to detect IRQ line.\n");
+ return EAGAIN;
+ }
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ {
+ int irqval = request_irq(dev->irq, ei_interrupt, 0, name);
+ if (irqval) {
+ printk (" unable to get IRQ %d (irqval=%d).\n", dev->irq, irqval);
+ return EAGAIN;
+ }
+ }
+
+ dev->base_addr = ioaddr;
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to get memory for dev->priv.\n");
+ free_irq(dev->irq);
+ return -ENOMEM;
+ }
+
+ request_region(ioaddr, NE_IO_EXTENT, name);
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ printk(" %2.2x", SA_prom[i]);
+ dev->dev_addr[i] = SA_prom[i];
+ }
+
+ printk("\n%s: %s found at %#x, using IRQ %d.\n",
+ dev->name, name, ioaddr, dev->irq);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = (wordlength == 2);
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne_reset_8390;
+ ei_status.block_input = &ne_block_input;
+ ei_status.block_output = &ne_block_output;
+ ei_status.get_8390_hdr = &ne_get_8390_hdr;
+ dev->open = &ne_open;
+ dev->stop = &ne_close;
+ NS8390_init(dev, 0);
+ return 0;
+}
+
+static int
+ne_open(struct device *dev)
+{
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int
+ne_close(struct device *dev)
+{
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+static void
+ne_reset_8390(struct device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk("%s: ne_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
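+
+/* For reference: the 8390 prepends a four-byte header to each received
+   frame (receive status, next-packet page pointer, 16-bit byte count),
+   so the word-mode path below transfers exactly two words. */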
+
+static void
+ne_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ int nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ dev->interrupt);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.word16)
+ insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ else
+ insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
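+
+/* The remote-DMA sequence both routines below follow, in outline: write the
+   byte count to EN0_RCNTLO/EN0_RCNTHI and the buffer address to
+   EN0_RSARLO/EN0_RSARHI, issue E8390_RREAD or E8390_RWRITE, move the data
+   through NE_DATAPORT (insw/outsw on 16-bit cards, insb/outsb otherwise),
+   and finally ack the remote-DMA-complete bit in EN0_ISR. */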
+
+static void
+ne_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+#ifdef NE_SANITY_CHECK
+ int xfer_count = count;
+#endif
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ insw(NE_BASE + NE_DATAPORT,buf,count>>1);
+ if (count & 0x01) {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+#ifdef NE_SANITY_CHECK
+ xfer_count++;
+#endif
+ }
+ } else {
+ insb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. If you see
+ this message you either 1) have a slightly incompatible clone
+ or 2) have noise/speed problems with your bus. */
+ if (ei_debug > 1) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == low)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk("%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void
+ne_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+#ifdef NE_SANITY_CHECK
+ int retries = 0;
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+#ifdef NE_SANITY_CHECK
+ retry:
+#endif
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+ outb_p(0x42, nic_base + EN0_RCNTLO);
+ outb_p(0x00, nic_base + EN0_RCNTHI);
+ outb_p(0x42, nic_base + EN0_RSARLO);
+ outb_p(0x00, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+#endif
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ } else {
+ outsb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+ dma_start = jiffies;
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. */
+ if (ei_debug > 1) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0) {
+ printk("%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
+ printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne_reset_8390(dev);
+ NS8390_init(dev,1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ return;
+}
+
+
+#ifdef MODULE
+#define MAX_NE_CARDS 4 /* Max number of NE cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_NE_CARDS] = { 0, };
+static struct device dev_ne[MAX_NE_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_NE_CARDS] = { 0, };
+static int irq[MAX_NE_CARDS] = { 0, };
+
+/* This is set up so that no autoprobe takes place. We can't guarantee
+that the ne2k probe is the last 8390 based probe to take place (as it
+is at boot) and so the probe will get confused by any other 8390 cards.
+ISA device autoprobes on a running machine are not recommended anyway. */
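+
+/* So the module has to be loaded with explicit parameters; for example
+   (the addresses and IRQs here are only an illustration):
+
+	insmod ne.o io=0x300 irq=10
+
+   and a second card can be given as comma-separated values,
+   e.g. io=0x300,0x340. */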
+
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct device *dev = &dev_ne[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->init = ne_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only complain once */
+ printk(KERN_NOTICE "ne.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
+ return -EPERM;
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "ne.c: No NE*000 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct device *dev = &dev_ne[this_dev];
+ if (dev->priv != NULL) {
+ kfree(dev->priv);
+ dev->priv = NULL;
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = NULL;
+ release_region(dev->base_addr, NE_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DKERNEL -Wall -O6 -fomit-frame-pointer -I/usr/src/linux/net/tcp -c ne.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/net_init.c b/i386/i386at/gpl/linux/net/net_init.c
new file mode 100644
index 00000000..cedee941
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/net_init.c
@@ -0,0 +1,380 @@
+/* netdrv_init.c: Initialization for network devices. */
+/*
+ Written 1993,1994,1995 by Donald Becker.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov or
+ C/O Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This file contains the initialization for the "pl14+" style ethernet
+ drivers. It should eventually replace most of drivers/net/Space.c.
+	Its primary advantage is that it's able to allocate low-memory buffers.
+ A secondary advantage is that the dangerous NE*000 netcards can reserve
+ their I/O port region before the SCSI probes start.
+
+ Modifications/additions by Bjorn Ekwall <bj0rn@blox.se>:
+ ethdev_index[MAX_ETH_CARDS]
+ register_netdev() / unregister_netdev()
+
+ Modifications by Wolfgang Walter
+ Use dev_close cleanly so we always shut things down tidily.
+
+ Changed 29/10/95, Alan Cox to pass sockaddr's around for mac addresses.
+*/
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/malloc.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/trdevice.h>
+#ifdef CONFIG_NET_ALIAS
+#include <linux/net_alias.h>
+#endif
+
+/* The network devices currently exist only in the socket namespace, so these
+ entries are unused. The only ones that make sense are
+ open start the ethercard
+ close stop the ethercard
+ ioctl To get statistics, perhaps set the interface port (AUI, BNC, etc.)
+ One can also imagine getting raw packets using
+ read & write
+ but this is probably better handled by a raw packet socket.
+
+ Given that almost all of these functions are handled in the current
+ socket-based scheme, putting ethercard devices in /dev/ seems pointless.
+
+ [Removed all support for /dev network devices. When someone adds
+ streams then by magic we get them, but otherwise they are un-needed
+ and a space waste]
+*/
+
+/* The list of used and available "eth" slots (for "eth0", "eth1", etc.) */
+#define MAX_ETH_CARDS 16 /* same as the number of irqs in irq2dev[] */
+static struct device *ethdev_index[MAX_ETH_CARDS];
+
+/* Fill in the fields of the device structure with ethernet-generic values.
+
+ If no device structure is passed, a new one is constructed, complete with
+ a SIZEOF_PRIVATE private data area.
+
+ If an empty string area is passed as dev->name, or a new structure is made,
+ a new name string is constructed. The passed string area should be 8 bytes
+ long.
+ */
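+
+/* Typical use from a probe routine ("struct my_priv" is only a placeholder
+   for a driver's private state):
+
+	struct device *dev = init_etherdev(NULL, sizeof(struct my_priv));
+	struct my_priv *np = (struct my_priv *) dev->priv;
+
+   Passing NULL makes init_etherdev() allocate the device structure, the
+   "eth%d" name and the private area in one chunk, as described above. */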
+
+struct device *
+init_etherdev(struct device *dev, int sizeof_priv)
+{
+ int new_device = 0;
+ int i;
+
+ /* Use an existing correctly named device in Space.c:dev_base. */
+ if (dev == NULL) {
+ int alloc_size = sizeof(struct device) + sizeof("eth%d ")
+ + sizeof_priv + 3;
+ struct device *cur_dev;
+ char pname[8]; /* Putative name for the device. */
+
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(pname, "eth%d", i);
+ for (cur_dev = dev_base; cur_dev; cur_dev = cur_dev->next)
+ if (strcmp(pname, cur_dev->name) == 0) {
+ dev = cur_dev;
+ dev->init = NULL;
+ sizeof_priv = (sizeof_priv + 3) & ~3;
+ dev->priv = sizeof_priv
+ ? kmalloc(sizeof_priv, GFP_KERNEL)
+ : NULL;
+ if (dev->priv) memset(dev->priv, 0, sizeof_priv);
+ goto found;
+ }
+ }
+
+ alloc_size &= ~3; /* Round to dword boundary. */
+
+ dev = (struct device *)kmalloc(alloc_size, GFP_KERNEL);
+ memset(dev, 0, alloc_size);
+ if (sizeof_priv)
+ dev->priv = (void *) (dev + 1);
+ dev->name = sizeof_priv + (char *)(dev + 1);
+ new_device = 1;
+ }
+
+ found: /* From the double loop above. */
+
+ if (dev->name &&
+ ((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(dev->name, "eth%d", i);
+ ethdev_index[i] = dev;
+ break;
+ }
+ }
+
+ ether_setup(dev); /* Hmmm, should this be called here? */
+
+ if (new_device) {
+ /* Append the device to the device queue. */
+ struct device **old_devp = &dev_base;
+ while ((*old_devp)->next)
+ old_devp = & (*old_devp)->next;
+ (*old_devp)->next = dev;
+ dev->next = 0;
+ }
+ return dev;
+}
+
+
+static int eth_mac_addr(struct device *dev, void *p)
+{
+ struct sockaddr *addr=p;
+ if(dev->start)
+ return -EBUSY;
+ memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+ return 0;
+}
+
+void ether_setup(struct device *dev)
+{
+ int i;
+
+ /* Fill in the fields of the device structure with ethernet-generic values.
+ This should be in a common file instead of per-driver. */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ /* register boot-defined "eth" devices */
+ if (dev->name && (strncmp(dev->name, "eth", 3) == 0)) {
+ i = simple_strtoul(dev->name + 3, NULL, 0);
+ if (ethdev_index[i] == NULL) {
+ ethdev_index[i] = dev;
+ }
+ else if (dev != ethdev_index[i]) {
+ /* Really shouldn't happen! */
+#ifdef MACH
+ panic ("ether_setup: Ouch! Someone else took %s, i = %d\n",
+ dev->name, i);
+#else
+ printk("ether_setup: Ouch! Someone else took %s, i = %d\n",
+ dev->name, i);
+#endif
+ }
+ }
+
+#ifndef MACH
+ dev->hard_header = eth_header;
+ dev->rebuild_header = eth_rebuild_header;
+ dev->set_mac_address = eth_mac_addr;
+ dev->header_cache_bind = eth_header_cache_bind;
+ dev->header_cache_update= eth_header_cache_update;
+#endif
+
+ dev->type = ARPHRD_ETHER;
+ dev->hard_header_len = ETH_HLEN;
+ dev->mtu = 1500; /* eth_mtu */
+ dev->addr_len = ETH_ALEN;
+ dev->tx_queue_len = 100; /* Ethernet wants good queues */
+
+ memset(dev->broadcast,0xFF, ETH_ALEN);
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+}
+
+#ifdef CONFIG_TR
+
+void tr_setup(struct device *dev)
+{
+ int i;
+ /* Fill in the fields of the device structure with ethernet-generic values.
+ This should be in a common file instead of per-driver. */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ dev->hard_header = tr_header;
+ dev->rebuild_header = tr_rebuild_header;
+
+ dev->type = ARPHRD_IEEE802;
+ dev->hard_header_len = TR_HLEN;
+ dev->mtu = 2000; /* bug in fragmenter...*/
+ dev->addr_len = TR_ALEN;
+ dev->tx_queue_len = 100; /* Long queues on tr */
+
+ memset(dev->broadcast,0xFF, TR_ALEN);
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+}
+
+#endif
+
+int ether_config(struct device *dev, struct ifmap *map)
+{
+ if (map->mem_start != (u_long)(-1))
+ dev->mem_start = map->mem_start;
+ if (map->mem_end != (u_long)(-1))
+ dev->mem_end = map->mem_end;
+ if (map->base_addr != (u_short)(-1))
+ dev->base_addr = map->base_addr;
+ if (map->irq != (u_char)(-1))
+ dev->irq = map->irq;
+ if (map->dma != (u_char)(-1))
+ dev->dma = map->dma;
+ if (map->port != (u_char)(-1))
+ dev->if_port = map->port;
+ return 0;
+}
+
+int register_netdev(struct device *dev)
+{
+ struct device *d = dev_base;
+ unsigned long flags;
+ int i=MAX_ETH_CARDS;
+
+ save_flags(flags);
+ cli();
+
+ if (dev && dev->init) {
+ if (dev->name &&
+ ((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(dev->name, "eth%d", i);
+ printk("loading device '%s'...\n", dev->name);
+ ethdev_index[i] = dev;
+ break;
+ }
+ }
+
+ sti(); /* device probes assume interrupts enabled */
+ if (dev->init(dev) != 0) {
+ if (i < MAX_ETH_CARDS) ethdev_index[i] = NULL;
+ restore_flags(flags);
+ return -EIO;
+ }
+ cli();
+
+ /* Add device to end of chain */
+ if (dev_base) {
+ while (d->next)
+ d = d->next;
+ d->next = dev;
+ }
+ else
+ dev_base = dev;
+ dev->next = NULL;
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+void unregister_netdev(struct device *dev)
+{
+ struct device *d = dev_base;
+ unsigned long flags;
+ int i;
+
+ save_flags(flags);
+ cli();
+
+ if (dev == NULL)
+ {
+ printk("was NULL\n");
+ restore_flags(flags);
+ return;
+ }
+ /* else */
+ if (dev->start)
+ printk("ERROR '%s' busy and not MOD_IN_USE.\n", dev->name);
+
+ /*
+ * must jump over main_device+aliases
+	 *	avoid unregistering alias devices here so that only the
+	 *	net_alias module manages them
+ */
+#ifdef CONFIG_NET_ALIAS
+ if (dev_base == dev)
+ dev_base = net_alias_nextdev(dev);
+ else
+ {
+ while(d && (net_alias_nextdev(d) != dev)) /* skip aliases */
+ d = net_alias_nextdev(d);
+
+ if (d && (net_alias_nextdev(d) == dev))
+ {
+ /*
+			 * Critical: bypass by considering devices as blocks (maindev+aliases)
+ */
+ net_alias_nextdev_set(d, net_alias_nextdev(dev));
+ }
+#else
+ if (dev_base == dev)
+ dev_base = dev->next;
+ else
+ {
+ while (d && (d->next != dev))
+ d = d->next;
+
+ if (d && (d->next == dev))
+ {
+ d->next = dev->next;
+ }
+#endif
+ else
+ {
+ printk("unregister_netdev: '%s' not found\n", dev->name);
+ restore_flags(flags);
+ return;
+ }
+ }
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ {
+ if (ethdev_index[i] == dev)
+ {
+ ethdev_index[i] = NULL;
+ break;
+ }
+ }
+
+ restore_flags(flags);
+
+ /*
+	 * You can, for instance, use an interface in a route even though it is not up.
+	 * We call dev_close (which has been changed: it will down a device even if
+	 * dev->flags==0, but it will not call dev->stop if IFF_UP
+	 * is not set).
+ * This will call notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev),
+ * dev_mc_discard(dev), ....
+ */
+
+ dev_close(dev);
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c net_init.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/ni52.c b/i386/i386at/gpl/linux/net/ni52.c
new file mode 100644
index 00000000..13d12359
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/ni52.c
@@ -0,0 +1,1110 @@
+/*
+ * net-3-driver for the NI5210 card (i82586 Ethernet chip)
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers that work.
+ *
+ * Alphacode 0.62 (95/01/19) for Linux 1.1.82 (or later)
+ * Copyrights (c) 1994,1995 by M.Hipp (Michael.Hipp@student.uni-tuebingen.de)
+ * [feel free to mail ....]
+ *
+ * PLEASE REPORT YOUR PERFORMANCE EXPERIENCES TO ME!
+ *
+ * If you find a bug, please send me:
+ * The kernel panic output and any kmsg from the ni52 driver
+ * the ni5210-driver-version and the linux-kernel version
+ *   how much shared memory (memsize) is on the netcard,
+ * bootprom: yes/no, base_addr, mem_start
+ * maybe the ni5210-card revision and the i82586 version
+ *
+ * autoprobe for: base_addr: 0x300,0x280,0x360,0x320,0x340
+ * mem_start: 0xc8000,0xd0000,0xd4000,0xd8000 (8K and 16K)
+ *
+ * sources:
+ * skeleton.c from Donald Becker
+ *
+ * I have also done a look in the following sources: (mail me if you need them)
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's (fourth) i82586-driver for BSD
+ * (before getting an i82596 (yes 596 not 586) manual, the existing drivers helped
+ * me a lot to understand this tricky chip.)
+ *
+ * Known Problems:
+ * The internal sysbus seems to be slow. So we often lose packets because of
+ * overruns while receiving from a fast remote host.
+ * This can slow down TCP connections. Maybe the newer ni5210 cards are better.
+ *
+ * IMPORTANT NOTE:
+ * On fast networks, it's a (very) good idea to have 16K shared memory. With
+ * 8K, we can store only 4 receive frames, so it can (easily) happen that a remote
+ * machine 'overruns' our system.
+ *
+ * Known i82586 bugs (I'm sure, there are many more!):
+ * Running the NOP-mode, the i82586 sometimes seems to forget to report
+ * every xmit-interrupt until we restart the CU.
+ * Another MAJOR bug is that the RU sometimes seems to ignore the EL-Bit
+ * in the RBD-Struct which indicates an end of the RBD queue.
+ * Instead, the RU fetches another (randomly selected and
+ * usually used) RBD and begins to fill it. (Maybe this happens only if
+ * the last buffer from the previous RFD fits exactly into the queue and
+ * the next RFD can't fetch an initial RBD. Does anyone know more?)
+ */
+
+/*
+ * 18.Nov.95: Mcast changes (AC).
+ *
+ * 19.Jan.95: verified (MH)
+ *
+ * 19.Sep.94: Added Multicast support (not tested yet) (MH)
+ *
+ * 18.Sep.94: Workaround for 'EL-Bug'. Removed flexible RBD-handling.
+ *            Now, every RFD has exactly one RBD. (MH)
+ *
+ * 14.Sep.94: added promiscuous mode, a few cleanups (MH)
+ *
+ * 19.Aug.94: changed request_irq() parameter (MH)
+ *
+ * 20.July.94: removed cleanup bugs, removed a 16K-mem-probe-bug (MH)
+ *
+ * 19.July.94: lotsa cleanups .. (MH)
+ *
+ * 17.July.94: some patches ... verified to run with 1.1.29 (MH)
+ *
+ * 4.July.94: patches for Linux 1.1.24 (MH)
+ *
+ * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH)
+ *
+ * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff, too (MH)
+ *
+ * < 30.Sep.93: first versions
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "ni52.h"
+
+#define DEBUG /* debug on */
+#define SYSBUSVAL 1 /* 8 Bit */
+
+#define ni_attn586() {outb(0,dev->base_addr+NI52_ATTENTION);}
+#define ni_reset586() {outb(0,dev->base_addr+NI52_RESET);}
+
+#define make32(ptr16) (p->memtop + (short) (ptr16) )
+#define make24(ptr32) ((char *) (ptr32) - p->base)
+#define make16(ptr32) ((unsigned short) ((unsigned long) (ptr32) - (unsigned long) p->memtop ))
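+
+/* Address-translation note: make24() turns a host pointer into the 24-bit
+   address the i82586 sees (an offset from p->base), make16() turns it into
+   the 16-bit offset used in command and descriptor links, and make32()
+   converts such a 16-bit offset back into a host pointer.  All shared-memory
+   structures below are linked with these macros, never with raw pointers. */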
+
+/******************* how to calculate the buffers *****************************
+
+ * IMPORTANT NOTE: if you configure NUM_XMIT_BUFFS as 1, the driver works
+ * --------------- in a different (more stable?) mode. Only in this mode is it
+ * possible to configure the driver with 'NO_NOPCOMMANDS'
+
+sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
+sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
+sizeof(rfd) = 24; sizeof(rbd) = 12;
+sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
+sizeof(nop_cmd) = 8;
+
+ * if you don't know the driver, better not change these values
+ * (a rough byte budget follows the defines below): */
+
+#define RECV_BUFF_SIZE 1524 /* slightly oversized */
+#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
+#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
+#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
+#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
+#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
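+
+/* A rough byte budget for the 8K configuration, using the sizes listed
+   above: scb 16 + two nop_cmds 16 + one transmit_cmd 16
+   + 4*(rfd 24 + rbd 12 + 1524 recv buffer) = 6240
+   + one xmit buffer 1524 + tbd 8 + iscp 8 + scp 12  ~= 7840 bytes,
+   which fits into 8192 with a little room to spare.  The 16K layout is the
+   same but with NUM_RECV_BUFFS_16 = 9 receive buffers. */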
+
+/**************************************************************************/
+
+#define DELAY(x) {int i=jiffies; \
+ if(loops_per_sec == 1) \
+ while(i+(x)>jiffies); \
+ else \
+ __delay((loops_per_sec>>5)*x); \
+ }
+
+/* a much shorter delay: */
+#define DELAY_16(); { __delay( (loops_per_sec>>16)+1 ); }
+
+/* wait for command with timeout: */
+#define WAIT_4_SCB_CMD() { int i; \
+ for(i=0;i<1024;i++) { \
+ if(!p->scb->cmd) break; \
+ DELAY_16(); \
+ if(i == 1023) { \
+ printk("%s: scb_cmd timed out .. resetting i82586\n",dev->name); \
+ ni_reset586(); } } }
+
+
+#define NI52_TOTAL_SIZE 16
+#define NI52_ADDR0 0x02
+#define NI52_ADDR1 0x07
+#define NI52_ADDR2 0x01
+
+#ifndef HAVE_PORTRESERVE
+#define check_region(ioaddr, size) 0
+#define request_region(ioaddr, size,name) do ; while (0)
+#endif
+
+static int ni52_probe1(struct device *dev,int ioaddr);
+static void ni52_interrupt(int irq,struct pt_regs *reg_ptr);
+static int ni52_open(struct device *dev);
+static int ni52_close(struct device *dev);
+static int ni52_send_packet(struct sk_buff *,struct device *);
+static struct enet_statistics *ni52_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+/* helper-functions */
+static int init586(struct device *dev);
+static int check586(struct device *dev,char *where,unsigned size);
+static void alloc586(struct device *dev);
+static void startrecv586(struct device *dev);
+static void *alloc_rfa(struct device *dev,void *ptr);
+static void ni52_rcv_int(struct device *dev);
+static void ni52_xmt_int(struct device *dev);
+static void ni52_rnr_int(struct device *dev);
+
+struct priv
+{
+ struct enet_statistics stats;
+ unsigned long base;
+ char *memtop;
+ volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first;
+ volatile struct scp_struct *scp; /* volatile is important */
+ volatile struct iscp_struct *iscp; /* volatile is important */
+ volatile struct scb_struct *scb; /* volatile is important */
+ volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
+ volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
+#if (NUM_XMIT_BUFFS == 1)
+ volatile struct nop_cmd_struct *nop_cmds[2];
+#else
+ volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
+#endif
+ volatile int nop_point,num_recv_buffs;
+ volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];
+ volatile int xmit_count,xmit_last;
+};
+
+
+/**********************************************
+ * close device
+ */
+
+static int ni52_close(struct device *dev)
+{
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = 0;
+
+ ni_reset586(); /* the hard way to stop the receiver */
+
+ dev->start = 0;
+ dev->tbusy = 0;
+
+ return 0;
+}
+
+/**********************************************
+ * open device
+ */
+
+static int ni52_open(struct device *dev)
+{
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+
+ if(request_irq(dev->irq, &ni52_interrupt,0,"ni52"))
+ {
+ ni_reset586();
+ return -EAGAIN;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ dev->start = 1;
+
+ return 0; /* most done by init */
+}
+
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ */
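+
+/* In outline: put a zeroed SCP at SCP_DEFAULT_ADDRESS, point it at an ISCP
+   whose 'busy' flag is set, then reset the chip and give it a channel
+   attention; a working i82586 with usable shared memory clears 'busy'.
+   Both ISCP placements (bottom of the window and just below the SCP) have
+   to pass, so both ends of the memory window get exercised. */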
+
+static int check586(struct device *dev,char *where,unsigned size)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ char *iscp_addrs[2];
+ int i;
+
+ p->base = (unsigned long) where + size - 0x01000000;
+ p->memtop = where + size;
+ p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
+ memset((char *)p->scp,0, sizeof(struct scp_struct));
+ p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
+
+ iscp_addrs[0] = where;
+ iscp_addrs[1]= (char *) p->scp - sizeof(struct iscp_struct);
+
+ for(i=0;i<2;i++)
+ {
+ p->iscp = (struct iscp_struct *) iscp_addrs[i];
+ memset((char *)p->iscp,0, sizeof(struct iscp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->iscp->busy = 1;
+
+ ni_reset586();
+ ni_attn586();
+ DELAY(2); /* wait a while... */
+
+ if(p->iscp->busy) /* i82586 clears 'busy' after successful init */
+ return 0;
+ }
+ return 1;
+}
+
+/******************************************************************
+ * set iscp at the right place, called by ni52_probe1 and open586.
+ */
+
+void alloc586(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ ni_reset586();
+ DELAY(2);
+
+ p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
+ p->scb = (struct scb_struct *) (dev->mem_start);
+ p->iscp = (struct iscp_struct *) ((char *)p->scp - sizeof(struct iscp_struct));
+
+ memset((char *) p->iscp,0,sizeof(struct iscp_struct));
+ memset((char *) p->scp ,0,sizeof(struct scp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->scp->sysbus = SYSBUSVAL;
+ p->iscp->scb_offset = make16(p->scb);
+
+ p->iscp->busy = 1;
+ ni_reset586();
+ ni_attn586();
+
+ DELAY(2);
+
+ if(p->iscp->busy)
+ printk("%s: Init-Problems (alloc).\n",dev->name);
+
+ memset((char *)p->scb,0,sizeof(struct scb_struct));
+}
+
+/**********************************************
+ * probe the ni5210-card
+ */
+
+int ni52_probe(struct device *dev)
+{
+ int *port, ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0};
+ int base_addr = dev->base_addr;
+
+	if (base_addr > 0x1ff) {	/* Check a single specified location. */
+		if( (inb(base_addr+NI52_MAGIC1) == NI52_MAGICVAL1) &&
+		    (inb(base_addr+NI52_MAGIC2) == NI52_MAGICVAL2))
+			return ni52_probe1(dev, base_addr);
+		return ENODEV;		/* specified location, but no ni5210 there */
+	} else if (base_addr > 0)	/* Don't probe at all. */
+		return ENXIO;
+
+ for (port = ports; *port; port++) {
+ int ioaddr = *port;
+ if (check_region(ioaddr, NI52_TOTAL_SIZE))
+ continue;
+ if( !(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
+ !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2))
+ continue;
+
+ dev->base_addr = ioaddr;
+ if (ni52_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ dev->base_addr = base_addr;
+ return ENODEV;
+}
+
+static int ni52_probe1(struct device *dev,int ioaddr)
+{
+ long memaddrs[] = { 0xd0000,0xd2000,0xc8000,0xca000,0xd4000,0xd6000,0xd8000, 0 };
+ int i,size;
+
+ for(i=0;i<ETH_ALEN;i++)
+ dev->dev_addr[i] = inb(dev->base_addr+i);
+
+ if(dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1
+ || dev->dev_addr[2] != NI52_ADDR2)
+ return ENODEV;
+
+ printk("%s: Ni52 found at %#3lx, ",dev->name,dev->base_addr);
+
+ request_region(ioaddr,NI52_TOTAL_SIZE,"ni52");
+
+ dev->priv = (void *) kmalloc(sizeof(struct priv),GFP_KERNEL);
+ /* warning: we don't free it on errors */
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset((char *) dev->priv,0,sizeof(struct priv));
+
+ /*
+ * check (or search) IO-Memory, 8K and 16K
+ */
+ if(dev->mem_start != 0) /* no auto-mem-probe */
+ {
+ size = 0x4000; /* check for 16K mem */
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ size = 0x2000; /* check for 8K mem */
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ printk("?memprobe, Can't find memory at 0x%lx!\n",dev->mem_start);
+ return ENODEV;
+ }
+ }
+ }
+ else
+ {
+ for(i=0;;i++)
+ {
+ if(!memaddrs[i]) {
+ printk("?memprobe, Can't find io-memory!\n");
+ return ENODEV;
+ }
+ dev->mem_start = memaddrs[i];
+ size = 0x2000; /* check for 8K mem */
+ if(check586(dev,(char *)dev->mem_start,size)) /* 8K-check */
+ break;
+ size = 0x4000; /* check for 16K mem */
+ if(check586(dev,(char *)dev->mem_start,size)) /* 16K-check */
+ break;
+ }
+ }
+	dev->mem_end = dev->mem_start + size;	/* set mem_end, as shown by 'ifconfig' */
+
+ ((struct priv *) (dev->priv))->base = dev->mem_start + size - 0x01000000;
+ alloc586(dev);
+
+ /* set number of receive-buffs according to memsize */
+ if(size == 0x2000)
+ ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8;
+ else
+ ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16;
+
+ printk("Memaddr: 0x%lx, Memsize: %d, ",dev->mem_start,size);
+
+ if(dev->irq < 2)
+ {
+ autoirq_setup(0);
+ ni_reset586();
+ ni_attn586();
+ if(!(dev->irq = autoirq_report(2)))
+ {
+ printk("?autoirq, Failed to detect IRQ line!\n");
+ return 1;
+ }
+ }
+ else if(dev->irq == 2)
+ dev->irq = 9;
+
+ printk("IRQ %d.\n",dev->irq);
+
+ dev->open = &ni52_open;
+ dev->stop = &ni52_close;
+ dev->get_stats = &ni52_get_stats;
+ dev->hard_start_xmit = &ni52_send_packet;
+ dev->set_multicast_list = &set_multicast_list;
+
+ dev->if_port = 0;
+
+ ether_setup(dev);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 0;
+
+ return 0;
+}
+
+/**********************************************
+ * init the chip (ni52-interrupt should be disabled?!)
+ * needs a correct 'allocated' memory
+ */
+
+static int init586(struct device *dev)
+{
+ void *ptr;
+ unsigned long s;
+ int i,result=0;
+ struct priv *p = (struct priv *) dev->priv;
+ volatile struct configure_cmd_struct *cfg_cmd;
+ volatile struct iasetup_cmd_struct *ias_cmd;
+ volatile struct tdr_cmd_struct *tdr_cmd;
+ volatile struct mcsetup_cmd_struct *mc_cmd;
+ struct dev_mc_list *dmi=dev->mc_list;
+ int num_addrs=dev->mc_count;
+
+ ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
+
+ cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */
+ cfg_cmd->cmd_status = 0;
+ cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST;
+ cfg_cmd->cmd_link = 0xffff;
+
+ cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */
+ cfg_cmd->fifo = 0x08; /* fifo-limit (8=tx:32/rx:64) */
+ cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */
+ cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */
+ cfg_cmd->priority = 0x00;
+ cfg_cmd->ifs = 0x60;
+ cfg_cmd->time_low = 0x00;
+ cfg_cmd->time_high = 0xf2;
+ cfg_cmd->promisc = 0;
+ if(dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ {
+ cfg_cmd->promisc=1;
+ dev->flags|=IFF_PROMISC;
+ }
+ cfg_cmd->carr_coll = 0x00;
+
+ p->scb->cbl_offset = make16(cfg_cmd);
+
+ p->scb->cmd = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ s = jiffies; /* warning: only active with interrupts on !! */
+ while(!(cfg_cmd->cmd_status & STAT_COMPL))
+ if(jiffies-s > 30) break;
+
+ if((cfg_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK))
+ {
+ printk("%s (ni52): configure command failed: %x\n",dev->name,cfg_cmd->cmd_status);
+ return 1;
+ }
+
+ /*
+ * individual address setup
+ */
+ ias_cmd = (struct iasetup_cmd_struct *)ptr;
+
+ ias_cmd->cmd_status = 0;
+ ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST;
+ ias_cmd->cmd_link = 0xffff;
+
+ memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN);
+
+ p->scb->cbl_offset = make16(ias_cmd);
+
+ p->scb->cmd = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ s = jiffies;
+ while(!(ias_cmd->cmd_status & STAT_COMPL))
+ if(jiffies-s > 30) break;
+
+ if((ias_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) {
+ printk("%s (ni52): individual address setup command failed: %04x\n",dev->name,ias_cmd->cmd_status);
+ return 1;
+ }
+
+ /*
+	 * TDR, wire check .. e.g. missing termination resistor etc.
+ */
+ tdr_cmd = (struct tdr_cmd_struct *)ptr;
+
+ tdr_cmd->cmd_status = 0;
+ tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST;
+ tdr_cmd->cmd_link = 0xffff;
+ tdr_cmd->status = 0;
+
+ p->scb->cbl_offset = make16(tdr_cmd);
+
+ p->scb->cmd = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ s = jiffies;
+ while(!(tdr_cmd->cmd_status & STAT_COMPL))
+ if(jiffies - s > 30) {
+ printk("%s: Problems while running the TDR.\n",dev->name);
+ result = 1;
+ }
+
+ if(!result)
+ {
+ DELAY(2); /* wait for result */
+ result = tdr_cmd->status;
+
+ p->scb->cmd = p->scb->status & STAT_MASK;
+ ni_attn586(); /* ack the interrupts */
+
+ if(result & TDR_LNK_OK) ;
+ else if(result & TDR_XCVR_PRB)
+ printk("%s: TDR: Transceiver problem!\n",dev->name);
+ else if(result & TDR_ET_OPN)
+ printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ else if(result & TDR_ET_SRT)
+ {
+ if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
+ printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ }
+ else
+ printk("%s: TDR: Unknown status %04x\n",dev->name,result);
+ }
+
+ /*
+ * ack interrupts
+ */
+ p->scb->cmd = p->scb->status & STAT_MASK;
+ ni_attn586();
+
+ /*
+ * alloc nop/xmit-cmds
+ */
+#if (NUM_XMIT_BUFFS == 1)
+ for(i=0;i<2;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+ p->xmit_cmds[0] = (struct transmit_cmd_struct *)ptr; /* transmit cmd/buff 0 */
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+#else
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+ }
+#endif
+
+ ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+
+ /*
+ * Multicast setup
+ */
+
+ if(dev->mc_count)
+ { /* I don't understand this: do we really need memory after the init? */
+ int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
+ if(len <= 0)
+ {
+ printk("%s: Ooooops, no memory for MC-Setup!\n",dev->name);
+ }
+ else
+ {
+ if(len < num_addrs)
+ {
+ /* BUG - should go ALLMULTI in this case */
+ num_addrs = len;
+ printk("%s: Sorry, can only apply %d MC-Address(es).\n",dev->name,num_addrs);
+ }
+ mc_cmd = (struct mcsetup_cmd_struct *) ptr;
+ mc_cmd->cmd_status = 0;
+ mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
+ mc_cmd->cmd_link = 0xffff;
+ mc_cmd->mc_cnt = num_addrs * 6;
+ for(i=0;i<num_addrs;i++)
+ {
+ memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6);
+ dmi=dmi->next;
+ }
+ p->scb->cbl_offset = make16(mc_cmd);
+ p->scb->cmd = CUC_START;
+ ni_attn586();
+ s = jiffies;
+ while(!(mc_cmd->cmd_status & STAT_COMPL))
+ if(jiffies - s > 30)
+ break;
+ if(!(mc_cmd->cmd_status & STAT_COMPL))
+ printk("%s: Can't apply multicast-address-list.\n",dev->name);
+ }
+ }
+
+ /*
+ * alloc xmit-buffs / init xmit_cmds
+ */
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
+ ptr = (char *) ptr + XMIT_BUFF_SIZE;
+ p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
+ ptr = (char *) ptr + sizeof(struct tbd_struct);
+ if((void *)ptr > (void *)p->iscp)
+ {
+ printk("%s: not enough shared-mem for your configuration!\n",dev->name);
+ return 1;
+ }
+ memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct));
+ memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct));
+ p->xmit_cmds[i]->cmd_status = STAT_COMPL;
+ p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT;
+ p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
+ p->xmit_buffs[i]->next = 0xffff;
+ p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
+ }
+
+ p->xmit_count = 0;
+ p->xmit_last = 0;
+#ifndef NO_NOPCOMMANDS
+ p->nop_point = 0;
+#endif
+
+ /*
+ * 'start transmitter' (nop-loop)
+ */
+#ifndef NO_NOPCOMMANDS
+ p->scb->cbl_offset = make16(p->nop_cmds[0]);
+ p->scb->cmd = CUC_START;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+#else
+ p->xmit_cmds[0]->cmd_link = 0xffff;
+ p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_LAST | CMD_INT;
+#endif
+
+ return 0;
+}
+
+/******************************************************
+ * This is a helper routine for ni52_rnr_int() and init586().
+ * It sets up the Receive Frame Area (RFA).
+ */
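+
+/* Resulting layout, for reference: num_recv_buffs RFDs linked in a ring,
+   then the same number of RBDs (also a ring), then the RECV_BUFF_SIZE data
+   buffers -- one RBD and one buffer per RFD, per the 18.Sep.94 workaround
+   noted at the top of the file.  The last RFD gets RFD_SUSP so the RU
+   suspends rather than overrunning, and the returned pointer is the first
+   free byte after the whole area. */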
+
+static void *alloc_rfa(struct device *dev,void *ptr)
+{
+ volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr;
+ volatile struct rbd_struct *rbd;
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ memset((char *) rfd,0,sizeof(struct rfd_struct)*p->num_recv_buffs);
+ p->rfd_first = rfd;
+
+ for(i = 0; i < p->num_recv_buffs; i++)
+ rfd[i].next = make16(rfd + (i+1) % p->num_recv_buffs);
+ rfd[p->num_recv_buffs-1].last = RFD_SUSP; /* RU suspend */
+
+ ptr = (void *) (rfd + p->num_recv_buffs);
+
+ rbd = (struct rbd_struct *) ptr;
+ ptr = (void *) (rbd + p->num_recv_buffs);
+
+ /* clr descriptors */
+ memset((char *) rbd,0,sizeof(struct rbd_struct)*p->num_recv_buffs);
+
+ for(i=0;i<p->num_recv_buffs;i++)
+ {
+ rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs));
+ rbd[i].size = RECV_BUFF_SIZE;
+ rbd[i].buffer = make24(ptr);
+ ptr = (char *) ptr + RECV_BUFF_SIZE;
+ }
+
+ p->rfd_top = p->rfd_first;
+ p->rfd_last = p->rfd_first + p->num_recv_buffs - 1;
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->rfd_first->rbd_offset = make16(rbd);
+
+ return ptr;
+}
+
+
+/**************************************************
+ * Interrupt Handler ...
+ */
+
+static void ni52_interrupt(int irq,struct pt_regs *reg_ptr)
+{
+ struct device *dev = (struct device *) irq2dev_map[irq];
+ unsigned short stat;
+ struct priv *p;
+
+ if (dev == NULL) {
+ printk ("ni52-interrupt: irq %d for unknown device.\n",(int) -(((struct pt_regs *)reg_ptr)->orig_eax+2));
+ return;
+ }
+ p = (struct priv *) dev->priv;
+
+ dev->interrupt = 1;
+
+ while((stat=p->scb->status & STAT_MASK))
+ {
+ p->scb->cmd = stat;
+ ni_attn586(); /* ack inter. */
+
+ if(stat & STAT_CX) /* command with I-bit set complete */
+ ni52_xmt_int(dev);
+
+ if(stat & STAT_FR) /* received a frame */
+ ni52_rcv_int(dev);
+
+#ifndef NO_NOPCOMMANDS
+ if(stat & STAT_CNA) /* CU went 'not ready' */
+ {
+ if(dev->start)
+ printk("%s: oops! CU has left active state. stat: %04x/%04x.\n",dev->name,(int) stat,(int) p->scb->status);
+ }
+#endif
+
+ if(stat & STAT_RNR) /* RU went 'not ready' */
+ {
+ if(p->scb->status & RU_SUSPEND) /* special case: RU_SUSPEND */
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd = RUC_RESUME;
+ ni_attn586();
+ }
+ else
+ {
+ printk("%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n",dev->name,(int) stat,(int) p->scb->status);
+ ni52_rnr_int(dev);
+ }
+ }
+ WAIT_4_SCB_CMD(); /* wait for ack. (ni52_xmt_int can be faster than ack!!) */
+ if(p->scb->cmd) /* timed out? */
+ break;
+ }
+
+ dev->interrupt = 0;
+}
+
+/*******************************************************
+ * receive-interrupt
+ */
+
+static void ni52_rcv_int(struct device *dev)
+{
+ int status;
+ unsigned short totlen;
+ struct sk_buff *skb;
+ struct rbd_struct *rbd;
+ struct priv *p = (struct priv *) dev->priv;
+
+ for(;(status = p->rfd_top->status) & STAT_COMPL;)
+ {
+ rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
+
+ if(status & STAT_OK) /* frame received without error? */
+ {
+ if( (totlen = rbd->status) & RBD_LAST) /* the first and the last buffer? */
+ {
+ totlen &= RBD_MASK; /* length of this frame */
+ rbd->status = 0;
+ skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
+ if(skb != NULL)
+ {
+ skb->dev = dev;
+ skb_reserve(skb,2); /* 16 byte alignment */
+ memcpy(skb_put(skb,totlen),(char *) p->base+(unsigned long) rbd->buffer, totlen);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ p->stats.rx_packets++;
+ }
+ else
+ p->stats.rx_dropped++;
+ }
+ else
+ {
+ printk("%s: received oversized frame.\n",dev->name);
+ p->stats.rx_dropped++;
+ }
+ }
+ else /* frame !(ok), only with 'save-bad-frames' */
+ {
+ printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
+ p->stats.rx_errors++;
+ }
+ p->rfd_top->status = 0;
+ p->rfd_top->last = RFD_SUSP;
+ p->rfd_last->last = 0; /* delete RU_SUSP */
+ p->rfd_last = p->rfd_top;
+ p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
+ }
+}
+
+/**********************************************************
+ * handle 'Receiver went not ready'.
+ */
+
+static void ni52_rnr_int(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ p->stats.rx_errors++;
+
+ WAIT_4_SCB_CMD(); /* wait for the last cmd */
+ p->scb->cmd = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
+ ni_attn586();
+ WAIT_4_SCB_CMD(); /* wait for accept cmd. */
+
+ alloc_rfa(dev,(char *)p->rfd_first);
+ startrecv586(dev); /* restart RU */
+
+ printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->status);
+
+}
+
+/**********************************************************
+ * handle xmit - interrupt
+ */
+
+static void ni52_xmt_int(struct device *dev)
+{
+ int status;
+ struct priv *p = (struct priv *) dev->priv;
+
+ status = p->xmit_cmds[p->xmit_last]->cmd_status;
+ if(!(status & STAT_COMPL))
+ printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name);
+
+ if(status & STAT_OK)
+ {
+ p->stats.tx_packets++;
+ p->stats.collisions += (status & TCMD_MAXCOLLMASK);
+ }
+ else
+ {
+ p->stats.tx_errors++;
+ if(status & TCMD_LATECOLL) {
+ printk("%s: late collision detected.\n",dev->name);
+ p->stats.collisions++;
+ }
+ else if(status & TCMD_NOCARRIER) {
+ p->stats.tx_carrier_errors++;
+ printk("%s: no carrier detected.\n",dev->name);
+ }
+ else if(status & TCMD_LOSTCTS)
+ printk("%s: loss of CTS detected.\n",dev->name);
+ else if(status & TCMD_UNDERRUN) {
+ p->stats.tx_fifo_errors++;
+ printk("%s: DMA underrun detected.\n",dev->name);
+ }
+ else if(status & TCMD_MAXCOLL) {
+ printk("%s: Max. collisions exceeded.\n",dev->name);
+ p->stats.collisions += 16;
+ }
+ }
+
+#if (NUM_XMIT_BUFFS != 1)
+ if( (++p->xmit_last) == NUM_XMIT_BUFFS)
+ p->xmit_last = 0;
+#endif
+
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+}
+
+/***********************************************************
+ * (re)start the receiver
+ */
+
+static void startrecv586(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->scb->cmd = RUC_START;
+ ni_attn586(); /* start cmd. */
+ WAIT_4_SCB_CMD(); /* wait for accept cmd. (no timeout!!) */
+}
+
+/******************************************************
+ * send frame
+ */
+
+static int ni52_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ int len,i;
+#ifndef NO_NOPCOMMANDS
+ int next_nop;
+#endif
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(dev->tbusy)
+ {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+
+ if(p->scb->status & CU_ACTIVE) /* COMMAND-UNIT active? */
+ {
+ dev->tbusy = 0;
+#ifdef DEBUG
+ printk("%s: strange ... timeout with CU active?!?\n",dev->name);
+ printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)p->xmit_cmds[0]->cmd_status,(int)p->nop_cmds[0]->cmd_status,(int)p->nop_cmds[1]->cmd_status,(int)p->nop_point);
+#endif
+ p->scb->cmd = CUC_ABORT;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+ p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
+ p->scb->cmd = CUC_START;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+ dev->trans_start = jiffies;
+ return 0;
+ }
+ else
+ {
+#ifdef DEBUG
+ printk("%s: xmitter timed out, try to restart! stat: %04x\n",dev->name,p->scb->status);
+ printk("%s: command-stats: %04x %04x\n",dev->name,p->xmit_cmds[0]->cmd_status,p->xmit_cmds[1]->cmd_status);
+#endif
+ ni52_close(dev);
+ ni52_open(dev);
+ }
+ dev->trans_start = jiffies;
+ return 0;
+ }
+
+ if(skb == NULL)
+ {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (skb->len <= 0)
+ return 0;
+ if(skb->len > XMIT_BUFF_SIZE)
+ {
+ printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %ld bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
+ return 0;
+ }
+
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else
+ {
+ memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len);
+ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+
+#if (NUM_XMIT_BUFFS == 1)
+# ifdef NO_NOPCOMMANDS
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+ for(i=0;i<16;i++)
+ {
+ p->scb->cbl_offset = make16(p->xmit_cmds[0]);
+ p->scb->cmd = CUC_START;
+ p->xmit_cmds[0]->cmd_status = 0;
+
+ ni_attn586();
+ dev->trans_start = jiffies;
+ if(!i)
+ dev_kfree_skb(skb,FREE_WRITE);
+ WAIT_4_SCB_CMD();
+ if( (p->scb->status & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
+ break;
+ if(p->xmit_cmds[0]->cmd_status)
+ break;
+ if(i==15)
+ printk("%s: Can't start transmit-command.\n",dev->name);
+ }
+# else
+ next_nop = (p->nop_point + 1) & 0x1;
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+
+ p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
+ dev->trans_start = jiffies;
+ p->nop_point = next_nop;
+ dev_kfree_skb(skb,FREE_WRITE);
+# endif
+#else
+ p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
+ if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS )
+ next_nop = 0;
+
+ p->xmit_cmds[p->xmit_count]->cmd_status = 0;
+ p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
+ dev->trans_start = jiffies;
+ p->xmit_count = next_nop;
+
+ cli();
+ if(p->xmit_count != p->xmit_last)
+ dev->tbusy = 0;
+ sti();
+ dev_kfree_skb(skb,FREE_WRITE);
+#endif
+ }
+ return 0;
+}
+
+/*******************************************
+ * Someone wants the statistics
+ */
+
+static struct enet_statistics *ni52_get_stats(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ unsigned short crc,aln,rsc,ovrn;
+
+ crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */
+ p->scb->crc_errs -= crc;
+ aln = p->scb->aln_errs;
+ p->scb->aln_errs -= aln;
+ rsc = p->scb->rsc_errs;
+ p->scb->rsc_errs -= rsc;
+ ovrn = p->scb->ovrn_errs;
+ p->scb->ovrn_errs -= ovrn;
+
+ p->stats.rx_crc_errors += crc;
+ p->stats.rx_fifo_errors += ovrn;
+ p->stats.rx_frame_errors += aln;
+ p->stats.rx_dropped += rsc;
+
+ return &p->stats;
+}
+
+/********************************************************
+ * Set MC list ..
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ if(!dev->start)
+ {
+ printk("%s: Can't apply promiscuous/multicastmode to a not running interface.\n",dev->name);
+ return;
+ }
+
+ dev->start = 0;
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ dev->start = 1;
+}
+
+/*
+ * END: linux/drivers/net/ni52.c
+ */
diff --git a/i386/i386at/gpl/linux/net/ni52.h b/i386/i386at/gpl/linux/net/ni52.h
new file mode 100644
index 00000000..23b0a0e8
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/ni52.h
@@ -0,0 +1,284 @@
+/*
+ * Intel i82586 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers that work.
+ *
+ * copyrights (c) 1994 by Michael Hipp (mhipp@student.uni-tuebingen.de)
+ *
+ * I have had a look at the following sources:
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's i82586-driver for BSD
+ */
+
+
+#define NI52_RESET 0 /* writing to this address, resets the i82586 */
+#define NI52_ATTENTION 1 /* channel attention, kick the 586 */
+#define NI52_TENA 3 /* 2-5 possibly wrong, Xmit enable */
+#define NI52_TDIS 2 /* Xmit disable */
+#define NI52_INTENA 5 /* Interrupt enable */
+#define NI52_INTDIS 4 /* Interrupt disable */
+#define NI52_MAGIC1 6 /* dunno exact function */
+#define NI52_MAGIC2 7 /* dunno exact function */
+
+#define NI52_MAGICVAL1 0x00 /* magic-values for ni5210 card */
+#define NI52_MAGICVAL2 0x55
+
+/*
+ * where to find the System Configuration Pointer (SCP)
+ */
+#define SCP_DEFAULT_ADDRESS 0xfffff4
+
+
+/*
+ * System Configuration Pointer Struct
+ */
+
+struct scp_struct
+{
+ unsigned short zero_dum0; /* has to be zero */
+ unsigned char sysbus; /* 0=16Bit,1=8Bit */
+ unsigned char zero_dum1; /* has to be zero for 586 */
+ unsigned short zero_dum2;
+ unsigned short zero_dum3;
+ char *iscp; /* pointer to the iscp-block */
+};
+
+
+/*
+ * Intermediate System Configuration Pointer (ISCP)
+ */
+struct iscp_struct
+{
+ unsigned char busy; /* 586 clears after successful init */
+  unsigned char zero_dummy;	/* has to be zero */
+ unsigned short scb_offset; /* pointeroffset to the scb_base */
+ char *scb_base; /* base-address of all 16-bit offsets */
+};
+
+/*
+ * System Control Block (SCB)
+ */
+struct scb_struct
+{
+ unsigned short status; /* status word */
+ unsigned short cmd; /* command word */
+ unsigned short cbl_offset; /* pointeroffset, command block list */
+ unsigned short rfa_offset; /* pointeroffset, receive frame area */
+ unsigned short crc_errs; /* CRC-Error counter */
+  unsigned short aln_errs;	/* alignment error counter */
+  unsigned short rsc_errs;	/* resource error counter */
+  unsigned short ovrn_errs;	/* overrun error counter */
+};
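+/*
+ * Illustrative sketch only (not from the original driver): how the three
+ * structures above chain together at init time.  'scp', 'iscp' and 'scb_off'
+ * are just example names.
+ *
+ *	scp.iscp        = (char *) &iscp;	 SCP points at the ISCP
+ *	iscp.busy       = 1;			 586 clears this after a good init
+ *	iscp.scb_base   = base;			 base for all 16-bit offsets
+ *	iscp.scb_offset = scb_off;		 so the SCB sits at base + scb_off
+ *	... kick the 586 (channel attention) and wait for iscp.busy to drop ...
+ */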
+
+/*
+ * possible command values for the command word
+ */
+#define RUC_MASK 0x0070 /* mask for RU commands */
+#define RUC_NOP 0x0000 /* NOP-command */
+#define RUC_START 0x0010 /* start RU */
+#define RUC_RESUME 0x0020 /* resume RU after suspend */
+#define RUC_SUSPEND 0x0030 /* suspend RU */
+#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
+
+#define CUC_MASK 0x0700 /* mask for CU command */
+#define CUC_NOP 0x0000 /* NOP-command */
+#define CUC_START	0x0100		/* start execution of the first cmd on the CBL */
+#define CUC_RESUME 0x0200 /* resume after suspend */
+#define CUC_SUSPEND 0x0300 /* Suspend CU */
+#define CUC_ABORT 0x0400 /* abort command operation immediately */
+
+#define ACK_MASK 0xf000 /* mask for ACK command */
+#define ACK_CX 0x8000 /* acknowledges STAT_CX */
+#define ACK_FR 0x4000 /* ack. STAT_FR */
+#define ACK_CNA 0x2000 /* ack. STAT_CNA */
+#define ACK_RNR 0x1000 /* ack. STAT_RNR */
+
+/*
+ * possible status values for the status word
+ */
+#define STAT_MASK 0xf000 /* mask for cause of interrupt */
+#define STAT_CX 0x8000 /* CU finished cmd with its I bit set */
+#define STAT_FR 0x4000 /* RU finished receiving a frame */
+#define STAT_CNA 0x2000 /* CU left active state */
+#define STAT_RNR 0x1000 /* RU left ready state */
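+/*
+ * Acknowledge sketch (illustrative; this follows the ni52.c code above,
+ * which supplies p, ni_attn586() and WAIT_4_SCB_CMD()):
+ *
+ *	ack = p->scb->status & STAT_MASK;	 which events are pending
+ *	p->scb->cmd = ack;			 ACK_* bits line up with STAT_*
+ *	ni_attn586();				 channel attention: 586 reads SCB
+ *	WAIT_4_SCB_CMD();			 586 clears cmd when it is done
+ */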
+
+#define CU_STATUS 0x700 /* CU status, 0=idle */
+#define CU_SUSPEND 0x100 /* CU is suspended */
+#define CU_ACTIVE 0x200 /* CU is active */
+
+#define RU_STATUS 0x70 /* RU status, 0=idle */
+#define RU_SUSPEND 0x10 /* RU suspended */
+#define RU_NOSPACE 0x20 /* RU no resources */
+#define RU_READY 0x40 /* RU is ready */
+
+/*
+ * Receive Frame Descriptor (RFD)
+ */
+struct rfd_struct
+{
+ unsigned short status; /* status word */
+ unsigned short last; /* Bit15,Last Frame on List / Bit14,suspend */
+ unsigned short next; /* linkoffset to next RFD */
+ unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
+ unsigned char dest[6]; /* ethernet-address, destination */
+ unsigned char source[6]; /* ethernet-address, source */
+ unsigned short length; /* 802.3 frame-length */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RFD_LAST 0x8000 /* last: last rfd in the list */
+#define RFD_SUSP 0x4000 /* last: suspend RU after */
+#define RFD_ERRMASK 0x0fe1 /* status: errormask */
+#define RFD_MATCHADD	0x0002		/* status: destination address does not match IA */
+#define RFD_RNR 0x0200 /* status: receiver out of resources */
+
+/*
+ * Receive Buffer Descriptor (RBD)
+ */
+struct rbd_struct
+{
+ unsigned short status; /* status word,number of used bytes in buff */
+ unsigned short next; /* pointeroffset to next RBD */
+ char *buffer; /* receive buffer address pointer */
+ unsigned short size; /* size of this buffer */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RBD_LAST 0x8000 /* last buffer */
+#define RBD_USED 0x4000 /* this buffer has data */
+#define RBD_MASK 0x3fff /* size-mask for length */
+
+/*
+ * Status values for Commands/RFD
+ */
+#define STAT_COMPL 0x8000 /* status: frame/command is complete */
+#define STAT_BUSY 0x4000 /* status: frame/command is busy */
+#define STAT_OK 0x2000 /* status: frame/command is ok */
+
+/*
+ * Action-Commands
+ */
+#define CMD_NOP 0x0000 /* NOP */
+#define CMD_IASETUP 0x0001 /* initial address setup command */
+#define CMD_CONFIGURE 0x0002 /* configure command */
+#define CMD_MCSETUP 0x0003 /* MC setup command */
+#define CMD_XMIT 0x0004 /* transmit command */
+#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
+#define CMD_DUMP 0x0006 /* dump command */
+#define CMD_DIAGNOSE 0x0007 /* diagnose command */
+
+/*
+ * Action command bits
+ */
+#define CMD_LAST 0x8000 /* indicates last command in the CBL */
+#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
+#define CMD_INT 0x2000 /* generate interrupt after execution */
+
+/*
+ * NOP - command
+ */
+struct nop_cmd_struct
+{
+ unsigned short cmd_status; /* status of this command */
+ unsigned short cmd_cmd; /* the command itself (+bits) */
+ unsigned short cmd_link; /* offsetpointer to next command */
+};
+
+/*
+ * IA Setup command
+ */
+struct iasetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char iaddr[6];
+};
+
+/*
+ * Configure command
+ */
+struct configure_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char byte_cnt; /* size of the config-cmd */
+ unsigned char fifo; /* fifo/recv monitor */
+ unsigned char sav_bf; /* save bad frames (bit7=1)*/
+ unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
+ unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
+ unsigned char ifs; /* inter frame spacing */
+ unsigned char time_low; /* slot time low */
+ unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */
+ unsigned char promisc; /* promisc-mode(0) , et al (1-7) */
+ unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */
+ unsigned char fram_len; /* minimal frame len */
+ unsigned char dummy; /* dummy */
+};
+
+/*
+ * Multicast Setup command
+ */
+struct mcsetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short mc_cnt; /* number of bytes in the MC-List */
+  unsigned char  mc_list[0][6];	/* list of 6-byte entries */
+};
+
+/*
+ * transmit command
+ */
+struct transmit_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short tbd_offset; /* pointeroffset to TBD */
+ unsigned char dest[6]; /* destination address of the frame */
+ unsigned short length; /* user defined: 802.3 length / Ether type */
+};
+
+#define TCMD_ERRMASK 0x0fa0
+#define TCMD_MAXCOLLMASK 0x000f
+#define TCMD_MAXCOLL 0x0020
+#define TCMD_HEARTBEAT 0x0040
+#define TCMD_DEFERRED 0x0080
+#define TCMD_UNDERRUN 0x0100
+#define TCMD_LOSTCTS 0x0200
+#define TCMD_NOCARRIER 0x0400
+#define TCMD_LATECOLL 0x0800
+
+struct tdr_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short status;
+};
+
+#define TDR_LNK_OK 0x8000 /* No link problem identified */
+#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
+#define TDR_ET_OPN 0x2000 /* open, no correct termination */
+#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
+#define TDR_TIMEMASK 0x07ff /* mask for the time field */
+
+/*
+ * Transmit Buffer Descriptor (TBD)
+ */
+struct tbd_struct
+{
+ unsigned short size; /* size + EOF-Flag(15) */
+ unsigned short next; /* pointeroffset to next TBD */
+ char *buffer; /* pointer to buffer */
+};
+
+#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
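+/*
+ * Transmit setup sketch (illustrative; mirrors ni52_send_packet() above,
+ * with 'tbd' and 'xmit' standing in for the driver's own structures):
+ *
+ *	tbd.buffer      = data;			 frame data to send
+ *	tbd.size        = TBD_LAST | len;	 length plus end-of-frame flag
+ *	xmit.tbd_offset = make16(&tbd);		 hook the TBD into the command
+ *	xmit.cmd_cmd    = CMD_XMIT | CMD_INT;	 transmit, interrupt when done
+ */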
+
+
+
+
diff --git a/i386/i386at/gpl/linux/net/ni65.c b/i386/i386at/gpl/linux/net/ni65.c
new file mode 100644
index 00000000..44a58112
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/ni65.c
@@ -0,0 +1,648 @@
+/*
+ * ni6510 (am7990 'lance' chip) driver for Linux-net-3 by MH
+ * Alphacode v0.33 (94/08/22) for 1.1.47 (or later)
+ *
+ * ----------------------------------------------------------
+ * WARNING: DOESN'T WORK ON MACHINES WITH MORE THAN 16MB !!!!
+ * ----------------------------------------------------------
+ *
+ * copyright (c) 1994 M.Hipp
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers the Linux-kernel.
+ *
+ * comments/bugs/suggestions can be sent to:
+ * Michael Hipp
+ * email: mhipp@student.uni-tuebingen.de
+ *
+ * sources:
+ * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
+ * and from the original drivers by D.Becker
+ */
+
+/*
+ * Nov.18: multicast tweaked (AC).
+ *
+ * Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
+ *
+ * July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
+ */
+
+/*
+ * known BUGS: 16MB limit
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "ni65.h"
+
+/************************************
+ * skeleton-stuff
+ */
+
+#ifndef HAVE_PORTRESERVE
+#define check_region(ioaddr, size) 0
+#define request_region(ioaddr, size,name) do ; while (0)
+#endif
+
+#ifndef NET_DEBUG
+#define NET_DEBUG 2
+#endif
+/*
+static unsigned int net_debug = NET_DEBUG;
+*/
+
+#define NI65_TOTAL_SIZE 16
+
+#define SA_ADDR0 0x02
+#define SA_ADDR1 0x07
+#define SA_ADDR2 0x01
+#define CARD_ID0 0x00
+#define CARD_ID1 0x55
+
+/*****************************************/
+
+#define PORT dev->base_addr
+
+#define RMDNUM 8
+#define RMDNUMMASK 0x6000 /* log2(RMDNUM)<<13 */
+#define TMDNUM 4
+#define TMDNUMMASK 0x4000 /* log2(TMDNUM)<<13 */
+
+#define R_BUF_SIZE 1518
+#define T_BUF_SIZE 1518
+
+#define MEMSIZE (8+RMDNUM*8+TMDNUM*8)
+
+#define L_DATAREG 0x00
+#define L_ADDRREG 0x02
+
+#define L_RESET 0x04
+#define L_CONFIG 0x05
+#define L_EBASE 0x08
+
+/*
+ * to access the am7990-regs, you have to write
+ * reg-number into L_ADDRREG, then you can access it using L_DATAREG
+ */
+#define CSR0 0x00
+#define CSR1 0x01
+#define CSR2 0x02
+#define CSR3 0x03
+
+/* if you #define NO_STATIC the driver is faster but you will have (more) problems with >16MB memory */
+#undef NO_STATIC
+
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
+ outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
+ inw(PORT+L_DATAREG))
+#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
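+/*
+ * Usage sketch (illustrative only): every CSR access goes through the
+ * address/data pair wrapped by the macros above, e.g.
+ *
+ *	writereg(0, CSR3);			 select CSR3, write 0
+ *	csr0 = readreg(CSR0);			 select CSR0 and read it
+ *	writedatareg(csr0 | CSR0_INEA);		 rewrite CSR0 without reselecting
+ */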
+
+static int ni65_probe1(struct device *dev,int);
+static void ni65_interrupt(int irq, struct pt_regs *regs);
+ static void recv_intr(struct device *dev);
+ static void xmit_intr(struct device *dev);
+static int ni65_open(struct device *dev);
+ static int am7990_reinit(struct device *dev);
+static int ni65_send_packet(struct sk_buff *skb, struct device *dev);
+static int ni65_close(struct device *dev);
+static struct enet_statistics *ni65_get_stats(struct device *);
+
+static void set_multicast_list(struct device *dev);
+
+struct priv
+{
+ struct init_block ib;
+ void *memptr;
+ struct rmd *rmdhead;
+ struct tmd *tmdhead;
+ int rmdnum;
+ int tmdnum,tmdlast;
+ struct sk_buff *recv_skb[RMDNUM];
+ void *tmdbufs[TMDNUM];
+ int lock,xmit_queued;
+ struct enet_statistics stats;
+};
+
+int irqtab[] = { 9,12,15,5 }; /* irq config-translate */
+int dmatab[] = { 0,3,5,6 }; /* dma config-translate */
+
+/*
+ * open (most done by init)
+ */
+
+static int ni65_open(struct device *dev)
+{
+ if(am7990_reinit(dev))
+ {
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ return 0;
+ }
+ else
+ {
+ dev->start = 0;
+ return -EAGAIN;
+ }
+}
+
+static int ni65_close(struct device *dev)
+{
+ outw(0,PORT+L_RESET); /* that's the hard way */
+ dev->tbusy = 1;
+ dev->start = 0;
+ return 0;
+}
+
+/*
+ * Probe The Card (not the lance-chip)
+ * and set the hardware address
+ */
+
+int ni65_probe(struct device *dev)
+{
+ int *port, ports[] = {0x300,0x320,0x340,0x360, 0};
+ int base_addr = dev->base_addr;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ni65_probe1(dev, base_addr);
+ else if (base_addr > 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (port = ports; *port; port++)
+ {
+ int ioaddr = *port;
+ if (check_region(ioaddr, NI65_TOTAL_SIZE))
+ continue;
+ if( !(inb(ioaddr+L_EBASE+6) == CARD_ID0) ||
+ !(inb(ioaddr+L_EBASE+7) == CARD_ID1) )
+ continue;
+ dev->base_addr = ioaddr;
+ if (ni65_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ dev->base_addr = base_addr;
+ return ENODEV;
+}
+
+
+static int ni65_probe1(struct device *dev,int ioaddr)
+{
+ int i;
+ unsigned char station_addr[6];
+ struct priv *p;
+
+ for(i=0;i<6;i++)
+ station_addr[i] = dev->dev_addr[i] = inb(PORT+L_EBASE+i);
+
+ if(station_addr[0] != SA_ADDR0 || station_addr[1] != SA_ADDR1)
+ {
+ printk("%s: wrong Hardaddress \n",dev->name);
+ return ENODEV;
+ }
+
+ if(dev->irq == 0)
+ dev->irq = irqtab[(inw(PORT+L_CONFIG)>>2)&3];
+ if(dev->dma == 0)
+ dev->dma = dmatab[inw(PORT+L_CONFIG)&3];
+
+ printk("%s: %s found at %#3lx, IRQ %d DMA %d.\n", dev->name,
+ "network card", dev->base_addr, dev->irq,dev->dma);
+
+ {
+ int irqval = request_irq(dev->irq, &ni65_interrupt,0,"ni65");
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n",
+ dev->name,dev->irq, irqval);
+ return EAGAIN;
+ }
+ if(request_dma(dev->dma, "ni65") != 0)
+ {
+ printk("%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
+ free_irq(dev->irq);
+ return EAGAIN;
+ }
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ /* Grab the region so we can find another board if autoIRQ fails. */
+ request_region(ioaddr,NI65_TOTAL_SIZE,"ni65");
+
+ p = dev->priv = (void *) kmalloc(sizeof(struct priv),GFP_KERNEL);
+ if (p == NULL)
+ return -ENOMEM;
+ memset((char *) dev->priv,0,sizeof(struct priv));
+
+ dev->open = ni65_open;
+ dev->stop = ni65_close;
+ dev->hard_start_xmit = ni65_send_packet;
+ dev->get_stats = ni65_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+
+ ether_setup(dev);
+
+ dev->flags &= ~IFF_MULTICAST;
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ dev->start = 0;
+
+ if( (p->memptr = kmalloc(MEMSIZE,GFP_KERNEL)) == NULL) {
+ printk("%s: Can't alloc TMD/RMD-buffer.\n",dev->name);
+ return EAGAIN;
+ }
+ if( (unsigned long) (p->memptr + MEMSIZE) & 0xff000000) {
+ printk("%s: Can't alloc TMD/RMD buffer in lower 16MB!\n",dev->name);
+ return EAGAIN;
+ }
+ p->tmdhead = (struct tmd *) ((( (unsigned long)p->memptr ) + 8) & 0xfffffff8);
+ p->rmdhead = (struct rmd *) (p->tmdhead + TMDNUM);
+
+#ifndef NO_STATIC
+ for(i=0;i<TMDNUM;i++)
+ {
+ if( (p->tmdbufs[i] = kmalloc(T_BUF_SIZE,GFP_ATOMIC)) == NULL) {
+ printk("%s: Can't alloc Xmit-Mem.\n",dev->name);
+ return EAGAIN;
+ }
+ if( (unsigned long) (p->tmdbufs[i]+T_BUF_SIZE) & 0xff000000) {
+ printk("%s: Can't alloc Xmit-Mem in lower 16MB!\n",dev->name);
+ return EAGAIN;
+ }
+ }
+#endif
+
+ for(i=0;i<RMDNUM;i++)
+ {
+ if( (p->recv_skb[i] = dev_alloc_skb(R_BUF_SIZE)) == NULL) {
+ printk("%s: unable to alloc recv-mem\n",dev->name);
+ return EAGAIN;
+ }
+ if( (unsigned long) (p->recv_skb[i]->data + R_BUF_SIZE) & 0xff000000) {
+ printk("%s: unable to alloc receive-memory in lower 16MB!\n",dev->name);
+ return EAGAIN;
+ }
+ }
+
+ return 0; /* we've found everything */
+}
+
+/*
+ * init lance (write init-values .. init-buffers) (open-helper)
+ */
+
+static int am7990_reinit(struct device *dev)
+{
+ int i,j;
+ struct tmd *tmdp;
+ struct rmd *rmdp;
+ struct priv *p = (struct priv *) dev->priv;
+
+ p->lock = 0;
+ p->xmit_queued = 0;
+
+ disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
+ set_dma_mode(dev->dma,DMA_MODE_CASCADE);
+ enable_dma(dev->dma);
+
+ outw(0,PORT+L_RESET); /* first: reset the card */
+ if(inw(PORT+L_DATAREG) != 0x4)
+ {
+ printk("%s: can't RESET ni6510 card: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
+ disable_dma(dev->dma);
+ free_dma(dev->dma);
+ free_irq(dev->irq);
+ return 0;
+ }
+
+ /* here: memset all buffs to zero */
+
+ memset(p->memptr,0,MEMSIZE);
+
+ p->tmdnum = 0; p->tmdlast = 0;
+ for(i=0;i<TMDNUM;i++)
+ {
+ tmdp = p->tmdhead + i;
+#ifndef NO_STATIC
+ tmdp->u.buffer = (unsigned long) p->tmdbufs[i];
+#endif
+ tmdp->u.s.status = XMIT_START | XMIT_END;
+ }
+
+ p->rmdnum = 0;
+ for(i=0;i<RMDNUM;i++)
+ {
+ rmdp = p->rmdhead + i;
+ rmdp->u.buffer = (unsigned long) p->recv_skb[i]->data;
+ rmdp->u.s.status = RCV_OWN;
+ rmdp->blen = -R_BUF_SIZE;
+ rmdp->mlen = 0;
+ }
+
+ for(i=0;i<6;i++)
+ {
+ p->ib.eaddr[i] = dev->dev_addr[i];
+ }
+ p->ib.mode = 0;
+ for(i=0;i<8;i++)
+ p->ib.filter[i] = 0;
+ p->ib.trplow = (unsigned short) (( (unsigned long) p->tmdhead ) & 0xffff);
+ p->ib.trphigh = (unsigned short) ((( (unsigned long) p->tmdhead )>>16) & 0x00ff) | TMDNUMMASK;
+ p->ib.rrplow = (unsigned short) (( (unsigned long) p->rmdhead ) & 0xffff);
+ p->ib.rrphigh = (unsigned short) ((( (unsigned long) p->rmdhead )>>16) & 0x00ff) | RMDNUMMASK;
+
+ writereg(0,CSR3); /* busmaster/no word-swap */
+ writereg((unsigned short) (((unsigned long) &(p->ib)) & 0xffff),CSR1);
+ writereg((unsigned short) (((unsigned long) &(p->ib))>>16),CSR2);
+
+ writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */
+
+ /*
+ * NOW, WE NEVER WILL CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
+ */
+
+ for(i=0;i<5;i++)
+ {
+ for(j=0;j<2000000;j++); /* wait a while */
+ if(inw(PORT+L_DATAREG) & CSR0_IDON) break; /* init ok ? */
+ }
+ if(i == 5)
+ {
+ printk("%s: can't init am7990, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
+ disable_dma(dev->dma);
+ free_dma(dev->dma);
+ free_irq(dev->irq);
+ return 0; /* false */
+ }
+
+ writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT); /* start lance , enable interrupts */
+
+ return 1; /* OK */
+}
+
+/*
+ * interrupt handler
+ */
+
+static void ni65_interrupt(int irq, struct pt_regs * regs)
+{
+ int csr0;
+ struct device *dev = (struct device *) irq2dev_map[irq];
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ csr0 = inw(PORT+L_DATAREG);
+ writedatareg(csr0 & CSR0_CLRALL); /* ack interrupts, disable int. */
+
+ dev->interrupt = 1;
+
+ if(csr0 & CSR0_ERR)
+ {
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(csr0 & CSR0_BABL)
+ p->stats.tx_errors++;
+ if(csr0 & CSR0_MISS)
+ p->stats.rx_errors++;
+ }
+
+ if(csr0 & CSR0_RINT) /* RECV-int? */
+ {
+ recv_intr(dev);
+ }
+ if(csr0 & CSR0_TINT) /* XMIT-int? */
+ {
+ xmit_intr(dev);
+ }
+
+ writedatareg(CSR0_INEA); /* reenable inter. */
+ dev->interrupt = 0;
+
+ return;
+}
+
+/*
+ * We have received an Xmit-Interrupt ..
+ * send a new packet if necessary
+ */
+
+static void xmit_intr(struct device *dev)
+{
+ int tmdstat;
+ struct tmd *tmdp;
+ struct priv *p = (struct priv *) dev->priv;
+
+#ifdef NO_STATIC
+ struct sk_buff *skb;
+#endif
+
+ while(p->xmit_queued)
+ {
+ tmdp = p->tmdhead + p->tmdlast;
+ tmdstat = tmdp->u.s.status;
+ if(tmdstat & XMIT_OWN)
+ break;
+#ifdef NO_STATIC
+ skb = (struct sk_buff *) p->tmdbufs[p->tmdlast];
+ dev_kfree_skb(skb,FREE_WRITE);
+#endif
+
+ if(tmdstat & XMIT_ERR)
+ {
+ printk("%s: xmit-error: %04x %04x\n",dev->name,(int) tmdstat,(int) tmdp->status2);
+ if(tmdp->status2 & XMIT_TDRMASK)
+ printk("%s: tdr-problems (e.g. no resistor)\n",dev->name);
+
+ /* checking some errors */
+ if(tmdp->status2 & XMIT_RTRY)
+ p->stats.tx_aborted_errors++;
+ if(tmdp->status2 & XMIT_LCAR)
+ p->stats.tx_carrier_errors++;
+ p->stats.tx_errors++;
+ tmdp->status2 = 0;
+ }
+ else
+ p->stats.tx_packets++;
+
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ p->xmit_queued = 0;
+ }
+
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+}
+
+/*
+ * We have received a packet
+ */
+
+static void recv_intr(struct device *dev)
+{
+ struct rmd *rmdp;
+ int rmdstat,len;
+ struct sk_buff *skb,*skb1;
+ struct priv *p = (struct priv *) dev->priv;
+
+ rmdp = p->rmdhead + p->rmdnum;
+ while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
+ {
+ if( (rmdstat & (RCV_START | RCV_END)) != (RCV_START | RCV_END) ) /* is packet start & end? */
+ {
+ if(rmdstat & RCV_START)
+ {
+ p->stats.rx_errors++;
+ p->stats.rx_length_errors++;
+ printk("%s: packet too long\n",dev->name);
+ }
+ rmdp->u.s.status = RCV_OWN; /* change owner */
+ }
+ else if(rmdstat & RCV_ERR)
+ {
+ printk("%s: receive-error: %04x\n",dev->name,(int) rmdstat );
+ p->stats.rx_errors++;
+ if(rmdstat & RCV_FRAM) p->stats.rx_frame_errors++;
+ if(rmdstat & RCV_OFLO) p->stats.rx_over_errors++;
+ if(rmdstat & RCV_CRC) p->stats.rx_crc_errors++;
+ rmdp->u.s.status = RCV_OWN;
+ printk("%s: lance-status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
+ }
+ else
+ {
+ len = (rmdp->mlen & 0x0fff) - 4; /* -4: ignore FCS */
+ skb = dev_alloc_skb(R_BUF_SIZE);
+ if(skb != NULL)
+ {
+ if( (unsigned long) (skb->data + R_BUF_SIZE) & 0xff000000) {
+ memcpy(skb_put(skb,len),p->recv_skb[p->rmdnum]->data,len);
+ skb1 = skb;
+ }
+ else {
+ skb1 = p->recv_skb[p->rmdnum];
+ p->recv_skb[p->rmdnum] = skb;
+ rmdp->u.buffer = (unsigned long) skb_put(skb1,len);
+ }
+ rmdp->u.s.status = RCV_OWN;
+ rmdp->mlen = 0; /* not necc ???? */
+ skb1->dev = dev;
+ p->stats.rx_packets++;
+ skb1->protocol=eth_type_trans(skb1,dev);
+ netif_rx(skb1);
+ }
+ else
+ {
+ rmdp->u.s.status = RCV_OWN;
+ printk("%s: can't alloc new sk_buff\n",dev->name);
+ p->stats.rx_dropped++;
+ }
+ }
+ p->rmdnum++; p->rmdnum &= RMDNUM-1;
+ rmdp = p->rmdhead + p->rmdnum;
+ }
+}
+
+/*
+ * kick xmitter ..
+ */
+
+static int ni65_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ struct tmd *tmdp;
+
+ if(dev->tbusy)
+ {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 25)
+ return 1;
+
+ printk("%s: xmitter timed out, try to restart!\n",dev->name);
+ am7990_reinit(dev);
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+
+ if(skb == NULL)
+ {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (skb->len <= 0)
+ return 0;
+
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+ if(set_bit(0,(void*) &p->lock) != 0)
+ {
+ printk("%s: Queue was locked!\n",dev->name);
+ return 1;
+ }
+
+ {
+ short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+ tmdp = p->tmdhead + p->tmdnum;
+
+#ifdef NO_STATIC
+ tmdp->u.buffer = (unsigned long) (skb->data);
+ p->tmdbufs[p->tmdnum] = skb;
+#else
+ memcpy((char *) (tmdp->u.buffer & 0x00ffffff),(char *)skb->data,skb->len);
+ dev_kfree_skb (skb, FREE_WRITE);
+#endif
+ tmdp->blen = -len;
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
+
+ cli();
+ p->xmit_queued = 1;
+ writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */
+ p->tmdnum++; p->tmdnum &= TMDNUM-1;
+
+ if( !((p->tmdhead + p->tmdnum)->u.s.status & XMIT_OWN) )
+ dev->tbusy = 0;
+ p->lock = 0;
+ sti();
+
+ dev->trans_start = jiffies;
+
+ }
+
+ return 0;
+}
+
+static struct enet_statistics *ni65_get_stats(struct device *dev)
+{
+ return &((struct priv *) dev->priv)->stats;
+}
+
+static void set_multicast_list(struct device *dev)
+{
+}
+
+/*
+ * END of ni65.c
+ */
+
diff --git a/i386/i386at/gpl/linux/net/ni65.h b/i386/i386at/gpl/linux/net/ni65.h
new file mode 100644
index 00000000..144523fa
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/ni65.h
@@ -0,0 +1,130 @@
+/* am7990 (lance) definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers that work.
+ *
+ * Michael Hipp
+ * email: mhipp@student.uni-tuebingen.de
+ *
+ * sources: (mail me or ask archie if you need them)
+ * crynwr-packet-driver
+ */
+
+/*
+ * Control and Status Register 0 (CSR0) bit definitions
+ * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
+ *
+ */
+
+#define CSR0_ERR 0x8000 /* Error summary (R) */
+#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
+#define CSR0_CERR 0x2000 /* Collision Error (RC) */
+#define CSR0_MISS 0x1000 /* Missed packet (RC) */
+#define CSR0_MERR 0x0800 /* Memory Error (RC) */
+#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
+#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
+#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
+#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
+#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
+#define CSR0_RXON 0x0020 /* Receiver on (R) */
+#define CSR0_TXON 0x0010 /* Transmitter on (R) */
+#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
+#define CSR0_STOP 0x0004 /* Stop (RS) */
+#define CSR0_STRT 0x0002 /* Start (RS) */
+#define CSR0_INIT 0x0001 /* Initialize (RS) */
+
+#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
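+/*
+ * Acknowledge sketch (illustrative; the pattern ni65_interrupt() above uses
+ * once CSR0 stays permanently selected):
+ *
+ *	csr0 = inw(PORT+L_DATAREG);		 read pending status
+ *	writedatareg(csr0 & CSR0_CLRALL);	 ack events, interrupts now masked
+ *	... service CSR0_RINT / CSR0_TINT ...
+ *	writedatareg(CSR0_INEA);		 re-enable interrupts
+ */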
+/*
+ * Initialization Block Mode operation Bit Definitions.
+ */
+
+#define M_PROM 0x8000 /* Promiscuous Mode */
+#define M_INTL 0x0040 /* Internal Loopback */
+#define M_DRTY 0x0020 /* Disable Retry */
+#define M_COLL 0x0010 /* Force Collision */
+#define M_DTCR		0x0008	/* Disable Transmit CRC */
+#define M_LOOP 0x0004 /* Loopback */
+#define M_DTX 0x0002 /* Disable the Transmitter */
+#define M_DRX 0x0001 /* Disable the Receiver */
+
+
+/*
+ * Receive message descriptor bit definitions.
+ */
+
+#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define RCV_ERR 0x40 /* Error Summary */
+#define RCV_FRAM 0x20 /* Framing Error */
+#define RCV_OFLO 0x10 /* Overflow Error */
+#define RCV_CRC 0x08 /* CRC Error */
+#define RCV_BUF_ERR 0x04 /* Buffer Error */
+#define RCV_START 0x02 /* Start of Packet */
+#define RCV_END 0x01 /* End of Packet */
+
+
+/*
+ * Transmit message descriptor bit definitions.
+ */
+
+#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define XMIT_ERR 0x40 /* Error Summary */
+#define XMIT_RETRY	0x10	/* more than 1 retry needed to Xmit */
+#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */
+#define XMIT_DEF 0x04 /* Deferred */
+#define XMIT_START 0x02 /* Start of Packet */
+#define XMIT_END 0x01 /* End of Packet */
+
+/*
+ * transmit status (2) (valid if XMIT_ERR == 1)
+ */
+
+#define XMIT_RTRY 0x0200 /* Failed after 16 retransmissions */
+#define XMIT_LCAR 0x0400 /* Loss of Carrier */
+#define XMIT_LCOL 0x1000 /* Late collision */
+#define XMIT_RESERV 0x2000 /* Reserved */
+#define XMIT_UFLO 0x4000 /* Underflow (late memory) */
+#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */
+#define XMIT_TDRMASK 0x003f /* time-domain-reflectometer-value */
+
+struct init_block
+{
+ unsigned short mode;
+ unsigned char eaddr[6];
+ unsigned char filter[8];
+ unsigned short rrplow; /* receive ring pointer (align 8) */
+ unsigned short rrphigh; /* bit 13-15: number of rmd's (power of 2) */
+ unsigned short trplow; /* transmit ring pointer (align 8) */
+ unsigned short trphigh; /* bit 13-15: number of tmd's (power of 2) */
+};
+
+struct rmd /* Receive Message Descriptor */
+{
+ union
+ {
+ volatile unsigned long buffer;
+ struct
+ {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ short blen;
+ volatile unsigned short mlen;
+};
+
+struct tmd
+{
+ union
+ {
+ volatile unsigned long buffer;
+ struct
+ {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ unsigned short blen;
+ volatile unsigned short status2;
+};
+
+
diff --git a/i386/i386at/gpl/linux/net/seeq8005.c b/i386/i386at/gpl/linux/net/seeq8005.c
new file mode 100644
index 00000000..5799d80c
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/seeq8005.c
@@ -0,0 +1,760 @@
+/* seeq8005.c: A network driver for linux. */
+/*
+ Based on skeleton.c,
+ Written 1993-94 by Donald Becker.
+ See the skeleton.c file for further copyright information.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as hamish@zot.apana.org.au
+
+ This file is a network device driver for the SEEQ 8005 chipset and
+ the Linux operating system.
+
+*/
+
+static const char *version =
+ "seeq8005.c:v1.00 8/07/95 Hamish Coleman (hamish@zot.apana.org.au)\n";
+
+/*
+ Sources:
+ SEEQ 8005 databook
+
+ Version history:
+ 1.00 Public release. cosmetic changes (no warnings now)
+	0.68	Turning per-packet, interrupt debug messages off - testing for release.
+ 0.67 timing problems/bad buffer reads seem to be fixed now
+ 0.63 *!@$ protocol=eth_type_trans -- now packets flow
+ 0.56 Send working
+ 0.48 Receive working
+*/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include "seeq8005.h"
+
+/* First, a few definitions that the brave might change. */
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int seeq8005_portlist[] =
+ { 0x300, 0x320, 0x340, 0x360, 0};
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct enet_statistics stats;
+ unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */
+ long open_time; /* Useless example local info. */
+};
+
+/* The station (ethernet) address prefix, used for IDing the board. */
+#define SA_ADDR0 0x00
+#define SA_ADDR1 0x80
+#define SA_ADDR2 0x4b
+
+/* Index to functions, as function prototypes. */
+
+extern int seeq8005_probe(struct device *dev);
+
+static int seeq8005_probe1(struct device *dev, int ioaddr);
+static int seeq8005_open(struct device *dev);
+static int seeq8005_send_packet(struct sk_buff *skb, struct device *dev);
+static void seeq8005_interrupt(int irq, struct pt_regs *regs);
+static void seeq8005_rx(struct device *dev);
+static int seeq8005_close(struct device *dev);
+static struct enet_statistics *seeq8005_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+/* Example routines you must write ;->. */
+#define tx_done(dev) (inw(SEEQ_STATUS) & SEEQSTAT_TX_ON)
+extern void hardware_send_packet(struct device *dev, char *buf, int length);
+extern void seeq8005_init(struct device *dev, int startp);
+inline void wait_for_buffer(struct device *dev);
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+#ifdef HAVE_DEVLIST
+/* Support for an alternate probe manager, which will eliminate the
+ boilerplate below. */
+struct netdev_entry seeq8005_drv =
+{"seeq8005", seeq8005_probe1, SEEQ8005_IO_EXTENT, seeq8005_portlist};
+#else
+int
+seeq8005_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return seeq8005_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; seeq8005_portlist[i]; i++) {
+ int ioaddr = seeq8005_portlist[i];
+ if (check_region(ioaddr, SEEQ8005_IO_EXTENT))
+ continue;
+ if (seeq8005_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* This is the real probe routine. Linux has a history of friendly device
+ probes on the ISA bus. A good device probes avoids doing writes, and
+ verifies that the correct device exists and functions. */
+
+static int seeq8005_probe1(struct device *dev, int ioaddr)
+{
+ static unsigned version_printed = 0;
+ int i,j;
+ unsigned char SA_prom[32];
+ int old_cfg1;
+ int old_cfg2;
+ int old_stat;
+ int old_dmaar;
+ int old_rear;
+
+ if (net_debug>1)
+ printk("seeq8005: probing at 0x%x\n",ioaddr);
+
+ old_stat = inw(SEEQ_STATUS); /* read status register */
+ if (old_stat == 0xffff)
+ return ENODEV; /* assume that 0xffff == no device */
+ if ( (old_stat & 0x1800) != 0x1800 ) { /* assume that unused bits are 1, as my manual says */
+ if (net_debug>1) {
+ printk("seeq8005: reserved stat bits != 0x1800\n");
+ printk(" == 0x%04x\n",old_stat);
+ }
+ return ENODEV;
+ }
+
+ old_rear = inw(SEEQ_REA);
+ if (old_rear == 0xffff) {
+ outw(0,SEEQ_REA);
+ if (inw(SEEQ_REA) == 0xffff) { /* assume that 0xffff == no device */
+ return ENODEV;
+ }
+ } else if ((old_rear & 0xff00) != 0xff00) { /* assume that unused bits are 1 */
+ if (net_debug>1) {
+ printk("seeq8005: unused rear bits != 0xff00\n");
+ printk(" == 0x%04x\n",old_rear);
+ }
+ return ENODEV;
+ }
+
+ old_cfg2 = inw(SEEQ_CFG2); /* read CFG2 register */
+ old_cfg1 = inw(SEEQ_CFG1);
+ old_dmaar = inw(SEEQ_DMAAR);
+
+ if (net_debug>4) {
+ printk("seeq8005: stat = 0x%04x\n",old_stat);
+ printk("seeq8005: cfg1 = 0x%04x\n",old_cfg1);
+ printk("seeq8005: cfg2 = 0x%04x\n",old_cfg2);
+ printk("seeq8005: raer = 0x%04x\n",old_rear);
+ printk("seeq8005: dmaar= 0x%04x\n",old_dmaar);
+ }
+
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); /* setup for reading PROM */
+ outw( 0, SEEQ_DMAAR); /* set starting PROM address */
+ outw( SEEQCFG1_BUFFER_PROM, SEEQ_CFG1); /* set buffer to look at PROM */
+
+
+ j=0;
+ for(i=0; i <32; i++) {
+ j+= SA_prom[i] = inw(SEEQ_BUFFER) & 0xff;
+ }
+
+#if 0
+ /* untested because I only have the one card */
+ if ( (j&0xff) != 0 ) { /* checksum appears to be 8bit = 0 */
+ if (net_debug>1) { /* check this before deciding that we have a card */
+ printk("seeq8005: prom sum error\n");
+ }
+ outw( old_stat, SEEQ_STATUS);
+ outw( old_dmaar, SEEQ_DMAAR);
+ outw( old_cfg1, SEEQ_CFG1);
+ return ENODEV;
+ }
+#endif
+
+ outw( SEEQCFG2_RESET, SEEQ_CFG2); /* reset the card */
+ SLOW_DOWN_IO; /* have to wait 4us after a reset - should be fixed */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+
+ if (net_debug) {
+ printk("seeq8005: prom sum = 0x%08x\n",j);
+ for(j=0; j<32; j+=16) {
+ printk("seeq8005: prom %02x: ",j);
+ for(i=0;i<16;i++) {
+ printk("%02x ",SA_prom[j|i]);
+ }
+ printk(" ");
+ for(i=0;i<16;i++) {
+ if ((SA_prom[j|i]>31)&&(SA_prom[j|i]<127)) {
+ printk("%c", SA_prom[j|i]);
+ } else {
+ printk(" ");
+ }
+ }
+ printk("\n");
+ }
+ }
+
+#if 0
+ /*
+	 * testing the packet buffer memory doesn't work yet
+ * but all other buffer accesses do
+ * - fixing is not a priority
+ */
+ if (net_debug>1) { /* test packet buffer memory */
+ printk("seeq8005: testing packet buffer ... ");
+ outw( SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0 , SEEQ_DMAAR);
+ for(i=0;i<32768;i++) {
+ outw(0x5a5a, SEEQ_BUFFER);
+ }
+ j=jiffies+HZ;
+ while ( ((inw(SEEQ_STATUS) & SEEQSTAT_FIFO_EMPTY) != SEEQSTAT_FIFO_EMPTY) && jiffies < j )
+ mb();
+ outw( 0 , SEEQ_DMAAR);
+ while ( ((inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && jiffies < j+HZ)
+ mb();
+ if ( (inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (inw(SEEQ_STATUS)& SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ j=0;
+ for(i=0;i<32768;i++) {
+ if (inw(SEEQ_BUFFER) != 0x5a5a)
+ j++;
+ }
+ if (j) {
+ printk("%i\n",j);
+ } else {
+ printk("ok.\n");
+ }
+ }
+#endif
+
+ /* Allocate a new 'dev' if needed. */
+ if (dev == NULL)
+ dev = init_etherdev(0, sizeof(struct net_local));
+
+ if (net_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: %s found at %#3x, ", dev->name, "seeq8005", ioaddr);
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = SA_prom[i+6]);
+
+ if (dev->irq == 0xff)
+ ; /* Do nothing: a user-level program will set it. */
+ else if (dev->irq < 2) { /* "Auto-IRQ" */
+ autoirq_setup(0);
+
+ outw( SEEQCMD_RX_INT_EN | SEEQCMD_SET_RX_ON | SEEQCMD_SET_RX_OFF, SEEQ_CMD );
+
+ dev->irq = autoirq_report(0);
+
+ if (net_debug >= 2)
+ printk(" autoirq is %d\n", dev->irq);
+ } else if (dev->irq == 2)
+ /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+ * or don't know which one to set.
+ */
+ dev->irq = 9;
+
+#if 0
+ {
+ int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005");
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, irqval);
+ return EAGAIN;
+ }
+ }
+#endif
+
+ /* Grab the region so we can find another board if autoIRQ fails. */
+ request_region(ioaddr, SEEQ8005_IO_EXTENT,"seeq8005");
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = seeq8005_open;
+ dev->stop = seeq8005_close;
+ dev->hard_start_xmit = seeq8005_send_packet;
+ dev->get_stats = seeq8005_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ dev->flags &= ~IFF_MULTICAST;
+
+ return 0;
+}
+
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+seeq8005_open(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ {
+ int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005");
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, irqval);
+ return EAGAIN;
+ }
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ /* Reset the hardware here. Don't forget to set the station address. */
+ seeq8005_init(dev, 1);
+
+ lp->open_time = jiffies;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ return 0;
+}
+
+static int
+seeq8005_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ printk("%s: transmit timed out, %s?\n", dev->name,
+ tx_done(dev) ? "IRQ conflict" : "network cable problem");
+ /* Try to restart the adaptor. */
+ seeq8005_init(dev, 1);
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+
+	/* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ hardware_send_packet(dev, buf, length);
+ dev->trans_start = jiffies;
+ }
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ /* You might need to clean up and record Tx statistics here. */
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+seeq8005_interrupt(int irq, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 0;
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ if (dev->interrupt)
+ printk ("%s: Re-entering the interrupt handler.\n", dev->name);
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+
+ status = inw(SEEQ_STATUS);
+ do {
+ if (net_debug >2) {
+ printk("%s: int, status=0x%04x\n",dev->name,status);
+ }
+
+ if (status & SEEQSTAT_WINDOW_INT) {
+ outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ if (net_debug) {
+ printk("%s: window int!\n",dev->name);
+ }
+ }
+ if (status & SEEQSTAT_TX_INT) {
+ outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ lp->stats.tx_packets++;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+ if (status & SEEQSTAT_RX_INT) {
+ /* Got a packet(s). */
+ seeq8005_rx(dev);
+ }
+ status = inw(SEEQ_STATUS);
+ } while ( (++boguscount < 10) && (status & SEEQSTAT_ANY_INT)) ;
+
+ if(net_debug>2) {
+ printk("%s: eoi\n",dev->name);
+ }
+ dev->interrupt = 0;
+ return;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+seeq8005_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int boguscount = 10;
+ int pkt_hdr;
+ int ioaddr = dev->base_addr;
+
+ do {
+ int next_packet;
+ int pkt_len;
+ int i;
+ int status;
+
+ status = inw(SEEQ_STATUS);
+ outw( lp->receive_ptr, SEEQ_DMAAR);
+ outw(SEEQCMD_FIFO_READ | SEEQCMD_RX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ wait_for_buffer(dev);
+ next_packet = ntohs(inw(SEEQ_BUFFER));
+ pkt_hdr = inw(SEEQ_BUFFER);
+
+ if (net_debug>2) {
+ printk("%s: 0x%04x recv next=0x%04x, hdr=0x%04x\n",dev->name,lp->receive_ptr,next_packet,pkt_hdr);
+ }
+
+ if ((next_packet == 0) || ((pkt_hdr & SEEQPKTH_CHAIN)==0)) { /* Read all the frames? */
+ return; /* Done for now */
+ }
+
+ if ((pkt_hdr & SEEQPKTS_DONE)==0)
+ break;
+
+ if (next_packet < lp->receive_ptr) {
+ pkt_len = (next_packet + 0x10000 - ((DEFAULT_TEA+1)<<8)) - lp->receive_ptr - 4;
+ } else {
+ pkt_len = next_packet - lp->receive_ptr - 4;
+ }
+
+ if (next_packet < ((DEFAULT_TEA+1)<<8)) { /* is the next_packet address sane? */
+ printk("%s: recv packet ring corrupt, resetting board\n",dev->name);
+ seeq8005_init(dev,1);
+ return;
+ }
+
+ lp->receive_ptr = next_packet;
+
+ if (net_debug>2) {
+ printk("%s: recv len=0x%04x\n",dev->name,pkt_len);
+ }
+
+ if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (pkt_hdr & SEEQPKTS_SHORT) lp->stats.rx_frame_errors++;
+ if (pkt_hdr & SEEQPKTS_DRIB) lp->stats.rx_frame_errors++;
+ if (pkt_hdr & SEEQPKTS_OVERSIZE) lp->stats.rx_over_errors++;
+ if (pkt_hdr & SEEQPKTS_CRC_ERR) lp->stats.rx_crc_errors++;
+ /* skip over this packet */
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA);
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+ unsigned char *buf;
+
+ skb = dev_alloc_skb(pkt_len);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* align data on 16 byte */
+ buf = skb_put(skb,pkt_len);
+
+ insw(SEEQ_BUFFER, buf, (pkt_len + 1) >> 1);
+
+ if (net_debug>2) {
+ char * p = buf;
+ printk("%s: recv ",dev->name);
+ for(i=0;i<14;i++) {
+ printk("%02x ",*(p++)&0xff);
+ }
+ printk("\n");
+ }
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN));
+
+ /* If any worth-while packets have been received, netif_rx()
+ has done a mark_bh(NET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ return;
+}
+
+/* The inverse routine to net_open(). */
+static int
+seeq8005_close(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ lp->open_time = 0;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Flush the Tx and disable Rx here. */
+ outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+
+ free_irq(dev->irq);
+
+ irq2dev_map[dev->irq] = 0;
+
+ /* Update the statistics here. */
+
+ return 0;
+
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+seeq8005_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void
+set_multicast_list(struct device *dev)
+{
+/*
+ * I _could_ do up to 6 addresses here, but won't (yet?)
+ */
+
+#if 0
+ int ioaddr = dev->base_addr;
+/*
+ * hmm, not even sure if my matching works _anyway_ - seem to be receiving
+ * _everything_ . . .
+ */
+
+ if (num_addrs) { /* Enable promiscuous mode */
+ outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_ALL, SEEQ_CFG1);
+ dev->flags|=IFF_PROMISC;
+ } else { /* Disable promiscuous mode, use normal mode */
+ outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_BROAD, SEEQ_CFG1);
+ }
+#endif
+}
+
+void seeq8005_init(struct device *dev, int startp)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ outw(SEEQCFG2_RESET, SEEQ_CFG2); /* reset device */
+ SLOW_DOWN_IO; /* have to wait 4us after a reset - should be fixed */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0, SEEQ_DMAAR); /* load start address into both low and high byte */
+/* wait_for_buffer(dev); */ /* I think that you only need a wait for memory buffer */
+ outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
+
+ for(i=0;i<6;i++) { /* set Station address */
+ outb(dev->dev_addr[i], SEEQ_BUFFER);
+ SLOW_DOWN_IO;
+ }
+
+ outw( SEEQCFG1_BUFFER_TEA, SEEQ_CFG1); /* set xmit end area pointer to 16K */
+ outb( DEFAULT_TEA, SEEQ_BUFFER); /* this gives us 16K of send buffer and 48K of recv buffer */
+
+ lp->receive_ptr = (DEFAULT_TEA+1)<<8; /* so we can find our packet_header */
+ outw( lp->receive_ptr, SEEQ_RPR); /* Receive Pointer Register is set to recv buffer memory */
+
+ outw( 0x00ff, SEEQ_REA); /* Receive Area End */
+
+ if (net_debug>4) {
+ printk("%s: SA0 = ",dev->name);
+
+ outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0, SEEQ_DMAAR);
+ outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
+
+ for(i=0;i<6;i++) {
+ printk("%02x ",inb(SEEQ_BUFFER));
+ }
+ printk("\n");
+ }
+
+ outw( SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD | SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
+ outw( SEEQCFG2_AUTO_REA | SEEQCFG2_CTRLO, SEEQ_CFG2);
+ outw( SEEQCMD_SET_RX_ON | SEEQCMD_TX_INT_EN | SEEQCMD_RX_INT_EN, SEEQ_CMD);
+
+ if (net_debug>4) {
+ int old_cfg1;
+ old_cfg1 = inw(SEEQ_CFG1);
+ printk("%s: stat = 0x%04x\n",dev->name,inw(SEEQ_STATUS));
+ printk("%s: cfg1 = 0x%04x\n",dev->name,old_cfg1);
+ printk("%s: cfg2 = 0x%04x\n",dev->name,inw(SEEQ_CFG2));
+ printk("%s: raer = 0x%04x\n",dev->name,inw(SEEQ_REA));
+ printk("%s: dmaar= 0x%04x\n",dev->name,inw(SEEQ_DMAAR));
+
+ }
+}
+
+
+void hardware_send_packet(struct device * dev, char *buf, int length)
+{
+ int ioaddr = dev->base_addr;
+ int status = inw(SEEQ_STATUS);
+ int transmit_ptr = 0;
+ int tmp;
+
+ if (net_debug>4) {
+ printk("%s: send 0x%04x\n",dev->name,length);
+ }
+
+ /* Set FIFO to writemode and set packet-buffer address */
+ outw( SEEQCMD_FIFO_WRITE | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( transmit_ptr, SEEQ_DMAAR);
+
+ /* output SEEQ Packet header barfage */
+ outw( htons(length + 4), SEEQ_BUFFER);
+ outw( SEEQPKTH_XMIT | SEEQPKTH_DATA_FOLLOWS | SEEQPKTH_XMIT_INT_EN, SEEQ_BUFFER );
+
+ /* blat the buffer */
+ outsw( SEEQ_BUFFER, buf, (length +1) >> 1);
+ /* paranoia !! */
+ outw( 0, SEEQ_BUFFER);
+ outw( 0, SEEQ_BUFFER);
+
+ /* set address of start of transmit chain */
+ outw( transmit_ptr, SEEQ_TPR);
+
+ /* drain FIFO */
+ tmp = jiffies;
+ while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && (jiffies < tmp + HZ))
+ mb();
+
+ /* doit ! */
+ outw( SEEQCMD_WINDOW_INT_ACK | SEEQCMD_SET_TX_ON | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+
+}
+
+
+/*
+ * wait_for_buffer
+ *
+ * This routine waits for the SEEQ chip to assert that the FIFO is ready
+ * by checking for a window interrupt, and then clearing it
+ */
+inline void wait_for_buffer(struct device * dev)
+{
+ int ioaddr = dev->base_addr;
+ int tmp;
+ int status;
+
+ tmp = jiffies + HZ;
+ while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && jiffies < tmp)
+ mb();
+
+ if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c skeleton.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/seeq8005.h b/i386/i386at/gpl/linux/net/seeq8005.h
new file mode 100644
index 00000000..7122340c
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/seeq8005.h
@@ -0,0 +1,156 @@
+/*
+ * defines, etc for the seeq8005
+ */
+
+/*
+ * This file is distributed under GPL.
+ *
+ * The style and layout of this file are also copied
+ * from many of the other Linux network device drivers.
+ */
+
+/* The number of low I/O ports used by the ethercard. */
+#define SEEQ8005_IO_EXTENT 16
+
+#define SEEQ_B (ioaddr)
+
+#define SEEQ_CMD (SEEQ_B) /* Write only */
+#define SEEQ_STATUS (SEEQ_B) /* Read only */
+#define SEEQ_CFG1 (SEEQ_B + 2)
+#define SEEQ_CFG2 (SEEQ_B + 4)
+#define SEEQ_REA (SEEQ_B + 6) /* Receive End Area Register */
+#define SEEQ_RPR (SEEQ_B + 10) /* Receive Pointer Register */
+#define SEEQ_TPR (SEEQ_B + 12) /* Transmit Pointer Register */
+#define SEEQ_DMAAR (SEEQ_B + 14) /* DMA Address Register */
+#define SEEQ_BUFFER (SEEQ_B + 8) /* Buffer Window Register */
+
+#define DEFAULT_TEA (0x3f)
+
+#define SEEQCMD_DMA_INT_EN (0x0001) /* DMA Interrupt Enable */
+#define SEEQCMD_RX_INT_EN (0x0002) /* Receive Interrupt Enable */
+#define SEEQCMD_TX_INT_EN (0x0004) /* Transmit Interrupt Enable */
+#define SEEQCMD_WINDOW_INT_EN (0x0008) /* What the hell is this for?? */
+#define SEEQCMD_INT_MASK (0x000f)
+
+#define SEEQCMD_DMA_INT_ACK (0x0010) /* DMA ack */
+#define SEEQCMD_RX_INT_ACK (0x0020)
+#define SEEQCMD_TX_INT_ACK (0x0040)
+#define SEEQCMD_WINDOW_INT_ACK (0x0080)
+#define SEEQCMD_ACK_ALL (0x00f0)
+
+#define SEEQCMD_SET_DMA_ON (0x0100) /* Enables DMA Request logic */
+#define SEEQCMD_SET_RX_ON (0x0200) /* Enables Packet RX */
+#define SEEQCMD_SET_TX_ON (0x0400) /* Starts TX run */
+#define SEEQCMD_SET_DMA_OFF (0x0800)
+#define SEEQCMD_SET_RX_OFF (0x1000)
+#define SEEQCMD_SET_TX_OFF (0x2000)
+#define SEEQCMD_SET_ALL_OFF (0x3800) /* set all logic off */
+
+#define SEEQCMD_FIFO_READ (0x4000) /* Set FIFO to read mode (read from Buffer) */
+#define SEEQCMD_FIFO_WRITE (0x8000) /* Set FIFO to write mode */
+
+#define SEEQSTAT_DMA_INT_EN (0x0001) /* Status of interrupt enable */
+#define SEEQSTAT_RX_INT_EN (0x0002)
+#define SEEQSTAT_TX_INT_EN (0x0004)
+#define SEEQSTAT_WINDOW_INT_EN (0x0008)
+
+#define SEEQSTAT_DMA_INT (0x0010) /* Interrupt flagged */
+#define SEEQSTAT_RX_INT (0x0020)
+#define SEEQSTAT_TX_INT (0x0040)
+#define SEEQSTAT_WINDOW_INT (0x0080)
+#define SEEQSTAT_ANY_INT (0x00f0)
+
+#define SEEQSTAT_DMA_ON (0x0100) /* DMA logic on */
+#define SEEQSTAT_RX_ON (0x0200) /* Packet RX on */
+#define SEEQSTAT_TX_ON (0x0400) /* TX running */
+
+#define SEEQSTAT_FIFO_FULL (0x2000)
+#define SEEQSTAT_FIFO_EMPTY (0x4000)
+#define SEEQSTAT_FIFO_DIR (0x8000) /* 1=read, 0=write */
+
+#define SEEQCFG1_BUFFER_MASK	(0x000f)	/* define what maps into the BUFFER register */
+#define SEEQCFG1_BUFFER_MAC0 (0x0000) /* MAC station addresses 0-5 */
+#define SEEQCFG1_BUFFER_MAC1 (0x0001)
+#define SEEQCFG1_BUFFER_MAC2 (0x0002)
+#define SEEQCFG1_BUFFER_MAC3 (0x0003)
+#define SEEQCFG1_BUFFER_MAC4 (0x0004)
+#define SEEQCFG1_BUFFER_MAC5 (0x0005)
+#define SEEQCFG1_BUFFER_PROM (0x0006) /* The Address/CFG PROM */
+#define SEEQCFG1_BUFFER_TEA (0x0007) /* Transmit end area */
+#define SEEQCFG1_BUFFER_BUFFER (0x0008) /* Packet buffer memory */
+#define SEEQCFG1_BUFFER_INT_VEC (0x0009) /* Interrupt Vector */
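+/*
+ * Buffer-window access sketch (illustrative; this is how seeq8005.c above
+ * reads the address PROM, after setting the FIFO direction via SEEQ_CMD):
+ *
+ *	outw(SEEQCFG1_BUFFER_PROM, SEEQ_CFG1);	 window now shows the PROM
+ *	outw(0, SEEQ_DMAAR);			 start address inside the window
+ *	byte = inw(SEEQ_BUFFER) & 0xff;		 successive reads walk forward
+ */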
+
+#define SEEQCFG1_DMA_INTVL_MASK (0x0030)
+#define SEEQCFG1_DMA_CONT (0x0000)
+#define SEEQCFG1_DMA_800ns (0x0010)
+#define SEEQCFG1_DMA_1600ns (0x0020)
+#define SEEQCFG1_DMA_3200ns (0x0030)
+
+#define SEEQCFG1_DMA_LEN_MASK (0x00c0)
+#define SEEQCFG1_DMA_LEN1 (0x0000)
+#define SEEQCFG1_DMA_LEN2 (0x0040)
+#define SEEQCFG1_DMA_LEN4 (0x0080)
+#define SEEQCFG1_DMA_LEN8 (0x00c0)
+
+#define SEEQCFG1_MAC_MASK (0x3f00) /* Dis/enable bits for MAC addresses */
+#define SEEQCFG1_MAC0_EN (0x0100)
+#define SEEQCFG1_MAC1_EN (0x0200)
+#define SEEQCFG1_MAC2_EN (0x0400)
+#define SEEQCFG1_MAC3_EN (0x0800)
+#define SEEQCFG1_MAC4_EN (0x1000)
+#define SEEQCFG1_MAC5_EN (0x2000)
+
+#define SEEQCFG1_MATCH_MASK (0xc000) /* Packet matching logic cfg bits */
+#define SEEQCFG1_MATCH_SPECIFIC (0x0000) /* only matching MAC addresses */
+#define SEEQCFG1_MATCH_BROAD (0x4000) /* matching and broadcast addresses */
+#define SEEQCFG1_MATCH_MULTI (0x8000) /* matching, broadcast and multicast */
+#define SEEQCFG1_MATCH_ALL (0xc000) /* Promiscuous mode */
+
+#define SEEQCFG1_DEFAULT (SEEQCFG1_BUFFER_BUFFER | SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD)
+
+#define SEEQCFG2_BYTE_SWAP (0x0001) /* 0=Intel byte-order */
+#define SEEQCFG2_AUTO_REA (0x0002) /* if set, Receive End Area will be updated when reading from Buffer */
+
+#define SEEQCFG2_CRC_ERR_EN (0x0008) /* enables receiving of packets with CRC errors */
+#define SEEQCFG2_DRIBBLE_EN (0x0010) /* enables receiving of non-aligned packets */
+#define SEEQCFG2_SHORT_EN (0x0020) /* enables receiving of short packets */
+
+#define SEEQCFG2_SLOTSEL (0x0040) /* 0= standard IEEE802.3, 1= smaller,faster, non-standard */
+#define SEEQCFG2_NO_PREAM (0x0080) /* 1= user supplies Xmit preamble bytes */
+#define SEEQCFG2_ADDR_LEN (0x0100) /* 1= 2byte addresses */
+#define SEEQCFG2_REC_CRC (0x0200) /* 0= received packets will have CRC stripped from them */
+#define SEEQCFG2_XMIT_NO_CRC	(0x0400)	/* don't xmit CRC with each packet (user supplies it) */
+#define SEEQCFG2_LOOPBACK (0x0800)
+#define SEEQCFG2_CTRLO (0x1000)
+#define SEEQCFG2_RESET (0x8000) /* software Hard-reset bit */
+
+struct seeq_pkt_hdr {
+ unsigned short next; /* address of next packet header */
+ unsigned char babble_int:1, /* enable int on >1514 byte packet */
+ coll_int:1, /* enable int on collision */
+ coll_16_int:1, /* enable int on >15 collision */
+ xmit_int:1, /* enable int on success (or xmit with <15 collision) */
+ unused:1,
+ data_follows:1, /* if not set, process this as a header and pointer only */
+ chain_cont:1, /* if set, more headers in chain; only cmd bit valid in recv header */
+ xmit_recv:1; /* if set, a transmit packet, else a receive packet. */
+ unsigned char status;
+};
+
+#define SEEQPKTH_BAB_INT_EN (0x01) /* xmit only */
+#define SEEQPKTH_COL_INT_EN (0x02) /* xmit only */
+#define SEEQPKTH_COL16_INT_EN (0x04) /* xmit only */
+#define SEEQPKTH_XMIT_INT_EN (0x08) /* xmit only */
+#define SEEQPKTH_DATA_FOLLOWS (0x20) /* supposedly in xmit only */
+#define SEEQPKTH_CHAIN (0x40) /* more headers follow */
+#define SEEQPKTH_XMIT (0x80)
+
+#define SEEQPKTS_BABBLE (0x0100) /* xmit only */
+#define SEEQPKTS_OVERSIZE (0x0100) /* recv only */
+#define SEEQPKTS_COLLISION (0x0200) /* xmit only */
+#define SEEQPKTS_CRC_ERR (0x0200) /* recv only */
+#define SEEQPKTS_COLL16 (0x0400) /* xmit only */
+#define SEEQPKTS_DRIB (0x0400) /* recv only */
+#define SEEQPKTS_SHORT (0x0800) /* recv only */
+#define SEEQPKTS_DONE (0x8000)
+#define SEEQPKTS_ANY_ERROR (0x0f00)
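+
+/*
+ * A minimal usage sketch (assuming a hypothetical seeq_pkt_status() helper
+ * that returns the 16-bit status word of a completed packet header):
+ *
+ *	unsigned short status = seeq_pkt_status(dev);	// hypothetical helper
+ *
+ *	if ((status & SEEQPKTS_DONE) && !(status & SEEQPKTS_ANY_ERROR))
+ *		// packet completed with no babble/CRC/collision/short error
+ *
+ * Note that transmit and receive reuse the same bit positions, so the
+ * meaning of a set error bit depends on the packet direction.
+ */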
diff --git a/i386/i386at/gpl/linux/net/sk_g16.c b/i386/i386at/gpl/linux/net/sk_g16.c
new file mode 100644
index 00000000..83989485
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/sk_g16.c
@@ -0,0 +1,2111 @@
+/*-
+ * Copyright (C) 1994 by PJD Weichmann & SWS Bern, Switzerland
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ * Module : sk_g16.c
+ *
+ * Version : $Revision: 1.1.1.1 $
+ *
+ * Author : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/26
+ * Last Updated : $Date: 1997/02/25 21:27:39 $
+ *
+ * Description : Schneider & Koch G16 Ethernet Device Driver for
+ * Linux Kernel >= 1.1.22
+ * Update History :
+ *
+-*/
+
+static const char *rcsid = "$Id: sk_g16.c,v 1.1.1.1 1997/02/25 21:27:39 thomas Exp $";
+
+/*
+ * The Schneider & Koch (SK) G16 Network device driver is based
+ * on the 'ni6510' driver from Michael Hipp which can be found at
+ * ftp://sunsite.unc.edu/pub/Linux/system/Network/drivers/nidrivers.tar.gz
+ *
+ * Sources: 1) ni6510.c by M. Hipp
+ * 2) depca.c by D.C. Davies
+ * 3) skeleton.c by D. Becker
+ * 4) Am7990 Local Area Network Controller for Ethernet (LANCE),
+ * AMD, Pub. #05698, June 1989
+ *
+ * Many Thanks for helping me to get things working to:
+ *
+ * A. Cox (A.Cox@swansea.ac.uk)
+ * M. Hipp (mhipp@student.uni-tuebingen.de)
+ * R. Bolz (Schneider & Koch, Germany)
+ *
+ * See README.sk_g16 for details about limitations and bugs for the
+ * current version.
+ *
+ * To Do:
+ * - Support of SK_G8 and other SK Network Cards.
+ * - Autoset memory mapped RAM. Check for free memory and then
+ * configure RAM correctly.
+ * - SK_close should really set the card back into its initial state.
+ * - Test if IRQ 3 is not switched off. Use autoirq() functionality.
+ * (as in /drivers/net/skeleton.c)
+ * - Implement Multicast addressing. At minimum something like
+ * in depca.c.
+ * - Redo the statistics part.
+ * - Try to find out if the board is in 8 Bit or 16 Bit slot.
+ * If in 8 Bit mode don't use IRQ 11.
+ * - (Try to make it slightly faster.)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "sk_g16.h"
+
+/*
+ * Schneider & Koch Card Definitions
+ * =================================
+ */
+
+#define SK_NAME "SK_G16"
+
+/*
+ * SK_G16 Configuration
+ * --------------------
+ */
+
+/*
+ * Abbreviations
+ * -------------
+ *
+ * RAM - used for the 16KB shared memory
+ * Boot_ROM, ROM - are used for referencing the BootEPROM
+ *
+ * SK_BOOT_ROM and SK_ADDR are symbolic constants used to configure
+ * the behaviour of the driver and the SK_G16.
+ *
+ * ! See sk_g16.install on how to install and configure the driver !
+ *
+ * SK_BOOT_ROM defines if the Boot_ROM should be switched off or not.
+ *
+ * SK_ADDR defines the address where the RAM will be mapped into the real
+ * host memory.
+ * valid addresses are from 0xa0000 to 0xfc000 in 16Kbyte steps.
+ */
+
+#define SK_BOOT_ROM 1 /* 1=BootROM on 0=off */
+
+#define SK_ADDR 0xcc000
+
+/*
+ * POS3 holds bits A14-A19 of the address bus. These bits can be set
+ * to choose the RAM address. That's why we can only choose the RAM address
+ * in 16KB steps.
+ */
+
+#define POS_ADDR (rom_addr>>14) /* Do not change this line */
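+
+/*
+ * Worked example: with rom_addr == SK_ADDR == 0xcc000,
+ * POS_ADDR == 0xcc000 >> 14 == 0x33, which is the value written to POS3
+ * (bits A14-A19 of the RAM base address).
+ */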
+
+/*
+ * SK_G16 I/O PORT's + IRQ's + Boot_ROM locations
+ * ----------------------------------------------
+ */
+
+/*
+ * Like nearly every card, the SK_G16 has a specified set of I/O Port
+ * regions and only a few possible IRQs.
+ * The Installation Guide from Schneider & Koch lists IRQ2 as a possible
+ * interrupt. IRQ2 is always IRQ9 on boards with two cascaded interrupt
+ * controllers, so SK_IRQS uses IRQ9.
+ */
+
+/* Don't touch any of the following #defines. */
+
+#define SK_IO_PORTS { 0x100, 0x180, 0x208, 0x220, 0x288, 0x320, 0x328, 0x390, 0 }
+
+#define SK_IRQS { 3, 5, 9, 11, 0 }
+
+#define SK_BOOT_ROM_LOCATIONS { 0xc0000, 0xc4000, 0xc8000, 0xcc000, 0xd0000, 0xd4000, 0xd8000, 0xdc000, 0 }
+
+#define SK_BOOT_ROM_ID { 0x55, 0xaa, 0x10, 0x50, 0x06, 0x33 }
+
+/*
+ * SK_G16 POS REGISTERS
+ * --------------------
+ */
+
+/*
+ * SK_G16 has a Programmable Option Select (POS) Register.
+ * The POS is composed of 8 separate registers (POS0-7) which
+ * are I/O mapped on an address set by the W1 switch.
+ *
+ */
+
+#define SK_POS_SIZE 8 /* 8 I/O Ports are used by SK_G16 */
+
+#define SK_POS0 ioaddr /* Card-ID Low (R) */
+#define SK_POS1 ioaddr+1 /* Card-ID High (R) */
+#define SK_POS2 ioaddr+2 /* Card-Enable, Boot-ROM Disable (RW) */
+#define SK_POS3 ioaddr+3 /* Base address of RAM */
+#define SK_POS4 ioaddr+4 /* IRQ */
+
+/* POS5 - POS7 are unused */
+
+/*
+ * SK_G16 MAC PREFIX
+ * -----------------
+ */
+
+/*
+ * Schneider & Koch manufacturer code (00:00:a5).
+ * This is checked to make sure it really is an SK card.
+ */
+
+#define SK_MAC0 0x00
+#define SK_MAC1 0x00
+#define SK_MAC2 0x5a
+
+/*
+ * SK_G16 ID
+ * ---------
+ */
+
+/*
+ * If POS0,POS1 contain the following ID, then we know
+ * at which I/O Port Address we are.
+ */
+
+#define SK_IDLOW 0xfd
+#define SK_IDHIGH 0x6a
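+
+/*
+ * Detection sketch: the autoprobe in SK_init() below accepts an I/O base
+ * only when both ID bytes read back correctly, i.e. roughly
+ *
+ *	if (inb(SK_POS0) == SK_IDLOW && inb(SK_POS1) == SK_IDHIGH)
+ *		// an SK_G16 answers at this ioaddr
+ */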
+
+
+/*
+ * LANCE POS Bit definitions
+ * -------------------------
+ */
+
+#define SK_ROM_RAM_ON (POS2_CARD)
+#define SK_ROM_RAM_OFF (POS2_EPROM)
+#define SK_ROM_ON (inb(SK_POS2) & POS2_CARD)
+#define SK_ROM_OFF (inb(SK_POS2) | POS2_EPROM)
+#define SK_RAM_ON (inb(SK_POS2) | POS2_CARD)
+#define SK_RAM_OFF (inb(SK_POS2) & POS2_EPROM)
+
+#define POS2_CARD 0x0001 /* 1 = SK_G16 on 0 = off */
+#define POS2_EPROM 0x0002 /* 1 = Boot EPROM off 0 = on */
+
+/*
+ * SK_G16 Memory mapped Registers
+ * ------------------------------
+ *
+ */
+
+#define SK_IOREG (board->ioreg) /* LANCE data registers. */
+#define SK_PORT (board->port) /* Control, Status register */
+#define SK_IOCOM (board->iocom) /* I/O Command */
+
+/*
+ * SK_G16 Status/Control Register bits
+ * -----------------------------------
+ *
+ * (C) Controlreg (S) Statusreg
+ */
+
+/*
+ * Register transfer: 0 = no transfer
+ * 1 = transferring data between LANCE and I/O reg
+ */
+#define SK_IORUN 0x20
+
+/*
+ * LANCE interrupt: 0 = LANCE interrupt occurred
+ * 1 = no LANCE interrupt occurred
+ */
+#define SK_IRQ 0x10
+
+#define SK_RESET 0x08 /* Reset SK_CARD: 0 = RESET 1 = normal */
+#define SK_RW 0x02 /* 0 = write to 1 = read from */
+#define SK_ADR 0x01 /* 0 = REG DataPort 1 = RAP Reg addr port */
+
+
+#define SK_RREG SK_RW /* Transfer direction: read from the LANCE */
+#define SK_WREG 0 /* Transfer direction: write to the LANCE */
+#define SK_RAP SK_ADR /* Destination Register RAP */
+#define SK_RDATA 0 /* Destination Register REG DataPort */
+
+/*
+ * SK_G16 I/O Command
+ * ------------------
+ */
+
+/*
+ * Any bitcombination sets the internal I/O bit (transfer will start)
+ * when written to I/O Command
+ */
+
+#define SK_DOIO 0x80 /* Do Transfer */
+
+/*
+ * LANCE RAP (Register Address Port).
+ * ---------------------------------
+ */
+
+/*
+ * The LANCE internal registers are selected through the RAP.
+ * The Registers are:
+ *
+ * CSR0 - Status and Control flags
+ * CSR1 - Low order bits of initialize block (bits 15:00)
+ * CSR2 - High order bits of initialize block (bits 07:00, 15:08 are reserved)
+ * CSR3 - Allows redefinition of the Bus Master Interface.
+ * This register must be set to 0x0002, which means BSWAP = 0,
+ * ACON = 1, BCON = 0;
+ *
+ */
+
+#define CSR0 0x00
+#define CSR1 0x01
+#define CSR2 0x02
+#define CSR3 0x03
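+
+/*
+ * Access sketch (the access functions SK_set_RAP(), SK_read_reg(),
+ * SK_rread_reg() and SK_write_reg() further below implement this):
+ * every CSR access is a two-step RAP/data transfer through the I/O
+ * register, e.g. reading CSR0:
+ *
+ *	SK_set_RAP(CSR0);		// select CSR0 via the RAP
+ *	csr0 = SK_rread_reg();		// then move the data word
+ *
+ * Remember that CSR1-CSR3 can only be accessed while the STOP bit
+ * in CSR0 is set.
+ */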
+
+/*
+ * General Definitions
+ * ===================
+ */
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * We have 16KB RAM which can be accessed by the LANCE. This memory
+ * holds not only the buffers but also the ring descriptors and
+ * the initialize block.
+ * Don't change anything unless you really know what you are doing.
+ */
+
+#define LC_LOG_TX_BUFFERS 1 /* (2 == 2^^1) 2 Transmit buffers */
+#define LC_LOG_RX_BUFFERS 3 /* (8 == 2^^3) 8 Receive buffers */
+
+/* Descriptor ring sizes */
+
+#define TMDNUM (1 << (LC_LOG_TX_BUFFERS)) /* 2 Transmit descriptor rings */
+#define RMDNUM (1 << (LC_LOG_RX_BUFFERS)) /* 8 Receive Buffers */
+
+/* Define Mask for setting RMD, TMD length in the LANCE init_block */
+
+#define TMDNUMMASK (LC_LOG_TX_BUFFERS << 29)
+#define RMDNUMMASK (LC_LOG_RX_BUFFERS << 29)
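+
+/*
+ * Worked example: with LC_LOG_RX_BUFFERS == 3, RMDNUMMASK == 3 << 29,
+ * so OR-ing it into the 32-bit ring pointer written to the init block
+ * (see SK_lance_init() below) puts the ring length code into bits 31:29
+ * while the low bits keep the descriptor ring address.
+ */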
+
+/*
+ * Data Buffer size is set to maximum packet length.
+ */
+
+#define PKT_BUF_SZ 1518
+
+/*
+ * The number of low I/O ports used by the ethercard.
+ */
+
+#define ETHERCARD_TOTAL_SIZE SK_POS_SIZE
+
+/*
+ * Portreserve is there to mark the Card I/O Port region as used.
+ * Check_region is to check if the region at ioaddr with the size "size"
+ * is free or not.
+ * Snarf_region allocates the I/O Port region.
+ */
+
+#ifndef HAVE_PORTRESERVE
+
+#define check_region(ioaddr, size) 0
+#define request_region(ioaddr, size,name) do ; while (0)
+
+#endif
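+
+/*
+ * Usage pattern (see SK_init() and SK_probe() below): the probe first
+ * asks whether the 8-port POS window is free and only claims it once a
+ * card has been identified, roughly
+ *
+ *	if (check_region(ioaddr, ETHERCARD_TOTAL_SIZE))
+ *		continue;			// ports already taken
+ *	...
+ *	request_region(ioaddr, ETHERCARD_TOTAL_SIZE, "sk_g16");
+ */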
+
+/*
+ * SK_DEBUG
+ *
+ * Here you can choose what level of debugging is wanted.
+ *
+ * If SK_DEBUG and SK_DEBUG2 are undefined, then only the
+ * necessary messages will be printed.
+ *
+ * If SK_DEBUG is defined, there will be many debugging prints
+ * which can help to find some mistakes in configuration or even
+ * in the driver code.
+ *
+ * If SK_DEBUG2 is defined, many many messages will be printed
+ * which normally you don't need. I used this to check the interrupt
+ * routine.
+ *
+ * (If you define only SK_DEBUG2 then only the messages for
+ * checking interrupts will be printed!)
+ *
+ * The normal way of life is:
+ *
+ * To get the whole thing going, leave both symbolic constants
+ * undefined. If you face any problems and you know what's going
+ * on (you know something about the card and you can interpret some
+ * hex LANCE register output), then define SK_DEBUG.
+ *
+ */
+
+#undef SK_DEBUG /* debugging */
+#undef SK_DEBUG2 /* debugging with more verbose report */
+
+#ifdef SK_DEBUG
+#define PRINTK(x) printk x
+#else
+#define PRINTK(x) /**/
+#endif
+
+#ifdef SK_DEBUG2
+#define PRINTK2(x) printk x
+#else
+#define PRINTK2(x) /**/
+#endif
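+
+/*
+ * Note on usage: because the whole argument list is passed as one
+ * parenthesised macro argument, debug calls need double parentheses, e.g.
+ *
+ *	PRINTK(("%s: probing ioaddr %#x\n", SK_NAME, ioaddr));
+ *
+ * which expands to a plain printk() when SK_DEBUG is defined and to
+ * nothing otherwise.
+ */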
+
+/*
+ * SK_G16 RAM
+ *
+ * The components are memory mapped and can be set in a region from
+ * 0x00000 through 0xfc000 in 16KB steps.
+ *
+ * The Network components are: dual ported RAM, Prom, I/O Reg, Status-,
+ * Controlregister and I/O Command.
+ *
+ * dual ported RAM: This is the only memory region which the LANCE chip
+ * has access to. From the Lance it is addressed from 0x0000 to
+ * 0x3fbf. The host accesses it normally.
+ *
+ * PROM: The PROM contains the Ethernet MAC address. It is realised as an
+ * 8-bit PROM, which means only the 16 even addresses of the
+ * 32 byte address region are used. Access to an odd address results in
+ * invalid data.
+ *
+ * LANCE I/O Reg: The I/O Reg is built of 4 single registers: Low-Byte Write,
+ * Hi-Byte Write, Low-Byte Read, Hi-Byte Read.
+ * Transfers from or to the LANCE are always 16 bit, so the Low and High
+ * registers are always relevant.
+ *
+ * The data in the read registers is not the data in the write registers!!
+ *
+ * Port: Status- and Controlregister.
+ * Two different registers which share the same address, Status is
+ * read-only, Control is write-only.
+ *
+ * I/O Command:
+ * Any bit combination written in here starts the transfer between
+ * Host and LANCE.
+ */
+
+typedef struct
+{
+ unsigned char ram[0x3fc0]; /* 16KB dual ported ram */
+ unsigned char rom[0x0020]; /* 32Byte PROM containing 6Byte MAC */
+ unsigned char res1[0x0010]; /* reserved */
+ unsigned volatile short ioreg;/* LANCE I/O Register */
+ unsigned volatile char port; /* Statusregister and Controlregister */
+ unsigned char iocom; /* I/O Command Register */
+} SK_RAM;
+
+/* struct */
+
+/*
+ * This is the structure for the dual ported RAM. We
+ * have exactly 16,320 bytes (0x3fc0). In here there must be:
+ *
+ * - Initialize Block (starting at a word boundary)
+ * - Receive and Transmit Descriptor Rings (quadword boundary)
+ * - Data Buffers (arbitrary boundary)
+ *
+ * This is because, on the SK_G16, the LANCE has access only to the dual
+ * ported RAM and nowhere else.
+ */
+
+struct SK_ram
+{
+ struct init_block ib;
+ struct tmd tmde[TMDNUM];
+ struct rmd rmde[RMDNUM];
+ char tmdbuf[TMDNUM][PKT_BUF_SZ];
+ char rmdbuf[RMDNUM][PKT_BUF_SZ];
+};
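+
+/*
+ * Size check (assuming the usual 2/4 byte short/long sizes and no extra
+ * padding): the init block takes 24 bytes, the 2 TMDs and 8 RMDs take
+ * 8 bytes each (80 bytes total), and the 10 packet buffers take
+ * 10 * 1518 = 15180 bytes, i.e. 24 + 80 + 15180 = 15284 bytes, which
+ * fits in the 16,320 byte (0x3fc0) dual ported RAM.
+ */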
+
+/*
+ * Structure where all necessary information is for ring buffer
+ * management and statistics.
+ */
+
+struct priv
+{
+ struct SK_ram *ram; /* dual ported ram structure */
+ struct rmd *rmdhead; /* start of receive ring descriptors */
+ struct tmd *tmdhead; /* start of transmit ring descriptors */
+ int rmdnum; /* currently used receive ring descriptor */
+ int tmdnum; /* current transmit descriptor for transmitting data */
+ int tmdlast; /* last sent descriptor used for error handling, etc */
+ void *rmdbufs[RMDNUM]; /* pointer to the receive buffers */
+ void *tmdbufs[TMDNUM]; /* pointer to the transmit buffers */
+ struct enet_statistics stats; /* Device driver statistics */
+};
+
+/* global variable declaration */
+
+/* IRQ map used to reserve an IRQ (see SK_open()) */
+
+/* extern void *irq2dev_map[16]; */ /* Declared in <linux/ioport.h> */
+
+/* static variables */
+
+static SK_RAM *board; /* pointer to our memory mapped board components */
+
+/* Macros */
+
+
+/* Function Prototypes */
+
+/*
+ * Device Driver functions
+ * -----------------------
+ * See each function's definition header for a short explanation.
+ */
+
+int SK_init(struct device *dev);
+static int SK_probe(struct device *dev, short ioaddr);
+
+static int SK_open(struct device *dev);
+static int SK_send_packet(struct sk_buff *skb, struct device *dev);
+static void SK_interrupt(int irq, struct pt_regs * regs);
+static void SK_rxintr(struct device *dev);
+static void SK_txintr(struct device *dev);
+static int SK_close(struct device *dev);
+
+static struct enet_statistics *SK_get_stats(struct device *dev);
+
+unsigned int SK_rom_addr(void);
+
+static void set_multicast_list(struct device *dev);
+
+/*
+ * LANCE Functions
+ * ---------------
+ */
+
+static int SK_lance_init(struct device *dev, unsigned short mode);
+void SK_reset_board(void);
+void SK_set_RAP(int reg_number);
+int SK_read_reg(int reg_number);
+int SK_rread_reg(void);
+void SK_write_reg(int reg_number, int value);
+
+/*
+ * Debugging functions
+ * -------------------
+ */
+
+void SK_print_pos(struct device *dev, char *text);
+void SK_print_dev(struct device *dev, char *text);
+void SK_print_ram(struct device *dev);
+
+
+/*-
+ * Function : SK_init
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Check for a SK_G16 network adaptor and initialize it.
+ * This function gets called by dev_init which initializes
+ * all Network devices.
+ *
+ * Parameters : I : struct device *dev - structure preconfigured
+ * from Space.c
+ * Return Value : 0 = Driver Found and initialized
+ * Errors : ENODEV - no device found
+ * ENXIO - not probed
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+/*
+ * Check for a network adaptor of this type, and return '0' if one exists.
+ * If dev->base_addr == 0, probe all likely locations.
+ * If dev->base_addr == 1, always return failure.
+ * If dev->base_addr == 2, allocate space for the device and return success
+ * (detachable devices only).
+ */
+
+int SK_init(struct device *dev)
+{
+ int ioaddr = 0; /* I/O port address used for POS regs */
+ int *port, ports[] = SK_IO_PORTS; /* SK_G16 supported ports */
+
+ /* get preconfigured base_addr from dev which is done in Space.c */
+ int base_addr = dev->base_addr;
+
+ PRINTK(("%s: %s", SK_NAME, rcsid));
+ rcsid = NULL; /* We do not want to use this further */
+
+ if (base_addr > 0x0ff) /* Check a single specified address */
+ {
+ /* Check if on specified address is a SK_G16 */
+
+ if ( (inb(SK_POS0) == SK_IDLOW) ||
+ (inb(SK_POS1) == SK_IDHIGH) )
+ {
+ return SK_probe(dev, base_addr);
+ }
+
+ return ENODEV; /* Sorry, no SK_G16 at the specified address */
+ }
+ else if (base_addr > 0) /* Don't probe at all */
+ {
+ return ENXIO;
+ }
+
+ /* Autoprobe base_addr */
+
+ for (port = &ports[0]; *port; port++)
+ {
+ ioaddr = *port; /* we need ioaddr for accessing POS regs */
+
+ /* Check if I/O Port region is used by another board */
+
+ if (check_region(ioaddr, ETHERCARD_TOTAL_SIZE))
+ {
+ continue; /* Try next Port address */
+ }
+
+ /* Check if at ioaddr is a SK_G16 */
+
+ if ( !(inb(SK_POS0) == SK_IDLOW) ||
+ !(inb(SK_POS1) == SK_IDHIGH) )
+ {
+ continue; /* Try next Port address */
+ }
+
+ dev->base_addr = ioaddr; /* Set I/O Port Address */
+
+ if (SK_probe(dev, ioaddr) == 0)
+ {
+ return 0; /* Card found and initialized */
+ }
+ }
+
+ dev->base_addr = base_addr; /* Write back original base_addr */
+
+ return ENODEV; /* Failed to find or init driver */
+
+} /* End of SK_init */
+
+
+/*-
+ * Function : SK_probe
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function is called by SK_init and
+ * does the main part of initialization.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : short ioaddr - I/O Port address where POS is.
+ * Return Value : 0 = Initialization done
+ * Errors : ENODEV - No SK_G16 found
+ * -1 - Configuration problem
+ * Globals : irq2dev_map - Which device uses which IRQ
+ * : board - pointer to SK_RAM
+ * Update History :
+ * YY/MM/DD uid Description
+ * 94/06/30 pwe SK_ADDR now checked and at the correct place
+-*/
+
+int SK_probe(struct device *dev, short ioaddr)
+{
+ int i,j; /* Counters */
+ int sk_addr_flag = 0; /* SK ADDR correct? 1 - no, 0 - yes */
+ unsigned int rom_addr; /* used to store RAM address used for POS_ADDR */
+
+ struct priv *p; /* SK_G16 private structure */
+
+ if (SK_ADDR & 0x3fff || SK_ADDR < 0xa0000)
+ {
+
+ sk_addr_flag = 1;
+
+ /*
+ * Now here we could use a routine which searches for a free
+ * place in the RAM and sets SK_ADDR if found. TODO.
+ */
+ }
+
+ if (SK_BOOT_ROM) /* Shall we keep Boot_ROM on ? */
+ {
+ PRINTK(("## %s: SK_BOOT_ROM is set.\n", SK_NAME));
+
+ rom_addr = SK_rom_addr();
+
+ if (rom_addr == 0) /* No Boot_ROM found */
+ {
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR; /* assign predefined address */
+
+ PRINTK(("## %s: NO Bootrom found \n", SK_NAME));
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+ else if (rom_addr == SK_ADDR)
+ {
+ printk("%s: RAM + ROM are set to the same address %#08x\n"
+ " Check configuration. Now switching off Boot_ROM\n",
+ SK_NAME, rom_addr);
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off*/
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+ else
+ {
+ PRINTK(("## %s: Found ROM at %#08x\n", SK_NAME, rom_addr));
+ PRINTK(("## %s: Keeping Boot_ROM on\n", SK_NAME));
+
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR;
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_ROM_RAM_ON, SK_POS2); /* RAM on, BOOT_ROM on */
+ }
+ }
+ else /* Don't keep Boot_ROM */
+ {
+ PRINTK(("## %s: SK_BOOT_ROM is not set.\n", SK_NAME));
+
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_rom_addr(); /* Try to find a Boot_ROM */
+
+ /* IF we find a Boot_ROM disable it */
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+
+ /* If we found a Boot_ROM it is now disabled. Set the RAM address
+ * to the Boot_ROM address.
+ */
+
+ if (rom_addr)
+ {
+ printk("%s: We found Boot_ROM at %#08x. Now setting RAM on"
+ "that address\n", SK_NAME, rom_addr);
+
+ outb(POS_ADDR, SK_POS3); /* Set RAM on Boot_ROM address */
+ }
+ else /* We did not find a Boot_ROM, use predefined SK_ADDR for ram */
+ {
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR;
+
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ }
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "POS registers after ROM, RAM config");
+#endif
+
+ board = (SK_RAM *) rom_addr;
+
+ /* Read in station address */
+ for (i = 0, j = 0; i < ETH_ALEN; i++, j+=2)
+ {
+ dev->dev_addr[i] = board->rom[j];
+ }
+
+ /* Check for manufacturer code */
+ if (!(dev->dev_addr[0] == SK_MAC0 &&
+ dev->dev_addr[1] == SK_MAC1 &&
+ dev->dev_addr[2] == SK_MAC2) )
+ {
+ PRINTK(("## %s: We did not find SK_G16 at RAM location.\n",
+ SK_NAME));
+ return ENODEV; /* NO SK_G16 found */
+ }
+
+ printk("%s: %s found at %#3x, HW addr: %#04x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name,
+ "Schneider & Koch Netcard",
+ (unsigned int) dev->base_addr,
+ dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5]);
+
+ /* Grab the I/O Port region */
+ request_region(ioaddr, ETHERCARD_TOTAL_SIZE,"sk_g16");
+
+ /* Initialize device structure */
+
+ /* Allocate memory for private structure */
+ p = dev->priv = (void *) kmalloc(sizeof(struct priv), GFP_KERNEL);
+ if (p == NULL)
+ return -ENOMEM;
+ memset((char *) dev->priv, 0, sizeof(struct priv)); /* clear memory */
+
+ /* Assign our Device Driver functions */
+
+ dev->open = &SK_open;
+ dev->stop = &SK_close;
+ dev->hard_start_xmit = &SK_send_packet;
+ dev->get_stats = &SK_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+
+ /* Set the generic fields of the device structure */
+
+ ether_setup(dev);
+
+ dev->flags &= ~IFF_MULTICAST;
+
+ /* Initialize private structure */
+
+ p->ram = (struct SK_ram *) rom_addr; /* Set dual ported RAM addr */
+ p->tmdhead = &(p->ram)->tmde[0]; /* Set TMD head */
+ p->rmdhead = &(p->ram)->rmde[0]; /* Set RMD head */
+
+ /* Initialize buffer pointers */
+
+ for (i = 0; i < TMDNUM; i++)
+ {
+ p->tmdbufs[i] = &(p->ram)->tmdbuf[i];
+ }
+
+ for (i = 0; i < RMDNUM; i++)
+ {
+ p->rmdbufs[i] = &(p->ram)->rmdbuf[i];
+ }
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "End of SK_probe");
+ SK_print_ram(dev);
+#endif
+
+ return 0; /* Initialization done */
+
+} /* End of SK_probe() */
+
+
+/*-
+ * Function : SK_open
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function is called some time after booting,
+ * when the ifconfig program is run.
+ *
+ * This function requests an IRQ and sets the corresponding
+ * IRQ in the card. It then calls SK_lance_init() to
+ * init and start the LANCE chip. If everything is
+ * ok it returns 0 (OK), which means the SK_G16 is now
+ * opened and operational.
+ *
+ * (Called by dev_open() /net/inet/dev.c)
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * Return Value : 0 - Device opened
+ * Errors : -EAGAIN - Open failed
+ * Globals : irq2dev_map - which device uses which irq
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int SK_open(struct device *dev)
+{
+ int i = 0;
+ int irqval = 0;
+ int ioaddr = dev->base_addr;
+
+ int irqtab[] = SK_IRQS;
+
+ struct priv *p = (struct priv *)dev->priv;
+
+ PRINTK(("## %s: At beginning of SK_open(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ if (dev->irq == 0) /* Autoirq */
+ {
+ i = 0;
+
+ /*
+ * Check if one IRQ out of SK_IRQS is free and install
+ * interrupt handler.
+ * Most done by request_irq().
+ * irqval: 0 - interrupt handler installed for IRQ irqtab[i]
+ * -EBUSY - interrupt busy
+ * -EINVAL - irq > 15 or handler = NULL
+ */
+
+ do
+ {
+ irqval = request_irq(irqtab[i], &SK_interrupt, 0, "sk_g16");
+ i++;
+ } while (irqval && irqtab[i]);
+
+ if (irqval) /* We tried every possible IRQ but no success */
+ {
+ printk("%s: unable to get an IRQ\n", dev->name);
+ return -EAGAIN;
+ }
+
+ dev->irq = irqtab[--i];
+
+ outb(i<<2, SK_POS4); /* Set Card on probed IRQ */
+
+ }
+ else if (dev->irq == 2) /* IRQ2 is always IRQ9 */
+ {
+ if (request_irq(9, &SK_interrupt, 0, "sk_g16"))
+ {
+ printk("%s: unable to get IRQ 9\n", dev->name);
+ return -EAGAIN;
+ }
+ dev->irq = 9;
+
+ /*
+ * Now we set card on IRQ2.
+ * This can be confusing, but remember that IRQ2 on the network
+ * card is in reality IRQ9
+ */
+ outb(0x08, SK_POS4); /* set card to IRQ2 */
+
+ }
+ else /* Check IRQ as defined in Space.c */
+ {
+ int i = 0;
+
+ /* check if IRQ free and valid. Then install Interrupt handler */
+
+ if (request_irq(dev->irq, &SK_interrupt, 0, "sk_g16"))
+ {
+ printk("%s: unable to get selected IRQ\n", dev->name);
+ return -EAGAIN;
+ }
+
+ switch(dev->irq)
+ {
+ case 3: i = 0;
+ break;
+ case 5: i = 1;
+ break;
+ case 2: i = 2;
+ break;
+ case 11:i = 3;
+ break;
+ default:
+ printk("%s: Preselected IRQ %d is invalid for %s boards",
+ dev->name,
+ dev->irq,
+ SK_NAME);
+ return -EAGAIN;
+ }
+
+ outb(i<<2, SK_POS4); /* Set IRQ on card */
+ }
+
+ irq2dev_map[dev->irq] = dev; /* Set IRQ as used by us */
+
+ printk("%s: Schneider & Koch G16 at %#3x, IRQ %d, shared mem at %#08x\n",
+ dev->name, (unsigned int)dev->base_addr,
+ (int) dev->irq, (unsigned int) p->ram);
+
+ if (!(i = SK_lance_init(dev, 0))) /* LANCE init OK? */
+ {
+
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+#ifdef SK_DEBUG
+
+ /*
+ * This debug block tries to stop LANCE,
+ * reinit LANCE with transmitter and receiver disabled,
+ * then stop again and reinit with NORMAL_MODE
+ */
+
+ printk("## %s: After lance init. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_write_reg(CSR0, CSR0_STOP);
+ printk("## %s: LANCE stopped. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_lance_init(dev, MODE_DTX | MODE_DRX);
+ printk("## %s: Reinit with DTX + DRX off. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_write_reg(CSR0, CSR0_STOP);
+ printk("## %s: LANCE stopped. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_lance_init(dev, MODE_NORMAL);
+ printk("## %s: LANCE back to normal mode. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_print_pos(dev, "POS regs before returning OK");
+
+#endif /* SK_DEBUG */
+
+ return 0; /* SK_open() is successful */
+ }
+ else /* LANCE init failed */
+ {
+
+ PRINTK(("## %s: LANCE init failed: CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ dev->start = 0; /* Device not ready */
+ return -EAGAIN;
+ }
+
+} /* End of SK_open() */
+
+
+/*-
+ * Function : SK_lance_init
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Reset LANCE chip, fill RMD, TMD structures with
+ * start values and Start LANCE.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : int mode - put LANCE into "mode" see data-sheet for
+ * more info.
+ * Return Value : 0 - Init done
+ * Errors : -1 - Init failed
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int SK_lance_init(struct device *dev, unsigned short mode)
+{
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+ struct tmd *tmdp;
+ struct rmd *rmdp;
+
+ PRINTK(("## %s: At beginning of LANCE init. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ /* Reset LANCE */
+ SK_reset_board();
+
+ /* Initialize TMD's with start values */
+ p->tmdnum = 0; /* First descriptor for transmitting */
+ p->tmdlast = 0; /* First descriptor for reading stats */
+
+ for (i = 0; i < TMDNUM; i++) /* Init all TMD's */
+ {
+ tmdp = p->tmdhead + i;
+
+ tmdp->u.buffer = (unsigned long) p->tmdbufs[i]; /* assign buffer */
+
+ /* Mark TMD as start and end of packet */
+ tmdp->u.s.status = TX_STP | TX_ENP;
+ }
+
+
+ /* Initialize RMD's with start values */
+
+ p->rmdnum = 0; /* First RMD which will be used */
+
+ for (i = 0; i < RMDNUM; i++) /* Init all RMD's */
+ {
+ rmdp = p->rmdhead + i;
+
+
+ rmdp->u.buffer = (unsigned long) p->rmdbufs[i]; /* assign buffer */
+
+ /*
+ * The LANCE must be the owner at the beginning so that it can fill in
+ * received packets, set the status and release the RMD
+ */
+
+ rmdp->u.s.status = RX_OWN;
+
+ rmdp->blen = -PKT_BUF_SZ; /* Buffer Size in a two's complement */
+
+ rmdp->mlen = 0; /* init message length */
+
+ }
+
+ /* Fill LANCE Initialize Block */
+
+ (p->ram)->ib.mode = mode; /* Set operation mode */
+
+ for (i = 0; i < ETH_ALEN; i++) /* Set physical address */
+ {
+ (p->ram)->ib.paddr[i] = dev->dev_addr[i];
+ }
+
+ for (i = 0; i < 8; i++) /* Set multicast, logical address */
+ {
+ (p->ram)->ib.laddr[i] = 0; /* We do not use logical addressing */
+ }
+
+ /* Set ring descriptor pointers and set number of descriptors */
+
+ (p->ram)->ib.rdrp = (int) p->rmdhead | RMDNUMMASK;
+ (p->ram)->ib.tdrp = (int) p->tmdhead | TMDNUMMASK;
+
+ /* Prepare LANCE Control and Status Registers */
+
+ cli();
+
+ SK_write_reg(CSR3, CSR3_ACON); /* ALE Control !!!THIS MUST BE SET!!!! */
+
+ /*
+ * LANCE addresses the RAM from 0x0000 to 0x3fbf and has no access to
+ * PC Memory locations.
+ *
+ * The structure SK_ram defines the initialization block as the first
+ * thing in RAM, so from the LANCE's point of view its address is always
+ * 0x0000.
+ *
+ * CSR1 contains low order bits 15:0 of initialization block address
+ * CSR2 is built of:
+ * 7:0 High order bits 23:16 of initialization block address
+ * 15:8 reserved, must be 0
+ */
+
+ /* Set initialization block address (must be on word boundary) */
+ SK_write_reg(CSR1, 0); /* Set low order bits 15:0 */
+ SK_write_reg(CSR2, 0); /* Set high order bits 23:16 */
+
+
+ PRINTK(("## %s: After setting CSR1-3. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ /* Initialize LANCE */
+
+ /*
+ * INIT = Initialize, when set, causes the LANCE to begin the
+ * initialization procedure and access the Init Block.
+ */
+
+ SK_write_reg(CSR0, CSR0_INIT);
+
+ sti();
+
+ /* Wait until LANCE finished initialization */
+
+ SK_set_RAP(CSR0); /* Register Address Pointer to CSR0 */
+
+ for (i = 0; (i < 100) && !(SK_rread_reg() & CSR0_IDON); i++)
+ ; /* Wait until init done or go ahead if problems (i>=100) */
+
+ if (i >= 100) /* Something is wrong ! */
+ {
+ printk("%s: can't init am7990, status: %04x "
+ "init_block: %#08x\n",
+ dev->name, (int) SK_read_reg(CSR0),
+ (unsigned int) &(p->ram)->ib);
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "LANCE INIT failed");
+ SK_print_dev(dev,"Device Structure:");
+#endif
+
+ return -1; /* LANCE init failed */
+ }
+
+ PRINTK(("## %s: init done after %d ticks\n", SK_NAME, i));
+
+ /* Clear Initialize done, enable Interrupts, start LANCE */
+
+ SK_write_reg(CSR0, CSR0_IDON | CSR0_INEA | CSR0_STRT);
+
+ PRINTK(("## %s: LANCE started. CSR0: %#06x\n", SK_NAME,
+ SK_read_reg(CSR0)));
+
+ return 0; /* LANCE is up and running */
+
+} /* End of SK_lance_init() */
+
+
+
+/*-
+ * Function : SK_send_packet
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : Writes a socket buffer into a transmit descriptor
+ * and starts transmission.
+ *
+ * Parameters : I : struct sk_buff *skb - packet to transfer
+ * I : struct device *dev - SK_G16 device structure
+ * Return Value : 0 - OK
+ * 1 - Could not transmit (dev_queue_xmit will queue it)
+ * and try to send it later
+ * Globals : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int SK_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ struct tmd *tmdp;
+
+ if (dev->tbusy)
+ {
+ /* if the transmitter has been busy for more than 150ms -> time out */
+
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 15)
+ {
+ return 1; /* We have to try transmit later */
+ }
+
+ printk("%s: xmitter timed out, try to restart!\n", dev->name);
+
+ SK_lance_init(dev, MODE_NORMAL); /* Reinit LANCE */
+
+ dev->tbusy = 0; /* Clear Transmitter flag */
+
+ dev->trans_start = jiffies; /* Mark Start of transmission */
+
+ }
+
+ /*
+ * If some upper layer thinks we missed a transmit-done interrupt,
+ * we are passed NULL.
+ * (see dev_queue_xmit(), net/inet/dev.c)
+ */
+
+ if (skb == NULL)
+ {
+ /*
+ * Dequeue packets from transmit queue and send them.
+ */
+ dev_tint(dev);
+
+ return 0;
+ }
+
+ PRINTK2(("## %s: SK_send_packet() called, CSR0 %#04x.\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+
+ /*
+ * Block a timer-based transmit from overlapping.
+ * This means check if we are already in.
+ */
+
+ if (set_bit(0, (void *) &dev->tbusy) != 0) /* dev->tbusy already set ? */
+ {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ }
+ else
+ {
+ /* Evaluate Packet length */
+ short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+ tmdp = p->tmdhead + p->tmdnum; /* Which descriptor for transmitting */
+
+ /* Fill in Transmit Message Descriptor */
+
+ /* Copy data into dual ported ram */
+
+ memcpy((char *) (tmdp->u.buffer & 0x00ffffff), (char *)skb->data,
+ skb->len);
+
+ tmdp->blen = -len; /* set length to transmit */
+
+ /*
+ * Packet start and end is always set because we use the maximum
+ * packet length as buffer length.
+ * Relinquish ownership to LANCE
+ */
+
+ tmdp->u.s.status = TX_OWN | TX_STP | TX_ENP;
+
+ /* Start Demand Transmission */
+ SK_write_reg(CSR0, CSR0_TDMD | CSR0_INEA);
+
+ dev->trans_start = jiffies; /* Mark start of transmission */
+
+ /* Set pointer to next transmit buffer */
+ p->tmdnum++;
+ p->tmdnum &= TMDNUM-1;
+
+ /* Do we own the next transmit buffer ? */
+ if (! ((p->tmdhead + p->tmdnum)->u.s.status & TX_OWN) )
+ {
+ /*
+ * We own next buffer and are ready to transmit, so
+ * clear busy flag
+ */
+ dev->tbusy = 0;
+ }
+ }
+ dev_kfree_skb(skb, FREE_WRITE);
+ return 0;
+} /* End of SK_send_packet */
+
+
+/*-
+ * Function : SK_interrupt
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : SK_G16 interrupt handler which checks for LANCE
+ * Errors, handles transmit and receive interrupts
+ *
+ * Parameters : I : int irq, struct pt_regs * regs -
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_interrupt(int irq, struct pt_regs * regs)
+{
+ int csr0;
+ struct device *dev = (struct device *) irq2dev_map[irq];
+ struct priv *p = (struct priv *) dev->priv;
+
+
+ PRINTK2(("## %s: SK_interrupt(). status: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ if (dev == NULL)
+ {
+ printk("SK_interrupt(): IRQ %d for unknown device.\n", irq);
+ }
+
+
+ if (dev->interrupt)
+ {
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+ }
+
+ csr0 = SK_read_reg(CSR0); /* store register for checking */
+
+ dev->interrupt = 1; /* We are handling an interrupt */
+
+ /*
+ * Acknowledge all of the current interrupt sources, disable
+ * Interrupts (INEA = 0)
+ */
+
+ SK_write_reg(CSR0, csr0 & CSR0_CLRALL);
+
+ if (csr0 & CSR0_ERR) /* LANCE Error */
+ {
+ printk("%s: error: %04x\n", dev->name, csr0);
+
+ if (csr0 & CSR0_MISS) /* No place to store packet ? */
+ {
+ p->stats.rx_dropped++;
+ }
+ }
+
+ if (csr0 & CSR0_RINT) /* Receive Interrupt (packet arrived) */
+ {
+ SK_rxintr(dev);
+ }
+
+ if (csr0 & CSR0_TINT) /* Transmit interrupt (packet sent) */
+ {
+ SK_txintr(dev);
+ }
+
+ SK_write_reg(CSR0, CSR0_INEA); /* Enable Interrupts */
+
+ dev->interrupt = 0; /* We are out */
+} /* End of SK_interrupt() */
+
+
+/*-
+ * Function : SK_txintr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : After sending a packet we check status, update
+ * statistics and relinquish ownership of transmit
+ * descriptor ring.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_txintr(struct device *dev)
+{
+ int tmdstat;
+ struct tmd *tmdp;
+ struct priv *p = (struct priv *) dev->priv;
+
+
+ PRINTK2(("## %s: SK_txintr() status: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ tmdp = p->tmdhead + p->tmdlast; /* Which buffer we sent at last ? */
+
+ /* Set next buffer */
+ p->tmdlast++;
+ p->tmdlast &= TMDNUM-1;
+
+ tmdstat = tmdp->u.s.status & 0xff00; /* filter out status bits 15:08 */
+
+ /*
+ * We check status of transmitted packet.
+ * see LANCE data-sheet for error explanation
+ */
+ if (tmdstat & TX_ERR) /* Error occurred */
+ {
+ printk("%s: TX error: %04x %04x\n", dev->name, (int) tmdstat,
+ (int) tmdp->status2);
+
+ if (tmdp->status2 & TX_TDR) /* TDR problems? */
+ {
+ printk("%s: tdr-problems \n", dev->name);
+ }
+
+ if (tmdp->status2 & TX_RTRY) /* Failed in 16 attempts to transmit ? */
+ p->stats.tx_aborted_errors++;
+ if (tmdp->status2 & TX_LCOL) /* Late collision ? */
+ p->stats.tx_window_errors++;
+ if (tmdp->status2 & TX_LCAR) /* Loss of Carrier ? */
+ p->stats.tx_carrier_errors++;
+ if (tmdp->status2 & TX_UFLO) /* Underflow error ? */
+ {
+ p->stats.tx_fifo_errors++;
+
+ /*
+ * If a UFLO error occurs it will turn the transmitter off,
+ * so we must reinit the LANCE
+ */
+
+ SK_lance_init(dev, MODE_NORMAL);
+ }
+
+ p->stats.tx_errors++;
+
+ tmdp->status2 = 0; /* Clear error flags */
+ }
+ else if (tmdstat & TX_MORE) /* Collisions occurred ? */
+ {
+ /*
+ * Here I have a problem.
+ * I only know that there must be one or up to 15 collisions.
+ * That's why TX_MORE is set, because after 16 attempts TX_RTRY
+ * would be set instead, which means the packet could not be sent
+ * and the transfer was aborted.
+ *
+ * At first I did not have this in, but then I thought that at minimum
+ * we should see that something was not ok.
+ * If anyone knows a better way to handle this,
+ * please report it. (see the Email addresses in the README file)
+ */
+
+ p->stats.collisions++;
+ }
+ else /* Packet sent without any problems */
+ {
+ p->stats.tx_packets++;
+ }
+
+ /*
+ * We mark transmitter not busy anymore, because now we have a free
+ * transmit descriptor which can be filled by SK_send_packet and
+ * afterwards sent by the LANCE
+ */
+
+ dev->tbusy = 0;
+
+ /*
+ * mark_bh(NET_BH);
+ * This will cause net_bh() to run after this interrupt handler.
+ *
+ * The function which handles the slow IRQ parts is do_bottom_half(),
+ * which runs at normal kernel priority, which means all interrupts are
+ * enabled. (see kernel/irq.c)
+ *
+ * net_bh does something like this:
+ * - check if already in net_bh
+ * - try to transmit something from the send queue
+ * - if something is in the receive queue send it up to higher
+ * levels if it is a known protocol
+ * - try to transmit something from the send queue
+ */
+
+ mark_bh(NET_BH);
+
+} /* End of SK_txintr() */
+
+
+/*-
+ * Function : SK_rxintr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : A packet was received; check for errors and relinquish
+ * ownership of the receive message descriptor.
+ *
+ * Parameters : I : SK_G16 device structure
+ * Return Value : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_rxintr(struct device *dev)
+{
+
+ struct rmd *rmdp;
+ int rmdstat;
+ struct priv *p = (struct priv *) dev->priv;
+
+ PRINTK2(("## %s: SK_rxintr(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ rmdp = p->rmdhead + p->rmdnum;
+
+ /* As long as we own the next entry, check status and send
+ * it up to higher layer
+ */
+
+ while (!( (rmdstat = rmdp->u.s.status) & RX_OWN))
+ {
+ /*
+ * Start and end of packet must be set, because we use
+ * the ethernet maximum packet length (1518) as buffer size.
+ *
+ * Because our buffers are of maximum size, OFLO and BUFF errors are
+ * of no concern (see data sheet)
+ */
+
+ if ((rmdstat & (RX_STP | RX_ENP)) != (RX_STP | RX_ENP))
+ {
+ /* Start of a frame > 1518 Bytes ? */
+
+ if (rmdstat & RX_STP)
+ {
+ p->stats.rx_errors++; /* bad packet received */
+ p->stats.rx_length_errors++; /* packet too long */
+
+ printk("%s: packet too long\n", dev->name);
+ }
+
+ /*
+ * All other packets will be ignored until a new frame with
+ * start (RX_STP) set follows.
+ *
+ * What we do is just give descriptor free for new incoming
+ * packets.
+ */
+
+ rmdp->u.s.status = RX_OWN; /* Relinquish ownership to LANCE */
+
+ }
+ else if (rmdstat & RX_ERR) /* Receive Error ? */
+ {
+ printk("%s: RX error: %04x\n", dev->name, (int) rmdstat);
+
+ p->stats.rx_errors++;
+
+ if (rmdstat & RX_FRAM) p->stats.rx_frame_errors++;
+ if (rmdstat & RX_CRC) p->stats.rx_crc_errors++;
+
+ rmdp->u.s.status = RX_OWN; /* Relinquish ownership to LANCE */
+
+ }
+ else /* We have a packet which can be queued for the upper layers */
+ {
+
+ int len = (rmdp->mlen & 0x0fff); /* extract message length from receive buffer */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(len+2); /* allocate socket buffer */
+
+ if (skb == NULL) /* Could not get mem ? */
+ {
+
+ /*
+ * Couldn't allocate sk_buffer so we give descriptor back
+ * to Lance, update statistics and go ahead.
+ */
+
+ rmdp->u.s.status = RX_OWN; /* Relinquish ownership to LANCE */
+ printk("%s: Couldn't allocate sk_buff, deferring packet.\n",
+ dev->name);
+ p->stats.rx_dropped++;
+
+ break; /* Jump out */
+ }
+
+ /* Prepare sk_buff to queue for upper layers */
+
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align IP header on 16 byte boundary */
+
+ /*
+ * Copy data out of our receive descriptor into sk_buff.
+ *
+ * (rmdp->u.buffer & 0x00ffffff) -> get address of buffer and
+ * ignore status fields)
+ */
+
+ memcpy(skb_put(skb,len), (unsigned char *) (rmdp->u.buffer & 0x00ffffff),
+ len);
+
+
+ /*
+ * Notify the upper protocol layers that there is another packet
+ * to handle
+ *
+ * netif_rx() always succeeds. see /net/inet/dev.c for more.
+ */
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb); /* queue packet and mark it for processing */
+
+ /*
+ * Packet is queued and marked for processing so we
+ * free our descriptor and update statistics
+ */
+
+ rmdp->u.s.status = RX_OWN;
+ p->stats.rx_packets++;
+
+
+ p->rmdnum++;
+ p->rmdnum %= RMDNUM;
+
+ rmdp = p->rmdhead + p->rmdnum;
+ }
+ }
+} /* End of SK_rxintr() */
+
+
+/*-
+ * Function : SK_close
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : close gets called from dev_close() and should
+ * deinstall the card (free_irq, mem etc).
+ *
+ * Parameters : I : struct device *dev - our device structure
+ * Return Value : 0 - closed device driver
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+/* I have tried to set BOOT_ROM on and RAM off but then, after an 'ifconfig
+ * down', the system stops. So I don't set the card back to its initial state.
+ */
+
+static int SK_close(struct device *dev)
+{
+
+ PRINTK(("## %s: SK_close(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ dev->tbusy = 1; /* Transmitter busy */
+ dev->start = 0; /* Card down */
+
+ printk("%s: Shutting %s down CSR0 %#06x\n", dev->name, SK_NAME,
+ (int) SK_read_reg(CSR0));
+
+ SK_write_reg(CSR0, CSR0_STOP); /* STOP the LANCE */
+
+ free_irq(dev->irq); /* Free IRQ */
+ irq2dev_map[dev->irq] = 0; /* Mark IRQ as unused */
+
+ return 0; /* always succeed */
+
+} /* End of SK_close() */
+
+
+/*-
+ * Function : SK_get_stats
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Return current status structure to upper layers.
+ * It is called by sprintf_stats (dev.c).
+ *
+ * Parameters : I : struct device *dev - our device structure
+ * Return Value : struct enet_statistics * - our current statistics
+ * Errors : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static struct enet_statistics *SK_get_stats(struct device *dev)
+{
+
+ struct priv *p = (struct priv *) dev->priv;
+
+ PRINTK(("## %s: SK_get_stats(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ return &p->stats; /* Return Device status */
+
+} /* End of SK_get_stats() */
+
+
+/*-
+ * Function : set_multicast_list
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function gets called when a program performs
+ * a SIOCSIFFLAGS call. Ifconfig does this if you call
+ * 'ifconfig [-]allmulti' which enables or disables the
+ * Promiscuous mode.
+ * Promiscuous mode is when the Network card accepts all
+ * packets, not only the packets which match our MAC
+ * Address. It is useful for writing a network monitor,
+ * but it is also a security problem. You have to remember
+ * that information on the net is generally not encrypted.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device Structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+ * 95/10/18 ACox New multicast calling scheme
+-*/
+
+
+/* Set or clear the multicast filter for SK_G16.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+
+ if (dev->flags&IFF_PROMISC)
+ {
+ /* Reinitialize LANCE with MODE_PROM set */
+ SK_lance_init(dev, MODE_PROM);
+ }
+ else if (dev->mc_count==0 && !(dev->flags&IFF_ALLMULTI))
+ {
+ /* Reinitialize LANCE without MODE_PROM */
+ SK_lance_init(dev, MODE_NORMAL);
+ }
+ else
+ {
+ /* Multicast with logical address filter on */
+ /* Reinitialize LANCE without MODE_PROM */
+ SK_lance_init(dev, MODE_NORMAL);
+
+ /* Not implemented yet. */
+ }
+} /* End of set_multicast_list() */
+
+
+
+/*-
+ * Function : SK_rom_addr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/06/01
+ *
+ * Description : Try to find a Boot_ROM at all possible locations
+ *
+ * Parameters : None
+ * Return Value : Address where Boot_ROM is
+ * Errors : 0 - Did not find Boot_ROM
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+unsigned int SK_rom_addr(void)
+{
+ int i,j;
+ int rom_found = 0;
+ unsigned int rom_location[] = SK_BOOT_ROM_LOCATIONS;
+ unsigned char rom_id[] = SK_BOOT_ROM_ID;
+ unsigned char *test_byte;
+
+ /* Autodetect Boot_ROM */
+ PRINTK(("## %s: Autodetection of Boot_ROM\n", SK_NAME));
+
+ for (i = 0; (rom_location[i] != 0) && (rom_found == 0); i++)
+ {
+
+ PRINTK(("## Trying ROM location %#08x", rom_location[i]));
+
+ rom_found = 1;
+ for (j = 0; j < 6; j++)
+ {
+ test_byte = (unsigned char *) (rom_location[i]+j);
+ PRINTK((" %02x ", *test_byte));
+
+ if(!(*test_byte == rom_id[j]))
+ {
+ rom_found = 0;
+ }
+ }
+ PRINTK(("\n"));
+ }
+
+ if (rom_found == 1)
+ {
+ PRINTK(("## %s: Boot_ROM found at %#08x\n",
+ SK_NAME, rom_location[(i-1)]));
+
+ return (rom_location[--i]);
+ }
+ else
+ {
+ PRINTK(("%s: No Boot_ROM found\n", SK_NAME));
+ return 0;
+ }
+} /* End of SK_rom_addr() */
+
+
+
+/* LANCE access functions
+ *
+ * ! CSR1-3 can only be accessed when the STOP bit in CSR0 is set !
+ */
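+
+/*
+ * For example, the initialization sequence in SK_lance_init() follows
+ * this rule: the hardware reset in SK_reset_board() leaves the LANCE
+ * stopped, and only then are CSR3, CSR1 and CSR2 written, roughly
+ *
+ *	SK_write_reg(CSR3, CSR3_ACON);	// ALE control
+ *	SK_write_reg(CSR1, 0);		// init block address, low word
+ *	SK_write_reg(CSR2, 0);		// init block address, high byte
+ *	SK_write_reg(CSR0, CSR0_INIT);	// leave the stopped state
+ */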
+
+
+/*-
+ * Function : SK_reset_board
+ *
+ * Author : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/25
+ *
+ * Description : This function resets SK_G16 and all components, but
+ * POS registers are not changed
+ *
+ * Parameters : None
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ *
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_reset_board(void)
+{
+ int i;
+
+ SK_PORT = 0x00; /* Reset active */
+ for (i = 0; i < 10 ; i++) /* Delay min 5ms */
+ ;
+ SK_PORT = SK_RESET; /* Set back to normal operation */
+
+} /* End of SK_reset_board() */
+
+
+/*-
+ * Function : SK_set_RAP
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : Set LANCE Register Address Port to register
+ * for later data transfer.
+ *
+ * Parameters : I : reg_number - which CSR to read/write from/to
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_set_RAP(int reg_number)
+{
+ SK_IOREG = reg_number;
+ SK_PORT = SK_RESET | SK_RAP | SK_WREG;
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+} /* End of SK_set_RAP() */
+
+
+/*-
+ * Function : SK_read_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : Set RAP and read data from a LANCE CSR register
+ *
+ * Parameters : I : reg_number - which CSR to read from
+ * Return Value : Register contents
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+int SK_read_reg(int reg_number)
+{
+ SK_set_RAP(reg_number);
+
+ SK_PORT = SK_RESET | SK_RDATA | SK_RREG;
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+ return (SK_IOREG);
+
+} /* End of SK_read_reg() */
+
+
+/*-
+ * Function : SK_rread_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/28
+ *
+ * Description : Read data from the previously selected register.
+ * This function requires that you know which
+ * register is currently selected. Be aware that CSR1-3
+ * can only be accessed when the STOP bit in CSR0 is set.
+ *
+ * Return Value : Register contents
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+int SK_rread_reg(void)
+{
+ SK_PORT = SK_RESET | SK_RDATA | SK_RREG;
+
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+ return (SK_IOREG);
+
+} /* End of SK_rread_reg() */
+
+
+/*-
+ * Function : SK_write_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function sets the RAP, then fills in the
+ * LANCE I/O Reg and starts the transfer to the LANCE.
+ * It waits until the transfer has ended (at most about 7 ms)
+ * and then returns.
+ *
+ * Parameters : I : reg_number - which CSR to write to
+ * I : value - what value to fill into register
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_write_reg(int reg_number, int value)
+{
+ SK_set_RAP(reg_number);
+
+ SK_IOREG = value;
+ SK_PORT = SK_RESET | SK_RDATA | SK_WREG;
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+} /* End of SK_write_reg */
+
+
+
+/*
+ * Debugging functions
+ * -------------------
+ */
+
+/*-
+ * Function : SK_print_pos
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function prints out the POS (Programmable
+ * Option Select) registers. Used mainly to debug operation.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : char * - Text which will be printed as title
+ * Return Value : None
+ * Errors : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_print_pos(struct device *dev, char *text)
+{
+ int ioaddr = dev->base_addr;
+
+ unsigned char pos0 = inb(SK_POS0),
+ pos1 = inb(SK_POS1),
+ pos2 = inb(SK_POS2),
+ pos3 = inb(SK_POS3),
+ pos4 = inb(SK_POS4);
+
+
+ printk("## %s: %s.\n"
+ "## pos0=%#4x pos1=%#4x pos2=%#04x pos3=%#08x pos4=%#04x\n",
+ SK_NAME, text, pos0, pos1, pos2, (pos3<<14), pos4);
+
+} /* End of SK_print_pos() */
+
+
+
+/*-
+ * Function : SK_print_dev
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function simply prints out the important fields
+ * of the device structure.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : char *text - Title for printing
+ * Return Value : None
+ * Errors : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_print_dev(struct device *dev, char *text)
+{
+ if (dev == NULL)
+ {
+ printk("## %s: Device Structure. %s\n", SK_NAME, text);
+ printk("## DEVICE == NULL\n");
+ }
+ else
+ {
+ printk("## %s: Device Structure. %s\n", SK_NAME, text);
+ printk("## Device Name: %s Base Address: %#06lx IRQ: %d\n",
+ dev->name, dev->base_addr, dev->irq);
+
+ printk("## FLAGS: start: %d tbusy: %ld int: %d\n",
+ dev->start, dev->tbusy, dev->interrupt);
+
+ printk("## next device: %#08x init function: %#08x\n",
+ (int) dev->next, (int) dev->init);
+ }
+
+} /* End of SK_print_dev() */
+
+
+
+/*-
+ * Function : SK_print_ram
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/06/02
+ *
+ * Description : This function is used to check how things are set up
+ * in the 16KB RAM, including the pointers to the receive and
+ * transmit descriptor rings and the rx and tx buffer locations.
+ * It contains a minor printing bug which has no effect on the values;
+ * only the newlines are not correct.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_print_ram(struct device *dev)
+{
+
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ printk("## %s: RAM Details.\n"
+ "## RAM at %#08x tmdhead: %#08x rmdhead: %#08x initblock: %#08x\n",
+ SK_NAME,
+ (unsigned int) p->ram,
+ (unsigned int) p->tmdhead,
+ (unsigned int) p->rmdhead,
+ (unsigned int) &(p->ram)->ib);
+
+ printk("## ");
+
+ for(i = 0; i < TMDNUM; i++)
+ {
+ if (!(i % 3)) /* Every third line do a newline */
+ {
+ printk("\n## ");
+ }
+ printk("tmdbufs%d: %#08x ", (i+1), (int) p->tmdbufs[i]);
+ }
+ printk("## ");
+
+ for(i = 0; i < RMDNUM; i++)
+ {
+ if (!(i % 3)) /* Every third line do a newline */
+ {
+ printk("\n## ");
+ }
+ printk("rmdbufs%d: %#08x ", (i+1), (int) p->rmdbufs[i]);
+ }
+ printk("\n");
+
+} /* End of SK_print_ram() */
+
diff --git a/i386/i386at/gpl/linux/net/sk_g16.h b/i386/i386at/gpl/linux/net/sk_g16.h
new file mode 100644
index 00000000..3a92f1f9
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/sk_g16.h
@@ -0,0 +1,171 @@
+/*-
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ * Module : sk_g16.h
+ * Version : $Revision: 1.1.1.1 $
+ *
+ * Author : M.Hipp (mhipp@student.uni-tuebingen.de)
+ * changes by : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/25
+ *
+ * Description : In here are all necessary definitions of
+ * the am7990 (LANCE) chip used for writing a
+ * network device driver which uses this chip
+ *
+ * $Log: sk_g16.h,v $
+ * Revision 1.1.1.1 1996/10/30 01:39:56 thomas
+ * Imported from UK22
+ *
+ * Revision 1.3 1996/03/25 20:24:35 goel
+ * Linux driver merge.
+ *
+-*/
+
+#ifndef SK_G16_H
+
+#define SK_G16_H
+
+
+/*
+ * Control and Status Register 0 (CSR0) bit definitions
+ *
+ * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
+ *
+ */
+
+#define CSR0_ERR 0x8000 /* Error summary (R) */
+#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
+#define CSR0_CERR 0x2000 /* Collision Error (RC) */
+#define CSR0_MISS 0x1000 /* Missed packet (RC) */
+#define CSR0_MERR 0x0800 /* Memory Error (RC) */
+#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
+#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
+#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
+#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
+#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
+#define CSR0_RXON 0x0020 /* Receiver on (R) */
+#define CSR0_TXON 0x0010 /* Transmitter on (R) */
+#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
+#define CSR0_STOP 0x0004 /* Stop (RS) */
+#define CSR0_STRT 0x0002 /* Start (RS) */
+#define CSR0_INIT 0x0001 /* Initialize (RS) */
+
+#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
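+
+/*
+ * Typical interrupt acknowledge sequence (this is what SK_interrupt()
+ * in sk_g16.c does): read CSR0 and write the clearable bits back, which
+ * acknowledges all pending sources and drops INEA at the same time:
+ *
+ *	csr0 = SK_read_reg(CSR0);
+ *	SK_write_reg(CSR0, csr0 & CSR0_CLRALL);
+ *	...
+ *	SK_write_reg(CSR0, CSR0_INEA);	// re-enable interrupts when done
+ */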
+
+/*
+ * Control and Status Register 3 (CSR3) bit definitions
+ *
+ */
+
+#define CSR3_BSWAP 0x0004 /* Byte Swap (RW) */
+#define CSR3_ACON 0x0002 /* ALE Control (RW) */
+#define CSR3_BCON 0x0001 /* Byte Control (RW) */
+
+/*
+ * Initialization Block Mode operation Bit Definitions.
+ */
+
+#define MODE_PROM 0x8000 /* Promiscuous Mode */
+#define MODE_INTL 0x0040 /* Internal Loopback */
+#define MODE_DRTY 0x0020 /* Disable Retry */
+#define MODE_COLL 0x0010 /* Force Collision */
+#define MODE_DTCR 0x0008 /* Disable Transmit CRC */
+#define MODE_LOOP 0x0004 /* Loopback */
+#define MODE_DTX 0x0002 /* Disable the Transmitter */
+#define MODE_DRX 0x0001 /* Disable the Receiver */
+
+#define MODE_NORMAL 0x0000 /* Normal operation mode */
+
+/*
+ * Receive message descriptor status bit definitions.
+ */
+
+#define RX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
+#define RX_ERR 0x40 /* Error Summary */
+#define RX_FRAM 0x20 /* Framing Error */
+#define RX_OFLO 0x10 /* Overflow Error */
+#define RX_CRC 0x08 /* CRC Error */
+#define RX_BUFF 0x04 /* Buffer Error */
+#define RX_STP 0x02 /* Start of Packet */
+#define RX_ENP 0x01 /* End of Packet */
+
+
+/*
+ * Transmit message descriptor status bit definitions.
+ */
+
+#define TX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
+#define TX_ERR 0x40 /* Error Summary */
+#define TX_MORE         0x10     /* More than 1 retry needed to Xmit */
+#define TX_ONE 0x08 /* One retry needed to Xmit */
+#define TX_DEF 0x04 /* Deferred */
+#define TX_STP 0x02 /* Start of Packet */
+#define TX_ENP 0x01 /* End of Packet */
+
+/*
+ * Transmit status (2) (valid if TX_ERR == 1)
+ */
+
+#define TX_BUFF 0x8000 /* Buffering error (no ENP) */
+#define TX_UFLO 0x4000 /* Underflow (late memory) */
+#define TX_LCOL 0x1000 /* Late collision */
+#define TX_LCAR 0x0400 /* Loss of Carrier */
+#define TX_RTRY 0x0200 /* Failed after 16 retransmissions */
+#define TX_TDR 0x003f /* Time-domain-reflectometer-value */
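+
+/*
+ * Illustrative sketch only (not part of the driver): when TX_ERR is set
+ * in a transmit descriptor, the second status word carries the detail
+ * bits above, with the low six bits holding the TDR (cable fault) value.
+ */
+#if 0
+static void tmd_report_error(unsigned short status2)
+{
+	if (status2 & TX_RTRY)
+		printk("xmit gave up after 16 retransmissions\n");
+	else if (status2 & TX_LCAR)
+		printk("loss of carrier\n");
+	else if (status2 & TX_UFLO)
+		printk("transmit underflow\n");
+
+	printk("TDR value: %d\n", status2 & TX_TDR);
+}
+#endif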
+
+
+/*
+ * Structures used for Communication with the LANCE
+ */
+
+/* LANCE Initialize Block */
+
+struct init_block
+{
+ unsigned short mode; /* Mode Register */
+ unsigned char paddr[6]; /* Physical Address (MAC) */
+ unsigned char laddr[8]; /* Logical Filter Address (not used) */
+ unsigned int rdrp; /* Receive Descriptor Ring pointer */
+ unsigned int tdrp; /* Transmit Descriptor Ring pointer */
+};
+
+
+/* Receive Message Descriptor Entry */
+
+struct rmd
+{
+ union
+ {
+ unsigned long buffer; /* Address of buffer */
+ struct
+ {
+ unsigned char unused[3];
+ unsigned volatile char status; /* Status Bits */
+ } s;
+ } u;
+ volatile short blen; /* Buffer Length (two's complement) */
+ unsigned short mlen; /* Message Byte Count */
+};
+
+
+/* Transmit Message Descriptor Entry */
+
+struct tmd
+{
+ union
+ {
+ unsigned long buffer; /* Address of buffer */
+ struct
+ {
+ unsigned char unused[3];
+ unsigned volatile char status; /* Status Bits */
+ } s;
+ } u;
+ unsigned short blen; /* Buffer Length (two's complement) */
+ unsigned volatile short status2; /* Error Status Bits */
+};
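+
+/*
+ * Illustrative sketch only (not part of the driver): the OWN bit in a
+ * descriptor status decides who may touch the entry (0 = host, 1 = LANCE).
+ * A receive poll therefore only consumes descriptors whose RX_OWN bit is
+ * clear and hands each one back to the chip by setting the bit again.
+ */
+#if 0
+static void rmd_ring_poll(struct rmd *ring, int nent)
+{
+	int i;
+
+	for (i = 0; i < nent; i++)
+	{
+		struct rmd *rmdp = &ring[i];
+
+		if (rmdp->u.s.status & RX_OWN)	/* still owned by the LANCE */
+			continue;
+
+		if (rmdp->u.s.status & RX_ERR)	/* error summary set */
+			printk("rx error, status 0x%02x\n", rmdp->u.s.status);
+
+		/* A real driver would copy mlen bytes out of the buffer here. */
+
+		rmdp->u.s.status = RX_OWN;	/* hand the entry back to the chip */
+	}
+}
+#endif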
+
+#endif /* End of SK_G16_H */
diff --git a/i386/i386at/gpl/linux/net/smc-ultra.c b/i386/i386at/gpl/linux/net/smc-ultra.c
new file mode 100644
index 00000000..f13cd0a7
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/smc-ultra.c
@@ -0,0 +1,419 @@
+/* smc-ultra.c: A SMC Ultra ethernet driver for linux. */
+/*
+ Written 1993,1994,1995 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is a driver for the SMC Ultra and SMC EtherEZ ethercards.
+
+ This driver uses the cards in the 8390-compatible, shared memory mode.
+ Most of the run-time complexity is handled by the generic code in
+ 8390.c. The code in this file is responsible for
+
+ ultra_probe() Detecting and initializing the card.
+ ultra_probe1()
+
+ ultra_open() The card-specific details of starting, stopping
+ ultra_reset_8390() and resetting the 8390 NIC core.
+ ultra_close()
+
+ ultra_block_input() Routines for reading and writing blocks of
+ ultra_block_output() packet buffer memory.
+
+ This driver enables the shared memory only when doing the actual data
+	transfers to avoid a bug in early versions of the card that corrupted
+	data transferred by an AHA1542.
+
+ This driver does not support the programmed-I/O data transfer mode of
+	the EtherEZ. That support (if available) is in smc-ez.c. Nor does it
+ use the non-8390-compatible "Altego" mode. (No support currently planned.)
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users.
+*/
+
+static const char *version =
+ "smc-ultra.c:v1.12 1/18/95 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int ultra_portlist[] =
+{0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380, 0};
+
+int ultra_probe(struct device *dev);
+int ultra_probe1(struct device *dev, int ioaddr);
+
+static int ultra_open(struct device *dev);
+static void ultra_reset_8390(struct device *dev);
+static void ultra_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra_block_output(struct device *dev, int count,
+				const unsigned char *buf, int start_page);
+static int ultra_close_card(struct device *dev);
+
+
+#define START_PG 0x00 /* First page of TX buffer */
+
+#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */
+#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */
+#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA_IO_EXTENT 32
+
+/* Probe for the Ultra.  This looks like an 8013 with the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+ following.
+*/
+#ifdef HAVE_DEVLIST
+struct netdev_entry ultra_drv =
+{"ultra", ultra_probe1, NETCARD_IO_EXTENT, netcard_portlist};
+#else
+
+int ultra_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ultra_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; ultra_portlist[i]; i++) {
+ int ioaddr = ultra_portlist[i];
+ if (check_region(ioaddr, ULTRA_IO_EXTENT))
+ continue;
+ if (ultra_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+int ultra_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ int checksum = 0;
+ const char *model_name;
+ unsigned char eeprom_irq = 0;
+ static unsigned version_printed = 0;
+ /* Values from various config regs. */
+ unsigned char num_pages, irqreg, addr;
+ unsigned char idreg = inb(ioaddr + 7);
+ unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+
+ /* Check the ID nibble. */
+ if ((idreg & 0xF0) != 0x20 /* SMC Ultra */
+ && (idreg & 0xF0) != 0x40) /* SMC EtherEZ */
+ return ENODEV;
+
+ /* Select the station address register set. */
+ outb(reg4, ioaddr + 4);
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if ((checksum & 0xff) != 0xFF)
+ return ENODEV;
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("smc-ultra.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
+
+ printk("%s: %s at %#3x,", dev->name, model_name, ioaddr);
+
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* Switch from the station address to the alternate register set and
+ read the useful registers there. */
+ outb(0x80 | reg4, ioaddr + 4);
+
+	/* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */
+ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
+ irqreg = inb(ioaddr + 0xd);
+ addr = inb(ioaddr + 0xb);
+
+ /* Switch back to the station address register set so that the MS-DOS driver
+ can find the card after a warm boot. */
+ outb(reg4, ioaddr + 4);
+
+ if (dev->irq < 2) {
+ unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
+ int irq;
+
+ /* The IRQ bits are split. */
+ irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)];
+
+ if (irq == 0) {
+ printk(", failed to detect IRQ line.\n");
+ return -EAGAIN;
+ }
+ dev->irq = irq;
+ eeprom_irq = 1;
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (", no memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ /* OK, we are certain this is going to work. Setup the device. */
+ request_region(ioaddr, ULTRA_IO_EXTENT, model_name);
+
+ /* The 8390 isn't at the base address, so fake the offset */
+ dev->base_addr = ioaddr+ULTRA_NIC_OFFSET;
+
+ {
+ int addr_tbl[4] = {0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000};
+ short num_pages_tbl[4] = {0x20, 0x40, 0x80, 0xff};
+
+ dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ;
+ num_pages = num_pages_tbl[(addr >> 4) & 3];
+ }
+
+ ei_status.name = model_name;
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = START_PG;
+ ei_status.rx_start_page = START_PG + TX_PAGES;
+ ei_status.stop_page = num_pages;
+
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = dev->rmem_end
+ = dev->mem_start + (ei_status.stop_page - START_PG)*256;
+
+ printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ",
+ dev->irq, dev->mem_start, dev->mem_end-1);
+
+ ei_status.reset_8390 = &ultra_reset_8390;
+ ei_status.block_input = &ultra_block_input;
+ ei_status.block_output = &ultra_block_output;
+ ei_status.get_8390_hdr = &ultra_get_8390_hdr;
+ dev->open = &ultra_open;
+ dev->stop = &ultra_close_card;
+ NS8390_init(dev, 0);
+
+ return 0;
+}
+
+static int
+ultra_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+
+ if (request_irq(dev->irq, ei_interrupt, 0, ei_status.name))
+ return -EAGAIN;
+
+ outb(ULTRA_MEMENB, ioaddr); /* Enable memory, 16 bit mode. */
+ outb(0x80, ioaddr + 5);
+ outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static void
+ultra_reset_8390(struct device *dev)
+{
+ int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */
+
+ outb(ULTRA_RESET, cmd_port);
+ if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ outb(ULTRA_MEMENB, cmd_port);
+
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ultra_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ unsigned long hdr_start = dev->mem_start + ((ring_page - START_PG)<<8);
+
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem on */
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem off */
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps. */
+
+static void
+ultra_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ unsigned long xfer_start = dev->mem_start + ring_offset - (START_PG<<8);
+
+ /* Enable shared memory. */
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET);
+
+ if (xfer_start + count > dev->rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = dev->rmem_end - xfer_start;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */
+}
+
+static void
+ultra_block_output(struct device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ unsigned long shmem = dev->mem_start + ((start_page - START_PG)<<8);
+
+ /* Enable shared memory. */
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET);
+
+ memcpy_toio(shmem, buf, count);
+
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */
+}
+
+static int
+ultra_close_card(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = 0;
+
+ NS8390_init(dev, 0);
+
+ /* We should someday disable shared memory and change to 8-bit mode
+ "just in case"... */
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_ULTRA_CARDS 4 /* Max number of Ultra cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_ULTRA_CARDS] = { 0, };
+static struct device dev_ultra[MAX_ULTRA_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_ULTRA_CARDS] = { 0, };
+static int irq[MAX_ULTRA_CARDS] = { 0, };
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) {
+ struct device *dev = &dev_ultra[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->init = ultra_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "smc-ultra.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "smc-ultra.c: No SMC Ultra card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) {
+ struct device *dev = &dev_ultra[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: ultra_close_card() does free_irq + irq2dev */
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET;
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(ioaddr, ULTRA_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -Wall -O6 -I/usr/src/linux/net/inet -c smc-ultra.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/tulip.c b/i386/i386at/gpl/linux/net/tulip.c
new file mode 100644
index 00000000..3386a9b8
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/tulip.c
@@ -0,0 +1,782 @@
+/* tulip.c: A DEC 21040 ethernet driver for linux. */
+/*
+ NOTICE: this version works with kernels 1.1.82 and later only!
+ Written 1994,1995 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ This driver is for the SMC EtherPower PCI ethernet adapter.
+ It should work with most other DEC 21*40-based ethercards.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+*/
+
+static const char *version = "tulip.c:v0.05 1/20/95 becker@cesdis.gsfc.nasa.gov\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* The total size is unusually large: The 21040 aligns each of its 16
+ longword-wide registers on a quadword boundary. */
+#define TULIP_TOTAL_SIZE 0x80
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry tulip_drv =
+{"Tulip", tulip_pci_probe, TULIP_TOTAL_SIZE, NULL};
+#endif
+
+#define TULIP_DEBUG 1
+#ifdef TULIP_DEBUG
+int tulip_debug = TULIP_DEBUG;
+#else
+int tulip_debug = 1;
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the DECchip 21040 "Tulip", Digital's
+single-chip ethernet controller for PCI, as used on the SMC EtherPower
+ethernet adapter.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line. While it's
+physically possible to share PCI interrupt lines, the kernel doesn't
+support it.
+
+III. Driver operation
+
+IIIa. Ring buffers
+The Tulip can use either ring buffers or lists of Tx and Rx descriptors.
+The current driver uses a statically allocated Rx ring of descriptors and
+buffers, and a list of the Tx buffers.
+
+IIIb. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'tp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+we can't avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Duke Kamstra of SMC for providing an EtherPower board.
+
+The DEC databook doesn't document which Rx filter settings accept broadcast
+packets. Nor does it document how to configure the part's serial
+subsystem for normal (vs. loopback) operation, or how to have it
+autoswitch between internal 10baseT, SIA and AUI transceivers.
+
+The databook claims that CSR13, CSR14, and CSR15 should each be the last
+register of the set CSR12-15 written. Hmmm, now how is that possible?
+*/
+
+#define DEC_VENDOR_ID 0x1011 /* Hex 'D' :-> */
+#define DEC_21040_ID 0x0002 /* Change for 21140. */
+
+/* Keep the ring sizes a power of two for efficiency. */
+#define TX_RING_SIZE 4
+#define RX_RING_SIZE 4
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* Offsets to the Command and Status Registers, "CSRs". All accesses
+ must be longword instructions and quadword aligned. */
+enum tulip_offsets {
+ CSR0=0, CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
+ CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
+ CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78 };
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct tulip_rx_desc {
+ int status;
+ int length;
+ char *buffer1, *buffer2; /* We use only buffer 1. */
+};
+
+struct tulip_tx_desc {
+ int status;
+ int length;
+ char *buffer1, *buffer2; /* We use only buffer 1. */
+};
+
+struct tulip_private {
+ char devname[8]; /* Used only for kernel debugging. */
+ struct tulip_rx_desc rx_ring[RX_RING_SIZE];
+ struct tulip_tx_desc tx_ring[TX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ long rx_buffs; /* Address of temporary Rx buffers. */
+ struct enet_statistics stats;
+ int setup_frame[48]; /* Pseudo-Tx frame to init address table. */
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ unsigned int tx_full:1;
+ int pad0, pad1; /* Used for 8-byte alignment */
+};
+
+static void tulip_probe1(int ioaddr, int irq);
+static int tulip_open(struct device *dev);
+static void tulip_init_ring(struct device *dev);
+static int tulip_start_xmit(struct sk_buff *skb, struct device *dev);
+static int tulip_rx(struct device *dev);
+static void tulip_interrupt(int irq, struct pt_regs *regs);
+static int tulip_close(struct device *dev);
+static struct enet_statistics *tulip_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+static int set_mac_address(struct device *dev, void *addr);
+
+
+
+#ifndef MODULE
+/* This 21040 probe is unlike most other board probes. We can use memory
+ efficiently by allocating a large contiguous region and dividing it
+ ourselves. This is done by having the initialization occur before
+ the 'kmalloc()' memory management system is started. */
+
+int dec21040_init(void)
+{
+
+ if (pcibios_present()) {
+ int pci_index;
+ for (pci_index = 0; pci_index < 8; pci_index++) {
+ unsigned char pci_bus, pci_device_fn, pci_irq_line;
+ unsigned long pci_ioaddr;
+
+ if (pcibios_find_device (DEC_VENDOR_ID, DEC_21040_ID, pci_index,
+ &pci_bus, &pci_device_fn) != 0)
+ break;
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &pci_irq_line);
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ /* Remove I/O space marker in bit 0. */
+ pci_ioaddr &= ~3;
+ if (tulip_debug > 2)
+ printk("Found DEC PCI Tulip at I/O %#lx, IRQ %d.\n",
+ pci_ioaddr, pci_irq_line);
+ tulip_probe1(pci_ioaddr, pci_irq_line);
+ }
+ }
+
+ return 0;
+}
+#endif
+#ifdef MODULE
+static int tulip_probe(struct device *dev)
+{
+ printk("tulip: This driver does not yet install properly from module!\n");
+ return -1;
+}
+#endif
+
+static void tulip_probe1(int ioaddr, int irq)
+{
+ static int did_version = 0; /* Already printed version info. */
+ struct device *dev;
+ struct tulip_private *tp;
+ int i;
+
+ if (tulip_debug > 0 && did_version++ == 0)
+ printk(version);
+
+ dev = init_etherdev(0, 0);
+
+ printk("%s: DEC 21040 Tulip at %#3x,", dev->name, ioaddr);
+
+ /* Stop the chip's Tx and Rx processes. */
+ outl(inl(ioaddr + CSR6) & ~0x2002, ioaddr + CSR6);
+ /* Clear the missed-packet counter. */
+ inl(ioaddr + CSR8) & 0xffff;
+
+ /* The station address ROM is read byte serially. The register must
+ be polled, waiting for the value to be read bit serially from the
+ EEPROM.
+ */
+ outl(0, ioaddr + CSR9); /* Reset the pointer with a dummy write. */
+ for (i = 0; i < 6; i++) {
+ int value, boguscnt = 100000;
+ do
+ value = inl(ioaddr + CSR9);
+ while (value < 0 && --boguscnt > 0);
+ printk(" %2.2x", dev->dev_addr[i] = value);
+ }
+ printk(", IRQ %d\n", irq);
+
+ /* We do a request_region() only to register /proc/ioports info. */
+ request_region(ioaddr, TULIP_TOTAL_SIZE, "DEC Tulip Ethernet");
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /* Make certain the data structures are quadword aligned. */
+ tp = kmalloc(sizeof(*tp), GFP_KERNEL | GFP_DMA);
+ dev->priv = tp;
+ tp->rx_buffs = kmalloc(PKT_BUF_SZ*RX_RING_SIZE, GFP_KERNEL | GFP_DMA);
+
+ /* The Tulip-specific entries in the device structure. */
+ dev->open = &tulip_open;
+ dev->hard_start_xmit = &tulip_start_xmit;
+ dev->stop = &tulip_close;
+ dev->get_stats = &tulip_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->set_mac_address = &set_mac_address;
+
+ return;
+}
+
+
+static int
+tulip_open(struct device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ /* Reset the chip, holding bit 0 set at least 10 PCI cycles. */
+ outl(0xfff80001, ioaddr + CSR0);
+ SLOW_DOWN_IO;
+ /* Deassert reset. Set 8 longword cache alignment, 8 longword burst.
+ Cache alignment bits 15:14 Burst length 13:8
+ 0000 No alignment 0x00000000 unlimited 0800 8 longwords
+ 4000 8 longwords 0100 1 longword 1000 16 longwords
+ 8000 16 longwords 0200 2 longwords 2000 32 longwords
+ C000 32 longwords 0400 4 longwords
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list. */
+ outl(0xfff84800, ioaddr + CSR0);
+
+ if (irq2dev_map[dev->irq] != NULL
+ || (irq2dev_map[dev->irq] = dev) == NULL
+ || dev->irq == 0
+ || request_irq(dev->irq, &tulip_interrupt, 0, "DEC 21040 Tulip")) {
+ return -EAGAIN;
+ }
+
+ if (tulip_debug > 1)
+ printk("%s: tulip_open() irq %d.\n", dev->name, dev->irq);
+
+ tulip_init_ring(dev);
+
+ /* Fill the whole address filter table with our physical address. */
+ {
+ unsigned short *eaddrs = (unsigned short *)dev->dev_addr;
+ int *setup_frm = tp->setup_frame, i;
+
+ /* You must add the broadcast address when doing perfect filtering! */
+ *setup_frm++ = 0xffff;
+ *setup_frm++ = 0xffff;
+ *setup_frm++ = 0xffff;
+ /* Fill the rest of the accept table with our physical address. */
+ for (i = 1; i < 16; i++) {
+ *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2];
+ }
+ /* Put the setup frame on the Tx list. */
+ tp->tx_ring[0].length = 0x08000000 | 192;
+ tp->tx_ring[0].buffer1 = (char *)tp->setup_frame;
+ tp->tx_ring[0].buffer2 = 0;
+ tp->tx_ring[0].status = 0x80000000;
+
+ tp->cur_tx++, tp->dirty_tx++;
+ }
+
+ outl((int)tp->rx_ring, ioaddr + CSR3);
+ outl((int)tp->tx_ring, ioaddr + CSR4);
+
+ /* Turn on the xcvr interface. */
+ outl(0x00000000, ioaddr + CSR13);
+ outl(0x00000004, ioaddr + CSR13);
+
+ /* Start the chip's Tx and Rx processes. */
+ outl(0xfffe2002, ioaddr + CSR6);
+
+ /* Trigger an immediate transmit demand to process the setup frame. */
+ outl(0, ioaddr + CSR1);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outl(0xFFFFFFFF, ioaddr + CSR7);
+
+ if (tulip_debug > 2) {
+ printk("%s: Done tulip_open(), CSR0 %8.8x, CSR13 %8.8x.\n",
+ dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR13));
+ }
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void
+tulip_init_ring(struct device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+
+ tp->tx_full = 0;
+ tp->cur_rx = tp->cur_tx = 0;
+ tp->dirty_rx = tp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ tp->rx_ring[i].status = 0x80000000; /* Owned by Tulip chip */
+ tp->rx_ring[i].length = PKT_BUF_SZ;
+ tp->rx_ring[i].buffer1 = (char *)(tp->rx_buffs + i*PKT_BUF_SZ);
+ tp->rx_ring[i].buffer2 = (char *)&tp->rx_ring[i+1];
+ }
+ /* Mark the last entry as wrapping the ring. */
+ tp->rx_ring[i-1].length = PKT_BUF_SZ | 0x02000000;
+ tp->rx_ring[i-1].buffer2 = (char *)&tp->rx_ring[0];
+
+ /* The Tx buffer descriptor is filled in as needed, but we
+ do need to clear the ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ tp->tx_ring[i].status = 0x00000000;
+ }
+}
+
+static int
+tulip_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int entry;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ int i;
+ if (tickssofar < 20)
+ return 1;
+ printk("%s: transmit timed out, status %8.8x, SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
+ dev->name, inl(ioaddr + CSR5), inl(ioaddr + CSR12),
+ inl(ioaddr + CSR13), inl(ioaddr + CSR14), inl(ioaddr + CSR15));
+ printk(" Rx ring %8.8x: ", (int)tp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
+ printk("\n Tx ring %8.8x: ", (int)tp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
+ printk("\n");
+
+ tp->stats.tx_errors++;
+ /* We should reinitialize the hardware here. */
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ return 0;
+ }
+
+ if (skb == NULL || skb->len <= 0) {
+ printk("%s: Obsolete driver layer request made: skbuff==NULL.\n",
+ dev->name);
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ If this ever occurs the queue layer is doing something evil! */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % TX_RING_SIZE;
+
+ tp->tx_full = 1;
+ tp->tx_skbuff[entry] = skb;
+ tp->tx_ring[entry].length = skb->len |
+ (entry == TX_RING_SIZE-1 ? 0xe2000000 : 0xe0000000);
+ tp->tx_ring[entry].buffer1 = skb->data;
+ tp->tx_ring[entry].buffer2 = 0;
+ tp->tx_ring[entry].status = 0x80000000; /* Pass ownership to the chip. */
+
+ tp->cur_tx++;
+
+ /* Trigger an immediate transmit demand. */
+ outl(0, ioaddr + CSR1);
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void tulip_interrupt(int irq, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct tulip_private *lp;
+ int csr5, ioaddr, boguscnt=10;
+
+ if (dev == NULL) {
+ printk ("tulip_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (struct tulip_private *)dev->priv;
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = 1;
+
+ do {
+ csr5 = inl(ioaddr + CSR5);
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outl(csr5 & 0x0001ffff, ioaddr + CSR5);
+
+ if (tulip_debug > 4)
+ printk("%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
+ dev->name, csr5, inl(dev->base_addr + CSR5));
+
+ if ((csr5 & 0x00018000) == 0)
+ break;
+
+ if (csr5 & 0x0040) /* Rx interrupt */
+ tulip_rx(dev);
+
+ if (csr5 & 0x0001) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = lp->tx_ring[entry].status;
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ if (status & 0x8000) {
+					/* There was a major error, log it. */
+ lp->stats.tx_errors++;
+ if (status & 0x4104) lp->stats.tx_aborted_errors++;
+ if (status & 0x0C00) lp->stats.tx_carrier_errors++;
+ if (status & 0x0200) lp->stats.tx_window_errors++;
+ if (status & 0x0002) lp->stats.tx_fifo_errors++;
+ if (status & 0x0080) lp->stats.tx_heartbeat_errors++;
+#ifdef ETHER_STATS
+ if (status & 0x0100) lp->stats.collisions16++;
+#endif
+ } else {
+#ifdef ETHER_STATS
+ if (status & 0x0001) lp->stats.tx_deferred++;
+#endif
+ lp->stats.collisions += (status >> 3) & 15;
+ lp->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && dev->tbusy
+ && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log errors. */
+ if (csr5 & 0x8000) { /* Abnormal error summary bit. */
+ if (csr5 & 0x0008) lp->stats.tx_errors++; /* Tx babble. */
+ if (csr5 & 0x0100) { /* Missed a Rx frame. */
+ lp->stats.rx_errors++;
+ lp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+ }
+ if (csr5 & 0x0800) {
+ printk("%s: Something Wicked happened! %8.8x.\n",
+ dev->name, csr5);
+ /* Hmmmmm, it's not clear what to do here. */
+ }
+ }
+ if (--boguscnt < 0) {
+ printk("%s: Too much work at interrupt, csr5=0x%8.8x.\n",
+ dev->name, csr5);
+ /* Clear all interrupt sources. */
+ outl(0x0001ffff, ioaddr + CSR5);
+ break;
+ }
+ } while (1);
+
+ if (tulip_debug > 3)
+ printk("%s: exiting interrupt, csr5=%#4.4x.\n",
+ dev->name, inl(ioaddr + CSR5));
+
+ /* Special code for testing *only*. */
+ {
+ static int stopit = 10;
+ if (dev->start == 0 && --stopit < 0) {
+ printk("%s: Emergency stop, looping startup interrupt.\n",
+ dev->name);
+ free_irq(irq);
+ }
+ }
+
+ dev->interrupt = 0;
+ return;
+}
+
+static int
+tulip_rx(struct device *dev)
+{
+ struct tulip_private *lp = (struct tulip_private *)dev->priv;
+ int entry = lp->cur_rx % RX_RING_SIZE;
+ int i;
+
+ if (tulip_debug > 4)
+ printk(" In tulip_rx().\n");
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (lp->rx_ring[entry].status >= 0) {
+ int status = lp->rx_ring[entry].status;
+
+ if (tulip_debug > 4)
+ printk(" tulip_rx() status was %8.8x.\n", status);
+ if ((status & 0x0300) != 0x0300) {
+ printk("%s: Ethernet frame spanned multiple buffers, status %8.8x!\n",
+ dev->name, status);
+ } else if (status & 0x8000) {
+ /* There was a fatal error. */
+ lp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) lp->stats.rx_length_errors++;
+ if (status & 0x0004) lp->stats.rx_frame_errors++;
+ if (status & 0x0002) lp->stats.rx_crc_errors++;
+ if (status & 0x0001) lp->stats.rx_fifo_errors++;
+ } else {
+ /* Malloc up new buffer, compatible with net-2e. */
+ short pkt_len = lp->rx_ring[entry].status >> 16;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ /* Check that at least two ring entries are free.
+ If not, free one and mark stats->rx_dropped++. */
+ for (i=0; i < RX_RING_SIZE; i++)
+ if (lp->rx_ring[(entry+i) % RX_RING_SIZE].status < 0)
+ break;
+
+ if (i > RX_RING_SIZE -2) {
+ lp->stats.rx_dropped++;
+ lp->rx_ring[entry].status = 0x80000000;
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2); /* 16 byte align the data fields */
+ memcpy(skb_put(skb,pkt_len), lp->rx_ring[entry].buffer1, pkt_len);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+
+ lp->rx_ring[entry].status = 0x80000000;
+ entry = (++lp->cur_rx) % RX_RING_SIZE;
+ }
+
+ return 0;
+}
+
+static int
+tulip_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (tulip_debug > 1)
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inl(ioaddr + CSR5));
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x00000000, ioaddr + CSR7);
+ /* Stop the chip's Tx and Rx processes. */
+ outl(inl(ioaddr + CSR6) & ~0x2002, ioaddr + CSR6);
+
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = 0;
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static struct enet_statistics *
+tulip_get_stats(struct device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ short ioaddr = dev->base_addr;
+
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+
+ return &tp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ int csr6 = inl(ioaddr + CSR6) & ~0x00D5;
+
+ if (dev->flags&IFF_PROMISC)
+ { /* Set promiscuous. */
+ outl(csr6 | 0x00C0, ioaddr + CSR6);
+ /* Log any net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ }
+ else if (dev->mc_count > 15 || (dev->flags&IFF_ALLMULTI))
+ {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ outl(csr6 | 0x0080, ioaddr + CSR6);
+ }
+ else
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ struct dev_mc_list *dmi=dev->mc_list;
+ int *setup_frm = tp->setup_frame;
+ unsigned short *eaddrs;
+ int i;
+
+		/* We have <= 15 addresses, so we can use the wonderful
+ 16 address perfect filtering of the Tulip. Note that only
+ the low shortword of setup_frame[] is valid. */
+ outl(csr6 | 0x0000, ioaddr + CSR6);
+ i=0;
+ while(dmi)
+ {
+ eaddrs=(unsigned short *)dmi->dmi_addr;
+ dmi=dmi->next;
+ i++;
+ *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs++;
+ }
+ /* Fill the rest of the table with our physical address. */
+ eaddrs = (unsigned short *)dev->dev_addr;
+ do {
+ *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2];
+ } while (++i < 16);
+
+ /* Now add this frame to the Tx list. */
+ }
+}
+
+static int
+set_mac_address(struct device *dev, void *addr)
+{
+ int i;
+ struct sockaddr *sa=(struct sockaddr *)addr;
+ if (dev->start)
+ return -EBUSY;
+ printk("%s: Setting MAC address to ", dev->name);
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = sa->sa_data[i]);
+ printk(".\n");
+ return 0;
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_tulip = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, tulip_probe
+};
+
+static int io = 0;
+static int irq = 0;
+
+int init_module(void)
+{
+ printk("tulip: Sorry, modularization is not completed\n");
+ return -EIO;
+#if 0
+ if (io == 0)
+ printk("tulip: You should not use auto-probing with insmod!\n");
+ dev_tulip.base_addr = io;
+ dev_tulip.irq = irq;
+ if (register_netdev(&dev_tulip) != 0) {
+ printk("tulip: register_netdev() returned non-zero.\n");
+ return -EIO;
+ }
+ return 0;
+#endif
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_tulip);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c tulip.c"
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/wavelan.c b/i386/i386at/gpl/linux/net/wavelan.c
new file mode 100644
index 00000000..4d09badd
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/wavelan.c
@@ -0,0 +1,2526 @@
+/*
+ * AT&T GIS (nee NCR) WaveLAN card:
+ * An Ethernet-like radio transceiver
+ * controlled by an Intel 82586 coprocessor.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/malloc.h>
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#define STRUCT_CHECK 1
+#ifdef MACH
+#include <net/i82586.h>
+#else
+#include "i82586.h"
+#endif
+#include "wavelan.h"
+
+#ifndef WAVELAN_DEBUG
+#define WAVELAN_DEBUG 0
+#endif /* WAVELAN_DEBUG */
+
+#define WATCHDOG_JIFFIES 512 /* TODO: express in HZ. */
+#define ENABLE_FULL_PROMISCUOUS 0x10000
+
+#define nels(a) (sizeof(a) / sizeof(a[0]))
+
+typedef struct device device;
+typedef struct enet_statistics en_stats;
+typedef struct net_local net_local;
+typedef struct timer_list timer_list;
+
+struct net_local
+{
+ en_stats stats;
+ unsigned int tx_n_in_use;
+ unsigned char nwid[2];
+ unsigned short hacr;
+ unsigned short rx_head;
+ unsigned short rx_last;
+ unsigned short tx_first_free;
+ unsigned short tx_first_in_use;
+ unsigned int nresets;
+ unsigned int correct_nwid;
+ unsigned int wrong_nwid;
+ unsigned int promiscuous;
+ unsigned int full_promiscuous;
+ timer_list watchdog;
+ device *dev;
+ net_local *prev;
+ net_local *next;
+};
+
+extern int wavelan_probe(device *); /* See Space.c */
+
+static const char *version = "wavelan.c:v7 95/4/8\n";
+
+/*
+ * Entry point forward declarations.
+ */
+static int wavelan_probe1(device *, unsigned short);
+static int wavelan_open(device *);
+static int wavelan_send_packet(struct sk_buff *, device *);
+static void wavelan_interrupt(int, struct pt_regs *);
+static int wavelan_close(device *);
+static en_stats *wavelan_get_stats(device *);
+static void wavelan_set_multicast_list(device *);
+static int wavelan_get_info(char*, char**, off_t, int, int);
+
+/*
+ * Other forward declarations.
+ */
+static void wavelan_cu_show_one(device *, net_local *, int, unsigned short);
+static void wavelan_cu_start(device *);
+static void wavelan_ru_start(device *);
+static void wavelan_watchdog(unsigned long);
+#if 0
+static void wavelan_psa_show(psa_t *);
+static void wavelan_mmc_show(unsigned short);
+#endif /* 0 */
+static void wavelan_scb_show(unsigned short);
+static void wavelan_ru_show(device *);
+static void wavelan_cu_show(device *);
+static void wavelan_dev_show(device *);
+static void wavelan_local_show(device *);
+
+static unsigned int wavelan_debug = WAVELAN_DEBUG;
+static net_local *first_wavelan = (net_local *)0;
+
+static
+unsigned long
+wavelan_splhi(void)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ return flags;
+}
+
+static
+void
+wavelan_splx(unsigned long flags)
+{
+ restore_flags(flags);
+}
+
+static
+unsigned short
+hasr_read(unsigned short ioaddr)
+{
+ return inw(HASR(ioaddr));
+}
+
+static
+void
+hacr_write(unsigned short ioaddr, int hacr)
+{
+ outw(hacr, HACR(ioaddr));
+}
+
+static
+void
+hacr_write_slow(unsigned short ioaddr, int hacr)
+{
+ hacr_write(ioaddr, hacr);
+ /* delay might only be needed sometimes */
+ udelay(1000);
+}
+
+/*
+ * Set the channel attention bit.
+ */
+static
+void
+set_chan_attn(unsigned short ioaddr, unsigned short current_hacr)
+{
+ hacr_write(ioaddr, current_hacr | HACR_CA);
+}
+
+/*
+ * Reset, and then set host adaptor into default mode.
+ */
+static
+void
+wavelan_reset(unsigned short ioaddr)
+{
+ hacr_write_slow(ioaddr, HACR_RESET);
+ hacr_write(ioaddr, HACR_DEFAULT);
+}
+
+static
+void
+wavelan_16_off(unsigned short ioaddr, unsigned short hacr)
+{
+ hacr &= ~HACR_16BITS;
+
+ hacr_write(ioaddr, hacr);
+}
+
+static
+void
+wavelan_16_on(unsigned short ioaddr, unsigned short hacr)
+{
+ hacr |= HACR_16BITS;
+
+ hacr_write(ioaddr, hacr);
+}
+
+static
+void
+wavelan_ints_off(device *dev)
+{
+ unsigned short ioaddr;
+ net_local *lp;
+ unsigned long x;
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ x = wavelan_splhi();
+
+ lp->hacr &= ~HACR_INTRON;
+ hacr_write(ioaddr, lp->hacr);
+
+ wavelan_splx(x);
+}
+
+static
+void
+wavelan_ints_on(device *dev)
+{
+ unsigned short ioaddr;
+ net_local *lp;
+ unsigned long x;
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ x = wavelan_splhi();
+
+ lp->hacr |= HACR_INTRON;
+ hacr_write(ioaddr, lp->hacr);
+
+ wavelan_splx(x);
+}
+
+/*
+ * Read bytes from the PSA.
+ */
+static
+void
+psa_read(unsigned short ioaddr, unsigned short hacr, int o, unsigned char *b, int n)
+{
+ wavelan_16_off(ioaddr, hacr);
+
+ while (n-- > 0)
+ {
+ outw(o, PIOR2(ioaddr));
+ o++;
+ *b++ = inb(PIOP2(ioaddr));
+ }
+
+ wavelan_16_on(ioaddr, hacr);
+}
+
+#if defined(IRQ_SET_WORKS)
+/*
+ * Write bytes to the PSA.
+ */
+static
+void
+psa_write(unsigned short ioaddr, unsigned short hacr, int o, unsigned char *b, int n)
+{
+ wavelan_16_off(ioaddr, hacr);
+
+ while (n-- > 0)
+ {
+ outw(o, PIOR2(ioaddr));
+ o++;
+ outb(*b, PIOP2(ioaddr));
+ b++;
+ }
+
+ wavelan_16_on(ioaddr, hacr);
+}
+#endif /* defined(IRQ_SET_WORKS) */
+
+/*
+ * Read bytes from the on-board RAM.
+ */
+static
+void
+obram_read(unsigned short ioaddr, unsigned short o, unsigned char *b, int n)
+{
+ n = (n + 1) / (sizeof(unsigned short) / sizeof(unsigned char));
+
+ outw(o, PIOR1(ioaddr));
+
+ insw(PIOP1(ioaddr), (unsigned short *)b, n);
+}
+
+/*
+ * Write bytes to the on-board RAM.
+ */
+static
+void
+obram_write(unsigned short ioaddr, unsigned short o, unsigned char *b, int n)
+{
+ n = (n + 1) / (sizeof(unsigned short) / sizeof(unsigned char));
+
+ outw(o, PIOR1(ioaddr));
+
+ outsw(PIOP1(ioaddr), (unsigned short *)b, n);
+}
+
+/*
+ * Read bytes from the MMC.
+ */
+static
+void
+mmc_read(unsigned short ioaddr, unsigned short o, unsigned char *b, int n)
+{
+ while (n-- > 0)
+ {
+ while (inw(HASR(ioaddr)) & HASR_MMC_BUSY)
+ ;
+
+ outw(o << 1, MMCR(ioaddr));
+ o++;
+
+ while (inw(HASR(ioaddr)) & HASR_MMC_BUSY)
+ ;
+
+ *b++ = (unsigned char)(inw(MMCR(ioaddr)) >> 8);
+ }
+}
+
+/*
+ * Write bytes to the MMC.
+ */
+static
+void
+mmc_write(unsigned short ioaddr, unsigned short o, unsigned char *b, int n)
+{
+ while (n-- > 0)
+ {
+ while (inw(HASR(ioaddr)) & HASR_MMC_BUSY)
+ ;
+
+ outw((unsigned short)(((unsigned short)*b << 8) | (o << 1) | 1), MMCR(ioaddr));
+ b++;
+ o++;
+ }
+}
+
+static int irqvals[] =
+{
+ 0, 0, 0, 0x01,
+ 0x02, 0x04, 0, 0x08,
+ 0, 0, 0x10, 0x20,
+ 0x40, 0, 0, 0x80,
+};
+
+#if defined(IRQ_SET_WORKS)
+static
+int
+wavelan_unmap_irq(int irq, unsigned char *irqval)
+{
+ if (irq < 0 || irq >= nels(irqvals) || irqvals[irq] == 0)
+ return -1;
+
+ *irqval = (unsigned char)irqvals[irq];
+
+ return 0;
+}
+#endif /* defined(IRQ_SET_WORKS) */
+
+/*
+ * Map values from the irq parameter register to irq numbers.
+ */
+static
+int
+wavelan_map_irq(unsigned char irqval)
+{
+ int irq;
+
+ for (irq = 0; irq < nels(irqvals); irq++)
+ {
+ if (irqvals[irq] == (int)irqval)
+ return irq;
+ }
+
+ return -1;
+}
+
+/*
+ * Initialize the Modem Management Controller.
+ */
+static
+void
+wavelan_mmc_init(device *dev, psa_t *psa)
+{
+ unsigned short ioaddr;
+ net_local *lp;
+ mmw_t m;
+ int configured;
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+ memset(&m, 0x00, sizeof(m));
+
+ /*
+ * configured = psa->psa_conf_status & 1;
+ *
+ * For now we use the persistent PSA
+ * information as little as possible, thereby
+ * allowing us to return to the same known state
+ * during a hardware reset.
+ */
+ configured = 0;
+
+ /*
+ * Set default modem control parameters.
+ * See NCR document 407-0024326 Rev. A.
+ */
+ m.mmw_jabber_enable = 0x01;
+ m.mmw_anten_sel = MMW_ANTEN_SEL_ALG_EN;
+ m.mmw_ifs = 0x20;
+ m.mmw_mod_delay = 0x04;
+ m.mmw_jam_time = 0x38;
+
+ m.mmw_encr_enable = 0;
+ m.mmw_des_io_invert = 0;
+ m.mmw_freeze = 0;
+ m.mmw_decay_prm = 0;
+ m.mmw_decay_updat_prm = 0;
+
+ if (configured)
+ {
+ /*
+ * Use configuration defaults from parameter storage area.
+ */
+ if (psa->psa_undefined & 1)
+ m.mmw_loopt_sel = 0x00;
+ else
+ m.mmw_loopt_sel = MMW_LOOPT_SEL_UNDEFINED;
+
+ m.mmw_thr_pre_set = psa->psa_thr_pre_set & 0x3F;
+ m.mmw_quality_thr = psa->psa_quality_thr & 0x0F;
+ }
+ else
+ {
+ if (lp->promiscuous && lp->full_promiscuous)
+ m.mmw_loopt_sel = MMW_LOOPT_SEL_UNDEFINED;
+ else
+ m.mmw_loopt_sel = 0x00;
+
+ /*
+ * 0x04 for AT,
+ * 0x01 for MCA.
+ */
+ if (psa->psa_comp_number & 1)
+ m.mmw_thr_pre_set = 0x01;
+ else
+ m.mmw_thr_pre_set = 0x04;
+
+ m.mmw_quality_thr = 0x03;
+ }
+
+ m.mmw_netw_id_l = lp->nwid[1];
+ m.mmw_netw_id_h = lp->nwid[0];
+
+ mmc_write(ioaddr, 0, (unsigned char *)&m, sizeof(m));
+}
+
+static
+void
+wavelan_ack(device *dev)
+{
+ unsigned short ioaddr;
+ net_local *lp;
+ unsigned short scb_cs;
+ int i;
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_status), (unsigned char *)&scb_cs, sizeof(scb_cs));
+ scb_cs &= SCB_ST_INT;
+
+ if (scb_cs == 0)
+ return;
+
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command), (unsigned char *)&scb_cs, sizeof(scb_cs));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ for (i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_command), (unsigned char *)&scb_cs, sizeof(scb_cs));
+ if (scb_cs == 0)
+ break;
+
+ udelay(1000);
+ }
+
+ if (i <= 0)
+ printk("%s: wavelan_ack(): board not accepting command.\n", dev->name);
+}
+
+/*
+ * Set channel attention bit and busy wait until command has
+ * completed, then acknowledge the command completion.
+ */
+static
+int
+wavelan_synchronous_cmd(device *dev, const char *str)
+{
+ unsigned short ioaddr;
+ net_local *lp;
+ unsigned short scb_cmd;
+ ach_t cb;
+ int i;
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ scb_cmd = SCB_CMD_CUC & SCB_CMD_CUC_GO;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command), (unsigned char *)&scb_cmd, sizeof(scb_cmd));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ for (i = 64; i > 0; i--)
+ {
+ obram_read(ioaddr, OFFSET_CU, (unsigned char *)&cb, sizeof(cb));
+ if (cb.ac_status & AC_SFLD_C)
+ break;
+
+ udelay(1000);
+ }
+
+ if (i <= 0 || !(cb.ac_status & AC_SFLD_OK))
+ {
+ printk("%s: %s failed; status = 0x%x\n", dev->name, str, cb.ac_status);
+ wavelan_scb_show(ioaddr);
+ return -1;
+ }
+
+ wavelan_ack(dev);
+
+ return 0;
+}
+
+static
+int
+wavelan_hardware_reset(device *dev)
+{
+ unsigned short ioaddr;
+ psa_t psa;
+ net_local *lp;
+ scp_t scp;
+ iscp_t iscp;
+ scb_t scb;
+ ach_t cb;
+ int i;
+ ac_cfg_t cfg;
+ ac_ias_t ias;
+
+ if (wavelan_debug > 0)
+ printk("%s: ->wavelan_hardware_reset(dev=0x%x)\n", dev->name, (unsigned int)dev);
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ lp->nresets++;
+
+ wavelan_reset(ioaddr);
+ lp->hacr = HACR_DEFAULT;
+
+ /*
+ * Clear the onboard RAM.
+ */
+ {
+ unsigned char zeroes[512];
+
+ memset(&zeroes[0], 0x00, sizeof(zeroes));
+
+ for (i = 0; i < I82586_MEMZ; i += sizeof(zeroes))
+ obram_write(ioaddr, i, &zeroes[0], sizeof(zeroes));
+ }
+
+ psa_read(ioaddr, lp->hacr, 0, (unsigned char *)&psa, sizeof(psa));
+
+ wavelan_mmc_init(dev, &psa);
+
+ /*
+ * Construct the command unit structures:
+ * scp, iscp, scb, cb.
+ */
+ memset(&scp, 0x00, sizeof(scp));
+ scp.scp_sysbus = SCP_SY_16BBUS;
+ scp.scp_iscpl = OFFSET_ISCP;
+ obram_write(ioaddr, OFFSET_SCP, (unsigned char *)&scp, sizeof(scp));
+
+ memset(&iscp, 0x00, sizeof(iscp));
+ iscp.iscp_busy = 1;
+ iscp.iscp_offset = OFFSET_SCB;
+ obram_write(ioaddr, OFFSET_ISCP, (unsigned char *)&iscp, sizeof(iscp));
+
+ memset(&scb, 0x00, sizeof(scb));
+ scb.scb_command = SCB_CMD_RESET;
+ scb.scb_cbl_offset = OFFSET_CU;
+ scb.scb_rfa_offset = OFFSET_RU;
+ obram_write(ioaddr, OFFSET_SCB, (unsigned char *)&scb, sizeof(scb));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ for (i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, OFFSET_ISCP, (unsigned char *)&iscp, sizeof(iscp));
+
+ if (iscp.iscp_busy == (unsigned short)0)
+ break;
+
+ udelay(1000);
+ }
+
+ if (i <= 0)
+ {
+ printk("%s: wavelan_hardware_reset(): iscp_busy timeout.\n", dev->name);
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_hardware_reset(): -1\n", dev->name);
+ return -1;
+ }
+
+ for (i = 15; i > 0; i--)
+ {
+ obram_read(ioaddr, OFFSET_SCB, (unsigned char *)&scb, sizeof(scb));
+
+ if (scb.scb_status == (SCB_ST_CX | SCB_ST_CNA))
+ break;
+
+ udelay(1000);
+ }
+
+ if (i <= 0)
+ {
+ printk("%s: wavelan_hardware_reset(): status: expected 0x%02x, got 0x%02x.\n", dev->name, SCB_ST_CX | SCB_ST_CNA, scb.scb_status);
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_hardware_reset(): -1\n", dev->name);
+ return -1;
+ }
+
+ wavelan_ack(dev);
+
+ memset(&cb, 0x00, sizeof(cb));
+ cb.ac_command = AC_CFLD_EL | (AC_CFLD_CMD & acmd_diagnose);
+ cb.ac_link = OFFSET_CU;
+ obram_write(ioaddr, OFFSET_CU, (unsigned char *)&cb, sizeof(cb));
+
+ if (wavelan_synchronous_cmd(dev, "diag()") == -1)
+ {
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_hardware_reset(): -1\n", dev->name);
+ return -1;
+ }
+
+ obram_read(ioaddr, OFFSET_CU, (unsigned char *)&cb, sizeof(cb));
+ if (cb.ac_status & AC_SFLD_FAIL)
+ {
+ printk("%s: wavelan_hardware_reset(): i82586 Self Test failed.\n", dev->name);
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_hardware_reset(): -1\n", dev->name);
+ return -1;
+ }
+
+ memset(&cfg, 0x00, sizeof(cfg));
+
+#if 0
+ /*
+ * The default board configuration.
+ */
+ cfg.fifolim_bytecnt = 0x080c;
+ cfg.addrlen_mode = 0x2600;
+ cfg.linprio_interframe = 0x7820; /* IFS=120, ACS=2 */
+ cfg.slot_time = 0xf00c; /* slottime=12 */
+ cfg.hardware = 0x0008; /* tx even w/o CD */
+ cfg.min_frame_len = 0x0040;
+#endif /* 0 */
+
+ /*
+ * For Linux we invert AC_CFG_ALOC(..) so as to conform
+ * to the way that net packets reach us from above.
+ * (See also ac_tx_t.)
+ */
+ cfg.cfg_byte_cnt = AC_CFG_BYTE_CNT(sizeof(ac_cfg_t) - sizeof(ach_t));
+ cfg.cfg_fifolim = AC_CFG_FIFOLIM(8);
+ cfg.cfg_byte8 = AC_CFG_SAV_BF(0) |
+ AC_CFG_SRDY(0);
+ cfg.cfg_byte9 = AC_CFG_ELPBCK(0) |
+ AC_CFG_ILPBCK(0) |
+ AC_CFG_PRELEN(AC_CFG_PLEN_2) |
+ AC_CFG_ALOC(1) |
+ AC_CFG_ADDRLEN(WAVELAN_ADDR_SIZE);
+ cfg.cfg_byte10 = AC_CFG_BOFMET(0) |
+ AC_CFG_ACR(0) |
+ AC_CFG_LINPRIO(0);
+ cfg.cfg_ifs = 32;
+ cfg.cfg_slotl = 0;
+ cfg.cfg_byte13 = AC_CFG_RETRYNUM(15) |
+ AC_CFG_SLTTMHI(2);
+ cfg.cfg_byte14 = AC_CFG_FLGPAD(0) |
+ AC_CFG_BTSTF(0) |
+ AC_CFG_CRC16(0) |
+ AC_CFG_NCRC(0) |
+ AC_CFG_TNCRS(1) |
+ AC_CFG_MANCH(0) |
+ AC_CFG_BCDIS(0) |
+ AC_CFG_PRM(lp->promiscuous);
+ cfg.cfg_byte15 = AC_CFG_ICDS(0) |
+ AC_CFG_CDTF(0) |
+ AC_CFG_ICSS(0) |
+ AC_CFG_CSTF(0);
+/*
+ cfg.cfg_min_frm_len = AC_CFG_MNFRM(64);
+*/
+ cfg.cfg_min_frm_len = AC_CFG_MNFRM(8);
+
+ cfg.cfg_h.ac_command = AC_CFLD_EL | (AC_CFLD_CMD & acmd_configure);
+ cfg.cfg_h.ac_link = OFFSET_CU;
+ obram_write(ioaddr, OFFSET_CU, (unsigned char *)&cfg, sizeof(cfg));
+
+ if (wavelan_synchronous_cmd(dev, "reset()-configure") == -1)
+ {
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_hardware_reset(): -1\n", dev->name);
+
+ return -1;
+ }
+
+ memset(&ias, 0x00, sizeof(ias));
+ ias.ias_h.ac_command = AC_CFLD_EL | (AC_CFLD_CMD & acmd_ia_setup);
+ ias.ias_h.ac_link = OFFSET_CU;
+ memcpy(&ias.ias_addr[0], (unsigned char *)&dev->dev_addr[0], sizeof(ias.ias_addr));
+ obram_write(ioaddr, OFFSET_CU, (unsigned char *)&ias, sizeof(ias));
+
+ if (wavelan_synchronous_cmd(dev, "reset()-address") == -1)
+ {
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_hardware_reset(): -1\n", dev->name);
+
+ return -1;
+ }
+
+ wavelan_ints_on(dev);
+
+ if (wavelan_debug > 4)
+ wavelan_scb_show(ioaddr);
+
+ wavelan_ru_start(dev);
+ wavelan_cu_start(dev);
+
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_hardware_reset(): 0\n", dev->name);
+
+ return 0;
+}
+
+#if STRUCT_CHECK == 1
+
+static
+const char *
+wavelan_struct_check(void)
+{
+#define SC(t,s,n) if (sizeof(t) != s) return n
+ SC(psa_t, PSA_SIZE, "psa_t");
+ SC(mmw_t, MMW_SIZE, "mmw_t");
+ SC(mmr_t, MMR_SIZE, "mmr_t");
+ SC(ha_t, HA_SIZE, "ha_t");
+#undef SC
+
+ return (char *)0;
+}
+
+#endif /* STRUCT_CHECK == 1 */
+
+/*
+ * Check for a network adaptor of this type.
+ * Return '0' iff one exists.
+ * (There seem to be different interpretations of
+ * the initial value of dev->base_addr.
+ * We follow the example in drivers/net/ne.c.)
+ */
+int
+wavelan_probe(device *dev)
+{
+ int i;
+ int r;
+ short base_addr;
+ static unsigned short iobase[] =
+ {
+#if 0
+ Leave out 0x3C0 for now -- seems to clash
+ with some video controllers.
+ Leave out the others too -- we will always
+ use 0x390 and leave 0x300 for the Ethernet device.
+ 0x300, 0x390, 0x3E0, 0x3C0,
+#endif /* 0 */
+ 0x390,
+ };
+
+ if (wavelan_debug > 0)
+ printk("%s: ->wavelan_probe(dev=0x%x (base_addr=0x%x))\n", dev->name, (unsigned int)dev, (unsigned int)dev->base_addr);
+
+#if STRUCT_CHECK == 1
+ if (wavelan_struct_check() != (char *)0)
+ {
+ printk("%s: structure/compiler botch: \"%s\"\n", dev->name, wavelan_struct_check());
+
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_probe(): ENODEV\n", dev->name);
+
+ return ENODEV;
+ }
+#endif /* STRUCT_CHECK == 1 */
+
+ base_addr = dev->base_addr;
+
+ if (base_addr < 0)
+ {
+ /*
+ * Don't probe at all.
+ */
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_probe(): ENXIO\n", dev->name);
+ return ENXIO;
+ }
+
+ if (base_addr > 0x100)
+ {
+ /*
+ * Check a single specified location.
+ */
+ r = wavelan_probe1(dev, base_addr);
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_probe(): %d\n", dev->name, r);
+ return r;
+ }
+
+ for (i = 0; i < nels(iobase); i++)
+ {
+ if (check_region(iobase[i], sizeof(ha_t)))
+ continue;
+
+ if (wavelan_probe1(dev, iobase[i]) == 0)
+ {
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_probe(): 0\n", dev->name);
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_WAVELAN, 7, "wavelan",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ wavelan_get_info
+ });
+
+ return 0;
+ }
+ }
+
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_probe(): ENODEV\n", dev->name);
+
+ return ENODEV;
+}
+
+static
+int
+wavelan_probe1(device *dev, unsigned short ioaddr)
+{
+ psa_t psa;
+ int irq;
+ int i;
+ net_local *lp;
+ int enable_full_promiscuous;
+
+ if (wavelan_debug > 0)
+ printk("%s: ->wavelan_probe1(dev=0x%x, ioaddr=0x%x)\n", dev->name, (unsigned int)dev, ioaddr);
+
+ wavelan_reset(ioaddr);
+
+ psa_read(ioaddr, HACR_DEFAULT, 0, (unsigned char *)&psa, sizeof(psa));
+
+ /*
+ * Check the first three octets of the MAC address
+ * for the manufacturer's code.
+ */
+ if
+ (
+ psa.psa_univ_mac_addr[0] != SA_ADDR0
+ ||
+ psa.psa_univ_mac_addr[1] != SA_ADDR1
+ ||
+ psa.psa_univ_mac_addr[2] != SA_ADDR2
+ )
+ {
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_probe1(): ENODEV\n", dev->name);
+ return ENODEV;
+ }
+
+ printk("%s: WaveLAN at %#x,", dev->name, ioaddr);
+
+ if (dev->irq != 0)
+ {
+ printk("[WARNING: explicit IRQ value %d ignored: using PSA value instead]", dev->irq);
+#if defined(IRQ_SET_WORKS)
+Leave this out until I can get it to work -- BJ.
+ if (wavelan_unmap_irq(dev->irq, &psa.psa_int_req_no) == -1)
+ {
+ printk(" could not wavelan_unmap_irq(%d, ..) -- ignored.\n", dev->irq);
+ dev->irq = 0;
+ }
+ else
+ {
+ psa_write(ioaddr, HACR_DEFAULT, (char *)&psa.psa_int_req_no - (char *)&psa, (unsigned char *)&psa.psa_int_req_no, sizeof(psa.psa_int_req_no));
+ wavelan_reset(ioaddr);
+ }
+#endif /* defined(IRQ_SET_WORKS) */
+ }
+
+ if ((irq = wavelan_map_irq(psa.psa_int_req_no)) == -1)
+ {
+ printk(" could not wavelan_map_irq(%d).\n", psa.psa_int_req_no);
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_probe1(): EAGAIN\n", dev->name);
+ return EAGAIN;
+ }
+
+ dev->irq = irq;
+
+ request_region(ioaddr, sizeof(ha_t), "wavelan");
+ dev->base_addr = ioaddr;
+
+ /*
+ * The third numeric argument to LILO's
+ * `ether=' control line arrives here as `dev->mem_start'.
+ *
+ * If bit 16 of dev->mem_start is non-zero we enable
+ * full promiscuity.
+ *
+ * If either of the two least significant bytes of
+ * dev->mem_start is non-zero, we use them instead
+ * of the PSA NWID.
+ */
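+ /*
+ * Illustration only (this assumes ENABLE_FULL_PROMISCUOUS is the
+ * bit-16 mask implied by the comment above): a boot line such as
+ * ether=15,0x390,0x10123,eth0
+ * would arrive here as dev->mem_start == 0x10123, enabling full
+ * promiscuity and overriding the PSA NWID with 0x0123.
+ */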
+ enable_full_promiscuous = (dev->mem_start & ENABLE_FULL_PROMISCUOUS) == ENABLE_FULL_PROMISCUOUS;
+ dev->mem_start &= ~ENABLE_FULL_PROMISCUOUS;
+
+ if (dev->mem_start != 0)
+ {
+ psa.psa_nwid[0] = (dev->mem_start >> 8) & 0xFF;
+ psa.psa_nwid[1] = (dev->mem_start >> 0) & 0xFF;
+ }
+
+ dev->mem_start = 0x0000;
+ dev->mem_end = 0x0000;
+ dev->if_port = 0;
+
+ memcpy(&dev->dev_addr[0], &psa.psa_univ_mac_addr[0], WAVELAN_ADDR_SIZE);
+
+ for (i = 0; i < WAVELAN_ADDR_SIZE; i++)
+ printk("%s%02x", (i == 0) ? " " : ":", dev->dev_addr[i]);
+
+ printk(", IRQ %d", dev->irq);
+ if (enable_full_promiscuous)
+ printk(", promisc");
+ printk(", nwid 0x%02x%02x", psa.psa_nwid[0], psa.psa_nwid[1]);
+
+ printk(", PC");
+ switch (psa.psa_comp_number)
+ {
+ case PSA_COMP_PC_AT_915:
+ case PSA_COMP_PC_AT_2400:
+ printk("-AT");
+ break;
+
+ case PSA_COMP_PC_MC_915:
+ case PSA_COMP_PC_MC_2400:
+ printk("-MC");
+ break;
+
+ case PSA_COMP_PCMCIA_915:
+ printk("MCIA");
+ break;
+
+ default:
+ printk("???");
+ break;
+ }
+
+ printk(", ");
+ switch (psa.psa_subband)
+ {
+ case PSA_SUBBAND_915:
+ printk("915");
+ break;
+
+ case PSA_SUBBAND_2425:
+ printk("2425");
+ break;
+
+ case PSA_SUBBAND_2460:
+ printk("2460");
+ break;
+
+ case PSA_SUBBAND_2484:
+ printk("2484");
+ break;
+
+ case PSA_SUBBAND_2430_5:
+ printk("2430.5");
+ break;
+
+ default:
+ printk("???");
+ break;
+ }
+ printk(" MHz");
+
+ printk("\n");
+
+ if (wavelan_debug > 0)
+ printk(version);
+
+ dev->priv = kmalloc(sizeof(net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0x00, sizeof(net_local));
+ lp = (net_local *)dev->priv;
+
+ if (first_wavelan == (net_local *)0)
+ {
+ first_wavelan = lp;
+ lp->prev = lp;
+ lp->next = lp;
+ }
+ else
+ {
+ lp->prev = first_wavelan->prev;
+ lp->next = first_wavelan;
+ first_wavelan->prev->next = lp;
+ first_wavelan->prev = lp;
+ }
+ lp->dev = dev;
+
+ lp->hacr = HACR_DEFAULT;
+
+ lp->full_promiscuous = enable_full_promiscuous;
+ lp->nwid[0] = psa.psa_nwid[0];
+ lp->nwid[1] = psa.psa_nwid[1];
+
+ lp->watchdog.function = wavelan_watchdog;
+ lp->watchdog.data = (unsigned long)dev;
+
+ dev->open = wavelan_open;
+ dev->stop = wavelan_close;
+ dev->hard_start_xmit = wavelan_send_packet;
+ dev->get_stats = wavelan_get_stats;
+ dev->set_multicast_list = &wavelan_set_multicast_list;
+
+ /*
+ * Fill in the fields of the device structure
+ * with ethernet-generic values.
+ */
+ ether_setup(dev);
+
+ dev->flags &= ~IFF_MULTICAST; /* Not yet supported */
+
+ dev->mtu = WAVELAN_MTU;
+
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_probe1(): 0\n", dev->name);
+
+ return 0;
+}
+
+/*
+ * Construct the fd and rbd structures.
+ * Start the receive unit.
+ */
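+/*
+ * Each receive block built below occupies RXBLOCKZ bytes of on-board
+ * RAM, starting at OFFSET_RU:
+ *
+ *   +------+-------+------------------+
+ *   | fd_t | rbd_t | data (MAXDATAZ)  |
+ *   +------+-------+------------------+
+ *
+ * The fd_link_offset fields chain the NRXBLOCKS frame descriptors into
+ * a ring; the last descriptor carries FD_COMMAND_EL and links back to
+ * lp->rx_head.
+ */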
+static
+void
+wavelan_ru_start(device *dev)
+{
+ unsigned short ioaddr;
+ net_local *lp;
+ unsigned short scb_cs;
+ fd_t fd;
+ rbd_t rbd;
+ unsigned short rx;
+ unsigned short rx_next;
+ int i;
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_status), (unsigned char *)&scb_cs, sizeof(scb_cs));
+ if ((scb_cs & SCB_ST_RUS) == SCB_ST_RUS_RDY)
+ return;
+
+ lp->rx_head = OFFSET_RU;
+
+ for (i = 0, rx = lp->rx_head; i < NRXBLOCKS; i++, rx = rx_next)
+ {
+ rx_next = (i == NRXBLOCKS - 1) ? lp->rx_head : rx + RXBLOCKZ;
+
+ fd.fd_status = 0;
+ fd.fd_command = (i == NRXBLOCKS - 1) ? FD_COMMAND_EL : 0;
+ fd.fd_link_offset = rx_next;
+ fd.fd_rbd_offset = rx + sizeof(fd);
+ obram_write(ioaddr, rx, (unsigned char *)&fd, sizeof(fd));
+
+ rbd.rbd_status = 0;
+ rbd.rbd_next_rbd_offset = I82586NULL;
+ rbd.rbd_bufl = rx + sizeof(fd) + sizeof(rbd);
+ rbd.rbd_bufh = 0;
+ rbd.rbd_el_size = RBD_EL | (RBD_SIZE & MAXDATAZ);
+ obram_write(ioaddr, rx + sizeof(fd), (unsigned char *)&rbd, sizeof(rbd));
+
+ lp->rx_last = rx;
+ }
+
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_rfa_offset), (unsigned char *)&lp->rx_head, sizeof(lp->rx_head));
+
+ scb_cs = SCB_CMD_RUC_GO;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command), (unsigned char *)&scb_cs, sizeof(scb_cs));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ for (i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_command), (unsigned char *)&scb_cs, sizeof(scb_cs));
+ if (scb_cs == 0)
+ break;
+
+ udelay(1000);
+ }
+
+ if (i <= 0)
+ printk("%s: wavelan_ru_start(): board not accepting command.\n", dev->name);
+}
+
+/*
+ * Initialise the transmit blocks.
+ * Start the command unit executing the NOP
+ * self-loop of the first transmit block.
+ */
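+/*
+ * Each transmit block built below occupies TXBLOCKZ bytes of on-board
+ * RAM, starting at OFFSET_CU:
+ *
+ *   +---------+----------+-------+------------------+
+ *   | ac_tx_t | ac_nop_t | tbd_t | data (MAXDATAZ)  |
+ *   +---------+----------+-------+------------------+
+ *
+ * Every transmit command links to its own NOP and every NOP initially
+ * links to itself, so the command unit idles in a NOP self-loop until
+ * hardware_send_packet() re-points a predecessor's NOP at a new block.
+ */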
+static
+void
+wavelan_cu_start(device *dev)
+{
+ unsigned short ioaddr;
+ net_local *lp;
+ int i;
+ unsigned short txblock;
+ unsigned short first_nop;
+ unsigned short scb_cs;
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ lp->tx_first_free = OFFSET_CU;
+ lp->tx_first_in_use = I82586NULL;
+
+ for
+ (
+ i = 0, txblock = OFFSET_CU;
+ i < NTXBLOCKS;
+ i++, txblock += TXBLOCKZ
+ )
+ {
+ ac_tx_t tx;
+ ac_nop_t nop;
+ tbd_t tbd;
+ unsigned short tx_addr;
+ unsigned short nop_addr;
+ unsigned short tbd_addr;
+ unsigned short buf_addr;
+
+ tx_addr = txblock;
+ nop_addr = tx_addr + sizeof(tx);
+ tbd_addr = nop_addr + sizeof(nop);
+ buf_addr = tbd_addr + sizeof(tbd);
+
+ tx.tx_h.ac_status = 0;
+ tx.tx_h.ac_command = acmd_transmit | AC_CFLD_I;
+ tx.tx_h.ac_link = nop_addr;
+ tx.tx_tbd_offset = tbd_addr;
+ obram_write(ioaddr, tx_addr, (unsigned char *)&tx, sizeof(tx));
+
+ nop.nop_h.ac_status = 0;
+ nop.nop_h.ac_command = acmd_nop;
+ nop.nop_h.ac_link = nop_addr;
+ obram_write(ioaddr, nop_addr, (unsigned char *)&nop, sizeof(nop));
+
+ tbd.tbd_status = TBD_STATUS_EOF;
+ tbd.tbd_next_bd_offset = I82586NULL;
+ tbd.tbd_bufl = buf_addr;
+ tbd.tbd_bufh = 0;
+ obram_write(ioaddr, tbd_addr, (unsigned char *)&tbd, sizeof(tbd));
+ }
+
+ first_nop = OFFSET_CU + (NTXBLOCKS - 1) * TXBLOCKZ + sizeof(ac_tx_t);
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_cbl_offset), (unsigned char *)&first_nop, sizeof(first_nop));
+
+ scb_cs = SCB_CMD_CUC_GO;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command), (unsigned char *)&scb_cs, sizeof(scb_cs));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ for (i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_command), (unsigned char *)&scb_cs, sizeof(scb_cs));
+ if (scb_cs == 0)
+ break;
+
+ udelay(1000);
+ }
+
+ if (i <= 0)
+ printk("%s: wavelan_cu_start(): board not accepting command.\n", dev->name);
+
+ lp->tx_n_in_use = 0;
+ dev->tbusy = 0;
+}
+
+static
+int
+wavelan_open(device *dev)
+{
+ unsigned short ioaddr;
+ net_local *lp;
+ unsigned long x;
+ int r;
+
+ if (wavelan_debug > 0)
+ printk("%s: ->wavelan_open(dev=0x%x)\n", dev->name, (unsigned int)dev);
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ if (dev->irq == 0)
+ {
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_open(): -ENXIO\n", dev->name);
+ return -ENXIO;
+ }
+
+ if
+ (
+ irq2dev_map[dev->irq] != (device *)0
+ /* This is always true, but avoid the false IRQ. */
+ ||
+ (irq2dev_map[dev->irq] = dev) == (device *)0
+ ||
+ request_irq(dev->irq, &wavelan_interrupt, 0, "WaveLAN") != 0
+ )
+ {
+ irq2dev_map[dev->irq] = (device *)0;
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_open(): -EAGAIN\n", dev->name);
+ return -EAGAIN;
+ }
+
+ x = wavelan_splhi();
+ if ((r = wavelan_hardware_reset(dev)) != -1)
+ {
+ dev->interrupt = 0;
+ dev->start = 1;
+ }
+ wavelan_splx(x);
+
+ if (r == -1)
+ {
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = (device *)0;
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_open(): -EAGAIN(2)\n", dev->name);
+ return -EAGAIN;
+ }
+
+ MOD_INC_USE_COUNT;
+
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_open(): 0\n", dev->name);
+
+ return 0;
+}
+
+static
+void
+hardware_send_packet(device *dev, void *buf, short length)
+{
+ unsigned short ioaddr;
+ net_local *lp;
+ unsigned short txblock;
+ unsigned short txpred;
+ unsigned short tx_addr;
+ unsigned short nop_addr;
+ unsigned short tbd_addr;
+ unsigned short buf_addr;
+ ac_tx_t tx;
+ ac_nop_t nop;
+ tbd_t tbd;
+ unsigned long x;
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ x = wavelan_splhi();
+
+ txblock = lp->tx_first_free;
+ txpred = txblock - TXBLOCKZ;
+ if (txpred < OFFSET_CU)
+ txpred += NTXBLOCKS * TXBLOCKZ;
+ lp->tx_first_free += TXBLOCKZ;
+ if (lp->tx_first_free >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ lp->tx_first_free -= NTXBLOCKS * TXBLOCKZ;
+
+/*
+if (lp->tx_n_in_use > 0)
+ printk("%c", "0123456789abcdefghijk"[lp->tx_n_in_use]);
+*/
+
+ lp->tx_n_in_use++;
+
+ tx_addr = txblock;
+ nop_addr = tx_addr + sizeof(tx);
+ tbd_addr = nop_addr + sizeof(nop);
+ buf_addr = tbd_addr + sizeof(tbd);
+
+ /*
+ * Transmit command.
+ */
+ tx.tx_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_tx_t, tx_addr, tx_h.ac_status), (unsigned char *)&tx.tx_h.ac_status, sizeof(tx.tx_h.ac_status));
+
+ /*
+ * NOP command.
+ */
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status), (unsigned char *)&nop.nop_h.ac_status, sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = nop_addr;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link), (unsigned char *)&nop.nop_h.ac_link, sizeof(nop.nop_h.ac_link));
+
+ /*
+ * Transmit buffer descriptor.
+ */
+ tbd.tbd_status = TBD_STATUS_EOF | (TBD_STATUS_ACNT & length);
+ tbd.tbd_next_bd_offset = I82586NULL;
+ tbd.tbd_bufl = buf_addr;
+ tbd.tbd_bufh = 0;
+ obram_write(ioaddr, tbd_addr, (unsigned char *)&tbd, sizeof(tbd));
+
+ /*
+ * Data.
+ */
+ obram_write(ioaddr, buf_addr, buf, length);
+
+ /*
+ * Overwrite the predecessor NOP link
+ * so that it points to this txblock.
+ */
+ nop_addr = txpred + sizeof(tx);
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status), (unsigned char *)&nop.nop_h.ac_status, sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = txblock;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link), (unsigned char *)&nop.nop_h.ac_link, sizeof(nop.nop_h.ac_link));
+
+ if (lp->tx_first_in_use == I82586NULL)
+ lp->tx_first_in_use = txblock;
+
+ if (lp->tx_n_in_use < NTXBLOCKS - 1)
+ dev->tbusy = 0;
+
+ dev->trans_start = jiffies;
+
+ if (lp->watchdog.next == (timer_list *)0)
+ wavelan_watchdog((unsigned long)dev);
+
+ wavelan_splx(x);
+
+ if (wavelan_debug > 4)
+ {
+ unsigned char *a;
+
+ a = (unsigned char *)buf;
+
+ printk
+ (
+ "%s: tx: dest %02x:%02x:%02x:%02x:%02x:%02x, length %d, tbd.tbd_bufl 0x%x.\n",
+ dev->name,
+ a[0], a[1], a[2], a[3], a[4], a[5],
+ length,
+ buf_addr
+ );
+ }
+}
+
+static
+int
+wavelan_send_packet(struct sk_buff *skb, device *dev)
+{
+ unsigned short ioaddr;
+
+ ioaddr = dev->base_addr;
+
+ if (dev->tbusy)
+ {
+ /*
+ * If we get here, some higher level
+ * has decided we are broken.
+ */
+ int tickssofar;
+
+ tickssofar = jiffies - dev->trans_start;
+
+ /*
+ * For the moment we rely on wavelan_watchdog() instead, as it
+ * allows finer control over exactly when we declare a failure.
+ * With the test below commented out we always return here, so
+ * the diagnostic dump and reset that follow are never reached.
+ *
+ if (tickssofar < 5)
+ */
+ return 1;
+
+ wavelan_scb_show(ioaddr);
+ wavelan_ru_show(dev);
+ wavelan_cu_show(dev);
+ wavelan_dev_show(dev);
+ wavelan_local_show(dev);
+
+ printk("%s: transmit timed out -- resetting board.\n", dev->name);
+
+ (void)wavelan_hardware_reset(dev);
+ }
+
+ /*
+ * If some higher layer thinks we've missed
+ * a tx-done interrupt we are passed NULL.
+ * Caution: dev_tint() handles the cli()/sti() itself.
+ */
+ if (skb == (struct sk_buff *)0)
+ {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /*
+ * Block a timer-based transmit from overlapping.
+ */
+ if (set_bit(0, (void *)&dev->tbusy) == 0)
+ {
+ short length;
+ unsigned char *buf;
+
+ length = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+ buf = skb->data;
+
+ hardware_send_packet(dev, buf, length);
+ }
+ else
+ printk("%s: Transmitter access conflict.\n", dev->name);
+
+ dev_kfree_skb(skb, FREE_WRITE);
+
+ return 0;
+}
+
+#if 0
+static
+int
+addrcmp(unsigned char *a0, unsigned char *a1)
+{
+ int i;
+
+ for (i = 0; i < WAVELAN_ADDR_SIZE; i++)
+ {
+ if (a0[i] != a1[i])
+ return a0[i] - a1[i];
+ }
+
+ return 0;
+}
+#endif /* 0 */
+
+/*
+ * Transfer as many packets as we can
+ * from the device RAM.
+ * Called by the interrupt handler.
+ */
+static
+void
+wavelan_receive(device *dev)
+{
+ unsigned short ioaddr;
+ net_local *lp;
+ int nreaped;
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+ nreaped = 0;
+
+ for (;;)
+ {
+ fd_t fd;
+ rbd_t rbd;
+ ushort pkt_len;
+ int sksize;
+ struct sk_buff *skb;
+
+ obram_read(ioaddr, lp->rx_head, (unsigned char *)&fd, sizeof(fd));
+
+ if ((fd.fd_status & FD_STATUS_C) != FD_STATUS_C)
+ break;
+
+ nreaped++;
+
+ if
+ (
+ (fd.fd_status & (FD_STATUS_B | FD_STATUS_OK))
+ !=
+ (FD_STATUS_B | FD_STATUS_OK)
+ )
+ {
+ /*
+ * Not sure about this one -- it does not seem
+ * to be an error so we will keep quiet about it.
+ if ((fd.fd_status & FD_STATUS_B) != FD_STATUS_B)
+ printk("%s: frame not consumed by RU.\n", dev->name);
+ */
+
+ if ((fd.fd_status & FD_STATUS_OK) != FD_STATUS_OK)
+ printk("%s: frame not received successfully.\n", dev->name);
+ }
+
+ if ((fd.fd_status & (FD_STATUS_S6 | FD_STATUS_S7 | FD_STATUS_S8 | FD_STATUS_S9 | FD_STATUS_S10 | FD_STATUS_S11)) != 0)
+ {
+ lp->stats.rx_errors++;
+
+ if ((fd.fd_status & FD_STATUS_S6) != 0)
+ printk("%s: no EOF flag.\n", dev->name);
+
+ if ((fd.fd_status & FD_STATUS_S7) != 0)
+ {
+ lp->stats.rx_length_errors++;
+ printk("%s: frame too short.\n", dev->name);
+ }
+
+ if ((fd.fd_status & FD_STATUS_S8) != 0)
+ {
+ lp->stats.rx_over_errors++;
+ printk("%s: rx DMA overrun.\n", dev->name);
+ }
+
+ if ((fd.fd_status & FD_STATUS_S9) != 0)
+ {
+ lp->stats.rx_fifo_errors++;
+ printk("%s: ran out of resources.\n", dev->name);
+ }
+
+ if ((fd.fd_status & FD_STATUS_S10) != 0)
+ {
+ lp->stats.rx_frame_errors++;
+ printk("%s: alignment error.\n", dev->name);
+ }
+
+ if ((fd.fd_status & FD_STATUS_S11) != 0)
+ {
+ lp->stats.rx_crc_errors++;
+ printk("%s: CRC error.\n", dev->name);
+ }
+ }
+
+ if (fd.fd_rbd_offset == I82586NULL)
+ printk("%s: frame has no data.\n", dev->name);
+ else
+ {
+ obram_read(ioaddr, fd.fd_rbd_offset, (unsigned char *)&rbd, sizeof(rbd));
+
+ if ((rbd.rbd_status & RBD_STATUS_EOF) != RBD_STATUS_EOF)
+ printk("%s: missing EOF flag.\n", dev->name);
+
+ if ((rbd.rbd_status & RBD_STATUS_F) != RBD_STATUS_F)
+ printk("%s: missing F flag.\n", dev->name);
+
+ pkt_len = rbd.rbd_status & RBD_STATUS_ACNT;
+
+#if 0
+ {
+ unsigned char addr[WAVELAN_ADDR_SIZE];
+ int i;
+ static unsigned char toweraddr[WAVELAN_ADDR_SIZE] =
+ {
+ 0x08, 0x00, 0x0e, 0x20, 0x3e, 0xd3,
+ };
+
+ obram_read(ioaddr, rbd.rbd_bufl + sizeof(addr), &addr[0], sizeof(addr));
+ if
+ (
+ /*
+ addrcmp(&addr[0], &dev->dev_addr[0]) != 0
+ &&
+ */
+ addrcmp(&addr[0], toweraddr) != 0
+ )
+ {
+ printk("%s: foreign MAC source addr=", dev->name);
+ for (i = 0; i < WAVELAN_ADDR_SIZE; i++)
+ printk("%s%02x", (i == 0) ? "" : ":", addr[i]);
+ printk("\n");
+ }
+ }
+#endif /* 0 */
+
+ if (wavelan_debug > 5)
+ {
+ unsigned char addr[WAVELAN_ADDR_SIZE];
+ unsigned short ltype;
+ int i;
+
+#if 0
+ printk("%s: fd_dest=", dev->name);
+ for (i = 0; i < WAVELAN_ADDR_SIZE; i++)
+ printk("%s%02x", (i == 0) ? "" : ":", fd.fd_dest[i]);
+ printk("\n");
+
+ printk("%s: fd_src=", dev->name);
+ for (i = 0; i < WAVELAN_ADDR_SIZE; i++)
+ printk("%s%02x", (i == 0) ? "" : ":", fd.fd_src[i]);
+ printk("\n");
+ printk("%s: fd_length=%d\n", dev->name, fd.fd_length);
+#endif /* 0 */
+
+ obram_read(ioaddr, rbd.rbd_bufl, &addr[0], sizeof(addr));
+ printk("%s: dest=", dev->name);
+ for (i = 0; i < WAVELAN_ADDR_SIZE; i++)
+ printk("%s%02x", (i == 0) ? "" : ":", addr[i]);
+ printk("\n");
+
+ obram_read(ioaddr, rbd.rbd_bufl + sizeof(addr), &addr[0], sizeof(addr));
+ printk("%s: src=", dev->name);
+ for (i = 0; i < WAVELAN_ADDR_SIZE; i++)
+ printk("%s%02x", (i == 0) ? "" : ":", addr[i]);
+ printk("\n");
+
+ obram_read(ioaddr, rbd.rbd_bufl + sizeof(addr) * 2, (unsigned char *)&ltype, sizeof(ltype));
+ printk("%s: ntohs(length/type)=0x%04x\n", dev->name, ntohs(ltype));
+ }
+
+ sksize = pkt_len;
+
+ if ((skb = dev_alloc_skb(sksize)) == (struct sk_buff *)0)
+ {
+ printk("%s: could not alloc_skb(%d, GFP_ATOMIC).\n", dev->name, sksize);
+ lp->stats.rx_dropped++;
+ }
+ else
+ {
+ skb->dev = dev;
+
+ obram_read(ioaddr, rbd.rbd_bufl, skb_put(skb,pkt_len), pkt_len);
+
+ if (wavelan_debug > 5)
+ {
+ int i;
+ int maxi;
+
+ printk("%s: pkt_len=%d, data=\"", dev->name, pkt_len);
+
+ if ((maxi = pkt_len) > 16)
+ maxi = 16;
+
+ for (i = 0; i < maxi; i++)
+ {
+ unsigned char c;
+
+ c = skb->data[i];
+ if (c >= ' ' && c <= '~')
+ printk(" %c", skb->data[i]);
+ else
+ printk("%02x", skb->data[i]);
+ }
+
+ if (maxi < pkt_len)
+ printk("..");
+
+ printk("\"\n\n");
+ }
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+
+ lp->stats.rx_packets++;
+ }
+ }
+
+ fd.fd_status = 0;
+ obram_write(ioaddr, fdoff(lp->rx_head, fd_status), (unsigned char *)&fd.fd_status, sizeof(fd.fd_status));
+
+ fd.fd_command = FD_COMMAND_EL;
+ obram_write(ioaddr, fdoff(lp->rx_head, fd_command), (unsigned char *)&fd.fd_command, sizeof(fd.fd_command));
+
+ fd.fd_command = 0;
+ obram_write(ioaddr, fdoff(lp->rx_last, fd_command), (unsigned char *)&fd.fd_command, sizeof(fd.fd_command));
+
+ lp->rx_last = lp->rx_head;
+ lp->rx_head = fd.fd_link_offset;
+ }
+
+/*
+ if (nreaped > 1)
+ printk("r%d", nreaped);
+*/
+}
+
+/*
+ * Command completion interrupt.
+ * Reclaim as many freed tx buffers as we can.
+ */
+static
+int
+wavelan_complete(device *dev, unsigned short ioaddr, net_local *lp)
+{
+ int nreaped;
+
+ nreaped = 0;
+
+ for (;;)
+ {
+ unsigned short tx_status;
+
+ if (lp->tx_first_in_use == I82586NULL)
+ break;
+
+ obram_read(ioaddr, acoff(lp->tx_first_in_use, ac_status), (unsigned char *)&tx_status, sizeof(tx_status));
+
+ if ((tx_status & AC_SFLD_C) == 0)
+ break;
+
+ nreaped++;
+
+ --lp->tx_n_in_use;
+
+/*
+if (lp->tx_n_in_use > 0)
+ printk("%c", "0123456789abcdefghijk"[lp->tx_n_in_use]);
+*/
+
+ if (lp->tx_n_in_use <= 0)
+ lp->tx_first_in_use = I82586NULL;
+ else
+ {
+ lp->tx_first_in_use += TXBLOCKZ;
+ if (lp->tx_first_in_use >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ lp->tx_first_in_use -= NTXBLOCKS * TXBLOCKZ;
+ }
+
+ if (tx_status & AC_SFLD_OK)
+ {
+ int ncollisions;
+
+ lp->stats.tx_packets++;
+ ncollisions = tx_status & AC_SFLD_MAXCOL;
+ lp->stats.collisions += ncollisions;
+ /*
+ if (ncollisions > 0)
+ printk("%s: tx completed after %d collisions.\n", dev->name, ncollisions);
+ */
+ }
+ else
+ {
+ lp->stats.tx_errors++;
+ if (tx_status & AC_SFLD_S10)
+ {
+ lp->stats.tx_carrier_errors++;
+ if (wavelan_debug > 0)
+ printk("%s: tx error: no CS.\n", dev->name);
+ }
+ if (tx_status & AC_SFLD_S9)
+ {
+ lp->stats.tx_carrier_errors++;
+ printk("%s: tx error: lost CTS.\n", dev->name);
+ }
+ if (tx_status & AC_SFLD_S8)
+ {
+ lp->stats.tx_fifo_errors++;
+ printk("%s: tx error: slow DMA.\n", dev->name);
+ }
+ if (tx_status & AC_SFLD_S6)
+ {
+ lp->stats.tx_heartbeat_errors++;
+ if (wavelan_debug > 0)
+ printk("%s: tx error: heart beat.\n", dev->name);
+ }
+ if (tx_status & AC_SFLD_S5)
+ {
+ lp->stats.tx_aborted_errors++;
+ if (wavelan_debug > 0)
+ printk("%s: tx error: too many collisions.\n", dev->name);
+ }
+ }
+
+ if (wavelan_debug > 5)
+ printk("%s: tx completed, tx_status 0x%04x.\n", dev->name, tx_status);
+ }
+
+/*
+ if (nreaped > 1)
+ printk("c%d", nreaped);
+*/
+
+ /*
+ * Inform upper layers.
+ */
+ if (lp->tx_n_in_use < NTXBLOCKS - 1)
+ {
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+
+ return nreaped;
+}
+
+static
+void
+wavelan_watchdog(unsigned long a)
+{
+ device *dev;
+ net_local *lp;
+ unsigned short ioaddr;
+ unsigned long x;
+ unsigned int nreaped;
+
+ x = wavelan_splhi();
+
+ dev = (device *)a;
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ if (lp->tx_n_in_use <= 0)
+ {
+ wavelan_splx(x);
+ return;
+ }
+
+ lp->watchdog.expires = jiffies+WATCHDOG_JIFFIES;
+ add_timer(&lp->watchdog);
+
+ if (jiffies - dev->trans_start < WATCHDOG_JIFFIES)
+ {
+ wavelan_splx(x);
+ return;
+ }
+
+ nreaped = wavelan_complete(dev, ioaddr, lp);
+
+ printk("%s: warning: wavelan_watchdog(): %d reaped, %d remain.\n", dev->name, nreaped, lp->tx_n_in_use);
+ /*
+ wavelan_scb_show(ioaddr);
+ wavelan_ru_show(dev);
+ wavelan_cu_show(dev);
+ wavelan_dev_show(dev);
+ wavelan_local_show(dev);
+ */
+
+ wavelan_splx(x);
+}
+
+static
+void
+wavelan_interrupt(int irq, struct pt_regs *regs)
+{
+ device *dev;
+ unsigned short ioaddr;
+ net_local *lp;
+ unsigned short hasr;
+ unsigned short status;
+ unsigned short ack_cmd;
+
+ if ((dev = (device *)(irq2dev_map[irq])) == (device *)0)
+ {
+ printk("wavelan_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ dev->interrupt = 1;
+
+ if ((hasr = hasr_read(ioaddr)) & HASR_MMC_INTR)
+ {
+ unsigned char dce_status;
+
+ /*
+ * Interrupt from the modem management controller.
+ * Reading mmr_dce_status below clears it; the value itself
+ * is ignored for now.
+ */
+ mmc_read(ioaddr, mmroff(0, mmr_dce_status), &dce_status, sizeof(dce_status));
+ if (wavelan_debug > 0)
+ printk("%s: warning: wavelan_interrupt(): unexpected mmc interrupt: status 0x%04x.\n", dev->name, dce_status);
+ }
+
+ if ((hasr & HASR_82586_INTR) == 0)
+ {
+ dev->interrupt = 0;
+ if (wavelan_debug > 0)
+ printk("%s: warning: wavelan_interrupt() but (hasr & HASR_82586_INTR) == 0.\n", dev->name);
+ return;
+ }
+
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_status), (unsigned char *)&status, sizeof(status));
+
+ /*
+ * Acknowledge the interrupt(s).
+ */
+ ack_cmd = status & SCB_ST_INT;
+
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command), (unsigned char *)&ack_cmd, sizeof(ack_cmd));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ if (wavelan_debug > 5)
+ printk("%s: interrupt, status 0x%04x.\n", dev->name, status);
+
+ if ((status & SCB_ST_CX) == SCB_ST_CX)
+ {
+ /*
+ * Command completed.
+ */
+ if (wavelan_debug > 5)
+ printk("%s: command completed.\n", dev->name);
+ (void)wavelan_complete(dev, ioaddr, lp);
+ }
+
+ if ((status & SCB_ST_FR) == SCB_ST_FR)
+ {
+ /*
+ * Frame received.
+ */
+ if (wavelan_debug > 5)
+ printk("%s: received packet.\n", dev->name);
+ wavelan_receive(dev);
+ }
+
+ if
+ (
+ (status & SCB_ST_CNA) == SCB_ST_CNA
+ ||
+ (((status & SCB_ST_CUS) != SCB_ST_CUS_ACTV) && dev->start)
+ )
+ {
+ printk("%s: warning: CU inactive -- restarting.\n", dev->name);
+
+ (void)wavelan_hardware_reset(dev);
+ }
+
+ if
+ (
+ (status & SCB_ST_RNR) == SCB_ST_RNR
+ ||
+ (((status & SCB_ST_RUS) != SCB_ST_RUS_RDY) && dev->start)
+ )
+ {
+ printk("%s: warning: RU not ready -- restarting.\n", dev->name);
+
+ (void)wavelan_hardware_reset(dev);
+ }
+
+ dev->interrupt = 0;
+}
+
+static
+int
+wavelan_close(device *dev)
+{
+ unsigned short ioaddr;
+ net_local *lp;
+ unsigned short scb_cmd;
+
+ if (wavelan_debug > 0)
+ printk("%s: ->wavelan_close(dev=0x%x)\n", dev->name, (unsigned int)dev);
+
+ ioaddr = dev->base_addr;
+ lp = (net_local *)dev->priv;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /*
+ * Flush the Tx and disable Rx.
+ */
+ scb_cmd = (SCB_CMD_CUC & SCB_CMD_CUC_SUS) | (SCB_CMD_RUC & SCB_CMD_RUC_SUS);
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command), (unsigned char *)&scb_cmd, sizeof(scb_cmd));
+ set_chan_attn(ioaddr, lp->hacr);
+
+ wavelan_ints_off(dev);
+
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = (device *)0;
+
+ /*
+ * Release the ioport-region.
+ */
+ release_region(ioaddr, sizeof(ha_t));
+
+ MOD_DEC_USE_COUNT;
+
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_close(): 0\n", dev->name);
+
+ return 0;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static
+en_stats *
+wavelan_get_stats(device *dev)
+{
+ net_local *lp;
+
+ lp = (net_local *)dev->priv;
+
+ return &lp->stats;
+}
+
+static
+void
+wavelan_set_multicast_list(device *dev)
+{
+ net_local *lp;
+ unsigned long x;
+
+ if (wavelan_debug > 0)
+ printk("%s: ->wavelan_set_multicast_list(dev=0x%x)", dev->name, dev);
+
+ lp = (net_local *)dev->priv;
+
+ if(dev->flags&IFF_PROMISC)
+ {
+ /*
+ * Promiscuous mode: receive all packets.
+ */
+ lp->promiscuous = 1;
+ x = wavelan_splhi();
+ (void)wavelan_hardware_reset(dev);
+ wavelan_splx(x);
+ }
+#if MULTICAST_IS_ADDED
+ else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
+ {
+
+
+ }
+#endif
+ else
+ {
+ /*
+ * Normal mode: disable promiscuous mode,
+ * clear multicast list.
+ */
+ lp->promiscuous = 0;
+ x = wavelan_splhi();
+ (void)wavelan_hardware_reset(dev);
+ wavelan_splx(x);
+ }
+
+ if (wavelan_debug > 0)
+ printk("%s: <-wavelan_set_multicast_list()\n", dev->name);
+}
+
+/*
+ * Extra WaveLAN-specific device data.
+ * "cat /proc/net/wavelan" -- see fs/proc/net.c.
+ */
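+/*
+ * Each line produced by sprintf_stats() below matches the header that
+ * wavelan_get_info() prints: interface name, mmr_dce_status, cumulative
+ * correct-NWID and wrong-NWID counts, signal level, silence level,
+ * signal quality, transmit blocks in use and reset count -- every field
+ * except the reset count in hexadecimal.
+ */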
+static
+int
+sprintf_stats(char *buffer, device *dev)
+{
+ net_local *lp;
+ unsigned char v;
+ mmr_t m;
+
+ lp = (net_local *)dev->priv;
+
+ if (lp == (net_local *)0)
+ return sprintf(buffer, "%6s: No statistics available.\n", dev->name);
+
+ v = (unsigned char)1;
+ mmc_write(dev->base_addr, mmwoff(0, mmw_freeze), &v, sizeof(v));
+
+ mmc_read(dev->base_addr, mmroff(0, mmr_dce_status), &m.mmr_dce_status, sizeof(m.mmr_dce_status));
+ mmc_read(dev->base_addr, mmroff(0, mmr_correct_nwid_h), &m.mmr_correct_nwid_h, sizeof(m.mmr_correct_nwid_h));
+ mmc_read(dev->base_addr, mmroff(0, mmr_correct_nwid_l), &m.mmr_correct_nwid_l, sizeof(m.mmr_correct_nwid_l));
+ mmc_read(dev->base_addr, mmroff(0, mmr_wrong_nwid_h), &m.mmr_wrong_nwid_h, sizeof(m.mmr_wrong_nwid_h));
+ mmc_read(dev->base_addr, mmroff(0, mmr_wrong_nwid_l), &m.mmr_wrong_nwid_l, sizeof(m.mmr_wrong_nwid_l));
+ mmc_read(dev->base_addr, mmroff(0, mmr_signal_lvl), &m.mmr_signal_lvl, sizeof(m.mmr_signal_lvl));
+ mmc_read(dev->base_addr, mmroff(0, mmr_silence_lvl), &m.mmr_silence_lvl, sizeof(m.mmr_silence_lvl));
+ mmc_read(dev->base_addr, mmroff(0, mmr_sgnl_qual), &m.mmr_sgnl_qual, sizeof(m.mmr_sgnl_qual));
+
+ v = (unsigned char)0;
+ mmc_write(dev->base_addr, mmwoff(0, mmw_freeze), &v, sizeof(v));
+
+ lp->correct_nwid += (m.mmr_correct_nwid_h << 8) | m.mmr_correct_nwid_l;
+ lp->wrong_nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
+
+ return sprintf
+ (
+ buffer,
+ "%6s: %02x %08x %08x %02x %02x %02x %02x %u\n",
+ dev->name,
+ m.mmr_dce_status,
+ lp->correct_nwid,
+ lp->wrong_nwid,
+ m.mmr_signal_lvl,
+ m.mmr_silence_lvl,
+ m.mmr_sgnl_qual,
+ lp->tx_n_in_use,
+ lp->nresets
+ );
+}
+
+static int
+wavelan_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
+{
+ int len;
+ off_t begin;
+ off_t pos;
+ int size;
+ unsigned long x;
+
+ len = 0;
+ begin = 0;
+ pos = 0;
+
+ size = sprintf(buffer, "%s", "Iface | dce +nwid -nwid lvl slnc qual ntxq nrst\n");
+
+ pos += size;
+ len += size;
+
+ x = wavelan_splhi();
+
+ if (first_wavelan != (net_local *)0)
+ {
+ net_local *lp;
+
+ lp = first_wavelan;
+ do
+ {
+ size = sprintf_stats(buffer + len, lp->dev);
+
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset)
+ {
+ len = 0;
+ begin = pos;
+ }
+
+ if (pos > offset + length)
+ break;
+ }
+ while ((lp = lp->next) != first_wavelan);
+ }
+
+ wavelan_splx(x);
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin); /* Start slop */
+ if (len > length)
+ len = length; /* Ending slop */
+
+ return len;
+}
+
+#if defined(MODULE)
+static char devicename[9] = { 0, };
+static struct device dev_wavelan =
+{
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, wavelan_probe
+};
+
+static int io = 0x390; /* Default from above. */
+static int irq = 0;
+
+int
+init_module(void)
+{
+ dev_wavelan.base_addr = io;
+ dev_wavelan.irq = irq;
+ if (register_netdev(&dev_wavelan) != 0)
+ return -EIO;
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ proc_net_unregister(PROC_NET_WAVELAN);
+ unregister_netdev(&dev_wavelan);
+ kfree_s(dev_wavelan.priv, sizeof(struct net_local));
+ dev_wavelan.priv = NULL;
+}
+#endif /* defined(MODULE) */
+
+static
+void
+wavelan_cu_show_one(device *dev, net_local *lp, int i, unsigned short p)
+{
+ unsigned short ioaddr;
+ ac_tx_t actx;
+
+ ioaddr = dev->base_addr;
+
+ printk("%d: 0x%x:", i, p);
+
+ obram_read(ioaddr, p, (unsigned char *)&actx, sizeof(actx));
+ printk(" status=0x%x,", actx.tx_h.ac_status);
+ printk(" command=0x%x,", actx.tx_h.ac_command);
+
+/*
+ {
+ tbd_t tbd;
+
+ obram_read(ioaddr, actx.tx_tbd_offset, (unsigned char *)&tbd, sizeof(tbd));
+ printk(" tbd_status=0x%x,", tbd.tbd_status);
+ }
+*/
+
+ printk("|");
+}
+
+#if 0
+static
+void
+wavelan_psa_show(psa_t *p)
+{
+ printk("psa:");
+
+ printk("psa_io_base_addr_1: 0x%02x,", p->psa_io_base_addr_1);
+ printk("psa_io_base_addr_2: 0x%02x,", p->psa_io_base_addr_2);
+ printk("psa_io_base_addr_3: 0x%02x,", p->psa_io_base_addr_3);
+ printk("psa_io_base_addr_4: 0x%02x,", p->psa_io_base_addr_4);
+ printk("psa_rem_boot_addr_1: 0x%02x,", p->psa_rem_boot_addr_1);
+ printk("psa_rem_boot_addr_2: 0x%02x,", p->psa_rem_boot_addr_2);
+ printk("psa_rem_boot_addr_3: 0x%02x,", p->psa_rem_boot_addr_3);
+ printk("psa_holi_params: 0x%02x,", p->psa_holi_params);
+ printk("psa_int_req_no: %d,", p->psa_int_req_no);
+ printk
+ (
+ "psa_univ_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x,",
+ p->psa_univ_mac_addr[0],
+ p->psa_univ_mac_addr[1],
+ p->psa_univ_mac_addr[2],
+ p->psa_univ_mac_addr[3],
+ p->psa_univ_mac_addr[4],
+ p->psa_univ_mac_addr[5]
+ );
+ printk
+ (
+ "psa_local_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x,",
+ p->psa_local_mac_addr[0],
+ p->psa_local_mac_addr[1],
+ p->psa_local_mac_addr[2],
+ p->psa_local_mac_addr[3],
+ p->psa_local_mac_addr[4],
+ p->psa_local_mac_addr[5]
+ );
+ printk("psa_univ_local_sel: %d,", p->psa_univ_local_sel);
+ printk("psa_comp_number: %d,", p->psa_comp_number);
+ printk("psa_thr_pre_set: 0x%02x,", p->psa_thr_pre_set);
+ printk("psa_feature_select/decay_prm: 0x%02x,", p->psa_feature_select);
+ printk("psa_subband/decay_update_prm: %d,", p->psa_subband);
+ printk("psa_quality_thr: 0x%02x,", p->psa_quality_thr);
+ printk("psa_mod_delay: 0x%02x,", p->psa_mod_delay);
+ printk("psa_nwid: 0x%02x%02x,", p->psa_nwid[0], p->psa_nwid[1]);
+ printk("psa_undefined: %d,", p->psa_undefined);
+ printk("psa_encryption_select: %d,", p->psa_encryption_select);
+ printk
+ (
+ "psa_encryption_key[]: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x,",
+ p->psa_encryption_key[0],
+ p->psa_encryption_key[1],
+ p->psa_encryption_key[2],
+ p->psa_encryption_key[3],
+ p->psa_encryption_key[4],
+ p->psa_encryption_key[5],
+ p->psa_encryption_key[6],
+ p->psa_encryption_key[7]
+ );
+ printk("psa_databus_width: %d,", p->psa_databus_width);
+ printk("psa_call_code/auto_squelch: 0x%02x,", p->psa_call_code);
+ printk("psa_no_of_retries: %d,", p->psa_no_of_retries);
+ printk("psa_acr: %d,", p->psa_acr);
+ printk("psa_dump_count: %d,", p->psa_dump_count);
+ printk("psa_nwid_prefix: 0x%02x,", p->psa_nwid_prefix);
+ printk("psa_conf_status: %d,", p->psa_conf_status);
+ printk("psa_crc: 0x%02x%02x,", p->psa_crc[0], p->psa_crc[1]);
+ printk("psa_crc_status: 0x%02x,", p->psa_crc_status);
+
+ printk("\n");
+}
+
+static
+void
+wavelan_mmc_show(unsigned short ioaddr)
+{
+ mmr_t m;
+
+ mmc_read(ioaddr, 0, (unsigned char *)&m, sizeof(m));
+
+ printk("mmr:");
+ printk(" des_status: 0x%x", m.mmr_des_status);
+ printk(" des_avail: 0x%x", m.mmr_des_avail);
+ printk(" des_io_invert: 0x%x", m.mmr_des_io_invert);
+ printk
+ (
+ " dce_status: 0x%x[%s%s%s%s]",
+ m.mmr_dce_status & 0x0F,
+ (m.mmr_dce_status & MMR_DCE_STATUS_ENERG_DET) ? "energy detected," : "",
+ (m.mmr_dce_status & MMR_DCE_STATUS_LOOPT_IND) ? "loop test indicated," : "",
+ (m.mmr_dce_status & MMR_DCE_STATUS_XMTITR_IND) ? "transmitter on," : "",
+ (m.mmr_dce_status & MMR_DCE_STATUS_JBR_EXPIRED) ? "jabber timer expired," : ""
+ );
+ printk(" correct_nwid: %d", m.mmr_correct_nwid_h << 8 | m.mmr_correct_nwid_l);
+ printk(" wrong_nwid: %d", (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l);
+ printk(" thr_pre_set: 0x%x", m.mmr_thr_pre_set);
+ printk(" signal_lvl: %d", m.mmr_signal_lvl);
+ printk(" silence_lvl: %d", m.mmr_silence_lvl);
+ printk(" sgnl_qual: 0x%x", m.mmr_sgnl_qual);
+ printk(" netw_id_l: %x", m.mmr_netw_id_l);
+
+ printk("\n");
+}
+#endif /* 0 */
+
+static
+void
+wavelan_scb_show(unsigned short ioaddr)
+{
+ scb_t scb;
+
+ obram_read(ioaddr, OFFSET_SCB, (unsigned char *)&scb, sizeof(scb));
+
+ printk("scb:");
+
+ printk(" status:");
+ printk
+ (
+ " stat 0x%x[%s%s%s%s]",
+ (scb.scb_status & (SCB_ST_CX | SCB_ST_FR | SCB_ST_CNA | SCB_ST_RNR)) >> 12,
+ (scb.scb_status & SCB_ST_CX) ? "cmd completion interrupt," : "",
+ (scb.scb_status & SCB_ST_FR) ? "frame received," : "",
+ (scb.scb_status & SCB_ST_CNA) ? "cmd unit not active," : "",
+ (scb.scb_status & SCB_ST_RNR) ? "rcv unit not ready," : ""
+ );
+ printk
+ (
+ " cus 0x%x[%s%s%s]",
+ (scb.scb_status & SCB_ST_CUS) >> 8,
+ ((scb.scb_status & SCB_ST_CUS) == SCB_ST_CUS_IDLE) ? "idle" : "",
+ ((scb.scb_status & SCB_ST_CUS) == SCB_ST_CUS_SUSP) ? "suspended" : "",
+ ((scb.scb_status & SCB_ST_CUS) == SCB_ST_CUS_ACTV) ? "active" : ""
+ );
+ printk
+ (
+ " rus 0x%x[%s%s%s%s]",
+ (scb.scb_status & SCB_ST_RUS) >> 4,
+ ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_IDLE) ? "idle" : "",
+ ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_SUSP) ? "suspended" : "",
+ ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_NRES) ? "no resources" : "",
+ ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_RDY) ? "ready" : ""
+ );
+
+ printk(" command:");
+ printk
+ (
+ " ack 0x%x[%s%s%s%s]",
+ (scb.scb_command & (SCB_CMD_ACK_CX | SCB_CMD_ACK_FR | SCB_CMD_ACK_CNA | SCB_CMD_ACK_RNR)) >> 12,
+ (scb.scb_command & SCB_CMD_ACK_CX) ? "ack cmd completion," : "",
+ (scb.scb_command & SCB_CMD_ACK_FR) ? "ack frame received," : "",
+ (scb.scb_command & SCB_CMD_ACK_CNA) ? "ack CU not active," : "",
+ (scb.scb_command & SCB_CMD_ACK_RNR) ? "ack RU not ready," : ""
+ );
+ printk
+ (
+ " cuc 0x%x[%s%s%s%s%s]",
+ (scb.scb_command & SCB_CMD_CUC) >> 8,
+ ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_NOP) ? "nop" : "",
+ ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_GO) ? "start cbl_offset" : "",
+ ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_RES) ? "resume execution" : "",
+ ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_SUS) ? "suspend execution" : "",
+ ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_ABT) ? "abort execution" : ""
+ );
+ printk
+ (
+ " ruc 0x%x[%s%s%s%s%s]",
+ (scb.scb_command & SCB_CMD_RUC) >> 4,
+ ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_NOP) ? "nop" : "",
+ ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_GO) ? "start rfa_offset" : "",
+ ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_RES) ? "resume reception" : "",
+ ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_SUS) ? "suspend reception" : "",
+ ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_ABT) ? "abort reception" : ""
+ );
+
+ printk(" cbl_offset 0x%x", scb.scb_cbl_offset);
+ printk(" rfa_offset 0x%x", scb.scb_rfa_offset);
+
+ printk(" crcerrs %d", scb.scb_crcerrs);
+ printk(" alnerrs %d", scb.scb_alnerrs);
+ printk(" rscerrs %d", scb.scb_rscerrs);
+ printk(" ovrnerrs %d", scb.scb_ovrnerrs);
+
+ printk("\n");
+}
+
+static
+void
+wavelan_ru_show(device *dev)
+{
+ net_local *lp;
+
+ lp = (net_local *)dev->priv;
+
+ printk("ru:");
+ /*
+ * Not implemented yet...
+ */
+ printk("\n");
+}
+
+static
+void
+wavelan_cu_show(device *dev)
+{
+ net_local *lp;
+ unsigned int i;
+ unsigned short p;
+
+ lp = (net_local *)dev->priv;
+
+ printk("cu:");
+ printk("\n");
+
+ for (i = 0, p = lp->tx_first_in_use; i < NTXBLOCKS; i++)
+ {
+ wavelan_cu_show_one(dev, lp, i, p);
+
+ p += TXBLOCKZ;
+ if (p >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ p -= NTXBLOCKS * TXBLOCKZ;
+ }
+}
+
+static
+void
+wavelan_dev_show(device *dev)
+{
+ printk("dev:");
+ printk(" start=%d,", dev->start);
+ printk(" tbusy=%ld,", dev->tbusy);
+ printk(" interrupt=%d,", dev->interrupt);
+ printk(" trans_start=%ld,", dev->trans_start);
+ printk(" flags=0x%x,", dev->flags);
+ printk("\n");
+}
+
+static
+void
+wavelan_local_show(device *dev)
+{
+ net_local *lp;
+
+ lp = (net_local *)dev->priv;
+
+ printk("local:");
+ printk(" tx_n_in_use=%d,", lp->tx_n_in_use);
+ printk(" hacr=0x%x,", lp->hacr);
+ printk(" rx_head=0x%x,", lp->rx_head);
+ printk(" rx_last=0x%x,", lp->rx_last);
+ printk(" tx_first_free=0x%x,", lp->tx_first_free);
+ printk(" tx_first_in_use=0x%x,", lp->tx_first_in_use);
+ printk("\n");
+}
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU Public License.
+ *
+ * This software was developed as a component of the
+ * Linux operating system.
+ * It is based on other device drivers and information
+ * either written or supplied by:
+ * Ajay Bakre (bakre@paul.rutgers.edu),
+ * Donald Becker (becker@cesdis.gsfc.nasa.gov),
+ * Loeke Brederveld (Loeke.Brederveld@Utrecht.NCR.com),
+ * Anders Klemets (klemets@it.kth.se),
+ * Vladimir V. Kolpakov (w@stier.koenig.ru),
+ * Marc Meertens (Marc.Meertens@Utrecht.NCR.com),
+ * Pauline Middelink (middelin@polyware.iaf.nl),
+ * Robert Morris (rtm@das.harvard.edu),
+ * Girish Welling (welling@paul.rutgers.edu),
+ *
+ * Thanks go also to:
+ * James Ashton (jaa101@syseng.anu.edu.au),
+ * Alan Cox (iialan@iiit.swan.ac.uk),
+ * Allan Creighton (allanc@cs.usyd.edu.au),
+ * Matthew Geier (matthew@cs.usyd.edu.au),
+ * Remo di Giovanni (remo@cs.usyd.edu.au),
+ * Eckhard Grah (grah@wrcs1.urz.uni-wuppertal.de),
+ * Vipul Gupta (vgupta@cs.binghamton.edu),
+ * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM),
+ * Tim Nicholson (tim@cs.usyd.edu.au),
+ * Ian Parkin (ian@cs.usyd.edu.au),
+ * John Rosenberg (johnr@cs.usyd.edu.au),
+ * George Rossi (george@phm.gov.au),
+ * Arthur Scott (arthur@cs.usyd.edu.au),
+ * Peter Storey,
+ * for their assistance and advice.
+ *
+ * Please send bug reports, updates, comments to:
+ *
+ * Bruce Janson Email: bruce@cs.usyd.edu.au
+ * Basser Department of Computer Science Phone: +61-2-351-3423
+ * University of Sydney, N.S.W., 2006, AUSTRALIA Fax: +61-2-351-3838
+ */
diff --git a/i386/i386at/gpl/linux/net/wavelan.h b/i386/i386at/gpl/linux/net/wavelan.h
new file mode 100644
index 00000000..3eb221c0
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/wavelan.h
@@ -0,0 +1,252 @@
+#define WAVELAN_ADDR_SIZE 6 /* Size of a MAC address */
+#define SA_ADDR0 0x08 /* First octet of WaveLAN MAC addresses */
+#define SA_ADDR1 0x00 /* Second octet of WaveLAN MAC addresses */
+#define SA_ADDR2 0x0E /* Third octet of WaveLAN MAC addresses */
+#define WAVELAN_MTU 1500 /* Maximum size of WaveLAN packet */
+
+/*
+ * Parameter Storage Area (PSA).
+ */
+typedef struct psa_t psa_t;
+struct psa_t
+{
+ unsigned char psa_io_base_addr_1; /* Base address 1 ??? */
+ unsigned char psa_io_base_addr_2; /* Base address 2 */
+ unsigned char psa_io_base_addr_3; /* Base address 3 */
+ unsigned char psa_io_base_addr_4; /* Base address 4 */
+ unsigned char psa_rem_boot_addr_1; /* Remote Boot Address 1 */
+ unsigned char psa_rem_boot_addr_2; /* Remote Boot Address 2 */
+ unsigned char psa_rem_boot_addr_3; /* Remote Boot Address 3 */
+ unsigned char psa_holi_params; /* HOst Lan Interface (HOLI) Parameters */
+ unsigned char psa_int_req_no; /* Interrupt Request Line */
+ unsigned char psa_unused0[7]; /* unused */
+ unsigned char psa_univ_mac_addr[WAVELAN_ADDR_SIZE]; /* Universal (factory) MAC Address */
+ unsigned char psa_local_mac_addr[WAVELAN_ADDR_SIZE]; /* Local MAC Address */
+ unsigned char psa_univ_local_sel; /* Universal Local Selection */
+#define PSA_UNIVERSAL 0 /* Universal (factory) */
+#define PSA_LOCAL 1 /* Local */
+ unsigned char psa_comp_number; /* Compatibility Number: */
+#define PSA_COMP_PC_AT_915 0 /* PC-AT 915 MHz */
+#define PSA_COMP_PC_MC_915 1 /* PC-MC 915 MHz */
+#define PSA_COMP_PC_AT_2400 2 /* PC-AT 2.4 GHz */
+#define PSA_COMP_PC_MC_2400 3 /* PC-MC 2.4 GHz */
+#define PSA_COMP_PCMCIA_915 4 /* PCMCIA 915 MHz */
+ unsigned char psa_thr_pre_set; /* Modem Threshold Preset */
+ unsigned char psa_feature_select; /* ??? */
+#if 0
+ <alias for above>
+ unsigned char psa_decay_prm; /* Modem Decay */
+#endif /* 0 */
+ unsigned char psa_subband; /* Subband */
+#define PSA_SUBBAND_915 0 /* 915 MHz */
+#define PSA_SUBBAND_2425 1 /* 2425 MHz */
+#define PSA_SUBBAND_2460 2 /* 2460 MHz */
+#define PSA_SUBBAND_2484 3 /* 2484 MHz */
+#define PSA_SUBBAND_2430_5 4 /* 2430.5 MHz */
+#if 0
+ <alias for above>
+ unsigned char psa_decay_updat_prm; /* Modem Decay Update ??? */
+#endif /* 0 */
+ unsigned char psa_quality_thr; /* Modem Quality Threshold */
+ unsigned char psa_mod_delay; /* Modem Delay ??? */
+ unsigned char psa_nwid[2]; /* Network ID */
+ unsigned char psa_undefined; /* undefined */
+ unsigned char psa_encryption_select; /* Encryption On Off */
+ unsigned char psa_encryption_key[8]; /* Encryption Key */
+ unsigned char psa_databus_width; /* 8/16 bit bus width */
+ unsigned char psa_call_code; /* ??? */
+#if 0
+ <alias for above>
+ unsigned char psa_auto_squelch; /* Automatic Squelch level On off ??? */
+#endif /* 0 */
+ unsigned char psa_no_of_retries; /* LAN Cont. No of retries */
+ unsigned char psa_acr; /* LAN Cont. ACR */
+ unsigned char psa_dump_count; /* number of Dump Commands in TFB */
+ unsigned char psa_unused1[4]; /* unused */
+ unsigned char psa_nwid_prefix; /* ??? */
+ unsigned char psa_unused2[3]; /* unused */
+ unsigned char psa_conf_status; /* Card Configuration Status */
+ unsigned char psa_crc[2]; /* CRC over PSA */
+ unsigned char psa_crc_status; /* CRC Valid Flag */
+};
+#if STRUCT_CHECK == 1
+#define PSA_SIZE 64
+#endif /* STRUCT_CHECK == 1 */
+
+/*
+ * Modem Management Controller (MMC) write structure.
+ */
+typedef struct mmw_t mmw_t;
+struct mmw_t
+{
+ unsigned char mmw_encr_key[8]; /* encryption key */
+ unsigned char mmw_encr_enable; /* enable/disable encryption */
+ unsigned char mmw_unused0[1]; /* unused */
+ unsigned char mmw_des_io_invert; /* ??? */
+ unsigned char mmw_unused1[5]; /* unused */
+ unsigned char mmw_loopt_sel; /* looptest selection */
+#define MMW_LOOPT_SEL_UNDEFINED 0x40 /* undefined */
+#define MMW_LOOPT_SEL_INT 0x20 /* activate Attention Request */
+#define MMW_LOOPT_SEL_LS 0x10 /* looptest without collision avoidance */
+#define MMW_LOOPT_SEL_LT3A 0x08 /* looptest 3a */
+#define MMW_LOOPT_SEL_LT3B 0x04 /* looptest 3b */
+#define MMW_LOOPT_SEL_LT3C 0x02 /* looptest 3c */
+#define MMW_LOOPT_SEL_LT3D 0x01 /* looptest 3d */
+ unsigned char mmw_jabber_enable; /* jabber timer enable */
+ unsigned char mmw_freeze; /* freeze / unfreeze signal level */
+ unsigned char mmw_anten_sel; /* antenna selection */
+#define MMW_ANTEN_SEL_SEL 0x01 /* direct antenna selection */
+#define MMW_ANTEN_SEL_ALG_EN 0x02 /* antenna selection algorithm enable */
+ unsigned char mmw_ifs; /* inter frame spacing */
+ unsigned char mmw_mod_delay; /* modem delay */
+ unsigned char mmw_jam_time; /* jamming time */
+ unsigned char mmw_unused2[1]; /* unused */
+ unsigned char mmw_thr_pre_set; /* level threshold preset */
+ unsigned char mmw_decay_prm; /* decay parameters */
+ unsigned char mmw_decay_updat_prm; /* decay update parameters */
+ unsigned char mmw_quality_thr; /* quality (z-quotient) threshold */
+ unsigned char mmw_netw_id_l; /* NWID low order byte */
+ unsigned char mmw_netw_id_h; /* NWID high order byte */
+};
+#if STRUCT_CHECK == 1
+#define MMW_SIZE 30
+#endif /* STRUCT_CHECK == 1 */
+
+#define mmwoff(p,f) (unsigned short)((void *)(&((mmw_t *)((void *)0 + (p)))->f) - (void *)0)
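+/*
+ * mmwoff(p,f) -- like mmroff() and hoff() below -- yields the byte
+ * offset of field f within the structure plus the base offset p,
+ * i.e. an offsetof()-style computation done with pointer arithmetic
+ * on a null base pointer.
+ */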
+
+/*
+ * Modem Management Controller (MMC) read structure.
+ */
+typedef struct mmr_t mmr_t;
+struct mmr_t
+{
+ unsigned char mmr_unused0[8]; /* unused */
+ unsigned char mmr_des_status; /* encryption status */
+ unsigned char mmr_des_avail; /* encryption available (0x55 read) */
+ unsigned char mmr_des_io_invert; /* des I/O invert register */
+ unsigned char mmr_unused1[5]; /* unused */
+ unsigned char mmr_dce_status; /* DCE status */
+#define MMR_DCE_STATUS_ENERG_DET 0x01 /* energy detected */
+#define MMR_DCE_STATUS_LOOPT_IND 0x02 /* loop test indicated */
+#define MMR_DCE_STATUS_XMTITR_IND 0x04 /* transmitter on */
+#define MMR_DCE_STATUS_JBR_EXPIRED 0x08 /* jabber timer expired */
+ unsigned char mmr_unused2[3]; /* unused */
+ unsigned char mmr_correct_nwid_l; /* no. of correct NWID's rxd (low) */
+ unsigned char mmr_correct_nwid_h; /* no. of correct NWID's rxd (high) */
+ unsigned char mmr_wrong_nwid_l; /* count of wrong NWID's received (low) */
+ unsigned char mmr_wrong_nwid_h; /* count of wrong NWID's received (high) */
+ unsigned char mmr_thr_pre_set; /* level threshold preset */
+ unsigned char mmr_signal_lvl; /* signal level */
+ unsigned char mmr_silence_lvl; /* silence level */
+ unsigned char mmr_sgnl_qual; /* signal quality */
+#define MMR_SGNL_QUAL_0 0x01 /* signal quality 0 */
+#define MMR_SGNL_QUAL_1 0x02 /* signal quality 1 */
+#define MMR_SGNL_QUAL_2 0x04 /* signal quality 2 */
+#define MMR_SGNL_QUAL_3 0x08 /* signal quality 3 */
+#define MMR_SGNL_QUAL_S_A 0x80 /* currently selected antenna */
+ unsigned char mmr_netw_id_l; /* NWID low order byte ??? */
+ unsigned char mmr_unused3[1]; /* unused */
+};
+#if STRUCT_CHECK == 1
+#define MMR_SIZE 30
+#endif /* STRUCT_CHECK == 1 */
+
+#define MMR_LEVEL_MASK 0x3F
+
+#define mmroff(p,f) (unsigned short)((void *)(&((mmr_t *)((void *)0 + (p)))->f) - (void *)0)
+
+/*
+ * Host Adaptor structure.
+ * (base is board port address).
+ */
+typedef union hacs_u hacs_u;
+union hacs_u
+{
+ unsigned short hu_command; /* Command register */
+#define HACR_RESET 0x0001 /* Reset board */
+#define HACR_CA 0x0002 /* Set Channel Attention for 82586 */
+#define HACR_16BITS 0x0004 /* 16 bits operation (0 => 8bits) */
+#define HACR_OUT0 0x0008 /* General purpose output pin 0 */
+ /* not used - must be 1 */
+#define HACR_OUT1 0x0010 /* General purpose output pin 1 */
+ /* not used - must be 1 */
+#define HACR_82586_INT_ENABLE 0x0020 /* Enable 82586 interrupts */
+#define HACR_MMC_INT_ENABLE 0x0040 /* Enable MMC interrupts */
+#define HACR_INTR_CLR_ENABLE 0x0080 /* Enable interrupt status read/clear */
+ unsigned short hu_status; /* Status Register */
+#define HASR_82586_INTR 0x0001 /* Interrupt request from 82586 */
+#define HASR_MMC_INTR 0x0002 /* Interrupt request from MMC */
+#define HASR_MMC_BUSY 0x0004 /* MMC busy indication */
+#define HASR_PSA_BUSY 0x0008 /* LAN parameter storage area busy */
+};
+
+typedef struct ha_t ha_t;
+struct ha_t
+{
+ hacs_u ha_cs; /* Command and status registers */
+#define ha_command ha_cs.hu_command
+#define ha_status ha_cs.hu_status
+ unsigned short ha_mmcr; /* Modem Management Ctrl Register */
+ unsigned short ha_pior0; /* Program I/O Address Register Port 0 */
+ unsigned short ha_piop0; /* Program I/O Port 0 */
+ unsigned short ha_pior1; /* Program I/O Address Register Port 1 */
+ unsigned short ha_piop1; /* Program I/O Port 1 */
+ unsigned short ha_pior2; /* Program I/O Address Register Port 2 */
+ unsigned short ha_piop2; /* Program I/O Port 2 */
+};
+#if STRUCT_CHECK == 1
+#define HA_SIZE 16
+#endif /* STRUCT_CHECK == 1 */
+
+#define hoff(p,f) (unsigned short)((void *)(&((ha_t *)((void *)0 + (p)))->f) - (void *)0)
+#define HACR(p) hoff(p, ha_command)
+#define HASR(p) hoff(p, ha_status)
+#define MMCR(p) hoff(p, ha_mmcr)
+#define PIOR0(p) hoff(p, ha_pior0)
+#define PIOP0(p) hoff(p, ha_piop0)
+#define PIOR1(p) hoff(p, ha_pior1)
+#define PIOP1(p) hoff(p, ha_piop1)
+#define PIOR2(p) hoff(p, ha_pior2)
+#define PIOP2(p) hoff(p, ha_piop2)
+
+/*
+ * Program I/O Mode Register values.
+ */
+#define STATIC_PIO 0 /* Mode 1: static mode */
+ /* RAM access ??? */
+#define AUTOINCR_PIO 1 /* Mode 2: auto increment mode */
+ /* RAM access ??? */
+#define AUTODECR_PIO 2 /* Mode 3: auto decrement mode */
+ /* RAM access ??? */
+#define PARAM_ACCESS_PIO 3 /* Mode 4: LAN parameter access mode */
+ /* Parameter access. */
+#define PIO_MASK 3 /* register mask */
+#define PIOM(cmd,piono) ((u_short)cmd << 10 << (piono * 2))
+
+#define HACR_DEFAULT (HACR_OUT0 | HACR_OUT1 | HACR_16BITS | PIOM(STATIC_PIO, 0) | PIOM(AUTOINCR_PIO, 1) | PIOM(PARAM_ACCESS_PIO, 2))
+#define HACR_INTRON (HACR_82586_INT_ENABLE | HACR_MMC_INT_ENABLE | HACR_INTR_CLR_ENABLE)
+
+#define MAXDATAZ (WAVELAN_ADDR_SIZE + WAVELAN_ADDR_SIZE + 2 + WAVELAN_MTU)
+
+/*
+ * Onboard 64k RAM layout.
+ * (Offsets from 0x0000.)
+ */
+#define OFFSET_RU 0x0000
+#define OFFSET_CU 0x8000
+#define OFFSET_SCB (OFFSET_ISCP - sizeof(scb_t))
+#define OFFSET_ISCP (OFFSET_SCP - sizeof(iscp_t))
+#define OFFSET_SCP I82586_SCP_ADDR
+
+#define RXBLOCKZ (sizeof(fd_t) + sizeof(rbd_t) + MAXDATAZ)
+#define TXBLOCKZ (sizeof(ac_tx_t) + sizeof(ac_nop_t) + sizeof(tbd_t) + MAXDATAZ)
+
+#define NRXBLOCKS ((OFFSET_CU - OFFSET_RU) / RXBLOCKZ)
+#define NTXBLOCKS ((OFFSET_SCB - OFFSET_CU) / TXBLOCKZ)
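+/*
+ * Resulting on-board RAM map: receive blocks fill OFFSET_RU..OFFSET_CU-1,
+ * transmit blocks run upward from OFFSET_CU, and the SCB sits directly
+ * below the ISCP, which in turn sits directly below the SCP located at
+ * I82586_SCP_ADDR.
+ */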
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU Public License.
+ *
+ * For more details, see wavelan.c.
+ */
diff --git a/i386/i386at/gpl/linux/net/wd.c b/i386/i386at/gpl/linux/net/wd.c
new file mode 100644
index 00000000..5eaa6585
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/wd.c
@@ -0,0 +1,513 @@
+/* wd.c: A WD80x3 ethernet driver for linux. */
+/*
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is a driver for WD8003 and WD8013 "compatible" ethercards.
+
+ Thanks to Russ Nelson (nelson@crnwyr.com) for loaning me a WD8013.
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users, support
+ for non-standard memory sizes.
+
+
+*/
+
+static const char *version =
+ "wd.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int wd_portlist[] =
+{0x300, 0x280, 0x380, 0x240, 0};
+
+int wd_probe(struct device *dev);
+int wd_probe1(struct device *dev, int ioaddr);
+
+static int wd_open(struct device *dev);
+static void wd_reset_8390(struct device *dev);
+static void wd_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void wd_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void wd_block_output(struct device *dev, int count,
+ const unsigned char *buf, const start_page);
+static int wd_close_card(struct device *dev);
+
+
+#define WD_START_PG 0x00 /* First page of TX buffer */
+#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */
+#define WD13_STOP_PG 0x40 /* Last page +1 of RX ring */
+
+#define WD_CMDREG 0 /* Offset to ASIC command register. */
+#define WD_RESET 0x80 /* Board reset, in WD_CMDREG. */
+#define WD_MEMENB 0x40 /* Enable the shared memory. */
+#define WD_CMDREG5 5 /* Offset to 16-bit-only ASIC register 5. */
+#define ISA16 0x80 /* Enable 16 bit access from the ISA bus. */
+#define NIC16 0x40 /* Enable 16 bit access from the 8390. */
+#define WD_NIC_OFFSET 16 /* Offset to the 8390 from the base_addr. */
+#define WD_IO_EXTENT 32
+
+
+/* Probe for the WD8003 and WD8013. These cards have the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+ following. A Soundblaster can have the same checksum as a WD ethercard,
+ so we have an extra exclusionary check for it.
+
+ The wd_probe1() routine initializes the card and fills the
+ station address field. */
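+/* Concretely, wd_probe1() below sums the eight bytes at <base>+8 through
+   <base>+15 and requires the low byte of that sum to be 0xFF. */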
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry wd_drv =
+{"wd", wd_probe1, WD_IO_EXTENT, wd_portlist};
+#else
+
+int wd_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return wd_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; wd_portlist[i]; i++) {
+ int ioaddr = wd_portlist[i];
+ if (check_region(ioaddr, WD_IO_EXTENT))
+ continue;
+ if (wd_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+int wd_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ int checksum = 0;
+ int ancient = 0; /* An old card without config registers. */
+ int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
+ const char *model_name;
+ static unsigned version_printed = 0;
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if (inb(ioaddr + 8) == 0xff /* Extra check to avoid soundcard. */
+ || inb(ioaddr + 9) == 0xff
+ || (checksum & 0xff) != 0xFF)
+ return ENODEV;
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("wd.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ /* Check for semi-valid mem_start/end values if supplied. */
+ if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) {
+ printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
+ dev->mem_start = 0;
+ dev->mem_end = 0;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: WD80x3 at %#3x, ", dev->name, ioaddr);
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* The following PureData probe code was contributed by
+ Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
+ configuration differently from others so we have to check for them.
+ This detects an 8 bit, 16 bit or dumb (Toshiba, jumpered) card.
+ */
+ if (inb(ioaddr+0) == 'P' && inb(ioaddr+1) == 'D') {
+ unsigned char reg5 = inb(ioaddr+5);
+
+ switch (inb(ioaddr+2)) {
+ case 0x03: word16 = 0; model_name = "PDI8023-8"; break;
+ case 0x05: word16 = 0; model_name = "PDUC8023"; break;
+ case 0x0a: word16 = 1; model_name = "PDI8023-16"; break;
+ /* Either 0x01 (dumb) or they've released a new version. */
+ default: word16 = 0; model_name = "PDI8023"; break;
+ }
+ dev->mem_start = ((reg5 & 0x1c) + 0xc0) << 12;
+ dev->irq = (reg5 & 0xe0) == 0xe0 ? 10 : (reg5 >> 5) + 1;
+ } else { /* End of PureData probe */
+ /* This method of checking for a 16-bit board is borrowed from the
+ we.c driver. A simpler method is just to look in ASIC reg. 0x03.
+ I'm comparing the two methods in alpha test to make certain they
+ return the same result. */
+ /* Check for the old 8 bit board - it has register 0/8 aliasing.
+ Do NOT check i>=6 here -- it hangs the old 8003 boards! */
+ for (i = 0; i < 6; i++)
+ if (inb(ioaddr+i) != inb(ioaddr+8+i))
+ break;
+ if (i >= 6) {
+ ancient = 1;
+ model_name = "WD8003-old";
+ word16 = 0;
+ } else {
+ int tmp = inb(ioaddr+1); /* fiddle with 16bit bit */
+ outb( tmp ^ 0x01, ioaddr+1 ); /* attempt to clear 16bit bit */
+ if (((inb( ioaddr+1) & 0x01) == 0x01) /* A 16 bit card */
+ && (tmp & 0x01) == 0x01 ) { /* In a 16 slot. */
+ int asic_reg5 = inb(ioaddr+WD_CMDREG5);
+ /* Magic to set ASIC to word-wide mode. */
+ outb( NIC16 | (asic_reg5&0x1f), ioaddr+WD_CMDREG5);
+ outb(tmp, ioaddr+1);
+ model_name = "WD8013";
+ word16 = 1; /* We have a 16bit board here! */
+ } else {
+ model_name = "WD8003";
+ word16 = 0;
+ }
+ outb(tmp, ioaddr+1); /* Restore original reg1 value. */
+ }
+#ifndef final_version
+ if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01))
+ printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
+ word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8);
+#endif
+ }
+
+#if defined(WD_SHMEM) && WD_SHMEM > 0x80000
+ /* Allow a compile-time override. */
+ dev->mem_start = WD_SHMEM;
+#else
+ if (dev->mem_start == 0) {
+ /* Sanity and old 8003 check */
+ int reg0 = inb(ioaddr);
+ if (reg0 == 0xff || reg0 == 0) {
+ /* Future plan: this could check a few likely locations first. */
+ dev->mem_start = 0xd0000;
+ printk(" assigning address %#lx", dev->mem_start);
+ } else {
+ int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f;
+			/* Some boards don't have register 5 -- it returns 0xff. */
+ if (high_addr_bits == 0x1f || word16 == 0)
+ high_addr_bits = 0x01;
+ dev->mem_start = ((reg0&0x3f) << 13) + (high_addr_bits << 19);
+ }
+ }
+#endif
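+
+/* Worked example of the address decoding above (illustrative): with
+   reg0 = 0x28 and high_addr_bits = 0x01,
+	(0x28 & 0x3f) << 13 = 0x50000
+	 0x01         << 19 = 0x80000
+   giving dev->mem_start = 0xd0000; i.e. the low 6 bits of ASIC register 0
+   supply A13-A18 and the low 5 bits of register 5 supply A19-A23 of the
+   shared-memory address. */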
+
+ /* The 8390 isn't at the base address -- the ASIC regs are there! */
+ dev->base_addr = ioaddr+WD_NIC_OFFSET;
+
+ if (dev->irq < 2) {
+ int irqmap[] = {9,3,5,7,10,11,15,4};
+ int reg1 = inb(ioaddr+1);
+ int reg4 = inb(ioaddr+4);
+ if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */
+ short nic_addr = ioaddr+WD_NIC_OFFSET;
+
+ /* We have an old-style ethercard that doesn't report its IRQ
+ line. Do autoirq to find the IRQ line. Note that this IS NOT
+ a reliable way to trigger an interrupt. */
+ outb_p(E8390_NODMA + E8390_STOP, nic_addr);
+ outb(0x00, nic_addr+EN0_IMR); /* Disable all intrs. */
+ autoirq_setup(0);
+ outb_p(0xff, nic_addr + EN0_IMR); /* Enable all interrupts. */
+ outb_p(0x00, nic_addr + EN0_RCNTLO);
+ outb_p(0x00, nic_addr + EN0_RCNTHI);
+ outb(E8390_RREAD+E8390_START, nic_addr); /* Trigger it... */
+ dev->irq = autoirq_report(2);
+ outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */
+
+ if (ei_debug > 2)
+ printk(" autoirq is %d", dev->irq);
+ if (dev->irq < 2)
+ dev->irq = word16 ? 10 : 5;
+ } else
+ dev->irq = irqmap[((reg4 >> 5) & 0x03) + (reg1 & 0x04)];
+ } else if (dev->irq == 2) /* Fixup bogosity: IRQ2 is really IRQ9 */
+ dev->irq = 9;
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ if (request_irq(dev->irq, ei_interrupt, 0, model_name)) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return EAGAIN;
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to get memory for dev->priv.\n");
+ free_irq(dev->irq);
+ return -ENOMEM;
+ }
+
+	/* OK, we are certain this is going to work.  Set up the device. */
+ request_region(ioaddr, WD_IO_EXTENT, model_name);
+
+ ei_status.name = model_name;
+ ei_status.word16 = word16;
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+
+ /* Don't map in the shared memory until the board is actually opened. */
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+
+ /* Some cards (eg WD8003EBT) can be jumpered for more (32k!) memory. */
+ if (dev->mem_end != 0) {
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ } else {
+ ei_status.stop_page = word16 ? WD13_STOP_PG : WD03_STOP_PG;
+ dev->mem_end = dev->mem_start + (ei_status.stop_page - WD_START_PG)*256;
+ }
+ dev->rmem_end = dev->mem_end;
+
+ printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
+ model_name, dev->irq, dev->mem_start, dev->mem_end-1);
+
+ ei_status.reset_8390 = &wd_reset_8390;
+ ei_status.block_input = &wd_block_input;
+ ei_status.block_output = &wd_block_output;
+ ei_status.get_8390_hdr = &wd_get_8390_hdr;
+ dev->open = &wd_open;
+ dev->stop = &wd_close_card;
+ NS8390_init(dev, 0);
+
+#if 1
+ /* Enable interrupt generation on softconfig cards -- M.U */
+	/* .. but potentially unsafe - Donald */
+ if (inb(ioaddr+14) & 0x20)
+ outb(inb(ioaddr+4)|0x80, ioaddr+4);
+#endif
+
+ return 0;
+}
+
+static int
+wd_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ /* Map in the shared memory. Always set register 0 last to remain
+ compatible with very old boards. */
+ ei_status.reg0 = ((dev->mem_start>>13) & 0x3f) | WD_MEMENB;
+ ei_status.reg5 = ((dev->mem_start>>19) & 0x1f) | NIC16;
+
+ if (ei_status.word16)
+ outb(ei_status.reg5, ioaddr+WD_CMDREG5);
+ outb(ei_status.reg0, ioaddr); /* WD_CMDREG */
+
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static void
+wd_reset_8390(struct device *dev)
+{
+ int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ outb(WD_RESET, wd_cmd_port);
+ if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies);
+ ei_status.txing = 0;
+
+ /* Set up the ASIC registers, just in case something changed them. */
+ outb((((dev->mem_start>>13) & 0x3f)|WD_MEMENB), wd_cmd_port);
+ if (ei_status.word16)
+ outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
+
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+wd_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ unsigned long hdr_start = dev->mem_start + ((ring_page - WD_START_PG)<<8);
+
+ /* We'll always get a 4 byte header read followed by a packet read, so
+ we enable 16 bit mode before the header, and disable after the body. */
+ if (ei_status.word16)
+ outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+}
+
+/* Block input and output are easy on shared memory ethercards, and trivial
+   on the Western Digital card, where there is no choice of how to do it.
+   The only complications are that the ring buffer wraps and that we need
+   to switch between 8- and 16-bit modes. */
+
+static void
+wd_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ unsigned long xfer_start = dev->mem_start + ring_offset - (WD_START_PG<<8);
+
+ if (xfer_start + count > dev->rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = dev->rmem_end - xfer_start;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+
+ /* Turn off 16 bit access so that reboot works. ISA brain-damage */
+ if (ei_status.word16)
+ outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+}
+
+static void
+wd_block_output(struct device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ long shmem = dev->mem_start + ((start_page - WD_START_PG)<<8);
+
+
+ if (ei_status.word16) {
+ /* Turn on and off 16 bit access so that reboot works. */
+ outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+ memcpy_toio(shmem, buf, count);
+ outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+ } else
+ memcpy_toio(shmem, buf, count);
+}
+
+
+static int
+wd_close_card(struct device *dev)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+
+ /* Change from 16-bit to 8-bit shared memory so reboot works. */
+ if (ei_status.word16)
+ outb(ei_status.reg5, wd_cmdreg + WD_CMDREG5 );
+
+ /* And disable the shared memory. */
+ outb(ei_status.reg0 & ~WD_MEMENB, wd_cmdreg);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_WD_CARDS 4 /* Max number of wd cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_WD_CARDS] = { 0, };
+static struct device dev_wd[MAX_WD_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_WD_CARDS] = { 0, };
+static int irq[MAX_WD_CARDS] = { 0, };
+static int mem[MAX_WD_CARDS] = { 0, };
+static int mem_end[MAX_WD_CARDS] = { 0, }; /* for non std. mem size */
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
+ struct device *dev = &dev_wd[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ dev->mem_end = mem_end[this_dev];
+ dev->init = wd_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "wd.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "wd.c: No wd80x3 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
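+
+/* Usage sketch (illustrative, not part of the original driver): with the
+   module utilities of this era the io[], irq[] and mem[] arrays above are
+   set by name at load time, e.g. something like
+	insmod wd.o io=0x300 irq=10 mem=0xd0000
+   Additional cards need explicit io= values, since only the first entry is
+   ever autoprobed. */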
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
+ struct device *dev = &dev_wd[this_dev];
+ if (dev->priv != NULL) {
+ int ioaddr = dev->base_addr - WD_NIC_OFFSET;
+ kfree(dev->priv);
+ dev->priv = NULL;
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = NULL;
+ release_region(ioaddr, WD_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c wd.c"
+ * version-control: t
+ * tab-width: 4
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/net/znet.c b/i386/i386at/gpl/linux/net/znet.c
new file mode 100644
index 00000000..9f44928e
--- /dev/null
+++ b/i386/i386at/gpl/linux/net/znet.c
@@ -0,0 +1,746 @@
+/* znet.c: A Zenith Z-Note ethernet driver for Linux. */
+
+static const char *version = "znet.c:v1.02 9/23/94 becker@cesdis.gsfc.nasa.gov\n";
+
+/*
+ Written by Donald Becker.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov.
+ This driver is based on the Linux skeleton driver. The copyright of the
+ skeleton driver is held by the United States Government, as represented
+ by DIRNSA, and it is released under the GPL.
+
+ Thanks to Mike Hollick for alpha testing and suggestions.
+
+ References:
+ The Crynwr packet driver.
+
+ "82593 CSMA/CD Core LAN Controller" Intel datasheet, 1992
+ Intel Microcommunications Databook, Vol. 1, 1990.
+ As usual with Intel, the documentation is incomplete and inaccurate.
+ I had to read the Crynwr packet driver to figure out how to actually
+ use the i82593, and guess at what register bits matched the loosely
+ related i82586.
+
+ Theory of Operation
+
+ The i82593 used in the Zenith Z-Note series operates using two(!) slave
+ DMA channels, one interrupt, and one 8-bit I/O port.
+
+	While there are several ways to configure the '593 DMA system, I chose the one
+ that seemed commensurate with the highest system performance in the face
+ of moderate interrupt latency: Both DMA channels are configured as
+ recirculating ring buffers, with one channel (#0) dedicated to Rx and
+ the other channel (#1) to Tx and configuration. (Note that this is
+	different from the Crynwr driver, where the Tx DMA channel is initialized
+ before each operation. That approach simplifies operation and Tx error
+ recovery, but requires additional I/O in normal operation and precludes
+ transmit buffer chaining.)
+
+ Both rings are set to 8192 bytes using {TX,RX}_RING_SIZE. This provides
+ a reasonable ring size for Rx, while simplifying DMA buffer allocation --
+ DMA buffers must not cross a 128K boundary. (In truth the size selection
+ was influenced by my lack of '593 documentation. I thus was constrained
+ to use the Crynwr '593 initialization table, which sets the Rx ring size
+ to 8K.)
+
+ Despite my usual low opinion about Intel-designed parts, I must admit
+ that the bulk data handling of the i82593 is a good design for
+ an integrated system, like a laptop, where using two slave DMA channels
+ doesn't pose a problem. I still take issue with using only a single I/O
+ port. In the same controlled environment there are essentially no
+ limitations on I/O space, and using multiple locations would eliminate
+ the need for multiple operations when looking at status registers,
+ setting the Rx ring boundary, or switching to promiscuous mode.
+
+ I also question Zenith's selection of the '593: one of the advertised
+ advantages of earlier Intel parts was that if you figured out the magic
+ initialization incantation you could use the same part on many different
+ network types. Zenith's use of the "FriendlyNet" (sic) connector rather
+ than an on-board transceiver leads me to believe that they were planning
+ to take advantage of this. But, uhmmm, the '593 omits all but ethernet
+ functionality from the serial subsystem.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+
+#ifndef ZNET_DEBUG
+#define ZNET_DEBUG 1
+#endif
+static unsigned int znet_debug = ZNET_DEBUG;
+
+/* The DMA modes we need aren't in <dma.h>. */
+#define DMA_RX_MODE 0x14 /* Auto init, I/O to mem, ++, demand. */
+#define DMA_TX_MODE 0x18 /* Auto init, Mem to I/O, ++, demand. */
+#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
+#define DMA_BUF_SIZE 8192
+#define RX_BUF_SIZE 8192
+#define TX_BUF_SIZE 8192
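+
+/* Example of the dma_page_eq() check above (added for clarity): it compares
+   bits 17 and up of two addresses, i.e. whether both ends of a buffer fall
+   in the same 128K (0x20000 byte) DMA page.  A buffer running from 0x1f000
+   to 0x20fff fails the test (0x1f000>>17 == 0 but 0x20fff>>17 == 1), which
+   is why the probe routine keeps spare buffers and picks ones that pass. */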
+
+/* Commands to the i82593 channel 0. */
+#define CMD0_CHNL_0 0x00
+#define CMD0_CHNL_1 0x10 /* Switch to channel 1. */
+#define CMD0_NOP (CMD0_CHNL_0)
+#define CMD0_PORT_1 CMD0_CHNL_1
+#define CMD1_PORT_0 1
+#define CMD0_IA_SETUP 1
+#define CMD0_CONFIGURE 2
+#define CMD0_MULTICAST_LIST 3
+#define CMD0_TRANSMIT 4
+#define CMD0_DUMP 6
+#define CMD0_DIAGNOSE 7
+#define CMD0_Rx_ENABLE 8
+#define CMD0_Rx_DISABLE 10
+#define CMD0_Rx_STOP 11
+#define CMD0_RETRANSMIT 12
+#define CMD0_ABORT 13
+#define CMD0_RESET 14
+
+#define CMD0_ACK 0x80
+
+#define CMD0_STAT0 (0 << 5)
+#define CMD0_STAT1 (1 << 5)
+#define CMD0_STAT2 (2 << 5)
+#define CMD0_STAT3 (3 << 5)
+
+#define net_local znet_private
+struct znet_private {
+ int rx_dma, tx_dma;
+ struct enet_statistics stats;
+ /* The starting, current, and end pointers for the packet buffers. */
+ ushort *rx_start, *rx_cur, *rx_end;
+ ushort *tx_start, *tx_cur, *tx_end;
+ ushort tx_buf_len; /* Tx buffer length, in words. */
+};
+
+/* Only one can be built-in;-> */
+static struct znet_private zn;
+static ushort dma_buffer1[DMA_BUF_SIZE/2];
+static ushort dma_buffer2[DMA_BUF_SIZE/2];
+static ushort dma_buffer3[DMA_BUF_SIZE/2 + 8];
+
+/* The configuration block. What an undocumented nightmare. The first
+ set of values are those suggested (without explanation) for ethernet
+ in the Intel 82586 databook. The rest appear to be completely undocumented,
+ except for cryptic notes in the Crynwr packet driver. This driver uses
+ the Crynwr values verbatim. */
+
+static unsigned char i593_init[] = {
+ 0xAA, /* 0: 16-byte input & 80-byte output FIFO. */
+ /* threshold, 96-byte FIFO, 82593 mode. */
+ 0x88, /* 1: Continuous w/interrupts, 128-clock DMA.*/
+ 0x2E, /* 2: 8-byte preamble, NO address insertion, */
+ /* 6-byte Ethernet address, loopback off.*/
+ 0x00, /* 3: Default priorities & backoff methods. */
+ 0x60, /* 4: 96-bit interframe spacing. */
+ 0x00, /* 5: 512-bit slot time (low-order). */
+ 0xF2, /* 6: Slot time (high-order), 15 COLL retries. */
+ 0x00, /* 7: Promisc-off, broadcast-on, default CRC. */
+ 0x00, /* 8: Default carrier-sense, collision-detect. */
+ 0x40, /* 9: 64-byte minimum frame length. */
+ 0x5F, /* A: Type/length checks OFF, no CRC input,
+ "jabber" termination, etc. */
+ 0x00, /* B: Full-duplex disabled. */
+ 0x3F, /* C: Default multicast addresses & backoff. */
+ 0x07, /* D: Default IFS retriggering. */
+ 0x31, /* E: Internal retransmit, drop "runt" packets,
+ synchr. DRQ deassertion, 6 status bytes. */
+ 0x22, /* F: Receive ring-buffer size (8K),
+ receive-stop register enable. */
+};
+
+struct netidblk {
+ char magic[8]; /* The magic number (string) "NETIDBLK" */
+ unsigned char netid[8]; /* The physical station address */
+ char nettype, globalopt;
+ char vendor[8]; /* The machine vendor and product name. */
+ char product[8];
+ char irq1, irq2; /* Interrupts, only one is currently used. */
+ char dma1, dma2;
+ short dma_mem_misc[8]; /* DMA buffer locations (unused in Linux). */
+ short iobase1, iosize1;
+ short iobase2, iosize2; /* Second iobase unused. */
+ char driver_options; /* Misc. bits */
+ char pad;
+};
+
+int znet_probe(struct device *dev);
+static int znet_open(struct device *dev);
+static int znet_send_packet(struct sk_buff *skb, struct device *dev);
+static void znet_interrupt(int irq, struct pt_regs *regs);
+static void znet_rx(struct device *dev);
+static int znet_close(struct device *dev);
+static struct enet_statistics *net_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+static void hardware_init(struct device *dev);
+static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset);
+
+#ifdef notdef
+static struct sigaction znet_sigaction = { &znet_interrupt, 0, 0, NULL, };
+#endif
+
+
+/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe
+ BIOS area. We just scan for the signature, and pull the vital parameters
+ out of the structure. */
+
+int znet_probe(struct device *dev)
+{
+ int i;
+ struct netidblk *netinfo;
+ char *p;
+
+ /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
+ for(p = (char *)0xf0000; p < (char *)0x100000; p++)
+ if (*p == 'N' && strncmp(p, "NETIDBLK", 8) == 0)
+ break;
+
+ if (p >= (char *)0x100000) {
+ if (znet_debug > 1)
+ printk(KERN_INFO "No Z-Note ethernet adaptor found.\n");
+ return ENODEV;
+ }
+ netinfo = (struct netidblk *)p;
+ dev->base_addr = netinfo->iobase1;
+ dev->irq = netinfo->irq1;
+
+ printk(KERN_INFO "%s: ZNET at %#3lx,", dev->name, dev->base_addr);
+
+ /* The station address is in the "netidblk" at 0x0f0000. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = netinfo->netid[i]);
+
+ printk(", using IRQ %d DMA %d and %d.\n", dev->irq, netinfo->dma1,
+ netinfo->dma2);
+
+ if (znet_debug > 1) {
+ printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n",
+ dev->name, netinfo->vendor,
+ netinfo->irq1, netinfo->irq2,
+ netinfo->dma1, netinfo->dma2);
+ printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n",
+ dev->name, netinfo->iobase1, netinfo->iosize1,
+ netinfo->iobase2, netinfo->iosize2, netinfo->nettype);
+ }
+
+ if (znet_debug > 0)
+ printk("%s%s", KERN_INFO, version);
+
+ dev->priv = (void *) &zn;
+ zn.rx_dma = netinfo->dma1;
+ zn.tx_dma = netinfo->dma2;
+
+ /* These should never fail. You can't add devices to a sealed box! */
+ if (request_irq(dev->irq, &znet_interrupt, 0, "ZNet")
+ || request_dma(zn.rx_dma,"ZNet rx")
+ || request_dma(zn.tx_dma,"ZNet tx")) {
+ printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
+ return EBUSY;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ /* Allocate buffer memory. We can cross a 128K boundary, so we
+ must be careful about the allocation. It's easiest to waste 8K. */
+ if (dma_page_eq(dma_buffer1, &dma_buffer1[RX_BUF_SIZE/2-1]))
+ zn.rx_start = dma_buffer1;
+ else
+ zn.rx_start = dma_buffer2;
+
+ if (dma_page_eq(dma_buffer3, &dma_buffer3[RX_BUF_SIZE/2-1]))
+ zn.tx_start = dma_buffer3;
+ else
+ zn.tx_start = dma_buffer2;
+ zn.rx_end = zn.rx_start + RX_BUF_SIZE/2;
+ zn.tx_buf_len = TX_BUF_SIZE/2;
+ zn.tx_end = zn.tx_start + zn.tx_buf_len;
+
+ /* The ZNET-specific entries in the device structure. */
+ dev->open = &znet_open;
+ dev->hard_start_xmit = &znet_send_packet;
+ dev->stop = &znet_close;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the 'dev' with ethernet-generic values. */
+ ether_setup(dev);
+
+ return 0;
+}
+
+
+static int znet_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (znet_debug > 2)
+ printk(KERN_DEBUG "%s: znet_open() called.\n", dev->name);
+
+ /* Turn on the 82501 SIA, using zenith-specific magic. */
+ outb(0x10, 0xe6); /* Select LAN control register */
+ outb(inb(0xe7) | 0x84, 0xe7); /* Turn on LAN power (bit 2). */
+ /* According to the Crynwr driver we should wait 50 msec. for the
+	   LAN clock to stabilize. My experiments indicate that the '593 can
+ be initialized immediately. The delay is probably needed for the
+ DC-to-DC converter to come up to full voltage, and for the oscillator
+	   to be spot-on at 20 MHz before transmitting.
+ Until this proves to be a problem we rely on the higher layers for the
+ delay and save allocating a timer entry. */
+
+ /* This follows the packet driver's lead, and checks for success. */
+ if (inb(ioaddr) != 0x10 && inb(ioaddr) != 0x00)
+ printk(KERN_WARNING "%s: Problem turning on the transceiver power.\n",
+ dev->name);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ hardware_init(dev);
+ dev->start = 1;
+
+ return 0;
+}
+
+static int znet_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (znet_debug > 4)
+ printk(KERN_DEBUG "%s: ZNet_send_packet(%ld).\n", dev->name, dev->tbusy);
+
+ /* Transmitter timeout, likely just recovery after suspending the machine. */
+ if (dev->tbusy) {
+ ushort event, tx_status, rx_offset, state;
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 10)
+ return 1;
+ outb(CMD0_STAT0, ioaddr); event = inb(ioaddr);
+ outb(CMD0_STAT1, ioaddr); tx_status = inw(ioaddr);
+ outb(CMD0_STAT2, ioaddr); rx_offset = inw(ioaddr);
+ outb(CMD0_STAT3, ioaddr); state = inb(ioaddr);
+ printk(KERN_WARNING "%s: transmit timed out, status %02x %04x %04x %02x,"
+ " resetting.\n", dev->name, event, tx_status, rx_offset, state);
+ if (tx_status == 0x0400)
+ printk(KERN_WARNING "%s: Tx carrier error, check transceiver cable.\n",
+ dev->name);
+ outb(CMD0_RESET, ioaddr);
+ hardware_init(dev);
+ }
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Check that the part hasn't reset itself, probably from suspend. */
+ outb(CMD0_STAT0, ioaddr);
+ if (inw(ioaddr) == 0x0010
+ && inw(ioaddr) == 0x0000
+ && inw(ioaddr) == 0x0010)
+ hardware_init(dev);
+
+	/* Block a timer-based transmit from overlapping.  This could be done
+	   more cleanly with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = (void *)skb->data;
+ ushort *tx_link = zn.tx_cur - 1;
+ ushort rnd_len = (length + 1)>>1;
+
+ {
+ short dma_port = ((zn.tx_dma&3)<<2) + IO_DMA2_BASE;
+ unsigned addr = inb(dma_port);
+ addr |= inb(dma_port) << 8;
+ addr <<= 1;
+ if (((int)zn.tx_cur & 0x1ffff) != addr)
+ printk(KERN_WARNING "Address mismatch at Tx: %#x vs %#x.\n",
+ (int)zn.tx_cur & 0xffff, addr);
+ zn.tx_cur = (ushort *)(((int)zn.tx_cur & 0xfe0000) | addr);
+ }
+
+ if (zn.tx_cur >= zn.tx_end)
+ zn.tx_cur = zn.tx_start;
+ *zn.tx_cur++ = length;
+ if (zn.tx_cur + rnd_len + 1 > zn.tx_end) {
+ int semi_cnt = (zn.tx_end - zn.tx_cur)<<1; /* Cvrt to byte cnt. */
+ memcpy(zn.tx_cur, buf, semi_cnt);
+ rnd_len -= semi_cnt>>1;
+ memcpy(zn.tx_start, buf + semi_cnt, length - semi_cnt);
+ zn.tx_cur = zn.tx_start + rnd_len;
+ } else {
+ memcpy(zn.tx_cur, buf, skb->len);
+ zn.tx_cur += rnd_len;
+ }
+ *zn.tx_cur++ = 0;
+ cli(); {
+ *tx_link = CMD0_TRANSMIT + CMD0_CHNL_1;
+ /* Is this always safe to do? */
+ outb(CMD0_TRANSMIT + CMD0_CHNL_1,ioaddr);
+ } sti();
+
+ dev->trans_start = jiffies;
+ if (znet_debug > 4)
+ printk(KERN_DEBUG "%s: Transmitter queued, length %d.\n", dev->name, length);
+ }
+ dev_kfree_skb(skb, FREE_WRITE);
+ return 0;
+}
+
+/* The ZNET interrupt handler. */
+static void znet_interrupt(int irq, struct pt_regs * regs)
+{
+ struct device *dev = irq2dev_map[irq];
+ int ioaddr;
+ int boguscnt = 20;
+
+ if (dev == NULL) {
+ printk(KERN_WARNING "znet_interrupt(): IRQ %d for unknown device.\n", irq);
+ return;
+ }
+
+ dev->interrupt = 1;
+ ioaddr = dev->base_addr;
+
+ outb(CMD0_STAT0, ioaddr);
+ do {
+ ushort status = inb(ioaddr);
+ if (znet_debug > 5) {
+ ushort result, rx_ptr, running;
+ outb(CMD0_STAT1, ioaddr);
+ result = inw(ioaddr);
+ outb(CMD0_STAT2, ioaddr);
+ rx_ptr = inw(ioaddr);
+ outb(CMD0_STAT3, ioaddr);
+ running = inb(ioaddr);
+ printk(KERN_DEBUG "%s: interrupt, status %02x, %04x %04x %02x serial %d.\n",
+ dev->name, status, result, rx_ptr, running, boguscnt);
+ }
+ if ((status & 0x80) == 0)
+ break;
+
+ if ((status & 0x0F) == 4) { /* Transmit done. */
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int tx_status;
+ outb(CMD0_STAT1, ioaddr);
+ tx_status = inw(ioaddr);
+ /* It's undocumented, but tx_status seems to match the i82586. */
+ if (tx_status & 0x2000) {
+ lp->stats.tx_packets++;
+ lp->stats.collisions += tx_status & 0xf;
+ } else {
+ if (tx_status & 0x0600) lp->stats.tx_carrier_errors++;
+ if (tx_status & 0x0100) lp->stats.tx_fifo_errors++;
+ if (!(tx_status & 0x0040)) lp->stats.tx_heartbeat_errors++;
+ if (tx_status & 0x0020) lp->stats.tx_aborted_errors++;
+ /* ...and the catch-all. */
+ if ((tx_status | 0x0760) != 0x0760)
+ lp->stats.tx_errors++;
+ }
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+
+ if ((status & 0x40)
+ || (status & 0x0f) == 11) {
+ znet_rx(dev);
+ }
+ /* Clear the interrupts we've handled. */
+ outb(CMD0_ACK,ioaddr);
+ } while (boguscnt--);
+
+ dev->interrupt = 0;
+ return;
+}
+
+static void znet_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = 1;
+ short next_frame_end_offset = 0; /* Offset of next frame start. */
+ short *cur_frame_end;
+ short cur_frame_end_offset;
+
+ outb(CMD0_STAT2, ioaddr);
+ cur_frame_end_offset = inw(ioaddr);
+
+ if (cur_frame_end_offset == zn.rx_cur - zn.rx_start) {
+ printk(KERN_WARNING "%s: Interrupted, but nothing to receive, offset %03x.\n",
+ dev->name, cur_frame_end_offset);
+ return;
+ }
+
+	/* Use the same method as the Crynwr driver: construct a forward list in
+ the same area of the backwards links we now have. This allows us to
+ pass packets to the upper layers in the order they were received --
+ important for fast-path sequential operations. */
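+	/* Layout note (added for clarity): the i82593 leaves a four-word
+	   trailer [lo_status][hi_status][lo_cnt][hi_cnt] just below each
+	   frame-end offset.  The loop below rewrites that trailer in place as
+	   [status][next_end_offset][count], stepping back over each frame's
+	   data to the previous trailer, so that the second loop can follow
+	   the resulting forward links in arrival order. */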
+ while (zn.rx_start + cur_frame_end_offset != zn.rx_cur
+ && ++boguscount < 5) {
+ unsigned short hi_cnt, lo_cnt, hi_status, lo_status;
+ int count, status;
+
+ if (cur_frame_end_offset < 4) {
+ /* Oh no, we have a special case: the frame trailer wraps around
+ the end of the ring buffer. We've saved space at the end of
+ the ring buffer for just this problem. */
+ memcpy(zn.rx_end, zn.rx_start, 8);
+ cur_frame_end_offset += (RX_BUF_SIZE/2);
+ }
+ cur_frame_end = zn.rx_start + cur_frame_end_offset - 4;
+
+ lo_status = *cur_frame_end++;
+ hi_status = *cur_frame_end++;
+ status = ((hi_status & 0xff) << 8) + (lo_status & 0xff);
+ lo_cnt = *cur_frame_end++;
+ hi_cnt = *cur_frame_end++;
+ count = ((hi_cnt & 0xff) << 8) + (lo_cnt & 0xff);
+
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Constructing trailer at location %03x, %04x %04x %04x %04x"
+ " count %#x status %04x.\n",
+ cur_frame_end_offset<<1, lo_status, hi_status, lo_cnt, hi_cnt,
+ count, status);
+ cur_frame_end[-4] = status;
+ cur_frame_end[-3] = next_frame_end_offset;
+ cur_frame_end[-2] = count;
+ next_frame_end_offset = cur_frame_end_offset;
+ cur_frame_end_offset -= ((count + 1)>>1) + 3;
+ if (cur_frame_end_offset < 0)
+ cur_frame_end_offset += RX_BUF_SIZE/2;
+ };
+
+ /* Now step forward through the list. */
+ do {
+ ushort *this_rfp_ptr = zn.rx_start + next_frame_end_offset;
+ int status = this_rfp_ptr[-4];
+ int pkt_len = this_rfp_ptr[-2];
+
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Looking at trailer ending at %04x status %04x length %03x"
+ " next %04x.\n", next_frame_end_offset<<1, status, pkt_len,
+ this_rfp_ptr[-3]<<1);
+ /* Once again we must assume that the i82586 docs apply. */
+ if ( ! (status & 0x2000)) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & 0x0800) lp->stats.rx_crc_errors++;
+ if (status & 0x0400) lp->stats.rx_frame_errors++;
+ if (status & 0x0200) lp->stats.rx_over_errors++; /* Wrong. */
+ if (status & 0x0100) lp->stats.rx_fifo_errors++;
+ if (status & 0x0080) lp->stats.rx_length_errors++;
+ } else if (pkt_len > 1536) {
+ lp->stats.rx_length_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len);
+ if (skb == NULL) {
+ if (znet_debug)
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+
+ if (&zn.rx_cur[(pkt_len+1)>>1] > zn.rx_end) {
+ int semi_cnt = (zn.rx_end - zn.rx_cur)<<1;
+ memcpy(skb_put(skb,semi_cnt), zn.rx_cur, semi_cnt);
+ memcpy(skb_put(skb,pkt_len-semi_cnt), zn.rx_start,
+ pkt_len - semi_cnt);
+ } else {
+ memcpy(skb_put(skb,pkt_len), zn.rx_cur, pkt_len);
+ if (znet_debug > 6) {
+ unsigned int *packet = (unsigned int *) skb->data;
+ printk(KERN_DEBUG "Packet data is %08x %08x %08x %08x.\n", packet[0],
+ packet[1], packet[2], packet[3]);
+ }
+ }
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ zn.rx_cur = this_rfp_ptr;
+ if (zn.rx_cur >= zn.rx_end)
+ zn.rx_cur -= RX_BUF_SIZE/2;
+ update_stop_hit(ioaddr, (zn.rx_cur - zn.rx_start)<<1);
+ next_frame_end_offset = this_rfp_ptr[-3];
+ if (next_frame_end_offset == 0) /* Read all the frames? */
+ break; /* Done for now */
+ this_rfp_ptr = zn.rx_start + next_frame_end_offset;
+ } while (--boguscount);
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(INET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ return;
+}
+
+/* The inverse routine to znet_open(). */
+static int znet_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ outb(CMD0_RESET, ioaddr); /* CMD0_RESET */
+
+ disable_dma(zn.rx_dma);
+ disable_dma(zn.tx_dma);
+
+ free_irq(dev->irq);
+
+ if (znet_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ /* Turn off transceiver power. */
+ outb(0x10, 0xe6); /* Select LAN control register */
+	outb(inb(0xe7) & ~0x84, 0xe7); /* Turn off LAN power (bit 2). */
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *net_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ As a side effect this routine must also initialize the device parameters.
+ This is taken advantage of in open().
+
+ N.B. that we change i593_init[] in place. This (properly) makes the
+ mode change persistent, but must be changed if this code is moved to
+ a multiple adaptor environment.
+ */
+static void set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if (dev->flags&IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ i593_init[7] &= ~3; i593_init[7] |= 1;
+ i593_init[13] &= ~8; i593_init[13] |= 8;
+ } else if (dev->mc_list || (dev->flags&IFF_ALLMULTI)) {
+ /* Enable accept-all-multicast mode */
+ i593_init[7] &= ~3; i593_init[7] |= 0;
+ i593_init[13] &= ~8; i593_init[13] |= 8;
+ } else { /* Enable normal mode. */
+ i593_init[7] &= ~3; i593_init[7] |= 0;
+ i593_init[13] &= ~8; i593_init[13] |= 0;
+ }
+ *zn.tx_cur++ = sizeof(i593_init);
+ memcpy(zn.tx_cur, i593_init, sizeof(i593_init));
+ zn.tx_cur += sizeof(i593_init)/2;
+ outb(CMD0_CONFIGURE+CMD0_CHNL_1, ioaddr);
+#ifdef not_tested
+ if (num_addrs > 0) {
+ int addrs_len = 6*num_addrs;
+ *zn.tx_cur++ = addrs_len;
+ memcpy(zn.tx_cur, addrs, addrs_len);
+ outb(CMD0_MULTICAST_LIST+CMD0_CHNL_1, ioaddr);
+ zn.tx_cur += addrs_len>>1;
+ }
+#endif
+}
+
+void show_dma(void)
+{
+ short dma_port = ((zn.tx_dma&3)<<2) + IO_DMA2_BASE;
+ unsigned addr = inb(dma_port);
+ addr |= inb(dma_port) << 8;
+ printk("Addr: %04x cnt:%3x...", addr<<1, get_dma_residue(zn.tx_dma));
+}
+
+/* Initialize the hardware. We have to do this when the board is open()ed
+ or when we come out of suspend mode. */
+static void hardware_init(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ zn.rx_cur = zn.rx_start;
+ zn.tx_cur = zn.tx_start;
+
+ /* Reset the chip, and start it up. */
+ outb(CMD0_RESET, ioaddr);
+
+ cli(); { /* Protect against a DMA flip-flop */
+ disable_dma(zn.rx_dma); /* reset by an interrupting task. */
+ clear_dma_ff(zn.rx_dma);
+ set_dma_mode(zn.rx_dma, DMA_RX_MODE);
+ set_dma_addr(zn.rx_dma, (unsigned int) zn.rx_start);
+ set_dma_count(zn.rx_dma, RX_BUF_SIZE);
+ enable_dma(zn.rx_dma);
+ /* Now set up the Tx channel. */
+ disable_dma(zn.tx_dma);
+ clear_dma_ff(zn.tx_dma);
+ set_dma_mode(zn.tx_dma, DMA_TX_MODE);
+ set_dma_addr(zn.tx_dma, (unsigned int) zn.tx_start);
+ set_dma_count(zn.tx_dma, zn.tx_buf_len<<1);
+ enable_dma(zn.tx_dma);
+ } sti();
+
+ if (znet_debug > 1)
+ printk(KERN_DEBUG "%s: Initializing the i82593, tx buf %p... ", dev->name,
+ zn.tx_start);
+ /* Do an empty configure command, just like the Crynwr driver. This
+	   resets the chip to its default values. */
+ *zn.tx_cur++ = 0;
+ *zn.tx_cur++ = 0;
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+ outb(CMD0_CONFIGURE+CMD0_CHNL_1, ioaddr);
+ *zn.tx_cur++ = sizeof(i593_init);
+ memcpy(zn.tx_cur, i593_init, sizeof(i593_init));
+ zn.tx_cur += sizeof(i593_init)/2;
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+ outb(CMD0_CONFIGURE+CMD0_CHNL_1, ioaddr);
+ *zn.tx_cur++ = 6;
+ memcpy(zn.tx_cur, dev->dev_addr, 6);
+ zn.tx_cur += 3;
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+ outb(CMD0_IA_SETUP + CMD0_CHNL_1, ioaddr);
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+
+ update_stop_hit(ioaddr, 8192);
+ if (znet_debug > 1) printk("enabling Rx.\n");
+ outb(CMD0_Rx_ENABLE+CMD0_CHNL_0, ioaddr);
+ dev->tbusy = 0;
+}
+
+static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset)
+{
+ outb(CMD0_PORT_1, ioaddr);
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Updating stop hit with value %02x.\n",
+ (rx_stop_offset >> 6) | 0x80);
+ outb((rx_stop_offset >> 6) | 0x80, ioaddr);
+ outb(CMD1_PORT_0, ioaddr);
+}
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c znet.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/pci/bios32.c b/i386/i386at/gpl/linux/pci/bios32.c
new file mode 100644
index 00000000..e10fdab3
--- /dev/null
+++ b/i386/i386at/gpl/linux/pci/bios32.c
@@ -0,0 +1,460 @@
+/*
+ * bios32.c - BIOS32, PCI BIOS functions.
+ *
+ * Sponsored by
+ * iX Multiuser Multitasking Magazine
+ * Hannover, Germany
+ * hm@ix.de
+ *
+ * Copyright 1993, 1994 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * Drew@Colorado.EDU
+ * +1 (303) 786-7975
+ *
+ * For more information, please consult
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * M/S HF3-15A
+ * 5200 N.E. Elam Young Parkway
+ * Hillsboro, Oregon 97124-6497
+ * +1 (503) 696-2000
+ * +1 (800) 433-5177
+ *
+ * Manuals are $25 each or $50 for all three, plus $7 shipping
+ * within the United States, $35 abroad.
+ *
+ *
+ * CHANGELOG :
+ * Jun 17, 1994 : Modified to accommodate the broken pre-PCI BIOS SPECIFICATION
+ * Revision 2.0 present on <thys@dennis.ee.up.ac.za>'s ASUS mainboard.
+ *
+ * Jan 5, 1995 : Modified to probe PCI hardware at boot time by Frederic
+ * Potter, potter@cao-vlsi.ibp.fr
+ *
+ * Jan 10, 1995 : Modified to store the information about configured pci
+ * devices into a list, which can be accessed via /proc/pci by
+ * Curtis Varner, cvarner@cs.ucr.edu
+ *
+ * Jan 12, 1995 : CPU-PCI bridge optimization support by Frederic Potter.
+ * Alpha version. Intel & UMC chipset support only.
+ *
+ * Apr 16, 1995 : Source merge with the DEC Alpha PCI support. Most of the code
+ * moved to drivers/pci/pci.c.
+ *
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+
+#include <asm/segment.h>
+
+#define PCIBIOS_PCI_FUNCTION_ID 0xb1XX
+#define PCIBIOS_PCI_BIOS_PRESENT 0xb101
+#define PCIBIOS_FIND_PCI_DEVICE 0xb102
+#define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103
+#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106
+#define PCIBIOS_READ_CONFIG_BYTE 0xb108
+#define PCIBIOS_READ_CONFIG_WORD 0xb109
+#define PCIBIOS_READ_CONFIG_DWORD 0xb10a
+#define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b
+#define PCIBIOS_WRITE_CONFIG_WORD 0xb10c
+#define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d
+
+
+/* BIOS32 signature: "_32_" */
+#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
+
+/* PCI signature: "PCI " */
+#define PCI_SIGNATURE (('P' << 0) + ('C' << 8) + ('I' << 16) + (' ' << 24))
+
+/* PCI service signature: "$PCI" */
+#define PCI_SERVICE (('$' << 0) + ('P' << 8) + ('C' << 16) + ('I' << 24))
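+
+/* For illustration: these constants pack four ASCII bytes little-endian so
+   that a 32-bit load from memory holding the string compares directly.
+   BIOS32_SIGNATURE works out to
+	0x5f + (0x33 << 8) + (0x32 << 16) + (0x5f << 24) = 0x5f32335f,
+   which is exactly what check->fields.signature reads where "_32_" is
+   stored in memory on a little-endian machine. */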
+
+/*
+ * This is the standard structure used to identify the entry point
+ * to the BIOS32 Service Directory, as documented in
+ * Standard BIOS 32-bit Service Directory Proposal
+ * Revision 0.4 May 24, 1993
+ * Phoenix Technologies Ltd.
+ * Norwood, MA
+ * and the PCI BIOS specification.
+ */
+
+union bios32 {
+ struct {
+ unsigned long signature; /* _32_ */
+ unsigned long entry; /* 32 bit physical address */
+ unsigned char revision; /* Revision level, 0 */
+ unsigned char length; /* Length in paragraphs should be 01 */
+ unsigned char checksum; /* All bytes must add up to zero */
+ unsigned char reserved[5]; /* Must be zero */
+ } fields;
+ char chars[16];
+};
+
+/*
+ * Physical address of the service directory. I don't know if we're
+ * allowed to have more than one of these or not, so just in case
+ * we'll make pcibios_present() take a memory start parameter and store
+ * the array there.
+ */
+
+static unsigned long bios32_entry = 0;
+static struct {
+ unsigned long address;
+ unsigned short segment;
+} bios32_indirect = { 0, KERNEL_CS };
+
+#ifdef CONFIG_PCI
+/*
+ * Returns the entry point for the given service, NULL on error
+ */
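+/* Calling-convention note (added for clarity): the directory entry point is
+   reached via a far call with %eax = service id and %ebx = 0; on return %al
+   holds the status (0 = found, 0x80 = service not present), %ebx the base
+   address of the service, %ecx its length and %edx the entry offset within
+   it -- hence the "address + entry" result below. */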
+
+static unsigned long bios32_service(unsigned long service)
+{
+ unsigned char return_code; /* %al */
+ unsigned long address; /* %ebx */
+ unsigned long length; /* %ecx */
+ unsigned long entry; /* %edx */
+
+ __asm__("lcall (%%edi)"
+ : "=a" (return_code),
+ "=b" (address),
+ "=c" (length),
+ "=d" (entry)
+ : "0" (service),
+ "1" (0),
+ "D" (&bios32_indirect));
+
+ switch (return_code) {
+ case 0:
+ return address + entry;
+ case 0x80: /* Not present */
+ printk("bios32_service(%ld) : not present\n", service);
+ return 0;
+ default: /* Shouldn't happen */
+ printk("bios32_service(%ld) : returned 0x%x, mail drew@colorado.edu\n",
+ service, return_code);
+ return 0;
+ }
+}
+
+static long pcibios_entry = 0;
+static struct {
+ unsigned long address;
+ unsigned short segment;
+} pci_indirect = { 0, KERNEL_CS };
+
+
+extern unsigned long check_pcibios(unsigned long memory_start, unsigned long memory_end)
+{
+ unsigned long signature;
+ unsigned char present_status;
+ unsigned char major_revision;
+ unsigned char minor_revision;
+ int pack;
+
+ if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
+ pci_indirect.address = pcibios_entry;
+
+ __asm__("lcall (%%edi)\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:\tshl $8, %%eax\n\t"
+ "movw %%bx, %%ax"
+ : "=d" (signature),
+ "=a" (pack)
+ : "1" (PCIBIOS_PCI_BIOS_PRESENT),
+ "D" (&pci_indirect)
+ : "bx", "cx");
+
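+		/* Decoding note (added for clarity): the PCI BIOS returns the BCD
+		   interface version in %bh/%bl and the present status in %ah; the
+		   asm above folds these into 'pack' as
+		   (status << 16) | (major << 8) | minor, unpacked just below. */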
+ present_status = (pack >> 16) & 0xff;
+ major_revision = (pack >> 8) & 0xff;
+ minor_revision = pack & 0xff;
+ if (present_status || (signature != PCI_SIGNATURE)) {
+ printk ("pcibios_init : %s : BIOS32 Service Directory says PCI BIOS is present,\n"
+ " but PCI_BIOS_PRESENT subfunction fails with present status of 0x%x\n"
+ " and signature of 0x%08lx (%c%c%c%c). mail drew@Colorado.EDU\n",
+ (signature == PCI_SIGNATURE) ? "WARNING" : "ERROR",
+ present_status, signature,
+ (char) (signature >> 0), (char) (signature >> 8),
+ (char) (signature >> 16), (char) (signature >> 24));
+
+ if (signature != PCI_SIGNATURE)
+ pcibios_entry = 0;
+ }
+ if (pcibios_entry) {
+ printk ("pcibios_init : PCI BIOS revision %x.%02x entry at 0x%lx\n",
+ major_revision, minor_revision, pcibios_entry);
+ }
+ }
+ return memory_start;
+}
+
+int pcibios_present(void)
+{
+ return pcibios_entry ? 1 : 0;
+}
+
+int pcibios_find_class (unsigned int class_code, unsigned short index,
+ unsigned char *bus, unsigned char *device_fn)
+{
+ unsigned long bx;
+ unsigned long ret;
+
+ __asm__ ("lcall (%%edi)\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=b" (bx),
+ "=a" (ret)
+ : "1" (PCIBIOS_FIND_PCI_CLASS_CODE),
+ "c" (class_code),
+ "S" ((int) index),
+ "D" (&pci_indirect));
+ *bus = (bx >> 8) & 0xff;
+ *device_fn = bx & 0xff;
+ return (int) (ret & 0xff00) >> 8;
+}
+
+
+int pcibios_find_device (unsigned short vendor, unsigned short device_id,
+ unsigned short index, unsigned char *bus, unsigned char *device_fn)
+{
+ unsigned short bx;
+ unsigned short ret;
+
+ __asm__("lcall (%%edi)\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=b" (bx),
+ "=a" (ret)
+ : "1" (PCIBIOS_FIND_PCI_DEVICE),
+ "c" (device_id),
+ "d" (vendor),
+ "S" ((int) index),
+ "D" (&pci_indirect));
+ *bus = (bx >> 8) & 0xff;
+ *device_fn = bx & 0xff;
+ return (int) (ret & 0xff00) >> 8;
+}
+
+int pcibios_read_config_byte(unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned char *value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+
+ __asm__("lcall (%%esi)\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=c" (*value),
+ "=a" (ret)
+ : "1" (PCIBIOS_READ_CONFIG_BYTE),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ return (int) (ret & 0xff00) >> 8;
+}
+
+int pcibios_read_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short *value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+
+ __asm__("lcall (%%esi)\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=c" (*value),
+ "=a" (ret)
+ : "1" (PCIBIOS_READ_CONFIG_WORD),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ return (int) (ret & 0xff00) >> 8;
+}
+
+int pcibios_read_config_dword (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned int *value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+
+ __asm__("lcall (%%esi)\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=c" (*value),
+ "=a" (ret)
+ : "1" (PCIBIOS_READ_CONFIG_DWORD),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ return (int) (ret & 0xff00) >> 8;
+}
+
+int pcibios_write_config_byte (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned char value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+
+ __asm__("lcall (%%esi)\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=a" (ret)
+ : "0" (PCIBIOS_WRITE_CONFIG_BYTE),
+ "c" (value),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ return (int) (ret & 0xff00) >> 8;
+}
+
+int pcibios_write_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+
+ __asm__("lcall (%%esi)\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=a" (ret)
+ : "0" (PCIBIOS_WRITE_CONFIG_WORD),
+ "c" (value),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ return (int) (ret & 0xff00) >> 8;
+}
+
+int pcibios_write_config_dword (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned int value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+
+ __asm__("lcall (%%esi)\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=a" (ret)
+ : "0" (PCIBIOS_WRITE_CONFIG_DWORD),
+ "c" (value),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ return (int) (ret & 0xff00) >> 8;
+}
+
+const char *pcibios_strerror (int error)
+{
+ static char buf[80];
+
+ switch (error) {
+ case PCIBIOS_SUCCESSFUL:
+ return "SUCCESSFUL";
+
+ case PCIBIOS_FUNC_NOT_SUPPORTED:
+ return "FUNC_NOT_SUPPORTED";
+
+ case PCIBIOS_BAD_VENDOR_ID:
+			return "BAD_VENDOR_ID";
+
+ case PCIBIOS_DEVICE_NOT_FOUND:
+ return "DEVICE_NOT_FOUND";
+
+ case PCIBIOS_BAD_REGISTER_NUMBER:
+ return "BAD_REGISTER_NUMBER";
+
+ case PCIBIOS_SET_FAILED:
+ return "SET_FAILED";
+
+ case PCIBIOS_BUFFER_TOO_SMALL:
+ return "BUFFER_TOO_SMALL";
+
+ default:
+ sprintf (buf, "UNKNOWN RETURN 0x%x", error);
+ return buf;
+ }
+}
+
+
+unsigned long pcibios_fixup(unsigned long mem_start, unsigned long mem_end)
+{
+	return mem_start;
+}
+
+
+#endif
+
+unsigned long pcibios_init(unsigned long memory_start, unsigned long memory_end)
+{
+ union bios32 *check;
+ unsigned char sum;
+ int i, length;
+
+ /*
+ * Follow the standard procedure for locating the BIOS32 Service
+ * directory by scanning the permissible address range from
+ * 0xe0000 through 0xfffff for a valid BIOS32 structure.
+ *
+ */
+
+ for (check = (union bios32 *) 0xe0000; check <= (union bios32 *) 0xffff0; ++check) {
+ if (check->fields.signature != BIOS32_SIGNATURE)
+ continue;
+ length = check->fields.length * 16;
+ if (!length)
+ continue;
+ sum = 0;
+ for (i = 0; i < length ; ++i)
+ sum += check->chars[i];
+ if (sum != 0)
+ continue;
+ if (check->fields.revision != 0) {
+ printk("pcibios_init : unsupported revision %d at 0x%p, mail drew@colorado.edu\n",
+ check->fields.revision, check);
+ continue;
+ }
+ printk ("pcibios_init : BIOS32 Service Directory structure at 0x%p\n", check);
+ if (!bios32_entry) {
+ if (check->fields.entry >= 0x100000) {
+ printk("pcibios_init: entry in high memory, unable to access\n");
+ } else {
+ bios32_indirect.address = bios32_entry = check->fields.entry;
+ printk ("pcibios_init : BIOS32 Service Directory entry at 0x%lx\n", bios32_entry);
+ }
+ } else {
+ printk ("pcibios_init : multiple entries, mail drew@colorado.edu\n");
+ /*
+ * Jeremy Fitzhardinge reports at least one PCI BIOS
+ * with two different service directories, and as both
+ * worked for him, we'll just mention the fact, and
+ * not actually disallow it..
+ */
+ }
+ }
+#ifdef CONFIG_PCI
+ if (bios32_entry) {
+ memory_start = check_pcibios (memory_start, memory_end);
+ }
+#endif
+ return memory_start;
+}
diff --git a/i386/i386at/gpl/linux/pci/pci.c b/i386/i386at/gpl/linux/pci/pci.c
new file mode 100644
index 00000000..03846d02
--- /dev/null
+++ b/i386/i386at/gpl/linux/pci/pci.c
@@ -0,0 +1,915 @@
+/*
+ * drivers/pci/pci.c
+ *
+ * PCI services that are built on top of the BIOS32 service.
+ *
+ * Copyright 1993, 1994, 1995 Drew Eckhardt, Frederic Potter,
+ * David Mosberger-Tang
+ */
+#include <linux/config.h>
+#include <linux/ptrace.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+
+struct pci_bus pci_root;
+struct pci_dev *pci_devices = 0;
+
+
+/*
+ * The bridge_id field is an offset of an item into the array
+ * BRIDGE_MAPPING_TYPE. 0xff indicates that the device is not a PCI
+ * bridge, or that we don't know for the moment how to configure it.
+ * I'm trying to do my best so that the kernel stays small. Different
+ * chipset can have same optimization structure. i486 and pentium
+ * chipsets from the same manufacturer usually have the same
+ * structure.
+ */
+#define DEVICE(vid,did,name) \
+ {PCI_VENDOR_ID_##vid, PCI_DEVICE_ID_##did, (name), 0xff}
+
+#define BRIDGE(vid,did,name,bridge) \
+ {PCI_VENDOR_ID_##vid, PCI_DEVICE_ID_##did, (name), (bridge)}
+
+/*
+ * Sorted in ascending order by vendor and device.
+ * Use binary search for lookup. If you add a device make sure
+ * it is sequential by both vendor and device id.
+ */
+struct pci_dev_info dev_info[] = {
+ DEVICE( COMPAQ, COMPAQ_1280, "QVision 1280/p"),
+ DEVICE( COMPAQ, COMPAQ_THUNDER, "ThunderLAN"),
+ DEVICE( NCR, NCR_53C810, "53c810"),
+ DEVICE( NCR, NCR_53C820, "53c820"),
+ DEVICE( NCR, NCR_53C825, "53c825"),
+ DEVICE( NCR, NCR_53C815, "53c815"),
+ DEVICE( ATI, ATI_68800, "68800AX"),
+ DEVICE( ATI, ATI_215CT222, "215CT222"),
+ DEVICE( ATI, ATI_210888CX, "210888CX"),
+ DEVICE( ATI, ATI_210888GX, "210888GX"),
+ DEVICE( VLSI, VLSI_82C592, "82C592-FC1"),
+ DEVICE( VLSI, VLSI_82C593, "82C593-FC1"),
+ DEVICE( VLSI, VLSI_82C594, "82C594-AFC2"),
+ DEVICE( VLSI, VLSI_82C597, "82C597-AFC2"),
+ DEVICE( ADL, ADL_2301, "2301"),
+ DEVICE( NS, NS_87410, "87410"),
+ DEVICE( TSENG, TSENG_W32P_2, "ET4000W32P"),
+ DEVICE( TSENG, TSENG_W32P_b, "ET4000W32P rev B"),
+ DEVICE( TSENG, TSENG_W32P_c, "ET4000W32P rev C"),
+ DEVICE( TSENG, TSENG_W32P_d, "ET4000W32P rev D"),
+ DEVICE( WEITEK, WEITEK_P9000, "P9000"),
+ DEVICE( WEITEK, WEITEK_P9100, "P9100"),
+ BRIDGE( DEC, DEC_BRD, "DC21050", 0x00),
+ DEVICE( DEC, DEC_TULIP, "DC21040"),
+ DEVICE( DEC, DEC_TGA, "DC21030"),
+ DEVICE( DEC, DEC_TULIP_FAST, "DC21140"),
+ DEVICE( DEC, DEC_FDDI, "DEFPA"),
+ DEVICE( DEC, DEC_TULIP_PLUS, "DC21041"),
+ DEVICE( CIRRUS, CIRRUS_5430, "GD 5430"),
+ DEVICE( CIRRUS, CIRRUS_5434_4, "GD 5434"),
+ DEVICE( CIRRUS, CIRRUS_5434_8, "GD 5434"),
+ DEVICE( CIRRUS, CIRRUS_5436, "GD 5436"),
+ DEVICE( CIRRUS, CIRRUS_6205, "GD 6205"),
+ DEVICE( CIRRUS, CIRRUS_6729, "CL 6729"),
+ DEVICE( CIRRUS, CIRRUS_7542, "CL 7542"),
+ DEVICE( CIRRUS, CIRRUS_7543, "CL 7543"),
+ DEVICE( IBM, IBM_82G2675, "82G2675"),
+ DEVICE( WD, WD_7197, "WD 7197"),
+ DEVICE( AMD, AMD_LANCE, "79C970"),
+ DEVICE( AMD, AMD_SCSI, "53C974"),
+ DEVICE( TRIDENT, TRIDENT_9420, "TG 9420"),
+ DEVICE( TRIDENT, TRIDENT_9440, "TG 9440"),
+ DEVICE( TRIDENT, TRIDENT_9660, "TG 9660"),
+ DEVICE( AI, AI_M1435, "M1435"),
+ DEVICE( MATROX, MATROX_MGA_2, "Atlas PX2085"),
+	DEVICE( MATROX,	MATROX_MIL, "Millenium"),
+ DEVICE( MATROX, MATROX_MGA_IMP, "MGA Impression"),
+ DEVICE( CT, CT_65545, "65545"),
+ DEVICE( FD, FD_36C70, "TMC-18C30"),
+ DEVICE( SI, SI_6201, "6201"),
+ DEVICE( SI, SI_6202, "6202"),
+ DEVICE( SI, SI_503, "85C503"),
+ DEVICE( SI, SI_501, "85C501"),
+ DEVICE( SI, SI_496, "85C496"),
+ DEVICE( SI, SI_601, "85C601"),
+ DEVICE( SI, SI_5511, "85C5511"),
+ DEVICE( SI, SI_5513, "85C5513"),
+ DEVICE( HP, HP_J2585A, "J2585A"),
+ DEVICE( PCTECH, PCTECH_RZ1000, "RZ1000 (buggy)"),
+ DEVICE( DPT, DPT, "SmartCache/Raid"),
+ DEVICE( OPTI, OPTI_92C178, "92C178"),
+ DEVICE( OPTI, OPTI_82C557, "82C557"),
+ DEVICE( OPTI, OPTI_82C558, "82C558"),
+ DEVICE( OPTI, OPTI_82C621, "82C621"),
+ DEVICE( OPTI, OPTI_82C822, "82C822"),
+ DEVICE( SGS, SGS_2000, "STG 2000X"),
+ DEVICE( SGS, SGS_1764, "STG 1764X"),
+ DEVICE( BUSLOGIC, BUSLOGIC_946C_2,"BT-946C"),
+ DEVICE( BUSLOGIC, BUSLOGIC_946C, "BT-946C"),
+ DEVICE( BUSLOGIC, BUSLOGIC_930, "BT-930"),
+ DEVICE( OAK, OAK_OTI107, "OTI107"),
+ DEVICE( PROMISE, PROMISE_5300, "DC5030"),
+ DEVICE( N9, N9_I128, "Imagine 128"),
+ DEVICE( N9, N9_I128_2, "Imagine 128v2"),
+ DEVICE( UMC, UMC_UM8673F, "UM8673F"),
+ BRIDGE( UMC, UMC_UM8891A, "UM8891A", 0x01),
+ DEVICE( UMC, UMC_UM8886BF, "UM8886BF"),
+ DEVICE( UMC, UMC_UM8886A, "UM8886A"),
+ BRIDGE( UMC, UMC_UM8881F, "UM8881F", 0x02),
+ DEVICE( UMC, UMC_UM8886F, "UM8886F"),
+ DEVICE( UMC, UMC_UM9017F, "UM9017F"),
+ DEVICE( UMC, UMC_UM8886N, "UM8886N"),
+ DEVICE( UMC, UMC_UM8891N, "UM8891N"),
+ DEVICE( X, X_AGX016, "ITT AGX016"),
+ DEVICE( NEXGEN, NEXGEN_82C501, "82C501"),
+ DEVICE( QLOGIC, QLOGIC_ISP1020, "ISP1020"),
+ DEVICE( QLOGIC, QLOGIC_ISP1022, "ISP1022"),
+ DEVICE( LEADTEK, LEADTEK_805, "S3 805"),
+ DEVICE( CONTAQ, CONTAQ_82C599, "82C599"),
+ DEVICE( CMD, CMD_640, "640 (buggy)"),
+ DEVICE( CMD, CMD_646, "646"),
+ DEVICE( VISION, VISION_QD8500, "QD-8500"),
+ DEVICE( VISION, VISION_QD8580, "QD-8580"),
+ DEVICE( SIERRA, SIERRA_STB, "STB Horizon 64"),
+ DEVICE( ACC, ACC_2056, "2056"),
+ DEVICE( WINBOND, WINBOND_83769, "W83769F"),
+ DEVICE( WINBOND, WINBOND_82C105, "SL82C105"),
+ DEVICE( 3COM, 3COM_3C590, "3C590 10bT"),
+ DEVICE( 3COM, 3COM_3C595TX, "3C595 100bTX"),
+ DEVICE( 3COM, 3COM_3C595T4, "3C595 100bT4"),
+ DEVICE( 3COM, 3COM_3C595MII, "3C595 100b-MII"),
+ DEVICE( AL, AL_M1445, "M1445"),
+ DEVICE( AL, AL_M1449, "M1449"),
+ DEVICE( AL, AL_M1451, "M1451"),
+ DEVICE( AL, AL_M1461, "M1461"),
+ DEVICE( AL, AL_M1489, "M1489"),
+ DEVICE( AL, AL_M1511, "M1511"),
+ DEVICE( AL, AL_M1513, "M1513"),
+ DEVICE( AL, AL_M4803, "M4803"),
+ DEVICE( ASP, ASP_ABP940, "ABP940"),
+ DEVICE( IMS, IMS_8849, "8849"),
+ DEVICE( TEKRAM2, TEKRAM2_690c, "DC690c"),
+ DEVICE( AMCC, AMCC_MYRINET, "Myrinet PCI (M2-PCI-32)"),
+ DEVICE( INTERG, INTERG_1680, "IGA-1680"),
+ DEVICE( REALTEK, REALTEK_8029, "8029"),
+ DEVICE( INIT, INIT_320P, "320 P"),
+ DEVICE( VIA, VIA_82C505, "VT 82C505"),
+ DEVICE( VIA, VIA_82C561, "VT 82C561"),
+ DEVICE( VIA, VIA_82C576, "VT 82C576 3V"),
+ DEVICE( VIA, VIA_82C416, "VT 82C416MV"),
+ DEVICE( VORTEX, VORTEX_GDT, "GDT 6000b"),
+ DEVICE( EF, EF_ATM_FPGA, "155P-MF1 (FPGA)"),
+ DEVICE( EF, EF_ATM_ASIC, "155P-MF1 (ASIC)"),
+ DEVICE( IMAGINGTECH, IMAGINGTECH_ICPCI, "MVC IC-PCI"),
+ DEVICE( FORE, FORE_PCA200PC, "PCA-200PC"),
+ DEVICE( PLX, PLX_9060, "PCI9060 i960 bridge"),
+ DEVICE( ALLIANCE, ALLIANCE_PROMOTIO, "Promotion-6410"),
+ DEVICE( ALLIANCE, ALLIANCE_PROVIDEO, "Provideo"),
+ DEVICE( MUTECH, MUTECH_MV1000, "MV-1000"),
+ DEVICE( ZEITNET, ZEITNET_1221, "1221"),
+ DEVICE( ZEITNET, ZEITNET_1225, "1225"),
+ DEVICE( SPECIALIX, SPECIALIX_XIO, "XIO/SIO host"),
+ DEVICE( SPECIALIX, SPECIALIX_RIO, "RIO host"),
+ DEVICE( RP, RP8OCTA, "RocketPort 8 Oct"),
+ DEVICE( RP, RP8INTF, "RocketPort 8 Intf"),
+ DEVICE( RP, RP16INTF, "RocketPort 16 Intf"),
+ DEVICE( RP, RP32INTF, "RocketPort 32 Intf"),
+ DEVICE( CYCLADES, CYCLADES_Y, "Cyclome-Y"),
+ DEVICE( SYMPHONY, SYMPHONY_101, "82C101"),
+ DEVICE( TEKRAM, TEKRAM_DC290, "DC-290"),
+ DEVICE( AVANCE, AVANCE_2302, "ALG-2302"),
+ DEVICE( S3, S3_811, "Trio32/Trio64"),
+ DEVICE( S3, S3_868, "Vision 868"),
+ DEVICE( S3, S3_928, "Vision 928-P"),
+ DEVICE( S3, S3_864_1, "Vision 864-P"),
+ DEVICE( S3, S3_864_2, "Vision 864-P"),
+ DEVICE( S3, S3_964_1, "Vision 964-P"),
+ DEVICE( S3, S3_964_2, "Vision 964-P"),
+ DEVICE( S3, S3_968, "Vision 968"),
+ DEVICE( INTEL, INTEL_82375, "82375EB"),
+ BRIDGE( INTEL, INTEL_82424, "82424ZX Saturn", 0x00),
+ DEVICE( INTEL, INTEL_82378, "82378IB"),
+ DEVICE( INTEL, INTEL_82430, "82430ZX Aries"),
+ BRIDGE( INTEL, INTEL_82434, "82434LX Mercury/Neptune", 0x00),
+ DEVICE( INTEL, INTEL_7116, "SAA7116"),
+ DEVICE( INTEL, INTEL_82596, "82596"),
+ DEVICE( INTEL, INTEL_82865, "82865"),
+ DEVICE( INTEL, INTEL_82557, "82557"),
+ DEVICE( INTEL, INTEL_82437, "82437"),
+ DEVICE( INTEL, INTEL_82371_0, "82371 Triton PIIX"),
+ DEVICE( INTEL, INTEL_82371_1, "82371 Triton PIIX"),
+ DEVICE( INTEL, INTEL_P6, "Orion P6"),
+ DEVICE( ADAPTEC, ADAPTEC_7850, "AIC-7850"),
+ DEVICE( ADAPTEC, ADAPTEC_7870, "AIC-7870"),
+ DEVICE( ADAPTEC, ADAPTEC_7871, "AIC-7871"),
+ DEVICE( ADAPTEC, ADAPTEC_7872, "AIC-7872"),
+ DEVICE( ADAPTEC, ADAPTEC_7873, "AIC-7873"),
+ DEVICE( ADAPTEC, ADAPTEC_7874, "AIC-7874"),
+ DEVICE( ADAPTEC, ADAPTEC_7880, "AIC-7880U"),
+ DEVICE( ADAPTEC, ADAPTEC_7881, "AIC-7881U"),
+ DEVICE( ADAPTEC, ADAPTEC_7882, "AIC-7882U"),
+ DEVICE( ADAPTEC, ADAPTEC_7883, "AIC-7883U"),
+ DEVICE( ADAPTEC, ADAPTEC_7884, "AIC-7884U"),
+ DEVICE( ATRONICS, ATRONICS_2015, "IDE-2015PL"),
+ DEVICE( HER, HER_STING, "Stingray"),
+ DEVICE( HER, HER_STINGARK, "Stingray ARK 2000PV")
+};
+
+
+#ifdef CONFIG_PCI_OPTIMIZE
+
+/*
+ * An item of this structure has the following meaning:
+ * for each optimization, the register address, the mask
+ * and value to write to turn it on.
+ * There are 5 optimizations for the moment:
+ *	L2 cache write back rather than write through
+ * Posted Write for CPU to PCI enable
+ * Posted Write for CPU to MEMORY enable
+ * Posted Write for PCI to MEMORY enable
+ * PCI Burst enable
+ *
+ * Half of the BIOSes I've met don't allow you to turn these on, yet you
+ * can gain more than 15% on graphics accesses using these
+ * optimizations...
+ */
+struct optimization_type {
+ const char *type;
+ const char *off;
+ const char *on;
+} bridge_optimization[] = {
+ {"Cache L2", "write through", "write back"},
+ {"CPU-PCI posted write", "off", "on"},
+ {"CPU-Memory posted write", "off", "on"},
+ {"PCI-Memory posted write", "off", "on"},
+ {"PCI burst", "off", "on"}
+};
+
+#define NUM_OPTIMIZATIONS \
+ (sizeof(bridge_optimization) / sizeof(bridge_optimization[0]))
+
+struct bridge_mapping_type {
+ unsigned char addr; /* config space address */
+ unsigned char mask;
+ unsigned char value;
+} bridge_mapping[] = {
+ /*
+ * Intel Neptune/Mercury/Saturn:
+ * If the internal cache is write back,
+ * the L2 cache must be write through!
+	 * I still have to work out how to control that;
+	 * for the moment, we won't touch the cache.
+ */
+ {0x0 ,0x02 ,0x02 },
+ {0x53 ,0x02 ,0x02 },
+ {0x53 ,0x01 ,0x01 },
+ {0x54 ,0x01 ,0x01 },
+ {0x54 ,0x02 ,0x02 },
+
+ /*
+ * UMC 8891A Pentium chipset:
+ * Why did you think UMC was cheaper ??
+ */
+ {0x50 ,0x10 ,0x00 },
+ {0x51 ,0x40 ,0x40 },
+ {0x0 ,0x0 ,0x0 },
+ {0x0 ,0x0 ,0x0 },
+ {0x0 ,0x0 ,0x0 },
+
+ /*
+ * UMC UM8881F
+ * This is a dummy entry for my tests.
+ * I have this chipset and no docs....
+ */
+ {0x0 ,0x1 ,0x1 },
+ {0x0 ,0x2 ,0x0 },
+ {0x0 ,0x0 ,0x0 },
+ {0x0 ,0x0 ,0x0 },
+ {0x0 ,0x0 ,0x0 }
+};
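+
+/*
+ * Layout note (derived from burst_bridge() below): each chipset owns a
+ * block of NUM_OPTIMIZATIONS (5) consecutive bridge_mapping[] entries,
+ * indexed as bridge_type * NUM_OPTIMIZATIONS + optimization, where
+ * bridge_type is presumably the last argument of the BRIDGE() entries in
+ * dev_info[].  An entry whose addr field is 0 means "not supported" for
+ * that chipset, which is why the Intel block above leaves the L2 cache
+ * entry at address 0.
+ */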
+
+#endif /* CONFIG_PCI_OPTIMIZE */
+
+
+/*
+ * dev_info[] is sorted so we can use binary search
+ */
+struct pci_dev_info *pci_lookup_dev(unsigned int vendor, unsigned int dev)
+{
+ int min = 0,
+ max = sizeof(dev_info)/sizeof(dev_info[0]) - 1;
+
+ for ( ; ; )
+ {
+ int i = (min + max) >> 1;
+ long order;
+
+ order = dev_info[i].vendor - (long) vendor;
+ if (!order)
+ order = dev_info[i].device - (long) dev;
+
+ if (order < 0)
+ {
+ min = i + 1;
+ if ( min > max )
+ return 0;
+ continue;
+ }
+
+ if (order > 0)
+ {
+ max = i - 1;
+ if ( min > max )
+ return 0;
+ continue;
+ }
+
+ return & dev_info[ i ];
+ }
+}
+
+const char *pci_strclass (unsigned int class)
+{
+ switch (class >> 8) {
+ case PCI_CLASS_NOT_DEFINED: return "Non-VGA device";
+ case PCI_CLASS_NOT_DEFINED_VGA: return "VGA compatible device";
+
+ case PCI_CLASS_STORAGE_SCSI: return "SCSI storage controller";
+ case PCI_CLASS_STORAGE_IDE: return "IDE interface";
+ case PCI_CLASS_STORAGE_FLOPPY: return "Floppy disk controller";
+ case PCI_CLASS_STORAGE_IPI: return "IPI bus controller";
+ case PCI_CLASS_STORAGE_RAID: return "RAID bus controller";
+ case PCI_CLASS_STORAGE_OTHER: return "Unknown mass storage controller";
+
+ case PCI_CLASS_NETWORK_ETHERNET: return "Ethernet controller";
+ case PCI_CLASS_NETWORK_TOKEN_RING: return "Token ring network controller";
+ case PCI_CLASS_NETWORK_FDDI: return "FDDI network controller";
+ case PCI_CLASS_NETWORK_ATM: return "ATM network controller";
+ case PCI_CLASS_NETWORK_OTHER: return "Network controller";
+
+ case PCI_CLASS_DISPLAY_VGA: return "VGA compatible controller";
+ case PCI_CLASS_DISPLAY_XGA: return "XGA compatible controller";
+ case PCI_CLASS_DISPLAY_OTHER: return "Display controller";
+
+ case PCI_CLASS_MULTIMEDIA_VIDEO: return "Multimedia video controller";
+ case PCI_CLASS_MULTIMEDIA_AUDIO: return "Multimedia audio controller";
+ case PCI_CLASS_MULTIMEDIA_OTHER: return "Multimedia controller";
+
+ case PCI_CLASS_MEMORY_RAM: return "RAM memory";
+ case PCI_CLASS_MEMORY_FLASH: return "FLASH memory";
+ case PCI_CLASS_MEMORY_OTHER: return "Memory";
+
+ case PCI_CLASS_BRIDGE_HOST: return "Host bridge";
+ case PCI_CLASS_BRIDGE_ISA: return "ISA bridge";
+ case PCI_CLASS_BRIDGE_EISA: return "EISA bridge";
+ case PCI_CLASS_BRIDGE_MC: return "MicroChannel bridge";
+ case PCI_CLASS_BRIDGE_PCI: return "PCI bridge";
+ case PCI_CLASS_BRIDGE_PCMCIA: return "PCMCIA bridge";
+ case PCI_CLASS_BRIDGE_NUBUS: return "NuBus bridge";
+ case PCI_CLASS_BRIDGE_CARDBUS: return "CardBus bridge";
+ case PCI_CLASS_BRIDGE_OTHER: return "Bridge";
+
+ case PCI_CLASS_COMMUNICATION_SERIAL: return "Serial controller";
+ case PCI_CLASS_COMMUNICATION_PARALLEL: return "Parallel controller";
+ case PCI_CLASS_COMMUNICATION_OTHER: return "Communication controller";
+
+ case PCI_CLASS_SYSTEM_PIC: return "PIC";
+ case PCI_CLASS_SYSTEM_DMA: return "DMA controller";
+ case PCI_CLASS_SYSTEM_TIMER: return "Timer";
+ case PCI_CLASS_SYSTEM_RTC: return "RTC";
+ case PCI_CLASS_SYSTEM_OTHER: return "System peripheral";
+
+ case PCI_CLASS_INPUT_KEYBOARD: return "Keyboard controller";
+ case PCI_CLASS_INPUT_PEN: return "Digitizer Pen";
+ case PCI_CLASS_INPUT_MOUSE: return "Mouse controller";
+ case PCI_CLASS_INPUT_OTHER: return "Input device controller";
+
+ case PCI_CLASS_DOCKING_GENERIC: return "Generic Docking Station";
+ case PCI_CLASS_DOCKING_OTHER: return "Docking Station";
+
+ case PCI_CLASS_PROCESSOR_386: return "386";
+ case PCI_CLASS_PROCESSOR_486: return "486";
+ case PCI_CLASS_PROCESSOR_PENTIUM: return "Pentium";
+ case PCI_CLASS_PROCESSOR_ALPHA: return "Alpha";
+ case PCI_CLASS_PROCESSOR_POWERPC: return "Power PC";
+ case PCI_CLASS_PROCESSOR_CO: return "Co-processor";
+
+ case PCI_CLASS_SERIAL_FIREWIRE: return "FireWire (IEEE 1394)";
+ case PCI_CLASS_SERIAL_ACCESS: return "ACCESS Bus";
+ case PCI_CLASS_SERIAL_SSA: return "SSA";
+ case PCI_CLASS_SERIAL_FIBER: return "Fiber Channel";
+
+ default: return "Unknown class";
+ }
+}
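+
+/*
+ * Note on the argument: callers pass the 24 bit class code (base class,
+ * sub-class, prog-if) read from PCI_CLASS_REVISION >> 8; the switch above
+ * shifts out the prog-if byte so the 16 bit PCI_CLASS_* constants can be
+ * compared directly.
+ */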
+
+
+const char *pci_strvendor(unsigned int vendor)
+{
+ switch (vendor) {
+ case PCI_VENDOR_ID_COMPAQ: return "Compaq";
+ case PCI_VENDOR_ID_NCR: return "NCR";
+ case PCI_VENDOR_ID_ATI: return "ATI";
+ case PCI_VENDOR_ID_VLSI: return "VLSI";
+ case PCI_VENDOR_ID_ADL: return "Advance Logic";
+ case PCI_VENDOR_ID_NS: return "NS";
+ case PCI_VENDOR_ID_TSENG: return "Tseng'Lab";
+ case PCI_VENDOR_ID_WEITEK: return "Weitek";
+ case PCI_VENDOR_ID_DEC: return "DEC";
+ case PCI_VENDOR_ID_CIRRUS: return "Cirrus Logic";
+ case PCI_VENDOR_ID_IBM: return "IBM";
+ case PCI_VENDOR_ID_WD: return "Western Digital";
+ case PCI_VENDOR_ID_AMD: return "AMD";
+ case PCI_VENDOR_ID_TRIDENT: return "Trident";
+ case PCI_VENDOR_ID_AI: return "Acer Incorporated";
+ case PCI_VENDOR_ID_MATROX: return "Matrox";
+ case PCI_VENDOR_ID_CT: return "Chips & Technologies";
+ case PCI_VENDOR_ID_FD: return "Future Domain";
+ case PCI_VENDOR_ID_SI: return "Silicon Integrated Systems";
+ case PCI_VENDOR_ID_HP: return "Hewlett Packard";
+ case PCI_VENDOR_ID_PCTECH: return "PCTECH";
+ case PCI_VENDOR_ID_DPT: return "DPT";
+ case PCI_VENDOR_ID_OPTI: return "OPTI";
+ case PCI_VENDOR_ID_SGS: return "SGS Thomson";
+ case PCI_VENDOR_ID_BUSLOGIC: return "BusLogic";
+ case PCI_VENDOR_ID_OAK: return "OAK";
+ case PCI_VENDOR_ID_PROMISE: return "Promise Technology";
+ case PCI_VENDOR_ID_N9: return "Number Nine";
+ case PCI_VENDOR_ID_UMC: return "UMC";
+ case PCI_VENDOR_ID_X: return "X TECHNOLOGY";
+ case PCI_VENDOR_ID_NEXGEN: return "Nexgen";
+ case PCI_VENDOR_ID_QLOGIC: return "Q Logic";
+ case PCI_VENDOR_ID_LEADTEK: return "Leadtek Research";
+ case PCI_VENDOR_ID_CONTAQ: return "Contaq";
+ case PCI_VENDOR_ID_FOREX: return "Forex";
+ case PCI_VENDOR_ID_OLICOM: return "Olicom";
+ case PCI_VENDOR_ID_CMD: return "CMD";
+ case PCI_VENDOR_ID_VISION: return "Vision";
+ case PCI_VENDOR_ID_SIERRA: return "Sierra";
+ case PCI_VENDOR_ID_ACC: return "ACC MICROELECTRONICS";
+ case PCI_VENDOR_ID_WINBOND: return "Winbond";
+ case PCI_VENDOR_ID_3COM: return "3Com";
+ case PCI_VENDOR_ID_AL: return "Acer Labs";
+ case PCI_VENDOR_ID_ASP: return "Advanced System Products";
+ case PCI_VENDOR_ID_IMS: return "IMS";
+ case PCI_VENDOR_ID_TEKRAM2: return "Tekram";
+ case PCI_VENDOR_ID_AMCC: return "AMCC";
+ case PCI_VENDOR_ID_INTERG: return "Intergraphics";
+ case PCI_VENDOR_ID_REALTEK: return "Realtek";
+ case PCI_VENDOR_ID_INIT: return "Initio Corp";
+ case PCI_VENDOR_ID_VIA: return "VIA Technologies";
+ case PCI_VENDOR_ID_VORTEX: return "VORTEX";
+ case PCI_VENDOR_ID_EF: return "Efficient Networks";
+ case PCI_VENDOR_ID_FORE: return "Fore Systems";
+ case PCI_VENDOR_ID_IMAGINGTECH: return "Imaging Technology";
+ case PCI_VENDOR_ID_PLX: return "PLX";
+ case PCI_VENDOR_ID_ALLIANCE: return "Alliance";
+ case PCI_VENDOR_ID_MUTECH: return "Mutech";
+ case PCI_VENDOR_ID_ZEITNET: return "ZeitNet";
+ case PCI_VENDOR_ID_SPECIALIX: return "Specialix";
+ case PCI_VENDOR_ID_RP: return "Comtrol";
+ case PCI_VENDOR_ID_CYCLADES: return "Cyclades";
+ case PCI_VENDOR_ID_SYMPHONY: return "Symphony";
+ case PCI_VENDOR_ID_TEKRAM: return "Tekram";
+ case PCI_VENDOR_ID_AVANCE: return "Avance";
+ case PCI_VENDOR_ID_S3: return "S3 Inc.";
+ case PCI_VENDOR_ID_INTEL: return "Intel";
+ case PCI_VENDOR_ID_ADAPTEC: return "Adaptec";
+ case PCI_VENDOR_ID_ATRONICS: return "Atronics";
+ case PCI_VENDOR_ID_HER: return "Hercules";
+ default: return "Unknown vendor";
+ }
+}
+
+
+const char *pci_strdev(unsigned int vendor, unsigned int device)
+{
+ struct pci_dev_info *info;
+
+ info = pci_lookup_dev(vendor, device);
+ return info ? info->name : "Unknown device";
+}
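+
+/*
+ * A minimal usage sketch (hypothetical function, not part of the original
+ * file) showing how the lookup and string helpers above combine: the
+ * binary search in pci_lookup_dev() either yields a dev_info[] entry with
+ * a printable name, or NULL for hardware we don't know about yet.
+ */
+#if 0
+static void example_announce_dev(unsigned int vendor, unsigned int device)
+{
+	struct pci_dev_info *info = pci_lookup_dev(vendor, device);
+
+	if (info)
+		printk("found %s %s\n", pci_strvendor(vendor), info->name);
+	else
+		printk("unknown device %x:%x from %s\n",
+		       vendor, device, pci_strvendor(vendor));
+}
+#endif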
+
+
+
+/*
+ * Turn on/off PCI bridge optimization. This should allow benchmarking.
+ */
+static void burst_bridge(unsigned char bus, unsigned char devfn,
+ unsigned char pos, int turn_on)
+{
+#ifdef CONFIG_PCI_OPTIMIZE
+ struct bridge_mapping_type *bmap;
+ unsigned char val;
+ int i;
+
+ pos *= NUM_OPTIMIZATIONS;
+ printk("PCI bridge optimization.\n");
+ for (i = 0; i < NUM_OPTIMIZATIONS; i++) {
+ printk(" %s: ", bridge_optimization[i].type);
+ bmap = &bridge_mapping[pos + i];
+ if (!bmap->addr) {
+ printk("Not supported.");
+ } else {
+ pcibios_read_config_byte(bus, devfn, bmap->addr, &val);
+ if ((val & bmap->mask) == bmap->value) {
+ printk("%s.", bridge_optimization[i].on);
+ if (!turn_on) {
+ pcibios_write_config_byte(bus, devfn,
+ bmap->addr,
+ (val | bmap->mask)
+ - bmap->value);
+ printk("Changed! Now %s.", bridge_optimization[i].off);
+ }
+ } else {
+ printk("%s.", bridge_optimization[i].off);
+ if (turn_on) {
+ pcibios_write_config_byte(bus, devfn,
+ bmap->addr,
+ (val & (0xff - bmap->mask))
+ + bmap->value);
+ printk("Changed! Now %s.", bridge_optimization[i].on);
+ }
+ }
+ }
+ printk("\n");
+ }
+#endif /* CONFIG_PCI_OPTIMIZE */
+}
+
+
+/*
+ * Convert some of the configuration space registers of the device at
+ * address (bus,devfn) into a string (possibly several lines each).
+ * The configuration string is stored in BUF.  If the string would
+ * exceed the size of the buffer (SIZE), -1 is returned.
+ */
+static int sprint_dev_config(struct pci_dev *dev, char *buf, int size)
+{
+ unsigned long base;
+ unsigned int l, class_rev, bus, devfn;
+ unsigned short vendor, device, status;
+ unsigned char bist, latency, min_gnt, max_lat;
+ int reg, len = 0;
+ const char *str;
+
+ bus = dev->bus->number;
+ devfn = dev->devfn;
+
+ pcibios_read_config_dword(bus, devfn, PCI_CLASS_REVISION, &class_rev);
+ pcibios_read_config_word (bus, devfn, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word (bus, devfn, PCI_DEVICE_ID, &device);
+ pcibios_read_config_word (bus, devfn, PCI_STATUS, &status);
+ pcibios_read_config_byte (bus, devfn, PCI_BIST, &bist);
+ pcibios_read_config_byte (bus, devfn, PCI_LATENCY_TIMER, &latency);
+ pcibios_read_config_byte (bus, devfn, PCI_MIN_GNT, &min_gnt);
+ pcibios_read_config_byte (bus, devfn, PCI_MAX_LAT, &max_lat);
+ if (len + 80 > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, " Bus %2d, device %3d, function %2d:\n",
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+ if (len + 80 > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, " %s: %s %s (rev %d).\n ",
+ pci_strclass(class_rev >> 8), pci_strvendor(vendor),
+ pci_strdev(vendor, device), class_rev & 0xff);
+
+ if (!pci_lookup_dev(vendor, device)) {
+ len += sprintf(buf + len,
+ "Vendor id=%x. Device id=%x.\n ",
+ vendor, device);
+ }
+
+ str = 0; /* to keep gcc shut... */
+ switch (status & PCI_STATUS_DEVSEL_MASK) {
+ case PCI_STATUS_DEVSEL_FAST: str = "Fast devsel. "; break;
+ case PCI_STATUS_DEVSEL_MEDIUM: str = "Medium devsel. "; break;
+ case PCI_STATUS_DEVSEL_SLOW: str = "Slow devsel. "; break;
+ }
+ if (len + strlen(str) > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, str);
+
+ if (status & PCI_STATUS_FAST_BACK) {
+# define fast_b2b_capable "Fast back-to-back capable. "
+ if (len + strlen(fast_b2b_capable) > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, fast_b2b_capable);
+# undef fast_b2b_capable
+ }
+
+ if (bist & PCI_BIST_CAPABLE) {
+# define BIST_capable "BIST capable. "
+ if (len + strlen(BIST_capable) > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, BIST_capable);
+# undef BIST_capable
+ }
+
+ if (dev->irq) {
+ if (len + 40 > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, "IRQ %d. ", dev->irq);
+ }
+
+ if (dev->master) {
+ if (len + 80 > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, "Master Capable. ");
+ if (latency)
+ len += sprintf(buf + len, "Latency=%d. ", latency);
+ else
+ len += sprintf(buf + len, "No bursts. ");
+ if (min_gnt)
+ len += sprintf(buf + len, "Min Gnt=%d.", min_gnt);
+ if (max_lat)
+ len += sprintf(buf + len, "Max Lat=%d.", max_lat);
+ }
+
+ for (reg = PCI_BASE_ADDRESS_0; reg <= PCI_BASE_ADDRESS_5; reg += 4) {
+ if (len + 40 > size) {
+ return -1;
+ }
+ pcibios_read_config_dword(bus, devfn, reg, &l);
+ base = l;
+ if (!base) {
+ continue;
+ }
+
+ if (base & PCI_BASE_ADDRESS_SPACE_IO) {
+ len += sprintf(buf + len,
+ "\n I/O at 0x%lx.",
+ base & PCI_BASE_ADDRESS_IO_MASK);
+ } else {
+ const char *pref, *type = "unknown";
+
+ if (base & PCI_BASE_ADDRESS_MEM_PREFETCH) {
+ pref = "P";
+ } else {
+ pref = "Non-p";
+ }
+ switch (base & PCI_BASE_ADDRESS_MEM_TYPE_MASK) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ type = "32 bit"; break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ type = "20 bit"; break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ type = "64 bit";
+ /* read top 32 bit address of base addr: */
+ reg += 4;
+ pcibios_read_config_dword(bus, devfn, reg, &l);
+ base |= ((u64) l) << 32;
+ break;
+ }
+ len += sprintf(buf + len,
+ "\n %srefetchable %s memory at "
+ "0x%lx.", pref, type,
+ base & PCI_BASE_ADDRESS_MEM_MASK);
+ }
+ }
+
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+
+
+/*
+ * Return list of PCI devices as a character string for /proc/pci.
+ * BUF is a buffer that is PAGE_SIZE bytes long.
+ */
+int get_pci_list(char *buf)
+{
+ int nprinted, len, size;
+ struct pci_dev *dev;
+# define MSG "\nwarning: page-size limit reached!\n"
+
+	/* reserve space for the truncation warning message: */
+ size = PAGE_SIZE - (strlen(MSG) + 1);
+ len = sprintf(buf, "PCI devices found:\n");
+
+ for (dev = pci_devices; dev; dev = dev->next) {
+ nprinted = sprint_dev_config(dev, buf + len, size - len);
+ if (nprinted < 0) {
+ return len + sprintf(buf + len, MSG);
+ }
+ len += nprinted;
+ }
+ return len;
+}
+
+
+/*
+ * pci_malloc() returns zeroed memory of SIZE bytes.  It can be
+ * used only while pci_init() is active.
+ */
+static void *pci_malloc(long size, unsigned long *mem_startp)
+{
+ void *mem;
+
+#ifdef DEBUG
+ printk("...pci_malloc(size=%ld,mem=%p)", size, *mem_startp);
+#endif
+ mem = (void*) *mem_startp;
+ *mem_startp += (size + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
+ memset(mem, 0, size);
+ return mem;
+}
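+
+/*
+ * For illustration (assuming sizeof(void*) == 4, as on i386): a request
+ * for 10 bytes returns the current mem_start and advances it by 12, so
+ * successive allocations stay pointer-aligned.  Note that the rounding is
+ * applied to the increment rather than to the returned pointer, so
+ * mem_start itself is assumed to be aligned on entry to pci_init().
+ */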
+
+
+static unsigned int scan_bus(struct pci_bus *bus, unsigned long *mem_startp)
+{
+ unsigned int devfn, l, max;
+ unsigned char cmd, tmp, hdr_type = 0;
+ struct pci_dev_info *info;
+ struct pci_dev *dev;
+ struct pci_bus *child;
+
+#ifdef DEBUG
+ printk("...scan_bus(busno=%d,mem=%p)\n", bus->number, *mem_startp);
+#endif
+
+ max = bus->secondary;
+ for (devfn = 0; devfn < 0xff; ++devfn) {
+ if (PCI_FUNC(devfn) == 0) {
+ pcibios_read_config_byte(bus->number, devfn,
+ PCI_HEADER_TYPE, &hdr_type);
+ } else if (!(hdr_type & 0x80)) {
+ /* not a multi-function device */
+ continue;
+ }
+
+ pcibios_read_config_dword(bus->number, devfn, PCI_VENDOR_ID,
+ &l);
+ /* some broken boards return 0 if a slot is empty: */
+ if (l == 0xffffffff || l == 0x00000000) {
+ hdr_type = 0;
+ continue;
+ }
+
+ dev = pci_malloc(sizeof(*dev), mem_startp);
+ dev->bus = bus;
+ /*
+ * Put it into the simple chain of devices on this
+ * bus. It is used to find devices once everything is
+ * set up.
+ */
+ dev->next = pci_devices;
+ pci_devices = dev;
+
+ dev->devfn = devfn;
+ dev->vendor = l & 0xffff;
+ dev->device = (l >> 16) & 0xffff;
+
+ /*
+ * Check to see if we know about this device and report
+ * a message at boot time. This is the only way to
+ * learn about new hardware...
+ */
+ info = pci_lookup_dev(dev->vendor, dev->device);
+ if (!info) {
+ printk("Warning : Unknown PCI device (%x:%x). Please read include/linux/pci.h \n",
+ dev->vendor, dev->device);
+ } else {
+			/* Some BIOSes are lazy.  Let's do their job: */
+ if (info->bridge_type != 0xff) {
+ burst_bridge(bus->number, devfn,
+ info->bridge_type, 1);
+ }
+ }
+
+ /* non-destructively determine if device can be a master: */
+ pcibios_read_config_byte(bus->number, devfn, PCI_COMMAND,
+ &cmd);
+ pcibios_write_config_byte(bus->number, devfn, PCI_COMMAND,
+ cmd | PCI_COMMAND_MASTER);
+ pcibios_read_config_byte(bus->number, devfn, PCI_COMMAND,
+ &tmp);
+ dev->master = ((tmp & PCI_COMMAND_MASTER) != 0);
+ pcibios_write_config_byte(bus->number, devfn, PCI_COMMAND,
+ cmd);
+
+ /* read irq level (may be changed during pcibios_fixup()): */
+ pcibios_read_config_byte(bus->number, devfn,
+ PCI_INTERRUPT_LINE, &dev->irq);
+
+ /* check to see if this device is a PCI-PCI bridge: */
+ pcibios_read_config_dword(bus->number, devfn,
+ PCI_CLASS_REVISION, &l);
+ l = l >> 8; /* upper 3 bytes */
+ dev->class = l;
+ /*
+ * Now insert it into the list of devices held
+ * by the parent bus.
+ */
+ dev->sibling = bus->devices;
+ bus->devices = dev;
+
+ if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI) {
+ unsigned int buses;
+ unsigned short cr;
+
+ /*
+ * Insert it into the tree of buses.
+ */
+ child = pci_malloc(sizeof(*child), mem_startp);
+ child->next = bus->children;
+ bus->children = child;
+ child->self = dev;
+ child->parent = bus;
+
+ /*
+ * Set up the primary, secondary and subordinate
+ * bus numbers.
+ */
+ child->number = child->secondary = ++max;
+ child->primary = bus->secondary;
+ child->subordinate = 0xff;
+ /*
+ * Clear all status bits and turn off memory,
+ * I/O and master enables.
+ */
+ pcibios_read_config_word(bus->number, devfn,
+ PCI_COMMAND, &cr);
+ pcibios_write_config_word(bus->number, devfn,
+ PCI_COMMAND, 0x0000);
+ pcibios_write_config_word(bus->number, devfn,
+ PCI_STATUS, 0xffff);
+ /*
+ * Configure the bus numbers for this bridge:
+ */
+ pcibios_read_config_dword(bus->number, devfn, 0x18,
+ &buses);
+ buses &= 0xff000000;
+ buses |= (((unsigned int)(child->primary) << 0) |
+ ((unsigned int)(child->secondary) << 8) |
+ ((unsigned int)(child->subordinate) << 16));
+ pcibios_write_config_dword(bus->number, devfn, 0x18,
+ buses);
+ /*
+ * Now we can scan all subordinate buses:
+ */
+ max = scan_bus(child, mem_startp);
+ /*
+ * Set the subordinate bus number to its real
+ * value:
+ */
+ child->subordinate = max;
+ buses = (buses & 0xff00ffff)
+ | ((unsigned int)(child->subordinate) << 16);
+ pcibios_write_config_dword(bus->number, devfn, 0x18,
+ buses);
+ pcibios_write_config_word(bus->number, devfn,
+ PCI_COMMAND, cr);
+ }
+ }
+ /*
+ * We've scanned the bus and so we know all about what's on
+ * the other side of any bridges that may be on this bus plus
+ * any devices.
+ *
+ * Return how far we've got finding sub-buses.
+ */
+ return max;
+}
+
+
+unsigned long pci_init (unsigned long mem_start, unsigned long mem_end)
+{
+ mem_start = pcibios_init(mem_start, mem_end);
+
+ if (!pcibios_present()) {
+ printk("pci_init: no BIOS32 detected\n");
+ return mem_start;
+ }
+
+ printk("Probing PCI hardware.\n");
+
+ memset(&pci_root, 0, sizeof(pci_root));
+ pci_root.subordinate = scan_bus(&pci_root, &mem_start);
+
+ /* give BIOS a chance to apply platform specific fixes: */
+ mem_start = pcibios_fixup(mem_start, mem_end);
+
+#ifdef DEBUG
+ {
+ int len = get_pci_list((char*)mem_start);
+ if (len) {
+ ((char *) mem_start)[len] = '\0';
+ printk("%s\n", (char *) mem_start);
+ }
+ }
+#endif
+ return mem_start;
+}
diff --git a/i386/i386at/gpl/linux/scsi/53c7,8xx.c b/i386/i386at/gpl/linux/scsi/53c7,8xx.c
new file mode 100644
index 00000000..74350b08
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/53c7,8xx.c
@@ -0,0 +1,6381 @@
+/*
+ * PERM_OPTIONS are driver options which will be enabled for all NCR boards
+ * in the system at driver initialization time.
+ *
+ * Don't THINK about touching these in PERM_OPTIONS :
+ * OPTION_IO_MAPPED
+ * Memory mapped IO does not work under i86 Linux.
+ *
+ * OPTION_DEBUG_TEST1
+ * Test 1 does bus mastering and interrupt tests, which will help weed
+ * out brain damaged main boards.
+ *
+ * These are development kernel changes. Code for them included in this
+ * driver release may or may not work. If you turn them on, you should be
+ * running the latest copy of the development sources from
+ *
+ * ftp://tsx-11.mit.edu/pub/linux/ALPHA/scsi/53c7,8xx
+ *
+ * and be subscribed to the ncr53c810@colorado.edu mailing list. To
+ * subscribe, send mail to majordomo@colorado.edu with
+ *
+ * subscribe ncr53c810
+ *
+ * in the text.
+ *
+ *
+ * OPTION_NOASYNC
+ * Don't negotiate for asynchronous transfers on the first command
+ *	when OPTION_ALWAYS_SYNCHRONOUS is set.  Useful for brain damaged
+ * devices which do something bad rather than sending a MESSAGE
+ * REJECT back to us like they should if they can't cope.
+ *
+ * OPTION_SYNCHRONOUS
+ * Enable support for synchronous transfers. Target negotiated
+ * synchronous transfers will be responded to. To initiate
+ * a synchronous transfer request, call
+ *
+ * request_synchronous (hostno, target)
+ *
+ * from within KGDB.
+ *
+ * OPTION_ALWAYS_SYNCHRONOUS
+ * Negotiate for synchronous transfers with every target after
+ * driver initialization or a SCSI bus reset. This is a bit dangerous,
+ *	since there are some brain damaged SCSI devices which will accept
+ * SDTR messages but keep talking asynchronously.
+ *
+ * OPTION_DISCONNECT
+ * Enable support for disconnect/reconnect. To change the
+ * default setting on a given host adapter, call
+ *
+ * request_disconnect (hostno, allow)
+ *
+ * where allow is non-zero to allow, 0 to disallow.
+ *
+ * If you really want to run 10MHz FAST SCSI-II transfers, you should
+ * know that the NCR driver currently ignores parity information. Most
+ * systems do 5MHz SCSI fine. I've seen a lot that have problems faster
+ * than 8MHz. To play it safe, we only request 5MHz transfers.
+ *
+ * If you'd rather get 10MHz transfers, edit sdtr_message and change
+ * the fourth byte from 50 to 25.
+ */
+
+#define PERM_OPTIONS (OPTION_IO_MAPPED|OPTION_DEBUG_TEST1|OPTION_DISCONNECT|\
+ OPTION_SYNCHRONOUS)
+
+/*
+ * Sponsored by
+ * iX Multiuser Multitasking Magazine
+ * Hannover, Germany
+ * hm@ix.de
+ *
+ * Copyright 1993, 1994, 1995 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@PoohSticks.ORG
+ * +1 (303) 786-7975
+ *
+ * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
+ *
+ * For more information, please consult
+ *
+ * NCR53C810
+ * SCSI I/O Processor
+ * Programmer's Guide
+ *
+ * NCR 53C810
+ * PCI-SCSI I/O Processor
+ * Data Manual
+ *
+ * NCR 53C810/53C820
+ * PCI-SCSI I/O Processor Design In Guide
+ *
+ * For literature on Symbios Logic Inc. formerly NCR, SCSI,
+ * and Communication products please call (800) 334-5454 or
+ * (719) 536-3300.
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * M/S HF3-15A
+ * 5200 N.E. Elam Young Parkway
+ * Hillsboro, Oregon 97124-6497
+ * +1 (503) 696-2000
+ * +1 (800) 433-5177
+ */
+
+/*
+ * Design issues :
+ * The cumulative latency needed to propagate a read/write request
+ * through the file system, buffer cache, driver stacks, SCSI host, and
+ * SCSI device is ultimately the limiting factor in throughput once we
+ * have a sufficiently fast host adapter.
+ *
+ * So, to maximize performance we want to keep the ratio of latency to data
+ * transfer time to a minimum by
+ * 1. Minimizing the total number of commands sent (typical command latency
+ * including drive and bus mastering host overhead is as high as 4.5ms)
+ * to transfer a given amount of data.
+ *
+ * This is accomplished by placing no arbitrary limit on the number
+ * of scatter/gather buffers supported, since we can transfer 1K
+ * per scatter/gather buffer without Eric's cluster patches,
+ * 4K with.
+ *
+ * 2. Minimizing the number of fatal interrupts serviced, since
+ * fatal interrupts halt the SCSI I/O processor. Basically,
+ * this means offloading the practical maximum amount of processing
+ * to the SCSI chip.
+ *
+ * On the NCR53c810/820/720, this is accomplished by using
+ * interrupt-on-the-fly signals when commands complete,
+ * and only handling fatal errors and SDTR / WDTR messages
+ * in the host code.
+ *
+ * On the NCR53c710, interrupts are generated as on the NCR53c8x0,
+ *	only the lack of an interrupt-on-the-fly facility complicates
+ * things. Also, SCSI ID registers and commands are
+ * bit fielded rather than binary encoded.
+ *
+ * On the NCR53c700 and NCR53c700-66, operations that are done via
+ * indirect, table mode on the more advanced chips must be
+ * replaced by calls through a jump table which
+ * acts as a surrogate for the DSA. Unfortunately, this
+ * will mean that we must service an interrupt for each
+ * disconnect/reconnect.
+ *
+ * 3. Eliminating latency by pipelining operations at the different levels.
+ *
+ * This driver allows a configurable number of commands to be enqueued
+ * for each target/lun combination (experimentally, I have discovered
+ * that two seems to work best) and will ultimately allow for
+ * SCSI-II tagged queuing.
+ *
+ *
+ * Architecture :
+ * This driver is built around a Linux queue of commands waiting to
+ * be executed, and a shared Linux/NCR array of commands to start. Commands
+ * are transferred to the array by the run_process_issue_queue() function
+ * which is called whenever a command completes.
+ *
+ * As commands are completed, the interrupt routine is triggered,
+ * looks for commands in the linked list of completed commands with
+ * valid status, removes these commands from a list of running commands,
+ * calls the done routine, and flags their target/luns as not busy.
+ *
+ * Due to limitations in the intelligence of the NCR chips, certain
+ * concessions are made. In many cases, it is easier to dynamically
+ * generate/fix-up code rather than calculate on the NCR at run time.
+ * So, code is generated or fixed up for
+ *
+ * - Handling data transfers, using a variable number of MOVE instructions
+ * interspersed with CALL MSG_IN, WHEN MSGIN instructions.
+ *
+ * The DATAIN and DATAOUT routines are separate, so that an incorrect
+ * direction can be trapped, and space isn't wasted.
+ *
+ * It may turn out that we're better off using some sort
+ * of table indirect instruction in a loop with a variable
+ * sized table on the NCR53c710 and newer chips.
+ *
+ * - Checking for reselection (NCR53c710 and better)
+ *
+ * - Handling the details of SCSI context switches (NCR53c710 and better),
+ * such as reprogramming appropriate synchronous parameters,
+ * removing the dsa structure from the NCR's queue of outstanding
+ * commands, etc.
+ *
+ */
+
+/*
+ * Accommodate differences between stock 1.2.x and 1.3.x asm-i386/types.h
+ * so lusers can drop in 53c7,8xx.* and get something which compiles
+ * without warnings.
+ */
+
+#if !defined(LINUX_1_2) && !defined(LINUX_1_3)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE > 65536 + 3 * 256
+#define LINUX_1_3
+#else
+#define LINUX_1_2
+#endif
+#endif
+
+#ifdef LINUX_1_2
+#define u32 bogus_u32
+#define s32 bogus_s32
+#include <asm/types.h>
+#undef u32
+#undef s32
+typedef __signed__ int s32;
+typedef unsigned int u32;
+#endif /* def LINUX_1_2 */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/time.h>
+#ifdef LINUX_1_2
+#include "../block/blk.h"
+#else
+#include <linux/blk.h>
+#endif
+#undef current
+
+#include "scsi.h"
+#include "hosts.h"
+#include "53c7,8xx.h"
+#include "constants.h"
+#include "sd.h"
+#include <linux/stat.h>
+#include <linux/stddef.h>
+
+#ifndef LINUX_1_2
+struct proc_dir_entry proc_scsi_ncr53c7xx = {
+ PROC_SCSI_NCR53C7xx, 9, "ncr53c7xx",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+#endif
+
+static int check_address (unsigned long addr, int size);
+static void dump_events (struct Scsi_Host *host, int count);
+static Scsi_Cmnd * return_outstanding_commands (struct Scsi_Host *host,
+ int free, int issue);
+static void hard_reset (struct Scsi_Host *host);
+static void ncr_scsi_reset (struct Scsi_Host *host);
+static void print_lots (struct Scsi_Host *host);
+static void set_synchronous (struct Scsi_Host *host, int target, int sxfer,
+ int scntl3, int now_connected);
+static int datapath_residual (struct Scsi_Host *host);
+static const char * sbcl_to_phase (int sbcl);
+static void print_progress (Scsi_Cmnd *cmd);
+static void print_queues (struct Scsi_Host *host);
+static void process_issue_queue (unsigned long flags);
+static int shutdown (struct Scsi_Host *host);
+static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int result);
+static int disable (struct Scsi_Host *host);
+static int NCR53c8xx_run_tests (struct Scsi_Host *host);
+static int NCR53c8xx_script_len;
+static int NCR53c8xx_dsa_len;
+static void NCR53c7x0_intr(int irq, struct pt_regs * regs);
+static int ncr_halt (struct Scsi_Host *host);
+static void intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd
+ *cmd);
+static void intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
+static void print_dsa (struct Scsi_Host *host, u32 *dsa,
+ const char *prefix);
+static int print_insn (struct Scsi_Host *host, const u32 *insn,
+ const char *prefix, int kernel);
+
+static void NCR53c8xx_dsa_fixup (struct NCR53c7x0_cmd *cmd);
+static void NCR53c8x0_init_fixup (struct Scsi_Host *host);
+static int NCR53c8x0_dstat_sir_intr (struct Scsi_Host *host, struct
+ NCR53c7x0_cmd *cmd);
+static void NCR53c8x0_soft_reset (struct Scsi_Host *host);
+
+/* INSMOD variables */
+static long long perm_options = PERM_OPTIONS;
+/* 14 = .5s; 15 is max; decreasing divides by two. */
+static int selection_timeout = 14;
+/* Size of event list (per host adapter) */
+static int track_events = 0;
+
+static struct Scsi_Host *first_host = NULL; /* Head of list of NCR boards */
+static Scsi_Host_Template *the_template = NULL;
+
+/*
+ * KNOWN BUGS :
+ * - There is some sort of conflict when the PPP driver is compiled with
+ * support for 16 channels?
+ *
+ * - On systems which predate the 1.3.x initialization order change,
+ *   the NCR driver will cause "Cannot get free page" messages to appear.
+ * These are harmless, but I don't know of an easy way to avoid them.
+ *
+ * - With OPTION_DISCONNECT, on two systems under unknown circumstances,
+ *   we get a PHASE MISMATCH with DSA set to zero (which suggests that
+ *   this happens somewhere in the reselection code) where
+ *   DSP=some value, DCMD|DBC=same value.
+ *
+ * Closer inspection suggests that we may be trying to execute
+ * some portion of the DSA?
+ * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
+ * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
+ * scsi0 : no current command : unexpected phase MSGIN.
+ * DSP=0x1c46cc, DCMD|DBC=0x1c46ac, DSA=0x0
+ * DSPS=0x0, TEMP=0x1c3e70, DMODE=0x80
+ * scsi0 : DSP->
+ * 001c46cc : 0x001c46cc 0x00000000
+ * 001c46d4 : 0x001c5ea0 0x000011f8
+ *
+ * Changed the print code in the phase_mismatch handler so
+ * that we call print_lots to try and diagnose this.
+ *
+ */
+
+/*
+ * Possible future direction of architecture for max performance :
+ *
+ * We're using a single start array for the NCR chip. This is
+ * sub-optimal, because we cannot add a command which would conflict with
+ * an executing command to this start queue, and therefore must insert the
+ * next command for a given I/T/L combination after the first has completed;
+ * incurring our interrupt latency between SCSI commands.
+ *
+ * To allow further pipelining of the NCR and host CPU operation, we want
+ * to set things up so that immediately on termination of a command destined
+ * for a given LUN, we get that LUN busy again.
+ *
+ * To do this, we need to add a 32 bit pointer which is jumped to
+ * on completion of a command. If no new command is available, this
+ * would point to the usual DSA issue queue select routine.
+ *
+ * If one were, it would point to a per-NCR53c7x0_cmd select routine
+ * which starts execution immediately, inserting the command at the head
+ * of the start queue if the NCR chip is selected or reselected.
+ *
+ * We would change things so that we keep a list of outstanding commands
+ * for each unit, rather than a single running_list. We'd insert
+ * a new command into the right running list; if the NCR didn't
+ * have something running for that yet, we'd put it in the
+ * start queue as well. Some magic needs to happen to handle the
+ * race condition between the first command terminating before the
+ * new one is written.
+ *
+ * Potential for profiling :
+ * Call do_gettimeofday(struct timeval *tv) to get 800ns resolution.
+ */
+
+
+/*
+ * TODO :
+ * 1. To support WIDE transfers, not much needs to happen. We
+ * should do CHMOVE instructions instead of MOVEs when
+ * we have scatter/gather segments of uneven length. When
+ * we do this, we need to handle the case where we disconnect
+ * between segments.
+ *
+ * 2. Currently, when Icky things happen we do a FATAL(). Instead,
+ * we want to do an integrity check on the parts of the NCR hostdata
+ * structure which were initialized at boot time; FATAL() if that
+ * fails, and otherwise try to recover. Keep track of how many
+ * times this has happened within a single SCSI command; if it
+ * gets excessive, then FATAL().
+ *
+ * 3. Parity checking is currently disabled, and a few things should
+ * happen here now that we support synchronous SCSI transfers :
+ *	1. On soft-reset, we should set the EPC (Enable Parity Checking)
+ * and AAP (Assert SATN/ on parity error) bits in SCNTL0.
+ *
+ * 2. We should enable the parity interrupt in the SIEN0 register.
+ *
+ * 3. intr_phase_mismatch() needs to believe that message out is
+ *	always an "acceptable" phase to have a mismatch in.  If
+ * the old phase was MSG_IN, we should send a MESSAGE PARITY
+ * error. If the old phase was something else, we should send
+ *	an INITIATOR_DETECTED_ERROR message.  Note that this could
+ * cause a RESTORE POINTERS message; so we should handle that
+ * correctly first. Instead, we should probably do an
+ * initiator_abort.
+ *
+ * 4. MPEE bit of CTEST4 should be set so we get interrupted if
+ * we detect an error.
+ *
+ *
+ * 5. The initial code has been tested on the NCR53c810. I don't
+ * have access to NCR53c700, 700-66 (Forex boards), NCR53c710
+ * (NCR Pentium systems), NCR53c720, NCR53c820, or NCR53c825 boards to
+ * finish development on those platforms.
+ *
+ * NCR53c820/825/720 - need to add wide transfer support, including WDTR
+ * negotiation, programming of wide transfer capabilities
+ * on reselection and table indirect selection.
+ *
+ * NCR53c710 - need to add fatal interrupt or GEN code for
+ * command completion signaling. Need to modify all
+ * SDID, SCID, etc. registers, and table indirect select code
+ * since these use bit fielded (ie 1<<target) instead of
+ *	binary encoded target ids.  Need to accommodate
+ * different register mappings, probably scan through
+ * the SCRIPT code and change the non SFBR register operand
+ * of all MOVE instructions.
+ *
+ * NCR53c700/700-66 - need to add code to refix addresses on
+ * every nexus change, eliminate all table indirect code,
+ * very messy.
+ *
+ * 6. The NCR53c7x0 series is very popular on other platforms that
+ * could be running Linux - ie, some high performance AMIGA SCSI
+ * boards use it.
+ *
+ * So, I should include #ifdef'd code so that it is
+ * compatible with these systems.
+ *
+ * Specifically, the little Endian assumptions I made in my
+ * bit fields need to change, and if the NCR doesn't see memory
+ * the right way, we need to provide options to reverse words
+ * when the scripts are relocated.
+ *
+ * 7. Use vremap() to access memory mapped boards.
+ */
+
+/*
+ * Allow for simultaneous existence of multiple SCSI scripts so we
+ * can have a single driver binary for all of the family.
+ *
+ * - one for NCR53c700 and NCR53c700-66 chips (not yet supported)
+ * - one for the rest (only the NCR53c810, 815, 820, and 825 are currently
+ * supported)
+ *
+ * So that we only need two SCSI scripts, we need to modify things so
+ * that we fix up register accesses in READ/WRITE instructions, and
+ * we'll also have to accommodate the bit vs. binary encoding of IDs
+ * with the 7xx chips.
+ */
+
+/*
+ * Use pci_chip_ids to translate in both directions between PCI device ID
+ * and chip numbers.
+ */
+
+static struct {
+ unsigned short pci_device_id;
+ int chip;
+/*
+ * The revision field of the PCI_CLASS_REVISION register is compared
+ * against each of these fields if the field is not -1. If it
+ * is less than min_revision or larger than max_revision, a warning
+ * message is printed.
+ */
+ int max_revision;
+ int min_revision;
+} pci_chip_ids[] = {
+ {PCI_DEVICE_ID_NCR_53C810, 810, 2, 1},
+ {PCI_DEVICE_ID_NCR_53C815, 815, 3, 2},
+ {PCI_DEVICE_ID_NCR_53C820, 820, -1, -1},
+ {PCI_DEVICE_ID_NCR_53C825, 825, -1, -1}
+};
+
+#define NPCI_CHIP_IDS (sizeof (pci_chip_ids) / sizeof(pci_chip_ids[0]))
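+
+/*
+ * A sketch (hypothetical helper, not part of the original file) of one
+ * direction of the translation the table above is meant for: mapping a
+ * PCI device ID to a chip number with a linear scan over the
+ * NPCI_CHIP_IDS entries; -1 means the device is not a supported chip.
+ */
+#if 0
+static int example_id_to_chip (unsigned short pci_device_id) {
+    int i;
+    for (i = 0; i < NPCI_CHIP_IDS; ++i)
+        if (pci_chip_ids[i].pci_device_id == pci_device_id)
+            return pci_chip_ids[i].chip;    /* e.g. 810 for the 53C810 */
+    return -1;
+}
+#endif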
+
+#define ROUNDUP(adr,type) \
+ ((void *) (((long) (adr) + sizeof(type) - 1) & ~(sizeof(type) - 1)))
+
+/*
+ * Forced detection and autoprobe code for various hardware. Currently,
+ * entry points for these are not included in init/main.c because if the
+ * PCI BIOS code isn't working right, you're not going to be able to use
+ * the hardware anyways; this way we force users to solve their
+ * problems rather than forcing detection and blaming us when it
+ * does not work.
+ */
+
+static struct override {
+ int chip; /* 700, 70066, 710, 720, 810, 820 */
+ int board; /* Any special board level gunk */
+ unsigned pci:1;
+ union {
+ struct {
+ int base; /* Memory address - indicates memory mapped regs */
+ int io_port;/* I/O port address - indicates I/O mapped regs */
+ int irq; /* IRQ line */
+ int dma; /* DMA channel - often none */
+ } normal;
+ struct {
+ int bus;
+ int device;
+ int function;
+ } pci;
+ } data;
+ long long options;
+} overrides [4] = {{0,},};
+static int commandline_current = 0;
+static int no_overrides = 0;
+
+#if 0
+#define OVERRIDE_LIMIT (sizeof(overrides) / sizeof(struct override))
+#else
+#define OVERRIDE_LIMIT commandline_current
+#endif
+
+/*
+ * Function: issue_to_cmd
+ *
+ * Purpose: convert jump instruction in issue array to NCR53c7x0_cmd
+ * structure pointer.
+ *
+ * Inputs : issue - pointer to start of NOP or JUMP instruction
+ * in issue array.
+ *
+ * Returns: pointer to command on success; 0 if opcode is NOP.
+ */
+
+static inline struct NCR53c7x0_cmd *
+issue_to_cmd (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
+ u32 *issue)
+{
+ return (issue[0] != hostdata->NOP_insn) ?
+ /*
+ * If the IF TRUE bit is set, it's a JUMP instruction. The
+ * operand is a bus pointer to the dsa_begin routine for this DSA. The
+ * dsa field of the NCR53c7x0_cmd structure starts with the
+ * DSA code template. By converting to a virtual address,
+ * subtracting the code template size, and offset of the
+ * dsa field, we end up with a pointer to the start of the
+ * structure (alternatively, we could use the
+ * dsa_cmnd field, an anachronism from when we weren't
+ * sure what the relationship between the NCR structures
+ * and host structures were going to be.
+ */
+ (struct NCR53c7x0_cmd *) ((char *) bus_to_virt (issue[1]) -
+ (hostdata->E_dsa_code_begin - hostdata->E_dsa_code_template) -
+ offsetof(struct NCR53c7x0_cmd, dsa))
+ /* If the IF TRUE bit is not set, it's a NOP */
+ : NULL;
+}
+
+
+/*
+ * Function : static void internal_setup(int board, int chip, char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array.
+ *
+ * Inputs : board - currently unsupported.  chip - 700, 70066, 710, 720,
+ * 810, 815, 820, 825, although currently only the NCR53c810 is
+ * supported.
+ *
+ */
+
+static void
+internal_setup(int board, int chip, char *str, int *ints) {
+ unsigned char pci; /* Specifies a PCI override, with bus, device,
+ function */
+
+ pci = (str && !strcmp (str, "pci")) ? 1 : 0;
+
+/*
+ * Override syntaxes are as follows :
+ * ncr53c700,ncr53c700-66,ncr53c710,ncr53c720=mem,io,irq,dma
+ * ncr53c810,ncr53c820,ncr53c825=mem,io,irq or pci,bus,device,function
+ */
+
+ if (commandline_current < OVERRIDE_LIMIT) {
+ overrides[commandline_current].pci = pci ? 1 : 0;
+ if (!pci) {
+ overrides[commandline_current].data.normal.base = ints[1];
+ overrides[commandline_current].data.normal.io_port = ints[2];
+ overrides[commandline_current].data.normal.irq = ints[3];
+ overrides[commandline_current].data.normal.dma = (ints[0] >= 4) ?
+ ints[4] : DMA_NONE;
+ /* FIXME: options is now a long long */
+ overrides[commandline_current].options = (ints[0] >= 5) ?
+ ints[5] : 0;
+ } else {
+ overrides[commandline_current].data.pci.bus = ints[1];
+ overrides[commandline_current].data.pci.device = ints[2];
+ overrides[commandline_current].data.pci.function = ints[3];
+ /* FIXME: options is now a long long */
+ overrides[commandline_current].options = (ints[0] >= 4) ?
+ ints[4] : 0;
+ }
+ overrides[commandline_current].board = board;
+ overrides[commandline_current].chip = chip;
+ ++commandline_current;
+ ++no_overrides;
+ } else {
+ printk ("53c7,7x0.c:internal_setup() : too many overrides\n");
+ }
+}
+
+/*
+ * XXX - we might want to implement a single override function
+ * with a chip type field, revamp the command line configuration,
+ * etc.
+ */
+
+#define setup_wrapper(x) \
+void ncr53c##x##_setup (char *str, int *ints) { \
+ internal_setup (BOARD_GENERIC, x, str, ints); \
+}
+
+setup_wrapper(700)
+setup_wrapper(70066)
+setup_wrapper(710)
+setup_wrapper(720)
+setup_wrapper(810)
+setup_wrapper(815)
+setup_wrapper(820)
+setup_wrapper(825)
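+
+/*
+ * Illustration only (hypothetical values, not part of the original file):
+ * a PCI override for bus 0, device 13, function 0 on an NCR53c810 boils
+ * down to the call below, which internal_setup() records in overrides[]
+ * with pci = 1 and no option bits.
+ */
+#if 0
+static void example_override (void) {
+    int ints[4] = { 3, 0, 13, 0 };	/* ints[0] holds the argument count */
+    ncr53c810_setup ("pci", ints);
+}
+#endif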
+
+/*
+ * FIXME: we should junk these, in favor of synchronous_want and
+ * wide_want in the NCR53c7x0_hostdata structure.
+ */
+
+/* Template for "preferred" synchronous transfer parameters. */
+
+static const unsigned char sdtr_message[] = {
+ EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 50 /* *4ns */, 8 /* off */
+};
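+
+/*
+ * The fourth byte is the transfer period in units of 4ns: 50 * 4ns = 200ns,
+ * i.e. the conservative 5MHz rate discussed at the top of this file;
+ * changing it to 25 (100ns) requests 10MHz transfers.  The final byte (8)
+ * is the REQ/ACK offset.
+ */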
+
+/* Template to request asynchronous transfers */
+
+static const unsigned char async_message[] = {
+ EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 0, 0 /* asynchronous */
+};
+
+/* Template for "preferred" WIDE transfer parameters */
+
+static const unsigned char wdtr_message[] = {
+ EXTENDED_MESSAGE, 2 /* length */, EXTENDED_WDTR, 1 /* 2^1 bytes */
+};
+
+/*
+ * Function : struct Scsi_Host *find_host (int host)
+ *
+ * Purpose : KGDB support function which translates a host number
+ * to a host structure.
+ *
+ * Inputs : host - number of SCSI host
+ *
+ * Returns : NULL on failure, pointer to host structure on success.
+ */
+
+static struct Scsi_Host *
+find_host (int host) {
+ struct Scsi_Host *h;
+ for (h = first_host; h && h->host_no != host; h = h->next);
+ if (!h) {
+ printk (KERN_ALERT "scsi%d not found\n", host);
+ return NULL;
+ } else if (h->hostt != the_template) {
+ printk (KERN_ALERT "scsi%d is not a NCR board\n", host);
+ return NULL;
+ }
+ return h;
+}
+
+/*
+ * Function : request_synchronous (int host, int target)
+ *
+ * Purpose : KGDB interface which will allow us to negotiate for
+ *	synchronous transfers.  This will be replaced with a more
+ *	integrated function; perhaps a new entry in the scsi_host
+ *	structure, accessible via an ioctl() or perhaps /proc/scsi.
+ *
+ * Inputs : host - number of SCSI host; target - number of target.
+ *
+ * Returns : 0 when negotiation has been set up for the next SCSI command,
+ * -1 on failure.
+ */
+
+static int
+request_synchronous (int host, int target) {
+ struct Scsi_Host *h;
+ struct NCR53c7x0_hostdata *hostdata;
+ unsigned long flags;
+ if (target < 0) {
+ printk (KERN_ALERT "target %d is bogus\n", target);
+ return -1;
+ }
+ if (!(h = find_host (host)))
+ return -1;
+ else if (h->this_id == target) {
+ printk (KERN_ALERT "target %d is host ID\n", target);
+ return -1;
+ }
+#ifndef LINUX_1_2
+ else if (target > h->max_id) {
+ printk (KERN_ALERT "target %d exceeds maximum of %d\n", target,
+ h->max_id);
+ return -1;
+ }
+#endif
+ hostdata = (struct NCR53c7x0_hostdata *)h->hostdata;
+
+ save_flags(flags);
+ cli();
+ if (hostdata->initiate_sdtr & (1 << target)) {
+ restore_flags(flags);
+        printk (KERN_ALERT "target %d already doing SDTR\n", target);
+ return -1;
+ }
+ hostdata->initiate_sdtr |= (1 << target);
+ restore_flags(flags);
+ return 0;
+}
+
+/*
+ * Function : request_disconnect (int host, int on_or_off)
+ *
+ * Purpose : KGDB support function, tells us to allow or disallow
+ * disconnections.
+ *
+ * Inputs : host - number of SCSI host; on_or_off - non-zero to allow,
+ * zero to disallow.
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+static int
+request_disconnect (int host, int on_or_off) {
+ struct Scsi_Host *h;
+ struct NCR53c7x0_hostdata *hostdata;
+ if (!(h = find_host (host)))
+ return -1;
+ hostdata = (struct NCR53c7x0_hostdata *) h->hostdata;
+ if (on_or_off)
+ hostdata->options |= OPTION_DISCONNECT;
+ else
+ hostdata->options &= ~OPTION_DISCONNECT;
+ return 0;
+}
+
+/*
+ * Function : static void NCR53c7x0_driver_init (struct Scsi_Host *host)
+ *
+ * Purpose : Initialize internal structures, as required on startup, or
+ * after a SCSI bus reset.
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ */
+
+static void
+NCR53c7x0_driver_init (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int i, j;
+ u32 *current;
+ for (i = 0; i < 16; ++i) {
+ hostdata->request_sense[i] = 0;
+ for (j = 0; j < 8; ++j)
+ hostdata->busy[i][j] = 0;
+ set_synchronous (host, i, /* sxfer */ 0, hostdata->saved_scntl3, 0);
+ }
+ hostdata->issue_queue = NULL;
+ hostdata->running_list = hostdata->finished_queue =
+ hostdata->current = NULL;
+ for (i = 0, current = (u32 *) hostdata->schedule;
+ i < host->can_queue; ++i, current += 2) {
+ current[0] = hostdata->NOP_insn;
+ current[1] = 0xdeadbeef;
+ }
+ current[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) | DBC_TCI_TRUE;
+ current[1] = (u32) virt_to_bus (hostdata->script) +
+ hostdata->E_wait_reselect;
+ hostdata->reconnect_dsa_head = 0;
+ hostdata->addr_reconnect_dsa_head = (u32)
+ virt_to_bus((void *) &(hostdata->reconnect_dsa_head));
+ hostdata->expecting_iid = 0;
+ hostdata->expecting_sto = 0;
+ if (hostdata->options & OPTION_ALWAYS_SYNCHRONOUS)
+ hostdata->initiate_sdtr = 0xffff;
+ else
+ hostdata->initiate_sdtr = 0;
+ hostdata->talked_to = 0;
+ hostdata->idle = 1;
+}
+
+/*
+ * Function : static int ccf_to_clock (int ccf)
+ *
+ * Purpose : Return the largest SCSI clock allowable for a given
+ *	clock conversion factor, so that when we don't know the real
+ *	SCSI clock we compute synchronous periods that are at least
+ *	as long as the device asked for.
+ *
+ * Inputs : ccf
+ *
+ * Returns : clock on success, -1 on failure.
+ */
+
+static int
+ccf_to_clock (int ccf) {
+ switch (ccf) {
+ case 1: return 25000000; /* Divide by 1.0 */
+ case 2: return 37500000; /* Divide by 1.5 */
+ case 3: return 50000000; /* Divide by 2.0 */
+ case 0: /* Divide by 3.0 */
+ case 4: return 66000000;
+ default: return -1;
+ }
+}
+
+/*
+ * Function : static int clock_to_ccf (int clock)
+ *
+ * Purpose : Return the clock conversion factor for a given SCSI clock.
+ *
+ * Inputs : clock - SCSI clock expressed in Hz.
+ *
+ * Returns : ccf on success, -1 on failure.
+ */
+
+static int
+clock_to_ccf (int clock) {
+ if (clock < 16666666)
+ return -1;
+ if (clock < 25000000)
+ return 1; /* Divide by 1.0 */
+ else if (clock < 37500000)
+ return 2; /* Divide by 1.5 */
+ else if (clock < 50000000)
+ return 3; /* Divide by 2.0 */
+ else if (clock < 66000000)
+ return 4; /* Divide by 3.0 */
+ else
+ return -1;
+}
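+
+/*
+ * Worked example: the 40MHz SCSI clock assumed for the 8xx chips in
+ * NCR53c7x0_init() falls in the 37.5-50MHz band, so clock_to_ccf(40000000)
+ * returns 3 (divide by 2.0), while ccf_to_clock(3) reports that band's
+ * upper bound of 50MHz; the round trip thus errs on the slow side, which
+ * is exactly what the synchronous period calculation wants.
+ */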
+
+/*
+ * Function : static int NCR53c7x0_init (struct Scsi_Host *host)
+ *
+ * Purpose : initialize the internal structures for a given SCSI host
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ *
+ * Preconditions : when this function is called, the chip_type
+ * field of the hostdata structure MUST have been set.
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+static int
+NCR53c7x0_init (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ int i, ccf, expected_ccf;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct Scsi_Host *search;
+ /*
+ * There are some things which we need to know about in order to provide
+ * a semblance of support. Print 'em if they aren't what we expect,
+ * otherwise don't add to the noise.
+ *
+ * -1 means we don't know what to expect.
+ */
+ int expected_id = -1;
+ int expected_clock = -1;
+ int uninitialized = 0;
+ /*
+ * FIXME : this is only on Intel boxes. On other platforms, this
+ * will differ.
+ */
+ int expected_mapping = OPTION_IO_MAPPED;
+ NCR53c7x0_local_setup(host);
+
+ switch (hostdata->chip) {
+ case 820:
+ case 825:
+#ifdef notyet
+ host->max_id = 15;
+#endif
+ /* Fall through */
+ case 810:
+ case 815:
+ hostdata->dstat_sir_intr = NCR53c8x0_dstat_sir_intr;
+ hostdata->init_save_regs = NULL;
+ hostdata->dsa_fixup = NCR53c8xx_dsa_fixup;
+ hostdata->init_fixup = NCR53c8x0_init_fixup;
+ hostdata->soft_reset = NCR53c8x0_soft_reset;
+ hostdata->run_tests = NCR53c8xx_run_tests;
+/* Is the SCSI clock ever anything else on these chips? */
+ expected_clock = hostdata->scsi_clock = 40000000;
+ expected_id = 7;
+ break;
+ default:
+ printk ("scsi%d : chip type of %d is not supported yet, detaching.\n",
+ host->host_no, hostdata->chip);
+ scsi_unregister (host);
+ return -1;
+ }
+
+ /* Assign constants accessed by NCR */
+ hostdata->NCR53c7xx_zero = 0;
+ hostdata->NCR53c7xx_msg_reject = MESSAGE_REJECT;
+ hostdata->NCR53c7xx_msg_abort = ABORT;
+ hostdata->NCR53c7xx_msg_nop = NOP;
+ hostdata->NOP_insn = (DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24;
+
+ if (expected_mapping == -1 ||
+ (hostdata->options & (OPTION_MEMORY_MAPPED)) !=
+ (expected_mapping & OPTION_MEMORY_MAPPED))
+ printk ("scsi%d : using %s mapped access\n", host->host_no,
+ (hostdata->options & OPTION_MEMORY_MAPPED) ? "memory" :
+ "io");
+
+ hostdata->dmode = (hostdata->chip == 700 || hostdata->chip == 70066) ?
+ DMODE_REG_00 : DMODE_REG_10;
+ hostdata->istat = ((hostdata->chip / 100) == 8) ?
+ ISTAT_REG_800 : ISTAT_REG_700;
+
+/* Only the ISTAT register is readable when the NCR is running, so make
+ sure it's halted. */
+ ncr_halt(host);
+
+/*
+ * XXX - the NCR53c700 uses bitfielded registers for SCID, SDID, etc,
+ * as does the 710, with one bit per SCSI ID.  Conversely, the 8xx chips
+ * handled here use a normal, 3 bit binary representation of these values.
+ *
+ * Get the rest of the NCR documentation, and FIND OUT where the change
+ * was.
+ */
+#if 0
+ tmp = hostdata->this_id_mask = NCR53c7x0_read8(SCID_REG);
+ for (host->this_id = 0; tmp != 1; tmp >>=1, ++host->this_id);
+#else
+ host->this_id = NCR53c7x0_read8(SCID_REG) & 15;
+ if (host->this_id == 0)
+ host->this_id = 7; /* sanitize hostid---0 doesn't make sense */
+ hostdata->this_id_mask = 1 << host->this_id;
+#endif
+
+/*
+ * Note : we should never encounter a board setup for ID0. So,
+ * if we see ID0, assume that it was uninitialized and set it
+ * to the industry standard 7.
+ */
+ if (!host->this_id) {
+ printk("scsi%d : initiator ID was %d, changing to 7\n",
+ host->host_no, host->this_id);
+ host->this_id = 7;
+ hostdata->this_id_mask = 1 << 7;
+ uninitialized = 1;
+ };
+
+ if (expected_id == -1 || host->this_id != expected_id)
+ printk("scsi%d : using initiator ID %d\n", host->host_no,
+ host->this_id);
+
+ /*
+ * Save important registers to allow a soft reset.
+ */
+
+ if ((hostdata->chip / 100) == 8) {
+ /*
+ * CTEST4 controls burst mode disable.
+ */
+ hostdata->saved_ctest4 = NCR53c7x0_read8(CTEST4_REG_800) &
+ CTEST4_800_SAVE;
+ } else {
+ /*
+ * CTEST7 controls cache snooping, burst mode, and support for
+ * external differential drivers.
+ */
+ hostdata->saved_ctest7 = NCR53c7x0_read8(CTEST7_REG) & CTEST7_SAVE;
+ }
+
+ /*
+ * On NCR53c700 series chips, DCNTL controls the SCSI clock divisor,
+ * on 800 series chips, it allows for a totem-pole IRQ driver.
+ */
+
+ hostdata->saved_dcntl = NCR53c7x0_read8(DCNTL_REG);
+
+ /*
+ * DCNTL_800_IRQM controls whether we are using an open drain
+ * driver (reset) or totem pole driver (set). In all cases,
+ * it's level active. I suppose this is an issue when we're trying to
+ * wire-or the same PCI INTx line?
+ */
+ if ((hostdata->chip / 100) == 8)
+ hostdata->saved_dcntl &= ~DCNTL_800_IRQM;
+
+ /*
+ * DMODE controls DMA burst length, and on 700 series chips,
+ * 286 mode and bus width
+ */
+ hostdata->saved_dmode = NCR53c7x0_read8(hostdata->dmode);
+
+ /*
+ * Now that burst length and enabled/disabled status is known,
+ * clue the user in on it.
+ */
+
+ if ((hostdata->chip / 100) == 8) {
+ if (hostdata->saved_ctest4 & CTEST4_800_BDIS) {
+ printk ("scsi%d : burst mode disabled\n", host->host_no);
+ } else {
+ switch (hostdata->saved_dmode & DMODE_BL_MASK) {
+ case DMODE_BL_2: i = 2; break;
+ case DMODE_BL_4: i = 4; break;
+ case DMODE_BL_8: i = 8; break;
+ case DMODE_BL_16: i = 16; break;
+ default: i = 0;
+ }
+ printk ("scsi%d : burst length %d\n", host->host_no, i);
+ }
+ }
+
+ /*
+ * On NCR53c810 and NCR53c820 chips, SCNTL3 contains the synchronous
+ * and normal clock conversion factors.
+ */
+ if (hostdata->chip / 100 == 8) {
+ expected_ccf = clock_to_ccf (expected_clock);
+ hostdata->saved_scntl3 = NCR53c7x0_read8(SCNTL3_REG_800);
+ ccf = hostdata->saved_scntl3 & SCNTL3_800_CCF_MASK;
+ if (expected_ccf != -1 && ccf != expected_ccf && !ccf) {
+ hostdata->saved_scntl3 = (hostdata->saved_scntl3 &
+ ~SCNTL3_800_CCF_MASK) | expected_ccf;
+ if (!uninitialized) {
+ printk ("scsi%d : reset ccf to %d from %d\n",
+ host->host_no, expected_ccf, ccf);
+ uninitialized = 1;
+ }
+ }
+ } else
+ ccf = 0;
+
+ /*
+ * If we don't have a SCSI clock programmed, pick one on the upper
+ * bound of that allowed by NCR so that our transfers err on the
+ * slow side, since transfer period must be >= the agreed
+ * upon period.
+ */
+
+ if ((!hostdata->scsi_clock) && (hostdata->scsi_clock = ccf_to_clock (ccf))
+ == -1) {
+ printk ("scsi%d : clock conversion factor %d unknown.\n"
+ " synchronous transfers disabled\n",
+ host->host_no, ccf);
+ hostdata->options &= ~OPTION_SYNCHRONOUS;
+ hostdata->scsi_clock = 0;
+ }
+
+ if (expected_clock == -1 || hostdata->scsi_clock != expected_clock)
+ printk ("scsi%d : using %dMHz SCSI clock\n", host->host_no,
+ hostdata->scsi_clock / 1000000);
+
+ for (i = 0; i < 16; ++i)
+ hostdata->cmd_allocated[i] = 0;
+
+ if (hostdata->init_save_regs)
+ hostdata->init_save_regs (host);
+ if (hostdata->init_fixup)
+ hostdata->init_fixup (host);
+
+ if (!the_template) {
+ the_template = host->hostt;
+ first_host = host;
+ }
+
+ /*
+ * Linux SCSI drivers have always been plagued with initialization
+ * problems - some didn't work with the BIOS disabled since they expected
+ * initialization from it, some didn't work when the networking code
+ * was enabled and registers got scrambled, etc.
+ *
+ * To avoid problems like this, in the future, we will do a soft
+ * reset on the SCSI chip, taking it back to a sane state.
+ */
+
+ hostdata->soft_reset (host);
+
+#if 1
+ hostdata->debug_count_limit = -1;
+#else
+ hostdata->debug_count_limit = 1;
+#endif
+ hostdata->intrs = -1;
+ hostdata->resets = -1;
+ memcpy ((void *) hostdata->synchronous_want, (void *) sdtr_message,
+ sizeof (hostdata->synchronous_want));
+
+ NCR53c7x0_driver_init (host);
+
+ /*
+ * Set up an interrupt handler if we aren't already sharing an IRQ
+ * with another board.
+ */
+
+ for (search = first_host; search && !(search->hostt == the_template &&
+ search->irq == host->irq && search != host); search=search->next);
+
+ if (!search) {
+ if (request_irq(host->irq, NCR53c7x0_intr, SA_INTERRUPT, "53c7,8xx")) {
+ printk("scsi%d : IRQ%d not free, detaching\n"
+ " You have either a configuration problem, or a\n"
+ " broken BIOS. You may wish to manually assign\n"
+ " an interrupt to the NCR board rather than using\n"
+ " an automatic setting.\n",
+ host->host_no, host->irq);
+ scsi_unregister (host);
+ return -1;
+ }
+ } else {
+ printk("scsi%d : using interrupt handler previously installed for scsi%d\n",
+ host->host_no, search->host_no);
+ }
+
+
+ if ((hostdata->run_tests && hostdata->run_tests(host) == -1) ||
+ (hostdata->options & OPTION_DEBUG_TESTS_ONLY)) {
+ /* XXX Should disable interrupts, etc. here */
+ scsi_unregister (host);
+ return -1;
+ } else {
+ if (host->io_port) {
+ host->n_io_port = 128;
+ request_region (host->io_port, host->n_io_port, "ncr53c7,8xx");
+ }
+ }
+
+ if (NCR53c7x0_read8 (SBCL_REG) & SBCL_BSY) {
+ printk ("scsi%d : bus wedge, doing SCSI reset\n", host->host_no);
+ hard_reset (host);
+ }
+ return 0;
+}
+
+/*
+ * Function : static int normal_init(Scsi_Host_Template *tpnt, int board,
+ *	int chip, u32 base, int io_port, int irq, int dma, int pci_valid,
+ * unsigned char pci_bus, unsigned char pci_device_fn,
+ * long long options);
+ *
+ * Purpose : initializes a NCR53c7,8x0 based on base addresses,
+ * IRQ, and DMA channel.
+ *
+ * Useful where a new NCR chip is backwards compatible with
+ * a supported chip, but the DEVICE ID has changed so it
+ * doesn't show up when the autoprobe does a pcibios_find_device.
+ *
+ * Inputs : tpnt - Template for this SCSI adapter, board - board level
+ * product, chip - 810, 820, or 825, bus - PCI bus, device_fn -
+ * device and function encoding as used by PCI BIOS calls.
+ *
+ * Returns : 0 on success, -1 on failure.
+ *
+ */
+
+static int
+normal_init (Scsi_Host_Template *tpnt, int board, int chip,
+ u32 base, int io_port, int irq, int dma, int pci_valid,
+ unsigned char pci_bus, unsigned char pci_device_fn, long long options) {
+ struct Scsi_Host *instance;
+ struct NCR53c7x0_hostdata *hostdata;
+ char chip_str[80];
+ int script_len = 0, dsa_len = 0, size = 0, max_cmd_size = 0,
+ schedule_size = 0, ok = 0;
+ void *tmp;
+
+ options |= perm_options;
+
+ switch (chip) {
+ case 825:
+ case 820:
+ case 815:
+ case 810:
+ schedule_size = (tpnt->can_queue + 1) * 8 /* JUMP instruction size */;
+ script_len = NCR53c8xx_script_len;
+ dsa_len = NCR53c8xx_dsa_len;
+ options |= OPTION_INTFLY;
+ sprintf (chip_str, "NCR53c%d", chip);
+ break;
+ default:
+ printk("scsi-ncr53c7,8xx : unsupported SCSI chip %d\n", chip);
+ return -1;
+ }
+
+ printk("scsi-ncr53c7,8xx : %s at memory 0x%x, io 0x%x, irq %d",
+ chip_str, (unsigned) base, io_port, irq);
+ if (dma == DMA_NONE)
+ printk("\n");
+ else
+ printk(", dma %d\n", dma);
+
+ if ((chip / 100 == 8) && !pci_valid)
+ printk ("scsi-ncr53c7,8xx : for better reliability and performance, please use the\n"
+ " PCI override instead.\n"
+ " Syntax : ncr53c8{10,15,20,25}=pci,<bus>,<device>,<function>\n"
+ " <bus> and <device> are usually 0.\n");
+
+ if (options & OPTION_DEBUG_PROBE_ONLY) {
+ printk ("scsi-ncr53c7,8xx : probe only enabled, aborting initialization\n");
+ return -1;
+ }
+
+ max_cmd_size = sizeof(struct NCR53c7x0_cmd) + dsa_len +
+ /* Size of dynamic part of command structure : */
+	2 * /* Worst case : we don't know if we need DATA IN or DATA OUT */
+ ( 2 * /* Current instructions per scatter/gather segment */
+ tpnt->sg_tablesize +
+ 3 /* Current startup / termination required per phase */
+ ) *
+ 8 /* Each instruction is eight bytes */;
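+    /*
+     * Worked example (the sg_tablesize value here is purely illustrative;
+     * the real one comes from the host template at run time) : with
+     * tpnt->sg_tablesize == 16, the dynamic part above comes to
+     * 2 * (2 * 16 + 3) * 8 = 560 bytes, on top of
+     * sizeof(struct NCR53c7x0_cmd) + dsa_len.
+     */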
+
+ /* Allocate fixed part of hostdata, dynamic part to hold appropriate
+ SCSI SCRIPT(tm) plus a single, maximum-sized NCR53c7x0_cmd structure.
+
+ We need a NCR53c7x0_cmd structure for scan_scsis() when we are
+ not loaded as a module, and when we're loaded as a module, we
+ can't use a non-dynamically allocated structure because modules
+ are vmalloc()'d, which can allow structures to cross page
+ boundaries and breaks our physical/virtual address assumptions
+ for DMA.
+
+ So, we stick it past the end of our hostdata structure.
+
+ ASSUMPTION :
+ Regardless of how many simultaneous SCSI commands we allow,
+ the probe code only executes a _single_ instruction at a time,
+ so we only need one here, and don't need to allocate NCR53c7x0_cmd
+ structures for each target until we are no longer in scan_scsis
+ and kmalloc() has become functional (memory_init() happens
+ after all device driver initialization).
+ */
+
+ size = sizeof(struct NCR53c7x0_hostdata) + script_len +
+ /* Note that alignment will be guaranteed, since we put the command
+ allocated at probe time after the fixed-up SCSI script, which
+ consists of 32 bit words, aligned on a 32 bit boundary. But
+ on a 64bit machine we need 8 byte alignment for hostdata->free, so
+ we add in another 4 bytes to take care of potential misalignment
+ */
+ (sizeof(void *) - sizeof(u32)) + max_cmd_size + schedule_size;
+
+ instance = scsi_register (tpnt, size);
+ if (!instance)
+ return -1;
+
+ /* FIXME : if we ever support an ISA NCR53c7xx based board, we
+ need to check if the chip is running in a 16 bit mode, and if so
+ unregister it if it is past the 16M (0x1000000) mark */
+
+ hostdata = (struct NCR53c7x0_hostdata *)
+ instance->hostdata;
+ hostdata->size = size;
+ hostdata->script_count = script_len / sizeof(u32);
+ hostdata = (struct NCR53c7x0_hostdata *) instance->hostdata;
+ hostdata->board = board;
+ hostdata->chip = chip;
+ if ((hostdata->pci_valid = pci_valid)) {
+ hostdata->pci_bus = pci_bus;
+ hostdata->pci_device_fn = pci_device_fn;
+ }
+
+ /*
+ * Being memory mapped is more desirable, since
+ *
+ * - Memory accesses may be faster.
+ *
+ * - The destination and source address spaces are the same for
+ * all instructions, meaning we don't have to twiddle dmode or
+ * any other registers.
+ *
+ * So, we try for memory mapped, and if we don't get it,
+     * we go for port mapped, and failing that we tell the user
+ * it can't work.
+ */
+
+ if (base) {
+ instance->base = (unsigned char *) (unsigned long) base;
+ /* Check for forced I/O mapping */
+ if (!(options & OPTION_IO_MAPPED)) {
+ options |= OPTION_MEMORY_MAPPED;
+ ok = 1;
+ }
+ } else {
+ options &= ~OPTION_MEMORY_MAPPED;
+ }
+
+ if (io_port) {
+ instance->io_port = io_port;
+ options |= OPTION_IO_MAPPED;
+ ok = 1;
+ } else {
+ options &= ~OPTION_IO_MAPPED;
+ }
+
+ if (!ok) {
+	printk ("scsi%d : not initializing, no I/O or memory mapping known\n",
+ instance->host_no);
+ scsi_unregister (instance);
+ return -1;
+ }
+ instance->irq = irq;
+ instance->dma_channel = dma;
+
+ hostdata->options = options;
+ hostdata->dsa_len = dsa_len;
+ hostdata->max_cmd_size = max_cmd_size;
+ hostdata->num_cmds = 1;
+ /* Initialize single command */
+ tmp = (hostdata->script + hostdata->script_count);
+ hostdata->free = ROUNDUP(tmp, void *);
+ hostdata->free->real = tmp;
+ hostdata->free->size = max_cmd_size;
+ hostdata->free->free = NULL;
+ hostdata->free->next = NULL;
+ hostdata->extra_allocate = 0;
+
+ /* Allocate command start code space */
+ hostdata->schedule = (chip == 700 || chip == 70066) ?
+ NULL : (u32 *) ((char *)hostdata->free + max_cmd_size);
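+    /*
+     * Sketch of the resulting per-host allocation, assuming the rounding
+     * described in the size calculation above : the fixed hostdata is
+     * followed by script_count 32 bit script words, then the single
+     * probe-time NCR53c7x0_cmd (max_cmd_size bytes, rounded up to pointer
+     * alignment), then the schedule array of (can_queue + 1) eight byte
+     * JUMP/NOP slots (not allocated for the 700/70066).
+     */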
+
+/*
+ * For diagnostic purposes, we don't really care how fast things blaze.
+ * For profiling, we want to access the 800ns resolution system clock,
+ * using a 'C' call on the host processor.
+ *
+ * Therefore, there's no need for the NCR chip to directly manipulate
+ * this data, and we should put it wherever it is most convenient for
+ * Linux.
+ */
+    if (track_events)
+	hostdata->events = (struct NCR53c7x0_event *)
+	    vmalloc (sizeof (struct NCR53c7x0_event) * track_events);
+    else
+	hostdata->events = NULL;
+
+ if (hostdata->events) {
+ memset ((void *) hostdata->events, 0, sizeof(struct NCR53c7x0_event) *
+ track_events);
+ hostdata->event_size = track_events;
+ hostdata->event_index = 0;
+ } else
+ hostdata->event_size = 0;
+
+ return NCR53c7x0_init(instance);
+}
+
+
+/*
+ * Function : static int ncr_pci_init(Scsi_Host_Template *tpnt, int board,
+ * int chip, int bus, int device_fn, long long options)
+ *
+ * Purpose : initializes an NCR53c800 family chip based on its PCI
+ *	bus, device, and function location.  Allows
+ *	reprogramming of the latency timer and determining addresses
+ *	and whether bus mastering, etc. are OK.
+ *
+ * Useful where a new NCR chip is backwards compatible with
+ * a supported chip, but the DEVICE ID has changed so it
+ * doesn't show up when the autoprobe does a pcibios_find_device.
+ *
+ * Inputs : tpnt - Template for this SCSI adapter, board - board level
+ * product, chip - 810, 820, or 825, bus - PCI bus, device_fn -
+ * device and function encoding as used by PCI BIOS calls.
+ *
+ * Returns : 0 on success, -1 on failure.
+ *
+ */
+
+static int
+ncr_pci_init (Scsi_Host_Template *tpnt, int board, int chip,
+ unsigned char bus, unsigned char device_fn, long long options) {
+ unsigned short vendor_id, device_id, command;
+#ifdef LINUX_1_2
+ unsigned long
+#else
+ unsigned int
+#endif
+ base, io_port;
+ unsigned char irq, revision;
+ int error, expected_chip;
+ int expected_id = -1, max_revision = -1, min_revision = -1;
+ int i;
+
+ printk("scsi-ncr53c7,8xx : at PCI bus %d, device %d, function %d\n",
+ bus, (int) (device_fn & 0xf8) >> 3,
+ (int) device_fn & 7);
+
+ if (!pcibios_present()) {
+ printk("scsi-ncr53c7,8xx : not initializing due to lack of PCI BIOS,\n"
+ " try using memory, port, irq override instead.\n");
+ return -1;
+ }
+
+ if ((error = pcibios_read_config_word (bus, device_fn, PCI_VENDOR_ID,
+ &vendor_id)) ||
+ (error = pcibios_read_config_word (bus, device_fn, PCI_DEVICE_ID,
+ &device_id)) ||
+ (error = pcibios_read_config_word (bus, device_fn, PCI_COMMAND,
+ &command)) ||
+ (error = pcibios_read_config_dword (bus, device_fn,
+ PCI_BASE_ADDRESS_0, &io_port)) ||
+ (error = pcibios_read_config_dword (bus, device_fn,
+ PCI_BASE_ADDRESS_1, &base)) ||
+ (error = pcibios_read_config_byte (bus, device_fn, PCI_CLASS_REVISION,
+ &revision)) ||
+ (error = pcibios_read_config_byte (bus, device_fn, PCI_INTERRUPT_LINE,
+ &irq))) {
+	printk ("scsi-ncr53c7,8xx : not initializing due to error %s reading configuration space;\n"
+ " perhaps you specified an incorrect PCI bus, device, or function.\n"
+ , pcibios_strerror(error));
+ return -1;
+ }
+
+ /* If any one ever clones the NCR chips, this will have to change */
+
+ if (vendor_id != PCI_VENDOR_ID_NCR) {
+ printk ("scsi-ncr53c7,8xx : not initializing, 0x%04x is not NCR vendor ID\n",
+ (int) vendor_id);
+ return -1;
+ }
+
+
+ /*
+ * Bit 0 is the address space indicator and must be one for I/O
+     * space mappings; bit 1 is reserved.  Discard these bits after
+     * checking that they have the expected value of 1.
+ */
+
+ if (command & PCI_COMMAND_IO) {
+ if ((io_port & 3) != 1) {
+ printk ("scsi-ncr53c7,8xx : disabling I/O mapping since base address 0 (0x%x)\n"
+ " bits 0..1 indicate a non-IO mapping\n",
+ (unsigned) io_port);
+ io_port = 0;
+ } else
+ io_port &= PCI_BASE_ADDRESS_IO_MASK;
+ } else {
+ io_port = 0;
+ }
+
+ if (command & PCI_COMMAND_MEMORY) {
+ if ((base & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
+ printk("scsi-ncr53c7,8xx : disabling memory mapping since base address 1\n"
+ " contains a non-memory mapping\n");
+ base = 0;
+ } else
+ base &= PCI_BASE_ADDRESS_MEM_MASK;
+ } else {
+ base = 0;
+ }
+
+ if (!io_port && !base) {
+ printk ("scsi-ncr53c7,8xx : not initializing, both I/O and memory mappings disabled\n");
+ return -1;
+ }
+
+ if (!(command & PCI_COMMAND_MASTER)) {
+ printk ("scsi-ncr53c7,8xx : not initializing, BUS MASTERING was disabled\n");
+ return -1;
+ }
+
+ for (i = 0; i < NPCI_CHIP_IDS; ++i) {
+ if (device_id == pci_chip_ids[i].pci_device_id) {
+ max_revision = pci_chip_ids[i].max_revision;
+ min_revision = pci_chip_ids[i].min_revision;
+ expected_chip = pci_chip_ids[i].chip;
+ }
+ if (chip == pci_chip_ids[i].chip)
+ expected_id = pci_chip_ids[i].pci_device_id;
+ }
+
+ if (chip && device_id != expected_id)
+ printk ("scsi-ncr53c7,8xx : warning : device id of 0x%04x doesn't\n"
+ " match expected 0x%04x\n",
+ (unsigned int) device_id, (unsigned int) expected_id );
+
+ if (max_revision != -1 && revision > max_revision)
+ printk ("scsi-ncr53c7,8xx : warning : revision of %d is greater than %d.\n",
+ (int) revision, max_revision);
+ else if (min_revision != -1 && revision < min_revision)
+ printk ("scsi-ncr53c7,8xx : warning : revision of %d is less than %d.\n",
+ (int) revision, min_revision);
+
+ if (io_port && check_region (io_port, 128)) {
+ printk ("scsi-ncr53c7,8xx : IO region 0x%x to 0x%x is in use\n",
+ (unsigned) io_port, (unsigned) io_port + 127);
+ return -1;
+ }
+
+ return normal_init (tpnt, board, chip, (int) base, io_port,
+ (int) irq, DMA_NONE, 1, bus, device_fn, options);
+}
+
+
+/*
+ * Function : int NCR53c7xx_detect(Scsi_Host_Template *tpnt)
+ *
+ * Purpose : detects and initializes NCR53c7,8x0 SCSI chips
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter
+ *
+ * Returns : number of host adapters detected
+ *
+ */
+
+int
+NCR53c7xx_detect(Scsi_Host_Template *tpnt) {
+ int i;
+ int current_override;
+ int count; /* Number of boards detected */
+ unsigned char pci_bus, pci_device_fn;
+ static short pci_index=0; /* Device index to PCI BIOS calls */
+
+#ifndef LINUX_1_2
+ tpnt->proc_dir = &proc_scsi_ncr53c7xx;
+#endif
+
+ for (current_override = count = 0; current_override < OVERRIDE_LIMIT;
+ ++current_override) {
+ if (overrides[current_override].pci ?
+ !ncr_pci_init (tpnt, overrides[current_override].board,
+ overrides[current_override].chip,
+ (unsigned char) overrides[current_override].data.pci.bus,
+ (((overrides[current_override].data.pci.device
+ << 3) & 0xf8)|(overrides[current_override].data.pci.function &
+ 7)), overrides[current_override].options):
+ !normal_init (tpnt, overrides[current_override].board,
+ overrides[current_override].chip,
+ overrides[current_override].data.normal.base,
+ overrides[current_override].data.normal.io_port,
+ overrides[current_override].data.normal.irq,
+ overrides[current_override].data.normal.dma,
+ 0 /* PCI data invalid */, 0 /* PCI bus place holder */,
+ 0 /* PCI device_function place holder */,
+ overrides[current_override].options)) {
+ ++count;
+ }
+ }
+
+ if (pcibios_present()) {
+ for (i = 0; i < NPCI_CHIP_IDS; ++i)
+ for (pci_index = 0;
+ !pcibios_find_device (PCI_VENDOR_ID_NCR,
+ pci_chip_ids[i].pci_device_id, pci_index, &pci_bus,
+ &pci_device_fn);
+ ++pci_index)
+ if (!ncr_pci_init (tpnt, BOARD_GENERIC, pci_chip_ids[i].chip,
+ pci_bus, pci_device_fn, /* no options */ 0))
+ ++count;
+ }
+ return count;
+}
+
+/* NCR53c810 and NCR53c820 script handling code */
+
+#include "53c8xx_d.h"
+#ifdef A_int_debug_sync
+#define DEBUG_SYNC_INTR A_int_debug_sync
+#endif
+static int NCR53c8xx_script_len = sizeof (SCRIPT);
+static int NCR53c8xx_dsa_len = A_dsa_end + Ent_dsa_zero - Ent_dsa_code_template;
+
+/*
+ * Function : static void NCR53c8x0_init_fixup (struct Scsi_Host *host)
+ *
+ * Purpose : copy and fixup the SCSI SCRIPTS(tm) code for this device.
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ *
+ */
+
+static void
+NCR53c8x0_init_fixup (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned char tmp;
+ int i, ncr_to_memory, memory_to_ncr;
+ u32 base;
+ NCR53c7x0_local_setup(host);
+
+
+ /* XXX - NOTE : this code MUST be made endian aware */
+ /* Copy code into buffer that was allocated at detection time. */
+ memcpy ((void *) hostdata->script, (void *) SCRIPT,
+ sizeof(SCRIPT));
+ /* Fixup labels */
+ for (i = 0; i < PATCHES; ++i)
+ hostdata->script[LABELPATCHES[i]] +=
+ virt_to_bus(hostdata->script);
+ /* Fixup addresses of constants that used to be EXTERNAL */
+
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_abort,
+ virt_to_bus(&(hostdata->NCR53c7xx_msg_abort)));
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_reject,
+ virt_to_bus(&(hostdata->NCR53c7xx_msg_reject)));
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_zero,
+ virt_to_bus(&(hostdata->NCR53c7xx_zero)));
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_sink,
+ virt_to_bus(&(hostdata->NCR53c7xx_sink)));
+ patch_abs_32 (hostdata->script, 0, NOP_insn,
+ virt_to_bus(&(hostdata->NOP_insn)));
+ patch_abs_32 (hostdata->script, 0, schedule,
+ virt_to_bus((void *) hostdata->schedule));
+
+ /* Fixup references to external variables: */
+ for (i = 0; i < EXTERNAL_PATCHES_LEN; ++i)
+ hostdata->script[EXTERNAL_PATCHES[i].offset] +=
+ virt_to_bus(EXTERNAL_PATCHES[i].address);
+
+ /*
+ * Fixup absolutes set at boot-time.
+ *
+     * All non-code absolute variables prefixed with "dsa_" and "int_"
+ * are constants, and need no fixup provided the assembler has done
+ * it for us (I don't know what the "real" NCR assembler does in
+ * this case, my assembler does the right magic).
+ */
+
+ patch_abs_rwri_data (hostdata->script, 0, dsa_save_data_pointer,
+ Ent_dsa_code_save_data_pointer - Ent_dsa_zero);
+ patch_abs_rwri_data (hostdata->script, 0, dsa_restore_pointers,
+ Ent_dsa_code_restore_pointers - Ent_dsa_zero);
+ patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
+ Ent_dsa_code_check_reselect - Ent_dsa_zero);
+
+ /*
+ * Just for the hell of it, preserve the settings of
+ * Burst Length and Enable Read Line bits from the DMODE
+ * register. Make sure SCRIPTS start automagically.
+ */
+
+ tmp = NCR53c7x0_read8(DMODE_REG_10);
+ tmp &= (DMODE_800_ERL | DMODE_BL_MASK);
+
+ if (!(hostdata->options & OPTION_MEMORY_MAPPED)) {
+ base = (u32) host->io_port;
+ memory_to_ncr = tmp|DMODE_800_DIOM;
+ ncr_to_memory = tmp|DMODE_800_SIOM;
+ } else {
+ base = virt_to_bus(host->base);
+ memory_to_ncr = ncr_to_memory = tmp;
+ }
+
+ patch_abs_32 (hostdata->script, 0, addr_scratch, base + SCRATCHA_REG_800);
+ patch_abs_32 (hostdata->script, 0, addr_temp, base + TEMP_REG);
+
+ /*
+ * I needed some variables in the script to be accessible to
+ * both the NCR chip and the host processor. For these variables,
+ * I made the arbitrary decision to store them directly in the
+ * hostdata structure rather than in the RELATIVE area of the
+ * SCRIPTS.
+ */
+
+
+ patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_memory, tmp);
+ patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_ncr, memory_to_ncr);
+ patch_abs_rwri_data (hostdata->script, 0, dmode_ncr_to_memory, ncr_to_memory);
+
+ patch_abs_32 (hostdata->script, 0, msg_buf,
+ virt_to_bus((void *)&(hostdata->msg_buf)));
+ patch_abs_32 (hostdata->script, 0, reconnect_dsa_head,
+ virt_to_bus((void *)&(hostdata->reconnect_dsa_head)));
+ patch_abs_32 (hostdata->script, 0, addr_reconnect_dsa_head,
+ virt_to_bus((void *)&(hostdata->addr_reconnect_dsa_head)));
+ patch_abs_32 (hostdata->script, 0, reselected_identify,
+ virt_to_bus((void *)&(hostdata->reselected_identify)));
+/* reselected_tag is currently unused */
+#if 0
+ patch_abs_32 (hostdata->script, 0, reselected_tag,
+ virt_to_bus((void *)&(hostdata->reselected_tag)));
+#endif
+
+ patch_abs_32 (hostdata->script, 0, test_dest,
+ virt_to_bus((void*)&hostdata->test_dest));
+ patch_abs_32 (hostdata->script, 0, test_src,
+ virt_to_bus(&hostdata->test_source));
+
+ patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
+ (unsigned char)(Ent_dsa_code_check_reselect - Ent_dsa_zero));
+
+/* These are for event logging; the ncr_event enum contains the
+ actual interrupt numbers. */
+#ifdef A_int_EVENT_SELECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT, (u32) EVENT_SELECT);
+#endif
+#ifdef A_int_EVENT_DISCONNECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_DISCONNECT, (u32) EVENT_DISCONNECT);
+#endif
+#ifdef A_int_EVENT_RESELECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT, (u32) EVENT_RESELECT);
+#endif
+#ifdef A_int_EVENT_COMPLETE
+ patch_abs_32 (hostdata->script, 0, int_EVENT_COMPLETE, (u32) EVENT_COMPLETE);
+#endif
+#ifdef A_int_EVENT_IDLE
+ patch_abs_32 (hostdata->script, 0, int_EVENT_IDLE, (u32) EVENT_IDLE);
+#endif
+#ifdef A_int_EVENT_SELECT_FAILED
+ patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT_FAILED,
+ (u32) EVENT_SELECT_FAILED);
+#endif
+#ifdef A_int_EVENT_BEFORE_SELECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_BEFORE_SELECT,
+ (u32) EVENT_BEFORE_SELECT);
+#endif
+#ifdef A_int_EVENT_RESELECT_FAILED
+ patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT_FAILED,
+ (u32) EVENT_RESELECT_FAILED);
+#endif
+
+ /*
+ * Make sure the NCR and Linux code agree on the location of
+ * certain fields.
+ */
+
+ hostdata->E_accept_message = Ent_accept_message;
+ hostdata->E_command_complete = Ent_command_complete;
+ hostdata->E_cmdout_cmdout = Ent_cmdout_cmdout;
+ hostdata->E_data_transfer = Ent_data_transfer;
+ hostdata->E_debug_break = Ent_debug_break;
+ hostdata->E_dsa_code_template = Ent_dsa_code_template;
+ hostdata->E_dsa_code_template_end = Ent_dsa_code_template_end;
+ hostdata->E_end_data_transfer = Ent_end_data_transfer;
+ hostdata->E_initiator_abort = Ent_initiator_abort;
+ hostdata->E_msg_in = Ent_msg_in;
+ hostdata->E_other_transfer = Ent_other_transfer;
+ hostdata->E_other_in = Ent_other_in;
+ hostdata->E_other_out = Ent_other_out;
+ hostdata->E_reject_message = Ent_reject_message;
+ hostdata->E_respond_message = Ent_respond_message;
+ hostdata->E_select = Ent_select;
+ hostdata->E_select_msgout = Ent_select_msgout;
+ hostdata->E_target_abort = Ent_target_abort;
+#ifdef Ent_test_0
+ hostdata->E_test_0 = Ent_test_0;
+#endif
+ hostdata->E_test_1 = Ent_test_1;
+ hostdata->E_test_2 = Ent_test_2;
+#ifdef Ent_test_3
+ hostdata->E_test_3 = Ent_test_3;
+#endif
+ hostdata->E_wait_reselect = Ent_wait_reselect;
+ hostdata->E_dsa_code_begin = Ent_dsa_code_begin;
+
+ hostdata->dsa_cmdout = A_dsa_cmdout;
+ hostdata->dsa_cmnd = A_dsa_cmnd;
+ hostdata->dsa_datain = A_dsa_datain;
+ hostdata->dsa_dataout = A_dsa_dataout;
+ hostdata->dsa_end = A_dsa_end;
+ hostdata->dsa_msgin = A_dsa_msgin;
+ hostdata->dsa_msgout = A_dsa_msgout;
+ hostdata->dsa_msgout_other = A_dsa_msgout_other;
+ hostdata->dsa_next = A_dsa_next;
+ hostdata->dsa_select = A_dsa_select;
+ hostdata->dsa_start = Ent_dsa_code_template - Ent_dsa_zero;
+ hostdata->dsa_status = A_dsa_status;
+ hostdata->dsa_jump_dest = Ent_dsa_code_fix_jump - Ent_dsa_zero +
+ 8 /* destination operand */;
+
+ /* sanity check */
+ if (A_dsa_fields_start != Ent_dsa_code_template_end -
+ Ent_dsa_zero)
+ printk("scsi%d : NCR dsa_fields start is %d not %d\n",
+ host->host_no, A_dsa_fields_start, Ent_dsa_code_template_end -
+ Ent_dsa_zero);
+
+ printk("scsi%d : NCR code relocated to 0x%lx (virt 0x%p)\n", host->host_no,
+ virt_to_bus(hostdata->script), hostdata->script);
+}
+
+/*
+ * Function : static int NCR53c8xx_run_tests (struct Scsi_Host *host)
+ *
+ * Purpose : run various verification tests on the NCR chip,
+ * including interrupt generation, and proper bus mastering
+ * operation.
+ *
+ * Inputs : host - a properly initialized Scsi_Host structure
+ *
+ * Preconditions : the NCR chip must be in a halted state.
+ *
+ * Returns : 0 if all tests were successful, -1 on error.
+ *
+ */
+
+static int
+NCR53c8xx_run_tests (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long timeout;
+ u32 start;
+ int failed, i;
+ unsigned long flags;
+ NCR53c7x0_local_setup(host);
+
+ /* The NCR chip _must_ be idle to run the test scripts */
+
+ save_flags(flags);
+ cli();
+ if (!hostdata->idle) {
+ printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+
+ /*
+     * Check for functional interrupts; this could work as an
+ * autoprobe routine.
+ */
+
+ if ((hostdata->options & OPTION_DEBUG_TEST1) &&
+ hostdata->state != STATE_DISABLED) {
+ hostdata->idle = 0;
+ hostdata->test_running = 1;
+ hostdata->test_completed = -1;
+ hostdata->test_dest = 0;
+ hostdata->test_source = 0xdeadbeef;
+ start = virt_to_bus (hostdata->script) + hostdata->E_test_1;
+ hostdata->state = STATE_RUNNING;
+ printk ("scsi%d : test 1", host->host_no);
+ NCR53c7x0_write32 (DSP_REG, start);
+ printk (" started\n");
+ sti();
+
+ /*
+ * This is currently a .5 second timeout, since (in theory) no slow
+ * board will take that long. In practice, we've seen one
+	 * Pentium which occasionally fails with this, but works with
+	 * 10 times as much.
+ */
+
+ timeout = jiffies + 5 * HZ / 10;
+ while ((hostdata->test_completed == -1) && jiffies < timeout)
+ barrier();
+
+ failed = 1;
+ if (hostdata->test_completed == -1)
+ printk ("scsi%d : driver test 1 timed out%s\n",host->host_no ,
+ (hostdata->test_dest == 0xdeadbeef) ?
+ " due to lost interrupt.\n"
+ " Please verify that the correct IRQ is being used for your board,\n"
+ " and that the motherboard IRQ jumpering matches the PCI setup on\n"
+ " PCI systems.\n"
+ " If you are using a NCR53c810 board in a PCI system, you should\n"
+ " also verify that the board is jumpered to use PCI INTA, since\n"
+ " most PCI motherboards lack support for INTB, INTC, and INTD.\n"
+ : "");
+ else if (hostdata->test_completed != 1)
+ printk ("scsi%d : test 1 bad interrupt value (%d)\n",
+ host->host_no, hostdata->test_completed);
+ else
+ failed = (hostdata->test_dest != 0xdeadbeef);
+
+ if (hostdata->test_dest != 0xdeadbeef) {
+ printk ("scsi%d : driver test 1 read 0x%x instead of 0xdeadbeef indicating a\n"
+ " probable cache invalidation problem. Please configure caching\n"
+ " as write-through or disabled\n",
+ host->host_no, hostdata->test_dest);
+ }
+
+ if (failed) {
+ printk ("scsi%d : DSP = 0x%p (script at 0x%p, start at 0x%x)\n",
+ host->host_no, bus_to_virt(NCR53c7x0_read32(DSP_REG)),
+ hostdata->script, start);
+ printk ("scsi%d : DSPS = 0x%x\n", host->host_no,
+ NCR53c7x0_read32(DSPS_REG));
+ restore_flags(flags);
+ return -1;
+ }
+ hostdata->test_running = 0;
+ }
+
+ if ((hostdata->options & OPTION_DEBUG_TEST2) &&
+ hostdata->state != STATE_DISABLED) {
+ u32 dsa[48];
+ unsigned char identify = IDENTIFY(0, 0);
+ unsigned char cmd[6];
+ unsigned char data[36];
+ unsigned char status = 0xff;
+ unsigned char msg = 0xff;
+
+ cmd[0] = INQUIRY;
+ cmd[1] = cmd[2] = cmd[3] = cmd[5] = 0;
+ cmd[4] = sizeof(data);
+
+ dsa[2] = 1;
+ dsa[3] = virt_to_bus(&identify);
+ dsa[4] = 6;
+ dsa[5] = virt_to_bus(&cmd);
+ dsa[6] = sizeof(data);
+ dsa[7] = virt_to_bus(&data);
+ dsa[8] = 1;
+ dsa[9] = virt_to_bus(&status);
+ dsa[10] = 1;
+ dsa[11] = virt_to_bus(&msg);
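+	/*
+	 * Layout sketch of the table indirect entries above : each pair is a
+	 * (byte count, physical address) couple which the test 2 script
+	 * appears to use for, in order, MSG OUT (the IDENTIFY message),
+	 * CMD (the six byte INQUIRY), DATA IN (36 bytes), STATUS (one byte)
+	 * and MSG IN (one byte); dsa[0] is filled in with the SCNTL3 /
+	 * target id word inside the loop below.
+	 */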
+
+ for (i = 0; i < 3; ++i) {
+ cli();
+ if (!hostdata->idle) {
+ printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+
+ /* SCNTL3 SDID */
+ dsa[0] = (0x33 << 24) | (i << 16) ;
+ hostdata->idle = 0;
+ hostdata->test_running = 2;
+ hostdata->test_completed = -1;
+ start = virt_to_bus(hostdata->script) + hostdata->E_test_2;
+ hostdata->state = STATE_RUNNING;
+ NCR53c7x0_write32 (DSA_REG, virt_to_bus(dsa));
+ NCR53c7x0_write32 (DSP_REG, start);
+ sti();
+
+ timeout = jiffies + 5 * HZ; /* arbitrary */
+ while ((hostdata->test_completed == -1) && jiffies < timeout)
+ barrier();
+ NCR53c7x0_write32 (DSA_REG, 0);
+
+ if (hostdata->test_completed == 2) {
+ data[35] = 0;
+ printk ("scsi%d : test 2 INQUIRY to target %d, lun 0 : %s\n",
+ host->host_no, i, data + 8);
+ printk ("scsi%d : status ", host->host_no);
+ print_status (status);
+ printk ("\nscsi%d : message ", host->host_no);
+ print_msg (&msg);
+ printk ("\n");
+ } else if (hostdata->test_completed == 3) {
+ printk("scsi%d : test 2 no connection with target %d\n",
+ host->host_no, i);
+ if (!hostdata->idle) {
+ printk("scsi%d : not idle\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+ } else if (hostdata->test_completed == -1) {
+ printk ("scsi%d : test 2 timed out\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+ hostdata->test_running = 0;
+ }
+ }
+
+ restore_flags(flags);
+ return 0;
+}
+
+/*
+ * Function : static void NCR53c8xx_dsa_fixup (struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : copy the NCR53c8xx dsa structure into cmd's dsa buffer,
+ * performing all necessary relocation.
+ *
+ * Inputs : cmd, a NCR53c7x0_cmd structure with a dsa area large
+ * enough to hold the NCR53c8xx dsa.
+ */
+
+static void
+NCR53c8xx_dsa_fixup (struct NCR53c7x0_cmd *cmd) {
+ Scsi_Cmnd *c = cmd->cmd;
+ struct Scsi_Host *host = c->host;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int i;
+
+ memcpy (cmd->dsa, hostdata->script + (hostdata->E_dsa_code_template / 4),
+ hostdata->E_dsa_code_template_end - hostdata->E_dsa_code_template);
+
+ /*
+ * Note : within the NCR 'C' code, dsa points to the _start_
+ * of the DSA structure, and _not_ the offset of dsa_zero within
+ * that structure used to facilitate shorter signed offsets
+ * for the 8 bit ALU.
+ *
+ * The implications of this are that
+ *
+ * - 32 bit A_dsa_* absolute values require an additional
+ * dsa_zero added to their value to be correct, since they are
+ * relative to dsa_zero which is in essentially a separate
+ * space from the code symbols.
+ *
+ * - All other symbols require no special treatment.
+ */
+
+ patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_lun, c->lun);
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_next, virt_to_bus(&cmd->dsa_next_addr));
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_next, virt_to_bus(cmd->dsa) + Ent_dsa_zero -
+ Ent_dsa_code_template + A_dsa_next);
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_sync, virt_to_bus((void *)hostdata->sync[c->target].script));
+ patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_target, c->target);
+ /* XXX - new pointer stuff */
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_saved_pointer, virt_to_bus(&cmd->saved_data_pointer));
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_saved_residual, virt_to_bus(&cmd->saved_residual));
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_residual, virt_to_bus(&cmd->residual));
+
+ /* XXX - new start stuff */
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_dsa_value, virt_to_bus(&cmd->dsa_addr));
+
+}
+
+/*
+ * Function : run_process_issue_queue (void)
+ *
+ * Purpose : ensure that the coroutine is running and will process our
+ * request. process_issue_queue_running is checked/set here (in an
+ * inline function) rather than in process_issue_queue itself to reduce
+ * the chances of stack overflow.
+ *
+ */
+
+static volatile int process_issue_queue_running = 0;
+
+static __inline__ void
+run_process_issue_queue(void) {
+ unsigned long flags;
+ save_flags (flags);
+ cli();
+ if (!process_issue_queue_running) {
+ process_issue_queue_running = 1;
+ process_issue_queue(flags);
+ /*
+ * process_issue_queue_running is cleared in process_issue_queue
+ * once it can't do more work, and process_issue_queue exits with
+ * interrupts disabled.
+ */
+ }
+ restore_flags (flags);
+}
+
+/*
+ * Function : static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int
+ * result)
+ *
+ * Purpose : mark SCSI command as finished, OR'ing the host portion
+ * of the result word into the result field of the corresponding
+ * Scsi_Cmnd structure, and removing it from the internal queues.
+ *
+ * Inputs : cmd - command, result - entire result field
+ *
+ * Preconditions : the NCR chip should be in a halted state when
+ * abnormal_finished is run, since it modifies structures which
+ * the NCR expects to have exclusive access to.
+ */
+
+static void
+abnormal_finished (struct NCR53c7x0_cmd *cmd, int result) {
+ Scsi_Cmnd *c = cmd->cmd;
+ struct Scsi_Host *host = c->host;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ int left, found;
+ volatile struct NCR53c7x0_cmd * linux_search;
+ volatile struct NCR53c7x0_cmd * volatile *linux_prev;
+ volatile u32 *ncr_prev, *current, ncr_search;
+
+#if 0
+ printk ("scsi%d: abnormal finished\n", host->host_no);
+#endif
+
+ save_flags(flags);
+ cli();
+ found = 0;
+ /*
+ * Traverse the NCR issue array until we find a match or run out
+ * of instructions. Instructions in the NCR issue array are
+ * either JUMP or NOP instructions, which are 2 words in length.
+ */
+
+
+ for (found = 0, left = host->can_queue, current = hostdata->schedule;
+ left > 0; --left, current += 2)
+ {
+ if (issue_to_cmd (host, hostdata, (u32 *) current) == cmd)
+ {
+ current[0] = hostdata->NOP_insn;
+ current[1] = 0xdeadbeef;
+ ++found;
+ break;
+ }
+ }
+
+ /*
+ * Traverse the NCR reconnect list of DSA structures until we find
+ * a pointer to this dsa or have found too many command structures.
+ * We let prev point at the next field of the previous element or
+ * head of the list, so we don't do anything different for removing
+ * the head element.
+ */
+
+ for (left = host->can_queue,
+ ncr_search = hostdata->reconnect_dsa_head,
+ ncr_prev = &hostdata->reconnect_dsa_head;
+ left >= 0 && ncr_search &&
+ ((char*)bus_to_virt(ncr_search) + hostdata->dsa_start)
+ != (char *) cmd->dsa;
+ ncr_prev = (u32*) ((char*)bus_to_virt(ncr_search) +
+ hostdata->dsa_next), ncr_search = *ncr_prev, --left);
+
+ if (left < 0)
+	printk("scsi%d: loop detected in ncr reconnect list\n",
+ host->host_no);
+ else if (ncr_search)
+ if (found)
+ printk("scsi%d: scsi %ld in ncr issue array and reconnect lists\n",
+ host->host_no, c->pid);
+ else {
+ volatile u32 * next = (u32 *)
+ ((char *)bus_to_virt(ncr_search) + hostdata->dsa_next);
+ *ncr_prev = *next;
+/* If we're at the tail end of the issue queue, update that pointer too. */
+ found = 1;
+ }
+
+ /*
+ * Traverse the host running list until we find this command or discover
+     * we have too many elements, pointing linux_prev at the next field of the
+     * previous element (or the head of the list) and linux_search at the
+     * current element.
+ */
+
+ for (left = host->can_queue, linux_search = hostdata->running_list,
+ linux_prev = &hostdata->running_list;
+ left >= 0 && linux_search && linux_search != cmd;
+ linux_prev = &(linux_search->next),
+ linux_search = linux_search->next, --left);
+
+ if (left < 0)
+ printk ("scsi%d: loop detected in host running list for scsi pid %ld\n",
+ host->host_no, c->pid);
+ else if (linux_search) {
+ *linux_prev = linux_search->next;
+ --hostdata->busy[c->target][c->lun];
+ }
+
+ /* Return the NCR command structure to the free list */
+ cmd->next = hostdata->free;
+ hostdata->free = cmd;
+ c->host_scribble = NULL;
+
+ /* And return */
+ c->result = result;
+ c->scsi_done(c);
+
+ restore_flags(flags);
+ run_process_issue_queue();
+}
+
+/*
+ * Function : static void intr_break (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : Handler for breakpoint interrupts from a SCSI script
+ *
+ * Inputs : host - pointer to this host adapter's structure,
+ * cmd - pointer to the command (if any) dsa was pointing
+ * to.
+ *
+ */
+
+static void
+intr_break (struct Scsi_Host *host, struct
+ NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_break *bp;
+#if 0
+ Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
+#endif
+ u32 *dsp;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ NCR53c7x0_local_setup(host);
+
+ /*
+ * Find the break point corresponding to this address, and
+ * dump the appropriate debugging information to standard
+ * output.
+ */
+ save_flags(flags);
+ cli();
+ dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
+ for (bp = hostdata->breakpoints; bp && bp->address != dsp;
+ bp = bp->next);
+ if (!bp)
+ panic("scsi%d : break point interrupt from %p with no breakpoint!",
+ host->host_no, dsp);
+
+ /*
+ * Configure the NCR chip for manual start mode, so that we can
+ * point the DSP register at the instruction that follows the
+ * INT int_debug_break instruction.
+ */
+
+ NCR53c7x0_write8 (hostdata->dmode,
+ NCR53c7x0_read8(hostdata->dmode)|DMODE_MAN);
+
+ /*
+ * And update the DSP register, using the size of the old
+ * instruction in bytes.
+ */
+
+ restore_flags(flags);
+}
+/*
+ * Function : static void print_synchronous (const char *prefix,
+ * const unsigned char *msg)
+ *
+ * Purpose : print a pretty, user and machine parsable representation
+ *	of an SDTR message, including the "real" parameters and data
+ *	clock, so we can tell the transfer rate at a glance.
+ *
+ * Inputs : prefix - text to prepend, msg - SDTR message (5 bytes)
+ */
+
+static void
+print_synchronous (const char *prefix, const unsigned char *msg) {
+ if (msg[4]) {
+ int Hz = 1000000000 / (msg[3] * 4);
+ int integer = Hz / 1000000;
+ int fraction = (Hz - (integer * 1000000)) / 10000;
+ printk ("%speriod %dns offset %d %d.%02dMHz %s SCSI%s\n",
+ prefix, (int) msg[3] * 4, (int) msg[4], integer, fraction,
+ (((msg[3] * 4) < 200) ? "FAST" : "synchronous"),
+ (((msg[3] * 4) < 200) ? "-II" : ""));
+ } else
+ printk ("%sasynchronous SCSI\n", prefix);
+}
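+
+/*
+ * Worked example (illustrative values only) : an SDTR message with
+ * msg[3] = 25 and msg[4] = 8 describes a 25 * 4 = 100ns period with an
+ * offset of 8, so Hz = 1000000000 / 100 = 10000000 and the routine above
+ * prints "period 100ns offset 8 10.00MHz FAST SCSI-II".  With msg[3] = 50
+ * (200ns, 5MHz) it prints "... 5.00MHz synchronous SCSI", and with
+ * msg[4] = 0 it simply reports asynchronous SCSI.
+ */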
+
+/*
+ * Function : static void set_synchronous (struct Scsi_Host *host,
+ * int target, int sxfer, int scntl3, int now_connected)
+ *
+ * Purpose : reprogram transfers between the selected SCSI initiator and
+ *	target with the given register values, updating the indirect
+ *	select operand, the reselection script, and (if now_connected)
+ *	the chip registers.
+ *
+ * Inputs : host - NCR53c7,8xx SCSI host, target - numeric SCSI target id,
+ * sxfer and scntl3 - NCR registers. now_connected - if non-zero,
+ * we should reprogram the registers now too.
+ */
+
+static void
+set_synchronous (struct Scsi_Host *host, int target, int sxfer, int scntl3,
+ int now_connected) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 *script;
+ NCR53c7x0_local_setup(host);
+
+ /* These are eight bit registers */
+ sxfer &= 0xff;
+ scntl3 &= 0xff;
+
+ hostdata->sync[target].sxfer_sanity = sxfer;
+ hostdata->sync[target].scntl3_sanity = scntl3;
+
+/*
+ * HARD CODED : synchronous script is EIGHT words long. This
+ * must agree with 53c7,8xx.h
+ */
+
+ if ((hostdata->chip != 700) && (hostdata->chip != 70066)) {
+ hostdata->sync[target].select_indirect = (scntl3 << 24) |
+ (target << 16) | (sxfer << 8);
+
+ script = (u32 *) hostdata->sync[target].script;
+
+ /* XXX - add NCR53c7x0 code to reprogram SCF bits if we want to */
+ if ((hostdata->chip / 100) == 8) {
+ script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
+ DCMD_RWRI_OP_MOVE) << 24) |
+ (SCNTL3_REG_800 << 16) | (scntl3 << 8);
+ script[1] = 0;
+ script += 2;
+ }
+
+ script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
+ DCMD_RWRI_OP_MOVE) << 24) |
+ (SXFER_REG << 16) | (sxfer << 8);
+ script[1] = 0;
+ script += 2;
+
+#ifdef DEBUG_SYNC_INTR
+ if (hostdata->options & OPTION_DEBUG_DISCONNECT) {
+ script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_INT) << 24) | DBC_TCI_TRUE;
+ script[1] = DEBUG_SYNC_INTR;
+ script += 2;
+ }
+#endif
+
+ script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_RETURN) << 24) | DBC_TCI_TRUE;
+ script[1] = 0;
+ script += 2;
+ }
+
+ if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS)
+ printk ("scsi%d : target %d sync parameters are sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, target, sxfer, scntl3);
+
+ if (now_connected) {
+ if ((hostdata->chip / 100) == 8)
+ NCR53c7x0_write8(SCNTL3_REG_800, scntl3);
+ NCR53c7x0_write8(SXFER_REG, sxfer);
+ }
+}
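+
+/*
+ * Illustrative example (the register values are hypothetical) : for
+ * target 2 with sxfer = 0x18 and scntl3 = 0x33, select_indirect above
+ * becomes (0x33 << 24) | (2 << 16) | (0x18 << 8) = 0x33021800, and the
+ * generated per-target script is a register MOVE to SCNTL3 (on the 8xx
+ * parts only), a MOVE to SXFER, and a RETURN.
+ */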
+
+
+/*
+ * Function : static void asynchronous (struct Scsi_Host *host, int target)
+ *
+ * Purpose : reprogram transfers between the selected SCSI Host adapter and target
+ * (assumed to be currently connected) for asynchronous transfers.
+ *
+ * Inputs : host - SCSI host structure, target - numeric target ID.
+ *
+ * Preconditions : the NCR chip should be in one of the halted states
+ */
+
+static void
+asynchronous (struct Scsi_Host *host, int target) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ NCR53c7x0_local_setup(host);
+ set_synchronous (host, target, /* no offset */ 0, hostdata->saved_scntl3,
+ 1);
+ printk ("scsi%d : setting target %d to asynchronous SCSI\n",
+ host->host_no, target);
+}
+
+/*
+ * XXX - do we want to go out of our way (ie, add extra code to selection
+ * in the NCR53c710/NCR53c720 script) to reprogram the synchronous
+ * conversion bits, or can we be content in just setting the
+ * sxfer bits?
+ */
+
+/* Table for NCR53c8xx synchronous values */
+static const struct {
+ int div; /* Total clock divisor * 10 */
+    unsigned char scf;	/* SCNTL3 SCF (synchronous clock conversion) field */
+ unsigned char tp; /* 4 + tp = xferp divisor */
+} syncs[] = {
+/* div scf tp div scf tp div scf tp */
+ { 40, 1, 0}, { 50, 1, 1}, { 60, 1, 2},
+ { 70, 1, 3}, { 75, 2, 1}, { 80, 1, 4},
+ { 90, 1, 5}, { 100, 1, 6}, { 105, 2, 3},
+ { 110, 1, 7}, { 120, 2, 4}, { 135, 2, 5},
+ { 140, 3, 3}, { 150, 2, 6}, { 160, 3, 4},
+ { 165, 2, 7}, { 180, 3, 5}, { 200, 3, 6},
+ { 210, 4, 3}, { 220, 3, 7}, { 240, 4, 4},
+ { 270, 4, 5}, { 300, 4, 6}, { 330, 4, 7}
+};
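+
+/*
+ * Reading the table : every entry satisfies div = 10 * F * (4 + tp), where
+ * F is 1, 1.5, 2 or 3 for scf codes 1 through 4 respectively (e.g. the
+ * { 75, 2, 1} entry is 1.5 * 5 = 7.5, i.e. div == 75).  This is only an
+ * observation about the values above, offered as a reading aid.
+ */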
+
+/*
+ * Function : static void synchronous (struct Scsi_Host *host, int target,
+ * char *msg)
+ *
+ * Purpose : reprogram transfers between the selected SCSI initiator and
+ * target for synchronous SCSI transfers such that the synchronous
+ *	offset is no greater than that requested and the period at least as long
+ * as that requested. Also modify *msg such that it contains
+ * an appropriate response.
+ *
+ * Inputs : host - NCR53c7,8xx SCSI host, target - numeric SCSI target id,
+ * msg - synchronous transfer request.
+ */
+
+
+static void
+synchronous (struct Scsi_Host *host, int target, char *msg) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int desire, divisor, i, limit;
+ unsigned char scntl3, sxfer;
+/* The diagnostic message fits on one line, even with max. width integers */
+ char buf[80];
+
+/* Desired transfer clock in Hz */
+ desire = 1000000000L / (msg[3] * 4);
+/* Scale the available SCSI clock by 10 so we get tenths */
+ divisor = (hostdata->scsi_clock * 10) / desire;
+
+/* NCR chips can handle at most an offset of 8 */
+ if (msg[4] > 8)
+ msg[4] = 8;
+
+ if (hostdata->options & OPTION_DEBUG_SDTR)
+ printk("scsi%d : optimal synchronous divisor of %d.%01d\n",
+ host->host_no, divisor / 10, divisor % 10);
+
+ limit = (sizeof(syncs) / sizeof(syncs[0]) -1);
+ for (i = 0; (i < limit) && (divisor > syncs[i].div); ++i);
+
+ if (hostdata->options & OPTION_DEBUG_SDTR)
+ printk("scsi%d : selected synchronous divisor of %d.%01d\n",
+ host->host_no, syncs[i].div / 10, syncs[i].div % 10);
+
+ msg[3] = ((1000000000L / hostdata->scsi_clock) * syncs[i].div / 10 / 4);
+
+ if (hostdata->options & OPTION_DEBUG_SDTR)
+ printk("scsi%d : selected synchronous period of %dns\n", host->host_no,
+ msg[3] * 4);
+
+ scntl3 = (hostdata->chip / 100 == 8) ? ((hostdata->saved_scntl3 &
+ ~SCNTL3_800_SCF_MASK) | (syncs[i].scf << SCNTL3_800_SCF_SHIFT)) : 0;
+ sxfer = (msg[4] << SXFER_MO_SHIFT) | ((syncs[i].tp) << SXFER_TP_SHIFT);
+ if (hostdata->options & OPTION_DEBUG_SDTR)
+ printk ("scsi%d : sxfer=0x%x scntl3=0x%x\n",
+ host->host_no, (int) sxfer, (int) scntl3);
+ set_synchronous (host, target, sxfer, scntl3, 1);
+ sprintf (buf, "scsi%d : setting target %d to ", host->host_no, target);
+ print_synchronous (buf, msg);
+}
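+
+/*
+ * Worked example (the clock and request values are illustrative) : with a
+ * 40MHz SCSI clock and a request for a 100ns period (msg[3] = 25),
+ * desire = 1000000000 / 100 = 10000000 and divisor = 400000000 / 10000000
+ * = 40 tenths, so the { 40, 1, 0} entry is chosen; the returned period is
+ * msg[3] = (1000000000 / 40000000) * 40 / 10 / 4 = 25, i.e. 100ns again,
+ * with scf = 1 and tp = 0 programmed into scntl3 and sxfer.
+ */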
+
+/*
+ * Function : static int NCR53c8x0_dstat_sir_intr (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : Handler for INT generated instructions for the
+ * NCR53c810/820 SCSI SCRIPT
+ *
+ * Inputs : host - pointer to this host adapter's structure,
+ * cmd - pointer to the command (if any) dsa was pointing
+ * to.
+ *
+ */
+
+static int
+NCR53c8x0_dstat_sir_intr (struct Scsi_Host *host, struct
+ NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ int print;
+ Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 dsps,*dsp; /* Argument of the INT instruction */
+ NCR53c7x0_local_setup(host);
+ dsps = NCR53c7x0_read32(DSPS_REG);
+ dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
+
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : DSPS = 0x%x\n", host->host_no, dsps);
+
+ switch (dsps) {
+ case A_int_msg_1:
+ print = 1;
+ switch (hostdata->msg_buf[0]) {
+ /*
+ * Unless we've initiated synchronous negotiation, I don't
+ * think that this should happen.
+ */
+ case MESSAGE_REJECT:
+ hostdata->dsp = hostdata->script + hostdata->E_accept_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ if (cmd && (cmd->flags & CMD_FLAG_SDTR)) {
+ printk ("scsi%d : target %d rejected SDTR\n", host->host_no,
+ c->target);
+ cmd->flags &= ~CMD_FLAG_SDTR;
+ asynchronous (host, c->target);
+ print = 0;
+ }
+ break;
+ case INITIATE_RECOVERY:
+ printk ("scsi%d : extended contingent allegiance not supported yet, rejecting\n",
+ host->host_no);
+	    /* Reject the message, as the default case does */
+ hostdata->dsp = hostdata->script + hostdata->E_reject_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ break;
+ default:
+	    printk ("scsi%d : unsupported message, rejecting\n",
+ host->host_no);
+ hostdata->dsp = hostdata->script + hostdata->E_reject_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ }
+ if (print) {
+ printk ("scsi%d : received message", host->host_no);
+ if (c)
+ printk (" from target %d lun %d ", c->target, c->lun);
+ print_msg ((unsigned char *) hostdata->msg_buf);
+ printk("\n");
+ }
+
+ return SPECIFIC_INT_NOTHING;
+
+
+ case A_int_msg_sdtr:
+/*
+ * At this point, hostdata->msg_buf contains
+ * 0 EXTENDED MESSAGE
+ * 1 length
+ * 2 SDTR
+ * 3 period * 4ns
+ * 4 offset
+ */
+
+ if (cmd) {
+ char buf[80];
+ sprintf (buf, "scsi%d : target %d %s ", host->host_no, c->target,
+ (cmd->flags & CMD_FLAG_SDTR) ? "accepting" : "requesting");
+ print_synchronous (buf, (unsigned char *) hostdata->msg_buf);
+
+ /*
+ * Initiator initiated, won't happen unless synchronous
+ * transfers are enabled. If we get a SDTR message in
+ * response to our SDTR, we should program our parameters
+ * such that
+ * offset <= requested offset
+ * period >= requested period
+ */
+ if (cmd->flags & CMD_FLAG_SDTR) {
+ cmd->flags &= ~CMD_FLAG_SDTR;
+ if (hostdata->msg_buf[4])
+ synchronous (host, c->target, (unsigned char *)
+ hostdata->msg_buf);
+ else
+ asynchronous (host, c->target);
+ hostdata->dsp = hostdata->script + hostdata->E_accept_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ } else {
+ if (hostdata->options & OPTION_SYNCHRONOUS) {
+ cmd->flags |= CMD_FLAG_DID_SDTR;
+ synchronous (host, c->target, (unsigned char *)
+ hostdata->msg_buf);
+ } else {
+ hostdata->msg_buf[4] = 0; /* 0 offset = async */
+ asynchronous (host, c->target);
+ }
+ patch_dsa_32 (cmd->dsa, dsa_msgout_other, 0, 5);
+ patch_dsa_32 (cmd->dsa, dsa_msgout_other, 1, (u32)
+ virt_to_bus ((void *)&hostdata->msg_buf));
+ hostdata->dsp = hostdata->script +
+ hostdata->E_respond_message / sizeof(u32);
+ hostdata->dsp_changed = 1;
+ }
+ return SPECIFIC_INT_NOTHING;
+ }
+ /* Fall through to abort if we couldn't find a cmd, and
+ therefore a dsa structure to twiddle */
+ case A_int_msg_wdtr:
+ hostdata->dsp = hostdata->script + hostdata->E_reject_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ case A_int_err_unexpected_phase:
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : unexpected phase\n", host->host_no);
+ return SPECIFIC_INT_ABORT;
+ case A_int_err_selected:
+ printk ("scsi%d : selected by target %d\n", host->host_no,
+ (int) NCR53c7x0_read8(SDID_REG_800) &7);
+ hostdata->dsp = hostdata->script + hostdata->E_target_abort /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ case A_int_err_unexpected_reselect:
+ printk ("scsi%d : unexpected reselect by target %d lun %d\n",
+ host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & 7,
+ hostdata->reselected_identify & 7);
+ hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+/*
+ * Since contingent allegiance conditions are cleared by the next
+ * command issued to a target, we must issue a REQUEST SENSE
+ * command after receiving a CHECK CONDITION status, before
+ * another command is issued.
+ *
+ * Since this NCR53c7x0_cmd will be freed after use, we don't
+ * care if we step on the various fields, so modify a few things.
+ */
+ case A_int_err_check_condition:
+#if 0
+ if (hostdata->options & OPTION_DEBUG_INTR)
+#endif
+ printk ("scsi%d : CHECK CONDITION\n", host->host_no);
+ if (!c) {
+ printk("scsi%d : CHECK CONDITION with no SCSI command\n",
+ host->host_no);
+ return SPECIFIC_INT_PANIC;
+ }
+
+ /*
+ * FIXME : this uses the normal one-byte selection message.
+ * We may want to renegotiate for synchronous & WIDE transfers
+ * since these could be the crux of our problem.
+ *
+     * FIXME : once SCSI-II tagged queuing is implemented, we'll
+     *  have to set this up so that the rest of the DSA
+     *  agrees with this being an untagged queued command.
+ */
+
+ patch_dsa_32 (cmd->dsa, dsa_msgout, 0, 1);
+
+ /*
+ * Modify the table indirect for COMMAND OUT phase, since
+ * Request Sense is a six byte command.
+ */
+
+ patch_dsa_32 (cmd->dsa, dsa_cmdout, 0, 6);
+
+ c->cmnd[0] = REQUEST_SENSE;
+ c->cmnd[1] &= 0xe0; /* Zero all but LUN */
+ c->cmnd[2] = 0;
+ c->cmnd[3] = 0;
+ c->cmnd[4] = sizeof(c->sense_buffer);
+ c->cmnd[5] = 0;
+
+ /*
+ * Disable dataout phase, and program datain to transfer to the
+ * sense buffer, and add a jump to other_transfer after the
+ * command so overflow/underrun conditions are detected.
+ */
+
+ patch_dsa_32 (cmd->dsa, dsa_dataout, 0,
+ virt_to_bus(hostdata->script) + hostdata->E_other_transfer);
+ patch_dsa_32 (cmd->dsa, dsa_datain, 0,
+ virt_to_bus(cmd->data_transfer_start));
+ cmd->data_transfer_start[0] = (((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I |
+ DCMD_BMI_IO)) << 24) | sizeof(c->sense_buffer);
+ cmd->data_transfer_start[1] = (u32) virt_to_bus(c->sense_buffer);
+
+ cmd->data_transfer_start[2] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP)
+ << 24) | DBC_TCI_TRUE;
+ cmd->data_transfer_start[3] = (u32) virt_to_bus(hostdata->script) +
+ hostdata->E_other_transfer;
+
+ /*
+ * Currently, this command is flagged as completed, ie
+ * it has valid status and message data. Reflag it as
+ * incomplete. Q - need to do something so that original
+ * status, etc are used.
+ */
+
+ cmd->cmd->result = 0xffff;
+
+ /*
+ * Restart command as a REQUEST SENSE.
+ */
+ hostdata->dsp = (u32 *) hostdata->script + hostdata->E_select /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ case A_int_debug_break:
+ return SPECIFIC_INT_BREAK;
+ case A_int_norm_aborted:
+ hostdata->dsp = (u32 *) hostdata->schedule;
+ hostdata->dsp_changed = 1;
+ if (cmd)
+ abnormal_finished (cmd, DID_ERROR << 16);
+ return SPECIFIC_INT_NOTHING;
+ case A_int_test_1:
+ case A_int_test_2:
+ hostdata->idle = 1;
+ hostdata->test_completed = (dsps - A_int_test_1) / 0x00010000 + 1;
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk("scsi%d : test%d complete\n", host->host_no,
+ hostdata->test_completed);
+ return SPECIFIC_INT_NOTHING;
+#ifdef A_int_debug_reselected_ok
+ case A_int_debug_reselected_ok:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ /*
+ * Note - this dsa is not based on location relative to
+ * the command structure, but to location relative to the
+ * DSA register
+ */
+ u32 *dsa;
+ dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
+
+	    printk("scsi%d : reselected_ok (DSA = 0x%x (virt 0x%p))\n",
+ host->host_no, NCR53c7x0_read32(DSA_REG), dsa);
+ printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt(cmd->saved_data_pointer));
+ print_insn (host, hostdata->script + Ent_reselected_ok /
+ sizeof(u32), "", 1);
+ printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, NCR53c7x0_read8(SXFER_REG),
+ NCR53c7x0_read8(SCNTL3_REG_800));
+ if (c) {
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script, "", 1);
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script + 2, "", 1);
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_reselect_check
+ case A_int_debug_reselect_check:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ u32 *dsa;
+#if 0
+ u32 *code;
+#endif
+ /*
+ * Note - this dsa is not based on location relative to
+ * the command structure, but to location relative to the
+ * DSA register
+ */
+ dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
+ printk("scsi%d : reselected_check_next (DSA = 0x%lx (virt 0x%p))\n",
+ host->host_no, virt_to_bus(dsa), dsa);
+ if (dsa) {
+ printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt (cmd->saved_data_pointer));
+#if 0
+ printk("scsi%d : template code :\n", host->host_no);
+ for (code = dsa + (Ent_dsa_code_check_reselect - Ent_dsa_zero)
+ / sizeof(u32); code < (dsa + Ent_dsa_zero / sizeof(u32));
+ code += print_insn (host, code, "", 1));
+#endif
+ }
+ print_insn (host, hostdata->script + Ent_reselected_ok /
+ sizeof(u32), "", 1);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_dsa_schedule
+ case A_int_debug_dsa_schedule:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ u32 *dsa;
+ /*
+ * Note - this dsa is not based on location relative to
+ * the command structure, but to location relative to the
+ * DSA register
+ */
+ dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
+ printk("scsi%d : dsa_schedule (old DSA = 0x%lx (virt 0x%p))\n",
+ host->host_no, virt_to_bus(dsa), dsa);
+ if (dsa)
+ printk("scsi%d : resume address is 0x%x (virt 0x%p)\n"
+ " (temp was 0x%x (virt 0x%p))\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt (cmd->saved_data_pointer),
+ NCR53c7x0_read32 (TEMP_REG),
+ bus_to_virt (NCR53c7x0_read32(TEMP_REG)));
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_scheduled
+ case A_int_debug_scheduled:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ printk("scsi%d : new I/O 0x%x (virt 0x%p) scheduled\n",
+ host->host_no, NCR53c7x0_read32(DSA_REG),
+ bus_to_virt(NCR53c7x0_read32(DSA_REG)));
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_idle
+ case A_int_debug_idle:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ printk("scsi%d : idle\n", host->host_no);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_cmd
+ case A_int_debug_cmd:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+	    printk("scsi%d : command sent\n", host->host_no);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_dsa_loaded
+ case A_int_debug_dsa_loaded:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ printk("scsi%d : DSA loaded with 0x%x (virt 0x%p)\n", host->host_no,
+ NCR53c7x0_read32(DSA_REG),
+ bus_to_virt(NCR53c7x0_read32(DSA_REG)));
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_reselected
+ case A_int_debug_reselected:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ printk("scsi%d : reselected by target %d lun %d\n",
+ host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & ~0x80,
+ (int) hostdata->reselected_identify & 7);
+ print_queues(host);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_disconnect_msg
+ case A_int_debug_disconnect_msg:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ if (c)
+ printk("scsi%d : target %d lun %d disconnecting\n",
+ host->host_no, c->target, c->lun);
+ else
+ printk("scsi%d : unknown target disconnecting\n",
+ host->host_no);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_disconnected
+ case A_int_debug_disconnected:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ printk ("scsi%d : disconnected, new queues are\n",
+ host->host_no);
+ print_queues(host);
+#if 0
+ printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, NCR53c7x0_read8(SXFER_REG),
+ NCR53c7x0_read8(SCNTL3_REG_800));
+#endif
+ if (c) {
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script, "", 1);
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script + 2, "", 1);
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_panic
+ case A_int_debug_panic:
+ printk("scsi%d : int_debug_panic received\n", host->host_no);
+ print_lots (host);
+ return SPECIFIC_INT_PANIC;
+#endif
+#ifdef A_int_debug_saved
+ case A_int_debug_saved:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ printk ("scsi%d : saved data pointer 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt (cmd->saved_data_pointer));
+ print_progress (c);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_restored
+ case A_int_debug_restored:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ if (cmd) {
+ int size;
+ printk ("scsi%d : restored data pointer 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer, bus_to_virt (
+ cmd->saved_data_pointer));
+ size = print_insn (host, (u32 *)
+ bus_to_virt(cmd->saved_data_pointer), "", 1);
+ size = print_insn (host, (u32 *)
+ bus_to_virt(cmd->saved_data_pointer) + size, "", 1);
+ print_progress (c);
+ }
+#if 0
+ printk ("scsi%d : datapath residual %d\n",
+ host->host_no, datapath_residual (host)) ;
+#endif
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_sync
+ case A_int_debug_sync:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
+ unsigned char sxfer = NCR53c7x0_read8 (SXFER_REG),
+ scntl3 = NCR53c7x0_read8 (SCNTL3_REG_800);
+ if (c) {
+ if (sxfer != hostdata->sync[c->target].sxfer_sanity ||
+ scntl3 != hostdata->sync[c->target].scntl3_sanity) {
+ printk ("scsi%d : sync sanity check failed sxfer=0x%x, scntl3=0x%x",
+ host->host_no, sxfer, scntl3);
+ NCR53c7x0_write8 (SXFER_REG, sxfer);
+ NCR53c7x0_write8 (SCNTL3_REG_800, scntl3);
+ }
+ } else
+ printk ("scsi%d : unknown command sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, (int) sxfer, (int) scntl3);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_datain
+ case A_int_debug_datain:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
+ int size;
+ printk ("scsi%d : In do_datain (%s) sxfer=0x%x, scntl3=0x%x\n"
+ " datapath residual=%d\n",
+ host->host_no, sbcl_to_phase (NCR53c7x0_read8 (SBCL_REG)),
+ (int) NCR53c7x0_read8(SXFER_REG),
+ (int) NCR53c7x0_read8(SCNTL3_REG_800),
+ datapath_residual (host)) ;
+ print_insn (host, dsp, "", 1);
+ size = print_insn (host, (u32 *) bus_to_virt(dsp[1]), "", 1);
+ print_insn (host, (u32 *) bus_to_virt(dsp[1]) + size, "", 1);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+/*
+ * FIXME : for 7xx support, we need to read SDID_REG_700 and handle
+ * the comparison as bitfielded, not binary.
+ */
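+/*
+ * (Rough sketch, not checked against the 7xx data manual : if SDID_REG_700
+ * encodes the selecting/reselecting ID as one bit per target, the target
+ * number would be recovered by locating the set bit, e.g.
+ *
+ *	for (id = 0, sdid = NCR53c7x0_read8 (SDID_REG_700);
+ *		sdid && !(sdid & 1); sdid >>= 1)
+ *	    ++id;
+ *
+ * rather than with the binary "& 15" used for the '8xx below.)
+ */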
+#ifdef A_int_debug_check_dsa
+ case A_int_debug_check_dsa:
+ if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
+ int sdid = NCR53c7x0_read8 (SDID_REG_800) & 15;
+ char *where = dsp - NCR53c7x0_insn_size(NCR53c7x0_read8
+ (DCMD_REG)) == hostdata->script +
+ Ent_select_check_dsa / sizeof(u32) ?
+ "selection" : "reselection";
+ if (c && sdid != c->target) {
+ printk ("scsi%d : SDID target %d != DSA target %d at %s\n",
+ host->host_no, sdid, c->target, where);
+ print_lots(host);
+ dump_events (host, 20);
+ return SPECIFIC_INT_PANIC;
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+ default:
+ if ((dsps & 0xff000000) == 0x03000000) {
+ printk ("scsi%d : misc debug interrupt 0x%x\n",
+ host->host_no, dsps);
+ return SPECIFIC_INT_RESTART;
+ } else if ((dsps & 0xff000000) == 0x05000000) {
+ if (hostdata->events) {
+ struct NCR53c7x0_event *event;
+ ++hostdata->event_index;
+ if (hostdata->event_index >= hostdata->event_size)
+ hostdata->event_index = 0;
+ event = (struct NCR53c7x0_event *) hostdata->events +
+ hostdata->event_index;
+ event->event = (enum ncr_event) dsps;
+ event->dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+ /* FIXME : this needs to change for the '7xx family */
+ if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON)
+ event->target = NCR53c7x0_read8(SSID_REG_800);
+ else
+ event->target = 255;
+
+ if (event->event == EVENT_RESELECT)
+ event->lun = hostdata->reselected_identify & 0xf;
+ else if (c)
+ event->lun = c->lun;
+ else
+ event->lun = 255;
+ do_gettimeofday(&(event->time));
+ if (c) {
+ event->pid = c->pid;
+ memcpy ((void *) event->cmnd, (void *) c->cmnd,
+ sizeof (event->cmnd));
+ } else {
+ event->pid = -1;
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+ }
+
+ printk ("scsi%d : unknown user interrupt 0x%x\n",
+ host->host_no, (unsigned) dsps);
+ return SPECIFIC_INT_PANIC;
+ }
+}
+
+/*
+ * XXX - the stock NCR assembler won't output the scriptu.h file,
+ * which undefines all #define'd CPP symbols from the script.h
+ * file, which will create problems if you use multiple scripts
+ * with the same symbol names.
+ *
+ * If you insist on using NCR's assembler, you could generate
+ * scriptu.h from script.h using something like
+ *
+ * grep '#define' script.h | \
+ * sed 's/#define[ ][ ]*\([_a-zA-Z][_a-zA-Z0-9]*\).*$/#undef \1/' \
+ * > scriptu.h
+ */
+
+#include "53c8xx_u.h"
+
+/* XXX - add alternate script handling code here */
+
+
+#ifdef NCR_DEBUG
+/*
+ * Debugging without a debugger is no fun. So, I've provided
+ * a debugging interface in the NCR53c7x0 driver. To avoid
+ * kernel cruft, there's just enough here to act as an interface
+ * to a user level debugger (aka, GDB).
+ *
+ *
+ * The following restrictions apply to debugger commands :
+ * 1. The command must be terminated by a newline.
+ * 2. Command length must be less than 80 bytes including the
+ * newline.
+ * 3. The entire command must be written with one system call.
+ */
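+/*
+ * (Hypothetical example - the actual device node depends on how the
+ * debugger character device gets registered : a single
+ *	echo "bs 0x1000" > /dev/ncrdebug0
+ * would set a breakpoint at SCRIPTS address 0x1000, subject to the
+ * restrictions listed above.)
+ */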
+
+static const char debugger_help[] =
+"bc <addr> - clear breakpoint\n"
+"bl - list breakpoints\n"
+"bs <addr> - set breakpoint\n"
+"g - start\n"
+"h - halt\n"
+"? - this message\n"
+"i - info\n"
+"mp <addr> <size> - print memory\n"
+"ms <addr> <size> <value> - store memory\n"
+"rp <num> <size> - print register\n"
+"rs <num> <size> <value> - store register\n"
+"s - single step\n"
+"tb - begin trace \n"
+"te - end trace\n";
+
+/*
+ * Whenever we change a break point, we should probably
+ * set the NCR up so that it is in a single step mode.
+ */
+
+static int debugger_fn_bc (struct Scsi_Host *host, struct debugger_token *token,
+ u32 args[]) {
+    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+	host->hostdata;
+    struct NCR53c7x0_break *bp, **prev;
+    unsigned long flags;
+    save_flags(flags);
+    cli();
+    for (bp = (struct NCR53c7x0_break *) hostdata->breakpoints,
+	    prev = (struct NCR53c7x0_break **) &hostdata->breakpoints;
+	    bp && (bp->addr != (u32 *) args[0]);
+	    prev = (struct NCR53c7x0_break **) &(bp->next),
+	    bp = (struct NCR53c7x0_break *) bp->next);
+
+ if (!bp) {
+ restore_flags(flags);
+ return -EIO;
+ }
+
+ /*
+ * XXX - we need to insure that the processor is halted
+ * here in order to prevent a race condition.
+ */
+
+ memcpy ((void *) bp->addr, (void *) bp->old, sizeof(bp->old));
+ if (prev)
+ *prev = bp->next;
+
+ restore_flags(flags);
+ return 0;
+}
+
+
+static int
+debugger_fn_bl (struct Scsi_Host *host, struct debugger_token *token,
+ u32 args[]) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_break *bp;
+ char buf[80];
+ size_t len;
+ unsigned long flags;
+ /*
+ * XXX - we need to insure that the processor is halted
+ * here in order to prevent a race condition. So, if the
+ * processor isn't halted, print an error message and continue.
+ */
+
+    if (hostdata->state != STATE_HALTED) {
+	sprintf (buf, "scsi%d : bp : warning : processor not halted\n",
+	    host->host_no);
+	debugger_kernel_write (host, buf, strlen(buf));
+    }
+
+ save_flags(flags);
+ cli();
+    for (bp = (struct NCR53c7x0_break *) hostdata->breakpoints;
+	    bp; bp = (struct NCR53c7x0_break *) bp->next) {
+	sprintf (buf, "scsi%d : bp : success : at %08x, replaces %08x %08x",
+	    host->host_no, bp->addr, bp->old[0], bp->old[1]);
+ len = strlen(buf);
+ if ((bp->old[0] & (DCMD_TYPE_MASK << 24)) ==
+ (DCMD_TYPE_MMI << 24)) {
+ sprintf(buf + len, "%08x\n", * (u32 *) bp->addr);
+ } else {
+ sprintf(buf + len, "\n");
+ }
+ len = strlen(buf);
+ debugger_kernel_write (host, buf, len);
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+static int
+debugger_fn_bs (struct Scsi_Host *host, struct debugger_token *token,
+ u32 args[]) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_break *bp;
+ char buf[80];
+ size_t len;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+
+ if (hostdata->state != STATE_HALTED) {
+ sprintf (buf, "scsi%d : bs : failure : NCR not halted\n", host->host_no);
+ debugger_kernel_write (host, buf, strlen(buf));
+ restore_flags(flags);
+ return -1;
+ }
+
+    if (!(bp = kmalloc (sizeof (struct NCR53c7x0_break), GFP_ATOMIC))) {
+ printk ("scsi%d : kmalloc(%d) of breakpoint structure failed, try again\n",
+ host->host_no, sizeof(struct NCR53c7x0_break));
+ restore_flags(flags);
+ return -1;
+ }
+
+    bp->addr = (u32 *) args[0];
+    memcpy ((void *) bp->old, (void *) bp->addr, 8);
+    bp->old_size = (((bp->old[0] >> 24) & DCMD_TYPE_MASK) ==
+	DCMD_TYPE_MMI) ? 3 : 2;
+    bp->next = hostdata->breakpoints;
+    hostdata->breakpoints = bp;
+    memcpy ((void *) bp->addr, (void *) hostdata->E_debug_break, 8);
+
+ restore_flags(flags);
+ return 0;
+}
+
+#define TOKEN(name,nargs) {#name, nargs, debugger_fn_##name}
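+/* TOKEN(bc,1), for example, expands to {"bc", 1, debugger_fn_bc}. */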
+static const struct debugger_token {
+ char *name;
+    int nargs;
+    int (*fn)(struct Scsi_Host *host, struct debugger_token *token, u32 args[]);
+} debugger_tokens[] = {
+ TOKEN(bc,1), TOKEN(bl,0), TOKEN(bs,1), TOKEN(g,0), TOKEN(halt,0),
+    {"?", 0, NULL}, TOKEN(h,0), TOKEN(i,0), TOKEN(mp,2),
+ TOKEN(ms,3), TOKEN(rp,2), TOKEN(rs,2), TOKEN(s,0), TOKEN(tb,0), TOKEN(te,0)
+};
+
+#define NDT (sizeof(debugger_tokens) / sizeof(struct debugger_token))
+
+static struct Scsi_Host * inode_to_host (struct inode *inode) {
+    int dev;
+    struct Scsi_Host *host;
+    for (dev = MINOR(inode->i_rdev), host = first_host;
+	host && (host->hostt == the_template); --dev, host = host->next)
+ if (!dev) return host;
+ return NULL;
+}
+
+
+static int
+debugger_user_write (struct inode *inode,struct file *filp,
+ char *buf,int count) {
+ struct Scsi_Host *host; /* This SCSI host */
+    struct NCR53c7x0_hostdata *hostdata;
+ char input_buf[80], /* Kernel space copy of buf */
+ *ptr; /* Pointer to argument list */
+ u32 args[3]; /* Arguments */
+ int i, j, error, len;
+
+ if (!(host = inode_to_host(inode)))
+ return -ENXIO;
+
+ hostdata = (struct NCR53c7x0_hostdata *) host->hostdata;
+
+    if ((error = verify_area(VERIFY_READ,buf,count)))
+ return error;
+
+ if (count > 80)
+ return -EIO;
+
+ memcpy_from_fs(input_buf, buf, count);
+
+ if (input_buf[count - 1] != '\n')
+ return -EIO;
+
+ input_buf[count - 1]=0;
+
+ for (i = 0; i < NDT; ++i) {
+ len = strlen (debugger_tokens[i].name);
+ if (!strncmp(input_buf, debugger_tokens[i].name, len))
+ break;
+    }
+
+ if (i == NDT)
+ return -EIO;
+
+ for (ptr = input_buf + len, j = 0; j < debugger_tokens[i].nargs && *ptr;) {
+ if (*ptr == ' ' || *ptr == '\t') {
+ ++ptr;
+ } else if (isdigit(*ptr)) {
+ args[j++] = simple_strtoul (ptr, &ptr, 0);
+ } else {
+ return -EIO;
+ }
+ }
+
+ if (j != debugger_tokens[i].nargs)
+ return -EIO;
+
+ return count;
+}
+
+static int
+debugger_user_read (struct inode *inode,struct file *filp,
+ char *buf,int count) {
+    /* XXX - not yet implemented : should copy the circular debug buffer
+     * filled by debugger_kernel_write() out to user space.
+     */
+    return -ENXIO;
+}
+
+static int
+debugger_kernel_write (struct Scsi_Host *host, char *buf, size_t
+ buflen) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int copy, left;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
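+    /*
+     * Append to the circular debug buffer, wrapping at the end of the
+     * buffer; debug_count tracks how many bytes are available for the
+     * user level read side to drain.
+     */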
+ while (buflen) {
+	left = (hostdata->debug_buf + hostdata->debug_size) -
+	    hostdata->debug_write;
+ copy = (buflen <= left) ? buflen : left;
+ memcpy (hostdata->debug_write, buf, copy);
+ buf += copy;
+ buflen -= copy;
+ hostdata->debug_count += copy;
+ if ((hostdata->debug_write += copy) ==
+ (hostdata->debug_buf + hostdata->debug_size))
+	    hostdata->debug_write = hostdata->debug_buf;
+    }
+    restore_flags(flags);
+    return 0;
+}
+
+#endif /* def NCR_DEBUG */
+
+/*
+ * Function : static void NCR53c8x0_soft_reset (struct Scsi_Host *host)
+ *
+ * Purpose : perform a soft reset of the NCR53c8xx chip
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ *
+ * Preconditions : NCR53c7x0_init must have been called for this
+ * host.
+ *
+ */
+
+static void
+NCR53c8x0_soft_reset (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ NCR53c7x0_local_setup(host);
+
+
+ /*
+ * Do a soft reset of the chip so that everything is
+ * reinitialized to the power-on state.
+ *
+ * Basically follow the procedure outlined in the NCR53c700
+ * data manual under Chapter Six, How to Use, Steps Necessary to
+ * Start SCRIPTS, with the exception of actually starting the
+ * script and setting up the synchronous transfer gunk.
+ */
+
+ NCR53c7x0_write8(ISTAT_REG_800, ISTAT_10_SRST);
+ NCR53c7x0_write8(ISTAT_REG_800, 0);
+ NCR53c7x0_write8(hostdata->dmode, hostdata->saved_dmode & ~DMODE_MAN);
+
+
+ /*
+ * Respond to reselection by targets and use our _initiator_ SCSI ID
+     * for arbitration. If the "notyet" code below is enabled, also
+     * respond to SCSI selection.
+ *
+ * XXX - Note : we must reprogram this when reselecting as
+ * a target.
+ */
+
+#ifdef notyet
+ NCR53c7x0_write8(SCID_REG, (host->this_id & 7)|SCID_800_RRE|SCID_800_SRE);
+#else
+ NCR53c7x0_write8(SCID_REG, (host->this_id & 7)|SCID_800_RRE);
+#endif
+ NCR53c7x0_write8(RESPID_REG_800, hostdata->this_id_mask);
+
+ /*
+     * Use the maximum (1.6 second) handshake to handshake timeout,
+ * and SCSI recommended .5s selection timeout.
+ */
+
+ /*
+ * The new gcc won't recognize preprocessing directives
+ * within macro args.
+ */
+#if 0
+ NCR53c7x0_write8(STIME0_REG_800,
+ ((selection_timeout << STIME0_800_SEL_SHIFT) & STIME0_800_SEL_MASK)
+ | ((15 << STIME0_800_HTH_SHIFT) & STIME0_800_HTH_MASK));
+#else
+/* Disable HTH interrupt */
+ NCR53c7x0_write8(STIME0_REG_800,
+ ((selection_timeout << STIME0_800_SEL_SHIFT) & STIME0_800_SEL_MASK));
+#endif
+
+
+ /*
+ * Enable active negation for happy synchronous transfers.
+ */
+
+ NCR53c7x0_write8(STEST3_REG_800, STEST3_800_TE);
+
+ /*
+ * Enable all interrupts, except parity which we only want when
+ * the user requests it.
+ */
+
+ NCR53c7x0_write8(DIEN_REG, DIEN_800_MDPE | DIEN_800_BF |
+ DIEN_ABRT | DIEN_SSI | DIEN_SIR | DIEN_800_IID);
+
+
+ NCR53c7x0_write8(SIEN0_REG_800, ((hostdata->options & OPTION_PARITY) ?
+ SIEN_PAR : 0) | SIEN_RST | SIEN_UDC | SIEN_SGE | SIEN_MA);
+ NCR53c7x0_write8(SIEN1_REG_800, SIEN1_800_STO | SIEN1_800_HTH);
+
+ /*
+ * Use saved clock frequency divisor and scripts loaded in 16 bit
+ * mode flags from the saved dcntl.
+ */
+
+ NCR53c7x0_write8(DCNTL_REG, hostdata->saved_dcntl);
+ NCR53c7x0_write8(CTEST4_REG_800, hostdata->saved_ctest4);
+
+ /* Enable active negation */
+ NCR53c7x0_write8(STEST3_REG_800, STEST3_800_TE);
+}
+
+/*
+ * Function static struct NCR53c7x0_cmd *allocate_cmd (Scsi_Cmnd *cmd)
+ *
+ * Purpose : Return the first free NCR53c7x0_cmd structure (which are
+ *	reused in a LIFO manner to minimize cache thrashing).
+ *
+ * Side effects : If we haven't yet scheduled allocation of NCR53c7x0_cmd
+ * structures for this device, do so. Attempt to complete all scheduled
+ * allocations using kmalloc(), putting NCR53c7x0_cmd structures on
+ * the free list. Teach programmers not to drink and hack.
+ *
+ * Inputs : cmd - SCSI command
+ *
+ * Returns : NCR53c7x0_cmd structure allocated on behalf of cmd;
+ * NULL on failure.
+ */
+
+static struct NCR53c7x0_cmd *
+allocate_cmd (Scsi_Cmnd *cmd) {
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+ void *real; /* Real address */
+ int size; /* Size of *tmp */
+ struct NCR53c7x0_cmd *tmp;
+ unsigned long flags;
+
+ if (hostdata->options & OPTION_DEBUG_ALLOCATION)
+ printk ("scsi%d : num_cmds = %d, can_queue = %d\n"
+ " target = %d, lun = %d, %s\n",
+ host->host_no, hostdata->num_cmds, host->can_queue,
+ cmd->target, cmd->lun, (hostdata->cmd_allocated[cmd->target] &
+	    (1 << cmd->lun)) ? "already allocated" : "not allocated");
+
+/*
+ * If we have not yet reserved commands for this I_T_L nexus, and
+ * the device exists (as indicated by permanent Scsi_Cmnd structures
+ * being allocated under 1.3.x, or being outside of scan_scsis in
+ * 1.2.x), do so now.
+ */
+ if (!(hostdata->cmd_allocated[cmd->target] & (1 << cmd->lun)) &&
+#ifdef LINUX_1_2
+ !in_scan_scsis
+#else
+ cmd->device && cmd->device->has_cmdblocks
+#endif
+ ) {
+ if ((hostdata->extra_allocate + hostdata->num_cmds) < host->can_queue)
+ hostdata->extra_allocate += host->cmd_per_lun;
+ hostdata->cmd_allocated[cmd->target] |= (1 << cmd->lun);
+ }
+
+ for (; hostdata->extra_allocate > 0 ; --hostdata->extra_allocate,
+ ++hostdata->num_cmds) {
+ /* historically, kmalloc has returned unaligned addresses; pad so we
+ have enough room to ROUNDUP */
+ size = hostdata->max_cmd_size + sizeof (void *);
+/* FIXME: for ISA bus '7xx chips, we need to or GFP_DMA in here */
+ real = kmalloc (size, GFP_ATOMIC);
+ if (!real) {
+ if (hostdata->options & OPTION_DEBUG_ALLOCATION)
+ printk ("scsi%d : kmalloc(%d) failed\n",
+ host->host_no, size);
+ break;
+ }
+ tmp = ROUNDUP(real, void *);
+ tmp->real = real;
+ tmp->size = size;
+#ifdef LINUX_1_2
+ tmp->free = ((void (*)(void *, int)) kfree_s);
+#else
+ tmp->free = ((void (*)(void *, int)) kfree);
+#endif
+ save_flags (flags);
+ cli();
+ tmp->next = hostdata->free;
+ hostdata->free = tmp;
+ restore_flags (flags);
+ }
+ save_flags(flags);
+ cli();
+ tmp = (struct NCR53c7x0_cmd *) hostdata->free;
+ if (tmp) {
+ hostdata->free = tmp->next;
+ }
+ restore_flags(flags);
+ if (!tmp)
+ printk ("scsi%d : can't allocate command for target %d lun %d\n",
+ host->host_no, cmd->target, cmd->lun);
+ return tmp;
+}
+
+/*
+ * Function static struct NCR53c7x0_cmd *create_cmd (Scsi_Cmnd *cmd)
+ *
+ *
+ * Purpose : allocate a NCR53c7x0_cmd structure, initialize it based on the
+ * Scsi_Cmnd structure passed in cmd, including dsa and Linux field
+ * initialization, and dsa code relocation.
+ *
+ * Inputs : cmd - SCSI command
+ *
+ * Returns : NCR53c7x0_cmd structure corresponding to cmd,
+ * NULL on failure.
+ */
+
+static struct NCR53c7x0_cmd *
+create_cmd (Scsi_Cmnd *cmd) {
+ NCR53c7x0_local_declare();
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_cmd *tmp; /* NCR53c7x0_cmd structure for this command */
+ int datain, /* Number of instructions per phase */
+ dataout;
+ int data_transfer_instructions, /* Count of dynamic instructions */
+ i; /* Counter */
+ u32 *cmd_datain, /* Address of datain/dataout code */
+ *cmd_dataout; /* Incremented as we assemble */
+#ifdef notyet
+ unsigned char *msgptr; /* Current byte in select message */
+ int msglen; /* Length of whole select message */
+#endif
+ unsigned long flags;
+ NCR53c7x0_local_setup(cmd->host);
+
+ if (!(tmp = allocate_cmd (cmd)))
+ return NULL;
+
+
+ /*
+ * Decide whether we need to generate commands for DATA IN,
+ * DATA OUT, neither, or both based on the SCSI command
+ */
+
+ switch (cmd->cmnd[0]) {
+ /* These commands do DATA IN */
+ case INQUIRY:
+ case MODE_SENSE:
+ case READ_6:
+ case READ_10:
+ case READ_CAPACITY:
+ case REQUEST_SENSE:
+ datain = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
+ dataout = 0;
+ break;
+ /* These commands do DATA OUT */
+ case MODE_SELECT:
+ case WRITE_6:
+ case WRITE_10:
+#if 0
+ printk("scsi%d : command is ", host->host_no);
+ print_command(cmd->cmnd);
+#endif
+#if 0
+ printk ("scsi%d : %d scatter/gather segments\n", host->host_no,
+ cmd->use_sg);
+#endif
+ datain = 0;
+ dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
+#if 0
+ hostdata->options |= OPTION_DEBUG_INTR;
+#endif
+ break;
+ /*
+ * These commands do no data transfer, we should force an
+ * interrupt if a data phase is attempted on them.
+ */
+ case START_STOP:
+ case TEST_UNIT_READY:
+ datain = dataout = 0;
+ break;
+ /*
+ * We don't know about these commands, so generate code to handle
+ * both DATA IN and DATA OUT phases.
+ */
+ default:
+ datain = dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
+ }
+
+ /*
+     * New code : so that active pointers work correctly regardless
+     * of where the saved data pointer is, we want to immediately
+ * enter the dynamic code after selection, and on a non-data
+ * phase perform a CALL to the non-data phase handler, with
+ * returns back to this address.
+ *
+ * If a phase mismatch is encountered in the middle of a
+ * Block MOVE instruction, we want to _leave_ that instruction
+ * unchanged as the current case is, modify a temporary buffer,
+ * and point the active pointer (TEMP) at that.
+ *
+ * Furthermore, we want to implement a saved data pointer,
+ * set by the SAVE_DATA_POINTERs message.
+ *
+ * So, the data transfer segments will change to
+ * CALL data_transfer, WHEN NOT data phase
+ * MOVE x, x, WHEN data phase
+ * ( repeat )
+ * JUMP other_transfer
+ */
+
+ data_transfer_instructions = datain + dataout;
+
+ /*
+ * When we perform a request sense, we overwrite various things,
+ * including the data transfer code. Make sure we have enough
+ * space to do that.
+ */
+
+ if (data_transfer_instructions < 2)
+ data_transfer_instructions = 2;
+
+
+ /*
+ * The saved data pointer is set up so that a RESTORE POINTERS message
+     * will start the data transfer over at the beginning.
+ */
+
+ tmp->saved_data_pointer = virt_to_bus (hostdata->script) +
+ hostdata->E_data_transfer;
+
+ /*
+ * Initialize Linux specific fields.
+ */
+
+ tmp->cmd = cmd;
+ tmp->next = NULL;
+ tmp->flags = 0;
+ tmp->dsa_next_addr = virt_to_bus(tmp->dsa) + hostdata->dsa_next -
+ hostdata->dsa_start;
+ tmp->dsa_addr = virt_to_bus(tmp->dsa) - hostdata->dsa_start;
+
+ /*
+ * Calculate addresses of dynamic code to fill in DSA
+ */
+
+ tmp->data_transfer_start = tmp->dsa + (hostdata->dsa_end -
+ hostdata->dsa_start) / sizeof(u32);
+ tmp->data_transfer_end = tmp->data_transfer_start +
+ 2 * data_transfer_instructions;
+
+ cmd_datain = datain ? tmp->data_transfer_start : NULL;
+ cmd_dataout = dataout ? (datain ? cmd_datain + 2 * datain : tmp->
+ data_transfer_start) : NULL;
+
+ /*
+ * Fill in the NCR53c7x0_cmd structure as follows
+ * dsa, with fixed up DSA code
+ * datain code
+ * dataout code
+ */
+
+ /* Copy template code into dsa and perform all necessary fixups */
+ if (hostdata->dsa_fixup)
+ hostdata->dsa_fixup(tmp);
+
+ patch_dsa_32(tmp->dsa, dsa_next, 0, 0);
+ patch_dsa_32(tmp->dsa, dsa_cmnd, 0, virt_to_bus(cmd));
+
+ if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS)
+ if (hostdata->sync[cmd->target].select_indirect !=
+ ((hostdata->sync[cmd->target].scntl3_sanity << 24) |
+ (cmd->target << 16) |
+ (hostdata->sync[cmd->target].sxfer_sanity << 8))) {
+ printk ("scsi%d : sanity check failed select_indirect=0x%x\n",
+ host->host_no, hostdata->sync[cmd->target].select_indirect);
+ FATAL(host);
+
+ }
+
+ patch_dsa_32(tmp->dsa, dsa_select, 0, hostdata->sync[cmd->target].
+ select_indirect);
+ /*
+ * Right now, we'll do the WIDE and SYNCHRONOUS negotiations on
+ * different commands; although it should be trivial to do them
+ * both at the same time.
+ */
+ if (hostdata->initiate_wdtr & (1 << cmd->target)) {
+ memcpy ((void *) (tmp->select + 1), (void *) wdtr_message,
+ sizeof(wdtr_message));
+ patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(wdtr_message));
+ save_flags(flags);
+ cli();
+ hostdata->initiate_wdtr &= ~(1 << cmd->target);
+ restore_flags(flags);
+ } else if (hostdata->initiate_sdtr & (1 << cmd->target)) {
+ memcpy ((void *) (tmp->select + 1), (void *) sdtr_message,
+ sizeof(sdtr_message));
+ patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(sdtr_message));
+ tmp->flags |= CMD_FLAG_SDTR;
+ save_flags(flags);
+ cli();
+ hostdata->initiate_sdtr &= ~(1 << cmd->target);
+ restore_flags(flags);
+
+ }
+#if 1
+ else if (!(hostdata->talked_to & (1 << cmd->target)) &&
+ !(hostdata->options & OPTION_NO_ASYNC)) {
+ memcpy ((void *) (tmp->select + 1), (void *) async_message,
+ sizeof(async_message));
+ patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(async_message));
+ tmp->flags |= CMD_FLAG_SDTR;
+ }
+#endif
+ else
+ patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1);
+ hostdata->talked_to |= (1 << cmd->target);
+ tmp->select[0] = (hostdata->options & OPTION_DISCONNECT) ?
+ IDENTIFY (1, cmd->lun) : IDENTIFY (0, cmd->lun);
+ patch_dsa_32(tmp->dsa, dsa_msgout, 1, virt_to_bus(tmp->select));
+ patch_dsa_32(tmp->dsa, dsa_cmdout, 0, cmd->cmd_len);
+ patch_dsa_32(tmp->dsa, dsa_cmdout, 1, virt_to_bus(cmd->cmnd));
+ patch_dsa_32(tmp->dsa, dsa_dataout, 0, cmd_dataout ?
+ virt_to_bus (cmd_dataout)
+ : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
+ patch_dsa_32(tmp->dsa, dsa_datain, 0, cmd_datain ?
+ virt_to_bus (cmd_datain)
+ : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
+ /*
+ * XXX - need to make endian aware, should use separate variables
+ * for both status and message bytes.
+ */
+ patch_dsa_32(tmp->dsa, dsa_msgin, 0, 1);
+/*
+ * FIXME : these only work for little endian.  We probably want to
+ * provide message and status fields in the NCR53c7x0_cmd
+ * structure, and assign them to cmd->result when we're done.
+ */
+ patch_dsa_32(tmp->dsa, dsa_msgin, 1, virt_to_bus(&cmd->result) + 1);
+ patch_dsa_32(tmp->dsa, dsa_status, 0, 1);
+ patch_dsa_32(tmp->dsa, dsa_status, 1, virt_to_bus(&cmd->result));
+ patch_dsa_32(tmp->dsa, dsa_msgout_other, 0, 1);
+ patch_dsa_32(tmp->dsa, dsa_msgout_other, 1,
+ virt_to_bus(&(hostdata->NCR53c7xx_msg_nop)));
+
+ /*
+ * Generate code for zero or more of the DATA IN, DATA OUT phases
+ * in the format
+ *
+ * CALL data_transfer, WHEN NOT phase
+ * MOVE first buffer length, first buffer address, WHEN phase
+ * ...
+ * MOVE last buffer length, last buffer address, WHEN phase
+ * JUMP other_transfer
+ */
+
+/*
+ * See if we're getting to data transfer by generating an unconditional
+ * interrupt.
+ */
+#if 0
+ if (datain) {
+ cmd_datain[0] = 0x98080000;
+ cmd_datain[1] = 0x03ffd00d;
+ cmd_datain += 2;
+ }
+#endif
+
+/*
+ * XXX - I'm undecided whether all of this nonsense is faster
+ * in the long run, or whether I should just go and implement a loop
+ * on the NCR chip using table indirect mode?
+ *
+ * In any case, this is how it _must_ be done for 53c700/700-66 chips,
+ * so this stays even when we come up with something better.
+ *
+ * When we're limited to 1 simultaneous command, no overlapping processing,
+ * we're seeing 630K/sec, with 7% CPU usage on a slow Syquest 45M
+ * drive.
+ *
+ * Not bad, not good. We'll see.
+ */
+
+ for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4,
+ cmd_dataout += 4, ++i) {
+ u32 buf = cmd->use_sg ?
+ virt_to_bus(((struct scatterlist *)cmd->buffer)[i].address) :
+ virt_to_bus(cmd->request_buffer);
+ u32 count = cmd->use_sg ?
+ ((struct scatterlist *)cmd->buffer)[i].length :
+ cmd->request_bufflen;
+
+ if (datain) {
+ /* CALL other_in, WHEN NOT DATA_IN */
+ cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
+ DCMD_TCI_IO) << 24) |
+ DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
+ cmd_datain[1] = virt_to_bus (hostdata->script) +
+ hostdata->E_other_in;
+ /* MOVE count, buf, WHEN DATA_IN */
+ cmd_datain[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I | DCMD_BMI_IO)
+ << 24) | count;
+ cmd_datain[3] = buf;
+#if 0
+ print_insn (host, cmd_datain, "dynamic ", 1);
+ print_insn (host, cmd_datain + 2, "dynamic ", 1);
+#endif
+ }
+ if (dataout) {
+ /* CALL other_out, WHEN NOT DATA_OUT */
+ cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL) << 24) |
+ DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
+ cmd_dataout[1] = virt_to_bus(hostdata->script) +
+ hostdata->E_other_out;
+	    /* MOVE count, buf, WHEN DATA_OUT */
+ cmd_dataout[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I) << 24)
+ | count;
+ cmd_dataout[3] = buf;
+#if 0
+ print_insn (host, cmd_dataout, "dynamic ", 1);
+ print_insn (host, cmd_dataout + 2, "dynamic ", 1);
+#endif
+ }
+ }
+
+ /*
+ * Install JUMP instructions after the data transfer routines to return
+ * control to the do_other_transfer routines.
+ */
+
+
+ if (datain) {
+ cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
+ DBC_TCI_TRUE;
+ cmd_datain[1] = virt_to_bus(hostdata->script) +
+ hostdata->E_other_transfer;
+#if 0
+ print_insn (host, cmd_datain, "dynamic jump ", 1);
+#endif
+ cmd_datain += 2;
+ }
+#if 0
+ if (datain) {
+ cmd_datain[0] = 0x98080000;
+ cmd_datain[1] = 0x03ffdeed;
+ cmd_datain += 2;
+ }
+#endif
+ if (dataout) {
+ cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
+ DBC_TCI_TRUE;
+ cmd_dataout[1] = virt_to_bus(hostdata->script) +
+ hostdata->E_other_transfer;
+#if 0
+ print_insn (host, cmd_dataout, "dynamic jump ", 1);
+#endif
+ cmd_dataout += 2;
+ }
+ return tmp;
+}
+
+/*
+ * Function : int NCR53c7xx_queue_command (Scsi_Cmnd *cmd,
+ * void (*done)(Scsi_Cmnd *))
+ *
+ * Purpose : enqueues a SCSI command
+ *
+ * Inputs : cmd - SCSI command, done - function called on completion, with
+ * a pointer to the command descriptor.
+ *
+ * Returns : 0
+ *
+ * Side effects :
+ * cmd is added to the per instance driver issue_queue, with major
+ * twiddling done to the host specific fields of cmd. If the
+ *      process_issue_queue coroutine isn't running, it is restarted.
+ *
+ * NOTE : we use the host_scribble field of the Scsi_Cmnd structure to
+ * hold our own data, and pervert the ptr field of the SCp field
+ * to create a linked list.
+ */
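+
+/*
+ * (That is : cmd->host_scribble points at the NCR53c7x0_cmd built by
+ * create_cmd() for this command, and cmd->SCp.ptr serves as the "next"
+ * pointer of the issue_queue list of Scsi_Cmnd structures, with NULL
+ * terminating the list.)
+ */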
+
+int
+NCR53c7xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *)) {
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+ unsigned long flags;
+ Scsi_Cmnd *tmp;
+
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.buffer = NULL;
+
+ save_flags(flags);
+ cli();
+ if ((hostdata->options & (OPTION_DEBUG_INIT_ONLY|OPTION_DEBUG_PROBE_ONLY))
+ || ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
+ !(hostdata->debug_lun_limit[cmd->target] & (1 << cmd->lun)))
+#ifdef LINUX_1_2
+ || cmd->target > 7
+#else
+ || cmd->target > host->max_id
+#endif
+ || cmd->target == host->this_id
+ || hostdata->state == STATE_DISABLED) {
+ printk("scsi%d : disabled or bad target %d lun %d\n", host->host_no,
+ cmd->target, cmd->lun);
+ cmd->result = (DID_BAD_TARGET << 16);
+ } else if ((hostdata->options & OPTION_DEBUG_NCOMMANDS_LIMIT) &&
+ (hostdata->debug_count_limit == 0)) {
+ printk("scsi%d : maximum commands exceeded\n", host->host_no);
+	cmd->result = (DID_BAD_TARGET << 16);
+ } else if (hostdata->options & OPTION_DEBUG_READ_ONLY) {
+ switch (cmd->cmnd[0]) {
+ case WRITE_6:
+ case WRITE_10:
+ printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n",
+ host->host_no);
+ cmd->result = (DID_BAD_TARGET << 16);
+ }
+ } else {
+ if ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
+ hostdata->debug_count_limit != -1)
+ --hostdata->debug_count_limit;
+ restore_flags (flags);
+ cmd->result = 0xffff; /* The NCR will overwrite message
+ and status with valid data */
+	cmd->host_scribble = (unsigned char *) (tmp = create_cmd (cmd));
+ }
+ cli();
+ /*
+ * REQUEST SENSE commands are inserted at the head of the queue
+     * so that we do not clear the contingent allegiance condition
+ * they may be looking at.
+ */
+
+ if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ cmd->SCp.ptr = (unsigned char *) hostdata->issue_queue;
+ hostdata->issue_queue = cmd;
+ } else {
+ for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->SCp.ptr;
+ tmp = (Scsi_Cmnd *) tmp->SCp.ptr);
+ tmp->SCp.ptr = (unsigned char *) cmd;
+ }
+ restore_flags (flags);
+ run_process_issue_queue();
+ return 0;
+}
+
+/*
+ * Function : void to_schedule_list (struct Scsi_Host *host,
+ *	struct NCR53c7x0_hostdata * hostdata, struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : takes a SCSI command which was just removed from the
+ * issue queue, and deals with it by inserting it in the first
+ * free slot in the schedule list or by terminating it immediately.
+ *
+ * Inputs :
+ *	host - SCSI host adapter; hostdata - hostdata structure for
+ *	this adapter; cmd - a pointer to the command; should have
+ *	the host_scribble field initialized to point to a valid
+ *	NCR53c7x0_cmd structure.
+ *
+ * Side effects :
+ * cmd is added to the per instance schedule list, with minor
+ * twiddling done to the host specific fields of cmd.
+ *
+ */
+
+static __inline__ void
+to_schedule_list (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
+ struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ Scsi_Cmnd *tmp = cmd->cmd;
+ unsigned long flags;
+ /* dsa start is negative, so subtraction is used */
+ volatile u32 *current;
+
+ int i;
+ NCR53c7x0_local_setup(host);
+#if 0
+ printk("scsi%d : new dsa is 0x%lx (virt 0x%p)\n", host->host_no,
+ virt_to_bus(dsa), dsa);
+#endif
+
+ save_flags(flags);
+ cli();
+
+ /*
+     * Work around race condition : if an interrupt fired and we
+     * got disabled, forget about this command.
+ */
+
+ if (hostdata->state == STATE_DISABLED) {
+ printk("scsi%d : driver disabled\n", host->host_no);
+ tmp->result = (DID_BAD_TARGET << 16);
+ cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
+ hostdata->free = cmd;
+ tmp->scsi_done(tmp);
+ restore_flags (flags);
+ return;
+ }
+
+ for (i = host->can_queue, current = hostdata->schedule;
+ i > 0 && current[0] != hostdata->NOP_insn;
+ --i, current += 2 /* JUMP instructions are two words */);
+
+ if (i > 0) {
+ ++hostdata->busy[tmp->target][tmp->lun];
+ cmd->next = hostdata->running_list;
+ hostdata->running_list = cmd;
+
+ /* Restore this instruction to a NOP once the command starts */
+ cmd->dsa [(hostdata->dsa_jump_dest - hostdata->dsa_start) /
+ sizeof(u32)] = (u32) virt_to_bus ((void *)current);
+ /* Replace the current jump operand. */
+ current[1] =
+ virt_to_bus ((void *) cmd->dsa) + hostdata->E_dsa_code_begin -
+ hostdata->E_dsa_code_template;
+ /* Replace the NOP instruction with a JUMP */
+ current[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) |
+ DBC_TCI_TRUE;
+ } else {
+ printk ("scsi%d: no free slot\n", host->host_no);
+ disable(host);
+ tmp->result = (DID_ERROR << 16);
+ cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
+ hostdata->free = cmd;
+ tmp->scsi_done(tmp);
+ restore_flags (flags);
+ return;
+ }
+
+ /*
+ * If the NCR chip is in an idle state, start it running the scheduler
+ * immediately. Otherwise, signal the chip to jump to schedule as
+ * soon as it is idle.
+ */
+ if (hostdata->idle) {
+ hostdata->idle = 0;
+ hostdata->state = STATE_RUNNING;
+ NCR53c7x0_write32 (DSP_REG, virt_to_bus ((void *)hostdata->schedule));
+ } else {
+ NCR53c7x0_write8(hostdata->istat, ISTAT_10_SIGP);
+ }
+
+ restore_flags(flags);
+}
+
+/*
+ * Function : busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata
+ * *hostdata, Scsi_Cmnd *cmd)
+ *
+ * Purpose : decide if we can pass the given SCSI command on to the
+ * device in question or not.
+ *
+ * Returns : non-zero when we're busy, 0 when we aren't.
+ */
+
+static __inline__ int
+busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
+ Scsi_Cmnd *cmd) {
+    /* FIXME : in the future, this needs to accommodate SCSI-II tagged
+ queuing, and we may be able to play with fairness here a bit.
+ */
+ return hostdata->busy[cmd->target][cmd->lun];
+}
+
+/*
+ * Function : process_issue_queue (void)
+ *
+ * Purpose : transfer commands from the issue queue to NCR start queue
+ * of each NCR53c7/8xx in the system, avoiding kernel stack
+ * overflows when the scsi_done() function is invoked recursively.
+ *
+ * NOTE : process_issue_queue exits with interrupts *disabled*, so the
+ *	caller must re-enable them if it desires.
+ *
+ * NOTE : process_issue_queue should be called from both
+ * NCR53c7x0_queue_command() and from the interrupt handler
+ * after command completion in case NCR53c7x0_queue_command()
+ * isn't invoked again but we've freed up resources that are
+ * needed.
+ */
+
+static void
+process_issue_queue (unsigned long flags) {
+ Scsi_Cmnd *tmp, *prev;
+ struct Scsi_Host *host;
+ struct NCR53c7x0_hostdata *hostdata;
+ int done;
+
+ /*
+ * We run (with interrupts disabled) until we're sure that none of
+ * the host adapters have anything that can be done, at which point
+ * we set process_issue_queue_running to 0 and exit.
+ *
+ * Interrupts are enabled before doing various other internal
+ * instructions, after we've decided that we need to run through
+ * the loop again.
+ *
+ */
+
+ do {
+ cli(); /* Freeze request queues */
+ done = 1;
+ for (host = first_host; host && host->hostt == the_template;
+ host = host->next) {
+ hostdata = (struct NCR53c7x0_hostdata *) host->hostdata;
+ cli();
+ if (hostdata->issue_queue) {
+ if (hostdata->state == STATE_DISABLED) {
+ tmp = (Scsi_Cmnd *) hostdata->issue_queue;
+ hostdata->issue_queue = (Scsi_Cmnd *) tmp->SCp.ptr;
+ tmp->result = (DID_BAD_TARGET << 16);
+ if (tmp->host_scribble) {
+ ((struct NCR53c7x0_cmd *)tmp->host_scribble)->next =
+ hostdata->free;
+ hostdata->free =
+ (struct NCR53c7x0_cmd *)tmp->host_scribble;
+ tmp->host_scribble = NULL;
+ }
+ tmp->scsi_done (tmp);
+ done = 0;
+ } else
+ for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
+ prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *)
+ tmp->SCp.ptr)
+ if (!tmp->host_scribble ||
+ !busyp (host, hostdata, tmp)) {
+ if (prev)
+ prev->SCp.ptr = tmp->SCp.ptr;
+ else
+ hostdata->issue_queue = (Scsi_Cmnd *)
+ tmp->SCp.ptr;
+ tmp->SCp.ptr = NULL;
+ if (tmp->host_scribble) {
+ if (hostdata->options & OPTION_DEBUG_QUEUES)
+ printk ("scsi%d : moving command for target %d lun %d to start list\n",
+ host->host_no, tmp->target, tmp->lun);
+
+
+ to_schedule_list (host, hostdata,
+ (struct NCR53c7x0_cmd *)
+ tmp->host_scribble);
+ } else {
+ if (((tmp->result & 0xff) == 0xff) ||
+ ((tmp->result & 0xff00) == 0xff00)) {
+ printk ("scsi%d : danger Will Robinson!\n",
+ host->host_no);
+ tmp->result = DID_ERROR << 16;
+ disable (host);
+ }
+ tmp->scsi_done(tmp);
+ }
+ done = 0;
+ } /* if target/lun is not busy */
+ } /* if hostdata->issue_queue */
+ if (!done)
+ restore_flags (flags);
+ } /* for host */
+ } while (!done);
+ process_issue_queue_running = 0;
+}
+
+/*
+ * Function : static void intr_scsi (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : handle all SCSI interrupts, indicated by the setting
+ * of the SIP bit in the ISTAT register.
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * may be NULL.
+ */
+
+static void
+intr_scsi (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+ unsigned char sstat0_sist0, sist1, /* Registers */
+ fatal; /* Did a fatal interrupt
+ occur ? */
+
+ int is_8xx_chip;
+ NCR53c7x0_local_setup(host);
+
+ fatal = 0;
+
+ is_8xx_chip = ((unsigned) (hostdata->chip - 800)) < 100;
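+    /* i.e. true for chip numbers 800 through 899 */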
+ if (is_8xx_chip) {
+ sstat0_sist0 = NCR53c7x0_read8(SIST0_REG_800);
+ udelay(1);
+ sist1 = NCR53c7x0_read8(SIST1_REG_800);
+ } else {
+ sstat0_sist0 = NCR53c7x0_read8(SSTAT0_REG);
+ sist1 = 0;
+ }
+
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : SIST0 0x%0x, SIST1 0x%0x\n", host->host_no,
+ sstat0_sist0, sist1);
+
+ /* 250ms selection timeout */
+ if ((is_8xx_chip && (sist1 & SIST1_800_STO)) ||
+ (!is_8xx_chip && (sstat0_sist0 & SSTAT0_700_STO))) {
+ fatal = 1;
+ if (hostdata->options & OPTION_DEBUG_INTR) {
+ printk ("scsi%d : Selection Timeout\n", host->host_no);
+ if (cmd) {
+ printk("scsi%d : target %d, lun %d, command ",
+ host->host_no, cmd->cmd->target, cmd->cmd->lun);
+ print_command (cmd->cmd->cmnd);
+ printk("scsi%d : dsp = 0x%x (virt 0x%p)\n", host->host_no,
+ NCR53c7x0_read32(DSP_REG),
+ bus_to_virt(NCR53c7x0_read32(DSP_REG)));
+ } else {
+ printk("scsi%d : no command\n", host->host_no);
+ }
+ }
+/*
+ * XXX - question : how do we want to handle the Illegal Instruction
+ * interrupt, which may occur before or after the Selection Timeout
+ * interrupt?
+ */
+
+ if (1) {
+ hostdata->idle = 1;
+ hostdata->expecting_sto = 0;
+
+ if (hostdata->test_running) {
+ hostdata->test_running = 0;
+ hostdata->test_completed = 3;
+ } else if (cmd) {
+ abnormal_finished(cmd, DID_BAD_TARGET << 16);
+ }
+#if 0
+ hostdata->intrs = 0;
+#endif
+ }
+ }
+
+/*
+ * FIXME : in theory, we can also get a UDC when a STO occurs.
+ */
+ if (sstat0_sist0 & SSTAT0_UDC) {
+ fatal = 1;
+ if (cmd) {
+ printk("scsi%d : target %d lun %d unexpected disconnect\n",
+ host->host_no, cmd->cmd->target, cmd->cmd->lun);
+ print_lots (host);
+ abnormal_finished(cmd, DID_ERROR << 16);
+ } else
+ printk("scsi%d : unexpected disconnect (no command)\n",
+ host->host_no);
+
+ hostdata->dsp = (u32 *) hostdata->schedule;
+ hostdata->dsp_changed = 1;
+ }
+
+ /* SCSI PARITY error */
+ if (sstat0_sist0 & SSTAT0_PAR) {
+ fatal = 1;
+ if (cmd && cmd->cmd) {
+ printk("scsi%d : target %d lun %d parity error.\n",
+ host->host_no, cmd->cmd->target, cmd->cmd->lun);
+ abnormal_finished (cmd, DID_PARITY << 16);
+ } else
+ printk("scsi%d : parity error\n", host->host_no);
+ /* Should send message out, parity error */
+
+ /* XXX - Reduce synchronous transfer rate! */
+ hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ /* SCSI GROSS error */
+ }
+
+ if (sstat0_sist0 & SSTAT0_SGE) {
+ fatal = 1;
+ printk("scsi%d : gross error\n", host->host_no);
+ /* Reset SCSI offset */
+ if ((hostdata->chip / 100) == 8) {
+ NCR53c7x0_write8 (STEST2_REG_800, STEST2_800_ROF);
+ }
+
+ /*
+ * A SCSI gross error may occur when we have
+ *
+ * - A synchronous offset which causes the SCSI FIFO to be overwritten.
+ *
+	 * - A REQ which causes the maximum synchronous offset programmed in
+ * the SXFER register to be exceeded.
+ *
+ * - A phase change with an outstanding synchronous offset.
+ *
+	 * - Residual data in the synchronous data FIFO when a transfer
+	 *   other than a synchronous receive is started.
+ */
+
+
+ /* XXX Should deduce synchronous transfer rate! */
+ hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ /* Phase mismatch */
+ }
+
+ if (sstat0_sist0 & SSTAT0_MA) {
+ fatal = 1;
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : SSTAT0_MA\n", host->host_no);
+ intr_phase_mismatch (host, cmd);
+ }
+
+#if 0
+ if (sstat0_sist0 & SIST0_800_RSL)
+ printk ("scsi%d : Oh no Mr. Bill!\n", host->host_no);
+#endif
+
+/*
+ * If a fatal SCSI interrupt occurs, we must insure that the DMA and
+ * SCSI FIFOs were flushed.
+ */
+
+ if (fatal) {
+ if (!hostdata->dstat_valid) {
+ hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+ hostdata->dstat_valid = 1;
+ }
+
+/* XXX - code check for 700/800 chips */
+ if (!(hostdata->dstat & DSTAT_DFE)) {
+ printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
+ if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+ printk ("scsi%d: Flushing DMA FIFO\n",
+ host->host_no);
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_FLF);
+ while (!((hostdata->dstat = NCR53c7x0_read8(DSTAT_REG)) &
+ DSTAT_DFE));
+ } else {
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_CLF);
+ while (NCR53c7x0_read8 (CTEST3_REG_800) & CTEST3_800_CLF);
+ }
+ hostdata->dstat |= DSTAT_DFE;
+ }
+ }
+}
+
+/*
+ * Function : static void NCR53c7x0_intr (int irq, struct pt_regs * regs)
+ *
+ * Purpose : handle NCR53c7x0 interrupts for all NCR devices sharing
+ * the same IRQ line.
+ *
+ * Inputs : Since we're using the SA_INTERRUPT interrupt handler
+ * semantics, irq indicates the interrupt which invoked
+ * this handler.
+ */
+
+static void
+NCR53c7x0_intr (int irq, struct pt_regs * regs) {
+ NCR53c7x0_local_declare();
+ struct Scsi_Host *host; /* Host we are looking at */
+ unsigned char istat; /* Values of interrupt regs */
+ struct NCR53c7x0_hostdata *hostdata; /* host->hostdata */
+ struct NCR53c7x0_cmd *cmd, /* command which halted */
+ **cmd_prev_ptr;
+ u32 *dsa; /* DSA */
+ int done = 1; /* Indicates when handler
+ should terminate */
+ int interrupted = 0; /* This HA generated
+ an interrupt */
+ int have_intfly; /* Don't print warning
+ messages when we stack
+ INTFLYs */
+ unsigned long flags;
+
+#ifdef NCR_DEBUG
+ char buf[80]; /* Debugging sprintf buffer */
+ size_t buflen; /* Length of same */
+#endif
+
+ do {
+ done = 1;
+ for (host = first_host; host; host = host->next)
+ if (host->hostt == the_template && host->irq == irq) {
+ NCR53c7x0_local_setup(host);
+
+ hostdata = (struct NCR53c7x0_hostdata *) host->hostdata;
+ hostdata->dsp_changed = 0;
+ interrupted = 0;
+ have_intfly = 0;
+
+ do {
+ int is_8xx_chip;
+
+ hostdata->dstat_valid = 0;
+ interrupted = 0;
+ /*
+ * Only read istat once, since reading it again will unstack
+ * interrupts?
+ */
+ istat = NCR53c7x0_read8(hostdata->istat);
+
+ /*
+ * INTFLY interrupts are used by the NCR53c720, NCR53c810,
+ * and NCR53c820 to signify completion of a command. Since
+ * the SCSI processor continues running, we can't just look
+ * at the contents of the DSA register and continue running.
+ */
+/* XXX - this is too big, offends my sense of aesthetics, and should
+ move to intr_intfly() */
+ is_8xx_chip = ((unsigned) (hostdata->chip - 800)) < 100;
+ if ((hostdata->options & OPTION_INTFLY) &&
+ (is_8xx_chip && (istat & ISTAT_800_INTF))) {
+ char search_found = 0; /* Got at least one ? */
+ done = 0;
+ interrupted = 1;
+
+ /*
+ * Clear the INTF bit by writing a one.
+ * This reset operation is self-clearing.
+ */
+ NCR53c7x0_write8(hostdata->istat, istat|ISTAT_800_INTF);
+
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : INTFLY\n", host->host_no);
+
+ /*
+ * Traverse our list of running commands, and look
+ * for those with valid (non-0xff ff) status and message
+ * bytes encoded in the result which signify command
+ * completion.
+ */
+
+
+ save_flags(flags);
+ cli();
+restart:
+ for (cmd_prev_ptr = (struct NCR53c7x0_cmd **)
+ &(hostdata->running_list), cmd =
+ (struct NCR53c7x0_cmd *) hostdata->running_list; cmd ;
+ cmd_prev_ptr = (struct NCR53c7x0_cmd **) &(cmd->next),
+ cmd = (struct NCR53c7x0_cmd *) cmd->next) {
+ Scsi_Cmnd *tmp;
+
+ if (!cmd) {
+ printk("scsi%d : very weird.\n", host->host_no);
+ break;
+ }
+
+ if (!(tmp = cmd->cmd)) {
+ printk("scsi%d : weird. NCR53c7x0_cmd has no Scsi_Cmnd\n",
+ host->host_no);
+ continue;
+ }
+#if 0
+ printk ("scsi%d : looking at result of 0x%x\n",
+ host->host_no, cmd->cmd->result);
+#endif
+
+ if (((tmp->result & 0xff) == 0xff) ||
+ ((tmp->result & 0xff00) == 0xff00))
+ continue;
+
+ search_found = 1;
+
+ /* Important - remove from list _before_ done is called */
+ if (cmd_prev_ptr)
+ *cmd_prev_ptr = (struct NCR53c7x0_cmd *) cmd->next;
+
+ --hostdata->busy[tmp->target][tmp->lun];
+ cmd->next = hostdata->free;
+ hostdata->free = cmd;
+
+ tmp->host_scribble = NULL;
+
+ if (hostdata->options & OPTION_DEBUG_INTR) {
+			    printk ("scsi%d : command complete : pid %lu, id %d, lun %d result 0x%x ",
+ host->host_no, tmp->pid, tmp->target, tmp->lun, tmp->result);
+ print_command (tmp->cmnd);
+ }
+
+#if 0
+ hostdata->options &= ~OPTION_DEBUG_INTR;
+#endif
+ tmp->scsi_done(tmp);
+ goto restart;
+
+ }
+ restore_flags(flags);
+
+ /*
+ * I think that we're stacking INTFLY interrupts; taking care of
+ * all the finished commands on the first one, and then getting
+ * worried when we see the next one. The magic with have_intfly
+		     * should tell if this is the case.
+ */
+
+ if (!search_found && !have_intfly) {
+ printk ("scsi%d : WARNING : INTFLY with no completed commands.\n",
+ host->host_no);
+ } else if (!have_intfly) {
+ have_intfly = 1;
+ run_process_issue_queue();
+ }
+ }
+
+ if (istat & (ISTAT_SIP|ISTAT_DIP)) {
+ done = 0;
+ interrupted = 1;
+ hostdata->state = STATE_HALTED;
+
+ if (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK)
+ printk ("scsi%d : SCSI FIFO not empty\n",
+ host->host_no);
+
+ /*
+ * NCR53c700 and NCR53c700-66 change the current SCSI
+ * process, hostdata->current, in the Linux driver so
+ * cmd = hostdata->current.
+ *
+ * With other chips, we must look through the commands
+ * executing and find the command structure which
+ * corresponds to the DSA register.
+ */
+
+ if (hostdata->options & OPTION_700) {
+ cmd = (struct NCR53c7x0_cmd *) hostdata->current;
+ } else {
+ dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+ for (cmd = (struct NCR53c7x0_cmd *)
+ hostdata->running_list; cmd &&
+ (dsa + (hostdata->dsa_start / sizeof(u32))) !=
+ cmd->dsa;
+ cmd = (struct NCR53c7x0_cmd *)(cmd->next));
+ }
+ if (hostdata->options & OPTION_DEBUG_INTR) {
+ if (cmd) {
+ printk("scsi%d : interrupt for pid %lu, id %d, lun %d ",
+ host->host_no, cmd->cmd->pid, (int) cmd->cmd->target,
+ (int) cmd->cmd->lun);
+ print_command (cmd->cmd->cmnd);
+ } else {
+ printk("scsi%d : no active command\n", host->host_no);
+ }
+ }
+
+ if (istat & ISTAT_SIP) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : ISTAT_SIP\n", host->host_no);
+ intr_scsi (host, cmd);
+ }
+
+ if (istat & ISTAT_DIP) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : ISTAT_DIP\n", host->host_no);
+ intr_dma (host, cmd);
+ }
+
+ if (!hostdata->dstat_valid) {
+ hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+ hostdata->dstat_valid = 1;
+ }
+
+ /* XXX - code check for 700/800 chips */
+ if (!(hostdata->dstat & DSTAT_DFE)) {
+ printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
+ if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+ printk ("scsi%d: Flushing DMA FIFO\n",
+ host->host_no);
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_FLF);
+ while (!((hostdata->dstat = NCR53c7x0_read8(DSTAT_REG)) &
+ DSTAT_DFE));
+ } else
+ {
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_CLF);
+ while (NCR53c7x0_read8 (CTEST3_REG_800) & CTEST3_800_CLF);
+ }
+ hostdata->dstat |= DSTAT_DFE;
+ }
+ }
+ } while (interrupted);
+
+
+
+ if (hostdata->intrs != -1)
+ hostdata->intrs++;
+#if 0
+ if (hostdata->intrs > 40) {
+ printk("scsi%d : too many interrupts, halting", host->host_no);
+ disable(host);
+ }
+#endif
+
+ if (!hostdata->idle && hostdata->state == STATE_HALTED) {
+ if (!hostdata->dsp_changed) {
+ hostdata->dsp = (u32 *)
+ bus_to_virt(NCR53c7x0_read32(DSP_REG));
+ }
+
+#if 0
+ printk("scsi%d : new dsp is 0x%lx (virt 0x%p)\n",
+ host->host_no, virt_to_bus(hostdata->dsp), hostdata->dsp);
+#endif
+
+ hostdata->state = STATE_RUNNING;
+ NCR53c7x0_write32 (DSP_REG, virt_to_bus(hostdata->dsp));
+ }
+ }
+ } while (!done);
+}
+
+
+/*
+ * Function : static int abort_connected (struct Scsi_Host *host)
+ *
+ * Purpose : Assuming that the NCR SCSI processor is currently
+ * halted, break the currently established nexus. Clean
+ * up of the NCR53c7x0_cmd and Scsi_Cmnd structures should
+ * be done on receipt of the abort interrupt.
+ *
+ * Inputs : host - SCSI host
+ *
+ */
+
+static int
+abort_connected (struct Scsi_Host *host) {
+#ifdef NEW_ABORT
+ NCR53c7x0_local_declare();
+#endif
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+/* FIXME : this probably should change for production kernels; at the
+   least, counter should move to a per-host structure. */
+ static int counter = 5;
+#ifdef NEW_ABORT
+ int sstat, phase, offset;
+ u32 *script;
+ NCR53c7x0_local_setup(host);
+#endif
+
+ if (--counter <= 0) {
+ disable(host);
+ return 0;
+ }
+
+ printk ("scsi%d : DANGER : abort_connected() called \n",
+ host->host_no);
+
+#ifdef NEW_ABORT
+
+/*
+ * New strategy : Rather than using a generic abort routine,
+ * we'll specifically try to source or sink the appropriate
+ * amount of data for the phase we're currently in (taking into
+ * account the current synchronous offset)
+ */
+
+    sstat = NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ? SSTAT1_REG : SSTAT2_REG);
+ offset = OFFSET (sstat & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
+ phase = sstat & SSTAT2_PHASE_MASK;
+
+/*
+ * SET ATN
+ * MOVE source_or_sink, WHEN CURRENT PHASE
+ * < repeat for each outstanding byte >
+ * JUMP send_abort_message
+ */
+
+ script = hostdata->abort_script = kmalloc (
+ 8 /* instruction size */ * (
+ 1 /* set ATN */ +
+ (!offset ? 1 : offset) /* One transfer per outstanding byte */ +
+ 1 /* send abort message */),
+ GFP_ATOMIC);
+
+
+#else /* def NEW_ABORT */
+ hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+ sizeof(u32);
+#endif /* def NEW_ABORT */
+ hostdata->dsp_changed = 1;
+
+/* XXX - need to flag the command as aborted after the abort_connected
+ code runs
+ */
+ return 0;
+}
+
+/*
+ * Function : static int datapath_residual (Scsi_Host *host)
+ *
+ * Purpose : return residual data count of what's in the chip.
+ *
+ * Inputs : host - SCSI host
+ */
+
+static int
+datapath_residual (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int count, synchronous, sstat;
+ NCR53c7x0_local_setup(host);
+ /* COMPAT : the 700 and 700-66 need to use DFIFO_00_BO_MASK */
+ count = ((NCR53c7x0_read8 (DFIFO_REG) & DFIFO_10_BO_MASK) -
+ (NCR53c7x0_read32 (DBC_REG) & DFIFO_10_BO_MASK)) & DFIFO_10_BO_MASK;
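+    /*
+     * The DFIFO and DBC byte-offset counters wrap, so the difference is
+     * taken modulo the byte-offset field width to get the number of
+     * bytes still sitting in the DMA FIFO.
+     */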
+ synchronous = NCR53c7x0_read8 (SXFER_REG) & SXFER_MO_MASK;
+ /* COMPAT : DDIR is elsewhere on non-'8xx chips. */
+ if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+ /* Receive */
+ if (synchronous)
+ count += (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
+ else
+ if (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT0_REG : SSTAT1_REG) & SSTAT1_ILF)
+ ++count;
+ } else {
+ /* Send */
+ sstat = ((hostdata->chip / 100) == 8) ? NCR53c7x0_read8 (SSTAT0_REG) :
+ NCR53c7x0_read8 (SSTAT1_REG);
+ if (sstat & SSTAT1_OLF)
+ ++count;
+ if (synchronous && (sstat & SSTAT1_ORF))
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : static const char * sbcl_to_phase (int sbcl)
+ *
+ * Purpose : Convert SBCL register to user-parsable phase representation
+ *
+ * Inputs : sbcl - value of sbcl register
+ */
+
+
+static const char *
+sbcl_to_phase (int sbcl) {
+ switch (sbcl & SBCL_PHASE_MASK) {
+ case SBCL_PHASE_DATAIN:
+ return "DATAIN";
+ case SBCL_PHASE_DATAOUT:
+ return "DATAOUT";
+ case SBCL_PHASE_MSGIN:
+ return "MSGIN";
+ case SBCL_PHASE_MSGOUT:
+ return "MSGOUT";
+ case SBCL_PHASE_CMDOUT:
+ return "CMDOUT";
+ case SBCL_PHASE_STATIN:
+ return "STATUSIN";
+ default:
+ return "unknown";
+ }
+}
+
+/*
+ * Function : static const char * sstat2_to_phase (int sstat)
+ *
+ * Purpose : Convert SSTAT2 register to user-parsable phase representation
+ *
+ * Inputs : sstat - value of sstat register
+ */
+
+
+static const char *
+sstat2_to_phase (int sstat) {
+ switch (sstat & SSTAT2_PHASE_MASK) {
+ case SSTAT2_PHASE_DATAIN:
+ return "DATAIN";
+ case SSTAT2_PHASE_DATAOUT:
+ return "DATAOUT";
+ case SSTAT2_PHASE_MSGIN:
+ return "MSGIN";
+ case SSTAT2_PHASE_MSGOUT:
+ return "MSGOUT";
+ case SSTAT2_PHASE_CMDOUT:
+ return "CMDOUT";
+ case SSTAT2_PHASE_STATIN:
+ return "STATUSIN";
+ default:
+ return "unknown";
+ }
+}
+
+/*
+ * Function : static void intr_phase_mismatch (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : Handle phase mismatch interrupts
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * may be NULL.
+ *
+ * Side effects : The abort_connected() routine is called or the NCR chip
+ * is restarted, jumping to the command_complete entry point, or
+ * patching the address and transfer count of the current instruction
+ * and calling the msg_in entry point as appropriate.
+ */
+
+static void
+intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ u32 dbc_dcmd, *dsp, *dsp_next;
+ unsigned char dcmd, sbcl;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int residual;
+ enum {ACTION_ABORT, ACTION_ABORT_PRINT, ACTION_CONTINUE} action =
+ ACTION_ABORT_PRINT;
+ const char *where = NULL;
+ NCR53c7x0_local_setup(host);
+
+ /*
+ * Corrective action is based on where in the SCSI SCRIPT(tm) the error
+ * occurred, as well as which SCSI phase we are currently in.
+ */
+ dsp_next = bus_to_virt(NCR53c7x0_read32(DSP_REG));
+
+ /*
+ * Fetch the current instruction, and remove the operands for easier
+ * interpretation.
+ */
+ dbc_dcmd = NCR53c7x0_read32(DBC_REG);
+ dcmd = (dbc_dcmd & 0xff000000) >> 24;
+ /*
+ * Like other processors, the NCR adjusts the instruction pointer before
+ * instruction decode. Set the DSP address back to what it should
+ * be for this instruction based on its size (2 or 3 32 bit words).
+ */
+ dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
+
+
+ /*
+ * Read new SCSI phase from the SBCL lines. Since all of our code uses
+ * a WHEN conditional instead of an IF conditional, we don't need to
+ * wait for a new REQ.
+ */
+ sbcl = NCR53c7x0_read8(SBCL_REG) & SBCL_PHASE_MASK;
+
+ if (!cmd) {
+ action = ACTION_ABORT_PRINT;
+ where = "no current command";
+ /*
+ * The way my SCSI SCRIPTS(tm) are architected, recoverable phase
+ * mismatches should only occur where we're doing a multi-byte
+ * BMI instruction. Specifically, this means
+ *
+ * - select messages (a SCSI-I target may ignore additional messages
+ * after the IDENTIFY; any target may reject a SDTR or WDTR)
+ *
+ * - command out (targets may send a message to signal an error
+ * condition, or go into STATUSIN after they've decided
+     *   they don't like the command.)
+ *
+ * - reply_message (targets may reject a multi-byte message in the
+ * middle)
+ *
+ * - data transfer routines (command completion with buffer space
+ * left, disconnect message, or error message)
+ */
+ } else if (((dsp >= cmd->data_transfer_start &&
+ dsp < cmd->data_transfer_end)) || dsp == (cmd->residual + 2)) {
+ if ((dcmd & (DCMD_TYPE_MASK|DCMD_BMI_OP_MASK|DCMD_BMI_INDIRECT|
+ DCMD_BMI_MSG|DCMD_BMI_CD)) == (DCMD_TYPE_BMI|
+ DCMD_BMI_OP_MOVE_I)) {
+ residual = datapath_residual (host);
+ if (hostdata->options & OPTION_DEBUG_DISCONNECT)
+ printk ("scsi%d : handling residual transfer (+ %d bytes from DMA FIFO)\n",
+ host->host_no, residual);
+
+ /*
+ * The first instruction is a CALL to the alternate handler for
+ * this data transfer phase, so we can do calls to
+ * munge_msg_restart as we would if control were passed
+ * from normal dynamic code.
+ */
+ if (dsp != cmd->residual + 2) {
+ cmd->residual[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
+ ((dcmd & DCMD_BMI_IO) ? DCMD_TCI_IO : 0)) << 24) |
+ DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
+ cmd->residual[1] = virt_to_bus(hostdata->script)
+ + ((dcmd & DCMD_BMI_IO)
+ ? hostdata->E_other_in : hostdata->E_other_out);
+ }
+
+ /*
+ * The second instruction is the data transfer block
+ * move instruction, reflecting the pointer and count at the
+ * time of the phase mismatch.
+ */
+ cmd->residual[2] = dbc_dcmd + residual;
+ cmd->residual[3] = NCR53c7x0_read32(DNAD_REG) - residual;
+
+ /*
+ * The third and final instruction is a jump to the instruction
+ * which follows the instruction which had to be 'split'
+ */
+ if (dsp != cmd->residual + 2) {
+ cmd->residual[4] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP)
+ << 24) | DBC_TCI_TRUE;
+ cmd->residual[5] = virt_to_bus(dsp_next);
+ }
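+
+ /*
+ * To summarize, cmd->residual now holds up to three SCRIPTS
+ * instructions (six 32 bit words) :
+ *
+ * residual[0 - 1] : CALL to the other_in or other_out handler
+ * residual[2 - 3] : the interrupted block move, with byte count and
+ * address adjusted by the datapath residual
+ * residual[4 - 5] : JUMP to the instruction following the one which
+ * was split
+ *
+ * The CALL and JUMP are only rewritten when we were not already
+ * executing out of the residual buffer.
+ */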
+
+ /*
+ * For the sake of simplicity, transfer control to the
+ * conditional CALL at the start of the residual buffer.
+ */
+ hostdata->dsp = cmd->residual;
+ hostdata->dsp_changed = 1;
+ action = ACTION_CONTINUE;
+ } else {
+ where = "non-BMI dynamic DSA code";
+ action = ACTION_ABORT_PRINT;
+ }
+ } else if (dsp == (hostdata->script + hostdata->E_select_msgout / 4)) {
+ /* Release ATN */
+ NCR53c7x0_write8 (SOCL_REG, 0);
+ switch (sbcl) {
+ /*
+ * Some devices (SQ555 come to mind) grab the IDENTIFY message
+ * sent on selection, and decide to go into COMMAND OUT phase
+ * rather than accepting the rest of the messages or rejecting
+ * them. Handle these devices gracefully.
+ */
+ case SBCL_PHASE_CMDOUT:
+ hostdata->dsp = dsp + 2 /* two _words_ */;
+ hostdata->dsp_changed = 1;
+ printk ("scsi%d : target %d ignored SDTR and went into COMMAND OUT\n",
+ host->host_no, cmd->cmd->target);
+ cmd->flags &= ~CMD_FLAG_SDTR;
+ action = ACTION_CONTINUE;
+ break;
+ case SBCL_PHASE_MSGIN:
+ hostdata->dsp = hostdata->script + hostdata->E_msg_in /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ action = ACTION_CONTINUE;
+ break;
+ default:
+ where="select message out";
+ action = ACTION_ABORT_PRINT;
+ }
+ /*
+ * Some SCSI devices will interpret a command as they read the bytes
+ * off the SCSI bus, and may decide that the command is Bogus before
+ * they've read the entire command off the bus.
+ */
+ } else if (dsp == hostdata->script + hostdata->E_cmdout_cmdout / sizeof
+ (u32)) {
+ hostdata->dsp = hostdata->script + hostdata->E_data_transfer /
+ sizeof (u32);
+ hostdata->dsp_changed = 1;
+ action = ACTION_CONTINUE;
+ /* FIXME : we need to handle message reject, etc. within msg_respond. */
+#ifdef notyet
+ } else if (dsp == hostdata->script + hostdata->E_reply_message) {
+ switch (sbcl) {
+ /* Any other phase mismatches abort the currently executing command. */
+#endif
+ } else {
+ where = "unknown location";
+ action = ACTION_ABORT_PRINT;
+ }
+
+ /* Flush DMA FIFO */
+ if (!hostdata->dstat_valid) {
+ hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+ hostdata->dstat_valid = 1;
+ }
+ if (!(hostdata->dstat & DSTAT_DFE)) {
+ if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+ printk ("scsi%d: Flushing DMA FIFO\n",
+ host->host_no);
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_FLF);
+ /* FIXME : what about stacked DMA interrupts? */
+ while (!((hostdata->dstat = NCR53c7x0_read8(DSTAT_REG)) &
+ DSTAT_DFE));
+ } else {
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_CLF);
+ while (NCR53c7x0_read8 (CTEST3_REG_800) & CTEST3_800_CLF);
+ }
+ hostdata->dstat |= DSTAT_DFE;
+ }
+
+ switch (action) {
+ case ACTION_ABORT_PRINT:
+ printk("scsi%d : %s : unexpected phase %s.\n",
+ host->host_no, where ? where : "unknown location",
+ sbcl_to_phase(sbcl));
+ print_lots (host);
+ /* Fall through to ACTION_ABORT */
+ case ACTION_ABORT:
+ abort_connected (host);
+ break;
+ case ACTION_CONTINUE:
+ break;
+ }
+
+#if 0
+ if (hostdata->dsp_changed) {
+ printk("scsi%d: new dsp 0x%p\n", host->host_no, hostdata->dsp);
+ print_insn (host, hostdata->dsp, "", 1);
+ }
+#endif
+
+}
+
+/*
+ * Function : static void intr_bf (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : handle BUS FAULT interrupts
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * may be NULL.
+ */
+
+static void
+intr_bf (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 *dsp,
+ *next_dsp, /* Current dsp */
+ *dsa,
+ dbc_dcmd; /* DCMD (high eight bits) + DBC */
+ unsigned short pci_status;
+ int tmp;
+ unsigned long flags;
+ char *reason = NULL;
+ /* Default behavior is for a silent error, with a retry until we've
+ exhausted retries. */
+ enum {MAYBE, ALWAYS, NEVER} retry = MAYBE;
+ int report = 0;
+ NCR53c7x0_local_setup(host);
+
+ dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
+ next_dsp = bus_to_virt (NCR53c7x0_read32(DSP_REG));
+ dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
+/* FIXME - check chip type */
+ dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
+
+ /*
+ * Bus faults can be caused by either a Bad Address or
+ * Target Abort. We should check the Received Target Abort
+ * bit of the PCI status register and Master Abort Bit.
+ *
+ * - Master Abort bit indicates that no device claimed
+ * the address with DEVSEL within five clocks
+ *
+ * - Target Abort bit indicates that a target claimed it,
+ * but changed its mind once it saw the byte enables.
+ *
+ */
+
+ if ((hostdata->chip / 100) == 8) {
+ save_flags (flags);
+ cli();
+ tmp = pcibios_read_config_word (hostdata->pci_bus,
+ hostdata->pci_device_fn, PCI_STATUS, &pci_status);
+ restore_flags (flags);
+ if (tmp == PCIBIOS_SUCCESSFUL) {
+ if (pci_status & PCI_STATUS_REC_TARGET_ABORT) {
+ reason = "PCI target abort";
+ pci_status &= ~PCI_STATUS_REC_TARGET_ABORT;
+ } else if (pci_status & PCI_STATUS_REC_MASTER_ABORT) {
+ reason = "No device asserted PCI DEVSEL within five bus clocks";
+ pci_status &= ~PCI_STATUS_REC_MASTER_ABORT;
+ } else if (pci_status & PCI_STATUS_PARITY) {
+ report = 1;
+ pci_status &= ~PCI_STATUS_PARITY;
+ }
+ } else {
+ printk ("scsi%d : couldn't read status register : %s\n",
+ host->host_no, pcibios_strerror (tmp));
+ retry = NEVER;
+ }
+ }
+
+#ifndef notyet
+ report = 1;
+#endif
+ if (report && reason) {
+ printk(KERN_ALERT "scsi%d : BUS FAULT reason = %s\n",
+ host->host_no, reason ? reason : "unknown");
+ print_lots (host);
+ }
+
+#ifndef notyet
+ retry = NEVER;
+#endif
+
+ /*
+ * TODO : we should attempt to recover from any spurious bus
+ * faults. After X retries, we should figure that things are
+ * sufficiently wedged, and call NCR53c7xx_reset.
+ *
+ * This code should only get executed once we've decided that we
+ * cannot retry.
+ */
+
+ if (retry == NEVER) {
+ printk(KERN_ALERT " mail drew@PoohSticks.ORG\n");
+ FATAL (host);
+ }
+}
+
+/*
+ * Function : static void intr_dma (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : handle all DMA interrupts, indicated by the setting
+ * of the DIP bit in the ISTAT register.
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * may be NULL.
+ */
+
+static void
+intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned char dstat; /* DSTAT */
+ u32 *dsp,
+ *next_dsp, /* Current dsp */
+ *dsa,
+ dbc_dcmd; /* DCMD (high eight bits) + DBC */
+ int tmp;
+ unsigned long flags;
+ NCR53c7x0_local_setup(host);
+
+ if (!hostdata->dstat_valid) {
+ hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+ hostdata->dstat_valid = 1;
+ }
+
+ dstat = hostdata->dstat;
+
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk("scsi%d : DSTAT=0x%x\n", host->host_no, (int) dstat);
+
+ dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
+ next_dsp = bus_to_virt(NCR53c7x0_read32(DSP_REG));
+ dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
+/* XXX - check chip type */
+ dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+
+ /*
+ * DSTAT_ABRT is the aborted interrupt. This is set whenever the
+ * SCSI chip is aborted.
+ *
+ * With NCR53c700 and NCR53c700-66 style chips, we should only
+ * get this when the chip is currently running the accept
+ * reselect/select code and we have set the abort bit in the
+ * ISTAT register.
+ *
+ */
+
+ if (dstat & DSTAT_ABRT) {
+#if 0
+ /* XXX - add code here to deal with normal abort */
+ if ((hostdata->options & OPTION_700) && (hostdata->state ==
+ STATE_ABORTING)) {
+ } else
+#endif
+ {
+ printk(KERN_ALERT "scsi%d : unexpected abort interrupt at\n"
+ " ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "s ", 1);
+ FATAL (host);
+ }
+ }
+
+ /*
+ * DSTAT_SSI is the single step interrupt. Should be generated
+ * whenever we have single stepped or are tracing.
+ */
+
+ if (dstat & DSTAT_SSI) {
+ if (hostdata->options & OPTION_DEBUG_TRACE) {
+ } else if (hostdata->options & OPTION_DEBUG_SINGLE) {
+ print_insn (host, dsp, "s ", 0);
+ save_flags(flags);
+ cli();
+/* XXX - should we do this, or can we get away with writing dsp? */
+
+ NCR53c7x0_write8 (DCNTL_REG, (NCR53c7x0_read8(DCNTL_REG) &
+ ~DCNTL_SSM) | DCNTL_STD);
+ restore_flags(flags);
+ } else {
+ printk(KERN_ALERT "scsi%d : unexpected single step interrupt at\n"
+ " ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "", 1);
+ printk(KERN_ALERT " mail drew@PoohSticks.ORG\n");
+ FATAL (host);
+ }
+ }
+
+ /*
+ * DSTAT_IID / DSTAT_OPC (same bit, same meaning, only the name
+ * is different) is generated whenever an illegal instruction is
+ * encountered.
+ *
+ * XXX - we may want to emulate INTFLY here, so we can use
+ * the same SCSI SCRIPT (tm) for NCR53c710 through NCR53c810
+ * chips.
+ */
+
+ if (dstat & DSTAT_OPC) {
+ /*
+ * Ascertain if this IID interrupt occurred before or after a STO
+ * interrupt. Since the interrupt handling code now leaves
+ * DSP unmodified until _after_ all stacked interrupts have been
+ * processed, reading the DSP returns the original DSP register.
+ * This means that if dsp lies between the select code, and
+ * message out following the selection code (where the IID interrupt
+ * would have to have occurred due to the implicit wait for REQ),
+ * we have an IID interrupt resulting from a STO condition and
+ * can ignore it.
+ */
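+ /*
+ * In other words : if the DSP lies anywhere from the E_select entry
+ * point up to eight words past E_select_msgout, or driver self test 2
+ * is running, this IID is treated as a side effect of the selection
+ * timeout and ignored.
+ */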
+
+ if (((dsp >= (hostdata->script + hostdata->E_select / sizeof(u32))) &&
+ (dsp <= (hostdata->script + hostdata->E_select_msgout /
+ sizeof(u32) + 8))) || (hostdata->test_running == 2)) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : ignoring DSTAT_IID for SSTAT_STO\n",
+ host->host_no);
+ if (hostdata->expecting_iid) {
+ hostdata->expecting_iid = 0;
+ hostdata->idle = 1;
+ if (hostdata->test_running == 2) {
+ hostdata->test_running = 0;
+ hostdata->test_completed = 3;
+ } else if (cmd)
+ abnormal_finished (cmd, DID_BAD_TARGET << 16);
+ } else {
+ hostdata->expecting_sto = 1;
+ }
+ /*
+ * We can't guarantee we'll be able to execute the WAIT DISCONNECT
+ * instruction within the 3.4us of bus free and arbitration delay
+ * that a target can RESELECT in and assert REQ after we've dropped
+ * ACK. If this happens, we'll get an illegal instruction interrupt.
+ * Doing away with the WAIT DISCONNECT instructions broke everything,
+ * so instead I'll settle for moving one WAIT DISCONNECT a few
+ * instructions closer to the CLEAR ACK before it to minimize the
+ * chances of this happening, and handle it if it occurs anyway.
+ *
+ * Simply continue with what we were doing, and control should
+ * be transferred to the schedule routine which will ultimately
+ * pass control onto the reselection or selection (not yet)
+ * code.
+ */
+ } else if (dbc_dcmd == 0x48000000 && (NCR53c7x0_read8 (SBCL_REG) &
+ SBCL_REQ)) {
+ if (!(hostdata->options & OPTION_NO_PRINT_RACE))
+ {
+ printk("scsi%d: REQ before WAIT DISCONNECT IID\n",
+ host->host_no);
+ hostdata->options |= OPTION_NO_PRINT_RACE;
+ }
+ } else {
+ printk(KERN_ALERT "scsi%d : illegal instruction\n", host->host_no);
+ print_lots (host);
+ printk(KERN_ALERT " mail drew@PoohSticks.ORG with ALL\n"
+ " boot messages and diagnostic output\n");
+ FATAL (host);
+ }
+ }
+
+ /*
+ * DSTAT_BF are bus fault errors
+ */
+
+ if (dstat & DSTAT_800_BF) {
+ intr_bf (host, cmd);
+ }
+
+
+ /*
+ * DSTAT_SIR interrupts are generated by the execution of
+ * the INT instruction. Since the exact values available
+ * are determined entirely by the SCSI script running,
+ * and are local to a particular script, a unique handler
+ * is called for each script.
+ */
+
+ if (dstat & DSTAT_SIR) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : DSTAT_SIR\n", host->host_no);
+ switch ((tmp = hostdata->dstat_sir_intr (host, cmd))) {
+ case SPECIFIC_INT_NOTHING:
+ case SPECIFIC_INT_RESTART:
+ break;
+ case SPECIFIC_INT_ABORT:
+ abort_connected(host);
+ break;
+ case SPECIFIC_INT_PANIC:
+ printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "", 1);
+ printk(KERN_ALERT " dstat_sir_intr() returned SPECIFIC_INT_PANIC\n");
+ FATAL (host);
+ break;
+ case SPECIFIC_INT_BREAK:
+ intr_break (host, cmd);
+ break;
+ default:
+ printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "", 1);
+ printk(KERN_ALERT" dstat_sir_intr() returned unknown value %d\n",
+ tmp);
+ FATAL (host);
+ }
+ }
+
+ if ((hostdata->chip / 100) == 8 && (dstat & DSTAT_800_MDPE)) {
+ printk(KERN_ALERT "scsi%d : Master Data Parity Error\n",
+ host->host_no);
+ FATAL (host);
+ }
+}
+
+/*
+ * Function : static int print_insn (struct Scsi_Host *host,
+ * u32 *insn, int kernel)
+ *
+ * Purpose : print numeric representation of the instruction pointed
+ * to by insn to the debugging or kernel message buffer
+ * as appropriate.
+ *
+ * If desired, a user level program can interpret this
+ * information.
+ *
+ * Inputs : host, insn - host, pointer to instruction, prefix -
+ * string to prepend, kernel - use printk instead of debugging buffer.
+ *
+ * Returns : size, in u32s, of instruction printed.
+ */
+
+/*
+ * FIXME: should change kernel parameter so that it takes an ENUM
+ * specifying severity - either KERN_ALERT or KERN_PANIC so
+ * all panic messages are output with the same severity.
+ */
+
+static int
+print_insn (struct Scsi_Host *host, const u32 *insn,
+ const char *prefix, int kernel) {
+ char buf[160], /* Temporary buffer and pointer. ICKY
+ arbitrary length. */
+ *tmp;
+ unsigned char dcmd; /* dcmd register for *insn */
+ int size;
+
+ /*
+ * Check that the instruction pointer is not bogus before indirecting
+ * through it, avoiding the red zone at the start of memory.
+ *
+ * FIXME: icky magic needs to happen here on non-intel boxes which
+ * don't have kernel memory mapped in like this. Might be reasonable
+ * to use vverify()?
+ */
+
+ if (MAP_NR(insn) < 1 || MAP_NR(insn + 8) > MAP_NR(high_memory) ||
+ ((((dcmd = (insn[0] >> 24) & 0xff) & DCMD_TYPE_MMI) == DCMD_TYPE_MMI) &&
+ MAP_NR(insn + 12) > MAP_NR(high_memory))) {
+ size = 0;
+ sprintf (buf, "%s%p: address out of range\n",
+ prefix, insn);
+ } else {
+/*
+ * FIXME : (void *) cast in virt_to_bus should be unnecessary, because
+ * it should take const void * as argument.
+ */
+ sprintf(buf, "%s0x%lx (virt 0x%p) : 0x%08x 0x%08x (virt 0x%p)",
+ (prefix ? prefix : ""), virt_to_bus((void *) insn), insn,
+ insn[0], insn[1], bus_to_virt (insn[1]));
+ tmp = buf + strlen(buf);
+ if ((dcmd & DCMD_TYPE_MASK) == DCMD_TYPE_MMI) {
+ sprintf (tmp, " 0x%08x (virt 0x%p)\n", insn[2],
+ bus_to_virt(insn[2]));
+ size = 3;
+ } else {
+ sprintf (tmp, "\n");
+ size = 2;
+ }
+ }
+
+ if (kernel)
+ printk ("%s", buf);
+#ifdef NCR_DEBUG
+ else {
+ size_t len = strlen(buf);
+ debugger_kernel_write(host, buf, len);
+ }
+#endif
+ return size;
+}
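+
+/*
+ * Each line produced above has the form (placeholders, not literal
+ * output) :
+ *
+ * <prefix><bus address of insn> (virt <insn>) : <insn[0]> <insn[1]> (virt <bus_to_virt(insn[1])>)
+ *
+ * with a third hex word appended for memory to memory move instructions.
+ */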
+
+/*
+ * Function : static const char *ncr_state (int state)
+ *
+ * Purpose : convert state (probably from hostdata->state) to a string
+ *
+ * Inputs : state
+ *
+ * Returns : char * representation of state, "unknown" on error.
+ */
+
+static const char *
+ncr_state (int state) {
+ switch (state) {
+ case STATE_HALTED: return "halted";
+ case STATE_WAITING: return "waiting";
+ case STATE_RUNNING: return "running";
+ case STATE_ABORTING: return "aborting";
+ case STATE_DISABLED: return "disabled";
+ default: return "unknown";
+ }
+}
+
+/*
+ * Function : int NCR53c7xx_abort (Scsi_Cmnd *cmd)
+ *
+ * Purpose : Abort an errant SCSI command, doing all necessary
+ * cleanup of the issue_queue, running_list, shared Linux/NCR
+ * dsa issue and reconnect queues.
+ *
+ * Inputs : cmd - command to abort, code - entire result field
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+int
+NCR53c7xx_abort (Scsi_Cmnd *cmd) {
+ NCR53c7x0_local_declare();
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata = host ? (struct NCR53c7x0_hostdata *)
+ host->hostdata : NULL;
+ unsigned long flags;
+ struct NCR53c7x0_cmd *curr, **prev;
+ Scsi_Cmnd *me, **last;
+#if 0
+ static long cache_pid = -1;
+#endif
+
+
+ if (!host) {
+ printk ("Bogus SCSI command pid %ld; no host structure\n",
+ cmd->pid);
+ return SCSI_ABORT_ERROR;
+ } else if (!hostdata) {
+ printk ("Bogus SCSI host %d; no hostdata\n", host->host_no);
+ return SCSI_ABORT_ERROR;
+ }
+ NCR53c7x0_local_setup(host);
+
+/*
+ * CHECK : I don't think that reading ISTAT will unstack any interrupts,
+ * since we need to write the INTF bit to clear it, and SCSI/DMA
+ * interrupts don't clear until we read SSTAT/SIST and DSTAT registers.
+ *
+ * See that this is the case.
+ *
+ * I suspect that several of our failures may be coming from a new fatal
+ * interrupt (possibly due to a phase mismatch) happening after we've left
+ * the interrupt handler, but before the PIC has had the interrupt condition
+ * cleared.
+ */
+
+ if (NCR53c7x0_read8(hostdata->istat) &
+ (ISTAT_DIP|ISTAT_SIP|
+ (hostdata->chip / 100 == 8 ? ISTAT_800_INTF : 0))) {
+ printk ("scsi%d : dropped interrupt for command %ld\n", host->host_no,
+ cmd->pid);
+ NCR53c7x0_intr (host->irq, NULL);
+ return SCSI_ABORT_BUSY;
+ }
+
+ save_flags(flags);
+ cli();
+#if 0
+ if (cache_pid == cmd->pid)
+ panic ("scsi%d : bloody fetus %d\n", host->host_no, cmd->pid);
+ else
+ cache_pid = cmd->pid;
+#endif
+
+
+/*
+ * The command could be hiding in the issue_queue. This would be very
+ * nice, as commands can't be moved from the high level driver's issue queue
+ * into the shared queue until an interrupt routine is serviced, and this
+ * moving is atomic.
+ *
+ * If this is the case, we don't have to worry about anything - we simply
+ * pull the command out of the old queue, and call it aborted.
+ */
+
+ for (me = (Scsi_Cmnd *) hostdata->issue_queue,
+ last = (Scsi_Cmnd **) &(hostdata->issue_queue);
+ me && me != cmd; last = (Scsi_Cmnd **)&(me->SCp.ptr),
+ me = (Scsi_Cmnd *)me->SCp.ptr);
+
+ if (me) {
+ *last = (Scsi_Cmnd *) me->SCp.ptr;
+ if (me->host_scribble) {
+ ((struct NCR53c7x0_cmd *)me->host_scribble)->next = hostdata->free;
+ hostdata->free = (struct NCR53c7x0_cmd *) me->host_scribble;
+ me->host_scribble = NULL;
+ }
+ cmd->result = DID_ABORT << 16;
+ cmd->scsi_done(cmd);
+ printk ("scsi%d : found command %ld in Linux issue queue\n",
+ host->host_no, me->pid);
+ restore_flags(flags);
+ run_process_issue_queue();
+ return SCSI_ABORT_SUCCESS;
+ }
+
+/*
+ * That failing, the command could be in our list of already executing
+ * commands. If this is the case, drastic measures are called for.
+ */
+
+ for (curr = (struct NCR53c7x0_cmd *) hostdata->running_list,
+ prev = (struct NCR53c7x0_cmd **) &(hostdata->running_list);
+ curr && curr->cmd != cmd; prev = (struct NCR53c7x0_cmd **)
+ &(curr->next), curr = (struct NCR53c7x0_cmd *) curr->next);
+
+ if (curr) {
+ if ((cmd->result & 0xff) != 0xff && (cmd->result & 0xff00) != 0xff00) {
+ if (prev)
+ *prev = (struct NCR53c7x0_cmd *) curr->next;
+ curr->next = (struct NCR53c7x0_cmd *) hostdata->free;
+ cmd->host_scribble = NULL;
+ hostdata->free = curr;
+ cmd->scsi_done(cmd);
+ printk ("scsi%d : found finished command %ld in running list\n",
+ host->host_no, cmd->pid);
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+ } else {
+ printk ("scsi%d : DANGER : command running, can not abort.\n",
+ cmd->host->host_no);
+ restore_flags(flags);
+ return SCSI_ABORT_BUSY;
+ }
+ }
+
+/*
+ * And if we couldn't find it in any of our queues, it must have been
+ * a dropped interrupt.
+ */
+
+ curr = (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ if (curr) {
+ curr->next = hostdata->free;
+ hostdata->free = curr;
+ cmd->host_scribble = NULL;
+ }
+
+ if (((cmd->result & 0xff00) == 0xff00) ||
+ ((cmd->result & 0xff) == 0xff)) {
+ printk ("scsi%d : did this command ever run?\n", host->host_no);
+ cmd->result = DID_ABORT << 16;
+ } else {
+ printk ("scsi%d : probably lost INTFLY, normal completion\n",
+ host->host_no);
+/*
+ * FIXME : We need to add an additional flag which indicates if a
+ * command was ever counted as BUSY, so if we end up here we can
+ * decrement the busy count if and only if it is necessary.
+ */
+ --hostdata->busy[cmd->target][cmd->lun];
+ }
+ restore_flags(flags);
+ cmd->scsi_done(cmd);
+
+/*
+ * We need to run process_issue_queue since termination of this command
+ * may allow another queued command to execute first?
+ */
+ return SCSI_ABORT_NOT_RUNNING;
+}
+
+/*
+ * Function : int NCR53c7xx_reset (Scsi_Cmnd *cmd)
+ *
+ * Purpose : perform a hard reset of the SCSI bus and NCR
+ * chip.
+ *
+ * Inputs : cmd - command which caused the SCSI RESET
+ *
+ * Returns : 0 on success.
+ */
+
+int
+NCR53c7xx_reset (Scsi_Cmnd *cmd) {
+ NCR53c7x0_local_declare();
+ unsigned long flags;
+ int found = 0;
+ struct NCR53c7x0_cmd * c;
+ Scsi_Cmnd *tmp;
+ /*
+ * When we call scsi_done(), it's going to wake up anything sleeping on the
+ * resources which were in use by the aborted commands, and we'll start to
+ * get new commands.
+ *
+ * We can't let this happen until after we've re-initialized the driver
+ * structures, and can't reinitialize those structures until after we've
+ * dealt with their contents.
+ *
+ * So, we need to find all of the commands which were running, stick
+ * them on a linked list of completed commands (we'll use the host_scribble
+ * pointer), do our reinitialization, and then call the done function for
+ * each command.
+ */
+ Scsi_Cmnd *nuke_list = NULL;
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+
+ NCR53c7x0_local_setup(host);
+ save_flags(flags);
+ cli();
+ ncr_halt (host);
+ print_lots (host);
+ dump_events (host, 30);
+ ncr_scsi_reset (host);
+ for (tmp = nuke_list = return_outstanding_commands (host, 1 /* free */,
+ 0 /* issue */ ); tmp; tmp = (Scsi_Cmnd *) tmp->SCp.buffer)
+ if (tmp == cmd) {
+ found = 1;
+ break;
+ }
+
+ /*
+ * If we didn't find the command which caused this reset in our running
+ * list, then we've lost it. See that it terminates normally anyway.
+ */
+ if (!found) {
+ c = (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ if (c) {
+ cmd->host_scribble = NULL;
+ c->next = hostdata->free;
+ hostdata->free = c;
+ } else
+ printk ("scsi%d: lost command %ld\n", host->host_no, cmd->pid);
+ cmd->SCp.buffer = (struct scatterlist *) nuke_list;
+ nuke_list = cmd;
+ }
+
+ NCR53c7x0_driver_init (host);
+ hostdata->soft_reset (host);
+ if (hostdata->resets == 0)
+ disable(host);
+ else if (hostdata->resets != -1)
+ --hostdata->resets;
+ sti();
+ for (; nuke_list; nuke_list = tmp) {
+ tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
+ nuke_list->result = DID_RESET << 16;
+ nuke_list->scsi_done (nuke_list);
+ }
+ restore_flags(flags);
+ return SCSI_RESET_SUCCESS;
+}
+
+/*
+ * The NCR SDMS bios follows Annex A of the SCSI-CAM draft, and
+ * therefore shares the scsicam_bios_param function.
+ */
+
+/*
+ * Function : int insn_to_offset (Scsi_Cmnd *cmd, u32 *insn)
+ *
+ * Purpose : convert instructions stored at NCR pointer into data
+ * pointer offset.
+ *
+ * Inputs : cmd - SCSI command; insn - pointer to instruction. Either current
+ * DSP, or saved data pointer.
+ *
+ * Returns : offset on success, -1 on failure.
+ */
+
+
+static int
+insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) cmd->host->hostdata;
+ struct NCR53c7x0_cmd *ncmd =
+ (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ int offset = 0, buffers;
+ struct scatterlist *segment;
+ char *ptr;
+ int found = 0;
+
+/*
+ * With the current code implementation, if the insn is inside dynamically
+ * generated code, the data pointer will be the instruction preceding
+ * the next transfer segment.
+ */
+
+ if (!check_address ((unsigned long) ncmd, sizeof (struct NCR53c7x0_cmd)) &&
+ ((insn >= ncmd->data_transfer_start &&
+ insn < ncmd->data_transfer_end) ||
+ (insn >= ncmd->residual &&
+ insn < (ncmd->residual +
+ sizeof(ncmd->residual))))) {
+ ptr = bus_to_virt(insn[3]);
+
+ if ((buffers = cmd->use_sg)) {
+ for (offset = 0,
+ segment = (struct scatterlist *) cmd->buffer;
+ buffers && !((found = ((ptr >= segment->address) &&
+ (ptr < (segment->address + segment->length)))));
+ --buffers, offset += segment->length, ++segment)
+#if 0
+ printk("scsi%d: comparing 0x%p to 0x%p\n",
+ cmd->host->host_no, saved, segment->address);
+#else
+ ;
+#endif
+ offset += ptr - segment->address;
+ } else {
+ found = 1;
+ offset = ptr - (char *) (cmd->request_buffer);
+ }
+ } else if ((insn >= hostdata->script +
+ hostdata->E_data_transfer / sizeof(u32)) &&
+ (insn <= hostdata->script +
+ hostdata->E_end_data_transfer / sizeof(u32))) {
+ found = 1;
+ offset = 0;
+ }
+ return found ? offset : -1;
+}
+
+
+
+/*
+ * Function : void print_progress (Scsi_Cmnd *cmd)
+ *
+ * Purpose : print the current location of the saved data pointer
+ *
+ * Inputs : cmd - command we are interested in
+ *
+ */
+
+static void
+print_progress (Scsi_Cmnd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_cmd *ncmd =
+ (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ int offset, i;
+ char *where;
+ u32 *ptr;
+ NCR53c7x0_local_setup (cmd->host);
+ for (i = 0; i < 2; ++i) {
+ if (check_address ((unsigned long) ncmd,
+ sizeof (struct NCR53c7x0_cmd)) == -1)
+ continue;
+ if (!i) {
+ where = "saved";
+ ptr = bus_to_virt(ncmd->saved_data_pointer);
+ } else {
+ where = "active";
+ ptr = bus_to_virt (NCR53c7x0_read32 (DSP_REG) -
+ NCR53c7x0_insn_size (NCR53c7x0_read8 (DCMD_REG)) *
+ sizeof(u32));
+ }
+ offset = insn_to_offset (cmd, ptr);
+
+ if (offset != -1)
+ printk ("scsi%d : %s data pointer at offset %d\n",
+ cmd->host->host_no, where, offset);
+ else {
+ int size;
+ printk ("scsi%d : can't determine %s data pointer offset\n",
+ cmd->host->host_no, where);
+ if (ncmd) {
+ size = print_insn (cmd->host,
+ bus_to_virt(ncmd->saved_data_pointer), "", 1);
+ print_insn (cmd->host,
+ bus_to_virt(ncmd->saved_data_pointer) + size * sizeof(u32),
+ "", 1);
+ }
+ }
+ }
+}
+
+
+static void
+print_dsa (struct Scsi_Host *host, u32 *dsa, const char *prefix) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int i, len;
+ char *ptr;
+ Scsi_Cmnd *cmd;
+
+ if (check_address ((unsigned long) dsa, hostdata->dsa_end -
+ hostdata->dsa_start) == -1) {
+ printk("scsi%d : bad dsa virt 0x%p\n", host->host_no, dsa);
+ return;
+ }
+ printk("%sscsi%d : dsa at phys 0x%lx (virt 0x%p)\n"
+ " + %d : dsa_msgout length = %u, data = 0x%x (virt 0x%p)\n" ,
+ prefix ? prefix : "",
+ host->host_no, virt_to_bus (dsa), dsa, hostdata->dsa_msgout,
+ dsa[hostdata->dsa_msgout / sizeof(u32)],
+ dsa[hostdata->dsa_msgout / sizeof(u32) + 1],
+ bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]));
+
+ /*
+ * Only print messages if they're sane in length so we don't
+ * blow the kernel printk buffer on something which won't buy us
+ * anything.
+ */
+
+ if (dsa[hostdata->dsa_msgout / sizeof(u32)] <
+ sizeof (hostdata->free->select))
+ for (i = dsa[hostdata->dsa_msgout / sizeof(u32)],
+ ptr = bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]);
+ i > 0 && !check_address ((unsigned long) ptr, 1);
+ ptr += len, i -= len) {
+ printk(" ");
+ len = print_msg (ptr);
+ printk("\n");
+ if (!len)
+ break;
+ }
+
+ printk(" + %d : select_indirect = 0x%x\n",
+ hostdata->dsa_select, dsa[hostdata->dsa_select / sizeof(u32)]);
+ cmd = (Scsi_Cmnd *) bus_to_virt(dsa[hostdata->dsa_cmnd / sizeof(u32)]);
+ printk(" + %d : dsa_cmnd = 0x%x ", hostdata->dsa_cmnd,
+ (u32) virt_to_bus(cmd));
+ if (cmd) {
+ printk(" result = 0x%x, target = %d, lun = %d, cmd = ",
+ cmd->result, cmd->target, cmd->lun);
+ print_command(cmd->cmnd);
+ } else
+ printk("\n");
+ printk(" + %d : dsa_next = 0x%x\n", hostdata->dsa_next,
+ dsa[hostdata->dsa_next / sizeof(u32)]);
+ if (cmd) {
+ printk("scsi%d target %d : sxfer_sanity = 0x%x, scntl3_sanity = 0x%x\n"
+ " script : ",
+ host->host_no, cmd->target,
+ hostdata->sync[cmd->target].sxfer_sanity,
+ hostdata->sync[cmd->target].scntl3_sanity);
+ for (i = 0; i < (sizeof(hostdata->sync[cmd->target].script) / 4); ++i)
+ printk ("0x%x ", hostdata->sync[cmd->target].script[i]);
+ printk ("\n");
+ print_progress (cmd);
+ }
+}
+/*
+ * Function : void print_queues (Scsi_Host *host)
+ *
+ * Purpose : print the contents of the NCR issue and reconnect queues
+ *
+ * Inputs : host - SCSI host we are interested in
+ *
+ */
+
+static void
+print_queues (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 *dsa, *next_dsa;
+ volatile u32 *current;
+ int left;
+ Scsi_Cmnd *cmd, *next_cmd;
+ unsigned long flags;
+
+ printk ("scsi%d : issue queue\n", host->host_no);
+
+ for (left = host->can_queue, cmd = (Scsi_Cmnd *) hostdata->issue_queue;
+ left >= 0 && cmd;
+ cmd = next_cmd) {
+ next_cmd = (Scsi_Cmnd *) cmd->SCp.ptr;
+ save_flags(flags);
+ cli();
+ if (cmd->host_scribble) {
+ if (check_address ((unsigned long) (cmd->host_scribble),
+ sizeof (cmd->host_scribble)) == -1)
+ printk ("scsi%d: scsi pid %ld bad pointer to NCR53c7x0_cmd\n",
+ host->host_no, cmd->pid);
+ /* print_dsa does sanity check on address, no need to check */
+ else
+ print_dsa (host, ((struct NCR53c7x0_cmd *) cmd->host_scribble)
+ -> dsa, "");
+ } else
+ printk ("scsi%d : scsi pid %ld for target %d lun %d has no NCR53c7x0_cmd\n",
+ host->host_no, cmd->pid, cmd->target, cmd->lun);
+ restore_flags(flags);
+ }
+
+ if (left <= 0) {
+ printk ("scsi%d : loop detected in issue queue\n",
+ host->host_no);
+ }
+
+ /*
+ * Traverse the NCR reconnect and start DSA structures, printing out
+ * each element until we hit the end or detect a loop. Currently,
+ * the reconnect structure is a linked list; and the start structure
+ * is an array. Eventually, the start structure will become a
+ * list as well, since this simplifies the code.
+ */
+
+ printk ("scsi%d : schedule dsa array :\n", host->host_no);
+ for (left = host->can_queue, current = hostdata->schedule;
+ left > 0; current += 2, --left)
+ if (current[0] != hostdata->NOP_insn)
+/* FIXME : convert pointer to dsa_begin to pointer to dsa. */
+ print_dsa (host, bus_to_virt (current[1] -
+ (hostdata->E_dsa_code_begin -
+ hostdata->E_dsa_code_template)), "");
+ printk ("scsi%d : end schedule dsa array\n", host->host_no);
+
+ printk ("scsi%d : reconnect_dsa_head :\n", host->host_no);
+
+ for (left = host->can_queue,
+ dsa = bus_to_virt (hostdata->reconnect_dsa_head);
+ left >= 0 && dsa;
+ dsa = next_dsa) {
+ save_flags (flags);
+ cli();
+ if (check_address ((unsigned long) dsa, sizeof(dsa)) == -1) {
+ printk ("scsi%d: bad DSA pointer 0x%p", host->host_no,
+ dsa);
+ next_dsa = NULL;
+ }
+ else
+ {
+ next_dsa = bus_to_virt(dsa[hostdata->dsa_next / sizeof(u32)]);
+ print_dsa (host, dsa, "");
+ }
+ restore_flags(flags);
+ }
+ printk ("scsi%d : end reconnect_dsa_head\n", host->host_no);
+ if (left < 0)
+ printk("scsi%d: possible loop in ncr reconnect list\n",
+ host->host_no);
+}
+
+static void
+print_lots (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+ u32 *dsp_next, *dsp, *dsa, dbc_dcmd;
+ unsigned char dcmd, sbcl;
+ int i, size;
+ NCR53c7x0_local_setup(host);
+
+ if ((dsp_next = bus_to_virt(NCR53c7x0_read32 (DSP_REG)))) {
+ dbc_dcmd = NCR53c7x0_read32(DBC_REG);
+ dcmd = (dbc_dcmd & 0xff000000) >> 24;
+ dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
+ dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+ sbcl = NCR53c7x0_read8 (SBCL_REG);
+
+
+ printk ("scsi%d : DCMD|DBC=0x%x, DNAD=0x%x (virt 0x%p)\n"
+ " DSA=0x%lx (virt 0x%p)\n"
+ " DSPS=0x%x, TEMP=0x%x (virt 0x%p), DMODE=0x%x\n"
+ " SXFER=0x%x, SCNTL3=0x%x\n"
+ " %s%s%sphase=%s, %d bytes in SCSI FIFO\n"
+ " STEST0=0x%x\n",
+ host->host_no, dbc_dcmd, NCR53c7x0_read32(DNAD_REG),
+ bus_to_virt(NCR53c7x0_read32(DNAD_REG)),
+ virt_to_bus(dsa), dsa,
+ NCR53c7x0_read32(DSPS_REG), NCR53c7x0_read32(TEMP_REG),
+ bus_to_virt (NCR53c7x0_read32(TEMP_REG)),
+ (int) NCR53c7x0_read8(hostdata->dmode),
+ (int) NCR53c7x0_read8(SXFER_REG),
+ (int) NCR53c7x0_read8(SCNTL3_REG_800),
+ (sbcl & SBCL_BSY) ? "BSY " : "",
+ (sbcl & SBCL_SEL) ? "SEL " : "",
+ (sbcl & SBCL_REQ) ? "REQ " : "",
+ sstat2_to_phase(NCR53c7x0_read8 (((hostdata->chip / 100) == 8) ?
+ SSTAT1_REG : SSTAT2_REG)),
+ (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT,
+ NCR53c7x0_read8 (STEST0_REG_800));
+ printk ("scsi%d : DSP 0x%lx (virt 0x%p) ->\n", host->host_no,
+ virt_to_bus(dsp), dsp);
+ for (i = 6; i > 0; --i, dsp += size)
+ size = print_insn (host, dsp, "", 1);
+ if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
+ printk ("scsi%d : connected (SDID=0x%x, SSID=0x%x)\n",
+ host->host_no, NCR53c7x0_read8 (SDID_REG_800),
+ NCR53c7x0_read8 (SSID_REG_800));
+ print_dsa (host, dsa, "");
+ }
+
+#if 1
+ print_queues (host);
+#endif
+ }
+}
+
+/*
+ * Function : static int shutdown (struct Scsi_Host *host)
+ *
+ * Purpose : does a clean (we hope) shutdown of the NCR SCSI
+ * chip. Use prior to dumping core, unloading the NCR driver,
+ *
+ * Returns : 0 on success
+ */
+static int
+shutdown (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ unsigned long flags;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ NCR53c7x0_local_setup(host);
+ save_flags (flags);
+ cli();
+/* Get in a state where we can reset the SCSI bus */
+ ncr_halt (host);
+ ncr_scsi_reset (host);
+ hostdata->soft_reset(host);
+
+ disable (host);
+ restore_flags (flags);
+ return 0;
+}
+
+/*
+ * Function : void ncr_scsi_reset (struct Scsi_Host *host)
+ *
+ * Purpose : reset the SCSI bus.
+ */
+
+static void
+ncr_scsi_reset (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ int sien = 0;
+ NCR53c7x0_local_setup(host);
+ save_flags (flags);
+ cli();
+ if ((hostdata->chip / 100) == 8) {
+ sien = NCR53c7x0_read8(SIEN0_REG_800);
+ NCR53c7x0_write8(SIEN0_REG_800, sien & ~SIEN_RST);
+ }
+ NCR53c7x0_write8(SCNTL1_REG, SCNTL1_RST);
+ udelay(25); /* Minimum amount of time to assert RST */
+ NCR53c7x0_write8(SCNTL1_REG, 0);
+ if ((hostdata->chip / 100) == 8) {
+ NCR53c7x0_write8(SIEN0_REG_800, sien);
+ }
+ restore_flags (flags);
+}
+
+/*
+ * Function : void hard_reset (struct Scsi_Host *host)
+ *
+ */
+
+static void
+hard_reset (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ save_flags (flags);
+ cli();
+ ncr_scsi_reset(host);
+ NCR53c7x0_driver_init (host);
+ if (hostdata->soft_reset)
+ hostdata->soft_reset (host);
+ restore_flags(flags);
+}
+
+
+/*
+ * Function : Scsi_Cmnd *return_outstanding_commands (struct Scsi_Host *host,
+ * int free, int issue)
+ *
+ * Purpose : return a linked list (using the SCp.buffer field as next,
+ * so we don't perturb hostdata. We don't use a field of the
+ * NCR53c7x0_cmd structure since we may not have allocated one
+ * for the command causing the reset.) of Scsi_Cmnd structures that
+ * had propagated below the Linux issue queue level. If free is set,
+ * free the NCR53c7x0_cmd structures which are associated with
+ * the Scsi_Cmnd structures, and clean up any internal
+ * NCR lists that the commands were on. If issue is set,
+ * also return commands in the issue queue.
+ *
+ * Returns : linked list of commands
+ *
+ * NOTE : the caller should insure that the NCR chip is halted
+ * if the free flag is set.
+ */
+
+static Scsi_Cmnd *
+return_outstanding_commands (struct Scsi_Host *host, int free, int issue) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_cmd *c;
+ int i;
+ u32 *current;
+ Scsi_Cmnd *list = NULL, *tmp;
+ for (c = (struct NCR53c7x0_cmd *) hostdata->running_list; c;
+ c = (struct NCR53c7x0_cmd *) c->next) {
+ if (c->cmd->SCp.buffer) {
+ printk ("scsi%d : loop detected in running list!\n", host->host_no);
+ break;
+ } else {
+ printk ("The sti() implicit in a printk() prevents hangs\n");
+ break;
+ }
+
+ c->cmd->SCp.buffer = (struct scatterlist *) list;
+ list = c->cmd;
+ if (free) {
+ c->next = hostdata->free;
+ hostdata->free = c;
+ }
+ }
+
+ if (free) {
+ for (i = 0, current = (u32 *) hostdata->schedule;
+ i < host->can_queue; ++i, current += 2) {
+ current[0] = hostdata->NOP_insn;
+ current[1] = 0xdeadbeef;
+ }
+ hostdata->current = NULL;
+ }
+
+ if (issue) {
+ for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; tmp = tmp->next) {
+ if (tmp->SCp.buffer) {
+ printk ("scsi%d : loop detected in issue queue!\n",
+ host->host_no);
+ break;
+ }
+ tmp->SCp.buffer = (struct scatterlist *) list;
+ list = tmp;
+ }
+ if (free)
+ hostdata->issue_queue = NULL;
+
+ }
+ return list;
+}
+
+/*
+ * Function : static int disable (struct Scsi_Host *host)
+ *
+ * Purpose : disables the given NCR host, causing all commands
+ * to return a driver error. Call this so we can unload the
+ * module during development and try again. Eventually,
+ * we should be able to find clean workarounds for these
+ * problems.
+ *
+ * Inputs : host - hostadapter to twiddle
+ *
+ * Returns : 0 on success.
+ */
+
+static int
+disable (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ Scsi_Cmnd *nuke_list, *tmp;
+ save_flags(flags);
+ cli();
+ if (hostdata->state != STATE_HALTED)
+ ncr_halt (host);
+ nuke_list = return_outstanding_commands (host, 1 /* free */, 1 /* issue */);
+ hard_reset (host);
+ hostdata->state = STATE_DISABLED;
+ restore_flags(flags);
+ printk ("scsi%d : nuking commands\n", host->host_no);
+ for (; nuke_list; nuke_list = tmp) {
+ tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
+ nuke_list->result = DID_ERROR << 16;
+ nuke_list->scsi_done(nuke_list);
+ }
+ printk ("scsi%d : done. \n", host->host_no);
+ printk (KERN_ALERT "scsi%d : disabled. Unload and reload\n",
+ host->host_no);
+ return 0;
+}
+
+/*
+ * Function : static int ncr_halt (struct Scsi_Host *host)
+ *
+ * Purpose : halts the SCSI SCRIPTS(tm) processor on the NCR chip
+ *
+ * Inputs : host - SCSI chip to halt
+ *
+ * Returns : 0 on success
+ */
+
+static int
+ncr_halt (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ unsigned long flags;
+ unsigned char istat, tmp;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int stage;
+ NCR53c7x0_local_setup(host);
+
+ save_flags(flags);
+ cli();
+ /* Stage 0 : eat all interrupts
+ Stage 1 : set ABORT
+ Stage 2 : eat all but abort interrupts
+ Stage 3 : eat all interrupts
+ */
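+ /*
+ * That is : stage 0 -> 1 once no interrupts are pending; stage 1 -> 2
+ * immediately after ISTAT_ABRT has been written; stage 2 -> 3 once the
+ * resulting DSTAT_ABRT interrupt has been read and ISTAT cleared; the
+ * loop exits from stage 3 once no further interrupts are pending.
+ */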
+ for (stage = 0;;) {
+ if (stage == 1) {
+ NCR53c7x0_write8(hostdata->istat, ISTAT_ABRT);
+ ++stage;
+ }
+ istat = NCR53c7x0_read8 (hostdata->istat);
+ if (istat & ISTAT_SIP) {
+ if ((hostdata->chip / 100) == 8) {
+ tmp = NCR53c7x0_read8(SIST0_REG_800);
+ udelay(1);
+ tmp = NCR53c7x0_read8(SIST1_REG_800);
+ } else {
+ tmp = NCR53c7x0_read8(SSTAT0_REG);
+ }
+ } else if (istat & ISTAT_DIP) {
+ tmp = NCR53c7x0_read8(DSTAT_REG);
+ if (stage == 2) {
+ if (tmp & DSTAT_ABRT) {
+ NCR53c7x0_write8(hostdata->istat, 0);
+ ++stage;
+ } else {
+ printk(KERN_ALERT "scsi%d : could not halt NCR chip\n",
+ host->host_no);
+ disable (host);
+ }
+ }
+ }
+ if (!(istat & (ISTAT_SIP|ISTAT_DIP)))
+ if (stage == 0)
+ ++stage;
+ else if (stage == 3)
+ break;
+ }
+ hostdata->state = STATE_HALTED;
+ restore_flags(flags);
+#if 0
+ print_lots (host);
+#endif
+ return 0;
+}
+
+/*
+ * Function: event_name (int event)
+ *
+ * Purpose: map event enum into user-readable strings.
+ */
+
+static const char *
+event_name (int event) {
+ switch (event) {
+ case EVENT_NONE: return "none";
+ case EVENT_ISSUE_QUEUE: return "to issue queue";
+ case EVENT_START_QUEUE: return "to start queue";
+ case EVENT_SELECT: return "selected";
+ case EVENT_DISCONNECT: return "disconnected";
+ case EVENT_RESELECT: return "reselected";
+ case EVENT_COMPLETE: return "completed";
+ case EVENT_IDLE: return "idle";
+ case EVENT_SELECT_FAILED: return "select failed";
+ case EVENT_BEFORE_SELECT: return "before select";
+ case EVENT_RESELECT_FAILED: return "reselect failed";
+ default: return "unknown";
+ }
+}
+
+/*
+ * Function : void dump_events (struct Scsi_Host *host, count)
+ *
+ * Purpose : print last count events which have occurred.
+ */
+static void
+dump_events (struct Scsi_Host *host, int count) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_event event;
+ int i;
+ unsigned long flags;
+ if (hostdata->events) {
+ if (count > hostdata->event_size)
+ count = hostdata->event_size;
+ for (i = hostdata->event_index; count > 0;
+ i = (i ? i - 1 : hostdata->event_size -1), --count) {
+ save_flags(flags);
+/*
+ * By copying the event we're currently examining with interrupts
+ * disabled, we can do multiple printk(), etc. operations and
+ * still be guaranteed that they're happening on the same
+ * event structure.
+ */
+ cli();
+#if 0
+ event = hostdata->events[i];
+#else
+ memcpy ((void *) &event, (void *) &(hostdata->events[i]),
+ sizeof(event));
+#endif
+
+ restore_flags(flags);
+ printk ("scsi%d : %s event %d at %ld secs %ld usecs target %d lun %d\n",
+ host->host_no, event_name (event.event), count,
+ (long) event.time.tv_sec, (long) event.time.tv_usec,
+ event.target, event.lun);
+ if (event.dsa)
+ printk (" event for dsa 0x%lx (virt 0x%p)\n",
+ virt_to_bus(event.dsa), event.dsa);
+ if (event.pid != -1) {
+ printk (" event for pid %ld ", event.pid);
+ print_command (event.cmnd);
+ }
+ }
+ }
+}
+
+/*
+ * Function: check_address
+ *
+ * Purpose: Check to see if a possibly corrupt pointer will fault the
+ * kernel.
+ *
+ * Inputs: addr - address; size - size of area
+ *
+ * Returns: 0 if area is OK, -1 on error.
+ *
+ * NOTES: should be implemented in terms of vverify on kernels
+ * that have it.
+ */
+
+static int
+check_address (unsigned long addr, int size) {
+ return (MAP_NR(addr) < 1 || MAP_NR(addr + size) > MAP_NR(high_memory) ?
+ -1 : 0);
+}
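+
+/*
+ * Typical use, as in print_queues() above :
+ *
+ * if (check_address ((unsigned long) dsa, sizeof(dsa)) == -1)
+ * printk ("scsi%d: bad DSA pointer 0x%p", host->host_no, dsa);
+ */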
+
+#ifdef MODULE
+int
+NCR53c7x0_release(struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+ struct NCR53c7x0_cmd *cmd, *tmp;
+ shutdown (host);
+ if (host->irq != IRQ_NONE)
+ {
+ int irq_count;
+ struct Scsi_Host *tmp;
+ for (irq_count = 0, tmp = first_host; tmp; tmp = tmp->next)
+ if (tmp->hostt == the_template && tmp->irq == host->irq)
+ ++irq_count;
+ if (irq_count == 1)
+ free_irq(host->irq);
+ }
+ if (host->dma_channel != DMA_NONE)
+ free_dma(host->dma_channel);
+ if (host->io_port)
+ release_region(host->io_port, host->n_io_port);
+
+ for (cmd = (struct NCR53c7x0_cmd *) hostdata->free; cmd; cmd = tmp,
+ --hostdata->num_cmds) {
+ tmp = (struct NCR53c7x0_cmd *) cmd->next;
+ /*
+ * If we're going to loop, try to stop it to get a more accurate
+ * count of the leaked commands.
+ */
+ cmd->next = NULL;
+ if (cmd->free)
+ cmd->free ((void *) cmd->real, cmd->size);
+ }
+ if (hostdata->num_cmds)
+ printk ("scsi%d : leaked %d NCR53c7x0_cmd structures\n",
+ host->host_no, hostdata->num_cmds);
+ if (hostdata->events)
+ vfree ((void *)hostdata->events);
+ return 1;
+}
+Scsi_Host_Template driver_template = NCR53c7xx;
+#include "scsi_module.c"
+#endif /* def MODULE */
diff --git a/i386/i386at/gpl/linux/scsi/53c7,8xx.h b/i386/i386at/gpl/linux/scsi/53c7,8xx.h
new file mode 100644
index 00000000..f1dfc4de
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/53c7,8xx.h
@@ -0,0 +1,1584 @@
+/*
+ * NCR 53c{7,8}0x0 driver, header file
+ *
+ * Sponsored by
+ * iX Multiuser Multitasking Magazine
+ * Hannover, Germany
+ * hm@ix.de
+ *
+ * Copyright 1993, 1994, 1995 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@PoohSticks.ORG
+ * +1 (303) 786-7975
+ *
+ * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
+ *
+ * PRE-ALPHA
+ *
+ * For more information, please consult
+ *
+ * NCR 53C700/53C700-66
+ * SCSI I/O Processor
+ * Data Manual
+ *
+ * NCR 53C810
+ * PCI-SCSI I/O Processor
+ * Data Manual
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * +1 (719) 578-3400
+ *
+ * Toll free literature number
+ * +1 (800) 334-5454
+ *
+ */
+
+#ifndef NCR53c7x0_H
+#define NCR53c7x0_H
+#if !defined(LINUX_1_2) && !defined(LINUX_1_3)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE > 65536 + 3 * 256
+#define LINUX_1_3
+#else
+#define LINUX_1_2
+#endif
+#endif
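+
+/*
+ * LINUX_VERSION_CODE encodes the kernel version as
+ * (major << 16) + (minor << 8) + patchlevel, so 65536 + 3 * 256 is
+ * 1.3.0 ; anything newer is treated as a 1.3 series kernel, anything
+ * at or below as 1.2.
+ */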
+
+/*
+ * Prevent name space pollution in hosts.c, and only provide the
+ * define we need to get the NCR53c7x0 driver into the host template
+ * array.
+ */
+
+#if defined(HOSTS_C) || defined(MODULE)
+#include <linux/scsicam.h>
+
+extern int NCR53c7xx_abort(Scsi_Cmnd *);
+extern int NCR53c7xx_detect(Scsi_Host_Template *tpnt);
+extern int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+extern int NCR53c7xx_reset(Scsi_Cmnd *);
+#ifdef MODULE
+extern int NCR53c7xx_release(struct Scsi_Host *);
+#else
+#define NCR53c7xx_release NULL
+#endif
+
+#ifdef LINUX_1_2
+#define NCR53c7xx {NULL, NULL, "NCR53c{7,8}xx (rel 17)", NCR53c7xx_detect,\
+ NULL, /* info */ NULL, /* command, deprecated */ NULL, \
+ NCR53c7xx_queue_command, NCR53c7xx_abort, NCR53c7xx_reset, \
+ NULL /* slave attach */, scsicam_bios_param, /* can queue */ 24, \
+ /* id */ 7, 127 /* old SG_ALL */, /* cmd per lun */ 3, \
+ /* present */ 0, /* unchecked isa dma */ 0, DISABLE_CLUSTERING}
+#else
+#define NCR53c7xx {NULL, NULL, NULL, NULL, \
+ "NCR53c{7,8}xx (rel 17)", NCR53c7xx_detect,\
+ NULL, /* info */ NULL, /* command, deprecated */ NULL, \
+ NCR53c7xx_queue_command, NCR53c7xx_abort, NCR53c7xx_reset, \
+ NULL /* slave attach */, scsicam_bios_param, /* can queue */ 24, \
+ /* id */ 7, 127 /* old SG_ALL */, /* cmd per lun */ 3, \
+ /* present */ 0, /* unchecked isa dma */ 0, DISABLE_CLUSTERING}
+#endif
+
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
+#ifndef HOSTS_C
+#ifdef LINUX_1_2
+/*
+ * Change virtual addresses to physical addresses and vv.
+ * These are trivial on the 1:1 Linux/i386 mapping (but if we ever
+ * make the kernel segment mapped at 0, we need to do translation
+ * on the i386 as well)
+ */
+extern inline unsigned long virt_to_phys(volatile void * address)
+{
+ return (unsigned long) address;
+}
+
+extern inline void * phys_to_virt(unsigned long address)
+{
+ return (void *) address;
+}
+
+/*
+ * IO bus memory addresses are also 1:1 with the physical address
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#define readl(addr) (*(volatile unsigned int *) (addr))
+
+#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
+#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
+#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))
+
+#define mb()
+
+#endif /* def LINUX_1_2 */
+
+/* Register addresses, ordered numerically */
+
+/* SCSI control 0 rw, default = 0xc0 */
+#define SCNTL0_REG 0x00
+#define SCNTL0_ARB1 0x80 /* 0 0 = simple arbitration */
+#define SCNTL0_ARB2 0x40 /* 1 1 = full arbitration */
+#define SCNTL0_STRT 0x20 /* Start Sequence */
+#define SCNTL0_WATN 0x10 /* Select with ATN */
+#define SCNTL0_EPC 0x08 /* Enable parity checking */
+/* Bit 2 is reserved on 800 series chips */
+#define SCNTL0_EPG_700 0x04 /* Enable parity generation */
+#define SCNTL0_AAP 0x02 /* ATN/ on parity error */
+#define SCNTL0_TRG 0x01 /* Target mode */
+
+/* SCSI control 1 rw, default = 0x00 */
+
+#define SCNTL1_REG 0x01
+#define SCNTL1_EXC 0x80 /* Extra Clock Cycle of Data setup */
+#define SCNTL1_ADB 0x40 /* contents of SODL on bus */
+#define SCNTL1_ESR_700 0x20 /* Enable SIOP response to selection
+ and reselection */
+#define SCNTL1_DHP_800 0x20 /* Disable halt on parity error or ATN
+ target mode only */
+#define SCNTL1_CON 0x10 /* Connected */
+#define SCNTL1_RST 0x08 /* SCSI RST/ */
+#define SCNTL1_AESP 0x04 /* Force bad parity */
+#define SCNTL1_SND_700 0x02 /* Start SCSI send */
+#define SCNTL1_IARB_800 0x02 /* Immediate Arbitration, start
+ arbitration immediately after
+ busfree is detected */
+#define SCNTL1_RCV_700 0x01 /* Start SCSI receive */
+#define SCNTL1_SST_800 0x01 /* Start SCSI transfer */
+
+/* SCSI control 2 rw, */
+
+#define SCNTL2_REG_800 0x02
+#define SCNTL2_800_SDU 0x80 /* SCSI disconnect unexpected */
+
+/* SCSI control 3 rw */
+
+#define SCNTL3_REG_800 0x03
+#define SCNTL3_800_SCF_SHIFT 4
+#define SCNTL3_800_SCF_MASK 0x70
+#define SCNTL3_800_SCF2 0x40 /* Synchronous divisor */
+#define SCNTL3_800_SCF1 0x20 /* 0x00 = SCLK/3 */
+#define SCNTL3_800_SCF0 0x10 /* 0x10 = SCLK/1 */
+ /* 0x20 = SCLK/1.5
+ 0x30 = SCLK/2
+ 0x40 = SCLK/3 */
+
+#define SCNTL3_800_CCF_SHIFT 0
+#define SCNTL3_800_CCF_MASK 0x07
+#define SCNTL3_800_CCF2 0x04 /* 0x00 50.01 to 66 */
+#define SCNTL3_800_CCF1 0x02 /* 0x01 16.67 to 25 */
+#define SCNTL3_800_CCF0 0x01 /* 0x02 25.01 - 37.5
+ 0x03 37.51 - 50
+ 0x04 50.01 - 66 */
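+
+/*
+ * For example, with a (hypothetical) 40 MHz SCSI clock the CCF field
+ * would be programmed as 0x03 (the 37.51 - 50 range), ie.
+ * SCNTL3_800_CCF1 | SCNTL3_800_CCF0.
+ */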
+
+/*
+ * SCSI destination ID rw - the appropriate bit is set for the selected
+ * target ID. This is written by the SCSI SCRIPTS processor.
+ * default = 0x00
+ */
+#define SDID_REG_700 0x02
+#define SDID_REG_800 0x06
+
+#define GP_REG_800 0x07 /* General purpose IO */
+#define GP_800_IO1 0x02
+#define GP_800_IO2 0x01
+
+
+/* SCSI interrupt enable rw, default = 0x00 */
+#define SIEN_REG_700 0x03
+#define SIEN0_REG_800 0x40
+#define SIEN_MA 0x80 /* Phase mismatch (ini) or ATN (tgt) */
+#define SIEN_FC 0x40 /* Function complete */
+#define SIEN_700_STO 0x20 /* Selection or reselection timeout */
+#define SIEN_800_SEL 0x20 /* Selected */
+#define SIEN_700_SEL 0x10 /* Selected or reselected */
+#define SIEN_800_RESEL 0x10 /* Reselected */
+#define SIEN_SGE 0x08 /* SCSI gross error */
+#define SIEN_UDC 0x04 /* Unexpected disconnect */
+#define SIEN_RST 0x02 /* SCSI RST/ received */
+#define SIEN_PAR 0x01 /* Parity error */
+
+/*
+ * SCSI chip ID rw
+ * NCR53c700 :
+ * When arbitrating, the highest bit is used, when reselection or selection
+ * occurs, the chip responds to all IDs for which a bit is set.
+ * default = 0x00
+ * NCR53c810 :
+ * Uses bit mapping
+ */
+#define SCID_REG 0x04
+/* Bit 7 is reserved on 800 series chips */
+#define SCID_800_RRE 0x40 /* Enable response to reselection */
+#define SCID_800_SRE 0x20 /* Enable response to selection */
+/* Bits four and three are reserved on 800 series chips */
+#define SCID_800_ENC_MASK 0x07 /* Encoded SCSI ID */
+
+/* SCSI transfer rw, default = 0x00 */
+#define SXFER_REG 0x05
+#define SXFER_DHP 0x80 /* Disable halt on parity */
+
+#define SXFER_TP2 0x40 /* Transfer period msb */
+#define SXFER_TP1 0x20
+#define SXFER_TP0 0x10 /* lsb */
+#define SXFER_TP_MASK 0x70
+/* FIXME : SXFER_TP_SHIFT == 5 is right for '8xx chips */
+#define SXFER_TP_SHIFT 5
+#define SXFER_TP_4 0x00 /* Divisors */
+#define SXFER_TP_5 0x10<<1
+#define SXFER_TP_6 0x20<<1
+#define SXFER_TP_7 0x30<<1
+#define SXFER_TP_8 0x40<<1
+#define SXFER_TP_9 0x50<<1
+#define SXFER_TP_10 0x60<<1
+#define SXFER_TP_11 0x70<<1
+
+#define SXFER_MO3 0x08 /* Max offset msb */
+#define SXFER_MO2 0x04
+#define SXFER_MO1 0x02
+#define SXFER_MO0 0x01 /* lsb */
+#define SXFER_MO_MASK 0x0f
+#define SXFER_MO_SHIFT 0
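+
+/*
+ * A synchronous agreement is packed as a transfer period divisor plus
+ * a maximum offset ; for instance, a (hypothetical) divisor of 5 with
+ * an offset of 8 could be encoded as
+ *
+ * SXFER_TP_5 | ((8 << SXFER_MO_SHIFT) & SXFER_MO_MASK)
+ *
+ * subject to the SXFER_TP_SHIFT caveat above for the different chip
+ * families.
+ */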
+
+/*
+ * SCSI output data latch rw
+ * The contents of this register are driven onto the SCSI bus when
+ * the Assert Data Bus bit of the SCNTL1 register is set and
+ * the CD, IO, and MSG bits of the SOCL register match the SCSI phase
+ */
+#define SODL_REG_700 0x06
+#define SODL_REG_800 0x54
+
+
+/*
+ * SCSI output control latch rw, default = 0
+ * Note that when the chip is being manually programmed as an initiator,
+ * the MSG, CD, and IO bits must be set correctly for the phase the target
+ * is driving the bus in. Otherwise no data transfer will occur due to
+ * phase mismatch.
+ */
+
+#define SBCL_REG 0x0b
+#define SBCL_REQ 0x80 /* REQ */
+#define SBCL_ACK 0x40 /* ACK */
+#define SBCL_BSY 0x20 /* BSY */
+#define SBCL_SEL 0x10 /* SEL */
+#define SBCL_ATN 0x08 /* ATN */
+#define SBCL_MSG 0x04 /* MSG */
+#define SBCL_CD 0x02 /* C/D */
+#define SBCL_IO 0x01 /* I/O */
+#define SBCL_PHASE_CMDOUT SBCL_CD
+#define SBCL_PHASE_DATAIN SBCL_IO
+#define SBCL_PHASE_DATAOUT 0
+#define SBCL_PHASE_MSGIN (SBCL_CD|SBCL_IO|SBCL_MSG)
+#define SBCL_PHASE_MSGOUT (SBCL_CD|SBCL_MSG)
+#define SBCL_PHASE_STATIN (SBCL_CD|SBCL_IO)
+#define SBCL_PHASE_MASK (SBCL_CD|SBCL_IO|SBCL_MSG)
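+
+/*
+ * The current bus phase can be read directly from these lines, as the
+ * phase mismatch handler in 53c7,8xx.c does :
+ *
+ * sbcl = NCR53c7x0_read8(SBCL_REG) & SBCL_PHASE_MASK;
+ *
+ * and then compared against the SBCL_PHASE_* encodings above.
+ */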
+
+/*
+ * SCSI first byte received latch ro
+ * This register contains the first byte received during a block MOVE
+ * SCSI SCRIPTS instruction, including
+ *
+ * Initiator mode Target mode
+ * Message in Command
+ * Status Message out
+ * Data in Data out
+ *
+ * It also contains the selecting or reselecting device's ID and our
+ * ID.
+ *
+ * Note that this is the register the various IF conditionals can
+ * operate on.
+ */
+#define SFBR_REG 0x08
+
+/*
+ * SCSI input data latch ro
+ * In initiator mode, data is latched into this register on the rising
+ * edge of REQ/. In target mode, data is latched on the rising edge of
+ * ACK/
+ */
+#define SIDL_REG_700 0x09
+#define SIDL_REG_800 0x50
+
+/*
+ * SCSI bus data lines ro
+ * This register reflects the instantaneous status of the SCSI data
+ * lines. Note that SCNTL0 must be set to disable parity checking,
+ * otherwise reading this register will latch new parity.
+ */
+#define SBDL_REG_700 0x0a
+#define SBDL_REG_800 0x58
+
+#define SSID_REG_800 0x0a
+#define SSID_800_VAL 0x80 /* Exactly two bits asserted at sel */
+#define SSID_800_ENCID_MASK 0x07 /* Device which performed operation */
+
+
+/*
+ * SCSI bus control lines rw,
+ * instantaneous readout of control lines
+ */
+#define SOCL_REG 0x0b
+#define SOCL_REQ 0x80 /* REQ ro */
+#define SOCL_ACK 0x40 /* ACK ro */
+#define SOCL_BSY 0x20 /* BSY ro */
+#define SOCL_SEL 0x10 /* SEL ro */
+#define SOCL_ATN 0x08 /* ATN ro */
+#define SOCL_MSG 0x04 /* MSG ro */
+#define SOCL_CD 0x02 /* C/D ro */
+#define SOCL_IO 0x01 /* I/O ro */
+/*
+ * Synchronous SCSI Clock Control bits
+ * 0 - set by DCNTL
+ * 1 - SCLK / 1.0
+ * 2 - SCLK / 1.5
+ * 3 - SCLK / 2.0
+ */
+#define SBCL_SSCF1 0x02 /* wo, -66 only */
+#define SBCL_SSCF0 0x01 /* wo, -66 only */
+#define SBCL_SSCF_MASK 0x03
+
+/*
+ * XXX note : when reading the DSTAT and STAT registers to clear interrupts,
+ * insure that 10 clocks elapse between the two
+ */
+/* DMA status ro */
+#define DSTAT_REG 0x0c
+#define DSTAT_DFE 0x80 /* DMA FIFO empty */
+#define DSTAT_800_MDPE 0x40 /* Master Data Parity Error */
+#define DSTAT_800_BF 0x20 /* Bus Fault */
+#define DSTAT_ABRT 0x10 /* Aborted - set on error */
+#define DSTAT_SSI 0x08 /* SCRIPTS single step interrupt */
+#define DSTAT_SIR 0x04 /* SCRIPTS interrupt received -
+ set when INT instruction is
+ executed */
+#define DSTAT_WTD 0x02 /* Watchdog timeout detected */
+#define DSTAT_OPC 0x01 /* Illegal instruction */
+#define DSTAT_800_IID 0x01 /* Same thing, different name */
+
+
+/* NCR53c800 moves this stuff into SIST0 */
+#define SSTAT0_REG 0x0d /* SCSI status 0 ro */
+#define SIST0_REG_800 0x42
+#define SSTAT0_MA 0x80 /* ini : phase mismatch,
+ * tgt : ATN/ asserted
+ */
+#define SSTAT0_CMP 0x40 /* function complete */
+#define SSTAT0_700_STO 0x20 /* Selection or reselection timeout */
+#define SIST0_800_SEL 0x20 /* Selected */
+#define SSTAT0_700_SEL 0x10 /* Selected or reselected */
+#define SIST0_800_RSL 0x10 /* Reselected */
+#define SSTAT0_SGE 0x08 /* SCSI gross error */
+#define SSTAT0_UDC 0x04 /* Unexpected disconnect */
+#define SSTAT0_RST 0x02 /* SCSI RST/ received */
+#define SSTAT0_PAR 0x01 /* Parity error */
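+
+/*
+ * Illustrative sketch, not from the original driver: per the note above,
+ * read DSTAT, wait at least 10 chip clock periods, then read SSTAT0 when
+ * clearing interrupts.  Assumes the NCR53c7x0_read8() macro defined later
+ * in this file and the kernel's udelay(); the 1us delay is a conservative
+ * stand-in for 10 clocks.
+ *
+ *	unsigned char dstat, sstat0;
+ *	dstat  = NCR53c7x0_read8(DSTAT_REG);
+ *	udelay(1);
+ *	sstat0 = NCR53c7x0_read8(SSTAT0_REG);
+ */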
+
+/* And uses SSTAT0 for what was SSTAT1 */
+
+#define SSTAT1_REG 0x0e /* SCSI status 1 ro */
+#define SSTAT1_ILF 0x80 /* SIDL full */
+#define SSTAT1_ORF 0x40 /* SODR full */
+#define SSTAT1_OLF 0x20 /* SODL full */
+#define SSTAT1_AIP 0x10 /* Arbitration in progress */
+#define SSTAT1_LOA 0x08 /* Lost arbitration */
+#define SSTAT1_WOA 0x04 /* Won arbitration */
+#define SSTAT1_RST 0x02 /* Instant readout of RST/ */
+#define SSTAT1_SDP 0x01 /* Instant readout of SDP/ */
+
+#define SSTAT2_REG 0x0f /* SCSI status 2 ro */
+#define SSTAT2_FF3 0x80 /* number of bytes in synchronous */
+#define SSTAT2_FF2 0x40 /* data FIFO */
+#define SSTAT2_FF1 0x20
+#define SSTAT2_FF0 0x10
+#define SSTAT2_FF_MASK 0xf0
+#define SSTAT2_FF_SHIFT 4
+
+/*
+ * Latched signals, latched on the leading edge of REQ/ for initiators,
+ * ACK/ for targets.
+ */
+#define SSTAT2_SDP 0x08 /* SDP */
+#define SSTAT2_MSG 0x04 /* MSG */
+#define SSTAT2_CD 0x02 /* C/D */
+#define SSTAT2_IO 0x01 /* I/O */
+#define SSTAT2_PHASE_CMDOUT SSTAT2_CD
+#define SSTAT2_PHASE_DATAIN SSTAT2_IO
+#define SSTAT2_PHASE_DATAOUT 0
+#define SSTAT2_PHASE_MSGIN (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
+#define SSTAT2_PHASE_MSGOUT (SSTAT2_CD|SSTAT2_MSG)
+#define SSTAT2_PHASE_STATIN (SSTAT2_CD|SSTAT2_IO)
+#define SSTAT2_PHASE_MASK (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
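+
+/*
+ * Illustrative sketch, not from the original driver: extracting the
+ * synchronous FIFO byte count and the latched phase from an SSTAT2 value.
+ * Assumes the NCR53c7x0_read8() macro defined later in this file.
+ *
+ *	unsigned char sstat2 = NCR53c7x0_read8(SSTAT2_REG);
+ *	int fifo_count = (sstat2 & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
+ *	int msg_in     = ((sstat2 & SSTAT2_PHASE_MASK) == SSTAT2_PHASE_MSGIN);
+ */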
+
+
+/* NCR53c700-66 only */
+#define SCRATCHA_REG_00 0x10 /* through 0x13 Scratch A rw */
+/* NCR53c710 and higher */
+#define DSA_REG 0x10 /* DATA structure address */
+
+#define CTEST0_REG_700 0x14 /* Chip test 0 ro */
+#define CTEST0_REG_800 0x18 /* Chip test 0 rw, general purpose */
+/* 0x80 - 0x04 are reserved */
+#define CTEST0_700_RTRG 0x02 /* Real target mode */
+#define CTEST0_700_DDIR 0x01 /* Data direction, 1 =
+ * SCSI bus to host, 0 =
+ * host to SCSI.
+ */
+
+#define CTEST1_REG_700 0x15 /* Chip test 1 ro */
+#define CTEST1_REG_800 0x19 /* Chip test 1 ro */
+#define CTEST1_FMT3 0x80 /* Identify which byte lanes are empty */
+#define CTEST1_FMT2 0x40 /* in the DMA FIFO */
+#define CTEST1_FMT1 0x20
+#define CTEST1_FMT0 0x10
+
+#define CTEST1_FFL3 0x08 /* Identify which byte lanes are full */
+#define CTEST1_FFL2 0x04 /* in the DMA FIFO */
+#define CTEST1_FFL1 0x02
+#define CTEST1_FFL0 0x01
+
+#define CTEST2_REG_700 0x16 /* Chip test 2 ro */
+#define CTEST2_REG_800 0x1a /* Chip test 2 ro */
+
+#define CTEST2_800_DDIR 0x80 /* 1 = SCSI->host */
+#define CTEST2_800_SIGP 0x40 /* A copy of SIGP in ISTAT.
+ Reading this register clears */
+#define CTEST2_800_CIO 0x20 /* Configured as IO */
+#define CTEST2_800_CM 0x10 /* Configured as memory */
+
+/* 0x80 - 0x40 are reserved on 700 series chips */
+#define CTEST2_700_SOFF 0x20 /* SCSI Offset Compare,
+ * As an initiator, this bit is
+ * one when the synchronous offset
+ * is zero, as a target this bit
+ * is one when the synchronous
+ * offset is at the maximum
+ * defined in SXFER
+ */
+#define CTEST2_700_SFP 0x10 /* SCSI FIFO parity bit,
+ * reading CTEST3 unloads a byte
+ * from the FIFO and sets this
+ */
+#define CTEST2_700_DFP 0x08 /* DMA FIFO parity bit,
+ * reading CTEST6 unloads a byte
+ * from the FIFO and sets this
+ */
+#define CTEST2_TEOP 0x04 /* SCSI true end of process,
+ * indicates a totally finished
+ * transfer
+ */
+#define CTEST2_DREQ 0x02 /* Data request signal */
+/* 0x01 is reserved on 700 series chips */
+#define CTEST2_800_DACK 0x01
+
+/*
+ * Chip test 3 ro
+ * Unloads the bottom byte of the eight deep SCSI synchronous FIFO,
+ * check SSTAT2 FIFO full bits to determine size. Note that a GROSS
+ * error results if a read is attempted on this register. Also note
+ * that 16 and 32 bit reads of this register will cause corruption.
+ */
+#define CTEST3_REG_700 0x17
+/* Chip test 3 rw */
+#define CTEST3_REG_800 0x1b
+#define CTEST3_800_V3 0x80 /* Chip revision */
+#define CTEST3_800_V2 0x40
+#define CTEST3_800_V1 0x20
+#define CTEST3_800_V0 0x10
+#define CTEST3_800_FLF 0x08 /* Flush DMA FIFO */
+#define CTEST3_800_CLF 0x04 /* Clear DMA FIFO */
+#define CTEST3_800_FM 0x02 /* Fetch mode pin */
+/* bit 0 is reserved on 800 series chips */
+
+#define CTEST4_REG_700 0x18 /* Chip test 4 rw */
+#define CTEST4_REG_800 0x21 /* Chip test 4 rw */
+/* 0x80 is reserved on 700 series chips */
+#define CTEST4_800_BDIS 0x80 /* Burst mode disable */
+#define CTEST4_ZMOD 0x40 /* High impedance mode */
+#define CTEST4_SZM 0x20 /* SCSI bus high impedance */
+#define CTEST4_700_SLBE 0x10 /* SCSI loopback enabled */
+#define CTEST4_800_SRTM 0x10 /* Shadow Register Test Mode */
+#define CTEST4_700_SFWR 0x08 /* SCSI FIFO write enable,
+ * redirects writes from SODL
+ * to the SCSI FIFO.
+ */
+#define CTEST4_800_MPEE 0x08 /* Enable parity checking
+ during master cycles on PCI
+ bus */
+
+/*
+ * These bits send the contents of the CTEST6 register to the appropriate
+ * byte lane of the 32 bit DMA FIFO. In normal operation all three bits are
+ * zero; when the high bit (FBL2) is set, the low two bits select the byte lane.
+ */
+#define CTEST4_FBL2 0x04
+#define CTEST4_FBL1 0x02
+#define CTEST4_FBL0 0x01
+#define CTEST4_FBL_MASK 0x07
+#define CTEST4_FBL_0 0x04 /* Select DMA FIFO byte lane 0 */
+#define CTEST4_FBL_1 0x05 /* Select DMA FIFO byte lane 1 */
+#define CTEST4_FBL_2 0x06 /* Select DMA FIFO byte lane 2 */
+#define CTEST4_FBL_3 0x07 /* Select DMA FIFO byte lane 3 */
+#define CTEST4_800_SAVE (CTEST4_800_BDIS)
+
+
+#define CTEST5_REG_700 0x19 /* Chip test 5 rw */
+#define CTEST5_REG_800 0x22 /* Chip test 5 rw */
+/*
+ * Clock Address Incrementor. When set, it increments the
+ * DNAD register to the next bus size boundary. It automatically
+ * resets itself when the operation is complete.
+ */
+#define CTEST5_ADCK 0x80
+/*
+ * Clock Byte Counter. When set, it decrements the DBC register to
+ * the next bus size boundary.
+ */
+#define CTEST5_BBCK 0x40
+/*
+ * Reset SCSI Offset. Setting this bit to 1 clears the current offset
+ * pointer in the SCSI synchronous offset counter (SSTAT). This bit
+ * is set to 1 if a SCSI Gross Error Condition occurs. The offset should
+ * be cleared when a synchronous transfer fails. When written, it is
+ * automatically cleared after the SCSI synchronous offset counter is
+ * reset.
+ */
+/* Bit 5 is reserved on 800 series chips */
+#define CTEST5_700_ROFF 0x20
+/*
+ * Master Control for Set or Reset pulses. When 1, writing a 1 to any of the
+ * low four bits of this register sets the corresponding signal; when 0,
+ * writing a 1 clears it.
+ */
+#define CTEST5_MASR 0x10
+#define CTEST5_DDIR 0x08 /* DMA direction */
+/*
+ * Bits 2-0 are reserved on 800 series chips
+ */
+#define CTEST5_700_EOP 0x04 /* End of process */
+#define CTEST5_700_DREQ 0x02 /* Data request */
+#define CTEST5_700_DACK 0x01 /* Data acknowledge */
+
+/*
+ * Chip test 6 rw - writing to this register writes to the byte
+ * lane in the DMA FIFO as determined by the FBL bits in the CTEST4
+ * register.
+ */
+#define CTEST6_REG_700 0x1a
+#define CTEST6_REG_800 0x23
+
+#define CTEST7_REG 0x1b /* Chip test 7 rw */
+/* 0x80 - 0x40 are reserved on NCR53c700 and NCR53c700-66 chips */
+#define CTEST7_10_CDIS 0x80 /* Cache burst disable */
+#define CTEST7_10_SC1 0x40 /* Snoop control bits */
+#define CTEST7_10_SC0 0x20
+#define CTEST7_10_SC_MASK 0x60
+/* 0x20 is reserved on the NCR53c700 */
+#define CTEST7_0060_FM 0x20 /* Fetch mode */
+#define CTEST7_STD 0x10 /* Selection timeout disable */
+#define CTEST7_DFP 0x08 /* DMA FIFO parity bit for CTEST6 */
+#define CTEST7_EVP 0x04 /* 1 = host bus even parity, 0 = odd */
+#define CTEST7_10_TT1 0x02 /* Transfer type */
+#define CTEST7_00_DC 0x02 /* Set to drive DC low during instruction
+ fetch */
+#define CTEST7_DIFF 0x01 /* Differential mode */
+
+#define CTEST7_SAVE ( CTEST7_EVP | CTEST7_DIFF )
+
+
+#define TEMP_REG 0x1c /* through 0x1f Temporary stack rw */
+
+#define DFIFO_REG 0x20 /* DMA FIFO rw */
+/*
+ * 0x80 is reserved on the NCR53c710; the CLF and FLF bits have been
+ * moved into the CTEST8 register.
+ */
+#define DFIFO_00_FLF 0x80 /* Flush DMA FIFO to memory */
+#define DFIFO_00_CLF 0x40 /* Clear DMA and SCSI FIFOs */
+#define DFIFO_BO6 0x40
+#define DFIFO_BO5 0x20
+#define DFIFO_BO4 0x10
+#define DFIFO_BO3 0x08
+#define DFIFO_BO2 0x04
+#define DFIFO_BO1 0x02
+#define DFIFO_BO0 0x01
+#define DFIFO_10_BO_MASK 0x7f /* 7 bit counter */
+#define DFIFO_00_BO_MASK 0x3f /* 6 bit counter */
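+
+/*
+ * Sketch, not from the original driver, of the usual use of the byte offset
+ * counter: after a phase mismatch, the number of bytes still in the DMA FIFO
+ * is the difference between DFIFO's offset counter and the low bits of the
+ * DBC register, modulo the counter width (7 bits on the '710, 6 on the '700).
+ *
+ *	int fifo_residual = (NCR53c7x0_read8(DFIFO_REG) -
+ *	    (NCR53c7x0_read32(DBC_REG) & DFIFO_10_BO_MASK)) & DFIFO_10_BO_MASK;
+ */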
+
+/*
+ * Interrupt status rw
+ * Note that this is the only register which can be read while SCSI
+ * SCRIPTS are being executed.
+ */
+#define ISTAT_REG_700 0x21
+#define ISTAT_REG_800 0x14
+#define ISTAT_ABRT 0x80 /* Software abort; write a
+ * 1 to abort, then wait for the interrupt. */
+/* 0x40 and 0x20 are reserved on NCR53c700 and NCR53c700-66 chips */
+#define ISTAT_10_SRST 0x40 /* software reset */
+#define ISTAT_10_SIGP 0x20 /* signal script */
+/* 0x10 is reserved on NCR53c700 series chips */
+#define ISTAT_800_SEM 0x10 /* semaphore */
+#define ISTAT_CON 0x08 /* 1 when connected */
+#define ISTAT_800_INTF 0x04 /* Interrupt on the fly */
+#define ISTAT_700_PRE 0x04 /* Pointer register empty.
+ * Set to 1 when DSPS and DSP
+ * registers are empty in pipeline
+ * mode, always set otherwise.
+ */
+#define ISTAT_SIP 0x02 /* SCSI interrupt pending from
+ * SCSI portion of SIOP see
+ * SSTAT0
+ */
+#define ISTAT_DIP 0x01 /* DMA interrupt pending
+ * see DSTAT
+ */
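+
+/*
+ * Illustrative sketch, not from the original driver: since ISTAT is the only
+ * register that may be read while SCRIPTS are running, an interrupt handler
+ * typically reads it first and dispatches on SIP/DIP.  hostdata->istat holds
+ * the chip-specific ISTAT offset (see struct NCR53c7x0_hostdata below);
+ * handle_scsi_interrupt() and handle_dma_interrupt() are hypothetical helpers
+ * that would go on to read SSTAT0/SIST0 and DSTAT respectively.
+ *
+ *	unsigned char istat = NCR53c7x0_read8(hostdata->istat);
+ *	if (istat & ISTAT_SIP)
+ *		handle_scsi_interrupt(host);
+ *	if (istat & ISTAT_DIP)
+ *		handle_dma_interrupt(host);
+ */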
+
+/* NCR53c700-66 and NCR53c710 only */
+#define CTEST8_REG 0x22 /* Chip test 8 rw */
+#define CTEST8_0066_EAS 0x80 /* Enable alternate SCSI clock,
+ * ie read from SCLK/ rather than CLK/
+ */
+#define CTEST8_0066_EFM 0x40 /* Enable fetch and master outputs */
+#define CTEST8_0066_GRP 0x20 /* Generate Receive Parity for
+ * pass through. This ensures that
+ * bad parity won't reach the host
+ * bus.
+ */
+#define CTEST8_0066_TE 0x10 /* TolerANT enable. Enable
+ * active negation, should only
+ * be used for slow SCSI
+ * non-differential.
+ */
+#define CTEST8_0066_HSC 0x08 /* Halt SCSI clock */
+#define CTEST8_0066_SRA 0x04 /* Shorten REQ/ACK filtering,
+ * must be set for fast SCSI-II
+ * speeds.
+ */
+#define CTEST8_0066_DAS 0x02 /* Disable automatic target/initiator
+ * switching.
+ */
+#define CTEST8_0066_LDE 0x01 /* Last disconnect enable.
+ * The status of pending
+ * disconnect is maintained by
+ * the core, eliminating
+ * the possibility of missing a
+ * selection or reselection
+ * while waiting to fetch a
+ * WAIT DISCONNECT opcode.
+ */
+
+#define CTEST8_10_V3 0x80 /* Chip revision */
+#define CTEST8_10_V2 0x40
+#define CTEST8_10_V1 0x20
+#define CTEST8_10_V0 0x10
+#define CTEST8_10_V_MASK 0xf0
+#define CTEST8_10_FLF 0x08 /* Flush FIFOs */
+#define CTEST8_10_CLF 0x04 /* Clear FIFOs */
+#define CTEST8_10_FM 0x02 /* Fetch pin mode */
+#define CTEST8_10_SM 0x01 /* Snoop pin mode */
+
+
+/*
+ * The CTEST9 register may be used to differentiate between a
+ * NCR53c700 and a NCR53c710.
+ *
+ * Write 0xff to this register.
+ * Read it.
+ * If the contents are 0xff, it is a NCR53c700
+ * If the contents are 0x00, it is a NCR53c700-66 first revision
+ * If the contents are some other value, it is some other NCR53c700-66
+ */
+#define CTEST9_REG_00 0x23 /* Chip test 9 ro */
+#define LCRC_REG_10 0x23
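+
+/*
+ * Sketch, not from the original driver, of the identification procedure
+ * described above, using the register access macros defined later in this
+ * file:
+ *
+ *	unsigned char id;
+ *	NCR53c7x0_write8(CTEST9_REG_00, 0xff);
+ *	id = NCR53c7x0_read8(CTEST9_REG_00);
+ *	chip_is_700        = (id == 0xff);
+ *	chip_is_70066_rev1 = (id == 0x00);
+ */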
+
+/*
+ * 0x24 through 0x27 are the DMA byte counter register. Instructions
+ * write their high 8 bits into the DCMD register, the low 24 bits into
+ * the DBC register.
+ *
+ * Function is dependent on the command type being executed.
+ */
+
+
+#define DBC_REG 0x24
+/*
+ * For Block Move Instructions, DBC is a 24 bit quantity representing
+ * the number of bytes to transfer.
+ * For Transfer Control Instructions, DBC is bit fielded as follows :
+ */
+/* Bits 20 - 23 should be clear */
+#define DBC_TCI_TRUE (1 << 19) /* Jump when true */
+#define DBC_TCI_COMPARE_DATA (1 << 18) /* Compare data */
+#define DBC_TCI_COMPARE_PHASE (1 << 17) /* Compare phase with DCMD field */
+#define DBC_TCI_WAIT_FOR_VALID (1 << 16) /* Wait for REQ */
+/* Bits 8 - 15 are reserved on some implementations ? */
+#define DBC_TCI_MASK_MASK 0xff00 /* Mask for data compare */
+#define DBC_TCI_MASK_SHIFT 8
+#define DBC_TCI_DATA_MASK 0xff /* Data to be compared */
+#define DBC_TCI_DATA_SHIFT 0
+
+#define DBC_RWRI_IMMEDIATE_MASK 0xff00 /* Immediate data */
+#define DBC_RWRI_IMMEDIATE_SHIFT 8 /* Amount to shift */
+#define DBC_RWRI_ADDRESS_MASK 0x3f0000 /* Register address */
+#define DBC_RWRI_ADDRESS_SHIFT 16
+
+
+/*
+ * DMA command r/w
+ */
+#define DCMD_REG 0x27
+#define DCMD_TYPE_MASK 0xc0 /* Masks off type */
+#define DCMD_TYPE_BMI 0x00 /* Indicates a Block Move instruction */
+#define DCMD_BMI_IO 0x01 /* I/O, CD, and MSG bits selecting */
+#define DCMD_BMI_CD 0x02 /* the phase for the block MOVE */
+#define DCMD_BMI_MSG 0x04 /* instruction */
+
+#define DCMD_BMI_OP_MASK 0x18 /* mask for opcode */
+#define DCMD_BMI_OP_MOVE_T 0x00 /* MOVE */
+#define DCMD_BMI_OP_MOVE_I 0x08 /* MOVE Initiator */
+
+#define DCMD_BMI_INDIRECT 0x20 /* Indirect addressing */
+
+#define DCMD_TYPE_TCI 0x80 /* Indicates a Transfer Control
+ instruction */
+#define DCMD_TCI_IO 0x01 /* I/O, CD, and MSG bits selecting */
+#define DCMD_TCI_CD 0x02 /* the phase for the block MOVE */
+#define DCMD_TCI_MSG 0x04 /* instruction */
+#define DCMD_TCI_OP_MASK 0x38 /* mask for opcode */
+#define DCMD_TCI_OP_JUMP 0x00 /* JUMP */
+#define DCMD_TCI_OP_CALL 0x08 /* CALL */
+#define DCMD_TCI_OP_RETURN 0x10 /* RETURN */
+#define DCMD_TCI_OP_INT 0x18 /* INT */
+
+#define DCMD_TYPE_RWRI 0x40 /* Indicates I/O or register Read/Write
+ instruction */
+#define DCMD_RWRI_OPC_MASK 0x38 /* Opcode mask */
+#define DCMD_RWRI_OPC_WRITE 0x28 /* Write SFBR to register */
+#define DCMD_RWRI_OPC_READ 0x30 /* Read register to SFBR */
+#define DCMD_RWRI_OPC_MODIFY 0x38 /* Modify in place */
+
+#define DCMD_RWRI_OP_MASK 0x07
+#define DCMD_RWRI_OP_MOVE 0x00
+#define DCMD_RWRI_OP_SHL 0x01
+#define DCMD_RWRI_OP_OR 0x02
+#define DCMD_RWRI_OP_XOR 0x03
+#define DCMD_RWRI_OP_AND 0x04
+#define DCMD_RWRI_OP_SHR 0x05
+#define DCMD_RWRI_OP_ADD 0x06
+#define DCMD_RWRI_OP_ADDC 0x07
+
+#define DCMD_TYPE_MMI 0xc0 /* Indicates a Memory Move instruction
+ (three words) */
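+
+/*
+ * Sketch, not from the original driver: the first 32 bit word of a SCRIPTS
+ * instruction holds DCMD in the high byte and DBC in the low 24 bits, so a
+ * block MOVE of "count" bytes in DATA IN phase could be assembled as
+ *
+ *	u32 insn0 = ((u32) (DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I |
+ *	    DCMD_BMI_IO) << 24) | (count & 0x00ffffff);
+ *
+ * with the buffer address in the instruction's second word.
+ */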
+
+
+#define DNAD_REG 0x28 /* through 0x2b DMA next address for
+ data */
+#define DSP_REG 0x2c /* through 0x2f DMA SCRIPTS pointer rw */
+#define DSPS_REG 0x30 /* through 0x33 DMA SCRIPTS pointer
+ save rw */
+#define DMODE_REG_00 0x34 /* DMA mode rw */
+#define DMODE_00_BL1 0x80 /* Burst length bits */
+#define DMODE_00_BL0 0x40
+#define DMODE_BL_MASK 0xc0
+/* Burst lengths (800) */
+#define DMODE_BL_2 0x00 /* 2 transfers */
+#define DMODE_BL_4 0x40 /* 4 transfers */
+#define DMODE_BL_8 0x80 /* 8 transfers */
+#define DMODE_BL_16 0xc0 /* 16 transfers */
+
+#define DMODE_700_BW16 0x20 /* Host buswidth = 16 */
+#define DMODE_700_286 0x10 /* 286 mode */
+#define DMODE_700_IOM 0x08 /* Transfer to IO port */
+#define DMODE_700_FAM 0x04 /* Fixed address mode */
+#define DMODE_700_PIPE 0x02 /* Pipeline mode disables
+ * automatic fetch / exec
+ */
+#define DMODE_MAN 0x01 /* Manual start mode,
+ * requires a 1 to be written
+ * to the start DMA bit in the DCNTL
+ * register to run scripts
+ */
+
+#define DMODE_700_SAVE ( DMODE_BL_MASK | DMODE_700_BW16 | DMODE_700_286 )
+
+/* NCR53c800 series only */
+#define SCRATCHA_REG_800 0x34 /* through 0x37 Scratch A rw */
+/* NCR53c710 only */
+#define SCRATCB_REG_10 0x34 /* through 0x37 scratch B rw */
+
+#define DMODE_REG_10 0x38 /* DMA mode rw, NCR53c710 and newer */
+#define DMODE_800_SIOM 0x20 /* Source IO = 1 */
+#define DMODE_800_DIOM 0x10 /* Destination IO = 1 */
+#define DMODE_800_ERL 0x08 /* Enable Read Line */
+
+/* 35-38 are reserved on 700 and 700-66 series chips */
+#define DIEN_REG 0x39 /* DMA interrupt enable rw */
+/* 0x80, 0x40, and 0x20 are reserved on 700-series chips */
+#define DIEN_800_MDPE 0x40 /* Master data parity error */
+#define DIEN_800_BF 0x20 /* BUS fault */
+#define DIEN_ABRT 0x10 /* Enable aborted interrupt */
+#define DIEN_SSI 0x08 /* Enable single step interrupt */
+#define DIEN_SIR 0x04 /* Enable SCRIPTS INT command
+ * interrupt
+ */
+/* 0x02 is reserved on 800 series chips */
+#define DIEN_700_WTD 0x02 /* Enable watchdog timeout interrupt */
+#define DIEN_700_OPC 0x01 /* Enable illegal instruction
+ * interrupt
+ */
+#define DIEN_800_IID 0x01 /* Same meaning, different name */
+
+/*
+ * DMA watchdog timer rw
+ * set in 16 CLK input periods.
+ */
+#define DWT_REG 0x3a
+
+/* DMA control rw */
+#define DCNTL_REG 0x3b
+#define DCNTL_700_CF1 0x80 /* Clock divisor bits */
+#define DCNTL_700_CF0 0x40
+#define DCNTL_700_CF_MASK 0xc0
+/* Clock divisors Divisor SCLK range (MHZ) */
+#define DCNTL_700_CF_2 0x00 /* 2.0 37.51-50.00 */
+#define DCNTL_700_CF_1_5 0x40 /* 1.5 25.01-37.50 */
+#define DCNTL_700_CF_1 0x80 /* 1.0 16.67-25.00 */
+#define DCNTL_700_CF_3 0xc0 /* 3.0 50.01-66.67 (53c700-66) */
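+
+/*
+ * Sketch, not from the original driver: choosing a '700 clock divisor from
+ * the SCSI clock frequency in MHz according to the table above.  "mhz" is a
+ * hypothetical variable; DCNTL_700_CF_3 applies to the 53c700-66 only.
+ *
+ *	unsigned char cf;
+ *	if (mhz <= 25)
+ *		cf = DCNTL_700_CF_1;
+ *	else if (mhz <= 37.5)
+ *		cf = DCNTL_700_CF_1_5;
+ *	else if (mhz <= 50)
+ *		cf = DCNTL_700_CF_2;
+ *	else
+ *		cf = DCNTL_700_CF_3;
+ */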
+
+#define DCNTL_700_S16 0x20 /* Load scripts 16 bits at a time */
+#define DCNTL_SSM 0x10 /* Single step mode */
+#define DCNTL_700_LLM 0x08 /* Low level mode, can only be set
+ * after selection */
+#define DCNTL_800_IRQM 0x08 /* Totem pole IRQ pin */
+#define DCNTL_STD 0x04 /* Start DMA / SCRIPTS */
+/* 0x02 is reserved */
+#define DCNTL_00_RST 0x01 /* Software reset, resets everything
+ * but 286 mode bit in DMODE. On the
+ * NCR53c710, this bit moved to CTEST8
+ */
+#define DCNTL_10_COM 0x01 /* 700 software compatibility mode */
+
+#define DCNTL_700_SAVE ( DCNTL_700_CF_MASK | DCNTL_700_S16 )
+
+
+/* NCR53c700-66 only */
+#define SCRATCHB_REG_00 0x3c /* through 0x3f scratch b rw */
+#define SCRATCHB_REG_800 0x5c /* through 0x5f scratch b rw */
+/* NCR53c710 only */
+#define ADDER_REG_10 0x3c /* Adder, NCR53c710 only */
+
+#define SIEN1_REG_800 0x41
+#define SIEN1_800_STO 0x04 /* selection/reselection timeout */
+#define SIEN1_800_GEN 0x02 /* general purpose timer */
+#define SIEN1_800_HTH 0x01 /* handshake to handshake */
+
+#define SIST1_REG_800 0x43
+#define SIST1_800_STO 0x04 /* selection/reselection timeout */
+#define SIST1_800_GEN 0x02 /* general purpose timer */
+#define SIST1_800_HTH 0x01 /* handshake to handshake */
+
+#define SLPAR_REG_800 0x44 /* Parity */
+
+#define MACNTL_REG_800 0x46 /* Memory access control */
+#define MACNTL_800_TYP3 0x80
+#define MACNTL_800_TYP2 0x40
+#define MACNTL_800_TYP1 0x20
+#define MACNTL_800_TYP0 0x10
+#define MACNTL_800_DWR 0x08
+#define MACNTL_800_DRD 0x04
+#define MACNTL_800_PSCPT 0x02
+#define MACNTL_800_SCPTS 0x01
+
+#define GPCNTL_REG_800 0x47 /* General Purpose Pin Control */
+
+/* Timeouts are expressed such that 0=off, 1=100us, doubling after that */
+#define STIME0_REG_800 0x48 /* SCSI Timer Register 0 */
+#define STIME0_800_HTH_MASK 0xf0 /* Handshake to Handshake timeout */
+#define STIME0_800_HTH_SHIFT 4
+#define STIME0_800_SEL_MASK 0x0f /* Selection timeout */
+#define STIME0_800_SEL_SHIFT 0
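+
+/*
+ * Worked example, not from the original driver: with 0 = off, 1 = 100us and
+ * each step doubling, an encoded value v > 0 is roughly 100us << (v - 1), so
+ * v = 10 gives about 51.2ms.  The selection timeout field could then be set
+ * as
+ *
+ *	stime0 = (stime0 & ~STIME0_800_SEL_MASK) |
+ *	    ((10 << STIME0_800_SEL_SHIFT) & STIME0_800_SEL_MASK);
+ */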
+
+#define STIME1_REG_800 0x49
+#define STIME1_800_GEN_MASK 0x0f /* General purpose timer */
+
+#define RESPID_REG_800 0x4a /* Response ID, bit fielded. 8
+ bits on narrow chips, 16 on WIDE */
+
+#define STEST0_REG_800 0x4c
+#define STEST0_800_SLT 0x08 /* Selection response logic test */
+#define STEST0_800_ART 0x04 /* Arbitration priority encoder test */
+#define STEST0_800_SOZ 0x02 /* Synchronous offset zero */
+#define STEST0_800_SOM 0x01 /* Synchronous offset maximum */
+
+#define STEST1_REG_800 0x4d
+#define STEST1_800_SCLK 0x80 /* Disable SCSI clock */
+
+#define STEST2_REG_800 0x4e
+#define STEST2_800_SCE 0x80 /* Enable SOCL/SODL */
+#define STEST2_800_ROF 0x40 /* Reset SCSI sync offset */
+#define STEST2_800_SLB 0x10 /* Enable SCSI loopback mode */
+#define STEST2_800_SZM 0x08 /* SCSI high impedance mode */
+#define STEST2_800_EXT 0x02 /* Extend REQ/ACK filter 30 to 60ns */
+#define STEST2_800_LOW 0x01 /* SCSI low level mode */
+
+#define STEST3_REG_800 0x4f
+#define STEST3_800_TE 0x80 /* Enable active negation */
+#define STEST3_800_STR 0x40 /* SCSI FIFO test read */
+#define STEST3_800_HSC 0x20 /* Halt SCSI clock */
+#define STEST3_800_DSI 0x10 /* Disable single initiator response */
+#define STEST3_800_TTM 0x04 /* Time test mode */
+#define STEST3_800_CSF 0x02 /* Clear SCSI FIFO */
+#define STEST3_800_STW 0x01 /* SCSI FIFO test write */
+
+#define OPTION_PARITY 0x1 /* Enable parity checking */
+#define OPTION_TAGGED_QUEUE 0x2 /* Enable SCSI-II tagged queuing */
+#define OPTION_700 0x8 /* Always run NCR53c700 scripts */
+#define OPTION_INTFLY 0x10 /* Use INTFLY interrupts */
+#define OPTION_DEBUG_INTR 0x20 /* Debug interrupts */
+#define OPTION_DEBUG_INIT_ONLY 0x40 /* Run initialization code and
+ simple test code, return
+ DID_NO_CONNECT if any SCSI
+ commands are attempted. */
+#define OPTION_DEBUG_READ_ONLY 0x80 /* Return DID_ERROR if any
+ SCSI write is attempted */
+#define OPTION_DEBUG_TRACE 0x100 /* Animated trace mode, print
+ each address and instruction
+ executed to debug buffer. */
+#define OPTION_DEBUG_SINGLE 0x200 /* stop after executing one
+ instruction */
+#define OPTION_SYNCHRONOUS 0x400 /* Enable sync SCSI. */
+#define OPTION_MEMORY_MAPPED 0x800 /* NCR registers have valid
+ memory mapping */
+#define OPTION_IO_MAPPED 0x1000 /* NCR registers have valid
+ I/O mapping */
+#define OPTION_DEBUG_PROBE_ONLY 0x2000 /* Probe only, don't even init */
+#define OPTION_DEBUG_TESTS_ONLY 0x4000 /* Probe, init, run selected tests */
+#define OPTION_DEBUG_TEST0 0x08000 /* Run test 0 */
+#define OPTION_DEBUG_TEST1 0x10000 /* Run test 1 */
+#define OPTION_DEBUG_TEST2 0x20000 /* Run test 2 */
+#define OPTION_DEBUG_DUMP 0x40000 /* Dump commands */
+#define OPTION_DEBUG_TARGET_LIMIT 0x80000 /* Only talk to target+luns specified */
+#define OPTION_DEBUG_NCOMMANDS_LIMIT 0x100000 /* Limit the number of commands */
+#define OPTION_DEBUG_SCRIPT 0x200000 /* Print when checkpoints are passed */
+#define OPTION_DEBUG_FIXUP 0x400000 /* print fixup values */
+#define OPTION_DEBUG_DSA 0x800000
+#define OPTION_DEBUG_CORRUPTION 0x1000000 /* Detect script corruption */
+#define OPTION_DEBUG_SDTR 0x2000000 /* Debug SDTR problem */
+#define OPTION_DEBUG_MISMATCH 0x4000000 /* Debug phase mismatches */
+#define OPTION_DISCONNECT 0x8000000 /* Allow disconnect */
+#define OPTION_DEBUG_DISCONNECT 0x10000000
+#define OPTION_ALWAYS_SYNCHRONOUS 0x20000000 /* Negotiate sync. transfers
+ on power up */
+#define OPTION_DEBUG_QUEUES 0x80000000
+#define OPTION_DEBUG_ALLOCATION 0x100000000LL
+#define OPTION_DEBUG_SYNCHRONOUS 0x200000000LL /* Sanity check SXFER and
+ SCNTL3 registers */
+#define OPTION_NO_ASYNC 0x400000000LL /* Don't automagically send
+ SDTR for async transfers when
+ we haven't been told to do
+ a synchronous transfer. */
+#define OPTION_NO_PRINT_RACE 0x800000000LL /* Don't print message when
+ the reselect/WAIT DISCONNECT
+ race condition hits */
+#if !defined(PERM_OPTIONS)
+#define PERM_OPTIONS 0
+#endif
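+
+/*
+ * Illustrative only, not a recommended configuration: a build could pre-set
+ * PERM_OPTIONS on the compiler command line or before this point to force a
+ * particular mode, e.g.
+ *
+ *	#define PERM_OPTIONS (OPTION_IO_MAPPED | OPTION_PARITY | \
+ *	    OPTION_DISCONNECT | OPTION_SYNCHRONOUS)
+ */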
+
+struct NCR53c7x0_synchronous {
+ u32 select_indirect; /* Value used for indirect selection */
+ u32 script[8]; /* Size ?? Script used when target is
+ reselected */
+ unsigned char synchronous_want[5]; /* Per target desired SDTR */
+/*
+ * Set_synchronous programs these, select_indirect and current settings after
+ * int_debug_should show a match.
+ */
+ unsigned char sxfer_sanity, scntl3_sanity;
+};
+
+#define CMD_FLAG_SDTR 1 /* Initiating synchronous
+ transfer negotiation */
+#define CMD_FLAG_WDTR 2 /* Initiating wide transfer
+ negotiation */
+#define CMD_FLAG_DID_SDTR 4 /* did SDTR */
+#define CMD_FLAG_DID_WDTR 8 /* did WDTR */
+
+struct NCR53c7x0_table_indirect {
+ u32 count;
+ void *address;
+};
+
+enum ncr_event {
+ EVENT_NONE = 0,
+/*
+ * Order is IMPORTANT, since these must correspond to the event interrupts
+ * in 53c7,8xx.scr
+ */
+
+ EVENT_ISSUE_QUEUE = 0x5000000, /* Command was added to issue queue */
+ EVENT_START_QUEUE, /* Command moved to start queue */
+ EVENT_SELECT, /* Command completed selection */
+ EVENT_DISCONNECT, /* Command disconnected */
+ EVENT_RESELECT, /* Command reselected */
+ EVENT_COMPLETE, /* Command completed */
+ EVENT_IDLE,
+ EVENT_SELECT_FAILED,
+ EVENT_BEFORE_SELECT,
+ EVENT_RESELECT_FAILED
+};
+
+struct NCR53c7x0_event {
+ enum ncr_event event; /* What type of event */
+ unsigned char target;
+ unsigned char lun;
+ struct timeval time;
+ u32 *dsa; /* What's in the DSA register now (virt) */
+/*
+ * A few things about the command with that SCSI pid, so we know what
+ * happened even after the Scsi_Cmnd structure in question has disappeared.
+ */
+ unsigned long pid; /* The SCSI PID which caused this
+ event */
+ unsigned char cmnd[12];
+};
+
+/*
+ * Things in the NCR53c7x0_cmd structure are split into two parts :
+ *
+ * 1. A fixed portion, for things which are not accessed directly by static NCR
+ * code (ie, are referenced only by the Linux side of the driver,
+ * or only by dynamically generated code).
+ *
+ * 2. The DSA portion, for things which are accessed directly by static NCR
+ * code.
+ *
+ * This is a little ugly, but it
+ * 1. Avoids conflicts between the NCR code's picture of the structure, and
+ * Linux code's idea of what it looks like.
+ *
+ * 2. Minimizes the pain in the Linux side of the code needed
+ * to calculate real dsa locations for things, etc.
+ *
+ */
+
+struct NCR53c7x0_cmd {
+ void *real; /* Real, unaligned address for
+ free function */
+ void (* free)(void *, int); /* Command to deallocate; NULL
+ for structures allocated with
+ scsi_register, etc. */
+ Scsi_Cmnd *cmd; /* Associated Scsi_Cmnd
+ structure, Scsi_Cmnd points
+ at NCR53c7x0_cmd using
+ host_scribble structure */
+
+ int size; /* scsi_malloc'd size of this
+ structure */
+
+ int flags; /* CMD_* flags */
+
+/*
+ * SDTR and WIDE messages are an either/or affair
+ * in this message, since we will go into message out and send
+ * _the whole mess_ without dropping out of message out to
+ * let the target go into message in after sending the first
+ * message.
+ */
+
+ unsigned char select[11]; /* Select message, includes
+ IDENTIFY
+ (optional) QUEUE TAG
+ (optional) SDTR or WDTR
+ */
+
+
+ volatile struct NCR53c7x0_cmd *next; /* Linux maintained lists (free,
+ running, eventually finished) */
+
+
+ u32 *data_transfer_start; /* Start of data transfer routines */
+ u32 *data_transfer_end; /* Address after end of data transfer
+ routines */
+/*
+ * The following three fields were moved from the DSA proper to here
+ * since only dynamically generated NCR code refers to them, meaning
+ * we don't need dsa_* absolutes, and it is simpler to let the
+ * host code refer to them directly.
+ */
+
+/*
+ * HARD CODED : residual and saved_residual need to agree with the sizes
+ * used in NCR53c7,8xx.scr.
+ *
+ * FIXME: we want to consider the case where we have odd-length
+ * scatter/gather buffers and a WIDE transfer, in which case
+ * we'll need to use the CHAIN MOVE instruction. Ick.
+ */
+ u32 residual[6]; /* Residual data transfer which
+ allows pointer code to work
+ right.
+
+ [0-1] : Conditional call to
+ appropriate other transfer
+ routine.
+ [2-3] : Residual block transfer
+ instruction.
+ [4-5] : Jump to instruction
+ after splice.
+ */
+ u32 saved_residual[6]; /* Copy of old residual, so we
+ can get another partial
+ transfer and still recover
+ */
+
+ u32 saved_data_pointer; /* Saved data pointer */
+
+ u32 dsa_next_addr; /* _Address_ of dsa_next field
+ in this dsa for RISCy
+ style constant. */
+
+ u32 dsa_addr; /* Address of dsa; RISCy style
+ constant */
+
+ u32 dsa[0]; /* Variable length (depending
+ on host type, number of scatter /
+ gather buffers, etc). */
+};
+
+struct NCR53c7x0_break {
+ u32 *address, old_instruction[2];
+ struct NCR53c7x0_break *next;
+ unsigned char old_size; /* Size of old instruction */
+};
+
+/* Indicates that the NCR is not executing code */
+#define STATE_HALTED 0
+/*
+ * Indicates that the NCR is executing the wait for select / reselect
+ * script. Only used when running NCR53c700 compatible scripts; this is the
+ * only state during which an ABORT is _not_ considered an error condition.
+ */
+#define STATE_WAITING 1
+/* Indicates that the NCR is executing other code. */
+#define STATE_RUNNING 2
+/*
+ * Indicates that the NCR was being aborted.
+ */
+#define STATE_ABORTING 3
+/* Indicates that the NCR was successfully aborted. */
+#define STATE_ABORTED 4
+/* Indicates that the NCR has been disabled due to a fatal error */
+#define STATE_DISABLED 5
+
+/*
+ * Where knowledge of SCSI SCRIPT(tm) specified values is needed
+ * in an interrupt handler, a separate handler exists for each
+ * different SCSI script so we don't have name space problems.
+ *
+ * Return values of these handlers are as follows :
+ */
+#define SPECIFIC_INT_NOTHING 0 /* don't even restart */
+#define SPECIFIC_INT_RESTART 1 /* restart at the next instruction */
+#define SPECIFIC_INT_ABORT 2 /* recoverable error, abort cmd */
+#define SPECIFIC_INT_PANIC 3 /* unrecoverable error, panic */
+#define SPECIFIC_INT_DONE 4 /* normal command completion */
+#define SPECIFIC_INT_BREAK 5 /* break point encountered */
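+
+/*
+ * Skeleton, not from the original driver, of a script specific handler of
+ * the kind pointed to by dstat_sir_intr in struct NCR53c7x0_hostdata below.
+ * The A_int_* names stand for the assembler generated constants matching the
+ * INT values in the corresponding SCSI script and are assumptions here;
+ * register access setup is omitted.
+ *
+ *	static int example_dstat_sir_intr (struct Scsi_Host *host,
+ *	    struct NCR53c7x0_cmd *cmd) {
+ *		switch (NCR53c7x0_read32(DSPS_REG)) {
+ *		case A_int_norm_command_complete:
+ *			return SPECIFIC_INT_DONE;
+ *		case A_int_debug_break:
+ *			return SPECIFIC_INT_BREAK;
+ *		default:
+ *			return SPECIFIC_INT_PANIC;
+ *		}
+ *	}
+ */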
+
+struct NCR53c7x0_hostdata {
+ int size; /* Size of entire Scsi_Host
+ structure */
+ int board; /* set to board type, useful if
+ we have host specific things,
+ ie, a general purpose I/O
+ bit is being used to enable
+ termination, etc. */
+
+ int chip; /* set to chip type; 700-66 is
+ 700-66, rest are last three
+ digits of part number */
+ /*
+ * PCI bus, device, function, only for NCR53c8x0 chips.
+ * pci_valid indicates that the PCI configuration information
+ * is valid, and we can twiddle MAX_LAT, etc. as recommended
+ * for maximum performance in the NCR documentation.
+ */
+ unsigned char pci_bus, pci_device_fn;
+ unsigned pci_valid:1;
+
+ u32 *dsp; /* dsp to restart with after
+ all stacked interrupts are
+ handled. */
+
+ unsigned dsp_changed:1; /* Has dsp changed within this
+ set of stacked interrupts ? */
+
+ unsigned char dstat; /* Most recent value of dstat */
+ unsigned dstat_valid:1;
+
+ unsigned expecting_iid:1; /* Expect IID interrupt */
+ unsigned expecting_sto:1; /* Expect STO interrupt */
+
+ /*
+ * The code stays cleaner if we use variables with function
+ * pointers and offsets that are unique for the different
+ * scripts rather than having a slew of switch(hostdata->chip)
+ * statements.
+ *
+ * It also means that the #defines from the SCSI SCRIPTS(tm)
+ * don't have to be visible outside of the script-specific
+ * instructions, preventing name space pollution.
+ */
+
+ void (* init_fixup)(struct Scsi_Host *host);
+ void (* init_save_regs)(struct Scsi_Host *host);
+ void (* dsa_fixup)(struct NCR53c7x0_cmd *cmd);
+ void (* soft_reset)(struct Scsi_Host *host);
+ int (* run_tests)(struct Scsi_Host *host);
+
+ /*
+ * Called when DSTAT_SIR is set, indicating an interrupt generated
+ * by the INT instruction, where values are unique for each SCSI
+ * script. Should return one of the SPECIFIC_INT_* values.
+ */
+
+ int (* dstat_sir_intr)(struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
+
+ int dsa_len; /* Size of DSA structure */
+
+ /*
+ * Location of DSA fields for the SCSI SCRIPT corresponding to this
+ * chip.
+ */
+
+ s32 dsa_start;
+ s32 dsa_end;
+ s32 dsa_next;
+ s32 dsa_prev;
+ s32 dsa_cmnd;
+ s32 dsa_select;
+ s32 dsa_msgout;
+ s32 dsa_cmdout;
+ s32 dsa_dataout;
+ s32 dsa_datain;
+ s32 dsa_msgin;
+ s32 dsa_msgout_other;
+ s32 dsa_write_sync;
+ s32 dsa_write_resume;
+ s32 dsa_check_reselect;
+ s32 dsa_status;
+ s32 dsa_saved_pointer;
+ s32 dsa_jump_dest;
+
+ /*
+ * Important entry points that generic fixup code needs
+ * to know about, fixed up.
+ */
+
+ s32 E_accept_message;
+ s32 E_command_complete;
+ s32 E_data_transfer;
+ s32 E_dsa_code_template;
+ s32 E_dsa_code_template_end;
+ s32 E_end_data_transfer;
+ s32 E_msg_in;
+ s32 E_initiator_abort;
+ s32 E_other_transfer;
+ s32 E_other_in;
+ s32 E_other_out;
+ s32 E_target_abort;
+ s32 E_debug_break;
+ s32 E_reject_message;
+ s32 E_respond_message;
+ s32 E_select;
+ s32 E_select_msgout;
+ s32 E_test_0;
+ s32 E_test_1;
+ s32 E_test_2;
+ s32 E_test_3;
+ s32 E_dsa_zero;
+ s32 E_cmdout_cmdout;
+ s32 E_wait_reselect;
+ s32 E_dsa_code_begin;
+
+ long long options; /* Bitfielded set of options enabled */
+ volatile u32 test_completed; /* Test completed */
+ int test_running; /* Test currently running */
+ s32 test_source;
+ volatile s32 test_dest;
+
+ volatile int state; /* state of driver, only used for
+ OPTION_700 */
+
+ unsigned char dmode; /*
+ * set to the address of the DMODE
+ * register for this chip.
+ */
+ unsigned char istat; /*
+ * set to the address of the ISTAT
+ * register for this chip.
+ */
+
+ int scsi_clock; /*
+ * SCSI clock in HZ. 0 may be used
+ * for unknown, although this will
+ * disable synchronous negotiation.
+ */
+
+ volatile int intrs; /* Number of interrupts */
+ volatile int resets; /* Number of SCSI resets */
+ unsigned char saved_dmode;
+ unsigned char saved_ctest4;
+ unsigned char saved_ctest7;
+ unsigned char saved_dcntl;
+ unsigned char saved_scntl3;
+
+ unsigned char this_id_mask;
+
+ /* Debugger information */
+ struct NCR53c7x0_break *breakpoints, /* Linked list of all break points */
+ *breakpoint_current; /* Current breakpoint being stepped
+ through, NULL if we are running
+ normally. */
+#ifdef NCR_DEBUG
+ int debug_size; /* Size of debug buffer */
+ volatile int debug_count; /* Current data count */
+ volatile char *debug_buf; /* Output ring buffer */
+ volatile char *debug_write; /* Current write pointer */
+ volatile char *debug_read; /* Current read pointer */
+#endif /* def NCR_DEBUG */
+
+ /* XXX - primitive debugging junk, remove when working ? */
+ int debug_print_limit; /* Number of commands to print
+ out exhaustive debugging
+ information for if
+ OPTION_DEBUG_DUMP is set */
+
+ unsigned char debug_lun_limit[16]; /* If OPTION_DEBUG_TARGET_LIMIT
+ set, puke if commands are sent
+ to other target/lun combinations */
+
+ int debug_count_limit; /* Number of commands to execute
+ before puking to limit debugging
+ output */
+
+
+ volatile unsigned idle:1; /* set to 1 if idle */
+
+ /*
+ * Table of synchronous+wide transfer parameters set on a per-target
+ * basis.
+ */
+
+ volatile struct NCR53c7x0_synchronous sync[16];
+
+ volatile Scsi_Cmnd *issue_queue;
+ /* waiting to be issued by
+ Linux driver */
+ volatile struct NCR53c7x0_cmd *running_list;
+ /* commands running, maintained
+ by Linux driver */
+
+ volatile struct NCR53c7x0_cmd *current; /* currently connected
+ nexus, ONLY valid for
+ NCR53c700/NCR53c700-66
+ */
+
+ volatile struct NCR53c7x0_cmd *spare; /* pointer to spare,
+ allocated at probe time,
+ which we can use for
+ initialization */
+ volatile struct NCR53c7x0_cmd *free;
+ int max_cmd_size; /* Maximum size of NCR53c7x0_cmd
+ based on number of
+ scatter/gather segments, etc.
+ */
+ volatile int num_cmds; /* Number of commands
+ allocated */
+ volatile int extra_allocate;
+ volatile unsigned char cmd_allocated[16]; /* Have we allocated commands
+ for this target yet? If not,
+ do so ASAP */
+ volatile unsigned char busy[16][8]; /* number of commands
+ executing on each target
+ */
+ /*
+ * Eventually, I'll switch to a coroutine for calling
+ * cmd->done(cmd), etc. so that we can overlap interrupt
+ * processing with this code for maximum performance.
+ */
+
+ volatile struct NCR53c7x0_cmd *finished_queue;
+
+
+ /* Shared variables between SCRIPT and host driver */
+ volatile u32 *schedule; /* Array of JUMPs to dsa_begin
+ routines of various DSAs.
+ When not in use, replace
+ with jump to next slot */
+
+
+ volatile unsigned char msg_buf[16]; /* buffer for messages
+ other than the command
+ complete message */
+
+ /* Per-target default synchronous and WIDE messages */
+ volatile unsigned char synchronous_want[16][5];
+ volatile unsigned char wide_want[16][4];
+
+ /* Bit fielded set of targets we want to speak synchronously with */
+ volatile u16 initiate_sdtr;
+ /* Bit fielded set of targets we want to speak wide with */
+ volatile u16 initiate_wdtr;
+ /* Bit fielded list of targets we've talked to. */
+ volatile u16 talked_to;
+
+ /* Array of bit-fielded lun lists that we need to request_sense */
+ volatile unsigned char request_sense[16];
+
+ u32 addr_reconnect_dsa_head; /* RISCy style constant,
+ address of following */
+ volatile u32 reconnect_dsa_head;
+ /* Data identifying nexus we are trying to match during reselection */
+ volatile unsigned char reselected_identify; /* IDENTIFY message */
+ volatile unsigned char reselected_tag; /* second byte of queue tag
+ message or 0 */
+ /* These were static variables before we moved them */
+
+ s32 NCR53c7xx_zero;
+ s32 NCR53c7xx_sink;
+ u32 NOP_insn;
+ char NCR53c7xx_msg_reject;
+ char NCR53c7xx_msg_abort;
+ char NCR53c7xx_msg_nop;
+
+ volatile int event_size, event_index;
+ volatile struct NCR53c7x0_event *events;
+
+ /* If we need to generate code to kill off the currently connected
+ command, this is where we do it. Should have a BMI instruction
+ to source or sink the current data, followed by a JUMP
+ to abort_connected */
+
+ u32 *abort_script;
+
+ int script_count; /* Size of script in words */
+ u32 script[0]; /* Relocated SCSI script */
+
+};
+
+#define IRQ_NONE 255
+#define DMA_NONE 255
+#define IRQ_AUTO 254
+#define DMA_AUTO 254
+
+#define BOARD_GENERIC 0
+
+#define NCR53c7x0_insn_size(insn) \
+ (((insn) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI ? 3 : 2)
+
+
+#define NCR53c7x0_local_declare() \
+ volatile unsigned char *NCR53c7x0_address_memory; \
+ unsigned int NCR53c7x0_address_io; \
+ int NCR53c7x0_memory_mapped
+
+#define NCR53c7x0_local_setup(host) \
+ NCR53c7x0_address_memory = (void *) (host)->base; \
+ NCR53c7x0_address_io = (unsigned int) (host)->io_port; \
+ NCR53c7x0_memory_mapped = ((struct NCR53c7x0_hostdata *) \
+ host->hostdata)-> options & OPTION_MEMORY_MAPPED
+
+#define NCR53c7x0_read8(address) \
+ (NCR53c7x0_memory_mapped ? \
+ (unsigned int)readb(NCR53c7x0_address_memory + (address)) : \
+ inb(NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_read16(address) \
+ (NCR53c7x0_memory_mapped ? \
+ (unsigned int)readw(NCR53c7x0_address_memory + (address)) : \
+ inw(NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_read32(address) \
+ (NCR53c7x0_memory_mapped ? \
+ (unsigned int) readl(NCR53c7x0_address_memory + (address)) : \
+ inl(NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_write8(address,value) \
+ (NCR53c7x0_memory_mapped ? \
+ ({writeb((value), NCR53c7x0_address_memory + (address)); mb();}) : \
+ outb((value), NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_write16(address,value) \
+ (NCR53c7x0_memory_mapped ? \
+ ({writew((value), NCR53c7x0_address_memory + (address)); mb();}) : \
+ outw((value), NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_write32(address,value) \
+ (NCR53c7x0_memory_mapped ? \
+ ({writel((value), NCR53c7x0_address_memory + (address)); mb();}) : \
+ outl((value), NCR53c7x0_address_io + (address)))
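+
+/*
+ * Minimal usage sketch, not from the original driver, for the access macros
+ * above: declare the per-function locals, bind them to a host, then read
+ * registers by offset, here saving a couple of register values the way an
+ * init_save_regs style hook might.
+ *
+ *	struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ *	    host->hostdata;
+ *	NCR53c7x0_local_declare();
+ *	NCR53c7x0_local_setup(host);
+ *	hostdata->saved_ctest7 = NCR53c7x0_read8(CTEST7_REG) & CTEST7_SAVE;
+ *	hostdata->saved_dmode  = NCR53c7x0_read8(hostdata->dmode);
+ */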
+
+/* Patch arbitrary 32 bit words in the script */
+#define patch_abs_32(script, offset, symbol, value) \
+ for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
+ (u32)); ++i) { \
+ (script)[A_##symbol##_used[i] - (offset)] += (value); \
+ if (hostdata->options & OPTION_DEBUG_FIXUP) \
+ printk("scsi%d : %s reference %d at 0x%x in %s is now 0x%x\n",\
+ host->host_no, #symbol, i, A_##symbol##_used[i] - \
+ (int)(offset), #script, (script)[A_##symbol##_used[i] - \
+ (offset)]); \
+ }
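+
+/*
+ * Usage sketch, not from the original driver, with a hypothetical symbol:
+ * for every reference to the script ABSOLUTE "reconnect_dsa_head" recorded
+ * by the script assembler in A_reconnect_dsa_head_used[], add the run time
+ * address of the host variable to the instruction operand.  "i" must be
+ * declared by the caller, and virt_to_bus() stands in for whatever virtual
+ * to bus address conversion the surrounding code uses.
+ *
+ *	patch_abs_32 (hostdata->script, 0, reconnect_dsa_head,
+ *	    virt_to_bus ((void *) &hostdata->reconnect_dsa_head));
+ */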
+
+/* Patch read/write instruction immediate field */
+#define patch_abs_rwri_data(script, offset, symbol, value) \
+ for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
+ (u32)); ++i) \
+ (script)[A_##symbol##_used[i] - (offset)] = \
+ ((script)[A_##symbol##_used[i] - (offset)] & \
+ ~DBC_RWRI_IMMEDIATE_MASK) | \
+ (((value) << DBC_RWRI_IMMEDIATE_SHIFT) & \
+ DBC_RWRI_IMMEDIATE_MASK)
+
+/* Patch transfer control instruction data field */
+#define patch_abs_tci_data(script, offset, symbol, value) \
+ for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
+ (u32)); ++i) \
+ (script)[A_##symbol##_used[i] - (offset)] = \
+ ((script)[A_##symbol##_used[i] - (offset)] & \
+ ~DBC_TCI_DATA_MASK) | \
+ (((value) << DBC_TCI_DATA_SHIFT) & \
+ DBC_TCI_DATA_MASK)
+
+/* Patch field in dsa structure (assignment should be +=?) */
+#define patch_dsa_32(dsa, symbol, word, value) \
+ { \
+ (dsa)[(hostdata->##symbol - hostdata->dsa_start) / sizeof(u32) \
+ + (word)] = (value); \
+ if (hostdata->options & OPTION_DEBUG_DSA) \
+ printk("scsi : dsa %s symbol %s(%d) word %d now 0x%x\n", \
+ #dsa, #symbol, hostdata->##symbol, \
+ (word), (u32) (value)); \
+ }
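+
+/*
+ * Usage sketch, not from the original driver: store the selection parameters
+ * for a (hypothetical) "target" into the dsa_select slot of a freshly built
+ * DSA, assuming "hostdata" is in scope as the macro expects.
+ *
+ *	patch_dsa_32 (cmd->dsa, dsa_select, 0,
+ *	    hostdata->sync[target].select_indirect);
+ */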
+
+/* Paranoid people could use panic() here. */
+#define FATAL(host) shutdown((host));
+
+#endif /* NCR53c7x0_C */
+#endif /* NCR53c7x0_H */
diff --git a/i386/i386at/gpl/linux/scsi/53c8xx_d.h b/i386/i386at/gpl/linux/scsi/53c8xx_d.h
new file mode 100644
index 00000000..dd45baee
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/53c8xx_d.h
@@ -0,0 +1,2677 @@
+u32 SCRIPT[] = {
+/*
+
+
+; NCR 53c810 driver, main script
+; Sponsored by
+; iX Multiuser Multitasking Magazine
+; hm@ix.de
+;
+; Copyright 1993, 1994, 1995 Drew Eckhardt
+; Visionary Computing
+; (Unix and Linux consulting and custom programming)
+; drew@PoohSticks.ORG
+; +1 (303) 786-7975
+;
+; TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
+;
+; PRE-ALPHA
+;
+; For more information, please consult
+;
+; NCR 53C810
+; PCI-SCSI I/O Processor
+; Data Manual
+;
+; NCR 53C710
+; SCSI I/O Processor
+; Programmers Guide
+;
+; NCR Microelectronics
+; 1635 Aeroplaza Drive
+; Colorado Springs, CO 80916
+; 1+ (719) 578-3400
+;
+; Toll free literature number
+; +1 (800) 334-5454
+;
+; IMPORTANT : This code is self modifying due to the limitations of
+; the NCR53c7,8xx series chips. Persons debugging this code with
+; the remote debugger should take this into account, and NOT set
+; breakpoints in modified instructions.
+;
+; Design:
+; The NCR53c7,8xx family of SCSI chips are busmasters with an onboard
+; microcontroller using a simple instruction set.
+;
+; So, to minimize the effects of interrupt latency, and to maximize
+; throughput, this driver offloads the practical maximum amount
+; of processing to the SCSI chip while still maintaining a common
+; structure.
+;
+; Where tradeoffs were needed between efficiency on the older
+; chips and the newer NCR53c800 series, the NCR53c800 series
+; was chosen.
+;
+; While the NCR53c700 and NCR53c700-66 lacked the facilities to fully
+; automate SCSI transfers without host processor intervention, this
+; isn't the case with the NCR53c710 and newer chips which allow
+;
+; - reads and writes to the internal registers from within the SCSI
+; scripts, allowing the SCSI SCRIPTS(tm) code to save processor
+; state so that multiple threads of execution are possible, and also
+; provide an ALU for loop control, etc.
+;
+; - table indirect addressing for some instructions. This allows
+; pointers to be located relative to the DSA (Data Structure
+; Address) register.
+;
+; These features make it possible to implement a mailbox style interface,
+; where the same piece of code is run to handle I/O for multiple threads
+; at once minimizing our need to relocate code. Since the NCR53c700/
+; NCR53c800 series have a unique combination of features, making
+; a standard ingoing/outgoing mailbox system costly, I've modified it.
+;
+; - Mailboxes are a mixture of code and data. This lets us greatly
+; simplify the NCR53c810 code and do things that would otherwise
+; not be possible.
+;
+; The saved data pointer is now implemented as follows :
+;
+; Control flow has been architected such that if control reaches
+; munge_save_data_pointer, on a restore pointers message or
+; reconnection, a jump to the address formerly in the TEMP register
+; will allow the SCSI command to resume execution.
+;
+
+;
+; Note : the DSA structures must be aligned on 32 bit boundaries,
+; since the source and destination of MOVE MEMORY instructions
+; must share the same alignment and this is the alignment of the
+; NCR registers.
+;
+
+ABSOLUTE dsa_temp_lun = 0 ; Patch to lun for current dsa
+ABSOLUTE dsa_temp_next = 0 ; Patch to dsa next for current dsa
+ABSOLUTE dsa_temp_addr_next = 0 ; Patch to address of dsa next address
+ ; for current dsa
+ABSOLUTE dsa_temp_sync = 0 ; Patch to address of per-target
+ ; sync routine
+ABSOLUTE dsa_temp_target = 0 ; Patch to id for current dsa
+ABSOLUTE dsa_temp_addr_saved_pointer = 0; Patch to address of per-command
+ ; saved data pointer
+ABSOLUTE dsa_temp_addr_residual = 0 ; Patch to address of per-command
+ ; current residual code
+ABSOLUTE dsa_temp_addr_saved_residual = 0; Patch to address of per-command
+ ; saved residual code
+ABSOLUTE dsa_temp_addr_new_value = 0 ; Address of value for JUMP operand
+ABSOLUTE dsa_temp_addr_array_value = 0 ; Address to copy to
+ABSOLUTE dsa_temp_addr_dsa_value = 0 ; Address of this DSA value
+
+;
+; Once a device has initiated reselection, we need to compare it
+; against the singly linked list of commands which have disconnected
+; and are pending reselection. These commands are maintained in
+; an unordered singly linked list of DSA structures, through the
+; DSA pointers at their 'centers' headed by the reconnect_dsa_head
+; pointer.
+;
+; To avoid complications in removing commands from the list,
+; I minimize the amount of expensive (at eight operations per
+; addition @ 500-600ns each) pointer operations which must
+; be done in the NCR driver by precomputing them on the
+; host processor during dsa structure generation.
+;
+; The fixed-up per DSA code knows how to recognize the nexus
+; associated with the corresponding SCSI command, and modifies
+; the source and destination pointers for the MOVE MEMORY
+; instruction which is executed when reselected_ok is called
+; to remove the command from the list. Similarly, DSA is
+; loaded with the address of the next DSA structure and
+; reselected_check_next is called if a failure occurs.
+;
+; Perhaps more concisely, the net effect of the mess is
+;
+; for (dsa = reconnect_dsa_head, dest = &reconnect_dsa_head,
+; src = NULL; dsa; dest = &dsa->next, dsa = dsa->next) {
+; src = &dsa->next;
+; if (target_id == dsa->id && target_lun == dsa->lun) {
+; *dest = *src;
+; break;
+; }
+; }
+;
+; if (!dsa)
+; error (int_err_unexpected_reselect);
+; else
+; longjmp (dsa->jump_resume, 0);
+;
+;
+
+
+; Define DSA structure used for mailboxes
+ENTRY dsa_code_template
+dsa_code_template:
+ENTRY dsa_code_begin
+dsa_code_begin:
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000000 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, dsa_temp_addr_dsa_value, addr_scratch
+
+at 0x00000002 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000005 : */ 0x78380000,0x00000000,
+/*
+ CALL scratch_to_dsa
+
+at 0x00000007 : */ 0x88080000,0x00000980,
+/*
+ CALL select
+
+at 0x00000009 : */ 0x88080000,0x000001fc,
+/*
+; Handle the phase mismatch which may have resulted from the
+; MOVE FROM dsa_msgout if we returned here. The CLEAR ATN
+; may or may not be necessary, and we should update script_asm.pl
+; to handle multiple pieces.
+ CLEAR ATN
+
+at 0x0000000b : */ 0x60000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000000d : */ 0x60000040,0x00000000,
+/*
+
+; Replace second operand with address of JUMP instruction dest operand
+; in schedule table for this DSA. Becomes dsa_jump_dest in 53c7,8xx.c.
+ENTRY dsa_code_fix_jump
+dsa_code_fix_jump:
+ MOVE MEMORY 4, NOP_insn, 0
+
+at 0x0000000f : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ JUMP select_done
+
+at 0x00000012 : */ 0x80080000,0x00000224,
+/*
+
+; wrong_dsa loads the DSA register with the value of the dsa_next
+; field.
+;
+wrong_dsa:
+; Patch the MOVE MEMORY INSTRUCTION such that
+; the destination address is the address of the OLD
+; next pointer.
+;
+ MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok + 8
+
+at 0x00000014 : */ 0xc0000004,0x00000000,0x00000758,
+/*
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000017 : */ 0x78380000,0x00000000,
+/*
+;
+; Move the _contents_ of the next pointer into the DSA register as
+; the next I_T_L or I_T_L_Q tuple to check against the established
+; nexus.
+;
+ MOVE MEMORY 4, dsa_temp_next, addr_scratch
+
+at 0x00000019 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x0000001c : */ 0x78380000,0x00000000,
+/*
+ CALL scratch_to_dsa
+
+at 0x0000001e : */ 0x88080000,0x00000980,
+/*
+ JUMP reselected_check_next
+
+at 0x00000020 : */ 0x80080000,0x000006a4,
+/*
+
+ABSOLUTE dsa_save_data_pointer = 0
+ENTRY dsa_code_save_data_pointer
+dsa_code_save_data_pointer:
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000022 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_temp, dsa_temp_addr_saved_pointer
+
+at 0x00000024 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000027 : */ 0x78380000,0x00000000,
+/*
+; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
+ MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
+
+at 0x00000029 : */ 0xc0000018,0x00000000,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000002c : */ 0x60000040,0x00000000,
+/*
+
+
+
+ RETURN
+
+at 0x0000002e : */ 0x90080000,0x00000000,
+/*
+ABSOLUTE dsa_restore_pointers = 0
+ENTRY dsa_code_restore_pointers
+dsa_code_restore_pointers:
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000030 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, dsa_temp_addr_saved_pointer, addr_temp
+
+at 0x00000032 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000035 : */ 0x78380000,0x00000000,
+/*
+; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
+ MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
+
+at 0x00000037 : */ 0xc0000018,0x00000000,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000003a : */ 0x60000040,0x00000000,
+/*
+
+
+
+ RETURN
+
+at 0x0000003c : */ 0x90080000,0x00000000,
+/*
+
+ABSOLUTE dsa_check_reselect = 0
+; dsa_check_reselect determines whether or not the current target and
+; lun match the current DSA
+ENTRY dsa_code_check_reselect
+dsa_code_check_reselect:
+ MOVE SSID TO SFBR ; SSID contains 3 bit target ID
+
+at 0x0000003e : */ 0x720a0000,0x00000000,
+/*
+; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
+ JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0xf8
+
+at 0x00000040 : */ 0x8084f800,0x00ffff48,
+/*
+;
+; Hack - move to scratch first, since SFBR is not writeable
+; via the CPU and hence a MOVE MEMORY instruction.
+;
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000042 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 1, reselected_identify, addr_scratch
+
+at 0x00000044 : */ 0xc0000001,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000047 : */ 0x78380000,0x00000000,
+/*
+ MOVE SCRATCH0 TO SFBR
+
+at 0x00000049 : */ 0x72340000,0x00000000,
+/*
+; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
+ JUMP REL (wrong_dsa), IF NOT dsa_temp_lun, AND MASK 0xf8
+
+at 0x0000004b : */ 0x8084f800,0x00ffff1c,
+/*
+; Patch the MOVE MEMORY INSTRUCTION such that
+; the source address is the address of this dsa's
+; next pointer.
+ MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok + 4
+
+at 0x0000004d : */ 0xc0000004,0x00000000,0x00000754,
+/*
+ CALL reselected_ok
+
+at 0x00000050 : */ 0x88080000,0x00000750,
+/*
+ CALL dsa_temp_sync
+
+at 0x00000052 : */ 0x88080000,0x00000000,
+/*
+; Release ACK on the IDENTIFY message _after_ we've set the synchronous
+; transfer parameters!
+ CLEAR ACK
+
+at 0x00000054 : */ 0x60000040,0x00000000,
+/*
+; Implicitly restore pointers on reselection, so a RETURN
+; will transfer control back to the right spot.
+ CALL REL (dsa_code_restore_pointers)
+
+at 0x00000056 : */ 0x88880000,0x00ffff60,
+/*
+ RETURN
+
+at 0x00000058 : */ 0x90080000,0x00000000,
+/*
+ENTRY dsa_zero
+dsa_zero:
+ENTRY dsa_code_template_end
+dsa_code_template_end:
+
+; Perform sanity check for dsa_fields_start == dsa_code_template_end -
+; dsa_zero, puke.
+
+ABSOLUTE dsa_fields_start = 0 ; Sanity marker
+ ; pad 48 bytes (fix this RSN)
+ABSOLUTE dsa_next = 48 ; len 4 Next DSA
+ ; del 4 Previous DSA address
+ABSOLUTE dsa_cmnd = 56 ; len 4 Scsi_Cmnd * for this thread.
+ABSOLUTE dsa_select = 60 ; len 4 Device ID, Period, Offset for
+ ; table indirect select
+ABSOLUTE dsa_msgout = 64 ; len 8 table indirect move parameter for
+ ; select message
+ABSOLUTE dsa_cmdout = 72 ; len 8 table indirect move parameter for
+ ; command
+ABSOLUTE dsa_dataout = 80 ; len 4 code pointer for dataout
+ABSOLUTE dsa_datain = 84 ; len 4 code pointer for datain
+ABSOLUTE dsa_msgin = 88 ; len 8 table indirect move for msgin
+ABSOLUTE dsa_status = 96 ; len 8 table indirect move for status byte
+ABSOLUTE dsa_msgout_other = 104 ; len 8 table indirect for normal message out
+ ; (Synchronous transfer negotiation, etc).
+ABSOLUTE dsa_end = 112
+
+ABSOLUTE schedule = 0 ; Array of JUMP dsa_begin or JUMP (next),
+ ; terminated by a call to JUMP wait_reselect
+
+; Linked lists of DSA structures
+ABSOLUTE reconnect_dsa_head = 0 ; Link list of DSAs which can reconnect
+ABSOLUTE addr_reconnect_dsa_head = 0 ; Address of variable containing
+ ; address of reconnect_dsa_head
+
+; These select the source and destination of a MOVE MEMORY instruction
+ABSOLUTE dmode_memory_to_memory = 0x0
+ABSOLUTE dmode_memory_to_ncr = 0x0
+ABSOLUTE dmode_ncr_to_memory = 0x0
+
+ABSOLUTE addr_scratch = 0x0
+ABSOLUTE addr_temp = 0x0
+
+
+; Interrupts -
+; MSB indicates type
+; 0 handle error condition
+; 1 handle message
+; 2 handle normal condition
+; 3 debugging interrupt
+; 4 testing interrupt
+; Next byte indicates specific error
+
+; XXX not yet implemented, I'm not sure if I want to -
+; Next byte indicates the routine the error occurred in
+; The LSB indicates the specific place the error occurred
+
+ABSOLUTE int_err_unexpected_phase = 0x00000000 ; Unexpected phase encountered
+ABSOLUTE int_err_selected = 0x00010000 ; SELECTED (nee RESELECTED)
+ABSOLUTE int_err_unexpected_reselect = 0x00020000
+ABSOLUTE int_err_check_condition = 0x00030000
+ABSOLUTE int_err_no_phase = 0x00040000
+ABSOLUTE int_msg_wdtr = 0x01000000 ; WDTR message received
+ABSOLUTE int_msg_sdtr = 0x01010000 ; SDTR received
+ABSOLUTE int_msg_1 = 0x01020000 ; single byte special message
+ ; received
+
+ABSOLUTE int_norm_select_complete = 0x02000000 ; Select complete, reprogram
+ ; registers.
+ABSOLUTE int_norm_reselect_complete = 0x02010000 ; Nexus established
+ABSOLUTE int_norm_command_complete = 0x02020000 ; Command complete
+ABSOLUTE int_norm_disconnected = 0x02030000 ; Disconnected
+ABSOLUTE int_norm_aborted = 0x02040000 ; Aborted *dsa
+ABSOLUTE int_norm_reset = 0x02050000 ; Generated BUS reset.
+ABSOLUTE int_debug_break = 0x03000000 ; Break point
+
+ABSOLUTE int_debug_panic = 0x030b0000 ; Panic driver
+
+
+ABSOLUTE int_test_1 = 0x04000000 ; Test 1 complete
+ABSOLUTE int_test_2 = 0x04010000 ; Test 2 complete
+ABSOLUTE int_test_3 = 0x04020000 ; Test 3 complete
+
+
+; These should start with 0x05000000, with low bits incrementing for
+; each one.
+
+
+
+ABSOLUTE NCR53c7xx_msg_abort = 0 ; Pointer to abort message
+ABSOLUTE NCR53c7xx_msg_reject = 0 ; Pointer to reject message
+ABSOLUTE NCR53c7xx_zero = 0 ; long with zero in it, use for source
+ABSOLUTE NCR53c7xx_sink = 0 ; long to dump worthless data in
+ABSOLUTE NOP_insn = 0 ; NOP instruction
+
+; Pointer to message, potentially multi-byte
+ABSOLUTE msg_buf = 0
+
+; Pointer to holding area for reselection information
+ABSOLUTE reselected_identify = 0
+ABSOLUTE reselected_tag = 0
+
+; Request sense command pointer, it's a 6 byte command, should
+; be constant for all commands since we always want 16 bytes of
+; sense and we don't need to change any fields as we did under
+; SCSI-I when we actually cared about the LUN field.
+;EXTERNAL NCR53c7xx_sense ; Request sense command
+
+
+; dsa_schedule
+; PURPOSE : after a DISCONNECT message has been received, and pointers
+; saved, insert the current DSA structure at the head of the
+; disconnected queue and fall through to the scheduler.
+;
+; CALLS : OK
+;
+; INPUTS : dsa - current DSA structure, reconnect_dsa_head - list
+; of disconnected commands
+;
+; MODIFIES : SCRATCH, reconnect_dsa_head
+;
+; EXITS : always passes control to schedule
+
+ENTRY dsa_schedule
+dsa_schedule:
+
+
+
+
+;
+; Calculate the address of the next pointer within the DSA
+; structure of the command that is currently disconnecting
+;
+ CALL dsa_to_scratch
+
+at 0x0000005a : */ 0x88080000,0x00000938,
+/*
+ MOVE SCRATCH0 + dsa_next TO SCRATCH0
+
+at 0x0000005c : */ 0x7e343000,0x00000000,
+/*
+ MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
+
+at 0x0000005e : */ 0x7f350000,0x00000000,
+/*
+ MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
+
+at 0x00000060 : */ 0x7f360000,0x00000000,
+/*
+ MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
+
+at 0x00000062 : */ 0x7f370000,0x00000000,
+/*
+
+; Point the next field of this DSA structure at the current disconnected
+; list
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000064 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, dsa_schedule_insert + 8
+
+at 0x00000066 : */ 0xc0000004,0x00000000,0x000001b4,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000069 : */ 0x78380000,0x00000000,
+/*
+dsa_schedule_insert:
+ MOVE MEMORY 4, reconnect_dsa_head, 0
+
+at 0x0000006b : */ 0xc0000004,0x00000000,0x00000000,
+/*
+
+; And update the head pointer.
+ CALL dsa_to_scratch
+
+at 0x0000006e : */ 0x88080000,0x00000938,
+/*
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000070 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, reconnect_dsa_head
+
+at 0x00000072 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000075 : */ 0x78380000,0x00000000,
+/*
+
+
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x00000077 : */ 0x7c027f00,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x00000079 : */ 0x60000040,0x00000000,
+/*
+
+ WAIT DISCONNECT
+
+at 0x0000007b : */ 0x48000000,0x00000000,
+/*
+
+
+
+
+
+
+ JUMP schedule
+
+at 0x0000007d : */ 0x80080000,0x00000000,
+/*
+
+
+;
+; select
+;
+; PURPOSE : establish a nexus for the SCSI command referenced by DSA.
+; On success, the current DSA structure is removed from the issue
+; queue. Usually, this is entered as a fall-through from schedule,
+; although the contingent allegiance handling code will write
+; the select entry address to the DSP to restart a command as a
+; REQUEST SENSE. A message is sent (usually IDENTIFY, although
+; additional SDTR or WDTR messages may be sent). COMMAND OUT
+; is handled.
+;
+; INPUTS : DSA - SCSI command, issue_dsa_head
+;
+; CALLS : NOT OK
+;
+; MODIFIES : SCRATCH, issue_dsa_head
+;
+; EXITS : on reselection or selection, go to select_failed
+; otherwise, RETURN so control is passed back to
+; dsa_begin.
+;
+
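+; For the contingent allegiance case mentioned above, the host restarts a
+; (suitably rewritten) command by pointing the SCRIPTS processor back at
+; this entry.  A sketch, with hypothetical helper names - the real driver
+; has its own register accessors:
+;
+;	// resume SCRIPTS execution at select: to issue the REQUEST SENSE
+;	write_dsp(host, script_bus_address + Ent_select);
+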
+ENTRY select
+select:
+
+
+
+
+
+
+
+
+
+
+
+
+ CLEAR TARGET
+
+at 0x0000007f : */ 0x60000200,0x00000000,
+/*
+
+; XXX
+;
+; In effect, SELECTION operations are backgrounded, with execution
+; continuing until code which waits for REQ or a fatal interrupt is
+; encountered.
+;
+; So, for more performance, we could overlap the code which removes
+; the command from the NCR's issue queue with the selection, but
+; at this point I don't want to deal with the error recovery.
+;
+
+
+ SELECT ATN FROM dsa_select, select_failed
+
+at 0x00000081 : */ 0x4300003c,0x000007a4,
+/*
+ JUMP select_msgout, WHEN MSG_OUT
+
+at 0x00000083 : */ 0x860b0000,0x00000214,
+/*
+ENTRY select_msgout
+select_msgout:
+ MOVE FROM dsa_msgout, WHEN MSG_OUT
+
+at 0x00000085 : */ 0x1e000000,0x00000040,
+/*
+
+
+
+
+
+
+
+
+
+
+ RETURN
+
+at 0x00000087 : */ 0x90080000,0x00000000,
+/*
+
+;
+; select_done
+;
+; PURPOSE: continue on to normal data transfer; called as the exit
+; point from dsa_begin.
+;
+; INPUTS: dsa
+;
+; CALLS: OK
+;
+;
+
+select_done:
+
+
+
+
+
+
+
+; After a successful selection, we should get either a CMD phase or
+; some transfer request negotiation message.
+
+ JUMP cmdout, WHEN CMD
+
+at 0x00000089 : */ 0x820b0000,0x00000244,
+/*
+ INT int_err_unexpected_phase, WHEN NOT MSG_IN
+
+at 0x0000008b : */ 0x9f030000,0x00000000,
+/*
+
+select_msg_in:
+ CALL msg_in, WHEN MSG_IN
+
+at 0x0000008d : */ 0x8f0b0000,0x00000404,
+/*
+ JUMP select_msg_in, WHEN MSG_IN
+
+at 0x0000008f : */ 0x870b0000,0x00000234,
+/*
+
+cmdout:
+ INT int_err_unexpected_phase, WHEN NOT CMD
+
+at 0x00000091 : */ 0x9a030000,0x00000000,
+/*
+
+
+
+ENTRY cmdout_cmdout
+cmdout_cmdout:
+
+ MOVE FROM dsa_cmdout, WHEN CMD
+
+at 0x00000093 : */ 0x1a000000,0x00000048,
+/*
+
+
+
+
+;
+; data_transfer
+; other_out
+; other_in
+; other_transfer
+;
+; PURPOSE : handle the main data transfer for a SCSI command in
+; several parts. In the first part, data_transfer, DATA_IN
+; and DATA_OUT phases are allowed, with the user provided
+; code (usually dynamically generated based on the scatter/gather
+; list associated with a SCSI command) called to handle these
+; phases.
+;
+; After control has passed to one of the user provided
+; DATA_IN or DATA_OUT routines, back calls are made to
+;	other_in or other_out to handle the non-DATA_IN
+;	and non-DATA_OUT phases respectively, with the state of the active
+; data pointer being preserved in TEMP.
+;
+; On completion, the user code passes control to other_transfer
+; which causes DATA_IN and DATA_OUT to result in unexpected_phase
+; interrupts so that data overruns may be trapped.
+;
+; INPUTS : DSA - SCSI command
+;
+; CALLS : OK in data_transfer_start, not ok in other_out and other_in, ok in
+; other_transfer
+;
+; MODIFIES : SCRATCH
+;
+; EXITS : if STATUS IN is detected, signifying command completion,
+; the NCR jumps to command_complete. If MSG IN occurs, a
+; CALL is made to msg_in. Otherwise, other_transfer runs in
+; an infinite loop.
+;
+
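+; The dispatch below is, in effect, a phase-driven loop.  A rough
+; host-side C rendering of the dispatch only (helper names are
+; illustrative, and each handler really transfers control elsewhere
+; rather than returning here):
+;
+;	static void data_transfer_sketch(void)
+;	{
+;		for (;;) {
+;			switch (bus_phase()) {
+;			case PHASE_CMD:      cmdout();            break;
+;			case PHASE_MSG_IN:   msg_in();            break;
+;			case PHASE_MSG_OUT:  unexpected_phase();  break;
+;			case PHASE_DATA_OUT: dataout_sg_code();   break;	// user S/G code
+;			case PHASE_DATA_IN:  datain_sg_code();    break;	// user S/G code
+;			case PHASE_STATUS:   command_complete();  return;
+;			}
+;		}
+;	}
+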
+ENTRY data_transfer
+data_transfer:
+ JUMP cmdout_cmdout, WHEN CMD
+
+at 0x00000095 : */ 0x820b0000,0x0000024c,
+/*
+ CALL msg_in, WHEN MSG_IN
+
+at 0x00000097 : */ 0x8f0b0000,0x00000404,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x00000099 : */ 0x9e0b0000,0x00000000,
+/*
+ JUMP do_dataout, WHEN DATA_OUT
+
+at 0x0000009b : */ 0x800b0000,0x0000028c,
+/*
+ JUMP do_datain, WHEN DATA_IN
+
+at 0x0000009d : */ 0x810b0000,0x000002e4,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x0000009f : */ 0x830b0000,0x0000060c,
+/*
+ JUMP data_transfer
+
+at 0x000000a1 : */ 0x80080000,0x00000254,
+/*
+ENTRY end_data_transfer
+end_data_transfer:
+
+;
+; FIXME: On NCR53c700 and NCR53c700-66 chips, do_dataout/do_datain
+; should be fixed up whenever the nexus changes so it can point to the
+; correct routine for that command.
+;
+
+
+; Nasty jump to dsa->dataout
+do_dataout:
+ CALL dsa_to_scratch
+
+at 0x000000a3 : */ 0x88080000,0x00000938,
+/*
+ MOVE SCRATCH0 + dsa_dataout TO SCRATCH0
+
+at 0x000000a5 : */ 0x7e345000,0x00000000,
+/*
+ MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
+
+at 0x000000a7 : */ 0x7f350000,0x00000000,
+/*
+ MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
+
+at 0x000000a9 : */ 0x7f360000,0x00000000,
+/*
+ MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
+
+at 0x000000ab : */ 0x7f370000,0x00000000,
+/*
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x000000ad : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, dataout_to_jump + 4
+
+at 0x000000af : */ 0xc0000004,0x00000000,0x000002d4,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000000b2 : */ 0x78380000,0x00000000,
+/*
+dataout_to_jump:
+ MOVE MEMORY 4, 0, dataout_jump + 4
+
+at 0x000000b4 : */ 0xc0000004,0x00000000,0x000002e0,
+/*
+dataout_jump:
+ JUMP 0
+
+at 0x000000b7 : */ 0x80080000,0x00000000,
+/*
+
+; Nasty jump to dsa->dsain
+do_datain:
+ CALL dsa_to_scratch
+
+at 0x000000b9 : */ 0x88080000,0x00000938,
+/*
+ MOVE SCRATCH0 + dsa_datain TO SCRATCH0
+
+at 0x000000bb : */ 0x7e345400,0x00000000,
+/*
+ MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
+
+at 0x000000bd : */ 0x7f350000,0x00000000,
+/*
+ MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
+
+at 0x000000bf : */ 0x7f360000,0x00000000,
+/*
+ MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
+
+at 0x000000c1 : */ 0x7f370000,0x00000000,
+/*
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x000000c3 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, datain_to_jump + 4
+
+at 0x000000c5 : */ 0xc0000004,0x00000000,0x0000032c,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000000c8 : */ 0x78380000,0x00000000,
+/*
+ENTRY datain_to_jump
+datain_to_jump:
+ MOVE MEMORY 4, 0, datain_jump + 4
+
+at 0x000000ca : */ 0xc0000004,0x00000000,0x00000338,
+/*
+
+
+
+datain_jump:
+ JUMP 0
+
+at 0x000000cd : */ 0x80080000,0x00000000,
+/*
+
+
+
+; Note that other_out and other_in loop until a non-data phase
+; is discovered, so we only execute return statements when we
+; can go on to the next data phase block move statement.
+
+ENTRY other_out
+other_out:
+
+
+
+ INT int_err_unexpected_phase, WHEN CMD
+
+at 0x000000cf : */ 0x9a0b0000,0x00000000,
+/*
+ JUMP msg_in_restart, WHEN MSG_IN
+
+at 0x000000d1 : */ 0x870b0000,0x000003e4,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x000000d3 : */ 0x9e0b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_IN
+
+at 0x000000d5 : */ 0x990b0000,0x00000000,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x000000d7 : */ 0x830b0000,0x0000060c,
+/*
+ JUMP other_out, WHEN NOT DATA_OUT
+
+at 0x000000d9 : */ 0x80030000,0x0000033c,
+/*
+ RETURN
+
+at 0x000000db : */ 0x90080000,0x00000000,
+/*
+
+ENTRY other_in
+other_in:
+
+
+
+ INT int_err_unexpected_phase, WHEN CMD
+
+at 0x000000dd : */ 0x9a0b0000,0x00000000,
+/*
+ JUMP msg_in_restart, WHEN MSG_IN
+
+at 0x000000df : */ 0x870b0000,0x000003e4,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x000000e1 : */ 0x9e0b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_OUT
+
+at 0x000000e3 : */ 0x980b0000,0x00000000,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x000000e5 : */ 0x830b0000,0x0000060c,
+/*
+ JUMP other_in, WHEN NOT DATA_IN
+
+at 0x000000e7 : */ 0x81030000,0x00000374,
+/*
+ RETURN
+
+at 0x000000e9 : */ 0x90080000,0x00000000,
+/*
+
+
+ENTRY other_transfer
+other_transfer:
+ INT int_err_unexpected_phase, WHEN CMD
+
+at 0x000000eb : */ 0x9a0b0000,0x00000000,
+/*
+ CALL msg_in, WHEN MSG_IN
+
+at 0x000000ed : */ 0x8f0b0000,0x00000404,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x000000ef : */ 0x9e0b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_OUT
+
+at 0x000000f1 : */ 0x980b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_IN
+
+at 0x000000f3 : */ 0x990b0000,0x00000000,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x000000f5 : */ 0x830b0000,0x0000060c,
+/*
+ JUMP other_transfer
+
+at 0x000000f7 : */ 0x80080000,0x000003ac,
+/*
+
+;
+; msg_in_restart
+; msg_in
+; munge_msg
+;
+; PURPOSE : process messages from a target. msg_in is called when the
+;	caller hasn't read the first byte of the message. munge_msg
+; is called when the caller has read the first byte of the message,
+; and left it in SFBR. msg_in_restart is called when the caller
+;	hasn't read the first byte of the message, and wishes RETURN
+; to transfer control back to the address of the conditional
+; CALL instruction rather than to the instruction after it.
+;
+; Various int_* interrupts are generated when the host system
+; needs to intervene, as is the case with SDTR, WDTR, and
+; INITIATE RECOVERY messages.
+;
+; When the host system handles one of these interrupts,
+; it can respond by reentering at reject_message,
+; which rejects the message and returns control to
+; the caller of msg_in or munge_msg, accept_message
+;	which clears ACK and returns control, or respond_message
+; which sends the message pointed to by the DSA
+; msgout_other table indirect field.
+;
+; DISCONNECT messages are handled by moving the command
+; to the reconnect_dsa_queue.
+;
+; INPUTS : DSA - SCSI COMMAND, SFBR - first byte of message (munge_msg
+; only)
+;
+; CALLS : NO. The TEMP register isn't backed up to allow nested calls.
+;
+; MODIFIES : SCRATCH, DSA on DISCONNECT
+;
+; EXITS : On receipt of SAVE DATA POINTER, RESTORE POINTERS,
+; and normal return from message handlers running under
+; Linux, control is returned to the caller. Receipt
+; of DISCONNECT messages pass control to dsa_schedule.
+;
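+; A host-side sketch of those three responses after an int_msg_*
+; interrupt (helper and constant names are illustrative; the Ent_*
+; offsets are the ones emitted at the end of this file):
+;
+;	static void respond_to_msg_interrupt(int choice)
+;	{
+;		u32 resume = script_bus_address + Ent_accept_message;
+;		if (choice == REJECT)
+;			resume = script_bus_address + Ent_reject_message;
+;		else if (choice == REPLY)	// reply staged in dsa_msgout_other
+;			resume = script_bus_address + Ent_respond_message;
+;		write_dsp(host, resume);	// SCRIPTS continues from there
+;	}
+;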
+ENTRY msg_in_restart
+msg_in_restart:
+; XXX - hackish
+;
+; Since it's easier to debug changes to the statically
+; compiled code, rather than the dynamically generated
+; stuff, such as
+;
+; MOVE x, y, WHEN data_phase
+; CALL other_z, WHEN NOT data_phase
+; MOVE x, y, WHEN data_phase
+;
+; I'd like to have certain routines (notably the message handler)
+; restart on the conditional call rather than the next instruction.
+;
+; So, subtract 8 from the return address
+
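+; (Adding 0xf8 to the low byte and 0xff-with-carry to each higher byte
+; is the same as adding 0xfffffff8, i.e. subtracting 8 in two's
+; complement.  With a hypothetical return address:
+;
+;	0x00000414 + 0xfffffff8 == 0x0000040c	// 0x414 - 8
+; )
+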
+ MOVE TEMP0 + 0xf8 TO TEMP0
+
+at 0x000000f9 : */ 0x7e1cf800,0x00000000,
+/*
+ MOVE TEMP1 + 0xff TO TEMP1 WITH CARRY
+
+at 0x000000fb : */ 0x7f1dff00,0x00000000,
+/*
+ MOVE TEMP2 + 0xff TO TEMP2 WITH CARRY
+
+at 0x000000fd : */ 0x7f1eff00,0x00000000,
+/*
+ MOVE TEMP3 + 0xff TO TEMP3 WITH CARRY
+
+at 0x000000ff : */ 0x7f1fff00,0x00000000,
+/*
+
+ENTRY msg_in
+msg_in:
+ MOVE 1, msg_buf, WHEN MSG_IN
+
+at 0x00000101 : */ 0x0f000001,0x00000000,
+/*
+
+munge_msg:
+ JUMP munge_extended, IF 0x01 ; EXTENDED MESSAGE
+
+at 0x00000103 : */ 0x800c0001,0x00000524,
+/*
+ JUMP munge_2, IF 0x20, AND MASK 0xdf ; two byte message
+
+at 0x00000105 : */ 0x800cdf20,0x0000044c,
+/*
+;
+; XXX - I've seen a handful of broken SCSI devices which fail to issue
+; a SAVE POINTERS message before disconnecting in the middle of
+; a transfer, assuming that the DATA POINTER will be implicitly
+; restored.
+;
+; Historically, I've often done an implicit save when the DISCONNECT
+; message is processed. We may want to consider having the option of
+; doing that here.
+;
+ JUMP munge_save_data_pointer, IF 0x02 ; SAVE DATA POINTER
+
+at 0x00000107 : */ 0x800c0002,0x00000454,
+/*
+ JUMP munge_restore_pointers, IF 0x03 ; RESTORE POINTERS
+
+at 0x00000109 : */ 0x800c0003,0x000004b8,
+/*
+ JUMP munge_disconnect, IF 0x04 ; DISCONNECT
+
+at 0x0000010b : */ 0x800c0004,0x0000051c,
+/*
+ INT int_msg_1, IF 0x07 ; MESSAGE REJECT
+
+at 0x0000010d : */ 0x980c0007,0x01020000,
+/*
+ INT int_msg_1, IF 0x0f ; INITIATE RECOVERY
+
+at 0x0000010f : */ 0x980c000f,0x01020000,
+/*
+
+
+
+ JUMP reject_message
+
+at 0x00000111 : */ 0x80080000,0x000005b4,
+/*
+
+munge_2:
+ JUMP reject_message
+
+at 0x00000113 : */ 0x80080000,0x000005b4,
+/*
+;
+; The SCSI standard allows targets to recover from transient
+; error conditions by backing up the data pointer with a
+; RESTORE POINTERS message.
+;
+; So, we must save and restore the _residual_ code as well as
+; the current instruction pointer. Because of this messiness,
+; it is simpler to put dynamic code in the dsa for this and to
+; just do a simple jump down there.
+;
+
+munge_save_data_pointer:
+ MOVE DSA0 + dsa_save_data_pointer TO SFBR
+
+at 0x00000115 : */ 0x76100000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH0
+
+at 0x00000117 : */ 0x6a340000,0x00000000,
+/*
+ MOVE DSA1 + 0xff TO SFBR WITH CARRY
+
+at 0x00000119 : */ 0x7711ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH1
+
+at 0x0000011b : */ 0x6a350000,0x00000000,
+/*
+ MOVE DSA2 + 0xff TO SFBR WITH CARRY
+
+at 0x0000011d : */ 0x7712ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH2
+
+at 0x0000011f : */ 0x6a360000,0x00000000,
+/*
+ MOVE DSA3 + 0xff TO SFBR WITH CARRY
+
+at 0x00000121 : */ 0x7713ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH3
+
+at 0x00000123 : */ 0x6a370000,0x00000000,
+/*
+
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000125 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, jump_dsa_save + 4
+
+at 0x00000127 : */ 0xc0000004,0x00000000,0x000004b4,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x0000012a : */ 0x78380000,0x00000000,
+/*
+jump_dsa_save:
+ JUMP 0
+
+at 0x0000012c : */ 0x80080000,0x00000000,
+/*
+
+munge_restore_pointers:
+ MOVE DSA0 + dsa_restore_pointers TO SFBR
+
+at 0x0000012e : */ 0x76100000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH0
+
+at 0x00000130 : */ 0x6a340000,0x00000000,
+/*
+ MOVE DSA1 + 0xff TO SFBR WITH CARRY
+
+at 0x00000132 : */ 0x7711ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH1
+
+at 0x00000134 : */ 0x6a350000,0x00000000,
+/*
+ MOVE DSA2 + 0xff TO SFBR WITH CARRY
+
+at 0x00000136 : */ 0x7712ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH2
+
+at 0x00000138 : */ 0x6a360000,0x00000000,
+/*
+ MOVE DSA3 + 0xff TO SFBR WITH CARRY
+
+at 0x0000013a : */ 0x7713ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH3
+
+at 0x0000013c : */ 0x6a370000,0x00000000,
+/*
+
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x0000013e : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, jump_dsa_restore + 4
+
+at 0x00000140 : */ 0xc0000004,0x00000000,0x00000518,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000143 : */ 0x78380000,0x00000000,
+/*
+jump_dsa_restore:
+ JUMP 0
+
+at 0x00000145 : */ 0x80080000,0x00000000,
+/*
+
+
+munge_disconnect:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ JUMP dsa_schedule
+
+at 0x00000147 : */ 0x80080000,0x00000168,
+/*
+
+
+
+
+
+munge_extended:
+ CLEAR ACK
+
+at 0x00000149 : */ 0x60000040,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN NOT MSG_IN
+
+at 0x0000014b : */ 0x9f030000,0x00000000,
+/*
+ MOVE 1, msg_buf + 1, WHEN MSG_IN
+
+at 0x0000014d : */ 0x0f000001,0x00000001,
+/*
+ JUMP munge_extended_2, IF 0x02
+
+at 0x0000014f : */ 0x800c0002,0x00000554,
+/*
+ JUMP munge_extended_3, IF 0x03
+
+at 0x00000151 : */ 0x800c0003,0x00000584,
+/*
+ JUMP reject_message
+
+at 0x00000153 : */ 0x80080000,0x000005b4,
+/*
+
+munge_extended_2:
+ CLEAR ACK
+
+at 0x00000155 : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, msg_buf + 2, WHEN MSG_IN
+
+at 0x00000157 : */ 0x0f000001,0x00000002,
+/*
+ JUMP reject_message, IF NOT 0x02 ; Must be WDTR
+
+at 0x00000159 : */ 0x80040002,0x000005b4,
+/*
+ CLEAR ACK
+
+at 0x0000015b : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, msg_buf + 3, WHEN MSG_IN
+
+at 0x0000015d : */ 0x0f000001,0x00000003,
+/*
+ INT int_msg_wdtr
+
+at 0x0000015f : */ 0x98080000,0x01000000,
+/*
+
+munge_extended_3:
+ CLEAR ACK
+
+at 0x00000161 : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, msg_buf + 2, WHEN MSG_IN
+
+at 0x00000163 : */ 0x0f000001,0x00000002,
+/*
+ JUMP reject_message, IF NOT 0x01 ; Must be SDTR
+
+at 0x00000165 : */ 0x80040001,0x000005b4,
+/*
+ CLEAR ACK
+
+at 0x00000167 : */ 0x60000040,0x00000000,
+/*
+ MOVE 2, msg_buf + 3, WHEN MSG_IN
+
+at 0x00000169 : */ 0x0f000002,0x00000003,
+/*
+ INT int_msg_sdtr
+
+at 0x0000016b : */ 0x98080000,0x01010000,
+/*
+
+ENTRY reject_message
+reject_message:
+ SET ATN
+
+at 0x0000016d : */ 0x58000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000016f : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, NCR53c7xx_msg_reject, WHEN MSG_OUT
+
+at 0x00000171 : */ 0x0e000001,0x00000000,
+/*
+ RETURN
+
+at 0x00000173 : */ 0x90080000,0x00000000,
+/*
+
+ENTRY accept_message
+accept_message:
+ CLEAR ATN
+
+at 0x00000175 : */ 0x60000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x00000177 : */ 0x60000040,0x00000000,
+/*
+ RETURN
+
+at 0x00000179 : */ 0x90080000,0x00000000,
+/*
+
+ENTRY respond_message
+respond_message:
+ SET ATN
+
+at 0x0000017b : */ 0x58000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000017d : */ 0x60000040,0x00000000,
+/*
+ MOVE FROM dsa_msgout_other, WHEN MSG_OUT
+
+at 0x0000017f : */ 0x1e000000,0x00000068,
+/*
+ RETURN
+
+at 0x00000181 : */ 0x90080000,0x00000000,
+/*
+
+;
+; command_complete
+;
+; PURPOSE : handle command termination when STATUS IN is detected by reading
+; a status byte followed by a command termination message.
+;
+; Normal termination results in an INTFLY instruction, and
+; the host system can pick out which command terminated by
+; examining the MESSAGE and STATUS buffers of all currently
+; executing commands;
+;
+; Abnormal (CHECK_CONDITION) termination results in an
+; int_err_check_condition interrupt so that a REQUEST SENSE
+; command can be issued out-of-order so that no other command
+; clears the contingent allegiance condition.
+;
+;
+; INPUTS : DSA - command
+;
+; CALLS : OK
+;
+; EXITS : On successful termination, control is passed to schedule.
+; On abnormal termination, the user will usually modify the
+; DSA fields and corresponding buffers and return control
+; to select.
+;
+
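+; On the host side, an INTFLY handler therefore scans its list of running
+; commands for one whose status and message-in buffers have changed.  A
+; sketch, assuming (illustratively) that the buffers were preset to 0xff:
+;
+;	struct running_cmd *c;			// illustrative structure
+;	for (c = running_commands; c; c = c->next)
+;		if (c->msgin[0] != 0xff)	// changed from the 0xff preset
+;			finish_command(c, c->status[0]);
+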
+ENTRY command_complete
+command_complete:
+ MOVE FROM dsa_status, WHEN STATUS
+
+at 0x00000183 : */ 0x1b000000,0x00000060,
+/*
+
+ MOVE SFBR TO SCRATCH0 ; Save status
+
+at 0x00000185 : */ 0x6a340000,0x00000000,
+/*
+
+ENTRY command_complete_msgin
+command_complete_msgin:
+ MOVE FROM dsa_msgin, WHEN MSG_IN
+
+at 0x00000187 : */ 0x1f000000,0x00000058,
+/*
+; Indicate that we should be expecting a disconnect
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x00000189 : */ 0x7c027f00,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000018b : */ 0x60000040,0x00000000,
+/*
+
+ WAIT DISCONNECT
+
+at 0x0000018d : */ 0x48000000,0x00000000,
+/*
+
+;
+; The SCSI specification states that when a UNIT ATTENTION condition
+; is pending, as indicated by a CHECK CONDITION status message,
+; the target shall revert to asynchronous transfers. Since
+; synchronous transfer parameters are maintained on a per INITIATOR/TARGET
+; basis, and returning control to our scheduler could start a command
+; running on another LUN on that target using the old parameters, we must
+; interrupt the host processor to get them changed, or change them ourselves.
+;
+; Once SCSI-II tagged queueing is implemented, things will be even more
+; hairy, since contingent allegiance conditions exist on a per-target/lun
+; basis, and issuing a new command with a different tag would clear it.
+; In these cases, we must interrupt the host processor to get a request
+; added to the HEAD of the queue with the request sense command, or we
+; must automatically issue the request sense command.
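+;
+; For reference, the 6-byte REQUEST SENSE CDB such a head-of-queue command
+; would carry (allocation length 16, matching the comment near
+; NCR53c7xx_sense above); the array below is an illustrative sketch, not
+; a structure this driver defines:
+;
+;	static const unsigned char request_sense_cdb[6] = {
+;		0x03,		// REQUEST SENSE opcode
+;		0x00,		// LUN bits (historical) / reserved
+;		0x00, 0x00,	// reserved
+;		0x10,		// allocation length: 16 bytes of sense data
+;		0x00		// control byte
+;	};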
+
+
+
+
+
+ INTFLY
+
+at 0x0000018f : */ 0x98180000,0x00000000,
+/*
+
+
+
+
+
+ JUMP schedule
+
+at 0x00000191 : */ 0x80080000,0x00000000,
+/*
+command_failed:
+ INT int_err_check_condition
+
+at 0x00000193 : */ 0x98080000,0x00030000,
+/*
+
+
+
+
+;
+; wait_reselect
+;
+; PURPOSE : This is essentially the idle routine, where control lands
+; when there are no new processes to schedule. wait_reselect
+; waits for reselection, selection, and new commands.
+;
+; When a successful reselection occurs, with the aid
+; of fixed up code in each DSA, wait_reselect walks the
+; reconnect_dsa_queue, asking each dsa if the target ID
+;	and LUN match its own.
+;
+; If a match is found, a call is made back to reselected_ok,
+;	which, through the miracles of self-modifying code, extracts
+;	the found DSA from the reconnect_dsa_queue and then
+;	returns control to the DSA's thread of execution.
+;
+; INPUTS : NONE
+;
+; CALLS : OK
+;
+; MODIFIES : DSA,
+;
+; EXITS : On successful reselection, control is returned to the
+; DSA which called reselected_ok. If the WAIT RESELECT
+;	was interrupted by a new command's arrival signaled by
+; SIG_P, control is passed to schedule. If the NCR is
+; selected, the host system is interrupted with an
+; int_err_selected which is usually responded to by
+; setting DSP to the target_abort address.
+
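+; In C terms the queue walk amounts to the following (structure and
+; helper names are illustrative only):
+;
+;	static struct dsa_sketch *find_reselected(u32 head, u8 identify)
+;	{
+;		u32 addr;
+;		struct dsa_sketch *d;
+;		for (addr = head; addr != 0; addr = d->next) {
+;			d = bus_to_virt_sketch(addr);
+;			if (dsa_matches(d, identify))	// target ID and LUN
+;				return d;		// -> reselected_ok
+;		}
+;		return 0;	// -> int_err_unexpected_reselect
+;	}
+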
+ENTRY wait_reselect
+wait_reselect:
+
+
+
+
+
+
+ WAIT RESELECT wait_reselect_failed
+
+at 0x00000195 : */ 0x50000000,0x0000076c,
+/*
+
+reselected:
+
+
+
+ CLEAR TARGET
+
+at 0x00000197 : */ 0x60000200,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000199 : */ 0x78380000,0x00000000,
+/*
+ ; Read all data needed to reestablish the nexus -
+ MOVE 1, reselected_identify, WHEN MSG_IN
+
+at 0x0000019b : */ 0x0f000001,0x00000000,
+/*
+ ; We used to CLEAR ACK here.
+
+
+
+
+
+ ; Point DSA at the current head of the disconnected queue.
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x0000019d : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, reconnect_dsa_head, addr_scratch
+
+at 0x0000019f : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000001a2 : */ 0x78380000,0x00000000,
+/*
+ CALL scratch_to_dsa
+
+at 0x000001a4 : */ 0x88080000,0x00000980,
+/*
+
+ ; Fix the update-next pointer so that the reconnect_dsa_head
+ ; pointer is the one that will be updated if this DSA is a hit
+ ; and we remove it from the queue.
+
+ MOVE MEMORY 4, addr_reconnect_dsa_head, reselected_ok + 8
+
+at 0x000001a6 : */ 0xc0000004,0x00000000,0x00000758,
+/*
+
+ENTRY reselected_check_next
+reselected_check_next:
+
+
+
+ ; Check for a NULL pointer.
+ MOVE DSA0 TO SFBR
+
+at 0x000001a9 : */ 0x72100000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001ab : */ 0x80040000,0x000006ec,
+/*
+ MOVE DSA1 TO SFBR
+
+at 0x000001ad : */ 0x72110000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001af : */ 0x80040000,0x000006ec,
+/*
+ MOVE DSA2 TO SFBR
+
+at 0x000001b1 : */ 0x72120000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001b3 : */ 0x80040000,0x000006ec,
+/*
+ MOVE DSA3 TO SFBR
+
+at 0x000001b5 : */ 0x72130000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001b7 : */ 0x80040000,0x000006ec,
+/*
+ INT int_err_unexpected_reselect
+
+at 0x000001b9 : */ 0x98080000,0x00020000,
+/*
+
+reselected_not_end:
+ ;
+ ; XXX the ALU is only eight bits wide, and the assembler
+	; won't do the dirty work for us. As long as dsa_check_reselect
+ ; is negative, we need to sign extend with 1 bits to the full
+ ; 32 bit width of the address.
+ ;
+ ; A potential work around would be to have a known alignment
+ ; of the DSA structure such that the base address plus
+ ; dsa_check_reselect doesn't require carrying from bytes
+ ; higher than the LSB.
+ ;
+
+ MOVE DSA0 TO SFBR
+
+at 0x000001bb : */ 0x72100000,0x00000000,
+/*
+ MOVE SFBR + dsa_check_reselect TO SCRATCH0
+
+at 0x000001bd : */ 0x6e340000,0x00000000,
+/*
+ MOVE DSA1 TO SFBR
+
+at 0x000001bf : */ 0x72110000,0x00000000,
+/*
+ MOVE SFBR + 0xff TO SCRATCH1 WITH CARRY
+
+at 0x000001c1 : */ 0x6f35ff00,0x00000000,
+/*
+ MOVE DSA2 TO SFBR
+
+at 0x000001c3 : */ 0x72120000,0x00000000,
+/*
+ MOVE SFBR + 0xff TO SCRATCH2 WITH CARRY
+
+at 0x000001c5 : */ 0x6f36ff00,0x00000000,
+/*
+ MOVE DSA3 TO SFBR
+
+at 0x000001c7 : */ 0x72130000,0x00000000,
+/*
+ MOVE SFBR + 0xff TO SCRATCH3 WITH CARRY
+
+at 0x000001c9 : */ 0x6f37ff00,0x00000000,
+/*
+
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x000001cb : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, reselected_check + 4
+
+at 0x000001cd : */ 0xc0000004,0x00000000,0x0000074c,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000001d0 : */ 0x78380000,0x00000000,
+/*
+reselected_check:
+ JUMP 0
+
+at 0x000001d2 : */ 0x80080000,0x00000000,
+/*
+
+
+;
+;
+ENTRY reselected_ok
+reselected_ok:
+ MOVE MEMORY 4, 0, 0 ; Patched : first word
+
+at 0x000001d4 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ ; is address of
+ ; successful dsa_next
+ ; Second word is last
+ ; unsuccessful dsa_next,
+ ; starting with
+ ; dsa_reconnect_head
+ ; We used to CLEAR ACK here.
+
+
+
+
+
+
+ RETURN ; Return control to where
+
+at 0x000001d7 : */ 0x90080000,0x00000000,
+/*
+
+
+
+
+selected:
+ INT int_err_selected;
+
+at 0x000001d9 : */ 0x98080000,0x00010000,
+/*
+
+;
+; A select or reselect failure can be caused by one of three conditions :
+; 1. SIG_P was set. This will be the case if the user has written
+; a new value to a previously NULL head of the issue queue.
+;
+; 2. The NCR53c810 was selected or reselected by another device.
+;
+; 3. The bus was already busy since we were selected or reselected
+; before starting the command.
+
+wait_reselect_failed:
+
+
+
+; Check selected bit.
+ MOVE SIST0 & 0x20 TO SFBR
+
+at 0x000001db : */ 0x74422000,0x00000000,
+/*
+ JUMP selected, IF 0x20
+
+at 0x000001dd : */ 0x800c0020,0x00000764,
+/*
+; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
+ MOVE CTEST2 & 0x40 TO SFBR
+
+at 0x000001df : */ 0x741a4000,0x00000000,
+/*
+ JUMP schedule, IF 0x40
+
+at 0x000001e1 : */ 0x800c0040,0x00000000,
+/*
+; Check connected bit.
+; FIXME: this needs to change if we support target mode
+ MOVE ISTAT & 0x08 TO SFBR
+
+at 0x000001e3 : */ 0x74140800,0x00000000,
+/*
+ JUMP reselected, IF 0x08
+
+at 0x000001e5 : */ 0x800c0008,0x0000065c,
+/*
+; FIXME : Something bogus happened, and we shouldn't fail silently.
+
+
+
+ INT int_debug_panic
+
+at 0x000001e7 : */ 0x98080000,0x030b0000,
+/*
+
+
+
+select_failed:
+
+
+
+; Otherwise, mask the selected and reselected bits off SIST0
+ MOVE SIST0 & 0x30 TO SFBR
+
+at 0x000001e9 : */ 0x74423000,0x00000000,
+/*
+ JUMP selected, IF 0x20
+
+at 0x000001eb : */ 0x800c0020,0x00000764,
+/*
+ JUMP reselected, IF 0x10
+
+at 0x000001ed : */ 0x800c0010,0x0000065c,
+/*
+; If SIGP is set, the user just gave us another command, and
+; we should restart or return to the scheduler.
+; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
+ MOVE CTEST2 & 0x40 TO SFBR
+
+at 0x000001ef : */ 0x741a4000,0x00000000,
+/*
+ JUMP select, IF 0x40
+
+at 0x000001f1 : */ 0x800c0040,0x000001fc,
+/*
+; Check connected bit.
+; FIXME: this needs to change if we support target mode
+; FIXME: is this really necessary?
+ MOVE ISTAT & 0x08 TO SFBR
+
+at 0x000001f3 : */ 0x74140800,0x00000000,
+/*
+ JUMP reselected, IF 0x08
+
+at 0x000001f5 : */ 0x800c0008,0x0000065c,
+/*
+; FIXME : Something bogus happened, and we shouldn't fail silently.
+
+
+
+ INT int_debug_panic
+
+at 0x000001f7 : */ 0x98080000,0x030b0000,
+/*
+
+
+;
+; test_1
+; test_2
+;
+; PURPOSE : run some verification tests on the NCR. test_1
+; copies test_src to test_dest and interrupts the host
+; processor, testing for cache coherency and interrupt
+;	problems in the process.
+;
+; test_2 runs a command with offsets relative to the
+; DSA on entry, and is useful for miscellaneous experimentation.
+;
+
+; Verify that interrupts are working correctly and that we don't
+; have a cache invalidation problem.
+
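+; Host-side verification sketch for test_1 (all names are illustrative):
+;
+;	*(volatile u32 *) test_src_virt = 0xdeadbeef;
+;	start_script(host, Ent_test_1);		// point DSP at the test
+;	wait_for_int_test_1(host);
+;	if (*(volatile u32 *) test_dest_virt != 0xdeadbeef)
+;		printk("scsi: test 1 failed - cache or DMA coherency problem\n");
+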
+ABSOLUTE test_src = 0, test_dest = 0
+ENTRY test_1
+test_1:
+ MOVE MEMORY 4, test_src, test_dest
+
+at 0x000001f9 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ INT int_test_1
+
+at 0x000001fc : */ 0x98080000,0x04000000,
+/*
+
+;
+; Run arbitrary commands, with test code establishing a DSA
+;
+
+ENTRY test_2
+test_2:
+ CLEAR TARGET
+
+at 0x000001fe : */ 0x60000200,0x00000000,
+/*
+ SELECT ATN FROM 0, test_2_fail
+
+at 0x00000200 : */ 0x43000000,0x00000850,
+/*
+ JUMP test_2_msgout, WHEN MSG_OUT
+
+at 0x00000202 : */ 0x860b0000,0x00000810,
+/*
+ENTRY test_2_msgout
+test_2_msgout:
+ MOVE FROM 8, WHEN MSG_OUT
+
+at 0x00000204 : */ 0x1e000000,0x00000008,
+/*
+ MOVE FROM 16, WHEN CMD
+
+at 0x00000206 : */ 0x1a000000,0x00000010,
+/*
+ MOVE FROM 24, WHEN DATA_IN
+
+at 0x00000208 : */ 0x19000000,0x00000018,
+/*
+ MOVE FROM 32, WHEN STATUS
+
+at 0x0000020a : */ 0x1b000000,0x00000020,
+/*
+ MOVE FROM 40, WHEN MSG_IN
+
+at 0x0000020c : */ 0x1f000000,0x00000028,
+/*
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x0000020e : */ 0x7c027f00,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x00000210 : */ 0x60000040,0x00000000,
+/*
+ WAIT DISCONNECT
+
+at 0x00000212 : */ 0x48000000,0x00000000,
+/*
+test_2_fail:
+ INT int_test_2
+
+at 0x00000214 : */ 0x98080000,0x04010000,
+/*
+
+ENTRY debug_break
+debug_break:
+ INT int_debug_break
+
+at 0x00000216 : */ 0x98080000,0x03000000,
+/*
+
+;
+; initiator_abort
+; target_abort
+;
+; PURPOSE : Abort the currently established nexus from within initiator
+; or target mode.
+;
+;
+
+ENTRY target_abort
+target_abort:
+ SET TARGET
+
+at 0x00000218 : */ 0x58000200,0x00000000,
+/*
+ DISCONNECT
+
+at 0x0000021a : */ 0x48000000,0x00000000,
+/*
+ CLEAR TARGET
+
+at 0x0000021c : */ 0x60000200,0x00000000,
+/*
+ JUMP schedule
+
+at 0x0000021e : */ 0x80080000,0x00000000,
+/*
+
+ENTRY initiator_abort
+initiator_abort:
+ SET ATN
+
+at 0x00000220 : */ 0x58000008,0x00000000,
+/*
+;
+; The SCSI-I specification says that targets may go into MSG out at
+; their leisure upon receipt of the ATN signal. On all versions of the
+; specification, we can't change phases until REQ transitions true->false,
+; so we need to sink/source one byte of data to allow the transition.
+;
+; For the sake of safety, we'll only source one byte of data in all
+; cases, but to accommodate the SCSI-I dain bramage, we'll sink an
+; arbitrary number of bytes.
+ JUMP spew_cmd, WHEN CMD
+
+at 0x00000222 : */ 0x820b0000,0x000008b8,
+/*
+ JUMP eat_msgin, WHEN MSG_IN
+
+at 0x00000224 : */ 0x870b0000,0x000008c8,
+/*
+ JUMP eat_datain, WHEN DATA_IN
+
+at 0x00000226 : */ 0x810b0000,0x000008f8,
+/*
+ JUMP eat_status, WHEN STATUS
+
+at 0x00000228 : */ 0x830b0000,0x000008e0,
+/*
+ JUMP spew_dataout, WHEN DATA_OUT
+
+at 0x0000022a : */ 0x800b0000,0x00000910,
+/*
+ JUMP sated
+
+at 0x0000022c : */ 0x80080000,0x00000918,
+/*
+spew_cmd:
+ MOVE 1, NCR53c7xx_zero, WHEN CMD
+
+at 0x0000022e : */ 0x0a000001,0x00000000,
+/*
+ JUMP sated
+
+at 0x00000230 : */ 0x80080000,0x00000918,
+/*
+eat_msgin:
+ MOVE 1, NCR53c7xx_sink, WHEN MSG_IN
+
+at 0x00000232 : */ 0x0f000001,0x00000000,
+/*
+ JUMP eat_msgin, WHEN MSG_IN
+
+at 0x00000234 : */ 0x870b0000,0x000008c8,
+/*
+ JUMP sated
+
+at 0x00000236 : */ 0x80080000,0x00000918,
+/*
+eat_status:
+ MOVE 1, NCR53c7xx_sink, WHEN STATUS
+
+at 0x00000238 : */ 0x0b000001,0x00000000,
+/*
+ JUMP eat_status, WHEN STATUS
+
+at 0x0000023a : */ 0x830b0000,0x000008e0,
+/*
+ JUMP sated
+
+at 0x0000023c : */ 0x80080000,0x00000918,
+/*
+eat_datain:
+ MOVE 1, NCR53c7xx_sink, WHEN DATA_IN
+
+at 0x0000023e : */ 0x09000001,0x00000000,
+/*
+ JUMP eat_datain, WHEN DATA_IN
+
+at 0x00000240 : */ 0x810b0000,0x000008f8,
+/*
+ JUMP sated
+
+at 0x00000242 : */ 0x80080000,0x00000918,
+/*
+spew_dataout:
+ MOVE 1, NCR53c7xx_zero, WHEN DATA_OUT
+
+at 0x00000244 : */ 0x08000001,0x00000000,
+/*
+sated:
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x00000246 : */ 0x7c027f00,0x00000000,
+/*
+ MOVE 1, NCR53c7xx_msg_abort, WHEN MSG_OUT
+
+at 0x00000248 : */ 0x0e000001,0x00000000,
+/*
+ WAIT DISCONNECT
+
+at 0x0000024a : */ 0x48000000,0x00000000,
+/*
+ INT int_norm_aborted
+
+at 0x0000024c : */ 0x98080000,0x02040000,
+/*
+
+;
+; dsa_to_scratch
+; scratch_to_dsa
+;
+; PURPOSE :
+; The NCR chips cannot do a move memory instruction with the DSA register
+; as the source or destination. So, we provide a couple of subroutines
+; that let us switch between the DSA register and scratch register.
+;
+; Memory moves to/from the DSPS register also don't work, but we
+; don't use them.
+;
+;
+
+
+dsa_to_scratch:
+ MOVE DSA0 TO SFBR
+
+at 0x0000024e : */ 0x72100000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH0
+
+at 0x00000250 : */ 0x6a340000,0x00000000,
+/*
+ MOVE DSA1 TO SFBR
+
+at 0x00000252 : */ 0x72110000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH1
+
+at 0x00000254 : */ 0x6a350000,0x00000000,
+/*
+ MOVE DSA2 TO SFBR
+
+at 0x00000256 : */ 0x72120000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH2
+
+at 0x00000258 : */ 0x6a360000,0x00000000,
+/*
+ MOVE DSA3 TO SFBR
+
+at 0x0000025a : */ 0x72130000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH3
+
+at 0x0000025c : */ 0x6a370000,0x00000000,
+/*
+ RETURN
+
+at 0x0000025e : */ 0x90080000,0x00000000,
+/*
+
+scratch_to_dsa:
+ MOVE SCRATCH0 TO SFBR
+
+at 0x00000260 : */ 0x72340000,0x00000000,
+/*
+ MOVE SFBR TO DSA0
+
+at 0x00000262 : */ 0x6a100000,0x00000000,
+/*
+ MOVE SCRATCH1 TO SFBR
+
+at 0x00000264 : */ 0x72350000,0x00000000,
+/*
+ MOVE SFBR TO DSA1
+
+at 0x00000266 : */ 0x6a110000,0x00000000,
+/*
+ MOVE SCRATCH2 TO SFBR
+
+at 0x00000268 : */ 0x72360000,0x00000000,
+/*
+ MOVE SFBR TO DSA2
+
+at 0x0000026a : */ 0x6a120000,0x00000000,
+/*
+ MOVE SCRATCH3 TO SFBR
+
+at 0x0000026c : */ 0x72370000,0x00000000,
+/*
+ MOVE SFBR TO DSA3
+
+at 0x0000026e : */ 0x6a130000,0x00000000,
+/*
+ RETURN
+
+at 0x00000270 : */ 0x90080000,0x00000000,
+};
+
+#define A_NCR53c7xx_msg_abort 0x00000000
+u32 A_NCR53c7xx_msg_abort_used[] = {
+ 0x00000249,
+};
+
+#define A_NCR53c7xx_msg_reject 0x00000000
+u32 A_NCR53c7xx_msg_reject_used[] = {
+ 0x00000172,
+};
+
+#define A_NCR53c7xx_sink 0x00000000
+u32 A_NCR53c7xx_sink_used[] = {
+ 0x00000233,
+ 0x00000239,
+ 0x0000023f,
+};
+
+#define A_NCR53c7xx_zero 0x00000000
+u32 A_NCR53c7xx_zero_used[] = {
+ 0x0000022f,
+ 0x00000245,
+};
+
+#define A_NOP_insn 0x00000000
+u32 A_NOP_insn_used[] = {
+ 0x00000010,
+};
+
+#define A_addr_reconnect_dsa_head 0x00000000
+u32 A_addr_reconnect_dsa_head_used[] = {
+ 0x000001a7,
+};
+
+#define A_addr_scratch 0x00000000
+u32 A_addr_scratch_used[] = {
+ 0x00000004,
+ 0x0000001b,
+ 0x00000046,
+ 0x00000067,
+ 0x00000073,
+ 0x000000b0,
+ 0x000000c6,
+ 0x00000128,
+ 0x00000141,
+ 0x000001a1,
+ 0x000001ce,
+};
+
+#define A_addr_temp 0x00000000
+u32 A_addr_temp_used[] = {
+ 0x00000025,
+ 0x00000034,
+};
+
+#define A_dmode_memory_to_memory 0x00000000
+u32 A_dmode_memory_to_memory_used[] = {
+ 0x00000005,
+ 0x0000001c,
+ 0x00000027,
+ 0x00000035,
+ 0x00000047,
+ 0x00000069,
+ 0x00000075,
+ 0x000000b2,
+ 0x000000c8,
+ 0x0000012a,
+ 0x00000143,
+ 0x00000199,
+ 0x000001a2,
+ 0x000001d0,
+};
+
+#define A_dmode_memory_to_ncr 0x00000000
+u32 A_dmode_memory_to_ncr_used[] = {
+ 0x00000000,
+ 0x00000017,
+ 0x00000030,
+ 0x00000042,
+ 0x0000019d,
+};
+
+#define A_dmode_ncr_to_memory 0x00000000
+u32 A_dmode_ncr_to_memory_used[] = {
+ 0x00000022,
+ 0x00000064,
+ 0x00000070,
+ 0x000000ad,
+ 0x000000c3,
+ 0x00000125,
+ 0x0000013e,
+ 0x000001cb,
+};
+
+#define A_dsa_check_reselect 0x00000000
+u32 A_dsa_check_reselect_used[] = {
+ 0x000001bd,
+};
+
+#define A_dsa_cmdout 0x00000048
+u32 A_dsa_cmdout_used[] = {
+ 0x00000094,
+};
+
+#define A_dsa_cmnd 0x00000038
+u32 A_dsa_cmnd_used[] = {
+};
+
+#define A_dsa_datain 0x00000054
+u32 A_dsa_datain_used[] = {
+ 0x000000bb,
+};
+
+#define A_dsa_dataout 0x00000050
+u32 A_dsa_dataout_used[] = {
+ 0x000000a5,
+};
+
+#define A_dsa_end 0x00000070
+u32 A_dsa_end_used[] = {
+};
+
+#define A_dsa_fields_start 0x00000000
+u32 A_dsa_fields_start_used[] = {
+};
+
+#define A_dsa_msgin 0x00000058
+u32 A_dsa_msgin_used[] = {
+ 0x00000188,
+};
+
+#define A_dsa_msgout 0x00000040
+u32 A_dsa_msgout_used[] = {
+ 0x00000086,
+};
+
+#define A_dsa_msgout_other 0x00000068
+u32 A_dsa_msgout_other_used[] = {
+ 0x00000180,
+};
+
+#define A_dsa_next 0x00000030
+u32 A_dsa_next_used[] = {
+ 0x0000005c,
+};
+
+#define A_dsa_restore_pointers 0x00000000
+u32 A_dsa_restore_pointers_used[] = {
+ 0x0000012e,
+};
+
+#define A_dsa_save_data_pointer 0x00000000
+u32 A_dsa_save_data_pointer_used[] = {
+ 0x00000115,
+};
+
+#define A_dsa_select 0x0000003c
+u32 A_dsa_select_used[] = {
+ 0x00000081,
+};
+
+#define A_dsa_status 0x00000060
+u32 A_dsa_status_used[] = {
+ 0x00000184,
+};
+
+#define A_dsa_temp_addr_array_value 0x00000000
+u32 A_dsa_temp_addr_array_value_used[] = {
+};
+
+#define A_dsa_temp_addr_dsa_value 0x00000000
+u32 A_dsa_temp_addr_dsa_value_used[] = {
+ 0x00000003,
+};
+
+#define A_dsa_temp_addr_new_value 0x00000000
+u32 A_dsa_temp_addr_new_value_used[] = {
+};
+
+#define A_dsa_temp_addr_next 0x00000000
+u32 A_dsa_temp_addr_next_used[] = {
+ 0x00000015,
+ 0x0000004e,
+};
+
+#define A_dsa_temp_addr_residual 0x00000000
+u32 A_dsa_temp_addr_residual_used[] = {
+ 0x0000002a,
+ 0x00000039,
+};
+
+#define A_dsa_temp_addr_saved_pointer 0x00000000
+u32 A_dsa_temp_addr_saved_pointer_used[] = {
+ 0x00000026,
+ 0x00000033,
+};
+
+#define A_dsa_temp_addr_saved_residual 0x00000000
+u32 A_dsa_temp_addr_saved_residual_used[] = {
+ 0x0000002b,
+ 0x00000038,
+};
+
+#define A_dsa_temp_lun 0x00000000
+u32 A_dsa_temp_lun_used[] = {
+ 0x0000004b,
+};
+
+#define A_dsa_temp_next 0x00000000
+u32 A_dsa_temp_next_used[] = {
+ 0x0000001a,
+};
+
+#define A_dsa_temp_sync 0x00000000
+u32 A_dsa_temp_sync_used[] = {
+ 0x00000053,
+};
+
+#define A_dsa_temp_target 0x00000000
+u32 A_dsa_temp_target_used[] = {
+ 0x00000040,
+};
+
+#define A_int_debug_break 0x03000000
+u32 A_int_debug_break_used[] = {
+ 0x00000217,
+};
+
+#define A_int_debug_panic 0x030b0000
+u32 A_int_debug_panic_used[] = {
+ 0x000001e8,
+ 0x000001f8,
+};
+
+#define A_int_err_check_condition 0x00030000
+u32 A_int_err_check_condition_used[] = {
+ 0x00000194,
+};
+
+#define A_int_err_no_phase 0x00040000
+u32 A_int_err_no_phase_used[] = {
+};
+
+#define A_int_err_selected 0x00010000
+u32 A_int_err_selected_used[] = {
+ 0x000001da,
+};
+
+#define A_int_err_unexpected_phase 0x00000000
+u32 A_int_err_unexpected_phase_used[] = {
+ 0x0000008c,
+ 0x00000092,
+ 0x0000009a,
+ 0x000000d0,
+ 0x000000d4,
+ 0x000000d6,
+ 0x000000de,
+ 0x000000e2,
+ 0x000000e4,
+ 0x000000ec,
+ 0x000000f0,
+ 0x000000f2,
+ 0x000000f4,
+ 0x0000014c,
+};
+
+#define A_int_err_unexpected_reselect 0x00020000
+u32 A_int_err_unexpected_reselect_used[] = {
+ 0x000001ba,
+};
+
+#define A_int_msg_1 0x01020000
+u32 A_int_msg_1_used[] = {
+ 0x0000010e,
+ 0x00000110,
+};
+
+#define A_int_msg_sdtr 0x01010000
+u32 A_int_msg_sdtr_used[] = {
+ 0x0000016c,
+};
+
+#define A_int_msg_wdtr 0x01000000
+u32 A_int_msg_wdtr_used[] = {
+ 0x00000160,
+};
+
+#define A_int_norm_aborted 0x02040000
+u32 A_int_norm_aborted_used[] = {
+ 0x0000024d,
+};
+
+#define A_int_norm_command_complete 0x02020000
+u32 A_int_norm_command_complete_used[] = {
+};
+
+#define A_int_norm_disconnected 0x02030000
+u32 A_int_norm_disconnected_used[] = {
+};
+
+#define A_int_norm_reselect_complete 0x02010000
+u32 A_int_norm_reselect_complete_used[] = {
+};
+
+#define A_int_norm_reset 0x02050000
+u32 A_int_norm_reset_used[] = {
+};
+
+#define A_int_norm_select_complete 0x02000000
+u32 A_int_norm_select_complete_used[] = {
+};
+
+#define A_int_test_1 0x04000000
+u32 A_int_test_1_used[] = {
+ 0x000001fd,
+};
+
+#define A_int_test_2 0x04010000
+u32 A_int_test_2_used[] = {
+ 0x00000215,
+};
+
+#define A_int_test_3 0x04020000
+u32 A_int_test_3_used[] = {
+};
+
+#define A_msg_buf 0x00000000
+u32 A_msg_buf_used[] = {
+ 0x00000102,
+ 0x0000014e,
+ 0x00000158,
+ 0x0000015e,
+ 0x00000164,
+ 0x0000016a,
+};
+
+#define A_reconnect_dsa_head 0x00000000
+u32 A_reconnect_dsa_head_used[] = {
+ 0x0000006c,
+ 0x00000074,
+ 0x000001a0,
+};
+
+#define A_reselected_identify 0x00000000
+u32 A_reselected_identify_used[] = {
+ 0x00000045,
+ 0x0000019c,
+};
+
+#define A_reselected_tag 0x00000000
+u32 A_reselected_tag_used[] = {
+};
+
+#define A_schedule 0x00000000
+u32 A_schedule_used[] = {
+ 0x0000007e,
+ 0x00000192,
+ 0x000001e2,
+ 0x0000021f,
+};
+
+#define A_test_dest 0x00000000
+u32 A_test_dest_used[] = {
+ 0x000001fb,
+};
+
+#define A_test_src 0x00000000
+u32 A_test_src_used[] = {
+ 0x000001fa,
+};
+
+#define Ent_accept_message 0x000005d4
+#define Ent_cmdout_cmdout 0x0000024c
+#define Ent_command_complete 0x0000060c
+#define Ent_command_complete_msgin 0x0000061c
+#define Ent_data_transfer 0x00000254
+#define Ent_datain_to_jump 0x00000328
+#define Ent_debug_break 0x00000858
+#define Ent_dsa_code_begin 0x00000000
+#define Ent_dsa_code_check_reselect 0x000000f8
+#define Ent_dsa_code_fix_jump 0x0000003c
+#define Ent_dsa_code_restore_pointers 0x000000c0
+#define Ent_dsa_code_save_data_pointer 0x00000088
+#define Ent_dsa_code_template 0x00000000
+#define Ent_dsa_code_template_end 0x00000168
+#define Ent_dsa_schedule 0x00000168
+#define Ent_dsa_zero 0x00000168
+#define Ent_end_data_transfer 0x0000028c
+#define Ent_initiator_abort 0x00000880
+#define Ent_msg_in 0x00000404
+#define Ent_msg_in_restart 0x000003e4
+#define Ent_other_in 0x00000374
+#define Ent_other_out 0x0000033c
+#define Ent_other_transfer 0x000003ac
+#define Ent_reject_message 0x000005b4
+#define Ent_reselected_check_next 0x000006a4
+#define Ent_reselected_ok 0x00000750
+#define Ent_respond_message 0x000005ec
+#define Ent_select 0x000001fc
+#define Ent_select_msgout 0x00000214
+#define Ent_target_abort 0x00000860
+#define Ent_test_1 0x000007e4
+#define Ent_test_2 0x000007f8
+#define Ent_test_2_msgout 0x00000810
+#define Ent_wait_reselect 0x00000654
+u32 LABELPATCHES[] = {
+ 0x00000008,
+ 0x0000000a,
+ 0x00000013,
+ 0x00000016,
+ 0x0000001f,
+ 0x00000021,
+ 0x0000004f,
+ 0x00000051,
+ 0x0000005b,
+ 0x00000068,
+ 0x0000006f,
+ 0x00000082,
+ 0x00000084,
+ 0x0000008a,
+ 0x0000008e,
+ 0x00000090,
+ 0x00000096,
+ 0x00000098,
+ 0x0000009c,
+ 0x0000009e,
+ 0x000000a0,
+ 0x000000a2,
+ 0x000000a4,
+ 0x000000b1,
+ 0x000000b6,
+ 0x000000ba,
+ 0x000000c7,
+ 0x000000cc,
+ 0x000000d2,
+ 0x000000d8,
+ 0x000000da,
+ 0x000000e0,
+ 0x000000e6,
+ 0x000000e8,
+ 0x000000ee,
+ 0x000000f6,
+ 0x000000f8,
+ 0x00000104,
+ 0x00000106,
+ 0x00000108,
+ 0x0000010a,
+ 0x0000010c,
+ 0x00000112,
+ 0x00000114,
+ 0x00000129,
+ 0x00000142,
+ 0x00000148,
+ 0x00000150,
+ 0x00000152,
+ 0x00000154,
+ 0x0000015a,
+ 0x00000166,
+ 0x00000196,
+ 0x000001a5,
+ 0x000001a8,
+ 0x000001ac,
+ 0x000001b0,
+ 0x000001b4,
+ 0x000001b8,
+ 0x000001cf,
+ 0x000001de,
+ 0x000001e6,
+ 0x000001ec,
+ 0x000001ee,
+ 0x000001f2,
+ 0x000001f6,
+ 0x00000201,
+ 0x00000203,
+ 0x00000223,
+ 0x00000225,
+ 0x00000227,
+ 0x00000229,
+ 0x0000022b,
+ 0x0000022d,
+ 0x00000231,
+ 0x00000235,
+ 0x00000237,
+ 0x0000023b,
+ 0x0000023d,
+ 0x00000241,
+ 0x00000243,
+};
+
+struct {
+ u32 offset;
+ void *address;
+} EXTERNAL_PATCHES[] = {
+};
+
+u32 INSTRUCTIONS = 301;
+u32 PATCHES = 81;
+u32 EXTERNAL_PATCHES_LEN = 0;
diff --git a/i386/i386at/gpl/linux/scsi/53c8xx_u.h b/i386/i386at/gpl/linux/scsi/53c8xx_u.h
new file mode 100644
index 00000000..c3d486fe
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/53c8xx_u.h
@@ -0,0 +1,97 @@
+#undef A_NCR53c7xx_msg_abort
+#undef A_NCR53c7xx_msg_reject
+#undef A_NCR53c7xx_sink
+#undef A_NCR53c7xx_zero
+#undef A_NOP_insn
+#undef A_addr_reconnect_dsa_head
+#undef A_addr_scratch
+#undef A_addr_temp
+#undef A_dmode_memory_to_memory
+#undef A_dmode_memory_to_ncr
+#undef A_dmode_ncr_to_memory
+#undef A_dsa_check_reselect
+#undef A_dsa_cmdout
+#undef A_dsa_cmnd
+#undef A_dsa_datain
+#undef A_dsa_dataout
+#undef A_dsa_end
+#undef A_dsa_fields_start
+#undef A_dsa_msgin
+#undef A_dsa_msgout
+#undef A_dsa_msgout_other
+#undef A_dsa_next
+#undef A_dsa_restore_pointers
+#undef A_dsa_save_data_pointer
+#undef A_dsa_select
+#undef A_dsa_status
+#undef A_dsa_temp_addr_array_value
+#undef A_dsa_temp_addr_dsa_value
+#undef A_dsa_temp_addr_new_value
+#undef A_dsa_temp_addr_next
+#undef A_dsa_temp_addr_residual
+#undef A_dsa_temp_addr_saved_pointer
+#undef A_dsa_temp_addr_saved_residual
+#undef A_dsa_temp_lun
+#undef A_dsa_temp_next
+#undef A_dsa_temp_sync
+#undef A_dsa_temp_target
+#undef A_int_debug_break
+#undef A_int_debug_panic
+#undef A_int_err_check_condition
+#undef A_int_err_no_phase
+#undef A_int_err_selected
+#undef A_int_err_unexpected_phase
+#undef A_int_err_unexpected_reselect
+#undef A_int_msg_1
+#undef A_int_msg_sdtr
+#undef A_int_msg_wdtr
+#undef A_int_norm_aborted
+#undef A_int_norm_command_complete
+#undef A_int_norm_disconnected
+#undef A_int_norm_reselect_complete
+#undef A_int_norm_reset
+#undef A_int_norm_select_complete
+#undef A_int_test_1
+#undef A_int_test_2
+#undef A_int_test_3
+#undef A_msg_buf
+#undef A_reconnect_dsa_head
+#undef A_reselected_identify
+#undef A_reselected_tag
+#undef A_schedule
+#undef A_test_dest
+#undef A_test_src
+#undef Ent_accept_message
+#undef Ent_cmdout_cmdout
+#undef Ent_command_complete
+#undef Ent_command_complete_msgin
+#undef Ent_data_transfer
+#undef Ent_datain_to_jump
+#undef Ent_debug_break
+#undef Ent_dsa_code_begin
+#undef Ent_dsa_code_check_reselect
+#undef Ent_dsa_code_fix_jump
+#undef Ent_dsa_code_restore_pointers
+#undef Ent_dsa_code_save_data_pointer
+#undef Ent_dsa_code_template
+#undef Ent_dsa_code_template_end
+#undef Ent_dsa_schedule
+#undef Ent_dsa_zero
+#undef Ent_end_data_transfer
+#undef Ent_initiator_abort
+#undef Ent_msg_in
+#undef Ent_msg_in_restart
+#undef Ent_other_in
+#undef Ent_other_out
+#undef Ent_other_transfer
+#undef Ent_reject_message
+#undef Ent_reselected_check_next
+#undef Ent_reselected_ok
+#undef Ent_respond_message
+#undef Ent_select
+#undef Ent_select_msgout
+#undef Ent_target_abort
+#undef Ent_test_1
+#undef Ent_test_2
+#undef Ent_test_2_msgout
+#undef Ent_wait_reselect
diff --git a/i386/i386at/gpl/linux/scsi/AM53C974.c b/i386/i386at/gpl/linux/scsi/AM53C974.c
new file mode 100644
index 00000000..5d092c9a
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/AM53C974.c
@@ -0,0 +1,2249 @@
+#include <linux/config.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/blk.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "scsi.h"
+#include "hosts.h"
+#include "AM53C974.h"
+#include "constants.h"
+#include "sd.h"
+
+/* AM53/79C974 (PCscsi) driver release 0.5
+ *
+ * The architecture and much of the code of this device
+ * driver was originally developed by Drew Eckhardt for
+ * the NCR5380. The following copyrights apply:
+ * For the architecture and all pieces of code which can also be found
+ * in the NCR5380 device driver:
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * The AM53C974_nobios_detect code was originally developed by
+ * Robin Cutshaw (robin@xfree86.org) and is used here in a
+ * slightly modified form.
+ *
+ * For the remaining code:
+ * Copyright 1994, D. Frieauff
+ * EMail: fri@rsx42sun0.dofn.de
+ * Phone: x49-7545-8-2256 , x49-7541-42305
+ */
+
+/*
+ * $Log: AM53C974.c,v $
+ * Revision 1.1.1.1 1996/10/30 01:39:58 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1996/03/25 20:25:05 goel
+ * Linux driver merge.
+ *
+ */
+
+#ifdef AM53C974_DEBUG
+ #define DEB(x) x
+ #ifdef AM53C974_DEBUG_KEYWAIT
+ #define KEYWAIT() AM53C974_keywait()
+ #else
+ #define KEYWAIT()
+ #endif
+ #ifdef AM53C974_DEBUG_INIT
+ #define DEB_INIT(x) x
+ #else
+ #define DEB_INIT(x)
+ #endif
+ #ifdef AM53C974_DEBUG_MSG
+ #define DEB_MSG(x) x
+ #else
+ #define DEB_MSG(x)
+ #endif
+ #ifdef AM53C974_DEB_RESEL
+ #define DEB_RESEL(x) x
+ #else
+ #define DEB_RESEL(x)
+ #endif
+ #ifdef AM53C974_DEBUG_QUEUE
+ #define DEB_QUEUE(x) x
+ #define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
+ #define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
+ #else
+ #define DEB_QUEUE(x)
+ #define LIST(x,y)
+ #define REMOVE(w,x,y,z)
+ #endif
+ #ifdef AM53C974_DEBUG_INFO
+ #define DEB_INFO(x) x
+ #else
+ #define DEB_INFO(x)
+ #endif
+ #ifdef AM53C974_DEBUG_LINKED
+ #define DEB_LINKED(x) x
+ #else
+ #define DEB_LINKED(x)
+ #endif
+ #ifdef AM53C974_DEBUG_INTR
+ #define DEB_INTR(x) x
+ #else
+ #define DEB_INTR(x)
+ #endif
+#else
+ #define DEB_INIT(x)
+ #define DEB(x)
+ #define DEB_QUEUE(x)
+ #define LIST(x,y)
+ #define REMOVE(w,x,y,z)
+ #define DEB_INFO(x)
+ #define DEB_LINKED(x)
+ #define DEB_INTR(x)
+ #define DEB_MSG(x)
+ #define DEB_RESEL(x)
+ #define KEYWAIT()
+#endif
+ #ifdef AM53C974_DEBUG_ABORT
+ #define DEB_ABORT(x) x
+ #else
+ #define DEB_ABORT(x)
+ #endif
+
+#ifdef VERBOSE_AM53C974_DEBUG
+#define VDEB(x) x
+#else
+#define VDEB(x)
+#endif
+
+#define INSIDE(x,l,h) ( ((x) >= (l)) && ((x) <= (h)) )
+
+#ifdef AM53C974_DEBUG
+static void AM53C974_print_pci(struct Scsi_Host *instance);
+static void AM53C974_print_phase(struct Scsi_Host *instance);
+static void AM53C974_print_queues(struct Scsi_Host *instance);
+#endif /* AM53C974_DEBUG */
+static void AM53C974_print(struct Scsi_Host *instance);
+static void AM53C974_keywait(void);
+static int AM53C974_bios_detect(Scsi_Host_Template *tpnt);
+static int AM53C974_nobios_detect(Scsi_Host_Template *tpnt);
+static int AM53C974_init(Scsi_Host_Template *tpnt, pci_config_t pci_config);
+static void AM53C974_config_after_reset(struct Scsi_Host *instance);
+static __inline__ void initialize_SCp(Scsi_Cmnd *cmd);
+static __inline__ void run_main(void);
+static void AM53C974_main (void);
+static void AM53C974_intr(int irq, struct pt_regs *regs);
+static void AM53C974_intr_disconnect(struct Scsi_Host *instance);
+static int AM53C974_sync_neg(struct Scsi_Host *instance, int target, unsigned char *msg);
+static __inline__ void AM53C974_set_async(struct Scsi_Host *instance, int target);
+static __inline__ void AM53C974_set_sync(struct Scsi_Host *instance, int target);
+static void AM53C974_information_transfer(struct Scsi_Host *instance,
+ unsigned char statreg, unsigned char isreg,
+ unsigned char instreg, unsigned char cfifo,
+ unsigned char dmastatus);
+static int AM53C974_message(struct Scsi_Host *instance, Scsi_Cmnd *cmd, unsigned char msg);
+static void AM53C974_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag);
+static void AM53C974_intr_reselect(struct Scsi_Host *instance, unsigned char statreg);
+static __inline__ void AM53C974_transfer_dma(struct Scsi_Host *instance, short dir,
+ unsigned long length, char *data);
+static void AM53C974_dma_blast(struct Scsi_Host *instance, unsigned char dmastatus,
+ unsigned char statreg);
+static void AM53C974_intr_bus_reset(struct Scsi_Host *instance);
+
+static struct Scsi_Host *first_instance = NULL;
+static Scsi_Host_Template *the_template = NULL;
+static struct Scsi_Host *first_host = NULL; /* Head of list of AMD boards */
+static volatile int main_running = 0;
+static int commandline_current = 0;
+override_t overrides[7] = { {-1, 0, 0, 0}, }; /* LILO overrides */
+
+#ifdef AM53C974_DEBUG
+static int deb_stop = 1;
+
+/**************************************************************************
+ * Function : void AM53C974_print_pci(struct Scsi_Host *instance)
+ *
+ * Purpose : dump the PCI registers for debugging purposes
+ *
+ * Input : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print_pci(struct Scsi_Host *instance)
+{
+int i;
+unsigned short vendor_id, device_id, command, status, scratch[8];
+unsigned long class_revision, base;
+unsigned char irq, cache_line_size, latency_timer, header_type;
+
+AM53C974_PCIREG_OPEN();
+
+for (i = 0; i < 8; i++) *(scratch + i) = AM53C974_PCIREG_READ_WORD(instance, PCI_SCRATCH_REG_0 + 2*i);
+vendor_id = AM53C974_PCIREG_READ_WORD(instance, PCI_VENDOR_ID);
+device_id = AM53C974_PCIREG_READ_WORD(instance, PCI_DEVICE_ID);
+command = AM53C974_PCIREG_READ_WORD(instance, PCI_COMMAND);
+status = AM53C974_PCIREG_READ_WORD(instance, PCI_STATUS);
+class_revision = AM53C974_PCIREG_READ_DWORD(instance, PCI_CLASS_REVISION);
+cache_line_size = AM53C974_PCIREG_READ_BYTE(instance, PCI_CACHE_LINE_SIZE);
+latency_timer = AM53C974_PCIREG_READ_BYTE(instance, PCI_LATENCY_TIMER);
+header_type = AM53C974_PCIREG_READ_BYTE(instance, PCI_HEADER_TYPE);
+base = AM53C974_PCIREG_READ_DWORD(instance, PCI_BASE_ADDRESS_0);
+irq = AM53C974_PCIREG_READ_BYTE(instance, PCI_INTERRUPT_LINE);
+
+AM53C974_PCIREG_CLOSE();
+
+
+printk("------------- start of PCI register dump -------------\n");
+printk("PCI_VENDOR_ID: 0x%x\n", vendor_id);
+printk("PCI_DEVICE_ID: 0x%x\n", device_id);
+printk("PCI_COMMAND: 0x%x\n", command);
+printk("PCI_STATUS: 0x%x\n", status);
+printk("PCI_CLASS_REVISION: 0x%lx\n", class_revision);
+printk("PCI_CACHE_LINE_SIZE: 0x%x\n", cache_line_size);
+printk("PCI_LATENCY_TIMER: 0x%x\n", latency_timer);
+printk("PCI_HEADER_TYPE: 0x%x\n", header_type);
+printk("PCI_BASE_ADDRESS_0: 0x%lx\n", base);
+printk("PCI_INTERRUPT_LINE: %d\n", irq);
+for (i = 0; i < 8; i++) printk("PCI_SCRATCH_%d: 0x%x\n", i, scratch[i]);
+printk("------------- end of PCI register dump -------------\n\n");
+}
+
+static struct {
+ unsigned char value;
+ char *name;
+} phases[] = {
+{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"},
+{PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
+{PHASE_RES_0, "RESERVED 0"}, {PHASE_RES_1, "RESERVED 1"}};
+
+/**************************************************************************
+ * Function : void AM53C974_print_phase(struct Scsi_Host *instance)
+ *
+ * Purpose : print the current SCSI phase for debugging purposes
+ *
+ * Input : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print_phase(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+unsigned char statreg, latched;
+int i;
+AM53C974_setio(instance);
+
+latched = (AM53C974_read_8(CNTLREG2)) & CNTLREG2_ENF;
+statreg = AM53C974_read_8(STATREG);
+for (i = 0; (phases[i].value != PHASE_RES_1) &&
+ (phases[i].value != (statreg & STATREG_PHASE)); ++i);
+if (latched)
+ printk("scsi%d : phase %s, latched at end of last command\n", instance->host_no, phases[i].name);
+ else
+ printk("scsi%d : phase %s, real time\n", instance->host_no, phases[i].name);
+}
+
+/**************************************************************************
+ * Function : void AM53C974_print_queues(struct Scsi_Host *instance)
+ *
+ * Purpose : print commands in the various queues
+ *
+ * Inputs : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print_queues(struct Scsi_Host *instance)
+{
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *ptr;
+
+printk("AM53C974: coroutine is%s running.\n", main_running ? "" : "n't");
+
+cli();
+
+if (!hostdata->connected) {
+ printk ("scsi%d: no currently connected command\n", instance->host_no); }
+ else {
+ print_Scsi_Cmnd ((Scsi_Cmnd *)hostdata->connected); }
+if (!hostdata->sel_cmd) {
+ printk ("scsi%d: no currently arbitrating command\n", instance->host_no); }
+ else {
+ print_Scsi_Cmnd ((Scsi_Cmnd *)hostdata->sel_cmd); }
+
+printk ("scsi%d: issue_queue ", instance->host_no);
+if (!hostdata->issue_queue)
+ printk("empty\n");
+ else {
+ printk(":\n");
+ for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *)ptr->host_scribble)
+ print_Scsi_Cmnd (ptr); }
+
+printk ("scsi%d: disconnected_queue ", instance->host_no);
+if (!hostdata->disconnected_queue)
+ printk("empty\n");
+ else {
+ printk(":\n");
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *)ptr->host_scribble)
+ print_Scsi_Cmnd (ptr); }
+
+sti();
+}
+
+#endif /* AM53C974_DEBUG */
+
+/**************************************************************************
+ * Function : void AM53C974_print(struct Scsi_Host *instance)
+ *
+ * Purpose : dump the chip registers for debugging purposes
+ *
+ * Input : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+unsigned long ctcreg, dmastc, dmaspa, dmawbc, dmawac;
+unsigned char cmdreg, statreg, isreg, cfireg, cntlreg[4], dmacmd, dmastatus;
+AM53C974_setio(instance);
+
+cli();
+ctcreg = AM53C974_read_8(CTCHREG) << 16;
+ctcreg |= AM53C974_read_8(CTCMREG) << 8;
+ctcreg |= AM53C974_read_8(CTCLREG);
+cmdreg = AM53C974_read_8(CMDREG);
+statreg = AM53C974_read_8(STATREG);
+isreg = AM53C974_read_8(ISREG);
+cfireg = AM53C974_read_8(CFIREG);
+cntlreg[0] = AM53C974_read_8(CNTLREG1);
+cntlreg[1] = AM53C974_read_8(CNTLREG2);
+cntlreg[2] = AM53C974_read_8(CNTLREG3);
+cntlreg[3] = AM53C974_read_8(CNTLREG4);
+dmacmd = AM53C974_read_8(DMACMD);
+dmastc = AM53C974_read_32(DMASTC);
+dmaspa = AM53C974_read_32(DMASPA);
+dmawbc = AM53C974_read_32(DMAWBC);
+dmawac = AM53C974_read_32(DMAWAC);
+dmastatus = AM53C974_read_8(DMASTATUS);
+sti();
+
+printk("AM53C974 register dump:\n");
+printk("IO base: 0x%04lx; CTCREG: 0x%04lx; CMDREG: 0x%02x; STATREG: 0x%02x; ISREG: 0x%02x\n",
+ io_port, ctcreg, cmdreg, statreg, isreg);
+printk("CFIREG: 0x%02x; CNTLREG1-4: 0x%02x; 0x%02x; 0x%02x; 0x%02x\n",
+ cfireg, cntlreg[0], cntlreg[1], cntlreg[2], cntlreg[3]);
+printk("DMACMD: 0x%02x; DMASTC: 0x%04lx; DMASPA: 0x%04lx\n", dmacmd, dmastc, dmaspa);
+printk("DMAWBC: 0x%04lx; DMAWAC: 0x%04lx; DMASTATUS: 0x%02x\n", dmawbc, dmawac, dmastatus);
+printk("---------------------------------------------------------\n");
+}
+
+/**************************************************************************
+* Function : void AM53C974_keywait(void)
+*
+* Purpose : wait until a key is pressed; if it was the 'r' key, leave single-step mode;
+* this function is used for debugging only
+*
+* Input : none
+**************************************************************************/
+static void AM53C974_keywait(void)
+{
+#ifdef AM53C974_DEBUG
+int key;
+
+if (!deb_stop) return;
+#endif
+
+cli();
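+/* busy-wait on the AT keyboard controller: port 0x64 is its status register
+ (bit 0 = output buffer full), port 0x60 its data port */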
+while ((inb_p(0x64) & 0x01) != 0x01) ;
+#ifdef AM53C974_DEBUG
+key = inb(0x60);
+if (key == 0x93) deb_stop = 0; /* 'r' key (scan code 0x93): leave single-step mode */
+#endif
+sti();
+}
+
+/**************************************************************************
+* Function : AM53C974_setup(char *str, int *ints)
+*
+* Purpose : LILO command line initialization of the overrides array,
+*
+* Inputs : str - unused, ints - array of integer parameters with ints[0]
+* equal to the number of ints.
+*
+* NOTE : this function needs to be declared as an external function
+* in init/main.c and included there in the bootsetups list
+***************************************************************************/
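+/* Illustrative example only (values are not driver defaults and must lie within
+ the DEF_CLK/MAX_PERIOD..DEF_CLK/MIN_PERIOD and MAX_OFFSET limits checked below):
+ a boot line of "AM53C974=7,2,10,15" asks for host SCSI id 7, target id 2,
+ a maximum sync. rate of 10 MHz and a maximum sync. offset of 15 bytes. */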
+void AM53C974_setup(char *str, int *ints)
+{
+if (ints[0] < 4)
+ printk("AM53C974_setup: wrong number of parameters;\n correct syntax is: AM53C974=host-scsi-id, target-scsi-id, max-rate, max-offset\n");
+ else {
+ if (commandline_current < (sizeof(overrides) / sizeof(override_t))) {
+ if ((ints[1] < 0) || (ints[1] > 7) ||
+ (ints[2] < 0) || (ints[2] > 7) ||
+ (ints[1] == ints[2]) ||
+ (ints[3] < (DEF_CLK / MAX_PERIOD)) || (ints[3] > (DEF_CLK / MIN_PERIOD)) ||
+ (ints[4] < 0) || (ints[4] > MAX_OFFSET))
+ printk("AM53C974_setup: illegal parameter\n");
+ else {
+ overrides[commandline_current].host_scsi_id = ints[1];
+ overrides[commandline_current].target_scsi_id = ints[2];
+ overrides[commandline_current].max_rate = ints[3];
+ overrides[commandline_current].max_offset = ints[4];
+ commandline_current++; }
+ }
+ else
+ printk("AM53C974_setup: too many overrides\n");
+ }
+}
+
+#if defined (CONFIG_PCI)
+/**************************************************************************
+* Function : int AM53C974_bios_detect(Scsi_Host_Template *tpnt)
+*
+* Purpose : detects and initializes AM53C974 SCSI chips with PCI Bios
+*
+* Inputs : tpnt - host template
+*
+* Returns : number of host adapters detected
+**************************************************************************/
+int AM53C974_bios_detect(Scsi_Host_Template *tpnt)
+{
+int count = 0; /* number of boards detected */
+int pci_index;
+pci_config_t pci_config;
+
+for (pci_index = 0; pci_index <= 16; ++pci_index) {
+ unsigned char pci_bus, pci_device_fn;
+ if (pcibios_find_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI, pci_index, &pci_bus, &pci_device_fn) != 0)
+ break;
+
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_VENDOR_ID, &pci_config._vendor);
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_DEVICE_ID, &pci_config._device);
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_COMMAND, &pci_config._command);
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_STATUS, &pci_config._status);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_CLASS_REVISION, &pci_config._class_revision);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_CACHE_LINE_SIZE, &pci_config._cache_line_size);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_LATENCY_TIMER, &pci_config._latency_timer);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_HEADER_TYPE, &pci_config._header_type);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_BIST, &pci_config._bist);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_0, &pci_config._base0);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_1, &pci_config._base1);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_2, &pci_config._base2);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_3, &pci_config._base3);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_4, &pci_config._base4);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_5, &pci_config._base5);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_ROM_ADDRESS, &pci_config._baserom);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_INTERRUPT_LINE, &pci_config._int_line);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_INTERRUPT_PIN, &pci_config._int_pin);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_MIN_GNT, &pci_config._min_gnt);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_MAX_LAT, &pci_config._max_lat);
+ pci_config._pcibus = 0xFFFFFFFF;
+ pci_config._cardnum = 0xFFFFFFFF;
+
+ /* check whether device is I/O mapped -- should be */
+ if (!(pci_config._command & PCI_COMMAND_IO)) continue;
+
+ /* PCI Spec 2.1 states that it is either the driver's or the PCI card's responsibility
+ to set the PCI Master Enable Bit if needed.
+ (from Mark Stockton <marks@schooner.sys.hou.compaq.com>) */
+ if (!(pci_config._command & PCI_COMMAND_MASTER)) {
+ pci_config._command |= PCI_COMMAND_MASTER;
+ printk("PCI Master Bit has not been set. Setting...\n");
+ pcibios_write_config_word(pci_bus, pci_device_fn, PCI_COMMAND, pci_config._command); }
+
+ /* everything seems OK now, so initialize */
+ if (AM53C974_init(tpnt, pci_config)) count++ ;
+ }
+return (count);
+}
+#endif
+
+/**************************************************************************
+* Function : int AM53C974_nobios_detect(Scsi_Host_Template *tpnt)
+*
+* Purpose : detects and initializes AM53C974 SCSI chips without PCI BIOS,
+* using direct PCI configuration access (mechanism 1, then mechanism 2)
+*
+* Inputs : tpnt - host template
+*
+* Returns : number of host adapters detected
+*
+* NOTE : This code assumes the controller is on PCI bus 0.
+*
+* Origin: Robin Cutshaw (robin@xfree86.org)
+**************************************************************************/
+int AM53C974_nobios_detect(Scsi_Host_Template *tpnt)
+{
+int count = 0; /* number of boards detected */
+pci_config_t pci_config;
+
+/* first try PCI config method 1 */
+for (pci_config._pcibus = 0; pci_config._pcibus < 0x10; pci_config._pcibus++) {
+ for (pci_config._cardnum = 0; pci_config._cardnum < 0x20; pci_config._cardnum++) {
+ unsigned long config_cmd;
+ config_cmd = 0x80000000 | (pci_config._pcibus<<16) | (pci_config._cardnum<<11);
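+ /* configuration mechanism #1 address layout: bit 31 = enable, bits 23-16 = bus,
+ bits 15-11 = device, bits 10-8 = function, bits 7-2 = dword register offset;
+ e.g. (illustrative) bus 0, device 5, register PCI_COMMAND (0x04) -> 0x80002804 */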
+
+ outl(config_cmd, 0xCF8); /* ioreg 0 */
+ pci_config._device_vendor = inl(0xCFC);
+
+ if ((pci_config._vendor == PCI_VENDOR_ID_AMD) && (pci_config._device == PCI_DEVICE_ID_AMD_SCSI)) {
+ outl(config_cmd | PCI_COMMAND, 0xCF8); pci_config._status_command = inl(0xCFC);
+ outl(config_cmd | PCI_CLASS_REVISION, 0xCF8); pci_config._class_revision = inl(0xCFC);
+ outl(config_cmd | PCI_CACHE_LINE_SIZE, 0xCF8); pci_config._bist_header_latency_cache = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_0, 0xCF8); pci_config._base0 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_1, 0xCF8); pci_config._base1 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_2, 0xCF8); pci_config._base2 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_3, 0xCF8); pci_config._base3 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_4, 0xCF8); pci_config._base4 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_5, 0xCF8); pci_config._base5 = inl(0xCFC);
+ outl(config_cmd | PCI_ROM_ADDRESS, 0xCF8); pci_config._baserom = inl(0xCFC);
+ outl(config_cmd | PCI_INTERRUPT_LINE, 0xCF8); pci_config._max_min_ipin_iline = inl(0xCFC);
+
+ /* check whether device is I/O mapped -- should be */
+ if (!(pci_config._command & PCI_COMMAND_IO)) continue;
+
+ /* PCI Spec 2.1 states that it is either the driver's or the PCI card's responsibility
+ to set the PCI Master Enable Bit if needed.
+ From Mark Stockton <marks@schooner.sys.hou.compaq.com> */
+ if (!(pci_config._command & PCI_COMMAND_MASTER)) {
+ pci_config._command |= PCI_COMMAND_MASTER;
+ printk("Config 1; PCI Master Bit has not been set. Setting...\n");
+ outl(config_cmd | PCI_COMMAND, 0xCF8); outw(pci_config._command, 0xCFC); }
+
+ /* everything seems OK now, so initialize */
+ if (AM53C974_init(tpnt, pci_config)) count++ ;
+ }
+ }
+ }
+outb(0, 0xCF8); /* is this really necessary? */
+
+/* try PCI config method 2, if no device was detected by method 1 */
+if (!count) {
+ AM53C974_PCIREG_OPEN();
+
+ pci_config._pcibus = 0xFFFFFFFF;
+ pci_config._cardnum = 0xFFFFFFFF;
+
+ for (pci_config._ioaddr = 0xC000; pci_config._ioaddr < 0xD000; pci_config._ioaddr += 0x0100) {
+ pci_config._device_vendor = inl(pci_config._ioaddr);
+
+ if ((pci_config._vendor == PCI_VENDOR_ID_AMD) && (pci_config._device == PCI_DEVICE_ID_AMD_SCSI)) {
+ pci_config._status_command = inl(pci_config._ioaddr + PCI_COMMAND);
+ pci_config._class_revision = inl(pci_config._ioaddr + PCI_CLASS_REVISION);
+ pci_config._bist_header_latency_cache = inl(pci_config._ioaddr + PCI_CACHE_LINE_SIZE);
+ pci_config._base0 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_0);
+ pci_config._base1 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_1);
+ pci_config._base2 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_2);
+ pci_config._base3 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_3);
+ pci_config._base4 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_4);
+ pci_config._base5 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_5);
+ pci_config._baserom = inl(pci_config._ioaddr + PCI_ROM_ADDRESS);
+ pci_config._max_min_ipin_iline = inl(pci_config._ioaddr + PCI_INTERRUPT_LINE);
+
+ /* check whether device is I/O mapped -- should be */
+ if (!(pci_config._command & PCI_COMMAND_IO)) continue;
+
+ /* PCI Spec 2.1 states that it is either the driver's or the PCI card's responsibility
+ to set the PCI Master Enable Bit if needed.
+ From Mark Stockton <marks@schooner.sys.hou.compaq.com> */
+ if (!(pci_config._command & PCI_COMMAND_MASTER)) {
+ pci_config._command |= PCI_COMMAND_MASTER;
+ printk("Config 2; PCI Master Bit has not been set. Setting...\n");
+ outw(pci_config._command, pci_config._ioaddr + PCI_COMMAND); }
+
+ /* everything seems OK now, so initialize */
+ if (AM53C974_init(tpnt, pci_config)) count++ ;
+ }
+ }
+ AM53C974_PCIREG_CLOSE();
+ }
+
+return(count);
+}
+
+/**************************************************************************
+* Function : int AM53C974_detect(Scsi_Host_Template *tpnt)
+*
+* Purpose : detects and initializes AM53C974 SCSI chips
+*
+* Inputs : tpnt - host template
+*
+* Returns : number of host adapters detected
+**************************************************************************/
+int AM53C974_detect(Scsi_Host_Template *tpnt)
+{
+int count; /* number of boards detected */
+
+#if defined (CONFIG_PCI)
+if (pcibios_present())
+ count = AM53C974_bios_detect(tpnt);
+ else
+#endif
+count = AM53C974_nobios_detect(tpnt);
+return (count);
+}
+
+/**************************************************************************
+* Function : int AM53C974_init(Scsi_Host_Template *tpnt, pci_config_t pci_config)
+*
+* Purpose : initializes instance and corresponding AM53/79C974 chip,
+*
+* Inputs : tpnt - template, pci_config - PCI configuration,
+*
+* Returns : 1 on success, 0 on failure.
+*
+* NOTE: If no override for the controller's SCSI id is given and AM53C974_SCSI_ID
+* is not defined we assume that the SCSI address of this controller is correctly
+* set up by the BIOS (as reflected by contents of register CNTLREG1).
+* This is the only BIOS assistance we need.
+**************************************************************************/
+static int AM53C974_init(Scsi_Host_Template *tpnt, pci_config_t pci_config)
+{
+AM53C974_local_declare();
+int i, j;
+struct Scsi_Host *instance, *search;
+struct AM53C974_hostdata *hostdata;
+
+#ifdef AM53C974_OPTION_DEBUG_PROBE_ONLY
+ printk ("AM53C974: probe only enabled, aborting initialization\n");
+ return 0;
+#endif
+
+instance = scsi_register(tpnt, sizeof(struct AM53C974_hostdata));
+hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+instance->base = NULL;
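+/* strip the PCI BAR type bits: bit 0 set marks an I/O BAR (mask 0xFFFFFFFC),
+ otherwise a memory BAR (mask 0xFFFFFFF0) */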
+instance->io_port = pci_config._base0 & (pci_config._base0 & 0x1 ?
+ 0xFFFFFFFC : 0xFFFFFFF0);
+instance->irq = pci_config._int_line;
+instance->dma_channel = -1;
+AM53C974_setio(instance);
+
+#ifdef AM53C974_SCSI_ID
+instance->this_id = AM53C974_SCSI_ID;
+AM53C974_write_8(CNTLREG1, instance->this_id & CNTLREG1_SID);
+#else
+instance->this_id = AM53C974_read_8(CNTLREG1) & CNTLREG1_SID;
+if (instance->this_id != 7)
+ printk("scsi%d: WARNING: unusual hostadapter SCSI id %d; please verify!\n",
+ instance->host_no, instance->this_id);
+#endif
+
+for (i = 0; i < sizeof(hostdata->msgout); i++) {
+ hostdata->msgout[i] = NOP;
+ hostdata->last_message[i] = NOP; }
+for (i = 0; i < 8; i++) {
+ hostdata->busy[i] = 0;
+ hostdata->sync_per[i] = DEF_STP;
+ hostdata->sync_off[i] = 0;
+ hostdata->sync_neg[i] = 0;
+ hostdata->sync_en[i] = DEFAULT_SYNC_NEGOTIATION_ENABLED;
+ hostdata->max_rate[i] = DEFAULT_RATE;
+ hostdata->max_offset[i] = DEFAULT_SYNC_OFFSET; }
+
+/* overwrite defaults by LILO overrides */
+for (i = 0; i < commandline_current; i++) {
+ if (overrides[i].host_scsi_id == instance->this_id) {
+ j = overrides[i].target_scsi_id;
+ hostdata->sync_en[j] = 1;
+ hostdata->max_rate[j] = overrides[i].max_rate;
+ hostdata->max_offset[j] = overrides[i].max_offset;
+ }
+ }
+
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->issue_queue = NULL;
+hostdata->disconnected_queue = NULL;
+hostdata->in_reset = 0;
+hostdata->aborted = 0;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+
+/* Set up an interrupt handler if we aren't already sharing an IRQ with another board */
+for (search = first_host;
+ search && ( ((the_template != NULL) && (search->hostt != the_template)) ||
+ (search->irq != instance->irq) || (search == instance) );
+ search = search->next);
+if (!search) {
+ if (request_irq(instance->irq, AM53C974_intr, SA_INTERRUPT, "AM53C974")) {
+ printk("scsi%d: IRQ%d not free, detaching\n", instance->host_no, instance->irq);
+ scsi_unregister(instance);
+ return 0; }
+ }
+ else {
+ printk("scsi%d: using interrupt handler previously installed for scsi%d\n",
+ instance->host_no, search->host_no); }
+
+if (!the_template) {
+ the_template = instance->hostt;
+ first_instance = instance; }
+
+/* do hard reset */
+AM53C974_write_8(CMDREG, CMDREG_RDEV); /* reset device */
+udelay(5);
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+AM53C974_write_8(CNTLREG1, CNTLREG1_DISR | instance->this_id);
+AM53C974_write_8(CMDREG, CMDREG_RBUS); /* reset SCSI bus */
+udelay(10);
+AM53C974_config_after_reset(instance);
+
+return(1);
+}
+
+/*********************************************************************
+* Function : AM53C974_config_after_reset(struct Scsi_Host *instance) *
+* *
+* Purpose : initializes chip registers after reset *
+* *
+* Inputs : instance - which AM53C974 *
+* *
+* Returns : nothing *
+**********************************************************************/
+static void AM53C974_config_after_reset(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+AM53C974_setio(instance);
+
+/* clear SCSI FIFO */
+AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+
+/* configure device */
+AM53C974_write_8(STIMREG, DEF_SCSI_TIMEOUT);
+AM53C974_write_8(STPREG, DEF_STP & STPREG_STP);
+AM53C974_write_8(SOFREG, (DEF_SOF_RAD<<6) | (DEF_SOF_RAA<<4));
+AM53C974_write_8(CLKFREG, DEF_CLKF & CLKFREG_MASK);
+AM53C974_write_8(CNTLREG1, (DEF_ETM<<7) | CNTLREG1_DISR | (DEF_PERE<<4) | instance->this_id);
+AM53C974_write_8(CNTLREG2, (DEF_ENF<<6));
+AM53C974_write_8(CNTLREG3, (DEF_ADIDCHK<<7) | (DEF_FASTSCSI<<4) | (DEF_FASTCLK<<3));
+AM53C974_write_8(CNTLREG4, (DEF_GLITCH<<6) | (DEF_PWD<<5) | (DEF_RAE<<3) | (DEF_RADE<<2) | CNTLREG4_RES);
+}
+
+/***********************************************************************
+* Function : const char *AM53C974_info(struct Scsi_Host *instance) *
+* *
+* Purpose : return device driver information *
+* *
+* Inputs : instance - which AM53C974 *
+* *
+* Returns : info string *
+************************************************************************/
+const char *AM53C974_info(struct Scsi_Host *instance)
+{
+static char info[100];
+
+sprintf(info, "AM53/79C974 PCscsi driver rev. %d.%d; host I/O address: 0x%x; irq: %d\n",
+ AM53C974_DRIVER_REVISION_MAJOR, AM53C974_DRIVER_REVISION_MINOR,
+ instance->io_port, instance->irq);
+return (info);
+}
+
+/**************************************************************************
+* Function : int AM53C974_command (Scsi_Cmnd *SCpnt) *
+* *
+* Purpose : the unqueued SCSI command function, replaced by the *
+* AM53C974_queue_command function *
+* *
+* Inputs : SCpnt - pointer to command structure *
+* *
+* Returns : status, see hosts.h for details *
+***************************************************************************/
+int AM53C974_command(Scsi_Cmnd *SCpnt)
+{
+DEB(printk("AM53C974_command called\n"));
+return 0;
+}
+
+/**************************************************************************
+* Function : void initialize_SCp(Scsi_Cmnd *cmd) *
+* *
+* Purpose : initialize the saved data pointers for cmd to point to the *
+* start of the buffer. *
+* *
+* Inputs : cmd - Scsi_Cmnd structure to have pointers reset. *
+* *
+* Returns : nothing *
+**************************************************************************/
+static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
+{
+if (cmd->use_sg) {
+ cmd->SCp.buffer = (struct scatterlist *)cmd->buffer;
+ cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ cmd->SCp.ptr = (char *)cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length; }
+ else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *)cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->request_bufflen; }
+}
+
+/**************************************************************************
+* Function : run_main(void) *
+* *
+* Purpose : ensure that the coroutine is running and will process our *
+* request. main_running is checked/set here (in an inline *
+* function rather than in AM53C974_main itself to reduce the *
+* chances of stack overflow). *
+* *
+* *
+* Inputs : none *
+* *
+* Returns : nothing *
+**************************************************************************/
+static __inline__ void run_main(void)
+{
+cli();
+if (!main_running) {
+ /* main_running is cleared in AM53C974_main once it can't do
+ more work, and AM53C974_main exits with interrupts disabled. */
+ main_running = 1;
+ AM53C974_main();
+ sti(); }
+ else
+ sti();
+}
+
+/**************************************************************************
+* Function : int AM53C974_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+*
+* Purpose : writes SCSI command into AM53C974 FIFO
+*
+* Inputs : cmd - SCSI command, done - function called on completion, with
+* a pointer to the command descriptor.
+*
+* Returns : status, see hosts.h for details
+*
+* Side effects :
+* cmd is added to the per instance issue_queue, with minor
+* twiddling done to the host specific fields of cmd. If the
+* main coroutine is not running, it is restarted.
+**************************************************************************/
+int AM53C974_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+{
+struct Scsi_Host *instance = cmd->host;
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *tmp;
+
+cli();
+DEB_QUEUE(printk(SEPARATOR_LINE));
+DEB_QUEUE(printk("scsi%d: AM53C974_queue_command called\n", instance->host_no));
+DEB_QUEUE(printk("cmd=%02x target=%02x lun=%02x bufflen=%d use_sg = %02x\n",
+ cmd->cmnd[0], cmd->target, cmd->lun, cmd->request_bufflen, cmd->use_sg));
+
+/* We use the host_scribble field as a pointer to the next command in a queue */
+cmd->host_scribble = NULL;
+cmd->scsi_done = done;
+cmd->result = 0;
+cmd->device->disconnect = 0;
+
+/* Insert the cmd into the issue queue. Note that REQUEST SENSE
+ * commands are added to the head of the queue since any command will
+ * clear the contingent allegiance condition that exists and the
+ * sense data is only guaranteed to be valid while the condition exists. */
+if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ LIST(cmd, hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = cmd; }
+ else {
+ for (tmp = (Scsi_Cmnd *)hostdata->issue_queue; tmp->host_scribble;
+ tmp = (Scsi_Cmnd *)tmp->host_scribble);
+ LIST(cmd, tmp);
+ tmp->host_scribble = (unsigned char *)cmd; }
+
+DEB_QUEUE(printk("scsi%d : command added to %s of queue\n", instance->host_no,
+ (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"));
+
+/* Run the coroutine if it isn't already running. */
+run_main();
+return 0;
+}
+
+/**************************************************************************
+ * Function : AM53C974_main (void)
+ *
+ * Purpose : AM53C974_main is a coroutine that runs as long as more work can
+ * be done on the AM53C974 host adapters in a system. Both
+ * AM53C974_queue_command() and AM53C974_intr() will try to start it
+ * in case it is not running.
+ *
+ * NOTE : AM53C974_main exits with interrupts *disabled*; the caller should
+ * reenable them. This prevents reentrancy and kernel stack overflow.
+ **************************************************************************/
+static void AM53C974_main(void)
+{
+AM53C974_local_declare();
+Scsi_Cmnd *tmp, *prev;
+struct Scsi_Host *instance;
+struct AM53C974_hostdata *hostdata;
+int done;
+
+/* We run (with interrupts disabled) until we're sure that none of
+ * the host adapters have anything that can be done, at which point
+ * we set main_running to 0 and exit. */
+
+do {
+ cli(); /* Freeze request queues */
+ done = 1;
+ for (instance = first_instance; instance && instance->hostt == the_template;
+ instance = instance->next) {
+ hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+ AM53C974_setio(instance);
+ /* start to select target if we are not connected and not in the
+ selection process */
+ if (!hostdata->connected && !hostdata->sel_cmd) {
+ /* Search through the issue_queue for a command destined for a target
+ that is not busy. */
+ for (tmp = (Scsi_Cmnd *)hostdata->issue_queue, prev = NULL; tmp;
+ prev = tmp, tmp = (Scsi_Cmnd *)tmp->host_scribble) {
+ /* When we find one, remove it from the issue queue. */
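+ /* busy[] keeps one byte per target with one bit per lun; the test below
+ skips commands whose target/lun already has an active nexus */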
+ if (!(hostdata->busy[tmp->target] & (1 << tmp->lun))) {
+ if (prev) {
+ REMOVE(prev, (Scsi_Cmnd *)(prev->host_scribble), tmp,
+ (Scsi_Cmnd *)(tmp->host_scribble));
+ prev->host_scribble = tmp->host_scribble; }
+ else {
+ REMOVE(-1, hostdata->issue_queue, tmp, tmp->host_scribble);
+ hostdata->issue_queue = (Scsi_Cmnd *)tmp->host_scribble; }
+ tmp->host_scribble = NULL;
+
+ /* go into selection mode, disable reselection and wait for
+ SO interrupt which will continue with the selection process */
+ hostdata->selecting = 1;
+ hostdata->sel_cmd = tmp;
+ AM53C974_write_8(CMDREG, CMDREG_DSR);
+ break;
+ } /* if target/lun is not busy */
+
+ } /* for */
+ } /* if (!hostdata->connected) */
+ else {
+ DEB(printk("main: connected; cmd = 0x%lx, sel_cmd = 0x%lx\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd));
+ }
+ } /* for instance */
+ } while (!done);
+main_running = 0;
+}
+
+/*********************************************************************
+* Function : AM53C974_intr(int irq, struct pt_regs *regs) *
+* *
+* Purpose : interrupt handler *
+* *
+* Inputs : irq - interrupt line, regs - saved register state (unused) *
+* *
+* Returns : nothing *
+**********************************************************************/
+static void AM53C974_intr(int irq, struct pt_regs *regs)
+{
+AM53C974_local_declare();
+struct Scsi_Host *instance;
+struct AM53C974_hostdata *hostdata;
+unsigned char cmdreg, dmastatus, statreg, isreg, instreg, cfifo;
+
+/* find AM53C974 hostadapter responsible for this interrupt */
+for (instance = first_instance; instance; instance = instance->next)
+ if ((instance->irq == irq) && (instance->hostt == the_template)) goto FOUND;
+sti();
+return;
+
+/* found; now decode and process */
+FOUND:
+hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+dmastatus = AM53C974_read_8(DMASTATUS);
+
+DEB_INTR(printk(SEPARATOR_LINE));
+DEB_INTR(printk("AM53C974 interrupt; dmastatus=0x%02x\n", dmastatus));
+KEYWAIT();
+
+/*** DMA related interrupts ***/
+if (hostdata->connected && (dmastatus & (DMASTATUS_ERROR | DMASTATUS_PWDN |
+ DMASTATUS_ABORT))) {
+ /* DMA error or POWERDOWN */
+ printk("scsi%d: DMA error or powerdown; dmastatus: 0x%02x\n",
+ instance->host_no, dmastatus);
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ panic("scsi%d: cannot recover\n", instance->host_no); }
+
+if (hostdata->connected && (dmastatus & DMASTATUS_DONE)) {
+ /* DMA transfer done */
+ unsigned long residual;
+ cli();
+ if (!(AM53C974_read_8(DMACMD) & DMACMD_DIR)) {
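+ /* the residual (bytes not yet transferred) is the 24-bit transfer counter
+ CTCH:CTCM:CTCL plus any bytes still held in the SCSI FIFO */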
+ do {
+ dmastatus = AM53C974_read_8(DMASTATUS);
+ residual = AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16);
+ residual += AM53C974_read_8(CFIREG) & CFIREG_CF;
+ } while (!(dmastatus & DMASTATUS_SCSIINT) && residual);
+ residual = AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16);
+ residual += AM53C974_read_8(CFIREG) & CFIREG_CF;
+ }
+ else
+ residual = 0;
+ hostdata->connected->SCp.ptr += hostdata->connected->SCp.this_residual - residual;
+ hostdata->connected->SCp.this_residual = residual;
+
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+
+ /* if service request missed before, process it now (ugly) */
+ if (hostdata->dma_busy) {
+ hostdata->dma_busy = 0;
+ cmdreg = AM53C974_read_8(CMDREG);
+ statreg = AM53C974_read_8(STATREG);
+ isreg = AM53C974_read_8(ISREG);
+ instreg = AM53C974_read_8(INSTREG);
+ cfifo = AM53C974_cfifo();
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo,
+ dmastatus); }
+ sti();
+ }
+
+if (!(dmastatus & DMASTATUS_SCSIINT)) {
+ sti();
+ return; }
+
+/*** SCSI related interrupts ***/
+cmdreg = AM53C974_read_8(CMDREG);
+statreg = AM53C974_read_8(STATREG);
+isreg = AM53C974_read_8(ISREG);
+instreg = AM53C974_read_8(INSTREG);
+cfifo = AM53C974_cfifo();
+
+DEB_INTR(printk("scsi%d: statreg: 0x%02x; isreg: 0x%02x; instreg: 0x%02x; cfifo: 0x%02x\n",
+ instance->host_no, statreg, isreg, instreg, cfifo));
+
+if (statreg & STATREG_PE) {
+ /* parity error */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ printk("scsi%d : PARITY error\n", instance->host_no);
+ if (hostdata->connected) hostdata->sync_off[hostdata->connected->target] = 0; /* setup asynchronous transfer */
+ hostdata->aborted = 1; }
+
+if (statreg & STATREG_IOE) {
+ /* illegal operation error */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ printk("scsi%d : ILLEGAL OPERATION error\n", instance->host_no);
+ printk("cmdreg: 0x%02x; dmacmd: 0x%02x; statreg: 0x%02x; \n"
+ "isreg: 0x%02x; instreg: 0x%02x; cfifo: 0x%02x\n",
+ cmdreg, AM53C974_read_8(DMACMD), statreg, isreg, instreg, cfifo); }
+if (hostdata->in_reset && (instreg & INSTREG_SRST)) {
+ /* RESET INTERRUPT */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ DEB(printk("Bus reset interrupt received\n"));
+ AM53C974_intr_bus_reset(instance);
+ cli();
+ if (hostdata->connected) {
+ hostdata->connected->result = DID_RESET << 16;
+ hostdata->connected->scsi_done((Scsi_Cmnd *)hostdata->connected);
+ hostdata->connected = NULL; }
+ else {
+ if (hostdata->sel_cmd) {
+ hostdata->sel_cmd->result = DID_RESET << 16;
+ hostdata->sel_cmd->scsi_done((Scsi_Cmnd *)hostdata->sel_cmd);
+ hostdata->sel_cmd = NULL; }
+ }
+ sti();
+ if (hostdata->in_reset == 1) goto EXIT;
+ else return;
+ }
+
+if (instreg & INSTREG_ICMD) {
+ /* INVALID COMMAND INTERRUPT */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ printk("scsi%d: Invalid command interrupt\n", instance->host_no);
+ printk("cmdreg: 0x%02x; dmacmd: 0x%02x; statreg: 0x%02x; dmastatus: 0x%02x; \n"
+ "isreg: 0x%02x; instreg: 0x%02x; cfifo: 0x%02x\n",
+ cmdreg, AM53C974_read_8(DMACMD), statreg, dmastatus, isreg, instreg, cfifo);
+ panic("scsi%d: cannot recover\n", instance->host_no); }
+
+if (instreg & INSTREG_DIS) {
+ /* DISCONNECT INTERRUPT */
+ DEB_INTR(printk("Disconnect interrupt received; "));
+ cli();
+ AM53C974_intr_disconnect(instance);
+ sti();
+ goto EXIT; }
+
+if (instreg & INSTREG_RESEL) {
+ /* RESELECTION INTERRUPT */
+ DEB_INTR(printk("Reselection interrupt received\n"));
+ cli();
+ AM53C974_intr_reselect(instance, statreg);
+ sti();
+ goto EXIT; }
+
+if (instreg & INSTREG_SO) {
+ DEB_INTR(printk("Successful operation interrupt received\n"));
+ if (hostdata->selecting) {
+ DEB_INTR(printk("DSR completed, starting select\n"));
+ cli();
+ AM53C974_select(instance, (Scsi_Cmnd *)hostdata->sel_cmd,
+ (hostdata->sel_cmd->cmnd[0] == REQUEST_SENSE) ?
+ TAG_NONE : TAG_NEXT);
+ hostdata->selecting = 0;
+ AM53C974_set_sync(instance, hostdata->sel_cmd->target);
+ sti();
+ return; }
+
+ if (hostdata->sel_cmd != NULL) {
+ if ( ((isreg & ISREG_IS) != ISREG_OK_NO_STOP) &&
+ ((isreg & ISREG_IS) != ISREG_OK_STOP) ) {
+ /* UNSUCCESSFUL SELECTION */
+ DEB_INTR(printk("unsuccessful selection\n"));
+ cli();
+ hostdata->dma_busy = 0;
+ LIST(hostdata->sel_cmd, hostdata->issue_queue);
+ hostdata->sel_cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = hostdata->sel_cmd;
+ hostdata->sel_cmd = NULL;
+ hostdata->selecting = 0;
+ sti();
+ goto EXIT; }
+ else {
+ /* SUCCESSFUL SELECTION */
+ DEB(printk("successful selection; cmd=0x%02lx\n", (long)hostdata->sel_cmd));
+ cli();
+ hostdata->dma_busy = 0;
+ hostdata->disconnecting = 0;
+ hostdata->connected = hostdata->sel_cmd;
+ hostdata->sel_cmd = NULL;
+ hostdata->selecting = 0;
+#ifdef SCSI2
+ if (!hostdata->connected->device->tagged_queue)
+#endif
+ hostdata->busy[hostdata->connected->target] |= (1 << hostdata->connected->lun);
+ /* very strange -- use_sg is sometimes nonzero for request sense commands !! */
+ if ((hostdata->connected->cmnd[0] == REQUEST_SENSE) && hostdata->connected->use_sg) {
+ DEB(printk("scsi%d: REQUEST_SENSE command with nonzero use_sg\n", instance->host_no));
+ KEYWAIT();
+ hostdata->connected->use_sg = 0; }
+ initialize_SCp((Scsi_Cmnd *)hostdata->connected);
+ hostdata->connected->SCp.phase = PHASE_CMDOUT;
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo, dmastatus);
+ sti();
+ return; }
+ }
+ else {
+ cli();
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo, dmastatus);
+ sti();
+ return; }
+ }
+
+if (instreg & INSTREG_SR) {
+ DEB_INTR(printk("Service request interrupt received, "));
+ if (hostdata->connected) {
+ DEB_INTR(printk("calling information_transfer\n"));
+ cli();
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo, dmastatus);
+ sti(); }
+ else {
+ printk("scsi%d: weird: service request when no command connected\n", instance->host_no);
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO); } /* clear FIFO */
+ return;
+ }
+
+EXIT:
+ DEB_INTR(printk("intr: starting main\n"));
+ run_main();
+ DEB_INTR(printk("end of intr\n"));
+}
+
+/**************************************************************************
+* Function : AM53C974_intr_disconnect(struct Scsi_Host *instance)
+*
+* Purpose : manage target disconnection
+*
+* Inputs : instance -- which AM53C974
+*
+* Returns : nothing
+**************************************************************************/
+static void AM53C974_intr_disconnect(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *cmd;
+AM53C974_setio(instance);
+
+if (hostdata->sel_cmd != NULL) {
+ /* normal selection timeout, typical for nonexisting targets */
+ cmd = (Scsi_Cmnd *)hostdata->sel_cmd;
+ DEB_INTR(printk("bad target\n"));
+ cmd->result = DID_BAD_TARGET << 16;
+ goto EXIT_FINISHED; }
+
+if (!hostdata->connected) {
+ /* can happen if controller was reset, a device tried to reconnect,
+ failed and disconnects now */
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+ return; }
+
+if (hostdata->disconnecting) {
+ /* target sent disconnect message, so we are prepared */
+ cmd = (Scsi_Cmnd *)hostdata->connected;
+ AM53C974_set_async(instance, cmd->target);
+ DEB_INTR(printk("scsi%d : disc. from cmnd %d for ta %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ if (cmd->device->disconnect) {
+ /* target wants to reselect later */
+ DEB_INTR(printk("ok, re-enabling selection\n"));
+ LIST(cmd,hostdata->disconnected_queue);
+ cmd->host_scribble = (unsigned char *)hostdata->disconnected_queue;
+ hostdata->disconnected_queue = cmd;
+ DEB_QUEUE(printk("scsi%d : command for target %d lun %d this %d was moved from connected to"
+ " the disconnected_queue\n", instance->host_no, cmd->target,
+ cmd->lun, hostdata->disconnected_queue->SCp.this_residual));
+ DEB_QUEUE(AM53C974_print_queues(instance));
+ goto EXIT_UNFINISHED; }
+ else {
+ /* target does not want to reselect later, we are really finished */
+#ifdef AM53C974_DEBUG
+ if (cmd->cmnd[0] == REQUEST_SENSE) {
+ int i;
+ printk("Request sense data dump:\n");
+ for (i = 0; i < cmd->request_bufflen; i++) {
+ printk("%02x ", *((char *)(cmd->request_buffer) + i));
+ if (i && !(i % 16)) printk("\n"); }
+ printk("\n"); }
+#endif
+ goto EXIT_FINISHED; } /* !cmd->device->disconnect */
+ } /* if (hostdata->disconnecting) */
+
+/* no disconnect message received; unexpected disconnection */
+cmd = (Scsi_Cmnd *)hostdata->connected;
+if (cmd) {
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ AM53C974_set_async(instance, cmd->target);
+ printk("scsi%d: Unexpected disconnect; phase: %d; target: %d; this_residual: %d; buffers_residual: %d; message: %d\n",
+ instance->host_no, cmd->SCp.phase, cmd->target, cmd->SCp.this_residual, cmd->SCp.buffers_residual,
+ cmd->SCp.Message);
+ printk("cmdreg: 0x%02x; statreg: 0x%02x; isreg: 0x%02x; cfifo: 0x%02x\n",
+ AM53C974_read_8(CMDREG), AM53C974_read_8(STATREG), AM53C974_read_8(ISREG),
+ AM53C974_read_8(CFIREG) & CFIREG_CF);
+
+ if ((hostdata->last_message[0] == EXTENDED_MESSAGE) &&
+ (hostdata->last_message[2] == EXTENDED_SDTR)) {
+ /* sync. negotiation was aborted, setup asynchronous transfer with target */
+ hostdata->sync_off[cmd->target] = 0; }
+ if (hostdata->aborted || hostdata->msgout[0] == ABORT)
+ cmd->result = DID_ABORT << 16;
+ else
+ cmd->result = DID_ERROR << 16;
+ goto EXIT_FINISHED; }
+
+EXIT_FINISHED:
+hostdata->aborted = 0;
+hostdata->msgout[0] = NOP;
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+DEB(printk("disconnect; issue_queue: 0x%lx, disconnected_queue: 0x%lx\n",
+ (long)hostdata->issue_queue, (long)hostdata->disconnected_queue));
+cmd->scsi_done(cmd);
+
+if (!hostdata->selecting) {
+ AM53C974_set_async(instance, cmd->target);
+ AM53C974_write_8(CMDREG, CMDREG_ESR); } /* allow reselect */
+return;
+
+EXIT_UNFINISHED:
+hostdata->msgout[0] = NOP;
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->aborted = 0;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+DEB(printk("disconnect; issue_queue: 0x%lx, disconnected_queue: 0x%lx\n",
+ (long)hostdata->issue_queue, (long)hostdata->disconnected_queue));
+if (!hostdata->selecting) {
+ AM53C974_set_async(instance, cmd->target);
+ AM53C974_write_8(CMDREG, CMDREG_ESR); } /* allow reselect */
+return;
+}
+
+/**************************************************************************
+* Function : int AM53C974_sync_neg(struct Scsi_Host *instance, int target, unsigned char *msg)
+*
+* Purpose : setup message string for sync. negotiation
+*
+* Inputs : instance -- which AM53C974
+* target -- which SCSI target to deal with
+* msg -- input message string
+*
+* Returns : 0 if parameters accepted or 1 if not accepted
+*
+* Side effects: hostdata is changed
+*
+* Note: we assume here that fastclk is enabled
+**************************************************************************/
+static int AM53C974_sync_neg(struct Scsi_Host *instance, int target, unsigned char *msg)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+int period, offset, i, rate, rate_rem;
+AM53C974_setio(instance);
+
+period = (DEF_CLK * msg[3] * 8 + 1000) / 2000;
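+/* msg[3] is the requested transfer period in units of 4 ns; the line above rounds
+ it to chip clock cycles, e.g. assuming DEF_CLK is 40 (MHz), a request of
+ msg[3] = 25 (100 ns) yields period = (40*25*8 + 1000) / 2000 = 4 clocks */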
+if (period < MIN_PERIOD) {
+ period = MIN_PERIOD;
+ hostdata->msgout[3] = period / 4; }
+ else
+ if (period > MAX_PERIOD) {
+ period = MAX_PERIOD;
+ hostdata->msgout[3] = period / 4; }
+ else
+ hostdata->msgout[3] = msg[3];
+offset = msg[4];
+if (offset > MAX_OFFSET) offset = MAX_OFFSET;
+hostdata->msgout[4] = offset;
+hostdata->sync_per[target] = period;
+hostdata->sync_off[target] = offset;
+for (i = 0; i < 3; i++) hostdata->msgout[i] = msg[i];
+if ((hostdata->msgout[3] != msg[3]) || (msg[4] != offset)) return(1);
+
+rate = DEF_CLK / period;
+rate_rem = 10 * (DEF_CLK - period * rate) / period;
+
+if (offset)
+ printk("\ntarget %d: rate=%d.%d Mhz, synchronous, sync offset=%d bytes\n",
+ target, rate, rate_rem, offset);
+ else
+ printk("\ntarget %d: rate=%d.%d Mhz, asynchronous\n", target, rate, rate_rem);
+
+return(0);
+}
+
+/**************************************************************************
+* Function : AM53C974_set_async(struct Scsi_Host *instance, int target)
+*
+* Purpose : put controller into async. mode
+*
+* Inputs : instance -- which AM53C974
+* target -- which SCSI target to deal with
+*
+* Returns : nothing
+**************************************************************************/
+static __inline__ void AM53C974_set_async(struct Scsi_Host *instance, int target)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+AM53C974_write_8(STPREG, hostdata->sync_per[target]);
+AM53C974_write_8(SOFREG, (DEF_SOF_RAD<<6) | (DEF_SOF_RAA<<4));
+}
+
+/**************************************************************************
+* Function : AM53C974_set_sync(struct Scsi_Host *instance, int target)
+*
+* Purpose : put controller into sync. mode
+*
+* Inputs : instance -- which AM53C974
+* target -- which SCSI target to deal with
+*
+* Returns : nothing
+**************************************************************************/
+static __inline__ void AM53C974_set_sync(struct Scsi_Host *instance, int target)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+AM53C974_write_8(STPREG, hostdata->sync_per[target]);
+AM53C974_write_8(SOFREG, (SOFREG_SO & hostdata->sync_off[target]) |
+ (DEF_SOF_RAD<<6) | (DEF_SOF_RAA<<4));
+}
+
+/***********************************************************************
+* Function : AM53C974_information_transfer(struct Scsi_Host *instance, *
+* unsigned char statreg, unsigned char isreg, *
+* unsigned char instreg, unsigned char cfifo, *
+* unsigned char dmastatus) *
+* *
+* Purpose : handle phase changes *
+* *
+* Inputs : instance - which AM53C974 *
+* statreg - status register *
+* isreg - internal state register *
+* instreg - interrupt status register *
+* cfifo - number of bytes in FIFO *
+* dmastatus - dma status register *
+* *
+* Returns : nothing *
+************************************************************************/
+static void AM53C974_information_transfer(struct Scsi_Host *instance,
+ unsigned char statreg, unsigned char isreg,
+ unsigned char instreg, unsigned char cfifo,
+ unsigned char dmastatus)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *cmd = (Scsi_Cmnd *)hostdata->connected;
+int ret, i, len, residual=-1;
+AM53C974_setio(instance);
+
+DEB_INFO(printk(SEPARATOR_LINE));
+switch (statreg & STATREG_PHASE) { /* scsi phase */
+ case PHASE_DATAOUT:
+ DEB_INFO(printk("Dataout phase; cmd=0x%lx, sel_cmd=0x%lx, this_residual=%d, buffers_residual=%d\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
+ cmd->SCp.phase = PHASE_DATAOUT;
+ goto PHASE_DATA_IO;
+
+ case PHASE_DATAIN:
+ DEB_INFO(printk("Datain phase; cmd=0x%lx, sel_cmd=0x%lx, this_residual=%d, buffers_residual=%d\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
+ cmd->SCp.phase = PHASE_DATAIN;
+ PHASE_DATA_IO:
+ if (hostdata->aborted) {
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ return; }
+ if ((!cmd->SCp.this_residual) && cmd->SCp.buffers_residual) {
+ cmd->SCp.buffer++;
+ cmd->SCp.buffers_residual--;
+ cmd->SCp.ptr = (unsigned char *)cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length; }
+ if (cmd->SCp.this_residual) {
+ if (!(AM53C974_read_8(DMACMD) & DMACMD_START)) {
+ hostdata->dma_busy = 0;
+ AM53C974_transfer_dma(instance, statreg & STATREG_IO,
+ (unsigned long)cmd->SCp.this_residual,
+ cmd->SCp.ptr); }
+ else
+ hostdata->dma_busy = 1;
+ }
+ return;
+
+ case PHASE_MSGIN:
+ DEB_INFO(printk("Message-In phase; cmd=0x%lx, sel_cmd=0x%lx\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd));
+ AM53C974_set_async(instance, cmd->target);
+ if (cmd->SCp.phase == PHASE_DATAIN)
+ AM53C974_dma_blast(instance, dmastatus, statreg);
+ if ((cmd->SCp.phase == PHASE_DATAOUT) && (AM53C974_read_8(DMACMD) & DMACMD_START)) {
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ residual = cfifo + (AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16));
+ cmd->SCp.ptr += cmd->SCp.this_residual - residual;
+ cmd->SCp.this_residual = residual;
+ if (cfifo) { AM53C974_write_8(CMDREG, CMDREG_CFIFO); cfifo = 0; }
+ }
+ if (cmd->SCp.phase == PHASE_STATIN) {
+ while ((AM53C974_read_8(CFIREG) & CFIREG_CF) < 2) ;
+ cmd->SCp.Status = AM53C974_read_8(FFREG);
+ cmd->SCp.Message = AM53C974_read_8(FFREG);
+ DEB_INFO(printk("Message-In phase; status=0x%02x, message=0x%02x\n",
+ cmd->SCp.Status, cmd->SCp.Message));
+ ret = AM53C974_message(instance, cmd, cmd->SCp.Message); }
+ else {
+ if (!cfifo) {
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ AM53C974_poll_int();
+ cmd->SCp.Message = AM53C974_read_8(FFREG);
+ }
+ ret = AM53C974_message(instance, cmd, cmd->SCp.Message);
+ }
+ cmd->SCp.phase = PHASE_MSGIN;
+ AM53C974_set_sync(instance, cmd->target);
+ break;
+ case PHASE_MSGOUT:
+ DEB_INFO(printk("Message-Out phase; cfifo=%d; msgout[0]=0x%02x\n",
+ AM53C974_read_8(CFIREG) & CFIREG_CF, hostdata->msgout[0]));
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ AM53C974_set_async(instance, cmd->target);
+ for (i = 0; i < sizeof(hostdata->last_message); i++)
+ hostdata->last_message[i] = hostdata->msgout[i];
+ if ((hostdata->msgout[0] == 0) || INSIDE(hostdata->msgout[0], 0x02, 0x1F) ||
+ INSIDE(hostdata->msgout[0], 0x80, 0xFF))
+ len = 1;
+ else {
+ if (hostdata->msgout[0] == EXTENDED_MESSAGE) {
+#ifdef AM53C974_DEBUG_INFO
+ printk("Extended message dump:\n");
+ for (i = 0; i < hostdata->msgout[1] + 2; i++) {
+ printk("%02x ", hostdata->msgout[i]);
+ if (i && !(i % 16)) printk("\n"); }
+ printk("\n");
+#endif
+ len = hostdata->msgout[1] + 2; }
+ else
+ len = 2;
+ }
+ for (i = 0; i < len; i++) AM53C974_write_8(FFREG, hostdata->msgout[i]);
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ cmd->SCp.phase = PHASE_MSGOUT;
+ hostdata->msgout[0] = NOP;
+ AM53C974_set_sync(instance, cmd->target);
+ break;
+
+ case PHASE_CMDOUT:
+ DEB_INFO(printk("Command-Out phase\n"));
+ AM53C974_set_async(instance, cmd->target);
+ for (i = 0; i < cmd->cmd_len; i++) AM53C974_write_8(FFREG, cmd->cmnd[i]);
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ cmd->SCp.phase = PHASE_CMDOUT;
+ AM53C974_set_sync(instance, cmd->target);
+ break;
+
+ case PHASE_STATIN:
+ DEB_INFO(printk("Status phase\n"));
+ if (cmd->SCp.phase == PHASE_DATAIN)
+ AM53C974_dma_blast(instance, dmastatus, statreg);
+ AM53C974_set_async(instance, cmd->target);
+ if (cmd->SCp.phase == PHASE_DATAOUT) {
+ unsigned long residual;
+
+ if (AM53C974_read_8(DMACMD) & DMACMD_START) {
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ residual = cfifo + (AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16));
+ cmd->SCp.ptr += cmd->SCp.this_residual - residual;
+ cmd->SCp.this_residual = residual; }
+ if (cfifo) { AM53C974_write_8(CMDREG, CMDREG_CFIFO); cfifo = 0; }
+ }
+ cmd->SCp.phase = PHASE_STATIN;
+ AM53C974_write_8(CMDREG, CMDREG_ICCS); /* command complete */
+ break;
+
+ case PHASE_RES_0:
+ case PHASE_RES_1:
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ DEB_INFO(printk("Reserved phase\n"));
+ break;
+ }
+KEYWAIT();
+}
+
+/******************************************************************************
+* Function : int AM53C974_message(struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+* unsigned char msg)
+*
+* Purpose : handle SCSI messages
+*
+* Inputs : instance -- which AM53C974
+* cmd -- SCSI command the message belongs to
+* msg -- message id byte
+*
+* Returns : 1 on success, 0 on failure.
+**************************************************************************/
+static int AM53C974_message(struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+ unsigned char msg)
+{
+AM53C974_local_declare();
+static unsigned char extended_msg[10];
+unsigned char statreg;
+int len, ret = 0;
+unsigned char *p;
+#ifdef AM53C974_DEBUG_MSG
+int j;
+#endif
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+DEB_MSG(printk(SEPARATOR_LINE));
+
+/* Linking lets us reduce the time required to get the
+ * next command out to the device, hopefully this will
+ * mean we don't waste another revolution due to the delays
+ * required by ARBITRATION and another SELECTION.
+ * In the current implementation proposal, low level drivers
+ * merely have to start the next command, pointed to by
+ * next_link, done() is called as with unlinked commands. */
+switch (msg) {
+#ifdef LINKED
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+ /* Accept message by releasing ACK */
+ DEB_LINKED(printk("scsi%d : target %d lun %d linked command complete.\n",
+ instance->host_no, cmd->target, cmd->lun));
+ /* Sanity check : A linked command should only terminate with
+ * one of these messages if there are more linked commands available. */
+ if (!cmd->next_link) {
+ printk("scsi%d : target %d lun %d linked command complete, no next_link\n"
+ instance->host_no, cmd->target, cmd->lun);
+ hostdata->aborted = 1;
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break; }
+ if (hostdata->aborted) {
+ DEB_ABORT(printk("ATN set for cmnd %d upon reception of LINKED_CMD_COMPLETE or"
+ "LINKED_FLG_CMD_COMPLETE message\n", cmd->cmnd[0]));
+ AM53C974_write_8(CMDREG, CMDREG_SATN); }
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+
+ initialize_SCp(cmd->next_link);
+ /* The next command is still part of this process */
+ cmd->next_link->tag = cmd->tag;
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ DEB_LINKED(printk("scsi%d : target %d lun %d linked request done, calling scsi_done().\n",
+ instance->host_no, cmd->target, cmd->lun));
+ cmd->scsi_done(cmd);
+ cmd = hostdata->connected;
+ break;
+
+#endif /* def LINKED */
+
+ case ABORT:
+ case COMMAND_COMPLETE:
+ DEB_MSG(printk("scsi%d: command complete message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ hostdata->disconnecting = 1;
+ cmd->device->disconnect = 0;
+
+ /* I'm not sure what the correct thing to do here is :
+ *
+ * If the command that just executed is NOT a request
+ * sense, the obvious thing to do is to set the result
+ * code to the values of the stored parameters.
+ * If it was a REQUEST SENSE command, we need some way
+ * to differentiate between the failure code of the original
+ * and the failure code of the REQUEST sense - the obvious
+ * case is success, where we fall through and leave the result
+ * code unchanged.
+ *
+ * The non-obvious place is where the REQUEST SENSE failed */
+ if (cmd->cmnd[0] != REQUEST_SENSE)
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ else if (cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ if (hostdata->aborted) {
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ DEB_ABORT(printk("ATN set for cmnd %d upon reception of ABORT or"
+ "COMMAND_COMPLETE message\n", cmd->cmnd[0]));
+ break; }
+ if ((cmd->cmnd[0] != REQUEST_SENSE) && (cmd->SCp.Status == CHECK_CONDITION)) {
+ DEB_MSG(printk("scsi%d : performing request sense\n", instance->host_no));
+ cmd->cmnd[0] = REQUEST_SENSE;
+ cmd->cmnd[1] &= 0xe0;
+ cmd->cmnd[2] = 0;
+ cmd->cmnd[3] = 0;
+ cmd->cmnd[4] = sizeof(cmd->sense_buffer);
+ cmd->cmnd[5] = 0;
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *)cmd->sense_buffer;
+ cmd->SCp.this_residual = sizeof(cmd->sense_buffer);
+ LIST(cmd,hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = (Scsi_Cmnd *)cmd;
+ DEB_MSG(printk("scsi%d : REQUEST SENSE added to head of issue queue\n",instance->host_no));
+ }
+
+ /* Accept message by clearing ACK */
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ case MESSAGE_REJECT:
+ DEB_MSG(printk("scsi%d: reject message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ switch (hostdata->last_message[0]) {
+ case EXTENDED_MESSAGE:
+ if (hostdata->last_message[2] == EXTENDED_SDTR) {
+ /* sync. negotiation was rejected, setup asynchronous transfer with target */
+ printk("\ntarget %d: rate=%d Mhz, asynchronous (sync. negotiation rejected)\n",
+ cmd->target, DEF_CLK / DEF_STP);
+ hostdata->sync_off[cmd->target] = 0;
+ hostdata->sync_per[cmd->target] = DEF_STP; }
+ break;
+ case HEAD_OF_QUEUE_TAG:
+ case ORDERED_QUEUE_TAG:
+ case SIMPLE_QUEUE_TAG:
+ cmd->device->tagged_queue = 0;
+ hostdata->busy[cmd->target] |= (1 << cmd->lun);
+ break;
+ default:
+ break;
+ }
+ if (hostdata->aborted) AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ case DISCONNECT:
+ DEB_MSG(printk("scsi%d: disconnect message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ cmd->device->disconnect = 1;
+ hostdata->disconnecting = 1;
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* Accept message by clearing ACK */
+ break;
+
+ case SAVE_POINTERS:
+ case RESTORE_POINTERS:
+ DEB_MSG(printk("scsi%d: save/restore pointers message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ /* The SCSI data pointer is *IMPLICITLY* saved on a disconnect
+ * operation, in violation of the SCSI spec so we can safely
+ * ignore SAVE/RESTORE pointers calls.
+ *
+ * Unfortunately, some disks violate the SCSI spec and
+ * don't issue the required SAVE_POINTERS message before
+ * disconnecting, and we have to break spec to remain
+ * compatible. */
+ if (hostdata->aborted) {
+ DEB_ABORT(printk("ATN set for cmnd %d upon reception of SAVE/REST. POINTERS message\n",
+ cmd->cmnd[0]));
+ AM53C974_write_8(CMDREG, CMDREG_SATN); }
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ case EXTENDED_MESSAGE:
+ DEB_MSG(printk("scsi%d: extended message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ /* Extended messages are sent in the following format :
+ * Byte
+ * 0 EXTENDED_MESSAGE == 1
+ * 1 length (includes one byte for code, doesn't include first two bytes)
+ * 2 code
+ * 3..length+1 arguments
+ */
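+ /* e.g. a synchronous data transfer request (SDTR) arrives as five bytes:
+ 0x01 (EXTENDED_MESSAGE), 0x03 (length), 0x01 (SDTR code), then the
+ period in 4 ns units and the offset (the last two values vary per target) */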
+ /* BEWARE!! THIS CODE IS EXTREMELY UGLY */
+ extended_msg[0] = EXTENDED_MESSAGE;
+ AM53C974_read_8(INSTREG) ; /* clear int */
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* ack. msg byte, then wait for SO */
+ AM53C974_poll_int();
+ /* get length */
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ AM53C974_poll_int();
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* ack. msg byte, then wait for SO */
+ AM53C974_poll_int();
+ extended_msg[1] = len = AM53C974_read_8(FFREG); /* get length */
+ p = extended_msg+2;
+ /* read the remaining (len) bytes */
+ while (len) {
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ AM53C974_poll_int();
+ if (len > 1) {
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* ack. msg byte, then wait for SO */
+ AM53C974_poll_int(); }
+ *p = AM53C974_read_8(FFREG);
+ p++; len--; }
+
+#ifdef AM53C974_DEBUG_MSG
+ printk("scsi%d: received extended message: ", instance->host_no);
+ for (j = 0; j < extended_msg[1] + 2; j++) {
+ printk("0x%02x ", extended_msg[j]);
+ if (j && !(j % 16)) printk("\n"); }
+ printk("\n");
+#endif
+
+ /* check message */
+ if (extended_msg[2] == EXTENDED_SDTR)
+ ret = AM53C974_sync_neg(instance, cmd->target, extended_msg);
+ if (ret || hostdata->aborted) AM53C974_write_8(CMDREG, CMDREG_SATN);
+
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ default:
+ printk("scsi%d: unknown message 0x%02x received\n",instance->host_no, msg);
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ /* reject message */
+ hostdata->msgout[0] = MESSAGE_REJECT;
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ return(0);
+ break;
+
+ } /* switch (msg) */
+KEYWAIT();
+return(1);
+}
+
+/**************************************************************************
+* Function : AM53C974_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
+*
+* Purpose : try to establish nexus for the command;
+* start sync. negotiation via select-with-ATN-and-stop and transfer the command in
+* cmdout phase in case of an inquiry or req. sense command with no
+* sync. neg. performed yet
+*
+* Inputs : instance -- which AM53C974
+* cmd -- command which requires the selection
+* tag -- tagged queueing
+*
+* Returns : nothing
+*
+* Note: this function initializes the selection process, which is continued
+* in the interrupt handler
+**************************************************************************/
+static void AM53C974_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+unsigned char cfifo, tmp[3];
+unsigned int i, len, cmd_size = COMMAND_SIZE(cmd->cmnd[0]);
+AM53C974_setio(instance);
+
+cfifo = AM53C974_cfifo();
+if (cfifo) {
+ printk("scsi%d: select error; %d residual bytes in FIFO\n", instance->host_no, cfifo);
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO); /* clear FIFO */
+ }
+
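+/* the IDENTIFY message byte has bit 7 set, bit 6 = disconnection allowed and the
+ lun in the low bits, so lun 0 with disconnection allowed gives 0xC0 */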
+tmp[0] = IDENTIFY(1, cmd->lun);
+
+#ifdef SCSI2
+if (cmd->device->tagged_queue && (tag != TAG_NONE)) {
+ tmp[1] = SIMPLE_QUEUE_TAG;
+ if (tag == TAG_NEXT) {
+ /* 0 is TAG_NONE, used to imply no tag for this command */
+ if (cmd->device->current_tag == 0) cmd->device->current_tag = 1;
+ cmd->tag = cmd->device->current_tag;
+ cmd->device->current_tag++; }
+ else
+ cmd->tag = (unsigned char)tag;
+ tmp[2] = cmd->tag;
+ hostdata->last_message[0] = SIMPLE_QUEUE_TAG;
+ len = 3;
+ AM53C974_write_8(FFREG, tmp[0]);
+ AM53C974_write_8(FFREG, tmp[1]);
+ AM53C974_write_8(FFREG, tmp[2]);
+ }
+ else
+#endif /* def SCSI2 */
+ {
+ len = 1;
+ AM53C974_write_8(FFREG, tmp[0]);
+ cmd->tag = 0; }
+
+/* in case of an inquiry or req. sense command with no sync. neg. performed yet, we start
+ sync. negotiation via select-with-ATN-and-stop and transfer the command in cmdout phase */
+if (((cmd->cmnd[0] == INQUIRY) || (cmd->cmnd[0] == REQUEST_SENSE)) &&
+ !(hostdata->sync_neg[cmd->target]) && hostdata->sync_en[cmd->target]) {
+ hostdata->sync_neg[cmd->target] = 1;
+ hostdata->msgout[0] = EXTENDED_MESSAGE;
+ hostdata->msgout[1] = 3;
+ hostdata->msgout[2] = EXTENDED_SDTR;
+ hostdata->msgout[3] = 250 / (int)hostdata->max_rate[cmd->target];
+ hostdata->msgout[4] = hostdata->max_offset[cmd->target];
+ len += 5; }
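+/* The five message bytes built above form a standard SDTR extended message:
+ EXTENDED_MESSAGE, length 3, EXTENDED_SDTR, transfer period factor and offset.
+ 250/max_rate is the period factor in units of 4 ns, since a rate of max_rate MHz
+ corresponds to a period of 1000/max_rate ns. */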
+
+AM53C974_write_8(SDIDREG, SDIREG_MASK & cmd->target); /* setup dest. id */
+AM53C974_write_8(STIMREG, DEF_SCSI_TIMEOUT); /* setup timeout reg */
+switch (len) {
+ case 1:
+ for (i = 0; i < cmd_size; i++) AM53C974_write_8(FFREG, cmd->cmnd[i]);
+ AM53C974_write_8(CMDREG, CMDREG_SAS); /* select with ATN, 1 msg byte */
+ hostdata->msgout[0] = NOP;
+ break;
+ case 3:
+ for (i = 0; i < cmd_size; i++) AM53C974_write_8(FFREG, cmd->cmnd[i]);
+ AM53C974_write_8(CMDREG, CMDREG_SA3S); /* select with ATN, 3 msg bytes */
+ hostdata->msgout[0] = NOP;
+ break;
+ default:
+ AM53C974_write_8(CMDREG, CMDREG_SASS); /* select with ATN, stop steps; continue in message out phase */
+ break;
+ }
+}
+
+/**************************************************************************
+* Function : AM53C974_intr_reselect(struct Scsi_Host *instance, unsigned char statreg)
+*
+* Purpose : handle reselection
+*
+* Inputs : instance -- which AM53C974
+* statreg -- status register
+*
+* Returns : nothing
+*
+* side effects: manipulates hostdata
+**************************************************************************/
+static void AM53C974_intr_reselect(struct Scsi_Host *instance, unsigned char statreg)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+unsigned char cfifo, msg[3], lun, t, target = 0;
+#ifdef SCSI2
+ unsigned char tag;
+#endif
+Scsi_Cmnd *tmp = NULL, *prev;
+AM53C974_setio(instance);
+
+cfifo = AM53C974_cfifo();
+
+if (hostdata->selecting) {
+ /* caught reselect interrupt in selection process;
+ put selecting command back into the issue queue and continue with the
+ reselecting command */
+ DEB_RESEL(printk("AM53C974_intr_reselect: in selection process\n"));
+ LIST(hostdata->sel_cmd, hostdata->issue_queue);
+ hostdata->sel_cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = hostdata->sel_cmd;
+ hostdata->sel_cmd = NULL;
+ hostdata->selecting = 0; }
+
+/* 2 bytes must be in the FIFO now */
+if (cfifo != 2) {
+ printk("scsi %d: error: %d bytes in fifo, 2 expected\n", instance->host_no, cfifo);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+
+/* determine target which reselected */
+t = AM53C974_read_8(FFREG);
+if (!(t & (1 << instance->this_id))) {
+ printk("scsi %d: error: invalid host id\n", instance->host_no);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+t ^= (1 << instance->this_id);
+target = 0; while (t != 1) { t >>= 1; target++; }
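+/* Worked example (hypothetical ids): with host id 7 and reselecting target 2 the FIFO
+ byte reads 0x84; clearing our own bit leaves 0x04, and the shift loop yields target = 2. */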
+DEB_RESEL(printk("scsi %d: reselect; target: %d\n", instance->host_no, target));
+
+if (hostdata->aborted) goto EXIT_ABORT;
+
+if ((statreg & STATREG_PHASE) != PHASE_MSGIN) {
+ printk("scsi %d: error: upon reselection interrupt not in MSGIN\n", instance->host_no);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+
+msg[0] = AM53C974_read_8(FFREG);
+if (!(msg[0] & 0x80)) {
+ printk("scsi%d: error: expecting IDENTIFY message, got ", instance->host_no);
+ print_msg(msg);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+
+lun = (msg[0] & 0x07);
+
+/* We need to add code for SCSI-II to track which devices have
+ * I_T_L_Q nexuses established, and which have simple I_T_L
+ * nexuses so we can choose to do additional data transfer. */
+#ifdef SCSI2
+#error "SCSI-II tagged queueing is not supported yet"
+#endif
+
+/* Find the command corresponding to the I_T_L or I_T_L_Q nexus we
+ * just reestablished, and remove it from the disconnected queue. */
+for (tmp = (Scsi_Cmnd *)hostdata->disconnected_queue, prev = NULL;
+ tmp; prev = tmp, tmp = (Scsi_Cmnd *)tmp->host_scribble)
+ if ((target == tmp->target) && (lun == tmp->lun)
+#ifdef SCSI2
+ && (tag == tmp->tag)
+#endif
+ ) {
+ if (prev) {
+ REMOVE(prev, (Scsi_Cmnd *)(prev->host_scribble), tmp,
+ (Scsi_Cmnd *)(tmp->host_scribble));
+ prev->host_scribble = tmp->host_scribble; }
+ else {
+ REMOVE(-1, hostdata->disconnected_queue, tmp, tmp->host_scribble);
+ hostdata->disconnected_queue = (Scsi_Cmnd *)tmp->host_scribble; }
+ tmp->host_scribble = NULL;
+ hostdata->connected = tmp;
+ break; }
+
+if (!tmp) {
+#ifdef SCSI2
+ printk("scsi%d: warning : target %d lun %d tag %d not in disconnect_queue.\n",
+ instance->host_no, target, lun, tag);
+#else
+ printk("scsi%d: warning : target %d lun %d not in disconnect_queue.\n",
+ instance->host_no, target, lun);
+#endif
+ /* Since we have an established nexus that we can't do anything with, we must abort it. */
+ hostdata->aborted = 1;
+ DEB(AM53C974_keywait());
+ goto EXIT_ABORT; }
+ else
+ goto EXIT_OK;
+
+EXIT_ABORT:
+AM53C974_write_8(CMDREG, CMDREG_SATN);
+AM53C974_write_8(CMDREG, CMDREG_MA);
+return;
+
+EXIT_OK:
+DEB_RESEL(printk("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
+ instance->host_no, target, tmp->lun, tmp->tag));
+AM53C974_set_sync(instance, target);
+AM53C974_write_8(SDIDREG, SDIREG_MASK & target); /* setup dest. id */
+AM53C974_write_8(CMDREG, CMDREG_MA);
+hostdata->dma_busy = 0;
+hostdata->connected->SCp.phase = PHASE_CMDOUT;
+}
+
+/**************************************************************************
+* Function : AM53C974_transfer_dma(struct Scsi_Host *instance, short dir,
+* unsigned long length, char *data)
+*
+* Purpose : setup DMA transfer
+*
+* Inputs : instance -- which AM53C974
+* dir -- direction flag, 0: write to device, read from memory;
+* 1: read from device, write to memory
+* length -- number of bytes to transfer to/from the buffer
+* data -- pointer to data buffer
+*
+* Returns : nothing
+**************************************************************************/
+static __inline__ void AM53C974_transfer_dma(struct Scsi_Host *instance, short dir,
+ unsigned long length, char *data)
+{
+AM53C974_local_declare();
+AM53C974_setio(instance);
+
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+AM53C974_write_8(DMACMD, (dir << 7) | DMACMD_INTE_D); /* idle command */
+AM53C974_write_8(STCLREG, (unsigned char)(length & 0xff));
+AM53C974_write_8(STCMREG, (unsigned char)((length & 0xff00) >> 8));
+AM53C974_write_8(STCHREG, (unsigned char)((length & 0xff0000) >> 16));
+AM53C974_write_32(DMASTC, length & 0xffffff);
+AM53C974_write_32(DMASPA, (unsigned long)data);
+AM53C974_write_8(CMDREG, CMDREG_IT | CMDREG_DMA);
+AM53C974_write_8(DMACMD, (dir << 7) | DMACMD_INTE_D | DMACMD_START);
+}
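+/* Note: the 24-bit transfer count is programmed twice above -- byte-wise into the
+ SCSI block start transfer count registers (STCLREG/STCMREG/STCHREG) and as a single
+ 32-bit write into the DMA engine's starting transfer count register (DMASTC), while
+ DMASPA receives the starting address of the data buffer. */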
+
+/**************************************************************************
+* Function : AM53C974_dma_blast(struct Scsi_Host *instance, unsigned char dmastatus,
+* unsigned char statreg)
+*
+* Purpose : cleanup DMA transfer
+*
+* Inputs : instance -- which AM53C974
+* dmastatus -- dma status register
+* statreg -- status register
+*
+* Returns : nothing
+**************************************************************************/
+static void AM53C974_dma_blast(struct Scsi_Host *instance, unsigned char dmastatus,
+ unsigned char statreg)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+unsigned long ctcreg;
+int dir = statreg & STATREG_IO;
+int cfifo, pio, i = 0;
+AM53C974_setio(instance);
+
+do {
+ cfifo = AM53C974_cfifo();
+ i++;
+ } while (cfifo && (i < 50000));
+pio = (i == 50000) ? 1: 0;
+
+if (statreg & STATREG_CTZ) { AM53C974_write_8(DMACMD, DMACMD_IDLE); return; }
+
+if (dmastatus & DMASTATUS_DONE) { AM53C974_write_8(DMACMD, DMACMD_IDLE); return; }
+
+AM53C974_write_8(DMACMD, ((dir << 7) & DMACMD_DIR) | DMACMD_BLAST);
+while(!(AM53C974_read_8(DMASTATUS) & DMASTATUS_BCMPLT)) ;
+AM53C974_write_8(DMACMD, DMACMD_IDLE);
+
+if (pio) {
+ /* transfer residual bytes via PIO */
+ unsigned char *wac = (unsigned char *)AM53C974_read_32(DMAWAC);
+ printk("pio mode, residual=%d\n", AM53C974_read_8(CFIREG) & CFIREG_CF);
+ while (AM53C974_read_8(CFIREG) & CFIREG_CF) *(wac++) = AM53C974_read_8(FFREG);
+ }
+
+ctcreg = AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16);
+
+hostdata->connected->SCp.ptr += hostdata->connected->SCp.this_residual - ctcreg;
+hostdata->connected->SCp.this_residual = ctcreg;
+}
+
+/**************************************************************************
+* Function : AM53C974_intr_bus_reset(struct Scsi_Host *instance)
+*
+* Purpose : handle bus reset interrupt
+*
+* Inputs : instance -- which AM53C974
+*
+* Returns : nothing
+**************************************************************************/
+static void AM53C974_intr_bus_reset(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+unsigned char cntlreg1;
+AM53C974_setio(instance);
+
+AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+
+cntlreg1 = AM53C974_read_8(CNTLREG1);
+AM53C974_write_8(CNTLREG1, cntlreg1 | CNTLREG1_DISR);
+}
+
+/**************************************************************************
+* Function : int AM53C974_abort(Scsi_Cmnd *cmd)
+*
+* Purpose : abort a command
+*
+* Inputs : cmd -- the Scsi_Cmnd to abort
+*
+* Returns : SCSI_ABORT_SUCCESS, SCSI_ABORT_PENDING or SCSI_ABORT_NOT_RUNNING
+ **************************************************************************/
+int AM53C974_abort(Scsi_Cmnd *cmd)
+{
+AM53C974_local_declare();
+struct Scsi_Host *instance = cmd->host;
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *tmp, **prev;
+
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+cli();
+AM53C974_setio(instance);
+
+DEB_ABORT(printk(SEPARATOR_LINE));
+DEB_ABORT(printk("scsi%d : AM53C974_abort called -- trouble starts!!\n", instance->host_no));
+DEB_ABORT(AM53C974_print(instance));
+DEB_ABORT(AM53C974_keywait());
+
+/* Case 1 : If the command is the currently executing command,
+ we'll set the aborted flag and return control so that the
+ information transfer routine can exit cleanly. */
+if ((hostdata->connected == cmd) || (hostdata->sel_cmd == cmd)) {
+ DEB_ABORT(printk("scsi%d: aborting connected command\n", instance->host_no));
+ hostdata->aborted = 1;
+ hostdata->msgout[0] = ABORT;
+ sti();
+ return(SCSI_ABORT_PENDING); }
+
+/* Case 2 : If the command hasn't been issued yet,
+ we simply remove it from the issue queue. */
+for (prev = (Scsi_Cmnd **)&(hostdata->issue_queue),
+ tmp = (Scsi_Cmnd *)hostdata->issue_queue; tmp;
+ prev = (Scsi_Cmnd **)&(tmp->host_scribble),
+ tmp = (Scsi_Cmnd *)tmp->host_scribble) {
+ if (cmd == tmp) {
+ DEB_ABORT(printk("scsi%d : abort removed command from issue queue.\n", instance->host_no));
+ REMOVE(5, *prev, tmp, tmp->host_scribble);
+ (*prev) = (Scsi_Cmnd *)tmp->host_scribble;
+ tmp->host_scribble = NULL;
+ tmp->result = DID_ABORT << 16;
+ sti();
+ tmp->done(tmp);
+ return(SCSI_ABORT_SUCCESS); }
+#ifdef AM53C974_DEBUG_ABORT
+ else {
+ if (prev == (Scsi_Cmnd **)tmp)
+ printk("scsi%d : LOOP\n", instance->host_no);
+ }
+#endif
+ }
+
+/* Case 3 : If any commands are connected, we're going to fail the abort
+ * and let the high level SCSI driver retry at a later time or
+ * issue a reset.
+ *
+ * Timeouts, and therefore aborted commands, will be highly unlikely
+ * and handling them cleanly in this situation would make the common
+ * case of no resets less efficient, and would pollute our code. So,
+ * we fail. */
+if (hostdata->connected || hostdata->sel_cmd) {
+ DEB_ABORT(printk("scsi%d : abort failed, other command connected.\n", instance->host_no));
+ sti();
+ return(SCSI_ABORT_NOT_RUNNING); }
+
+/* Case 4: If the command is currently disconnected from the bus, and
+ * there are no connected commands, we reconnect the I_T_L or
+ * I_T_L_Q nexus associated with it, go into message out, and send
+ * an abort message. */
+for (tmp = (Scsi_Cmnd *)hostdata->disconnected_queue; tmp;
+ tmp = (Scsi_Cmnd *)tmp->host_scribble) {
+ if (cmd == tmp) {
+ DEB_ABORT(printk("scsi%d: aborting disconnected command\n", instance->host_no));
+ hostdata->aborted = 1;
+ hostdata->msgout[0] = ABORT;
+ hostdata->selecting = 1;
+ hostdata->sel_cmd = tmp;
+ AM53C974_write_8(CMDREG, CMDREG_DSR);
+ sti();
+ return(SCSI_ABORT_PENDING); }
+ }
+
+/* Case 5 : If we reached this point, the command was not found in any of
+ * the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abortion code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke. */
+DEB_ABORT(printk("scsi%d : abort failed, command not found.\n", instance->host_no));
+sti();
+return(SCSI_ABORT_NOT_RUNNING);
+}
+
+/**************************************************************************
+* Function : int AM53C974_reset(Scsi_Cmnd *cmd)
+*
+* Purpose : reset the SCSI controller and bus
+*
+* Inputs : cmd -- the command that was responsible for the reset
+*
+* Returns : status (SCSI_ABORT_SUCCESS)
+**************************************************************************/
+int AM53C974_reset(Scsi_Cmnd *cmd)
+{
+AM53C974_local_declare();
+int i;
+struct Scsi_Host *instance = cmd->host;
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+cli();
+DEB(printk("AM53C974_reset called; "));
+
+printk("AM53C974_reset called\n");
+AM53C974_print(instance);
+AM53C974_keywait();
+
+/* do hard reset */
+AM53C974_write_8(CMDREG, CMDREG_RDEV);
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+hostdata->msgout[0] = NOP;
+for (i = 0; i < 8; i++) {
+ hostdata->busy[i] = 0;
+ hostdata->sync_per[i] = DEF_STP;
+ hostdata->sync_off[i] = 0;
+ hostdata->sync_neg[i] = 0; }
+hostdata->last_message[0] = NOP;
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->issue_queue = NULL;
+hostdata->disconnected_queue = NULL;
+hostdata->in_reset = 0;
+hostdata->aborted = 0;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+
+/* reset bus */
+AM53C974_write_8(CNTLREG1, CNTLREG1_DISR | instance->this_id); /* disable interrupt upon SCSI RESET */
+AM53C974_write_8(CMDREG, CMDREG_RBUS); /* reset SCSI bus */
+udelay(40);
+AM53C974_config_after_reset(instance);
+
+sti();
+cmd->result = DID_RESET << 16;
+cmd->scsi_done(cmd);
+return SCSI_ABORT_SUCCESS;
+}
diff --git a/i386/i386at/gpl/linux/scsi/AM53C974.h b/i386/i386at/gpl/linux/scsi/AM53C974.h
new file mode 100644
index 00000000..2a07a5a4
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/AM53C974.h
@@ -0,0 +1,419 @@
+/* AM53/79C974 (PCscsi) driver release 0.5
+ *
+ * The architecture and much of the code of this device
+ * driver was originally developed by Drew Eckhardt for
+ * the NCR5380. The following copyrights apply:
+ * For the architecture and all parts similar to the NCR5380:
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * The AM53C974_nobios_detect code was originally developed by
+ * Robin Cutshaw (robin@xfree86.org) and is used here in a
+ * modified form.
+ *
+ * For the other parts:
+ * Copyright 1994, D. Frieauff
+ * EMail: fri@rsx42sun0.dofn.de
+ * Phone: x49-7545-8-2256 , x49-7541-42305
+ */
+
+/*
+ * $Log: AM53C974.h,v $
+ * Revision 1.1.1.1 1996/10/30 01:39:58 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1996/03/25 20:25:06 goel
+ * Linux driver merge.
+ *
+ */
+
+#ifndef AM53C974_H
+#define AM53C974_H
+
+#include <linux/scsicam.h>
+
+/***************************************************************************************
+* Default setting of the controller's SCSI id. Edit and uncomment this only if your *
+* BIOS does not correctly initialize the controller's SCSI id. *
+* If you don't get a warning during boot, it is correctly initialized. *
+****************************************************************************************/
+/* #define AM53C974_SCSI_ID 7 */
+
+/***************************************************************************************
+* Default settings for sync. negotiation enable, transfer rate and sync. offset. *
+* These settings can be replaced by LILO overrides (append) with the following syntax: *
+* AM53C974=host-scsi-id, target-scsi-id, max-rate, max-offset *
+* Sync. negotiation is disabled by default and will be enabled for those targets which *
+* are specified in the LILO override *
+****************************************************************************************/
+#define DEFAULT_SYNC_NEGOTIATION_ENABLED 0 /* 0 or 1 */
+#define DEFAULT_RATE 5 /* MHz, min: 3; max: 10 */
+#define DEFAULT_SYNC_OFFSET 0 /* bytes, min: 0; max: 15; use 0 for async. mode */
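+
+/* Example override (hypothetical values): a kernel command line entry such as
+ AM53C974=7,2,10,15
+ would enable sync. negotiation for target 2 on a controller with SCSI id 7,
+ using a max. rate of 10 MHz and a max. offset of 15 bytes. */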
+
+
+/* --------------------- don't edit below here --------------------- */
+
+#define AM53C974_DRIVER_REVISION_MAJOR 0
+#define AM53C974_DRIVER_REVISION_MINOR 5
+#define SEPARATOR_LINE \
+"--------------------------------------------------------------------------\n"
+
+/* debug control */
+/* #define AM53C974_DEBUG */
+/* #define AM53C974_DEBUG_MSG */
+/* #define AM53C974_DEBUG_KEYWAIT */
+/* #define AM53C974_DEBUG_INIT */
+/* #define AM53C974_DEBUG_QUEUE */
+/* #define AM53C974_DEBUG_INFO */
+/* #define AM53C974_DEBUG_LINKED */
+/* #define VERBOSE_AM53C974_DEBUG */
+/* #define AM53C974_DEBUG_INTR */
+/* #define AM53C974_DEB_RESEL */
+#define AM53C974_DEBUG_ABORT
+/* #define AM53C974_OPTION_DEBUG_PROBE_ONLY */
+
+/* special options/constants */
+#define DEF_CLK 40 /* chip clock freq. in MHz */
+#define MIN_PERIOD 4 /* for negotiation: min. number of clocks per cycle */
+#define MAX_PERIOD 13 /* for negotiation: max. number of clocks per cycle */
+#define MAX_OFFSET 15 /* for negotiation: max. offset (0=async) */
+
+#define DEF_SCSI_TIMEOUT 245 /* STIMREG value, 40 MHz */
+#define DEF_STP 8 /* STPREG value assuming 5.0 MB/sec, FASTCLK, FASTSCSI */
+#define DEF_SOF_RAD 0 /* REQ/ACK deassertion delay */
+#define DEF_SOF_RAA 0 /* REQ/ACK assertion delay */
+#define DEF_ETM 0 /* CNTLREG1, ext. timing mode */
+#define DEF_PERE 1 /* CNTLREG1, parity error reporting */
+#define DEF_CLKF 0 /* CLKFREG, 0=40 MHz */
+#define DEF_ENF 1 /* CNTLREG2, enable features */
+#define DEF_ADIDCHK 0 /* CNTLREG3, additional ID check */
+#define DEF_FASTSCSI 1 /* CNTLREG3, fast SCSI */
+#define DEF_FASTCLK 1 /* CNTLREG3, fast clocking, 5 MB/sec at 40MHz chip clk */
+#define DEF_GLITCH 1 /* CNTLREG4, glitch eater, 0=12ns, 1=35ns, 2=25ns, 3=off */
+#define DEF_PWD 0 /* CNTLREG4, reduced power feature */
+#define DEF_RAE 0 /* CNTLREG4, RAE active negation on REQ, ACK only */
+#define DEF_RADE 1 /* CNTLREG4, active negation on REQ, ACK and data */
+
+/*** PCI block ***/
+/* standard registers are defined in <linux/pci.h> */
+#ifndef PCI_VENDOR_ID_AMD
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+#endif
+#define PCI_BASE_MASK 0xFFFFFFE0
+#define PCI_COMMAND_PERREN 0x40
+#define PCI_SCRATCH_REG_0 0x40 /* 16 bits */
+#define PCI_SCRATCH_REG_1 0x42 /* 16 bits */
+#define PCI_SCRATCH_REG_2 0x44 /* 16 bits */
+#define PCI_SCRATCH_REG_3 0x46 /* 16 bits */
+#define PCI_SCRATCH_REG_4 0x48 /* 16 bits */
+#define PCI_SCRATCH_REG_5 0x4A /* 16 bits */
+#define PCI_SCRATCH_REG_6 0x4C /* 16 bits */
+#define PCI_SCRATCH_REG_7 0x4E /* 16 bits */
+
+/*** SCSI block ***/
+#define CTCLREG 0x00 /* r current transf. count, low byte */
+#define CTCMREG 0x04 /* r current transf. count, middle byte */
+#define CTCHREG 0x38 /* r current transf. count, high byte */
+#define STCLREG 0x00 /* w start transf. count, low byte */
+#define STCMREG 0x04 /* w start transf. count, middle byte */
+#define STCHREG 0x38 /* w start transf. count, high byte */
+#define FFREG 0x08 /* rw SCSI FIFO reg. */
+#define STIMREG 0x14 /* w SCSI timeout reg. */
+
+#define SDIDREG 0x10 /* w SCSI destination ID reg. */
+#define SDIREG_MASK 0x07 /* mask */
+
+#define STPREG 0x18 /* w synchronous transf. period reg. */
+#define STPREG_STP 0x1F /* synchr. transfer period */
+
+#define CLKFREG 0x24 /* w clock factor reg. */
+#define CLKFREG_MASK 0x07 /* mask */
+
+#define CMDREG 0x0C /* rw SCSI command reg. */
+#define CMDREG_DMA 0x80 /* set DMA mode (set together with opcodes below) */
+#define CMDREG_IT 0x10 /* information transfer */
+#define CMDREG_ICCS 0x11 /* initiator command complete steps */
+#define CMDREG_MA 0x12 /* message accepted */
+#define CMDREG_TPB 0x98 /* transfer pad bytes, DMA mode only */
+#define CMDREG_SATN 0x1A /* set ATN */
+#define CMDREG_RATN 0x1B /* reset ATN */
+#define CMDREG_SOAS 0x41 /* select without ATN steps */
+#define CMDREG_SAS 0x42 /* select with ATN steps (1 msg byte) */
+#define CMDREG_SASS 0x43 /* select with ATN and stop steps */
+#define CMDREG_ESR 0x44 /* enable selection/reselection */
+#define CMDREG_DSR 0x45 /* disable selection/reselection */
+#define CMDREG_SA3S 0x46 /* select with ATN 3 steps (3 msg bytes) */
+#define CMDREG_NOP 0x00 /* no operation */
+#define CMDREG_CFIFO 0x01 /* clear FIFO */
+#define CMDREG_RDEV 0x02 /* reset device */
+#define CMDREG_RBUS 0x03 /* reset SCSI bus */
+
+#define STATREG 0x10 /* r SCSI status reg. */
+#define STATREG_INT 0x80 /* SCSI interrupt condition detected */
+#define STATREG_IOE 0x40 /* SCSI illegal operation error detected */
+#define STATREG_PE 0x20 /* SCSI parity error detected */
+#define STATREG_CTZ 0x10 /* CTC reg decremented to zero */
+#define STATREG_MSG 0x04 /* SCSI MSG phase (latched?) */
+#define STATREG_CD 0x02 /* SCSI C/D phase (latched?) */
+#define STATREG_IO 0x01 /* SCSI I/O phase (latched?) */
+#define STATREG_PHASE 0x07 /* SCSI phase mask */
+
+#define INSTREG 0x14 /* r interrupt status reg. */
+#define INSTREG_SRST 0x80 /* SCSI reset detected */
+#define INSTREG_ICMD 0x40 /* SCSI invalid command detected */
+#define INSTREG_DIS 0x20 /* target disconnected or sel/resel timeout*/
+#define INSTREG_SR 0x10 /* device on bus has service request */
+#define INSTREG_SO 0x08 /* successful operation */
+#define INSTREG_RESEL 0x04 /* device reselected as initiator */
+
+#define ISREG 0x18 /* r internal state reg. */
+#define ISREG_SOF 0x08 /* synchronous offset flag (act. low) */
+#define ISREG_IS 0x07 /* status of intermediate op. */
+#define ISREG_OK_NO_STOP 0x04 /* selection successful */
+#define ISREG_OK_STOP 0x01 /* selection successful */
+
+#define CFIREG 0x1C /* r current FIFO/internal state reg. */
+#define CFIREG_IS 0xE0 /* status of intermediate op. */
+#define CFIREG_CF 0x1F /* number of bytes in SCSI FIFO */
+
+#define SOFREG 0x1C /* w synchr. offset reg. */
+#define SOFREG_RAD 0xC0 /* REQ/ACK deassertion delay (sync.) */
+#define SOFREG_RAA 0x30 /* REQ/ACK assertion delay (sync.) */
+#define SOFREG_SO 0x0F /* synch. offset (sync.) */
+
+#define CNTLREG1 0x20 /* rw control register one */
+#define CNTLREG1_ETM 0x80 /* set extended timing mode */
+#define CNTLREG1_DISR 0x40 /* disable interrupt on SCSI reset */
+#define CNTLREG1_PERE 0x10 /* enable parity error reporting */
+#define CNTLREG1_SID 0x07 /* host adapter SCSI ID */
+
+#define CNTLREG2 0x2C /* rw control register two */
+#define CNTLREG2_ENF 0x40 /* enable features */
+
+#define CNTLREG3 0x30 /* rw control register three */
+#define CNTLREG3_ADIDCHK 0x80 /* additional ID check */
+#define CNTLREG3_FASTSCSI 0x10 /* fast SCSI */
+#define CNTLREG3_FASTCLK 0x08 /* fast SCSI clocking */
+
+#define CNTLREG4 0x34 /* rw control register four */
+#define CNTLREG4_GLITCH 0xC0 /* glitch eater */
+#define CNTLREG4_PWD 0x20 /* reduced power feature */
+#define CNTLREG4_RAE 0x08 /* write only, active negot. ctrl. */
+#define CNTLREG4_RADE 0x04 /* active negot. ctrl. */
+#define CNTLREG4_RES 0x10 /* reserved bit, must be 1 */
+
+/*** DMA block ***/
+#define DMACMD 0x40 /* rw command */
+#define DMACMD_DIR 0x80 /* transfer direction (1=read from device) */
+#define DMACMD_INTE_D 0x40 /* DMA transfer interrupt enable */
+#define DMACMD_INTE_P 0x20 /* page transfer interrupt enable */
+#define DMACMD_MDL 0x10 /* map to memory descriptor list */
+#define DMACMD_DIAG 0x04 /* diagnostics, set to 0 */
+#define DMACMD_IDLE 0x00 /* idle cmd */
+#define DMACMD_BLAST 0x01 /* flush FIFO to memory */
+#define DMACMD_ABORT 0x02 /* terminate DMA */
+#define DMACMD_START 0x03 /* start DMA */
+
+#define DMASTATUS 0x54 /* r status register */
+#define DMASTATUS_BCMPLT 0x20 /* BLAST complete */
+#define DMASTATUS_SCSIINT 0x10 /* SCSI interrupt pending */
+#define DMASTATUS_DONE 0x08 /* DMA transfer terminated */
+#define DMASTATUS_ABORT 0x04 /* DMA transfer aborted */
+#define DMASTATUS_ERROR 0x02 /* DMA transfer error */
+#define DMASTATUS_PWDN 0x02 /* power down indicator */
+
+#define DMASTC 0x44 /* rw starting transfer count */
+#define DMASPA 0x48 /* rw starting physical address */
+#define DMAWBC 0x4C /* r working byte counter */
+#define DMAWAC 0x50 /* r working address counter */
+#define DMASMDLA 0x58 /* rw starting MDL address */
+#define DMAWMAC 0x5C /* r working MDL counter */
+
+/*** SCSI phases ***/
+#define PHASE_MSGIN 0x07
+#define PHASE_MSGOUT 0x06
+#define PHASE_RES_1 0x05
+#define PHASE_RES_0 0x04
+#define PHASE_STATIN 0x03
+#define PHASE_CMDOUT 0x02
+#define PHASE_DATAIN 0x01
+#define PHASE_DATAOUT 0x00
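+/* Note: the phase code is just the MSG/CD/IO bit triple latched in the status
+ register, i.e. (statreg & STATREG_PHASE) with STATREG_MSG, STATREG_CD and STATREG_IO
+ as bits 2..0; MSGIN asserts all three lines, hence 0x07. */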
+
+struct AM53C974_hostdata {
+ volatile unsigned in_reset:1; /* flag, says bus reset pending */
+ volatile unsigned aborted:1; /* flag, says aborted */
+ volatile unsigned selecting:1; /* selection started, but not yet finished */
+ volatile unsigned disconnecting: 1; /* disconnection started, but not yet finished */
+ volatile unsigned dma_busy:1; /* dma busy when service request for info transfer received */
+ volatile unsigned char msgout[10]; /* message to output in MSGOUT_PHASE */
+ volatile unsigned char last_message[10]; /* last message OUT */
+ volatile Scsi_Cmnd *issue_queue; /* waiting to be issued */
+ volatile Scsi_Cmnd *disconnected_queue; /* waiting for reconnect */
+ volatile Scsi_Cmnd *sel_cmd; /* command for selection */
+ volatile Scsi_Cmnd *connected; /* currently connected command */
+ volatile unsigned char busy[8]; /* index = target, bit = lun */
+ unsigned char sync_per[8]; /* synchronous transfer period (in effect) */
+ unsigned char sync_off[8]; /* synchronous offset (in effect) */
+ unsigned char sync_neg[8]; /* sync. negotiation performed (in effect) */
+ unsigned char sync_en[8]; /* sync. negotiation enabled (setup) */
+ unsigned char max_rate[8]; /* max. transfer rate (setup) */
+ unsigned char max_offset[8]; /* max. sync. offset (setup), only valid if corresponding sync_en is nonzero */
+ };
+
+#define AM53C974 { \
+ NULL, /* pointer to next in list */ \
+ NULL, /* long * usage_count */ \
+ NULL, /* struct proc_dir_entry *proc_dir */ \
+ NULL, /* int (*proc_info)(char *, char **, off_t, int, int, int); */ \
+ "AM53C974", /* name */ \
+ AM53C974_detect, /* int (* detect)(struct SHT *) */ \
+ NULL, /* int (*release)(struct Scsi_Host *) */ \
+ AM53C974_info, /* const char *(* info)(struct Scsi_Host *) */ \
+ AM53C974_command, /* int (* command)(Scsi_Cmnd *) */ \
+ AM53C974_queue_command, /* int (* queuecommand)(Scsi_Cmnd *, \
+ void (*done)(Scsi_Cmnd *)) */ \
+ AM53C974_abort, /* int (* abort)(Scsi_Cmnd *) */ \
+ AM53C974_reset, /* int (* reset)(Scsi_Cmnd *) */ \
+ NULL, /* int (* slave_attach)(int, int) */ \
+ scsicam_bios_param, /* int (* bios_param)(Disk *, int, int[]) */ \
+ 12, /* can_queue */ \
+ -1, /* this_id */ \
+ SG_ALL, /* sg_tablesize */ \
+ 1, /* cmd_per_lun */ \
+ 0, /* present, i.e. how many adapters of this kind */ \
+ 0, /* unchecked_isa_dma */ \
+ DISABLE_CLUSTERING /* use_clustering */ \
+ }
+
+void AM53C974_setup(char *str, int *ints);
+int AM53C974_detect(Scsi_Host_Template *tpnt);
+int AM53C974_biosparm(Disk *disk, int dev, int *info_array);
+const char *AM53C974_info(struct Scsi_Host *);
+int AM53C974_command(Scsi_Cmnd *SCpnt);
+int AM53C974_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
+int AM53C974_abort(Scsi_Cmnd *cmd);
+int AM53C974_reset (Scsi_Cmnd *cmd);
+
+#define AM53C974_local_declare() unsigned long io_port
+#define AM53C974_setio(instance) io_port = instance->io_port
+#define AM53C974_read_8(addr) inb(io_port + (addr))
+#define AM53C974_write_8(addr,x) outb((x), io_port + (addr))
+#define AM53C974_read_16(addr) inw(io_port + (addr))
+#define AM53C974_write_16(addr,x) outw((x), io_port + (addr))
+#define AM53C974_read_32(addr) inl(io_port + (addr))
+#define AM53C974_write_32(addr,x) outl((x), io_port + (addr))
+
+#define AM53C974_poll_int() { do { statreg = AM53C974_read_8(STATREG); } \
+ while (!(statreg & STATREG_INT)) ; \
+ AM53C974_read_8(INSTREG) ; } /* clear int */
+#define AM53C974_cfifo() (AM53C974_read_8(CFIREG) & CFIREG_CF)
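+
+/* Note: AM53C974_poll_int() relies on a local variable named statreg (and on io_port
+ set up via AM53C974_local_declare()/AM53C974_setio()) being present in the calling
+ scope. A minimal usage sketch, mirroring the message-in handling in AM53C974.c:
+
+ unsigned char statreg, msg;
+ AM53C974_local_declare();
+ AM53C974_setio(instance);
+ AM53C974_write_8(CMDREG, CMDREG_IT); // request the next information transfer step
+ AM53C974_poll_int(); // busy-wait for STATREG_INT, then clear it
+ msg = AM53C974_read_8(FFREG); // fetch the received byte from the FIFO
+*/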
+
+/* These are "special" values for the tag parameter passed to AM53C974_select. */
+#define TAG_NEXT -1 /* Use next free tag */
+#define TAG_NONE -2 /* Establish I_T_L nexus instead of I_T_L_Q
+ * even on SCSI-II devices */
+
+/************ LILO overrides *************/
+typedef struct _override_t {
+ int host_scsi_id; /* SCSI id of the bus controller */
+ int target_scsi_id; /* SCSI id of target */
+ int max_rate; /* max. transfer rate */
+ int max_offset; /* max. sync. offset, 0 = asynchronous */
+ } override_t;
+
+/************ PCI stuff *************/
+#define AM53C974_PCIREG_OPEN() outb(0xF1, 0xCF8); outb(0, 0xCFA)
+#define AM53C974_PCIREG_CLOSE() outb(0, 0xCF8)
+#define AM53C974_PCIREG_READ_BYTE(instance,a) ( inb((a) + (instance)->io_port) )
+#define AM53C974_PCIREG_READ_WORD(instance,a) ( inw((a) + (instance)->io_port) )
+#define AM53C974_PCIREG_READ_DWORD(instance,a) ( inl((a) + (instance)->io_port) )
+#define AM53C974_PCIREG_WRITE_BYTE(instance,x,a) ( outb((x), (a) + (instance)->io_port) )
+#define AM53C974_PCIREG_WRITE_WORD(instance,x,a) ( outw((x), (a) + (instance)->io_port) )
+#define AM53C974_PCIREG_WRITE_DWORD(instance,x,a) ( outl((x), (a) + (instance)->io_port) )
+
+typedef struct _pci_config_t {
+ /* start of official PCI config space header */
+ union {
+ unsigned int device_vendor;
+ struct {
+ unsigned short vendor;
+ unsigned short device;
+ } dv;
+ } dv_id;
+#define _device_vendor dv_id.device_vendor
+#define _vendor dv_id.dv.vendor
+#define _device dv_id.dv.device
+ union {
+ unsigned int status_command;
+ struct {
+ unsigned short command;
+ unsigned short status;
+ } sc;
+ } stat_cmd;
+#define _status_command stat_cmd.status_command
+#define _command stat_cmd.sc.command
+#define _status stat_cmd.sc.status
+ union {
+ unsigned int class_revision;
+ struct {
+ unsigned char rev_id;
+ unsigned char prog_if;
+ unsigned char sub_class;
+ unsigned char base_class;
+ } cr;
+ } class_rev;
+#define _class_revision class_rev.class_revision
+#define _rev_id class_rev.cr.rev_id
+#define _prog_if class_rev.cr.prog_if
+#define _sub_class class_rev.cr.sub_class
+#define _base_class class_rev.cr.base_class
+ union {
+ unsigned int bist_header_latency_cache;
+ struct {
+ unsigned char cache_line_size;
+ unsigned char latency_timer;
+ unsigned char header_type;
+ unsigned char bist;
+ } bhlc;
+ } bhlc;
+#define _bist_header_latency_cache bhlc.bist_header_latency_cache
+#define _cache_line_size bhlc.bhlc.cache_line_size
+#define _latency_timer bhlc.bhlc.latency_timer
+#define _header_type bhlc.bhlc.header_type
+#define _bist bhlc.bhlc.bist
+ unsigned int _base0;
+ unsigned int _base1;
+ unsigned int _base2;
+ unsigned int _base3;
+ unsigned int _base4;
+ unsigned int _base5;
+ unsigned int rsvd1;
+ unsigned int rsvd2;
+ unsigned int _baserom;
+ unsigned int rsvd3;
+ unsigned int rsvd4;
+ union {
+ unsigned int max_min_ipin_iline;
+ struct {
+ unsigned char int_line;
+ unsigned char int_pin;
+ unsigned char min_gnt;
+ unsigned char max_lat;
+ } mmii;
+ } mmii;
+#define _max_min_ipin_iline mmii.max_min_ipin_iline
+#define _int_line mmii.mmii.int_line
+#define _int_pin mmii.mmii.int_pin
+#define _min_gnt mmii.mmii.min_gnt
+#define _max_lat mmii.mmii.max_lat
+ /* end of official PCI config space header */
+ unsigned short _ioaddr; /* config type 1 - private I/O addr */
+ unsigned int _pcibus; /* config type 2 - private bus id */
+ unsigned int _cardnum; /* config type 2 - private card number */
+} pci_config_t;
+
+#endif /* AM53C974_H */
diff --git a/i386/i386at/gpl/linux/scsi/BusLogic.c b/i386/i386at/gpl/linux/scsi/BusLogic.c
new file mode 100644
index 00000000..7472dc3d
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/BusLogic.c
@@ -0,0 +1,2779 @@
+/*
+
+ Linux Driver for BusLogic MultiMaster SCSI Host Adapters
+
+ Copyright 1995 by Leonard N. Zubkoff <lnz@dandelion.com>
+
+ This program is free software; you may redistribute and/or modify it under
+ the terms of the GNU General Public License Version 2 as published by the
+ Free Software Foundation, provided that none of the source code or runtime
+ copyright notices are removed or modified.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for complete details.
+
+ The author respectfully requests that all modifications to this software be
+ sent directly to him for evaluation and testing.
+
+ Special thanks to Alex T. Win of BusLogic, whose advice has been invaluable,
+ to David B. Gentzel, for writing the original Linux BusLogic driver, and to
+ Paul Gortmaker, for being such a dedicated test site.
+
+*/
+
+
+#define BusLogic_DriverVersion "1.3.1"
+#define BusLogic_DriverDate "31 December 1995"
+
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/kernel_stat.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/stat.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include "BusLogic.h"
+
+
+/*
+ BusLogic_CommandLineEntryCount is a count of the number of "BusLogic="
+ entries provided on the Linux Kernel Command Line.
+*/
+
+static int
+ BusLogic_CommandLineEntryCount = 0;
+
+
+/*
+ BusLogic_CommandLineEntries is an array of Command Line Entry structures
+ representing the "BusLogic=" entries provided on the Linux Kernel Command
+ Line.
+*/
+
+static BusLogic_CommandLineEntry_T
+ BusLogic_CommandLineEntries[BusLogic_MaxHostAdapters];
+
+
+/*
+ BusLogic_GlobalOptions is a bit mask of Global Options to be applied
+ across all Host Adapters.
+*/
+
+static int
+ BusLogic_GlobalOptions = 0;
+
+
+/*
+ BusLogic_RegisteredHostAdapters is a linked list of all the registered
+ BusLogic Host Adapters.
+*/
+
+static BusLogic_HostAdapter_T
+ *BusLogic_RegisteredHostAdapters = NULL;
+
+
+/*
+ BusLogic_IO_StandardAddresses is the list of standard I/O Addresses at which
+ BusLogic Host Adapters may potentially be found.
+*/
+
+static unsigned short
+ BusLogic_IO_StandardAddresses[] =
+ { 0x330, 0x334, 0x230, 0x234, 0x130, 0x134, 0 };
+
+
+/*
+ BusLogic_IO_AddressProbeList is the list of I/O Addresses to be probed for
+ potential BusLogic Host Adapters. It is initialized by interrogating the
+ PCI Configuration Space on PCI machines as well as from the list of
+ standard BusLogic I/O Addresses.
+*/
+
+static unsigned short
+ BusLogic_IO_AddressProbeList[BusLogic_IO_MaxProbeAddresses+1] = { 0 };
+
+
+/*
+ BusLogic_IRQ_UsageCount stores a count of the number of Host Adapters using
+ a given IRQ Channel, which is necessary to support PCI, EISA, or MCA shared
+ interrupts. Only IRQ Channels 9, 10, 11, 12, 14, and 15 are supported by
+ BusLogic Host Adapters.
+*/
+
+static short
+ BusLogic_IRQ_UsageCount[7] = { 0 };
+
+
+/*
+ BusLogic_CommandFailureReason holds a string identifying the reason why a
+ call to BusLogic_Command failed. It is only valid when BusLogic_Command
+ returns a failure code.
+*/
+
+static char
+ *BusLogic_CommandFailureReason;
+
+
+/*
+ BusLogic_ProcDirectoryEntry is the BusLogic /proc/scsi directory entry.
+*/
+
+static struct proc_dir_entry
+ BusLogic_ProcDirectoryEntry =
+ { PROC_SCSI_BUSLOGIC, 8, "BusLogic", S_IFDIR | S_IRUGO | S_IXUGO, 2 };
+
+
+/*
+ BusLogic_AnnounceDriver announces the Driver Version and Date, Author's
+ Name, Copyright Notice, and Contact Address.
+*/
+
+static void BusLogic_AnnounceDriver(void)
+{
+ static boolean DriverAnnouncementPrinted = false;
+ if (DriverAnnouncementPrinted) return;
+ printk("scsi: ***** BusLogic SCSI Driver Version "
+ BusLogic_DriverVersion " of " BusLogic_DriverDate " *****\n");
+ printk("scsi: Copyright 1995 by Leonard N. Zubkoff <lnz@dandelion.com>\n");
+ DriverAnnouncementPrinted = true;
+}
+
+
+/*
+ BusLogic_DriverInfo returns the Board Name to identify this SCSI Driver
+ and Host Adapter.
+*/
+
+const char *BusLogic_DriverInfo(SCSI_Host_T *Host)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Host->hostdata;
+ return HostAdapter->BoardName;
+}
+
+
+/*
+ BusLogic_InitializeAddressProbeList initializes the list of I/O Addresses
+ to be probed for potential BusLogic SCSI Host Adapters by interrogating the
+ PCI Configuration Space on PCI machines as well as from the list of standard
+ BusLogic I/O Addresses.
+*/
+
+static void BusLogic_InitializeAddressProbeList(void)
+{
+ int DestinationIndex = 0, SourceIndex = 0;
+ /*
+ If BusLogic_Setup has been called, do not override the Kernel Command
+ Line specifications.
+ */
+ if (BusLogic_IO_AddressProbeList[0] != 0) return;
+#ifdef CONFIG_PCI
+ /*
+ Interrogate PCI Configuration Space for any BusLogic SCSI Host Adapters.
+ */
+ if (pcibios_present())
+ {
+ unsigned short Index = 0, VendorID;
+ unsigned char Bus, DeviceAndFunction;
+ unsigned int BaseAddress0;
+ while (pcibios_find_class(PCI_CLASS_STORAGE_SCSI<<8, Index++,
+ &Bus, &DeviceAndFunction) == 0)
+ if (pcibios_read_config_word(Bus, DeviceAndFunction,
+ PCI_VENDOR_ID, &VendorID) == 0 &&
+ VendorID == PCI_VENDOR_ID_BUSLOGIC &&
+ pcibios_read_config_dword(Bus, DeviceAndFunction,
+ PCI_BASE_ADDRESS_0, &BaseAddress0) == 0 &&
+ (BaseAddress0 & PCI_BASE_ADDRESS_SPACE) ==
+ PCI_BASE_ADDRESS_SPACE_IO)
+ {
+ BusLogic_IO_AddressProbeList[DestinationIndex++] =
+ BaseAddress0 & PCI_BASE_ADDRESS_IO_MASK;
+ }
+ }
+#endif
+ /*
+ Append the list of standard BusLogic I/O Addresses.
+ */
+ while (DestinationIndex < BusLogic_IO_MaxProbeAddresses &&
+ BusLogic_IO_StandardAddresses[SourceIndex] > 0)
+ BusLogic_IO_AddressProbeList[DestinationIndex++] =
+ BusLogic_IO_StandardAddresses[SourceIndex++];
+ BusLogic_IO_AddressProbeList[DestinationIndex] = 0;
+}
+
+
+/*
+ BusLogic_RegisterHostAdapter adds Host Adapter to the list of registered
+ BusLogic Host Adapters.
+*/
+
+static void BusLogic_RegisterHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ HostAdapter->Next = NULL;
+ if (BusLogic_RegisteredHostAdapters != NULL)
+ {
+ BusLogic_HostAdapter_T *LastHostAdapter = BusLogic_RegisteredHostAdapters;
+ BusLogic_HostAdapter_T *NextHostAdapter;
+ while ((NextHostAdapter = LastHostAdapter->Next) != NULL)
+ LastHostAdapter = NextHostAdapter;
+ LastHostAdapter->Next = HostAdapter;
+ }
+ else BusLogic_RegisteredHostAdapters = HostAdapter;
+}
+
+
+/*
+ BusLogic_UnregisterHostAdapter removes Host Adapter from the list of
+ registered BusLogic Host Adapters.
+*/
+
+static void BusLogic_UnregisterHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ if (BusLogic_RegisteredHostAdapters != HostAdapter)
+ {
+ BusLogic_HostAdapter_T *LastHostAdapter = BusLogic_RegisteredHostAdapters;
+ while (LastHostAdapter != NULL && LastHostAdapter->Next != HostAdapter)
+ LastHostAdapter = LastHostAdapter->Next;
+ if (LastHostAdapter != NULL)
+ LastHostAdapter->Next = HostAdapter->Next;
+ }
+ else BusLogic_RegisteredHostAdapters = HostAdapter->Next;
+ HostAdapter->Next = NULL;
+}
+
+
+/*
+ BusLogic_CreateCCBs allocates the initial Command Control Blocks (CCBs)
+ for Host Adapter.
+*/
+
+static boolean BusLogic_CreateCCBs(BusLogic_HostAdapter_T *HostAdapter)
+{
+ int i;
+ for (i = 0; i < BusLogic_InitialCCBs; i++)
+ {
+ BusLogic_CCB_T *CCB = (BusLogic_CCB_T *)
+ scsi_init_malloc(sizeof(BusLogic_CCB_T), GFP_ATOMIC | GFP_DMA);
+ if (CCB == NULL)
+ {
+ printk("scsi%d: UNABLE TO ALLOCATE CCB %d - DETACHING\n",
+ HostAdapter->HostNumber, i);
+ return false;
+ }
+ memset(CCB, 0, sizeof(BusLogic_CCB_T));
+ CCB->HostAdapter = HostAdapter;
+ CCB->Status = BusLogic_CCB_Free;
+ CCB->Next = HostAdapter->Free_CCBs;
+ CCB->NextAll = HostAdapter->All_CCBs;
+ HostAdapter->Free_CCBs = CCB;
+ HostAdapter->All_CCBs = CCB;
+ }
+ return true;
+}
+
+
+/*
+ BusLogic_DestroyCCBs deallocates the CCBs for Host Adapter.
+*/
+
+static void BusLogic_DestroyCCBs(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_CCB_T *NextCCB = HostAdapter->All_CCBs, *CCB;
+ HostAdapter->All_CCBs = NULL;
+ HostAdapter->Free_CCBs = NULL;
+ while ((CCB = NextCCB) != NULL)
+ {
+ NextCCB = CCB->NextAll;
+ scsi_init_free((char *) CCB, sizeof(BusLogic_CCB_T));
+ }
+}
+
+
+/*
+ BusLogic_AllocateCCB allocates a CCB from the Host Adapter's free list,
+ allocating more memory from the Kernel if necessary.
+*/
+
+static BusLogic_CCB_T *BusLogic_AllocateCCB(BusLogic_HostAdapter_T *HostAdapter)
+{
+ static unsigned int SerialNumber = 0;
+ BusLogic_CCB_T *CCB;
+ BusLogic_LockHostAdapter(HostAdapter);
+ CCB = HostAdapter->Free_CCBs;
+ if (CCB != NULL)
+ {
+ CCB->SerialNumber = ++SerialNumber;
+ HostAdapter->Free_CCBs = CCB->Next;
+ CCB->Next = NULL;
+ BusLogic_UnlockHostAdapter(HostAdapter);
+ return CCB;
+ }
+ BusLogic_UnlockHostAdapter(HostAdapter);
+ CCB = (BusLogic_CCB_T *) scsi_init_malloc(sizeof(BusLogic_CCB_T),
+ GFP_ATOMIC | GFP_DMA);
+ if (CCB == NULL)
+ {
+ printk("scsi%d: Failed to allocate an additional CCB\n",
+ HostAdapter->HostNumber);
+ return NULL;
+ }
+ printk("scsi%d: Allocated an additional CCB\n", HostAdapter->HostNumber);
+ memset(CCB, 0, sizeof(BusLogic_CCB_T));
+ CCB->HostAdapter = HostAdapter;
+ CCB->Status = BusLogic_CCB_Free;
+ BusLogic_LockHostAdapter(HostAdapter);
+ CCB->SerialNumber = ++SerialNumber;
+ CCB->NextAll = HostAdapter->All_CCBs;
+ HostAdapter->All_CCBs = CCB;
+ BusLogic_UnlockHostAdapter(HostAdapter);
+ return CCB;
+}
+
+
+/*
+ BusLogic_DeallocateCCB deallocates a CCB, returning it to the Host Adapter's
+ free list.
+*/
+
+static void BusLogic_DeallocateCCB(BusLogic_CCB_T *CCB)
+{
+ BusLogic_HostAdapter_T *HostAdapter = CCB->HostAdapter;
+ BusLogic_LockHostAdapter(HostAdapter);
+ CCB->Command = NULL;
+ CCB->Status = BusLogic_CCB_Free;
+ CCB->Next = HostAdapter->Free_CCBs;
+ HostAdapter->Free_CCBs = CCB;
+ BusLogic_UnlockHostAdapter(HostAdapter);
+}
+
+
+/*
+ BusLogic_Command sends the command OperationCode to HostAdapter, optionally
+ providing ParameterLength bytes of ParameterData and receiving at most
+ ReplyLength bytes of ReplyData; any excess reply data is received but
+ discarded.
+
+ On success, this function returns the number of reply bytes read from
+ the Host Adapter (including any discarded data); on failure, it returns
+ -1 if the command was invalid, or -2 if a timeout occurred.
+
+ This function is only called during board detection and initialization, so
+ performance and latency are not critical, and exclusive access to the Host
+ Adapter hardware is assumed. Once the board and driver are initialized, the
+ only Host Adapter command that is issued is the single byte Start Mailbox
+ Scan command, which does not require waiting for the Host Adapter Ready bit
+ to be set in the Status Register.
+*/
+
+static int BusLogic_Command(BusLogic_HostAdapter_T *HostAdapter,
+ BusLogic_OperationCode_T OperationCode,
+ void *ParameterData,
+ int ParameterLength,
+ void *ReplyData,
+ int ReplyLength)
+{
+ unsigned char *ParameterPointer = (unsigned char *) ParameterData;
+ unsigned char *ReplyPointer = (unsigned char *) ReplyData;
+ unsigned char StatusRegister = 0, InterruptRegister;
+ long TimeoutCounter;
+ int ReplyBytes = 0;
+ /*
+ Clear out the Reply Data if provided.
+ */
+ if (ReplyLength > 0)
+ memset(ReplyData, 0, ReplyLength);
+ /*
+ Wait for the Host Adapter Ready bit to be set and the Command/Parameter
+ Register Busy bit to be reset in the Status Register.
+ */
+ TimeoutCounter = loops_per_sec >> 3;
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister = BusLogic_ReadStatusRegister(HostAdapter);
+ if ((StatusRegister & BusLogic_HostAdapterReady) &&
+ !(StatusRegister & BusLogic_CommandParameterRegisterBusy))
+ break;
+ }
+ BusLogic_CommandFailureReason = "Timeout waiting for Host Adapter Ready";
+ if (TimeoutCounter < 0) return -2;
+ /*
+ Write the OperationCode to the Command/Parameter Register.
+ */
+ HostAdapter->HostAdapterCommandCompleted = false;
+ BusLogic_WriteCommandParameterRegister(HostAdapter, OperationCode);
+ /*
+ Write any additional Parameter Bytes.
+ */
+ TimeoutCounter = 10000;
+ while (ParameterLength > 0 && --TimeoutCounter >= 0)
+ {
+ /*
+ Wait 100 microseconds to give the Host Adapter enough time to determine
+ whether the last value written to the Command/Parameter Register was
+ valid or not. If the Command Complete bit is set in the Interrupt
+ Register, then the Command Invalid bit in the Status Register will be
+ reset if the Operation Code or Parameter was valid and the command
+ has completed, or set if the Operation Code or Parameter was invalid.
+ If the Data In Register Ready bit is set in the Status Register, then
+ the Operation Code was valid, and data is waiting to be read back
+ from the Host Adapter. Otherwise, wait for the Command/Parameter
+ Register Busy bit in the Status Register to be reset.
+ */
+ udelay(100);
+ InterruptRegister = BusLogic_ReadInterruptRegister(HostAdapter);
+ StatusRegister = BusLogic_ReadStatusRegister(HostAdapter);
+ if (InterruptRegister & BusLogic_CommandComplete) break;
+ if (HostAdapter->HostAdapterCommandCompleted) break;
+ if (StatusRegister & BusLogic_DataInRegisterReady) break;
+ if (StatusRegister & BusLogic_CommandParameterRegisterBusy) continue;
+ BusLogic_WriteCommandParameterRegister(HostAdapter, *ParameterPointer++);
+ ParameterLength--;
+ }
+ BusLogic_CommandFailureReason = "Timeout waiting for Parameter Acceptance";
+ if (TimeoutCounter < 0) return -2;
+ /*
+ The Modify I/O Address command does not cause a Command Complete Interrupt.
+ */
+ if (OperationCode == BusLogic_ModifyIOAddress)
+ {
+ StatusRegister = BusLogic_ReadStatusRegister(HostAdapter);
+ BusLogic_CommandFailureReason = "Modify I/O Address Invalid";
+ if (StatusRegister & BusLogic_CommandInvalid) return -1;
+ BusLogic_CommandFailureReason = NULL;
+ return 0;
+ }
+ /*
+ Select an appropriate timeout value for awaiting command completion.
+ */
+ switch (OperationCode)
+ {
+ case BusLogic_InquireInstalledDevicesID0to7:
+ case BusLogic_InquireInstalledDevicesID8to15:
+ /* Approximately 60 seconds. */
+ TimeoutCounter = loops_per_sec << 2;
+ break;
+ default:
+ /* Approximately 1 second. */
+ TimeoutCounter = loops_per_sec >> 4;
+ break;
+ }
+ /*
+ Receive any Reply Bytes, waiting for either the Command Complete bit to
+ be set in the Interrupt Register, or for the Interrupt Handler to set the
+ Host Adapter Command Completed bit in the Host Adapter structure.
+ */
+ while (--TimeoutCounter >= 0)
+ {
+ InterruptRegister = BusLogic_ReadInterruptRegister(HostAdapter);
+ StatusRegister = BusLogic_ReadStatusRegister(HostAdapter);
+ if (InterruptRegister & BusLogic_CommandComplete) break;
+ if (HostAdapter->HostAdapterCommandCompleted) break;
+ if (StatusRegister & BusLogic_DataInRegisterReady)
+ if (++ReplyBytes <= ReplyLength)
+ *ReplyPointer++ = BusLogic_ReadDataInRegister(HostAdapter);
+ else BusLogic_ReadDataInRegister(HostAdapter);
+ }
+ BusLogic_CommandFailureReason = "Timeout waiting for Command Complete";
+ if (TimeoutCounter < 0) return -2;
+ /*
+ If testing Command Complete Interrupts, wait a short while in case the
+ loop immediately above terminated due to the Command Complete bit being
+ set in the Interrupt Register, but the interrupt hasn't actually been
+ processed yet. Otherwise, acknowledging the interrupt here could prevent
+ the interrupt test from succeeding.
+ */
+ if (OperationCode == BusLogic_TestCommandCompleteInterrupt)
+ udelay(10000);
+ /*
+ Clear any pending Command Complete Interrupt.
+ */
+ BusLogic_WriteControlRegister(HostAdapter, BusLogic_InterruptReset);
+ if (BusLogic_GlobalOptions & BusLogic_TraceConfiguration)
+ if (OperationCode != BusLogic_TestCommandCompleteInterrupt)
+ {
+ int i;
+ printk("BusLogic_Command(%02X) Status = %02X: %2d ==> %2d:",
+ OperationCode, StatusRegister, ReplyLength, ReplyBytes);
+ if (ReplyLength > ReplyBytes) ReplyLength = ReplyBytes;
+ for (i = 0; i < ReplyLength; i++)
+ printk(" %02X", ((unsigned char *) ReplyData)[i]);
+ printk("\n");
+ }
+ /*
+ Process Command Invalid conditions.
+ */
+ if (StatusRegister & BusLogic_CommandInvalid)
+ {
+ /*
+ Some early BusLogic Host Adapters may not recover properly from
+ a Command Invalid condition, so if this appears to be the case,
+ a Soft Reset is issued to the Host Adapter. Potentially invalid
+ commands are never attempted after Mailbox Initialization is
+ performed, so there should be no Host Adapter state lost by a
+ Soft Reset in response to a Command Invalid condition.
+ */
+ udelay(1000);
+ StatusRegister = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister != (BusLogic_HostAdapterReady |
+ BusLogic_InitializationRequired))
+ {
+ BusLogic_WriteControlRegister(HostAdapter, BusLogic_SoftReset);
+ udelay(1000);
+ }
+ BusLogic_CommandFailureReason = "Command Invalid";
+ return -1;
+ }
+ /*
+ Handle Excess Parameters Supplied conditions.
+ */
+ BusLogic_CommandFailureReason = "Excess Parameters Supplied";
+ if (ParameterLength > 0) return -1;
+ /*
+ Indicate the command completed successfully.
+ */
+ BusLogic_CommandFailureReason = NULL;
+ return ReplyBytes;
+}
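+
+/*
+ A minimal usage sketch of BusLogic_Command (it mirrors the Inquire Board ID call
+ made below in BusLogic_ReadHostAdapterConfiguration): commands without parameter
+ bytes pass NULL and 0, and the result is checked against the expected reply length.
+
+ BusLogic_BoardID_T BoardID;
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireBoardID, NULL, 0,
+ &BoardID, sizeof(BoardID)) != sizeof(BoardID))
+ return BusLogic_Failure(HostAdapter, "INQUIRE BOARD ID");
+*/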
+
+
+/*
+ BusLogic_Failure prints a standardized error message, and then returns false.
+*/
+
+static boolean BusLogic_Failure(BusLogic_HostAdapter_T *HostAdapter,
+ char *ErrorMessage)
+{
+ BusLogic_AnnounceDriver();
+ printk("While configuring BusLogic Host Adapter at I/O Address 0x%X:\n",
+ HostAdapter->IO_Address);
+ printk("%s FAILED - DETACHING\n", ErrorMessage);
+ if (BusLogic_CommandFailureReason != NULL)
+ printk("ADDITIONAL FAILURE INFO - %s\n", BusLogic_CommandFailureReason);
+ return false;
+}
+
+
+/*
+ BusLogic_ProbeHostAdapter probes for a BusLogic Host Adapter.
+*/
+
+static boolean BusLogic_ProbeHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ boolean TraceProbe = (BusLogic_GlobalOptions & BusLogic_TraceProbe);
+ unsigned char StatusRegister, GeometryRegister;
+ /*
+ Read the Status Register to test if there is an I/O port that responds. A
+ nonexistent I/O port will return 0xFF, in which case there is definitely no
+ BusLogic Host Adapter at this base I/O Address.
+ */
+ StatusRegister = BusLogic_ReadStatusRegister(HostAdapter);
+ if (TraceProbe)
+ printk("BusLogic_Probe(0x%X): Status 0x%02X\n",
+ HostAdapter->IO_Address, StatusRegister);
+ if (StatusRegister == 0xFF) return false;
+ /*
+ Read the undocumented BusLogic Geometry Register to test if there is an I/O
+ port that responds. Adaptec Host Adapters do not implement the Geometry
+ Register, so this test helps serve to avoid incorrectly recognizing an
+ Adaptec 1542A or 1542B as a BusLogic. Unfortunately, the Adaptec 1542C
+ series does respond to the Geometry Register I/O port, but it will be
+ rejected later when the Inquire Extended Setup Information command is
+ issued in BusLogic_CheckHostAdapter. The AMI FastDisk Host Adapter is a
+ BusLogic clone that implements the same interface as earlier BusLogic
+ boards, including the undocumented commands, and is therefore supported by
+ this driver. However, the AMI FastDisk always returns 0x00 upon reading
+ the Geometry Register, so the extended translation option should always be
+ left disabled on the AMI FastDisk.
+ */
+ GeometryRegister = BusLogic_ReadGeometryRegister(HostAdapter);
+ if (TraceProbe)
+ printk("BusLogic_Probe(0x%X): Geometry 0x%02X\n",
+ HostAdapter->IO_Address, GeometryRegister);
+ if (GeometryRegister == 0xFF) return false;
+ /*
+ Indicate the Host Adapter Probe completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_HardResetHostAdapter issues a Hard Reset to the Host Adapter,
+ and waits for Host Adapter Diagnostics to complete.
+*/
+
+static boolean BusLogic_HardResetHostAdapter(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ boolean TraceHardReset = (BusLogic_GlobalOptions & BusLogic_TraceHardReset);
+ long TimeoutCounter = loops_per_sec >> 2;
+ unsigned char StatusRegister = 0;
+ /*
+ Issue a Hard Reset Command to the Host Adapter. The Host Adapter should
+ respond by setting Diagnostic Active in the Status Register.
+ */
+ BusLogic_WriteControlRegister(HostAdapter, BusLogic_HardReset);
+ /*
+ Wait until Diagnostic Active is set in the Status Register.
+ */
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister = BusLogic_ReadStatusRegister(HostAdapter);
+ if ((StatusRegister & BusLogic_DiagnosticActive)) break;
+ }
+ if (TraceHardReset)
+ printk("BusLogic_HardReset(0x%X): Diagnostic Active, Status 0x%02X\n",
+ HostAdapter->IO_Address, StatusRegister);
+ if (TimeoutCounter < 0) return false;
+ /*
+ Wait 100 microseconds to allow completion of any initial diagnostic
+ activity which might leave the contents of the Status Register
+ unpredictable.
+ */
+ udelay(100);
+ /*
+ Wait until Diagnostic Active is reset in the Status Register.
+ */
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister = BusLogic_ReadStatusRegister(HostAdapter);
+ if (!(StatusRegister & BusLogic_DiagnosticActive)) break;
+ }
+ if (TraceHardReset)
+ printk("BusLogic_HardReset(0x%X): Diagnostic Completed, Status 0x%02X\n",
+ HostAdapter->IO_Address, StatusRegister);
+ if (TimeoutCounter < 0) return false;
+ /*
+ Wait until at least one of the Diagnostic Failure, Host Adapter Ready,
+ or Data In Register Ready bits is set in the Status Register.
+ */
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister & (BusLogic_DiagnosticFailure |
+ BusLogic_HostAdapterReady |
+ BusLogic_DataInRegisterReady))
+ break;
+ }
+ if (TraceHardReset)
+ printk("BusLogic_HardReset(0x%X): Host Adapter Ready, Status 0x%02X\n",
+ HostAdapter->IO_Address, StatusRegister);
+ if (TimeoutCounter < 0) return false;
+ /*
+ If Diagnostic Failure is set or Host Adapter Ready is reset, then an
+ error occurred during the Host Adapter diagnostics. If Data In Register
+ Ready is set, then there is an Error Code available.
+ */
+ if ((StatusRegister & BusLogic_DiagnosticFailure) ||
+ !(StatusRegister & BusLogic_HostAdapterReady))
+ {
+ BusLogic_CommandFailureReason = NULL;
+ BusLogic_Failure(HostAdapter, "HARD RESET DIAGNOSTICS");
+ printk("HOST ADAPTER STATUS REGISTER = %02X\n", StatusRegister);
+ if (StatusRegister & BusLogic_DataInRegisterReady)
+ {
+ unsigned char ErrorCode = BusLogic_ReadDataInRegister(HostAdapter);
+ printk("HOST ADAPTER ERROR CODE = %d\n", ErrorCode);
+ }
+ return false;
+ }
+ /*
+ Indicate the Host Adapter Hard Reset completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_CheckHostAdapter checks to be sure this really is a BusLogic
+ Host Adapter.
+*/
+
+static boolean BusLogic_CheckHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_ExtendedSetupInformation_T ExtendedSetupInformation;
+ BusLogic_RequestedReplyLength_T RequestedReplyLength;
+ unsigned long ProcessorFlags;
+ int Result;
+ /*
+ Issue the Inquire Extended Setup Information command. Only genuine
+ BusLogic Host Adapters and true clones support this command. Adaptec 1542C
+ series Host Adapters that respond to the Geometry Register I/O port will
+ fail this command. Interrupts must be disabled around the call to
+ BusLogic_Command since a Command Complete interrupt could occur if the IRQ
+ Channel was previously enabled for another BusLogic Host Adapter sharing
+ the same IRQ Channel.
+ */
+ save_flags(ProcessorFlags);
+ cli();
+ RequestedReplyLength = sizeof(ExtendedSetupInformation);
+ Result = BusLogic_Command(HostAdapter,
+ BusLogic_InquireExtendedSetupInformation,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &ExtendedSetupInformation,
+ sizeof(ExtendedSetupInformation));
+ restore_flags(ProcessorFlags);
+ if (BusLogic_GlobalOptions & BusLogic_TraceProbe)
+ printk("BusLogic_Check(0x%X): Result %d\n",
+ HostAdapter->IO_Address, Result);
+ return (Result == sizeof(ExtendedSetupInformation));
+}
+
+
+/*
+ BusLogic_ReadHostAdapterConfiguration reads the Configuration Information
+ from Host Adapter.
+*/
+
+static boolean BusLogic_ReadHostAdapterConfiguration(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ BusLogic_BoardID_T BoardID;
+ BusLogic_Configuration_T Configuration;
+ BusLogic_SetupInformation_T SetupInformation;
+ BusLogic_ExtendedSetupInformation_T ExtendedSetupInformation;
+ BusLogic_BoardModelNumber_T BoardModelNumber;
+ BusLogic_FirmwareVersion3rdDigit_T FirmwareVersion3rdDigit;
+ BusLogic_FirmwareVersionLetter_T FirmwareVersionLetter;
+ BusLogic_RequestedReplyLength_T RequestedReplyLength;
+ unsigned char GeometryRegister, *TargetPointer, Character;
+ unsigned short AllTargetsMask, DisconnectPermitted;
+ unsigned short TaggedQueuingPermitted, TaggedQueuingPermittedDefault;
+ boolean CommonErrorRecovery;
+ int TargetID, i;
+ /*
+ Issue the Inquire Board ID command.
+ */
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireBoardID, NULL, 0,
+ &BoardID, sizeof(BoardID)) != sizeof(BoardID))
+ return BusLogic_Failure(HostAdapter, "INQUIRE BOARD ID");
+ /*
+ Issue the Inquire Configuration command.
+ */
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireConfiguration, NULL, 0,
+ &Configuration, sizeof(Configuration))
+ != sizeof(Configuration))
+ return BusLogic_Failure(HostAdapter, "INQUIRE CONFIGURATION");
+ /*
+ Issue the Inquire Setup Information command.
+ */
+ RequestedReplyLength = sizeof(SetupInformation);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireSetupInformation,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &SetupInformation, sizeof(SetupInformation))
+ != sizeof(SetupInformation))
+ return BusLogic_Failure(HostAdapter, "INQUIRE SETUP INFORMATION");
+ /*
+ Issue the Inquire Extended Setup Information command.
+ */
+ RequestedReplyLength = sizeof(ExtendedSetupInformation);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireExtendedSetupInformation,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &ExtendedSetupInformation,
+ sizeof(ExtendedSetupInformation))
+ != sizeof(ExtendedSetupInformation))
+ return BusLogic_Failure(HostAdapter, "INQUIRE EXTENDED SETUP INFORMATION");
+ /*
+ Issue the Inquire Board Model Number command.
+ */
+ if (!(BoardID.FirmwareVersion1stDigit == '2' &&
+ ExtendedSetupInformation.BusType == 'A'))
+ {
+ RequestedReplyLength = sizeof(BoardModelNumber);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireBoardModelNumber,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &BoardModelNumber, sizeof(BoardModelNumber))
+ != sizeof(BoardModelNumber))
+ return BusLogic_Failure(HostAdapter, "INQUIRE BOARD MODEL NUMBER");
+ }
+ else strcpy(BoardModelNumber, "542B");
+ /*
+ Issue the Inquire Firmware Version 3rd Digit command.
+ */
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireFirmwareVersion3rdDigit,
+ NULL, 0, &FirmwareVersion3rdDigit,
+ sizeof(FirmwareVersion3rdDigit))
+ != sizeof(FirmwareVersion3rdDigit))
+ return BusLogic_Failure(HostAdapter, "INQUIRE FIRMWARE 3RD DIGIT");
+ /*
+ Issue the Inquire Firmware Version Letter command.
+ */
+ FirmwareVersionLetter = '\0';
+ if (BoardID.FirmwareVersion1stDigit > '3' ||
+ (BoardID.FirmwareVersion1stDigit == '3' &&
+ BoardID.FirmwareVersion2ndDigit >= '3'))
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireFirmwareVersionLetter,
+ NULL, 0, &FirmwareVersionLetter,
+ sizeof(FirmwareVersionLetter))
+ != sizeof(FirmwareVersionLetter))
+ return BusLogic_Failure(HostAdapter, "INQUIRE FIRMWARE VERSION LETTER");
+ /*
+ BusLogic Host Adapters can be identified by their model number and
+ the major version number of their firmware as follows:
+
+ 4.xx BusLogic "C" Series Host Adapters:
+ BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF
+ 3.xx BusLogic "S" Series Host Adapters:
+ BT-747S/747D/757S/757D/445S/545S/542D
+ BT-542B/742A (revision H)
+ 2.xx BusLogic "A" Series Host Adapters:
+ BT-542B/742A (revision G and below)
+ 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter
+ */
+ /*
+ Save the Model Name and Board Name in the Host Adapter structure.
+ */
+ TargetPointer = HostAdapter->ModelName;
+ *TargetPointer++ = 'B';
+ *TargetPointer++ = 'T';
+ *TargetPointer++ = '-';
+ for (i = 0; i < sizeof(BoardModelNumber); i++)
+ {
+ Character = BoardModelNumber[i];
+ if (Character == ' ' || Character == '\0') break;
+ *TargetPointer++ = Character;
+ }
+ *TargetPointer++ = '\0';
+ strcpy(HostAdapter->BoardName, "BusLogic ");
+ strcat(HostAdapter->BoardName, HostAdapter->ModelName);
+ strcpy(HostAdapter->InterruptLabel, HostAdapter->BoardName);
+ /*
+ Save the Firmware Version in the Host Adapter structure.
+ */
+ TargetPointer = HostAdapter->FirmwareVersion;
+ *TargetPointer++ = BoardID.FirmwareVersion1stDigit;
+ *TargetPointer++ = '.';
+ *TargetPointer++ = BoardID.FirmwareVersion2ndDigit;
+ if (FirmwareVersion3rdDigit != ' ' && FirmwareVersion3rdDigit != '\0')
+ *TargetPointer++ = FirmwareVersion3rdDigit;
+ if (FirmwareVersionLetter != ' ' && FirmwareVersionLetter != '\0')
+ *TargetPointer++ = FirmwareVersionLetter;
+ *TargetPointer++ = '\0';
+ /*
+ Determine the IRQ Channel and save it in the Host Adapter structure.
+ */
+ if (Configuration.IRQ_Channel9)
+ HostAdapter->IRQ_Channel = 9;
+ else if (Configuration.IRQ_Channel10)
+ HostAdapter->IRQ_Channel = 10;
+ else if (Configuration.IRQ_Channel11)
+ HostAdapter->IRQ_Channel = 11;
+ else if (Configuration.IRQ_Channel12)
+ HostAdapter->IRQ_Channel = 12;
+ else if (Configuration.IRQ_Channel14)
+ HostAdapter->IRQ_Channel = 14;
+ else if (Configuration.IRQ_Channel15)
+ HostAdapter->IRQ_Channel = 15;
+ /*
+ Determine the DMA Channel and save it in the Host Adapter structure.
+ */
+ if (Configuration.DMA_Channel5)
+ HostAdapter->DMA_Channel = 5;
+ else if (Configuration.DMA_Channel6)
+ HostAdapter->DMA_Channel = 6;
+ else if (Configuration.DMA_Channel7)
+ HostAdapter->DMA_Channel = 7;
+ /*
+ Save the Host Adapter SCSI ID in the Host Adapter structure.
+ */
+ HostAdapter->SCSI_ID = Configuration.HostAdapterID;
+ /*
+ Save the Synchronous Initiation flag and SCSI Parity Checking flag
+ in the Host Adapter structure.
+ */
+ HostAdapter->SynchronousInitiation =
+ SetupInformation.SynchronousInitiationEnabled;
+ HostAdapter->ParityChecking = SetupInformation.ParityCheckEnabled;
+ /*
+ Determine the Bus Type and save it in the Host Adapter structure,
+ overriding the DMA Channel if it is inappropriate for the bus type.
+ */
+ if (ExtendedSetupInformation.BusType == 'A')
+ HostAdapter->BusType = BusLogic_ISA_Bus;
+ else
+ switch (HostAdapter->ModelName[3])
+ {
+ case '4':
+ HostAdapter->BusType = BusLogic_VESA_Bus;
+ HostAdapter->DMA_Channel = 0;
+ break;
+ case '5':
+ HostAdapter->BusType = BusLogic_ISA_Bus;
+ break;
+ case '6':
+ HostAdapter->BusType = BusLogic_MCA_Bus;
+ HostAdapter->DMA_Channel = 0;
+ break;
+ case '7':
+ HostAdapter->BusType = BusLogic_EISA_Bus;
+ HostAdapter->DMA_Channel = 0;
+ break;
+ case '9':
+ HostAdapter->BusType = BusLogic_PCI_Bus;
+ HostAdapter->DMA_Channel = 0;
+ break;
+ }
+ /*
+ Determine whether Extended Translation is enabled and save it in
+ the Host Adapter structure.
+ */
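+ /* Extended Translation selects a 255 head / 63 sector geometry for disks
+ larger than 1 GB instead of the standard 64 head / 32 sector geometry. */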
+ GeometryRegister = BusLogic_ReadGeometryRegister(HostAdapter);
+ if (GeometryRegister & BusLogic_ExtendedTranslationEnabled)
+ HostAdapter->ExtendedTranslation = true;
+ /*
+ Save the Disconnect/Reconnect Permitted flag bits in the Host Adapter
+ structure. The Disconnect Permitted information is only valid on "C"
+ Series boards, but Disconnect/Reconnect is always permitted on "S" and
+ "A" Series boards.
+ */
+ if (HostAdapter->FirmwareVersion[0] >= '4')
+ HostAdapter->DisconnectPermitted =
+ (SetupInformation.DisconnectPermittedID8to15 << 8)
+ | SetupInformation.DisconnectPermittedID0to7;
+ else HostAdapter->DisconnectPermitted = 0xFF;
+ /*
+ Save the Scatter Gather Limits, Level Sensitive Interrupts flag,
+ Wide SCSI flag, and Differential SCSI flag in the Host Adapter structure.
+ */
+ HostAdapter->HostAdapterScatterGatherLimit =
+ ExtendedSetupInformation.ScatterGatherLimit;
+ HostAdapter->DriverScatterGatherLimit =
+ HostAdapter->HostAdapterScatterGatherLimit;
+ if (HostAdapter->HostAdapterScatterGatherLimit > BusLogic_ScatterGatherLimit)
+ HostAdapter->DriverScatterGatherLimit = BusLogic_ScatterGatherLimit;
+ if (ExtendedSetupInformation.Misc.LevelSensitiveInterrupts)
+ HostAdapter->LevelSensitiveInterrupts = true;
+ if (ExtendedSetupInformation.HostWideSCSI)
+ {
+ HostAdapter->HostWideSCSI = true;
+ HostAdapter->MaxTargetIDs = 16;
+ HostAdapter->MaxLogicalUnits = 64;
+ }
+ else
+ {
+ HostAdapter->HostWideSCSI = false;
+ HostAdapter->MaxTargetIDs = 8;
+ HostAdapter->MaxLogicalUnits = 8;
+ }
+ HostAdapter->HostDifferentialSCSI =
+ ExtendedSetupInformation.HostDifferentialSCSI;
+ /*
+ Determine the Host Adapter BIOS Address if the BIOS is enabled and
+ save it in the Host Adapter structure. The BIOS is disabled if the
+ BIOS_Address is 0.
+ */
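+ /* The BIOS base address is reported in 4 KB units, hence the shift left
+ by 12 bits to obtain the byte address. */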
+ HostAdapter->BIOS_Address = ExtendedSetupInformation.BIOS_Address << 12;
+ /*
+ BusLogic BT-445S Host Adapters prior to board revision D have a hardware
+ bug whereby when the BIOS is enabled, transfers to/from the same address
+ range the BIOS occupies modulo 16MB are handled incorrectly. Only properly
+ functioning BT-445S boards have firmware version 3.37, so we require that
+ ISA bounce buffers be used for the buggy BT-445S models as well as for all
+ ISA models.
+ */
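+ /* Firmware versions are compared as strings; this yields a correct
+ ordering since they share a common "d.dd" prefix format. */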
+ if (HostAdapter->BusType == BusLogic_ISA_Bus ||
+ (HostAdapter->BIOS_Address > 0 &&
+ strcmp(HostAdapter->ModelName, "BT-445S") == 0 &&
+ strcmp(HostAdapter->FirmwareVersion, "3.37") < 0))
+ HostAdapter->BounceBuffersRequired = true;
+ /*
+ Select an appropriate value for Concurrency (Commands per Logical Unit)
+ either from a Command Line Entry, or based on whether this Host Adapter
+ requires that ISA bounce buffers be used.
+ */
+ if (HostAdapter->CommandLineEntry != NULL &&
+ HostAdapter->CommandLineEntry->Concurrency > 0)
+ HostAdapter->Concurrency = HostAdapter->CommandLineEntry->Concurrency;
+ else if (HostAdapter->BounceBuffersRequired)
+ HostAdapter->Concurrency = BusLogic_Concurrency_BB;
+ else HostAdapter->Concurrency = BusLogic_Concurrency;
+ /*
+ Select an appropriate value for Bus Settle Time either from a Command
+ Line Entry, or from BusLogic_DefaultBusSettleTime.
+ */
+ if (HostAdapter->CommandLineEntry != NULL &&
+ HostAdapter->CommandLineEntry->BusSettleTime > 0)
+ HostAdapter->BusSettleTime = HostAdapter->CommandLineEntry->BusSettleTime;
+ else HostAdapter->BusSettleTime = BusLogic_DefaultBusSettleTime;
+ /*
+ Select an appropriate value for Local Options from a Command Line Entry.
+ */
+ if (HostAdapter->CommandLineEntry != NULL)
+ HostAdapter->LocalOptions = HostAdapter->CommandLineEntry->LocalOptions;
+ /*
+ Select appropriate values for the Error Recovery Option array either from
+ a Command Line Entry, or using BusLogic_ErrorRecoveryDefault.
+ */
+ if (HostAdapter->CommandLineEntry != NULL)
+ memcpy(HostAdapter->ErrorRecoveryOption,
+ HostAdapter->CommandLineEntry->ErrorRecoveryOption,
+ sizeof(HostAdapter->ErrorRecoveryOption));
+ else memset(HostAdapter->ErrorRecoveryOption,
+ BusLogic_ErrorRecoveryDefault,
+ sizeof(HostAdapter->ErrorRecoveryOption));
+ /*
+ Tagged Queuing support is available and operates properly only on "C"
+ Series boards with firmware version 4.22 and above and on "S" Series
+ boards with firmware version 3.35 and above. Tagged Queuing is disabled
+ by default when the Concurrency value is 1 since queuing multiple commands
+ is not possible.
+ */
+ TaggedQueuingPermittedDefault = 0;
+ if (HostAdapter->Concurrency > 1)
+ switch (HostAdapter->FirmwareVersion[0])
+ {
+ case '5':
+ TaggedQueuingPermittedDefault = 0xFFFF;
+ break;
+ case '4':
+ if (strcmp(HostAdapter->FirmwareVersion, "4.22") >= 0)
+ TaggedQueuingPermittedDefault = 0xFFFF;
+ break;
+ case '3':
+ if (strcmp(HostAdapter->FirmwareVersion, "3.35") >= 0)
+ TaggedQueuingPermittedDefault = 0xFFFF;
+ break;
+ }
+ /*
+ Tagged Queuing is only useful if Disconnect/Reconnect is permitted.
+ Therefore, mask the Tagged Queuing Permitted Default bits with the
+ Disconnect/Reconnect Permitted bits.
+ */
+ TaggedQueuingPermittedDefault &= HostAdapter->DisconnectPermitted;
+ /*
+ Combine the Tagged Queuing Permitted Default bits with any
+ Command Line Entry Tagged Queuing specification.
+ */
+ if (HostAdapter->CommandLineEntry != NULL)
+ HostAdapter->TaggedQueuingPermitted =
+ (HostAdapter->CommandLineEntry->TaggedQueuingPermitted &
+ HostAdapter->CommandLineEntry->TaggedQueuingPermittedMask) |
+ (TaggedQueuingPermittedDefault &
+ ~HostAdapter->CommandLineEntry->TaggedQueuingPermittedMask);
+ else HostAdapter->TaggedQueuingPermitted = TaggedQueuingPermittedDefault;
+ /*
+ Announce the Host Adapter Configuration.
+ */
+ printk("scsi%d: Configuring BusLogic Model %s %s%s%s SCSI Host Adapter\n",
+ HostAdapter->HostNumber, HostAdapter->ModelName,
+ BusLogic_BusNames[HostAdapter->BusType],
+ (HostAdapter->HostWideSCSI ? " Wide" : ""),
+ (HostAdapter->HostDifferentialSCSI ? " Differential" : ""));
+ printk("scsi%d: Firmware Version: %s, I/O Address: 0x%X, "
+ "IRQ Channel: %d/%s\n",
+ HostAdapter->HostNumber, HostAdapter->FirmwareVersion,
+ HostAdapter->IO_Address, HostAdapter->IRQ_Channel,
+ (HostAdapter->LevelSensitiveInterrupts ? "Level" : "Edge"));
+ printk("scsi%d: DMA Channel: ", HostAdapter->HostNumber);
+ if (HostAdapter->DMA_Channel > 0)
+ printk("%d, ", HostAdapter->DMA_Channel);
+ else printk("None, ");
+ if (HostAdapter->BIOS_Address > 0)
+ printk("BIOS Address: 0x%lX, ", HostAdapter->BIOS_Address);
+ else printk("BIOS Address: None, ");
+ printk("Host Adapter SCSI ID: %d\n", HostAdapter->SCSI_ID);
+ printk("scsi%d: Scatter/Gather Limit: %d segments, "
+ "Synchronous Initiation: %s\n", HostAdapter->HostNumber,
+ HostAdapter->HostAdapterScatterGatherLimit,
+ (HostAdapter->SynchronousInitiation ? "Enabled" : "Disabled"));
+ printk("scsi%d: SCSI Parity Checking: %s, "
+ "Extended Disk Translation: %s\n", HostAdapter->HostNumber,
+ (HostAdapter->ParityChecking ? "Enabled" : "Disabled"),
+ (HostAdapter->ExtendedTranslation ? "Enabled" : "Disabled"));
+ AllTargetsMask = (1 << HostAdapter->MaxTargetIDs) - 1;
+ DisconnectPermitted = HostAdapter->DisconnectPermitted & AllTargetsMask;
+ printk("scsi%d: Disconnect/Reconnect: ", HostAdapter->HostNumber);
+ if (DisconnectPermitted == 0)
+ printk("Disabled");
+ else if (DisconnectPermitted == AllTargetsMask)
+ printk("Enabled");
+ else
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetIDs; TargetID++)
+ printk("%c", (DisconnectPermitted & (1 << TargetID)) ? 'Y' : 'N');
+ printk(", Tagged Queuing: ");
+ TaggedQueuingPermitted =
+ HostAdapter->TaggedQueuingPermitted & AllTargetsMask;
+ if (TaggedQueuingPermitted == 0)
+ printk("Disabled");
+ else if (TaggedQueuingPermitted == AllTargetsMask)
+ printk("Enabled");
+ else
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetIDs; TargetID++)
+ printk("%c", (TaggedQueuingPermitted & (1 << TargetID)) ? 'Y' : 'N');
+ printk("\n");
+ CommonErrorRecovery = true;
+ for (TargetID = 1; TargetID < HostAdapter->MaxTargetIDs; TargetID++)
+ if (HostAdapter->ErrorRecoveryOption[TargetID] !=
+ HostAdapter->ErrorRecoveryOption[0])
+ {
+ CommonErrorRecovery = false;
+ break;
+ }
+ printk("scsi%d: Error Recovery: ", HostAdapter->HostNumber);
+ if (CommonErrorRecovery)
+ printk("%s", BusLogic_ErrorRecoveryOptions[
+ HostAdapter->ErrorRecoveryOption[0]]);
+ else
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetIDs; TargetID++)
+ printk("%s", BusLogic_ErrorRecoveryOptions2[
+ HostAdapter->ErrorRecoveryOption[TargetID]]);
+ printk(", Mailboxes: %d, Initial CCBs: %d\n",
+ BusLogic_MailboxCount, BusLogic_InitialCCBs);
+ printk("scsi%d: Driver Scatter/Gather Limit: %d segments, "
+ "Concurrency: %d\n", HostAdapter->HostNumber,
+ HostAdapter->DriverScatterGatherLimit, HostAdapter->Concurrency);
+ /*
+ Indicate reading the Host Adapter Configuration completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_AcquireResources acquires the system resources necessary to use Host
+ Adapter, and initializes the fields in the SCSI Host structure. The base,
+ io_port, n_io_ports, irq, and dma_channel fields in the SCSI Host structure
+ are intentionally left uninitialized, as this driver handles acquisition and
+ release of these resources explicitly, as well as ensuring exclusive access
+ to the Host Adapter hardware and data structures through explicit locking.
+*/
+
+static boolean BusLogic_AcquireResources(BusLogic_HostAdapter_T *HostAdapter,
+ SCSI_Host_T *Host)
+{
+ /*
+ Acquire exclusive or shared access to the IRQ Channel. A usage count is
+ maintained so that PCI, EISA, or MCA shared Interrupts can be supported.
+ */
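+ /* The usage count array is indexed by IRQ Channel - 9, since only IRQ
+ Channels 9 through 15 can be assigned to a BusLogic Host Adapter. */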
+ if (BusLogic_IRQ_UsageCount[HostAdapter->IRQ_Channel - 9]++ == 0)
+ {
+ if (request_irq(HostAdapter->IRQ_Channel, BusLogic_InterruptHandler,
+ SA_INTERRUPT, HostAdapter->InterruptLabel) < 0)
+ {
+ BusLogic_IRQ_UsageCount[HostAdapter->IRQ_Channel - 9]--;
+ printk("scsi%d: UNABLE TO ACQUIRE IRQ CHANNEL %d - DETACHING\n",
+ HostAdapter->HostNumber, HostAdapter->IRQ_Channel);
+ return false;
+ }
+ }
+ else
+ {
+ BusLogic_HostAdapter_T *FirstHostAdapter =
+ BusLogic_RegisteredHostAdapters;
+ while (FirstHostAdapter != NULL)
+ {
+ if (FirstHostAdapter->IRQ_Channel == HostAdapter->IRQ_Channel)
+ {
+ if (strlen(FirstHostAdapter->InterruptLabel) + 11
+ < sizeof(FirstHostAdapter->InterruptLabel))
+ {
+ strcat(FirstHostAdapter->InterruptLabel, " + ");
+ strcat(FirstHostAdapter->InterruptLabel,
+ HostAdapter->ModelName);
+ }
+ break;
+ }
+ FirstHostAdapter = FirstHostAdapter->Next;
+ }
+ }
+ HostAdapter->IRQ_ChannelAcquired = true;
+ /*
+ Acquire exclusive access to the DMA Channel.
+ */
+ if (HostAdapter->DMA_Channel > 0)
+ {
+ if (request_dma(HostAdapter->DMA_Channel, HostAdapter->BoardName) < 0)
+ {
+ printk("scsi%d: UNABLE TO ACQUIRE DMA CHANNEL %d - DETACHING\n",
+ HostAdapter->HostNumber, HostAdapter->DMA_Channel);
+ return false;
+ }
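+ /* The Host Adapter is an ISA bus master, so the DMA Channel is used only
+ for bus arbitration; place it in cascade mode rather than programming
+ it for data transfers. */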
+ set_dma_mode(HostAdapter->DMA_Channel, DMA_MODE_CASCADE);
+ enable_dma(HostAdapter->DMA_Channel);
+ HostAdapter->DMA_ChannelAcquired = true;
+ }
+ /*
+ Initialize necessary fields in the SCSI Host structure.
+ */
+ Host->max_id = HostAdapter->MaxTargetIDs;
+ Host->max_lun = HostAdapter->MaxLogicalUnits;
+ Host->max_channel = 0;
+ Host->this_id = HostAdapter->SCSI_ID;
+ Host->can_queue = BusLogic_MailboxCount;
+ Host->cmd_per_lun = HostAdapter->Concurrency;
+ Host->sg_tablesize = HostAdapter->DriverScatterGatherLimit;
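+ /* Setting unchecked_isa_dma directs the SCSI subsystem to allocate buffers
+ below the 16 MB ISA DMA limit for this Host Adapter. */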
+ Host->unchecked_isa_dma = HostAdapter->BounceBuffersRequired;
+ /*
+ Indicate the System Resource Acquisition completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_ReleaseResources releases any system resources previously acquired
+ by BusLogic_AcquireResources.
+*/
+
+static void BusLogic_ReleaseResources(BusLogic_HostAdapter_T *HostAdapter)
+{
+ /*
+ Release exclusive or shared access to the IRQ Channel.
+ */
+ if (HostAdapter->IRQ_ChannelAcquired)
+ if (--BusLogic_IRQ_UsageCount[HostAdapter->IRQ_Channel - 9] == 0)
+ free_irq(HostAdapter->IRQ_Channel);
+ /*
+ Release exclusive access to the DMA Channel.
+ */
+ if (HostAdapter->DMA_ChannelAcquired)
+ free_dma(HostAdapter->DMA_Channel);
+}
+
+
+/*
+ BusLogic_TestInterrupts tests that the Host Adapter Interrupt Register is
+ functioning properly and that interrupts generated by the Host Adapter are
+ getting through to the Interrupt Handler. A large proportion of initial
+ problems installing PCI Host Adapters are due to the Host Adapter or
+ Motherboard being configured incorrectly, so that interrupts do not get
+ through.
+*/
+
+static boolean BusLogic_TestInterrupts(BusLogic_HostAdapter_T *HostAdapter)
+{
+ unsigned int InitialInterruptCount, FinalInterruptCount;
+ int TestCount = 5, i;
+ InitialInterruptCount = kstat.interrupts[HostAdapter->IRQ_Channel];
+ /*
+ Issue the Test Command Complete Interrupt commands.
+ */
+ for (i = 0; i < TestCount; i++)
+ BusLogic_Command(HostAdapter, BusLogic_TestCommandCompleteInterrupt,
+ NULL, 0, NULL, 0);
+ /*
+ Verify that BusLogic_InterruptHandler was called at least TestCount times.
+ Shared IRQ Channels could cause more than TestCount interrupts to occur,
+ but there should never be fewer than TestCount.
+ */
+ FinalInterruptCount = kstat.interrupts[HostAdapter->IRQ_Channel];
+ if (FinalInterruptCount < InitialInterruptCount + TestCount)
+ {
+ BusLogic_Failure(HostAdapter, "HOST ADAPTER INTERRUPT TEST");
+ printk("\n\
+Interrupts are not getting through from the Host Adapter to the BusLogic\n\
+Driver Interrupt Handler. The most likely cause is that either the Host\n\
+Adapter or Motherboard is configured incorrectly. Please check the Host\n\
+Adapter configuration with AutoSCSI or by examining any dip switch and\n\
+jumper settings on the Host Adapter, and verify that no other device is\n\
+attempting to use the same IRQ Channel. For PCI Host Adapters, it may also\n\
+be necessary to investigate and manually set the PCI interrupt assignments\n\
+and edge/level interrupt type selection in the BIOS Setup Program or with\n\
+Motherboard jumpers.\n\n");
+ return false;
+ }
+ /*
+ Indicate the Host Adapter Interrupt Test completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_InitializeHostAdapter initializes Host Adapter. This is the only
+ function called during SCSI Host Adapter detection which modifies the state
+ of the Host Adapter from its initial power on or hard reset state.
+*/
+
+static boolean BusLogic_InitializeHostAdapter(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ BusLogic_ExtendedMailboxRequest_T ExtendedMailboxRequest;
+ BusLogic_RoundRobinModeRequest_T RoundRobinModeRequest;
+ BusLogic_WideModeCCBRequest_T WideModeCCBRequest;
+ BusLogic_ModifyIOAddressRequest_T ModifyIOAddressRequest;
+ /*
+ Initialize the Command Successful Flag, Read/Write Operation Count,
+ and Queued Operation Count for each Target.
+ */
+ memset(HostAdapter->CommandSuccessfulFlag, false,
+ sizeof(HostAdapter->CommandSuccessfulFlag));
+ memset(HostAdapter->ReadWriteOperationCount, 0,
+ sizeof(HostAdapter->ReadWriteOperationCount));
+ memset(HostAdapter->QueuedOperationCount, 0,
+ sizeof(HostAdapter->QueuedOperationCount));
+ /*
+ Initialize the Outgoing and Incoming Mailbox structures.
+ */
+ memset(HostAdapter->OutgoingMailboxes, 0,
+ sizeof(HostAdapter->OutgoingMailboxes));
+ memset(HostAdapter->IncomingMailboxes, 0,
+ sizeof(HostAdapter->IncomingMailboxes));
+ /*
+ Initialize the pointers to the First, Last, and Next Mailboxes.
+ */
+ HostAdapter->FirstOutgoingMailbox = &HostAdapter->OutgoingMailboxes[0];
+ HostAdapter->LastOutgoingMailbox =
+ &HostAdapter->OutgoingMailboxes[BusLogic_MailboxCount-1];
+ HostAdapter->NextOutgoingMailbox = HostAdapter->FirstOutgoingMailbox;
+ HostAdapter->FirstIncomingMailbox = &HostAdapter->IncomingMailboxes[0];
+ HostAdapter->LastIncomingMailbox =
+ &HostAdapter->IncomingMailboxes[BusLogic_MailboxCount-1];
+ HostAdapter->NextIncomingMailbox = HostAdapter->FirstIncomingMailbox;
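+ /* The First, Last, and Next pointers treat each Mailbox array as a
+ circular ring; scanning wraps from the Last Mailbox back to the First. */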
+ /*
+ Initialize the Host Adapter's Pointer to the Outgoing/Incoming Mailboxes.
+ */
+ ExtendedMailboxRequest.MailboxCount = BusLogic_MailboxCount;
+ ExtendedMailboxRequest.BaseMailboxAddress = HostAdapter->OutgoingMailboxes;
+ if (BusLogic_Command(HostAdapter, BusLogic_InitializeExtendedMailbox,
+ &ExtendedMailboxRequest,
+ sizeof(ExtendedMailboxRequest), NULL, 0) < 0)
+ return BusLogic_Failure(HostAdapter, "MAILBOX INITIALIZATION");
+ /*
+ Enable Strict Round Robin Mode if supported by the Host Adapter. In Strict
+ Round Robin Mode, the Host Adapter only looks at the next Outgoing Mailbox
+ for each new command, rather than scanning through all the Outgoing
+ Mailboxes to find any that have new commands in them. BusLogic indicates
+ that Strict Round Robin Mode is significantly more efficient.
+ */
+ if (strcmp(HostAdapter->FirmwareVersion, "3.31") >= 0)
+ {
+ RoundRobinModeRequest = BusLogic_StrictRoundRobinMode;
+ if (BusLogic_Command(HostAdapter, BusLogic_EnableStrictRoundRobinMode,
+ &RoundRobinModeRequest,
+ sizeof(RoundRobinModeRequest), NULL, 0) < 0)
+ return BusLogic_Failure(HostAdapter, "ENABLE STRICT ROUND ROBIN MODE");
+ }
+ /*
+ For Wide SCSI Host Adapters, issue the Enable Wide Mode CCB command to
+ allow more than 8 Logical Units per Target to be supported.
+ */
+ if (HostAdapter->HostWideSCSI)
+ {
+ WideModeCCBRequest = BusLogic_WideModeCCB;
+ if (BusLogic_Command(HostAdapter, BusLogic_EnableWideModeCCB,
+ &WideModeCCBRequest,
+ sizeof(WideModeCCBRequest), NULL, 0) < 0)
+ return BusLogic_Failure(HostAdapter, "ENABLE WIDE MODE CCB");
+ }
+ /*
+ For PCI Host Adapters being accessed through the PCI compliant I/O
+ Address, disable the ISA compatible I/O Address to avoid detecting the
+ same Host Adapter at both I/O Addresses.
+ */
+ if (HostAdapter->BusType == BusLogic_PCI_Bus)
+ {
+ int Index;
+ for (Index = 0; BusLogic_IO_StandardAddresses[Index] > 0; Index++)
+ if (HostAdapter->IO_Address == BusLogic_IO_StandardAddresses[Index])
+ break;
+ if (BusLogic_IO_StandardAddresses[Index] == 0)
+ {
+ ModifyIOAddressRequest = BusLogic_ModifyIO_Disable;
+ if (BusLogic_Command(HostAdapter, BusLogic_ModifyIOAddress,
+ &ModifyIOAddressRequest,
+ sizeof(ModifyIOAddressRequest), NULL, 0) < 0)
+ return BusLogic_Failure(HostAdapter, "MODIFY I/O ADDRESS");
+ }
+ }
+ /*
+ Announce Successful Initialization.
+ */
+ printk("scsi%d: *** %s Initialized Successfully ***\n",
+ HostAdapter->HostNumber, HostAdapter->BoardName);
+ /*
+ Indicate the Host Adapter Initialization completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_InquireTargetDevices inquires about the Target Devices accessible
+ through Host Adapter and reports on the results.
+*/
+
+static boolean BusLogic_InquireTargetDevices(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ BusLogic_InstalledDevices8_T InstalledDevicesID0to7;
+ BusLogic_InstalledDevices8_T InstalledDevicesID8to15;
+ BusLogic_SetupInformation_T SetupInformation;
+ BusLogic_SynchronousPeriod_T SynchronousPeriod;
+ BusLogic_RequestedReplyLength_T RequestedReplyLength;
+ int TargetDevicesFound = 0, TargetID;
+ /*
+ Wait a few seconds between the Host Adapter Hard Reset which initiates
+ a SCSI Bus Reset and issuing any SCSI commands. Some SCSI devices get
+ confused if they receive SCSI commands too soon after a SCSI Bus Reset.
+ */
+ BusLogic_Delay(HostAdapter->BusSettleTime);
+ /*
+ Inhibit the Target Devices Inquiry if requested.
+ */
+ if (HostAdapter->LocalOptions & BusLogic_InhibitTargetInquiry)
+ {
+ printk("scsi%d: Target Device Inquiry Inhibited\n",
+ HostAdapter->HostNumber);
+ return true;
+ }
+ /*
+ Issue the Inquire Installed Devices ID 0 to 7 command, and for Wide SCSI
+ Host Adapters the Inquire Installed Devices ID 8 to 15 command. This is
+ necessary to force Synchronous Transfer Negotiation so that the Inquire
+ Setup Information and Inquire Synchronous Period commands will return
+ valid data.
+ */
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireInstalledDevicesID0to7,
+ NULL, 0, &InstalledDevicesID0to7,
+ sizeof(InstalledDevicesID0to7))
+ != sizeof(InstalledDevicesID0to7))
+ return BusLogic_Failure(HostAdapter, "INQUIRE INSTALLED DEVICES ID 0 TO 7");
+ if (HostAdapter->HostWideSCSI)
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireInstalledDevicesID8to15,
+ NULL, 0, &InstalledDevicesID8to15,
+ sizeof(InstalledDevicesID8to15))
+ != sizeof(InstalledDevicesID8to15))
+ return BusLogic_Failure(HostAdapter,
+ "INQUIRE INSTALLED DEVICES ID 8 TO 15");
+ /*
+ Issue the Inquire Setup Information command.
+ */
+ RequestedReplyLength = sizeof(SetupInformation);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireSetupInformation,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &SetupInformation, sizeof(SetupInformation))
+ != sizeof(SetupInformation))
+ return BusLogic_Failure(HostAdapter, "INQUIRE SETUP INFORMATION");
+ /*
+ Issue the Inquire Synchronous Period command.
+ */
+ if (HostAdapter->FirmwareVersion[0] >= '3')
+ {
+ RequestedReplyLength = sizeof(SynchronousPeriod);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireSynchronousPeriod,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &SynchronousPeriod, sizeof(SynchronousPeriod))
+ != sizeof(SynchronousPeriod))
+ return BusLogic_Failure(HostAdapter, "INQUIRE SYNCHRONOUS PERIOD");
+ }
+ else
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetIDs; TargetID++)
+ if (SetupInformation.SynchronousValuesID0to7[TargetID].Offset > 0)
+ SynchronousPeriod[TargetID] =
+ 20 + 5 * SetupInformation.SynchronousValuesID0to7[TargetID]
+ .TransferPeriod;
+ else SynchronousPeriod[TargetID] = 0;
+ /*
+ Save the Installed Devices, Synchronous Values, and Synchronous Period
+ information in the Host Adapter structure.
+ */
+ memcpy(HostAdapter->InstalledDevices, InstalledDevicesID0to7,
+ sizeof(BusLogic_InstalledDevices8_T));
+ memcpy(HostAdapter->SynchronousValues,
+ SetupInformation.SynchronousValuesID0to7,
+ sizeof(BusLogic_SynchronousValues8_T));
+ if (HostAdapter->HostWideSCSI)
+ {
+ memcpy(&HostAdapter->InstalledDevices[8], InstalledDevicesID8to15,
+ sizeof(BusLogic_InstalledDevices8_T));
+ memcpy(&HostAdapter->SynchronousValues[8],
+ SetupInformation.SynchronousValuesID8to15,
+ sizeof(BusLogic_SynchronousValues8_T));
+ }
+ memcpy(HostAdapter->SynchronousPeriod, SynchronousPeriod,
+ sizeof(BusLogic_SynchronousPeriod_T));
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetIDs; TargetID++)
+ if (HostAdapter->InstalledDevices[TargetID] != 0)
+ {
+ int SynchronousPeriod = HostAdapter->SynchronousPeriod[TargetID];
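+ /*
+ The Synchronous Period appears to be expressed in 10 nanosecond units,
+ so 100000000 / SynchronousPeriod yields the rate in transfers per
+ second. The rounding below gives two decimal places for rates below
+ 10 mega-transfers/second and one decimal place for faster rates.
+ */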
+ if (SynchronousPeriod > 10)
+ {
+ int SynchronousTransferRate = 100000000 / SynchronousPeriod;
+ int RoundedSynchronousTransferRate =
+ (SynchronousTransferRate + 5000) / 10000;
+ printk("scsi%d: Target %d: Synchronous at "
+ "%d.%02d mega-transfers/second, offset %d\n",
+ HostAdapter->HostNumber, TargetID,
+ RoundedSynchronousTransferRate / 100,
+ RoundedSynchronousTransferRate % 100,
+ HostAdapter->SynchronousValues[TargetID].Offset);
+ }
+ else if (SynchronousPeriod > 0)
+ {
+ int SynchronousTransferRate = 100000000 / SynchronousPeriod;
+ int RoundedSynchronousTransferRate =
+ (SynchronousTransferRate + 50000) / 100000;
+ printk("scsi%d: Target %d: Synchronous at "
+ "%d.%01d mega-transfers/second, offset %d\n",
+ HostAdapter->HostNumber, TargetID,
+ RoundedSynchronousTransferRate / 10,
+ RoundedSynchronousTransferRate % 10,
+ HostAdapter->SynchronousValues[TargetID].Offset);
+ }
+ else printk("scsi%d: Target %d: Asynchronous\n",
+ HostAdapter->HostNumber, TargetID);
+ TargetDevicesFound++;
+ }
+ if (TargetDevicesFound == 0)
+ printk("scsi%d: No Target Devices Found\n", HostAdapter->HostNumber);
+ /*
+ Indicate the Target Device Inquiry completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_DetectHostAdapter probes for BusLogic Host Adapters at the standard
+ I/O Addresses where they may be located, initializing, registering, and
+ reporting the configuration of each BusLogic Host Adapter it finds. It
+ returns the number of BusLogic Host Adapters successfully initialized and
+ registered.
+*/
+
+int BusLogic_DetectHostAdapter(SCSI_Host_Template_T *HostTemplate)
+{
+ int BusLogicHostAdapterCount = 0, CommandLineEntryIndex = 0;
+ int AddressProbeIndex = 0;
+ BusLogic_InitializeAddressProbeList();
+ while (BusLogic_IO_AddressProbeList[AddressProbeIndex] > 0)
+ {
+ BusLogic_HostAdapter_T HostAdapterPrototype;
+ BusLogic_HostAdapter_T *HostAdapter = &HostAdapterPrototype;
+ SCSI_Host_T *Host;
+ memset(HostAdapter, 0, sizeof(BusLogic_HostAdapter_T));
+ HostAdapter->IO_Address =
+ BusLogic_IO_AddressProbeList[AddressProbeIndex++];
+ /*
+ Initialize the Command Line Entry field if an explicit I/O Address
+ was specified.
+ */
+ if (CommandLineEntryIndex < BusLogic_CommandLineEntryCount &&
+ BusLogic_CommandLineEntries[CommandLineEntryIndex].IO_Address ==
+ HostAdapter->IO_Address)
+ HostAdapter->CommandLineEntry =
+ &BusLogic_CommandLineEntries[CommandLineEntryIndex++];
+ /*
+ Check whether the I/O Address range is already in use.
+ */
+ if (check_region(HostAdapter->IO_Address, BusLogic_IO_PortCount) < 0)
+ continue;
+ /*
+ Probe the Host Adapter. If unsuccessful, abort further initialization.
+ */
+ if (!BusLogic_ProbeHostAdapter(HostAdapter)) continue;
+ /*
+ Hard Reset the Host Adapter. If unsuccessful, abort further
+ initialization.
+ */
+ if (!BusLogic_HardResetHostAdapter(HostAdapter)) continue;
+ /*
+ Check the Host Adapter. If unsuccessful, abort further initialization.
+ */
+ if (!BusLogic_CheckHostAdapter(HostAdapter)) continue;
+ /*
+ Initialize the Command Line Entry field if an explicit I/O Address
+ was not specified.
+ */
+ if (CommandLineEntryIndex < BusLogic_CommandLineEntryCount &&
+ BusLogic_CommandLineEntries[CommandLineEntryIndex].IO_Address == 0)
+ HostAdapter->CommandLineEntry =
+ &BusLogic_CommandLineEntries[CommandLineEntryIndex++];
+ /*
+ Announce the Driver Version and Date, Author's Name, Copyright Notice,
+ and Contact Address.
+ */
+ BusLogic_AnnounceDriver();
+ /*
+ Register usage of the I/O Address range. From this point onward, any
+ failure will be assumed to be due to a problem with the Host Adapter,
+ rather than due to having mistakenly identified this port as belonging
+ to a BusLogic Host Adapter. The I/O Address range will not be
+ released, thereby preventing it from being incorrectly identified as
+ any other type of Host Adapter.
+ */
+ request_region(HostAdapter->IO_Address, BusLogic_IO_PortCount,
+ "BusLogic");
+ /*
+ Register the SCSI Host structure.
+ */
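+ /* scsi_register allocates the SCSI Host structure together with hostdata
+ space for a BusLogic_HostAdapter_T, into which the probed Host Adapter
+ prototype is copied below. */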
+ HostTemplate->proc_dir = &BusLogic_ProcDirectoryEntry;
+ Host = scsi_register(HostTemplate, sizeof(BusLogic_HostAdapter_T));
+ HostAdapter = (BusLogic_HostAdapter_T *) Host->hostdata;
+ memcpy(HostAdapter, &HostAdapterPrototype,
+ sizeof(BusLogic_HostAdapter_T));
+ HostAdapter->SCSI_Host = Host;
+ HostAdapter->HostNumber = Host->host_no;
+ /*
+ Add Host Adapter to the end of the list of registered BusLogic
+ Host Adapters. In order for Command Complete Interrupts to be
+ properly dismissed by BusLogic_InterruptHandler, the Host Adapter
+ must be registered. This must be done before the IRQ Channel is
+ acquired, and in a shared IRQ Channel environment, must be done
+ before any Command Complete Interrupts occur, since the IRQ Channel
+ may have already been acquired by a previous BusLogic Host Adapter.
+ */
+ BusLogic_RegisterHostAdapter(HostAdapter);
+ /*
+ Read the Host Adapter Configuration, Acquire the System Resources
+ necessary to use Host Adapter and initialize the fields in the SCSI
+ Host structure, then Test Interrupts, Create the CCBs, Initialize
+ the Host Adapter, and finally Inquire about the Target Devices.
+ */
+ if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) &&
+ BusLogic_AcquireResources(HostAdapter, Host) &&
+ BusLogic_TestInterrupts(HostAdapter) &&
+ BusLogic_CreateCCBs(HostAdapter) &&
+ BusLogic_InitializeHostAdapter(HostAdapter) &&
+ BusLogic_InquireTargetDevices(HostAdapter))
+ {
+ /*
+ Initialization has been completed successfully. Release and
+ re-register usage of the I/O Address range so that the Model
+ Name of the Host Adapter will appear.
+ */
+ release_region(HostAdapter->IO_Address, BusLogic_IO_PortCount);
+ request_region(HostAdapter->IO_Address, BusLogic_IO_PortCount,
+ HostAdapter->BoardName);
+ BusLogicHostAdapterCount++;
+ }
+ else
+ {
+ /*
+ An error occurred during Host Adapter Configuration Querying,
+ Resource Acquisition, Interrupt Testing, CCB Creation, Host
+ Adapter Initialization, or Target Device Inquiry, so remove
+ Host Adapter from the list of registered BusLogic Host Adapters,
+ destroy the CCBs, Release the System Resources, and Unregister
+ the SCSI Host.
+ */
+ BusLogic_DestroyCCBs(HostAdapter);
+ BusLogic_ReleaseResources(HostAdapter);
+ BusLogic_UnregisterHostAdapter(HostAdapter);
+ scsi_unregister(Host);
+ }
+ }
+ return BusLogicHostAdapterCount;
+}
+
+
+/*
+ BusLogic_ReleaseHostAdapter releases all resources previously acquired to
+ support a specific Host Adapter, including the I/O Address range, and
+ unregisters the BusLogic Host Adapter.
+*/
+
+int BusLogic_ReleaseHostAdapter(SCSI_Host_T *Host)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Host->hostdata;
+ /*
+ Destroy the CCBs and release any system resources acquired to use
+ Host Adapter.
+ */
+ BusLogic_DestroyCCBs(HostAdapter);
+ BusLogic_ReleaseResources(HostAdapter);
+ /*
+ Release usage of the I/O Address range.
+ */
+ release_region(HostAdapter->IO_Address, BusLogic_IO_PortCount);
+ /*
+ Remove Host Adapter from the list of registered BusLogic Host Adapters.
+ */
+ BusLogic_UnregisterHostAdapter(HostAdapter);
+ return 0;
+}
+
+
+/*
+ BusLogic_ComputeResultCode computes a SCSI Subsystem Result Code from
+ the Host Adapter Status and Target Device Status.
+*/
+
+static int BusLogic_ComputeResultCode(BusLogic_HostAdapterStatus_T
+ HostAdapterStatus,
+ BusLogic_TargetDeviceStatus_T
+ TargetDeviceStatus)
+{
+ int HostStatus;
+ switch (HostAdapterStatus)
+ {
+ case BusLogic_CommandCompletedNormally:
+ case BusLogic_LinkedCommandCompleted:
+ case BusLogic_LinkedCommandCompletedWithFlag:
+ HostStatus = DID_OK;
+ break;
+ case BusLogic_SCSISelectionTimeout:
+ HostStatus = DID_TIME_OUT;
+ break;
+ case BusLogic_InvalidOutgoingMailboxActionCode:
+ case BusLogic_InvalidCommandOperationCode:
+ case BusLogic_InvalidCommandParameter:
+ printk("BusLogic: BusLogic Driver Protocol Error 0x%02X\n",
+ HostAdapterStatus);
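+ /* Fall through: these Driver Protocol Errors are also reported
+ as DID_ERROR. */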
+ case BusLogic_DataOverUnderRun:
+ case BusLogic_UnexpectedBusFree:
+ case BusLogic_LinkedCCBhasInvalidLUN:
+ case BusLogic_AutoRequestSenseFailed:
+ case BusLogic_TaggedQueuingMessageRejected:
+ case BusLogic_UnsupportedMessageReceived:
+ case BusLogic_HostAdapterHardwareFailed:
+ case BusLogic_TargetDeviceReconnectedImproperly:
+ case BusLogic_AbortQueueGenerated:
+ case BusLogic_HostAdapterSoftwareError:
+ case BusLogic_HostAdapterHardwareTimeoutError:
+ case BusLogic_SCSIParityErrorDetected:
+ HostStatus = DID_ERROR;
+ break;
+ case BusLogic_InvalidBusPhaseRequested:
+ case BusLogic_TargetFailedResponseToATN:
+ case BusLogic_HostAdapterAssertedRST:
+ case BusLogic_OtherDeviceAssertedRST:
+ case BusLogic_HostAdapterAssertedBusDeviceReset:
+ HostStatus = DID_RESET;
+ break;
+ default:
+ printk("BusLogic: unknown Host Adapter Status 0x%02X\n",
+ HostAdapterStatus);
+ HostStatus = DID_ERROR;
+ break;
+ }
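+ /* The SCSI Subsystem Result Code carries the Host Status in bits 16-23
+ and the Target Device Status in the low order byte. */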
+ return (HostStatus << 16) | TargetDeviceStatus;
+}
+
+
+/*
+ BusLogic_InterruptHandler handles hardware interrupts from BusLogic Host
+ Adapters. To simplify handling shared IRQ Channels, all installed BusLogic
+ Host Adapters are scanned whenever any one of them signals a hardware
+ interrupt.
+*/
+
+static void BusLogic_InterruptHandler(int IRQ_Channel,
+ Registers_T *InterruptRegisters)
+{
+ BusLogic_CCB_T *FirstCompletedCCB = NULL, *LastCompletedCCB = NULL;
+ BusLogic_HostAdapter_T *HostAdapter;
+ int HostAdapterResetPendingCount = 0;
+ /*
+ Iterate over the installed BusLogic Host Adapters accepting any Incoming
+ Mailbox entries and saving the completed CCBs for processing. This
+ interrupt handler is installed with SA_INTERRUPT, so interrupts are
+ disabled when the interrupt handler is entered.
+ */
+ for (HostAdapter = BusLogic_RegisteredHostAdapters;
+ HostAdapter != NULL;
+ HostAdapter = HostAdapter->Next)
+ {
+ unsigned char InterruptRegister;
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ BusLogic_LockHostAdapterID(HostAdapter);
+ /*
+ Read the Host Adapter Interrupt Register.
+ */
+ InterruptRegister = BusLogic_ReadInterruptRegister(HostAdapter);
+ if (InterruptRegister & BusLogic_InterruptValid)
+ {
+ /*
+ Acknowledge the interrupt and reset the Host Adapter
+ Interrupt Register.
+ */
+ BusLogic_WriteControlRegister(HostAdapter, BusLogic_InterruptReset);
+ /*
+ Process valid SCSI Reset State and Incoming Mailbox Loaded
+ interrupts. Command Complete interrupts are noted, and
+ Outgoing Mailbox Available interrupts are ignored, as they
+ are never enabled.
+ */
+ if (InterruptRegister & BusLogic_SCSIResetState)
+ {
+ HostAdapter->HostAdapterResetPending = true;
+ HostAdapterResetPendingCount++;
+ }
+ else if (InterruptRegister & BusLogic_IncomingMailboxLoaded)
+ {
+ /*
+ Scan through the Incoming Mailboxes in Strict Round Robin
+ fashion, saving any completed CCBs for further processing.
+ It is essential that for each CCB and SCSI Command issued,
+ command completion processing is performed exactly once.
+ Therefore, only Incoming Mailboxes with completion code
+ Command Completed Without Error, Command Completed With
+ Error, or Command Aborted At Host Request are saved for
+ completion processing. When an Incoming Mailbox has a
+ completion code of Aborted Command Not Found, the CCB had
+ already completed or been aborted before the current Abort
+ request was processed, and so completion processing has
+ already occurred and no further action should be taken.
+ */
+ BusLogic_IncomingMailbox_T *NextIncomingMailbox =
+ HostAdapter->NextIncomingMailbox;
+ BusLogic_CompletionCode_T MailboxCompletionCode;
+ while ((MailboxCompletionCode =
+ NextIncomingMailbox->CompletionCode) !=
+ BusLogic_IncomingMailboxFree)
+ {
+ BusLogic_CCB_T *CCB = NextIncomingMailbox->CCB;
+ if (MailboxCompletionCode != BusLogic_AbortedCommandNotFound)
+ if (CCB->Status == BusLogic_CCB_Active)
+ {
+ /*
+ Mark this CCB as completed and add it to the end
+ of the list of completed CCBs.
+ */
+ CCB->Status = BusLogic_CCB_Completed;
+ CCB->MailboxCompletionCode = MailboxCompletionCode;
+ CCB->Next = NULL;
+ if (FirstCompletedCCB == NULL)
+ {
+ FirstCompletedCCB = CCB;
+ LastCompletedCCB = CCB;
+ }
+ else
+ {
+ LastCompletedCCB->Next = CCB;
+ LastCompletedCCB = CCB;
+ }
+ HostAdapter->QueuedOperationCount[CCB->TargetID]--;
+ }
+ else
+ {
+ /*
+ If a CCB ever appears in an Incoming Mailbox and
+ is not marked as status Active, then there is
+ most likely a bug in the Host Adapter firmware.
+ */
+ printk("scsi%d: Illegal CCB #%d status %d in "
+ "Incoming Mailbox\n", HostAdapter->HostNumber,
+ CCB->SerialNumber, CCB->Status);
+ }
+ else printk("scsi%d: Aborted CCB #%d to Target %d "
+ "Not Found\n", HostAdapter->HostNumber,
+ CCB->SerialNumber, CCB->TargetID);
+ NextIncomingMailbox->CompletionCode =
+ BusLogic_IncomingMailboxFree;
+ if (++NextIncomingMailbox > HostAdapter->LastIncomingMailbox)
+ NextIncomingMailbox = HostAdapter->FirstIncomingMailbox;
+ }
+ HostAdapter->NextIncomingMailbox = NextIncomingMailbox;
+ }
+ else if (InterruptRegister & BusLogic_CommandComplete)
+ HostAdapter->HostAdapterCommandCompleted = true;
+ }
+ /*
+ Release exclusive access to Host Adapter.
+ */
+ BusLogic_UnlockHostAdapterID(HostAdapter);
+ }
+ /*
+ Enable interrupts while the completed CCBs are processed.
+ */
+ sti();
+ /*
+ Iterate over the Host Adapters performing any pending Host Adapter Resets.
+ */
+ if (HostAdapterResetPendingCount > 0)
+ for (HostAdapter = BusLogic_RegisteredHostAdapters;
+ HostAdapter != NULL;
+ HostAdapter = HostAdapter->Next)
+ if (HostAdapter->HostAdapterResetPending)
+ {
+ BusLogic_ResetHostAdapter(HostAdapter, NULL);
+ HostAdapter->HostAdapterResetPending = false;
+ scsi_mark_host_bus_reset(HostAdapter->SCSI_Host);
+ }
+ /*
+ Iterate over the completed CCBs setting the SCSI Command Result Codes,
+ deallocating the CCBs, and calling the Completion Routines.
+ */
+ while (FirstCompletedCCB != NULL)
+ {
+ BusLogic_CCB_T *CCB = FirstCompletedCCB;
+ SCSI_Command_T *Command = CCB->Command;
+ FirstCompletedCCB = FirstCompletedCCB->Next;
+ HostAdapter = CCB->HostAdapter;
+ /*
+ Bus Device Reset CCBs have the Command field non-NULL only when a Bus
+ Device Reset was requested for a command that was not currently active
+ in the Host Adapter, and hence would not have its Completion Routine
+ called otherwise.
+ */
+ if (CCB->Opcode == BusLogic_SCSIBusDeviceReset)
+ {
+ printk("scsi%d: Bus Device Reset CCB #%d to Target %d Completed\n",
+ HostAdapter->HostNumber, CCB->SerialNumber, CCB->TargetID);
+ if (Command != NULL) Command->result = DID_RESET << 16;
+ }
+ else
+ /*
+ Translate the Mailbox Completion Code, Host Adapter Status, and
+ Target Device Status into a SCSI Subsystem Result Code.
+ */
+ switch (CCB->MailboxCompletionCode)
+ {
+ case BusLogic_IncomingMailboxFree:
+ case BusLogic_AbortedCommandNotFound:
+ printk("scsi%d: CCB #%d to Target %d Impossible State\n",
+ HostAdapter->HostNumber, CCB->SerialNumber, CCB->TargetID);
+ break;
+ case BusLogic_CommandCompletedWithoutError:
+ HostAdapter->CommandSuccessfulFlag[CCB->TargetID] = true;
+ Command->result = DID_OK << 16;
+ break;
+ case BusLogic_CommandAbortedAtHostRequest:
+ printk("scsi%d: CCB #%d to Target %d Aborted\n",
+ HostAdapter->HostNumber, CCB->SerialNumber, CCB->TargetID);
+ Command->result = DID_ABORT << 16;
+ break;
+ case BusLogic_CommandCompletedWithError:
+ Command->result =
+ BusLogic_ComputeResultCode(CCB->HostAdapterStatus,
+ CCB->TargetDeviceStatus);
+ if (BusLogic_GlobalOptions & BusLogic_TraceErrors)
+ if (CCB->HostAdapterStatus != BusLogic_SCSISelectionTimeout)
+ {
+ int i;
+ printk("scsi%d: CCB #%d Target %d: Result %X "
+ "Host Adapter Status %02X Target Status %02X\n",
+ HostAdapter->HostNumber, CCB->SerialNumber,
+ CCB->TargetID, Command->result,
+ CCB->HostAdapterStatus, CCB->TargetDeviceStatus);
+ printk("scsi%d: CDB ", HostAdapter->HostNumber);
+ for (i = 0; i < CCB->CDB_Length; i++)
+ printk(" %02X", CCB->CDB[i]);
+ printk("\n");
+ printk("scsi%d: Sense ", HostAdapter->HostNumber);
+ for (i = 0; i < CCB->SenseDataLength; i++)
+ printk(" %02X", (*CCB->SenseDataPointer)[i]);
+ printk("\n");
+ }
+ break;
+ }
+ /*
+ Place CCB back on the Host Adapter's free list.
+ */
+ BusLogic_DeallocateCCB(CCB);
+ /*
+ Call the SCSI Command Completion Routine if appropriate.
+ */
+ if (Command != NULL) Command->scsi_done(Command);
+ }
+}
+
+
+/*
+ BusLogic_WriteOutgoingMailbox places CCB and Action Code into an Outgoing
+ Mailbox for execution by Host Adapter.
+*/
+
+static boolean BusLogic_WriteOutgoingMailbox(BusLogic_HostAdapter_T
+ *HostAdapter,
+ BusLogic_ActionCode_T ActionCode,
+ BusLogic_CCB_T *CCB)
+{
+ BusLogic_OutgoingMailbox_T *NextOutgoingMailbox;
+ boolean Result = false;
+ BusLogic_LockHostAdapter(HostAdapter);
+ NextOutgoingMailbox = HostAdapter->NextOutgoingMailbox;
+ if (NextOutgoingMailbox->ActionCode == BusLogic_OutgoingMailboxFree)
+ {
+ CCB->Status = BusLogic_CCB_Active;
+ /*
+ The CCB field must be written before the Action Code field since
+ the Host Adapter is operating asynchronously and the locking code
+ does not protect against simultaneous access by the Host Adapter.
+ */
+ NextOutgoingMailbox->CCB = CCB;
+ NextOutgoingMailbox->ActionCode = ActionCode;
+ BusLogic_StartMailboxScan(HostAdapter);
+ if (++NextOutgoingMailbox > HostAdapter->LastOutgoingMailbox)
+ NextOutgoingMailbox = HostAdapter->FirstOutgoingMailbox;
+ HostAdapter->NextOutgoingMailbox = NextOutgoingMailbox;
+ if (ActionCode == BusLogic_MailboxStartCommand)
+ HostAdapter->QueuedOperationCount[CCB->TargetID]++;
+ Result = true;
+ }
+ BusLogic_UnlockHostAdapter(HostAdapter);
+ return Result;
+}
+
+
+/*
+ BusLogic_QueueCommand creates a CCB for Command and places it into an
+ Outgoing Mailbox for execution by the associated Host Adapter.
+*/
+
+int BusLogic_QueueCommand(SCSI_Command_T *Command,
+ void (*CompletionRoutine)(SCSI_Command_T *))
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Command->host->hostdata;
+ unsigned char *CDB = Command->cmnd;
+ unsigned char CDB_Length = Command->cmd_len;
+ unsigned char TargetID = Command->target;
+ unsigned char LogicalUnit = Command->lun;
+ void *BufferPointer = Command->request_buffer;
+ int BufferLength = Command->request_bufflen;
+ int SegmentCount = Command->use_sg;
+ BusLogic_CCB_T *CCB;
+ long EnableTQ;
+ /*
+ SCSI REQUEST_SENSE commands will be executed automatically by the Host
+ Adapter for any errors, so they should not be executed explicitly unless
+ the Sense Data is zero indicating that no error occurred.
+ */
+ if (CDB[0] == REQUEST_SENSE && Command->sense_buffer[0] != 0)
+ {
+ Command->result = DID_OK << 16;
+ CompletionRoutine(Command);
+ return 0;
+ }
+ /*
+ Allocate a CCB from the Host Adapter's free list. If there are none
+ available and memory allocation fails, return a result code of Bus Busy
+ so that this Command will be retried.
+ */
+ CCB = BusLogic_AllocateCCB(HostAdapter);
+ if (CCB == NULL)
+ {
+ Command->result = DID_BUS_BUSY << 16;
+ CompletionRoutine(Command);
+ return 0;
+ }
+ /*
+ Initialize the fields in the BusLogic Command Control Block (CCB).
+ */
+ if (SegmentCount == 0)
+ {
+ CCB->Opcode = BusLogic_InitiatorCCB;
+ CCB->DataLength = BufferLength;
+ CCB->DataPointer = BufferPointer;
+ }
+ else
+ {
+ SCSI_ScatterList_T *ScatterList = (SCSI_ScatterList_T *) BufferPointer;
+ int Segment;
+ CCB->Opcode = BusLogic_InitiatorCCB_ScatterGather;
+ CCB->DataLength = SegmentCount * sizeof(BusLogic_ScatterGatherSegment_T);
+ CCB->DataPointer = CCB->ScatterGatherList;
+ for (Segment = 0; Segment < SegmentCount; Segment++)
+ {
+ CCB->ScatterGatherList[Segment].SegmentByteCount =
+ ScatterList[Segment].length;
+ CCB->ScatterGatherList[Segment].SegmentDataPointer =
+ ScatterList[Segment].address;
+ }
+ }
+ switch (CDB[0])
+ {
+ case READ_6:
+ case READ_10:
+ CCB->DataDirection = BusLogic_DataInLengthChecked;
+ HostAdapter->ReadWriteOperationCount[TargetID]++;
+ break;
+ case WRITE_6:
+ case WRITE_10:
+ CCB->DataDirection = BusLogic_DataOutLengthChecked;
+ HostAdapter->ReadWriteOperationCount[TargetID]++;
+ break;
+ default:
+ CCB->DataDirection = BusLogic_UncheckedDataTransfer;
+ break;
+ }
+ CCB->CDB_Length = CDB_Length;
+ CCB->SenseDataLength = sizeof(Command->sense_buffer);
+ CCB->HostAdapterStatus = 0;
+ CCB->TargetDeviceStatus = 0;
+ CCB->TargetID = TargetID;
+ CCB->LogicalUnit = LogicalUnit;
+ /*
+ For Wide SCSI Host Adapters, Wide Mode CCBs are used to support more than
+ 8 Logical Units per Target, and this requires setting the overloaded
+ TagEnable field to Logical Unit bit 5.
+ */
+ if (HostAdapter->HostWideSCSI)
+ {
+ CCB->TagEnable = LogicalUnit >> 5;
+ CCB->WideModeTagEnable = false;
+ }
+ else CCB->TagEnable = false;
+ /*
+ BusLogic recommends that after a Reset the first couple of commands that
+ are sent to a Target be sent in a non Tagged Queue fashion so that the Host
+ Adapter and Target can establish Synchronous Transfer before Queue Tag
+ messages can interfere with the Synchronous Negotiation message. By
+ waiting to enable Tagged Queuing until after the first 16 read/write
+ commands have been sent, it is assured that the Tagged Queuing message
+ will not occur while the partition table is printed.
+ */
+ if ((HostAdapter->TaggedQueuingPermitted & (1 << TargetID)) &&
+ Command->device->tagged_supported &&
+ (EnableTQ = HostAdapter->ReadWriteOperationCount[TargetID] - 16) >= 0)
+ {
+ BusLogic_QueueTag_T QueueTag = BusLogic_SimpleQueueTag;
+ unsigned long CurrentTime = jiffies;
+ if (EnableTQ == 0)
+ printk("scsi%d: Tagged Queuing now active for Target %d\n",
+ HostAdapter->HostNumber, TargetID);
+ /*
+ When using Tagged Queuing with Simple Queue Tags, it appears that disk
+ drive controllers do not guarantee that a queued command will not
+ remain in a disconnected state indefinitely if commands that read or
+ write nearer the head position continue to arrive without interruption.
+ Therefore, for each Target Device this driver keeps track of the last
+ time either the queue was empty or an Ordered Queue Tag was issued. If
+ more than 2 seconds have elapsed since this last sequence point, this
+ command will be issued with an Ordered Queue Tag rather than a Simple
+ Queue Tag, which forces the Target Device to complete all previously
+ queued commands before this command may be executed.
+ */
+ if (HostAdapter->QueuedOperationCount[TargetID] == 0)
+ HostAdapter->LastSequencePoint[TargetID] = CurrentTime;
+ else if (CurrentTime - HostAdapter->LastSequencePoint[TargetID] > 2*HZ)
+ {
+ HostAdapter->LastSequencePoint[TargetID] = CurrentTime;
+ QueueTag = BusLogic_OrderedQueueTag;
+ }
+ if (HostAdapter->HostWideSCSI)
+ {
+ CCB->WideModeTagEnable = true;
+ CCB->WideModeQueueTag = QueueTag;
+ }
+ else
+ {
+ CCB->TagEnable = true;
+ CCB->QueueTag = QueueTag;
+ }
+ }
+ memcpy(CCB->CDB, CDB, CDB_Length);
+ CCB->SenseDataPointer = (SCSI_SenseData_T *) &Command->sense_buffer;
+ CCB->Command = Command;
+ Command->scsi_done = CompletionRoutine;
+ /*
+ Place the CCB in an Outgoing Mailbox. If there are no Outgoing
+ Mailboxes available, return a result code of Bus Busy so that this
+ Command will be retried.
+ */
+ if (!(BusLogic_WriteOutgoingMailbox(HostAdapter,
+ BusLogic_MailboxStartCommand, CCB)))
+ {
+ printk("scsi%d: cannot write Outgoing Mailbox\n",
+ HostAdapter->HostNumber);
+ BusLogic_DeallocateCCB(CCB);
+ Command->result = DID_BUS_BUSY << 16;
+ CompletionRoutine(Command);
+ }
+ return 0;
+}
+
+
+/*
+ BusLogic_AbortCommand aborts Command if possible.
+*/
+
+int BusLogic_AbortCommand(SCSI_Command_T *Command)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Command->host->hostdata;
+ unsigned long CommandPID = Command->pid;
+ unsigned char InterruptRegister;
+ BusLogic_CCB_T *CCB;
+ int Result;
+ /*
+ If the Host Adapter has posted an interrupt but the Interrupt Handler
+ has not been called for some reason (e.g. the interrupt was lost), try
+ calling the Interrupt Handler directly to process the commands that
+ have been completed.
+ */
+ InterruptRegister = BusLogic_ReadInterruptRegister(HostAdapter);
+ if (InterruptRegister & BusLogic_InterruptValid)
+ {
+ unsigned long ProcessorFlags;
+ printk("scsi%d: Recovering Lost/Delayed Interrupt for IRQ Channel %d\n",
+ HostAdapter->HostNumber, HostAdapter->IRQ_Channel);
+ save_flags(ProcessorFlags);
+ cli();
+ BusLogic_InterruptHandler(HostAdapter->IRQ_Channel, NULL);
+ restore_flags(ProcessorFlags);
+ return SCSI_ABORT_SNOOZE;
+ }
+ /*
+ Find the CCB to be aborted if possible.
+ */
+ BusLogic_LockHostAdapter(HostAdapter);
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Command == Command) break;
+ BusLogic_UnlockHostAdapter(HostAdapter);
+ if (CCB == NULL)
+ {
+ printk("scsi%d: Unable to Abort Command to Target %d - No CCB Found\n",
+ HostAdapter->HostNumber, Command->target);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+ /*
+ Briefly pause to see if this command will complete.
+ */
+ printk("scsi%d: Pausing briefly to see if CCB #%d "
+ "to Target %d will complete\n",
+ HostAdapter->HostNumber, CCB->SerialNumber, CCB->TargetID);
+ BusLogic_Delay(2);
+ /*
+ If this CCB is still Active and still refers to the same Command, then
+ actually aborting this Command is necessary.
+ */
+ BusLogic_LockHostAdapter(HostAdapter);
+ Result = SCSI_ABORT_NOT_RUNNING;
+ if (CCB->Status == BusLogic_CCB_Active &&
+ CCB->Command == Command && Command->pid == CommandPID)
+ {
+ /*
+ Attempt to abort this CCB.
+ */
+ if (BusLogic_WriteOutgoingMailbox(HostAdapter,
+ BusLogic_MailboxAbortCommand, CCB))
+ {
+ printk("scsi%d: Aborting CCB #%d to Target %d\n",
+ HostAdapter->HostNumber, CCB->SerialNumber, CCB->TargetID);
+ Result = SCSI_ABORT_PENDING;
+ }
+ else
+ {
+ printk("scsi%d: Unable to Abort CCB #%d to Target %d - "
+ "No Outgoing Mailboxes\n", HostAdapter->HostNumber,
+ CCB->SerialNumber, CCB->TargetID);
+ Result = SCSI_ABORT_BUSY;
+ }
+ }
+ else printk("scsi%d: CCB #%d to Target %d completed\n",
+ HostAdapter->HostNumber, CCB->SerialNumber, CCB->TargetID);
+ BusLogic_UnlockHostAdapter(HostAdapter);
+ return Result;
+}
+
+
+/*
+ BusLogic_ResetHostAdapter resets Host Adapter if possible, marking all
+ currently executing SCSI commands as having been reset, as well as
+ the specified Command if non-NULL.
+*/
+
+static int BusLogic_ResetHostAdapter(BusLogic_HostAdapter_T *HostAdapter,
+ SCSI_Command_T *Command)
+{
+ BusLogic_CCB_T *CCB;
+ if (Command == NULL)
+ printk("scsi%d: Resetting %s due to SCSI Reset State Interrupt\n",
+ HostAdapter->HostNumber, HostAdapter->BoardName);
+ else printk("scsi%d: Resetting %s due to Target %d\n",
+ HostAdapter->HostNumber, HostAdapter->BoardName, Command->target);
+ /*
+ Attempt to Reset and Reinitialize the Host Adapter.
+ */
+ BusLogic_LockHostAdapter(HostAdapter);
+ if (!(BusLogic_HardResetHostAdapter(HostAdapter) &&
+ BusLogic_InitializeHostAdapter(HostAdapter)))
+ {
+ printk("scsi%d: Resetting %s Failed\n",
+ HostAdapter->HostNumber, HostAdapter->BoardName);
+ BusLogic_UnlockHostAdapter(HostAdapter);
+ return SCSI_RESET_ERROR;
+ }
+ BusLogic_UnlockHostAdapter(HostAdapter);
+ /*
+ Wait a few seconds between the Host Adapter Hard Reset which initiates
+ a SCSI Bus Reset and issuing any SCSI commands. Some SCSI devices get
+ confused if they receive SCSI commands too soon after a SCSI Bus Reset.
+ */
+ BusLogic_Delay(HostAdapter->BusSettleTime);
+ /*
+ Mark all currently executing CCBs as having been reset.
+ */
+ BusLogic_LockHostAdapter(HostAdapter);
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Status == BusLogic_CCB_Active)
+ {
+ CCB->Status = BusLogic_CCB_Reset;
+ if (CCB->Command == Command)
+ {
+ CCB->Command = NULL;
+ /*
+ Disable Tagged Queuing if it was active for this Target Device.
+ */
+ if (((HostAdapter->HostWideSCSI && CCB->WideModeTagEnable) ||
+ (!HostAdapter->HostWideSCSI && CCB->TagEnable)) &&
+ (HostAdapter->TaggedQueuingPermitted & (1 << CCB->TargetID)))
+ {
+ HostAdapter->TaggedQueuingPermitted &= ~(1 << CCB->TargetID);
+ printk("scsi%d: Tagged Queuing now disabled for Target %d\n",
+ HostAdapter->HostNumber, CCB->TargetID);
+ }
+ }
+ }
+ BusLogic_UnlockHostAdapter(HostAdapter);
+ /*
+ Perform completion processing for the Command being Reset.
+ */
+ if (Command != NULL)
+ {
+ Command->result = DID_RESET << 16;
+ Command->scsi_done(Command);
+ }
+ /*
+ Perform completion processing for any other active CCBs.
+ */
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Status == BusLogic_CCB_Reset)
+ {
+ Command = CCB->Command;
+ BusLogic_DeallocateCCB(CCB);
+ if (Command != NULL)
+ {
+ Command->result = DID_RESET << 16;
+ Command->scsi_done(Command);
+ }
+ }
+ return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
+}
+
+
+/*
+ BusLogic_BusDeviceReset sends a Bus Device Reset to the Target
+ associated with Command.
+*/
+
+static int BusLogic_BusDeviceReset(BusLogic_HostAdapter_T *HostAdapter,
+ SCSI_Command_T *Command)
+{
+ BusLogic_CCB_T *CCB = BusLogic_AllocateCCB(HostAdapter), *XCCB;
+ unsigned char TargetID = Command->target;
+ /*
+ If sending a Bus Device Reset is impossible, attempt a full Host
+ Adapter Hard Reset and SCSI Bus Reset.
+ */
+ if (CCB == NULL)
+ return BusLogic_ResetHostAdapter(HostAdapter, Command);
+ printk("scsi%d: Sending Bus Device Reset CCB #%d to Target %d\n",
+ HostAdapter->HostNumber, CCB->SerialNumber, TargetID);
+ CCB->Opcode = BusLogic_SCSIBusDeviceReset;
+ CCB->TargetID = TargetID;
+ CCB->Command = Command;
+ /*
+ If there is a currently executing CCB in the Host Adapter for this Command,
+ then an Incoming Mailbox entry will be made with a completion code of
+ BusLogic_HostAdapterAssertedBusDeviceReset. Otherwise, the CCB's Command
+ field will be left pointing to the Command so that the interrupt for the
+ completion of the Bus Device Reset can call the Completion Routine for the
+ Command.
+ */
+ BusLogic_LockHostAdapter(HostAdapter);
+ for (XCCB = HostAdapter->All_CCBs; XCCB != NULL; XCCB = XCCB->NextAll)
+ if (XCCB->Command == Command && XCCB->Status == BusLogic_CCB_Active)
+ {
+ CCB->Command = NULL;
+ /*
+ Disable Tagged Queuing if it was active for this Target Device.
+ */
+ if (((HostAdapter->HostWideSCSI && XCCB->WideModeTagEnable) ||
+ (!HostAdapter->HostWideSCSI && XCCB->TagEnable)) &&
+ (HostAdapter->TaggedQueuingPermitted & (1 << TargetID)))
+ {
+ HostAdapter->TaggedQueuingPermitted &= ~(1 << TargetID);
+ printk("scsi%d: Tagged Queuing now disabled for Target %d\n",
+ HostAdapter->HostNumber, TargetID);
+ }
+ break;
+ }
+ BusLogic_UnlockHostAdapter(HostAdapter);
+ /*
+ Attempt to write an Outgoing Mailbox with the Bus Device Reset CCB.
+ If sending a Bus Device Reset is impossible, attempt a full Host
+ Adapter Hard Reset and SCSI Bus Reset.
+ */
+ if (!(BusLogic_WriteOutgoingMailbox(HostAdapter,
+ BusLogic_MailboxStartCommand, CCB)))
+ {
+ printk("scsi%d: cannot write Outgoing Mailbox for Bus Device Reset\n",
+ HostAdapter->HostNumber);
+ BusLogic_DeallocateCCB(CCB);
+ return BusLogic_ResetHostAdapter(HostAdapter, Command);
+ }
+ HostAdapter->ReadWriteOperationCount[TargetID] = 0;
+ HostAdapter->QueuedOperationCount[TargetID] = 0;
+ return SCSI_RESET_PENDING;
+}
+
+
+/*
+ BusLogic_ResetCommand takes appropriate action to reset Command.
+*/
+
+int BusLogic_ResetCommand(SCSI_Command_T *Command)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Command->host->hostdata;
+ unsigned char TargetID = Command->target;
+ unsigned char ErrorRecoveryOption =
+ HostAdapter->ErrorRecoveryOption[TargetID];
+ if (ErrorRecoveryOption == BusLogic_ErrorRecoveryDefault)
+ if (Command->host->suggest_bus_reset)
+ ErrorRecoveryOption = BusLogic_ErrorRecoveryHardReset;
+ else ErrorRecoveryOption = BusLogic_ErrorRecoveryBusDeviceReset;
+ switch (ErrorRecoveryOption)
+ {
+ case BusLogic_ErrorRecoveryHardReset:
+ return BusLogic_ResetHostAdapter(HostAdapter, Command);
+ case BusLogic_ErrorRecoveryBusDeviceReset:
+ if (HostAdapter->CommandSuccessfulFlag[TargetID])
+ {
+ HostAdapter->CommandSuccessfulFlag[TargetID] = false;
+ return BusLogic_BusDeviceReset(HostAdapter, Command);
+ }
+ else return BusLogic_ResetHostAdapter(HostAdapter, Command);
+ }
+ printk("scsi%d: Error Recovery for Target %d Suppressed\n",
+ HostAdapter->HostNumber, TargetID);
+ return SCSI_RESET_PUNT;
+}
+
+
+/*
+ BusLogic_BIOSDiskParameters returns the Heads/Sectors/Cylinders BIOS Disk
+ Parameters for Disk. The default disk geometry is 64 heads, 32 sectors, and
+ the appropriate number of cylinders so as not to exceed drive capacity. In
+ order for disks equal to or larger than 1 GB to be addressable by the BIOS
+ without exceeding the BIOS limitation of 1024 cylinders, Extended Translation
+ may be enabled in AutoSCSI on "C" Series boards or by a dip switch setting
+ on older boards. With Extended Translation enabled, drives between 1 GB
+ inclusive and 2 GB exclusive are given a disk geometry of 128 heads and 32
+ sectors, and drives between 2 GB inclusive and 8 GB exclusive are given a
+ disk geometry of 255 heads and 63 sectors. On "C" Series boards the firmware
+ can be queried for the precise translation in effect for each drive
+ individually, but there is really no need to do so since we know the total
+ capacity of the drive and whether Extended Translation is enabled, hence we
+ can deduce the BIOS disk geometry that must be in effect.
+*/
+
+int BusLogic_BIOSDiskParameters(SCSI_Disk_T *Disk, KernelDevice_T Device,
+ int *Parameters)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Disk->device->host->hostdata;
+ BIOS_DiskParameters_T *DiskParameters = (BIOS_DiskParameters_T *) Parameters;
+ if (HostAdapter->ExtendedTranslation &&
+ Disk->capacity >= 2*1024*1024 /* 1 GB in 512 byte sectors */)
+ if (Disk->capacity >= 4*1024*1024 /* 2 GB in 512 byte sectors */)
+ {
+ DiskParameters->Heads = 255;
+ DiskParameters->Sectors = 63;
+ }
+ else
+ {
+ DiskParameters->Heads = 128;
+ DiskParameters->Sectors = 32;
+ }
+ else
+ {
+ DiskParameters->Heads = 64;
+ DiskParameters->Sectors = 32;
+ }
+ DiskParameters->Cylinders =
+ Disk->capacity / (DiskParameters->Heads * DiskParameters->Sectors);
+ return 0;
+}
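+
+/*
+  As a worked example of the geometry selection above (using a hypothetical
+  drive), a 4 GB disk of 8388608 512-byte sectors with Extended Translation
+  enabled receives 255 Heads and 63 Sectors, so Cylinders =
+  8388608 / (255 * 63) = 522, comfortably below the BIOS limit of 1024
+  cylinders.  Without Extended Translation the same disk would receive
+  64 Heads and 32 Sectors, giving 8388608 / (64 * 32) = 4096 cylinders.
+*/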
+
+
+/*
+ BusLogic_Setup handles processing of Kernel Command Line Arguments.
+
+ For the BusLogic driver, a kernel command line entry comprises the driver
+ identifier "BusLogic=" optionally followed by a comma-separated sequence of
+ integers and then optionally followed by a comma-separated sequence of
+ strings. Each command line entry applies to one BusLogic Host Adapter.
+ Multiple command line entries may be used in systems which contain multiple
+ BusLogic Host Adapters.
+
+ The first integer specified is the I/O Address at which the Host Adapter is
+ located. If unspecified, it defaults to 0 which means to apply this entry to
+ the first BusLogic Host Adapter found during the default probe sequence. If
+ any I/O Address parameters are provided on the command line, then the default
+ probe sequence is omitted.
+
+ The second integer specified is the number of Concurrent Commands per Logical
+ Unit to allow for Target Devices on the Host Adapter. If unspecified, it
+  defaults to 0 which means to use the value of BusLogic_Concurrency, or
+  BusLogic_Concurrency_BB for Host Adapters that require ISA bounce buffers.
+
+ The third integer specified is the Bus Settle Time in seconds. This is
+ the amount of time to wait between a Host Adapter Hard Reset which initiates
+ a SCSI Bus Reset and issuing any SCSI commands. If unspecified, it defaults
+ to 0 which means to use the value of BusLogic_DefaultBusSettleTime.
+
+ The fourth integer specified is the Local Options. If unspecified, it
+ defaults to 0. Note that Local Options are only applied to a specific Host
+ Adapter.
+
+ The fifth integer specified is the Global Options. If unspecified, it
+ defaults to 0. Note that Global Options are applied across all Host
+ Adapters.
+
+ The string options are used to provide control over Tagged Queuing and Error
+ Recovery. If both Tagged Queuing and Error Recovery strings are provided, the
+ Tagged Queuing specification string must come first.
+
+ The Tagged Queuing specification begins with "TQ:" and allows for explicitly
+ specifying whether Tagged Queuing is permitted on Target Devices that support
+ it. The following specification options are available:
+
+ TQ:Default Tagged Queuing will be permitted based on the firmware
+ version of the BusLogic Host Adapter and based on
+ whether the Concurrency value allows queuing multiple
+ commands.
+
+ TQ:Enable Tagged Queuing will be enabled for all Target Devices
+ on this Host Adapter overriding any limitation that
+ would otherwise be imposed based on the Host Adapter
+ firmware version.
+
+ TQ:Disable Tagged Queuing will be disabled for all Target Devices
+ on this Host Adapter.
+
+ TQ:<Per-Target-Spec> Tagged Queuing will be controlled individually for each
+ Target Device. <Per-Target-Spec> is a sequence of "Y",
+                        "N", and "X" characters.  "Y" enables Tagged Queuing,
+ "N" disables Tagged Queuing, and "X" accepts the
+ default based on the firmware version. The first
+ character refers to Target 0, the second to Target 1,
+ and so on; if the sequence of "Y", "N", and "X"
+ characters does not cover all the Target Devices,
+ unspecified characters are assumed to be "X".
+
+ Note that explicitly requesting Tagged Queuing may lead to problems; this
+ facility is provided primarily to allow disabling Tagged Queuing on Target
+ Devices that do not implement it correctly.
+
+ The Error Recovery specification begins with "ER:" and allows for explicitly
+ specifying the Error Recovery action to be performed when ResetCommand is
+ called due to a SCSI Command failing to complete successfully. The following
+ specification options are available:
+
+ ER:Default Error Recovery will select between the Hard Reset and
+ Bus Device Reset options based on the recommendation
+ of the SCSI Subsystem.
+
+ ER:HardReset Error Recovery will initiate a Host Adapter Hard Reset
+ which also causes a SCSI Bus Reset.
+
+ ER:BusDeviceReset Error Recovery will send a Bus Device Reset message to
+ the individual Target Device causing the error. If
+ Error Recovery is again initiated for this Target
+ Device and no SCSI Command to this Target Device has
+ completed successfully since the Bus Device Reset
+ message was sent, then a Hard Reset will be attempted.
+
+ ER:None Error Recovery will be suppressed. This option should
+ only be selected if a SCSI Bus Reset or Bus Device
+ Reset will cause the Target Device to fail completely
+ and unrecoverably.
+
+ ER:<Per-Target-Spec> Error Recovery will be controlled individually for each
+ Target Device. <Per-Target-Spec> is a sequence of "D",
+ "H", "B", and "N" characters. "D" selects Default, "H"
+ selects Hard Reset, "B" selects Bus Device Reset, and
+ "N" selects None. The first character refers to Target
+ 0, the second to Target 1, and so on; if the sequence
+ of "D", "H", "B", and "N" characters does not cover all
+ the Target Devices, unspecified characters are assumed
+ to be "D".
+*/
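+
+/*
+  As a purely illustrative example of the format described above (the
+  address and counts are hypothetical), a command line entry might read
+
+      BusLogic=0x330,4,2,0,0,TQ:Enable,ER:HardReset
+
+  which applies to the Host Adapter at I/O Address 0x330, allows 4
+  Concurrent Commands per Logical Unit, uses a 2 second Bus Settle Time,
+  sets no Local or Global Options, enables Tagged Queuing for all Target
+  Devices, and selects Hard Reset Error Recovery.  A per-target entry such
+  as
+
+      BusLogic=0,0,0,0,0,TQ:YN,ER:HD
+
+  enables Tagged Queuing on Target 0, disables it on Target 1, leaves the
+  remaining Targets at the firmware default, and selects Hard Reset Error
+  Recovery for Target 0 with the Default action for the remaining Targets.
+*/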
+
+void BusLogic_Setup(char *Strings, int *Integers)
+{
+ BusLogic_CommandLineEntry_T *CommandLineEntry =
+ &BusLogic_CommandLineEntries[BusLogic_CommandLineEntryCount++];
+ static int ProbeListIndex = 0;
+ int IntegerCount = Integers[0], TargetID, i;
+ CommandLineEntry->IO_Address = 0;
+ CommandLineEntry->Concurrency = 0;
+ CommandLineEntry->BusSettleTime = 0;
+ CommandLineEntry->LocalOptions = 0;
+ CommandLineEntry->TaggedQueuingPermitted = 0;
+ CommandLineEntry->TaggedQueuingPermittedMask = 0;
+ memset(CommandLineEntry->ErrorRecoveryOption,
+ BusLogic_ErrorRecoveryDefault,
+ sizeof(CommandLineEntry->ErrorRecoveryOption));
+ if (IntegerCount > 5)
+ printk("BusLogic: Unexpected Command Line Integers ignored\n");
+ if (IntegerCount >= 1)
+ {
+ unsigned short IO_Address = Integers[1];
+ if (IO_Address > 0)
+ {
+ for (i = 0; ; i++)
+ if (BusLogic_IO_StandardAddresses[i] == 0)
+ {
+ printk("BusLogic: Invalid Command Line Entry "
+ "(illegal I/O Address 0x%X)\n", IO_Address);
+ return;
+ }
+ else if (i < ProbeListIndex &&
+ IO_Address == BusLogic_IO_AddressProbeList[i])
+ {
+ printk("BusLogic: Invalid Command Line Entry "
+ "(duplicate I/O Address 0x%X)\n", IO_Address);
+ return;
+ }
+ else if (IO_Address >= 0x1000 ||
+ IO_Address == BusLogic_IO_StandardAddresses[i]) break;
+ BusLogic_IO_AddressProbeList[ProbeListIndex++] = IO_Address;
+ BusLogic_IO_AddressProbeList[ProbeListIndex] = 0;
+ }
+ CommandLineEntry->IO_Address = IO_Address;
+ }
+ if (IntegerCount >= 2)
+ {
+ unsigned short Concurrency = Integers[2];
+ if (Concurrency > BusLogic_MailboxCount)
+ {
+ printk("BusLogic: Invalid Command Line Entry "
+ "(illegal Concurrency %d)\n", Concurrency);
+ return;
+ }
+ CommandLineEntry->Concurrency = Concurrency;
+ }
+ if (IntegerCount >= 3)
+ CommandLineEntry->BusSettleTime = Integers[3];
+ if (IntegerCount >= 4)
+ CommandLineEntry->LocalOptions = Integers[4];
+ if (IntegerCount >= 5)
+ BusLogic_GlobalOptions |= Integers[5];
+ if (!(BusLogic_CommandLineEntryCount == 0 || ProbeListIndex == 0 ||
+ BusLogic_CommandLineEntryCount == ProbeListIndex))
+ {
+ printk("BusLogic: Invalid Command Line Entry "
+ "(all or no I/O Addresses must be specified)\n");
+ return;
+ }
+ if (Strings == NULL) return;
+ if (strncmp(Strings, "TQ:", 3) == 0)
+ {
+ Strings += 3;
+ if (strncmp(Strings, "Default", 7) == 0)
+ Strings += 7;
+ else if (strncmp(Strings, "Enable", 6) == 0)
+ {
+ Strings += 6;
+ CommandLineEntry->TaggedQueuingPermitted = 0xFFFF;
+ CommandLineEntry->TaggedQueuingPermittedMask = 0xFFFF;
+ }
+ else if (strncmp(Strings, "Disable", 7) == 0)
+ {
+ Strings += 7;
+ CommandLineEntry->TaggedQueuingPermitted = 0x0000;
+ CommandLineEntry->TaggedQueuingPermittedMask = 0xFFFF;
+ }
+ else
+ for (TargetID = 0; TargetID < BusLogic_MaxTargetIDs; TargetID++)
+ switch (*Strings++)
+ {
+ case 'Y':
+ CommandLineEntry->TaggedQueuingPermitted |= 1 << TargetID;
+ CommandLineEntry->TaggedQueuingPermittedMask |= 1 << TargetID;
+ break;
+ case 'N':
+ CommandLineEntry->TaggedQueuingPermittedMask |= 1 << TargetID;
+ break;
+ case 'X':
+ break;
+ default:
+ Strings--;
+ TargetID = BusLogic_MaxTargetIDs;
+ break;
+ }
+ }
+ if (*Strings == ',') Strings++;
+ if (strncmp(Strings, "ER:", 3) == 0)
+ {
+ Strings += 3;
+ if (strncmp(Strings, "Default", 7) == 0)
+ Strings += 7;
+ else if (strncmp(Strings, "HardReset", 9) == 0)
+ {
+ Strings += 9;
+ memset(CommandLineEntry->ErrorRecoveryOption,
+ BusLogic_ErrorRecoveryHardReset,
+ sizeof(CommandLineEntry->ErrorRecoveryOption));
+ }
+ else if (strncmp(Strings, "BusDeviceReset", 14) == 0)
+ {
+ Strings += 14;
+ memset(CommandLineEntry->ErrorRecoveryOption,
+ BusLogic_ErrorRecoveryBusDeviceReset,
+ sizeof(CommandLineEntry->ErrorRecoveryOption));
+ }
+ else if (strncmp(Strings, "None", 4) == 0)
+ {
+ Strings += 4;
+ memset(CommandLineEntry->ErrorRecoveryOption,
+ BusLogic_ErrorRecoveryNone,
+ sizeof(CommandLineEntry->ErrorRecoveryOption));
+ }
+ else
+ for (TargetID = 0; TargetID < BusLogic_MaxTargetIDs; TargetID++)
+ switch (*Strings++)
+ {
+ case 'D':
+ CommandLineEntry->ErrorRecoveryOption[TargetID] =
+ BusLogic_ErrorRecoveryDefault;
+ break;
+ case 'H':
+ CommandLineEntry->ErrorRecoveryOption[TargetID] =
+ BusLogic_ErrorRecoveryHardReset;
+ break;
+ case 'B':
+ CommandLineEntry->ErrorRecoveryOption[TargetID] =
+ BusLogic_ErrorRecoveryBusDeviceReset;
+ break;
+ case 'N':
+ CommandLineEntry->ErrorRecoveryOption[TargetID] =
+ BusLogic_ErrorRecoveryNone;
+ break;
+ default:
+ Strings--;
+ TargetID = BusLogic_MaxTargetIDs;
+ break;
+ }
+ }
+ if (*Strings != '\0')
+ printk("BusLogic: Unexpected Command Line String '%s' ignored\n", Strings);
+}
+
+
+/*
+ Include Module support if requested.
+*/
+
+
+#ifdef MODULE
+
+SCSI_Host_Template_T driver_template = BUSLOGIC;
+
+#include "scsi_module.c"
+
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/BusLogic.h b/i386/i386at/gpl/linux/scsi/BusLogic.h
new file mode 100644
index 00000000..69048e9f
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/BusLogic.h
@@ -0,0 +1,977 @@
+/*
+
+ Linux Driver for BusLogic MultiMaster SCSI Host Adapters
+
+ Copyright 1995 by Leonard N. Zubkoff <lnz@dandelion.com>
+
+ See BusLogic.c for licensing information.
+
+*/
+
+
+/*
+ Define types for some of the structures that interface with the rest
+ of the Linux Kernel and SCSI Subsystem.
+*/
+
+typedef struct pt_regs Registers_T;
+typedef Scsi_Host_Template SCSI_Host_Template_T;
+typedef struct Scsi_Host SCSI_Host_T;
+typedef struct scsi_disk SCSI_Disk_T;
+typedef struct scsi_cmnd SCSI_Command_T;
+typedef struct scatterlist SCSI_ScatterList_T;
+typedef kdev_t KernelDevice_T;
+
+
+/*
+ Define prototypes for the BusLogic Driver Interface Functions.
+*/
+
+const char *BusLogic_DriverInfo(SCSI_Host_T *);
+int BusLogic_DetectHostAdapter(SCSI_Host_Template_T *);
+int BusLogic_ReleaseHostAdapter(SCSI_Host_T *);
+int BusLogic_QueueCommand(SCSI_Command_T *,
+ void (*CompletionRoutine)(SCSI_Command_T *));
+int BusLogic_AbortCommand(SCSI_Command_T *);
+int BusLogic_ResetCommand(SCSI_Command_T *);
+int BusLogic_BIOSDiskParameters(SCSI_Disk_T *, KernelDevice_T, int *);
+
+
+/*
+ Define the BusLogic SCSI Host Template structure.
+*/
+
+#define BUSLOGIC \
+ { NULL, /* Next */ \
+ NULL, /* Usage Count Pointer */ \
+ NULL, /* /proc Directory Entry */ \
+ NULL, /* /proc Info Function */ \
+ "BusLogic", /* Driver Name */ \
+ BusLogic_DetectHostAdapter, /* Detect Host Adapter */ \
+ BusLogic_ReleaseHostAdapter, /* Release Host Adapter */ \
+ BusLogic_DriverInfo, /* Driver Info Function */ \
+ NULL, /* Command Function */ \
+ BusLogic_QueueCommand, /* Queue Command Function */ \
+ BusLogic_AbortCommand, /* Abort Command Function */ \
+ BusLogic_ResetCommand, /* Reset Command Function */ \
+ NULL, /* Slave Attach Function */ \
+ BusLogic_BIOSDiskParameters, /* Disk BIOS Parameters */ \
+ 0, /* Can Queue */ \
+ 0, /* This ID */ \
+ 0, /* Scatter/Gather Table Size */ \
+ 0, /* SCSI Commands per LUN */ \
+ 0, /* Present */ \
+ 1, /* Default Unchecked ISA DMA */ \
+ ENABLE_CLUSTERING } /* Enable Clustering */
+
+
+/*
+ BusLogic_DriverVersion protects the private portion of this file.
+*/
+
+#ifdef BusLogic_DriverVersion
+
+
+/*
+ Define the maximum number of BusLogic Host Adapters that are supported.
+*/
+
+#define BusLogic_MaxHostAdapters 10
+
+
+/*
+ Define the maximum number of I/O Addresses that may be probed.
+*/
+
+#define BusLogic_IO_MaxProbeAddresses 16
+
+
+/*
+ Define the maximum number of Target IDs supported by this driver.
+*/
+
+#define BusLogic_MaxTargetIDs 16
+
+
+/*
+ Define the number of Incoming and Outgoing Mailboxes used by this driver.
+ The maximum possible value is 255, since the MailboxCount parameter to the
+ Initialize Extended Mailbox command is limited to a single byte.
+*/
+
+#define BusLogic_MailboxCount 64
+
+
+/*
+ Define the number of Command Control Blocks (CCBs) to create during
+ initialization for each Host Adapter. Additional CCBs will be allocated
+ if necessary as commands are queued.
+*/
+
+#define BusLogic_InitialCCBs 32
+
+
+/*
+ Define the maximum number of Scatter/Gather Segments used by this driver.
+ For maximum performance, it is important that this limit be at least as
+ large as the maximum single request generated by the routine make_request.
+*/
+
+#define BusLogic_ScatterGatherLimit 128
+
+
+/*
+ Define the default number of Concurrent Commands per Logical Unit to allow
+ for Target Devices depending on whether or not ISA bounce buffers are
+ required.
+*/
+
+#define BusLogic_Concurrency 7
+#define BusLogic_Concurrency_BB 1
+
+
+/*
+ Define the default amount of time in seconds to wait between a Host Adapter
+ Hard Reset which initiates a SCSI Bus Reset and issuing any SCSI commands.
+ Some SCSI devices get confused if they receive SCSI commands too soon after
+ a SCSI Bus Reset.
+*/
+
+#define BusLogic_DefaultBusSettleTime 2
+
+
+/*
+ Define the possible Local Options.
+*/
+
+#define BusLogic_InhibitTargetInquiry 1
+
+
+/*
+ Define the possible Global Options.
+*/
+
+#define BusLogic_TraceProbe 1
+#define BusLogic_TraceHardReset 2
+#define BusLogic_TraceConfiguration 4
+#define BusLogic_TraceErrors 8
+
+
+/*
+ Define the possible Error Recovery Options.
+*/
+
+#define BusLogic_ErrorRecoveryDefault 0
+#define BusLogic_ErrorRecoveryHardReset 1
+#define BusLogic_ErrorRecoveryBusDeviceReset 2
+#define BusLogic_ErrorRecoveryNone 3
+
+static char
+ *BusLogic_ErrorRecoveryOptions[] =
+ { "Default", "Hard Reset", "Bus Device Reset", "None" },
+ *BusLogic_ErrorRecoveryOptions2[] =
+ { "D", "H", "B", "N" };
+
+
+/*
+ Define a boolean data type.
+*/
+
+#define false 0
+#define true 1
+typedef unsigned char boolean;
+
+
+/*
+ Define the BusLogic SCSI Host Adapter I/O Register Offsets.
+*/
+
+#define BusLogic_IO_PortCount 4 /* I/O Registers */
+#define BusLogic_ControlRegister 0 /* WO register */
+#define BusLogic_StatusRegister 0 /* RO register */
+#define BusLogic_CommandParameterRegister 1 /* WO register */
+#define BusLogic_DataInRegister 1 /* RO register */
+#define BusLogic_InterruptRegister 2 /* RO register */
+#define BusLogic_GeometryRegister 3 /* RO, undocumented */
+
+
+/*
+ Define the bits in the write-only Control Register.
+*/
+
+#define BusLogic_ReservedCR 0x0F
+#define BusLogic_SCSIBusReset 0x10
+#define BusLogic_InterruptReset 0x20
+#define BusLogic_SoftReset 0x40
+#define BusLogic_HardReset 0x80
+
+
+/*
+ Define the bits in the read-only Status Register.
+*/
+
+#define BusLogic_CommandInvalid 0x01
+#define BusLogic_ReservedSR 0x02
+#define BusLogic_DataInRegisterReady 0x04
+#define BusLogic_CommandParameterRegisterBusy 0x08
+#define BusLogic_HostAdapterReady 0x10
+#define BusLogic_InitializationRequired 0x20
+#define BusLogic_DiagnosticFailure 0x40
+#define BusLogic_DiagnosticActive 0x80
+
+
+/*
+ Define the bits in the read-only Interrupt Register.
+*/
+
+#define BusLogic_IncomingMailboxLoaded 0x01
+#define BusLogic_OutgoingMailboxAvailable 0x02
+#define BusLogic_CommandComplete 0x04
+#define BusLogic_SCSIResetState 0x08
+#define BusLogic_ReservedIR 0x70
+#define BusLogic_InterruptValid 0x80
+
+
+/*
+ Define the bits in the undocumented read-only Geometry Register.
+*/
+
+#define BusLogic_Drive0Geometry 0x03
+#define BusLogic_Drive1Geometry 0x0C
+#define BusLogic_ReservedGR 0x70
+#define BusLogic_ExtendedTranslationEnabled 0x80
+
+
+/*
+ Define the BusLogic SCSI Host Adapter Command Register Operation Codes.
+*/
+
+typedef enum
+{
+ BusLogic_TestCommandCompleteInterrupt = 0x00, /* documented */
+ BusLogic_InitializeMailbox = 0x01, /* documented */
+ BusLogic_StartMailboxCommand = 0x02, /* documented */
+ BusLogic_StartBIOSCommand = 0x03, /* documented */
+ BusLogic_InquireBoardID = 0x04, /* documented */
+ BusLogic_EnableOutgoingMailboxAvailableIRQ = 0x05, /* documented */
+ BusLogic_SetSCSISelectionTimeout = 0x06, /* documented */
+ BusLogic_SetPreemptTimeOnBus = 0x07, /* documented */
+ BusLogic_SetTimeOffBus = 0x08, /* ISA Bus only */
+ BusLogic_SetBusTransferRate = 0x09, /* ISA Bus only */
+ BusLogic_InquireInstalledDevicesID0to7 = 0x0A, /* documented */
+ BusLogic_InquireConfiguration = 0x0B, /* documented */
+ BusLogic_SetTargetMode = 0x0C, /* now undocumented */
+ BusLogic_InquireSetupInformation = 0x0D, /* documented */
+ BusLogic_WriteAdapterLocalRAM = 0x1A, /* documented */
+ BusLogic_ReadAdapterLocalRAM = 0x1B, /* documented */
+ BusLogic_WriteBusMasterChipFIFO = 0x1C, /* documented */
+ BusLogic_ReadBusMasterChipFIFO = 0x1D, /* documented */
+ BusLogic_EchoCommandData = 0x1F, /* documented */
+ BusLogic_HostAdapterDiagnostic = 0x20, /* documented */
+ BusLogic_SetAdapterOptions = 0x21, /* documented */
+ BusLogic_InquireInstalledDevicesID8to15 = 0x23, /* Wide only */
+ BusLogic_InitializeExtendedMailbox = 0x81, /* documented */
+ BusLogic_InquireFirmwareVersion3rdDigit = 0x84, /* undocumented */
+ BusLogic_InquireFirmwareVersionLetter = 0x85, /* undocumented */
+ BusLogic_InquireBoardModelNumber = 0x8B, /* undocumented */
+ BusLogic_InquireSynchronousPeriod = 0x8C, /* undocumented */
+ BusLogic_InquireExtendedSetupInformation = 0x8D, /* documented */
+ BusLogic_EnableStrictRoundRobinMode = 0x8F, /* documented */
+ BusLogic_ModifyIOAddress = 0x95, /* PCI only */
+ BusLogic_EnableWideModeCCB = 0x96 /* Wide only */
+}
+BusLogic_OperationCode_T;
+
+
+/*
+ Define the Inquire Board ID reply structure.
+*/
+
+typedef struct BusLogic_BoardID
+{
+ unsigned char BoardType;
+ unsigned char CustomFeatures;
+ unsigned char FirmwareVersion1stDigit;
+ unsigned char FirmwareVersion2ndDigit;
+}
+BusLogic_BoardID_T;
+
+
+/*
+ Define the Inquire Installed Devices ID 0 to 7 and Inquire Installed
+ Devices ID 8 to 15 reply type. For each Target ID, a byte is returned
+ where bit 0 set indicates that Logical Unit 0 exists, bit 1 set indicates
+ that Logical Unit 1 exists, and so on.
+*/
+
+typedef unsigned char BusLogic_InstalledDevices8_T[8];
+
+typedef unsigned char BusLogic_InstalledDevices_T[BusLogic_MaxTargetIDs];
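+
+/*
+  As a hypothetical example of this encoding, a reply byte of 0x03 for a
+  given Target ID indicates that Logical Units 0 and 1 exist on that
+  Target, while 0x00 indicates that no Logical Units are present.
+*/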
+
+
+/*
+ Define the Inquire Configuration reply structure.
+*/
+
+typedef struct BusLogic_Configuration
+{
+ unsigned char :5; /* Byte 0: DMA Channel */
+ boolean DMA_Channel5:1;
+ boolean DMA_Channel6:1;
+ boolean DMA_Channel7:1;
+ boolean IRQ_Channel9:1; /* Byte 1: IRQ Channel */
+ boolean IRQ_Channel10:1;
+ boolean IRQ_Channel11:1;
+ boolean IRQ_Channel12:1;
+ unsigned char :1;
+ boolean IRQ_Channel14:1;
+ boolean IRQ_Channel15:1;
+ unsigned char :1;
+ unsigned char HostAdapterID:4; /* Byte 2: Host Adapter ID */
+ unsigned char :4;
+}
+BusLogic_Configuration_T;
+
+
+/*
+ Define the Inquire Setup Information reply structure.
+*/
+
+typedef struct BusLogic_SynchronousValue
+{
+ unsigned char Offset:4;
+ unsigned char TransferPeriod:3;
+ boolean Synchronous:1;
+}
+BusLogic_SynchronousValue_T;
+
+typedef BusLogic_SynchronousValue_T
+ BusLogic_SynchronousValues8_T[8];
+
+typedef BusLogic_SynchronousValue_T
+ BusLogic_SynchronousValues_T[BusLogic_MaxTargetIDs];
+
+typedef struct BusLogic_SetupInformation
+{
+ boolean SynchronousInitiationEnabled:1; /* Byte 0 */
+ boolean ParityCheckEnabled:1;
+ unsigned char :6;
+ unsigned char BusTransferRate; /* Byte 1 */
+ unsigned char PreemptTimeOnBus; /* Byte 2 */
+ unsigned char TimeOffBus; /* Byte 3 */
+ unsigned char MailboxCount; /* Byte 4 */
+ unsigned char MailboxAddress[3]; /* Bytes 5-7 */
+ BusLogic_SynchronousValues8_T SynchronousValuesID0to7; /* Bytes 8-15 */
+ unsigned char DisconnectPermittedID0to7; /* Byte 16 */
+ unsigned char Signature; /* Byte 17 */
+ unsigned char CharacterD; /* Byte 18 */
+ unsigned char BusLetter; /* Byte 19 */
+ unsigned char :8; /* Byte 20 */
+ unsigned char :8; /* Byte 21 */
+ BusLogic_SynchronousValues8_T SynchronousValuesID8to15; /* Bytes 22-29 */
+ unsigned char DisconnectPermittedID8to15; /* Byte 30 */
+}
+BusLogic_SetupInformation_T;
+
+
+/*
+ Define the Initialize Extended Mailbox request structure.
+*/
+
+typedef struct BusLogic_ExtendedMailboxRequest
+{
+ unsigned char MailboxCount;
+ void *BaseMailboxAddress __attribute__ ((packed));
+}
+BusLogic_ExtendedMailboxRequest_T;
+
+
+/*
+ Define the Inquire Firmware Version 3rd Digit reply type.
+*/
+
+typedef unsigned char BusLogic_FirmwareVersion3rdDigit_T;
+
+
+/*
+ Define the Inquire Firmware Version Letter reply type.
+*/
+
+typedef unsigned char BusLogic_FirmwareVersionLetter_T;
+
+
+/*
+ Define the Inquire Board Model Number reply type.
+*/
+
+typedef unsigned char BusLogic_BoardModelNumber_T[5];
+
+
+/*
+ Define the Inquire Synchronous Period reply type. For each Target ID, a byte
+ is returned which represents the Synchronous Transfer Period in units of 10
+ nanoseconds.
+*/
+
+typedef unsigned char BusLogic_SynchronousPeriod_T[BusLogic_MaxTargetIDs];
+
+
+/*
+ Define the Inquire Extended Setup Information reply structure.
+*/
+
+typedef struct BusLogic_ExtendedSetupInformation
+{
+ unsigned char BusType; /* Byte 0 */
+ unsigned char BIOS_Address; /* Byte 1 */
+ unsigned short ScatterGatherLimit; /* Bytes 2-3 */
+ unsigned char MailboxCount; /* Byte 4 */
+ void *BaseMailboxAddress __attribute__ ((packed)); /* Bytes 5-8 */
+ struct { unsigned char :6; /* Byte 9 */
+ boolean LevelSensitiveInterrupts:1;
+ unsigned char :1; } Misc;
+ unsigned char FirmwareRevision[3]; /* Bytes 10-12 */
+ boolean HostWideSCSI:1; /* Byte 13 Bit 0 */
+ boolean HostDifferentialSCSI:1; /* Byte 13 Bit 1 */
+ unsigned char :6;
+}
+BusLogic_ExtendedSetupInformation_T;
+
+
+/*
+ Define the Enable Strict Round Robin Mode request type.
+*/
+
+#define BusLogic_AggressiveRoundRobinMode 0x00
+#define BusLogic_StrictRoundRobinMode 0x01
+
+typedef unsigned char BusLogic_RoundRobinModeRequest_T;
+
+
+/*
+ Define the Modify I/O Address request type. On PCI Host Adapters, the
+ Modify I/O Address command allows modification of the ISA compatible I/O
+ Address that the Host Adapter responds to; it does not affect the PCI
+ compliant I/O Address assigned at system initialization.
+*/
+
+#define BusLogic_ModifyIO_330 0x00
+#define BusLogic_ModifyIO_334 0x01
+#define BusLogic_ModifyIO_230 0x02
+#define BusLogic_ModifyIO_234 0x03
+#define BusLogic_ModifyIO_130 0x04
+#define BusLogic_ModifyIO_134 0x05
+#define BusLogic_ModifyIO_Disable 0x06
+#define BusLogic_ModifyIO_Disable2 0x07
+
+typedef unsigned char BusLogic_ModifyIOAddressRequest_T;
+
+
+/*
+ Define the Enable Wide Mode SCSI CCB request type. Wide Mode CCBs are
+ necessary to support more than 8 Logical Units per Target.
+*/
+
+#define BusLogic_NormalModeCCB 0x00
+#define BusLogic_WideModeCCB 0x01
+
+typedef unsigned char BusLogic_WideModeCCBRequest_T;
+
+
+/*
+ Define the Requested Reply Length type used by the Inquire Setup Information,
+ Inquire Board Model Number, Inquire Synchronous Period, and Inquire Extended
+ Setup Information commands.
+*/
+
+typedef unsigned char BusLogic_RequestedReplyLength_T;
+
+
+/*
+ Define a Lock data structure. Until a true symmetric multiprocessing kernel
+ is available, locking is implemented as saving the processor flags and
+ disabling interrupts, and unlocking restores the saved processor flags.
+*/
+
+typedef unsigned long BusLogic_Lock_T;
+
+
+/*
+ Define the Outgoing Mailbox Action Codes.
+*/
+
+typedef enum
+{
+ BusLogic_OutgoingMailboxFree = 0,
+ BusLogic_MailboxStartCommand = 1,
+ BusLogic_MailboxAbortCommand = 2
+}
+BusLogic_ActionCode_T;
+
+
+/*
+ Define the Incoming Mailbox Completion Codes.
+*/
+
+typedef enum
+{
+ BusLogic_IncomingMailboxFree = 0,
+ BusLogic_CommandCompletedWithoutError = 1,
+ BusLogic_CommandAbortedAtHostRequest = 2,
+ BusLogic_AbortedCommandNotFound = 3,
+ BusLogic_CommandCompletedWithError = 4
+}
+BusLogic_CompletionCode_T;
+
+
+/*
+ Define the Command Control Block (CCB) Opcodes.
+*/
+
+typedef enum
+{
+ BusLogic_InitiatorCCB = 0x00,
+ BusLogic_TargetCCB = 0x01,
+ BusLogic_InitiatorCCB_ScatterGather = 0x02,
+ BusLogic_InitiatorCCB_ResidualDataLength = 0x03,
+ BusLogic_InitiatorCCB_ScatterGatherResidual = 0x04,
+ BusLogic_SCSIBusDeviceReset = 0x81
+}
+BusLogic_CCB_Opcode_T;
+
+
+/*
+ Define the CCB Data Direction Codes.
+*/
+
+typedef enum
+{
+ BusLogic_UncheckedDataTransfer = 0x00,
+ BusLogic_DataInLengthChecked = 0x01,
+ BusLogic_DataOutLengthChecked = 0x02,
+ BusLogic_NoDataTransfer = 0x03
+}
+BusLogic_DataDirection_T;
+
+
+/*
+ Define the Host Adapter Status Codes.
+*/
+
+typedef enum
+{
+ BusLogic_CommandCompletedNormally = 0x00,
+ BusLogic_LinkedCommandCompleted = 0x0A,
+ BusLogic_LinkedCommandCompletedWithFlag = 0x0B,
+ BusLogic_SCSISelectionTimeout = 0x11,
+ BusLogic_DataOverUnderRun = 0x12,
+ BusLogic_UnexpectedBusFree = 0x13,
+ BusLogic_InvalidBusPhaseRequested = 0x14,
+ BusLogic_InvalidOutgoingMailboxActionCode = 0x15,
+ BusLogic_InvalidCommandOperationCode = 0x16,
+ BusLogic_LinkedCCBhasInvalidLUN = 0x17,
+ BusLogic_InvalidCommandParameter = 0x1A,
+ BusLogic_AutoRequestSenseFailed = 0x1B,
+ BusLogic_TaggedQueuingMessageRejected = 0x1C,
+ BusLogic_UnsupportedMessageReceived = 0x1D,
+ BusLogic_HostAdapterHardwareFailed = 0x20,
+ BusLogic_TargetFailedResponseToATN = 0x21,
+ BusLogic_HostAdapterAssertedRST = 0x22,
+ BusLogic_OtherDeviceAssertedRST = 0x23,
+ BusLogic_TargetDeviceReconnectedImproperly = 0x24,
+ BusLogic_HostAdapterAssertedBusDeviceReset = 0x25,
+ BusLogic_AbortQueueGenerated = 0x26,
+ BusLogic_HostAdapterSoftwareError = 0x27,
+ BusLogic_HostAdapterHardwareTimeoutError = 0x30,
+ BusLogic_SCSIParityErrorDetected = 0x34
+}
+BusLogic_HostAdapterStatus_T;
+
+
+/*
+ Define the SCSI Target Device Status Codes.
+*/
+
+typedef enum
+{
+ BusLogic_OperationGood = 0x00,
+ BusLogic_CheckCondition = 0x02,
+ BusLogic_DeviceBusy = 0x08
+}
+BusLogic_TargetDeviceStatus_T;
+
+
+/*
+ Define the Queue Tag Codes.
+*/
+
+typedef enum
+{
+ BusLogic_SimpleQueueTag = 0x00,
+ BusLogic_HeadOfQueueTag = 0x01,
+ BusLogic_OrderedQueueTag = 0x02,
+ BusLogic_ReservedQT = 0x03
+}
+BusLogic_QueueTag_T;
+
+
+/*
+ Define the SCSI Command Descriptor Block (CDB).
+*/
+
+#define BusLogic_CDB_MaxLength 12
+
+typedef unsigned char SCSI_CDB_T[BusLogic_CDB_MaxLength];
+
+
+/*
+ Define the SCSI Sense Data.
+*/
+
+#define BusLogic_SenseDataMaxLength 255
+
+typedef unsigned char SCSI_SenseData_T[BusLogic_SenseDataMaxLength];
+
+
+/*
+ Define the Scatter/Gather Segment structure required by the Host Adapter
+ Firmware Interface.
+*/
+
+typedef struct BusLogic_ScatterGatherSegment
+{
+ unsigned long SegmentByteCount;
+ void *SegmentDataPointer;
+}
+BusLogic_ScatterGatherSegment_T;
+
+
+/*
+ Define the 32 Bit Mode Command Control Block (CCB) structure. The first 40
+ bytes are defined by the Host Adapter Firmware Interface. The remaining
+ components are defined by the Linux BusLogic Driver. Wide Mode CCBs differ
+ from standard 32 Bit Mode CCBs only in having the TagEnable and QueueTag
+ fields moved from byte 17 to byte 1, and the Logical Unit field in byte 17
+ expanded to 6 bits; unfortunately, using a union of structs containing
+ enumeration type bitfields to provide both definitions leads to packing
+ problems, so the following definition is used which requires setting
+ TagEnable to Logical Unit bit 5 in Wide Mode CCBs.
+*/
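+
+/*
+  A minimal sketch of the packing convention described above, assuming a
+  hypothetical 6 bit Logical Unit number "LUN" destined for a Wide Mode
+  Host Adapter:
+
+      CCB->LogicalUnit = LUN & 0x1F;         (Logical Unit bits 0-4)
+      CCB->TagEnable = (LUN >> 5) & 1;       (Logical Unit bit 5)
+
+  Standard 32 Bit Mode CCBs use TagEnable and QueueTag in Byte 17 exactly
+  as their names suggest.
+*/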
+
+typedef struct BusLogic_CCB
+{
+ /*
+ BusLogic Host Adapter Firmware Portion.
+ */
+ BusLogic_CCB_Opcode_T Opcode:8; /* Byte 0 */
+ unsigned char :3; /* Byte 1 Bits 0-2 */
+ BusLogic_DataDirection_T DataDirection:2; /* Byte 1 Bits 3-4 */
+ boolean WideModeTagEnable:1; /* Byte 1 Bit 5 */
+ BusLogic_QueueTag_T WideModeQueueTag:2; /* Byte 1 Bits 6-7 */
+ unsigned char CDB_Length; /* Byte 2 */
+ unsigned char SenseDataLength; /* Byte 3 */
+ unsigned long DataLength; /* Bytes 4-7 */
+ void *DataPointer; /* Bytes 8-11 */
+ unsigned char :8; /* Byte 12 */
+ unsigned char :8; /* Byte 13 */
+ BusLogic_HostAdapterStatus_T HostAdapterStatus:8; /* Byte 14 */
+ BusLogic_TargetDeviceStatus_T TargetDeviceStatus:8; /* Byte 15 */
+ unsigned char TargetID; /* Byte 16 */
+ unsigned char LogicalUnit:5; /* Byte 17 Bits 0-4 */
+ boolean TagEnable:1; /* Byte 17 Bit 5 */
+ BusLogic_QueueTag_T QueueTag:2; /* Byte 17 Bits 6-7 */
+ SCSI_CDB_T CDB; /* Bytes 18-29 */
+ unsigned char :8; /* Byte 30 */
+ unsigned char :8; /* Byte 31 */
+ unsigned long :32; /* Bytes 32-35 */
+ SCSI_SenseData_T *SenseDataPointer; /* Bytes 36-39 */
+ /*
+ BusLogic Linux Driver Portion.
+ */
+ struct BusLogic_HostAdapter *HostAdapter;
+ SCSI_Command_T *Command;
+ enum { BusLogic_CCB_Free = 0,
+ BusLogic_CCB_Active = 1,
+ BusLogic_CCB_Completed = 2,
+ BusLogic_CCB_Reset = 3 } Status;
+ BusLogic_CompletionCode_T MailboxCompletionCode;
+ unsigned int SerialNumber;
+ struct BusLogic_CCB *Next;
+ struct BusLogic_CCB *NextAll;
+ BusLogic_ScatterGatherSegment_T
+ ScatterGatherList[BusLogic_ScatterGatherLimit];
+}
+BusLogic_CCB_T;
+
+
+/*
+ Define the 32 Bit Mode Outgoing Mailbox structure.
+*/
+
+typedef struct BusLogic_OutgoingMailbox
+{
+ BusLogic_CCB_T *CCB;
+ unsigned long :24;
+ BusLogic_ActionCode_T ActionCode:8;
+}
+BusLogic_OutgoingMailbox_T;
+
+
+/*
+ Define the 32 Bit Mode Incoming Mailbox structure.
+*/
+
+typedef struct BusLogic_IncomingMailbox
+{
+ BusLogic_CCB_T *CCB;
+ BusLogic_HostAdapterStatus_T HostAdapterStatus:8;
+ BusLogic_TargetDeviceStatus_T TargetDeviceStatus:8;
+ unsigned char :8;
+ BusLogic_CompletionCode_T CompletionCode:8;
+}
+BusLogic_IncomingMailbox_T;
+
+
+/*
+ Define the possible Bus Types.
+*/
+
+typedef enum
+{
+ BusLogic_Unknown_Bus = 0,
+ BusLogic_ISA_Bus = 1,
+ BusLogic_MCA_Bus = 2,
+ BusLogic_EISA_Bus = 3,
+ BusLogic_VESA_Bus = 4,
+ BusLogic_PCI_Bus = 5
+}
+BusLogic_BusType_T;
+
+static char
+ *BusLogic_BusNames[] =
+ { "Unknown", "ISA", "MCA", "EISA", "VESA", "PCI" };
+
+
+/*
+ Define the Linux BusLogic Driver Command Line Entry structure.
+*/
+
+typedef struct BusLogic_CommandLineEntry
+{
+ unsigned short IO_Address;
+ unsigned short Concurrency;
+ unsigned short BusSettleTime;
+ unsigned short LocalOptions;
+ unsigned short TaggedQueuingPermitted;
+ unsigned short TaggedQueuingPermittedMask;
+ unsigned char ErrorRecoveryOption[BusLogic_MaxTargetIDs];
+}
+BusLogic_CommandLineEntry_T;
+
+
+/*
+ Define the Linux BusLogic Driver Host Adapter structure.
+*/
+
+typedef struct BusLogic_HostAdapter
+{
+ SCSI_Host_T *SCSI_Host;
+ unsigned char HostNumber;
+ unsigned char ModelName[9];
+ unsigned char FirmwareVersion[6];
+ unsigned char BoardName[18];
+ unsigned char InterruptLabel[62];
+ unsigned short IO_Address;
+ unsigned char IRQ_Channel;
+ unsigned char DMA_Channel;
+ unsigned char SCSI_ID;
+ BusLogic_BusType_T BusType:3;
+ boolean IRQ_ChannelAcquired:1;
+ boolean DMA_ChannelAcquired:1;
+ boolean SynchronousInitiation:1;
+ boolean ParityChecking:1;
+ boolean ExtendedTranslation:1;
+ boolean LevelSensitiveInterrupts:1;
+ boolean HostWideSCSI:1;
+ boolean HostDifferentialSCSI:1;
+ boolean HostAdapterResetPending:1;
+ boolean BounceBuffersRequired:1;
+ volatile boolean HostAdapterCommandCompleted:1;
+ unsigned short HostAdapterScatterGatherLimit;
+ unsigned short DriverScatterGatherLimit;
+ unsigned short MaxTargetIDs;
+ unsigned short MaxLogicalUnits;
+ unsigned short Concurrency;
+ unsigned short BusSettleTime;
+ unsigned short LocalOptions;
+ unsigned short DisconnectPermitted;
+ unsigned short TaggedQueuingPermitted;
+ unsigned long BIOS_Address;
+ BusLogic_InstalledDevices_T InstalledDevices;
+ BusLogic_SynchronousValues_T SynchronousValues;
+ BusLogic_SynchronousPeriod_T SynchronousPeriod;
+ BusLogic_Lock_T Lock;
+ struct BusLogic_HostAdapter *Next;
+ BusLogic_CommandLineEntry_T *CommandLineEntry;
+ BusLogic_CCB_T *All_CCBs;
+ BusLogic_CCB_T *Free_CCBs;
+ unsigned char ErrorRecoveryOption[BusLogic_MaxTargetIDs];
+ unsigned char CommandSuccessfulFlag[BusLogic_MaxTargetIDs];
+ unsigned long ReadWriteOperationCount[BusLogic_MaxTargetIDs];
+ unsigned char QueuedOperationCount[BusLogic_MaxTargetIDs];
+ unsigned long LastSequencePoint[BusLogic_MaxTargetIDs];
+ BusLogic_OutgoingMailbox_T *FirstOutgoingMailbox;
+ BusLogic_OutgoingMailbox_T *LastOutgoingMailbox;
+ BusLogic_OutgoingMailbox_T *NextOutgoingMailbox;
+ BusLogic_IncomingMailbox_T *FirstIncomingMailbox;
+ BusLogic_IncomingMailbox_T *LastIncomingMailbox;
+ BusLogic_IncomingMailbox_T *NextIncomingMailbox;
+ BusLogic_OutgoingMailbox_T OutgoingMailboxes[BusLogic_MailboxCount];
+ BusLogic_IncomingMailbox_T IncomingMailboxes[BusLogic_MailboxCount];
+}
+BusLogic_HostAdapter_T;
+
+
+/*
+ Define a symbolic structure for the BIOS Disk Parameters.
+*/
+
+typedef struct BIOS_DiskParameters
+{
+ int Heads;
+ int Sectors;
+ int Cylinders;
+}
+BIOS_DiskParameters_T;
+
+
+/*
+ BusLogic_LockHostAdapter acquires exclusive access to Host Adapter.
+*/
+
+static inline
+void BusLogic_LockHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ save_flags(HostAdapter->Lock);
+ cli();
+}
+
+
+/*
+ BusLogic_UnlockHostAdapter releases exclusive access to Host Adapter.
+*/
+
+static inline
+void BusLogic_UnlockHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ restore_flags(HostAdapter->Lock);
+}
+
+
+/*
+ BusLogic_LockHostAdapterID acquires exclusive access to Host Adapter,
+ but is only called when interrupts are disabled.
+*/
+
+static inline
+void BusLogic_LockHostAdapterID(BusLogic_HostAdapter_T *HostAdapter)
+{
+}
+
+
+/*
+ BusLogic_UnlockHostAdapterID releases exclusive access to Host Adapter,
+ but is only called when interrupts are disabled.
+*/
+
+static inline
+void BusLogic_UnlockHostAdapterID(BusLogic_HostAdapter_T *HostAdapter)
+{
+}
+
+
+/*
+ Define functions to provide an abstraction for reading and writing the
+ Host Adapter I/O Registers.
+*/
+
+static inline
+void BusLogic_WriteControlRegister(BusLogic_HostAdapter_T *HostAdapter,
+ unsigned char Value)
+{
+ outb(Value, HostAdapter->IO_Address + BusLogic_ControlRegister);
+}
+
+static inline
+unsigned char BusLogic_ReadStatusRegister(BusLogic_HostAdapter_T *HostAdapter)
+{
+ return inb(HostAdapter->IO_Address + BusLogic_StatusRegister);
+}
+
+static inline
+void BusLogic_WriteCommandParameterRegister(BusLogic_HostAdapter_T *HostAdapter,
+ unsigned char Value)
+{
+ outb(Value, HostAdapter->IO_Address + BusLogic_CommandParameterRegister);
+}
+
+static inline
+unsigned char BusLogic_ReadDataInRegister(BusLogic_HostAdapter_T *HostAdapter)
+{
+ return inb(HostAdapter->IO_Address + BusLogic_DataInRegister);
+}
+
+static inline
+unsigned char BusLogic_ReadInterruptRegister(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ return inb(HostAdapter->IO_Address + BusLogic_InterruptRegister);
+}
+
+static inline
+unsigned char BusLogic_ReadGeometryRegister(BusLogic_HostAdapter_T *HostAdapter)
+{
+ return inb(HostAdapter->IO_Address + BusLogic_GeometryRegister);
+}
+
+
+/*
+ BusLogic_StartMailboxScan issues a Start Mailbox Scan command, which
+ notifies the Host Adapter that an entry has been made in an Outgoing
+ Mailbox.
+*/
+
+static inline
+void BusLogic_StartMailboxScan(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_WriteCommandParameterRegister(HostAdapter,
+ BusLogic_StartMailboxCommand);
+}
+
+
+/*
+ BusLogic_Delay waits for Seconds to elapse.
+*/
+
+static inline void BusLogic_Delay(int Seconds)
+{
+ unsigned long TimeoutJiffies = jiffies + Seconds * HZ;
+ unsigned long ProcessorFlags;
+ save_flags(ProcessorFlags);
+ sti();
+ while (jiffies < TimeoutJiffies) ;
+ restore_flags(ProcessorFlags);
+}
+
+
+/*
+ Define prototypes for the forward referenced BusLogic Driver
+ Internal Functions.
+*/
+
+static void BusLogic_InterruptHandler(int, Registers_T *);
+static int BusLogic_ResetHostAdapter(BusLogic_HostAdapter_T *,
+ SCSI_Command_T *);
+
+
+#endif /* BusLogic_DriverVersion */
diff --git a/i386/i386at/gpl/linux/scsi/NCR5380.h b/i386/i386at/gpl/linux/scsi/NCR5380.h
new file mode 100644
index 00000000..3a121810
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/NCR5380.h
@@ -0,0 +1,363 @@
+/*
+ * NCR 5380 defines
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * DISTRIBUTION RELEASE 6
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * $Log: NCR5380.h,v $
+ * Revision 1.1.1.1 1996/10/30 01:39:59 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1996/03/25 20:25:09 goel
+ * Linux driver merge.
+ *
+ */
+
+#ifndef NCR5380_H
+#define NCR5380_H
+
+#define NCR5380_PUBLIC_RELEASE 6
+#define NCR53C400_PUBLIC_RELEASE 2
+
+#define NDEBUG_ARBITRATION 0x1
+#define NDEBUG_AUTOSENSE 0x2
+#define NDEBUG_DMA 0x4
+#define NDEBUG_HANDSHAKE 0x8
+#define NDEBUG_INFORMATION 0x10
+#define NDEBUG_INIT 0x20
+#define NDEBUG_INTR 0x40
+#define NDEBUG_LINKED 0x80
+#define NDEBUG_MAIN 0x100
+#define NDEBUG_NO_DATAOUT 0x200
+#define NDEBUG_NO_WRITE 0x400
+#define NDEBUG_PIO 0x800
+#define NDEBUG_PSEUDO_DMA 0x1000
+#define NDEBUG_QUEUES 0x2000
+#define NDEBUG_RESELECTION 0x4000
+#define NDEBUG_SELECTION 0x8000
+#define NDEBUG_USLEEP 0x10000
+#define NDEBUG_LAST_BYTE_SENT 0x20000
+#define NDEBUG_RESTART_SELECT 0x40000
+#define NDEBUG_EXTENDED 0x80000
+#define NDEBUG_C400_PREAD 0x100000
+#define NDEBUG_C400_PWRITE 0x200000
+#define NDEBUG_LISTS 0x400000
+
+/*
+ * The contents of the OUTPUT DATA register are asserted on the bus when
+ * either arbitration is occurring or the phase-indicating signals
+ * (IO, CD, MSG) in the TARGET COMMAND register and the ASSERT DATA
+ * bit in the INITIATOR COMMAND register are set.
+ */
+
+#define OUTPUT_DATA_REG 0 /* wo DATA lines on SCSI bus */
+#define CURRENT_SCSI_DATA_REG 0 /* ro same */
+
+#define INITIATOR_COMMAND_REG 1 /* rw */
+#define ICR_ASSERT_RST 0x80 /* rw Set to assert RST */
+#define ICR_ARBITRATION_PROGRESS 0x40 /* ro Indicates arbitration complete */
+#define ICR_TRI_STATE 0x40 /* wo Set to tri-state drivers */
+#define ICR_ARBITRATION_LOST 0x20 /* ro Indicates arbitration lost */
+#define ICR_DIFF_ENABLE 0x20 /* wo Set to enable diff. drivers */
+#define ICR_ASSERT_ACK 0x10 /* rw ini Set to assert ACK */
+#define ICR_ASSERT_BSY 0x08 /* rw Set to assert BSY */
+#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */
+#define ICR_ASSERT_ATN 0x02 /* rw Set to assert ATN */
+#define ICR_ASSERT_DATA 0x01 /* rw SCSI_DATA_REG is asserted */
+
+#ifdef DIFFERENTIAL
+#define ICR_BASE ICR_DIFF_ENABLE
+#else
+#define ICR_BASE 0
+#endif
+
+#define MODE_REG 2
+/*
+ * Note : BLOCK_DMA code will keep DRQ asserted for the duration of the
+ * transfer, causing the chip to hog the bus. You probably don't want
+ * this.
+ */
+#define MR_BLOCK_DMA_MODE 0x80 /* rw block mode DMA */
+#define MR_TARGET 0x40 /* rw target mode */
+#define MR_ENABLE_PAR_CHECK 0x20 /* rw enable parity checking */
+#define MR_ENABLE_PAR_INTR 0x10 /* rw enable bad parity interrupt */
+#define MR_ENABLE_EOP_INTR 0x08 /* rw enable eop interrupt */
+#define MR_MONITOR_BSY 0x04 /* rw enable int on unexpected bsy fail */
+#define MR_DMA_MODE 0x02 /* rw DMA / pseudo DMA mode */
+#define MR_ARBITRATE 0x01 /* rw start arbitration */
+
+#ifdef PARITY
+#define MR_BASE MR_ENABLE_PAR_CHECK
+#else
+#define MR_BASE 0
+#endif
+
+#define TARGET_COMMAND_REG 3
+#define TCR_LAST_BYTE_SENT 0x80 /* ro DMA done */
+#define TCR_ASSERT_REQ 0x08 /* tgt rw assert REQ */
+#define TCR_ASSERT_MSG 0x04 /* tgt rw assert MSG */
+#define TCR_ASSERT_CD 0x02 /* tgt rw assert CD */
+#define TCR_ASSERT_IO 0x01 /* tgt rw assert IO */
+
+#define STATUS_REG 4 /* ro */
+/*
+ * Note : a set bit indicates an active signal, driven by us or another
+ * device.
+ */
+#define SR_RST 0x80
+#define SR_BSY 0x40
+#define SR_REQ 0x20
+#define SR_MSG 0x10
+#define SR_CD 0x08
+#define SR_IO 0x04
+#define SR_SEL 0x02
+#define SR_DBP 0x01
+
+/*
+ * Setting a bit in this register will cause an interrupt to be generated when
+ * BSY is false and SEL true and this bit is asserted on the bus.
+ */
+#define SELECT_ENABLE_REG 4 /* wo */
+
+#define BUS_AND_STATUS_REG 5 /* ro */
+#define BASR_END_DMA_TRANSFER 0x80 /* ro set on end of transfer */
+#define BASR_DRQ 0x40 /* ro mirror of DRQ pin */
+#define BASR_PARITY_ERROR 0x20 /* ro parity error detected */
+#define BASR_IRQ 0x10 /* ro mirror of IRQ pin */
+#define BASR_PHASE_MATCH 0x08 /* ro Set when MSG CD IO match TCR */
+#define BASR_BUSY_ERROR 0x04 /* ro Unexpected change to inactive state */
+#define BASR_ATN 0x02 /* ro BUS status */
+#define BASR_ACK 0x01 /* ro BUS status */
+
+/* Write any value to this register to start a DMA send */
+#define START_DMA_SEND_REG 5 /* wo */
+
+/*
+ * Used in DMA transfer mode, data is latched from the SCSI bus on
+ * the falling edge of REQ (ini) or ACK (tgt)
+ */
+#define INPUT_DATA_REG 6 /* ro */
+
+/* Write any value to this register to start a DMA receive */
+#define START_DMA_TARGET_RECEIVE_REG 6 /* wo */
+
+/* Read this register to clear interrupt conditions */
+#define RESET_PARITY_INTERRUPT_REG 7 /* ro */
+
+/* Write any value to this register to start an ini mode DMA receive */
+#define START_DMA_INITIATOR_RECEIVE_REG 7 /* wo */
+
+#define C400_CONTROL_STATUS_REG NCR53C400_register_offset-8 /* rw */
+
+#define CSR_RESET 0x80 /* wo Resets 53c400 */
+#define CSR_53C80_REG 0x80 /* ro 5380 registers busy */
+#define CSR_TRANS_DIR 0x40 /* rw Data transfer direction */
+#define CSR_SCSI_BUFF_INTR 0x20 /* rw Enable int on transfer ready */
+#define CSR_53C80_INTR 0x10 /* rw Enable 53c80 interrupts */
+#define CSR_SHARED_INTR 0x08 /* rw Interrupt sharing */
+#define CSR_HOST_BUF_NOT_RDY 0x04 /* ro Is Host buffer ready */
+#define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer ready */
+#define CSR_GATED_53C80_IRQ 0x01 /* ro Last block xferred */
+
+#if 0
+#define CSR_BASE CSR_SCSI_BUFF_INTR | CSR_53C80_INTR
+#else
+#define CSR_BASE CSR_53C80_INTR
+#endif
+
+/* Number of 128-byte blocks to be transferred */
+#define C400_BLOCK_COUNTER_REG NCR53C400_register_offset-7 /* rw */
+
+/* Resume transfer after disconnect */
+#define C400_RESUME_TRANSFER_REG NCR53C400_register_offset-6 /* wo */
+
+/* Access to host buffer stack */
+#define C400_HOST_BUFFER NCR53C400_register_offset-4 /* rw */
+
+
+/* Note : PHASE_* macros are based on the values of the STATUS register */
+#define PHASE_MASK (SR_MSG | SR_CD | SR_IO)
+
+#define PHASE_DATAOUT 0
+#define PHASE_DATAIN SR_IO
+#define PHASE_CMDOUT SR_CD
+#define PHASE_STATIN (SR_CD | SR_IO)
+#define PHASE_MSGOUT (SR_MSG | SR_CD)
+#define PHASE_MSGIN (SR_MSG | SR_CD | SR_IO)
+#define PHASE_UNKNOWN 0xff
+
+/*
+ * Convert status register phase to something we can use to set phase in
+ * the target register so we can get phase mismatch interrupts on DMA
+ * transfers.
+ */
+
+#define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
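+
+/*
+ * Worked example of the conversion above: PHASE_DATAIN is SR_IO (0x04),
+ * and 0x04 >> 2 == 0x01 == TCR_ASSERT_IO; likewise PHASE_MSGIN is
+ * (SR_MSG | SR_CD | SR_IO) == 0x1c, and 0x1c >> 2 == 0x07 ==
+ * (TCR_ASSERT_MSG | TCR_ASSERT_CD | TCR_ASSERT_IO).
+ */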
+
+/*
+ * The internal should_disconnect() function returns these based on the
+ * expected length of a disconnect if a device supports disconnect/
+ * reconnect.
+ */
+
+#define DISCONNECT_NONE 0
+#define DISCONNECT_TIME_TO_DATA 1
+#define DISCONNECT_LONG 2
+
+/*
+ * These are "special" values for the tag parameter passed to NCR5380_select.
+ */
+
+#define TAG_NEXT -1 /* Use next free tag */
+#define TAG_NONE -2 /*
+ * Establish I_T_L nexus instead of I_T_L_Q
+ * even on SCSI-II devices.
+ */
+
+/*
+ * These are "special" values for the irq and dma_channel fields of the
+ * Scsi_Host structure
+ */
+
+#define IRQ_NONE 255
+#define DMA_NONE 255
+#define IRQ_AUTO 254
+#define DMA_AUTO 254
+
+#define FLAG_HAS_LAST_BYTE_SENT 1 /* NCR53c81 or better */
+#define FLAG_CHECK_LAST_BYTE_SENT 2 /* Only test once */
+#define FLAG_NCR53C400 4 /* NCR53c400 */
+#define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */
+
+#ifndef ASM
+struct NCR5380_hostdata {
+ NCR5380_implementation_fields; /* implementation specific */
+ unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */
+ unsigned char targets_present; /* targets we have connected
+ to, so we can call a select
+ failure a retryable condition */
+ volatile unsigned char busy[8]; /* index = target, bit = lun */
+#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
+ volatile int dma_len; /* requested length of DMA */
+#endif
+ volatile unsigned char last_message; /* last message OUT */
+ volatile Scsi_Cmnd *connected; /* currently connected command */
+ volatile Scsi_Cmnd *issue_queue; /* waiting to be issued */
+ volatile Scsi_Cmnd *disconnected_queue; /* waiting for reconnect */
+ volatile int restart_select; /* we have disconnected,
+ used to restart
+ NCR5380_select() */
+ volatile unsigned aborted:1; /* flag, says aborted */
+ int flags;
+#ifdef USLEEP
+ unsigned long time_expires; /* in jiffies, set prior to sleeping */
+ struct Scsi_Host *next_timer;
+#endif
+};
+
+#ifdef __KERNEL__
+static struct Scsi_Host *first_instance; /* linked list of 5380's */
+
+#if defined(AUTOPROBE_IRQ)
+static int NCR5380_probe_irq (struct Scsi_Host *instance, int possible);
+#endif
+static void NCR5380_init (struct Scsi_Host *instance, int flags);
+static void NCR5380_information_transfer (struct Scsi_Host *instance);
+static void NCR5380_intr (int irq, struct pt_regs * regs);
+static void NCR5380_main (void);
+static void NCR5380_print_options (struct Scsi_Host *instance);
+static void NCR5380_print_phase (struct Scsi_Host *instance);
+static void NCR5380_print (struct Scsi_Host *instance);
+#ifndef NCR5380_abort
+static
+#endif
+int NCR5380_abort (Scsi_Cmnd *cmd);
+#ifndef NCR5380_reset
+static
+#endif
+int NCR5380_reset (Scsi_Cmnd *cmd);
+#ifndef NCR5380_queue_command
+static
+#endif
+int NCR5380_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
+
+
+static void NCR5380_reselect (struct Scsi_Host *instance);
+static int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag);
+#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL)
+static int NCR5380_transfer_dma (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data);
+#endif
+static int NCR5380_transfer_pio (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data);
+
+#if (defined(REAL_DMA) || defined(REAL_DMA_POLL)) && defined(i386)
+static __inline__ int NCR5380_i386_dma_setup (struct Scsi_Host *instance,
+ unsigned char *ptr, unsigned int count, unsigned char mode) {
+ unsigned limit;
+
+ if (instance->dma_channel <=3) {
+ if (count > 65536)
+ count = 65536;
+ limit = 65536 - (((unsigned) ptr) & 0xFFFF);
+ } else {
+ if (count > 65536 * 2)
+ count = 65536 * 2;
+ limit = 65536* 2 - (((unsigned) ptr) & 0x1FFFF);
+ }
+
+ if (count > limit) count = limit;
+
+ if ((count & 1) || (((unsigned) ptr) & 1))
+ panic ("scsi%d : attempted unaligned DMA transfer\n", instance->host_no);
+ cli();
+ disable_dma(instance->dma_channel);
+ clear_dma_ff(instance->dma_channel);
+ set_dma_addr(instance->dma_channel, (unsigned int) ptr);
+ set_dma_count(instance->dma_channel, count);
+ set_dma_mode(instance->dma_channel, mode);
+ enable_dma(instance->dma_channel);
+ sti();
+ return count;
+}
+
+static __inline__ int NCR5380_i386_dma_write_setup (struct Scsi_Host *instance,
+ unsigned char *src, unsigned int count) {
+ return NCR5380_i386_dma_setup (instance, src, count, DMA_MODE_WRITE);
+}
+
+static __inline__ int NCR5380_i386_dma_read_setup (struct Scsi_Host *instance,
+ unsigned char *src, unsigned int count) {
+ return NCR5380_i386_dma_setup (instance, src, count, DMA_MODE_READ);
+}
+
+static __inline__ int NCR5380_i386_dma_residual (struct Scsi_Host *instance) {
+ register int tmp;
+ cli();
+ clear_dma_ff(instance->dma_channel);
+ tmp = get_dma_residue(instance->dma_channel);
+ sti();
+ return tmp;
+}
+#endif /* (defined(REAL_DMA) || defined(REAL_DMA_POLL)) && defined(i386) */
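+
+/*
+ * A minimal usage sketch (assuming a hypothetical board-specific wrapper
+ * built with REAL_DMA on an i386 host): such a wrapper would typically map
+ * the generic DMA hooks onto the helpers above, e.g.
+ *
+ *     #define NCR5380_dma_write_setup(instance, src, count) \
+ *             NCR5380_i386_dma_write_setup(instance, src, count)
+ *     #define NCR5380_dma_read_setup(instance, dst, count) \
+ *             NCR5380_i386_dma_read_setup(instance, dst, count)
+ *     #define NCR5380_dma_residual(instance) \
+ *             NCR5380_i386_dma_residual(instance)
+ *
+ * The setup helpers return the number of bytes actually programmed into
+ * the ISA DMA controller, which may be less than the requested count.
+ */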
+#endif /* __KERNEL__ */
+#endif /* ndef ASM */
+#endif /* NCR5380_H */
diff --git a/i386/i386at/gpl/linux/scsi/NCR5380.src b/i386/i386at/gpl/linux/scsi/NCR5380.src
new file mode 100644
index 00000000..64beb813
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/NCR5380.src
@@ -0,0 +1,3035 @@
+#ifndef NDEBUG
+#define NDEBUG (NDEBUG_RESTART_SELECT | NDEBUG_ABORT)
+#endif
+/*
+ * NCR 5380 generic driver routines. These should make it *trivial*
+ * to implement 5380 SCSI drivers under Linux with a non-trantor
+ * architecture.
+ *
+ * Note that these routines also work with NCR53c400 family chips.
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * DISTRIBUTION RELEASE 6.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * $Log: NCR5380.src,v $
+ * Revision 1.1.1.1 1996/10/30 01:39:59 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1996/03/25 20:25:10 goel
+ * Linux driver merge.
+ *
+ * Revision 1.5 1994/01/19 09:14:57 drew
+ * Fixed udelay() hack that was being used on DATAOUT phases
+ * instead of a proper wait for the final handshake.
+ *
+ * Revision 1.4 1994/01/19 06:44:25 drew
+ * *** empty log message ***
+ *
+ * Revision 1.3 1994/01/19 05:24:40 drew
+ * Added support for TCR LAST_BYTE_SENT bit.
+ *
+ * Revision 1.2 1994/01/15 06:14:11 drew
+ * REAL DMA support, bug fixes.
+ *
+ * Revision 1.1 1994/01/15 06:00:54 drew
+ * Initial revision
+ *
+ */
+
+/*
+ * Further development / testing that should be done :
+ * 1. Cleanup the NCR5380_transfer_dma function and DMA operation complete
+ * code so that everything does the same thing that's done at the
+ * end of a pseudo-DMA read operation.
+ *
+ * 2. Fix REAL_DMA (interrupt driven, polled works fine) -
+ * basically, transfer size needs to be reduced by one
+ * and the last byte read as is done with PSEUDO_DMA.
+ *
+ * 3. Test USLEEP code
+ *
+ * 4. Test SCSI-II tagged queueing (I have no devices which support
+ * tagged queueing)
+ *
+ * 5. Test linked command handling code after Eric is ready with
+ * the high level code.
+ */
+
+#if (NDEBUG & NDEBUG_LISTS)
+#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
+#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
+#else
+#define LIST(x,y)
+#define REMOVE(w,x,y,z)
+#endif
+
+#ifndef notyet
+#undef LINKED
+#undef USLEEP
+#undef REAL_DMA
+#endif
+
+#ifdef REAL_DMA_POLL
+#undef READ_OVERRUNS
+#define READ_OVERRUNS
+#endif
+
+/*
+ * Design
+ * Issues :
+ *
+ * The other Linux SCSI drivers were written when Linux was Intel PC-only,
+ * and specifically for each board rather than each chip. This makes their
+ * adaptation to platforms like the Mac (Some of which use NCR5380's)
+ * more difficult than it has to be.
+ *
+ * Also, many of the SCSI drivers were written before the command queuing
+ * routines were implemented, meaning their implementations of queued
+ * commands were hacked on rather than designed in from the start.
+ *
+ * When I designed the Linux SCSI drivers I figured that
+ * while having two different SCSI boards in a system might be useful
+ * for debugging things, two of the same type wouldn't be used.
+ * Well, I was wrong and a number of users have mailed me about running
+ * multiple high-performance SCSI boards in a server.
+ *
+ * Finally, when I get questions from users, I have no idea what
+ * revision of my driver they are running.
+ *
+ * This driver attempts to address these problems :
+ * This is a generic 5380 driver. To use it on a different platform,
+ * one simply writes appropriate system specific macros (ie, data
+ * transfer - some PC's will use the I/O bus, 68K's must use
+ * memory mapped) and drops this file in their 'C' wrapper.
+ *
+ * As far as command queueing, two queues are maintained for
+ * each 5380 in the system - commands that haven't been issued yet,
+ * and commands that are currently executing. This means that an
+ * unlimited number of commands may be queued, letting
+ * more commands propagate from the higher driver levels giving higher
+ * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported,
+ * allowing multiple commands to propagate all the way to a SCSI-II device
+ * while a command is already executing.
+ *
+ * To solve the multiple-boards-in-the-same-system problem,
+ * there is a separate instance structure for each instance
+ * of a 5380 in the system. So, multiple NCR5380 drivers will
+ * be able to coexist with appropriate changes to the high level
+ * SCSI code.
+ *
+ * An NCR5380_PUBLIC_RELEASE macro is provided, with the release
+ * number (updated for each public release) printed by the
+ * NCR5380_print_options command, which should be called from the
+ * wrapper detect function, so that I know what release of the driver
+ * users are using.
+ *
+ * Issues specific to the NCR5380 :
+ *
+ * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
+ * piece of hardware that requires you to sit in a loop polling for
+ * the REQ signal as long as you are connected. Some devices are
+ * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
+ * while doing long seek operations.
+ *
+ * The workaround for this is to keep track of devices that have
+ * disconnected. If the device hasn't disconnected, for commands that
+ * should disconnect, we do something like
+ *
+ * while (!REQ is asserted) { sleep for N usecs; poll for M usecs }
+ *
+ * Some tweaking of N and M needs to be done. An algorithm based
+ * on "time to data" would give the best results as long as short time
+ * to datas (ie, on the same track) were considered, however these
+ * broken devices are the exception rather than the rule and I'd rather
+ * spend my time optimizing for the normal case.
+ *
+ * Architecture :
+ *
+ * At the heart of the design is a coroutine, NCR5380_main,
+ * which is started when not running by the interrupt handler,
+ * timer, and queue command function. It attempts to establish
+ * I_T_L or I_T_L_Q nexuses by removing the commands from the
+ * issue queue and calling NCR5380_select() if a nexus
+ * is not established.
+ *
+ * Once a nexus is established, the NCR5380_information_transfer()
+ * phase goes through the various phases as instructed by the target.
+ * if the target goes into MSG IN and sends a DISCONNECT message,
+ * the command structure is placed into the per instance disconnected
+ * queue, and NCR5380_main tries to find more work. If USLEEP
+ * was defined, and the target is idle for too long, the system
+ * will try to sleep.
+ *
+ * If a command has disconnected, eventually an interrupt will trigger,
+ * calling NCR5380_intr() which will in turn call NCR5380_reselect
+ * to reestablish a nexus. This will run main if necessary.
+ *
+ * On command termination, the done function will be called as
+ * appropriate.
+ *
+ * SCSI pointers are maintained in the SCp field of SCSI command
+ * structures, being initialized after the command is connected
+ * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
+ * Note that in violation of the standard, an implicit SAVE POINTERS operation
+ * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
+ */
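+
+/*
+ * Illustrative sketch (not driver code) : the sleep-for-N / poll-for-M idea
+ * described above, written out roughly the way a USLEEP-style implementation
+ * might loop. N and M correspond to USLEEP_SLEEP and USLEEP_POLL defined
+ * further down (in jiffies rather than usecs); sleep_jiffies() is a
+ * placeholder for whatever sleep primitive the port provides, not a real
+ * kernel interface.
+ *
+ *	unsigned long poll_until;
+ *
+ *	while (!(NCR5380_read(STATUS_REG) & SR_REQ)) {
+ *	    sleep_jiffies(USLEEP_SLEEP);
+ *	    poll_until = jiffies + USLEEP_POLL;
+ *	    while (jiffies < poll_until &&
+ *		   !(NCR5380_read(STATUS_REG) & SR_REQ))
+ *		barrier();
+ *	}
+ */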
+
+/*
+ * Using this file :
+ * This file is a skeleton Linux SCSI driver for the NCR 5380 series
+ * of chips. To use it, you write architecture specific functions
+ * and macros and include this file in your driver.
+ *
+ * These macros control options :
+ * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
+ * defined.
+ *
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
+ * transceivers.
+ *
+ * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512
+ * bytes at a time. Since interrupts are disabled by default during
+ * these transfers, we might need this to give reasonable interrupt
+ * service time if the transfer size gets too large.
+ *
+ * LINKED - if defined, linked commands are supported.
+ *
+ * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases.
+ *
+ * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
+ *
+ * REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't
+ * rely on phase mismatch and EOP interrupts to determine end
+ * of phase.
+ *
+ * SCSI2 - if defined, SCSI-2 tagged queuing is used where possible
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
+ * only really want to use this if you're having a problem with
+ * dropped characters during high speed communications, and even
+ * then, you're going to be better off twiddling with transfersize
+ * in the high level code.
+ *
+ * USLEEP - if defined, on devices that aren't disconnecting from the
+ * bus, we will go to sleep so that the CPU can get real work done
+ * when we run a command that won't complete immediately.
+ *
+ * Note that if USLEEP is defined, NCR5380_TIMER *must* also be
+ * defined.
+ *
+ * Defaults for these will be provided if USLEEP is defined, although
+ * the user may want to adjust these to allocate CPU resources to
+ * the SCSI driver or "real" code.
+ *
+ * USLEEP_SLEEP - amount of time, in jiffies, to sleep
+ *
+ * USLEEP_POLL - amount of time, in jiffies, to poll
+ *
+ * These macros MUST be defined :
+ * NCR5380_local_declare() - declare any local variables needed for your transfer
+ * routines.
+ *
+ * NCR5380_setup(instance) - initialize any local variables needed from a given
+ * instance of the host adapter for NCR5380_{read,write,pread,pwrite}
+ *
+ * NCR5380_read(register) - read from the specified register
+ *
+ * NCR5380_write(register, value) - write to the specific register
+ *
+ * NCR5380_implementation_fields - additional fields needed for this
+ * specific implementation of the NCR5380
+ *
+ * Either real DMA *or* pseudo DMA may be implemented
+ * REAL functions :
+ * NCR5380_REAL_DMA should be defined if real DMA is to be used.
+ * Note that the DMA setup functions should return the number of bytes
+ * that they were able to program the controller for.
+ *
+ * Also note that generic i386/PC versions of these macros are
+ * available as NCR5380_i386_dma_write_setup,
+ * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
+ *
+ * NCR5380_dma_write_setup(instance, src, count) - initialize
+ * NCR5380_dma_read_setup(instance, dst, count) - initialize
+ * NCR5380_dma_residual(instance); - residual count
+ *
+ * PSEUDO functions :
+ * NCR5380_pwrite(instance, src, count)
+ * NCR5380_pread(instance, dst, count);
+ *
+ * If nothing specific to this implementation needs doing (ie, with external
+ * hardware), you must also define
+ *
+ * NCR5380_queue_command
+ * NCR5380_reset
+ * NCR5380_abort
+ *
+ * to be the global entry points into the specific driver, ie
+ * #define NCR5380_queue_command t128_queue_command.
+ *
+ * If this is not done, the routines will be defined as static functions
+ * with the NCR5380* names and the user must provide a globally
+ * accessible wrapper function.
+ *
+ * The generic driver is initialized by calling NCR5380_init(instance),
+ * after setting the appropriate host specific fields and ID. If the
+ * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
+ * possible) function may be used. Before the specific driver initialization
+ * code finishes, NCR5380_print_options should be called.
+ */
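+
+/*
+ * Illustrative sketch (not part of this driver) : roughly what a minimal
+ * port-mapped i386 wrapper could define before including this file. The
+ * names used here (example_port, example_queue_command, and so on) are
+ * placeholders for this sketch rather than an existing board definition;
+ * see an existing wrapper such as the t128 driver mentioned above for
+ * working values.
+ *
+ *	#define NCR5380_local_declare()   unsigned int example_port
+ *	#define NCR5380_setup(instance)   example_port = (instance)->io_port
+ *	#define NCR5380_read(reg)         inb(example_port + (reg))
+ *	#define NCR5380_write(reg, value) outb((value), example_port + (reg))
+ *	#define NCR5380_implementation_fields
+ *	#define NCR5380_queue_command     example_queue_command
+ *	#define NCR5380_abort             example_abort
+ *	#define NCR5380_reset             example_reset
+ *
+ * The wrapper's detect routine would then fill in io_port and this_id,
+ * call NCR5380_init(instance, 0), optionally NCR5380_probe_irq(), and
+ * finally NCR5380_print_options(instance), as described above.
+ */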
+
+static int do_abort (struct Scsi_Host *host);
+static void do_reset (struct Scsi_Host *host);
+static struct Scsi_Host *first_instance = NULL;
+static Scsi_Host_Template *the_template = NULL;
+
+/*
+ * Function : void initialize_SCp(Scsi_Cmnd *cmd)
+ *
+ * Purpose : initialize the saved data pointers for cmd to point to the
+ * start of the buffer.
+ *
+ * Inputs : cmd - Scsi_Cmnd structure to have pointers reset.
+ */
+
+static __inline__ void initialize_SCp(Scsi_Cmnd *cmd) {
+ /*
+ * Initialize the Scsi Pointer field so that all of the commands in the
+ * various queues are valid.
+ */
+
+ if (cmd->use_sg) {
+ cmd->SCp.buffer = (struct scatterlist *) cmd->buffer;
+ cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ cmd->SCp.ptr = (char *) cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ } else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *) cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->request_bufflen;
+ }
+}
+
+#include <linux/delay.h>
+
+#ifdef NDEBUG
+static struct {
+ unsigned char mask;
+ const char * name;}
+signals[] = {{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
+ { SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
+ { SR_SEL, "SEL" }, {0, NULL}},
+basrs[] = {{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}},
+icrs[] = {{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
+ {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
+ {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
+ {0, NULL}},
+mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
+ {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
+ "MODE PARITY INTR"}, {MR_MONITOR_BSY, "MODE MONITOR BSY"},
+ {MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"},
+ {0, NULL}};
+
+/*
+ * Function : void NCR5380_print(struct Scsi_Host *instance)
+ *
+ * Purpose : print the SCSI bus signals for debugging purposes
+ *
+ * Input : instance - which NCR5380
+ */
+
+static void NCR5380_print(struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ unsigned char status, data, basr, mr, icr, i;
+ NCR5380_setup(instance);
+ cli();
+ data = NCR5380_read(CURRENT_SCSI_DATA_REG);
+ status = NCR5380_read(STATUS_REG);
+ mr = NCR5380_read(MODE_REG);
+ icr = NCR5380_read(INITIATOR_COMMAND_REG);
+ basr = NCR5380_read(BUS_AND_STATUS_REG);
+ sti();
+ printk("STATUS_REG: %02x ", status);
+ for (i = 0; signals[i].mask ; ++i)
+ if (status & signals[i].mask)
+ printk(",%s", signals[i].name);
+ printk("\nBASR: %02x ", basr);
+ for (i = 0; basrs[i].mask ; ++i)
+ if (basr & basrs[i].mask)
+ printk(",%s", basrs[i].name);
+ printk("\nICR: %02x ", icr);
+ for (i = 0; icrs[i].mask; ++i)
+ if (icr & icrs[i].mask)
+ printk(",%s", icrs[i].name);
+ printk("\nMODE: %02x ", mr);
+ for (i = 0; mrs[i].mask; ++i)
+ if (mr & mrs[i].mask)
+ printk(",%s", mrs[i].name);
+ printk("\n");
+}
+
+static struct {
+ unsigned char value;
+ const char *name;
+} phases[] = {
+{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"},
+{PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
+{PHASE_UNKNOWN, "UNKNOWN"}};
+
+/*
+ * Function : void NCR5380_print_phase(struct Scsi_Host *instance)
+ *
+ * Purpose : print the current SCSI phase for debugging purposes
+ *
+ * Input : instance - which NCR5380
+ */
+
+static void NCR5380_print_phase(struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ unsigned char status;
+ int i;
+ NCR5380_setup(instance);
+
+ status = NCR5380_read(STATUS_REG);
+ if (!(status & SR_REQ))
+ printk("scsi%d : REQ not asserted, phase unknown.\n",
+ instance->host_no);
+ else {
+ for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
+ (phases[i].value != (status & PHASE_MASK)); ++i);
+ printk("scsi%d : phase %s\n", instance->host_no, phases[i].name);
+ }
+}
+#endif
+
+/*
+ * We need to have our coroutine active given these constraints :
+ * 1. The mutex flag, main_running, can only be set when the main
+ * routine can actually process data, otherwise SCSI commands
+ * will never get issued.
+ *
+ * 2. NCR5380_main() shouldn't be called before it has exited, because
+ * other drivers have had kernel stack overflows in similar
+ * situations.
+ *
+ * 3. We don't want to inline NCR5380_main() because of space concerns,
+ * even though it is only called in two places.
+ *
+ * So, the solution is to set the mutex in an inline wrapper for the
+ * main coroutine, and have the main coroutine exit with interrupts
+ * disabled after the final search through the queues so that no race
+ * conditions are possible.
+ */
+
+static volatile int main_running = 0;
+
+/*
+ * Function : run_main(void)
+ *
+ * Purpose : ensure that the coroutine is running and will process our
+ * request. main_running is checked/set here (in an inline function)
+ * rather than in NCR5380_main itself to reduce the chances of stack
+ * overflow.
+ *
+ */
+
+static __inline__ void run_main(void) {
+ cli();
+ if (!main_running) {
+ main_running = 1;
+ NCR5380_main();
+ /*
+ * main_running is cleared in NCR5380_main once it can't do
+ * more work, and NCR5380_main exits with interrupts disabled.
+ */
+ sti();
+ } else
+ sti();
+}
+
+#ifdef USLEEP
+#ifndef NCR5380_TIMER
+#error "NCR5380_TIMER must be defined so that this type of NCR5380 driver gets a unique timer."
+#endif
+
+/*
+ * These need tweaking, and would probably work best as per-device
+ * flags initialized differently for disk, tape, cd, etc devices.
+ * People with broken devices are free to experiment as to what gives
+ * the best results for them.
+ *
+ * USLEEP_SLEEP should be a minimum seek time.
+ *
+ * USLEEP_POLL should be a maximum rotational latency.
+ */
+#ifndef USLEEP_SLEEP
+/* 20 ms (reasonable hard disk speed) */
+#define USLEEP_SLEEP 2
+#endif
+/* 300 RPM (floppy speed) */
+#ifndef USLEEP_POLL
+#define USLEEP_POLL 20
+#endif
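+
+/*
+ * For reference (assuming the usual HZ == 100 jiffy rate on the i386) :
+ * USLEEP_SLEEP = 2 jiffies is roughly 20 ms, matching the seek figure
+ * above, and USLEEP_POLL = 20 jiffies is roughly 200 ms, one full
+ * revolution at 300 RPM.
+ */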
+
+static struct Scsi_Host * expires_first = NULL;
+
+/*
+ * Function : int should_disconnect (unsigned char cmd)
+ *
+ * Purpose : decide whether a command would normally disconnect or
+ * not, since if it won't disconnect we should go to sleep.
+ *
+ * Input : cmd - opcode of SCSI command
+ *
+ * Returns : DISCONNECT_LONG if we should disconnect for a really long
+ * time (ie always, sleep, look for REQ active, sleep),
+ * DISCONNECT_TIME_TO_DATA if we would only disconnect for a normal
+ * time-to-data delay, DISCONNECT_NONE if this command would return
+ * immediately.
+ *
+ * Future sleep algorithms based on time to data can exploit
+ * something like this so they can differentiate between "normal"
+ * (ie, read, write, seek) and unusual commands (ie, format).
+ *
+ * Note : We don't deal with commands that handle an immediate disconnect,
+ *
+ */
+
+static int should_disconnect (unsigned char cmd) {
+ switch (cmd) {
+ case READ_6:
+ case WRITE_6:
+ case SEEK_6:
+ case READ_10:
+ case WRITE_10:
+ case SEEK_10:
+ return DISCONNECT_TIME_TO_DATA;
+ case FORMAT_UNIT:
+ case SEARCH_HIGH:
+ case SEARCH_LOW:
+ case SEARCH_EQUAL:
+ return DISCONNECT_LONG;
+ default:
+ return DISCONNECT_NONE;
+ }
+}
+
+/*
+ * Assumes instance->time_expires has been set in higher level code.
+ */
+
+static int NCR5380_set_timer (struct Scsi_Host *instance) {
+ struct Scsi_Host *tmp, **prev;
+
+ cli();
+    if (((struct NCR5380_hostdata *) (instance->hostdata))->next_timer) {
+ sti();
+ return -1;
+ }
+
+    for (prev = &expires_first, tmp = expires_first; tmp;
+	prev = &(((struct NCR5380_hostdata *) tmp->hostdata)->next_timer),
+	tmp = ((struct NCR5380_hostdata *) tmp->hostdata)->next_timer)
+	if (((struct NCR5380_hostdata *) instance->hostdata)->time_expires <
+	    ((struct NCR5380_hostdata *) tmp->hostdata)->time_expires)
+	    break;
+
+    ((struct NCR5380_hostdata *) instance->hostdata)->next_timer = tmp;
+    *prev = instance;
+    timer_table[NCR5380_TIMER].expires =
+	((struct NCR5380_hostdata *) expires_first->hostdata)->time_expires;
+ timer_active |= 1 << NCR5380_TIMER;
+ sti();
+ return 0;
+}
+
+/* Doing something about unwanted reentrancy here might be useful */
+void NCR5380_timer_fn(void) {
+ struct Scsi_Host *instance;
+ cli();
+    for (; expires_first &&
+	((struct NCR5380_hostdata *) expires_first->hostdata)->time_expires >=
+	    jiffies; ) {
+	instance = ((struct NCR5380_hostdata *) expires_first->hostdata)->
+	    next_timer;
+	((struct NCR5380_hostdata *) expires_first->hostdata)->next_timer =
+	    NULL;
+	((struct NCR5380_hostdata *) expires_first->hostdata)->time_expires =
+	    0;
+	expires_first = instance;
+    }
+
+    if (expires_first) {
+	timer_table[NCR5380_TIMER].expires = ((struct NCR5380_hostdata *)
+	    expires_first->hostdata)->time_expires;
+	timer_active |= (1 << NCR5380_TIMER);
+    } else {
+	timer_table[NCR5380_TIMER].expires = 0;
+	timer_active &= ~(1 << NCR5380_TIMER);
+ }
+ sti();
+
+ run_main();
+}
+#endif /* def USLEEP */
+
+static void NCR5380_all_init (void) {
+ static int done = 0;
+ if (!done) {
+#if (NDEBUG & NDEBUG_INIT)
+ printk("scsi : NCR5380_all_init()\n");
+#endif
+ done = 1;
+#ifdef USLEEP
+ timer_table[NCR5380_TIMER].expires = 0;
+ timer_table[NCR5380_TIMER].fn = NCR5380_timer_fn;
+#endif
+ }
+}
+
+#ifdef AUTOPROBE_IRQ
+/*
+ * Function : int NCR5380_probe_irq (struct Scsi_Host *instance, int possible)
+ *
+ * Purpose : autoprobe for the IRQ line used by the NCR5380.
+ *
+ * Inputs : instance - pointer to this instance of the NCR5380 driver,
+ * possible - bitmask of permissible interrupts.
+ *
+ * Returns : number of the IRQ selected, IRQ_NONE if no interrupt fired.
+ *
+ * XXX no effort is made to deal with spurious interrupts.
+ */
+
+
+static int probe_irq;
+static void probe_intr (int irq, struct pt_regs * regs) {
+ probe_irq = irq;
+};
+
+static int NCR5380_probe_irq (struct Scsi_Host *instance, int possible) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ unsigned long timeout;
+ int trying_irqs, i, mask;
+ NCR5380_setup(instance);
+
+ for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
+ if ((mask & possible) && (request_irq(i, &probe_intr, SA_INTERRUPT, "NCR-probe")
+ == 0))
+ trying_irqs |= mask;
+
+ timeout = jiffies + 25;
+ probe_irq = IRQ_NONE;
+
+/*
+ * An interrupt is triggered whenever BSY = false, SEL = true
+ * and a bit set in the SELECT_ENABLE_REG is asserted on the
+ * SCSI bus.
+ *
+ * Note that the bus is only driven when the phase control signals
+ * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
+ * to zero.
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA |
+ ICR_ASSERT_SEL);
+
+ while (probe_irq == IRQ_NONE && jiffies < timeout)
+ barrier();
+
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ for (i = 0, mask = 1; i < 16; ++i, mask <<= 1)
+ if (trying_irqs & mask)
+ free_irq(i);
+
+ return probe_irq;
+}
+#endif /* AUTOPROBE_IRQ */
+
+/*
+ * Function : void NCR5380_print_options (struct Scsi_Host *instance)
+ *
+ * Purpose : called by probe code indicating the NCR5380 driver
+ * options that were selected.
+ *
+ * Inputs : instance, pointer to this instance. Unused.
+ */
+
+static void NCR5380_print_options (struct Scsi_Host *instance) {
+ printk(" generic options"
+#ifdef AUTOPROBE_IRQ
+ " AUTOPROBE_IRQ"
+#endif
+#ifdef AUTOSENSE
+ " AUTOSENSE"
+#endif
+#ifdef DIFFERENTIAL
+ " DIFFERENTIAL"
+#endif
+#ifdef REAL_DMA
+ " REAL DMA"
+#endif
+#ifdef REAL_DMA_POLL
+ " REAL DMA POLL"
+#endif
+#ifdef PARITY
+ " PARITY"
+#endif
+#ifdef PSEUDO_DMA
+ " PSEUDO DMA"
+#endif
+#ifdef SCSI2
+ " SCSI-2"
+#endif
+#ifdef UNSAFE
+ " UNSAFE "
+#endif
+ );
+#ifdef USLEEP
+ printk(" USLEEP, USLEEP_POLL=%d USLEEP_SLEEP=%d", USLEEP_POLL, USLEEP_SLEEP);
+#endif
+ printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
+ if (((struct NCR5380_hostdata *)instance->hostdata)->flags & FLAG_NCR53C400) {
+ printk(" ncr53c400 release=%d", NCR53C400_PUBLIC_RELEASE);
+ }
+}
+
+/*
+ * Function : void NCR5380_print_status (struct Scsi_Host *instance)
+ *
+ * Purpose : print commands in the various queues, called from
+ * NCR5380_abort and NCR5380_debug to aid debugging.
+ *
+ * Inputs : instance, pointer to this instance.
+ */
+
+static void NCR5380_print_status (struct Scsi_Host *instance) {
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ Scsi_Cmnd *ptr;
+
+
+ printk("NCR5380 : coroutine is%s running.\n",
+ main_running ? "" : "n't");
+
+#ifdef NDEBUG
+ NCR5380_print (instance);
+ NCR5380_print_phase (instance);
+#endif
+
+ cli();
+ if (!hostdata->connected) {
+ printk ("scsi%d: no currently connected command\n",
+ instance->host_no);
+ } else {
+ print_Scsi_Cmnd ((Scsi_Cmnd *) hostdata->connected);
+ }
+
+ printk ("scsi%d: issue_queue\n", instance->host_no);
+
+ for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ print_Scsi_Cmnd (ptr);
+
+ printk ("scsi%d: disconnected_queue\n", instance->host_no);
+
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ print_Scsi_Cmnd (ptr);
+
+ sti();
+}
+
+
+/*
+ * Function : void NCR5380_init (struct Scsi_Host *instance, flags)
+ *
+ * Purpose : initializes *instance and corresponding 5380 chip,
+ * with flags OR'd into the initial flags value.
+ *
+ * Inputs : instance - instantiation of the 5380 driver.
+ *
+ * Notes : I assume that the host, hostno, and id bits have been
+ * set correctly. I don't care about the irq and other fields.
+ *
+ */
+
+static void NCR5380_init (struct Scsi_Host *instance, int flags) {
+ NCR5380_local_declare();
+ int i, pass;
+ unsigned long timeout;
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+
+ /*
+ * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+ * the base address.
+ */
+
+#ifdef NCR53C400
+ if (flags & FLAG_NCR53C400)
+ instance->NCR5380_instance_name += NCR53C400_address_adjust;
+#endif
+
+ NCR5380_setup(instance);
+
+ NCR5380_all_init();
+
+ hostdata->aborted = 0;
+ hostdata->id_mask = 1 << instance->this_id;
+ for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
+ if (i > hostdata->id_mask)
+ hostdata->id_higher_mask |= i;
+ for (i = 0; i < 8; ++i)
+ hostdata->busy[i] = 0;
+#ifdef REAL_DMA
+ hostdata->dmalen = 0;
+#endif
+ hostdata->targets_present = 0;
+ hostdata->connected = NULL;
+ hostdata->issue_queue = NULL;
+ hostdata->disconnected_queue = NULL;
+
+ /* The CHECK code seems to break the 53C400. Will check it later maybe */
+ if (flags & FLAG_NCR53C400)
+ hostdata->flags = FLAG_HAS_LAST_BYTE_SENT | flags;
+ else
+ hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT | flags;
+
+ if (!the_template) {
+ the_template = instance->hostt;
+ first_instance = instance;
+ }
+
+
+#ifdef USLEEP
+ hostdata->time_expires = 0;
+ hostdata->next_timer = NULL;
+#endif
+
+#ifndef AUTOSENSE
+  if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
+ printk("scsi%d : WARNING : support for multiple outstanding commands enabled\n"
+ " without AUTOSENSE option, contingent allegiance conditions may\n"
+ " be incorrectly cleared.\n", instance->host_no);
+#endif /* ndef AUTOSENSE */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+
+#ifdef NCR53C400
+ if (hostdata->flags & FLAG_NCR53C400) {
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE);
+ }
+#endif
+
+ /*
+ * Detect and correct bus wedge problems.
+ *
+ * If the system crashed, it may have crashed in a state
+ * where a SCSI command was still executing, and the
+ * SCSI bus is not in a BUS FREE STATE.
+ *
+ * If this is the case, we'll try to abort the currently
+ * established nexus which we know nothing about, and that
+ * failing, do a hard reset of the SCSI bus
+ */
+
+ for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) &&
+ pass <= 6 ; ++pass) {
+ switch (pass) {
+ case 1:
+ case 3:
+ case 5:
+ printk("scsi%d: SCSI bus busy, waiting up to five seconds\n",
+ instance->host_no);
+ timeout = jiffies + 500;
+ while (jiffies < timeout && (NCR5380_read(STATUS_REG) & SR_BSY));
+ break;
+ case 2:
+ printk("scsi%d: bus busy, attempting abort\n",
+ instance->host_no);
+ do_abort (instance);
+ break;
+ case 4:
+ printk("scsi%d: bus busy, attempting reset\n",
+ instance->host_no);
+ do_reset (instance);
+ break;
+ case 6:
+ printk("scsi%d: bus locked solid or invalid override\n",
+ instance->host_no);
+ }
+ }
+}
+
+/*
+ * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd,
+ * void (*done)(Scsi_Cmnd *))
+ *
+ * Purpose : enqueues a SCSI command
+ *
+ * Inputs : cmd - SCSI command, done - function called on completion, with
+ * a pointer to the command descriptor.
+ *
+ * Returns : 0
+ *
+ * Side effects :
+ * cmd is added to the per instance issue_queue, with minor
+ * twiddling done to the host specific fields of cmd. If the
+ * main coroutine is not running, it is restarted.
+ *
+ */
+
+/* Only make static if a wrapper function is used */
+#ifndef NCR5380_queue_command
+static
+#endif
+int NCR5380_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) {
+ struct Scsi_Host *instance = cmd->host;
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ Scsi_Cmnd *tmp;
+
+#if (NDEBUG & NDEBUG_NO_WRITE)
+ switch (cmd->cmnd[0]) {
+ case WRITE:
+ case WRITE_10:
+ printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n",
+ instance->host_no);
+ cmd->result = (DID_ERROR << 16);
+ done(cmd);
+ return 0;
+ }
+#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
+
+
+ /*
+ * We use the host_scribble field as a pointer to the next command
+ * in a queue
+ */
+
+ cmd->host_scribble = NULL;
+ cmd->scsi_done = done;
+
+ cmd->result = 0;
+
+
+ /*
+ * Insert the cmd into the issue queue. Note that REQUEST SENSE
+ * commands are added to the head of the queue since any command will
+ * clear the contingent allegiance condition that exists and the
+ * sense data is only guaranteed to be valid while the condition exists.
+ */
+
+ cli();
+ if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ LIST(cmd, hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *) hostdata->issue_queue;
+ hostdata->issue_queue = cmd;
+ } else {
+ for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->host_scribble;
+ tmp = (Scsi_Cmnd *) tmp->host_scribble);
+ LIST(cmd, tmp);
+ tmp->host_scribble = (unsigned char *) cmd;
+ }
+#if (NDEBUG & NDEBUG_QUEUES)
+ printk("scsi%d : command added to %s of queue\n", instance->host_no,
+ (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
+#endif
+
+/* Run the coroutine if it isn't already running. */
+ run_main();
+ return 0;
+}
+
+/*
+ * Function : NCR5380_main (void)
+ *
+ * Purpose : NCR5380_main is a coroutine that runs as long as more work can
+ * be done on the NCR5380 host adapters in a system. Both
+ * NCR5380_queue_command() and NCR5380_intr() will try to start it
+ * in case it is not running.
+ *
+ * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should
+ * reenable them. This prevents reentrancy and kernel stack overflow.
+ */
+
+static void NCR5380_main (void) {
+ Scsi_Cmnd *tmp, *prev;
+ struct Scsi_Host *instance;
+ struct NCR5380_hostdata *hostdata;
+ int done;
+
+ /*
+ * We run (with interrupts disabled) until we're sure that none of
+ * the host adapters have anything that can be done, at which point
+ * we set main_running to 0 and exit.
+ *
+ * Interrupts are enabled before doing various other internal
+ * instructions, after we've decided that we need to run through
+ * the loop again.
+ *
+ * this should prevent any race conditions.
+ */
+
+ do {
+ cli(); /* Freeze request queues */
+ done = 1;
+ for (instance = first_instance; instance &&
+ instance->hostt == the_template; instance=instance->next) {
+ hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+ cli();
+ if (!hostdata->connected) {
+#if (NDEBUG & NDEBUG_MAIN)
+ printk("scsi%d : not connected\n", instance->host_no);
+#endif
+ /*
+ * Search through the issue_queue for a command destined
+ * for a target that's not busy.
+ */
+#if (NDEBUG & NDEBUG_LISTS)
+ for (tmp= (Scsi_Cmnd *) hostdata->issue_queue, prev=NULL; tmp && (tmp != prev); prev=tmp, tmp=(Scsi_Cmnd*)tmp->host_scribble)
+ ;
+ /*printk("%p ", tmp);*/
+ if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/
+#endif
+ for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
+ prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *)
+ tmp->host_scribble) {
+
+#if (NDEBUG & NDEBUG_LISTS)
+ if (prev != tmp)
+ printk("MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun);
+#endif
+ /* When we find one, remove it from the issue queue. */
+ if (!(hostdata->busy[tmp->target] & (1 << tmp->lun))) {
+ if (prev) {
+ REMOVE(prev,prev->host_scribble,tmp,tmp->host_scribble);
+ prev->host_scribble = tmp->host_scribble;
+ } else {
+ REMOVE(-1,hostdata->issue_queue,tmp,tmp->host_scribble);
+ hostdata->issue_queue = (Scsi_Cmnd *) tmp->host_scribble;
+ }
+ tmp->host_scribble = NULL;
+
+ /* reenable interrupts after finding one */
+ sti();
+
+ /*
+ * Attempt to establish an I_T_L nexus here.
+ * On success, instance->hostdata->connected is set.
+ * On failure, we must add the command back to the
+ * issue queue so we can keep trying.
+ */
+#if (NDEBUG & (NDEBUG_MAIN | NDEBUG_QUEUES))
+ printk("scsi%d : main() : command for target %d lun %d removed from issue_queue\n",
+ instance->host_no, tmp->target, tmp->lun);
+#endif
+
+ /*
+ * A successful selection is defined as one that
+ * leaves us with the command connected and
+ * in hostdata->connected, OR has terminated the
+ * command.
+ *
+		     * With successful commands, we fall through
+ * and see if we can do an information transfer,
+ * with failures we will restart.
+ */
+
+ if (!NCR5380_select(instance, tmp,
+ /*
+ * REQUEST SENSE commands are issued without tagged
+ * queueing, even on SCSI-II devices because the
+ * contingent allegiance condition exists for the
+ * entire unit.
+ */
+ (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE :
+ TAG_NEXT)) {
+ break;
+ } else {
+ cli();
+ LIST(tmp, hostdata->issue_queue);
+ tmp->host_scribble = (unsigned char *)
+ hostdata->issue_queue;
+ hostdata->issue_queue = tmp;
+ done = 0;
+ sti();
+#if (NDEBUG & (NDEBUG_MAIN | NDEBUG_QUEUES))
+ printk("scsi%d : main(): select() failed, returned to issue_queue\n",
+ instance->host_no);
+#endif
+ }
+ } /* if target/lun is not busy */
+ } /* for */
+ } /* if (!hostdata->connected) */
+
+ if (hostdata->connected
+#ifdef REAL_DMA
+ && !hostdata->dmalen
+#endif
+#ifdef USLEEP
+ && (!hostdata->time_expires || hostdata->time_expires >= jiffies)
+#endif
+ ) {
+ sti();
+#if (NDEBUG & NDEBUG_MAIN)
+ printk("scsi%d : main() : performing information transfer\n",
+ instance->host_no);
+#endif
+ NCR5380_information_transfer(instance);
+#if (NDEBUG & NDEBUG_MAIN)
+ printk("scsi%d : main() : done set false\n", instance->host_no);
+#endif
+ done = 0;
+ } else
+ break;
+ } /* for instance */
+ } while (!done);
+ main_running = 0;
+}
+
+/*
+ * Function : void NCR5380_intr (int irq, struct pt_regs *regs)
+ *
+ * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
+ * from the disconnected queue, and restarting NCR5380_main()
+ * as required.
+ *
+ * Inputs : int irq, irq that caused this interrupt.
+ *
+ */
+
+static void NCR5380_intr (int irq, struct pt_regs * regs) {
+ NCR5380_local_declare();
+ struct Scsi_Host *instance;
+ int done;
+ unsigned char basr;
+#if (NDEBUG & NDEBUG_INTR)
+ printk("scsi : NCR5380 irq %d triggered\n", irq);
+#endif
+ do {
+ done = 1;
+ for (instance = first_instance; instance && (instance->hostt ==
+ the_template); instance = instance->next)
+ if (instance->irq == irq) {
+
+ /* Look for pending interrupts */
+ NCR5380_setup(instance);
+ basr = NCR5380_read(BUS_AND_STATUS_REG);
+ /* XXX dispatch to appropriate routine if found and done=0 */
+ if (basr & BASR_IRQ) {
+#if (NDEBUG & NDEBUG_INTR)
+ NCR5380_print(instance);
+#endif
+ if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) ==
+ (SR_SEL | SR_IO)) {
+ done = 0;
+ sti();
+#if (NDEBUG & NDEBUG_INTR)
+ printk("scsi%d : SEL interrupt\n", instance->host_no);
+#endif
+ NCR5380_reselect(instance);
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else if (basr & BASR_PARITY_ERROR) {
+#if (NDEBUG & NDEBUG_INTR)
+ printk("scsi%d : PARITY interrupt\n", instance->host_no);
+#endif
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
+#if (NDEBUG & NDEBUG_INTR)
+ printk("scsi%d : RESET interrupt\n", instance->host_no);
+#endif
+ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else {
+/*
+ * XXX the rest of the interrupt conditions should *only* occur during a
+ * DMA transfer, which I haven't gotten around to fixing yet.
+ */
+
+#if defined(REAL_DMA)
+ /*
+ * We should only get PHASE MISMATCH and EOP interrupts
+ * if we have DMA enabled, so do a sanity check based on
+ * the current setting of the MODE register.
+ */
+
+ if ((NCR5380_read(MODE_REG) & MR_DMA) && ((basr &
+ BASR_END_DMA_TRANSFER) ||
+ !(basr & BASR_PHASE_MATCH))) {
+		struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+		    instance->hostdata;
+		int transferred;
+
+		if (!hostdata->connected)
+		    panic("scsi%d : received end of DMA interrupt with no connected cmd\n",
+			instance->host_no);
+
+		transferred = (hostdata->dmalen - NCR5380_dma_residual(instance));
+		hostdata->connected->SCp.this_residual -= transferred;
+		hostdata->connected->SCp.ptr += transferred;
+ hostdata->dmalen = 0;
+
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+#if NCR_TIMEOUT
+ {
+ unsigned long timeout = jiffies + NCR_TIMEOUT;
+
+ while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK
+ && jiffies < timeout)
+ ;
+ if (jiffies >= timeout)
+ printk("scsi%d: timeout at NCR5380.c:%d\n",
+			instance->host_no, __LINE__);
+ }
+#else /* NCR_TIMEOUT */
+ while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK);
+#endif
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ }
+#else
+#if (NDEBUG & NDEBUG_INTR)
+ printk("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
+#endif
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+#endif
+ }
+ } /* if BASR_IRQ */
+ if (!done)
+ run_main();
+ } /* if (instance->irq == irq) */
+ } while (!done);
+}
+
+/*
+ * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+ * int tag);
+ *
+ * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
+ * including ARBITRATION, SELECTION, and initial message out for
+ * IDENTIFY and queue messages.
+ *
+ * Inputs : instance - instantiation of the 5380 driver on which this
+ * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for
+ * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
+ * the command that is presently connected.
+ *
+ * Returns : -1 if selection could not execute for some reason,
+ * 0 if selection succeeded or failed because the target
+ * did not respond.
+ *
+ * Side effects :
+ * If bus busy, arbitration failed, etc, NCR5380_select() will exit
+ * with registers as they should have been on entry - ie
+ * SELECT_ENABLE will be set appropriately, the NCR5380
+ * will cease to drive any SCSI bus signals.
+ *
+ * If successful : I_T_L or I_T_L_Q nexus will be established,
+ * instance->connected will be set to cmd.
+ * SELECT interrupt will be disabled.
+ *
+ * If failed (no target) : cmd->scsi_done() will be called, and the
+ * cmd->result host byte set to DID_BAD_TARGET.
+ */
+
+static int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+ int tag) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata*)
+ instance->hostdata;
+ unsigned char tmp[3], phase;
+ unsigned char *data;
+ int len;
+ unsigned long timeout;
+ NCR5380_setup(instance);
+
+ hostdata->restart_select = 0;
+#if defined (NDEBUG) && (NDEBUG & NDEBUG_ARBITRATION)
+ NCR5380_print(instance);
+ printk("scsi%d : starting arbitration, id = %d\n", instance->host_no,
+ instance->this_id);
+#endif
+ cli();
+
+ /*
+ * Set the phase bits to 0, otherwise the NCR5380 won't drive the
+ * data bus during SELECTION.
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+
+ /*
+ * Start arbitration.
+ */
+
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(MODE_REG, MR_ARBITRATE);
+
+ sti();
+
+ /* Wait for arbitration logic to complete */
+#if NCR_TIMEOUT
+ {
+ unsigned long timeout = jiffies + 2*NCR_TIMEOUT;
+
+ while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
+ && jiffies < timeout)
+ ;
+ if (jiffies >= timeout)
+ {
+ printk("scsi: arbitration timeout at %d\n", __LINE__);
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+ }
+#else /* NCR_TIMEOUT */
+ while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS));
+#endif
+
+#if (NDEBUG & NDEBUG_ARBITRATION)
+ printk("scsi%d : arbitration complete\n", instance->host_no);
+/* Avoid GCC 2.4.5 asm needs too many reloads error */
+ __asm__("nop");
+#endif
+
+ /*
+ * The arbitration delay is 2.2us, but this is a minimum and there is
+ * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
+ * the integral nature of udelay().
+ *
+ */
+
+ udelay(3);
+
+ /* Check for lost arbitration */
+ if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
+ (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
+ (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
+ NCR5380_write(MODE_REG, MR_BASE);
+#if (NDEBUG & NDEBUG_ARBITRATION)
+ printk("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n",
+ instance->host_no);
+#endif
+ return -1;
+ }
+
+
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL);
+
+ if (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) {
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+#if (NDEBUG & NDEBUG_ARBITRATION)
+ printk("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n",
+ instance->host_no);
+#endif
+ return -1;
+ }
+
+ /*
+ * Again, bus clear + bus settle time is 1.2us, however, this is
+ * a minimum so we'll udelay ceil(1.2)
+ */
+
+ udelay(2);
+
+#if (NDEBUG & NDEBUG_ARBITRATION)
+ printk("scsi%d : won arbitration\n", instance->host_no);
+#endif
+
+
+ /*
+ * Now that we have won arbitration, start Selection process, asserting
+ * the host and target ID's on the SCSI bus.
+ */
+
+ NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->target)));
+
+ /*
+ * Raise ATN while SEL is true before BSY goes false from arbitration,
+ * since this is the only way to guarantee that we'll get a MESSAGE OUT
+ * phase immediately after selection.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY |
+ ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL ));
+ NCR5380_write(MODE_REG, MR_BASE);
+
+ /*
+ * Reselect interrupts must be turned off prior to the dropping of BSY,
+ * otherwise we will trigger an interrupt.
+ */
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+
+ /*
+ * The initiator shall then wait at least two deskew delays and release
+ * the BSY signal.
+ */
+ udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */
+
+ /* Reset BSY */
+ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
+ ICR_ASSERT_ATN | ICR_ASSERT_SEL));
+
+ /*
+ * Something weird happens when we cease to drive BSY - looks
+ * like the board/chip is letting us do another read before the
+ * appropriate propagation delay has expired, and we're confusing
+ * a BSY signal from ourselves as the target's response to SELECTION.
+ *
+ * A small delay (the 'C++' frontend breaks the pipeline with an
+ * unnecessary jump, making it work on my 386-33/Trantor T128, the
+ * tighter 'C' code breaks and requires this) solves the problem -
+ * the 1 us delay is arbitrary, and only used because this delay will
+ * be the same on other platforms and since it works here, it should
+ * work there.
+ *
+ * wingel suggests that this could be due to failing to wait
+ * one deskew delay.
+ */
+
+ udelay(1);
+
+#if (NDEBUG & NDEBUG_SELECTION)
+ printk("scsi%d : selecting target %d\n", instance->host_no, cmd->target);
+#endif
+
+ /*
+ * The SCSI specification calls for a 250 ms timeout for the actual
+ * selection.
+ */
+
+ timeout = jiffies + 25;
+
+ /*
+ * XXX very interesting - we're seeing a bounce where the BSY we
+ * asserted is being reflected / still asserted (propagation delay?)
+ * and it's detecting as true. Sigh.
+ */
+
+ while ((jiffies < timeout) && !(NCR5380_read(STATUS_REG) &
+ (SR_BSY | SR_IO)));
+
+ if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) ==
+ (SR_SEL | SR_IO)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_reselect(instance);
+ printk ("scsi%d : reselection after won arbitration?\n",
+ instance->host_no);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+
+ /*
+ * No less than two deskew delays after the initiator detects the
+ * BSY signal is true, it shall release the SEL signal and may
+ * change the DATA BUS. -wingel
+ */
+
+ udelay(1);
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+
+ if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ if (hostdata->targets_present & (1 << cmd->target)) {
+ printk("scsi%d : weirdness\n", instance->host_no);
+ if (hostdata->restart_select)
+ printk("\trestart select\n");
+#ifdef NDEBUG
+ NCR5380_print (instance);
+#endif
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+ cmd->result = DID_BAD_TARGET << 16;
+ cmd->scsi_done(cmd);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+#if (NDEBUG & NDEBUG_SELECTION)
+ printk("scsi%d : target did not respond within 250ms\n",
+ instance->host_no);
+#endif
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return 0;
+ }
+
+ hostdata->targets_present |= (1 << cmd->target);
+
+ /*
+ * Since we followed the SCSI spec, and raised ATN while SEL
+ * was true but before BSY was false during selection, the information
+ * transfer phase should be a MESSAGE OUT phase so that we can send the
+ * IDENTIFY message.
+ *
+ * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
+ * message (2 bytes) with a tag ID that we increment with every command
+ * until it wraps back to 0.
+ *
+ * XXX - it turns out that there are some broken SCSI-II devices,
+ * which claim to support tagged queuing but fail when more than
+ * some number of commands are issued at once.
+ */
+
+ /* Wait for start of REQ/ACK handshake */
+#ifdef NCR_TIMEOUT
+ {
+ unsigned long timeout = jiffies + NCR_TIMEOUT;
+
+ while (!(NCR5380_read(STATUS_REG) & SR_REQ) && jiffies < timeout);
+
+ if (jiffies >= timeout) {
+      printk("scsi%d: timeout at NCR5380.c:%d\n", instance->host_no, __LINE__);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+ }
+#else /* NCR_TIMEOUT */
+ while (!(NCR5380_read(STATUS_REG) & SR_REQ));
+#endif /* def NCR_TIMEOUT */
+
+#if (NDEBUG & NDEBUG_SELECTION)
+ printk("scsi%d : target %d selected, going into MESSAGE OUT phase.\n",
+ instance->host_no, cmd->target);
+#endif
+ tmp[0] = IDENTIFY(((instance->irq == IRQ_NONE) ? 0 : 1), cmd->lun);
+#ifdef SCSI2
+ if (cmd->device->tagged_queue && (tag != TAG_NONE)) {
+ tmp[1] = SIMPLE_QUEUE_TAG;
+ if (tag == TAG_NEXT) {
+ /* 0 is TAG_NONE, used to imply no tag for this command */
+ if (cmd->device->current_tag == 0)
+ cmd->device->current_tag = 1;
+
+ cmd->tag = cmd->device->current_tag;
+ cmd->device->current_tag++;
+ } else
+ cmd->tag = (unsigned char) tag;
+
+ tmp[2] = cmd->tag;
+ hostdata->last_message = SIMPLE_QUEUE_TAG;
+ len = 3;
+ } else
+#endif /* def SCSI2 */
+ {
+ len = 1;
+ cmd->tag=0;
+ }
+
+ /* Send message(s) */
+ data = tmp;
+ phase = PHASE_MSGOUT;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+#if (NDEBUG & NDEBUG_SELECTION)
+ printk("scsi%d : nexus established.\n", instance->host_no);
+#endif
+ /* XXX need to handle errors here */
+ hostdata->connected = cmd;
+#ifdef SCSI2
+ if (!cmd->device->tagged_queue)
+#endif
+ hostdata->busy[cmd->target] |= (1 << cmd->lun);
+
+ initialize_SCp(cmd);
+
+
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
+ * unsigned char *phase, int *count, unsigned char **data)
+ *
+ * Purpose : transfers data in given phase using polled I/O
+ *
+ * Inputs : instance - instance of driver, *phase - pointer to
+ * what phase is expected, *count - pointer to number of
+ * bytes to transfer, **data - pointer to data pointer.
+ *
+ * Returns : -1 when different phase is entered without transferring
+ * 	maximum number of bytes, 0 if all bytes are transferred or exit
+ * is in same phase.
+ *
+ * Also, *phase, *count, *data are modified in place.
+ *
+ * XXX Note : handling for bus free may be useful.
+ */
+
+/*
+ * Note : this code is not as quick as it could be, however it
+ * IS 100% reliable, and for the actual data transfer where speed
+ * counts, we will always do a pseudo DMA or DMA transfer.
+ */
+
+static int NCR5380_transfer_pio (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data) {
+ NCR5380_local_declare();
+ register unsigned char p = *phase, tmp;
+ register int c = *count;
+ register unsigned char *d = *data;
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_PIO)
+ if (!(p & SR_IO))
+ printk("scsi%d : pio write %d bytes\n", instance->host_no, c);
+ else
+ printk("scsi%d : pio read %d bytes\n", instance->host_no, c);
+#endif
+
+ /*
+ * The NCR5380 chip will only drive the SCSI bus when the
+ * phase specified in the appropriate bits of the TARGET COMMAND
+ * REGISTER match the STATUS REGISTER
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
+
+ do {
+ /*
+ * Wait for assertion of REQ, after which the phase bits will be
+ * valid
+ */
+ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
+
+#if (NDEBUG & NDEBUG_HANDSHAKE)
+ printk("scsi%d : REQ detected\n", instance->host_no);
+#endif
+
+ /* Check for phase mismatch */
+ if ((tmp & PHASE_MASK) != p) {
+#if (NDEBUG & NDEBUG_PIO)
+ printk("scsi%d : phase mismatch\n", instance->host_no);
+ NCR5380_print_phase(instance);
+#endif
+ break;
+ }
+
+ /* Do actual transfer from SCSI bus to / from memory */
+ if (!(p & SR_IO))
+ NCR5380_write(OUTPUT_DATA_REG, *d);
+ else
+ *d = NCR5380_read(CURRENT_SCSI_DATA_REG);
+
+ ++d;
+
+ /*
+ * The SCSI standard suggests that in MSGOUT phase, the initiator
+ * should drop ATN on the last byte of the message phase
+ * after REQ has been asserted for the handshake but before
+ * the initiator raises ACK.
+ */
+
+ if (!(p & SR_IO)) {
+ if (!((p & SR_MSG) && c > 1)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA);
+#if (NDEBUG & NDEBUG_PIO)
+ NCR5380_print(instance);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ACK);
+ } else {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ATN);
+#if (NDEBUG & NDEBUG_PIO)
+ NCR5380_print(instance);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
+ }
+ } else {
+#if (NDEBUG & NDEBUG_PIO)
+ NCR5380_print(instance);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
+ }
+
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+
+#if (NDEBUG & NDEBUG_HANDSHAKE)
+ printk("scsi%d : req false, handshake complete\n", instance->host_no);
+#endif
+
+/*
+ * We have several special cases to consider during REQ/ACK handshaking :
+ * 1. We were in MSGOUT phase, and we are on the last byte of the
+ * message. ATN must be dropped as ACK is dropped.
+ *
+ * 2. We are in a MSGIN phase, and we are on the last byte of the
+ * message. We must exit with ACK asserted, so that the calling
+ * code may raise ATN before dropping ACK to reject the message.
+ *
+ * 3. ACK and ATN are clear and the target may proceed as normal.
+ */
+ if (!(p == PHASE_MSGIN && c == 1)) {
+ if (p == PHASE_MSGOUT && c > 1)
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ else
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ }
+ } while (--c);
+
+#if (NDEBUG & NDEBUG_PIO)
+ printk("scsi%d : residual %d\n", instance->host_no, c);
+#endif
+
+ *count = c;
+ *data = d;
+ tmp = NCR5380_read(STATUS_REG);
+ if (tmp & SR_REQ)
+ *phase = tmp & PHASE_MASK;
+ else
+ *phase = PHASE_UNKNOWN;
+
+ if (!c || (*phase == p))
+ return 0;
+ else
+ return -1;
+}
+
+static void do_reset (struct Scsi_Host *host) {
+ NCR5380_local_declare();
+ NCR5380_setup(host);
+
+ cli();
+ NCR5380_write(TARGET_COMMAND_REG,
+ PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
+ udelay(25);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ sti();
+}
+
+/*
+ * Function : do_abort (Scsi_Host *host)
+ *
+ * Purpose : abort the currently established nexus. Should only be
+ * called from a routine which can drop into a
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+static int do_abort (struct Scsi_Host *host) {
+ NCR5380_local_declare();
+ unsigned char tmp, *msgptr, phase;
+ int len;
+ NCR5380_setup(host);
+
+
+ /* Request message out phase */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+
+ /*
+ * Wait for the target to indicate a valid phase by asserting
+ * REQ. Once this happens, we'll have either a MSGOUT phase
+ * and can immediately send the ABORT message, or we'll have some
+ * other phase and will have to source/sink data.
+ *
+ * We really don't care what value was on the bus or what value
+ * the target sees, so we just handshake.
+ */
+
+    while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
+
+ if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
+ ICR_ASSERT_ACK);
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ }
+
+ tmp = ABORT;
+ msgptr = &tmp;
+ len = 1;
+ phase = PHASE_MSGOUT;
+ NCR5380_transfer_pio (host, &phase, &len, &msgptr);
+
+ /*
+ * If we got here, and the command completed successfully,
+ * we're about to go into bus free state.
+ */
+
+ return len ? -1 : 0;
+}
+
+#if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL)
+/*
+ * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
+ * unsigned char *phase, int *count, unsigned char **data)
+ *
+ * Purpose : transfers data in given phase using either real
+ * or pseudo DMA.
+ *
+ * Inputs : instance - instance of driver, *phase - pointer to
+ * what phase is expected, *count - pointer to number of
+ * bytes to transfer, **data - pointer to data pointer.
+ *
+ * Returns : -1 when different phase is entered without transferring
+ * 	maximum number of bytes, 0 if all bytes are transferred or exit
+ * is in same phase.
+ *
+ * Also, *phase, *count, *data are modified in place.
+ *
+ */
+
+
+static int NCR5380_transfer_dma (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data) {
+ NCR5380_local_declare();
+ register int c = *count;
+ register unsigned char p = *phase;
+ register unsigned char *d = *data;
+ unsigned char tmp;
+ int foo;
+#if defined(REAL_DMA_POLL)
+ int cnt, toPIO;
+ unsigned char saved_data = 0, overrun = 0, residue;
+#endif
+
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+
+ NCR5380_setup(instance);
+
+ if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
+ *phase = tmp;
+ return -1;
+ }
+#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
+#ifdef READ_OVERRUNS
+ if (p & SR_IO) {
+ c -= 2;
+ }
+#endif
+#if (NDEBUG & NDEBUG_DMA)
+ printk("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n",
+ instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" :
+ "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d);
+#endif
+    hostdata->dmalen = (p & SR_IO) ?
+ NCR5380_dma_read_setup(instance, d, c) :
+ NCR5380_dma_write_setup(instance, d, c);
+#endif
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
+
+#ifdef REAL_DMA
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
+#elif defined(REAL_DMA_POLL)
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
+#else
+ /*
+ * Note : on my sample board, watch-dog timeouts occurred when interrupts
+ * were not disabled for the duration of a single DMA transfer, from
+ * before the setting of DMA mode to after transfer of the last byte.
+ */
+
+#if defined(PSEUDO_DMA) && !defined(UNSAFE)
+ cli();
+#endif
+ /* KLL May need eop and parity in 53c400 */
+ if (hostdata->flags & FLAG_NCR53C400)
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_PAR_CHECK
+ | MR_ENABLE_PAR_INTR | MR_ENABLE_EOP_INTR | MR_DMA_MODE
+ | MR_MONITOR_BSY);
+ else
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
+#endif /* def REAL_DMA */
+
+#if (NDEBUG & NDEBUG_DMA) & 0
+ printk("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));
+#endif
+
+/*
+ * FOO stuff. For some UNAPPARENT reason, I'm getting
+ * watchdog timers fired on bootup for NO APPARENT REASON, meaning it's
+ * probably a timing problem.
+ *
+ * Since this is the only place I have back-to-back writes, perhaps this
+ * is the problem?
+ */
+
+ if (p & SR_IO) {
+#ifndef FOO
+ udelay(1);
+#endif
+ NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
+ } else {
+#ifndef FOO
+ udelay(1);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
+#ifndef FOO
+ udelay(1);
+#endif
+ NCR5380_write(START_DMA_SEND_REG, 0);
+#ifndef FOO
+ udelay(1);
+#endif
+ }
+
+#if defined(REAL_DMA_POLL)
+ do {
+ tmp = NCR5380_read(BUS_AND_STATUS_REG);
+ } while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR |
+ BASR_END_DMA_TRANSFER)));
+
+/*
+ At this point, either we've completed DMA, or we have a phase mismatch,
+ or we've unexpectedly lost BUSY (which is a real error).
+
+ For write DMAs, we want to wait until the last byte has been
+ transferred out over the bus before we turn off DMA mode. Alas, there
+ seems to be no terribly good way of doing this on a 5380 under all
+ conditions. For non-scatter-gather operations, we can wait until REQ
+ and ACK both go false, or until a phase mismatch occurs. Gather-writes
+ are nastier, since the device will be expecting more data than we
+ are prepared to send it, and REQ will remain asserted. On a 53C8[01] we
+  could test LAST BYTE SENT to assure transfer (I imagine this is precisely
+ why this signal was added to the newer chips) but on the older 538[01]
+ this signal does not exist. The workaround for this lack is a watchdog;
+ we bail out of the wait-loop after a modest amount of wait-time if
+ the usual exit conditions are not met. Not a terribly clean or
+ correct solution :-%
+
+ Reads are equally tricky due to a nasty characteristic of the NCR5380.
+ If the chip is in DMA mode for an READ, it will respond to a target's
+ REQ by latching the SCSI data into the INPUT DATA register and asserting
+ ACK, even if it has _already_ been notified by the DMA controller that
+ the current DMA transfer has completed! If the NCR5380 is then taken
+ out of DMA mode, this already-acknowledged byte is lost.
+
+ This is not a problem for "one DMA transfer per command" reads, because
+ the situation will never arise... either all of the data is DMA'ed
+ properly, or the target switches to MESSAGE IN phase to signal a
+ disconnection (either operation bringing the DMA to a clean halt).
+ However, in order to handle scatter-reads, we must work around the
+ problem. The chosen fix is to DMA N-2 bytes, then check for the
+ condition before taking the NCR5380 out of DMA mode. One or two extra
+ bytes are transferred via PIO as necessary to fill out the original
+ request.
+*/
+
+ if (p & SR_IO) {
+#ifdef READ_OVERRUNS
+ udelay(10);
+ if (((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH|BASR_ACK)) ==
+ (BASR_PHASE_MATCH | BASR_ACK))) {
+ saved_data = NCR5380_read(INPUT_DATA_REG);
+ overrun = 1;
+ }
+#endif
+ } else {
+ int limit = 100;
+ while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) ||
+ (NCR5380_read(STATUS_REG) & SR_REQ)) {
+ if (!(tmp & BASR_PHASE_MATCH)) break;
+ if (--limit < 0) break;
+ }
+ }
+
+
+#if (NDEBUG & NDEBUG_DMA)
+ printk("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n",
+ instance->host_no, tmp, NCR5380_read(STATUS_REG));
+#endif
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ residue = NCR5380_dma_residual(instance);
+ c -= residue;
+ *count -= c;
+ *data += c;
+ *phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
+
+#ifdef READ_OVERRUNS
+ if (*phase == p && (p & SR_IO) && residue == 0) {
+ if (overrun) {
+#if (NDEBUG & NDEBUG_DMA)
+ printk("Got an input overrun, using saved byte\n");
+#endif
+ **data = saved_data;
+ *data += 1;
+ *count -= 1;
+ cnt = toPIO = 1;
+ } else {
+ printk("No overrun??\n");
+ cnt = toPIO = 2;
+ }
+#if (NDEBUG & NDEBUG_DMA)
+ printk("Doing %d-byte PIO to 0x%X\n", cnt, *data);
+#endif
+ NCR5380_transfer_pio(instance, phase, &cnt, data);
+ *count -= toPIO - cnt;
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_DMA)
+ printk("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n",
+ *data, *count, *(*data+*count-1), *(*data+*count));
+#endif
+ return 0;
+
+#elif defined(REAL_DMA)
+ return 0;
+#else /* defined(REAL_DMA_POLL) */
+ if (p & SR_IO) {
+ int diff = 1;
+ if (hostdata->flags & FLAG_NCR53C400) {
+ diff=0;
+ }
+
+ if (!(foo = NCR5380_pread(instance, d, c - diff))) {
+ /*
+ * We can't disable DMA mode after successfully transferring
+ * what we plan to be the last byte, since that would open up
+ * a race condition where if the target asserted REQ before
+ * we got the DMA mode reset, the NCR5380 would have latched
+ * an additional byte into the INPUT DATA register and we'd
+ * have dropped it.
+ *
+ * The workaround was to transfer one fewer bytes than we
+ * intended to with the pseudo-DMA read function, wait for
+ * the chip to latch the last byte, read it, and then disable
+ * pseudo-DMA mode.
+ *
+ * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
+ * REQ is deasserted when ACK is asserted, and not reasserted
+ * until ACK goes false. Since the NCR5380 won't lower ACK
+ * until DACK is asserted, which won't happen unless we twiddle
+ * the DMA port or we take the NCR5380 out of DMA mode, we
+ * can guarantee that we won't handshake another extra
+ * byte.
+ */
+
+ if (!(hostdata->flags & FLAG_NCR53C400)) {
+ while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ));
+ /* Wait for clean handshake */
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+ d[c - 1] = NCR5380_read(INPUT_DATA_REG);
+ }
+ }
+ } else {
+ int timeout;
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("About to pwrite %d bytes\n", c);
+#endif
+ if (!(foo = NCR5380_pwrite(instance, d, c))) {
+ /*
+ * Wait for the last byte to be sent. If REQ is being asserted for
+ * the byte we're interested, we'll ACK it and it will go false.
+ */
+ if (!(hostdata->flags & FLAG_HAS_LAST_BYTE_SENT)) {
+ timeout = 20000;
+#if 1
+#if 1
+ while (!(NCR5380_read(BUS_AND_STATUS_REG) &
+ BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) &
+ BASR_PHASE_MATCH));
+#else
+ if (NCR5380_read(STATUS_REG) & SR_REQ) {
+ for (; timeout &&
+ !(NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK);
+ --timeout);
+ for (; timeout && (NCR5380_read(STATUS_REG) & SR_REQ);
+ --timeout);
+ }
+#endif
+
+
+#if (NDEBUG & NDEBUG_LAST_BYTE_SENT)
+ if (!timeout)
+ printk("scsi%d : timed out on last byte\n",
+ instance->host_no);
+#endif
+
+
+ if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) {
+ hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT;
+ if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) {
+ hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT;
+#if (NDEBUG & NDEBUG_LAST_BYTE_SENT)
+ printk("scsi%d : last bit sent works\n",
+ instance->host_no);
+#endif
+ }
+ }
+ } else {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("Waiting for LASTBYTE\n");
+#endif
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT));
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("Got LASTBYTE\n");
+#endif
+ }
+#else
+ udelay (5);
+#endif
+ }
+ }
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: Checking for IRQ\n");
+#endif
+ if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got it, reading reset interupt reg\n");
+#endif
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else {
+ printk("53C400w: IRQ NOT THERE!\n");
+ }
+ }
+
+ *data = d + c;
+ *count = 0;
+ *phase = (NCR5380_read(STATUS_REG) & PHASE_MASK);
+#if 0
+ NCR5380_print_phase(instance);
+#endif
+#if defined(PSEUDO_DMA) && !defined(UNSAFE)
+ sti();
+#endif /* defined(PSEUDO_DMA) && !defined(UNSAFE) */
+ return foo;
+#endif /* def REAL_DMA */
+}
+#endif /* defined(REAL_DMA) | defined(PSEUDO_DMA) */
+
+/*
+ * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
+ *
+ * Purpose : run through the various SCSI phases and do as the target
+ * directs us to. Operates on the currently connected command,
+ * instance->connected.
+ *
+ * Inputs : instance, instance for which we are doing commands
+ *
+ * Side effects : SCSI things happen, the disconnected queue will be
+ * modified if a command disconnects, *instance->connected will
+ * change.
+ *
+ * XXX Note : we need to watch for a bus free or reset condition here
+ * so we can recover from an unexpected bus free.
+ */
+
+static void NCR5380_information_transfer (struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ unsigned char msgout = NOP;
+ int sink = 0;
+ int len;
+#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
+ int transfersize;
+#endif
+ unsigned char *data;
+ unsigned char phase, tmp, extended_msg[10], old_phase=0xff;
+ Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected;
+ NCR5380_setup(instance);
+
+ while (1) {
+ tmp = NCR5380_read(STATUS_REG);
+ /* We only have a valid SCSI phase when REQ is asserted */
+ if (tmp & SR_REQ) {
+ phase = (tmp & PHASE_MASK);
+ if (phase != old_phase) {
+ old_phase = phase;
+#if (NDEBUG & NDEBUG_INFORMATION)
+ NCR5380_print_phase(instance);
+#endif
+ }
+
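+ /*
+  * "sink" is set after an error: the next REQ seen in any phase other
+  * than MESSAGE OUT is answered by mirroring the target's phase in the
+  * TCR and handshaking one throwaway byte with ATN asserted, which
+  * nudges the target towards MESSAGE OUT.
+  */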
+ if (sink && (phase != PHASE_MSGOUT)) {
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
+ ICR_ASSERT_ACK);
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_ATN);
+ sink = 0;
+ continue;
+ }
+
+ switch (phase) {
+ case PHASE_DATAIN:
+ case PHASE_DATAOUT:
+#if (NDEBUG & NDEBUG_NO_DATAOUT)
+ printk("scsi%d : NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n",
+ instance->host_no);
+ sink = 1;
+ do_abort(instance);
+ cmd->result = DID_ERROR << 16;
+ cmd->done(cmd);
+ return;
+#endif
+ /*
+ * If there is no room left in the current buffer in the
+ * scatter-gather list, move onto the next one.
+ */
+
+ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+#if (NDEBUG & NDEBUG_INFORMATION)
+ printk("scsi%d : %d bytes and %d buffers left\n",
+ instance->host_no, cmd->SCp.this_residual,
+ cmd->SCp.buffers_residual);
+#endif
+ }
+
+ /*
+ * The preferred transfer method is going to be
+ * PSEUDO-DMA for systems that are strictly PIO,
+ * since we can let the hardware do the handshaking.
+ *
+ * For this to work, we need to know the transfersize
+ * ahead of time, since the pseudo-DMA code will sit
+ * in an unconditional loop.
+ */
+
+#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
+ /* KLL
+ * PSEUDO_DMA is defined here. If this is the g_NCR5380
+ * driver then it will always be defined, so the
+ * FLAG_NO_PSEUDO_DMA is used to inhibit PDMA in the base
+ * NCR5380 case. I think this is a fairly clean solution.
+ * We supplement these 2 if's with the flag.
+ */
+#ifdef NCR5380_dma_xfer_len
+ if (!cmd->device->borken &&
+ !(hostdata->flags & FLAG_NO_PSEUDO_DMA) &&
+ (transfersize = NCR5380_dma_xfer_len(instance, cmd)) != 0) {
+#else
+ transfersize = cmd->transfersize;
+
+#ifdef LIMIT_TRANSFERSIZE /* If we have problems with interrupt service */
+ if( transfersize > 512 )
+ transfersize = 512;
+#endif /* LIMIT_TRANSFERSIZE */
+
+ if (!cmd->device->borken && transfersize &&
+ !(hostdata->flags & FLAG_NO_PSEUDO_DMA) &&
+ cmd->SCp.this_residual && !(cmd->SCp.this_residual %
+ transfersize)) {
+#endif
+ len = transfersize;
+ if (NCR5380_transfer_dma(instance, &phase,
+ &len, (unsigned char **) &cmd->SCp.ptr)) {
+ /*
+ * If the watchdog timer fires, all future accesses to this
+ * device will use the polled-IO.
+ */
+ printk("scsi%d : switching target %d lun %d to slow handshake\n",
+ instance->host_no, cmd->target, cmd->lun);
+ cmd->device->borken = 1;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_ATN);
+ sink = 1;
+ do_abort(instance);
+ cmd->result = DID_ERROR << 16;
+ cmd->done(cmd);
+ /* XXX - need to source or sink data here, as appropriate */
+ } else
+ cmd->SCp.this_residual -= transfersize - len;
+ } else
+#endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
+ NCR5380_transfer_pio(instance, &phase,
+ (int *) &cmd->SCp.this_residual, (unsigned char **)
+ &cmd->SCp.ptr);
+ break;
+ case PHASE_MSGIN:
+ len = 1;
+ data = &tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ cmd->SCp.Message = tmp;
+
+ switch (tmp) {
+ /*
+ * Linking lets us reduce the time required to get the
+ * next command out to the device, hopefully this will
+ * mean we don't waste another revolution due to the delays
+ * required by ARBITRATION and another SELECTION.
+ *
+ * In the current implementation proposal, low level drivers
+ * merely have to start the next command, pointed to by
+ * next_link, done() is called as with unlinked commands.
+ */
+#ifdef LINKED
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+#if (NDEBUG & NDEBUG_LINKED)
+ printk("scsi%d : target %d lun %d linked command complete.\n",
+ instance->host_no, cmd->target, cmd->lun);
+#endif
+ /*
+ * Sanity check : A linked command should only terminate with
+ * one of these messages if there are more linked commands
+ * available.
+ */
+
+ if (!cmd->next_link) {
+ printk("scsi%d : target %d lun %d linked command complete, no next_link\n"
+ instance->host_no, cmd->target, cmd->lun);
+ sink = 1;
+ do_abort (instance);
+ return;
+ }
+
+ initialize_SCp(cmd->next_link);
+ /* The next command is still part of this process */
+ cmd->next_link->tag = cmd->tag;
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+#if (NDEBUG & NDEBUG_LINKED)
+ printk("scsi%d : target %d lun %d linked request done, calling scsi_done().\n",
+ instance->host_no, cmd->target, cmd->lun);
+#endif
+ cmd->scsi_done(cmd);
+ cmd = hostdata->connected;
+ break;
+#endif /* def LINKED */
+ case ABORT:
+ case COMMAND_COMPLETE:
+ /* Accept message by clearing ACK */
+ sink = 1;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ hostdata->connected = NULL;
+#if (NDEBUG & NDEBUG_QUEUES)
+ printk("scsi%d : command for target %d, lun %d completed\n",
+ instance->host_no, cmd->target, cmd->lun);
+#endif
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+
+ /*
+ * I'm not sure what the correct thing to do here is :
+ *
+ * If the command that just executed is NOT a request
+ * sense, the obvious thing to do is to set the result
+ * code to the values of the stored parameters.
+ *
+ * If it was a REQUEST SENSE command, we need some way
+ * to differentiate between the failure code of the original
+ * and the failure code of the REQUEST sense - the obvious
+ * case is success, where we fall through and leave the result
+ * code unchanged.
+ *
+ * The non-obvious place is where the REQUEST SENSE failed
+ */
+
+ if (cmd->cmnd[0] != REQUEST_SENSE)
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ else if (cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+
+#ifdef AUTOSENSE
+ if ((cmd->cmnd[0] != REQUEST_SENSE) &&
+ (cmd->SCp.Status == CHECK_CONDITION)) {
+#if (NDEBUG & NDEBUG_AUTOSENSE)
+ printk("scsi%d : performing request sense\n",
+ instance->host_no);
+#endif
+ cmd->cmnd[0] = REQUEST_SENSE;
+ cmd->cmnd[1] &= 0xe0;
+ cmd->cmnd[2] = 0;
+ cmd->cmnd[3] = 0;
+ cmd->cmnd[4] = sizeof(cmd->sense_buffer);
+ cmd->cmnd[5] = 0;
+
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *) cmd->sense_buffer;
+ cmd->SCp.this_residual = sizeof(cmd->sense_buffer);
+
+ cli();
+ LIST(cmd,hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *)
+ hostdata->issue_queue;
+ hostdata->issue_queue = (Scsi_Cmnd *) cmd;
+ sti();
+#if (NDEBUG & NDEBUG_QUEUES)
+ printk("scsi%d : REQUEST SENSE added to head of issue queue\n",instance->host_no);
+#endif
+ } else
+#endif /* def AUTOSENSE */
+ cmd->scsi_done(cmd);
+
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ /*
+ * Restore phase bits to 0 so an interrupted selection,
+ * arbitration can resume.
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
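+ /*
+  * Wait for the target to release BSY (bus free) before returning to
+  * the main loop, unless a reselection has already set up a new
+  * connected command.
+  */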
+ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
+ barrier();
+ return;
+ case MESSAGE_REJECT:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ switch (hostdata->last_message) {
+ case HEAD_OF_QUEUE_TAG:
+ case ORDERED_QUEUE_TAG:
+ case SIMPLE_QUEUE_TAG:
+ cmd->device->tagged_queue = 0;
+ hostdata->busy[cmd->target] |= (1 << cmd->lun);
+ break;
+ default:
+ break;
+ }
+ case DISCONNECT:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ cmd->device->disconnect = 1;
+ cli();
+ LIST(cmd,hostdata->disconnected_queue);
+ cmd->host_scribble = (unsigned char *)
+ hostdata->disconnected_queue;
+ hostdata->connected = NULL;
+ hostdata->disconnected_queue = cmd;
+ sti();
+#if (NDEBUG & NDEBUG_QUEUES)
+ printk("scsi%d : command for target %d lun %d was moved from connected to"
+ " the disconnected_queue\n", instance->host_no,
+ cmd->target, cmd->lun);
+#endif
+ /*
+ * Restore phase bits to 0 so an interrupted selection,
+ * arbitration can resume.
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+ /* Enable reselect interrupts */
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ /* Wait for bus free to avoid nasty timeouts */
+ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
+ barrier();
+#if 0
+ NCR5380_print_status(instance);
+#endif
+ return;
+ /*
+ * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
+ * operation, in violation of the SCSI spec so we can safely
+ * ignore SAVE/RESTORE pointers calls.
+ *
+ * Unfortunately, some disks violate the SCSI spec and
+ * don't issue the required SAVE_POINTERS message before
+ * disconnecting, and we have to break spec to remain
+ * compatible.
+ */
+ case SAVE_POINTERS:
+ case RESTORE_POINTERS:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ break;
+ case EXTENDED_MESSAGE:
+/*
+ * Extended messages are sent in the following format :
+ * Byte
+ * 0 EXTENDED_MESSAGE == 1
+ * 1 length (includes one byte for code, doesn't
+ * include first two bytes)
+ * 2 code
+ * 3..length+1 arguments
+ *
+ * Start the extended message buffer with the EXTENDED_MESSAGE
+ * byte, since print_msg() wants the whole thing.
+ */
+ extended_msg[0] = EXTENDED_MESSAGE;
+ /* Accept first byte by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+#if (NDEBUG & NDEBUG_EXTENDED)
+ printk("scsi%d : receiving extended message\n",
+ instance->host_no);
+#endif
+
+ len = 2;
+ data = extended_msg + 1;
+ phase = PHASE_MSGIN;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+#if (NDEBUG & NDEBUG_EXTENDED)
+ printk("scsi%d : length=%d, code=0x%02x\n",
+ instance->host_no, (int) extended_msg[1],
+ (int) extended_msg[2]);
+#endif
+
+ if (!len && extended_msg[1] <=
+ (sizeof (extended_msg) - 1)) {
+ /* Accept third byte by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ len = extended_msg[1] - 1;
+ data = extended_msg + 3;
+ phase = PHASE_MSGIN;
+
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+#if (NDEBUG & NDEBUG_EXTENDED)
+ printk("scsi%d : message received, residual %d\n",
+ instance->host_no, len);
+#endif
+
+ switch (extended_msg[2]) {
+ case EXTENDED_SDTR:
+ case EXTENDED_WDTR:
+ case EXTENDED_MODIFY_DATA_POINTER:
+ case EXTENDED_EXTENDED_IDENTIFY:
+ tmp = 0;
+ }
+ } else if (len) {
+ printk("scsi%d: error receiving extended message\n",
+ instance->host_no);
+ tmp = 0;
+ } else {
+ printk("scsi%d: extended message code %02x length %d is too long\n",
+ instance->host_no, extended_msg[2], extended_msg[1]);
+ tmp = 0;
+ }
+ /* Fall through to reject message */
+
+ /*
+ * If we get something weird that we aren't expecting,
+ * reject it.
+ */
+ default:
+ if (!tmp) {
+ printk("scsi%d: rejecting message ", instance->host_no);
+ print_msg (extended_msg);
+ printk("\n");
+ } else if (tmp != EXTENDED_MESSAGE)
+ printk("scsi%d: rejecting unknown message %02x from target %d, lun %d\n",
+ instance->host_no, tmp, cmd->target, cmd->lun);
+ else
+ printk("scsi%d: rejecting unknown extended message code %02x, length %d from target %d, lun %d\n",
+ instance->host_no, extended_msg[1], extended_msg[0], cmd->target, cmd->lun);
+
+ msgout = MESSAGE_REJECT;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_ATN);
+ break;
+ } /* switch (tmp) */
+ break;
+ case PHASE_MSGOUT:
+ len = 1;
+ data = &msgout;
+ hostdata->last_message = msgout;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ if (msgout == ABORT) {
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->connected = NULL;
+ cmd->result = DID_ERROR << 16;
+ cmd->scsi_done(cmd);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return;
+ }
+ msgout = NOP;
+ break;
+ case PHASE_CMDOUT:
+ len = cmd->cmd_len;
+ data = cmd->cmnd;
+ /*
+ * XXX for performance reasons, on machines with a
+ * PSEUDO-DMA architecture we should probably
+ * use the dma transfer function.
+ */
+ NCR5380_transfer_pio(instance, &phase, &len,
+ &data);
+#ifdef USLEEP
+ if (!disconnect && should_disconnect(cmd->cmnd[0])) {
+ hostdata->time_expires = jiffies + USLEEP_SLEEP;
+#if (NDEBUG & NDEBUG_USLEEP)
+ printk("scsi%d : issued command, sleeping until %ul\n", instance->host_no,
+ hostdata->time_expires);
+#endif
+ NCR5380_set_timer (instance);
+ return;
+ }
+#endif /* def USLEEP */
+ break;
+ case PHASE_STATIN:
+ len = 1;
+ data = &tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ cmd->SCp.Status = tmp;
+ break;
+ default:
+ printk("scsi%d : unknown phase\n", instance->host_no);
+#ifdef NDEBUG
+ NCR5380_print(instance);
+#endif
+ } /* switch(phase) */
+ } /* if (tmp & SR_REQ) */
+#ifdef USLEEP
+ else {
+ if (!disconnect && hostdata->time_expires && jiffies >
+ hostdata->time_expires) {
+ hostdata->time_expires = jiffies + USLEEP_SLEEP;
+#if (NDEBUG & NDEBUG_USLEEP)
+ printk("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no,
+ hostdata->time_expires);
+#endif
+ NCR5380_set_timer (instance);
+ return;
+ }
+ }
+#endif
+ } /* while (1) */
+}
+
+/*
+ * Function : void NCR5380_reselect (struct Scsi_Host *instance)
+ *
+ * Purpose : does reselection, initializing the instance->connected
+ * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q
+ * nexus has been reestablished,
+ *
+ * Inputs : instance - this instance of the NCR5380.
+ *
+ */
+
+
+static void NCR5380_reselect (struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ unsigned char target_mask;
+ unsigned char lun, phase;
+ int len;
+#ifdef SCSI2
+ unsigned char tag;
+#endif
+ unsigned char msg[3];
+ unsigned char *data;
+ Scsi_Cmnd *tmp = NULL, *prev;
+ int abort = 0;
+ NCR5380_setup(instance);
+
+ /*
+ * Disable arbitration, etc. since the host adapter obviously
+ * lost, and tell an interrupted NCR5380_select() to restart.
+ */
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ hostdata->restart_select = 1;
+
+ target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
+
+#if (NDEBUG & NDEBUG_RESELECTION)
+ printk("scsi%d : reselect\n", instance->host_no);
+#endif
+
+ /*
+ * At this point, we have detected that our SCSI ID is on the bus,
+ * SEL is true and BSY was false for at least one bus settle delay
+ * (400 ns).
+ *
+ * We must assert BSY ourselves, until the target drops the SEL
+ * signal.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
+
+ while (NCR5380_read(STATUS_REG) & SR_SEL);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ /*
+ * Wait for target to go into MSGIN.
+ */
+
+ while (!(NCR5380_read(STATUS_REG) & SR_REQ));
+
+ len = 1;
+ data = msg;
+ phase = PHASE_MSGIN;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+
+ if (!(msg[0] & 0x80)) {
+ printk("scsi%d : expecting IDENTIFY message, got ",
+ instance->host_no);
+ print_msg(msg);
+ abort = 1;
+ } else {
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ lun = (msg[0] & 0x07);
+
+ /*
+ * We need to add code for SCSI-II to track which devices have
+ * I_T_L_Q nexuses established, and which have simple I_T_L
+ * nexuses so we can chose to do additional data transfer.
+ */
+
+#ifdef SCSI2
+#error "SCSI-II tagged queueing is not supported yet"
+#endif
+
+ /*
+ * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
+ * just reestablished, and remove it from the disconnected queue.
+ */
+
+
+ for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL;
+ tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
+ if ((target_mask == (1 << tmp->target)) && (lun == tmp->lun)
+#ifdef SCSI2
+ && (tag == tmp->tag)
+#endif
+) {
+ if (prev) {
+ REMOVE(prev,prev->host_scribble,tmp,tmp->host_scribble);
+ prev->host_scribble = tmp->host_scribble;
+ } else {
+ REMOVE(-1,hostdata->disconnected_queue,tmp,tmp->host_scribble);
+ hostdata->disconnected_queue = (Scsi_Cmnd *) tmp->host_scribble;
+ }
+ tmp->host_scribble = NULL;
+ break;
+ }
+
+ if (!tmp) {
+#ifdef SCSI2
+ printk("scsi%d : warning : target bitmask %02x lun %d tag %d not in disconnect_queue.\n",
+ instance->host_no, target_mask, lun, tag);
+#else
+ printk("scsi%d : warning : target bitmask %02x lun %d not in disconnect_queue.\n",
+ instance->host_no, target_mask, lun);
+#endif
+ /*
+ * Since we have an established nexus that we can't do anything with,
+ * we must abort it.
+ */
+ abort = 1;
+ }
+ }
+
+ if (abort) {
+ do_abort (instance);
+ } else {
+ hostdata->connected = tmp;
+#if (NDEBUG & NDEBUG_RESELECTION)
+ printk("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n",
+ instance->host_no, tmp->target, tmp->lun, tmp->tag);
+#endif
+ }
+}
+
+/*
+ * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
+ *
+ * Purpose : called by interrupt handler when DMA finishes or a phase
+ * mismatch occurs (which would finish the DMA transfer).
+ *
+ * Inputs : instance - this instance of the NCR5380.
+ *
+ * Returns : nothing.
+ */
+
+#ifdef REAL_DMA
+static void NCR5380_dma_complete (struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ int transferred;
+ NCR5380_setup(instance);
+
+ /*
+ * XXX this might not be right.
+ *
+ * Wait for final byte to transfer, ie wait for ACK to go false.
+ *
+ * We should use the Last Byte Sent bit, unfortunately this is
+ * not available on the 5380/5381 (only the various CMOS chips)
+ */
+
+ while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK);
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ /*
+ * The only places we should see a phase mismatch and have to send
+ * data from the same set of pointers will be the data transfer
+ * phases. So, residual, requested length are only important here.
+ */
+
+ if (!(hostdata->connected->SCp.phase & SR_CD)) {
+ transferred = instance->dmalen - NCR5380_dma_residual();
+ hostdata->connected->SCp.this_residual -= transferred;
+ hostdata->connected->SCp.ptr += transferred;
+ }
+}
+#endif /* def REAL_DMA */
+
+/*
+ * Function : int NCR5380_abort (Scsi_Cmnd *cmd)
+ *
+ * Purpose : abort a command
+ *
+ * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
+ * host byte of the result field to, if zero DID_ABORTED is
+ * used.
+ *
+ * Returns : 0 - success, -1 on failure.
+ *
+ * XXX - there is no way to abort the command that is currently
+ * connected, you have to wait for it to complete. If this is
+ * a problem, we could implement longjmp() / setjmp(), setjmp()
+ * called where the loop started in NCR5380_main().
+ */
+
+#ifndef NCR5380_abort
+static
+#endif
+int NCR5380_abort (Scsi_Cmnd *cmd) {
+ NCR5380_local_declare();
+ struct Scsi_Host *instance = cmd->host;
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ Scsi_Cmnd *tmp, **prev;
+
+ printk("scsi%d : aborting command\n", instance->host_no);
+ print_Scsi_Cmnd (cmd);
+
+ NCR5380_print_status (instance);
+
+ printk("scsi%d : aborting command\n", instance->host_no);
+ print_Scsi_Cmnd (cmd);
+
+ NCR5380_print_status (instance);
+
+ cli();
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : abort called\n", instance->host_no);
+ printk(" basr 0x%X, sr 0x%X\n",
+ NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG));
+#endif
+
+#if 0
+/*
+ * Case 1 : If the command is the currently executing command,
+ * we'll set the aborted flag and return control so that
+ * information transfer routine can exit cleanly.
+ */
+
+ if (hostdata->connected == cmd) {
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : aborting connected command\n", instance->host_no);
+#endif
+ hostdata->aborted = 1;
+/*
+ * We should perform BSY checking, and make sure we haven't slipped
+ * into BUS FREE.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN);
+/*
+ * Since we can't change phases until we've completed the current
+ * handshake, we have to source or sink a byte of data if the current
+ * phase is not MSGOUT.
+ */
+
+/*
+ * Return control to the executing NCR drive so we can clear the
+ * aborted flag and get back into our main loop.
+ */
+
+ return 0;
+ }
+#endif
+
+/*
+ * Case 2 : If the command hasn't been issued yet, we simply remove it
+ * from the issue queue.
+ */
+#if (NDEBUG & NDEBUG_ABORT)
+ /* KLL */
+ printk("scsi%d : abort going into loop.\n", instance->host_no);
+#endif
+ for (prev = (Scsi_Cmnd **) &(hostdata->issue_queue),
+ tmp = (Scsi_Cmnd *) hostdata->issue_queue;
+ tmp; prev = (Scsi_Cmnd **) &(tmp->host_scribble), tmp =
+ (Scsi_Cmnd *) tmp->host_scribble)
+ if (cmd == tmp) {
+ REMOVE(5,*prev,tmp,tmp->host_scribble);
+ (*prev) = (Scsi_Cmnd *) tmp->host_scribble;
+ tmp->host_scribble = NULL;
+ tmp->result = DID_ABORT << 16;
+ sti();
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : abort removed command from issue queue.\n",
+ instance->host_no);
+#endif
+ tmp->done(tmp);
+ return SCSI_ABORT_SUCCESS;
+ }
+#if (NDEBUG & NDEBUG_ABORT)
+ /* KLL */
+ else if (prev == tmp) printk("scsi%d : LOOP\n", instance->host_no);
+#endif
+
+/*
+ * Case 3 : If any commands are connected, we're going to fail the abort
+ * and let the high level SCSI driver retry at a later time or
+ * issue a reset.
+ *
+ * Timeouts, and therefore aborted commands, will be highly unlikely
+ * and handling them cleanly in this situation would make the common
+ * case of noresets less efficient, and would pollute our code. So,
+ * we fail.
+ */
+
+ if (hostdata->connected) {
+ sti();
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : abort failed, command connected.\n", instance->host_no);
+#endif
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+/*
+ * Case 4: If the command is currently disconnected from the bus, and
+ * there are no connected commands, we reconnect the I_T_L or
+ * I_T_L_Q nexus associated with it, go into message out, and send
+ * an abort message.
+ *
+ * This case is especially ugly. In order to reestablish the nexus, we
+ * need to call NCR5380_select(). The easiest way to implement this
+ * function was to abort if the bus was busy, and let the interrupt
+ * handler triggered on the SEL for reselect take care of lost arbitrations
+ * where necessary, meaning interrupts need to be enabled.
+ *
+ * When interrupts are enabled, the queues may change - so we
+ * can't remove it from the disconnected queue before selecting it
+ * because that could cause a failure in hashing the nexus if that
+ * device reselected.
+ *
+ * Since the queues may change, we can't use the pointers from when we
+ * first locate it.
+ *
+ * So, we must first locate the command, and if NCR5380_select()
+ * succeeds, then issue the abort, relocate the command and remove
+ * it from the disconnected queue.
+ */
+
+ for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp;
+ tmp = (Scsi_Cmnd *) tmp->host_scribble)
+ if (cmd == tmp) {
+ sti();
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : aborting disconnected command.\n", instance->host_no);
+#endif
+
+ if (NCR5380_select (instance, cmd, (int) cmd->tag))
+ return SCSI_ABORT_BUSY;
+
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : nexus reestablished.\n", instance->host_no);
+#endif
+
+ do_abort (instance);
+
+ cli();
+ for (prev = (Scsi_Cmnd **) &(hostdata->disconnected_queue),
+ tmp = (Scsi_Cmnd *) hostdata->disconnected_queue;
+ tmp; prev = (Scsi_Cmnd **) &(tmp->host_scribble), tmp =
+ (Scsi_Cmnd *) tmp->host_scribble)
+ if (cmd == tmp) {
+ REMOVE(5,*prev,tmp,tmp->host_scribble);
+ *prev = (Scsi_Cmnd *) tmp->host_scribble;
+ tmp->host_scribble = NULL;
+ tmp->result = DID_ABORT << 16;
+ sti();
+ tmp->done(tmp);
+ return SCSI_ABORT_SUCCESS;
+ }
+ }
+
+/*
+ * Case 5 : If we reached this point, the command was not found in any of
+ * the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abortion code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke.
+ */
+
+ sti();
+ printk("scsi%d : warning : SCSI command probably completed successfully\n"
+ " before abortion\n", instance->host_no);
+ return SCSI_ABORT_NOT_RUNNING;
+}
+
+
+/*
+ * Function : int NCR5380_reset (Scsi_Cmnd *cmd)
+ *
+ * Purpose : reset the SCSI bus.
+ *
+ * Returns : SCSI_RESET_WAKEUP
+ *
+ */
+
+#ifndef NCR5380_reset
+static
+#endif
+int NCR5380_reset (Scsi_Cmnd *cmd) {
+ NCR5380_local_declare();
+ NCR5380_setup(cmd->host);
+
+ NCR5380_print_status (cmd->host);
+ do_reset (cmd->host);
+
+ return SCSI_RESET_WAKEUP;
+}
+
diff --git a/i386/i386at/gpl/linux/scsi/NCR53c406a.c b/i386/i386at/gpl/linux/scsi/NCR53c406a.c
new file mode 100644
index 00000000..3105b1bb
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/NCR53c406a.c
@@ -0,0 +1,1079 @@
+/*
+ * NCR53c406a.c
+ * Low-level SCSI driver for NCR53c406a chip.
+ * Copyright (C) 1994, 1995, 1996 Normunds Saumanis (normunds@fi.ibm.com)
+ *
+ * LILO command line usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]
+ * Specify IRQ = 0 for non-interrupt driven mode.
+ * FASTPIO = 1 for fast pio mode, 0 for slow mode.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#define NCR53C406A_DEBUG 0
+#define VERBOSE_NCR53C406A_DEBUG 0
+
+/* Set this to 1 for PIO mode (recommended) or to 0 for DMA mode */
+#define USE_PIO 1
+
+#define USE_BIOS 0
+/* #define BIOS_ADDR 0xD8000 */ /* define this if autoprobe fails */
+/* #define PORT_BASE 0x330 */ /* define this if autoprobe fails */
+/* #define IRQ_LEV 0 */ /* define this if autoprobe fails */
+#define DMA_CHAN 5 /* this is ignored if DMA is disabled */
+
+/* Set this to 0 if you encounter kernel lockups while transferring
+ * data in PIO mode */
+#define USE_FAST_PIO 1
+
+/* ============= End of user configurable parameters ============= */
+
+#include <linux/module.h>
+
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "NCR53c406a.h"
+
+/* ============================================================= */
+
+#define WATCHDOG 5000000
+
+#define SYNC_MODE 0 /* Synchronous transfer mode */
+
+#if DEBUG
+#undef NCR53C406A_DEBUG
+#define NCR53C406A_DEBUG 1
+#endif
+
+#if USE_PIO
+#define USE_DMA 0
+#else
+#define USE_DMA 1
+#endif
+
+/* Default configuration */
+#define C1_IMG 0x07 /* ID=7 */
+#define C2_IMG 0x48 /* FE SCSI2 */
+#if USE_DMA
+#define C3_IMG 0x21 /* CDB TE */
+#else
+#define C3_IMG 0x20 /* CDB */
+#endif
+#define C4_IMG 0x04 /* ANE */
+#define C5_IMG 0xb6 /* AA PI SIE POL */
+
+#define REG0 (outb(C4_IMG, CONFIG4))
+#define REG1 (outb(C5_IMG, CONFIG5))
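+/*
+ * REG0 and REG1 switch between the chip's two register banks by writing
+ * a configuration image to the shared CONFIG4/CONFIG5 port (offset 0x0D,
+ * see calc_port_addr()). The driver normally runs with bank 0 selected
+ * and selects bank 1 only around accesses to the PIO FIFO and PIO status
+ * registers.
+ */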
+
+#if NCR53C406A_DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+#if VERBOSE_NCR53C406A_DEBUG
+#define VDEB(x) x
+#else
+#define VDEB(x)
+#endif
+
+#define LOAD_DMA_COUNT(count) \
+ outb(count & 0xff, TC_LSB); \
+ outb((count >> 8) & 0xff, TC_MSB); \
+ outb((count >> 16) & 0xff, TC_HIGH);
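+/*
+ * Note that LOAD_DMA_COUNT expands to three outb() statements (transfer
+ * counter LSB, MSB, then high byte), so it is only safe where a
+ * multi-statement macro is acceptable, e.g. inside a braced block.
+ */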
+
+/* Chip commands */
+#define DMA_OP 0x80
+
+#define SCSI_NOP 0x00
+#define FLUSH_FIFO 0x01
+#define CHIP_RESET 0x02
+#define SCSI_RESET 0x03
+#define RESELECT 0x40
+#define SELECT_NO_ATN 0x41
+#define SELECT_ATN 0x42
+#define SELECT_ATN_STOP 0x43
+#define ENABLE_SEL 0x44
+#define DISABLE_SEL 0x45
+#define SELECT_ATN3 0x46
+#define RESELECT3 0x47
+#define TRANSFER_INFO 0x10
+#define INIT_CMD_COMPLETE 0x11
+#define MSG_ACCEPT 0x12
+#define TRANSFER_PAD 0x18
+#define SET_ATN 0x1a
+#define RESET_ATN 0x1b
+#define SEND_MSG 0x20
+#define SEND_STATUS 0x21
+#define SEND_DATA 0x22
+#define DISCONN_SEQ 0x23
+#define TERMINATE_SEQ 0x24
+#define TARG_CMD_COMPLETE 0x25
+#define DISCONN 0x27
+#define RECV_MSG 0x28
+#define RECV_CMD 0x29
+#define RECV_DATA 0x2a
+#define RECV_CMD_SEQ 0x2b
+#define TARGET_ABORT_DMA 0x04
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+ where something crashed or gets stuck) */
+/* 1 = blue
+ 2 = green
+ 3 = cyan
+ 4 = red
+ 5 = magenta
+ 6 = yellow
+ 7 = white
+*/
+
+#if NCR53C406A_DEBUG
+#define rtrc(i) {inb(0x3da);outb(0x31,0x3c0);outb((i),0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+/*----------------------------------------------------------------*/
+
+enum Phase {
+ idle,
+ data_out,
+ data_in,
+ command_ph,
+ status_ph,
+ message_out,
+ message_in
+};
+
+/* Static function prototypes */
+static void NCR53c406a_intr(int, struct pt_regs *);
+static void internal_done(Scsi_Cmnd *);
+static void wait_intr(void);
+static void chip_init(void);
+static void calc_port_addr(void);
+#ifndef IRQ_LEV
+static int irq_probe(void);
+#endif
+
+/* ================================================================= */
+
+#if USE_BIOS
+static void *bios_base = (void *)0;
+#endif
+
+#if PORT_BASE
+static int port_base = PORT_BASE;
+#else
+static int port_base = 0;
+#endif
+
+#if IRQ_LEV
+static int irq_level = IRQ_LEV;
+#else
+static int irq_level = -1; /* 0 is 'no irq', so use -1 for 'uninitialized'*/
+#endif
+
+#if USE_DMA
+static int dma_chan = 0;
+#endif
+
+#if USE_PIO
+static int fast_pio = USE_FAST_PIO;
+#endif
+
+static Scsi_Cmnd *current_SC = NULL;
+static volatile int internal_done_flag = 0;
+static volatile int internal_done_errcode = 0;
+static char info_msg[256];
+
+struct proc_dir_entry proc_scsi_NCR53c406a = {
+ PROC_SCSI_NCR53C406A, 7, "NCR53c406a",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+/* ================================================================= */
+
+/* possible BIOS locations */
+#if USE_BIOS
+static void *addresses[] = {
+ (void *)0xd8000,
+ (void *)0xc8000
+};
+#define ADDRESS_COUNT (sizeof( addresses ) / sizeof( unsigned ))
+#endif USE_BIOS
+
+/* possible i/o port addresses */
+static unsigned short ports[] = { 0x230, 0x330 };
+#define PORT_COUNT (sizeof( ports ) / sizeof( unsigned short ))
+
+/* possible interrupt channels */
+static unsigned short intrs[] = { 10, 11, 12, 15 };
+#define INTR_COUNT (sizeof( intrs ) / sizeof( unsigned short ))
+
+/* signatures for NCR 53c406a based controllers */
+#if USE_BIOS
+struct signature {
+ char *signature;
+ int sig_offset;
+ int sig_length;
+} signatures[] = {
+ /* 1 2 3 4 5 6 */
+ /* 123456789012345678901234567890123456789012345678901234567890 */
+ { "Copyright (C) Acculogic, Inc.\r\n2.8M Diskette Extension Bios ver 4.04.03 03/01/1993", 61, 82 },
+};
+#define SIGNATURE_COUNT (sizeof( signatures ) / sizeof( struct signature ))
+#endif USE_BIOS
+
+/* ============================================================ */
+
+/* Control Register Set 0 */
+static int TC_LSB; /* transfer counter lsb */
+static int TC_MSB; /* transfer counter msb */
+static int SCSI_FIFO; /* scsi fifo register */
+static int CMD_REG; /* command register */
+static int STAT_REG; /* status register */
+static int DEST_ID; /* selection/reselection bus id */
+static int INT_REG; /* interrupt status register */
+static int SRTIMOUT; /* select/reselect timeout reg */
+static int SEQ_REG; /* sequence step register */
+static int SYNCPRD; /* synchronous transfer period */
+static int FIFO_FLAGS; /* indicates # of bytes in fifo */
+static int SYNCOFF; /* synchronous offset register */
+static int CONFIG1; /* configuration register */
+static int CLKCONV; /* clock conversion reg */
+/*static int TESTREG;*/ /* test mode register */
+static int CONFIG2; /* Configuration 2 Register */
+static int CONFIG3; /* Configuration 3 Register */
+static int CONFIG4; /* Configuration 4 Register */
+static int TC_HIGH; /* Transfer Counter High */
+/*static int FIFO_BOTTOM;*/ /* Reserve FIFO byte register */
+
+/* Control Register Set 1 */
+/*static int JUMPER_SENSE;*/ /* Jumper sense port reg (r/w) */
+/*static int SRAM_PTR;*/ /* SRAM address pointer reg (r/w) */
+/*static int SRAM_DATA;*/ /* SRAM data register (r/w) */
+static int PIO_FIFO; /* PIO FIFO registers (r/w) */
+/*static int PIO_FIFO1;*/ /* */
+/*static int PIO_FIFO2;*/ /* */
+/*static int PIO_FIFO3;*/ /* */
+static int PIO_STATUS; /* PIO status (r/w) */
+/*static int ATA_CMD;*/ /* ATA command/status reg (r/w) */
+/*static int ATA_ERR;*/ /* ATA features/error register (r/w)*/
+static int PIO_FLAG; /* PIO flag interrupt enable (r/w) */
+static int CONFIG5; /* Configuration 5 register (r/w) */
+/*static int SIGNATURE;*/ /* Signature Register (r) */
+/*static int CONFIG6;*/ /* Configuration 6 register (r) */
+
+/* ============================================================== */
+
+#if USE_DMA
+static __inline__ int
+NCR53c406a_dma_setup (unsigned char *ptr,
+ unsigned int count,
+ unsigned char mode) {
+ unsigned limit;
+ unsigned long flags = 0;
+
+ VDEB(printk("dma: before count=%d ", count));
+ if (dma_chan <=3) {
+ if (count > 65536)
+ count = 65536;
+ limit = 65536 - (((unsigned) ptr) & 0xFFFF);
+ } else {
+ if (count > (65536<<1))
+ count = (65536<<1);
+ limit = (65536<<1) - (((unsigned) ptr) & 0x1FFFF);
+ }
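+ /*
+  * limit is the distance to the next 64K (8-bit channels) or 128K
+  * (16-bit channels) physical boundary; the ISA DMA controller cannot
+  * cross such a boundary in a single transfer, so count is clamped to
+  * it below.
+  */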
+
+ if (count > limit) count = limit;
+
+ VDEB(printk("after count=%d\n", count));
+ if ((count & 1) || (((unsigned) ptr) & 1))
+ panic ("NCR53c406a: attempted unaligned DMA transfer\n");
+
+ save_flags(flags);
+ cli();
+ disable_dma(dma_chan);
+ clear_dma_ff(dma_chan);
+ set_dma_addr(dma_chan, (long) ptr);
+ set_dma_count(dma_chan, count);
+ set_dma_mode(dma_chan, mode);
+ enable_dma(dma_chan);
+ restore_flags(flags);
+
+ return count;
+}
+
+static __inline__ int
+NCR53c406a_dma_write(unsigned char *src, unsigned int count) {
+ return NCR53c406a_dma_setup (src, count, DMA_MODE_WRITE);
+}
+
+static __inline__ int
+NCR53c406a_dma_read(unsigned char *src, unsigned int count) {
+ return NCR53c406a_dma_setup (src, count, DMA_MODE_READ);
+}
+
+static __inline__ int
+NCR53c406a_dma_residual (void) {
+ register int tmp;
+ unsigned long flags = 0;
+ save_flags(flags);
+ cli();
+ clear_dma_ff(dma_chan);
+ tmp = get_dma_residue(dma_chan);
+ restore_flags(flags);
+
+ return tmp;
+}
+#endif USE_DMA
+
+#if USE_PIO
+static __inline__ int NCR53c406a_pio_read(unsigned char *request,
+ unsigned int reqlen)
+{
+ int i;
+ int len; /* current scsi fifo size */
+ unsigned long flags = 0;
+
+ REG1;
+ while (reqlen) {
+ i = inb(PIO_STATUS);
+ /* VDEB(printk("pio_status=%x\n", i)); */
+ if (i & 0x80)
+ return 0;
+
+ switch( i & 0x1e ) {
+ default:
+ case 0x10:
+ len=0; break;
+ case 0x0:
+ len=1; break;
+ case 0x8:
+ len=42; break;
+ case 0xc:
+ len=84; break;
+ case 0xe:
+ len=128; break;
+ }
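+ /*
+  * The switch above decodes the PIO status bits into a count of bytes
+  * that can be read from the 128-byte PIO FIFO; the thresholds
+  * (1, 42, 84, 128) appear to correspond to roughly empty, one-third,
+  * two-thirds and completely full.
+  */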
+
+ if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */
+ return 0;
+ }
+
+ if (len) {
+ if( len > reqlen )
+ len = reqlen;
+
+ save_flags(flags);
+ cli();
+ if( fast_pio && len > 3 ) {
+ insl(PIO_FIFO,request,len>>2);
+ request += len & 0xfc;
+ reqlen -= len & 0xfc;
+ }
+ else {
+ while(len--) {
+ *request++ = inb(PIO_FIFO);
+ reqlen--;
+ }
+ }
+ restore_flags(flags);
+ }
+ }
+ return 0;
+}
+
+static __inline__ int NCR53c406a_pio_write(unsigned char *request,
+ unsigned int reqlen)
+{
+ int i = 0;
+ int len; /* current scsi fifo size */
+ unsigned long flags = 0;
+
+ REG1;
+ while (reqlen && !(i&0x40)) {
+ i = inb(PIO_STATUS);
+ /* VDEB(printk("pio_status=%x\n", i)); */
+ if (i & 0x80) /* error */
+ return 0;
+
+ switch( i & 0x1e ) {
+ case 0x10:
+ len=128; break;
+ case 0x0:
+ len=84; break;
+ case 0x8:
+ len=42; break;
+ case 0xc:
+ len=1; break;
+ default:
+ case 0xe:
+ len=0; break;
+ }
+
+ if (len) {
+ if( len > reqlen )
+ len = reqlen;
+
+ save_flags(flags);
+ cli();
+ if( fast_pio && len > 3 ) {
+ outsl(PIO_FIFO,request,len>>2);
+ request += len & 0xfc;
+ reqlen -= len & 0xfc;
+ }
+ else {
+ while(len--) {
+ outb(*request++, PIO_FIFO);
+ reqlen--;
+ }
+ }
+ restore_flags(flags);
+ }
+ }
+ return 0;
+}
+#endif USE_PIO
+
+int
+NCR53c406a_detect(Scsi_Host_Template * tpnt){
+ struct Scsi_Host *shpnt;
+#ifndef PORT_BASE
+ int i;
+#endif
+
+#if USE_BIOS
+ int ii, jj;
+ bios_base = 0;
+ /* look for a valid signature */
+ for( ii=0; ii < ADDRESS_COUNT && !bios_base; ii++)
+ for( jj=0; (jj < SIGNATURE_COUNT) && !bios_base; jj++)
+ if(!memcmp((void *) addresses[ii]+signatures[jj].sig_offset,
+ (void *) signatures[jj].signature,
+ (int) signatures[jj].sig_length))
+ bios_base=addresses[ii];
+
+ if(!bios_base){
+ printk("NCR53c406a: BIOS signature not found\n");
+ return 0;
+ }
+
+ DEB(printk("NCR53c406a BIOS found at %X\n", (unsigned int) bios_base););
+#endif USE_BIOS
+
+#ifdef PORT_BASE
+ if (check_region(port_base, 0x10)) /* ports already snatched */
+ port_base = 0;
+
+#else /* autodetect */
+ if (port_base) { /* LILO override */
+ if (check_region(port_base, 0x10))
+ port_base = 0;
+ }
+ else {
+ for(i=0; i<PORT_COUNT && !port_base; i++){
+ if(check_region(ports[i], 0x10)){
+ DEB(printk("NCR53c406a: port %x in use\n", ports[i]));
+ }
+ else {
+ VDEB(printk("NCR53c406a: port %x available\n", ports[i]));
+ outb(C5_IMG, ports[i] + 0x0d); /* reg set 1 */
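+ /*
+  * With register set 1 selected, the signature register at offset 0x0e
+  * is expected to toggle its low three bits between successive reads
+  * (so two back-to-back reads XOR to 7) while the masked upper bits
+  * (value & 0xf8) read back as 0x58.
+  */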
+ if( (inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7
+ && (inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7
+ && (inb(ports[i] + 0x0e) & 0xf8) == 0x58 ) {
+ VDEB(printk("NCR53c406a: Sig register valid\n"));
+ VDEB(printk("port_base=%x\n", port_base));
+ port_base = ports[i];
+ }
+ }
+ }
+ }
+#endif PORT_BASE
+
+ if(!port_base){ /* no ports found */
+ printk("NCR53c406a: no available ports found\n");
+ return 0;
+ }
+
+ DEB(printk("NCR53c406a detected\n"));
+
+ calc_port_addr();
+ chip_init();
+
+#ifndef IRQ_LEV
+ if (irq_level < 0) { /* LILO override if >= 0*/
+ irq_level=irq_probe();
+ if (irq_level < 0) { /* Trouble */
+ printk("NCR53c406a: IRQ problem, irq_level=%d, giving up\n", irq_level);
+ return 0;
+ }
+ }
+#endif
+
+ DEB(printk("NCR53c406a: using port_base %x\n", port_base));
+ request_region(port_base, 0x10, "NCR53c406a");
+
+ if(irq_level > 0) {
+ if(request_irq(irq_level, NCR53c406a_intr, 0, "NCR53c406a")){
+ printk("NCR53c406a: unable to allocate IRQ %d\n", irq_level);
+ return 0;
+ }
+ tpnt->can_queue = 1;
+ DEB(printk("NCR53c406a: allocated IRQ %d\n", irq_level));
+ }
+ else if (irq_level == 0) {
+ tpnt->can_queue = 0;
+ DEB(printk("NCR53c406a: No interrupts detected\n"));
+#if USE_DMA
+ printk("NCR53c406a: No interrupts found and DMA mode defined. Giving up.\n");
+ return 0;
+#endif USE_DMA
+ }
+ else {
+ DEB(printk("NCR53c406a: Shouldn't get here!\n"));
+ return 0;
+ }
+
+#if USE_DMA
+ dma_chan = DMA_CHAN;
+ if(request_dma(dma_chan, "NCR53c406a") != 0){
+ printk("NCR53c406a: unable to allocate DMA channel %d\n", dma_chan);
+ return 0;
+ }
+
+ DEB(printk("Allocated DMA channel %d\n", dma_chan));
+#endif USE_DMA
+
+ tpnt->present = 1;
+ tpnt->proc_dir = &proc_scsi_NCR53c406a;
+
+ shpnt = scsi_register(tpnt, 0);
+ shpnt->irq = irq_level;
+ shpnt->io_port = port_base;
+ shpnt->n_io_port = 0x10;
+#if USE_DMA
+ shpnt->dma = dma_chan;
+#endif
+
+#if USE_DMA
+ sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, DMA channel %d.",
+ port_base, irq_level, dma_chan);
+#else
+ sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, %s PIO mode.",
+ port_base, irq_level, fast_pio ? "fast" : "slow");
+#endif
+
+ return (tpnt->present);
+}
+
+/* called from init/main.c */
+void NCR53c406a_setup(char *str, int *ints)
+{
+ static size_t setup_idx = 0;
+ size_t i;
+
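+ /*
+  * ints[] follows the old kernel setup() convention: ints[0] holds the
+  * number of arguments that follow, so ints[1] is PORTBASE, ints[2] the
+  * optional IRQ and ints[3] the optional FASTPIO flag.
+  */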
+ DEB(printk("NCR53c406a: Setup called\n"););
+
+ if (setup_idx >= PORT_COUNT - 1) {
+ printk("NCR53c406a: Setup called too many times. Bad LILO params?\n");
+ return;
+ }
+ if (ints[0] < 1 || ints[0] > 3) {
+ printk("NCR53c406a: Malformed command line\n");
+ printk("NCR53c406a: Usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]\n");
+ return;
+ }
+ for (i = 0; i < PORT_COUNT && !port_base; i++)
+ if (ports[i] == ints[1]) {
+ port_base = ints[1];
+ DEB(printk("NCR53c406a: Specified port_base 0x%X\n", port_base);)
+ }
+ if (!port_base) {
+ printk("NCR53c406a: Invalid PORTBASE 0x%X specified\n", ints[1]);
+ return;
+ }
+
+ if (ints[0] > 1) {
+ if (ints[2] == 0) {
+ irq_level = 0;
+ DEB(printk("NCR53c406a: Specified irq %d\n", irq_level);)
+ }
+ else
+ for (i = 0; i < INTR_COUNT && irq_level < 0; i++)
+ if (intrs[i] == ints[2]) {
+ irq_level = ints[2];
+ DEB(printk("NCR53c406a: Specified irq %d\n", port_base);)
+ }
+ if (irq_level < 0)
+ printk("NCR53c406a: Invalid IRQ %d specified\n", ints[2]);
+ }
+
+ if (ints[0] > 2)
+ fast_pio = ints[3];
+
+ DEB(printk("NCR53c406a: port_base=0x%X, irq=%d, fast_pio=%d\n",
+ port_base, irq_level, fast_pio);)
+}
+
+const char*
+NCR53c406a_info(struct Scsi_Host *SChost){
+ DEB(printk("NCR53c406a_info called\n"));
+ return (info_msg);
+}
+
+static void internal_done(Scsi_Cmnd *SCpnt) {
+ internal_done_errcode = SCpnt->result;
+ ++internal_done_flag;
+}
+
+
+static void wait_intr() {
+ int i = jiffies + WATCHDOG;
+
+ while(i>jiffies && !(inb(STAT_REG)&0xe0)) /* wait for a pseudo-interrupt */
+ barrier();
+
+ if (i <= jiffies) { /* Timed out */
+ rtrc(0);
+ current_SC->result = DID_TIME_OUT << 16;
+ current_SC->SCp.phase = idle;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ NCR53c406a_intr(0, NULL);
+}
+
+int NCR53c406a_command(Scsi_Cmnd *SCpnt){
+ DEB(printk("NCR53c406a_command called\n"));
+ NCR53c406a_queue(SCpnt, internal_done);
+ if(irq_level)
+ while (!internal_done_flag);
+ else /* interrupts not supported */
+ while (!internal_done_flag)
+ wait_intr();
+
+ internal_done_flag = 0;
+ return internal_done_errcode;
+}
+
+
+int
+NCR53c406a_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)){
+ int i;
+ unsigned long flags = 0;
+
+ VDEB(printk("NCR53c406a_queue called\n"));
+ DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n",
+ SCpnt->cmnd[0],
+ SCpnt->cmd_len,
+ SCpnt->target,
+ SCpnt->lun,
+ SCpnt->request_bufflen));
+
+#if 0
+ VDEB(for(i=0; i<SCpnt->cmd_len; i++)
+ printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i]));
+ VDEB(printk("\n"));
+#endif
+
+ current_SC = SCpnt;
+ current_SC->scsi_done = done;
+ current_SC->SCp.phase = command_ph;
+ current_SC->SCp.Status = 0;
+ current_SC->SCp.Message = 0;
+
+ save_flags(flags);
+ cli();
+ REG0;
+ outb(SCpnt->target, DEST_ID); /* set destination */
+ outb(FLUSH_FIFO, CMD_REG); /* reset the fifos */
+
+ for(i=0; i<SCpnt->cmd_len; i++){
+ outb(SCpnt->cmnd[i], SCSI_FIFO);
+ }
+ outb(SELECT_NO_ATN, CMD_REG);
+ restore_flags(flags);
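+ /*
+  * Selection and the rest of the command now proceed under the chip's
+  * control; further progress is driven from NCR53c406a_intr(), either
+  * from the real interrupt or polled via wait_intr() when no IRQ is in
+  * use.
+  */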
+
+ rtrc(1);
+ return 0;
+}
+
+int
+NCR53c406a_abort(Scsi_Cmnd *SCpnt){
+ DEB(printk("NCR53c406a_abort called\n"));
+ return SCSI_ABORT_SNOOZE; /* Don't know how to abort */
+}
+
+int
+NCR53c406a_reset(Scsi_Cmnd *SCpnt){
+ DEB(printk("NCR53c406a_reset called\n"));
+ outb(C4_IMG, CONFIG4); /* Select reg set 0 */
+ outb(CHIP_RESET, CMD_REG);
+ outb(SCSI_NOP, CMD_REG); /* required after reset */
+ outb(SCSI_RESET, CMD_REG);
+ chip_init();
+
+ rtrc(2);
+ if (irq_level)
+ return SCSI_RESET_PENDING; /* should get an interrupt */
+ else
+ return SCSI_RESET_WAKEUP; /* won't get any interrupts */
+}
+
+int
+NCR53c406a_biosparm(Scsi_Disk *disk, kdev_t dev, int* info_array){
+ int size;
+
+ DEB(printk("NCR53c406a_biosparm called\n"));
+
+ size = disk->capacity;
+ info_array[0] = 64; /* heads */
+ info_array[1] = 32; /* sectors */
+ info_array[2] = size>>11; /* cylinders */
+ if (info_array[2] > 1024) { /* big disk */
+ info_array[0] = 255;
+ info_array[1] = 63;
+ info_array[2] = size / (255*63);
+ }
+ return 0;
+}
+
+static void
+NCR53c406a_intr(int unused, struct pt_regs *regs){
+ DEB(unsigned char fifo_size;)
+ DEB(unsigned char seq_reg;)
+ unsigned char status, int_reg;
+ unsigned long flags = 0;
+#if USE_PIO
+ unsigned char pio_status;
+ struct scatterlist *sglist;
+ unsigned int sgcount;
+#endif
+
+ VDEB(printk("NCR53c406a_intr called\n"));
+
+ save_flags(flags);
+ cli();
+#if USE_PIO
+ REG1;
+ pio_status = inb(PIO_STATUS);
+#endif
+ REG0;
+ status = inb(STAT_REG);
+ DEB(seq_reg = inb(SEQ_REG));
+ int_reg = inb(INT_REG);
+ DEB(fifo_size = inb(FIFO_FLAGS) & 0x1f);
+ restore_flags(flags);
+
+#if NCR53C406A_DEBUG
+ printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x",
+ status, seq_reg, int_reg, fifo_size);
+#if (USE_DMA)
+ printk("\n");
+#else
+ printk(", pio=%02x\n", pio_status);
+#endif USE_DMA
+#endif NCR53C406A_DEBUG
+
+ if(int_reg & 0x80){ /* SCSI reset intr */
+ rtrc(3);
+ DEB(printk("NCR53c406a: reset intr received\n"));
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_RESET << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+#if USE_PIO
+ if(pio_status & 0x80) {
+ printk("NCR53C406A: Warning: PIO error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_ERROR << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+#endif USE_PIO
+
+ if(status & 0x20) { /* Parity error */
+ printk("NCR53c406a: Warning: parity error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_PARITY << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ if(status & 0x40) { /* Gross error */
+ printk("NCR53c406a: Warning: gross error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_ERROR << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ if(int_reg & 0x20){ /* Disconnect */
+ DEB(printk("NCR53c406a: disconnect intr received\n"));
+ if(current_SC->SCp.phase != message_in){ /* Unexpected disconnect */
+ current_SC->result = DID_NO_CONNECT << 16;
+ }
+ else{ /* Command complete, return status and message */
+ current_SC->result = (current_SC->SCp.Status & 0xff)
+ | ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16);
+ }
+
+ rtrc(0);
+ current_SC->SCp.phase = idle;
+ current_SC->scsi_done( current_SC );
+ return;
+ }
+
+ switch(status & 0x07){ /* scsi phase */
+ case 0x00: /* DATA-OUT */
+ if(int_reg & 0x10){ /* Target requesting info transfer */
+ rtrc(5);
+ current_SC->SCp.phase = data_out;
+ VDEB(printk("NCR53c406a: Data-Out phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+#if USE_DMA /* No s/g support for DMA */
+ NCR53c406a_dma_write(current_SC->request_buffer,
+ current_SC->request_bufflen);
+#endif USE_DMA
+ outb(TRANSFER_INFO | DMA_OP, CMD_REG);
+#if USE_PIO
+ if (!current_SC->use_sg) /* Don't use scatter-gather */
+ NCR53c406a_pio_write(current_SC->request_buffer,
+ current_SC->request_bufflen);
+ else { /* use scatter-gather */
+ sgcount = current_SC->use_sg;
+ sglist = current_SC->request_buffer;
+ while( sgcount-- ) {
+ NCR53c406a_pio_write(sglist->address, sglist->length);
+ sglist++;
+ }
+ }
+ REG0;
+#endif USE_PIO
+ }
+ break;
+
+ case 0x01: /* DATA-IN */
+ if(int_reg & 0x10){ /* Target requesting info transfer */
+ rtrc(6);
+ current_SC->SCp.phase = data_in;
+ VDEB(printk("NCR53c406a: Data-In phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+#if USE_DMA /* No s/g support for DMA */
+ NCR53c406a_dma_read(current_SC->request_buffer,
+ current_SC->request_bufflen);
+#endif USE_DMA
+ outb(TRANSFER_INFO | DMA_OP, CMD_REG);
+#if USE_PIO
+ if (!current_SC->use_sg) /* Don't use scatter-gather */
+ NCR53c406a_pio_read(current_SC->request_buffer,
+ current_SC->request_bufflen);
+ else { /* Use scatter-gather */
+ sgcount = current_SC->use_sg;
+ sglist = current_SC->request_buffer;
+ while( sgcount-- ) {
+ NCR53c406a_pio_read(sglist->address, sglist->length);
+ sglist++;
+ }
+ }
+ REG0;
+#endif USE_PIO
+ }
+ break;
+
+ case 0x02: /* COMMAND */
+ current_SC->SCp.phase = command_ph;
+ printk("NCR53c406a: Warning: Unknown interupt occured in command phase!\n");
+ break;
+
+ case 0x03: /* STATUS */
+ rtrc(7);
+ current_SC->SCp.phase = status_ph;
+ VDEB(printk("NCR53c406a: Status phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ outb(INIT_CMD_COMPLETE, CMD_REG);
+ break;
+
+ case 0x04: /* Reserved */
+ case 0x05: /* Reserved */
+ printk("NCR53c406a: WARNING: Reserved phase!!!\n");
+ break;
+
+ case 0x06: /* MESSAGE-OUT */
+ DEB(printk("NCR53c406a: Message-Out phase\n"));
+ current_SC->SCp.phase = message_out;
+ outb(SET_ATN, CMD_REG); /* Reject the message */
+ outb(MSG_ACCEPT, CMD_REG);
+ break;
+
+ case 0x07: /* MESSAGE-IN */
+ rtrc(4);
+ VDEB(printk("NCR53c406a: Message-In phase\n"));
+ current_SC->SCp.phase = message_in;
+
+ current_SC->SCp.Status = inb(SCSI_FIFO);
+ current_SC->SCp.Message = inb(SCSI_FIFO);
+
+ VDEB(printk("SCSI FIFO size=%d\n", inb(FIFO_FLAGS) & 0x1f));
+ DEB(printk("Status = %02x Message = %02x\n",
+ current_SC->SCp.Status, current_SC->SCp.Message));
+
+ if(current_SC->SCp.Message == SAVE_POINTERS ||
+ current_SC->SCp.Message == DISCONNECT) {
+ outb(SET_ATN, CMD_REG); /* Reject message */
+ DEB(printk("Discarding SAVE_POINTERS message\n"));
+ }
+ outb(MSG_ACCEPT, CMD_REG);
+ break;
+ }
+}
+
+#ifndef IRQ_LEV
+static int irq_probe()
+{
+ int irqs, irq;
+ int i;
+
+ inb(INT_REG); /* clear the interrupt register */
+ sti();
+ irqs = probe_irq_on();
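+ /*
+  * probe_irq_on()/probe_irq_off() bracket a deliberately provoked
+  * interrupt so the kernel can report which unassigned IRQ line fired.
+  */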
+
+ /* Invalid command will cause an interrupt */
+ REG0;
+ outb(0xff, CMD_REG);
+
+ /* Wait for the interrupt to occur */
+ i = jiffies + WATCHDOG;
+ while(i > jiffies && !(inb(STAT_REG) & 0x80))
+ barrier();
+ if (i <= jiffies) { /* Timed out, must be hardware trouble */
+ probe_irq_off(irqs);
+ return -1;
+ }
+
+ irq = probe_irq_off(irqs);
+
+ /* Kick the chip */
+ outb(CHIP_RESET, CMD_REG);
+ outb(SCSI_NOP, CMD_REG);
+ chip_init();
+
+ return irq;
+}
+#endif /* IRQ_LEV */
+
+static void chip_init()
+{
+ REG1;
+#if USE_DMA
+ outb(0x00, PIO_STATUS);
+#else /* USE_PIO */
+ outb(0x01, PIO_STATUS);
+#endif
+ outb(0x00, PIO_FLAG);
+
+ outb(C4_IMG, CONFIG4); /* REG0; */
+ outb(C3_IMG, CONFIG3);
+ outb(C2_IMG, CONFIG2);
+ outb(C1_IMG, CONFIG1);
+
+ outb(0x05, CLKCONV); /* clock conversion factor */
+ outb(0x9C, SRTIMOUT); /* Selection timeout */
+ outb(0x05, SYNCPRD); /* Synchronous transfer period */
+ outb(SYNC_MODE, SYNCOFF); /* synchronous mode */
+}
+
+void calc_port_addr()
+{
+ /* Control Register Set 0 */
+ TC_LSB = (port_base+0x00);
+ TC_MSB = (port_base+0x01);
+ SCSI_FIFO = (port_base+0x02);
+ CMD_REG = (port_base+0x03);
+ STAT_REG = (port_base+0x04);
+ DEST_ID = (port_base+0x04);
+ INT_REG = (port_base+0x05);
+ SRTIMOUT = (port_base+0x05);
+ SEQ_REG = (port_base+0x06);
+ SYNCPRD = (port_base+0x06);
+ FIFO_FLAGS = (port_base+0x07);
+ SYNCOFF = (port_base+0x07);
+ CONFIG1 = (port_base+0x08);
+ CLKCONV = (port_base+0x09);
+ /* TESTREG = (port_base+0x0A); */
+ CONFIG2 = (port_base+0x0B);
+ CONFIG3 = (port_base+0x0C);
+ CONFIG4 = (port_base+0x0D);
+ TC_HIGH = (port_base+0x0E);
+ /* FIFO_BOTTOM = (port_base+0x0F); */
+
+ /* Control Register Set 1 */
+ /* JUMPER_SENSE = (port_base+0x00);*/
+ /* SRAM_PTR = (port_base+0x01);*/
+ /* SRAM_DATA = (port_base+0x02);*/
+ PIO_FIFO = (port_base+0x04);
+ /* PIO_FIFO1 = (port_base+0x05);*/
+ /* PIO_FIFO2 = (port_base+0x06);*/
+ /* PIO_FIFO3 = (port_base+0x07);*/
+ PIO_STATUS = (port_base+0x08);
+ /* ATA_CMD = (port_base+0x09);*/
+ /* ATA_ERR = (port_base+0x0A);*/
+ PIO_FLAG = (port_base+0x0B);
+ CONFIG5 = (port_base+0x0D);
+ /* SIGNATURE = (port_base+0x0E);*/
+ /* CONFIG6 = (port_base+0x0F);*/
+}
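+
+/* Note: several Control Register Set 0 offsets above carry two names
+ * (e.g. STAT_REG/DEST_ID at +0x04, INT_REG/SRTIMOUT at +0x05) because
+ * the chip decodes the same address differently on reads and writes;
+ * the first name of each pair is the read-side register and the second
+ * the write-side one.
+ */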
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = NCR53c406a;
+
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we get a uniform tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/NCR53c406a.h b/i386/i386at/gpl/linux/scsi/NCR53c406a.h
new file mode 100644
index 00000000..dcb48870
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/NCR53c406a.h
@@ -0,0 +1,83 @@
+#ifndef _NCR53C406A_H
+#define _NCR53C406A_H
+
+/*
+ * NCR53c406a.h
+ *
+ * Copyright (C) 1994 Normunds Saumanis (normunds@rx.tech.swh.lv)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+/* NOTE: scatter-gather support only works in PIO mode.
+ * Use SG_NONE if DMA mode is enabled!
+ */
+#define NCR53c406a { \
+ NULL /* next */, \
+ NULL /* usage count */, \
+ &proc_scsi_NCR53c406a /* proc_dir */, \
+ NULL /* proc_info */, \
+ "NCR53c406a" /* name */, \
+ NCR53c406a_detect /* detect */, \
+ NULL /* release */, \
+ NCR53c406a_info /* info */, \
+ NCR53c406a_command /* command */, \
+ NCR53c406a_queue /* queuecommand */, \
+ NCR53c406a_abort /* abort */, \
+ NCR53c406a_reset /* reset */, \
+ NULL /* slave_attach */, \
+ NCR53c406a_biosparm /* biosparm */, \
+ 1 /* can_queue */, \
+ 7 /* SCSI ID of the chip */, \
+ 32 /*SG_ALL*/ /*SG_NONE*/, \
+ 1 /* commands per lun */, \
+ 0 /* number of boards in system */, \
+ 1 /* unchecked_isa_dma */, \
+ ENABLE_CLUSTERING \
+}
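+
+/* Per the note above, scatter-gather only works in PIO mode: a build with
+ * USE_DMA enabled should use SG_NONE for the sg_tablesize entry (the "32"
+ * above) rather than a real scatter-gather limit.
+ */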
+
+extern struct proc_dir_entry proc_scsi_NCR53c406a;
+
+int NCR53c406a_detect(Scsi_Host_Template *);
+const char* NCR53c406a_info(struct Scsi_Host *);
+
+int NCR53c406a_command(Scsi_Cmnd *);
+int NCR53c406a_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int NCR53c406a_abort(Scsi_Cmnd *);
+int NCR53c406a_reset(Scsi_Cmnd *);
+int NCR53c406a_biosparm(Disk *, kdev_t, int []);
+
+#endif /* _NCR53C406A_H */
+
+/*
+ * Overrides for Emacs so that we get a uniform tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/i386/i386at/gpl/linux/scsi/advansys.c b/i386/i386at/gpl/linux/scsi/advansys.c
new file mode 100644
index 00000000..3f8efe3d
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/advansys.c
@@ -0,0 +1,9061 @@
+/* $Id: advansys.c,v 1.1.1.1 1997/02/25 21:27:45 thomas Exp $ */
+/*
+ * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
+ *
+ * Copyright (c) 1995-1996 Advanced System Products, Inc.
+ * All Rights Reserved.
+ *
+ * This driver may be modified and freely distributed provided that
+ * the above copyright message and this comment are included in the
+ * distribution. The latest version of this driver is available at
+ * the AdvanSys FTP and BBS sites listed below.
+ *
+ * Please send questions, comments, bug reports to:
+ * bobf@advansys.com (Bob Frey)
+ */
+
+/* The driver has been tested with Linux 1.2.1 and 1.3.57 kernels. */
+#define ASC_VERSION "1.2" /* AdvanSys Driver Version */
+
+/*
+
+ Documentation for the AdvanSys Driver
+
+ A. Adapters Supported by this Driver
+ B. Linux 1.2.X - Directions for Adding the AdvanSys Driver
+ C. Linux 1.3.X - Directions for Adding the AdvanSys Driver
+ D. Source Comments
+ E. Driver Compile Time Options and Debugging
+ F. Driver LILO Option
+ G. Release History
+ H. Known Problems or Issues
+ I. Credits
+ J. AdvanSys Contact Information
+
+
+ A. Adapters Supported by this Driver
+
+ AdvanSys (Advanced System Products, Inc.) manufactures the following
+ Bus-Mastering SCSI-2 Host Adapters for the ISA, EISA, VL, and PCI
+ buses. This Linux driver supports all of these adapters.
+
+ The CDB counts below indicate the number of SCSI CDB (Command
+ Descriptor Block) requests that can be stored in the RISC chip
+ cache and board LRAM. The driver detect routine will display the
+ number of CDBs available for each adapter detected. This value
+ can be lowered in the BIOS by changing the 'Host Queue Size'
+ adapter setting.
+
+ Connectivity Products:
+ ABP920 - Bus-Master PCI 16 CDB
+ ABP930 - Bus-Master PCI 16 CDB
+ ABP5140 - Bus-Master PnP ISA 16 CDB
+
+ Single Channel Products:
+ ABP542 - Bus-Master ISA 240 CDB
+ ABP5150 - Bus-Master ISA 240 CDB *
+ ABP742 - Bus-Master EISA 240 CDB
+ ABP842 - Bus-Master VL 240 CDB
+ ABP940 - Bus-Master PCI 240 CDB
+
+ Dual Channel Products:
+ ABP950 - Dual Channel Bus-Master PCI 240 CDB Per Channel
+ ABP852 - Dual Channel Bus-Master VL 240 CDB Per Channel
+ ABP752 - Dual Channel Bus-Master EISA 240 CDB Per Channel
+
+ * This board is shipped by HP with the 4020i CD-R drive. It has
+ no BIOS so it cannot control a boot device, but it can control
+ any secondary devices.
+
+ B. Linux 1.2.X - Directions for Adding the AdvanSys Driver
+
+ There are two source files: advansys.h and advansys.c. Copy
+ both of these files to the directory /usr/src/linux/drivers/scsi.
+
+ 1. Add the following line to /usr/src/linux/arch/i386/config.in
+ after "comment 'SCSI low-level drivers'":
+
+ bool 'AdvanSys SCSI support' CONFIG_SCSI_ADVANSYS y
+
+ 2. Add the following lines to /usr/src/linux/drivers/scsi/hosts.c
+ after "#include "hosts.h"":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ #include "advansys.h"
+ #endif
+
+ and after "static Scsi_Host_Template builtin_scsi_hosts[] =":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ ADVANSYS,
+ #endif
+
+ 3. Add the following lines to /usr/src/linux/drivers/scsi/Makefile:
+
+ ifdef CONFIG_SCSI_ADVANSYS
+ SCSI_SRCS := $(SCSI_SRCS) advansys.c
+ SCSI_OBJS := $(SCSI_OBJS) advansys.o
+ else
+ SCSI_MODULE_OBJS := $(SCSI_MODULE_OBJS) advansys.o
+ endif
+
+ 4. (Optional) If you would like to enable the LILO command line
+ and /etc/lilo.conf 'advansys' option, make the following changes.
+ This option can be used to disable I/O port scanning or to limit
+ I/O port scanning to specific addresses. Refer to the 'Driver
+ LILO Option' section below. Add the following lines to
+ /usr/src/linux/init/main.c in the prototype section:
+
+ extern void advansys_setup(char *str, int *ints);
+
+ and add the following lines to the bootsetups[] array.
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ { "advansys=", advansys_setup },
+ #endif
+
+ 5. If you have the HP 4020i CD-R drive and Linux 1.2.X you should
+ add a fix to the CD-ROM target driver. This fix will allow
+ you to mount CDs with the iso9660 file system. Linux 1.3.X
+ already has this fix. In the file /usr/src/linux/drivers/scsi/sr.c
+ and function get_sectorsize() after the line:
+
+ if(scsi_CDs[i].sector_size == 0) scsi_CDs[i].sector_size = 2048;
+
+ add the following line:
+
+ if(scsi_CDs[i].sector_size == 2340) scsi_CDs[i].sector_size = 2048;
+
+ 6. In the directory /usr/src/linux run 'make config' to configure
+ the AdvanSys driver, then run 'make vmlinux' or 'make zlilo' to
+ make the kernel. If the AdvanSys driver is not configured, then
+ a loadable module can be built by running 'make modules' and
+ 'make modules_install'. Use 'insmod' and 'rmmod' to install
+ and remove advansys.o.
+
+ C. Linux 1.3.X - Directions for Adding the AdvanSys Driver
+
+ There are two source files: advansys.h and advansys.c. Copy
+ both of these files to the directory /usr/src/linux/drivers/scsi.
+
+ 1. Add the following line to /usr/src/linux/drivers/scsi/Config.in
+ after "comment 'SCSI low-level drivers'":
+
+ dep_tristate 'AdvanSys SCSI support' CONFIG_SCSI_ADVANSYS $CONFIG_SCSI
+
+ 2. Add the following lines to /usr/src/linux/drivers/scsi/hosts.c
+ after "#include "hosts.h"":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ #include "advansys.h"
+ #endif
+
+ and after "static Scsi_Host_Template builtin_scsi_hosts[] =":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ ADVANSYS,
+ #endif
+
+ 3. Add the following lines to /usr/src/linux/drivers/scsi/Makefile:
+
+ ifeq ($(CONFIG_SCSI_ADVANSYS),y)
+ L_OBJS += advansys.o
+ else
+ ifeq ($(CONFIG_SCSI_ADVANSYS),m)
+ M_OBJS += advansys.o
+ endif
+ endif
+
+ 4. Add the following line to /usr/src/linux/include/linux/proc_fs.h
+ in the enum scsi_directory_inos array:
+
+ PROC_SCSI_ADVANSYS,
+
+ 5. (Optional) If you would like to enable the LILO command line
+ and /etc/lilo.conf 'advansys' option, make the following changes.
+ This option can be used to disable I/O port scanning or to limit
+ I/O port scanning to specific addresses. Refer to the 'Driver
+ LILO Option' section below. Add the following lines to
+ /usr/src/linux/init/main.c in the prototype section:
+
+ extern void advansys_setup(char *str, int *ints);
+
+ and add the following lines to the bootsetups[] array.
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ { "advansys=", advansys_setup },
+ #endif
+
+ 6. In the directory /usr/src/linux run 'make config' to configure
+ the AdvanSys driver, then run 'make vmlinux' or 'make zlilo' to
+ make the kernel. If the AdvanSys driver is not configured, then
+ a loadable module can be built by running 'make modules' and
+ 'make modules_install'. Use 'insmod' and 'rmmod' to install
+ and remove advansys.o.
+
+ D. Source Comments
+
+ 1. Use tab stops set to 4 for the source files. For vi use 'se tabstop=4'.
+
+ 2. This driver should be maintained in multiple files. But to make
+ it easier to include with Linux and to follow Linux conventions,
+ the whole driver is maintained in the source files advansys.h and
+ advansys.c. In this file logical sections of the driver begin with
+ a comment that contains '---'. The logical sections of the driver
+ are listed below.
+
+ --- Linux Version
+ --- Linux Include Files
+ --- Driver Options
+ --- Asc Library Constants and Macros
+ --- Debugging Header
+ --- Driver Constants
+ --- Driver Macros
+ --- Driver Structures
+ --- Driver Data
+ --- Driver Function Prototypes
+ --- Linux 'Scsi_Host_Template' and advansys_setup() Functions
+ --- Loadable Driver Support
+ --- Miscellaneous Driver Functions
+ --- Functions Required by the Asc Library
+ --- Tracing and Debugging Functions
+ --- Asc Library Functions
+
+ 3. The string 'XXX' is used to flag code that needs to be re-written
+ or that contains a problem that needs to be addressed.
+
+ 4. I have stripped comments from and reformatted the source for the
+ Asc Library which is included in this file. I haven't done this
+ to obfuscate the code. Actually I have done this to deobfuscate
+ the code. The Asc Library source can be found under the following
+ headings.
+
+ --- Asc Library Constants and Macros
+ --- Asc Library Functions
+
+ E. Driver Compile Time Options and Debugging
+
+ In this source file the following constants can be defined. They are
+ defined in the source below. Both of these options are enabled by
+ default.
+
+ 1. ADVANSYS_DEBUG - enable for debugging and assertions
+
+ The amount of debugging output can be controlled with the global
+ variable 'asc_dbglvl'. The higher the number the more output. By
+ default the debug level is 0.
+
+ If the driver is loaded at boot time and the LILO Driver Option
+ is included in the system, the debug level can be changed by
+ specifying a 5th (ASC_NUM_BOARD_SUPPORTED + 1) I/O Port. The
+ first three hex digits of the pseudo I/O Port must be set to
+ 'deb' and the fourth hex digit specifies the debug level: 0 - F.
+ The following command line will look for an adapter at 0x330
+ and set the debug level to 2.
+
+ linux advansys=0x330,0x0,0x0,0x0,0xdeb2
+
+ If the driver is built as a loadable module this variable can be
+ defined when the driver is loaded. The following insmod command
+ will set the debug level to one.
+
+ insmod advansys.o asc_dbglvl=1
+
+
+ Debugging Message Levels:
+ 0: Errors Only
+ 1: High-Level Tracing
+ 2-N: Verbose Tracing
+
+ I don't know the approved way for turning on printk()s to the
+ console. Here's a program I use to do this. Debug output is
+ logged in /var/adm/messages.
+
+ main()
+ {
+ syscall(103, 7, 0, 0);
+ }
+
+ I found that increasing LOG_BUF_LEN to 40960 in kernel/printk.c
+ prevents most level 1 debug messages from being lost.
+
+ 2. ADVANSYS_STATS - enable statistics and tracing
+
+ For Linux 1.2.X if ADVANSYS_STATS_1_2_PRINT is defined every
+ 10,000 I/O operations the driver will print statistics to the
+ console. This value can be changed by modifying the constant
+ used in advansys_queuecommand(). ADVANSYS_STATS_1_2_PRINT is
+ off by default.
+
+ For Linux 1.3.X statistics can be accessed by reading the
+ /proc/scsi/advansys/[0-9] files.
+
+ Note: these statistics are currently maintained on a global driver
+ basis and not per board.
+
+ F. Driver LILO Option
+
+ If init/main.c is modified as described in the 'Directions for Adding
+ the AdvanSys Driver' sections (B.4. or C.5.) above, the driver will
+ recognize the 'advansys' LILO command line and /etc/lilo.conf option.
+ This option can be used to either disable I/O port scanning or to limit
+ scanning to 1 - 4 I/O ports. Regardless of the option setting EISA and
+ PCI boards will still be searched for and detected. This option only
+ affects searching for ISA and VL boards.
+
+ Examples:
+ 1. Eliminate I/O port scanning:
+ boot: linux advansys=
+ or
+ boot: linux advansys=0x0
+ 2. Limit I/O port scanning to one I/O port:
+ boot: linux advansys=0x110
+ 3. Limit I/O port scanning to four I/O ports:
+ boot: linux advansys=0x110,0x210,0x230,0x330
+
+ For a loadable module the same effect can be achieved by setting
+ the 'asc_iopflag' variable and 'asc_ioport' array when loading
+ the driver, e.g.
+
+ insmod advansys.o asc_iopflag=1 asc_ioport=0x110,0x330
+
+ If ADVANSYS_DEBUG is defined a 5th (ASC_NUM_BOARD_SUPPORTED + 1)
+ I/O Port may be added to specify the driver debug level. Refer to
+ the 'Driver Compile Time Options and Debugging' section above for
+ more information.
+
+ G. Release History
+
+ 12/23/95 BETA-1.0:
+ First Release
+
+ 12/28/95 BETA-1.1:
+ 1. Prevent advansys_detect() from being called twice.
+ 2. Add LILO 0xdeb[0-f] option to set 'asc_dbglvl'.
+
+ 1/12/96 1.2:
+ 1. Prevent re-entrancy in the interrupt handler which
+ resulted in the driver hanging Linux.
+ 2. Fix problem that prevented ABP-940 cards from being
+ recognized on some PCI motherboards.
+ 3. Add support for the ABP-5140 PnP ISA card.
+ 4. Fix check condition return status.
+ 5. Add conditionally compiled code for Linux 1.3.X.
+
+ H. Known Problems or Issues
+
+ 1. The setting for 'cmd_per_lun' needs to be changed. It is currently
+ less than what the AdvanSys boards can queue. Because the target and
+ mid-level Linux drivers base memory allocation on 'cmd_per_lun' (as
+ well as 'sg_tablesize') memory use gets out of hand with a large
+ 'cmd_per_lun'. 'cmd_per_lun' should be per device instead of per
+ adapter. When the driver is compiled as a loadable module both
+ 'cmd_per_lun' and 'sg_tablesize' are tuned down to try to prevent
+ memory allocation errors.
+
+ 2. For the first scsi command sent to a device the driver increases
+ the timeout value. This gives the driver more time to perform
+ its own initialization for the board and each device. The timeout
+ value is only changed on the first scsi command for each device
+ and never thereafter.
+
+ I. Credits
+
+ Nathan Hartwell (mage@cdc3.cdc.net) provided the directions and
+ basis for the Linux 1.3.X changes which were included in the
+ 1.2 release.
+
+ J. AdvanSys Contact Information
+
+ Mail: Advanced System Products, Inc.
+ 1150 Ringwood Court
+ San Jose, CA 95131 USA
+ Operator: 1-408-383-9400
+ FAX: 1-408-383-9612
+ Tech Support: 1-800-525-7440
+ BBS: 1-408-383-9540 (9600,N,8,1)
+ Interactive FAX: 1-408-383-9753
+ Customer Direct Sales: 1-800-883-1099
+ Tech Support E-Mail: support@advansys.com
+ Linux Support E-Mail: bobf@advansys.com
+ FTP Site: ftp.advansys.com (login: anonymous)
+ Web Site: http://www.advansys.com
+
+*/
+
+
+/*
+ * --- Linux Version
+ */
+
+/*
+ * The driver can be used in Linux 1.2.X or 1.3.X.
+ */
+#if !defined(LINUX_1_2) && !defined(LINUX_1_3)
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif /* LINUX_VERSION_CODE */
+#if LINUX_VERSION_CODE > 65536 + 3 * 256
+#define LINUX_1_3
+#else /* LINUX_VERSION_CODE */
+#define LINUX_1_2
+#endif /* LINUX_VERSION_CODE */
+#endif /* !defined(LINUX_1_2) && !defined(LINUX_1_3) */
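+
+/* LINUX_VERSION_CODE is (major << 16) + (minor << 8) + patchlevel, so the
+ * test above selects LINUX_1_3 for kernels newer than 1.3.0 (65536 + 3*256)
+ * and LINUX_1_2 otherwise.
+ */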
+
+
+/*
+ * --- Linux Include Files
+ */
+
+#ifdef MODULE
+#ifdef LINUX_1_3
+#include <linux/autoconf.h>
+#endif /* LINUX_1_3 */
+#include <linux/module.h>
+#endif /* MODULE */
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/malloc.h>
+#include <linux/config.h>
+#ifdef LINUX_1_3
+#include <linux/proc_fs.h>
+#endif /* LINUX_1_3 */
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#ifdef LINUX_1_2
+#include "../block/blk.h"
+#else /* LINUX_1_3 */
+#include <linux/blk.h>
+#include <linux/stat.h>
+#endif /* LINUX_1_3 */
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include "advansys.h"
+
+
+/*
+ * --- Driver Options
+ */
+#define ADVANSYS_DEBUG /* Enable for debugging and assertions. */
+#define ADVANSYS_STATS /* Enable for statistics and tracing. */
+#ifdef LINUX_1_2
+#undef ADVANSYS_STATS_1_2_PRINT /* Enable to print statistics to console. */
+#endif /* LINUX_1_2 */
+
+
+/*
+ * --- Asc Library Constants and Macros
+ */
+
+#define ASC_LIB_VERSION_MAJOR 1
+#define ASC_LIB_VERSION_MINOR 16
+#define ASC_LIB_SERIAL_NUMBER 53
+
+typedef unsigned char uchar;
+typedef unsigned char BYTE;
+typedef unsigned short WORD;
+typedef unsigned long DWORD;
+
+typedef int BOOL;
+
+#ifndef NULL
+#define NULL (0)
+#endif
+
+#ifndef TRUE
+#define TRUE (1)
+#endif
+
+#ifndef FALSE
+#define FALSE (0)
+#endif
+
+#define REG register
+
+#define rchar REG char
+#define rshort REG short
+#define rint REG int
+#define rlong REG long
+
+#define ruchar REG uchar
+#define rushort REG ushort
+#define ruint REG uint
+#define rulong REG ulong
+
+#define NULLPTR ( void *)0
+#define FNULLPTR ( void dosfar *)0UL
+#define EOF (-1)
+#define EOS '\0'
+#define ERR (-1)
+#define UB_ERR (uchar)(0xFF)
+#define UW_ERR (uint)(0xFFFF)
+#define UL_ERR (ulong)(0xFFFFFFFFUL)
+
+#define iseven_word( val ) ( ( ( ( uint )val) & ( uint )0x0001 ) == 0 )
+#define isodd_word( val ) ( ( ( ( uint )val) & ( uint )0x0001 ) != 0 )
+#define toeven_word( val ) ( ( ( uint )val ) & ( uint )0xFFFE )
+
+#define biton( val, bits ) ((( uint )( val >> bits ) & (uint)0x0001 ) != 0 )
+#define bitoff( val, bits ) ((( uint )( val >> bits ) & (uint)0x0001 ) == 0 )
+#define lbiton( val, bits ) ((( ulong )( val >> bits ) & (ulong)0x00000001UL ) != 0 )
+#define lbitoff( val, bits ) ((( ulong )( val >> bits ) & (ulong)0x00000001UL ) == 0 )
+
+#define absh( val ) ( ( val ) < 0 ? -( val ) : ( val ) )
+
+#define swapbyte( ch ) ( ( ( (ch) << 4 ) | ( (ch) >> 4 ) ) )
+
+#ifndef GBYTE
+#define GBYTE (0x40000000UL)
+#endif
+
+#ifndef MBYTE
+#define MBYTE (0x100000UL)
+#endif
+
+#ifndef KBYTE
+#define KBYTE (0x400)
+#endif
+
+#define HI_BYTE(x) ( *( ( BYTE *)(&x)+1 ) )
+#define LO_BYTE(x) ( *( ( BYTE *)&x ) )
+
+#define HI_WORD(x) ( *( ( WORD *)(&x)+1 ) )
+#define LO_WORD(x) ( *( ( WORD *)&x ) )
+
+#ifndef MAKEWORD
+#define MAKEWORD(lo, hi) ((WORD) (((WORD) lo) | ((WORD) hi << 8)))
+#endif
+
+#ifndef MAKELONG
+#define MAKELONG(lo, hi) ((DWORD) (((DWORD) lo) | ((DWORD) hi << 16)))
+#endif
+
+#define SwapWords(dWord) ((DWORD) ((dWord >> 16) | (dWord << 16)))
+#define SwapBytes(word) ((WORD) ((word >> 8) | (word << 8)))
+
+#define BigToLittle(dWord) \
+ ((DWORD) (SwapWords(MAKELONG(SwapBytes(LO_WORD(dWord)), SwapBytes(HI_WORD(dWord))))))
+#define LittleToBig(dWord) BigToLittle(dWord)
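+
+/* BigToLittle() composes the helpers above into a full 32-bit byte
+ * reversal. On a little-endian host (the case this driver assumes via
+ * CC_LITTLE_ENDIAN_HOST below), for a DWORD d holding 0x12345678:
+ *
+ *     MAKEWORD(0x34, 0x12) == 0x1234
+ *     SwapBytes(0x1234)    == 0x3412
+ *     SwapWords(d)         == 0x56781234
+ *     BigToLittle(d)       == 0x78563412
+ */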
+
+#define Lptr
+#define dosfar
+#define far
+#define PortAddr unsigned short
+#define Ptr2Func ulong
+
+#define inp(port) inb(port)
+#define inpw(port) inw(port)
+#define outp(port, byte) outb((byte), (port))
+#define outpw(port, word) outw((word), (port))
+
+#define ASC_MAX_SG_QUEUE 5
+#define ASC_MAX_SG_LIST (1 + ((ASC_SG_LIST_PER_Q) * (ASC_MAX_SG_QUEUE)))
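+
+/* With ASC_SG_LIST_PER_Q == 7 (defined below) this gives
+ * 1 + 7 * 5 == 36 scatter-gather entries per request.
+ */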
+
+#define CC_INIT_INQ_DISPLAY FALSE
+
+#define CC_CLEAR_LRAM_SRB_PTR FALSE
+#define CC_VERIFY_LRAM_COPY FALSE
+
+#define CC_DEBUG_SG_LIST FALSE
+#define CC_FAST_STRING_IO FALSE
+
+#define CC_WRITE_IO_COUNT FALSE
+#define CC_CLEAR_DMA_REMAIN FALSE
+
+#define CC_DISABLE_PCI_PARITY_INT TRUE
+
+#define CC_LINK_BUSY_Q FALSE
+
+#define CC_TARGET_MODE FALSE
+
+#define CC_SCAM FALSE
+
+#define CC_LITTLE_ENDIAN_HOST TRUE
+
+#ifndef CC_TEST_LRAM_ENDIAN
+
+#if CC_LITTLE_ENDIAN_HOST
+#define CC_TEST_LRAM_ENDIAN FALSE
+#else
+#define CC_TEST_LRAM_ENDIAN TRUE
+#endif
+
+#endif
+
+#define CC_STRUCT_ALIGNED TRUE
+
+#define CC_MEMORY_MAPPED_IO FALSE
+
+#ifndef CC_TARGET_MODE
+#define CC_TARGET_MODE FALSE
+#endif
+
+#ifndef CC_STRUCT_ALIGNED
+#define CC_STRUCT_ALIGNED FALSE
+#endif
+
+#ifndef CC_LITTLE_ENDIAN_HOST
+#define CC_LITTLE_ENDIAN_HOST TRUE
+#endif
+
+#if !CC_LITTLE_ENDIAN_HOST
+
+#ifndef CC_TEST_LRAM_ENDIAN
+#define CC_TEST_LRAM_ENDIAN TRUE
+#endif
+
+#endif
+
+#ifndef CC_MEMORY_MAPPED_IO
+#define CC_MEMORY_MAPPED_IO FALSE
+#endif
+
+#ifndef CC_WRITE_IO_COUNT
+#define CC_WRITE_IO_COUNT FALSE
+#endif
+
+#ifndef CC_CLEAR_DMA_REMAIN
+#define CC_CLEAR_DMA_REMAIN FALSE
+#endif
+
+#define ASC_CS_TYPE unsigned short
+
+#ifndef asc_ptr_type
+#define asc_ptr_type
+#endif
+
+#ifndef CC_SCAM
+#define CC_SCAM FALSE
+#endif
+
+#ifndef ASC_GET_PTR2FUNC
+#define ASC_GET_PTR2FUNC( fun ) ( Ptr2Func )( fun )
+#endif
+
+#define FLIP_BYTE_NIBBLE( x ) ( ((x<<4)& 0xFF) | (x>>4) )
+
+#define ASC_IS_ISA (0x0001)
+#define ASC_IS_ISAPNP (0x0081)
+#define ASC_IS_EISA (0x0002)
+#define ASC_IS_PCI (0x0004)
+#define ASC_IS_PCMCIA (0x0008)
+#define ASC_IS_PNP (0x0010)
+#define ASC_IS_MCA (0x0020)
+#define ASC_IS_VL (0x0040)
+
+#define ASC_ISA_PNP_PORT_ADDR (0x279)
+#define ASC_ISA_PNP_PORT_WRITE (ASC_ISA_PNP_PORT_ADDR+0x800)
+
+#define ASC_IS_WIDESCSI_16 (0x0100)
+#define ASC_IS_WIDESCSI_32 (0x0200)
+
+#define ASC_IS_BIG_ENDIAN (0x8000)
+
+#define ASC_CHIP_MIN_VER_VL (0x01)
+#define ASC_CHIP_MAX_VER_VL (0x07)
+
+#define ASC_CHIP_MIN_VER_PCI (0x09)
+#define ASC_CHIP_MAX_VER_PCI (0x0F)
+#define ASC_CHIP_VER_PCI_BIT (0x08)
+
+#define ASC_CHIP_MIN_VER_ISA (0x11)
+#define ASC_CHIP_MIN_VER_ISA_PNP (0x21)
+#define ASC_CHIP_MAX_VER_ISA (0x27)
+#define ASC_CHIP_VER_ISA_BIT (0x30)
+#define ASC_CHIP_VER_ISAPNP_BIT (0x20)
+
+#define ASC_CHIP_VER_ASYN_BUG (0x21)
+
+#define ASC_CHIP_MIN_VER_EISA (0x41)
+#define ASC_CHIP_MAX_VER_EISA (0x47)
+#define ASC_CHIP_VER_EISA_BIT (0x40)
+
+#define ASC_MAX_VL_DMA_ADDR (0x07FFFFFFL)
+#define ASC_MAX_VL_DMA_COUNT (0x07FFFFFFL)
+
+#define ASC_MAX_PCI_DMA_ADDR (0xFFFFFFFFL)
+#define ASC_MAX_PCI_DMA_COUNT (0xFFFFFFFFL)
+
+#define ASC_MAX_ISA_DMA_ADDR (0x00FFFFFFL)
+#define ASC_MAX_ISA_DMA_COUNT (0x00FFFFFFL)
+
+#define ASC_MAX_EISA_DMA_ADDR (0x07FFFFFFL)
+#define ASC_MAX_EISA_DMA_COUNT (0x07FFFFFFL)
+
+#if !CC_STRUCT_ALIGNED
+
+#define DvcGetQinfo( iop_base, s_addr, outbuf, words) \
+ AscMemWordCopyFromLram( iop_base, s_addr, outbuf, words)
+
+#define DvcPutScsiQ( iop_base, s_addr, outbuf, words) \
+ AscMemWordCopyToLram( iop_base, s_addr, outbuf, words)
+
+#endif
+
+#define ASC_SCSI_ID_BITS 3
+#define ASC_SCSI_TIX_TYPE uchar
+#define ASC_ALL_DEVICE_BIT_SET 0xFF
+
+#ifdef ASC_WIDESCSI_16
+
+#undef ASC_SCSI_ID_BITS
+#define ASC_SCSI_ID_BITS 4
+#define ASC_ALL_DEVICE_BIT_SET 0xFFFF
+
+#endif
+
+#ifdef ASC_WIDESCSI_32
+
+#undef ASC_SCSI_ID_BITS
+#define ASC_SCSI_ID_BITS 5
+#define ASC_ALL_DEVICE_BIT_SET 0xFFFFFFFFL
+
+#endif
+
+#if ASC_SCSI_ID_BITS == 3
+
+#define ASC_SCSI_BIT_ID_TYPE uchar
+#define ASC_MAX_TID 7
+#define ASC_MAX_LUN 7
+#define ASC_SCSI_WIDTH_BIT_SET 0xFF
+
+#elif ASC_SCSI_ID_BITS == 4
+
+#define ASC_SCSI_BIT_ID_TYPE ushort
+#define ASC_MAX_TID 15
+#define ASC_MAX_LUN 7
+#define ASC_SCSI_WIDTH_BIT_SET 0xFFFF
+
+#elif ASC_SCSI_ID_BITS == 5
+
+#define ASC_SCSI_BIT_ID_TYPE ulong
+#define ASC_MAX_TID 31
+#define ASC_MAX_LUN 7
+#define ASC_SCSI_WIDTH_BIT_SET 0xFFFFFFFF
+
+#else
+
+#error ASC_SCSI_ID_BITS definition is wrong
+
+#endif
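+
+/* ASC_SCSI_ID_BITS of 3, 4 or 5 thus selects 8, 16 or 32 target IDs
+ * (narrow, wide-16 or wide-32 SCSI) and sizes ASC_SCSI_BIT_ID_TYPE, the
+ * per-target bit-mask type used throughout the driver, to match.
+ */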
+
+#define ASC_MAX_SENSE_LEN 32
+#define ASC_MIN_SENSE_LEN 14
+
+#define ASC_MAX_CDB_LEN 12
+
+#define SCSICMD_TestUnitReady 0x00
+#define SCSICMD_Rewind 0x01
+#define SCSICMD_Rezero 0x01
+#define SCSICMD_RequestSense 0x03
+#define SCSICMD_Format 0x04
+#define SCSICMD_FormatUnit 0x04
+#define SCSICMD_Read6 0x08
+#define SCSICMD_Write6 0x0A
+#define SCSICMD_Seek6 0x0B
+#define SCSICMD_Inquiry 0x12
+#define SCSICMD_Verify6 0x13
+#define SCSICMD_ModeSelect6 0x15
+#define SCSICMD_ModeSense6 0x1A
+
+#define SCSICMD_StartStopUnit 0x1B
+#define SCSICMD_LoadUnloadTape 0x1B
+#define SCSICMD_ReadCapacity 0x25
+#define SCSICMD_Read10 0x28
+#define SCSICMD_Write10 0x2A
+#define SCSICMD_Seek10 0x2B
+#define SCSICMD_Erase10 0x2C
+#define SCSICMD_WriteAndVerify10 0x2E
+#define SCSICMD_Verify10 0x2F
+
+#define SCSICMD_ModeSelect10 0x55
+#define SCSICMD_ModeSense10 0x5A
+
+#define SCSI_TYPE_DASD 0x00
+#define SCSI_TYPE_SASD 0x01
+#define SCSI_TYPE_PRN 0x02
+#define SCSI_TYPE_PROC 0x03
+
+#define SCSI_TYPE_WORM 0x04
+#define SCSI_TYPE_CDROM 0x05
+#define SCSI_TYPE_SCANNER 0x06
+#define SCSI_TYPE_OPTMEM 0x07
+#define SCSI_TYPE_MED_CHG 0x08
+#define SCSI_TYPE_COMM 0x09
+#define SCSI_TYPE_UNKNOWN 0x1F
+#define SCSI_TYPE_NO_DVC 0xFF
+
+#define ASC_SCSIDIR_NOCHK 0x00
+
+#define ASC_SCSIDIR_T2H 0x08
+
+#define ASC_SCSIDIR_H2T 0x10
+
+#define ASC_SCSIDIR_NODATA 0x18
+
+#define SCSI_SENKEY_NO_SENSE 0x00
+#define SCSI_SENKEY_UNDEFINED 0x01
+#define SCSI_SENKEY_NOT_READY 0x02
+#define SCSI_SENKEY_MEDIUM_ERR 0x03
+#define SCSI_SENKEY_HW_ERR 0x04
+#define SCSI_SENKEY_ILLEGAL 0x05
+#define SCSI_SENKEY_ATTENSION 0x06
+#define SCSI_SENKEY_PROTECTED 0x07
+#define SCSI_SENKEY_BLANK 0x08
+#define SCSI_SENKEY_V_UNIQUE 0x09
+#define SCSI_SENKEY_CPY_ABORT 0x0A
+#define SCSI_SENKEY_ABORT 0x0B
+#define SCSI_SENKEY_EQUAL 0x0C
+#define SCSI_SENKEY_VOL_OVERFLOW 0x0D
+#define SCSI_SENKEY_MISCOMP 0x0E
+#define SCSI_SENKEY_RESERVED 0x0F
+
+#define ASC_SRB_HOST( x ) ( ( uchar )( ( uchar )( x ) >> 4 ) )
+#define ASC_SRB_TID( x ) ( ( uchar )( ( uchar )( x ) & ( uchar )0x0F ) )
+
+#define ASC_SRB_LUN( x ) ( ( uchar )( ( uint )( x ) >> 13 ) )
+
+#define PUT_CDB1( x ) ( ( uchar )( ( uint )( x ) >> 8 ) )
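+
+/* Examples of the packing these macros decode: ASC_SRB_HOST(0x35) == 0x03
+ * and ASC_SRB_TID(0x35) == 0x05; with a 16-bit argument,
+ * ASC_SRB_LUN(0x2035) == 0x01 and PUT_CDB1(0x2035) == 0x20.
+ */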
+
+#define SS_GOOD 0x00
+#define SS_CHK_CONDITION 0x02
+#define SS_CONDITION_MET 0x04
+#define SS_TARGET_BUSY 0x08
+#define SS_INTERMID 0x10
+#define SS_INTERMID_COND_MET 0x14
+
+#define SS_RSERV_CONFLICT 0x18
+#define SS_CMD_TERMINATED 0x22
+
+#define SS_QUEUE_FULL 0x28
+
+#define MS_CMD_DONE 0x00
+#define MS_EXTEND 0x01
+#define MS_SDTR_LEN 0x03
+#define MS_SDTR_CODE 0x01
+
+#define M1_SAVE_DATA_PTR 0x02
+#define M1_RESTORE_PTRS 0x03
+#define M1_DISCONNECT 0x04
+#define M1_INIT_DETECTED_ERR 0x05
+#define M1_ABORT 0x06
+#define M1_MSG_REJECT 0x07
+#define M1_NO_OP 0x08
+#define M1_MSG_PARITY_ERR 0x09
+#define M1_LINK_CMD_DONE 0x0A
+#define M1_LINK_CMD_DONE_WFLAG 0x0B
+#define M1_BUS_DVC_RESET 0x0C
+#define M1_ABORT_TAG 0x0D
+#define M1_CLR_QUEUE 0x0E
+#define M1_INIT_RECOVERY 0x0F
+#define M1_RELEASE_RECOVERY 0x10
+#define M1_KILL_IO_PROC 0x11
+
+#define M2_QTAG_MSG_SIMPLE 0x20
+#define M2_QTAG_MSG_HEAD 0x21
+#define M2_QTAG_MSG_ORDERED 0x22
+#define M2_IGNORE_WIDE_RESIDUE 0x23
+
+typedef struct {
+ uchar peri_dvc_type:5;
+ uchar peri_qualifier:3;
+} ASC_SCSI_INQ0;
+
+typedef struct {
+ uchar dvc_type_modifier:7;
+ uchar rmb:1;
+} ASC_SCSI_INQ1;
+
+typedef struct {
+ uchar ansi_apr_ver:3;
+ uchar ecma_ver:3;
+ uchar iso_ver:2;
+} ASC_SCSI_INQ2;
+
+typedef struct {
+ uchar rsp_data_fmt:4;
+
+ uchar res:2;
+ uchar TemIOP:1;
+ uchar aenc:1;
+} ASC_SCSI_INQ3;
+
+typedef struct {
+ uchar StfRe:1;
+ uchar CmdQue:1;
+ uchar Reserved:1;
+ uchar Linked:1;
+ uchar Sync:1;
+ uchar WBus16:1;
+ uchar WBus32:1;
+ uchar RelAdr:1;
+} ASC_SCSI_INQ7;
+
+typedef struct {
+ ASC_SCSI_INQ0 byte0;
+ ASC_SCSI_INQ1 byte1;
+ ASC_SCSI_INQ2 byte2;
+ ASC_SCSI_INQ3 byte3;
+ uchar add_len;
+ uchar res1;
+ uchar res2;
+ ASC_SCSI_INQ7 byte7;
+ uchar vendor_id[8];
+ uchar product_id[16];
+ uchar product_rev_level[4];
+} ASC_SCSI_INQUIRY;
+
+typedef struct asc_req_sense {
+ uchar err_code:7;
+ uchar info_valid:1;
+ uchar segment_no;
+ uchar sense_key:4;
+ uchar reserved_bit:1;
+ uchar sense_ILI:1;
+ uchar sense_EOM:1;
+ uchar file_mark:1;
+ uchar info1[4];
+ uchar add_sense_len;
+ uchar cmd_sp_info[4];
+ uchar asc;
+ uchar ascq;
+
+ uchar fruc;
+ uchar sks_byte0:7;
+ uchar sks_valid:1;
+ uchar sks_bytes[2];
+ uchar notused[2];
+ uchar ex_sense_code;
+ uchar info2[4];
+} ASC_REQ_SENSE;
+
+#define ASC_SG_LIST_PER_Q 7
+
+#define QS_FREE 0x00
+#define QS_READY 0x01
+#define QS_DISC1 0x02
+#define QS_DISC2 0x04
+#define QS_BUSY 0x08
+
+#define QS_ABORTED 0x40
+#define QS_DONE 0x80
+
+#define QC_NO_CALLBACK 0x01
+
+#define QC_SG_SWAP_QUEUE 0x02
+#define QC_SG_HEAD 0x04
+#define QC_DATA_IN 0x08
+#define QC_DATA_OUT 0x10
+
+#define QC_URGENT 0x20
+#define QC_MSG_OUT 0x40
+#define QC_REQ_SENSE 0x80
+
+#define QCSG_SG_XFER_LIST 0x02
+#define QCSG_SG_XFER_MORE 0x04
+#define QCSG_SG_XFER_END 0x08
+
+#define QD_IN_PROGRESS 0x00
+#define QD_NO_ERROR 0x01
+#define QD_ABORTED_BY_HOST 0x02
+#define QD_WITH_ERROR 0x04
+#define QD_INVALID_REQUEST 0x80
+#define QD_INVALID_HOST_NUM 0x81
+#define QD_INVALID_DEVICE 0x82
+#define QD_ERR_INTERNAL 0xFF
+
+#define QHSTA_NO_ERROR 0x00
+#define QHSTA_M_SEL_TIMEOUT 0x11
+#define QHSTA_M_DATA_OVER_RUN 0x12
+#define QHSTA_M_DATA_UNDER_RUN 0x12
+#define QHSTA_M_UNEXPECTED_BUS_FREE 0x13
+#define QHSTA_M_BAD_BUS_PHASE_SEQ 0x14
+
+#define QHSTA_D_QDONE_SG_LIST_CORRUPTED 0x21
+#define QHSTA_D_ASC_DVC_ERROR_CODE_SET 0x22
+#define QHSTA_D_HOST_ABORT_FAILED 0x23
+#define QHSTA_D_EXE_SCSI_Q_FAILED 0x24
+#define QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT 0x25
+
+#define QHSTA_D_ASPI_NO_BUF_POOL 0x26
+
+#define QHSTA_M_WTM_TIMEOUT 0x41
+#define QHSTA_M_BAD_CMPL_STATUS_IN 0x42
+#define QHSTA_M_NO_AUTO_REQ_SENSE 0x43
+#define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44
+#define QHSTA_M_TARGET_STATUS_BUSY 0x45
+#define QHSTA_M_BAD_TAG_CODE 0x46
+
+#define QHSTA_M_BAD_QUEUE_FULL_OR_BUSY 0x47
+
+#define QHSTA_D_LRAM_CMP_ERROR 0x81
+#define QHSTA_M_MICRO_CODE_ERROR_HALT 0xA1
+
+#define ASC_FLAG_SCSIQ_REQ 0x01
+#define ASC_FLAG_BIOS_SCSIQ_REQ 0x02
+#define ASC_FLAG_BIOS_ASYNC_IO 0x04
+#define ASC_FLAG_SRB_LINEAR_ADDR 0x08
+
+#define ASC_FLAG_WIN16 0x10
+#define ASC_FLAG_WIN32 0x20
+
+#define ASC_FLAG_DOS_VM_CALLBACK 0x80
+
+#define ASC_TAG_FLAG_ADD_ONE_BYTE 0x10
+#define ASC_TAG_FLAG_ISAPNP_ADD_BYTES 0x40
+
+#define ASC_SCSIQ_CPY_BEG 4
+#define ASC_SCSIQ_SGHD_CPY_BEG 2
+
+#define ASC_SCSIQ_B_FWD 0
+#define ASC_SCSIQ_B_BWD 1
+
+#define ASC_SCSIQ_B_STATUS 2
+#define ASC_SCSIQ_B_QNO 3
+
+#define ASC_SCSIQ_B_CNTL 4
+#define ASC_SCSIQ_B_SG_QUEUE_CNT 5
+
+#define ASC_SCSIQ_D_DATA_ADDR 8
+#define ASC_SCSIQ_D_DATA_CNT 12
+#define ASC_SCSIQ_B_SENSE_LEN 20
+#define ASC_SCSIQ_DONE_INFO_BEG 22
+#define ASC_SCSIQ_D_SRBPTR 22
+#define ASC_SCSIQ_B_TARGET_IX 26
+#define ASC_SCSIQ_B_CDB_LEN 28
+#define ASC_SCSIQ_B_TAG_CODE 29
+#define ASC_SCSIQ_W_VM_ID 30
+#define ASC_SCSIQ_DONE_STATUS 32
+#define ASC_SCSIQ_HOST_STATUS 33
+#define ASC_SCSIQ_SCSI_STATUS 34
+#define ASC_SCSIQ_CDB_BEG 36
+#define ASC_SCSIQ_DW_REMAIN_XFER_ADDR 56
+#define ASC_SCSIQ_DW_REMAIN_XFER_CNT 60
+#define ASC_SCSIQ_B_SG_WK_QP 49
+#define ASC_SCSIQ_B_SG_WK_IX 50
+#define ASC_SCSIQ_W_REQ_COUNT 52
+#define ASC_SCSIQ_B_LIST_CNT 6
+#define ASC_SCSIQ_B_CUR_LIST_CNT 7
+
+#define ASC_SGQ_B_SG_CNTL 4
+#define ASC_SGQ_B_SG_HEAD_QP 5
+#define ASC_SGQ_B_SG_LIST_CNT 6
+#define ASC_SGQ_B_SG_CUR_LIST_CNT 7
+#define ASC_SGQ_LIST_BEG 8
+
+#define ASC_DEF_SCSI1_QNG 2
+#define ASC_MAX_SCSI1_QNG 2
+#define ASC_DEF_SCSI2_QNG 16
+#define ASC_MAX_SCSI2_QNG 32
+
+#define ASC_TAG_CODE_MASK 0x23
+
+#define ASC_STOP_REQ_RISC_STOP 0x01
+
+#define ASC_STOP_ACK_RISC_STOP 0x03
+
+#define ASC_STOP_CLEAN_UP_BUSY_Q 0x10
+#define ASC_STOP_CLEAN_UP_DISC_Q 0x20
+#define ASC_STOP_HOST_REQ_RISC_HALT 0x40
+#define ASC_STOP_SEND_INT_TO_HOST 0x80
+
+#define ASC_TIDLUN_TO_IX( tid, lun ) ( ASC_SCSI_TIX_TYPE )( (tid) + ((lun)<<ASC_SCSI_ID_BITS) )
+
+#define ASC_TID_TO_TARGET_ID( tid ) ( ASC_SCSI_BIT_ID_TYPE )( 0x01 << (tid) )
+#define ASC_TIX_TO_TARGET_ID( tix ) ( 0x01 << ( (tix) & ASC_MAX_TID ) )
+#define ASC_TIX_TO_TID( tix ) ( (tix) & ASC_MAX_TID )
+#define ASC_TID_TO_TIX( tid ) ( (tid) & ASC_MAX_TID )
+#define ASC_TIX_TO_LUN( tix ) ( ( (tix) >> ASC_SCSI_ID_BITS ) & ASC_MAX_LUN )
+
+#define ASC_QNO_TO_QADDR( q_no ) ( (ASC_QADR_BEG)+( ( int )(q_no) << 6 ) )
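+
+/* With the default ASC_SCSI_ID_BITS of 3 a target index packs the LUN
+ * above the TID: ASC_TIDLUN_TO_IX(2, 1) == 0x0A, which unpacks as
+ * ASC_TIX_TO_TID(0x0A) == 2, ASC_TIX_TO_LUN(0x0A) == 1 and
+ * ASC_TIX_TO_TARGET_ID(0x0A) == 0x04. Queues are 64 bytes apart in LRAM,
+ * so ASC_QNO_TO_QADDR(3) == ASC_QADR_BEG + 0xC0 == 0x40C0.
+ */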
+
+typedef struct asc_scisq_1 {
+ uchar status;
+ uchar q_no;
+ uchar cntl;
+ uchar sg_queue_cnt;
+
+ uchar target_id;
+ uchar target_lun;
+
+ ulong data_addr;
+ ulong data_cnt;
+ ulong sense_addr;
+ uchar sense_len;
+ uchar user_def;
+} ASC_SCSIQ_1;
+
+typedef struct asc_scisq_2 {
+ ulong srb_ptr;
+ uchar target_ix;
+
+ uchar flag;
+ uchar cdb_len;
+ uchar tag_code;
+
+ ushort vm_id;
+} ASC_SCSIQ_2;
+
+typedef struct asc_scsiq_3 {
+ uchar done_stat;
+ uchar host_stat;
+ uchar scsi_stat;
+ uchar scsi_msg;
+} ASC_SCSIQ_3;
+
+typedef struct asc_scsiq_4 {
+ uchar cdb[ASC_MAX_CDB_LEN];
+ uchar y_first_sg_list_qp;
+ uchar y_working_sg_qp;
+ uchar y_working_sg_ix;
+ uchar y_cntl;
+ ushort x_req_count;
+ ushort x_reconnect_rtn;
+ ulong x_saved_data_addr;
+ ulong x_saved_data_cnt;
+} ASC_SCSIQ_4;
+
+typedef struct asc_q_done_info {
+ ASC_SCSIQ_2 d2;
+ ASC_SCSIQ_3 d3;
+ uchar q_status;
+ uchar q_no;
+ uchar cntl;
+ uchar sense_len;
+ uchar user_def;
+ uchar res;
+ ulong remain_bytes;
+} ASC_QDONE_INFO;
+
+typedef struct asc_sg_list {
+ ulong addr;
+ ulong bytes;
+} ASC_SG_LIST;
+
+typedef struct asc_sg_head {
+ uchar entry_cnt;
+
+ uchar queue_cnt;
+
+ uchar entry_to_copy;
+ uchar res;
+ ASC_SG_LIST sg_list[ASC_MAX_SG_LIST];
+} ASC_SG_HEAD;
+
+#define ASC_MIN_SG_LIST 2
+
+typedef struct asc_min_sg_head {
+ uchar entry_cnt;
+
+ uchar queue_cnt;
+
+ uchar entry_to_copy;
+ uchar res;
+ ASC_SG_LIST sg_list[ASC_MIN_SG_LIST];
+} ASC_MIN_SG_HEAD;
+
+#define QCX_SORT (0x0001)
+#define QCX_COALEASE (0x0002)
+
+#if CC_LINK_BUSY_Q
+typedef struct asc_ext_scsi_q {
+ ulong lba;
+ ushort lba_len;
+ struct asc_scsi_q dosfar *next;
+ struct asc_scsi_q dosfar *join;
+ ushort cntl;
+ ushort buffer_id;
+ uchar q_required;
+ uchar res;
+} ASC_EXT_SCSI_Q;
+
+#endif
+
+typedef struct asc_scsi_q {
+ ASC_SCSIQ_1 q1;
+ ASC_SCSIQ_2 q2;
+ uchar dosfar *cdbptr;
+
+ ASC_SG_HEAD dosfar *sg_head;
+
+#if CC_LINK_BUSY_Q
+ ASC_EXT_SCSI_Q ext;
+#endif
+
+} ASC_SCSI_Q;
+
+typedef struct asc_scsi_req_q {
+ ASC_SCSIQ_1 r1;
+ ASC_SCSIQ_2 r2;
+ uchar dosfar *cdbptr;
+ ASC_SG_HEAD dosfar *sg_head;
+
+#if CC_LINK_BUSY_Q
+ ASC_EXT_SCSI_Q ext;
+#endif
+
+ uchar dosfar *sense_ptr;
+
+ ASC_SCSIQ_3 r3;
+ uchar cdb[ASC_MAX_CDB_LEN];
+ uchar sense[ASC_MIN_SENSE_LEN];
+} ASC_SCSI_REQ_Q;
+
+typedef struct asc_risc_q {
+ uchar fwd;
+ uchar bwd;
+ ASC_SCSIQ_1 i1;
+ ASC_SCSIQ_2 i2;
+ ASC_SCSIQ_3 i3;
+ ASC_SCSIQ_4 i4;
+} ASC_RISC_Q;
+
+typedef struct asc_sg_list_q {
+
+ uchar seq_no;
+ uchar q_no;
+ uchar cntl;
+ uchar sg_head_qp;
+ uchar sg_list_cnt;
+ uchar sg_cur_list_cnt;
+
+} ASC_SG_LIST_Q;
+
+typedef struct asc_risc_sg_list_q {
+ uchar fwd;
+ uchar bwd;
+ ASC_SG_LIST_Q sg;
+ ASC_SG_LIST sg_list[7];
+} ASC_RISC_SG_LIST_Q;
+
+#define ASC_EXE_SCSI_IO_MAX_IDLE_LOOP 0x1000000UL
+#define ASC_EXE_SCSI_IO_MAX_WAIT_LOOP 1024
+
+#define ASCQ_ERR_NO_ERROR 0
+#define ASCQ_ERR_IO_NOT_FOUND 1
+#define ASCQ_ERR_LOCAL_MEM 2
+#define ASCQ_ERR_CHKSUM 3
+#define ASCQ_ERR_START_CHIP 4
+#define ASCQ_ERR_INT_TARGET_ID 5
+#define ASCQ_ERR_INT_LOCAL_MEM 6
+#define ASCQ_ERR_HALT_RISC 7
+#define ASCQ_ERR_GET_ASPI_ENTRY 8
+#define ASCQ_ERR_CLOSE_ASPI 9
+#define ASCQ_ERR_HOST_INQUIRY 0x0A
+#define ASCQ_ERR_SAVED_SRB_BAD 0x0B
+#define ASCQ_ERR_QCNTL_SG_LIST 0x0C
+#define ASCQ_ERR_Q_STATUS 0x0D
+#define ASCQ_ERR_WR_SCSIQ 0x0E
+#define ASCQ_ERR_PC_ADDR 0x0F
+#define ASCQ_ERR_SYN_OFFSET 0x10
+#define ASCQ_ERR_SYN_XFER_TIME 0x11
+#define ASCQ_ERR_LOCK_DMA 0x12
+#define ASCQ_ERR_UNLOCK_DMA 0x13
+#define ASCQ_ERR_VDS_CHK_INSTALL 0x14
+#define ASCQ_ERR_MICRO_CODE_HALT 0x15
+#define ASCQ_ERR_SET_LRAM_ADDR 0x16
+#define ASCQ_ERR_CUR_QNG 0x17
+#define ASCQ_ERR_SG_Q_LINKS 0x18
+#define ASCQ_ERR_SCSIQ_PTR 0x19
+#define ASCQ_ERR_ISR_RE_ENTRY 0x1A
+#define ASCQ_ERR_CRITICAL_RE_ENTRY 0x1B
+#define ASCQ_ERR_ISR_ON_CRITICAL 0x1C
+#define ASCQ_ERR_SG_LIST_ODD_ADDRESS 0x1D
+#define ASCQ_ERR_XFER_ADDRESS_TOO_BIG 0x1E
+#define ASCQ_ERR_SCSIQ_NULL_PTR 0x1F
+#define ASCQ_ERR_SCSIQ_BAD_NEXT_PTR 0x20
+#define ASCQ_ERR_GET_NUM_OF_FREE_Q 0x21
+#define ASCQ_ERR_SEND_SCSI_Q 0x22
+#define ASCQ_ERR_HOST_REQ_RISC_HALT 0x23
+#define ASCQ_ERR_RESET_SDTR 0x24
+
+#define ASC_WARN_NO_ERROR 0x0000
+#define ASC_WARN_IO_PORT_ROTATE 0x0001
+#define ASC_WARN_EEPROM_CHKSUM 0x0002
+#define ASC_WARN_IRQ_MODIFIED 0x0004
+#define ASC_WARN_AUTO_CONFIG 0x0008
+#define ASC_WARN_CMD_QNG_CONFLICT 0x0010
+
+#define ASC_WARN_EEPROM_RECOVER 0x0020
+#define ASC_WARN_CFG_MSW_RECOVER 0x0040
+
+#define ASC_IERR_WRITE_EEPROM 0x0001
+#define ASC_IERR_MCODE_CHKSUM 0x0002
+#define ASC_IERR_SET_PC_ADDR 0x0004
+#define ASC_IERR_START_STOP_CHIP 0x0008
+
+#define ASC_IERR_IRQ_NO 0x0010
+
+#define ASC_IERR_SET_IRQ_NO 0x0020
+#define ASC_IERR_CHIP_VERSION 0x0040
+#define ASC_IERR_SET_SCSI_ID 0x0080
+#define ASC_IERR_GET_PHY_ADDR 0x0100
+#define ASC_IERR_BAD_SIGNATURE 0x0200
+#define ASC_IERR_NO_BUS_TYPE 0x0400
+#define ASC_IERR_SCAM 0x0800
+#define ASC_IERR_SET_SDTR 0x1000
+#define ASC_IERR_RW_LRAM 0x8000
+
+#define ASC_DEF_IRQ_NO 10
+#define ASC_MAX_IRQ_NO 15
+#define ASC_MIN_IRQ_NO 10
+
+#define ASC_MIN_REMAIN_Q (0x02)
+#define ASC_DEF_MAX_TOTAL_QNG (0x40)
+
+#define ASC_MIN_TAG_Q_PER_DVC (0x04)
+#define ASC_DEF_TAG_Q_PER_DVC (0x04)
+
+#define ASC_MIN_FREE_Q ASC_MIN_REMAIN_Q
+
+#define ASC_MIN_TOTAL_QNG (( ASC_MAX_SG_QUEUE )+( ASC_MIN_FREE_Q ))
+
+#define ASC_MAX_TOTAL_QNG 240
+#define ASC_MAX_PCI_INRAM_TOTAL_QNG 20
+
+#define ASC_MAX_INRAM_TAG_QNG 16
+
+typedef struct asc_dvc_cfg {
+ ASC_SCSI_BIT_ID_TYPE can_tagged_qng;
+
+ ASC_SCSI_BIT_ID_TYPE cmd_qng_enabled;
+ ASC_SCSI_BIT_ID_TYPE disc_enable;
+ uchar res;
+ uchar chip_scsi_id:4;
+
+ uchar isa_dma_speed:4;
+
+ uchar isa_dma_channel;
+ uchar chip_version;
+ ushort pci_device_id;
+ ushort lib_serial_no;
+ ushort lib_version;
+ ushort mcode_date;
+ ushort mcode_version;
+ uchar sdtr_data[ASC_MAX_TID + 1];
+ uchar max_tag_qng[ASC_MAX_TID + 1];
+ uchar dosfar *overrun_buf;
+
+} ASC_DVC_CFG;
+
+#define ASC_DEF_DVC_CNTL 0xFFFF
+#define ASC_DEF_CHIP_SCSI_ID 7
+#define ASC_DEF_ISA_DMA_SPEED 4
+
+#define ASC_INIT_STATE_NULL 0x0000
+#define ASC_INIT_STATE_BEG_GET_CFG 0x0001
+#define ASC_INIT_STATE_END_GET_CFG 0x0002
+#define ASC_INIT_STATE_BEG_SET_CFG 0x0004
+#define ASC_INIT_STATE_END_SET_CFG 0x0008
+#define ASC_INIT_STATE_BEG_LOAD_MC 0x0010
+#define ASC_INIT_STATE_END_LOAD_MC 0x0020
+#define ASC_INIT_STATE_BEG_INQUIRY 0x0040
+#define ASC_INIT_STATE_END_INQUIRY 0x0080
+#define ASC_INIT_RESET_SCSI_DONE 0x0100
+
+#define ASC_PCI_DEVICE_ID_REV_A 0x1100
+#define ASC_PCI_DEVICE_ID_REV_B 0x1200
+
+#define ASC_BUG_FIX_ADD_ONE_BYTE 0x0001
+
+#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
+
+#define ASC_MIN_TAGGED_CMD 7
+
+typedef struct asc_dvc_var {
+ PortAddr iop_base;
+ ushort err_code;
+ ushort dvc_cntl;
+ ushort bug_fix_cntl;
+ ushort bus_type;
+ Ptr2Func isr_callback;
+ Ptr2Func exe_callback;
+
+ ASC_SCSI_BIT_ID_TYPE init_sdtr;
+
+ ASC_SCSI_BIT_ID_TYPE sdtr_done;
+
+ ASC_SCSI_BIT_ID_TYPE use_tagged_qng;
+
+ ASC_SCSI_BIT_ID_TYPE unit_not_ready;
+
+ ASC_SCSI_BIT_ID_TYPE queue_full_or_busy;
+
+ ASC_SCSI_BIT_ID_TYPE start_motor;
+ uchar scsi_reset_wait;
+ uchar chip_no;
+
+ char is_in_int;
+ uchar max_total_qng;
+
+ uchar cur_total_qng;
+
+ uchar in_critical_cnt;
+
+ uchar irq_no;
+ uchar last_q_shortage;
+
+ ushort init_state;
+ uchar cur_dvc_qng[ASC_MAX_TID + 1];
+ uchar max_dvc_qng[ASC_MAX_TID + 1];
+
+ ASC_SCSI_Q dosfar *scsiq_busy_head[ASC_MAX_TID + 1];
+ ASC_SCSI_Q dosfar *scsiq_busy_tail[ASC_MAX_TID + 1];
+
+ ulong int_count;
+ ulong req_count;
+ ulong busy_count;
+
+ ASC_DVC_CFG dosfar *cfg;
+ Ptr2Func saved_ptr2func;
+ ulong reserved2;
+ ulong reserved3;
+ ulong max_dma_count;
+ ASC_SCSI_BIT_ID_TYPE no_scam;
+ ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer;
+} ASC_DVC_VAR;
+
+typedef int (dosfar * ASC_ISR_CALLBACK) (ASC_DVC_VAR asc_ptr_type *, ASC_QDONE_INFO dosfar *);
+typedef int (dosfar * ASC_EXE_CALLBACK) (ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_Q dosfar *);
+
+typedef struct asc_dvc_inq_info {
+ uchar type[ASC_MAX_TID + 1][ASC_MAX_LUN + 1];
+} ASC_DVC_INQ_INFO;
+
+typedef struct asc_cap_info {
+ ulong lba;
+ ulong blk_size;
+} ASC_CAP_INFO;
+
+typedef struct asc_cap_info_array {
+ ASC_CAP_INFO cap_info[ASC_MAX_TID + 1][ASC_MAX_LUN + 1];
+} ASC_CAP_INFO_ARRAY;
+
+#define ASC_IOADR_TABLE_MAX_IX 11
+#define ASC_IOADR_GAP 0x10
+#define ASC_SEARCH_IOP_GAP 0x10
+#define ASC_MIN_IOP_ADDR ( PortAddr )0x0100
+#define ASC_MAX_IOP_ADDR ( PortAddr )0x3F0
+
+#define ASC_IOADR_1 ( PortAddr )0x0110
+#define ASC_IOADR_2 ( PortAddr )0x0130
+#define ASC_IOADR_3 ( PortAddr )0x0150
+#define ASC_IOADR_4 ( PortAddr )0x0190
+#define ASC_IOADR_5 ( PortAddr )0x0210
+#define ASC_IOADR_6 ( PortAddr )0x0230
+#define ASC_IOADR_7 ( PortAddr )0x0250
+#define ASC_IOADR_8 ( PortAddr )0x0330
+#define ASC_IOADR_DEF ASC_IOADR_8
+
+#define ASC_SYN_XFER_NO 8
+#define ASC_MAX_SDTR_PERIOD_INDEX 7
+#define ASC_SYN_MAX_OFFSET 0x0F
+#define ASC_DEF_SDTR_OFFSET 0x0F
+#define ASC_DEF_SDTR_INDEX 0x00
+
+#define SYN_XFER_NS_0 25
+#define SYN_XFER_NS_1 30
+#define SYN_XFER_NS_2 35
+#define SYN_XFER_NS_3 40
+#define SYN_XFER_NS_4 50
+#define SYN_XFER_NS_5 60
+#define SYN_XFER_NS_6 70
+#define SYN_XFER_NS_7 85
+
+#define ASC_SDTR_PERIOD_IX_MIN 7
+
+#define SYN_XMSG_WLEN 3
+
+typedef struct sdtr_xmsg {
+ uchar msg_type;
+ uchar msg_len;
+ uchar msg_req;
+ uchar xfer_period;
+ uchar req_ack_offset;
+ uchar res;
+} SDTR_XMSG;
+
+#define ASC_MCNTL_NO_SEL_TIMEOUT ( ushort )0x0001
+#define ASC_MCNTL_NULL_TARGET ( ushort )0x0002
+
+#define ASC_CNTL_INITIATOR ( ushort )0x0001
+#define ASC_CNTL_BIOS_GT_1GB ( ushort )0x0002
+#define ASC_CNTL_BIOS_GT_2_DISK ( ushort )0x0004
+#define ASC_CNTL_BIOS_REMOVABLE ( ushort )0x0008
+#define ASC_CNTL_NO_SCAM ( ushort )0x0010
+#define ASC_CNTL_NO_PCI_FIX_ASYN_XFER ( ushort )0x0020
+
+#define ASC_CNTL_INT_MULTI_Q ( ushort )0x0080
+
+#define ASC_CNTL_NO_LUN_SUPPORT ( ushort )0x0040
+
+#define ASC_CNTL_NO_VERIFY_COPY ( ushort )0x0100
+#define ASC_CNTL_RESET_SCSI ( ushort )0x0200
+#define ASC_CNTL_INIT_INQUIRY ( ushort )0x0400
+#define ASC_CNTL_INIT_VERBOSE ( ushort )0x0800
+
+#define ASC_CNTL_SCSI_PARITY ( ushort )0x1000
+#define ASC_CNTL_BURST_MODE ( ushort )0x2000
+
+#define ASC_CNTL_USE_8_IOP_BASE ( ushort )0x4000
+
+#define ASC_EEP_DVC_CFG_BEG_VL 2
+#define ASC_EEP_MAX_DVC_ADDR_VL 15
+
+#define ASC_EEP_DVC_CFG_BEG 32
+#define ASC_EEP_MAX_DVC_ADDR 45
+
+#define ASC_EEP_DEFINED_WORDS 10
+#define ASC_EEP_MAX_ADDR 63
+#define ASC_EEP_RES_WORDS 0
+#define ASC_EEP_MAX_RETRY 20
+#define ASC_MAX_INIT_BUSY_RETRY 8
+
+#define ASC_EEP_ISA_PNP_WSIZE 16
+
+typedef struct asceep_config {
+ ushort cfg_lsw;
+ ushort cfg_msw;
+
+ uchar init_sdtr;
+ uchar disc_enable;
+
+ uchar use_cmd_qng;
+
+ uchar start_motor;
+ uchar max_total_qng;
+ uchar max_tag_qng;
+ uchar bios_scan;
+
+ uchar power_up_wait;
+
+ uchar no_scam;
+ uchar chip_scsi_id:4;
+
+ uchar isa_dma_speed:4;
+
+ uchar sdtr_data[ASC_MAX_TID + 1];
+
+ uchar adapter_info[6];
+
+ ushort cntl;
+
+ ushort chksum;
+} ASCEEP_CONFIG;
+
+#define ASC_EEP_CMD_READ 0x80
+#define ASC_EEP_CMD_WRITE 0x40
+#define ASC_EEP_CMD_WRITE_ABLE 0x30
+#define ASC_EEP_CMD_WRITE_DISABLE 0x00
+
+#define ASC_OVERRUN_BSIZE 0x00000048UL
+
+#define ASCV_MSGOUT_BEG 0x0000
+#define ASCV_MSGOUT_SDTR_PERIOD (ASCV_MSGOUT_BEG+3)
+#define ASCV_MSGOUT_SDTR_OFFSET (ASCV_MSGOUT_BEG+4)
+
+#define ASCV_MSGIN_BEG (ASCV_MSGOUT_BEG+8)
+#define ASCV_MSGIN_SDTR_PERIOD (ASCV_MSGIN_BEG+3)
+#define ASCV_MSGIN_SDTR_OFFSET (ASCV_MSGIN_BEG+4)
+
+#define ASCV_SDTR_DATA_BEG (ASCV_MSGIN_BEG+8)
+#define ASCV_SDTR_DONE_BEG (ASCV_SDTR_DATA_BEG+8)
+#define ASCV_MAX_DVC_QNG_BEG ( ushort )0x0020
+
+#define ASCV_ASCDVC_ERR_CODE_W ( ushort )0x0030
+#define ASCV_MCODE_CHKSUM_W ( ushort )0x0032
+#define ASCV_MCODE_SIZE_W ( ushort )0x0034
+#define ASCV_STOP_CODE_B ( ushort )0x0036
+#define ASCV_DVC_ERR_CODE_B ( ushort )0x0037
+
+#define ASCV_OVERRUN_PADDR_D ( ushort )0x0038
+#define ASCV_OVERRUN_BSIZE_D ( ushort )0x003C
+
+#define ASCV_HALTCODE_W ( ushort )0x0040
+#define ASCV_CHKSUM_W ( ushort )0x0042
+#define ASCV_MC_DATE_W ( ushort )0x0044
+#define ASCV_MC_VER_W ( ushort )0x0046
+#define ASCV_NEXTRDY_B ( ushort )0x0048
+#define ASCV_DONENEXT_B ( ushort )0x0049
+#define ASCV_USE_TAGGED_QNG_B ( ushort )0x004A
+#define ASCV_SCSIBUSY_B ( ushort )0x004B
+#define ASCV_CDBCNT_B ( ushort )0x004C
+#define ASCV_CURCDB_B ( ushort )0x004D
+#define ASCV_RCLUN_B ( ushort )0x004E
+#define ASCV_BUSY_QHEAD_B ( ushort )0x004F
+#define ASCV_DISC1_QHEAD_B ( ushort )0x0050
+
+#define ASCV_DISC_ENABLE_B ( ushort )0x0052
+#define ASCV_CAN_TAGGED_QNG_B ( ushort )0x0053
+#define ASCV_HOSTSCSI_ID_B ( ushort )0x0055
+#define ASCV_MCODE_CNTL_B ( ushort )0x0056
+#define ASCV_NULL_TARGET_B ( ushort )0x0057
+
+#define ASCV_FREE_Q_HEAD_W ( ushort )0x0058
+#define ASCV_DONE_Q_TAIL_W ( ushort )0x005A
+#define ASCV_FREE_Q_HEAD_B ( ushort )(ASCV_FREE_Q_HEAD_W+1)
+#define ASCV_DONE_Q_TAIL_B ( ushort )(ASCV_DONE_Q_TAIL_W+1)
+
+#define ASCV_HOST_FLAG_B ( ushort )0x005D
+
+#define ASCV_TOTAL_READY_Q_B ( ushort )0x0064
+#define ASCV_VER_SERIAL_B ( ushort )0x0065
+#define ASCV_HALTCODE_SAVED_W ( ushort )0x0066
+#define ASCV_WTM_FLAG_B ( ushort )0x0068
+#define ASCV_RISC_FLAG_B ( ushort )0x006A
+#define ASCV_REQ_SG_LIST_QP ( ushort )0x006B
+
+#define ASC_HOST_FLAG_IN_ISR 0x01
+#define ASC_HOST_FLAG_ACK_INT 0x02
+
+#define ASC_RISC_FLAG_GEN_INT 0x01
+#define ASC_RISC_FLAG_REQ_SG_LIST 0x02
+
+#define IOP_CTRL (0x0F)
+#define IOP_STATUS (0x0E)
+#define IOP_INT_ACK IOP_STATUS
+
+#define IOP_REG_IFC (0x0D)
+
+#define IOP_SYN_OFFSET (0x0B)
+#define IOP_REG_PC (0x0C)
+#define IOP_RAM_ADDR (0x0A)
+#define IOP_RAM_DATA (0x08)
+#define IOP_EEP_DATA (0x06)
+#define IOP_EEP_CMD (0x07)
+
+#define IOP_VERSION (0x03)
+#define IOP_CONFIG_HIGH (0x04)
+#define IOP_CONFIG_LOW (0x02)
+#define IOP_ASPI_ID_LOW (0x01)
+#define IOP_ASPI_ID_HIGH (0x00)
+
+#define IOP_REG_DC1 (0x0E)
+#define IOP_REG_DC0 (0x0C)
+#define IOP_REG_SB (0x0B)
+#define IOP_REG_DA1 (0x0A)
+#define IOP_REG_DA0 (0x08)
+#define IOP_REG_SC (0x09)
+#define IOP_DMA_SPEED (0x07)
+#define IOP_REG_FLAG (0x07)
+#define IOP_FIFO_H (0x06)
+#define IOP_FIFO_L (0x04)
+#define IOP_REG_ID (0x05)
+#define IOP_REG_QP (0x03)
+#define IOP_REG_IH (0x02)
+#define IOP_REG_IX (0x01)
+#define IOP_REG_AX (0x00)
+
+#define IFC_REG_LOCK (0x00)
+#define IFC_REG_UNLOCK (0x09)
+
+#define IFC_WR_EN_FILTER (0x10)
+#define IFC_RD_NO_EEPROM (0x10)
+#define IFC_SLEW_RATE (0x20)
+#define IFC_ACT_NEG (0x40)
+#define IFC_INP_FILTER (0x80)
+
+#define IFC_INIT_DEFAULT ( IFC_ACT_NEG | IFC_REG_UNLOCK )
+
+#define SC_SEL (0x80)
+#define SC_BSY (0x40)
+#define SC_ACK (0x20)
+#define SC_REQ (0x10)
+#define SC_ATN (0x08)
+#define SC_IO (0x04)
+#define SC_CD (0x02)
+#define SC_MSG (0x01)
+
+#define AscGetVarFreeQHead( port ) AscReadLramWord( port, ASCV_FREE_Q_HEAD_W )
+#define AscGetVarDoneQTail( port ) AscReadLramWord( port, ASCV_DONE_Q_TAIL_W )
+#define AscPutVarFreeQHead( port, val ) AscWriteLramWord( port, ASCV_FREE_Q_HEAD_W, val )
+#define AscPutVarDoneQTail( port, val ) AscWriteLramWord( port, ASCV_DONE_Q_TAIL_W, val )
+
+#define AscGetRiscVarFreeQHead( port ) AscReadLramByte( port, ASCV_NEXTRDY_B )
+#define AscGetRiscVarDoneQTail( port ) AscReadLramByte( port, ASCV_DONENEXT_B )
+#define AscPutRiscVarFreeQHead( port, val ) AscWriteLramByte( port, ASCV_NEXTRDY_B, val )
+#define AscPutRiscVarDoneQTail( port, val ) AscWriteLramByte( port, ASCV_DONENEXT_B, val )
+
+#define AscGetChipIFC( port ) inp( (port)+IOP_REG_IFC )
+#define AscPutChipIFC( port, data ) outp( (port)+IOP_REG_IFC, data )
+
+#define AscGetChipLramAddr( port ) ( ushort )inpw( ( PortAddr )((port)+IOP_RAM_ADDR) )
+#define AscSetChipLramAddr( port, addr ) outpw( ( PortAddr )( (port)+IOP_RAM_ADDR ), addr )
+#define AscPutChipLramData( port, data ) outpw( (port)+IOP_RAM_DATA, data )
+#define AscGetChipLramData( port ) inpw( (port)+IOP_RAM_DATA )
+
+#define AscWriteChipSyn( port, data ) outp( (port)+IOP_SYN_OFFSET, data )
+#define AscReadChipSyn( port ) inp( (port)+IOP_SYN_OFFSET )
+
+#define AscWriteChipIH( port, data ) outpw( (port)+IOP_REG_IH, data )
+#define AscReadChipIH( port ) inpw( (port)+IOP_REG_IH )
+
+#define AscWriteChipScsiID( port, data ) outp( (port)+IOP_REG_ID, data )
+#define AscReadChipScsiID( port ) inp( (port)+IOP_REG_ID )
+
+#define AscGetChipDmaSpeed( port ) ( uchar )inp( (port)+IOP_DMA_SPEED )
+#define AscSetChipDmaSpeed( port, data ) outp( (port)+IOP_DMA_SPEED, data )
+#define AscGetChipQP( port ) ( uchar )inp( (port)+IOP_REG_QP )
+#define AscSetPCAddr( port, data ) outpw( (port)+IOP_REG_PC, data )
+#define AscGetPCAddr( port ) inpw( (port)+IOP_REG_PC )
+#define AscGetChipVerNo( port ) ( uchar )inp( (port)+IOP_VERSION )
+
+#define AscGetChipEEPCmd( port ) ( uchar )inp( (port)+IOP_EEP_CMD )
+#define AscSetChipEEPCmd( port, data ) outp( (port)+IOP_EEP_CMD, data )
+#define AscGetChipEEPData( port ) inpw( (port)+IOP_EEP_DATA )
+#define AscSetChipEEPData( port, data ) outpw( (port)+IOP_EEP_DATA, data )
+
+#define AscGetChipControl( port ) ( uchar )inp( (port)+IOP_CTRL )
+#define AscSetChipControl( port, cc_val ) outp( (port)+IOP_CTRL, cc_val )
+
+#define AscGetChipStatus( port ) ( ASC_CS_TYPE )inpw( (port)+IOP_STATUS )
+#define AscSetChipStatus( port, cs_val ) outpw( (port)+IOP_STATUS, cs_val )
+
+#define AscGetChipCfgLsw( port ) ( ushort )inpw( (port)+IOP_CONFIG_LOW )
+#define AscGetChipCfgMsw( port ) ( ushort )inpw( (port)+IOP_CONFIG_HIGH )
+#define AscSetChipCfgLsw( port, data ) outpw( (port)+IOP_CONFIG_LOW, data )
+#define AscSetChipCfgMsw( port, data ) outpw( (port)+IOP_CONFIG_HIGH, data )
+
+#define AscIsIntPending( port ) ( AscGetChipStatus( port ) & CSW_INT_PENDING )
+#define AscGetChipScsiID( port ) ( ( AscGetChipCfgLsw( port ) >> 8 ) & ASC_MAX_TID )
+
+#define ASC_HALT_EXTMSG_IN ( ushort )0x8000
+#define ASC_HALT_CHK_CONDITION ( ushort )0x8100
+#define ASC_HALT_SS_QUEUE_FULL ( ushort )0x8200
+#define ASC_HALT_SDTR_REJECTED ( ushort )0x4000
+
+#define ASC_MAX_QNO 0xF8
+#define ASC_DATA_SEC_BEG ( ushort )0x0080
+#define ASC_DATA_SEC_END ( ushort )0x0080
+#define ASC_CODE_SEC_BEG ( ushort )0x0080
+#define ASC_CODE_SEC_END ( ushort )0x0080
+#define ASC_QADR_BEG (0x4000)
+#define ASC_QADR_USED ( ushort )( ASC_MAX_QNO * 64 )
+#define ASC_QADR_END ( ushort )0x7FFF
+#define ASC_QLAST_ADR ( ushort )0x7FC0
+#define ASC_QBLK_SIZE 0x40
+#define ASC_BIOS_DATA_QBEG 0xF8
+
+#define ASC_MIN_ACTIVE_QNO 0x01
+
+#define ASC_QLINK_END 0xFF
+#define ASC_EEPROM_WORDS 0x10
+#define ASC_MAX_MGS_LEN 0x10
+
+#define ASC_BIOS_ADDR_DEF 0xDC00
+#define ASC_BIOS_SIZE 0x3800
+#define ASC_BIOS_RAM_OFF 0x3800
+#define ASC_BIOS_RAM_SIZE 0x800
+#define ASC_BIOS_MIN_ADDR 0xC000
+#define ASC_BIOS_MAX_ADDR 0xEC00
+#define ASC_BIOS_BANK_SIZE 0x0400
+
+#define ASC_MCODE_START_ADDR 0x0080
+
+#define ASC_CFG0_HOST_INT_ON 0x0020
+#define ASC_CFG0_BIOS_ON 0x0040
+#define ASC_CFG0_VERA_BURST_ON 0x0080
+#define ASC_CFG0_SCSI_PARITY_ON 0x0800
+
+#define ASC_CFG1_SCSI_TARGET_ON 0x0080
+#define ASC_CFG1_LRAM_8BITS_ON 0x0800
+
+#define ASC_CFG_MSW_CLR_MASK 0xF0C0
+
+#define CSW_TEST1 ( ASC_CS_TYPE )0x8000
+#define CSW_AUTO_CONFIG ( ASC_CS_TYPE )0x4000
+#define CSW_RESERVED1 ( ASC_CS_TYPE )0x2000
+#define CSW_IRQ_WRITTEN ( ASC_CS_TYPE )0x1000
+#define CSW_33MHZ_SELECTED ( ASC_CS_TYPE )0x0800
+#define CSW_TEST2 ( ASC_CS_TYPE )0x0400
+#define CSW_TEST3 ( ASC_CS_TYPE )0x0200
+#define CSW_RESERVED2 ( ASC_CS_TYPE )0x0100
+#define CSW_DMA_DONE ( ASC_CS_TYPE )0x0080
+#define CSW_FIFO_RDY ( ASC_CS_TYPE )0x0040
+
+#define CSW_EEP_READ_DONE ( ASC_CS_TYPE )0x0020
+
+#define CSW_HALTED ( ASC_CS_TYPE )0x0010
+#define CSW_SCSI_RESET_ACTIVE ( ASC_CS_TYPE )0x0008
+
+#define CSW_PARITY_ERR ( ASC_CS_TYPE )0x0004
+#define CSW_SCSI_RESET_LATCH ( ASC_CS_TYPE )0x0002
+
+#define CSW_INT_PENDING ( ASC_CS_TYPE )0x0001
+
+#define CIW_INT_ACK ( ASC_CS_TYPE )0x0100
+#define CIW_TEST1 ( ASC_CS_TYPE )0x0200
+#define CIW_TEST2 ( ASC_CS_TYPE )0x0400
+#define CIW_SEL_33MHZ ( ASC_CS_TYPE )0x0800
+
+#define CIW_IRQ_ACT ( ASC_CS_TYPE )0x1000
+
+#define CC_CHIP_RESET ( uchar )0x80
+#define CC_SCSI_RESET ( uchar )0x40
+#define CC_HALT ( uchar )0x20
+#define CC_SINGLE_STEP ( uchar )0x10
+#define CC_DMA_ABLE ( uchar )0x08
+#define CC_TEST ( uchar )0x04
+#define CC_BANK_ONE ( uchar )0x02
+#define CC_DIAG ( uchar )0x01
+
+#define ASC_1000_ID0W 0x04C1
+#define ASC_1000_ID0W_FIX 0x00C1
+#define ASC_1000_ID1B 0x25
+
+#define ASC_EISA_BIG_IOP_GAP (0x1C30-0x0C50)
+#define ASC_EISA_SMALL_IOP_GAP (0x0020)
+#define ASC_EISA_MIN_IOP_ADDR (0x0C30)
+#define ASC_EISA_MAX_IOP_ADDR (0xFC50)
+#define ASC_EISA_REV_IOP_MASK (0x0C83)
+#define ASC_EISA_PID_IOP_MASK (0x0C80)
+#define ASC_EISA_CFG_IOP_MASK (0x0C86)
+
+#define ASC_GET_EISA_SLOT( iop ) ( PortAddr )( (iop) & 0xF000 )
+
+#define ASC_EISA_ID_740 0x01745004UL
+#define ASC_EISA_ID_750 0x01755004UL
+
+#define INS_HALTINT ( ushort )0x6281
+#define INS_HALT ( ushort )0x6280
+#define INS_SINT ( ushort )0x6200
+#define INS_RFLAG_WTM ( ushort )0x7380
+
+#define ASC_MC_SAVE_CODE_WSIZE 0x500
+#define ASC_MC_SAVE_DATA_WSIZE 0x40
+
+typedef struct asc_mc_saved {
+ ushort data[ASC_MC_SAVE_DATA_WSIZE];
+ ushort code[ASC_MC_SAVE_CODE_WSIZE];
+} ASC_MC_SAVED;
+
+int AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg);
+int AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg);
+void AscWaitEEPRead(void);
+void AscWaitEEPWrite(void);
+ushort AscReadEEPWord(PortAddr, uchar);
+ushort AscWriteEEPWord(PortAddr, uchar, ushort);
+ushort AscGetEEPConfig(PortAddr, ASCEEP_CONFIG dosfar *, ushort);
+int AscSetEEPConfigOnce(PortAddr, ASCEEP_CONFIG dosfar *, ushort);
+int AscSetEEPConfig(PortAddr, ASCEEP_CONFIG dosfar *, ushort);
+ushort AscEEPSum(PortAddr, uchar, uchar);
+
+int AscStartChip(PortAddr);
+int AscStopChip(PortAddr);
+void AscSetChipIH(PortAddr, ushort);
+int AscSetRunChipSynRegAtID(PortAddr, uchar, uchar);
+
+int AscIsChipHalted(PortAddr);
+
+void AscSetChipCfgDword(PortAddr, ulong);
+ulong AscGetChipCfgDword(PortAddr);
+
+void AscAckInterrupt(PortAddr);
+void AscDisableInterrupt(PortAddr);
+void AscEnableInterrupt(PortAddr);
+void AscSetBank(PortAddr, uchar);
+uchar AscGetBank(PortAddr);
+int AscResetChipAndScsiBus(PortAddr);
+ushort AscGetIsaDmaChannel(PortAddr);
+ushort AscSetIsaDmaChannel(PortAddr, ushort);
+uchar AscSetIsaDmaSpeed(PortAddr, uchar);
+uchar AscGetIsaDmaSpeed(PortAddr);
+
+uchar AscReadLramByte(PortAddr, ushort);
+ushort AscReadLramWord(PortAddr, ushort);
+ulong AscReadLramDWord(PortAddr, ushort);
+void AscWriteLramWord(PortAddr, ushort, ushort);
+void AscWriteLramDWord(PortAddr, ushort, ulong);
+void AscWriteLramByte(PortAddr, ushort, uchar);
+int AscVerWriteLramDWord(PortAddr, ushort, ulong);
+int AscVerWriteLramWord(PortAddr, ushort, ushort);
+int AscVerWriteLramByte(PortAddr, ushort, uchar);
+
+ulong AscMemSumLramWord(PortAddr, ushort, int);
+void AscMemWordSetLram(PortAddr, ushort, ushort, int);
+void AscMemWordCopyToLram(PortAddr, ushort, ushort dosfar *, int);
+void AscMemDWordCopyToLram(PortAddr, ushort, ulong dosfar *, int);
+void AscMemWordCopyFromLram(PortAddr, ushort, ushort dosfar *, int);
+int AscMemWordCmpToLram(PortAddr, ushort, ushort dosfar *, int);
+
+ushort AscInitAscDvcVar(ASC_DVC_VAR asc_ptr_type *);
+ulong AscLoadMicroCode(PortAddr, ushort,
+ ushort dosfar *, ushort);
+ushort AscInitFromEEP(ASC_DVC_VAR asc_ptr_type *);
+ushort AscInitFromAscDvcVar(ASC_DVC_VAR asc_ptr_type *);
+ushort AscInitMicroCodeVar(ASC_DVC_VAR asc_ptr_type * asc_dvc);
+
+void dosfar AscInitPollIsrCallBack(ASC_DVC_VAR asc_ptr_type *,
+ ASC_QDONE_INFO dosfar *);
+int AscTestExternalLram(ASC_DVC_VAR asc_ptr_type *);
+ushort AscTestLramEndian(PortAddr);
+
+uchar AscMsgOutSDTR(PortAddr, uchar, uchar);
+
+uchar AscCalSDTRData(uchar, uchar);
+void AscSetChipSDTR(PortAddr, uchar, uchar);
+int AscInitChipAllSynReg(ASC_DVC_VAR asc_ptr_type *, uchar);
+uchar AscGetSynPeriodIndex(uchar);
+uchar AscSynIndexToPeriod(uchar);
+uchar AscAllocFreeQueue(PortAddr, uchar);
+uchar AscAllocMultipleFreeQueue(PortAddr, uchar, uchar);
+int AscRiscHaltedAbortSRB(ASC_DVC_VAR asc_ptr_type *, ulong);
+int AscRiscHaltedAbortTIX(ASC_DVC_VAR asc_ptr_type *, uchar);
+int AscRiscHaltedAbortALL(ASC_DVC_VAR asc_ptr_type *);
+int AscHostReqRiscHalt(PortAddr);
+int AscStopQueueExe(PortAddr);
+int AscStartQueueExe(PortAddr);
+int AscCleanUpDiscQueue(PortAddr);
+int AscCleanUpBusyQueue(PortAddr);
+int _AscAbortTidBusyQueue(ASC_DVC_VAR asc_ptr_type *,
+ ASC_QDONE_INFO dosfar *, uchar);
+int _AscAbortSrbBusyQueue(ASC_DVC_VAR asc_ptr_type *,
+ ASC_QDONE_INFO dosfar *, ulong);
+int AscWaitTixISRDone(ASC_DVC_VAR asc_ptr_type *, uchar);
+int AscWaitISRDone(ASC_DVC_VAR asc_ptr_type *);
+ulong AscGetOnePhyAddr(ASC_DVC_VAR asc_ptr_type *, uchar dosfar *, ulong);
+
+int AscSendScsiQueue(ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_Q dosfar * scsiq,
+ uchar n_q_required);
+int AscPutReadyQueue(ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_Q dosfar *, uchar);
+int AscPutReadySgListQueue(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_Q dosfar *, uchar);
+int AscAbortScsiIO(ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_Q dosfar *);
+void AscExeScsiIO(ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_Q dosfar *);
+int AscSetChipSynRegAtID(PortAddr, uchar, uchar);
+int AscSetRunChipSynRegAtID(PortAddr, uchar, uchar);
+ushort AscInitLram(ASC_DVC_VAR asc_ptr_type *);
+int AscReInitLram(ASC_DVC_VAR asc_ptr_type *);
+ushort AscInitQLinkVar(ASC_DVC_VAR asc_ptr_type *);
+int AscSetLibErrorCode(ASC_DVC_VAR asc_ptr_type *, ushort);
+int _AscWaitQDone(PortAddr, ASC_SCSI_Q dosfar *);
+
+int AscEnterCritical(void);
+void AscLeaveCritical(int);
+
+int AscIsrChipHalted(ASC_DVC_VAR asc_ptr_type *);
+uchar _AscCopyLramScsiDoneQ(PortAddr, ushort,
+ ASC_QDONE_INFO dosfar *, ulong);
+int AscIsrQDone(ASC_DVC_VAR asc_ptr_type *);
+ushort AscIsrExeBusyQueue(ASC_DVC_VAR asc_ptr_type *, uchar);
+int AscScsiSetupCmdQ(ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_REQ_Q dosfar *,
+ uchar dosfar *, ulong);
+
+int AscScsiInquiry(ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_REQ_Q dosfar *,
+ uchar dosfar *, int);
+int AscScsiTestUnitReady(ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_REQ_Q dosfar *);
+int AscScsiStartStopUnit(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_REQ_Q dosfar *, uchar);
+int AscScsiReadCapacity(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_REQ_Q dosfar *,
+ uchar dosfar *);
+
+ulong dosfar *swapfarbuf4(uchar dosfar *);
+int PollQueueDone(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_REQ_Q dosfar *,
+ int);
+int PollScsiReadCapacity(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_REQ_Q dosfar *,
+ ASC_CAP_INFO dosfar *);
+int PollScsiInquiry(ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_REQ_Q dosfar *,
+ uchar dosfar *, int);
+int PollScsiTestUnitReady(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_REQ_Q dosfar *);
+int PollScsiStartUnit(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_REQ_Q dosfar *);
+int InitTestUnitReady(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_REQ_Q dosfar *);
+void AscDispInquiry(uchar, uchar, ASC_SCSI_INQUIRY dosfar *);
+int AscPollQDone(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_REQ_Q dosfar *, int);
+
+int AscSetBIOSBank(PortAddr, int, ushort);
+int AscSetVlBIOSBank(PortAddr, int);
+int AscSetEisaBIOSBank(PortAddr, int);
+int AscSetIsaBIOSBank(PortAddr, int);
+
+int AscIsBiosEnabled(PortAddr, ushort);
+void AscResetScsiBus(PortAddr);
+void AscClrResetScsiBus(PortAddr);
+
+void AscSingleStepChip(PortAddr);
+uchar AscSetChipScsiID(PortAddr, uchar);
+ushort AscGetChipBiosAddress(PortAddr, ushort);
+ushort AscSetChipBiosAddress(PortAddr, ushort, ushort);
+uchar AscGetChipVersion(PortAddr, ushort);
+ushort AscGetChipBusType(PortAddr);
+
+PortAddr AscSearchIOPortAddr11(PortAddr);
+PortAddr AscSearchIOPortAddr100(PortAddr);
+int AscFindSignature(PortAddr);
+void AscToggleIRQAct(PortAddr);
+int AscResetChip(PortAddr);
+void AscClrResetChip(PortAddr);
+
+short itos(ushort, uchar dosfar *, short, short);
+int insnchar(uchar dosfar *, short, short, ruchar, short);
+void itoh(ushort, ruchar dosfar *);
+void btoh(uchar, ruchar dosfar *);
+void ltoh(ulong, ruchar dosfar *);
+uchar dosfar *todstr(ushort, uchar dosfar *);
+uchar dosfar *tohstr(ushort, uchar dosfar *);
+uchar dosfar *tobhstr(uchar, uchar dosfar *);
+uchar dosfar *tolhstr(ulong, uchar dosfar *);
+
+void AscSetISAPNPWaitForKey(void);
+uchar AscGetChipIRQ(PortAddr, ushort);
+uchar AscSetChipIRQ(PortAddr, uchar, ushort);
+uchar AscGetChipScsiCtrl(PortAddr);
+
+ushort AscGetEisaChipCfg(PortAddr);
+ushort AscGetEisaChipGpReg(PortAddr);
+ushort AscSetEisaChipCfg(PortAddr, ushort);
+ushort AscSetEisaChipGpReg(PortAddr, ushort);
+
+ulong AscGetEisaProductID(PortAddr);
+PortAddr AscSearchIOPortAddrEISA(PortAddr);
+
+int AscPollQTailSync(PortAddr);
+int AscPollQHeadSync(PortAddr);
+int AscWaitQTailSync(PortAddr);
+
+int _AscRestoreMicroCode(PortAddr, ASC_MC_SAVED dosfar *);
+
+int AscSCAM(ASC_DVC_VAR asc_ptr_type *);
+
+ushort SwapByteOfWord(ushort word_val);
+ulong SwapWordOfDWord(ulong dword_val);
+ulong AdjEndianDword(ulong dword_val);
+
+int AscAdjEndianScsiQ(ASC_SCSI_Q dosfar *);
+int AscAdjEndianQDoneInfo(ASC_QDONE_INFO dosfar *);
+
+extern int DvcEnterCritical(void);
+extern void DvcLeaveCritical(int);
+
+extern void DvcInPortWords(PortAddr, ushort dosfar *, int);
+extern void DvcOutPortWords(PortAddr, ushort dosfar *, int);
+extern void DvcOutPortDWords(PortAddr, ulong dosfar *, int);
+
+extern void DvcSleepMilliSecond(ulong);
+extern void DvcDisplayString(uchar dosfar *);
+extern ulong DvcGetPhyAddr(uchar dosfar * buf_addr, ulong buf_len);
+extern ulong DvcGetSGList(ASC_DVC_VAR asc_ptr_type *, uchar dosfar *, ulong,
+ ASC_SG_HEAD dosfar *);
+
+extern void DvcSCAMDelayMS(ulong);
+extern int DvcDisableCPUInterrupt(void);
+extern void DvcRestoreCPUInterrupt(int);
+
+void DvcPutScsiQ(PortAddr, ushort, ushort dosfar *, int);
+void DvcGetQinfo(PortAddr, ushort, ushort dosfar *, int);
+
+PortAddr AscSearchIOPortAddr(PortAddr, ushort);
+ushort AscInitGetConfig(ASC_DVC_VAR asc_ptr_type *);
+ushort AscInitSetConfig(ASC_DVC_VAR asc_ptr_type *);
+ushort AscInitAsc1000Driver(ASC_DVC_VAR asc_ptr_type *);
+int AscInitScsiTarget(ASC_DVC_VAR asc_ptr_type *,
+ ASC_DVC_INQ_INFO dosfar *,
+ uchar dosfar *,
+ ASC_CAP_INFO_ARRAY dosfar *,
+ ushort);
+int AscInitPollBegin(ASC_DVC_VAR asc_ptr_type *);
+int AscInitPollEnd(ASC_DVC_VAR asc_ptr_type *);
+int AscInitPollTarget(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_REQ_Q dosfar *,
+ ASC_SCSI_INQUIRY dosfar *,
+ ASC_CAP_INFO dosfar *);
+int AscExeScsiQueue(ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_Q dosfar *);
+
+int AscISR(ASC_DVC_VAR asc_ptr_type *);
+void AscISR_AckInterrupt(ASC_DVC_VAR asc_ptr_type *);
+int AscISR_CheckQDone(ASC_DVC_VAR asc_ptr_type *,
+ ASC_QDONE_INFO dosfar *,
+ uchar dosfar *);
+
+int AscStartUnit(ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_TIX_TYPE);
+int AscStopUnit(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_TIX_TYPE target_ix
+);
+
+uint AscGetNumOfFreeQueue(ASC_DVC_VAR asc_ptr_type *, uchar, uchar);
+int AscSgListToQueue(int);
+int AscQueueToSgList(int);
+int AscSetDvcErrorCode(ASC_DVC_VAR asc_ptr_type *, uchar);
+
+int AscAbortSRB(ASC_DVC_VAR asc_ptr_type *, ulong);
+int AscResetDevice(ASC_DVC_VAR asc_ptr_type *, uchar);
+int AscResetSB(ASC_DVC_VAR asc_ptr_type *);
+
+void AscEnableIsaDma(uchar);
+void AscDisableIsaDma(uchar);
+
+ulong AscGetMaxDmaAddress(ushort);
+ulong AscGetMaxDmaCount(ushort);
+
+int AscSaveMicroCode(ASC_DVC_VAR asc_ptr_type *, ASC_MC_SAVED dosfar *);
+int AscRestoreOldMicroCode(ASC_DVC_VAR asc_ptr_type *, ASC_MC_SAVED dosfar *);
+int AscRestoreNewMicroCode(ASC_DVC_VAR asc_ptr_type *, ASC_MC_SAVED dosfar *);
+
+/*
+ * --- Debugging Header
+ */
+
+#ifdef ADVANSYS_DEBUG
+#define STATIC
+#else /* ADVANSYS_DEBUG */
+#define STATIC static
+#endif /* ADVANSYS_DEBUG */
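+
+/*
+ * Note: when ADVANSYS_DEBUG is defined, STATIC expands to nothing so the
+ * driver-local functions and data below get external linkage (presumably
+ * to keep them visible to a kernel debugger); otherwise they are given
+ * file scope with 'static'.
+ */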
+
+
+/*
+ * --- Driver Constants and Macros
+ */
+
+#define ASC_NUM_BOARD_SUPPORTED 4
+#define ASC_NUM_BUS 4
+
+/* Reference Scsi_Host hostdata */
+#define ASC_BOARD(host) ((struct asc_board *) &(host)->hostdata)
+
+#define NO_ISA_DMA 0xff /* No ISA DMA Channel Used */
+
+#ifndef min
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+#endif /* min */
+
+/* Asc Library return codes */
+#define ASC_TRUE 1
+#define ASC_FALSE 0
+#define ASC_NOERROR 1
+#define ASC_BUSY 0
+#define ASC_ERROR (-1)
+
+/* Scsi_Cmnd function return codes */
+#define STATUS_BYTE(byte) (byte)
+#define MSG_BYTE(byte) ((byte) << 8)
+#define HOST_BYTE(byte) ((byte) << 16)
+#define DRIVER_BYTE(byte) ((byte) << 24)
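+
+/*
+ * These macros place a value into one of the four bytes of the Scsi_Cmnd
+ * 'result' field. For example, a host-level failure is reported together
+ * with the SCSI status as:
+ *
+ *     scp->result = HOST_BYTE(DID_ERROR) | STATUS_BYTE(qdonep->d3.scsi_stat);
+ */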
+
+/* asc_enqueue() flags */
+#define ASC_FRONT 1
+#define ASC_BACK 2
+
+/* PCI configuration declarations */
+
+#define ASC_PCI_REV_A_INIT 0x01
+#define ASC_PCI_REV_A_DONE 0x02
+#define ASC_PCI_REV_B_INIT 0x04
+#define ASC_PCI_REV_B_DONE 0x08
+
+#define PCI_BASE_CLASS_PREDEFINED 0x00
+#define PCI_BASE_CLASS_MASS_STORAGE 0x01
+#define PCI_BASE_CLASS_NETWORK 0x02
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_BASE_CLASS_MULTIMEDIA 0x04
+#define PCI_BASE_CLASS_MEMORY_CONTROLLER 0x05
+#define PCI_BASE_CLASS_BRIDGE_DEVICE 0x06
+
+/* MASS STORAGE */
+#define PCI_SUB_CLASS_SCSI_CONTROLLER 0x00
+#define PCI_SUB_CLASS_IDE_CONTROLLER 0x01
+#define PCI_SUB_CLASS_FLOPPY_DISK_CONTROLLER 0x02
+#define PCI_SUB_CLASS_IPI_BUS_CONTROLLER 0x03
+#define PCI_SUB_CLASS_OTHER_MASS_CONTROLLER 0x80
+
+/* NETWORK CONTROLLER */
+#define PCI_SUB_CLASS_ETHERNET_CONTROLLER 0x00
+#define PCI_SUB_CLASS_TOKEN_RING_CONTROLLER 0x01
+#define PCI_SUB_CLASS_FDDI_CONTROLLER 0x02
+#define PCI_SUB_CLASS_OTHER_NETWORK_CONTROLLER 0x80
+
+/* DISPLAY CONTROLLER */
+#define PCI_SUB_CLASS_VGA_CONTROLLER 0x00
+#define PCI_SUB_CLASS_XGA_CONTROLLER 0x01
+#define PCI_SUB_CLASS_OTHER_DISPLAY_CONTROLLER 0x80
+
+/* MULTIMEDIA CONTROLLER */
+#define PCI_SUB_CLASS_VIDEO_DEVICE 0x00
+#define PCI_SUB_CLASS_AUDIO_DEVICE 0x01
+#define PCI_SUB_CLASS_OTHER_MULTIMEDIA_DEVICE 0x80
+
+/* MEMORY CONTROLLER */
+#define PCI_SUB_CLASS_RAM_CONTROLLER 0x00
+#define PCI_SUB_CLASS_FLASH_CONTROLLER 0x01
+#define PCI_SUB_CLASS_OTHER_MEMORY_CONTROLLER 0x80
+
+/* BRIDGE CONTROLLER */
+#define PCI_SUB_CLASS_HOST_BRIDGE_CONTROLLER 0x00
+#define PCI_SUB_CLASS_ISA_BRIDGE_CONTROLLER 0x01
+#define PCI_SUB_CLASS_EISA_BRIDGE_CONTROLLER 0x02
+#define PCI_SUB_CLASS_MC_BRIDGE_CONTROLLER 0x03
+#define PCI_SUB_CLASS_PCI_TO_PCI_BRIDGE_CONTROLLER 0x04
+#define PCI_SUB_CLASS_PCMCIA_BRIDGE_CONTROLLER 0x05
+#define PCI_SUB_CLASS_OTHER_BRIDGE_CONTROLLER 0x80
+
+#define PCI_MAX_SLOT 0x1F
+#define PCI_MAX_BUS 0xFF
+#define ASC_PCI_VENDORID 0x10CD
+#define PCI_IOADDRESS_MASK 0xFFFE
+
+/* PCI IO Port Addresses to generate special cycle */
+
+#define PCI_CONFIG_ADDRESS_MECH1 0x0CF8
+#define PCI_CONFIG_DATA_MECH1 0x0CFC
+
+#define PCI_CONFIG_FORWARD_REGISTER 0x0CFA /* 0=type 0; 1=type 1; */
+
+#define PCI_CONFIG_BUS_NUMBER_MASK 0x00FF0000
+#define PCI_CONFIG_DEVICE_FUNCTION_MASK 0x0000FF00
+#define PCI_CONFIG_REGISTER_NUMBER_MASK 0x000000F8
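+
+/*
+ * With configuration mechanism #1 the bus, device/function, and register
+ * numbers are packed into a dword written to PCI_CONFIG_ADDRESS_MECH1 and
+ * the data is then transferred through PCI_CONFIG_DATA_MECH1, e.g.
+ * (illustrative):
+ *
+ *     addr = 0x80000000UL | (bus << 16) | (devfn << 8) | (reg & 0xFC);
+ */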
+
+#define PCI_DEVICE_FOUND 0x0000
+#define PCI_DEVICE_NOT_FOUND 0xffff
+
+#define SUBCLASS_OFFSET 0x0A
+#define CLASSCODE_OFFSET 0x0B
+#define VENDORID_OFFSET 0x00
+#define DEVICEID_OFFSET 0x02
+
+/*
+ * --- Driver Macros
+ */
+
+#ifndef ADVANSYS_STATS
+#define ASC_STATS(counter)
+#define ASC_STATS_ADD(counter, count)
+#else /* ADVANSYS_STATS */
+#define ASC_STATS(counter) asc_stats.counter++
+#define ASC_STATS_ADD(counter, count) asc_stats.counter += (count)
+#endif /* ADVANSYS_STATS */
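+
+/*
+ * For example, ASC_STATS(queuecommand) increments asc_stats.queuecommand
+ * when ADVANSYS_STATS is defined and compiles away to nothing otherwise.
+ */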
+
+#ifndef ADVANSYS_DEBUG
+
+#define ASC_DBG(lvl, s)
+#define ASC_DBG1(lvl, s, a1)
+#define ASC_DBG2(lvl, s, a1, a2)
+#define ASC_DBG3(lvl, s, a1, a2, a3)
+#define ASC_DBG4(lvl, s, a1, a2, a3, a4)
+#define ASC_DBG_PRT_SCSI_HOST(lvl, s)
+#define ASC_DBG_PRT_DVC_VAR(lvl, v)
+#define ASC_DBG_PRT_DVC_CFG(lvl, c)
+#define ASC_DBG_PRT_SCSI_Q(lvl, scsiqp)
+#define ASC_DBG_PRT_QDONE_INFO(lvl, qdone)
+#define ASC_DBG_PRT_HEX(lvl, name, start, length)
+#define ASC_DBG_PRT_CDB(lvl, cdb, len)
+#define ASC_DBG_PRT_SENSE(lvl, sense, len)
+#define ASC_DBG_PRT_INQUIRY(lvl, inq, len)
+#define ASC_ASSERT(a)
+
+#else /* ADVANSYS_DEBUG */
+
+/*
+ * Debugging Message Levels:
+ * 0: Errors Only
+ * 1: High-Level Tracing
+ * 2-N: Verbose Tracing
+ */
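+
+/*
+ * For example, a level 1 entry trace is written as
+ *
+ *     ASC_DBG1(1, "advansys_detect: iop %x\n", iop);
+ *
+ * and is printed only when 'asc_dbglvl' is 1 or greater.
+ */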
+
+#define ASC_DBG(lvl, s) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk(s); \
+ } \
+ }
+
+#define ASC_DBG1(lvl, s, a1) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1)); \
+ } \
+ }
+
+#define ASC_DBG2(lvl, s, a1, a2) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1), (a2)); \
+ } \
+ }
+
+#define ASC_DBG3(lvl, s, a1, a2, a3) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1), (a2), (a3)); \
+ } \
+ }
+
+#define ASC_DBG4(lvl, s, a1, a2, a3, a4) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1), (a2), (a3), (a4)); \
+ } \
+ }
+
+#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_scsi_host(s); \
+ } \
+ }
+
+#define ASC_DBG_PRT_DVC_VAR(lvl, v) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_dvc_var(v); \
+ } \
+ }
+
+#define ASC_DBG_PRT_DVC_CFG(lvl, c) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_dvc_cfg(c); \
+ } \
+ }
+
+#define ASC_DBG_PRT_SCSI_Q(lvl, scsiqp) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_scsi_q(scsiqp); \
+ } \
+ }
+
+#define ASC_DBG_PRT_QDONE_INFO(lvl, qdone) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_qdone_info(qdone); \
+ } \
+ }
+
+#define ASC_DBG_PRT_HEX(lvl, name, start, length) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_hex((name), (start), (length)); \
+ } \
+ }
+
+#define ASC_DBG_PRT_CDB(lvl, cdb, len) \
+ ASC_DBG_PRT_HEX((lvl), "CDB", (uchar *) (cdb), (len));
+
+#define ASC_DBG_PRT_SENSE(lvl, sense, len) \
+ ASC_DBG_PRT_HEX((lvl), "SENSE", (uchar *) (sense), (len));
+
+#define ASC_DBG_PRT_INQUIRY(lvl, inq, len) \
+ ASC_DBG_PRT_HEX((lvl), "INQUIRY", (uchar *) (inq), (len));
+
+#define ASC_ASSERT(a) \
+ { \
+ if (!(a)) { \
+ printk("ASC_ASSERT() Failure: file %s, line %d\n", \
+ __FILE__, __LINE__); \
+ } \
+ }
+#endif /* ADVANSYS_DEBUG */
+
+
+/*
+ * --- Driver Structures
+ */
+
+/*
+ * Structure allocated for each board.
+ *
+ * This structure is allocated by scsi_register() at the end
+ * of the 'Scsi_Host' structure starting at the 'hostdata'
+ * field. It is guaranteed to be allocated from DMA-able memory.
+ */
+struct asc_board {
+ /* Asc Library */
+ ASC_DVC_VAR board; /* Board configuration */
+ ASC_DVC_CFG cfg; /* Device configuration */
+ uchar overrun_buf[ASC_OVERRUN_BSIZE];
+ /* Queued Commands */
+ ASC_SCSI_BIT_ID_TYPE pending_tidmask; /* Pending command mask */
+ Scsi_Cmnd *pending[ASC_MAX_TID];
+ /* Target Initialization */
+ ASC_SCSI_BIT_ID_TYPE init_tidmask; /* Target initialized mask */
+ ASC_SCSI_REQ_Q scsireqq;
+ ASC_CAP_INFO cap_info;
+ ASC_SCSI_INQUIRY inquiry;
+};
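+
+/*
+ * The per-board data is reached through the ASC_BOARD() macro, e.g.
+ *
+ *     boardp = &ASC_BOARD(shp)->board;
+ *
+ * where 'shp' is the adapter's 'struct Scsi_Host'.
+ */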
+
+/*
+ * PCI configuration structures
+ */
+typedef struct _PCI_DATA_
+{
+ uchar type;
+ uchar bus;
+ uchar slot;
+ uchar func;
+ uchar offset;
+} PCI_DATA;
+
+typedef struct _PCI_DEVICE_
+{
+ ushort vendorID;
+ ushort deviceID;
+ ushort slotNumber;
+ ushort slotFound;
+ uchar busNumber;
+ uchar maxBusNumber;
+ uchar devFunc;
+ ushort startSlot;
+ ushort endSlot;
+ uchar bridge;
+ uchar type;
+} PCI_DEVICE;
+
+typedef struct _PCI_CONFIG_SPACE_
+{
+ ushort vendorID;
+ ushort deviceID;
+ ushort command;
+ ushort status;
+ uchar revision;
+ uchar classCode[3];
+ uchar cacheSize;
+ uchar latencyTimer;
+ uchar headerType;
+ uchar bist;
+ ulong baseAddress[6];
+ ushort reserved[4];
+ ulong optionRomAddr;
+ ushort reserved2[4];
+ uchar irqLine;
+ uchar irqPin;
+ uchar minGnt;
+ uchar maxLatency;
+} PCI_CONFIG_SPACE;
+
+#ifdef ADVANSYS_STATS
+struct asc_stats {
+ ulong command; /* # calls to advansys_command() */
+ ulong queuecommand; /* # calls to advansys_queuecommand() */
+ ulong abort; /* # calls to advansys_abort() */
+ ulong reset; /* # calls to advansys_reset() */
+ ulong biosparam; /* # calls to advansys_biosparam() */
+ ulong interrupt; /* # calls to advansys_interrupt() */
+ ulong callback; /* # calls to asc_isr_callback() */
+ ulong cont_cnt; /* # non-scatter-gather I/O requests received */
+ ulong cont_xfer; /* contiguous transfer total (512 byte units) */
+ ulong sg_cnt; /* # scatter-gather I/O requests received */
+ ulong sg_elem; /* scatter-gather element total */
+ ulong sg_xfer; /* scatter-gather transfer total (512 byte units) */
+ ulong error; /* # AscExeScsiQueue() ASC_ERROR returns. */
+ /*
+ * Number of times interrupts disabled in advansys_queuecommand() and
+ * asc_isr_callback(), respectively. The former indicates how many
+ * times commands were pending when a new command was received.
+ */
+ ulong cmd_disable;
+ ulong intr_disable;
+ /*
+ * Number of times asc_enqueue() called. Indicates how many ASC_BUSY
+ * returns have occurred.
+ */
+ ulong enqueue;
+ ulong dequeue; /* # calls to asc_dequeue(). */
+ /*
+ * Number of times asc_rmqueue() called and the specified command
+ * was found and removed.
+ */
+ ulong rmqueue;
+} asc_stats;
+#endif /* ADVANSYS_STATS */
+
+
+/*
+ * --- Driver Data
+ */
+
+#ifdef LINUX_1_3
+struct proc_dir_entry proc_scsi_advansys =
+{
+ PROC_SCSI_ADVANSYS, /* unsigned short low_ino */
+ 8, /* unsigned short namelen */
+ "advansys", /* const char *name */
+ S_IFDIR | S_IRUGO | S_IXUGO, /* mode_t mode */
+ 2 /* nlink_t nlink */
+};
+#endif /* LINUX_1_3 */
+
+STATIC int asc_board_count; /* Number of boards detected in system. */
+STATIC struct Scsi_Host *asc_host[ASC_NUM_BOARD_SUPPORTED];
+STATIC Scsi_Cmnd *asc_scsi_done; /* Commands needing done function call. */
+
+STATIC ushort asc_bus[ASC_NUM_BUS] = {
+ ASC_IS_ISA,
+ ASC_IS_VL,
+ ASC_IS_EISA,
+ ASC_IS_PCI,
+};
+
+/*
+ * Used with the LILO 'advansys' option to eliminate or
+ * limit I/O port probing at boot time, cf. advansys_setup().
+ */
+int asc_iopflag = ASC_FALSE;
+int asc_ioport[ASC_NUM_BOARD_SUPPORTED] = { 0, 0, 0, 0 };
+
+#ifdef ADVANSYS_DEBUG
+char *
+asc_bus_name[ASC_NUM_BUS] = {
+ "ASC_IS_ISA",
+ "ASC_IS_VL",
+ "ASC_IS_EISA",
+ "ASC_IS_PCI",
+};
+
+int asc_dbglvl = 0;
+#endif /* ADVANSYS_DEBUG */
+
+
+/*
+ * --- Driver Function Prototypes
+ *
+ * advansys.h contains function prototypes for functions global to Linux.
+ */
+
+#ifdef LINUX_1_3
+STATIC int asc_proc_copy(off_t, off_t, char *, int, char *, int);
+#endif /* LINUX_1_3 */
+STATIC void advansys_interrupt(int, struct pt_regs *);
+STATIC void advansys_command_done(Scsi_Cmnd *);
+STATIC int asc_execute_scsi_cmnd(Scsi_Cmnd *);
+STATIC void asc_isr_callback(ASC_DVC_VAR *, ASC_QDONE_INFO *);
+STATIC void asc_execute_pending(struct Scsi_Host *);
+STATIC int asc_init_dev(ASC_DVC_VAR *, Scsi_Cmnd *);
+STATIC int asc_srch_pci_dev(PCI_DEVICE *);
+STATIC uchar asc_scan_method(PCI_DEVICE *);
+STATIC int asc_pci_find_dev(PCI_DEVICE *);
+STATIC void asc_get_pci_cfg(PCI_DEVICE *, PCI_CONFIG_SPACE *);
+STATIC ushort asc_get_cfg_word(PCI_DATA *);
+STATIC uchar asc_get_cfg_byte(PCI_DATA *);
+STATIC void asc_enqueue(struct Scsi_Host *, Scsi_Cmnd *, int, int);
+STATIC Scsi_Cmnd *asc_dequeue(struct Scsi_Host *, int);
+STATIC int asc_rmqueue(struct Scsi_Host *, Scsi_Cmnd *, int);
+
+/* XXX - Asc Library Routines not supposed to be used directly */
+ushort AscGetChipBiosAddress(PortAddr, ushort);
+int AscFindSignature(PortAddr);
+
+#ifdef ADVANSYS_STATS
+STATIC int asc_prt_stats(char *, int);
+STATIC int asc_prt_stats_line(char *, int, char *fmt, ...);
+#endif /* ADVANSYS_STATS */
+#ifdef ADVANSYS_DEBUG
+STATIC void asc_prt_scsi_host(struct Scsi_Host *);
+STATIC void asc_prt_dvc_cfg(ASC_DVC_CFG *);
+STATIC void asc_prt_dvc_var(ASC_DVC_VAR *);
+STATIC void asc_prt_scsi_q(ASC_SCSI_Q *);
+STATIC void asc_prt_qdone_info(ASC_QDONE_INFO *);
+STATIC void asc_prt_hex(char *f, uchar *, int);
+STATIC int interrupts_enabled(void);
+#endif /* ADVANSYS_DEBUG */
+
+
+/*
+ * --- Linux 'Scsi_Host_Template' and advansys_setup() Functions
+ */
+
+#ifdef LINUX_1_3
+/*
+ * advansys_proc_info() - /proc/scsi/advansys/[0-ASC_NUM_BOARD_SUPPORTED]
+ *
+ * *buffer: I/O buffer
+ * **start: if inout == FALSE pointer into buffer where user read should start
+ * offset: current offset into /proc/scsi/advansys file
+ * length: length of buffer
+ * hostno: Scsi_Host host_no
+ * inout: TRUE - user is writing; FALSE - user is reading
+ *
+ * Return the number of bytes read from or written to
+ * /proc/scsi/advansys file.
+ */
+int
+advansys_proc_info(char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+ struct Scsi_Host *shp;
+ int i;
+ char *cp;
+ int cplen;
+ int cnt;
+ int totcnt;
+ int leftlen;
+ char *curbuf;
+ off_t advoffset;
+ Scsi_Device *scd;
+ char prtbuf[480]; /* 6 lines */
+
+ ASC_DBG(1, "advansys_proc_info: begin\n");
+
+ /*
+ * User write not supported.
+ */
+ if (inout == TRUE) {
+ return(-ENOSYS);
+ }
+
+ /*
+ * User read of /proc/scsi/advansys file.
+ */
+
+ /* Find the specified board. */
+ for (i = 0; i < asc_board_count; i++) {
+ if (asc_host[i]->host_no == hostno) {
+ break;
+ }
+ }
+ if (i == asc_board_count) {
+ return(-ENOENT);
+ }
+ shp = asc_host[i];
+
+ /* Always copy read data to the beginning of the buffer. */
+ *start = buffer;
+
+ curbuf = buffer;
+ advoffset = 0;
+ totcnt = 0;
+ leftlen = length;
+
+ /* Get board information. */
+ cp = (char *) advansys_info(shp);
+ strcat(cp, "\n");
+ cplen = strlen(cp);
+
+ /* Copy board information. */
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+ /*
+ * Get and copy information for each device attached to the board.
+ */
+ cp = &prtbuf[0];
+ sprintf(cp, "\nDevices attached to SCSI Host %d:\n", shp->host_no);
+ cplen = strlen(cp);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+ cp = &prtbuf[0];
+ for (scd = scsi_devices; scd; scd = scd->next) {
+ if (scd->host == shp) {
+ proc_print_scsidevice(scd, cp, &cplen, 0);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+ }
+ }
+
+#ifdef ADVANSYS_STATS
+ /*
+ * prtbuf[] has about 6 lines worth of space. If the statistics ever
+ * get longer than 6 lines, prtbuf[] should be increased in size. If
+ * prtbuf[] is too small it will not be overwritten. Instead the user
+ * just won't get all of the available statistics.
+ */
+ cp = &prtbuf[0];
+ cplen = asc_prt_stats(cp, sizeof(prtbuf));
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+#endif /* ADVANSYS_STATS */
+
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+
+ return totcnt;
+}
+#endif /* LINUX_1_3 */
+
+
+/*
+ * advansys_detect()
+ *
+ * Detect function for AdvanSys adapters.
+ *
+ * Argument is a pointer to the host driver's scsi_hosts entry.
+ *
+ * Return number of adapters found.
+ *
+ * Note: Because this function is called during system initialization
+ * it must not call SCSI mid-level functions including scsi_malloc()
+ * and scsi_free().
+ */
+int
+advansys_detect(Scsi_Host_Template *tpnt)
+{
+ static int detect_called = ASC_FALSE;
+ int iop;
+ int bus;
+ struct Scsi_Host *shp;
+ ASC_DVC_VAR *boardp;
+ int ioport = 0;
+ PCI_DEVICE pciDevice;
+ PCI_CONFIG_SPACE pciConfig;
+ int ret;
+ extern PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX];
+
+
+ if (detect_called == ASC_FALSE) {
+ detect_called = ASC_TRUE;
+ } else {
+ printk("AdvanSys SCSI: advansys_detect() mulitple calls ignored\n");
+ return 0;
+ }
+
+ ASC_DBG(1, "advansys_detect: begin\n");
+
+#ifdef LINUX_1_3
+ tpnt->proc_dir = &proc_scsi_advansys;
+#endif /* LINUX_1_3 */
+
+#ifdef ADVANSYS_STATS
+ memset(&asc_stats, 0, sizeof(asc_stats));
+#endif /* ADVANSYS_STATS */
+
+ asc_board_count = 0;
+
+ /*
+ * If I/O port probing has been modified, then verify and
+ * clean-up the 'asc_ioport' list.
+ */
+ if (asc_iopflag == ASC_TRUE) {
+ for (ioport = 0; ioport < ASC_NUM_BOARD_SUPPORTED; ioport++) {
+ ASC_DBG2(1, "asdvansys_detect: asc_ioport[%d] %x\n",
+ ioport, asc_ioport[ioport]);
+ if (asc_ioport[ioport] != 0) {
+ for (iop = 0; iop < ASC_IOADR_TABLE_MAX_IX; iop++) {
+ if (_asc_def_iop_base[iop] == asc_ioport[ioport]) {
+ break;
+ }
+ }
+ if (iop == ASC_IOADR_TABLE_MAX_IX) {
+ printk("AdvanSys SCSI: specified I/O Port 0x%X is invalid\n",
+ asc_ioport[ioport]);
+ asc_ioport[ioport] = 0;
+ }
+ }
+ }
+ ioport = 0;
+ }
+
+ memset(&pciDevice, 0, sizeof(PCI_DEVICE));
+ memset(&pciConfig, 0, sizeof(PCI_CONFIG_SPACE));
+ pciDevice.maxBusNumber = PCI_MAX_BUS;
+ pciDevice.endSlot = PCI_MAX_SLOT;
+
+ for (bus = 0; bus < ASC_NUM_BUS; bus++) {
+
+ ASC_DBG2(1, "advansys_detect: bus search type %d (%s)\n",
+ bus, asc_bus_name[bus]);
+ iop = 0;
+
+ while (asc_board_count < ASC_NUM_BOARD_SUPPORTED) {
+
+ ASC_DBG1(2, "advansys_detect: asc_board_count %d\n",
+ asc_board_count);
+
+ switch (asc_bus[bus]) {
+ case ASC_IS_ISA:
+ case ASC_IS_VL:
+ if (asc_iopflag == ASC_FALSE) {
+ iop = AscSearchIOPortAddr(iop, asc_bus[bus]);
+ } else {
+ /*
+ * ISA and VL I/O port scanning has either been
+ * eliminated or limited to selected ports on
+ * the LILO command line, /etc/lilo.conf, or
+ * by setting variables when the module was loaded.
+ */
+ ASC_DBG(1, "advansys_detect: I/O port scanning modified\n");
+ ioport_try_again:
+ iop = 0;
+ for (; ioport < ASC_NUM_BOARD_SUPPORTED; ioport++) {
+ if ((iop = asc_ioport[ioport]) != 0) {
+ break;
+ }
+ }
+ if (iop) {
+ ASC_DBG1(1, "advansys_detect: probing I/O port %x...\n",
+ iop);
+ if (check_region(iop, ASC_IOADR_GAP) != 0) {
+ printk("AdvanSys SCSI: specified I/O Port 0x%X is busy\n", iop);
+ /* Don't try this I/O port twice. */
+ asc_ioport[ioport] = 0;
+ goto ioport_try_again;
+ } else if (AscFindSignature(iop) == ASC_FALSE) {
+ printk("AdvanSys SCSI: specified I/O Port 0x%X has no adapter\n", iop);
+ /* Don't try this I/O port twice. */
+ asc_ioport[ioport] = 0;
+ goto ioport_try_again;
+ } else {
+ /*
+ * If this isn't an ISA board, then it must be
+ * a VL board. If an ISA board is currently
+ * being looked for, then try for another
+ * ISA board in 'asc_ioport'.
+ */
+ if (asc_bus[bus] == ASC_IS_ISA &&
+ (AscGetChipVersion(iop, ASC_IS_ISA) &
+ ASC_CHIP_VER_ISA_BIT) == 0) {
+ /*
+ * Don't clear 'asc_ioport[ioport]'. Try
+ * this board again for VL. Increment
+ * 'ioport' past this board.
+ */
+ ioport++;
+ goto ioport_try_again;
+ }
+ }
+ /*
+ * This board appears good. Clear the I/O port value
+ * so it isn't tried again, and increment 'ioport'
+ * for the next iteration.
+ */
+ asc_ioport[ioport++] = 0;
+ }
+ }
+ break;
+
+ case ASC_IS_EISA:
+ iop = AscSearchIOPortAddr(iop, asc_bus[bus]);
+ break;
+
+ case ASC_IS_PCI:
+ if (asc_srch_pci_dev(&pciDevice) != PCI_DEVICE_FOUND) {
+ iop = 0;
+ } else {
+ ASC_DBG2(2,
+ "advansys_detect: slotFound %d, busNumber %d\n",
+ pciDevice.slotFound, pciDevice.busNumber);
+ asc_get_pci_cfg(&pciDevice, &pciConfig);
+ iop = pciConfig.baseAddress[0] & PCI_IOADDRESS_MASK;
+ ASC_DBG2(2, "advansys_detect: iop %x, irqLine %d\n",
+ iop, pciConfig.irqLine);
+ }
+ break;
+
+ default:
+ ASC_DBG(0, "advansys_detect: unknown bus type\n");
+ break;
+ }
+ ASC_DBG1(1, "advansys_detect: iop %x\n", iop);
+
+ /*
+ * Adapter not found, try next bus type.
+ */
+ if (iop == 0) {
+ break;
+ }
+
+ /*
+ * Adapter found.
+ *
+ * Register the adapter, get its configuration, and
+ * initialize it.
+ */
+ ASC_DBG(2, "advansys_detect: scsi_register()\n");
+ shp = scsi_register(tpnt, sizeof(struct asc_board));
+
+ /* Save a pointer to the Scsi_host of each found board. */
+ asc_host[asc_board_count++] = shp;
+
+ /* Initialize private per board data */
+ memset(ASC_BOARD(shp), 0, sizeof(struct asc_board));
+ boardp = &ASC_BOARD(shp)->board;
+ boardp->cfg = &ASC_BOARD(shp)->cfg;
+ boardp->cfg->overrun_buf = &ASC_BOARD(shp)->overrun_buf[0];
+ boardp->iop_base = iop;
+
+ /*
+ * Set the board bus type and PCI IRQ for AscInitGetConfig().
+ */
+ boardp->bus_type = asc_bus[bus];
+ switch (boardp->bus_type) {
+ case ASC_IS_ISA:
+ shp->unchecked_isa_dma = TRUE;
+ break;
+ case ASC_IS_EISA:
+ shp->unchecked_isa_dma = FALSE;
+ break;
+ case ASC_IS_VL:
+ shp->unchecked_isa_dma = FALSE;
+ break;
+ case ASC_IS_PCI:
+ shp->irq = boardp->irq_no = pciConfig.irqLine;
+ boardp->cfg->pci_device_id = pciConfig.deviceID;
+ shp->unchecked_isa_dma = FALSE;
+ break;
+ default:
+ ASC_DBG(0, "advansys_detect: unknown adapter type");
+ shp->unchecked_isa_dma = TRUE;
+ break;
+ }
+
+ /*
+ * Get the board configuration. AscInitGetConfig() may change
+ * the board's bus_type value. The asc_bus[bus] value should no
+ * longer be used.
+ */
+ ASC_DBG(2, "advansys_detect: AscInitGetConfig()\n");
+ switch (ret = AscInitGetConfig(boardp)) {
+ case 0: /* No error */
+ break;
+ case ASC_WARN_IO_PORT_ROTATE:
+ ASC_DBG(0, "AscInitGetConfig: I/O port address modified\n");
+ break;
+ case ASC_WARN_EEPROM_CHKSUM:
+ ASC_DBG(0, "AscInitGetConfig: EEPROM checksum error\n");
+ break;
+ case ASC_WARN_IRQ_MODIFIED:
+ ASC_DBG(0, "AscInitGetConfig: IRQ modified\n");
+ break;
+ case ASC_WARN_CMD_QNG_CONFLICT:
+ ASC_DBG(0,
+ "AscInitGetConfig: Tag queuing enabled w/o disconnects\n");
+ break;
+ default:
+ ASC_DBG1(0, "AscInitGetConfig: Unknown warning: %x\n", ret);
+ break;
+ }
+ if (boardp->err_code != 0) {
+ ASC_DBG2(0,
+ "AscInitGetConfig: error: init_state %x, err_code %x\n",
+ boardp->init_state, boardp->err_code);
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+
+ /*
+ * Modify board configuration.
+ */
+ boardp->isr_callback = (Ptr2Func) asc_isr_callback;
+ boardp->exe_callback = (Ptr2Func) NULL;
+
+ ASC_DBG(2, "advansys_detect: AscInitSetConfig()\n");
+ switch (ret = AscInitSetConfig(boardp)) {
+ case 0: /* No error. */
+ break;
+ case ASC_WARN_IO_PORT_ROTATE:
+ ASC_DBG(0, "AscInitSetConfig: I/O port address modified\n");
+ break;
+ case ASC_WARN_EEPROM_CHKSUM:
+ ASC_DBG(0, "AscInitSetConfig: EEPROM checksum error\n");
+ break;
+ case ASC_WARN_IRQ_MODIFIED:
+ ASC_DBG(0, "AscInitSetConfig: IRQ modified\n");
+ break;
+ case ASC_WARN_CMD_QNG_CONFLICT:
+ ASC_DBG(0, "AscInitSetConfig: Tag queuing w/o disconnects\n");
+ break;
+ default:
+ ASC_DBG1(0, "AscInitSetConfig: Unknown warning: %x\n", ret);
+ break;
+ }
+ if (boardp->err_code != 0) {
+ ASC_DBG2(0,
+ "AscInitSetConfig: error: init_state %x, err_code %x\n",
+ boardp->init_state, boardp->err_code);
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+
+ /*
+ * Finish initializing the 'Scsi_Host' structure.
+ */
+
+ /* AscInitSetConfig() will set the IRQ for non-PCI boards. */
+ if (boardp->bus_type != ASC_IS_PCI) {
+ shp->irq = boardp->irq_no;
+ }
+
+ shp->io_port = boardp->iop_base;
+ shp->n_io_port = ASC_IOADR_GAP;
+ shp->this_id = boardp->cfg->chip_scsi_id;
+
+ /* Maximum number of queues this adapter can handle. */
+ shp->can_queue = boardp->max_total_qng;
+
+ /*
+ * XXX - Command queuing limits are maintained per target
+ * by AdvanSys adapters. Set 'cmd_per_lun' to the minimum
+ * value of all the target settings for the adapter.
+ *
+ * For now set 'cmd_per_lun' to 'max_total_qng'. This
+ * value should be adjusted every time a new device is
+ * found in asc_init_dev().
+ *
+ * XXX - memory allocation is done by the mid-level scsi
+ * driver based on 'cmd_per_lun'. If 'cmd_per_lun' is too large
+ * allocation failures can occur in scsi_register_host().
+ * A 'Scsi_Cmnd' structure is pre-allocated for each command
+ * and DMA memory is also reserved. Set it artificially low for now.
+ *
+ * shp->cmd_per_lun = boardp->max_total_qng;
+ */
+#ifdef MODULE
+ shp->cmd_per_lun = 1;
+#else /* MODULE */
+ shp->cmd_per_lun = 4;
+#endif /* MODULE */
+ ASC_DBG1(1, "advansys_detect: cmd_per_lun: %d\n", shp->cmd_per_lun);
+
+ /* Maximum number of scatter-gather elements adapter can handle. */
+ /*
+ * XXX - memory allocation is done by the mid-level scsi
+ * driver based on sg_tablesize. If 'sg_tablesize' is too large
+ * allocation failures can occur in scsi_register_host().
+ */
+#ifdef MODULE
+ shp->sg_tablesize = 8;
+#else /* MODULE */
+ shp->sg_tablesize = ASC_MAX_SG_LIST;
+#endif /* MODULE */
+ ASC_DBG1(1, "advansys_detect: sg_tablesize: %d\n",
+ shp->sg_tablesize);
+
+ /* BIOS start address. */
+ shp->base = (char *) ((ulong) AscGetChipBiosAddress(
+ boardp->iop_base,
+ boardp->bus_type));
+
+ /*
+ * Register Board Resources - I/O Port, DMA, IRQ
+ */
+
+ /* Register I/O port range */
+ ASC_DBG(2, "advansys_detect: request_region()\n");
+ request_region(shp->io_port, shp->n_io_port, "advansys");
+
+ /* Register DMA channel for ISA bus. */
+ if ((boardp->bus_type & ASC_IS_ISA) == 0) {
+ shp->dma_channel = NO_ISA_DMA;
+ } else {
+ shp->dma_channel = boardp->cfg->isa_dma_channel;
+ if ((ret = request_dma(shp->dma_channel, "advansys")) != 0) {
+ ASC_DBG2(0, "advansys_detect: request_dma() %d failed %d\n",
+ shp->dma_channel, ret);
+ release_region(shp->io_port, shp->n_io_port);
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+ AscEnableIsaDma(shp->dma_channel);
+ }
+
+ /* Register IRQ Number. */
+ ASC_DBG1(2, "advansys_detect: request_irq() %d\n", shp->irq);
+ if ((ret = request_irq(shp->irq, advansys_interrupt,
+ SA_INTERRUPT, "advansys")) != 0) {
+ ASC_DBG1(0, "advansys_detect: request_irq() failed %d\n", ret);
+ release_region(shp->io_port, shp->n_io_port);
+ if (shp->dma_channel != NO_ISA_DMA) {
+ free_dma(shp->dma_channel);
+ }
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+
+ /*
+ * Initialize board RISC chip and enable interrupts.
+ */
+ ASC_DBG(2, "advansys_detect: AscInitAsc1000Driver()\n");
+ if (AscInitAsc1000Driver(boardp)) {
+ ASC_DBG2(0,
+ "AscInitAsc1000Driver: error: init_state %x, err_code %x\n",
+ boardp->init_state, boardp->err_code);
+ release_region(shp->io_port, shp->n_io_port);
+ if (shp->dma_channel != NO_ISA_DMA) {
+ free_dma(shp->dma_channel);
+ }
+ free_irq(shp->irq);
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+ ASC_DBG_PRT_SCSI_HOST(2, shp);
+ }
+ }
+
+ ASC_DBG1(1, "advansys_detect: done: asc_board_count %d\n", asc_board_count);
+ return asc_board_count;
+}
+
+/*
+ * advansys_release()
+ *
+ * Release resources allocated for a single AdvanSys adapter.
+ */
+int
+advansys_release(struct Scsi_Host *shp)
+{
+ ASC_DBG(1, "advansys_release: begin\n");
+ free_irq(shp->irq);
+ if (shp->dma_channel != NO_ISA_DMA) {
+ ASC_DBG(1, "advansys_release: free_dma()\n");
+ free_dma(shp->dma_channel);
+ }
+ release_region(shp->io_port, shp->n_io_port);
+ scsi_unregister(shp);
+ ASC_DBG(1, "advansys_release: end\n");
+ return 0;
+}
+
+/*
+ * advansys_info()
+ *
+ * Return suitable for printing on the console with the argument
+ * adapter's configuration information.
+ */
+const char *
+advansys_info(struct Scsi_Host *shp)
+{
+ static char info[128];
+ ASC_DVC_VAR *boardp;
+ char *busname;
+
+ boardp = &ASC_BOARD(shp)->board;
+ ASC_DBG(1, "advansys_info: begin\n");
+ if (boardp->bus_type & ASC_IS_ISA) {
+ sprintf(info,
+ "AdvanSys SCSI %s: ISA (%u CDB): BIOS %X, IO %X-%X, IRQ %u, DMA %u",
+ ASC_VERSION, ASC_BOARD(shp)->board.max_total_qng,
+ (unsigned) shp->base, shp->io_port,
+ shp->io_port + (shp->n_io_port - 1), shp->irq, shp->dma_channel);
+ } else {
+ switch (boardp->bus_type) {
+ case ASC_IS_EISA:
+ busname = "EISA";
+ break;
+ case ASC_IS_VL:
+ busname = "VL";
+ break;
+ case ASC_IS_PCI:
+ busname = "PCI";
+ break;
+ default:
+ busname = "?";
+ ASC_DBG1(0, "advansys_info: unknown bus type %d\n",
+ boardp->bus_type);
+ break;
+ }
+ /* No DMA channel for non-ISA busses. */
+ sprintf(info,
+ "AdvanSys SCSI %s: %s (%u CDB): BIOS %X, IO %X-%X, IRQ %u",
+ ASC_VERSION, busname, ASC_BOARD(shp)->board.max_total_qng,
+ (unsigned) shp->base, shp->io_port,
+ shp->io_port + (shp->n_io_port - 1), shp->irq);
+ }
+ ASC_DBG(1, "advansys_info: end\n");
+ return info;
+}
+
+/*
+ * advansys_command()
+ *
+ * Polled-I/O. Apparently host driver shouldn't return until
+ * command is finished.
+ *
+ * XXX - Can host driver block here instead of spinning on command status?
+ */
+int
+advansys_command(Scsi_Cmnd *scp)
+{
+ ASC_DBG1(1, "advansys_command: scp %x\n", (unsigned) scp);
+ ASC_STATS(command);
+ scp->SCp.Status = 0; /* Set to a known state */
+ advansys_queuecommand(scp, advansys_command_done);
+ while (scp->SCp.Status == 0) {
+ continue;
+ }
+ ASC_DBG1(1, "advansys_command: result %x\n", scp->result);
+ return scp->result;
+}
+
+/*
+ * advansys_queuecommand()
+ *
+ * This function always returns 0. Command return status is saved
+ * in the 'scp' result field.
+ */
+int
+advansys_queuecommand(Scsi_Cmnd *scp, void (*done)(Scsi_Cmnd *))
+{
+ struct Scsi_Host *shp;
+ int flags = 0;
+ int interrupts_disabled;
+
+ ASC_STATS(queuecommand);
+ shp = scp->host;
+
+#ifdef LINUX_1_2
+ /*
+ * For LINUX_1_3, if statistics are enabled they can be accessed
+ * by reading /proc/scsi/advansys/[0-9].
+ */
+#ifdef ADVANSYS_STATS_1_2_PRINT
+ /* Display statistics every 10000 commands. */
+ if ((asc_stats.queuecommand % 10000) == 0) {
+ printk("\n");
+ (void) asc_prt_stats(NULL, 0);
+ printk("\n");
+ }
+#endif /* ADVANSYS_STATS_1_2_PRINT */
+#endif /* LINUX_1_2 */
+
+ /*
+ * If there are any pending commands for this board, disable
+ * interrupts before trying to execute them to preserve request ordering.
+ *
+ * The typical case will be no pending commands and interrupts
+ * not disabled.
+ */
+ if (ASC_BOARD(shp)->pending_tidmask == 0) {
+ interrupts_disabled = ASC_FALSE;
+ } else {
+ ASC_STATS(cmd_disable);
+ /* Disable interrupts */
+ interrupts_disabled = ASC_TRUE;
+ save_flags(flags);
+ cli();
+ ASC_DBG1(1, "advansys_queuecommand: asc_execute_pending() %x\n",
+ ASC_BOARD(shp)->pending_tidmask);
+ asc_execute_pending(shp);
+ }
+
+ /*
+ * Save the function pointer to Linux mid-level 'done' function and
+ * execute the command.
+ */
+ scp->scsi_done = done;
+ if (asc_execute_scsi_cmnd(scp) == ASC_BUSY) {
+ if (interrupts_disabled == ASC_FALSE) {
+ save_flags(flags);
+ cli();
+ interrupts_disabled = ASC_TRUE;
+ }
+ asc_enqueue(shp, scp, scp->target, ASC_BACK);
+ }
+
+ if (interrupts_disabled == ASC_TRUE) {
+ restore_flags(flags);
+ }
+
+ return 0;
+}
+
+/*
+ * advansys_abort()
+ *
+ * Abort the specified command and reset the device
+ * associated with the command 'scp'.
+ */
+int
+advansys_abort(Scsi_Cmnd *scp)
+{
+ ASC_DVC_VAR *boardp;
+ int flags;
+ int ret;
+
+ ASC_DBG1(1, "advansys_abort: scp %x\n", (unsigned) scp);
+ save_flags(flags);
+ cli();
+ ASC_STATS(abort);
+ if (scp->host == NULL) {
+ scp->result = HOST_BYTE(DID_ERROR);
+ ret = SCSI_ABORT_ERROR;
+ } else if (asc_rmqueue(scp->host, scp, scp->target) == ASC_TRUE) {
+ scp->result = HOST_BYTE(DID_ABORT);
+ ret = SCSI_ABORT_SUCCESS;
+ (void) AscResetDevice(&ASC_BOARD(scp->host)->board, scp->target);
+ } else {
+ /* Must enable interrupts for AscAbortSRB() */
+ sti();
+ boardp = &ASC_BOARD(scp->host)->board;
+ scp->result = HOST_BYTE(DID_ABORT);
+ switch (AscAbortSRB(boardp, (ulong) scp)) {
+ case ASC_TRUE:
+ /* asc_isr_callback() will be called */
+ ASC_DBG(1, "advansys_abort: AscAbortSRB() TRUE\n");
+ ret = SCSI_ABORT_PENDING;
+ break;
+ case ASC_FALSE:
+ /* Request has apparently already completed. */
+ ASC_DBG(1, "advansys_abort: AscAbortSRB() FALSE\n");
+ ret = SCSI_ABORT_NOT_RUNNING;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "advansys_abort: AscAbortSRB() ERROR\n");
+ ret = SCSI_ABORT_ERROR;
+ break;
+ }
+ (void) AscResetDevice(boardp, scp->target);
+ }
+ restore_flags(flags);
+ ASC_DBG1(1, "advansys_abort: ret %d\n", ret);
+ return ret;
+}
+
+/*
+ * advansys_reset()
+ *
+ * Reset all devices and the SCSI bus for the board
+ * associated with 'scp'.
+ */
+int
+advansys_reset(Scsi_Cmnd *scp)
+{
+ ASC_DVC_VAR *boardp;
+ int flags;
+ Scsi_Cmnd *tscp;
+ int i;
+ int ret;
+
+ ASC_DBG1(1, "advansys_reset: %x\n", (unsigned) scp);
+ save_flags(flags);
+ cli();
+ ASC_STATS(reset);
+ if (scp->host == NULL) {
+ scp->result = HOST_BYTE(DID_ERROR);
+ ret = SCSI_RESET_ERROR;
+ } else {
+ /* Remove any pending commands, set DID_RESET, and done them. */
+ for (i = 0; i < ASC_MAX_TID; i++) {
+ while ((tscp = asc_dequeue(scp->host, i)) != NULL) {
+ tscp->result = HOST_BYTE(DID_RESET);
+ tscp->scsi_done(tscp);
+ }
+ }
+ /* Must enable interrupts for AscResetSB() */
+ sti();
+ boardp = &ASC_BOARD(scp->host)->board;
+ scp->result = HOST_BYTE(DID_RESET);
+ switch (AscResetSB(boardp)) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_abort: AscResetSB() TRUE\n");
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "advansys_abort: AscResetSB() ERROR\n");
+ ret = SCSI_RESET_ERROR;
+ break;
+ }
+ }
+ restore_flags(flags);
+ ASC_DBG1(1, "advansys_reset: ret %d", ret);
+ return ret;
+}
+
+/*
+ * advansys_biosparam()
+ *
+ * Translate disk drive geometry if the "BIOS greater than 1 GB"
+ * support is enabled for a drive.
+ *
+ * ip (information pointer) is an int array with the following definition:
+ * ip[0]: heads
+ * ip[1]: sectors
+ * ip[2]: cylinders
+ */
+int
+#ifdef LINUX_1_2
+advansys_biosparam(Disk *dp, int dep, int ip[])
+#else /* LINUX_1_3 */
+advansys_biosparam(Disk *dp, kdev_t dep, int ip[])
+#endif /* LINUX_1_3 */
+{
+ ASC_DBG(1, "advansys_biosparam: begin\n");
+ ASC_STATS(biosparam);
+ if ((ASC_BOARD(dp->device->host)->board.dvc_cntl & ASC_CNTL_BIOS_GT_1GB) &&
+ dp->capacity > 0x200000) {
+ ip[0] = 255;
+ ip[1] = 64;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ }
+ ip[2] = dp->capacity / (ip[0] * ip[1]);
+ ASC_DBG(1, "advansys_biosparam: end\n");
+ return 0;
+}
+
+/*
+ * advansys_setup()
+ *
+ * This function is called from init/main.c at boot time.
+ * It is passed LILO parameters that can be set from the
+ * LILO command line or in /etc/lilo.conf.
+ *
+ * It is used by the AdvanSys driver to either disable I/O
+ * port scanning or to limit scanning to 1 - 4 I/O ports.
+ * Regardless of the option setting, EISA and PCI boards
+ * will still be searched for and detected. This option
+ * only affects searching for ISA and VL boards.
+ *
+ * If ADVANSYS_DEBUG is defined the driver debug level may
+ * be set using the 5th (ASC_NUM_BOARD_SUPPORTED + 1) I/O Port.
+ *
+ * Examples:
+ * 1. Eliminate I/O port scanning:
+ * boot: linux advansys=
+ * or
+ * boot: linux advansys=0x0
+ * 2. Limit I/O port scanning to one I/O port:
+ * boot: linux advansys=0x110
+ * 3. Limit I/O port scanning to four I/O ports:
+ * boot: linux advansys=0x110,0x210,0x230,0x330
+ * 4. If ADVANSYS_DEBUG, limit I/O port scanning to four I/O ports and
+ * set the driver debug level to 2.
+ * boot: linux advansys=0x110,0x210,0x230,0x330,0xdeb2
+ *
+ * ints[0] - number of arguments
+ * ints[1] - first argument
+ * ints[2] - second argument
+ * ...
+ */
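+/*
+ * For example, "advansys=0x110,0x330" results in ints[0] == 2,
+ * ints[1] == 0x110, and ints[2] == 0x330.
+ */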
+void
+advansys_setup(char *str, int *ints)
+{
+ int i;
+
+ if (asc_iopflag == ASC_TRUE) {
+ printk("AdvanSys SCSI: 'advansys' LILO option may appear only once\n");
+ return;
+ }
+
+ asc_iopflag = ASC_TRUE;
+
+ if (ints[0] > ASC_NUM_BOARD_SUPPORTED) {
+#ifdef ADVANSYS_DEBUG
+ if ((ints[0] == ASC_NUM_BOARD_SUPPORTED + 1) &&
+ (ints[ASC_NUM_BOARD_SUPPORTED + 1] >> 4 == 0xdeb)) {
+ asc_dbglvl = ints[ASC_NUM_BOARD_SUPPORTED + 1] & 0xf;
+ } else {
+#endif /* ADVANSYS_DEBUG */
+ printk("AdvanSys SCSI: only %d I/O ports accepted\n",
+ ASC_NUM_BOARD_SUPPORTED);
+#ifdef ADVANSYS_DEBUG
+ }
+#endif /* ADVANSYS_DEBUG */
+ }
+
+#ifdef ADVANSYS_DEBUG
+ ASC_DBG1(1, "advansys_setup: ints[0] %d\n", ints[0]);
+ for (i = 1; i < ints[0]; i++) {
+ ASC_DBG2(1, " ints[%d] %x", i, ints[i]);
+ }
+ ASC_DBG(1, "\n");
+#endif /* ADVANSYS_DEBUG */
+
+ for (i = 1; i <= ints[0] && i <= ASC_NUM_BOARD_SUPPORTED; i++) {
+ asc_ioport[i-1] = ints[i];
+ ASC_DBG2(1, "advansys_setup: asc_ioport[%d] %x\n",
+ i - 1, asc_ioport[i-1]);
+ }
+}
+
+
+/*
+ * --- Loadable Driver Support
+ */
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = ADVANSYS;
+# include "scsi_module.c"
+#endif /* MODULE */
+
+
+/*
+ * --- Miscellaneous Driver Functions
+ */
+
+#ifdef LINUX_1_3
+/*
+ * asc_proc_copy()
+ *
+ * Copy proc information to a read buffer considering the current read
+ * offset in the file and the remaining space in the read buffer.
+ */
+STATIC int
+asc_proc_copy(off_t advoffset, off_t offset, char *curbuf, int leftlen,
+ char *cp, int cplen)
+{
+ int cnt = 0;
+
+ ASC_DBG3(2, "asc_proc_copy: offset %d, advoffset %d, cplen %d\n",
+ (unsigned) offset, (unsigned) advoffset, cplen);
+ if (offset <= advoffset) {
+ /* Read offset below current offset, copy everything. */
+ cnt = min(cplen, leftlen);
+ ASC_DBG3(2, "asc_proc_copy: curbuf %x, cp %x, cnt %d\n",
+ (unsigned) curbuf, (unsigned) cp, cnt);
+ memcpy(curbuf, cp, cnt);
+ } else if (offset < advoffset + cplen) {
+ /* Read offset within current range, partial copy. */
+ cnt = (advoffset + cplen) - offset;
+ cp = (cp + cplen) - cnt;
+ cnt = min(cnt, leftlen);
+ ASC_DBG3(2, "asc_proc_copy: curbuf %x, cp %x, cnt %d\n",
+ (unsigned) curbuf, (unsigned) cp, cnt);
+ memcpy(curbuf, cp, cnt);
+ }
+ return cnt;
+}
+#endif /* LINUX_1_3 */
+
+/*
+ * First-level interrupt handler.
+ */
+STATIC void
+advansys_interrupt(int irq, struct pt_regs *regs)
+{
+ int i;
+ int flags;
+ Scsi_Cmnd *scp;
+ Scsi_Cmnd *tscp;
+
+ /* Disable interrupts, if they aren't already disabled. */
+ save_flags(flags);
+ cli();
+
+ ASC_DBG(1, "advansys_interrupt: begin\n");
+ ASC_STATS(interrupt);
+ /*
+ * Check for interrupts on all boards.
+ * AscISR() will call asc_isr_callback().
+ */
+ for (i = 0; i < asc_board_count; i++) {
+ while (AscIsIntPending(asc_host[i]->io_port)) {
+ ASC_DBG(1, "advansys_interrupt: before AscISR()\n");
+ AscISR(&ASC_BOARD(asc_host[i])->board);
+ }
+ }
+ ASC_DBG(1, "advansys_interrupt: end\n");
+
+ /*
+ * While interrupts are still disabled save the list of requests that
+ * need their done function called. After re-enabling interrupts call
+ * the done function which may re-enable interrupts anyway.
+ */
+ if ((scp = asc_scsi_done) != NULL) {
+ asc_scsi_done = NULL;
+ }
+
+ /* Re-enable interrupts, if they were enabled on entry. */
+ restore_flags(flags);
+
+ while (scp) {
+ tscp = (Scsi_Cmnd *) scp->host_scribble;
+ scp->scsi_done(scp);
+ scp = tscp;
+ }
+
+ return;
+}
+
+/*
+ * Function used only with polled I/O requests that are initiated by
+ * advansys_command().
+ */
+STATIC void
+advansys_command_done(Scsi_Cmnd *scp)
+{
+ ASC_DBG1(1, "advansys_command_done: scp %x\n", (unsigned) scp);
+ scp->SCp.Status = 1;
+}
+
+/*
+ * Execute a single 'Scsi_Cmnd'.
+ *
+ * The function 'done' is called when the request has been completed.
+ *
+ * Scsi_Cmnd:
+ *
+ * host - board controlling device
+ * device - device to send command
+ * target - target of device
+ * lun - lun of device
+ * cmd_len - length of SCSI CDB
+ * cmnd - buffer for SCSI 8, 10, or 12 byte CDB
+ * use_sg - if non-zero indicates scatter-gather request with use_sg elements
+ *
+ * if (use_sg == 0)
+ * request_buffer - buffer address for request
+ * request_bufflen - length of request buffer
+ * else
+ * request_buffer - pointer to scatterlist structure
+ *
+ * sense_buffer - sense command buffer
+ *
+ * result (4 bytes of an int):
+ * Byte Meaning
+ * 0 SCSI Status Byte Code
+ * 1 SCSI One Byte Message Code
+ * 2 Host Error Code
+ * 3 Mid-Level Error Code
+ *
+ * host driver fields:
+ * SCp - Scsi_Pointer used for command processing status
+ * scsi_done - used to save caller's done function
+ * host_scribble - used for pointer to another Scsi_Cmnd
+ *
+ * If this function returns ASC_NOERROR or ASC_ERROR the done
+ * function has been called. If ASC_BUSY is returned the request
+ * must be enqueued by the caller and re-tried later.
+ */
+STATIC int
+asc_execute_scsi_cmnd(Scsi_Cmnd *scp)
+{
+ ASC_DVC_VAR *boardp;
+ ASC_SCSI_Q scsiq;
+ ASC_SG_HEAD sghead;
+ int ret;
+
+ ASC_DBG2(1, "asc_execute_scsi_cmnd: scp %x, done %x\n",
+ (unsigned) scp, (unsigned) scp->scsi_done);
+
+ boardp = &ASC_BOARD(scp->host)->board;
+
+ /*
+ * If this is the first command, then initialize the device. If
+ * no device is found set 'DID_BAD_TARGET' and return.
+ */
+ if ((ASC_BOARD(scp->host)->init_tidmask &
+ ASC_TIX_TO_TARGET_ID(scp->target)) == 0) {
+ if (asc_init_dev(boardp, scp) == ASC_FALSE) {
+ scp->result = HOST_BYTE(DID_BAD_TARGET);
+ scp->scsi_done(scp);
+ return ASC_ERROR;
+ }
+ ASC_BOARD(scp->host)->init_tidmask |= ASC_TIX_TO_TARGET_ID(scp->target);
+ }
+
+ memset(&scsiq, 0, sizeof(ASC_SCSI_Q));
+
+ /*
+ * Point the ASC_SCSI_Q to the 'Scsi_Cmnd'.
+ */
+ scsiq.q2.srb_ptr = (ulong) scp;
+
+ /*
+ * Build the ASC_SCSI_Q request.
+ */
+ scsiq.cdbptr = &scp->cmnd[0];
+ scsiq.q2.cdb_len = scp->cmd_len;
+ scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(scp->target);
+ scsiq.q1.target_lun = scp->lun;
+ scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(scp->target, scp->lun);
+ scsiq.q1.sense_addr = (ulong) &scp->sense_buffer[0];
+ scsiq.q1.sense_len = sizeof(scp->sense_buffer);
+ scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
+
+ /*
+ * Build ASC_SCSI_Q for a contiguous buffer or a scatter-gather
+ * buffer command.
+ */
+ if (scp->use_sg == 0) {
+ /*
+ * CDB request of single contiguous buffer.
+ */
+ ASC_STATS(cont_cnt);
+ /* request_buffer is already a real address. */
+ scsiq.q1.data_addr = (ulong) scp->request_buffer;
+ scsiq.q1.data_cnt = scp->request_bufflen;
+ ASC_STATS_ADD(cont_xfer, (scp->request_bufflen + 511) >> 9);
+ scsiq.q1.sg_queue_cnt = 0;
+ scsiq.sg_head = NULL;
+ } else {
+ /*
+ * CDB scatter-gather request list.
+ */
+ int sgcnt;
+ struct scatterlist *slp;
+
+ if (scp->use_sg > ASC_MAX_SG_LIST) {
+ ASC_DBG2(0, "asc_execute_scsi_cmnd: use_sg %d > %d\n",
+ scp->use_sg, ASC_MAX_SG_LIST);
+ scp->result = HOST_BYTE(DID_ERROR);
+ scp->scsi_done(scp);
+ return ASC_ERROR;
+ }
+
+ ASC_STATS(sg_cnt);
+
+ /*
+ * Allocate a ASC_SG_HEAD structure and set the ASC_SCSI_Q
+ * to point to it.
+ */
+ memset(&sghead, 0, sizeof(ASC_SG_HEAD));
+
+ scsiq.q1.cntl |= QC_SG_HEAD;
+ scsiq.sg_head = &sghead;
+ scsiq.q1.data_cnt = 0;
+ scsiq.q1.data_addr = 0;
+ sghead.entry_cnt = scsiq.q1.sg_queue_cnt = scp->use_sg;
+ ASC_STATS_ADD(sg_elem, sghead.entry_cnt);
+
+ /*
+ * Convert scatter-gather list into ASC_SG_HEAD list.
+ */
+ slp = (struct scatterlist *) scp->request_buffer;
+ for (sgcnt = 0; sgcnt < scp->use_sg; sgcnt++, slp++) {
+ sghead.sg_list[sgcnt].addr = (ulong) slp->address;
+ sghead.sg_list[sgcnt].bytes = slp->length;
+ ASC_STATS_ADD(sg_xfer, (slp->length + 511) >> 9);
+ }
+ }
+
+ ASC_DBG_PRT_SCSI_Q(2, &scsiq);
+ ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);
+
+ switch (ret = AscExeScsiQueue(boardp, &scsiq)) {
+ case ASC_NOERROR:
+ ASC_DBG(1, "asc_execute_scsi_cmnd: AscExeScsiQueue() ASC_NOERROR\n");
+ break;
+ case ASC_BUSY:
+ /* Caller must enqueue request and retry later. */
+ break;
+ case ASC_ERROR:
+ ASC_DBG1(0,
+ "asc_execute_scsi_cmnd: AscExeScsiQueue() ASC_ERROR err_code %x\n",
+ boardp->err_code);
+ ASC_STATS(error);
+ scp->result = HOST_BYTE(DID_ERROR);
+ scp->scsi_done(scp);
+ break;
+ }
+
+ ASC_DBG(1, "asc_execute_scsi_cmnd: end\n");
+ return ret;
+}
+
+/*
+ * asc_isr_callback() - Second Level Interrupt Handler called by AscISR().
+ */
+void
+asc_isr_callback(ASC_DVC_VAR *boardp, ASC_QDONE_INFO *qdonep)
+{
+ Scsi_Cmnd *scp;
+ struct Scsi_Host *shp;
+ int flags;
+ Scsi_Cmnd **scpp;
+
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_DBG2(1, "asc_isr_callback: boardp %x, qdonep %x\n",
+ (unsigned) boardp, (unsigned) qdonep);
+ ASC_STATS(callback);
+ ASC_DBG_PRT_QDONE_INFO(2, qdonep);
+
+ /*
+ * Get the Scsi_Cmnd structure and Scsi_Host structure for the
+ * command that has been completed.
+ */
+ scp = (Scsi_Cmnd *) qdonep->d2.srb_ptr;
+ ASC_DBG1(1, "asc_isr_callback: scp %x\n", (unsigned) scp);
+ ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);
+
+ shp = scp->host;
+ ASC_ASSERT(shp);
+ ASC_DBG1(1, "asc_isr_callback: shp %x\n", (unsigned) shp);
+
+ /*
+ * 'qdonep' contains the command's ending status.
+ */
+ switch (qdonep->d3.done_stat) {
+ case QD_NO_ERROR:
+ ASC_DBG(2, "asc_isr_callback: QD_NO_ERROR\n");
+ switch (qdonep->d3.host_stat) {
+ case QHSTA_NO_ERROR:
+ scp->result = 0;
+ break;
+ default:
+ /* QHSTA error occurred */
+ scp->result = HOST_BYTE(DID_ERROR);
+ break;
+ }
+ break;
+
+ case QD_WITH_ERROR:
+ ASC_DBG(2, "asc_isr_callback: QD_WITH_ERROR\n");
+ switch (qdonep->d3.host_stat) {
+ case QHSTA_NO_ERROR:
+ if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
+ ASC_DBG(2, "asc_isr_callback: SS_CHK_CONDITION\n");
+ ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
+ sizeof(scp->sense_buffer));
+ /*
+ * Note: The status_byte() macro used by target drivers
+ * defined in scsi.h shifts the status byte returned by
+ * host drivers right by 1 bit. This is why the status byte
+ * definitions used by target drivers are right shifted. For instance
+ * target drivers use CHECK_CONDITION, defined to 0x1, instead
+ * of the SCSI defined check condition value of 0x2.
+ */
+ scp->result = DRIVER_BYTE(DRIVER_SENSE) |
+ STATUS_BYTE(qdonep->d3.scsi_stat);
+ } else {
+ scp->result = STATUS_BYTE(qdonep->d3.scsi_stat);
+ }
+ break;
+
+ default:
+ /* QHSTA error occurred */
+ ASC_DBG1(2, "asc_isr_callback: host_stat %x\n",
+ qdonep->d3.host_stat);
+ scp->result = HOST_BYTE(DID_ERROR) | MSG_BYTE(qdonep->d3.scsi_msg) |
+ STATUS_BYTE(qdonep->d3.scsi_stat);
+ break;
+ }
+ break;
+
+ case QD_ABORTED_BY_HOST:
+ ASC_DBG(1, "asc_isr_callback: QD_ABORTED_BY_HOST\n");
+ scp->result = HOST_BYTE(DID_ABORT) | MSG_BYTE(qdonep->d3.scsi_msg) |
+ STATUS_BYTE(qdonep->d3.scsi_stat);
+ break;
+
+ default:
+ ASC_DBG1(0, "asc_isr_callback: done_stat %x\n", qdonep->d3.done_stat );
+ scp->result = HOST_BYTE(DID_ERROR) | MSG_BYTE(qdonep->d3.scsi_msg) |
+ STATUS_BYTE(qdonep->d3.scsi_stat);
+ break;
+ }
+
+ /*
+ * Before calling 'scsi_done' for the current 'Scsi_Cmnd' and possibly
+ * triggering more commands to be issued, try to start any pending
+ * commands.
+ */
+ if (ASC_BOARD(shp)->pending_tidmask != 0) {
+ /*
+ * If there are any pending commands for this board, disable
+ * interrupts before trying to execute them to preserve request ordering.
+ */
+ ASC_STATS(intr_disable);
+ save_flags(flags);
+ cli();
+ ASC_DBG1(1, "asc_isr_callback: asc_execute_pending() %x\n",
+ ASC_BOARD(shp)->pending_tidmask);
+ asc_execute_pending(shp);
+ restore_flags(flags);
+ }
+
+ /*
+ * The 'Scsi_Cmnd' done function may enable interrupts, so it cannot
+ * safely be called from here. Instead add the command to the end of
+ * the global done list; its done function will be called from
+ * advansys_interrupt().
+ */
+ for (scpp = &asc_scsi_done; *scpp;
+ scpp = (Scsi_Cmnd **) &(*scpp)->host_scribble) {
+ ;
+ }
+ *scpp = scp;
+ scp->host_scribble = NULL;
+ return;
+}
+
+/*
+ * Execute as many pending requests as possible for the
+ * board specified by 'Scsi_Host'.
+ */
+STATIC void
+asc_execute_pending(struct Scsi_Host *shp)
+{
+ ASC_SCSI_BIT_ID_TYPE scan_tidmask;
+ Scsi_Cmnd *scp;
+ int i;
+
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ /*
+ * Execute pending commands for devices attached to
+ * the current board in round-robin fashion.
+ */
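+ /*
+ * Illustration, assuming ASC_TIX_TO_TARGET_ID(i) expands to a
+ * one-bit-per-target mask (1 << i): with commands pending for
+ * targets 2 and 5, 'scan_tidmask' starts as 0x24. A target's bit is
+ * cleared once its queue drains or asc_execute_scsi_cmnd() returns
+ * ASC_BUSY, and the loop ends when the mask reaches zero.
+ */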
+ scan_tidmask = ASC_BOARD(shp)->pending_tidmask;
+ do {
+ for (i = 0; i < ASC_MAX_TID; i++) {
+ if (scan_tidmask & ASC_TIX_TO_TARGET_ID(i)) {
+ if ((scp = asc_dequeue(shp, i)) == NULL) {
+ scan_tidmask &= ~ASC_TIX_TO_TARGET_ID(i);
+ } else if (asc_execute_scsi_cmnd(scp) == ASC_BUSY) {
+ scan_tidmask &= ~ASC_TIX_TO_TARGET_ID(i);
+ /* Put the request back at front of the list. */
+ asc_enqueue(shp, scp, i, ASC_FRONT);
+ }
+ }
+ }
+ } while (scan_tidmask);
+ return;
+}
+
+/*
+ * asc_init_dev()
+ *
+ * Perform one-time initialization of a device.
+ */
+STATIC int
+asc_init_dev(ASC_DVC_VAR *boardp, Scsi_Cmnd *scp)
+{
+ ASC_SCSI_REQ_Q *scsireqq;
+ ASC_CAP_INFO *cap_info;
+ ASC_SCSI_INQUIRY *inquiry;
+ int found;
+ ASC_SCSI_BIT_ID_TYPE save_use_tagged_qng;
+ ASC_SCSI_BIT_ID_TYPE save_can_tagged_qng;
+ int ret;
+#ifdef ASC_SET_CMD_PER_LUN
+ int i; /* used by the cmd_per_lun adjustment at the end of this function */
+#endif /* ASC_SET_CMD_PER_LUN */
+#ifdef ADVANSYS_DEBUG
+ ASC_SCSI_BIT_ID_TYPE tidmask; /* target id bit mask: 1 - 128 */
+#endif /* ADVANSYS_DEBUG */
+
+ ASC_DBG1(1, "asc_init_dev: target %d\n", (unsigned) scp->target);
+
+ /* Return true for the board's target id. */
+ if (boardp->cfg->chip_scsi_id == scp->target) {
+ return ASC_TRUE;
+ }
+
+ /*
+ * XXX - Host drivers should not modify the timeout field.
+ * But on the first command only, add some extra time to
+ * allow the driver to complete its initialization for the
+ * device.
+ */
+ scp->timeout += 2000; /* Add 2000 clock ticks (20 seconds at HZ == 100) to the timeout. */
+
+ /* Set-up AscInitPollTarget() arguments. */
+ scsireqq = &ASC_BOARD(scp->host)->scsireqq;
+ memset(scsireqq, 0, sizeof(ASC_SCSI_REQ_Q));
+ cap_info = &ASC_BOARD(scp->host)->cap_info;
+ memset(cap_info, 0, sizeof(ASC_CAP_INFO));
+ inquiry = &ASC_BOARD(scp->host)->inquiry;
+ memset(inquiry, 0, sizeof(ASC_SCSI_INQUIRY));
+
+ /*
+ * XXX - AscInitPollBegin() re-initializes these fields to
+ * zero. 'Or' in the new values and restore them before calling
+ * AscInitPollEnd(). Normally all targets are initialized within
+ * a call to AscInitPollBegin() and AscInitPollEnd().
+ */
+ save_use_tagged_qng = boardp->use_tagged_qng;
+ save_can_tagged_qng = boardp->cfg->can_tagged_qng;
+
+ ASC_DBG(2, "asc_init_dev: AscInitPollBegin()\n");
+ if (AscInitPollBegin(boardp)) {
+ ASC_DBG(0, "asc_init_dev: AscInitPollBegin() failed\n");
+ return ASC_FALSE;
+ }
+
+ scsireqq->sense_ptr = &scsireqq->sense[0];
+ scsireqq->r1.sense_len = ASC_MIN_SENSE_LEN;
+ scsireqq->r1.target_id = ASC_TID_TO_TARGET_ID(scp->target);
+ scsireqq->r1.target_lun = 0;
+ scsireqq->r2.target_ix = ASC_TIDLUN_TO_IX(scp->target, 0);
+
+ found = ASC_FALSE;
+ ASC_DBG(2, "asc_init_dev: AscInitPollTarget()\n");
+ switch (ret = AscInitPollTarget(boardp, scsireqq, inquiry, cap_info)) {
+ case ASC_TRUE:
+ found = ASC_TRUE;
+#ifdef ADVANSYS_DEBUG
+ tidmask = ASC_TIX_TO_TARGET_ID(scp->target);
+ ASC_DBG2(1, "asc_init_dev: lba %lu, blk_size %lu\n",
+ cap_info->lba, cap_info->blk_size);
+ ASC_DBG1(1, "asc_init_dev: peri_dvc_type %x\n",
+ inquiry->byte0.peri_dvc_type);
+ if (boardp->use_tagged_qng & tidmask) {
+ ASC_DBG1(1, "asc_init_dev: command queuing enabled: %d\n",
+ boardp->max_dvc_qng[scp->target]);
+ } else {
+ ASC_DBG(1, "asc_init_dev: command queuing disabled\n");
+ }
+ if (boardp->init_sdtr & tidmask) {
+ ASC_DBG(1, "asc_init_dev: synchronous transfers enabled\n");
+ } else {
+ ASC_DBG(1, "asc_init_dev: synchronous transfers disabled\n");
+ }
+ /* Set bit means fix disabled. */
+ if (boardp->pci_fix_asyn_xfer & tidmask) {
+ ASC_DBG(1, "asc_init_dev: synchronous transfer fix disabled\n");
+ } else {
+ ASC_DBG(1, "asc_init_dev: synchronous transfer fix enabled\n");
+ }
+#endif /* ADVANSYS_DEBUG */
+ break;
+ case ASC_FALSE:
+ ASC_DBG(1, "asc_init_dev: no device found\n");
+ break;
+ case ASC_ERROR:
+ ASC_DBG(0, "asc_init_dev: AscInitPollTarget() ASC_ERROR\n");
+ break;
+ default:
+ ASC_DBG1(0, "asc_init_dev: AscInitPollTarget() unknown ret %d\n", ret);
+ break;
+ }
+
+ /* XXX - 'Or' in original tag bits. */
+ boardp->use_tagged_qng |= save_use_tagged_qng;
+ boardp->cfg->can_tagged_qng |= save_can_tagged_qng;
+
+ ASC_DBG(2, "asc_init_dev: AscInitPollEnd()\n");
+ AscInitPollEnd(boardp);
+
+#ifdef ASC_SET_CMD_PER_LUN
+ /*
+ * XXX - Refer to the comment in advansys_detect()
+ * regarding cmd_per_lun.
+ */
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if (boardp->max_dvc_qng[i] < scp->host->cmd_per_lun) {
+ scp->host->cmd_per_lun = boardp->max_dvc_qng[i];
+ }
+ }
+#endif /* ASC_SET_CMD_PER_LUN */
+
+ return found;
+}
+
+/*
+ * Search for an AdvanSys PCI device in the PCI configuration space.
+ */
+STATIC int
+asc_srch_pci_dev(PCI_DEVICE *pciDevice)
+{
+ int ret;
+ static int scan = 1;
+
+ ASC_DBG(2, "asc_srch_pci_dev: begin\n");
+
+ if (scan) {
+ pciDevice->type = asc_scan_method(pciDevice);
+ scan = 0;
+ ASC_DBG1(2, "asc_srch_pci_dev: type %d\n", pciDevice->type);
+ }
+ ret = asc_pci_find_dev(pciDevice);
+ ASC_DBG1(2, "asc_srch_pci_dev: asc_pci_find_dev() return %d\n", ret);
+ if (ret == PCI_DEVICE_FOUND) {
+ pciDevice->slotNumber = pciDevice->slotFound + 1;
+ pciDevice->startSlot = pciDevice->slotFound + 1;
+ } else {
+ if (pciDevice->bridge > pciDevice->busNumber) {
+ ASC_DBG2(2, "asc_srch_pci_dev: bridge %x, busNumber %x\n",
+ pciDevice->bridge, pciDevice->busNumber);
+ pciDevice->busNumber++;
+ pciDevice->slotNumber = 0;
+ pciDevice->startSlot = 0;
+ pciDevice->endSlot = 0x0f;
+ ret = asc_srch_pci_dev(pciDevice);
+ ASC_DBG1(2, "asc_srch_pci_dev recursive call return %d\n", ret);
+ }
+ }
+ ASC_DBG1(2, "asc_srch_pci_dev: return %d\n", ret);
+ return ret;
+}
+
+/*
+ * Determine the access method to be used for 'pciDevice'.
+ */
+STATIC uchar
+asc_scan_method(PCI_DEVICE *pciDevice)
+{
+ ushort data;
+ PCI_DATA pciData;
+ uchar type;
+ uchar slot;
+
+ ASC_DBG(2, "asc_scan_method: begin\n");
+ memset(&pciData, 0, sizeof(pciData));
+ for (type = 1; type < 3; type++) {
+ pciData.type = type;
+ for (slot = 0; slot < PCI_MAX_SLOT; slot++) {
+ pciData.slot = slot;
+ data = asc_get_cfg_word(&pciData);
+ if ((data != 0xFFFF) && (data != 0x0000)) {
+ ASC_DBG2(4, "asc_scan_method: data %x, type %d\n", data, type);
+ return (type);
+ }
+ }
+ }
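+ /*
+ * Neither mechanism responded to the probe, so 'type' has run up to 3
+ * here. asc_get_cfg_word() and asc_get_cfg_byte() only test for type 2
+ * explicitly, so a value of 3 falls through to the type 1 code path.
+ */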
+ ASC_DBG1(4, "asc_scan_method: type %d\n", type);
+ return (type);
+}
+
+/*
+ * Check for an AdvanSys PCI device in 'pciDevice'.
+ *
+ * Return PCI_DEVICE_FOUND if found, otherwise return PCI_DEVICE_NOT_FOUND.
+ */
+STATIC int
+asc_pci_find_dev(PCI_DEVICE *pciDevice)
+{
+ PCI_DATA pciData;
+ ushort vendorid, deviceid;
+ uchar classcode, subclass;
+ uchar lslot;
+
+ ASC_DBG(3, "asc_pci_find_dev: begin\n");
+ pciData.type = pciDevice->type;
+ pciData.bus = pciDevice->busNumber;
+ pciData.func = pciDevice->devFunc;
+ lslot = pciDevice->startSlot;
+ for (; lslot < pciDevice->endSlot; lslot++) {
+ pciData.slot = lslot;
+ pciData.offset = VENDORID_OFFSET;
+ vendorid = asc_get_cfg_word(&pciData);
+ ASC_DBG1(3, "asc_pci_find_dev: vendorid %x\n", vendorid);
+ if (vendorid != 0xffff) {
+ pciData.offset = DEVICEID_OFFSET;
+ deviceid = asc_get_cfg_word(&pciData);
+ ASC_DBG1(3, "asc_pci_find_dev: deviceid %x\n", deviceid);
+ if ((vendorid == ASC_PCI_VENDORID) &&
+ ((deviceid == ASC_PCI_DEVICE_ID_REV_A) ||
+ (deviceid == ASC_PCI_DEVICE_ID_REV_B))) {
+ pciDevice->slotFound = lslot;
+ ASC_DBG(3, "asc_pci_find_dev: PCI_DEVICE_FOUND\n");
+ return PCI_DEVICE_FOUND;
+ } else {
+ pciData.offset = SUBCLASS_OFFSET;
+ subclass = asc_get_cfg_byte(&pciData);
+ pciData.offset = CLASSCODE_OFFSET;
+ classcode = asc_get_cfg_byte(&pciData);
+ if ((classcode & PCI_BASE_CLASS_BRIDGE_DEVICE) &&
+ (subclass & PCI_SUB_CLASS_PCI_TO_PCI_BRIDGE_CONTROLLER)) {
+ pciDevice->bridge++;
+ }
+ ASC_DBG2(3, "asc_pci_find_dev: subclass %x, classcode %x\n",
+ subclass, classcode);
+ }
+ }
+ }
+ return PCI_DEVICE_NOT_FOUND;
+}
+
+/*
+ * Read PCI configuration data into 'pciConfig'.
+ */
+STATIC void
+asc_get_pci_cfg(PCI_DEVICE *pciDevice, PCI_CONFIG_SPACE *pciConfig)
+{
+ PCI_DATA pciData;
+ uchar counter;
+ uchar *localConfig;
+
+ ASC_DBG1(4, "asc_get_pci_cfg: slot found - %d\n ",
+ pciDevice->slotFound);
+
+ pciData.type = pciDevice->type;
+ pciData.bus = pciDevice->busNumber;
+ pciData.slot = pciDevice->slotFound;
+ pciData.func = pciDevice->devFunc;
+ localConfig = (uchar *) pciConfig;
+
+ for (counter = 0; counter < sizeof(PCI_CONFIG_SPACE); counter++) {
+ pciData.offset = counter;
+ *localConfig = asc_get_cfg_byte(&pciData);
+ ASC_DBG1(4, "asc_get_pci_cfg: byte %x\n", *localConfig);
+ localConfig++;
+ }
+ ASC_DBG1(4, "asc_get_pci_cfg: counter %d\n", counter);
+}
+
+/*
+ * Read a word (16 bits) from the PCI configuration space.
+ *
+ * The configuration mechanism is checked for the correct access method.
+ */
+STATIC ushort
+asc_get_cfg_word(PCI_DATA *pciData)
+{
+ ushort tmp;
+ ulong address;
+ ulong lbus = pciData->bus;
+ ulong lslot = pciData->slot;
+ ulong lfunc = pciData->func;
+ uchar t2CFA, t2CF8;
+ ushort t1CF8, t1CFA, t1CFC, t1CFE;
+
+ ASC_DBG4(4, "asc_get_cfg_word: type %d, bus %lu, slot %lu, func %lu\n",
+ pciData->type, lbus, lslot, lfunc);
+
+ /*
+ * check type of configuration mechanism
+ */
+ if (pciData->type == 2) {
+ /*
+ * save these registers so we can restore them after we are done
+ */
+ t2CFA = inp(0xCFA); /* save PCI bus register */
+ t2CF8 = inp(0xCF8); /* save config space enable register */
+
+ /*
+ * go out and write the bus and enable registers
+ */
+ /* set for type 1 cycle, if needed */
+ outp(0xCFA, pciData->bus);
+ /* set the function number */
+ outp(0xCF8, 0x10 | (pciData->func << 1));
+
+ /*
+ * read the configuration space type 2 locations
+ */
+ tmp = (ushort) inpw(0xC000 | ((pciData->slot << 8) + pciData->offset));
+
+ /*
+ * restore the registers used for our transaction, as in
+ * asc_get_cfg_byte() below
+ */
+ outp(0xCF8, t2CF8); /* restore the enable register */
+ outp(0xCFA, t2CFA); /* restore PCI bus register */
+ } else {
+ /*
+ * type 1 configuration mechanism
+ *
+ * save the CONFIG_ADDRESS and CONFIG_DATA register values
+ */
+ t1CFC = inpw(0xCFC);
+ t1CFE = inpw(0xCFE);
+ t1CF8 = inpw(0xCF8);
+ t1CFA = inpw(0xCFA);
+
+ /*
+ * enable <31>, bus = <23:16>, slot = <15:11>,
+ * func = <10:8>, reg = <7:2>
+ */
+ address = (ulong) ((lbus << 16) | (lslot << 11) |
+ (lfunc << 8) | (pciData->offset & 0xFC) | 0x80000000L);
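+ /*
+ * For example, bus 0, slot 0x0A, function 0, offset 0 encodes as
+ * 0x80000000 | (0x0A << 11) == 0x80005000.
+ */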
+
+ /*
+ * write out the address to CONFIG_ADDRESS
+ */
+ outl(address, 0xCF8);
+
+ /*
+ * read in the word from CONFIG_DATA
+ */
+ tmp = (ushort) ((inl(0xCFC) >>
+ ((pciData->offset & 2) * 8)) & 0xFFFF);
+ }
+ ASC_DBG1(4, "asc_get_cfg_word: config data: %x\n", tmp);
+ return tmp;
+}
+
+/*
+ * Reads a byte from the PCI configuration space.
+ *
+ * The configuration mechanism is checked for the correct access method.
+ */
+STATIC uchar
+asc_get_cfg_byte(PCI_DATA *pciData)
+{
+ uchar tmp;
+ ulong address;
+ ulong lbus = pciData->bus, lslot = pciData->slot, lfunc = pciData->func;
+ uchar t2CFA, t2CF8;
+ ushort t1CF8, t1CFA, t1CFC, t1CFE;
+
+ ASC_DBG1(4, "asc_get_cfg_byte: type: %d\n", pciData->type);
+
+ /*
+ * check type of configuration mechanism
+ */
+ if (pciData->type == 2) {
+ /*
+ * save these registers so we can restore them after we are done
+ */
+ t2CFA = inp(0xCFA); /* save PCI bus register */
+ t2CF8 = inp(0xCF8); /* save config space enable register */
+
+ /*
+ * go out and write the bus and enable registers
+ */
+ /* set for type 1 cycle, if needed */
+ outp(0xCFA, pciData->bus);
+ /* set the function number */
+ outp(0xCF8, 0x10 | (pciData->func << 1));
+
+ /*
+ * read the configuration space type 2 locations
+ */
+ tmp = inp(0xC000 | ((pciData->slot << 8) + pciData->offset));
+
+ /*
+ * restore the registers used for our transaction
+ */
+ outp(0xCF8, t2CF8); /* restore the enable register */
+ outp(0xCFA, t2CFA); /* restore PCI bus register */
+ } else {
+ /*
+ * type 1 configuration mechanism
+ *
+ * save the CONFIG_ADDRESS and CONFIG_DATA register values
+ */
+ t1CFC = inpw(0xCFC);
+ t1CFE = inpw(0xCFE);
+ t1CF8 = inpw(0xCF8);
+ t1CFA = inpw(0xCFA);
+
+ /*
+ * enable <31>, bus = <23:16>, slot = <15:11>, func = <10:8>,
+ * reg = <7:2>
+ */
+ address = (ulong) ((lbus << 16) | (lslot << 11) |
+ (lfunc << 8) | (pciData->offset & 0xFC) | 0x80000000L);
+
+ /*
+ * write out the address to CONFIG_ADDRESS
+ */
+ outl(address, 0xCF8);
+
+ /*
+ * read in the byte from CONFIG_DATA
+ */
+ tmp = (uchar) ((inl(0xCFC) >> ((pciData->offset & 3) * 8)) & 0xFF);
+ }
+ ASC_DBG1(4, "asc_get_cfg_byte: config data: %x\n", tmp);
+ return tmp;
+}
+
+/*
+ * Add a 'Scsi_Cmnd' to the end of specified 'Scsi_Host'
+ * target device pending command list. Set 'pending_tidmask'
+ * to indicate a command is queued for the device.
+ *
+ * 'flag' may be either ASC_FRONT or ASC_BACK.
+ *
+ * The 'Scsi_Cmnd' host_scribble field is used as a next pointer.
+ */
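+/*
+ * For example, after asc_enqueue(shp, cmd2, 4, ASC_BACK) on a list that
+ * already holds cmd1 (hypothetical names), the chain for target 4 reads:
+ *
+ *   ASC_BOARD(shp)->pending[4] -> cmd1 -> cmd2 -> NULL
+ *
+ * where each arrow is the 'host_scribble' field cast to (Scsi_Cmnd *).
+ * asc_dequeue() below pops cmd1 from the head of this chain.
+ */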
+STATIC void
+asc_enqueue(struct Scsi_Host *shp, Scsi_Cmnd *scp, int tid, int flag)
+{
+ Scsi_Cmnd **scpp;
+
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_ASSERT(flag == ASC_FRONT || flag == ASC_BACK);
+ ASC_STATS(enqueue);
+ if (flag == ASC_FRONT) {
+ scp->host_scribble = (unsigned char *) ASC_BOARD(shp)->pending[tid];
+ ASC_BOARD(shp)->pending[tid] = (Scsi_Cmnd *) scp;
+ } else { /* ASC_BACK */
+ for (scpp = &ASC_BOARD(shp)->pending[tid]; *scpp;
+ scpp = (Scsi_Cmnd **) &(*scpp)->host_scribble) {
+ ;
+ }
+ *scpp = scp;
+ scp->host_scribble = NULL;
+ }
+ ASC_BOARD(shp)->pending_tidmask |= ASC_TIX_TO_TARGET_ID(tid);
+}
+
+/*
+ * Return first pending 'Scsi_Cmnd' on the specified 'Scsi_Host'
+ * for the specified target device. Clear the 'pending_tidmask'
+ * bit for the device if no more commands are left queued for it.
+ *
+ * The 'Scsi_Cmnd' host_scribble field is used as a next pointer.
+ */
+STATIC Scsi_Cmnd *
+asc_dequeue(struct Scsi_Host *shp, int tid)
+{
+ Scsi_Cmnd *scp;
+
+ ASC_STATS(dequeue);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ if ((scp = ASC_BOARD(shp)->pending[tid]) != NULL) {
+ ASC_BOARD(shp)->pending[tid] = (Scsi_Cmnd *) scp->host_scribble;
+ }
+ if (ASC_BOARD(shp)->pending[tid] == NULL) {
+ ASC_BOARD(shp)->pending_tidmask &= ~ASC_TIX_TO_TARGET_ID(tid);
+ }
+ return scp;
+}
+
+/*
+ * Remove the specified 'Scsi_Cmnd' from the specified 'Scsi_Host'
+ * for the specified target device. Clear the 'pending_tidmask'
+ * bit for the device if no more commands are left queued for it.
+ *
+ * The 'Scsi_Cmnd' host_scribble field is used as a next pointer.
+ *
+ * Return ASC_TRUE if the command was found and removed, otherwise
+ * return ASC_FALSE.
+ */
+STATIC int
+asc_rmqueue(struct Scsi_Host *shp, Scsi_Cmnd *scp, int tid)
+{
+ Scsi_Cmnd **scpp;
+ int ret;
+
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ret = ASC_FALSE;
+ for (scpp = &ASC_BOARD(shp)->pending[tid];
+ *scpp; scpp = (Scsi_Cmnd **) &(*scpp)->host_scribble) {
+ if (*scpp == scp) {
+ *scpp = (Scsi_Cmnd *) scp->host_scribble;
+ scp->host_scribble = NULL;
+ ASC_STATS(rmqueue);
+ ret = ASC_TRUE;
+ /*
+ * Stop the scan here: '*scpp' now points at the command after
+ * 'scp' (possibly NULL), and letting the loop increment run on
+ * a NULL pointer would chase garbage.
+ */
+ break;
+ }
+ }
+ if (ASC_BOARD(shp)->pending[tid] == NULL) {
+ ASC_BOARD(shp)->pending_tidmask &= ~ASC_TIX_TO_TARGET_ID(tid);
+ }
+ return ret;
+}
+
+
+/*
+ * --- Functions Required by the Asc Library
+ */
+
+/*
+ * Delay for 'n' milliseconds. Don't use the 'jiffies'
+ * global variable, which is only incremented on each timer
+ * interrupt (every 10 ms with HZ == 100), because this function
+ * may be called when interrupts are disabled.
+ */
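+/*
+ * Note that the delay below is a pure busy-wait: all 'n' milliseconds
+ * are spent spinning in udelay().
+ */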
+void
+DvcSleepMilliSecond(ulong n)
+{
+ ulong i;
+
+ ASC_DBG1(4, "DvcSleepMilliSecond: %lu\n", n);
+ for (i = 0; i < n; i++) {
+ udelay(1000);
+ }
+}
+
+void
+DvcDisplayString(uchar *s)
+{
+ printk(s);
+}
+
+int
+DvcEnterCritical(void)
+{
+ int flags;
+
+ save_flags(flags);
+ cli();
+ return flags;
+}
+
+void
+DvcLeaveCritical(int flags)
+{
+ restore_flags(flags);
+}
+
+/*
+ * Convert a virtual address to a physical address.
+ *
+ * Apparently Linux is loaded V=R (virtual equals real). Just return
+ * the virtual address.
+ */
+ulong
+DvcGetPhyAddr(uchar *buf_addr, ulong buf_len)
+{
+ ulong phys_addr;
+
+ phys_addr = (ulong) buf_addr;
+ return phys_addr;
+}
+
+ulong
+DvcGetSGList(ASC_DVC_VAR *asc_dvc_sg, uchar *buf_addr, ulong buf_len,
+ ASC_SG_HEAD *asc_sg_head_ptr)
+{
+ ulong buf_size;
+
+ buf_size = buf_len;
+ asc_sg_head_ptr->entry_cnt = 1;
+ asc_sg_head_ptr->sg_list[0].addr = (ulong) buf_addr;
+ asc_sg_head_ptr->sg_list[0].bytes = buf_size;
+ return buf_size;
+}
+
+/*
+ * void
+ * DvcPutScsiQ(PortAddr iop_base, ushort s_addr, ushort *outbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * Output an ASC_SCSI_Q structure to the chip
+ */
+void
+DvcPutScsiQ(PortAddr iop_base, ushort s_addr, ushort *outbuf, int words)
+{
+ int i;
+
+ ASC_DBG_PRT_HEX(2, "DvcPutScsiQ", (uchar *) outbuf, 2 * words);
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < words; i++, outbuf++) {
+ if (i == 2 || i == 10) {
+ continue;
+ }
+ AscPutChipLramData(iop_base, *outbuf);
+ }
+}
+
+/*
+ * void
+ * DvcGetQinfo(PortAddr iop_base, ushort s_addr, ushort *inbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * Input an ASC_QDONE_INFO structure from the chip
+ */
+void
+DvcGetQinfo(PortAddr iop_base, ushort s_addr, ushort *inbuf, int words)
+{
+ int i;
+
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < words; i++, inbuf++) {
+ if (i == 5) {
+ continue;
+ }
+ *inbuf = AscGetChipLramData(iop_base);
+ }
+ ASC_DBG_PRT_HEX(2, "DvcGetQinfo", (uchar *) inbuf, 2 * words);
+}
+
+/*
+ * void DvcOutPortWords(ushort iop_base, ushort *outbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * output a buffer to an i/o port address
+ */
+void
+DvcOutPortWords(ushort iop_base, ushort *outbuf, int words)
+{
+ int i;
+
+ for (i = 0; i < words; i++, outbuf++)
+ outpw(iop_base, *outbuf);
+}
+
+/*
+ * void DvcInPortWords(ushort iop_base, ushort *inbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * input a buffer from an i/o port address
+ */
+void
+DvcInPortWords(ushort iop_base, ushort *inbuf, int words)
+{
+ int i;
+
+ for (i = 0; i < words; i++, inbuf++)
+ *inbuf = inpw(iop_base);
+}
+
+
+/*
+ * void DvcOutPortDWords(PortAddr port, ulong *pdw, int dwords)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * output a buffer of 32-bit integers to an i/o port address in
+ * 16 bit integer units
+ */
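+/*
+ * Each 32-bit value is written as two consecutive outpw() calls; on a
+ * little-endian host the low-order half of each dword goes out first.
+ */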
+void
+DvcOutPortDWords(PortAddr port, ulong *pdw, int dwords)
+{
+ int i;
+ int words;
+ ushort *pw;
+
+ pw = (ushort *) pdw;
+ words = dwords << 1;
+ for(i = 0; i < words; i++, pw++) {
+ outpw(port, *pw);
+ }
+ return;
+}
+
+
+/*
+ * --- Tracing and Debugging Functions
+ */
+
+#ifdef ADVANSYS_STATS
+
+#define ASC_PRT_STATS_NEXT() \
+ if (cp) { \
+ totlen += len; \
+ leftlen -= len; \
+ if (leftlen == 0) { \
+ return totlen; \
+ } \
+ cp += len; \
+ }
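+
+/*
+ * ASC_PRT_STATS_NEXT() relies on the local variables 'cp', 'len',
+ * 'totlen' and 'leftlen' of asc_prt_stats() below. When printing into a
+ * buffer it accounts for the text just formatted, advances 'cp' past it,
+ * and returns from asc_prt_stats() as soon as the buffer is full; when
+ * 'cp' is NULL (console output) it does nothing.
+ */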
+
+/*
+ * asc_prt_stats()
+ *
+ * Note: no single line should be greater than 160 characters, cf.
+ * asc_prt_stats_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_stats(char *cp, int cplen)
+{
+ struct asc_stats *s;
+ int leftlen;
+ int totlen;
+ int len;
+
+ s = &asc_stats;
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_stats_line(cp, leftlen,
+"\nAdvanSys SCSI Host Driver Statistics:\n");
+ ASC_PRT_STATS_NEXT();
+
+ len = asc_prt_stats_line(cp, leftlen,
+" command %lu, queuecommand %lu, abort %lu, reset %lu, biosparam %lu,\n",
+ s->command, s->queuecommand, s->abort, s->reset, s->biosparam);
+ ASC_PRT_STATS_NEXT();
+
+ len = asc_prt_stats_line(cp, leftlen,
+" interrupt %lu, callback %lu, cmd_disable %lu, intr_disable %lu,\n",
+ s->interrupt, s->callback, s->cmd_disable, s->intr_disable);
+ ASC_PRT_STATS_NEXT();
+
+ len = asc_prt_stats_line(cp, leftlen,
+" error %lu, enqueue %lu, dequeue %lu, rmqueue %lu,\n",
+ s->error, s->enqueue, s->dequeue, s->rmqueue);
+ ASC_PRT_STATS_NEXT();
+
+ if (s->cont_cnt > 0) {
+ len = asc_prt_stats_line(cp, leftlen,
+" cont_cnt %lu, cont_xfer %lu: avg_xfer=%lu kb\n",
+ s->cont_cnt, s->cont_xfer, (s->cont_xfer/2)/s->cont_cnt);
+ ASC_PRT_STATS_NEXT();
+ }
+
+ if (s->sg_cnt > 0) {
+ len = asc_prt_stats_line(cp, leftlen,
+" sg_cnt %lu, sg_elem %lu, sg_xfer %lu: avg_elem=%lu, avg_size=%lu kb\n",
+ s->sg_cnt, s->sg_elem, s->sg_xfer,
+ s->sg_elem/s->sg_cnt, (s->sg_xfer/2)/s->sg_cnt);
+ ASC_PRT_STATS_NEXT();
+ }
+
+ return totlen;
+}
+
+/*
+ * asc_prt_stats_line()
+ *
+ * If 'cp' is NULL print to the console, otherwise print to a buffer.
+ *
+ * Return 0 if printing to the console, otherwise return the number of
+ * bytes written to the buffer.
+ *
+ * Note: If any single line is greater than 160 bytes the stack
+ * will be corrupted. 's[]' is defined to be 160 bytes.
+ */
+int
+asc_prt_stats_line(char *buf, int buflen, char *fmt, ...)
+{
+ va_list args;
+ int ret;
+ char s[160]; /* 2 lines */
+
+ va_start(args, fmt);
+ ret = vsprintf(s, fmt, args);
+ if (buf == NULL) {
+ (void) printk(s);
+ ret = 0;
+ } else {
+ ret = min(buflen, ret);
+ memcpy(buf, s, ret);
+ }
+ va_end(args);
+ return ret;
+}
+#endif /* ADVANSYS_STATS */
+
+#ifdef ADVANSYS_DEBUG
+/*
+ * asc_prt_scsi_host()
+ */
+STATIC void
+asc_prt_scsi_host(struct Scsi_Host *s)
+{
+ printk("Scsi_Host at addr %x\n", (unsigned) s);
+ printk(
+" next %x, extra_bytes %u, host_busy %u, host_no %d, last_reset %d,\n",
+ (unsigned) s->next, s->extra_bytes, s->host_busy, s->host_no,
+ s->last_reset);
+
+ printk(
+" host_wait %x, host_queue %x, hostt %x, block %x,\n",
+ (unsigned) s->host_wait, (unsigned) s->host_queue,
+ (unsigned) s->hostt, (unsigned) s->block);
+
+ printk(
+" wish_block %d, base %x, io_port %d, n_io_port %d, irq %d, dma_channel %d,\n",
+ s->wish_block, (unsigned) s->base, s->io_port, s->n_io_port,
+ s->irq, s->dma_channel);
+
+ printk(
+" this_id %d, can_queue %d,\n", s->this_id, s->can_queue);
+
+ printk(
+" cmd_per_lun %d, sg_tablesize %d, unchecked_isa_dma %d, loaded_as_module %d\n",
+ s->cmd_per_lun, s->sg_tablesize, s->unchecked_isa_dma,
+ s->loaded_as_module);
+
+ printk("hostdata (struct asc_board)\n");
+ asc_prt_dvc_var(&ASC_BOARD(s)->board);
+ asc_prt_dvc_cfg(&ASC_BOARD(s)->cfg);
+ printk(" overrun_buf %x\n", (unsigned) &ASC_BOARD(s)->overrun_buf[0]);
+}
+
+/*
+ * asc_prt_dvc_var()
+ */
+STATIC void
+asc_prt_dvc_var(ASC_DVC_VAR *h)
+{
+ printk("ASC_DVC_VAR at addr %x\n", (unsigned) h);
+
+ printk(
+" iop_base %x, err_code %x, dvc_cntl %x, bug_fix_cntl %d,\n",
+ h->iop_base, h->err_code, h->dvc_cntl, h->bug_fix_cntl);
+
+ printk(
+" bus_type %d, isr_callback %x, exe_callback %x, init_sdtr %x,\n",
+ h->bus_type, (unsigned) h->isr_callback, (unsigned) h->exe_callback,
+ (unsigned) h->init_sdtr);
+
+ printk(
+" sdtr_done %x, use_tagged_qng %x, unit_not_ready %x, chip_no %x,\n",
+ (unsigned) h->sdtr_done, (unsigned) h->use_tagged_qng,
+ (unsigned) h->unit_not_ready, (unsigned) h->chip_no);
+
+ printk(
+" queue_full_or_busy %x, start_motor %x, scsi_reset_wait %x, irq_no %x,\n",
+ (unsigned) h->queue_full_or_busy, (unsigned) h->start_motor,
+ (unsigned) h->scsi_reset_wait, (unsigned) h->irq_no);
+
+ printk(
+" is_in_int %x, max_total_qng %x, cur_total_qng %x, in_critical_cnt %x,\n",
+ (unsigned) h->is_in_int, (unsigned) h->max_total_qng,
+ (unsigned) h->cur_total_qng, (unsigned) h->in_critical_cnt);
+
+ printk(
+" last_q_shortage %x, init_state %x, no_scam %x, pci_fix_asyn_xfer %x,\n",
+ (unsigned) h->last_q_shortage, (unsigned) h->init_state,
+ (unsigned) h->no_scam, (unsigned) h->pci_fix_asyn_xfer);
+
+ printk(
+" int_count %ld, req_count %ld, busy_count %ld, cfg %x, saved_ptr2func %x\n",
+ h->int_count, h->req_count, h->busy_count, (unsigned) h->cfg,
+ (unsigned) h->saved_ptr2func);
+}
+
+/*
+ * asc_prt_dvc_cfg()
+ */
+STATIC void
+asc_prt_dvc_cfg(ASC_DVC_CFG *h)
+{
+ printk("ASC_DVC_CFG at addr %x\n", (unsigned) h);
+
+ printk(
+" can_tagged_qng %x, cmd_qng_enabled %x, disc_enable %x, res %x,\n",
+ h->can_tagged_qng, h->cmd_qng_enabled, h->disc_enable, h->res);
+
+ printk(
+" chip_scsi_id %d, isa_dma_speed %d, isa_dma_channel %d, chip_version %d,\n",
+ h->chip_scsi_id, h->isa_dma_speed, h->isa_dma_channel,
+ h->chip_version);
+
+ printk(
+" pci_device_id %d, lib_serial_no %d, lib_version %d, mcode_date %d,\n",
+ h->pci_device_id, h->lib_serial_no, h->lib_version, h->mcode_date);
+
+ printk(
+" mcode_version %d, overrun_buf %x\n",
+ h->mcode_version, (unsigned) h->overrun_buf);
+}
+
+/*
+ * asc_prt_scsi_q()
+ */
+STATIC void
+asc_prt_scsi_q(ASC_SCSI_Q *q)
+{
+ ASC_SG_HEAD *sgp;
+ int i;
+
+ printk("ASC_SCSI_Q at addr %x\n", (unsigned) q);
+
+ printk(
+" target_ix %u, target_lun %u, srb_ptr %x, tag_code %u,\n",
+ q->q2.target_ix, q->q1.target_lun,
+ (unsigned) q->q2.srb_ptr, q->q2.tag_code);
+
+ printk(
+" data_addr %x, data_cnt %lu, sense_addr %x, sense_len %u,\n",
+ (unsigned) q->q1.data_addr, q->q1.data_cnt,
+ (unsigned) q->q1.sense_addr, q->q1.sense_len);
+
+ printk(
+" cdbptr %x, cdb_len %u, sg_head %x, sg_queue_cnt %u\n",
+ (unsigned) q->cdbptr, q->q2.cdb_len,
+ (unsigned) q->sg_head, q->q1.sg_queue_cnt);
+
+ if (q->sg_head) {
+ sgp = q->sg_head;
+ printk("ASC_SG_HEAD at addr %x\n", (unsigned) sgp);
+ printk(" entry_cnt %u, queue_cnt %u\n", sgp->entry_cnt, sgp->queue_cnt);
+ for (i = 0; i < sgp->entry_cnt; i++) {
+ printk(" [%u]: addr %x, bytes %lu\n",
+ i, (unsigned) sgp->sg_list[i].addr, sgp->sg_list[i].bytes);
+ }
+
+ }
+}
+
+/*
+ * asc_prt_qdone_info()
+ */
+STATIC void
+asc_prt_qdone_info(ASC_QDONE_INFO *q)
+{
+ printk("ASC_QDONE_INFO at addr %x\n", (unsigned) q);
+ printk(
+" srb_ptr %x, target_ix %u, cdb_len %u, tag_code %u, done_stat %x\n",
+ (unsigned) q->d2.srb_ptr, q->d2.target_ix, q->d2.cdb_len,
+ q->d2.tag_code, q->d3.done_stat);
+ printk(
+" host_stat %x, scsi_stat %x, scsi_msg %x\n",
+ q->d3.host_stat, q->d3.scsi_stat, q->d3.scsi_msg);
+}
+
+/*
+ * asc_prt_hex()
+ *
+ * Print hexadecimal output in 4 byte groupings, 32 bytes
+ * (8 double-words) per line.
+ */
+STATIC void
+asc_prt_hex(char *f, uchar *s, int l)
+{
+ int i;
+ int j;
+ int k;
+ int m;
+
+ printk("%s: (%d bytes)\n", f, l);
+
+ for (i = 0; i < l; i += 32) {
+
+ /* Display a maximum of 8 double-words per line. */
+ if ((k = (l - i) / 4) >= 8) {
+ k = 8;
+ m = 0;
+ } else {
+ m = (l - i) % 4 ;
+ }
+
+ for (j = 0; j < k; j++) {
+ printk(" %2.2X%2.2X%2.2X%2.2X",
+ (unsigned) s[i+(j*4)], (unsigned) s[i+(j*4)+1],
+ (unsigned) s[i+(j*4)+2], (unsigned) s[i+(j*4)+3]);
+ }
+
+ /* After the loop above j == k, so s[i+(j*4)] is the first unprinted byte. */
+ switch (m) {
+ case 0:
+ default:
+ break;
+ case 1:
+ printk(" %2.2X",
+ (unsigned) s[i+(j*4)]);
+ break;
+ case 2:
+ printk(" %2.2X%2.2X",
+ (unsigned) s[i+(j*4)],
+ (unsigned) s[i+(j*4)+1]);
+ break;
+ case 3:
+ printk(" %2.2X%2.2X%2.2X",
+ (unsigned) s[i+(j*4)],
+ (unsigned) s[i+(j*4)+1],
+ (unsigned) s[i+(j*4)+2]);
+ break;
+ }
+
+ printk("\n");
+ }
+}
+
+/*
+ * interrupts_enabled()
+ *
+ * Return 1 if interrupts are enabled, otherwise return 0.
+ */
+STATIC int
+interrupts_enabled(void)
+{
+ int flags;
+
+ save_flags(flags);
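+ /* On x86, bit 0x0200 of the saved flags is IF, the interrupt enable flag. */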
+ if (flags & 0x0200) {
+ return ASC_TRUE;
+ } else {
+ return ASC_FALSE;
+ }
+}
+
+#endif /* ADVANSYS_DEBUG */
+
+
+/*
+ * --- Asc Library Functions
+ */
+
+ushort
+AscGetEisaChipCfg(
+ PortAddr iop_base
+)
+{
+ PortAddr eisa_cfg_iop;
+
+ eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
+ (PortAddr) (ASC_EISA_CFG_IOP_MASK);
+ return (inpw(eisa_cfg_iop));
+}
+
+uchar
+AscSetChipScsiID(
+ PortAddr iop_base,
+ uchar new_host_id
+)
+{
+ ushort cfg_lsw;
+
+ if (AscGetChipScsiID(iop_base) == new_host_id) {
+ return (new_host_id);
+ }
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ cfg_lsw &= 0xF8FF;
+ cfg_lsw |= (ushort) ((new_host_id & ASC_MAX_TID) << 8);
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ return (AscGetChipScsiID(iop_base));
+}
+
+ushort
+AscGetChipBiosAddress(
+ PortAddr iop_base,
+ ushort bus_type
+)
+{
+ ushort cfg_lsw;
+ ushort bios_addr;
+
+ if ((bus_type & ASC_IS_EISA) != 0) {
+ cfg_lsw = AscGetEisaChipCfg(iop_base);
+ cfg_lsw &= 0x000F;
+ bios_addr = (ushort) (ASC_BIOS_MIN_ADDR +
+ (cfg_lsw * ASC_BIOS_BANK_SIZE));
+ return (bios_addr);
+ }
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ bios_addr = (ushort) (((cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE) + ASC_BIOS_MIN_ADDR);
+ return (bios_addr);
+}
+
+uchar
+AscGetChipVersion(
+ PortAddr iop_base,
+ ushort bus_type
+)
+{
+ if ((bus_type & ASC_IS_EISA) != 0) {
+
+ PortAddr eisa_iop;
+ uchar revision;
+
+ eisa_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
+ (PortAddr) ASC_EISA_REV_IOP_MASK;
+ revision = inp(eisa_iop);
+ return ((uchar) ((ASC_CHIP_MIN_VER_EISA - 1) + revision));
+ }
+ return (AscGetChipVerNo(iop_base));
+}
+
+ushort
+AscGetChipBusType(
+ PortAddr iop_base
+)
+{
+ ushort chip_ver;
+
+ chip_ver = AscGetChipVerNo(iop_base);
+ if ((chip_ver >= ASC_CHIP_MIN_VER_VL) &&
+ (chip_ver <= ASC_CHIP_MAX_VER_VL)) {
+ if (((iop_base & 0x0C30) == 0x0C30) ||
+ ((iop_base & 0x0C50) == 0x0C50)) {
+ return (ASC_IS_EISA);
+ }
+ return (ASC_IS_VL);
+ } else if ((chip_ver >= ASC_CHIP_MIN_VER_ISA) &&
+ (chip_ver <= ASC_CHIP_MAX_VER_ISA)) {
+ if (chip_ver >= ASC_CHIP_MIN_VER_ISA_PNP) {
+ return (ASC_IS_ISAPNP);
+ }
+ return (ASC_IS_ISA);
+ } else if ((chip_ver >= ASC_CHIP_MIN_VER_PCI) &&
+ (chip_ver <= ASC_CHIP_MAX_VER_PCI)) {
+ return (ASC_IS_PCI);
+ } else {
+ return (0);
+ }
+}
+
+void
+AscEnableIsaDma(
+ uchar dma_channel
+)
+{
+ if (dma_channel < 4) {
+ outp(0x000B, (ushort) (0xC0 | dma_channel));
+ outp(0x000A, dma_channel);
+ } else if (dma_channel < 8) {
+
+ outp(0x00D6, (ushort) (0xC0 | (dma_channel - 4)));
+ outp(0x00D4, (ushort) (dma_channel - 4));
+ }
+ return;
+}
+
+ulong
+AscLoadMicroCode(
+ PortAddr iop_base,
+ ushort s_addr,
+ ushort dosfar * mcode_buf,
+ ushort mcode_size
+)
+{
+ ulong chksum;
+ ushort mcode_word_size;
+ ushort mcode_chksum;
+
+ mcode_word_size = (ushort) (mcode_size >> 1);
+ AscMemWordSetLram(iop_base, s_addr, 0, mcode_word_size);
+ AscMemWordCopyToLram(iop_base, s_addr, mcode_buf, mcode_word_size);
+
+ chksum = AscMemSumLramWord(iop_base, s_addr, mcode_word_size);
+ mcode_chksum = (ushort) AscMemSumLramWord(iop_base,
+ (ushort) ASC_CODE_SEC_BEG,
+ (ushort) ((mcode_size - s_addr - (ushort) ASC_CODE_SEC_BEG) / 2));
+ AscWriteLramWord(iop_base, ASCV_MCODE_CHKSUM_W, mcode_chksum);
+ AscWriteLramWord(iop_base, ASCV_MCODE_SIZE_W, mcode_size);
+ return (chksum);
+}
+
+uchar _hextbl_[16] =
+{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'A', 'B', 'C', 'D', 'E', 'F'};
+
+uchar _isa_pnp_inited = 0;
+
+PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] =
+{
+ 0x100, ASC_IOADR_1, 0x120, ASC_IOADR_2, 0x140, ASC_IOADR_3, ASC_IOADR_4,
+ ASC_IOADR_5, ASC_IOADR_6, ASC_IOADR_7, ASC_IOADR_8
+};
+
+PortAddr
+AscSearchIOPortAddr(
+ PortAddr iop_beg,
+ ushort bus_type
+)
+{
+ if (bus_type & ASC_IS_VL) {
+ while ((iop_beg = AscSearchIOPortAddr11(iop_beg)) != 0) {
+ if (AscGetChipVersion(iop_beg, bus_type) <= ASC_CHIP_MAX_VER_VL) {
+ return (iop_beg);
+ }
+ }
+ return (0);
+ }
+ if (bus_type & ASC_IS_ISA) {
+ if (_isa_pnp_inited == 0) {
+ AscSetISAPNPWaitForKey();
+ _isa_pnp_inited++;
+ }
+ while ((iop_beg = AscSearchIOPortAddr11(iop_beg)) != 0) {
+ if ((AscGetChipVersion(iop_beg, bus_type) & ASC_CHIP_VER_ISA_BIT) != 0) {
+ return (iop_beg);
+ }
+ }
+ return (0);
+ }
+ if (bus_type & ASC_IS_EISA) {
+ if ((iop_beg = AscSearchIOPortAddrEISA(iop_beg)) != 0) {
+ return (iop_beg);
+ }
+ return (0);
+ }
+ return (0);
+}
+
+PortAddr
+AscSearchIOPortAddr11(
+ PortAddr s_addr
+)
+{
+
+ int i;
+ PortAddr iop_base;
+
+ for (i = 0; i < ASC_IOADR_TABLE_MAX_IX; i++) {
+ if (_asc_def_iop_base[i] > s_addr) {
+ break;
+ }
+ }
+ for (; i < ASC_IOADR_TABLE_MAX_IX; i++) {
+ iop_base = _asc_def_iop_base[i];
+ if (AscFindSignature(iop_base)) {
+ return (iop_base);
+ }
+ }
+ return (0);
+}
+
+int
+AscFindSignature(
+ PortAddr iop_base
+)
+{
+ ushort sig_word;
+
+ if ((inp((PortAddr) (iop_base + 1)) & 0xFF) == (uchar) ASC_1000_ID1B) {
+ sig_word = inpw(iop_base);
+ if ((sig_word == (ushort) ASC_1000_ID0W) ||
+ (sig_word == (ushort) ASC_1000_ID0W_FIX)) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
+void
+AscToggleIRQAct(
+ PortAddr iop_base
+)
+{
+ AscSetChipStatus(iop_base, CIW_IRQ_ACT);
+ AscSetChipStatus(iop_base, 0);
+ return;
+}
+
+#if CC_INIT_INQ_DISPLAY
+
+#endif
+
+void
+AscSetISAPNPWaitForKey(
+ void)
+{
+
+ outp(ASC_ISA_PNP_PORT_ADDR, 0x02);
+ outp(ASC_ISA_PNP_PORT_WRITE, 0x02);
+ return;
+}
+
+uchar
+AscGetChipIRQ(
+ PortAddr iop_base,
+ ushort bus_type
+)
+{
+ ushort cfg_lsw;
+ uchar chip_irq;
+
+ if ((bus_type & ASC_IS_EISA) != 0) {
+
+ cfg_lsw = AscGetEisaChipCfg(iop_base);
+ chip_irq = (uchar) (((cfg_lsw >> 8) & 0x07) + 10);
+ if ((chip_irq == 13) || (chip_irq > 15)) {
+
+ return (0);
+ }
+ return (chip_irq);
+ } else {
+
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+
+ if ((bus_type & ASC_IS_VL) != 0) {
+
+ chip_irq = (uchar) (((cfg_lsw >> 2) & 0x07));
+ if ((chip_irq == 0) ||
+ (chip_irq == 4) ||
+ (chip_irq == 7)) {
+ return (0);
+ }
+ return ((uchar) (chip_irq + (ASC_MIN_IRQ_NO - 1)));
+ }
+ chip_irq = (uchar) (((cfg_lsw >> 2) & 0x03));
+ if (chip_irq == 3)
+ chip_irq += (uchar) 2;
+ return ((uchar) (chip_irq + ASC_MIN_IRQ_NO));
+ }
+}
+
+uchar
+AscSetChipIRQ(
+ PortAddr iop_base,
+ uchar irq_no,
+ ushort bus_type
+)
+{
+ ushort cfg_lsw;
+
+ if ((bus_type & ASC_IS_VL) != 0) {
+
+ if (irq_no != 0) {
+ if ((irq_no < ASC_MIN_IRQ_NO) || (irq_no > ASC_MAX_IRQ_NO)) {
+ irq_no = 0;
+ } else {
+ irq_no -= (uchar) ((ASC_MIN_IRQ_NO - 1));
+ }
+ }
+ cfg_lsw = (ushort) (AscGetChipCfgLsw(iop_base) & 0xFFE3);
+ cfg_lsw |= (ushort) 0x0010;
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ AscToggleIRQAct(iop_base);
+
+ cfg_lsw = (ushort) (AscGetChipCfgLsw(iop_base) & 0xFFE0);
+ cfg_lsw |= (ushort) ((irq_no & 0x07) << 2);
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ AscToggleIRQAct(iop_base);
+
+ return (AscGetChipIRQ(iop_base, bus_type));
+
+ } else if ((bus_type & (ASC_IS_ISA)) != 0) {
+
+ if (irq_no == 15)
+ irq_no -= (uchar) 2;
+ irq_no -= (uchar) ASC_MIN_IRQ_NO;
+ cfg_lsw = (ushort) (AscGetChipCfgLsw(iop_base) & 0xFFF3);
+ cfg_lsw |= (ushort) ((irq_no & 0x03) << 2);
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ return (AscGetChipIRQ(iop_base, bus_type));
+ } else {
+
+ return (0);
+ }
+}
+
+uchar
+AscGetChipScsiCtrl(
+ PortAddr iop_base
+)
+{
+ uchar sc;
+
+ AscSetBank(iop_base, 1);
+ sc = inp(iop_base + IOP_REG_SC);
+ AscSetBank(iop_base, 0);
+ return (sc);
+}
+
+extern uchar _sdtr_period_tbl_[];
+
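+/*
+ * AscIsrChipHalted() - service a microcode halt condition.
+ *
+ * Judging from the code below, the halt codes handled are:
+ *   ASC_HALT_EXTMSG_IN      - an extended SDTR message came in; accept it
+ *                             or answer with our own SDTR message.
+ *   ASC_HALT_CHK_CONDITION  - set up an automatic REQUEST SENSE for the
+ *                             halted request and re-ready its queue entry.
+ *   ASC_HALT_SDTR_REJECTED  - the target rejected our SDTR message; fall
+ *                             back to asynchronous transfers.
+ *   ASC_HALT_SS_QUEUE_FULL  - the target returned QUEUE FULL; mark it busy
+ *                             and reduce its tagged queue depth.
+ */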
+int
+AscIsrChipHalted(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ SDTR_XMSG sdtr_xmsg;
+ SDTR_XMSG out_msg;
+ ushort halt_q_addr;
+ int sdtr_accept;
+ ushort int_halt_code;
+ ASC_SCSI_BIT_ID_TYPE scsi_busy;
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ PortAddr iop_base;
+ uchar tag_code;
+ uchar q_status;
+ uchar halt_qp;
+ uchar sdtr_data;
+ uchar target_ix;
+ uchar q_cntl, tid_no;
+ uchar cur_dvc_qng;
+ uchar asyn_sdtr;
+ uchar scsi_status;
+
+ iop_base = asc_dvc->iop_base;
+ int_halt_code = AscReadLramWord(iop_base, ASCV_HALTCODE_W);
+
+ halt_qp = AscReadLramByte(iop_base, ASCV_CURCDB_B);
+ halt_q_addr = ASC_QNO_TO_QADDR(halt_qp);
+ target_ix = AscReadLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_TARGET_IX));
+ q_cntl = AscReadLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL));
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ target_id = (uchar) ASC_TID_TO_TARGET_ID(tid_no);
+ if (asc_dvc->pci_fix_asyn_xfer & target_id) {
+
+ asyn_sdtr = ASYN_SDTR_DATA_FIX_PCI_REV_AB;
+ } else {
+ asyn_sdtr = 0;
+ }
+ if (int_halt_code == ASC_HALT_EXTMSG_IN) {
+
+ AscMemWordCopyFromLram(iop_base,
+ ASCV_MSGIN_BEG,
+ (ushort dosfar *) & sdtr_xmsg,
+ (ushort) (sizeof (SDTR_XMSG) >> 1));
+ if ((sdtr_xmsg.msg_type == MS_EXTEND) &&
+ (sdtr_xmsg.msg_len == MS_SDTR_LEN)) {
+ sdtr_accept = TRUE;
+ if (sdtr_xmsg.msg_req == MS_SDTR_CODE) {
+ if (sdtr_xmsg.req_ack_offset > ASC_SYN_MAX_OFFSET) {
+
+ sdtr_accept = FALSE;
+ sdtr_xmsg.req_ack_offset = ASC_SYN_MAX_OFFSET;
+ }
+ sdtr_data = AscCalSDTRData(sdtr_xmsg.xfer_period,
+ sdtr_xmsg.req_ack_offset);
+ if (sdtr_xmsg.req_ack_offset == 0) {
+
+ q_cntl &= ~QC_MSG_OUT;
+ asc_dvc->init_sdtr &= ~target_id;
+ asc_dvc->sdtr_done &= ~target_id;
+ AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
+ } else if ((sdtr_data == 0xFF)) {
+
+ q_cntl |= QC_MSG_OUT;
+ asc_dvc->init_sdtr &= ~target_id;
+ asc_dvc->sdtr_done &= ~target_id;
+ AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
+ } else {
+ if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
+
+ q_cntl &= ~QC_MSG_OUT;
+ asc_dvc->sdtr_done |= target_id;
+ asc_dvc->init_sdtr |= target_id;
+ asc_dvc->pci_fix_asyn_xfer &= ~target_id;
+ AscSetChipSDTR(iop_base, sdtr_data, tid_no);
+ } else {
+
+ q_cntl |= QC_MSG_OUT;
+
+ AscMsgOutSDTR(iop_base,
+ sdtr_xmsg.xfer_period,
+ sdtr_xmsg.req_ack_offset);
+ asc_dvc->pci_fix_asyn_xfer &= ~target_id;
+ AscSetChipSDTR(iop_base, sdtr_data, tid_no);
+ asc_dvc->sdtr_done |= target_id;
+ asc_dvc->init_sdtr |= target_id;
+ }
+ }
+
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
+ q_cntl);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ }
+ }
+ } else if (int_halt_code == ASC_HALT_CHK_CONDITION) {
+
+ q_cntl |= QC_REQ_SENSE;
+ if (((asc_dvc->init_sdtr & target_id) != 0) &&
+ ((asc_dvc->sdtr_done & target_id) != 0)) {
+
+ sdtr_data = AscReadLramByte(iop_base,
+ (ushort) ((ushort) ASCV_SDTR_DATA_BEG + (ushort) tid_no));
+ AscMsgOutSDTR(iop_base,
+ _sdtr_period_tbl_[(sdtr_data >> 4) & (uchar) (ASC_SYN_XFER_NO - 1)],
+ (uchar) (sdtr_data & (uchar) ASC_SYN_MAX_OFFSET));
+ q_cntl |= QC_MSG_OUT;
+ }
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
+ q_cntl);
+
+ tag_code = AscReadLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_TAG_CODE));
+ tag_code &= 0xDC;
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_TAG_CODE),
+ tag_code);
+
+ q_status = AscReadLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_STATUS));
+ q_status |= (QS_READY | QS_BUSY);
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ q_status);
+
+ scsi_busy = AscReadLramByte(iop_base,
+ (ushort) ASCV_SCSIBUSY_B);
+ scsi_busy &= ~target_id;
+ AscWriteLramByte(iop_base, (ushort) ASCV_SCSIBUSY_B, scsi_busy);
+
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (int_halt_code == ASC_HALT_SDTR_REJECTED) {
+
+ AscMemWordCopyFromLram(iop_base,
+ ASCV_MSGOUT_BEG,
+ (ushort dosfar *) & out_msg,
+ (ushort) (sizeof (SDTR_XMSG) >> 1));
+
+ if ((out_msg.msg_type == MS_EXTEND) &&
+ (out_msg.msg_len == MS_SDTR_LEN) &&
+ (out_msg.msg_req == MS_SDTR_CODE)) {
+
+ asc_dvc->init_sdtr &= ~target_id;
+ asc_dvc->sdtr_done &= ~target_id;
+ AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
+
+ } else {
+
+ }
+
+ q_cntl &= ~QC_MSG_OUT;
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
+ q_cntl);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) {
+
+ scsi_status = AscReadLramByte(iop_base,
+ (ushort) ((ushort) halt_q_addr + (ushort) ASC_SCSIQ_SCSI_STATUS));
+ cur_dvc_qng = AscReadLramByte(iop_base,
+ (ushort) ((ushort) ASC_QADR_BEG + (ushort) target_ix));
+ if ((cur_dvc_qng > 0) &&
+ (asc_dvc->cur_dvc_qng[tid_no] > 0)) {
+
+ scsi_busy = AscReadLramByte(iop_base,
+ (ushort) ASCV_SCSIBUSY_B);
+ scsi_busy |= target_id;
+ AscWriteLramByte(iop_base,
+ (ushort) ASCV_SCSIBUSY_B, scsi_busy);
+ asc_dvc->queue_full_or_busy |= target_id;
+
+ if (scsi_status == SS_QUEUE_FULL) {
+ if (cur_dvc_qng > ASC_MIN_TAGGED_CMD) {
+ cur_dvc_qng -= 1;
+ asc_dvc->max_dvc_qng[tid_no] = cur_dvc_qng;
+
+ AscWriteLramByte(iop_base,
+ (ushort) ((ushort) ASCV_MAX_DVC_QNG_BEG + (ushort) tid_no),
+ cur_dvc_qng);
+ }
+ }
+ }
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ }
+ return (0);
+}
+
+uchar
+_AscCopyLramScsiDoneQ(
+ PortAddr iop_base,
+ ushort q_addr,
+ ASC_QDONE_INFO dosfar * scsiq,
+ ulong max_dma_count
+)
+{
+ ushort _val;
+ uchar sg_queue_cnt;
+
+ DvcGetQinfo(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_DONE_INFO_BEG),
+ (ushort dosfar *) scsiq,
+ (ushort) ((sizeof (ASC_SCSIQ_2) + sizeof (ASC_SCSIQ_3)) / 2));
+
+#if !CC_LITTLE_ENDIAN_HOST
+ AscAdjEndianQDoneInfo(scsiq);
+#endif
+
+ _val = AscReadLramWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS));
+ scsiq->q_status = (uchar) _val;
+ scsiq->q_no = (uchar) (_val >> 8);
+
+ _val = AscReadLramWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_CNTL));
+ scsiq->cntl = (uchar) _val;
+ sg_queue_cnt = (uchar) (_val >> 8);
+
+ _val = AscReadLramWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_SENSE_LEN));
+ scsiq->sense_len = (uchar) _val;
+ scsiq->user_def = (uchar) (_val >> 8);
+
+ scsiq->remain_bytes = AscReadLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_DW_REMAIN_XFER_CNT));
+ scsiq->remain_bytes &= max_dma_count;
+
+ return (sg_queue_cnt);
+}
+
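+/*
+ * AscIsrQDone() - pull one completed request off the microcode done queue.
+ *
+ * Judging from the code below, the return value is used as follows:
+ *   0    - the done queue was empty.
+ *   1    - a request completed normally (its callback is invoked unless
+ *          QC_NO_CALLBACK is set).
+ *   0x11 - a request was removed, but it was aborted or had no srb_ptr.
+ *   0x80 - a fatal queueing error was detected.
+ */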
+int
+AscIsrQDone(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ uchar next_qp;
+ uchar i;
+ uchar n_q_used;
+ uchar sg_list_qp;
+ uchar sg_queue_cnt;
+ uchar done_q_tail;
+
+ uchar tid_no;
+ ASC_SCSI_BIT_ID_TYPE scsi_busy;
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ PortAddr iop_base;
+ ushort q_addr;
+ ushort sg_q_addr;
+ uchar cur_target_qng;
+ ASC_QDONE_INFO scsiq_buf;
+ ASC_QDONE_INFO dosfar *scsiq;
+ int false_overrun;
+ ASC_ISR_CALLBACK asc_isr_callback;
+
+ uchar tag_code;
+
+#if CC_LINK_BUSY_Q
+ ushort n_busy_q_done;
+
+#endif
+
+ iop_base = asc_dvc->iop_base;
+ asc_isr_callback = (ASC_ISR_CALLBACK) asc_dvc->isr_callback;
+
+ n_q_used = 1;
+ scsiq = (ASC_QDONE_INFO dosfar *) & scsiq_buf;
+ done_q_tail = (uchar) AscGetVarDoneQTail(iop_base);
+ q_addr = ASC_QNO_TO_QADDR(done_q_tail);
+ next_qp = AscReadLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_FWD));
+ if (next_qp != ASC_QLINK_END) {
+
+ AscPutVarDoneQTail(iop_base, next_qp);
+ q_addr = ASC_QNO_TO_QADDR(next_qp);
+
+ sg_queue_cnt = _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq, asc_dvc->max_dma_count);
+
+ AscWriteLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ (uchar) (scsiq->q_status & (uchar) ~ (QS_READY | QS_ABORTED)));
+ tid_no = ASC_TIX_TO_TID(scsiq->d2.target_ix);
+ target_id = ASC_TIX_TO_TARGET_ID(scsiq->d2.target_ix);
+ if ((scsiq->cntl & QC_SG_HEAD) != 0) {
+ sg_q_addr = q_addr;
+ sg_list_qp = next_qp;
+ for (i = 0; i < sg_queue_cnt; i++) {
+ sg_list_qp = AscReadLramByte(iop_base,
+ (ushort) (sg_q_addr + (ushort) ASC_SCSIQ_B_FWD));
+ sg_q_addr = ASC_QNO_TO_QADDR(sg_list_qp);
+ if (sg_list_qp == ASC_QLINK_END) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SG_Q_LINKS);
+ scsiq->d3.done_stat = QD_WITH_ERROR;
+ scsiq->d3.host_stat = QHSTA_D_QDONE_SG_LIST_CORRUPTED;
+ goto FATAL_ERR_QDONE;
+ }
+ AscWriteLramByte(iop_base,
+ (ushort) (sg_q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ QS_FREE);
+ }
+
+ n_q_used = sg_queue_cnt + 1;
+ AscPutVarDoneQTail(iop_base, sg_list_qp);
+ }
+ if (asc_dvc->queue_full_or_busy & target_id) {
+
+ cur_target_qng = AscReadLramByte(iop_base,
+ (ushort) ((ushort) ASC_QADR_BEG + (ushort) scsiq->d2.target_ix));
+ if (cur_target_qng < asc_dvc->max_dvc_qng[tid_no]) {
+ scsi_busy = AscReadLramByte(iop_base,
+ (ushort) ASCV_SCSIBUSY_B);
+ scsi_busy &= ~target_id;
+ AscWriteLramByte(iop_base,
+ (ushort) ASCV_SCSIBUSY_B, scsi_busy);
+ asc_dvc->queue_full_or_busy &= ~target_id;
+ }
+ }
+ if (asc_dvc->cur_total_qng >= n_q_used) {
+ asc_dvc->cur_total_qng -= n_q_used;
+ if (asc_dvc->cur_dvc_qng[tid_no] != 0) {
+ asc_dvc->cur_dvc_qng[tid_no]--;
+ }
+ } else {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CUR_QNG);
+ scsiq->d3.done_stat = QD_WITH_ERROR;
+ goto FATAL_ERR_QDONE;
+ }
+
+ if ((scsiq->d2.srb_ptr == 0UL) ||
+ ((scsiq->q_status & QS_ABORTED) != 0)) {
+
+ return (0x11);
+ } else if (scsiq->q_status == QS_DONE) {
+
+ false_overrun = FALSE;
+
+ if (asc_dvc->bug_fix_cntl) {
+ if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ADD_ONE_BYTE) {
+ tag_code = AscReadLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_TAG_CODE));
+ if (tag_code & ASC_TAG_FLAG_ADD_ONE_BYTE) {
+ if (scsiq->remain_bytes != 0UL) {
+ scsiq->remain_bytes--;
+ if (scsiq->remain_bytes == 0UL) {
+ false_overrun = TRUE;
+ }
+ }
+ }
+ }
+ }
+ if ((scsiq->d3.done_stat == QD_WITH_ERROR) &&
+ (scsiq->d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
+ if ((scsiq->cntl & (QC_DATA_IN | QC_DATA_OUT)) == 0) {
+ scsiq->d3.done_stat = QD_NO_ERROR;
+ scsiq->d3.host_stat = QHSTA_NO_ERROR;
+ } else if (false_overrun) {
+ scsiq->d3.done_stat = QD_NO_ERROR;
+ scsiq->d3.host_stat = QHSTA_NO_ERROR;
+ }
+ }
+#if CC_CLEAR_LRAM_SRB_PTR
+ AscWriteLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_D_SRBPTR),
+ asc_dvc->int_count);
+#endif
+
+ if ((scsiq->cntl & QC_NO_CALLBACK) == 0) {
+ (*asc_isr_callback) (asc_dvc, scsiq);
+ } else {
+ if ((AscReadLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_CDB_BEG)) ==
+ SCSICMD_StartStopUnit)) {
+
+ asc_dvc->unit_not_ready &= ~target_id;
+ if (scsiq->d3.done_stat != QD_NO_ERROR) {
+ asc_dvc->start_motor &= ~target_id;
+ }
+ }
+ }
+
+#if CC_LINK_BUSY_Q
+ n_busy_q_done = AscIsrExeBusyQueue(asc_dvc, tid_no);
+ if (n_busy_q_done == 0) {
+
+ i = tid_no + 1;
+ while (TRUE) {
+ if (i > ASC_MAX_TID)
+ i = 0;
+ if (i == tid_no)
+ break;
+ n_busy_q_done = AscIsrExeBusyQueue(asc_dvc, i);
+ if (n_busy_q_done != 0)
+ break;
+ i++;
+ }
+ }
+ if (n_busy_q_done == 0xFFFF)
+ return (0x80);
+#endif
+
+ return (1);
+ } else {
+
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_Q_STATUS);
+
+ FATAL_ERR_QDONE:
+ if ((scsiq->cntl & QC_NO_CALLBACK) == 0) {
+ (*asc_isr_callback) (asc_dvc, scsiq);
+ }
+ return (0x80);
+ }
+ }
+ return (0);
+}
+
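+/*
+ * AscISR() - interrupt service entry point for one board.
+ *
+ * The flow below: bump the interrupt count, guard against re-entry and
+ * critical sections, acknowledge the chip interrupt, let
+ * AscIsrChipHalted() deal with a halted microcode when CSW_HALTED is set,
+ * then drain completed requests with AscIsrQDone(). Returns TRUE if an
+ * interrupt was pending, FALSE if not, and ERR on a fatal condition.
+ */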
+int
+AscISR(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ ASC_CS_TYPE chipstat;
+ PortAddr iop_base;
+ ushort saved_ram_addr;
+ uchar ctrl_reg;
+ uchar saved_ctrl_reg;
+ int int_pending;
+ int status;
+ uchar host_flag;
+
+ iop_base = asc_dvc->iop_base;
+ int_pending = FALSE;
+
+ asc_dvc->int_count++;
+
+ if (((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) ||
+ (asc_dvc->isr_callback == 0)) {
+
+ return (ERR);
+ }
+ if (asc_dvc->in_critical_cnt != 0) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL);
+ return (ERR);
+ }
+ if (asc_dvc->is_in_int) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY);
+ asc_dvc->busy_count++;
+ return (ERR);
+ }
+ asc_dvc->is_in_int = TRUE;
+ ctrl_reg = AscGetChipControl(iop_base);
+ saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET |
+ CC_SINGLE_STEP | CC_DIAG | CC_TEST));
+
+ if ((chipstat = AscGetChipStatus(iop_base)) & CSW_INT_PENDING) {
+ int_pending = TRUE;
+ AscAckInterrupt(iop_base);
+
+ host_flag = AscReadLramByte(iop_base, ASCV_HOST_FLAG_B);
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
+ (uchar) (host_flag | (uchar) ASC_HOST_FLAG_IN_ISR));
+ saved_ram_addr = AscGetChipLramAddr(iop_base);
+
+ if ((chipstat & CSW_HALTED) &&
+ (ctrl_reg & CC_SINGLE_STEP)) {
+ if (AscIsrChipHalted(asc_dvc) == ERR) {
+
+ goto ISR_REPORT_QDONE_FATAL_ERROR;
+
+ } else {
+ saved_ctrl_reg &= ~CC_HALT;
+ }
+ } else {
+ ISR_REPORT_QDONE_FATAL_ERROR:
+ if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) {
+ while (((status = AscIsrQDone(asc_dvc)) & 0x01) != 0) {
+
+ }
+ } else {
+ do {
+ if ((status = AscIsrQDone(asc_dvc)) == 1) {
+
+ break;
+ }
+ } while (status == 0x11);
+ }
+ if ((status & 0x80) != 0)
+ int_pending = ERR;
+ }
+ AscSetChipLramAddr(iop_base, saved_ram_addr);
+ if (AscGetChipLramAddr(iop_base) != saved_ram_addr) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SET_LRAM_ADDR);
+ }
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
+ }
+ AscSetChipControl(iop_base, saved_ctrl_reg);
+ asc_dvc->is_in_int = FALSE;
+ return (int_pending);
+}
+
+int
+AscScsiSetupCmdQ(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq,
+ uchar dosfar * buf_addr,
+ ulong buf_len
+)
+{
+ ulong phy_addr;
+
+ scsiq->r1.cntl = 0;
+ scsiq->r1.sg_queue_cnt = 0;
+ scsiq->r1.q_no = 0;
+ scsiq->r1.user_def = 0;
+ scsiq->cdbptr = (uchar dosfar *) scsiq->cdb;
+ scsiq->r3.scsi_stat = 0;
+ scsiq->r3.scsi_msg = 0;
+ scsiq->r3.host_stat = 0;
+ scsiq->r3.done_stat = 0;
+ scsiq->r2.vm_id = 0;
+ scsiq->r1.data_cnt = buf_len;
+
+ scsiq->r2.tag_code = (uchar) M2_QTAG_MSG_SIMPLE;
+ scsiq->r2.flag = (uchar) ASC_FLAG_SCSIQ_REQ;
+ scsiq->r2.srb_ptr = (ulong) scsiq;
+ scsiq->r1.status = (uchar) QS_READY;
+ scsiq->r1.data_addr = 0L;
+
+ if (buf_len != 0L) {
+ if ((phy_addr = AscGetOnePhyAddr(asc_dvc,
+ (uchar dosfar *) buf_addr, scsiq->r1.data_cnt)) == 0L) {
+ return (ERR);
+ }
+ scsiq->r1.data_addr = phy_addr;
+ }
+ return (0);
+}
+
+uchar _mcode_buf[] =
+{
+ 0x01, 0x03, 0x01, 0x19, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xDD, 0x0A, 0x01, 0x05, 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xFF, 0x80, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x23, 0x00, 0x16, 0x00, 0x00, 0x00, 0x07, 0x00, 0xFF, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA4, 0x88, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0x73, 0x48, 0x04, 0x36, 0x00, 0x00, 0xA2, 0xC8, 0x00, 0x80, 0x73, 0x03, 0x23, 0x36, 0x40,
+ 0xB6, 0x00, 0x36, 0x00, 0x06, 0xD6, 0x0D, 0xD2, 0x15, 0xDE, 0x12, 0xDA, 0x00, 0xA2, 0xC8, 0x00,
+ 0x92, 0x80, 0xE0, 0x97, 0x50, 0x00, 0xF5, 0x00, 0x0A, 0x98, 0xDF, 0x23, 0x36, 0x60, 0xB6, 0x00,
+ 0x92, 0x80, 0x4F, 0x00, 0xF5, 0x00, 0x0A, 0x98, 0xEF, 0x23, 0x36, 0x60, 0xB6, 0x00, 0x92, 0x80,
+ 0x80, 0x62, 0x92, 0x80, 0x00, 0x62, 0x92, 0x80, 0x00, 0x46, 0x17, 0xEE, 0x13, 0xEA, 0x02, 0x01,
+ 0x09, 0xD8, 0xCD, 0x04, 0x4D, 0x00, 0x00, 0xA3, 0xDC, 0x00, 0x68, 0x97, 0x7F, 0x23, 0x04, 0x61,
+ 0x84, 0x01, 0xB2, 0x84, 0xCF, 0xC1, 0x80, 0x73, 0xCD, 0x04, 0x4D, 0x00, 0x00, 0xA3, 0xE8, 0x01,
+ 0x68, 0x97, 0xD4, 0x81, 0x00, 0x33, 0x02, 0x00, 0x82, 0x88, 0x80, 0x73, 0x80, 0x77, 0x00, 0x01,
+ 0x01, 0xA1, 0x08, 0x01, 0x4F, 0x00, 0x46, 0x97, 0x07, 0xA6, 0x12, 0x01, 0x00, 0x33, 0x03, 0x00,
+ 0x82, 0x88, 0x03, 0x03, 0x03, 0xDE, 0x00, 0x33, 0x05, 0x00, 0x82, 0x88, 0xCE, 0x00, 0x69, 0x60,
+ 0xCE, 0x00, 0x02, 0x03, 0x4A, 0x60, 0x00, 0xA2, 0x86, 0x01, 0x80, 0x63, 0x07, 0xA6, 0x32, 0x01,
+ 0x86, 0x81, 0x03, 0x03, 0x80, 0x63, 0xE2, 0x00, 0x07, 0xA6, 0x42, 0x01, 0x00, 0x33, 0x04, 0x00,
+ 0x82, 0x88, 0x03, 0x07, 0x02, 0x01, 0x04, 0xCA, 0x0D, 0x23, 0x2A, 0x98, 0x4D, 0x04, 0xD0, 0x84,
+ 0x05, 0xD8, 0x0D, 0x23, 0x2A, 0x98, 0xCD, 0x04, 0x15, 0x23, 0xB8, 0x88, 0xFB, 0x23, 0x02, 0x61,
+ 0x82, 0x01, 0x80, 0x63, 0x02, 0x03, 0x06, 0xA3, 0x70, 0x01, 0x00, 0x33, 0x0A, 0x00, 0x82, 0x88,
+ 0x4E, 0x00, 0x07, 0xA3, 0x7C, 0x01, 0x00, 0x33, 0x0B, 0x00, 0x82, 0x88, 0xCD, 0x04, 0x36, 0x2D,
+ 0x00, 0x33, 0x1A, 0x00, 0x82, 0x88, 0x50, 0x04, 0x96, 0x81, 0x06, 0xAB, 0x90, 0x01, 0x96, 0x81,
+ 0x4E, 0x00, 0x07, 0xA3, 0xA0, 0x01, 0x50, 0x00, 0x00, 0xA3, 0x4A, 0x01, 0x00, 0x05, 0x8A, 0x81,
+ 0x08, 0x97, 0x02, 0x01, 0x05, 0xC6, 0x04, 0x23, 0xA0, 0x01, 0x15, 0x23, 0xA1, 0x01, 0xCC, 0x81,
+ 0xFD, 0x23, 0x02, 0x61, 0x82, 0x01, 0x0A, 0xDA, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA0, 0xC2, 0x01,
+ 0x80, 0x63, 0xCD, 0x04, 0x36, 0x2D, 0x00, 0x33, 0x1B, 0x00, 0x82, 0x88, 0x06, 0x23, 0x2A, 0x98,
+ 0xCD, 0x04, 0xB2, 0x84, 0x06, 0x01, 0x00, 0xA2, 0xE2, 0x01, 0x57, 0x60, 0x00, 0xA0, 0xE8, 0x01,
+ 0xB2, 0x84, 0x80, 0x23, 0xA0, 0x01, 0xB2, 0x84, 0x80, 0x73, 0x4B, 0x00, 0x06, 0x61, 0x00, 0xA2,
+ 0x10, 0x02, 0x04, 0x01, 0x0D, 0xDE, 0x02, 0x01, 0x03, 0xCC, 0x4F, 0x00, 0x46, 0x97, 0x0A, 0x82,
+ 0x08, 0x23, 0x02, 0x41, 0x82, 0x01, 0x4F, 0x00, 0x24, 0x97, 0x48, 0x04, 0xFF, 0x23, 0x84, 0x80,
+ 0xB2, 0x97, 0x00, 0x46, 0x56, 0x00, 0x03, 0xC0, 0x01, 0x23, 0xE8, 0x00, 0x81, 0x73, 0x06, 0x29,
+ 0x03, 0x42, 0x06, 0xE2, 0x03, 0xEE, 0x66, 0xEB, 0x11, 0x23, 0xB8, 0x88, 0xC6, 0x97, 0xFA, 0x80,
+ 0x80, 0x73, 0x80, 0x77, 0x06, 0xA6, 0x3E, 0x02, 0x00, 0x33, 0x31, 0x00, 0x82, 0x88, 0x04, 0x01,
+ 0x03, 0xD8, 0x74, 0x98, 0x02, 0x96, 0x50, 0x82, 0xA2, 0x95, 0x80, 0x67, 0x83, 0x03, 0x80, 0x63,
+ 0xB6, 0x2D, 0x02, 0xA6, 0x7A, 0x02, 0x07, 0xA6, 0x68, 0x02, 0x06, 0xA6, 0x6C, 0x02, 0x03, 0xA6,
+ 0x70, 0x02, 0x00, 0x33, 0x10, 0x00, 0x82, 0x88, 0x4A, 0x95, 0x52, 0x82, 0xF8, 0x95, 0x52, 0x82,
+ 0x04, 0x23, 0xA0, 0x01, 0x14, 0x23, 0xA1, 0x01, 0x16, 0x84, 0x04, 0x01, 0x0C, 0xDC, 0xE0, 0x23,
+ 0x25, 0x61, 0xEF, 0x00, 0x14, 0x01, 0x4F, 0x04, 0xA8, 0x01, 0x6F, 0x00, 0xA5, 0x01, 0x03, 0x23,
+ 0xA4, 0x01, 0x06, 0x23, 0x9C, 0x01, 0x24, 0x2B, 0x1C, 0x01, 0x02, 0xA6, 0xAC, 0x02, 0x07, 0xA6,
+ 0x68, 0x02, 0x06, 0xA6, 0x6C, 0x02, 0x00, 0x33, 0x12, 0x00, 0x82, 0x88, 0x00, 0x0E, 0x80, 0x63,
+ 0x00, 0x43, 0x00, 0xA0, 0x9A, 0x02, 0x4D, 0x04, 0x04, 0x01, 0x0B, 0xDC, 0xE7, 0x23, 0x04, 0x61,
+ 0x84, 0x01, 0x10, 0x31, 0x12, 0x35, 0x14, 0x01, 0xEC, 0x00, 0x6C, 0x38, 0x00, 0x3F, 0x00, 0x00,
+ 0xEC, 0x82, 0x18, 0x23, 0x04, 0x61, 0x18, 0xA0, 0xE4, 0x02, 0x04, 0x01, 0x8E, 0xC8, 0x00, 0x33,
+ 0x1F, 0x00, 0x82, 0x88, 0x08, 0x31, 0x0A, 0x35, 0x0C, 0x39, 0x0E, 0x3D, 0x40, 0x98, 0xB6, 0x2D,
+ 0x01, 0xA6, 0x0E, 0x03, 0x00, 0xA6, 0x1C, 0x03, 0x07, 0xA6, 0x2A, 0x03, 0x06, 0xA6, 0x2E, 0x03,
+ 0x03, 0xA6, 0xFA, 0x03, 0x02, 0xA6, 0x7A, 0x02, 0x00, 0x33, 0x33, 0x00, 0x82, 0x88, 0x08, 0x23,
+ 0xB3, 0x01, 0x04, 0x01, 0x0E, 0xD0, 0x00, 0x33, 0x14, 0x00, 0x82, 0x88, 0x10, 0x23, 0xB3, 0x01,
+ 0x04, 0x01, 0x07, 0xCC, 0x00, 0x33, 0x15, 0x00, 0x82, 0x88, 0x4A, 0x95, 0xF0, 0x82, 0xF8, 0x95,
+ 0xF0, 0x82, 0x44, 0x98, 0x80, 0x42, 0x40, 0x98, 0x48, 0xE4, 0x04, 0x01, 0x29, 0xC8, 0x31, 0x05,
+ 0x07, 0x01, 0x00, 0xA2, 0x72, 0x03, 0x00, 0x43, 0x87, 0x01, 0x05, 0x05, 0x48, 0x98, 0x40, 0x98,
+ 0x00, 0xA6, 0x34, 0x03, 0x07, 0xA6, 0x6A, 0x03, 0x03, 0xA6, 0x16, 0x04, 0x06, 0xA6, 0x6E, 0x03,
+ 0x01, 0xA6, 0x34, 0x03, 0x00, 0x33, 0x25, 0x00, 0x82, 0x88, 0x4A, 0x95, 0x50, 0x83, 0xF8, 0x95,
+ 0x50, 0x83, 0x04, 0x01, 0x0C, 0xCE, 0x03, 0xC8, 0x00, 0x33, 0x42, 0x00, 0x82, 0x88, 0x00, 0x01,
+ 0x05, 0x05, 0xFF, 0xA2, 0x90, 0x03, 0xB1, 0x01, 0x08, 0x23, 0xB2, 0x01, 0x4C, 0x83, 0x05, 0x05,
+ 0x01, 0xA6, 0x9A, 0x03, 0x00, 0xA6, 0xAA, 0x03, 0xF0, 0x83, 0x68, 0x98, 0x80, 0x42, 0x01, 0xA6,
+ 0x9A, 0x03, 0xBA, 0x83, 0x00, 0x33, 0x2F, 0x00, 0x82, 0x88, 0x68, 0x98, 0x80, 0x42, 0x00, 0xA6,
+ 0xAA, 0x03, 0xBA, 0x83, 0x00, 0x33, 0x26, 0x00, 0x82, 0x88, 0x38, 0x2B, 0x80, 0x32, 0x80, 0x36,
+ 0x04, 0x23, 0xA0, 0x01, 0x12, 0x23, 0xA1, 0x01, 0xF0, 0x83, 0x04, 0xF0, 0x80, 0x6B, 0x00, 0x33,
+ 0x20, 0x00, 0x82, 0x88, 0x03, 0xA6, 0xEE, 0x03, 0x07, 0xA6, 0xE6, 0x03, 0x06, 0xA6, 0xEA, 0x03,
+ 0x00, 0x33, 0x17, 0x00, 0x82, 0x88, 0x4A, 0x95, 0xD4, 0x83, 0xF8, 0x95, 0xD4, 0x83, 0xFA, 0x83,
+ 0x04, 0xF0, 0x80, 0x6B, 0x00, 0x33, 0x20, 0x00, 0x82, 0x88, 0xB6, 0x2D, 0x03, 0xA6, 0x16, 0x04,
+ 0x07, 0xA6, 0x0E, 0x04, 0x06, 0xA6, 0x12, 0x04, 0x00, 0x33, 0x30, 0x00, 0x82, 0x88, 0x4A, 0x95,
+ 0xFA, 0x83, 0xF8, 0x95, 0xFA, 0x83, 0xA2, 0x0D, 0x80, 0x63, 0x07, 0xA6, 0x24, 0x04, 0x00, 0x33,
+ 0x18, 0x00, 0x82, 0x88, 0x03, 0x03, 0x80, 0x63, 0xA3, 0x01, 0x07, 0xA4, 0x2E, 0x04, 0x23, 0x01,
+ 0x00, 0xA2, 0x50, 0x04, 0x0A, 0xA0, 0x40, 0x04, 0xE0, 0x00, 0x00, 0x33, 0x1D, 0x00, 0x82, 0x88,
+ 0x0B, 0xA0, 0x4C, 0x04, 0xE0, 0x00, 0x00, 0x33, 0x1E, 0x00, 0x82, 0x88, 0x42, 0x23, 0xB8, 0x88,
+ 0x00, 0x23, 0x22, 0xA3, 0xB2, 0x04, 0x08, 0x23, 0x22, 0xA3, 0x6C, 0x04, 0x28, 0x23, 0x22, 0xA3,
+ 0x78, 0x04, 0x02, 0x23, 0x22, 0xA3, 0x8E, 0x04, 0x42, 0x23, 0xB8, 0x88, 0x4A, 0x00, 0x06, 0x61,
+ 0x00, 0xA0, 0x78, 0x04, 0x45, 0x23, 0xB8, 0x88, 0xC6, 0x97, 0x00, 0xA2, 0x8A, 0x04, 0x74, 0x98,
+ 0x00, 0x33, 0x00, 0x82, 0xC0, 0x20, 0x81, 0x62, 0xF6, 0x81, 0x47, 0x23, 0xB8, 0x88, 0x04, 0x01,
+ 0x0C, 0xDE, 0x14, 0x01, 0x00, 0xA2, 0xA6, 0x04, 0xC6, 0x97, 0x74, 0x98, 0x00, 0x33, 0x00, 0x81,
+ 0xC0, 0x20, 0x81, 0x62, 0x10, 0x82, 0x43, 0x23, 0xB8, 0x88, 0x04, 0x23, 0xA0, 0x01, 0x44, 0x23,
+ 0xA1, 0x01, 0x80, 0x73, 0x4D, 0x00, 0x03, 0xA3, 0xC0, 0x04, 0x00, 0x33, 0x27, 0x00, 0x82, 0x88,
+ 0x04, 0x01, 0x04, 0xDC, 0x02, 0x23, 0xA2, 0x01, 0x04, 0x23, 0xA0, 0x01, 0xC6, 0x97, 0xF4, 0x94,
+ 0x4B, 0x00, 0xF6, 0x00, 0x4F, 0x04, 0x4F, 0x00, 0x00, 0xA3, 0xEE, 0x04, 0x00, 0x05, 0x76, 0x00,
+ 0x06, 0x61, 0x00, 0xA2, 0xE8, 0x04, 0xD6, 0x84, 0x08, 0x97, 0xCD, 0x04, 0xF2, 0x84, 0x48, 0x04,
+ 0xFF, 0x23, 0x84, 0x80, 0x02, 0x01, 0x03, 0xDA, 0x80, 0x23, 0x82, 0x01, 0x02, 0x85, 0x02, 0x23,
+ 0xA0, 0x01, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA2, 0x0E, 0x05, 0x1D, 0x01, 0x04, 0xD6, 0xFF, 0x23,
+ 0x86, 0x41, 0x4B, 0x60, 0xCB, 0x00, 0xFF, 0x23, 0x80, 0x01, 0x49, 0x00, 0x81, 0x01, 0x04, 0x01,
+ 0x02, 0xC8, 0x30, 0x01, 0x80, 0x01, 0xF7, 0x04, 0x03, 0x01, 0x49, 0x04, 0x80, 0x01, 0xC9, 0x00,
+ 0x00, 0x05, 0x00, 0x01, 0xFF, 0xA0, 0x2E, 0x05, 0x77, 0x04, 0x01, 0x23, 0xEA, 0x00, 0x5D, 0x00,
+ 0xFE, 0xC7, 0x00, 0x62, 0x00, 0x23, 0xEA, 0x00, 0x00, 0x63, 0x07, 0xA4, 0xA0, 0x05, 0x03, 0x03,
+ 0x02, 0xA0, 0x5C, 0x05, 0x9C, 0x85, 0x00, 0x33, 0x2D, 0x00, 0x82, 0x88, 0x04, 0xA0, 0x82, 0x05,
+ 0x80, 0x63, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA2, 0x6E, 0x05, 0x1D, 0x01, 0x06, 0xD6, 0x02, 0x23,
+ 0x02, 0x41, 0x82, 0x01, 0x50, 0x00, 0x24, 0x97, 0xD0, 0x84, 0x04, 0x23, 0x02, 0x41, 0x82, 0x01,
+ 0xD0, 0x84, 0x08, 0xA0, 0x88, 0x05, 0x9C, 0x85, 0x03, 0xA0, 0x8E, 0x05, 0x9C, 0x85, 0x01, 0xA0,
+ 0x9A, 0x05, 0x88, 0x00, 0x80, 0x63, 0x78, 0x96, 0x4A, 0x85, 0x88, 0x86, 0x80, 0x63, 0x4A, 0x85,
+ 0x00, 0x63, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA2, 0xDE, 0x05, 0x1D, 0x01, 0x18, 0xD4, 0xC0, 0x23,
+ 0x07, 0x41, 0x83, 0x03, 0x80, 0x63, 0x06, 0xA6, 0xC0, 0x05, 0x00, 0x33, 0x37, 0x00, 0x82, 0x88,
+ 0x1D, 0x01, 0x02, 0xD6, 0x46, 0x23, 0xB8, 0x88, 0x63, 0x60, 0x83, 0x03, 0x80, 0x63, 0x06, 0xA6,
+ 0xD8, 0x05, 0x00, 0x33, 0x38, 0x00, 0x82, 0x88, 0xEF, 0x04, 0x6F, 0x00, 0x00, 0x63, 0x4B, 0x00,
+ 0x06, 0x41, 0xCB, 0x00, 0x52, 0x00, 0x06, 0x61, 0x00, 0xA2, 0xF2, 0x05, 0xC0, 0x23, 0x07, 0x41,
+ 0x00, 0x63, 0x80, 0x23, 0x07, 0x41, 0x00, 0x63, 0x80, 0x67, 0x08, 0x23, 0x83, 0x03, 0x80, 0x63,
+ 0x00, 0x63, 0x06, 0xA6, 0x14, 0x06, 0x07, 0xA6, 0x64, 0x06, 0x02, 0xA6, 0xBC, 0x06, 0x00, 0x33,
+ 0x39, 0x00, 0x82, 0x88, 0x00, 0x00, 0x01, 0xA0, 0xD6, 0x06, 0xA2, 0x95, 0x83, 0x03, 0x80, 0x63,
+ 0x06, 0xA6, 0x28, 0x06, 0x07, 0xA6, 0x64, 0x06, 0x00, 0x00, 0x00, 0x2B, 0x40, 0x0E, 0x80, 0x63,
+ 0x01, 0x00, 0x06, 0xA6, 0x40, 0x06, 0x07, 0xA6, 0x64, 0x06, 0x00, 0x33, 0x3A, 0x00, 0x82, 0x88,
+ 0x40, 0x0E, 0x80, 0x63, 0x00, 0x43, 0x00, 0xA0, 0x32, 0x06, 0x06, 0xA6, 0x58, 0x06, 0x07, 0xA6,
+ 0x64, 0x06, 0x00, 0x33, 0x3B, 0x00, 0x82, 0x88, 0x80, 0x67, 0x40, 0x0E, 0x80, 0x63, 0x07, 0xA6,
+ 0x64, 0x06, 0x00, 0x63, 0x03, 0x03, 0x80, 0x63, 0x88, 0x00, 0x01, 0xA2, 0x78, 0x06, 0x07, 0xA2,
+ 0xBC, 0x06, 0x00, 0x33, 0x35, 0x00, 0x82, 0x88, 0x07, 0xA6, 0x82, 0x06, 0x00, 0x33, 0x2A, 0x00,
+ 0x82, 0x88, 0x03, 0x03, 0x03, 0xA2, 0x8E, 0x06, 0x07, 0x23, 0x80, 0x00, 0xC8, 0x86, 0x80, 0x63,
+ 0x89, 0x00, 0x0A, 0x2B, 0x07, 0xA6, 0x9E, 0x06, 0x00, 0x33, 0x29, 0x00, 0x82, 0x88, 0x00, 0x43,
+ 0x00, 0xA2, 0xAA, 0x06, 0xC0, 0x0E, 0x80, 0x63, 0x94, 0x86, 0xC0, 0x0E, 0x00, 0x33, 0x00, 0x80,
+ 0xC0, 0x20, 0x81, 0x62, 0x04, 0x01, 0x08, 0xDA, 0x80, 0x63, 0x00, 0x63, 0x80, 0x67, 0x00, 0x33,
+ 0x00, 0x40, 0xC0, 0x20, 0x81, 0x62, 0x00, 0x63, 0x80, 0x7B, 0x80, 0x63, 0x06, 0xA6, 0x20, 0x06,
+ 0x00, 0x33, 0x2C, 0x00, 0x82, 0x88, 0x0C, 0xA2, 0xF0, 0x06, 0xA2, 0x95, 0x83, 0x03, 0x80, 0x63,
+ 0x06, 0xA6, 0xEE, 0x06, 0x07, 0xA6, 0x64, 0x06, 0x00, 0x33, 0x3D, 0x00, 0x82, 0x88, 0x00, 0x00,
+ 0x80, 0x67, 0x83, 0x03, 0x80, 0x63, 0x0C, 0xA0, 0x06, 0x07, 0x07, 0xA6, 0x64, 0x06, 0xBF, 0x23,
+ 0x04, 0x61, 0x84, 0x01, 0xB2, 0x84, 0x00, 0x63, 0xF0, 0x04, 0x01, 0x01, 0xF1, 0x00, 0x00, 0x01,
+ 0xF2, 0x00, 0x01, 0x05, 0x80, 0x01, 0x72, 0x04, 0x71, 0x00, 0x81, 0x01, 0x70, 0x04, 0x80, 0x05,
+ 0x81, 0x05, 0x00, 0x63, 0xF0, 0x04, 0xF2, 0x00, 0x72, 0x04, 0x01, 0x01, 0xF1, 0x00, 0x70, 0x00,
+ 0x81, 0x01, 0x70, 0x04, 0x71, 0x00, 0x81, 0x01, 0x72, 0x00, 0x80, 0x01, 0x71, 0x04, 0x70, 0x00,
+ 0x80, 0x01, 0x70, 0x04, 0x00, 0x63, 0xF0, 0x04, 0xF2, 0x00, 0x72, 0x04, 0x00, 0x01, 0xF1, 0x00,
+ 0x70, 0x00, 0x80, 0x01, 0x70, 0x04, 0x71, 0x00, 0x80, 0x01, 0x72, 0x00, 0x81, 0x01, 0x71, 0x04,
+ 0x70, 0x00, 0x81, 0x01, 0x70, 0x04, 0x00, 0x63, 0x00, 0x23, 0xB3, 0x01, 0x83, 0x05, 0xA3, 0x01,
+ 0xA2, 0x01, 0xA1, 0x01, 0x01, 0x23, 0xA0, 0x01, 0x00, 0x01, 0xC8, 0x00, 0x03, 0xA1, 0x86, 0x07,
+ 0x00, 0x33, 0x07, 0x00, 0x82, 0x88, 0x80, 0x05, 0x81, 0x05, 0x04, 0x01, 0x11, 0xC8, 0x48, 0x00,
+ 0xB0, 0x01, 0xB1, 0x01, 0x08, 0x23, 0xB2, 0x01, 0x05, 0x01, 0x48, 0x04, 0x00, 0x43, 0x00, 0xA2,
+ 0xA6, 0x07, 0x00, 0x05, 0x9C, 0x87, 0x00, 0x01, 0xC8, 0x00, 0xFF, 0x23, 0x80, 0x01, 0x05, 0x05,
+ 0x00, 0x63, 0xF7, 0x04, 0x1A, 0x09, 0xF6, 0x08, 0x6E, 0x04, 0x00, 0x02, 0x80, 0x43, 0x76, 0x08,
+ 0x80, 0x02, 0x77, 0x04, 0x00, 0x63, 0xF7, 0x04, 0x1A, 0x09, 0xF6, 0x08, 0x6E, 0x04, 0x00, 0x02,
+ 0x00, 0xA0, 0xD6, 0x07, 0xD8, 0x87, 0x00, 0x43, 0x76, 0x08, 0x80, 0x02, 0x77, 0x04, 0x00, 0x63,
+ 0xF3, 0x04, 0x00, 0x23, 0xF4, 0x00, 0x74, 0x00, 0x80, 0x43, 0xF4, 0x00, 0xCF, 0x40, 0x00, 0xA2,
+ 0x06, 0x08, 0x74, 0x04, 0x02, 0x01, 0xF7, 0xC9, 0xF6, 0xD9, 0x00, 0x01, 0x01, 0xA1, 0xE6, 0x07,
+ 0xC6, 0x97, 0xF4, 0x94, 0xE6, 0x87, 0x73, 0x04, 0x00, 0x63, 0xF3, 0x04, 0x75, 0x04, 0x1C, 0x88,
+ 0x02, 0x01, 0x04, 0xD8, 0x08, 0x97, 0xC6, 0x97, 0xF4, 0x94, 0x0C, 0x88, 0x75, 0x00, 0x00, 0xA3,
+ 0x26, 0x08, 0x00, 0x05, 0x10, 0x88, 0x73, 0x04, 0x00, 0x63, 0x80, 0x7B, 0x80, 0x63, 0x06, 0xA6,
+ 0x38, 0x08, 0x00, 0x33, 0x3E, 0x00, 0x82, 0x88, 0x80, 0x67, 0x83, 0x03, 0x80, 0x63, 0x00, 0x63,
+ 0x38, 0x2B, 0x5E, 0x88, 0x38, 0x2B, 0x54, 0x88, 0x32, 0x09, 0x31, 0x05, 0x54, 0x98, 0x05, 0x05,
+ 0xB2, 0x09, 0x00, 0x63, 0x00, 0x32, 0x00, 0x36, 0x00, 0x3A, 0x00, 0x3E, 0x00, 0x63, 0x80, 0x32,
+ 0x80, 0x36, 0x80, 0x3A, 0x80, 0x3E, 0x00, 0x63, 0x38, 0x2B, 0x40, 0x32, 0x40, 0x36, 0x40, 0x3A,
+ 0x40, 0x3E, 0x00, 0x63, 0x5A, 0x20, 0xC9, 0x40, 0x00, 0xA0, 0x74, 0x08, 0x5D, 0x00, 0xFE, 0xC3,
+ 0x00, 0x63, 0x80, 0x73, 0xE6, 0x20, 0x02, 0x23, 0xE8, 0x00, 0x82, 0x73, 0xFF, 0xFD, 0x80, 0x73,
+ 0x13, 0x23, 0xB8, 0x88, 0x66, 0x20, 0xC0, 0x20, 0x04, 0x23, 0xA0, 0x01, 0xA1, 0x23, 0xA1, 0x01,
+ 0x81, 0x62, 0xA2, 0x88, 0x80, 0x73, 0x80, 0x77, 0x68, 0x00, 0x00, 0xA2, 0x80, 0x00, 0x03, 0xC2,
+ 0xF1, 0xC7, 0x41, 0x23, 0xB8, 0x88, 0x11, 0x23, 0xA1, 0x01, 0x04, 0x23, 0xA0, 0x01, 0xB2, 0x84,
+
+};
+
+ushort _mcode_size = sizeof (_mcode_buf);
+ulong _mcode_chksum = 0x012258FBUL;
+
+extern uchar _sdtr_period_tbl_[];
+
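+/*
+ * AscExeScsiQueue() - validate and dispatch a SCSI request.
+ *
+ * For a REQUEST SENSE to a target that has already completed SDTR, the
+ * negotiated SDTR message is queued again as an urgent message-out.  The
+ * request (single buffer or scatter-gather) is then placed on the
+ * microcode queue inside a critical section if enough free queue
+ * entries are available.
+ */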
+int
+AscExeScsiQueue(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_Q dosfar * scsiq
+)
+{
+ PortAddr iop_base;
+ int last_int_level;
+ int sta;
+ ulong addr;
+ uchar sg_entry_cnt;
+ uchar target_ix;
+ int n_q_required;
+ uchar sg_entry_cnt_minus_one;
+ uchar tid_no;
+ uchar sdtr_data;
+ ASC_EXE_CALLBACK asc_exe_callback;
+
+#if CC_DEBUG_SG_LIST
+ int i;
+
+#endif
+#if CC_LINK_BUSY_Q
+ ASC_SCSI_Q dosfar *scsiq_tail;
+ ASC_SCSI_Q dosfar *scsiq_next;
+ ASC_SCSI_Q dosfar *scsiq_prev;
+
+#endif
+
+ iop_base = asc_dvc->iop_base;
+ asc_exe_callback = (ASC_EXE_CALLBACK) asc_dvc->exe_callback;
+ if (asc_dvc->err_code != 0)
+ return (ERR);
+ if (scsiq == (ASC_SCSI_Q dosfar *) 0L) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SCSIQ_NULL_PTR);
+ return (ERR);
+ }
+ scsiq->q1.q_no = 0;
+ sta = 0;
+ target_ix = scsiq->q2.target_ix;
+ tid_no = ASC_TIX_TO_TID(target_ix);
+
+ n_q_required = 1;
+
+ if (scsiq->cdbptr[0] == SCSICMD_RequestSense) {
+
+ if (((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) &&
+ ((asc_dvc->sdtr_done & scsiq->q1.target_id) != 0)) {
+ sdtr_data = AscReadLramByte(iop_base,
+ (ushort) ((ushort) ASCV_SDTR_DATA_BEG + (ushort) tid_no));
+ AscMsgOutSDTR(iop_base,
+ _sdtr_period_tbl_[(sdtr_data >> 4) & (uchar) (ASC_SYN_XFER_NO - 1)],
+ (uchar) (sdtr_data & (uchar) ASC_SYN_MAX_OFFSET));
+ scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
+ }
+ }
+ last_int_level = DvcEnterCritical();
+ if (asc_dvc->in_critical_cnt != 0) {
+ DvcLeaveCritical(last_int_level);
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY);
+ return (ERR);
+ }
+ asc_dvc->in_critical_cnt++;
+
+ if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
+
+ if ((sg_entry_cnt = scsiq->sg_head->entry_cnt) == 0) {
+ asc_dvc->in_critical_cnt--;
+ DvcLeaveCritical(last_int_level);
+ return (ERR);
+ }
+ if (sg_entry_cnt == 1) {
+ scsiq->q1.data_addr = scsiq->sg_head->sg_list[0].addr;
+ scsiq->q1.data_cnt = scsiq->sg_head->sg_list[0].bytes;
+ scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE);
+ goto NON_SG_LIST_REQ;
+ }
+ if (sg_entry_cnt > ASC_MAX_SG_LIST) {
+
+ /* too many SG elements: leave the critical section before failing */
+ asc_dvc->in_critical_cnt--;
+ DvcLeaveCritical(last_int_level);
+ return (ERR);
+ }
+ sg_entry_cnt_minus_one = sg_entry_cnt - 1;
+
+#if CC_DEBUG_SG_LIST
+ if (asc_dvc->bus_type & (ASC_IS_ISA | ASC_IS_VL | ASC_IS_EISA)) {
+ for (i = 0; i < sg_entry_cnt_minus_one; i++) {
+
+ addr = scsiq->sg_head->sg_list[i].addr +
+ scsiq->sg_head->sg_list[i].bytes;
+
+ if (((ushort) addr & 0x0003) != 0) {
+ asc_dvc->in_critical_cnt--;
+ DvcLeaveCritical(last_int_level);
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SG_LIST_ODD_ADDRESS);
+ return (ERR);
+ }
+ }
+ }
+#endif
+
+ if (asc_dvc->bug_fix_cntl) {
+ if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ADD_ONE_BYTE) {
+
+ addr = scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr +
+ scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;
+ if (((ushort) addr & 0x0003) != 0) {
+ if ((scsiq->cdbptr[0] == SCSICMD_Read6) ||
+ (scsiq->cdbptr[0] == SCSICMD_Read10)) {
+ if ((scsiq->q2.tag_code & ASC_TAG_FLAG_ADD_ONE_BYTE) == 0) {
+
+ scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes++;
+ scsiq->q2.tag_code |= ASC_TAG_FLAG_ADD_ONE_BYTE;
+ }
+ }
+ }
+ }
+ }
+ scsiq->sg_head->entry_to_copy = scsiq->sg_head->entry_cnt;
+ n_q_required = AscSgListToQueue(sg_entry_cnt);
+
+#if CC_LINK_BUSY_Q
+ scsiq_next = (ASC_SCSI_Q dosfar *) asc_dvc->scsiq_busy_head[tid_no];
+ if (scsiq_next != (ASC_SCSI_Q dosfar *) 0L) {
+ goto link_scisq_to_busy_list;
+ }
+#endif
+
+ if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required)
+ >= (uint) n_q_required) ||
+ ((scsiq->q1.cntl & QC_URGENT) != 0)) {
+ if ((sta = AscSendScsiQueue(asc_dvc, scsiq,
+ n_q_required)) == 1) {
+
+ asc_dvc->in_critical_cnt--;
+ if (asc_exe_callback != 0) {
+ (*asc_exe_callback) (asc_dvc, scsiq);
+ }
+ DvcLeaveCritical(last_int_level);
+ return (sta);
+ }
+ }
+ } else {
+
+ NON_SG_LIST_REQ:
+
+ if (asc_dvc->bug_fix_cntl) {
+ if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ADD_ONE_BYTE) {
+
+ addr = scsiq->q1.data_addr + scsiq->q1.data_cnt;
+ if ((scsiq->cdbptr[0] == SCSICMD_Read6) ||
+ (scsiq->cdbptr[0] == SCSICMD_Read10)) {
+ if (((ushort) addr & 0x0003) != 0) {
+ if (((ushort) scsiq->q1.data_cnt & 0x01FF) == 0) {
+
+ if ((scsiq->q2.tag_code & ASC_TAG_FLAG_ADD_ONE_BYTE) == 0) {
+
+ scsiq->q2.tag_code |= ASC_TAG_FLAG_ADD_ONE_BYTE;
+ scsiq->q1.data_cnt++;
+ }
+ }
+ }
+ }
+ }
+ }
+ n_q_required = 1;
+
+#if CC_LINK_BUSY_Q
+ scsiq_next = (ASC_SCSI_Q dosfar *) asc_dvc->scsiq_busy_head[tid_no];
+ if (scsiq_next != (ASC_SCSI_Q dosfar *) 0L) {
+ goto link_scisq_to_busy_list;
+ }
+#endif
+ if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, 1) >= 1) ||
+ ((scsiq->q1.cntl & QC_URGENT) != 0)) {
+ if ((sta = AscSendScsiQueue(asc_dvc, scsiq,
+ n_q_required)) == 1) {
+
+ asc_dvc->in_critical_cnt--;
+ if (asc_exe_callback != 0) {
+ (*asc_exe_callback) (asc_dvc, scsiq);
+ }
+ DvcLeaveCritical(last_int_level);
+ return (sta);
+ }
+ }
+ }
+
+#if CC_LINK_BUSY_Q
+ if (sta == 0) {
+
+ link_scisq_to_busy_list:
+ scsiq->ext.q_required = n_q_required;
+ if (scsiq_next == (ASC_SCSI_Q dosfar *) 0L) {
+ asc_dvc->scsiq_busy_head[tid_no] = (ASC_SCSI_Q dosfar *) scsiq;
+ asc_dvc->scsiq_busy_tail[tid_no] = (ASC_SCSI_Q dosfar *) scsiq;
+ scsiq->ext.next = (ASC_SCSI_Q dosfar *) 0L;
+ scsiq->ext.join = (ASC_SCSI_Q dosfar *) 0L;
+ scsiq->q1.status = QS_BUSY;
+ sta = 1;
+ } else {
+ scsiq_tail = (ASC_SCSI_Q dosfar *) asc_dvc->scsiq_busy_tail[tid_no];
+ if (scsiq_tail->ext.next == (ASC_SCSI_Q dosfar *) 0L) {
+ if ((scsiq->q1.cntl & QC_URGENT) != 0) {
+
+ asc_dvc->scsiq_busy_head[tid_no] = (ASC_SCSI_Q dosfar *) scsiq;
+ scsiq->ext.next = scsiq_next;
+ scsiq->ext.join = (ASC_SCSI_Q dosfar *) 0L;
+ } else {
+ if (scsiq->ext.cntl & QCX_SORT) {
+ do {
+ scsiq_prev = scsiq_next;
+ scsiq_next = scsiq_next->ext.next;
+ if (scsiq->ext.lba < scsiq_prev->ext.lba)
+ break;
+ } while (scsiq_next != (ASC_SCSI_Q dosfar *) 0L);
+
+ scsiq_prev->ext.next = scsiq;
+ scsiq->ext.next = scsiq_next;
+ if (scsiq_next == (ASC_SCSI_Q dosfar *) 0L) {
+ asc_dvc->scsiq_busy_tail[tid_no] = (ASC_SCSI_Q dosfar *) scsiq;
+ }
+ scsiq->ext.join = (ASC_SCSI_Q dosfar *) 0L;
+ } else {
+
+ scsiq_tail->ext.next = (ASC_SCSI_Q dosfar *) scsiq;
+ asc_dvc->scsiq_busy_tail[tid_no] = (ASC_SCSI_Q dosfar *) scsiq;
+ scsiq->ext.next = (ASC_SCSI_Q dosfar *) 0L;
+ scsiq->ext.join = (ASC_SCSI_Q dosfar *) 0L;
+ }
+ }
+ scsiq->q1.status = QS_BUSY;
+ sta = 1;
+ } else {
+
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SCSIQ_BAD_NEXT_PTR);
+ sta = ERR;
+ }
+ }
+ }
+#endif
+ asc_dvc->in_critical_cnt--;
+ DvcLeaveCritical(last_int_level);
+ return (sta);
+}
+
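+/*
+ * AscSendScsiQueue() - allocate n_q_required entries from the free queue
+ * list and copy the request into LRAM via AscPutReadyQueue() or
+ * AscPutReadySgListQueue(), updating the free-queue head and the
+ * per-target and total queue counts on success.
+ */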
+int
+AscSendScsiQueue(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_Q dosfar * scsiq,
+ uchar n_q_required
+)
+{
+ PortAddr iop_base;
+ uchar free_q_head;
+ uchar next_qp;
+ uchar tid_no;
+ uchar target_ix;
+ int sta;
+
+ iop_base = asc_dvc->iop_base;
+ target_ix = scsiq->q2.target_ix;
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ sta = 0;
+ free_q_head = (uchar) AscGetVarFreeQHead(iop_base);
+ if (n_q_required > 1) {
+ if ((next_qp = AscAllocMultipleFreeQueue(iop_base,
+ free_q_head, (uchar) (n_q_required)))
+ != (uchar) ASC_QLINK_END) {
+ asc_dvc->last_q_shortage = 0;
+ scsiq->sg_head->queue_cnt = n_q_required - 1;
+ scsiq->q1.q_no = free_q_head;
+
+ if ((sta = AscPutReadySgListQueue(asc_dvc, scsiq,
+ free_q_head)) == 1) {
+
+#if CC_WRITE_IO_COUNT
+ asc_dvc->req_count++;
+#endif
+
+ AscPutVarFreeQHead(iop_base, next_qp);
+ asc_dvc->cur_total_qng += (uchar) (n_q_required);
+ asc_dvc->cur_dvc_qng[tid_no]++;
+ }
+ return (sta);
+ }
+ } else if (n_q_required == 1) {
+
+ if ((next_qp = AscAllocFreeQueue(iop_base,
+ free_q_head)) != ASC_QLINK_END) {
+
+ scsiq->q1.q_no = free_q_head;
+ if ((sta = AscPutReadyQueue(asc_dvc, scsiq,
+ free_q_head)) == 1) {
+
+#if CC_WRITE_IO_COUNT
+ asc_dvc->req_count++;
+#endif
+
+ AscPutVarFreeQHead(iop_base, next_qp);
+ asc_dvc->cur_total_qng++;
+ asc_dvc->cur_dvc_qng[tid_no]++;
+ }
+ return (sta);
+ }
+ }
+ return (sta);
+}
+
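+/*
+ * AscSgListToQueue() - number of queue entries needed for a request with
+ * 'sg_list' scatter-gather elements: one head entry plus one entry per
+ * ASC_SG_LIST_PER_Q additional elements.
+ */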
+int
+AscSgListToQueue(
+ int sg_list
+)
+{
+ int n_sg_list_qs;
+
+ n_sg_list_qs = ((sg_list - 1) / ASC_SG_LIST_PER_Q);
+ if (((sg_list - 1) % ASC_SG_LIST_PER_Q) != 0)
+ n_sg_list_qs++;
+ return (n_sg_list_qs + 1);
+}
+
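+/*
+ * AscGetNumOfFreeQueue() - number of free microcode queues available to
+ * the target.  Returns 0 if the target is not ready or queue-full, if the
+ * per-device limit is reached, or if fewer than n_qs entries (plus the
+ * ASC_MIN_FREE_Q reserve) remain; a shortage for a multi-queue request is
+ * recorded in last_q_shortage.
+ */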
+uint
+AscGetNumOfFreeQueue(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix, uchar n_qs
+)
+{
+ uint cur_used_qs;
+ uint cur_free_qs;
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ uchar tid_no;
+
+ target_id = ASC_TIX_TO_TARGET_ID(target_ix);
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ if ((asc_dvc->unit_not_ready & target_id) ||
+ (asc_dvc->queue_full_or_busy & target_id)) {
+ return (0);
+ }
+ if (n_qs == 1) {
+ cur_used_qs = (uint) asc_dvc->cur_total_qng +
+ (uint) asc_dvc->last_q_shortage +
+ (uint) ASC_MIN_FREE_Q;
+ } else {
+ cur_used_qs = (uint) asc_dvc->cur_total_qng +
+ (uint) ASC_MIN_FREE_Q;
+ }
+
+ if ((uint) (cur_used_qs + n_qs) <= (uint) asc_dvc->max_total_qng) {
+ cur_free_qs = (uint) asc_dvc->max_total_qng - cur_used_qs;
+ if (asc_dvc->cur_dvc_qng[tid_no] >=
+ asc_dvc->max_dvc_qng[tid_no]) {
+ return (0);
+ }
+ return (cur_free_qs);
+ }
+ if (n_qs > 1) {
+ if (n_qs > asc_dvc->last_q_shortage) {
+ asc_dvc->last_q_shortage = n_qs;
+ }
+ }
+ return (0);
+}
+
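+/*
+ * AscPutReadyQueue() - copy the CDB and the ASC_SCSIQ_1/ASC_SCSIQ_2
+ * control blocks into the LRAM queue at q_no and mark the queue QS_READY.
+ * If SDTR is enabled but not yet done for the target, a message-out is
+ * set up first.
+ */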
+int
+AscPutReadyQueue(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_Q dosfar * scsiq,
+ uchar q_no
+)
+{
+ ushort q_addr;
+ uchar tid_no;
+ uchar sdtr_data;
+ uchar syn_period_ix;
+ uchar syn_offset;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+
+ if (((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) &&
+ ((asc_dvc->sdtr_done & scsiq->q1.target_id) == 0)) {
+
+ tid_no = ASC_TIX_TO_TID(scsiq->q2.target_ix);
+
+ sdtr_data = AscReadLramByte(iop_base,
+ (ushort) ((ushort) ASCV_SDTR_DATA_BEG + (ushort) tid_no));
+ syn_period_ix = (sdtr_data >> 4) & (ASC_SYN_XFER_NO - 1);
+ syn_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
+ AscMsgOutSDTR(iop_base,
+ _sdtr_period_tbl_[syn_period_ix],
+ syn_offset);
+
+ scsiq->q1.cntl |= QC_MSG_OUT;
+ }
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+
+ if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
+ scsiq->q2.tag_code &= ~M2_QTAG_MSG_SIMPLE;
+ }
+ scsiq->q1.status = QS_FREE;
+
+ AscMemWordCopyToLram(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_CDB_BEG),
+ (ushort dosfar *) scsiq->cdbptr,
+ (ushort) ((ushort) scsiq->q2.cdb_len >> 1));
+
+#if !CC_LITTLE_ENDIAN_HOST
+ AscAdjEndianScsiQ(scsiq);
+#endif
+
+ DvcPutScsiQ(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_CPY_BEG),
+ (ushort dosfar *) & scsiq->q1.cntl,
+ (ushort) ((((sizeof (ASC_SCSIQ_1) + sizeof (ASC_SCSIQ_2)) / 2) - 1)));
+
+#if CC_WRITE_IO_COUNT
+ AscWriteLramWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_W_REQ_COUNT),
+ (ushort) asc_dvc->req_count);
+
+#endif
+
+#if CC_VERIFY_LRAM_COPY
+ if ((asc_dvc->dvc_cntl & ASC_CNTL_NO_VERIFY_COPY) == 0) {
+
+ if (AscMemWordCmpToLram(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_CDB_BEG),
+ (ushort dosfar *) scsiq->cdbptr,
+ (ushort) (scsiq->q2.cdb_len >> 1)) != 0) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_LOCAL_MEM);
+ return (ERR);
+ }
+ if (AscMemWordCmpToLram(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_CPY_BEG),
+ (ushort dosfar *) & scsiq->q1.cntl,
+ (ushort) (((sizeof (ASC_SCSIQ_1) + sizeof (ASC_SCSIQ_2)) / 2) - 1))
+ != 0) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_LOCAL_MEM);
+ return (ERR);
+ }
+ }
+#endif
+
+#if CC_CLEAR_DMA_REMAIN
+
+ AscWriteLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_DW_REMAIN_XFER_ADDR), 0UL);
+ AscWriteLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_DW_REMAIN_XFER_CNT), 0UL);
+
+#endif
+
+ AscWriteLramWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ (ushort) (((ushort) scsiq->q1.q_no << 8) | (ushort) QS_READY));
+ return (1);
+}
+
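+/*
+ * AscPutReadySgListQueue() - write the chained ASC_SG_LIST_Q blocks that
+ * hold the scatter-gather elements into LRAM, then queue the head entry
+ * with AscPutReadyQueue().  The caller's data_addr/data_cnt are restored
+ * before returning.
+ */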
+int
+AscPutReadySgListQueue(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_Q dosfar * scsiq,
+ uchar q_no
+)
+{
+ uchar sg_list_dwords;
+ uchar sg_index, i;
+ uchar sg_entry_cnt;
+ uchar next_qp;
+ ushort q_addr;
+ int sta;
+ ASC_SG_HEAD dosfar *sg_head;
+ ASC_SG_LIST_Q scsi_sg_q;
+ ulong saved_data_addr;
+ ulong saved_data_cnt;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+
+ sg_head = scsiq->sg_head;
+
+ saved_data_addr = scsiq->q1.data_addr;
+ saved_data_cnt = scsiq->q1.data_cnt;
+ scsiq->q1.data_addr = sg_head->sg_list[0].addr;
+ scsiq->q1.data_cnt = sg_head->sg_list[0].bytes;
+ sg_entry_cnt = sg_head->entry_cnt - 1;
+ if (sg_entry_cnt != 0) {
+ scsiq->q1.cntl |= QC_SG_HEAD;
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ sg_index = 1;
+ scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
+ scsi_sg_q.sg_head_qp = q_no;
+ scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
+ for (i = 0; i < sg_head->queue_cnt; i++) {
+ scsi_sg_q.seq_no = i + 1;
+ if (sg_entry_cnt > ASC_SG_LIST_PER_Q) {
+ sg_list_dwords = (uchar) (ASC_SG_LIST_PER_Q * 2);
+ sg_entry_cnt -= ASC_SG_LIST_PER_Q;
+ if (i == 0) {
+ scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q;
+ scsi_sg_q.sg_cur_list_cnt = ASC_SG_LIST_PER_Q;
+ } else {
+ scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q - 1;
+ scsi_sg_q.sg_cur_list_cnt = ASC_SG_LIST_PER_Q - 1;
+ }
+ } else {
+
+ scsi_sg_q.cntl |= QCSG_SG_XFER_END;
+ sg_list_dwords = sg_entry_cnt << 1;
+ if (i == 0) {
+ scsi_sg_q.sg_list_cnt = sg_entry_cnt;
+ scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt;
+ } else {
+ scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1;
+ scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt - 1;
+ }
+ sg_entry_cnt = 0;
+ }
+ next_qp = AscReadLramByte(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_B_FWD));
+ scsi_sg_q.q_no = next_qp;
+ q_addr = ASC_QNO_TO_QADDR(next_qp);
+
+ AscMemWordCopyToLram(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_SGHD_CPY_BEG),
+ (ushort dosfar *) & scsi_sg_q,
+ (ushort) (sizeof (ASC_SG_LIST_Q) >> 1));
+
+ AscMemDWordCopyToLram(iop_base,
+ (ushort) (q_addr + ASC_SGQ_LIST_BEG),
+ (ulong dosfar *) & sg_head->sg_list[sg_index],
+ (ushort) sg_list_dwords);
+
+ sg_index += ASC_SG_LIST_PER_Q;
+ }
+ } else {
+
+ scsiq->q1.cntl &= ~QC_SG_HEAD;
+ }
+ sta = AscPutReadyQueue(asc_dvc, scsiq, q_no);
+
+ scsiq->q1.data_addr = saved_data_addr;
+ scsiq->q1.data_cnt = saved_data_cnt;
+ return (sta);
+}
+
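+/*
+ * AscAbortSRB() - halt queue execution, abort the queue whose SRB pointer
+ * matches srb_ptr, and restart queue execution.
+ */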
+int
+AscAbortSRB(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ulong srb_ptr
+)
+{
+ int sta;
+ ASC_SCSI_BIT_ID_TYPE saved_unit_not_ready;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ sta = ERR;
+ saved_unit_not_ready = asc_dvc->unit_not_ready;
+ asc_dvc->unit_not_ready = 0xFF;
+ AscWaitISRDone(asc_dvc);
+ if (AscStopQueueExe(iop_base) == 1) {
+ if (AscRiscHaltedAbortSRB(asc_dvc, srb_ptr) == 1) {
+ sta = 1;
+ AscCleanUpBusyQueue(iop_base);
+ AscStartQueueExe(iop_base);
+
+ } else {
+ sta = 0;
+ AscStartQueueExe(iop_base);
+ }
+ }
+ asc_dvc->unit_not_ready = saved_unit_not_ready;
+ return (sta);
+}
+
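+/*
+ * AscResetDevice() - abort all queues for the target, then issue an
+ * urgent request carrying a BUS DEVICE RESET message-out and wait for
+ * the target to quiesce before clearing its per-target state.
+ */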
+int
+AscResetDevice(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix
+)
+{
+ PortAddr iop_base;
+ int sta;
+ uchar tid_no;
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ int i;
+ ASC_SCSI_REQ_Q scsiq_buf;
+ ASC_SCSI_REQ_Q dosfar *scsiq;
+ uchar dosfar *buf;
+ ASC_SCSI_BIT_ID_TYPE saved_unit_not_ready;
+
+ iop_base = asc_dvc->iop_base;
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ target_id = ASC_TID_TO_TARGET_ID(tid_no);
+ saved_unit_not_ready = asc_dvc->unit_not_ready;
+ asc_dvc->unit_not_ready = target_id;
+ sta = ERR;
+ AscWaitTixISRDone(asc_dvc, target_ix);
+ if (AscStopQueueExe(iop_base) == 1) {
+ if (AscRiscHaltedAbortTIX(asc_dvc, target_ix) == 1) {
+
+ AscCleanUpBusyQueue(iop_base);
+ AscStartQueueExe(iop_base);
+
+ AscWaitTixISRDone(asc_dvc, target_ix);
+
+ sta = TRUE;
+ scsiq = (ASC_SCSI_REQ_Q dosfar *) & scsiq_buf;
+ buf = (uchar dosfar *) & scsiq_buf;
+ for (i = 0; i < sizeof (ASC_SCSI_REQ_Q); i++) {
+ *buf++ = 0x00;
+ }
+
+ scsiq->r1.status = (uchar) QS_READY;
+ scsiq->r2.cdb_len = 6;
+ scsiq->r2.tag_code = M2_QTAG_MSG_SIMPLE;
+ scsiq->r1.target_id = target_id;
+
+ scsiq->r2.target_ix = ASC_TIDLUN_TO_IX(tid_no, 0);
+ scsiq->cdbptr = (uchar dosfar *) scsiq->cdb;
+
+ scsiq->r1.cntl = QC_NO_CALLBACK | QC_MSG_OUT | QC_URGENT;
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_MSGOUT_BEG,
+ M1_BUS_DVC_RESET);
+
+ asc_dvc->unit_not_ready &= ~target_id;
+
+ asc_dvc->sdtr_done |= target_id;
+
+ if (AscExeScsiQueue(asc_dvc, (ASC_SCSI_Q dosfar *) scsiq)
+ == 1) {
+ asc_dvc->unit_not_ready = target_id;
+ DvcSleepMilliSecond(1000);
+ _AscWaitQDone(iop_base, (ASC_SCSI_Q dosfar *) scsiq);
+ if (AscStopQueueExe(iop_base) == 1) {
+
+ AscCleanUpDiscQueue(iop_base);
+ AscStartQueueExe(iop_base);
+ if (asc_dvc->pci_fix_asyn_xfer & target_id) {
+
+ AscSetRunChipSynRegAtID(iop_base, tid_no,
+ ASYN_SDTR_DATA_FIX_PCI_REV_AB);
+ }
+ AscWaitTixISRDone(asc_dvc, target_ix);
+ }
+ } else {
+
+ sta = 0;
+ }
+
+ asc_dvc->sdtr_done &= ~target_id;
+ } else {
+ sta = ERR;
+ AscStartQueueExe(iop_base);
+ }
+ }
+ asc_dvc->unit_not_ready = saved_unit_not_ready;
+ return (sta);
+}
+
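+/*
+ * AscResetSB() - reset the chip and the SCSI bus, re-initialize LRAM and
+ * the queue-link variables, and restart the microcode at
+ * ASC_MCODE_START_ADDR.
+ */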
+int
+AscResetSB(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ int sta;
+ int i;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ asc_dvc->unit_not_ready = 0xFF;
+ sta = TRUE;
+ AscWaitISRDone(asc_dvc);
+ AscStopQueueExe(iop_base);
+
+ asc_dvc->sdtr_done = 0;
+ AscResetChipAndScsiBus(iop_base);
+
+ DvcSleepMilliSecond((ulong) ((ushort) asc_dvc->scsi_reset_wait * 1000));
+
+#if CC_SCAM
+ if (!(asc_dvc->dvc_cntl & ASC_CNTL_NO_SCAM)) {
+ AscSCAM(asc_dvc);
+ }
+#endif
+ AscReInitLram(asc_dvc);
+
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->cur_dvc_qng[i] = 0;
+ if (asc_dvc->pci_fix_asyn_xfer & (0x01 << i)) {
+
+ AscSetChipSynRegAtID(iop_base, i, ASYN_SDTR_DATA_FIX_PCI_REV_AB);
+ }
+ }
+
+ asc_dvc->err_code = 0;
+
+ AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
+ if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
+ sta = ERR;
+ }
+ if (AscStartChip(iop_base) == 0) {
+ sta = ERR;
+ }
+ AscStartQueueExe(iop_base);
+ asc_dvc->unit_not_ready = 0;
+ asc_dvc->queue_full_or_busy = 0;
+ return (sta);
+}
+
+int
+AscSetRunChipSynRegAtID(
+ PortAddr iop_base,
+ uchar tid_no,
+ uchar sdtr_data
+)
+{
+ int sta = FALSE;
+
+ if (AscHostReqRiscHalt(iop_base)) {
+ sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
+
+ AscStartChip(iop_base);
+ return (sta);
+ }
+ return (sta);
+}
+
+int
+AscSetChipSynRegAtID(
+ PortAddr iop_base,
+ uchar id,
+ uchar sdtr_data
+)
+{
+ AscSetBank(iop_base, 1);
+ AscWriteChipScsiID(iop_base, id);
+ if (AscReadChipScsiID(iop_base) != (0x01 << id)) {
+ return (FALSE);
+ }
+ AscSetBank(iop_base, 0);
+ AscWriteChipSyn(iop_base, sdtr_data);
+ if (AscReadChipSyn(iop_base) != sdtr_data) {
+ return (FALSE);
+ }
+ return (TRUE);
+}
+
+int
+AscReInitLram(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ AscInitLram(asc_dvc);
+ AscInitQLinkVar(asc_dvc);
+ return (0);
+}
+
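+/*
+ * AscInitLram() - clear the LRAM queue area and build the doubly linked
+ * list of queue blocks (forward/backward/queue-number bytes) used by the
+ * microcode, including the extra blocks past max_total_qng.
+ */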
+ushort
+AscInitLram(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc)
+{
+ uchar i;
+ ushort s_addr;
+ PortAddr iop_base;
+ ushort warn_code;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+
+ AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0,
+ (ushort) (((int) (asc_dvc->max_total_qng + 2 + 1) * 64) >> 1)
+ );
+
+ i = ASC_MIN_ACTIVE_QNO;
+ s_addr = ASC_QADR_BEG + ASC_QBLK_SIZE;
+
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_FWD),
+ (uchar) (i + 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_BWD),
+ (uchar) (asc_dvc->max_total_qng));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_QNO),
+ (uchar) i);
+ i++;
+ s_addr += ASC_QBLK_SIZE;
+ for (; i < asc_dvc->max_total_qng; i++, s_addr += ASC_QBLK_SIZE) {
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_FWD),
+ (uchar) (i + 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_BWD),
+ (uchar) (i - 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_QNO),
+ (uchar) i);
+ }
+
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_FWD),
+ (uchar) ASC_QLINK_END);
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_BWD),
+ (uchar) (asc_dvc->max_total_qng - 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_QNO),
+ (uchar) asc_dvc->max_total_qng);
+ i++;
+ s_addr += ASC_QBLK_SIZE;
+
+ for (; i <= (uchar) (asc_dvc->max_total_qng + 3);
+ i++, s_addr += ASC_QBLK_SIZE) {
+
+ AscWriteLramByte(iop_base,
+ (ushort) (s_addr + (ushort) ASC_SCSIQ_B_FWD), i);
+ AscWriteLramByte(iop_base,
+ (ushort) (s_addr + (ushort) ASC_SCSIQ_B_BWD), i);
+ AscWriteLramByte(iop_base,
+ (ushort) (s_addr + (ushort) ASC_SCSIQ_B_QNO), i);
+ }
+
+ return (warn_code);
+}
+
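+/*
+ * AscInitQLinkVar() - initialize the free/done queue head and tail
+ * variables shared with the RISC, clear the error, halt and status
+ * bytes, and zero the first 32 words of the queue address area.
+ */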
+ushort
+AscInitQLinkVar(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ PortAddr iop_base;
+ int i;
+ ushort lram_addr;
+
+ iop_base = asc_dvc->iop_base;
+ AscPutRiscVarFreeQHead(iop_base, 1);
+ AscPutRiscVarDoneQTail(iop_base, asc_dvc->max_total_qng);
+
+ AscPutVarFreeQHead(iop_base, 1);
+ AscPutVarDoneQTail(iop_base, asc_dvc->max_total_qng);
+
+ AscWriteLramByte(iop_base, ASCV_BUSY_QHEAD_B,
+ (uchar) ((int) asc_dvc->max_total_qng + 1));
+ AscWriteLramByte(iop_base, ASCV_DISC1_QHEAD_B,
+ (uchar) ((int) asc_dvc->max_total_qng + 2));
+
+ AscWriteLramByte(iop_base, (ushort) ASCV_TOTAL_READY_Q_B,
+ asc_dvc->max_total_qng);
+
+ AscWriteLramWord(iop_base, ASCV_ASCDVC_ERR_CODE_W, 0);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0);
+ AscWriteLramByte(iop_base, ASCV_SCSIBUSY_B, 0);
+ AscWriteLramByte(iop_base, ASCV_WTM_FLAG_B, 0);
+
+ AscWriteLramByte(iop_base, (ushort) ASCV_CDBCNT_B, 0);
+
+ lram_addr = ASC_QADR_BEG;
+ for (i = 0; i < 32; i++, lram_addr += 2) {
+ AscWriteLramWord(iop_base, lram_addr, 0);
+ }
+
+ return (0);
+}
+
+int
+AscSetLibErrorCode(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ushort err_code
+)
+{
+ if (asc_dvc->err_code == 0) {
+
+ asc_dvc->err_code = err_code;
+ AscWriteLramWord(asc_dvc->iop_base, ASCV_ASCDVC_ERR_CODE_W,
+ err_code);
+ }
+ return (err_code);
+}
+
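+/*
+ * _AscWaitQDone() - wait for the queue assigned to 'scsiq' to complete by
+ * polling its LRAM status byte every 100 ms; returns 0 after roughly
+ * 3 seconds without completion, 1 on completion.
+ */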
+int
+_AscWaitQDone(
+ PortAddr iop_base,
+ ASC_SCSI_Q dosfar * scsiq
+)
+{
+ ushort q_addr;
+ uchar q_status;
+ int count = 0;
+
+ while (scsiq->q1.q_no == 0) ;
+ q_addr = ASC_QNO_TO_QADDR(scsiq->q1.q_no);
+
+ do {
+ q_status = AscReadLramByte(iop_base, q_addr + ASC_SCSIQ_B_STATUS);
+ DvcSleepMilliSecond(100L);
+ if (count++ > 30) {
+ return (0);
+ }
+ } while ((q_status & QS_READY) != 0);
+ return (1);
+}
+
+uchar _sdtr_period_tbl_[ASC_SYN_XFER_NO] =
+{
+ SYN_XFER_NS_0,
+ SYN_XFER_NS_1,
+ SYN_XFER_NS_2,
+ SYN_XFER_NS_3,
+ SYN_XFER_NS_4,
+ SYN_XFER_NS_5,
+ SYN_XFER_NS_6,
+ SYN_XFER_NS_7};
+
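+/*
+ * AscMsgOutSDTR() - build an extended SDTR message in the LRAM
+ * message-out area and return the period index and offset packed into
+ * one byte, or 0 if the period is out of range.
+ */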
+uchar
+AscMsgOutSDTR(
+ PortAddr iop_base,
+ uchar sdtr_period,
+ uchar sdtr_offset
+)
+{
+ SDTR_XMSG sdtr_buf;
+ uchar sdtr_period_index;
+
+ sdtr_buf.msg_type = MS_EXTEND;
+ sdtr_buf.msg_len = MS_SDTR_LEN;
+ sdtr_buf.msg_req = MS_SDTR_CODE;
+ sdtr_buf.xfer_period = sdtr_period;
+ sdtr_offset &= ASC_SYN_MAX_OFFSET;
+ sdtr_buf.req_ack_offset = sdtr_offset;
+ AscMemWordCopyToLram(iop_base, ASCV_MSGOUT_BEG,
+ (ushort dosfar *) & sdtr_buf, SYN_XMSG_WLEN);
+ if ((sdtr_period_index = AscGetSynPeriodIndex(sdtr_period)) <=
+ ASC_MAX_SDTR_PERIOD_INDEX) {
+ return ((sdtr_period_index << 4) | sdtr_offset);
+ } else {
+
+ return (0);
+ }
+}
+
+uchar
+AscCalSDTRData(
+ uchar sdtr_period,
+ uchar syn_offset
+)
+{
+ uchar byte;
+ uchar sdtr_period_ix;
+
+ sdtr_period_ix = AscGetSynPeriodIndex(sdtr_period);
+ if ((sdtr_period_ix > ASC_MAX_SDTR_PERIOD_INDEX) ||
+ (sdtr_period_ix > ASC_SDTR_PERIOD_IX_MIN)) {
+ return (0xFF);
+ }
+ byte = (sdtr_period_ix << 4) | (syn_offset & ASC_SYN_MAX_OFFSET);
+ return (byte);
+}
+
+void
+AscSetChipSDTR(
+ PortAddr iop_base,
+ uchar sdtr_data,
+ uchar tid_no
+)
+{
+
+ AscWriteChipSyn(iop_base, sdtr_data);
+ AscWriteLramByte(iop_base,
+ (ushort) ((ushort) ASCV_SDTR_DONE_BEG + (ushort) tid_no),
+ sdtr_data);
+ return;
+}
+
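+/*
+ * AscGetSynPeriodIndex() - map a synchronous transfer period to its index
+ * in _sdtr_period_tbl_ (0-7); returns 8 when the period is out of range.
+ */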
+uchar
+AscGetSynPeriodIndex(
+ uchar syn_time
+)
+{
+ if ((syn_time < SYN_XFER_NS_0) || (syn_time > SYN_XFER_NS_7)) {
+
+ return (8);
+ }
+ if (syn_time > SYN_XFER_NS_6)
+ return (7);
+ if (syn_time > SYN_XFER_NS_5)
+ return (6);
+ if (syn_time > SYN_XFER_NS_4)
+ return (5);
+ if (syn_time > SYN_XFER_NS_3)
+ return (4);
+ if (syn_time > SYN_XFER_NS_2)
+ return (3);
+ if (syn_time > SYN_XFER_NS_1)
+ return (2);
+ if (syn_time > SYN_XFER_NS_0)
+ return (1);
+ return (0);
+}
+
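+/*
+ * AscAllocFreeQueue() - return the successor of 'free_q_head' on the free
+ * queue list, or ASC_QLINK_END if the head is already in use or the list
+ * is exhausted.
+ */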
+uchar
+AscAllocFreeQueue(
+ PortAddr iop_base,
+ uchar free_q_head
+)
+{
+ ushort q_addr;
+ uchar next_qp;
+ uchar q_status;
+
+ q_addr = ASC_QNO_TO_QADDR(free_q_head);
+ q_status = (uchar) AscReadLramByte(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_B_STATUS));
+ next_qp = AscReadLramByte(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_B_FWD));
+ if (((q_status & QS_READY) == 0) &&
+ (next_qp != ASC_QLINK_END)) {
+ return (next_qp);
+ }
+ return (ASC_QLINK_END);
+}
+
+uchar
+AscAllocMultipleFreeQueue(
+ PortAddr iop_base,
+ uchar free_q_head,
+ uchar n_free_q
+)
+{
+ uchar i;
+
+ for (i = 0; i < n_free_q; i++) {
+ if ((free_q_head = AscAllocFreeQueue(iop_base, free_q_head))
+ == ASC_QLINK_END) {
+ return (ASC_QLINK_END);
+ }
+ }
+ return (free_q_head);
+}
+
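+/*
+ * AscRiscHaltedAbortSRB() - with the RISC halted, scan the active queues
+ * for the one whose SRB pointer matches, mark it QS_ABORTED /
+ * QD_ABORTED_BY_HOST, and complete it through the ISR callback.
+ */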
+int
+AscRiscHaltedAbortSRB(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ulong srb_ptr
+)
+{
+ PortAddr iop_base;
+ ushort q_addr;
+ uchar q_no;
+ ASC_QDONE_INFO scsiq_buf;
+ ASC_QDONE_INFO dosfar *scsiq;
+ ASC_ISR_CALLBACK asc_isr_callback;
+ int last_int_level;
+
+ iop_base = asc_dvc->iop_base;
+ asc_isr_callback = (ASC_ISR_CALLBACK) asc_dvc->isr_callback;
+ last_int_level = DvcEnterCritical();
+ scsiq = (ASC_QDONE_INFO dosfar *) & scsiq_buf;
+
+#if CC_LINK_BUSY_Q
+ _AscAbortSrbBusyQueue(asc_dvc, scsiq, srb_ptr);
+#endif
+
+ for (q_no = ASC_MIN_ACTIVE_QNO; q_no <= asc_dvc->max_total_qng;
+ q_no++) {
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ scsiq->d2.srb_ptr = AscReadLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_D_SRBPTR));
+ if (scsiq->d2.srb_ptr == srb_ptr) {
+ _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq, asc_dvc->max_dma_count);
+ if (((scsiq->q_status & QS_READY) != 0) &&
+ ((scsiq->q_status & QS_ABORTED) == 0) &&
+ ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)) {
+
+ scsiq->q_status |= QS_ABORTED;
+ scsiq->d3.done_stat = QD_ABORTED_BY_HOST;
+ AscWriteLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_D_SRBPTR),
+ 0L);
+ AscWriteLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ scsiq->q_status);
+ (*asc_isr_callback) (asc_dvc, scsiq);
+ DvcLeaveCritical(last_int_level);
+ return (1);
+ }
+ }
+ }
+ DvcLeaveCritical(last_int_level);
+ return (0);
+}
+
+int
+AscRiscHaltedAbortTIX(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix
+)
+{
+ PortAddr iop_base;
+ ushort q_addr;
+ uchar q_no;
+ ASC_QDONE_INFO scsiq_buf;
+ ASC_QDONE_INFO dosfar *scsiq;
+ ASC_ISR_CALLBACK asc_isr_callback;
+ int last_int_level;
+
+#if CC_LINK_BUSY_Q
+ uchar tid_no;
+
+#endif
+
+ iop_base = asc_dvc->iop_base;
+ asc_isr_callback = (ASC_ISR_CALLBACK) asc_dvc->isr_callback;
+ last_int_level = DvcEnterCritical();
+ scsiq = (ASC_QDONE_INFO dosfar *) & scsiq_buf;
+
+#if CC_LINK_BUSY_Q
+
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ _AscAbortTidBusyQueue(asc_dvc, scsiq, tid_no);
+
+#endif
+
+ for (q_no = ASC_MIN_ACTIVE_QNO; q_no <= asc_dvc->max_total_qng;
+ q_no++) {
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq, asc_dvc->max_dma_count);
+ if (((scsiq->q_status & QS_READY) != 0) &&
+ ((scsiq->q_status & QS_ABORTED) == 0) &&
+ ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)) {
+ if (scsiq->d2.target_ix == target_ix) {
+ scsiq->q_status |= QS_ABORTED;
+ scsiq->d3.done_stat = QD_ABORTED_BY_HOST;
+
+ AscWriteLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_D_SRBPTR),
+ 0L);
+
+ AscWriteLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ scsiq->q_status);
+ (*asc_isr_callback) (asc_dvc, scsiq);
+ }
+ }
+ }
+ DvcLeaveCritical(last_int_level);
+ return (1);
+}
+
+#if CC_LINK_BUSY_Q
+
+#endif
+
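+/*
+ * AscHostReqRiscHalt() - ask the running microcode to halt the RISC by
+ * writing the stop-code byte, polling for up to about 2 seconds; the
+ * previous stop code is restored before returning.
+ */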
+int
+AscHostReqRiscHalt(
+ PortAddr iop_base
+)
+{
+ int count = 0;
+ int sta = 0;
+ uchar saved_stop_code;
+
+ if (AscIsChipHalted(iop_base))
+ return (1);
+ saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
+
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP
+ );
+ do {
+ if (AscIsChipHalted(iop_base)) {
+ sta = 1;
+ break;
+ }
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code);
+ return (sta);
+}
+
+int
+AscStopQueueExe(
+ PortAddr iop_base
+)
+{
+ int count;
+
+ count = 0;
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) == 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_REQ_RISC_STOP);
+ do {
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) &
+ ASC_STOP_ACK_RISC_STOP) {
+ return (1);
+ }
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+ }
+ return (0);
+}
+
+int
+AscStartQueueExe(
+ PortAddr iop_base
+)
+{
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) != 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0);
+ }
+ return (1);
+}
+
+int
+AscCleanUpBusyQueue(
+ PortAddr iop_base
+)
+{
+ int count;
+ uchar stop_code;
+
+ count = 0;
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) != 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_CLEAN_UP_BUSY_Q);
+ do {
+ stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
+ if ((stop_code & ASC_STOP_CLEAN_UP_BUSY_Q) == 0)
+ break;
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+ }
+ return (1);
+}
+
+int
+AscCleanUpDiscQueue(
+ PortAddr iop_base
+)
+{
+ int count;
+ uchar stop_code;
+
+ count = 0;
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) != 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_CLEAN_UP_DISC_Q);
+ do {
+ stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
+ if ((stop_code & ASC_STOP_CLEAN_UP_DISC_Q) == 0)
+ break;
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+ }
+ return (1);
+}
+
+int
+AscWaitTixISRDone(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix
+)
+{
+ uchar cur_req;
+ uchar tid_no;
+
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ while (TRUE) {
+ if ((cur_req = asc_dvc->cur_dvc_qng[tid_no]) == 0) {
+ break;
+ }
+ DvcSleepMilliSecond(1000L);
+ if (asc_dvc->cur_dvc_qng[tid_no] == cur_req) {
+ break;
+ }
+ }
+ return (1);
+}
+
+int
+AscWaitISRDone(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ int tid;
+
+ for (tid = 0; tid <= ASC_MAX_TID; tid++) {
+ AscWaitTixISRDone(asc_dvc, ASC_TID_TO_TIX(tid));
+ }
+ return (1);
+}
+
+ulong
+AscGetOnePhyAddr(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar dosfar * buf_addr,
+ ulong buf_size
+)
+{
+ ASC_MIN_SG_HEAD sg_head;
+
+ sg_head.entry_cnt = ASC_MIN_SG_LIST;
+ if (DvcGetSGList(asc_dvc, (uchar dosfar *) buf_addr,
+ buf_size, (ASC_SG_HEAD dosfar *) & sg_head) != buf_size) {
+ return (0L);
+ }
+ if (sg_head.entry_cnt > 1) {
+ return (0L);
+ }
+ return (sg_head.sg_list[0].addr);
+}
+
+ulong
+AscGetEisaProductID(
+ PortAddr iop_base
+)
+{
+ PortAddr eisa_iop;
+ ushort product_id_high, product_id_low;
+ ulong product_id;
+
+ eisa_iop = ASC_GET_EISA_SLOT(iop_base) | ASC_EISA_PID_IOP_MASK;
+ product_id_low = inpw(eisa_iop);
+ product_id_high = inpw(eisa_iop + 2);
+ product_id = ((ulong) product_id_high << 16) | (ulong) product_id_low;
+ return (product_id);
+}
+
+PortAddr
+AscSearchIOPortAddrEISA(
+ PortAddr iop_base
+)
+{
+ ulong eisa_product_id;
+
+ if (iop_base == 0) {
+ iop_base = ASC_EISA_MIN_IOP_ADDR;
+ } else {
+ if (iop_base == ASC_EISA_MAX_IOP_ADDR)
+ return (0);
+ if ((iop_base & 0x0050) == 0x0050) {
+ iop_base += ASC_EISA_BIG_IOP_GAP;
+ } else {
+ iop_base += ASC_EISA_SMALL_IOP_GAP;
+ }
+ }
+ while (iop_base <= ASC_EISA_MAX_IOP_ADDR) {
+
+ eisa_product_id = AscGetEisaProductID(iop_base);
+ if ((eisa_product_id == ASC_EISA_ID_740) ||
+ (eisa_product_id == ASC_EISA_ID_750)) {
+ if (AscFindSignature(iop_base)) {
+
+ inpw(iop_base + 4);
+ return (iop_base);
+ }
+ }
+ if (iop_base == ASC_EISA_MAX_IOP_ADDR)
+ return (0);
+ if ((iop_base & 0x0050) == 0x0050) {
+ iop_base += ASC_EISA_BIG_IOP_GAP;
+ } else {
+ iop_base += ASC_EISA_SMALL_IOP_GAP;
+ }
+ }
+ return (0);
+}
+
+int
+AscStartChip(
+ PortAddr iop_base
+)
+{
+ AscSetChipControl(iop_base, 0);
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
+ return (0);
+ }
+ return (1);
+}
+
+int
+AscStopChip(
+ PortAddr iop_base
+)
+{
+ uchar cc_val;
+
+ cc_val = AscGetChipControl(iop_base) & (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG));
+ AscSetChipControl(iop_base, (uchar) (cc_val | CC_HALT));
+ AscSetChipIH(iop_base, INS_HALT);
+ AscSetChipIH(iop_base, INS_RFLAG_WTM);
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) {
+ return (0);
+ }
+ return (1);
+}
+
+int
+AscIsChipHalted(
+ PortAddr iop_base
+)
+{
+
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
+ if ((AscGetChipControl(iop_base) & CC_HALT) != 0) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
+void
+AscSetChipIH(
+ PortAddr iop_base,
+ ushort ins_code
+)
+{
+ AscSetBank(iop_base, 1);
+ AscWriteChipIH(iop_base, ins_code);
+ AscSetBank(iop_base, 0);
+ return;
+}
+
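+/*
+ * AscAckInterrupt() - wait for the RISC to finish raising the interrupt,
+ * set the host ACK flag in LRAM, and write CIW_INT_ACK until the chip's
+ * interrupt-pending bit clears.
+ */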
+void
+AscAckInterrupt(
+ PortAddr iop_base
+)
+{
+
+ uchar host_flag;
+ uchar risc_flag;
+ ushort loop;
+
+ loop = 0;
+ do {
+ risc_flag = AscReadLramByte(iop_base, ASCV_RISC_FLAG_B);
+ if (loop++ > 0x7FFF) {
+ break;
+ }
+ } while ((risc_flag & ASC_RISC_FLAG_GEN_INT) != 0);
+
+ host_flag = AscReadLramByte(iop_base, ASCV_HOST_FLAG_B);
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
+ (uchar) (host_flag | ASC_HOST_FLAG_ACK_INT));
+
+ AscSetChipStatus(iop_base, CIW_INT_ACK);
+ loop = 0;
+ while (AscGetChipStatus(iop_base) & CSW_INT_PENDING) {
+
+ AscSetChipStatus(iop_base, CIW_INT_ACK);
+ if (loop++ > 3) {
+ break;
+ }
+ }
+
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
+ return;
+}
+
+void
+AscDisableInterrupt(
+ PortAddr iop_base
+)
+{
+ ushort cfg;
+
+ cfg = AscGetChipCfgLsw(iop_base);
+ AscSetChipCfgLsw(iop_base, cfg & (~ASC_CFG0_HOST_INT_ON));
+ return;
+}
+
+void
+AscEnableInterrupt(
+ PortAddr iop_base
+)
+{
+ ushort cfg;
+
+ cfg = AscGetChipCfgLsw(iop_base);
+ AscSetChipCfgLsw(iop_base, cfg | ASC_CFG0_HOST_INT_ON);
+ return;
+}
+
+void
+AscSetBank(
+ PortAddr iop_base,
+ uchar bank
+)
+{
+ uchar val;
+
+ val = AscGetChipControl(iop_base) &
+ (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG | CC_SCSI_RESET | CC_CHIP_RESET));
+ if (bank == 1) {
+ val |= CC_BANK_ONE;
+ } else if (bank == 2) {
+ val |= CC_DIAG | CC_BANK_ONE;
+ } else {
+ val &= ~CC_BANK_ONE;
+ }
+ AscSetChipControl(iop_base, val);
+ return;
+}
+
+int
+AscResetChipAndScsiBus(
+ PortAddr iop_base
+)
+{
+ AscStopChip(iop_base);
+ AscSetChipControl(iop_base, CC_CHIP_RESET | CC_SCSI_RESET | CC_HALT);
+ DvcSleepMilliSecond(200);
+
+ AscSetChipIH(iop_base, INS_RFLAG_WTM);
+ AscSetChipIH(iop_base, INS_HALT);
+
+ AscSetChipControl(iop_base, CC_CHIP_RESET | CC_HALT);
+ AscSetChipControl(iop_base, CC_HALT);
+ DvcSleepMilliSecond(200);
+ return (AscIsChipHalted(iop_base));
+}
+
+ushort
+AscGetIsaDmaChannel(
+ PortAddr iop_base
+)
+{
+ ushort channel;
+
+ channel = AscGetChipCfgLsw(iop_base) & 0x0003;
+ if (channel == 0x03)
+ return (0);
+ else if (channel == 0x00)
+ return (7);
+ return (channel + 4);
+}
+
+ushort
+AscSetIsaDmaChannel(
+ PortAddr iop_base,
+ ushort dma_channel
+)
+{
+ ushort cfg_lsw;
+ uchar value;
+
+ if ((dma_channel >= 5) && (dma_channel <= 7)) {
+
+ if (dma_channel == 7)
+ value = 0x00;
+ else
+ value = dma_channel - 4;
+ cfg_lsw = AscGetChipCfgLsw(iop_base) & 0xFFFC;
+ cfg_lsw |= value;
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ return (AscGetIsaDmaChannel(iop_base));
+ }
+ return (0);
+}
+
+uchar
+AscSetIsaDmaSpeed(
+ PortAddr iop_base,
+ uchar speed_value
+)
+{
+ speed_value &= 0x07;
+ AscSetBank(iop_base, 1);
+ AscSetChipDmaSpeed(iop_base, speed_value);
+ AscSetBank(iop_base, 0);
+ return (AscGetIsaDmaSpeed(iop_base));
+}
+
+uchar
+AscGetIsaDmaSpeed(
+ PortAddr iop_base
+)
+{
+ uchar speed_value;
+
+ AscSetBank(iop_base, 1);
+ speed_value = AscGetChipDmaSpeed(iop_base);
+ speed_value &= 0x07;
+ AscSetBank(iop_base, 0);
+ return (speed_value);
+}
+
+ulong
+AscGetMaxDmaCount(
+ ushort bus_type
+)
+{
+ if (bus_type & ASC_IS_ISA)
+ return (ASC_MAX_ISA_DMA_COUNT);
+ else if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
+ return (ASC_MAX_VL_DMA_COUNT);
+ return (ASC_MAX_PCI_DMA_COUNT);
+}
+
+ushort
+AscInitGetConfig(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ ushort warn_code;
+
+ warn_code = 0;
+ asc_dvc->init_state = ASC_INIT_STATE_BEG_GET_CFG;
+ if (asc_dvc->err_code != 0)
+ return (UW_ERR);
+ if (AscFindSignature(asc_dvc->iop_base)) {
+ warn_code |= AscInitAscDvcVar(asc_dvc);
+ warn_code |= AscInitFromEEP(asc_dvc);
+ asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG;
+
+ if (asc_dvc->scsi_reset_wait > 10)
+ asc_dvc->scsi_reset_wait = 10;
+
+ } else {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ }
+ return (warn_code);
+}
+
+ushort
+AscInitSetConfig(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ ushort warn_code;
+
+ warn_code = 0;
+ asc_dvc->init_state |= ASC_INIT_STATE_BEG_SET_CFG;
+ if (asc_dvc->err_code != 0)
+ return (UW_ERR);
+ if (AscFindSignature(asc_dvc->iop_base)) {
+ warn_code |= AscInitFromAscDvcVar(asc_dvc);
+ asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG;
+ } else {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ }
+ return (warn_code);
+}
+
+ushort
+AscInitAsc1000Driver(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ ushort warn_code;
+ PortAddr iop_base;
+
+ extern ushort _mcode_size;
+ extern ulong _mcode_chksum;
+ extern uchar _mcode_buf[];
+
+ ASC_DBG(3, "AscInitAsc1000Driver: begin\n");
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+
+ if ((asc_dvc->dvc_cntl & ASC_CNTL_RESET_SCSI) &&
+ !(asc_dvc->init_state & ASC_INIT_RESET_SCSI_DONE)) {
+
+ ASC_DBG(3, "AscInitAsc1000Driver: AscResetChipAndScsiBus()\n");
+ AscResetChipAndScsiBus(iop_base);
+ DvcSleepMilliSecond((ulong) ((ushort) asc_dvc->scsi_reset_wait * 1000));
+ }
+ asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC;
+ if (asc_dvc->err_code != 0)
+ return (UW_ERR);
+ ASC_DBG(3, "AscInitAsc1000Driver: AscFindSignature()\n");
+ if (!AscFindSignature(asc_dvc->iop_base)) {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ return (warn_code);
+ }
+ ASC_DBG(3, "AscInitAsc1000Driver: AscDisableInterrupt()\n");
+ AscDisableInterrupt(iop_base);
+
+ ASC_DBG(3, "AscInitAsc1000Driver: AscInitLram()\n");
+ warn_code |= AscInitLram(asc_dvc);
+ if (asc_dvc->err_code != 0)
+ return (UW_ERR);
+ ASC_DBG(3, "AscInitAsc1000Driver: AscLoadMicroCode()\n");
+ if (AscLoadMicroCode(iop_base, 0, (ushort dosfar *) _mcode_buf,
+ _mcode_size) != _mcode_chksum) {
+ asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
+ return (warn_code);
+ }
+ ASC_DBG(3, "AscInitAsc1000Driver: AscInitMicroCodeVar()\n");
+ warn_code |= AscInitMicroCodeVar(asc_dvc);
+ asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC;
+ ASC_DBG(3, "AscInitAsc1000Driver: AscEnableInterrupt()\n");
+ AscEnableInterrupt(iop_base);
+ return (warn_code);
+}
+
+ushort
+AscInitAscDvcVar(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ int i;
+ PortAddr iop_base;
+ ushort warn_code;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ asc_dvc->err_code = 0;
+
+ if ((asc_dvc->bus_type &
+ (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
+ asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE;
+ }
+#if CC_LINK_BUSY_Q
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->scsiq_busy_head[i] = (ASC_SCSI_Q dosfar *) 0L;
+ asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q dosfar *) 0L;
+ }
+#endif
+
+ asc_dvc->dvc_cntl = ASC_DEF_DVC_CNTL;
+ asc_dvc->bug_fix_cntl = 0;
+ asc_dvc->pci_fix_asyn_xfer = 0;
+ asc_dvc->init_sdtr = 0;
+ asc_dvc->sdtr_done = 0;
+ asc_dvc->max_total_qng = ASC_DEF_MAX_TOTAL_QNG;
+ asc_dvc->cur_total_qng = 0;
+ asc_dvc->is_in_int = 0;
+ asc_dvc->scsi_reset_wait = 3;
+ asc_dvc->in_critical_cnt = 0;
+
+ asc_dvc->last_q_shortage = 0;
+ asc_dvc->use_tagged_qng = 0;
+ asc_dvc->cfg->can_tagged_qng = 0;
+ asc_dvc->no_scam = 0;
+ asc_dvc->irq_no = 10;
+ asc_dvc->start_motor = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->cfg->disc_enable = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->cfg->cmd_qng_enabled = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->cfg->chip_scsi_id = ASC_DEF_CHIP_SCSI_ID;
+ asc_dvc->cfg->chip_version = AscGetChipVersion(iop_base,
+ asc_dvc->bus_type);
+ if (AscGetChipBusType(iop_base) == ASC_IS_ISAPNP) {
+
+ AscPutChipIFC(iop_base, IFC_INIT_DEFAULT);
+ asc_dvc->bus_type = ASC_IS_ISAPNP;
+ }
+ asc_dvc->unit_not_ready = 0;
+ asc_dvc->queue_full_or_busy = 0;
+
+ if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
+ asc_dvc->cfg->isa_dma_channel = (uchar) AscGetIsaDmaChannel(iop_base);
+ asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED;
+ }
+ asc_dvc->cfg->lib_serial_no = ASC_LIB_SERIAL_NUMBER;
+ asc_dvc->cfg->lib_version = (ASC_LIB_VERSION_MAJOR << 8) |
+ ASC_LIB_VERSION_MINOR;
+ asc_dvc->int_count = 0L;
+ asc_dvc->req_count = 0L;
+ asc_dvc->busy_count = 0L;
+ asc_dvc->max_dma_count = AscGetMaxDmaCount(asc_dvc->bus_type);
+
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->cfg->sdtr_data[i] =
+ (uchar) (ASC_DEF_SDTR_OFFSET | (ASC_DEF_SDTR_INDEX << 4));
+ asc_dvc->cur_dvc_qng[i] = 0;
+ asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG;
+ asc_dvc->scsiq_busy_head[i] = (ASC_SCSI_Q dosfar *) 0L;
+ asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q dosfar *) 0L;
+ }
+ return (warn_code);
+}
+
+ushort
+AscInitFromAscDvcVar(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ PortAddr iop_base;
+ ushort cfg_msw;
+ ushort warn_code;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+
+ cfg_msw = AscGetChipCfgMsw(iop_base);
+
+ if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) {
+ cfg_msw &= (~(ASC_CFG_MSW_CLR_MASK));
+ warn_code |= ASC_WARN_CFG_MSW_RECOVER;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ }
+ if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) {
+ warn_code |= ASC_WARN_AUTO_CONFIG;
+
+ }
+ if ((asc_dvc->cfg->cmd_qng_enabled & asc_dvc->cfg->disc_enable) !=
+ asc_dvc->cfg->cmd_qng_enabled) {
+ asc_dvc->cfg->disc_enable = asc_dvc->cfg->cmd_qng_enabled;
+ warn_code |= ASC_WARN_CMD_QNG_CONFLICT;
+ }
+ if ((asc_dvc->bus_type & (ASC_IS_ISA | ASC_IS_VL)) != 0) {
+
+ if (AscSetChipIRQ(iop_base, asc_dvc->irq_no, asc_dvc->bus_type)
+ != asc_dvc->irq_no) {
+ asc_dvc->err_code |= ASC_IERR_SET_IRQ_NO;
+ }
+ }
+ if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) !=
+ asc_dvc->cfg->chip_scsi_id) {
+ asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID;
+ }
+ if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
+ AscSetIsaDmaChannel(iop_base, asc_dvc->cfg->isa_dma_channel);
+ AscSetIsaDmaSpeed(iop_base, asc_dvc->cfg->isa_dma_speed);
+ }
+ return (warn_code);
+}
+
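+/*
+ * AscInitFromEEP() - stop the chip, read the EEPROM configuration, verify
+ * its checksum, apply the settings to the driver state, clamp the queue
+ * limits, and write the (possibly corrected) configuration back.
+ */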
+ushort
+AscInitFromEEP(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ ASCEEP_CONFIG eep_config_buf;
+ ASCEEP_CONFIG dosfar *eep_config;
+ PortAddr iop_base;
+ ushort chksum;
+ ushort warn_code;
+ ushort cfg_msw, cfg_lsw;
+ uchar i;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0x00FE);
+
+ AscStopQueueExe(iop_base);
+ if ((AscStopChip(iop_base) == FALSE) ||
+ (AscGetChipScsiCtrl(iop_base) != 0)) {
+ asc_dvc->init_state |= ASC_INIT_RESET_SCSI_DONE;
+ AscResetChipAndScsiBus(iop_base);
+ DvcSleepMilliSecond((ulong) ((ushort) asc_dvc->scsi_reset_wait * 1000));
+ }
+ if (AscIsChipHalted(iop_base) == FALSE) {
+ asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
+ return (warn_code);
+ }
+ AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
+ if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
+ asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
+ return (warn_code);
+ }
+ eep_config = (ASCEEP_CONFIG dosfar *) & eep_config_buf;
+
+ cfg_msw = AscGetChipCfgMsw(iop_base);
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+
+ if (asc_dvc->bus_type & ASC_IS_PCI) {
+#if CC_DISABLE_PCI_PARITY_INT
+ cfg_msw &= 0xFFC0;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+#endif
+ if (asc_dvc->cfg->pci_device_id == ASC_PCI_DEVICE_ID_REV_A) {
+ asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ADD_ONE_BYTE;
+ }
+ }
+ if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) {
+ cfg_msw &= (~(ASC_CFG_MSW_CLR_MASK));
+ warn_code |= ASC_WARN_CFG_MSW_RECOVER;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ }
+ chksum = AscGetEEPConfig(iop_base, eep_config, asc_dvc->bus_type);
+
+ eep_config->cfg_msw &= (~(ASC_CFG_MSW_CLR_MASK));
+
+ if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) {
+ warn_code |= ASC_WARN_AUTO_CONFIG;
+
+ if (asc_dvc->cfg->chip_version == 3) {
+
+ if (eep_config->cfg_lsw != cfg_lsw) {
+ warn_code |= ASC_WARN_EEPROM_RECOVER;
+ eep_config->cfg_lsw = AscGetChipCfgLsw(iop_base);
+ }
+ if (eep_config->cfg_msw != cfg_msw) {
+ warn_code |= ASC_WARN_EEPROM_RECOVER;
+ eep_config->cfg_msw = AscGetChipCfgMsw(iop_base);
+ }
+ }
+ }
+ eep_config->cfg_lsw |= ASC_CFG0_HOST_INT_ON;
+ if (chksum != eep_config->chksum) {
+ warn_code |= ASC_WARN_EEPROM_CHKSUM;
+ }
+ asc_dvc->init_sdtr = eep_config->init_sdtr;
+ asc_dvc->cfg->disc_enable = eep_config->disc_enable;
+
+ asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng;
+ asc_dvc->cfg->isa_dma_speed = eep_config->isa_dma_speed;
+ asc_dvc->start_motor = eep_config->start_motor;
+ asc_dvc->dvc_cntl = eep_config->cntl;
+ asc_dvc->no_scam = eep_config->no_scam;
+
+ if ((asc_dvc->bus_type & ASC_IS_PCI) &&
+ !(asc_dvc->dvc_cntl & ASC_CNTL_NO_PCI_FIX_ASYN_XFER)) {
+ if ((asc_dvc->cfg->pci_device_id == ASC_PCI_DEVICE_ID_REV_A) ||
+ (asc_dvc->cfg->pci_device_id == ASC_PCI_DEVICE_ID_REV_B)) {
+ asc_dvc->pci_fix_asyn_xfer = ASC_ALL_DEVICE_BIT_SET;
+ }
+ } else if (asc_dvc->bus_type & ASC_IS_ISAPNP) {
+
+ if (AscGetChipVersion(iop_base, asc_dvc->bus_type)
+ == ASC_CHIP_VER_ASYN_BUG) {
+ asc_dvc->pci_fix_asyn_xfer = ASC_ALL_DEVICE_BIT_SET;
+ }
+ }
+ if (!AscTestExternalLram(asc_dvc)) {
+ if (asc_dvc->bus_type & ASC_IS_PCI) {
+ eep_config->cfg_msw |= 0x0800;
+ cfg_msw |= 0x0800;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ eep_config->max_total_qng = ASC_MAX_PCI_INRAM_TOTAL_QNG;
+ eep_config->max_tag_qng = ASC_MAX_INRAM_TAG_QNG;
+ }
+ } else {
+#if CC_TEST_RW_LRAM
+ asc_dvc->err_code |= AscTestLramEndian(iop_base);
+#endif
+ }
+ if (eep_config->max_total_qng < ASC_MIN_TOTAL_QNG) {
+ eep_config->max_total_qng = ASC_MIN_TOTAL_QNG;
+ }
+ if (eep_config->max_total_qng > ASC_MAX_TOTAL_QNG) {
+ eep_config->max_total_qng = ASC_MAX_TOTAL_QNG;
+ }
+ if (eep_config->max_tag_qng > eep_config->max_total_qng) {
+ eep_config->max_tag_qng = eep_config->max_total_qng;
+ }
+ if (eep_config->max_tag_qng < ASC_MIN_TAG_Q_PER_DVC) {
+ eep_config->max_tag_qng = ASC_MIN_TAG_Q_PER_DVC;
+ }
+ asc_dvc->max_total_qng = eep_config->max_total_qng;
+
+ if ((eep_config->use_cmd_qng & eep_config->disc_enable) !=
+ eep_config->use_cmd_qng) {
+ eep_config->disc_enable = eep_config->use_cmd_qng;
+ warn_code |= ASC_WARN_CMD_QNG_CONFLICT;
+ }
+ asc_dvc->irq_no = AscGetChipIRQ(iop_base, asc_dvc->bus_type);
+ eep_config->chip_scsi_id &= ASC_MAX_TID;
+ asc_dvc->cfg->chip_scsi_id = eep_config->chip_scsi_id;
+
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->cfg->sdtr_data[i] = eep_config->sdtr_data[i];
+ asc_dvc->cfg->max_tag_qng[i] = eep_config->max_tag_qng;
+ }
+
+ eep_config->cfg_msw = AscGetChipCfgMsw(iop_base);
+ if (AscSetEEPConfig(iop_base, eep_config, asc_dvc->bus_type) != 0) {
+ asc_dvc->err_code |= ASC_IERR_WRITE_EEPROM;
+ }
+ return (warn_code);
+}
+
+ushort
+AscInitMicroCodeVar(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ int i;
+ ushort warn_code;
+ PortAddr iop_base;
+ ulong phy_addr;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ AscWriteLramByte(iop_base, (ushort) (ASCV_SDTR_DATA_BEG + i),
+ asc_dvc->cfg->sdtr_data[i]);
+ }
+
+ AscInitQLinkVar(asc_dvc);
+
+ AscWriteLramByte(iop_base, ASCV_DISC_ENABLE_B,
+ asc_dvc->cfg->disc_enable);
+ AscWriteLramByte(iop_base, ASCV_HOSTSCSI_ID_B,
+ ASC_TID_TO_TARGET_ID(asc_dvc->cfg->chip_scsi_id));
+ if ((phy_addr = AscGetOnePhyAddr(asc_dvc,
+ (uchar dosfar *) asc_dvc->cfg->overrun_buf,
+ ASC_OVERRUN_BSIZE)) == 0L) {
+ asc_dvc->err_code |= ASC_IERR_GET_PHY_ADDR;
+ } else {
+
+ phy_addr = (phy_addr & 0xFFFFFFF8UL) + 8;
+ AscWriteLramDWord(iop_base, ASCV_OVERRUN_PADDR_D, phy_addr);
+ AscWriteLramDWord(iop_base, ASCV_OVERRUN_BSIZE_D,
+ ASC_OVERRUN_BSIZE - 8);
+ }
+
+ asc_dvc->cfg->mcode_date = AscReadLramWord(iop_base,
+ (ushort) ASCV_MC_DATE_W);
+ asc_dvc->cfg->mcode_version = AscReadLramWord(iop_base,
+ (ushort) ASCV_MC_VER_W);
+ AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
+ if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
+ asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
+ return (warn_code);
+ }
+ if (AscStartChip(iop_base) != 1) {
+ asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
+ return (warn_code);
+ }
+ return (warn_code);
+}
+
+void dosfar
+AscInitPollIsrCallBack(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_QDONE_INFO dosfar * scsi_done_q
+)
+{
+ ASC_SCSI_REQ_Q dosfar *scsiq_req;
+ ASC_ISR_CALLBACK asc_isr_callback;
+ uchar cp_sen_len;
+ uchar i;
+
+ if ((scsi_done_q->d2.flag & ASC_FLAG_SCSIQ_REQ) != 0) {
+ scsiq_req = (ASC_SCSI_REQ_Q dosfar *) scsi_done_q->d2.srb_ptr;
+ ASC_DBG2(3, "AscInitPollIsrCallBack: done_stat %x, host_stat %x\n",
+ scsiq_req->r3.done_stat, scsiq_req->r3.host_stat);
+ scsiq_req->r3.done_stat = scsi_done_q->d3.done_stat;
+ scsiq_req->r3.host_stat = scsi_done_q->d3.host_stat;
+ scsiq_req->r3.scsi_stat = scsi_done_q->d3.scsi_stat;
+ scsiq_req->r3.scsi_msg = scsi_done_q->d3.scsi_msg;
+ if ((scsi_done_q->d3.scsi_stat == SS_CHK_CONDITION) &&
+ (scsi_done_q->d3.host_stat == 0)) {
+ cp_sen_len = (uchar) ASC_MIN_SENSE_LEN;
+ if (scsiq_req->r1.sense_len < ASC_MIN_SENSE_LEN) {
+ cp_sen_len = (uchar) scsiq_req->r1.sense_len;
+ }
+ for (i = 0; i < cp_sen_len; i++) {
+ scsiq_req->sense[i] = scsiq_req->sense_ptr[i];
+ }
+ }
+ } else {
+ ASC_DBG1(3, "AscInitPollIsrCallBack: isr_callback %x\n",
+ (unsigned) asc_dvc->isr_callback);
+ if (asc_dvc->isr_callback != 0) {
+ asc_isr_callback = (ASC_ISR_CALLBACK) asc_dvc->isr_callback;
+ (*asc_isr_callback) (asc_dvc, scsi_done_q);
+ }
+ }
+ return;
+}
+
+int
+AscTestExternalLram(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ PortAddr iop_base;
+ ushort q_addr;
+ ushort saved_word;
+ int sta;
+
+ iop_base = asc_dvc->iop_base;
+ sta = 0;
+
+ q_addr = ASC_QNO_TO_QADDR(241);
+ saved_word = AscReadLramWord(iop_base, q_addr);
+ if (AscVerWriteLramWord(iop_base, q_addr, 0x55AA) == 0) {
+ sta = 1;
+ AscWriteLramWord(iop_base, q_addr, saved_word);
+ }
+ return (sta);
+}
+
+#if CC_TEST_LRAM_ENDIAN
+
+#endif
+
+int
+AscWriteEEPCmdReg(
+ PortAddr iop_base,
+ uchar cmd_reg
+)
+{
+ uchar read_back;
+ int retry;
+
+ retry = 0;
+ while (TRUE) {
+ AscSetChipEEPCmd(iop_base, cmd_reg);
+ DvcSleepMilliSecond(1);
+ read_back = AscGetChipEEPCmd(iop_base);
+ if (read_back == cmd_reg) {
+ return (1);
+ }
+ if (retry++ > ASC_EEP_MAX_RETRY) {
+ return (0);
+ }
+ }
+}
+
+int
+AscWriteEEPDataReg(
+ PortAddr iop_base,
+ ushort data_reg
+)
+{
+ ushort read_back;
+ int retry;
+
+ retry = 0;
+ while (TRUE) {
+ AscSetChipEEPData(iop_base, data_reg);
+ DvcSleepMilliSecond(1);
+ read_back = AscGetChipEEPData(iop_base);
+ if (read_back == data_reg) {
+ return (1);
+ }
+ if (retry++ > ASC_EEP_MAX_RETRY) {
+ return (0);
+ }
+ }
+}
+
+void
+AscWaitEEPRead(
+ void
+)
+{
+ DvcSleepMilliSecond(1);
+ return;
+}
+
+void
+AscWaitEEPWrite(
+ void
+)
+{
+ DvcSleepMilliSecond(20);
+ return;
+}
+
+ushort
+AscReadEEPWord(
+ PortAddr iop_base,
+ uchar addr
+)
+{
+ ushort read_wval;
+ uchar cmd_reg;
+
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
+ AscWaitEEPRead();
+ cmd_reg = addr | ASC_EEP_CMD_READ;
+ AscWriteEEPCmdReg(iop_base, cmd_reg);
+ AscWaitEEPRead();
+ read_wval = AscGetChipEEPData(iop_base);
+ AscWaitEEPRead();
+ return (read_wval);
+}
+
+ushort
+AscWriteEEPWord(
+ PortAddr iop_base,
+ uchar addr,
+ ushort word_val
+)
+{
+ ushort read_wval;
+
+ read_wval = AscReadEEPWord(iop_base, addr);
+ if (read_wval != word_val) {
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_ABLE);
+ AscWaitEEPRead();
+
+ AscWriteEEPDataReg(iop_base, word_val);
+ AscWaitEEPRead();
+
+ AscWriteEEPCmdReg(iop_base,
+ (uchar) ((uchar) ASC_EEP_CMD_WRITE | addr));
+ AscWaitEEPWrite();
+
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
+ AscWaitEEPRead();
+ return (AscReadEEPWord(iop_base, addr));
+ }
+ return (read_wval);
+}
+
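+/*
+ * Read the EEPROM configuration into 'cfg_buf'.  The configuration words are
+ * summed as they are read, the stored checksum word is placed in the final
+ * buffer slot, and the computed sum is returned for comparison by the caller.
+ */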
+ushort
+AscGetEEPConfig(
+ PortAddr iop_base,
+ ASCEEP_CONFIG dosfar * cfg_buf, ushort bus_type
+)
+{
+ ushort wval;
+ ushort sum;
+ ushort dosfar *wbuf;
+ int cfg_beg;
+ int cfg_end;
+ int s_addr;
+ int isa_pnp_wsize;
+
+ wbuf = (ushort dosfar *) cfg_buf;
+ sum = 0;
+
+ isa_pnp_wsize = 0;
+ for (s_addr = 0; s_addr < (2 + isa_pnp_wsize); s_addr++, wbuf++) {
+ wval = AscReadEEPWord(iop_base, (uchar) s_addr);
+ sum += wval;
+ *wbuf = wval;
+ }
+
+ if (bus_type & ASC_IS_VL) {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
+ } else {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR;
+ }
+
+ for (s_addr = cfg_beg; s_addr <= (cfg_end - 1);
+ s_addr++, wbuf++) {
+ wval = AscReadEEPWord(iop_base, (uchar) s_addr);
+ sum += wval;
+ *wbuf = wval;
+ }
+ *wbuf = AscReadEEPWord(iop_base, (uchar) s_addr);
+ return (sum);
+}
+
+int
+AscSetEEPConfigOnce(
+ PortAddr iop_base,
+ ASCEEP_CONFIG dosfar * cfg_buf, ushort bus_type
+)
+{
+ int n_error;
+ ushort dosfar *wbuf;
+ ushort sum;
+ int s_addr;
+ int cfg_beg;
+ int cfg_end;
+
+ wbuf = (ushort dosfar *) cfg_buf;
+ n_error = 0;
+ sum = 0;
+ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
+ sum += *wbuf;
+ if (*wbuf != AscWriteEEPWord(iop_base, (uchar) s_addr, *wbuf)) {
+ n_error++;
+ }
+ }
+ if (bus_type & ASC_IS_VL) {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
+ } else {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR;
+ }
+ for (s_addr = cfg_beg; s_addr <= (cfg_end - 1);
+ s_addr++, wbuf++) {
+ sum += *wbuf;
+ if (*wbuf != AscWriteEEPWord(iop_base, (uchar) s_addr, *wbuf)) {
+ n_error++;
+ }
+ }
+ *wbuf = sum;
+ if (sum != AscWriteEEPWord(iop_base, (uchar) s_addr, sum)) {
+ n_error++;
+ }
+ wbuf = (ushort dosfar *) cfg_buf;
+ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
+ if (*wbuf != AscReadEEPWord(iop_base, (uchar) s_addr)) {
+ n_error++;
+ }
+ }
+ for (s_addr = cfg_beg; s_addr <= cfg_end;
+ s_addr++, wbuf++) {
+ if (*wbuf != AscReadEEPWord(iop_base, (uchar) s_addr)) {
+ n_error++;
+ }
+ }
+ return (n_error);
+}
+
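+/*
+ * Write the configuration to the EEPROM, repeating the whole write-and-verify
+ * pass up to ASC_EEP_MAX_RETRY times; the number of words that still failed
+ * to verify is returned.
+ */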
+int
+AscSetEEPConfig(
+ PortAddr iop_base,
+ ASCEEP_CONFIG dosfar * cfg_buf, ushort bus_type
+)
+{
+ int retry;
+ int n_error;
+
+ retry = 0;
+ while (TRUE) {
+ if ((n_error = AscSetEEPConfigOnce(iop_base, cfg_buf,
+ bus_type)) == 0) {
+ break;
+ }
+ if (++retry > ASC_EEP_MAX_RETRY) {
+ break;
+ }
+ }
+ return (n_error);
+}
+
+int
+AscInitPollBegin(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+
+#if CC_INIT_INQ_DISPLAY
+ DvcDisplayString((uchar dosfar *) "\r\n");
+#endif
+
+ AscDisableInterrupt(iop_base);
+
+ asc_dvc->init_state |= ASC_INIT_STATE_BEG_INQUIRY;
+
+ AscWriteLramByte(iop_base, ASCV_DISC_ENABLE_B, 0x00);
+ asc_dvc->use_tagged_qng = 0;
+ asc_dvc->cfg->can_tagged_qng = 0;
+ asc_dvc->saved_ptr2func = (ulong) asc_dvc->isr_callback;
+ asc_dvc->isr_callback = ASC_GET_PTR2FUNC(AscInitPollIsrCallBack);
+ return (0);
+}
+
+int
+AscInitPollEnd(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ PortAddr iop_base;
+ int i;
+
+ iop_base = asc_dvc->iop_base;
+ asc_dvc->isr_callback = (Ptr2Func) asc_dvc->saved_ptr2func;
+ AscWriteLramByte(iop_base, ASCV_DISC_ENABLE_B,
+ asc_dvc->cfg->disc_enable);
+ AscWriteLramByte(iop_base, ASCV_USE_TAGGED_QNG_B,
+ asc_dvc->use_tagged_qng);
+ AscWriteLramByte(iop_base, ASCV_CAN_TAGGED_QNG_B,
+ asc_dvc->cfg->can_tagged_qng);
+
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ AscWriteLramByte(iop_base,
+ (ushort) ((ushort) ASCV_MAX_DVC_QNG_BEG + (ushort) i),
+ asc_dvc->max_dvc_qng[i]);
+ }
+
+ AscEnableInterrupt(iop_base);
+
+#if CC_INIT_INQ_DISPLAY
+ DvcDisplayString((uchar dosfar *) "\r\n");
+#endif
+ asc_dvc->init_state |= ASC_INIT_STATE_END_INQUIRY;
+
+ return (0);
+}
+
+int _asc_wait_slow_device_ = FALSE;
+
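+/*
+ * Probe one target/LUN during initialization: issue INQUIRY, adjust the
+ * tagged queuing and synchronous transfer bits from the INQUIRY data, wait
+ * for the unit to become ready and, for disk-like devices, read its capacity
+ * into 'cap_info'.  Returns 1 if a device answered, 0 if not, and ERR if the
+ * physical address of the sense buffer could not be obtained.
+ */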
+int
+AscInitPollTarget(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq,
+ ASC_SCSI_INQUIRY dosfar * inq,
+ ASC_CAP_INFO dosfar * cap_info
+)
+{
+ uchar tid_no, lun;
+ uchar dvc_type;
+ ASC_SCSI_BIT_ID_TYPE tid_bits;
+ int dvc_found;
+ int support_read_cap;
+ int tmp_disable_init_sdtr;
+ ulong phy_addr;
+
+ dvc_found = 0;
+ tmp_disable_init_sdtr = FALSE;
+ tid_bits = scsiq->r1.target_id;
+ lun = scsiq->r1.target_lun;
+ tid_no = ASC_TIX_TO_TID(scsiq->r2.target_ix);
+ if ((phy_addr = AscGetOnePhyAddr(asc_dvc,
+ (uchar dosfar *) scsiq->sense_ptr,
+ (ulong) scsiq->r1.sense_len)) == 0L) {
+ return (ERR);
+ }
+ scsiq->r1.sense_addr = phy_addr;
+ if (((asc_dvc->init_sdtr & tid_bits) != 0) &&
+ ((asc_dvc->sdtr_done & tid_bits) == 0)) {
+
+ asc_dvc->init_sdtr &= ~tid_bits;
+ tmp_disable_init_sdtr = TRUE;
+ }
+ ASC_DBG(3, "AscInitPollTarget: PollScsiInquiry()\n");
+ if (PollScsiInquiry(asc_dvc, scsiq, (uchar dosfar *) inq,
+ sizeof (ASC_SCSI_INQUIRY)) == 1) {
+ dvc_found = 1;
+ support_read_cap = TRUE;
+ dvc_type = inq->byte0.peri_dvc_type;
+ if (dvc_type != SCSI_TYPE_UNKNOWN) {
+ if ((dvc_type != SCSI_TYPE_DASD) &&
+ (dvc_type != SCSI_TYPE_WORM) &&
+ (dvc_type != SCSI_TYPE_CDROM) &&
+ (dvc_type != SCSI_TYPE_OPTMEM)) {
+ asc_dvc->start_motor &= ~tid_bits;
+ support_read_cap = FALSE;
+ }
+ if ((dvc_type != SCSI_TYPE_DASD) ||
+ inq->byte1.rmb) {
+
+ if (!_asc_wait_slow_device_) {
+ DvcSleepMilliSecond(3000 - ((int) tid_no * 250));
+ _asc_wait_slow_device_ = TRUE;
+ }
+ }
+#if CC_INIT_INQ_DISPLAY
+ AscDispInquiry(tid_no, lun, inq);
+#endif
+
+ if (lun == 0) {
+
+ if ((inq->byte3.rsp_data_fmt >= 2) ||
+ (inq->byte2.ansi_apr_ver >= 2)) {
+
+ if (inq->byte7.CmdQue) {
+ asc_dvc->cfg->can_tagged_qng |= tid_bits;
+ if (asc_dvc->cfg->cmd_qng_enabled & tid_bits) {
+ asc_dvc->use_tagged_qng |= tid_bits;
+ asc_dvc->max_dvc_qng[tid_no] =
+ asc_dvc->cfg->max_tag_qng[tid_no];
+ }
+ }
+ if (!inq->byte7.Sync) {
+
+ asc_dvc->init_sdtr &= ~tid_bits;
+ asc_dvc->sdtr_done &= ~tid_bits;
+ } else if (tmp_disable_init_sdtr) {
+
+ asc_dvc->init_sdtr |= tid_bits;
+ }
+ } else {
+
+ asc_dvc->init_sdtr &= ~tid_bits;
+ asc_dvc->sdtr_done &= ~tid_bits;
+ asc_dvc->use_tagged_qng &= ~tid_bits;
+ }
+ }
+ if (asc_dvc->pci_fix_asyn_xfer & tid_bits) {
+ if (!(asc_dvc->init_sdtr & tid_bits)) {
+
+ AscSetRunChipSynRegAtID(asc_dvc->iop_base, tid_no,
+ ASYN_SDTR_DATA_FIX_PCI_REV_AB);
+ }
+ }
+ ASC_DBG(3, "AscInitPollTarget: InitTestUnitReady()\n");
+ if (InitTestUnitReady(asc_dvc, scsiq) != 1) {
+
+ } else {
+ if ((cap_info != 0L) && support_read_cap) {
+ ASC_DBG(3, "AscInitPollTarget: PollScsiReadCapacity()\n");
+ if (PollScsiReadCapacity(asc_dvc, scsiq,
+ cap_info) != 1) {
+ cap_info->lba = 0L;
+ cap_info->blk_size = 0x0000;
+ } else {
+
+ }
+ }
+ }
+ } else {
+ asc_dvc->start_motor &= ~tid_bits;
+ }
+ } else {
+
+ }
+ return (dvc_found);
+}
+
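+/*
+ * Execute 'scsiq' and poll for its completion.  A busy target is retried up
+ * to ASC_MAX_INIT_BUSY_RETRY times; any other polling failure aborts the
+ * request.  The final done_stat is returned, or QD_WITH_ERROR if the command
+ * could not be started at all.
+ */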
+int
+PollQueueDone(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq,
+ int timeout_sec
+)
+{
+ int status;
+ int retry;
+
+ retry = 0;
+ do {
+ ASC_DBG(3, "PollQueueDone: AscExeScsiQueue()\n");
+ if ((status = AscExeScsiQueue(asc_dvc,
+ (ASC_SCSI_Q dosfar *) scsiq)) == 1) {
+ ASC_DBG(3, "PollQueueDone: AscPollQDone()\n");
+ if ((status = AscPollQDone(asc_dvc, scsiq,
+ timeout_sec)) != 1) {
+ ASC_DBG1(3, "PollQueueDone: AscPollQDone() status %x\n", status);
+ if (status == 0x80) {
+ if (retry++ > ASC_MAX_INIT_BUSY_RETRY) {
+ break;
+ }
+ scsiq->r3.done_stat = 0;
+ scsiq->r3.host_stat = 0;
+ scsiq->r3.scsi_stat = 0;
+ scsiq->r3.scsi_msg = 0;
+ DvcSleepMilliSecond(100);
+ continue;
+ }
+ scsiq->r3.done_stat = 0;
+ scsiq->r3.host_stat = 0;
+ scsiq->r3.scsi_stat = 0;
+ scsiq->r3.scsi_msg = 0;
+ ASC_DBG1(3, "PollQueueDone: AscAbortSRB() scsiq %x\n",
+ (unsigned) scsiq);
+
+ AscAbortSRB(asc_dvc, (ulong) scsiq);
+ }
+ ASC_DBG1(3, "PollQueueDone: done_stat %x\n", scsiq->r3.done_stat);
+ return (scsiq->r3.done_stat);
+ }
+ } while ((status == 0) || (status == 0x80));
+ ASC_DBG(3, "PollQueueDone: done_stat QD_WITH_ERROR\n");
+ return (scsiq->r3.done_stat = QD_WITH_ERROR);
+}
+
+int
+PollScsiInquiry(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq,
+ uchar dosfar * buf,
+ int buf_len
+)
+{
+ if (AscScsiInquiry(asc_dvc, scsiq, buf, buf_len) == ERR) {
+ return (scsiq->r3.done_stat = QD_WITH_ERROR);
+ }
+ return (PollQueueDone(asc_dvc, (ASC_SCSI_REQ_Q dosfar *) scsiq, 4));
+}
+
+int
+PollScsiReadCapacity(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq,
+ ASC_CAP_INFO dosfar * cap_info
+)
+{
+ ASC_CAP_INFO scsi_cap_info;
+ int status;
+
+ if (AscScsiReadCapacity(asc_dvc, scsiq,
+ (uchar dosfar *) & scsi_cap_info) == ERR) {
+ return (scsiq->r3.done_stat = QD_WITH_ERROR);
+ }
+ status = PollQueueDone(asc_dvc, (ASC_SCSI_REQ_Q dosfar *) scsiq, 8);
+ if (status == 1) {
+#if CC_LITTLE_ENDIAN_HOST
+ cap_info->lba = (ulong) * swapfarbuf4((uchar dosfar *) & scsi_cap_info.lba);
+ cap_info->blk_size = (ulong) * swapfarbuf4((uchar dosfar *) & scsi_cap_info.blk_size);
+#else
+ cap_info->lba = scsi_cap_info.lba;
+ cap_info->blk_size = scsi_cap_info.blk_size;
+#endif
+ return (scsiq->r3.done_stat);
+ }
+ return (scsiq->r3.done_stat = QD_WITH_ERROR);
+}
+
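+/*
+ * Reverse the four bytes at 'buf' in place and return it as a ulong pointer;
+ * used above to convert the big-endian READ CAPACITY data to host order on
+ * little-endian machines.
+ */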
+ulong dosfar *
+swapfarbuf4(
+ uchar dosfar * buf
+)
+{
+ uchar tmp;
+
+ tmp = buf[3];
+ buf[3] = buf[0];
+ buf[0] = tmp;
+
+ tmp = buf[1];
+ buf[1] = buf[2];
+ buf[2] = tmp;
+
+ return ((ulong dosfar *) buf);
+}
+
+int
+PollScsiTestUnitReady(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq
+)
+{
+ if (AscScsiTestUnitReady(asc_dvc, scsiq) == ERR) {
+ return (scsiq->r3.done_stat = QD_WITH_ERROR);
+ }
+ return (PollQueueDone(asc_dvc, (ASC_SCSI_REQ_Q dosfar *) scsiq, 12));
+}
+
+int
+PollScsiStartUnit(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq
+)
+{
+ if (AscScsiStartStopUnit(asc_dvc, scsiq, 1) == ERR) {
+ return (scsiq->r3.done_stat = QD_WITH_ERROR);
+ }
+ return (PollQueueDone(asc_dvc, (ASC_SCSI_REQ_Q dosfar *) scsiq, 40));
+}
+
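+/*
+ * Wait for the unit to become ready: on NOT READY sense the unit is started
+ * if the host controls its motor, otherwise the routine pauses briefly; a
+ * UNIT ATTENTION condition is also retried.  Returns 1 once the unit reports
+ * ready, 0 otherwise.
+ */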
+int
+InitTestUnitReady(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq
+)
+{
+ ASC_SCSI_BIT_ID_TYPE tid_bits;
+ int retry;
+ ASC_REQ_SENSE dosfar *sen;
+
+ retry = 0;
+ tid_bits = scsiq->r1.target_id;
+ while (retry++ < 2) {
+ ASC_DBG(3, "InitTestUnitReady: PollScsiTestUnitReady()\n");
+ PollScsiTestUnitReady(asc_dvc, scsiq);
+ ASC_DBG1(3, "InitTestUnitReady: done_stat %x\n", scsiq->r3.done_stat);
+ if (scsiq->r3.done_stat == 0x01) {
+ return (1);
+ } else if (scsiq->r3.done_stat == QD_WITH_ERROR) {
+ DvcSleepMilliSecond(100);
+
+ sen = (ASC_REQ_SENSE dosfar *) scsiq->sense_ptr;
+
+ if ((scsiq->r3.scsi_stat == SS_CHK_CONDITION) &&
+ ((sen->err_code & 0x70) != 0)) {
+
+ if (sen->sense_key == SCSI_SENKEY_NOT_READY) {
+
+ if (asc_dvc->start_motor & tid_bits) {
+ if (PollScsiStartUnit(asc_dvc, scsiq) == 1) {
+ retry = 0;
+ continue;
+ } else {
+ asc_dvc->start_motor &= ~tid_bits;
+ break;
+ }
+ } else {
+ DvcSleepMilliSecond(100);
+ }
+ } else if (sen->sense_key == SCSI_SENKEY_ATTENSION) {
+ DvcSleepMilliSecond(100);
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ } else if (scsiq->r3.done_stat == QD_ABORTED_BY_HOST) {
+ break;
+ } else {
+ break;
+ }
+ }
+ return (0);
+}
+
+#if CC_INIT_INQ_DISPLAY
+
+#endif
+
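+/*
+ * Poll up to 'timeout_sec' seconds for 'scsiq' to complete, calling AscISR()
+ * by hand whenever the chip halts or posts an interrupt.  Returns 1 when the
+ * request leaves QD_IN_PROGRESS, 0x80 if it finished with a target-busy
+ * status, 0 on timeout, and ERR if the chip reported an error code.
+ */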
+int
+AscPollQDone(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq,
+ int timeout_sec
+)
+{
+ int loop, loop_end;
+ int sta;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ loop = 0;
+ loop_end = timeout_sec * 100;
+ sta = 1;
+
+ while (TRUE) {
+ ASC_DBG4(3,
+ "AscPollQDone: loop %d, err_code %x, done_stat %x, scsi_stat %x\n",
+ loop, asc_dvc->err_code, scsiq->r3.done_stat, scsiq->r3.scsi_stat);
+ if (asc_dvc->err_code != 0) {
+ scsiq->r3.done_stat = QD_WITH_ERROR;
+ sta = ERR;
+ break;
+ }
+ if (scsiq->r3.done_stat != QD_IN_PROGRESS) {
+ if ((scsiq->r3.done_stat == QD_WITH_ERROR) &&
+ (scsiq->r3.scsi_stat == SS_TARGET_BUSY)) {
+ sta = 0x80;
+ break;
+ }
+ break;
+ }
+ DvcSleepMilliSecond(10);
+ if (loop++ > loop_end) {
+ sta = 0;
+ break;
+ }
+ if (AscIsChipHalted(iop_base)) {
+ AscISR(asc_dvc);
+ loop = 0;
+ } else {
+ ASC_DBG(3, "AscPollQDone: AscIsIntPending()\n");
+ if (AscIsIntPending(iop_base)) {
+ ASC_DBG(3, "AscPollQDone: AscISR()\n");
+ AscISR(asc_dvc);
+ }
+ }
+ }
+ ASC_DBG1(3, "AscPollQDone: sta %x\n", sta);
+ return (sta);
+}
+
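+/*
+ * The controller's LRAM is addressed one 16-bit word at a time, so the byte
+ * accessors below fetch the containing word and select or merge the requested
+ * byte according to the host byte order.
+ */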
+uchar
+AscReadLramByte(
+ PortAddr iop_base,
+ ushort addr
+)
+{
+ uchar byte_data;
+ ushort word_data;
+
+ if (isodd_word(addr)) {
+ AscSetChipLramAddr(iop_base, addr - 1);
+ word_data = AscGetChipLramData(iop_base);
+
+#if CC_LITTLE_ENDIAN_HOST
+ byte_data = (uchar) ((word_data >> 8) & 0xFF);
+#else
+ byte_data = (uchar) (word_data & 0xFF);
+#endif
+
+ } else {
+ AscSetChipLramAddr(iop_base, addr);
+ word_data = AscGetChipLramData(iop_base);
+
+#if CC_LITTLE_ENDIAN_HOST
+ byte_data = (uchar) (word_data & 0xFF);
+#else
+ byte_data = (uchar) ((word_data >> 8) & 0xFF);
+#endif
+
+ }
+ return (byte_data);
+}
+
+ushort
+AscReadLramWord(
+ PortAddr iop_base,
+ ushort addr
+)
+{
+ ushort word_data;
+
+ AscSetChipLramAddr(iop_base, addr);
+ word_data = AscGetChipLramData(iop_base);
+ return (word_data);
+}
+
+ulong
+AscReadLramDWord(
+ PortAddr iop_base,
+ ushort addr
+)
+{
+ ushort val_low, val_high;
+ ulong dword_data;
+
+ AscSetChipLramAddr(iop_base, addr);
+
+#if CC_LITTLE_ENDIAN_HOST
+ val_low = AscGetChipLramData(iop_base);
+
+ val_high = AscGetChipLramData(iop_base);
+#else
+ val_high = AscGetChipLramData(iop_base);
+ val_low = AscGetChipLramData(iop_base);
+#endif
+
+ dword_data = ((ulong) val_high << 16) | (ulong) val_low;
+ return (dword_data);
+}
+
+void
+AscWriteLramWord(
+ PortAddr iop_base,
+ ushort addr,
+ ushort word_val
+)
+{
+ AscSetChipLramAddr(iop_base, addr);
+ AscPutChipLramData(iop_base, word_val);
+ return;
+}
+
+void
+AscWriteLramDWord(
+ PortAddr iop_base,
+ ushort addr,
+ ulong dword_val
+)
+{
+ ushort word_val;
+
+ AscSetChipLramAddr(iop_base, addr);
+
+#if CC_LITTLE_ENDIAN_HOST
+ word_val = (ushort) dword_val;
+ AscPutChipLramData(iop_base, word_val);
+ word_val = (ushort) (dword_val >> 16);
+ AscPutChipLramData(iop_base, word_val);
+#else
+ word_val = (ushort) (dword_val >> 16);
+ AscPutChipLramData(iop_base, word_val);
+ word_val = (ushort) dword_val;
+ AscPutChipLramData(iop_base, word_val);
+#endif
+ return;
+}
+
+void
+AscWriteLramByte(
+ PortAddr iop_base,
+ ushort addr,
+ uchar byte_val
+)
+{
+ ushort word_data;
+
+ if (isodd_word(addr)) {
+ addr--;
+ word_data = AscReadLramWord(iop_base, addr);
+ word_data &= 0x00FF;
+ word_data |= (((ushort) byte_val << 8) & 0xFF00);
+ } else {
+ word_data = AscReadLramWord(iop_base, addr);
+ word_data &= 0xFF00;
+ word_data |= ((ushort) byte_val & 0x00FF);
+ }
+ AscWriteLramWord(iop_base, addr, word_data);
+ return;
+}
+
+int
+AscVerWriteLramWord(
+ PortAddr iop_base,
+ ushort addr,
+ ushort word_val
+)
+{
+ int sta;
+
+ sta = 0;
+ AscSetChipLramAddr(iop_base, addr);
+ AscPutChipLramData(iop_base, word_val);
+ AscSetChipLramAddr(iop_base, addr);
+ if (word_val != AscGetChipLramData(iop_base)) {
+ sta = ERR;
+ }
+ return (sta);
+}
+
+void
+AscMemWordCopyToLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ushort dosfar * s_buffer,
+ int words
+)
+{
+ AscSetChipLramAddr(iop_base, s_addr);
+ DvcOutPortWords(iop_base + IOP_RAM_DATA, s_buffer, words);
+ return;
+}
+
+void
+AscMemDWordCopyToLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ulong dosfar * s_buffer,
+ int dwords
+)
+{
+ AscSetChipLramAddr(iop_base, s_addr);
+ DvcOutPortDWords(iop_base + IOP_RAM_DATA, s_buffer, dwords);
+ return;
+}
+
+void
+AscMemWordCopyFromLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ushort dosfar * d_buffer,
+ int words
+)
+{
+ AscSetChipLramAddr(iop_base, s_addr);
+ DvcInPortWords(iop_base + IOP_RAM_DATA, d_buffer, words);
+ return;
+}
+
+ulong
+AscMemSumLramWord(
+ PortAddr iop_base,
+ ushort s_addr,
+ rint words
+)
+{
+ ulong sum;
+ int i;
+
+ sum = 0L;
+ for (i = 0; i < words; i++, s_addr += 2) {
+ sum += AscReadLramWord(iop_base, s_addr);
+ }
+ return (sum);
+}
+
+void
+AscMemWordSetLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ushort set_wval,
+ rint words
+)
+{
+ rint i;
+
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < words; i++) {
+ AscPutChipLramData(iop_base, set_wval);
+ }
+ return;
+}
+
+int
+AscScsiInquiry(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq,
+ uchar dosfar * buf, int buf_len
+)
+{
+ if (AscScsiSetupCmdQ(asc_dvc, scsiq, buf,
+ (ulong) buf_len) == ERR) {
+ return (scsiq->r3.done_stat = QD_WITH_ERROR);
+ }
+ scsiq->cdb[0] = (uchar) SCSICMD_Inquiry;
+ scsiq->cdb[1] = scsiq->r1.target_lun << 5;
+ scsiq->cdb[2] = 0;
+ scsiq->cdb[3] = 0;
+ scsiq->cdb[4] = buf_len;
+ scsiq->cdb[5] = 0;
+ scsiq->r2.cdb_len = 6;
+ return (0);
+}
+
+int
+AscScsiReadCapacity(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq,
+ uchar dosfar * info
+)
+{
+ if (AscScsiSetupCmdQ(asc_dvc, scsiq, info, 8L) == ERR) {
+ return (scsiq->r3.done_stat = QD_WITH_ERROR);
+ }
+ scsiq->cdb[0] = (uchar) SCSICMD_ReadCapacity;
+ scsiq->cdb[1] = scsiq->r1.target_lun << 5;
+ scsiq->cdb[2] = 0;
+ scsiq->cdb[3] = 0;
+ scsiq->cdb[4] = 0;
+ scsiq->cdb[5] = 0;
+ scsiq->cdb[6] = 0;
+ scsiq->cdb[7] = 0;
+ scsiq->cdb[8] = 0;
+ scsiq->cdb[9] = 0;
+ scsiq->r2.cdb_len = 10;
+ return (0);
+}
+
+int
+AscScsiTestUnitReady(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq
+)
+{
+ if (AscScsiSetupCmdQ(asc_dvc, scsiq, FNULLPTR,
+ (ulong) 0L) == ERR) {
+ return (scsiq->r3.done_stat = QD_WITH_ERROR);
+ }
+ scsiq->r1.cntl = (uchar) ASC_SCSIDIR_NODATA;
+ scsiq->cdb[0] = (uchar) SCSICMD_TestUnitReady;
+ scsiq->cdb[1] = scsiq->r1.target_lun << 5;
+ scsiq->cdb[2] = 0;
+ scsiq->cdb[3] = 0;
+ scsiq->cdb[4] = 0;
+ scsiq->cdb[5] = 0;
+ scsiq->r2.cdb_len = 6;
+ return (0);
+}
+
+int
+AscScsiStartStopUnit(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_REQ_Q dosfar * scsiq,
+ uchar op_mode
+)
+{
+ if (AscScsiSetupCmdQ(asc_dvc, scsiq, FNULLPTR, (ulong) 0L) == ERR) {
+ return (scsiq->r3.done_stat = QD_WITH_ERROR);
+ }
+ scsiq->r1.cntl = (uchar) ASC_SCSIDIR_NODATA;
+ scsiq->cdb[0] = (uchar) SCSICMD_StartStopUnit;
+ scsiq->cdb[1] = scsiq->r1.target_lun << 5;
+ scsiq->cdb[2] = 0;
+ scsiq->cdb[3] = 0;
+ scsiq->cdb[4] = op_mode;
+
+ scsiq->cdb[5] = 0;
+ scsiq->r2.cdb_len = 6;
+ return (0);
+}
diff --git a/i386/i386at/gpl/linux/scsi/advansys.h b/i386/i386at/gpl/linux/scsi/advansys.h
new file mode 100644
index 00000000..255279ca
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/advansys.h
@@ -0,0 +1,131 @@
+/* $Id: advansys.h,v 1.1.1.1 1997/02/25 21:27:46 thomas Exp $ */
+/*
+ * advansys.h - Linux Host Driver for AdvanSys SCSI Adapters
+ *
+ * Copyright (c) 1995-1996 Advanced System Products, Inc.
+ *
+ * This driver may be modified and freely distributed provided that
+ * the above copyright message and this comment are included in the
+ * distribution. The latest version of this driver is available at
+ * the AdvanSys FTP and BBS sites listed below.
+ *
+ * Please send questions, comments, and bug reports to:
+ * bobf@advansys.com (Bob Frey)
+ */
+
+#ifndef _ADVANSYS_H
+#define _ADVANSYS_H
+
+/* The driver can be used in Linux 1.2.X or 1.3.X. */
+#if !defined(LINUX_1_2) && !defined(LINUX_1_3)
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif /* LINUX_VERSION_CODE */
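+/* LINUX_VERSION_CODE packs the version as (major << 16) + (minor << 8) + patch,
+ * so 65536 + 3 * 256 is 0x010300, i.e. kernel 1.3.0; any later version selects
+ * the LINUX_1_3 interface. */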
+#if LINUX_VERSION_CODE > 65536 + 3 * 256
+#define LINUX_1_3
+#else /* LINUX_VERSION_CODE */
+#define LINUX_1_2
+#endif /* LINUX_VERSION_CODE */
+#endif /* !defined(LINUX_1_2) && !defined(LINUX_1_3) */
+
+/*
+ * Scsi_Host_Template function prototypes.
+ */
+int advansys_detect(Scsi_Host_Template *);
+int advansys_release(struct Scsi_Host *);
+const char *advansys_info(struct Scsi_Host *);
+int advansys_command(Scsi_Cmnd *);
+int advansys_queuecommand(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
+int advansys_abort(Scsi_Cmnd *);
+int advansys_reset(Scsi_Cmnd *);
+#ifdef LINUX_1_2
+int advansys_biosparam(Disk *, int, int[]);
+#else /* LINUX_1_3 */
+int advansys_biosparam(Disk *, kdev_t, int[]);
+extern struct proc_dir_entry proc_scsi_advansys;
+int advansys_proc_info(char *, char **, off_t, int, int, int);
+#endif /* LINUX_1_3 */
+
+/* init/main.c setup function */
+void advansys_setup(char *, int *);
+
+/*
+ * AdvanSys Host Driver Scsi_Host_Template (struct SHT) from hosts.h.
+ */
+#ifdef LINUX_1_2
+#define ADVANSYS { \
+ NULL, /* struct SHT *next */ \
+ NULL, /* int *usage_count */ \
+ "advansys", /* char *name */ \
+ advansys_detect, /* int (*detect)(struct SHT *) */ \
+ advansys_release, /* int (*release)(struct Scsi_Host *) */ \
+ advansys_info, /* const char *(*info)(struct Scsi_Host *) */ \
+ advansys_command, /* int (*command)(Scsi_Cmnd *) */ \
+ advansys_queuecommand, \
+ /* int (*queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)) */ \
+ advansys_abort, /* int (*abort)(Scsi_Cmnd *) */ \
+ advansys_reset, /* int (*reset)(Scsi_Cmnd *) */ \
+ NULL, /* int (*slave_attach)(int, int) */ \
+ advansys_biosparam, /* int (* bios_param)(Disk *, int, int []) */ \
+ /* \
+ * The following fields are set per adapter in advansys_detect(). \
+ */ \
+ 0, /* int can_queue */ \
+ 0, /* int this_id */ \
+ 0, /* short unsigned int sg_tablesize */ \
+ 0, /* short cmd_per_lun */ \
+ 0, /* unsigned char present */ \
+ /* \
+ * Because the driver may control an ISA adapter 'unchecked_isa_dma' \
+ * must be set. The flag will be cleared in advansys_detect for non-ISA \
+ * adapters. Refer to the comment in scsi_module.c for more information. \
+ */ \
+ 1, /* unsigned unchecked_isa_dma:1 */ \
+ /* \
+ * All adapters controlled by this driver are capable of large \
+ * scatter-gather lists. This apparently obviates any performance \
+ * gain provided by setting 'use_clustering'. \
+ */ \
+ DISABLE_CLUSTERING, /* unsigned use_clustering:1 */ \
+}
+#else /* LINUX_1_3 */
+#define ADVANSYS { \
+ NULL, /* struct SHT *next */ \
+ NULL, /* long *usage_count */ \
+ &proc_scsi_advansys, /* struct proc_dir_entry *proc_dir */ \
+ advansys_proc_info, \
+ /* int (*proc_info)(char *, char **, off_t, int, int, int) */ \
+ "advansys", /* const char *name */ \
+ advansys_detect, /* int (*detect)(struct SHT *) */ \
+ advansys_release, /* int (*release)(struct Scsi_Host *) */ \
+ advansys_info, /* const char *(*info)(struct Scsi_Host *) */ \
+ advansys_command, /* int (*command)(Scsi_Cmnd *) */ \
+ advansys_queuecommand, \
+ /* int (*queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)) */ \
+ advansys_abort, /* int (*abort)(Scsi_Cmnd *) */ \
+ advansys_reset, /* int (*reset)(Scsi_Cmnd *) */ \
+ NULL, /* int (*slave_attach)(int, int) */ \
+ advansys_biosparam, /* int (* bios_param)(Disk *, kdev_t, int []) */ \
+ /* \
+ * The following fields are set per adapter in advansys_detect(). \
+ */ \
+ 0, /* int can_queue */ \
+ 0, /* int this_id */ \
+ 0, /* short unsigned int sg_tablesize */ \
+ 0, /* short cmd_per_lun */ \
+ 0, /* unsigned char present */ \
+ /* \
+ * Because the driver may control an ISA adapter 'unchecked_isa_dma' \
+ * must be set. The flag will be cleared in advansys_detect for non-ISA \
+ * adapters. Refer to the comment in scsi_module.c for more information. \
+ */ \
+ 1, /* unsigned unchecked_isa_dma:1 */ \
+ /* \
+ * All adapters controlled by this driver are capable of large \
+ * scatter-gather lists. This apparently obviates any performance \
+ * gain provided by setting 'use_clustering'. \
+ */ \
+ DISABLE_CLUSTERING, /* unsigned use_clustering:1 */ \
+}
+#endif /* LINUX_1_3 */
+#endif /* _ADVANSYS_H */
diff --git a/i386/i386at/gpl/linux/scsi/aha152x.c b/i386/i386at/gpl/linux/scsi/aha152x.c
new file mode 100644
index 00000000..67dcd102
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/aha152x.c
@@ -0,0 +1,2985 @@
+/* aha152x.c -- Adaptec AHA-152x driver
+ * Author: Juergen E. Fischer, fischer@et-inf.fho-emden.de
+ * Copyright 1993, 1994, 1995 Juergen E. Fischer
+ *
+ *
+ * This driver is based on
+ * fdomain.c -- Future Domain TMC-16x0 driver
+ * which is
+ * Copyright 1992, 1993 Rickard E. Faith (faith@cs.unc.edu)
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ *
+ * $Id: aha152x.c,v 1.1.1.1 1997/02/25 21:27:46 thomas Exp $
+ *
+ * $Log: aha152x.c,v $
+ * Revision 1.1.1.1 1996/10/30 01:40:01 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1996/03/25 20:25:17 goel
+ * Linux driver merge.
+ *
+ * Revision 1.14 1996/01/17 15:11:20 fischer
+ * - fixed lockup in MESSAGE IN phase after reconnection
+ *
+ * Revision 1.13 1996/01/09 02:15:53 fischer
+ * - some cleanups
+ * - moved request_irq behind controller initialization
+ * (to avoid spurious interrupts)
+ *
+ * Revision 1.12 1995/12/16 12:26:07 fischer
+ * - barrier()'s added
+ * - configurable RESET delay added
+ *
+ * Revision 1.11 1995/12/06 21:18:35 fischer
+ * - some minor updates
+ *
+ * Revision 1.10 1995/07/22 19:18:45 fischer
+ * - support for 2 controllers
+ * - started synchronous data transfers (not working yet)
+ *
+ * Revision 1.9 1995/03/18 09:20:24 root
+ * - patches for PCMCIA and modules
+ *
+ * Revision 1.8 1995/01/21 22:07:19 root
+ * - snarf_region => request_region
+ * - aha152x_intr interface change
+ *
+ * Revision 1.7 1995/01/02 23:19:36 root
+ * - updated COMMAND_SIZE to cmd_len
+ * - changed sti() to restore_flags()
+ * - fixed some #ifdef which generated warnings
+ *
+ * Revision 1.6 1994/11/24 20:35:27 root
+ * - problem with odd number of bytes in fifo fixed
+ *
+ * Revision 1.5 1994/10/30 14:39:56 root
+ * - abort code fixed
+ * - debugging improved
+ *
+ * Revision 1.4 1994/09/12 11:33:01 root
+ * - irqaction to request_irq
+ * - abortion updated
+ *
+ * Revision 1.3 1994/08/04 13:53:05 root
+ * - updates for mid-level-driver changes
+ * - accept unexpected BUSFREE phase as error condition
+ * - parity check now configurable
+ *
+ * Revision 1.2 1994/07/03 12:56:36 root
+ * - cleaned up debugging code
+ * - more tweaking on reset delays
+ * - updated abort/reset code (pretty untested...)
+ *
+ * Revision 1.1 1994/05/28 21:18:49 root
+ * - update for mid-level interface change (abort-reset)
+ * - delays after resets adjusted for some slow devices
+ *
+ * Revision 1.0 1994/03/25 12:52:00 root
+ * - Fixed "more data than expected" problem
+ * - added new BIOS signatures
+ *
+ * Revision 0.102 1994/01/31 20:44:12 root
+ * - minor changes in insw/outsw handling
+ *
+ * Revision 0.101 1993/12/13 01:16:27 root
+ * - fixed STATUS phase (non-GOOD stati were dropped sometimes;
+ * fixes problems with CD-ROM sector size detection & media change)
+ *
+ * Revision 0.100 1993/12/10 16:58:47 root
+ * - fix for unsuccessful selections in case of non-continuous id assignments
+ * on the scsi bus.
+ *
+ * Revision 0.99 1993/10/24 16:19:59 root
+ * - fixed DATA IN (rare read errors gone)
+ *
+ * Revision 0.98 1993/10/17 12:54:44 root
+ * - fixed some recent fixes (shame on me)
+ * - moved initialization of scratch area to aha152x_queue
+ *
+ * Revision 0.97 1993/10/09 18:53:53 root
+ * - DATA IN fixed. Rarely left data in the fifo.
+ *
+ * Revision 0.96 1993/10/03 00:53:59 root
+ * - minor changes on DATA IN
+ *
+ * Revision 0.95 1993/09/24 10:36:01 root
+ * - change handling of MSGI after reselection
+ * - fixed sti/cli
+ * - minor changes
+ *
+ * Revision 0.94 1993/09/18 14:08:22 root
+ * - fixed bug in multiple outstanding command code
+ * - changed detection
+ * - support for kernel command line configuration
+ * - reset corrected
+ * - changed message handling
+ *
+ * Revision 0.93 1993/09/15 20:41:19 root
+ * - fixed bugs with multiple outstanding commands
+ *
+ * Revision 0.92 1993/09/13 02:46:33 root
+ * - multiple outstanding commands work (no problems with IBM drive)
+ *
+ * Revision 0.91 1993/09/12 20:51:46 root
+ * added multiple outstanding commands
+ * (some problem with this $%&? IBM device remain)
+ *
+ * Revision 0.9 1993/09/12 11:11:22 root
+ * - corrected auto-configuration
+ * - changed the auto-configuration (added some '#define's)
+ * - added support for dis-/reconnection
+ *
+ * Revision 0.8 1993/09/06 23:09:39 root
+ * - added support for the drive activity light
+ * - minor changes
+ *
+ * Revision 0.7 1993/09/05 14:30:15 root
+ * - improved phase detection
+ * - now using the new snarf_region code of 0.99pl13
+ *
+ * Revision 0.6 1993/09/02 11:01:38 root
+ * first public release; added some signatures and biosparam()
+ *
+ * Revision 0.5 1993/08/30 10:23:30 root
+ * fixed timing problems with my IBM drive
+ *
+ * Revision 0.4 1993/08/29 14:06:52 root
+ * fixed some problems with timeouts due incomplete commands
+ *
+ * Revision 0.3 1993/08/28 15:55:03 root
+ * writing data works too. mounted and worked on a dos partition
+ *
+ * Revision 0.2 1993/08/27 22:42:07 root
+ * reading data works. Mounted a msdos partition.
+ *
+ * Revision 0.1 1993/08/25 13:38:30 root
+ * first "damn thing doesn't work" version
+ *
+ * Revision 0.0 1993/08/14 19:54:25 root
+ * empty function bodies; detect() works.
+ *
+ *
+ **************************************************************************
+
+
+
+ DESCRIPTION:
+
+ This is the Linux low-level SCSI driver for Adaptec AHA-1520/1522
+ SCSI host adapters.
+
+
+ PER-DEFINE CONFIGURABLE OPTIONS:
+
+ AUTOCONF:
+ use configuration the controller reports (only 152x)
+
+ SKIP_BIOSTEST:
+ Don't test for BIOS signature (AHA-1510 or disabled BIOS)
+
+ SETUP0 { IOPORT, IRQ, SCSI_ID, RECONNECT, PARITY, SYNCHRONOUS, DELAY }:
+ override for the first controller
+
+ SETUP1 { IOPORT, IRQ, SCSI_ID, RECONNECT, PARITY, SYNCHRONOUS, DELAY }:
+ override for the second controller
+
+
+ LILO COMMAND LINE OPTIONS:
+
+ aha152x=<IOPORT>[,<IRQ>[,<SCSI-ID>[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>]]]]]]
+
+ The normal configuration can be overridden by specifying a command line.
+ When you do this, the BIOS test is skipped. Entered values have to be
+ valid (known). Don't use values that aren't supported under normal operation.
+ If you think that you need other values: contact me. For two controllers
+ use the aha152x statement twice.
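+
+ For example, "aha152x=0x340,11,7,1,1,0,100" selects the driver's built-in
+ defaults: port 0x340, IRQ 11, SCSI id 7, reconnection and parity enabled,
+ no synchronous negotiation and a reset delay of 100.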
+
+
+ REFERENCES USED:
+
+ "AIC-6260 SCSI Chip Specification", Adaptec Corporation.
+
+ "SCSI COMPUTER SYSTEM INTERFACE - 2 (SCSI-2)", X3T9.2/86-109 rev. 10h
+
+ "Writing a SCSI device driver for Linux", Rik Faith (faith@cs.unc.edu)
+
+ "Kernel Hacker's Guide", Michael K. Johnson (johnsonm@sunsite.unc.edu)
+
+ "Adaptec 1520/1522 User's Guide", Adaptec Corporation.
+
+ Michael K. Johnson (johnsonm@sunsite.unc.edu)
+
+ Drew Eckhardt (drew@cs.colorado.edu)
+
+ Eric Youngdale (ericy@cais.com)
+
+ special thanks to Eric Youngdale for freely(!) supplying the
+ documentation on the chip.
+
+ **************************************************************************/
+
+#ifdef PCMCIA
+#define MODULE
+#endif
+
+#include <linux/module.h>
+
+#ifdef PCMCIA
+#undef MODULE
+#endif
+
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "sd.h"
+#include "hosts.h"
+#include "constants.h"
+#include <asm/system.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+
+#include "aha152x.h"
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_aha152x = {
+ PROC_SCSI_AHA152X, 7, "aha152x",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* DEFINES */
+
+#ifdef MACH
+#define AUTOCONF
+#endif
+
+/* For PCMCIA cards, always use AUTOCONF */
+#if defined(PCMCIA) || defined(MODULE)
+#if !defined(AUTOCONF)
+#define AUTOCONF
+#endif
+#endif
+
+#if !defined(AUTOCONF) && !defined(SETUP0)
+#error define AUTOCONF or SETUP0
+#endif
+
+#if defined(DEBUG_AHA152X)
+
+#undef SKIP_PORTS /* don't display ports */
+
+#undef DEBUG_QUEUE /* debug queue() */
+#undef DEBUG_RESET /* debug reset() */
+#undef DEBUG_INTR /* debug intr() */
+#undef DEBUG_SELECTION /* debug selection part in intr() */
+#undef DEBUG_MSGO /* debug message out phase in intr() */
+#undef DEBUG_MSGI /* debug message in phase in intr() */
+#undef DEBUG_STATUS /* debug status phase in intr() */
+#undef DEBUG_CMD /* debug command phase in intr() */
+#undef DEBUG_DATAI /* debug data in phase in intr() */
+#undef DEBUG_DATAO /* debug data out phase in intr() */
+#undef DEBUG_ABORT /* debug abort() */
+#undef DEBUG_DONE /* debug done() */
+#undef DEBUG_BIOSPARAM /* debug biosparam() */
+
+#undef DEBUG_RACE /* debug race conditions */
+#undef DEBUG_PHASES /* debug phases (useful to trace) */
+#undef DEBUG_QUEUES /* debug reselection */
+
+/* recently used for debugging */
+#if 0
+#endif
+
+#define DEBUG_SELECTION
+#define DEBUG_PHASES
+#define DEBUG_RESET
+#define DEBUG_ABORT
+
+#define DEBUG_DEFAULT (debug_reset|debug_abort)
+
+#endif
+
+/* END OF DEFINES */
+
+extern long loops_per_sec;
+
+#define DELAY_DEFAULT 100
+
+/* some additional "phases" for getphase() */
+#define P_BUSFREE 1
+#define P_PARITY 2
+
+/* possible irq range */
+#define IRQ_MIN 9
+#define IRQ_MAX 12
+#define IRQS (IRQ_MAX-IRQ_MIN+1)
+
+enum {
+ not_issued = 0x0001,
+ in_selection = 0x0002,
+ disconnected = 0x0004,
+ aborted = 0x0008,
+ sent_ident = 0x0010,
+ in_other = 0x0020,
+ in_sync = 0x0040,
+ sync_ok = 0x0080,
+};
+
+/* set by aha152x_setup according to the command line */
+static int setup_count=0;
+static struct aha152x_setup {
+ int io_port;
+ int irq;
+ int scsiid;
+ int reconnect;
+ int parity;
+ int synchronous;
+ int delay;
+#ifdef DEBUG_AHA152X
+ int debug;
+#endif
+ char *conf;
+} setup[2];
+
+static struct Scsi_Host *aha152x_host[IRQS];
+
+#define HOSTDATA(shpnt) ((struct aha152x_hostdata *) &shpnt->hostdata)
+#define CURRENT_SC (HOSTDATA(shpnt)->current_SC)
+#define ISSUE_SC (HOSTDATA(shpnt)->issue_SC)
+#define DISCONNECTED_SC (HOSTDATA(shpnt)->disconnected_SC)
+#define DELAY (HOSTDATA(shpnt)->delay)
+#define SYNCRATE (HOSTDATA(shpnt)->syncrate[CURRENT_SC->target])
+#define MSG(i) (HOSTDATA(shpnt)->message[i])
+#define MSGLEN (HOSTDATA(shpnt)->message_len)
+#define ADDMSG(x) (MSG(MSGLEN++)=x)
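+/* ADDMSG() appends one byte to the host's outgoing MESSAGE OUT buffer and
+   advances its length counter. */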
+
+struct aha152x_hostdata {
+ Scsi_Cmnd *issue_SC;
+ Scsi_Cmnd *current_SC;
+ Scsi_Cmnd *disconnected_SC;
+ int aborting;
+ int abortion_complete;
+ int abort_result;
+ int commands;
+
+ int reconnect;
+ int parity;
+ int synchronous;
+ int delay;
+
+ unsigned char syncrate[8];
+
+ unsigned char message[256];
+ int message_len;
+
+#ifdef DEBUG_AHA152X
+ int debug;
+#endif
+};
+
+void aha152x_intr(int irq, struct pt_regs *);
+void aha152x_done(struct Scsi_Host *shpnt, int error);
+void aha152x_setup(char *str, int *ints);
+int aha152x_checksetup(struct aha152x_setup *setup);
+
+static void aha152x_reset_ports(struct Scsi_Host *shpnt);
+static void aha152x_panic(struct Scsi_Host *shpnt, char *msg);
+
+static void disp_ports(struct Scsi_Host *shpnt);
+static void show_command(Scsi_Cmnd *ptr);
+static void show_queues(struct Scsi_Host *shpnt);
+static void disp_enintr(struct Scsi_Host *shpnt);
+
+#if defined(DEBUG_RACE)
+static void enter_driver(const char *);
+static void leave_driver(const char *);
+#endif
+
+/* possible i/o addresses for the AIC-6260 */
+static unsigned short ports[] =
+{
+ 0x340, /* default first */
+ 0x140
+};
+#define PORT_COUNT (sizeof(ports) / sizeof(unsigned short))
+
+#if !defined(SKIP_BIOSTEST)
+/* possible locations for the Adaptec BIOS */
+static void *addresses[] =
+{
+ (void *) 0xdc000, /* default first */
+ (void *) 0xc8000,
+ (void *) 0xcc000,
+ (void *) 0xd0000,
+ (void *) 0xd4000,
+ (void *) 0xd8000,
+ (void *) 0xe0000,
+ (void *) 0xeb800, /* VTech Platinum SMP */
+ (void *) 0xf0000,
+};
+#define ADDRESS_COUNT (sizeof(addresses) / sizeof(void *))
+
+/* signatures for various AIC-6[23]60 based controllers.
+ The point in detecting signatures is to avoid useless
+ and maybe harmful probes on ports. I'm not sure that
+ all listed boards pass auto-configuration. For those
+ which fail the BIOS signature is obsolete, because
+ user intervention to supply the configuration is
+ needed anyway. */
+static struct signature {
+ char *signature;
+ int sig_offset;
+ int sig_length;
+} signatures[] =
+{
+ { "Adaptec AHA-1520 BIOS", 0x102e, 21 }, /* Adaptec 152x */
+ { "Adaptec ASW-B626 BIOS", 0x1029, 21 }, /* on-board controller */
+ { "Adaptec BIOS: ASW-B626", 0x0f, 22 }, /* on-board controller */
+ { "Adaptec ASW-B626 S2", 0x2e6c, 19 }, /* on-board controller */
+ { "Adaptec BIOS:AIC-6360", 0xc, 21 }, /* on-board controller */
+ { "ScsiPro SP-360 BIOS", 0x2873, 19 }, /* ScsiPro-Controller */
+ { "GA-400 LOCAL BUS SCSI BIOS", 0x102e, 26 }, /* Gigabyte Local-Bus-SCSI */
+ { "Adaptec BIOS:AVA-282X", 0xc, 21 }, /* Adaptec 282x */
+ { "Adaptec IBM Dock II SCSI", 0x2edd, 24 }, /* IBM Thinkpad Dock II */
+ { "Adaptec BIOS:AHA-1532P", 0x1c, 22 }, /* IBM Thinkpad Dock II SCSI */
+};
+#define SIGNATURE_COUNT (sizeof(signatures) / sizeof(struct signature))
+#endif
+
+
+static void do_pause(unsigned amount) /* Pause for amount*10 milliseconds */
+{
+ unsigned long the_time = jiffies + amount; /* 0.01 seconds per jiffy */
+
+ while (jiffies < the_time)
+ barrier();
+}
+
+/*
+ * queue services:
+ */
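+/* The issue and disconnected queues are singly linked lists of Scsi_Cmnd
+   structures chained through their host_scribble pointers. */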
+static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
+{
+ Scsi_Cmnd *end;
+
+ new_SC->host_scribble = (unsigned char *) NULL;
+ if(!*SC)
+ *SC=new_SC;
+ else
+ {
+ for(end=*SC; end->host_scribble; end = (Scsi_Cmnd *) end->host_scribble)
+ ;
+ end->host_scribble = (unsigned char *) new_SC;
+ }
+}
+
+static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd **SC)
+{
+ Scsi_Cmnd *ptr;
+
+ ptr=*SC;
+ if(ptr)
+ *SC= (Scsi_Cmnd *) (*SC)->host_scribble;
+ return ptr;
+}
+
+static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, int target, int lun)
+{
+ Scsi_Cmnd *ptr, *prev;
+
+ for(ptr=*SC, prev=NULL;
+ ptr && ((ptr->target!=target) || (ptr->lun!=lun));
+ prev = ptr, ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ ;
+
+ if(ptr)
+ if(prev)
+ prev->host_scribble = ptr->host_scribble;
+ else
+ *SC= (Scsi_Cmnd *) ptr->host_scribble;
+ return ptr;
+}
+
+/*
+ * read inbound byte and wait for ACK to get low
+ */
+static void make_acklow(struct Scsi_Host *shpnt)
+{
+ SETPORT(SXFRCTL0, CH1|SPIOEN);
+ GETPORT(SCSIDAT);
+ SETPORT(SXFRCTL0, CH1);
+
+ while(TESTHI(SCSISIG, ACKI))
+ barrier();
+}
+
+/*
+ * detect the current phase more reliably:
+ * phase is valid, when the target asserts REQ after we've deasserted ACK.
+ *
+ * return value is a valid phase or an error code.
+ *
+ * errorcodes:
+ * P_BUSFREE BUS FREE phase detected
+ * P_PARITY parity error in DATA phase
+ */
+static int getphase(struct Scsi_Host *shpnt)
+{
+ int phase, sstat1;
+
+ while(1)
+ {
+ do
+ {
+ while(!((sstat1 = GETPORT(SSTAT1)) & (BUSFREE|SCSIRSTI|REQINIT)))
+ barrier();
+ if(sstat1 & BUSFREE)
+ return P_BUSFREE;
+ if(sstat1 & SCSIRSTI)
+ {
+ printk("aha152x: RESET IN\n");
+ SETPORT(SSTAT1, SCSIRSTI);
+ }
+ }
+ while(TESTHI(SCSISIG, ACKI) || TESTLO(SSTAT1, REQINIT));
+
+ SETPORT(SSTAT1, CLRSCSIPERR);
+
+ phase = GETPORT(SCSISIG) & P_MASK ;
+
+ if(TESTHI(SSTAT1, SCSIPERR))
+ {
+ if((phase & (CDO|MSGO))==0) /* DATA phase */
+ return P_PARITY;
+
+ make_acklow(shpnt);
+ }
+ else
+ return phase;
+ }
+}
+
+/* called from init/main.c */
+void aha152x_setup(char *str, int *ints)
+{
+ if(setup_count>=2)
+ panic("aha152x: you can only configure up to two controllers\n");
+
+ setup[setup_count].conf = str;
+ setup[setup_count].io_port = ints[0] >= 1 ? ints[1] : 0x340;
+ setup[setup_count].irq = ints[0] >= 2 ? ints[2] : 11;
+ setup[setup_count].scsiid = ints[0] >= 3 ? ints[3] : 7;
+ setup[setup_count].reconnect = ints[0] >= 4 ? ints[4] : 1;
+ setup[setup_count].parity = ints[0] >= 5 ? ints[5] : 1;
+ setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 0 /* FIXME: 1 */;
+ setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
+#ifdef DEBUG_AHA152X
+ setup[setup_count].debug = ints[0] >= 8 ? ints[8] : DEBUG_DEFAULT;
+ if(ints[0]>8)
+ {
+ printk("aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
+ "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<DEBUG>]]]]]]]\n");
+#else
+ if(ints[0]>7)
+ {
+ printk("aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
+ "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>]]]]]]\n");
+#endif
+ }
+ else
+ setup_count++;
+}
+
+/*
+ Test, if port_base is valid.
+ */
+static int aha152x_porttest(int io_port)
+{
+ int i;
+
+ if(check_region(io_port, IO_RANGE))
+ return 0;
+
+ SETPORT(io_port+O_DMACNTRL1, 0); /* reset stack pointer */
+ for(i=0; i<16; i++)
+ SETPORT(io_port+O_STACK, i);
+
+ SETPORT(io_port+O_DMACNTRL1, 0); /* reset stack pointer */
+ for(i=0; i<16 && GETPORT(io_port+O_STACK)==i; i++)
+ ;
+
+ return(i==16);
+}
+
+int aha152x_checksetup(struct aha152x_setup *setup)
+{
+ int i;
+
+#ifndef PCMCIA
+ for(i=0; i<PORT_COUNT && (setup->io_port != ports[i]); i++)
+ ;
+
+ if(i==PORT_COUNT)
+ return 0;
+#endif
+
+ if(!aha152x_porttest(setup->io_port))
+ return 0;
+
+ if((setup->irq < IRQ_MIN) || (setup->irq > IRQ_MAX))
+ return 0;
+
+ if((setup->scsiid < 0) || (setup->scsiid > 7))
+ return 0;
+
+ if((setup->reconnect < 0) || (setup->reconnect > 1))
+ return 0;
+
+ if((setup->parity < 0) || (setup->parity > 1))
+ return 0;
+
+ if((setup->synchronous < 0) || (setup->synchronous > 1))
+ return 0;
+
+ return 1;
+}
+
+
+int aha152x_detect(Scsi_Host_Template * tpnt)
+{
+ int i, j, ok;
+#if defined(AUTOCONF)
+ aha152x_config conf;
+#endif
+
+ tpnt->proc_dir = &proc_scsi_aha152x;
+
+ for(i=0; i<IRQS; i++)
+ aha152x_host[i] = (struct Scsi_Host *) NULL;
+
+ if(setup_count)
+ {
+ printk("aha152x: processing commandline: ");
+
+ for(i=0; i<setup_count; i++)
+ if(!aha152x_checksetup(&setup[i]))
+ {
+ printk("\naha152x: %s\n", setup[i].conf);
+ printk("aha152x: invalid line (controller=%d)\n", i+1);
+ }
+
+ printk("ok\n");
+ }
+
+#ifdef SETUP0
+ if(setup_count<2)
+ {
+ struct aha152x_setup override = SETUP0;
+
+ if(setup_count==0 || (override.io_port != setup[0].io_port))
+ if(!aha152x_checksetup(&override))
+ {
+ printk("\naha152x: SETUP0 (0x%x, %d, %d, %d, %d, %d, %d) invalid\n",
+ override.io_port,
+ override.irq,
+ override.scsiid,
+ override.reconnect,
+ override.parity,
+ override.synchronous,
+ override.delay);
+ }
+ else
+ setup[setup_count++] = override;
+ }
+#endif
+
+#ifdef SETUP1
+ if(setup_count<2)
+ {
+ struct aha152x_setup override = SETUP1;
+
+ if(setup_count==0 || (override.io_port != setup[0].io_port))
+ if(!aha152x_checksetup(&override))
+ {
+ printk("\naha152x: SETUP1 (0x%x, %d, %d, %d, %d, %d, %d) invalid\n",
+ override.io_port,
+ override.irq,
+ override.scsiid,
+ override.reconnect,
+ override.parity,
+ override.synchronous,
+ override.delay);
+ }
+ else
+ setup[setup_count++] = override;
+ }
+#endif
+
+#if defined(AUTOCONF)
+ if(setup_count<2)
+ {
+#if !defined(SKIP_BIOSTEST)
+ ok=0;
+ for(i=0; i < ADDRESS_COUNT && !ok; i++)
+ for(j=0; (j < SIGNATURE_COUNT) && !ok; j++)
+ ok=!memcmp((void *) addresses[i]+signatures[j].sig_offset,
+ (void *) signatures[j].signature,
+ (int) signatures[j].sig_length);
+
+ if(!ok && setup_count==0)
+ return 0;
+
+ printk("aha152x: BIOS test: passed, ");
+#else
+ printk("aha152x: ");
+#endif /* !SKIP_BIOSTEST */
+
+ for(i=0; i<PORT_COUNT && setup_count<2; i++)
+ {
+ if((setup_count==1) && (setup[0].io_port == ports[i]))
+ continue;
+
+ if(aha152x_porttest(ports[i]))
+ {
+ setup[setup_count].io_port = ports[i];
+
+ conf.cf_port =
+ (GETPORT(ports[i]+O_PORTA)<<8) + GETPORT(ports[i]+O_PORTB);
+
+ setup[setup_count].irq = IRQ_MIN + conf.cf_irq;
+ setup[setup_count].scsiid = conf.cf_id;
+ setup[setup_count].reconnect = conf.cf_tardisc;
+ setup[setup_count].parity = !conf.cf_parity;
+ setup[setup_count].synchronous = 0 /* FIXME: conf.cf_syncneg */;
+ setup[setup_count].delay = DELAY_DEFAULT;
+#ifdef DEBUG_AHA152X
+ setup[setup_count].debug = DEBUG_DEFAULT;
+#endif
+ setup_count++;
+ }
+ }
+
+ printk("auto configuration: ok, ");
+ }
+#endif
+
+ printk("detection complete\n");
+
+ for(i=0; i<setup_count; i++)
+ {
+ struct Scsi_Host *shpnt;
+
+ shpnt = aha152x_host[setup[i].irq-IRQ_MIN] =
+ scsi_register(tpnt, sizeof(struct aha152x_hostdata));
+
+ shpnt->io_port = setup[i].io_port;
+ shpnt->n_io_port = IO_RANGE;
+ shpnt->irq = setup[i].irq;
+
+ ISSUE_SC = (Scsi_Cmnd *) NULL;
+ CURRENT_SC = (Scsi_Cmnd *) NULL;
+ DISCONNECTED_SC = (Scsi_Cmnd *) NULL;
+
+ HOSTDATA(shpnt)->reconnect = setup[i].reconnect;
+ HOSTDATA(shpnt)->parity = setup[i].parity;
+ HOSTDATA(shpnt)->synchronous = setup[i].synchronous;
+ HOSTDATA(shpnt)->delay = setup[i].delay;
+#ifdef DEBUG_AHA152X
+ HOSTDATA(shpnt)->debug = setup[i].debug;
+#endif
+
+ HOSTDATA(shpnt)->aborting = 0;
+ HOSTDATA(shpnt)->abortion_complete = 0;
+ HOSTDATA(shpnt)->abort_result = 0;
+ HOSTDATA(shpnt)->commands = 0;
+
+ HOSTDATA(shpnt)->message_len = 0;
+
+ for(j=0; j<8; j++)
+ HOSTDATA(shpnt)->syncrate[j] = 0;
+
+ SETPORT(SCSIID, setup[i].scsiid << 4);
+ shpnt->this_id=setup[i].scsiid;
+
+ if(setup[i].reconnect)
+ shpnt->hostt->can_queue=AHA152X_MAXQUEUE;
+
+ /* RESET OUT */
+ SETBITS(SCSISEQ, SCSIRSTO);
+ do_pause(30);
+ CLRBITS(SCSISEQ, SCSIRSTO);
+ do_pause(setup[i].delay);
+
+ aha152x_reset_ports(shpnt);
+
+ printk("aha152x%d: vital data: PORTBASE=0x%03x, IRQ=%d, SCSI ID=%d,"
+ " reconnect=%s, parity=%s, synchronous=%s, delay=%d\n",
+ i,
+ shpnt->io_port,
+ shpnt->irq,
+ shpnt->this_id,
+ HOSTDATA(shpnt)->reconnect ? "enabled" : "disabled",
+ HOSTDATA(shpnt)->parity ? "enabled" : "disabled",
+ HOSTDATA(shpnt)->synchronous ? "enabled" : "disabled",
+ HOSTDATA(shpnt)->delay);
+
+ request_region(shpnt->io_port, IO_RANGE, "aha152x"); /* Register */
+
+ /* not expecting any interrupts */
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, 0);
+
+ SETBITS(DMACNTRL0, INTEN);
+
+ ok = request_irq(setup[i].irq, aha152x_intr, SA_INTERRUPT, "aha152x");
+
+ if(ok<0)
+ {
+ if(ok == -EINVAL)
+ {
+ printk("aha152x%d: bad IRQ %d.\n", i, setup[i].irq);
+ printk(" Contact author.\n");
+ }
+ else
+ if(ok == -EBUSY)
+ printk("aha152x%d: IRQ %d already in use. Configure another.\n",
+ i, setup[i].irq);
+ else
+ {
+ printk("\naha152x%d: Unexpected error code on"
+ " requesting IRQ %d.\n", i, setup[i].irq);
+ printk(" Contact author.\n");
+ }
+ printk("aha152x: driver needs an IRQ.\n");
+ continue;
+ }
+ }
+
+ return (setup_count>0);
+}
+
+/*
+ * Queue a command and setup interrupts for a free bus.
+ */
+int aha152x_queue(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ struct Scsi_Host *shpnt = SCpnt->host;
+ unsigned long flags;
+
+#if defined(DEBUG_RACE)
+ enter_driver("queue");
+#else
+#if defined(DEBUG_QUEUE)
+ if(HOSTDATA(shpnt)->debug & debug_queue)
+ printk("aha152x: queue(), ");
+#endif
+#endif
+
+#if defined(DEBUG_QUEUE)
+ if(HOSTDATA(shpnt)->debug & debug_queue)
+ {
+ printk("SCpnt (target = %d lun = %d cmnd = ",
+ SCpnt->target, SCpnt->lun);
+ print_command(SCpnt->cmnd);
+ printk(", cmd_len=%d, pieces = %d size = %u), ",
+ SCpnt->cmd_len, SCpnt->use_sg, SCpnt->request_bufflen);
+ disp_ports(shpnt);
+ }
+#endif
+
+ SCpnt->scsi_done = done;
+
+ /* setup scratch area
+ SCp.ptr : buffer pointer
+ SCp.this_residual : buffer length
+ SCp.buffer : next buffer
+ SCp.buffers_residual : left buffers in list
+ SCp.phase : current state of the command */
+ SCpnt->SCp.phase = not_issued;
+ if (SCpnt->use_sg)
+ {
+ SCpnt->SCp.buffer =
+ (struct scatterlist *) SCpnt->request_buffer;
+ SCpnt->SCp.ptr = SCpnt->SCp.buffer->address;
+ SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
+ }
+ else
+ {
+ SCpnt->SCp.ptr = (char *)SCpnt->request_buffer;
+ SCpnt->SCp.this_residual = SCpnt->request_bufflen;
+ SCpnt->SCp.buffer = NULL;
+ SCpnt->SCp.buffers_residual = 0;
+ }
+
+ SCpnt->SCp.Status = CHECK_CONDITION;
+ SCpnt->SCp.Message = 0;
+ SCpnt->SCp.have_data_in = 0;
+ SCpnt->SCp.sent_command = 0;
+
+ /* Turn led on, when this is the first command. */
+ save_flags(flags);
+ cli();
+ HOSTDATA(shpnt)->commands++;
+ if(HOSTDATA(shpnt)->commands==1)
+ SETPORT(PORTA, 1);
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("i+ (%d), ", HOSTDATA(shpnt)->commands);
+#endif
+ append_SC(&ISSUE_SC, SCpnt);
+
+ /* Enable bus free interrupt, when we aren't currently on the bus */
+ if(!CURRENT_SC)
+ {
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+ }
+ restore_flags(flags);
+
+#if defined(DEBUG_RACE)
+ leave_driver("queue");
+#endif
+
+ return 0;
+}
+
+/*
+ * We only support commands in interrupt-driven fashion
+ */
+int aha152x_command(Scsi_Cmnd *SCpnt)
+{
+ printk("aha152x: interrupt driven driver; use aha152x_queue()\n");
+ return -1;
+}
+
+/*
+ * Abort a queued command
+ * (commands that are on the bus can't be aborted easily)
+ */
+int aha152x_abort(Scsi_Cmnd *SCpnt)
+{
+ struct Scsi_Host *shpnt = SCpnt->host;
+ unsigned long flags;
+ Scsi_Cmnd *ptr, *prev;
+
+ save_flags(flags);
+ cli();
+
+#if defined(DEBUG_ABORT)
+ if(HOSTDATA(shpnt)->debug & debug_abort)
+ {
+ printk("aha152x: abort(), SCpnt=0x%08x, ", (unsigned int) SCpnt);
+ show_queues(shpnt);
+ }
+#endif
+
+ /* look for command in issue queue */
+ for(ptr=ISSUE_SC, prev=NULL;
+ ptr && ptr!=SCpnt;
+ prev=ptr, ptr=(Scsi_Cmnd *) ptr->host_scribble)
+ ;
+
+ if(ptr)
+ {
+ /* dequeue */
+ if(prev)
+ prev->host_scribble = ptr->host_scribble;
+ else
+ ISSUE_SC = (Scsi_Cmnd *) ptr->host_scribble;
+ restore_flags(flags);
+
+ ptr->host_scribble = NULL;
+ ptr->result = DID_ABORT << 16;
+ ptr->scsi_done(ptr);
+ return SCSI_ABORT_SUCCESS;
+ }
+
+ /* if the bus is busy or a command is currently processed,
+ we can't do anything more */
+ if (TESTLO(SSTAT1, BUSFREE) || (CURRENT_SC && CURRENT_SC!=SCpnt))
+ {
+ /* fail abortion, if bus is busy */
+
+ if(!CURRENT_SC)
+ printk("bus busy w/o current command, ");
+
+ restore_flags(flags);
+ return SCSI_ABORT_BUSY;
+ }
+
+ /* bus is free */
+
+ if(CURRENT_SC)
+ {
+ /* target entered bus free before COMMAND COMPLETE, nothing to abort */
+ restore_flags(flags);
+ CURRENT_SC->result = DID_ERROR << 16;
+ CURRENT_SC->scsi_done(CURRENT_SC);
+ CURRENT_SC = (Scsi_Cmnd *) NULL;
+ return SCSI_ABORT_SUCCESS;
+ }
+
+ /* look for command in disconnected queue */
+ for(ptr=DISCONNECTED_SC, prev=NULL;
+ ptr && ptr!=SCpnt;
+ prev=ptr, ptr=(Scsi_Cmnd *) ptr->host_scribble)
+ ;
+
+ if(ptr)
+ if(!HOSTDATA(shpnt)->aborting)
+ {
+ /* dequeue */
+ if(prev)
+ prev->host_scribble = ptr->host_scribble;
+ else
+ DISCONNECTED_SC = (Scsi_Cmnd *) ptr->host_scribble;
+
+ /* set command current and initiate selection,
+ let the interrupt routine take care of the abortion */
+ CURRENT_SC = ptr;
+ ptr->SCp.phase = in_selection|aborted;
+ SETPORT(SCSIID, (shpnt->this_id << OID_) | CURRENT_SC->target);
+
+ ADDMSG(ABORT);
+
+ /* enable interrupts for SELECTION OUT DONE and SELECTION TIME OUT */
+ SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
+ SETPORT(SIMODE1, ENSELTIMO);
+
+ /* Enable SELECTION OUT sequence */
+ SETBITS(SCSISEQ, ENSELO | ENAUTOATNO);
+
+ SETBITS(DMACNTRL0, INTEN);
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_SUCCESS;
+ HOSTDATA(shpnt)->aborting++;
+ HOSTDATA(shpnt)->abortion_complete=0;
+
+ sti(); /* Hi Eric, guess what ;-) */
+
+ /* sleep until the abortion is complete */
+ while(!HOSTDATA(shpnt)->abortion_complete)
+ barrier();
+ HOSTDATA(shpnt)->aborting=0;
+ return HOSTDATA(shpnt)->abort_result;
+ }
+ else
+ {
+ /* we're already aborting a command */
+ restore_flags(flags);
+ return SCSI_ABORT_BUSY;
+ }
+
+ /* command wasn't found */
+ printk("command not found\n");
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+}
+
+/*
+ * Restore default values to the AIC-6260 registers and reset the fifos
+ */
+static void aha152x_reset_ports(struct Scsi_Host *shpnt)
+{
+ /* disable interrupts */
+ SETPORT(DMACNTRL0, RSTFIFO);
+
+ SETPORT(SCSISEQ, 0);
+
+ SETPORT(SXFRCTL1, 0);
+ SETPORT(SCSISIG, 0);
+ SETPORT(SCSIRATE, 0);
+
+ /* clear all interrupt conditions */
+ SETPORT(SSTAT0, 0x7f);
+ SETPORT(SSTAT1, 0xef);
+
+ SETPORT(SSTAT4, SYNCERR|FWERR|FRERR);
+
+ SETPORT(DMACNTRL0, 0);
+ SETPORT(DMACNTRL1, 0);
+
+ SETPORT(BRSTCNTRL, 0xf1);
+
+ /* clear SCSI fifo and transfer count */
+ SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
+ SETPORT(SXFRCTL0, CH1);
+
+ /* enable interrupts */
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+}
+
+/*
+ * Reset registers, reset a hanging bus and
+ * kill active and disconnected commands for target w/o soft reset
+ */
+int aha152x_reset(Scsi_Cmnd *SCpnt)
+{
+ struct Scsi_Host *shpnt = SCpnt->host;
+ unsigned long flags;
+ Scsi_Cmnd *ptr, *prev, *next;
+
+ aha152x_reset_ports(shpnt);
+
+ /* Reset, if bus hangs */
+ if(TESTLO(SSTAT1, BUSFREE))
+ {
+ CLRBITS(DMACNTRL0, INTEN);
+
+#if defined(DEBUG_RESET)
+ if(HOSTDATA(shpnt)->debug & debug_reset)
+ {
+ printk("aha152x: reset(), bus not free: SCSI RESET OUT\n");
+ show_queues(shpnt);
+ }
+#endif
+
+ ptr=CURRENT_SC;
+ if(ptr && !ptr->device->soft_reset)
+ {
+ ptr->host_scribble = NULL;
+ ptr->result = DID_RESET << 16;
+ ptr->scsi_done(CURRENT_SC);
+ CURRENT_SC=NULL;
+ }
+
+ save_flags(flags);
+ cli();
+ prev=NULL; ptr=DISCONNECTED_SC;
+ while(ptr)
+ {
+ if(!ptr->device->soft_reset)
+ {
+ if(prev)
+ prev->host_scribble = ptr->host_scribble;
+ else
+ DISCONNECTED_SC = (Scsi_Cmnd *) ptr->host_scribble;
+
+ next = (Scsi_Cmnd *) ptr->host_scribble;
+
+ ptr->host_scribble = NULL;
+ ptr->result = DID_RESET << 16;
+ ptr->scsi_done(ptr);
+
+ ptr = next;
+ }
+ else
+ {
+ prev=ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble;
+ }
+ }
+ restore_flags(flags);
+
+#if defined(DEBUG_RESET)
+ if(HOSTDATA(shpnt)->debug & debug_reset)
+ {
+ printk("commands on targets w/ soft-resets:\n");
+ show_queues(shpnt);
+ }
+#endif
+
+ /* RESET OUT */
+ SETPORT(SCSISEQ, SCSIRSTO);
+ do_pause(30);
+ SETPORT(SCSISEQ, 0);
+ do_pause(DELAY);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+
+ SETPORT(DMACNTRL0, INTEN);
+ }
+
+ return SCSI_RESET_SUCCESS;
+}
+
+/*
+ * Return the "logical geometry"
+ */
+int aha152x_biosparam(Scsi_Disk * disk, kdev_t dev, int *info_array)
+{
+ int size = disk->capacity;
+
+#if defined(DEBUG_BIOSPARAM)
+ if(HOSTDATA(shpnt)->debug & debug_biosparam)
+ printk("aha152x_biosparam: dev=%s, size=%d, ", kdevname(dev), size);
+#endif
+
+/* I took this from other SCSI drivers, since it provides
+ the correct data for my devices. */
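+/* 64 heads * 32 sectors = 2048 sectors per cylinder, so the cylinder count
+   is the capacity (in sectors) shifted right by 11. */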
+ info_array[0]=64;
+ info_array[1]=32;
+ info_array[2]=size>>11;
+
+#if defined(DEBUG_BIOSPARAM)
+ if(HOSTDATA(shpnt)->debug & debug_biosparam)
+ {
+ printk("bios geometry: head=%d, sec=%d, cyl=%d\n",
+ info_array[0], info_array[1], info_array[2]);
+ printk("WARNING: check, if the bios geometry is correct.\n");
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * Internal done function
+ */
+void aha152x_done(struct Scsi_Host *shpnt, int error)
+{
+ unsigned long flags;
+ Scsi_Cmnd *done_SC;
+
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done)
+ {
+ printk("\naha152x: done(), ");
+ disp_ports(shpnt);
+ }
+#endif
+
+ if (CURRENT_SC)
+ {
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done)
+ printk("done(%x), ", error);
+#endif
+
+ save_flags(flags);
+ cli();
+
+ done_SC = CURRENT_SC;
+ CURRENT_SC = NULL;
+
+ /* turn led off, when no commands are in the driver */
+ HOSTDATA(shpnt)->commands--;
+ if(!HOSTDATA(shpnt)->commands)
+ SETPORT(PORTA, 0); /* turn led off */
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("ok (%d), ", HOSTDATA(shpnt)->commands);
+#endif
+ restore_flags(flags);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+
+#if defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & debug_phases)
+ printk("BUS FREE loop, ");
+#endif
+ while(TESTLO(SSTAT1, BUSFREE))
+ barrier();
+#if defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & debug_phases)
+ printk("BUS FREE\n");
+#endif
+
+ done_SC->result = error;
+ if(done_SC->scsi_done)
+ {
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done)
+ printk("calling scsi_done, ");
+#endif
+ done_SC->scsi_done(done_SC);
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done)
+ printk("done returned, ");
+#endif
+ }
+ else
+ panic("aha152x: current_SC->scsi_done() == NULL");
+ }
+ else
+ aha152x_panic(shpnt, "done() called outside of command");
+}
+
+/*
+ * Interrupts handler (main routine of the driver)
+ */
+void aha152x_intr(int irqno, struct pt_regs * regs)
+{
+ struct Scsi_Host *shpnt = aha152x_host[irqno-IRQ_MIN];
+ unsigned int flags;
+ int done=0, phase;
+
+#if defined(DEBUG_RACE)
+ enter_driver("intr");
+#else
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ printk("\naha152x: intr(), ");
+#endif
+#endif
+
+ /* no more interrupts from the controller while we're busy.
+ INTEN has to be restored, when we're ready to leave
+ intr(). To avoid race conditions we have to return
+ immediately afterwards. */
+ CLRBITS(DMACNTRL0, INTEN);
+ sti(); /* Yes, sti() really needs to be here */
+
+ /* disconnected target is trying to reconnect.
+ Only possible, if we have disconnected nexuses and
+ nothing is occupying the bus.
+ */
+ if(TESTHI(SSTAT0, SELDI) &&
+ DISCONNECTED_SC &&
+ (!CURRENT_SC || (CURRENT_SC->SCp.phase & in_selection)) )
+ {
+ int identify_msg, target, i;
+
+ /* Avoid conflicts when a target reconnects
+ while we are trying to connect to another. */
+ if(CURRENT_SC)
+ {
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("i+, ");
+#endif
+ save_flags(flags);
+ cli();
+ append_SC(&ISSUE_SC, CURRENT_SC);
+ CURRENT_SC=NULL;
+ restore_flags(flags);
+ }
+
+ /* disable sequences */
+ SETPORT(SCSISEQ, 0);
+ SETPORT(SSTAT0, CLRSELDI);
+ SETPORT(SSTAT1, CLRBUSFREE);
+
+#if defined(DEBUG_QUEUES) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_queues|debug_phases))
+ printk("reselected, ");
+#endif
+
+ i = GETPORT(SELID) & ~(1 << shpnt->this_id);
+ target=0;
+ if(i)
+ for(; (i & 1)==0; target++, i>>=1)
+ ;
+ else
+ aha152x_panic(shpnt, "reconnecting target unknown");
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("SELID=%02x, target=%d, ", GETPORT(SELID), target);
+#endif
+ SETPORT(SCSIID, (shpnt->this_id << OID_) | target);
+ SETPORT(SCSISEQ, ENRESELI);
+
+ if(TESTLO(SSTAT0, SELDI))
+ aha152x_panic(shpnt, "RESELI failed");
+
+ SETPORT(SCSIRATE, HOSTDATA(shpnt)->syncrate[target]&0x7f);
+
+ SETPORT(SCSISIG, P_MSGI);
+
+ /* Get identify message */
+ if((i=getphase(shpnt))!=P_MSGI)
+ {
+ printk("target doesn't enter MSGI to identify (phase=%02x)\n", i);
+ aha152x_panic(shpnt, "unknown lun");
+ }
+ SETPORT(SCSISEQ, 0);
+
+ SETPORT(SXFRCTL0, CH1);
+
+ identify_msg = GETPORT(SCSIBUS);
+
+ if(!(identify_msg & IDENTIFY_BASE))
+ {
+ printk("target=%d, inbound message (%02x) != IDENTIFY\n",
+ target, identify_msg);
+ aha152x_panic(shpnt, "unknown lun");
+ }
+
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("identify=%02x, lun=%d, ", identify_msg, identify_msg & 0x3f);
+#endif
+
+ save_flags(flags);
+ cli();
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("d-, ");
+#endif
+ CURRENT_SC = remove_SC(&DISCONNECTED_SC,
+ target,
+ identify_msg & 0x3f);
+
+ if(!CURRENT_SC)
+ {
+ printk("lun=%d, ", identify_msg & 0x3f);
+ aha152x_panic(shpnt, "no disconnected command for that lun");
+ }
+
+ CURRENT_SC->SCp.phase &= ~disconnected;
+ restore_flags(flags);
+
+ make_acklow(shpnt);
+ if(getphase(shpnt)!=P_MSGI) {
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+#if defined(DEBUG_RACE)
+ leave_driver("(reselected) intr");
+#endif
+ SETBITS(DMACNTRL0, INTEN);
+ return;
+ }
+ }
+
+ /* Check, if we aren't busy with a command */
+ if(!CURRENT_SC)
+ {
+ /* bus is free to issue a queued command */
+ if(TESTHI(SSTAT1, BUSFREE) && ISSUE_SC)
+ {
+ save_flags(flags);
+ cli();
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("i-, ");
+#endif
+ CURRENT_SC = remove_first_SC(&ISSUE_SC);
+ restore_flags(flags);
+
+#if defined(DEBUG_INTR) || defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_selection|debug_phases))
+ printk("issuing command, ");
+#endif
+ CURRENT_SC->SCp.phase = in_selection;
+
+#if defined(DEBUG_INTR) || defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_selection|debug_phases))
+ printk("selecting %d, ", CURRENT_SC->target);
+#endif
+ SETPORT(SCSIID, (shpnt->this_id << OID_) | CURRENT_SC->target);
+
+ /* enable parity checking (if configured) and the selection timeout timer */
+ SETPORT(SXFRCTL1, HOSTDATA(shpnt)->parity ? (ENSPCHK|ENSTIMER) : ENSTIMER);
+
+ /* enable interrupts for SELECTION OUT DONE and SELECTION TIME OUT */
+ SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
+ SETPORT(SIMODE1, ENSELTIMO);
+
+ /* Enable SELECTION OUT sequence */
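+ /* (ENAUTOATNO also raises ATN during selection, so the target will
+    enter MESSAGE OUT right after it has been selected) */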
+ SETBITS(SCSISEQ, ENSELO | ENAUTOATNO);
+
+ }
+ else
+ {
+ /* No command we are busy with and no new to issue */
+ printk("aha152x: ignoring spurious interrupt, nothing to do\n");
+ if(TESTHI(DMACNTRL0, SWINT)) {
+ printk("aha152x: SWINT is set! Why?\n");
+ CLRBITS(DMACNTRL0, SWINT);
+ }
+ show_queues(shpnt);
+ }
+
+#if defined(DEBUG_RACE)
+ leave_driver("(selecting) intr");
+#endif
+ SETBITS(DMACNTRL0, INTEN);
+ return;
+ }
+
+ /* the bus is busy with something */
+
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ disp_ports(shpnt);
+#endif
+
+ /* we are waiting for the result of a selection attempt */
+ if(CURRENT_SC->SCp.phase & in_selection)
+ {
+ if(TESTLO(SSTAT1, SELTO))
+ /* no timeout */
+ if(TESTHI(SSTAT0, SELDO))
+ {
+ /* clear BUS FREE interrupt */
+ SETPORT(SSTAT1, CLRBUSFREE);
+
+ /* Disable SELECTION OUT sequence */
+ CLRBITS(SCSISEQ, ENSELO|ENAUTOATNO);
+
+ /* Disable SELECTION OUT DONE interrupt */
+ CLRBITS(SIMODE0, ENSELDO);
+ CLRBITS(SIMODE1, ENSELTIMO);
+
+ if(TESTLO(SSTAT0, SELDO))
+ {
+ printk("aha152x: passing bus free condition\n");
+
+#if defined(DEBUG_RACE)
+ leave_driver("(passing bus free) intr");
+#endif
+ SETBITS(DMACNTRL0, INTEN);
+
+ if(CURRENT_SC->SCp.phase & aborted)
+ {
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_ERROR;
+ HOSTDATA(shpnt)->abortion_complete++;
+ }
+
+ aha152x_done(shpnt, DID_NO_CONNECT << 16);
+ return;
+ }
+#if defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_selection|debug_phases))
+ printk("SELDO (SELID=%x), ", GETPORT(SELID));
+#endif
+
+ /* selection was done */
+ SETPORT(SSTAT0, CLRSELDO);
+
+#if defined(DEBUG_ABORT)
+ if((HOSTDATA(shpnt)->debug & debug_abort) && (CURRENT_SC->SCp.phase & aborted))
+ printk("(ABORT) target selected, ");
+#endif
+
+ CURRENT_SC->SCp.phase &= ~in_selection;
+ CURRENT_SC->SCp.phase |= in_other;
+
+ ADDMSG(IDENTIFY(HOSTDATA(shpnt)->reconnect,CURRENT_SC->lun));
+
+ if(!(SYNCRATE&0x80) && HOSTDATA(shpnt)->synchronous)
+ {
+ ADDMSG(EXTENDED_MESSAGE);
+ ADDMSG(3);
+ ADDMSG(EXTENDED_SDTR);
+ ADDMSG(50);
+ ADDMSG(8);
+
+ printk("outbound SDTR: ");
+ print_msg(&MSG(MSGLEN-5));
+
+ SYNCRATE=0x80;
+ CURRENT_SC->SCp.phase |= in_sync;
+ }
+
+#if defined(DEBUG_RACE)
+ leave_driver("(SELDO) intr");
+#endif
+ SETPORT(SCSIRATE, SYNCRATE&0x7f);
+
+ SETPORT(SCSISIG, P_MSGO);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENREQINIT|ENBUSFREE);
+ SETBITS(DMACNTRL0, INTEN);
+ return;
+ }
+ else
+ aha152x_panic(shpnt, "neither timeout nor selection\007");
+ else
+ {
+#if defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_selection|debug_phases))
+ printk("SELTO, ");
+#endif
+ /* end selection attempt */
+ CLRBITS(SCSISEQ, ENSELO|ENAUTOATNO);
+
+ /* timeout */
+ SETPORT(SSTAT1, CLRSELTIMO);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+ SETBITS(DMACNTRL0, INTEN);
+#if defined(DEBUG_RACE)
+ leave_driver("(SELTO) intr");
+#endif
+
+ if(CURRENT_SC->SCp.phase & aborted)
+ {
+#if defined(DEBUG_ABORT)
+ if(HOSTDATA(shpnt)->debug & debug_abort)
+ printk("(ABORT) selection timeout, ");
+#endif
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_ERROR;
+ HOSTDATA(shpnt)->abortion_complete++;
+ }
+
+ if(TESTLO(SSTAT0, SELINGO))
+ /* ARBITRATION not won */
+ aha152x_done(shpnt, DID_BUS_BUSY << 16);
+ else
+ /* ARBITRATION won, but SELECTION failed */
+ aha152x_done(shpnt, DID_NO_CONNECT << 16);
+
+ return;
+ }
+ }
+
+ /* enable interrupt, when target leaves current phase */
+ phase = getphase(shpnt);
+ if(!(phase & ~P_MASK)) /* "real" phase */
+ SETPORT(SCSISIG, phase);
+ SETPORT(SSTAT1, CLRPHASECHG);
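+ /* remember the current bus phase in the upper half of SCp.phase
+    (P_MASK bits shifted by 16); bit 16 itself is the phase-end flag */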
+ CURRENT_SC->SCp.phase =
+ (CURRENT_SC->SCp.phase & ~((P_MASK|1)<<16)) | (phase << 16);
+
+ /* information transfer phase */
+ switch(phase)
+ {
+ case P_MSGO: /* MESSAGE OUT */
+ {
+ int i, identify=0, abort=0;
+
+#if defined(DEBUG_INTR) || defined(DEBUG_MSGO) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_msgo|debug_phases))
+ printk("MESSAGE OUT, ");
+#endif
+ if(MSGLEN==0)
+ {
+ ADDMSG(MESSAGE_REJECT);
+#if defined(DEBUG_MSGO)
+ if(HOSTDATA(shpnt)->debug & debug_msgo)
+ printk("unexpected MSGO; rejecting, ");
+#endif
+ }
+
+
+ CLRBITS(SXFRCTL0, ENDMA);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENREQINIT|ENBUSFREE);
+
+ /* wait for data latch to become ready or a phase change */
+ while(TESTLO(DMASTAT, INTSTAT))
+ barrier();
+
+#if defined(DEBUG_MSGO)
+ if(HOSTDATA(shpnt)->debug & debug_msgo)
+ {
+ int i;
+
+ printk("messages (");
+ for(i=0; i<MSGLEN; i+=print_msg(&MSG(i)), printk(" "))
+ ;
+ printk("), ");
+ }
+#endif
+
+ for(i=0; i<MSGLEN && TESTLO(SSTAT1, PHASEMIS); i++)
+ {
+#if defined(DEBUG_MSGO)
+ if(HOSTDATA(shpnt)->debug & debug_msgo)
+ printk("%x ", MSG(i));
+#endif
+ if(i==MSGLEN-1)
+ {
+ /* Leave MESSAGE OUT after transfer */
+ SETPORT(SSTAT1, CLRATNO);
+ }
+
+ SETPORT(SCSIDAT, MSG(i));
+
+ make_acklow(shpnt);
+ getphase(shpnt);
+
+ if(MSG(i)==IDENTIFY(HOSTDATA(shpnt)->reconnect,CURRENT_SC->lun))
+ identify++;
+
+ if(MSG(i)==ABORT)
+ abort++;
+
+ }
+
+ MSGLEN=0;
+
+ if(identify)
+ CURRENT_SC->SCp.phase |= sent_ident;
+
+ if(abort)
+ {
+ /* revive abort(); abort() enables interrupts */
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_SUCCESS;
+ HOSTDATA(shpnt)->abortion_complete++;
+
+ CURRENT_SC->SCp.phase &= ~(P_MASK<<16);
+
+ /* exit */
+ SETBITS(DMACNTRL0, INTEN);
+#if defined(DEBUG_RACE)
+ leave_driver("(ABORT) intr");
+#endif
+ aha152x_done(shpnt, DID_ABORT<<16);
+ return;
+ }
+ }
+ break;
+
+ case P_CMD: /* COMMAND phase */
+#if defined(DEBUG_INTR) || defined(DEBUG_CMD) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_cmd|debug_phases))
+ printk("COMMAND, ");
+#endif
+ if(!(CURRENT_SC->SCp.sent_command))
+ {
+ int i;
+
+ CLRBITS(SXFRCTL0, ENDMA);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENREQINIT|ENBUSFREE);
+
+ /* wait for data latch to become ready or a phase change */
+ while(TESTLO(DMASTAT, INTSTAT))
+ barrier();
+
+ for(i=0; i<CURRENT_SC->cmd_len && TESTLO(SSTAT1, PHASEMIS); i++)
+ {
+ SETPORT(SCSIDAT, CURRENT_SC->cmnd[i]);
+
+ make_acklow(shpnt);
+ getphase(shpnt);
+ }
+
+ if(i<CURRENT_SC->cmd_len && TESTHI(SSTAT1, PHASEMIS))
+ aha152x_panic(shpnt, "target left COMMAND");
+
+ CURRENT_SC->SCp.sent_command++;
+ }
+ else
+ aha152x_panic(shpnt, "Nothing to send while in COMMAND");
+ break;
+
+ case P_MSGI: /* MESSAGE IN phase */
+ {
+ int start_sync=0;
+
+#if defined(DEBUG_INTR) || defined(DEBUG_MSGI) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_msgi|debug_phases))
+ printk("MESSAGE IN, ");
+#endif
+ SETPORT(SXFRCTL0, CH1);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENBUSFREE);
+
+ while(phase == P_MSGI)
+ {
+ CURRENT_SC->SCp.Message = GETPORT(SCSIDAT);
+ switch(CURRENT_SC->SCp.Message)
+ {
+ case DISCONNECT:
+#if defined(DEBUG_MSGI) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_msgi|debug_phases))
+ printk("target disconnected, ");
+#endif
+ CURRENT_SC->SCp.Message = 0;
+ CURRENT_SC->SCp.phase |= disconnected;
+ if(!HOSTDATA(shpnt)->reconnect)
+ aha152x_panic(shpnt, "target was not allowed to disconnect");
+ break;
+
+ case COMMAND_COMPLETE:
+#if defined(DEBUG_MSGI) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_msgi|debug_phases))
+ printk("inbound message (COMMAND COMPLETE), ");
+#endif
+ done++;
+ break;
+
+ case MESSAGE_REJECT:
+ if(CURRENT_SC->SCp.phase & in_sync)
+ {
+ CURRENT_SC->SCp.phase &= ~in_sync;
+ SYNCRATE=0x80;
+ printk("synchronous rejected, ");
+ }
+ else
+ printk("inbound message (MESSAGE REJECT), ");
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ printk("inbound message (MESSAGE REJECT), ");
+#endif
+ break;
+
+ case SAVE_POINTERS:
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ printk("inbound message (SAVE DATA POINTERS), ");
+#endif
+ break;
+
+ case EXTENDED_MESSAGE:
+ {
+ char buffer[16];
+ int i;
+
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ printk("inbound message (EXTENDED MESSAGE), ");
+#endif
+ make_acklow(shpnt);
+ if(getphase(shpnt)!=P_MSGI)
+ break;
+
+ buffer[0]=EXTENDED_MESSAGE;
+ buffer[1]=GETPORT(SCSIDAT);
+
+ for(i=0; i<buffer[1] &&
+ (make_acklow(shpnt), getphase(shpnt)==P_MSGI); i++)
+ buffer[2+i]=GETPORT(SCSIDAT);
+
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ print_msg(buffer);
+#endif
+
+ switch(buffer [2])
+ {
+ case EXTENDED_SDTR:
+ {
+ long ticks;
+
+ if(buffer[1]!=3)
+ aha152x_panic(shpnt, "SDTR message length != 3");
+
+ if(!HOSTDATA(shpnt)->synchronous)
+ break;
+
+ printk("inbound SDTR: "); print_msg(buffer);
+
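+ /* buffer[3] holds the requested transfer period in units of 4 ns;
+    convert it to the chip's 50 ns ticks, rounding up */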
+ ticks=(buffer[3]*4+49)/50;
+
+ if(CURRENT_SC->SCp.phase & in_sync)
+ {
+ /* we initiated SDTR */
+ if(ticks>9 || buffer[4]<1 || buffer[4]>8)
+ aha152x_panic(shpnt, "received SDTR invalid");
+
+ SYNCRATE |= ((ticks-2)<<4) + buffer[4];
+ }
+ else if(ticks<=9 && buffer[4]>=1)
+ {
+ if(buffer[4]>8)
+ buffer[4]=8;
+
+ ADDMSG(EXTENDED_MESSAGE);
+ ADDMSG(3);
+ ADDMSG(EXTENDED_SDTR);
+ if(ticks<4)
+ {
+ ticks=4;
+ ADDMSG(50);
+ }
+ else
+ ADDMSG(buffer[3]);
+
+ ADDMSG(buffer[4]);
+
+ printk("outbound SDTR: ");
+ print_msg(&MSG(MSGLEN-5));
+
+ CURRENT_SC->SCp.phase |= in_sync;
+
+ SYNCRATE |= ((ticks-2)<<4) + buffer[4];
+
+ start_sync++;
+ }
+ else
+ {
+ /* requested SDTR is too slow, do it asynchronously */
+ ADDMSG(MESSAGE_REJECT);
+ SYNCRATE = 0;
+ }
+
+ SETPORT(SCSIRATE, SYNCRATE&0x7f);
+ }
+ break;
+
+ case EXTENDED_MODIFY_DATA_POINTER:
+ case EXTENDED_EXTENDED_IDENTIFY:
+ case EXTENDED_WDTR:
+ default:
+ ADDMSG(MESSAGE_REJECT);
+ break;
+ }
+ }
+ break;
+
+ default:
+ printk("unsupported inbound message %x, ",
+ CURRENT_SC->SCp.Message);
+ break;
+
+ }
+
+ make_acklow(shpnt);
+ phase=getphase(shpnt);
+ }
+
+ if(start_sync)
+ CURRENT_SC->SCp.phase |= in_sync;
+ else
+ CURRENT_SC->SCp.phase &= ~in_sync;
+
+ if(MSGLEN>0)
+ SETPORT(SCSISIG, P_MSGI|ATNO);
+
+ /* clear SCSI fifo on BUSFREE */
+ if(phase==P_BUSFREE)
+ SETPORT(SXFRCTL0, CH1|CLRCH1);
+
+ if(CURRENT_SC->SCp.phase & disconnected)
+ {
+ save_flags(flags);
+ cli();
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("d+, ");
+#endif
+ append_SC(&DISCONNECTED_SC, CURRENT_SC);
+ CURRENT_SC->SCp.phase |= 1<<16;
+ CURRENT_SC = NULL;
+ restore_flags(flags);
+
+ SETBITS(SCSISEQ, ENRESELI);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+
+ SETBITS(DMACNTRL0, INTEN);
+ return;
+ }
+ }
+ break;
+
+ case P_STATUS: /* STATUS IN phase */
+#if defined(DEBUG_STATUS) || defined(DEBUG_INTR) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_status|debug_intr|debug_phases))
+ printk("STATUS, ");
+#endif
+ SETPORT(SXFRCTL0, CH1);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENREQINIT|ENBUSFREE);
+
+ if(TESTHI(SSTAT1, PHASEMIS))
+ printk("aha152x: passing STATUS phase\n");
+
+ CURRENT_SC->SCp.Status = GETPORT(SCSIBUS);
+ make_acklow(shpnt);
+ getphase(shpnt);
+
+#if defined(DEBUG_STATUS)
+ if(HOSTDATA(shpnt)->debug & debug_status)
+ {
+ printk("inbound status ");
+ print_status(CURRENT_SC->SCp.Status);
+ printk(", ");
+ }
+#endif
+ break;
+
+ case P_DATAI: /* DATA IN phase */
+ {
+ int fifodata, data_count, done;
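+ /* note: this "done" is local to the DATA IN handling and shadows
+    the function-level done flag used for COMMAND COMPLETE */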
+
+#if defined(DEBUG_DATAI) || defined(DEBUG_INTR) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_datai|debug_intr|debug_phases))
+ printk("DATA IN, ");
+#endif
+
+#if 0
+ if(GETPORT(FIFOSTAT) || GETPORT(SSTAT2) & (SFULL|SFCNT))
+ printk("aha152x: P_DATAI: %d(%d) bytes left in FIFO, resetting\n",
+ GETPORT(FIFOSTAT), GETPORT(SSTAT2) & (SFULL|SFCNT));
+#endif
+
+ /* reset host fifo */
+ SETPORT(DMACNTRL0, RSTFIFO);
+ SETPORT(DMACNTRL0, RSTFIFO|ENDMA);
+
+ SETPORT(SXFRCTL0, CH1|SCSIEN|DMAEN);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+
+ /* done is set when the FIFO is empty after the target left DATA IN */
+ done=0;
+
+ /* while the target stays in DATA to transfer data */
+ while (!done)
+ {
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("expecting data, ");
+#endif
+ /* wait for PHASEMIS or full FIFO */
+ while(TESTLO (DMASTAT, DFIFOFULL|INTSTAT))
+ barrier();
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("ok, ");
+#endif
+
+ if(TESTHI(DMASTAT, DFIFOFULL))
+ fifodata=GETPORT(FIFOSTAT);
+ else
+ {
+ /* wait for SCSI fifo to get empty */
+ while(TESTLO(SSTAT2, SEMPTY))
+ barrier();
+
+ /* rest of data in FIFO */
+ fifodata=GETPORT(FIFOSTAT);
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("last transfer, ");
+#endif
+ done=1;
+ }
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("fifodata=%d, ", fifodata);
+#endif
+
+ while(fifodata && CURRENT_SC->SCp.this_residual)
+ {
+ data_count=fifodata;
+
+ /* limit data transfer to the size of the current sg buffer */
+ if (data_count > CURRENT_SC->SCp.this_residual)
+ data_count = CURRENT_SC->SCp.this_residual;
+
+ fifodata -= data_count;
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("data_count=%d, ", data_count);
+#endif
+
+ if(data_count&1)
+ {
+ /* get a single byte in byte mode */
+ SETBITS(DMACNTRL0, _8BIT);
+ *CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT);
+ CURRENT_SC->SCp.this_residual--;
+ }
+ if(data_count>1)
+ {
+ CLRBITS(DMACNTRL0, _8BIT);
+ data_count >>= 1; /* Number of words */
+ insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ /* show what comes with the last transfer */
+ if(done)
+ {
+#if 0
+ int i;
+ unsigned char *data;
+#endif
+
+ printk("data on last transfer (%d bytes) ",
+ 2*data_count);
+#if 0
+ printk("data on last transfer (%d bytes: ",
+ 2*data_count);
+ data = (unsigned char *) CURRENT_SC->SCp.ptr;
+ for(i=0; i<2*data_count; i++)
+ printk("%2x ", *data++);
+ printk("), ");
+#endif
+ }
+#endif
+ CURRENT_SC->SCp.ptr += 2 * data_count;
+ CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ }
+
+ /* if this buffer is full and there are more buffers left */
+ if (!CURRENT_SC->SCp.this_residual &&
+ CURRENT_SC->SCp.buffers_residual)
+ {
+ /* advance to next buffer */
+ CURRENT_SC->SCp.buffers_residual--;
+ CURRENT_SC->SCp.buffer++;
+ CURRENT_SC->SCp.ptr =
+ CURRENT_SC->SCp.buffer->address;
+ CURRENT_SC->SCp.this_residual =
+ CURRENT_SC->SCp.buffer->length;
+ }
+ }
+
+ /*
+ * Fifo should be empty
+ */
+ if(fifodata>0)
+ {
+ printk("aha152x: more data than expected (%d bytes)\n",
+ GETPORT(FIFOSTAT));
+ SETBITS(DMACNTRL0, _8BIT);
+ printk("aha152x: data (");
+ while(fifodata--)
+ printk("%2x ", GETPORT(DATAPORT));
+ printk(")\n");
+ }
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ if(!fifodata)
+ printk("fifo empty, ");
+ else
+ printk("something left in fifo, ");
+#endif
+ }
+
+#if defined(DEBUG_DATAI)
+ if((HOSTDATA(shpnt)->debug & debug_datai) &&
+ (CURRENT_SC->SCp.buffers_residual ||
+ CURRENT_SC->SCp.this_residual))
+ printk("left buffers (buffers=%d, bytes=%d), ",
+ CURRENT_SC->SCp.buffers_residual,
+ CURRENT_SC->SCp.this_residual);
+#endif
+ /* the transfer can be considered ended when SCSIEN reads back zero */
+ CLRBITS(SXFRCTL0, SCSIEN|DMAEN);
+ while(TESTHI(SXFRCTL0, SCSIEN))
+ barrier();
+ CLRBITS(DMACNTRL0, ENDMA);
+
+#if defined(DEBUG_DATAI) || defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & (debug_datai|debug_intr))
+ printk("got %d bytes, ", GETSTCNT());
+#endif
+
+ CURRENT_SC->SCp.have_data_in++;
+ }
+ break;
+
+ case P_DATAO: /* DATA OUT phase */
+ {
+ int data_count;
+
+#if defined(DEBUG_DATAO) || defined(DEBUG_INTR) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_datao|debug_intr|debug_phases))
+ printk("DATA OUT, ");
+#endif
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("got data to send (bytes=%d, buffers=%d), ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual);
+#endif
+
+ if(GETPORT(FIFOSTAT) || GETPORT(SSTAT2) & (SFULL|SFCNT))
+ {
+ printk("%d(%d) left in FIFO, ",
+ GETPORT(FIFOSTAT), GETPORT(SSTAT2) & (SFULL|SFCNT));
+ aha152x_panic(shpnt, "FIFO should be empty");
+ }
+
+ SETPORT(SXFRCTL0, CH1|CLRSTCNT|CLRCH1);
+ SETPORT(SXFRCTL0, SCSIEN|DMAEN|CH1);
+
+ SETPORT(DMACNTRL0, WRITE_READ|RSTFIFO);
+ SETPORT(DMACNTRL0, ENDMA|WRITE_READ);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+
+ /* while current buffer is not empty or
+ there are more buffers to transfer */
+ while(TESTLO(SSTAT1, PHASEMIS) &&
+ (CURRENT_SC->SCp.this_residual ||
+ CURRENT_SC->SCp.buffers_residual))
+ {
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("sending data (left: bytes=%d, buffers=%d), waiting, ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual);
+#endif
+ /* transfer the rest of the buffer, but at most 128 bytes */
+ data_count =
+ CURRENT_SC->SCp.this_residual > 128 ?
+ 128 : CURRENT_SC->SCp.this_residual ;
+
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("data_count=%d, ", data_count);
+#endif
+
+ if(data_count&1)
+ {
+ /* put a single byte in byte mode */
+ SETBITS(DMACNTRL0, _8BIT);
+ SETPORT(DATAPORT, *CURRENT_SC->SCp.ptr++);
+ CURRENT_SC->SCp.this_residual--;
+ }
+ if(data_count>1)
+ {
+ CLRBITS(DMACNTRL0, _8BIT);
+ data_count >>= 1; /* number of words */
+ outsw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
+ CURRENT_SC->SCp.ptr += 2 * data_count;
+ CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ }
+
+ /* wait for FIFO to get empty */
+ while(TESTLO(DMASTAT, DFIFOEMP|INTSTAT))
+ barrier();
+
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("fifo (%d bytes), transferred (%d bytes), ",
+ GETPORT(FIFOSTAT), GETSTCNT());
+#endif
+
+ /* if this buffer is empty and there are more buffers left */
+ if (TESTLO(SSTAT1, PHASEMIS) &&
+ !CURRENT_SC->SCp.this_residual &&
+ CURRENT_SC->SCp.buffers_residual)
+ {
+ /* advance to next buffer */
+ CURRENT_SC->SCp.buffers_residual--;
+ CURRENT_SC->SCp.buffer++;
+ CURRENT_SC->SCp.ptr =
+ CURRENT_SC->SCp.buffer->address;
+ CURRENT_SC->SCp.this_residual =
+ CURRENT_SC->SCp.buffer->length;
+ }
+ }
+
+ if (CURRENT_SC->SCp.this_residual || CURRENT_SC->SCp.buffers_residual)
+ {
+ /* target leaves DATA OUT for another phase
+    (perhaps disconnect) */
+
+ /* data left in the fifos has to be resent */
+ data_count = GETPORT(SSTAT2) & (SFULL|SFCNT);
+
+ data_count += GETPORT(FIFOSTAT) ;
+ CURRENT_SC->SCp.ptr -= data_count;
+ CURRENT_SC->SCp.this_residual += data_count;
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("left data (bytes=%d, buffers=%d), fifos (bytes=%d), "
+ "transfer incomplete, resetting fifo, ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual,
+ data_count);
+#endif
+ SETPORT(DMACNTRL0, WRITE_READ|RSTFIFO);
+ CLRBITS(SXFRCTL0, SCSIEN|DMAEN);
+ CLRBITS(DMACNTRL0, ENDMA);
+ }
+ else
+ {
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("waiting for SCSI fifo to get empty, ");
+#endif
+ /* wait for SCSI fifo to get empty */
+ while(TESTLO(SSTAT2, SEMPTY))
+ barrier();
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("ok, left data (bytes=%d, buffers=%d) ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual);
+#endif
+ CLRBITS(SXFRCTL0, SCSIEN|DMAEN);
+
+ /* the transfer can be considered ended when SCSIEN reads back zero */
+ while(TESTHI(SXFRCTL0, SCSIEN))
+ barrier();
+
+ CLRBITS(DMACNTRL0, ENDMA);
+ }
+
+#if defined(DEBUG_DATAO) || defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & (debug_datao|debug_intr))
+ printk("sent %d data bytes, ", GETSTCNT());
+#endif
+ }
+ break;
+
+ case P_BUSFREE: /* BUSFREE */
+#if defined(DEBUG_RACE)
+ leave_driver("(BUSFREE) intr");
+#endif
+#if defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & debug_phases)
+ printk("unexpected BUS FREE, ");
+#endif
+ CURRENT_SC->SCp.phase &= ~(P_MASK<<16);
+
+ aha152x_done(shpnt, DID_ERROR << 16); /* Don't know any better */
+ return;
+ break;
+
+ case P_PARITY: /* parity error in DATA phase */
+#if defined(DEBUG_RACE)
+ leave_driver("(DID_PARITY) intr");
+#endif
+ printk("PARITY error in DATA phase, ");
+
+ CURRENT_SC->SCp.phase &= ~(P_MASK<<16);
+
+ SETBITS(DMACNTRL0, INTEN);
+ aha152x_done(shpnt, DID_PARITY << 16);
+ return;
+ break;
+
+ default:
+ printk("aha152x: unexpected phase\n");
+ break;
+ }
+
+ if(done)
+ {
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ printk("command done.\n");
+#endif
+#if defined(DEBUG_RACE)
+ leave_driver("(done) intr");
+#endif
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+ SETPORT(SCSISEQ, DISCONNECTED_SC ? ENRESELI : 0);
+
+ SETBITS(DMACNTRL0, INTEN);
+
+ aha152x_done(shpnt,
+ (CURRENT_SC->SCp.Status & 0xff)
+ | ((CURRENT_SC->SCp.Message & 0xff) << 8)
+ | (DID_OK << 16));
+
+#if defined(DEBUG_RACE)
+ printk("done returned (DID_OK: Status=%x; Message=%x).\n",
+ CURRENT_SC->SCp.Status, CURRENT_SC->SCp.Message);
+#endif
+ return;
+ }
+
+ if(CURRENT_SC)
+ CURRENT_SC->SCp.phase |= 1<<16 ;
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ disp_enintr(shpnt);
+#endif
+#if defined(DEBUG_RACE)
+ leave_driver("(PHASEEND) intr");
+#endif
+
+ SETBITS(DMACNTRL0, INTEN);
+ return;
+}
+
+/*
+ * Dump the current driver status and panic...
+ */
+static void aha152x_panic(struct Scsi_Host *shpnt, char *msg)
+{
+ printk("\naha152x: %s\n", msg);
+ show_queues(shpnt);
+ panic("aha152x panic");
+}
+
+/*
+ * Display registers of AIC-6260
+ */
+static void disp_ports(struct Scsi_Host *shpnt)
+{
+#ifdef DEBUG_AHA152X
+ int s;
+
+#ifdef SKIP_PORTS
+ if(HOSTDATA(shpnt)->debug & debug_skipports)
+ return;
+#endif
+
+ printk("\n%s: ", CURRENT_SC ? "on bus" : "waiting");
+
+ s=GETPORT(SCSISEQ);
+ printk("SCSISEQ (");
+ if(s & TEMODEO) printk("TARGET MODE ");
+ if(s & ENSELO) printk("SELO ");
+ if(s & ENSELI) printk("SELI ");
+ if(s & ENRESELI) printk("RESELI ");
+ if(s & ENAUTOATNO) printk("AUTOATNO ");
+ if(s & ENAUTOATNI) printk("AUTOATNI ");
+ if(s & ENAUTOATNP) printk("AUTOATNP ");
+ if(s & SCSIRSTO) printk("SCSIRSTO ");
+ printk(");");
+
+ printk(" SCSISIG (");
+ s=GETPORT(SCSISIG);
+ switch(s & P_MASK)
+ {
+ case P_DATAO:
+ printk("DATA OUT");
+ break;
+ case P_DATAI:
+ printk("DATA IN");
+ break;
+ case P_CMD:
+ printk("COMMAND");
+ break;
+ case P_STATUS:
+ printk("STATUS");
+ break;
+ case P_MSGO:
+ printk("MESSAGE OUT");
+ break;
+ case P_MSGI:
+ printk("MESSAGE IN");
+ break;
+ default:
+ printk("*illegal*");
+ break;
+ }
+
+ printk("); ");
+
+ printk("INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
+
+ printk("SSTAT (");
+ s=GETPORT(SSTAT0);
+ if(s & TARGET) printk("TARGET ");
+ if(s & SELDO) printk("SELDO ");
+ if(s & SELDI) printk("SELDI ");
+ if(s & SELINGO) printk("SELINGO ");
+ if(s & SWRAP) printk("SWRAP ");
+ if(s & SDONE) printk("SDONE ");
+ if(s & SPIORDY) printk("SPIORDY ");
+ if(s & DMADONE) printk("DMADONE ");
+
+ s=GETPORT(SSTAT1);
+ if(s & SELTO) printk("SELTO ");
+ if(s & ATNTARG) printk("ATNTARG ");
+ if(s & SCSIRSTI) printk("SCSIRSTI ");
+ if(s & PHASEMIS) printk("PHASEMIS ");
+ if(s & BUSFREE) printk("BUSFREE ");
+ if(s & SCSIPERR) printk("SCSIPERR ");
+ if(s & PHASECHG) printk("PHASECHG ");
+ if(s & REQINIT) printk("REQINIT ");
+ printk("); ");
+
+
+ printk("SSTAT (");
+
+ s=GETPORT(SSTAT0) & GETPORT(SIMODE0);
+
+ if(s & TARGET) printk("TARGET ");
+ if(s & SELDO) printk("SELDO ");
+ if(s & SELDI) printk("SELDI ");
+ if(s & SELINGO) printk("SELINGO ");
+ if(s & SWRAP) printk("SWRAP ");
+ if(s & SDONE) printk("SDONE ");
+ if(s & SPIORDY) printk("SPIORDY ");
+ if(s & DMADONE) printk("DMADONE ");
+
+ s=GETPORT(SSTAT1) & GETPORT(SIMODE1);
+
+ if(s & SELTO) printk("SELTO ");
+ if(s & ATNTARG) printk("ATNTARG ");
+ if(s & SCSIRSTI) printk("SCSIRSTI ");
+ if(s & PHASEMIS) printk("PHASEMIS ");
+ if(s & BUSFREE) printk("BUSFREE ");
+ if(s & SCSIPERR) printk("SCSIPERR ");
+ if(s & PHASECHG) printk("PHASECHG ");
+ if(s & REQINIT) printk("REQINIT ");
+ printk("); ");
+
+ printk("SXFRCTL0 (");
+
+ s=GETPORT(SXFRCTL0);
+ if(s & SCSIEN) printk("SCSIEN ");
+ if(s & DMAEN) printk("DMAEN ");
+ if(s & CH1) printk("CH1 ");
+ if(s & CLRSTCNT) printk("CLRSTCNT ");
+ if(s & SPIOEN) printk("SPIOEN ");
+ if(s & CLRCH1) printk("CLRCH1 ");
+ printk("); ");
+
+ printk("SIGNAL (");
+
+ s=GETPORT(SCSISIG);
+ if(s & ATNI) printk("ATNI ");
+ if(s & SELI) printk("SELI ");
+ if(s & BSYI) printk("BSYI ");
+ if(s & REQI) printk("REQI ");
+ if(s & ACKI) printk("ACKI ");
+ printk("); ");
+
+ printk("SELID (%02x), ", GETPORT(SELID));
+
+ printk("SSTAT2 (");
+
+ s=GETPORT(SSTAT2);
+ if(s & SOFFSET) printk("SOFFSET ");
+ if(s & SEMPTY) printk("SEMPTY ");
+ if(s & SFULL) printk("SFULL ");
+ printk("); SFCNT (%d); ", s & (SFULL|SFCNT));
+
+ s=GETPORT(SSTAT3);
+ printk("SCSICNT (%d), OFFCNT(%d), ", (s&0xf0)>>4, s&0x0f);
+
+ printk("SSTAT4 (");
+ s=GETPORT(SSTAT4);
+ if(s & SYNCERR) printk("SYNCERR ");
+ if(s & FWERR) printk("FWERR ");
+ if(s & FRERR) printk("FRERR ");
+ printk("); ");
+
+ printk("DMACNTRL0 (");
+ s=GETPORT(DMACNTRL0);
+ printk("%s ", s & _8BIT ? "8BIT" : "16BIT");
+ printk("%s ", s & DMA ? "DMA" : "PIO" );
+ printk("%s ", s & WRITE_READ ? "WRITE" : "READ" );
+ if(s & ENDMA) printk("ENDMA ");
+ if(s & INTEN) printk("INTEN ");
+ if(s & RSTFIFO) printk("RSTFIFO ");
+ if(s & SWINT) printk("SWINT ");
+ printk("); ");
+
+
+#if 0
+ printk("DMACNTRL1 (");
+
+ s=GETPORT(DMACNTRL1);
+ if(s & PWRDWN) printk("PWRDN ");
+ printk("); ");
+
+
+ printk("STK (%d); ", s & 0xf);
+
+#endif
+
+ printk("DMASTAT (");
+ s=GETPORT(DMASTAT);
+ if(s & ATDONE) printk("ATDONE ");
+ if(s & WORDRDY) printk("WORDRDY ");
+ if(s & DFIFOFULL) printk("DFIFOFULL ");
+ if(s & DFIFOEMP) printk("DFIFOEMP ");
+ printk(")");
+
+ printk("\n");
+#endif
+}
+
+/*
+ * display enabled interrupts
+ */
+static void disp_enintr(struct Scsi_Host *shpnt)
+{
+ int s;
+
+ printk("enabled interrupts (");
+
+ s=GETPORT(SIMODE0);
+ if(s & ENSELDO) printk("ENSELDO ");
+ if(s & ENSELDI) printk("ENSELDI ");
+ if(s & ENSELINGO) printk("ENSELINGO ");
+ if(s & ENSWRAP) printk("ENSWRAP ");
+ if(s & ENSDONE) printk("ENSDONE ");
+ if(s & ENSPIORDY) printk("ENSPIORDY ");
+ if(s & ENDMADONE) printk("ENDMADONE ");
+
+ s=GETPORT(SIMODE1);
+ if(s & ENSELTIMO) printk("ENSELTIMO ");
+ if(s & ENATNTARG) printk("ENATNTARG ");
+ if(s & ENPHASEMIS) printk("ENPHASEMIS ");
+ if(s & ENBUSFREE) printk("ENBUSFREE ");
+ if(s & ENSCSIPERR) printk("ENSCSIPERR ");
+ if(s & ENPHASECHG) printk("ENPHASECHG ");
+ if(s & ENREQINIT) printk("ENREQINIT ");
+ printk(")\n");
+}
+
+#if defined(DEBUG_RACE)
+
+static const char *should_leave;
+static int in_driver=0;
+
+/*
+ * Only one routine can be in the driver at once.
+ */
+static void enter_driver(const char *func)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ printk("aha152x: entering %s() (%x)\n", func, jiffies);
+ if(in_driver)
+ {
+ printk("%s should leave first.\n", should_leave);
+ panic("aha152x: already in driver\n");
+ }
+
+ in_driver++;
+ should_leave=func;
+ restore_flags(flags);
+}
+
+static void leave_driver(const char *func)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ printk("\naha152x: leaving %s() (%x)\n", func, jiffies);
+ if(!in_driver)
+ {
+ printk("aha152x: %s already left.\n", should_leave);
+ panic("aha152x: %s already left driver.\n", should_leave);
+ }
+
+ in_driver--;
+ should_leave=func;
+ restore_flags(flags);
+}
+#endif
+
+/*
+ * Show the command data of a command
+ */
+static void show_command(Scsi_Cmnd *ptr)
+{
+ printk("0x%08x: target=%d; lun=%d; cmnd=(",
+ (unsigned int) ptr, ptr->target, ptr->lun);
+
+ print_command(ptr->cmnd);
+
+ printk("); residual=%d; buffers=%d; phase |",
+ ptr->SCp.this_residual, ptr->SCp.buffers_residual);
+
+ if(ptr->SCp.phase & not_issued ) printk("not issued|");
+ if(ptr->SCp.phase & in_selection) printk("in selection|");
+ if(ptr->SCp.phase & disconnected) printk("disconnected|");
+ if(ptr->SCp.phase & aborted ) printk("aborted|");
+ if(ptr->SCp.phase & sent_ident ) printk("sent_ident|");
+ if(ptr->SCp.phase & in_other)
+ {
+ printk("; in other(");
+ switch((ptr->SCp.phase >> 16) & P_MASK)
+ {
+ case P_DATAO:
+ printk("DATA OUT");
+ break;
+ case P_DATAI:
+ printk("DATA IN");
+ break;
+ case P_CMD:
+ printk("COMMAND");
+ break;
+ case P_STATUS:
+ printk("STATUS");
+ break;
+ case P_MSGO:
+ printk("MESSAGE OUT");
+ break;
+ case P_MSGI:
+ printk("MESSAGE IN");
+ break;
+ default:
+ printk("*illegal*");
+ break;
+ }
+ printk(")");
+ if(ptr->SCp.phase & (1<<16))
+ printk("; phaseend");
+ }
+ printk("; next=0x%08x\n", (unsigned int) ptr->host_scribble);
+}
+
+/*
+ * Dump the queued data
+ */
+static void show_queues(struct Scsi_Host *shpnt)
+{
+ unsigned long flags;
+ Scsi_Cmnd *ptr;
+
+ save_flags(flags);
+ cli();
+ printk("QUEUE STATUS:\nissue_SC:\n");
+ for(ptr=ISSUE_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ show_command(ptr);
+
+ printk("current_SC:\n");
+ if(CURRENT_SC)
+ show_command(CURRENT_SC);
+ else
+ printk("none\n");
+
+ printk("disconnected_SC:\n");
+ for(ptr=DISCONNECTED_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ show_command(ptr);
+
+ disp_ports(shpnt);
+ disp_enintr(shpnt);
+ restore_flags(flags);
+}
+
+int aha152x_set_info(char *buffer, int length, struct Scsi_Host *shpnt)
+{
+ return(-ENOSYS); /* Currently this is a no-op */
+}
+
+#undef SPRINTF
+#define SPRINTF(args...) pos += sprintf(pos, ## args)
+
+static int get_command(char *pos, Scsi_Cmnd *ptr)
+{
+ char *start = pos;
+ int i;
+
+ SPRINTF("0x%08x: target=%d; lun=%d; cmnd=(",
+ (unsigned int) ptr, ptr->target, ptr->lun);
+
+ for(i=0; i<COMMAND_SIZE(ptr->cmnd[0]); i++)
+ SPRINTF("0x%02x", ptr->cmnd[i]);
+
+ SPRINTF("); residual=%d; buffers=%d; phase |",
+ ptr->SCp.this_residual, ptr->SCp.buffers_residual);
+
+ if(ptr->SCp.phase & not_issued ) SPRINTF("not issued|");
+ if(ptr->SCp.phase & in_selection) SPRINTF("in selection|");
+ if(ptr->SCp.phase & disconnected) SPRINTF("disconnected|");
+ if(ptr->SCp.phase & aborted ) SPRINTF("aborted|");
+ if(ptr->SCp.phase & sent_ident ) SPRINTF("sent_ident|");
+ if(ptr->SCp.phase & in_other)
+ {
+ SPRINTF("; in other(");
+ switch((ptr->SCp.phase >> 16) & P_MASK)
+ {
+ case P_DATAO:
+ SPRINTF("DATA OUT");
+ break;
+ case P_DATAI:
+ SPRINTF("DATA IN");
+ break;
+ case P_CMD:
+ SPRINTF("COMMAND");
+ break;
+ case P_STATUS:
+ SPRINTF("STATUS");
+ break;
+ case P_MSGO:
+ SPRINTF("MESSAGE OUT");
+ break;
+ case P_MSGI:
+ SPRINTF("MESSAGE IN");
+ break;
+ default:
+ SPRINTF("*illegal*");
+ break;
+ }
+ SPRINTF(")");
+ if(ptr->SCp.phase & (1<<16))
+ SPRINTF("; phaseend");
+ }
+ SPRINTF("; next=0x%08x\n", (unsigned int) ptr->host_scribble);
+
+ return(pos-start);
+}
+
+#undef SPRINTF
+#define SPRINTF(args...) do { if(pos < buffer + length) pos += sprintf(pos, ## args); } while(0)
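+/* this second SPRINTF variant checks the write position first, so output
+   generated for /proc stops once the caller's buffer is full */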
+
+int aha152x_proc_info(
+ char *buffer,
+ char **start,
+ off_t offset,
+ int length,
+ int hostno,
+ int inout
+ )
+{
+ int i;
+ char *pos = buffer;
+ Scsi_Device *scd;
+ struct Scsi_Host *shpnt;
+ unsigned long flags;
+ Scsi_Cmnd *ptr;
+
+ for(i=0, shpnt= (struct Scsi_Host *) NULL; i<IRQS; i++)
+ if(aha152x_host[i] && aha152x_host[i]->host_no == hostno)
+ shpnt=aha152x_host[i];
+
+ if(!shpnt)
+ return(-ESRCH);
+
+ if(inout) /* Has data been written to the file ? */
+ return(aha152x_set_info(buffer, length, shpnt));
+
+ SPRINTF(AHA152X_REVID "\n");
+
+ save_flags(flags);
+ cli();
+
+ SPRINTF("vital data:\nioports 0x%04x to 0x%04x\n",
+ shpnt->io_port, shpnt->io_port+shpnt->n_io_port-1);
+ SPRINTF("interrupt 0x%02x\n", shpnt->irq);
+ SPRINTF("disconnection/reconnection %s\n",
+ HOSTDATA(shpnt)->reconnect ? "enabled" : "disabled");
+ SPRINTF("parity checking %s\n",
+ HOSTDATA(shpnt)->parity ? "enabled" : "disabled");
+ SPRINTF("synchronous transfers %s\n",
+ HOSTDATA(shpnt)->synchronous ? "enabled" : "disabled");
+ SPRINTF("current queued %d commands\n",
+ HOSTDATA(shpnt)->commands);
+
+#if 0
+ SPRINTF("synchronously operating targets (tick=%ld ns):\n",
+ 250000000/loops_per_sec);
+ for(i=0; i<8; i++)
+ if(HOSTDATA(shpnt)->syncrate[i]&0x7f)
+ SPRINTF("target %d: period %dT/%ldns; req/ack offset %d\n",
+ i,
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2),
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2)*
+ 250000000/loops_per_sec,
+ HOSTDATA(shpnt)->syncrate[i]&0x0f);
+#else
+ SPRINTF("synchronously operating targets (tick=50 ns):\n");
+ for(i=0; i<8; i++)
+ if(HOSTDATA(shpnt)->syncrate[i]&0x7f)
+ SPRINTF("target %d: period %dT/%dns; req/ack offset %d\n",
+ i,
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2),
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2)*50,
+ HOSTDATA(shpnt)->syncrate[i]&0x0f);
+#endif
+
+#ifdef DEBUG_AHA152X
+#define PDEBUG(flags,txt) if(HOSTDATA(shpnt)->debug & flags) SPRINTF("(%s) ", txt);
+
+ SPRINTF("enabled debugging options:\n");
+
+ PDEBUG(debug_skipports, "skip ports");
+ PDEBUG(debug_queue, "queue");
+ PDEBUG(debug_intr, "interrupt");
+ PDEBUG(debug_selection, "selection");
+ PDEBUG(debug_msgo, "message out");
+ PDEBUG(debug_msgi, "message in");
+ PDEBUG(debug_status, "status");
+ PDEBUG(debug_cmd, "command");
+ PDEBUG(debug_datai, "data in");
+ PDEBUG(debug_datao, "data out");
+ PDEBUG(debug_abort, "abort");
+ PDEBUG(debug_done, "done");
+ PDEBUG(debug_biosparam, "bios parameters");
+ PDEBUG(debug_phases, "phases");
+ PDEBUG(debug_queues, "queues");
+ PDEBUG(debug_reset, "reset");
+
+ SPRINTF("\n");
+#endif
+
+ SPRINTF("queue status:\nnot yet issued commands:\n");
+ for(ptr=ISSUE_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ pos += get_command(pos, ptr);
+
+ if(CURRENT_SC)
+ {
+ SPRINTF("current command:\n");
+ pos += get_command(pos, CURRENT_SC);
+ }
+
+ SPRINTF("disconnected commands:\n");
+ for(ptr=DISCONNECTED_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ pos += get_command(pos, ptr);
+
+ restore_flags(flags);
+
+ scd = scsi_devices;
+
+ SPRINTF("Attached devices: %s\n", (scd)?"":"none");
+
+ while (scd) {
+ if (scd->host == shpnt) {
+
+ SPRINTF("Channel: %02d Id: %02d Lun: %02d\n Vendor: ",
+ scd->channel, scd->id, scd->lun);
+ for (i=0; i<8; i++) {
+ if (scd->vendor[i] >= 0x20)
+ SPRINTF("%c", scd->vendor[i]);
+ else
+ SPRINTF(" ");
+ }
+ SPRINTF(" Model: ");
+ for (i = 0; i < 16; i++) {
+ if (scd->model[i] >= 0x20)
+ SPRINTF("%c", scd->model[i]);
+ else
+ SPRINTF(" ");
+ }
+ SPRINTF(" Rev: ");
+ for (i = 0; i < 4; i++) {
+ if (scd->rev[i] >= 0x20)
+ SPRINTF("%c", scd->rev[i]);
+ else
+ SPRINTF(" ");
+ }
+ SPRINTF("\n");
+
+ SPRINTF(" Type: %d ", scd->type);
+ SPRINTF(" ANSI SCSI revision: %02x",
+ (scd->scsi_level < 3)?1:2);
+
+ if (scd->scsi_level == 2)
+ SPRINTF(" CCS\n");
+ else
+ SPRINTF("\n");
+ }
+ scd = scd->next;
+ }
+
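+ /* hand back only the part of the generated text that lies inside the
+    window [offset, offset+length) requested by the caller */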
+ *start=buffer+offset;
+ if (pos - buffer < offset)
+ return 0;
+ else if (pos - buffer - offset < length)
+ return pos - buffer - offset;
+ else
+ return length;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but that will come later */
+Scsi_Host_Template driver_template = AHA152X;
+
+#include "scsi_module.c"
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/aha152x.h b/i386/i386at/gpl/linux/scsi/aha152x.h
new file mode 100644
index 00000000..d62e5cfe
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/aha152x.h
@@ -0,0 +1,373 @@
+#ifndef _AHA152X_H
+#define _AHA152X_H
+
+/*
+ * $Id: aha152x.h,v 1.1.1.1 1997/02/25 21:27:46 thomas Exp $
+ */
+
+#if defined(__KERNEL__)
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include <asm/io.h>
+
+int aha152x_detect(Scsi_Host_Template *);
+int aha152x_command(Scsi_Cmnd *);
+int aha152x_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int aha152x_abort(Scsi_Cmnd *);
+int aha152x_reset(Scsi_Cmnd *);
+int aha152x_biosparam(Disk *, kdev_t, int*);
+int aha152x_proc_info(char *buffer, char **start, off_t offset, int length, int hostno, int inout);
+
+/* number of queueable commands
+   (as long as we don't support more than 1 cmd_per_lun, this should do) */
+#define AHA152X_MAXQUEUE 7
+
+#define AHA152X_REVID "Adaptec 152x SCSI driver; $Revision: 1.1.1.1 $"
+
+extern struct proc_dir_entry proc_scsi_aha152x;
+
+/* Initial value of Scsi_Host entry */
+#define AHA152X { /* next */ NULL, \
+ /* usage_count */ NULL, \
+ /* proc_dir */ &proc_scsi_aha152x, \
+ /* proc_info */ aha152x_proc_info, \
+ /* name */ AHA152X_REVID, \
+ /* detect */ aha152x_detect, \
+ /* release */ NULL, \
+ /* info */ NULL, \
+ /* command */ aha152x_command, \
+ /* queuecommand */ aha152x_queue, \
+ /* abort */ aha152x_abort, \
+ /* reset */ aha152x_reset, \
+ /* slave_attach */ /* NULL */ 0, \
+ /* bios_param */ aha152x_biosparam, \
+ /* can_queue */ 1, \
+ /* this_id */ 7, \
+ /* sg_tablesize */ SG_ALL, \
+ /* cmd_per_lun */ 1, \
+ /* present */ 0, \
+ /* unchecked_isa_dma */ 0, \
+ /* use_clustering */ DISABLE_CLUSTERING }
+#endif
+
+
+/* port addresses */
+#define SCSISEQ (shpnt->io_port+0x00) /* SCSI sequence control */
+#define SXFRCTL0 (shpnt->io_port+0x01) /* SCSI transfer control 0 */
+#define SXFRCTL1 (shpnt->io_port+0x02) /* SCSI transfer control 1 */
+#define SCSISIG (shpnt->io_port+0x03) /* SCSI signal in/out */
+#define SCSIRATE (shpnt->io_port+0x04) /* SCSI rate control */
+#define SELID (shpnt->io_port+0x05) /* selection/reselection ID */
+#define SCSIID SELID /* SCSI ID */
+#define SCSIDAT (shpnt->io_port+0x06) /* SCSI latched data */
+#define SCSIBUS (shpnt->io_port+0x07) /* SCSI data bus */
+#define STCNT0 (shpnt->io_port+0x08) /* SCSI transfer count 0 */
+#define STCNT1 (shpnt->io_port+0x09) /* SCSI transfer count 1 */
+#define STCNT2 (shpnt->io_port+0x0a) /* SCSI transfer count 2 */
+#define SSTAT0 (shpnt->io_port+0x0b) /* SCSI interrupt status 0 */
+#define SSTAT1 (shpnt->io_port+0x0c) /* SCSI interrupt status 1 */
+#define SSTAT2 (shpnt->io_port+0x0d) /* SCSI interrupt status 2 */
+#define SCSITEST (shpnt->io_port+0x0e) /* SCSI test control */
+#define SSTAT3 SCSITEST /* SCSI interrupt status 3 */
+#define SSTAT4 (shpnt->io_port+0x0f) /* SCSI status 4 */
+#define SIMODE0 (shpnt->io_port+0x10) /* SCSI interrupt mode 0 */
+#define SIMODE1 (shpnt->io_port+0x11) /* SCSI interrupt mode 1 */
+#define DMACNTRL0 (shpnt->io_port+0x12) /* DMA control 0 */
+#define DMACNTRL1 (shpnt->io_port+0x13) /* DMA control 1 */
+#define DMASTAT (shpnt->io_port+0x14) /* DMA status */
+#define FIFOSTAT (shpnt->io_port+0x15) /* FIFO status */
+#define DATAPORT (shpnt->io_port+0x16) /* DATA port */
+#define BRSTCNTRL (shpnt->io_port+0x18) /* burst control */
+#define PORTA (shpnt->io_port+0x1a) /* PORT A */
+#define PORTB (shpnt->io_port+0x1b) /* PORT B */
+#define REV (shpnt->io_port+0x1c) /* revision */
+#define STACK (shpnt->io_port+0x1d) /* stack */
+#define TEST (shpnt->io_port+0x1e) /* test register */
+
+/* used in aha152x_porttest */
+#define O_PORTA (0x1a) /* PORT A */
+#define O_PORTB (0x1b) /* PORT B */
+#define O_DMACNTRL1 (0x13) /* DMA control 1 */
+#define O_STACK (0x1d) /* stack */
+#define IO_RANGE 0x20
+
+/* bits and bitmasks to ports */
+
+/* SCSI sequence control */
+#define TEMODEO 0x80
+#define ENSELO 0x40
+#define ENSELI 0x20
+#define ENRESELI 0x10
+#define ENAUTOATNO 0x08
+#define ENAUTOATNI 0x04
+#define ENAUTOATNP 0x02
+#define SCSIRSTO 0x01
+
+/* SCSI transfer control 0 */
+#define SCSIEN 0x80
+#define DMAEN 0x40
+#define CH1 0x20
+#define CLRSTCNT 0x10
+#define SPIOEN 0x08
+#define CLRCH1 0x02
+
+/* SCSI transfer control 1 */
+#define BITBUCKET 0x80
+#define SWRAPEN 0x40
+#define ENSPCHK 0x20
+#define STIMESEL 0x18 /* mask */
+#define STIMESEL_ 3
+#define ENSTIMER 0x04
+#define BYTEALIGN 0x02
+
+/* SCSI signal IN */
+#define CDI 0x80
+#define IOI 0x40
+#define MSGI 0x20
+#define ATNI 0x10
+#define SELI 0x08
+#define BSYI 0x04
+#define REQI 0x02
+#define ACKI 0x01
+
+/* SCSI Phases */
+#define P_MASK (MSGI|CDI|IOI)
+#define P_DATAO (0)
+#define P_DATAI (IOI)
+#define P_CMD (CDI)
+#define P_STATUS (CDI|IOI)
+#define P_MSGO (MSGI|CDI)
+#define P_MSGI (MSGI|CDI|IOI)
+
+/* SCSI signal OUT */
+#define CDO 0x80
+#define IOO 0x40
+#define MSGO 0x20
+#define ATNO 0x10
+#define SELO 0x08
+#define BSYO 0x04
+#define REQO 0x02
+#define ACKO 0x01
+
+/* SCSI rate control */
+#define SXFR 0x70 /* mask */
+#define SXFR_ 4
+#define SOFS 0x0f /* mask */
+
+/* SCSI ID */
+#define OID 0x70
+#define OID_ 4
+#define TID 0x07
+
+/* SCSI transfer count */
+#define GETSTCNT() ( (GETPORT(STCNT2)<<16) \
+ + (GETPORT(STCNT1)<< 8) \
+ + GETPORT(STCNT0) )
+
+#define SETSTCNT(X) { SETPORT(STCNT2, ((X) & 0xFF0000) >> 16); \
+ SETPORT(STCNT1, ((X) & 0x00FF00) >> 8); \
+ SETPORT(STCNT0, ((X) & 0x0000FF) ); }
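+
+/* the SCSI transfer counter is a 24 bit value spread over the three byte
+   wide registers STCNT0 (low), STCNT1 and STCNT2 (high) */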
+
+/* SCSI interrupt status */
+#define TARGET 0x80
+#define SELDO 0x40
+#define SELDI 0x20
+#define SELINGO 0x10
+#define SWRAP 0x08
+#define SDONE 0x04
+#define SPIORDY 0x02
+#define DMADONE 0x01
+
+#define SETSDONE 0x80
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRSWRAP 0x08
+#define CLRSDONE 0x04
+#define CLRSPIORDY 0x02
+#define CLRDMADONE 0x01
+
+/* SCSI status 1 */
+#define SELTO 0x80
+#define ATNTARG 0x40
+#define SCSIRSTI 0x20
+#define PHASEMIS 0x10
+#define BUSFREE 0x08
+#define SCSIPERR 0x04
+#define PHASECHG 0x02
+#define REQINIT 0x01
+
+#define CLRSELTIMO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRPHASECHG 0x02
+#define CLRREQINIT 0x01
+
+/* SCSI status 2 */
+#define SOFFSET 0x20
+#define SEMPTY 0x10
+#define SFULL 0x08
+#define SFCNT 0x07 /* mask */
+
+/* SCSI status 3 */
+#define SCSICNT 0xf0 /* mask */
+#define SCSICNT_ 4
+#define OFFCNT 0x0f /* mask */
+
+/* SCSI TEST control */
+#define SCTESTU 0x08
+#define SCTESTD 0x04
+#define STCTEST 0x01
+
+/* SCSI status 4 */
+#define SYNCERR 0x04
+#define FWERR 0x02
+#define FRERR 0x01
+
+#define CLRSYNCERR 0x04
+#define CLRFWERR 0x02
+#define CLRFRERR 0x01
+
+/* SCSI interrupt mode 0 */
+#define ENSELDO 0x40
+#define ENSELDI 0x20
+#define ENSELINGO 0x10
+#define ENSWRAP 0x08
+#define ENSDONE 0x04
+#define ENSPIORDY 0x02
+#define ENDMADONE 0x01
+
+/* SCSI interrupt mode 1 */
+#define ENSELTIMO 0x80
+#define ENATNTARG 0x40
+#define ENSCSIRST 0x20
+#define ENPHASEMIS 0x10
+#define ENBUSFREE 0x08
+#define ENSCSIPERR 0x04
+#define ENPHASECHG 0x02
+#define ENREQINIT 0x01
+
+/* DMA control 0 */
+#define ENDMA 0x80
+#define _8BIT 0x40
+#define DMA 0x20
+#define WRITE_READ 0x08
+#define INTEN 0x04
+#define RSTFIFO 0x02
+#define SWINT 0x01
+
+/* DMA control 1 */
+#define PWRDWN 0x80
+#define STK 0x07 /* mask */
+
+/* DMA status */
+#define ATDONE 0x80
+#define WORDRDY 0x40
+#define INTSTAT 0x20
+#define DFIFOFULL 0x10
+#define DFIFOEMP 0x08
+
+/* BURST control */
+#define BON 0xf0
+#define BOFF 0x0f
+
+/* TEST REGISTER */
+#define BOFFTMR 0x40
+#define BONTMR 0x20
+#define STCNTH 0x10
+#define STCNTM 0x08
+#define STCNTL 0x04
+#define SCSIBLK 0x02
+#define DMABLK 0x01
+
+/* On the AHA-152x board PORTA and PORTB contain
+ some information about the board's configuration. */
+typedef union {
+ struct {
+ unsigned reserved:2; /* reserved */
+ unsigned tardisc:1; /* Target disconnect: 0=disabled, 1=enabled */
+ unsigned syncneg:1; /* Initial sync neg: 0=disabled, 1=enabled */
+ unsigned msgclasses:2; /* Message classes
+ 0=#4
+ 1=#0, #1, #2, #3, #4
+ 2=#0, #3, #4
+ 3=#0, #4
+ */
+ unsigned boot:1; /* boot: 0=disabled, 1=enabled */
+ unsigned dma:1; /* Transfer mode: 0=PIO; 1=DMA */
+ unsigned id:3; /* SCSI-id */
+ unsigned irq:2; /* IRQ-Channel: 0,3=12, 1=10, 2=11 */
+ unsigned dmachan:2; /* DMA-Channel: 0=0, 1=5, 2=6, 3=7 */
+ unsigned parity:1; /* SCSI-parity: 1=enabled 0=disabled */
+ } fields;
+ unsigned short port;
+} aha152x_config ;
+
+#define cf_parity fields.parity
+#define cf_dmachan fields.dmachan
+#define cf_irq fields.irq
+#define cf_id fields.id
+#define cf_dma fields.dma
+#define cf_boot fields.boot
+#define cf_msgclasses fields.msgclasses
+#define cf_syncneg fields.syncneg
+#define cf_tardisc fields.tardisc
+#define cf_port port
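+
+/* A minimal usage sketch (not part of the original driver and kept
+   disabled): decoding a raw configuration word with the union above.
+   The function name is made up for the example; how the word is read
+   from PORTA/PORTB is left to the detection code. */
+#if 0
+static void example_show_config(unsigned short raw)
+{
+ aha152x_config conf;
+
+ conf.cf_port = raw;
+
+ printk("aha152x: SCSI id %d, parity %s, target disconnect %s\n",
+        conf.cf_id,
+        conf.cf_parity ? "enabled" : "disabled",
+        conf.cf_tardisc ? "enabled" : "disabled");
+}
+#endif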
+
+/* Some macros to manipulate ports and their bits */
+
+#define SETPORT(PORT, VAL) \
+ outb( (VAL), (PORT) )
+
+#define SETPORTP(PORT, VAL) \
+ outb_p( (VAL), (PORT) )
+
+#define SETPORTW(PORT, VAL) \
+ outw( (VAL), (PORT) )
+
+#define GETPORT(PORT) \
+ inb( PORT )
+
+#define GETPORTW(PORT) \
+ inw( PORT )
+
+#define SETBITS(PORT, BITS) \
+ outb( (inb(PORT) | (BITS)), (PORT) )
+
+#define CLRBITS(PORT, BITS) \
+ outb( (inb(PORT) & ~(BITS)), (PORT) )
+
+#define CLRSETBITS(PORT, CLR, SET) \
+ outb( (inb(PORT) & ~(CLR)) | (SET) , (PORT) )
+
+#define TESTHI(PORT, BITS) \
+ ((inb(PORT) & (BITS)) == (BITS))
+
+#define TESTLO(PORT, BITS) \
+ ((inb(PORT) & (BITS)) == 0)
+
+#ifdef DEBUG_AHA152X
+enum {
+ debug_skipports =0x0001,
+ debug_queue =0x0002,
+ debug_intr =0x0004,
+ debug_selection =0x0008,
+ debug_msgo =0x0010,
+ debug_msgi =0x0020,
+ debug_status =0x0040,
+ debug_cmd =0x0080,
+ debug_datai =0x0100,
+ debug_datao =0x0200,
+ debug_abort =0x0400,
+ debug_done =0x0800,
+ debug_biosparam =0x1000,
+ debug_phases =0x2000,
+ debug_queues =0x4000,
+ debug_reset =0x8000,
+};
+#endif
+
+#endif /* _AHA152X_H */
diff --git a/i386/i386at/gpl/linux/scsi/aha1542.c b/i386/i386at/gpl/linux/scsi/aha1542.c
new file mode 100644
index 00000000..026292eb
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/aha1542.c
@@ -0,0 +1,1323 @@
+/* $Id: aha1542.c,v 1.1.1.1 1997/02/25 21:27:46 thomas Exp $
+ * linux/kernel/aha1542.c
+ *
+ * Copyright (C) 1992 Tommy Thorn
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * Modified by Eric Youngdale
+ * Use request_irq and request_dma to help prevent unexpected conflicts
+ * Set up on-board DMA controller, such that we do not have to
+ * have the bios enabled to use the aha1542.
+ * Modified by David Gentzel
+ * Don't call request_dma if dma mask is 0 (for BusLogic BT-445S VL-Bus
+ * controller).
+ * Modified by Matti Aarnio
+ * Accept parameters from LILO cmd-line. -- 1-Oct-94
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+
+
+#include "aha1542.h"
+
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_aha1542 = {
+ PROC_SCSI_AHA1542, 7, "aha1542",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#ifdef DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/i386/i386at/gpl/linux/scsi/Attic/aha1542.c,v 1.1.1.1 1997/02/25 21:27:46 thomas Exp $";
+*/
+
+/* The Adaptec can be configured for quite a number of addresses, but
+   I generally do not want the card poking around at random.  We allow
+   two addresses - so people can use the Adaptec together with a MIDI
+   card, which also uses 0x330 -- this can be overridden with LILO! */
+
+#define MAXBOARDS 2 /* Increase this and the sizes of the
+ arrays below if you need more. */
+
+static unsigned int bases[MAXBOARDS]={0x330, 0x334};
+
+/* set by aha1542_setup according to the command line */
+static int setup_called[MAXBOARDS] = {0,0};
+static int setup_buson[MAXBOARDS] = {0,0};
+static int setup_busoff[MAXBOARDS] = {0,0};
+static int setup_dmaspeed[MAXBOARDS] = {-1,-1};
+
+static char *setup_str[MAXBOARDS] = {(char *)NULL,(char *)NULL};
+
+/*
+ * LILO params: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]
+ *
+ * Where: <PORTBASE> is any of the valid AHA addresses:
+ * 0x130, 0x134, 0x230, 0x234, 0x330, 0x334
+ * <BUSON> is the time (in microsecs) that AHA spends on the AT-bus
+ * when transferring data. 1542A power-on default is 11us,
+ * valid values are in range: 2..15 (decimal)
+ * <BUSOFF> is the time that the AHA spends OFF THE BUS between
+ * bursts while it is transferring data (so as not to monopolize the bus).
+ * Power-on default is 4us, valid range: 1..64 microseconds.
+ * <DMASPEED> Default is jumper selected (1542A: on the J1),
+ * but experimenter can alter it with this.
+ * Valid values: 5, 6, 7, 8, 10 (MB/s)
+ * Factory default is 5 MB/s.
+ */
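+
+/*
+ * Example (illustrative values only): a controller at 0x334 using 11us
+ * bus-on, 4us bus-off and the jumper-selected DMA speed could be set up
+ * with the boot parameter
+ *
+ *     aha1542=0x334,11,4
+ */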
+
+
+/* The DMA-Controller. We need to fool with this because we want to
+ be able to use the aha1542 without having to have the bios enabled */
+#define DMA_MODE_REG 0xd6
+#define DMA_MASK_REG 0xd4
+#define CASCADE 0xc0
+
+#define BIOS_TRANSLATION_1632 0 /* Used by some old 1542A boards */
+#define BIOS_TRANSLATION_6432 1 /* Default case these days */
+#define BIOS_TRANSLATION_25563 2 /* Big disk case */
+
+struct aha1542_hostdata{
+ /* This will effectively start both of them at the first mailbox */
+ int bios_translation; /* Mapping bios uses - for compatibility */
+ int aha1542_last_mbi_used;
+ int aha1542_last_mbo_used;
+ Scsi_Cmnd * SCint[AHA1542_MAILBOXES];
+ struct mailbox mb[2*AHA1542_MAILBOXES];
+ struct ccb ccb[AHA1542_MAILBOXES];
+};
+
+#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata)
+
+static struct Scsi_Host * aha_host[7] = {NULL,}; /* One for each IRQ level (9-15) */
+
+
+
+
+#define WAITnexttimeout 3000000
+
+static void setup_mailboxes(int base_io, struct Scsi_Host * shpnt);
+static int aha1542_restart(struct Scsi_Host * shost);
+
+#define aha1542_intr_reset(base) outb(IRST, CONTROL(base))
+
+#define WAIT(port, mask, allof, noneof) \
+ { register WAITbits; \
+ register WAITtimeout = WAITnexttimeout; \
+ while (1) { \
+ WAITbits = inb(port) & (mask); \
+ if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
+ break; \
+ if (--WAITtimeout == 0) goto fail; \
+ } \
+ }
+
+/* Similar to WAIT, except we use the udelay call to regulate the
+ amount of time we wait. */
+#define WAITd(port, mask, allof, noneof, timeout) \
+ { register WAITbits; \
+ register WAITtimeout = timeout; \
+ while (1) { \
+ WAITbits = inb(port) & (mask); \
+ if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
+ break; \
+ udelay(1000); \
+ if (--WAITtimeout == 0) goto fail; \
+ } \
+ }
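+
+/* Note that both WAIT and WAITd bail out via "goto fail" when the timeout
+   expires, so every function that uses them must provide a fail: label. */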
+
+static void aha1542_stat(void)
+{
+/* int s = inb(STATUS), i = inb(INTRFLAGS);
+ printk("status=%x intrflags=%x\n", s, i, WAITnexttimeout-WAITtimeout); */
+}
+
+/* This is a bit complicated, but we need to make sure that an interrupt
+ routine does not send something out while we are in the middle of this.
+ Fortunately, it is only at boot time that multi-byte messages
+ are ever sent. */
+static int aha1542_out(unsigned int base, unchar *cmdp, int len)
+{
+ unsigned long flags = 0;
+
+ save_flags(flags);
+ if(len == 1) {
+ while(1==1){
+ WAIT(STATUS(base), CDF, 0, CDF);
+ cli();
+ if(inb(STATUS(base)) & CDF) {restore_flags(flags); continue;}
+ outb(*cmdp, DATA(base));
+ restore_flags(flags);
+ return 0;
+ }
+ } else {
+ cli();
+ while (len--)
+ {
+ WAIT(STATUS(base), CDF, 0, CDF);
+ outb(*cmdp++, DATA(base));
+ }
+ restore_flags(flags);
+ }
+ return 0;
+ fail:
+ restore_flags(flags);
+ printk("aha1542_out failed(%d): ", len+1); aha1542_stat();
+ return 1;
+}
+
+/* Only used at boot time, so we do not need to worry about latency as much
+ here */
+static int aha1542_in(unsigned int base, unchar *cmdp, int len)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ while (len--)
+ {
+ WAIT(STATUS(base), DF, DF, 0);
+ *cmdp++ = inb(DATA(base));
+ }
+ restore_flags(flags);
+ return 0;
+ fail:
+ restore_flags(flags);
+ printk("aha1542_in failed(%d): ", len+1); aha1542_stat();
+ return 1;
+}
+
+/* Similar to aha1542_in, except that we wait a very short period of time.
+ We use this if we know the board is alive and awake, but we are not sure
+ if the board will respond to the command we are about to send or not */
+static int aha1542_in1(unsigned int base, unchar *cmdp, int len)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ while (len--)
+ {
+ WAITd(STATUS(base), DF, DF, 0, 100);
+ *cmdp++ = inb(DATA(base));
+ }
+ restore_flags(flags);
+ return 0;
+ fail:
+ restore_flags(flags);
+ return 1;
+}
+
+static int makecode(unsigned hosterr, unsigned scsierr)
+{
+ switch (hosterr) {
+ case 0x0:
+ case 0xa: /* Linked command complete without error and linked normally */
+ case 0xb: /* Linked command complete without error, interrupt generated */
+ hosterr = 0;
+ break;
+
+ case 0x11: /* Selection time out-The initiator selection or target
+ reselection was not complete within the SCSI Time out period */
+ hosterr = DID_TIME_OUT;
+ break;
+
+ case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
+ than was allocated by the Data Length field or the sum of the
+ Scatter / Gather Data Length fields. */
+
+ case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
+
+ case 0x15: /* MBO command was not 00, 01 or 02-The first byte of the CB was
+ invalid. This usually indicates a software failure. */
+
+ case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid.
+ This usually indicates a software failure. */
+
+ case 0x17: /* Linked CCB does not have the same LUN-A subsequent CCB of a set
+ of linked CCB's does not specify the same logical unit number as
+ the first. */
+ case 0x18: /* Invalid Target Direction received from Host-The direction of a
+ Target Mode CCB was invalid. */
+
+  case 0x19: /* Duplicate CCB Received in Target Mode-More than one CCB was
+ received to service data transfer between the same target LUN
+ and initiator SCSI ID in the same direction. */
+
+ case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero
+ length segment or invalid segment list boundaries was received.
+ A CCB parameter was invalid. */
+ DEB(printk("Aha1542: %x %x\n", hosterr, scsierr));
+ hosterr = DID_ERROR; /* Couldn't find any better */
+ break;
+
+ case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
+ phase sequence was requested by the target. The host adapter
+ will generate a SCSI Reset Condition, notifying the host with
+ a SCRD interrupt */
+ hosterr = DID_RESET;
+ break;
+ default:
+ printk("makecode: unknown hoststatus %x\n", hosterr);
+ break;
+ }
+ return scsierr|(hosterr << 16);
+}
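+
+/* A hypothetical sketch (not used by the driver) of how the packed result
+   word built by makecode() can be taken apart again: the return statement
+   above puts the SCSI target status in the low byte and the (possibly
+   remapped) host adapter status in bits 16-23, which is the convention the
+   mid-level SCSI code expects. */
+#if 0
+static void example_decode_result(unsigned int result)
+{
+	unsigned int target_status = result & 0xff;		/* e.g. 2 = check condition */
+	unsigned int host_status = (result >> 16) & 0xff;	/* e.g. DID_TIME_OUT */
+	printk("target status %x, host status %x\n", target_status, host_status);
+}
+#endif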
+
+static int aha1542_test_port(int bse, struct Scsi_Host * shpnt)
+{
+ int i;
+ unchar inquiry_cmd[] = {CMD_INQUIRY };
+ unchar inquiry_result[4];
+ unchar *cmdp;
+ int len;
+ volatile int debug = 0;
+
+ /* Quick and dirty test for presence of the card. */
+ if(inb(STATUS(bse)) == 0xff) return 0;
+
+  /* Reset the adapter. I ought to do a hard reset, but it's not really necessary */
+
+ /* DEB(printk("aha1542_test_port called \n")); */
+
+ /* In case some other card was probing here, reset interrupts */
+ aha1542_intr_reset(bse); /* reset interrupts, so they don't block */
+
+ outb(SRST|IRST/*|SCRST*/, CONTROL(bse));
+
+ i = jiffies + 2;
+ while (i>jiffies); /* Wait a little bit for things to settle down. */
+
+ debug = 1;
+ /* Expect INIT and IDLE, any of the others are bad */
+ WAIT(STATUS(bse), STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF);
+
+ debug = 2;
+ /* Shouldn't have generated any interrupts during reset */
+ if (inb(INTRFLAGS(bse))&INTRMASK) goto fail;
+
+
+ /* Perform a host adapter inquiry instead so we do not need to set
+ up the mailboxes ahead of time */
+
+ aha1542_out(bse, inquiry_cmd, 1);
+
+ debug = 3;
+ len = 4;
+ cmdp = &inquiry_result[0];
+
+ while (len--)
+ {
+ WAIT(STATUS(bse), DF, DF, 0);
+ *cmdp++ = inb(DATA(bse));
+ }
+
+ debug = 8;
+ /* Reading port should reset DF */
+ if (inb(STATUS(bse)) & DF) goto fail;
+
+ debug = 9;
+  /* When HACC is set, the command is completed, and we're through testing */
+ WAIT(INTRFLAGS(bse), HACC, HACC, 0);
+ /* now initialize adapter */
+
+ debug = 10;
+ /* Clear interrupts */
+ outb(IRST, CONTROL(bse));
+
+ debug = 11;
+
+ return debug; /* 1 = ok */
+ fail:
+ return 0; /* 0 = not ok */
+}
+
+/* A "high" level interrupt handler */
+static void aha1542_intr_handle(int irq, struct pt_regs *regs)
+{
+ void (*my_done)(Scsi_Cmnd *) = NULL;
+ int errstatus, mbi, mbo, mbistatus;
+ int number_serviced;
+ unsigned int flags;
+ struct Scsi_Host * shost;
+ Scsi_Cmnd * SCtmp;
+ int flag;
+ int needs_restart;
+ struct mailbox * mb;
+ struct ccb *ccb;
+
+ shost = aha_host[irq - 9];
+ if(!shost) panic("Splunge!");
+
+ mb = HOSTDATA(shost)->mb;
+ ccb = HOSTDATA(shost)->ccb;
+
+#ifdef DEBUG
+ {
+ flag = inb(INTRFLAGS(shost->io_port));
+ printk("aha1542_intr_handle: ");
+ if (!(flag&ANYINTR)) printk("no interrupt?");
+ if (flag&MBIF) printk("MBIF ");
+ if (flag&MBOA) printk("MBOF ");
+ if (flag&HACC) printk("HACC ");
+ if (flag&SCRD) printk("SCRD ");
+ printk("status %02x\n", inb(STATUS(shost->io_port)));
+ };
+#endif
+ number_serviced = 0;
+ needs_restart = 0;
+
+ while(1==1){
+ flag = inb(INTRFLAGS(shost->io_port));
+
+ /* Check for unusual interrupts. If any of these happen, we should
+ probably do something special, but for now just printing a message
+ is sufficient. A SCSI reset detected is something that we really
+ need to deal with in some way. */
+ if (flag & ~MBIF) {
+ if (flag&MBOA) printk("MBOF ");
+ if (flag&HACC) printk("HACC ");
+ if (flag&SCRD) {
+ needs_restart = 1;
+ printk("SCRD ");
+ }
+ }
+
+ aha1542_intr_reset(shost->io_port);
+
+ save_flags(flags);
+ cli();
+ mbi = HOSTDATA(shost)->aha1542_last_mbi_used + 1;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+
+ do{
+ if(mb[mbi].status != 0) break;
+ mbi++;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+ } while (mbi != HOSTDATA(shost)->aha1542_last_mbi_used);
+
+ if(mb[mbi].status == 0){
+ restore_flags(flags);
+ /* Hmm, no mail. Must have read it the last time around */
+ if (!number_serviced && !needs_restart)
+ printk("aha1542.c: interrupt received, but no mail.\n");
+ /* We detected a reset. Restart all pending commands for
+ devices that use the hard reset option */
+ if(needs_restart) aha1542_restart(shost);
+ return;
+ };
+
+ mbo = (scsi2int(mb[mbi].ccbptr) - ((unsigned int) &ccb[0])) / sizeof(struct ccb);
+ mbistatus = mb[mbi].status;
+ mb[mbi].status = 0;
+ HOSTDATA(shost)->aha1542_last_mbi_used = mbi;
+ restore_flags(flags);
+
+#ifdef DEBUG
+ {
+ if (ccb[mbo].tarstat|ccb[mbo].hastat)
+ printk("aha1542_command: returning %x (status %d)\n",
+ ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status);
+ };
+#endif
+
+ if(mbistatus == 3) continue; /* Aborted command not found */
+
+#ifdef DEBUG
+ printk("...done %d %d\n",mbo, mbi);
+#endif
+
+ SCtmp = HOSTDATA(shost)->SCint[mbo];
+
+ if (!SCtmp || !SCtmp->scsi_done) {
+ printk("aha1542_intr_handle: Unexpected interrupt\n");
+ printk("tarstat=%x, hastat=%x idlun=%x ccb#=%d \n", ccb[mbo].tarstat,
+ ccb[mbo].hastat, ccb[mbo].idlun, mbo);
+ return;
+ }
+
+ my_done = SCtmp->scsi_done;
+ if (SCtmp->host_scribble) scsi_free(SCtmp->host_scribble, 512);
+
+ /* Fetch the sense data, and tuck it away, in the required slot. The
+ Adaptec automatically fetches it, and there is no guarantee that
+ we will still have it in the cdb when we come back */
+ if (ccb[mbo].tarstat == 2)
+ memcpy(SCtmp->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
+ sizeof(SCtmp->sense_buffer));
+
+
+ /* is there mail :-) */
+
+ /* more error checking left out here */
+ if (mbistatus != 1)
+ /* This is surely wrong, but I don't know what's right */
+ errstatus = makecode(ccb[mbo].hastat, ccb[mbo].tarstat);
+ else
+ errstatus = 0;
+
+#ifdef DEBUG
+ if(errstatus) printk("(aha1542 error:%x %x %x) ",errstatus,
+ ccb[mbo].hastat, ccb[mbo].tarstat);
+#endif
+
+ if (ccb[mbo].tarstat == 2) {
+#ifdef DEBUG
+ int i;
+#endif
+ DEB(printk("aha1542_intr_handle: sense:"));
+#ifdef DEBUG
+ for (i = 0; i < 12; i++)
+ printk("%02x ", ccb[mbo].cdb[ccb[mbo].cdblen+i]);
+ printk("\n");
+#endif
+ /*
+ DEB(printk("aha1542_intr_handle: buf:"));
+ for (i = 0; i < bufflen; i++)
+ printk("%02x ", ((unchar *)buff)[i]);
+ printk("\n");
+ */
+ }
+ DEB(if (errstatus) printk("aha1542_intr_handle: returning %6x\n", errstatus));
+ SCtmp->result = errstatus;
+ HOSTDATA(shost)->SCint[mbo] = NULL; /* This effectively frees up the mailbox slot, as
+ far as queuecommand is concerned */
+ my_done(SCtmp);
+ number_serviced++;
+ };
+}
+
+int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ unchar ahacmd = CMD_START_SCSI;
+ unchar direction;
+ unchar *cmd = (unchar *) SCpnt->cmnd;
+ unchar target = SCpnt->target;
+ unchar lun = SCpnt->lun;
+ unsigned long flags;
+ void *buff = SCpnt->request_buffer;
+ int bufflen = SCpnt->request_bufflen;
+ int mbo;
+ struct mailbox * mb;
+ struct ccb *ccb;
+
+ DEB(int i);
+
+ mb = HOSTDATA(SCpnt->host)->mb;
+ ccb = HOSTDATA(SCpnt->host)->ccb;
+
+ DEB(if (target > 1) {
+ SCpnt->result = DID_TIME_OUT << 16;
+ done(SCpnt); return 0;});
+
+ if(*cmd == REQUEST_SENSE){
+#ifndef DEBUG
+ if (bufflen != sizeof(SCpnt->sense_buffer)) {
+ printk("Wrong buffer length supplied for request sense (%d)\n",bufflen);
+ };
+#endif
+ SCpnt->result = 0;
+ done(SCpnt);
+ return 0;
+ };
+
+#ifdef DEBUG
+ if (*cmd == READ_10 || *cmd == WRITE_10)
+ i = xscsi2int(cmd+2);
+ else if (*cmd == READ_6 || *cmd == WRITE_6)
+ i = scsi2int(cmd+2);
+ else
+ i = -1;
+ if (done)
+ printk("aha1542_queuecommand: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen);
+ else
+ printk("aha1542_command: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen);
+ aha1542_stat();
+ printk("aha1542_queuecommand: dumping scsi cmd:");
+ for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]);
+ printk("\n");
+ if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ return 0; /* we are still testing, so *don't* write */
+#endif
+/* Use the outgoing mailboxes in a round-robin fashion, because this
+   is how the host adapter will scan for them (a sketch of the mailbox
+   layout appears after this function) */
+
+ save_flags(flags);
+ cli();
+ mbo = HOSTDATA(SCpnt->host)->aha1542_last_mbo_used + 1;
+ if (mbo >= AHA1542_MAILBOXES) mbo = 0;
+
+ do{
+ if(mb[mbo].status == 0 && HOSTDATA(SCpnt->host)->SCint[mbo] == NULL)
+ break;
+ mbo++;
+ if (mbo >= AHA1542_MAILBOXES) mbo = 0;
+ } while (mbo != HOSTDATA(SCpnt->host)->aha1542_last_mbo_used);
+
+ if(mb[mbo].status || HOSTDATA(SCpnt->host)->SCint[mbo])
+ panic("Unable to find empty mailbox for aha1542.\n");
+
+ HOSTDATA(SCpnt->host)->SCint[mbo] = SCpnt; /* This will effectively prevent someone else from
+ screwing with this cdb. */
+
+ HOSTDATA(SCpnt->host)->aha1542_last_mbo_used = mbo;
+ restore_flags(flags);
+
+#ifdef DEBUG
+ printk("Sending command (%d %x)...",mbo, done);
+#endif
+
+ any2scsi(mb[mbo].ccbptr, &ccb[mbo]); /* This gets trashed for some reason*/
+
+ memset(&ccb[mbo], 0, sizeof(struct ccb));
+
+ ccb[mbo].cdblen = SCpnt->cmd_len;
+
+ direction = 0;
+ if (*cmd == READ_10 || *cmd == READ_6)
+ direction = 8;
+ else if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ direction = 16;
+
+ memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
+
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ struct chain * cptr;
+#ifdef DEBUG
+ unsigned char * ptr;
+#endif
+ int i;
+ ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather*/
+ SCpnt->host_scribble = (unsigned char *) scsi_malloc(512);
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+ cptr = (struct chain *) SCpnt->host_scribble;
+ if (cptr == NULL) panic("aha1542.c: unable to allocate DMA memory\n");
+ for(i=0; i<SCpnt->use_sg; i++) {
+ if(sgpnt[i].length == 0 || SCpnt->use_sg > 16 ||
+ (((int)sgpnt[i].address) & 1) || (sgpnt[i].length & 1)){
+ unsigned char * ptr;
+ printk("Bad segment list supplied to aha1542.c (%d, %d)\n",SCpnt->use_sg,i);
+ for(i=0;i<SCpnt->use_sg;i++){
+ printk("%d: %x %x %d\n",i,(unsigned int) sgpnt[i].address, (unsigned int) sgpnt[i].alt_address,
+ sgpnt[i].length);
+ };
+ printk("cptr %x: ",(unsigned int) cptr);
+ ptr = (unsigned char *) &cptr[i];
+ for(i=0;i<18;i++) printk("%02x ", ptr[i]);
+ panic("Foooooooood fight!");
+ };
+ any2scsi(cptr[i].dataptr, sgpnt[i].address);
+ if(((unsigned int) sgpnt[i].address) & 0xff000000) goto baddma;
+ any2scsi(cptr[i].datalen, sgpnt[i].length);
+ };
+ any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
+ any2scsi(ccb[mbo].dataptr, cptr);
+#ifdef DEBUG
+ printk("cptr %x: ",cptr);
+ ptr = (unsigned char *) cptr;
+ for(i=0;i<18;i++) printk("%02x ", ptr[i]);
+#endif
+ } else {
+ ccb[mbo].op = 0; /* SCSI Initiator Command */
+ SCpnt->host_scribble = NULL;
+ any2scsi(ccb[mbo].datalen, bufflen);
+ if(((unsigned int) buff & 0xff000000)) goto baddma;
+ any2scsi(ccb[mbo].dataptr, buff);
+ };
+ ccb[mbo].idlun = (target&7)<<5 | direction | (lun & 7); /*SCSI Target Id*/
+ ccb[mbo].rsalen = 12;
+ ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0;
+ ccb[mbo].commlinkid = 0;
+
+#ifdef DEBUG
+ { int i;
+ printk("aha1542_command: sending.. ");
+ for (i = 0; i < sizeof(ccb[mbo])-10; i++)
+ printk("%02x ", ((unchar *)&ccb[mbo])[i]);
+ };
+#endif
+
+ if (done) {
+ DEB(printk("aha1542_queuecommand: now waiting for interrupt "); aha1542_stat());
+ SCpnt->scsi_done = done;
+ mb[mbo].status = 1;
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1); /* start scsi command */
+ DEB(aha1542_stat());
+ }
+ else
+ printk("aha1542_queuecommand: done can't be NULL\n");
+
+ return 0;
+ baddma:
+ panic("Buffer at address > 16Mb used for 1542B");
+}
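+
+/* A small sketch (hypothetical helpers, not used by the driver) of the
+   mailbox layout that the scan above and the one in aha1542_intr_handle
+   rely on: mb[] holds 2*AHA1542_MAILBOXES entries, indices
+   0..AHA1542_MAILBOXES-1 being the outgoing (host -> adapter) mailboxes
+   filled here, and indices AHA1542_MAILBOXES..2*AHA1542_MAILBOXES-1 the
+   incoming (adapter -> host) ones drained by the interrupt handler.  Each
+   scan starts one past the slot it used last and wraps within its own
+   half, matching the round-robin order the adapter itself uses. */
+#if 0
+static int example_next_outgoing_mbox(int last_mbo_used)
+{
+	int mbo = last_mbo_used + 1;
+	if (mbo >= AHA1542_MAILBOXES) mbo = 0;	/* wrap within the outgoing half */
+	return mbo;
+}
+
+static int example_next_incoming_mbox(int last_mbi_used)
+{
+	int mbi = last_mbi_used + 1;
+	if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;	/* wrap within the incoming half */
+	return mbi;
+}
+#endif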
+
+static void internal_done(Scsi_Cmnd * SCpnt)
+{
+ SCpnt->SCp.Status++;
+}
+
+int aha1542_command(Scsi_Cmnd * SCpnt)
+{
+ DEB(printk("aha1542_command: ..calling aha1542_queuecommand\n"));
+
+    /* Clear the completion flag before queueing the command, so that a
+       completion arriving before we get back here cannot be missed. */
+    SCpnt->SCp.Status = 0;
+
+    aha1542_queuecommand(SCpnt, internal_done);
+
+    while (!SCpnt->SCp.Status)
+	barrier();
+ return SCpnt->result;
+}
+
+/* Initialize mailboxes */
+static void setup_mailboxes(int bse, struct Scsi_Host * shpnt)
+{
+ int i;
+ struct mailbox * mb;
+ struct ccb *ccb;
+
+ unchar cmd[5] = {CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
+
+ mb = HOSTDATA(shpnt)->mb;
+ ccb = HOSTDATA(shpnt)->ccb;
+
+ for(i=0; i<AHA1542_MAILBOXES; i++){
+ mb[i].status = mb[AHA1542_MAILBOXES+i].status = 0;
+ any2scsi(mb[i].ccbptr, &ccb[i]);
+ };
+ aha1542_intr_reset(bse); /* reset interrupts, so they don't block */
+ any2scsi((cmd+2), mb);
+ aha1542_out(bse, cmd, 5);
+ WAIT(INTRFLAGS(bse), INTRMASK, HACC, 0);
+ while (0) {
+ fail:
+ printk("aha1542_detect: failed setting up mailboxes\n");
+ }
+ aha1542_intr_reset(bse);
+}
+
+static int aha1542_getconfig(int base_io, unsigned char * irq_level, unsigned char * dma_chan, unsigned char * scsi_id)
+{
+ unchar inquiry_cmd[] = {CMD_RETCONF };
+ unchar inquiry_result[3];
+ int i;
+ i = inb(STATUS(base_io));
+ if (i & DF) {
+ i = inb(DATA(base_io));
+ };
+ aha1542_out(base_io, inquiry_cmd, 1);
+ aha1542_in(base_io, inquiry_result, 3);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ while (0) {
+ fail:
+ printk("aha1542_detect: query board settings\n");
+ }
+ aha1542_intr_reset(base_io);
+ switch(inquiry_result[0]){
+ case 0x80:
+ *dma_chan = 7;
+ break;
+ case 0x40:
+ *dma_chan = 6;
+ break;
+ case 0x20:
+ *dma_chan = 5;
+ break;
+ case 0x01:
+ printk("DMA priority 0 not available for Adaptec driver\n");
+ return -1;
+ case 0:
+ /* This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel.
+ Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. */
+ *dma_chan = 0xFF;
+ break;
+ default:
+ printk("Unable to determine Adaptec DMA priority. Disabling board\n");
+ return -1;
+ };
+ switch(inquiry_result[1]){
+ case 0x40:
+ *irq_level = 15;
+ break;
+ case 0x20:
+ *irq_level = 14;
+ break;
+ case 0x8:
+ *irq_level = 12;
+ break;
+ case 0x4:
+ *irq_level = 11;
+ break;
+ case 0x2:
+ *irq_level = 10;
+ break;
+ case 0x1:
+ *irq_level = 9;
+ break;
+ default:
+ printk("Unable to determine Adaptec IRQ level. Disabling board\n");
+ return -1;
+ };
+ *scsi_id=inquiry_result[2] & 7;
+ return 0;
+}
+
+/* This function should only be called for 1542C boards - we can detect
+ the special firmware settings and unlock the board */
+
+static int aha1542_mbenable(int base)
+{
+ static unchar mbenable_cmd[3];
+ static unchar mbenable_result[2];
+ int retval;
+
+ retval = BIOS_TRANSLATION_6432;
+
+ mbenable_cmd[0]=CMD_EXTBIOS;
+ aha1542_out(base,mbenable_cmd,1);
+ if(aha1542_in1(base,mbenable_result,2))
+ return retval;
+ WAITd(INTRFLAGS(base),INTRMASK,HACC,0,100);
+ aha1542_intr_reset(base);
+
+ if ((mbenable_result[0] & 0x08) || mbenable_result[1]) {
+ mbenable_cmd[0]=CMD_MBENABLE;
+ mbenable_cmd[1]=0;
+ mbenable_cmd[2]=mbenable_result[1];
+ if(mbenable_result[1] & 1) retval = BIOS_TRANSLATION_25563;
+ aha1542_out(base,mbenable_cmd,3);
+ WAIT(INTRFLAGS(base),INTRMASK,HACC,0);
+ };
+ while(0) {
+fail:
+ printk("aha1542_mbenable: Mailbox init failed\n");
+ }
+aha1542_intr_reset(base);
+return retval;
+}
+
+/* Query the board to find out if it is a 1542 or a 1740, or whatever. */
+static int aha1542_query(int base_io, int * transl)
+{
+ unchar inquiry_cmd[] = {CMD_INQUIRY };
+ unchar inquiry_result[4];
+ int i;
+ i = inb(STATUS(base_io));
+ if (i & DF) {
+ i = inb(DATA(base_io));
+ };
+ aha1542_out(base_io, inquiry_cmd, 1);
+ aha1542_in(base_io, inquiry_result, 4);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ while (0) {
+ fail:
+ printk("aha1542_detect: query card type\n");
+ }
+ aha1542_intr_reset(base_io);
+
+ *transl = BIOS_TRANSLATION_6432; /* Default case */
+
+/* For an AHA1740 series board we ignore the board, since there is a
+   hardware bug which can lead to wrong blocks being returned if the board
+   is operating in the 1542 emulation mode.  Since there is an extended-mode
+   driver, we simply let the 1740 driver pick it up.
+*/
+
+ if (inquiry_result[0] == 0x43) {
+ printk("aha1542.c: Emulation mode not supported for AHA 174N hardware.\n");
+ return 1;
+ };
+
+ /* Always call this - boards that do not support extended bios translation
+ will ignore the command, and we will set the proper default */
+
+ *transl = aha1542_mbenable(base_io);
+
+ return 0;
+}
+
+/* called from init/main.c */
+void aha1542_setup( char *str, int *ints)
+{
+ const char *ahausage = "aha1542: usage: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]\n";
+ static int setup_idx = 0;
+ int setup_portbase;
+
+ if(setup_idx >= MAXBOARDS)
+ {
+ printk("aha1542: aha1542_setup called too many times! Bad LILO params ?\n");
+ printk(" Entryline 1: %s\n",setup_str[0]);
+ printk(" Entryline 2: %s\n",setup_str[1]);
+ printk(" This line: %s\n",str);
+ return;
+ }
+ if (ints[0] < 1 || ints[0] > 4)
+ {
+ printk("aha1542: %s\n", str );
+ printk(ahausage);
+ printk("aha1542: Wrong parameters may cause system malfunction.. We try anyway..\n");
+ }
+
+ setup_called[setup_idx]=ints[0];
+ setup_str[setup_idx]=str;
+
+ setup_portbase = ints[0] >= 1 ? ints[1] : 0; /* Preserve the default value.. */
+ setup_buson [setup_idx] = ints[0] >= 2 ? ints[2] : 7;
+ setup_busoff [setup_idx] = ints[0] >= 3 ? ints[3] : 5;
+ if (ints[0] >= 4) {
+ int atbt = -1;
+ switch (ints[4]) {
+ case 5:
+ atbt = 0x00;
+ break;
+ case 6:
+ atbt = 0x04;
+ break;
+ case 7:
+ atbt = 0x01;
+ break;
+ case 8:
+ atbt = 0x02;
+ break;
+ case 10:
+ atbt = 0x03;
+ break;
+ default:
+ printk("aha1542: %s\n", str );
+ printk(ahausage);
+ printk("aha1542: Valid values for DMASPEED are 5-8, 10 MB/s. Using jumper defaults.\n");
+ break;
+ }
+ setup_dmaspeed[setup_idx] = atbt;
+ }
+
+ if (setup_portbase != 0)
+ bases[setup_idx] = setup_portbase;
+
+ ++setup_idx;
+}
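+
+/* An illustrative example (hypothetical values) of the boot line parsed
+   above: with the usual LILO convention ints[0] holds the number of values
+   that follow, so a line such as
+
+       aha1542=0x330,11,4,7
+
+   arrives here as ints[] = { 4, 0x330, 11, 4, 7 } and sets the port base
+   to 0x330, BUSON to 11, BUSOFF to 4, and DMASPEED to 7 MB/s (programmed
+   as the value 0x01 per the switch above). */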
+
+/* return non-zero on detection */
+int aha1542_detect(Scsi_Host_Template * tpnt)
+{
+ unsigned char dma_chan;
+ unsigned char irq_level;
+ unsigned char scsi_id;
+ unsigned long flags;
+ unsigned int base_io;
+ int trans;
+ struct Scsi_Host * shpnt = NULL;
+ int count = 0;
+ int indx;
+
+ DEB(printk("aha1542_detect: \n"));
+
+ tpnt->proc_dir = &proc_scsi_aha1542;
+
+ for(indx = 0; indx < sizeof(bases)/sizeof(bases[0]); indx++)
+ if(bases[indx] != 0 && !check_region(bases[indx], 4)) {
+ shpnt = scsi_register(tpnt,
+ sizeof(struct aha1542_hostdata));
+
+ /* For now we do this - until kmalloc is more intelligent
+ we are resigned to stupid hacks like this */
+ if ((unsigned int) shpnt > 0xffffff) {
+ printk("Invalid address for shpnt with 1542.\n");
+ goto unregister;
+ }
+
+ if(!aha1542_test_port(bases[indx], shpnt)) goto unregister;
+
+
+ base_io = bases[indx];
+
+      /* Set the Bus on/off-times so as not to ruin floppy performance */
+ {
+ unchar oncmd[] = {CMD_BUSON_TIME, 7};
+ unchar offcmd[] = {CMD_BUSOFF_TIME, 5};
+
+ if(setup_called[indx])
+ {
+ oncmd[1] = setup_buson[indx];
+ offcmd[1] = setup_busoff[indx];
+ }
+
+ aha1542_intr_reset(base_io);
+ aha1542_out(base_io, oncmd, 2);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ aha1542_intr_reset(base_io);
+ aha1542_out(base_io, offcmd, 2);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ if (setup_dmaspeed[indx] >= 0)
+ {
+ unchar dmacmd[] = {CMD_DMASPEED, 0};
+ dmacmd[1] = setup_dmaspeed[indx];
+ aha1542_intr_reset(base_io);
+ aha1542_out(base_io, dmacmd, 2);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ }
+ while (0) {
+ fail:
+ printk("aha1542_detect: setting bus on/off-time failed\n");
+ }
+ aha1542_intr_reset(base_io);
+ }
+ if(aha1542_query(base_io, &trans)) goto unregister;
+
+ if (aha1542_getconfig(base_io, &irq_level, &dma_chan, &scsi_id) == -1) goto unregister;
+
+ printk("Configuring Adaptec (SCSI-ID %d) at IO:%x, IRQ %d", scsi_id, base_io, irq_level);
+ if (dma_chan != 0xFF)
+ printk(", DMA priority %d", dma_chan);
+ printk("\n");
+
+ DEB(aha1542_stat());
+ setup_mailboxes(base_io, shpnt);
+
+ DEB(aha1542_stat());
+
+ DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level));
+ save_flags(flags);
+ cli();
+ if (request_irq(irq_level,aha1542_intr_handle, 0, "aha1542")) {
+ printk("Unable to allocate IRQ for adaptec controller.\n");
+ goto unregister;
+ }
+
+ if (dma_chan != 0xFF) {
+ if (request_dma(dma_chan,"aha1542")) {
+ printk("Unable to allocate DMA channel for Adaptec.\n");
+ free_irq(irq_level);
+ goto unregister;
+ }
+
+ if (dma_chan >= 5) {
+ outb((dma_chan - 4) | CASCADE, DMA_MODE_REG);
+ outb(dma_chan - 4, DMA_MASK_REG);
+ }
+ }
+ aha_host[irq_level - 9] = shpnt;
+ shpnt->this_id = scsi_id;
+ shpnt->unique_id = base_io;
+ shpnt->io_port = base_io;
+ shpnt->n_io_port = 4; /* Number of bytes of I/O space used */
+ shpnt->dma_channel = dma_chan;
+ shpnt->irq = irq_level;
+ HOSTDATA(shpnt)->bios_translation = trans;
+ if(trans == 2)
+ printk("aha1542.c: Using extended bios translation\n");
+ HOSTDATA(shpnt)->aha1542_last_mbi_used = (2*AHA1542_MAILBOXES - 1);
+ HOSTDATA(shpnt)->aha1542_last_mbo_used = (AHA1542_MAILBOXES - 1);
+ memset(HOSTDATA(shpnt)->SCint, 0, sizeof(HOSTDATA(shpnt)->SCint));
+ restore_flags(flags);
+#if 0
+ DEB(printk(" *** READ CAPACITY ***\n"));
+
+ {
+ unchar buf[8];
+ static unchar cmd[] = { READ_CAPACITY, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ int i;
+
+ for (i = 0; i < sizeof(buf); ++i) buf[i] = 0x87;
+ for (i = 0; i < 2; ++i)
+ if (!aha1542_command(i, cmd, buf, sizeof(buf))) {
+ printk("aha_detect: LU %d sector_size %d device_size %d\n",
+ i, xscsi2int(buf+4), xscsi2int(buf));
+ }
+ }
+
+ DEB(printk(" *** NOW RUNNING MY OWN TEST *** \n"));
+
+ for (i = 0; i < 4; ++i)
+ {
+ unsigned char cmd[10];
+ static buffer[512];
+
+ cmd[0] = READ_10;
+ cmd[1] = 0;
+ xany2scsi(cmd+2, i);
+ cmd[6] = 0;
+ cmd[7] = 0;
+ cmd[8] = 1;
+ cmd[9] = 0;
+ aha1542_command(0, cmd, buffer, 512);
+ }
+#endif
+ request_region(bases[indx], 4,"aha1542"); /* Register the IO ports that we use */
+ count++;
+ continue;
+ unregister:
+ scsi_unregister(shpnt);
+ continue;
+
+ };
+
+ return count;
+}
+
+static int aha1542_restart(struct Scsi_Host * shost)
+{
+ int i;
+ int count = 0;
+#if 0
+ unchar ahacmd = CMD_START_SCSI;
+#endif
+
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(shost)->SCint[i] &&
+ !(HOSTDATA(shost)->SCint[i]->device->soft_reset))
+ {
+#if 0
+ HOSTDATA(shost)->mb[i].status = 1; /* Indicate ready to restart... */
+#endif
+ count++;
+ }
+
+ printk("Potential to restart %d stalled commands...\n", count);
+#if 0
+ /* start scsi command */
+ if (count) aha1542_out(shost->io_port, &ahacmd, 1);
+#endif
+ return 0;
+}
+
+/* The abort command does not leave the device in a clean state where
+ it is available to be used again. Until this gets worked out, we will
+ leave it commented out. */
+
+int aha1542_abort(Scsi_Cmnd * SCpnt)
+{
+#if 0
+ unchar ahacmd = CMD_START_SCSI;
+ unsigned long flags;
+ struct mailbox * mb;
+ int mbi, mbo, i;
+
+ printk("In aha1542_abort: %x %x\n",
+ inb(STATUS(SCpnt->host->io_port)),
+ inb(INTRFLAGS(SCpnt->host->io_port)));
+
+ save_flags(flags);
+ cli();
+ mb = HOSTDATA(SCpnt->host)->mb;
+ mbi = HOSTDATA(SCpnt->host)->aha1542_last_mbi_used + 1;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+
+ do{
+ if(mb[mbi].status != 0) break;
+ mbi++;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+ } while (mbi != HOSTDATA(SCpnt->host)->aha1542_last_mbi_used);
+ restore_flags(flags);
+
+ if(mb[mbi].status) {
+ printk("Lost interrupt discovered on irq %d - attempting to recover\n",
+ SCpnt->host->irq);
+ aha1542_intr_handle(SCpnt->host->irq, NULL);
+ return 0;
+ }
+
+ /* OK, no lost interrupt. Try looking to see how many pending commands
+ we think we have. */
+
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i])
+ {
+ if(HOSTDATA(SCpnt->host)->SCint[i] == SCpnt) {
+ printk("Timed out command pending for %s\n",
+ kdevname(SCpnt->request.rq_dev));
+ if (HOSTDATA(SCpnt->host)->mb[i].status) {
+ printk("OGMB still full - restarting\n");
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1);
+ };
+ } else
+ printk("Other pending command %s\n",
+ kdevname(SCpnt->request.rq_dev));
+ }
+
+#endif
+
+ DEB(printk("aha1542_abort\n"));
+#if 0
+ save_flags(flags);
+ cli();
+ for(mbo = 0; mbo < AHA1542_MAILBOXES; mbo++)
+ if (SCpnt == HOSTDATA(SCpnt->host)->SCint[mbo]){
+ mb[mbo].status = 2; /* Abort command */
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1); /* start scsi command */
+ restore_flags(flags);
+ break;
+ };
+#endif
+ return SCSI_ABORT_SNOOZE;
+}
+
+/* We do not implement a reset function here, but the upper level code
+ assumes that it will get some kind of response for the command in
+ SCpnt. We must oblige, or the command will hang the scsi system.
+ For a first go, we assume that the 1542 notifies us with all of the
+ pending commands (it does implement soft reset, after all). */
+
+int aha1542_reset(Scsi_Cmnd * SCpnt)
+{
+ unchar ahacmd = CMD_START_SCSI;
+ int i;
+
+ /*
+ * See if a bus reset was suggested.
+ */
+ if( SCpnt->host->suggest_bus_reset )
+ {
+ /*
+ * This does a scsi reset for all devices on the bus.
+ * In principle, we could also reset the 1542 - should
+ * we do this? Try this first, and we can add that later
+ * if it turns out to be useful.
+ */
+ outb(HRST | SCRST, CONTROL(SCpnt->host->io_port));
+
+ /*
+ * Wait for the thing to settle down a bit. Unfortunately
+ * this is going to basically lock up the machine while we
+	 * wait for this to complete.  To be 100% correct we ought to
+	 * check for a timeout here, but if we are doing something like
+	 * this we are pretty desperate anyway.
+ */
+ WAIT(STATUS(SCpnt->host->io_port),
+ STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF);
+
+ /*
+ * We need to do this too before the 1542 can interact with
+ * us again.
+ */
+ setup_mailboxes(SCpnt->host->io_port, SCpnt->host);
+
+ /*
+ * Now try and pick up the pieces. Restart all commands
+ * that are currently active on the bus, and reset all of
+ * the datastructures. We have some time to kill while
+ * things settle down, so print a nice message.
+ */
+ printk("Sent BUS RESET to scsi host %d\n", SCpnt->host->host_no);
+
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i] != NULL)
+ {
+ Scsi_Cmnd * SCtmp;
+ SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
+ SCtmp->result = DID_RESET << 16;
+ if (SCtmp->host_scribble) scsi_free(SCtmp->host_scribble, 512);
+ printk("Sending DID_RESET for target %d\n", SCpnt->target);
+ SCtmp->scsi_done(SCpnt);
+
+ HOSTDATA(SCpnt->host)->SCint[i] = NULL;
+ HOSTDATA(SCpnt->host)->mb[i].status = 0;
+ }
+ /*
+ * Now tell the mid-level code what we did here. Since
+ * we have restarted all of the outstanding commands,
+ * then report SUCCESS.
+ */
+ return (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET);
+fail:
+ printk("aha1542.c: Unable to perform hard reset.\n");
+ printk("Power cycle machine to reset\n");
+ return (SCSI_RESET_ERROR | SCSI_RESET_BUS_RESET);
+
+
+ }
+ else
+ {
+ /* This does a selective reset of just the one device */
+ /* First locate the ccb for this command */
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i] == SCpnt)
+ {
+ HOSTDATA(SCpnt->host)->ccb[i].op = 0x81; /* BUS DEVICE RESET */
+ /* Now tell the 1542 to flush all pending commands for this target */
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1);
+
+ /* Here is the tricky part. What to do next. Do we get an interrupt
+ for the commands that we aborted with the specified target, or
+	     do we generate this on our own?  Try it without generating one
+	     first and see what happens */
+ printk("Sent BUS DEVICE RESET to target %d\n", SCpnt->target);
+
+ /* If the first does not work, then try the second. I think the
+ first option is more likely to be correct. Free the command
+ block for all commands running on this target... */
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i] &&
+ HOSTDATA(SCpnt->host)->SCint[i]->target == SCpnt->target)
+ {
+ Scsi_Cmnd * SCtmp;
+ SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
+ SCtmp->result = DID_RESET << 16;
+ if (SCtmp->host_scribble) scsi_free(SCtmp->host_scribble, 512);
+ printk("Sending DID_RESET for target %d\n", SCpnt->target);
+	      /* Complete the command being flushed, not the reset trigger */
+	      SCtmp->scsi_done(SCtmp);
+
+ HOSTDATA(SCpnt->host)->SCint[i] = NULL;
+ HOSTDATA(SCpnt->host)->mb[i].status = 0;
+ }
+ return SCSI_RESET_SUCCESS;
+ }
+ }
+  /* No active command at this time, which means that we got some kind of
+     response for every command the last time through.  Tell the mid-level
+     code to request sense information in order to decide what to do next. */
+ return SCSI_RESET_PUNT;
+}
+
+#include "sd.h"
+
+int aha1542_biosparam(Scsi_Disk * disk, kdev_t dev, int * ip)
+{
+ int translation_algorithm;
+ int size = disk->capacity;
+
+ translation_algorithm = HOSTDATA(disk->device->host)->bios_translation;
+ /* Should this be > 1024, or >= 1024? Enquiring minds want to know. */
+ if((size>>11) > 1024 && translation_algorithm == 2) {
+ /* Please verify that this is the same as what DOS returns */
+ ip[0] = 255;
+ ip[1] = 63;
+ ip[2] = size /255/63;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ };
+/* if (ip[2] >= 1024) ip[2] = 1024; */
+ return 0;
+}
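+
+/* A worked example of the geometry above, assuming disk->capacity is given
+   in 512-byte sectors: size >> 11 is capacity / (64*32), i.e. the cylinder
+   count for the default 64-head, 32-sector mapping.  A hypothetical 2 GB
+   disk of 4194304 sectors thus reports 64/32/2048; since 2048 exceeds 1024,
+   a board using extended translation reports 255 heads, 63 sectors and
+   4194304/255/63 = 261 cylinders instead. */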
+
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but that will come later */
+Scsi_Host_Template driver_template = AHA1542;
+
+#include "scsi_module.c"
+#endif
+
diff --git a/i386/i386at/gpl/linux/scsi/aha1542.h b/i386/i386at/gpl/linux/scsi/aha1542.h
new file mode 100644
index 00000000..4e0c6503
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/aha1542.h
@@ -0,0 +1,177 @@
+#ifndef _AHA1542_H
+
+/* $Id: aha1542.h,v 1.1.1.1 1997/02/25 21:27:46 thomas Exp $
+ *
+ * Header file for the adaptec 1542 driver for Linux
+ *
+ * $Log: aha1542.h,v $
+ * Revision 1.1.1.1 1996/10/30 01:40:02 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1996/03/25 20:25:20 goel
+ * Linux driver merge.
+ *
+ * Revision 1.1 1992/07/24 06:27:38 root
+ * Initial revision
+ *
+ * Revision 1.2 1992/07/04 18:41:49 root
+ * Replaced distribution with current drivers
+ *
+ * Revision 1.3 1992/06/23 23:58:20 root
+ * Fixes.
+ *
+ * Revision 1.2 1992/05/26 22:13:23 root
+ * Changed bug that prevented DMA above first 2 mbytes.
+ *
+ * Revision 1.1 1992/05/22 21:00:29 root
+ * Initial revision
+ *
+ * Revision 1.1 1992/04/24 18:01:50 root
+ * Initial revision
+ *
+ * Revision 1.1 1992/04/02 03:23:13 drew
+ * Initial revision
+ *
+ * Revision 1.3 1992/01/27 14:46:29 tthorn
+ * *** empty log message ***
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+/* I/O Port interface 4.2 */
+/* READ */
+#define STATUS(base) base
+#define STST 0x80 /* Self Test in Progress */
+#define DIAGF 0x40 /* Internal Diagnostic Failure */
+#define INIT 0x20 /* Mailbox Initialization Required */
+#define IDLE 0x10 /* SCSI Host Adapter Idle */
+#define CDF 0x08 /* Command/Data Out Port Full */
+#define DF 0x04 /* Data In Port Full */
+#define INVDCMD 0x01 /* Invalid H A Command */
+#define STATMASK 0xfd /* 0x02 is reserved */
+
+#define INTRFLAGS(base) (STATUS(base)+2)
+#define ANYINTR 0x80 /* Any Interrupt */
+#define SCRD 0x08 /* SCSI Reset Detected */
+#define HACC 0x04 /* HA Command Complete */
+#define MBOA 0x02 /* MBO Empty */
+#define MBIF 0x01 /* MBI Full */
+#define INTRMASK 0x8f
+
+/* WRITE */
+#define CONTROL(base) STATUS(base)
+#define HRST 0x80 /* Hard Reset */
+#define SRST 0x40 /* Soft Reset */
+#define IRST 0x20 /* Interrupt Reset */
+#define SCRST 0x10 /* SCSI Bus Reset */
+
+/* READ/WRITE */
+#define DATA(base) (STATUS(base)+1)
+#define CMD_NOP 0x00 /* No Operation */
+#define CMD_MBINIT 0x01 /* Mailbox Initialization */
+#define CMD_START_SCSI 0x02 /* Start SCSI Command */
+#define CMD_INQUIRY 0x04 /* Adapter Inquiry */
+#define CMD_EMBOI 0x05 /* Enable MailBox Out Interrupt */
+#define CMD_BUSON_TIME 0x07 /* Set Bus-On Time */
+#define CMD_BUSOFF_TIME 0x08 /* Set Bus-Off Time */
+#define CMD_DMASPEED 0x09 /* Set AT Bus Transfer Speed */
+#define CMD_RETDEVS 0x0a /* Return Installed Devices */
+#define CMD_RETCONF 0x0b /* Return Configuration Data */
+#define CMD_RETSETUP 0x0d /* Return Setup Data */
+#define CMD_ECHO 0x1f /* ECHO Command Data */
+
+#define CMD_EXTBIOS     0x28    /* Return extended BIOS information (1542C only) */
+#define CMD_MBENABLE    0x29    /* Set Mailbox Interface enable (1542C only) */
+
+/* Mailbox Definition 5.2.1 and 5.2.2 */
+struct mailbox {
+ unchar status; /* Command/Status */
+ unchar ccbptr[3]; /* msb, .., lsb */
+};
+
+/* This is used with scatter-gather */
+struct chain {
+ unchar datalen[3]; /* Size of this part of chain */
+ unchar dataptr[3]; /* Location of data */
+};
+
+/* These belong in scsi.h also */
+#define any2scsi(up, p) \
+(up)[0] = (((unsigned long)(p)) >> 16) ; \
+(up)[1] = (((unsigned long)(p)) >> 8); \
+(up)[2] = ((unsigned long)(p));
+
+#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
+
+#define xany2scsi(up, p) \
+(up)[0] = ((long)(p)) >> 24; \
+(up)[1] = ((long)(p)) >> 16; \
+(up)[2] = ((long)(p)) >> 8; \
+(up)[3] = ((long)(p));
+
+#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ + (((long)(up)[2]) << 8) + ((long)(up)[3]) )
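+
+/* A worked example of the conversion macros above: any2scsi() stores the
+   low 24 bits of a value most-significant byte first (the byte order the
+   adapter expects), so any2scsi(buf, 0x123456) leaves buf[] = { 0x12, 0x34,
+   0x56 } and scsi2int(buf) reads back 0x123456; the x-prefixed variants do
+   the same for full 32-bit values. */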
+
+#define MAX_CDB 12
+#define MAX_SENSE 14
+
+struct ccb { /* Command Control Block 5.3 */
+ unchar op; /* Command Control Block Operation Code */
+ unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */
+ /* Outbound data transfer, length is checked*/
+ /* Inbound data transfer, length is checked */
+ /* Logical Unit Number */
+ unchar cdblen; /* SCSI Command Length */
+ unchar rsalen; /* Request Sense Allocation Length/Disable */
+ unchar datalen[3]; /* Data Length (msb, .., lsb) */
+ unchar dataptr[3]; /* Data Pointer */
+ unchar linkptr[3]; /* Link Pointer */
+ unchar commlinkid; /* Command Linking Identifier */
+ unchar hastat; /* Host Adapter Status (HASTAT) */
+ unchar tarstat; /* Target Device Status */
+ unchar reserved[2];
+ unchar cdb[MAX_CDB+MAX_SENSE];/* SCSI Command Descriptor Block */
+ /* REQUEST SENSE */
+};
+
+int aha1542_detect(Scsi_Host_Template *);
+int aha1542_command(Scsi_Cmnd *);
+int aha1542_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int aha1542_abort(Scsi_Cmnd *);
+int aha1542_reset(Scsi_Cmnd *);
+int aha1542_biosparam(Disk *, kdev_t, int*);
+
+#define AHA1542_MAILBOXES 8
+#define AHA1542_SCATTER 16
+#define AHA1542_CMDLUN 1
+
+#ifndef NULL
+ #define NULL 0
+#endif
+
+extern struct proc_dir_entry proc_scsi_aha1542;
+
+#define AHA1542 { NULL, NULL, \
+ &proc_scsi_aha1542,/* proc_dir_entry */ \
+ NULL, \
+ "Adaptec 1542", \
+ aha1542_detect, \
+ NULL, \
+ NULL, \
+ aha1542_command, \
+ aha1542_queuecommand, \
+ aha1542_abort, \
+ aha1542_reset, \
+ NULL, \
+ aha1542_biosparam, \
+ AHA1542_MAILBOXES, \
+ 7, \
+ AHA1542_SCATTER, \
+ AHA1542_CMDLUN, \
+ 0, \
+ 1, \
+ ENABLE_CLUSTERING}
+
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/aha1740.c b/i386/i386at/gpl/linux/scsi/aha1740.c
new file mode 100644
index 00000000..e3b8f1d0
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/aha1740.c
@@ -0,0 +1,528 @@
+/* $Id: aha1740.c,v 1.1.1.1 1997/02/25 21:27:46 thomas Exp $
+ * 1993/03/31
+ * linux/kernel/aha1740.c
+ *
+ * Based loosely on aha1542.c which is
+ * Copyright (C) 1992 Tommy Thorn and
+ * Modified by Eric Youngdale
+ *
+ * This file is aha1740.c, written and
+ * Copyright (C) 1992,1993 Brad McLean
+ *
+ * Modifications to makecode and queuecommand
+ * for proper handling of multiple devices courteously
+ * provided by Michael Weller, March, 1993
+ *
+ * aha1740_makecode may still need even more work
+ * if it doesn't work for your devices, take a look.
+ */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <asm/dma.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "aha1740.h"
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_aha1740 = {
+ PROC_SCSI_AHA1740, 7, "aha1740",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH
+ IT WORK, THEN:
+#define DEBUG
+*/
+#ifdef DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/i386/i386at/gpl/linux/scsi/Attic/aha1740.c,v 1.1.1.1 1997/02/25 21:27:46 thomas Exp $";
+*/
+
+static unsigned int slot, base;
+static unsigned char irq_level;
+
+static struct ecb ecb[AHA1740_ECBS]; /* One for each queued operation */
+
+static int aha1740_last_ecb_used = 0; /* optimization */
+
+int aha1740_makecode(unchar *sense, unchar *status)
+{
+ struct statusword
+ {
+ ushort don:1, /* Command Done - No Error */
+ du:1, /* Data underrun */
+ :1, qf:1, /* Queue full */
+ sc:1, /* Specification Check */
+ dor:1, /* Data overrun */
+ ch:1, /* Chaining Halted */
+ intr:1, /* Interrupt issued */
+ asa:1, /* Additional Status Available */
+ sns:1, /* Sense information Stored */
+ :1, ini:1, /* Initialization Required */
+ me:1, /* Major error or exception */
+ :1, eca:1, /* Extended Contingent alliance */
+ :1;
+ } status_word;
+ int retval = DID_OK;
+
+ status_word = * (struct statusword *) status;
+#ifdef DEBUG
+printk("makecode from %x,%x,%x,%x %x,%x,%x,%x",status[0],status[1],status[2],status[3],
+sense[0],sense[1],sense[2],sense[3]);
+#endif
+ if (!status_word.don) /* Anything abnormal was detected */
+ {
+ if ( (status[1]&0x18) || status_word.sc ) /*Additional info available*/
+ {
+ /* Use the supplied info for further diagnostics */
+ switch ( status[2] )
+ {
+ case 0x12:
+ if ( status_word.dor )
+ retval=DID_ERROR; /* It's an Overrun */
+ /* If not overrun, assume underrun and ignore it! */
+ case 0x00: /* No info, assume no error, should not occur */
+ break;
+ case 0x11:
+ case 0x21:
+ retval=DID_TIME_OUT;
+ break;
+ case 0x0a:
+ retval=DID_BAD_TARGET;
+ break;
+ case 0x04:
+ case 0x05:
+ retval=DID_ABORT; /* Either by this driver or the AHA1740
+ itself */
+ break;
+ default:
+ retval=DID_ERROR; /* No further diagnostics possible */
+ }
+ }
+ else
+ { /* Michael suggests, and Brad concurs: */
+ if ( status_word.qf )
+ {
+ retval = DID_TIME_OUT; /* forces a redo */
+ /* I think this specific one should not happen -Brad */
+ printk("aha1740.c: WARNING: AHA1740 queue overflow!\n");
+ }
+ else if ( status[0]&0x60 )
+ {
+ retval = DID_ERROR; /* Didn't find a better error */
+ }
+ /* In any other case return DID_OK so for example
+ CONDITION_CHECKS make it through to the appropriate
+ device driver */
+ }
+ }
+ /* Under all circumstances supply the target status -Michael */
+ return status[3] | retval << 16;
+}
+
+int aha1740_test_port(void)
+{
+ char name[4],tmp;
+
+ /* Okay, look for the EISA ID's */
+ name[0]= 'A' -1 + ((tmp = inb(HID0)) >> 2); /* First character */
+ name[1]= 'A' -1 + ((tmp & 3) << 3);
+ name[1]+= ((tmp = inb(HID1)) >> 5)&0x7; /* Second Character */
+ name[2]= 'A' -1 + (tmp & 0x1f); /* Third Character */
+ name[3]=0;
+ tmp = inb(HID2);
+ if ( strcmp ( name, HID_MFG ) || inb(HID2) != HID_PRD )
+ return 0; /* Not an Adaptec 174x */
+
+/* if ( inb(HID3) != HID_REV )
+ printk("aha1740: Warning; board revision of %d; expected %d\n",
+ inb(HID3),HID_REV); */
+
+ if ( inb(EBCNTRL) != EBCNTRL_VALUE )
+ {
+ printk("aha1740: Board detected, but EBCNTRL = %x, so disabled it.\n",
+ inb(EBCNTRL));
+ return 0;
+ }
+
+ if ( inb(PORTADR) & PORTADDR_ENH )
+ return 1; /* Okay, we're all set */
+
+ printk("aha1740: Board detected, but not in enhanced mode, so disabled it.\n");
+ return 0;
+}
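+
+/* A sketch (hypothetical helper, not used by the driver) of the compressed
+   EISA ID decoded above: each of the three manufacturer letters is stored
+   as a 5-bit value with 'A' == 1, packed across the two ID bytes.  Running
+   the same arithmetic on HID0 == 0x04 and HID1 == 0x90 yields "ADP", which
+   is what the strcmp against HID_MFG checks for. */
+#if 0
+static void example_decode_eisa_id(unsigned char id0, unsigned char id1)
+{
+	char name[4];
+	name[0] = 'A' - 1 + (id0 >> 2);				/* bits 6..2 of byte 0 */
+	name[1] = 'A' - 1 + ((id0 & 3) << 3) + ((id1 >> 5) & 0x7);
+	name[2] = 'A' - 1 + (id1 & 0x1f);			/* bits 4..0 of byte 1 */
+	name[3] = 0;
+	printk("EISA manufacturer id: %s\n", name);		/* 0x04, 0x90 -> "ADP" */
+}
+#endif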
+
+/* A "high" level interrupt handler */
+void aha1740_intr_handle(int irq, struct pt_regs * regs)
+{
+ void (*my_done)(Scsi_Cmnd *);
+ int errstatus, adapstat;
+ int number_serviced;
+ struct ecb *ecbptr;
+ Scsi_Cmnd *SCtmp;
+
+ number_serviced = 0;
+
+ while(inb(G2STAT) & G2STAT_INTPEND)
+ {
+ DEB(printk("aha1740_intr top of loop.\n"));
+ adapstat = inb(G2INTST);
+ ecbptr = (struct ecb *) bus_to_virt(inl(MBOXIN0));
+ outb(G2CNTRL_IRST,G2CNTRL); /* interrupt reset */
+
+ switch ( adapstat & G2INTST_MASK )
+ {
+ case G2INTST_CCBRETRY:
+ case G2INTST_CCBERROR:
+ case G2INTST_CCBGOOD:
+ outb(G2CNTRL_HRDY,G2CNTRL); /* Host Ready -> Mailbox in complete */
+ if (!ecbptr)
+ {
+ printk("Aha1740 null ecbptr in interrupt (%x,%x,%x,%d)\n",
+ inb(G2STAT),adapstat,inb(G2INTST),number_serviced++);
+ continue;
+ }
+ SCtmp = ecbptr->SCpnt;
+ if (!SCtmp)
+ {
+ printk("Aha1740 null SCtmp in interrupt (%x,%x,%x,%d)\n",
+ inb(G2STAT),adapstat,inb(G2INTST),number_serviced++);
+ continue;
+ }
+ if (SCtmp->host_scribble)
+ scsi_free(SCtmp->host_scribble, 512);
+ /* Fetch the sense data, and tuck it away, in the required slot. The
+ Adaptec automatically fetches it, and there is no guarantee that
+ we will still have it in the cdb when we come back */
+ if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR )
+ {
+ memcpy(SCtmp->sense_buffer, ecbptr->sense,
+ sizeof(SCtmp->sense_buffer));
+ errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
+ }
+ else
+ errstatus = 0;
+ DEB(if (errstatus) printk("aha1740_intr_handle: returning %6x\n", errstatus));
+ SCtmp->result = errstatus;
+ my_done = ecbptr->done;
+ memset(ecbptr,0,sizeof(struct ecb));
+ if ( my_done )
+ my_done(SCtmp);
+ break;
+ case G2INTST_HARDFAIL:
+ printk("aha1740 hardware failure!\n");
+ panic("aha1740.c"); /* Goodbye */
+ case G2INTST_ASNEVENT:
+ printk("aha1740 asynchronous event: %02x %02x %02x %02x %02x\n",adapstat,
+ inb(MBOXIN0),inb(MBOXIN1),inb(MBOXIN2),inb(MBOXIN3)); /* Say What? */
+ outb(G2CNTRL_HRDY,G2CNTRL); /* Host Ready -> Mailbox in complete */
+ break;
+ case G2INTST_CMDGOOD:
+ /* set immediate command success flag here: */
+ break;
+ case G2INTST_CMDERROR:
+ /* Set immediate command failure flag here: */
+ break;
+ }
+ number_serviced++;
+ }
+}
+
+int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ unchar direction;
+ unchar *cmd = (unchar *) SCpnt->cmnd;
+ unchar target = SCpnt->target;
+ unsigned long flags;
+ void *buff = SCpnt->request_buffer;
+ int bufflen = SCpnt->request_bufflen;
+ int ecbno;
+ DEB(int i);
+
+
+ if(*cmd == REQUEST_SENSE)
+ {
+ if (bufflen != sizeof(SCpnt->sense_buffer))
+ {
+ printk("Wrong buffer length supplied for request sense (%d)\n",bufflen);
+ }
+ SCpnt->result = 0;
+ done(SCpnt);
+ return 0;
+ }
+
+#ifdef DEBUG
+ if (*cmd == READ_10 || *cmd == WRITE_10)
+ i = xscsi2int(cmd+2);
+ else if (*cmd == READ_6 || *cmd == WRITE_6)
+ i = scsi2int(cmd+2);
+ else
+ i = -1;
+ printk("aha1740_queuecommand: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen);
+ printk("scsi cmd:");
+ for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]);
+ printk("\n");
+#endif
+
+ /* locate an available ecb */
+
+ save_flags(flags);
+ cli();
+ ecbno = aha1740_last_ecb_used + 1; /* An optimization */
+ if (ecbno >= AHA1740_ECBS) ecbno = 0;
+
+ do{
+ if( ! ecb[ecbno].cmdw )
+ break;
+ ecbno++;
+ if (ecbno >= AHA1740_ECBS ) ecbno = 0;
+ } while (ecbno != aha1740_last_ecb_used);
+
+ if( ecb[ecbno].cmdw )
+ panic("Unable to find empty ecb for aha1740.\n");
+
+ ecb[ecbno].cmdw = AHA1740CMD_INIT; /* SCSI Initiator Command doubles as reserved flag */
+
+ aha1740_last_ecb_used = ecbno;
+ restore_flags(flags);
+
+#ifdef DEBUG
+ printk("Sending command (%d %x)...",ecbno, done);
+#endif
+
+ ecb[ecbno].cdblen = SCpnt->cmd_len; /* SCSI Command Descriptor Block Length */
+
+ direction = 0;
+ if (*cmd == READ_10 || *cmd == READ_6)
+ direction = 1;
+ else if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ direction = 0;
+
+ memcpy(ecb[ecbno].cdb, cmd, ecb[ecbno].cdblen);
+
+ if (SCpnt->use_sg)
+ {
+ struct scatterlist * sgpnt;
+ struct aha1740_chain * cptr;
+ int i;
+#ifdef DEBUG
+ unsigned char * ptr;
+#endif
+ ecb[ecbno].sg = 1; /* SCSI Initiator Command w/scatter-gather*/
+ SCpnt->host_scribble = (unsigned char *) scsi_malloc(512);
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+ cptr = (struct aha1740_chain *) SCpnt->host_scribble;
+ if (cptr == NULL) panic("aha1740.c: unable to allocate DMA memory\n");
+ for(i=0; i<SCpnt->use_sg; i++)
+ {
+ cptr[i].dataptr = (long) sgpnt[i].address;
+ cptr[i].datalen = sgpnt[i].length;
+ }
+ ecb[ecbno].datalen = SCpnt->use_sg * sizeof(struct aha1740_chain);
+ ecb[ecbno].dataptr = (long) cptr;
+#ifdef DEBUG
+ printk("cptr %x: ",cptr);
+ ptr = (unsigned char *) cptr;
+ for(i=0;i<24;i++) printk("%02x ", ptr[i]);
+#endif
+ }
+ else
+ {
+ SCpnt->host_scribble = NULL;
+ ecb[ecbno].datalen = bufflen;
+ ecb[ecbno].dataptr = (long) buff;
+ }
+ ecb[ecbno].lun = SCpnt->lun;
+ ecb[ecbno].ses = 1; /* Suppress underrun errors */
+ ecb[ecbno].dir= direction;
+ ecb[ecbno].ars=1; /* Yes, get the sense on an error */
+ ecb[ecbno].senselen = 12;
+ ecb[ecbno].senseptr = (long) ecb[ecbno].sense;
+ ecb[ecbno].statusptr = (long) ecb[ecbno].status;
+ ecb[ecbno].done = done;
+ ecb[ecbno].SCpnt = SCpnt;
+#ifdef DEBUG
+ {
+ int i;
+ printk("aha1740_command: sending.. ");
+ for (i = 0; i < sizeof(ecb[ecbno])-10; i++)
+ printk("%02x ", ((unchar *)&ecb[ecbno])[i]);
+ }
+ printk("\n");
+#endif
+ if (done)
+    { /* You may question the code below, which contains potentially
+	 non-terminating while loops with interrupts disabled.  So did
+	 I when I wrote it, but the Adaptec spec says the card is so fast
+	 that this problem virtually never occurs, so I've kept it.  We
+	 do printk a warning first, so that you'll know if it happens.
+	 In practice the only time we've seen this message is when something
+	 else in the driver was broken, like _makecode(), or when a scsi
+	 device hung the scsi bus.  Even under these conditions, the loop
+	 actually only cycled < 3 times (we instrumented it). */
+
+ DEB(printk("aha1740[%d] critical section\n",ecbno));
+ save_flags(flags);
+ cli();
+ if ( ! (inb(G2STAT) & G2STAT_MBXOUT) )
+ {
+ printk("aha1740[%d]_mbxout wait!\n",ecbno);
+ cli(); /* printk may have done a sti()! */
+ }
+ mb();
+ while ( ! (inb(G2STAT) & G2STAT_MBXOUT) ); /* Oh Well. */
+ outl(virt_to_bus(ecb+ecbno), MBOXOUT0);
+ if ( inb(G2STAT) & G2STAT_BUSY )
+ {
+ printk("aha1740[%d]_attn wait!\n",ecbno);
+ cli();
+ }
+ while ( inb(G2STAT) & G2STAT_BUSY ); /* And Again! */
+ outb(ATTN_START | (target & 7), ATTN); /* Start it up */
+ restore_flags(flags);
+ DEB(printk("aha1740[%d] request queued.\n",ecbno));
+ }
+ else
+ printk("aha1740_queuecommand: done can't be NULL\n");
+
+ return 0;
+}
+
+static volatile int internal_done_flag = 0;
+static volatile int internal_done_errcode = 0;
+
+static void internal_done(Scsi_Cmnd * SCpnt)
+{
+ internal_done_errcode = SCpnt->result;
+ ++internal_done_flag;
+}
+
+int aha1740_command(Scsi_Cmnd * SCpnt)
+{
+ aha1740_queuecommand(SCpnt, internal_done);
+
+ while (!internal_done_flag);
+ internal_done_flag = 0;
+ return internal_done_errcode;
+}
+
+/* Query the board for its irq_level. Nothing else matters
+ in enhanced mode on an EISA bus. */
+
+void aha1740_getconfig(void)
+{
+ static int intab[] = { 9,10,11,12,0,14,15,0 };
+
+ irq_level = intab [ inb(INTDEF)&0x7 ];
+ outb(inb(INTDEF) | 0x10, INTDEF);
+}
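+
+/* The low three bits of INTDEF index the table above to select the IRQ
+   (a value of 2 selects IRQ 11, for example); the register is then written
+   back with bit 0x10 set, which presumably enables interrupt delivery on
+   the board. */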
+
+int aha1740_detect(Scsi_Host_Template * tpnt)
+{
+ tpnt->proc_dir = &proc_scsi_aha1740;
+
+ memset(&ecb, 0, sizeof(struct ecb));
+ DEB(printk("aha1740_detect: \n"));
+
+ for ( slot=MINEISA; slot <= MAXEISA; slot++ )
+ {
+ base = SLOTBASE(slot);
+ /*
+ * The ioports for eisa boards are generally beyond that used in the
+ * check/allocate region code, but this may change at some point,
+ * so we go through the motions.
+ */
+ if(check_region(base, 0x5c)) continue; /* See if in use */
+ if ( aha1740_test_port()) break;
+ }
+ if ( slot > MAXEISA )
+ return 0;
+
+ aha1740_getconfig();
+
+ if ( (inb(G2STAT) & (G2STAT_MBXOUT | G2STAT_BUSY) ) != G2STAT_MBXOUT )
+ { /* If the card isn't ready, hard reset it */
+ outb(G2CNTRL_HRST,G2CNTRL);
+ outb(0,G2CNTRL);
+ }
+
+ printk("Configuring Adaptec at IO:%x, IRQ %d\n",base,
+ irq_level);
+
+ DEB(printk("aha1740_detect: enable interrupt channel %d\n", irq_level));
+
+ if (request_irq(irq_level,aha1740_intr_handle, 0, "aha1740"))
+ {
+ printk("Unable to allocate IRQ for adaptec controller.\n");
+ return 0;
+ }
+ request_region(base, 0x5c,"aha1740"); /* Reserve the space that we need to use */
+ return 1;
+}
+
+/* Note: The following two functions do not apply very well to the Adaptec,
+which basically manages its own affairs quite well without our interference,
+so I haven't put anything into them. I can faintly imagine someone with a
+*very* badly behaved SCSI target (perhaps an old tape?) wanting the abort(),
+but it hasn't happened yet, and doing aborts brings the Adaptec to its
+knees. I cannot (at this moment in time) think of any reason to reset the
+card once it's running. So there. */
+
+int aha1740_abort(Scsi_Cmnd * SCpnt)
+{
+ DEB(printk("aha1740_abort called\n"));
+ return SCSI_ABORT_SNOOZE;
+}
+
+/* We do not implement a reset function here, but the upper level code assumes
+ that it will get some kind of response for the command in SCpnt. We must
+ oblige, or the command will hang the scsi system */
+
+int aha1740_reset(Scsi_Cmnd * SCpnt)
+{
+ DEB(printk("aha1740_reset called\n"));
+ return SCSI_RESET_PUNT;
+}
+
+int aha1740_biosparam(Disk * disk, kdev_t dev, int* ip)
+{
+ int size = disk->capacity;
+DEB(printk("aha1740_biosparam\n"));
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+/* if (ip[2] >= 1024) ip[2] = 1024; */
+ return 0;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but that will come later */
+Scsi_Host_Template driver_template = AHA1740;
+
+#include "scsi_module.c"
+#endif
+
+/* Okay, you made it all the way through. As of this writing, 3/31/93, I'm
+brad@saturn.gaylord.com or brad@bradpc.gaylord.com. I'll try to help as time
+permits if you have any trouble with this driver. Happy Linuxing! */
diff --git a/i386/i386at/gpl/linux/scsi/aha1740.h b/i386/i386at/gpl/linux/scsi/aha1740.h
new file mode 100644
index 00000000..79d94697
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/aha1740.h
@@ -0,0 +1,193 @@
+#ifndef _AHA1740_H
+
+/* $Id: aha1740.h,v 1.1.1.1 1997/02/25 21:27:46 thomas Exp $
+ *
+ * Header file for the adaptec 1740 driver for Linux
+ *
+ * With minor revisions 3/31/93
+ * Written and (C) 1992,1993 Brad McLean. See aha1740.c
+ * for more info
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+/* Eisa Enhanced mode operation - slot locating and addressing */
+#define MINEISA 1	/* I don't have an EISA Spec to know these ranges, so I */
+#define MAXEISA 8	/* just took my machine's specifications.  Adjust to fit. */
+ /* I just saw an ad, and bumped this from 6 to 8 */
+#define SLOTBASE(x) ((x << 12)+ 0xc80 )
+#define BASE (base)
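+
+/* Worked example: SLOTBASE maps an EISA slot number to its 4K I/O range
+   plus the 0xc80 offset of the ID registers, so SLOTBASE(1) == 0x1c80 and
+   SLOTBASE(8) == 0x8c80.  The probe loop in aha1740.c simply walks slots
+   MINEISA..MAXEISA through this macro. */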
+
+/* EISA configuration registers & values */
+#define HID0 (base + 0x0)
+#define HID1 (base + 0x1)
+#define HID2 (base + 0x2)
+#define HID3 (base + 0x3)
+#define EBCNTRL (base + 0x4)
+#define PORTADR (base + 0x40)
+#define BIOSADR (base + 0x41)
+#define INTDEF (base + 0x42)
+#define SCSIDEF (base + 0x43)
+#define BUSDEF (base + 0x44)
+#define RESV0 (base + 0x45)
+#define RESV1 (base + 0x46)
+#define RESV2 (base + 0x47)
+
+#define HID_MFG "ADP"
+#define HID_PRD 0
+#define HID_REV 2
+#define EBCNTRL_VALUE 1
+#define PORTADDR_ENH 0x80
+/* READ */
+#define G2INTST (BASE + 0x56)
+#define G2STAT (BASE + 0x57)
+#define MBOXIN0 (BASE + 0x58)
+#define MBOXIN1 (BASE + 0x59)
+#define MBOXIN2 (BASE + 0x5a)
+#define MBOXIN3 (BASE + 0x5b)
+#define G2STAT2 (BASE + 0x5c)
+
+#define G2INTST_MASK 0xf0 /* isolate the status */
+#define G2INTST_CCBGOOD 0x10 /* CCB Completed */
+#define G2INTST_CCBRETRY 0x50 /* CCB Completed with a retry */
+#define G2INTST_HARDFAIL 0x70 /* Adapter Hardware Failure */
+#define G2INTST_CMDGOOD 0xa0 /* Immediate command success */
+#define G2INTST_CCBERROR 0xc0 /* CCB Completed with error */
+#define G2INTST_ASNEVENT 0xd0 /* Asynchronous Event Notification */
+#define G2INTST_CMDERROR 0xe0 /* Immediate command error */
+
+#define G2STAT_MBXOUT 4 /* Mailbox Out Empty Bit */
+#define G2STAT_INTPEND 2 /* Interrupt Pending Bit */
+#define G2STAT_BUSY 1 /* Busy Bit (attention pending) */
+
+#define G2STAT2_READY 0 /* Host Ready Bit */
+
+/* WRITE (and ReadBack) */
+#define MBOXOUT0 (BASE + 0x50)
+#define MBOXOUT1 (BASE + 0x51)
+#define MBOXOUT2 (BASE + 0x52)
+#define MBOXOUT3 (BASE + 0x53)
+#define ATTN (BASE + 0x54)
+#define G2CNTRL (BASE + 0x55)
+
+#define ATTN_IMMED 0x10 /* Immediate Command */
+#define ATTN_START 0x40 /* Start CCB */
+#define ATTN_ABORT 0x50 /* Abort CCB */
+
+#define G2CNTRL_HRST 0x80 /* Hard Reset */
+#define G2CNTRL_IRST 0x40 /* Clear EISA Interrupt */
+#define G2CNTRL_HRDY 0x20 /* Sets HOST ready */
+
+/* This is used with scatter-gather */
+struct aha1740_chain {
+ u32 dataptr; /* Location of data */
+ u32 datalen; /* Size of this part of chain */
+};
+
+/* These belong in scsi.h */
+#define any2scsi(up, p) \
+(up)[0] = (((unsigned long)(p)) >> 16) ; \
+(up)[1] = (((unsigned long)(p)) >> 8); \
+(up)[2] = ((unsigned long)(p));
+
+#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
+
+#define xany2scsi(up, p) \
+(up)[0] = ((long)(p)) >> 24; \
+(up)[1] = ((long)(p)) >> 16; \
+(up)[2] = ((long)(p)) >> 8; \
+(up)[3] = ((long)(p));
+
+#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ + (((long)(up)[2]) << 8) + ((long)(up)[3]) )
+
+#define MAX_CDB 12
+#define MAX_SENSE 14
+#define MAX_STATUS 32
+
+struct ecb { /* Enhanced Control Block 6.1 */
+ u16 cmdw; /* Command Word */
+ /* Flag Word 1 */
+ u16 cne:1, /* Control Block Chaining */
+ :6, di:1, /* Disable Interrupt */
+ :2, ses:1, /* Suppress Underrun error */
+ :1, sg:1, /* Scatter/Gather */
+ :1, dsb:1, /* Disable Status Block */
+ ars:1; /* Automatic Request Sense */
+ /* Flag Word 2 */
+ u16 lun:3, /* Logical Unit */
+ tag:1, /* Tagged Queuing */
+ tt:2, /* Tag Type */
+ nd:1, /* No Disconnect */
+ :1, dat:1, /* Data transfer - check direction */
+ dir:1, /* Direction of transfer 1 = datain */
+ st:1, /* Suppress Transfer */
+ chk:1, /* Calculate Checksum */
+ :2, rec:1, :1; /* Error Recovery */
+ u16 nil0; /* nothing */
+ u32 dataptr; /* Data or Scatter List ptr */
+ u32 datalen; /* Data or Scatter List len */
+ u32 statusptr; /* Status Block ptr */
+ u32 linkptr; /* Chain Address */
+ u32 nil1; /* nothing */
+ u32 senseptr; /* Sense Info Pointer */
+ u8 senselen; /* Sense Length */
+ u8 cdblen; /* CDB Length */
+ u16 datacheck; /* Data checksum */
+ u8 cdb[MAX_CDB]; /* CDB area */
+/* Hardware defined portion ends here, rest is driver defined */
+ u8 sense[MAX_SENSE]; /* Sense area */
+ u8 status[MAX_STATUS]; /* Status area */
+ Scsi_Cmnd *SCpnt; /* Link to the SCSI Command Block */
+ void (*done)(Scsi_Cmnd *); /* Completion Function */
+};
+
+#define AHA1740CMD_NOP 0x00 /* No OP */
+#define AHA1740CMD_INIT 0x01 /* Initiator SCSI Command */
+#define AHA1740CMD_DIAG 0x05 /* Run Diagnostic Command */
+#define AHA1740CMD_SCSI 0x06 /* Initialize SCSI */
+#define AHA1740CMD_SENSE 0x08 /* Read Sense Information */
+#define AHA1740CMD_DOWN 0x09 /* Download Firmware (yeah, I bet!) */
+#define AHA1740CMD_RINQ 0x0a /* Read Host Adapter Inquiry Data */
+#define AHA1740CMD_TARG 0x10 /* Target SCSI Command */
+
+int aha1740_detect(Scsi_Host_Template *);
+int aha1740_command(Scsi_Cmnd *);
+int aha1740_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int aha1740_abort(Scsi_Cmnd *);
+int aha1740_reset(Scsi_Cmnd *);
+int aha1740_biosparam(Disk *, kdev_t, int*);
+
+#define AHA1740_ECBS 32
+#define AHA1740_SCATTER 16
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+
+#define AHA1740 {NULL, NULL, \
+ NULL, \
+ NULL, \
+ "Adaptec 174x (EISA)", \
+ aha1740_detect, \
+ NULL, \
+ NULL, \
+ aha1740_command, \
+ aha1740_queuecommand, \
+ aha1740_abort, \
+ aha1740_reset, \
+ NULL, \
+ aha1740_biosparam, \
+ AHA1740_ECBS, \
+ 7, \
+ AHA1740_SCATTER, \
+ 1, \
+ 0, \
+ 0, \
+ ENABLE_CLUSTERING}
+
+#endif
+
diff --git a/i386/i386at/gpl/linux/scsi/aic7xxx.c b/i386/i386at/gpl/linux/scsi/aic7xxx.c
new file mode 100644
index 00000000..3b238983
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/aic7xxx.c
@@ -0,0 +1,4645 @@
+/*+M*************************************************************************
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * Copyright (c) 1994 John Aycock
+ * The University of Calgary Department of Computer Science.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F
+ * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA
+ * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide,
+ * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux,
+ * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file
+ * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual,
+ * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the
+ * ANSI SCSI-2 specification (draft 10c), ...
+ *
+ * ----------------------------------------------------------------
+ * Modified to include support for wide and twin bus adapters,
+ * DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes,
+ * and other rework of the code.
+ *
+ * Parts of this driver are based on the FreeBSD driver by Justin
+ * T. Gibbs.
+ *
+ * A Boot time option was also added for not resetting the scsi bus.
+ *
+ * Form: aic7xxx=extended,no_reset
+ *
+ * -- Daniel M. Eischen, deischen@iworks.InterWorks.org, 04/03/95
+ *
+ * $Id: aic7xxx.c,v 1.1.1.1 1997/02/25 21:27:46 thomas Exp $
+ *-M*************************************************************************/
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <stdarg.h>
+#include <asm/io.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/bios32.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include "sd.h"
+#include "scsi.h"
+#include "hosts.h"
+#include "aic7xxx.h"
+#include "aic7xxx_reg.h"
+#include <linux/stat.h>
+
+#include <linux/config.h> /* for CONFIG_PCI */
+
+struct proc_dir_entry proc_scsi_aic7xxx = {
+ PROC_SCSI_AIC7XXX, 7, "aic7xxx",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#define AIC7XXX_C_VERSION "$Revision: 1.1.1.1 $"
+
+#define NUMBER(arr) (sizeof(arr) / sizeof(arr[0]))
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#define ALL_TARGETS -1
+#ifndef TRUE
+# define TRUE 1
+#endif
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+/*
+ * Defines for PCI bus support, testing twin bus support, DMAing of
+ * SCBs, tagged queueing, commands (SCBs) per lun, and SCSI bus reset
+ * delay time.
+ *
+ * o PCI bus support - this has been implemented and working since
+ * the December 1, 1994 release of this driver. If you don't have
+ * a PCI bus, then you can configure your kernel without PCI
+ * support because all PCI dependent code is bracketed with
+ * "#ifdef CONFIG_PCI ... #endif CONFIG_PCI".
+ *
+ * o Twin bus support - this has been tested and does work.
+ *
+ * o DMAing of SCBs - thanks to Kai Makisara, this now works.
+ * This define is now taken out and DMAing of SCBs is always
+ * performed (8/12/95 - DE).
+ *
+ * o Tagged queueing - this driver is capable of tagged queueing
+ * but I am unsure as to how well the higher level driver implements
+ * tagged queueing. Therefore, the maximum commands per lun is
+ * set to 2. If you want to implement tagged queueing, ensure
+ * this define is not commented out.
+ *
+ * o Sharing IRQs - allowed for sharing of IRQs. This will allow
+ * for multiple aic7xxx host adapters sharing the same IRQ, but
+ * not for sharing IRQs with other devices. The higher level
+ * PCI code and interrupt handling needs to be modified to
+ * support this.
+ *
+ * o Commands per lun - If tagged queueing is enabled, then you
+ * may want to try increasing AIC7XXX_CMDS_PER_LUN to more
+ * than 2. By default, we limit the SCBs per lun to 2 with
+ * or without tagged queueing enabled. If tagged queueing is
+ * disabled, the sequencer will keep the 2nd SCB in the input
+ * queue until the first one completes - so it is OK to have
+ * more than 1 SCB queued. If tagged queueing is enabled, then
+ * the sequencer will attempt to send the 2nd SCB to the device
+ * while the first SCB is executing and the device is disconnected.
+ * For adapters limited to 4 SCBs, you may want to actually
+ * decrease the commands per lun to 1, if you often have more
+ * than 2 devices active at the same time. This will allocate
+ * 1 SCB for each device and ensure that there will always be
+ * a free SCB for up to 4 devices active at the same time.
+ *
+ * o 3985 support - The 3985 adapter is much like the 3940, but
+ * has three 7870 controllers as opposed to two for the 3940.
+ * It will get probed and recognized as three different adapters,
+ * but all three controllers share the same bank of 255 SCBs
+ * instead of each controller having its own bank (like the
+ * controllers on the 3940). For this reason, it is important
+ * that all devices be resident on just one channel of the 3985.
+ * In the near future, we'll modify the driver to reserve 1/3
+ * of the SCBs for each controller.
+ *
+ * Daniel M. Eischen, deischen@iworks.InterWorks.org, 01/11/96
+ */
+
+/* Uncomment this for testing twin bus support. */
+#define AIC7XXX_TWIN_SUPPORT
+
+/* Uncomment this for tagged queueing. */
+/* #define AIC7XXX_TAGGED_QUEUEING */
+
+/* Uncomment this for allowing sharing of IRQs. */
+#define AIC7XXX_SHARE_IRQS
+
+/*
+ * You can try raising me if tagged queueing is enabled, or lowering
+ * me if you only have 4 SCBs.
+ */
+#define AIC7XXX_CMDS_PER_LUN 2
+
+/* Set this to the delay in seconds after SCSI bus reset. */
+#define AIC7XXX_RESET_DELAY 15
+
+/*
+ * Uncomment the following define for collection of SCSI transfer statistics
+ * for the /proc filesystem.
+ *
+ * NOTE: This does affect performance since it has to maintain statistics.
+ */
+/* #define AIC7XXX_PROC_STATS */
+
+/*
+ * For debugging the abort/reset code.
+ */
+/* #define AIC7XXX_DEBUG_ABORT */
+
+/*
+ * For general debug messages
+ */
+#define AIC7XXX_DEBUG
+
+/*
+ * Controller type and options
+ */
+typedef enum {
+ AIC_NONE,
+ AIC_7770, /* EISA aic7770 on motherboard */
+ AIC_7771, /* EISA aic7771 on 274x */
+ AIC_284x, /* VLB aic7770 on 284x */
+ AIC_7850, /* PCI aic7850 */
+ AIC_7870, /* PCI aic7870 on motherboard */
+ AIC_7871, /* PCI aic7871 on 294x */
+ AIC_7872, /* PCI aic7872 on 3940 */
+ AIC_7873, /* PCI aic7873 on 3985 */
+ AIC_7874, /* PCI aic7874 on 294x Differential */
+ AIC_7880, /* PCI aic7880 on motherboard */
+ AIC_7881, /* PCI aic7881 on 294x Ultra */
+ AIC_7882, /* PCI aic7882 on 3940 Ultra */
+ AIC_7883, /* PCI aic7883 on 3985 Ultra */
+ AIC_7884 /* PCI aic7884 on 294x Ultra Differential */
+} aha_type;
+
+typedef enum {
+ AIC_777x, /* AIC-7770 based */
+ AIC_785x, /* AIC-7850 based */
+ AIC_787x, /* AIC-7870 based */
+ AIC_788x /* AIC-7880 based */
+} aha_chip_type;
+
+typedef enum {
+ AIC_SINGLE, /* Single Channel */
+ AIC_TWIN, /* Twin Channel */
+ AIC_WIDE /* Wide Channel */
+} aha_bus_type;
+
+typedef enum {
+ AIC_UNKNOWN,
+ AIC_ENABLED,
+ AIC_DISABLED
+} aha_status_type;
+
+typedef enum {
+ LIST_HEAD,
+ LIST_SECOND
+} insert_type;
+
+typedef enum {
+ ABORT_RESET_INACTIVE,
+ ABORT_RESET_PENDING,
+ ABORT_RESET_SUCCESS
+} aha_abort_reset_type;
+
+/*
+ * Define an array of board names that can be indexed by aha_type.
+ * Don't forget to change this when changing the types!
+ */
+static const char * board_names[] = {
+ "<AIC-7xxx Unknown>", /* AIC_NONE */
+ "AIC-7770", /* AIC_7770 */
+ "AHA-2740", /* AIC_7771 */
+ "AHA-2840", /* AIC_284x */
+ "AIC-7850", /* AIC_7850 */
+ "AIC-7870", /* AIC_7870 */
+ "AHA-2940", /* AIC_7871 */
+ "AHA-3940", /* AIC_7872 */
+ "AHA-3985", /* AIC_7873 */
+ "AHA-2940 Differential", /* AIC_7874 */
+ "AIC-7880 Ultra", /* AIC_7880 */
+ "AHA-2940 Ultra", /* AIC_7881 */
+ "AHA-3940 Ultra", /* AIC_7882 */
+ "AHA-3985 Ultra", /* AIC_7883 */
+ "AHA-2940 Ultra Differential" /* AIC_7884 */
+};
+
+/*
+ * There should be a specific return value for this in scsi.h, but
+ * it seems that most drivers ignore it.
+ */
+#define DID_UNDERFLOW DID_ERROR
+
+/*
+ * What we want to do is have the higher level scsi driver requeue
+ * the command to us. There is no specific driver status for this
+ * condition, but the higher level scsi driver will requeue the
+ * command on a DID_BUS_BUSY error.
+ */
+#define DID_RETRY_COMMAND DID_BUS_BUSY
+
+/*
+ * EISA/VL-bus stuff
+ */
+#define MINSLOT 1
+#define MAXSLOT 15
+#define SLOTBASE(x) ((x) << 12)
+#define MAXIRQ 15
+
+/*
+ * Standard EISA Host ID regs (Offset from slot base)
+ */
+#define HID0 0x80 /* 0,1: msb of ID2, 2-7: ID1 */
+#define HID1 0x81 /* 0-4: ID3, 5-7: LSB ID2 */
+#define HID2 0x82 /* product */
+#define HID3 0x83 /* firmware revision */
+
+/*
+ * AIC-7770 I/O range to reserve for a card
+ */
+#define MINREG 0xC00
+#define MAXREG 0xCBF
+
+#define INTDEF 0x5C /* Interrupt Definition Register */
+
+/*
+ * Some defines for the HCNTRL register.
+ */
+#define REQ_PAUSE (IRQMS | INTEN | PAUSE)
+#define UNPAUSE_274X (IRQMS | INTEN)
+#define UNPAUSE_284X INTEN
+#define UNPAUSE_294X (IRQMS | INTEN)
+
+/*
+ * AIC-78X0 PCI registers
+ */
+#define CLASS_PROGIF_REVID 0x08
+#define DEVREVID 0x000000FFul
+#define PROGINFC 0x0000FF00ul
+#define SUBCLASS 0x00FF0000ul
+#define BASECLASS 0xFF000000ul
+
+#define CSIZE_LATTIME 0x0C
+#define CACHESIZE 0x0000003Ful /* only 5 bits */
+#define LATTIME 0x0000FF00ul
+
+#define DEVCONFIG 0x40
+#define MPORTMODE 0x00000400ul /* aic7870 only */
+#define RAMPSM 0x00000200ul /* aic7870 only */
+#define VOLSENSE 0x00000100ul
+#define SCBRAMSEL 0x00000080ul
+#define MRDCEN 0x00000040ul
+#define EXTSCBTIME 0x00000020ul /* aic7870 only */
+#define EXTSCBPEN 0x00000010ul /* aic7870 only */
+#define BERREN 0x00000008ul
+#define DACEN 0x00000004ul
+#define STPWLEVEL 0x00000002ul
+#define DIFACTNEGEN 0x00000001ul /* aic7870 only */
+
+/*
+ *
+ * Define the format of the SEEPROM registers (16 bits).
+ *
+ */
+struct seeprom_config {
+
+/*
+ * SCSI ID Configuration Flags
+ */
+#define CFXFER 0x0007 /* synchronous transfer rate */
+#define CFSYNCH 0x0008 /* enable synchronous transfer */
+#define CFDISC 0x0010 /* enable disconnection */
+#define CFWIDEB 0x0020 /* wide bus device (wide card) */
+/* UNUSED 0x00C0 */
+#define CFSTART 0x0100 /* send start unit SCSI command */
+#define CFINCBIOS 0x0200 /* include in BIOS scan */
+#define CFRNFOUND 0x0400 /* report even if not found */
+/* UNUSED 0xF800 */
+ unsigned short device_flags[16]; /* words 0-15 */
+
+/*
+ * BIOS Control Bits
+ */
+#define CFSUPREM 0x0001 /* support all removable drives */
+#define CFSUPREMB 0x0002 /* support removable drives for boot only */
+#define CFBIOSEN 0x0004 /* BIOS enabled */
+/* UNUSED 0x0008 */
+#define CFSM2DRV 0x0010 /* support more than two drives */
+#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */
+/* UNUSED 0x0040 */
+#define CFEXTEND 0x0080 /* extended translation enabled */
+/* UNUSED 0xFF00 */
+ unsigned short bios_control; /* word 16 */
+
+/*
+ * Host Adapter Control Bits
+ */
+/* UNUSED 0x0001 */
+#define CFULTRAEN 0x0002 /* Ultra SCSI speed enable (Ultra cards) */
+#define CF284XSELTO 0x0003 /* Selection timeout (284x cards) */
+#define CF284XFIFO 0x000C /* FIFO Threshold (284x cards) */
+#define CFSTERM 0x0004 /* SCSI low byte termination (non-wide cards) */
+#define CFWSTERM 0x0008 /* SCSI high byte termination (wide card) */
+#define CFSPARITY 0x0010 /* SCSI parity */
+#define CF284XSTERM 0x0020 /* SCSI low byte termination (284x cards) */
+#define CFRESETB 0x0040 /* reset SCSI bus at IC initialization */
+/* UNUSED 0xFF80 */
+ unsigned short adapter_control; /* word 17 */
+
+/*
+ * Bus Release, Host Adapter ID
+ */
+#define CFSCSIID 0x000F /* host adapter SCSI ID */
+/* UNUSED 0x00F0 */
+#define CFBRTIME 0xFF00 /* bus release time */
+ unsigned short brtime_id; /* word 18 */
+
+/*
+ * Maximum targets
+ */
+#define CFMAXTARG 0x00FF /* maximum targets */
+/* UNUSED 0xFF00 */
+ unsigned short max_targets; /* word 19 */
+
+ unsigned short res_1[11]; /* words 20-30 */
+ unsigned short checksum; /* word 31 */
+};
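+
+/*
+ * Illustrative sketch only (not from the driver): how the per-target
+ * flag words above might be consulted. "sc" and "target" are
+ * hypothetical names:
+ *
+ *     struct seeprom_config sc;
+ *     int target = 3;
+ *
+ *     if (sc.device_flags[target] & CFSYNCH)
+ *         ... target may negotiate synchronous transfers ...
+ *     if (sc.bios_control & CFEXTEND)
+ *         ... extended geometry translation is enabled ...
+ */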
+
+/*
+ * Pause the sequencer and wait for it to actually stop - this
+ * is important since the sequencer can disable pausing for critical
+ * sections.
+ */
+#define PAUSE_SEQUENCER(p) \
+ outb(p->pause, HCNTRL + p->base); \
+ while ((inb(HCNTRL + p->base) & PAUSE) == 0) \
+ ; \
+
+/*
+ * Unpause the sequencer. Unremarkable, yet done often enough to
+ * warrant an easy way to do it.
+ */
+#define UNPAUSE_SEQUENCER(p) \
+ outb(p->unpause, HCNTRL + p->base)
+
+/*
+ * Restart the sequencer program from address zero
+ */
+#define RESTART_SEQUENCER(p) \
+ do { \
+ outb(SEQRESET | FASTMODE, SEQCTL + p->base); \
+ } while (inb(SEQADDR0 + p->base) != 0 && \
+ inb(SEQADDR1 + p->base) != 0); \
+ UNPAUSE_SEQUENCER(p);
+
+/*
+ * If an error occurs during a data transfer phase, run the command
+ * to completion - it's easier that way - making a note of the error
+ * condition in this location. This then will modify a DID_OK status
+ * into an appropriate error for the higher-level SCSI code.
+ */
+#define aic7xxx_error(cmd) ((cmd)->SCp.Status)
+
+/*
+ * Keep track of the target's returned status.
+ */
+#define aic7xxx_status(cmd) ((cmd)->SCp.sent_command)
+
+/*
+ * The position of the SCSI command's scb within the scb array.
+ */
+#define aic7xxx_position(cmd) ((cmd)->SCp.have_data_in)
+
+/*
+ * Since the sequencer code DMAs the scatter-gather structures
+ * directly from memory, we use this macro to assert that the
+ * kernel structure hasn't changed.
+ */
+#define SG_STRUCT_CHECK(sg) \
+ ((char *) &(sg).address - (char *) &(sg) != 0 || \
+ (char *) &(sg).length - (char *) &(sg) != 8 || \
+ sizeof((sg).address) != 4 || \
+ sizeof((sg).length) != 4 || \
+ sizeof(sg) != 12)
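+
+/*
+ * Equivalently (illustrative only): the check above asserts that
+ * struct scatterlist still has
+ *
+ *     offsetof(struct scatterlist, address) == 0   (a 4-byte pointer)
+ *     offsetof(struct scatterlist, length)  == 8   (a 4-byte length)
+ *     sizeof(struct scatterlist)            == 12
+ *
+ * since the sequencer reads these fields at fixed offsets.
+ */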
+
+/*
+ * "Static" structures. Note that these are NOT initialized
+ * to zero inside the kernel - we have to initialize them all
+ * explicitly.
+ *
+ * We support multiple adapter cards per interrupt, but keep a
+ * linked list of Scsi_Host structures for each IRQ. On an interrupt,
+ * use the IRQ as an index into aic7xxx_boards[] to locate the card
+ * information.
+ */
+static struct Scsi_Host *aic7xxx_boards[MAXIRQ + 1];
+
+/*
+ * When we detect and register the card, it is possible to
+ * have the card raise a spurious interrupt. Because we need
+ * to support multiple cards, we cannot tell which card caused
+ * the spurious interrupt. And, we might not even have added
+ * the card info to the linked list at the time the spurious
+ * interrupt gets raised. This variable is supposed to keep track
+ * of when we are registering a card and how many spurious
+ * interrupts we have encountered.
+ *
+ * 0 - do not allow spurious interrupts.
+ * 1 - allow 1 spurious interrupt
+ * 2 - have 1 spurious interrupt, do not allow any more.
+ *
+ * I've made it an integer instead of a boolean in case we
+ * want to allow more than one spurious interrupt for debugging
+ * purposes. Otherwise, it could just go from true to false to
+ * true (or something like that).
+ *
+ * When the driver detects the cards, we'll set the count to 1
+ * for each card detection and registration. After the registration
+ * of a card completes, we'll set the count back to 0. So far, it
+ * seems to be enough to allow a spurious interrupt only during
+ * card registration; if a spurious interrupt is going to occur,
+ * this is where it happens.
+ *
+ * We should be able to find a way to avoid getting the spurious
+ * interrupt. But until we do, we have to keep this ugly code.
+ */
+static int aic7xxx_spurious_count;
+
+/*
+ * The driver keeps an array of up to AIC7XXX_MAXSCB scb structures per card
+ * in memory. Only the first 25 bytes of each structure are used by the
+ * hardware; the rest is driver-level bookkeeping.
+ */
+
+struct aic7xxx_scb {
+/* ------------ Begin hardware supported fields ---------------- */
+/* 0*/ unsigned char control;
+/* 1*/ unsigned char target_channel_lun; /* 4/1/3 bits */
+/* 2*/ unsigned char target_status;
+/* 3*/ unsigned char SG_segment_count;
+/* 4*/ unsigned char SG_list_pointer[4] __attribute__ ((packed));
+/* 8*/ unsigned char residual_SG_segment_count;
+/* 9*/ unsigned char residual_data_count[3];
+/*12*/ unsigned char data_pointer[4] __attribute__ ((packed));
+/*16*/ unsigned long data_count;
+/*20*/ unsigned char SCSI_cmd_pointer[4] __attribute__ ((packed));
+/*24*/ unsigned char SCSI_cmd_length;
+#define SCB_PIO_TRANSFER_SIZE 25 /*
+ * amount we need to upload/download
+ * via rep in/outsb to perform
+ * a request sense. The second
+ * RESERVED byte is initialized to
+ * 0 in getscb().
+ */
+/*25*/ u_char next_waiting; /* Used to thread SCBs awaiting selection. */
+ /*-----------------end of hardware supported fields----------------*/
+ struct aic7xxx_scb *next; /* next ptr when in free list */
+ Scsi_Cmnd *cmd; /* Scsi_Cmnd for this scb */
+#define SCB_FREE 0x00
+#define SCB_ACTIVE 0x01
+#define SCB_ABORTED 0x02
+#define SCB_DEVICE_RESET 0x04
+#define SCB_IMMED 0x08
+#define SCB_SENSE 0x10
+ int state; /* current state of scb */
+ unsigned int position; /* Position in scb array */
+ struct scatterlist sg;
+ struct scatterlist sense_sg;
+ unsigned char sense_cmd[6]; /* Allocate 6 characters for sense command */
+};
+
+static struct {
+ unsigned char errno;
+ const char *errmesg;
+} hard_error[] = {
+ { ILLHADDR, "Illegal Host Access" },
+ { ILLSADDR, "Illegal Sequencer Address referenced" },
+ { ILLOPCODE, "Illegal Opcode in sequencer program" },
+ { PARERR, "Sequencer Ram Parity Error" }
+};
+
+static unsigned char
+generic_sense[] = { REQUEST_SENSE, 0, 0, 0, 255, 0 };
+
+/*
+ * The maximum number of SCBs we could have for ANY type
+ * of card. DON'T FORGET TO CHANGE THE SCB MASK IN THE
+ * SEQUENCER CODE IF THIS IS MODIFIED!
+ */
+#define AIC7XXX_MAXSCB 255
+
+/*
+ * Define a structure used for each host adapter, only one per IRQ.
+ */
+struct aic7xxx_host {
+ int base; /* card base address */
+ int maxscb; /* hardware SCBs */
+ int numscb; /* current number of scbs */
+ int extended; /* extended xlate? */
+ aha_type type; /* card type */
+ aha_chip_type chip_type; /* chip base type */
+ int ultra_enabled; /* Ultra SCSI speed enabled */
+ int chan_num; /* for 3940/3985, channel number */
+ aha_bus_type bus_type; /* normal/twin/wide bus */
+ unsigned char a_scanned; /* 0 not scanned, 1 scanned */
+ unsigned char b_scanned; /* 0 not scanned, 1 scanned */
+ unsigned int isr_count; /* Interrupt count */
+ volatile unsigned char unpause; /* unpause value for HCNTRL */
+ volatile unsigned char pause; /* pause value for HCNTRL */
+ volatile unsigned short needsdtr_copy; /* default config */
+ volatile unsigned short needsdtr;
+ volatile unsigned short sdtr_pending;
+ volatile unsigned short needwdtr_copy; /* default config */
+ volatile unsigned short needwdtr;
+ volatile unsigned short wdtr_pending;
+ volatile unsigned short discenable; /* Targets allowed to disconnect */
+ struct seeprom_config seeprom;
+ int have_seeprom;
+ struct Scsi_Host *next; /* allow for multiple IRQs */
+ struct aic7xxx_scb scb_array[AIC7XXX_MAXSCB]; /* active commands */
+ struct aic7xxx_scb *free_scb; /* list of free SCBs */
+#ifdef AIC7XXX_PROC_STATS
+ /*
+ * Statistics Kept:
+ *
+ * Total Xfers (count for each command that has a data xfer),
+ * broken down further by reads && writes.
+ *
+ * Binned sizes, writes && reads:
+ * < 512, 512, 1-2K, 2-4K, 4-8K, 8-16K, 16-32K, 32-64K, 64K-128K, > 128K
+ *
+ * Total amounts read/written above 512 bytes (amts under ignored)
+ */
+ struct aic7xxx_xferstats {
+ long xfers; /* total xfer count */
+ long w_total; /* total writes */
+ long w_total512; /* 512 byte blocks written */
+ long w_bins[10]; /* binned write */
+ long r_total; /* total reads */
+ long r_total512; /* 512 byte blocks read */
+ long r_bins[10]; /* binned reads */
+ } stats[2][16][8]; /* channel, target, lun */
+#endif /* AIC7XXX_PROC_STATS */
+};
+
+struct aic7xxx_host_config {
+ int irq; /* IRQ number */
+ int base; /* I/O base */
+ int maxscb; /* hardware SCBs */
+ int unpause; /* unpause value for HCNTRL */
+ int pause; /* pause value for HCNTRL */
+ int scsi_id; /* host SCSI ID */
+ int scsi_id_b; /* host SCSI ID B channel for twin cards */
+ int extended; /* extended xlate? */
+ int busrtime; /* bus release time */
+ int walk_scbs; /* external SCB RAM detected; walk the scb array */
+ aha_type type; /* card type */
+ aha_chip_type chip_type; /* chip base type */
+ int ultra_enabled; /* Ultra SCSI speed enabled */
+ int chan_num; /* for 3940/3985, channel number */
+ aha_bus_type bus_type; /* normal/twin/wide bus */
+ aha_status_type parity; /* bus parity enabled/disabled */
+ aha_status_type low_term; /* bus termination low byte */
+ aha_status_type high_term; /* bus termination high byte (wide cards only) */
+};
+
+/*
+ * Valid SCSIRATE values. (p. 3-17)
+ * Provides a mapping of transfer periods in ns to the proper value to
+ * stick in the SCSIRATE register to use that transfer rate.
+ */
+static struct {
+ short period;
+ /* Rates in Ultra mode have bit 8 of sxfr set */
+#define ULTRA_SXFR 0x100
+ short rate;
+ const char *english;
+} aic7xxx_syncrates[] = {
+ { 50, 0x100, "20.0" },
+ { 62, 0x110, "16.0" },
+ { 75, 0x120, "13.4" },
+ { 100, 0x140, "10.0" },
+ { 100, 0x000, "10.0" },
+ { 125, 0x010, "8.0" },
+ { 150, 0x020, "6.67" },
+ { 175, 0x030, "5.7" },
+ { 200, 0x040, "5.0" },
+ { 225, 0x050, "4.4" },
+ { 250, 0x060, "4.0" },
+ { 275, 0x070, "3.6" }
+};
+
+static int num_aic7xxx_syncrates =
+ sizeof(aic7xxx_syncrates) / sizeof(aic7xxx_syncrates[0]);
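+
+/*
+ * Worked example from the table above: a target negotiating a 100 ns
+ * synchronous period gets SXFR bits 0x000 ("10.0" MHz) when Ultra rates
+ * are disabled, or 0x140 when they are enabled; a period slower than
+ * 275 ns falls off the end of the table and aic7xxx_scsirate() below
+ * reverts to asynchronous transfers.
+ */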
+
+#ifdef CONFIG_PCI
+static int number_of_39xxs = 0;
+#endif /* CONFIG_PCI */
+
+#ifdef AIC7XXX_DEBUG
+static void
+debug(const char *fmt, ...)
+{
+ va_list ap;
+ char buf[256];
+
+ va_start(ap, fmt);
+ vsprintf(buf, fmt, ap);
+ printk(buf);
+ va_end(ap);
+}
+
+static void
+debug_config(struct aic7xxx_host_config *p)
+{
+ int host_conf, scsi_conf;
+ unsigned char brelease;
+ unsigned char dfthresh;
+
+ static int DFT[] = { 0, 50, 75, 100 };
+ static int SST[] = { 256, 128, 64, 32 };
+ static const char *BUSW[] = { "", "-TWIN", "-WIDE" };
+
+ host_conf = inb(HOSTCONF + p->base);
+ scsi_conf = inb(SCSICONF + p->base);
+
+ /*
+ * The 7870 gets the bus release time and data FIFO threshold
+ * from the serial EEPROM (stored in the config structure) and
+ * scsi_conf register respectively. The 7770 gets the bus
+ * release time and data FIFO threshold from the scsi_conf and
+ * host_conf registers respectively.
+ */
+ if (p->chip_type == AIC_777x)
+ {
+ dfthresh = (host_conf >> 6);
+ }
+ else
+ {
+ dfthresh = (scsi_conf >> 6);
+ }
+
+ brelease = p->busrtime;
+ if (brelease == 0)
+ {
+ brelease = 2;
+ }
+
+ switch (p->type)
+ {
+ case AIC_7770:
+ case AIC_7771:
+ printk("%s%s AT EISA SLOT %d:\n", board_names[p->type], BUSW[p->bus_type],
+ p->base >> 12);
+ break;
+
+ case AIC_284x:
+ printk("%s%s AT VLB SLOT %d:\n", board_names[p->type], BUSW[p->bus_type],
+ p->base >> 12);
+ break;
+
+ case AIC_7850:
+ case AIC_7870:
+ case AIC_7871:
+ case AIC_7872:
+ case AIC_7873:
+ case AIC_7874:
+ case AIC_7880:
+ case AIC_7881:
+ case AIC_7882:
+ case AIC_7883:
+ case AIC_7884:
+ printk("%s%s (PCI-bus):\n", board_names[p->type], BUSW[p->bus_type]);
+ break;
+
+ default:
+ panic("aic7xxx: (debug_config) internal error.\n");
+ }
+
+ printk(" irq %d\n"
+ " bus release time %d bclks\n"
+ " data fifo threshold %d%%\n",
+ p->irq,
+ brelease,
+ DFT[dfthresh]);
+
+ printk(" SCSI CHANNEL A:\n"
+ " scsi id %d\n"
+ " scsi selection timeout %d ms\n"
+ " scsi bus reset at power-on %sabled\n",
+ scsi_conf & 0x07,
+ SST[(scsi_conf >> 3) & 0x03],
+ (scsi_conf & 0x40) ? "en" : "dis");
+
+ if ((p->chip_type == AIC_777x) && (p->parity == AIC_UNKNOWN))
+ {
+ /*
+ * Set the parity for 7770 based cards.
+ */
+ p->parity = (scsi_conf & 0x20) ? AIC_ENABLED : AIC_DISABLED;
+ }
+ if (p->parity != AIC_UNKNOWN)
+ {
+ printk(" scsi bus parity %sabled\n",
+ (p->parity == AIC_ENABLED) ? "en" : "dis");
+ }
+
+ if ((p->type == AIC_7770) || (p->type == AIC_7771))
+ {
+ p->low_term = (scsi_conf & 0x80) ? AIC_ENABLED : AIC_DISABLED;
+ }
+ if (p->low_term != AIC_UNKNOWN)
+ {
+ printk(" scsi bus termination (low byte) %sabled\n",
+ (p->low_term == AIC_ENABLED) ? "en" : "dis");
+ }
+ if ((p->bus_type == AIC_WIDE) && (p->high_term != AIC_UNKNOWN))
+ {
+ printk(" scsi bus termination (high byte) %sabled\n",
+ (p->high_term == AIC_ENABLED) ? "en" : "dis");
+ }
+}
+
+#if 0
+static void
+debug_scb(struct aic7xxx_scb *scb)
+{
+ printk("control 0x%x, tcl 0x%x, sg_count %d, sg_ptr 0x%x, cmdp 0x%x, cmdlen %d\n",
+ scb->control, scb->target_channel_lun, scb->SG_segment_count,
+ (scb->SG_list_pointer[3] << 24) | (scb->SG_list_pointer[2] << 16) |
+ (scb->SG_list_pointer[1] << 8) | scb->SG_list_pointer[0],
+ (scb->SCSI_cmd_pointer[3] << 24) | (scb->SCSI_cmd_pointer[2] << 16) |
+ (scb->SCSI_cmd_pointer[1] << 8) | scb->SCSI_cmd_pointer[0],
+ scb->SCSI_cmd_length);
+ printk("reserved 0x%x, target status 0x%x, resid SG count %d, resid data count %d\n",
+ (scb->RESERVED[1] << 8) | scb->RESERVED[0], scb->target_status,
+ scb->residual_SG_segment_count, scb->residual_data_count);
+ printk("data ptr 0x%x, data count %d, next waiting %d\n",
+ (scb->data_pointer[3] << 24) | (scb->data_pointer[2] << 16) |
+ (scb->data_pointer[1] << 8) | scb->data_pointer[0],
+ scb->data_count, scb->next_waiting);
+ printk("next ptr 0x%lx, Scsi Cmnd 0x%lx, state 0x%x, position %d\n",
+ (unsigned long) scb->next, (unsigned long) scb->cmd, scb->state,
+ scb->position);
+}
+#endif
+
+#else
+# define debug(fmt, args...)
+# define debug_config(x)
+# define debug_scb(x)
+#endif /* AIC7XXX_DEBUG */
+
+/*
+ * XXX - these options apply unilaterally to _all_ 274x/284x/294x
+ * cards in the system. This should be fixed, but then,
+ * does anyone really have more than one in a machine?
+ */
+static unsigned int aic7xxx_extended = 0; /* extended translation on? */
+static unsigned int aic7xxx_no_reset = 0; /* no resetting of SCSI bus */
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_setup
+ *
+ * Description:
+ * Handle Linux boot parameters. This routine allows for assigning a value
+ * to a parameter with a ':' between the parameter and the value.
+ * e.g. aic7xxx=extended:1,no_reset
+ *-F*************************************************************************/
+void
+aic7xxx_setup(char *s, int *dummy)
+{
+ int i, n;
+ char *p;
+
+ static struct {
+ const char *name;
+ unsigned int *flag;
+ } options[] = {
+ { "extended", &aic7xxx_extended },
+ { "no_reset", &aic7xxx_no_reset },
+ { NULL, NULL }
+ };
+
+ for (p = strtok(s, ","); p; p = strtok(NULL, ","))
+ {
+ for (i = 0; options[i].name; i++)
+ {
+ n = strlen(options[i].name);
+ if (!strncmp(options[i].name, p, n))
+ {
+ if (p[n] == ':')
+ {
+ *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
+ }
+ else
+ {
+ *(options[i].flag) = !0;
+ }
+ }
+ }
+ }
+}
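+
+/*
+ * Usage sketch (boot command line; the values are only examples):
+ *
+ *     aic7xxx=extended
+ *     aic7xxx=no_reset,extended:1
+ *
+ * A bare option name sets its flag to non-zero; a ":value" suffix is
+ * parsed with simple_strtoul() and stored instead.
+ */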
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_loadseq
+ *
+ * Description:
+ * Load the sequencer code into the controller memory.
+ *-F*************************************************************************/
+static void
+aic7xxx_loadseq(int base)
+{
+ static unsigned char seqprog[] = {
+ /*
+ * Each sequencer instruction is 29 bits
+ * long (fill in the excess with zeroes)
+ * and has to be loaded from least -> most
+ * significant byte, so this table has the
+ * byte ordering reversed.
+ */
+# include "aic7xxx_seq.h"
+ };
+
+ /*
+ * When the AIC-7770 is paused (as on chip reset), the
+ * sequencer address can be altered and a sequencer
+ * program can be loaded by writing it, byte by byte, to
+ * the sequencer RAM port - the Adaptec documentation
+ * recommends using REP OUTSB to do this, hence the inline
+ * assembly. Since the address autoincrements as we load
+ * the program, reset it back to zero afterward. Disable
+ * sequencer RAM parity error detection while loading, and
+ * make sure the LOADRAM bit is enabled for loading.
+ */
+ outb(PERRORDIS | SEQRESET | LOADRAM, SEQCTL + base);
+
+ outsb(SEQRAM + base, seqprog, sizeof(seqprog));
+
+ /*
+ * WARNING! This is a magic sequence! After extensive
+ * experimentation, it seems that you MUST turn off the
+ * LOADRAM bit before you play with SEQADDR again, else
+ * you will end up with parity errors being flagged on
+ * your sequencer program. (You would also think that
+ * turning off LOADRAM and setting SEQRESET to reset the
+ * address to zero would work, but you need to do it twice
+ * for it to take effect on the address. Timing problem?)
+ */
+ do {
+ /*
+ * Actually, reset it until
+ * the address shows up as
+ * zero just to be safe..
+ */
+ outb(SEQRESET | FASTMODE, SEQCTL + base);
+ } while ((inb(SEQADDR0 + base) != 0) && (inb(SEQADDR1 + base) != 0));
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_delay
+ *
+ * Description:
+ * Delay for specified amount of time.
+ *-F*************************************************************************/
+static void
+aic7xxx_delay(int seconds)
+{
+ unsigned long i;
+
+ i = jiffies + (seconds * HZ); /* compute time to stop */
+
+ while (jiffies < i)
+ {
+ ; /* Do nothing! */
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * rcs_version
+ *
+ * Description:
+ * Return a string containing just the RCS version number from either
+ * an Id or Revision RCS clause.
+ *-F*************************************************************************/
+const char *
+rcs_version(const char *version_info)
+{
+ static char buf[10];
+ char *bp, *ep;
+
+ bp = NULL;
+ strcpy(buf, "????");
+ if (!strncmp(version_info, "$Id: ", 5))
+ {
+ if ((bp = strchr(version_info, ' ')) != NULL)
+ {
+ bp++;
+ if ((bp = strchr(bp, ' ')) != NULL)
+ {
+ bp++;
+ }
+ }
+ }
+ else
+ {
+ if (!strncmp(version_info, "$Revision: ", 11))
+ {
+ if ((bp = strchr(version_info, ' ')) != NULL)
+ {
+ bp++;
+ }
+ }
+ }
+
+ if (bp != NULL)
+ {
+ if ((ep = strchr(bp, ' ')) != NULL)
+ {
+ register int len = ep - bp;
+
+ strncpy(buf, bp, len);
+ buf[len] = '\0';
+ }
+ }
+
+ return buf;
+}
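+
+/*
+ * For example, rcs_version("$Revision: 1.1.1.1 $") and
+ * rcs_version("$Id: aic7xxx.c,v 1.1.1.1 1997/02/25 21:27:46 thomas Exp $")
+ * both return "1.1.1.1"; a string in neither form returns "????".
+ */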
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_info
+ *
+ * Description:
+ * Return a string describing the driver.
+ *-F*************************************************************************/
+const char *
+aic7xxx_info(struct Scsi_Host *notused)
+{
+ static char buffer[128];
+
+ strcpy(buffer, "Adaptec AHA274x/284x/294x (EISA/VLB/PCI-Fast SCSI) ");
+ strcat(buffer, rcs_version(AIC7XXX_C_VERSION));
+ strcat(buffer, "/");
+ strcat(buffer, rcs_version(AIC7XXX_H_VERSION));
+ strcat(buffer, "/");
+ strcat(buffer, rcs_version(AIC7XXX_SEQ_VER));
+
+ return buffer;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_length
+ *
+ * Description:
+ * How much data should be transferred for this SCSI command? Stop
+ * at segment sg_last if it's a scatter-gather command so we can
+ * compute underflow easily.
+ *-F*************************************************************************/
+static unsigned
+aic7xxx_length(Scsi_Cmnd *cmd, int sg_last)
+{
+ int i, segments;
+ unsigned length;
+ struct scatterlist *sg;
+
+ segments = cmd->use_sg - sg_last;
+ sg = (struct scatterlist *) cmd->buffer;
+
+ if (cmd->use_sg)
+ {
+ for (i = length = 0; (i < cmd->use_sg) && (i < segments); i++)
+ {
+ length += sg[i].length;
+ }
+ }
+ else
+ {
+ length = cmd->request_bufflen;
+ }
+
+ return (length);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_scsirate
+ *
+ * Description:
+ * Look up the valid period to SCSIRATE conversion in our table
+ *-F*************************************************************************/
+static void
+aic7xxx_scsirate(struct aic7xxx_host *p, unsigned char *scsirate,
+ short period, unsigned char offset,
+ int target, char channel)
+{
+ int i;
+
+ for (i = 0; i < num_aic7xxx_syncrates; i++)
+ {
+ if ((aic7xxx_syncrates[i].period - period) >= 0)
+ {
+ /*
+ * Watch out for Ultra speeds when ultra is not enabled and
+ * vice-versa.
+ */
+ if (p->ultra_enabled)
+ {
+ if (!(aic7xxx_syncrates[i].rate & ULTRA_SXFR))
+ {
+ printk ("aic7xxx: Target %d, channel %c, requests %sMHz transfers, "
+ "but adapter in Ultra mode can only sync at 10MHz or "
+ "above.\n", target, channel, aic7xxx_syncrates[i].english);
+ break; /* Use asynchronous transfers. */
+ }
+ }
+ else
+ {
+ /*
+ * Check for an Ultra device trying to negotiate an Ultra rate
+ * on an adapter with Ultra mode disabled.
+ */
+ if (aic7xxx_syncrates[i].rate & ULTRA_SXFR)
+ {
+ /*
+ * This should only happen if the driver is the first to negotiate
+ * and chooses a high rate. We'll just move down the table until
+ * we hit a non Ultra speed.
+ */
+ continue;
+ }
+ }
+ *scsirate = (aic7xxx_syncrates[i].rate) | (offset & 0x0F);
+ printk("aic7xxx: Target %d, channel %c, now synchronous at %sMHz, "
+ "offset(0x%x).\n",
+ target, channel, aic7xxx_syncrates[i].english, offset);
+ return;
+ }
+ }
+
+ /*
+ * Default to asynchronous transfer
+ */
+ *scsirate = 0;
+ printk("aic7xxx: Target %d, channel %c, using asynchronous transfers.\n",
+ target, channel);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_putscb
+ *
+ * Description:
+ * Transfer a SCB to the controller.
+ *-F*************************************************************************/
+static inline void
+aic7xxx_putscb(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ unsigned char curscb;
+ int base = p->base;
+
+ curscb = inb(SCBPTR + base);
+ outb(scb->position, SCBPTR + base);
+ outb(SCBAUTO, SCBCNT + base);
+
+ /*
+ * By turning on the SCB auto increment, any reference
+ * to the SCB I/O space postincrements the SCB address
+ * we're looking at. So turn this on and dump the relevant
+ * portion of the SCB to the card.
+ *
+ * We can do 16bit transfers on all but 284x.
+ */
+ if (p->type == AIC_284x)
+ {
+ outsb(SCBARRAY + base, scb, SCB_PIO_TRANSFER_SIZE);
+ }
+ else
+ {
+ outsl(SCBARRAY + base, scb, (SCB_PIO_TRANSFER_SIZE + 3) / 4);
+ }
+
+ outb(0, SCBCNT + base);
+ outb(curscb, SCBPTR + base);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_getscb
+ *
+ * Description:
+ * Get a SCB from the controller.
+ *-F*************************************************************************/
+static inline void
+aic7xxx_getscb(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ int base = p->base;
+
+ /*
+ * This is almost identical to aic7xxx_putscb().
+ */
+ outb(SCBAUTO, SCBCNT + base);
+ insb(SCBARRAY + base, scb, SCB_PIO_TRANSFER_SIZE);
+ outb(0, SCBCNT + base);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_match_scb
+ *
+ * Description:
+ * Checks to see if an scb matches the target/channel as specified.
+ * If target is ALL_TARGETS (-1), then we're looking for any device
+ * on the specified channel; this happens when a channel is going
+ * to be reset and all devices on that channel must be aborted.
+ *-F*************************************************************************/
+static int
+aic7xxx_match_scb(struct aic7xxx_scb *scb, int target, char channel)
+{
+ int targ = (scb->target_channel_lun >> 4) & 0x0F;
+ char chan = (scb->target_channel_lun & SELBUSB) ? 'B' : 'A';
+
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (match_scb) comparing target/channel %d/%c to scb %d/%c\n",
+ target, channel, targ, chan);
+#endif
+ if (target == ALL_TARGETS)
+ {
+ return (chan == channel);
+ }
+ else
+ {
+ return ((chan == channel) && (targ == target));
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_busy_target
+ *
+ * Description:
+ * Set the specified target active.
+ *-F*************************************************************************/
+static void
+aic7xxx_busy_target(unsigned char target, char channel, int base)
+{
+ unsigned char active;
+ unsigned long active_port = ACTIVE_A + base;
+
+ if ((target > 0x07) || (channel == 'B'))
+ {
+ /*
+ * Targets on the second channel, or with an ID above 7, store their
+ * info in byte two of ACTIVE.
+ */
+ active_port++;
+ }
+ active = inb(active_port);
+ active |= (0x01 << (target & 0x07));
+ outb(active, active_port);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_unbusy_target
+ *
+ * Description:
+ * Set the specified target inactive.
+ *-F*************************************************************************/
+static void
+aic7xxx_unbusy_target(unsigned char target, char channel, int base)
+{
+ unsigned char active;
+ unsigned long active_port = ACTIVE_A + base;
+
+#if 0
+ printk ("aic7xxx: (unbusy_target) target/channel %d/%c\n",
+ target, channel);
+#endif
+ if ((target > 0x07) || (channel == 'B'))
+ {
+ /*
+ * Targets on the second channel, or with an ID above 7, store their
+ * info in byte two of ACTIVE.
+ */
+ active_port++;
+ }
+ active = inb(active_port);
+ active &= ~(0x01 << (target & 0x07));
+ outb(active, active_port);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_done
+ *
+ * Description:
+ * Calls the higher level scsi done function and frees the scb.
+ *-F*************************************************************************/
+static void
+aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ long flags;
+ Scsi_Cmnd *cmd = scb->cmd;
+
+#if 0
+ printk ("aic7xxx: (done) target/channel %d/%d\n",
+ cmd->target, cmd->channel);
+#endif
+ /*
+ * This is a critical section, since we don't want the
+ * queue routine mucking with the host data.
+ */
+ save_flags(flags);
+ cli();
+
+ /*
+ * Process the command after marking the scb as free
+ * and adding it to the free list.
+ */
+ scb->state = SCB_FREE;
+ scb->next = p->free_scb;
+ p->free_scb = scb;
+ scb->cmd = NULL;
+
+ restore_flags(flags);
+
+ cmd->scsi_done(cmd);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_add_waiting_scb
+ *
+ * Description:
+ * Add this SCB to the "waiting for selection" list.
+ *-F*************************************************************************/
+static void
+aic7xxx_add_waiting_scb(u_long base,
+ struct aic7xxx_scb *scb,
+ insert_type where)
+{
+ unsigned char head;
+ unsigned char curscb;
+
+ curscb = inb(SCBPTR + base);
+ head = inb(WAITING_SCBH + base);
+ if (head == SCB_LIST_NULL)
+ {
+ /*
+ * List was empty
+ */
+ head = scb->position;
+ }
+ else
+ {
+ if (where == LIST_HEAD)
+ {
+ outb(scb->position, SCBPTR + base);
+ outb(head, SCB_NEXT_WAITING + base);
+ head = scb->position;
+ }
+ else
+ {
+ /* where == LIST_SECOND */
+ unsigned char third_scb;
+
+ outb(head, SCBPTR + base);
+ third_scb = inb(SCB_NEXT_WAITING + base);
+ outb(scb->position, SCB_NEXT_WAITING + base);
+ outb(scb->position, SCBPTR + base);
+ outb(third_scb, SCB_NEXT_WAITING + base);
+ }
+ }
+ outb(head, WAITING_SCBH + base);
+ outb(curscb, SCBPTR + base);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_abort_waiting_scb
+ *
+ * Description:
+ * Manipulate the waiting for selection list and return the
+ * scb that follows the one that we remove.
+ *-F*************************************************************************/
+static unsigned char
+aic7xxx_abort_waiting_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb,
+ unsigned char prev, unsigned char timedout_scb)
+{
+ unsigned char curscb, next;
+ int target = (scb->target_channel_lun >> 4) & 0x0F;
+ char channel = (scb->target_channel_lun & SELBUSB) ? 'B' : 'A';
+ int base = p->base;
+
+ /*
+ * Select the SCB we want to abort and
+ * pull the next pointer out of it.
+ */
+ curscb = inb(SCBPTR + base);
+ outb(scb->position, SCBPTR + base);
+ next = inb(SCB_NEXT_WAITING + base);
+
+ /*
+ * Clear the necessary fields
+ */
+ outb(0, SCBARRAY + base);
+ outb(SCB_LIST_NULL, SCB_NEXT_WAITING + base);
+ aic7xxx_unbusy_target(target, channel, base);
+
+ /*
+ * Update the waiting list
+ */
+ if (prev == SCB_LIST_NULL)
+ {
+ /*
+ * First in the list
+ */
+ outb(next, WAITING_SCBH + base);
+ }
+ else
+ {
+ /*
+ * Select the scb that pointed to us and update its next pointer.
+ */
+ outb(prev, SCBPTR + base);
+ outb(next, SCB_NEXT_WAITING + base);
+ }
+ /*
+ * Update the tail pointer
+ */
+ if (inb(WAITING_SCBT + base) == scb->position)
+ {
+ outb(prev, WAITING_SCBT + base);
+ }
+
+ /*
+ * Point us back at the original scb position
+ * and inform the SCSI system that the command
+ * has been aborted.
+ */
+ outb(curscb, SCBPTR + base);
+ scb->state |= SCB_ABORTED;
+ scb->cmd->result = (DID_RESET << 16);
+ aic7xxx_done(p, scb);
+
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (abort_waiting_scb) target/channel %d/%c, prev %d, "
+ "to_scb %d, next %d\n", target, channel, prev, timedout_scb, next);
+#endif
+ return (next);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset_device
+ *
+ * Description:
+ * The device at the given target/channel has been reset. Abort
+ * all active and queued scbs for that target/channel.
+ *-F*************************************************************************/
+static int
+aic7xxx_reset_device(struct aic7xxx_host *p, int target, char channel,
+ unsigned char timedout_scb)
+{
+ int base = p->base;
+ struct aic7xxx_scb *scb;
+ unsigned char active_scb;
+ int i = 0;
+ int found = 0;
+
+ /*
+ * Restore this when we're done
+ */
+ active_scb = inb(SCBPTR + base);
+
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (reset_device) target/channel %d/%c, to_scb %d, "
+ "active_scb %d\n", target, channel, timedout_scb, active_scb);
+#endif
+ /*
+ * Search the QINFIFO.
+ */
+ {
+ int saved_queue[AIC7XXX_MAXSCB];
+ int queued = inb(QINCNT + base);
+
+ for (i = 0; i < (queued - found); i++)
+ {
+ saved_queue[i] = inb(QINFIFO + base);
+ scb = &(p->scb_array[saved_queue[i]]);
+ if (aic7xxx_match_scb(scb, target, channel))
+ {
+ /*
+ * We found an scb that needs to be aborted.
+ */
+ scb->state |= SCB_ABORTED;
+ scb->cmd->result = (DID_RESET << 16);
+ aic7xxx_done(p, scb);
+ outb(scb->position, SCBPTR + base);
+ outb(0, SCBARRAY + base);
+ i--;
+ found++;
+ }
+ }
+ /*
+ * Now put the saved scbs back.
+ */
+ for (queued = 0; queued < i; queued++)
+ {
+ outb(saved_queue[queued], QINFIFO + base);
+ }
+ }
+
+ /*
+ * Search waiting for selection list.
+ */
+ {
+ unsigned char next, prev;
+
+ next = inb(WAITING_SCBH + base); /* Start at head of list. */
+ prev = SCB_LIST_NULL;
+
+ while (next != SCB_LIST_NULL)
+ {
+ scb = &(p->scb_array[next]);
+ /*
+ * Select the SCB.
+ */
+ if (aic7xxx_match_scb(scb, target, channel))
+ {
+ next = aic7xxx_abort_waiting_scb(p, scb, prev, timedout_scb);
+ found++;
+ }
+ else
+ {
+ outb(scb->position, SCBPTR + base);
+ prev = next;
+ next = inb(SCB_NEXT_WAITING + base);
+ }
+ }
+ }
+
+ /*
+ * Go through the entire SCB array now and look for
+ * commands for this target that are active. These
+ * are other (most likely tagged) commands that
+ * were disconnected when the reset occurred.
+ */
+ for (i = 0; i < p->numscb; i++)
+ {
+ scb = &(p->scb_array[i]);
+ if ((scb->state & SCB_ACTIVE) && aic7xxx_match_scb(scb, target, channel))
+ {
+ /*
+ * Ensure the target is "free"
+ */
+ aic7xxx_unbusy_target(target, channel, base);
+ outb(scb->position, SCBPTR + base);
+ outb(0, SCBARRAY + base);
+ scb->state |= SCB_ABORTED;
+ scb->cmd->result = (DID_RESET << 16);
+ aic7xxx_done(p, scb);
+ found++;
+ }
+ }
+
+ outb(active_scb, SCBPTR + base);
+ return (found);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset_current_bus
+ *
+ * Description:
+ * Reset the current SCSI bus.
+ *-F*************************************************************************/
+static void
+aic7xxx_reset_current_bus(int base)
+{
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (reset_current_bus)\n");
+#endif
+ outb(SCSIRSTO, SCSISEQ + base);
+ udelay(1000);
+ outb(0, SCSISEQ + base);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset_channel
+ *
+ * Description:
+ * Reset the channel.
+ *-F*************************************************************************/
+static int
+aic7xxx_reset_channel(struct aic7xxx_host *p, char channel,
+ unsigned char timedout_scb)
+{
+ int base = p->base;
+ unsigned char sblkctl;
+ char cur_channel;
+ unsigned long offset, offset_max;
+ int found;
+
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (reset_channel) channel %c, to_scb %d\n",
+ channel, timedout_scb);
+#endif
+ /*
+ * Clean up all the state information for the
+ * pending transactions on this bus.
+ */
+ found = aic7xxx_reset_device(p, ALL_TARGETS, channel, timedout_scb);
+
+ if (channel == 'B')
+ {
+ p->needsdtr |= (p->needsdtr_copy & 0xFF00);
+ p->sdtr_pending &= 0x00FF;
+ outb(0, ACTIVE_B + base);
+ offset = TARG_SCRATCH + base + 8;
+ offset_max = TARG_SCRATCH + base + 16;
+ }
+ else
+ {
+ if (p->bus_type == AIC_WIDE)
+ {
+ p->needsdtr = p->needsdtr_copy;
+ p->needwdtr = p->needwdtr_copy;
+ p->sdtr_pending = 0x0;
+ p->wdtr_pending = 0x0;
+ outb(0, ACTIVE_A + base);
+ outb(0, ACTIVE_B + base);
+ offset = TARG_SCRATCH + base;
+ offset_max = TARG_SCRATCH + base + 16;
+ }
+ else
+ {
+ p->needsdtr |= (p->needsdtr_copy & 0x00FF);
+ p->sdtr_pending &= 0xFF00;
+ outb(0, ACTIVE_A + base);
+ offset = TARG_SCRATCH + base;
+ offset_max = TARG_SCRATCH + base + 8;
+ }
+ }
+ while (offset < offset_max)
+ {
+ /*
+ * Revert to async/narrow transfers
+ * until we renegotiate.
+ */
+ u_char targ_scratch;
+ targ_scratch = inb(offset);
+ targ_scratch &= SXFR;
+ outb(targ_scratch, offset);
+ offset++;
+ }
+
+ /*
+ * Reset the bus and unpause/restart the controller
+ */
+
+ /*
+ * Case 1: Command for another bus is active
+ */
+ sblkctl = inb(SBLKCTL + base);
+ cur_channel = (sblkctl & SELBUSB) ? 'B' : 'A';
+ if (cur_channel != channel)
+ {
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (reset_channel) Stealthily resetting channel %c\n",
+ channel);
+#endif
+ /*
+ * Stealthily reset the other bus without upsetting the current bus
+ */
+ outb(sblkctl ^ SELBUSB, SBLKCTL + base);
+ aic7xxx_reset_current_bus(base);
+ outb(sblkctl, SBLKCTL + base);
+
+ UNPAUSE_SEQUENCER(p);
+ }
+ /*
+ * Case 2: A command from this bus is active or we're idle
+ */
+ else
+ {
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (reset_channel) Resetting current channel %c\n",
+ channel);
+#endif
+ aic7xxx_reset_current_bus(base);
+ RESTART_SEQUENCER(p);
+ }
+
+ return found;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_isr
+ *
+ * Description:
+ * SCSI controller interrupt handler.
+ *
+ * NOTE: Since we declared this using SA_INTERRUPT, interrupts should
+ * be disabled all through this function unless we say otherwise.
+ *-F*************************************************************************/
+static void
+aic7xxx_isr(int irq, struct pt_regs * regs)
+{
+ int base, intstat;
+ struct aic7xxx_host *p;
+ struct aic7xxx_scb *scb;
+ unsigned char ha_flags;
+ short transfer;
+ unsigned char scsi_id, bus_width;
+ unsigned char offset, rate, scratch, scratch_offset;
+ unsigned char max_offset, rej_byte;
+ unsigned short target_mask;
+ char channel;
+ void *addr;
+ int actual;
+ int scb_index;
+ Scsi_Cmnd *cmd;
+
+ p = (struct aic7xxx_host *) aic7xxx_boards[irq]->hostdata;
+
+ /*
+ * Search for the host with a pending interrupt. If we can't find
+ * one, then we've encountered a spurious interrupt.
+ */
+ while ((p != NULL) && !(inb(INTSTAT + p->base) & INT_PEND))
+ {
+ if (p->next == NULL)
+ {
+ p = NULL;
+ }
+ else
+ {
+ p = (struct aic7xxx_host *) p->next->hostdata;
+ }
+ }
+
+ if (p == NULL)
+ {
+ if (aic7xxx_spurious_count == 1)
+ {
+ aic7xxx_spurious_count = 2;
+ printk("aic7xxx: (aic7xxx_isr) Encountered spurious interrupt.\n");
+ return;
+ }
+ else
+ {
+ /*
+ * The best we can do is to set p back to head of list and process
+ * the erroneous interrupt - most likely a BRKADRINT.
+ */
+ p = (struct aic7xxx_host *) aic7xxx_boards[irq]->hostdata;
+ }
+ }
+
+ /*
+ * Keep track of interrupts for /proc/scsi
+ */
+ p->isr_count++;
+
+ if (!p->a_scanned && (p->isr_count == 1))
+ {
+ /*
+ * We must only have one card at this IRQ and it must have been
+ * added to the board data before the spurious interrupt occurred.
+ * It is sufficient that we check isr_count and not the spurious
+ * interrupt count.
+ */
+ printk("aic7xxx: (aic7xxx_isr) Encountered spurious interrupt.\n");
+ return;
+ }
+
+ base = p->base;
+ /*
+ * Handle all the interrupt sources - especially for SCSI
+ * interrupts, we won't get a second chance at them.
+ */
+ intstat = inb(INTSTAT + base);
+
+ if (intstat & BRKADRINT)
+ {
+ int i;
+ unsigned char errno = inb(ERROR + base);
+
+ printk("aic7xxx: (aic7xxx_isr) BRKADRINT error(0x%x):\n", errno);
+ for (i = 0; i < NUMBER(hard_error); i++)
+ {
+ if (errno & hard_error[i].errno)
+ {
+ printk(" %s\n", hard_error[i].errmesg);
+ }
+ }
+
+ panic("aic7xxx: (aic7xxx_isr) BRKADRINT, error(0x%x) seqaddr(0x%x).\n",
+ inb(ERROR + base), (inb(SEQADDR1 + base) << 8) | inb(SEQADDR0 + base));
+ }
+
+ if (intstat & SEQINT)
+ {
+ /*
+ * Although the sequencer is paused immediately on
+ * a SEQINT, an interrupt for a SCSIINT condition will have
+ * unpaused the sequencer before this point.
+ */
+ PAUSE_SEQUENCER(p);
+
+ scsi_id = (inb(SCSIID + base) >> 4) & 0x0F;
+ scratch_offset = scsi_id;
+ channel = 'A';
+ if (inb(SBLKCTL + base) & SELBUSB)
+ {
+ channel = 'B';
+ scratch_offset += 8;
+ }
+ target_mask = (0x01 << scratch_offset);
+
+ switch (intstat & SEQINT_MASK)
+ {
+ case BAD_PHASE:
+ panic("aic7xxx: (aic7xxx_isr) Unknown scsi bus phase.\n");
+ break;
+
+ case SEND_REJECT:
+ rej_byte = inb(REJBYTE + base);
+ if ((rej_byte & 0xF0) == 0x20)
+ {
+ scb_index = inb(SCBPTR + base);
+ scb = &(p->scb_array[scb_index]);
+ printk("aic7xxx: Warning - Tagged message received without identify."
+ "Disabling tagged commands for target %d channel %c.\n",
+ scsi_id, channel);
+ scb->cmd->device->tagged_supported = 0;
+ scb->cmd->device->tagged_queue = 0;
+ }
+ else
+ {
+ debug("aic7xxx: Warning - Rejecting unknown message (0x%x) received "
+ "from target %d channel %c.\n", rej_byte, scsi_id, channel);
+ }
+ break;
+
+ case NO_IDENT:
+ panic("aic7xxx: Target %d, channel %c, did not send an IDENTIFY "
+ "message. SAVED_TCL(0x%x).\n",
+ scsi_id, channel, inb(SAVED_TCL + base));
+ break;
+
+ case NO_MATCH:
+ printk("aic7xxx: No active SCB for reconnecting target %d, "
+ "channel %c - Issuing ABORT. SAVED_TCL(0x%x).\n",
+ scsi_id, channel, inb(SAVED_TCL + base));
+ aic7xxx_unbusy_target(scsi_id, channel, base);
+ outb(0, SCBARRAY + base);
+ outb(CLRSELTIMEO, CLRSINT1 + base);
+ RESTART_SEQUENCER(p);
+ break;
+
+ case SDTR_MSG:
+ /*
+ * Help the sequencer to translate the negotiated
+ * transfer rate. Transfer is 1/4 the period
+ * in ns as is returned by the sync negotiation
+ * message. So, we must multiply by four.
+ */
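+ /*
+ * For example, a period byte of 25 in the SDTR message means
+ * 25 * 4 = 100 ns, which aic7xxx_scsirate() maps to the "10.0" MHz
+ * entry in the rate table.
+ */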
+ transfer = (inb(ARG_1 + base) << 2);
+ offset = inb(ACCUM + base);
+ scratch = inb(TARG_SCRATCH + base + scratch_offset);
+ /*
+ * The maximum offset for a wide device is 0x08; for an
+ * 8-bit bus device the maximum offset is 0x0F.
+ */
+ if (scratch & WIDEXFER)
+ {
+ max_offset = 0x08;
+ }
+ else
+ {
+ max_offset = 0x0F;
+ }
+ aic7xxx_scsirate(p, &rate, transfer, MIN(offset, max_offset),
+ scsi_id, channel);
+ /*
+ * Preserve the wide transfer flag.
+ */
+ scratch = rate | (scratch & WIDEXFER);
+ outb(scratch, TARG_SCRATCH + base + scratch_offset);
+ outb(scratch, SCSIRATE + base);
+ if ((scratch & 0x0F) == 0)
+ { /*
+ * The requested rate was so low that asynchronous transfers
+ * are faster (not to mention the controller won't support
+ * them), so we issue a reject to ensure we go to asynchronous
+ * transfers.
+ */
+ outb(SEND_REJ, RETURN_1 + base);
+ }
+ else
+ {
+ /*
+ * See if we initiated Sync Negotiation
+ */
+ if (p->sdtr_pending & target_mask)
+ {
+ /*
+ * Don't send an SDTR back to the target.
+ */
+ outb(0, RETURN_1 + base);
+ }
+ else
+ {
+ /*
+ * Send our own SDTR in reply.
+ */
+ printk("aic7xxx: Sending SDTR!!\n");
+ outb(SEND_SDTR, RETURN_1 + base);
+ }
+ }
+ /*
+ * Clear the flags.
+ */
+ p->needsdtr &= ~target_mask;
+ p->sdtr_pending &= ~target_mask;
+ break;
+
+ case WDTR_MSG:
+ {
+ bus_width = inb(ARG_1 + base);
+ printk("aic7xxx: Received MSG_WDTR, Target %d, channel %c "
+ "needwdtr(0x%x).\n", scsi_id, channel, p->needwdtr);
+ scratch = inb(TARG_SCRATCH + base + scratch_offset);
+
+ if (p->wdtr_pending & target_mask)
+ {
+ /*
+ * Don't send a WDTR back to the target, since we asked first.
+ */
+ outb(0, RETURN_1 + base);
+ switch (bus_width)
+ {
+ case BUS_8_BIT:
+ scratch &= 0x7F;
+ break;
+
+ case BUS_16_BIT:
+ printk("aic7xxx: Target %d, channel %c, using 16 bit "
+ "transfers.\n", scsi_id, channel);
+ scratch |= 0x80;
+ break;
+
+ case BUS_32_BIT:
+ outb(SEND_REJ, RETURN_1 + base);
+ printk("aic7xxx: Target %d, channel %c, requesting 32 bit "
+ "transfers, rejecting...\n", scsi_id, channel);
+ break;
+ }
+ }
+ else
+ {
+ /*
+ * Send our own WDTR in reply.
+ */
+ printk("aic7xxx: Will send WDTR!!\n");
+ switch (bus_width)
+ {
+ case BUS_8_BIT:
+ scratch &= 0x7F;
+ break;
+
+ case BUS_32_BIT:
+ /*
+ * Negotiate 16 bits.
+ */
+ bus_width = BUS_16_BIT;
+ /* Yes, we mean to fall thru here. */
+
+ case BUS_16_BIT:
+ printk("aic7xxx: Target %d, channel %c, using 16 bit "
+ "transfers.\n", scsi_id, channel);
+ scratch |= 0x80;
+ break;
+ }
+ outb(bus_width | SEND_WDTR, RETURN_1 + base);
+ }
+ p->needwdtr &= ~target_mask;
+ p->wdtr_pending &= ~target_mask;
+ outb(scratch, TARG_SCRATCH + base + scratch_offset);
+ outb(scratch, SCSIRATE + base);
+ break;
+ }
+
+ case REJECT_MSG:
+ {
+ /*
+ * What we care about here is if we had an
+ * outstanding SDTR or WDTR message for this
+ * target. If we did, this is a signal that
+ * the target is refusing negotiation.
+ */
+
+ scratch = inb(TARG_SCRATCH + base + scratch_offset);
+
+ if (p->wdtr_pending & target_mask)
+ {
+ /*
+ * note 8bit xfers and clear flag
+ */
+ scratch &= 0x7F;
+ p->needwdtr &= ~target_mask;
+ p->wdtr_pending &= ~target_mask;
+ printk("aic7xxx: Target %d, channel %c, refusing WIDE negotiation. "
+ "Using 8 bit transfers.\n", scsi_id, channel);
+ }
+ else
+ {
+ if (p->sdtr_pending & target_mask)
+ {
+ /*
+ * note asynch xfers and clear flag
+ */
+ scratch &= 0xF0;
+ p->needsdtr &= ~target_mask;
+ p->sdtr_pending &= ~target_mask;
+ printk("aic7xxx: Target %d, channel %c, refusing synchronous "
+ "negotiation. Using asynchronous transfers.\n",
+ scsi_id, channel);
+ }
+ /*
+ * Otherwise, we ignore it.
+ */
+ }
+ outb(scratch, TARG_SCRATCH + base + scratch_offset);
+ outb(scratch, SCSIRATE + base);
+ break;
+ }
+
+ case BAD_STATUS:
+ scb_index = inb(SCBPTR + base);
+ scb = &(p->scb_array[scb_index]);
+ outb(0, RETURN_1 + base); /* CHECK_CONDITION may change this */
+ if (!(scb->state & SCB_ACTIVE) || (scb->cmd == NULL))
+ {
+ printk("aic7xxx: Referenced SCB not valid during SEQINT(0x%x) "
+ "scb(%d) state(0x%x) cmd(0x%x).\n",
+ intstat, scb_index, scb->state, (unsigned int) scb->cmd);
+ }
+ else
+ {
+ cmd = scb->cmd;
+ aic7xxx_getscb(p, scb);
+ aic7xxx_status(cmd) = scb->target_status;
+
+ cmd->result |= scb->target_status;
+
+ switch (status_byte(scb->target_status))
+ {
+ case GOOD:
+ printk("aic7xxx: Interrupted for status of GOOD???\n");
+ break;
+
+ case CHECK_CONDITION:
+ if ((aic7xxx_error(cmd) == 0) && !(cmd->flags & WAS_SENSE))
+ {
+ unsigned char tcl;
+ unsigned char control;
+ void *req_buf;
+
+ tcl = scb->target_channel_lun;
+
+ /*
+ * Send a sense command to the requesting target.
+ */
+ cmd->flags |= WAS_SENSE;
+ memcpy((void *) scb->sense_cmd, (void *) generic_sense,
+ sizeof(generic_sense));
+
+ scb->sense_cmd[1] = (cmd->lun << 5);
+ scb->sense_cmd[4] = sizeof(cmd->sense_buffer);
+
+ scb->sense_sg.address = (char *) &cmd->sense_buffer;
+ scb->sense_sg.length = sizeof(cmd->sense_buffer);
+ req_buf = &scb->sense_sg;
+ cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
+ control = scb->control;
+
+ memset(scb, 0, SCB_PIO_TRANSFER_SIZE);
+ scb->control = control & DISCENB;
+ scb->target_channel_lun = tcl;
+ addr = scb->sense_cmd;
+ scb->SCSI_cmd_length = COMMAND_SIZE(scb->sense_cmd[0]);
+ memcpy(scb->SCSI_cmd_pointer, &addr,
+ sizeof(scb->SCSI_cmd_pointer));
+ scb->SG_segment_count = 1;
+ memcpy(scb->SG_list_pointer, &req_buf,
+ sizeof(scb->SG_list_pointer));
+ scb->data_count = scb->sense_sg.length;
+ memcpy(scb->data_pointer, &(scb->sense_sg.address), 4);
+
+ aic7xxx_putscb(p, scb);
+ outb(SCB_LIST_NULL, SCB_NEXT_WAITING + base);
+ /*
+ * Ensure that the target is "BUSY" so we don't get overlapping
+ * commands if we happen to be doing tagged I/O.
+ */
+ aic7xxx_busy_target(scsi_id, channel, base);
+
+ aic7xxx_add_waiting_scb(base, scb, LIST_HEAD);
+ outb(SEND_SENSE, RETURN_1 + base);
+ } /* first time sense, no errors */
+
+ cmd->flags &= ~ASKED_FOR_SENSE;
+ if (aic7xxx_error(cmd) == 0)
+ {
+ aic7xxx_error(cmd) = DID_RETRY_COMMAND;
+ }
+ break;
+
+ case BUSY:
+ printk("aic7xxx: Target busy.\n");
+ if (!aic7xxx_error(cmd))
+ {
+ aic7xxx_error(cmd) = DID_BUS_BUSY;
+ }
+ break;
+
+ case QUEUE_FULL:
+ printk("aic7xxx: Queue full.\n");
+ if (!aic7xxx_error(cmd))
+ {
+ aic7xxx_error(cmd) = DID_RETRY_COMMAND;
+ }
+ break;
+
+ default:
+ printk("aic7xxx: Unexpected target status(0x%x).\n",
+ scb->target_status);
+ if (!aic7xxx_error(cmd))
+ {
+ aic7xxx_error(cmd) = DID_RETRY_COMMAND;
+ }
+ break;
+ } /* end switch */
+ } /* end else of */
+ break;
+
+ case RESIDUAL:
+ scb_index = inb(SCBPTR + base);
+ scb = &(p->scb_array[scb_index]);
+ if (!(scb->state & SCB_ACTIVE) || (scb->cmd == NULL))
+ {
+ printk("aic7xxx: Referenced SCB not valid during SEQINT(0x%x) "
+ "scb(%d) state(0x%x) cmd(0x%x).\n",
+ intstat, scb_index, scb->state, (unsigned int) scb->cmd);
+ }
+ else
+ {
+ cmd = scb->cmd;
+ /*
+ * Don't destroy valid residual information with
+ * residual coming from a check sense operation.
+ */
+ if (!(cmd->flags & WAS_SENSE))
+ {
+ /*
+ * We had an underflow. At this time, there's only
+ * one other driver that bothers to check for this,
+ * and cmd->underflow seems to be set rather half-
+ * heartedly in the higher-level SCSI code.
+ */
+ actual = aic7xxx_length(cmd, scb->residual_SG_segment_count);
+
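+        /*
+         * Subtract the 24-bit residual byte count left in the SCB
+         * (SCB_RESID_DCNT2:1:0) to get the number of bytes actually
+         * transferred.
+         */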
+ actual -= (inb(SCB_RESID_DCNT2 + base) << 16) |
+ (inb(SCB_RESID_DCNT1 + base) << 8) |
+ inb(SCB_RESID_DCNT0 + base);
+
+ if (actual < cmd->underflow)
+ {
+ printk("aic7xxx: Target %d underflow - "
+ "Wanted (at least) (%u) got(%u) count(%d).\n",
+ cmd->target, cmd->underflow, actual,
+ inb(SCB_RESID_SGCNT + base));
+ aic7xxx_error(cmd) = DID_RETRY_COMMAND;
+ aic7xxx_status(cmd) = scb->target_status;
+ }
+ }
+ }
+ break;
+
+ case ABORT_TAG:
+ scb_index = inb(SCBPTR + base);
+ scb = &(p->scb_array[scb_index]);
+ if (!(scb->state & SCB_ACTIVE) || (scb->cmd == NULL))
+ {
+ printk("aic7xxx: Referenced SCB not valid during SEQINT(0x%x) "
+ "scb(%d) state(0x%x) cmd(0x%x)\n",
+ intstat, scb_index, scb->state, (unsigned int) scb->cmd);
+ }
+ else
+ {
+ cmd = scb->cmd;
+ /*
+ * We didn't receive a valid tag back from the target
+ * on a reconnect.
+ */
+ printk("aic7xxx: Invalid tag received on target %d, channel %c, "
+ "lun %d - Sending ABORT_TAG.\n",
+ scsi_id, channel, cmd->lun & 0x07);
+
+ cmd->result = (DID_RETRY_COMMAND << 16);
+ aic7xxx_done(p, scb);
+ }
+ break;
+
+ case AWAITING_MSG:
+ scb_index = inb(SCBPTR + base);
+ scb = &(p->scb_array[scb_index]);
+ if (!(scb->state & SCB_ACTIVE) || (scb->cmd == NULL))
+ {
+ printk("aic7xxx: Referenced SCB not valid during SEQINT(0x%x) "
+ "scb(%d) state(0x%x) cmd(0x%x).\n",
+ intstat, scb_index, scb->state, (unsigned int) scb->cmd);
+ }
+ else
+ {
+ /*
+ * This SCB had a zero length command, informing the sequencer
+ * that we wanted to send a special message to this target.
+ * We only do this for BUS_DEVICE_RESET messages currently.
+ */
+ if (scb->state & SCB_DEVICE_RESET)
+ {
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (isr) sending bus device reset to target %d\n",
+ scsi_id);
+#endif
+ outb(MSG_BUS_DEVICE_RESET, MSG0 + base);
+ outb(1, MSG_LEN + base);
+ }
+ else
+ {
+ panic("aic7xxx: AWAITING_SCB for an SCB that does "
+ "not have a waiting message.\n");
+ }
+ }
+ break;
+
+ case IMMEDDONE:
+ scb_index = inb(SCBPTR + base);
+ scb = &(p->scb_array[scb_index]);
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (isr) received IMMEDDONE for target %d, scb %d, state %d\n",
+ scsi_id, scb_index, scb->state);
+#endif
+ if (scb->state & SCB_DEVICE_RESET)
+ {
+ int found;
+
+ /*
+        * Go back to async/narrow transfers and renegotiate.
+ */
+ aic7xxx_unbusy_target(scsi_id, channel, base);
+ p->needsdtr |= (p->needsdtr_copy & target_mask);
+ p->needwdtr |= (p->needwdtr_copy & target_mask);
+ p->sdtr_pending &= ~target_mask;
+ p->wdtr_pending &= ~target_mask;
+ scratch = inb(TARG_SCRATCH + base + scratch_offset);
+ scratch &= SXFR;
+ outb(scratch, TARG_SCRATCH + base + scratch_offset);
+ found = aic7xxx_reset_device(p, (int) scsi_id, channel, SCB_LIST_NULL);
+ }
+ else
+ {
+ panic("aic7xxx: Immediate complete for unknown operation.\n");
+ }
+ break;
+
+#if AIC7XXX_NOT_YET
+ /* XXX Fill these in later */
+ case MESG_BUFFER_BUSY:
+ break;
+ case MSGIN_PHASEMIS:
+ break;
+#endif
+
+ case PARITY_ERROR:
+ {
+ scb_index = inb(SCBPTR + base);
+ scb = &(p->scb_array[scb_index]);
+ if (!(scb->state & SCB_ACTIVE) || (scb->cmd == NULL))
+ {
+ printk("aic7xxx: Referenced SCB not valid during SEQINT(0x%x) "
+ "scb(%d) state(0x%x) cmd(0x%x).\n",
+ intstat, scb_index, scb->state, (unsigned int) scb->cmd);
+ }
+ else
+ {
+ char *phase;
+ unsigned char mesg_out = MSG_NOP;
+ unsigned char lastphase = inb(LASTPHASE + base);
+
+ cmd = scb->cmd;
+ switch (lastphase)
+ {
+ case P_DATAOUT:
+ phase = "Data-Out";
+ break;
+ case P_DATAIN:
+ phase = "Data-In";
+ mesg_out = MSG_INITIATOR_DET_ERROR;
+ break;
+ case P_COMMAND:
+ phase = "Command";
+ break;
+ case P_MESGOUT:
+ phase = "Message-Out";
+ break;
+ case P_STATUS:
+ phase = "Status";
+ mesg_out = MSG_INITIATOR_DET_ERROR;
+ break;
+ case P_MESGIN:
+ phase = "Message-In";
+ mesg_out = MSG_MSG_PARITY_ERROR;
+ break;
+ default:
+ phase = "unknown";
+ break;
+ }
+
+ /*
+ * A parity error has occurred during a data
+ * transfer phase. Flag it and continue.
+ */
+ printk("aic7xxx: Parity error during phase %s on target %d, "
+ "channel %d, lun %d.\n", phase,
+ cmd->target, cmd->channel & 0x01, cmd->lun & 0x07);
+
+ /*
+ * We've set the hardware to assert ATN if we get a parity
+ * error on "in" phases, so all we need to do is stuff the
+           * message buffer with the appropriate message.  "In" phases
+           * will have set mesg_out to something other than MSG_NOP.
+ */
+ if (mesg_out != MSG_NOP)
+ {
+ outb(mesg_out, MSG0 + base);
+ outb(1, MSG_LEN + base);
+ aic7xxx_error(cmd) = DID_PARITY;
+ }
+ else
+ {
+ /*
+ * Should we allow the target to make this decision for us?
+ */
+ aic7xxx_error(cmd) = DID_RETRY_COMMAND;
+ }
+ }
+ break;
+ }
+ default: /* unknown */
+ debug("aic7xxx: SEQINT, INTSTAT(0x%x) SCSISIGI(0x%x).\n",
+ intstat, inb(SCSISIGI + base));
+ break;
+ }
+
+ outb(CLRSEQINT, CLRINT + base);
+ UNPAUSE_SEQUENCER(p);
+ }
+
+ if (intstat & SCSIINT)
+ {
+ int status = inb(SSTAT1 + base);
+ scsi_id = (inb(SCSIID + base) >> 4) & 0x0F;
+ channel = 'A';
+ if (inb(SBLKCTL + base) & SELBUSB)
+ {
+ channel = 'B';
+ }
+
+ scb_index = inb(SCBPTR + base);
+ scb = &(p->scb_array[scb_index]);
+ if (!(scb->state & SCB_ACTIVE) || (scb->cmd == NULL))
+ {
+ printk("aic7xxx: No command for SCB (SCSIINT).\n");
+ /*
+ * Turn off the interrupt and set status
+ * to zero, so that it falls through the
+       * rest of the SCSIINT code.
+ */
+ outb(status, CLRSINT1 + base);
+ UNPAUSE_SEQUENCER(p);
+ outb(CLRSCSIINT, CLRINT + base);
+ scb = NULL;
+ }
+ else
+ {
+ cmd = scb->cmd;
+
+ /*
+ * Only the SCSI Status 1 register has information
+ * about exceptional conditions that we'd have a
+ * SCSIINT about; anything in SSTAT0 will be handled
+ * by the sequencer. Note that there can be multiple
+ * bits set.
+ */
+ if (status & SELTO)
+ {
+ unsigned char waiting;
+
+ /*
+ * Hardware selection timer has expired. Turn
+ * off SCSI selection sequence.
+ */
+ outb(ENRSELI, SCSISEQ + base);
+ cmd->result = (DID_TIME_OUT << 16);
+ /*
+         * Clear any pending messages for the timed-out
+ * target and mark the target as free.
+ */
+ ha_flags = inb(FLAGS + base);
+ outb(0, MSG_LEN + base);
+ aic7xxx_unbusy_target(scsi_id, channel, base);
+
+ outb(0, SCBARRAY + base);
+
+ /*
+ * Shut off the offending interrupt sources, reset
+ * the sequencer address to zero and unpause it,
+ * then call the high-level SCSI completion routine.
+ *
+ * WARNING! This is a magic sequence! After many
+ * hours of guesswork, turning off the SCSI interrupts
+ * in CLRSINT? does NOT clear the SCSIINT bit in
+ * INTSTAT. By writing to the (undocumented, unused
+ * according to the AIC-7770 manual) third bit of
+ * CLRINT, you can clear INTSTAT. But, if you do it
+ * while the sequencer is paused, you get a BRKADRINT
+ * with an Illegal Host Address status, so the
+ * sequencer has to be restarted first.
+ */
+ outb(CLRSELTIMEO, CLRSINT1 + base);
+
+ outb(CLRSCSIINT, CLRINT + base);
+
+ /*
+ * Shift the waiting for selection queue forward
+ */
+ waiting = inb(WAITING_SCBH + base);
+ outb(waiting, SCBPTR + base);
+ waiting = inb(SCB_NEXT_WAITING + base);
+ outb(waiting, WAITING_SCBH + base);
+
+ RESTART_SEQUENCER(p);
+ aic7xxx_done(p, scb);
+#if 0
+ printk("aic7xxx: SELTO SCB(%d) state(0x%x) cmd(0x%x).\n",
+ scb->position, scb->state, (unsigned int) scb->cmd);
+#endif
+ }
+ else
+ {
+ if (!(status & BUSFREE))
+ {
+ /*
+ * We don't know what's going on. Turn off the
+ * interrupt source and try to continue.
+ */
+ printk("aic7xxx: SSTAT1(0x%x).\n", status);
+ outb(status, CLRSINT1 + base);
+ UNPAUSE_SEQUENCER(p);
+ outb(CLRSCSIINT, CLRINT + base);
+ }
+ }
+ } /* else */
+ }
+
+ if (intstat & CMDCMPLT)
+ {
+ int complete;
+
+ /*
+ * The sequencer will continue running when it
+ * issues this interrupt. There may be >1 commands
+ * finished, so loop until we've processed them all.
+ */
+ do {
+ complete = inb(QOUTFIFO + base);
+
+ scb = &(p->scb_array[complete]);
+ if (!(scb->state & SCB_ACTIVE) || (scb->cmd == NULL))
+ {
+ printk("aic7xxx: Warning - No command for SCB %d (CMDCMPLT).\n"
+ " QOUTCNT(%d) SCB state(0x%x) cmd(0x%x) pos(%d).\n",
+ complete, inb(QOUTFIFO + base),
+ scb->state, (unsigned int) scb->cmd, scb->position);
+ outb(CLRCMDINT, CLRINT + base);
+ continue;
+ }
+ cmd = scb->cmd;
+
+ if ((cmd->flags & WAS_SENSE) && !(cmd->flags & ASKED_FOR_SENSE))
+ {
+ /*
+ * Got sense information.
+ */
+ cmd->flags &= ASKED_FOR_SENSE;
+ }
+#if 0
+ printk("aic7xxx: (complete) State(%d) cmd(0x%x) free(0x%x).\n",
+ scb->state, (unsigned int) scb->cmd, (unsigned int) p->free_scb);
+#endif
+
+ /*
+ * Clear interrupt status before checking
+ * the output queue again. This eliminates
+ * a race condition whereby a command could
+ * complete between the queue poll and the
+ * interrupt clearing, so notification of the
+ * command being complete never made it back
+ * up to the kernel.
+ */
+ outb(CLRCMDINT, CLRINT + base);
+ aic7xxx_done(p, scb);
+#if 0
+ if (scb != &p->scb_array[scb->position])
+ {
+ printk("aic7xxx: (complete) Address mismatch, pos(%d).\n", scb->position);
+ }
+ printk("aic7xxx: (complete) State(%d) cmd(0x%x) free(0x%x).\n",
+ scb->state, (unsigned int) scb->cmd, (unsigned int) p->free_scb);
+#endif
+
+#ifdef AIC7XXX_PROC_STATS
+ /*
+ * XXX: we should actually know how much actually transferred
+ * XXX: for each command, but apparently that's too difficult.
+ */
+ actual = aic7xxx_length(cmd, 0);
+ if (!(cmd->flags & WAS_SENSE) && (actual > 0))
+ {
+ struct aic7xxx_xferstats *sp;
+ long *ptr;
+ int x;
+
+ sp = &p->stats[cmd->channel & 0x01][cmd->target & 0x0F][cmd->lun & 0x07];
+ sp->xfers++;
+
+ if (cmd->request.cmd == WRITE)
+ {
+ sp->w_total++;
+ sp->w_total512 += (actual >> 9);
+ ptr = sp->w_bins;
+ }
+ else
+ {
+ sp->r_total++;
+ sp->r_total512 += (actual >> 9);
+ ptr = sp->r_bins;
+ }
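+        /*
+         * Bucket the transfer length by power of two: one bin each for
+         * transfers below 512 bytes up through those below 128K, with
+         * the final bin catching anything 128K and larger.
+         */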
+ for (x = 9; x <= 17; x++)
+ {
+ if (actual < (1 << x))
+ {
+ ptr[x - 9]++;
+ break;
+ }
+ }
+ if (x > 17)
+ {
+ ptr[x - 9]++;
+ }
+ }
+#endif /* AIC7XXX_PROC_STATS */
+
+ } while (inb(QOUTCNT + base));
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_probe
+ *
+ * Description:
+ * Probing for EISA boards: it looks like the first two bytes
+ * are a manufacturer code - three characters, five bits each:
+ *
+ * BYTE 0 BYTE 1 BYTE 2 BYTE 3
+ * ?1111122 22233333 PPPPPPPP RRRRRRRR
+ *
+ * The characters are baselined off ASCII '@', so add that value
+ * to each to get the real ASCII code for it. The next two bytes
+ * appear to be a product and revision number, probably vendor-
+ * specific. This is what is being searched for at each port,
+ * and what should probably correspond to the ID= field in the
+ * ECU's .cfg file for the card - if your card is not detected,
+ * make sure your signature is listed in the array.
+ *
+ * The fourth byte's lowest bit seems to be an enabled/disabled
+ * flag (rest of the bits are reserved?).
+ *-F*************************************************************************/
+static aha_type
+aic7xxx_probe(int slot, int base)
+{
+ int i;
+ unsigned char buf[4];
+
+ static struct {
+ int n;
+ unsigned char signature[sizeof(buf)];
+ aha_type type;
+ } AIC7xxx[] = {
+ { 4, { 0x04, 0x90, 0x77, 0x71 }, AIC_7771 }, /* host adapter 274x */
+ { 4, { 0x04, 0x90, 0x77, 0x70 }, AIC_7770 }, /* motherboard 7770 */
+ { 4, { 0x04, 0x90, 0x77, 0x56 }, AIC_284x }, /* 284x, BIOS enabled */
+ { 4, { 0x04, 0x90, 0x77, 0x57 }, AIC_284x } /* 284x, BIOS disabled */
+ };
+
+ /*
+ * The VL-bus cards need to be primed by
+ * writing before a signature check.
+ */
+ for (i = 0; i < sizeof(buf); i++)
+ {
+ outb(0x80 + i, base);
+ buf[i] = inb(base + i);
+ }
+
+ for (i = 0; i < NUMBER(AIC7xxx); i++)
+ {
+ /*
+ * Signature match on enabled card?
+ */
+ if (!memcmp(buf, AIC7xxx[i].signature, AIC7xxx[i].n))
+ {
+ if (inb(base + 4) & 1)
+ {
+ return (AIC7xxx[i].type);
+ }
+
+ printk("aic7xxx: Disabled at slot %d, ignored.\n", slot);
+ }
+ }
+
+ return (AIC_NONE);
+}
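+
+/*
+ * Illustrative sketch only (not used by the driver): decoding the three
+ * 5-bit characters of the EISA manufacturer code laid out above.  The
+ * helper name is hypothetical.  Applied to the 0x04/0x90 signatures in
+ * the table above, it yields "ADP".
+ */
+#if 0
+static void
+decode_eisa_mfg_id(unsigned char byte0, unsigned char byte1, char id[4])
+{
+  id[0] = '@' + ((byte0 >> 2) & 0x1F);                           /* 11111 */
+  id[1] = '@' + (((byte0 & 0x03) << 3) | ((byte1 >> 5) & 0x07)); /* 22222 */
+  id[2] = '@' + (byte1 & 0x1F);                                  /* 33333 */
+  id[3] = '\0';
+}
+#endif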
+
+/*+F*************************************************************************
+ * Function:
+ * read_2840_seeprom
+ *
+ * Description:
+ * Reads the 2840 serial EEPROM and returns 1 if successful and 0 if
+ * not successful.
+ *
+ * See read_seeprom (for the 2940) for the instruction set of the 93C46
+ * chip.
+ *
+ * The 2840 interface to the 93C46 serial EEPROM is through the
+ * STATUS_2840 and SEECTL_2840 registers. The CS_2840, CK_2840, and
+ * DO_2840 bits of the SEECTL_2840 register are connected to the chip
+ * select, clock, and data out lines respectively of the serial EEPROM.
+ * The DI_2840 bit of the STATUS_2840 is connected to the data in line
+ * of the serial EEPROM. The EEPROM_TF bit of STATUS_2840 register is
+ * useful in that it gives us an 800 nsec timer. After a read from the
+ *   SEECTL_2840 register the timing flag is cleared and goes high 800 nsec
+ * later.
+ *
+ *-F*************************************************************************/
+static int
+read_2840_seeprom(int base, struct seeprom_config *sc)
+{
+ int i = 0, k = 0;
+ unsigned char temp;
+ unsigned short checksum = 0;
+ unsigned short *seeprom = (unsigned short *) sc;
+ struct seeprom_cmd {
+ unsigned char len;
+ unsigned char bits[3];
+ };
+ struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
+
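+/*
+ * Wait for the 800 nsec EEPROM_TF timing flag to go high, then read
+ * SEECTL_2840, which clears the flag and restarts the timer.
+ */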
+#define CLOCK_PULSE(p) \
+ while ((inb(STATUS_2840 + base) & EEPROM_TF) == 0) \
+ { \
+ ; /* Do nothing */ \
+ } \
+ (void) inb(SEECTL_2840 + base);
+
+ /*
+ * Read the first 32 registers of the seeprom. For the 2840,
+ * the 93C46 SEEPROM is a 1024-bit device with 64 16-bit registers
+ * but only the first 32 are used by Adaptec BIOS. The loop
+ * will range from 0 to 31.
+ */
+ for (k = 0; k < (sizeof(*sc) / 2); k++)
+ {
+ /*
+ * Send chip select for one clock cycle.
+ */
+ outb(CK_2840 | CS_2840, SEECTL_2840 + base);
+ CLOCK_PULSE(base);
+
+ /*
+ * Now we're ready to send the read command followed by the
+ * address of the 16-bit register we want to read.
+ */
+ for (i = 0; i < seeprom_read.len; i++)
+ {
+ temp = CS_2840 | seeprom_read.bits[i];
+ outb(temp, SEECTL_2840 + base);
+ CLOCK_PULSE(base);
+ temp = temp ^ CK_2840;
+ outb(temp, SEECTL_2840 + base);
+ CLOCK_PULSE(base);
+ }
+ /*
+ * Send the 6 bit address (MSB first, LSB last).
+ */
+ for (i = 5; i >= 0; i--)
+ {
+ temp = k;
+ temp = (temp >> i) & 1; /* Mask out all but lower bit. */
+ temp = CS_2840 | temp;
+ outb(temp, SEECTL_2840 + base);
+ CLOCK_PULSE(base);
+ temp = temp ^ CK_2840;
+ outb(temp, SEECTL_2840 + base);
+ CLOCK_PULSE(base);
+ }
+
+ /*
+ * Now read the 16 bit register. An initial 0 precedes the
+ * register contents which begins with bit 15 (MSB) and ends
+ * with bit 0 (LSB). The initial 0 will be shifted off the
+ * top of our word as we let the loop run from 0 to 16.
+ */
+ for (i = 0; i <= 16; i++)
+ {
+ temp = CS_2840;
+ outb(temp, SEECTL_2840 + base);
+ CLOCK_PULSE(base);
+ temp = temp ^ CK_2840;
+ seeprom[k] = (seeprom[k] << 1) | (inb(STATUS_2840 + base) & DI_2840);
+ outb(temp, SEECTL_2840 + base);
+ CLOCK_PULSE(base);
+ }
+ /*
+ * The serial EEPROM has a checksum in the last word. Keep a
+ * running checksum for all words read except for the last
+ * word. We'll verify the checksum after all words have been
+ * read.
+ */
+ if (k < (sizeof(*sc) / 2) - 1)
+ {
+ checksum = checksum + seeprom[k];
+ }
+
+ /*
+ * Reset the chip select for the next command cycle.
+ */
+ outb(0, SEECTL_2840 + base);
+ CLOCK_PULSE(base);
+ outb(CK_2840, SEECTL_2840 + base);
+ CLOCK_PULSE(base);
+ outb(0, SEECTL_2840 + base);
+ CLOCK_PULSE(base);
+ }
+
+#if 0
+ printk("Computed checksum 0x%x, checksum read 0x%x\n", checksum, sc->checksum);
+ printk("Serial EEPROM:");
+ for (k = 0; k < (sizeof(*sc) / 2); k++)
+ {
+ if (((k % 8) == 0) && (k != 0))
+ {
+ printk("\n ");
+ }
+ printk(" 0x%x", seeprom[k]);
+ }
+ printk("\n");
+#endif
+
+ if (checksum != sc->checksum)
+ {
+ printk("aic7xxx: SEEPROM checksum error, ignoring SEEPROM settings.\n");
+ return (0);
+ }
+
+ return (1);
+#undef CLOCK_PULSE
+}
+
+/*+F*************************************************************************
+ * Function:
+ * read_seeprom
+ *
+ * Description:
+ * Reads the serial EEPROM and returns 1 if successful and 0 if
+ * not successful.
+ *
+ * The instruction set of the 93C46 chip is as follows:
+ *
+ * Start OP
+ * Function Bit Code Address Data Description
+ * -------------------------------------------------------------------
+ * READ 1 10 A5 - A0 Reads data stored in memory,
+ * starting at specified address
+ *     EWEN      1    00     11XXXX              Write enable must precede
+ * all programming modes
+ * ERASE 1 11 A5 - A0 Erase register A5A4A3A2A1A0
+ * WRITE 1 01 A5 - A0 D15 - D0 Writes register
+ * ERAL 1 00 10XXXX Erase all registers
+ * WRAL 1 00 01XXXX D15 - D0 Writes to all registers
+ * EWDS 1 00 00XXXX Disables all programming
+ * instructions
+ * *Note: A value of X for address is a don't care condition.
+ *
+ * The 93C46 has a four wire interface: clock, chip select, data in, and
+ * data out. In order to perform one of the above functions, you need
+ * to enable the chip select for a clock period (typically a minimum of
+ * 1 usec, with the clock high and low a minimum of 750 and 250 nsec
+ *   respectively).  While the chip select remains high, you can clock in
+ * the instructions (above) starting with the start bit, followed by the
+ * OP code, Address, and Data (if needed). For the READ instruction, the
+ *   requested 16-bit register contents are read from the data out line,
+ *   preceded by an initial zero (leading 0, followed by 16 bits, MSB
+ * first). The clock cycling from low to high initiates the next data
+ * bit to be sent from the chip.
+ *
+ * The 7870 interface to the 93C46 serial EEPROM is through the SEECTL
+ * register. After successful arbitration for the memory port, the
+ * SEECS bit of the SEECTL register is connected to the chip select.
+ * The SEECK, SEEDO, and SEEDI are connected to the clock, data out,
+ * and data in lines respectively. The SEERDY bit of SEECTL is useful
+ * in that it gives us an 800 nsec timer. After a write to the SEECTL
+ * register, the SEERDY goes high 800 nsec later. The one exception
+ * to this is when we first request access to the memory port. The
+ * SEERDY goes high to signify that access has been granted and, for
+ * this case, has no implied timing.
+ *
+ *-F*************************************************************************/
+static int
+read_seeprom(int base, int offset, struct seeprom_config *sc)
+{
+ int i = 0, k;
+ unsigned long timeout;
+ unsigned char temp;
+ unsigned short checksum = 0;
+ unsigned short *seeprom = (unsigned short *) sc;
+ struct seeprom_cmd {
+ unsigned char len;
+ unsigned char bits[3];
+ };
+ struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
+
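+/*
+ * Wait for SEERDY, which goes high 800 nsec after a write to the
+ * SEECTL register, before driving the next clock edge.
+ */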
+#define CLOCK_PULSE(p) \
+ while ((inb(SEECTL + base) & SEERDY) == 0) \
+ { \
+ ; /* Do nothing */ \
+ }
+
+ /*
+   * Request access to the memory port.  When access is
+   * granted, SEERDY will go high.  We use a 1 second
+   * timeout, which is far longer than should be needed:
+   * after the 7870 chip reset, there should be no
+   * contention for the memory port.
+ */
+ outb(SEEMS, SEECTL + base);
+ timeout = jiffies + 100; /* 1 second timeout */
+ while ((jiffies < timeout) && ((inb(SEECTL + base) & SEERDY) == 0))
+ {
+ ; /* Do nothing! Wait for access to be granted. */
+ }
+ if ((inb(SEECTL + base) & SEERDY) == 0)
+ {
+ outb(0, SEECTL + base);
+ return (0);
+ }
+
+ /*
+ * Read the first 32 registers of the seeprom. For the 7870,
+ * the 93C46 SEEPROM is a 1024-bit device with 64 16-bit registers
+ * but only the first 32 are used by Adaptec BIOS. The loop
+ * will range from 0 to 31.
+ */
+ for (k = 0; k < (sizeof(*sc) / 2); k++)
+ {
+ /*
+ * Send chip select for one clock cycle.
+ */
+ outb(SEEMS | SEECK | SEECS, SEECTL + base);
+ CLOCK_PULSE(base);
+
+ /*
+ * Now we're ready to send the read command followed by the
+ * address of the 16-bit register we want to read.
+ */
+ for (i = 0; i < seeprom_read.len; i++)
+ {
+ temp = SEEMS | SEECS | (seeprom_read.bits[i] << 1);
+ outb(temp, SEECTL + base);
+ CLOCK_PULSE(base);
+ temp = temp ^ SEECK;
+ outb(temp, SEECTL + base);
+ CLOCK_PULSE(base);
+ }
+ /*
+ * Send the 6 bit address (MSB first, LSB last).
+ */
+ for (i = 5; i >= 0; i--)
+ {
+ temp = k + offset;
+ temp = (temp >> i) & 1; /* Mask out all but lower bit. */
+ temp = SEEMS | SEECS | (temp << 1);
+ outb(temp, SEECTL + base);
+ CLOCK_PULSE(base);
+ temp = temp ^ SEECK;
+ outb(temp, SEECTL + base);
+ CLOCK_PULSE(base);
+ }
+
+ /*
+ * Now read the 16 bit register. An initial 0 precedes the
+ * register contents which begins with bit 15 (MSB) and ends
+ * with bit 0 (LSB). The initial 0 will be shifted off the
+ * top of our word as we let the loop run from 0 to 16.
+ */
+ for (i = 0; i <= 16; i++)
+ {
+ temp = SEEMS | SEECS;
+ outb(temp, SEECTL + base);
+ CLOCK_PULSE(base);
+ temp = temp ^ SEECK;
+ seeprom[k] = (seeprom[k] << 1) | (inb(SEECTL + base) & SEEDI);
+ outb(temp, SEECTL + base);
+ CLOCK_PULSE(base);
+ }
+
+ /*
+ * The serial EEPROM has a checksum in the last word. Keep a
+ * running checksum for all words read except for the last
+ * word. We'll verify the checksum after all words have been
+ * read.
+ */
+ if (k < (sizeof(*sc) / 2) - 1)
+ {
+ checksum = checksum + seeprom[k];
+ }
+
+ /*
+ * Reset the chip select for the next command cycle.
+ */
+ outb(SEEMS, SEECTL + base);
+ CLOCK_PULSE(base);
+ outb(SEEMS | SEECK, SEECTL + base);
+ CLOCK_PULSE(base);
+ outb(SEEMS, SEECTL + base);
+ CLOCK_PULSE(base);
+ }
+
+ /*
+ * Release access to the memory port and the serial EEPROM.
+ */
+ outb(0, SEECTL + base);
+
+#if 0
+ printk("Computed checksum 0x%x, checksum read 0x%x\n", checksum, sc->checksum);
+ printk("Serial EEPROM:");
+ for (k = 0; k < (sizeof(*sc) / 2); k++)
+ {
+ if (((k % 8) == 0) && (k != 0))
+ {
+ printk("\n ");
+ }
+ printk(" 0x%x", seeprom[k]);
+ }
+ printk("\n");
+#endif
+
+ if (checksum != sc->checksum)
+ {
+ printk("aic7xxx: SEEPROM checksum error, ignoring SEEPROM settings.\n");
+ return (0);
+ }
+
+ return (1);
+#undef CLOCK_PULSE
+}
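+
+/*
+ * Illustrative sketch only (not part of the driver): the bit stream that
+ * the loops above shift out for a 93C46 READ of register `addr`, per the
+ * instruction set documented before read_seeprom().  The helper and
+ * array names are hypothetical; the chip then answers with a leading 0
+ * followed by the 16 data bits, MSB first.
+ */
+#if 0
+static int
+build_93c46_read_bits(unsigned char addr, unsigned char bits[9])
+{
+  int i, n = 0;
+
+  bits[n++] = 1;                /* start bit */
+  bits[n++] = 1;                /* READ op code, first bit */
+  bits[n++] = 0;                /* READ op code, second bit */
+  for (i = 5; i >= 0; i--)      /* address A5 - A0, MSB first */
+  {
+    bits[n++] = (addr >> i) & 1;
+  }
+  return (n);                   /* 9 bits total */
+}
+#endif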
+
+/*+F*************************************************************************
+ * Function:
+ * detect_maxscb
+ *
+ * Description:
+ * Return the maximum number of SCB's allowed for a given controller.
+ *-F*************************************************************************/
+static int
+detect_maxscb(aha_type type, int base, int walk_scbs)
+{
+ unsigned char sblkctl_reg, scb_byte;
+ int maxscb = 0, i;
+
+ switch (type)
+ {
+ case AIC_7770:
+ case AIC_7771:
+ case AIC_284x:
+ /*
+ * Check for Rev C or E boards. Rev E boards can supposedly have
+ * more than 4 SCBs, while the Rev C boards are limited to 4 SCBs.
+ * Until we know how to access more than 4 SCBs for the Rev E chips,
+ * we limit them, along with the Rev C chips, to 4 SCBs.
+ *
+ * The Rev E boards have a read/write autoflush bit in the
+ * SBLKCTL register, while in the Rev C boards it is read only.
+ */
+ sblkctl_reg = inb(SBLKCTL + base) ^ AUTOFLUSHDIS;
+ outb(sblkctl_reg, SBLKCTL + base);
+ if (inb(SBLKCTL + base) == sblkctl_reg)
+ {
+ /*
+ * We detected a Rev E board.
+ */
+ printk("aic7xxx: %s Rev E and subsequent.\n", board_names[type]);
+ outb(sblkctl_reg ^ AUTOFLUSHDIS, SBLKCTL + base);
+ maxscb = 4;
+ }
+ else
+ {
+ printk("aic7xxx: %s Rev C and previous.\n", board_names[type]);
+ maxscb = 4;
+ }
+ break;
+
+ case AIC_7850:
+ maxscb = 3;
+ break;
+
+ case AIC_7870:
+ case AIC_7871:
+ case AIC_7874:
+ case AIC_7880:
+ case AIC_7881:
+ case AIC_7884:
+ maxscb = 16;
+ break;
+
+ case AIC_7872:
+ case AIC_7873:
+ case AIC_7882:
+ case AIC_7883:
+ /*
+      * These are supposed to have 255 SCBs, but we'll walk the SCBs
+ * looking for more if external RAM is detected.
+ */
+ maxscb = 16;
+ break;
+
+ case AIC_NONE:
+ /*
+ * This should never happen... But just in case.
+ */
+ break;
+ }
+
+ if (walk_scbs)
+ {
+ /*
+ * This adapter has external SCB memory.
+ * Walk the SCBs to determine how many there are.
+ */
+ i = 0;
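+    /*
+     * Test each SCB slot by complementing its first byte, writing it
+     * back, and reading it again; the first slot that fails the
+     * read-back marks the end of the external SCB RAM.
+     */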
+ while (i < AIC7XXX_MAXSCB)
+ {
+ outb(i, SCBPTR + base);
+ scb_byte = ~(inb(SCBARRAY + base)); /* complement the byte */
+ outb(scb_byte, SCBARRAY + base); /* write it back out */
+ if (inb(SCBARRAY + base) != scb_byte)
+ {
+ break;
+ }
+ i++;
+ }
+ maxscb = i;
+
+ printk("aic7xxx: Using %d SCB's after checking for SCB memory.\n", maxscb);
+ }
+ else
+ {
+ printk("aic7xxx: Using %d SCB's; No SCB memory check.\n", maxscb);
+ }
+
+ return (maxscb);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_register
+ *
+ * Description:
+ *   Register an Adaptec aic7xxx chip SCSI controller with the kernel.
+ *-F*************************************************************************/
+static int
+aic7xxx_register(Scsi_Host_Template *template,
+ struct aic7xxx_host_config *config)
+{
+ int i;
+ unsigned char sblkctl;
+ int max_targets;
+ int found = 1, base;
+ int bios_disabled = FALSE;
+ unsigned char target_settings;
+ unsigned char scsi_conf, host_conf;
+ int have_seeprom = FALSE;
+ struct Scsi_Host *host;
+ struct aic7xxx_host *p;
+ struct seeprom_config sc;
+
+ base = config->base;
+
+ /*
+ * Lock out other contenders for our i/o space.
+ */
+ request_region(MINREG + base, MAXREG - MINREG, "aic7xxx");
+
+ switch (config->type)
+ {
+ case AIC_7770:
+ case AIC_7771:
+ /*
+ * For some 274x boards, we must clear the CHIPRST bit
+ * and pause the sequencer. For some reason, this makes
+ * the driver work. For 284x boards, we give it a
+ * CHIPRST just like the 294x boards.
+ *
+ * Use the BIOS settings to determine the interrupt
+ * trigger type (level or edge) and use this value
+ * for pausing and unpausing the sequencer.
+ */
+ config->unpause = (inb(HCNTRL + base) & IRQMS) | INTEN;
+ config->pause = config->unpause | PAUSE;
+ config->extended = aic7xxx_extended;
+
+ outb(config->pause | CHIPRST, HCNTRL + base);
+ aic7xxx_delay(1);
+ if (inb(HCNTRL + base) & CHIPRST)
+ {
+ printk("aic7xxx: Chip reset not cleared; clearing manually.\n");
+ }
+ outb(config->pause, HCNTRL + base);
+
+ /*
+ * Just to be on the safe side with the 274x, we will re-read the irq
+ * since there was some issue about resetting the board.
+ */
+ config->irq = inb(INTDEF + base) & 0x0F;
+ if ((inb(HA_274_BIOSCTRL + base) & BIOSMODE) == BIOSDISABLED)
+ {
+ bios_disabled = TRUE;
+ }
+ host_conf = inb(HOSTCONF + base);
+ config->busrtime = host_conf & 0x3C;
+ /* XXX Is this valid for motherboard based controllers? */
+ /* Setup the FIFO threshold and the bus off time */
+ outb(host_conf & DFTHRSH, BUSSPD + base);
+ outb((host_conf << 2) & BOFF, BUSTIME + base);
+
+ /*
+ * A reminder until this can be detected automatically.
+ */
+ printk("aic7xxx: Extended translation %sabled.\n",
+ config->extended ? "en" : "dis");
+ break;
+
+ case AIC_284x:
+ outb(CHIPRST, HCNTRL + base);
+ config->unpause = UNPAUSE_284X;
+ config->pause = REQ_PAUSE; /* DWG would like to be like the rest */
+ aic7xxx_delay(1);
+ outb(config->pause, HCNTRL + base);
+
+ config->extended = aic7xxx_extended;
+ config->irq = inb(INTDEF + base) & 0x0F;
+ if ((inb(HA_274_BIOSCTRL + base) & BIOSMODE) == BIOSDISABLED)
+ {
+ bios_disabled = TRUE;
+ }
+ host_conf = inb(HOSTCONF + base);
+
+ printk("aic7xxx: Reading SEEPROM...");
+ have_seeprom = read_2840_seeprom(base, &sc);
+ if (!have_seeprom)
+ {
+ printk("aic7xxx: Unable to read SEEPROM.\n");
+ config->busrtime = host_conf & 0x3C;
+ }
+ else
+ {
+ printk("done.\n");
+ config->extended = ((sc.bios_control & CF284XEXTEND) >> 5);
+ config->scsi_id = (sc.brtime_id & CFSCSIID);
+ config->parity = (sc.adapter_control & CFSPARITY) ?
+ AIC_ENABLED : AIC_DISABLED;
+ config->low_term = (sc.adapter_control & CF284XSTERM) ?
+ AIC_ENABLED : AIC_DISABLED;
+ /*
+ * XXX - Adaptec *does* make 284x wide controllers, but the
+ * documents do not say where the high byte termination
+ * enable bit is located. For now, we'll just assume
+ * that it's in the same place as for the 2940 card.
+ */
+ config->high_term = (sc.adapter_control & CFWSTERM) ?
+ AIC_ENABLED : AIC_DISABLED;
+ config->busrtime = ((sc.brtime_id & CFBRTIME) >> 8);
+ }
+ /* XXX Is this valid for motherboard based controllers? */
+ /* Setup the FIFO threshold and the bus off time */
+ outb(host_conf & DFTHRSH, BUSSPD + base);
+ outb((host_conf << 2) & BOFF, BUSTIME + base);
+
+ printk("aic7xxx: Extended translation %sabled.\n",
+ config->extended ? "en" : "dis");
+ break;
+
+ case AIC_7850:
+ case AIC_7870:
+ case AIC_7871:
+ case AIC_7872:
+ case AIC_7873:
+ case AIC_7874:
+ case AIC_7880:
+ case AIC_7881:
+ case AIC_7882:
+ case AIC_7883:
+ case AIC_7884:
+ outb(CHIPRST, HCNTRL + base);
+ config->unpause = UNPAUSE_294X;
+ config->pause = config->unpause | PAUSE;
+ aic7xxx_delay(1);
+ outb(config->pause, HCNTRL + base);
+
+ config->extended = aic7xxx_extended;
+ config->scsi_id = 7;
+
+ printk("aic7xxx: Reading SEEPROM...");
+ have_seeprom = read_seeprom(base, config->chan_num * (sizeof(sc) / 2), &sc);
+ if (!have_seeprom)
+ {
+ printk("aic7xxx: Unable to read SEEPROM.\n");
+ }
+ else
+ {
+ printk("done.\n");
+ config->extended = ((sc.bios_control & CFEXTEND) >> 7);
+ config->scsi_id = (sc.brtime_id & CFSCSIID);
+ config->parity = (sc.adapter_control & CFSPARITY) ?
+ AIC_ENABLED : AIC_DISABLED;
+ config->low_term = (sc.adapter_control & CFSTERM) ?
+ AIC_ENABLED : AIC_DISABLED;
+ config->high_term = (sc.adapter_control & CFWSTERM) ?
+ AIC_ENABLED : AIC_DISABLED;
+ config->busrtime = ((sc.brtime_id & CFBRTIME) >> 8);
+ if (((config->type == AIC_7880) || (config->type == AIC_7882) ||
+ (config->type == AIC_7883) || (config->type == AIC_7884)) &&
+ (sc.adapter_control & CFULTRAEN))
+ {
+ printk ("aic7xxx: Enabling support for Ultra SCSI speed.\n");
+ config->ultra_enabled = TRUE;
+ }
+ }
+
+ /*
+ * XXX - force data fifo threshold to 100%. Why does this
+ * need to be done?
+ *
+ * We don't know where this is set in the SEEPROM or by the BIOS,
+ * so we default it to 100%.
+ */
+ outb(config->scsi_id | DFTHRSH_100, SCSICONF + base);
+ outb(DFTHRSH_100, DSPCISTATUS + base);
+
+ /*
+ * In case we are a wide card, place scsi ID in second conf byte.
+ */
+ outb(config->scsi_id, (SCSICONF + base + 1));
+
+ printk("aic7xxx: Extended translation %sabled.\n",
+ config->extended ? "en" : "dis");
+ break;
+
+ default:
+ panic("aic7xxx: (aic7xxx_register) Internal error.\n");
+ }
+
+ config->maxscb = detect_maxscb(config->type, base, config->walk_scbs);
+
+ if (config->chip_type == AIC_777x)
+ {
+ if (config->pause & IRQMS)
+ {
+ printk("aic7xxx: Using level sensitive interrupts.\n");
+ }
+ else
+ {
+ printk("aic7xxx: Using edge triggered interrupts.\n");
+ }
+ }
+
+ /*
+ * Read the bus type from the SBLKCTL register. Set the FLAGS
+ * register in the sequencer for twin and wide bus cards.
+ */
+ sblkctl = inb(SBLKCTL + base);
+ switch (sblkctl & SELBUS_MASK)
+ {
+ case SELNARROW: /* narrow/normal bus */
+ config->scsi_id = inb(SCSICONF + base) & 0x07;
+ config->bus_type = AIC_SINGLE;
+ outb(SINGLE_BUS, FLAGS + base);
+ break;
+
+ case SELWIDE: /* Wide bus */
+ config->scsi_id = inb(SCSICONF + base + 1) & 0x0F;
+ config->bus_type = AIC_WIDE;
+ printk("aic7xxx: Enabling wide channel of %s-Wide.\n",
+ board_names[config->type]);
+ outb(WIDE_BUS, FLAGS + base);
+ break;
+
+ case SELBUSB: /* Twin bus */
+ config->scsi_id = inb(SCSICONF + base) & 0x07;
+#ifdef AIC7XXX_TWIN_SUPPORT
+ config->scsi_id_b = inb(SCSICONF + base + 1) & 0x07;
+ config->bus_type = AIC_TWIN;
+ printk("aic7xxx: Enabled channel B of %s-Twin.\n",
+ board_names[config->type]);
+ outb(TWIN_BUS, FLAGS + base);
+#else
+ config->bus_type = AIC_SINGLE;
+ printk("aic7xxx: Channel B of %s-Twin will be ignored.\n",
+ board_names[config->type]);
+ outb(0, FLAGS + base);
+#endif
+ break;
+
+ default:
+ printk("aic7xxx: Unsupported type 0x%x, please "
+ "mail deang@ims.com\n", inb(SBLKCTL + base));
+ outb(0, FLAGS + base);
+ return (0);
+ }
+
+ /*
+   * For the 294x cards, clearing DIAGLEDEN and DIAGLEDON will
+   * take the card out of diagnostic mode and make the host adapter
+ * LED follow bus activity (will not always be on).
+ */
+ outb(sblkctl & ~(DIAGLEDEN | DIAGLEDON), SBLKCTL + base);
+
+ /*
+ * The IRQ level in i/o port 4 maps directly onto the real
+ * IRQ number. If it's ok, register it with the kernel.
+ *
+ * NB. the Adaptec documentation says the IRQ number is only
+ * in the lower four bits; the ECU information shows the
+ * high bit being used as well. Which is correct?
+ *
+ * The PCI cards get their interrupt from PCI BIOS.
+ */
+ if ((config->chip_type == AIC_777x) && ((config->irq < 9) || (config->irq > 15)))
+ {
+ printk("aic7xxx: Host adapter uses unsupported IRQ level, ignoring.\n");
+ return (0);
+ }
+
+ /*
+ * Check the IRQ to see if it is shared by another aic7xxx
+ * controller. If it is and sharing of IRQs is not defined,
+ * then return 0 hosts found. If sharing of IRQs is allowed
+ * or the IRQ is not shared by another host adapter, then
+ * proceed.
+ */
+#ifndef AIC7XXX_SHARE_IRQS
+ if (aic7xxx_boards[config->irq] != NULL)
+ {
+ printk("aic7xxx: Sharing of IRQ's is not configured.\n");
+ return (0);
+ }
+#endif
+
+ /*
+ * Print out debugging information before re-enabling
+ * the card - a lot of registers on it can't be read
+ * when the sequencer is active.
+ */
+ debug_config(config);
+
+ /*
+ * Before registry, make sure that the offsets of the
+ * struct scatterlist are what the sequencer will expect,
+ * otherwise disable scatter-gather altogether until someone
+ * can fix it. This is important since the sequencer will
+ * DMA elements of the SG array in while executing commands.
+ */
+ if (template->sg_tablesize != SG_NONE)
+ {
+ struct scatterlist sg;
+
+ if (SG_STRUCT_CHECK(sg))
+ {
+ printk("aic7xxx: Warning - Kernel scatter-gather structures changed, "
+ "disabling it.\n");
+ template->sg_tablesize = SG_NONE;
+ }
+ }
+
+ /*
+ * Register each "host" and fill in the returned Scsi_Host
+ * structure as best we can. Some of the parameters aren't
+ * really relevant for bus types beyond ISA, and none of the
+ * high-level SCSI code looks at it anyway. Why are the fields
+ * there? Also save the pointer so that we can find the
+ * information when an IRQ is triggered.
+ */
+ host = scsi_register(template, sizeof(struct aic7xxx_host));
+ host->can_queue = config->maxscb;
+ host->cmd_per_lun = AIC7XXX_CMDS_PER_LUN;
+ host->this_id = config->scsi_id;
+ host->irq = config->irq;
+ if (config->bus_type == AIC_WIDE)
+ {
+ host->max_id = 16;
+ }
+ if (config->bus_type == AIC_TWIN)
+ {
+ host->max_channel = 1;
+ }
+
+ p = (struct aic7xxx_host *) host->hostdata;
+
+ /*
+ * Initialize the scb array by setting the state to free.
+ */
+ for (i = 0; i < AIC7XXX_MAXSCB; i++)
+ {
+ p->scb_array[i].state = SCB_FREE;
+ p->scb_array[i].next = NULL;
+ p->scb_array[i].cmd = NULL;
+ }
+
+ p->isr_count = 0;
+ p->a_scanned = FALSE;
+ p->b_scanned = FALSE;
+ p->base = base;
+ p->maxscb = config->maxscb;
+ p->numscb = 0;
+ p->extended = config->extended;
+ p->type = config->type;
+ p->chip_type = config->chip_type;
+ p->ultra_enabled = config->ultra_enabled;
+ p->chan_num = config->chan_num;
+ p->bus_type = config->bus_type;
+ p->have_seeprom = have_seeprom;
+ p->seeprom = sc;
+ p->free_scb = NULL;
+ p->next = NULL;
+
+ p->unpause = config->unpause;
+ p->pause = config->pause;
+
+ if (aic7xxx_boards[config->irq] == NULL)
+ {
+ /*
+ * Warning! This must be done before requesting the irq. It is
+ * possible for some boards to raise an interrupt as soon as
+ * they are enabled. So when we request the irq from the Linux
+ * kernel, an interrupt is triggered immediately. Therefore, we
+ * must ensure the board data is correctly set before the request.
+ */
+ aic7xxx_boards[config->irq] = host;
+
+ /*
+ * Register IRQ with the kernel.
+ */
+ if (request_irq(config->irq, aic7xxx_isr, SA_INTERRUPT, "aic7xxx"))
+ {
+ printk("aic7xxx: Couldn't register IRQ %d, ignoring.\n", config->irq);
+ aic7xxx_boards[config->irq] = NULL;
+ return (0);
+ }
+ }
+ else
+ {
+ /*
+ * We have found a host adapter sharing an IRQ of a previously
+ * registered host adapter. Add this host adapter's Scsi_Host
+ * to the beginning of the linked list of hosts at the same IRQ.
+ */
+ p->next = aic7xxx_boards[config->irq];
+ aic7xxx_boards[config->irq] = host;
+ }
+
+ /*
+ * Load the sequencer program, then re-enable the board -
+ * resetting the AIC-7770 disables it, leaving the lights
+ * on with nobody home. On the PCI bus you *may* be home,
+ * but then your mailing address is dynamically assigned
+ * so no one can find you anyway :-)
+ */
+ printk("aic7xxx: Downloading sequencer code...");
+ aic7xxx_loadseq(base);
+
+ /*
+ * Set Fast Mode and Enable the board
+ */
+ outb(FASTMODE, SEQCTL + base);
+
+ if (p->chip_type == AIC_777x)
+ {
+ outb(ENABLE, BCTL + base);
+ }
+
+ printk("done.\n");
+
+ /*
+ * Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels
+ */
+ if (p->bus_type == AIC_TWIN)
+ {
+ /*
+ * Select Channel B.
+ */
+ outb((sblkctl & ~SELBUS_MASK) | SELBUSB, SBLKCTL + base);
+
+ outb(config->scsi_id_b, SCSIID + base);
+ scsi_conf = inb(SCSICONF + base + 1) & (ENSPCHK | STIMESEL);
+ outb(scsi_conf | ENSTIMER | ACTNEGEN | STPWEN, SXFRCTL1 + base);
+    outb(ENSELTIMO, SIMODE1 + base);
+ if (p->ultra_enabled)
+ {
+ outb(DFON | SPIOEN | ULTRAEN, SXFRCTL0 + base);
+ }
+ else
+ {
+ outb(DFON | SPIOEN, SXFRCTL0 + base);
+ }
+
+ /*
+ * Select Channel A
+ */
+ outb((sblkctl & ~SELBUS_MASK) | SELNARROW, SBLKCTL + base);
+ }
+ outb(config->scsi_id, SCSIID + base);
+ scsi_conf = inb(SCSICONF + base) & (ENSPCHK | STIMESEL);
+ outb(scsi_conf | ENSTIMER | ACTNEGEN | STPWEN, SXFRCTL1 + base);
+  outb(ENSELTIMO, SIMODE1 + base);
+ if (p->ultra_enabled)
+ {
+ outb(DFON | SPIOEN | ULTRAEN, SXFRCTL0 + base);
+ }
+ else
+ {
+ outb(DFON | SPIOEN, SXFRCTL0 + base);
+ }
+
+ /*
+ * Look at the information that board initialization or the board
+ * BIOS has left us. In the lower four bits of each target's
+ * scratch space any value other than 0 indicates that we should
+ * initiate synchronous transfers. If it's zero, the user or the
+ * BIOS has decided to disable synchronous negotiation to that
+ * target so we don't activate the needsdtr flag.
+ */
+ p->needsdtr_copy = 0x0;
+ p->sdtr_pending = 0x0;
+ p->needwdtr_copy = 0x0;
+ p->wdtr_pending = 0x0;
+ if (p->bus_type == AIC_SINGLE)
+ {
+ max_targets = 8;
+ }
+ else
+ {
+ max_targets = 16;
+ }
+
+ /*
+ * Grab the disconnection disable table and invert it for our needs
+ */
+ if (have_seeprom)
+ {
+ p->discenable = 0x0;
+ }
+ else
+ {
+ if (bios_disabled)
+ {
+ printk("aic7xxx : Host adapter BIOS disabled. Using default SCSI "
+ "device parameters.\n");
+ p->discenable = 0xFFFF;
+ }
+ else
+ {
+ p->discenable = ~((inb(DISC_DSB + base + 1) << 8) |
+ inb(DISC_DSB + base));
+ }
+ }
+
+ for (i = 0; i < max_targets; i++)
+ {
+ if (have_seeprom)
+ {
+ target_settings = ((sc.device_flags[i] & CFXFER) << 4);
+ if (sc.device_flags[i] & CFSYNCH)
+ {
+ p->needsdtr_copy |= (0x01 << i);
+ }
+ if (sc.device_flags[i] & CFWIDEB)
+ {
+ p->needwdtr_copy |= (0x01 << i);
+ }
+ if (sc.device_flags[i] & CFDISC)
+ {
+ p->discenable |= (0x01 << i);
+ }
+ }
+ else
+ {
+ if (bios_disabled)
+ {
+ target_settings = 0; /* 10 MHz */
+ p->needsdtr_copy |= (0x01 << i);
+ p->needwdtr_copy |= (0x01 << i);
+ }
+ else
+ {
+ target_settings = inb(TARG_SCRATCH + base + i);
+ if (target_settings & 0x0F)
+ {
+ p->needsdtr_copy |= (0x01 << i);
+ /*
+ * Default to asynchronous transfers (0 offset)
+ */
+ target_settings &= 0xF0;
+ }
+ if (target_settings & 0x80)
+ {
+ p->needwdtr_copy |= (0x01 << i);
+ target_settings &= 0x7F;
+ }
+ }
+ }
+ outb(target_settings, (TARG_SCRATCH + base + i));
+ }
+
+ /*
+ * If we are not wide, forget WDTR. This makes the driver
+ * work on some cards that don't leave these fields cleared
+ * when BIOS is not installed.
+ */
+ if (p->bus_type != AIC_WIDE)
+ {
+ p->needwdtr = 0;
+ }
+ p->needsdtr = p->needsdtr_copy;
+ p->needwdtr = p->needwdtr_copy;
+#if 0
+ printk("NeedSdtr = 0x%x, 0x%x\n", p->needsdtr_copy, p->needsdtr);
+ printk("NeedWdtr = 0x%x, 0x%x\n", p->needwdtr_copy, p->needwdtr);
+#endif
+
+ /*
+ * Clear the control byte for every SCB so that the sequencer
+ * doesn't get confused and think that one of them is valid
+ */
+ for (i = 0; i < config->maxscb; i++)
+ {
+ outb(i, SCBPTR + base);
+ outb(0, SCBARRAY + base);
+ }
+
+ /*
+ * For reconnecting targets, the sequencer code needs to
+ * know how many SCBs it has to search through.
+ */
+ outb(config->maxscb, SCBCOUNT + base);
+
+ /*
+   * 2's complement of SCBCOUNT
+ */
+ i = p->maxscb;
+ outb(-i & 0xff, COMP_SCBCOUNT + base);
+
+ /*
+ * Clear the active flags - no targets are busy.
+ */
+ outb(0, ACTIVE_A + base);
+ outb(0, ACTIVE_B + base);
+
+ /*
+ * We don't have any waiting selections
+ */
+ outb(SCB_LIST_NULL, WAITING_SCBH + base);
+ outb(SCB_LIST_NULL, WAITING_SCBT + base);
+
+ /*
+ * Reset the SCSI bus. Is this necessary?
+ * There may be problems for a warm boot without resetting
+ * the SCSI bus. Either BIOS settings in scratch RAM
+ * will not get reinitialized, or devices may stay at
+ * previous negotiated settings (SDTR and WDTR) while
+ * the driver will think that no negotiations have been
+ * performed.
+ *
+ * Some devices need a long time to "settle" after a SCSI
+ * bus reset.
+ */
+
+ if (!aic7xxx_no_reset)
+ {
+ printk("aic7xxx: Resetting the SCSI bus...");
+ if (p->bus_type == AIC_TWIN)
+ {
+ /*
+ * Select Channel B.
+ */
+ outb((sblkctl & ~SELBUS_MASK) | SELBUSB, SBLKCTL + base);
+
+ outb(SCSIRSTO, SCSISEQ + base);
+ udelay(1000);
+ outb(0, SCSISEQ + base);
+
+ /*
+ * Select Channel A.
+ */
+ outb((sblkctl & ~SELBUS_MASK) | SELNARROW, SBLKCTL + base);
+ }
+
+ outb(SCSIRSTO, SCSISEQ + base);
+ udelay(1000);
+ outb(0, SCSISEQ + base);
+
+ aic7xxx_delay(AIC7XXX_RESET_DELAY);
+
+ printk("done.\n");
+ }
+
+ /*
+ * Unpause the sequencer before returning and enable
+ * interrupts - we shouldn't get any until the first
+ * command is sent to us by the high-level SCSI code.
+ */
+ UNPAUSE_SEQUENCER(p);
+ return (found);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_detect
+ *
+ * Description:
+ * Try to detect and register an Adaptec 7770 or 7870 SCSI controller.
+ *-F*************************************************************************/
+int
+aic7xxx_detect(Scsi_Host_Template *template)
+{
+ int found = 0, slot, base;
+ unsigned char irq = 0;
+ int i;
+ struct aic7xxx_host_config config;
+
+ template->proc_dir = &proc_scsi_aic7xxx;
+ config.chan_num = 0;
+
+ /*
+ * Since we may allow sharing of IRQs, it is imperative
+ * that we "null-out" the aic7xxx_boards array. It is
+ * not guaranteed to be initialized to 0 (NULL). We use
+ * a NULL entry to indicate that no prior hosts have
+ * been found/registered for that IRQ.
+ */
+ for (i = 0; i <= MAXIRQ; i++)
+ {
+ aic7xxx_boards[i] = NULL;
+ }
+
+ /*
+ * Initialize the spurious count to 0.
+ */
+ aic7xxx_spurious_count = 0;
+
+ /*
+ * EISA/VL-bus card signature probe.
+ */
+ for (slot = MINSLOT; slot <= MAXSLOT; slot++)
+ {
+ base = SLOTBASE(slot) + MINREG;
+
+ if (check_region(MINREG + base, MAXREG - MINREG))
+ {
+ /*
+ * Some other driver has staked a
+ * claim to this i/o region already.
+ */
+ continue;
+ }
+
+ config.type = aic7xxx_probe(slot, HID0 + base);
+ if (config.type != AIC_NONE)
+ {
+ /*
+ * We found a card, allow 1 spurious interrupt.
+ */
+ aic7xxx_spurious_count = 1;
+
+ /*
+ * We "find" a AIC-7770 if we locate the card
+ * signature and we can set it up and register
+ * it with the kernel without incident.
+ */
+ config.chip_type = AIC_777x;
+ config.base = base;
+ config.irq = irq;
+ config.parity = AIC_UNKNOWN;
+ config.low_term = AIC_UNKNOWN;
+ config.high_term = AIC_UNKNOWN;
+ config.busrtime = 0;
+ config.walk_scbs = FALSE;
+ config.ultra_enabled = FALSE;
+ found += aic7xxx_register(template, &config);
+
+ /*
+ * Disallow spurious interrupts.
+ */
+ aic7xxx_spurious_count = 0;
+ }
+ }
+
+#ifdef CONFIG_PCI
+ /*
+ * PCI-bus probe.
+ */
+ if (pcibios_present())
+ {
+ struct
+ {
+ unsigned short vendor_id;
+ unsigned short device_id;
+ aha_type card_type;
+ aha_chip_type chip_type;
+ } const aic7xxx_pci_devices[] = {
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7850, AIC_7850, AIC_785x},
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7870, AIC_7870, AIC_787x},
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7871, AIC_7871, AIC_787x},
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7872, AIC_7872, AIC_787x},
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7873, AIC_7873, AIC_787x},
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7874, AIC_7874, AIC_787x},
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7880, AIC_7880, AIC_788x},
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7881, AIC_7881, AIC_788x},
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7882, AIC_7882, AIC_788x},
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7883, AIC_7883, AIC_788x},
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7884, AIC_7884, AIC_788x}
+ };
+
+ int error;
+ int done = 0;
+ unsigned int io_port;
+ unsigned short index = 0;
+ unsigned char pci_bus, pci_device_fn;
+ unsigned int csize_lattime;
+ unsigned int class_revid;
+ unsigned int devconfig;
+ char rev_id[] = {'B', 'C', 'D'};
+
+ for (i = 0; i < NUMBER(aic7xxx_pci_devices); i++)
+ {
+ done = FALSE;
+ while (!done)
+ {
+ if (pcibios_find_device(aic7xxx_pci_devices[i].vendor_id,
+ aic7xxx_pci_devices[i].device_id,
+ index, &pci_bus, &pci_device_fn))
+ {
+ done = TRUE;
+ }
+ else /* Found an Adaptec PCI device. */
+ {
+ config.type = aic7xxx_pci_devices[i].card_type;
+ config.chip_type = aic7xxx_pci_devices[i].chip_type;
+ config.chan_num = 0;
+ config.walk_scbs = FALSE;
+ switch (config.type)
+ {
+ case AIC_7872: /* 3940 */
+ case AIC_7882: /* 3940-Ultra */
+ config.walk_scbs = TRUE;
+ config.chan_num = number_of_39xxs & 0x1; /* Has 2 controllers */
+ number_of_39xxs++;
+ if (number_of_39xxs == 2)
+ {
+ number_of_39xxs = 0; /* To be consistent with 3985. */
+ }
+ break;
+
+ case AIC_7873: /* 3985 */
+ case AIC_7883: /* 3985-Ultra */
+ config.chan_num = number_of_39xxs & 0x3; /* Has 3 controllers */
+ number_of_39xxs++;
+ if (number_of_39xxs == 3)
+ {
+ number_of_39xxs = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+           * Read sundry information from the PCI BIOS.
+ */
+ error = pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &io_port);
+ error += pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &irq);
+
+ /*
+ * Ensure that we are using good values for the PCI burst size
+ * and latency timer.
+ */
+ error += pcibios_read_config_dword(pci_bus, pci_device_fn,
+ CSIZE_LATTIME, &csize_lattime);
+ if ((csize_lattime & CACHESIZE) == 0)
+ {
+ /* Default to 8DWDs - what's the PCI define for this? */
+ csize_lattime |= 8;
+ }
+ if((csize_lattime & LATTIME) == 0)
+ {
+ /* Default to 64 PCLKS (is this a good value?) */
+            /* This may also be available in the SEEPROM?? */
+ csize_lattime |= (64 << 8);
+ }
+ pcibios_write_config_dword(pci_bus, pci_device_fn,
+ CSIZE_LATTIME, csize_lattime);
+ printk("aic7xxx: BurstLen = %d DWDs, Latency Timer = %d PCLKS\n",
+ (int) (csize_lattime & CACHESIZE),
+ (csize_lattime >> 8) & 0x000000ff);
+
+ error += pcibios_read_config_dword(pci_bus, pci_device_fn,
+ CLASS_PROGIF_REVID, &class_revid);
+ if ((class_revid & DEVREVID) < 3)
+ {
+ printk("aic7xxx: %s Rev %c.\n", board_names[config.type],
+ rev_id[class_revid & DEVREVID]);
+ }
+
+ error += pcibios_read_config_dword(pci_bus, pci_device_fn,
+ DEVCONFIG, &devconfig);
+ if (error)
+ {
+ panic("aic7xxx: (aic7xxx_detect) Error %d reading PCI registers.\n",
+ error);
+ }
+
+ printk("aic7xxx: devconfig = 0x%x.\n", devconfig);
+
+ /*
+ * The first bit of PCI_BASE_ADDRESS_0 is always set, so
+ * we mask it off.
+ */
+ base = io_port & 0xfffffffe;
+
+ /*
+ * I don't think we need to bother with allowing
+ * spurious interrupts for the 787x/7850, but what
+ * the hey.
+ */
+ aic7xxx_spurious_count = 1;
+
+ config.base = base;
+ config.irq = irq;
+ config.parity = AIC_UNKNOWN;
+ config.low_term = AIC_UNKNOWN;
+ config.high_term = AIC_UNKNOWN;
+ config.busrtime = 0;
+ config.ultra_enabled = FALSE;
+ if (devconfig & RAMPSM)
+ {
+ /*
+ * External SRAM present. Have the probe walk the SCBs to see
+ * how much SRAM we have and set the number of SCBs accordingly.
+ * We have to turn off SCBRAMSEL to access the external SCB
+ * SRAM.
+ *
+ * It seems that early versions of the aic7870 didn't use these
+ * bits, hence the hack for the 3940 above. I would guess that
+ * recent 3940s using later aic7870 or aic7880 chips do actually
+ * set RAMPSM.
+ *
+ * The documentation isn't clear, but it sounds like the value
+ * written to devconfig must not have RAMPSM set. The second
+ * sixteen bits of the register are R/O anyway, so it shouldn't
+ * affect RAMPSM either way.
+ */
+ printk ("aic7xxx: External RAM detected. Enabling RAM access.\n");
+ devconfig &= ~(RAMPSM | SCBRAMSEL);
+ pcibios_write_config_dword(pci_bus, pci_device_fn,
+ DEVCONFIG, devconfig);
+ config.walk_scbs = TRUE;
+ }
+ found += aic7xxx_register(template, &config);
+
+ /*
+ * Disable spurious interrupts.
+ */
+ aic7xxx_spurious_count = 0;
+
+ index++;
+ } /* Found an Adaptec PCI device. */
+ }
+ }
+ }
+#endif /* CONFIG_PCI */
+
+ template->name = aic7xxx_info(NULL);
+ return (found);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_buildscb
+ *
+ * Description:
+ * Build a SCB.
+ *-F*************************************************************************/
+static void
+aic7xxx_buildscb(struct aic7xxx_host *p,
+ Scsi_Cmnd *cmd,
+ struct aic7xxx_scb *scb)
+{
+ void *addr;
+ unsigned short mask;
+ struct scatterlist *sg;
+
+ /*
+ * Setup the control byte if we need negotiation and have not
+ * already requested it.
+ */
+#ifdef AIC7XXX_TAGGED_QUEUEING
+ if (cmd->device->tagged_supported)
+ {
+ if (cmd->device->tagged_queue == 0)
+ {
+ printk("aic7xxx: Enabling tagged queuing for target %d, "
+ "channel %d.\n", cmd->target, cmd->channel);
+ cmd->device->tagged_queue = 1;
+ cmd->device->current_tag = 1; /* enable tagging */
+ }
+ cmd->tag = cmd->device->current_tag;
+ cmd->device->current_tag++;
+ scb->control |= TAG_ENB;
+ }
+#endif
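+  /*
+   * One bit per target: channel A targets occupy bits 0-7 and
+   * channel B targets bits 8-15 of the mask.
+   */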
+ mask = (0x01 << (cmd->target | (cmd->channel << 3)));
+ if (p->discenable & mask)
+ {
+ scb->control |= DISCENB;
+ }
+ if ((p->needwdtr & mask) && !(p->wdtr_pending & mask))
+ {
+ p->wdtr_pending |= mask;
+ scb->control |= NEEDWDTR;
+#if 0
+ printk("aic7xxx: Sending WDTR request to target %d.\n", cmd->target);
+#endif
+ }
+ else
+ {
+ if ((p->needsdtr & mask) && !(p->sdtr_pending & mask))
+ {
+ p->sdtr_pending |= mask;
+ scb->control |= NEEDSDTR;
+#if 0
+ printk("aic7xxx: Sending SDTR request to target %d.\n", cmd->target);
+#endif
+ }
+ }
+
+#if 0
+ printk("aic7xxx: (build_scb) Target %d, cmd(0x%x) size(%u) wdtr(0x%x) "
+ "mask(0x%x).\n",
+ cmd->target, cmd->cmnd[0], cmd->cmd_len, p->needwdtr, mask);
+#endif
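+  /*
+   * Pack target, channel, and lun into a single byte: bits 7-4 hold
+   * the target ID, bit 3 the channel, and bits 2-0 the lun.
+   */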
+ scb->target_channel_lun = ((cmd->target << 4) & 0xF0) |
+ ((cmd->channel & 0x01) << 3) | (cmd->lun & 0x07);
+
+ /*
+ * The interpretation of request_buffer and request_bufflen
+ * changes depending on whether or not use_sg is zero; a
+ * non-zero use_sg indicates the number of elements in the
+ * scatter-gather array.
+ */
+
+ /*
+ * XXX - this relies on the host data being stored in a
+ * little-endian format.
+ */
+ addr = cmd->cmnd;
+ scb->SCSI_cmd_length = cmd->cmd_len;
+ memcpy(scb->SCSI_cmd_pointer, &addr, sizeof(scb->SCSI_cmd_pointer));
+
+ if (cmd->use_sg)
+ {
+ scb->SG_segment_count = cmd->use_sg;
+ memcpy(scb->SG_list_pointer, &cmd->request_buffer,
+ sizeof(scb->SG_list_pointer));
+ memcpy(&sg, &cmd->request_buffer, sizeof(sg));
+ memcpy(scb->data_pointer, &(sg[0].address), sizeof(scb->data_pointer));
+ scb->data_count = sg[0].length;
+#if 0
+ debug("aic7xxx: (build_scb) SG segs(%d), length(%u), sg[0].length(%d).\n",
+ cmd->use_sg, aic7xxx_length(cmd, 0), scb->data_count);
+#endif
+ }
+ else
+ {
+#if 0
+ debug("aic7xxx: (build_scb) Creating scatterlist, addr(0x%lx) length(%d).\n",
+ (unsigned long) cmd->request_buffer, cmd->request_bufflen);
+#endif
+ if (cmd->request_bufflen == 0)
+ {
+ /*
+ * In case the higher level SCSI code ever tries to send a zero
+ * length command, ensure the SCB indicates no data. The driver
+ * will interpret a zero length command as a Bus Device Reset.
+ */
+ scb->SG_segment_count = 0;
+ memset(scb->SG_list_pointer, 0, sizeof(scb->SG_list_pointer));
+ memset(scb->data_pointer, 0, sizeof(scb->data_pointer));
+ scb->data_count = 0;
+ }
+ else
+ {
+ scb->SG_segment_count = 1;
+ scb->sg.address = (char *) cmd->request_buffer;
+ scb->sg.length = cmd->request_bufflen;
+ addr = &scb->sg;
+ memcpy(scb->SG_list_pointer, &addr, sizeof(scb->SG_list_pointer));
+ scb->data_count = scb->sg.length;
+ memcpy(scb->data_pointer, &cmd->request_buffer, sizeof(scb->data_pointer));
+ }
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_queue
+ *
+ * Description:
+ * Queue a SCB to the controller.
+ *-F*************************************************************************/
+int
+aic7xxx_queue(Scsi_Cmnd *cmd, void (*fn)(Scsi_Cmnd *))
+{
+ long flags;
+ struct aic7xxx_host *p;
+ struct aic7xxx_scb *scb;
+
+ p = (struct aic7xxx_host *) cmd->host->hostdata;
+
+ /*
+ * Check to see if channel was scanned.
+ */
+ if (!p->a_scanned && (cmd->channel == 0))
+ {
+ printk("aic7xxx: Scanning channel A for devices.\n");
+ p->a_scanned = TRUE;
+ }
+ else
+ {
+ if (!p->b_scanned && (cmd->channel == 1))
+ {
+ printk("aic7xxx: Scanning channel B for devices.\n");
+ p->b_scanned = TRUE;
+ }
+ }
+
+#if 0
+ debug("aic7xxx: (queue) cmd(0x%x) size(%u), target %d, channel %d, lun %d.\n",
+ cmd->cmnd[0], cmd->cmd_len, cmd->target, cmd->channel,
+ cmd->lun & 0x07);
+#endif
+
+ /*
+ * This is a critical section, since we don't want the
+ * interrupt routine mucking with the host data or the
+ * card. Since the kernel documentation is vague on
+ * whether or not we are in a cli/sti pair already, save
+ * the flags to be on the safe side.
+ */
+ save_flags(flags);
+ cli();
+
+ /*
+ * Find a free slot in the SCB array to load this command
+ * into. Since can_queue is set to the maximum number of
+ * SCBs for the card, we should always find one.
+ *
+ * First try to find an scb in the free list. If there are
+ * none in the free list, then check the current number of
+ * of scbs and take an unused one from the scb array.
+ */
+ scb = p->free_scb;
+ if (scb != NULL)
+ { /* found one in the free list */
+ p->free_scb = scb->next; /* remove and update head of list */
+ /*
+ * Warning! For some unknown reason, the scb at the head
+ * of the free list is not the same address that it should
+     * be. That's why we re-derive the scb pointer from its
+     * position in the array. The scb at the head of the list
+ * should match this address, but it doesn't.
+ */
+ scb = &(p->scb_array[scb->position]);
+ scb->control = 0;
+ scb->state = SCB_ACTIVE;
+ }
+ else
+ {
+ if (p->numscb >= p->maxscb)
+ {
+ panic("aic7xxx: (aic7xxx_queue) Couldn't find a free SCB.\n");
+ }
+ else
+ {
+ /*
+ * Initialize the scb within the scb array. The
+ * position within the array is the position on
+ * the board that it will be loaded.
+ */
+ scb = &(p->scb_array[p->numscb]);
+ memset(scb, 0, sizeof(*scb));
+
+ scb->position = p->numscb;
+ p->numscb++;
+ scb->state = SCB_ACTIVE;
+ }
+ }
+
+ scb->cmd = cmd;
+ aic7xxx_position(cmd) = scb->position;
+#if 0
+ debug_scb(scb);
+#endif
+
+ /*
+ * Construct the SCB beforehand, so the sequencer is
+ * paused a minimal amount of time.
+ */
+ aic7xxx_buildscb(p, cmd, scb);
+
+#if 0
+ if (scb != &p->scb_array[scb->position])
+ {
+ printk("aic7xxx: (queue) Address of SCB by position does not match SCB "
+ "address.\n");
+ }
+ printk("aic7xxx: (queue) SCB pos(%d) cmdptr(0x%x) state(%d) freescb(0x%x)\n",
+ scb->position, (unsigned int) scb->cmd,
+ scb->state, (unsigned int) p->free_scb);
+#endif
+ /*
+ * Pause the sequencer so we can play with its registers -
+ * wait for it to acknowledge the pause.
+ *
+ * XXX - should the interrupts be left on while doing this?
+ */
+ PAUSE_SEQUENCER(p);
+
+ /*
+ * Save the SCB pointer and put our own pointer in - this
+ * selects one of the four banks of SCB registers. Load
+ * the SCB, then write its pointer into the queue in FIFO
+ * and restore the saved SCB pointer.
+ */
+ aic7xxx_putscb(p, scb);
+ outb(scb->position, QINFIFO + p->base);
+
+ /*
+ * Make sure the Scsi_Cmnd pointer is saved, the struct it
+ * points to is set up properly, and the parity error flag
+ * is reset, then unpause the sequencer and watch the fun
+ * begin.
+ */
+ cmd->scsi_done = fn;
+ aic7xxx_error(cmd) = DID_OK;
+ aic7xxx_status(cmd) = 0;
+ cmd->result = 0;
+ memset(&cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+
+ UNPAUSE_SEQUENCER(p);
+#if 0
+ printk("aic7xxx: (queue) After - cmd(0x%lx) scb->cmd(0x%lx) pos(%d).\n",
+ (long) cmd, (long) scb->cmd, scb->position);
+#endif
+ restore_flags(flags);
+ return (0);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_abort_scb
+ *
+ * Description:
+ * Abort an scb. If the scb has not previously been aborted, then
+ * we attempt to send a BUS_DEVICE_RESET message to the target. If
+ * the scb has previously been unsuccessfully aborted, then we will
+ * reset the channel and have all devices renegotiate. Returns an
+ * enumerated type that indicates the status of the operation.
+ *-F*************************************************************************/
+static aha_abort_reset_type
+aic7xxx_abort_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb,
+ unsigned char errcode)
+{
+ int base = p->base;
+ int found = FALSE;
+ aha_abort_reset_type scb_status = ABORT_RESET_SUCCESS;
+ char channel = scb->target_channel_lun & SELBUSB ? 'B': 'A';
+
+ /*
+ * Ensure that the card doesn't do anything
+ * behind our back.
+ */
+ PAUSE_SEQUENCER(p);
+
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (abort_scb) scb %d, scb_aborted 0x%x\n",
+ scb->position, (scb->state & SCB_ABORTED));
+#endif
+ /*
+ * First, determine if we want to do a bus reset or simply a bus device
+ * reset. If this is the first time that a transaction has timed out,
+ * just schedule a bus device reset. Otherwise, we reset the bus and
+ * abort all pending I/Os on that bus.
+ */
+ if (scb->state & SCB_ABORTED)
+ {
+ /*
+ * Been down this road before. Do a full bus reset.
+ */
+ found = aic7xxx_reset_channel(p, channel, scb->position);
+ }
+ else
+ {
+ unsigned char active_scb, control;
+ struct aic7xxx_scb *active_scbp;
+
+ /*
+ * Send a Bus Device Reset Message:
+ * The target we select to send the message to may be entirely
+ * different than the target pointed to by the scb that timed
+ * out. If the command is in the QINFIFO or the waiting for
+ * selection list, it's not tying up the bus and isn't responsible
+ * for the delay, so we pick off the active command, which should
+ * be the SCB selected by SCBPTR. If it's disconnected or active,
+ * we device reset the target scbp points to. Although it may
+ * be that this target is not responsible for the delay, it may
+ * also be that we're timing out on a command that just takes
+ * too much time, so we try the bus device reset there first.
+ */
+ active_scb = inb(SCBPTR + base);
+ active_scbp = &(p->scb_array[active_scb]);
+ control = inb(SCBARRAY + base);
+
+ /*
+ * Test to see if scbp is disconnected
+ */
+ outb(scb->position, SCBPTR + base);
+ if (inb(SCBARRAY + base) & DISCONNECTED)
+ {
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (abort_scb) scb %d is disconnected.\n", scb->position);
+#endif
+ scb->state |= (SCB_DEVICE_RESET | SCB_ABORTED);
+ scb->SG_segment_count = 0;
+ memset(scb->SG_list_pointer, 0, sizeof(scb->SG_list_pointer));
+ memset(scb->data_pointer, 0, sizeof(scb->data_pointer));
+ scb->data_count = 0;
+ aic7xxx_putscb(p, scb);
+ aic7xxx_error(scb->cmd) = errcode;
+ scb_status = ABORT_RESET_PENDING;
+ aic7xxx_add_waiting_scb(base, scb, LIST_SECOND);
+ UNPAUSE_SEQUENCER(p);
+ }
+ else
+ {
+ /*
+ * Is the active SCB really active?
+ */
+ if (active_scbp->state & SCB_ACTIVE)
+ {
+ unsigned char msg_len = inb(MSG_LEN + base);
+ if (msg_len != 0)
+ {
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (abort_scb) scb is active, needs DMA, "
+ "msg_len is non-zero.\n");
+#endif
+ /*
+ * If we're in a message phase, tacking on another message
+ * may confuse the target totally. The bus is probably wedged,
+ * so reset the channel.
+ */
+ channel = (active_scbp->target_channel_lun & SELBUSB) ? 'B': 'A';
+ aic7xxx_reset_channel(p, channel, scb->position);
+ }
+ else
+ {
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (abort_scb) scb is active, needs DMA, "
+ "msg_len is zero.\n");
+#endif
+ /*
+ * Load the message buffer and assert attention.
+ */
+ active_scbp->state |= (SCB_DEVICE_RESET | SCB_ABORTED);
+ outb(1, MSG_LEN + base);
+ outb(MSG_BUS_DEVICE_RESET, MSG0 + base);
+ if (active_scbp->target_channel_lun != scb->target_channel_lun)
+ {
+ /*
+ * XXX - We would like to increment the timeout on scb, but
+ * access to that routine is denied because it is hidden
+ * in scsi.c. If we were able to do this, it would give
+ * scb a new lease on life.
+ */
+ ;
+ }
+ aic7xxx_error(scb->cmd) = errcode;
+ scb_status = ABORT_RESET_PENDING;
+ UNPAUSE_SEQUENCER(p);
+ }
+ }
+ else
+ {
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (abort_scb) no active command.\n");
+#endif
+ /*
+ * No active command to single out, so reset
+ * the bus for the timed out target.
+ */
+ aic7xxx_reset_channel(p, channel, scb->position);
+ }
+ }
+ }
+ return (scb_status);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_abort_reset
+ *
+ * Description:
+ * Abort or reset the current SCSI command(s). Returns an enumerated
+ * type that indicates the status of the operation.
+ *-F*************************************************************************/
+static aha_abort_reset_type
+aic7xxx_abort_reset(Scsi_Cmnd *cmd, unsigned char errcode)
+{
+ struct aic7xxx_scb *scb;
+ struct aic7xxx_host *p;
+ long flags;
+ aha_abort_reset_type scb_status = ABORT_RESET_SUCCESS;
+
+ p = (struct aic7xxx_host *) cmd->host->hostdata;
+ scb = &(p->scb_array[aic7xxx_position(cmd)]);
+
+ save_flags(flags);
+ cli();
+
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (abort_reset) scb state 0x%x\n", scb->state);
+#endif
+
+ if (scb->state & SCB_ACTIVE)
+ {
+ if (scb->state & SCB_IMMED)
+ {
+ /*
+ * Don't know how to set the number of retries to 0.
+ */
+ /* cmd->retries = 0; */
+ aic7xxx_error(cmd) = errcode;
+ aic7xxx_done(p, scb);
+ }
+ else
+ {
+ /*
+ * Abort the operation.
+ */
+ scb_status = aic7xxx_abort_scb(p, scb, errcode);
+ }
+ }
+ else
+ {
+ /*
+ * The scb is not active and must have completed after the timeout
+ * check in scsi.c and before we check the scb state above. For
+ * this case we return SCSI_ABORT_NOT_RUNNING (if abort was called)
+ * or SCSI_RESET_SUCCESS (if reset was called).
+ */
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (abort_reset) called with no active scb, errcode 0x%x\n",
+ errcode);
+#endif
+ scb_status = ABORT_RESET_INACTIVE;
+ /*
+ * According to the comments in scsi.h and Michael Neuffer, if we do not
+ * have an active command for abort or reset, we should not call the
+ * command done function. Unfortunately, this hangs the system for me
+ * unless we *do* call the done function.
+ *
+ * XXX - Revisit this sometime!
+ */
+ cmd->result = errcode << 16;
+ cmd->scsi_done(cmd);
+ }
+ restore_flags(flags);
+ return (scb_status);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_abort
+ *
+ * Description:
+ * Abort the current SCSI command(s).
+ *-F*************************************************************************/
+int
+aic7xxx_abort(Scsi_Cmnd *cmd)
+{
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (abort) target/channel %d/%d\n", cmd->target, cmd->channel);
+#endif
+
+ switch (aic7xxx_abort_reset(cmd, DID_ABORT))
+ {
+ case ABORT_RESET_INACTIVE:
+ return (SCSI_ABORT_NOT_RUNNING);
+ break;
+ case ABORT_RESET_PENDING:
+ return (SCSI_ABORT_PENDING);
+ break;
+ case ABORT_RESET_SUCCESS:
+ default:
+ return (SCSI_ABORT_SUCCESS);
+ break;
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset
+ *
+ * Description:
+ * Resetting the bus always succeeds - it has to, otherwise the
+ * kernel will panic! Try a surgical technique - sending a BUS
+ * DEVICE RESET message - on the offending target before pulling
+ * the SCSI bus reset line.
+ *-F*************************************************************************/
+int
+aic7xxx_reset(Scsi_Cmnd *cmd)
+{
+#ifdef AIC7XXX_DEBUG_ABORT
+ printk ("aic7xxx: (reset) target/channel %d/%d\n", cmd->target, cmd->channel);
+#endif
+
+ switch (aic7xxx_abort_reset(cmd, DID_RESET))
+ {
+ case ABORT_RESET_PENDING:
+ return (SCSI_RESET_PENDING);
+ break;
+ case ABORT_RESET_INACTIVE:
+ case ABORT_RESET_SUCCESS:
+ default:
+ return (SCSI_RESET_SUCCESS);
+ break;
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_biosparam
+ *
+ * Description:
+ * Return the disk geometry for the given SCSI device.
+ *-F*************************************************************************/
+int
+aic7xxx_biosparam(Disk *disk, kdev_t dev, int geom[])
+{
+ int heads, sectors, cylinders;
+ struct aic7xxx_host *p;
+
+ p = (struct aic7xxx_host *) disk->device->host->hostdata;
+
+ /*
+ * XXX - if I could portably find the card's configuration
+ * information, then this could be autodetected instead
+ * of left to a boot-time switch.
+ */
+ heads = 64;
+ sectors = 32;
+ cylinders = disk->capacity / (heads * sectors);
+
+ if (p->extended && cylinders > 1024)
+ {
+ heads = 255;
+ sectors = 63;
+ cylinders = disk->capacity / (255 * 63);
+ }
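+ /*
+ * Worked example (hypothetical 4 GB disk, 8388608 blocks): 64 * 32 =
+ * 2048 blocks per cylinder gives 4096 cylinders. That exceeds 1024,
+ * so with extended translation the geometry becomes 255/63, i.e.
+ * 8388608 / 16065 = 522 cylinders.
+ */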
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return (0);
+}
+
+#ifdef MACH
+#include "aic7xxx_proc.src"
+#else
+#include "aic7xxx_proc.c"
+#endif
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but that will come later. */
+Scsi_Host_Template driver_template = AIC7XXX;
+
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 2
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -2
+ * c-argdecl-indent: 2
+ * c-label-offset: -2
+ * c-continued-statement-offset: 2
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/i386/i386at/gpl/linux/scsi/aic7xxx.h b/i386/i386at/gpl/linux/scsi/aic7xxx.h
new file mode 100644
index 00000000..76d4d962
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/aic7xxx.h
@@ -0,0 +1,67 @@
+/*+M*************************************************************************
+ * Adaptec 274x/284x/294x device driver for Linux.
+ *
+ * Copyright (c) 1994 John Aycock
+ * The University of Calgary Department of Computer Science.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Id: aic7xxx.h,v 1.1.1.1 1997/02/25 21:27:47 thomas Exp $
+ *-M*************************************************************************/
+#ifndef _aic7xxx_h
+#define _aic7xxx_h
+
+#define AIC7XXX_H_VERSION "$Revision: 1.1.1.1 $"
+
+/*
+ * Scsi_Host_Template (see hosts.h) for AIC-7770/AIC-7870 - some fields
+ * to do with card config are filled in after the card is detected.
+ */
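+/*
+ * The initializer below is positional; the NULL entries stand for
+ * optional Scsi_Host_Template entry points this driver does not
+ * implement, and the commented numeric fields are the card limits.
+ */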
+#define AIC7XXX { \
+ NULL, \
+ NULL, \
+ NULL, \
+ aic7xxx_proc_info, \
+ NULL, \
+ aic7xxx_detect, \
+ NULL, \
+ aic7xxx_info, \
+ NULL, \
+ aic7xxx_queue, \
+ aic7xxx_abort, \
+ aic7xxx_reset, \
+ NULL, \
+ aic7xxx_biosparam, \
+ -1, /* max simultaneous cmds */\
+ -1, /* scsi id of host adapter */\
+ SG_ALL, /* max scatter-gather cmds */\
+ 2, /* cmds per lun (linked cmds) */\
+ 0, /* number of 7xxx's present */\
+ 0, /* no memory DMA restrictions */\
+ ENABLE_CLUSTERING \
+}
+
+extern int aic7xxx_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
+extern int aic7xxx_biosparam(Disk *, kdev_t, int[]);
+extern int aic7xxx_detect(Scsi_Host_Template *);
+extern int aic7xxx_command(Scsi_Cmnd *);
+extern int aic7xxx_abort(Scsi_Cmnd *);
+extern int aic7xxx_reset(Scsi_Cmnd *);
+
+extern const char *aic7xxx_info(struct Scsi_Host *);
+
+extern int aic7xxx_proc_info(char *, char **, off_t, int, int, int);
+
+#endif /* _aic7xxx_h */
diff --git a/i386/i386at/gpl/linux/scsi/aic7xxx_proc.src b/i386/i386at/gpl/linux/scsi/aic7xxx_proc.src
new file mode 100644
index 00000000..65996822
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/aic7xxx_proc.src
@@ -0,0 +1,271 @@
+/*+M*************************************************************************
+ * Adaptec 274x/284x/294x device driver proc support for Linux.
+ *
+ * Copyright (c) 1995 Dean W. Gehnert
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * ----------------------------------------------------------------
+ * o Modified from the EATA /proc support.
+ * o Additional support for device block statistics provided by
+ * Matthew Jacob.
+ *
+ * Dean W. Gehnert, deang@ims.com, 08/30/95
+ *
+ * $Id: aic7xxx_proc.src,v 1.1.1.1 1997/02/25 21:27:47 thomas Exp $
+ *-M*************************************************************************/
+
+#define BLS buffer + len + size
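+/*
+ * BLS expands to the current end of the accumulated /proc output, so
+ * each sprintf(BLS, ...) below appends to the buffer while its return
+ * value advances 'size'. HDRB (below) is the column header for the
+ * ten per-transfer-size histogram bins printed in the statistics
+ * section.
+ */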
+#define HDRB \
+" < 512 512-1K 1-2K 2-4K 4-8K 8-16K 16-32K 32-64K 64-128K >128K"
+
+#ifdef PROC_DEBUG
+extern int vsprintf(char *, const char *, va_list);
+
+static void
+proc_debug(const char *fmt, ...)
+{
+ va_list ap;
+ char buf[256];
+
+ va_start(ap, fmt);
+ vsprintf(buf, fmt, ap);
+ printk(buf);
+ va_end(ap);
+}
+#else /* PROC_DEBUG */
+# define proc_debug(fmt, args...)
+#endif /* PROC_DEBUG */
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_set_info
+ *
+ * Description:
+ * Set parameters for the driver from the /proc filesystem.
+ *-F*************************************************************************/
+int
+aic7xxx_set_info(char *buffer, int length, struct Scsi_Host *HBAptr)
+{
+ proc_debug("aic7xxx_set_info(): %s\n", buffer);
+ return (-ENOSYS); /* Currently this is a no-op */
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_proc_info
+ *
+ * Description:
+ * Return information to handle /proc support for the driver.
+ *-F*************************************************************************/
+int
+aic7xxx_proc_info(char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+ struct Scsi_Host *HBAptr;
+ struct aic7xxx_host *p;
+ static u8 buff[512];
+ int i;
+ int size = 0;
+ int len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+ static char *bus_name[] = {"Single", "Twin", "Wide"};
+
+ HBAptr = NULL;
+ for (i = 0; i < NUMBER(aic7xxx_boards); i++)
+ {
+ if ((HBAptr = aic7xxx_boards[i]) != NULL)
+ {
+ if (HBAptr->host_no == hostno)
+ {
+ break;
+ }
+
+ while ((HBAptr->hostdata != NULL) &&
+ ((HBAptr = ((struct aic7xxx_host *) HBAptr->hostdata)->next) != NULL))
+ {
+ if (HBAptr->host_no == hostno)
+ {
+ break;
+ }
+ }
+
+ HBAptr = NULL;
+ }
+ }
+
+ if (HBAptr == NULL)
+ {
+ size += sprintf(BLS, "Can't find adapter for host number %d\n", hostno);
+ len += size; pos = begin + len; size = 0;
+ goto stop_output;
+ }
+
+ if (inout == TRUE) /* Has data been written to the file? */
+ {
+ return (aic7xxx_set_info(buffer, length, HBAptr));
+ }
+
+ if (offset == 0)
+ {
+ memset(buff, 0, sizeof(buff));
+ }
+
+ p = (struct aic7xxx_host *) HBAptr->hostdata;
+
+ size += sprintf(BLS, "Adaptec AIC7xxx driver version: ");
+ size += sprintf(BLS, "%s/", rcs_version(AIC7XXX_C_VERSION));
+ size += sprintf(BLS, "%s/", rcs_version(AIC7XXX_H_VERSION));
+ size += sprintf(BLS, "%s\n", rcs_version(AIC7XXX_SEQ_VER));
+ len += size; pos = begin + len; size = 0;
+
+ size += sprintf(BLS, "\n");
+ size += sprintf(BLS, "Compile Options:\n");
+#ifdef AIC7XXX_RESET_DELAY
+ size += sprintf(BLS, " AIC7XXX_RESET_DELAY : %d\n", AIC7XXX_RESET_DELAY);
+#endif
+#ifdef AIC7XXX_TWIN_SUPPORT
+ size += sprintf(BLS, " AIC7XXX_TWIN_SUPPORT : Enabled\n");
+#else
+ size += sprintf(BLS, " AIC7XXX_TWIN_SUPPORT : Disabled\n");
+#endif
+#ifdef AIC7XXX_TAGGED_QUEUEING
+ size += sprintf(BLS, " AIC7XXX_TAGGED_QUEUEING: Enabled\n");
+#else
+ size += sprintf(BLS, " AIC7XXX_TAGGED_QUEUEING: Disabled\n");
+#endif
+#ifdef AIC7XXX_SHARE_IRQS
+ size += sprintf(BLS, " AIC7XXX_SHARE_IRQS : Enabled\n");
+#else
+ size += sprintf(BLS, " AIC7XXX_SHARE_IRQS : Disabled\n");
+#endif
+#ifdef AIC7XXX_PROC_STATS
+ size += sprintf(BLS, " AIC7XXX_PROC_STATS : Enabled\n");
+#else
+ size += sprintf(BLS, " AIC7XXX_PROC_STATS : Disabled\n");
+#endif
+ len += size; pos = begin + len; size = 0;
+
+ size += sprintf(BLS, "\n");
+ size += sprintf(BLS, "Adapter Configuration:\n");
+ size += sprintf(BLS, " SCSI Adapter: %s\n", board_names[p->type]);
+ size += sprintf(BLS, " Host Bus: %s\n", bus_name[p->bus_type]);
+ size += sprintf(BLS, " Base IO: %#.4x\n", p->base);
+ size += sprintf(BLS, " IRQ: %d\n", HBAptr->irq);
+ size += sprintf(BLS, " SCB: %d (%d)\n", p->numscb, p->maxscb);
+ size += sprintf(BLS, " Interrupts: %d", p->isr_count);
+ if (p->chip_type == AIC_777x)
+ {
+ size += sprintf(BLS, " %s\n",
+ (p->pause & IRQMS) ? "(Level Sensitive)" : "(Edge Triggered)");
+ }
+ else
+ {
+ size += sprintf(BLS, "\n");
+ }
+ size += sprintf(BLS, " Serial EEPROM: %s\n",
+ p->have_seeprom ? "True" : "False");
+ size += sprintf(BLS, " Pause/Unpause: %#.2x/%#.2x\n", p->pause,
+ p->unpause);
+ size += sprintf(BLS, " Extended Translation: %sabled\n",
+ p->extended ? "En" : "Dis");
+ size += sprintf(BLS, " SCSI Bus Reset: %sabled\n",
+ aic7xxx_no_reset ? "Dis" : "En");
+ size += sprintf(BLS, " Ultra SCSI: %sabled\n",
+ p->ultra_enabled ? "En" : "Dis");
+ len += size; pos = begin + len; size = 0;
+
+#ifdef AIC7XXX_PROC_STATS
+ {
+ struct aic7xxx_xferstats *sp;
+ int channel, target, lun;
+
+ /*
+ * XXX: Need to fix this to avoid overflow...
+ */
+ size += sprintf(BLS, "\n");
+ size += sprintf(BLS, "Statistics:\n");
+ for (channel = 0; channel < 2; channel++)
+ {
+ for (target = 0; target < 16; target++)
+ {
+ for (lun = 0; lun < 8; lun++)
+ {
+ sp = &p->stats[channel][target][lun];
+ if (sp->xfers == 0)
+ {
+ continue;
+ }
+ size += sprintf(BLS, "CHAN#%c (TGT %d LUN %d):\n",
+ 'A' + channel, target, lun);
+ size += sprintf(BLS, "nxfers %ld (%ld read;%ld written)\n",
+ sp->xfers, sp->r_total, sp->w_total);
+ size += sprintf(BLS, "blks(512) rd=%ld; blks(512) wr=%ld\n",
+ sp->r_total512, sp->w_total512);
+ size += sprintf(BLS, "%s\n", HDRB);
+ size += sprintf(BLS, " Reads:");
+ size += sprintf(BLS, "%6ld %6ld %6ld %6ld ", sp->r_bins[0],
+ sp->r_bins[1], sp->r_bins[2], sp->r_bins[3]);
+ size += sprintf(BLS, "%6ld %6ld %6ld %6ld ", sp->r_bins[4],
+ sp->r_bins[5], sp->r_bins[6], sp->r_bins[7]);
+ size += sprintf(BLS, "%6ld %6ld\n", sp->r_bins[8],
+ sp->r_bins[9]);
+ size += sprintf(BLS, "Writes:");
+ size += sprintf(BLS, "%6ld %6ld %6ld %6ld ", sp->w_bins[0],
+ sp->w_bins[1], sp->w_bins[2], sp->w_bins[3]);
+ size += sprintf(BLS, "%6ld %6ld %6ld %6ld ", sp->w_bins[4],
+ sp->w_bins[5], sp->w_bins[6], sp->w_bins[7]);
+ size += sprintf(BLS, "%6ld %6ld\n", sp->w_bins[8],
+ sp->w_bins[9]);
+ size += sprintf(BLS, "\n");
+ }
+ }
+ }
+ len += size; pos = begin + len; size = 0;
+ }
+#endif /* AIC7XXX_PROC_STATS */
+
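+/*
+ * Hand back only the window the caller asked for: skip the first
+ * 'offset' bytes of the generated text and return at most 'length'
+ * bytes, as the proc_info interface expects.
+ */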
+stop_output:
+ proc_debug("2pos: %ld offset: %ld len: %d\n", pos, offset, len);
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin); /* Start slop */
+ if (len > length)
+ {
+ len = length; /* Ending slop */
+ }
+ proc_debug("3pos: %ld offset: %ld len: %d\n", pos, offset, len);
+
+ return (len);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 2
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -2
+ * c-argdecl-indent: 2
+ * c-label-offset: -2
+ * c-continued-statement-offset: 2
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/aic7xxx_reg.h b/i386/i386at/gpl/linux/scsi/aic7xxx_reg.h
new file mode 100644
index 00000000..4a7f612c
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/aic7xxx_reg.h
@@ -0,0 +1,746 @@
+/*+M*************************************************************************
+ * Adaptec AIC7xxx register and scratch ram definitions.
+ *
+ * Copyright (c) 1994, 1995, 1996 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Id: aic7xxx_reg.h,v 1.1.1.1 1997/02/25 21:27:47 thomas Exp $
+ *-M*************************************************************************/
+
+/*
+ * This header is shared by the sequencer code and the kernel level driver.
+ *
+ * All page numbers refer to the Adaptec AIC-7770 Data Book available from
+ * Adaptec's Technical Documents Department 1-800-934-2766
+ */
+
+/*
+ * SCSI Sequence Control (p. 3-11).
+ * Each bit, when set, starts a specific SCSI sequence on the bus.
+ */
+#define SCSISEQ 0x000
+#define TEMODEO 0x80
+#define ENSELO 0x40
+#define ENSELI 0x20
+#define ENRSELI 0x10
+#define ENAUTOATNO 0x08
+#define ENAUTOATNI 0x04
+#define ENAUTOATNP 0x02
+#define SCSIRSTO 0x01
+
+/*
+ * SCSI Transfer Control 0 Register (pp. 3-13).
+ * Controls the SCSI module data path.
+ */
+#define SXFRCTL0 0x001
+#define DFON 0x80
+#define DFPEXP 0x40
+#define ULTRAEN 0x20
+#define CLRSTCNT 0x10
+#define SPIOEN 0x08
+#define SCAMEN 0x04
+#define CLRCHN 0x02
+/* UNUSED 0x01 */
+
+/*
+ * SCSI Transfer Control 1 Register (pp. 3-14,15).
+ * Controls the SCSI module data path.
+ */
+#define SXFRCTL1 0x002
+#define BITBUCKET 0x80
+#define SWRAPEN 0x40
+#define ENSPCHK 0x20
+#define STIMESEL 0x18
+#define ENSTIMER 0x04
+#define ACTNEGEN 0x02
+#define STPWEN 0x01 /* Powered Termination */
+
+/*
+ * SCSI Control Signal Read Register (p. 3-15).
+ * Reads the actual state of the SCSI bus pins
+ */
+#define SCSISIGI 0x003
+#define CDI 0x80
+#define IOI 0x40
+#define MSGI 0x20
+#define ATNI 0x10
+#define SELI 0x08
+#define BSYI 0x04
+#define REQI 0x02
+#define ACKI 0x01
+
+/*
+ * Possible phases in SCSISIGI
+ */
+#define PHASE_MASK 0xe0
+#define P_DATAOUT 0x00
+#define P_DATAIN 0x40
+#define P_COMMAND 0x80
+#define P_MESGOUT 0xa0
+#define P_STATUS 0xc0
+#define P_MESGIN 0xe0
+/*
+ * SCSI Control Signal Write Register (p. 3-16).
+ * Writing to this register modifies the control signals on the bus. Only
+ * those signals that are allowed in the current mode (Initiator/Target) are
+ * asserted.
+ */
+#define SCSISIGO 0x003
+#define CDO 0x80
+#define IOO 0x40
+#define MSGO 0x20
+#define ATNO 0x10
+#define SELO 0x08
+#define BSYO 0x04
+#define REQO 0x02
+#define ACKO 0x01
+
+/*
+ * SCSI Rate Control (p. 3-17).
+ * Contents of this register determine the Synchronous SCSI data transfer
+ * rate and the maximum synchronous Req/Ack offset. An offset of 0 in the
+ * SOFS (3:0) bits disables synchronous data transfers. Any offset value
+ * greater than 0 enables synchronous transfers.
+ */
+#define SCSIRATE 0x004
+#define WIDEXFER 0x80 /* Wide transfer control */
+#define SXFR 0x70 /* Sync transfer rate */
+#define SOFS 0x0f /* Sync offset */
+
+/*
+ * SCSI ID (p. 3-18).
+ * Contains the ID of the board and the current target on the
+ * selected channel.
+ */
+#define SCSIID 0x005
+#define TID 0xf0 /* Target ID mask */
+#define OID 0x0f /* Our ID mask */
+
+/*
+ * SCSI Latched Data (p. 3-19).
+ * Read/Write latches used to transfer data on the SCSI bus during
+ * Automatic or Manual PIO mode. SCSIDATH can be used for the
+ * upper byte of a 16-bit wide asynchronous data phase transfer.
+ */
+#define SCSIDATL 0x006
+#define SCSIDATH 0x007
+
+/*
+ * SCSI Transfer Count (pp. 3-19,20)
+ * These registers count down the number of bytes transferred
+ * across the SCSI bus. The counter is decremented only once
+ * the data has been safely transferred. SDONE in SSTAT0 is
+ * set when STCNT goes to 0.
+ */
+#define STCNT 0x008
+#define STCNT0 0x008
+#define STCNT1 0x009
+#define STCNT2 0x00a
+
+/*
+ * Clear SCSI Interrupt 0 (p. 3-20)
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0.
+ */
+#define CLRSINT0 0x00b
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRSWRAP 0x08
+/* UNUSED 0x04 */
+#define CLRSPIORDY 0x02
+/* UNUSED 0x01 */
+
+/*
+ * SCSI Status 0 (p. 3-21)
+ * Contains one set of SCSI Interrupt codes
+ * These are most likely of interest to the sequencer
+ */
+#define SSTAT0 0x00b
+#define TARGET 0x80 /* Board acting as target */
+#define SELDO 0x40 /* Selection Done */
+#define SELDI 0x20 /* Board has been selected */
+#define SELINGO 0x10 /* Selection In Progress */
+#define SWRAP 0x08 /* 24bit counter wrap */
+#define SDONE 0x04 /* STCNT = 0x000000 */
+#define SPIORDY 0x02 /* SCSI PIO Ready */
+#define DMADONE 0x01 /* DMA transfer completed */
+
+/*
+ * Clear SCSI Interrupt 1 (p. 3-23)
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1.
+ */
+#define CLRSINT1 0x00c
+#define CLRSELTIMEO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+/* UNUSED 0x10 */
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRPHASECHG 0x02
+#define CLRREQINIT 0x01
+
+/*
+ * SCSI Status 1 (p. 3-24)
+ */
+#define SSTAT1 0x00c
+#define SELTO 0x80
+#define ATNTARG 0x40
+#define SCSIRSTI 0x20
+#define PHASEMIS 0x10
+#define BUSFREE 0x08
+#define SCSIPERR 0x04
+#define PHASECHG 0x02
+#define REQINIT 0x01
+
+/*
+ * SCSI Interrupt Mode 1 (pp. 3-28,29)
+ * Setting any bit will enable the corresponding function
+ * in SIMODE1 to interrupt via the IRQ pin.
+ */
+#define SIMODE1 0x011
+#define ENSELTIMO 0x80
+#define ENATNTARG 0x40
+#define ENSCSIRST 0x20
+#define ENPHASEMIS 0x10
+#define ENBUSFREE 0x08
+#define ENSCSIPERR 0x04
+#define ENPHASECHG 0x02
+#define ENREQINIT 0x01
+
+/*
+ * SCSI Data Bus (High) (p. 3-29)
+ * This register reads data on the SCSI Data bus directly.
+ */
+#define SCSIBUSL 0x012
+#define SCSIBUSH 0x013
+
+/*
+ * SCSI/Host Address (p. 3-30)
+ * These registers hold the host address for the byte about to be
+ * transferred on the SCSI bus. They are counted up in the same
+ * manner as STCNT is counted down. SHADDR should always be used
+ * to determine the address of the last byte transferred, since HADDR
+ * can be skewed by write-ahead.
+ */
+#define SHADDR 0x014
+#define SHADDR0 0x014
+#define SHADDR1 0x015
+#define SHADDR2 0x016
+#define SHADDR3 0x017
+
+/*
+ * Selection/Reselection ID (p. 3-31)
+ * Upper four bits are the device id. The ONEBIT is set when the re/selecting
+ * device did not set its own ID.
+ */
+#define SELID 0x019
+#define SELID_MASK 0xf0
+#define ONEBIT 0x08
+/* UNUSED 0x07 */
+
+/*
+ * SCSI Block Control (p. 3-32)
+ * Controls Bus type and channel selection. In a twin channel configuration
+ * addresses 0x00-0x1e are gated to the appropriate channel based on this
+ * register. SELWIDE allows for the coexistence of 8bit and 16bit devices
+ * on a wide bus.
+ */
+#define SBLKCTL 0x01f
+#define DIAGLEDEN 0x80 /* Aic78X0 only */
+#define DIAGLEDON 0x40 /* Aic78X0 only */
+#define AUTOFLUSHDIS 0x20
+/* UNUSED 0x10 */
+#define SELBUS_MASK 0x0a
+#define SELBUSB 0x08
+/* UNUSED 0x04 */
+#define SELWIDE 0x02
+/* UNUSED 0x01 */
+#define SELNARROW 0x00
+
+/*
+ * Sequencer Control (p. 3-33)
+ * Error detection mode and speed configuration
+ */
+#define SEQCTL 0x060
+#define PERRORDIS 0x80
+#define PAUSEDIS 0x40
+#define FAILDIS 0x20
+#define FASTMODE 0x10
+#define BRKADRINTEN 0x08
+#define STEP 0x04
+#define SEQRESET 0x02
+#define LOADRAM 0x01
+
+/*
+ * Sequencer RAM Data (p. 3-34)
+ * Single byte window into the Scratch Ram area starting at the address
+ * specified by SEQADDR0 and SEQADDR1. To write a full word, simply write
+ * four bytes in succession. The SEQADDRs will increment after the most
+ * significant byte is written
+ */
+#define SEQRAM 0x061
+
+/*
+ * Sequencer Address Registers (p. 3-35)
+ * Only the first bit of SEQADDR1 holds addressing information
+ */
+#define SEQADDR0 0x062
+#define SEQADDR1 0x063
+#define SEQADDR1_MASK 0x01
+
+/*
+ * Accumulator
+ * We cheat by passing arguments in the Accumulator up to the kernel driver
+ */
+#define ACCUM 0x064
+
+#define SINDEX 0x065
+#define DINDEX 0x066
+#define ALLZEROS 0x06a
+#define NONE 0x06a
+#define SINDIR 0x06c
+#define DINDIR 0x06d
+#define FUNCTION1 0x06e
+
+/*
+ * Host Address (p. 3-48)
+ * This register contains the address of the byte about
+ * to be transferred across the host bus.
+ */
+#define HADDR 0x088
+#define HADDR0 0x088
+#define HADDR1 0x089
+#define HADDR2 0x08a
+#define HADDR3 0x08b
+
+#define HCNT 0x08c
+#define HCNT0 0x08c
+#define HCNT1 0x08d
+#define HCNT2 0x08e
+
+/*
+ * SCB Pointer (p. 3-49)
+ * Gate one of the four SCBs into the SCBARRAY window.
+ */
+#define SCBPTR 0x090
+
+/*
+ * Board Control (p. 3-43)
+ */
+#define BCTL 0x084
+/* RSVD 0xf0 */
+#define ACE 0x08 /* Support for external processors */
+/* RSVD 0x06 */
+#define ENABLE 0x01
+
+/*
+ * On the aic78X0 chips, Board Control is replaced by the DSCommand
+ * register (p. 4-64)
+ */
+#define DSCOMMAND 0x084
+#define CACHETHEN 0x80 /* Cache Threshold enable */
+#define DPARCKEN 0x40 /* Data Parity Check Enable */
+#define MPARCKEN 0x20 /* Memory Parity Check Enable */
+#define EXTREQLCK 0x10 /* External Request Lock */
+
+/*
+ * Bus On/Off Time (p. 3-44)
+ */
+#define BUSTIME 0x085
+#define BOFF 0xf0
+#define BON 0x0f
+#define BOFF_60BCLKS 0xf0
+
+/*
+ * Bus Speed (p. 3-45)
+ */
+#define BUSSPD 0x086
+#define DFTHRSH 0xc0
+#define STBOFF 0x38
+#define STBON 0x07
+#define DFTHRSH_100 0xc0
+
+/*
+ * Host Control (p. 3-47) R/W
+ * Overall host control of the device.
+ */
+#define HCNTRL 0x087
+/* UNUSED 0x80 */
+#define POWRDN 0x40
+/* UNUSED 0x20 */
+#define SWINT 0x10
+#define IRQMS 0x08
+#define PAUSE 0x04
+#define INTEN 0x02
+#define CHIPRST 0x01
+
+/*
+ * Interrupt Status (p. 3-50)
+ * Status for system interrupts
+ */
+#define INTSTAT 0x091
+#define SEQINT_MASK 0xf1 /* SEQINT Status Codes */
+#define BAD_PHASE 0x01 /* unknown scsi bus phase */
+#define SEND_REJECT 0x11 /* sending a message reject */
+#define NO_IDENT 0x21 /* no IDENTIFY after reconnect*/
+#define NO_MATCH 0x31 /* no cmd match for reconnect */
+#define SDTR_MSG 0x41 /* SDTR message received */
+#define WDTR_MSG 0x51 /* WDTR message received */
+#define REJECT_MSG 0x61 /* Reject message received */
+#define BAD_STATUS 0x71 /* Bad status from target */
+#define RESIDUAL 0x81 /* Residual byte count != 0 */
+#define ABORT_TAG 0x91 /* Sent an ABORT_TAG message */
+#define AWAITING_MSG 0xa1 /*
+ * Kernel requested to specify
+ * a message to this target
+ * (command was null), so tell
+ * it that it can fill the
+ * message buffer.
+ */
+#define IMMEDDONE 0xb1 /*
+ * An immediate command has
+ * completed
+ */
+#define MSG_BUFFER_BUSY 0xc1 /*
+ * Sequencer wants to use the
+ * message buffer, but it
+ * already contains a message
+ */
+#define MSGIN_PHASEMIS 0xd1 /*
+ * Target changed phase on us
+ * when we were expecting
+ * another msgin byte.
+ */
+#define PARITY_ERROR 0xe1 /*
+ * Sequencer detected a parity
+ * error.
+ */
+#define BRKADRINT 0x08
+#define SCSIINT 0x04
+#define CMDCMPLT 0x02
+#define SEQINT 0x01
+#define INT_PEND (BRKADRINT | SEQINT | SCSIINT | CMDCMPLT)
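+/*
+ * INT_PEND groups every interrupt source above so INTSTAT can be
+ * tested against all of them in a single operation.
+ */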
+
+/*
+ * Hard Error (p. 3-53)
+ * Reporting of catastrophic errors. You usually cannot recover from
+ * these without a full board reset.
+ */
+#define ERROR 0x092
+/* UNUSED 0xf0 */
+#define PARERR 0x08
+#define ILLOPCODE 0x04
+#define ILLSADDR 0x02
+#define ILLHADDR 0x01
+
+/*
+ * Clear Interrupt Status (p. 3-52)
+ */
+#define CLRINT 0x092
+#define CLRBRKADRINT 0x08
+#define CLRSCSIINT 0x04
+#define CLRCMDINT 0x02
+#define CLRSEQINT 0x01
+
+#define DFCNTRL 0x093
+#define WIDEODD 0x40
+#define SCSIEN 0x20
+#define SDMAEN 0x10
+#define SDMAENACK 0x10
+#define HDMAEN 0x08
+#define HDMAENACK 0x08
+#define DIRECTION 0x04
+#define FIFOFLUSH 0x02
+#define FIFORESET 0x01
+
+#define DFSTATUS 0x094
+#define HDONE 0x08
+#define FIFOEMP 0x01
+
+#define DFDAT 0x099
+
+/*
+ * SCB Auto Increment (p. 3-59)
+ * Byte offset into the SCB Array and an optional bit to allow auto
+ * incrementing of the address during download and upload operations
+ */
+#define SCBCNT 0x09a
+#define SCBAUTO 0x80
+#define SCBCNT_MASK 0x1f
+
+/*
+ * Queue In FIFO (p. 3-60)
+ * Input queue for queued SCBs (commands that the sequencer has yet to start)
+ */
+#define QINFIFO 0x09b
+
+/*
+ * Queue In Count (p. 3-60)
+ * Number of queued SCBs
+ */
+#define QINCNT 0x09c
+
+/*
+ * Queue Out FIFO (p. 3-61)
+ * Queue of SCBs that have completed and await the host
+ */
+#define QOUTFIFO 0x09d
+
+/*
+ * Queue Out Count (p. 3-61)
+ * Number of queued SCBs in the Out FIFO
+ */
+#define QOUTCNT 0x09e
+
+/*
+ * SCB Definition (p. 5-4)
+ * The two reserved bytes at SCBARRAY+1[23] are expected to be set to
+ * zero. Bit 3 in SCBARRAY+0 is used as an internal flag to indicate
+ * whether or not to DMA an SCB from host ram. This flag prevents the
+ * "re-fetching" of transactions that are requed because the target is
+ * busy with another command. We also use bits 6 & 7 to indicate whether
+ * or not to initiate SDTR or WDTR respectively when starting this command.
+ */
+#define SCBARRAY 0x0a0
+#define SCB_CONTROL 0x0a0
+#define NEEDWDTR 0x80
+#define DISCENB 0x40
+#define TAG_ENB 0x20
+#define NEEDSDTR 0x10
+#define DISCONNECTED 0x04
+#define SCB_TAG_TYPE 0x03
+#define SCB_TCL 0x0a1
+#define SCB_TARGET_STATUS 0x0a2
+#define SCB_SGCOUNT 0x0a3
+#define SCB_SGPTR 0x0a4
+#define SCB_SGPTR0 0x0a4
+#define SCB_SGPTR1 0x0a5
+#define SCB_SGPTR2 0x0a6
+#define SCB_SGPTR3 0x0a7
+#define SCB_RESID_SGCNT 0x0a8
+#define SCB_RESID_DCNT 0x0a9
+#define SCB_RESID_DCNT0 0x0a9
+#define SCB_RESID_DCNT1 0x0aa
+#define SCB_RESID_DCNT2 0x0ab
+#define SCB_DATAPTR 0x0ac
+#define SCB_DATAPTR0 0x0ac
+#define SCB_DATAPTR1 0x0ad
+#define SCB_DATAPTR2 0x0ae
+#define SCB_DATAPTR3 0x0af
+#define SCB_DATACNT 0x0b0
+#define SCB_DATACNT0 0x0b0
+#define SCB_DATACNT1 0x0b1
+#define SCB_DATACNT2 0x0b2
+/* UNUSED - QUAD PADDING 0x0b3 */
+#define SCB_CMDPTR 0x0b4
+#define SCB_CMDPTR0 0x0b4
+#define SCB_CMDPTR1 0x0b5
+#define SCB_CMDPTR2 0x0b6
+#define SCB_CMDPTR3 0x0b7
+#define SCB_CMDLEN 0x0b8
+#define SCB_NEXT_WAITING 0x0b9
+
+#ifdef linux
+#define SG_SIZEOF 0x0c /* sizeof(struct scatterlist) */
+#else
+#define SG_SIZEOF 0x08 /* sizeof(struct ahc_dma) */
+#endif
+
+/* --------------------- AHA-2840-only definitions -------------------- */
+
+#define SEECTL_2840 0x0c0
+/* UNUSED 0xf8 */
+#define CS_2840 0x04
+#define CK_2840 0x02
+#define DO_2840 0x01
+
+#define STATUS_2840 0x0c1
+#define EEPROM_TF 0x80
+#define BIOS_SEL 0x60
+#define ADSEL 0x1e
+#define DI_2840 0x01
+
+/* --------------------- AIC-7870-only definitions -------------------- */
+
+#define DSPCISTATUS 0x086
+
+/*
+ * Serial EEPROM Control (p. 4-92 in 7870 Databook)
+ * Controls the reading and writing of an external serial 1-bit
+ * EEPROM Device. In order to access the serial EEPROM, you must
+ * first set the SEEMS bit that generates a request to the memory
+ * port for access to the serial EEPROM device. When the memory
+ * port is not busy servicing another request, it reconfigures
+ * to allow access to the serial EEPROM. When this happens, SEERDY
+ * gets set high to verify that the memory port access has been
+ * granted.
+ *
+ * After successful arbitration for the memory port, the SEECS bit of
+ * the SEECTL register is connected to the chip select. The SEECK,
+ * SEEDO, and SEEDI are connected to the clock, data out, and data in
+ * lines respectively. The SEERDY bit of SEECTL is useful in that it
+ * gives us an 800 nsec timer. After a write to the SEECTL register,
+ * the SEERDY goes high 800 nsec later. The one exception to this is
+ * when we first request access to the memory port. The SEERDY goes
+ * high to signify that access has been granted and, for this case, has
+ * no implied timing.
+ *
+ * See 93cx6.c for detailed information on the protocol necessary to
+ * read the serial EEPROM.
+ */
+#define SEECTL 0x01e
+#define EXTARBACK 0x80
+#define EXTARBREQ 0x40
+#define SEEMS 0x20
+#define SEERDY 0x10
+#define SEECS 0x08
+#define SEECK 0x04
+#define SEEDO 0x02
+#define SEEDI 0x01
+
+/* ---------------------- Scratch RAM Offsets ------------------------- */
+/* These offsets are either to values that are initialized by the board's
+ * BIOS or are specified by the sequencer code.
+ *
+ * The host adapter card (at least the BIOS) uses 20-2f for SCSI
+ * device information, 32-33 and 5a-5f as well. As it turns out, the
+ * BIOS trashes 20-2f, writing the synchronous negotiation results
+ * on top of the BIOS values, so we re-use those for our per-target
+ * scratchspace (actually a value that can be copied directly into
+ * SCSIRATE). The kernel driver will enable synchronous negotiation
+ * for all targets that have a value other than 0 in the lower four
+ * bits of the target scratch space. This should work regardless of
+ * whether the bios has been installed.
+ */
+
+/*
+ * 1 byte per target starting at this address for configuration values
+ */
+#define TARG_SCRATCH 0x020
+
+/*
+ * The sequencer will stick the first byte of any rejected message here so
+ * we can see what is getting thrown away.
+ */
+#define REJBYTE 0x031
+
+/*
+ * Bit vector of targets that have disconnection disabled.
+ */
+#define DISC_DSB 0x032
+#define DISC_DSB_A 0x032
+#define DISC_DSB_B 0x033
+
+/*
+ * Length of pending message
+ */
+#define MSG_LEN 0x034
+
+#define MSG0 0x035
+#define COMP_MSG0 0xcb /* 2's complement of MSG0 */
+#define MSG1 0x036
+#define MSG2 0x037
+#define MSG3 0x038
+#define MSG4 0x039
+#define MSG5 0x03a
+
+/*
+ * These are offsets into the card's scratch ram. Some of the values are
+ * specified in the AHA2742 technical reference manual and are initialized
+ * by the BIOS at boot time.
+ */
+#define LASTPHASE 0x049
+#define ARG_1 0x04a
+#define RETURN_1 0x04a
+#define SEND_SENSE 0x80
+#define SEND_WDTR 0x80
+#define SEND_SDTR 0x80
+#define SEND_REJ 0x40
+
+#define SIGSTATE 0x04b
+
+#define DMAPARAMS 0x04c /* Parameters for DMA Logic */
+
+#define SG_COUNT 0x04d
+#define SG_NEXT 0x04e /* working value of SG pointer */
+#define SG_NEXT0 0x04e
+#define SG_NEXT1 0x04f
+#define SG_NEXT2 0x050
+#define SG_NEXT3 0x051
+
+#define SCBCOUNT 0x052 /*
+ * Number of SCBs supported by
+ * this card.
+ */
+#define FLAGS 0x053
+#define SINGLE_BUS 0x00
+#define TWIN_BUS 0x01
+#define WIDE_BUS 0x02
+#define DPHASE 0x04
+#define MAXOFFSET 0x08
+#define IDENTIFY_SEEN 0x40
+#define RESELECTED 0x80
+
+#define ACTIVE_A 0x054
+#define ACTIVE_B 0x055
+#define SAVED_TCL 0x056 /*
+ * Temporary storage for the
+ * target/channel/lun of a
+ * reconnecting target
+ */
+#define WAITING_SCBH 0x057 /*
+ * head of list of SCBs awaiting
+ * selection
+ */
+#define WAITING_SCBT 0x058 /*
+ * tail of list of SCBs awaiting
+ * selection
+ */
+#define COMP_SCBCOUNT 0x059
+#define SCB_LIST_NULL 0xff
+
+#define SCSICONF 0x05a
+#define HOSTCONF 0x05d
+
+#define HA_274_BIOSCTRL 0x05f
+#define BIOSMODE 0x30
+#define BIOSDISABLED 0x30
+
+/* Message codes */
+#define MSG_EXTENDED 0x01
+#define MSG_SDTR 0x01
+#define MSG_WDTR 0x03
+#define MSG_SDPTRS 0x02
+#define MSG_RDPTRS 0x03
+#define MSG_DISCONNECT 0x04
+#define MSG_INITIATOR_DET_ERROR 0x05
+#define MSG_ABORT 0x06
+#define MSG_REJECT 0x07
+#define MSG_NOP 0x08
+#define MSG_MSG_PARITY_ERROR 0x09
+#define MSG_BUS_DEVICE_RESET 0x0c
+#define MSG_SIMPLE_TAG 0x20
+#define MSG_IDENTIFY 0x80
+
+/* WDTR Message values */
+#define BUS_8_BIT 0x00
+#define BUS_16_BIT 0x01
+#define BUS_32_BIT 0x02
+
+#define MAX_OFFSET_8BIT 0x0f
+#define MAX_OFFSET_16BIT 0x08
+
diff --git a/i386/i386at/gpl/linux/scsi/aic7xxx_seq.h b/i386/i386at/gpl/linux/scsi/aic7xxx_seq.h
new file mode 100644
index 00000000..d08f7d81
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/aic7xxx_seq.h
@@ -0,0 +1,374 @@
+#define AIC7XXX_SEQ_VER "$Id: aic7xxx_seq.h,v 1.1.1.1 1997/02/25 21:27:47 thomas Exp $"
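+/*
+ * Assembled sequencer microcode: each line below is one four-byte
+ * sequencer instruction that the kernel driver downloads into the
+ * chip's on-board sequencer RAM (via SEQCTL/SEQRAM) at setup time.
+ */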
+ 0x10, 0x6a, 0x00, 0x00,
+ 0x01, 0x53, 0x05, 0x1e,
+ 0x08, 0x1f, 0x1f, 0x04,
+ 0x20, 0x0b, 0x32, 0x1a,
+ 0x08, 0x1f, 0x1f, 0x04,
+ 0x20, 0x0b, 0x32, 0x1a,
+ 0xff, 0x57, 0x12, 0x18,
+ 0xff, 0x9c, 0x01, 0x1e,
+ 0xff, 0x9b, 0x90, 0x02,
+ 0xff, 0xa1, 0x6e, 0x02,
+ 0xff, 0x6e, 0x64, 0x02,
+ 0x88, 0xa1, 0x14, 0x1e,
+ 0x00, 0x55, 0x10, 0x1a,
+ 0x20, 0xa0, 0x17, 0x1a,
+ 0x00, 0x55, 0x55, 0x00,
+ 0x00, 0x65, 0x17, 0x10,
+ 0xff, 0x90, 0x9b, 0x02,
+ 0x00, 0x65, 0x01, 0x10,
+ 0xff, 0x57, 0x90, 0x02,
+ 0x00, 0x65, 0x19, 0x10,
+ 0x00, 0x54, 0x10, 0x1a,
+ 0x20, 0xa0, 0x17, 0x1a,
+ 0x00, 0x54, 0x54, 0x00,
+ 0xff, 0x57, 0xb9, 0x02,
+ 0xff, 0x90, 0x57, 0x02,
+ 0xf7, 0x1f, 0x65, 0x02,
+ 0x08, 0xa1, 0x64, 0x02,
+ 0x00, 0x65, 0x65, 0x00,
+ 0xff, 0x65, 0x1f, 0x02,
+ 0x00, 0xa1, 0x1f, 0x17,
+ 0x58, 0x6a, 0x00, 0x00,
+ 0xff, 0xb8, 0x22, 0x1a,
+ 0xa1, 0x6a, 0x91, 0x00,
+ 0x00, 0x65, 0x30, 0x10,
+ 0x40, 0xa0, 0x64, 0x02,
+ 0x07, 0xa1, 0x35, 0x02,
+ 0x00, 0x35, 0x35, 0x00,
+ 0x80, 0x35, 0x35, 0x00,
+ 0x01, 0x6a, 0x34, 0x00,
+ 0xb0, 0xa0, 0x30, 0x1e,
+ 0x36, 0x6a, 0x66, 0x00,
+ 0x20, 0xa0, 0x2e, 0x1e,
+ 0x23, 0xa0, 0x64, 0x02,
+ 0xff, 0x64, 0x6d, 0x02,
+ 0xff, 0x90, 0x6d, 0x02,
+ 0xcb, 0x66, 0x34, 0x06,
+ 0x90, 0xa0, 0x30, 0x1e,
+ 0x00, 0x66, 0x51, 0x17,
+ 0x40, 0x0b, 0x37, 0x1a,
+ 0x20, 0x0b, 0x30, 0x1e,
+ 0xff, 0x6a, 0x34, 0x02,
+ 0x00, 0x19, 0x1f, 0x17,
+ 0x03, 0x53, 0x53, 0x02,
+ 0x80, 0x53, 0x53, 0x00,
+ 0x00, 0x65, 0x39, 0x10,
+ 0x03, 0x53, 0x53, 0x02,
+ 0xff, 0xb9, 0x57, 0x02,
+ 0x02, 0x01, 0x01, 0x00,
+ 0x00, 0x65, 0x4d, 0x17,
+ 0xff, 0x6c, 0x04, 0x02,
+ 0x02, 0x6a, 0x00, 0x00,
+ 0x08, 0x6a, 0x0c, 0x00,
+ 0x60, 0x6a, 0x0b, 0x00,
+ 0x08, 0x0c, 0x04, 0x1b,
+ 0x01, 0x0c, 0x3f, 0x1e,
+ 0x04, 0x0c, 0x44, 0x1e,
+ 0x04, 0x0c, 0x0c, 0x00,
+ 0xe1, 0x6a, 0x91, 0x00,
+ 0xe0, 0x03, 0x64, 0x02,
+ 0xff, 0x64, 0x49, 0x02,
+ 0xff, 0x64, 0x03, 0x02,
+ 0x00, 0x6a, 0x4e, 0x1c,
+ 0x40, 0x64, 0x54, 0x1c,
+ 0x80, 0x64, 0x81, 0x1c,
+ 0xa0, 0x64, 0x90, 0x1c,
+ 0xc0, 0x64, 0x8e, 0x1c,
+ 0xe0, 0x64, 0xa4, 0x1c,
+ 0x01, 0x6a, 0x91, 0x00,
+ 0x7d, 0x6a, 0x4c, 0x00,
+ 0x00, 0x65, 0x55, 0x10,
+ 0xff, 0xa9, 0x08, 0x02,
+ 0xff, 0xaa, 0x09, 0x02,
+ 0xff, 0xab, 0x0a, 0x02,
+ 0x00, 0x65, 0x59, 0x10,
+ 0x79, 0x6a, 0x4c, 0x00,
+ 0x00, 0x65, 0x23, 0x17,
+ 0x04, 0x53, 0x50, 0x1a,
+ 0x00, 0x65, 0x31, 0x17,
+ 0x04, 0x53, 0x53, 0x00,
+ 0x01, 0x4d, 0x5b, 0x18,
+ 0xbf, 0x4c, 0x4c, 0x02,
+ 0x00, 0x4c, 0x17, 0x17,
+ 0x04, 0x0b, 0x7c, 0x1e,
+ 0xff, 0x4d, 0x4d, 0x06,
+ 0xff, 0x4d, 0x7c, 0x1e,
+ 0xff, 0x6a, 0x64, 0x02,
+ 0x0c, 0x4e, 0x4e, 0x06,
+ 0x00, 0x4f, 0x4f, 0x08,
+ 0xff, 0x6a, 0x8e, 0x02,
+ 0xff, 0x6a, 0x8d, 0x02,
+ 0x0c, 0x6a, 0x8c, 0x00,
+ 0xff, 0x4e, 0x88, 0x02,
+ 0xff, 0x4f, 0x89, 0x02,
+ 0xff, 0x50, 0x8a, 0x02,
+ 0xff, 0x51, 0x8b, 0x02,
+ 0x0d, 0x93, 0x93, 0x00,
+ 0x08, 0x94, 0x6a, 0x1e,
+ 0x40, 0x93, 0x93, 0x02,
+ 0x08, 0x93, 0x6c, 0x1a,
+ 0xff, 0x99, 0x88, 0x02,
+ 0xff, 0x99, 0x89, 0x02,
+ 0xff, 0x99, 0x8a, 0x02,
+ 0xff, 0x99, 0x8b, 0x02,
+ 0xff, 0x99, 0x6a, 0x02,
+ 0xff, 0x99, 0x6a, 0x02,
+ 0xff, 0x99, 0x6a, 0x02,
+ 0xff, 0x99, 0x6a, 0x02,
+ 0xff, 0x99, 0x8c, 0x02,
+ 0xff, 0x99, 0x8d, 0x02,
+ 0xff, 0x99, 0x8e, 0x02,
+ 0xff, 0x8c, 0x08, 0x02,
+ 0xff, 0x8d, 0x09, 0x02,
+ 0xff, 0x8e, 0x0a, 0x02,
+ 0x10, 0x0c, 0x59, 0x1e,
+ 0xff, 0x08, 0xa9, 0x02,
+ 0xff, 0x09, 0xaa, 0x02,
+ 0xff, 0x0a, 0xab, 0x02,
+ 0xff, 0x4d, 0xa8, 0x02,
+ 0x00, 0x65, 0x3f, 0x10,
+ 0x00, 0x65, 0x23, 0x17,
+ 0xff, 0xb4, 0x88, 0x02,
+ 0xff, 0xb5, 0x89, 0x02,
+ 0xff, 0xb6, 0x8a, 0x02,
+ 0xff, 0xb7, 0x8b, 0x02,
+ 0xff, 0xb8, 0x8c, 0x02,
+ 0xff, 0x6a, 0x8d, 0x02,
+ 0xff, 0x6a, 0x8e, 0x02,
+ 0xff, 0x8c, 0x08, 0x02,
+ 0xff, 0x8d, 0x09, 0x02,
+ 0xff, 0x8e, 0x0a, 0x02,
+ 0x3d, 0x6a, 0x17, 0x17,
+ 0x00, 0x65, 0x3f, 0x10,
+ 0xa2, 0x6a, 0x12, 0x17,
+ 0x00, 0x65, 0xb0, 0x10,
+ 0xff, 0x34, 0x92, 0x1a,
+ 0x08, 0x6a, 0x07, 0x17,
+ 0x35, 0x6a, 0x65, 0x00,
+ 0xff, 0x34, 0x66, 0x02,
+ 0x10, 0x0c, 0xa1, 0x1a,
+ 0x02, 0x0b, 0x94, 0x1e,
+ 0x01, 0x66, 0x98, 0x18,
+ 0x40, 0x6a, 0x0c, 0x00,
+ 0xff, 0x66, 0x66, 0x06,
+ 0x02, 0x0b, 0x0b, 0x00,
+ 0xff, 0x6c, 0x06, 0x02,
+ 0xff, 0x66, 0x94, 0x1a,
+ 0x08, 0x0c, 0xa2, 0x1a,
+ 0x01, 0x0c, 0x9c, 0x1e,
+ 0x10, 0x0c, 0xa2, 0x1a,
+ 0x10, 0x03, 0x03, 0x00,
+ 0x00, 0x65, 0x3f, 0x10,
+ 0x40, 0x6a, 0x0c, 0x00,
+ 0xff, 0x6a, 0x34, 0x02,
+ 0x00, 0x65, 0x3f, 0x10,
+ 0x64, 0x6a, 0x12, 0x17,
+ 0xff, 0x64, 0x31, 0x02,
+ 0x80, 0x64, 0xe4, 0x1a,
+ 0x04, 0x64, 0xde, 0x1c,
+ 0x02, 0x64, 0xe0, 0x1c,
+ 0x00, 0x6a, 0xb2, 0x1c,
+ 0x03, 0x64, 0xe2, 0x1c,
+ 0x01, 0x64, 0xc5, 0x1c,
+ 0x07, 0x64, 0x02, 0x1d,
+ 0x10, 0x03, 0x03, 0x00,
+ 0x11, 0x6a, 0x91, 0x00,
+ 0x07, 0x6a, 0x07, 0x17,
+ 0x00, 0x65, 0x14, 0x17,
+ 0x00, 0x65, 0x3f, 0x10,
+ 0xff, 0xa8, 0xb4, 0x1e,
+ 0x81, 0x6a, 0x91, 0x00,
+ 0xff, 0xa2, 0xb8, 0x1e,
+ 0x71, 0x6a, 0x91, 0x00,
+ 0x80, 0x4a, 0xb8, 0x18,
+ 0x00, 0x65, 0xb0, 0x10,
+ 0x20, 0xa0, 0xbf, 0x1a,
+ 0xff, 0xa1, 0x6e, 0x02,
+ 0xff, 0x6e, 0x64, 0x02,
+ 0x88, 0xa1, 0xbe, 0x1e,
+ 0x00, 0x55, 0x55, 0x04,
+ 0x00, 0x65, 0xbf, 0x10,
+ 0x00, 0x54, 0x54, 0x04,
+ 0xff, 0xb8, 0xc2, 0x1a,
+ 0xb1, 0x6a, 0x91, 0x00,
+ 0x00, 0x65, 0x00, 0x10,
+ 0xff, 0x90, 0x9d, 0x02,
+ 0x02, 0x6a, 0x91, 0x00,
+ 0x00, 0x65, 0xb0, 0x10,
+ 0x4a, 0x6a, 0x0e, 0x17,
+ 0x64, 0x6a, 0x0e, 0x17,
+ 0x01, 0x64, 0xd4, 0x1c,
+ 0x03, 0x64, 0xca, 0x1c,
+ 0x00, 0x65, 0xad, 0x10,
+ 0x02, 0x4a, 0xad, 0x18,
+ 0x4a, 0x6a, 0x0e, 0x17,
+ 0x51, 0x6a, 0x91, 0x00,
+ 0xff, 0x4a, 0xb0, 0x1e,
+ 0x40, 0x4a, 0xad, 0x1c,
+ 0x7f, 0x4a, 0x4a, 0x02,
+ 0x35, 0x6a, 0x66, 0x00,
+ 0x35, 0x6a, 0x62, 0x17,
+ 0x10, 0x03, 0x03, 0x00,
+ 0x00, 0x65, 0xb0, 0x10,
+ 0x03, 0x4a, 0xad, 0x18,
+ 0x4a, 0x6a, 0x0e, 0x17,
+ 0x64, 0x6a, 0x0e, 0x17,
+ 0x41, 0x6a, 0x91, 0x00,
+ 0xff, 0x4a, 0xb0, 0x1e,
+ 0x40, 0x4a, 0xad, 0x1c,
+ 0x35, 0x6a, 0x66, 0x00,
+ 0x35, 0x6a, 0x53, 0x17,
+ 0x10, 0x03, 0x03, 0x00,
+ 0x00, 0x65, 0xb0, 0x10,
+ 0x04, 0xa0, 0xa0, 0x00,
+ 0x00, 0x65, 0xb0, 0x10,
+ 0x00, 0x65, 0x40, 0x17,
+ 0x00, 0x65, 0xb0, 0x10,
+ 0xfb, 0x53, 0x53, 0x02,
+ 0x00, 0x65, 0xb0, 0x10,
+ 0x78, 0x64, 0xad, 0x1a,
+ 0x07, 0x64, 0x64, 0x02,
+ 0x00, 0x19, 0x56, 0x00,
+ 0xf7, 0x56, 0x56, 0x02,
+ 0x08, 0x1f, 0x64, 0x02,
+ 0x00, 0x56, 0x56, 0x00,
+ 0x00, 0x65, 0x14, 0x17,
+ 0x08, 0x0c, 0xf0, 0x1a,
+ 0x01, 0x0c, 0xeb, 0x1e,
+ 0x10, 0x0c, 0xf0, 0x1a,
+ 0x64, 0x6a, 0x12, 0x17,
+ 0x20, 0x64, 0xf4, 0x1c,
+ 0x00, 0x6a, 0x26, 0x17,
+ 0xfb, 0xa0, 0xa0, 0x02,
+ 0x40, 0x53, 0x53, 0x00,
+ 0x00, 0x65, 0x3f, 0x10,
+ 0x4a, 0x6a, 0x0e, 0x17,
+ 0xff, 0x59, 0x64, 0x02,
+ 0x00, 0x4a, 0x65, 0x06,
+ 0x00, 0x65, 0xfe, 0x12,
+ 0xff, 0x4a, 0x90, 0x02,
+ 0xff, 0x56, 0x64, 0x02,
+ 0x00, 0xa1, 0xfe, 0x18,
+ 0x20, 0xa0, 0xfe, 0x1e,
+ 0x00, 0x65, 0x14, 0x17,
+ 0x00, 0x65, 0xf1, 0x10,
+ 0x10, 0x03, 0x03, 0x00,
+ 0x91, 0x6a, 0x91, 0x00,
+ 0x0d, 0x6a, 0x07, 0x17,
+ 0x00, 0x65, 0xb0, 0x10,
+ 0x61, 0x6a, 0x91, 0x00,
+ 0x00, 0x65, 0xb0, 0x10,
+ 0x40, 0x6a, 0x0c, 0x00,
+ 0xff, 0xb8, 0xb8, 0x1e,
+ 0x00, 0x65, 0x00, 0x10,
+ 0x50, 0x6a, 0x60, 0x00,
+ 0xff, 0x34, 0x0b, 0x1f,
+ 0x10, 0x6a, 0x60, 0x00,
+ 0xc1, 0x6a, 0x91, 0x00,
+ 0x01, 0x6a, 0x34, 0x00,
+ 0xff, 0x65, 0x35, 0x02,
+ 0x10, 0x6a, 0x60, 0x01,
+ 0x02, 0x0b, 0x0b, 0x00,
+ 0xff, 0x06, 0x6a, 0x02,
+ 0x10, 0x0c, 0x15, 0x1b,
+ 0x02, 0x0b, 0x10, 0x1f,
+ 0xff, 0x65, 0x66, 0x02,
+ 0xff, 0x12, 0x6d, 0x03,
+ 0xff, 0x06, 0x6a, 0x03,
+ 0xd1, 0x6a, 0x91, 0x00,
+ 0x00, 0x65, 0x3f, 0x10,
+ 0xff, 0x65, 0x93, 0x02,
+ 0x01, 0x0b, 0x1a, 0x1b,
+ 0x10, 0x0c, 0x18, 0x1f,
+ 0x04, 0x65, 0x1c, 0x1b,
+ 0x01, 0x94, 0x1b, 0x1f,
+ 0x40, 0x93, 0x93, 0x02,
+ 0x38, 0x93, 0x1d, 0x1b,
+ 0xff, 0x6a, 0x6a, 0x03,
+ 0xf0, 0x65, 0x65, 0x02,
+ 0x0f, 0x05, 0x64, 0x02,
+ 0x00, 0x65, 0x65, 0x00,
+ 0xff, 0x65, 0x05, 0x03,
+ 0x80, 0x53, 0x74, 0x1f,
+ 0x40, 0x53, 0x74, 0x1b,
+ 0x21, 0x6a, 0x91, 0x01,
+ 0xff, 0x56, 0x64, 0x02,
+ 0xff, 0x65, 0x90, 0x02,
+ 0x00, 0xa1, 0x2b, 0x19,
+ 0x04, 0xa0, 0x2b, 0x1f,
+ 0xff, 0x6a, 0x6a, 0x03,
+ 0x01, 0x65, 0x65, 0x06,
+ 0xff, 0x52, 0x64, 0x02,
+ 0x00, 0x65, 0x26, 0x19,
+ 0x31, 0x6a, 0x91, 0x00,
+ 0x06, 0x6a, 0x07, 0x17,
+ 0x10, 0x03, 0x03, 0x01,
+ 0xff, 0xac, 0x88, 0x02,
+ 0xff, 0xad, 0x89, 0x02,
+ 0xff, 0xae, 0x8a, 0x02,
+ 0xff, 0xaf, 0x8b, 0x02,
+ 0xff, 0xb0, 0x8c, 0x02,
+ 0xff, 0xb1, 0x8d, 0x02,
+ 0xff, 0xb2, 0x8e, 0x02,
+ 0xff, 0x8c, 0x08, 0x02,
+ 0xff, 0x8d, 0x09, 0x02,
+ 0xff, 0x8e, 0x0a, 0x02,
+ 0xff, 0xa3, 0x4d, 0x02,
+ 0xff, 0xa4, 0x4e, 0x02,
+ 0xff, 0xa5, 0x4f, 0x02,
+ 0xff, 0xa6, 0x50, 0x02,
+ 0xff, 0xa7, 0x51, 0x03,
+ 0x04, 0x53, 0x74, 0x1f,
+ 0xff, 0x4d, 0xa3, 0x02,
+ 0xff, 0x4e, 0xa4, 0x02,
+ 0xff, 0x4f, 0xa5, 0x02,
+ 0xff, 0x50, 0xa6, 0x02,
+ 0xff, 0x51, 0xa7, 0x02,
+ 0xff, 0x14, 0xac, 0x02,
+ 0xff, 0x15, 0xad, 0x02,
+ 0xff, 0x16, 0xae, 0x02,
+ 0xff, 0x17, 0xaf, 0x02,
+ 0xff, 0xa9, 0xb0, 0x02,
+ 0xff, 0xaa, 0xb1, 0x02,
+ 0xff, 0xab, 0xb2, 0x03,
+ 0x4c, 0x05, 0x64, 0x0a,
+ 0x08, 0x1f, 0x50, 0x1f,
+ 0x08, 0x64, 0x64, 0x00,
+ 0x20, 0x64, 0x65, 0x07,
+ 0x80, 0xa0, 0x61, 0x1b,
+ 0x08, 0x53, 0x53, 0x00,
+ 0x01, 0x6a, 0x6d, 0x00,
+ 0x03, 0x6a, 0x6d, 0x00,
+ 0x01, 0x6a, 0x6d, 0x00,
+ 0x00, 0x65, 0x67, 0x17,
+ 0xff, 0x4a, 0x6d, 0x02,
+ 0x08, 0x53, 0x5b, 0x1b,
+ 0x0f, 0x6c, 0x6d, 0x02,
+ 0xcb, 0x66, 0x34, 0x07,
+ 0x08, 0x53, 0x53, 0x04,
+ 0x80, 0x04, 0x5f, 0x1b,
+ 0x0f, 0x6a, 0x6d, 0x00,
+ 0x00, 0x65, 0x5a, 0x11,
+ 0x08, 0x6a, 0x6d, 0x00,
+ 0x00, 0x65, 0x5a, 0x11,
+ 0x01, 0x6a, 0x4a, 0x00,
+ 0x01, 0x6a, 0x6d, 0x00,
+ 0x02, 0x6a, 0x6d, 0x00,
+ 0x03, 0x6a, 0x6d, 0x00,
+ 0xff, 0x4a, 0x6d, 0x02,
+ 0xcb, 0x66, 0x34, 0x07,
+ 0x00, 0x65, 0x4d, 0x17,
+ 0x4c, 0x6c, 0x64, 0x0a,
+ 0xff, 0x65, 0x65, 0x06,
+ 0x07, 0x64, 0x64, 0x02,
+ 0xff, 0x6a, 0x4a, 0x02,
+ 0x0f, 0x64, 0x70, 0x1f,
+ 0x19, 0x4a, 0x4a, 0x06,
+ 0xff, 0x64, 0x64, 0x06,
+ 0x00, 0x65, 0x6c, 0x11,
+ 0x2e, 0x4a, 0x4a, 0x0a,
+ 0x19, 0x4a, 0x4a, 0x06,
+ 0x20, 0x01, 0x74, 0x1f,
+ 0x1f, 0x4a, 0x4a, 0x0a,
+ 0xff, 0x6a, 0x6a, 0x03,
diff --git a/i386/i386at/gpl/linux/scsi/constants.c b/i386/i386at/gpl/linux/scsi/constants.c
new file mode 100644
index 00000000..07f071b2
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/constants.c
@@ -0,0 +1,649 @@
+/*
+ * ASCII values for a number of symbolic constants, printing functions,
+ * etc.
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/config.h>
+#include <linux/blk.h>
+#include <linux/kernel.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#define CONST_COMMAND 0x01
+#define CONST_STATUS 0x02
+#define CONST_SENSE 0x04
+#define CONST_XSENSE 0x08
+#define CONST_CMND 0x10
+#define CONST_MSG 0x20
+#define CONST_HOST 0x40
+#define CONST_DRIVER 0x80
+
+static const char unknown[] = "UNKNOWN";
+
+#ifdef CONFIG_SCSI_CONSTANTS
+#ifdef CONSTANTS
+#undef CONSTANTS
+#endif
+#define CONSTANTS (CONST_COMMAND | CONST_STATUS | CONST_SENSE | CONST_XSENSE \
+ | CONST_CMND | CONST_MSG | CONST_HOST | CONST_DRIVER)
+#endif
+
+#if (CONSTANTS & CONST_COMMAND)
+static const char * group_0_commands[] = {
+/* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense",
+/* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reasssign Blocks",
+/* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown,
+/* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
+/* 13-16 */ unknown, "Recover Buffered Data", "Mode Select", "Reserve",
+/* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit",
+/* 1c-1d */ "Receive Diagnostic", "Send Diagnostic",
+/* 1e-1f */ "Prevent/Allow Medium Removal", unknown,
+};
+
+
+static const char *group_1_commands[] = {
+/* 20-22 */ unknown, unknown, unknown,
+/* 23-28 */ unknown, unknown, "Read Capacity", unknown, unknown, "Read (10)",
+/* 29-2d */ unknown, "Write (10)", "Seek (10)", unknown, unknown,
+/* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal",
+/* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position",
+/* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data",
+/* 38-3c */ "Medium Scan", "Compare","Copy Verify", "Write Buffer", "Read Buffer",
+/* 3d-3f */ "Update Block", "Read Long", "Write Long",
+};
+
+
+static const char *group_2_commands[] = {
+/* 40-41 */ "Change Definition", "Write Same",
+/* 42-48 */ unknown, unknown, unknown, unknown, unknown, unknown, unknown,
+/* 49-4f */ unknown, unknown, unknown, "Log Select", "Log Sense", unknown, unknown,
+/* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)",
+/* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown,
+/* 5c-5f */ unknown, unknown, unknown,
+};
+
+
+
+#define group(opcode) (((opcode) >> 5) & 7)
+
+#define RESERVED_GROUP 0
+#define VENDOR_GROUP 1
+#define NOTEXT_GROUP 2
+
+static const char **commands[] = {
+ group_0_commands, group_1_commands, group_2_commands,
+ (const char **) RESERVED_GROUP, (const char **) RESERVED_GROUP,
+ (const char **) NOTEXT_GROUP, (const char **) VENDOR_GROUP,
+ (const char **) VENDOR_GROUP
+};
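+/*
+ * The RESERVED/VENDOR/NOTEXT group codes are stored in commands[] as
+ * small-integer "pointers"; print_opcode() below compares each table
+ * entry against them to decide whether a real text lookup is possible.
+ */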
+
+static const char reserved[] = "RESERVED";
+static const char vendor[] = "VENDOR SPECIFIC";
+
+static void print_opcode(int opcode) {
+ const char **table = commands[ group(opcode) ];
+ switch ((unsigned long) table) {
+ case RESERVED_GROUP:
+ printk("%s(0x%02x) ", reserved, opcode);
+ break;
+ case NOTEXT_GROUP:
+ printk("%s(0x%02x) ", unknown, opcode);
+ break;
+ case VENDOR_GROUP:
+ printk("%s(0x%02x) ", vendor, opcode);
+ break;
+ default:
+ printk("%s ",table[opcode & 0x1f]);
+ }
+}
+#else /* CONST & CONST_COMMAND */
+static void print_opcode(int opcode) {
+ printk("0x%02x ", opcode);
+}
+#endif
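+
+/*
+ * Example: with CONFIG_SCSI_CONSTANTS enabled, opcode 0x2a has
+ * group(0x2a) == 1, so it is looked up as group_1_commands[0x2a & 0x1f]
+ * and prints "Write (10)".  Opcode 0xc2 falls in group 6, which is tagged
+ * VENDOR_GROUP and prints "VENDOR SPECIFIC(0xc2)".
+ */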
+
+void print_command (unsigned char *command) {
+ int i,s;
+ print_opcode(command[0]);
+ for ( i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ printk("%02x ", command[i]);
+ printk("\n");
+}
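+
+/*
+ * Example: with CONFIG_SCSI_CONSTANTS enabled, the six byte INQUIRY CDB
+ * 12 00 00 00 24 00 is printed as "Inquiry 00 00 00 24 00".
+ */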
+
+#if (CONSTANTS & CONST_STATUS)
+static const char * statuses[] = {
+/* 0-4 */ "Good", "Check Condition", "Condition Good", unknown, "Busy",
+/* 5-9 */ unknown, unknown, unknown, "Intermediate Good", unknown,
+/* a-d */ "Intermediate Good", unknown, "Reservation Conflict", unknown,
+/* e-f */ unknown, unknown,
+};
+#endif
+
+void print_status (int status) {
+ status = (status >> 1) & 0xf;
+#if (CONSTANTS & CONST_STATUS)
+ printk("%s ",statuses[status]);
+#else
+ printk("0x%0x ", status);
+#endif
+}
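+
+/*
+ * Example: a raw SCSI status byte of 0x02 (CHECK CONDITION) yields
+ * (0x02 >> 1) & 0xf == 1, so print_status() reports "Check Condition".
+ */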
+
+#if (CONSTANTS & CONST_XSENSE)
+#define D 0x001 /* DIRECT ACCESS DEVICE (disk) */
+#define T 0x002 /* SEQUENTIAL ACCESS DEVICE (tape) */
+#define L 0x004 /* PRINTER DEVICE */
+#define P 0x008 /* PROCESSOR DEVICE */
+#define W 0x010 /* WRITE ONCE READ MULTIPLE DEVICE */
+#define R 0x020 /* READ ONLY (CD-ROM) DEVICE */
+#define S 0x040 /* SCANNER DEVICE */
+#define O 0x080 /* OPTICAL MEMORY DEVICE */
+#define M 0x100 /* MEDIA CHANGER DEVICE */
+#define C 0x200 /* COMMUNICATION DEVICE */
+
+struct error_info{
+ unsigned char code1, code2;
+ unsigned short int devices;
+ const char * text;
+};
+
+struct error_info2{
+ unsigned char code1, code2_min, code2_max;
+ unsigned short int devices;
+ const char * text;
+};
+
+static struct error_info2 additional2[] =
+{
+ {0x40,0x00,0x7f,D,"Ram failure (%x)"},
+ {0x40,0x80,0xff,D|T|L|P|W|R|S|O|M|C,"Diagnostic failure on component (%x)"},
+ {0x41,0x00,0xff,D,"Data path failure (%x)"},
+ {0x42,0x00,0xff,D,"Power-on or self-test failure (%x)"},
+ {0, 0, 0, 0, NULL}
+};
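+
+/*
+ * additional2[] covers ASC values whose ASCQ selects a range rather than
+ * a single code; the text is a printk format and the ASCQ is passed as
+ * its argument.  For example, ASC 0x40 with ASCQ 0x85 falls in the
+ * 0x80-0xff range and prints "Diagnostic failure on component (85)".
+ */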
+
+static struct error_info additional[] =
+{
+ {0x00,0x01,T,"Filemark detected"},
+ {0x00,0x02,T|S,"End-of-partition/medium detected"},
+ {0x00,0x03,T,"Setmark detected"},
+ {0x00,0x04,T|S,"Beginning-of-partition/medium detected"},
+ {0x00,0x05,T|S,"End-of-data detected"},
+ {0x00,0x06,D|T|L|P|W|R|S|O|M|C,"I/O process terminated"},
+ {0x00,0x11,R,"Audio play operation in progress"},
+ {0x00,0x12,R,"Audio play operation paused"},
+ {0x00,0x13,R,"Audio play operation successfully completed"},
+ {0x00,0x14,R,"Audio play operation stopped due to error"},
+ {0x00,0x15,R,"No current audio status to return"},
+ {0x01,0x00,D|W|O,"No index/sector signal"},
+ {0x02,0x00,D|W|R|O|M,"No seek complete"},
+ {0x03,0x00,D|T|L|W|S|O,"Peripheral device write fault"},
+ {0x03,0x01,T,"No write current"},
+ {0x03,0x02,T,"Excessive write errors"},
+ {0x04,0x00,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit not ready, cause not reportable"},
+ {0x04,0x01,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit is in process of becoming ready"},
+ {0x04,0x02,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit not ready, initializing command required"},
+ {0x04,0x03,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit not ready, manual intervention required"},
+ {0x04,0x04,D|T|L|O,"Logical unit not ready, format in progress"},
+ {0x05,0x00,D|T|L|W|R|S|O|M|C,"Logical unit does not respond to selection"},
+ {0x06,0x00,D|W|R|O|M,"No reference position found"},
+ {0x07,0x00,D|T|L|W|R|S|O|M,"Multiple peripheral devices selected"},
+ {0x08,0x00,D|T|L|W|R|S|O|M|C,"Logical unit communication failure"},
+ {0x08,0x01,D|T|L|W|R|S|O|M|C,"Logical unit communication time-out"},
+ {0x08,0x02,D|T|L|W|R|S|O|M|C,"Logical unit communication parity error"},
+ {0x09,0x00,D|T|W|R|O,"Track following error"},
+ {0x09,0x01,W|R|O,"Tracking servo failure"},
+ {0x09,0x02,W|R|O,"Focus servo failure"},
+ {0x09,0x03,W|R|O,"Spindle servo failure"},
+ {0x0A,0x00,D|T|L|P|W|R|S|O|M|C,"Error log overflow"},
+ {0x0C,0x00,T|S,"Write error"},
+ {0x0C,0x01,D|W|O,"Write error recovered with auto reallocation"},
+ {0x0C,0x02,D|W|O,"Write error - auto reallocation failed"},
+ {0x10,0x00,D|W|O,"Id crc or ecc error"},
+ {0x11,0x00,D|T|W|R|S|O,"Unrecovered read error"},
+ {0x11,0x01,D|T|W|S|O,"Read retries exhausted"},
+ {0x11,0x02,D|T|W|S|O,"Error too long to correct"},
+ {0x11,0x03,D|T|W|S|O,"Multiple read errors"},
+ {0x11,0x04,D|W|O,"Unrecovered read error - auto reallocate failed"},
+ {0x11,0x05,W|R|O,"L-ec uncorrectable error"},
+ {0x11,0x06,W|R|O,"Circ unrecovered error"},
+ {0x11,0x07,W|O,"Data resynchronization error"},
+ {0x11,0x08,T,"Incomplete block read"},
+ {0x11,0x09,T,"No gap found"},
+ {0x11,0x0A,D|T|O,"Miscorrected error"},
+ {0x11,0x0B,D|W|O,"Unrecovered read error - recommend reassignment"},
+ {0x11,0x0C,D|W|O,"Unrecovered read error - recommend rewrite the data"},
+ {0x12,0x00,D|W|O,"Address mark not found for id field"},
+ {0x13,0x00,D|W|O,"Address mark not found for data field"},
+ {0x14,0x00,D|T|L|W|R|S|O,"Recorded entity not found"},
+ {0x14,0x01,D|T|W|R|O,"Record not found"},
+ {0x14,0x02,T,"Filemark or setmark not found"},
+ {0x14,0x03,T,"End-of-data not found"},
+ {0x14,0x04,T,"Block sequence error"},
+ {0x15,0x00,D|T|L|W|R|S|O|M,"Random positioning error"},
+ {0x15,0x01,D|T|L|W|R|S|O|M,"Mechanical positioning error"},
+ {0x15,0x02,D|T|W|R|O,"Positioning error detected by read of medium"},
+ {0x16,0x00,D|W|O,"Data synchronization mark error"},
+ {0x17,0x00,D|T|W|R|S|O,"Recovered data with no error correction applied"},
+ {0x17,0x01,D|T|W|R|S|O,"Recovered data with retries"},
+ {0x17,0x02,D|T|W|R|O,"Recovered data with positive head offset"},
+ {0x17,0x03,D|T|W|R|O,"Recovered data with negative head offset"},
+ {0x17,0x04,W|R|O,"Recovered data with retries and/or circ applied"},
+ {0x17,0x05,D|W|R|O,"Recovered data using previous sector id"},
+ {0x17,0x06,D|W|O,"Recovered data without ecc - data auto-reallocated"},
+ {0x17,0x07,D|W|O,"Recovered data without ecc - recommend reassignment"},
+ {0x18,0x00,D|T|W|R|O,"Recovered data with error correction applied"},
+ {0x18,0x01,D|W|R|O,"Recovered data with error correction and retries applied"},
+ {0x18,0x02,D|W|R|O,"Recovered data - data auto-reallocated"},
+ {0x18,0x03,R,"Recovered data with circ"},
+ {0x18,0x04,R,"Recovered data with lec"},
+ {0x18,0x05,D|W|R|O,"Recovered data - recommend reassignment"},
+ {0x19,0x00,D|O,"Defect list error"},
+ {0x19,0x01,D|O,"Defect list not available"},
+ {0x19,0x02,D|O,"Defect list error in primary list"},
+ {0x19,0x03,D|O,"Defect list error in grown list"},
+ {0x1A,0x00,D|T|L|P|W|R|S|O|M|C,"Parameter list length error"},
+ {0x1B,0x00,D|T|L|P|W|R|S|O|M|C,"Synchronous data transfer error"},
+ {0x1C,0x00,D|O,"Defect list not found"},
+ {0x1C,0x01,D|O,"Primary defect list not found"},
+ {0x1C,0x02,D|O,"Grown defect list not found"},
+ {0x1D,0x00,D|W|O,"Miscompare during verify operation"},
+ {0x1E,0x00,D|W|O,"Recovered id with ecc correction"},
+ {0x20,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid command operation code"},
+ {0x21,0x00,D|T|W|R|O|M,"Logical block address out of range"},
+ {0x21,0x01,M,"Invalid element address"},
+ {0x22,0x00,D,"Illegal function (should use 20 00, 24 00, or 26 00)"},
+ {0x24,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid field in cdb"},
+ {0x25,0x00,D|T|L|P|W|R|S|O|M|C,"Logical unit not supported"},
+ {0x26,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid field in parameter list"},
+ {0x26,0x01,D|T|L|P|W|R|S|O|M|C,"Parameter not supported"},
+ {0x26,0x02,D|T|L|P|W|R|S|O|M|C,"Parameter value invalid"},
+ {0x26,0x03,D|T|L|P|W|R|S|O|M|C,"Threshold parameters not supported"},
+ {0x27,0x00,D|T|W|O,"Write protected"},
+ {0x28,0x00,D|T|L|P|W|R|S|O|M|C,"Not ready to ready transition (medium may have changed)"},
+ {0x28,0x01,M,"Import or export element accessed"},
+ {0x29,0x00,D|T|L|P|W|R|S|O|M|C,"Power on, reset, or bus device reset occurred"},
+ {0x2A,0x00,D|T|L|W|R|S|O|M|C,"Parameters changed"},
+ {0x2A,0x01,D|T|L|W|R|S|O|M|C,"Mode parameters changed"},
+ {0x2A,0x02,D|T|L|W|R|S|O|M|C,"Log parameters changed"},
+ {0x2B,0x00,D|T|L|P|W|R|S|O|C,"Copy cannot execute since host cannot disconnect"},
+ {0x2C,0x00,D|T|L|P|W|R|S|O|M|C,"Command sequence error"},
+ {0x2C,0x01,S,"Too many windows specified"},
+ {0x2C,0x02,S,"Invalid combination of windows specified"},
+ {0x2D,0x00,T,"Overwrite error on update in place"},
+ {0x2F,0x00,D|T|L|P|W|R|S|O|M|C,"Commands cleared by another initiator"},
+ {0x30,0x00,D|T|W|R|O|M,"Incompatible medium installed"},
+ {0x30,0x01,D|T|W|R|O,"Cannot read medium - unknown format"},
+ {0x30,0x02,D|T|W|R|O,"Cannot read medium - incompatible format"},
+ {0x30,0x03,D|T,"Cleaning cartridge installed"},
+ {0x31,0x00,D|T|W|O,"Medium format corrupted"},
+ {0x31,0x01,D|L|O,"Format command failed"},
+ {0x32,0x00,D|W|O,"No defect spare location available"},
+ {0x32,0x01,D|W|O,"Defect list update failure"},
+ {0x33,0x00,T,"Tape length error"},
+ {0x36,0x00,L,"Ribbon, ink, or toner failure"},
+ {0x37,0x00,D|T|L|W|R|S|O|M|C,"Rounded parameter"},
+ {0x39,0x00,D|T|L|W|R|S|O|M|C,"Saving parameters not supported"},
+ {0x3A,0x00,D|T|L|W|R|S|O|M,"Medium not present"},
+ {0x3B,0x00,T|L,"Sequential positioning error"},
+ {0x3B,0x01,T,"Tape position error at beginning-of-medium"},
+ {0x3B,0x02,T,"Tape position error at end-of-medium"},
+ {0x3B,0x03,L,"Tape or electronic vertical forms unit not ready"},
+ {0x3B,0x04,L,"Slew failure"},
+ {0x3B,0x05,L,"Paper jam"},
+ {0x3B,0x06,L,"Failed to sense top-of-form"},
+ {0x3B,0x07,L,"Failed to sense bottom-of-form"},
+ {0x3B,0x08,T,"Reposition error"},
+ {0x3B,0x09,S,"Read past end of medium"},
+ {0x3B,0x0A,S,"Read past beginning of medium"},
+ {0x3B,0x0B,S,"Position past end of medium"},
+ {0x3B,0x0C,S,"Position past beginning of medium"},
+ {0x3B,0x0D,M,"Medium destination element full"},
+ {0x3B,0x0E,M,"Medium source element empty"},
+ {0x3D,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid bits in identify message"},
+ {0x3E,0x00,D|T|L|P|W|R|S|O|M|C,"Logical unit has not self-configured yet"},
+ {0x3F,0x00,D|T|L|P|W|R|S|O|M|C,"Target operating conditions have changed"},
+ {0x3F,0x01,D|T|L|P|W|R|S|O|M|C,"Microcode has been changed"},
+ {0x3F,0x02,D|T|L|P|W|R|S|O|M|C,"Changed operating definition"},
+ {0x3F,0x03,D|T|L|P|W|R|S|O|M|C,"Inquiry data has changed"},
+ {0x43,0x00,D|T|L|P|W|R|S|O|M|C,"Message error"},
+ {0x44,0x00,D|T|L|P|W|R|S|O|M|C,"Internal target failure"},
+ {0x45,0x00,D|T|L|P|W|R|S|O|M|C,"Select or reselect failure"},
+ {0x46,0x00,D|T|L|P|W|R|S|O|M|C,"Unsuccessful soft reset"},
+ {0x47,0x00,D|T|L|P|W|R|S|O|M|C,"Scsi parity error"},
+ {0x48,0x00,D|T|L|P|W|R|S|O|M|C,"Initiator detected error message received"},
+ {0x49,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid message error"},
+ {0x4A,0x00,D|T|L|P|W|R|S|O|M|C,"Command phase error"},
+ {0x4B,0x00,D|T|L|P|W|R|S|O|M|C,"Data phase error"},
+ {0x4C,0x00,D|T|L|P|W|R|S|O|M|C,"Logical unit failed self-configuration"},
+ {0x4E,0x00,D|T|L|P|W|R|S|O|M|C,"Overlapped commands attempted"},
+ {0x50,0x00,T,"Write append error"},
+ {0x50,0x01,T,"Write append position error"},
+ {0x50,0x02,T,"Position error related to timing"},
+ {0x51,0x00,T|O,"Erase failure"},
+ {0x52,0x00,T,"Cartridge fault"},
+ {0x53,0x00,D|T|L|W|R|S|O|M,"Media load or eject failed"},
+ {0x53,0x01,T,"Unload tape failure"},
+ {0x53,0x02,D|T|W|R|O|M,"Medium removal prevented"},
+ {0x54,0x00,P,"Scsi to host system interface failure"},
+ {0x55,0x00,P,"System resource failure"},
+ {0x57,0x00,R,"Unable to recover table-of-contents"},
+ {0x58,0x00,O,"Generation does not exist"},
+ {0x59,0x00,O,"Updated block read"},
+ {0x5A,0x00,D|T|L|P|W|R|S|O|M,"Operator request or state change input (unspecified)"},
+ {0x5A,0x01,D|T|W|R|O|M,"Operator medium removal request"},
+ {0x5A,0x02,D|T|W|O,"Operator selected write protect"},
+ {0x5A,0x03,D|T|W|O,"Operator selected write permit"},
+ {0x5B,0x00,D|T|L|P|W|R|S|O|M,"Log exception"},
+ {0x5B,0x01,D|T|L|P|W|R|S|O|M,"Threshold condition met"},
+ {0x5B,0x02,D|T|L|P|W|R|S|O|M,"Log counter at maximum"},
+ {0x5B,0x03,D|T|L|P|W|R|S|O|M,"Log list codes exhausted"},
+ {0x5C,0x00,D|O,"Rpl status change"},
+ {0x5C,0x01,D|O,"Spindles synchronized"},
+ {0x5C,0x02,D|O,"Spindles not synchronized"},
+ {0x60,0x00,S,"Lamp failure"},
+ {0x61,0x00,S,"Video acquisition error"},
+ {0x61,0x01,S,"Unable to acquire video"},
+ {0x61,0x02,S,"Out of focus"},
+ {0x62,0x00,S,"Scan head positioning error"},
+ {0x63,0x00,R,"End of user area encountered on this track"},
+ {0x64,0x00,R,"Illegal mode for this track"},
+ {0, 0, 0, NULL}
+};
+#endif
+
+#if (CONSTANTS & CONST_SENSE)
+static const char *snstext[] = {
+ "None","Recovered Error","Not Ready","Medium Error","Hardware Error",
+ "Illegal Request","Unit Attention","Data Protect","Blank Check",
+ "Key=9","Copy Aborted","Aborted Command","End-Of-Medium",
+ "Volume Overflow", "Miscompare", "Key=15"};
+#endif
+
+
+/* Print sense information */
+void print_sense(const char * devclass, Scsi_Cmnd * SCpnt)
+{
+ int i, s;
+ int sense_class, valid, code;
+ unsigned char * sense_buffer = SCpnt->sense_buffer;
+ const char * error = NULL;
+
+ sense_class = (sense_buffer[0] >> 4) & 0x07;
+ code = sense_buffer[0] & 0xf;
+ valid = sense_buffer[0] & 0x80;
+
+ if (sense_class == 7) {
+ s = sense_buffer[7] + 8;
+ if(s > sizeof(SCpnt->sense_buffer)) s = sizeof(SCpnt->sense_buffer);
+
+ if (!valid)
+ printk("extra data not valid ");
+
+ if (sense_buffer[2] & 0x80) printk( "FMK ");
+ if (sense_buffer[2] & 0x40) printk( "EOM ");
+ if (sense_buffer[2] & 0x20) printk( "ILI ");
+
+ switch (code) {
+ case 0x0:
+ error = "Current";
+ break;
+ case 0x1:
+ error = "Deferred";
+ break;
+ default:
+ error = "Invalid";
+ }
+
+ printk("%s error ", error);
+
+#if (CONSTANTS & CONST_SENSE)
+ printk( "%s%s: sense key %s\n", devclass,
+ kdevname(SCpnt->request.rq_dev), snstext[sense_buffer[2] & 0x0f]);
+#else
+ printk("%s%s: sns = %2x %2x\n", devclass,
+ kdevname(SCpnt->request.rq_dev), sense_buffer[0], sense_buffer[2]);
+#endif
+
+ /* Check to see if additional sense information is available */
+ if(sense_buffer[7] + 7 < 13 ||
+ (sense_buffer[12] == 0 && sense_buffer[13] == 0)) goto done;
+
+#if (CONSTANTS & CONST_XSENSE)
+ for(i=0; additional[i].text; i++)
+ if(additional[i].code1 == sense_buffer[12] &&
+ additional[i].code2 == sense_buffer[13])
+ printk("Additional sense indicates %s\n", additional[i].text);
+
+ for(i=0; additional2[i].text; i++)
+ if(additional2[i].code1 == sense_buffer[12] &&
+       additional2[i].code2_min <= sense_buffer[13] &&
+       additional2[i].code2_max >= sense_buffer[13]) {
+ printk("Additional sense indicates ");
+ printk(additional2[i].text, sense_buffer[13]);
+ printk("\n");
+ };
+#else
+ printk("ASC=%2x ASCQ=%2x\n", sense_buffer[12], sense_buffer[13]);
+#endif
+ } else {
+
+#if (CONSTANTS & CONST_SENSE)
+ if (sense_buffer[0] < 15)
+ printk("%s%s: old sense key %s\n", devclass,
+ kdevname(SCpnt->request.rq_dev), snstext[sense_buffer[0] & 0x0f]);
+ else
+#endif
+ printk("%s%s: sns = %2x %2x\n", devclass,
+ kdevname(SCpnt->request.rq_dev), sense_buffer[0], sense_buffer[2]);
+
+ printk("Non-extended sense class %d code 0x%0x ", sense_class, code);
+ s = 4;
+ }
+
+ done:
+#if !(CONSTANTS & CONST_SENSE)
+ printk("Raw sense data:");
+ for (i = 0; i < s; ++i)
+ printk("0x%02x ", sense_buffer[i]);
+ printk("\n");
+#endif
+ return;
+}
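+
+/*
+ * Layout of the extended (class 7) sense data decoded above: byte 0 holds
+ * the valid bit (0x80) and error code, byte 2 the FMK/EOM/ILI flags and
+ * the sense key in its low nibble, byte 7 the additional sense length,
+ * and bytes 12 and 13 the additional sense code (ASC) and its qualifier
+ * (ASCQ).
+ */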
+
+#if (CONSTANTS & CONST_MSG)
+static const char *one_byte_msgs[] = {
+/* 0x00 */ "Command Complete", NULL, "Save Pointers",
+/* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error",
+/* 0x06 */ "Abort", "Message Reject", "Nop", "Message Parity Error",
+/* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag",
+/* 0x0c */ "Bus device reset", "Abort Tag", "Clear Queue",
+/* 0x0f */ "Initiate Recovery", "Release Recovery"
+};
+
+#define NO_ONE_BYTE_MSGS (sizeof(one_byte_msgs) / sizeof (const char *))
+
+static const char *two_byte_msgs[] = {
+/* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag"
+/* 0x23 */ "Ignore Wide Residue"
+};
+
+#define NO_TWO_BYTE_MSGS (sizeof(two_byte_msgs) / sizeof (const char *))
+
+static const char *extended_msgs[] = {
+/* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request",
+/* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request"
+};
+
+#define NO_EXTENDED_MSGS (sizeof(extended_msgs) / sizeof (const char *))
+#endif /* (CONSTANTS & CONST_MSG) */
+
+int print_msg (const unsigned char *msg) {
+ int len = 0, i;
+ if (msg[0] == EXTENDED_MESSAGE) {
+ len = 3 + msg[1];
+#if (CONSTANTS & CONST_MSG)
+ if (msg[2] < NO_EXTENDED_MSGS)
+ printk ("%s ", extended_msgs[msg[2]]);
+ else
+ printk ("Extended Message, reserved code (0x%02x) ", (int) msg[2]);
+ switch (msg[2]) {
+ case EXTENDED_MODIFY_DATA_POINTER:
+ printk("pointer = %d", (int) (msg[3] << 24) | (msg[4] << 16) |
+ (msg[5] << 8) | msg[6]);
+ break;
+ case EXTENDED_SDTR:
+ printk("period = %d ns, offset = %d", (int) msg[3] * 4, (int)
+ msg[4]);
+ break;
+ case EXTENDED_WDTR:
+ printk("width = 2^%d bytes", msg[3]);
+ break;
+ default:
+ for (i = 2; i < len; ++i)
+ printk("%02x ", msg[i]);
+ }
+#else
+ for (i = 0; i < len; ++i)
+ printk("%02x ", msg[i]);
+#endif
+ /* Identify */
+ } else if (msg[0] & 0x80) {
+#if (CONSTANTS & CONST_MSG)
+ printk("Identify disconnect %sallowed %s %d ",
+ (msg[0] & 0x40) ? "" : "not ",
+ (msg[0] & 0x20) ? "target routine" : "lun",
+ msg[0] & 0x7);
+#else
+ printk("%02x ", msg[0]);
+#endif
+ len = 1;
+ /* Normal One byte */
+ } else if (msg[0] < 0x1f) {
+#if (CONSTANTS & CONST_MSG)
+ if (msg[0] < NO_ONE_BYTE_MSGS)
+ printk(one_byte_msgs[msg[0]]);
+ else
+ printk("reserved (%02x) ", msg[0]);
+#else
+ printk("%02x ", msg[0]);
+#endif
+ len = 1;
+ /* Two byte */
+ } else if (msg[0] <= 0x2f) {
+#if (CONSTANTS & CONST_MSG)
+ if ((msg[0] - 0x20) < NO_TWO_BYTE_MSGS)
+ printk("%s %02x ", two_byte_msgs[msg[0] - 0x20],
+ msg[1]);
+ else
+ printk("reserved two byte (%02x %02x) ",
+ msg[0], msg[1]);
+#else
+ printk("%02x %02x", msg[0], msg[1]);
+#endif
+ len = 2;
+ } else
+#if (CONSTANTS & CONST_MSG)
+ printk(reserved);
+#else
+ printk("%02x ", msg[0]);
+#endif
+ return len;
+}
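+
+/*
+ * Examples (with CONFIG_SCSI_CONSTANTS enabled): the one byte message
+ * 0x04 prints "Disconnect"; the extended SDTR message 01 03 01 0c 0f
+ * prints "Synchronous Data Transfer Request period = 48 ns, offset = 15".
+ */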
+
+void print_Scsi_Cmnd (Scsi_Cmnd *cmd) {
+ printk("scsi%d : destination target %d, lun %d\n",
+ cmd->host->host_no,
+ cmd->target,
+ cmd->lun);
+ printk(" command = ");
+ print_command (cmd->cmnd);
+}
+
+#if (CONSTANTS & CONST_HOST)
+static const char * hostbyte_table[]={
+"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
+"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",NULL};
+
+void print_hostbyte(int scsiresult)
+{ static int maxcode=0;
+ int i;
+
+ if(!maxcode) {
+ for(i=0;hostbyte_table[i];i++) ;
+ maxcode=i-1;
+ }
+ printk("Hostbyte=0x%02x",host_byte(scsiresult));
+ if(host_byte(scsiresult)>maxcode) {
+ printk("is invalid ");
+ return;
+ }
+ printk("(%s) ",hostbyte_table[host_byte(scsiresult)]);
+}
+#else
+void print_hostbyte(int scsiresult)
+{ printk("Hostbyte=0x%02x ",host_byte(scsiresult));
+}
+#endif
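+
+/*
+ * Example: a result with host byte DID_TIME_OUT (0x03) is reported as
+ * "Hostbyte=0x03(DID_TIME_OUT)"; host_byte() extracts bits 16-23 of the
+ * SCSI result word.
+ */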
+
+#if (CONSTANTS & CONST_DRIVER)
+static const char * driverbyte_table[]={
+"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
+"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD",NULL };
+
+static const char * driversuggest_table[]={"SUGGEST_OK",
+"SUGGEST_RETRY", "SUGGEST_ABORT", "SUGGEST_REMAP", "SUGGEST_DIE",
+unknown,unknown,unknown, "SUGGEST_SENSE",NULL};
+
+
+void print_driverbyte(int scsiresult)
+{ static int driver_max=0,suggest_max=0;
+ int i,dr=driver_byte(scsiresult)&DRIVER_MASK,
+ su=(driver_byte(scsiresult)&SUGGEST_MASK)>>4;
+
+ if(!driver_max) {
+ for(i=0;driverbyte_table[i];i++) ;
+ driver_max=i;
+ for(i=0;driversuggest_table[i];i++) ;
+ suggest_max=i;
+ }
+ printk("Driverbyte=0x%02x",driver_byte(scsiresult));
+ printk("(%s,%s) ",
+ dr<driver_max ? driverbyte_table[dr]:"invalid",
+ su<suggest_max ? driversuggest_table[su]:"invalid");
+}
+#else
+void print_driverbyte(int scsiresult)
+{ printk("Driverbyte=0x%02x ",driver_byte(scsiresult));
+}
+#endif
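+
+/*
+ * Example: a driver byte of DRIVER_TIMEOUT | SUGGEST_RETRY (0x16) is
+ * reported as "Driverbyte=0x16(DRIVER_TIMEOUT,SUGGEST_RETRY)"; the low
+ * nibble selects the driver status, the high nibble the suggestion.
+ */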
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/constants.h b/i386/i386at/gpl/linux/scsi/constants.h
new file mode 100644
index 00000000..e10527ea
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/constants.h
@@ -0,0 +1,6 @@
+#ifndef _CONSTANTS_H
+#define _CONSTANTS_H
+extern int print_msg(const unsigned char *);
+extern void print_status(int);
+extern void print_Scsi_Cmnd (Scsi_Cmnd *);
+#endif /* def _CONSTANTS_H */
diff --git a/i386/i386at/gpl/linux/scsi/eata.c b/i386/i386at/gpl/linux/scsi/eata.c
new file mode 100644
index 00000000..29000332
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/eata.c
@@ -0,0 +1,1099 @@
+/*
+ * eata.c - Low-level driver for EATA/DMA SCSI host adapters.
+ *
+ * 6 Jul 1995 rev. 2.01 for linux 1.3.7
+ * Update required by the new /proc/scsi support.
+ *
+ * 11 Mar 1995 rev. 2.00 for linux 1.2.0
+ * Fixed a bug which prevented media change detection for removable
+ * disk drives.
+ *
+ * 23 Feb 1995 rev. 1.18 for linux 1.1.94
+ * Added a check for scsi_register returning NULL.
+ *
+ * 11 Feb 1995 rev. 1.17 for linux 1.1.91
+ * Now DEBUG_RESET is disabled by default.
+ * Register a board even if it does not assert DMA protocol support
+ *          (the DPT SK2011B does not correctly report the dmasup bit).
+ *
+ * 9 Feb 1995 rev. 1.16 for linux 1.1.90
+ * Use host->wish_block instead of host->block.
+ * New list of Data Out SCSI commands.
+ *
+ * 8 Feb 1995 rev. 1.15 for linux 1.1.89
+ * Cleared target_time_out counter while performing a reset.
+ * All external symbols renamed to avoid possible name conflicts.
+ *
+ * 28 Jan 1995 rev. 1.14 for linux 1.1.86
+ * Added module support.
+ * Log and do a retry when a disk drive returns a target status
+ * different from zero on a recovered error.
+ *
+ * 24 Jan 1995 rev. 1.13 for linux 1.1.85
+ * Use optimized board configuration, with a measured performance
+ * increase in the range 10%-20% on i/o throughput.
+ *
+ * 16 Jan 1995 rev. 1.12 for linux 1.1.81
+ * Fix mscp structure comments (no functional change).
+ * Display a message if check_region detects a port address
+ * already in use.
+ *
+ * 17 Dec 1994 rev. 1.11 for linux 1.1.74
+ * Use the scsicam_bios_param routine. This allows an easy
+ * migration path from disk partition tables created using
+ * different SCSI drivers and non optimal disk geometry.
+ *
+ * 15 Dec 1994 rev. 1.10 for linux 1.1.74
+ * Added support for ISA EATA boards (DPT PM2011, DPT PM2021).
+ * The host->block flag is set for all the detected ISA boards.
+ * The detect routine no longer enforces LEVEL triggering
+ * for EISA boards, it just prints a warning message.
+ *
+ * 30 Nov 1994 rev. 1.09 for linux 1.1.68
+ * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only.
+ * Added optional support for using a single board at a time.
+ *
+ * 18 Nov 1994 rev. 1.08 for linux 1.1.64
+ * Forces sg_tablesize = 64 and can_queue = 64 if these
+ * values are not correctly detected (DPT PM2012).
+ *
+ * 14 Nov 1994 rev. 1.07 for linux 1.1.63 Final BETA release.
+ * 04 Aug 1994 rev. 1.00 for linux 1.1.39 First BETA release.
+ *
+ *
+ * This driver is based on the CAM (Common Access Method Committee)
+ * EATA (Enhanced AT Bus Attachment) rev. 2.0A, using DMA protocol.
+ *
+ * Copyright (C) 1994, 1995 Dario Ballabio (dario@milano.europe.dg.com)
+ *
+ */
+
+/*
+ *
+ * Here is a brief description of the DPT SCSI host adapters.
+ * All these boards provide an EATA/DMA compatible programming interface
+ * and are fully supported by this driver:
+ *
+ * PM2011B/9X - Entry Level ISA
+ * PM2021A/9X - High Performance ISA
+ * PM2012A Old EISA
+ * PM2012B Old EISA
+ * PM2022A/9X - Entry Level EISA
+ * PM2122A/9X - High Performance EISA
+ * PM2322A/9X - Extra High Performance EISA
+ *
+ * The DPT PM2001 provides only the EATA/PIO interface and hence is not
+ * supported by this driver.
+ *
+ * This code has been tested with up to 3 Distributed Processing Technology
+ * PM2122A/9X (DPT SCSI BIOS v002.D1, firmware v05E.0) eisa controllers,
+ * with no on-board cache and no RAID option.
+ * BIOS must be enabled on the first board and must be disabled for all other
+ * boards.
+ * Support is provided for any number of DPT PM2122 eisa boards.
+ * All boards should be configured at the same IRQ level.
+ * Multiple IRQ configurations are supported too.
+ * Boards can be located in any eisa slot (1-15) and are named EATA0,
+ * EATA1,... in increasing eisa slot number. ISA boards are detected
+ * after the eisa slot probes.
+ *
+ * The IRQ for EISA boards should be _level_ triggered (not _edge_ triggered).
+ * This is a requirement in order to support multiple boards on the same IRQ.
+ *
+ * Other eisa configuration parameters are:
+ *
+ * COMMAND QUEUING : ENABLED
+ * COMMAND TIMEOUT : ENABLED
+ * CACHE : DISABLED
+ *
+ * In order to support multiple ISA boards in a reliable way,
+ * the driver sets host->wish_block = TRUE for all ISA boards.
+ */
+
+#if defined(MODULE)
+#include <linux/module.h>
+#include <linux/version.h>
+#endif
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include "linux/in.h"
+#include "eata.h"
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_eata2x = {
+ PROC_SCSI_EATA2X, 6, "eata2x",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* Subversion values */
+#define ISA 0
+#define ESA 1
+
+#undef FORCE_CONFIG
+
+#undef DEBUG_DETECT
+#undef DEBUG_INTERRUPT
+#undef DEBUG_STATISTICS
+#undef DEBUG_RESET
+
+#define MAX_TARGET 8
+#define MAX_IRQ 16
+#define MAX_BOARDS 18
+#define MAX_MAILBOXES 64
+#define MAX_SGLIST 64
+#define MAX_CMD_PER_LUN 2
+
+#define FALSE 0
+#define TRUE 1
+#define FREE 0
+#define IN_USE 1
+#define LOCKED 2
+#define IN_RESET 3
+#define IGNORE 4
+#define NO_IRQ 0xff
+#define NO_DMA 0xff
+#define MAXLOOP 200000
+
+#define REG_CMD 7
+#define REG_STATUS 7
+#define REG_AUX_STATUS 8
+#define REG_DATA 0
+#define REG_DATA2 1
+#define REG_SEE 6
+#define REG_LOW 2
+#define REG_LM 3
+#define REG_MID 4
+#define REG_MSB 5
+#define REGION_SIZE 9
+#define EISA_RANGE 0xf000
+#define BSY_ASSERTED 0x80
+#define DRQ_ASSERTED 0x08
+#define ABSY_ASSERTED 0x01
+#define IRQ_ASSERTED 0x02
+#define READ_CONFIG_PIO 0xf0
+#define SET_CONFIG_PIO 0xf1
+#define SEND_CP_PIO 0xf2
+#define RECEIVE_SP_PIO 0xf3
+#define TRUNCATE_XFR_PIO 0xf4
+#define RESET_PIO 0xf9
+#define READ_CONFIG_DMA 0xfd
+#define SET_CONFIG_DMA 0xfe
+#define SEND_CP_DMA 0xff
+#define ASOK 0x00
+#define ASST 0x01
+
+#define ARRAY_SIZE(arr) (sizeof (arr) / sizeof (arr)[0])
+
+/* "EATA", in Big Endian format */
+#define EATA_SIGNATURE 0x41544145
+
+/* Number of valid bytes in the board config structure for EATA 2.0x */
+#define EATA_2_0A_SIZE 28
+#define EATA_2_0B_SIZE 30
+
+/* Board info structure */
+struct eata_info {
+ ulong data_len; /* Number of valid bytes after this field */
+ ulong sign; /* ASCII "EATA" signature */
+ unchar :4, /* unused low nibble */
+ version:4; /* EATA version, should be 0x1 */
+ unchar ocsena:1, /* Overlap Command Support Enabled */
+ tarsup:1, /* Target Mode Supported */
+ :2,
+ dmasup:1, /* DMA Supported */
+ drqvld:1, /* DRQ Index (DRQX) is valid */
+ ata:1, /* This is an ATA device */
+ haaval:1; /* Host Adapter Address Valid */
+ ushort cp_pad_len; /* Number of pad bytes after cp_len */
+ unchar host_addr[3]; /* Host Adapter SCSI ID for channels 2, 1, 0 */
+ unchar reserved;
+ ulong cp_len; /* Number of valid bytes in cp */
+ ulong sp_len; /* Number of valid bytes in sp */
+ ushort queue_size; /* Max number of cp that can be queued */
+ ushort unused;
+ ushort scatt_size; /* Max number of entries in scatter/gather table */
+ unchar irq:4, /* Interrupt Request assigned to this controller */
+ irq_tr:1, /* 0 for edge triggered, 1 for level triggered */
+ second:1, /* 1 if this is a secondary (not primary) controller */
+ drqx:2; /* DRQ Index (0=DMA0, 1=DMA7, 2=DMA6, 3=DMA5) */
+ unchar sync; /* 1 if scsi target id 7...0 is running sync scsi */
+
+ /* Structure extension defined in EATA 2.0B */
+ unchar isaena:1, /* ISA i/o addressing is disabled/enabled */
+ forcaddr:1, /* Port address has been forced */
+ :6;
+ unchar max_id:5, /* Max number of SCSI target IDs */
+ max_chan:3; /* Max SCSI channel number on this board */
+
+ ushort ipad[249];
+ };
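+
+/*
+ * All multi-byte fields in this structure arrive in EATA (big endian)
+ * byte order; port_detect() converts them with ntohl()/ntohs() before use.
+ */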
+
+/* Board config structure */
+struct eata_config {
+ ushort len; /* Number of bytes following this field */
+ unchar edis:1, /* Disable EATA interface after config command */
+ ocena:1, /* Overlapped Commands Enabled */
+ mdpena:1, /* Transfer all Modified Data Pointer Messages */
+ tarena:1, /* Target Mode Enabled for this controller */
+ :4;
+ unchar cpad[511];
+ };
+
+/* Returned status packet structure */
+struct mssp {
+ unchar adapter_status:7, /* State related to current command */
+ eoc:1; /* End Of Command (1 = command completed) */
+ unchar target_status; /* SCSI status received after data transfer */
+ unchar unused[2];
+ ulong inv_res_len; /* Number of bytes not transferred */
+ Scsi_Cmnd *SCpnt; /* Address set in cp */
+ char mess[12];
+ };
+
+/* MailBox SCSI Command Packet */
+struct mscp {
+ unchar sreset:1, /* SCSI Bus Reset Signal should be asserted */
+ init:1, /* Re-initialize controller and self test */
+ reqsen:1, /* Transfer Request Sense Data to addr using DMA */
+ sg:1, /* Use Scatter/Gather */
+ :1,
+ interp:1, /* The controller interprets cp, not the target */
+ dout:1, /* Direction of Transfer is Out (Host to Target) */
+ din:1; /* Direction of Transfer is In (Target to Host) */
+ unchar sense_len; /* Request Sense Length */
+ unchar unused[4];
+ unchar phsunit:1, /* Send to Target Physical Unit (bypass RAID) */
+ notused:7;
+ unchar target; /* SCSI Target ID */
+ unchar lun:3, /* LUN */
+ :2,
+ luntar:1, /* This cp is for Target (not LUN) */
+ dispri:1, /* Disconnect Privilege granted */
+ one:1; /* 1 */
+   unchar mess[3];             /* Message to/from Target */
+ unchar cdb[12]; /* Command Descriptor Block */
+ ulong data_len; /* If sg=0 Data Length, if sg=1 sglist length */
+ Scsi_Cmnd *SCpnt; /* Address to be returned is sp */
+ ulong data_address; /* If sg=0 Data Address, if sg=1 sglist address */
+ ulong sp_addr; /* Address where sp is DMA'ed when cp completes */
+ ulong sense_addr; /* Address where Sense Data is DMA'ed on error */
+
+ struct sg_list {
+ unsigned int address; /* Segment Address */
+ unsigned int num_bytes; /* Segment Length */
+ } sglist[MAX_SGLIST];
+
+ unsigned int index; /* cp index */
+ };
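+
+/*
+ * Every address and length field handed to the board (data_address,
+ * data_len, sp_addr, sense_addr and the sg_list entries) is converted to
+ * EATA big endian byte order with htonl() before the cp is sent.
+ */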
+
+struct hostdata {
+ struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */
+ unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */
+ unsigned int last_cp_used; /* Index of last mailbox used */
+ unsigned int iocount; /* Total i/o done for this board */
+ unsigned int multicount; /* Total ... in second ihdlr loop */
+ int board_number; /* Number of this board */
+ char board_name[16]; /* Name of this board */
+ char board_id[256]; /* data from INQUIRY on this board */
+ int in_reset; /* True if board is doing a reset */
+ int target_time_out[MAX_TARGET]; /* N. of timeout errors on target */
+ int target_reset[MAX_TARGET]; /* If TRUE redo operation on target */
+ unsigned char subversion; /* Bus type, either ISA or ESA */
+ unsigned char protocol_rev; /* EATA 2.0 rev., 'A' or 'B' or 'C' */
+ struct mssp sp[MAX_MAILBOXES]; /* Returned status for this board */
+ };
+
+static struct Scsi_Host * sh[MAX_BOARDS + 1];
+static const char* driver_name = "EATA";
+static unsigned int irqlist[MAX_IRQ], calls[MAX_IRQ];
+
+#define HD(board) ((struct hostdata *) &sh[board]->hostdata)
+#define BN(board) (HD(board)->board_name)
+
+static void eata2x_interrupt_handler(int, struct pt_regs *);
+static int do_trace = FALSE;
+
+static inline unchar wait_on_busy(ushort iobase) {
+ unsigned int loop = MAXLOOP;
+
+ while (inb(iobase + REG_AUX_STATUS) & ABSY_ASSERTED)
+ if (--loop == 0) return TRUE;
+
+ return FALSE;
+}
+
+static inline unchar do_dma (ushort iobase, unsigned int addr, unchar cmd) {
+
+ if (wait_on_busy(iobase)) return TRUE;
+
+ if (addr) {
+ outb((char) addr, iobase + REG_LOW);
+ outb((char) (addr >> 8), iobase + REG_LM);
+ outb((char) (addr >> 16), iobase + REG_MID);
+ outb((char) (addr >> 24), iobase + REG_MSB);
+ }
+
+ outb(cmd, iobase + REG_CMD);
+ return FALSE;
+}
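+
+/*
+ * The 32 bit address of a packet is handed to the board one byte at a
+ * time: e.g. 0x0012a4f0 is written as 0xf0 to REG_LOW, 0xa4 to REG_LM,
+ * 0x12 to REG_MID and 0x00 to REG_MSB, followed by the command code.
+ */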
+
+static inline unchar read_pio (ushort iobase, ushort *start, ushort *end) {
+ unsigned int loop = MAXLOOP;
+ ushort *p;
+
+ for (p = start; p <= end; p++) {
+
+ while (!(inb(iobase + REG_STATUS) & DRQ_ASSERTED))
+ if (--loop == 0) return TRUE;
+
+ loop = MAXLOOP;
+ *p = inw(iobase);
+ }
+
+ return FALSE;
+}
+
+static inline int port_detect(ushort *port_base, unsigned int j,
+ Scsi_Host_Template * tpnt) {
+ unsigned char irq, dma_channel, subversion;
+ unsigned char protocol_rev;
+ struct eata_info info;
+ const char *board_status;
+
+ /* Allowed DMA channels for ISA (0 indicates reserved) */
+ unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };
+
+ char name[16];
+
+ sprintf(name, "%s%d", driver_name, j);
+
+ if(check_region(*port_base, REGION_SIZE)) {
+ printk("%s: address 0x%03x in use, skipping probe.\n",
+ name, *port_base);
+ return FALSE;
+ }
+
+ if (do_dma(*port_base, 0, READ_CONFIG_PIO)) return FALSE;
+
+ /* Read the info structure */
+ if (read_pio(*port_base, (ushort *)&info, (ushort *)&info.ipad[0]))
+ return FALSE;
+
+ /* Check the controller "EATA" signature */
+ if (info.sign != EATA_SIGNATURE) return FALSE;
+
+ if (ntohl(info.data_len) < EATA_2_0A_SIZE) {
+ printk("%s: config structure size (%ld bytes) too short, detaching.\n",
+ name, ntohl(info.data_len));
+ return FALSE;
+ }
+ else if (ntohl(info.data_len) == EATA_2_0A_SIZE)
+ protocol_rev = 'A';
+ else if (ntohl(info.data_len) == EATA_2_0B_SIZE)
+ protocol_rev = 'B';
+ else
+ protocol_rev = 'C';
+
+ if (protocol_rev != 'A' && info.max_chan > 0)
+ printk("%s: warning, only scsi channel 0 is supported.\n", name);
+
+ irq = info.irq;
+
+ if (*port_base & EISA_RANGE) {
+
+ if (!info.haaval || info.ata || info.drqvld) {
+ printk("%s: unusable EISA board found (%d%d%d), detaching.\n",
+ name, info.haaval, info.ata, info.drqvld);
+ return FALSE;
+ }
+
+ subversion = ESA;
+ dma_channel = NO_DMA;
+ }
+ else {
+
+ if (!info.haaval || info.ata || !info.drqvld) {
+ printk("%s: unusable ISA board found (%d%d%d), detaching.\n",
+ name, info.haaval, info.ata, info.drqvld);
+ return FALSE;
+ }
+
+ subversion = ISA;
+ dma_channel = dma_channel_table[3 - info.drqx];
+ }
+
+ if (!info.dmasup)
+ printk("%s: warning, DMA protocol support not asserted.\n", name);
+
+ if (subversion == ESA && !info.irq_tr)
+ printk("%s: warning, LEVEL triggering is suggested for IRQ %u.\n",
+ name, irq);
+
+ if (info.second)
+ board_status = "Sec.";
+ else
+ board_status = "Prim.";
+
+ /* Board detected, allocate its IRQ if not already done */
+ if ((irq >= MAX_IRQ) || ((irqlist[irq] == NO_IRQ) && request_irq
+ (irq, eata2x_interrupt_handler, SA_INTERRUPT, driver_name))) {
+ printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
+ return FALSE;
+ }
+
+ if (subversion == ISA && request_dma(dma_channel, driver_name)) {
+ printk("%s: unable to allocate DMA channel %u, detaching.\n",
+ name, dma_channel);
+ free_irq(irq);
+ return FALSE;
+ }
+
+#if defined (FORCE_CONFIG)
+ {
+ struct eata_config config;
+
+ /* Set board configuration */
+ memset((char *)&config, 0, sizeof(struct eata_config));
+ config.len = (ushort) htons((ushort)510);
+ config.ocena = TRUE;
+
+ if (do_dma(*port_base, (unsigned int)&config, SET_CONFIG_DMA)) {
+ printk("%s: busy timeout sending configuration, detaching.\n", name);
+ return FALSE;
+ }
+ }
+#endif
+
+ sh[j] = scsi_register(tpnt, sizeof(struct hostdata));
+
+ if (sh[j] == NULL) {
+ printk("%s: unable to register host, detaching.\n", name);
+
+ if (irqlist[irq] == NO_IRQ) free_irq(irq);
+
+ if (subversion == ISA) free_dma(dma_channel);
+
+ return FALSE;
+ }
+
+ sh[j]->io_port = *port_base;
+ sh[j]->n_io_port = REGION_SIZE;
+ sh[j]->dma_channel = dma_channel;
+ sh[j]->irq = irq;
+ sh[j]->sg_tablesize = (ushort) ntohs(info.scatt_size);
+ sh[j]->this_id = (ushort) info.host_addr[3];
+ sh[j]->can_queue = (ushort) ntohs(info.queue_size);
+ sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;
+
+ /* Register the I/O space that we use */
+ request_region(sh[j]->io_port, REGION_SIZE, driver_name);
+
+ memset(HD(j), 0, sizeof(struct hostdata));
+ HD(j)->subversion = subversion;
+ HD(j)->protocol_rev = protocol_rev;
+ HD(j)->board_number = j;
+ irqlist[irq] = j;
+
+ if (HD(j)->subversion == ESA)
+ sh[j]->unchecked_isa_dma = FALSE;
+ else {
+ sh[j]->wish_block = TRUE;
+ sh[j]->unchecked_isa_dma = TRUE;
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ }
+
+ strcpy(BN(j), name);
+
+ printk("%s: 2.0%c, %s, ID %d, PORT 0x%03x, IRQ %u, DMA %u, SG %d, "\
+ "Mbox %d, CmdLun %d.\n", BN(j), HD(j)->protocol_rev, board_status,
+ sh[j]->this_id, sh[j]->io_port, sh[j]->irq, sh[j]->dma_channel,
+ sh[j]->sg_tablesize, sh[j]->can_queue, sh[j]->cmd_per_lun);
+
+   /* The DPT PM2012 does not allow sg_tablesize to be detected correctly */
+ if (sh[j]->sg_tablesize > MAX_SGLIST || sh[j]->sg_tablesize < 2) {
+ printk("%s: detect, forcing to use %d SG lists.\n", BN(j), MAX_SGLIST);
+ sh[j]->sg_tablesize = MAX_SGLIST;
+ }
+
+   /* The DPT PM2012 does not allow can_queue to be detected correctly */
+ if (sh[j]->can_queue > MAX_MAILBOXES || sh[j]->can_queue < 2) {
+ printk("%s: detect, forcing to use %d Mbox.\n", BN(j), MAX_MAILBOXES);
+ sh[j]->can_queue = MAX_MAILBOXES;
+ }
+
+#if defined (DEBUG_DETECT)
+ if (protocol_rev != 'A')
+ printk("%s: EATA 2.0%c, isaena %u, forcaddr %u, max_id %u,"\
+ " max_chan %u.\n", name, protocol_rev, info.isaena,
+ info.forcaddr, info.max_id, info.max_chan);
+
+ printk("%s: Version 0x%x, SYNC 0x%x, infol %ld, cpl %ld spl %ld.\n",
+ name, info.version, info.sync, ntohl(info.data_len),
+ ntohl(info.cp_len), ntohl(info.sp_len));
+#endif
+
+ return TRUE;
+}
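+
+/*
+ * Detection sequence used above: issue READ_CONFIG_PIO through do_dma(),
+ * read the eata_info structure back word by word with read_pio(), check
+ * the "EATA" signature and structure size, then claim the IRQ, the DMA
+ * channel (ISA only) and the I/O region before registering the host.
+ */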
+
+int eata2x_detect (Scsi_Host_Template * tpnt) {
+ unsigned int j = 0, k, flags;
+
+ ushort io_port[] = {
+ 0x1c88, 0x2c88, 0x3c88, 0x4c88, 0x5c88, 0x6c88, 0x7c88, 0x8c88,
+ 0x9c88, 0xac88, 0xbc88, 0xcc88, 0xdc88, 0xec88, 0xfc88,
+ 0x1f0, 0x170, 0x330, 0x230, 0x0
+ };
+
+ ushort *port_base = io_port;
+
+ tpnt->proc_dir = &proc_scsi_eata2x;
+
+ save_flags(flags);
+ cli();
+
+ for (k = 0; k < MAX_IRQ; k++) {
+ irqlist[k] = NO_IRQ;
+ calls[k] = 0;
+ }
+
+ for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL;
+
+ while (*port_base) {
+
+ if (j < MAX_BOARDS && port_detect(port_base, j, tpnt)) j++;
+
+ port_base++;
+ }
+
+ if (j > 0)
+ printk("EATA/DMA 2.0x: Copyright (C) 1994, 1995 Dario Ballabio.\n");
+
+ restore_flags(flags);
+ return j;
+}
+
+static inline void build_sg_list(struct mscp *cpp, Scsi_Cmnd *SCpnt) {
+ unsigned int k;
+ struct scatterlist * sgpnt;
+
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+
+ for (k = 0; k < SCpnt->use_sg; k++) {
+ cpp->sglist[k].address = htonl((unsigned int) sgpnt[k].address);
+ cpp->sglist[k].num_bytes = htonl((unsigned int) sgpnt[k].length);
+ }
+
+ cpp->data_address = htonl((unsigned int) cpp->sglist);
+ cpp->data_len = htonl((SCpnt->use_sg * sizeof(struct sg_list)));
+}
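+
+/*
+ * Example: a request scattered over two buffers produces a two entry
+ * sglist whose addresses and lengths are stored big endian by htonl();
+ * data_address then points at the sglist itself and data_len holds
+ * 2 * sizeof(struct sg_list).
+ */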
+
+int eata2x_queuecommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
+ unsigned int i, j, k, flags;
+ struct mscp *cpp;
+ struct mssp *spp;
+
+ static const unsigned char data_out_cmds[] = {
+ 0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x0b, 0x10, 0x16, 0x18, 0x1d,
+ 0x24, 0x2b, 0x2e, 0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d,
+ 0x3f, 0x40, 0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea
+ };
+
+ save_flags(flags);
+ cli();
+ /* j is the board number */
+ j = ((struct hostdata *) SCpnt->host->hostdata)->board_number;
+
+ if (!done) panic("%s: qcomm, pid %ld, null done.\n", BN(j), SCpnt->pid);
+
+ /* i is the mailbox number, look for the first free mailbox
+ starting from last_cp_used */
+ i = HD(j)->last_cp_used + 1;
+
+ for (k = 0; k < sh[j]->can_queue; k++, i++) {
+
+ if (i >= sh[j]->can_queue) i = 0;
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ HD(j)->last_cp_used = i;
+ break;
+ }
+ }
+
+ if (k == sh[j]->can_queue) {
+ printk("%s: qcomm, no free mailbox, resetting.\n", BN(j));
+
+ if (HD(j)->in_reset)
+ printk("%s: qcomm, already in reset.\n", BN(j));
+ else if (eata2x_reset(SCpnt) == SCSI_RESET_SUCCESS)
+ panic("%s: qcomm, SCSI_RESET_SUCCESS.\n", BN(j));
+
+ SCpnt->result = DID_BUS_BUSY << 16;
+ SCpnt->host_scribble = NULL;
+ printk("%s: qcomm, pid %ld, DID_BUS_BUSY, done.\n", BN(j), SCpnt->pid);
+ restore_flags(flags);
+ done(SCpnt);
+ return 0;
+ }
+
+ /* Set pointer to control packet structure */
+ cpp = &HD(j)->cp[i];
+
+ memset(cpp, 0, sizeof(struct mscp));
+
+ /* Set pointer to status packet structure */
+ spp = &HD(j)->sp[i];
+
+ memset(spp, 0, sizeof(struct mssp));
+
+ /* The EATA protocol uses Big Endian format, while Intel is Little Endian */
+ cpp->sp_addr = htonl((unsigned int) spp);
+
+ SCpnt->scsi_done = done;
+ cpp->index = i;
+ SCpnt->host_scribble = (unsigned char *) &cpp->index;
+
+ if (do_trace) printk("%s: qcomm, mbox %d, target %d, pid %ld.\n",
+ BN(j), i, SCpnt->target, SCpnt->pid);
+
+ for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++)
+ if (SCpnt->cmnd[0] == data_out_cmds[k]) {
+ cpp->dout = TRUE;
+ break;
+ }
+
+ cpp->din = !cpp->dout;
+ cpp->reqsen = TRUE;
+ cpp->dispri = TRUE;
+ cpp->one = TRUE;
+ cpp->target = SCpnt->target;
+ cpp->lun = SCpnt->lun;
+ cpp->SCpnt = SCpnt;
+ cpp->sense_addr = htonl((unsigned int) SCpnt->sense_buffer);
+ cpp->sense_len = sizeof SCpnt->sense_buffer;
+
+ if (SCpnt->use_sg) {
+ cpp->sg = TRUE;
+ build_sg_list(cpp, SCpnt);
+ }
+ else {
+ cpp->data_address = htonl((unsigned int) SCpnt->request_buffer);
+ cpp->data_len = htonl(SCpnt->request_bufflen);
+ }
+
+ memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len);
+
+ /* Send control packet to the board */
+ if (do_dma(sh[j]->io_port, (unsigned int) cpp, SEND_CP_DMA)) {
+ SCpnt->result = DID_ERROR << 16;
+ SCpnt->host_scribble = NULL;
+ printk("%s: qcomm, target %d, pid %ld, adapter busy, DID_ERROR, done.\n",
+ BN(j), SCpnt->target, SCpnt->pid);
+ restore_flags(flags);
+ done(SCpnt);
+ return 0;
+ }
+
+ HD(j)->cp_stat[i] = IN_USE;
+ restore_flags(flags);
+ return 0;
+}
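+
+/*
+ * Mailbox life cycle: a slot goes from FREE to IN_USE when a command is
+ * queued above, back to FREE when the interrupt handler completes it,
+ * to IN_RESET and then LOCKED while eata2x_reset() is waiting for a late
+ * interrupt, and a LOCKED or IGNORE slot is simply freed when that
+ * interrupt finally arrives.
+ */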
+
+int eata2x_abort (Scsi_Cmnd *SCarg) {
+ unsigned int i, j, flags;
+
+ save_flags(flags);
+ cli();
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+
+ if (SCarg->host_scribble == NULL) {
+ printk("%s: abort, target %d, pid %ld inactive.\n",
+ BN(j), SCarg->target, SCarg->pid);
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ i = *(unsigned int *)SCarg->host_scribble;
+ printk("%s: abort, mbox %d, target %d, pid %ld.\n",
+ BN(j), i, SCarg->target, SCarg->pid);
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
+
+ if (wait_on_busy(sh[j]->io_port)) {
+ printk("%s: abort, timeout error.\n", BN(j));
+ restore_flags(flags);
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", BN(j), i);
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", BN(j), i);
+
+ if (SCarg != HD(j)->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
+
+ restore_flags(flags);
+ return SCSI_ABORT_SNOOZE;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
+ restore_flags(flags);
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", BN(j), i);
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+ restore_flags(flags);
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
+}
+
+int eata2x_reset (Scsi_Cmnd *SCarg) {
+ unsigned int i, j, flags, time, k, limit = 0;
+ int arg_done = FALSE;
+ Scsi_Cmnd *SCpnt;
+
+ save_flags(flags);
+ cli();
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+ printk("%s: reset, enter, target %d, pid %ld.\n",
+ BN(j), SCarg->target, SCarg->pid);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->pid);
+
+ if (HD(j)->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", BN(j));
+ restore_flags(flags);
+ return SCSI_RESET_ERROR;
+ }
+
+ if (wait_on_busy(sh[j]->io_port)) {
+ printk("%s: reset, exit, timeout error.\n", BN(j));
+ restore_flags(flags);
+ return SCSI_RESET_ERROR;
+ }
+
+ for (k = 0; k < MAX_TARGET; k++) HD(j)->target_reset[k] = TRUE;
+
+ for (k = 0; k < MAX_TARGET; k++) HD(j)->target_time_out[k] = 0;
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == FREE) continue;
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
+ continue;
+ }
+
+ SCpnt = HD(j)->cp[i].SCpnt;
+ HD(j)->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+
+ if (SCpnt == NULL)
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
+
+ if (SCpnt == SCarg) arg_done = TRUE;
+ }
+
+ if (do_dma(sh[j]->io_port, 0, RESET_PIO)) {
+ printk("%s: reset, cannot reset, timeout error.\n", BN(j));
+ restore_flags(flags);
+ return SCSI_RESET_ERROR;
+ }
+
+ printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
+
+#if defined (DEBUG_RESET)
+ do_trace = TRUE;
+#endif
+
+ HD(j)->in_reset = TRUE;
+ sti();
+ time = jiffies;
+ while (jiffies < (time + 100) && limit++ < 100000000);
+ cli();
+ printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ /* Skip mailboxes already set free by interrupt */
+ if (HD(j)->cp_stat[i] != IN_RESET) continue;
+
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(j)->cp_stat[i] = LOCKED;
+
+      printk("%s: reset, mbox %d locked, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ restore_flags(flags);
+ SCpnt->scsi_done(SCpnt);
+ cli();
+ }
+
+ HD(j)->in_reset = FALSE;
+ do_trace = FALSE;
+ restore_flags(flags);
+
+ if (arg_done) {
+ printk("%s: reset, exit, success.\n", BN(j));
+ return SCSI_RESET_SUCCESS;
+ }
+ else {
+ printk("%s: reset, exit, wakeup.\n", BN(j));
+ return SCSI_RESET_PUNT;
+ }
+}
+
+static void eata2x_interrupt_handler(int irq, struct pt_regs * regs) {
+ Scsi_Cmnd *SCpnt;
+ unsigned int i, j, k, flags, status, tstatus, loops, total_loops = 0;
+ struct mssp *spp;
+ struct mscp *cpp;
+
+ save_flags(flags);
+ cli();
+
+ if (irqlist[irq] == NO_IRQ) {
+      printk("%s: ihdlr, irq %d, unexpected interrupt.\n", driver_name, irq);
+ restore_flags(flags);
+ return;
+ }
+
+ if (do_trace) printk("%s: ihdlr, enter, irq %d, calls %d.\n",
+ driver_name, irq, calls[irq]);
+
+ /* Service all the boards configured on this irq */
+ for (j = 0; sh[j] != NULL; j++) {
+
+ if (sh[j]->irq != irq) continue;
+
+ loops = 0;
+
+ /* Loop until all interrupts for a board are serviced */
+ while (inb(sh[j]->io_port + REG_AUX_STATUS) & IRQ_ASSERTED) {
+ total_loops++;
+ loops++;
+
+ if (do_trace) printk("%s: ihdlr, start service, count %d.\n",
+ BN(j), HD(j)->iocount);
+
+ /* Read the status register to clear the interrupt indication */
+ inb(sh[j]->io_port + REG_STATUS);
+
+ /* Service all mailboxes of this board */
+ for (i = 0; i < sh[j]->can_queue; i++) {
+ spp = &HD(j)->sp[i];
+
+ /* Check if this mailbox has completed the operation */
+ if (spp->eoc == FALSE) continue;
+
+ spp->eoc = FALSE;
+
+ if (HD(j)->cp_stat[i] == IGNORE) {
+ HD(j)->cp_stat[i] = FREE;
+ continue;
+ }
+ else if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: ihdlr, mbox %d unlocked, count %d.\n",
+ BN(j), i, HD(j)->iocount);
+ continue;
+ }
+ else if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: ihdlr, mbox %d is free, count %d.\n",
+ BN(j), i, HD(j)->iocount);
+ continue;
+ }
+ else if (HD(j)->cp_stat[i] == IN_RESET)
+ printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i);
+ else if (HD(j)->cp_stat[i] != IN_USE)
+ panic("%s: ihdlr, mbox %d, invalid cp_stat.\n", BN(j), i);
+
+ HD(j)->cp_stat[i] = FREE;
+ cpp = &HD(j)->cp[i];
+ SCpnt = spp->SCpnt;
+
+ if (SCpnt == NULL)
+ panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (SCpnt != cpp->SCpnt)
+ panic("%s: ihdlr, mbox %d, sp SCpnt %p, cp SCpnt %p.\n",
+ BN(j), i, SCpnt, cpp->SCpnt);
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n",
+ BN(j), i, SCpnt->pid, SCpnt);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d,"\
+ " irq %d.\n", BN(j), i, SCpnt->pid,
+ *(unsigned int *)SCpnt->host_scribble, irq);
+
+ tstatus = status_byte(spp->target_status);
+
+ switch (spp->adapter_status) {
+ case ASOK: /* status OK */
+
+ /* Forces a reset if a disk drive keeps returning BUSY */
+ if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE)
+ status = DID_ERROR << 16;
+
+ /* If there was a bus reset, redo operation on each target */
+ else if (tstatus != GOOD
+ && SCpnt->device->type == TYPE_DISK
+ && HD(j)->target_reset[SCpnt->target])
+ status = DID_BUS_BUSY << 16;
+
+ /* Works around a flaw in scsi.c */
+ else if (tstatus == CHECK_CONDITION
+ && SCpnt->device->type == TYPE_DISK
+ && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
+ status = DID_BUS_BUSY << 16;
+
+ else
+ status = DID_OK << 16;
+
+ if (tstatus == GOOD)
+ HD(j)->target_reset[SCpnt->target] = FALSE;
+
+ if (spp->target_status && SCpnt->device->type == TYPE_DISK)
+ printk("%s: ihdlr, target %d:%d, pid %ld, target_status "\
+ "0x%x, sense key 0x%x.\n", BN(j),
+ SCpnt->target, SCpnt->lun, SCpnt->pid,
+ spp->target_status, SCpnt->sense_buffer[2]);
+
+ HD(j)->target_time_out[SCpnt->target] = 0;
+
+ break;
+ case ASST: /* Selection Time Out */
+ case 0x02: /* Command Time Out */
+
+ if (HD(j)->target_time_out[SCpnt->target] > 1)
+ status = DID_ERROR << 16;
+ else {
+ status = DID_TIME_OUT << 16;
+ HD(j)->target_time_out[SCpnt->target]++;
+ }
+
+ break;
+ case 0x03: /* SCSI Bus Reset Received */
+ case 0x04: /* Initial Controller Power-up */
+
+ if (SCpnt->device->type != TYPE_TAPE)
+ status = DID_BUS_BUSY << 16;
+ else
+ status = DID_ERROR << 16;
+
+ for (k = 0; k < MAX_TARGET; k++)
+ HD(j)->target_reset[k] = TRUE;
+
+ break;
+ case 0x07: /* Bus Parity Error */
+ case 0x0c: /* Controller Ram Parity */
+ case 0x05: /* Unexpected Bus Phase */
+ case 0x06: /* Unexpected Bus Free */
+ case 0x08: /* SCSI Hung */
+ case 0x09: /* Unexpected Message Reject */
+ case 0x0a: /* SCSI Bus Reset Stuck */
+ case 0x0b: /* Auto Request-Sense Failed */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ }
+
+ SCpnt->result = status | spp->target_status;
+ HD(j)->iocount++;
+
+ if (loops > 1) HD(j)->multicount++;
+
+#if defined (DEBUG_INTERRUPT)
+ if (SCpnt->result || do_trace)
+#else
+ if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) ||
+ (spp->adapter_status != ASOK &&
+ spp->adapter_status != ASST && HD(j)->iocount <= 1000) ||
+ do_trace)
+#endif
+ printk("%s: ihdlr, mbox %d, err 0x%x:%x,"\
+ " target %d:%d, pid %ld, count %d.\n",
+ BN(j), i, spp->adapter_status, spp->target_status,
+ SCpnt->target, SCpnt->lun, SCpnt->pid, HD(j)->iocount);
+
+ /* Set the command state to inactive */
+ SCpnt->host_scribble = NULL;
+
+ restore_flags(flags);
+ SCpnt->scsi_done(SCpnt);
+ cli();
+
+ } /* Mailbox loop */
+
+ } /* Multiple command loop */
+
+ } /* Boards loop */
+
+ calls[irq]++;
+
+ if (total_loops == 0)
+ printk("%s: ihdlr, irq %d, no command completed, calls %d.\n",
+ driver_name, irq, calls[irq]);
+
+ if (do_trace) printk("%s: ihdlr, exit, irq %d, calls %d.\n",
+ driver_name, irq, calls[irq]);
+
+#if defined (DEBUG_STATISTICS)
+ if ((calls[irq] % 100000) == 10000)
+ for (j = 0; sh[j] != NULL; j++)
+ printk("%s: ihdlr, calls %d, count %d, multi %d.\n", BN(j),
+ calls[(sh[j]->irq)], HD(j)->iocount, HD(j)->multicount);
+#endif
+
+ restore_flags(flags);
+ return;
+}
+
+#if defined(MODULE)
+Scsi_Host_Template driver_template = EATA;
+
+#include "scsi_module.c"
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/eata.h b/i386/i386at/gpl/linux/scsi/eata.h
new file mode 100644
index 00000000..aabcc806
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/eata.h
@@ -0,0 +1,41 @@
+/*
+ * eata.h - used by the low-level driver for EATA/DMA SCSI host adapters.
+ *
+ */
+#ifndef _EATA_H
+#define _EATA_H
+
+#include <linux/scsicam.h>
+
+int eata2x_detect(Scsi_Host_Template *);
+int eata2x_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int eata2x_abort(Scsi_Cmnd *);
+int eata2x_reset(Scsi_Cmnd *);
+
+#define EATA_VERSION "2.01.00"
+
+
+#define EATA { \
+ NULL, /* Ptr for modules */ \
+ NULL, /* usage count for modules */ \
+ NULL, \
+ NULL, \
+ "EATA/DMA 2.0x rev. " EATA_VERSION " ", \
+ eata2x_detect, \
+ NULL, /* Release */ \
+ NULL, \
+ NULL, \
+ eata2x_queuecommand, \
+ eata2x_abort, \
+ eata2x_reset, \
+ NULL, \
+ scsicam_bios_param, \
+ 0, /* can_queue, reset by detect */ \
+ 7, /* this_id, reset by detect */ \
+ 0, /* sg_tablesize, reset by detect */ \
+ 0, /* cmd_per_lun, reset by detect */ \
+ 0, /* number of boards present */ \
+ 1, /* unchecked isa dma, reset by detect */ \
+ ENABLE_CLUSTERING \
+ }
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/eata_dma.c b/i386/i386at/gpl/linux/scsi/eata_dma.c
new file mode 100644
index 00000000..da9e3daa
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/eata_dma.c
@@ -0,0 +1,1375 @@
+/************************************************************
+ * *
+ * Linux EATA SCSI driver *
+ * *
+ * based on the CAM document CAM/89-004 rev. 2.0c, *
+ * DPT's driver kit, some internal documents and source, *
+ * and several other Linux scsi drivers and kernel docs. *
+ * *
+ * The driver currently: *
+ * -supports all ISA based EATA-DMA boards *
+ * -supports all EISA based EATA-DMA boards *
+ * -supports all PCI based EATA-DMA boards *
+ * -supports multiple HBAs with & without IRQ sharing *
+ * -supports all SCSI channels on multi channel boards *
+ * -needs identical IDs on all channels of a HBA *
+ * -can be loaded as module *
+ * -displays statistical and hardware information *
+ * in /proc/scsi/eata_dma *
+ * -provides rudimentary latency measurement *
+ * possibilities via /proc/scsi/eata_dma/<hostnum> *
+ * *
+ * (c)1993,94,95 Michael Neuffer *
+ * neuffer@goofy.zdv.uni-mainz.de *
+ * *
+ * This program is free software; you can redistribute it *
+ * and/or modify it under the terms of the GNU General *
+ * Public License as published by the Free Software *
+ * Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be *
+ * useful, but WITHOUT ANY WARRANTY; without even the *
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A *
+ * PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. *
+ * *
+ * You should have received a copy of the GNU General *
+ * Public License along with this kernel; if not, write to *
+ * the Free Software Foundation, Inc., 675 Mass Ave, *
+ * Cambridge, MA 02139, USA. *
+ * *
+ * I have to thank DPT for their excellent support. It took *
+ * me almost a year and a stopover at their HQ, on my first *
+ * trip to the USA, to get it, but since then they've been *
+ * very helpful and tried to give me all the infos and *
+ * support I need. *
+ * *
+ * Thanks also to Greg Hosler who did a lot of testing and *
+ * found quite a number of bugs during the development. *
+ ************************************************************
+ * last change: 95/11/29 OS: Linux 1.3.45 *
+ ************************************************************/
+
+/* Look in eata_dma.h for configuration and revision information */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/in.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <asm/byteorder.h>
+#include <asm/types.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "sd.h"
+#include "hosts.h"
+#include <linux/scsicam.h>
+#include "eata_dma.h"
+#include "eata_dma_proc.h"
+
+#include <linux/stat.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+
+struct proc_dir_entry proc_scsi_eata_dma = {
+ PROC_SCSI_EATA, 8, "eata_dma",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+static u32 ISAbases[] =
+{0x1F0, 0x170, 0x330, 0x230};
+static unchar EISAbases[] =
+{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static uint registered_HBAs = 0;
+static struct Scsi_Host *last_HBA = NULL;
+static struct Scsi_Host *first_HBA = NULL;
+static unchar reg_IRQ[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static unchar reg_IRQL[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static struct eata_sp *status = 0; /* Statuspacket array */
+static void *dma_scratch = 0;
+
+static struct eata_register *fake_int_base;
+static int fake_int_result;
+static int fake_int_happened;
+
+static ulong int_counter = 0;
+static ulong queue_counter = 0;
+
+void eata_scsi_done (Scsi_Cmnd * scmd)
+{
+ scmd->request.rq_status = RQ_SCSI_DONE;
+
+ if (scmd->request.sem != NULL)
+ up(scmd->request.sem);
+
+ return;
+}
+
+void eata_fake_int_handler(s32 irq, struct pt_regs * regs)
+{
+ fake_int_result = inb((ulong)fake_int_base + HA_RSTATUS);
+ fake_int_happened = TRUE;
+ DBG(DBG_INTR3, printk("eata_fake_int_handler called irq%d base %p"
+ " res %#x\n", irq, fake_int_base, fake_int_result));
+ return;
+}
+
+#ifdef MACH
+#include "eata_dma_proc.src"
+#else
+#include "eata_dma_proc.c"
+#endif
+
+#ifdef MODULE
+int eata_release(struct Scsi_Host *sh)
+{
+ uint i;
+ if (sh->irq && reg_IRQ[sh->irq] == 1) free_irq(sh->irq);
+ else reg_IRQ[sh->irq]--;
+
+ scsi_init_free((void *)status, 512);
+ scsi_init_free((void *)dma_scratch, 512);
+ for (i = 0; i < sh->can_queue; i++){ /* Free all SG arrays */
+ if(SD(sh)->ccb[i].sg_list != NULL)
+ scsi_init_free((void *) SD(sh)->ccb[i].sg_list,
+ sh->sg_tablesize * sizeof(struct eata_sg_list));
+ }
+
+ if (SD(sh)->channel == 0) {
+ if (sh->dma_channel != BUSMASTER) free_dma(sh->dma_channel);
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ }
+ return(TRUE);
+}
+#endif
+
+
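+/*
+ * Interrupt handler. Several HBAs may share one IRQ, so walk the list of
+ * registered hosts, skip those on a different IRQ, and keep servicing a
+ * host for as long as its auxiliary status register reports a pending
+ * interrupt (HA_AIRQ). Each completed command is found through the status
+ * packet's CCB pointer and finished via its scsi_done() callback.
+ */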
+void eata_int_handler(int irq, struct pt_regs * regs)
+{
+ uint i, result = 0;
+ uint hba_stat, scsi_stat, eata_stat;
+ Scsi_Cmnd *cmd;
+ struct eata_ccb *cp;
+ struct eata_sp *sp;
+ uint base;
+ ulong flags;
+ uint x;
+ struct Scsi_Host *sh;
+
+ save_flags(flags);
+ cli();
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if (sh->irq != irq)
+ continue;
+
+ while(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+
+ int_counter++;
+
+ sp = &SD(sh)->sp;
+ cp = sp->ccb;
+
+ if(cp == NULL) {
+ eata_stat = inb((uint)sh->base + HA_RSTATUS);
+ printk("eata_dma: int_handler, Spurious IRQ %d "
+ "received. CCB pointer not set.\n", irq);
+ break;
+ }
+
+ cmd = cp->cmd;
+ base = (uint) cmd->host->base;
+ hba_stat = sp->hba_stat;
+
+ scsi_stat = (sp->scsi_stat >> 1) & 0x1f;
+
+ if (sp->EOC == FALSE) {
+ eata_stat = inb(base + HA_RSTATUS);
+ printk("eata_dma: int_handler, board: %x cmd %lx returned "
+ "unfinished.\nEATA: %x HBA: %x SCSI: %x spadr %lx "
+ "spadrirq %lx, irq%d\n", base, (long)cp, eata_stat,
+ hba_stat, scsi_stat,(long)&status, (long)&status[irq],
+ irq);
+ DBG(DBG_DELAY, DEL2(800));
+ break;
+ }
+
+ if (cp->status == LOCKED) {
+ cp->status = FREE;
+ eata_stat = inb(base + HA_RSTATUS);
+ printk("eata_dma: int_handler, freeing locked queueslot\n");
+ DBG(DBG_INTR && DBG_DELAY, DEL2(800));
+ break;
+ }
+
+ eata_stat = inb(base + HA_RSTATUS);
+ DBG(DBG_INTR, printk("IRQ %d received, base %#.4x, pid %ld, "
+ "target: %x, lun: %x, ea_s: %#.2x, hba_s: "
+ "%#.2x \n", irq, base, cmd->pid, cmd->target,
+ cmd->lun, eata_stat, hba_stat));
+
+ switch (hba_stat) {
+ case HA_NO_ERROR: /* NO Error */
+ if (scsi_stat == CONDITION_GOOD
+ && cmd->device->type == TYPE_DISK
+ && (HD(cmd)->t_state[cp->cp_channel][cp->cp_id] == RESET))
+ result = DID_BUS_BUSY << 16;
+ else if (scsi_stat == GOOD) {
+ HD(cmd)->t_state[cp->cp_channel][cp->cp_id] = OK;
+ if(HD(cmd)->do_latency == TRUE && cp->timestamp) {
+ uint time;
+ time = jiffies - cp->timestamp;
+ if((cp->rw_latency) == TRUE) { /* was WRITE */
+ if(HD(cmd)->writes_lat[cp->sizeindex][1] > time)
+ HD(cmd)->writes_lat[cp->sizeindex][1] = time;
+ if(HD(cmd)->writes_lat[cp->sizeindex][2] < time)
+ HD(cmd)->writes_lat[cp->sizeindex][2] = time;
+ HD(cmd)->writes_lat[cp->sizeindex][3] += time;
+ HD(cmd)->writes_lat[cp->sizeindex][0]++;
+ } else {
+ if(HD(cmd)->reads_lat[cp->sizeindex][1] > time)
+ HD(cmd)->reads_lat[cp->sizeindex][1] = time;
+ if(HD(cmd)->reads_lat[cp->sizeindex][2] < time)
+ HD(cmd)->reads_lat[cp->sizeindex][2] = time;
+ HD(cmd)->reads_lat[cp->sizeindex][3] += time;
+ HD(cmd)->reads_lat[cp->sizeindex][0]++;
+ }
+ }
+ }
+ else if (scsi_stat == CHECK_CONDITION
+ && cmd->device->type == TYPE_DISK
+ && (cmd->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
+ result = DID_BUS_BUSY << 16;
+ else
+ result = DID_OK << 16;
+ HD(cmd)->t_timeout[cp->cp_channel][cp->cp_id] = OK;
+ break;
+ case HA_ERR_SEL_TO: /* Selection Timeout */
+ result = DID_BAD_TARGET << 16;
+ break;
+ case HA_ERR_CMD_TO: /* Command Timeout */
+ if (HD(cmd)->t_timeout[cp->cp_channel][cp->cp_id] > 1)
+ result = DID_ERROR << 16;
+ else {
+ result = DID_TIME_OUT << 16;
+ HD(cmd)->t_timeout[cp->cp_channel][cp->cp_id]++;
+ }
+ break;
+ case HA_ERR_RESET: /* SCSI Bus Reset Received */
+ case HA_INIT_POWERUP: /* Initial Controller Power-up */
+ if (cmd->device->type != TYPE_TAPE)
+ result = DID_BUS_BUSY << 16;
+ else
+ result = DID_ERROR << 16;
+
+ for (i = 0; i < MAXTARGET; i++)
+ HD(cmd)->t_state[cp->cp_channel][i] = RESET;
+ break;
+ case HA_UNX_BUSPHASE: /* Unexpected Bus Phase */
+ case HA_UNX_BUS_FREE: /* Unexpected Bus Free */
+ case HA_BUS_PARITY: /* Bus Parity Error */
+ case HA_SCSI_HUNG: /* SCSI Hung */
+ case HA_UNX_MSGRJCT: /* Unexpected Message Reject */
+ case HA_RESET_STUCK: /* SCSI Bus Reset Stuck */
+ case HA_RSENSE_FAIL: /* Auto Request-Sense Failed */
+ case HA_PARITY_ERR: /* Controller Ram Parity */
+ default:
+ result = DID_ERROR << 16;
+ break;
+ }
+ cmd->result = result | (scsi_stat << 1);
+
+#if DBG_INTR2
+ if (scsi_stat || result || hba_stat || eata_stat != 0x50
+ || cmd->scsi_done == NULL || cmd->device->id == 7)
+ printk("HBA: %d, channel %d, id: %d, lun %d, pid %ld:\n"
+ "eata_stat %#x, hba_stat %#.2x, scsi_stat %#.2x, "
+ "sense_key: %#x, result: %#.8x\n", x,
+ cmd->device->channel, cmd->device->id, cmd->device->lun,
+ cmd->pid, eata_stat, hba_stat, scsi_stat,
+ cmd->sense_buffer[2] & 0xf, cmd->result);
+ DBG(DBG_INTR&&DBG_DELAY,DEL2(800));
+#endif
+
+ cp->status = FREE; /* now we can release the slot */
+ cmd->scsi_done(cmd);
+ }
+ }
+ restore_flags(flags);
+
+ return;
+}
+
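+/*
+ * Hand a command packet to the HBA: poll the auxiliary status register
+ * until the HA_ABUSY bit clears (giving up after R_LIMIT tries), then write
+ * the 32-bit bus address of the packet byte by byte, LSB first, into the
+ * DMA address registers and finally the command code into HA_WCOMMAND.
+ */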
+inline int eata_send_command(u32 addr, u32 base, u8 command)
+{
+ long loop = R_LIMIT;
+
+ while (inb(base + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0)
+ return(FALSE);
+
+ /* And now the address in nice little byte chunks */
+ outb( addr & 0x000000ff, base + HA_WDMAADDR);
+ outb((addr & 0x0000ff00) >> 8, base + HA_WDMAADDR + 1);
+ outb((addr & 0x00ff0000) >> 16, base + HA_WDMAADDR + 2);
+ outb((addr & 0xff000000) >> 24, base + HA_WDMAADDR + 3);
+ outb(command, base + HA_WCOMMAND);
+ return(TRUE);
+}
+
+#if 0
+inline int eata_send_immediate(u32 addr, u32 base, u8 cmnd, u8 cmnd2, u8 id,
+ u8 lun)
+{
+ if(addr){
+ outb( addr & 0x000000ff, base + HA_WDMAADDR);
+ outb((addr & 0x0000ff00) >> 8, base + HA_WDMAADDR + 1);
+ outb((addr & 0x00ff0000) >> 16, base + HA_WDMAADDR + 2);
+ outb((addr & 0xff000000) >> 24, base + HA_WDMAADDR + 3);
+ } else {
+ outb(id, base + HA_WSUBCODE);
+ outb(lun, base + HA_WSUBLUN);
+ }
+
+ outb(cmnd2, base + HA_WCOMMAND2);
+ outb(cmnd, base + HA_WCOMMAND);
+ return(TRUE);
+}
+#endif
+
+int eata_queue(Scsi_Cmnd * cmd, void (* done) (Scsi_Cmnd *))
+{
+ unsigned int i, x, y;
+ u32 flags;
+ hostdata *hd;
+ struct Scsi_Host *sh;
+ struct eata_ccb *cp;
+ struct scatterlist *sl;
+
+ save_flags(flags);
+ cli();
+
+ queue_counter++;
+
+ hd = HD(cmd);
+ sh = cmd->host;
+
+ /* check for free slot */
+ for (y = hd->last_ccb + 1, x = 0; x < sh->can_queue; x++, y++) {
+ if (y >= sh->can_queue)
+ y = 0;
+ if (hd->ccb[y].status == FREE)
+ break;
+ }
+
+ hd->last_ccb = y;
+
+ if (x >= sh->can_queue) {
+ uint z;
+
+ printk(KERN_EMERG "eata_dma: run out of queue slots cmdno:%ld"
+ " intrno: %ld, can_queue: %d, x: %d, y: %d\n",
+ queue_counter, int_counter, sh->can_queue, x, y);
+ printk(KERN_EMERG "Status of queueslots:");
+ for(z = 0; z < sh->can_queue; z +=2) {
+ switch(hd->ccb[z].status) {
+ case FREE:
+ printk(KERN_EMERG "Slot %2d is FREE \t", z);
+ break;
+ case USED:
+ printk(KERN_EMERG "Slot %2d is USED \t", z);
+ break;
+ case LOCKED:
+ printk(KERN_EMERG "Slot %2d is LOCKED\t", z);
+ break;
+ default:
+ printk(KERN_EMERG "Slot %2d is UNKNOWN\t", z);
+ }
+ }
+ panic("\nSystem halted.\n");
+ }
+ cp = &hd->ccb[y];
+
+ memset(cp, 0, sizeof(struct eata_ccb) - sizeof(struct eata_sg_list *));
+
+ cp->status = USED; /* claim free slot */
+
+ DBG(DBG_QUEUE, printk("eata_queue pid %ld, target: %x, lun: %x, y %d\n",
+ cmd->pid, cmd->target, cmd->lun, y));
+ DBG(DBG_QUEUE && DBG_DELAY, DEL2(250));
+
+ if(hd->do_latency == TRUE) {
+ int x, z;
+ short *sho;
+ long *lon;
+ x = 0; /* just to keep GCC quiet */
+ if (cmd->cmnd[0] == WRITE_6 || cmd->cmnd[0] == WRITE_10 ||
+ cmd->cmnd[0] == WRITE_12 || cmd->cmnd[0] == READ_6 ||
+ cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == READ_12) {
+
+ cp->timestamp = jiffies; /* For latency measurements */
+ switch(cmd->cmnd[0]) {
+ case WRITE_6:
+ case READ_6:
+ x = cmd->cmnd[4]/2;
+ break;
+ case WRITE_10:
+ case READ_10:
+ sho = (short *) &cmd->cmnd[7];
+ x = ntohs(*sho)/2;
+ break;
+ case WRITE_12:
+ case READ_12:
+ lon = (long *) &cmd->cmnd[6];
+ x = ntohl(*lon)/2;
+ break;
+ }
+
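+ /* x now holds the transfer size in KB (assuming 512-byte
+ * sectors); pick the smallest power-of-two bucket index
+ * (0..11) that covers it for the latency histograms. */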
+ for(z = 0; (x > (1 << z)) && (z <= 11); z++)
+ /* nothing */;
+ cp->sizeindex = z;
+ if (cmd->cmnd[0] == WRITE_6 || cmd->cmnd[0] == WRITE_10 ||
+ cmd->cmnd[0] == WRITE_12){
+ cp->rw_latency = TRUE;
+ }
+ }
+ }
+ cmd->scsi_done = (void *)done;
+
+ switch (cmd->cmnd[0]) {
+ case CHANGE_DEFINITION: case COMPARE: case COPY:
+ case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
+ case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case WRITE_6: case WRITE_10: case WRITE_VERIFY:
+ case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
+ case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12:
+ case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW:
+ case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+ case 0xea: /* alternate number for WRITE LONG */
+ cp->DataOut = TRUE; /* Output mode */
+ break;
+ case TEST_UNIT_READY:
+ default:
+ cp->DataIn = TRUE; /* Input mode */
+ }
+
+ /* FIXME: This will have to be changed once the midlevel driver
+ * allows different HBA IDs on every channel.
+ */
+ if (cmd->target == sh->this_id)
+ cp->Interpret = TRUE; /* Interpret command */
+
+ if (cmd->use_sg) {
+ cp->scatter = TRUE; /* SG mode */
+ if (cp->sg_list == NULL) {
+ cp->sg_list = kmalloc(sh->sg_tablesize * sizeof(struct eata_sg_list),
+ GFP_ATOMIC | GFP_DMA);
+ }
+ if (cp->sg_list == NULL)
+ panic("eata_dma: Run out of DMA memory for SG lists !\n");
+ cp->cp_dataDMA = htonl(virt_to_bus(cp->sg_list));
+
+ cp->cp_datalen = htonl(cmd->use_sg * sizeof(struct eata_sg_list));
+ sl=(struct scatterlist *)cmd->request_buffer;
+ for(i = 0; i < cmd->use_sg; i++, sl++){
+ cp->sg_list[i].data = htonl(virt_to_bus(sl->address));
+ cp->sg_list[i].len = htonl((u32) sl->length);
+ }
+ } else {
+ cp->scatter = FALSE;
+ cp->cp_datalen = htonl(cmd->request_bufflen);
+ cp->cp_dataDMA = htonl(virt_to_bus(cmd->request_buffer));
+ }
+
+ cp->Auto_Req_Sen = TRUE;
+ cp->cp_reqDMA = htonl(virt_to_bus(cmd->sense_buffer));
+ cp->reqlen = sizeof(cmd->sense_buffer);
+
+ cp->cp_id = cmd->target;
+ cp->cp_channel = cmd->channel;
+ cp->cp_lun = cmd->lun;
+ cp->cp_dispri = TRUE;
+ cp->cp_identify = TRUE;
+ memcpy(cp->cp_cdb, cmd->cmnd, cmd->cmd_len);
+
+ cp->cp_statDMA = htonl(virt_to_bus(&(hd->sp)));
+
+ cp->cp_viraddr = cp; /* This will be passed thru, so we don't need to
+ * convert it */
+ cp->cmd = cmd;
+ cmd->host_scribble = (char *)&hd->ccb[y];
+
+ if(eata_send_command((u32) cp, (u32) sh->base, EATA_CMD_DMA_SEND_CP) == FALSE) {
+ cmd->result = DID_BUS_BUSY << 16;
+ DBG(DBG_QUEUE && DBG_ABNORM,
+ printk("eata_queue target %d, pid %ld, HBA busy, "
+ "returning DID_BUS_BUSY\n",cmd->target, cmd->pid));
+ done(cmd);
+ cp->status = FREE;
+ restore_flags(flags);
+ return(0);
+ }
+ DBG(DBG_QUEUE, printk("Queued base %#.4x pid: %ld target: %x lun: %x "
+ "slot %d irq %d\n", (s32)sh->base, cmd->pid,
+ cmd->target, cmd->lun, y, sh->irq));
+ DBG(DBG_QUEUE && DBG_DELAY, DEL2(200));
+ restore_flags(flags);
+ return(0);
+}
+
+
+int eata_abort(Scsi_Cmnd * cmd)
+{
+ ulong loop = R_LIMIT;
+ ulong flags;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_abort called pid: %ld target: %x lun: %x"
+ " reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+
+ while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY) {
+ if (--loop == 0) {
+ printk("eata_dma: abort, timeout error.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_ABORT_ERROR);
+ }
+ }
+ if (CD(cmd)->status == RESET) {
+ restore_flags(flags);
+ printk("eata_dma: abort, command reset error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_ABORT_ERROR);
+ }
+ if (CD(cmd)->status == LOCKED) {
+ restore_flags(flags);
+ DBG(DBG_ABNORM, printk("eata_dma: abort, queue slot locked.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ if (CD(cmd)->status == USED) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_BUSY\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_BUSY); /* SNOOZE */
+ }
+ if (CD(cmd)->status == FREE) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_NOT_RUNNING\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ restore_flags(flags);
+ panic("eata_dma: abort: invalid slot status\n");
+}
+
+int eata_reset(Scsi_Cmnd * cmd)
+{
+ ushort x, z;
+ ulong time, limit = 0;
+ ulong loop = R_LIMIT;
+ ulong flags;
+ unchar success = FALSE;
+ Scsi_Cmnd *sp;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_reset called pid:%ld target: %x lun: %x"
+ " reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+
+ if (HD(cmd)->state == RESET) {
+ printk("eata_reset: exit, already in reset.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_RESET_ERROR);
+ }
+
+ while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0) {
+ printk("eata_reset: exit, timeout error.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_RESET_ERROR);
+ }
+
+ for (x = 0; x < MAXCHANNEL; x++) {
+ for (z = 0; z < MAXTARGET; z++) {
+ HD(cmd)->t_state[x][z] = RESET;
+ HD(cmd)->t_timeout[x][z] = NO_TIMEOUT;
+ }
+ }
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+ if (HD(cmd)->ccb[x].status == FREE)
+ continue;
+
+ if (HD(cmd)->ccb[x].status == LOCKED) {
+ HD(cmd)->ccb[x].status = FREE;
+ printk("eata_reset: locked slot %d forced free.\n", x);
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ continue;
+ }
+ sp = HD(cmd)->ccb[x].cmd;
+ HD(cmd)->ccb[x].status = RESET;
+ printk("eata_reset: slot %d in reset, pid %ld.\n", x, sp->pid);
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+
+ if (sp == NULL)
+ panic("eata_reset: slot %d, sp==NULL.\n", x);
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+
+ if (sp == cmd)
+ success = TRUE;
+ }
+
+ /* hard reset the HBA */
+ inb((u32) (cmd->host->base) + HA_RSTATUS); /* This might cause trouble */
+ eata_send_command(0, (u32) cmd->host->base, EATA_CMD_RESET);
+
+ DBG(DBG_ABNORM, printk("eata_reset: board reset done, enabling interrupts.\n"));
+ HD(cmd)->state = RESET;
+
+ restore_flags(flags);
+
+ time = jiffies;
+ while (jiffies < (time + (3 * HZ)) || limit++ < 10000000)
+ /* As time goes by... */;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_reset: interrupts disabled, loops %ld.\n",
+ limit));
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+
+ /* Skip slots already set free by interrupt */
+ if (HD(cmd)->ccb[x].status != RESET)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ sp->result = DID_RESET << 16;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(cmd)->ccb[x].status = LOCKED;
+
+ printk("eata_reset: slot %d locked, DID_RESET, pid %ld done.\n",
+ x, sp->pid);
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ restore_flags(flags);
+ sp->scsi_done(sp);
+ cli();
+ }
+
+ HD(cmd)->state = FALSE;
+ restore_flags(flags);
+
+ if (success) {
+ DBG(DBG_ABNORM, printk("eata_reset: exit, success.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_RESET_SUCCESS);
+ } else {
+ DBG(DBG_ABNORM, printk("eata_reset: exit, wakeup.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_RESET_PUNT);
+ }
+}
+
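+/*
+ * Probe helper: build a CCB by hand and send an INQUIRY addressed to the
+ * HBA itself. The temporary fake interrupt handler records the resulting
+ * status; on error or timeout the board is hard-reset and NULL is returned,
+ * otherwise the caller gets the 56 bytes of inquiry data in dma_scratch.
+ */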
+char * get_board_data(u32 base, u32 irq, u32 id)
+{
+ struct eata_ccb *cp;
+ struct eata_sp *sp;
+ static char *buff;
+ ulong i;
+ ulong limit = 0;
+
+ cp = (struct eata_ccb *) scsi_init_malloc(sizeof(struct eata_ccb),
+ GFP_ATOMIC | GFP_DMA);
+ sp = (struct eata_sp *) scsi_init_malloc(sizeof(struct eata_sp),
+ GFP_ATOMIC | GFP_DMA);
+
+ buff = dma_scratch;
+
+ memset(cp, 0, sizeof(struct eata_ccb));
+ memset(sp, 0, sizeof(struct eata_sp));
+ memset(buff, 0, 256);
+
+ cp->DataIn = TRUE;
+ cp->Interpret = TRUE; /* Interpret command */
+ cp->cp_dispri = TRUE;
+ cp->cp_identify = TRUE;
+
+ cp->cp_datalen = htonl(56);
+ cp->cp_dataDMA = htonl(virt_to_bus(buff));
+ cp->cp_statDMA = htonl(virt_to_bus(sp));
+ cp->cp_viraddr = cp;
+
+ cp->cp_id = id;
+ cp->cp_lun = 0;
+
+ cp->cp_cdb[0] = INQUIRY;
+ cp->cp_cdb[1] = 0;
+ cp->cp_cdb[2] = 0;
+ cp->cp_cdb[3] = 0;
+ cp->cp_cdb[4] = 56;
+ cp->cp_cdb[5] = 0;
+
+ fake_int_base = (struct eata_register *) base;
+ fake_int_result = FALSE;
+ fake_int_happened = FALSE;
+
+ eata_send_command((u32) cp, (u32) base, EATA_CMD_DMA_SEND_CP);
+
+ i = jiffies + (3 * HZ);
+ while (fake_int_happened == FALSE && jiffies <= i)
+ barrier();
+
+ DBG(DBG_INTR3, printk("fake_int_result: %#x hbastat %#x scsistat %#x,"
+ " buff %p sp %p\n",
+ fake_int_result, (u32) (sp->hba_stat /*& 0x7f*/),
+ (u32) sp->scsi_stat, buff, sp));
+
+ scsi_init_free((void *)cp, sizeof(struct eata_ccb));
+ scsi_init_free((void *)sp, sizeof(struct eata_sp));
+
+ if ((fake_int_result & HA_SERROR) || jiffies > i){
+ /* hard reset the HBA */
+ inb((u32) (base) + HA_RSTATUS);
+ eata_send_command(0, base, EATA_CMD_RESET);
+ i = jiffies;
+ while (jiffies < (i + (3 * HZ)) && limit++ < 10000000)
+ barrier();
+ return (NULL);
+ } else
+ return (buff);
+}
+
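+/*
+ * A board that has shut itself down ("blink state") presents a fixed
+ * signature in its status registers. Read the register a few times; if the
+ * value is stable and equals the blink signature, report the board as
+ * being in the blink state.
+ */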
+int check_blink_state(long base)
+{
+ ushort loops = 10;
+ u32 blinkindicator;
+ u32 state = 0x12345678;
+ u32 oldstate = 0;
+
+ blinkindicator = htonl(0x54504442);
+ while ((loops--) && (state != oldstate)) {
+ oldstate = state;
+ state = inl((uint) base + 1);
+ }
+
+ DBG(DBG_BLINK, printk("Did Blink check. Status: %d\n",
+ (state == oldstate) && (state == blinkindicator)));
+
+ if ((state == oldstate) && (state == blinkindicator))
+ return(TRUE);
+ else
+ return (FALSE);
+}
+
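+/*
+ * Read the EATA configuration block in PIO mode: wait for the HBA to go
+ * idle, issue READ CONFIG, then pull the structure in 16-bit words from the
+ * data register whenever DRQ is asserted. The result is accepted only if
+ * no error is flagged and the block carries the "EATA" signature.
+ */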
+int get_conf_PIO(u32 base, struct get_conf *buf)
+{
+ ulong loop = R_LIMIT;
+ u16 *p;
+
+ if(check_region(base, 9))
+ return (FALSE);
+
+ memset(buf, 0, sizeof(struct get_conf));
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ return (FALSE);
+
+ DBG(DBG_PIO && DBG_PROBE,
+ printk("Issuing PIO READ CONFIG to HBA at %#x\n", base));
+ eata_send_command(0, base, EATA_CMD_PIO_READ_CONFIG);
+
+ loop = R_LIMIT;
+ for (p = (u16 *) buf;
+ (long)p <= ((long)buf + (sizeof(struct get_conf) / 2)); p++) {
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ if (--loop == 0)
+ return (FALSE);
+
+ loop = R_LIMIT;
+ *p = inw(base + HA_RDATA);
+ }
+
+ if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { /* Error ? */
+ if (htonl(EATA_SIGNATURE) == buf->signature) {
+ DBG(DBG_PIO&&DBG_PROBE, printk("EATA Controller found at %x "
+ "EATA Level: %x\n", (uint) base,
+ (uint) (buf->version)));
+
+ while (inb(base + HA_RSTATUS) & HA_SDRQ)
+ inw(base + HA_RDATA);
+ return (TRUE);
+ }
+ } else {
+ DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during transfer "
+ "for HBA at %lx\n", (long)base));
+ }
+ return (FALSE);
+}
+
+void print_config(struct get_conf *gc)
+{
+ printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d DMAS:%d\n",
+ (u32) ntohl(gc->len), gc->version,
+ gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support,
+ gc->DMA_support);
+ printk("DMAV:%d HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n",
+ gc->DMA_valid, gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2],
+ gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND);
+ printk("IRQ:%d IRQT:%d DMAC:%d FORCADR:%d SG_64K:%d SG_UAE:%d MID:%d "
+ "MCH:%d MLUN:%d\n",
+ gc->IRQ, gc->IRQ_TR, (8 - gc->DMA_channel) & 7, gc->FORCADR,
+ gc->SG_64K, gc->SG_UAE, gc->MAX_ID, gc->MAX_CHAN, gc->MAX_LUN);
+ printk("RIDQ:%d PCI:%d EISA:%d\n",
+ gc->ID_qest, gc->is_PCI, gc->is_EISA);
+ DBG(DPT_DEBUG, DELAY(14));
+}
+
+short register_HBA(u32 base, struct get_conf *gc, Scsi_Host_Template * tpnt,
+ u8 bustype)
+{
+ ulong size = 0;
+ unchar dma_channel = 0;
+ char *buff = 0;
+ unchar bugs = 0;
+ struct Scsi_Host *sh;
+ hostdata *hd;
+ int x;
+
+
+ DBG(DBG_REGISTER, print_config(gc));
+
+ if (gc->DMA_support == FALSE) {
+ printk("The EATA HBA at %#.4x does not support DMA.\n"
+ "Please use the EATA-PIO driver.\n", base);
+ return (FALSE);
+ }
+ if(gc->HAA_valid == FALSE || ntohl(gc->len) < 0x22)
+ gc->MAX_CHAN = 0;
+
+ if (reg_IRQ[gc->IRQ] == FALSE) { /* Interrupt already registered ? */
+ if (!request_irq(gc->IRQ, (void *) eata_fake_int_handler, SA_INTERRUPT,
+ "eata_dma")){
+ reg_IRQ[gc->IRQ]++;
+ if (!gc->IRQ_TR)
+ reg_IRQL[gc->IRQ] = TRUE; /* IRQ is edge triggered */
+ } else {
+ printk("Couldn't allocate IRQ %d, Sorry.", gc->IRQ);
+ return (FALSE);
+ }
+ } else { /* More than one HBA on this IRQ */
+ if (reg_IRQL[gc->IRQ] == TRUE) {
+ printk("Can't support more than one HBA on this IRQ,\n"
+ " if the IRQ is edge triggered. Sorry.\n");
+ return (FALSE);
+ } else
+ reg_IRQ[gc->IRQ]++;
+ }
+
+ /* if gc->DMA_valid it must be an ISA HBA and we have to register it */
+ dma_channel = BUSMASTER;
+ if (gc->DMA_valid) {
+ if (request_dma(dma_channel = (8 - gc->DMA_channel) & 7, "eata_dma")) {
+ printk("Unable to allocate DMA channel %d for ISA HBA at %#.4x.\n",
+ dma_channel, base);
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+ }
+
+#if !(NEWSTUFF)
+ if (bustype != IS_EISA && bustype != IS_ISA)
+#endif
+ buff = get_board_data(base, gc->IRQ, gc->scsi_id[3]);
+
+ if (buff == NULL) {
+#if !(NEWSTUFF)
+ if (bustype == IS_EISA || bustype == IS_ISA) {
+ bugs = bugs || BROKEN_INQUIRY;
+ } else {
+#endif
+ if (gc->DMA_support == FALSE)
+ printk("HBA at %#.4x doesn't support DMA. Sorry\n", base);
+ else
+ printk("HBA at %#.4x does not react on INQUIRY. Sorry.\n",
+ base);
+ if (gc->DMA_valid)
+ free_dma(dma_channel);
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+#if !(NEWSTUFF)
+ }
+#endif
+ }
+
+ if (gc->DMA_support == FALSE && buff != NULL)
+ printk("HBA %.12sat %#.4x doesn't set the DMA_support flag correctly.\n",
+ &buff[16], base);
+
+ request_region(base, 9, "eata_dma"); /* We already checked the
+ * availability, so this
+ * should not fail.
+ */
+
+ if(ntohs(gc->queuesiz) == 0) {
+ gc->queuesiz = ntohs(64);
+ printk("Warning: Queue size has to be corrected. Assuming 64 queueslots\n"
+ " This might be a PM2012B with a defective Firmware\n");
+ }
+
+ size = sizeof(hostdata) + ((sizeof(struct eata_ccb) + sizeof(long))
+ * ntohs(gc->queuesiz));
+
+ DBG(DBG_REGISTER, printk("scsi_register size: %ld\n", size));
+
+ sh = scsi_register(tpnt, size);
+
+ if(sh == NULL) {
+ if (gc->DMA_valid)
+ free_dma(dma_channel);
+
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+
+ hd = SD(sh);
+
+ memset(hd->ccb, 0, sizeof(struct eata_ccb) * ntohs(gc->queuesiz));
+ memset(hd->reads, 0, sizeof(u32) * 26);
+
+ hd->broken_INQUIRY = (bugs & BROKEN_INQUIRY);
+
+ if(hd->broken_INQUIRY == TRUE) {
+ strcpy(SD(sh)->vendor, "DPT");
+ strcpy(SD(sh)->name, "??????????");
+ strcpy(SD(sh)->revision, "???.?");
+ } else {
+ strncpy(SD(sh)->vendor, &buff[8], 8);
+ SD(sh)->vendor[8] = 0;
+ strncpy(SD(sh)->name, &buff[16], 17);
+ SD(sh)->name[17] = 0;
+ SD(sh)->revision[0] = buff[32];
+ SD(sh)->revision[1] = buff[33];
+ SD(sh)->revision[2] = buff[34];
+ SD(sh)->revision[3] = '.';
+ SD(sh)->revision[4] = buff[35];
+ SD(sh)->revision[5] = 0;
+ }
+
+ switch (ntohl(gc->len)) {
+ case 0x1c:
+ SD(sh)->EATA_revision = 'a';
+ break;
+ case 0x1e:
+ SD(sh)->EATA_revision = 'b';
+ break;
+ case 0x22:
+ SD(sh)->EATA_revision = 'c';
+ break;
+ case 0x24:
+ SD(sh)->EATA_revision = 'z';
+ default:
+ SD(sh)->EATA_revision = '?';
+ }
+
+ if(ntohl(gc->len) >= 0x22) {
+ if (gc->is_PCI == TRUE)
+ hd->bustype = IS_PCI;
+ else if (gc->is_EISA == TRUE)
+ hd->bustype = IS_EISA;
+ else
+ hd->bustype = IS_ISA;
+ } else if(hd->broken_INQUIRY == FALSE) {
+ if (buff[21] == '4')
+ hd->bustype = IS_PCI;
+ else if (buff[21] == '2')
+ hd->bustype = IS_EISA;
+ else
+ hd->bustype = IS_ISA;
+ } else
+ hd->bustype = bustype;
+
+ if(ntohl(gc->len) >= 0x22) {
+ sh->max_id = gc->MAX_ID + 1;
+ sh->max_lun = gc->MAX_LUN + 1;
+ } else {
+ sh->max_id = 8;
+ sh->max_lun = 8;
+ }
+
+ hd->channel = gc->MAX_CHAN;
+ sh->max_channel = gc->MAX_CHAN;
+ sh->unique_id = base;
+ sh->base = (char *) base;
+ sh->io_port = base;
+ sh->n_io_port = 9;
+ sh->irq = gc->IRQ;
+ sh->dma_channel = dma_channel;
+
+ /* FIXME:
+ * SCSI midlevel code should support different HBA ids on every channel
+ */
+ sh->this_id = gc->scsi_id[3];
+ sh->can_queue = ntohs(gc->queuesiz);
+
+ if (gc->OCS_enabled == TRUE) {
+ if(hd->bustype != IS_ISA)
+ sh->cmd_per_lun = sh->can_queue/C_P_L_DIV;
+ else
+ sh->cmd_per_lun = 8; /* We artificially limit this to conserve
+ * memory, which would be needed for ISA
+ * bounce buffers */
+ } else
+ sh->cmd_per_lun = 1;
+
+ /* FIXME:
+ * SG should be allocated more dynamically
+ */
+ /*
+ * If we are using an ISA board, we can't use extended SG,
+ * because we would need excessive amounts of memory for
+ * bounce buffers.
+ */
+ if (gc->SG_64K == TRUE && ntohs(gc->SGsiz) == 64 && hd->bustype != IS_ISA){
+ sh->sg_tablesize = SG_SIZE_BIG;
+ sh->use_clustering = FALSE;
+ } else {
+ sh->sg_tablesize = ntohs(gc->SGsiz);
+ sh->use_clustering = TRUE;
+ if (sh->sg_tablesize > SG_SIZE || sh->sg_tablesize == 0) {
+ sh->sg_tablesize = SG_SIZE;
+ if (ntohs(gc->SGsiz) == 0)
+ printk("Warning: SG size had to be corrected.\n"
+ "This might be a PM2012 with a defective Firmware\n");
+ }
+ }
+
+ if (gc->SECOND)
+ hd->primary = FALSE;
+ else
+ hd->primary = TRUE;
+
+ sh->wish_block = FALSE;
+
+ if (hd->bustype != IS_ISA) {
+ sh->unchecked_isa_dma = FALSE;
+ } else {
+ sh->unchecked_isa_dma = TRUE; /* We're doing ISA DMA */
+ }
+
+ for(x = 0; x <= 11; x++){ /* Initialize min. latency */
+ hd->writes_lat[x][1] = 0xffffffff;
+ hd->reads_lat[x][1] = 0xffffffff;
+ }
+
+ hd->next = NULL; /* build a linked list of all HBAs */
+ hd->prev = last_HBA;
+ if(hd->prev != NULL)
+ SD(hd->prev)->next = sh;
+ last_HBA = sh;
+ if (first_HBA == NULL)
+ first_HBA = sh;
+ registered_HBAs++;
+
+ return (TRUE);
+}
+
+
+void find_EISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ u32 base;
+ int i;
+
+#if CHECKPAL
+ u8 pal1, pal2, pal3;
+#endif
+
+ for (i = 0; i < MAXEISA; i++) {
+ if (EISAbases[i] == TRUE) { /* Still a possibility ? */
+
+ base = 0x1c88 + (i * 0x1000);
+#if CHECKPAL
+ pal1 = inb((u16)base - 8);
+ pal2 = inb((u16)base - 7);
+ pal3 = inb((u16)base - 6);
+
+ if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) ||
+ ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) && (pal3 == NEC_ID3))||
+ ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) && (pal3 == ATT_ID3))){
+ DBG(DBG_PROBE, printk("EISA EATA id tags found: %x %x %x \n",
+ (int)pal1, (int)pal2, (int)pal3));
+#endif
+ if (get_conf_PIO(base, buf) == TRUE) {
+ if (buf->IRQ) {
+ DBG(DBG_EISA, printk("Registering EISA HBA\n"));
+ register_HBA(base, buf, tpnt, IS_EISA);
+ } else
+ printk("eata_dma: No valid IRQ. HBA removed from list\n");
+ } else {
+ if (check_blink_state(base))
+ printk("HBA is in BLINK state. Consult your HBAs "
+ "Manual to correct this.\n");
+ }
+ /* Nothing found here so we take it from the list */
+ EISAbases[i] = 0;
+#if CHECKPAL
+ }
+#endif
+ }
+ }
+ return;
+}
+
+void find_ISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ int i;
+
+ for (i = 0; i < MAXISA; i++) {
+ if (ISAbases[i]) {
+ if (get_conf_PIO(ISAbases[i],buf) == TRUE){
+ DBG(DBG_ISA, printk("Registering ISA HBA\n"));
+ register_HBA(ISAbases[i], buf, tpnt, IS_ISA);
+ } else {
+ if (check_blink_state(ISAbases[i]))
+ printk("HBA is in BLINK state. Consult your HBAs "
+ "Manual to correct this.\n");
+ }
+ ISAbases[i] = 0;
+ }
+ }
+ return;
+}
+
+void find_PCI(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+
+#ifndef CONFIG_PCI
+ printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n");
+#else
+
+ u8 pci_bus, pci_device_fn;
+ static s16 pci_index = 0; /* Device index to PCI BIOS calls */
+ u32 base = 0;
+ u16 com_adr;
+ u16 rev_device;
+ u32 error, i, x;
+ u8 pal1, pal2, pal3;
+
+ if (pcibios_present()) {
+ for (i = 0; i <= MAXPCI; ++i, ++pci_index) {
+ if (pcibios_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT,
+ pci_index, &pci_bus, &pci_device_fn))
+ break;
+ DBG(DBG_PROBE && DBG_PCI,
+ printk("eata_dma: find_PCI, HBA at bus %d, device %d,"
+ " function %d, index %d\n", (s32)pci_bus,
+ (s32)((pci_device_fn & 0xf8) >> 3),
+ (s32)(pci_device_fn & 7), pci_index));
+
+ if (!(error = pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_CLASS_DEVICE, &rev_device))) {
+ if (rev_device == PCI_CLASS_STORAGE_SCSI) {
+ if (!(error = pcibios_read_config_word(pci_bus,
+ pci_device_fn, PCI_COMMAND,
+ (u16 *) & com_adr))) {
+ if (!((com_adr & PCI_COMMAND_IO) &&
+ (com_adr & PCI_COMMAND_MASTER))) {
+ printk("eata_dma: find_PCI, HBA has IO or BUSMASTER mode disabled\n");
+ continue;
+ }
+ } else
+ printk("eata_dma: find_PCI, error %x while reading "
+ "PCI_COMMAND\n", error);
+ } else
+ printk("eata_dma: find_PCI, DEVICECLASSID %x didn't match\n",
+ rev_device);
+ } else {
+ printk("eata_dma: find_PCI, error %x while reading PCI_CLASS_BASE\n",
+ error);
+ continue;
+ }
+
+ if (!(error = pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, (int *) &base))){
+
+ /* Check if the address is valid */
+ if (base & 0x01) {
+ base &= 0xfffffffe;
+ /* EISA tag there ? */
+ pal1 = inb(base);
+ pal2 = inb(base + 1);
+ pal3 = inb(base + 2);
+ if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) ||
+ ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) &&
+ (pal3 == NEC_ID3)) ||
+ ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) &&
+ (pal3 == ATT_ID3)))
+ base += 0x08;
+ else
+ base += 0x10; /* Now, THIS is the real address */
+
+ if (base != 0x1f8) {
+ /* We didn't find it in the primary search */
+ if (get_conf_PIO(base, buf) == TRUE) {
+
+ /* OK. We made it this far, so we can go ahead and
+ * register it. We just have to check whether it also
+ * appears in the EISA or ISA list and, if so, remove it there.
+ */
+ DBG(DBG_PCI, printk("Registering PCI HBA\n"));
+ register_HBA(base, buf, tpnt, IS_PCI);
+
+ if (base < 0x1000) {
+ for (x = 0; x < MAXISA; ++x) {
+ if (ISAbases[x] == base) {
+ ISAbases[x] = 0;
+ break;
+ }
+ }
+ } else if ((base & 0x0fff) == 0x0c88)
+ EISAbases[(base >> 12) & 0x0f] = 0;
+ continue; /* break; */
+ } else if (check_blink_state(base) == TRUE) {
+ printk("eata_dma: HBA is in BLINK state.\n"
+ "Consult your HBAs Manual to correct this.\n");
+ }
+ }
+ }
+ } else {
+ printk("eata_dma: error %x while reading "
+ "PCI_BASE_ADDRESS_0\n", error);
+ }
+ }
+ } else {
+ printk("eata_dma: No BIOS32 extensions present. This driver release "
+ "still depends on it.\n"
+ " Skipping scan for PCI HBAs. \n");
+ }
+#endif /* #ifndef CONFIG_PCI */
+ return;
+}
+
+int eata_detect(Scsi_Host_Template * tpnt)
+{
+ struct Scsi_Host *HBA_ptr;
+ struct get_conf gc;
+ int i;
+
+ DBG((DBG_PROBE && DBG_DELAY) || DPT_DEBUG,
+ printk("Using lots of delays to let you read the debugging output\n"));
+
+ tpnt->proc_dir = &proc_scsi_eata_dma;
+
+ status = scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA);
+ dma_scratch = scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA);
+
+ if(status == NULL || dma_scratch == NULL) {
+ printk("eata_dma: can't allocate enough memory to probe for hosts !\n");
+ return(0);
+ }
+
+ find_PCI(&gc, tpnt);
+
+ find_EISA(&gc, tpnt);
+
+ find_ISA(&gc, tpnt);
+
+ for (i = 0; i <= MAXIRQ; i++) { /* Now that we know what we have, we */
+ if (reg_IRQ[i]){ /* exchange the interrupt handler which */
+ free_irq(i); /* we used for probing with the real one */
+ request_irq(i, (void *)(eata_int_handler), SA_INTERRUPT, "eata_dma");
+ }
+ }
+ HBA_ptr = first_HBA;
+
+ if (registered_HBAs != 0) {
+ printk("EATA (Extended Attachment) driver version: %d.%d%s\n"
+ "developed in co-operation with DPT\n"
+ "(c) 1993-95 Michael Neuffer, neuffer@goofy.zdv.uni-mainz.de\n",
+ VER_MAJOR, VER_MINOR, VER_SUB);
+ printk("Registered HBAs:");
+ printk("\nHBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: DMA: Ch: "
+ "ID: Pr: QS: SG: CPL:\n");
+ for (i = 1; i <= registered_HBAs; i++) {
+ printk("scsi%-2d: %.10s v%s 2.0%c %s %#.4x %2d",
+ HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
+ SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P')?
+ "PCI ":(SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ",
+ (u32) HBA_ptr->base, HBA_ptr->irq);
+ if(HBA_ptr->dma_channel != BUSMASTER)
+ printk(" %2x ", HBA_ptr->dma_channel);
+ else
+ printk(" %s", "BMST");
+ printk(" %d %d %c %2d %2d %2d\n", SD(HBA_ptr)->channel,
+ HBA_ptr->this_id, (SD(HBA_ptr)->primary == TRUE)?'Y':'N',
+ HBA_ptr->can_queue, HBA_ptr->sg_tablesize, HBA_ptr->cmd_per_lun);
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+ } else {
+ scsi_init_free((void *)status, 512);
+ }
+
+ scsi_init_free((void *)dma_scratch, 512);
+
+ DBG(DPT_DEBUG, DELAY(12));
+
+ return(registered_HBAs);
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but that will come later */
+Scsi_Host_Template driver_template = EATA_DMA;
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/eata_dma.h b/i386/i386at/gpl/linux/scsi/eata_dma.h
new file mode 100644
index 00000000..41504673
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/eata_dma.h
@@ -0,0 +1,119 @@
+/********************************************************
+* Header file for eata_dma.c Linux EATA-DMA SCSI driver *
+* (c) 1993,94,95 Michael Neuffer *
+*********************************************************
+* last change: 95/07/18 *
+********************************************************/
+
+
+#ifndef _EATA_DMA_H
+#define _EATA_DMA_H
+
+#ifndef HOSTS_C
+
+#include "eata_generic.h"
+
+
+#define VER_MAJOR 2
+#define VER_MINOR 5
+#define VER_SUB "8a"
+
+
+/************************************************************************
+ * Here you can switch parts of the code on and off *
+ ************************************************************************/
+
+#define CHECKPAL 0 /* EISA pal checking on/off */
+#define NEWSTUFF 0 /* Some changes for ISA/EISA boards */
+
+/************************************************************************
+ * Debug options. *
+ * Enable DEBUG and whichever options you require. *
+ ************************************************************************/
+#define DEBUG_EATA 1 /* Enable debug code. */
+#define DPT_DEBUG 0 /* Bob's special */
+#define DBG_DELAY 0 /* Build in delays so debug messages can be
+ * read before they vanish off the top of
+ * the screen! */
+#define DBG_PROBE 0 /* Debug probe routines. */
+#define DBG_PCI 0 /* Trace PCI routines */
+#define DBG_EISA 0 /* Trace EISA routines */
+#define DBG_ISA 0 /* Trace ISA routines */
+#define DBG_BLINK 0 /* Trace Blink check */
+#define DBG_PIO 0 /* Trace get_config_PIO */
+#define DBG_COM 0 /* Trace command call */
+#define DBG_QUEUE 0 /* Trace command queueing. */
+#define DBG_QUEUE2 0 /* Trace command queueing SG. */
+#define DBG_INTR 0 /* Trace interrupt service routine. */
+#define DBG_INTR2 0 /* Trace interrupt service routine. */
+#define DBG_INTR3 0 /* Trace get_board_data interrupts. */
+#define DBG_PROC 0 /* Debug proc-fs related statistics */
+#define DBG_PROC_WRITE 0
+#define DBG_REGISTER 0 /* */
+#define DBG_ABNORM 1 /* Debug abnormal actions (reset, abort)*/
+
+#if DEBUG_EATA
+#define DBG(x, y) if ((x)) {y;}
+#else
+#define DBG(x, y)
+#endif
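+/* Usage: DBG(DBG_PROBE, printk(...)); the statement is compiled in only
+ * when DEBUG_EATA is set, and executed only if the selected flag above is
+ * non-zero. */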
+
+#endif /* !HOSTS_C */
+
+int eata_detect(Scsi_Host_Template *);
+const char *eata_info(struct Scsi_Host *);
+int eata_command(Scsi_Cmnd *);
+int eata_queue(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
+int eata_abort(Scsi_Cmnd *);
+int eata_reset(Scsi_Cmnd *);
+int eata_proc_info(char *, char **, off_t, int, int, int);
+#ifdef MODULE
+int eata_release(struct Scsi_Host *);
+#else
+#define eata_release NULL
+#endif
+
+#include <linux/scsicam.h>
+
+#define EATA_DMA { \
+ NULL, NULL, \
+ NULL, /* proc_dir_entry */ \
+ eata_proc_info, /* procinfo */ \
+ "EATA (Extended Attachment) HBA driver", \
+ eata_detect, \
+ eata_release, \
+ NULL, NULL, \
+ eata_queue, \
+ eata_abort, \
+ eata_reset, \
+ NULL, /* Slave attach */ \
+ scsicam_bios_param, \
+ 0, /* Canqueue */ \
+ 0, /* this_id */ \
+ 0, /* sg_tablesize */ \
+ 0, /* cmd_per_lun */ \
+ 0, /* present */ \
+ 1, /* True if ISA */ \
+ ENABLE_CLUSTERING }
+
+
+#endif /* _EATA_DMA_H */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/eata_dma_proc.h b/i386/i386at/gpl/linux/scsi/eata_dma_proc.h
new file mode 100644
index 00000000..d49f348e
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/eata_dma_proc.h
@@ -0,0 +1,260 @@
+
+struct lun_map {
+ __u8 id:5,
+ chan:3;
+ __u8 lun;
+};
+
+typedef struct emul_pp {
+ __u8 p_code:6,
+ null:1,
+ p_save:1;
+ __u8 p_length;
+ __u16 cylinder;
+ __u8 heads;
+ __u8 sectors;
+ __u8 null2;
+ __u8 s_lunmap:4,
+ ems:1;
+ __u16 drive_type; /* In Little Endian ! */
+ struct lun_map lunmap[4];
+}emulpp;
+
+
+/* Log Sense pages */
+
+typedef struct log_sheader {
+ __u8 page_code,
+ reserved;
+ __u16 length;
+}logsh;
+
+
+/* Log Sense Statistics */
+
+typedef struct read_command_statistics {
+ __u16 code; /* 0x01 */
+ __u8 flags;
+ __u8 length; /* 0x24 */
+ __u32 h_commands,
+ uncached,
+ la_cmds,
+ la_blks,
+ la_hits,
+ missed,
+ hits,
+ seq_la_blks,
+ seq_la_hits;
+}r_cmd_stat;
+
+typedef struct write_command_statistics {
+ __u16 code; /* 0x03 */
+ __u8 flags;
+ __u8 length; /* 0x28 */
+ __u32 h_commands,
+ uncached,
+ thru,
+ bypass,
+ soft_err,
+ hits,
+ b_idle,
+ b_activ,
+ b_blks,
+ b_blks_clean;
+}w_cmd_stat;
+
+typedef struct host_command_statistics {
+ __u16 code; /* 0x02, 0x04 */
+ __u8 flags;
+ __u8 length; /* 0x30 */
+ __u32 sizes[12];
+}hst_cmd_stat;
+
+typedef struct physical_command_statistics {
+ __u16 code; /* 0x06, 0x07 */
+ __u8 flags;
+ __u8 length; /* 0x34 */
+ __u32 sizes[13];
+}phy_cmd_stat;
+
+typedef struct misc_device_statistics {
+ __u16 code; /* 0x05 */
+ __u8 flags;
+ __u8 length; /* 0x10 */
+ __u32 disconnect,
+ pass_thru,
+ sg_commands,
+ stripe_boundary_crosses;
+}msc_stats;
+
+/* Configuration Pages */
+
+typedef struct controller_configuration {
+ __u16 code; /* 0x01 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 intt:1,
+ sec:1,
+ csh:1,
+ key:1,
+ tmr:1,
+ srs:1,
+ nvr:1;
+ __u8 interrupt;
+}coco;
+
+typedef struct controller_hardware_errors {
+ __u16 code; /* 0x02 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 unused:1,
+ per:1;
+ __u8 interrupt;
+}coher;
+
+typedef struct memory_map {
+ __u16 code; /* 0x03, 0x04 */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u32 memory_map;
+}mema;
+
+typedef struct scsi_transfer {
+ __u16 code; /* 0x05 */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 offset,
+ period;
+ __u16 speed;
+}scsitrans;
+
+typedef struct scsi_modes {
+ __u16 code; /* 0x06 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 que:1,
+ cdis:1,
+ wtru:1,
+ dasd:1,
+ ncr:1,
+ awre:1;
+ __u8 reserved;
+}scsimod;
+
+typedef struct host_bus {
+ __u16 code; /* 0x07 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 speed:6,
+ pci:1,
+ eisa:1;
+ __u8 reserved;
+}hobu;
+
+typedef struct scsi_bus {
+ __u16 code; /* 0x08 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 speed:4,
+ res:1,
+ ext:1,
+ wide:1,
+ dif:1;
+ __u8 busnum;
+}scbu;
+
+typedef struct board_type {
+ __u16 code; /* 0x09 */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 unused:1,
+ cmi:1,
+ dmi:1,
+ cm4k:1,
+ cm4:1,
+ dm4k:1,
+ dm4:1,
+ hba:1;
+ __u8 cpu_type,
+ cpu_speed;
+ __u8 sx1:1,
+ sx2:1,
+ unused2:4,
+ alrm:1,
+ srom:1;
+}boty;
+
+typedef struct memory_config {
+ __u16 code; /* 0x0a */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 banksize[4];
+}memco;
+
+typedef struct firmware_info {
+ __u16 code; /* 0x0b */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 dnld:1,
+ bs528:1,
+ fmt:1,
+ fw528:1;
+ __u8 unused1,
+ fw_type,
+ unused;
+}firm;
+
+typedef struct subsystem_info {
+ __u16 code; /* 0x0c */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 shlf:1,
+ swap:1,
+ noss:1;
+ __u8 reserved;
+}subinf;
+
+typedef struct per_channel_info {
+ __u16 code; /* 0x0d */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 channel;
+ __u8 shlf:1,
+ swap:1,
+ noss:1,
+ srs:1,
+ que:1,
+ ext:1,
+ wide:1,
+ diff:1;
+}pcinf;
+
+typedef struct array_limits {
+ __u16 code; /* 0x0e */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 max_groups,
+ raid0_drv,
+ raid35_drv,
+ unused;
+}arrlim;
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/i386/i386at/gpl/linux/scsi/eata_dma_proc.src b/i386/i386at/gpl/linux/scsi/eata_dma_proc.src
new file mode 100644
index 00000000..b4936a67
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/eata_dma_proc.src
@@ -0,0 +1,488 @@
+
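+/*
+ * The HBA returns its log-sense statistics pages in big-endian (network)
+ * order. Walk the page in place: convert the 16-bit length in the page
+ * header, then for each sub-header convert its code and the 32-bit
+ * counters that follow it.
+ */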
+void swap_statistics(u8 *p)
+{
+ u32 y;
+ u32 *lp, h_lp;
+ u16 *sp, h_sp;
+ u8 *bp;
+
+ lp = (u32 *)p;
+ sp = ((short *)lp) + 1; /* Convert Header */
+ h_sp = *sp = ntohs(*sp);
+ lp++;
+
+ do {
+ sp = (u16 *)lp; /* Convert SubHeader */
+ *sp = ntohs(*sp);
+ bp = (u8 *) lp;
+ y = *(bp + 3);
+ lp++;
+ for (h_lp = (u32)lp; (u32)lp < h_lp + ((u32)*(bp + 3)); lp++)
+ *lp = ntohl(*lp);
+ }while ((u32)lp < ((u32)p) + 4 + h_sp);
+
+}
+
+/*
+ * eata_set_info
+ * buffer : pointer to the data that has been written to the hostfile
+ * length : number of bytes written to the hostfile
+ * HBA_ptr: pointer to the Scsi_Host struct
+ */
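+/* For example, writing "eata_dma latency" to /proc/scsi/eata_dma/<hostnum>
+ * turns the latency measurement on and "eata_dma nolatency" turns it off
+ * again; any other input is rejected with -EINVAL. */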
+int eata_set_info(char *buffer, int length, struct Scsi_Host *HBA_ptr)
+{
+ int orig_length = length;
+
+ if (length >= 8 && strncmp(buffer, "eata_dma", 8) == 0) {
+ buffer += 9;
+ length -= 9;
+ if(length >= 8 && strncmp(buffer, "latency", 7) == 0) {
+ SD(HBA_ptr)->do_latency = TRUE;
+ return(orig_length);
+ }
+
+ if(length >=10 && strncmp(buffer, "nolatency", 9) == 0) {
+ SD(HBA_ptr)->do_latency = FALSE;
+ return(orig_length);
+ }
+
+ printk("Unknown command:%s length: %d\n", buffer, length);
+ } else
+ printk("Wrong Signature:%10s\n", buffer);
+
+ return(-EINVAL);
+}
+
+/*
+ * eata_proc_info
+ * inout : decides on the direction of the dataflow and the meaning of the
+ * variables
+ * buffer: If inout==FALSE data is being written to it, else read from it
+ * *start: If inout==FALSE start of the valid data in the buffer
+ * offset: If inout==FALSE offset from the beginning of the imaginary file
+ * from which we start writing into the buffer
+ * length: If inout==FALSE max number of bytes to be written into the buffer
+ * else number of bytes in the buffer
+ */
+int eata_proc_info(char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+
+ Scsi_Device *scd, SDev;
+ struct Scsi_Host *HBA_ptr;
+ Scsi_Cmnd scmd;
+ char cmnd[10];
+ static u8 buff[512];
+ static u8 buff2[512];
+ hst_cmd_stat *rhcs, *whcs;
+ coco *cc;
+ scsitrans *st;
+ scsimod *sm;
+ hobu *hb;
+ scbu *sb;
+ boty *bt;
+ memco *mc;
+ firm *fm;
+ subinf *si;
+ pcinf *pi;
+ arrlim *al;
+ int i, x;
+ int size, len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+ scd = NULL;
+
+ HBA_ptr = first_HBA;
+ for (i = 1; i <= registered_HBAs; i++) {
+ if (HBA_ptr->host_no == hostno)
+ break;
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+
+ if(inout == TRUE) /* Has data been written to the file ? */
+ return(eata_set_info(buffer, length, HBA_ptr));
+
+ if (offset == 0)
+ memset(buff, 0, sizeof(buff));
+
+ cc = (coco *) (buff + 0x148);
+ st = (scsitrans *)(buff + 0x164);
+ sm = (scsimod *) (buff + 0x16c);
+ hb = (hobu *) (buff + 0x172);
+ sb = (scbu *) (buff + 0x178);
+ bt = (boty *) (buff + 0x17e);
+ mc = (memco *) (buff + 0x186);
+ fm = (firm *) (buff + 0x18e);
+ si = (subinf *) (buff + 0x196);
+ pi = (pcinf *) (buff + 0x19c);
+ al = (arrlim *) (buff + 0x1a2);
+
+ size = sprintf(buffer+len, "EATA (Extended Attachment) driver version: "
+ "%d.%d%s\n",VER_MAJOR, VER_MINOR, VER_SUB);
+ len += size; pos = begin + len;
+ size = sprintf(buffer + len, "queued commands: %10ld\n"
+ "processed interrupts:%10ld\n", queue_counter, int_counter);
+ len += size; pos = begin + len;
+
+ size = sprintf(buffer + len, "\nscsi%-2d: HBA %.10s\n",
+ HBA_ptr->host_no, SD(HBA_ptr)->name);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Firmware revision: v%s\n",
+ SD(HBA_ptr)->revision);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Hardware Configuration:\n");
+ len += size;
+ pos = begin + len;
+
+ if(SD(HBA_ptr)->broken_INQUIRY == TRUE) {
+ if (HBA_ptr->dma_channel == BUSMASTER)
+ size = sprintf(buffer + len, "DMA: BUSMASTER\n");
+ else
+ size = sprintf(buffer + len, "DMA: %d\n", HBA_ptr->dma_channel);
+ len += size;
+ pos = begin + len;
+
+ size = sprintf(buffer + len, "Base IO : %#.4x\n", (u32) HBA_ptr->base);
+ len += size;
+ pos = begin + len;
+
+ size = sprintf(buffer + len, "Host Bus: EISA\n");
+ len += size;
+ pos = begin + len;
+
+ } else {
+ memset(&SDev, 0, sizeof(Scsi_Device));
+ memset(&scmd, 0, sizeof(Scsi_Cmnd));
+
+ SDev.host = HBA_ptr;
+ SDev.id = HBA_ptr->this_id;
+ SDev.lun = 0;
+ SDev.channel = 0;
+
+ cmnd[0] = LOG_SENSE;
+ cmnd[1] = 0;
+ cmnd[2] = 0x33 + (3<<6);
+ cmnd[3] = 0;
+ cmnd[4] = 0;
+ cmnd[5] = 0;
+ cmnd[6] = 0;
+ cmnd[7] = 0x00;
+ cmnd[8] = 0x66;
+ cmnd[9] = 0;
+
+ scmd.cmd_len = 10;
+
+ scmd.host = HBA_ptr;
+ scmd.device = &SDev;
+ scmd.target = HBA_ptr->this_id;
+ scmd.lun = 0;
+ scmd.channel = 0;
+ scmd.use_sg = 0;
+
+ /*
+ * Do the command and wait for it to finish.
+ */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scmd.request.rq_status = RQ_SCSI_BUSY;
+ scmd.request.sem = &sem;
+ scsi_do_cmd (&scmd, cmnd, buff + 0x144, 0x66,
+ eata_scsi_done, 1 * HZ, 1);
+ down(&sem);
+ }
+
+ size = sprintf(buffer + len, "IRQ: %2d, %s triggered\n", cc->interrupt,
+ (cc->intt == TRUE)?"level":"edge");
+ len += size;
+ pos = begin + len;
+ if (HBA_ptr->dma_channel == 0xff)
+ size = sprintf(buffer + len, "DMA: BUSMASTER\n");
+ else
+ size = sprintf(buffer + len, "DMA: %d\n", HBA_ptr->dma_channel);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "CPU: MC680%02d %dMHz\n", bt->cpu_type,
+ bt->cpu_speed);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Base IO : %#.4x\n", (u32) HBA_ptr->base);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Host Bus: %s\n",
+ (SD(HBA_ptr)->bustype == IS_PCI)?"PCI ":
+ (SD(HBA_ptr)->bustype == IS_EISA)?"EISA":"ISA ");
+
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SCSI Bus:%s%s Speed: %sMB/sec. %s\n",
+ (sb->wide == TRUE)?" WIDE":"",
+ (sb->dif == TRUE)?" DIFFERENTIAL":"",
+ (sb->speed == 0)?"5":(sb->speed == 1)?"10":"20",
+ (sb->ext == TRUE)?"With external cable detection":"");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SCSI channel expansion Module: %s present\n",
+ (bt->sx1 == TRUE)?"SX1 (one channel)":
+ ((bt->sx2 == TRUE)?"SX2 (two channels)":"not"));
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SmartRAID hardware: %spresent.\n",
+ (cc->srs == TRUE)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Type: %s\n",
+ ((cc->key == TRUE)?((bt->dmi == TRUE)?"integrated"
+ :((bt->dm4 == TRUE)?"DM401X"
+ :(bt->dm4k == TRUE)?"DM4000"
+ :"-"))
+ :"-"));
+ len += size;
+ pos = begin + len;
+
+ size = sprintf(buffer + len, " Max array groups: %d\n",
+ (al->code == 0x0e)?al->max_groups:7);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Max drives per RAID 0 array: %d\n",
+ (al->code == 0x0e)?al->raid0_drv:7);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Max drives per RAID 3/5 array: %d\n",
+ (al->code == 0x0e)?al->raid35_drv:7);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Cache Module: %spresent.\n",
+ (cc->csh)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Type: %s\n",
+ ((cc->csh == TRUE)?((bt->cmi == TRUE)?"integrated"
+ :((bt->cm4 == TRUE)?"CM401X"
+ :((bt->cm4k == TRUE)?"CM4000"
+ :"-")))
+ :"-"));
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 3; x++) {
+ size = sprintf(buffer + len, " Bank%d: %dMB with%s ECC\n",x,
+ mc->banksize[x] & 0x7f,
+ (mc->banksize[x] & 0x80)?"":"out");
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer + len, "Timer Mod.: %spresent\n",
+ (cc->tmr == TRUE)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "NVRAM : %spresent\n",
+ (cc->nvr == TRUE)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SmartROM : %sabled\n",
+ (bt->srom == TRUE)?"dis":"en");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Alarm : %s\n",
+ (bt->alrm == TRUE)?"on":"off");
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ cmnd[0] = LOG_SENSE;
+ cmnd[1] = 0;
+ cmnd[2] = 0x32 + (3<<6);
+ cmnd[3] = 0;
+ cmnd[4] = 0;
+ cmnd[5] = 0;
+ cmnd[6] = 0;
+ cmnd[7] = 0x01;
+ cmnd[8] = 0x44;
+ cmnd[9] = 0;
+
+ scmd.cmd_len = 10;
+
+ /*
+ * Do the command and wait for it to finish.
+ */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scmd.request.rq_status = RQ_SCSI_BUSY;
+ scmd.request.sem = &sem;
+ scsi_do_cmd (&scmd, cmnd, buff2, 0x144,
+ eata_scsi_done, 1 * HZ, 1);
+ down(&sem);
+ }
+
+ swap_statistics(buff2);
+ rhcs = (hst_cmd_stat *)(buff2 + 0x2c);
+ whcs = (hst_cmd_stat *)(buff2 + 0x8c);
+
+ for (x = 0; x <= 11; x++) {
+ SD(HBA_ptr)->reads[x] += rhcs->sizes[x];
+ SD(HBA_ptr)->writes[x] += whcs->sizes[x];
+ SD(HBA_ptr)->reads[12] += rhcs->sizes[x];
+ SD(HBA_ptr)->writes[12] += whcs->sizes[x];
+ }
+ size = sprintf(buffer + len, "Host<->Disk command statistics:\n"
+ " Reads: Writes:\n");
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 10; x++) {
+ size = sprintf(buffer+len,"%5dk:%12u %12u\n", 1 << x,
+ SD(HBA_ptr)->reads[x],
+ SD(HBA_ptr)->writes[x]);
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer+len,">1024k:%12u %12u\n",
+ SD(HBA_ptr)->reads[11],
+ SD(HBA_ptr)->writes[11]);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer+len,"Sum :%12u %12u\n",
+ SD(HBA_ptr)->reads[12],
+ SD(HBA_ptr)->writes[12]);
+ len += size;
+ pos = begin + len;
+ }
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ if(SD(HBA_ptr)->do_latency == TRUE) {
+ size = sprintf(buffer + len, "Host Latency Command Statistics:\n"
+ "Current timer resolution: 10ms\n"
+ " Reads: Min:(ms) Max:(ms) Ave:(ms)\n");
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 10; x++) {
+ size = sprintf(buffer+len,"%5dk:%12u %12u %12u %12u\n",
+ 1 << x,
+ SD(HBA_ptr)->reads_lat[x][0],
+ (SD(HBA_ptr)->reads_lat[x][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->reads_lat[x][1] * 10),
+ SD(HBA_ptr)->reads_lat[x][2] * 10,
+ SD(HBA_ptr)->reads_lat[x][3] * 10 /
+ ((SD(HBA_ptr)->reads_lat[x][0])
+ ? SD(HBA_ptr)->reads_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer+len,">1024k:%12u %12u %12u %12u\n",
+ SD(HBA_ptr)->reads_lat[11][0],
+ (SD(HBA_ptr)->reads_lat[11][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->reads_lat[11][1] * 10),
+ SD(HBA_ptr)->reads_lat[11][2] * 10,
+ SD(HBA_ptr)->reads_lat[11][3] * 10 /
+ ((SD(HBA_ptr)->reads_lat[x][0])
+ ? SD(HBA_ptr)->reads_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ size = sprintf(buffer + len,
+ " Writes: Min:(ms) Max:(ms) Ave:(ms)\n");
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 10; x++) {
+ size = sprintf(buffer+len,"%5dk:%12u %12u %12u %12u\n",
+ 1 << x,
+ SD(HBA_ptr)->writes_lat[x][0],
+ (SD(HBA_ptr)->writes_lat[x][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->writes_lat[x][1] * 10),
+ SD(HBA_ptr)->writes_lat[x][2] * 10,
+ SD(HBA_ptr)->writes_lat[x][3] * 10 /
+ ((SD(HBA_ptr)->writes_lat[x][0])
+ ? SD(HBA_ptr)->writes_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer+len,">1024k:%12u %12u %12u %12u\n",
+ SD(HBA_ptr)->writes_lat[11][0],
+ (SD(HBA_ptr)->writes_lat[11][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->writes_lat[x][1] * 10),
+ SD(HBA_ptr)->writes_lat[11][2] * 10,
+ SD(HBA_ptr)->writes_lat[11][3] * 10/
+ ((SD(HBA_ptr)->writes_lat[x][0])
+ ? SD(HBA_ptr)->writes_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+
+#if 0
+ scd = scsi_devices;
+
+ size = sprintf(buffer+len,"Attached devices: %s\n", (scd)?"":"none");
+ len += size;
+ pos = begin + len;
+
+ while (scd) {
+ if (scd->host == HBA_ptr) {
+ proc_print_scsidevice(scd, buffer, &size, len);
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+ scd = scd->next;
+ }
+#endif
+
+ stop_output:
+ DBG(DBG_PROC, printk("2pos: %ld offset: %ld len: %d\n", pos, offset, len));
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len = length; /* Ending slop */
+ DBG(DBG_PROC, printk("3pos: %ld offset: %ld len: %d\n", pos, offset, len));
+
+ return (len);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/eata_generic.h b/i386/i386at/gpl/linux/scsi/eata_generic.h
new file mode 100644
index 00000000..4d9fc497
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/eata_generic.h
@@ -0,0 +1,397 @@
+/********************************************************
+* Header file for eata_dma.c and eata_pio.c *
+* Linux EATA SCSI drivers *
+* (c) 1993,94,95 Michael Neuffer *
+*********************************************************
+* last change: 95/11/07 *
+********************************************************/
+
+
+#ifndef _EATA_GENERIC_H
+#define _EATA_GENERIC_H
+
+
+
+/*********************************************
+ * Misc. definitions *
+ *********************************************/
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define min(a,b) (((a)<(b))?(a):(b))
+
+#define R_LIMIT 0x20000
+
+#define MAXISA 4
+#define MAXEISA 16
+#define MAXPCI 16
+#define MAXIRQ 16
+#define MAXTARGET 16
+#define MAXCHANNEL 3
+
+#define IS_ISA 'I'
+#define IS_EISA 'E'
+#define IS_PCI 'P'
+
+#define BROKEN_INQUIRY 1
+
+#define BUSMASTER 0xff
+#define PIO 0xfe
+
+#define EATA_SIGNATURE 0x45415441 /* BIG ENDIAN coded "EATA" sig. */
+
+#define DPT_ID1 0x12
+#define DPT_ID2 0x14
+
+#define ATT_ID1 0x06
+#define ATT_ID2 0x94
+#define ATT_ID3 0x0
+
+#define NEC_ID1 0x38
+#define NEC_ID2 0xa3
+#define NEC_ID3 0x82
+
+
+#define EATA_CP_SIZE 44
+
+#define MAX_PCI_DEVICES 32 /* Maximum # Of Devices Per Bus */
+#define MAX_METHOD_2 16 /* Max Devices For Method 2 */
+#define MAX_PCI_BUS 16 /* Maximum # Of Busses Allowed */
+
+#define SG_SIZE 64
+#define SG_SIZE_BIG 509 /* max. 509 elements, one 4k page */
+
+#define C_P_L_DIV 2 /* 1 <= C_P_L_DIV <= 8
+ * You can use this parameter to fine-tune
+ * the driver. Depending on the number of
+ * devices and their speed and ability to queue
+ * commands, you will get the best results with a
+ * value
+ * ~= numdevices-(devices_unable_to_queue_commands/2)
+ * The reason for this is that the disk driver
+ * tends to flood the queue, so that other
+ * drivers have problems to queue commands
+ * themselves. This can for example result in
+ * the effect that the tape stops during disk
+ * accesses.
+ */
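+
+/* Illustrative example (not part of the original comment above): with four
+ * devices of which two cannot queue commands, the rule of thumb gives
+ * C_P_L_DIV ~= 4 - (2 / 2) = 3 as a starting point for tuning. */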
+
+#define FREE 0
+#define OK 0
+#define NO_TIMEOUT 0
+#define USED 1
+#define TIMEOUT 2
+#define RESET 4
+#define LOCKED 8
+
+#define HD(cmd) ((hostdata *)&(cmd->host->hostdata))
+#define CD(cmd) ((struct eata_ccb *)(cmd->host_scribble))
+#define SD(host) ((hostdata *)&(host->hostdata))
+
+#define DELAY(x) { __u32 i; i = jiffies + (x * HZ); while (jiffies < i) barrier(); }
+#define DEL2(x) { __u32 i; for (i = 0; i < 0xffff * x; i++); }
+
+/***********************************************
+ * EATA Command & Register definitions *
+ ***********************************************/
+#define PCI_REG_DPTconfig 0x40
+#define PCI_REG_PumpModeAddress 0x44
+#define PCI_REG_PumpModeData 0x48
+#define PCI_REG_ConfigParam1 0x50
+#define PCI_REG_ConfigParam2 0x54
+
+
+#define EATA_CMD_PIO_SETUPTEST 0xc6
+#define EATA_CMD_PIO_READ_CONFIG 0xf0
+#define EATA_CMD_PIO_SET_CONFIG 0xf1
+#define EATA_CMD_PIO_SEND_CP 0xf2
+#define EATA_CMD_PIO_RECEIVE_SP 0xf3
+#define EATA_CMD_PIO_TRUNC 0xf4
+
+#define EATA_CMD_RESET 0xf9
+#define EATA_CMD_IMMEDIATE 0xfa
+
+#define EATA_CMD_DMA_READ_CONFIG 0xfd
+#define EATA_CMD_DMA_SET_CONFIG 0xfe
+#define EATA_CMD_DMA_SEND_CP 0xff
+
+#define ECS_EMULATE_SENSE 0xd4
+
+#define EATA_GENERIC_ABORT 0x00
+#define EATA_SPECIFIC_RESET 0x01
+#define EATA_BUS_RESET 0x02
+#define EATA_SPECIFIC_ABORT 0x03
+#define EATA_QUIET_INTR 0x04
+#define EATA_COLD_BOOT_HBA 0x06 /* Only as a last resort */
+#define EATA_FORCE_IO 0x07
+
+
+#define HA_WCOMMAND 0x07 /* command register offset */
+#define HA_WCOMMAND2 0x06 /* immediate command offset */
+#define HA_WSUBCODE 0x05
+#define HA_WSUBLUN 0x04
+#define HA_WDMAADDR 0x02 /* DMA address LSB offset */
+#define HA_RAUXSTAT 0x08 /* aux status register offset*/
+#define HA_RSTATUS 0x07 /* status register offset */
+#define HA_RDATA 0x00 /* data register (16bit) */
+
+#define HA_ABUSY 0x01 /* aux busy bit */
+#define HA_AIRQ 0x02 /* aux IRQ pending bit */
+#define HA_SERROR 0x01 /* pr. command ended in error*/
+#define HA_SMORE 0x02 /* more data soon to come */
+#define HA_SCORR 0x04 /* data corrected */
+#define HA_SDRQ 0x08 /* data request active */
+#define HA_SSC 0x10 /* seek complete */
+#define HA_SFAULT 0x20 /* write fault */
+#define HA_SREADY 0x40 /* drive ready */
+#define HA_SBUSY 0x80 /* drive busy */
+#define HA_SDRDY	(HA_SSC+HA_SREADY+HA_SDRQ)
+
+/**********************************************
+ * Message definitions *
+ **********************************************/
+
+#define HA_NO_ERROR 0x00 /* No Error */
+#define HA_ERR_SEL_TO 0x01 /* Selection Timeout */
+#define HA_ERR_CMD_TO 0x02 /* Command Timeout */
+#define HA_ERR_RESET 0x03 /* SCSI Bus Reset Received */
+#define HA_INIT_POWERUP 0x04 /* Initial Controller Power-up */
+#define HA_UNX_BUSPHASE 0x05 /* Unexpected Bus Phase */
+#define HA_UNX_BUS_FREE 0x06 /* Unexpected Bus Free */
+#define HA_BUS_PARITY 0x07 /* Bus Parity Error */
+#define HA_SCSI_HUNG 0x08 /* SCSI Hung */
+#define HA_UNX_MSGRJCT 0x09 /* Unexpected Message Rejected */
+#define HA_RESET_STUCK 0x0a /* SCSI Bus Reset Stuck */
+#define HA_RSENSE_FAIL 0x0b /* Auto Request-Sense Failed */
+#define HA_PARITY_ERR 0x0c /* Controller Ram Parity Error */
+#define HA_CP_ABORT_NA 0x0d /* Abort Message sent to non-active cmd */
+#define HA_CP_ABORTED 0x0e /* Abort Message sent to active cmd */
+#define HA_CP_RESET_NA 0x0f /* Reset Message sent to non-active cmd */
+#define HA_CP_RESET 0x10 /* Reset Message sent to active cmd */
+#define HA_ECC_ERR 0x11 /* Controller Ram ECC Error */
+#define HA_PCI_PARITY 0x12 /* PCI Parity Error */
+#define HA_PCI_MABORT 0x13 /* PCI Master Abort */
+#define HA_PCI_TABORT 0x14 /* PCI Target Abort */
+#define HA_PCI_STABORT 0x15 /* PCI Signaled Target Abort */
+
+/**********************************************
+ * Other definitions *
+ **********************************************/
+
+struct reg_bit { /* reading this one will clear the interrupt */
+ __u8 error:1; /* previous command ended in an error */
+ __u8 more:1; /* more DATA coming soon, poll BSY & DRQ (PIO) */
+ __u8 corr:1; /* data read was successfully corrected with ECC*/
+ __u8 drq:1; /* data request active */
+ __u8 sc:1; /* seek complete */
+ __u8 fault:1; /* write fault */
+ __u8 ready:1; /* drive ready */
+ __u8 busy:1; /* controller busy */
+};
+
+struct reg_abit { /* reading this won't clear the interrupt */
+ __u8 abusy:1; /* auxiliary busy */
+ __u8 irq:1; /* set when drive interrupt is asserted */
+ __u8 dummy:6;
+};
+
+struct eata_register { /* EATA register set */
+ __u8 data_reg[2]; /* R, couldn't figure this one out */
+ __u8 cp_addr[4]; /* W, CP address register */
+ union {
+ __u8 command; /* W, command code: [read|set] conf, send CP*/
+ struct reg_bit status; /* R, see register_bit1 */
+ __u8 statusbyte;
+ } ovr;
+ struct reg_abit aux_stat; /* R, see register_bit2 */
+};
+
+struct get_conf { /* Read Configuration Array */
+ __u32 len; /* Should return 0x22, 0x24, etc */
+ __u32 signature; /* Signature MUST be "EATA" */
+ __u8 version2:4,
+ version:4; /* EATA Version level */
+ __u8 OCS_enabled:1, /* Overlap Command Support enabled */
+ TAR_support:1, /* SCSI Target Mode supported */
+ TRNXFR:1, /* Truncate Transfer Cmd not necessary *
+ * Only used in PIO Mode */
+ MORE_support:1, /* MORE supported (only PIO Mode) */
+ DMA_support:1, /* DMA supported Driver uses only *
+ * this mode */
+ DMA_valid:1, /* DRQ value in Byte 30 is valid */
+ ATA:1, /* ATA device connected (not supported) */
+ HAA_valid:1; /* Hostadapter Address is valid */
+
+    __u16 cppadlen;		/* Number of pad bytes sent after CD data *
+ * set to zero for DMA commands */
+ __u8 scsi_id[4]; /* SCSI ID of controller 2-0 Byte 0 res. *
+ * if not, zero is returned */
+ __u32 cplen; /* CP length: number of valid cp bytes */
+ __u32 splen; /* Number of bytes returned after *
+ * Receive SP command */
+ __u16 queuesiz; /* max number of queueable CPs */
+ __u16 dummy;
+ __u16 SGsiz; /* max number of SG table entries */
+    __u8 IRQ:4,			/* IRQ used by this HA */
+ IRQ_TR:1, /* IRQ Trigger: 0=edge, 1=level */
+ SECOND:1, /* This is a secondary controller */
+ DMA_channel:2; /* DRQ index, DRQ is 2comp of DRQX */
+    __u8 sync;			/* device at ID 7 through 0 is running in *
+ * synchronous mode, this will disappear */
+ __u8 DSBLE:1, /* ISA i/o addressing is disabled */
+ FORCADR:1, /* i/o address has been forced */
+ SG_64K:1,
+ SG_UAE:1,
+ :4;
+ __u8 MAX_ID:5, /* Max number of SCSI target IDs */
+ MAX_CHAN:3; /* Number of SCSI busses on HBA */
+ __u8 MAX_LUN; /* Max number of LUNs */
+ __u8 :3,
+ AUTOTRM:1,
+ M1_inst:1,
+ ID_qest:1, /* Raidnum ID is questionable */
+ is_PCI:1, /* HBA is PCI */
+ is_EISA:1; /* HBA is EISA */
+ __u8 unused[478];
+};
+
+struct eata_sg_list
+{
+ __u32 data;
+ __u32 len;
+};
+
+struct eata_ccb { /* Send Command Packet structure */
+
+ __u8 SCSI_Reset:1, /* Cause a SCSI Bus reset on the cmd */
+ HBA_Init:1, /* Cause Controller to reinitialize */
+ Auto_Req_Sen:1, /* Do Auto Request Sense on errors */
+ scatter:1, /* Data Ptr points to a SG Packet */
+ Resrvd:1, /* RFU */
+	 Interpret:1,		/* Interpret the SCSI cdb for its own use */
+ DataOut:1, /* Data Out phase with command */
+ DataIn:1; /* Data In phase with command */
+ __u8 reqlen; /* Request Sense Length *
+ * Valid if Auto_Req_Sen=1 */
+ __u8 unused[3];
+ __u8 FWNEST:1, /* send cmd to phys RAID component */
+ unused2:7;
+ __u8 Phsunit:1, /* physical unit on mirrored pair */
+ I_AT:1, /* inhibit address translation */
+ I_HBA_C:1, /* HBA inhibit caching */
+ unused3:5;
+
+ __u8 cp_id:5, /* SCSI Device ID of target */
+ cp_channel:3; /* SCSI Channel # of HBA */
+ __u8 cp_lun:3,
+ :2,
+ cp_luntar:1, /* CP is for target ROUTINE */
+ cp_dispri:1, /* Grant disconnect privilege */
+ cp_identify:1; /* Always TRUE */
+ __u8 cp_msg1; /* Message bytes 0-3 */
+ __u8 cp_msg2;
+ __u8 cp_msg3;
+ __u8 cp_cdb[12]; /* Command Descriptor Block */
+ __u32 cp_datalen; /* Data Transfer Length *
+ * If scatter=1 len of sg package */
+ void *cp_viraddr; /* address of this ccb */
+ __u32 cp_dataDMA; /* Data Address, if scatter=1 *
+ * address of scatter packet */
+ __u32 cp_statDMA; /* address for Status Packet */
+ __u32 cp_reqDMA; /* Request Sense Address, used if *
+ * CP command ends with error */
+ /* Additional CP info begins here */
+ __u32 timestamp; /* Needed to measure command latency */
+ __u32 timeout;
+ __u8 sizeindex;
+ __u8 rw_latency;
+ __u8 retries;
+ __u8 status; /* status of this queueslot */
+ Scsi_Cmnd *cmd; /* address of cmd */
+ struct eata_sg_list *sg_list;
+};
+
+
+struct eata_sp {
+ __u8 hba_stat:7, /* HBA status */
+ EOC:1; /* True if command finished */
+ __u8 scsi_stat; /* Target SCSI status */
+ __u8 reserved[2];
+ __u32 residue_len; /* Number of bytes not transferred */
+ struct eata_ccb *ccb; /* Address set in COMMAND PACKET */
+ __u8 msg[12];
+};
+
+typedef struct hstd {
+ __u8 vendor[9];
+ __u8 name[18];
+ __u8 revision[6];
+ __u8 EATA_revision;
+ __u8 bustype; /* bustype of HBA */
+ __u8 channel; /* # of avail. scsi channels */
+ __u8 state; /* state of HBA */
+ __u8 primary; /* true if primary */
+ __u8 broken_INQUIRY:1; /* This is an EISA HBA with *
+ * broken INQUIRY */
+ __u8 do_latency; /* Latency measurement flag */
+ __u32 reads[13];
+ __u32 writes[13];
+ __u32 reads_lat[12][4];
+ __u32 writes_lat[12][4];
+ /* state of Target (RESET,..) */
+ __u8 t_state[MAXCHANNEL][MAXTARGET];
+ /* timeouts on target */
+ __u32 t_timeout[MAXCHANNEL][MAXTARGET];
+ __u32 last_ccb; /* Last used ccb */
+ __u32 cplen; /* size of CP in words */
+ __u16 cppadlen; /* pad length of cp in words */
+ __u8 hostid; /* SCSI ID of HBA */
+ __u8 devflags; /* bits set for detected devices */
+ __u8 moresupport; /* HBA supports MORE flag */
+ struct Scsi_Host *next;
+ struct Scsi_Host *prev;
+ struct eata_sp sp; /* status packet */
+ struct eata_ccb ccb[0]; /* ccb array begins here */
+}hostdata;
+
+/* structure for max. 2 emulated drives */
+struct drive_geom_emul {
+ __u8 trans; /* translation flag 1=transl */
+ __u8 channel; /* SCSI channel number */
+ __u8 HBA; /* HBA number (prim/sec) */
+ __u8 id; /* drive id */
+ __u8 lun; /* drive lun */
+ __u32 heads; /* number of heads */
+ __u32 sectors; /* number of sectors */
+ __u32 cylinder; /* number of cylinders */
+};
+
+struct geom_emul {
+ __u8 bios_drives; /* number of emulated drives */
+ struct drive_geom_emul drv[2]; /* drive structures */
+};
+
+#endif /* _EATA_GENERIC_H */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/eata_pio.c b/i386/i386at/gpl/linux/scsi/eata_pio.c
new file mode 100644
index 00000000..95248ae7
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/eata_pio.c
@@ -0,0 +1,1051 @@
+/************************************************************
+ * *
+ * Linux EATA SCSI PIO driver *
+ * *
+ * based on the CAM document CAM/89-004 rev. 2.0c, *
+ * DPT's driver kit, some internal documents and source, *
+ * and several other Linux scsi drivers and kernel docs. *
+ * *
+ * The driver currently: *
+ * -supports all EATA-PIO boards *
+ * -only supports DASD devices *
+ * *
+ * (c)1993,94,95 Michael Neuffer, Alfred Arnold *
+ * neuffer@goofy.zdv.uni-mainz.de *
+ * a.arnold@kfa-juelich.de *
+ * *
+ * This program is free software; you can redistribute it *
+ * and/or modify it under the terms of the GNU General *
+ * Public License as published by the Free Software *
+ * Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be *
+ * useful, but WITHOUT ANY WARRANTY; without even the *
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A *
+ * PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. *
+ * *
+ * You should have received a copy of the GNU General *
+ * Public License along with this kernel; if not, write to *
+ * the Free Software Foundation, Inc., 675 Mass Ave, *
+ * Cambridge, MA 02139, USA. *
+ * *
+ ************************************************************
+ * last change: 95/08/04 OS: Linux 1.3.15 *
+ ************************************************************/
+
+/* Look in eata_pio.h for configuration information */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/in.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include "eata_pio.h"
+#include "eata_dma_proc.h"
+#include "scsi.h"
+#include "sd.h"
+
+#include <linux/stat.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+
+struct proc_dir_entry proc_scsi_eata_pio = {
+    PROC_SCSI_EATA_PIO, 8, "eata_pio",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+static uint ISAbases[MAXISA] =
+{0x1F0, 0x170, 0x330, 0x230};
+static uint ISAirqs[MAXISA] =
+{14,12,15,11};
+static unchar EISAbases[] =
+{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static uint registered_HBAs = 0;
+static struct Scsi_Host *last_HBA = NULL;
+static struct Scsi_Host *first_HBA = NULL;
+static unchar reg_IRQ[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static unchar reg_IRQL[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+static ulong int_counter = 0;
+static ulong queue_counter = 0;
+
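+/*
+ * hprint: crude debugging aid that bypasses printk.  It scrolls the text
+ * screen up one line by copying video memory at 0xb0000 and then writes
+ * 'str' directly into the bottom line of the display.
+ */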
+void hprint(const char *str)
+{
+ char *hptr =(char *) 0x000b0000;
+ char *hptr2=(char *) 0x000b00a0;
+ char *hptr3=(char *) 0x000b0f00;
+ int z;
+
+ memmove(hptr,hptr2,24*80*2);
+ for (z=0; z<strlen(str); z++)
+ hptr3[z*2]=str[z];
+ for (; z<80; z++)
+ hptr3[z*2]=' ';
+}
+
+#ifdef MACH
+#include "eata_pio_proc.src"
+#else
+#include "eata_pio_proc.c"
+#endif
+
+#ifdef MODULE
+int eata_pio_release(struct Scsi_Host *sh)
+{
+ if (sh->irq && reg_IRQ[sh->irq] == 1) free_irq(sh->irq);
+ else reg_IRQ[sh->irq]--;
+ if (SD(sh)->channel == 0) {
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ }
+ return(TRUE);
+}
+#endif
+
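+/*
+ * IncStat: advance the scatter/gather bookkeeping in cmd->SCp by
+ * 'Increment' bytes.  When the current buffer is exhausted it steps to
+ * the next one; when no buffers remain, SCp.Status is cleared to signal
+ * that the transfer is complete.
+ */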
+void IncStat(Scsi_Pointer *SCp, uint Increment)
+{
+ SCp->ptr+=Increment;
+ if ((SCp->this_residual-=Increment)==0)
+ {
+ if ((--SCp->buffers_residual)==0) SCp->Status=FALSE;
+ else
+ {
+ SCp->buffer++;
+ SCp->ptr=SCp->buffer->address;
+ SCp->this_residual=SCp->buffer->length;
+ }
+ }
+}
+
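+/*
+ * Interrupt handler: for every registered HBA on this IRQ that is no
+ * longer busy, data is pumped to or from the controller through the
+ * 16-bit data register in chunks of up to 256 words.  'zwickel' carries
+ * the odd byte across scatter/gather elements of odd length.  Once the
+ * HBA stops requesting data, the status registers are evaluated, the
+ * queue slot is freed and scsi_done() is called.
+ */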
+void eata_pio_int_handler(int irq, struct pt_regs * regs)
+{
+ uint eata_stat = 0xfffff;
+ Scsi_Cmnd *cmd;
+ hostdata *hd;
+ struct eata_ccb *cp;
+ uint base;
+ ulong flags;
+ uint x,z;
+ struct Scsi_Host *sh;
+ ushort zwickel=0;
+ unchar stat,odd;
+
+ save_flags(flags);
+ cli();
+
+    for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if (sh->irq != irq)
+ continue;
+ if (inb((uint)sh->base + HA_RSTATUS) & HA_SBUSY)
+ continue;
+
+ int_counter++;
+
+ hd=SD(sh);
+
+ cp = &hd->ccb[0];
+ cmd = cp->cmd;
+ base = (uint) cmd->host->base;
+
+ do
+ {
+ stat=inb(base+HA_RSTATUS);
+ if (stat&HA_SDRQ)
+ if (cp->DataIn)
+ {
+ z=256; odd=FALSE;
+ while ((cmd->SCp.Status)&&((z>0)||(odd)))
+ {
+ if (odd)
+ {
+ *(cmd->SCp.ptr)=zwickel>>8;
+ IncStat(&cmd->SCp,1);
+ odd=FALSE;
+ }
+ x=min(z,cmd->SCp.this_residual/2);
+ insw(base+HA_RDATA,cmd->SCp.ptr,x);
+ z-=x;
+ IncStat(&cmd->SCp,2*x);
+ if ((z>0)&&(cmd->SCp.this_residual==1))
+ {
+ zwickel=inw(base+HA_RDATA);
+ *(cmd->SCp.ptr)=zwickel&0xff;
+ IncStat(&cmd->SCp,1); z--;
+ odd=TRUE;
+ }
+ }
+ while (z>0) {
+ zwickel=inw(base+HA_RDATA);
+ z--;
+ }
+ }
+ else /* cp->DataOut */
+ {
+ odd=FALSE; z=256;
+ while ((cmd->SCp.Status)&&((z>0)||(odd)))
+ {
+ if (odd)
+ {
+ zwickel+=*(cmd->SCp.ptr)<<8;
+ IncStat(&cmd->SCp,1);
+ outw(zwickel,base+HA_RDATA);
+ z--;
+ odd=FALSE;
+ }
+ x=min(z,cmd->SCp.this_residual/2);
+ outsw(base+HA_RDATA,cmd->SCp.ptr,x);
+ z-=x;
+ IncStat(&cmd->SCp,2*x);
+ if ((z>0)&&(cmd->SCp.this_residual==1))
+ {
+ zwickel=*(cmd->SCp.ptr);
+ zwickel&=0xff;
+ IncStat(&cmd->SCp,1);
+ odd=TRUE;
+ }
+ }
+ while (z>0||odd) {
+ outw(zwickel,base+HA_RDATA);
+ z--;
+ odd=FALSE;
+ }
+ }
+ }
+ while ((stat&HA_SDRQ)||((stat&HA_SMORE)&&hd->moresupport));
+
+ /* terminate handler if HBA goes busy again, i.e. transfers
+ * more data */
+
+ if (stat&HA_SBUSY) break;
+
+ /* OK, this is quite stupid, but I haven't found any correct
+ * way to get HBA&SCSI status so far */
+
+ if (!(inb(base+HA_RSTATUS)&HA_SERROR))
+ {
+ cmd->result=(DID_OK<<16);
+ hd->devflags|=(1<<cp->cp_id);
+ }
+ else if (hd->devflags&1<<cp->cp_id)
+ cmd->result=(DID_OK<<16)+0x02;
+ else cmd->result=(DID_NO_CONNECT<<16);
+
+ if (cp->status == LOCKED) {
+ cp->status = FREE;
+ eata_stat = inb(base + HA_RSTATUS);
+ printk("eata_pio: int_handler, freeing locked queueslot\n");
+ DBG(DBG_INTR&&DBG_DELAY,DEL2(800));
+ restore_flags(flags);
+ return;
+ }
+
+#if DBG_INTR2
+ if (stat != 0x50)
+ printk("stat: %#.2x, result: %#.8x\n", stat, cmd->result);
+ DBG(DBG_INTR&&DBG_DELAY,DEL2(800));
+#endif
+
+ cp->status = FREE; /* now we can release the slot */
+
+ restore_flags(flags);
+ cmd->scsi_done(cmd);
+ save_flags(flags);
+ cli();
+ }
+ restore_flags(flags);
+
+ return;
+}
+
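+/*
+ * eata_pio_send_command: wait (bounded by R_LIMIT) for the HBA to become
+ * non-busy and then write 'command' to the command register.  Note the
+ * inverted return convention: TRUE means the wait timed out, FALSE means
+ * the command was issued.
+ */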
+inline uint eata_pio_send_command(uint base, unchar command)
+{
+ uint loop = R_LIMIT;
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ return(TRUE);
+
+ outb(command, base + HA_WCOMMAND);
+ return(FALSE);
+}
+
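+/*
+ * eata_pio_queue: build the EATA command packet in queue slot 0 (the only
+ * slot this driver uses), choose the data direction from the SCSI opcode,
+ * copy in the CDB, set up the SCp scatter/gather pointers and finally push
+ * the CP to the HBA with EATA_CMD_PIO_SEND_CP followed by the required
+ * number of pad words.
+ */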
+int eata_pio_queue(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ uint x, y;
+ long flags;
+ uint base;
+
+ hostdata *hd;
+ struct Scsi_Host *sh;
+ struct eata_ccb *cp;
+
+ save_flags(flags);
+ cli();
+
+ queue_counter++;
+
+ hd = HD(cmd);
+ sh = cmd->host;
+ base = (uint) sh->base;
+
+ /* use only slot 0, as 2001 can handle only one cmd at a time */
+
+ y = x = 0;
+
+ if (hd->ccb[y].status!=FREE) {
+
+ DBG(DBG_QUEUE, printk("can_queue %d, x %d, y %d\n",sh->can_queue,x,y));
+#if DEBUG_EATA
+ panic("eata_pio: run out of queue slots cmdno:%ld intrno: %ld\n",
+ queue_counter, int_counter);
+#else
+ panic("eata_pio: run out of queue slots....\n");
+#endif
+ }
+
+ cp = &hd->ccb[y];
+
+ memset(cp, 0, sizeof(struct eata_ccb));
+ memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+
+ cp->status = USED; /* claim free slot */
+
+ DBG(DBG_QUEUE, printk("eata_pio_queue pid %ld, target: %x, lun: %x, y %d\n",
+ cmd->pid, cmd->target, cmd->lun, y));
+ DBG(DBG_QUEUE && DBG_DELAY, DEL2(250));
+
+ cmd->scsi_done = (void *)done;
+
+ switch (cmd->cmnd[0]) {
+ case CHANGE_DEFINITION: case COMPARE: case COPY:
+ case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
+ case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case WRITE_6: case WRITE_10: case WRITE_VERIFY:
+ case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
+ case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12:
+ case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW:
+ case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+ case 0xea: /* alternate number for WRITE LONG */
+ cp->DataOut = TRUE; /* Output mode */
+ break;
+ case TEST_UNIT_READY:
+ default:
+ cp->DataIn = TRUE; /* Input mode */
+ }
+
+ cp->Interpret = (cmd->target == hd->hostid);
+ cp->cp_datalen = htonl((ulong)cmd->request_bufflen);
+ cp->Auto_Req_Sen = FALSE;
+ cp->cp_reqDMA = htonl(0);
+ cp->reqlen = 0;
+
+ cp->cp_id = cmd->target;
+ cp->cp_lun = cmd->lun;
+ cp->cp_dispri = FALSE;
+ cp->cp_identify = TRUE;
+ memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
+
+ cp->cp_statDMA = htonl(0);
+
+ cp->cp_viraddr = cp;
+ cp->cmd = cmd;
+ cmd->host_scribble = (char *)&hd->ccb[y];
+
+ if (cmd->use_sg == 0)
+ {
+ cmd->SCp.buffers_residual=1;
+ cmd->SCp.ptr = cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->request_bufflen;
+ cmd->SCp.buffer = NULL;
+ } else {
+ cmd->SCp.buffer = cmd->request_buffer;
+ cmd->SCp.buffers_residual = cmd->use_sg;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ }
+ cmd->SCp.Status = (cmd->SCp.this_residual != 0); /* TRUE as long as bytes
+ * are to transfer */
+
+ if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP))
+ {
+ cmd->result = DID_BUS_BUSY << 16;
+ printk("eata_pio_queue target %d, pid %ld, HBA busy, returning "
+ "DID_BUS_BUSY, done.\n", cmd->target, cmd->pid);
+ done(cmd);
+ cp->status = FREE;
+ restore_flags(flags);
+ return (0);
+ }
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ));
+ outsw(base + HA_RDATA, cp, hd->cplen);
+ outb(EATA_CMD_PIO_TRUNC, base + HA_WCOMMAND);
+ for (x = 0; x < hd->cppadlen; x++) outw(0, base + HA_RDATA);
+
+ DBG(DBG_QUEUE,printk("Queued base %#.4lx pid: %ld target: %x lun: %x "
+ "slot %d irq %d\n", (long)sh->base, cmd->pid,
+ cmd->target, cmd->lun, y, sh->irq));
+ DBG(DBG_QUEUE && DBG_DELAY, DEL2(200));
+
+ restore_flags(flags);
+ return (0);
+}
+
+int eata_pio_abort(Scsi_Cmnd * cmd)
+{
+ ulong flags;
+ uint loop = R_LIMIT;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_pio_abort called pid: %ld target: %x lun: %x"
+ " reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+
+
+ while (inb((uint)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0) {
+ printk("eata_pio: abort, timeout error.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_ABORT_ERROR);
+ }
+ if (CD(cmd)->status == FREE) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_NOT_RUNNING\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ if (CD(cmd)->status == USED) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_BUSY\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_BUSY); /* SNOOZE */
+ }
+ if (CD(cmd)->status == RESET) {
+ restore_flags(flags);
+ printk("eata_pio: abort, command reset error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_ABORT_ERROR);
+ }
+ if (CD(cmd)->status == LOCKED) {
+ restore_flags(flags);
+ DBG(DBG_ABNORM, printk("eata_pio: abort, queue slot locked.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ restore_flags(flags);
+ panic("eata_pio: abort: invalid slot status\n");
+}
+
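+/*
+ * eata_pio_reset: mark all targets and occupied queue slots as RESET,
+ * issue a hard reset command to the HBA, busy-wait roughly three seconds
+ * for it to settle and then complete every outstanding command with
+ * DID_RESET.
+ */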
+int eata_pio_reset(Scsi_Cmnd * cmd)
+{
+ uint x, z, time, limit = 0;
+ ulong flags;
+ unchar success = FALSE;
+ Scsi_Cmnd *sp;
+
+ save_flags(flags);
+ cli();
+ hprint("reset");
+ DBG(DBG_ABNORM, printk("eata_pio_reset called pid:%ld target: %x lun: %x "
+ "reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+
+ if (HD(cmd)->state == RESET) {
+ printk("eata_pio_reset: exit, already in reset.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_RESET_ERROR);
+ }
+
+ for (z = 0; z < MAXTARGET; z++) {
+ HD(cmd)->t_state[0][z] = RESET;
+ HD(cmd)->t_timeout[0][z] = NO_TIMEOUT;
+ }
+
+ /* force all slots to be free */
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+
+ if (HD(cmd)->ccb[x].status == FREE)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ HD(cmd)->ccb[x].status = RESET;
+ printk("eata_pio_reset: slot %d in reset, pid %ld.\n", x, sp->pid);
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+
+ if (sp == NULL)
+ panic("eata_pio_reset: slot %d, sp==NULL.\n", x);
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ }
+
+ /* hard reset the HBA */
+    outb(EATA_CMD_RESET, (uint) cmd->host->base + HA_WCOMMAND);
+
+ DBG(DBG_ABNORM, printk("eata_pio_reset: board reset done.\n"));
+ HD(cmd)->state = RESET;
+
+ time = jiffies;
+ while (jiffies < (time + (3 * HZ)) && limit++ < 10000000);
+
+ DBG(DBG_ABNORM, printk("eata_pio_reset: interrupts disabled, loops %d.\n", limit));
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+
+ /* Skip slots already set free by interrupt */
+ if (HD(cmd)->ccb[x].status != RESET)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ sp->result = DID_RESET << 16;
+
+ /* This mailbox is terminated */
+ printk("eata_pio_reset: resetted ccb %d.\n",x);
+ HD(cmd)->ccb[x].status = FREE;
+
+ restore_flags(flags);
+ sp->scsi_done(sp);
+ cli();
+ }
+
+ HD(cmd)->state = FALSE;
+ restore_flags(flags);
+
+ if (success) { /* hmmm... */
+ DBG(DBG_ABNORM, printk("eata_pio_reset: exit, success.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_RESET_SUCCESS);
+ } else {
+ DBG(DBG_ABNORM, printk("eata_pio_reset: exit, wakeup.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DEL2(500));
+ return (SCSI_RESET_PUNT);
+ }
+}
+
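+/*
+ * get_pio_board_data: send an interpreted INQUIRY command packet to the
+ * HBA in PIO mode and return a pointer to a static buffer holding the
+ * reply data (up to 254 bytes), or NULL if the HBA reports an error or
+ * does not supply any data.
+ */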
+char * get_pio_board_data(ulong base, uint irq, uint id, ulong cplen, ushort cppadlen)
+{
+ struct eata_ccb cp;
+ static char buff[256];
+ int z;
+
+ memset(&cp, 0, sizeof(struct eata_ccb));
+ memset(buff, 0, sizeof(buff));
+
+ cp.DataIn = TRUE;
+ cp.Interpret = TRUE; /* Interpret command */
+
+ cp.cp_datalen = htonl(254);
+ cp.cp_dataDMA = htonl(0);
+
+ cp.cp_id = id;
+ cp.cp_lun = 0;
+
+ cp.cp_cdb[0] = INQUIRY;
+ cp.cp_cdb[1] = 0;
+ cp.cp_cdb[2] = 0;
+ cp.cp_cdb[3] = 0;
+ cp.cp_cdb[4] = 254;
+ cp.cp_cdb[5] = 0;
+
+ if (eata_pio_send_command((uint) base, EATA_CMD_PIO_SEND_CP))
+ return (NULL);
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ));
+ outsw(base + HA_RDATA, &cp, cplen);
+ outb(EATA_CMD_PIO_TRUNC, base + HA_WCOMMAND);
+ for (z = 0; z < cppadlen; z++) outw(0, base + HA_RDATA);
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY);
+ if (inb(base + HA_RSTATUS) & HA_SERROR)
+ return (NULL);
+ else if (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ return (NULL);
+ else
+ {
+ insw(base+HA_RDATA, &buff, 127);
+ while (inb(base + HA_RSTATUS)&HA_SDRQ) inw(base + HA_RDATA);
+ return (buff);
+ }
+}
+
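+/*
+ * get_pio_conf_PIO: issue READ CONFIG to a candidate I/O address and read
+ * the get_conf structure one 16-bit word at a time.  The board is accepted
+ * only if the "EATA" signature matches; for boards found at the known ISA
+ * addresses the default ISA IRQ table may be substituted for the reported
+ * IRQ.
+ */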
+int get_pio_conf_PIO(u32 base, struct get_conf *buf)
+{
+ ulong loop = R_LIMIT;
+ int z;
+ ushort *p;
+
+ if(check_region(base, 9))
+ return (FALSE);
+
+ memset(buf, 0, sizeof(struct get_conf));
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ return (FALSE);
+
+ DBG(DBG_PIO && DBG_PROBE,
+ printk("Issuing PIO READ CONFIG to HBA at %#x\n", base));
+ eata_pio_send_command(base, EATA_CMD_PIO_READ_CONFIG);
+
+ loop = R_LIMIT;
+ for (p = (ushort *) buf;
+ (long)p <= ((long)buf + (sizeof(struct get_conf) / 2)); p++) {
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ if (--loop == 0)
+ return (FALSE);
+
+ loop = R_LIMIT;
+ *p = inw(base + HA_RDATA);
+ }
+ if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { /* Error ? */
+ if (htonl(EATA_SIGNATURE) == buf->signature) {
+ DBG(DBG_PIO&&DBG_PROBE, printk("EATA Controller found at %#4x "
+ "EATA Level: %x\n", base,
+ (uint) (buf->version)));
+
+ while (inb(base + HA_RSTATUS) & HA_SDRQ)
+ inw(base + HA_RDATA);
+ if(ALLOW_DMA_BOARDS == FALSE) {
+ for (z = 0; z < MAXISA; z++)
+ if (base == ISAbases[z]) {
+ buf->IRQ = ISAirqs[z];
+ break;
+ }
+ }
+ return (TRUE);
+ }
+ } else {
+ DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during transfer "
+ "for HBA at %x\n", base));
+ }
+ return (FALSE);
+}
+
+void print_pio_config(struct get_conf *gc)
+{
+ printk("Please check values: (read config data)\n");
+ printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d\n",
+ (uint) ntohl(gc->len), gc->version,
+ gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support);
+ printk("HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n",
+ gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2],
+ gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND);
+ printk("IRQ:%d IRQT:%d FORCADR:%d MCH:%d RIDQ:%d\n",
+ gc->IRQ, gc->IRQ_TR, gc->FORCADR,
+ gc->MAX_CHAN, gc->ID_qest);
+ DBG(DPT_DEBUG, DELAY(14));
+}
+
+static uint print_selftest(uint base)
+{
+ unchar buffer[512];
+#ifdef VERBOSE_SETUP
+ int z;
+#endif
+
+ printk("eata_pio: executing controller self test & setup...\n");
+ while (inb(base + HA_RSTATUS) & HA_SBUSY);
+ outb(EATA_CMD_PIO_SETUPTEST, base + HA_WCOMMAND);
+ do {
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ /* nothing */ ;
+ if (inb(base + HA_RSTATUS) & HA_SDRQ)
+ {
+ insw(base + HA_RDATA, &buffer, 256);
+#ifdef VERBOSE_SETUP
+ /* no beeps please... */
+ for (z = 0; z < 511 && buffer[z]; z++)
+ if (buffer[z] != 7) printk("%c", buffer[z]);
+#endif
+ }
+ } while (inb(base+HA_RSTATUS) & (HA_SBUSY|HA_SDRQ));
+
+ return (!(inb(base+HA_RSTATUS) & HA_SERROR));
+}
+
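+/*
+ * register_pio_HBA: final registration step for a detected board.  It
+ * queries the board with INQUIRY, runs the controller self test, claims
+ * the interrupt (tracking edge-triggered IRQs, which cannot be shared by
+ * a second HBA), reserves the I/O region, allocates a Scsi_Host with the
+ * trailing hostdata and CCB array, fills in name, revision, EATA level,
+ * bus type and queue parameters, and links the host into the driver's
+ * list of registered HBAs.
+ */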
+int register_pio_HBA(long base, struct get_conf *gc, Scsi_Host_Template * tpnt)
+{
+ ulong size = 0;
+ char *buff;
+ ulong cplen;
+ ushort cppadlen;
+ struct Scsi_Host *sh;
+ hostdata *hd;
+
+ DBG(DBG_REGISTER, print_pio_config(gc));
+
+ if (gc->DMA_support == TRUE) {
+ printk("HBA at %#.4lx supports DMA. Please use EATA-DMA driver.\n",base);
+ if(ALLOW_DMA_BOARDS == FALSE)
+ return (FALSE);
+ }
+
+ if ((buff = get_pio_board_data((uint)base, gc->IRQ, gc->scsi_id[3],
+ cplen =(htonl(gc->cplen )+1)/2,
+ cppadlen=(htons(gc->cppadlen)+1)/2)) == NULL)
+ {
+ printk("HBA at %#lx didn't react on INQUIRY. Sorry.\n", (ulong) base);
+ return (FALSE);
+ }
+
+ if (print_selftest(base) == FALSE && ALLOW_DMA_BOARDS == FALSE)
+ {
+ printk("HBA at %#lx failed while performing self test & setup.\n",
+ (ulong) base);
+ return (FALSE);
+ }
+
+ if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? */
+ if (!request_irq(gc->IRQ, eata_pio_int_handler, SA_INTERRUPT,
+ "EATA-PIO")){
+ reg_IRQ[gc->IRQ]++;
+ if (!gc->IRQ_TR)
+ reg_IRQL[gc->IRQ] = TRUE; /* IRQ is edge triggered */
+ } else {
+ printk("Couldn't allocate IRQ %d, Sorry.", gc->IRQ);
+ return (FALSE);
+ }
+ } else { /* More than one HBA on this IRQ */
+ if (reg_IRQL[gc->IRQ] == TRUE) {
+ printk("Can't support more than one HBA on this IRQ,\n"
+ " if the IRQ is edge triggered. Sorry.\n");
+ return (FALSE);
+ } else
+ reg_IRQ[gc->IRQ]++;
+ }
+
+ request_region(base, 8, "eata_pio");
+
+ size = sizeof(hostdata) + (sizeof(struct eata_ccb) * ntohs(gc->queuesiz));
+
+ sh = scsi_register(tpnt, size);
+ hd = SD(sh);
+
+ memset(hd->ccb, 0, (sizeof(struct eata_ccb) * ntohs(gc->queuesiz)));
+ memset(hd->reads, 0, sizeof(ulong) * 26);
+
+ strncpy(SD(sh)->vendor, &buff[8], 8);
+ SD(sh)->vendor[8] = 0;
+ strncpy(SD(sh)->name, &buff[16], 17);
+ SD(sh)->name[17] = 0;
+ SD(sh)->revision[0] = buff[32];
+ SD(sh)->revision[1] = buff[33];
+ SD(sh)->revision[2] = buff[34];
+ SD(sh)->revision[3] = '.';
+ SD(sh)->revision[4] = buff[35];
+ SD(sh)->revision[5] = 0;
+
+ switch (ntohl(gc->len)) {
+ case 0x1c:
+ SD(sh)->EATA_revision = 'a';
+ break;
+ case 0x1e:
+ SD(sh)->EATA_revision = 'b';
+ break;
+ case 0x22:
+ SD(sh)->EATA_revision = 'c';
+ break;
+ case 0x24:
+	SD(sh)->EATA_revision = 'z';
+	break;
+ default:
+ SD(sh)->EATA_revision = '?';
+ }
+
+ if(ntohl(gc->len) >= 0x22) {
+ if (gc->is_PCI == TRUE)
+ hd->bustype = IS_PCI;
+ else if (gc->is_EISA == TRUE)
+ hd->bustype = IS_EISA;
+ else
+ hd->bustype = IS_ISA;
+ } else {
+ if (buff[21] == '4')
+ hd->bustype = IS_PCI;
+ else if (buff[21] == '2')
+ hd->bustype = IS_EISA;
+ else
+ hd->bustype = IS_ISA;
+ }
+
+ SD(sh)->cplen=cplen;
+ SD(sh)->cppadlen=cppadlen;
+ SD(sh)->hostid=gc->scsi_id[3];
+ SD(sh)->devflags=1<<gc->scsi_id[3];
+ SD(sh)->moresupport=gc->MORE_support;
+ sh->unique_id = base;
+ sh->base = (char *) base;
+ sh->io_port = base;
+ sh->n_io_port = 8;
+ sh->irq = gc->IRQ;
+ sh->dma_channel = PIO;
+ sh->this_id = gc->scsi_id[3];
+ sh->can_queue = 1;
+ sh->cmd_per_lun = 1;
+ sh->sg_tablesize = SG_ALL;
+
+ hd->channel = 0;
+
+ sh->max_id = 8;
+ sh->max_lun = 8;
+
+ if (gc->SECOND)
+ hd->primary = FALSE;
+ else
+ hd->primary = TRUE;
+
+ sh->unchecked_isa_dma = FALSE; /* We can only do PIO */
+
+ hd->next = NULL; /* build a linked list of all HBAs */
+ hd->prev = last_HBA;
+ if(hd->prev != NULL)
+ SD(hd->prev)->next = sh;
+ last_HBA = sh;
+ if (first_HBA == NULL)
+ first_HBA = sh;
+ registered_HBAs++;
+ return (1);
+}
+
+void find_pio_ISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ int i;
+
+ for (i = 0; i < MAXISA; i++) {
+ if (ISAbases[i]) {
+ if (get_pio_conf_PIO(ISAbases[i], buf) == TRUE){
+ register_pio_HBA(ISAbases[i], buf, tpnt);
+ }
+ ISAbases[i] = 0;
+ }
+ }
+ return;
+}
+
+void find_pio_EISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ u32 base;
+ int i;
+
+#if CHECKPAL
+ u8 pal1, pal2, pal3;
+#endif
+
+ for (i = 0; i < MAXEISA; i++) {
+ if (EISAbases[i] == TRUE) { /* Still a possibility ? */
+
+ base = 0x1c88 + (i * 0x1000);
+#if CHECKPAL
+ pal1 = inb((u16)base - 8);
+ pal2 = inb((u16)base - 7);
+ pal3 = inb((u16)base - 6);
+
+ if (((pal1 == 0x12) && (pal2 == 0x14)) ||
+ ((pal1 == 0x38) && (pal2 == 0xa3) && (pal3 == 0x82)) ||
+ ((pal1 == 0x06) && (pal2 == 0x94) && (pal3 == 0x24))) {
+ DBG(DBG_PROBE, printk("EISA EATA id tags found: %x %x %x \n",
+ (int)pal1, (int)pal2, (int)pal3));
+#endif
+ if (get_pio_conf_PIO(base, buf) == TRUE) {
+ DBG(DBG_PROBE && DBG_EISA, print_pio_config(buf));
+ if (buf->IRQ) {
+ register_pio_HBA(base, buf, tpnt);
+ } else
+ printk("eata_dma: No valid IRQ. HBA removed from list\n");
+ }
+ /* Nothing found here so we take it from the list */
+ EISAbases[i] = 0;
+#if CHECKPAL
+ }
+#endif
+ }
+ }
+ return;
+}
+
+void find_pio_PCI(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+
+#ifndef CONFIG_PCI
+ printk("eata_pio: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n");
+#else
+
+ u8 pci_bus, pci_device_fn;
+ static s16 pci_index = 0; /* Device index to PCI BIOS calls */
+ u32 base = 0;
+ u16 com_adr;
+ u16 rev_device;
+ u32 error, i, x;
+
+ if (pcibios_present()) {
+ for (i = 0; i <= MAXPCI; ++i, ++pci_index) {
+ if (pcibios_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT,
+ pci_index, &pci_bus, &pci_device_fn))
+ break;
+ DBG(DBG_PROBE && DBG_PCI,
+ printk("eata_pio: HBA at bus %d, device %d,"
+ " function %d, index %d\n", (s32)pci_bus,
+ (s32)((pci_device_fn & 0xf8) >> 3),
+ (s32)(pci_device_fn & 7), pci_index));
+
+ if (!(error = pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_CLASS_DEVICE, &rev_device))) {
+ if (rev_device == PCI_CLASS_STORAGE_SCSI) {
+ if (!(error = pcibios_read_config_word(pci_bus,
+ pci_device_fn, PCI_COMMAND,
+ (u16 *) & com_adr))) {
+ if (!((com_adr & PCI_COMMAND_IO) &&
+ (com_adr & PCI_COMMAND_MASTER))) {
+ printk("HBA has IO or BUSMASTER mode disabled\n");
+ continue;
+ }
+ } else
+ printk("eata_pio: error %x while reading "
+ "PCI_COMMAND\n", error);
+ } else
+ printk("DEVICECLASSID %x didn't match\n", rev_device);
+ } else {
+ printk("eata_pio: error %x while reading PCI_CLASS_BASE\n",
+ error);
+ continue;
+ }
+
+ if (!(error = pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, (int *) &base))){
+
+ /* Check if the address is valid */
+ if (base & 0x01) {
+ base &= 0xfffffffe;
+ /* EISA tag there ? */
+ if ((inb(base) == 0x12) && (inb(base + 1) == 0x14))
+ continue; /* Jep, it's forced, so move on */
+ base += 0x10; /* Now, THIS is the real address */
+ if (base != 0x1f8) {
+ /* We didn't find it in the primary search */
+ if (get_pio_conf_PIO(base, buf) == TRUE) {
+ if (buf->FORCADR) /* If the address is forced */
+ continue; /* we'll find it later */
+
+ /* OK. We made it till here, so we can go now
+ * and register it. We only have to check and
+ * eventually remove it from the EISA and ISA list
+ */
+
+ register_pio_HBA(base, buf, tpnt);
+
+ if (base < 0x1000) {
+ for (x = 0; x < MAXISA; ++x) {
+ if (ISAbases[x] == base) {
+ ISAbases[x] = 0;
+ break;
+ }
+ }
+ } else if ((base & 0x0fff) == 0x0c88) {
+ x = (base >> 12) & 0x0f;
+ EISAbases[x] = 0;
+ }
+ continue; /* break; */
+ }
+ }
+ }
+ } else
+ printk("eata_pio: error %x while reading "
+ "PCI_BASE_ADDRESS_0\n", error);
+ }
+ } else
+ printk("eata_pio: No BIOS32 extensions present. This driver release "
+ "still depends on it.\n"
+ " Skipping scan for PCI HBAs.\n");
+#endif /* #ifndef CONFIG_PCI */
+ return;
+}
+
+
+int eata_pio_detect(Scsi_Host_Template * tpnt)
+{
+ struct Scsi_Host *HBA_ptr;
+ struct get_conf gc;
+ int i;
+
+ DBG((DBG_PROBE && DBG_DELAY) || DPT_DEBUG,
+ printk("Using lots of delays to let you read the debugging output\n"));
+
+ tpnt->proc_dir = &proc_scsi_eata_pio;
+
+ find_pio_PCI(&gc, tpnt);
+
+ find_pio_EISA(&gc, tpnt);
+
+ find_pio_ISA(&gc, tpnt);
+
+    for (i = 0; i < MAXIRQ; i++)
+ if (reg_IRQ[i])
+ request_irq(i, eata_pio_int_handler, SA_INTERRUPT, "EATA-PIO");
+
+ HBA_ptr = first_HBA;
+
+ if (registered_HBAs != 0) {
+ printk("EATA (Extended Attachment) PIO driver version: %d.%d%s\n"
+ "(c) 1993-95 Michael Neuffer, neuffer@goofy.zdv.uni-mainz.de\n"
+ " Alfred Arnold, a.arnold@kfa-juelich.de\n"
+ "This release only supports DASD devices (harddisks)\n",
+ VER_MAJOR, VER_MINOR, VER_SUB);
+
+ printk("Registered HBAs:\n");
+ printk("HBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: Ch: ID: Pr:"
+ " QS: SG: CPL:\n");
+ for (i = 1; i <= registered_HBAs; i++) {
+ printk("scsi%-2d: %.10s v%s 2.0%c %s %#.4x %2d %d %d %c"
+ " %2d %2d %2d\n",
+ HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
+ SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P')?
+ "PCI ":(SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ",
+ (uint) HBA_ptr->base, HBA_ptr->irq, SD(HBA_ptr)->channel,
+ HBA_ptr->this_id, (SD(HBA_ptr)->primary == TRUE)?'Y':'N',
+ HBA_ptr->can_queue, HBA_ptr->sg_tablesize,
+ HBA_ptr->cmd_per_lun);
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+ }
+ DBG(DPT_DEBUG,DELAY(12));
+
+ return (registered_HBAs);
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = EATA_PIO;
+
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/eata_pio.h b/i386/i386at/gpl/linux/scsi/eata_pio.h
new file mode 100644
index 00000000..8a626e0b
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/eata_pio.h
@@ -0,0 +1,116 @@
+/********************************************************
+* Header file for eata_pio.c Linux EATA-PIO SCSI driver *
+* (c) 1993,94,95 Michael Neuffer *
+*********************************************************
+* last change: 95/06/21 *
+********************************************************/
+
+
+#ifndef _EATA_PIO_H
+#define _EATA_PIO_H
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include <linux/scsicam.h>
+
+#ifndef HOSTS_C
+#include "eata_generic.h"
+
+#define VER_MAJOR 0
+#define VER_MINOR 0
+#define VER_SUB "1b"
+
+/************************************************************************
+ * Here you can switch parts of the code on and off                     *
+ ************************************************************************/
+
+#define VERBOSE_SETUP /* show startup screen of 2001 */
+#define ALLOW_DMA_BOARDS 1
+
+/************************************************************************
+ * Debug options. *
+ * Enable DEBUG and whichever options you require. *
+ ************************************************************************/
+#define DEBUG_EATA 1 /* Enable debug code. */
+#define DPT_DEBUG 0 /* Bobs special */
+#define DBG_DELAY 0 /* Build in delays so debug messages can be
+				 * read before they vanish off the top of
+ * the screen!
+ */
+#define DBG_PROBE 0 /* Debug probe routines. */
+#define DBG_ISA 0 /* Trace ISA routines */
+#define DBG_EISA 0 /* Trace EISA routines */
+#define DBG_PCI 0 /* Trace PCI routines */
+#define DBG_PIO 0 /* Trace get_config_PIO */
+#define DBG_COM 0 /* Trace command call */
+#define DBG_QUEUE 0 /* Trace command queueing. */
+#define DBG_INTR 0 /* Trace interrupt service routine. */
+#define DBG_INTR2 0 /* Trace interrupt service routine. */
+#define DBG_PROC 0 /* Debug proc-fs related statistics */
+#define DBG_PROC_WRITE 0
+#define DBG_REGISTER 0 /* */
+#define DBG_ABNORM 1 /* Debug abnormal actions (reset, abort) */
+
+#if DEBUG_EATA
+#define DBG(x, y) if ((x)) {y;}
+#else
+#define DBG(x, y)
+#endif
+
+#endif /* !HOSTS_C */
+
+int eata_pio_detect(Scsi_Host_Template *);
+const char *eata_pio_info(struct Scsi_Host *);
+int eata_pio_command(Scsi_Cmnd *);
+int eata_pio_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int eata_pio_abort(Scsi_Cmnd *);
+int eata_pio_reset(Scsi_Cmnd *);
+int eata_pio_proc_info(char *, char **, off_t, int, int, int);
+#ifdef MODULE
+int eata_pio_release(struct Scsi_Host *);
+#else
+#define eata_pio_release NULL
+#endif
+
+
+#define EATA_PIO { \
+ NULL, NULL, \
+ NULL, /* proc_dir_entry */ \
+ eata_pio_proc_info, /* procinfo */ \
+ "EATA (Extended Attachment) PIO driver", \
+ eata_pio_detect, \
+ eata_pio_release, \
+ NULL, NULL, \
+ eata_pio_queue, \
+ eata_pio_abort, \
+ eata_pio_reset, \
+ NULL, /* Slave attach */ \
+ scsicam_bios_param, \
+ 0, /* Canqueue */ \
+ 0, /* this_id */ \
+ 0, /* sg_tablesize */ \
+ 0, /* cmd_per_lun */ \
+ 0, /* present */ \
+ 1, /* True if ISA */ \
+ ENABLE_CLUSTERING }
+
+#endif /* _EATA_PIO_H */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/eata_pio_proc.src b/i386/i386at/gpl/linux/scsi/eata_pio_proc.src
new file mode 100644
index 00000000..b5480091
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/eata_pio_proc.src
@@ -0,0 +1,150 @@
+
+#define MAX_SCSI_DEVICE_CODE 10
+const char *const pio_scsi_dev_types[MAX_SCSI_DEVICE_CODE] =
+{
+ "Direct-Access ",
+ "Sequential-Access",
+ "Printer ",
+ "Processor ",
+ "WORM ",
+ "CD-ROM ",
+ "Scanner ",
+ "Optical Device ",
+ "Medium Changer ",
+ "Communications "
+};
+
+/*
+ * eata_set_info
+ * buffer : pointer to the data that has been written to the hostfile
+ * length : number of bytes written to the hostfile
+ * HBA_ptr: pointer to the Scsi_Host struct
+ */
+int eata_pio_set_info(char *buffer, int length, struct Scsi_Host *HBA_ptr)
+{
+ DBG(DBG_PROC_WRITE, printk("%s\n", buffer));
+ return(-ENOSYS); /* Currently this is a no-op */
+}
+
+/*
+ * eata_proc_info
+ * inout : decides on the direction of the dataflow and the meaning of the
+ * variables
+ * buffer: If inout==FALSE data is being written to it else read from it
+ * *start: If inout==FALSE start of the valid data in the buffer
+ * offset: If inout==FALSE offset from the beginning of the imaginary file
+ * from which we start writing into the buffer
+ * length: If inout==FALSE max number of bytes to be written into the buffer
+ * else number of bytes in the buffer
+ */
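+/*
+ * Note on the bookkeeping below: every sprintf() appends at buffer+len and
+ * pos = begin + len tracks the position in the imaginary file.  While pos
+ * is still below the requested offset the buffer is rewound (len = 0,
+ * begin = pos); once pos passes offset + length, generation stops and
+ * *start/len are trimmed so that only the requested window is returned.
+ */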
+int eata_pio_proc_info(char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+
+ Scsi_Device *scd;
+ struct Scsi_Host *HBA_ptr;
+ static u8 buff[512];
+ int i;
+ int size, len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+
+ HBA_ptr = first_HBA;
+ for (i = 1; i <= registered_HBAs; i++) {
+ if (HBA_ptr->host_no == hostno)
+ break;
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+
+    if(inout == TRUE) /* Has data been written to the file? */
+ return(eata_pio_set_info(buffer, length, HBA_ptr));
+
+ if (offset == 0)
+ memset(buff, 0, sizeof(buff));
+
+ size = sprintf(buffer+len, "EATA (Extended Attachment) PIO driver version: "
+ "%d.%d%s\n",VER_MAJOR, VER_MINOR, VER_SUB);
+ len += size; pos = begin + len;
+ size = sprintf(buffer + len, "queued commands: %10ld\n"
+ "processed interrupts:%10ld\n", queue_counter, int_counter);
+ len += size; pos = begin + len;
+
+ size = sprintf(buffer + len, "\nscsi%-2d: HBA %.10s\n",
+ HBA_ptr->host_no, SD(HBA_ptr)->name);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Firmware revision: v%s\n",
+ SD(HBA_ptr)->revision);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "IO: PIO\n");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Base IO : %#.4x\n", (u32) HBA_ptr->base);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Host Bus: %s\n",
+ (SD(HBA_ptr)->bustype == 'P')?"PCI ":
+ (SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ");
+
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ scd = scsi_devices;
+
+ size = sprintf(buffer+len,"Attached devices: %s\n", (scd)?"":"none");
+ len += size;
+ pos = begin + len;
+
+ while (scd) {
+ if (scd->host == HBA_ptr) {
+ proc_print_scsidevice(scd, buffer, &size, len);
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+ scd = scd->next;
+ }
+
+ stop_output:
+ DBG(DBG_PROC, printk("2pos: %ld offset: %ld len: %d\n", pos, offset, len));
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len = length; /* Ending slop */
+ DBG(DBG_PROC, printk("3pos: %ld offset: %ld len: %d\n", pos, offset, len));
+
+ return (len);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/i386/i386at/gpl/linux/scsi/fdomain.c b/i386/i386at/gpl/linux/scsi/fdomain.c
new file mode 100644
index 00000000..54507046
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/fdomain.c
@@ -0,0 +1,2016 @@
+/* fdomain.c -- Future Domain TMC-16x0 SCSI driver
+ * Created: Sun May 3 18:53:19 1992 by faith@cs.unc.edu
+ * Revised: Thu Oct 12 15:59:37 1995 by r.faith@ieee.org
+ * Author: Rickard E. Faith, faith@cs.unc.edu
+ * Copyright 1992, 1993, 1994, 1995 Rickard E. Faith
+ *
+ * $Id: fdomain.c,v 1.1.1.1 1997/02/25 21:27:49 thomas Exp $
+
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ **************************************************************************
+
+ DESCRIPTION:
+
+ This is the Linux low-level SCSI driver for Future Domain TMC-1660/1680
+ TMC-1650/1670, and TMC-3260 SCSI host adapters. The 1650 and 1670 have a
+ 25-pin external connector, whereas the 1660 and 1680 have a SCSI-2 50-pin
+ high-density external connector. The 1670 and 1680 have floppy disk
+ controllers built in. The TMC-3260 is a PCI bus card.
+
+ Future Domain's older boards are based on the TMC-1800 chip, and this
+ driver was originally written for a TMC-1680 board with the TMC-1800 chip.
+ More recently, boards are being produced with the TMC-18C50 and TMC-18C30
+ chips. The latest and greatest board may not work with this driver. If
+ you have to patch this driver so that it will recognize your board's BIOS
+ signature, then the driver may fail to function after the board is
+ detected.
+
+ The following BIOS versions are supported: 2.0, 3.0, 3.2, 3.4, and 3.5.
+ The following chips are supported: TMC-1800, TMC-18C50, TMC-18C30.
+ Reports suggest that the driver will also work with the 36C70 chip and
+ with the Quantum ISA-200S and ISA-250MG SCSI adapters.
+
+ Please note that the drive ordering that Future Domain implemented in BIOS
+ versions 3.4 and 3.5 is the opposite of the order (currently) used by the
+ rest of the SCSI industry. If you have BIOS version 3.4 or 3.5, and have
+ more than one drive, then the drive ordering will be the reverse of that
+ which you see under DOS. For example, under DOS SCSI ID 0 will be D: and
+ SCSI ID 1 will be C: (the boot device). Under Linux, SCSI ID 0 will be
+ /dev/sda and SCSI ID 1 will be /dev/sdb. The Linux ordering is consistent
+ with that provided by all the other SCSI drivers for Linux. If you want
+ this changed, send me patches that are protected by #ifdefs.
+
+ If you have a TMC-8xx or TMC-9xx board, then this is not the driver for
+ your board. Please refer to the Seagate driver for more information and
+ possible support.
+
+
+
+ REFERENCES USED:
+
+ "TMC-1800 SCSI Chip Specification (FDC-1800T)", Future Domain Corporation,
+ 1990.
+
+ "Technical Reference Manual: 18C50 SCSI Host Adapter Chip", Future Domain
+ Corporation, January 1992.
+
+ "LXT SCSI Products: Specifications and OEM Technical Manual (Revision
+ B/September 1991)", Maxtor Corporation, 1991.
+
+ "7213S product Manual (Revision P3)", Maxtor Corporation, 1992.
+
+ "Draft Proposed American National Standard: Small Computer System
+ Interface - 2 (SCSI-2)", Global Engineering Documents. (X3T9.2/86-109,
+ revision 10h, October 17, 1991)
+
+ Private communications, Drew Eckhardt (drew@cs.colorado.edu) and Eric
+ Youngdale (ericy@cais.com), 1992.
+
+ Private communication, Tuong Le (Future Domain Engineering department),
+ 1994. (Disk geometry computations for Future Domain BIOS version 3.4, and
+ TMC-18C30 detection.)
+
+ Hogan, Thom. The Programmer's PC Sourcebook. Microsoft Press, 1988. Page
+ 60 (2.39: Disk Partition Table Layout).
+
+ "18C30 Technical Reference Manual", Future Domain Corporation, 1993, page
+ 6-1.
+
+
+
+ NOTES ON REFERENCES:
+
+ The Maxtor manuals were free. Maxtor telephone technical support is
+ great!
+
+ The Future Domain manuals were $25 and $35. They document the chip, not
+ the TMC-16x0 boards, so some information I had to guess at. In 1992,
+ Future Domain sold DOS BIOS source for $250 and the UN*X driver source was
+ $750, but these required a non-disclosure agreement, so even if I could
+ have afforded them, they would *not* have been useful for writing this
+ publicly distributable driver.  Future Domain technical support has
+ provided some information on the phone and have sent a few useful FAXs.
+ They have been much more helpful since they started to recognize that the
+ word "Linux" refers to an operating system :-).
+
+
+
+ ALPHA TESTERS:
+
+ There are many other alpha testers that come and go as the driver
+ develops. The people listed here were most helpful in times of greatest
+ need (mostly early on -- I've probably left out a few worthy people in
+ more recent times):
+
+ Todd Carrico (todd@wutc.wustl.edu), Dan Poirier (poirier@cs.unc.edu ), Ken
+ Corey (kenc@sol.acs.unt.edu), C. de Bruin (bruin@bruin@sterbbs.nl), Sakari
+ Aaltonen (sakaria@vipunen.hit.fi), John Rice (rice@xanth.cs.odu.edu), Brad
+ Yearwood (brad@optilink.com), and Ray Toy (toy@soho.crd.ge.com).
+
+ Special thanks to Tien-Wan Yang (twyang@cs.uh.edu), who graciously lent me
+ his 18C50-based card for debugging. He is the sole reason that this
+ driver works with the 18C50 chip.
+
+ Thanks to Dave Newman (dnewman@crl.com) for providing initial patches for
+ the version 3.4 BIOS.
+
+ Thanks to James T. McKinley (mckinley@msupa.pa.msu.edu) for providing
+ patches that support the TMC-3260, a PCI bus card with the 36C70 chip.
+ The 36C70 chip appears to be "completely compatible" with the 18C30 chip.
+
+ Thanks to Eric Kasten (tigger@petroglyph.cl.msu.edu) for providing the
+ patch for the version 3.5 BIOS.
+
+ Thanks to Stephen Henson (shenson@nyx10.cs.du.edu) for providing the
+ patch for the Quantum ISA-200S SCSI adapter.
+
+ Thanks to Adam Bowen for the signature to the 1610M/MER/MEX scsi cards, to
+ Martin Andrews (andrewm@ccfadm.eeg.ccf.org) for the signature to some
+ random TMC-1680 repackaged by IBM; and to Mintak Ng (mintak@panix.com) for
+ the version 3.61 BIOS signature.
+
+ Thanks to Mark Singer (elf@netcom.com) and Richard Simpson
+ (rsimpson@ewrcsdra.demon.co.uk) for more Quantum signatures and detective
+ work on the Quantum RAM layout.
+
+ Special thanks to James T. McKinley (mckinley@msupa.pa.msu.edu) for
+ providing patches for proper PCI BIOS32-mediated detection of the TMC-3260
+ card (a PCI bus card with the 36C70 chip). Please send James PCI-related
+ bug reports.
+
+ Thanks to Tom Cavin (tec@usa1.com) for preliminary command-line option
+ patches.
+
+ All of the alpha testers deserve much thanks.
+
+
+
+ NOTES ON USER DEFINABLE OPTIONS:
+
+ DEBUG: This turns on the printing of various debug information.
+
+ ENABLE_PARITY: This turns on SCSI parity checking. With the current
+ driver, all attached devices must support SCSI parity. If none of your
+ devices support parity, then you can probably get the driver to work by
+ turning this option off. I have no way of testing this, however.
+
+ FIFO_COUNT: The host adapter has an 8K cache (host adapters based on the
+ 18C30 chip have a 2k cache). When this many 512 byte blocks are filled by
+ the SCSI device, an interrupt will be raised. Therefore, this could be as
+ low as 0, or as high as 16. Note, however, that values which are too high
+ or too low seem to prevent any interrupts from occurring, and thereby lock
+ up the machine. I have found that 2 is a good number, but throughput may
+ be increased by changing this value to values which are close to 2.
+ Please let me know if you try any different values.
+
+ DO_DETECT: This activates some old scan code which was needed before the
+ high level drivers got fixed. If you are having trouble with the driver,
+ turning this on should not hurt, and might help. Please let me know if
+ this is the case, since this code will be removed from future drivers.
+
+ RESELECTION: This is no longer an option, since I gave up trying to
+ implement it in version 4.x of this driver. It did not improve
+ performance at all and made the driver unstable (because I never found one
+ of the two race conditions which were introduced by the multiple
+ outstanding command code). The instability seems a very high price to pay
+ just so that you don't have to wait for the tape to rewind. If you want
+ this feature implemented, send me patches. I'll be happy to send a copy
+ of my (broken) driver to anyone who would like to see a copy.
+
+ **************************************************************************/
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "fdomain.h"
+#include <asm/system.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/stat.h>
+
+#include <linux/config.h> /* for CONFIG_PCI */
+
+struct proc_dir_entry proc_scsi_fdomain = {
+ PROC_SCSI_FDOMAIN, 7, "fdomain",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#define VERSION "$Revision: 1.1.1.1 $"
+
+/* START OF USER DEFINABLE OPTIONS */
+
+#define DEBUG 1 /* Enable debugging output */
+#define ENABLE_PARITY 1 /* Enable SCSI Parity */
+#define FIFO_COUNT 2 /* Number of 512 byte blocks before INTR */
+#define DO_DETECT 0 /* Do device detection here (see scsi.c) */
+
+/* END OF USER DEFINABLE OPTIONS */
+
+#if DEBUG
+#define EVERY_ACCESS 0 /* Write a line on every scsi access */
+#define ERRORS_ONLY 1 /* Only write a line if there is an error */
+#define DEBUG_DETECT 0 /* Debug fdomain_16x0_detect() */
+#define DEBUG_MESSAGES 1 /* Debug MESSAGE IN phase */
+#define DEBUG_ABORT 1 /* Debug abort() routine */
+#define DEBUG_RESET 1 /* Debug reset() routine */
+#define DEBUG_RACE 1 /* Debug interrupt-driven race condition */
+#else
+#define EVERY_ACCESS 0 /* LEAVE THESE ALONE--CHANGE THE ONES ABOVE */
+#define ERRORS_ONLY 0
+#define DEBUG_DETECT 0
+#define DEBUG_MESSAGES 0
+#define DEBUG_ABORT 0
+#define DEBUG_RESET 0
+#define DEBUG_RACE 0
+#endif
+
+/* Errors are reported on the line, so we don't need to report them again */
+#if EVERY_ACCESS
+#undef ERRORS_ONLY
+#define ERRORS_ONLY 0
+#endif
+
+#if ENABLE_PARITY
+#define PARITY_MASK 0x08
+#else
+#define PARITY_MASK 0x00
+#endif
+
+enum chip_type {
+ unknown = 0x00,
+ tmc1800 = 0x01,
+ tmc18c50 = 0x02,
+ tmc18c30 = 0x03,
+};
+
+enum {
+ in_arbitration = 0x02,
+ in_selection = 0x04,
+ in_other = 0x08,
+ disconnect = 0x10,
+ aborted = 0x20,
+ sent_ident = 0x40,
+};
+
+enum in_port_type {
+ Read_SCSI_Data = 0,
+ SCSI_Status = 1,
+ TMC_Status = 2,
+ FIFO_Status = 3, /* tmc18c50/tmc18c30 only */
+ Interrupt_Cond = 4, /* tmc18c50/tmc18c30 only */
+ LSB_ID_Code = 5,
+ MSB_ID_Code = 6,
+ Read_Loopback = 7,
+ SCSI_Data_NoACK = 8,
+ Interrupt_Status = 9,
+ Configuration1 = 10,
+ Configuration2 = 11, /* tmc18c50/tmc18c30 only */
+ Read_FIFO = 12,
+ FIFO_Data_Count = 14
+};
+
+enum out_port_type {
+ Write_SCSI_Data = 0,
+ SCSI_Cntl = 1,
+ Interrupt_Cntl = 2,
+ SCSI_Mode_Cntl = 3,
+ TMC_Cntl = 4,
+ Memory_Cntl = 5, /* tmc18c50/tmc18c30 only */
+ Write_Loopback = 7,
+ IO_Control = 11, /* tmc18c30 only */
+ Write_FIFO = 12
+};
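+
+/* Note added for illustration: each enum value above is an offset from
+   port_base, so a register is accessed as, e.g., inb( port_base + TMC_Status );
+   the *_port variables below simply cache port_base plus these offsets. */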
+
+static int port_base = 0;
+static void *bios_base = NULL;
+static int bios_major = 0;
+static int bios_minor = 0;
+static int PCI_bus = 0;
+static int Quantum = 0; /* Quantum board variant */
+static int interrupt_level = 0;
+static volatile int in_command = 0;
+static Scsi_Cmnd *current_SC = NULL;
+static enum chip_type chip = unknown;
+static int adapter_mask = 0;
+static int this_id = 0;
+static int setup_called = 0;
+
+#if DEBUG_RACE
+static volatile int in_interrupt_flag = 0;
+#endif
+
+static int SCSI_Mode_Cntl_port;
+static int FIFO_Data_Count_port;
+static int Interrupt_Cntl_port;
+static int Interrupt_Status_port;
+static int Read_FIFO_port;
+static int Read_SCSI_Data_port;
+static int SCSI_Cntl_port;
+static int SCSI_Data_NoACK_port;
+static int SCSI_Status_port;
+static int TMC_Cntl_port;
+static int TMC_Status_port;
+static int Write_FIFO_port;
+static int Write_SCSI_Data_port;
+
+static int FIFO_Size = 0x2000; /* 8k FIFO for
+ pre-tmc18c30 chips */
+
+extern void fdomain_16x0_intr( int irq, struct pt_regs * regs );
+
+static void *addresses[] = {
+ (void *)0xc8000,
+ (void *)0xca000,
+ (void *)0xce000,
+ (void *)0xde000,
+ (void *)0xd0000, /* Extra addresses for PCI boards */
+ (void *)0xe0000,
+};
+#define ADDRESS_COUNT (sizeof( addresses ) / sizeof( void * ))
+
+static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
+#define PORT_COUNT (sizeof( ports ) / sizeof( unsigned short ))
+
+static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
+
+/*
+
+ READ THIS BEFORE YOU ADD A SIGNATURE!
+
+ READING THIS SHORT NOTE CAN SAVE YOU LOTS OF TIME!
+
+ READ EVERY WORD, ESPECIALLY THE WORD *NOT*
+
+ This driver works *ONLY* for Future Domain cards using the TMC-1800,
+ TMC-18C50, or TMC-18C30 chip. This includes models TMC-1650, 1660, 1670,
+ and 1680.
+
+ The following BIOS signatures are for boards which do *NOT*
+ work with this driver (these TMC-8xx and TMC-9xx boards may work with the
+ Seagate driver):
+
+ FUTURE DOMAIN CORP. (C) 1986-1988 V4.0I 03/16/88
+ FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89
+ FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89
+ FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90
+ FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90
+ FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90
+ FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92
+
+*/
+
+struct signature {
+ const char *signature;
+ int sig_offset;
+ int sig_length;
+ int major_bios_version;
+ int minor_bios_version;
+ int flag; /* 1 == PCI_bus, 2 == ISA_200S, 3 == ISA_250MG, 4 == ISA_200S */
+} signatures[] = {
+ /* 1 2 3 4 5 6 */
+ /* 123456789012345678901234567890123456789012345678901234567890 */
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 5, 50, 2, 0, 0 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V1.07/28/89", 5, 50, 2, 0, 0 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 72, 50, 2, 0, 2 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.0", 73, 43, 2, 0, 3 },
+ { "FUTURE DOMAIN CORP. (C) 1991 1800-V2.0.", 72, 39, 2, 0, 4 },
+ { "FUTURE DOMAIN CORP. (C) 1992 V3.00.004/02/92", 5, 44, 3, 0, 0 },
+ { "FUTURE DOMAIN TMC-18XX (C) 1993 V3.203/12/93", 5, 44, 3, 2, 0 },
+ { "IBM F1 P2 BIOS v1.0104/29/93", 5, 28, 3, -1, 0 },
+ { "Future Domain Corp. V1.0008/18/93", 5, 33, 3, 4, 0 },
+ { "Future Domain Corp. V1.0008/18/93", 26, 33, 3, 4, 1 },
+ /* This next signature may not be a 3.5 bios */
+ { "Future Domain Corp. V2.0108/18/93", 5, 33, 3, 5, 0 },
+ { "FUTURE DOMAIN CORP. V3.5008/18/93", 5, 34, 3, 5, 0 },
+ { "FUTURE DOMAIN 18c30/18c50/1800 (C) 1994 V3.5", 5, 44, 3, 5, 0 },
+ { "FUTURE DOMAIN CORP. V3.6008/18/93", 5, 34, 3, 6, 0 },
+ { "FUTURE DOMAIN CORP. V3.6108/18/93", 5, 34, 3, 6, 0 },
+ { "FUTURE DOMAIN TMC-18XX", 5, 22, -1, -1, 0 },
+
+ /* READ NOTICE ABOVE *BEFORE* YOU WASTE YOUR TIME ADDING A SIGNATURE
+ Also, fix the disk geometry code for your signature and send your
+ changes to faith@cs.unc.edu. Above all, do *NOT* change any old
+ signatures!
+
+ Note that the last line will match a "generic" 18XX bios. Because
+ Future Domain has changed the host SCSI ID and/or the location of the
+ geometry information in the on-board RAM area for each of the first
+ three BIOS's, it is still important to enter a fully qualified
+ signature in the table for any new BIOS's (after the host SCSI ID and
+ geometry location are verified). */
+};
+
+#define SIGNATURE_COUNT (sizeof( signatures ) / sizeof( struct signature ))
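+
+/* Note added for illustration: fdomain_16x0_detect() below matches these
+   entries by comparing sig_length bytes at each candidate BIOS address plus
+   sig_offset against the signature string with memcmp(). */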
+
+static void print_banner( struct Scsi_Host *shpnt )
+{
+ if (!shpnt) return; /* This won't ever happen */
+
+ if (bios_major < 0 && bios_minor < 0) {
+ printk( "scsi%d <fdomain>: No BIOS; using scsi id %d\n",
+ shpnt->host_no, shpnt->this_id );
+ } else {
+ printk( "scsi%d <fdomain>: BIOS version ", shpnt->host_no );
+
+ if (bios_major >= 0) printk( "%d.", bios_major );
+ else printk( "?." );
+
+ if (bios_minor >= 0) printk( "%d", bios_minor );
+      else                 printk( "?" );
+
+ printk( " at 0x%x using scsi id %d\n",
+ (unsigned)bios_base, shpnt->this_id );
+ }
+
+ /* If this driver works for later FD PCI
+ boards, we will have to modify banner
+ for additional PCI cards, but for now if
+ it's PCI it's a TMC-3260 - JTM */
+ printk( "scsi%d <fdomain>: %s chip at 0x%x irq ",
+ shpnt->host_no,
+ chip == tmc1800 ? "TMC-1800"
+ : (chip == tmc18c50 ? "TMC-18C50"
+ : (chip == tmc18c30 ?
+ (PCI_bus ? "TMC-36C70 (PCI bus)" : "TMC-18C30")
+ : "Unknown")),
+ port_base );
+
+ if (interrupt_level) printk( "%d", interrupt_level );
+ else printk( "<none>" );
+
+ printk( "\n" );
+}
+
+void fdomain_setup( char *str, int *ints )
+{
+ if (setup_called++ || ints[0] < 2 || ints[0] > 3) {
+ printk( "fdomain: usage: fdomain=<PORT_BASE>,<IRQ>[,<ADAPTER_ID>]\n" );
+ printk( "fdomain: bad LILO parameters?\n" );
+ }
+
+ port_base = ints[0] >= 1 ? ints[1] : 0;
+ interrupt_level = ints[0] >= 2 ? ints[2] : 0;
+ this_id = ints[0] >= 3 ? ints[3] : 0;
+
+ bios_major = bios_minor = -1; /* Use geometry for BIOS version >= 3.4 */
+}
+
+
+static void do_pause( unsigned amount ) /* Pause for amount*10 milliseconds */
+{
+ unsigned long the_time = jiffies + amount; /* 0.01 seconds per jiffy */
+
+ while (jiffies < the_time);
+}
+
+inline static void fdomain_make_bus_idle( void )
+{
+ outb( 0, SCSI_Cntl_port );
+ outb( 0, SCSI_Mode_Cntl_port );
+ if (chip == tmc18c50 || chip == tmc18c30)
+ outb( 0x21 | PARITY_MASK, TMC_Cntl_port ); /* Clear forced intr. */
+ else
+ outb( 0x01 | PARITY_MASK, TMC_Cntl_port );
+}
+
+static int fdomain_is_valid_port( int port )
+{
+#if DEBUG_DETECT
+ printk( " (%x%x),",
+ inb( port + MSB_ID_Code ), inb( port + LSB_ID_Code ) );
+#endif
+
+ /* The MCA ID is a unique id for each MCA compatible board. We
+ are using ISA boards, but Future Domain provides the MCA ID
+ anyway. We can use this ID to ensure that this is a Future
+ Domain TMC-1660/TMC-1680.
+ */
+
+ if (inb( port + LSB_ID_Code ) != 0xe9) { /* test for 0x6127 id */
+ if (inb( port + LSB_ID_Code ) != 0x27) return 0;
+ if (inb( port + MSB_ID_Code ) != 0x61) return 0;
+ chip = tmc1800;
+ } else { /* test for 0xe960 id */
+ if (inb( port + MSB_ID_Code ) != 0x60) return 0;
+ chip = tmc18c50;
+
+#if 0
+
+ /* Try to toggle 32-bit mode. This only
+ works on an 18c30 chip. (User reports
+ say that this doesn't work at all, so
+ we'll use the other method.) */
+
+ outb( 0x80, port + IO_Control );
+ if ((inb( port + Configuration2 ) & 0x80) == 0x80) {
+ outb( 0x00, port + IO_Control );
+ if ((inb( port + Configuration2 ) & 0x80) == 0x00) {
+ chip = tmc18c30;
+ FIFO_Size = 0x800; /* 2k FIFO */
+ }
+ }
+#else
+
+ /* That should have worked, but appears to
+         have problems.  Let's assume it is an
+ 18c30 if the RAM is disabled. */
+
+ if (inb( port + Configuration2 ) & 0x02) {
+ chip = tmc18c30;
+ FIFO_Size = 0x800; /* 2k FIFO */
+ }
+#endif
+ /* If that failed, we are an 18c50. */
+ }
+
+ return 1;
+}
+
+static int fdomain_test_loopback( void )
+{
+ int i;
+ int result;
+
+ for (i = 0; i < 255; i++) {
+ outb( i, port_base + Write_Loopback );
+ result = inb( port_base + Read_Loopback );
+ if (i != result)
+ return 1;
+ }
+ return 0;
+}
+
+/* fdomain_get_irq assumes that we have a valid MCA ID for a
+ TMC-1660/TMC-1680 Future Domain board. Now, check to be sure the
+ bios_base matches these ports. If someone was unlucky enough to have
+ purchased more than one Future Domain board, then they will have to
+ modify this code, as we only detect one board here. [The one with the
+ lowest bios_base.]
+
+ Note that this routine is only used for systems without a PCI BIOS32
+ (e.g., ISA bus). For PCI bus systems, this routine will likely fail
+ unless one of the IRQs listed in the ints array is used by the board.
+ Sometimes it is possible to use the computer's BIOS setup screen to
+ configure a PCI system so that one of these IRQs will be used by the
+ Future Domain card. */
+
+static int fdomain_get_irq( int base )
+{
+ int options = inb( base + Configuration1 );
+
+#if DEBUG_DETECT
+ printk( " Options = %x\n", options );
+#endif
+
+ /* Check for board with lowest bios_base --
+ this isn't valid for the 18c30 or for
+ boards on the PCI bus, so just assume we
+ have the right board. */
+
+ if (chip != tmc18c30
+ && !PCI_bus
+ && addresses[ (options & 0xc0) >> 6 ] != bios_base) return 0;
+
+ return ints[ (options & 0x0e) >> 1 ];
+}
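+
+/* Example added for illustration: an options byte of 0x04 gives
+   (0x04 & 0x0e) >> 1 == 2, which selects ints[2], i.e. IRQ 10, from the
+   table above. */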
+
+static int fdomain_isa_detect( int *irq, int *iobase )
+{
+ int i;
+ int base;
+ int flag = 0;
+
+ if (bios_major == 2) {
+ /* The TMC-1660/TMC-1680 has a RAM area just after the BIOS ROM.
+ Assuming the ROM is enabled (otherwise we wouldn't have been
+ able to read the ROM signature :-), then the ROM sets up the
+ RAM area with some magic numbers, such as a list of port
+ base addresses and a list of the disk "geometry" reported to
+ DOS (this geometry has nothing to do with physical geometry).
+ */
+
+ switch (Quantum) {
+ case 2: /* ISA_200S */
+ case 3: /* ISA_250MG */
+ base = *((char *)bios_base + 0x1fa2)
+ + (*((char *)bios_base + 0x1fa3) << 8);
+ break;
+ case 4: /* ISA_200S (another one) */
+ base = *((char *)bios_base + 0x1fa3)
+ + (*((char *)bios_base + 0x1fa4) << 8);
+ break;
+ default:
+ base = *((char *)bios_base + 0x1fcc)
+ + (*((char *)bios_base + 0x1fcd) << 8);
+ break;
+ }
+
+#if DEBUG_DETECT
+ printk( " %x,", base );
+#endif
+
+ for (flag = 0, i = 0; !flag && i < PORT_COUNT; i++) {
+ if (base == ports[i])
+ ++flag;
+ }
+
+ if (flag && fdomain_is_valid_port( base )) {
+ *irq = fdomain_get_irq( base );
+ *iobase = base;
+ return 1;
+ }
+
+ /* This is a bad sign. It usually means that someone patched the
+ BIOS signature list (the signatures variable) to contain a BIOS
+ signature for a board *OTHER THAN* the TMC-1660/TMC-1680. */
+
+#if DEBUG_DETECT
+ printk( " RAM FAILED, " );
+#endif
+ }
+
+ /* Anyway, the alternative to finding the address in the RAM is to just
+ search through every possible port address for one that is attached
+ to the Future Domain card. Don't panic, though, about reading all
+ these random port addresses -- there are rumors that the Future
+ Domain BIOS does something very similar.
+
+ Do not, however, check ports which the kernel knows are being used by
+ another driver. */
+
+ for (i = 0; i < PORT_COUNT; i++) {
+ base = ports[i];
+ if (check_region( base, 0x10 )) {
+#if DEBUG_DETECT
+ printk( " (%x inuse),", base );
+#endif
+ continue;
+ }
+#if DEBUG_DETECT
+ printk( " %x,", base );
+#endif
+ if ((flag = fdomain_is_valid_port( base ))) break;
+ }
+
+ if (!flag) return 0; /* iobase not found */
+
+ *irq = fdomain_get_irq( base );
+ *iobase = base;
+
+ return 1; /* success */
+}
+
+static int fdomain_pci_nobios_detect( int *irq, int *iobase )
+{
+ int i;
+ int flag = 0;
+
+   /* The proper way of doing this is to ask the PCI bus for the device
+      I/O base and interrupt level. But we can't do that if PCI BIOS32 support
+ isn't compiled into the kernel, or if a PCI BIOS32 isn't present.
+
+ Instead, we scan down a bunch of addresses (Future Domain tech
+ support says we will probably find the address before we get to
+ 0xf800). This works fine on some systems -- other systems may have
+ to scan more addresses. If you have to modify this section for your
+ installation, please send mail to faith@cs.unc.edu. */
+
+ for (i = 0xfff8; i > 0xe000; i -= 8) {
+ if (check_region( i, 0x10 )) {
+#if DEBUG_DETECT
+ printk( " (%x inuse)," , i );
+#endif
+ continue;
+ }
+ if ((flag = fdomain_is_valid_port( i ))) break;
+ }
+
+ if (!flag) return 0; /* iobase not found */
+
+ *irq = fdomain_get_irq( i );
+ *iobase = i;
+
+ return 1; /* success */
+}
+
+/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
+ iobase) This function gets the Interrupt Level and I/O base address from
+ the PCI configuration registers. The I/O base address is masked with
+ 0xfff8 since on my card the address read from the PCI config registers
+ is off by one from the actual I/O base address necessary for accessing
+ the status and control registers on the card (PCI config register gives
+ 0xf801, actual address is 0xf800). This is likely a bug in the FD
+   config code that writes to the PCI registers; however, using a mask
+ should be safe since I think the scan done by the card to determine the
+ I/O base is done in increments of 8 (i.e., 0xf800, 0xf808, ...), at
+ least the old scan code we used to use to get the I/O base did... Also,
+ the device ID from the PCI config registers is 0x0 and should be 0x60e9
+ as it is in the status registers (offset 5 from I/O base). If this is
+ changed in future hardware/BIOS changes it will need to be fixed in this
+ detection function. Comments, bug reports, etc... on this function
+ should be sent to mckinley@msupa.pa.msu.edu - James T. McKinley. */
+
+#ifdef CONFIG_PCI
+static int fdomain_pci_bios_detect( int *irq, int *iobase )
+{
+ int error;
+ unsigned char pci_bus, pci_dev_fn; /* PCI bus & device function */
+ unsigned char pci_irq; /* PCI interrupt line */
+ unsigned int pci_base; /* PCI I/O base address */
+ unsigned short pci_vendor, pci_device; /* PCI vendor & device IDs */
+
+ /* If the PCI BIOS doesn't exist, use the old-style detection routines.
+ Otherwise, get the I/O base address and interrupt from the PCI config
+ registers. */
+
+ if (!pcibios_present()) return fdomain_pci_nobios_detect( irq, iobase );
+
+#if DEBUG_DETECT
+ /* Tell how to print a list of the known PCI devices from bios32 and
+ list vendor and device IDs being used if in debug mode. */
+
+ printk( "\nINFO: cat /proc/pci to see list of PCI devices from bios32\n" );
+ printk( "\nTMC-3260 detect:"
+ " Using PCI Vendor ID: 0x%x, PCI Device ID: 0x%x\n",
+ PCI_VENDOR_ID_FD,
+ PCI_DEVICE_ID_FD_36C70 );
+#endif
+
+ /* We will have to change this if more than 1 PCI bus is present and the
+ FD scsi host is not on the first bus (i.e., a PCI to PCI bridge,
+ which is not supported by bios32 right now anyway). This should
+ probably be done by a call to pcibios_find_device but I can't get it
+ to work... Also the device ID reported from the PCI config registers
+ does not match the device ID quoted in the tech manual or available
+ from offset 5 from the I/O base address. It should be 0x60E9, but it
+ is 0x0 if read from the PCI config registers. I guess the FD folks
+ neglected to write it to the PCI registers... This loop is necessary
+ to get the device function (at least until someone can get
+     pcibios_find_device to work; I cannot, but 53c7,8xx.c uses it...). */
+
+ pci_bus = 0;
+
+ for (pci_dev_fn = 0x0; pci_dev_fn < 0xff; pci_dev_fn++) {
+ pcibios_read_config_word( pci_bus,
+ pci_dev_fn,
+ PCI_VENDOR_ID,
+ &pci_vendor );
+
+ if (pci_vendor == PCI_VENDOR_ID_FD) {
+ pcibios_read_config_word( pci_bus,
+ pci_dev_fn,
+ PCI_DEVICE_ID,
+ &pci_device );
+
+ if (pci_device == PCI_DEVICE_ID_FD_36C70) {
+ /* Break out once we have the correct device. If other FD
+ PCI devices are added to this driver we will need to add
+ an or of the other PCI_DEVICE_ID_FD_XXXXX's here. */
+ break;
+ } else {
+ /* If we can't find an FD scsi card we give up. */
+ return 0;
+ }
+ }
+ }
+
+#if DEBUG_DETECT
+ printk( "Future Domain 36C70 : at PCI bus %u, device %u, function %u\n",
+ pci_bus,
+ (pci_dev_fn & 0xf8) >> 3,
+ pci_dev_fn & 7 );
+#endif
+
+ /* We now have the appropriate device function for the FD board so we
+ just read the PCI config info from the registers. */
+
+ if ((error = pcibios_read_config_dword( pci_bus,
+ pci_dev_fn,
+ PCI_BASE_ADDRESS_0,
+ &pci_base ))
+ || (error = pcibios_read_config_byte( pci_bus,
+ pci_dev_fn,
+ PCI_INTERRUPT_LINE,
+ &pci_irq ))) {
+ printk ( "PCI ERROR: Future Domain 36C70 not initializing"
+ " due to error reading configuration space\n" );
+ return 0;
+ } else {
+#if DEBUG_DETECT
+ printk( "TMC-3260 PCI: IRQ = %u, I/O base = 0x%lx\n",
+ pci_irq, pci_base );
+#endif
+
+ /* Now we have the I/O base address and interrupt from the PCI
+ configuration registers. Unfortunately it seems that the I/O base
+ address is off by one on my card so I mask it with 0xfff8. This
+ must be some kind of goof in the FD code that does the autoconfig
+ and writes to the PCI registers (or maybe I just don't understand
+ something). If they fix it in later versions of the card or BIOS
+ we may have to adjust the address based on the signature or
+ something... */
+
+ *irq = pci_irq;
+ *iobase = (pci_base & 0xfff8);
+
+#if DEBUG_DETECT
+ printk( "TMC-3260 fix: Masking I/O base address with 0xff00.\n" );
+ printk( "TMC-3260: IRQ = %d, I/O base = 0x%x\n", *irq, *iobase );
+#endif
+
+ if (!fdomain_is_valid_port( *iobase )) return 0;
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+int fdomain_16x0_detect( Scsi_Host_Template *tpnt )
+{
+ int i, j;
+ int retcode;
+ struct Scsi_Host *shpnt;
+#if DO_DETECT
+ const int buflen = 255;
+ Scsi_Cmnd SCinit;
+ unsigned char do_inquiry[] = { INQUIRY, 0, 0, 0, buflen, 0 };
+ unsigned char do_request_sense[] = { REQUEST_SENSE, 0, 0, 0, buflen, 0 };
+ unsigned char do_read_capacity[] = { READ_CAPACITY,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ unsigned char buf[buflen];
+#endif
+
+#if DEBUG_DETECT
+ printk( "fdomain_16x0_detect()," );
+#endif
+ tpnt->proc_dir = &proc_scsi_fdomain;
+
+ if (setup_called) {
+#if DEBUG_DETECT
+ printk( "no BIOS, using port_base = 0x%x, irq = %d\n",
+ port_base, interrupt_level );
+#endif
+ if (!fdomain_is_valid_port( port_base )) {
+ printk( "fdomain: cannot locate chip at port base 0x%x\n",
+ port_base );
+ printk( "fdomain: bad LILO parameters?\n" );
+ return 0;
+ }
+ } else {
+ int flag = 0;
+
+ for (i = 0; !bios_base && i < ADDRESS_COUNT; i++) {
+#if DEBUG_DETECT
+ printk( " %x(%x),", (unsigned)addresses[i], (unsigned)bios_base );
+#endif
+ for (j = 0; !bios_base && j < SIGNATURE_COUNT; j++) {
+ if (!memcmp( ((char *)addresses[i] + signatures[j].sig_offset),
+ signatures[j].signature, signatures[j].sig_length )) {
+ bios_major = signatures[j].major_bios_version;
+ bios_minor = signatures[j].minor_bios_version;
+ PCI_bus = (signatures[j].flag == 1);
+ Quantum = (signatures[j].flag > 1) ? signatures[j].flag : 0;
+ bios_base = addresses[i];
+ }
+ }
+ }
+
+ if (!bios_base) {
+#if DEBUG_DETECT
+ printk( " FAILED: NO BIOS\n" );
+#endif
+ return 0;
+ }
+
+ if (!PCI_bus) {
+ flag = fdomain_isa_detect( &interrupt_level, &port_base );
+ } else {
+#ifdef CONFIG_PCI
+ flag = fdomain_pci_bios_detect( &interrupt_level, &port_base );
+#else
+ flag = fdomain_pci_nobios_detect( &interrupt_level, &port_base );
+#endif
+ }
+
+ if (!flag) {
+#if DEBUG_DETECT
+ printk( " FAILED: NO PORT\n" );
+#endif
+#ifdef CONFIG_PCI
+ printk( "\nTMC-3260 36C70 PCI scsi chip detection failed.\n" );
+ printk( "Send mail to mckinley@msupa.pa.msu.edu.\n" );
+#endif
+ return 0; /* Cannot find valid set of ports */
+ }
+ }
+
+ SCSI_Mode_Cntl_port = port_base + SCSI_Mode_Cntl;
+ FIFO_Data_Count_port = port_base + FIFO_Data_Count;
+ Interrupt_Cntl_port = port_base + Interrupt_Cntl;
+ Interrupt_Status_port = port_base + Interrupt_Status;
+ Read_FIFO_port = port_base + Read_FIFO;
+ Read_SCSI_Data_port = port_base + Read_SCSI_Data;
+ SCSI_Cntl_port = port_base + SCSI_Cntl;
+ SCSI_Data_NoACK_port = port_base + SCSI_Data_NoACK;
+ SCSI_Status_port = port_base + SCSI_Status;
+ TMC_Cntl_port = port_base + TMC_Cntl;
+ TMC_Status_port = port_base + TMC_Status;
+ Write_FIFO_port = port_base + Write_FIFO;
+ Write_SCSI_Data_port = port_base + Write_SCSI_Data;
+
+ fdomain_16x0_reset( NULL );
+
+ if (fdomain_test_loopback()) {
+#if DEBUG_DETECT
+ printk( "fdomain: LOOPBACK TEST FAILED, FAILING DETECT!\n" );
+#endif
+ if (setup_called) {
+ printk( "fdomain: loopback test failed at port base 0x%x\n",
+ port_base );
+ printk( "fdomain: bad LILO parameters?\n" );
+ }
+ return 0;
+ }
+
+ if (this_id) {
+ tpnt->this_id = (this_id & 0x07);
+ adapter_mask = (1 << tpnt->this_id);
+ } else {
+ if ((bios_major == 3 && bios_minor >= 2) || bios_major < 0) {
+ tpnt->this_id = 7;
+ adapter_mask = 0x80;
+ } else {
+ tpnt->this_id = 6;
+ adapter_mask = 0x40;
+ }
+ }
+
+ /* Print out a banner here in case we can't
+ get resources. */
+
+ shpnt = scsi_register( tpnt, 0 );
+ print_banner( shpnt );
+
+ /* Log IRQ with kernel */
+ if (!interrupt_level) {
+ panic( "fdomain: *NO* interrupt level selected!\n" );
+ } else {
+ /* Register the IRQ with the kernel */
+
+ retcode = request_irq( interrupt_level,
+ fdomain_16x0_intr, SA_INTERRUPT, "fdomain" );
+
+ if (retcode < 0) {
+ if (retcode == -EINVAL) {
+ printk( "fdomain: IRQ %d is bad!\n", interrupt_level );
+ printk( " This shouldn't happen!\n" );
+ printk( " Send mail to faith@cs.unc.edu\n" );
+ } else if (retcode == -EBUSY) {
+ printk( "fdomain: IRQ %d is already in use!\n", interrupt_level );
+ printk( " Please use another IRQ!\n" );
+ } else {
+ printk( "fdomain: Error getting IRQ %d\n", interrupt_level );
+ printk( " This shouldn't happen!\n" );
+ printk( " Send mail to faith@cs.unc.edu\n" );
+ }
+         panic( "fdomain: Driver requires interrupts\n" );
+ }
+ }
+
+ /* Log I/O ports with kernel */
+ request_region( port_base, 0x10, "fdomain" );
+
+#if DO_DETECT
+
+ /* These routines are here because of the way the SCSI bus behaves after
+ a reset. This appropriate behavior was not handled correctly by the
+ higher level SCSI routines when I first wrote this driver. Now,
+ however, correct scan routines are part of scsi.c and these routines
+ are no longer needed. However, this code is still good for
+ debugging. */
+
+ SCinit.request_buffer = SCinit.buffer = buf;
+ SCinit.request_bufflen = SCinit.bufflen = sizeof(buf)-1;
+ SCinit.use_sg = 0;
+ SCinit.lun = 0;
+
+ printk( "fdomain: detection routine scanning for devices:\n" );
+ for (i = 0; i < 8; i++) {
+ SCinit.target = i;
+ if (i == tpnt->this_id) /* Skip host adapter */
+ continue;
+ memcpy(SCinit.cmnd, do_request_sense, sizeof(do_request_sense));
+ retcode = fdomain_16x0_command(&SCinit);
+ if (!retcode) {
+ memcpy(SCinit.cmnd, do_inquiry, sizeof(do_inquiry));
+ retcode = fdomain_16x0_command(&SCinit);
+ if (!retcode) {
+ printk( " SCSI ID %d: ", i );
+ for (j = 8; j < (buf[4] < 32 ? buf[4] : 32); j++)
+ printk( "%c", buf[j] >= 20 ? buf[j] : ' ' );
+ memcpy(SCinit.cmnd, do_read_capacity, sizeof(do_read_capacity));
+ retcode = fdomain_16x0_command(&SCinit);
+ if (!retcode) {
+ unsigned long blocks, size, capacity;
+
+ blocks = (buf[0] << 24) | (buf[1] << 16)
+ | (buf[2] << 8) | buf[3];
+ size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
+ capacity = +( +(blocks / 1024L) * +(size * 10L)) / 1024L;
+
+ printk( "%lu MB (%lu byte blocks)",
+ ((capacity + 5L) / 10L), size );
+ } else {
+ memcpy(SCinit.cmnd, do_request_sense, sizeof(do_request_sense));
+ retcode = fdomain_16x0_command(&SCinit);
+ }
+ printk ("\n" );
+ } else {
+ memcpy(SCinit.cmnd, do_request_sense, sizeof(do_request_sense));
+ retcode = fdomain_16x0_command(&SCinit);
+ }
+ }
+ }
+#endif
+
+ return 1; /* Maximum of one adapter will be detected. */
+}
+
+const char *fdomain_16x0_info( struct Scsi_Host *ignore )
+{
+ static char buffer[80];
+ char *pt;
+
+ strcpy( buffer, "Future Domain TMC-16x0 SCSI driver, version" );
+ if (strchr( VERSION, ':')) { /* Assume VERSION is an RCS Revision string */
+ strcat( buffer, strchr( VERSION, ':' ) + 1 );
+ pt = strrchr( buffer, '$') - 1;
+ if (!pt) /* Stripped RCS Revision string? */
+ pt = buffer + strlen( buffer ) - 1;
+ if (*pt != ' ')
+ ++pt;
+ *pt = '\0';
+ } else { /* Assume VERSION is a number */
+ strcat( buffer, " " VERSION );
+ }
+
+ return buffer;
+}
+
+ /* First pass at /proc information routine. */
+/*
+ * inout : decides on the direction of the dataflow and the meaning of the
+ * variables
+ * buffer: If inout==FALSE data is being written to it else read from it
+ * *start: If inout==FALSE start of the valid data in the buffer
+ * offset: If inout==FALSE offset from the beginning of the imaginary file
+ * from which we start writing into the buffer
+ * length: If inout==FALSE max number of bytes to be written into the buffer
+ * else number of bytes in the buffer
+ */
+int fdomain_16x0_proc_info( char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout )
+{
+ const char *info = fdomain_16x0_info( NULL );
+ int len;
+ int pos;
+ int begin;
+
+ if (inout) return(-ENOSYS);
+
+ begin = 0;
+ strcpy( buffer, info );
+ strcat( buffer, "\n" );
+
+ pos = len = strlen( buffer );
+
+ if(pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin);
+ if(len > length) len = length;
+
+ return(len);
+}
+
+#if 0
+static int fdomain_arbitrate( void )
+{
+ int status = 0;
+ unsigned long timeout;
+
+#if EVERY_ACCESS
+ printk( "fdomain_arbitrate()\n" );
+#endif
+
+ outb( 0x00, SCSI_Cntl_port ); /* Disable data drivers */
+ outb( adapter_mask, port_base + SCSI_Data_NoACK ); /* Set our id bit */
+ outb( 0x04 | PARITY_MASK, TMC_Cntl_port ); /* Start arbitration */
+
+ timeout = jiffies + 50; /* 500 mS */
+ while (jiffies < timeout) {
+ status = inb( TMC_Status_port ); /* Read adapter status */
+ if (status & 0x02) /* Arbitration complete */
+ return 0;
+ }
+
+ /* Make bus idle */
+ fdomain_make_bus_idle();
+
+#if EVERY_ACCESS
+ printk( "Arbitration failed, status = %x\n", status );
+#endif
+#if ERRORS_ONLY
+ printk( "fdomain: Arbitration failed, status = %x\n", status );
+#endif
+ return 1;
+}
+#endif
+
+static int fdomain_select( int target )
+{
+ int status;
+ unsigned long timeout;
+ static int flag = 0;
+
+
+ outb( 0x82, SCSI_Cntl_port ); /* Bus Enable + Select */
+ outb( adapter_mask | (1 << target), SCSI_Data_NoACK_port );
+
+ /* Stop arbitration and enable parity */
+ outb( PARITY_MASK, TMC_Cntl_port );
+
+ timeout = jiffies + 35; /* 350mS -- because of timeouts
+ (was 250mS) */
+
+ while (jiffies < timeout) {
+ status = inb( SCSI_Status_port ); /* Read adapter status */
+ if (status & 1) { /* Busy asserted */
+ /* Enable SCSI Bus (on error, should make bus idle with 0) */
+ outb( 0x80, SCSI_Cntl_port );
+ return 0;
+ }
+ }
+ /* Make bus idle */
+ fdomain_make_bus_idle();
+#if EVERY_ACCESS
+ if (!target) printk( "Selection failed\n" );
+#endif
+#if ERRORS_ONLY
+ if (!target) {
+ if (chip == tmc18c30 && !flag) /* Skip first failure for 18C30 chips. */
+ ++flag;
+ else
+ printk( "fdomain: Selection failed\n" );
+ }
+#endif
+ return 1;
+}
+
+void my_done( int error )
+{
+ if (in_command) {
+ in_command = 0;
+ outb( 0x00, Interrupt_Cntl_port );
+ fdomain_make_bus_idle();
+ current_SC->result = error;
+ if (current_SC->scsi_done)
+ current_SC->scsi_done( current_SC );
+ else panic( "fdomain: current_SC->scsi_done() == NULL" );
+ } else {
+ panic( "fdomain: my_done() called outside of command\n" );
+ }
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+}
+
+void fdomain_16x0_intr( int irq, struct pt_regs * regs )
+{
+ int status;
+ int done = 0;
+ unsigned data_count;
+
+ /* The fdomain_16x0_intr is only called via
+ the interrupt handler. The goal of the
+ sti() here is to allow other
+ interruptions while this routine is
+ running. */
+
+ sti(); /* Yes, we really want sti() here */
+
+ outb( 0x00, Interrupt_Cntl_port );
+
+ /* We usually have one spurious interrupt after each command. Ignore it. */
+ if (!in_command || !current_SC) { /* Spurious interrupt */
+#if EVERY_ACCESS
+ printk( "Spurious interrupt, in_command = %d, current_SC = %x\n",
+ in_command, current_SC );
+#endif
+ return;
+ }
+
+ /* Abort calls my_done, so we do nothing here. */
+ if (current_SC->SCp.phase & aborted) {
+#if DEBUG_ABORT
+ printk( "Interrupt after abort, ignoring\n" );
+#endif
+ /*
+ return; */
+ }
+
+#if DEBUG_RACE
+ ++in_interrupt_flag;
+#endif
+
+ if (current_SC->SCp.phase & in_arbitration) {
+ status = inb( TMC_Status_port ); /* Read adapter status */
+ if (!(status & 0x02)) {
+#if EVERY_ACCESS
+ printk( " AFAIL " );
+#endif
+ my_done( DID_BUS_BUSY << 16 );
+ return;
+ }
+ current_SC->SCp.phase = in_selection;
+
+ outb( 0x40 | FIFO_COUNT, Interrupt_Cntl_port );
+
+ outb( 0x82, SCSI_Cntl_port ); /* Bus Enable + Select */
+ outb( adapter_mask | (1 << current_SC->target), SCSI_Data_NoACK_port );
+
+ /* Stop arbitration and enable parity */
+ outb( 0x10 | PARITY_MASK, TMC_Cntl_port );
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+ return;
+ } else if (current_SC->SCp.phase & in_selection) {
+ status = inb( SCSI_Status_port );
+ if (!(status & 0x01)) {
+ /* Try again, for slow devices */
+ if (fdomain_select( current_SC->target )) {
+#if EVERY_ACCESS
+ printk( " SFAIL " );
+#endif
+ my_done( DID_NO_CONNECT << 16 );
+ return;
+ } else {
+#if EVERY_ACCESS
+ printk( " AltSel " );
+#endif
+ /* Stop arbitration and enable parity */
+ outb( 0x10 | PARITY_MASK, TMC_Cntl_port );
+ }
+ }
+ current_SC->SCp.phase = in_other;
+ outb( 0x90 | FIFO_COUNT, Interrupt_Cntl_port );
+ outb( 0x80, SCSI_Cntl_port );
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+ return;
+ }
+
+ /* current_SC->SCp.phase == in_other: this is the body of the routine */
+
+ status = inb( SCSI_Status_port );
+
+ if (status & 0x10) { /* REQ */
+
+ switch (status & 0x0e) {
+
+ case 0x08: /* COMMAND OUT */
+ outb( current_SC->cmnd[current_SC->SCp.sent_command++],
+ Write_SCSI_Data_port );
+#if EVERY_ACCESS
+ printk( "CMD = %x,",
+ current_SC->cmnd[ current_SC->SCp.sent_command - 1] );
+#endif
+ break;
+ case 0x00: /* DATA OUT -- tmc18c50/tmc18c30 only */
+ if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
+ current_SC->SCp.have_data_in = -1;
+ outb( 0xd0 | PARITY_MASK, TMC_Cntl_port );
+ }
+ break;
+ case 0x04: /* DATA IN -- tmc18c50/tmc18c30 only */
+ if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
+ current_SC->SCp.have_data_in = 1;
+ outb( 0x90 | PARITY_MASK, TMC_Cntl_port );
+ }
+ break;
+ case 0x0c: /* STATUS IN */
+ current_SC->SCp.Status = inb( Read_SCSI_Data_port );
+#if EVERY_ACCESS
+ printk( "Status = %x, ", current_SC->SCp.Status );
+#endif
+#if ERRORS_ONLY
+ if (current_SC->SCp.Status && current_SC->SCp.Status != 2) {
+ printk( "fdomain: target = %d, command = %x, status = %x\n",
+ current_SC->target,
+ current_SC->cmnd[0],
+ current_SC->SCp.Status );
+ }
+#endif
+ break;
+ case 0x0a: /* MESSAGE OUT */
+ outb( MESSAGE_REJECT, Write_SCSI_Data_port ); /* Reject */
+ break;
+ case 0x0e: /* MESSAGE IN */
+ current_SC->SCp.Message = inb( Read_SCSI_Data_port );
+#if EVERY_ACCESS
+ printk( "Message = %x, ", current_SC->SCp.Message );
+#endif
+ if (!current_SC->SCp.Message) ++done;
+#if DEBUG_MESSAGES || EVERY_ACCESS
+ if (current_SC->SCp.Message) {
+ printk( "fdomain: message = %x\n", current_SC->SCp.Message );
+ }
+#endif
+ break;
+ }
+ }
+
+ if (chip == tmc1800
+ && !current_SC->SCp.have_data_in
+ && (current_SC->SCp.sent_command
+ >= current_SC->cmd_len)) {
+ /* We have to get the FIFO direction
+ correct, so I've made a table based
+ on the SCSI Standard of which commands
+ appear to require a DATA OUT phase.
+ */
+ /*
+ p. 94: Command for all device types
+ CHANGE DEFINITION 40 DATA OUT
+ COMPARE 39 DATA OUT
+ COPY 18 DATA OUT
+ COPY AND VERIFY 3a DATA OUT
+ INQUIRY 12
+ LOG SELECT 4c DATA OUT
+ LOG SENSE 4d
+ MODE SELECT (6) 15 DATA OUT
+ MODE SELECT (10) 55 DATA OUT
+ MODE SENSE (6) 1a
+ MODE SENSE (10) 5a
+ READ BUFFER 3c
+ RECEIVE DIAGNOSTIC RESULTS 1c
+ REQUEST SENSE 03
+ SEND DIAGNOSTIC 1d DATA OUT
+ TEST UNIT READY 00
+ WRITE BUFFER 3b DATA OUT
+
+ p.178: Commands for direct-access devices (not listed on p. 94)
+ FORMAT UNIT 04 DATA OUT
+ LOCK-UNLOCK CACHE 36
+ PRE-FETCH 34
+ PREVENT-ALLOW MEDIUM REMOVAL 1e
+ READ (6)/RECEIVE 08
+ READ (10) 3c
+ READ CAPACITY 25
+ READ DEFECT DATA (10) 37
+ READ LONG 3e
+ REASSIGN BLOCKS 07 DATA OUT
+ RELEASE 17
+ RESERVE 16 DATA OUT
+ REZERO UNIT/REWIND 01
+ SEARCH DATA EQUAL (10) 31 DATA OUT
+ SEARCH DATA HIGH (10) 30 DATA OUT
+ SEARCH DATA LOW (10) 32 DATA OUT
+ SEEK (6) 0b
+ SEEK (10) 2b
+ SET LIMITS (10) 33
+ START STOP UNIT 1b
+ SYNCHRONIZE CACHE 35
+ VERIFY (10) 2f
+ WRITE (6)/PRINT/SEND 0a DATA OUT
+ WRITE (10)/SEND 2a DATA OUT
+ WRITE AND VERIFY (10) 2e DATA OUT
+ WRITE LONG 3f DATA OUT
+ WRITE SAME 41 DATA OUT ?
+
+ p. 261: Commands for sequential-access devices (not previously listed)
+ ERASE 19
+ LOAD UNLOAD 1b
+ LOCATE 2b
+ READ BLOCK LIMITS 05
+ READ POSITION 34
+ READ REVERSE 0f
+ RECOVER BUFFERED DATA 14
+ SPACE 11
+ WRITE FILEMARKS 10 ?
+
+ p. 298: Commands for printer devices (not previously listed)
+ ****** NOT SUPPORTED BY THIS DRIVER, since 0b is SEEK (6) *****
+ SLEW AND PRINT 0b DATA OUT -- same as seek
+ STOP PRINT 1b
+ SYNCHRONIZE BUFFER 10
+
+ p. 315: Commands for processor devices (not previously listed)
+
+ p. 321: Commands for write-once devices (not previously listed)
+ MEDIUM SCAN 38
+ READ (12) a8
+ SEARCH DATA EQUAL (12) b1 DATA OUT
+ SEARCH DATA HIGH (12) b0 DATA OUT
+ SEARCH DATA LOW (12) b2 DATA OUT
+ SET LIMITS (12) b3
+ VERIFY (12) af
+ WRITE (12) aa DATA OUT
+ WRITE AND VERIFY (12) ae DATA OUT
+
+ p. 332: Commands for CD-ROM devices (not previously listed)
+ PAUSE/RESUME 4b
+ PLAY AUDIO (10) 45
+ PLAY AUDIO (12) a5
+ PLAY AUDIO MSF 47
+ PLAY TRACK RELATIVE (10) 49
+ PLAY TRACK RELATIVE (12) a9
+ READ HEADER 44
+ READ SUB-CHANNEL 42
+ READ TOC 43
+
+ p. 370: Commands for scanner devices (not previously listed)
+ GET DATA BUFFER STATUS 34
+ GET WINDOW 25
+ OBJECT POSITION 31
+ SCAN 1b
+ SET WINDOW 24 DATA OUT
+
+ p. 391: Commands for optical memory devices (not listed)
+ ERASE (10) 2c
+ ERASE (12) ac
+ MEDIUM SCAN 38 DATA OUT
+ READ DEFECT DATA (12) b7
+ READ GENERATION 29
+ READ UPDATED BLOCK 2d
+ UPDATE BLOCK 3d DATA OUT
+
+ p. 419: Commands for medium changer devices (not listed)
+ EXCHANGE MEDIUM 46
+ INITIALIZE ELEMENT STATUS 07
+ MOVE MEDIUM a5
+ POSITION TO ELEMENT 2b
+ READ ELEMENT STATUS b8
+ REQUEST VOL. ELEMENT ADDRESS b5
+ SEND VOLUME TAG b6 DATA OUT
+
+ p. 454: Commands for communications devices (not listed previously)
+ GET MESSAGE (6) 08
+ GET MESSAGE (10) 28
+ GET MESSAGE (12) a8
+ */
+
+ switch (current_SC->cmnd[0]) {
+ case CHANGE_DEFINITION: case COMPARE: case COPY:
+ case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
+ case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
+
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case WRITE_6: case WRITE_10: case WRITE_VERIFY:
+ case 0x3f: case 0x41:
+
+ case 0xb1: case 0xb0: case 0xb2:
+ case 0xaa: case 0xae:
+
+ case 0x24:
+
+ case 0x38: case 0x3d:
+
+ case 0xb6:
+
+ case 0xea: /* alternate number for WRITE LONG */
+
+ current_SC->SCp.have_data_in = -1;
+ outb( 0xd0 | PARITY_MASK, TMC_Cntl_port );
+ break;
+
+ case 0x00:
+ default:
+
+ current_SC->SCp.have_data_in = 1;
+ outb( 0x90 | PARITY_MASK, TMC_Cntl_port );
+ break;
+ }
+ }
+
+ if (current_SC->SCp.have_data_in == -1) { /* DATA OUT */
+ while ( (data_count = FIFO_Size - inw( FIFO_Data_Count_port )) > 512 ) {
+#if EVERY_ACCESS
+ printk( "DC=%d, ", data_count ) ;
+#endif
+ if (data_count > current_SC->SCp.this_residual)
+ data_count = current_SC->SCp.this_residual;
+ if (data_count > 0) {
+#if EVERY_ACCESS
+ printk( "%d OUT, ", data_count );
+#endif
+ if (data_count == 1) {
+ outb( *current_SC->SCp.ptr++, Write_FIFO_port );
+ --current_SC->SCp.this_residual;
+ } else {
+ data_count >>= 1;
+ outsw( Write_FIFO_port, current_SC->SCp.ptr, data_count );
+ current_SC->SCp.ptr += 2 * data_count;
+ current_SC->SCp.this_residual -= 2 * data_count;
+ }
+ }
+ if (!current_SC->SCp.this_residual) {
+ if (current_SC->SCp.buffers_residual) {
+ --current_SC->SCp.buffers_residual;
+ ++current_SC->SCp.buffer;
+ current_SC->SCp.ptr = current_SC->SCp.buffer->address;
+ current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
+ } else
+ break;
+ }
+ }
+ }
+
+ if (current_SC->SCp.have_data_in == 1) { /* DATA IN */
+ while ((data_count = inw( FIFO_Data_Count_port )) > 0) {
+#if EVERY_ACCESS
+ printk( "DC=%d, ", data_count );
+#endif
+ if (data_count > current_SC->SCp.this_residual)
+ data_count = current_SC->SCp.this_residual;
+ if (data_count) {
+#if EVERY_ACCESS
+ printk( "%d IN, ", data_count );
+#endif
+ if (data_count == 1) {
+ *current_SC->SCp.ptr++ = inb( Read_FIFO_port );
+ --current_SC->SCp.this_residual;
+ } else {
+ data_count >>= 1; /* Number of words */
+ insw( Read_FIFO_port, current_SC->SCp.ptr, data_count );
+ current_SC->SCp.ptr += 2 * data_count;
+ current_SC->SCp.this_residual -= 2 * data_count;
+ }
+ }
+ if (!current_SC->SCp.this_residual
+ && current_SC->SCp.buffers_residual) {
+ --current_SC->SCp.buffers_residual;
+ ++current_SC->SCp.buffer;
+ current_SC->SCp.ptr = current_SC->SCp.buffer->address;
+ current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
+ }
+ }
+ }
+
+ if (done) {
+#if EVERY_ACCESS
+ printk( " ** IN DONE %d ** ", current_SC->SCp.have_data_in );
+#endif
+
+#if ERRORS_ONLY
+ if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
+ if ((unsigned char)(*((char *)current_SC->request_buffer+2)) & 0x0f) {
+ unsigned char key;
+ unsigned char code;
+ unsigned char qualifier;
+
+ key = (unsigned char)(*((char *)current_SC->request_buffer + 2))
+ & 0x0f;
+ code = (unsigned char)(*((char *)current_SC->request_buffer + 12));
+ qualifier = (unsigned char)(*((char *)current_SC->request_buffer
+ + 13));
+
+ if (!(key == UNIT_ATTENTION && (code == 0x29 || !code))
+ && !(key == NOT_READY
+ && code == 0x04
+ && (!qualifier || qualifier == 0x02 || qualifier == 0x01))
+ && !(key == ILLEGAL_REQUEST && (code == 0x25
+ || code == 0x24
+ || !code)))
+
+ printk( "fdomain: REQUEST SENSE "
+ "Key = %x, Code = %x, Qualifier = %x\n",
+ key, code, qualifier );
+ }
+ }
+#endif
+#if EVERY_ACCESS
+ printk( "BEFORE MY_DONE. . ." );
+#endif
+ my_done( (current_SC->SCp.Status & 0xff)
+ | ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16) );
+#if EVERY_ACCESS
+ printk( "RETURNING.\n" );
+#endif
+
+ } else {
+ if (current_SC->SCp.phase & disconnect) {
+ outb( 0xd0 | FIFO_COUNT, Interrupt_Cntl_port );
+ outb( 0x00, SCSI_Cntl_port );
+ } else {
+ outb( 0x90 | FIFO_COUNT, Interrupt_Cntl_port );
+ }
+ }
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+ return;
+}
+
+int fdomain_16x0_queue( Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ if (in_command) {
+ panic( "fdomain: fdomain_16x0_queue() NOT REENTRANT!\n" );
+ }
+#if EVERY_ACCESS
+ printk( "queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
+ SCpnt->target,
+ *(unsigned char *)SCpnt->cmnd,
+ SCpnt->use_sg,
+ SCpnt->request_bufflen );
+#endif
+
+ fdomain_make_bus_idle();
+
+ current_SC = SCpnt; /* Save this for the done function */
+ current_SC->scsi_done = done;
+
+ /* Initialize static data */
+
+ if (current_SC->use_sg) {
+ current_SC->SCp.buffer =
+ (struct scatterlist *)current_SC->request_buffer;
+ current_SC->SCp.ptr = current_SC->SCp.buffer->address;
+ current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
+ current_SC->SCp.buffers_residual = current_SC->use_sg - 1;
+ } else {
+ current_SC->SCp.ptr = (char *)current_SC->request_buffer;
+ current_SC->SCp.this_residual = current_SC->request_bufflen;
+ current_SC->SCp.buffer = NULL;
+ current_SC->SCp.buffers_residual = 0;
+ }
+
+
+ current_SC->SCp.Status = 0;
+ current_SC->SCp.Message = 0;
+ current_SC->SCp.have_data_in = 0;
+ current_SC->SCp.sent_command = 0;
+ current_SC->SCp.phase = in_arbitration;
+
+ /* Start arbitration */
+ outb( 0x00, Interrupt_Cntl_port );
+ outb( 0x00, SCSI_Cntl_port ); /* Disable data drivers */
+ outb( adapter_mask, SCSI_Data_NoACK_port ); /* Set our id bit */
+ ++in_command;
+ outb( 0x20, Interrupt_Cntl_port );
+ outb( 0x14 | PARITY_MASK, TMC_Cntl_port ); /* Start arbitration */
+
+ return 0;
+}
+
+/* The following code, which simulates the old-style command function, was
+ taken from Tommy Thorn's aha1542.c file. This code is Copyright (C)
+ 1992 Tommy Thorn. */
+
+static volatile int internal_done_flag = 0;
+static volatile int internal_done_errcode = 0;
+
+static void internal_done( Scsi_Cmnd *SCpnt )
+{
+ internal_done_errcode = SCpnt->result;
+ ++internal_done_flag;
+}
+
+int fdomain_16x0_command( Scsi_Cmnd *SCpnt )
+{
+ fdomain_16x0_queue( SCpnt, internal_done );
+
+ while (!internal_done_flag)
+ ;
+ internal_done_flag = 0;
+ return internal_done_errcode;
+}
+
+/* End of code derived from Tommy Thorn's work. */
+
+void print_info( Scsi_Cmnd *SCpnt )
+{
+ unsigned int imr;
+ unsigned int irr;
+ unsigned int isr;
+
+   if (!SCpnt || !SCpnt->host) {
+      printk( "fdomain: cannot provide detailed information\n" );
+      return;                   /* Avoid dereferencing a NULL command below */
+   }
+
+ printk( "%s\n", fdomain_16x0_info( SCpnt->host ) );
+ print_banner( SCpnt->host );
+ switch (SCpnt->SCp.phase) {
+ case in_arbitration: printk( "arbitration " ); break;
+ case in_selection: printk( "selection " ); break;
+ case in_other: printk( "other " ); break;
+ default: printk( "unknown " ); break;
+ }
+
+ printk( "(%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n",
+ SCpnt->SCp.phase,
+ SCpnt->target,
+ *(unsigned char *)SCpnt->cmnd,
+ SCpnt->use_sg,
+ SCpnt->request_bufflen );
+ printk( "sent_command = %d, have_data_in = %d, timeout = %d\n",
+ SCpnt->SCp.sent_command,
+ SCpnt->SCp.have_data_in,
+ SCpnt->timeout );
+#if DEBUG_RACE
+ printk( "in_interrupt_flag = %d\n", in_interrupt_flag );
+#endif
+
+ imr = (inb( 0x0a1 ) << 8) + inb( 0x21 );
+ outb( 0x0a, 0xa0 );
+ irr = inb( 0xa0 ) << 8;
+ outb( 0x0a, 0x20 );
+ irr += inb( 0x20 );
+ outb( 0x0b, 0xa0 );
+ isr = inb( 0xa0 ) << 8;
+ outb( 0x0b, 0x20 );
+ isr += inb( 0x20 );
+
+ /* Print out interesting information */
+ printk( "IMR = 0x%04x", imr );
+ if (imr & (1 << interrupt_level))
+ printk( " (masked)" );
+ printk( ", IRR = 0x%04x, ISR = 0x%04x\n", irr, isr );
+
+ printk( "SCSI Status = 0x%02x\n", inb( SCSI_Status_port ) );
+ printk( "TMC Status = 0x%02x", inb( TMC_Status_port ) );
+   if (inb( TMC_Status_port ) & 1)
+ printk( " (interrupt)" );
+ printk( "\n" );
+ printk( "Interrupt Status = 0x%02x", inb( Interrupt_Status_port ) );
+ if (inb( Interrupt_Status_port ) & 0x08)
+ printk( " (enabled)" );
+ printk( "\n" );
+ if (chip == tmc18c50 || chip == tmc18c30) {
+ printk( "FIFO Status = 0x%02x\n", inb( port_base + FIFO_Status ) );
+ printk( "Int. Condition = 0x%02x\n",
+ inb( port_base + Interrupt_Cond ) );
+ }
+ printk( "Configuration 1 = 0x%02x\n", inb( port_base + Configuration1 ) );
+ if (chip == tmc18c50 || chip == tmc18c30)
+ printk( "Configuration 2 = 0x%02x\n",
+ inb( port_base + Configuration2 ) );
+}
+
+int fdomain_16x0_abort( Scsi_Cmnd *SCpnt)
+{
+ unsigned long flags;
+#if EVERY_ACCESS || ERRORS_ONLY || DEBUG_ABORT
+ printk( "fdomain: abort " );
+#endif
+
+ save_flags( flags );
+ cli();
+ if (!in_command) {
+#if EVERY_ACCESS || ERRORS_ONLY
+ printk( " (not in command)\n" );
+#endif
+ restore_flags( flags );
+ return SCSI_ABORT_NOT_RUNNING;
+ } else printk( "\n" );
+
+#if DEBUG_ABORT
+ print_info( SCpnt );
+#endif
+
+ fdomain_make_bus_idle();
+
+ current_SC->SCp.phase |= aborted;
+
+ current_SC->result = DID_ABORT << 16;
+
+ restore_flags( flags );
+
+ /* Aborts are not done well. . . */
+ my_done( DID_ABORT << 16 );
+
+ return SCSI_ABORT_SUCCESS;
+}
+
+int fdomain_16x0_reset( Scsi_Cmnd *SCpnt )
+{
+#if DEBUG_RESET
+ static int called_once = 0;
+#endif
+
+#if ERRORS_ONLY
+ if (SCpnt) printk( "fdomain: SCSI Bus Reset\n" );
+#endif
+
+#if DEBUG_RESET
+ if (called_once) print_info( current_SC );
+ called_once = 1;
+#endif
+
+ outb( 1, SCSI_Cntl_port );
+ do_pause( 2 );
+ outb( 0, SCSI_Cntl_port );
+ do_pause( 115 );
+ outb( 0, SCSI_Mode_Cntl_port );
+ outb( PARITY_MASK, TMC_Cntl_port );
+
+ /* Unless this is the very first call (i.e., SCPnt == NULL), everything
+ is probably hosed at this point. We will, however, try to keep
+ things going by informing the high-level code that we need help. */
+
+ return SCSI_RESET_WAKEUP;
+}
+
+#include "sd.h"
+#include "scsi_ioctl.h"
+
+int fdomain_16x0_biosparam( Scsi_Disk *disk, kdev_t dev, int *info_array )
+{
+ int drive;
+ unsigned char buf[512 + sizeof( int ) * 2];
+ int size = disk->capacity;
+ int *sizes = (int *)buf;
+ unsigned char *data = (unsigned char *)(sizes + 2);
+ unsigned char do_read[] = { READ_6, 0, 0, 0, 1, 0 };
+ int retcode;
+ struct drive_info {
+ unsigned short cylinders;
+ unsigned char heads;
+ unsigned char sectors;
+ } *i;
+
+ /* NOTES:
+ The RAM area starts at 0x1f00 from the bios_base address.
+
+ For BIOS Version 2.0:
+
+ The drive parameter table seems to start at 0x1f30.
+ The first byte's purpose is not known.
+ Next is the cylinder, head, and sector information.
+ The last 4 bytes appear to be the drive's size in sectors.
+ The other bytes in the drive parameter table are unknown.
+ If anyone figures them out, please send me mail, and I will
+ update these notes.
+
+ Tape drives do not get placed in this table.
+
+ There is another table at 0x1fea:
+ If the byte is 0x01, then the SCSI ID is not in use.
+ If the byte is 0x18 or 0x48, then the SCSI ID is in use,
+ although tapes don't seem to be in this table. I haven't
+ seen any other numbers (in a limited sample).
+
+ 0x1f2d is a drive count (i.e., not including tapes)
+
+      The table at 0x1fcc contains I/O port addresses for the various
+ operations. I calculate these by hand in this driver code.
+
+
+
+ For the ISA-200S version of BIOS Version 2.0:
+
+ The drive parameter table starts at 0x1f33.
+
+ WARNING: Assume that the table entry is 25 bytes long. Someone needs
+ to check this for the Quantum ISA-200S card.
+
+
+
+ For BIOS Version 3.2:
+
+ The drive parameter table starts at 0x1f70. Each entry is
+ 0x0a bytes long. Heads are one less than we need to report.
+ */
+
+ drive = MINOR(dev) / 16;
+
+ if (bios_major == 2) {
+ switch (Quantum) {
+ case 2: /* ISA_200S */
+ /* The value of 25 has never been verified.
+ It should probably be 15. */
+ i = (struct drive_info *)( (char *)bios_base + 0x1f33 + drive * 25 );
+ break;
+ case 3: /* ISA_250MG */
+ i = (struct drive_info *)( (char *)bios_base + 0x1f36 + drive * 15 );
+ break;
+ case 4: /* ISA_200S (another one) */
+ i = (struct drive_info *)( (char *)bios_base + 0x1f34 + drive * 15 );
+ break;
+ default:
+ i = (struct drive_info *)( (char *)bios_base + 0x1f31 + drive * 25 );
+ break;
+ }
+ info_array[0] = i->heads;
+ info_array[1] = i->sectors;
+ info_array[2] = i->cylinders;
+ } else if (bios_major == 3
+ && bios_minor >= 0
+ && bios_minor < 4) { /* 3.0 and 3.2 BIOS */
+ i = (struct drive_info *)( (char *)bios_base + 0x1f71 + drive * 10 );
+ info_array[0] = i->heads + 1;
+ info_array[1] = i->sectors;
+ info_array[2] = i->cylinders;
+ } else { /* 3.4 BIOS (and up?) */
+ /* This algorithm was provided by Future Domain (much thanks!). */
+
+ sizes[0] = 0; /* zero bytes out */
+ sizes[1] = 512; /* one sector in */
+ memcpy( data, do_read, sizeof( do_read ) );
+ retcode = kernel_scsi_ioctl( disk->device,
+ SCSI_IOCTL_SEND_COMMAND,
+ (void *)buf );
+ if (!retcode /* SCSI command ok */
+ && data[511] == 0xaa && data[510] == 0x55 /* Partition table valid */
+ && data[0x1c2]) { /* Partition type */
+
+ /* The partition table layout is as follows:
+
+            Start: 0x1be
+ Offset: 0 = partition status
+ 1 = starting head
+ 2 = starting sector and cylinder (word, encoded)
+ 4 = partition type
+ 5 = ending head
+ 6 = ending sector and cylinder (word, encoded)
+ 8 = starting absolute sector (double word)
+ c = number of sectors (double word)
+ Signature: 0x1fe = 0x55aa
+
+ So, this algorithm assumes:
+ 1) the first partition table is in use,
+ 2) the data in the first entry is correct, and
+ 3) partitions never divide cylinders
+
+ Note that (1) may be FALSE for NetBSD (and other BSD flavors),
+ as well as for Linux. Note also, that Linux doesn't pay any
+ attention to the fields that are used by this algorithm -- it
+ only uses the absolute sector data. Recent versions of Linux's
+ fdisk(1) will fill this data in correctly, and forthcoming
+ versions will check for consistency.
+
+ Checking for a non-zero partition type is not part of the
+ Future Domain algorithm, but it seemed to be a reasonable thing
+ to do, especially in the Linux and BSD worlds. */
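+
+      /* Illustrative example (added, hypothetical values): if the first
+         entry's ending-head byte (data[0x1c3]) is 63 and the low six bits
+         of its ending sector/cylinder byte (data[0x1c4]) are 32, the code
+         below reports 64 heads and 32 sectors per track. */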
+
+ info_array[0] = data[0x1c3] + 1; /* heads */
+ info_array[1] = data[0x1c4] & 0x3f; /* sectors */
+ } else {
+
+ /* Note that this new method guarantees that there will always be
+ less than 1024 cylinders on a platter. This is good for drives
+ up to approximately 7.85GB (where 1GB = 1024 * 1024 kB). */
+
+ if ((unsigned int)size >= 0x7e0000U) {
+ info_array[0] = 0xff; /* heads = 255 */
+ info_array[1] = 0x3f; /* sectors = 63 */
+ } else if ((unsigned int)size >= 0x200000U) {
+ info_array[0] = 0x80; /* heads = 128 */
+ info_array[1] = 0x3f; /* sectors = 63 */
+ } else {
+ info_array[0] = 0x40; /* heads = 64 */
+ info_array[1] = 0x20; /* sectors = 32 */
+ }
+ }
+ /* For both methods, compute the cylinders */
+ info_array[2] = (unsigned int)size / (info_array[0] * info_array[1] );
+ }
+
+ return 0;
+}
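+
+/* Worked example for the fallback heuristic above (added; hypothetical
+   drive): a 2097152-sector (1 GB) disk with no usable partition table has
+   size >= 0x200000, so the driver reports 128 heads and 63 sectors, and
+   2097152 / (128 * 63) = 260 cylinders (integer division). */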
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = FDOMAIN_16X0;
+
+#include "scsi_module.c"
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/fdomain.h b/i386/i386at/gpl/linux/scsi/fdomain.h
new file mode 100644
index 00000000..e0c4e045
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/fdomain.h
@@ -0,0 +1,61 @@
+/* fdomain.h -- Header for Future Domain TMC-16x0 driver
+ * Created: Sun May 3 18:47:33 1992 by faith@cs.unc.edu
+ * Revised: Thu Oct 12 13:21:35 1995 by r.faith@ieee.org
+ * Author: Rickard E. Faith, faith@cs.unc.edu
+ * Copyright 1992, 1993, 1994, 1995 Rickard E. Faith
+ *
+ * $Id: fdomain.h,v 1.1.1.1 1997/02/25 21:27:49 thomas Exp $
+
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
+#ifndef _FDOMAIN_H
+#define _FDOMAIN_H
+
+int fdomain_16x0_detect( Scsi_Host_Template * );
+int fdomain_16x0_command( Scsi_Cmnd * );
+int fdomain_16x0_abort( Scsi_Cmnd * );
+const char *fdomain_16x0_info( struct Scsi_Host * );
+int fdomain_16x0_reset( Scsi_Cmnd * );
+int fdomain_16x0_queue( Scsi_Cmnd *, void (*done)(Scsi_Cmnd *) );
+int fdomain_16x0_biosparam( Disk *, kdev_t, int * );
+int fdomain_16x0_proc_info( char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout );
+
+extern struct proc_dir_entry proc_scsi_fdomain;
+
+#define FDOMAIN_16X0 { NULL, \
+ NULL, \
+ NULL, \
+ fdomain_16x0_proc_info, \
+ NULL, \
+ fdomain_16x0_detect, \
+ NULL, \
+ fdomain_16x0_info, \
+ fdomain_16x0_command, \
+ fdomain_16x0_queue, \
+ fdomain_16x0_abort, \
+ fdomain_16x0_reset, \
+ NULL, \
+ fdomain_16x0_biosparam, \
+ 1, \
+ 6, \
+ 64, \
+ 1, \
+ 0, \
+ 0, \
+ DISABLE_CLUSTERING }
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/g_NCR5380.c b/i386/i386at/gpl/linux/scsi/g_NCR5380.c
new file mode 100644
index 00000000..f7f2aabc
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/g_NCR5380.c
@@ -0,0 +1,588 @@
+/*
+ * Generic Generic NCR5380 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
+ * K.Lentin@cs.monash.edu.au
+ *
+ * ALPHA RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * TODO : flesh out DMA support, find someone actually using this (I have
+ * a memory mapped Trantor board that works fine)
+ */
+
+/*
+ * Options :
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
+ *
+ * USLEEP - enable support for devices that don't disconnect. Untested.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. With command line overrides - NCR5380=port,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 2. With the GENERIC_NCR5380_OVERRIDE compile time define. This is
+ * specified as an array of address, irq, dma, board tuples. Ie, for
+ * one board at 0x350, IRQ5, no dma, I could say
+ * -DGENERIC_NCR5380_OVERRIDE={{0xcc000, 5, DMA_NONE, BOARD_NCR5380}}
+ *
+ * -1 should be specified for no or DMA interrupt, -2 to autoprobe for an
+ * IRQ line if overridden on the command line.
+ */
+
+/*
+ * $Log: g_NCR5380.c,v $
+ * Revision 1.1.1.1 1996/10/30 01:40:05 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1996/03/25 20:25:35 goel
+ * Linux driver merge.
+ *
+ */
+
+#define AUTOPROBE_IRQ
+#define AUTOSENSE
+
+#include <linux/config.h>
+
+#ifdef MACH
+#define CONFIG_SCSI_G_NCR5380_MEM
+#endif
+
+#ifdef CONFIG_SCSI_GENERIC_NCR53C400
+#define NCR53C400_PSEUDO_DMA 1
+#define PSEUDO_DMA
+#define NCR53C400
+#endif
+#if defined(CONFIG_SCSI_G_NCR5380_PORT) && defined(CONFIG_SCSI_G_NCR5380_MEM)
+#error You cannot configure the Generic NCR 5380 SCSI Driver for memory mapped I/O and port mapped I/O at the same time (yet)
+#endif
+#if !defined(CONFIG_SCSI_G_NCR5380_PORT) && !defined(CONFIG_SCSI_G_NCR5380_MEM)
+#error You must configure the Generic NCR 5380 SCSI Driver for either memory mapped I/O or port mapped I/O.
+#endif
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "g_NCR5380.h"
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_g_ncr5380 = {
+ PROC_SCSI_GENERIC_NCR5380, 9, "g_NCR5380",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+static struct override {
+ NCR5380_implementation_fields;
+ int irq;
+ int dma;
+ int board; /* Use NCR53c400, Ricoh, etc. extensions ? */
+} overrides
+#ifdef GENERIC_NCR5380_OVERRIDE
+ [] = GENERIC_NCR5380_OVERRIDE
+#else
+ [1] = {{0,},};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
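+
+/*
+ * Build-time example (a sketch of how the override above is meant to be
+ * used, following the Options comment near the top of this file; the
+ * address and IRQ are illustrative):  compiling with
+ *
+ *     -DGENERIC_NCR5380_OVERRIDE='{{0xcc000, 5, DMA_NONE, BOARD_NCR5380}}'
+ *
+ * initializes overrides[] with a single entry, so NO_OVERRIDES evaluates
+ * to 1 and generic_NCR5380_detect() below probes one memory mapped board
+ * at 0xcc000 on IRQ 5 with no DMA channel.
+ */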
+
+/*
+ * Function : static internal_setup(int board, char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : board - either BOARD_NCR5380 for a normal NCR5380 board,
+ * or BOARD_NCR53C400 for a NCR53C400 board. str - unused, ints -
+ * array of integer parameters with ints[0] equal to the number of ints.
+ *
+ */
+
+static void internal_setup(int board, char *str, int *ints) {
+ static int commandline_current = 0;
+    switch (board) {
+    case BOARD_NCR5380:
+	if (ints[0] != 2 && ints[0] != 3) {
+	    printk("generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n");
+	    return;
+	}
+	break;
+    case BOARD_NCR53C400:
+	if (ints[0] != 2) {
+	    printk("generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n");
+	    return;
+	}
+	break;
+    }
+
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type)ints[1];
+ overrides[commandline_current].irq = ints[2];
+ if (ints[0] == 3)
+ overrides[commandline_current].dma = ints[3];
+ else
+ overrides[commandline_current].dma = DMA_NONE;
+ overrides[commandline_current].board = board;
+ ++commandline_current;
+ }
+}
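+
+/*
+ * Worked example (illustrative values; ints[0] carries the number of
+ * parameters, as noted in the comment above):  booting with
+ * "ncr5380=0xcc000,5,254" hands internal_setup() ints[] = {3, 0xcc000, 5, 254},
+ * so the next free overrides[] entry gets base 0xcc000, irq 5, dma 254 and
+ * board BOARD_NCR5380.  With "ncr5380=0xcc000,5" (ints[0] == 2) the dma
+ * field defaults to DMA_NONE.
+ */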
+
+/*
+ * Function : generic_NCR5380_setup (char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ */
+
+void generic_NCR5380_setup (char *str, int *ints) {
+ internal_setup (BOARD_NCR5380, str, ints);
+}
+
+/*
+ * Function : generic_NCR53C400_setup (char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ */
+
+void generic_NCR53C400_setup (char *str, int *ints) {
+ internal_setup (BOARD_NCR53C400, str, ints);
+}
+
+/*
+ * Function : int generic_NCR5380_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : initializes generic NCR5380 driver based on the
+ * command line / compile time port and irq definitions.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+int generic_NCR5380_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0;
+ int count;
+ int flags = 0;
+ struct Scsi_Host *instance;
+
+ tpnt->proc_dir = &proc_scsi_g_ncr5380;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ if (!(overrides[current_override].NCR5380_map_name))
+ continue;
+
+ switch (overrides[current_override].board) {
+ case BOARD_NCR5380:
+ flags = FLAG_NO_PSEUDO_DMA;
+ break;
+ case BOARD_NCR53C400:
+ flags = FLAG_NCR53C400;
+ break;
+ }
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->NCR5380_instance_name = overrides[current_override].NCR5380_map_name;
+
+ NCR5380_init(instance, flags);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, 0xffff);
+
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, generic_NCR5380_intr, SA_INTERRUPT, "NCR5380")) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+
+ printk("scsi%d : at " STRVAL(NCR5380_map_name) " 0x%x", instance->host_no, (unsigned int)instance->NCR5380_instance_name);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, GENERIC_NCR5380_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+const char * generic_NCR5380_info (void) {
+ static const char string[]="Generic NCR5380/53C400 Info";
+ return string;
+}
+
+int generic_NCR5380_release_resources(struct Scsi_Host * instance)
+{
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+ free_irq(instance->irq);
+
+ return 0;
+}
+
+#ifdef BIOSPARAM
+/*
+ * Function : int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, I could be incorrect.  Someone
+ * using hard disks on a Trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the Linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+ */
+
+int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ return 0;
+}
+#endif
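+
+/*
+ * Worked example for the mapping above (illustrative numbers only): a disk
+ * reporting capacity == 2097152 sectors (1 GB of 512-byte sectors) yields
+ * heads = 64, sectors = 32, cylinders = 2097152 >> 11 = 1024, and
+ * 64 * 32 * 1024 == 2097152, so the whole capacity remains addressable.
+ */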
+
+int generic_NCR5380_proc_info(char* buffer, char** start, off_t offset, int length, int hostno, int inout)
+{
+ int len = 0;
+ struct Scsi_Host *scsi_ptr;
+
+ for (scsi_ptr = first_instance; scsi_ptr; scsi_ptr=scsi_ptr->next)
+ if (scsi_ptr->host_no == hostno)
+ break;
+
+ len += sprintf(buffer+len, "SCSI host number %d : %s\n", scsi_ptr->host_no, scsi_ptr->hostt->name);
+ len += sprintf(buffer+len, "Generic NCR5380 driver version %d\n", GENERIC_NCR5380_PUBLIC_RELEASE);
+ len += sprintf(buffer+len, "NCR5380 driver core version %d\n", NCR5380_PUBLIC_RELEASE);
+#ifdef NCR53C400
+ len += sprintf(buffer+len, "NCR53C400 driver extension version %d\n", NCR53C400_PUBLIC_RELEASE);
+ len += sprintf(buffer+len, "NCR53C400 card%s detected\n", (((struct NCR5380_hostdata *)scsi_ptr->hostdata)->flags & FLAG_NCR53C400)?"":" not");
+# if NCR53C400_PSEUDO_DMA
+ len += sprintf(buffer+len, "NCR53C400 pseudo DMA being used\n");
+# endif
+#else
+ len += sprintf(buffer+len, "NO NCR53C400 driver extensions\n");
+#endif
+ len += sprintf(buffer+len, "Using %s mapping at %s 0x%x, ", STRVAL(NCR5380_map_config), STRVAL(NCR5380_map_name), scsi_ptr->NCR5380_instance_name);
+ if (scsi_ptr->irq == IRQ_NONE)
+ len += sprintf(buffer+len, "interrupts disabled\n");
+ else
+ len += sprintf(buffer+len, "on interrupt %d\n", scsi_ptr->irq);
+
+ *start = buffer + offset;
+ len -= offset;
+ if (len > length)
+ len = length;
+ return len;
+}
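+
+/*
+ * Reading note (a sketch of the usual /proc read convention, not additional
+ * driver code): the mid-level calls proc_info() with inout == 0 for a read;
+ * the routine above formats the full report into buffer, points *start at
+ * buffer + offset, and returns at most length bytes.  A caller passing
+ * offset = 0, length = 1024 therefore gets the report from the beginning,
+ * clipped to 1024 bytes.
+ */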
+
+#if NCR53C400_PSEUDO_DMA
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int i;
+ int bl;
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: About to read %d blocks for %d bytes\n", blocks, len);
+#endif
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE | CSR_TRANS_DIR);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
+ while (1) {
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: %d blocks left\n", blocks);
+#endif
+
+ if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ if (blocks)
+ printk("53C400r: blocks still == %d\n", blocks);
+ else
+ printk("53C400r: Exiting loop\n");
+#endif
+ break;
+ }
+
+#if 1
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk("53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Waiting for buffer, bl=%d\n", bl);
+#endif
+
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Transferring 128 bytes\n");
+#endif
+
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ dst[start+i] = NCR5380_read(C400_HOST_BUFFER);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: EXTRA: Waiting for buffer\n");
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Transferring EXTRA 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ dst[start+i] = NCR5380_read(C400_HOST_BUFFER);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128);
+#endif
+ start+=128;
+ blocks--;
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Final values: blocks=%d start=%d\n", blocks, start);
+#endif
+
+ if (!(NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ printk("53C400r: no 53C80 gated irq after transfer");
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+	printk("53C400r: Got 53C80 interrupt and tried to clear it\n");
+#endif
+
+/* DON'T DO THIS - THEY NEVER ARRIVE!
+ printk("53C400r: Waiting for 53C80 registers\n");
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG)
+ ;
+*/
+
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER))
+ printk("53C400r: no end dma signal\n");
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: end dma as expected\n");
+#endif
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ return 0;
+}
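+
+/*
+ * Size example for the routine above (arithmetic only, no extra logic): a
+ * 512-byte pseudo-DMA read is programmed as blocks = 512 / 128 = 4 into
+ * C400_BLOCK_COUNTER_REG, and every 128-byte buffer drained -- whether in
+ * the loop or by the trailing "EXTRA" copy -- advances start by 128 and
+ * decrements blocks, so a complete transfer finishes with start == 512.
+ */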
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int i;
+ int bl;
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: About to write %d blocks for %d bytes\n", blocks, len);
+#endif
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
+ while (1) {
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk("53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+
+ if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ if (blocks)
+ printk("53C400w: exiting loop, blocks still == %d\n", blocks);
+ else
+ printk("53C400w: exiting loop\n");
+#endif
+ break;
+ }
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: %d blocks left\n", blocks);
+
+ printk("53C400w: waiting for buffer, bl=%d\n", bl);
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: transferring 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start+i]);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+ if (blocks) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: EXTRA waiting for buffer\n");
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: transferring EXTRA 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start+i]);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ else
+ printk("53C400w: No EXTRA required\n");
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: Final values: blocks=%d start=%d\n", blocks, start);
+#endif
+
+#if 0
+ printk("53C400w: waiting for registers to be available\n");
+ THEY NEVER DO!
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG)
+ ;
+ printk("53C400w: Got em\n");
+#endif
+
+ /* Let's wait for this instead - could be ugly */
+ /* All documentation says to check for this. Maybe my hardware is too
+ * fast. Waiting for it seems to work fine! KLL
+ */
+ while (!(i = NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ ;
+
+ /*
+ * I know. i is certainly != 0 here but the loop is new. See previous
+ * comment.
+ */
+ if (i) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+	printk("53C400w: got 53C80 gated irq (last block)\n");
+#endif
+ if (!((i=NCR5380_read(BUS_AND_STATUS_REG)) & BASR_END_DMA_TRANSFER))
+ printk("53C400w: No END OF DMA bit - WHOOPS! BASR=%0x\n",i);
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ else
+ printk("53C400w: Got END OF DMA\n");
+#endif
+ }
+ else
+ printk("53C400w: no 53C80 gated irq after transfer (last block)\n");
+
+#if 0
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) {
+ printk("53C400w: no end dma signal\n");
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: waiting for last byte...\n");
+#endif
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got last byte.\n");
+ printk("53C400w: pwrite exiting with status 0, whoopee!\n");
+#endif
+ return 0;
+}
+#endif /* PSEUDO_DMA */
+
+#ifdef MACH
+#include "NCR5380.src"
+#else
+#include "NCR5380.c"
+#endif
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = GENERIC_NCR5380;
+
+#include <linux/module.h>
+#include "scsi_module.c"
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/g_NCR5380.h b/i386/i386at/gpl/linux/scsi/g_NCR5380.h
new file mode 100644
index 00000000..52e64499
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/g_NCR5380.h
@@ -0,0 +1,166 @@
+/*
+ * Generic Generic NCR5380 driver defines
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
+ * K.Lentin@cs.monash.edu.au
+ *
+ * ALPHA RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * $Log: g_NCR5380.h,v $
+ * Revision 1.1.1.1 1996/10/30 01:40:05 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1996/03/25 20:25:35 goel
+ * Linux driver merge.
+ *
+ */
+
+#ifndef GENERIC_NCR5380_H
+#define GENERIC_NCR5380_H
+
+#define GENERIC_NCR5380_PUBLIC_RELEASE 1
+
+#ifdef NCR53C400
+#define BIOSPARAM
+#define NCR5380_BIOSPARAM generic_NCR5380_biosparam
+#else
+#define NCR5380_BIOSPARAM NULL
+#endif
+
+#ifndef ASM
+int generic_NCR5380_abort(Scsi_Cmnd *);
+int generic_NCR5380_detect(Scsi_Host_Template *);
+int generic_NCR5380_release_resources(struct Scsi_Host *);
+int generic_NCR5380_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int generic_NCR5380_reset(Scsi_Cmnd *);
+#ifdef BIOSPARAM
+int generic_NCR5380_biosparam(Disk *, kdev_t, int *);
+#endif
+
+int generic_NCR5380_proc_info(char* buffer, char** start, off_t offset, int length, int hostno, int inout);
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 16
+#endif
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#define GENERIC_NCR5380 {NULL, NULL, NULL, \
+ generic_NCR5380_proc_info, \
+ "Generic NCR5380/NCR53C400 Scsi Driver", \
+ generic_NCR5380_detect, generic_NCR5380_release_resources, \
+ generic_NCR5380_info, NULL, \
+ generic_NCR5380_queue_command, generic_NCR5380_abort, \
+ generic_NCR5380_reset, NULL, \
+ NCR5380_BIOSPARAM, \
+ /* can queue */ CAN_QUEUE, /* id */ 7, SG_ALL, \
+ /* cmd per lun */ CMD_PER_LUN , 0, 0, DISABLE_CLUSTERING}
+
+#endif
+
+#ifndef HOSTS_C
+
+#define __STRVAL(x) #x
+#define STRVAL(x) __STRVAL(x)
+
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+
+#define NCR5380_map_config port
+
+#define NCR5380_map_type int
+
+#define NCR5380_map_name port
+
+#define NCR5380_instance_name io_port
+
+#define NCR53C400_register_offset 0
+
+#define NCR53C400_address_adjust 8
+
+#ifdef NCR53C400
+#define NCR5380_region_size 16
+#else
+#define NCR5380_region_size 8
+#endif
+
+#define NCR5380_read(reg) (inb(NCR5380_map_name + (reg)))
+#define NCR5380_write(reg, value) (outb((value), (NCR5380_map_name + (reg))))
+
+#else
+/* therefore CONFIG_SCSI_G_NCR5380_MEM */
+
+#define NCR5380_map_config memory
+
+#define NCR5380_map_type volatile unsigned char*
+
+#define NCR5380_map_name base
+
+#define NCR5380_instance_name base
+
+#define NCR53C400_register_offset 0x108
+
+#define NCR53C400_address_adjust 0
+
+#define NCR53C400_mem_base 0x3880
+
+#define NCR53C400_host_buffer 0x3900
+
+#define NCR5380_region_size 0x3a00
+
+
+#define NCR5380_read(reg) (*(NCR5380_map_name + NCR53C400_mem_base + (reg)))
+#define NCR5380_write(reg, value) (*(NCR5380_map_name + NCR53C400_mem_base + (reg)) = value)
+
+#endif
+
+#define NCR5380_implementation_fields \
+ NCR5380_map_type NCR5380_map_name
+
+#define NCR5380_local_declare() \
+ register NCR5380_implementation_fields
+
+#define NCR5380_setup(instance) \
+ NCR5380_map_name = (NCR5380_map_type)((instance)->NCR5380_instance_name)
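+
+/*
+ * Expansion sketch (illustrative, based on the two configurations above):
+ * with CONFIG_SCSI_G_NCR5380_PORT,
+ *
+ *     NCR5380_read(reg)   ==>   inb(port + (reg))
+ *
+ * while with CONFIG_SCSI_G_NCR5380_MEM (forced under MACH in g_NCR5380.c),
+ *
+ *     NCR5380_read(reg)   ==>   *(base + 0x3880 + (reg))
+ *
+ * A routine using these macros first declares the mapping variable with
+ * NCR5380_local_declare() and binds it to a host instance with
+ * NCR5380_setup(instance), exactly as NCR5380_pread() in g_NCR5380.c does.
+ */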
+
+#define NCR5380_intr generic_NCR5380_intr
+#define NCR5380_queue_command generic_NCR5380_queue_command
+#define NCR5380_abort generic_NCR5380_abort
+#define NCR5380_reset generic_NCR5380_reset
+#define NCR5380_pread generic_NCR5380_pread
+#define NCR5380_pwrite generic_NCR5380_pwrite
+
+#define BOARD_NCR5380 0
+#define BOARD_NCR53C400 1
+
+#endif /* else def HOSTS_C */
+#endif /* ndef ASM */
+#endif /* GENERIC_NCR5380_H */
+
diff --git a/i386/i386at/gpl/linux/scsi/hosts.c b/i386/i386at/gpl/linux/scsi/hosts.c
new file mode 100644
index 00000000..cb0163b4
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/hosts.c
@@ -0,0 +1,436 @@
+/*
+ * hosts.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * mid to lowlevel SCSI driver interface
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ */
+
+
+/*
+ * This file contains the medium level SCSI
+ * host interface initialization, as well as the scsi_hosts array of SCSI
+ * hosts currently present in the system.
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/config.h>
+#include <linux/blk.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+
+#include "scsi.h"
+
+#ifndef NULL
+#define NULL 0L
+#endif
+
+#define HOSTS_C
+
+#include "hosts.h"
+
+#ifdef CONFIG_SCSI_ADVANSYS
+#include "advansys.h"
+#endif
+
+#ifdef CONFIG_SCSI_AHA152X
+#include "aha152x.h"
+#endif
+
+#ifdef CONFIG_SCSI_AHA1542
+#include "aha1542.h"
+#endif
+
+#ifdef CONFIG_SCSI_AHA1740
+#include "aha1740.h"
+#endif
+
+#ifdef CONFIG_SCSI_AIC7XXX
+#include "aic7xxx.h"
+#endif
+
+#ifdef CONFIG_SCSI_BUSLOGIC
+#include "BusLogic.h"
+#endif
+
+#ifdef CONFIG_SCSI_EATA_DMA
+#include "eata_dma.h"
+#endif
+
+#ifdef CONFIG_SCSI_EATA_PIO
+#include "eata_pio.h"
+#endif
+
+#ifdef CONFIG_SCSI_U14_34F
+#include "u14-34f.h"
+#endif
+
+#ifdef CONFIG_SCSI_FUTURE_DOMAIN
+#include "fdomain.h"
+#endif
+
+#ifdef CONFIG_SCSI_GENERIC_NCR5380
+#include "g_NCR5380.h"
+#endif
+
+#ifdef CONFIG_SCSI_IN2000
+#include "in2000.h"
+#endif
+
+#ifdef CONFIG_SCSI_PAS16
+#include "pas16.h"
+#endif
+
+#ifdef CONFIG_SCSI_QLOGIC
+#include "qlogic.h"
+#endif
+
+#ifdef CONFIG_SCSI_SEAGATE
+#include "seagate.h"
+#endif
+
+#ifdef CONFIG_SCSI_T128
+#include "t128.h"
+#endif
+
+#ifdef CONFIG_SCSI_NCR53C7xx
+#include "53c7,8xx.h"
+#endif
+
+#ifdef CONFIG_SCSI_ULTRASTOR
+#include "ultrastor.h"
+#endif
+
+#ifdef CONFIG_SCSI_7000FASST
+#include "wd7000.h"
+#endif
+
+#ifdef CONFIG_SCSI_EATA
+#include "eata.h"
+#endif
+
+#ifdef CONFIG_SCSI_NCR53C406A
+#include "NCR53c406a.h"
+#endif
+
+#ifdef CONFIG_SCSI_AM53C974
+#include "AM53C974.h"
+#endif
+
+#ifdef CONFIG_SCSI_DEBUG
+#include "scsi_debug.h"
+#endif
+
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/i386/i386at/gpl/linux/scsi/Attic/hosts.c,v 1.1.1.1 1997/02/25 21:27:49 thomas Exp $";
+*/
+
+/*
+ * The scsi host entries should be in the order you wish the
+ * cards to be detected. A driver may appear more than once IFF
+ * it can deal with being detected (and therefore initialized)
+ * with more than one simultaneous host number, can handle being
+ * reentrant, etc.
+ *
+ * They may appear in any order, as each SCSI host is told which host
+ * number it is during detection.
+ */
+
+/* This is a placeholder for controllers that are not configured into
+ * the system - we do this to ensure that the controller numbering is
+ * always consistent, no matter how the kernel is configured. */
+
+#define NO_CONTROLLER {NULL, NULL, NULL, NULL, NULL, NULL, NULL, \
+ NULL, NULL, 0, 0, 0, 0, 0, 0}
+
+/*
+ * When configure is run, we don't want to link to any object code. Since
+ * the macro for each host will contain function pointers, we cannot
+ * use it and instead must use a "blank" that does no such
+ * idiocy.
+ */
+
+Scsi_Host_Template * scsi_hosts = NULL;
+
+static Scsi_Host_Template builtin_scsi_hosts[] =
+{
+#ifdef CONFIG_SCSI_ADVANSYS
+ ADVANSYS,
+#endif
+/* BusLogic must come before aha1542.c */
+#ifdef CONFIG_SCSI_BUSLOGIC
+ BUSLOGIC,
+#endif
+#ifdef CONFIG_SCSI_U14_34F
+ ULTRASTOR_14_34F,
+#endif
+#ifdef CONFIG_SCSI_ULTRASTOR
+ ULTRASTOR_14F,
+#endif
+#ifdef CONFIG_SCSI_AHA152X
+ AHA152X,
+#endif
+#ifdef CONFIG_SCSI_AHA1542
+ AHA1542,
+#endif
+#ifdef CONFIG_SCSI_AHA1740
+ AHA1740,
+#endif
+#ifdef CONFIG_SCSI_AIC7XXX
+ AIC7XXX,
+#endif
+#ifdef CONFIG_SCSI_FUTURE_DOMAIN
+ FDOMAIN_16X0,
+#endif
+#ifdef CONFIG_SCSI_IN2000
+ IN2000,
+#endif
+#ifdef CONFIG_SCSI_GENERIC_NCR5380
+ GENERIC_NCR5380,
+#endif
+#ifdef CONFIG_SCSI_NCR53C406A /* 53C406A should come before QLOGIC */
+ NCR53c406a,
+#endif
+#ifdef CONFIG_SCSI_QLOGIC
+ QLOGIC,
+#endif
+#ifdef CONFIG_SCSI_PAS16
+ MV_PAS16,
+#endif
+#ifdef CONFIG_SCSI_SEAGATE
+ SEAGATE_ST0X,
+#endif
+#ifdef CONFIG_SCSI_T128
+ TRANTOR_T128,
+#endif
+#ifdef CONFIG_SCSI_NCR53C7xx
+ NCR53c7xx,
+#endif
+#ifdef CONFIG_SCSI_EATA_DMA
+ EATA_DMA,
+#endif
+#ifdef CONFIG_SCSI_EATA_PIO
+ EATA_PIO,
+#endif
+#ifdef CONFIG_SCSI_7000FASST
+ WD7000,
+#endif
+#ifdef CONFIG_SCSI_EATA
+ EATA,
+#endif
+#ifdef CONFIG_SCSI_AM53C974
+ AM53C974,
+#endif
+#ifdef CONFIG_SCSI_DEBUG
+ SCSI_DEBUG,
+#endif
+};
+
+#define MAX_SCSI_HOSTS (sizeof(builtin_scsi_hosts) / sizeof(Scsi_Host_Template))
+
+
+/*
+ * Our semaphores and timeout counters, where size depends on
+ * MAX_SCSI_HOSTS here.
+ */
+
+struct Scsi_Host * scsi_hostlist = NULL;
+struct Scsi_Device_Template * scsi_devicelist = NULL;
+
+int max_scsi_hosts = 0;
+int next_scsi_host = 0;
+
+void
+scsi_unregister(struct Scsi_Host * sh){
+ struct Scsi_Host * shpnt;
+
+ if(scsi_hostlist == sh)
+ scsi_hostlist = sh->next;
+ else {
+ shpnt = scsi_hostlist;
+ while(shpnt->next != sh) shpnt = shpnt->next;
+ shpnt->next = shpnt->next->next;
+ }
+
+ /* If we are removing the last host registered, it is safe to reuse
+ * its host number (this avoids "holes" at boot time) (DB)
+ */
+ if (max_scsi_hosts == next_scsi_host && !scsi_loadable_module_flag)
+ max_scsi_hosts--;
+
+ next_scsi_host--;
+ scsi_init_free((char *) sh, sizeof(struct Scsi_Host) + sh->extra_bytes);
+}
+
+/* We call this when we come across a new host adapter. We only do this
+ * once we are 100% sure that we want to use this host adapter - it is a
+ * pain to reverse this, so we try and avoid it
+ */
+
+struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){
+ struct Scsi_Host * retval, *shpnt;
+ retval = (struct Scsi_Host *)scsi_init_malloc(sizeof(struct Scsi_Host) + j,
+ (tpnt->unchecked_isa_dma && j ? GFP_DMA : 0) | GFP_ATOMIC);
+ retval->host_busy = 0;
+ retval->block = NULL;
+ retval->wish_block = 0;
+ if(j > 0xffff) panic("Too many extra bytes requested\n");
+ retval->extra_bytes = j;
+ retval->loaded_as_module = scsi_loadable_module_flag;
+ retval->host_no = max_scsi_hosts++; /* never reuse host_no (DB) */
+ next_scsi_host++;
+ retval->host_queue = NULL;
+ retval->host_wait = NULL;
+ retval->last_reset = 0;
+ retval->irq = 0;
+ retval->dma_channel = 0xff;
+
+ /* These three are default values which can be overridden */
+ retval->max_channel = 0;
+ retval->max_id = 8;
+ retval->max_lun = 8;
+
+ retval->unique_id = 0;
+ retval->io_port = 0;
+ retval->hostt = tpnt;
+ retval->next = NULL;
+#ifdef DEBUG
+ printk("Register %x %x: %d\n", (int)retval, (int)retval->hostt, j);
+#endif
+
+ /* The next six are the default values which can be overridden
+ * if need be */
+ retval->this_id = tpnt->this_id;
+ retval->can_queue = tpnt->can_queue;
+ retval->sg_tablesize = tpnt->sg_tablesize;
+ retval->cmd_per_lun = tpnt->cmd_per_lun;
+ retval->unchecked_isa_dma = tpnt->unchecked_isa_dma;
+ retval->use_clustering = tpnt->use_clustering;
+ if(!scsi_hostlist)
+ scsi_hostlist = retval;
+ else
+ {
+ shpnt = scsi_hostlist;
+ while(shpnt->next) shpnt = shpnt->next;
+ shpnt->next = retval;
+ }
+
+ return retval;
+}
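+
+/* Usage sketch (a hypothetical low-level driver, mirroring the pattern in
+ * g_NCR5380.c earlier in this patch): a detect routine claims a host and
+ * fills in whatever differs from its template, e.g.
+ *
+ *     int foo_detect(Scsi_Host_Template *tpnt)
+ *     {
+ *         struct Scsi_Host *sh;
+ *
+ *         sh = scsi_register(tpnt, sizeof(struct foo_hostdata));
+ *         sh->io_port = 0x330;
+ *         sh->irq = 11;
+ *         return 1;
+ *     }
+ *
+ * where foo_detect, foo_hostdata and the port/irq values are made up for
+ * illustration.
+ */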
+
+int
+scsi_register_device(struct Scsi_Device_Template * sdpnt)
+{
+ if(sdpnt->next) panic("Device already registered");
+ sdpnt->next = scsi_devicelist;
+ scsi_devicelist = sdpnt;
+ return 0;
+}
+
+unsigned int scsi_init()
+{
+ static int called = 0;
+ int i, pcount;
+ Scsi_Host_Template * tpnt;
+ struct Scsi_Host * shpnt;
+ const char * name;
+
+ if(called) return 0;
+
+ called = 1;
+ for (tpnt = &builtin_scsi_hosts[0], i = 0; i < MAX_SCSI_HOSTS; ++i, tpnt++)
+ {
+ /*
+ * Initialize our semaphores. -1 is interpreted to mean
+     * "inactive" - whereas 0 will indicate a timeout condition.
+ */
+
+ pcount = next_scsi_host;
+ if ((tpnt->detect) &&
+ (tpnt->present =
+ tpnt->detect(tpnt)))
+ {
+	    /* The only time this should come up is when people use
+	     * some kind of patched driver. */
+ if(pcount == next_scsi_host) {
+ if(tpnt->present > 1)
+ panic("Failure to register low-level scsi driver");
+		/* The low-level driver did not register a host of its own;
+		 * register one on its behalf now. */
+ scsi_register(tpnt,0);
+ }
+ tpnt->next = scsi_hosts;
+ scsi_hosts = tpnt;
+
+ /* Add the driver to /proc/scsi */
+#if CONFIG_PROC_FS
+ build_proc_dir_entries(tpnt);
+#endif
+ }
+ }
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ {
+ if(shpnt->hostt->info)
+ name = shpnt->hostt->info(shpnt);
+ else
+ name = shpnt->hostt->name;
+ printk ("scsi%d : %s\n", /* And print a little message */
+ shpnt->host_no, name);
+ }
+
+ printk ("scsi : %d host%s.\n", next_scsi_host,
+ (next_scsi_host == 1) ? "" : "s");
+
+ scsi_make_blocked_list();
+
+ /* Now attach the high level drivers */
+#ifdef CONFIG_BLK_DEV_SD
+ scsi_register_device(&sd_template);
+#endif
+#ifdef CONFIG_BLK_DEV_SR
+ scsi_register_device(&sr_template);
+#endif
+#ifdef CONFIG_CHR_DEV_ST
+ scsi_register_device(&st_template);
+#endif
+#ifdef CONFIG_CHR_DEV_SG
+ scsi_register_device(&sg_template);
+#endif
+
+#if 0
+ max_scsi_hosts = next_scsi_host;
+#endif
+ return 0;
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/hosts.h b/i386/i386at/gpl/linux/scsi/hosts.h
new file mode 100644
index 00000000..1da480de
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/hosts.h
@@ -0,0 +1,409 @@
+/*
+ * hosts.h Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * mid to low-level SCSI driver interface header
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Further modified by Eric Youngdale to support multiple host adapters
+ * of the same type.
+ */
+
+#ifndef _HOSTS_H
+#define _HOSTS_H
+
+/*
+ $Header: cvs/gnumach/i386/i386at/gpl/linux/scsi/Attic/hosts.h,v 1.1.1.1 1997/02/25 21:27:49 thomas Exp $
+*/
+
+#include <linux/proc_fs.h>
+
+/* It is senseless to set SG_ALL any higher than this - the performance
+ * does not get any better, and it wastes memory
+ */
+#define SG_NONE 0
+#define SG_ALL 0xff
+
+#define DISABLE_CLUSTERING 0
+#define ENABLE_CLUSTERING 1
+
+/* The various choices mean:
+ * NONE: Self evident. Host adapter is not capable of scatter-gather.
+ * ALL: Means that the host adapter module can do scatter-gather,
+ * and that there is no limit to the size of the table to which
+ * we scatter/gather data.
+ * Anything else: Indicates the maximum number of chains that can be
+ * used in one scatter-gather request.
+ */
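+
+/* For example, an adapter limited to 16 scatter-gather segments would set
+ * sg_tablesize to 16; the FDOMAIN_16X0 template earlier in this patch uses
+ * 64, and GENERIC_NCR5380 uses SG_ALL.
+ */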
+
+/*
+ * The Scsi_Host_Template type has all that is needed to interface with a SCSI
+ * host in a device independent matter. There is one entry for each different
+ * type of host adapter that is supported on the system.
+ */
+
+typedef struct scsi_disk Disk;
+
+typedef struct SHT
+{
+
+ /* Used with loadable modules so we can construct a linked list. */
+ struct SHT * next;
+
+ /* Used with loadable modules so that we know when it is safe to unload */
+ long * usage_count;
+
+ /* The pointer to the /proc/scsi directory entry */
+ struct proc_dir_entry *proc_dir;
+
+ /* proc-fs info function.
+ * Can be used to export driver statistics and other infos to the world
+ * outside the kernel ie. userspace and it also provides an interface
+ * to feed the driver with information. Check eata_dma_proc.c for reference
+ */
+ int (*proc_info)(char *, char **, off_t, int, int, int);
+
+ /*
+ * The name pointer is a pointer to the name of the SCSI
+ * device detected.
+ */
+ const char *name;
+
+ /*
+ * The detect function shall return non zero on detection,
+     * The detect function shall return non-zero on detection,
+     * indicating the number of host adapters of this particular
+     * type that were found.  It should also
+ * SCSI driver. It is passed the host number, so this host
+ * knows where the first entry is in the scsi_hosts[] array.
+ *
+ * Note that the detect routine MUST not call any of the mid level
+ * functions to queue commands because things are not guaranteed
+ * to be set up yet. The detect routine can send commands to
+ * the host adapter as long as the program control will not be
+ * passed to scsi.c in the processing of the command. Note
+ * especially that scsi_malloc/scsi_free must not be called.
+ */
+ int (* detect)(struct SHT *);
+
+ /* Used with loadable modules to unload the host structures. Note:
+ * there is a default action built into the modules code which may
+ * be sufficient for most host adapters. Thus you may not have to supply
+ * this at all.
+ */
+ int (*release)(struct Scsi_Host *);
+
+ /*
+ * The info function will return whatever useful
+ * information the developer sees fit. If not provided, then
+ * the name field will be used instead.
+ */
+ const char *(* info)(struct Scsi_Host *);
+
+ /*
+ * The command function takes a target, a command (this is a SCSI
+ * command formatted as per the SCSI spec, nothing strange), a
+ * data buffer pointer, and data buffer length pointer. The return
+ * is a status int, bit fielded as follows :
+ * Byte What
+ * 0 SCSI status code
+ * 1 SCSI 1 byte message
+ * 2 host error return.
+ * 3 mid level error return
+ */
+ int (* command)(Scsi_Cmnd *);
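+
+    /*
+     * For example (using the byte layout above; the values are
+     * illustrative): a command that completed normally could return
+     *
+     *     (DID_OK << 16) | (COMMAND_COMPLETE << 8) | GOOD
+     *
+     * with the host code in bits 16-23, the message byte in bits 8-15 and
+     * the SCSI status in bits 0-7.
+     */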
+
+ /*
+ * The QueueCommand function works in a similar manner
+ * to the command function. It takes an additional parameter,
+ * void (* done)(int host, int code) which is passed the host
+ * # and exit result when the command is complete.
+ * Host number is the POSITION IN THE hosts array of THIS
+ * host adapter.
+ */
+ int (* queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+
+ /*
+ * Since the mid level driver handles time outs, etc, we want to
+ * be able to abort the current command. Abort returns 0 if the
+ * abortion was successful. The field SCpnt->abort reason
+     * abort was successful.  The field SCpnt->abort_reason
+ * the abort in the first place, and this will be used
+ * in the mid-level code instead of the host_byte().
+ * If non-zero, the code passed to it
+ * will be used as the return code, otherwise
+ * DID_ABORT should be returned.
+ *
+ * Note that the scsi driver should "clean up" after itself,
+ * resetting the bus, etc. if necessary.
+ */
+ int (* abort)(Scsi_Cmnd *);
+
+ /*
+ * The reset function will reset the SCSI bus. Any executing
+ * commands should fail with a DID_RESET in the host byte.
+ * The Scsi_Cmnd is passed so that the reset routine can figure
+ * out which host adapter should be reset, and also which command
+ * within the command block was responsible for the reset in
+ * the first place. Some hosts do not implement a reset function,
+ * and these hosts must call scsi_request_sense(SCpnt) to keep
+ * the command alive.
+ */
+ int (* reset)(Scsi_Cmnd *);
+
+ /*
+ * This function is used to select synchronous communications,
+ * which will result in a higher data throughput. Not implemented
+ * yet.
+ */
+ int (* slave_attach)(int, int);
+
+ /*
+ * This function determines the bios parameters for a given
+ * harddisk. These tend to be numbers that are made up by
+ * the host adapter. Parameters:
+ * size, device number, list (heads, sectors, cylinders)
+ */
+ int (* bios_param)(Disk *, kdev_t, int []);
+
+ /*
+ * This determines if we will use a non-interrupt driven
+ * or an interrupt driven scheme, It is set to the maximum number
+ * of simultaneous commands a given host adapter will accept.
+ */
+ int can_queue;
+
+ /*
+ * In many instances, especially where disconnect / reconnect are
+ * supported, our host also has an ID on the SCSI bus. If this is
+ * the case, then it must be reserved. Please set this_id to -1 if
+ * your setup is in single initiator mode, and the host lacks an
+ * ID.
+ */
+ int this_id;
+
+ /*
+ * This determines the degree to which the host adapter is capable
+ * of scatter-gather.
+ */
+ short unsigned int sg_tablesize;
+
+ /*
+ * True if this host adapter can make good use of linked commands.
+ * This will allow more than one command to be queued to a given
+ * unit on a given host. Set this to the maximum number of command
+ * blocks to be provided for each device. Set this to 1 for one
+ * command block per lun, 2 for two, etc. Do not set this to 0.
+ * You should make sure that the host adapter will do the right thing
+ * before you try setting this above 1.
+ */
+ short cmd_per_lun;
+
+ /*
+     * present contains a counter indicating how many boards of this
+ * type were found when we did the scan.
+ */
+ unsigned char present;
+
+ /*
+ * true if this host adapter uses unchecked DMA onto an ISA bus.
+ */
+ unsigned unchecked_isa_dma:1;
+
+ /*
+ * true if this host adapter can make good use of clustering.
+     * I originally thought that if the tablesize was large it
+ * was a waste of CPU cycles to prepare a cluster list, but
+ * it works out that the Buslogic is faster if you use a smaller
+ * number of segments (i.e. use clustering). I guess it is
+ * inefficient.
+ */
+ unsigned use_clustering:1;
+
+} Scsi_Host_Template;
+
+/*
+ * The scsi_hosts array is the array containing the data for all
+ * possible <supported> scsi hosts. This is similar to the
+ * Scsi_Host_Template, except that we have one entry for each
+ * actual physical host adapter on the system, stored as a linked
+ * list. Note that if there are 2 aha1542 boards, then there will
+ * be two Scsi_Host entries, but only 1 Scsi_Host_Template entry.
+ */
+
+struct Scsi_Host
+{
+ struct Scsi_Host * next;
+ unsigned short extra_bytes;
+ volatile unsigned char host_busy;
+ char host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
+ int last_reset;
+ struct wait_queue *host_wait;
+ Scsi_Cmnd *host_queue;
+ Scsi_Host_Template * hostt;
+
+ /*
+ * These three parameters can be used to allow for wide scsi,
+ * and for host adapters that support multiple busses
+ * The first two should be set to 1 more than the actual max id
+ * or lun (i.e. 8 for normal systems).
+ */
+ unsigned int max_id;
+ unsigned int max_lun;
+ unsigned int max_channel;
+
+ /*
+ * Pointer to a circularly linked list - this indicates the hosts
+ * that should be locked out of performing I/O while we have an active
+ * command on this host.
+ */
+ struct Scsi_Host * block;
+ unsigned wish_block:1;
+
+ /* These parameters should be set by the detect routine */
+ unsigned char *base;
+ unsigned int io_port;
+ unsigned char n_io_port;
+ unsigned char irq;
+ unsigned char dma_channel;
+
+ /*
+ * This is a unique identifier that must be assigned so that we
+ * have some way of identifying each detected host adapter properly
+ * and uniquely. For hosts that do not support more than one card
+ * in the system at one time, this does not need to be set. It is
+ * initialized to 0 in scsi_register.
+ */
+ unsigned int unique_id;
+
+ /*
+ * The rest can be copied from the template, or specifically
+ * initialized, as required.
+ */
+
+ int this_id;
+ int can_queue;
+ short cmd_per_lun;
+ short unsigned int sg_tablesize;
+ unsigned unchecked_isa_dma:1;
+ unsigned use_clustering:1;
+ /*
+ * True if this host was loaded as a loadable module
+ */
+ unsigned loaded_as_module:1;
+
+ /*
+ * True when we call the low-level reset function, and
+ * the midlevel code suggests a full bus reset.
+ */
+ unsigned suggest_bus_reset:1;
+
+ unsigned long hostdata[0]; /* Used for storage of host specific stuff */
+};
+
+extern struct Scsi_Host * scsi_hostlist;
+extern struct Scsi_Device_Template * scsi_devicelist;
+
+extern Scsi_Host_Template * scsi_hosts;
+
+extern void build_proc_dir_entries(Scsi_Host_Template *);
+
+
+/*
+ * scsi_init initializes the scsi hosts.
+ */
+
+/*
+ * We use these goofy things because the MM is not set up when we init
+ * the scsi subsystem. By using these functions we can write code that
+ * looks normal. Also, it makes it possible to use the same code for a
+ * loadable module.
+ */
+
+extern void * scsi_init_malloc(unsigned int size, int priority);
+extern void scsi_init_free(char * ptr, unsigned int size);
+
+extern int next_scsi_host;
+
+extern int scsi_loadable_module_flag;
+unsigned int scsi_init(void);
+extern struct Scsi_Host * scsi_register(Scsi_Host_Template *, int j);
+extern void scsi_unregister(struct Scsi_Host * i);
+
+#define BLANK_HOST {"", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+struct Scsi_Device_Template
+{
+ struct Scsi_Device_Template * next;
+ const char * name;
+ const char * tag;
+ long * usage_count; /* Used for loadable modules */
+ unsigned char scsi_type;
+ unsigned char major;
+ unsigned char nr_dev; /* Number currently attached */
+ unsigned char dev_noticed; /* Number of devices detected. */
+ unsigned char dev_max; /* Current size of arrays */
+ unsigned blk:1; /* 0 if character device */
+ int (*detect)(Scsi_Device *); /* Returns 1 if we can attach this device */
+ int (*init)(void); /* Sizes arrays based upon number of devices
+ * detected */
+ void (*finish)(void); /* Perform initialization after attachment */
+ int (*attach)(Scsi_Device *); /* Attach devices to arrays */
+ void (*detach)(Scsi_Device *);
+};
+
+extern struct Scsi_Device_Template sd_template;
+extern struct Scsi_Device_Template st_template;
+extern struct Scsi_Device_Template sr_template;
+extern struct Scsi_Device_Template sg_template;
+
+int scsi_register_device(struct Scsi_Device_Template * sdpnt);
+
+/* These are used by loadable modules */
+extern int scsi_register_module(int, void *);
+extern void scsi_unregister_module(int, void *);
+
+/* The different types of modules that we can load and unload */
+#define MODULE_SCSI_HA 1
+#define MODULE_SCSI_CONST 2
+#define MODULE_SCSI_IOCTL 3
+#define MODULE_SCSI_DEV 4
+
+
+/*
+ * This is an ugly hack. If we expect to be able to load devices at run time,
+ * we need to leave extra room in some of the data structures. Doing a
+ * realloc to enlarge the structures would be riddled with race conditions,
+ * so until a better solution is discovered, we use this crude approach
+ */
+#define SD_EXTRA_DEVS 2
+#define ST_EXTRA_DEVS 2
+#define SR_EXTRA_DEVS 2
+#define SG_EXTRA_DEVS (SD_EXTRA_DEVS + SR_EXTRA_DEVS + ST_EXTRA_DEVS)
+
+#endif
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/in2000.c b/i386/i386at/gpl/linux/scsi/in2000.c
new file mode 100644
index 00000000..ac2cc656
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/in2000.c
@@ -0,0 +1,731 @@
+/*
+ * This file is in2000.c, written and
+ * Copyright (C) 1993 Brad McLean
+ * Last edit 1/19/95 TZ
+ * Disclaimer:
+ * Note: This is ugly. I know it, I wrote it, but my whole
+ * focus was on getting the damn thing up and out quickly.
+ * Future stuff that would be nice: Command chaining, and
+ * a local queue of commands would speed stuff up considerably.
+ * Disconnection needs some supporting code. All of this
+ * is beyond the scope of what I wanted to address, but if you
+ * have time and patience, more power to you.
+ * Also, there are some constants scattered throughout that
+ * should have defines, and I should have built functions to
+ * address the registers on the WD chip.
+ * Oh well, I'm out of time for this project.
+ * The one good thing to be said is that you can use the card.
+ */
+
+/*
+ * This module was first updated by Shaun Savage on 5-13-93.
+ * At that time the write path was fixed, along with IRQ
+ * detection and some timing issues; other problems have been
+ * fixed since.  On 7-20-93 this file was updated for patch
+ * level 11.  There are still problems with it, but it works on
+ * 95% of the machines.  Problems remain when it is used with
+ * IDE drives, as a swap drive, and with disks that support
+ * reselection.  But for most people it will work.
+ */
+/* More changes by Bill Earnest, wde@aluxpo.att.com
+ * through 4/07/94. Includes rewrites of FIFO routines,
+ * length-limited commands to make swap partitions work.
+ * Merged the changes released by Larry Doolittle, based on input
+ * from Jon Luckey, Roger Sunshine, John Shifflett. The FAST_FIFO
+ * doesn't work for me. Scatter-gather code from Eric. The change to
+ * an IF stmt. in the interrupt routine finally made it stable.
+ * Limiting swap request size patch to ll_rw_blk.c not needed now.
+ * Please ignore the clutter of debug stmts., pretty can come later.
+ */
+/* Merged code from Matt Postiff improving the auto-sense validation
+ * for all I/O addresses. Some reports of problems still come in, but
+ * have been unable to reproduce or localize the cause. Some are from
+ * LUN > 0 problems, but that is not host specific. Now 6/6/94.
+ */
+/* Changes for 1.1.28 kernel made 7/19/94, code not affected. (WDE)
+ */
+/* Changes for 1.1.43+ kernels made 8/25/94, code added to check for
+ * new BIOS version, derived by jshiffle@netcom.com. (WDE)
+ *
+ * 1/7/95 Fix from Peter Lu (swift@world.std.com) for datalen vs. dataptr
+ * logic, much more stable under load.
+ *
+ * 1/19/95 (zerucha@shell.portal.com) Added module and biosparam support for
+ * larger SCSI hard drives (untested).
+ */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "in2000.h"
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_in2000 = {
+ PROC_SCSI_IN2000, 6, "in2000",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/*#define FAST_FIFO_IO*/
+
+/*#define DEBUG*/
+#ifdef DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+/* These functions are based on include/asm/io.h */
+#ifndef inw
+inline static unsigned short inw( unsigned short port )
+{
+ unsigned short _v;
+
+ __asm__ volatile ("inw %1,%0"
+ :"=a" (_v):"d" ((unsigned short) port));
+ return _v;
+}
+#endif
+
+#ifndef outw
+inline static void outw( unsigned short value, unsigned short port )
+{
+ __asm__ volatile ("outw %0,%1"
+ : /* no outputs */
+ :"a" ((unsigned short) value),
+ "d" ((unsigned short) port));
+}
+#endif
+
+/* These functions are lifted from drivers/block/hd.c */
+
+#define port_read(port,buf,nr) \
+__asm__("cld;rep;insw": :"d" (port),"D" (buf),"c" (nr):"cx","di")
+
+#define port_write(port,buf,nr) \
+__asm__("cld;rep;outsw": :"d" (port),"S" (buf),"c" (nr):"cx","si")
+
+static unsigned int base;
+static unsigned int ficmsk;
+static unsigned char irq_level;
+static int in2000_datalen;
+static unsigned int in2000_nsegment;
+static unsigned int in2000_current_segment;
+static unsigned short *in2000_dataptr;
+static char in2000_datawrite;
+static struct scatterlist * in2000_scatter;
+static Scsi_Cmnd *in2000_SCptr = 0;
+
+static void (*in2000_done)(Scsi_Cmnd *);
+
+static int in2000_test_port(int index)
+{
+ static const int *bios_tab[] = {
+ (int *) 0xc8000, (int *) 0xd0000, (int *) 0xd8000 };
+ int i;
+ char tmp;
+
+ tmp = inb(INFLED);
+ /* First, see if the DIP switch values are valid */
+ /* The test of B7 may fail on some early boards, mine works. */
+ if ( ((~tmp & 0x3) != index ) || (tmp & 0x80) || !(tmp & 0x4) )
+ return 0;
+ printk("IN-2000 probe got dip setting of %02X\n", tmp);
+ tmp = inb(INVERS);
+/* Add some extra sanity checks here */
+ for(i=0; i < 3; i++)
+ if(*(bios_tab[i]+0x04) == 0x41564f4e ||
+ *(bios_tab[i]+0xc) == 0x61776c41) {
+ printk("IN-2000 probe found hdw. vers. %02x, BIOS at %06x\n",
+ tmp, (unsigned int)bios_tab[i]);
+ return 1;
+ }
+ printk("in2000 BIOS not found.\n");
+ return 0;
+}
+
+
+/*
+ * retrieve the current transaction counter from the WD
+ */
+
+static unsigned in2000_txcnt(void)
+{
+ unsigned total=0;
+
+ if(inb(INSTAT) & 0x20) return 0xffffff; /* not readable now */
+ outb(TXCNTH,INSTAT); /* then autoincrement */
+ total = (inb(INDATA) & 0xff) << 16;
+ outb(TXCNTM,INSTAT);
+ total += (inb(INDATA) & 0xff) << 8;
+ outb(TXCNTL,INSTAT);
+ total += (inb(INDATA) & 0xff);
+ return total;
+}
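+
+/*
+ * Example (illustrative register values): if the three reads return
+ * TXCNTH = 0x01, TXCNTM = 0x00 and TXCNTL = 0x80, the routine reports
+ * 0x010080 = 65664 bytes still outstanding on the SCSI bus.
+ */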
+
+/*
+ * Note: the FIFO is screwy, and has a counter granularity of 16 bytes, so
+ * we have to reconcile the FIFO counter, the transaction byte count from the
+ * WD chip, and of course, our desired transaction size. It may look strange,
+ * and could probably use improvement, but it works, for now.
+ */
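+
+/*
+ * Worked example for the write path below (illustrative numbers): with the
+ * FIFO counter reading infcnt = 0x20 and 1024 bytes still to send,
+ * count = (0x20 << 3) - 32 = 224 bytes may be queued, which the routine
+ * halves to 112 16-bit words before feeding INFIFO.
+ */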
+
+static void in2000_fifo_out(void) /* uses FIFOCNTR */
+{
+ unsigned count, infcnt, txcnt;
+
+ infcnt = inb(INFCNT)& 0xfe; /* FIFO counter */
+ do {
+ txcnt = in2000_txcnt();
+/*DEB(printk("FIw:%d %02x %d\n", in2000_datalen, infcnt, txcnt));*/
+ count = (infcnt << 3) - 32; /* don't fill completely */
+ if ( count > in2000_datalen )
+ count = in2000_datalen; /* limit to actual data on hand */
+ count >>= 1; /* Words, not bytes */
+#ifdef FAST_FIFO_IO
+ if ( count ) {
+ port_write(INFIFO, in2000_dataptr, count);
+ in2000_datalen -= (count<<1);
+ }
+#else
+ while ( count-- )
+ {
+ outw(*in2000_dataptr++, INFIFO);
+ in2000_datalen -= 2;
+ }
+#endif
+ } while((in2000_datalen > 0) && ((infcnt = (inb(INFCNT)) & 0xfe) >= 0x20) );
+ /* If scatter-gather, go on to next segment */
+ if( !in2000_datalen && ++in2000_current_segment < in2000_nsegment)
+ {
+ in2000_scatter++;
+ in2000_datalen = in2000_scatter->length;
+ in2000_dataptr = (unsigned short*)in2000_scatter->address;
+ }
+ if ( in2000_datalen <= 0 )
+ {
+ ficmsk = 0;
+ count = 32; /* Always says to use this much flush */
+ while ( count-- )
+ outw(0, INFIFO);
+ outb(2, ININTR); /* Mask FIFO Interrupts when done */
+ }
+}
+
+static void in2000_fifo_in(void) /* uses FIFOCNTR */
+{
+ unsigned fic, count, count2;
+
+ count = inb(INFCNT) & 0xe1;
+ do{
+ count2 = count;
+ count = (fic = inb(INFCNT)) & 0xe1;
+ } while ( count != count2 );
+DEB(printk("FIir:%d %02x %08x\n", in2000_datalen,fic,(unsigned int )in2000_dataptr));
+ do {
+ count2 = in2000_txcnt(); /* bytes yet to come over SCSI bus */
+DEB(printk("FIr:%d %02x %08x %08x\n", in2000_datalen,fic,count2,(unsigned int)in2000_dataptr));
+ if(count2 > 65536) count2 = 0;
+ if(fic > 128) count = 1024;
+ else if(fic > 64) count = 512;
+ else if (fic > 32) count = 256;
+ else if ( count2 < in2000_datalen ) /* if drive has < what we want */
+ count = in2000_datalen - count2; /* FIFO has the rest */
+ if ( count > in2000_datalen ) /* count2 is lesser of FIFO & rqst */
+ count2 = in2000_datalen >> 1; /* converted to word count */
+ else
+ count2 = count >> 1;
+ count >>= 1; /* also to words */
+ count -= count2; /* extra left over in FIFO */
+#ifdef FAST_FIFO_IO
+ if ( count2 ) {
+ port_read(INFIFO, in2000_dataptr, count2);
+ in2000_datalen -= (count2<<1);
+ }
+#else
+ while ( count2-- )
+ {
+ *in2000_dataptr++ = inw(INFIFO);
+ in2000_datalen -=2;
+ }
+#endif
+ } while((in2000_datalen > 0) && (fic = inb(INFCNT)) );
+DEB(printk("FIer:%d %02x %08x\n", in2000_datalen,fic,(unsigned int )in2000_dataptr));
+/* while ( count-- )
+ inw(INFIFO);*/ /* Throw away some extra stuff */
+ if( !in2000_datalen && ++in2000_current_segment < in2000_nsegment)
+ {
+ in2000_scatter++;
+ in2000_datalen = in2000_scatter->length;
+ in2000_dataptr = (unsigned short*)in2000_scatter->address;
+ }
+ if ( ! in2000_datalen ){
+ outb(2, ININTR); /* Mask FIFO Interrupts when done */
+ ficmsk = 0;}
+}
+
+static void in2000_intr_handle(int irq, struct pt_regs *regs)
+{
+ int result=0;
+ unsigned int count,auxstatus,scsistatus,cmdphase,scsibyte;
+ int action=0;
+ Scsi_Cmnd *SCptr;
+
+ DEB(printk("INT:%d %02x %08x\n", in2000_datalen, inb(INFCNT),(unsigned int)in2000_dataptr));
+
+ if (( (ficmsk & (count = inb(INFCNT))) == 0xfe ) ||
+ ( (inb(INSTAT) & 0x8c) == 0x80))
+ { /* FIFO interrupt or WD interrupt */
+ auxstatus = inb(INSTAT); /* need to save now */
+ outb(SCSIST,INSTAT);
+ scsistatus = inb(INDATA); /* This clears the WD intrpt bit */
+ outb(TARGETU,INSTAT); /* then autoincrement */
+ scsibyte = inb(INDATA); /* Get the scsi status byte */
+ outb(CMDPHAS,INSTAT);
+ cmdphase = inb(INDATA);
+ DEB(printk("(int2000:%02x %02x %02x %02x %02x)\n",count,auxstatus,
+ scsistatus,cmdphase,scsibyte));
+
+ /* Why do we assume that we need to send more data here??? ERY */
+ if ( in2000_datalen ) /* data xfer pending */
+ {
+ if ( in2000_dataptr == NULL )
+ printk("int2000: dataptr=NULL datalen=%d\n",
+ in2000_datalen);
+ else if ( in2000_datawrite )
+ in2000_fifo_out();
+ else
+ in2000_fifo_in();
+ }
+ if ( (auxstatus & 0x8c) == 0x80 )
+ { /* There is a WD Chip interrupt & register read good */
+ outb(2,ININTR); /* Disable fifo interrupts */
+ ficmsk = 0;
+ result = DID_OK << 16;
+ /* 16=Select & transfer complete, 85=got disconnect */
+ if ((scsistatus != 0x16) && (scsistatus != 0x85)
+ && (scsistatus != 0x42)){
+/* printk("(WDi2000:%02x %02x %02x %02x %02x)\n",count,auxstatus,
+ scsistatus,cmdphase,scsibyte);*/
+/* printk("QDAT:%d %08x %02x\n",
+ in2000_datalen,(unsigned int)in2000_dataptr,ficmsk);*/
+ ;
+ }
+ switch ( scsistatus & 0xf0 )
+ {
+ case 0x00: /* Card Reset Completed */
+ action = 3;
+ break;
+ case 0x10: /* Successful Command Completion */
+ if ( scsistatus & 0x8 )
+ action = 1;
+ break;
+ case 0x20: /* Command Paused or Aborted */
+ if ( (scsistatus & 0x8) )
+ action = 1;
+ else if ( (scsistatus & 7) < 2 )
+ action = 2;
+ else
+ result = DID_ABORT << 16;
+ break;
+ case 0x40: /* Terminated early */
+ if ( scsistatus & 0x8 )
+ action = 1;
+ else if ( (scsistatus & 7) > 2 )
+ action = 2;
+ else
+ result = DID_TIME_OUT << 16;
+ break;
+ case 0x80: /* Service Required from SCSI bus */
+ if ( scsistatus & 0x8 )
+ action = 1;
+ else
+ action = 2;
+ break;
+ } /* end switch(scsistatus) */
+ outb(0,INFLED);
+ switch ( action )
+ {
+ case 0x02: /* Issue an abort */
+ outb(COMMAND,INSTAT);
+ outb(1,INDATA); /* ABORT COMMAND */
+ result = DID_ABORT << 16;
+ case 0x00: /* Basically all done */
+ if ( ! in2000_SCptr )
+ return;
+ in2000_SCptr->result = result | scsibyte;
+ SCptr = in2000_SCptr;
+ in2000_SCptr = 0;
+ if ( in2000_done )
+ (*in2000_done)(SCptr);
+ break;
+ case 0x01: /* We need to reissue a command */
+ outb(CMDPHAS,INSTAT);
+ switch ( scsistatus & 7 )
+ {
+ case 0: /* Data out phase */
+ case 1: /* Data in phase */
+ case 4: /* Unspec info out phase */
+ case 5: /* Unspec info in phase */
+				case 6:	/* Message out phase */
+ case 7: /* Message in phase */
+ outb(0x41,INDATA); /* rdy to disconn */
+ break;
+ case 2: /* command phase */
+ outb(0x30,INDATA); /* rdy to send cmd bytes */
+ break;
+ case 3: /* status phase */
+ outb(0x45,INDATA); /* To go to status phase,*/
+ outb(TXCNTH,INSTAT); /* elim. data, autoinc */
+ outb(0,INDATA);
+ outb(0,INDATA);
+ outb(0,INDATA);
+ in2000_datalen = 0;
+ in2000_dataptr = 0;
+ break;
+ } /* end switch(scsistatus) */
+ outb(COMMAND,INSTAT);
+ outb(8,INDATA); /* RESTART THE COMMAND */
+ break;
+ case 0x03: /* Finish up a Card Reset */
+ outb(TIMEOUT,INSTAT); /* I got these values */
+ /* by reverse Engineering */
+ outb(IN2000_TMOUT,INDATA); /* the Always' bios. */
+ outb(CONTROL,INSTAT);
+ outb(0,INDATA);
+ outb(SYNCTXR,INSTAT);
+ outb(0x40,INDATA); /* async, 4 cyc xfer per. */
+ break;
+ } /* end switch(action) */
+ } /* end if auxstatus for WD int */
+    } /* end if FIFO or WD interrupt */
+}
+
+int in2000_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ unchar direction;
+ unchar *cmd = (unchar *) SCpnt->cmnd;
+ unchar target = SCpnt->target;
+ void *buff = SCpnt->request_buffer;
+ unsigned long flags;
+ int bufflen = SCpnt->request_bufflen;
+ int timeout, size, loop;
+ int i;
+
+ /*
+ * This SCSI command has no data phase, but unfortunately the mid-level
+ * SCSI drivers ask for 256 bytes of data xfer. Our card hangs if you
+ * do this, so we protect against it here. It would be nice if the mid-
+ * level could be changed, but who knows if that would break other host
+ * adapter drivers.
+ */
+ if ( *cmd == TEST_UNIT_READY )
+ bufflen = 0;
+
+ /*
+ * What it looks like. Boy did I get tired of reading its output.
+ */
+ if (*cmd == READ_10 || *cmd == WRITE_10) {
+ i = xscsi2int((cmd+1));
+ } else if (*cmd == READ_6 || *cmd == WRITE_6) {
+ i = scsi2int((cmd+1));
+ } else {
+ i = -1;
+ }
+#ifdef DEBUG
+ printk("in2000qcmd: pos %d len %d ", i, bufflen);
+ printk("scsi cmd:");
+ for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]);
+ printk("\n");
+#endif
+ direction = 1; /* assume for most commands */
+ if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ direction = 0;
+ size = SCpnt->cmd_len; /* CDB length */
+ /*
+ * Setup our current pointers
+ * This is where you would allocate a control structure in a queue,
+ * If you were going to upgrade this to do multiple issue.
+ * Note that datalen and dataptr exist because we can change the
+ * values during the course of the operation, while managing the
+ * FIFO.
+ * Note the nasty little first clause. In theory, the mid-level
+ * drivers should never hand us more than one command at a time,
+ * but just in case someone gets cute in configuring the driver,
+ * we'll protect them, although not very politely.
+ */
+ if ( in2000_SCptr )
+ {
+ printk("in2000_queue_command waiting for free command block!\n");
+ while ( in2000_SCptr )
+ barrier();
+ }
+ for ( timeout = jiffies + 5; timeout > jiffies; )
+ {
+ if ( ! ( inb(INSTAT) & 0xb0 ) )
+ {
+ timeout = 0;
+ break;
+ }
+ else
+ {
+ inb(INSTAT);
+ outb(SCSIST,INSTAT);
+ inb(INDATA);
+ outb(TARGETU,INSTAT); /* then autoinc */
+ inb(INDATA);
+ inb(INDATA);
+ }
+ }
+ if ( timeout )
+ {
+ printk("in2000_queue_command timeout!\n");
+ SCpnt->result = DID_TIME_OUT << 16;
+ (*done)(SCpnt);
+ return 1;
+ }
+ /* Added for scatter-gather support */
+ in2000_nsegment = SCpnt->use_sg;
+ in2000_current_segment = 0;
+ if(SCpnt->use_sg){
+ in2000_scatter = (struct scatterlist *) buff;
+ in2000_datalen = in2000_scatter->length;
+ in2000_dataptr = (unsigned short*)in2000_scatter->address;
+ } else {
+ in2000_scatter = NULL;
+ in2000_datalen = bufflen;
+ in2000_dataptr = (unsigned short*) buff;
+ };
+ in2000_done = done;
+ in2000_SCptr = SCpnt;
+ /*
+ * Write the CDB to the card, then the LUN, the length, and the target.
+ */
+ outb(TOTSECT, INSTAT); /* start here then autoincrement */
+ for ( loop=0; loop < size; loop++ )
+ outb(cmd[loop],INDATA);
+ outb(TARGETU,INSTAT);
+ outb(SCpnt->lun & 7,INDATA);
+ SCpnt->host_scribble = NULL;
+ outb(TXCNTH,INSTAT); /* then autoincrement */
+ outb(bufflen>>16,INDATA);
+ outb(bufflen>>8,INDATA);
+ outb(bufflen,INDATA);
+ outb(target&7,INDATA);
+ /*
+ * Set up the FIFO
+ */
+ save_flags(flags);
+ cli(); /* so FIFO init waits till WD set */
+ outb(0,INFRST);
+ if ( direction == 1 )
+ {
+ in2000_datawrite = 0;
+ outb(0,INFWRT);
+ }
+ else
+ {
+ in2000_datawrite = 1;
+ for ( loop=16; --loop; ) /* preload the outgoing fifo */
+ {
+ outw(*in2000_dataptr++,INFIFO);
+ if(in2000_datalen > 0) in2000_datalen-=2;
+ }
+ }
+ ficmsk = 0xff;
+ /*
+ * Start it up
+ */
+ outb(CONTROL,INSTAT); /* WD BUS Mode */
+ outb(0x4C,INDATA);
+ if ( in2000_datalen ) /* if data xfer cmd */
+ outb(0,ININTR); /* Enable FIFO intrpt some boards? */
+ outb(COMMAND,INSTAT);
+ outb(0,INNLED);
+ outb(8,INDATA); /* Select w/ATN & Transfer */
+ restore_flags(flags); /* let the intrpt rip */
+ return 0;
+}
+
+static volatile int internal_done_flag = 0;
+static volatile int internal_done_errcode = 0;
+
+static void internal_done(Scsi_Cmnd * SCpnt)
+{
+ internal_done_errcode = SCpnt->result;
+ ++internal_done_flag;
+}
+
+int in2000_command(Scsi_Cmnd * SCpnt)
+{
+ in2000_queuecommand(SCpnt, internal_done);
+
+ while (!internal_done_flag);
+ internal_done_flag = 0;
+ return internal_done_errcode;
+}
+
+int in2000_detect(Scsi_Host_Template * tpnt)
+{
+/* Order chosen to reduce conflicts with some multi-port serial boards */
+ int base_tab[] = { 0x220,0x200,0x110,0x100 };
+ int int_tab[] = { 15,14,11,10 };
+ struct Scsi_Host * shpnt;
+ int loop, tmp;
+
+ DEB(printk("in2000_detect: \n"));
+
+ tpnt->proc_dir = &proc_scsi_in2000;
+
+ for ( loop=0; loop < 4; loop++ )
+ {
+ base = base_tab[loop];
+ if ( in2000_test_port(loop)) break;
+ }
+ if ( loop == 4 )
+ return 0;
+
+ /* Read the dip switch values again for miscellaneous checking and
+ informative messages */
+ tmp = inb(INFLED);
+
+ /* Bit 2 tells us if interrupts are disabled */
+ if ( (tmp & 0x4) == 0 ) {
+ printk("The IN-2000 is not configured for interrupt operation\n");
+ printk("Change the DIP switch settings to enable interrupt operation\n");
+ }
+
+ /* Bit 6 tells us about floppy controller */
+ printk("IN-2000 probe found floppy controller on IN-2000 ");
+ if ( (tmp & 0x40) == 0)
+ printk("enabled\n");
+ else
+ printk("disabled\n");
+
+ /* Bit 5 tells us about synch/asynch mode */
+ printk("IN-2000 probe found IN-2000 in ");
+ if ( (tmp & 0x20) == 0)
+ printk("synchronous mode\n");
+ else
+ printk("asynchronous mode\n");
+
+ irq_level = int_tab [ ((~inb(INFLED)>>3)&0x3) ];
+
+ printk("Configuring IN2000 at IO:%x, IRQ %d"
+#ifdef FAST_FIFO_IO
+ " (using fast FIFO I/O code)"
+#endif
+ "\n",base, irq_level);
+
+ outb(2,ININTR); /* Shut off the FIFO first, so it won't ask for data.*/
+ if (request_irq(irq_level,in2000_intr_handle, 0, "in2000"))
+ {
+ printk("in2000_detect: Unable to allocate IRQ.\n");
+ return 0;
+ }
+ outb(0,INFWRT); /* read mode so WD can intrpt */
+ outb(SCSIST,INSTAT);
+ inb(INDATA); /* free status reg, clear WD intrpt */
+ outb(OWNID,INSTAT);
+ outb(0x7,INDATA); /* we use addr 7 */
+ outb(COMMAND,INSTAT);
+ outb(0,INDATA); /* do chip reset */
+ shpnt = scsi_register(tpnt, 0);
+ /* Set these up so that we can unload the driver properly. */
+ shpnt->io_port = base;
+ shpnt->n_io_port = 12;
+ shpnt->irq = irq_level;
+ request_region(base, 12,"in2000"); /* Prevent other drivers from using this space */
+ return 1;
+}
+
+int in2000_abort(Scsi_Cmnd * SCpnt)
+{
+ DEB(printk("in2000_abort\n"));
+ /*
+ * Ask no stupid questions, just order the abort.
+ */
+ outb(COMMAND,INSTAT);
+ outb(1,INDATA); /* Abort Command */
+ return 0;
+}
+
+static inline void delay( unsigned how_long )
+{
+ unsigned long time = jiffies + how_long;
+ while (jiffies < time) ;
+}
+
+int in2000_reset(Scsi_Cmnd * SCpnt)
+{
+ DEB(printk("in2000_reset called\n"));
+ /*
+ * Note: this is finished off by an incoming interrupt
+ */
+ outb(0,INFWRT); /* read mode so WD can intrpt */
+ outb(SCSIST,INSTAT);
+ inb(INDATA);
+ outb(OWNID,INSTAT);
+ outb(0x7,INDATA); /* ID=7,noadv, no parity, clk div=2 (8-10Mhz clk) */
+ outb(COMMAND,INSTAT);
+ outb(0,INDATA); /* reset WD chip */
+ delay(2);
+#ifdef SCSI_RESET_PENDING
+ return SCSI_RESET_PENDING;
+#else
+ if(SCpnt) SCpnt->flags |= NEEDS_JUMPSTART;
+ return 0;
+#endif
+}
+
+int in2000_biosparam(Disk * disk, kdev_t dev, int* iinfo)
+ {
+ int size = disk->capacity;
+ DEB(printk("in2000_biosparam\n"));
+ iinfo[0] = 64;
+ iinfo[1] = 32;
+ iinfo[2] = size >> 11;
+/* This should approximate the large drive handling that the DOS ASPI manager
+ uses. Drives very near the boundaries may not be handled correctly (i.e.
+ near 2.0 Gb and 4.0 Gb) */
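+/* Illustrative arithmetic (not in the original driver): a 4 GB disk has
+ 8388608 512-byte sectors, so 64/32 gives 4096 cylinders, 64/63 gives 2080
+ and 128/63 gives 1040 -- all over 1024 -- leaving the 255/63 mapping below,
+ which yields 522 cylinders. */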
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 64;
+ iinfo[1] = 63;
+ iinfo[2] = disk->capacity / (iinfo[0] * iinfo[1]);
+ }
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 128;
+ iinfo[1] = 63;
+ iinfo[2] = disk->capacity / (iinfo[0] * iinfo[1]);
+ }
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 255;
+ iinfo[1] = 63;
+ iinfo[2] = disk->capacity / (iinfo[0] * iinfo[1]);
+ if (iinfo[2] > 1023)
+ iinfo[2] = 1023;
+ }
+ return 0;
+ }
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = IN2000;
+
+#include "scsi_module.c"
+#endif
+
diff --git a/i386/i386at/gpl/linux/scsi/in2000.h b/i386/i386at/gpl/linux/scsi/in2000.h
new file mode 100644
index 00000000..cf68db7f
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/in2000.h
@@ -0,0 +1,122 @@
+#ifndef _IN2000_H
+#define _IN2000_H
+
+/* $Id: in2000.h,v 1.1.1.1 1997/02/25 21:27:50 thomas Exp $
+ *
+ * Header file for the Always IN 2000 driver for Linux
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+/* The IN-2000 is based on a WD33C93 */
+
+#define INSTAT (base + 0x0) /* R: Auxiliary Status; W: register select */
+#define INDATA (base + 0x1) /* R/W: Data port */
+#define INFIFO (base + 0x2) /* R/W FIFO, Word access only */
+#define INREST (base + 0x3) /* W: Reset everything */
+#define INFCNT (base + 0x4) /* R: FIFO byte count */
+#define INFRST (base + 0x5) /* W: Reset Fifo count and to write */
+#define INFWRT (base + 0x7) /* W: Set FIFO to read */
+#define INFLED (base + 0x8) /* W: Set LED; R: Dip Switch settings */
+#define INNLED (base + 0x9) /* W: reset LED */
+#define INVERS (base + 0xa) /* R: Read hw version, end-reset */
+#define ININTR (base + 0xc) /* W: Interrupt Mask Port */
+#define G2CNTRL_HRDY 0x20 /* Sets HOST ready */
+
+/* WD33C93 defines */
+#define OWNID 0
+#undef CONTROL
+#define CONTROL 1
+#define TIMEOUT 2
+#define TOTSECT 3
+#define TOTHEAD 4
+#define TOTCYLH 5
+#define TOTCYLL 6
+#define LADRSHH 7
+#define LADRSHL 8
+#define LADRSLH 9
+#define LADRSLL 10
+#define SECTNUM 11
+#define HEADNUM 12
+#define CYLNUMH 13
+#define CYLNUML 14
+#define TARGETU 15
+#define CMDPHAS 16
+#define SYNCTXR 17
+#define TXCNTH 18
+#define TXCNTM 19
+#define TXCNTL 20
+#define DESTID 21
+#define SRCID 22
+#define SCSIST 23
+#define COMMAND 24
+#define WDDATA 25
+#define AUXSTAT 31
+
+/* OWNID Register Bits */
+#define OWN_EAF 0x08
+#define OWN_EHP 0x10
+#define OWN_FS0 0x40
+#define OWN_FS1 0x80
+/* AUX Register Bits */
+#define AUX_DBR 0
+#define AUX_PE 1
+#define AUX_CIP 0x10
+#define AUX_BSY 0x20
+#define AUX_LCI 0x40
+#define AUX_INT 0x80
+
+/* Select timeout const, 1 count = 8ms */
+#define IN2000_TMOUT 0x1f
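+/* Illustrative: 0x1f = 31 counts of 8 ms each, i.e. about a 250 ms selection timeout. */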
+
+/* These belong in scsi.h also */
+#undef any2scsi
+#define any2scsi(up, p) \
+(up)[0] = (((unsigned long)(p)) >> 16); \
+(up)[1] = (((unsigned long)(p)) >> 8); \
+(up)[2] = ((unsigned long)(p));
+
+#undef scsi2int
+#define scsi2int(up) ( ((((long)*(up))&0x1f) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
+
+#undef xany2scsi
+#define xany2scsi(up, p) \
+(up)[0] = ((long)(p)) >> 24; \
+(up)[1] = ((long)(p)) >> 16; \
+(up)[2] = ((long)(p)) >> 8; \
+(up)[3] = ((long)(p));
+
+#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ + (((long)(up)[2]) << 8) + ((long)(up)[3]) )
+
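+/* Worked example with made-up CDB bytes (not from the original source):
+ for a READ_6 CDB of 08 01 23 45 .., scsi2int(cmd+1) masks off the LUN bits
+ and yields block 0x012345; for a READ_10 CDB of 28 00 00 12 34 56 ..,
+ xscsi2int(cmd+2) yields the full 32-bit block 0x00123456. */
+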
+#define MAX_CDB 12
+#define MAX_SENSE 14
+#define MAX_STATUS 32
+
+int in2000_detect(Scsi_Host_Template *);
+int in2000_command(Scsi_Cmnd *);
+int in2000_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int in2000_abort(Scsi_Cmnd *);
+int in2000_reset(Scsi_Cmnd *);
+int in2000_biosparam(Disk *, kdev_t, int*);
+
+#ifndef NULL
+ #define NULL 0
+#endif
+
+
+/* next may be "SG_NONE" or "SG_ALL" or nr. of (1k) blocks per R/W Cmd. */
+#define IN2000_SG SG_ALL
+#define IN2000 {NULL, NULL, \
+ NULL, NULL, \
+ "Always IN2000", in2000_detect, NULL, \
+ NULL, in2000_command, \
+ in2000_queuecommand, \
+ in2000_abort, \
+ in2000_reset, \
+ NULL, \
+ in2000_biosparam, \
+ 1, 7, IN2000_SG, 1, 0, 0}
+
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/pas16.c b/i386/i386at/gpl/linux/scsi/pas16.c
new file mode 100644
index 00000000..9f5d8826
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/pas16.c
@@ -0,0 +1,553 @@
+#define AUTOSENSE
+#define PSEUDO_DMA
+#define FOO
+#define UNSAFE /* Not unsafe for PAS16 -- use it */
+
+/*
+ * This driver adapted from Drew Eckhardt's Trantor T128 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * ( Based on T128 - DISTRIBUTION RELEASE 3. )
+ *
+ * Modified to work with the Pro Audio Spectrum/Studio 16
+ * by John Weidman.
+ *
+ *
+ * For more information, please consult
+ *
+ * Media Vision
+ * (510) 770-8600
+ * (800) 348-7116
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * Options :
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512
+ * bytes at a time. Since interrupts are disabled by default during
+ * these transfers, we might need this to give reasonable interrupt
+ * service time if the transfer size gets too large.
+ *
+ * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
+ * increase compared to polled I/O.
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. This
+ * parameter comes from the NCR5380 code. It is NOT unsafe with
+ * the PAS16 and you should use it. If you don't you will have
+ * a problem with dropped characters in high-speed
+ * communications while SCSI transfers are in progress. If you really don't
+ * want to use UNSAFE you can try defining LIMIT_TRANSFERSIZE or
+ * twiddle with the transfer size in the high level code.
+ *
+ * USLEEP - enable support for devices that don't disconnect. Untested.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. Autoprobe (default) - There are many different models of
+ * the Pro Audio Spectrum/Studio 16, and I only have one of
+ * them, so this may require a little tweaking. An interrupt
+ * is triggered to autoprobe for the interrupt line. Note:
+ * with the newer model boards, the interrupt is set via
+ * software after reset using the default_irq for the
+ * current board number.
+ *
+ *
+ * 2. With command line overrides - pas16=port,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 3. With the PAS16_OVERRIDE compile time define. This is
+ * specified as an array of address, irq tuples. Ie, for
+ * one board at the default 0x388 address, IRQ10, I could say
+ * -DPAS16_OVERRIDE={{0x388, 10}}
+ * NOTE: Untested.
+ *
+ * Note that if the override methods are used, place holders must
+ * be specified for other boards in the system.
+ *
+ *
+ * Configuration notes :
+ * The current driver does not support interrupt sharing with the
+ * sound portion of the card. If you use the same irq for the
+ * scsi port and sound you will have problems. Either use
+ * a different irq for the scsi port or don't use interrupts
+ * for the scsi port.
+ *
+ * If you have problems with your card not being recognized, use
+ * the LILO command line override. Try to get it recognized without
+ * interrupts. Ie, for a board at the default 0x388 base port,
+ * boot: linux pas16=0x388,255
+ *
+ * (255 is the IRQ_NONE constant in NCR5380.h)
+ */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/system.h>
+#include <linux/signal.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "pas16.h"
+#define AUTOPROBE_IRQ
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_pas16 = {
+ PROC_SCSI_PAS16, 5, "pas16",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+int scsi_irq_translate[] =
+ { 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 7, 8, 9, 0, 10, 11 };
+
+/* The default_irqs array contains values used to set the irq into the
+ * board via software (as must be done on newer model boards without
+ * irq jumpers on the board). The first value in the array will be
+ * assigned to logical board 0, the next to board 1, etc.
+ */
+int default_irqs[] = { PAS16_DEFAULT_BOARD_1_IRQ,
+ PAS16_DEFAULT_BOARD_2_IRQ,
+ PAS16_DEFAULT_BOARD_3_IRQ,
+ PAS16_DEFAULT_BOARD_4_IRQ
+ };
+
+static struct override {
+ unsigned short io_port;
+ int irq;
+} overrides
+#ifdef PAS16_OVERRIDE
+ [] = PAS16_OVERRIDE;
+#else
+ [4] = {{0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO},
+ {0,IRQ_AUTO}};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+static struct base {
+ unsigned short io_port;
+ int noauto;
+} bases[] = { {PAS16_DEFAULT_BASE_1, 0},
+ {PAS16_DEFAULT_BASE_2, 0},
+ {PAS16_DEFAULT_BASE_3, 0},
+ {PAS16_DEFAULT_BASE_4, 0}
+ };
+
+#define NO_BASES (sizeof (bases) / sizeof (struct base))
+
+unsigned short pas16_offset[ 8 ] =
+ {
+ 0x1c00, /* OUTPUT_DATA_REG */
+ 0x1c01, /* INITIATOR_COMMAND_REG */
+ 0x1c02, /* MODE_REG */
+ 0x1c03, /* TARGET_COMMAND_REG */
+ 0x3c00, /* STATUS_REG ro, SELECT_ENABLE_REG wo */
+ 0x3c01, /* BUS_AND_STATUS_REG ro, START_DMA_SEND_REG wo */
+ 0x3c02, /* INPUT_DATA_REGISTER ro, (N/A on PAS16 ?)
+ * START_DMA_TARGET_RECEIVE_REG wo
+ */
+ 0x3c03, /* RESET_PARITY_INTERRUPT_REG ro,
+ * START_DMA_INITIATOR_RECEIVE_REG wo
+ */
+ };
+
+
+
+/*
+ * Function : enable_board( int board_num, unsigned short port )
+ *
+ * Purpose : set address in new model board
+ *
+ * Inputs : board_num - logical board number 0-3, port - base address
+ *
+ */
+
+void enable_board( int board_num, unsigned short port )
+{
+ outb( 0xbc + board_num, MASTER_ADDRESS_PTR );
+ outb( port >> 2, MASTER_ADDRESS_PTR );
+}
+
+
+
+/*
+ * Function : init_board( unsigned short port, int irq )
+ *
+ * Purpose : Set the board up to handle the SCSI interface
+ *
+ * Inputs : port - base address of the board,
+ * irq - irq to assign to the SCSI port
+ * force_irq - set it even if it conflicts with sound driver
+ *
+ */
+
+void init_board( unsigned short io_port, int irq, int force_irq )
+{
+ unsigned int tmp;
+ unsigned int pas_irq_code;
+
+ /* Initialize the SCSI part of the board */
+
+ outb( 0x30, io_port + P_TIMEOUT_COUNTER_REG ); /* Timeout counter */
+ outb( 0x01, io_port + P_TIMEOUT_STATUS_REG_OFFSET ); /* Reset TC */
+ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */
+
+ NCR5380_read( RESET_PARITY_INTERRUPT_REG );
+
+ /* Set the SCSI interrupt pointer without mucking up the sound
+ * interrupt pointer in the same byte.
+ */
+ pas_irq_code = ( irq < 16 ) ? scsi_irq_translate[irq] : 0;
+ tmp = inb( io_port + IO_CONFIG_3 );
+
+ if( (( tmp & 0x0f ) == pas_irq_code) && pas_irq_code > 0
+ && !force_irq )
+ {
+ printk( "pas16: WARNING: Can't use same irq as sound "
+ "driver -- interrupts disabled\n" );
+ /* Set up the drive parameters, disable 5380 interrupts */
+ outb( 0x4d, io_port + SYS_CONFIG_4 );
+ }
+ else
+ {
+ tmp = ( tmp & 0x0f ) | ( pas_irq_code << 4 );
+ outb( tmp, io_port + IO_CONFIG_3 );
+
+ /* Set up the drive parameters and enable 5380 interrupts */
+ outb( 0x6d, io_port + SYS_CONFIG_4 );
+ }
+}
+
+
+/*
+ * Function : pas16_hw_detect( unsigned short board_num )
+ *
+ * Purpose : determine if a pas16 board is present
+ *
+ * Inputs : board_num - logical board number ( 0 - 3 )
+ *
+ * Returns : 0 if board not found, 1 if found.
+ */
+
+int pas16_hw_detect( unsigned short board_num )
+{
+ unsigned char board_rev, tmp;
+ unsigned short io_port = bases[ board_num ].io_port;
+
+ /* See if we can find a PAS16 board at the address associated
+ * with this logical board number.
+ */
+
+ /* First, attempt to take a newer model board out of reset and
+ * give it a base address. This shouldn't affect older boards.
+ */
+ enable_board( board_num, io_port );
+
+ /* Now see if it looks like a PAS16 board */
+ board_rev = inb( io_port + PCB_CONFIG );
+
+ if( board_rev == 0xff )
+ return 0;
+
+ tmp = board_rev ^ 0xe0;
+
+ outb( tmp, io_port + PCB_CONFIG );
+ tmp = inb( io_port + PCB_CONFIG );
+ outb( board_rev, io_port + PCB_CONFIG );
+
+ if( board_rev != tmp ) /* Not a PAS-16 */
+ return 0;
+
+ if( ( inb( io_port + OPERATION_MODE_1 ) & 0x03 ) != 0x03 )
+ return 0; /* return if no SCSI interface found */
+
+ /* Mediavision has some new model boards that return ID bits
+ * that indicate a SCSI interface but don't actually have one (LMS). We'll
+ * put in an additional test to try and weed them out.
+ */
+
+ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */
+ NCR5380_write( MODE_REG, 0x20 ); /* Is it really SCSI? */
+ if( NCR5380_read( MODE_REG ) != 0x20 ) /* Write to a reg. */
+ return 0; /* and try to read */
+ NCR5380_write( MODE_REG, 0x00 ); /* it back. */
+ if( NCR5380_read( MODE_REG ) != 0x00 )
+ return 0;
+
+ return 1;
+}
+
+
+/*
+ * Function : pas16_setup(char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ *
+ */
+
+void pas16_setup(char *str, int *ints) {
+ static int commandline_current = 0;
+ int i;
+ if (ints[0] != 2)
+ printk("pas16_setup : usage pas16=io_port,irq\n");
+ else
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].io_port = (unsigned short) ints[1];
+ overrides[commandline_current].irq = ints[2];
+ for (i = 0; i < NO_BASES; ++i)
+ if (bases[i].io_port == (unsigned short) ints[1]) {
+ bases[i].noauto = 1;
+ break;
+ }
+ ++commandline_current;
+ }
+}
+
+/*
+ * Function : int pas16_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : detects and initializes PAS16 controllers
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+int pas16_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0;
+ static unsigned short current_base = 0;
+ struct Scsi_Host *instance;
+ unsigned short io_port;
+ int count;
+
+ tpnt->proc_dir = &proc_scsi_pas16;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ io_port = 0;
+
+ if (overrides[current_override].io_port)
+ {
+ io_port = overrides[current_override].io_port;
+ enable_board( current_override, io_port );
+ init_board( io_port, overrides[current_override].irq, 1 );
+ }
+ else
+ for (; !io_port && (current_base < NO_BASES); ++current_base) {
+#if (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : probing io_port %04x\n", (unsigned int) bases[current_base].io_port);
+#endif
+ if ( !bases[current_base].noauto &&
+ pas16_hw_detect( current_base ) ){
+ io_port = bases[current_base].io_port;
+ init_board( io_port, default_irqs[ current_base ], 0 );
+#if (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : detected board.\n");
+#endif
+ }
+ }
+
+
+#if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : io_port = %04x\n", (unsigned int) io_port);
+#endif
+
+ if (!io_port)
+ break;
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->io_port = io_port;
+
+ NCR5380_init(instance, 0);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
+
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, pas16_intr, SA_INTERRUPT, "pas16")) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ /* Disable 5380 interrupts, leave drive params the same */
+ outb( 0x4d, io_port + SYS_CONFIG_4 );
+ outb( (inb(io_port + IO_CONFIG_3) & 0x0f), io_port + IO_CONFIG_3 );
+ }
+
+#if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT)
+ printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
+#endif
+
+ printk("scsi%d : at 0x%04x", instance->host_no, (int)
+ instance->io_port);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, PAS16_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : int pas16_biosparam(Disk *disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, but I could be incorrect. Someone
+ * using hard disks on a Trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+ */
+
+int pas16_biosparam(Disk * disk, kdev_t dev, int * ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11; /* I think I have it as /(32*64) */
+ if( ip[2] > 1024 ) { /* yes, >, not >= */
+ ip[0]=255;
+ ip[1]=63;
+ ip[2]=size/(63*255);
+ if( ip[2] > 1023 ) /* yes >1023... */
+ ip[2] = 1023;
+ }
+
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * unsigned char *dst, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
+ * dst
+ *
+ * Inputs : dst = destination, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
+ int len) {
+ register unsigned char *d = dst;
+ register unsigned short reg = (unsigned short) (instance->io_port +
+ P_DATA_REG_OFFSET);
+ register int i = len;
+
+ while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) );
+
+ insb( reg, d, i );
+
+ if ( inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
+ outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
+ printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
+ instance->host_no);
+ return -1;
+ } else
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * unsigned char *src, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
+ * src
+ *
+ * Inputs : src = source, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src,
+ int len) {
+ register unsigned char *s = src;
+ register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET);
+ register int i = len;
+
+ while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) );
+
+ outsb( reg, s, i );
+
+ if (inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
+ outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
+ printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
+ instance->host_no);
+ return -1;
+ } else
+ return 0;
+}
+
+#ifdef MACH
+#include "NCR5380.src"
+#else
+#include "NCR5380.c"
+#endif
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = MV_PAS16;
+
+#include <linux/module.h>
+#include "scsi_module.c"
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/pas16.h b/i386/i386at/gpl/linux/scsi/pas16.h
new file mode 100644
index 00000000..9733792b
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/pas16.h
@@ -0,0 +1,193 @@
+/*
+ * This driver adapted from Drew Eckhardt's Trantor T128 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * ( Based on T128 - DISTRIBUTION RELEASE 3. )
+ *
+ * Modified to work with the Pro Audio Spectrum/Studio 16
+ * by John Weidman.
+ *
+ *
+ * For more information, please consult
+ *
+ * Media Vision
+ * (510) 770-8600
+ * (800) 348-7116
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+
+#ifndef PAS16_H
+#define PAS16_H
+
+#define PAS16_PUBLIC_RELEASE 3
+
+#define PDEBUG_INIT 0x1
+#define PDEBUG_TRANSFER 0x2
+
+#define PAS16_DEFAULT_BASE_1 0x388
+#define PAS16_DEFAULT_BASE_2 0x384
+#define PAS16_DEFAULT_BASE_3 0x38c
+#define PAS16_DEFAULT_BASE_4 0x288
+
+#define PAS16_DEFAULT_BOARD_1_IRQ 10
+#define PAS16_DEFAULT_BOARD_2_IRQ 12
+#define PAS16_DEFAULT_BOARD_3_IRQ 14
+#define PAS16_DEFAULT_BOARD_4_IRQ 15
+
+
+/*
+ * The Pro Audio Spectrum boards are I/O mapped. They use a Zilog 5380
+ * SCSI controller, which is the equivalent of NCR's 5380. "Pseudo-DMA"
+ * architecture is used, where a PAL drives the DMA signals on the 5380
+ * allowing fast, blind transfers with proper handshaking.
+ */
+
+
+/* The Time-out Counter register is used to safe-guard against a stuck
+ * bus (in the case of RDY driven handshake) or a stuck byte (if 16-Bit
+ * DMA conversion is used). The counter uses a 28.224MHz clock
+ * divided by 14 as its clock source. In the case of a stuck byte in
+ * the holding register, an interrupt is generated (and mixed with the
+ * one with the drive) using the CD-ROM interrupt pointer.
+ */
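+/* Illustrative arithmetic: 28.224 MHz / 14 is about 2.016 MHz, so one
+ timeout count is roughly half a microsecond. */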
+
+#define P_TIMEOUT_COUNTER_REG 0x4000
+#define P_TC_DISABLE 0x80 /* Set to 0 to enable timeout int. */
+ /* Bits D6-D0 contain timeout count */
+
+
+#define P_TIMEOUT_STATUS_REG_OFFSET 0x4001
+#define P_TS_TIM 0x80 /* check timeout status */
+ /* Bits D6-D4 N/U */
+#define P_TS_ARM_DRQ_INT 0x08 /* Arm DRQ Int. When set high,
+ * the next rising edge will
+ * cause a CD-ROM interrupt.
+ * When set low, the interrupt
+ * will be cleared. There is
+ * no status available for
+ * this interrupt.
+ */
+#define P_TS_ENABLE_TO_ERR_INTERRUPT /* Enable timeout error int. */
+#define P_TS_ENABLE_WAIT /* Enable Wait */
+
+#define P_TS_CT 0x01 /* clear timeout. Note: writing
+ * to this register clears the
+ * timeout error int. or status
+ */
+
+
+/*
+ * The data register reads/writes to/from the 5380 in pseudo-DMA mode
+ */
+
+#define P_DATA_REG_OFFSET 0x5c00 /* rw */
+
+#define P_STATUS_REG_OFFSET 0x5c01 /* ro */
+#define P_ST_RDY 0x80 /* 5380 DDRQ Status */
+
+#define P_IRQ_STATUS 0x5c03
+#define P_IS_IRQ 0x80 /* DIRQ status */
+
+#define PCB_CONFIG 0x803
+#define MASTER_ADDRESS_PTR 0x9a01 /* Fixed position - no relo */
+#define SYS_CONFIG_4 0x8003
+#define WAIT_STATE 0xbc00
+#define OPERATION_MODE_1 0xec03
+#define IO_CONFIG_3 0xf002
+
+
+#ifndef ASM
+int pas16_abort(Scsi_Cmnd *);
+int pas16_biosparam(Disk *, kdev_t, int*);
+int pas16_detect(Scsi_Host_Template *);
+int pas16_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int pas16_reset(Scsi_Cmnd *);
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 32
+#endif
+
+/*
+ * I hadn't thought of this with the earlier drivers - but to prevent
+ * macro definition conflicts, we shouldn't define all of the internal
+ * macros when this is being used solely for the host stub.
+ */
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#define MV_PAS16 {NULL, NULL, NULL, NULL, \
+ "Pro Audio Spectrum-16 SCSI", \
+ pas16_detect, NULL, NULL, \
+ NULL, pas16_queue_command, pas16_abort, pas16_reset, NULL, \
+ pas16_biosparam, \
+ /* can queue */ CAN_QUEUE, /* id */ 7, SG_ALL, \
+ /* cmd per lun */ CMD_PER_LUN , 0, 0, DISABLE_CLUSTERING}
+
+#endif
+#ifndef HOSTS_C
+
+#define NCR5380_implementation_fields \
+ volatile unsigned short io_port
+
+#define NCR5380_local_declare() \
+ volatile unsigned short io_port
+
+#define NCR5380_setup(instance) \
+ io_port = (instance)->io_port
+
+#define PAS16_io_port(reg) ( io_port + pas16_offset[(reg)] )
+
+#if !(PDEBUG & PDEBUG_TRANSFER)
+#define NCR5380_read(reg) ( inb(PAS16_io_port(reg)) )
+#define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) )
+#else
+#define NCR5380_read(reg) \
+ (((unsigned char) printk("scsi%d : read register %d at io_port %04x\n"\
+ , instance->host_no, (reg), PAS16_io_port(reg))), inb( PAS16_io_port(reg)) )
+
+#define NCR5380_write(reg, value) \
+ (printk("scsi%d : write %02x to register %d at io_port %04x\n", \
+ instance->host_no, (value), (reg), PAS16_io_port(reg)), \
+ outb( (value),PAS16_io_port(reg) ) )
+
+#endif
+
+
+#define NCR5380_intr pas16_intr
+#define NCR5380_queue_command pas16_queue_command
+#define NCR5380_abort pas16_abort
+#define NCR5380_reset pas16_reset
+
+/* 15 14 12 10 7 5 3
+ 1101 0100 1010 1000 */
+
+#define PAS16_IRQS 0xd4a8
+
+#endif /* else def HOSTS_C */
+#endif /* ndef ASM */
+#endif /* PAS16_H */
diff --git a/i386/i386at/gpl/linux/scsi/qlogic.c b/i386/i386at/gpl/linux/scsi/qlogic.c
new file mode 100644
index 00000000..8333275b
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/qlogic.c
@@ -0,0 +1,678 @@
+/*----------------------------------------------------------------*/
+/*
+ Qlogic linux driver - work in progress. No Warranty express or implied.
+ Use at your own risk. Support Tort Reform so you won't have to read all
+ these silly disclaimers.
+
+ Copyright 1994, Tom Zerucha.
+ zerucha@shell.portal.com
+
+ Additional Code, and much appreciated help by
+ Michael A. Griffith
+ grif@cs.ucr.edu
+
+ Thanks to Eric Youngdale and Dave Hinds for loadable module and PCMCIA
+ help respectively, and for suffering through my foolishness during the
+ debugging process.
+
+ Reference Qlogic FAS408 Technical Manual, 53408-510-00A, May 10, 1994
+ (you can reference it, but it is incomplete and inaccurate in places)
+
+ Version 0.43 4/6/95 - kernel 1.2.0+, pcmcia 2.5.4+
+
+ Functions as standalone, loadable, and PCMCIA driver, the latter from
+ Dave Hind's PCMCIA package.
+
+ Redistributable under terms of the GNU Public License
+
+*/
+/*----------------------------------------------------------------*/
+/* Configuration */
+
+/* Set the following to 2 to use normal interrupt (active high/totempole-
+ tristate), otherwise use 0 (REQUIRED FOR PCMCIA) for active low, open
+ drain */
+#define QL_INT_ACTIVE_HIGH 2
+
+/* Set the following to 1 to enable the use of interrupts. Note that 0 tends
+ to be more stable, but slower (or ties up the system more) */
+#define QL_USE_IRQ 1
+
+/* Set the following to max out the speed of the PIO PseudoDMA transfers,
+ again, 0 tends to be slower, but more stable. */
+#define QL_TURBO_PDMA 1
+
+/* This should be 1 to enable parity detection */
+#define QL_ENABLE_PARITY 1
+
+/* This will reset all devices when the driver is initialized (during bootup).
+ The other linux drivers don't do this, but the DOS drivers do, and after
+ using DOS or some kind of crash or lockup this will bring things back
+ without requiring a cold boot. It does take some time to recover from a
+ reset, so it is slower, and I have seen timeouts so that devices weren't
+ recognized when this was set. */
+#define QL_RESET_AT_START 0
+
+/* crystal frequency in megahertz (for offset 5 and 9)
+ Please set this for your card. Most Qlogic cards are 40 Mhz. The
+ Control Concepts ISA (not VLB) is 24 Mhz */
+#define XTALFREQ 40
+
+/**********/
+/* DANGER! modify these at your own risk */
+/* SLOWCABLE can usually be reset to zero if you have a clean setup and
+ proper termination. The rest are for synchronous transfers and other
+ advanced features if your device can transfer faster than 5Mb/sec.
+ If you are really curious, email me for a quick howto until I have
+ something official */
+/**********/
+
+/*****/
+/* config register 1 (offset 8) options */
+/* This needs to be set to 1 if your cabling is long or noisy */
+#define SLOWCABLE 1
+
+/*****/
+/* offset 0xc */
+/* This will set fast (10Mhz) synchronous timing when set to 1
+ For this to have an effect, FASTCLK must also be 1 */
+#define FASTSCSI 0
+
+/* This when set to 1 will set a faster sync transfer rate */
+#define FASTCLK 0
+/*(XTALFREQ>25?1:0)*/
+
+/*****/
+/* offset 6 */
+/* This is the sync transfer divisor, XTALFREQ/X will be the maximum
+ achievable data rate (assuming the rest of the system is capable
+ and set properly) */
+#define SYNCXFRPD 5
+/*(XTALFREQ/5)*/
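+/* Illustrative: with XTALFREQ 40 and SYNCXFRPD 5 the ceiling works out to
+ 40/5 = 8 MB/sec. */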
+
+/*****/
+/* offset 7 */
+/* This is the count of how many synchronous transfers can take place
+ i.e. how many reqs can occur before an ack is given.
+ The maximum value for this is 15, the upper bits can modify
+ REQ/ACK assertion and deassertion during synchronous transfers
+ If this is 0, the bus will only transfer asynchronously */
+#define SYNCOFFST 0
+/* for the curious, bits 7&6 control the deassertion delay in 1/2 cycles
+ of the 40Mhz clock. If FASTCLK is 1, specifying 01 (1/2) will
+ cause the deassertion to be early by 1/2 clock. Bits 5&4 control
+ the assertion delay, also in 1/2 clocks (FASTCLK is ignored here). */
+
+/*----------------------------------------------------------------*/
+#ifdef PCMCIA
+#undef QL_INT_ACTIVE_HIGH
+#define QL_INT_ACTIVE_HIGH 0
+#define MODULE
+#endif
+
+#include <linux/module.h>
+
+#ifdef PCMCIA
+#undef MODULE
+#endif
+
+#include <linux/blk.h> /* to get disk capacity */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/unistd.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include "sd.h"
+#include "hosts.h"
+#include "qlogic.h"
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_qlogic = {
+ PROC_SCSI_QLOGIC, 6, "qlogic",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/*----------------------------------------------------------------*/
+/* driver state info, local to driver */
+static int qbase = 0; /* Port */
+static int qinitid; /* initiator ID */
+static int qabort; /* Flag to cause an abort */
+static int qlirq = -1; /* IRQ being used */
+static char qinfo[80]; /* description */
+static Scsi_Cmnd *qlcmd; /* current command being processed */
+
+static int qlcfg5 = ( XTALFREQ << 5 ); /* 15625/512 */
+static int qlcfg6 = SYNCXFRPD;
+static int qlcfg7 = SYNCOFFST;
+static int qlcfg8 = ( SLOWCABLE << 7 ) | ( QL_ENABLE_PARITY << 4 );
+static int qlcfg9 = ( ( XTALFREQ + 4 ) / 5 );
+static int qlcfgc = ( FASTCLK << 3 ) | ( FASTSCSI << 4 );
+
+/*----------------------------------------------------------------*/
+/* The qlogic card uses two register maps - These macros select which one */
+#define REG0 ( outb( inb( qbase + 0xd ) & 0x7f , qbase + 0xd ), outb( 4 , qbase + 0xd ))
+#define REG1 ( outb( inb( qbase + 0xd ) | 0x80 , qbase + 0xd ), outb( 0xb4 | QL_INT_ACTIVE_HIGH , qbase + 0xd ))
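+/* Descriptive note (not in the original): each macro first rewrites bit 7
+ of the byte at qbase + 0xd (clear selects map 0, set selects map 1) and
+ then writes that map's configuration value back to the same address. */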
+
+/* following is watchdog timeout in microseconds */
+#define WATCHDOG 5000000
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+ where something crashed or gets stuck at and as a simple profiler) */
+
+#if 0
+#define rtrc(i) {inb(0x3da);outb(0x31,0x3c0);outb((i),0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+
+/*----------------------------------------------------------------*/
+/* local functions */
+/*----------------------------------------------------------------*/
+static void ql_zap(void);
+/* error recovery - reset everything */
+void ql_zap()
+{
+int x;
+unsigned long flags;
+ save_flags( flags );
+ cli();
+ x = inb(qbase + 0xd);
+ REG0;
+ outb(3, qbase + 3); /* reset SCSI */
+ outb(2, qbase + 3); /* reset chip */
+ if (x & 0x80)
+ REG1;
+ restore_flags( flags );
+}
+
+/*----------------------------------------------------------------*/
+/* do pseudo-dma */
+static int ql_pdma(int phase, char *request, int reqlen)
+{
+int j;
+ j = 0;
+ if (phase & 1) { /* in */
+#if QL_TURBO_PDMA
+rtrc(4)
+ /* empty fifo in large chunks */
+ if( reqlen >= 128 && (inb( qbase + 8 ) & 2) ) { /* full */
+ insl( qbase + 4, request, 32 );
+ reqlen -= 128;
+ request += 128;
+ }
+ while( reqlen >= 84 && !( j & 0xc0 ) ) /* 2/3 */
+ if( (j=inb( qbase + 8 )) & 4 ) {
+ insl( qbase + 4, request, 21 );
+ reqlen -= 84;
+ request += 84;
+ }
+ if( reqlen >= 44 && (inb( qbase + 8 ) & 8) ) { /* 1/3 */
+ insl( qbase + 4, request, 11 );
+ reqlen -= 44;
+ request += 44;
+ }
+#endif
+ /* until both empty and int (or until reqlen is 0) */
+rtrc(7)
+ j = 0;
+ while( reqlen && !( (j & 0x10) && (j & 0xc0) ) ) {
+ /* while bytes to receive and not empty */
+ j &= 0xc0;
+ while ( reqlen && !( (j=inb(qbase + 8)) & 0x10 ) ) {
+ *request++ = inb(qbase + 4);
+ reqlen--;
+ }
+ if( j & 0x10 )
+ j = inb(qbase+8);
+
+ }
+ }
+ else { /* out */
+#if QL_TURBO_PDMA
+rtrc(4)
+ if( reqlen >= 128 && inb( qbase + 8 ) & 0x10 ) { /* empty */
+ outsl(qbase + 4, request, 32 );
+ reqlen -= 128;
+ request += 128;
+ }
+ while( reqlen >= 84 && !( j & 0xc0 ) ) /* 1/3 */
+ if( !((j=inb( qbase + 8 )) & 8) ) {
+ outsl( qbase + 4, request, 21 );
+ reqlen -= 84;
+ request += 84;
+ }
+ if( reqlen >= 40 && !(inb( qbase + 8 ) & 4 ) ) { /* 2/3 */
+ outsl( qbase + 4, request, 10 );
+ reqlen -= 40;
+ request += 40;
+ }
+#endif
+ /* until full and int (or until reqlen is 0) */
+rtrc(7)
+ j = 0;
+ while( reqlen && !( (j & 2) && (j & 0xc0) ) ) {
+ /* while bytes to send and not full */
+ while ( reqlen && !( (j=inb(qbase + 8)) & 2 ) ) {
+ outb(*request++, qbase + 4);
+ reqlen--;
+ }
+ if( j & 2 )
+ j = inb(qbase+8);
+ }
+ }
+/* maybe return reqlen */
+ return inb( qbase + 8 ) & 0xc0;
+}
+
+/*----------------------------------------------------------------*/
+/* wait for interrupt flag (polled - not real hardware interrupt) */
+static int ql_wai(void)
+{
+int i,k;
+ k = 0;
+ i = jiffies + WATCHDOG;
+ while ( i > jiffies && !qabort && !((k = inb(qbase + 4)) & 0xe0))
+ barrier();
+ if (i <= jiffies)
+ return (DID_TIME_OUT);
+ if (qabort)
+ return (qabort == 1 ? DID_ABORT : DID_RESET);
+ if (k & 0x60)
+ ql_zap();
+ if (k & 0x20)
+ return (DID_PARITY);
+ if (k & 0x40)
+ return (DID_ERROR);
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+/* initiate scsi command - queueing handler */
+static void ql_icmd(Scsi_Cmnd * cmd)
+{
+unsigned int i;
+unsigned long flags;
+
+ qabort = 0;
+
+ save_flags( flags );
+ cli();
+ REG0;
+/* clearing of interrupts and the fifo is needed */
+ inb(qbase + 5); /* clear interrupts */
+ if (inb(qbase + 5)) /* if still interrupting */
+ outb(2, qbase + 3); /* reset chip */
+ else if (inb(qbase + 7) & 0x1f)
+ outb(1, qbase + 3); /* clear fifo */
+ while (inb(qbase + 5)); /* clear ints */
+ REG1;
+ outb(1, qbase + 8); /* set for PIO pseudo DMA */
+ outb(0, qbase + 0xb); /* disable ints */
+ inb(qbase + 8); /* clear int bits */
+ REG0;
+ outb(0x40, qbase + 0xb); /* enable features */
+
+/* configurables */
+ outb( qlcfgc , qbase + 0xc);
+/* config: no reset interrupt, (initiator) bus id */
+ outb( 0x40 | qlcfg8 | qinitid, qbase + 8);
+ outb( qlcfg7 , qbase + 7 );
+ outb( qlcfg6 , qbase + 6 );
+/**/
+ outb(qlcfg5, qbase + 5); /* select timer */
+ outb(qlcfg9 & 7, qbase + 9); /* prescaler */
+/* outb(0x99, qbase + 5); */
+ outb(cmd->target, qbase + 4);
+
+ for (i = 0; i < cmd->cmd_len; i++)
+ outb(cmd->cmnd[i], qbase + 2);
+ qlcmd = cmd;
+ outb(0x41, qbase + 3); /* select and send command */
+ restore_flags( flags );
+}
+/*----------------------------------------------------------------*/
+/* process scsi command - usually after interrupt */
+static unsigned int ql_pcmd(Scsi_Cmnd * cmd)
+{
+unsigned int i, j, k;
+unsigned int result; /* ultimate return result */
+unsigned int status; /* scsi returned status */
+unsigned int message; /* scsi returned message */
+unsigned int phase; /* recorded scsi phase */
+unsigned int reqlen; /* total length of transfer */
+struct scatterlist *sglist; /* scatter-gather list pointer */
+unsigned int sgcount; /* sg counter */
+
+rtrc(1)
+ j = inb(qbase + 6);
+ i = inb(qbase + 5);
+ if (i == 0x20) {
+ return (DID_NO_CONNECT << 16);
+ }
+ i |= inb(qbase + 5); /* the 0x10 bit can be set after the 0x08 */
+ if (i != 0x18) {
+ printk("Ql:Bad Interrupt status:%02x\n", i);
+ ql_zap();
+ return (DID_BAD_INTR << 16);
+ }
+ j &= 7; /* j = inb( qbase + 7 ) >> 5; */
+/* correct status is supposed to be step 4 */
+/* it sometimes returns step 3 but with 0 bytes left to send */
+/* We can try stuffing the FIFO with the max each time, but we will get a
+ sequence of 3 if any bytes are left (but we do flush the FIFO anyway) */
+ if(j != 3 && j != 4) {
+ printk("Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n", j, i, inb( qbase+7 ) & 0x1f );
+ ql_zap();
+ return (DID_ERROR << 16);
+ }
+ result = DID_OK;
+ if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
+ outb(1, qbase + 3); /* clear fifo */
+/* note that request_bufflen is the total xfer size when sg is used */
+ reqlen = cmd->request_bufflen;
+/* note that it won't work if transfers > 16M are requested */
+ if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */
+rtrc(2)
+ outb(reqlen, qbase); /* low-mid xfer cnt */
+ outb(reqlen >> 8, qbase+1); /* low-mid xfer cnt */
+ outb(reqlen >> 16, qbase + 0xe); /* high xfer cnt */
+ outb(0x90, qbase + 3); /* command do xfer */
+/* PIO pseudo DMA to buffer or sglist */
+ REG1;
+ if (!cmd->use_sg)
+ ql_pdma(phase, cmd->request_buffer, cmd->request_bufflen);
+ else {
+ sgcount = cmd->use_sg;
+ sglist = cmd->request_buffer;
+ while (sgcount--) {
+ if (qabort) {
+ REG0;
+ return ((qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ }
+ if (ql_pdma(phase, sglist->address, sglist->length))
+ break;
+ sglist++;
+ }
+ }
+ REG0;
+rtrc(2)
+/* wait for irq (split into second state of irq handler if this can take time) */
+ if ((k = ql_wai()))
+ return (k << 16);
+ k = inb(qbase + 5); /* should be 0x10, bus service */
+ }
+/*** Enter Status (and Message In) Phase ***/
+ k = jiffies + WATCHDOG;
+ while ( k > jiffies && !qabort && !(inb(qbase + 4) & 6)); /* wait for status phase */
+ if ( k <= jiffies ) {
+ ql_zap();
+ return (DID_TIME_OUT << 16);
+ }
+ while (inb(qbase + 5)); /* clear pending ints */
+ if (qabort)
+ return ((qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ outb(0x11, qbase + 3); /* get status and message */
+ if ((k = ql_wai()))
+ return (k << 16);
+ i = inb(qbase + 5); /* get chip irq stat */
+ j = inb(qbase + 7) & 0x1f; /* and bytes rec'd */
+ status = inb(qbase + 2);
+ message = inb(qbase + 2);
+/* should get function complete int if Status and message, else bus serv if only status */
+ if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) {
+ printk("Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j);
+ result = DID_ERROR;
+ }
+ outb(0x12, qbase + 3); /* done, disconnect */
+rtrc(1)
+ if ((k = ql_wai()))
+ return (k << 16);
+/* should get bus service interrupt and disconnect interrupt */
+ i = inb(qbase + 5); /* should be bus service */
+ while (!qabort && ((i & 0x20) != 0x20)) {
+ barrier();
+ i |= inb(qbase + 5);
+ }
+rtrc(0)
+ if (qabort)
+ return ((qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ return (result << 16) | (message << 8) | (status & STATUS_MASK);
+}
+
+#if QL_USE_IRQ
+/*----------------------------------------------------------------*/
+/* interrupt handler */
+static void ql_ihandl(int irq, struct pt_regs * regs)
+{
+Scsi_Cmnd *icmd;
+ REG0;
+ if (!(inb(qbase + 4) & 0x80)) /* false alarm? */
+ return;
+ if (qlcmd == NULL) { /* no command to process? */
+ int i;
+ i = 16;
+ while (i-- && inb(qbase + 5)); /* maybe also ql_zap() */
+ return;
+ }
+ icmd = qlcmd;
+ icmd->result = ql_pcmd(icmd);
+ qlcmd = NULL;
+/* if result is CHECK CONDITION done calls qcommand to request sense */
+ (icmd->scsi_done) (icmd);
+}
+#endif
+
+/*----------------------------------------------------------------*/
+/* global functions */
+/*----------------------------------------------------------------*/
+/* non queued command */
+#if QL_USE_IRQ
+static void qlidone(Scsi_Cmnd * cmd) {}; /* null function */
+#endif
+
+/* command process */
+int qlogic_command(Scsi_Cmnd * cmd)
+{
+int k;
+#if QL_USE_IRQ
+ if (qlirq >= 0) {
+ qlogic_queuecommand(cmd, qlidone);
+ while (qlcmd != NULL);
+ return cmd->result;
+ }
+#endif
+/* non-irq version */
+ if (cmd->target == qinitid)
+ return (DID_BAD_TARGET << 16);
+ ql_icmd(cmd);
+ if ((k = ql_wai()))
+ return (k << 16);
+ return ql_pcmd(cmd);
+
+}
+
+#if QL_USE_IRQ
+/*----------------------------------------------------------------*/
+/* queued command */
+int qlogic_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ if(cmd->target == qinitid) {
+ cmd->result = DID_BAD_TARGET << 16;
+ done(cmd);
+ return 0;
+ }
+
+ cmd->scsi_done = done;
+/* wait for the last command's interrupt to finish */
+ while (qlcmd != NULL)
+ barrier();
+ ql_icmd(cmd);
+ return 0;
+}
+#else
+int qlogic_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ return 1;
+}
+#endif
+
+#ifdef PCMCIA
+/*----------------------------------------------------------------*/
+/* allow PCMCIA code to preset the port */
+/* port should be 0 and irq -1, respectively, for autoprobing */
+void qlogic_preset(int port, int irq)
+{
+ qbase=port;
+ qlirq=irq;
+}
+#endif
+
+/*----------------------------------------------------------------*/
+/* look for qlogic card and init if found */
+int qlogic_detect(Scsi_Host_Template * host)
+{
+int i, j; /* these are only used by IRQ detect */
+int qltyp; /* type of chip */
+struct Scsi_Host *hreg; /* registered host structure */
+unsigned long flags;
+
+host->proc_dir = &proc_scsi_qlogic;
+
+/* Qlogic Cards only exist at 0x230 or 0x330 (the chip itself decodes the
+ address) - I check 230 first since MIDI cards are typically at 330
+
+ Theoretically, two Qlogic cards can coexist in the same system. This
+ should work by simply using this as a loadable module for the second
+ card, but I haven't tested this.
+*/
+
+ if( !qbase ) {
+ for (qbase = 0x230; qbase < 0x430; qbase += 0x100) {
+ if( check_region( qbase , 0x10 ) )
+ continue;
+ REG1;
+ if ( ( (inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7 )
+ && ( (inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7 ) )
+ break;
+ }
+ if (qbase == 0x430)
+ return 0;
+ }
+ else
+ printk( "Ql: Using preset base address of %03x\n", qbase );
+
+ qltyp = inb(qbase + 0xe) & 0xf8;
+ qinitid = host->this_id;
+ if (qinitid < 0)
+ qinitid = 7; /* if no ID, use 7 */
+ outb(1, qbase + 8); /* set for PIO pseudo DMA */
+ REG0;
+ outb(0x40 | qlcfg8 | qinitid, qbase + 8); /* (ini) bus id, disable scsi rst */
+ outb(qlcfg5, qbase + 5); /* select timer */
+ outb(qlcfg9, qbase + 9); /* prescaler */
+#if QL_RESET_AT_START
+ outb( 3 , qbase + 3 );
+ REG1;
+ while( inb( qbase + 0xf ) & 4 );
+ REG0;
+#endif
+#if QL_USE_IRQ
+/* IRQ probe - toggle pin and check request pending */
+
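+/*
+ * Descriptive note (not in the original): the probe forces an interrupt
+ * with an illegal command, then toggles the card's INT pin off and on while
+ * sampling the 8259 interrupt request registers at ports 0x20/0xa0; the
+ * only bit that follows the toggling is the IRQ line the card is wired to.
+ */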
+ if( qlirq == -1 ) {
+ save_flags( flags );
+ cli();
+ i = 0xffff;
+ j = 3;
+ outb(0x90, qbase + 3); /* illegal command - cause interrupt */
+ REG1;
+ outb(10, 0x20); /* access pending interrupt map */
+ outb(10, 0xa0);
+ while (j--) {
+ outb(0xb0 | QL_INT_ACTIVE_HIGH , qbase + 0xd); /* int pin off */
+ i &= ~(inb(0x20) | (inb(0xa0) << 8)); /* find IRQ off */
+ outb(0xb4 | QL_INT_ACTIVE_HIGH , qbase + 0xd); /* int pin on */
+ i &= inb(0x20) | (inb(0xa0) << 8); /* find IRQ on */
+ }
+ REG0;
+ while (inb(qbase + 5)); /* purge int */
+ j = -1;
+ while (i) /* find on bit */
+ i >>= 1, j++; /* should check for exactly 1 on */
+ qlirq = j;
+ restore_flags( flags );
+ }
+ else
+ printk( "Ql: Using preset IRQ %d\n", qlirq );
+
+ if (qlirq >= 0 && !request_irq(qlirq, ql_ihandl, 0, "qlogic"))
+ host->can_queue = 1;
+#endif
+ request_region( qbase , 0x10 ,"qlogic");
+ hreg = scsi_register( host , 0 ); /* no host data */
+ hreg->io_port = qbase;
+ hreg->n_io_port = 16;
+ hreg->dma_channel = -1;
+ if( qlirq != -1 )
+ hreg->irq = qlirq;
+
+ sprintf(qinfo, "Qlogic Driver version 0.43, chip %02X at %03X, IRQ %d, TPdma:%d",
+ qltyp, qbase, qlirq, QL_TURBO_PDMA );
+ host->name = qinfo;
+
+ return 1;
+}
+
+/*----------------------------------------------------------------*/
+/* return bios parameters */
+int qlogic_biosparam(Disk * disk, kdev_t dev, int ip[])
+{
+/* This should mimic the DOS Qlogic driver's behavior exactly */
+ ip[0] = 0x40;
+ ip[1] = 0x20;
+ ip[2] = disk->capacity / (ip[0] * ip[1]);
+ if (ip[2] > 1024) {
+ ip[0] = 0xff;
+ ip[1] = 0x3f;
+ ip[2] = disk->capacity / (ip[0] * ip[1]);
+ if (ip[2] > 1023)
+ ip[2] = 1023;
+ }
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+/* abort command in progress */
+int qlogic_abort(Scsi_Cmnd * cmd)
+{
+ qabort = 1;
+ ql_zap();
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+/* reset SCSI bus */
+int qlogic_reset(Scsi_Cmnd * cmd)
+{
+ qabort = 2;
+ ql_zap();
+ return 1;
+}
+
+/*----------------------------------------------------------------*/
+/* return info string */
+const char *qlogic_info(struct Scsi_Host * host)
+{
+ return qinfo;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = QLOGIC;
+
+#include "scsi_module.c"
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/qlogic.h b/i386/i386at/gpl/linux/scsi/qlogic.h
new file mode 100644
index 00000000..0ff119ae
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/qlogic.h
@@ -0,0 +1,40 @@
+#ifndef _QLOGIC_H
+#define _QLOGIC_H
+
+int qlogic_detect(Scsi_Host_Template * );
+const char * qlogic_info(struct Scsi_Host *);
+int qlogic_command(Scsi_Cmnd *);
+int qlogic_queuecommand(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
+int qlogic_abort(Scsi_Cmnd *);
+int qlogic_reset(Scsi_Cmnd *);
+int qlogic_biosparam(Disk *, kdev_t, int[]);
+
+#ifndef NULL
+#define NULL (0)
+#endif
+
+#define QLOGIC { \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ qlogic_detect, \
+ NULL, \
+ qlogic_info, \
+ qlogic_command, \
+ qlogic_queuecommand, \
+ qlogic_abort, \
+ qlogic_reset, \
+ NULL, \
+ qlogic_biosparam, \
+ 0, \
+ -1, \
+ SG_ALL, \
+ 1, \
+ 0, \
+ 0, \
+ DISABLE_CLUSTERING \
+}
+
+#endif /* _QLOGIC_H */
diff --git a/i386/i386at/gpl/linux/scsi/scsi.c b/i386/i386at/gpl/linux/scsi/scsi.c
new file mode 100644
index 00000000..85d234e1
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/scsi.c
@@ -0,0 +1,3204 @@
+/*
+ * scsi.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * generic mid-level SCSI driver
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Bug correction thanks go to :
+ * Rik Faith <faith@cs.unc.edu>
+ * Tommy Thorn <tthorn>
+ * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Native multichannel and wide scsi support added
+ * by Michael Neuffer neuffer@goofy.zdv.uni-mainz.de
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include<linux/stat.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+
+#include <linux/config.h>
+
+#undef USE_STATIC_SCSI_MEMORY
+
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/i386/i386at/gpl/linux/scsi/Attic/scsi.c,v 1.1.1.1 1997/02/25 21:27:50 thomas Exp $";
+*/
+
+
+/* Command groups 3 and 4 are reserved and should never be used. */
+const unsigned char scsi_command_size[8] = { 6, 10, 10, 12, 12, 12, 10, 10 };
+
+#define INTERNAL_ERROR (panic ("Internal error in file %s, line %d.\n", __FILE__, __LINE__))
+
+/*
+ * PAGE_SIZE must be a multiple of the sector size (512). True
+ * for all reasonably recent architectures (even the VAX...).
+ */
+#define SECTOR_SIZE 512
+#define SECTORS_PER_PAGE (PAGE_SIZE/SECTOR_SIZE)
+
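+/* E.g. with a 4096-byte PAGE_SIZE this is 8, so the single unsigned char
+ below is enough for the per-page free-sector bitmap. */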
+#if SECTORS_PER_PAGE <= 8
+ typedef unsigned char FreeSectorBitmap;
+#elif SECTORS_PER_PAGE <= 32
+ typedef unsigned int FreeSectorBitmap;
+#else
+# error You lose.
+#endif
+
+static void scsi_done (Scsi_Cmnd *SCpnt);
+static int update_timeout (Scsi_Cmnd *, int);
+static void print_inquiry(unsigned char *data);
+static void scsi_times_out (Scsi_Cmnd * SCpnt, int pid);
+static int scan_scsis_single (int channel,int dev,int lun,int * max_scsi_dev ,
+ Scsi_Device ** SDpnt, Scsi_Cmnd * SCpnt,
+ struct Scsi_Host *shpnt, char * scsi_result);
+void scsi_build_commandblocks(Scsi_Device * SDpnt);
+
+#ifdef CONFIG_MODULES
+extern struct symbol_table scsi_symbol_table;
+#endif
+
+static FreeSectorBitmap * dma_malloc_freelist = NULL;
+static int scsi_need_isa_bounce_buffers;
+static unsigned int dma_sectors = 0;
+unsigned int dma_free_sectors = 0;
+unsigned int need_isa_buffer = 0;
+static unsigned char ** dma_malloc_pages = NULL;
+
+static int time_start;
+static int time_elapsed;
+static volatile struct Scsi_Host * host_active = NULL;
+#define SCSI_BLOCK(HOST) ((HOST->block && host_active && HOST != host_active) \
+ || (HOST->can_queue && HOST->host_busy >= HOST->can_queue))
+
+#define MAX_SCSI_DEVICE_CODE 10
+const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
+{
+ "Direct-Access ",
+ "Sequential-Access",
+ "Printer ",
+ "Processor ",
+ "WORM ",
+ "CD-ROM ",
+ "Scanner ",
+ "Optical Device ",
+ "Medium Changer ",
+ "Communications "
+};
+
+
+/*
+ * global variables :
+ * scsi_devices an array of these specifying the address for each
+ * (host, id, LUN)
+ */
+
+Scsi_Device * scsi_devices = NULL;
+
+/* Process ID of SCSI commands */
+unsigned long scsi_pid = 0;
+
+static unsigned char generic_sense[6] = {REQUEST_SENSE, 0,0,0, 255, 0};
+static void resize_dma_pool(void);
+
+/* This variable is merely a hook so that we can debug the kernel with gdb. */
+Scsi_Cmnd * last_cmnd = NULL;
+
+/* This is the pointer to the /proc/scsi code.
+ * It is only initialized to !=0 if the scsi code is present
+ */
+extern int (* dispatch_scsi_info_ptr)(int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+extern int dispatch_scsi_info(int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+
+struct proc_dir_entry proc_scsi_scsi = {
+ PROC_SCSI_SCSI, 4, "scsi",
+ S_IFREG | S_IRUGO | S_IWUSR, 2, 0, 0, 0,
+ NULL,
+ NULL, NULL,
+ NULL, NULL, NULL
+};
+
+
+/*
+ * Since the scsi do-command functions are intelligent and may need to
+ * redo a command, the flags below keep track of what has already been
+ * done for each command.
+ */
+
+#define WAS_RESET 0x01
+#define WAS_TIMEDOUT 0x02
+#define WAS_SENSE 0x04
+#define IS_RESETTING 0x08
+#define IS_ABORTING 0x10
+#define ASKED_FOR_SENSE 0x20
+
+/*
+ * This is the number of clock ticks we should wait before we time out
+ * and abort the command. This is for where the scsi.c module generates
+ * the command, not where it originates from a higher level, in which
+ * case the timeout is specified there.
+ *
+ * ABORT_TIMEOUT and RESET_TIMEOUT are the timeouts for RESET and ABORT
+ * respectively.
+ */
+
+#ifdef DEBUG_TIMEOUT
+static void scsi_dump_status(void);
+#endif
+
+
+#ifdef DEBUG
+ #define SCSI_TIMEOUT (5*HZ)
+#else
+ #define SCSI_TIMEOUT (1*HZ)
+#endif
+
+#ifdef DEBUG
+ #define SENSE_TIMEOUT SCSI_TIMEOUT
+ #define ABORT_TIMEOUT SCSI_TIMEOUT
+ #define RESET_TIMEOUT SCSI_TIMEOUT
+#else
+ #define SENSE_TIMEOUT (5*HZ/10)
+ #define RESET_TIMEOUT (5*HZ/10)
+ #define ABORT_TIMEOUT (5*HZ/10)
+#endif
+
+#define MIN_RESET_DELAY (1*HZ)
+
+/* Do not call reset on error if we just did a reset within 10 sec. */
+#define MIN_RESET_PERIOD (10*HZ)
+
+/* The following devices are known not to tolerate a lun != 0 scan for
+ * one reason or another. Some will respond to all luns, others will
+ * lock up.
+ */
+
+#define BLIST_NOLUN 0x01
+#define BLIST_FORCELUN 0x02
+#define BLIST_BORKEN 0x04
+#define BLIST_KEY 0x08
+#define BLIST_SINGLELUN 0x10
+
+struct dev_info{
+ const char * vendor;
+ const char * model;
+ const char * revision; /* Latest revision known to be bad. Not used yet */
+ unsigned flags;
+};
+
+/*
+ * This is what was previously known as the blacklist. The concept
+ * has been expanded so that we can specify other types of things we
+ * need to be aware of.
+ */
+static struct dev_info device_list[] =
+{
+{"CHINON","CD-ROM CDS-431","H42", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"CHINON","CD-ROM CDS-535","Q14", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"DENON","DRD-25X","V", BLIST_NOLUN}, /* Locks up if probed for lun != 0 */
+{"HITACHI","DK312C","CM81", BLIST_NOLUN}, /* Responds to all lun - dtg */
+{"HITACHI","DK314C","CR21" , BLIST_NOLUN}, /* responds to all lun */
+{"IMS", "CDD521/10","2.06", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"MAXTOR","XT-3280","PR02", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"MAXTOR","XT-4380S","B3C", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"MAXTOR","MXT-1240S","I1.2", BLIST_NOLUN}, /* Locks up when LUN>0 polled */
+{"MAXTOR","XT-4170S","B5A", BLIST_NOLUN}, /* Locks-up sometimes when LUN>0 polled. */
+{"MAXTOR","XT-8760S","B7B", BLIST_NOLUN}, /* guess what? */
+{"MEDIAVIS","RENO CD-ROMX2A","2.03",BLIST_NOLUN},/*Responds to all lun */
+{"NEC","CD-ROM DRIVE:841","1.0", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"RODIME","RO3000S","2.33", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"SEAGATE", "ST157N", "\004|j", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
+ * for aha152x controller, which causes
+ * SCSI code to reset bus.*/
+{"SEAGATE", "ST296","921", BLIST_NOLUN}, /* Responds to all lun */
+{"SEAGATE","ST1581","6538",BLIST_NOLUN}, /* Responds to all lun */
+{"SONY","CD-ROM CDU-541","4.3d", BLIST_NOLUN},
+{"SONY","CD-ROM CDU-55S","1.0i", BLIST_NOLUN},
+{"SONY","CD-ROM CDU-561","1.7x", BLIST_NOLUN},
+{"TANDBERG","TDC 3600","U07", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"TEAC","CD-ROM","1.06", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
+ * for seagate controller, which causes
+ * SCSI code to reset bus.*/
+{"TEXEL","CD-ROM","1.06", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
+ * for seagate controller, which causes
+ * SCSI code to reset bus.*/
+{"QUANTUM","LPS525S","3110", BLIST_NOLUN}, /* Locks sometimes if polled for lun != 0 */
+{"QUANTUM","PD1225S","3110", BLIST_NOLUN}, /* Locks sometimes if polled for lun != 0 */
+{"MEDIAVIS","CDR-H93MV","1.31", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"SANKYO", "CP525","6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
+{"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
+{"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
+{"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
+
+/*
+ * Other types of devices that have special flags.
+ */
+{"SONY","CD-ROM CDU-8001","*", BLIST_BORKEN},
+{"TEXEL","CD-ROM","1.06", BLIST_BORKEN},
+{"INSITE","Floptical F*8I","*", BLIST_KEY},
+{"INSITE","I325VM","*", BLIST_KEY},
+{"PIONEER","CD-ROMDRM-602X","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"PIONEER","CD-ROMDRM-604X","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+/*
+ * Must be at end of list...
+ */
+{NULL, NULL, NULL}
+};
+
+static int get_device_flags(unsigned char * response_data){
+ int i = 0;
+ unsigned char * pnt;
+ for(i=0; 1; i++){
+ if(device_list[i].vendor == NULL) return 0;
+ pnt = &response_data[8];
+ while(*pnt && *pnt == ' ') pnt++;
+ if(memcmp(device_list[i].vendor, pnt,
+ strlen(device_list[i].vendor))) continue;
+ pnt = &response_data[16];
+ while(*pnt && *pnt == ' ') pnt++;
+ if(memcmp(device_list[i].model, pnt,
+ strlen(device_list[i].model))) continue;
+ return device_list[i].flags;
+ }
+ return 0;
+}
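+
+/*
+ * Illustrative example: the INQUIRY response carries the vendor in bytes
+ * 8-15 and the model in bytes 16-31 (the same offsets print_inquiry() uses
+ * below), so a drive reporting vendor "CHINON" and model "CD-ROM CDS-431"
+ * matches the first entry of device_list and get_device_flags() returns
+ * BLIST_NOLUN, which later suppresses probing of luns != 0 on that target.
+ */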
+
+void scsi_make_blocked_list(void) {
+ int block_count = 0, index;
+ unsigned int flags;
+ struct Scsi_Host * sh[128], * shpnt;
+
+ /*
+ * Create a circular linked list from the scsi hosts which have
+ * the "wish_block" field in the Scsi_Host structure set.
+ * The blocked list should include all the scsi hosts using ISA DMA.
+ * In some systems, using two dma channels simultaneously causes
+ * unpredictable results.
+ * Among the scsi hosts in the blocked list, only one host at a time
+ * is allowed to have active commands queued. The transition from
+ * one active host to the next one is allowed only when host_busy == 0
+ * for the active host (which implies host_busy == 0 for all the hosts
+ * in the list). Moreover for block devices the transition to a new
+ * active host is allowed only when a request is completed, since a
+ * block device request can be divided into multiple scsi commands
+ * (when there are few sg lists or clustering is disabled).
+ *
+ * (DB, 4 Feb 1995)
+ */
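+
+ /*
+ * Editor's sketch: with three wish_block hosts h0, h1 and h2 the loop
+ * below builds the ring h0->block = h1, h1->block = h2, h2->block = h0;
+ * SCSI_BLOCK() then refuses to queue new commands on any host of the
+ * ring other than the current host_active.
+ */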
+
+ save_flags(flags);
+ cli();
+ host_active = NULL;
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next) {
+
+#if 0
+ /*
+ * Is this a candidate for the blocked list?
+ * Useful to put into the blocked list all the hosts whose driver
+ * does not know about the host->block feature.
+ */
+ if (shpnt->unchecked_isa_dma) shpnt->wish_block = 1;
+#endif
+
+ if (shpnt->wish_block) sh[block_count++] = shpnt;
+ }
+
+ if (block_count == 1) sh[0]->block = NULL;
+
+ else if (block_count > 1) {
+
+ for(index = 0; index < block_count - 1; index++) {
+ sh[index]->block = sh[index + 1];
+ printk("scsi%d : added to blocked host list.\n",
+ sh[index]->host_no);
+ }
+
+ sh[block_count - 1]->block = sh[0];
+ printk("scsi%d : added to blocked host list.\n",
+ sh[index]->host_no);
+ }
+
+ restore_flags(flags);
+}
+
+static void scan_scsis_done (Scsi_Cmnd * SCpnt)
+{
+
+#ifdef DEBUG
+ printk ("scan_scsis_done(%p, %06x)\n", SCpnt->host, SCpnt->result);
+#endif
+ SCpnt->request.rq_status = RQ_SCSI_DONE;
+
+ if (SCpnt->request.sem != NULL)
+ up(SCpnt->request.sem);
+}
+
+#ifdef CONFIG_SCSI_MULTI_LUN
+static int max_scsi_luns = 8;
+#else
+static int max_scsi_luns = 1;
+#endif
+
+void scsi_luns_setup(char *str, int *ints) {
+ if (ints[0] != 1)
+ printk("scsi_luns_setup : usage max_scsi_luns=n (n should be between 1 and 8)\n");
+ else
+ max_scsi_luns = ints[1];
+}
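+
+/*
+ * Usage note (editor's assumption): this setup routine is normally wired
+ * to the "max_scsi_luns=" kernel boot option, so booting with e.g.
+ * max_scsi_luns=8 makes the scan below probe every lun of each target
+ * even when CONFIG_SCSI_MULTI_LUN is not configured in.
+ */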
+
+/*
+ * Detecting SCSI devices :
+ * We scan all present host adapters' buses, from ID 0 to ID (max_id).
+ * We use the INQUIRY command, determine device type, and pass the ID /
+ * lun address of all sequential devices to the tape driver, all random
+ * devices to the disk driver.
+ */
+static void scan_scsis (struct Scsi_Host *shpnt, unchar hardcoded,
+ unchar hchannel, unchar hid, unchar hlun)
+{
+ int dev, lun, channel;
+ unsigned char scsi_result0[256];
+ unsigned char *scsi_result;
+ Scsi_Device *SDpnt;
+ int max_dev_lun;
+ Scsi_Cmnd *SCpnt;
+
+ SCpnt = (Scsi_Cmnd *) scsi_init_malloc (sizeof (Scsi_Cmnd), GFP_ATOMIC | GFP_DMA);
+ SDpnt = (Scsi_Device *) scsi_init_malloc (sizeof (Scsi_Device), GFP_ATOMIC);
+ memset (SCpnt, 0, sizeof (Scsi_Cmnd));
+
+
+ /* Make sure we have something that is valid for DMA purposes */
+ scsi_result = ( ( !shpnt->unchecked_isa_dma )
+ ? &scsi_result0[0] : scsi_init_malloc (512, GFP_DMA));
+
+ if (scsi_result == NULL) {
+ printk ("Unable to obtain scsi_result buffer\n");
+ goto leave;
+ }
+
+ /* We must chain ourselves into the host_queue, so commands can time out */
+ if(shpnt->host_queue)
+ shpnt->host_queue->prev = SCpnt;
+ SCpnt->next = shpnt->host_queue;
+ SCpnt->prev = NULL;
+ shpnt->host_queue = SCpnt;
+
+
+ if (hardcoded == 1) {
+ Scsi_Device *oldSDpnt=SDpnt;
+ struct Scsi_Device_Template * sdtpnt;
+ channel = hchannel;
+ if(channel > shpnt->max_channel) goto leave;
+ dev = hid;
+ if(dev >= shpnt->max_id) goto leave;
+ lun = hlun;
+ if(lun >= shpnt->max_lun) goto leave;
+ scan_scsis_single (channel, dev, lun, &max_dev_lun,
+ &SDpnt, SCpnt, shpnt, scsi_result);
+ if(SDpnt!=oldSDpnt) {
+
+ /* it could happen that the block device hasn't been initialized yet */
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
+
+ oldSDpnt->scsi_request_fn = NULL;
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->attach) {
+ (*sdtpnt->attach)(oldSDpnt);
+ if(oldSDpnt->attached) scsi_build_commandblocks(oldSDpnt);}
+ resize_dma_pool();
+
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
+ if(sdtpnt->finish && sdtpnt->nr_dev)
+ {(*sdtpnt->finish)();}
+ }
+ }
+
+ }
+ else {
+ for (channel = 0; channel <= shpnt->max_channel; channel++) {
+ for (dev = 0; dev < shpnt->max_id; ++dev) {
+ if (shpnt->this_id != dev) {
+
+ /*
+ * We need the for loop so that continue, etc. work correctly. We put this in
+ * a variable so that we can override it during the scan if we
+ * detect a device *KNOWN* to have multiple logical units.
+ */
+ max_dev_lun = (max_scsi_luns < shpnt->max_lun ?
+ max_scsi_luns : shpnt->max_lun);
+ for (lun = 0; lun < max_dev_lun; ++lun) {
+ if (!scan_scsis_single (channel, dev, lun, &max_dev_lun,
+ &SDpnt, SCpnt, shpnt, scsi_result))
+ break; /* break means don't probe further for luns!=0 */
+ } /* for lun ends */
+ } /* if this_id != id ends */
+ } /* for dev ends */
+ } /* for channel ends */
+ } /* if/else hardcoded */
+
+ leave:
+
+ {/* Unchain SCpnt from host_queue */
+ Scsi_Cmnd *prev,*next,*hqptr;
+ for(hqptr=shpnt->host_queue; hqptr && hqptr!=SCpnt; hqptr=hqptr->next) ;
+ if(hqptr) {
+ prev=hqptr->prev;
+ next=hqptr->next;
+ if(prev)
+ prev->next=next;
+ else
+ shpnt->host_queue=next;
+ if(next) next->prev=prev;
+ }
+ }
+
+ /* Last device block does not exist. Free memory. */
+ if (SDpnt != NULL)
+ scsi_init_free ((char *) SDpnt, sizeof (Scsi_Device));
+
+ if (SCpnt != NULL)
+ scsi_init_free ((char *) SCpnt, sizeof (Scsi_Cmnd));
+
+ /* If we allocated a buffer so we could do DMA, free it now */
+ if (scsi_result != &scsi_result0[0] && scsi_result != NULL)
+ scsi_init_free (scsi_result, 512);
+
+}
+
+/*
+ * The worker for scan_scsis.
+ * Returning 0 means "don't probe further for lun != 0"; 1 means "OK, go on".
+ * Global variables used : scsi_devices(linked list)
+ */
+int scan_scsis_single (int channel, int dev, int lun, int *max_dev_lun,
+ Scsi_Device **SDpnt2, Scsi_Cmnd * SCpnt, struct Scsi_Host * shpnt,
+ char *scsi_result)
+{
+ unsigned char scsi_cmd[12];
+ struct Scsi_Device_Template *sdtpnt;
+ Scsi_Device * SDtail, *SDpnt=*SDpnt2;
+ int bflags, type=-1;
+
+ SDtail = scsi_devices;
+ if (scsi_devices)
+ while (SDtail->next)
+ SDtail = SDtail->next;
+
+ memset (SDpnt, 0, sizeof (Scsi_Device));
+ SDpnt->host = shpnt;
+ SDpnt->id = dev;
+ SDpnt->lun = lun;
+ SDpnt->channel = channel;
+
+ /* Some low level driver could use device->type (DB) */
+ SDpnt->type = -1;
+
+ /*
+ * Assume that the device will have handshaking problems, and then fix this
+ * field later if it turns out it doesn't
+ */
+ SDpnt->borken = 1;
+ SDpnt->was_reset = 0;
+ SDpnt->expecting_cc_ua = 0;
+
+ scsi_cmd[0] = TEST_UNIT_READY;
+ scsi_cmd[1] = lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[4] = scsi_cmd[5] = 0;
+
+ SCpnt->host = SDpnt->host;
+ SCpnt->device = SDpnt;
+ SCpnt->target = SDpnt->id;
+ SCpnt->lun = SDpnt->lun;
+ SCpnt->channel = SDpnt->channel;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ scsi_do_cmd (SCpnt, (void *) scsi_cmd,
+ (void *) scsi_result,
+ 256, scan_scsis_done, SCSI_TIMEOUT + 4 * HZ, 5);
+ down (&sem);
+ }
+
+#if defined(DEBUG) || defined(DEBUG_INIT)
+ printk ("scsi: scan_scsis_single id %d lun %d. Return code 0x%08x\n",
+ dev, lun, SCpnt->result);
+ print_driverbyte(SCpnt->result); print_hostbyte(SCpnt->result);
+ printk("\n");
+#endif
+
+ if (SCpnt->result) {
+ if (((driver_byte (SCpnt->result) & DRIVER_SENSE) ||
+ (status_byte (SCpnt->result) & CHECK_CONDITION)) &&
+ ((SCpnt->sense_buffer[0] & 0x70) >> 4) == 7) {
+ if (((SCpnt->sense_buffer[2] & 0xf) != NOT_READY) &&
+ ((SCpnt->sense_buffer[2] & 0xf) != UNIT_ATTENTION) &&
+ ((SCpnt->sense_buffer[2] & 0xf) != ILLEGAL_REQUEST || lun > 0))
+ return 1;
+ }
+ else
+ return 0;
+ }
+
+#if defined (DEBUG) || defined(DEBUG_INIT)
+ printk ("scsi: performing INQUIRY\n");
+#endif
+ /*
+ * Build an INQUIRY command block.
+ */
+ scsi_cmd[0] = INQUIRY;
+ scsi_cmd[1] = (lun << 5) & 0xe0;
+ scsi_cmd[2] = 0;
+ scsi_cmd[3] = 0;
+ scsi_cmd[4] = 255;
+ scsi_cmd[5] = 0;
+ SCpnt->cmd_len = 0;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ scsi_do_cmd (SCpnt, (void *) scsi_cmd,
+ (void *) scsi_result,
+ 256, scan_scsis_done, SCSI_TIMEOUT, 3);
+ down (&sem);
+ }
+
+#if defined(DEBUG) || defined(DEBUG_INIT)
+ printk ("scsi: INQUIRY %s with code 0x%x\n",
+ SCpnt->result ? "failed" : "successful", SCpnt->result);
+#endif
+
+ if (SCpnt->result)
+ return 0; /* assume no peripheral if any sort of error */
+
+ /*
+ * It would seem some TOSHIBA CDROM gets things wrong
+ */
+ if (!strncmp (scsi_result + 8, "TOSHIBA", 7) &&
+ !strncmp (scsi_result + 16, "CD-ROM", 6) &&
+ scsi_result[0] == TYPE_DISK) {
+ scsi_result[0] = TYPE_ROM;
+ scsi_result[1] |= 0x80; /* removable */
+ }
+
+ if (!strncmp (scsi_result + 8, "NEC", 3)) {
+ if (!strncmp (scsi_result + 16, "CD-ROM DRIVE:84 ", 16) ||
+ !strncmp (scsi_result + 16, "CD-ROM DRIVE:25", 15))
+ SDpnt->manufacturer = SCSI_MAN_NEC_OLDCDR;
+ else
+ SDpnt->manufacturer = SCSI_MAN_NEC;
+ }
+ else if (!strncmp (scsi_result + 8, "TOSHIBA", 7))
+ SDpnt->manufacturer = SCSI_MAN_TOSHIBA;
+ else if (!strncmp (scsi_result + 8, "SONY", 4))
+ SDpnt->manufacturer = SCSI_MAN_SONY;
+ else if (!strncmp (scsi_result + 8, "PIONEER", 7))
+ SDpnt->manufacturer = SCSI_MAN_PIONEER;
+ else
+ SDpnt->manufacturer = SCSI_MAN_UNKNOWN;
+
+ memcpy (SDpnt->vendor, scsi_result + 8, 8);
+ memcpy (SDpnt->model, scsi_result + 16, 16);
+ memcpy (SDpnt->rev, scsi_result + 32, 4);
+
+ SDpnt->removable = (0x80 & scsi_result[1]) >> 7;
+ SDpnt->lockable = SDpnt->removable;
+ SDpnt->changed = 0;
+ SDpnt->access_count = 0;
+ SDpnt->busy = 0;
+ SDpnt->has_cmdblocks = 0;
+ /*
+ * Currently, all sequential devices are assumed to be tapes and all
+ * random-access devices to be disks, with the appropriate read-only
+ * flags set so that ROM / WORM devices are treated as RO.
+ */
+ switch (type = (scsi_result[0] & 0x1f)) {
+ case TYPE_TAPE:
+ case TYPE_DISK:
+ case TYPE_MOD:
+ case TYPE_PROCESSOR:
+ case TYPE_SCANNER:
+ SDpnt->writeable = 1;
+ break;
+ case TYPE_WORM:
+ case TYPE_ROM:
+ SDpnt->writeable = 0;
+ break;
+ default:
+ printk ("scsi: unknown type %d\n", type);
+ }
+
+ SDpnt->single_lun = 0;
+ SDpnt->soft_reset =
+ (scsi_result[7] & 1) && ((scsi_result[3] & 7) == 2);
+ SDpnt->random = (type == TYPE_TAPE) ? 0 : 1;
+ SDpnt->type = (type & 0x1f);
+
+ print_inquiry (scsi_result);
+
+ for (sdtpnt = scsi_devicelist; sdtpnt;
+ sdtpnt = sdtpnt->next)
+ if (sdtpnt->detect)
+ SDpnt->attached +=
+ (*sdtpnt->detect) (SDpnt);
+
+ SDpnt->scsi_level = scsi_result[2] & 0x07;
+ if (SDpnt->scsi_level >= 2 ||
+ (SDpnt->scsi_level == 1 &&
+ (scsi_result[3] & 0x0f) == 1))
+ SDpnt->scsi_level++;
+
+ /*
+ * Set the tagged_queue flag for SCSI-II devices that purport to support
+ * tagged queuing in the INQUIRY data.
+ */
+ SDpnt->tagged_queue = 0;
+ if ((SDpnt->scsi_level >= SCSI_2) &&
+ (scsi_result[7] & 2)) {
+ SDpnt->tagged_supported = 1;
+ SDpnt->current_tag = 0;
+ }
+
+ /*
+ * Accommodate drivers that want to sleep when they should be in a polling
+ * loop.
+ */
+ SDpnt->disconnect = 0;
+
+ /*
+ * Get any flags for this device.
+ */
+ bflags = get_device_flags (scsi_result);
+
+ /*
+ * Some revisions of the Texel CD ROM drives have handshaking problems when
+ * used with the Seagate controllers. Before we know what type of device
+ * we're talking to, we assume it's borken and then change it here if it
+ * turns out that it isn't a TEXEL drive.
+ */
+ if ((bflags & BLIST_BORKEN) == 0)
+ SDpnt->borken = 0;
+
+ /*
+ * These devices need this "key" to unlock them before we can use them.
+ */
+ if ((bflags & BLIST_KEY) != 0) {
+ printk ("Unlocked floptical drive.\n");
+ SDpnt->lockable = 0;
+ scsi_cmd[0] = MODE_SENSE;
+ scsi_cmd[1] = (lun << 5) & 0xe0;
+ scsi_cmd[2] = 0x2e;
+ scsi_cmd[3] = 0;
+ scsi_cmd[4] = 0x2a;
+ scsi_cmd[5] = 0;
+ SCpnt->cmd_len = 0;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt, (void *) scsi_cmd,
+ (void *) scsi_result, 0x2a,
+ scan_scsis_done, SCSI_TIMEOUT, 3);
+ down (&sem);
+ }
+ }
+ /* Add this device to the linked list at the end */
+ if (SDtail)
+ SDtail->next = SDpnt;
+ else
+ scsi_devices = SDpnt;
+ SDtail = SDpnt;
+
+ SDpnt = (Scsi_Device *) scsi_init_malloc (sizeof (Scsi_Device), GFP_ATOMIC);
+ *SDpnt2=SDpnt;
+ if (!SDpnt)
+ printk ("scsi: scan_scsis_single: Cannot malloc\n");
+
+
+ /*
+ * Some scsi devices cannot be polled for lun != 0 due to firmware bugs
+ */
+ if (bflags & BLIST_NOLUN)
+ return 0; /* break; */
+
+ /*
+ * If we want to only allow I/O to one of the luns attached to this device
+ * at a time, then we set this flag.
+ */
+ if (bflags & BLIST_SINGLELUN)
+ SDpnt->single_lun = 1;
+
+ /*
+ * If this device is known to support multiple units, override the other
+ * settings, and scan all of them.
+ */
+ if (bflags & BLIST_FORCELUN)
+ *max_dev_lun = 8;
+ /*
+ * We assume the device can't handle lun!=0 if: - it reports scsi-0 (ANSI
+ * SCSI Revision 0) (old drives like MAXTOR XT-3280) or - it reports scsi-1
+ * (ANSI SCSI Revision 1) and Response Data Format 0
+ */
+ if (((scsi_result[2] & 0x07) == 0)
+ ||
+ ((scsi_result[2] & 0x07) == 1 &&
+ (scsi_result[3] & 0x0f) == 0))
+ return 0;
+ return 1;
+}
+
+/*
+ * Flag bits for the internal_timeout array
+ */
+#define NORMAL_TIMEOUT 0
+#define IN_ABORT 1
+#define IN_RESET 2
+
+/*
+ * This is our time out function, called when the timer expires for a
+ * given host adapter. It will attempt to abort the currently executing
+ * command and, failing that, to reset the SCSI bus.
+ */
+
+static void scsi_times_out (Scsi_Cmnd * SCpnt, int pid)
+{
+
+ switch (SCpnt->internal_timeout & (IN_ABORT | IN_RESET))
+ {
+ case NORMAL_TIMEOUT:
+ {
+#ifdef DEBUG_TIMEOUT
+ scsi_dump_status();
+#endif
+ }
+
+ if (!scsi_abort (SCpnt, DID_TIME_OUT, pid))
+ return;
+ case IN_ABORT:
+ printk("SCSI host %d abort (pid %ld) timed out - resetting\n",
+ SCpnt->host->host_no, SCpnt->pid);
+ if (!scsi_reset (SCpnt, FALSE))
+ return;
+ case IN_RESET:
+ case (IN_ABORT | IN_RESET):
+ /* This might be controversial, but if there is a bus hang,
+ * you might conceivably want the machine up and running
+ * esp if you have an ide disk.
+ */
+ printk("Unable to reset scsi host %d - ", SCpnt->host->host_no);
+ printk("probably a SCSI bus hang.\n");
+ SCpnt->internal_timeout &= ~IN_RESET;
+ scsi_reset (SCpnt, TRUE);
+ return;
+
+ default:
+ INTERNAL_ERROR;
+ }
+
+}
+
+
+/* This function takes a quick look at a request, and decides if it
+ * can be queued now, or if there would be a stall while waiting for
+ * something else to finish. This routine assumes that interrupts are
+ * turned off when entering the routine. It is the responsibility
+ * of the calling code to ensure that this is the case.
+ */
+
+Scsi_Cmnd * request_queueable (struct request * req, Scsi_Device * device)
+{
+ Scsi_Cmnd * SCpnt = NULL;
+ int tablesize;
+ Scsi_Cmnd * found = NULL;
+ struct buffer_head * bh, *bhp;
+
+ if (!device)
+ panic ("No device passed to request_queueable().\n");
+
+ if (req && req->rq_status == RQ_INACTIVE)
+ panic("Inactive in request_queueable");
+
+ /*
+ * Look for a free command block. If we have been instructed not to queue
+ * multiple commands to multi-lun devices, then check to see what else is
+ * going for this device first.
+ */
+
+ SCpnt = device->host->host_queue;
+ if (!device->single_lun) {
+ while(SCpnt){
+ if(SCpnt->target == device->id &&
+ SCpnt->lun == device->lun) {
+ if(SCpnt->request.rq_status == RQ_INACTIVE) break;
+ }
+ SCpnt = SCpnt->next;
+ }
+ } else {
+ while(SCpnt){
+ if(SCpnt->target == device->id) {
+ if (SCpnt->lun == device->lun) {
+ if(found == NULL
+ && SCpnt->request.rq_status == RQ_INACTIVE)
+ {
+ found=SCpnt;
+ }
+ }
+ if(SCpnt->request.rq_status != RQ_INACTIVE) {
+ /*
+ * I think that we should really limit things to one
+ * outstanding command per device - this is what tends
+ * to trip up buggy firmware.
+ */
+ return NULL;
+ }
+ }
+ SCpnt = SCpnt->next;
+ }
+ SCpnt = found;
+ }
+
+ if (!SCpnt) return NULL;
+
+ if (SCSI_BLOCK(device->host)) return NULL;
+
+ if (req) {
+ memcpy(&SCpnt->request, req, sizeof(struct request));
+ tablesize = device->host->sg_tablesize;
+ bhp = bh = req->bh;
+ if(!tablesize) bh = NULL;
+ /* Take a quick look through the table to see how big it is.
+ * We already have our copy of req, so we can mess with that
+ * if we want to.
+ */
+ while(req->nr_sectors && bh){
+ bhp = bhp->b_reqnext;
+ if(!bhp || !CONTIGUOUS_BUFFERS(bh,bhp)) tablesize--;
+ req->nr_sectors -= bh->b_size >> 9;
+ req->sector += bh->b_size >> 9;
+ if(!tablesize) break;
+ bh = bhp;
+ }
+ if(req->nr_sectors && bh && bh->b_reqnext){ /* Any leftovers? */
+ SCpnt->request.bhtail = bh;
+ req->bh = bh->b_reqnext; /* Divide request */
+ bh->b_reqnext = NULL;
+ bh = req->bh;
+
+ /* Now reset things so that req looks OK */
+ SCpnt->request.nr_sectors -= req->nr_sectors;
+ req->current_nr_sectors = bh->b_size >> 9;
+ req->buffer = bh->b_data;
+ SCpnt->request.sem = NULL; /* Wait until whole thing done */
+ } else {
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+ }
+ } else {
+ SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Busy, but no request */
+ SCpnt->request.sem = NULL; /* And no one is waiting for the device
+ * either */
+ }
+
+ SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
+ SCpnt->old_use_sg = 0;
+ SCpnt->transfersize = 0;
+ SCpnt->underflow = 0;
+ SCpnt->cmd_len = 0;
+
+/* Since not everyone seems to set the device info correctly
+ * before the Scsi_Cmnd gets sent out to scsi_do_cmd, we do it here.
+ */
+ SCpnt->channel = device->channel;
+ SCpnt->lun = device->lun;
+ SCpnt->target = device->id;
+
+ return SCpnt;
+}
+
+/* This function returns a structure pointer that will be valid for
+ * the device. The wait parameter tells us whether we should wait for
+ * the unit to become free or not. We are also able to tell this routine
+ * not to return a descriptor if the host is unable to accept any more
+ * commands for the time being. Keep in mind that there is no guarantee
+ * that the host will stay free, and that the request_queueable function
+ * above also knows the internal allocation scheme of the packets for
+ * each device.
+ */
+
+Scsi_Cmnd * allocate_device (struct request ** reqp, Scsi_Device * device,
+ int wait)
+{
+ kdev_t dev;
+ struct request * req = NULL;
+ int tablesize;
+ unsigned int flags;
+ struct buffer_head * bh, *bhp;
+ struct Scsi_Host * host;
+ Scsi_Cmnd * SCpnt = NULL;
+ Scsi_Cmnd * SCwait = NULL;
+ Scsi_Cmnd * found = NULL;
+
+ if (!device)
+ panic ("No device passed to allocate_device().\n");
+
+ if (reqp) req = *reqp;
+
+ /* See if this request has already been queued by an interrupt routine */
+ if (req) {
+ if(req->rq_status == RQ_INACTIVE) return NULL;
+ dev = req->rq_dev;
+ } else
+ dev = 0; /* unused */
+
+ host = device->host;
+
+ if (intr_count && SCSI_BLOCK(host)) return NULL;
+
+ while (1==1){
+ SCpnt = device->host->host_queue;
+ if (!device->single_lun) {
+ while(SCpnt){
+ if(SCpnt->target == device->id &&
+ SCpnt->lun == device->lun) {
+ SCwait = SCpnt;
+ if(SCpnt->request.rq_status == RQ_INACTIVE) break;
+ }
+ SCpnt = SCpnt->next;
+ }
+ } else {
+ while(SCpnt){
+ if(SCpnt->target == device->id) {
+ if (SCpnt->lun == device->lun) {
+ SCwait = SCpnt;
+ if(found == NULL
+ && SCpnt->request.rq_status == RQ_INACTIVE)
+ {
+ found=SCpnt;
+ }
+ }
+ if(SCpnt->request.rq_status != RQ_INACTIVE) {
+ /*
+ * I think that we should really limit things to one
+ * outstanding command per device - this is what tends
+ * to trip up buggy firmware.
+ */
+ found = NULL;
+ break;
+ }
+ }
+ SCpnt = SCpnt->next;
+ }
+ SCpnt = found;
+ }
+
+ save_flags(flags);
+ cli();
+ /* See if this request has already been queued by an interrupt routine
+ */
+ if (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) {
+ restore_flags(flags);
+ return NULL;
+ }
+ if (!SCpnt || SCpnt->request.rq_status != RQ_INACTIVE) /* Might have changed */
+ {
+ restore_flags(flags);
+ if(!wait) return NULL;
+ if (!SCwait) {
+ printk("Attempt to allocate device channel %d, target %d, "
+ "lun %d\n", device->channel, device->id, device->lun);
+ panic("No device found in allocate_device\n");
+ }
+ SCSI_SLEEP(&device->device_wait,
+ (SCwait->request.rq_status != RQ_INACTIVE));
+ } else {
+ if (req) {
+ memcpy(&SCpnt->request, req, sizeof(struct request));
+ tablesize = device->host->sg_tablesize;
+ bhp = bh = req->bh;
+ if(!tablesize) bh = NULL;
+ /* Take a quick look through the table to see how big it is.
+ * We already have our copy of req, so we can mess with that
+ * if we want to.
+ */
+ while(req->nr_sectors && bh){
+ bhp = bhp->b_reqnext;
+ if(!bhp || !CONTIGUOUS_BUFFERS(bh,bhp)) tablesize--;
+ req->nr_sectors -= bh->b_size >> 9;
+ req->sector += bh->b_size >> 9;
+ if(!tablesize) break;
+ bh = bhp;
+ }
+ if(req->nr_sectors && bh && bh->b_reqnext){/* Any leftovers? */
+ SCpnt->request.bhtail = bh;
+ req->bh = bh->b_reqnext; /* Divide request */
+ bh->b_reqnext = NULL;
+ bh = req->bh;
+ /* Now reset things so that req looks OK */
+ SCpnt->request.nr_sectors -= req->nr_sectors;
+ req->current_nr_sectors = bh->b_size >> 9;
+ req->buffer = bh->b_data;
+ SCpnt->request.sem = NULL; /* Wait until whole thing done*/
+ }
+ else
+ {
+ req->rq_status = RQ_INACTIVE;
+ *reqp = req->next;
+ wake_up(&wait_for_request);
+ }
+ } else {
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = NULL; /* And no one is waiting for this
+ * to complete */
+ }
+ restore_flags(flags);
+ break;
+ }
+ }
+
+ SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
+ SCpnt->old_use_sg = 0;
+ SCpnt->transfersize = 0; /* No default transfer size */
+ SCpnt->cmd_len = 0;
+
+ SCpnt->underflow = 0; /* Do not flag underflow conditions */
+
+ /* Since not everyone seems to set the device info correctly
+ * before the Scsi_Cmnd gets sent out to scsi_do_cmd, we do it here.
+ */
+ SCpnt->channel = device->channel;
+ SCpnt->lun = device->lun;
+ SCpnt->target = device->id;
+
+ return SCpnt;
+}
+
+/*
+ * This is inline because we have stack problems if we recurse too deeply.
+ */
+
+inline void internal_cmnd (Scsi_Cmnd * SCpnt)
+{
+ int temp;
+ struct Scsi_Host * host;
+ unsigned int flags;
+#ifdef DEBUG_DELAY
+ int clock;
+#endif
+
+ host = SCpnt->host;
+
+ /*
+ * We will wait MIN_RESET_DELAY clock ticks after the last reset so
+ * that we do not talk to a drive that is not ready yet.
+ */
+ save_flags(flags);
+ sti();
+ temp = host->last_reset + MIN_RESET_DELAY;
+ while (jiffies < temp);
+ restore_flags(flags);
+
+ update_timeout(SCpnt, SCpnt->timeout_per_command);
+
+ /*
+ * We will use a queued command if possible, otherwise we will emulate the
+ * queuing and calling of completion function ourselves.
+ */
+#ifdef DEBUG
+ printk("internal_cmnd (host = %d, channel = %d, target = %d, "
+ "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
+ SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
+ SCpnt->buffer, SCpnt->bufflen, SCpnt->done);
+#endif
+
+ if (host->can_queue)
+ {
+#ifdef DEBUG
+ printk("queuecommand : routine at %p\n",
+ host->hostt->queuecommand);
+#endif
+ /* This locking tries to prevent all sorts of races between
+ * queuecommand and the interrupt code. In effect,
+ * we are only allowed to be in queuecommand once at
+ * any given time, and we can only be in the interrupt
+ * handler and the queuecommand function at the same time
+ * when queuecommand is called while servicing the
+ * interrupt.
+ */
+
+ if(!intr_count && SCpnt->host->irq)
+ disable_irq(SCpnt->host->irq);
+
+ host->hostt->queuecommand (SCpnt, scsi_done);
+
+ if(!intr_count && SCpnt->host->irq)
+ enable_irq(SCpnt->host->irq);
+ }
+ else
+ {
+
+#ifdef DEBUG
+ printk("command() : routine at %p\n", host->hostt->command);
+#endif
+ temp=host->hostt->command (SCpnt);
+ SCpnt->result = temp;
+#ifdef DEBUG_DELAY
+ clock = jiffies + 4 * HZ;
+ while (jiffies < clock);
+ printk("done(host = %d, result = %04x) : routine at %p\n",
+ host->host_no, temp, host->hostt->command);
+#endif
+ scsi_done(SCpnt);
+ }
+#ifdef DEBUG
+ printk("leaving internal_cmnd()\n");
+#endif
+}
+
+static void scsi_request_sense (Scsi_Cmnd * SCpnt)
+{
+ unsigned int flags;
+
+ save_flags(flags);
+ cli();
+ SCpnt->flags |= WAS_SENSE | ASKED_FOR_SENSE;
+ update_timeout(SCpnt, SENSE_TIMEOUT);
+ restore_flags(flags);
+
+
+ memcpy ((void *) SCpnt->cmnd , (void *) generic_sense,
+ sizeof(generic_sense));
+
+ SCpnt->cmnd[1] = SCpnt->lun << 5;
+ SCpnt->cmnd[4] = sizeof(SCpnt->sense_buffer);
+
+ SCpnt->request_buffer = &SCpnt->sense_buffer;
+ SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer);
+ SCpnt->use_sg = 0;
+ SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
+ internal_cmnd (SCpnt);
+}
+
+
+
+/*
+ * scsi_do_cmd sends all the commands out to the low-level driver. It
+ * handles the specifics required for each low level driver - i.e. queued
+ * or non-queued. It also prevents conflicts when different high level
+ * drivers go for the same host at the same time.
+ */
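+
+/*
+ * Typical caller (see scan_scsis_single() above): fill in SCpnt->host,
+ * target, lun and channel, point request.sem at a locked semaphore, set
+ * request.rq_status to RQ_SCSI_BUSY, pass the CDB plus a completion
+ * routine such as scan_scsis_done(), and then sleep in down(&sem) until
+ * the completion routine ups the semaphore.
+ */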
+
+void scsi_do_cmd (Scsi_Cmnd * SCpnt, const void *cmnd ,
+ void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *),
+ int timeout, int retries)
+{
+ unsigned long flags;
+ struct Scsi_Host * host = SCpnt->host;
+
+#ifdef DEBUG
+ {
+ int i;
+ int target = SCpnt->target;
+ printk ("scsi_do_cmd (host = %d, channel = %d target = %d, "
+ "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
+ "retries = %d)\n"
+ "command : " , host->host_no, SCpnt->channel, target, buffer,
+ bufflen, done, timeout, retries);
+ for (i = 0; i < 10; ++i)
+ printk ("%02x ", ((unsigned char *) cmnd)[i]);
+ printk("\n");
+ }
+#endif
+
+ if (!host)
+ {
+ panic ("Invalid or not present host.\n");
+ }
+
+
+ /*
+ * We must prevent reentrancy into the low-level host driver. To do so,
+ * we enter a loop until the host we want to talk to is not busy.
+ * Race conditions are prevented, as interrupts are disabled in between the
+ * time we check for the host being not busy, and the time we mark it busy
+ * ourselves.
+ */
+
+ save_flags(flags);
+ cli();
+ SCpnt->pid = scsi_pid++;
+
+ while (SCSI_BLOCK(host)) {
+ restore_flags(flags);
+ SCSI_SLEEP(&host->host_wait, SCSI_BLOCK(host));
+ cli();
+ }
+
+ if (host->block) host_active = host;
+
+ host->host_busy++;
+ restore_flags(flags);
+
+ /*
+ * Our own function scsi_done (which marks the host as not busy, disables
+ * the timeout counter, etc) will be called either by us or by the
+ * scsi_hosts[host].queuecommand() function; scsi_done in turn calls the
+ * completion function of the high level driver.
+ */
+
+ memcpy ((void *) SCpnt->data_cmnd , (const void *) cmnd, 12);
+#if 0
+ SCpnt->host = host;
+ SCpnt->channel = channel;
+ SCpnt->target = target;
+ SCpnt->lun = (SCpnt->data_cmnd[1] >> 5);
+#endif
+ SCpnt->bufflen = bufflen;
+ SCpnt->buffer = buffer;
+ SCpnt->flags=0;
+ SCpnt->retries=0;
+ SCpnt->allowed=retries;
+ SCpnt->done = done;
+ SCpnt->timeout_per_command = timeout;
+
+ memcpy ((void *) SCpnt->cmnd , (const void *) cmnd, 12);
+ /* Zero the sense buffer. Some host adapters automatically request
+ * sense on error. 0 is not a valid sense code.
+ */
+ memset ((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
+ SCpnt->request_buffer = buffer;
+ SCpnt->request_bufflen = bufflen;
+ SCpnt->old_use_sg = SCpnt->use_sg;
+ if (SCpnt->cmd_len == 0)
+ SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
+ SCpnt->old_cmd_len = SCpnt->cmd_len;
+
+ /* Start the timer ticking. */
+
+ SCpnt->internal_timeout = 0;
+ SCpnt->abort_reason = 0;
+ internal_cmnd (SCpnt);
+
+#ifdef DEBUG
+ printk ("Leaving scsi_do_cmd()\n");
+#endif
+}
+
+static int check_sense (Scsi_Cmnd * SCpnt)
+{
+ /* If there is no sense information, request it. If we have already
+ * requested it, there is no point in asking again - the firmware must
+ * be confused.
+ */
+ if (((SCpnt->sense_buffer[0] & 0x70) >> 4) != 7) {
+ if(!(SCpnt->flags & ASKED_FOR_SENSE))
+ return SUGGEST_SENSE;
+ else
+ return SUGGEST_RETRY;
+ }
+
+ SCpnt->flags &= ~ASKED_FOR_SENSE;
+
+#ifdef DEBUG_INIT
+ printk("scsi%d, channel%d : ", SCpnt->host->host_no, SCpnt->channel);
+ print_sense("", SCpnt);
+ printk("\n");
+#endif
+ if (SCpnt->sense_buffer[2] & 0xe0)
+ return SUGGEST_ABORT;
+
+ switch (SCpnt->sense_buffer[2] & 0xf)
+ {
+ case NO_SENSE:
+ return 0;
+ case RECOVERED_ERROR:
+ return SUGGEST_IS_OK;
+
+ case ABORTED_COMMAND:
+ return SUGGEST_RETRY;
+ case NOT_READY:
+ case UNIT_ATTENTION:
+ /*
+ * If we are expecting a CC/UA because of a bus reset that we
+ * performed, treat this just as a retry. Otherwise this is
+ * information that we should pass up to the upper-level driver
+ * so that we can deal with it there.
+ */
+ if( SCpnt->device->expecting_cc_ua )
+ {
+ SCpnt->device->expecting_cc_ua = 0;
+ return SUGGEST_RETRY;
+ }
+ return SUGGEST_ABORT;
+
+ /* these three are not supported */
+ case COPY_ABORTED:
+ case VOLUME_OVERFLOW:
+ case MISCOMPARE:
+
+ case MEDIUM_ERROR:
+ return SUGGEST_REMAP;
+ case BLANK_CHECK:
+ case DATA_PROTECT:
+ case HARDWARE_ERROR:
+ case ILLEGAL_REQUEST:
+ default:
+ return SUGGEST_ABORT;
+ }
+}
+
+/* This function is the mid-level interrupt routine, which decides how
+ * to handle error conditions. Each invocation of this function must
+ * do one and *only* one of the following:
+ *
+ * (1) Call last_cmnd[host].done. This is done for fatal errors and
+ * normal completion, and indicates that the handling for this
+ * request is complete.
+ * (2) Call internal_cmnd to requeue the command. This will result in
+ * scsi_done being called again when the retry is complete.
+ * (3) Call scsi_request_sense. This asks the host adapter/drive for
+ * more information about the error condition. When the information
+ * is available, scsi_done will be called again.
+ * (4) Call reset(). This is sort of a last resort, and the idea is that
+ * this may kick things loose and get the drive working again. reset()
+ * automatically calls scsi_request_sense, and thus scsi_done will be
+ * called again once the reset is complete.
+ *
+ * If none of the above actions are taken, the drive in question
+ * will hang. If more than one of the above actions are taken by
+ * scsi_done, then unpredictable behavior will result.
+ */
+static void scsi_done (Scsi_Cmnd * SCpnt)
+{
+ int status=0;
+ int exit=0;
+ int checked;
+ int oldto;
+ struct Scsi_Host * host = SCpnt->host;
+ int result = SCpnt->result;
+ oldto = update_timeout(SCpnt, 0);
+
+#ifdef DEBUG_TIMEOUT
+ if(result) printk("Non-zero result in scsi_done %x %d:%d\n",
+ result, SCpnt->target, SCpnt->lun);
+#endif
+
+ /* If we requested an abort, (and we got it) then fix up the return
+ * status to say why
+ */
+ if(host_byte(result) == DID_ABORT && SCpnt->abort_reason)
+ SCpnt->result = result = (result & 0xff00ffff) |
+ (SCpnt->abort_reason << 16);
+
+
+#define FINISHED 0
+#define MAYREDO 1
+#define REDO 3
+#define PENDING 4
+
+#ifdef DEBUG
+ printk("In scsi_done(host = %d, result = %06x)\n", host->host_no, result);
+#endif
+
+ if(SCpnt->flags & WAS_SENSE)
+ {
+ SCpnt->use_sg = SCpnt->old_use_sg;
+ SCpnt->cmd_len = SCpnt->old_cmd_len;
+ }
+
+ switch (host_byte(result))
+ {
+ case DID_OK:
+ if (status_byte(result) && (SCpnt->flags & WAS_SENSE))
+ /* Failed to obtain sense information */
+ {
+ SCpnt->flags &= ~WAS_SENSE;
+ SCpnt->internal_timeout &= ~SENSE_TIMEOUT;
+
+ if (!(SCpnt->flags & WAS_RESET))
+ {
+ printk("scsi%d : channel %d target %d lun %d request sense"
+ " failed, performing reset.\n",
+ SCpnt->host->host_no, SCpnt->channel, SCpnt->target,
+ SCpnt->lun);
+ scsi_reset(SCpnt, FALSE);
+ return;
+ }
+ else
+ {
+ exit = (DRIVER_HARD | SUGGEST_ABORT);
+ status = FINISHED;
+ }
+ }
+ else switch(msg_byte(result))
+ {
+ case COMMAND_COMPLETE:
+ switch (status_byte(result))
+ {
+ case GOOD:
+ if (SCpnt->flags & WAS_SENSE)
+ {
+#ifdef DEBUG
+ printk ("In scsi_done, GOOD status, COMMAND COMPLETE, parsing sense information.\n");
+#endif
+ SCpnt->flags &= ~WAS_SENSE;
+ SCpnt->internal_timeout &= ~SENSE_TIMEOUT;
+
+ switch (checked = check_sense(SCpnt))
+ {
+ case SUGGEST_SENSE:
+ case 0:
+#ifdef DEBUG
+ printk("NO SENSE. status = REDO\n");
+#endif
+ update_timeout(SCpnt, oldto);
+ status = REDO;
+ break;
+ case SUGGEST_IS_OK:
+ break;
+ case SUGGEST_REMAP:
+ case SUGGEST_RETRY:
+#ifdef DEBUG
+ printk("SENSE SUGGEST REMAP or SUGGEST RETRY - status = MAYREDO\n");
+#endif
+ status = MAYREDO;
+ exit = DRIVER_SENSE | SUGGEST_RETRY;
+ break;
+ case SUGGEST_ABORT:
+#ifdef DEBUG
+ printk("SENSE SUGGEST ABORT - status = FINISHED");
+#endif
+ status = FINISHED;
+ exit = DRIVER_SENSE | SUGGEST_ABORT;
+ break;
+ default:
+ printk ("Internal error %s %d \n", __FILE__,
+ __LINE__);
+ }
+ } /* end WAS_SENSE */
+ else
+ {
+#ifdef DEBUG
+ printk("COMMAND COMPLETE message returned, status = FINISHED. \n");
+#endif
+ exit = DRIVER_OK;
+ status = FINISHED;
+ }
+ break;
+
+ case CHECK_CONDITION:
+ switch (check_sense(SCpnt))
+ {
+ case 0:
+ update_timeout(SCpnt, oldto);
+ status = REDO;
+ break;
+ case SUGGEST_REMAP:
+ case SUGGEST_RETRY:
+ status = MAYREDO;
+ exit = DRIVER_SENSE | SUGGEST_RETRY;
+ break;
+ case SUGGEST_ABORT:
+ status = FINISHED;
+ exit = DRIVER_SENSE | SUGGEST_ABORT;
+ break;
+ case SUGGEST_SENSE:
+ scsi_request_sense (SCpnt);
+ status = PENDING;
+ break;
+ }
+ break;
+
+ case CONDITION_GOOD:
+ case INTERMEDIATE_GOOD:
+ case INTERMEDIATE_C_GOOD:
+ break;
+
+ case BUSY:
+ update_timeout(SCpnt, oldto);
+ status = REDO;
+ break;
+
+ case RESERVATION_CONFLICT:
+ printk("scsi%d, channel %d : RESERVATION CONFLICT performing"
+ " reset.\n", SCpnt->host->host_no, SCpnt->channel);
+ scsi_reset(SCpnt, FALSE);
+ return;
+#if 0
+ exit = DRIVER_SOFT | SUGGEST_ABORT;
+ status = MAYREDO;
+ break;
+#endif
+ default:
+ printk ("Internal error %s %d \n"
+ "status byte = %d \n", __FILE__,
+ __LINE__, status_byte(result));
+
+ }
+ break;
+ default:
+ panic("scsi: unsupported message byte %d received\n",
+ msg_byte(result));
+ }
+ break;
+ case DID_TIME_OUT:
+#ifdef DEBUG
+ printk("Host returned DID_TIME_OUT - ");
+#endif
+
+ if (SCpnt->flags & WAS_TIMEDOUT)
+ {
+#ifdef DEBUG
+ printk("Aborting\n");
+#endif
+ /*
+ Allow TEST_UNIT_READY and INQUIRY commands to timeout early
+ without causing resets. All other commands should be retried.
+ */
+ if (SCpnt->cmnd[0] != TEST_UNIT_READY &&
+ SCpnt->cmnd[0] != INQUIRY)
+ status = MAYREDO;
+ exit = (DRIVER_TIMEOUT | SUGGEST_ABORT);
+ }
+ else
+ {
+#ifdef DEBUG
+ printk ("Retrying.\n");
+#endif
+ SCpnt->flags |= WAS_TIMEDOUT;
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ status = REDO;
+ }
+ break;
+ case DID_BUS_BUSY:
+ case DID_PARITY:
+ status = REDO;
+ break;
+ case DID_NO_CONNECT:
+#ifdef DEBUG
+ printk("Couldn't connect.\n");
+#endif
+ exit = (DRIVER_HARD | SUGGEST_ABORT);
+ break;
+ case DID_ERROR:
+ status = MAYREDO;
+ exit = (DRIVER_HARD | SUGGEST_ABORT);
+ break;
+ case DID_BAD_TARGET:
+ case DID_ABORT:
+ exit = (DRIVER_INVALID | SUGGEST_ABORT);
+ break;
+ case DID_RESET:
+ if (SCpnt->flags & IS_RESETTING)
+ {
+ SCpnt->flags &= ~IS_RESETTING;
+ status = REDO;
+ break;
+ }
+
+ if(msg_byte(result) == GOOD &&
+ status_byte(result) == CHECK_CONDITION) {
+ switch (check_sense(SCpnt)) {
+ case 0:
+ update_timeout(SCpnt, oldto);
+ status = REDO;
+ break;
+ case SUGGEST_REMAP:
+ case SUGGEST_RETRY:
+ status = MAYREDO;
+ exit = DRIVER_SENSE | SUGGEST_RETRY;
+ break;
+ case SUGGEST_ABORT:
+ status = FINISHED;
+ exit = DRIVER_SENSE | SUGGEST_ABORT;
+ break;
+ case SUGGEST_SENSE:
+ scsi_request_sense (SCpnt);
+ status = PENDING;
+ break;
+ }
+ } else {
+ status=REDO;
+ exit = SUGGEST_RETRY;
+ }
+ break;
+ default :
+ exit = (DRIVER_ERROR | SUGGEST_DIE);
+ }
+
+ switch (status)
+ {
+ case FINISHED:
+ case PENDING:
+ break;
+ case MAYREDO:
+#ifdef DEBUG
+ printk("In MAYREDO, allowing %d retries, have %d\n",
+ SCpnt->allowed, SCpnt->retries);
+#endif
+ if ((++SCpnt->retries) < SCpnt->allowed)
+ {
+ if ((SCpnt->retries >= (SCpnt->allowed >> 1))
+ && !(jiffies < SCpnt->host->last_reset + MIN_RESET_PERIOD)
+ && !(SCpnt->flags & WAS_RESET))
+ {
+ printk("scsi%d channel %d : resetting for second half of retries.\n",
+ SCpnt->host->host_no, SCpnt->channel);
+ scsi_reset(SCpnt, FALSE);
+ break;
+ }
+
+ }
+ else
+ {
+ status = FINISHED;
+ break;
+ }
+ /* fall through to REDO */
+
+ case REDO:
+
+ if (SCpnt->flags & WAS_SENSE)
+ scsi_request_sense(SCpnt);
+ else
+ {
+ memcpy ((void *) SCpnt->cmnd,
+ (void*) SCpnt->data_cmnd,
+ sizeof(SCpnt->data_cmnd));
+ SCpnt->request_buffer = SCpnt->buffer;
+ SCpnt->request_bufflen = SCpnt->bufflen;
+ SCpnt->use_sg = SCpnt->old_use_sg;
+ SCpnt->cmd_len = SCpnt->old_cmd_len;
+ internal_cmnd (SCpnt);
+ }
+ break;
+ default:
+ INTERNAL_ERROR;
+ }
+
+ if (status == FINISHED) {
+#ifdef DEBUG
+ printk("Calling done function - at address %p\n", SCpnt->done);
+#endif
+ host->host_busy--; /* Indicate that we are free */
+
+ if (host->block && host->host_busy == 0) {
+ host_active = NULL;
+
+ /* For block devices "wake_up" is done in end_scsi_request */
+ if (MAJOR(SCpnt->request.rq_dev) != SCSI_DISK_MAJOR &&
+ MAJOR(SCpnt->request.rq_dev) != SCSI_CDROM_MAJOR) {
+ struct Scsi_Host * next;
+
+ for (next = host->block; next != host; next = next->block)
+ wake_up(&next->host_wait);
+ }
+
+ }
+
+ wake_up(&host->host_wait);
+ SCpnt->result = result | ((exit & 0xff) << 24);
+ SCpnt->use_sg = SCpnt->old_use_sg;
+ SCpnt->cmd_len = SCpnt->old_cmd_len;
+ SCpnt->done (SCpnt);
+ }
+
+#undef FINISHED
+#undef REDO
+#undef MAYREDO
+#undef PENDING
+}
+
+/*
+ * The scsi_abort function interfaces with the abort() function of the host
+ * we are aborting, and causes the current command to not complete. The
+ * caller should deal with any error messages or status returned on the
+ * next call.
+ *
+ * This will not be called reentrantly for a given host.
+ */
+
+/*
+ * Since we're nice guys and specified that abort() and reset()
+ * can be non-reentrant, the internal_timeout flags are used to
+ * enforce this.
+ */
+
+
+int scsi_abort (Scsi_Cmnd * SCpnt, int why, int pid)
+{
+ int oldto;
+ unsigned long flags;
+ struct Scsi_Host * host = SCpnt->host;
+
+ while(1)
+ {
+ save_flags(flags);
+ cli();
+
+ /*
+ * Protect against races here. If the command is done, or we are
+ * on a different command forget it.
+ */
+ if (SCpnt->request.rq_status == RQ_INACTIVE || pid != SCpnt->pid) {
+ restore_flags(flags);
+ return 0;
+ }
+
+ if (SCpnt->internal_timeout & IN_ABORT)
+ {
+ restore_flags(flags);
+ while (SCpnt->internal_timeout & IN_ABORT)
+ barrier();
+ }
+ else
+ {
+ SCpnt->internal_timeout |= IN_ABORT;
+ oldto = update_timeout(SCpnt, ABORT_TIMEOUT);
+
+ if ((SCpnt->flags & IS_RESETTING) && SCpnt->device->soft_reset) {
+ /* OK, this command must have died when we did the
+ * reset. The device itself must have lied.
+ */
+ printk("Stale command on %d %d:%d appears to have died when"
+ " the bus was reset\n",
+ SCpnt->channel, SCpnt->target, SCpnt->lun);
+ }
+
+ restore_flags(flags);
+ if (!host->host_busy) {
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ update_timeout(SCpnt, oldto);
+ return 0;
+ }
+ printk("scsi : aborting command due to timeout : pid %lu, scsi%d,"
+ " channel %d, id %d, lun %d ",
+ SCpnt->pid, SCpnt->host->host_no, (int) SCpnt->channel,
+ (int) SCpnt->target, (int) SCpnt->lun);
+ print_command (SCpnt->cmnd);
+ if (SCpnt->request.rq_status == RQ_INACTIVE || pid != SCpnt->pid)
+ return 0;
+ SCpnt->abort_reason = why;
+ switch(host->hostt->abort(SCpnt)) {
+ /* We do not know how to abort. Try waiting another
+ * time increment and see if this helps. Set the
+ * WAS_TIMEDOUT flag so we do not try this twice
+ */
+ case SCSI_ABORT_BUSY: /* Tough call - returning 1 from
+ * this is too severe
+ */
+ case SCSI_ABORT_SNOOZE:
+ if(why == DID_TIME_OUT) {
+ save_flags(flags);
+ cli();
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ if(SCpnt->flags & WAS_TIMEDOUT) {
+ restore_flags(flags);
+ return 1; /* Indicate we cannot handle this.
+ * We drop down into the reset handler
+ * and try again
+ */
+ } else {
+ SCpnt->flags |= WAS_TIMEDOUT;
+ oldto = SCpnt->timeout_per_command;
+ update_timeout(SCpnt, oldto);
+ }
+ restore_flags(flags);
+ }
+ return 0;
+ case SCSI_ABORT_PENDING:
+ if(why != DID_TIME_OUT) {
+ save_flags(flags);
+ cli();
+ update_timeout(SCpnt, oldto);
+ restore_flags(flags);
+ }
+ return 0;
+ case SCSI_ABORT_SUCCESS:
+ /* We should have already aborted this one. No
+ * need to adjust timeout
+ */
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ return 0;
+ case SCSI_ABORT_NOT_RUNNING:
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ update_timeout(SCpnt, 0);
+ return 0;
+ case SCSI_ABORT_ERROR:
+ default:
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ return 1;
+ }
+ }
+ }
+}
+
+
+/* Mark a single SCSI Device as having been reset. */
+
+static inline void scsi_mark_device_reset(Scsi_Device *Device)
+{
+ Device->was_reset = 1;
+ Device->expecting_cc_ua = 1;
+}
+
+
+/* Mark all SCSI Devices on a specific Host as having been reset. */
+
+void scsi_mark_host_bus_reset(struct Scsi_Host *Host)
+{
+ Scsi_Cmnd *SCpnt;
+ for(SCpnt = Host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ scsi_mark_device_reset(SCpnt->device);
+}
+
+
+int scsi_reset (Scsi_Cmnd * SCpnt, int bus_reset_flag)
+{
+ int temp, oldto;
+ unsigned long flags;
+ Scsi_Cmnd * SCpnt1;
+ struct Scsi_Host * host = SCpnt->host;
+
+ printk("SCSI bus is being reset for host %d.\n",
+ host->host_no);
+
+ /*
+ * First of all, we need to make a recommendation to the low-level
+ * driver as to whether a BUS_DEVICE_RESET should be performed,
+ * or whether we should do a full BUS_RESET. There is no simple
+ * algorithm here - we basically use a series of heuristics
+ * to determine what we should do.
+ */
+ SCpnt->host->suggest_bus_reset = FALSE;
+
+ /*
+ * First see if all of the active devices on the bus have
+ * been jammed up so that we are attempting resets. If so,
+ * then suggest a bus reset. Forcing a bus reset could
+ * result in some race conditions, but no more than
+ * you would usually get with timeouts. We will cross
+ * that bridge when we come to it.
+ */
+ SCpnt1 = host->host_queue;
+ while(SCpnt1) {
+ if( SCpnt1->request.rq_status != RQ_INACTIVE
+ && (SCpnt1->flags & (WAS_RESET | IS_RESETTING)) == 0 )
+ break;
+ SCpnt1 = SCpnt1->next;
+ }
+ if( SCpnt1 == NULL ) {
+ SCpnt->host->suggest_bus_reset = TRUE;
+ }
+
+
+ /*
+ * If the code that called us is suggesting a hard reset, then
+ * definitely request it. This usually occurs because a
+ * BUS_DEVICE_RESET times out.
+ */
+ if( bus_reset_flag ) {
+ SCpnt->host->suggest_bus_reset = TRUE;
+ }
+
+ while (1) {
+ save_flags(flags);
+ cli();
+ if (SCpnt->internal_timeout & IN_RESET)
+ {
+ restore_flags(flags);
+ while (SCpnt->internal_timeout & IN_RESET)
+ barrier();
+ }
+ else
+ {
+ SCpnt->internal_timeout |= IN_RESET;
+ oldto = update_timeout(SCpnt, RESET_TIMEOUT);
+
+ if (host->host_busy)
+ {
+ restore_flags(flags);
+ SCpnt1 = host->host_queue;
+ while(SCpnt1) {
+ if (SCpnt1->request.rq_status != RQ_INACTIVE) {
+#if 0
+ if (!(SCpnt1->flags & IS_RESETTING) &&
+ !(SCpnt1->internal_timeout & IN_ABORT))
+ scsi_abort(SCpnt1, DID_RESET, SCpnt->pid);
+#endif
+ SCpnt1->flags |= (WAS_RESET | IS_RESETTING);
+ }
+ SCpnt1 = SCpnt1->next;
+ }
+
+ host->last_reset = jiffies;
+ temp = host->hostt->reset(SCpnt);
+ host->last_reset = jiffies;
+ }
+ else
+ {
+ if (!host->block) host->host_busy++;
+ restore_flags(flags);
+ host->last_reset = jiffies;
+ SCpnt->flags |= (WAS_RESET | IS_RESETTING);
+ temp = host->hostt->reset(SCpnt);
+ host->last_reset = jiffies;
+ if (!host->block) host->host_busy--;
+ }
+
+#ifdef DEBUG
+ printk("scsi reset function returned %d\n", temp);
+#endif
+
+ /*
+ * Now figure out what we need to do, based upon
+ * what the low level driver said that it did.
+ * If the result is SCSI_RESET_SUCCESS, SCSI_RESET_PENDING,
+ * or SCSI_RESET_WAKEUP, then the low level driver did a
+ * bus device reset or bus reset, so we should go through
+ * and mark one or all of the devices on that bus
+ * as having been reset.
+ */
+ switch(temp & SCSI_RESET_ACTION) {
+ case SCSI_RESET_SUCCESS:
+ if (temp & SCSI_RESET_BUS_RESET)
+ scsi_mark_host_bus_reset(host);
+ else scsi_mark_device_reset(SCpnt->device);
+ save_flags(flags);
+ cli();
+ SCpnt->internal_timeout &= ~IN_RESET;
+ update_timeout(SCpnt, oldto);
+ restore_flags(flags);
+ return 0;
+ case SCSI_RESET_PENDING:
+ if (temp & SCSI_RESET_BUS_RESET)
+ scsi_mark_host_bus_reset(host);
+ else scsi_mark_device_reset(SCpnt->device);
+ return 0;
+ case SCSI_RESET_PUNT:
+ SCpnt->internal_timeout &= ~IN_RESET;
+ scsi_request_sense (SCpnt);
+ return 0;
+ case SCSI_RESET_WAKEUP:
+ if (temp & SCSI_RESET_BUS_RESET)
+ scsi_mark_host_bus_reset(host);
+ else scsi_mark_device_reset(SCpnt->device);
+ SCpnt->internal_timeout &= ~IN_RESET;
+ scsi_request_sense (SCpnt);
+ /*
+ * Since a bus reset was performed, we
+ * need to wake up each and every command
+ * that was active on the bus.
+ */
+ if( temp & SCSI_RESET_BUS_RESET )
+ {
+ SCpnt1 = host->host_queue;
+ while(SCpnt1) {
+ if( SCpnt1->request.rq_status != RQ_INACTIVE
+ && SCpnt1 != SCpnt)
+ scsi_request_sense (SCpnt1);
+ SCpnt1 = SCpnt1->next;
+ }
+ }
+ return 0;
+ case SCSI_RESET_SNOOZE:
+ /* In this case, we set the timeout field to 0
+ * so that this command does not time out any more,
+ * and we return 1 so that we get a message on the
+ * screen.
+ */
+ save_flags(flags);
+ cli();
+ SCpnt->internal_timeout &= ~IN_RESET;
+ update_timeout(SCpnt, 0);
+ restore_flags(flags);
+ /* If you snooze, you lose... */
+ case SCSI_RESET_ERROR:
+ default:
+ return 1;
+ }
+
+ return temp;
+ }
+ }
+}
+
+
+static void scsi_main_timeout(void)
+{
+ /*
+ * We must not enter update_timeout with a timeout condition still pending.
+ */
+
+ int timed_out, pid;
+ unsigned long flags;
+ struct Scsi_Host * host;
+ Scsi_Cmnd * SCpnt = NULL;
+
+ do {
+ save_flags(flags);
+ cli();
+
+ update_timeout(NULL, 0);
+ /*
+ * Find all timers such that they have 0 or negative (shouldn't happen)
+ * time remaining on them.
+ */
+
+ timed_out = 0;
+ for(host = scsi_hostlist; host; host = host->next) {
+ for(SCpnt = host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ if (SCpnt->timeout == -1)
+ {
+ SCpnt->timeout = 0;
+ pid = SCpnt->pid;
+ restore_flags(flags);
+ scsi_times_out(SCpnt, pid);
+ ++timed_out;
+ save_flags(flags);
+ cli();
+ }
+ }
+ } while (timed_out);
+ restore_flags(flags);
+}
+
+/*
+ * The strategy is to cause the timer code to call scsi_times_out()
+ * when the soonest timeout is pending.
+ * The arguments are used when we are queueing a new command, because
+ * we do not want to subtract the time used from this time, but when we
+ * set the timer, we want to take this value into account.
+ */
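+
+/*
+ * Worked example: suppose 30 ticks have passed since time_start and a
+ * command is (re)armed with timeout = 100.  Its timeout field is stored
+ * as 130; the correction loop below then subtracts the 30 elapsed ticks
+ * from every active command, so the new command ends up with 100 ticks
+ * remaining while the older commands lose the time that already passed.
+ */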
+
+static int update_timeout(Scsi_Cmnd * SCset, int timeout)
+{
+ unsigned int least, used;
+ unsigned int oldto;
+ unsigned long flags;
+ struct Scsi_Host * host;
+ Scsi_Cmnd * SCpnt = NULL;
+
+ save_flags(flags);
+ cli();
+
+ /*
+ * Figure out how much time has passed since the last time the timeouts
+ * were updated
+ */
+ used = (time_start) ? (jiffies - time_start) : 0;
+
+ /*
+ * Find out what is due to timeout soonest, and adjust all timeouts for
+ * the amount of time that has passed since the last time we called
+ * update_timeout.
+ */
+
+ oldto = 0;
+
+ if(SCset){
+ oldto = SCset->timeout - used;
+ SCset->timeout = timeout + used;
+ }
+
+ least = 0xffffffff;
+
+ for(host = scsi_hostlist; host; host = host->next)
+ for(SCpnt = host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ if (SCpnt->timeout > 0) {
+ SCpnt->timeout -= used;
+ if(SCpnt->timeout <= 0) SCpnt->timeout = -1;
+ if(SCpnt->timeout > 0 && SCpnt->timeout < least)
+ least = SCpnt->timeout;
+ }
+
+ /*
+ * If something is due to timeout again, then we will set the next timeout
+ * interrupt to occur. Otherwise, timeouts are disabled.
+ */
+
+ if (least != 0xffffffff)
+ {
+ time_start = jiffies;
+ timer_table[SCSI_TIMER].expires = (time_elapsed = least) + jiffies;
+ timer_active |= 1 << SCSI_TIMER;
+ }
+ else
+ {
+ timer_table[SCSI_TIMER].expires = time_start = time_elapsed = 0;
+ timer_active &= ~(1 << SCSI_TIMER);
+ }
+ restore_flags(flags);
+ return oldto;
+}
+
+#ifdef CONFIG_MODULES
+static int scsi_register_host(Scsi_Host_Template *);
+static void scsi_unregister_host(Scsi_Host_Template *);
+#endif
+
+void *scsi_malloc(unsigned int len)
+{
+ unsigned int nbits, mask;
+ unsigned long flags;
+ int i, j;
+ if(len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
+ return NULL;
+
+ save_flags(flags);
+ cli();
+ nbits = len >> 9;
+ mask = (1 << nbits) - 1;
+
+ for(i=0;i < dma_sectors / SECTORS_PER_PAGE; i++)
+ for(j=0; j<=SECTORS_PER_PAGE - nbits; j++){
+ if ((dma_malloc_freelist[i] & (mask << j)) == 0){
+ dma_malloc_freelist[i] |= (mask << j);
+ restore_flags(flags);
+ dma_free_sectors -= nbits;
+#ifdef DEBUG
+ printk("SMalloc: %d %p\n",len, dma_malloc_pages[i] + (j << 9));
+#endif
+ return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
+ }
+ }
+ restore_flags(flags);
+ return NULL; /* Nope. No more */
+}
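+
+/*
+ * Worked example: scsi_malloc(1024) sets nbits = 2 and mask = 0x3, scans
+ * each page's freelist word for two adjacent clear bits, marks them busy,
+ * subtracts 2 from dma_free_sectors and returns dma_malloc_pages[i] +
+ * (j << 9), i.e. the matching 1 KB slice of that DMA page; scsi_free()
+ * below reverses the bookkeeping.
+ */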
+
+int scsi_free(void *obj, unsigned int len)
+{
+ unsigned int page, sector, nbits, mask;
+ unsigned long flags;
+
+#ifdef DEBUG
+ printk("scsi_free %p %d\n",obj, len);
+#endif
+
+ for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
+ unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
+ if ((unsigned long) obj >= page_addr &&
+ (unsigned long) obj < page_addr + PAGE_SIZE)
+ {
+ sector = (((unsigned long) obj) - page_addr) >> 9;
+
+ nbits = len >> 9;
+ mask = (1 << nbits) - 1;
+
+ if ((mask << sector) >= (1 << SECTORS_PER_PAGE))
+ panic ("scsi_free:Bad memory alignment");
+
+ save_flags(flags);
+ cli();
+ if((dma_malloc_freelist[page] & (mask << sector)) != (mask<<sector))
+ panic("scsi_free:Trying to free unused memory");
+
+ dma_free_sectors += nbits;
+ dma_malloc_freelist[page] &= ~(mask << sector);
+ restore_flags(flags);
+ return 0;
+ }
+ }
+ panic("scsi_free:Bad offset");
+}
+
+
+int scsi_loadable_module_flag; /* Set after we scan builtin drivers */
+
+void * scsi_init_malloc(unsigned int size, int priority)
+{
+ void * retval;
+
+ /*
+ * For buffers used by the DMA pool, we assume page aligned
+ * structures.
+ */
+ if ((size % PAGE_SIZE) == 0) {
+ int order, a_size;
+ for (order = 0, a_size = PAGE_SIZE;
+ a_size < size; order++, a_size <<= 1)
+ ;
+ retval = (void *) __get_dma_pages(priority & GFP_LEVEL_MASK,
+ order);
+ } else
+ retval = kmalloc(size, priority);
+
+ if (retval)
+ memset(retval, 0, size);
+ return retval;
+}
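+
+/*
+ * Example: a request of exactly PAGE_SIZE bytes uses order 0, one of
+ * 2*PAGE_SIZE uses order 1, and so on; sizes that are not a multiple of
+ * PAGE_SIZE fall through to kmalloc().
+ */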
+
+
+void scsi_init_free(char * ptr, unsigned int size)
+{
+ /*
+ * We need this special code here because the DMA pool assumes
+ * page aligned data. Besides, it is wasteful to allocate
+ * page sized chunks with kmalloc.
+ */
+ if ((size % PAGE_SIZE) == 0) {
+ int order, a_size;
+
+ for (order = 0, a_size = PAGE_SIZE;
+ a_size < size; order++, a_size <<= 1)
+ ;
+ free_pages((unsigned long)ptr, order);
+ } else
+ kfree(ptr);
+}
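+
+/*
+ * Note (added for illustration): the order loop above rounds a
+ * page-multiple request up to the next power of two of pages, e.g.
+ * size == 3 * PAGE_SIZE gives order 2, i.e. 4 pages allocated/freed.
+ */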
+
+void scsi_build_commandblocks(Scsi_Device * SDpnt)
+{
+ int j;
+ Scsi_Cmnd * SCpnt;
+ struct Scsi_Host * host = NULL;
+
+ for(j=0;j<SDpnt->host->cmd_per_lun;j++){
+ SCpnt = (Scsi_Cmnd *) scsi_init_malloc(sizeof(Scsi_Cmnd), GFP_ATOMIC);
+ SCpnt->host = SDpnt->host;
+ SCpnt->device = SDpnt;
+ SCpnt->target = SDpnt->id;
+ SCpnt->lun = SDpnt->lun;
+ SCpnt->channel = SDpnt->channel;
+ SCpnt->request.rq_status = RQ_INACTIVE;
+ SCpnt->use_sg = 0;
+ SCpnt->old_use_sg = 0;
+ SCpnt->old_cmd_len = 0;
+ SCpnt->timeout = 0;
+ SCpnt->underflow = 0;
+ SCpnt->transfersize = 0;
+ SCpnt->host_scribble = NULL;
+ host = SDpnt->host;
+ if(host->host_queue)
+ host->host_queue->prev = SCpnt;
+ SCpnt->next = host->host_queue;
+ SCpnt->prev = NULL;
+ host->host_queue = SCpnt;
+ }
+ SDpnt->has_cmdblocks = 1;
+}
+
+/*
+ * scsi_dev_init() is our initialization routine, which in turn calls host
+ * initialization, bus scanning, and sd/st initialization routines.
+ */
+
+int scsi_dev_init(void)
+{
+ Scsi_Device * SDpnt;
+ struct Scsi_Host * shpnt;
+ struct Scsi_Device_Template * sdtpnt;
+#ifdef FOO_ON_YOU
+ return;
+#endif
+
+ /* Yes we're here... */
+ dispatch_scsi_info_ptr = dispatch_scsi_info;
+
+ /* Init a few things so we can "malloc" memory. */
+ scsi_loadable_module_flag = 0;
+
+ timer_table[SCSI_TIMER].fn = scsi_main_timeout;
+ timer_table[SCSI_TIMER].expires = 0;
+
+#ifdef CONFIG_MODULES
+ register_symtab(&scsi_symbol_table);
+#endif
+
+ /* Register the /proc/scsi/scsi entry */
+#if CONFIG_PROC_FS
+ proc_scsi_register(0, &proc_scsi_scsi);
+#endif
+
+ /* initialize all hosts */
+ scsi_init();
+
+ scsi_devices = (Scsi_Device *) NULL;
+
+ for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
+ scan_scsis(shpnt,0,0,0,0); /* scan for scsi devices */
+
+ printk("scsi : detected ");
+ for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if (sdtpnt->dev_noticed && sdtpnt->name)
+ printk("%d SCSI %s%s ", sdtpnt->dev_noticed, sdtpnt->name,
+ (sdtpnt->dev_noticed != 1) ? "s" : "");
+ printk("total.\n");
+
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
+
+ for (SDpnt=scsi_devices; SDpnt; SDpnt = SDpnt->next) {
+ SDpnt->scsi_request_fn = NULL;
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->attach) (*sdtpnt->attach)(SDpnt);
+ if(SDpnt->attached) scsi_build_commandblocks(SDpnt);
+ }
+
+
+ /*
+ * This should build the DMA pool.
+ */
+ resize_dma_pool();
+
+ /*
+ * OK, now we finish the initialization by doing spin-up, read
+ * capacity, etc, etc
+ */
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->finish && sdtpnt->nr_dev)
+ (*sdtpnt->finish)();
+
+ scsi_loadable_module_flag = 1;
+
+ return 0;
+}
+
+static void print_inquiry(unsigned char *data)
+{
+ int i;
+
+ printk(" Vendor: ");
+ for (i = 8; i < 16; i++)
+ {
+ if (data[i] >= 0x20 && i < data[4] + 5)
+ printk("%c", data[i]);
+ else
+ printk(" ");
+ }
+
+ printk(" Model: ");
+ for (i = 16; i < 32; i++)
+ {
+ if (data[i] >= 0x20 && i < data[4] + 5)
+ printk("%c", data[i]);
+ else
+ printk(" ");
+ }
+
+ printk(" Rev: ");
+ for (i = 32; i < 36; i++)
+ {
+ if (data[i] >= 0x20 && i < data[4] + 5)
+ printk("%c", data[i]);
+ else
+ printk(" ");
+ }
+
+ printk("\n");
+
+ i = data[0] & 0x1f;
+
+ printk(" Type: %s ",
+ i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : "Unknown " );
+ printk(" ANSI SCSI revision: %02x", data[2] & 0x07);
+ if ((data[2] & 0x07) == 1 && (data[3] & 0x0f) == 1)
+ printk(" CCS\n");
+ else
+ printk("\n");
+}
+
+
+#ifdef CONFIG_PROC_FS
+int scsi_proc_info(char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+ Scsi_Device *scd;
+ struct Scsi_Host *HBA_ptr;
+ int parameter[4];
+ char *p;
+ int i,size, len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+
+ scd = scsi_devices;
+ HBA_ptr = scsi_hostlist;
+
+ if(inout == 0) {
+ size = sprintf(buffer+len,"Attached devices: %s\n", (scd)?"":"none");
+ len += size;
+ pos = begin + len;
+ while (HBA_ptr) {
+#if 0
+ size += sprintf(buffer+len,"scsi%2d: %s\n", (int) HBA_ptr->host_no,
+ HBA_ptr->hostt->procname);
+ len += size;
+ pos = begin + len;
+#endif
+ scd = scsi_devices;
+ while (scd) {
+ if (scd->host == HBA_ptr) {
+ proc_print_scsidevice(scd, buffer, &size, len);
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+ scd = scd->next;
+ }
+ HBA_ptr = HBA_ptr->next;
+ }
+
+ stop_output:
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len = length; /* Ending slop */
+ return (len);
+ }
+
+ /*
+ * Usage: echo "scsi singledevice 0 1 2 3" >/proc/scsi/scsi
+ * with "0 1 2 3" replaced by your "Host Channel Id Lun".
+ * Consider this feature BETA.
+ * CAUTION: This is not for hotplugging your peripherals. SCSI was
+ * not designed for that, and you could damage your hardware!
+ * It may be safe to switch on an already connected device,
+ * but there is no guarantee that doing so will not corrupt an
+ * ongoing data transfer.
+ */
+ if(!buffer || length < 25 || strncmp("scsi", buffer, 4))
+ return(-EINVAL);
+
+ if(!strncmp("singledevice", buffer + 5, 12)) {
+ p = buffer + 17;
+
+ for(i=0; i<4; i++) {
+ p++;
+ parameter[i] = simple_strtoul(p, &p, 0);
+ }
+ printk("scsi singledevice %d %d %d %d\n", parameter[0], parameter[1],
+ parameter[2], parameter[3]);
+
+ while(scd && (scd->host->host_no != parameter[0]
+ || scd->channel != parameter[1]
+ || scd->id != parameter[2]
+ || scd->lun != parameter[3])) {
+ scd = scd->next;
+ }
+ if(scd)
+ return(-ENOSYS); /* We do not yet support unplugging */
+ while(HBA_ptr && HBA_ptr->host_no != parameter[0])
+ HBA_ptr = HBA_ptr->next;
+
+ if(!HBA_ptr)
+ return(-ENXIO);
+
+ scan_scsis (HBA_ptr, 1, parameter[1], parameter[2], parameter[3]);
+ return(length);
+ }
+ return(-EINVAL);
+}
+#endif
+
+/*
+ * Go through the device list and recompute the most appropriate size
+ * for the dma pool. Then grab more memory (as required).
+ */
+static void resize_dma_pool(void)
+{
+ int i;
+ unsigned long size;
+ struct Scsi_Host * shpnt;
+ struct Scsi_Host * host = NULL;
+ Scsi_Device * SDpnt;
+ unsigned long flags;
+ FreeSectorBitmap * new_dma_malloc_freelist = NULL;
+ unsigned int new_dma_sectors = 0;
+ unsigned int new_need_isa_buffer = 0;
+ unsigned char ** new_dma_malloc_pages = NULL;
+
+ if( !scsi_devices )
+ {
+ /*
+ * Free up the DMA pool.
+ */
+ if( dma_free_sectors != dma_sectors )
+ panic("SCSI DMA pool memory leak %d %d\n",dma_free_sectors,dma_sectors);
+
+ for(i=0; i < dma_sectors / SECTORS_PER_PAGE; i++)
+ scsi_init_free(dma_malloc_pages[i], PAGE_SIZE);
+ if (dma_malloc_pages)
+ scsi_init_free((char *) dma_malloc_pages,
+ (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages));
+ dma_malloc_pages = NULL;
+ if (dma_malloc_freelist)
+ scsi_init_free((char *) dma_malloc_freelist,
+ (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_freelist));
+ dma_malloc_freelist = NULL;
+ dma_sectors = 0;
+ dma_free_sectors = 0;
+ return;
+ }
+ /* Next, check to see if we need to extend the DMA buffer pool */
+
+ new_dma_sectors = 2*SECTORS_PER_PAGE; /* Base value we use */
+
+ if (high_memory-1 > ISA_DMA_THRESHOLD)
+ scsi_need_isa_bounce_buffers = 1;
+ else
+ scsi_need_isa_bounce_buffers = 0;
+
+ if (scsi_devicelist)
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ new_dma_sectors += SECTORS_PER_PAGE; /* Increment for each host */
+
+ for (SDpnt=scsi_devices; SDpnt; SDpnt = SDpnt->next) {
+ host = SDpnt->host;
+
+ if(SDpnt->type != TYPE_TAPE)
+ new_dma_sectors += ((host->sg_tablesize *
+ sizeof(struct scatterlist) + 511) >> 9) *
+ host->cmd_per_lun;
+
+ if(host->unchecked_isa_dma &&
+ scsi_need_isa_bounce_buffers &&
+ SDpnt->type != TYPE_TAPE) {
+ new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
+ host->cmd_per_lun;
+ new_need_isa_buffer++;
+ }
+ }
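+
+ /*
+ * Worked example (added for illustration, assuming a 12 byte
+ * struct scatterlist on i386): a non-tape device on a host with
+ * sg_tablesize 16 and cmd_per_lun 2 adds
+ * ((16 * 12 + 511) >> 9) * 2 = 2 sectors to new_dma_sectors.
+ */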
+
+ /* Round up to a multiple of 16 sectors and keep the pool below 32MB: */
+ new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
+
+ /*
+ * We never shrink the buffers - this leads to
+ * race conditions that I would rather not even think
+ * about right now.
+ */
+ if( new_dma_sectors < dma_sectors )
+ new_dma_sectors = dma_sectors;
+
+ if (new_dma_sectors)
+ {
+ size = (new_dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
+ new_dma_malloc_freelist = (FreeSectorBitmap *) scsi_init_malloc(size, GFP_ATOMIC);
+ memset(new_dma_malloc_freelist, 0, size);
+
+ size = (new_dma_sectors / SECTORS_PER_PAGE)*sizeof(*new_dma_malloc_pages);
+ new_dma_malloc_pages = (unsigned char **) scsi_init_malloc(size, GFP_ATOMIC);
+ memset(new_dma_malloc_pages, 0, size);
+ }
+
+ /*
+ * If we need more buffers, expand the list.
+ */
+ if( new_dma_sectors > dma_sectors ) {
+ for(i=dma_sectors / SECTORS_PER_PAGE; i< new_dma_sectors / SECTORS_PER_PAGE; i++)
+ new_dma_malloc_pages[i] = (unsigned char *)
+ scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
+ }
+
+ /* While we manipulate the actual DMA lists, we need to protect
+ * them from interrupts.
+ */
+ save_flags(flags);
+ cli();
+ if (dma_malloc_freelist)
+ {
+ size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
+ memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
+ scsi_init_free((char *) dma_malloc_freelist, size);
+ }
+ dma_malloc_freelist = new_dma_malloc_freelist;
+
+ if (dma_malloc_pages)
+ {
+ size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages);
+ memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
+ scsi_init_free((char *) dma_malloc_pages, size);
+ }
+
+ dma_free_sectors += new_dma_sectors - dma_sectors;
+ dma_malloc_pages = new_dma_malloc_pages;
+ dma_sectors = new_dma_sectors;
+ need_isa_buffer = new_need_isa_buffer;
+ restore_flags(flags);
+}
+
+#ifdef CONFIG_MODULES /* a big #ifdef block... */
+
+/*
+ * This entry point should be called by a loadable module if it is
+ * trying to add a low level scsi driver to the system.
+ */
+static int scsi_register_host(Scsi_Host_Template * tpnt)
+{
+ int pcount;
+ struct Scsi_Host * shpnt;
+ Scsi_Device * SDpnt;
+ struct Scsi_Device_Template * sdtpnt;
+ const char * name;
+
+ if (tpnt->next || !tpnt->detect) return 1;/* Must be already loaded, or
+ * no detect routine available
+ */
+ pcount = next_scsi_host;
+ if ((tpnt->present = tpnt->detect(tpnt)))
+ {
+ if(pcount == next_scsi_host) {
+ if(tpnt->present > 1) {
+ printk("Failure to register low-level scsi driver");
+ scsi_unregister_host(tpnt);
+ return 1;
+ }
+ /* The low-level driver did not register a host of its own,
+ * so register one on its behalf now.
+ */
+ scsi_register(tpnt,0);
+ }
+ tpnt->next = scsi_hosts; /* Add to the linked list */
+ scsi_hosts = tpnt;
+
+ /* Add the new driver to /proc/scsi */
+#if CONFIG_PROC_FS
+ build_proc_dir_entries(tpnt);
+#endif
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ if(shpnt->hostt == tpnt)
+ {
+ if(tpnt->info)
+ name = tpnt->info(shpnt);
+ else
+ name = tpnt->name;
+ printk ("scsi%d : %s\n", /* And print a little message */
+ shpnt->host_no, name);
+ }
+
+ printk ("scsi : %d host%s.\n", next_scsi_host,
+ (next_scsi_host == 1) ? "" : "s");
+
+ scsi_make_blocked_list();
+
+ /* The next step is to call scan_scsis here. This generates the
+ * Scsi_Devices entries
+ */
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ if(shpnt->hostt == tpnt) scan_scsis(shpnt,0,0,0,0);
+
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
+
+ /* Next we create the Scsi_Cmnd structures for this host */
+
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ if(SDpnt->host->hostt == tpnt)
+ {
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->attach) (*sdtpnt->attach)(SDpnt);
+ if(SDpnt->attached) scsi_build_commandblocks(SDpnt);
+ }
+
+ /*
+ * Now that we have all of the devices, resize the DMA pool,
+ * as required. */
+ resize_dma_pool();
+
+
+ /* This does any final handling that is required. */
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->finish && sdtpnt->nr_dev)
+ (*sdtpnt->finish)();
+ }
+
+#if defined(USE_STATIC_SCSI_MEMORY)
+ printk ("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
+ (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
+ (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
+ (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
+#endif
+
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Similarly, this entry point should be called by a loadable module if it
+ * is trying to remove a low level scsi driver from the system.
+ */
+static void scsi_unregister_host(Scsi_Host_Template * tpnt)
+{
+ Scsi_Host_Template * SHT, *SHTp;
+ Scsi_Device *sdpnt, * sdppnt, * sdpnt1;
+ Scsi_Cmnd * SCpnt;
+ unsigned long flags;
+ struct Scsi_Device_Template * sdtpnt;
+ struct Scsi_Host * shpnt, *sh1;
+ int pcount;
+
+ /* First verify that this host adapter is completely free with no pending
+ * commands */
+
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next)
+ if(sdpnt->host->hostt == tpnt && sdpnt->host->hostt->usage_count
+ && *sdpnt->host->hostt->usage_count) return;
+
+ for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
+ {
+ if (shpnt->hostt != tpnt) continue;
+ for(SCpnt = shpnt->host_queue; SCpnt; SCpnt = SCpnt->next)
+ {
+ save_flags(flags);
+ cli();
+ if(SCpnt->request.rq_status != RQ_INACTIVE) {
+ restore_flags(flags);
+ for(SCpnt = shpnt->host_queue; SCpnt; SCpnt = SCpnt->next)
+ if(SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
+ SCpnt->request.rq_status = RQ_INACTIVE;
+ printk("Device busy???\n");
+ return;
+ }
+ SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING; /* Mark as busy */
+ restore_flags(flags);
+ }
+ }
+ /* Next we detach the high level drivers from the Scsi_Device structures */
+
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next)
+ if(sdpnt->host->hostt == tpnt)
+ {
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->detach) (*sdtpnt->detach)(sdpnt);
+ /* If something still attached, punt */
+ if (sdpnt->attached) {
+ printk("Attached usage count = %d\n", sdpnt->attached);
+ return;
+ }
+ }
+
+ /* Next we free up the Scsi_Cmnd structures for this host */
+
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next)
+ if(sdpnt->host->hostt == tpnt)
+ while (sdpnt->host->host_queue) {
+ SCpnt = sdpnt->host->host_queue->next;
+ scsi_init_free((char *) sdpnt->host->host_queue, sizeof(Scsi_Cmnd));
+ sdpnt->host->host_queue = SCpnt;
+ if (SCpnt) SCpnt->prev = NULL;
+ sdpnt->has_cmdblocks = 0;
+ }
+
+ /* Next free up the Scsi_Device structures for this host */
+
+ sdppnt = NULL;
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt1)
+ {
+ sdpnt1 = sdpnt->next;
+ if (sdpnt->host->hostt == tpnt) {
+ if (sdppnt)
+ sdppnt->next = sdpnt->next;
+ else
+ scsi_devices = sdpnt->next;
+ scsi_init_free((char *) sdpnt, sizeof (Scsi_Device));
+ } else
+ sdppnt = sdpnt;
+ }
+
+ /* Next we go through and remove the instances of the individual hosts
+ * that were detected */
+
+ shpnt = scsi_hostlist;
+ while(shpnt) {
+ sh1 = shpnt->next;
+ if(shpnt->hostt == tpnt) {
+ if(shpnt->loaded_as_module) {
+ pcount = next_scsi_host;
+ /* Remove the /proc/scsi directory entry */
+#if CONFIG_PROC_FS
+ proc_scsi_unregister(tpnt->proc_dir,
+ shpnt->host_no + PROC_SCSI_FILE);
+#endif
+ if(tpnt->release)
+ (*tpnt->release)(shpnt);
+ else {
+ /* This is the default case for the release function.
+ * It should do the right thing for most correctly
+ * written host adapters.
+ */
+ if (shpnt->irq) free_irq(shpnt->irq);
+ if (shpnt->dma_channel != 0xff) free_dma(shpnt->dma_channel);
+ if (shpnt->io_port && shpnt->n_io_port)
+ release_region(shpnt->io_port, shpnt->n_io_port);
+ }
+ if(pcount == next_scsi_host) scsi_unregister(shpnt);
+ tpnt->present--;
+ }
+ }
+ shpnt = sh1;
+ }
+
+ /*
+ * If there are absolutely no more hosts left, it is safe
+ * to completely nuke the DMA pool. The resize operation will
+ * do the right thing and free everything.
+ */
+ if( !scsi_devices )
+ resize_dma_pool();
+
+ printk ("scsi : %d host%s.\n", next_scsi_host,
+ (next_scsi_host == 1) ? "" : "s");
+
+#if defined(USE_STATIC_SCSI_MEMORY)
+ printk ("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
+ (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
+ (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
+ (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
+#endif
+
+ scsi_make_blocked_list();
+
+ /* There were some hosts that were loaded at boot time, so we cannot
+ do any more than this */
+ if (tpnt->present) return;
+
+ /* OK, this is the very last step. Remove this host adapter from the
+ linked list. */
+ for(SHTp=NULL, SHT=scsi_hosts; SHT; SHTp=SHT, SHT=SHT->next)
+ if(SHT == tpnt) {
+ if(SHTp)
+ SHTp->next = SHT->next;
+ else
+ scsi_hosts = SHT->next;
+ SHT->next = NULL;
+ break;
+ }
+
+ /* Rebuild the /proc/scsi directory entries */
+#if CONFIG_PROC_FS
+ proc_scsi_unregister(tpnt->proc_dir, tpnt->proc_dir->low_ino);
+#endif
+ MOD_DEC_USE_COUNT;
+}
+
+/*
+ * This entry point should be called by a loadable module if it is
+ * trying to add a high level scsi driver to the system.
+ */
+static int scsi_register_device_module(struct Scsi_Device_Template * tpnt)
+{
+ Scsi_Device * SDpnt;
+
+ if (tpnt->next) return 1;
+
+ scsi_register_device(tpnt);
+ /*
+ * First scan the devices that we know about, and let the new
+ * driver's detect routine claim the ones it recognizes.
+ */
+
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ if(tpnt->detect) SDpnt->attached += (*tpnt->detect)(SDpnt);
+
+ /*
+ * If any of the devices would match this driver, then perform the
+ * init function.
+ */
+ if(tpnt->init && tpnt->dev_noticed)
+ if ((*tpnt->init)()) return 1;
+
+ /*
+ * Now actually connect the devices to the new driver.
+ */
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ {
+ if(tpnt->attach) (*tpnt->attach)(SDpnt);
+ /*
+ * If a driver is now attached to this device and it does not
+ * have command blocks yet, build them.
+ */
+ if(SDpnt->attached && SDpnt->has_cmdblocks == 0)
+ scsi_build_commandblocks(SDpnt);
+ }
+
+ /*
+ * This does any final handling that is required.
+ */
+ if(tpnt->finish && tpnt->nr_dev) (*tpnt->finish)();
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int scsi_unregister_device(struct Scsi_Device_Template * tpnt)
+{
+ Scsi_Device * SDpnt;
+ Scsi_Cmnd * SCpnt;
+ struct Scsi_Device_Template * spnt;
+ struct Scsi_Device_Template * prev_spnt;
+
+ /*
+ * If we are busy, this is not going to fly.
+ */
+ if( *tpnt->usage_count != 0) return 0;
+ /*
+ * Next, detach the devices from the driver.
+ */
+
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ {
+ if(tpnt->detach) (*tpnt->detach)(SDpnt);
+ if(SDpnt->attached == 0)
+ {
+ /*
+ * Nobody is using this device any more. Free all of the
+ * command structures.
+ */
+ for(SCpnt = SDpnt->host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ {
+ if(SCpnt->device == SDpnt)
+ {
+ if(SCpnt->prev != NULL)
+ SCpnt->prev->next = SCpnt->next;
+ if(SCpnt->next != NULL)
+ SCpnt->next->prev = SCpnt->prev;
+ if(SCpnt == SDpnt->host->host_queue)
+ SDpnt->host->host_queue = SCpnt->next;
+ scsi_init_free((char *) SCpnt, sizeof(*SCpnt));
+ }
+ }
+ SDpnt->has_cmdblocks = 0;
+ }
+ }
+ /*
+ * Extract the template from the linked list.
+ */
+ spnt = scsi_devicelist;
+ prev_spnt = NULL;
+ while(spnt != tpnt)
+ {
+ prev_spnt = spnt;
+ spnt = spnt->next;
+ }
+ if(prev_spnt == NULL)
+ scsi_devicelist = tpnt->next;
+ else
+ prev_spnt->next = spnt->next;
+
+ MOD_DEC_USE_COUNT;
+ /*
+ * Final cleanup for the driver is done in the driver sources in the
+ * cleanup function.
+ */
+ return 0;
+}
+
+
+int scsi_register_module(int module_type, void * ptr)
+{
+ switch(module_type){
+ case MODULE_SCSI_HA:
+ return scsi_register_host((Scsi_Host_Template *) ptr);
+
+ /* Load upper level device handler of some kind */
+ case MODULE_SCSI_DEV:
+ return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
+ /* The rest of these are not yet implemented */
+
+ /* Load constants.o */
+ case MODULE_SCSI_CONST:
+
+ /* Load specialized ioctl handler for some device. Intended for
+ * cdroms that have non-SCSI2 audio command sets. */
+ case MODULE_SCSI_IOCTL:
+
+ default:
+ return 1;
+ }
+}
+
+void scsi_unregister_module(int module_type, void * ptr)
+{
+ switch(module_type) {
+ case MODULE_SCSI_HA:
+ scsi_unregister_host((Scsi_Host_Template *) ptr);
+ break;
+ case MODULE_SCSI_DEV:
+ scsi_unregister_device((struct Scsi_Device_Template *) ptr);
+ break;
+ /* The rest of these are not yet implemented. */
+ case MODULE_SCSI_CONST:
+ case MODULE_SCSI_IOCTL:
+ break;
+ default:
+ break;
+ }
+ return;
+}
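+
+/*
+ * Illustrative sketch (not part of the original file): a low level
+ * host driver built as a loadable module would normally call the two
+ * entry points above from its own init_module()/cleanup_module().
+ * The template name below is hypothetical.
+ */
+#if 0
+static Scsi_Host_Template example_driver_template = { /* driver-specific fields */ };
+
+int init_module(void)
+{
+ /* scsi_register_module returns non-zero on failure */
+ return scsi_register_module(MODULE_SCSI_HA, &example_driver_template);
+}
+
+void cleanup_module(void)
+{
+ scsi_unregister_module(MODULE_SCSI_HA, &example_driver_template);
+}
+#endif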
+
+#endif /* CONFIG_MODULES */
+
+#ifdef DEBUG_TIMEOUT
+static void
+scsi_dump_status(void)
+{
+ int i;
+ struct Scsi_Host * shpnt;
+ Scsi_Cmnd * SCpnt;
+ printk("Dump of scsi parameters:\n");
+ i = 0;
+ for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
+ for(SCpnt=shpnt->host_queue; SCpnt; SCpnt = SCpnt->next)
+ {
+ /* (0) 0:0:0:0 (802 123434 8 8 0) (3 3 2) (%d %d %d) %d %x */
+ printk("(%d) %d:%d:%d:%d (%s %ld %ld %ld %d) (%d %d %x) (%d %d %d) %x %x %x\n",
+ i++, SCpnt->host->host_no,
+ SCpnt->channel,
+ SCpnt->target,
+ SCpnt->lun,
+ kdevname(SCpnt->request.rq_dev),
+ SCpnt->request.sector,
+ SCpnt->request.nr_sectors,
+ SCpnt->request.current_nr_sectors,
+ SCpnt->use_sg,
+ SCpnt->retries,
+ SCpnt->allowed,
+ SCpnt->flags,
+ SCpnt->timeout_per_command,
+ SCpnt->timeout,
+ SCpnt->internal_timeout,
+ SCpnt->cmnd[0],
+ SCpnt->sense_buffer[2],
+ SCpnt->result);
+ }
+ printk("wait_for_request = %p\n", wait_for_request);
+ /* Now dump the request lists for each block device */
+ printk("Dump of pending block device requests\n");
+ for(i=0; i<MAX_BLKDEV; i++)
+ if(blk_dev[i].current_request)
+ {
+ struct request * req;
+ printk("%d: ", i);
+ req = blk_dev[i].current_request;
+ while(req) {
+ printk("(%s %d %ld %ld %ld) ",
+ kdevname(req->rq_dev),
+ req->cmd,
+ req->sector,
+ req->nr_sectors,
+ req->current_nr_sectors);
+ req = req->next;
+ }
+ printk("\n");
+ }
+}
+#endif
+
+#ifdef MODULE
+
+int init_module(void) {
+ unsigned long size;
+
+ /*
+ * This makes /proc/scsi visible.
+ */
+ dispatch_scsi_info_ptr = dispatch_scsi_info;
+
+ timer_table[SCSI_TIMER].fn = scsi_main_timeout;
+ timer_table[SCSI_TIMER].expires = 0;
+ register_symtab(&scsi_symbol_table);
+ scsi_loadable_module_flag = 1;
+
+ /* Register the /proc/scsi/scsi entry */
+#if CONFIG_PROC_FS
+ proc_scsi_register(0, &proc_scsi_scsi);
+#endif
+
+
+ dma_sectors = PAGE_SIZE / SECTOR_SIZE;
+ dma_free_sectors= dma_sectors;
+ /*
+ * Set up a minimal DMA buffer list - this will be used during scan_scsis
+ * in some cases.
+ */
+
+ /* One bit per sector to indicate free/busy */
+ size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
+ dma_malloc_freelist = (unsigned char *) scsi_init_malloc(size, GFP_ATOMIC);
+ memset(dma_malloc_freelist, 0, size);
+
+ /* One pointer per page for the page list */
+ dma_malloc_pages = (unsigned char **)
+ scsi_init_malloc((dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages), GFP_ATOMIC);
+ dma_malloc_pages[0] = (unsigned char *)
+ scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
+ return 0;
+}
+
+void cleanup_module( void)
+{
+#if CONFIG_PROC_FS
+ proc_scsi_unregister(0, PROC_SCSI_SCSI);
+#endif
+
+ /* No, we're not here anymore. Don't show the /proc/scsi files. */
+ dispatch_scsi_info_ptr = 0L;
+
+ /*
+ * Free up the DMA pool.
+ */
+ resize_dma_pool();
+
+ timer_table[SCSI_TIMER].fn = NULL;
+ timer_table[SCSI_TIMER].expires = 0;
+}
+#endif /* MODULE */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/scsi.h b/i386/i386at/gpl/linux/scsi/scsi.h
new file mode 100644
index 00000000..fefe1c73
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/scsi.h
@@ -0,0 +1,618 @@
+/*
+ * scsi.h Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ * generic SCSI package header file by
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ */
+
+#ifndef _SCSI_H
+#define _SCSI_H
+
+/*
+ * Some of the public constants are being moved to this file.
+ * We include it here so that what came from where is transparent.
+ */
+#include <linux/scsi.h>
+
+
+/*
+ * Some defs, in case these are not defined elsewhere.
+ */
+#ifndef TRUE
+# define TRUE 1
+#endif
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+#ifdef MACH
+#ifndef LINUX_SCSI_DEBUG
+#undef DEBUG
+#endif
+#endif
+
+extern void scsi_make_blocked_list(void);
+extern volatile int in_scan_scsis;
+extern const unsigned char scsi_command_size[8];
+#define COMMAND_SIZE(opcode) scsi_command_size[((opcode) >> 5) & 7]
+
+#define IDENTIFY_BASE 0x80
+#define IDENTIFY(can_disconnect, lun) (IDENTIFY_BASE |\
+ ((can_disconnect) ? 0x40 : 0) |\
+ ((lun) & 0x07))
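+
+/*
+ * For illustration: IDENTIFY(1, 2) builds the message byte
+ * 0x80 | 0x40 | 0x02 == 0xc2, i.e. "may disconnect, LUN 2".
+ */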
+
+
+
+/*
+ * The returned status word has the following format:
+ * The low byte is the status returned by the SCSI command,
+ * with vendor specific bits masked.
+ *
+ * The next byte is the message which followed the SCSI status.
+ * This allows a stos to be used, since the Intel is a little
+ * endian machine.
+ *
+ * The final byte is a host return code, which is one of the following.
+ *
+ * i.e.
+ * lsb msb
+ * status msg host code
+ *
+ * These are errors returned by OUR driver, NOT SCSI messages. They
+ * are or'd with any SCSI message passed back to the driver.
+ */
+
+
+#define DID_OK 0x00 /* NO error */
+#define DID_NO_CONNECT 0x01 /* Couldn't connect before timeout period */
+#define DID_BUS_BUSY 0x02 /* BUS stayed busy through time out period */
+#define DID_TIME_OUT 0x03 /* TIMED OUT for other reason */
+#define DID_BAD_TARGET 0x04 /* BAD target. */
+#define DID_ABORT 0x05 /* Told to abort for some other reason */
+#define DID_PARITY 0x06 /* Parity error */
+#define DID_ERROR 0x07 /* Internal error */
+#define DID_RESET 0x08 /* Reset by somebody. */
+#define DID_BAD_INTR 0x09 /* Got an interrupt we weren't expecting. */
+#define DRIVER_OK 0x00 /* Driver status */
+
+/*
+ * These indicate the error that occurred, and what is available.
+ */
+
+#define DRIVER_BUSY 0x01
+#define DRIVER_SOFT 0x02
+#define DRIVER_MEDIA 0x03
+#define DRIVER_ERROR 0x04
+
+#define DRIVER_INVALID 0x05
+#define DRIVER_TIMEOUT 0x06
+#define DRIVER_HARD 0x07
+#define DRIVER_SENSE 0x08
+
+#define SUGGEST_RETRY 0x10
+#define SUGGEST_ABORT 0x20
+#define SUGGEST_REMAP 0x30
+#define SUGGEST_DIE 0x40
+#define SUGGEST_SENSE 0x80
+#define SUGGEST_IS_OK 0xff
+
+#define DRIVER_MASK 0x0f
+#define SUGGEST_MASK 0xf0
+
+#define MAX_COMMAND_SIZE 12
+
+/*
+ * SCSI command sets
+ */
+
+#define SCSI_UNKNOWN 0
+#define SCSI_1 1
+#define SCSI_1_CCS 2
+#define SCSI_2 3
+
+/*
+ * Every SCSI command starts with a one byte OP-code.
+ * The next byte's high three bits are the LUN of the
+ * device. Any multi-byte quantities are stored high byte
+ * first, and may have a 5 bit MSB in the same byte
+ * as the LUN.
+ */
+
+/*
+ * Manufacturers list
+ */
+
+#define SCSI_MAN_UNKNOWN 0
+#define SCSI_MAN_NEC 1
+#define SCSI_MAN_TOSHIBA 2
+#define SCSI_MAN_NEC_OLDCDR 3
+#define SCSI_MAN_SONY 4
+#define SCSI_MAN_PIONEER 5
+
+/*
+ * Because the scsi "do command" functions are intelligent and may
+ * need to redo a command, we keep track of the state of the last
+ * command executed on each device.
+ */
+
+#define WAS_RESET 0x01
+#define WAS_TIMEDOUT 0x02
+#define WAS_SENSE 0x04
+#define IS_RESETTING 0x08
+#define IS_ABORTING 0x10
+#define ASKED_FOR_SENSE 0x20
+
+/*
+ * The scsi_device struct contains what we know about each given scsi
+ * device.
+ */
+
+typedef struct scsi_device {
+ struct scsi_device * next; /* Used for linked list */
+
+ unsigned char id, lun, channel;
+
+ unsigned int manufacturer; /* Manufacturer of device, for using
+ * vendor-specific cmd's */
+ int attached; /* # of high level drivers attached to
+ * this */
+ int access_count; /* Count of open channels/mounts */
+ struct wait_queue * device_wait;/* Used to wait if device is busy */
+ struct Scsi_Host * host;
+ void (*scsi_request_fn)(void); /* Used to jumpstart things after an
+ * ioctl */
+ void *hostdata; /* available to low-level driver */
+ char type;
+ char scsi_level;
+ char vendor[8], model[16], rev[4];
+ unsigned char current_tag; /* current tag */
+ unsigned char sync_min_period; /* Not less than this period */
+ unsigned char sync_max_offset; /* Not greater than this offset */
+
+ unsigned writeable:1;
+ unsigned removable:1;
+ unsigned random:1;
+ unsigned has_cmdblocks:1;
+ unsigned changed:1; /* Data invalid due to media change */
+ unsigned busy:1; /* Used to prevent races */
+ unsigned lockable:1; /* Able to prevent media removal */
+ unsigned borken:1; /* Tell the Seagate driver to be
+ * painfully slow on this device */
+ unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */
+ unsigned tagged_queue:1; /* SCSI-II tagged queuing enabled */
+ unsigned disconnect:1; /* can disconnect */
+ unsigned soft_reset:1; /* Uses soft reset option */
+ unsigned sync:1; /* Negotiate for sync transfers */
+ unsigned single_lun:1; /* Indicates we should only allow I/O to
+ one of the luns for the device at a time. */
+ unsigned was_reset:1; /* There was a bus reset on the bus for this
+ device */
+ unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN
+ because we did a bus reset. */
+} Scsi_Device;
+
+/*
+ * Use these to separate status msg and our bytes
+ */
+
+#define status_byte(result) (((result) >> 1) & 0xf)
+#define msg_byte(result) (((result) >> 8) & 0xff)
+#define host_byte(result) (((result) >> 16) & 0xff)
+#define driver_byte(result) (((result) >> 24) & 0xff)
+#define suggestion(result) (driver_byte(result) & SUGGEST_MASK)
+
+#define sense_class(sense) (((sense) >> 4) & 0x7)
+#define sense_error(sense) ((sense) & 0xf)
+#define sense_valid(sense) ((sense) & 0x80)
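+
+/*
+ * Illustrative sketch (not part of the original header): decoding a
+ * completed command's result word with the macros above. The
+ * function name is hypothetical.
+ */
+#if 0
+static void example_classify_result(int result)
+{
+ if (host_byte(result) != DID_OK)
+ printk("scsi example: host error 0x%x\n", host_byte(result));
+ else if (driver_byte(result) & DRIVER_SENSE)
+ printk("scsi example: sense data available, suggestion 0x%x\n",
+ suggestion(result));
+ else
+ printk("scsi example: status 0x%x, msg 0x%x\n",
+ status_byte(result), msg_byte(result));
+}
+#endif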
+
+/*
+ * These are the SCSI devices available on the system.
+ */
+
+extern Scsi_Device * scsi_devices;
+
+/*
+ * Initializes all SCSI devices. This scans all scsi busses.
+ */
+
+extern int scsi_dev_init (void);
+
+struct scatterlist {
+ char * address; /* Location data is to be transferred to */
+ char * alt_address; /* Location of actual if address is a
+ * dma indirect buffer. NULL otherwise */
+ unsigned int length;
+};
+
+#ifdef __alpha__
+# define ISA_DMA_THRESHOLD (~0UL)
+#else
+# define ISA_DMA_THRESHOLD (0x00ffffff)
+#endif
+#define CONTIGUOUS_BUFFERS(X,Y) ((X->b_data+X->b_size) == Y->b_data)
+
+
+/*
+ * These are the return codes for the abort and reset functions. The mid-level
+ * code uses these to decide what to do next. Each of the low level abort
+ * and reset functions must correctly indicate what it has done.
+ * The descriptions are written from the point of view of the mid-level code,
+ * so that the return code is telling the mid-level drivers exactly what
+ * the low level driver has already done, and what remains to be done.
+ */
+
+/* We did not do anything.
+ * Wait some more for this command to complete, and if this does not work,
+ * try something more serious. */
+#define SCSI_ABORT_SNOOZE 0
+
+/* This means that we were able to abort the command. We have already
+ * called the mid-level done function, and do not expect an interrupt that
+ * will lead to another call to the mid-level done function for this command */
+#define SCSI_ABORT_SUCCESS 1
+
+/* We called for an abort of this command, and we should get an interrupt
+ * when this succeeds. Thus we should not restore the timer for this
+ * command in the mid-level abort function. */
+#define SCSI_ABORT_PENDING 2
+
+/* Unable to abort - command is currently on the bus. Grin and bear it. */
+#define SCSI_ABORT_BUSY 3
+
+/* The command is not active in the low level code. Command probably
+ * finished. */
+#define SCSI_ABORT_NOT_RUNNING 4
+
+/* Something went wrong. The low level driver will indicate the correct
+ * error condition when it calls scsi_done, so the mid-level abort function
+ * can simply wait until this comes through */
+#define SCSI_ABORT_ERROR 5
+
+/* We do not know how to reset the bus, or we do not want to. Bummer.
+ * Anyway, just wait a little more for the command in question, and hope that
+ * it eventually finishes. If it never finishes, the SCSI device could
+ * hang, so use this with caution. */
+#define SCSI_RESET_SNOOZE 0
+
+/* We do not know how to reset the bus, or we do not want to. Bummer.
+ * We have given up on this ever completing. The mid-level code will
+ * request sense information to decide how to proceed from here. */
+#define SCSI_RESET_PUNT 1
+
+/* This means that we were able to reset the bus. We have restarted all of
+ * the commands that should be restarted, and we should be able to continue
+ * on normally from here. We do not expect any interrupts that will return
+ * DID_RESET to any of the other commands in the host_queue, and the mid-level
+ * code does not need to do anything special to keep the commands alive.
+ * If a hard reset was performed then all outstanding commands on the
+ * bus have been restarted. */
+#define SCSI_RESET_SUCCESS 2
+
+/* We called for a reset of this bus, and we should get an interrupt
+ * when this succeeds. Each command should get its own status
+ * passed up to scsi_done, but this has not happened yet.
+ * If a hard reset was performed, then we expect an interrupt
+ * for *each* of the outstanding commands that will have the
+ * effect of restarting the commands.
+ */
+#define SCSI_RESET_PENDING 3
+
+/* We did a reset, but do not expect an interrupt to signal DID_RESET.
+ * This tells the upper level code to request the sense info, and this
+ * should keep the command alive. */
+#define SCSI_RESET_WAKEUP 4
+
+/* Something went wrong, and we do not know how to fix it. */
+#define SCSI_RESET_ERROR 5
+
+/*
+ * This is a bitmask that is ored with one of the above codes.
+ * It tells the mid-level code that we did a hard reset.
+ */
+#define SCSI_RESET_BUS_RESET 0x100
+/*
+ * Used to mask off bits and to obtain the basic action that was
+ * performed.
+ */
+#define SCSI_RESET_ACTION 0xff
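+
+/*
+ * For illustration: a host reset handler that performed a hard bus
+ * reset and restarted everything would return
+ * (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET); the mid-level code
+ * recovers the basic action with (code & SCSI_RESET_ACTION).
+ */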
+
+void * scsi_malloc(unsigned int);
+int scsi_free(void *, unsigned int);
+extern unsigned int dma_free_sectors; /* How much room do we have left */
+extern unsigned int need_isa_buffer; /* True if some devices need indirection
+ * buffers */
+
+/*
+ * The Scsi_Cmnd structure is used by scsi.c internally, and for communication
+ * with low level drivers that support multiple outstanding commands.
+ */
+typedef struct scsi_pointer {
+ char * ptr; /* data pointer */
+ int this_residual; /* left in this buffer */
+ struct scatterlist *buffer; /* which buffer */
+ int buffers_residual; /* how many buffers left */
+
+ volatile int Status;
+ volatile int Message;
+ volatile int have_data_in;
+ volatile int sent_command;
+ volatile int phase;
+} Scsi_Pointer;
+
+typedef struct scsi_cmnd {
+ struct Scsi_Host * host;
+ Scsi_Device * device;
+ unsigned char target, lun, channel;
+ unsigned char cmd_len;
+ unsigned char old_cmd_len;
+ struct scsi_cmnd *next, *prev;
+
+ /* These elements define the operation we are about to perform */
+ unsigned char cmnd[12];
+ unsigned request_bufflen; /* Actual request size */
+
+ void * request_buffer; /* Actual requested buffer */
+
+ /* These elements define the operation we ultimately want to perform */
+ unsigned char data_cmnd[12];
+ unsigned short old_use_sg; /* We save use_sg here when requesting
+ * sense info */
+ unsigned short use_sg; /* Number of pieces of scatter-gather */
+ unsigned short sglist_len; /* size of malloc'd scatter-gather list */
+ unsigned short abort_reason;/* If the mid-level code requests an
+ * abort, this is the reason. */
+ unsigned bufflen; /* Size of data buffer */
+ void *buffer; /* Data buffer */
+
+ unsigned underflow; /* Return error if less than this amount is
+ * transferred */
+
+ unsigned transfersize; /* How much we are guaranteed to transfer with
+ * each SCSI transfer (i.e. between disconnects/
+ * reconnects). Probably == sector size */
+
+
+ struct request request; /* A copy of the command we are working on */
+
+ unsigned char sense_buffer[16]; /* Sense for this command, if needed */
+
+
+ int retries;
+ int allowed;
+ int timeout_per_command, timeout_total, timeout;
+
+ /*
+ * We handle the timeout differently if it happens when a reset,
+ * abort, etc are in process.
+ */
+ unsigned volatile char internal_timeout;
+
+ unsigned flags;
+
+ /* These variables are for the cdrom only. Once we have variable size
+ * buffers in the buffer cache, they will go away. */
+ int this_count;
+ /* End of special cdrom variables */
+
+ /* Low-level done function - can be used by low-level driver to point
+ * to completion function. Not used by mid/upper level code. */
+ void (*scsi_done)(struct scsi_cmnd *);
+ void (*done)(struct scsi_cmnd *); /* Mid-level done function */
+
+ /*
+ * The following fields can be written to by the host specific code.
+ * Everything else should be left alone.
+ */
+
+ Scsi_Pointer SCp; /* Scratchpad used by some host adapters */
+
+ unsigned char * host_scribble; /* The host adapter is allowed to
+ * call scsi_malloc and get some memory
+ * and hang it here. The host adapter
+ * is also expected to call scsi_free
+ * to release this memory. (The memory
+ * obtained by scsi_malloc is guaranteed
+ * to be at an address < 16Mb). */
+
+ int result; /* Status code from lower level driver */
+
+ unsigned char tag; /* SCSI-II queued command tag */
+ unsigned long pid; /* Process ID, starts at 0 */
+} Scsi_Cmnd;
+
+/*
+ * scsi_abort aborts the command currently executing on the given host.
+ * The error code, if non-zero, is returned in the host byte; otherwise
+ * DID_ABORT is returned in the host byte.
+ */
+
+extern int scsi_abort (Scsi_Cmnd *, int code, int pid);
+
+extern void scsi_do_cmd (Scsi_Cmnd *, const void *cmnd ,
+ void *buffer, unsigned bufflen,
+ void (*done)(struct scsi_cmnd *),
+ int timeout, int retries);
+
+
+extern Scsi_Cmnd * allocate_device(struct request **, Scsi_Device *, int);
+
+extern Scsi_Cmnd * request_queueable(struct request *, Scsi_Device *);
+extern int scsi_reset (Scsi_Cmnd *, int);
+
+extern int max_scsi_hosts;
+
+extern void proc_print_scsidevice(Scsi_Device *, char *, int *, int);
+
+extern void print_command(unsigned char *);
+extern void print_sense(const char *, Scsi_Cmnd *);
+extern void print_driverbyte(int scsiresult);
+extern void print_hostbyte(int scsiresult);
+
+extern void scsi_mark_host_bus_reset(struct Scsi_Host *Host);
+
+#if defined(MAJOR_NR) && (MAJOR_NR != SCSI_TAPE_MAJOR)
+#include "hosts.h"
+
+static Scsi_Cmnd * end_scsi_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
+{
+ struct request * req;
+ struct buffer_head * bh;
+
+ req = &SCpnt->request;
+ if (!uptodate) {
+ printk(DEVICE_NAME " I/O error: dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
+#ifdef MACH
+ req->errors = 1;
+ while (req->bh) {
+ bh = req->bh;
+ req->bh = bh->b_reqnext;
+ mark_buffer_uptodate(bh, 0);
+ unlock_buffer(bh);
+ }
+ goto done;
+#endif
+ }
+
+ do {
+ if ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ req->nr_sectors -= bh->b_size >> 9;
+ req->sector += bh->b_size >> 9;
+ bh->b_reqnext = NULL;
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+ sectors -= bh->b_size >> 9;
+ if ((bh = req->bh) != NULL) {
+ req->current_nr_sectors = bh->b_size >> 9;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ req->nr_sectors = req->current_nr_sectors;
+ printk("end_scsi_request: buffer-list destroyed\n");
+ }
+ }
+ }
+ } while(sectors && bh);
+ if (req->bh){
+ req->buffer = bh->b_data;
+ return SCpnt;
+ };
+#ifdef MACH
+ req->errors = 0;
+
+done:
+#endif
+ DEVICE_OFF(req->rq_dev);
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+
+ if (SCpnt->host->block) {
+ struct Scsi_Host * next;
+
+ for (next = SCpnt->host->block; next != SCpnt->host;
+ next = next->block)
+ wake_up(&next->host_wait);
+ }
+
+ req->rq_status = RQ_INACTIVE;
+#ifndef MACH
+ wake_up(&wait_for_request);
+#endif
+ wake_up(&SCpnt->device->device_wait);
+#ifdef MACH
+ {
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ (*blk_dev[MAJOR(req->rq_dev)].request_fn)();
+ restore_flags(flags);
+ }
+#endif
+ return NULL;
+}
+
+
+/* This is just like INIT_REQUEST, but we need to be aware of the fact
+ * that an interrupt may start another request, so we run this with interrupts
+ * turned off
+ */
+#define INIT_SCSI_REQUEST \
+ if (!CURRENT) { \
+ CLEAR_INTR; \
+ restore_flags(flags); \
+ return; \
+ } \
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
+ panic(DEVICE_NAME ": request list destroyed");\
+ if (CURRENT->bh) { \
+ if (!buffer_locked(CURRENT->bh)) \
+ panic(DEVICE_NAME ": block not locked"); \
+ }
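+
+/*
+ * For illustration: INIT_SCSI_REQUEST is meant to be expanded at the
+ * top of an upper level driver's request function, after
+ * save_flags()/cli(), so that the CURRENT request cannot change
+ * underneath it (hence the restore_flags(flags) inside the macro).
+ */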
+#endif
+
+#ifdef MACH
+#define SCSI_SLEEP(QUEUE, CONDITION) { \
+ if (CONDITION) { \
+ struct wait_queue wait = { NULL, NULL}; \
+ add_wait_queue(QUEUE, &wait); \
+ for(;;) { \
+ if (CONDITION) { \
+ if (intr_count) \
+ panic("scsi: trying to call schedule() in interrupt" \
+ ", file %s, line %d.\n", __FILE__, __LINE__); \
+ schedule(); \
+ } \
+ else \
+ break; \
+ } \
+ remove_wait_queue(QUEUE, &wait);\
+ }; }
+#else /* ! MACH */
+#define SCSI_SLEEP(QUEUE, CONDITION) { \
+ if (CONDITION) { \
+ struct wait_queue wait = { current, NULL}; \
+ add_wait_queue(QUEUE, &wait); \
+ for(;;) { \
+ current->state = TASK_UNINTERRUPTIBLE; \
+ if (CONDITION) { \
+ if (intr_count) \
+ panic("scsi: trying to call schedule() in interrupt" \
+ ", file %s, line %d.\n", __FILE__, __LINE__); \
+ schedule(); \
+ } \
+ else \
+ break; \
+ } \
+ remove_wait_queue(QUEUE, &wait);\
+ current->state = TASK_RUNNING; \
+ }; }
+#endif /* ! MACH */
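+
+/*
+ * For illustration: the mid-level code uses SCSI_SLEEP to block until
+ * a condition clears, e.g.
+ *
+ * SCSI_SLEEP(&SDpnt->device_wait, SDpnt->busy);
+ *
+ * which sleeps on the device's wait queue while the device is marked
+ * busy (see the Scsi_Device fields above).
+ */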
+
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/scsi_debug.c b/i386/i386at/gpl/linux/scsi/scsi_debug.c
new file mode 100644
index 00000000..e98f53e2
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/scsi_debug.c
@@ -0,0 +1,710 @@
+/* $Id: scsi_debug.c,v 1.1.1.1 1997/02/25 21:27:51 thomas Exp $
+ * linux/kernel/scsi_debug.c
+ *
+ * Copyright (C) 1992 Eric Youngdale
+ * Simulate a host adapter with 2 disks attached. Do a lot of checking
+ * to make sure that we are not getting blocks mixed up, and panic if
+ * anything out of the ordinary is seen.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#include "sd.h"
+
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_scsi_debug = {
+ PROC_SCSI_SCSI_DEBUG, 10, "scsi_debug",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+
+/* A few options that we want selected */
+
+/* Do not attempt to use a timer to simulate a real disk with latency */
+/* Only use this in the actual kernel, not in the simulator. */
+#define IMMEDIATE
+
+/* Skip some consistency checking. Good for benchmarking */
+#define SPEEDY
+
+/* Number of real scsi disks that will be detected ahead of time */
+static int NR_REAL=-1;
+
+#define NR_BLK_DEV 12
+#ifndef MAJOR_NR
+#define MAJOR_NR 8
+#endif
+#define START_PARTITION 4
+#define SCSI_DEBUG_TIMER 20
+/* Number of jiffies to wait before completing a command */
+#define DISK_SPEED 10
+#define CAPACITY (0x80000)
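+/* 0x80000 512-byte sectors, i.e. a 256 MB simulated disk */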
+
+static int starts[] = {4, 1000, 50000, CAPACITY, 0};
+static int npart = 0;
+
+#include "scsi_debug.h"
+#ifdef DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+#ifdef SPEEDY
+#define VERIFY1_DEBUG(RW) 1
+#define VERIFY_DEBUG(RW) 1
+#else
+
+#define VERIFY1_DEBUG(RW) \
+ if (bufflen != 1024) {printk("%d", bufflen); panic("(1)Bad bufflen");}; \
+ start = 0; \
+ if ((MINOR(SCpnt->request.rq_dev) & 0xf) != 0) start = starts[(MINOR(SCpnt->request.rq_dev) & 0xf) - 1]; \
+ if (bh){ \
+ if (bh->b_size != 1024) panic ("Wrong bh size"); \
+ if ((bh->b_blocknr << 1) + start != block) \
+ { printk("Wrong bh block# %d %d ",bh->b_blocknr, block); \
+ panic ("Wrong bh block#"); \
+ }; \
+ if (bh->b_dev != SCpnt->request.rq_dev) \
+ panic ("Bad bh target"); \
+ };
+
+#define VERIFY_DEBUG(RW) \
+ if (bufflen != 1024 && (!SCpnt->use_sg)) {printk("%x %d\n ",bufflen, SCpnt->use_sg); panic("Bad bufflen");}; \
+ start = 0; \
+ if ((MINOR(SCpnt->request.rq_dev) & 0xf) > npart) panic ("Bad partition"); \
+ if ((MINOR(SCpnt->request.rq_dev) & 0xf) != 0) start = starts[(MINOR(SCpnt->request.rq_dev) & 0xf) - 1]; \
+ if (SCpnt->request.cmd != RW) panic ("Wrong operation"); \
+ if (SCpnt->request.sector + start != block) panic("Wrong block."); \
+ if (SCpnt->request.current_nr_sectors != 2 && (!SCpnt->use_sg)) panic ("Wrong # blocks"); \
+ if (SCpnt->request.bh){ \
+ if (SCpnt->request.bh->b_size != 1024) panic ("Wrong bh size"); \
+ if ((SCpnt->request.bh->b_blocknr << 1) + start != block) \
+ { printk("Wrong bh block# %d %d ",SCpnt->request.bh->b_blocknr, block); \
+ panic ("Wrong bh block#"); \
+ }; \
+ if (SCpnt->request.bh->b_dev != SCpnt->request.rq_dev) \
+ panic ("Bad bh target");\
+ };
+#endif
+
+static volatile void (*do_done[SCSI_DEBUG_MAILBOXES])(Scsi_Cmnd *) = {NULL, };
+extern void scsi_debug_interrupt();
+
+volatile Scsi_Cmnd * SCint[SCSI_DEBUG_MAILBOXES] = {NULL,};
+static char SCrst[SCSI_DEBUG_MAILBOXES] = {0,};
+static volatile unsigned int timeout[8] ={0,};
+
+/*
+ * Flag used to simulate bus lockups.
+ */
+static int scsi_debug_lockup = 0;
+
+static char sense_buffer[128] = {0,};
+
+static void scsi_dump(Scsi_Cmnd * SCpnt, int flag){
+ int i;
+#if 0
+ unsigned char * pnt;
+#endif
+ unsigned int * lpnt;
+ struct scatterlist * sgpnt = NULL;
+ printk("use_sg: %d",SCpnt->use_sg);
+ if (SCpnt->use_sg){
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+ lpnt = (int *) sgpnt[i].alt_address;
+ printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
+ if (lpnt) printk(" (Alt %x) ",lpnt[15]);
+ };
+ } else {
+ printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+ lpnt = (int *) SCpnt->request.buffer;
+ if (lpnt) printk(" (Alt %x) ",lpnt[15]);
+ };
+ lpnt = (unsigned int *) SCpnt;
+ for (i=0;i<sizeof(Scsi_Cmnd)/4+1; i++) {
+ if ((i & 7) == 0) printk("\n");
+ printk("%x ",*lpnt++);
+ };
+ printk("\n");
+ if (flag == 0) return;
+ lpnt = (unsigned int *) sgpnt[0].alt_address;
+ for (i=0;i<sizeof(Scsi_Cmnd)/4+1; i++) {
+ if ((i & 7) == 0) printk("\n");
+ printk("%x ",*lpnt++);
+ };
+#if 0
+ printk("\n");
+ lpnt = (unsigned int *) sgpnt[0].address;
+ for (i=0;i<sizeof(Scsi_Cmnd)/4+1; i++) {
+ if ((i & 7) == 0) printk("\n");
+ printk("%x ",*lpnt++);
+ };
+ printk("\n");
+#endif
+ printk("DMA free %d sectors.\n", dma_free_sectors);
+}
+
+int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ unchar *cmd = (unchar *) SCpnt->cmnd;
+ struct partition * p;
+ int block, start;
+ struct buffer_head * bh = NULL;
+ unsigned char * buff;
+ int nbytes, sgcount;
+ int scsi_debug_errsts;
+ struct scatterlist * sgpnt;
+ int target = SCpnt->target;
+ int bufflen = SCpnt->request_bufflen;
+ unsigned long flags;
+ int i;
+ sgcount = 0;
+ sgpnt = NULL;
+
+ DEB(if (target > 1) { SCpnt->result = DID_TIME_OUT << 16;done(SCpnt);return 0;});
+
+ buff = (unsigned char *) SCpnt->request_buffer;
+
+ if(target>=1 || SCpnt->lun != 0) {
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ return 0;
+ };
+
+ if( SCrst[target] != 0 && !scsi_debug_lockup )
+ {
+ SCrst[target] = 0;
+ memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+ SCpnt->sense_buffer[0] = 0x70;
+ SCpnt->sense_buffer[2] = UNIT_ATTENTION;
+ SCpnt->result = (CHECK_CONDITION << 1);
+ done(SCpnt);
+ }
+ switch(*cmd){
+ case REQUEST_SENSE:
+ printk("Request sense...\n");
+#ifndef DEBUG
+ {
+ int i;
+ printk("scsi_debug: Requesting sense buffer (%x %x %x %d):", SCpnt, buff, done, bufflen);
+ for(i=0;i<12;i++) printk("%d ",sense_buffer[i]);
+ printk("\n");
+ };
+#endif
+ memset(buff, 0, bufflen);
+ memcpy(buff, sense_buffer, bufflen);
+ memset(sense_buffer, 0, sizeof(sense_buffer));
+ SCpnt->result = 0;
+ done(SCpnt);
+ return 0;
+ case ALLOW_MEDIUM_REMOVAL:
+ if(cmd[4]) printk("Medium removal inhibited...");
+ else printk("Medium removal enabled...");
+ scsi_debug_errsts = 0;
+ break;
+ case INQUIRY:
+ printk("Inquiry...(%x %d)\n", buff, bufflen);
+ memset(buff, 0, bufflen);
+ buff[0] = TYPE_DISK;
+ buff[1] = 0x80; /* Removable disk */
+ buff[2] = 1;
+ buff[4] = 33 - 5;
+ memcpy(&buff[8],"Foo Inc",7);
+ memcpy(&buff[16],"XYZZY",5);
+ memcpy(&buff[32],"1",1);
+ scsi_debug_errsts = 0;
+ break;
+ case TEST_UNIT_READY:
+ printk("Test unit ready(%x %d)\n", buff, bufflen);
+ if (buff)
+ memset(buff, 0, bufflen);
+ scsi_debug_errsts = 0;
+ break;
+ case READ_CAPACITY:
+ printk("Read Capacity\n");
+ if(NR_REAL < 0) NR_REAL = (MINOR(SCpnt->request.rq_dev) >> 4) & 0x0f;
+ memset(buff, 0, bufflen);
+ buff[0] = (CAPACITY >> 24);
+ buff[1] = (CAPACITY >> 16) & 0xff;
+ buff[2] = (CAPACITY >> 8) & 0xff;
+ buff[3] = CAPACITY & 0xff;
+ buff[6] = 2; /* 512 byte sectors */
+ scsi_debug_errsts = 0;
+ break;
+ case READ_10:
+ case READ_6:
+#ifdef DEBUG
+ printk("Read...");
+#endif
+ if ((*cmd) == READ_10)
+ block = cmd[5] + (cmd[4] << 8) + (cmd[3] << 16) + (cmd[2] << 24);
+ else
+ block = cmd[3] + (cmd[2] << 8) + ((cmd[1] & 0x1f) << 16);
+ VERIFY_DEBUG(READ);
+#if defined(SCSI_SETUP_LATENCY) || defined(SCSI_DATARATE)
+ {
+ int delay = SCSI_SETUP_LATENCY;
+ double usec;
+
+ usec = 0.0;
+ usec = (SCpnt->request.nr_sectors << 9) * 1.0e6 / SCSI_DATARATE;
+ delay += usec;
+ if(delay) usleep(delay);
+ };
+#endif
+
+#ifdef DEBUG
+ printk("(r%d)",SCpnt->request.nr_sectors);
+#endif
+ nbytes = bufflen;
+ if(SCpnt->use_sg){
+ sgcount = 0;
+ sgpnt = (struct scatterlist *) buff;
+ buff = sgpnt[sgcount].address;
+ bufflen = sgpnt[sgcount].length;
+ bh = SCpnt->request.bh;
+ };
+ scsi_debug_errsts = 0;
+ do{
+ VERIFY1_DEBUG(READ);
+ /* For the speedy test, we do not even want to fill the buffer with anything */
+#ifndef SPEEDY
+ memset(buff, 0, bufflen);
+#endif
+ /* If this is block 0, then we want to read the partition table for this
+ * device. Let's make one up */
+ if(block == 0 && target == 0) {
+ memset(buff, 0, bufflen);
+ *((unsigned short *) (buff+510)) = 0xAA55;
+ p = (struct partition* ) (buff + 0x1be);
+ npart = 0;
+ while(starts[npart+1]){
+ p->start_sect = starts[npart];
+ p->nr_sects = starts[npart+1] - starts [npart];
+ p->sys_ind = 0x81; /* Linux partition */
+ p++;
+ npart++;
+ };
+ scsi_debug_errsts = 0;
+ break;
+ };
+#ifdef DEBUG
+ if (SCpnt->use_sg) printk("Block %x (%d %d)\n",block, SCpnt->request.nr_sectors,
+ SCpnt->request.current_nr_sectors);
+#endif
+
+#if 0
+ /* Simulate a disk change */
+ if(block == 0xfff0) {
+ sense_buffer[0] = 0x70;
+ sense_buffer[2] = UNIT_ATTENTION;
+ starts[0] += 10;
+ starts[1] += 10;
+ starts[2] += 10;
+
+#ifdef DEBUG
+ {
+ int i;
+ printk("scsi_debug: Filling sense buffer:");
+ for(i=0;i<12;i++) printk("%d ",sense_buffer[i]);
+ printk("\n");
+ };
+#endif
+ scsi_debug_errsts = (COMMAND_COMPLETE << 8) | (CHECK_CONDITION << 1);
+ break;
+ } /* End phony disk change code */
+#endif
+
+#ifndef SPEEDY
+ memcpy(buff, &target, sizeof(target));
+ memcpy(buff+sizeof(target), cmd, 24);
+ memcpy(buff+60, &block, sizeof(block));
+ memcpy(buff+64, SCpnt, sizeof(Scsi_Cmnd));
+#endif
+ nbytes -= bufflen;
+ if(SCpnt->use_sg){
+#ifndef SPEEDY
+ memcpy(buff+128, bh, sizeof(struct buffer_head));
+#endif
+ block += bufflen >> 9;
+ bh = bh->b_reqnext;
+ sgcount++;
+ if (nbytes) {
+ if(!bh) panic("Too few blocks for linked request.");
+ buff = sgpnt[sgcount].address;
+ bufflen = sgpnt[sgcount].length;
+ };
+ }
+ } while(nbytes);
+
+ SCpnt->result = 0;
+ (done)(SCpnt);
+ return 0;
+
+ if (SCpnt->use_sg && !scsi_debug_errsts)
+ if(bh) scsi_dump(SCpnt, 0);
+ break;
+ case WRITE_10:
+ case WRITE_6:
+#ifdef DEBUG
+ printk("Write\n");
+#endif
+ if ((*cmd) == WRITE_10)
+ block = cmd[5] + (cmd[4] << 8) + (cmd[3] << 16) + (cmd[2] << 24);
+ else
+ block = cmd[3] + (cmd[2] << 8) + ((cmd[1] & 0x1f) << 16);
+ VERIFY_DEBUG(WRITE);
+ /* printk("(w%d)",SCpnt->request.nr_sectors); */
+ if (SCpnt->use_sg){
+ if ((bufflen >> 9) != SCpnt->request.nr_sectors)
+ panic ("Trying to write wrong number of blocks\n");
+ sgpnt = (struct scatterlist *) buff;
+ buff = sgpnt[sgcount].address;
+ };
+#if 0
+ if (block != *((unsigned long *) (buff+60))) {
+ printk("%x %x :",block, *((unsigned long *) (buff+60)));
+ scsi_dump(SCpnt,1);
+ panic("Bad block written.\n");
+ };
+#endif
+ scsi_debug_errsts = 0;
+ break;
+ default:
+ printk("Unknown command %d\n",*cmd);
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ return 0;
+ };
+
+ save_flags(flags);
+ cli();
+ for(i=0;i<SCSI_DEBUG_MAILBOXES; i++){
+ if (SCint[i] == 0) break;
+ };
+
+ if (i >= SCSI_DEBUG_MAILBOXES || SCint[i] != 0)
+ panic("Unable to find empty SCSI_DEBUG command slot.\n");
+
+ SCint[i] = SCpnt;
+
+ if (done) {
+ DEB(printk("scsi_debug_queuecommand: now waiting for interrupt "););
+ if (do_done[i])
+ printk("scsi_debug_queuecommand: Two concurrent queuecommand?\n");
+ else
+ do_done[i] = done;
+ }
+ else
+ printk("scsi_debug_queuecommand: done cant be NULL\n");
+
+#ifdef IMMEDIATE
+ if( !scsi_debug_lockup )
+ {
+ SCpnt->result = scsi_debug_errsts;
+ scsi_debug_intr_handle(); /* No timer - do this one right away */
+ }
+ restore_flags(flags);
+#else
+ timeout[i] = jiffies+DISK_SPEED;
+
+ /* If no timers active, then set this one */
+ if ((timer_active & (1 << SCSI_DEBUG_TIMER)) == 0) {
+ timer_table[SCSI_DEBUG_TIMER].expires = timeout[i];
+ timer_active |= 1 << SCSI_DEBUG_TIMER;
+ };
+
+ SCpnt->result = scsi_debug_errsts;
+ restore_flags(flags);
+
+#if 0
+ printk("Sending command (%d %x %d %d)...", i, done, timeout[i],jiffies);
+#endif
+#endif
+
+ return 0;
+}
+
+volatile static int internal_done_flag = 0;
+volatile static int internal_done_errcode = 0;
+static void internal_done(Scsi_Cmnd * SCpnt)
+{
+ internal_done_errcode = SCpnt->result;
+ ++internal_done_flag;
+}
+
+int scsi_debug_command(Scsi_Cmnd * SCpnt)
+{
+ DEB(printk("scsi_debug_command: ..calling scsi_debug_queuecommand\n"));
+ scsi_debug_queuecommand(SCpnt, internal_done);
+
+ while (!internal_done_flag);
+ internal_done_flag = 0;
+ return internal_done_errcode;
+}
+
+/* A "high" level interrupt handler. This should be called once per jiffy
+ * to simulate a regular scsi disk. We use a timer to do this. */
+
+static void scsi_debug_intr_handle(void)
+{
+ Scsi_Cmnd * SCtmp;
+ int i, pending;
+ void (*my_done)(Scsi_Cmnd *);
+ unsigned long flags;
+ int to;
+
+#ifndef IMMEDIATE
+ timer_table[SCSI_DEBUG_TIMER].expires = 0;
+ timer_active &= ~(1 << SCSI_DEBUG_TIMER);
+#endif
+
+ repeat:
+ save_flags(flags);
+ cli();
+ for(i=0;i<SCSI_DEBUG_MAILBOXES; i++) {
+ if (SCint[i] == 0) continue;
+#ifndef IMMEDIATE
+ if (timeout[i] == 0) continue;
+ if (timeout[i] <= jiffies) break;
+#else
+ break;
+#endif
+ };
+
+ if(i == SCSI_DEBUG_MAILBOXES){
+#ifndef IMMEDIATE
+ pending = INT_MAX;
+ for(i=0;i<SCSI_DEBUG_MAILBOXES; i++) {
+ if (SCint[i] == 0) continue;
+ if (timeout[i] == 0) continue;
+ if (timeout[i] <= jiffies) {restore_flags(flags); goto repeat;};
+ if (timeout[i] > jiffies) {
+ if (pending > timeout[i]) pending = timeout[i];
+ continue;
+ };
+ };
+ if (pending && pending != INT_MAX) {
+ timer_table[SCSI_DEBUG_TIMER].expires =
+ (pending <= jiffies ? jiffies+1 : pending);
+ timer_active |= 1 << SCSI_DEBUG_TIMER;
+ };
+ restore_flags(flags);
+#endif
+ return;
+ };
+
+ if(i < SCSI_DEBUG_MAILBOXES){
+ to = timeout[i]; /* capture the timeout before it is cleared */
+ timeout[i] = 0;
+ my_done = do_done[i];
+ do_done[i] = NULL;
+ SCtmp = (Scsi_Cmnd *) SCint[i];
+ SCint[i] = NULL;
+ restore_flags(flags);
+
+ if (!my_done) {
+ printk("scsi_debug_intr_handle: Unexpected interrupt\n");
+ return;
+ }
+
+#ifdef DEBUG
+ printk("In intr_handle...");
+ printk("...done %d %x %d %d\n",i , my_done, to, jiffies);
+ printk("In intr_handle: %d %x %x\n",i, SCtmp, my_done);
+#endif
+
+ my_done(SCtmp);
+#ifdef DEBUG
+ printk("Called done.\n");
+#endif
+ };
+ goto repeat;
+}
+
+
+int scsi_debug_detect(Scsi_Host_Template * tpnt)
+{
+ tpnt->proc_dir = &proc_scsi_scsi_debug;
+#ifndef IMMEDIATE
+ timer_table[SCSI_DEBUG_TIMER].fn = scsi_debug_intr_handle;
+ timer_table[SCSI_DEBUG_TIMER].expires = 0;
+#endif
+ return 1;
+}
+
+int scsi_debug_abort(Scsi_Cmnd * SCpnt)
+{
+ int j;
+ void (*my_done)(Scsi_Cmnd *);
+ unsigned long flags;
+
+ DEB(printk("scsi_debug_abort\n"));
+#if 0
+ SCpnt->result = SCpnt->abort_reason << 16;
+ for(j=0;j<SCSI_DEBUG_MAILBOXES; j++) {
+ if(SCpnt == SCint[j]) {
+ my_done = do_done[j];
+ my_done(SCpnt);
+ save_flags(flags);
+ cli();
+ timeout[j] = 0;
+ SCint[j] = NULL;
+ do_done[j] = NULL;
+ restore_flags(flags);
+ };
+ };
+#endif
+ return SCSI_ABORT_SNOOZE;
+}
+
+int scsi_debug_biosparam(Disk * disk, kdev_t dev, int* info){
+ int size = disk->capacity;
+ info[0] = 32;
+ info[1] = 64;
+ info[2] = (size + 2047) >> 11;
+ if (info[2] >= 1024) info[2] = 1024;
+ return 0;
+}
+
+int scsi_debug_reset(Scsi_Cmnd * SCpnt)
+{
+ int i;
+ unsigned long flags;
+
+ void (*my_done)(Scsi_Cmnd *);
+ printk("Bus unlocked by reset(%d)\n", SCpnt->host->suggest_bus_reset);
+ scsi_debug_lockup = 0;
+ DEB(printk("scsi_debug_reset called\n"));
+ for(i=0;i<SCSI_DEBUG_MAILBOXES; i++) {
+ if (SCint[i] == NULL) continue;
+ SCint[i]->result = DID_RESET << 16;
+ my_done = do_done[i];
+ my_done(SCint[i]);
+ save_flags(flags);
+ cli();
+ SCint[i] = NULL;
+ do_done[i] = NULL;
+ timeout[i] = 0;
+ restore_flags(flags);
+ }
+ return SCSI_RESET_SUCCESS;
+}
+
+const char *scsi_debug_info(void)
+{
+ static char buffer[] = " "; /* looks nicer without anything here */
+ return buffer;
+}
+
+/* scsi_debug_proc_info
+ * /proc/scsi interface for this driver. Reads return a short description;
+ * writes of "scsi_debug lockup" / "scsi_debug unlock" simulate or clear a
+ * locked-up bus so that the reset handling can be exercised.
+ */
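+/*
+ * A sketch of driving this from a shell; the exact /proc path is an
+ * assumption based on how the per-host proc files are registered:
+ *
+ *	echo -n "scsi_debug lockup" > /proc/scsi/scsi_debug/0
+ *	echo -n "scsi_debug unlock" > /proc/scsi/scsi_debug/0
+ *
+ * (-n matters: the command length is checked exactly, so a trailing
+ * newline would be rejected.)
+ */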
+int scsi_debug_proc_info(char *buffer, char **start, off_t offset,
+ int length, int inode, int inout)
+{
+ int len, pos, begin;
+ int orig_length;
+
+ if(inout == 1)
+ {
+ /* First check for the Signature */
+ if (length >= 10 && strncmp(buffer, "scsi_debug", 10) == 0) {
+ buffer += 11;
+ length -= 11;
+ /*
+ * OK, we are getting some kind of command. Figure out
+ * what we are supposed to do here. Simulate bus lockups
+ * to test our reset capability.
+ */
+ if( length == 6 && strncmp(buffer, "lockup", length) == 0 )
+ {
+ scsi_debug_lockup = 1;
+ return length;
+ }
+
+ if( length == 6 && strncmp(buffer, "unlock", length) == 0 )
+ {
+ scsi_debug_lockup = 0;
+ return length;
+ }
+
+ printk("Unknown command:%s\n", buffer);
+ } else
+ printk("Wrong Signature:%10s\n", (char *) ((ulong)buffer-11));
+
+ return -EINVAL;
+
+ }
+
+ begin = 0;
+ pos = len = sprintf(buffer,
+ "This driver is not a real scsi driver, but it plays one on TV.\n"
+ "It is very handy for debugging specific problems because you\n"
+ "can simulate a variety of error conditions\n");
+ if(pos < offset)
+ {
+ len = 0;
+ begin = pos;
+ }
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin);
+ if(len > length)
+ len = length;
+
+ return(len);
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but that will come later */
+Scsi_Host_Template driver_template = SCSI_DEBUG;
+
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/scsi_debug.h b/i386/i386at/gpl/linux/scsi/scsi_debug.h
new file mode 100644
index 00000000..87ae155f
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/scsi_debug.h
@@ -0,0 +1,30 @@
+#ifndef _SCSI_DEBUG_H
+#define _SCSI_DEBUG_H
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+int scsi_debug_detect(Scsi_Host_Template *);
+int scsi_debug_command(Scsi_Cmnd *);
+int scsi_debug_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int scsi_debug_abort(Scsi_Cmnd *);
+int scsi_debug_biosparam(Disk *, kdev_t, int[]);
+int scsi_debug_reset(Scsi_Cmnd *);
+int scsi_debug_proc_info(char *, char **, off_t, int, int, int);
+
+#ifndef NULL
+ #define NULL 0
+#endif
+
+
+#define SCSI_DEBUG_MAILBOXES 8
+
+#define SCSI_DEBUG {NULL, NULL, NULL, scsi_debug_proc_info, \
+ "SCSI DEBUG", scsi_debug_detect, NULL, \
+ NULL, scsi_debug_command, \
+ scsi_debug_queuecommand, \
+ scsi_debug_abort, \
+ scsi_debug_reset, \
+ NULL, \
+ scsi_debug_biosparam, \
+ SCSI_DEBUG_MAILBOXES, 7, SG_ALL, 1, 0, 1, ENABLE_CLUSTERING}
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/scsi_ioctl.c b/i386/i386at/gpl/linux/scsi/scsi_ioctl.c
new file mode 100644
index 00000000..11d57bae
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/scsi_ioctl.c
@@ -0,0 +1,397 @@
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/segment.h>
+#include <asm/system.h>
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "scsi_ioctl.h"
+
+#define MAX_RETRIES 5
+#define MAX_TIMEOUT 900
+#define MAX_BUF 4096
+
+#define max(a,b) (((a) > (b)) ? (a) : (b))
+
+/*
+ * If we are told to probe a host, we will return 0 if the host is not
+ * present, 1 if the host is present, and, if arg is non-null, will copy
+ * an identifying string back to *arg, limited to the length stored at
+ * (int *) arg.
+ */
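+/*
+ * A minimal caller-side sketch (the 64-byte buffer is an arbitrary
+ * assumption):
+ *
+ *	char id[64];
+ *	*(int *) id = sizeof(id);	// room available for the string
+ *	if (ioctl(fd, SCSI_IOCTL_PROBE_HOST, id) > 0)
+ *		printf("host present: %s\n", id);
+ */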
+
+static int ioctl_probe(struct Scsi_Host * host, void *buffer)
+{
+ int temp, result;
+ unsigned int len,slen;
+ const char * string;
+
+ if ((temp = host->hostt->present) && buffer) {
+ result = verify_area(VERIFY_READ, buffer, sizeof(long));
+ if (result) return result;
+
+ len = get_user ((unsigned int *) buffer);
+ if(host->hostt->info)
+ string = host->hostt->info(host);
+ else
+ string = host->hostt->name;
+ if(string) {
+ slen = strlen(string);
+ if (len > slen)
+ len = slen + 1;
+ result = verify_area(VERIFY_WRITE, buffer, len);
+ if (result) return result;
+
+ memcpy_tofs (buffer, string, len);
+ }
+ }
+ return temp;
+}
+
+/*
+ *
+ * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host.
+ * The MAX_TIMEOUT and MAX_RETRIES variables are used.
+ *
+ * dev is the SCSI device struct ptr, *(int *) arg is the length of the
+ * input data, if any, not including the command string & counts,
+ * *((int *)arg + 1) is the output buffer size in bytes.
+ *
+ * *(char *) ((int *) arg)[2] is the start of the actual command bytes.
+ *
+ * Note that no more than MAX_BUF data bytes will be transferred. Since
+ * SCSI block device size is 512 bytes, I figured 1K was good,
+ * but (WDE) changed it to 8192 to handle large bad track buffers.
+ * ERY: I changed this to a dynamic allocation using scsi_malloc - we were
+ * getting a kernel stack overflow which was crashing the system when we
+ * were using 8192 bytes.
+ *
+ * This size *does not* include the initial lengths that were passed.
+ *
+ * The SCSI command is read from the memory location immediately after the
+ * length words, and the input data is right after the command. The SCSI
+ * routines know the command size based on the opcode decode.
+ *
+ * The output area is then filled in starting from the command byte.
+ */
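+/*
+ * A minimal caller-side sketch of that layout (illustrative only; the
+ * INQUIRY opcode and the 96-byte response size are assumptions, not part
+ * of this interface):
+ *
+ *	unsigned char buf[2 * sizeof(int) + 96];
+ *	int *ip = (int *) buf;
+ *	unsigned char *cmd = buf + 2 * sizeof(int);
+ *
+ *	ip[0] = 0;		// inlen:  no data sent with the command
+ *	ip[1] = 96;		// outlen: room for the INQUIRY response
+ *	memset(cmd, 0, 6);
+ *	cmd[0] = 0x12;		// INQUIRY
+ *	cmd[4] = 96;		// allocation length
+ *	ioctl(fd, SCSI_IOCTL_SEND_COMMAND, buf);
+ *	// on success the response starts at cmd[0]; on error the sense
+ *	// buffer is copied back there instead
+ */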
+
+static void scsi_ioctl_done (Scsi_Cmnd * SCpnt)
+{
+ struct request * req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+}
+
+static int ioctl_internal_command(Scsi_Device *dev, char * cmd)
+{
+ int result;
+ Scsi_Cmnd * SCpnt;
+
+ SCpnt = allocate_device(NULL, dev, 1);
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd(SCpnt, cmd, NULL, 0,
+ scsi_ioctl_done, MAX_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ if(driver_byte(SCpnt->result) != 0)
+ switch(SCpnt->sense_buffer[2] & 0xf) {
+ case ILLEGAL_REQUEST:
+ if(cmd[0] == ALLOW_MEDIUM_REMOVAL) dev->lockable = 0;
+ else printk("SCSI device (ioctl) reports ILLEGAL REQUEST.\n");
+ break;
+ case NOT_READY: /* This happens if there is no disc in drive */
+ if(dev->removable){
+ printk(KERN_INFO "Device not ready. Make sure there is a disc in the drive.\n");
+ break;
+ };
+ case UNIT_ATTENTION:
+ if (dev->removable){
+ dev->changed = 1;
+ SCpnt->result = 0; /* This is no longer considered an error */
+ printk(KERN_INFO "Disc change detected.\n");
+ break;
+ };
+ default: /* Fall through for non-removable media */
+ printk("SCSI error: host %d id %d lun %d return code = %x\n",
+ dev->host->host_no,
+ dev->id,
+ dev->lun,
+ SCpnt->result);
+ printk("\tSense class %x, sense error %x, extended sense %x\n",
+ sense_class(SCpnt->sense_buffer[0]),
+ sense_error(SCpnt->sense_buffer[0]),
+ SCpnt->sense_buffer[2] & 0xf);
+
+ };
+
+ result = SCpnt->result;
+ SCpnt->request.rq_status = RQ_INACTIVE;
+ wake_up(&SCpnt->device->device_wait);
+ return result;
+}
+
+/*
+ * This interface is deprecated - users should use the scsi generics
+ * interface instead, as this is a more flexible approach to performing
+ * generic SCSI commands on a device.
+ */
+static int ioctl_command(Scsi_Device *dev, void *buffer)
+{
+ char * buf;
+ char cmd[12];
+ char * cmd_in;
+ Scsi_Cmnd * SCpnt;
+ unsigned char opcode;
+ int inlen, outlen, cmdlen;
+ int needed, buf_needed;
+ int result;
+
+ if (!buffer)
+ return -EINVAL;
+
+
+ /*
+ * Verify that we can read at least this much.
+ */
+ result = verify_area(VERIFY_READ, buffer, 2*sizeof(long) + 1);
+ if (result) return result;
+
+ /*
+ * The structure that we are passed should look like:
+ *
+ * struct sdata{
+ * int inlen;
+ * int outlen;
+ * char cmd[]; # However many bytes are used for cmd.
+ * char data[];
+ * };
+ */
+ inlen = get_user((unsigned int *) buffer);
+ outlen = get_user( ((unsigned int *) buffer) + 1);
+
+ /*
+ * We do not transfer more than MAX_BUF with this interface.
+ * If the user needs to transfer more data than this, they
+ * should use scsi_generics instead.
+ */
+ if( inlen > MAX_BUF ) inlen = MAX_BUF;
+ if( outlen > MAX_BUF ) outlen = MAX_BUF;
+
+ cmd_in = (char *) ( ((int *)buffer) + 2);
+ opcode = get_user(cmd_in);
+
+ needed = buf_needed = (inlen > outlen ? inlen : outlen);
+ if(buf_needed){
+ buf_needed = (buf_needed + 511) & ~511;
+ if (buf_needed > MAX_BUF) buf_needed = MAX_BUF;
+ buf = (char *) scsi_malloc(buf_needed);
+ if (!buf) return -ENOMEM;
+ memset(buf, 0, buf_needed);
+ } else
+ buf = NULL;
+
+ /*
+ * Obtain the command from the user's address space.
+ */
+ cmdlen = COMMAND_SIZE(opcode);
+
+ result = verify_area(VERIFY_READ, cmd_in,
+ cmdlen + inlen > MAX_BUF ? MAX_BUF : inlen);
+ if (result) return result;
+
+ memcpy_fromfs ((void *) cmd, cmd_in, cmdlen);
+
+ /*
+ * Obtain the data to be sent to the device (if any).
+ */
+ memcpy_fromfs ((void *) buf,
+ (void *) (cmd_in + cmdlen),
+ inlen);
+
+ /*
+ * Set the lun field to the correct value.
+ */
+ cmd[1] = ( cmd[1] & 0x1f ) | (dev->lun << 5);
+
+#ifndef DEBUG_NO_CMD
+
+ SCpnt = allocate_device(NULL, dev, 1);
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd(SCpnt, cmd, buf, needed, scsi_ioctl_done, MAX_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ /*
+ * If there was an error condition, pass the info back to the user.
+ */
+ if(SCpnt->result) {
+ result = verify_area(VERIFY_WRITE,
+ cmd_in,
+ sizeof(SCpnt->sense_buffer));
+ if (result) return result;
+ memcpy_tofs((void *) cmd_in,
+ SCpnt->sense_buffer,
+ sizeof(SCpnt->sense_buffer));
+ } else {
+ result = verify_area(VERIFY_WRITE, cmd_in, outlen);
+ if (result) return result;
+ memcpy_tofs ((void *) cmd_in, buf, outlen);
+ }
+ result = SCpnt->result;
+
+ SCpnt->request.rq_status = RQ_INACTIVE;
+
+ if (buf) scsi_free(buf, buf_needed);
+
+ if(SCpnt->device->scsi_request_fn)
+ (*SCpnt->device->scsi_request_fn)();
+
+ wake_up(&SCpnt->device->device_wait);
+ return result;
+#else
+ {
+ int i;
+ printk("scsi_ioctl : device %d. command = ", dev->id);
+ for (i = 0; i < 12; ++i)
+ printk("%02x ", cmd[i]);
+ printk("\nbuffer =");
+ for (i = 0; i < 20; ++i)
+ printk("%02x ", buf[i]);
+ printk("\n");
+ printk("inlen = %d, outlen = %d, cmdlen = %d\n",
+ inlen, outlen, cmdlen);
+ printk("buffer = %d, cmd_in = %d\n", buffer, cmd_in);
+ }
+ return 0;
+#endif
+}
+
+/*
+ * the scsi_ioctl() function differs from most ioctls in that it does
+ * not take a major/minor number as the dev field. Rather, it takes
+ * a pointer to a scsi_devices[] element, a structure.
+ */
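+/*
+ * The disk driver, for instance, relocks a removable drive's door after a
+ * bus reset with a call of the form
+ *
+ *	scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
+ *
+ * where SDev is the Scsi_Device pointer it already holds.
+ */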
+int scsi_ioctl (Scsi_Device *dev, int cmd, void *arg)
+{
+ int result;
+ char scsi_cmd[12];
+
+ /* No idea how this happens.... */
+ if (!dev) return -ENXIO;
+
+ switch (cmd) {
+ case SCSI_IOCTL_GET_IDLUN:
+ result = verify_area(VERIFY_WRITE, (void *) arg, 2*sizeof(long));
+ if (result) return result;
+
+ put_user(dev->id
+ + (dev->lun << 8)
+ + (dev->channel << 16)
+ + ((dev->host->hostt->proc_dir->low_ino & 0xff) << 24),
+ (unsigned long *) arg);
+ put_user( dev->host->unique_id, (unsigned long *) arg+1);
+ return 0;
+ case SCSI_IOCTL_TAGGED_ENABLE:
+ if(!suser()) return -EACCES;
+ if(!dev->tagged_supported) return -EINVAL;
+ dev->tagged_queue = 1;
+ dev->current_tag = 1;
+ break;
+ case SCSI_IOCTL_TAGGED_DISABLE:
+ if(!suser()) return -EACCES;
+ if(!dev->tagged_supported) return -EINVAL;
+ dev->tagged_queue = 0;
+ dev->current_tag = 0;
+ break;
+ case SCSI_IOCTL_PROBE_HOST:
+ return ioctl_probe(dev->host, arg);
+ case SCSI_IOCTL_SEND_COMMAND:
+ if(!suser()) return -EACCES;
+ return ioctl_command((Scsi_Device *) dev, arg);
+ case SCSI_IOCTL_DOORLOCK:
+ if (!dev->removable || !dev->lockable) return 0;
+ scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = SCSI_REMOVAL_PREVENT;
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd);
+ break;
+ case SCSI_IOCTL_DOORUNLOCK:
+ if (!dev->removable || !dev->lockable) return 0;
+ scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = SCSI_REMOVAL_ALLOW;
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd);
+ case SCSI_IOCTL_TEST_UNIT_READY:
+ scsi_cmd[0] = TEST_UNIT_READY;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = 0;
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd);
+ break;
+ default :
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Just like scsi_ioctl, only callable from kernel space with no
+ * fs segment fiddling.
+ */
+
+int kernel_scsi_ioctl (Scsi_Device *dev, int cmd, void *arg) {
+ unsigned long oldfs;
+ int tmp;
+ oldfs = get_fs();
+ set_fs(get_ds());
+ tmp = scsi_ioctl (dev, cmd, arg);
+ set_fs(oldfs);
+ return tmp;
+}
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/scsi_ioctl.h b/i386/i386at/gpl/linux/scsi/scsi_ioctl.h
new file mode 100644
index 00000000..a42fed00
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/scsi_ioctl.h
@@ -0,0 +1,21 @@
+#ifndef _SCSI_IOCTL_H
+#define _SCSI_IOCTL_H
+
+#define SCSI_IOCTL_SEND_COMMAND 1
+#define SCSI_IOCTL_TEST_UNIT_READY 2
+#define SCSI_IOCTL_BENCHMARK_COMMAND 3
+#define SCSI_IOCTL_SYNC 4 /* Request synchronous parameters */
+/* The door lock/unlock constants are compatible with Sun constants for
+ the cdrom */
+#define SCSI_IOCTL_DOORLOCK 0x5380 /* lock the eject mechanism */
+#define SCSI_IOCTL_DOORUNLOCK 0x5381 /* unlock the mechanism */
+
+#define SCSI_REMOVAL_PREVENT 1
+#define SCSI_REMOVAL_ALLOW 0
+
+extern int scsi_ioctl (Scsi_Device *dev, int cmd, void *arg);
+extern int kernel_scsi_ioctl (Scsi_Device *dev, int cmd, void *arg);
+
+#endif
+
+
diff --git a/i386/i386at/gpl/linux/scsi/scsi_proc.c b/i386/i386at/gpl/linux/scsi/scsi_proc.c
new file mode 100644
index 00000000..6650cec9
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/scsi_proc.c
@@ -0,0 +1,317 @@
+/*
+ * linux/drivers/scsi/scsi_proc.c
+ *
+ * The functions in this file provide an interface between
+ * the PROC file system and the SCSI device drivers.
+ * It is mainly used for debugging, statistics, and to pass
+ * information directly to the low-level driver.
+ *
+ * (c) 1995 Michael Neuffer neuffer@goofy.zdv.uni-mainz.de
+ * Version: 0.99.8 last change: 95/09/13
+ *
+ * generic command parser provided by:
+ * Andreas Heilwagen <crashcar@informatik.uni-koblenz.de>
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/malloc.h>
+#include <linux/proc_fs.h>
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#ifndef TRUE
+#define TRUE 1
+#define FALSE 0
+#endif
+
+extern int scsi_proc_info(char *, char **, off_t, int, int, int);
+
+struct scsi_dir {
+ struct proc_dir_entry entry;
+ char name[4];
+};
+
+
+/* generic_proc_info
+ * Used if the driver currently has no own support for /proc/scsi
+ */
+int generic_proc_info(char *buffer, char **start, off_t offset,
+ int length, int inode, int inout)
+{
+ int len, pos, begin;
+
+ if(inout == TRUE)
+ return(-ENOSYS); /* This is a no-op */
+
+ begin = 0;
+ pos = len = sprintf(buffer,
+ "The driver does not yet support the proc-fs\n");
+ if(pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin);
+ if(len > length)
+ len = length;
+
+ return(len);
+}
+
+/* dispatch_scsi_info is the central dispatcher
+ * It is the interface between the proc-fs and the SCSI subsystem code
+ */
+extern int dispatch_scsi_info(int ino, char *buffer, char **start,
+ off_t offset, int length, int func)
+{
+ struct Scsi_Host *hpnt = scsi_hostlist;
+
+ if(ino == PROC_SCSI_SCSI) {
+ /*
+ * This is for the scsi core, rather than any specific
+ * lowlevel driver.
+ */
+ return(scsi_proc_info(buffer, start, offset, length, 0, func));
+ }
+
+ while(hpnt) {
+ if (ino == (hpnt->host_no + PROC_SCSI_FILE)) {
+ if(hpnt->hostt->proc_info == NULL)
+ return generic_proc_info(buffer, start, offset, length,
+ hpnt->host_no, func);
+ else
+ return(hpnt->hostt->proc_info(buffer, start, offset,
+ length, hpnt->host_no, func));
+ }
+ hpnt = hpnt->next;
+ }
+ return(-EBADF);
+}
+
+void build_proc_dir_entries(Scsi_Host_Template *tpnt)
+{
+ struct Scsi_Host *hpnt;
+
+ struct scsi_dir *scsi_hba_dir;
+
+ proc_scsi_register(0, tpnt->proc_dir);
+
+ hpnt = scsi_hostlist;
+ while (hpnt) {
+ if (tpnt == hpnt->hostt) {
+ scsi_hba_dir = scsi_init_malloc(sizeof(struct scsi_dir), GFP_KERNEL);
+ if(scsi_hba_dir == NULL)
+ panic("Not enough memory to register SCSI HBA in /proc/scsi !\n");
+ memset(scsi_hba_dir, 0, sizeof(struct scsi_dir));
+ scsi_hba_dir->entry.low_ino = PROC_SCSI_FILE + hpnt->host_no;
+ scsi_hba_dir->entry.namelen = sprintf(scsi_hba_dir->name,"%d",
+ hpnt->host_no);
+ scsi_hba_dir->entry.name = scsi_hba_dir->name;
+ scsi_hba_dir->entry.mode = S_IFREG | S_IRUGO | S_IWUSR;
+ proc_scsi_register(tpnt->proc_dir, &scsi_hba_dir->entry);
+ }
+ hpnt = hpnt->next;
+ }
+}
+
+/*
+ * parseHandle *parseInit(char *buf, char *cmdList, int cmdNum);
+ * gets a pointer to a null terminated data buffer
+ * and a list of commands with blanks as delimiter
+ * in between.
+ * The commands have to be alphanumerically sorted.
+ * cmdNum has to contain the number of commands.
+ * On success, a pointer to a handle structure
+ * is returned, NULL on failure
+ *
+ * int parseOpt(parseHandle *handle, char **param);
+ * processes the next parameter. On success, the
+ * index of the appropriate command in the cmdList
+ * is returned, starting with zero.
+ * param points to the null terminated parameter string.
+ * On failure, -1 is returned.
+ *
+ * The data buffer buf may only contain pairs of commands and
+ * options, separated by blanks:
+ * <Command> <Parameter> [<Command> <Parameter>]*
+ */
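+/*
+ * A minimal usage sketch (the command list and input below are made up;
+ * both buffers must be writable because the parser terminates tokens in
+ * place, and parseOpt() frees the handle itself when it returns -1):
+ *
+ *	char buf[32]  = "depth 8 speed 10";
+ *	char cmds[]   = "depth speed";
+ *	char *param;
+ *	int  idx;
+ *	parseHandle *h = parseInit(buf, cmds, 2);
+ *
+ *	while (h && (idx = parseOpt(h, &param)) >= 0)
+ *		printk("option %d = %s\n", idx, param);
+ */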
+
+typedef struct
+{
+ char *buf, /* command buffer */
+ *cmdList, /* command list */
+ *bufPos, /* actual position */
+ **cmdPos, /* cmdList index */
+ cmdNum; /* cmd number */
+} parseHandle;
+
+
+inline int parseFree (parseHandle *handle) /* free memory */
+{
+ kfree (handle->cmdPos);
+ kfree (handle);
+
+ return(-1);
+}
+
+
+parseHandle *parseInit(char *buf, char *cmdList, int cmdNum)
+{
+ char *ptr; /* temp pointer */
+ parseHandle *handle; /* new handle */
+
+ if (!buf || !cmdList) /* bad input ? */
+ return(NULL);
+ if ((handle = (parseHandle*) kmalloc(sizeof(parseHandle), 1)) == 0)
+ return(NULL); /* out of memory */
+ if ((handle->cmdPos = (char**) kmalloc(cmdNum * sizeof(char *), 1)) == 0) {
+ kfree(handle);
+ return(NULL); /* out of memory */
+ }
+
+ handle->buf = handle->bufPos = buf; /* init handle */
+ handle->cmdList = cmdList;
+ handle->cmdNum = cmdNum;
+
+ handle->cmdPos[cmdNum = 0] = cmdList;
+ for (ptr = cmdList; *ptr; ptr++) { /* scan command string */
+ if(*ptr == ' ') { /* and insert zeroes */
+ *ptr++ = 0;
+ handle->cmdPos[++cmdNum] = ptr++;
+ }
+ }
+ return(handle);
+}
+
+
+int parseOpt(parseHandle *handle, char **param)
+{
+ int cmdIndex = 0,
+ cmdLen = 0;
+ char *startPos;
+
+ if (!handle) /* invalid handle */
+ return(parseFree(handle));
+ /* skip spaces */
+ for (; *(handle->bufPos) && *(handle->bufPos) == ' '; handle->bufPos++);
+ if (!*(handle->bufPos))
+ return(parseFree(handle)); /* end of data */
+
+ startPos = handle->bufPos; /* store cmd start */
+ for (; handle->cmdPos[cmdIndex][cmdLen] && *(handle->bufPos); handle->bufPos++)
+ { /* no string end? */
+ for (;;)
+ {
+ if (*(handle->bufPos) == handle->cmdPos[cmdIndex][cmdLen])
+ break; /* char matches ? */
+ else
+ if (memcmp(startPos, (char*)(handle->cmdPos[++cmdIndex]), cmdLen))
+ return(parseFree(handle)); /* unknown command */
+
+ if (cmdIndex >= handle->cmdNum)
+ return(parseFree(handle)); /* unknown command */
+ }
+
+ cmdLen++; /* next char */
+ }
+
+ /* Get param. First skip all blanks, then insert zero after param */
+
+ for (; *(handle->bufPos) && *(handle->bufPos) == ' '; handle->bufPos++);
+ *param = handle->bufPos;
+
+ for (; *(handle->bufPos) && *(handle->bufPos) != ' '; handle->bufPos++);
+ *(handle->bufPos++) = 0;
+
+ return(cmdIndex);
+}
+
+#define MAX_SCSI_DEVICE_CODE 10
+const char *const scsi_dev_types[MAX_SCSI_DEVICE_CODE] =
+{
+ "Direct-Access ",
+ "Sequential-Access",
+ "Printer ",
+ "Processor ",
+ "WORM ",
+ "CD-ROM ",
+ "Scanner ",
+ "Optical Device ",
+ "Medium Changer ",
+ "Communications "
+};
+
+void proc_print_scsidevice(Scsi_Device *scd, char *buffer, int *size, int len)
+{
+ int x, y = *size;
+
+ y = sprintf(buffer + len,
+ "Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n Vendor: ",
+ scd->host->host_no, scd->channel, scd->id, scd->lun);
+ for (x = 0; x < 8; x++) {
+ if (scd->vendor[x] >= 0x20)
+ y += sprintf(buffer + len + y, "%c", scd->vendor[x]);
+ else
+ y += sprintf(buffer + len + y," ");
+ }
+ y += sprintf(buffer + len + y, " Model: ");
+ for (x = 0; x < 16; x++) {
+ if (scd->model[x] >= 0x20)
+ y += sprintf(buffer + len + y, "%c", scd->model[x]);
+ else
+ y += sprintf(buffer + len + y, " ");
+ }
+ y += sprintf(buffer + len + y, " Rev: ");
+ for (x = 0; x < 4; x++) {
+ if (scd->rev[x] >= 0x20)
+ y += sprintf(buffer + len + y, "%c", scd->rev[x]);
+ else
+ y += sprintf(buffer + len + y, " ");
+ }
+ y += sprintf(buffer + len + y, "\n");
+
+ y += sprintf(buffer + len + y, " Type: %s ",
+ scd->type < MAX_SCSI_DEVICE_CODE ?
+ scsi_dev_types[(int)scd->type] : "Unknown " );
+ y += sprintf(buffer + len + y, " ANSI"
+ " SCSI revision: %02x", (scd->scsi_level < 3)?1:2);
+ if (scd->scsi_level == 2)
+ y += sprintf(buffer + len + y, " CCS\n");
+ else
+ y += sprintf(buffer + len + y, "\n");
+
+ *size = y;
+ return;
+}
+
+/*
+ * Overrides for Emacs so that we get a uniform tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/scsicam.c b/i386/i386at/gpl/linux/scsi/scsicam.c
new file mode 100644
index 00000000..e4e4e764
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/scsicam.c
@@ -0,0 +1,214 @@
+/*
+ * scsicam.c - SCSI CAM support functions, use for HDIO_GETGEO, etc.
+ *
+ * Copyright 1993, 1994 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@Colorado.EDU
+ * +1 (303) 786-7975
+ *
+ * For more information, please consult the SCSI-CAM draft.
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+static int partsize(struct buffer_head *bh, unsigned long capacity,
+ unsigned int *cyls, unsigned int *hds, unsigned int *secs);
+static int setsize(unsigned long capacity,unsigned int *cyls,unsigned int *hds,
+ unsigned int *secs);
+
+/*
+ * Function : int scsicam_bios_param (Disk *disk, int dev, int *ip)
+ *
+ * Purpose : to determine the BIOS mapping used for a drive in a
+ * SCSI-CAM system, storing the results in ip as required
+ * by the HDIO_GETGEO ioctl().
+ *
+ * Returns : -1 on failure, 0 on success.
+ *
+ */
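+/*
+ * A low-level host driver that wants CAM-style geometry can simply hand
+ * its bios_param work to this routine; a minimal sketch (the driver and
+ * function names are hypothetical):
+ *
+ *	int mydrv_bios_param(Disk *disk, kdev_t dev, int *ip)
+ *	{
+ *		return scsicam_bios_param(disk, dev, ip);
+ *	}
+ */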
+
+int scsicam_bios_param (Disk *disk, /* SCSI disk */
+ kdev_t dev, /* Device major, minor */
+ int *ip /* Heads, sectors, cylinders in that order */) {
+
+ struct buffer_head *bh;
+ int ret_code;
+ int size = disk->capacity;
+
+ if (!(bh = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, 1024)))
+ return -1;
+
+#ifdef DEBUG
+ printk ("scsicam_bios_param : trying existing mapping\n");
+#endif
+ ret_code = partsize (bh, (unsigned long) size, (unsigned int *) ip + 2,
+ (unsigned int *) ip + 0, (unsigned int *) ip + 1);
+ brelse (bh);
+
+ if (ret_code == -1) {
+#ifdef DEBUG
+ printk ("scsicam_bios_param : trying optimal mapping\n");
+#endif
+ ret_code = setsize ((unsigned long) size, (unsigned int *) ip + 2,
+ (unsigned int *) ip + 0, (unsigned int *) ip + 1);
+ }
+
+ return ret_code;
+}
+
+/*
+ * Function : static int partsize(struct buffer_head *bh, unsigned long
+ * capacity,unsigned int *cyls, unsigned int *hds, unsigned int *secs);
+ *
+ * Purpose : to determine the BIOS mapping used to create the partition
+ * table, storing the results in *cyls, *hds, and *secs
+ *
+ * Returns : -1 on failure, 0 on success.
+ *
+ */
+
+static int partsize(struct buffer_head *bh, unsigned long capacity,
+ unsigned int *cyls, unsigned int *hds, unsigned int *secs) {
+ struct partition *p, *largest = NULL;
+ int i, largest_cyl;
+ int cyl, ext_cyl, end_head, end_cyl, end_sector;
+ unsigned int logical_end, physical_end, ext_physical_end;
+
+
+ if (*(unsigned short *) (bh->b_data+510) == 0xAA55) {
+ for (largest_cyl = -1, p = (struct partition *)
+ (0x1BE + bh->b_data), i = 0; i < 4; ++i, ++p) {
+ if (!p->sys_ind)
+ continue;
+#ifdef DEBUG
+ printk ("scsicam_bios_param : partition %d has system \n",
+ i);
+#endif
+ cyl = p->cyl + ((p->sector & 0xc0) << 2);
+ if (cyl > largest_cyl) {
+ largest_cyl = cyl;
+ largest = p;
+ }
+ }
+ }
+
+ if (largest) {
+ end_cyl = largest->end_cyl + ((largest->end_sector & 0xc0) << 2);
+ end_head = largest->end_head;
+ end_sector = largest->end_sector & 0x3f;
+
+#ifdef DEBUG
+ printk ("scsicam_bios_param : end at h = %d, c = %d, s = %d\n",
+ end_head, end_cyl, end_sector);
+#endif
+
+ physical_end = end_cyl * (end_head + 1) * end_sector +
+ end_head * end_sector + end_sector;
+
+ /* This is the actual _sector_ number at the end */
+ logical_end = largest->start_sect + largest->nr_sects;
+
+ /* This is for >1023 cylinders */
+ ext_cyl= (logical_end-(end_head * end_sector + end_sector))
+ /(end_head + 1) / end_sector;
+ ext_physical_end = ext_cyl * (end_head + 1) * end_sector +
+ end_head * end_sector + end_sector;
+
+#ifdef DEBUG
+ printk("scsicam_bios_param : logical_end=%d physical_end=%d ext_physical_end=%d ext_cyl=%d\n"
+ ,logical_end,physical_end,ext_physical_end,ext_cyl);
+#endif
+
+ if ((logical_end == physical_end) ||
+ (end_cyl==1023 && ext_physical_end==logical_end)) {
+ *secs = end_sector;
+ *hds = end_head + 1;
+ *cyls = capacity / ((end_head + 1) * end_sector);
+ return 0;
+ }
+
+#ifdef DEBUG
+ printk ("scsicam_bios_param : logical (%u) != physical (%u)\n",
+ logical_end, physical_end);
+#endif
+ }
+ return -1;
+}
+
+/*
+ * Function : static int setsize(unsigned long capacity,unsigned int *cyls,
+ * unsigned int *hds, unsigned int *secs);
+ *
+ * Purpose : to determine a near-optimal int 0x13 mapping for a
+ * SCSI disk in terms of lost space of size capacity, storing
+ * the results in *cyls, *hds, and *secs.
+ *
+ * Returns : -1 on failure, 0 on success.
+ *
+ * Extracted from
+ *
+ * WORKING X3T9.2
+ * DRAFT 792D
+ *
+ *
+ * Revision 6
+ * 10-MAR-94
+ * Information technology -
+ * SCSI-2 Common access method
+ * transport and SCSI interface module
+ *
+ * ANNEX A :
+ *
+ * setsize() converts a read capacity value to int 13h
+ * head-cylinder-sector requirements. It minimizes the value for
+ * number of heads and maximizes the number of cylinders. This
+ * will support rather large disks before the number of heads
+ * will not fit in 4 bits (or 6 bits). This algorithm also
+ * minimizes the number of sectors that will be unused at the end
+ * of the disk while allowing for very large disks to be
+ * accommodated. This algorithm does not use physical geometry.
+ */
+
+static int setsize(unsigned long capacity,unsigned int *cyls,unsigned int *hds,
+ unsigned int *secs) {
+ unsigned int rv = 0;
+ unsigned long heads, sectors, cylinders, temp;
+
+ cylinders = 1024L; /* Set number of cylinders to max */
+ sectors = 62L; /* Maximize sectors per track */
+
+ temp = cylinders * sectors; /* Compute divisor for heads */
+ heads = capacity / temp; /* Compute value for number of heads */
+ if (capacity % temp) { /* If no remainder, done! */
+ heads++; /* Else, increment number of heads */
+ temp = cylinders * heads; /* Compute divisor for sectors */
+ sectors = capacity / temp; /* Compute value for sectors per
+ track */
+ if (capacity % temp) { /* If no remainder, done! */
+ sectors++; /* Else, increment number of sectors */
+ temp = heads * sectors; /* Compute divisor for cylinders */
+ cylinders = capacity / temp;/* Compute number of cylinders */
+ }
+ }
+ if (cylinders == 0) rv=(unsigned)-1;/* Give error if 0 cylinders */
+
+ *cyls = (unsigned int) cylinders; /* Stuff return values */
+ *secs = (unsigned int) sectors;
+ *hds = (unsigned int) heads;
+ return(rv);
+}
diff --git a/i386/i386at/gpl/linux/scsi/sd.c b/i386/i386at/gpl/linux/scsi/sd.c
new file mode 100644
index 00000000..47bf2263
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/sd.c
@@ -0,0 +1,1543 @@
+/*
+ * sd.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * Linux scsi disk driver
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale ericy@cais.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Modified by Eric Youngdale eric@aib.com to support loadable
+ * low-level scsi drivers.
+ */
+
+#include <linux/module.h>
+#ifdef MODULE
+/*
+ * This is a variable in scsi.c that is set when we are processing something
+ * after boot time. By definition, this is true when we are a loadable module
+ * ourselves.
+ */
+#define MODULE_FLAG 1
+#else
+#define MODULE_FLAG scsi_loadable_module_flag
+#endif /* MODULE */
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+
+#define MAJOR_NR SCSI_DISK_MAJOR
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include "scsi_ioctl.h"
+#include "constants.h"
+
+#include <linux/genhd.h>
+
+/*
+ * static const char RCSid[] = "$Header:";
+ */
+
+#define MAX_RETRIES 5
+
+/*
+ * Time out in seconds for disks and Magneto-opticals (which are slower).
+ */
+
+#define SD_TIMEOUT (7 * HZ)
+#define SD_MOD_TIMEOUT (8 * HZ)
+
+#define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
+ SC->device->type != TYPE_MOD)
+
+struct hd_struct * sd;
+
+Scsi_Disk * rscsi_disks = NULL;
+static int * sd_sizes;
+static int * sd_blocksizes;
+static int * sd_hardsizes; /* Hardware sector size */
+
+extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+static int check_scsidisk_media_change(kdev_t);
+static int fop_revalidate_scsidisk(kdev_t);
+
+static int sd_init_onedisk(int);
+
+static void requeue_sd_request (Scsi_Cmnd * SCpnt);
+
+static int sd_init(void);
+static void sd_finish(void);
+static int sd_attach(Scsi_Device *);
+static int sd_detect(Scsi_Device *);
+static void sd_detach(Scsi_Device *);
+
+struct Scsi_Device_Template sd_template =
+{ NULL, "disk", "sd", NULL, TYPE_DISK,
+ SCSI_DISK_MAJOR, 0, 0, 0, 1,
+ sd_detect, sd_init,
+ sd_finish, sd_attach, sd_detach
+};
+
+static int sd_open(struct inode * inode, struct file * filp)
+{
+ int target;
+ target = DEVICE_NR(inode->i_rdev);
+
+ if(target >= sd_template.dev_max || !rscsi_disks[target].device)
+ return -ENXIO; /* No such device */
+
+ /*
+ * Make sure that only one process can do a check_disk_change at one time.
+ * This is also used to lock out further access when the partition table
+ * is being re-read.
+ */
+
+ while (rscsi_disks[target].device->busy)
+ barrier();
+ if(rscsi_disks[target].device->removable) {
+ check_disk_change(inode->i_rdev);
+
+ /*
+ * If the drive is empty, just let the open fail.
+ */
+ if ( !rscsi_disks[target].ready ) {
+ return -ENXIO;
+ }
+
+ /*
+ * Similarly, if the device has the write protect tab set,
+ * have the open fail if the user expects to be able to write
+ * to the thing.
+ */
+ if ( (rscsi_disks[target].write_prot) && (filp->f_mode & 2) ) {
+ return -EROFS;
+ }
+
+ if(!rscsi_disks[target].device->access_count)
+ sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
+ };
+
+ /*
+ * See if we are requesting a non-existent partition. Do this
+ * after checking for disk change.
+ */
+ if(sd_sizes[MINOR(inode->i_rdev)] == 0)
+ return -ENXIO;
+
+ rscsi_disks[target].device->access_count++;
+ if (rscsi_disks[target].device->host->hostt->usage_count)
+ (*rscsi_disks[target].device->host->hostt->usage_count)++;
+ if(sd_template.usage_count) (*sd_template.usage_count)++;
+ return 0;
+}
+
+static void sd_release(struct inode * inode, struct file * file)
+{
+ int target;
+ sync_dev(inode->i_rdev);
+
+ target = DEVICE_NR(inode->i_rdev);
+
+ rscsi_disks[target].device->access_count--;
+ if (rscsi_disks[target].device->host->hostt->usage_count)
+ (*rscsi_disks[target].device->host->hostt->usage_count)--;
+ if(sd_template.usage_count) (*sd_template.usage_count)--;
+
+ if(rscsi_disks[target].device->removable) {
+ if(!rscsi_disks[target].device->access_count)
+ sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
+ }
+}
+
+static void sd_geninit(struct gendisk *);
+
+static struct file_operations sd_fops = {
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ sd_ioctl, /* ioctl */
+ NULL, /* mmap */
+ sd_open, /* open code */
+ sd_release, /* release */
+ block_fsync, /* fsync */
+ NULL, /* fasync */
+ check_scsidisk_media_change, /* Disk change */
+ fop_revalidate_scsidisk /* revalidate */
+};
+
+static struct gendisk sd_gendisk = {
+ MAJOR_NR, /* Major number */
+ "sd", /* Major name */
+ 4, /* Bits to shift to get real from partition */
+ 1 << 4, /* Number of partitions per real */
+ 0, /* maximum number of real */
+ sd_geninit, /* init function */
+ NULL, /* hd struct */
+ NULL, /* block sizes */
+ 0, /* number */
+ NULL, /* internal */
+ NULL /* next */
+};
+
+static void sd_geninit (struct gendisk *ignored)
+{
+ int i;
+
+ for (i = 0; i < sd_template.dev_max; ++i)
+ if(rscsi_disks[i].device)
+ sd[i << 4].nr_sects = rscsi_disks[i].capacity;
+#if 0
+ /* No longer needed - we keep track of this as we attach/detach */
+ sd_gendisk.nr_real = sd_template.dev_max;
+#endif
+}
+
+/*
+ * rw_intr is the interrupt routine for the device driver. It will
+ * be notified on the end of a SCSI read / write, and
+ * will take one of several actions based on success or failure.
+ */
+
+static void rw_intr (Scsi_Cmnd *SCpnt)
+{
+ int result = SCpnt->result;
+ int this_count = SCpnt->bufflen >> 9;
+
+#ifdef DEBUG
+ printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev),
+ SCpnt->host->host_no, result);
+#endif
+
+ /*
+ * First case : we assume that the command succeeded. One of two things
+ * will happen here. Either we will be finished, or there will be more
+ * sectors that we were unable to read last time.
+ */
+
+ if (!result) {
+
+#ifdef DEBUG
+ printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev),
+ SCpnt->request.nr_sectors);
+ printk("use_sg is %d\n ",SCpnt->use_sg);
+#endif
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+#ifdef DEBUG
+ printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address,
+ sgpnt[i].length);
+#endif
+ if (sgpnt[i].alt_address) {
+ if (SCpnt->request.cmd == READ)
+ memcpy(sgpnt[i].alt_address, sgpnt[i].address,
+ sgpnt[i].length);
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ };
+ };
+
+ /* Free list of scatter-gather pointers */
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len);
+ } else {
+ if (SCpnt->buffer != SCpnt->request.buffer) {
+#ifdef DEBUG
+ printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+#endif
+ if (SCpnt->request.cmd == READ)
+ memcpy(SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+ scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ };
+ };
+ /*
+ * If multiple sectors are requested in one buffer, then
+ * they will have been finished off by the first command.
+ * If not, then we have a multi-buffer command.
+ */
+ if (SCpnt->request.nr_sectors > this_count)
+ {
+ SCpnt->request.errors = 0;
+
+ if (!SCpnt->request.bh)
+ {
+#ifdef DEBUG
+ printk("sd%c : handling page request, no buffer\n",
+ 'a' + MINOR(SCpnt->request.rq_dev));
+#endif
+ /*
+ * The SCpnt->request.nr_sectors field is always counted in
+ * 512 byte sectors, even if the device's hardware sector size differs.
+ */
+ panic("sd.c: linked page request (%lx %x)",
+ SCpnt->request.sector, this_count);
+ }
+ }
+ SCpnt = end_scsi_request(SCpnt, 1, this_count);
+ requeue_sd_request(SCpnt);
+ return;
+ }
+
+ /* Free up any indirection buffers we allocated for DMA purposes. */
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+#ifdef DEBUG
+ printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+#endif
+ if (sgpnt[i].alt_address) {
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ };
+ };
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
+ } else {
+#ifdef DEBUG
+ printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+#endif
+ if (SCpnt->buffer != SCpnt->request.buffer)
+ scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ };
+
+ /*
+ * Now, if we were good little boys and girls, Santa left us a request
+ * sense buffer. We can extract information from this, so we
+ * can choose a block to remap, etc.
+ */
+
+ if (driver_byte(result) != 0) {
+ if (suggestion(result) == SUGGEST_REMAP) {
+#ifdef REMAP
+ /*
+ * Not yet implemented. A read will fail after being remapped,
+ * a write will call the strategy routine again.
+ */
+ if rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].remap
+ {
+ result = 0;
+ }
+ else
+#endif
+ }
+
+ if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
+ if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
+ if(rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
+ /* detected disc change. set a bit and quietly refuse
+ * further access.
+ */
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sd_request(SCpnt);
+ return;
+ }
+ else
+ {
+ /*
+ * Must have been a power glitch, or a bus reset.
+ * Could not have been a media change, so we just retry
+ * the request and see what happens.
+ */
+ requeue_sd_request(SCpnt);
+ return;
+ }
+ }
+ }
+
+
+ /* If we had an ILLEGAL REQUEST returned, then we may have
+ * performed an unsupported command. The only thing this should be
+ * would be a ten byte read where only a six byte read was supported.
+ * Also, on a system where READ CAPACITY failed, we may have read
+ * past the end of the disk.
+ */
+
+ if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
+ if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
+ requeue_sd_request(SCpnt);
+ result = 0;
+ } else {
+ /* ???? */
+ }
+ }
+ } /* driver byte != 0 */
+ if (result) {
+ printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);
+
+ if (driver_byte(result) & DRIVER_SENSE)
+ print_sense("sd", SCpnt);
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
+ requeue_sd_request(SCpnt);
+ return;
+ }
+}
+
+/*
+ * requeue_sd_request() is the request handler function for the sd driver.
+ * Its function in life is to take block device requests, and translate
+ * them to SCSI commands.
+ */
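+/*
+ * The CDBs it builds are the classic 6- and 10-byte READ/WRITE forms; a
+ * 6-byte READ of <count> sectors starting at <block>, for instance, ends
+ * up laid out as
+ *
+ *	cmd[0] = READ_6;
+ *	cmd[1] = (lun << 5) | ((block >> 16) & 0x1f);
+ *	cmd[2] = (block >> 8) & 0xff;
+ *	cmd[3] =  block & 0xff;
+ *	cmd[4] =  count;		// at most 0xff sectors
+ *	cmd[5] = 0;
+ *
+ * with the 10-byte form substituted once block or count no longer fit.
+ */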
+
+static void do_sd_request (void)
+{
+ Scsi_Cmnd * SCpnt = NULL;
+ Scsi_Device * SDev;
+ struct request * req = NULL;
+ unsigned long flags;
+ int flag = 0;
+
+ save_flags(flags);
+ while (1==1){
+ cli();
+ if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
+ restore_flags(flags);
+ return;
+ };
+
+ INIT_SCSI_REQUEST;
+ SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device;
+
+ /*
+ * I am not sure where the best place to do this is. We need
+ * to hook in at a place that we are likely to reach when running
+ * in user context.
+ */
+ if( SDev->was_reset )
+ {
+ /*
+ * We need to relock the door, but we might
+ * be in an interrupt handler. Only do this
+ * from user space, since we do not want to
+ * sleep from an interrupt.
+ */
+ if( SDev->removable && !intr_count )
+ {
+ scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
+ }
+ SDev->was_reset = 0;
+ }
+
+ /* We have to be careful here. allocate_device will get a free pointer,
+ * but there is no guarantee that it is queueable. In normal usage,
+ * we want to call this, because other types of devices may have the
+ * host all tied up, and we want to make sure that we have at least
+ * one request pending for this type of device. We can also come
+ * through here while servicing an interrupt, because of the need to
+ * start another command. If we call allocate_device more than once,
+ * then the system can wedge if the command is not queueable. The
+ * request_queueable function is safe because it checks to make sure
+ * that the host is able to take another command before it returns
+ * a pointer.
+ */
+
+ if (flag++ == 0)
+ SCpnt = allocate_device(&CURRENT,
+ rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device, 0);
+ else SCpnt = NULL;
+
+ /*
+ * The following restore_flags leads to latency problems. FIXME.
+ * Using a "sti()" gets rid of the latency problems but causes
+ * race conditions and crashes.
+ */
+ restore_flags(flags);
+
+ /* This is a performance enhancement. We dig down into the request
+ * list and try and find a queueable request (i.e. device not busy,
+ * and host able to accept another command). If we find one, then we
+ * queue it. This can make a big difference on systems with more than
+ * one disk drive. We want to have the interrupts off when monkeying
+ * with the request list, because otherwise the kernel might try and
+ * slip in a request in between somewhere.
+ */
+
+ if (!SCpnt && sd_template.nr_dev > 1){
+ struct request *req1;
+ req1 = NULL;
+ cli();
+ req = CURRENT;
+ while(req){
+ SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(req->rq_dev)].device);
+ if(SCpnt) break;
+ req1 = req;
+ req = req->next;
+ };
+ if (SCpnt && req->rq_status == RQ_INACTIVE) {
+ if (req == CURRENT)
+ CURRENT = CURRENT->next;
+ else
+ req1->next = req->next;
+ };
+ restore_flags(flags);
+ };
+
+ if (!SCpnt) return; /* Could not find anything to do */
+
+ /* Queue command */
+ requeue_sd_request(SCpnt);
+ }; /* While */
+}
+
+static void requeue_sd_request (Scsi_Cmnd * SCpnt)
+{
+ int dev, devm, block, this_count;
+ unsigned char cmd[10];
+ int bounce_size, contiguous;
+ int max_sg;
+ struct buffer_head * bh, *bhp;
+ char * buff, *bounce_buffer;
+
+ repeat:
+
+ if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
+ do_sd_request();
+ return;
+ }
+
+ devm = MINOR(SCpnt->request.rq_dev);
+ dev = DEVICE_NR(SCpnt->request.rq_dev);
+
+ block = SCpnt->request.sector;
+ this_count = 0;
+
+#ifdef DEBUG
+ printk("Doing sd request, dev = %d, block = %d\n", devm, block);
+#endif
+
+ if (devm >= (sd_template.dev_max << 4) ||
+ !rscsi_disks[dev].device ||
+ block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
+ {
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+
+ block += sd[devm].start_sect;
+
+ if (rscsi_disks[dev].device->changed)
+ {
+ /*
+ * quietly refuse to do anything to a changed disc until the changed
+ * bit has been reset
+ */
+ /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+
+#ifdef DEBUG
+ printk("sd%c : real dev = /dev/sd%c, block = %d\n",
+ 'a' + devm, dev, block);
+#endif
+
+ /*
+ * If we have a 1K hardware sectorsize, prevent access to single
+ * 512 byte sectors. In theory we could handle this - in fact
+ * the scsi cdrom driver must be able to handle this because
+ * we typically use 1K blocksizes, and cdroms typically have
+ * 2K hardware sectorsizes. Of course, things are simpler
+ * with the cdrom, since it is read-only. For performance
+ * reasons, the filesystems should be able to handle this
+ * and not force the scsi disk driver to use bounce buffers
+ * for this.
+ */
+ if (rscsi_disks[dev].sector_size == 1024)
+ if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
+ printk("sd.c:Bad block number requested");
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+
+ switch (SCpnt->request.cmd)
+ {
+ case WRITE :
+ if (!rscsi_disks[dev].device->writeable)
+ {
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+ cmd[0] = WRITE_6;
+ break;
+ case READ :
+ cmd[0] = READ_6;
+ break;
+ default :
+ panic ("Unknown sd command %d\n", SCpnt->request.cmd);
+ }
+
+ SCpnt->this_count = 0;
+
+ /* If the host adapter can deal with very large scatter-gather
+ * requests, it is a waste of time to cluster
+ */
+ contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
+ bounce_buffer = NULL;
+ bounce_size = (SCpnt->request.nr_sectors << 9);
+
+ /* First see if we need a bounce buffer for this request. If we do, make
+ * sure that we can allocate a buffer. Do not waste space by allocating
+ * a bounce buffer if we are straddling the 16Mb line
+ */
+ if (contiguous && SCpnt->request.bh &&
+ ((long) SCpnt->request.bh->b_data)
+ + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
+ && SCpnt->host->unchecked_isa_dma) {
+ if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
+ bounce_buffer = (char *) scsi_malloc(bounce_size);
+ if(!bounce_buffer) contiguous = 0;
+ };
+
+ if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
+ for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
+ bhp = bhp->b_reqnext) {
+ if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
+ if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
+ contiguous = 0;
+ break;
+ }
+ };
+ if (!SCpnt->request.bh || contiguous) {
+
+ /* case of page request (i.e. raw device), or unlinked buffer */
+ this_count = SCpnt->request.nr_sectors;
+ buff = SCpnt->request.buffer;
+ SCpnt->use_sg = 0;
+
+ } else if (SCpnt->host->sg_tablesize == 0 ||
+ (need_isa_buffer && dma_free_sectors <= 10)) {
+
+ /* Case of host adapter that cannot scatter-gather. We also
+ * come here if we are running low on DMA buffer memory. We set
+ * a threshold higher than that we would need for this request so
+ * we leave room for other requests. Even though we would not need
+ * it all, we need to be conservative, because if we run low enough
+ * we have no choice but to panic.
+ */
+ if (SCpnt->host->sg_tablesize != 0 &&
+ need_isa_buffer &&
+ dma_free_sectors <= 10)
+ printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
+
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = SCpnt->request.buffer;
+ SCpnt->use_sg = 0;
+
+ } else {
+
+ /* Scatter-gather capable host adapter */
+ struct scatterlist * sgpnt;
+ int count, this_count_max;
+ int counted;
+
+ bh = SCpnt->request.bh;
+ this_count = 0;
+ this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
+ count = 0;
+ bhp = NULL;
+ while(bh) {
+ if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
+ if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
+ !CLUSTERABLE_DEVICE(SCpnt) ||
+ (SCpnt->host->unchecked_isa_dma &&
+ ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
+ if (count < SCpnt->host->sg_tablesize) count++;
+ else break;
+ };
+ this_count += (bh->b_size >> 9);
+ bhp = bh;
+ bh = bh->b_reqnext;
+ };
+#if 0
+ if(SCpnt->host->unchecked_isa_dma &&
+ ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
+#endif
+ SCpnt->use_sg = count; /* Number of chains */
+ count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes */
+ while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
+ count = count << 1;
+ SCpnt->sglist_len = count;
+ max_sg = count / sizeof(struct scatterlist);
+ if(SCpnt->host->sg_tablesize < max_sg)
+ max_sg = SCpnt->host->sg_tablesize;
+ sgpnt = (struct scatterlist * ) scsi_malloc(count);
+ if (!sgpnt) {
+ printk("Warning - running *really* short on DMA buffers\n");
+ SCpnt->use_sg = 0; /* No memory left - bail out */
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = SCpnt->request.buffer;
+ } else {
+ memset(sgpnt, 0, count); /* Zero so it is easy to fill, but only
+ * if memory is available
+ */
+ buff = (char *) sgpnt;
+ counted = 0;
+ for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
+ count < SCpnt->use_sg && bh;
+ count++, bh = bhp) {
+
+ bhp = bh->b_reqnext;
+
+ if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
+ sgpnt[count].length += bh->b_size;
+ counted += bh->b_size >> 9;
+
+ if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
+ ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
+ !sgpnt[count].alt_address) {
+ sgpnt[count].alt_address = sgpnt[count].address;
+ /* We try and avoid exhausting the DMA pool, since it is
+ * easier to control usage here. In other places we might
+ * have a more pressing need, and we would be screwed if
+ * we ran out */
+ if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
+ sgpnt[count].address = NULL;
+ } else {
+ sgpnt[count].address =
+ (char *) scsi_malloc(sgpnt[count].length);
+ };
+ /* If we start running low on DMA buffers, we abort the
+ * scatter-gather operation, and free all of the memory
+ * we have allocated. We want to ensure that all scsi
+ * operations are able to do at least a non-scatter/gather
+ * operation */
+ if(sgpnt[count].address == NULL){ /* Out of dma memory */
+#if 0
+ printk("Warning: Running low on SCSI DMA buffers");
+ /* Try switching back to a non s-g operation. */
+ while(--count >= 0){
+ if(sgpnt[count].alt_address)
+ scsi_free(sgpnt[count].address,
+ sgpnt[count].length);
+ };
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = SCpnt->request.buffer;
+ SCpnt->use_sg = 0;
+ scsi_free(sgpnt, SCpnt->sglist_len);
+#endif
+ SCpnt->use_sg = count;
+ this_count = counted -= bh->b_size >> 9;
+ break;
+ };
+
+ };
+
+ /* Only cluster buffers if we know that we can supply DMA
+ * buffers large enough to satisfy the request. Do not cluster
+ * a new request if this would mean that we suddenly need to
+ * start using DMA bounce buffers */
+ if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)
+ && CLUSTERABLE_DEVICE(SCpnt)) {
+ char * tmp;
+
+ if (((long) sgpnt[count].address) + sgpnt[count].length +
+ bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
+ (SCpnt->host->unchecked_isa_dma) &&
+ !sgpnt[count].alt_address) continue;
+
+ if(!sgpnt[count].alt_address) {count--; continue; }
+ if(dma_free_sectors > 10)
+ tmp = (char *) scsi_malloc(sgpnt[count].length
+ + bhp->b_size);
+ else {
+ tmp = NULL;
+ max_sg = SCpnt->use_sg;
+ };
+ if(tmp){
+ scsi_free(sgpnt[count].address, sgpnt[count].length);
+ sgpnt[count].address = tmp;
+ count--;
+ continue;
+ };
+
+ /* If we are allowed another sg chain, then increment
+ * counter so we can insert it. Otherwise we will end
+ * up truncating */
+
+ if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
+ }; /* contiguous buffers */
+ }; /* for loop */
+
+ /* This is actually how many we are going to transfer */
+ this_count = counted;
+
+ if(count < SCpnt->use_sg || SCpnt->use_sg
+ > SCpnt->host->sg_tablesize){
+ bh = SCpnt->request.bh;
+ printk("Use sg, count %d %x %d\n",
+ SCpnt->use_sg, count, dma_free_sectors);
+ printk("maxsg = %x, counted = %d this_count = %d\n",
+ max_sg, counted, this_count);
+ while(bh){
+ printk("[%p %lx] ", bh->b_data, bh->b_size);
+ bh = bh->b_reqnext;
+ };
+ if(SCpnt->use_sg < 16)
+ for(count=0; count<SCpnt->use_sg; count++)
+ printk("{%d:%p %p %d} ", count,
+ sgpnt[count].address,
+ sgpnt[count].alt_address,
+ sgpnt[count].length);
+ panic("Ooops");
+ };
+
+ if (SCpnt->request.cmd == WRITE)
+ for(count=0; count<SCpnt->use_sg; count++)
+ if(sgpnt[count].alt_address)
+ memcpy(sgpnt[count].address, sgpnt[count].alt_address,
+ sgpnt[count].length);
+ }; /* Able to malloc sgpnt */
+ }; /* Host adapter capable of scatter-gather */
+
+ /* Now handle the possibility of DMA to addresses > 16Mb */
+
+ if(SCpnt->use_sg == 0){
+ if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
+ (SCpnt->host->unchecked_isa_dma)) {
+ if(bounce_buffer)
+ buff = bounce_buffer;
+ else
+ buff = (char *) scsi_malloc(this_count << 9);
+ if(buff == NULL) { /* Try backing off a bit if we are low on mem*/
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = (char *) scsi_malloc(this_count << 9);
+ if(!buff) panic("Ran out of DMA buffers.");
+ };
+ if (SCpnt->request.cmd == WRITE)
+ memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
+ };
+ };
+#ifdef DEBUG
+ printk("sd%c : %s %d/%d 512 byte blocks.\n",
+ 'a' + devm,
+ (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
+ this_count, SCpnt->request.nr_sectors);
+#endif
+
+ cmd[1] = (SCpnt->lun << 5) & 0xe0;
+
+ if (rscsi_disks[dev].sector_size == 1024){
+ if(block & 1) panic("sd.c:Bad block number requested");
+ if(this_count & 1) panic("sd.c:Bad block number requested");
+ block = block >> 1;
+ this_count = this_count >> 1;
+ };
+
+ if (rscsi_disks[dev].sector_size == 256){
+ block = block << 1;
+ this_count = this_count << 1;
+ };
+
+ if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
+ {
+ if (this_count > 0xffff)
+ this_count = 0xffff;
+
+ cmd[0] += READ_10 - READ_6 ;
+ cmd[2] = (unsigned char) (block >> 24) & 0xff;
+ cmd[3] = (unsigned char) (block >> 16) & 0xff;
+ cmd[4] = (unsigned char) (block >> 8) & 0xff;
+ cmd[5] = (unsigned char) block & 0xff;
+ cmd[6] = cmd[9] = 0;
+ cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
+ cmd[8] = (unsigned char) this_count & 0xff;
+ }
+ else
+ {
+ if (this_count > 0xff)
+ this_count = 0xff;
+
+ cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+ cmd[2] = (unsigned char) ((block >> 8) & 0xff);
+ cmd[3] = (unsigned char) block & 0xff;
+ cmd[4] = (unsigned char) this_count;
+ cmd[5] = 0;
+ }
+
+ /*
+ * We shouldn't disconnect in the middle of a sector, so with a dumb
+ * host adapter, it's safe to assume that we can at least transfer
+ * this many bytes between each connect / disconnect.
+ */
+
+ SCpnt->transfersize = rscsi_disks[dev].sector_size;
+ SCpnt->underflow = this_count << 9;
+ scsi_do_cmd (SCpnt, (void *) cmd, buff,
+ this_count * rscsi_disks[dev].sector_size,
+ rw_intr,
+ (SCpnt->device->type == TYPE_DISK ?
+ SD_TIMEOUT : SD_MOD_TIMEOUT),
+ MAX_RETRIES);
+}
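
The command block assembled just above switches between the 6-byte and 10-byte READ/WRITE CDB forms depending on whether the block number and count fit the smaller format. A standalone sketch of the two layouts as this driver packs them; build_read6()/build_read10() and the hard-coded opcodes are for illustration only:

    #include <string.h>

    static void build_read6(unsigned char cdb[6], unsigned lun,
                            unsigned long lba, unsigned char nblocks)
    {
        memset(cdb, 0, 6);
        cdb[0] = 0x08;                            /* READ(6) opcode */
        cdb[1] = ((lun << 5) & 0xe0) | ((lba >> 16) & 0x1f);
        cdb[2] = (lba >> 8) & 0xff;
        cdb[3] = lba & 0xff;
        cdb[4] = nblocks;                         /* 8-bit transfer length */
    }

    static void build_read10(unsigned char cdb[10], unsigned lun,
                             unsigned long lba, unsigned nblocks)
    {
        memset(cdb, 0, 10);
        cdb[0] = 0x28;                            /* READ(10) opcode */
        cdb[1] = (lun << 5) & 0xe0;
        cdb[2] = (lba >> 24) & 0xff;              /* 32-bit LBA, big-endian */
        cdb[3] = (lba >> 16) & 0xff;
        cdb[4] = (lba >> 8) & 0xff;
        cdb[5] = lba & 0xff;
        cdb[7] = (nblocks >> 8) & 0xff;           /* 16-bit transfer length */
        cdb[8] = nblocks & 0xff;
    }
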
+
+static int check_scsidisk_media_change(kdev_t full_dev){
+ int retval;
+ int target;
+ struct inode inode;
+ int flag = 0;
+
+ target = DEVICE_NR(full_dev);
+
+ if (target >= sd_template.dev_max ||
+ !rscsi_disks[target].device) {
+ printk("SCSI disk request error: invalid device.\n");
+ return 0;
+ };
+
+ if(!rscsi_disks[target].device->removable) return 0;
+
+ inode.i_rdev = full_dev; /* This is all we really need here */
+ retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
+
+ if(retval){ /* Unable to test, unit probably not ready. This usually
+ * means there is no disc in the drive. Mark as changed,
+ * and we will figure it out later once the drive is
+ * available again. */
+
+ rscsi_disks[target].ready = 0;
+ rscsi_disks[target].device->changed = 1;
+ return 1; /* This will force a flush, if called from
+ * check_disk_change */
+ };
+
+ /*
+ * for removable scsi disk ( FLOPTICAL ) we have to recognise the
+ * presence of disk in the drive. This is kept in the Scsi_Disk
+ * struct and tested at open ! Daniel Roche ( dan@lectra.fr )
+ */
+
+ rscsi_disks[target].ready = 1; /* FLOPTICAL */
+
+ retval = rscsi_disks[target].device->changed;
+ if(!flag) rscsi_disks[target].device->changed = 0;
+ return retval;
+}
+
+static void sd_init_done (Scsi_Cmnd * SCpnt)
+{
+ struct request * req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+}
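
sd_init_done() together with the MUTEX_LOCKED semaphore used below is the usual trick for making the asynchronous scsi_do_cmd() interface synchronous: issue the command with a completion routine, sleep with down(), and let the completion call up(). A user-space analogue using POSIX semaphores, with a worker thread standing in for the interrupt-driven completion (names here are illustrative, not kernel APIs):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    struct request { sem_t done; int result; };

    static void *worker(void *arg)              /* stands in for the completion path */
    {
        struct request *req = arg;
        req->result = 0;                        /* "command completed successfully" */
        sem_post(&req->done);                   /* analogous to up(req->sem) */
        return NULL;
    }

    int main(void)
    {
        struct request req;
        pthread_t tid;

        sem_init(&req.done, 0, 0);              /* analogous to MUTEX_LOCKED */
        pthread_create(&tid, NULL, worker, &req);
        sem_wait(&req.done);                    /* analogous to down(&sem) */
        pthread_join(tid, NULL);
        printf("result = %d\n", req.result);
        return 0;
    }
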
+
+static int sd_init_onedisk(int i)
+{
+ unsigned char cmd[10];
+ unsigned char *buffer;
+ unsigned long spintime;
+ int the_result, retries;
+ Scsi_Cmnd * SCpnt;
+
+ /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is
+ * considered a fatal error, and many devices report such an error
+ * just after a scsi bus reset.
+ */
+
+ SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
+ buffer = (unsigned char *) scsi_malloc(512);
+
+ spintime = 0;
+
+ /* Spin up drives, as required. Only do this at boot time */
+ if (!MODULE_FLAG){
+ do{
+ retries = 0;
+ while(retries < 3)
+ {
+ cmd[0] = TEST_UNIT_READY;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ memset ((void *) &cmd[2], 0, 8);
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ /* Mark as really busy again */
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ the_result = SCpnt->result;
+ retries++;
+ if( the_result == 0
+ || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
+ break;
+ }
+
+ /* Look for non-removable devices that return NOT_READY.
+ * Issue command to spin up drive for these cases. */
+ if(the_result && !rscsi_disks[i].device->removable &&
+ SCpnt->sense_buffer[2] == NOT_READY) {
+ int time1;
+ if(!spintime){
+ printk( "sd%c: Spinning up disk...", 'a' + i );
+ cmd[0] = START_STOP;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ cmd[1] |= 1; /* Return immediately */
+ memset ((void *) &cmd[2], 0, 8);
+ cmd[4] = 1; /* Start spin cycle */
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ /* Mark as really busy again */
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ spintime = jiffies;
+ }
+
+ time1 = jiffies;
+ while(jiffies < time1 + HZ); /* Wait 1 second for next try */
+ printk( "." );
+ };
+ } while(the_result && spintime && spintime+100*HZ > jiffies);
+ if (spintime) {
+ if (the_result)
+ printk( "not responding...\n" );
+ else
+ printk( "ready\n" );
+ }
+ }; /* !MODULE_FLAG */
+
+
+ retries = 3;
+ do {
+ cmd[0] = READ_CAPACITY;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ memset ((void *) &cmd[2], 0, 8);
+ memset ((void *) buffer, 0, 8);
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ /* Mark as really busy again */
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 8, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem); /* sleep until it is ready */
+ }
+
+ the_result = SCpnt->result;
+ retries--;
+
+ } while(the_result && retries);
+
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
+
+ wake_up(&SCpnt->device->device_wait);
+
+ /* Wake up a process waiting for device */
+
+ /*
+ * The SCSI standard says:
+ * "READ CAPACITY is necessary for self configuring software"
+ * While not mandatory, support of READ CAPACITY is strongly encouraged.
+ * We used to die if we couldn't successfully do a READ CAPACITY.
+ * But, now we go on about our way. The side effects of this are
+ *
+ * 1. We can't know block size with certainty. I have said "512 bytes
+ * is it" as this is most common.
+ *
+ * 2. Recovery from when someone attempts to read past the end of the
+ * raw device will be slower.
+ */
+
+ if (the_result)
+ {
+ printk ("sd%c : READ CAPACITY failed.\n"
+ "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
+ 'a' + i, 'a' + i,
+ status_byte(the_result),
+ msg_byte(the_result),
+ host_byte(the_result),
+ driver_byte(the_result)
+ );
+ if (driver_byte(the_result) & DRIVER_SENSE)
+ printk("sd%c : extended sense code = %1x \n",
+ 'a' + i, SCpnt->sense_buffer[2] & 0xf);
+ else
+ printk("sd%c : sense not available. \n", 'a' + i);
+
+ printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n",
+ 'a' + i);
+ rscsi_disks[i].capacity = 0x1fffff;
+ rscsi_disks[i].sector_size = 512;
+
+ /* Set dirty bit for removable devices if not ready - sometimes drives
+ * will not report this properly. */
+ if(rscsi_disks[i].device->removable &&
+ SCpnt->sense_buffer[2] == NOT_READY)
+ rscsi_disks[i].device->changed = 1;
+
+ }
+ else
+ {
+ /*
+ * FLOPTICAL , if read_capa is ok , drive is assumed to be ready
+ */
+ rscsi_disks[i].ready = 1;
+
+ rscsi_disks[i].capacity = (buffer[0] << 24) |
+ (buffer[1] << 16) |
+ (buffer[2] << 8) |
+ buffer[3];
+
+ rscsi_disks[i].sector_size = (buffer[4] << 24) |
+ (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
+
+ if (rscsi_disks[i].sector_size == 0) {
+ rscsi_disks[i].sector_size = 512;
+ printk("sd%c : sector size 0 reported, assuming 512.\n", 'a' + i);
+ }
+
+
+ if (rscsi_disks[i].sector_size != 512 &&
+ rscsi_disks[i].sector_size != 1024 &&
+ rscsi_disks[i].sector_size != 256)
+ {
+ printk ("sd%c : unsupported sector size %d.\n",
+ 'a' + i, rscsi_disks[i].sector_size);
+ if(rscsi_disks[i].device->removable){
+ rscsi_disks[i].capacity = 0;
+ } else {
+ printk ("scsi : deleting disk entry.\n");
+ rscsi_disks[i].device = NULL;
+ sd_template.nr_dev--;
+ return i;
+ };
+ }
+ {
+ /*
+ * The msdos fs needs to know the hardware sector size,
+ * so I have created this table. See ll_rw_blk.c
+ * Jacques Gelinas (Jacques@solucorp.qc.ca)
+ */
+ int m;
+ int hard_sector = rscsi_disks[i].sector_size;
+ /* There are 16 minors allocated for each device */
+ for (m=i<<4; m<((i+1)<<4); m++){
+ sd_hardsizes[m] = hard_sector;
+ }
+ printk ("SCSI Hardware sector size is %d bytes on device sd%c\n",
+ hard_sector,i+'a');
+ }
+ if(rscsi_disks[i].sector_size == 1024)
+ rscsi_disks[i].capacity <<= 1; /* Change into 512 byte sectors */
+ if(rscsi_disks[i].sector_size == 256)
+ rscsi_disks[i].capacity >>= 1; /* Change into 512 byte sectors */
+ }
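
The branch above decodes the 8-byte READ CAPACITY payload as two big-endian 32-bit fields (the block count as this driver uses it, then bytes per block) and later rescales the count into 512-byte kernel sectors. A compact sketch of that decoding; be32() and capacity_in_512_byte_sectors() are illustrative helper names:

    #include <stdint.h>

    static uint32_t be32(const unsigned char *p)
    {
        return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16) |
               ((uint32_t) p[2] << 8)  |  (uint32_t) p[3];
    }

    /* Returns the capacity expressed in 512-byte sectors, or 0 for a block
     * size this sketch does not handle. */
    static uint32_t capacity_in_512_byte_sectors(const unsigned char resp[8])
    {
        uint32_t capacity    = be32(resp);       /* big-endian block count field */
        uint32_t sector_size = be32(resp + 4);   /* big-endian bytes per block */

        switch (sector_size) {
        case  512: return capacity;
        case 1024: return capacity << 1;         /* each block is two 512-byte sectors */
        case  256: return capacity >> 1;         /* two blocks per 512-byte sector */
        default:   return 0;
        }
    }
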
+
+
+ /*
+ * Unless otherwise specified, this is not write protected.
+ */
+ rscsi_disks[i].write_prot = 0;
+ if ( rscsi_disks[i].device->removable && rscsi_disks[i].ready ) {
+ /* FLOPTICAL */
+
+ /*
+ * for removable scsi disk ( FLOPTICAL ) we have to recognise
+ * the Write Protect Flag. This flag is kept in the Scsi_Disk struct
+ * and tested at open !
+ * Daniel Roche ( dan@lectra.fr )
+ */
+
+ memset ((void *) &cmd[0], 0, 8);
+ cmd[0] = MODE_SENSE;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ cmd[2] = 1; /* page code 1 ?? */
+ cmd[4] = 12;
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ /* same code as READCAPA !! */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Mark as really busy again */
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ the_result = SCpnt->result;
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
+ wake_up(&SCpnt->device->device_wait);
+
+ if ( the_result ) {
+ printk ("sd%c: test WP failed, assume Write Protected\n",i+'a');
+ rscsi_disks[i].write_prot = 1;
+ } else {
+ rscsi_disks[i].write_prot = ((buffer[2] & 0x80) != 0);
+ printk ("sd%c: Write Protect is %s\n",i+'a',
+ rscsi_disks[i].write_prot ? "on" : "off");
+ }
+
+ } /* check for write protect */
+
+ rscsi_disks[i].ten = 1;
+ rscsi_disks[i].remap = 1;
+ scsi_free(buffer, 512);
+ return i;
+}
+
+/*
+ * The sd_init() function looks at all SCSI drives present, determines
+ * their size, and reads partition table entries for them.
+ */
+
+static int sd_registered = 0;
+
+static int sd_init()
+{
+ int i;
+
+ if (sd_template.dev_noticed == 0) return 0;
+
+ if(!sd_registered) {
+ if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
+ printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
+ return 1;
+ }
+ sd_registered++;
+ }
+
+ /* We do not support attaching loadable devices yet. */
+ if(rscsi_disks) return 0;
+
+ sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
+
+ rscsi_disks = (Scsi_Disk *)
+ scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
+ memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
+
+ sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(int), GFP_ATOMIC);
+ memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
+
+ sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(int), GFP_ATOMIC);
+
+ sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(int), GFP_ATOMIC);
+
+ for(i=0;i<(sd_template.dev_max << 4);i++){
+ sd_blocksizes[i] = 1024;
+ sd_hardsizes[i] = 512;
+ }
+ blksize_size[MAJOR_NR] = sd_blocksizes;
+ hardsect_size[MAJOR_NR] = sd_hardsizes;
+ sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(struct hd_struct),
+ GFP_ATOMIC);
+
+
+ sd_gendisk.max_nr = sd_template.dev_max;
+ sd_gendisk.part = sd;
+ sd_gendisk.sizes = sd_sizes;
+ sd_gendisk.real_devices = (void *) rscsi_disks;
+ return 0;
+}
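
sd_init() sizes every per-minor array as dev_max << 4 because each disk owns sixteen minors: the upper bits select the disk and the low four bits the partition, with partition 0 meaning the whole disk. A tiny sketch of that mapping; the SD_* macro names are made up for illustration:

    #include <stdio.h>

    #define SD_MINOR(disk, part)   (((disk) << 4) | ((part) & 0x0f))
    #define SD_DISK(minor)         ((minor) >> 4)
    #define SD_PART(minor)         ((minor) & 0x0f)

    int main(void)
    {
        int minor = SD_MINOR(2, 5);             /* third disk, fifth partition */
        printf("minor=%d disk=%d part=%d\n", minor, SD_DISK(minor), SD_PART(minor));
        return 0;
    }
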
+
+static void sd_finish()
+{
+ int i;
+
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+
+ sd_gendisk.next = gendisk_head;
+ gendisk_head = &sd_gendisk;
+
+ for (i = 0; i < sd_template.dev_max; ++i)
+ if (!rscsi_disks[i].capacity &&
+ rscsi_disks[i].device)
+ {
+ if (MODULE_FLAG
+ && !rscsi_disks[i].has_part_table) {
+ sd_sizes[i << 4] = rscsi_disks[i].capacity;
+ /* revalidate does sd_init_onedisk via MAYBE_REINIT*/
+ revalidate_scsidisk(MKDEV(MAJOR_NR, i << 4), 0);
+ }
+ else
+ i=sd_init_onedisk(i);
+ rscsi_disks[i].has_part_table = 1;
+ }
+
+ /* If our host adapter is capable of scatter-gather, then we increase
+ * the read-ahead to 16 blocks (32 sectors). If not, we use
+ * a two block (4 sector) read ahead.
+ */
+ if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
+ read_ahead[MAJOR_NR] = 120; /* 120 sector read-ahead */
+ else
+ read_ahead[MAJOR_NR] = 4; /* 4 sector read-ahead */
+
+ return;
+}
+
+static int sd_detect(Scsi_Device * SDp){
+ if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
+
+ printk("Detected scsi disk sd%c at scsi%d, channel %d, id %d, lun %d\n",
+ 'a'+ (sd_template.dev_noticed++),
+ SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
+
+ return 1;
+}
+
+static int sd_attach(Scsi_Device * SDp){
+ Scsi_Disk * dpnt;
+ int i;
+
+ if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
+
+ if(sd_template.nr_dev >= sd_template.dev_max) {
+ SDp->attached--;
+ return 1;
+ }
+
+ for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
+ if(!dpnt->device) break;
+
+ if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
+
+ SDp->scsi_request_fn = do_sd_request;
+ rscsi_disks[i].device = SDp;
+ rscsi_disks[i].has_part_table = 0;
+ sd_template.nr_dev++;
+ sd_gendisk.nr_real++;
+ return 0;
+}
+
+#define DEVICE_BUSY rscsi_disks[target].device->busy
+#define USAGE rscsi_disks[target].device->access_count
+#define CAPACITY rscsi_disks[target].capacity
+#define MAYBE_REINIT sd_init_onedisk(target)
+#define GENDISK_STRUCT sd_gendisk
+
+/* This routine is called to flush all partitions and partition tables
+ * for a changed scsi disk, and then re-read the new partition table.
+ * If we are revalidating a disk because of a media change, then we
+ * enter with usage == 0. If we are using an ioctl, we automatically have
+ * usage == 1 (we need an open channel to use an ioctl :-), so this
+ * is our limit.
+ */
+int revalidate_scsidisk(kdev_t dev, int maxusage){
+ int target;
+ struct gendisk * gdev;
+ unsigned long flags;
+ int max_p;
+ int start;
+ int i;
+
+ target = DEVICE_NR(dev);
+ gdev = &GENDISK_STRUCT;
+
+ save_flags(flags);
+ cli();
+ if (DEVICE_BUSY || USAGE > maxusage) {
+ restore_flags(flags);
+ printk("Device busy for revalidation (usage=%d)\n", USAGE);
+ return -EBUSY;
+ };
+ DEVICE_BUSY = 1;
+ restore_flags(flags);
+
+ max_p = gdev->max_p;
+ start = target << gdev->minor_shift;
+
+ for (i=max_p - 1; i >=0 ; i--) {
+ int minor = start+i;
+ kdev_t devi = MKDEV(MAJOR_NR, minor);
+ sync_dev(devi);
+ invalidate_inodes(devi);
+ invalidate_buffers(devi);
+ gdev->part[minor].start_sect = 0;
+ gdev->part[minor].nr_sects = 0;
+ /*
+ * Reset the blocksize for everything so that we can read
+ * the partition table.
+ */
+ blksize_size[MAJOR_NR][minor] = 1024;
+ };
+
+#ifdef MAYBE_REINIT
+ MAYBE_REINIT;
+#endif
+
+ gdev->part[start].nr_sects = CAPACITY;
+ resetup_one_dev(gdev, target);
+
+ DEVICE_BUSY = 0;
+ return 0;
+}
+
+static int fop_revalidate_scsidisk(kdev_t dev){
+ return revalidate_scsidisk(dev, 0);
+}
+
+
+static void sd_detach(Scsi_Device * SDp)
+{
+ Scsi_Disk * dpnt;
+ int i;
+ int max_p;
+ int start;
+
+ for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
+ if(dpnt->device == SDp) {
+
+ /* If we are disconnecting a disk driver, sync and invalidate
+ * everything */
+ max_p = sd_gendisk.max_p;
+ start = i << sd_gendisk.minor_shift;
+
+ for (i=max_p - 1; i >=0 ; i--) {
+ int minor = start+i;
+ kdev_t devi = MKDEV(MAJOR_NR, minor);
+ sync_dev(devi);
+ invalidate_inodes(devi);
+ invalidate_buffers(devi);
+ sd_gendisk.part[minor].start_sect = 0;
+ sd_gendisk.part[minor].nr_sects = 0;
+ sd_sizes[minor] = 0;
+ };
+
+ dpnt->has_part_table = 0;
+ dpnt->device = NULL;
+ dpnt->capacity = 0;
+ SDp->attached--;
+ sd_template.dev_noticed--;
+ sd_template.nr_dev--;
+ sd_gendisk.nr_real--;
+ return;
+ }
+ return;
+}
+
+#ifdef MODULE
+
+int init_module(void) {
+ sd_template.usage_count = &mod_use_count_;
+ return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
+}
+
+void cleanup_module( void)
+{
+ struct gendisk * prev_sdgd;
+ struct gendisk * sdgd;
+
+ scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
+ unregister_blkdev(SCSI_DISK_MAJOR, "sd");
+ sd_registered--;
+ if( rscsi_disks != NULL )
+ {
+ scsi_init_free((char *) rscsi_disks,
+ (sd_template.dev_noticed + SD_EXTRA_DEVS)
+ * sizeof(Scsi_Disk));
+
+ scsi_init_free((char *) sd_sizes, sd_template.dev_max * sizeof(int));
+ scsi_init_free((char *) sd_blocksizes, sd_template.dev_max * sizeof(int));
+ scsi_init_free((char *) sd_hardsizes, sd_template.dev_max * sizeof(int));
+ scsi_init_free((char *) sd,
+ (sd_template.dev_max << 4) * sizeof(struct hd_struct));
+ /*
+ * Now remove sd_gendisk from the linked list
+ */
+ sdgd = gendisk_head;
+ prev_sdgd = NULL;
+ while(sdgd != &sd_gendisk)
+ {
+ prev_sdgd = sdgd;
+ sdgd = sdgd->next;
+ }
+
+ if(sdgd != &sd_gendisk)
+ printk("sd_gendisk not in disk chain.\n");
+ else {
+ if(prev_sdgd != NULL)
+ prev_sdgd->next = sdgd->next;
+ else
+ gendisk_head = sdgd->next;
+ }
+ }
+
+ blksize_size[MAJOR_NR] = NULL;
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ blk_size[MAJOR_NR] = NULL;
+ hardsect_size[MAJOR_NR] = NULL;
+ read_ahead[MAJOR_NR] = 0;
+ sd_template.dev_max = 0;
+}
+#endif /* MODULE */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/sd.h b/i386/i386at/gpl/linux/scsi/sd.h
new file mode 100644
index 00000000..7a8219b7
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/sd.h
@@ -0,0 +1,65 @@
+/*
+ * sd.h Copyright (C) 1992 Drew Eckhardt
+ * SCSI disk driver header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ */
+#ifndef _SD_H
+#define _SD_H
+/*
+ $Header: cvs/gnumach/i386/i386at/gpl/linux/scsi/Attic/sd.h,v 1.1.1.1 1997/02/25 21:27:52 thomas Exp $
+*/
+
+#ifndef _SCSI_H
+#include "scsi.h"
+#endif
+
+#ifndef _GENDISK_H
+#include <linux/genhd.h>
+#endif
+
+extern struct hd_struct * sd;
+
+typedef struct scsi_disk {
+ unsigned capacity; /* size in blocks */
+ unsigned sector_size; /* size in bytes */
+ Scsi_Device *device;
+ unsigned char ready; /* flag ready for FLOPTICAL */
+ unsigned char write_prot; /* flag write_protect for rmvable dev */
+ unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */
+ unsigned char sector_bit_shift; /* power of 2 sectors per FS block */
+ unsigned ten:1; /* support ten byte read / write */
+ unsigned remap:1; /* support remapping */
+ unsigned has_part_table:1; /* has partition table */
+} Scsi_Disk;
+
+extern Scsi_Disk * rscsi_disks;
+
+extern int revalidate_scsidisk(kdev_t dev, int maxusage);
+
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/i386/i386at/gpl/linux/scsi/sd_ioctl.c b/i386/i386at/gpl/linux/scsi/sd_ioctl.c
new file mode 100644
index 00000000..1898af61
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/sd_ioctl.c
@@ -0,0 +1,94 @@
+/*
+ * drivers/scsi/sd_ioctl.c
+ *
+ * ioctl handling for SCSI disks
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/errno.h>
+
+#include <asm/segment.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "scsi_ioctl.h"
+#include "hosts.h"
+#include "sd.h"
+
+int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
+{
+ kdev_t dev = inode->i_rdev;
+ int error;
+ struct Scsi_Host * host;
+ int diskinfo[4];
+ struct hd_geometry *loc = (struct hd_geometry *) arg;
+
+ switch (cmd) {
+ case HDIO_GETGEO: /* Return BIOS disk parameters */
+ if (!loc) return -EINVAL;
+ error = verify_area(VERIFY_WRITE, loc, sizeof(*loc));
+ if (error)
+ return error;
+ host = rscsi_disks[MINOR(dev) >> 4].device->host;
+ diskinfo[0] = 0;
+ diskinfo[1] = 0;
+ diskinfo[2] = 0;
+ if(host->hostt->bios_param != NULL)
+ host->hostt->bios_param(&rscsi_disks[MINOR(dev) >> 4],
+ dev,
+ &diskinfo[0]);
+ put_user(diskinfo[0], &loc->heads);
+ put_user(diskinfo[1], &loc->sectors);
+ put_user(diskinfo[2], &loc->cylinders);
+ put_user(sd[MINOR(inode->i_rdev)].start_sect, &loc->start);
+ return 0;
+ case BLKGETSIZE: /* Return device size */
+ if (!arg) return -EINVAL;
+ error = verify_area(VERIFY_WRITE, (long *) arg, sizeof(long));
+ if (error)
+ return error;
+ put_user(sd[MINOR(inode->i_rdev)].nr_sects,
+ (long *) arg);
+ return 0;
+ case BLKRASET:
+ if(!suser()) return -EACCES;
+ if(!(inode->i_rdev)) return -EINVAL;
+ if(arg > 0xff) return -EINVAL;
+ read_ahead[MAJOR(inode->i_rdev)] = arg;
+ return 0;
+ case BLKFLSBUF:
+ if(!suser()) return -EACCES;
+ if(!(inode->i_rdev)) return -EINVAL;
+ fsync_dev(inode->i_rdev);
+ invalidate_buffers(inode->i_rdev);
+ return 0;
+
+ case BLKRRPART: /* Re-read partition tables */
+ return revalidate_scsidisk(dev, 1);
+ default:
+ return scsi_ioctl(rscsi_disks[MINOR(dev) >> 4].device , cmd, (void *) arg);
+ }
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/seagate.c b/i386/i386at/gpl/linux/scsi/seagate.c
new file mode 100644
index 00000000..5b25a833
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/seagate.c
@@ -0,0 +1,1744 @@
+/*
+ * seagate.c Copyright (C) 1992, 1993 Drew Eckhardt
+ * low level scsi driver for ST01/ST02, Future Domain TMC-885,
+ * TMC-950 by
+ *
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ *
+ * Note : TMC-880 boards don't work because they have two bits in
+ * the status register flipped, I'll fix this "RSN"
+ *
+ * This card does all the I/O via memory mapped I/O, so there is no need
+ * to check or allocate a region of the I/O address space.
+ */
+
+/*
+ * Configuration :
+ * To use without BIOS -DOVERRIDE=base_address -DCONTROLLER=FD or SEAGATE
+ * -DIRQ will override the default of 5.
+ * Note: You can now set these options from the kernel's "command line".
+ * The syntax is:
+ *
+ * st0x=ADDRESS,IRQ (for a Seagate controller)
+ * or:
+ * tmc8xx=ADDRESS,IRQ (for a TMC-8xx or TMC-950 controller)
+ * eg:
+ * tmc8xx=0xC8000,15
+ *
+ * will configure the driver for a TMC-8xx style controller using IRQ 15
+ * with a base address of 0xC8000.
+ *
+ * -DFAST or -DFAST32 will use blind transfers where possible
+ *
+ * -DARBITRATE will cause the host adapter to arbitrate for the
+ * bus for better SCSI-II compatibility, rather than just
+ * waiting for BUS FREE and then doing its thing. Should
+ * let us do one command per Lun when I integrate my
+ * reorganization changes into the distribution sources.
+ *
+ * -DSLOW_HANDSHAKE will allow compatibility with broken devices that don't
+ * handshake fast enough (ie, some CD ROM's) for the Seagate
+ * code.
+ *
+ * -DSLOW_RATE=x, x some number will let you specify a default
+ * transfer rate if handshaking isn't working correctly.
+ */
+
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/config.h>
+#include <linux/proc_fs.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "seagate.h"
+#include "constants.h"
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_seagate = {
+ PROC_SCSI_SEAGATE, 7, "seagate",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+
+#ifndef IRQ
+#define IRQ 5
+#endif
+
+#if (defined(FAST32) && !defined(FAST))
+#define FAST
+#endif
+
+#if defined(SLOW_RATE) && !defined(SLOW_HANDSHAKE)
+#define SLOW_HANDSHAKE
+#endif
+
+#if defined(SLOW_HANDSHAKE) && !defined(SLOW_RATE)
+#define SLOW_RATE 50
+#endif
+
+
+#if defined(LINKED)
+#undef LINKED /* Linked commands are currently broken ! */
+#endif
+
+static int internal_command(unsigned char target, unsigned char lun,
+ const void *cmnd,
+ void *buff, int bufflen, int reselect);
+
+static int incommand; /*
+ set if arbitration has finished and we are
+ in some command phase.
+ */
+
+static const void *base_address = NULL; /*
+ Where the card ROM starts,
+ used to calculate memory mapped
+ register location.
+ */
+#ifdef notyet
+static volatile int abort_confirm = 0;
+#endif
+
+static volatile void *st0x_cr_sr; /*
+ control register write,
+ status register read.
+ 256 bytes in length.
+
+ Read is status of SCSI BUS,
+ as per STAT masks.
+
+ */
+
+
+static volatile void *st0x_dr; /*
+ data register, read write
+ 256 bytes in length.
+ */
+
+
+static volatile int st0x_aborted=0; /*
+ set when we are aborted, ie by a time out, etc.
+ */
+
+static unsigned char controller_type = 0; /* set to SEAGATE for ST0x boards or FD for TMC-8xx boards */
+static unsigned char irq = IRQ;
+
+#define retcode(result) (((result) << 16) | (message << 8) | status)
+#define STATUS (*(volatile unsigned char *) st0x_cr_sr)
+#define CONTROL STATUS
+#define DATA (*(volatile unsigned char *) st0x_dr)
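
The retcode() macro above packs the host-level result, the last MESSAGE IN byte, and the SCSI status byte into a single int, matching the mid-level's status_byte()/msg_byte()/host_byte() accessors. A self-contained sketch of that packing; the MAKE_RESULT/RESULT_* names are illustrative:

    #include <stdio.h>

    #define MAKE_RESULT(host, msg, status) (((host) << 16) | ((msg) << 8) | (status))
    #define RESULT_STATUS(r)  ((r) & 0xff)          /* SCSI status byte */
    #define RESULT_MSG(r)    (((r) >> 8) & 0xff)    /* last MESSAGE IN byte */
    #define RESULT_HOST(r)   (((r) >> 16) & 0xff)   /* host adapter code (DID_*) */

    int main(void)
    {
        int r = MAKE_RESULT(0x00 /* DID_OK */, 0x00 /* COMMAND COMPLETE */,
                            0x02 /* CHECK CONDITION */);
        printf("host=%02x msg=%02x status=%02x\n",
               RESULT_HOST(r), RESULT_MSG(r), RESULT_STATUS(r));
        return 0;
    }
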
+
+void st0x_setup (char *str, int *ints) {
+ controller_type = SEAGATE;
+ base_address = (void *) ints[1];
+ irq = ints[2];
+}
+
+void tmc8xx_setup (char *str, int *ints) {
+ controller_type = FD;
+ base_address = (void *) ints[1];
+ irq = ints[2];
+}
+
+
+#ifndef OVERRIDE
+static const char * seagate_bases[] = {
+ (char *) 0xc8000, (char *) 0xca000, (char *) 0xcc000,
+ (char *) 0xce000, (char *) 0xdc000, (char *) 0xde000
+};
+
+typedef struct {
+ const char *signature ;
+ unsigned offset;
+ unsigned length;
+ unsigned char type;
+} Signature;
+
+static const Signature signatures[] = {
+#ifdef CONFIG_SCSI_SEAGATE
+{"ST01 v1.7 (C) Copyright 1987 Seagate", 15, 37, SEAGATE},
+{"SCSI BIOS 2.00 (C) Copyright 1987 Seagate", 15, 40, SEAGATE},
+
+/*
+ * The following two lines are NOT mistakes. One detects ROM revision
+ * 3.0.0, the other 3.2. Since seagate has only one type of SCSI adapter,
+ * and this is not going to change, the "SEAGATE" and "SCSI" together
+ * are probably "good enough"
+ */
+
+{"SEAGATE SCSI BIOS ",16, 17, SEAGATE},
+{"SEAGATE SCSI BIOS ",17, 17, SEAGATE},
+
+/*
+ * However, future domain makes several incompatible SCSI boards, so specific
+ * signatures must be used.
+ */
+
+{"FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89", 5, 46, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89", 5, 46, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90",5, 47, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90",5, 47, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90", 5, 46, FD},
+{"FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92", 5, 44, FD},
+{"IBM F1 BIOS V1.1004/30/92", 5, 25, FD},
+{"FUTURE DOMAIN TMC-950", 5, 21, FD},
+#endif /* CONFIG_SCSI_SEAGATE */
+}
+;
+
+#define NUM_SIGNATURES (sizeof(signatures) / sizeof(Signature))
+#endif /* n OVERRIDE */
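
With no OVERRIDE given, the detect routine below walks the candidate ROM base addresses and memcmp()s each known BIOS banner at its recorded offset to identify the controller type. A minimal sketch of that scan over an in-memory region, using a placeholder table entry rather than the real signatures:

    #include <stddef.h>
    #include <string.h>

    struct sig { const char *text; size_t offset; size_t length; int type; };

    /* Returns the matched controller type, or -1 if nothing matched. */
    static int probe_region(const unsigned char *rom, size_t rom_len,
                            const struct sig *sigs, size_t nsigs)
    {
        size_t i;
        for (i = 0; i < nsigs; i++)
            if (sigs[i].offset + sigs[i].length <= rom_len &&
                memcmp(rom + sigs[i].offset, sigs[i].text, sigs[i].length) == 0)
                return sigs[i].type;        /* signature found at its offset */
        return -1;
    }
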
+
+/*
+ * hostno stores the hostnumber, as told to us by the init routine.
+ */
+
+static int hostno = -1;
+static void seagate_reconnect_intr(int, struct pt_regs *);
+
+#ifdef FAST
+static int fast = 1;
+#endif
+
+#ifdef SLOW_HANDSHAKE
+/*
+ * Support for broken devices :
+ * The Seagate board has a handshaking problem. Namely, a lack
+ * thereof for slow devices. You can blast 600K/second through
+ * it if you are polling for each byte, more if you do a blind
+ * transfer. In the first case, with a fast device, REQ will
+ * transition high-low or high-low-high before your loop restarts
+ * and you'll have no problems. In the second case, the board
+ * will insert wait states for up to 13.2 usecs for REQ to
+ * transition low->high, and everything will work.
+ *
+ * However, there's nothing in the state machine that says
+ * you *HAVE* to see a high-low-high set of transitions before
+ * sending the next byte, and slow things like the Trantor CD ROMS
+ * will break because of this.
+ *
+ * So, we need to slow things down, which isn't as simple as it
+ * seems. We can't slow things down period, because then people
+ * who don't recompile their kernels will shoot me for ruining
+ * their performance. We need to do it on a case-by-case basis.
+ *
+ * The best approach for performance is, only for borken devices
+ * (this is stored on a per-target basis in the scsi_devices array),
+ * to wait for a low->high transition before continuing with that
+ * transfer. If we time out, continue anyway. We don't need
+ * a long timeout, because REQ should only be asserted until the
+ * corresponding ACK is received and processed.
+ *
+ * Note that we can't use the system timer for this, because of
+ * resolution, and we *really* can't use the timer chip since
+ * gettimeofday() and the beeper routines use that. So,
+ * the best thing for us to do will be to calibrate a timing
+ * loop in the initialization code using the timer chip before
+ * gettimeofday() can screw with it.
+ */
+
+static int borken_calibration = 0;
+static void borken_init (void) {
+ register int count = 0, start = jiffies + 1, stop = start + 25;
+
+ while (jiffies < start);
+ for (;jiffies < stop; ++count);
+
+/*
+ * Ok, we now have a count for .25 seconds. Convert to a
+ * count per second and divide by transfer rate in K.
+ */
+
+ borken_calibration = (count * 4) / (SLOW_RATE*1024);
+
+ if (borken_calibration < 1)
+ borken_calibration = 1;
+#if (DEBUG & DEBUG_BORKEN)
+ printk("scsi%d : borken calibrated to %dK/sec, %d cycles per transfer\n",
+ hostno, BORKEN_RATE, borken_calibration);
+#endif
+}
+
+static inline void borken_wait(void) {
+ register int count;
+ for (count = borken_calibration; count && (STATUS & STAT_REQ);
+ --count);
+#if (DEBUG & DEBUG_BORKEN)
+ if (count)
+ printk("scsi%d : borken timeout\n", hostno);
+#endif
+}
+
+#endif /* def SLOW_HANDSHAKE */
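
borken_init() calibrates the slow-handshake delay by counting how many loop iterations fit into a fixed span of jiffies, and borken_wait() then spins for the derived number of iterations per byte. A user-space analogue of the calibration step using clock_gettime(); the quarter-second window only loosely mirrors the 25-jiffy interval used above:

    #include <stdio.h>
    #include <time.h>

    static unsigned long loops_per_quarter_second(void)
    {
        struct timespec start, now;
        unsigned long count = 0;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {                                    /* count iterations for 0.25 s */
            clock_gettime(CLOCK_MONOTONIC, &now);
            ++count;
        } while ((now.tv_sec - start.tv_sec) * 1000000000L +
                 (now.tv_nsec - start.tv_nsec) < 250000000L);
        return count;
    }

    int main(void)
    {
        printf("calibrated: %lu iterations per 0.25 s\n",
               loops_per_quarter_second());
        return 0;
    }
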
+
+int seagate_st0x_detect (Scsi_Host_Template * tpnt)
+ {
+ struct Scsi_Host *instance;
+#ifndef OVERRIDE
+ int i,j;
+#endif
+
+ tpnt->proc_dir = &proc_scsi_seagate;
+/*
+ * First, we try for the manual override.
+ */
+#ifdef DEBUG
+ printk("Autodetecting ST0x / TMC-8xx\n");
+#endif
+
+ if (hostno != -1)
+ {
+ printk ("ERROR : seagate_st0x_detect() called twice.\n");
+ return 0;
+ }
+
+ /* If the user specified the controller type from the command line,
+ controller_type will be non-zero, so don't try and detect one */
+
+ if (!controller_type) {
+#ifdef OVERRIDE
+ base_address = (void *) OVERRIDE;
+
+/* CONTROLLER is used to override controller (SEAGATE or FD). PM: 07/01/93 */
+#ifdef CONTROLLER
+ controller_type = CONTROLLER;
+#else
+#error Please use -DCONTROLLER=SEAGATE or -DCONTROLLER=FD to override controller type
+#endif /* CONTROLLER */
+#ifdef DEBUG
+ printk("Base address overridden to %x, controller type is %s\n",
+ base_address,controller_type == SEAGATE ? "SEAGATE" : "FD");
+#endif
+#else /* OVERRIDE */
+/*
+ * To detect this card, we simply look for the signature
+ * from the BIOS version notice in all the possible locations
+ * of the ROM's. This has a nice side effect of not trashing
+ * any register locations that might be used by something else.
+ *
+ * XXX - note that we probably should be probing the address
+ * space for the on-board RAM instead.
+ */
+
+ for (i = 0; i < (sizeof (seagate_bases) / sizeof (char * )); ++i)
+ for (j = 0; !base_address && j < NUM_SIGNATURES; ++j)
+ if (!memcmp ((const void *) (seagate_bases[i] +
+ signatures[j].offset), (const void *) signatures[j].signature,
+ signatures[j].length)) {
+ base_address = (const void *) seagate_bases[i];
+ controller_type = signatures[j].type;
+ }
+#endif /* OVERRIDE */
+ } /* (! controller_type) */
+
+ tpnt->this_id = (controller_type == SEAGATE) ? 7 : 6;
+ tpnt->name = (controller_type == SEAGATE) ? ST0X_ID_STR : FD_ID_STR;
+
+ if (base_address)
+ {
+ st0x_cr_sr =(void *) (((const unsigned char *) base_address) + (controller_type == SEAGATE ? 0x1a00 : 0x1c00));
+ st0x_dr = (void *) (((const unsigned char *) base_address ) + (controller_type == SEAGATE ? 0x1c00 : 0x1e00));
+#ifdef DEBUG
+ printk("%s detected. Base address = %x, cr = %x, dr = %x\n", tpnt->name, base_address, st0x_cr_sr, st0x_dr);
+#endif
+/*
+ * At all times, we will use IRQ 5. Should also check for IRQ3 if we
+ * lose our first interrupt.
+ */
+ instance = scsi_register(tpnt, 0);
+ hostno = instance->host_no;
+ if (request_irq((int) irq, seagate_reconnect_intr, SA_INTERRUPT,
+ (controller_type == SEAGATE) ? "seagate" : "tmc-8xx")) {
+ printk("scsi%d : unable to allocate IRQ%d\n",
+ hostno, (int) irq);
+ return 0;
+ }
+ instance->irq = irq;
+ instance->io_port = (unsigned int) base_address;
+#ifdef SLOW_HANDSHAKE
+ borken_init();
+#endif
+
+ printk("%s options:"
+#ifdef ARBITRATE
+ " ARBITRATE"
+#endif
+#ifdef SLOW_HANDSHAKE
+ " SLOW_HANDSHAKE"
+#endif
+#ifdef FAST
+#ifdef FAST32
+ " FAST32"
+#else
+ " FAST"
+#endif
+#endif
+#ifdef LINKED
+ " LINKED"
+#endif
+ "\n", tpnt->name);
+ return 1;
+ }
+ else
+ {
+#ifdef DEBUG
+ printk("ST0x / TMC-8xx not detected.\n");
+#endif
+ return 0;
+ }
+ }
+
+const char *seagate_st0x_info(struct Scsi_Host * shpnt) {
+ static char buffer[64];
+ sprintf(buffer, "%s at irq %d, address 0x%05X",
+ (controller_type == SEAGATE) ? ST0X_ID_STR : FD_ID_STR,
+ irq, (unsigned int)base_address);
+ return buffer;
+}
+
+int seagate_st0x_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout)
+{
+ const char *info = seagate_st0x_info(NULL);
+ int len;
+ int pos;
+ int begin;
+
+ if (inout) return(-ENOSYS);
+
+ begin = 0;
+ strcpy(buffer,info);
+ strcat(buffer,"\n");
+
+ pos = len = strlen(buffer);
+
+ if (pos<offset) {
+ len = 0;
+ begin = pos;
+ }
+
+ *start = buffer + (offset - begin);
+ len -= (offset - begin);
+ if ( len > length ) len = length;
+ return(len);
+}
+
+/*
+ * These are our saved pointers for the outstanding command that is
+ * waiting for a reconnect
+ */
+
+static unsigned char current_target, current_lun;
+static unsigned char *current_cmnd, *current_data;
+static int current_nobuffs;
+static struct scatterlist *current_buffer;
+static int current_bufflen;
+
+#ifdef LINKED
+
+/*
+ * linked_connected indicates whether or not we are currently connected to
+ * linked_target, linked_lun and in an INFORMATION TRANSFER phase,
+ * using linked commands.
+ */
+
+static int linked_connected = 0;
+static unsigned char linked_target, linked_lun;
+#endif
+
+
+static void (*done_fn)(Scsi_Cmnd *) = NULL;
+static Scsi_Cmnd * SCint = NULL;
+
+/*
+ * These control whether or not disconnect / reconnect will be attempted,
+ * or are being attempted.
+ */
+
+#define NO_RECONNECT 0
+#define RECONNECT_NOW 1
+#define CAN_RECONNECT 2
+
+#ifdef LINKED
+
+/*
+ * LINKED_RIGHT indicates that we are currently connected to the correct target
+ * for this command, LINKED_WRONG indicates that we are connected to the wrong
+ * target. Note that these imply CAN_RECONNECT.
+ */
+
+#define LINKED_RIGHT 3
+#define LINKED_WRONG 4
+#endif
+
+/*
+ * This determines if we are expecting to reconnect or not.
+ */
+
+static int should_reconnect = 0;
+
+/*
+ * The seagate_reconnect_intr routine is called when a target reselects the
+ * host adapter. This occurs on the interrupt triggered by the target
+ * asserting SEL.
+ */
+
+static void seagate_reconnect_intr(int irq, struct pt_regs * regs)
+ {
+ int temp;
+ Scsi_Cmnd * SCtmp;
+
+/* enable all other interrupts. */
+ sti();
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : seagate_reconnect_intr() called\n", hostno);
+#endif
+
+ if (!should_reconnect)
+ printk("scsi%d: unexpected interrupt.\n", hostno);
+ else {
+ should_reconnect = 0;
+
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : internal_command("
+ "%d, %08x, %08x, %d, RECONNECT_NOW)\n", hostno,
+ current_target, current_cmnd, current_data, current_bufflen);
+#endif
+
+ temp = internal_command (current_target, current_lun,
+ current_cmnd, current_data, current_bufflen,
+ RECONNECT_NOW);
+
+ if (msg_byte(temp) != DISCONNECT) {
+ if (done_fn) {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : done_fn(%d,%08x)", hostno,
+ hostno, temp);
+#endif
+ if(!SCint) panic("SCint == NULL in seagate");
+ SCtmp = SCint;
+ SCint = NULL;
+ SCtmp->result = temp;
+ done_fn (SCtmp);
+ } else
+ printk("done_fn() not defined.\n");
+ }
+ }
+ }
+
+/*
+ * The seagate_st0x_queue_command() function provides a queued interface
+ * to the seagate SCSI driver. Basically, it just passes control onto the
+ * seagate_command() function, after fixing it so that the done_fn()
+ * is set to the one passed to the function. We have to be very careful,
+ * because there are some commands on some devices that do not disconnect,
+ * and if we simply call the done_fn when the command is done then another
+ * command is started and queue_command is called again... We end up
+ * overflowing the kernel stack, and this tends not to be such a good idea.
+ */
+
+static int recursion_depth = 0;
+
+int seagate_st0x_queue_command (Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+ {
+ int result, reconnect;
+ Scsi_Cmnd * SCtmp;
+
+ done_fn = done;
+ current_target = SCpnt->target;
+ current_lun = SCpnt->lun;
+ (const void *) current_cmnd = SCpnt->cmnd;
+ current_data = (unsigned char *) SCpnt->request_buffer;
+ current_bufflen = SCpnt->request_bufflen;
+ SCint = SCpnt;
+ if(recursion_depth) {
+ return 0;
+ };
+ recursion_depth++;
+ do{
+#ifdef LINKED
+/*
+ * Set linked command bit in control field of SCSI command.
+ */
+
+ current_cmnd[SCpnt->cmd_len] |= 0x01;
+ if (linked_connected) {
+#if (DEBUG & DEBUG_LINKED)
+ printk("scsi%d : using linked commands, current I_T_L nexus is ",
+ hostno);
+#endif
+ if ((linked_target == current_target) &&
+ (linked_lun == current_lun)) {
+#if (DEBUG & DEBUG_LINKED)
+ printk("correct\n");
+#endif
+ reconnect = LINKED_RIGHT;
+ } else {
+#if (DEBUG & DEBUG_LINKED)
+ printk("incorrect\n");
+#endif
+ reconnect = LINKED_WRONG;
+ }
+ } else
+#endif /* LINKED */
+ reconnect = CAN_RECONNECT;
+
+
+
+
+
+ result = internal_command (SCint->target, SCint->lun, SCint->cmnd, SCint->request_buffer,
+ SCint->request_bufflen,
+ reconnect);
+ if (msg_byte(result) == DISCONNECT) break;
+ SCtmp = SCint;
+ SCint = NULL;
+ SCtmp->result = result;
+ done_fn (SCtmp);
+ } while(SCint);
+ recursion_depth--;
+ return 0;
+ }
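
The recursion_depth guard above keeps completions that immediately queue another command from nesting queue_command() calls and overflowing the kernel stack: re-entrant calls just record the new work and return, and only the outermost invocation loops until nothing is pending. A compact, runnable sketch of that guard with illustrative names:

    #include <stdio.h>

    static int depth = 0;
    static int pending = 0;
    static int work_left = 3;                   /* pretend three commands complete back-to-back */

    static void submit(void);

    static void run_one(void)                   /* stands in for internal_command + done_fn */
    {
        printf("command %d done\n", work_left);
        if (--work_left > 0)
            submit();                           /* completion queues the next command */
    }

    static void submit(void)
    {
        pending = 1;
        if (depth)                              /* re-entered from a completion: */
            return;                             /* let the outermost call loop instead */
        depth++;
        while (pending) {
            pending = 0;
            run_one();                          /* may call submit() again */
        }
        depth--;
    }

    int main(void)
    {
        submit();
        return 0;
    }
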
+
+int seagate_st0x_command (Scsi_Cmnd * SCpnt) {
+ return internal_command (SCpnt->target, SCpnt->lun, SCpnt->cmnd, SCpnt->request_buffer,
+ SCpnt->request_bufflen,
+ (int) NO_RECONNECT);
+}
+
+static int internal_command(unsigned char target, unsigned char lun, const void *cmnd,
+ void *buff, int bufflen, int reselect) {
+ int len = 0;
+ unsigned char *data = NULL;
+ struct scatterlist *buffer = NULL;
+ int nobuffs = 0;
+ int clock;
+ int temp;
+#ifdef SLOW_HANDSHAKE
+ int borken; /* Does the current target require Very Slow I/O ? */
+#endif
+
+
+#if (DEBUG & PHASE_DATAIN) || (DEBUG & PHASE_DATOUT)
+ int transfered = 0;
+#endif
+
+#if (((DEBUG & PHASE_ETC) == PHASE_ETC) || (DEBUG & PRINT_COMMAND) || \
+ (DEBUG & PHASE_EXIT))
+ int i;
+#endif
+
+#if ((DEBUG & PHASE_ETC) == PHASE_ETC)
+ int phase=0, newphase;
+#endif
+
+ int done = 0;
+ unsigned char status = 0;
+ unsigned char message = 0;
+ register unsigned char status_read;
+
+ unsigned transfersize = 0, underflow = 0;
+
+ incommand = 0;
+ st0x_aborted = 0;
+
+#ifdef SLOW_HANDSHAKE
+ borken = (int) SCint->device->borken;
+#endif
+
+#if (DEBUG & PRINT_COMMAND)
+ printk ("scsi%d : target = %d, command = ", hostno, target);
+ print_command((unsigned char *) cmnd);
+ printk("\n");
+#endif
+
+#if (DEBUG & PHASE_RESELECT)
+ switch (reselect) {
+ case RECONNECT_NOW :
+ printk("scsi%d : reconnecting\n", hostno);
+ break;
+#ifdef LINKED
+ case LINKED_RIGHT :
+ printk("scsi%d : connected, can reconnect\n", hostno);
+ break;
+ case LINKED_WRONG :
+ printk("scsi%d : connected to wrong target, can reconnect\n",
+ hostno);
+ break;
+#endif
+ case CAN_RECONNECT :
+ printk("scsi%d : allowed to reconnect\n", hostno);
+ break;
+ default :
+ printk("scsi%d : not allowed to reconnect\n", hostno);
+ }
+#endif
+
+
+ if (target == (controller_type == SEAGATE ? 7 : 6))
+ return DID_BAD_TARGET;
+
+/*
+ * We work it differently depending on whether this is "the first time,"
+ * or a reconnect. If this is a reselect phase, then SEL will
+ * be asserted, and we must skip selection / arbitration phases.
+ */
+
+ switch (reselect) {
+ case RECONNECT_NOW:
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : phase RESELECT \n", hostno);
+#endif
+
+/*
+ * At this point, we should find the logical or of our ID and the original
+ * target's ID on the BUS, with BSY, SEL, and I/O signals asserted.
+ *
+ * After ARBITRATION phase is completed, only SEL, BSY, and the
+ * target ID are asserted. A valid initiator ID is not on the bus
+ * until IO is asserted, so we must wait for that.
+ */
+ clock = jiffies + 10;
+ for (;;) {
+ temp = STATUS;
+ if ((temp & STAT_IO) && !(temp & STAT_BSY))
+ break;
+
+ if (jiffies > clock) {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : RESELECT timed out while waiting for IO .\n",
+ hostno);
+#endif
+ return (DID_BAD_INTR << 16);
+ }
+ }
+
+/*
+ * After I/O is asserted by the target, we can read our ID and its
+ * ID off of the BUS.
+ */
+
+ if (!((temp = DATA) & (controller_type == SEAGATE ? 0x80 : 0x40)))
+ {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : detected reconnect request to different target.\n"
+ "\tData bus = %d\n", hostno, temp);
+#endif
+ return (DID_BAD_INTR << 16);
+ }
+
+ if (!(temp & (1 << current_target)))
+ {
+ printk("scsi%d : Unexpected reselect interrupt. Data bus = %d\n",
+ hostno, temp);
+ return (DID_BAD_INTR << 16);
+ }
+
+ buffer=current_buffer;
+ cmnd=current_cmnd; /* WDE add */
+ data=current_data; /* WDE add */
+ len=current_bufflen; /* WDE add */
+ nobuffs=current_nobuffs;
+
+/*
+ * We have determined that we have been selected. At this point,
+ * we must respond to the reselection by asserting BSY ourselves
+ */
+
+#if 1
+ CONTROL = (BASE_CMD | CMD_DRVR_ENABLE | CMD_BSY);
+#else
+ CONTROL = (BASE_CMD | CMD_BSY);
+#endif
+
+/*
+ * The target will drop SEL, and raise BSY, at which time we must drop
+ * BSY.
+ */
+
+ for (clock = jiffies + 10; (jiffies < clock) && (STATUS & STAT_SEL););
+
+ if (jiffies >= clock)
+ {
+ CONTROL = (BASE_CMD | CMD_INTR);
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : RESELECT timed out while waiting for SEL.\n",
+ hostno);
+#endif
+ return (DID_BAD_INTR << 16);
+ }
+
+ CONTROL = BASE_CMD;
+
+/*
+ * At this point, we have connected with the target and can get
+ * on with our lives.
+ */
+ break;
+ case CAN_RECONNECT:
+
+#ifdef LINKED
+/*
+ * This is a bletcherous hack, just as bad as the Unix #! interpreter stuff.
+ * If it turns out we are using the wrong I_T_L nexus, the easiest way to deal
+ * with it is to go into our INFORMATION TRANSFER PHASE code, send a ABORT
+ * message on MESSAGE OUT phase, and then loop back to here.
+ */
+
+connect_loop :
+
+#endif
+
+#if (DEBUG & PHASE_BUS_FREE)
+ printk ("scsi%d : phase = BUS FREE \n", hostno);
+#endif
+
+/*
+ * BUS FREE PHASE
+ *
+ * On entry, we make sure that the BUS is in a BUS FREE
+ * phase, by ensuring that both BSY and SEL are low for
+ * at least one bus settle delay. Several reads help
+ * eliminate wire glitch.
+ */
+
+ clock = jiffies + ST0X_BUS_FREE_DELAY;
+
+#if !defined (ARBITRATE)
+ while (((STATUS | STATUS | STATUS) &
+ (STAT_BSY | STAT_SEL)) &&
+ (!st0x_aborted) && (jiffies < clock));
+
+ if (jiffies > clock)
+ return retcode(DID_BUS_BUSY);
+ else if (st0x_aborted)
+ return retcode(st0x_aborted);
+#endif
+
+#if (DEBUG & PHASE_SELECTION)
+ printk("scsi%d : phase = SELECTION\n", hostno);
+#endif
+
+ clock = jiffies + ST0X_SELECTION_DELAY;
+
+/*
+ * Arbitration/selection procedure :
+ * 1. Disable drivers
+ * 2. Write HOST adapter address bit
+ * 3. Set start arbitration.
+ * 4. We get either ARBITRATION COMPLETE or SELECT at this
+ * point.
+ * 5. OR our ID and targets on bus.
+ * 6. Enable SCSI drivers and assert SEL and ATTN
+ */
+
+#if defined(ARBITRATE)
+ cli();
+ CONTROL = 0;
+ DATA = (controller_type == SEAGATE) ? 0x80 : 0x40;
+ CONTROL = CMD_START_ARB;
+ sti();
+ while (!((status_read = STATUS) & (STAT_ARB_CMPL | STAT_SEL)) &&
+ (jiffies < clock) && !st0x_aborted);
+
+ if (!(status_read & STAT_ARB_CMPL)) {
+#if (DEBUG & PHASE_SELECTION)
+ if (status_read & STAT_SEL)
+ printk("scsi%d : arbitration lost\n", hostno);
+ else
+ printk("scsi%d : arbitration timeout.\n", hostno);
+#endif
+ CONTROL = BASE_CMD;
+ return retcode(DID_NO_CONNECT);
+ };
+
+#if (DEBUG & PHASE_SELECTION)
+ printk("scsi%d : arbitration complete\n", hostno);
+#endif
+#endif
+
+
+/*
+ * When the SCSI device decides that we're gawking at it, it will
+ * respond by asserting BUSY on the bus.
+ *
+ * Note : the Seagate ST-01/02 product manual says that we should
+ * twiddle the DATA register before the control register. However,
+ * this does not work reliably so we do it the other way around.
+ *
+ * Probably could be a problem with arbitration too, we really should
+ * try this with a SCSI protocol or logic analyzer to see what is
+ * going on.
+ */
+ cli();
+ DATA = (unsigned char) ((1 << target) | (controller_type == SEAGATE ? 0x80 : 0x40));
+ CONTROL = BASE_CMD | CMD_DRVR_ENABLE | CMD_SEL |
+ (reselect ? CMD_ATTN : 0);
+ sti();
+ while (!((status_read = STATUS) & STAT_BSY) &&
+ (jiffies < clock) && !st0x_aborted)
+
+#if 0 && (DEBUG & PHASE_SELECTION)
+ {
+ temp = clock - jiffies;
+
+ if (!(jiffies % 5))
+ printk("seagate_st0x_timeout : %d \r",temp);
+
+ }
+ printk("Done. \n");
+ printk("scsi%d : status = %02x, seagate_st0x_timeout = %d, aborted = %02x \n",
+ hostno, status_read, temp, st0x_aborted);
+#else
+ ;
+#endif
+
+
+ if ((jiffies >= clock) && !(status_read & STAT_BSY))
+ {
+#if (DEBUG & PHASE_SELECTION)
+ printk ("scsi%d : NO CONNECT with target %d, status = %x \n",
+ hostno, target, STATUS);
+#endif
+ return retcode(DID_NO_CONNECT);
+ }
+
+/*
+ * If we have been aborted, and we have a command in progress, IE the
+ * target still has BSY asserted, then we will reset the bus, and
+ * notify the midlevel driver to expect sense.
+ */
+
+ if (st0x_aborted) {
+ CONTROL = BASE_CMD;
+ if (STATUS & STAT_BSY) {
+ printk("scsi%d : BST asserted after we've been aborted.\n",
+ hostno);
+ seagate_st0x_reset(NULL);
+ return retcode(DID_RESET);
+ }
+ return retcode(st0x_aborted);
+ }
+
+/* Establish current pointers. Take into account scatter / gather */
+
+ if ((nobuffs = SCint->use_sg)) {
+#if (DEBUG & DEBUG_SG)
+ {
+ int i;
+ printk("scsi%d : scatter gather requested, using %d buffers.\n",
+ hostno, nobuffs);
+ for (i = 0; i < nobuffs; ++i)
+ printk("scsi%d : buffer %d address = %08x length = %d\n",
+ hostno, i, buffer[i].address, buffer[i].length);
+ }
+#endif
+
+ buffer = (struct scatterlist *) SCint->buffer;
+ len = buffer->length;
+ data = (unsigned char *) buffer->address;
+ } else {
+#if (DEBUG & DEBUG_SG)
+ printk("scsi%d : scatter gather not requested.\n", hostno);
+#endif
+ buffer = NULL;
+ len = SCint->request_bufflen;
+ data = (unsigned char *) SCint->request_buffer;
+ }
+
+#if (DEBUG & (PHASE_DATAIN | PHASE_DATAOUT))
+ printk("scsi%d : len = %d\n", hostno, len);
+#endif
+
+ break;
+#ifdef LINKED
+ case LINKED_RIGHT:
+ break;
+ case LINKED_WRONG:
+ break;
+#endif
+ }
+
+/*
+ * There are several conditions under which we wish to send a message :
+ * 1. When we are allowing disconnect / reconnect, and need to establish
+ * the I_T_L nexus via an IDENTIFY with the DiscPriv bit set.
+ *
+ * 2. When we are doing linked commands, and have the wrong I_T_L nexus
+ * established and want to send an ABORT message.
+ */
+
+
+ CONTROL = BASE_CMD | CMD_DRVR_ENABLE |
+ (((reselect == CAN_RECONNECT)
+#ifdef LINKED
+ || (reselect == LINKED_WRONG)
+#endif
+ ) ? CMD_ATTN : 0) ;
+
+/*
+ * INFORMATION TRANSFER PHASE
+ *
+ * The nasty looking read / write inline assembler loops we use for
+ * DATAIN and DATAOUT phases are approximately 4-5 times as fast as
+ * the 'C' versions - since we're moving 1024 bytes of data, this
+ * really adds up.
+ */
+
+#if ((DEBUG & PHASE_ETC) == PHASE_ETC)
+ printk("scsi%d : phase = INFORMATION TRANSFER\n", hostno);
+#endif
+
+ incommand = 1;
+ transfersize = SCint->transfersize;
+ underflow = SCint->underflow;
+
+
+/*
+ * Now, we poll the device for status information,
+ * and handle any requests it makes. Note that since we are unsure of
+ * how much data will be flowing across the system, etc., and cannot
+ * set reasonable timeouts, we will instead have the midlevel
+ * driver handle any timeouts that occur in this phase.
+ */
+
+ while (((status_read = STATUS) & STAT_BSY) && !st0x_aborted && !done)
+ {
+#ifdef PARITY
+ if (status_read & STAT_PARITY)
+ {
+ printk("scsi%d : got parity error\n", hostno);
+ st0x_aborted = DID_PARITY;
+ }
+#endif
+
+ if (status_read & STAT_REQ)
+ {
+#if ((DEBUG & PHASE_ETC) == PHASE_ETC)
+ if ((newphase = (status_read & REQ_MASK)) != phase)
+ {
+ phase = newphase;
+ switch (phase)
+ {
+ case REQ_DATAOUT:
+ printk("scsi%d : phase = DATA OUT\n",
+ hostno);
+ break;
+ case REQ_DATAIN :
+ printk("scsi%d : phase = DATA IN\n",
+ hostno);
+ break;
+ case REQ_CMDOUT :
+ printk("scsi%d : phase = COMMAND OUT\n",
+ hostno);
+ break;
+ case REQ_STATIN :
+ printk("scsi%d : phase = STATUS IN\n",
+ hostno);
+ break;
+ case REQ_MSGOUT :
+ printk("scsi%d : phase = MESSAGE OUT\n",
+ hostno);
+ break;
+ case REQ_MSGIN :
+ printk("scsi%d : phase = MESSAGE IN\n",
+ hostno);
+ break;
+ default :
+ printk("scsi%d : phase = UNKNOWN\n",
+ hostno);
+ st0x_aborted = DID_ERROR;
+ }
+ }
+#endif
+ switch (status_read & REQ_MASK)
+ {
+ case REQ_DATAOUT :
+/*
+ * If we are in fast mode, then we simply splat the data out
+ * in word-sized chunks as fast as we can.
+ */
+
+#ifdef FAST
+if (!len) {
+#if 0
+ printk("scsi%d: underflow to target %d lun %d \n",
+ hostno, target, lun);
+ st0x_aborted = DID_ERROR;
+ fast = 0;
+#endif
+ break;
+}
+
+if (fast && transfersize && !(len % transfersize) && (len >= transfersize)
+#ifdef FAST32
+ && !(transfersize % 4)
+#endif
+ ) {
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer, underflow = %d, transfersize = %d\n"
+ " len = %d, data = %08x\n", hostno, SCint->underflow,
+ SCint->transfersize, len, data);
+#endif
+
+ __asm__("
+ cld;
+"
+#ifdef FAST32
+" shr $2, %%ecx;
+1: lodsl;
+ movl %%eax, (%%edi);
+"
+#else
+"1: lodsb;
+ movb %%al, (%%edi);
+"
+#endif
+" loop 1b;" : :
+ /* input */
+ "D" (st0x_dr), "S" (data), "c" (SCint->transfersize) :
+ /* clobbered */
+ "eax", "ecx", "esi" );
+
+ len -= transfersize;
+ data += transfersize;
+
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer complete len = %d data = %08x\n",
+ hostno, len, data);
+#endif
+
+
+} else
+#endif
+
+{
+/*
+ * We loop as long as we are in a data out phase, there is data to send,
+ * and BSY is still active.
+ */
+ __asm__ (
+
+/*
+ Local variables :
+ len = ecx
+ data = esi
+ st0x_cr_sr = ebx
+ st0x_dr = edi
+
+ Test for any data here at all.
+*/
+ "\torl %%ecx, %%ecx
+ jz 2f
+
+ cld
+
+ movl " SYMBOL_NAME_STR(st0x_cr_sr) ", %%ebx
+ movl " SYMBOL_NAME_STR(st0x_dr) ", %%edi
+
+1: movb (%%ebx), %%al\n"
+/*
+ Test for BSY
+*/
+
+ "\ttest $1, %%al
+ jz 2f\n"
+
+/*
+ Test for data out phase - STATUS & REQ_MASK should be REQ_DATAOUT, which is 0.
+*/
+ "\ttest $0xe, %%al
+ jnz 2f \n"
+/*
+ Test for REQ
+*/
+ "\ttest $0x10, %%al
+ jz 1b
+ lodsb
+ movb %%al, (%%edi)
+ loop 1b
+
+2:
+ ":
+/* output */
+"=S" (data), "=c" (len) :
+/* input */
+"0" (data), "1" (len) :
+/* clobbered */
+"eax", "ebx", "edi");
+}
+
+ if (!len && nobuffs) {
+ --nobuffs;
+ ++buffer;
+ len = buffer->length;
+ data = (unsigned char *) buffer->address;
+#if (DEBUG & DEBUG_SG)
+ printk("scsi%d : next scatter-gather buffer len = %d address = %08x\n",
+ hostno, len, data);
+#endif
+ }
+ break;
+
+ case REQ_DATAIN :
+#ifdef SLOW_HANDSHAKE
+ if (borken) {
+#if (DEBUG & (PHASE_DATAIN))
+ transfered += len;
+#endif
+ for (; len && (STATUS & (REQ_MASK | STAT_REQ)) == (REQ_DATAIN |
+ STAT_REQ); --len) {
+ *data++ = DATA;
+ borken_wait();
+}
+#if (DEBUG & (PHASE_DATAIN))
+ transfered -= len;
+#endif
+ } else
+#endif
+#ifdef FAST
+if (fast && transfersize && !(len % transfersize) && (len >= transfersize)
+#ifdef FAST32
+ && !(transfersize % 4)
+#endif
+ ) {
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer, underflow = %d, transfersize = %d\n"
+ " len = %d, data = %08x\n", hostno, SCint->underflow,
+ SCint->transfersize, len, data);
+#endif
+ __asm__("
+ cld;
+"
+#ifdef FAST32
+" shr $2, %%ecx;
+1: movl (%%esi), %%eax;
+ stosl;
+"
+#else
+"1: movb (%%esi), %%al;
+ stosb;
+"
+#endif
+
+" loop 1b;" : :
+ /* input */
+ "S" (st0x_dr), "D" (data), "c" (SCint->transfersize) :
+ /* clobbered */
+ "eax", "ecx", "edi");
+
+ len -= transfersize;
+ data += transfersize;
+
+#if (DEBUG & PHASE_DATAIN)
+ printk("scsi%d: transfered += %d\n", hostno, transfersize);
+ transfered += transfersize;
+#endif
+
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer complete len = %d data = %08x\n",
+ hostno, len, data);
+#endif
+
+} else
+#endif
+{
+
+#if (DEBUG & PHASE_DATAIN)
+ printk("scsi%d: transfered += %d\n", hostno, len);
+ transfered += len; /* Assume we'll transfer it all, then
+ subtract what we *didn't* transfer */
+#endif
+
+/*
+ * We loop as long as we are in a data in phase, there is room to read,
+ * and BSY is still active
+ */
+
+ __asm__ (
+/*
+ Local variables :
+ ecx = len
+ edi = data
+ esi = st0x_cr_sr
+ ebx = st0x_dr
+
+ Test for room to read
+*/
+ "\torl %%ecx, %%ecx
+ jz 2f
+
+ cld
+ movl " SYMBOL_NAME_STR(st0x_cr_sr) ", %%esi
+ movl " SYMBOL_NAME_STR(st0x_dr) ", %%ebx
+
+1: movb (%%esi), %%al\n"
+/*
+ Test for BSY
+*/
+
+ "\ttest $1, %%al
+ jz 2f\n"
+
+/*
+ Test for data in phase - STATUS & REQ_MASK should be REQ_DATAIN, = STAT_IO, which is 4.
+*/
+ "\tmovb $0xe, %%ah
+ andb %%al, %%ah
+ cmpb $0x04, %%ah
+ jne 2f\n"
+
+/*
+ Test for REQ
+*/
+ "\ttest $0x10, %%al
+ jz 1b
+
+ movb (%%ebx), %%al
+ stosb
+ loop 1b\n"
+
+"2:\n"
+ :
+/* output */
+"=D" (data), "=c" (len) :
+/* input */
+"0" (data), "1" (len) :
+/* clobbered */
+"eax","ebx", "esi");
+
+#if (DEBUG & PHASE_DATAIN)
+ printk("scsi%d: transfered -= %d\n", hostno, len);
+ transfered -= len; /* Since we assumed all of Len got
+ * transfered, correct our mistake */
+#endif
+}
+
+ if (!len && nobuffs) {
+ --nobuffs;
+ ++buffer;
+ len = buffer->length;
+ data = (unsigned char *) buffer->address;
+#if (DEBUG & DEBUG_SG)
+ printk("scsi%d : next scatter-gather buffer len = %d address = %08x\n",
+ hostno, len, data);
+#endif
+ }
+
+ break;
+
+ case REQ_CMDOUT :
+ while (((status_read = STATUS) & STAT_BSY) &&
+ ((status_read & REQ_MASK) == REQ_CMDOUT))
+ if (status_read & STAT_REQ) {
+ DATA = *(const unsigned char *) cmnd;
+ cmnd = 1+(const unsigned char *) cmnd;
+#ifdef SLOW_HANDSHAKE
+ if (borken)
+ borken_wait();
+#endif
+ }
+ break;
+
+ case REQ_STATIN :
+ status = DATA;
+ break;
+
+ case REQ_MSGOUT :
+/*
+ * We can only have sent a MSG OUT if we requested to do this
+ * by raising ATTN. So, we must drop ATTN.
+ */
+
+ CONTROL = BASE_CMD | CMD_DRVR_ENABLE;
+/*
+ * If we are reconnecting, then we must send an IDENTIFY message in
+ * response to MSGOUT.
+ */
+ switch (reselect) {
+ case CAN_RECONNECT:
+ DATA = IDENTIFY(1, lun);
+
+#if (DEBUG & (PHASE_RESELECT | PHASE_MSGOUT))
+ printk("scsi%d : sent IDENTIFY message.\n", hostno);
+#endif
+ break;
+#ifdef LINKED
+ case LINKED_WRONG:
+ DATA = ABORT;
+ linked_connected = 0;
+ reselect = CAN_RECONNECT;
+ goto connect_loop;
+#if (DEBUG & (PHASE_MSGOUT | DEBUG_LINKED))
+ printk("scsi%d : sent ABORT message to cancel incorrect I_T_L nexus.\n", hostno);
+#endif
+#endif /* LINKED */
+#if (DEBUG & DEBUG_LINKED)
+ printk("correct\n");
+#endif
+ default:
+ DATA = NOP;
+ printk("scsi%d : target %d requested MSGOUT, sent NOP message.\n", hostno, target);
+ }
+ break;
+
+ case REQ_MSGIN :
+ switch (message = DATA) {
+ case DISCONNECT :
+ should_reconnect = 1;
+ current_data = data; /* WDE add */
+ current_buffer = buffer;
+ current_bufflen = len; /* WDE add */
+ current_nobuffs = nobuffs;
+#ifdef LINKED
+ linked_connected = 0;
+#endif
+ done=1;
+#if (DEBUG & (PHASE_RESELECT | PHASE_MSGIN))
+ printk("scsi%d : disconnected.\n", hostno);
+#endif
+ break;
+
+#ifdef LINKED
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+#endif
+ case COMMAND_COMPLETE :
+/*
+ * Note : we should check for underflow here.
+ */
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : command complete.\n", hostno);
+#endif
+ done = 1;
+ break;
+ case ABORT :
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : abort message.\n", hostno);
+#endif
+ done=1;
+ break;
+ case SAVE_POINTERS :
+ current_buffer = buffer;
+ current_bufflen = len; /* WDE add */
+ current_data = data; /* WDE mod */
+ current_nobuffs = nobuffs;
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : pointers saved.\n", hostno);
+#endif
+ break;
+ case RESTORE_POINTERS:
+ buffer=current_buffer;
+ cmnd=current_cmnd;
+ data=current_data; /* WDE mod */
+ len=current_bufflen;
+ nobuffs=current_nobuffs;
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : pointers restored.\n", hostno);
+#endif
+ break;
+ default:
+
+/*
+ * IDENTIFY distinguishes itself from the other messages by setting the
+ * high byte.
+ *
+ * Note : we need to handle at least one outstanding command per LUN,
+ * and need to hash the SCSI command for that I_T_L nexus based on the
+ * known ID (at this point) and LUN.
+ */
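+/*
+ * e.g. an IDENTIFY byte is 0x80 | lun, so 0x83 identifies LUN 3; that is
+ * why only the low three bits are printed below.
+ */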
+
+ if (message & 0x80) {
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : IDENTIFY message received from id %d, lun %d.\n",
+ hostno, target, message & 7);
+#endif
+ } else {
+
+/*
+ * We should go into a MESSAGE OUT phase, and send a MESSAGE_REJECT
+ * if we run into a message that we don't like. The seagate driver
+ * needs some serious restructuring first though.
+ */
+
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : unknown message %d from target %d.\n",
+ hostno, message, target);
+#endif
+ }
+ }
+ break;
+
+ default :
+ printk("scsi%d : unknown phase.\n", hostno);
+ st0x_aborted = DID_ERROR;
+ }
+
+#ifdef SLOW_HANDSHAKE
+/*
+ * I really don't care to deal with borken devices in each single
+ * byte transfer case (ie, message in, message out, status), so
+ * I'll do the wait here if necessary.
+ */
+ if (borken)
+ borken_wait();
+#endif
+
+ } /* if ends */
+ } /* while ends */
+
+#if (DEBUG & (PHASE_DATAIN | PHASE_DATAOUT | PHASE_EXIT))
+ printk("scsi%d : Transfered %d bytes\n", hostno, transfered);
+#endif
+
+#if (DEBUG & PHASE_EXIT)
+#if 0 /* Doesn't work for scatter / gather */
+ printk("Buffer : \n");
+ for (i = 0; i < 20; ++i)
+ printk ("%02x ", ((unsigned char *) data)[i]); /* WDE mod */
+ printk("\n");
+#endif
+ printk("scsi%d : status = ", hostno);
+ print_status(status);
+ printk("message = %02x\n", message);
+#endif
+
+
+/* We shouldn't reach this until *after* BSY has been deasserted */
+#ifdef notyet
+ if (st0x_aborted) {
+ if (STATUS & STAT_BSY) {
+ seagate_st0x_reset(NULL);
+ st0x_aborted = DID_RESET;
+ }
+ abort_confirm = 1;
+ }
+#endif
+
+#ifdef LINKED
+else {
+/*
+ * Fix the message byte so that unsuspecting high level drivers don't
+ * puke when they see a LINKED COMMAND message in place of the COMMAND
+ * COMPLETE they may be expecting. Shouldn't be necessary, but it's
+ * better to be on the safe side.
+ *
+ * A non LINKED* message byte will indicate that the command completed,
+ * and we are now disconnected.
+ */
+
+ switch (message) {
+ case LINKED_CMD_COMPLETE :
+ case LINKED_FLG_CMD_COMPLETE :
+ message = COMMAND_COMPLETE;
+ linked_target = current_target;
+ linked_lun = current_lun;
+ linked_connected = 1;
+#if (DEBUG & DEBUG_LINKED)
+ printk("scsi%d : keeping I_T_L nexus established for linked command.\n",
+ hostno);
+#endif
+/*
+ * We also will need to adjust status to accommodate intermediate conditions.
+ */
+ if ((status == INTERMEDIATE_GOOD) ||
+ (status == INTERMEDIATE_C_GOOD))
+ status = GOOD;
+
+ break;
+/*
+ * We should also handle the "normal" termination messages here
+ * (ABORT, BUS_DEVICE_RESET?, and COMMAND_COMPLETE) individually,
+ * and flake if things aren't right.
+ */
+
+ default :
+#if (DEBUG & DEBUG_LINKED)
+ printk("scsi%d : closing I_T_L nexus.\n", hostno);
+#endif
+ linked_connected = 0;
+ }
+ }
+#endif /* LINKED */
+
+
+
+
+ if (should_reconnect) {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : exiting seagate_st0x_queue_command() with reconnect enabled.\n",
+ hostno);
+#endif
+ CONTROL = BASE_CMD | CMD_INTR ;
+ } else
+ CONTROL = BASE_CMD;
+
+ return retcode (st0x_aborted);
+ }
+
+int seagate_st0x_abort (Scsi_Cmnd * SCpnt)
+ {
+ st0x_aborted = DID_ABORT;
+
+ return SCSI_ABORT_PENDING;
+ }
+
+/*
+ the seagate_st0x_reset function resets the SCSI bus
+*/
+
+int seagate_st0x_reset (Scsi_Cmnd * SCpnt)
+ {
+ unsigned clock;
+ /*
+ No timeouts - this command is going to fail because
+ it was reset.
+ */
+
+#ifdef DEBUG
+ printk("In seagate_st0x_reset()\n");
+#endif
+
+
+ /* assert RESET signal on SCSI bus. */
+
+ CONTROL = BASE_CMD | CMD_RST;
+ clock=jiffies+2;
+
+
+ /* Wait. */
+
+ while (jiffies < clock);
+
+ CONTROL = BASE_CMD;
+
+ st0x_aborted = DID_RESET;
+
+#ifdef DEBUG
+ printk("SCSI bus reset.\n");
+#endif
+ return SCSI_RESET_WAKEUP;
+ }
+
+#include <asm/segment.h>
+#include "sd.h"
+#include "scsi_ioctl.h"
+
+int seagate_st0x_biosparam(Disk * disk, kdev_t dev, int* ip) {
+ unsigned char buf[256 + sizeof(int) * 2], cmd[6], *data, *page;
+ int *sizes, result, formatted_sectors, total_sectors;
+ int cylinders, heads, sectors;
+
+/*
+ * Only SCSI-I CCS drives and later implement the necessary mode sense
+ * pages.
+ */
+
+ if (disk->device->scsi_level < 2)
+ return -1;
+
+ sizes = (int *) buf;
+ data = (unsigned char *) (sizes + 2);
+
+ cmd[0] = MODE_SENSE;
+ cmd[1] = (disk->device->lun << 5) & 0xe0;
+ cmd[2] = 0x04; /* Read page 4, rigid disk geometry page current values */
+ cmd[3] = 0;
+ cmd[4] = 255;
+ cmd[5] = 0;
+
+/*
+ * We are transferring 0 bytes in the out direction, and expect to get back
+ * 24 bytes for each mode page.
+ */
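+/*
+ * Layout of the buffer handed to SCSI_IOCTL_SEND_COMMAND, as used here:
+ * two ints (bytes going out, bytes expected back) followed by the command,
+ * whose space is then overwritten by the data returned from the drive.
+ */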
+
+ sizes[0] = 0;
+ sizes[1] = 256;
+
+ memcpy (data, cmd, 6);
+
+ if (!(result = kernel_scsi_ioctl (disk->device, SCSI_IOCTL_SEND_COMMAND, (void *) buf))) {
+/*
+ * The mode page lies beyond the 4-byte MODE SENSE header and the
+ * BLOCK DESCRIPTOR, whose length is given by header byte 3 (data[3]).
+ */
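+/*
+ * With the usual 8-byte block descriptor (data[3] == 8), the rigid disk
+ * geometry page therefore starts at data + 12.
+ */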
+
+ page = data + 4 + data[3];
+ heads = (int) page[5];
+ cylinders = (page[2] << 16) | (page[3] << 8) | page[4];
+
+ cmd[2] = 0x03; /* Read page 3, format page current values */
+ memcpy (data, cmd, 6);
+
+ if (!(result = kernel_scsi_ioctl (disk->device, SCSI_IOCTL_SEND_COMMAND, (void *) buf))) {
+ page = data + 4 + data[3];
+ sectors = (page[10] << 8) | page[11];
+
+
+/*
+ * Get the total number of formatted sectors from the block descriptor,
+ * so we can tell how many are being used for alternates.
+ */
+
+ formatted_sectors = (data[4 + 1] << 16) | (data[4 + 2] << 8) |
+ data[4 + 3] ;
+
+ total_sectors = (heads * cylinders * sectors);
+
+/*
+ * Adjust the real geometry by subtracting
+ * (spare sectors / (heads * sectors)) cylinders from the number of cylinders.
+ *
+ * It appears that the CE cylinder CAN be a partial cylinder.
+ */
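+/*
+ * Worked example with made-up numbers: heads = 5, sectors = 26 and
+ * cylinders = 1000 give total_sectors = 130000; if the drive reports
+ * formatted_sectors = 129740, the 260 spare sectors / (5 * 26) = 2
+ * cylinders are subtracted, leaving 998 usable cylinders.
+ */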
+
+
+printk("scsi%d : heads = %d cylinders = %d sectors = %d total = %d formatted = %d\n",
+ hostno, heads, cylinders, sectors, total_sectors, formatted_sectors);
+
+ if (!heads || !sectors || !cylinders)
+ result = -1;
+ else
+ cylinders -= ((total_sectors - formatted_sectors) / (heads * sectors));
+
+/*
+ * Now, we need to do a sanity check on the geometry to see if it is
+ * BIOS compatible. The maximum BIOS geometry is 1024 cylinders *
+ * 256 heads * 64 sectors.
+ */
+
+ if ((cylinders > 1024) || (sectors > 64))
+ result = -1;
+ else {
+ ip[0] = heads;
+ ip[1] = sectors;
+ ip[2] = cylinders;
+ }
+
+/*
+ * There should be an alternate mapping for things the seagate doesn't
+ * understand, but I couldn't say what it is with reasonable certainty.
+ */
+
+ }
+ }
+
+ return result;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = SEAGATE_ST0X;
+
+#include "scsi_module.c"
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/seagate.h b/i386/i386at/gpl/linux/scsi/seagate.h
new file mode 100644
index 00000000..8d9e1a42
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/seagate.h
@@ -0,0 +1,139 @@
+/*
+ * seagate.h Copyright (C) 1992 Drew Eckhardt
+ * low level scsi driver header for ST01/ST02 by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ */
+
+#ifndef _SEAGATE_H
+ #define _SEAGATE_H
+/*
+ $Header
+*/
+#ifndef ASM
+int seagate_st0x_detect(Scsi_Host_Template *);
+int seagate_st0x_command(Scsi_Cmnd *);
+int seagate_st0x_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+
+int seagate_st0x_abort(Scsi_Cmnd *);
+const char *seagate_st0x_info(struct Scsi_Host *);
+int seagate_st0x_reset(Scsi_Cmnd *);
+int seagate_st0x_proc_info(char *,char **,off_t,int,int,int);
+
+#ifndef NULL
+ #define NULL 0
+#endif
+
+#include <linux/kdev_t.h>
+int seagate_st0x_biosparam(Disk *, kdev_t, int*);
+
+#define SEAGATE_ST0X { NULL, NULL, NULL, seagate_st0x_proc_info, \
+ NULL, seagate_st0x_detect, \
+ NULL, \
+ seagate_st0x_info, seagate_st0x_command, \
+ seagate_st0x_queue_command, seagate_st0x_abort, \
+ seagate_st0x_reset, NULL, seagate_st0x_biosparam, \
+ 1, 7, SG_ALL, 1, 0, 0, DISABLE_CLUSTERING}
+#endif
+
+
+/*
+ defining PARITY causes parity data to be checked
+*/
+
+#define PARITY
+
+
+/*
+ Thanks to Brian Antoine for the example code in his Messy-Loss ST-01
+ driver, and Mitsugu Suzuki for information on the ST-01
+ SCSI host.
+*/
+
+/*
+ CONTROL defines
+*/
+
+#define CMD_RST 0x01
+#define CMD_SEL 0x02
+#define CMD_BSY 0x04
+#define CMD_ATTN 0x08
+#define CMD_START_ARB 0x10
+#define CMD_EN_PARITY 0x20
+#define CMD_INTR 0x40
+#define CMD_DRVR_ENABLE 0x80
+
+/*
+ STATUS
+*/
+
+#define STAT_BSY 0x01
+#define STAT_MSG 0x02
+#define STAT_IO 0x04
+#define STAT_CD 0x08
+#define STAT_REQ 0x10
+#define STAT_SEL 0x20
+#define STAT_PARITY 0x40
+#define STAT_ARB_CMPL 0x80
+
+/*
+ REQUESTS
+*/
+
+#define REQ_MASK (STAT_CD | STAT_IO | STAT_MSG)
+#define REQ_DATAOUT 0
+#define REQ_DATAIN STAT_IO
+#define REQ_CMDOUT STAT_CD
+#define REQ_STATIN (STAT_CD | STAT_IO)
+#define REQ_MSGOUT (STAT_MSG | STAT_CD)
+#define REQ_MSGIN (STAT_MSG | STAT_CD | STAT_IO)
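+
+/*
+ * For reference, the phase encodings these masks produce (MSG, C/D, I/O
+ * as read back in STATUS):
+ *
+ *	MSG  C/D  I/O	phase
+ *	 0    0    0	DATA OUT
+ *	 0    0    1	DATA IN
+ *	 0    1    0	COMMAND
+ *	 0    1    1	STATUS
+ *	 1    1    0	MESSAGE OUT
+ *	 1    1    1	MESSAGE IN
+ */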
+
+extern volatile int seagate_st0x_timeout;
+
+#ifdef PARITY
+ #define BASE_CMD CMD_EN_PARITY
+#else
+ #define BASE_CMD 0
+#endif
+
+/*
+ Debugging code
+*/
+
+#define PHASE_BUS_FREE 1
+#define PHASE_ARBITRATION 2
+#define PHASE_SELECTION 4
+#define PHASE_DATAIN 8
+#define PHASE_DATAOUT 0x10
+#define PHASE_CMDOUT 0x20
+#define PHASE_MSGIN 0x40
+#define PHASE_MSGOUT 0x80
+#define PHASE_STATUSIN 0x100
+#define PHASE_ETC (PHASE_DATAIN | PHASE_DATAOUT | PHASE_CMDOUT | PHASE_MSGIN | PHASE_MSGOUT | PHASE_STATUSIN)
+#define PRINT_COMMAND 0x200
+#define PHASE_EXIT 0x400
+#define PHASE_RESELECT 0x800
+#define DEBUG_FAST 0x1000
+#define DEBUG_SG 0x2000
+#define DEBUG_LINKED 0x4000
+#define DEBUG_BORKEN 0x8000
+
+/*
+ * Control options - these are timeouts specified in .01 seconds.
+ */
+
+/* 30, 20 work */
+#define ST0X_BUS_FREE_DELAY 25
+#define ST0X_SELECTION_DELAY 25
+
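+/* Non-specific End Of Interrupt: writes 0x20 to the master 8259 PIC at I/O port 0x20. */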
+#define eoi() __asm__("push %%eax\nmovb $0x20, %%al\noutb %%al, $0x20\npop %%eax"::)
+
+#define SEAGATE 1 /* these determine the type of the controller */
+#define FD 2
+
+#define ST0X_ID_STR "Seagate ST-01/ST-02"
+#define FD_ID_STR "TMC-8XX/TMC-950"
+
+#endif
+
diff --git a/i386/i386at/gpl/linux/scsi/sr.c b/i386/i386at/gpl/linux/scsi/sr.c
new file mode 100644
index 00000000..dc15cf58
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/sr.c
@@ -0,0 +1,1191 @@
+/*
+ * sr.c Copyright (C) 1992 David Giller
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * adapted from:
+ * sd.c Copyright (C) 1992 Drew Eckhardt
+ * Linux scsi disk driver by
+ * Drew Eckhardt <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale ericy@cais.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Modified by Eric Youngdale eric@aib.com to support loadable
+ * low-level scsi drivers.
+ *
+ * Modified by Thomas Quinot thomas@melchior.cuivre.fdn.fr to
+ * provide auto-eject.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/cdrom.h>
+#include <asm/system.h>
+
+#define MAJOR_NR SCSI_CDROM_MAJOR
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sr.h"
+#include "scsi_ioctl.h" /* For the door lock/unlock commands */
+#include "constants.h"
+
+#define MAX_RETRIES 3
+#define SR_TIMEOUT (150 * HZ)
+
+static int sr_init(void);
+static void sr_finish(void);
+static int sr_attach(Scsi_Device *);
+static int sr_detect(Scsi_Device *);
+static void sr_detach(Scsi_Device *);
+
+struct Scsi_Device_Template sr_template = {NULL, "cdrom", "sr", NULL, TYPE_ROM,
+ SCSI_CDROM_MAJOR, 0, 0, 0, 1,
+ sr_detect, sr_init,
+ sr_finish, sr_attach, sr_detach};
+
+Scsi_CD * scsi_CDs = NULL;
+static int * sr_sizes;
+
+static int * sr_blocksizes;
+
+static int sr_open(struct inode *, struct file *);
+static void get_sectorsize(int);
+
+extern int sr_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+void requeue_sr_request (Scsi_Cmnd * SCpnt);
+static int check_cdrom_media_change(kdev_t);
+
+static void sr_release(struct inode * inode, struct file * file)
+{
+ sync_dev(inode->i_rdev);
+ if(! --scsi_CDs[MINOR(inode->i_rdev)].device->access_count)
+ {
+ sr_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
+ if (scsi_CDs[MINOR(inode->i_rdev)].auto_eject)
+ sr_ioctl(inode, NULL, CDROMEJECT, 0);
+ }
+ if (scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)
+ (*scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)--;
+ if(sr_template.usage_count) (*sr_template.usage_count)--;
+}
+
+static struct file_operations sr_fops =
+{
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ sr_ioctl, /* ioctl */
+ NULL, /* mmap */
+ sr_open, /* special open code */
+ sr_release, /* release */
+ NULL, /* fsync */
+ NULL, /* fasync */
+ check_cdrom_media_change, /* Disk change */
+ NULL /* revalidate */
+};
+
+/*
+ * This function checks to see if the media has been changed in the
+ * CDROM drive. It is possible that we have already sensed a change,
+ * or the drive may have sensed one and not yet reported it. We must
+ * be ready for either case. This function always reports the current
+ * value of the changed bit. If flag is 0, then the changed bit is reset.
+ * This function could be done as an ioctl, but we would need to have
+ * an inode for that to work, and we do not always have one.
+ */
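+/*
+ * (This routine is installed above as the check_media_change entry of
+ * sr_fops, so it is normally reached through check_disk_change() when the
+ * device is opened.)
+ */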
+
+int check_cdrom_media_change(kdev_t full_dev){
+ int retval, target;
+ struct inode inode;
+ int flag = 0;
+
+ target = MINOR(full_dev);
+
+ if (target >= sr_template.nr_dev) {
+ printk("CD-ROM request error: invalid device.\n");
+ return 0;
+ };
+
+ inode.i_rdev = full_dev; /* This is all we really need here */
+ retval = sr_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
+
+ if(retval){ /* Unable to test, unit probably not ready. This usually
+ * means there is no disc in the drive. Mark as changed,
+ * and we will figure it out later once the drive is
+ * available again. */
+
+ scsi_CDs[target].device->changed = 1;
+ return 1; /* This will force a flush, if called from
+ * check_disk_change */
+ };
+
+ retval = scsi_CDs[target].device->changed;
+ if(!flag) {
+ scsi_CDs[target].device->changed = 0;
+ /* If the disk changed, the capacity will now be different,
+ * so we force a re-read of this information */
+ if (retval) scsi_CDs[target].needs_sector_size = 1;
+ };
+ return retval;
+}
+
+/*
+ * rw_intr is the interrupt routine for the device driver. It is notified at the
+ * end of a SCSI read / write, and takes one of several actions based on success or failure.
+ */
+
+static void rw_intr (Scsi_Cmnd * SCpnt)
+{
+ int result = SCpnt->result;
+ int this_count = SCpnt->this_count;
+
+#ifdef DEBUG
+ printk("sr.c done: %x %x\n",result, SCpnt->request.bh->b_data);
+#endif
+ if (!result)
+ { /* No error */
+ if (SCpnt->use_sg == 0) {
+ if (SCpnt->buffer != SCpnt->request.buffer)
+ {
+ int offset;
+ offset = (SCpnt->request.sector % 4) << 9;
+ memcpy((char *)SCpnt->request.buffer,
+ (char *)SCpnt->buffer + offset,
+ this_count << 9);
+ /* Even though we are not using scatter-gather, we look
+ * ahead and see if there is a linked request for the
+ * other half of this buffer. If there is, then satisfy
+ * it. */
+ if((offset == 0) && this_count == 2 &&
+ SCpnt->request.nr_sectors > this_count &&
+ SCpnt->request.bh &&
+ SCpnt->request.bh->b_reqnext &&
+ SCpnt->request.bh->b_reqnext->b_size == 1024) {
+ memcpy((char *)SCpnt->request.bh->b_reqnext->b_data,
+ (char *)SCpnt->buffer + 1024,
+ 1024);
+ this_count += 2;
+ };
+
+ scsi_free(SCpnt->buffer, 2048);
+ }
+ } else {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+ if (sgpnt[i].alt_address) {
+ if (sgpnt[i].alt_address != sgpnt[i].address) {
+ memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
+ };
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ };
+ };
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
+ if(SCpnt->request.sector % 4) this_count -= 2;
+ /* See if there is a padding record at the end that needs to be removed */
+ if(this_count > SCpnt->request.nr_sectors)
+ this_count -= 2;
+ };
+
+#ifdef DEBUG
+ printk("(%x %x %x) ",SCpnt->request.bh, SCpnt->request.nr_sectors,
+ this_count);
+#endif
+ if (SCpnt->request.nr_sectors > this_count)
+ {
+ SCpnt->request.errors = 0;
+ if (!SCpnt->request.bh)
+ panic("sr.c: linked page request (%lx %x)",
+ SCpnt->request.sector, this_count);
+ }
+
+ SCpnt = end_scsi_request(SCpnt, 1, this_count); /* All done */
+ requeue_sr_request(SCpnt);
+ return;
+ } /* Normal completion */
+
+ /* We only come through here if we have an error of some kind */
+
+ /* Free up any indirection buffers we allocated for DMA purposes. */
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+ if (sgpnt[i].alt_address) {
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ };
+ };
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
+ } else {
+ if (SCpnt->buffer != SCpnt->request.buffer)
+ scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ };
+
+ if (driver_byte(result) != 0) {
+ if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
+ if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
+ /* detected disc change. set a bit and quietly refuse
+ * further access. */
+
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sr_request(SCpnt);
+ return;
+ }
+ }
+
+ if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
+ printk("CD-ROM error: ");
+ print_sense("sr", SCpnt);
+ printk("command was: ");
+ print_command(SCpnt->cmnd);
+ if (scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
+ requeue_sr_request(SCpnt);
+ result = 0;
+ return;
+ } else {
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sr_request(SCpnt); /* Do next request */
+ return;
+ }
+
+ }
+
+ if (SCpnt->sense_buffer[2] == NOT_READY) {
+ printk("CDROM not ready. Make sure you have a disc in the drive.\n");
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sr_request(SCpnt); /* Do next request */
+ return;
+ };
+ }
+
+ /* We only get this far if we have an error we have not recognized */
+ if(result) {
+ printk("SCSI CD error : host %d id %d lun %d return code = %03x\n",
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->lun,
+ result);
+
+ if (status_byte(result) == CHECK_CONDITION)
+ print_sense("sr", SCpnt);
+
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
+ requeue_sr_request(SCpnt);
+ }
+}
+
+/*
+ * Here I tried to implement better support for PhotoCD's.
+ *
+ * Much of this has to be done with vendor-specific SCSI commands.
+ * So I have to complete it step by step. Useful information is welcome.
+ *
+ * Actually works:
+ * - NEC: Detection and support of multisession CD's. Special handling
+ * for XA-disks is not necessary.
+ *
+ * - TOSHIBA: setting density is done here now, mounting PhotoCD's should
+ * work now without running the program "set_density"
+ * Multisession CD's are supported too.
+ *
+ * kraxel@cs.tu-berlin.de (Gerd Knorr)
+ */
+/*
+ * 19950704 operator@melchior.cuivre.fdn.fr (Thomas Quinot)
+ *
+ * - SONY: Same as Nec.
+ *
+ * - PIONEER: works with SONY code
+ */
+
+static void sr_photocd(struct inode *inode)
+{
+ unsigned long sector,min,sec,frame;
+ unsigned char buf[40]; /* the buffer for the ioctl */
+ unsigned char *cmd; /* the scsi-command */
+ unsigned char *send; /* the data we send to the drive ... */
+ unsigned char *rec; /* ... and get back */
+ int rc,is_xa,no_multi;
+
+ if (scsi_CDs[MINOR(inode->i_rdev)].xa_flags & 0x02) {
+#ifdef DEBUG
+ printk("sr_photocd: CDROM and/or the driver does not support multisession CD's");
+#endif
+ return;
+ }
+
+ if (!suser()) {
+ /* The caller is not the superuser, so SCSI_IOCTL_SEND_COMMAND is not
+ * allowed. mpcd_sector is therefore initialized to zero, because we
+ * cannot query the real value. This is only necessary if access_count
+ * is 1; otherwise no disc change has happened since the last call of
+ * this function and we can keep the old value.
+ */
+ if (1 == scsi_CDs[MINOR(inode->i_rdev)].device->access_count) {
+ scsi_CDs[MINOR(inode->i_rdev)].mpcd_sector = 0;
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags &= ~0x01;
+ }
+ return;
+ }
+
+ sector = 0;
+ is_xa = 0;
+ no_multi = 0;
+ cmd = rec = &buf[8];
+
+ switch(scsi_CDs[MINOR(inode->i_rdev)].device->manufacturer) {
+
+ case SCSI_MAN_NEC:
+#ifdef DEBUG
+ printk("sr_photocd: use NEC code\n");
+#endif
+ memset(buf,0,40);
+ *((unsigned long*)buf) = 0x0; /* we send nothing... */
+ *((unsigned long*)buf+1) = 0x16; /* and receive 0x16 bytes */
+ cmd[0] = 0xde;
+ cmd[1] = 0x03;
+ cmd[2] = 0xb0;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ printk("sr_photocd: ioctl error (NEC): 0x%x\n",rc);
+ break;
+ }
+ if (rec[14] != 0 && rec[14] != 0xb0) {
+ printk("sr_photocd: (NEC) Hmm, seems the CDROM doesn't support multisession CD's\n");
+ no_multi = 1;
+ break;
+ }
+ min = (unsigned long) rec[15]/16*10 + (unsigned long) rec[15]%16;
+ sec = (unsigned long) rec[16]/16*10 + (unsigned long) rec[16]%16;
+ frame = (unsigned long) rec[17]/16*10 + (unsigned long) rec[17]%16;
+ sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame;
+ is_xa = (rec[14] == 0xb0);
+#ifdef DEBUG
+ if (sector) {
+ printk("sr_photocd: multisession CD detected. start: %lu\n",sector);
+ }
+#endif
+ break;
+
+ case SCSI_MAN_TOSHIBA:
+#ifdef DEBUG
+ printk("sr_photocd: use TOSHIBA code\n");
+#endif
+
+ /* we request some disc information (is it an XA-CD?
+ * where does the last session start?) */
+ memset(buf,0,40);
+ *((unsigned long*)buf) = 0;
+ *((unsigned long*)buf+1) = 4; /* we receive 4 bytes from the drive */
+ cmd[0] = 0xc7;
+ cmd[1] = 3;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ if (rc == 0x28000002) {
+ /* Got a "not ready" error. No way to find out whether this is
+ * because there is no CD in the drive or because the drive
+ * doesn't know about multisession CD's. So we need to do an extra check... */
+ if (kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_TEST_UNIT_READY, NULL)) {
+ printk("sr_photocd: drive not ready\n");
+ } else {
+ printk("sr_photocd: (TOSHIBA) Hmm, seems the CDROM doesn't support multisession CD's\n");
+ no_multi = 1;
+ }
+ } else
+ printk("sr_photocd: ioctl error (TOSHIBA #1): 0x%x\n",rc);
+ break; /* if the first ioctl fails, we don't call the second one */
+ }
+ is_xa = (rec[0] == 0x20);
+ min = (unsigned long) rec[1]/16*10 + (unsigned long) rec[1]%16;
+ sec = (unsigned long) rec[2]/16*10 + (unsigned long) rec[2]%16;
+ frame = (unsigned long) rec[3]/16*10 + (unsigned long) rec[3]%16;
+ sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame;
+ if (sector) {
+ sector -= CD_BLOCK_OFFSET;
+#ifdef DEBUG
+ printk("sr_photocd: multisession CD detected: start: %lu\n",sector);
+#endif
+ }
+
+ /* now we do a get_density... */
+ memset(buf,0,40);
+ *((unsigned long*)buf) = 0;
+ *((unsigned long*)buf+1) = 12;
+ cmd[0] = 0x1a;
+ cmd[2] = 1;
+ cmd[4] = 12;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ printk("sr_photocd: ioctl error (TOSHIBA #2): 0x%x\n",rc);
+ break;
+ }
+#ifdef DEBUG
+ printk("sr_photocd: get_density: 0x%x\n",rec[4]);
+#endif
+
+ /* ...and only if necessary a set_density */
+ if ((rec[4] != 0x81 && is_xa) || (rec[4] != 0 && !is_xa)) {
+#ifdef DEBUG
+ printk("sr_photocd: doing set_density\n");
+#endif
+ memset(buf,0,40);
+ *((unsigned long*)buf) = 12; /* sending 12 bytes... */
+ *((unsigned long*)buf+1) = 0;
+ cmd[0] = 0x15;
+ cmd[1] = (1 << 4);
+ cmd[4] = 12;
+ send = &cmd[6]; /* this is a 6-Byte command */
+ send[ 3] = 0x08; /* the data for the command */
+ send[ 4] = (is_xa) ? 0x81 : 0; /* density 0x81 for XA-CD's, 0 else */
+ send[10] = 0x08;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ printk("sr_photocd: ioctl error (TOSHIBA #3): 0x%x\n",rc);
+ }
+ /* The set_density command may have changed the sector size or capacity. */
+ scsi_CDs[MINOR(inode->i_rdev)].needs_sector_size = 1;
+ }
+ break;
+
+ case SCSI_MAN_SONY: /* Thomas QUINOT <thomas@melchior.cuivre.fdn.fr> */
+ case SCSI_MAN_PIONEER:
+#ifdef DEBUG
+ printk("sr_photocd: use SONY/PIONEER code\n");
+#endif
+ memset(buf,0,40);
+ *((unsigned long*)buf) = 0x0; /* we send nothing... */
+ *((unsigned long*)buf+1) = 0x0c; /* and receive 0x0c bytes */
+ cmd[0] = 0x43; /* Read TOC */
+ cmd[8] = 0x0c;
+ cmd[9] = 0x40;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+
+ if (rc != 0) {
+ printk("sr_photocd: ioctl error (SONY): 0x%x\n",rc);
+ break;
+ }
+ if ((rec[0] << 8) + rec[1] != 0x0a) {
+ printk("sr_photocd: (SONY) Hmm, seems the CDROM doesn't support multisession CD's\n");
+ no_multi = 1;
+ break;
+ }
+ sector = rec[11] + (rec[10] << 8) + (rec[9] << 16) + (rec[8] << 24);
+ is_xa = !!sector;
+#ifdef DEBUG
+ if (sector)
+ printk ("sr_photocd: multisession CD detected. start: %lu\n",sector);
+#endif
+ break;
+
+ case SCSI_MAN_NEC_OLDCDR:
+ case SCSI_MAN_UNKNOWN:
+ default:
+ sector = 0;
+ no_multi = 1;
+ break; }
+
+ scsi_CDs[MINOR(inode->i_rdev)].mpcd_sector = sector;
+ if (is_xa)
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags |= 0x01;
+ else
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags &= ~0x01;
+ if (no_multi)
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags |= 0x02;
+ return;
+}
+
+static int sr_open(struct inode * inode, struct file * filp)
+{
+ if(MINOR(inode->i_rdev) >= sr_template.nr_dev ||
+ !scsi_CDs[MINOR(inode->i_rdev)].device) return -ENXIO; /* No such device */
+
+ if (filp->f_mode & 2)
+ return -EROFS;
+
+ check_disk_change(inode->i_rdev);
+
+ if(!scsi_CDs[MINOR(inode->i_rdev)].device->access_count++)
+ sr_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
+ if (scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)
+ (*scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)++;
+ if(sr_template.usage_count) (*sr_template.usage_count)++;
+
+ sr_photocd(inode);
+
+ /* If this device did not have media in the drive at boot time, then
+ * we would have been unable to get the sector size. Check to see if
+ * this is the case, and try again.
+ */
+
+ if(scsi_CDs[MINOR(inode->i_rdev)].needs_sector_size)
+ get_sectorsize(MINOR(inode->i_rdev));
+
+ return 0;
+}
+
+
+/*
+ * do_sr_request() is the request handler function for the sr driver.
+ * Its function in life is to take block device requests, and
+ * translate them to SCSI commands.
+ */
+
+static void do_sr_request (void)
+{
+ Scsi_Cmnd * SCpnt = NULL;
+ struct request * req = NULL;
+ Scsi_Device * SDev;
+ unsigned long flags;
+ int flag = 0;
+
+ while (1==1){
+ save_flags(flags);
+ cli();
+ if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
+ restore_flags(flags);
+ return;
+ };
+
+ INIT_SCSI_REQUEST;
+
+ SDev = scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device;
+
+ /*
+ * I am not sure where the best place to do this is. We need
+ * to hook in at a place that we are likely to reach when coming
+ * from user space.
+ */
+ if( SDev->was_reset )
+ {
+ /*
+ * We need to relock the door, but we might
+ * be in an interrupt handler. Only do this
+ * from user space, since we do not want to
+ * sleep from an interrupt.
+ */
+ if( SDev->removable && !intr_count )
+ {
+ scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
+ }
+ SDev->was_reset = 0;
+ }
+
+ if (flag++ == 0)
+ SCpnt = allocate_device(&CURRENT,
+ scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device, 0);
+ else SCpnt = NULL;
+ restore_flags(flags);
+
+ /* This is a performance enhancement. We dig down into the request list and
+ * try to find a queueable request (i.e. device not busy, and host able to
+ * accept another command). If we find one, then we queue it. This can
+ * make a big difference on systems with more than one disk drive. We want
+ * to have the interrupts off when monkeying with the request list, because
+ * otherwise the kernel might try and slip in a request in between somewhere. */
+
+ if (!SCpnt && sr_template.nr_dev > 1){
+ struct request *req1;
+ req1 = NULL;
+ save_flags(flags);
+ cli();
+ req = CURRENT;
+ while(req){
+ SCpnt = request_queueable(req,
+ scsi_CDs[DEVICE_NR(req->rq_dev)].device);
+ if(SCpnt) break;
+ req1 = req;
+ req = req->next;
+ };
+ if (SCpnt && req->rq_status == RQ_INACTIVE) {
+ if (req == CURRENT)
+ CURRENT = CURRENT->next;
+ else
+ req1->next = req->next;
+ };
+ restore_flags(flags);
+ };
+
+ if (!SCpnt)
+ return; /* Could not find anything to do */
+
+ wake_up(&wait_for_request);
+
+ /* Queue command */
+ requeue_sr_request(SCpnt);
+ }; /* While */
+}
+
+void requeue_sr_request (Scsi_Cmnd * SCpnt)
+{
+ unsigned int dev, block, realcount;
+ unsigned char cmd[10], *buffer, tries;
+ int this_count, start, end_rec;
+
+ tries = 2;
+
+ repeat:
+ if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
+ do_sr_request();
+ return;
+ }
+
+ dev = MINOR(SCpnt->request.rq_dev);
+ block = SCpnt->request.sector;
+ buffer = NULL;
+ this_count = 0;
+
+ if (dev >= sr_template.nr_dev) {
+ /* printk("CD-ROM request error: invalid device.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ tries = 2;
+ goto repeat;
+ }
+
+ if (!scsi_CDs[dev].use) {
+ /* printk("CD-ROM request error: device marked not in use.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ tries = 2;
+ goto repeat;
+ }
+
+ if (scsi_CDs[dev].device->changed) {
+ /*
+ * quietly refuse to do anything to a changed disc
+ * until the changed bit has been reset
+ */
+ /* printk("CD-ROM has been changed. Prohibiting further I/O.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ tries = 2;
+ goto repeat;
+ }
+
+ switch (SCpnt->request.cmd)
+ {
+ case WRITE:
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ break;
+ case READ :
+ cmd[0] = READ_6;
+ break;
+ default :
+ panic ("Unknown sr command %d\n", SCpnt->request.cmd);
+ }
+
+ cmd[1] = (SCpnt->lun << 5) & 0xe0;
+
+ /*
+ * Now do the grungy work of figuring out which sectors we need, and
+ * where in memory we are going to put them.
+ *
+ * The variables we need are:
+ *
+ * this_count= number of 512 byte sectors being read
+ * block = starting cdrom sector to read.
+ * realcount = # of cdrom sectors to read
+ *
+ * The major difference between a scsi disk and a scsi cdrom
+ * is that we will always use scatter-gather if we can, because we can
+ * work around the fact that the buffer cache has a block size of 1024,
+ * and we have 2048 byte sectors. This code should work for buffers that
+ * are any multiple of 512 bytes long.
+ */
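+/*
+ * Worked example with made-up numbers: a request for 16 512-byte sectors
+ * starting at sector 40, on a drive with 2048-byte sectors, gives
+ * this_count = 16, block = 40 >> 2 = 10 (the cdrom sector) and
+ * realcount = (16 + 3) / 4 = 4 cdrom sectors to read.
+ */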
+
+ SCpnt->use_sg = 0;
+
+ if (SCpnt->host->sg_tablesize > 0 &&
+ (!need_isa_buffer ||
+ dma_free_sectors >= 10)) {
+ struct buffer_head * bh;
+ struct scatterlist * sgpnt;
+ int count, this_count_max;
+ bh = SCpnt->request.bh;
+ this_count = 0;
+ count = 0;
+ this_count_max = (scsi_CDs[dev].ten ? 0xffff : 0xff) << 4;
+ /* Calculate how many links we can use. First see if we need
+ * a padding record at the start */
+ this_count = SCpnt->request.sector % 4;
+ if(this_count) count++;
+ while(bh && count < SCpnt->host->sg_tablesize) {
+ if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
+ this_count += (bh->b_size >> 9);
+ count++;
+ bh = bh->b_reqnext;
+ };
+ /* Fix up in case of an odd record at the end */
+ end_rec = 0;
+ if(this_count % 4) {
+ if (count < SCpnt->host->sg_tablesize) {
+ count++;
+ end_rec = (4 - (this_count % 4)) << 9;
+ this_count += 4 - (this_count % 4);
+ } else {
+ count--;
+ this_count -= (this_count % 4);
+ };
+ };
+ SCpnt->use_sg = count; /* Number of chains */
+ count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes*/
+ while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
+ count = count << 1;
+ SCpnt->sglist_len = count;
+ sgpnt = (struct scatterlist * ) scsi_malloc(count);
+ if (!sgpnt) {
+ printk("Warning - running *really* short on DMA buffers\n");
+ SCpnt->use_sg = 0; /* No memory left - bail out */
+ } else {
+ buffer = (unsigned char *) sgpnt;
+ count = 0;
+ bh = SCpnt->request.bh;
+ if(SCpnt->request.sector % 4) {
+ sgpnt[count].length = (SCpnt->request.sector % 4) << 9;
+ sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
+ if(!sgpnt[count].address) panic("SCSI DMA pool exhausted.");
+ sgpnt[count].alt_address = sgpnt[count].address; /* Flag to delete
+ if needed */
+ count++;
+ };
+ for(bh = SCpnt->request.bh; count < SCpnt->use_sg;
+ count++, bh = bh->b_reqnext) {
+ if (bh) { /* Need a placeholder at the end of the record? */
+ sgpnt[count].address = bh->b_data;
+ sgpnt[count].length = bh->b_size;
+ sgpnt[count].alt_address = NULL;
+ } else {
+ sgpnt[count].address = (char *) scsi_malloc(end_rec);
+ if(!sgpnt[count].address) panic("SCSI DMA pool exhausted.");
+ sgpnt[count].length = end_rec;
+ sgpnt[count].alt_address = sgpnt[count].address;
+ if (count+1 != SCpnt->use_sg) panic("Bad sr request list");
+ break;
+ };
+ if (((long) sgpnt[count].address) + sgpnt[count].length > ISA_DMA_THRESHOLD &&
+ SCpnt->host->unchecked_isa_dma) {
+ sgpnt[count].alt_address = sgpnt[count].address;
+ /* We try and avoid exhausting the DMA pool, since it is easier
+ * to control usage here. In other places we might have a more
+ * pressing need, and we would be screwed if we ran out */
+ if(dma_free_sectors < (sgpnt[count].length >> 9) + 5) {
+ sgpnt[count].address = NULL;
+ } else {
+ sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
+ };
+ /* If we start running low on DMA buffers, we abort the scatter-gather
+ * operation, and free all of the memory we have allocated. We want to
+ * ensure that all scsi operations are able to do at least a non-scatter/gather
+ * operation */
+ if(sgpnt[count].address == NULL){ /* Out of dma memory */
+ printk("Warning: Running low on SCSI DMA buffers");
+ /* Try switching back to a non scatter-gather operation. */
+ while(--count >= 0){
+ if(sgpnt[count].alt_address)
+ scsi_free(sgpnt[count].address, sgpnt[count].length);
+ };
+ SCpnt->use_sg = 0;
+ scsi_free(buffer, SCpnt->sglist_len);
+ break;
+ }; /* if address == NULL */
+ }; /* if need DMA fixup */
+ }; /* for loop to fill list */
+#ifdef DEBUG
+ printk("SR: %d %d %d %d %d *** ",SCpnt->use_sg, SCpnt->request.sector,
+ this_count,
+ SCpnt->request.current_nr_sectors,
+ SCpnt->request.nr_sectors);
+ for(count=0; count<SCpnt->use_sg; count++)
+ printk("SGlist: %d %x %x %x\n", count,
+ sgpnt[count].address,
+ sgpnt[count].alt_address,
+ sgpnt[count].length);
+#endif
+ }; /* Able to allocate scatter-gather list */
+ };
+
+ if (SCpnt->use_sg == 0){
+ /* We cannot use scatter-gather. Do this the old-fashioned way */
+ if (!SCpnt->request.bh)
+ this_count = SCpnt->request.nr_sectors;
+ else
+ this_count = (SCpnt->request.bh->b_size >> 9);
+
+ start = block % 4;
+ if (start)
+ {
+ this_count = ((this_count > 4 - start) ?
+ (4 - start) : (this_count));
+ buffer = (unsigned char *) scsi_malloc(2048);
+ }
+ else if (this_count < 4)
+ {
+ buffer = (unsigned char *) scsi_malloc(2048);
+ }
+ else
+ {
+ this_count -= this_count % 4;
+ buffer = (unsigned char *) SCpnt->request.buffer;
+ if (((long) buffer) + (this_count << 9) > ISA_DMA_THRESHOLD &&
+ SCpnt->host->unchecked_isa_dma)
+ buffer = (unsigned char *) scsi_malloc(this_count << 9);
+ }
+ };
+
+ if (scsi_CDs[dev].sector_size == 2048)
+ block = block >> 2; /* These are the sectors that the cdrom uses */
+ else
+ block = block & 0xfffffffc;
+
+ realcount = (this_count + 3) / 4;
+
+ if (scsi_CDs[dev].sector_size == 512) realcount = realcount << 2;
+
+ if (((realcount > 0xff) || (block > 0x1fffff)) && scsi_CDs[dev].ten)
+ {
+ if (realcount > 0xffff)
+ {
+ realcount = 0xffff;
+ this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
+ }
+
+ cmd[0] += READ_10 - READ_6 ;
+ cmd[2] = (unsigned char) (block >> 24) & 0xff;
+ cmd[3] = (unsigned char) (block >> 16) & 0xff;
+ cmd[4] = (unsigned char) (block >> 8) & 0xff;
+ cmd[5] = (unsigned char) block & 0xff;
+ cmd[6] = cmd[9] = 0;
+ cmd[7] = (unsigned char) (realcount >> 8) & 0xff;
+ cmd[8] = (unsigned char) realcount & 0xff;
+ }
+ else
+ {
+ if (realcount > 0xff)
+ {
+ realcount = 0xff;
+ this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
+ }
+
+ cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+ cmd[2] = (unsigned char) ((block >> 8) & 0xff);
+ cmd[3] = (unsigned char) block & 0xff;
+ cmd[4] = (unsigned char) realcount;
+ cmd[5] = 0;
+ }
+
+#ifdef DEBUG
+ {
+ int i;
+ printk("ReadCD: %d %d %d %d\n",block, realcount, buffer, this_count);
+ printk("Use sg: %d\n", SCpnt->use_sg);
+ printk("Dumping command: ");
+ for(i=0; i<12; i++) printk("%2.2x ", cmd[i]);
+ printk("\n");
+ };
+#endif
+
+ /* Some dumb host adapters can speed transfers by knowing the
+ * minimum transfersize in advance.
+ *
+ * We shouldn't disconnect in the middle of a sector, but the cdrom
+ * sector size can be larger than the size of a buffer and the
+ * transfer may be split to the size of a buffer. So it's safe to
+ * assume that we can at least transfer the minimum of the buffer
+ * size (1024) and the sector size between each connect / disconnect.
+ */
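+/*
+ * e.g. 2048-byte cdrom sectors give a transfersize of 1024 (one buffer
+ * cache block); 512-byte sectors give 512.
+ */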
+
+ SCpnt->transfersize = (scsi_CDs[dev].sector_size > 1024) ?
+ 1024 : scsi_CDs[dev].sector_size;
+
+ SCpnt->this_count = this_count;
+ scsi_do_cmd (SCpnt, (void *) cmd, buffer,
+ realcount * scsi_CDs[dev].sector_size,
+ rw_intr, SR_TIMEOUT, MAX_RETRIES);
+}
+
+static int sr_detect(Scsi_Device * SDp){
+
+ if(SDp->type != TYPE_ROM && SDp->type != TYPE_WORM) return 0;
+
+ printk("Detected scsi CD-ROM sr%d at scsi%d, channel %d, id %d, lun %d\n",
+ sr_template.dev_noticed++,
+ SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
+
+ return 1;
+}
+
+static int sr_attach(Scsi_Device * SDp){
+ Scsi_CD * cpnt;
+ int i;
+
+ if(SDp->type != TYPE_ROM && SDp->type != TYPE_WORM) return 1;
+
+ if (sr_template.nr_dev >= sr_template.dev_max)
+ {
+ SDp->attached--;
+ return 1;
+ }
+
+ for(cpnt = scsi_CDs, i=0; i<sr_template.dev_max; i++, cpnt++)
+ if(!cpnt->device) break;
+
+ if(i >= sr_template.dev_max) panic ("scsi_devices corrupt (sr)");
+
+ SDp->scsi_request_fn = do_sr_request;
+ scsi_CDs[i].device = SDp;
+ sr_template.nr_dev++;
+ if(sr_template.nr_dev > sr_template.dev_max)
+ panic ("scsi_devices corrupt (sr)");
+ return 0;
+}
+
+
+static void sr_init_done (Scsi_Cmnd * SCpnt)
+{
+ struct request * req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+}
+
+static void get_sectorsize(int i){
+ unsigned char cmd[10];
+ unsigned char *buffer;
+ int the_result, retries;
+ Scsi_Cmnd * SCpnt;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ SCpnt = allocate_device(NULL, scsi_CDs[i].device, 1);
+
+ retries = 3;
+ do {
+ cmd[0] = READ_CAPACITY;
+ cmd[1] = (scsi_CDs[i].device->lun << 5) & 0xe0;
+ memset ((void *) &cmd[2], 0, 8);
+ SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Mark as really busy */
+ SCpnt->cmd_len = 0;
+
+ memset(buffer, 0, 8);
+
+ /* Do the command and wait.. */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sr_init_done, SR_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ the_result = SCpnt->result;
+ retries--;
+
+ } while(the_result && retries);
+
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
+
+ wake_up(&SCpnt->device->device_wait);
+
+ if (the_result) {
+ scsi_CDs[i].capacity = 0x1fffff;
+ scsi_CDs[i].sector_size = 2048; /* A guess, just in case */
+ scsi_CDs[i].needs_sector_size = 1;
+ } else {
+ scsi_CDs[i].capacity = (buffer[0] << 24) |
+ (buffer[1] << 16) | (buffer[2] << 8) | buffer[3];
+ scsi_CDs[i].sector_size = (buffer[4] << 24) |
+ (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
+ if(scsi_CDs[i].sector_size == 0) scsi_CDs[i].sector_size = 2048;
+ /* Work around bug/feature in HP 4020i CD-Recorder... */
+ if(scsi_CDs[i].sector_size == 2340) scsi_CDs[i].sector_size = 2048;
+ if(scsi_CDs[i].sector_size != 2048 &&
+ scsi_CDs[i].sector_size != 512) {
+ printk ("scd%d : unsupported sector size %d.\n",
+ i, scsi_CDs[i].sector_size);
+ scsi_CDs[i].capacity = 0;
+ scsi_CDs[i].needs_sector_size = 1;
+ };
+ if(scsi_CDs[i].sector_size == 2048)
+ scsi_CDs[i].capacity *= 4;
+ scsi_CDs[i].needs_sector_size = 0;
+ sr_sizes[i] = scsi_CDs[i].capacity;
+ };
+ scsi_free(buffer, 512);
+}
+
+static int sr_registered = 0;
+
+static int sr_init()
+{
+ int i;
+
+ if(sr_template.dev_noticed == 0) return 0;
+
+ if(!sr_registered) {
+ if (register_blkdev(MAJOR_NR,"sr",&sr_fops)) {
+ printk("Unable to get major %d for SCSI-CD\n",MAJOR_NR);
+ return 1;
+ }
+ sr_registered++;
+ }
+
+
+ if (scsi_CDs) return 0;
+ sr_template.dev_max = sr_template.dev_noticed + SR_EXTRA_DEVS;
+ scsi_CDs = (Scsi_CD *) scsi_init_malloc(sr_template.dev_max * sizeof(Scsi_CD), GFP_ATOMIC);
+ memset(scsi_CDs, 0, sr_template.dev_max * sizeof(Scsi_CD));
+
+ sr_sizes = (int *) scsi_init_malloc(sr_template.dev_max * sizeof(int), GFP_ATOMIC);
+ memset(sr_sizes, 0, sr_template.dev_max * sizeof(int));
+
+ sr_blocksizes = (int *) scsi_init_malloc(sr_template.dev_max *
+ sizeof(int), GFP_ATOMIC);
+ for(i=0;i<sr_template.dev_max;i++) sr_blocksizes[i] = 2048;
+ blksize_size[MAJOR_NR] = sr_blocksizes;
+ return 0;
+}
+
+void sr_finish()
+{
+ int i;
+
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_size[MAJOR_NR] = sr_sizes;
+
+ for (i = 0; i < sr_template.nr_dev; ++i)
+ {
+ /* If we have already seen this, then skip it. Comes up
+ * with loadable modules. */
+ if (scsi_CDs[i].capacity) continue;
+ scsi_CDs[i].capacity = 0x1fffff;
+ scsi_CDs[i].sector_size = 2048; /* A guess, just in case */
+ scsi_CDs[i].needs_sector_size = 1;
+#if 0
+ /* seems better to leave this for later */
+ get_sectorsize(i);
+ printk("Scd sectorsize = %d bytes.\n", scsi_CDs[i].sector_size);
+#endif
+ scsi_CDs[i].use = 1;
+ scsi_CDs[i].ten = 1;
+ scsi_CDs[i].remap = 1;
+ scsi_CDs[i].auto_eject = 0; /* Default is not to eject upon unmount. */
+ sr_sizes[i] = scsi_CDs[i].capacity;
+ }
+
+
+ /* If our host adapter is capable of scatter-gather, then we increase
+ * the read-ahead to 16 blocks (32 sectors). If not, we use
+ * a two block (4 sector) read ahead. */
+ if(scsi_CDs[0].device && scsi_CDs[0].device->host->sg_tablesize)
+ read_ahead[MAJOR_NR] = 32; /* 32 sector read-ahead. Always removable. */
+ else
+ read_ahead[MAJOR_NR] = 4; /* 4 sector read-ahead */
+
+ return;
+}
+
+static void sr_detach(Scsi_Device * SDp)
+{
+ Scsi_CD * cpnt;
+ int i;
+
+ for(cpnt = scsi_CDs, i=0; i<sr_template.dev_max; i++, cpnt++)
+ if(cpnt->device == SDp) {
+ kdev_t devi = MKDEV(MAJOR_NR, i);
+
+ /*
+ * Since the cdrom is read-only, no need to sync the device.
+ * We should be kind to our buffer cache, however.
+ */
+ invalidate_inodes(devi);
+ invalidate_buffers(devi);
+
+ /*
+ * Reset things back to a sane state so that one can re-load a new
+ * driver (perhaps the same one).
+ */
+ cpnt->device = NULL;
+ cpnt->capacity = 0;
+ SDp->attached--;
+ sr_template.nr_dev--;
+ sr_template.dev_noticed--;
+ sr_sizes[i] = 0;
+ return;
+ }
+ return;
+}
+
+
+#ifdef MODULE
+
+int init_module(void) {
+ sr_template.usage_count = &mod_use_count_;
+ return scsi_register_module(MODULE_SCSI_DEV, &sr_template);
+}
+
+void cleanup_module( void)
+{
+ scsi_unregister_module(MODULE_SCSI_DEV, &sr_template);
+ unregister_blkdev(SCSI_CDROM_MAJOR, "sr");
+ sr_registered--;
+ if(scsi_CDs != NULL) {
+ scsi_init_free((char *) scsi_CDs,
+ (sr_template.dev_noticed + SR_EXTRA_DEVS)
+ * sizeof(Scsi_CD));
+
+ scsi_init_free((char *) sr_sizes, sr_template.dev_max * sizeof(int));
+ scsi_init_free((char *) sr_blocksizes, sr_template.dev_max * sizeof(int));
+ }
+
+ blksize_size[MAJOR_NR] = NULL;
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ blk_size[MAJOR_NR] = NULL;
+ read_ahead[MAJOR_NR] = 0;
+
+ sr_template.dev_max = 0;
+}
+#endif /* MODULE */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/sr.h b/i386/i386at/gpl/linux/scsi/sr.h
new file mode 100644
index 00000000..381678a6
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/sr.h
@@ -0,0 +1,40 @@
+/*
+ * sr.h by David Giller
+ * CD-ROM disk driver header file
+ *
+ * adapted from:
+ * sd.h Copyright (C) 1992 Drew Eckhardt
+ * SCSI disk driver header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ */
+
+#ifndef _SR_H
+#define _SR_H
+
+#include "scsi.h"
+
+typedef struct
+ {
+ unsigned capacity; /* size in blocks */
+ unsigned sector_size; /* size in bytes */
+ Scsi_Device *device;
+ unsigned long mpcd_sector; /* for reading multisession-CD's */
+ char xa_flags; /* some flags for handling XA-CD's */
+ unsigned char sector_bit_size; /* sector size = 2^sector_bit_size */
+ unsigned char sector_bit_shift; /* sectors/FS block = 2^sector_bit_shift*/
+ unsigned needs_sector_size:1; /* needs to get sector size */
+ unsigned ten:1; /* support ten byte commands */
+ unsigned remap:1; /* support remapping */
+ unsigned use:1; /* is this device still supportable */
+ unsigned auto_eject:1; /* auto-eject medium on last release. */
+ } Scsi_CD;
+
+extern Scsi_CD * scsi_CDs;
+
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/sr_ioctl.c b/i386/i386at/gpl/linux/scsi/sr_ioctl.c
new file mode 100644
index 00000000..2313cf8c
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/sr_ioctl.c
@@ -0,0 +1,489 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <asm/segment.h>
+#include <linux/errno.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sr.h"
+#include "scsi_ioctl.h"
+
+#include <linux/cdrom.h>
+
+#define IOCTL_RETRIES 3
+/* The CDROM is fairly slow, so we need a little extra time */
+/* In fact, it is very slow if it has to spin up first */
+#define IOCTL_TIMEOUT 3000
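+/* The timeout is in jiffies; at the usual HZ of 100 this is about 30 seconds. */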
+
+static void sr_ioctl_done(Scsi_Cmnd * SCpnt)
+{
+ struct request * req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+}
+
+/* We do our own retries because we want to know what the specific
+ error code is. Normally the UNIT_ATTENTION code will automatically
+ clear after one error */
+
+static int do_ioctl(int target, unsigned char * sr_cmd, void * buffer, unsigned buflength)
+{
+ Scsi_Cmnd * SCpnt;
+ int result;
+
+ SCpnt = allocate_device(NULL, scsi_CDs[target].device, 1);
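+	/* Issue the command and sleep until sr_ioctl_done() releases the semaphore. */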
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd(SCpnt,
+ (void *) sr_cmd, buffer, buflength, sr_ioctl_done,
+ IOCTL_TIMEOUT, IOCTL_RETRIES);
+ down(&sem);
+ }
+
+ result = SCpnt->result;
+
+ /* Minimal error checking. Ignore cases we know about, and report the rest. */
+ if(driver_byte(result) != 0)
+ switch(SCpnt->sense_buffer[2] & 0xf) {
+ case UNIT_ATTENTION:
+ scsi_CDs[target].device->changed = 1;
+ printk("Disc change detected.\n");
+ break;
+ case NOT_READY: /* This happens if there is no disc in drive */
+ printk("CDROM not ready. Make sure there is a disc in the drive.\n");
+ break;
+ case ILLEGAL_REQUEST:
+ printk("CDROM (ioctl) reports ILLEGAL REQUEST.\n");
+ break;
+ default:
+ printk("SCSI CD error: host %d id %d lun %d return code = %03x\n",
+ scsi_CDs[target].device->host->host_no,
+ scsi_CDs[target].device->id,
+ scsi_CDs[target].device->lun,
+ result);
+ printk("\tSense class %x, sense error %x, extended sense %x\n",
+ sense_class(SCpnt->sense_buffer[0]),
+ sense_error(SCpnt->sense_buffer[0]),
+ SCpnt->sense_buffer[2] & 0xf);
+
+ };
+
+ result = SCpnt->result;
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Deallocate */
+ wake_up(&SCpnt->device->device_wait);
+ /* Wake up a process waiting for device*/
+ return result;
+}
+
+int sr_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
+{
+ u_char sr_cmd[10];
+
+ kdev_t dev = inode->i_rdev;
+ int result, target, err;
+
+ target = MINOR(dev);
+
+ if (target >= sr_template.nr_dev ||
+ !scsi_CDs[target].device) return -ENXIO;
+
+ switch (cmd)
+ {
+ /* Sun-compatible */
+ case CDROMPAUSE:
+
+ sr_cmd[0] = SCMD_PAUSE_RESUME;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = 0;
+ sr_cmd[5] = sr_cmd[6] = sr_cmd[7] = 0;
+ sr_cmd[8] = 0;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+
+ case CDROMRESUME:
+
+ sr_cmd[0] = SCMD_PAUSE_RESUME;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = 0;
+ sr_cmd[5] = sr_cmd[6] = sr_cmd[7] = 0;
+ sr_cmd[8] = 1;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+
+ return result;
+
+ case CDROMPLAYMSF:
+ {
+ struct cdrom_msf msf;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (msf));
+ if (err) return err;
+
+ memcpy_fromfs(&msf, (void *) arg, sizeof(msf));
+
+ sr_cmd[0] = SCMD_PLAYAUDIO_MSF;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = 0;
+ sr_cmd[3] = msf.cdmsf_min0;
+ sr_cmd[4] = msf.cdmsf_sec0;
+ sr_cmd[5] = msf.cdmsf_frame0;
+ sr_cmd[6] = msf.cdmsf_min1;
+ sr_cmd[7] = msf.cdmsf_sec1;
+ sr_cmd[8] = msf.cdmsf_frame1;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+ }
+
+ case CDROMPLAYBLK:
+ {
+ struct cdrom_blk blk;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (blk));
+ if (err) return err;
+
+ memcpy_fromfs(&blk, (void *) arg, sizeof(blk));
+
+ sr_cmd[0] = SCMD_PLAYAUDIO10;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = blk.from >> 24;
+ sr_cmd[3] = blk.from >> 16;
+ sr_cmd[4] = blk.from >> 8;
+ sr_cmd[5] = blk.from;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = blk.len >> 8;
+ sr_cmd[8] = blk.len;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+ }
+
+ case CDROMPLAYTRKIND:
+ {
+ struct cdrom_ti ti;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (ti));
+ if (err) return err;
+
+ memcpy_fromfs(&ti, (void *) arg, sizeof(ti));
+
+ sr_cmd[0] = SCMD_PLAYAUDIO_TI;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = 0;
+ sr_cmd[3] = 0;
+ sr_cmd[4] = ti.cdti_trk0;
+ sr_cmd[5] = ti.cdti_ind0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = ti.cdti_trk1;
+ sr_cmd[8] = ti.cdti_ind1;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+
+ return result;
+ }
+
+ case CDROMREADTOCHDR:
+ {
+ struct cdrom_tochdr tochdr;
+ char * buffer;
+
+ sr_cmd[0] = SCMD_READ_TOC;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | 0x02; /* MSF format */
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = 0; /* MSB of length (12) */
+ sr_cmd[8] = 12; /* LSB of length */
+ sr_cmd[9] = 0;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ result = do_ioctl(target, sr_cmd, buffer, 12);
+
+ tochdr.cdth_trk0 = buffer[2];
+ tochdr.cdth_trk1 = buffer[3];
+
+ scsi_free(buffer, 512);
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_tochdr));
+ if (err)
+ return err;
+ memcpy_tofs ((void *) arg, &tochdr, sizeof (struct cdrom_tochdr));
+
+ return result;
+ }
+
+ case CDROMREADTOCENTRY:
+ {
+ struct cdrom_tocentry tocentry;
+ char * buffer;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (struct cdrom_tocentry));
+ if (err) return err;
+
+ memcpy_fromfs (&tocentry, (void *) arg, sizeof (struct cdrom_tocentry));
+
+ sr_cmd[0] = SCMD_READ_TOC;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | 0x02; /* MSF format */
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = tocentry.cdte_track;
+ sr_cmd[7] = 0; /* MSB of length (12) */
+ sr_cmd[8] = 12; /* LSB of length */
+ sr_cmd[9] = 0;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ result = do_ioctl (target, sr_cmd, buffer, 12);
+
+ if (tocentry.cdte_format == CDROM_MSF) {
+ tocentry.cdte_addr.msf.minute = buffer[9];
+ tocentry.cdte_addr.msf.second = buffer[10];
+ tocentry.cdte_addr.msf.frame = buffer[11];
+ tocentry.cdte_ctrl = buffer[5] & 0xf;
+ }
+ else
+ tocentry.cdte_addr.lba = (int) buffer[0];
+
+ scsi_free(buffer, 512);
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_tocentry));
+ if (err)
+ return err;
+ memcpy_tofs ((void *) arg, &tocentry, sizeof (struct cdrom_tocentry));
+
+ return result;
+ }
+
+ case CDROMSTOP:
+ sr_cmd[0] = START_STOP;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | 1;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+
+ case CDROMSTART:
+ sr_cmd[0] = START_STOP;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | 1;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = 1;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+
+ case CDROMEJECT:
+ /*
+ * Allow 0 for access count for auto-eject feature.
+ */
+ if (scsi_CDs[target].device -> access_count > 1)
+ return -EBUSY;
+
+ sr_ioctl (inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
+ sr_cmd[0] = START_STOP;
+ sr_cmd[1] = ((scsi_CDs[target].device -> lun) << 5) | 1;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = 0x02;
+
+ if (!(result = do_ioctl(target, sr_cmd, NULL, 255)))
+ scsi_CDs[target].device -> changed = 1;
+
+ return result;
+
+ case CDROMEJECT_SW:
+ scsi_CDs[target].auto_eject = !!arg;
+ return 0;
+
+ case CDROMVOLCTRL:
+ {
+ char * buffer, * mask;
+ struct cdrom_volctrl volctrl;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (struct cdrom_volctrl));
+ if (err) return err;
+
+ memcpy_fromfs (&volctrl, (void *) arg, sizeof (struct cdrom_volctrl));
+
+ /* First we get the current params so we can just twiddle the volume */
+
+ sr_cmd[0] = MODE_SENSE;
+ sr_cmd[1] = (scsi_CDs[target].device -> lun) << 5;
+ sr_cmd[2] = 0xe; /* Want mode page 0xe, CDROM audio params */
+ sr_cmd[3] = 0;
+ sr_cmd[4] = 28;
+ sr_cmd[5] = 0;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ if ((result = do_ioctl (target, sr_cmd, buffer, 28))) {
+ printk ("Hosed while obtaining audio mode page\n");
+ scsi_free(buffer, 512);
+ return result;
+ }
+
+ sr_cmd[0] = MODE_SENSE;
+ sr_cmd[1] = (scsi_CDs[target].device -> lun) << 5;
+ sr_cmd[2] = 0x4e; /* Want the mask for mode page 0xe */
+ sr_cmd[3] = 0;
+ sr_cmd[4] = 28;
+ sr_cmd[5] = 0;
+
+ mask = (unsigned char *) scsi_malloc(512);
+ if(!mask) {
+ scsi_free(buffer, 512);
+ return -ENOMEM;
+ };
+
+ if ((result = do_ioctl (target, sr_cmd, mask, 28))) {
+ printk ("Hosed while obtaining mask for audio mode page\n");
+ scsi_free(buffer, 512);
+ scsi_free(mask, 512);
+ return result;
+ }
+
+ /* Now mask and substitute our own volume and reuse the rest */
+ buffer[0] = 0; /* Clear reserved field */
+
+ buffer[21] = volctrl.channel0 & mask[21];
+ buffer[23] = volctrl.channel1 & mask[23];
+ buffer[25] = volctrl.channel2 & mask[25];
+ buffer[27] = volctrl.channel3 & mask[27];
+
+ sr_cmd[0] = MODE_SELECT;
+ sr_cmd[1] = ((scsi_CDs[target].device -> lun) << 5) | 0x10; /* Params are SCSI-2 */
+ sr_cmd[2] = sr_cmd[3] = 0;
+ sr_cmd[4] = 28;
+ sr_cmd[5] = 0;
+
+ result = do_ioctl (target, sr_cmd, buffer, 28);
+ scsi_free(buffer, 512);
+ scsi_free(mask, 512);
+ return result;
+ }
+
+ case CDROMSUBCHNL:
+ {
+ struct cdrom_subchnl subchnl;
+ unsigned char * buffer;
+
+ sr_cmd[0] = SCMD_READ_SUBCHANNEL;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | 0x02; /* MSF format */
+ sr_cmd[2] = 0x40; /* I do want the subchannel info */
+ sr_cmd[3] = 0x01; /* Give me current position info */
+ sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = 0;
+ sr_cmd[8] = 16;
+ sr_cmd[9] = 0;
+
+ buffer = (unsigned char*) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ result = do_ioctl(target, sr_cmd, buffer, 16);
+
+ subchnl.cdsc_audiostatus = buffer[1];
+ subchnl.cdsc_format = CDROM_MSF;
+ subchnl.cdsc_ctrl = buffer[5] & 0xf;
+ subchnl.cdsc_trk = buffer[6];
+ subchnl.cdsc_ind = buffer[7];
+
+ subchnl.cdsc_reladdr.msf.minute = buffer[13];
+ subchnl.cdsc_reladdr.msf.second = buffer[14];
+ subchnl.cdsc_reladdr.msf.frame = buffer[15];
+ subchnl.cdsc_absaddr.msf.minute = buffer[9];
+ subchnl.cdsc_absaddr.msf.second = buffer[10];
+ subchnl.cdsc_absaddr.msf.frame = buffer[11];
+
+ scsi_free(buffer, 512);
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_subchnl));
+ if (err)
+ return err;
+ memcpy_tofs ((void *) arg, &subchnl, sizeof (struct cdrom_subchnl));
+ return result;
+ }
+
+ case CDROMREADMODE2:
+ return -EINVAL;
+ case CDROMREADMODE1:
+ return -EINVAL;
+
+ /* block-copy from ../block/sbpcd.c with some adjustments... */
+ case CDROMMULTISESSION: /* tell start-of-last-session to user */
+ {
+ struct cdrom_multisession ms_info;
+ long lba;
+
+ err = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(struct cdrom_multisession));
+ if (err) return (err);
+
+ memcpy_fromfs(&ms_info, (void *) arg, sizeof(struct cdrom_multisession));
+
+ if (ms_info.addr_format==CDROM_MSF) { /* MSF-bin requested */
+ lba = scsi_CDs[target].mpcd_sector+CD_BLOCK_OFFSET;
+ ms_info.addr.msf.minute = lba / (CD_SECS*CD_FRAMES);
+ lba %= CD_SECS*CD_FRAMES;
+ ms_info.addr.msf.second = lba / CD_FRAMES;
+ ms_info.addr.msf.frame = lba % CD_FRAMES;
+ } else if (ms_info.addr_format==CDROM_LBA) /* lba requested */
+ ms_info.addr.lba=scsi_CDs[target].mpcd_sector;
+ else return (-EINVAL);
+
+ ms_info.xa_flag=scsi_CDs[target].xa_flags & 0x01;
+
+ err=verify_area(VERIFY_WRITE,(void *) arg,
+ sizeof(struct cdrom_multisession));
+ if (err) return (err);
+
+ memcpy_tofs((void *) arg, &ms_info, sizeof(struct cdrom_multisession));
+ return (0);
+ }
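+ /*
+ * Editor's note (illustrative, assuming the usual <linux/cdrom.h>
+ * values CD_FRAMES = 75, CD_SECS = 60 and CD_BLOCK_OFFSET = 150):
+ * for mpcd_sector = 10000 the MSF branch above computes
+ *
+ * lba = 10000 + 150 = 10150
+ * minute = 10150 / (60 * 75) = 2, remainder 1150
+ * second = 1150 / 75 = 15
+ * frame = 1150 % 75 = 25
+ *
+ * so the start of the last session is reported as MSF 02:15:25.
+ */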
+
+ case BLKRASET:
+ if(!suser()) return -EACCES;
+ if(!(inode->i_rdev)) return -EINVAL;
+ if(arg > 0xff) return -EINVAL;
+ read_ahead[MAJOR(inode->i_rdev)] = arg;
+ return 0;
+ RO_IOCTLS(dev,arg);
+ default:
+ return scsi_ioctl(scsi_CDs[target].device,cmd,(void *) arg);
+ }
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/i386/i386at/gpl/linux/scsi/t128.c b/i386/i386at/gpl/linux/scsi/t128.c
new file mode 100644
index 00000000..9212b61e
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/t128.c
@@ -0,0 +1,413 @@
+#define AUTOSENSE
+#define PSEUDO_DMA
+
+/*
+ * Trantor T128/T128F/T228 driver
+ * Note : architecturally, the T100 and T130 are different and won't
+ * work
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * DISTRIBUTION RELEASE 3.
+ *
+ * For more information, please consult
+ *
+ * Trantor Systems, Ltd.
+ * T128/T128F/T228 SCSI Host Adapter
+ * Hardware Specifications
+ *
+ * Trantor Systems, Ltd.
+ * 5415 Randall Place
+ * Fremont, CA 94538
+ * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * Options :
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
+ * increase compared to polled I/O.
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
+ *
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
+ * only really want to use this if you're having a problem with
+ * dropped characters during high speed communications, and even
+ * then, you're going to be better off twiddling with transfersize.
+ *
+ * USLEEP - enable support for devices that don't disconnect. Untested.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. Autoprobe (default) - since the board is memory mapped,
+ * a BIOS signature is scanned for to locate the registers.
+ * An interrupt is triggered to autoprobe for the interrupt
+ * line.
+ *
+ * 2. With command line overrides - t128=address,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 3. With the T128_OVERRIDE compile time define. This is
+ * specified as an array of address, irq tuples. Ie, for
+ * one board at the default 0xcc000 address, IRQ5, I could say
+ * -DT128_OVERRIDE={{0xcc000, 5}}
+ *
+ * Note that if the override methods are used, placeholders must
+ * be specified for other boards in the system.
+ *
+ * T128/T128F jumper/dipswitch settings (note : on my sample, the switches
+ * were epoxy'd shut, meaning I couldn't change the 0xcc000 base address) :
+ *
+ * T128 Sw7 Sw8 Sw6 = 0ws Sw5 = boot
+ * T128F Sw6 Sw7 Sw5 = 0ws Sw4 = boot Sw8 = floppy disable
+ * cc000 off off
+ * c8000 off on
+ * dc000 on off
+ * d8000 on on
+ *
+ *
+ * Interrupts
+ * There is a 12 pin jumper block, jp1, numbered as follows :
+ * T128 (JP1) T128F (J5)
+ * 2 4 6 8 10 12 11 9 7 5 3 1
+ * 1 3 5 7 9 11 12 10 8 6 4 2
+ *
+ * 3 2-4
+ * 5 1-3
+ * 7 3-5
+ * T128F only
+ * 10 8-10
+ * 12 7-9
+ * 14 10-12
+ * 15 9-11
+ */
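+/*
+ * Editor's sketch (not part of the original source; address and IRQ are
+ * illustrative): a compile time override for one board at the default
+ * 0xcc000 address on IRQ 5 would be written
+ *
+ * #define T128_OVERRIDE {{(unsigned char *) 0xcc000, 5}}
+ *
+ * and becomes the initializer of the overrides[] array declared below;
+ * additional boards get their own {address, irq} entries in the same
+ * initializer.
+ */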
+
+/*
+ * $Log: t128.c,v $
+ * Revision 1.1.1.1 1996/10/30 01:40:07 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1996/03/25 20:25:52 goel
+ * Linux driver merge.
+ *
+ */
+
+#include <asm/system.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "t128.h"
+#define AUTOPROBE_IRQ
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_t128 = {
+ PROC_SCSI_T128, 4, "t128",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+
+static struct override {
+ unsigned char *address;
+ int irq;
+} overrides
+#ifdef T128_OVERRIDE
+ [] = T128_OVERRIDE;
+#else
+ [4] = {{NULL,IRQ_AUTO}, {NULL,IRQ_AUTO}, {NULL,IRQ_AUTO},
+ {NULL,IRQ_AUTO}};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+static struct base {
+ unsigned char *address;
+ int noauto;
+} bases[] = {{(unsigned char *) 0xcc000, 0}, {(unsigned char *) 0xc8000, 0},
+ {(unsigned char *) 0xdc000, 0}, {(unsigned char *) 0xd8000, 0}};
+
+#define NO_BASES (sizeof (bases) / sizeof (struct base))
+
+static const struct signature {
+ const char *string;
+ int offset;
+} signatures[] = {
+{"TSROM: SCSI BIOS, Version 1.12", 0x36},
+};
+
+#define NO_SIGNATURES (sizeof (signatures) / sizeof (struct signature))
+
+/*
+ * Function : t128_setup(char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ *
+ */
+
+void t128_setup(char *str, int *ints) {
+ static int commandline_current = 0;
+ int i;
+ if (ints[0] != 2)
+ printk("t128_setup : usage t128=address,irq\n");
+ else
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].address = (unsigned char *) ints[1];
+ overrides[commandline_current].irq = ints[2];
+ for (i = 0; i < NO_BASES; ++i)
+ if (bases[i].address == (unsigned char *) ints[1]) {
+ bases[i].noauto = 1;
+ break;
+ }
+ ++commandline_current;
+ }
+}
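+/*
+ * Editor's note (illustrative): for a boot line of "t128=0xcc000,5" the
+ * kernel option parser hands t128_setup() ints[] = { 2, 0xcc000, 5 },
+ * so overrides[0] becomes { (unsigned char *) 0xcc000, 5 } and the
+ * matching bases[] entry is flagged noauto so the autoprobe skips it.
+ */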
+
+/*
+ * Function : int t128_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : detects and initializes T128,T128F, or T228 controllers
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+int t128_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0, current_base = 0;
+ struct Scsi_Host *instance;
+ unsigned char *base;
+ int sig, count;
+
+ tpnt->proc_dir = &proc_scsi_t128;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ base = NULL;
+
+ if (overrides[current_override].address)
+ base = overrides[current_override].address;
+ else
+ for (; !base && (current_base < NO_BASES); ++current_base) {
+#if (TDEBUG & TDEBUG_INIT)
+ printk("scsi : probing address %08x\n", (unsigned int) bases[current_base].address);
+#endif
+ for (sig = 0; sig < NO_SIGNATURES; ++sig)
+ if (!bases[current_base].noauto && !memcmp
+ (bases[current_base].address + signatures[sig].offset,
+ signatures[sig].string, strlen(signatures[sig].string))) {
+ base = bases[current_base].address;
+#if (TDEBUG & TDEBUG_INIT)
+ printk("scsi-t128 : detected board.\n");
+#endif
+ break;
+ }
+ }
+
+#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT)
+ printk("scsi-t128 : base = %08x\n", (unsigned int) base);
+#endif
+
+ if (!base)
+ break;
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->base = base;
+
+ NCR5380_init(instance, 0);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
+
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, t128_intr, SA_INTERRUPT, "t128")) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+
+#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT)
+ printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
+#endif
+
+ printk("scsi%d : at 0x%08x", instance->host_no, (int)
+ instance->base);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, T128_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : int t128_biosparam(Disk * disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, but I could be incorrect. Someone
+ * using hard disks on a Trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+ */
+
+int t128_biosparam(Disk * disk, kdev_t dev, int * ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ return 0;
+}
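+/*
+ * Editor's note (worked example): with the fixed 64-head, 32-sector
+ * translation above, one cylinder covers 64 * 32 = 2048 sectors, so
+ * cylinders = capacity >> 11. A hypothetical 1048576-sector (512 MB)
+ * disk is therefore reported as 64 heads, 32 sectors, 512 cylinders.
+ */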
+
+/*
+ * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * unsigned char *dst, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
+ * dst
+ *
+ * Inputs : dst = destination, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
+ int len) {
+ register unsigned char *reg = (unsigned char *) (instance->base +
+ T_DATA_REG_OFFSET), *d = dst;
+ register int i = len;
+
+
+#if 0
+ for (; i; --i) {
+ while (!(instance->base[T_STATUS_REG_OFFSET] & T_ST_RDY)) barrier();
+#else
+ while (!(instance->base[T_STATUS_REG_OFFSET] & T_ST_RDY)) barrier();
+ for (; i; --i) {
+#endif
+ *d++ = *reg;
+ }
+
+ if (*(instance->base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
+ unsigned char tmp;
+ volatile unsigned char *foo;
+ foo = instance->base + T_CONTROL_REG_OFFSET;
+ tmp = *foo;
+ *foo = tmp | T_CR_CT;
+ *foo = tmp;
+ printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
+ instance->host_no);
+ return -1;
+ } else
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * unsigned char *src, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
+ * src
+ *
+ * Inputs : src = source, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src,
+ int len) {
+ register unsigned char *reg = (unsigned char *) (instance->base +
+ T_DATA_REG_OFFSET), *s = src;
+ register int i = len;
+
+#if 0
+ for (; i; --i) {
+ while (!(instance->base[T_STATUS_REG_OFFSET] & T_ST_RDY)) barrier();
+#else
+ while (!(instance->base[T_STATUS_REG_OFFSET] & T_ST_RDY)) barrier();
+ for (; i; --i) {
+#endif
+ *reg = *s++;
+ }
+
+ if (*(instance->base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
+ unsigned char tmp;
+ volatile unsigned char *foo;
+ foo = instance->base + T_CONTROL_REG_OFFSET;
+ tmp = *foo;
+ *foo = tmp | T_CR_CT;
+ *foo = tmp;
+ printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
+ instance->host_no);
+ return -1;
+ } else
+ return 0;
+}
+
+#ifdef MACH
+#include "NCR5380.src"
+#else
+#include "NCR5380.c"
+#endif
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = TRANTOR_T128;
+
+#include "scsi_module.c"
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/t128.h b/i386/i386at/gpl/linux/scsi/t128.h
new file mode 100644
index 00000000..8c7cb579
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/t128.h
@@ -0,0 +1,176 @@
+/*
+ * Trantor T128/T128F/T228 defines
+ * Note : architecturally, the T100 and T130 are different and won't work
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * DISTRIBUTION RELEASE 3.
+ *
+ * For more information, please consult
+ *
+ * Trantor Systems, Ltd.
+ * T128/T128F/T228 SCSI Host Adapter
+ * Hardware Specifications
+ *
+ * Trantor Systems, Ltd.
+ * 5415 Randall Place
+ * Fremont, CA 94538
+ * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * $Log: t128.h,v $
+ * Revision 1.1.1.1 1996/10/30 01:40:07 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1996/03/25 20:25:52 goel
+ * Linux driver merge.
+ *
+ */
+
+#ifndef T128_H
+#define T128_H
+
+#define T128_PUBLIC_RELEASE 3
+
+#define TDEBUG_INIT 0x1
+#define TDEBUG_TRANSFER 0x2
+
+/*
+ * The trantor boards are memory mapped. They use an NCR5380 or
+ * equivalent (my sample board had part second sourced from ZILOG).
+ * NCR's recommended "Pseudo-DMA" architecture is used, where
+ * a PAL drives the DMA signals on the 5380 allowing fast, blind
+ * transfers with proper handshaking.
+ */
+
+/*
+ * Note : a boot switch is provided for the purpose of informing the
+ * firmware to boot or not boot from attached SCSI devices. So, I imagine
+ * there are fewer people who've yanked the ROM like they do on the Seagate
+ * to make bootup faster, and I'll probably use this for autodetection.
+ */
+#define T_ROM_OFFSET 0
+
+/*
+ * Note : my sample board *WAS NOT* populated with the SRAM, so this
+ * can't be used for autodetection without a ROM present.
+ */
+#define T_RAM_OFFSET 0x1800
+
+/*
+ * All of the registers are allocated 32 bytes of address space, except
+ * for the data register (read/write to/from the 5380 in pseudo-DMA mode)
+ */
+#define T_CONTROL_REG_OFFSET 0x1c00 /* rw */
+#define T_CR_INT 0x10 /* Enable interrupts */
+#define T_CR_CT 0x02 /* Reset watchdog timer */
+
+#define T_STATUS_REG_OFFSET 0x1c20 /* ro */
+#define T_ST_BOOT 0x80 /* Boot switch */
+#define T_ST_S3 0x40 /* User settable switches, */
+#define T_ST_S2 0x20 /* read 0 when switch is on, 1 off */
+#define T_ST_S1 0x10
+#define T_ST_PS2 0x08 /* Set for Microchannel 228 */
+#define T_ST_RDY 0x04 /* 5380 DRQ */
+#define T_ST_TIM 0x02 /* indicates 40us watchdog timer fired */
+#define T_ST_ZERO 0x01 /* Always zero */
+
+#define T_5380_OFFSET 0x1d00 /* 8 registers here, see NCR5380.h */
+
+#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */
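+/*
+ * Editor's sketch (not part of the original header): the pseudo-DMA
+ * handshake implied by the offsets above. The 5380 registers are spaced
+ * 0x20 bytes apart, so 5380 register 3, for example, sits at
+ * base + T_5380_OFFSET + 3 * 0x20. A byte-wide transfer loop polls the
+ * ready bit and then moves data through the 512-byte data window:
+ *
+ * volatile unsigned char *status = base + T_STATUS_REG_OFFSET;
+ * volatile unsigned char *data = base + T_DATA_REG_OFFSET;
+ *
+ * while (!(*status & T_ST_RDY))
+ * ; spin until the 5380 asserts DRQ
+ * *dst++ = *data; one byte per handshake, dst is hypothetical
+ *
+ * If T_ST_TIM is set afterwards, the 40 us watchdog fired and the
+ * transfer is treated as failed (see NCR5380_pread() in t128.c).
+ */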
+
+#ifndef ASM
+int t128_abort(Scsi_Cmnd *);
+int t128_biosparam(Disk *, kdev_t, int*);
+int t128_detect(Scsi_Host_Template *);
+int t128_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int t128_reset(Scsi_Cmnd *);
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 32
+#endif
+
+/*
+ * I hadn't thought of this with the earlier drivers - but to prevent
+ * macro definition conflicts, we shouldn't define all of the internal
+ * macros when this is being used solely for the host stub.
+ */
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#define TRANTOR_T128 {NULL, NULL, NULL, NULL, \
+ "Trantor T128/T128F/T228", t128_detect, NULL, \
+ NULL, \
+ NULL, t128_queue_command, t128_abort, t128_reset, NULL, \
+ t128_biosparam, \
+ /* can queue */ CAN_QUEUE, /* id */ 7, SG_ALL, \
+ /* cmd per lun */ CMD_PER_LUN , 0, 0, DISABLE_CLUSTERING}
+
+#endif
+
+#ifndef HOSTS_C
+
+#define NCR5380_implementation_fields \
+ volatile unsigned char *base
+
+#define NCR5380_local_declare() \
+ volatile unsigned char *base
+
+#define NCR5380_setup(instance) \
+ base = (volatile unsigned char *) (instance)->base
+
+#define T128_address(reg) (base + T_5380_OFFSET + ((reg) * 0x20))
+
+#if !(TDEBUG & TDEBUG_TRANSFER)
+#define NCR5380_read(reg) (*(T128_address(reg)))
+#define NCR5380_write(reg, value) (*(T128_address(reg)) = (value))
+#else
+#define NCR5380_read(reg) \
+ (((unsigned char) printk("scsi%d : read register %d at address %08x\n"\
+ , instance->host_no, (reg), T128_address(reg))), *(T128_address(reg)))
+
+#define NCR5380_write(reg, value) { \
+ printk("scsi%d : write %02x to register %d at address %08x\n", \
+ instance->host_no, (value), (reg), T128_address(reg)); \
+ *(T128_address(reg)) = (value); \
+}
+#endif
+
+#define NCR5380_intr t128_intr
+#define NCR5380_queue_command t128_queue_command
+#define NCR5380_abort t128_abort
+#define NCR5380_reset t128_reset
+
+/* 15 14 12 10 7 5 3
+ 1101 0100 1010 1000 */
+
+#define T128_IRQS 0xc4a8
+
+#endif /* else def HOSTS_C */
+#endif /* ndef ASM */
+#endif /* T128_H */
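+/*
+ * Editor's note (illustrative): inside the shared NCR5380 core a
+ * register access through the macros above expands roughly as
+ *
+ * NCR5380_local_declare(); declares: volatile unsigned char *base
+ * NCR5380_setup(instance); base = instance->base
+ * x = NCR5380_read(4); x = *(base + T_5380_OFFSET + 4 * 0x20)
+ *
+ * i.e. every 5380 register is a memory-mapped byte at a 0x20-byte
+ * stride above T_5380_OFFSET.
+ */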
diff --git a/i386/i386at/gpl/linux/scsi/u14-34f.c b/i386/i386at/gpl/linux/scsi/u14-34f.c
new file mode 100644
index 00000000..ce0e378e
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/u14-34f.c
@@ -0,0 +1,1044 @@
+/*
+ * u14-34f.c - Low-level driver for UltraStor 14F/34F SCSI host adapters.
+ *
+ * 13 Jun 1995 rev. 2.01 for linux 1.2.10
+ * HAVE_OLD_UX4F_FIRMWARE should be defined for U34F boards when
+ * the firmware prom is not the latest one (28008-006).
+ *
+ * 11 Mar 1995 rev. 2.00 for linux 1.2.0
+ * Fixed a bug which prevented media change detection for removable
+ * disk drives.
+ *
+ * 23 Feb 1995 rev. 1.18 for linux 1.1.94
+ * Added a check for scsi_register returning NULL.
+ *
+ * 11 Feb 1995 rev. 1.17 for linux 1.1.91
+ * U14F qualified to run with 32 sglists.
+ * Now DEBUG_RESET is disabled by default.
+ *
+ * 9 Feb 1995 rev. 1.16 for linux 1.1.90
+ * Use host->wish_block instead of host->block.
+ *
+ * 8 Feb 1995 rev. 1.15 for linux 1.1.89
+ * Cleared target_time_out counter while performing a reset.
+ *
+ * 28 Jan 1995 rev. 1.14 for linux 1.1.86
+ * Added module support.
+ * Log and do a retry when a disk drive returns a target status
+ * different from zero on a recovered error.
+ * Auto detects if U14F boards have an old firmware revision.
+ * Max number of scatter/gather lists set to 16 for all boards
+ * (most installations run fine using 33 sglists, while others
+ * have problems when using more than 16).
+ *
+ * 16 Jan 1995 rev. 1.13 for linux 1.1.81
+ * Display a message if check_region detects a port address
+ * already in use.
+ *
+ * 15 Dec 1994 rev. 1.12 for linux 1.1.74
+ * The host->block flag is set for all the detected ISA boards.
+ *
+ * 30 Nov 1994 rev. 1.11 for linux 1.1.68
+ * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only.
+ * Added optional support for using a single board at a time.
+ *
+ * 14 Nov 1994 rev. 1.10 for linux 1.1.63
+ *
+ * 28 Oct 1994 rev. 1.09 for linux 1.1.58 Final BETA release.
+ * 16 Jul 1994 rev. 1.00 for linux 1.1.29 Initial ALPHA release.
+ *
+ * This driver is a total replacement of the original UltraStor
+ * scsi driver, but it supports ONLY the 14F and 34F boards.
+ * It can be configured in the same kernel in which the original
+ * ultrastor driver is configured to allow the original U24F
+ * support.
+ *
+ * Multiple U14F and/or U34F host adapters are supported.
+ *
+ * Copyright (C) 1994, 1995 Dario Ballabio (dario@milano.europe.dg.com)
+ *
+ * WARNING: if your 14/34F board has an old firmware revision (see below)
+ * you must change "#undef" into "#define" in the following
+ * statement.
+ */
+#undef HAVE_OLD_UX4F_FIRMWARE
+/*
+ * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
+ * performance SCSI-2 host adapters.
+ * Here is the scoop on the various models:
+ *
+ * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
+ * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
+ * 34F - VESA Local-Bus Bus Master HA (no WD1003 emulation).
+ *
+ * This code has been tested with up to two U14F boards, using both
+ * firmware 28004-005/38004-004 (BIOS rev. 2.00) and the latest firmware
+ * 28004-006/38004-005 (BIOS rev. 2.01).
+ *
+ * The latest firmware is required in order to get reliable operations when
+ * clustering is enabled. ENABLE_CLUSTERING provides a performance increase
+ * up to 50% on sequential access.
+ *
+ * Since the Scsi_Host_Template structure is shared among all 14F and 34F,
+ * the last setting of use_clustering is in effect for all of these boards.
+ *
+ * Here a sample configuration using two U14F boards:
+ *
+ U14F0: PORT 0x330, BIOS 0xc8000, IRQ 11, DMA 5, SG 32, Mbox 16, CmdLun 2, C1.
+ U14F1: PORT 0x340, BIOS 0x00000, IRQ 10, DMA 6, SG 32, Mbox 16, CmdLun 2, C1.
+ *
+ * The boot controller must have its BIOS enabled, while other boards can
+ * have their BIOS disabled, or enabled at a higher address.
+ * Boards are named Ux4F0, Ux4F1..., according to the port address order in
+ * the io_port[] array.
+ *
+ * The following facts are based on real testing results (not on
+ * documentation) on the above U14F board.
+ *
+ * - The U14F board should be jumpered for a bus-on time less than or equal
+ * to 7 microseconds, while the default is 11 microseconds. This is in
+ * order to get acceptable performance while using a floppy drive and a
+ * hard disk together. The jumpering for 7 microseconds is: JP13 pin 15-16,
+ * JP14 pin 7-8 and pin 9-10.
+ * The reduction has only a small impact on scsi performance.
+ *
+ * - If the scsi bus length exceeds 3 m, the scsi bus speed needs to be
+ * reduced from 10 MHz to 5 MHz (do this by inserting a jumper on JP13 pin 7-8).
+ *
+ * - If the U14F on-board firmware is older than 28004-006/38004-005,
+ * the U14F board is unable to provide reliable operations if the scsi
+ * request length exceeds 16 Kbyte. When this length is exceeded the
+ * behavior is:
+ * - adapter_status equal to 0x96, 0xa3, 0x93, or 0x94;
+ * - adapter_status equal to 0 and target_status equal to 2 for all targets
+ * in the next operation following the reset.
+ * This sequence takes a long time (>3 seconds), so in the meantime
+ * the SD_TIMEOUT in sd.c could expire giving rise to scsi aborts
+ * (SD_TIMEOUT has been increased from 3 to 6 seconds in 1.1.31).
+ * Because of this I had to DISABLE_CLUSTERING and to work around the
+ * bus reset in the interrupt service routine, returning DID_BUS_BUSY
+ * so that the operations are retried without complaints from the scsi.c
+ * code.
+ * Any reset of the scsi bus is going to kill tape operations, since
+ * no retry is allowed for tapes. Bus resets are more likely when the
+ * scsi bus is under heavy load.
+ * Requests using scatter/gather have a maximum length of 16 x 1024 bytes
+ * when DISABLE_CLUSTERING is in effect, but unscattered requests could be
+ * larger than 16Kbyte.
+ *
+ * The new firmware has fixed all the above problems.
+ *
+ * For U34F boards the latest bios prom is 38008-002 (BIOS rev. 2.01),
+ * the latest firmware prom is 28008-006. Older firmware 28008-005 has
+ * problems when using more than 16 scatter/gather lists.
+ *
+ * In order to support multiple ISA boards in a reliable way,
+ * the driver sets host->wish_block = TRUE for all ISA boards.
+ */
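+/*
+ * Editor's note (illustrative): enabling the workaround described above
+ * is a one-line change near the top of this file,
+ *
+ * #define HAVE_OLD_UX4F_FIRMWARE
+ *
+ * which makes port_detect() cap sg_tablesize at MAX_SAFE_SGLIST (16)
+ * for both board types and additionally disable clustering on the ISA
+ * (U14F) boards.
+ */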
+
+#if defined(MODULE)
+#include <linux/module.h>
+#include <linux/version.h>
+#endif
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include "u14-34f.h"
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_u14_34f = {
+ PROC_SCSI_U14_34F, 7, "u14_34f",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* Values for the PRODUCT_ID ports for the 14/34F */
+#define PRODUCT_ID1 0x56
+#define PRODUCT_ID2 0x40 /* NOTE: Only upper nibble is used */
+
+/* Subversion values */
+#define ISA 0
+#define ESA 1
+
+#define OP_HOST_ADAPTER 0x1
+#define OP_SCSI 0x2
+#define OP_RESET 0x4
+#define DTD_SCSI 0x0
+#define DTD_IN 0x1
+#define DTD_OUT 0x2
+#define DTD_NONE 0x3
+#define HA_CMD_INQUIRY 0x1
+#define HA_CMD_SELF_DIAG 0x2
+#define HA_CMD_READ_BUFF 0x3
+#define HA_CMD_WRITE_BUFF 0x4
+
+#undef DEBUG_DETECT
+#undef DEBUG_INTERRUPT
+#undef DEBUG_STATISTICS
+#undef DEBUG_RESET
+
+#define MAX_TARGET 8
+#define MAX_IRQ 16
+#define MAX_BOARDS 4
+#define MAX_MAILBOXES 16
+#define MAX_SGLIST 32
+#define MAX_SAFE_SGLIST 16
+#define MAX_CMD_PER_LUN 2
+
+#define FALSE 0
+#define TRUE 1
+#define FREE 0
+#define IN_USE 1
+#define LOCKED 2
+#define IN_RESET 3
+#define IGNORE 4
+#define NO_IRQ 0xff
+#define NO_DMA 0xff
+#define MAXLOOP 200000
+
+#define REG_LCL_MASK 0
+#define REG_LCL_INTR 1
+#define REG_SYS_MASK 2
+#define REG_SYS_INTR 3
+#define REG_PRODUCT_ID1 4
+#define REG_PRODUCT_ID2 5
+#define REG_CONFIG1 6
+#define REG_CONFIG2 7
+#define REG_OGM 8
+#define REG_ICM 12
+#define REGION_SIZE 13
+#define BSY_ASSERTED 0x01
+#define IRQ_ASSERTED 0x01
+#define CMD_RESET 0xc0
+#define CMD_OGM_INTR 0x01
+#define CMD_CLR_INTR 0x01
+#define CMD_ENA_INTR 0x81
+#define ASOK 0x00
+#define ASST 0x91
+
+#define PACKED __attribute__((packed))
+
+/* MailBox SCSI Command Packet */
+struct mscp {
+ unsigned char opcode: 3; /* type of command */
+ unsigned char xdir: 2; /* data transfer direction */
+ unsigned char dcn: 1; /* disable disconnect */
+ unsigned char ca: 1; /* use cache (if available) */
+ unsigned char sg: 1; /* scatter/gather operation */
+ unsigned char target: 3; /* target SCSI id */
+ unsigned char ch_no: 2; /* SCSI channel (always 0 for 14f) */
+ unsigned char lun: 3; /* logical unit number */
+ unsigned int data_address PACKED; /* transfer data pointer */
+ unsigned int data_len PACKED; /* length in bytes */
+ unsigned int command_link PACKED; /* for linking command chains */
+ unsigned char scsi_command_link_id; /* identifies command in chain */
+ unsigned char use_sg; /* (if sg is set) 8 bytes per list */
+ unsigned char sense_len;
+ unsigned char scsi_cdbs_len; /* 6, 10, or 12 */
+ unsigned char scsi_cdbs[12]; /* SCSI commands */
+ unsigned char adapter_status; /* non-zero indicates HA error */
+ unsigned char target_status; /* non-zero indicates target error */
+ unsigned int sense_addr PACKED;
+
+ Scsi_Cmnd *SCpnt;
+
+ struct sg_list {
+ unsigned int address; /* Segment Address */
+ unsigned int num_bytes; /* Segment Length */
+ } sglist[MAX_SGLIST];
+
+ unsigned int index; /* cp index */
+ };
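+/*
+ * Editor's note (illustrative, mirrors u14_34f_queuecommand() below):
+ * for a plain, non scatter/gather request one of these packets is
+ * filled roughly as
+ *
+ * memset(cpp, 0, sizeof(struct mscp));
+ * cpp->opcode = OP_SCSI;
+ * cpp->xdir = DTD_SCSI;
+ * cpp->target = SCpnt->target;
+ * cpp->lun = SCpnt->lun;
+ * cpp->data_address = (unsigned int) SCpnt->request_buffer;
+ * cpp->data_len = SCpnt->request_bufflen;
+ * cpp->sense_addr = (unsigned int) SCpnt->sense_buffer;
+ * cpp->sense_len = sizeof SCpnt->sense_buffer;
+ * cpp->scsi_cdbs_len = SCpnt->cmd_len;
+ * memcpy(cpp->scsi_cdbs, SCpnt->cmnd, cpp->scsi_cdbs_len);
+ *
+ * and the packet's address is then written to the adapter through the
+ * OGM register.
+ */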
+
+struct hostdata {
+ struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */
+ unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */
+ unsigned int last_cp_used; /* Index of last mailbox used */
+ unsigned int iocount; /* Total i/o done for this board */
+ unsigned int multicount; /* Total ... in second ihdlr loop */
+ int board_number; /* Number of this board */
+ char board_name[16]; /* Name of this board */
+ char board_id[256]; /* data from INQUIRY on this board */
+ int in_reset; /* True if board is doing a reset */
+ int target_time_out[MAX_TARGET]; /* N. of timeout errors on target */
+ int target_reset[MAX_TARGET]; /* If TRUE redo operation on target */
+ unsigned char subversion; /* Bus type, either ISA or ESA */
+ unsigned char heads;
+ unsigned char sectors;
+
+ /* slot != 0 for the U24F, slot == 0 for both the U14F and U34F */
+ unsigned char slot;
+ };
+
+static struct Scsi_Host * sh[MAX_BOARDS + 1];
+static const char* driver_name = "Ux4F";
+static unsigned int irqlist[MAX_IRQ], calls[MAX_IRQ];
+
+#define HD(board) ((struct hostdata *) &sh[board]->hostdata)
+#define BN(board) (HD(board)->board_name)
+
+static void u14_34f_interrupt_handler(int, struct pt_regs *);
+static int do_trace = FALSE;
+
+static inline unchar wait_on_busy(ushort iobase) {
+ unsigned int loop = MAXLOOP;
+
+ while (inb(iobase + REG_LCL_INTR) & BSY_ASSERTED)
+ if (--loop == 0) return TRUE;
+
+ return FALSE;
+}
+
+static int board_inquiry(unsigned int j) {
+ struct mscp *cpp;
+ unsigned int time, limit = 0;
+
+ cpp = &HD(j)->cp[0];
+ memset(cpp, 0, sizeof(struct mscp));
+ cpp->opcode = OP_HOST_ADAPTER;
+ cpp->xdir = DTD_IN;
+ cpp->data_address = (unsigned int) HD(j)->board_id;
+ cpp->data_len = sizeof(HD(j)->board_id);
+ cpp->scsi_cdbs_len = 6;
+ cpp->scsi_cdbs[0] = HA_CMD_INQUIRY;
+
+ if (wait_on_busy(sh[j]->io_port)) {
+ printk("%s: board_inquiry, adapter busy.\n", BN(j));
+ return TRUE;
+ }
+
+ HD(j)->cp_stat[0] = IGNORE;
+
+ /* Clear the interrupt indication */
+ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
+
+ /* Store pointer in OGM address bytes */
+ outl((unsigned int)cpp, sh[j]->io_port + REG_OGM);
+
+ /* Issue OGM interrupt */
+ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
+
+ sti();
+ time = jiffies;
+ while (jiffies < (time + 100) && limit++ < 100000000);
+ cli();
+
+ if (cpp->adapter_status || HD(j)->cp_stat[0] != FREE) {
+ HD(j)->cp_stat[0] = FREE;
+ printk("%s: board_inquiry, err 0x%x.\n", BN(j), cpp->adapter_status);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+static inline int port_detect(ushort *port_base, unsigned int j,
+ Scsi_Host_Template * tpnt) {
+ unsigned char irq, dma_channel, subversion;
+ unsigned char in_byte;
+
+ /* Allowed BIOS base addresses (NULL indicates reserved) */
+ void *bios_segment_table[8] = {
+ NULL,
+ (void *) 0xc4000, (void *) 0xc8000, (void *) 0xcc000, (void *) 0xd0000,
+ (void *) 0xd4000, (void *) 0xd8000, (void *) 0xdc000
+ };
+
+ /* Allowed IRQs */
+ unsigned char interrupt_table[4] = { 15, 14, 11, 10 };
+
+ /* Allowed DMA channels for ISA (0 indicates reserved) */
+ unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };
+
+ /* Head/sector mappings */
+ struct {
+ unsigned char heads;
+ unsigned char sectors;
+ } mapping_table[4] = {
+ { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 }
+ };
+
+ struct config_1 {
+ unsigned char bios_segment: 3;
+ unsigned char removable_disks_as_fixed: 1;
+ unsigned char interrupt: 2;
+ unsigned char dma_channel: 2;
+ } config_1;
+
+ struct config_2 {
+ unsigned char ha_scsi_id: 3;
+ unsigned char mapping_mode: 2;
+ unsigned char bios_drive_number: 1;
+ unsigned char tfr_port: 2;
+ } config_2;
+
+ char name[16];
+
+ sprintf(name, "%s%d", driver_name, j);
+
+ if(check_region(*port_base, REGION_SIZE)) {
+ printk("%s: address 0x%03x in use, skipping probe.\n",
+ name, *port_base);
+ return FALSE;
+ }
+
+ if (inb(*port_base + REG_PRODUCT_ID1) != PRODUCT_ID1) return FALSE;
+
+ in_byte = inb(*port_base + REG_PRODUCT_ID2);
+
+ if ((in_byte & 0xf0) != PRODUCT_ID2) return FALSE;
+
+ *(char *)&config_1 = inb(*port_base + REG_CONFIG1);
+ *(char *)&config_2 = inb(*port_base + REG_CONFIG2);
+
+ irq = interrupt_table[config_1.interrupt];
+ dma_channel = dma_channel_table[config_1.dma_channel];
+ subversion = (in_byte & 0x0f);
+
+ /* Board detected, allocate its IRQ if not already done */
+ if ((irq >= MAX_IRQ) || ((irqlist[irq] == NO_IRQ) && request_irq
+ (irq, u14_34f_interrupt_handler, SA_INTERRUPT, driver_name))) {
+ printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
+ return FALSE;
+ }
+
+ if (subversion == ISA && request_dma(dma_channel, driver_name)) {
+ printk("%s: unable to allocate DMA channel %u, detaching.\n",
+ name, dma_channel);
+ free_irq(irq);
+ return FALSE;
+ }
+
+ sh[j] = scsi_register(tpnt, sizeof(struct hostdata));
+
+ if (sh[j] == NULL) {
+ printk("%s: unable to register host, detaching.\n", name);
+
+ if (irqlist[irq] == NO_IRQ) free_irq(irq);
+
+ if (subversion == ISA) free_dma(dma_channel);
+
+ return FALSE;
+ }
+
+ sh[j]->io_port = *port_base;
+ sh[j]->n_io_port = REGION_SIZE;
+ sh[j]->base = bios_segment_table[config_1.bios_segment];
+ sh[j]->irq = irq;
+ sh[j]->sg_tablesize = MAX_SGLIST;
+ sh[j]->this_id = config_2.ha_scsi_id;
+ sh[j]->can_queue = MAX_MAILBOXES;
+ sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;
+
+#if defined(DEBUG_DETECT)
+ {
+ unsigned char sys_mask, lcl_mask;
+
+ sys_mask = inb(sh[j]->io_port + REG_SYS_MASK);
+ lcl_mask = inb(sh[j]->io_port + REG_LCL_MASK);
+ printk("SYS_MASK 0x%x, LCL_MASK 0x%x.\n", sys_mask, lcl_mask);
+ }
+#endif
+
+ /* If BIOS is disabled, force enable interrupts */
+ if (sh[j]->base == 0) outb(CMD_ENA_INTR, sh[j]->io_port + REG_SYS_MASK);
+
+ /* Register the I/O space that we use */
+ request_region(sh[j]->io_port, REGION_SIZE, driver_name);
+
+ memset(HD(j), 0, sizeof(struct hostdata));
+ HD(j)->heads = mapping_table[config_2.mapping_mode].heads;
+ HD(j)->sectors = mapping_table[config_2.mapping_mode].sectors;
+ HD(j)->subversion = subversion;
+ HD(j)->board_number = j;
+ irqlist[irq] = j;
+
+ if (HD(j)->subversion == ESA) {
+
+#if defined (HAVE_OLD_UX4F_FIRMWARE)
+ sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
+#endif
+
+ sh[j]->dma_channel = NO_DMA;
+ sh[j]->unchecked_isa_dma = FALSE;
+ sprintf(BN(j), "U34F%d", j);
+ }
+ else {
+ sh[j]->wish_block = TRUE;
+
+#if defined (HAVE_OLD_UX4F_FIRMWARE)
+ sh[j]->hostt->use_clustering = DISABLE_CLUSTERING;
+ sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
+#endif
+
+ sh[j]->dma_channel = dma_channel;
+ sh[j]->unchecked_isa_dma = TRUE;
+ sprintf(BN(j), "U14F%d", j);
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ }
+
+ if (HD(j)->subversion == ISA && !board_inquiry(j)) {
+ HD(j)->board_id[40] = 0;
+
+ if (strcmp(&HD(j)->board_id[32], "06000600")) {
+ printk("%s: %s.\n", BN(j), &HD(j)->board_id[8]);
+ printk("%s: firmware %s is outdated, FW PROM should be 28004-006.\n",
+ BN(j), &HD(j)->board_id[32]);
+ sh[j]->hostt->use_clustering = DISABLE_CLUSTERING;
+ sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
+ }
+ }
+
+ printk("%s: PORT 0x%03x, BIOS 0x%05x, IRQ %u, DMA %u, SG %d, "\
+ "Mbox %d, CmdLun %d, C%d.\n", BN(j), sh[j]->io_port,
+ (int)sh[j]->base, sh[j]->irq,
+ sh[j]->dma_channel, sh[j]->sg_tablesize,
+ sh[j]->can_queue, sh[j]->cmd_per_lun,
+ sh[j]->hostt->use_clustering);
+ return TRUE;
+}
+
+int u14_34f_detect (Scsi_Host_Template * tpnt) {
+ unsigned int j = 0, k, flags;
+
+ ushort io_port[] = {
+ 0x330, 0x340, 0x230, 0x240, 0x210, 0x130, 0x140, 0x0
+ };
+
+ ushort *port_base = io_port;
+
+ tpnt->proc_dir = &proc_scsi_u14_34f;
+
+ save_flags(flags);
+ cli();
+
+ for (k = 0; k < MAX_IRQ; k++) {
+ irqlist[k] = NO_IRQ;
+ calls[k] = 0;
+ }
+
+ for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL;
+
+ while (*port_base) {
+
+ if (j < MAX_BOARDS && port_detect(port_base, j, tpnt)) j++;
+
+ port_base++;
+ }
+
+ if (j > 0)
+ printk("UltraStor 14F/34F: Copyright (C) 1994, 1995 Dario Ballabio.\n");
+
+ restore_flags(flags);
+ return j;
+}
+
+static inline void build_sg_list(struct mscp *cpp, Scsi_Cmnd *SCpnt) {
+ unsigned int k, data_len = 0;
+ struct scatterlist * sgpnt;
+
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+
+ for (k = 0; k < SCpnt->use_sg; k++) {
+ cpp->sglist[k].address = (unsigned int) sgpnt[k].address;
+ cpp->sglist[k].num_bytes = sgpnt[k].length;
+ data_len += sgpnt[k].length;
+ }
+
+ cpp->use_sg = SCpnt->use_sg;
+ cpp->data_address = (unsigned int) cpp->sglist;
+ cpp->data_len = data_len;
+}
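+/*
+ * Editor's note (illustrative): for a request split into, say, three
+ * scatterlist segments the loop above produces three {address,
+ * num_bytes} pairs in cpp->sglist[], sets cpp->use_sg = 3, points
+ * cpp->data_address at the sglist array itself and sets cpp->data_len
+ * to the sum of the segment lengths, leaving the adapter to walk the
+ * list on its own.
+ */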
+
+int u14_34f_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
+ unsigned int i, j, k, flags;
+ struct mscp *cpp;
+
+ save_flags(flags);
+ cli();
+ /* j is the board number */
+ j = ((struct hostdata *) SCpnt->host->hostdata)->board_number;
+
+ if (!done) panic("%s: qcomm, pid %ld, null done.\n", BN(j), SCpnt->pid);
+
+ /* i is the mailbox number, look for the first free mailbox
+ starting from last_cp_used */
+ i = HD(j)->last_cp_used + 1;
+
+ for (k = 0; k < sh[j]->can_queue; k++, i++) {
+
+ if (i >= sh[j]->can_queue) i = 0;
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ HD(j)->last_cp_used = i;
+ break;
+ }
+ }
+
+ if (k == sh[j]->can_queue) {
+ printk("%s: qcomm, no free mailbox, resetting.\n", BN(j));
+
+ if (HD(j)->in_reset)
+ printk("%s: qcomm, already in reset.\n", BN(j));
+ else if (u14_34f_reset(SCpnt) == SCSI_RESET_SUCCESS)
+ panic("%s: qcomm, SCSI_RESET_SUCCESS.\n", BN(j));
+
+ SCpnt->result = DID_BUS_BUSY << 16;
+ SCpnt->host_scribble = NULL;
+ printk("%s: qcomm, pid %ld, DID_BUS_BUSY, done.\n", BN(j), SCpnt->pid);
+ restore_flags(flags);
+ done(SCpnt);
+ return 0;
+ }
+
+ /* Set pointer to control packet structure */
+ cpp = &HD(j)->cp[i];
+
+ memset(cpp, 0, sizeof(struct mscp));
+ SCpnt->scsi_done = done;
+ cpp->index = i;
+ SCpnt->host_scribble = (unsigned char *) &cpp->index;
+
+ if (do_trace) printk("%s: qcomm, mbox %d, target %d, pid %ld.\n",
+ BN(j), i, SCpnt->target, SCpnt->pid);
+
+ cpp->opcode = OP_SCSI;
+ cpp->xdir = DTD_SCSI;
+ cpp->target = SCpnt->target;
+ cpp->lun = SCpnt->lun;
+ cpp->SCpnt = SCpnt;
+ cpp->sense_addr = (unsigned int) SCpnt->sense_buffer;
+ cpp->sense_len = sizeof SCpnt->sense_buffer;
+
+ if (SCpnt->use_sg) {
+ cpp->sg = TRUE;
+ build_sg_list(cpp, SCpnt);
+ }
+ else {
+ cpp->data_address = (unsigned int)SCpnt->request_buffer;
+ cpp->data_len = SCpnt->request_bufflen;
+ }
+
+ cpp->scsi_cdbs_len = SCpnt->cmd_len;
+ memcpy(cpp->scsi_cdbs, SCpnt->cmnd, cpp->scsi_cdbs_len);
+
+ if (wait_on_busy(sh[j]->io_port)) {
+ SCpnt->result = DID_ERROR << 16;
+ SCpnt->host_scribble = NULL;
+ printk("%s: qcomm, target %d, pid %ld, adapter busy, DID_ERROR, done.\n",
+ BN(j), SCpnt->target, SCpnt->pid);
+ restore_flags(flags);
+ done(SCpnt);
+ return 0;
+ }
+
+ /* Store pointer in OGM address bytes */
+ outl((unsigned int)cpp, sh[j]->io_port + REG_OGM);
+
+ /* Issue OGM interrupt */
+ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
+
+ HD(j)->cp_stat[i] = IN_USE;
+ restore_flags(flags);
+ return 0;
+}
+
+int u14_34f_abort(Scsi_Cmnd *SCarg) {
+ unsigned int i, j, flags;
+
+ save_flags(flags);
+ cli();
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+
+ if (SCarg->host_scribble == NULL) {
+ printk("%s: abort, target %d, pid %ld inactive.\n",
+ BN(j), SCarg->target, SCarg->pid);
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ i = *(unsigned int *)SCarg->host_scribble;
+ printk("%s: abort, mbox %d, target %d, pid %ld.\n",
+ BN(j), i, SCarg->target, SCarg->pid);
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
+
+ if (wait_on_busy(sh[j]->io_port)) {
+ printk("%s: abort, timeout error.\n", BN(j));
+ restore_flags(flags);
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", BN(j), i);
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", BN(j), i);
+
+ if (SCarg != HD(j)->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
+
+ restore_flags(flags);
+ return SCSI_ABORT_SNOOZE;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
+ restore_flags(flags);
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", BN(j), i);
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+ restore_flags(flags);
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
+}
+
+int u14_34f_reset(Scsi_Cmnd * SCarg) {
+ unsigned int i, j, flags, time, k, limit = 0;
+ int arg_done = FALSE;
+ Scsi_Cmnd *SCpnt;
+
+ save_flags(flags);
+ cli();
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+ printk("%s: reset, enter, target %d, pid %ld.\n",
+ BN(j), SCarg->target, SCarg->pid);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->pid);
+
+ if (HD(j)->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", BN(j));
+ restore_flags(flags);
+ return SCSI_RESET_ERROR;
+ }
+
+ if (wait_on_busy(sh[j]->io_port)) {
+ printk("%s: reset, exit, timeout error.\n", BN(j));
+ restore_flags(flags);
+ return SCSI_RESET_ERROR;
+ }
+
+ for (k = 0; k < MAX_TARGET; k++) HD(j)->target_reset[k] = TRUE;
+
+ for (k = 0; k < MAX_TARGET; k++) HD(j)->target_time_out[k] = 0;
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == FREE) continue;
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
+ continue;
+ }
+
+ SCpnt = HD(j)->cp[i].SCpnt;
+ HD(j)->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+
+ if (SCpnt == NULL)
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
+
+ if (SCpnt == SCarg) arg_done = TRUE;
+ }
+
+ if (wait_on_busy(sh[j]->io_port)) {
+ printk("%s: reset, cannot reset, timeout error.\n", BN(j));
+ restore_flags(flags);
+ return SCSI_RESET_ERROR;
+ }
+
+ outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR);
+ printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
+
+#if defined (DEBUG_RESET)
+ do_trace = TRUE;
+#endif
+
+ HD(j)->in_reset = TRUE;
+ sti();
+ time = jiffies;
+ while (jiffies < (time + 100) && limit++ < 100000000);
+ cli();
+ printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ /* Skip mailboxes already set free by interrupt */
+ if (HD(j)->cp_stat[i] != IN_RESET) continue;
+
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(j)->cp_stat[i] = LOCKED;
+
+ printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ restore_flags(flags);
+ SCpnt->scsi_done(SCpnt);
+ cli();
+ }
+
+ HD(j)->in_reset = FALSE;
+ do_trace = FALSE;
+ restore_flags(flags);
+
+ if (arg_done) {
+ printk("%s: reset, exit, success.\n", BN(j));
+ return SCSI_RESET_SUCCESS;
+ }
+ else {
+ printk("%s: reset, exit, wakeup.\n", BN(j));
+ return SCSI_RESET_PUNT;
+ }
+}
+
+int u14_34f_biosparam(Disk * disk, kdev_t dev, int * dkinfo) {
+ unsigned int j = 0;
+ int size = disk->capacity;
+
+ dkinfo[0] = HD(j)->heads;
+ dkinfo[1] = HD(j)->sectors;
+ dkinfo[2] = size / (HD(j)->heads * HD(j)->sectors);
+ return 0;
+}
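+/*
+ * Editor's note (worked example): with mapping_mode 1 from the table in
+ * port_detect() (64 heads, 32 sectors) a 2097152-sector disk is reported
+ * as 2097152 / (64 * 32) = 1024 cylinders; with mapping_mode 0
+ * (16 heads, 63 sectors) the same disk would be reported as
+ * 2097152 / (16 * 63) = 2080 cylinders (integer division).
+ */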
+
+static void u14_34f_interrupt_handler(int irq, struct pt_regs * regs) {
+ Scsi_Cmnd *SCpnt;
+ unsigned int i, j, k, flags, status, tstatus, loops, total_loops = 0;
+ struct mscp *spp;
+
+ save_flags(flags);
+ cli();
+
+ if (irqlist[irq] == NO_IRQ) {
+ printk("%s, ihdlr, irq %d, unexpected interrupt.\n", driver_name, irq);
+ restore_flags(flags);
+ return;
+ }
+
+ if (do_trace) printk("%s: ihdlr, enter, irq %d, calls %d.\n",
+ driver_name, irq, calls[irq]);
+
+ /* Service all the boards configured on this irq */
+ for (j = 0; sh[j] != NULL; j++) {
+
+ if (sh[j]->irq != irq) continue;
+
+ loops = 0;
+
+ /* Loop until all interrupts for a board are serviced */
+ while (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED) {
+ total_loops++;
+ loops++;
+
+ if (do_trace) printk("%s: ihdlr, start service, count %d.\n",
+ BN(j), HD(j)->iocount);
+
+ spp = (struct mscp *)inl(sh[j]->io_port + REG_ICM);
+
+ /* Clear interrupt pending flag */
+ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
+
+ i = spp - HD(j)->cp;
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: ihdlr, invalid mscp address.\n", BN(j));
+
+ if (HD(j)->cp_stat[i] == IGNORE) {
+ HD(j)->cp_stat[i] = FREE;
+ continue;
+ }
+ else if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: ihdlr, mbox %d unlocked, count %d.\n",
+ BN(j), i, HD(j)->iocount);
+ continue;
+ }
+ else if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: ihdlr, mbox %d is free, count %d.\n",
+ BN(j), i, HD(j)->iocount);
+ continue;
+ }
+ else if (HD(j)->cp_stat[i] == IN_RESET)
+ printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i);
+ else if (HD(j)->cp_stat[i] != IN_USE)
+ panic("%s: ihdlr, mbox %d, invalid cp_stat.\n", BN(j), i);
+
+ HD(j)->cp_stat[i] = FREE;
+ SCpnt = spp->SCpnt;
+
+ if (SCpnt == NULL)
+ panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n",
+ BN(j), i, SCpnt->pid, SCpnt);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d,"\
+ " irq %d.\n", BN(j), i, SCpnt->pid,
+ *(unsigned int *)SCpnt->host_scribble, irq);
+
+ tstatus = status_byte(spp->target_status);
+
+ switch (spp->adapter_status) {
+ case ASOK: /* status OK */
+
+ /* Forces a reset if a disk drive keeps returning BUSY */
+ if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE)
+ status = DID_ERROR << 16;
+
+ /* If there was a bus reset, redo operation on each target */
+ else if (tstatus != GOOD
+ && SCpnt->device->type == TYPE_DISK
+ && HD(j)->target_reset[SCpnt->target])
+ status = DID_BUS_BUSY << 16;
+
+ /* Works around a flaw in scsi.c */
+ else if (tstatus == CHECK_CONDITION
+ && SCpnt->device->type == TYPE_DISK
+ && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
+ status = DID_BUS_BUSY << 16;
+
+ else
+ status = DID_OK << 16;
+
+ if (tstatus == GOOD)
+ HD(j)->target_reset[SCpnt->target] = FALSE;
+
+ if (spp->target_status && SCpnt->device->type == TYPE_DISK)
+ printk("%s: ihdlr, target %d:%d, pid %ld, target_status "\
+ "0x%x, sense key 0x%x.\n", BN(j),
+ SCpnt->target, SCpnt->lun, SCpnt->pid,
+ spp->target_status, SCpnt->sense_buffer[2]);
+
+ HD(j)->target_time_out[SCpnt->target] = 0;
+
+ break;
+ case ASST: /* Selection Time Out */
+
+ if (HD(j)->target_time_out[SCpnt->target] > 1)
+ status = DID_ERROR << 16;
+ else {
+ status = DID_TIME_OUT << 16;
+ HD(j)->target_time_out[SCpnt->target]++;
+ }
+
+ break;
+ case 0x92: /* Data over/under-run */
+ case 0x93: /* Unexpected bus free */
+ case 0x94: /* Target bus phase sequence failure */
+ case 0x96: /* Illegal SCSI command */
+ case 0xa3: /* SCSI bus reset error */
+
+ if (SCpnt->device->type != TYPE_TAPE)
+ status = DID_BUS_BUSY << 16;
+ else
+ status = DID_ERROR << 16;
+
+ for (k = 0; k < MAX_TARGET; k++)
+ HD(j)->target_reset[k] = TRUE;
+
+ break;
+ case 0x01: /* Invalid command */
+ case 0x02: /* Invalid parameters */
+ case 0x03: /* Invalid data list */
+ case 0x84: /* SCSI bus abort error */
+ case 0x9b: /* Auto request sense error */
+ case 0x9f: /* Unexpected command complete message error */
+ case 0xff: /* Invalid parameter in the S/G list */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ }
+
+ SCpnt->result = status | spp->target_status;
+ HD(j)->iocount++;
+
+ if (loops > 1) HD(j)->multicount++;
+
+#if defined (DEBUG_INTERRUPT)
+ if (SCpnt->result || do_trace)
+#else
+ if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) ||
+ (spp->adapter_status != ASOK &&
+ spp->adapter_status != ASST && HD(j)->iocount <= 1000) ||
+ do_trace)
+#endif
+ printk("%s: ihdlr, mbox %d, err 0x%x:%x,"\
+ " target %d:%d, pid %ld, count %d.\n",
+ BN(j), i, spp->adapter_status, spp->target_status,
+ SCpnt->target, SCpnt->lun, SCpnt->pid, HD(j)->iocount);
+
+ /* Set the command state to inactive */
+ SCpnt->host_scribble = NULL;
+
+ restore_flags(flags);
+ SCpnt->scsi_done(SCpnt);
+ cli();
+
+ } /* Multiple command loop */
+
+ } /* Boards loop */
+
+ calls[irq]++;
+
+ if (total_loops == 0)
+ printk("%s: ihdlr, irq %d, no command completed, calls %d.\n",
+ driver_name, irq, calls[irq]);
+
+ if (do_trace) printk("%s: ihdlr, exit, irq %d, calls %d.\n",
+ driver_name, irq, calls[irq]);
+
+#if defined (DEBUG_STATISTICS)
+ if ((calls[irq] % 100000) == 10000)
+ for (j = 0; sh[j] != NULL; j++)
+ printk("%s: ihdlr, calls %d, count %d, multi %d.\n", BN(j),
+ calls[(sh[j]->irq)], HD(j)->iocount, HD(j)->multicount);
+#endif
+
+ restore_flags(flags);
+ return;
+}
+
+#if defined(MODULE)
+Scsi_Host_Template driver_template = ULTRASTOR_14_34F;
+
+#include "scsi_module.c"
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/u14-34f.h b/i386/i386at/gpl/linux/scsi/u14-34f.h
new file mode 100644
index 00000000..2988824e
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/u14-34f.h
@@ -0,0 +1,38 @@
+/*
+ * u14-34f.h - used by the low-level driver for UltraStor 14F/34F
+ */
+#ifndef _U14_34F_H
+#define _U14_34F_H
+
+int u14_34f_detect(Scsi_Host_Template *);
+int u14_34f_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int u14_34f_abort(Scsi_Cmnd *);
+int u14_34f_reset(Scsi_Cmnd *);
+int u14_34f_biosparam(Disk *, kdev_t, int *);
+
+#define U14_34F_VERSION "2.01.00"
+
+#define ULTRASTOR_14_34F { \
+ NULL, /* Ptr for modules */ \
+ NULL, /* usage count for modules */ \
+ NULL, \
+ NULL, \
+ "UltraStor 14F/34F rev. " U14_34F_VERSION " ", \
+ u14_34f_detect, \
+ NULL, /* Release */ \
+ NULL, \
+ NULL, \
+ u14_34f_queuecommand, \
+ u14_34f_abort, \
+ u14_34f_reset, \
+ NULL, \
+ u14_34f_biosparam, \
+ 0, /* can_queue, reset by detect */ \
+ 7, /* this_id, reset by detect */ \
+ 0, /* sg_tablesize, reset by detect */ \
+ 0, /* cmd_per_lun, reset by detect */ \
+ 0, /* number of boards present */ \
+ 1, /* unchecked isa dma, reset by detect */ \
+ ENABLE_CLUSTERING \
+ }
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/ultrastor.c b/i386/i386at/gpl/linux/scsi/ultrastor.c
new file mode 100644
index 00000000..23e94a91
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/ultrastor.c
@@ -0,0 +1,1160 @@
+/*
+ * ultrastor.c Copyright (C) 1992 David B. Gentzel
+ * Low-level SCSI driver for UltraStor 14F, 24F, and 34F
+ * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
+ * (gentzel@nova.enet.dec.com)
+ * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
+ * 24F and multiple command support by John F. Carr (jfc@athena.mit.edu)
+ * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
+ * Eric Youngdale (ericy@cais.com).
+ * Thanks to UltraStor for providing the necessary documentation
+ */
+
+/*
+ * TODO:
+ * 1. Find out why scatter/gather is limited to 16 requests per command.
+ * This is fixed, at least on the 24F, as of version 1.12 - CAE.
+ * 2. Look at command linking (mscp.command_link and
+ * mscp.command_link_id). (Does not work with many disks,
+ * and no performance increase. ERY).
+ * 3. Allow multiple adapters.
+ */
+
+/*
+ * NOTES:
+ * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
+ * performance SCSI-2 host adapters. They all support command queueing
+ * and scatter/gather I/O. Some of them can also emulate the standard
+ * WD1003 interface for use with OS's which don't support SCSI. Here
+ * is the scoop on the various models:
+ * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
+ * 14N - ISA HA with floppy support. I think that this is a non-DMA
+ * HA. Nothing further known.
+ * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
+ * 34F - VL-Bus Bus Master HA with floppy support (no WD1003 emulation).
+ *
+ * The 14F, 24F, and 34F are supported by this driver.
+ *
+ * Places flagged with a triple question-mark are things which are either
+ * unfinished, questionable, or wrong.
+ */
+
+/* Changes from version 1.11 alpha to 1.12
+ *
+ * Increased the size of the scatter-gather list to 33 entries for
+ * the 24F adapter (it was 16). I don't have the specs for the 14F
+ * or the 34F, so they may support larger s-g lists as well.
+ *
+ * Caleb Epstein <cae@jpmorgan.com>
+ */
+
+/* Changes from version 1.9 to 1.11
+ *
+ * Patches to bring this driver up to speed with the default kernel
+ * driver which supports only the 14F and 34F adapters. This version
+ * should compile cleanly into 0.99.13, 0.99.12 and probably 0.99.11.
+ *
+ * Fixes from Eric Youngdale to fix a few possible race conditions and
+ * several problems with bit testing operations (insufficient
+ * parentheses).
+ *
+ * Removed the ultrastor_abort() and ultrastor_reset() functions
+ * (enclosed them in #if 0 / #endif). These functions, at least on
+ * the 24F, cause the SCSI bus to do odd things and generally lead to
+ * kernel panics and machine hangs. This is like the Adaptec code.
+ *
+ * Use check/snarf_region for 14f, 34f to avoid I/O space address conflicts.
+ */
+
+/* Changes from version 1.8 to version 1.9
+ *
+ * 0.99.11 patches (cae@jpmorgan.com) */
+
+/* Changes from version 1.7 to version 1.8
+ *
+ * Better error reporting.
+ */
+
+/* Changes from version 1.6 to version 1.7
+ *
+ * Removed CSIR command code.
+ *
+ * Better race condition avoidance (xchgb function added).
+ *
+ * Set ICM and OGM status to zero at probe (24F)
+ *
+ * reset sends soft reset to UltraStor adapter
+ *
+ * reset adapter if adapter interrupts with an invalid MSCP address
+ *
+ * handle aborted command interrupt (24F)
+ *
+ */
+
+/* Changes from version 1.5 to version 1.6:
+ *
+ * Read MSCP address from ICM _before_ clearing the interrupt flag.
+ * This fixes a race condition.
+ */
+
+/* Changes from version 1.4 to version 1.5:
+ *
+ * Abort now calls done when multiple commands are enabled.
+ *
+ * Clear busy when aborted command finishes, not when abort is called.
+ *
+ * More debugging messages for aborts.
+ */
+
+/* Changes from version 1.3 to version 1.4:
+ *
+ * Enable automatic request of sense data on error (requires newer version
+ * of scsi.c to be useful).
+ *
+ * Fix PORT_OVERRIDE for 14F.
+ *
+ * Fix abort and reset to work properly (config.aborted wasn't cleared
+ * after it was tested, so after a command abort no further commands would
+ * work).
+ *
+ * Boot time test to enable SCSI bus reset (defaults to not allowing reset).
+ *
+ * Fix test for OGM busy -- the busy bit is in different places on the 24F.
+ *
+ * Release ICM slot by clearing first byte on 24F.
+ */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+
+#define ULTRASTOR_PRIVATE /* Get the private stuff from ultrastor.h */
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "ultrastor.h"
+#include "sd.h"
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_ultrastor = {
+ PROC_SCSI_ULTRASTOR, 9, "ultrastor",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#define FALSE 0
+#define TRUE 1
+
+#ifndef ULTRASTOR_DEBUG
+#define ULTRASTOR_DEBUG (UD_ABORT|UD_CSIR|UD_RESET)
+#endif
+
+#define VERSION "1.12"
+
+#define ARRAY_SIZE(arr) (sizeof (arr) / sizeof (arr)[0])
+
+#define PACKED __attribute__((packed))
+#define ALIGNED(x) __attribute__((aligned(x)))
+
+
+/* The 14F uses an array of 4-byte ints for its scatter/gather list.
+ The data can be unaligned, but need not be. It's easier to give
+ the list normal alignment since it doesn't need to fit into a
+ packed structure. */
+
+typedef struct {
+ unsigned int address;
+ unsigned int num_bytes;
+} ultrastor_sg_list;
+
+
+/* MailBox SCSI Command Packet. Basic command structure for communicating
+ with controller. */
+struct mscp {
+ unsigned char opcode: 3; /* type of command */
+ unsigned char xdir: 2; /* data transfer direction */
+ unsigned char dcn: 1; /* disable disconnect */
+ unsigned char ca: 1; /* use cache (if available) */
+ unsigned char sg: 1; /* scatter/gather operation */
+ unsigned char target_id: 3; /* target SCSI id */
+ unsigned char ch_no: 2; /* SCSI channel (always 0 for 14f) */
+ unsigned char lun: 3; /* logical unit number */
+ unsigned int transfer_data PACKED; /* transfer data pointer */
+ unsigned int transfer_data_length PACKED; /* length in bytes */
+ unsigned int command_link PACKED; /* for linking command chains */
+ unsigned char scsi_command_link_id; /* identifies command in chain */
+ unsigned char number_of_sg_list; /* (if sg is set) 8 bytes per list */
+ unsigned char length_of_sense_byte;
+ unsigned char length_of_scsi_cdbs; /* 6, 10, or 12 */
+ unsigned char scsi_cdbs[12]; /* SCSI commands */
+ unsigned char adapter_status; /* non-zero indicates HA error */
+ unsigned char target_status; /* non-zero indicates target error */
+ unsigned int sense_data PACKED;
+ /* The following fields are for software only. They are included in
+ the MSCP structure because they are associated with SCSI requests. */
+ void (*done)(Scsi_Cmnd *);
+ Scsi_Cmnd *SCint;
+ ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */
+};
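+
+/* Note: ultrastor_queuecommand() below writes the first byte of this
+   structure directly as OP_SCSI | (DTD_SCSI << 3) (see its "This way is
+   faster" fast path), which relies on opcode occupying the low three bits
+   and xdir the next two in the compiler's bitfield layout above. */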
+
+
+/* Port addresses (relative to the base address) */
+#define U14F_PRODUCT_ID(port) ((port) + 0x4)
+#define CONFIG(port) ((port) + 0x6)
+
+/* Port addresses relative to the doorbell base address. */
+#define LCL_DOORBELL_MASK(port) ((port) + 0x0)
+#define LCL_DOORBELL_INTR(port) ((port) + 0x1)
+#define SYS_DOORBELL_MASK(port) ((port) + 0x2)
+#define SYS_DOORBELL_INTR(port) ((port) + 0x3)
+
+
+/* Used to store configuration info read from config i/o registers. Most of
+ this is not used yet, but might as well save it.
+
+ This structure also holds port addresses that are not at the same offset
+ on the 14F and 24F.
+
+ This structure holds all data that must be duplicated to support multiple
+ adapters. */
+
+static struct ultrastor_config
+{
+ unsigned short port_address; /* base address of card */
+ unsigned short doorbell_address; /* base address of doorbell CSRs */
+ unsigned short ogm_address; /* base address of OGM */
+ unsigned short icm_address; /* base address of ICM */
+ const void *bios_segment;
+ unsigned char interrupt: 4;
+ unsigned char dma_channel: 3;
+ unsigned char bios_drive_number: 1;
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned char ha_scsi_id: 3;
+ unsigned char subversion: 4;
+ unsigned char revision;
+ /* The slot number is used to distinguish the 24F (slot != 0) from
+ the 14F and 34F (slot == 0). */
+ unsigned char slot;
+
+#ifdef PRINT_U24F_VERSION
+ volatile int csir_done;
+#endif
+
+ /* A pool of MSCP structures for this adapter, and a bitmask of
+ busy structures. (If ULTRASTOR_14F_MAX_CMDS == 1, a 1 byte
+ busy flag is used instead.) */
+
+#if ULTRASTOR_MAX_CMDS == 1
+ unsigned char mscp_busy;
+#else
+ unsigned short mscp_free;
+#endif
+ volatile unsigned char aborted[ULTRASTOR_MAX_CMDS];
+ struct mscp mscp[ULTRASTOR_MAX_CMDS];
+} config = {0};
+
+/* Set this to 1 to reset the SCSI bus on error. */
+int ultrastor_bus_reset = 0;
+
+
+/* Allowed BIOS base addresses (NULL indicates reserved) */
+static const void *const bios_segment_table[8] = {
+ NULL, (void *)0xC4000, (void *)0xC8000, (void *)0xCC000,
+ (void *)0xD0000, (void *)0xD4000, (void *)0xD8000, (void *)0xDC000,
+};
+
+/* Allowed IRQs for 14f */
+static const unsigned char interrupt_table_14f[4] = { 15, 14, 11, 10 };
+
+/* Allowed DMA channels for 14f (0 indicates reserved) */
+static const unsigned char dma_channel_table_14f[4] = { 5, 6, 7, 0 };
+
+/* Head/sector mappings allowed by 14f */
+static const struct {
+ unsigned char heads;
+ unsigned char sectors;
+} mapping_table[4] = { { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 } };
+
+#ifndef PORT_OVERRIDE
+/* ??? A probe of address 0x310 screws up NE2000 cards */
+static const unsigned short ultrastor_ports_14f[] = {
+ 0x330, 0x340, /*0x310,*/ 0x230, 0x240, 0x210, 0x130, 0x140,
+};
+#endif
+
+static void ultrastor_interrupt(int, struct pt_regs *);
+static inline void build_sg_list(struct mscp *, Scsi_Cmnd *SCpnt);
+
+
+static inline int find_and_clear_bit_16(unsigned short *field)
+{
+ int rv;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ if (*field == 0) panic("No free mscp");
+ asm("xorl %0,%0\n0:\tbsfw %1,%w0\n\tbtr %0,%1\n\tjnc 0b"
+ : "=&r" (rv), "=m" (*field) : "1" (*field));
+ restore_flags(flags);
+ return rv;
+}
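+
+/* For reference only: a portable C sketch of what the asm above does --
+   scan the 16-bit MSCP free mask for the lowest set bit, clear it, and
+   return its index.  Not compiled into the driver; the inline asm is
+   kept for speed. */
+#if 0
+static inline int find_and_clear_bit_16_sketch(unsigned short *field)
+{
+  int i;
+  for (i = 0; i < 16; i++)
+    if (*field & (1 << i)) {
+      *field &= ~(1 << i);   /* claim this MSCP slot */
+      return i;
+    }
+  return -1;                 /* empty mask (the caller has already panicked) */
+}
+#endif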
+
+/* This has been re-implemented with the help of Richard Earnshaw,
+ <rwe@pegasus.esprit.ec.org> and works with gcc-2.5.8 and gcc-2.6.0.
+ The instability noted by jfc below appears to be a bug in
+ gcc-2.5.x when compiling w/o optimization. --Caleb
+
+ This asm is fragile: it doesn't work without the casts and it may
+ not work without optimization. Maybe I should add a swap builtin
+ to gcc. --jfc */
+static inline unsigned char xchgb(unsigned char reg,
+ volatile unsigned char *mem)
+{
+ __asm__ ("xchgb %0,%1" : "=q" (reg), "=m" (*mem) : "0" (reg));
+ return reg;
+}
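+
+/* Usage note: xchgb() is the atomic test-and-set applied to config.aborted[].
+   ultrastor_queuecommand() swaps in 0xff while building a command and swaps
+   in 0 just before handing it to the adapter, while ultrastor_abort() swaps
+   in DID_ABORT; each side learns from the returned value whether the other
+   got there first. */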
+
+#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
+
+static void log_ultrastor_abort(register struct ultrastor_config *config,
+ int command)
+{
+ static char fmt[80] = "abort %d (%x); MSCP free pool: %x;";
+ register int i;
+ int flags;
+ save_flags(flags);
+ cli();
+
+ for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
+ {
+ fmt[20 + i*2] = ' ';
+ if (! (config->mscp_free & (1 << i)))
+ fmt[21 + i*2] = '0' + config->mscp[i].target_id;
+ else
+ fmt[21 + i*2] = '-';
+ }
+ fmt[20 + ULTRASTOR_MAX_CMDS * 2] = '\n';
+ fmt[21 + ULTRASTOR_MAX_CMDS * 2] = 0;
+ printk(fmt, command, &config->mscp[command], config->mscp_free);
+ restore_flags(flags);
+}
+#endif
+
+static int ultrastor_14f_detect(Scsi_Host_Template * tpnt)
+{
+ size_t i;
+ unsigned char in_byte, version_byte = 0;
+ struct config_1 {
+ unsigned char bios_segment: 3;
+ unsigned char removable_disks_as_fixed: 1;
+ unsigned char interrupt: 2;
+ unsigned char dma_channel: 2;
+ } config_1;
+ struct config_2 {
+ unsigned char ha_scsi_id: 3;
+ unsigned char mapping_mode: 2;
+ unsigned char bios_drive_number: 1;
+ unsigned char tfr_port: 2;
+ } config_2;
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: called\n");
+#endif
+
+ /* If a 24F has already been configured, don't look for a 14F. */
+ if (config.bios_segment)
+ return FALSE;
+
+#ifdef PORT_OVERRIDE
+ if(check_region(PORT_OVERRIDE, 0xc)) {
+ printk("Ultrastor I/O space already in use\n");
+ return FALSE;
+ };
+ config.port_address = PORT_OVERRIDE;
+#else
+ for (i = 0; i < ARRAY_SIZE(ultrastor_ports_14f); i++) {
+ if(check_region(ultrastor_ports_14f[i], 0x0c)) continue;
+ config.port_address = ultrastor_ports_14f[i];
+#endif
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: testing port address %03X\n", config.port_address);
+#endif
+
+ in_byte = inb(U14F_PRODUCT_ID(config.port_address));
+ if (in_byte != US14F_PRODUCT_ID_0) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+# ifdef PORT_OVERRIDE
+ printk("US14F: detect: wrong product ID 0 - %02X\n", in_byte);
+# else
+ printk("US14F: detect: no adapter at port %03X\n", config.port_address);
+# endif
+#endif
+#ifdef PORT_OVERRIDE
+ return FALSE;
+#else
+ continue;
+#endif
+ }
+ in_byte = inb(U14F_PRODUCT_ID(config.port_address) + 1);
+ /* Only upper nibble is significant for Product ID 1 */
+ if ((in_byte & 0xF0) != US14F_PRODUCT_ID_1) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+# ifdef PORT_OVERRIDE
+ printk("US14F: detect: wrong product ID 1 - %02X\n", in_byte);
+# else
+ printk("US14F: detect: no adapter at port %03X\n", config.port_address);
+# endif
+#endif
+#ifdef PORT_OVERRIDE
+ return FALSE;
+#else
+ continue;
+#endif
+ }
+ version_byte = in_byte;
+#ifndef PORT_OVERRIDE
+ break;
+ }
+ if (i == ARRAY_SIZE(ultrastor_ports_14f)) {
+# if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: no port address found!\n");
+# endif
+ return FALSE;
+ }
+#endif
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: adapter found at port address %03X\n",
+ config.port_address);
+#endif
+
+ /* Set local doorbell mask to disallow bus reset unless
+ ultrastor_bus_reset is true. */
+ outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(config.port_address));
+
+ /* All above tests passed, must be the right thing. Get some useful
+ info. */
+
+ request_region(config.port_address, 0x0c,"ultrastor");
+ /* Register the I/O space that we use */
+
+ *(char *)&config_1 = inb(CONFIG(config.port_address + 0));
+ *(char *)&config_2 = inb(CONFIG(config.port_address + 1));
+ config.bios_segment = bios_segment_table[config_1.bios_segment];
+ config.doorbell_address = config.port_address;
+ config.ogm_address = config.port_address + 0x8;
+ config.icm_address = config.port_address + 0xC;
+ config.interrupt = interrupt_table_14f[config_1.interrupt];
+ config.ha_scsi_id = config_2.ha_scsi_id;
+ config.heads = mapping_table[config_2.mapping_mode].heads;
+ config.sectors = mapping_table[config_2.mapping_mode].sectors;
+ config.bios_drive_number = config_2.bios_drive_number;
+ config.subversion = (version_byte & 0x0F);
+ if (config.subversion == U34F)
+ config.dma_channel = 0;
+ else
+ config.dma_channel = dma_channel_table_14f[config_1.dma_channel];
+
+ if (!config.bios_segment) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: not detected.\n");
+#endif
+ return FALSE;
+ }
+
+ /* Final consistency check, verify previous info. */
+ if (config.subversion != U34F)
+ if (!config.dma_channel || !(config_2.tfr_port & 0x2)) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: consistency check failed\n");
+#endif
+ return FALSE;
+ }
+
+ /* If we were TRULY paranoid, we could issue a host adapter inquiry
+ command here and verify the data returned. But frankly, I'm
+ exhausted! */
+
+ /* Finally! Now I'm satisfied... */
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: detect succeeded\n"
+ " Port address: %03X\n"
+ " BIOS segment: %05X\n"
+ " Interrupt: %u\n"
+ " DMA channel: %u\n"
+ " H/A SCSI ID: %u\n"
+ " Subversion: %u\n",
+ config.port_address, config.bios_segment, config.interrupt,
+ config.dma_channel, config.ha_scsi_id, config.subversion);
+#endif
+ tpnt->this_id = config.ha_scsi_id;
+ tpnt->unchecked_isa_dma = (config.subversion != U34F);
+
+#if ULTRASTOR_MAX_CMDS > 1
+ config.mscp_free = ~0;
+#endif
+
+ if (request_irq(config.interrupt, ultrastor_interrupt, 0, "Ultrastor")) {
+ printk("Unable to allocate IRQ%u for UltraStor controller.\n",
+ config.interrupt);
+ return FALSE;
+ }
+ if (config.dma_channel && request_dma(config.dma_channel,"Ultrastor")) {
+ printk("Unable to allocate DMA channel %u for UltraStor controller.\n",
+ config.dma_channel);
+ free_irq(config.interrupt);
+ return FALSE;
+ }
+ tpnt->sg_tablesize = ULTRASTOR_14F_MAX_SG;
+ printk("UltraStor driver version" VERSION ". Using %d SG lists.\n",
+ ULTRASTOR_14F_MAX_SG);
+
+ return TRUE;
+}
+
+static int ultrastor_24f_detect(Scsi_Host_Template * tpnt)
+{
+ register int i;
+ struct Scsi_Host * shpnt = NULL;
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US24F: detect");
+#endif
+
+ /* probe each EISA slot at slot address C80 */
+ for (i = 1; i < 15; i++)
+ {
+ unsigned char config_1, config_2;
+ unsigned short addr = (i << 12) | ULTRASTOR_24F_PORT;
+
+ if (inb(addr) != US24F_PRODUCT_ID_0 &&
+ inb(addr+1) != US24F_PRODUCT_ID_1 &&
+ inb(addr+2) != US24F_PRODUCT_ID_2)
+ continue;
+
+ config.revision = inb(addr+3);
+ config.slot = i;
+ if (! (inb(addr+4) & 1))
+ {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("U24F: found disabled card in slot %u\n", i);
+#endif
+ continue;
+ }
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("U24F: found card in slot %u\n", i);
+#endif
+ config_1 = inb(addr + 5);
+ config.bios_segment = bios_segment_table[config_1 & 7];
+ switch(config_1 >> 4)
+ {
+ case 1:
+ config.interrupt = 15;
+ break;
+ case 2:
+ config.interrupt = 14;
+ break;
+ case 4:
+ config.interrupt = 11;
+ break;
+ case 8:
+ config.interrupt = 10;
+ break;
+ default:
+ printk("U24F: invalid IRQ\n");
+ return FALSE;
+ }
+ if (request_irq(config.interrupt, ultrastor_interrupt, 0, "Ultrastor"))
+ {
+ printk("Unable to allocate IRQ%u for UltraStor controller.\n",
+ config.interrupt);
+ return FALSE;
+ }
+ /* BIOS addr set */
+ /* base port set */
+ config.port_address = addr;
+ config.doorbell_address = addr + 12;
+ config.ogm_address = addr + 0x17;
+ config.icm_address = addr + 0x1C;
+ config_2 = inb(addr + 7);
+ config.ha_scsi_id = config_2 & 7;
+ config.heads = mapping_table[(config_2 >> 3) & 3].heads;
+ config.sectors = mapping_table[(config_2 >> 3) & 3].sectors;
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US24F: detect: detect succeeded\n"
+ " Port address: %03X\n"
+ " BIOS segment: %05X\n"
+ " Interrupt: %u\n"
+ " H/A SCSI ID: %u\n",
+ config.port_address, config.bios_segment,
+ config.interrupt, config.ha_scsi_id);
+#endif
+ tpnt->this_id = config.ha_scsi_id;
+ tpnt->unchecked_isa_dma = 0;
+ tpnt->sg_tablesize = ULTRASTOR_24F_MAX_SG;
+
+ shpnt = scsi_register(tpnt, 0);
+ shpnt->irq = config.interrupt;
+ shpnt->dma_channel = config.dma_channel;
+ shpnt->io_port = config.port_address;
+
+#if ULTRASTOR_MAX_CMDS > 1
+ config.mscp_free = ~0;
+#endif
+ /* Mark ICM and OGM free */
+ outb(0, addr + 0x16);
+ outb(0, addr + 0x1B);
+
+ /* Set local doorbell mask to disallow bus reset unless
+ ultrastor_bus_reset is true. */
+ outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(addr+12));
+ outb(0x02, SYS_DOORBELL_MASK(addr+12));
+ printk("UltraStor driver version " VERSION ". Using %d SG lists.\n",
+ tpnt->sg_tablesize);
+ return TRUE;
+ }
+ return FALSE;
+}
+
+int ultrastor_detect(Scsi_Host_Template * tpnt)
+{
+ tpnt->proc_dir = &proc_scsi_ultrastor;
+ return ultrastor_14f_detect(tpnt) || ultrastor_24f_detect(tpnt);
+}
+
+const char *ultrastor_info(struct Scsi_Host * shpnt)
+{
+ static char buf[64];
+
+ if (config.slot)
+ sprintf(buf, "UltraStor 24F SCSI @ Slot %u IRQ%u\n",
+ config.slot, config.interrupt);
+ else if (config.subversion)
+ sprintf(buf, "UltraStor 34F SCSI @ Port %03X BIOS %05X IRQ%u\n",
+ config.port_address, (int)config.bios_segment,
+ config.interrupt);
+ else
+ sprintf(buf, "UltraStor 14F SCSI @ Port %03X BIOS %05X IRQ%u DMA%u\n",
+ config.port_address, (int)config.bios_segment,
+ config.interrupt, config.dma_channel);
+ return buf;
+}
+
+static inline void build_sg_list(register struct mscp *mscp, Scsi_Cmnd *SCpnt)
+{
+ struct scatterlist *sl;
+ long transfer_length = 0;
+ int i, max;
+
+ sl = (struct scatterlist *) SCpnt->request_buffer;
+ max = SCpnt->use_sg;
+ for (i = 0; i < max; i++) {
+ mscp->sglist[i].address = (unsigned int)sl[i].address;
+ mscp->sglist[i].num_bytes = sl[i].length;
+ transfer_length += sl[i].length;
+ }
+ mscp->number_of_sg_list = max;
+ mscp->transfer_data = (unsigned int)mscp->sglist;
+ /* ??? May not be necessary. Docs are unclear as to whether transfer
+ length field is ignored or whether it should be set to the total
+ number of bytes of the transfer. */
+ mscp->transfer_data_length = transfer_length;
+}
+
+int ultrastor_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ register struct mscp *my_mscp;
+#if ULTRASTOR_MAX_CMDS > 1
+ int mscp_index;
+#endif
+ unsigned int status;
+ int flags;
+
+ /* Next test is for debugging; "can't happen" */
+ if ((config.mscp_free & ((1U << ULTRASTOR_MAX_CMDS) - 1)) == 0)
+ panic("ultrastor_queuecommand: no free MSCP\n");
+ mscp_index = find_and_clear_bit_16(&config.mscp_free);
+
+ /* Has the command been aborted? */
+ if (xchgb(0xff, &config.aborted[mscp_index]) != 0)
+ {
+ status = DID_ABORT << 16;
+ goto aborted;
+ }
+
+ my_mscp = &config.mscp[mscp_index];
+
+#if 1
+ /* This way is faster. */
+ *(unsigned char *)my_mscp = OP_SCSI | (DTD_SCSI << 3);
+#else
+ my_mscp->opcode = OP_SCSI;
+ my_mscp->xdir = DTD_SCSI;
+ my_mscp->dcn = FALSE;
+#endif
+ /* Tape drives don't work properly if the cache is used. The SCSI
+ READ command for a tape doesn't have a block offset, and the adapter
+ incorrectly assumes that all reads from the tape read the same
+ blocks. Results will depend on read buffer size and other disk
+ activity.
+
+ ??? Which other device types should never use the cache? */
+ my_mscp->ca = SCpnt->device->type != TYPE_TAPE;
+ my_mscp->target_id = SCpnt->target;
+ my_mscp->ch_no = 0;
+ my_mscp->lun = SCpnt->lun;
+ if (SCpnt->use_sg) {
+ /* Set scatter/gather flag in SCSI command packet */
+ my_mscp->sg = TRUE;
+ build_sg_list(my_mscp, SCpnt);
+ } else {
+ /* Unset scatter/gather flag in SCSI command packet */
+ my_mscp->sg = FALSE;
+ my_mscp->transfer_data = (unsigned int)SCpnt->request_buffer;
+ my_mscp->transfer_data_length = SCpnt->request_bufflen;
+ }
+ my_mscp->command_link = 0; /*???*/
+ my_mscp->scsi_command_link_id = 0; /*???*/
+ my_mscp->length_of_sense_byte = sizeof SCpnt->sense_buffer;
+ my_mscp->length_of_scsi_cdbs = SCpnt->cmd_len;
+ memcpy(my_mscp->scsi_cdbs, SCpnt->cmnd, my_mscp->length_of_scsi_cdbs);
+ my_mscp->adapter_status = 0;
+ my_mscp->target_status = 0;
+ my_mscp->sense_data = (unsigned int)&SCpnt->sense_buffer;
+ my_mscp->done = done;
+ my_mscp->SCint = SCpnt;
+ SCpnt->host_scribble = (unsigned char *)my_mscp;
+
+ /* Find free OGM slot. On 24F, look for OGM status byte == 0.
+ On 14F and 34F, wait for local interrupt pending flag to clear. */
+
+ retry:
+ if (config.slot)
+ while (inb(config.ogm_address - 1) != 0 &&
+ config.aborted[mscp_index] == 0xff) barrier();
+
+ /* else??? */
+
+ while ((inb(LCL_DOORBELL_INTR(config.doorbell_address)) &
+ (config.slot ? 2 : 1))
+ && config.aborted[mscp_index] == 0xff) barrier();
+
+ /* To avoid race conditions, make the code to write to the adapter
+ atomic. This simplifies the abort code. */
+
+ save_flags(flags);
+ cli();
+
+ if (inb(LCL_DOORBELL_INTR(config.doorbell_address)) &
+ (config.slot ? 2 : 1))
+ {
+ restore_flags(flags);
+ goto retry;
+ }
+
+ status = xchgb(0, &config.aborted[mscp_index]);
+ if (status != 0xff) {
+ restore_flags(flags);
+
+#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
+ printk("USx4F: queuecommand: aborted\n");
+#if ULTRASTOR_MAX_CMDS > 1
+ log_ultrastor_abort(&config, mscp_index);
+#endif
+#endif
+ status <<= 16;
+
+ aborted:
+ set_bit(mscp_index, &config.mscp_free);
+ /* If the driver queues commands, call the done proc here. Otherwise
+ return an error. */
+#if ULTRASTOR_MAX_CMDS > 1
+ SCpnt->result = status;
+ done(SCpnt);
+ return 0;
+#else
+ return status;
+#endif
+ }
+
+ /* Store pointer in OGM address bytes */
+ outl((unsigned int)my_mscp, config.ogm_address);
+
+ /* Issue OGM interrupt */
+ if (config.slot) {
+ /* Write OGM command register on 24F */
+ outb(1, config.ogm_address - 1);
+ outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
+ } else {
+ outb(0x1, LCL_DOORBELL_INTR(config.doorbell_address));
+ }
+
+ restore_flags(flags);
+
+#if (ULTRASTOR_DEBUG & UD_COMMAND)
+ printk("USx4F: queuecommand: returning\n");
+#endif
+
+ return 0;
+}
+
+/* This code must deal with 2 cases:
+
+ 1. The command has not been written to the OGM. In this case, set
+ the abort flag and return.
+
+ 2. The command has been written to the OGM and is stuck somewhere in
+ the adapter.
+
+ 2a. On a 24F, ask the adapter to abort the command. It will interrupt
+ when it does.
+
+ 2b. Call the command's done procedure.
+
+ */
+
+int ultrastor_abort(Scsi_Cmnd *SCpnt)
+{
+#if ULTRASTOR_DEBUG & UD_ABORT
+ char out[108];
+ unsigned char icm_status = 0, ogm_status = 0;
+ unsigned int icm_addr = 0, ogm_addr = 0;
+#endif
+ unsigned int mscp_index;
+ unsigned char old_aborted;
+ void (*done)(Scsi_Cmnd *);
+
+ if(config.slot)
+ return SCSI_ABORT_SNOOZE; /* Do not attempt an abort for the 24f */
+
+ /* Simple consistency checking */
+ if(!SCpnt->host_scribble)
+ return SCSI_ABORT_NOT_RUNNING;
+
+ mscp_index = ((struct mscp *)SCpnt->host_scribble) - config.mscp;
+ if (mscp_index >= ULTRASTOR_MAX_CMDS)
+ panic("Ux4F aborting invalid MSCP");
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ if (config.slot)
+ {
+ int port0 = (config.slot << 12) | 0xc80;
+ int i;
+ int flags;
+ save_flags(flags);
+ cli();
+ strcpy(out, "OGM %d:%x ICM %d:%x ports: ");
+ for (i = 0; i < 16; i++)
+ {
+ unsigned char p = inb(port0 + i);
+ out[28 + i * 3] = "0123456789abcdef"[p >> 4];
+ out[29 + i * 3] = "0123456789abcdef"[p & 15];
+ out[30 + i * 3] = ' ';
+ }
+ out[28 + i * 3] = '\n';
+ out[29 + i * 3] = 0;
+ ogm_status = inb(port0 + 22);
+ ogm_addr = inl(port0 + 23);
+ icm_status = inb(port0 + 27);
+ icm_addr = inl(port0 + 28);
+ restore_flags(flags);
+ }
+
+ /* First check to see if an interrupt is pending. I suspect the SiS
+ chipset loses interrupts. (I also suspect it mangles data, but
+ one bug at a time...) */
+ if (config.slot ? inb(config.icm_address - 1) == 2 :
+ (inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
+ {
+ int flags;
+ save_flags(flags);
+ printk("Ux4F: abort while completed command pending\n");
+ restore_flags(flags);
+ cli();
+ ultrastor_interrupt(0, NULL);
+ restore_flags(flags);
+ return SCSI_ABORT_SUCCESS; /* FIXME - is this correct? -ERY */
+ }
+#endif
+
+ old_aborted = xchgb(DID_ABORT, &config.aborted[mscp_index]);
+
+ /* aborted == 0xff is the signal that queuecommand has not yet sent
+ the command. It will notice the new abort flag and fail. */
+ if (old_aborted == 0xff)
+ return SCSI_ABORT_SUCCESS;
+
+ /* On 24F, send an abort MSCP request. The adapter will interrupt
+ and the interrupt handler will call done. */
+ if (config.slot && inb(config.ogm_address - 1) == 0)
+ {
+ int flags;
+
+ save_flags(flags);
+ cli();
+ outl((int)&config.mscp[mscp_index], config.ogm_address);
+ inb(0xc80); /* delay */
+ outb(0x80, config.ogm_address - 1);
+ outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
+#if ULTRASTOR_DEBUG & UD_ABORT
+ log_ultrastor_abort(&config, mscp_index);
+ printk(out, ogm_status, ogm_addr, icm_status, icm_addr);
+#endif
+ restore_flags(flags);
+ return SCSI_ABORT_PENDING;
+ }
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ log_ultrastor_abort(&config, mscp_index);
+#endif
+
+ /* Can't request a graceful abort. Either this is not a 24F or
+ the OGM is busy. Don't free the command -- the adapter might
+ still be using it. Setting SCint = 0 causes the interrupt
+ handler to ignore the command. */
+
+ /* FIXME - devices that implement soft resets will still be running
+ the command after a bus reset. We would probably rather leave
+ the command in the queue. The upper level code will automatically
+ leave the command in the active state instead of requeueing it. ERY */
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ if (config.mscp[mscp_index].SCint != SCpnt)
+ printk("abort: command mismatch, %p != %p\n",
+ config.mscp[mscp_index].SCint, SCpnt);
+#endif
+ if (config.mscp[mscp_index].SCint == 0)
+ return SCSI_ABORT_NOT_RUNNING;
+
+ if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
+ config.mscp[mscp_index].SCint = 0;
+ done = config.mscp[mscp_index].done;
+ config.mscp[mscp_index].done = 0;
+ SCpnt->result = DID_ABORT << 16;
+ /* I worry about reentrancy in scsi.c */
+ done(SCpnt);
+
+ /* Need to set a timeout here in case command never completes. */
+ return SCSI_ABORT_SUCCESS;
+}
+
+int ultrastor_reset(Scsi_Cmnd * SCpnt)
+{
+ int flags;
+ register int i;
+#if (ULTRASTOR_DEBUG & UD_RESET)
+ printk("US14F: reset: called\n");
+#endif
+
+ if(config.slot)
+ return SCSI_RESET_PUNT; /* Do not attempt a reset for the 24f */
+
+ save_flags(flags);
+ cli();
+
+ /* Reset the adapter and SCSI bus. The SCSI bus reset can be
+ inhibited by clearing ultrastor_bus_reset before probe. */
+ outb(0xc0, LCL_DOORBELL_INTR(config.doorbell_address));
+ if (config.slot)
+ {
+ outb(0, config.ogm_address - 1);
+ outb(0, config.icm_address - 1);
+ }
+
+#if ULTRASTOR_MAX_CMDS == 1
+ if (config.mscp_busy && config.mscp->done && config.mscp->SCint)
+ {
+ config.mscp->SCint->result = DID_RESET << 16;
+ config.mscp->done(config.mscp->SCint);
+ }
+ config.mscp->SCint = 0;
+#else
+ for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
+ {
+ if (! (config.mscp_free & (1 << i)) &&
+ config.mscp[i].done && config.mscp[i].SCint)
+ {
+ config.mscp[i].SCint->result = DID_RESET << 16;
+ config.mscp[i].done(config.mscp[i].SCint);
+ config.mscp[i].done = 0;
+ }
+ config.mscp[i].SCint = 0;
+ }
+#endif
+
+ /* FIXME - if the device implements soft resets, then the command
+ will still be running. ERY */
+
+ memset((unsigned char *)config.aborted, 0, sizeof config.aborted);
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = 0;
+#else
+ config.mscp_free = ~0;
+#endif
+
+ restore_flags(flags);
+ return SCSI_RESET_SUCCESS;
+
+}
+
+int ultrastor_biosparam(Disk * disk, kdev_t dev, int * dkinfo)
+{
+ int size = disk->capacity;
+ unsigned int s = config.heads * config.sectors;
+
+ dkinfo[0] = config.heads;
+ dkinfo[1] = config.sectors;
+ dkinfo[2] = size / s; /* Ignore partial cylinders */
+#if 0
+ if (dkinfo[2] > 1024)
+ dkinfo[2] = 1024;
+#endif
+ return 0;
+}
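+
+/* Worked example (illustrative only): with the 64-head, 32-sector mapping,
+   a disk of 1048576 blocks reports a geometry of 64/32/512, since
+   1048576 / (64 * 32) = 512; partial cylinders are dropped by the integer
+   division above. */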
+
+static void ultrastor_interrupt(int irq, struct pt_regs *regs)
+{
+ unsigned int status;
+#if ULTRASTOR_MAX_CMDS > 1
+ unsigned int mscp_index;
+#endif
+ register struct mscp *mscp;
+ void (*done)(Scsi_Cmnd *);
+ Scsi_Cmnd *SCtmp;
+
+#if ULTRASTOR_MAX_CMDS == 1
+ mscp = &config.mscp[0];
+#else
+ mscp = (struct mscp *)inl(config.icm_address);
+ mscp_index = mscp - config.mscp;
+ if (mscp_index >= ULTRASTOR_MAX_CMDS) {
+ printk("Ux4F interrupt: bad MSCP address %x\n", (unsigned int) mscp);
+ /* A command has been lost. Reset and report an error
+ for all commands. */
+ ultrastor_reset(NULL);
+ return;
+ }
+#endif
+
+ /* Clean ICM slot (set ICMINT bit to 0) */
+ if (config.slot) {
+ unsigned char icm_status = inb(config.icm_address - 1);
+#if ULTRASTOR_DEBUG & (UD_INTERRUPT|UD_ERROR|UD_ABORT)
+ if (icm_status != 1 && icm_status != 2)
+ printk("US24F: ICM status %x for MSCP %d (%x)\n", icm_status,
+ mscp_index, (unsigned int) mscp);
+#endif
+ /* The manual says clear interrupt then write 0 to ICM status.
+ This seems backwards, but I'll do it anyway. --jfc */
+ outb(2, SYS_DOORBELL_INTR(config.doorbell_address));
+ outb(0, config.icm_address - 1);
+ if (icm_status == 4) {
+ printk("UltraStor abort command failed\n");
+ return;
+ }
+ if (icm_status == 3) {
+ void (*done)(Scsi_Cmnd *) = mscp->done;
+ if (done) {
+ mscp->done = 0;
+ mscp->SCint->result = DID_ABORT << 16;
+ done(mscp->SCint);
+ }
+ return;
+ }
+ } else {
+ outb(1, SYS_DOORBELL_INTR(config.doorbell_address));
+ }
+
+ SCtmp = mscp->SCint;
+ mscp->SCint = NULL;
+
+ if (SCtmp == 0)
+ {
+#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
+ printk("MSCP %d (%x): no command\n", mscp_index, (unsigned int) mscp);
+#endif
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = FALSE;
+#else
+ set_bit(mscp_index, &config.mscp_free);
+#endif
+ config.aborted[mscp_index] = 0;
+ return;
+ }
+
+ /* Save done locally and zero before calling. This is needed as
+ once we call done, we may get another command queued before this
+ interrupt service routine can return. */
+ done = mscp->done;
+ mscp->done = 0;
+
+ /* Let the higher levels know that we're done */
+ switch (mscp->adapter_status)
+ {
+ case 0:
+ status = DID_OK << 16;
+ break;
+ case 0x01: /* invalid command */
+ case 0x02: /* invalid parameters */
+ case 0x03: /* invalid data list */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ case 0x84: /* SCSI bus abort */
+ status = DID_ABORT << 16;
+ break;
+ case 0x91:
+ status = DID_TIME_OUT << 16;
+ break;
+ }
+
+ SCtmp->result = status | mscp->target_status;
+
+ SCtmp->host_scribble = 0;
+
+ /* Free up mscp block for next command */
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = FALSE;
+#else
+ set_bit(mscp_index, &config.mscp_free);
+#endif
+
+#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
+ if (config.aborted[mscp_index])
+ printk("Ux4 interrupt: MSCP %d (%x) aborted = %d\n",
+ mscp_index, (unsigned int) mscp, config.aborted[mscp_index]);
+#endif
+ config.aborted[mscp_index] = 0;
+
+ if (done)
+ done(SCtmp);
+ else
+ printk("US14F: interrupt: unexpected interrupt\n");
+
+ if (config.slot ? inb(config.icm_address - 1) : (inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
+ printk("Ux4F: multiple commands completed\n");
+
+#if (ULTRASTOR_DEBUG & UD_INTERRUPT)
+ printk("USx4F: interrupt: returning\n");
+#endif
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = ULTRASTOR_14F;
+
+#include "scsi_module.c"
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/ultrastor.h b/i386/i386at/gpl/linux/scsi/ultrastor.h
new file mode 100644
index 00000000..10cf63f2
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/ultrastor.h
@@ -0,0 +1,102 @@
+/*
+ * ultrastor.c (C) 1991 David B. Gentzel
+ * Low-level scsi driver for UltraStor 14F
+ * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
+ * (gentzel@nova.enet.dec.com)
+ * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
+ * 24F support by John F. Carr (jfc@athena.mit.edu)
+ * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
+ * Eric Youngdale (eric@tantalus.nrl.navy.mil).
+ * Thanks to UltraStor for providing the necessary documentation
+ */
+
+#ifndef _ULTRASTOR_H
+#define _ULTRASTOR_H
+#include <linux/kdev_t.h>
+
+int ultrastor_detect(Scsi_Host_Template *);
+const char *ultrastor_info(struct Scsi_Host * shpnt);
+int ultrastor_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int ultrastor_abort(Scsi_Cmnd *);
+int ultrastor_reset(Scsi_Cmnd *);
+int ultrastor_biosparam(Disk *, kdev_t, int *);
+
+
+#define ULTRASTOR_14F_MAX_SG 16
+#define ULTRASTOR_24F_MAX_SG 33
+
+#define ULTRASTOR_MAX_CMDS_PER_LUN 5
+#define ULTRASTOR_MAX_CMDS 16
+
+#define ULTRASTOR_24F_PORT 0xC80
+
+
+#define ULTRASTOR_14F { NULL, NULL, /* Ptr for modules*/ \
+ NULL, \
+ NULL, \
+ "UltraStor 14F/24F/34F", \
+ ultrastor_detect, \
+ NULL, /* Release */ \
+ ultrastor_info, \
+ 0, \
+ ultrastor_queuecommand, \
+ ultrastor_abort, \
+ ultrastor_reset, \
+ 0, \
+ ultrastor_biosparam, \
+ ULTRASTOR_MAX_CMDS, \
+ 0, \
+ ULTRASTOR_14F_MAX_SG, \
+ ULTRASTOR_MAX_CMDS_PER_LUN, \
+ 0, \
+ 1, \
+ ENABLE_CLUSTERING }
+
+
+#ifdef ULTRASTOR_PRIVATE
+
+#define UD_ABORT 0x0001
+#define UD_COMMAND 0x0002
+#define UD_DETECT 0x0004
+#define UD_INTERRUPT 0x0008
+#define UD_RESET 0x0010
+#define UD_MULTI_CMD 0x0020
+#define UD_CSIR 0x0040
+#define UD_ERROR 0x0080
+
+/* #define PORT_OVERRIDE 0x330 */
+
+/* Values for the PRODUCT_ID ports for the 14F */
+#define US14F_PRODUCT_ID_0 0x56
+#define US14F_PRODUCT_ID_1 0x40 /* NOTE: Only upper nibble is used */
+
+#define US24F_PRODUCT_ID_0 0x56
+#define US24F_PRODUCT_ID_1 0x63
+#define US24F_PRODUCT_ID_2 0x02
+
+/* Subversion values */
+#define U14F 0
+#define U34F 1
+
+/* MSCP field values */
+
+/* Opcode */
+#define OP_HOST_ADAPTER 0x1
+#define OP_SCSI 0x2
+#define OP_RESET 0x4
+
+/* Data Transfer Direction */
+#define DTD_SCSI 0x0
+#define DTD_IN 0x1
+#define DTD_OUT 0x2
+#define DTD_NONE 0x3
+
+/* Host Adapter command subcodes */
+#define HA_CMD_INQUIRY 0x1
+#define HA_CMD_SELF_DIAG 0x2
+#define HA_CMD_READ_BUFF 0x3
+#define HA_CMD_WRITE_BUFF 0x4
+
+#endif
+
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/wd7000.c b/i386/i386at/gpl/linux/scsi/wd7000.c
new file mode 100644
index 00000000..61d92b10
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/wd7000.c
@@ -0,0 +1,1237 @@
+/* $Id: wd7000.c,v 1.1.1.1 1997/02/25 21:27:53 thomas Exp $
+ * linux/drivers/scsi/wd7000.c
+ *
+ * Copyright (C) 1992 Thomas Wuensche
+ * closely related to the aha1542 driver from Tommy Thorn
+ * ( as close as different hardware allows on a lowlevel-driver :-) )
+ *
+ * Revised (and renamed) by John Boyd <boyd@cis.ohio-state.edu> to
+ * accommodate Eric Youngdale's modifications to scsi.c. Nov 1992.
+ *
+ * Additional changes to support scatter/gather. Dec. 1992. tw/jb
+ *
+ * No longer tries to reset SCSI bus at boot (it wasn't working anyway).
+ * Rewritten to support multiple host adapters.
+ * Miscellaneous cleanup.
+ * So far, still doesn't do reset or abort correctly, since I have no idea
+ * how to do them with this board (8^(. Jan 1994 jb
+ *
+ * This driver now supports both of the two standard configurations (per
+ * the 3.36 Owner's Manual, my latest reference) by the same method as
+ * before; namely, by looking for a BIOS signature. Thus, the location of
+ * the BIOS signature determines the board configuration. Until I have
+ * time to do something more flexible, users should stick to one of the
+ * following:
+ *
+ * Standard configuration for single-adapter systems:
+ * - BIOS at CE00h
+ * - I/O base address 350h
+ * - IRQ level 15
+ * - DMA channel 6
+ * Standard configuration for a second adapter in a system:
+ * - BIOS at C800h
+ * - I/O base address 330h
+ * - IRQ level 11
+ * - DMA channel 5
+ *
+ * Anyone who can recompile the kernel is welcome to add others as need
+ * arises, but unpredictable results may occur if there are conflicts.
+ * In any event, if there are multiple adapters in a system, they MUST
+ * use different I/O bases, IRQ levels, and DMA channels, since they will be
+ * indistinguishable (and in direct conflict) otherwise.
+ *
+ * As a point of information, the NO_OP command toggles the CMD_RDY bit
+ * of the status port, and this fact could be used as a test for the I/O
+ * base address (or more generally, board detection). There is an interrupt
+ * status port, so IRQ probing could also be done. I suppose the full
+ * DMA diagnostic could be used to detect the DMA channel being used. I
+ * haven't done any of this, though, because I think there's too much of
+ * a chance that such explorations could be destructive, if some other
+ * board's resources are used inadvertently. So, call me a wimp, but I
+ * don't want to try it. The only kind of exploration I trust is memory
+ * exploration, since it's more certain that reading memory won't be
+ * destructive.
+ *
+ * More to my liking would be a LILO boot command line specification, such
+ * as is used by the aha152x driver (and possibly others). I'll look into
+ * it, as I have time...
+ *
+ * I get mail occasionally from people who either are using or are
+ * considering using a WD7000 with Linux. There is a variety of
+ * nomenclature describing WD7000's. To the best of my knowledge, the
+ * following is a brief summary (from an old WD doc - I don't work for
+ * them or anything like that):
+ *
+ * WD7000-FASST2: This is a WD7000 board with the real-mode SST ROM BIOS
+ * installed. Last I heard, the BIOS was actually done by Columbia
+ * Data Products. The BIOS is only used by this driver (and thus
+ * by Linux) to identify the board; none of it can be executed under
+ * Linux.
+ *
+ * WD7000-ASC: This is the original adapter board, with or without BIOS.
+ * The board uses a WD33C93 or WD33C93A SBIC, which in turn is
+ * controlled by an onboard Z80 processor. The board interface
+ * visible to the host CPU is defined effectively by the Z80's
+ * firmware, and it is this firmware's revision level that is
+ * determined and reported by this driver. (The version of the
+ * on-board BIOS is of no interest whatsoever.) The host CPU has
+ * no access to the SBIC; hence the fact that it is a WD33C93 is
+ * also of no interest to this driver.
+ *
+ * WD7000-AX:
+ * WD7000-MX:
+ * WD7000-EX: These are newer versions of the WD7000-ASC. The -ASC is
+ * largely built from discrete components; these boards use more
+ * integration. The -AX is an ISA bus board (like the -ASC),
+ * the -MX is an MCA (i.e., PS/2) bus board, and the -EX is an
+ * EISA bus board.
+ *
+ * At the time of my documentation, the -?X boards were "future" products,
+ * and were not yet available. However, I vaguely recall that Thomas
+ * Wuensche had an -AX, so I believe at least it is supported by this
+ * driver. I have no personal knowledge of either -MX or -EX boards.
+ *
+ * P.S. Just recently, I've discovered (directly from WD and Future
+ * Domain) that all but the WD7000-EX have been out of production for
+ * two years now. FD has production rights to the 7000-EX, and are
+ * producing it under a new name, and with a new BIOS. If anyone has
+ * one of the FD boards, it would be nice to come up with a signature
+ * for it.
+ * J.B. Jan 1994.
+ */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <stdarg.h>
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/malloc.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#define ANY2SCSI_INLINE /* undef this to use old macros */
+#undef DEBUG
+
+#include "wd7000.h"
+
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_wd7000 = {
+ PROC_SCSI_7000FASST, 6, "wd7000",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+
+/*
+ * Mailbox structure sizes.
+ * I prefer to keep the number of ICMBs much larger than the number of
+ * OGMBs. OGMBs are used very quickly by the driver to start one or
+ * more commands, while ICMBs are used by the host adapter per command.
+ */
+#define OGMB_CNT 16
+#define ICMB_CNT 32
+
+/*
+ * Scb's are shared by all active adapters. So, if they all become busy,
+ * callers may be made to wait in alloc_scbs for them to free. That can
+ * be avoided by setting MAX_SCBS to NUM_CONFIG * WD7000_Q. If you'd
+ * rather conserve memory, use a smaller number (> 0, of course) - things
+ * will should still work OK.
+ */
+#define MAX_SCBS 32
+
+/*
+ * WD7000-specific mailbox structure
+ *
+ */
+typedef volatile struct mailbox{
+ unchar status;
+ unchar scbptr[3]; /* SCSI-style - MSB first (big endian) */
+} Mailbox;
+
+/*
+ * This structure should contain all per-adapter global data. I.e., any
+ * new global per-adapter data should put in here.
+ *
+ */
+typedef struct adapter {
+ struct Scsi_Host *sh; /* Pointer to Scsi_Host structure */
+ int iobase; /* This adapter's I/O base address */
+ int irq; /* This adapter's IRQ level */
+ int dma; /* This adapter's DMA channel */
+ struct { /* This adapter's mailboxes */
+ Mailbox ogmb[OGMB_CNT]; /* Outgoing mailboxes */
+ Mailbox icmb[ICMB_CNT]; /* Incoming mailboxes */
+ } mb;
+ int next_ogmb; /* to reduce contention at mailboxes */
+ unchar control; /* shadows CONTROL port value */
+ unchar rev1, rev2; /* filled in by wd7000_revision */
+} Adapter;
+
+/*
+ * The following is set up by wd7000_detect, and used thereafter by
+ * wd7000_intr_handle to map the irq level to the corresponding Adapter.
+ * Note that if SA_INTERRUPT is not used, wd7000_intr_handle must be
+ * changed to pick up the IRQ level correctly.
+ */
+Adapter *irq2host[16] = {NULL}; /* Possible IRQs are 0-15 */
+
+/*
+ * Standard Adapter Configurations - used by wd7000_detect
+ */
+typedef struct {
+ const void *bios; /* (linear) base address for ROM BIOS */
+ int iobase; /* I/O ports base address */
+ int irq; /* IRQ level */
+ int dma; /* DMA channel */
+} Config;
+
+static const Config configs[] = {
+ {(void *) 0xce000, 0x350, 15, 6}, /* defaults for single adapter */
+ {(void *) 0xc8000, 0x330, 11, 5}, /* defaults for second adapter */
+ {(void *) 0xd8000, 0x350, 15, 6}, /* Arghhh.... who added this ? */
+};
+#define NUM_CONFIGS (sizeof(configs)/sizeof(Config))
+
+/*
+ * The following list defines strings to look for in the BIOS that identify
+ * it as the WD7000-FASST2 SST BIOS. I suspect that something should be
+ * added for the Future Domain version.
+ */
+typedef struct signature {
+ const void *sig; /* String to look for */
+ unsigned ofs; /* offset from BIOS base address */
+ unsigned len; /* length of string */
+} Signature;
+
+static const Signature signatures[] = {
+ {"SSTBIOS",0x0000d,7} /* "SSTBIOS" @ offset 0x0000d */
+};
+#define NUM_SIGNATURES (sizeof(signatures)/sizeof(Signature))
+
+
+/*
+ * I/O Port Offsets and Bit Definitions
+ * 4 addresses are used. Those not defined here are reserved.
+ */
+#define ASC_STAT 0 /* Status, Read */
+#define ASC_COMMAND 0 /* Command, Write */
+#define ASC_INTR_STAT 1 /* Interrupt Status, Read */
+#define ASC_INTR_ACK 1 /* Acknowledge, Write */
+#define ASC_CONTROL 2 /* Control, Write */
+
+/* ASC Status Port
+ */
+#define INT_IM 0x80 /* Interrupt Image Flag */
+#define CMD_RDY 0x40 /* Command Port Ready */
+#define CMD_REJ 0x20 /* Command Port Byte Rejected */
+#define ASC_INIT 0x10 /* ASC Initialized Flag */
+#define ASC_STATMASK 0xf0 /* The lower 4 bits are reserved */
+
+/* COMMAND opcodes
+ *
+ * Unfortunately, I have no idea how to properly use some of these commands,
+ * as the OEM manual does not make it clear. I have not been able to use
+ * enable/disable unsolicited interrupts or the reset commands with any
+ * discernible effect whatsoever. I think they may be related to certain
+ * ICB commands, but again, the OEM manual doesn't make that clear.
+ */
+#define NO_OP 0 /* NO-OP toggles CMD_RDY bit in ASC_STAT */
+#define INITIALIZATION 1 /* initialization (10 bytes) */
+#define DISABLE_UNS_INTR 2 /* disable unsolicited interrupts */
+#define ENABLE_UNS_INTR 3 /* enable unsolicited interrupts */
+#define INTR_ON_FREE_OGMB 4 /* interrupt on free OGMB */
+#define SOFT_RESET 5 /* SCSI bus soft reset */
+#define HARD_RESET_ACK 6 /* SCSI bus hard reset acknowledge */
+#define START_OGMB 0x80 /* start command in OGMB (n) */
+#define SCAN_OGMBS 0xc0 /* start multiple commands, signature (n) */
+ /* where (n) = lower 6 bits */
+/* For INITIALIZATION:
+ */
+typedef struct initCmd {
+ unchar op; /* command opcode (= 1) */
+ unchar ID; /* Adapter's SCSI ID */
+ unchar bus_on; /* Bus on time, x 125ns (see below) */
+ unchar bus_off; /* Bus off time, "" "" */
+ unchar rsvd; /* Reserved */
+ unchar mailboxes[3]; /* Address of Mailboxes, MSB first */
+ unchar ogmbs; /* Number of outgoing MBs, max 64, 0,1 = 1 */
+ unchar icmbs; /* Number of incoming MBs, "" "" */
+} InitCmd;
+
+#define BUS_ON 64 /* x 125ns = 8000ns (BIOS default) */
+#define BUS_OFF 15 /* x 125ns = 1875ns (BIOS default) */
+
+/* Interrupt Status Port - also returns diagnostic codes at ASC reset
+ *
+ * if msb is zero, the lower bits are diagnostic status
+ * Diagnostics:
+ * 01 No diagnostic error occurred
+ * 02 RAM failure
+ * 03 FIFO R/W failed
+ * 04 SBIC register read/write failed
+ * 05 Initialization D-FF failed
+ * 06 Host IRQ D-FF failed
+ * 07 ROM checksum error
+ * Interrupt status (bitwise):
+ * 10NNNNNN outgoing mailbox NNNNNN is free
+ * 11NNNNNN incoming mailbox NNNNNN needs service
+ */
+#define MB_INTR 0xC0 /* Mailbox Service possible/required */
+#define IMB_INTR 0x40 /* 1 Incoming / 0 Outgoing */
+#define MB_MASK 0x3f /* mask for mailbox number */
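+
+/* Decoding sketch (illustrative only, not part of the driver): with the bit
+   layout documented above, an interrupt status byte can be broken down as
+   shown below. */
+#if 0
+    unchar flag = inb(host->iobase + ASC_INTR_STAT);
+    if (flag & 0x80) {                  /* 10NNNNNN or 11NNNNNN */
+      int incoming = flag & IMB_INTR;   /* set: an ICMB needs service */
+      int mb_no    = flag & MB_MASK;    /* mailbox number NNNNNN */
+    }
+#endif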
+
+/* CONTROL port bits
+ */
+#define INT_EN 0x08 /* Interrupt Enable */
+#define DMA_EN 0x04 /* DMA Enable */
+#define SCSI_RES 0x02 /* SCSI Reset */
+#define ASC_RES 0x01 /* ASC Reset */
+
+/*
+ Driver data structures:
+ - mb and scbs are required for interfacing with the host adapter.
+ An SCB has extra fields not visible to the adapter; mb's
+ _cannot_ do this, since the adapter assumes they are contiguous in
+ memory, 4 bytes each, with ICMBs following OGMBs, and uses this fact
+ to access them.
+ - An icb is for host-only (non-SCSI) commands. ICBs are 16 bytes each;
+ the additional bytes are used only by the driver.
+ - For now, a pool of SCBs are kept in global storage by this driver,
+ and are allocated and freed as needed.
+
+ The 7000-FASST2 marks OGMBs empty as soon as it has _started_ a command,
+ not when it has finished. Since the SCB must be around for completion,
+ problems arise when SCBs correspond to OGMBs, which may be reallocated
+ earlier (or delayed unnecessarily until a command completes).
+ Mailboxes are used as transient data structures, simply for
+ carrying SCB addresses to/from the 7000-FASST2.
+
+ Note also that since SCBs are not "permanently" associated with mailboxes,
+ there is no need to keep a global list of Scsi_Cmnd pointers indexed
+ by OGMB. Again, SCBs reference their Scsi_Cmnds directly, so mailbox
+ indices need not be involved.
+*/
+
+/*
+ * WD7000-specific scatter/gather element structure
+ */
+typedef struct sgb {
+ unchar len[3];
+ unchar ptr[3]; /* Also SCSI-style - MSB first */
+} Sgb;
+
+typedef struct scb { /* Command Control Block 5.4.1 */
+ unchar op; /* Command Control Block Operation Code */
+ unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */
+ /* Outbound data transfer, length is checked*/
+ /* Inbound data transfer, length is checked */
+ /* Logical Unit Number */
+ unchar cdb[12]; /* SCSI Command Block */
+ volatile unchar status; /* SCSI Return Status */
+ volatile unchar vue; /* Vendor Unique Error Code */
+ unchar maxlen[3]; /* Maximum Data Transfer Length */
+ unchar dataptr[3]; /* SCSI Data Block Pointer */
+ unchar linkptr[3]; /* Next Command Link Pointer */
+ unchar direc; /* Transfer Direction */
+ unchar reserved2[6]; /* SCSI Command Descriptor Block */
+ /* end of hardware SCB */
+ Scsi_Cmnd *SCpnt; /* Scsi_Cmnd using this SCB */
+ Sgb sgb[WD7000_SG]; /* Scatter/gather list for this SCB */
+ Adapter *host; /* host adapter */
+ struct scb *next; /* for lists of scbs */
+} Scb;
+
+/*
+ * This driver is written to allow host-only commands to be executed.
+ * These use a 16-byte block called an ICB. The format is extended by the
+ * driver to 18 bytes, to support the status returned in the ICMB and
+ * an execution phase code.
+ *
+ * There are other formats besides these; these are the ones I've tried
+ * to use. Formats for some of the defined ICB opcodes are not defined
+ * (notably, get/set unsolicited interrupt status) in my copy of the OEM
+ * manual, and others are ambiguous/hard to follow.
+ */
+#define ICB_OP_MASK 0x80 /* distinguishes scbs from icbs */
+#define ICB_OP_OPEN_RBUF 0x80 /* open receive buffer */
+#define ICB_OP_RECV_CMD 0x81 /* receive command from initiator */
+#define ICB_OP_RECV_DATA 0x82 /* receive data from initiator */
+#define ICB_OP_RECV_SDATA 0x83 /* receive data with status from init. */
+#define ICB_OP_SEND_DATA 0x84 /* send data with status to initiator */
+#define ICB_OP_SEND_STAT 0x86 /* send command status to initiator */
+ /* 0x87 is reserved */
+#define ICB_OP_READ_INIT 0x88 /* read initialization bytes */
+#define ICB_OP_READ_ID 0x89 /* read adapter's SCSI ID */
+#define ICB_OP_SET_UMASK 0x8A /* set unsolicited interrupt mask */
+#define ICB_OP_GET_UMASK 0x8B /* read unsolicited interrupt mask */
+#define ICB_OP_GET_REVISION 0x8C /* read firmware revision level */
+#define ICB_OP_DIAGNOSTICS 0x8D /* execute diagnostics */
+#define ICB_OP_SET_EPARMS 0x8E /* set execution parameters */
+#define ICB_OP_GET_EPARMS 0x8F /* read execution parameters */
+
+typedef struct icbRecvCmd {
+ unchar op;
+ unchar IDlun; /* Initiator SCSI ID/lun */
+ unchar len[3]; /* command buffer length */
+ unchar ptr[3]; /* command buffer address */
+ unchar rsvd[7]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbRecvCmd;
+
+typedef struct icbSendStat {
+ unchar op;
+ unchar IDlun; /* Target SCSI ID/lun */
+ unchar stat; /* (outgoing) completion status byte 1 */
+ unchar rsvd[12]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbSendStat;
+
+typedef struct icbRevLvl {
+ unchar op;
+ volatile unchar primary; /* primary revision level (returned) */
+ volatile unchar secondary; /* secondary revision level (returned) */
+ unchar rsvd[12]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbRevLvl;
+
+typedef struct icbUnsMask { /* I'm totally guessing here */
+ unchar op;
+ volatile unchar mask[14]; /* mask bits */
+#if 0
+ unchar rsvd[12]; /* reserved */
+#endif
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbUnsMask;
+
+typedef struct icbDiag {
+ unchar op;
+ unchar type; /* diagnostics type code (0-3) */
+ unchar len[3]; /* buffer length */
+ unchar ptr[3]; /* buffer address */
+ unchar rsvd[7]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbDiag;
+
+#define ICB_DIAG_POWERUP 0 /* Power-up diags only */
+#define ICB_DIAG_WALKING 1 /* walking 1's pattern */
+#define ICB_DIAG_DMA 2 /* DMA - system memory diags */
+#define ICB_DIAG_FULL 3 /* do both 1 & 2 */
+
+typedef struct icbParms {
+ unchar op;
+ unchar rsvd1; /* reserved */
+ unchar len[3]; /* parms buffer length */
+ unchar ptr[3]; /* parms buffer address */
+ unchar idx[2]; /* index (MSB-LSB) */
+ unchar rsvd2[5]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbParms;
+
+typedef struct icbAny {
+ unchar op;
+ unchar data[14]; /* format-specific data */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbAny;
+
+typedef union icb {
+ unchar op; /* ICB opcode */
+ IcbRecvCmd recv_cmd; /* format for receive command */
+ IcbSendStat send_stat; /* format for send status */
+ IcbRevLvl rev_lvl; /* format for get revision level */
+ IcbDiag diag; /* format for execute diagnostics */
+ IcbParms eparms; /* format for get/set exec parms */
+ IcbAny icb; /* generic format */
+ unchar data[18];
+} Icb;
+
+
+/*
+ * Driver SCB structure pool.
+ *
+ * The SCBs declared here are shared by all host adapters; hence, this
+ * structure is not part of the Adapter structure.
+ */
+static Scb scbs[MAX_SCBS];
+static Scb *scbfree = NULL; /* free list */
+static int freescbs = MAX_SCBS; /* free list counter */
+
+/*
+ * END of data/declarations - code follows.
+ */
+
+
+#ifdef ANY2SCSI_INLINE
+/*
+ Since they're used a lot, I've redone the following from the macros
+ formerly in wd7000.h, hopefully to speed them up by getting rid of
+ all the shifting (it may not matter; GCC might have done as well anyway).
+
+ xany2scsi and xscsi2int were not being used, and are no longer defined.
+ (They were simply 4-byte versions of these routines).
+*/
+
+typedef union { /* let's cheat... */
+ int i;
+ unchar u[sizeof(int)]; /* the sizeof(int) makes it more portable */
+} i_u;
+
+
+static inline void any2scsi( unchar *scsi, int any )
+{
+ *scsi++ = ((i_u) any).u[2];
+ *scsi++ = ((i_u) any).u[1];
+ *scsi++ = ((i_u) any).u[0];
+}
+
+
+static inline int scsi2int( unchar *scsi )
+{
+ i_u result;
+
+ result.i = 0; /* clears unused bytes */
+ *(result.u+2) = *scsi++;
+ *(result.u+1) = *scsi++;
+ *(result.u) = *scsi++;
+ return result.i;
+}
+#else
+/*
+ These are the old ones - I've just moved them here...
+*/
+#undef any2scsi
+#define any2scsi(up, p) \
+(up)[0] = (((unsigned long)(p)) >> 16); \
+(up)[1] = ((unsigned long)(p)) >> 8; \
+(up)[2] = ((unsigned long)(p));
+
+#undef scsi2int
+#define scsi2int(up) ( (((unsigned long)*(up)) << 16) + \
+ (((unsigned long)(up)[1]) << 8) + ((unsigned long)(up)[2]) )
+#endif
+
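+/* Usage note: the adapter expects 24-bit, MSB-first ("SCSI-style")
+   addresses, so mail_out() below hands an SCB to the board with
+   any2scsi((unchar *) ogmbs[ogmb].scbptr, (int) scbptr), and the low
+   three bytes can be read back with scsi2int(). */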
+
+static inline void wd7000_enable_intr(Adapter *host)
+{
+ host->control |= INT_EN;
+ outb(host->control, host->iobase+ASC_CONTROL);
+}
+
+
+static inline void wd7000_enable_dma(Adapter *host)
+{
+ host->control |= DMA_EN;
+ outb(host->control,host->iobase+ASC_CONTROL);
+ set_dma_mode(host->dma, DMA_MODE_CASCADE);
+ enable_dma(host->dma);
+}
+
+
+#define WAITnexttimeout 200 /* 2 seconds */
+
+#define WAIT(port, mask, allof, noneof) \
+ { register volatile unsigned WAITbits; \
+ register unsigned long WAITtimeout = jiffies + WAITnexttimeout; \
+ while (1) { \
+ WAITbits = inb(port) & (mask); \
+ if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
+ break; \
+ if (jiffies > WAITtimeout) goto fail; \
+ } \
+ }
+
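+/* Usage note: WAIT() polls a status port until the requested bits settle or
+   roughly two seconds elapse; on timeout it jumps to a `fail:' label that
+   the caller must provide -- see command_out() below. */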
+
+static inline void delay( unsigned how_long )
+{
+ register unsigned long time = jiffies + how_long;
+
+ while (jiffies < time);
+}
+
+
+static inline int command_out(Adapter *host, unchar *cmd, int len)
+{
+ WAIT(host->iobase+ASC_STAT,ASC_STATMASK,CMD_RDY,0);
+ while (len--) {
+ do {
+ outb(*cmd, host->iobase+ASC_COMMAND);
+ WAIT(host->iobase+ASC_STAT, ASC_STATMASK, CMD_RDY, 0);
+ } while (inb(host->iobase+ASC_STAT) & CMD_REJ);
+ cmd++;
+ }
+ return 1;
+
+fail:
+ printk("wd7000 command_out: WAIT failed(%d)\n", len+1);
+ return 0;
+}
+
+
+/*
+ * This version of alloc_scbs is in preparation for supporting multiple
+ * commands per lun and command chaining, by queueing pending commands.
+ * We will need to allocate Scbs in blocks since they will wait to be
+ * executed so there is the possibility of deadlock otherwise.
+ * Also, to keep larger requests from being starved by smaller requests,
+ * we limit access to this routine with an internal busy flag, so that
+ * the satisfiability of a request is not dependent on the size of the
+ * request.
+ */
+static inline Scb *alloc_scbs(int needed)
+{
+ register Scb *scb, *p;
+ register unsigned long flags;
+ register unsigned long timeout = jiffies + WAITnexttimeout;
+ register unsigned long now;
+ static int busy = 0;
+ int i;
+
+ if (needed <= 0) return NULL; /* sanity check */
+
+ save_flags(flags);
+ cli();
+ while (busy) { /* someone else is allocating */
+ sti(); /* Yes this is really needed here */
+ now = jiffies; while (jiffies == now) /* wait a jiffy */;
+ cli();
+ }
+ busy = 1; /* not busy now; it's our turn */
+
+ while (freescbs < needed) {
+ timeout = jiffies + WAITnexttimeout;
+ do {
+ sti(); /* Yes this is really needed here */
+ now = jiffies; while (jiffies == now) /* wait a jiffy */;
+ cli();
+ } while (freescbs < needed && jiffies <= timeout);
+ /*
+ * If we get here with enough free Scbs, we can take them.
+ * Otherwise, we timed out and didn't get enough.
+ */
+ if (freescbs < needed) {
+ busy = 0;
+ panic("wd7000: can't get enough free SCBs.\n");
+ restore_flags(flags);
+ return NULL;
+ }
+ }
+ scb = scbfree; freescbs -= needed;
+ for (i = 0; i < needed; i++) { p = scbfree; scbfree = p->next; }
+ p->next = NULL;
+
+ busy = 0; /* we're done */
+
+ restore_flags(flags);
+
+ return scb;
+}
+
+
+static inline void free_scb( Scb *scb )
+{
+ register unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ memset(scb, 0, sizeof(Scb));
+ scb->next = scbfree; scbfree = scb;
+ freescbs++;
+
+ restore_flags(flags);
+}
+
+
+static inline void init_scbs(void)
+{
+ int i;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ scbfree = &(scbs[0]);
+ memset(scbs, 0, sizeof(scbs));
+ for (i = 0; i < MAX_SCBS-1; i++) {
+ scbs[i].next = &(scbs[i+1]); scbs[i].SCpnt = NULL;
+ }
+ scbs[MAX_SCBS-1].next = NULL;
+ scbs[MAX_SCBS-1].SCpnt = NULL;
+
+ restore_flags(flags);
+}
+
+
+static int mail_out( Adapter *host, Scb *scbptr )
+/*
+ * Note: this can also be used for ICBs; just cast to the parm type.
+ */
+{
+ register int i, ogmb;
+ register unsigned long flags;
+ unchar start_ogmb;
+ Mailbox *ogmbs = host->mb.ogmb;
+ int *next_ogmb = &(host->next_ogmb);
+#ifdef DEBUG
+ printk("wd7000 mail_out: %06x",(unsigned int) scbptr);
+#endif
+ /* We first look for a free outgoing mailbox */
+ save_flags(flags);
+ cli();
+ ogmb = *next_ogmb;
+ for (i = 0; i < OGMB_CNT; i++) {
+ if (ogmbs[ogmb].status == 0) {
+#ifdef DEBUG
+ printk(" using OGMB %x",ogmb);
+#endif
+ ogmbs[ogmb].status = 1;
+ any2scsi((unchar *) ogmbs[ogmb].scbptr, (int) scbptr);
+
+ *next_ogmb = (ogmb+1) % OGMB_CNT;
+ break;
+ } else
+	    ogmb = (ogmb + 1) % OGMB_CNT;
+ }
+ restore_flags(flags);
+#ifdef DEBUG
+ printk(", scb is %x",(unsigned int) scbptr);
+#endif
+ if (i >= OGMB_CNT) {
+	/*
+	 * Alternatively, we might issue the "interrupt on free OGMB"
+	 * command and sleep, but we would have to be sure we are not
+	 * running in the init task.  Instead, this version assumes the
+	 * caller will be persistent and will try again.  Since it is the
+	 * adapter that marks OGMBs free, waiting even with interrupts off
+	 * should work, since they are freed very quickly in most cases.
+	 */
+#ifdef DEBUG
+ printk(", no free OGMBs.\n");
+#endif
+ return 0;
+ }
+
+ wd7000_enable_intr(host);
+
+ start_ogmb = START_OGMB | ogmb;
+ command_out( host, &start_ogmb, 1 );
+#ifdef DEBUG
+ printk(", awaiting interrupt.\n");
+#endif
+ return 1;
+}
+
+
+int make_code(unsigned hosterr, unsigned scsierr)
+{
+#ifdef DEBUG
+ int in_error = hosterr;
+#endif
+
+ switch ((hosterr>>8)&0xff){
+ case 0: /* Reserved */
+ hosterr = DID_ERROR;
+ break;
+ case 1: /* Command Complete, no errors */
+ hosterr = DID_OK;
+ break;
+ case 2: /* Command complete, error logged in scb status (scsierr) */
+ hosterr = DID_OK;
+ break;
+ case 4: /* Command failed to complete - timeout */
+ hosterr = DID_TIME_OUT;
+ break;
+ case 5: /* Command terminated; Bus reset by external device */
+ hosterr = DID_RESET;
+ break;
+ case 6: /* Unexpected Command Received w/ host as target */
+ hosterr = DID_BAD_TARGET;
+ break;
+ case 80: /* Unexpected Reselection */
+ case 81: /* Unexpected Selection */
+ hosterr = DID_BAD_INTR;
+ break;
+ case 82: /* Abort Command Message */
+ hosterr = DID_ABORT;
+ break;
+ case 83: /* SCSI Bus Software Reset */
+ case 84: /* SCSI Bus Hardware Reset */
+ hosterr = DID_RESET;
+ break;
+ default: /* Reserved */
+ hosterr = DID_ERROR;
+ break;
+ }
+#ifdef DEBUG
+ if (scsierr||hosterr)
+ printk("\nSCSI command error: SCSI %02x host %04x return %d",
+ scsierr,in_error,hosterr);
+#endif
+ return scsierr | (hosterr << 16);
+}
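make_code() returns the SCSI status in the low byte and the translated DID_* host code shifted into bits 16 and up, the packing the SCSI midlevel expects in SCpnt->result. As a hedged illustration (not driver code), the two fields can be recovered like this:

/* Illustration only: invert the packing done by make_code(),
 * result = scsierr | (hosterr << 16). */
static void split_result(unsigned int result,
			 unsigned int *scsi_status, unsigned int *host_code)
{
	*scsi_status = result & 0xff;		/* SCSI status byte */
	*host_code = (result >> 16) & 0xff;	/* DID_* value */
}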
+
+
+static void wd7000_scsi_done(Scsi_Cmnd * SCpnt)
+{
+#ifdef DEBUG
+ printk("wd7000_scsi_done: %06x\n",(unsigned int) SCpnt);
+#endif
+ SCpnt->SCp.phase = 0;
+}
+
+
+#define wd7000_intr_ack(host) outb(0,host->iobase+ASC_INTR_ACK)
+
+void wd7000_intr_handle(int irq, struct pt_regs * regs)
+{
+ register int flag, icmb, errstatus, icmb_status;
+ register int host_error, scsi_error;
+ register Scb *scb; /* for SCSI commands */
+ register IcbAny *icb; /* for host commands */
+ register Scsi_Cmnd *SCpnt;
+ Adapter *host = irq2host[irq]; /* This MUST be set!!! */
+ Mailbox *icmbs = host->mb.icmb;
+
+#ifdef DEBUG
+ printk("wd7000_intr_handle: irq = %d, host = %06x\n", irq, host);
+#endif
+
+ flag = inb(host->iobase+ASC_INTR_STAT);
+#ifdef DEBUG
+ printk("wd7000_intr_handle: intr stat = %02x\n",flag);
+#endif
+
+ if (!(inb(host->iobase+ASC_STAT) & INT_IM)) {
+ /* NB: these are _very_ possible if IRQ 15 is being used, since
+ it's the "garbage collector" on the 2nd 8259 PIC. Specifically,
+ any interrupt signal into the 8259 which can't be identified
+ comes out as 7 from the 8259, which is 15 to the host. Thus, it
+ is a good thing the WD7000 has an interrupt status port, so we
+ can sort these out. Otherwise, electrical noise and other such
+ problems would be indistinguishable from valid interrupts...
+ */
+#ifdef DEBUG
+ printk("wd7000_intr_handle: phantom interrupt...\n");
+#endif
+ wd7000_intr_ack(host);
+ return;
+ }
+
+ if (flag & MB_INTR) {
+ /* The interrupt is for a mailbox */
+ if (!(flag & IMB_INTR)) {
+#ifdef DEBUG
+ printk("wd7000_intr_handle: free outgoing mailbox");
+#endif
+ /*
+ * If sleep_on() and the "interrupt on free OGMB" command are
+ * used in mail_out(), wake_up() should correspondingly be called
+ * here. For now, we don't need to do anything special.
+ */
+ wd7000_intr_ack(host);
+ return;
+ } else {
+ /* The interrupt is for an incoming mailbox */
+ icmb = flag & MB_MASK;
+ icmb_status = icmbs[icmb].status;
+ if (icmb_status & 0x80) { /* unsolicited - result in ICMB */
+#ifdef DEBUG
+ printk("wd7000_intr_handle: unsolicited interrupt %02xh\n",
+ icmb_status);
+#endif
+ wd7000_intr_ack(host);
+ return;
+ }
+ scb = (struct scb *) scsi2int((unchar *)icmbs[icmb].scbptr);
+ icmbs[icmb].status = 0;
+ if (!(scb->op & ICB_OP_MASK)) { /* an SCB is done */
+ SCpnt = scb->SCpnt;
+ if (--(SCpnt->SCp.phase) <= 0) { /* all scbs are done */
+ host_error = scb->vue | (icmb_status << 8);
+ scsi_error = scb->status;
+ errstatus = make_code(host_error,scsi_error);
+ SCpnt->result = errstatus;
+
+ free_scb(scb);
+
+ SCpnt->scsi_done(SCpnt);
+ }
+ } else { /* an ICB is done */
+ icb = (IcbAny *) scb;
+ icb->status = icmb_status;
+ icb->phase = 0;
+ }
+ } /* incoming mailbox */
+ }
+
+ wd7000_intr_ack(host);
+ return;
+}
+
+
+int wd7000_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ register Scb *scb;
+ register Sgb *sgb;
+ register unchar *cdb = (unchar *) SCpnt->cmnd;
+ register unchar idlun;
+ register short cdblen;
+ Adapter *host = (Adapter *) SCpnt->host->hostdata;
+
+ cdblen = SCpnt->cmd_len;
+ idlun = ((SCpnt->target << 5) & 0xe0) | (SCpnt->lun & 7);
+ SCpnt->scsi_done = done;
+ SCpnt->SCp.phase = 1;
+ scb = alloc_scbs(1);
+ scb->idlun = idlun;
+ memcpy(scb->cdb, cdb, cdblen);
+ scb->direc = 0x40; /* Disable direction check */
+
+ scb->SCpnt = SCpnt; /* so we can find stuff later */
+ SCpnt->host_scribble = (unchar *) scb;
+ scb->host = host;
+
+ if (SCpnt->use_sg) {
+ struct scatterlist *sg = (struct scatterlist *) SCpnt->request_buffer;
+ unsigned i;
+
+ if (SCpnt->host->sg_tablesize == SG_NONE) {
+ panic("wd7000_queuecommand: scatter/gather not supported.\n");
+ }
+#ifdef DEBUG
+ printk("Using scatter/gather with %d elements.\n",SCpnt->use_sg);
+#endif
+
+ sgb = scb->sgb;
+ scb->op = 1;
+ any2scsi(scb->dataptr, (int) sgb);
+ any2scsi(scb->maxlen, SCpnt->use_sg * sizeof (Sgb) );
+
+ for (i = 0; i < SCpnt->use_sg; i++) {
+ any2scsi(sgb[i].ptr, (int) sg[i].address);
+ any2scsi(sgb[i].len, sg[i].length);
+ }
+ } else {
+ scb->op = 0;
+ any2scsi(scb->dataptr, (int) SCpnt->request_buffer);
+ any2scsi(scb->maxlen, SCpnt->request_bufflen);
+ }
+ while (!mail_out(host, scb)) /* keep trying */;
+
+ return 1;
+}
+
+
+int wd7000_command(Scsi_Cmnd *SCpnt)
+{
+ wd7000_queuecommand(SCpnt, wd7000_scsi_done);
+
+ while (SCpnt->SCp.phase > 0) barrier(); /* phase counts scbs down to 0 */
+
+ return SCpnt->result;
+}
+
+
+int wd7000_diagnostics( Adapter *host, int code )
+{
+ static IcbDiag icb = {ICB_OP_DIAGNOSTICS};
+ static unchar buf[256];
+ unsigned long timeout;
+
+ icb.type = code;
+ any2scsi(icb.len, sizeof(buf));
+ any2scsi(icb.ptr, (int) &buf);
+ icb.phase = 1;
+ /*
+ * This routine is only called at init, so there should be OGMBs
+ * available. I'm assuming so here. If this is going to
+ * fail, I can just let the timeout catch the failure.
+ */
+ mail_out(host, (struct scb *) &icb);
+ timeout = jiffies + WAITnexttimeout; /* wait up to 2 seconds */
+ while (icb.phase && jiffies < timeout)
+ barrier(); /* wait for completion */
+
+ if (icb.phase) {
+ printk("wd7000_diagnostics: timed out.\n");
+ return 0;
+ }
+ if (make_code(icb.vue|(icb.status << 8),0)) {
+ printk("wd7000_diagnostics: failed (%02x,%02x)\n",
+ icb.vue, icb.status);
+ return 0;
+ }
+
+ return 1;
+}
+
+
+int wd7000_init( Adapter *host )
+{
+ InitCmd init_cmd = {
+ INITIALIZATION, 7, BUS_ON, BUS_OFF, 0, {0,0,0}, OGMB_CNT, ICMB_CNT
+ };
+ int diag;
+
+ /*
+ Reset the adapter - only. The SCSI bus was initialized at power-up,
+ and we need to do this just so we control the mailboxes, etc.
+ */
+ outb(ASC_RES, host->iobase+ASC_CONTROL);
+ delay(1); /* reset pulse: this is 10ms, only need 25us */
+ outb(0,host->iobase+ASC_CONTROL);
+ host->control = 0; /* this must always shadow ASC_CONTROL */
+ WAIT(host->iobase+ASC_STAT, ASC_STATMASK, CMD_RDY, 0);
+
+ if ((diag = inb(host->iobase+ASC_INTR_STAT)) != 1) {
+ printk("wd7000_init: ");
+ switch (diag) {
+ case 2:
+ printk("RAM failure.\n");
+ break;
+ case 3:
+ printk("FIFO R/W failed\n");
+ break;
+ case 4:
+ printk("SBIC register R/W failed\n");
+ break;
+ case 5:
+ printk("Initialization D-FF failed.\n");
+ break;
+ case 6:
+ printk("Host IRQ D-FF failed.\n");
+ break;
+ case 7:
+ printk("ROM checksum error.\n");
+ break;
+ default:
+ printk("diagnostic code %02Xh received.\n", diag);
+ break;
+ }
+ return 0;
+ }
+
+ /* Clear mailboxes */
+ memset(&(host->mb), 0, sizeof(host->mb));
+
+ /* Execute init command */
+ any2scsi((unchar *) &(init_cmd.mailboxes), (int) &(host->mb));
+ if (!command_out(host, (unchar *) &init_cmd, sizeof(init_cmd))) {
+ printk("wd7000_init: adapter initialization failed.\n");
+ return 0;
+ }
+ WAIT(host->iobase+ASC_STAT, ASC_STATMASK, ASC_INIT, 0);
+
+ if (request_irq(host->irq, wd7000_intr_handle, SA_INTERRUPT, "wd7000")) {
+ printk("wd7000_init: can't get IRQ %d.\n", host->irq);
+ return 0;
+ }
+ if (request_dma(host->dma,"wd7000")) {
+ printk("wd7000_init: can't get DMA channel %d.\n", host->dma);
+ free_irq(host->irq);
+ return 0;
+ }
+ wd7000_enable_dma(host);
+ wd7000_enable_intr(host);
+
+ if (!wd7000_diagnostics(host,ICB_DIAG_FULL)) {
+ free_dma(host->dma);
+ free_irq(host->irq);
+ return 0;
+ }
+
+ return 1;
+
+ fail:
+ printk("wd7000_init: WAIT timed out.\n");
+ return 0; /* 0 = not ok */
+}
+
+
+void wd7000_revision(Adapter *host)
+{
+ static IcbRevLvl icb = {ICB_OP_GET_REVISION};
+
+ icb.phase = 1;
+ /*
+ * Like diagnostics, this is only done at init time, in fact, from
+ * wd7000_detect, so there should be OGMBs available. If it fails,
+ * the only damage will be that the revision will show up as 0.0,
+ * which in turn means that scatter/gather will be disabled.
+ */
+ mail_out(host, (struct scb *) &icb);
+ while (icb.phase)
+ barrier(); /* wait for completion */
+ host->rev1 = icb.primary;
+ host->rev2 = icb.secondary;
+}
+
+
+int wd7000_detect(Scsi_Host_Template * tpnt)
+/*
+ * Returns the number of adapters this driver is supporting.
+ *
+ * The source for hosts.c says to wait to call scsi_register until 100%
+ * sure about an adapter. We need to do it a little sooner here; we
+ * need the storage set up by scsi_register before wd7000_init, and
+ * changing the location of an Adapter structure is more trouble than
+ * calling scsi_unregister.
+ *
+ */
+{
+ int i,j, present = 0;
+ const Config *cfg;
+ const Signature *sig;
+ Adapter *host = NULL;
+ struct Scsi_Host *sh;
+
+ tpnt->proc_dir = &proc_scsi_wd7000;
+
+ /* Set up SCB free list, which is shared by all adapters */
+ init_scbs();
+
+ cfg = configs;
+ for (i = 0; i < NUM_CONFIGS; i++) {
+ sig = signatures;
+ for (j = 0; j < NUM_SIGNATURES; j++) {
+ if (!memcmp(cfg->bios+sig->ofs, sig->sig, sig->len)) {
+ /* matched this one */
+#ifdef DEBUG
+ printk("WD-7000 SST BIOS detected at %04X: checking...\n",
+ (int) cfg->bios);
+#endif
+ /*
+ * We won't explicitly test the configuration (in this
+ * version); instead, we'll just see if it works to
+ * setup the adapter; if it does, we'll use it.
+ */
+ if (check_region(cfg->iobase, 4)) { /* ports in use */
+			printk("IO %xh already in use.\n", cfg->iobase);
+ continue;
+ }
+ /*
+ * We register here, to get a pointer to the extra space,
+ * which we'll use as the Adapter structure (host) for
+ * this adapter. It is located just after the registered
+ * Scsi_Host structure (sh), and is located by the empty
+ * array hostdata.
+ */
+ sh = scsi_register(tpnt, sizeof(Adapter) );
+ host = (Adapter *) sh->hostdata;
+#ifdef DEBUG
+ printk("wd7000_detect: adapter allocated at %06x\n",
+ (int)host);
+#endif
+ memset( host, 0, sizeof(Adapter) );
+ host->sh = sh;
+ host->irq = cfg->irq;
+ host->iobase = cfg->iobase;
+ host->dma = cfg->dma;
+ irq2host[host->irq] = host;
+
+ if (!wd7000_init(host)) { /* Initialization failed */
+ scsi_unregister (sh);
+ continue;
+ }
+
+ /*
+ * OK from here - we'll use this adapter/configuration.
+ */
+ wd7000_revision(host); /* important for scatter/gather */
+
+ printk("Western Digital WD-7000 (%d.%d) ",
+ host->rev1, host->rev2);
+ printk("using IO %xh IRQ %d DMA %d.\n",
+ host->iobase, host->irq, host->dma);
+
+ request_region(host->iobase, 4,"wd7000"); /* Register our ports */
+ /*
+ * For boards before rev 6.0, scatter/gather isn't supported.
+ */
+ if (host->rev1 < 6) sh->sg_tablesize = SG_NONE;
+
+ present++; /* count it */
+ break; /* don't try any more sigs */
+ }
+ sig++; /* try next signature with this configuration */
+ }
+ cfg++; /* try next configuration */
+ }
+
+ return present;
+}
+
+
+/*
+ * I have absolutely NO idea how to do an abort with the WD7000...
+ */
+int wd7000_abort(Scsi_Cmnd * SCpnt)
+{
+ Adapter *host = (Adapter *) SCpnt->host->hostdata;
+
+ if (inb(host->iobase+ASC_STAT) & INT_IM) {
+ printk("wd7000_abort: lost interrupt\n");
+ wd7000_intr_handle(host->irq, NULL);
+ return SCSI_ABORT_SUCCESS;
+ }
+
+ return SCSI_ABORT_SNOOZE;
+}
+
+
+/*
+ * I also have no idea how to do a reset...
+ */
+int wd7000_reset(Scsi_Cmnd * SCpnt)
+{
+ return SCSI_RESET_PUNT;
+}
+
+
+/*
+ * This was borrowed directly from aha1542.c, but my disks are organized
+ * this way, so I think it will work OK. Someone who is ambitious can
+ * borrow a newer or more complete version from another driver.
+ */
+int wd7000_biosparam(Disk * disk, kdev_t dev, int* ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+/* if (ip[2] >= 1024) ip[2] = 1024; */
+ return 0;
+}
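With the fixed 64-head, 32-sector translation above, each cylinder spans 64 * 32 = 2048 blocks, so the cylinder count is just the capacity shifted right by 11. A small stand-alone sketch with a made-up capacity (illustrative, not part of the driver):

#include <stdio.h>

/* Worked example of the 64/32 geometry used by wd7000_biosparam();
 * the capacity below is hypothetical. */
int main(void)
{
	unsigned long capacity = 2097152;	/* 512-byte blocks, i.e. 1 GB */
	int heads = 64, sectors = 32;
	int cylinders = capacity >> 11;		/* 2048 blocks per cylinder */

	printf("%lu blocks -> C/H/S = %d/%d/%d\n",
	       capacity, cylinders, heads, sectors);
	return 0;	/* prints: 2097152 blocks -> C/H/S = 1024/64/32 */
}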
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but for now it lives here */
+Scsi_Host_Template driver_template = WD7000;
+
+#include "scsi_module.c"
+#endif
diff --git a/i386/i386at/gpl/linux/scsi/wd7000.h b/i386/i386at/gpl/linux/scsi/wd7000.h
new file mode 100644
index 00000000..5a194dbc
--- /dev/null
+++ b/i386/i386at/gpl/linux/scsi/wd7000.h
@@ -0,0 +1,55 @@
+#ifndef _WD7000_H
+
+/* $Id: wd7000.h,v 1.1.1.1 1997/02/25 21:27:53 thomas Exp $
+ *
+ * Header file for the WD-7000 driver for Linux
+ *
+ * John Boyd <boyd@cis.ohio-state.edu> Jan 1994:
+ * This file has been reduced to only the definitions needed for the
+ * WD7000 host structure.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+int wd7000_detect(Scsi_Host_Template *);
+int wd7000_command(Scsi_Cmnd *);
+int wd7000_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int wd7000_abort(Scsi_Cmnd *);
+int wd7000_reset(Scsi_Cmnd *);
+int wd7000_biosparam(Disk *, kdev_t, int *);
+
+#ifndef NULL
+#define NULL 0L
+#endif
+
+/*
+ * In this version, sg_tablesize now defaults to WD7000_SG, and will
+ * be set to SG_NONE for older boards. This is the reverse of the
+ * previous default, and was changed so that the driver-level
+ * Scsi_Host_Template would reflect the driver's support for scatter/
+ * gather.
+ *
+ * Also, it has been reported that boards at Revision 6 support scatter/
+ * gather, so the new definition of an "older" board has been changed
+ * accordingly.
+ */
+#define WD7000_Q 16
+#define WD7000_SG 16
+
+#define WD7000 { NULL, NULL, \
+ NULL, \
+ NULL, \
+ "Western Digital WD-7000", \
+ wd7000_detect, \
+ NULL, \
+ NULL, \
+ wd7000_command, \
+ wd7000_queuecommand, \
+ wd7000_abort, \
+ wd7000_reset, \
+ NULL, \
+ wd7000_biosparam, \
+ WD7000_Q, 7, WD7000_SG, 1, 0, 1, ENABLE_CLUSTERING}
+#endif
diff --git a/i386/i386at/i386at_ds_routines.c b/i386/i386at/i386at_ds_routines.c
new file mode 100644
index 00000000..b1375afd
--- /dev/null
+++ b/i386/i386at/i386at_ds_routines.c
@@ -0,0 +1,270 @@
+/*
+ * Mach device server routines (i386at version).
+ *
+ * Copyright (c) 1996 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/mig_errors.h>
+#include <mach/port.h>
+#include <mach/notify.h>
+
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include "device_interface.h"
+
+#include <i386at/dev_hdr.h>
+#include <i386at/device_emul.h>
+
+extern struct device_emulation_ops mach_device_emulation_ops;
+#ifdef LINUX_DEV
+extern struct device_emulation_ops linux_block_emulation_ops;
+extern struct device_emulation_ops linux_net_emulation_ops;
+#endif
+
+/* List of emulations. */
+static struct device_emulation_ops *emulation_list[] =
+{
+#ifdef LINUX_DEV
+ &linux_block_emulation_ops,
+ &linux_net_emulation_ops,
+#endif
+ &mach_device_emulation_ops,
+};
+
+#define NUM_EMULATION (sizeof (emulation_list) / sizeof (emulation_list[0]))
+
+io_return_t
+ds_device_open (ipc_port_t open_port, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ char *name, device_t *devp)
+{
+ int i;
+ device_t dev;
+ io_return_t err;
+
+ /* Open must be called on the master device port. */
+ if (open_port != master_device_port)
+ return D_INVALID_OPERATION;
+
+ /* There must be a reply port. */
+ if (! IP_VALID (reply_port))
+ {
+ printf ("ds_* invalid reply port\n");
+ Debugger ("ds_* reply_port");
+ return MIG_NO_REPLY;
+ }
+
+ /* Call each emulation's open routine to find the device. */
+ for (i = 0; i < NUM_EMULATION; i++)
+ {
+ err = (*emulation_list[i]->open) (reply_port, reply_port_type,
+ mode, name, devp);
+ if (err != D_NO_SUCH_DEVICE)
+ break;
+ }
+
+ return err;
+}
+
+io_return_t
+ds_device_close (device_t dev)
+{
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+ return (dev->emul_ops->close
+ ? (*dev->emul_ops->close) (dev->emul_data)
+ : D_SUCCESS);
+}
+
+io_return_t
+ds_device_write (device_t dev, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t recnum, io_buf_ptr_t data, unsigned int count,
+ int *bytes_written)
+{
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+ if (! data)
+ return D_INVALID_SIZE;
+ if (! dev->emul_ops->write)
+ return D_INVALID_OPERATION;
+ return (*dev->emul_ops->write) (dev->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ data, count, bytes_written);
+}
+
+io_return_t
+ds_device_write_inband (device_t dev, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode, recnum_t recnum,
+ io_buf_ptr_inband_t data, unsigned count,
+ int *bytes_written)
+{
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+ if (! data)
+ return D_INVALID_SIZE;
+ if (! dev->emul_ops->write_inband)
+ return D_INVALID_OPERATION;
+ return (*dev->emul_ops->write_inband) (dev->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ data, count, bytes_written);
+}
+
+io_return_t
+ds_device_read (device_t dev, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t recnum, int count, io_buf_ptr_t *data,
+ unsigned *bytes_read)
+{
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+ if (! dev->emul_ops->read)
+ return D_INVALID_OPERATION;
+ return (*dev->emul_ops->read) (dev->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ count, data, bytes_read);
+}
+
+io_return_t
+ds_device_read_inband (device_t dev, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t recnum, int count, char *data,
+ unsigned *bytes_read)
+{
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+ if (! dev->emul_ops->read_inband)
+ return D_INVALID_OPERATION;
+ return (*dev->emul_ops->read_inband) (dev->emul_data, reply_port,
+ reply_port_type, mode, recnum,
+ count, data, bytes_read);
+}
+
+io_return_t
+ds_device_set_status (device_t dev, dev_flavor_t flavor,
+ dev_status_t status, mach_msg_type_number_t status_count)
+{
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+ if (! dev->emul_ops->set_status)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->set_status) (dev->emul_data, flavor, status,
+ status_count);
+}
+
+io_return_t
+ds_device_get_status (device_t dev, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t *status_count)
+{
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+ if (! dev->emul_ops->get_status)
+ return D_INVALID_OPERATION;
+
+ return (*dev->emul_ops->get_status) (dev->emul_data, flavor, status,
+ status_count);
+}
+
+io_return_t
+ds_device_set_filter (device_t dev, ipc_port_t receive_port, int priority,
+ filter_t *filter, unsigned filter_count)
+{
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+ if (! dev->emul_ops->set_filter)
+ return D_INVALID_OPERATION;
+ return (*dev->emul_ops->set_filter) (dev->emul_data, receive_port,
+ priority, filter, filter_count);
+}
+
+io_return_t
+ds_device_map (device_t dev, vm_prot_t prot, vm_offset_t offset,
+ vm_size_t size, ipc_port_t *pager, boolean_t unmap)
+{
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+ if (! dev->emul_ops->map)
+ return D_INVALID_OPERATION;
+ return (*dev->emul_ops->map) (dev->emul_data, prot,
+ offset, size, pager, unmap);
+}
+
+boolean_t
+ds_notify (mach_msg_header_t *msg)
+{
+ if (msg->msgh_id == MACH_NOTIFY_NO_SENDERS)
+ {
+ device_t dev;
+ mach_no_senders_notification_t *ns;
+
+ ns = (mach_no_senders_notification_t *) msg;
+ dev = (device_t) ns->not_header.msgh_remote_port;
+ if (dev->emul_ops->no_senders)
+ (*dev->emul_ops->no_senders) (ns);
+ return TRUE;
+ }
+
+ printf ("ds_notify: strange notification %d\n", msg->msgh_id);
+ return FALSE;
+}
+
+io_return_t
+ds_device_write_trap (device_t dev, dev_mode_t mode,
+ recnum_t recnum, vm_offset_t data, vm_size_t count)
+{
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+ if (! dev->emul_ops->write_trap)
+ return D_INVALID_OPERATION;
+ return (*dev->emul_ops->write_trap) (dev->emul_data,
+ mode, recnum, data, count);
+}
+
+io_return_t
+ds_device_writev_trap (device_t dev, dev_mode_t mode,
+ recnum_t recnum, io_buf_vec_t *iovec, vm_size_t count)
+{
+ if (dev == DEVICE_NULL)
+ return D_NO_SUCH_DEVICE;
+ if (! dev->emul_ops->writev_trap)
+ return D_INVALID_OPERATION;
+ return (*dev->emul_ops->writev_trap) (dev->emul_data,
+ mode, recnum, iovec, count);
+}
+
+void
+device_reference (device_t dev)
+{
+ if (dev->emul_ops->reference)
+ (*dev->emul_ops->reference) (dev->emul_data);
+}
+
+void
+device_deallocate (device_t dev)
+{
+ if (dev->emul_ops->dealloc)
+ (*dev->emul_ops->dealloc) (dev->emul_data);
+}
diff --git a/i386/i386at/i8250.h b/i386/i386at/i8250.h
new file mode 100644
index 00000000..fa81173e
--- /dev/null
+++ b/i386/i386at/i8250.h
@@ -0,0 +1,129 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Header file for i8250 chip
+ */
+
+/* port offsets from the base i/o address */
+
+#define RDAT 0
+#define RIE 1
+#define RID 2
+#define RFC 2
+#define RLC 3
+#define RMC 4
+#define RLS 5
+#define RMS 6
+#define RDLSB 0
+#define RDMSB 1
+
+/* interrupt control register */
+
+#define IERD 0x01 /* read int */
+#define IETX 0x02 /* xmit int */
+#define IELS 0x04 /* line status int */
+#define IEMS 0x08 /* modem int */
+
+/* interrupt status register */
+
+#define IDIP 0x01 /* not interrupt pending */
+#define IDMS 0x00 /* modem int */
+#define IDTX 0x02 /* xmit int */
+#define IDRD 0x04 /* read int */
+#define IDLS 0x06 /* line status int */
+#define IDMASK 0x0f /* interrupt ID mask */
+
+/* line control register */
+
+#define LC5 0x00 /* word length 5 */
+#define LC6 0x01 /* word length 6 */
+#define LC7 0x02 /* word length 7 */
+#define LC8 0x03 /* word length 8 */
+#define LCSTB 0x04 /* 2 stop */
+#define LCPEN 0x08 /* parity enable */
+#define LCEPS 0x10 /* even parity select */
+#define LCSP 0x20 /* stick parity */
+#define LCBRK 0x40 /* send break */
+#define LCDLAB 0x80 /* divisor latch access bit */
+#define LCPAR 0x38 /* parity mask */
+
+/* line status register */
+
+#define LSDR 0x01 /* data ready */
+#define LSOR 0x02 /* overrun error */
+#define LSPE 0x04 /* parity error */
+#define LSFE 0x08 /* framing error */
+#define LSBI 0x10 /* break interrupt */
+#define LSTHRE 0x20 /* xmit holding reg empty */
+#define LSTSRE 0x40 /* xmit shift reg empty */
+
+/* modem control register */
+
+#define MCDTR 0x01 /* DTR */
+#define MCRTS 0x02 /* RTS */
+#define MCOUT1 0x04 /* OUT1 */
+#define MCOUT2 0x08 /* OUT2 */
+#define MCLOOP 0x10 /* loopback */
+
+/* modem status register */
+
+#define MSDCTS 0x01 /* delta CTS */
+#define MSDDSR 0x02 /* delta DSR */
+#define MSTERI 0x04 /* delta RE */
+#define MSDRLSD 0x08 /* delta CD */
+#define MSCTS 0x10 /* CTS */
+#define MSDSR 0x20 /* DSR */
+#define MSRI 0x40 /* RE */
+#define MSRLSD 0x80 /* CD */
+
+/* divisor latch register settings for various baud rates */
+
+#define BCNT1200 0x60
+#define BCNT2400 0x30
+#define BCNT4800 0x18
+#define BCNT9600 0x0c
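The BCNT* values follow directly from the standard 8250 divisor formula: a 1.8432 MHz UART clock divided by 16 gives a 115200 Hz base rate, and the divisor latch holds 115200 / baud (115200 / 9600 = 12 = 0x0c, matching BCNT9600). A short stand-alone sketch deriving the table (illustrative only, not part of the header):

#include <stdio.h>

/* Derive the 8250 divisor-latch values: divisor = 115200 / baud
 * (1.8432 MHz crystal / 16).  Matches the BCNT* defines above. */
int main(void)
{
	static const int bauds[] = { 1200, 2400, 4800, 9600 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%5d baud -> divisor 0x%02x\n", bauds[i], 115200 / bauds[i]);
	return 0;	/* prints 0x60, 0x30, 0x18, 0x0c */
}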
diff --git a/i386/i386at/i82586.h b/i386/i386at/i82586.h
new file mode 100644
index 00000000..fd205897
--- /dev/null
+++ b/i386/i386at/i82586.h
@@ -0,0 +1,264 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Defines for managing the status word of the 82586 cpu. For details see
+ * the Intel LAN Component User's Manual starting at p. 2-14.
+ *
+ */
+
+#define SCB_SW_INT 0xf000
+#define SCB_SW_CX 0x8000 /* CU finished w/ int. bit set */
+#define SCB_SW_FR 0x4000 /* RU finished receiving a frame */
+#define SCB_SW_CNA 0x2000 /* CU left active state */
+#define SCB_SW_RNR 0x1000 /* RU left ready state */
+
+/*
+ * Defines for managing the Command Unit Status portion of the 82586
+ * System Control Block.
+ *
+ */
+
+#define SCB_CUS_IDLE 0x0000
+#define SCB_CUS_SUSPND 0x0100
+#define SCB_CUS_ACTV 0x0200
+
+/*
+ * Defines for managing the Receive Unit Status portion of the System
+ * Control Block.
+ *
+ */
+
+#define SCB_RUS_IDLE 0x0000
+#define SCB_RUS_SUSPND 0x0010
+#define SCB_RUS_NORESRC 0x0020
+#define SCB_RUS_READY 0x0040
+
+/*
+ * Defines that manage portions of the Command Word in the System Control
+ * Block of the 82586. Below are the Interrupt Acknowledge Bits and their
+ * appropriate masks.
+ *
+ */
+
+#define SCB_ACK_CX 0x8000
+#define SCB_ACK_FR 0x4000
+#define SCB_ACK_CNA 0x2000
+#define SCB_ACK_RNR 0x1000
+
+/*
+ * Defines for managing the Command Unit Control word, and the Receive
+ * Unit Control word. The software RESET bit is also defined.
+ *
+ */
+
+#define SCB_CU_STRT 0x0100
+#define SCB_CU_RSUM 0x0200
+#define SCB_CU_SUSPND 0x0300
+#define SCB_CU_ABRT 0x0400
+
+#define SCB_RESET 0x0080
+
+#define SCB_RU_STRT 0x0010
+#define SCB_RU_RSUM 0x0020
+#define SCB_RU_SUSPND 0x0030
+#define SCB_RU_ABRT 0x0040
+
+
+/*
+ * The following define Action Commands for the 82586 chip.
+ *
+ */
+
+#define AC_NOP 0x00
+#define AC_IASETUP 0x01
+#define AC_CONFIGURE 0x02
+#define AC_MCSETUP 0x03
+#define AC_TRANSMIT 0x04
+#define AC_TDR 0x05
+#define AC_DUMP 0x06
+#define AC_DIAGNOSE 0x07
+
+
+/*
+ * Defines for General Format for Action Commands, both Status Words, and
+ * Command Words.
+ *
+ */
+
+#define AC_SW_C 0x8000
+#define AC_SW_B 0x4000
+#define AC_SW_OK 0x2000
+#define AC_SW_A 0x1000
+#define TC_CARRIER 0x0400
+#define TC_CLS 0x0200
+#define TC_DMA 0x0100
+#define TC_DEFER 0x0080
+#define TC_SQE 0x0040
+#define TC_COLLISION 0x0020
+#define AC_CW_EL 0x8000
+#define AC_CW_S 0x4000
+#define AC_CW_I 0x2000
+
+/*
+ * Specific defines for the transmit action command.
+ *
+ */
+
+#define TBD_SW_EOF 0x8000
+#define TBD_SW_COUNT 0x3fff
+
+/*
+ * Specific defines for the receive frame actions.
+ *
+ */
+
+#define RBD_SW_EOF 0x8000
+#define RBD_SW_COUNT 0x3fff
+
+#define RFD_DONE 0x8000
+#define RFD_BUSY 0x4000
+#define RFD_OK 0x2000
+#define RFD_CRC 0x0800
+#define RFD_ALN 0x0400
+#define RFD_RSC 0x0200
+#define RFD_DMA 0x0100
+#define RFD_SHORT 0x0080
+#define RFD_EOF 0x0040
+#define RFD_EL 0x8000
+#define RFD_SUSP 0x4000
+/*
+ * 82586 chip specific structure definitions. For details, see the Intel
+ * LAN Components manual.
+ *
+ */
+
+
+typedef struct {
+ u_short scp_sysbus;
+ u_short scp_unused[2];
+ u_short scp_iscp;
+ u_short scp_iscp_base;
+} scp_t;
+
+
+typedef struct {
+ u_short iscp_busy;
+ u_short iscp_scb_offset;
+ u_short iscp_scb;
+ u_short iscp_scb_base;
+} iscp_t;
+
+
+typedef struct {
+ u_short scb_status;
+ u_short scb_command;
+ u_short scb_cbl_offset;
+ u_short scb_rfa_offset;
+ u_short scb_crcerrs;
+ u_short scb_alnerrs;
+ u_short scb_rscerrs;
+ u_short scb_ovrnerrs;
+} scb_t;
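The SCB_ACK_* bits occupy the same positions as the SCB_SW_* status bits, so the usual 82586 idiom is to acknowledge whatever the chip reported by masking the status word with SCB_SW_INT and writing the result back into the command word before raising channel attention. A hedged sketch of that sequence, relying only on the definitions in this header; chan_attn() stands in for the board-specific channel-attention strobe:

/* Sketch only: acknowledge pending 82586 events.  Because the ACK bits
 * line up with the SW status bits, the masked status is exactly what
 * must be written back.  chan_attn() is a placeholder. */
static void i586_ack_events(volatile scb_t *scb, void (*chan_attn)(void))
{
	unsigned short events = scb->scb_status & SCB_SW_INT;

	if (events) {
		scb->scb_command = events;	/* SCB_ACK_CX/FR/CNA/RNR */
		(*chan_attn)();			/* tell the chip to look at the SCB */
	}
}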
+
+
+typedef struct {
+ u_short tbd_offset;
+ u_char dest_addr[6];
+ u_short length;
+} transmit_t;
+
+
+typedef struct {
+ u_short fifolim_bytecnt;
+ u_short addrlen_mode;
+ u_short linprio_interframe;
+ u_short slot_time;
+ u_short hardware;
+ u_short min_frame_len;
+} configure_t;
+
+
+typedef struct {
+ u_short ac_status;
+ u_short ac_command;
+ u_short ac_link_offset;
+ union {
+ transmit_t transmit;
+ configure_t configure;
+ u_char iasetup[6];
+ } cmd;
+} ac_t;
+
+
+typedef struct {
+ u_short act_count;
+ u_short next_tbd_offset;
+ u_short buffer_addr;
+ u_short buffer_base;
+} tbd_t;
+
+
+typedef struct {
+ u_short status;
+ u_short command;
+ u_short link_offset;
+ u_short rbd_offset;
+ u_char destination[6];
+ u_char source[6];
+ u_short length;
+} fd_t;
+
+
+typedef struct {
+ u_short status;
+ u_short next_rbd_offset;
+ u_short buffer_addr;
+ u_short buffer_base;
+ u_short size;
+} rbd_t;
diff --git a/i386/i386at/idt.h b/i386/i386at/idt.h
new file mode 100644
index 00000000..7903310b
--- /dev/null
+++ b/i386/i386at/idt.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifndef _I386AT_IDT_
+#define _I386AT_IDT_
+
+/* On a standard PC, we only need 16 interrupt vectors,
+ because that's all the PIC hardware supports. */
+/* XX But for some reason we program the PIC
+ to use vectors 0x40-0x4f rather than 0x20-0x2f. Fix. */
+#define IDTSZ (0x20+0x20+0x10)
+
+#define PIC_INT_BASE 0x40
+
+#include "idt-gen.h"
+
+#endif _I386AT_IDT_
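Since the PIC vectors start at PIC_INT_BASE and the two cascaded 8259s deliver 16 of them, the table must reach at least 0x40 + 0x10 = 0x50 entries, which is exactly the 0x20+0x20+0x10 sum above. A compile-time check along those lines (illustrative only, not part of the header) could read:

/* Illustration: fail the build if the IDT cannot hold the 16 PIC vectors
 * that start at PIC_INT_BASE. */
typedef char idt_covers_pic_vectors[(PIC_INT_BASE + 16 <= IDTSZ) ? 1 : -1];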
diff --git a/i386/i386at/if_3c501.c b/i386/i386at/if_3c501.c
new file mode 100644
index 00000000..b822d273
--- /dev/null
+++ b/i386/i386at/if_3c501.c
@@ -0,0 +1,1240 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: if_3c501.c
+ * Author: Philippe Bernadat
+ * Date: 1989
+ * Copyright (c) 1989 OSF Research Institute
+ *
+ *	3COM Etherlink 3C501 Mach Ethernet driver
+ */
+/*
+ Copyright 1990 by Open Software Foundation,
+Cambridge, MA.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appears in all copies and
+that both the copyright notice and this permission notice appear in
+supporting documentation, and that the name of OSF or Open Software
+Foundation not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+ OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <at3c501.h>
+
+#ifdef MACH_KERNEL
+#include <kern/time_out.h>
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+#else MACH_KERNEL
+#include <sys/param.h>
+#include <mach/machine/vm_param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/buf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/vmmac.h>
+#include <sys/ioctl.h>
+#include <sys/errno.h>
+#include <sys/syslog.h>
+
+#include <net/if.h>
+#include <net/netisr.h>
+#include <net/route.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/if_ether.h>
+#endif
+
+#ifdef NS
+#include <netns/ns.h>
+#include <netns/ns_if.h>
+#endif
+#endif MACH_KERNEL
+
+#include <i386/ipl.h>
+#include <chips/busses.h>
+#include <i386at/if_3c501.h>
+
+#define SPLNET spl6
+
+int at3c501probe();
+void at3c501attach();
+int at3c501intr();
+int at3c501init();
+int at3c501output();
+int at3c501ioctl();
+int at3c501reset();
+int at3c501watch();
+
+static vm_offset_t at3c501_std[NAT3C501] = { 0 };
+static struct bus_device *at3c501_info[NAT3C501];
+struct bus_driver at3c501driver =
+ {at3c501probe, 0, at3c501attach, 0, at3c501_std, "et", at3c501_info, };
+
+int watchdog_id;
+
+typedef struct {
+#ifdef MACH_KERNEL
+ struct ifnet ds_if; /* generic interface header */
+ u_char ds_addr[6]; /* Ethernet hardware address */
+#else MACH_KERNEL
+ struct arpcom at3c501_ac;
+#define ds_if at3c501_ac.ac_if
+#define ds_addr at3c501_ac.ac_enaddr
+#endif MACH_KERNEL
+ int flags;
+ int timer;
+ char *base;
+ u_char address[ETHER_ADD_SIZE];
+ short mode;
+ int badxmt;
+ int badrcv;
+ int spurious;
+ int rcv;
+ int xmt;
+} at3c501_softc_t;
+
+at3c501_softc_t at3c501_softc[NAT3C501];
+
+/*
+ * at3c501probe:
+ *
+ * This function "probes" or checks for the 3c501 board on the bus to see
+ * if it is there. As far as I can tell, the best break between this
+ * routine and the attach code is to simply determine whether the board
+ * is configured in properly. Currently my approach to this is to write
+ * and read a string from the Packet Buffer on the board being probed.
+ * If the string comes back properly then we assume the board is there.
+ * The config code expects to see a successful return from the probe
+ * routine before attach will be called.
+ *
+ * input : address device is mapped to, and unit # being checked
+ * output : a '1' is returned if the board exists, and a 0 otherwise
+ *
+ */
+at3c501probe(port, dev)
+struct bus_device *dev;
+{
+ caddr_t base = (caddr_t)dev->address;
+ int unit = dev->unit;
+ char inbuf[50];
+ char *str = "3c501 ethernet board %d out of range\n";
+ int strsize = strlen(str);
+
+ if ((unit < 0) || (unit >= NAT3C501)) {
+ printf(str, unit);
+ return(0);
+ }
+
+ /* reset */
+ outb(IE_CSR(base), IE_RESET);
+
+ /* write a string to the packet buffer */
+
+ outb(IE_CSR(base), IE_RIDE | IE_SYSBFR);
+ outw(IE_GP(base), 0);
+ loutb(IE_BFR(base), str, strsize);
+
+ /* read it back */
+
+ outb(IE_CSR(base), IE_RIDE | IE_SYSBFR);
+ outw(IE_GP(base), 0);
+ linb(IE_BFR(base), inbuf, strsize);
+ /* compare them */
+
+#ifdef MACH_KERNEL
+ if (strncmp(str, inbuf, strsize))
+#else MACH_KERNEL
+ if (bcmp(str, inbuf, strsize))
+#endif MACH_KERNEL
+ {
+ return(0);
+ }
+ at3c501_softc[unit].base = base;
+
+ return(1);
+}
+
+/*
+ * at3c501attach:
+ *
+ * This function attaches a 3C501 board to the "system". The rest of the
+ * runtime structures are initialized here (this routine is called after
+ * a successful probe of the board). Once the ethernet address is read
+ * and stored, the board's ifnet structure is attached and readied.
+ *
+ * input : bus_device structure setup in autoconfig
+ * output : board structs and ifnet is setup
+ *
+ */
+void at3c501attach(dev)
+struct bus_device *dev;
+{
+ at3c501_softc_t *sp;
+ struct ifnet *ifp;
+ u_char unit;
+ caddr_t base;
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ extern int tcp_recvspace;
+	tcp_recvspace = 0x300;	/* empirical measure */
+#endif MACH_KERNEL
+
+ take_dev_irq(dev);
+ unit = (u_char)dev->unit;
+ printf(", port = %x, spl = %d, pic = %d. ",
+ dev->address, dev->sysdep, dev->sysdep1);
+
+ sp = &at3c501_softc[unit];
+ base = sp->base;
+ if (base != (caddr_t)dev->address) {
+ printf("3C501 board %d attach address error\n", unit);
+ return;
+ }
+ sp->timer = -1;
+ sp->flags = 0;
+ sp->mode = 0;
+ outb(IE_CSR(sp->base), IE_RESET);
+ at3c501geteh(base, sp->ds_addr);
+ at3c501geteh(base, sp->address);
+ at3c501seteh(base, sp->address);
+ printf("ethernet id [%x:%x:%x:%x:%x:%x]",
+ sp->address[0],sp->address[1],sp->address[2],
+ sp->address[3],sp->address[4],sp->address[5]);
+ ifp = &(sp->ds_if);
+ ifp->if_unit = unit;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST;
+#ifdef MACH_KERNEL
+ ifp->if_header_size = sizeof(struct ether_header);
+ ifp->if_header_format = HDR_ETHERNET;
+ ifp->if_address_size = 6;
+ ifp->if_address = (char *)&sp->address[0];
+ if_init_queues(ifp);
+#else MACH_KERNEL
+ ifp->if_name = "et";
+ ifp->if_init = at3c501init;
+ ifp->if_output = at3c501output;
+ ifp->if_ioctl = at3c501ioctl;
+ ifp->if_reset = at3c501reset;
+ ifp->if_next = NULL;
+ if_attach(ifp);
+#ifdef notdef
+ watchdog_id = timeout(at3c501watch, &(ifp->if_unit), 20*HZ);
+#endif
+#endif MACH_KERNEL
+}
+
+/*
+ * at3c501watch():
+ *
+ */
+at3c501watch(b_ptr)
+
+caddr_t b_ptr;
+
+{
+ int x,
+ y,
+ opri,
+ unit;
+ at3c501_softc_t *is;
+
+ unit = *b_ptr;
+#ifdef MACH_KERNEL
+ timeout(at3c501watch,b_ptr,20*hz);
+#else MACH_KERNEL
+ watchdog_id = timeout(at3c501watch,b_ptr,20*HZ);
+#endif MACH_KERNEL
+ is = &at3c501_softc[unit];
+ printf("\nxmt/bad rcv/bad spurious\n");
+ printf("%d/%d %d/%d %d\n", is->xmt, is->badxmt, \
+ is->rcv, is->badrcv, is->spurious);
+ is->rcv=is->badrcv=is->xmt=is->badxmt=is->spurious=0;
+}
+
+/*
+ * at3c501geteh:
+ *
+ * This function gets the ethernet address (array of 6 unsigned
+ * bytes) from the 3c501 board prom.
+ *
+ */
+
+at3c501geteh(base, ep)
+caddr_t base;
+char *ep;
+{
+ int i;
+
+ for (i = 0; i < ETHER_ADD_SIZE; i++) {
+ outw(IE_GP(base), i);
+ *ep++ = inb(IE_SAPROM(base));
+ }
+}
+
+/*
+ * at3c501seteh:
+ *
+ * This function sets the ethernet address (array of 6 unsigned
+ * bytes) on the 3c501 board.
+ *
+ */
+
+at3c501seteh(base, ep)
+caddr_t base;
+char *ep;
+{
+ int i;
+
+ for (i = 0; i < ETHER_ADD_SIZE; i++) {
+ outb(EDLC_ADDR(base) + i, *ep++);
+ }
+}
+
+#ifdef MACH_KERNEL
+int at3c501start(); /* forward */
+
+at3c501output(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NAT3C501 ||
+ at3c501_softc[unit].base == 0)
+ return (ENXIO);
+
+ return (net_write(&at3c501_softc[unit].ds_if, at3c501start, ior));
+}
+
+at3c501setinput(dev, receive_port, priority, filter, filter_count)
+ dev_t dev;
+ mach_port_t receive_port;
+ int priority;
+ filter_t filter[];
+ u_int filter_count;
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NAT3C501 ||
+ at3c501_softc[unit].base == 0)
+ return (ENXIO);
+
+ return (net_set_filter(&at3c501_softc[unit].ds_if,
+ receive_port, priority,
+ filter, filter_count));
+}
+
+#else MACH_KERNEL
+/*
+ * at3c501output:
+ *
+ * This routine is called by the "if" layer to output a packet to
+ * the network. This code resolves the local ethernet address, and
+ * puts it into the mbuf if there is room. If not, then a new mbuf
+ * is allocated with the header information and precedes the data
+ * to be transmitted.
+ *
+ * input: ifnet structure pointer, an mbuf with data, and address
+ * to be resolved
+ * output: mbuf is updated to hold enet address, or a new mbuf
+ * with the address is added
+ *
+ */
+at3c501output(ifp, m0, dst)
+struct ifnet *ifp;
+struct mbuf *m0;
+struct sockaddr *dst;
+{
+ int type, error;
+ spl_t opri;
+ u_char edst[6];
+ struct in_addr idst;
+ register at3c501_softc_t *is;
+ register struct mbuf *m = m0;
+ register struct ether_header *eh;
+ register int off;
+ int usetrailers;
+
+ is = &at3c501_softc[ifp->if_unit];
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
+ printf("3C501 Turning off board %d\n", ifp->if_unit);
+ at3c501intoff(ifp->if_unit);
+ error = ENETDOWN;
+ goto bad;
+ }
+ switch (dst->sa_family) {
+
+#ifdef INET
+ case AF_INET:
+ idst = ((struct sockaddr_in *)dst)->sin_addr;
+ if (!arpresolve(&is->at3c501_ac, m, &idst, edst, &usetrailers)){
+ return (0); /* if not yet resolved */
+ }
+ off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
+
+ if (usetrailers && off > 0 && (off & 0x1ff) == 0 &&
+ m->m_off >= MMINOFF + 2 * sizeof (u_short)) {
+ type = ETHERTYPE_TRAIL + (off>>9);
+ m->m_off -= 2 * sizeof (u_short);
+ m->m_len += 2 * sizeof (u_short);
+ *mtod(m, u_short *) = htons((u_short)ETHERTYPE_IP);
+ *(mtod(m, u_short *) + 1) = htons((u_short)m->m_len);
+ goto gottrailertype;
+ }
+ type = ETHERTYPE_IP;
+ off = 0;
+ goto gottype;
+#endif
+#ifdef NS
+ case AF_NS:
+ type = ETHERTYPE_NS;
+ bcopy((caddr_t)&(((struct sockaddr_ns *)dst)->sns_addr.x_host),
+ (caddr_t)edst, sizeof (edst));
+ off = 0;
+ goto gottype;
+#endif
+
+ case AF_UNSPEC:
+ eh = (struct ether_header *)dst->sa_data;
+ bcopy((caddr_t)eh->ether_dhost, (caddr_t)edst, sizeof (edst));
+ type = eh->ether_type;
+ goto gottype;
+
+ default:
+ printf("at3c501%d: can't handle af%d\n", ifp->if_unit,
+ dst->sa_family);
+ error = EAFNOSUPPORT;
+ goto bad;
+ }
+
+gottrailertype:
+ /*
+ * Packet to be sent as trailer: move first packet
+ * (control information) to end of chain.
+ */
+ while (m->m_next)
+ m = m->m_next;
+ m->m_next = m0;
+ m = m0->m_next;
+ m0->m_next = 0;
+ m0 = m;
+
+gottype:
+ /*
+ * Add local net header. If no space in first mbuf,
+ * allocate another.
+ */
+ if (m->m_off > MMAXOFF ||
+ MMINOFF + sizeof (struct ether_header) > m->m_off) {
+ m = m_get(M_DONTWAIT, MT_HEADER);
+ if (m == 0) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ m->m_next = m0;
+ m->m_off = MMINOFF;
+ m->m_len = sizeof (struct ether_header);
+ } else {
+ m->m_off -= sizeof (struct ether_header);
+ m->m_len += sizeof (struct ether_header);
+ }
+ eh = mtod(m, struct ether_header *);
+ eh->ether_type = htons((u_short)type);
+ bcopy((caddr_t)edst, (caddr_t)eh->ether_dhost, sizeof (edst));
+ bcopy((caddr_t)is->address,(caddr_t)eh->ether_shost,
+ sizeof(edst));
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ opri = SPLNET();
+ if (IF_QFULL(&ifp->if_snd)) {
+ IF_DROP(&ifp->if_snd);
+ splx(opri);
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ IF_ENQUEUE(&ifp->if_snd, m);
+ /*
+ * Some action needs to be added here for checking whether the
+ * board is already transmitting. If it is, we don't want to
+ * start it up (ie call at3c501start()). We will attempt to send
+ * packets that are queued up after an interrupt occurs. Some
+ * flag checking action has to happen here and/or in the start
+ * routine. This note is here to remind me that some thought
+ * is needed and there is a potential problem here.
+ *
+ */
+ at3c501start(ifp->if_unit);
+ splx(opri);
+ return (0);
+
+bad:
+ m_freem(m0);
+ return (error);
+}
+#endif MACH_KERNEL
+
+/*
+ * at3c501reset:
+ *
+ * This routine is in part an entry point for the "if" code; most of
+ * the actual initialization has (we hope) already been done by
+ * calling at3c501attach().
+ *
+ * input : unit number or board number to reset
+ * output : board is reset
+ *
+ */
+at3c501reset(unit)
+int unit;
+{
+ at3c501_softc[unit].ds_if.if_flags &= ~IFF_RUNNING;
+ return(at3c501init(unit));
+}
+
+
+
+/*
+ * at3c501init:
+ *
+ * Another routine that interfaces the "if" layer to this driver.
+ * It simply resets the structures that are used by the "upper layers"
+ * and calls at3c501hwrst(), which actually resets the at3c501 board.
+ *
+ * input : board number
+ * output : structures (if structs) and board are reset
+ *
+ */
+at3c501init(unit)
+int unit;
+{
+ struct ifnet *ifp;
+ int stat;
+ spl_t oldpri;
+
+ ifp = &(at3c501_softc[unit].ds_if);
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ if (ifp->if_addrlist == (struct ifaddr *)0) {
+ return;
+ }
+#endif MACH_KERNEL
+ oldpri = SPLNET();
+ if ((stat = at3c501hwrst(unit)) == TRUE) {
+ at3c501_softc[unit].ds_if.if_flags |= IFF_RUNNING;
+ at3c501_softc[unit].flags |= DSF_RUNNING;
+ at3c501start(unit);
+ }
+ else
+ printf("3C501 trouble resetting board %d\n", unit);
+ at3c501_softc[unit].timer = 5;
+ splx(oldpri);
+ return(stat);
+
+}
+
+#ifdef MACH_KERNEL
+/*ARGSUSED*/
+at3c501open(dev, flag)
+ dev_t dev;
+ int flag;
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NAT3C501 ||
+ at3c501_softc[unit].base == 0)
+ return (ENXIO);
+
+ at3c501_softc[unit].ds_if.if_flags |= IFF_UP;
+ at3c501init(unit);
+ return(0);
+}
+#endif MACH_KERNEL
+
+/*
+ * at3c501start:
+ *
+ * This is yet another interface routine that simply tries to output
+ * any packet held in an mbuf after a reset.
+ *
+ * input : board number
+ * output : stuff sent to board if any there
+ *
+ */
+at3c501start(unit)
+int unit;
+
+{
+#ifdef MACH_KERNEL
+ io_req_t m;
+#else MACH_KERNEL
+ struct mbuf *m;
+#endif MACH_KERNEL
+ struct ifnet *ifp;
+
+ ifp = &(at3c501_softc[unit].ds_if);
+ for(;;) {
+ IF_DEQUEUE(&ifp->if_snd, m);
+#ifdef MACH_KERNEL
+ if (m != 0)
+#else MACH_KERNEL
+ if (m != (struct mbuf *)0)
+#endif MACH_KERNEL
+ at3c501xmt(unit, m);
+ else
+ return;
+ }
+}
+
+#ifdef MACH_KERNEL
+/*ARGSUSED*/
+at3c501getstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status; /* pointer to OUT array */
+ u_int *count; /* out */
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NAT3C501 ||
+ at3c501_softc[unit].base == 0)
+ return (ENXIO);
+
+ return (net_getstat(&at3c501_softc[unit].ds_if,
+ flavor,
+ status,
+ count));
+}
+
+at3c501setstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status;
+ u_int count;
+{
+ register int unit = minor(dev);
+ register at3c501_softc_t *sp;
+
+ if (unit < 0 || unit >= NAT3C501 ||
+ at3c501_softc[unit].base == 0)
+ return (ENXIO);
+
+ sp = &at3c501_softc[unit];
+
+ switch (flavor) {
+ case NET_STATUS:
+ {
+ /*
+ * All we can change are flags, and not many of those.
+ */
+ register struct net_status *ns = (struct net_status *)status;
+ int mode = 0;
+
+ if (count < NET_STATUS_COUNT)
+ return (D_INVALID_SIZE);
+
+ if (ns->flags & IFF_ALLMULTI)
+ mode |= MOD_ENAL;
+ if (ns->flags & IFF_PROMISC)
+ mode |= MOD_PROM;
+
+ /*
+	     * Force a complete reset if the receive mode changes
+	     * so that the change takes effect immediately.
+ */
+ if (sp->mode != mode) {
+ sp->mode = mode;
+ if (sp->flags & DSF_RUNNING) {
+ sp->flags &= ~(DSF_LOCK | DSF_RUNNING);
+ at3c501init(unit);
+ }
+ }
+ break;
+ }
+ case NET_ADDRESS:
+ {
+ register union ether_cvt {
+ char addr[6];
+ int lwd[2];
+ } *ec = (union ether_cvt *)status;
+
+ if (count < sizeof(*ec)/sizeof(int))
+ return (D_INVALID_SIZE);
+
+ ec->lwd[0] = ntohl(ec->lwd[0]);
+ ec->lwd[1] = ntohl(ec->lwd[1]);
+ at3c501seteh(sp->base, ec->addr);
+ break;
+ }
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+#else MACH_KERNEL
+
+/*
+ * at3c501ioctl:
+ *
+ * This routine processes an ioctl request from the "if" layer
+ * above.
+ *
+ * input : pointer the appropriate "if" struct, command, and data
+ * output : based on command appropriate action is taken on the
+ * at3c501 board(s) or related structures
+ * return : error is returned containing exit conditions
+ *
+ */
+int curr_ipl;
+u_short curr_pic_mask;
+u_short pic_mask[];
+
+at3c501ioctl(ifp, cmd, data)
+struct ifnet *ifp;
+int cmd;
+caddr_t data;
+{
+ register struct ifaddr *ifa = (struct ifaddr *)data;
+ register at3c501_softc_t *is;
+ int error;
+ spl_t opri;
+ short mode = 0;
+
+ is = &at3c501_softc[ifp->if_unit];
+ opri = SPLNET();
+ error = 0;
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ at3c501init(ifp->if_unit);
+ switch (ifa->ifa_addr.sa_family) {
+#ifdef INET
+ case AF_INET:
+ ((struct arpcom *)ifp)->ac_ipaddr =
+ IA_SIN(ifa)->sin_addr;
+ arpwhohas((struct arpcom *)ifp,
+ &IA_SIN(ifa)->sin_addr);
+ break;
+#endif
+#ifdef NS
+ case AF_NS:
+ {
+ register struct ns_addr *ina =
+ &(IA_SNS(ifa)->sns_addr);
+ if (ns_nullhost(*ina))
+				ina->x_host =
+				    *(union ns_host *)(is->ds_addr);
+			else
+				at3c501seteh(at3c501_softc[ifp->if_unit].base,
+				    ina->x_host.c_host);
+ break;
+ }
+#endif
+ }
+ break;
+ case SIOCSIFFLAGS:
+ if (ifp->if_flags & IFF_ALLMULTI)
+ mode |= MOD_ENAL;
+ if (ifp->if_flags & IFF_PROMISC)
+ mode |= MOD_PROM;
+ /*
+ * force a complete reset if the receive multicast/
+ * promiscuous mode changes so that these take
+ * effect immediately.
+ *
+ */
+ if (is->mode != mode) {
+ is->mode = mode;
+ if (is->flags & DSF_RUNNING) {
+ is->flags &=
+ ~(DSF_LOCK|DSF_RUNNING);
+ at3c501init(ifp->if_unit);
+ }
+ }
+ if ((ifp->if_flags & IFF_UP) == 0 &&
+ is->flags & DSF_RUNNING) {
+ printf("AT3C501 ioctl: turning off board %d\n",
+ ifp->if_unit);
+ is->flags &= ~(DSF_LOCK | DSF_RUNNING);
+ is->timer = -1;
+ at3c501intoff(ifp->if_unit);
+ } else
+ if (ifp->if_flags & IFF_UP &&
+ (is->flags & DSF_RUNNING) == 0)
+ at3c501init(ifp->if_unit);
+ break;
+ default:
+ error = EINVAL;
+ }
+ splx(opri);
+ return (error);
+}
+#endif MACH_KERNEL
+
+
+/*
+ * at3c501hwrst:
+ *
+ * This routine resets the at3c501 board that corresponds to the
+ * board number passed in.
+ *
+ * input : board number to do a hardware reset
+ * output : board is reset
+ *
+ */
+#define XMT_STAT (EDLC_16|EDLC_JAM|EDLC_UNDER|EDLC_IDLE)
+#define RCV_STAT (EDLC_STALE|EDLC_ANY|EDLC_SHORT|EDLC_DRIBBLE|EDLC_OVER|EDLC_FCS)
+int
+at3c501hwrst(unit)
+int unit;
+{
+ u_char stat;
+ caddr_t base = at3c501_softc[unit].base;
+
+ outb(IE_CSR(base), IE_RESET);
+ outb(IE_CSR(base), 0);
+ at3c501seteh(base, at3c501_softc[unit].address);
+ if ((stat = inb(IE_CSR(base))) != IE_RESET) {
+ printf("at3c501reset: can't reset CSR: %x\n", stat);
+ return(FALSE);
+ }
+ if ((stat = inb(EDLC_XMT(base))) & XMT_STAT) {
+ printf("at3c501reset: can't reset XMT: %x\n", stat);
+ return(FALSE);
+ }
+ if (((stat = inb(EDLC_RCV(base))) & RCV_STAT) != EDLC_STALE) {
+ printf("at3c501reset: can't reset RCV: %x\n", stat);
+ return(FALSE);
+ }
+ if (at3c501config(unit) == FALSE) {
+ printf("at3c501hwrst(): failed to config\n");
+ return(FALSE);
+ }
+ outb(IE_RP(base), 0);
+ outb(IE_CSR(base), IE_RIDE|IE_RCVEDLC);
+ return(TRUE);
+}
+
+/*
+ * at3c501intr:
+ *
+ * This function is the interrupt handler for the at3c501 ethernet
+ * board. This routine will be called whenever either a packet
+ * is received, or a packet has successfully been transferred and
+ * the unit is ready to transmit another packet.
+ *
+ * input : board number that interrupted
+ * output : either a packet is received, or a packet is transferred
+ *
+ */
+at3c501intr(unit)
+int unit;
+{
+ at3c501rcv(unit);
+ at3c501start(unit);
+
+ return(0);
+}
+
+
+/*
+ * at3c501rcv:
+ *
+ * This routine is called by the interrupt handler to initiate a
+ * packet transfer from the board to the "if" layer above this
+ * driver. This routine checks if a buffer has been successfully
+ * received by the at3c501. If so, the routine at3c501read is called
+ * to do the actual transfer of the board data (including the
+ * ethernet header) into a packet (consisting of an mbuf chain).
+ *
+ * input : number of the board to check
+ * output : if a packet is available, it is "sent up"
+ *
+ */
+at3c501rcv(unit)
+int unit;
+{
+ int stat;
+ caddr_t base;
+#ifdef MACH_KERNEL
+ ipc_kmsg_t new_kmsg;
+ struct ether_header *ehp;
+ struct packet_header *pkt;
+#else MACH_KERNEL
+ struct mbuf *m, *tm;
+#endif MACH_KERNEL
+ u_short len;
+ register struct ifnet *ifp;
+ struct ether_header header;
+ int tlen;
+ register at3c501_softc_t *is;
+ register struct ifqueue *inq;
+ spl_t opri;
+ struct ether_header eh;
+
+ is = &at3c501_softc[unit];
+ ifp = &is->ds_if;
+ base = at3c501_softc[unit].base;
+ is->rcv++;
+ if (inb(IE_CSR(base)) & IE_RCVBSY)
+ is->spurious++;
+ while (!((stat=inb(EDLC_RCV(base))) & EDLC_STALE)) {
+ outb(IE_CSR(base), IE_SYSBFR);
+ if (!(stat & EDLC_ANY)) {
+ outw(IE_GP(base), 0);
+ len = inw(IE_RP(base))-sizeof(struct ether_header);
+ outb(IE_RP(base), 0);
+ outb(IE_CSR(base), IE_RIDE|IE_RCVEDLC);
+ is->badrcv++;
+#ifdef DEBUG
+ printf("at3c501rcv: received %d bad bytes", len);
+ if (stat & EDLC_SHORT)
+ printf(" Short frame");
+ if (stat & EDLC_OVER)
+ printf(" Data overflow");
+ if (stat & EDLC_DRIBBLE)
+ printf(" Dribble error");
+ if (stat & EDLC_FCS)
+ printf(" CRC error");
+ printf("\n");
+#endif DEBUG
+ } else {
+ outw(IE_GP(base), 0);
+ len = inw(IE_RP(base));
+ if (len < 60) {
+ outb(IE_RP(base), 0);
+ outb(IE_CSR(base), IE_RIDE|IE_RCVEDLC);
+ return;
+ }
+ linb(IE_BFR(base), &eh, sizeof(struct ether_header));
+#ifdef MACH_KERNEL
+ new_kmsg = net_kmsg_get();
+ if (new_kmsg == IKM_NULL) {
+ /*
+ * Drop the packet.
+ */
+ is->ds_if.if_rcvdrops++;
+
+ outb(IE_RP(base), 0);
+ outb(IE_CSR(base), IE_RIDE|IE_RCVEDLC);
+ return;
+ }
+
+ ehp = (struct ether_header *)
+ (&net_kmsg(new_kmsg)->header[0]);
+ pkt = (struct packet_header *)
+ (&net_kmsg(new_kmsg)->packet[0]);
+
+ /*
+ * Get header.
+ */
+ *ehp = eh;
+
+ /*
+ * Get body
+ */
+ linb(IE_BFR(base),
+ (char *)(pkt + 1),
+ len - sizeof(struct ether_header));
+
+ outb(IE_RP(base), 0);
+ outb(IE_CSR(base), IE_RIDE|IE_RCVEDLC);
+
+ pkt->type = ehp->ether_type;
+ pkt->length = len - sizeof(struct ether_header)
+ + sizeof(struct packet_header);
+
+ /*
+ * Hand the packet to the network module.
+ */
+ net_packet(ifp, new_kmsg, pkt->length,
+ ethernet_priority(new_kmsg));
+
+#else MACH_KERNEL
+ eh.ether_type = htons(eh.ether_type);
+ m =(struct mbuf *)0;
+#ifdef DEBUG
+ printf("received %d bytes\n", len);
+#endif DEBUG
+ len -= sizeof(struct ether_header);
+ while ( len ) {
+ if (m == (struct mbuf *)0) {
+ m = m_get(M_DONTWAIT, MT_DATA);
+ if (m == (struct mbuf *)0) {
+ printf("at3c501rcv: Lost frame\n");
+ outb(IE_RP(base), 0);
+ outb(IE_CSR(base), IE_RIDE|IE_RCVEDLC);
+
+ return;
+ }
+ tm = m;
+ tm->m_off = MMINOFF;
+ /*
+ * first mbuf in the packet must contain a pointer to the
+ * ifnet structure. other mbufs that follow and make up
+ * the packet do not need this pointer in the mbuf.
+ *
+ */
+ *(mtod(tm, struct ifnet **)) = ifp;
+ tm->m_len = sizeof(struct ifnet **);
+ }
+ else {
+				tm->m_next = m_get(M_DONTWAIT, MT_DATA);
+				tm = tm->m_next;
+				if (tm == (struct mbuf *)0) {
+					m_freem(m);
+					printf("at3c501rcv: No mbufs, lost frame\n");
+					outb(IE_RP(base), 0);
+					outb(IE_CSR(base), IE_RIDE|IE_RCVEDLC);
+					return;
+				}
+				tm->m_off = MMINOFF;
+				tm->m_len = 0;
+ }
+ tlen = MIN( MLEN - tm->m_len, len );
+ tm->m_next = (struct mbuf *)0;
+ linb(IE_BFR(base), mtod(tm, char *)+tm->m_len, tlen );
+ tm->m_len += tlen;
+ len -= tlen;
+ }
+ outb(IE_RP(base), 0);
+ outb(IE_CSR(base), IE_RIDE|IE_RCVEDLC);
+ /*
+ * received packet is now in a chain of mbuf's. next step is
+ * to pass the packet upwards.
+ *
+ */
+ switch (eh.ether_type) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ schednetisr(NETISR_IP);
+ inq = &ipintrq;
+ break;
+ case ETHERTYPE_ARP:
+ arpinput(&is->at3c501_ac, m);
+ return;
+#endif
+#ifdef NS
+ case ETHERTYPE_NS:
+ schednetisr(NETISR_NS);
+ inq = &nsintrq;
+ break;
+#endif
+ default:
+ m_freem(m);
+ return;
+ }
+ opri = SPLNET();
+ if (IF_QFULL(inq)) {
+ IF_DROP(inq);
+ splx(opri);
+ m_freem(m);
+ return;
+ }
+ IF_ENQUEUE(inq, m);
+ splx(opri);
+#endif MACH_KERNEL
+ }
+ }
+}
+
+
+/*
+ * at3c501xmt:
+ *
+ * This routine fills in the appropriate registers and memory
+ * locations on the 3C501 board and starts the board off on
+ * the transmit.
+ *
+ * input : board number of interest, and a pointer to the mbuf
+ * output : board memory and registers are set for xfer and attention
+ *
+ */
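+/*
+ * Buffer layout for transmit (a descriptive sketch of the code below,
+ * not additional documentation of the chip): the frame is copied to the
+ * *end* of the 2K on-board buffer, so the general-purpose pointer is
+ * first set to BFRSIZ - bytes_in_msg, where bytes_in_msg is the frame
+ * length padded up to ETHERMIN plus the Ethernet header; any shortfall
+ * is filled with zero bytes before the buffer is handed to the
+ * transmitter with IE_RIDE|IE_XMTEDLC.
+ */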
+at3c501xmt(unit, m)
+int unit;
+#ifdef MACH_KERNEL
+io_req_t m;
+#else MACH_KERNEL
+struct mbuf *m;
+#endif MACH_KERNEL
+{
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ register struct mbuf *tm_p;
+#endif MACH_KERNEL
+ int i;
+ at3c501_softc_t *is = &at3c501_softc[unit];
+ caddr_t base = is->base;
+ u_short count = 0;
+ u_short bytes_in_msg;
+
+ is->xmt++;
+ outb(IE_CSR(base), IE_SYSBFR);
+#ifdef MACH_KERNEL
+ count = m->io_count;
+#define max(a,b) (((a) > (b)) ? (a) : (b))
+ bytes_in_msg = max(count,
+ ETHERMIN + sizeof(struct ether_header));
+#else MACH_KERNEL
+ bytes_in_msg = max(m_length(m), ETHERMIN + sizeof(struct ether_header));
+#endif MACH_KERNEL
+ outw(IE_GP(base), BFRSIZ-bytes_in_msg);
+#ifdef MACH_KERNEL
+ loutb(IE_BFR(base), m->io_data, count);
+#else MACH_KERNEL
+ for (tm_p = m; tm_p != (struct mbuf *)0; tm_p = tm_p->m_next) {
+ if (count + tm_p->m_len > ETHERMTU + sizeof(struct ether_header))
+ break;
+ if (tm_p->m_len == 0)
+ continue;
+ loutb(IE_BFR(base), mtod(tm_p, caddr_t), tm_p->m_len);
+ count += tm_p->m_len;
+ }
+#endif MACH_KERNEL
+ while (count < bytes_in_msg) {
+ outb(IE_BFR(base), 0);
+ count++;
+ }
+ do {
+ if (!(int)m) {
+ outb(IE_CSR(base), IE_SYSBFR);
+ }
+ outw(IE_GP(base), BFRSIZ-bytes_in_msg);
+ outb(IE_CSR(base), IE_RIDE|IE_XMTEDLC);
+ if (m) {
+#ifdef MACH_KERNEL
+ iodone(m);
+ m = 0;
+#else MACH_KERNEL
+ m_freem(m);
+ m = (struct mbuf *) 0;
+#endif MACH_KERNEL
+ }
+ for (i=0; inb(IE_CSR(base)) & IE_XMTBSY; i++);
+ if ((i=inb(EDLC_XMT(base))) & EDLC_JAM) {
+ is->badxmt++;
+#ifdef DEBUG
+ printf("at3c501xmt jam\n");
+#endif DEBUG
+ }
+ } while ((i & EDLC_JAM) && !(i & EDLC_16));
+
+ if (i & EDLC_16) {
+		printf("%%");
+ }
+ return;
+
+}
+
+/*
+ * at3c501config:
+ *
+ * This routine does a standard config of the at3c501 board.
+ *
+ */
+at3c501config(unit)
+int unit;
+{
+ caddr_t base = at3c501_softc[unit].base;
+ u_char stat;
+
+ /* Enable DMA & Interrupts */
+
+ outb(IE_CSR(base), IE_RIDE|IE_SYSBFR);
+
+ /* No Transmit Interrupts */
+
+ outb(EDLC_XMT(base), 0);
+ inb(EDLC_XMT(base));
+
+ /* Setup Receive Interrupts */
+
+ outb(EDLC_RCV(base), EDLC_BROAD|EDLC_SHORT|EDLC_GOOD|EDLC_DRIBBLE|EDLC_OVER);
+ inb(EDLC_RCV(base));
+
+ outb(IE_CSR(base), IE_RIDE|IE_SYSBFR);
+ outb(IE_RP(base), 0);
+ outb(IE_CSR(base), IE_RIDE|IE_RCVEDLC);
+ return(TRUE);
+}
+
+/*
+ * at3c501intoff:
+ *
+ * This function turns interrupts off for the at3c501 board indicated.
+ *
+ */
+at3c501intoff(unit)
+int unit;
+{
+ caddr_t base = at3c501_softc[unit].base;
+ outb(IE_CSR(base), 0);
+}
+
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+/*
+ * The length of an mbuf chain
+ */
+m_length(m)
+ register struct mbuf *m;
+{
+ register int len = 0;
+
+ while (m) {
+ len += m->m_len;
+ m = m->m_next;
+ }
+ return len;
+}
+#endif MACH_KERNEL
diff --git a/i386/i386at/if_3c501.h b/i386/i386at/if_3c501.h
new file mode 100644
index 00000000..ac0641f5
--- /dev/null
+++ b/i386/i386at/if_3c501.h
@@ -0,0 +1,175 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: if_3c501.h
+ * Author: Philippe Bernadat
+ * Date: 1989
+ * Copyright (c) 1989 OSF Research Institute
+ *
+ * 	3COM Etherlink 3C501 Mach Ethernet driver
+ */
+/*
+ Copyright 1990 by Open Software Foundation,
+Cambridge, MA.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appears in all copies and
+that both the copyright notice and this permission notice appear in
+supporting documentation, and that the name of OSF or Open Software
+Foundation not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+ OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/* The various IE command registers */
+
+#define EDLC_ADDR(base) (base) /* EDLC station address, 6 bytes*/
+#define EDLC_RCV(base) ((base)+0x6) /* EDLC receive cmd. & stat. */
+#define EDLC_XMT(base) ((base)+0x7) /* EDLC transmit cmd. & stat. */
+#define IE_GP(base) ((base)+0x8) /* General Purpose pointer */
+#define IE_RP(base) ((base)+0xa) /* Receive buffer pointer */
+#define IE_SAPROM(base) ((base)+0xc) /* station addr prom window */
+#define IE_CSR(base) ((base)+0xe) /* IE command and status */
+#define IE_BFR(base) ((base)+0xf) /* 1 byte window on packet buffer*/
+
+/* CSR Status Register (read)
+ *
+ * _______________________________________________________________________
+ * | | | | | | | | |
+ * | XMTBSY | RIDE | DMA | EDMA | BUFCTL | | RCVBSY |
+ * |________|________|________|________|________|________|________|________|
+ *
+ */
+
+/* CSR Command Register (write)
+ *
+ * _______________________________________________________________________
+ * | | | | | | | | |
+ * | RESET | RIDE | DMA | | BUFCTL | | IRE |
+ * |________|________|________|________|________|________|________|________|
+ *
+ */
+
+#define IE_XMTBSY 0x80 /* Transmitter busy (ro) */
+#define IE_RESET 0x80 /* reset the controller (wo) */
+#define IE_RIDE 0x40 /* request interrupt/DMA enable (rw) */
+#define IE_DMA 0x20 /* DMA request (rw) */
+#define IE_EDMA 0x10 /* DMA done (ro) */
+#define IE_BUFCTL 0x0c /* mask for buffer control field (rw) */
+#define IE_RCVBSY 0x01 /* receive in progress (ro) */
+#define IE_IRE 0x01 /* Interrupt request enable */
+
+/* BUFCTL values */
+
+#define IE_LOOP 0x0c /* 2 bit field in bits 2,3, loopback */
+#define IE_RCVEDLC 0x08 /* gives buffer to receiver */
+#define IE_XMTEDLC 0x04 /* gives buffer to transmit */
+#define IE_SYSBFR 0x00 /* gives buffer to processor */
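+
+/*
+ * Illustrative CSR usage (a quick reference only, taken from what
+ * if_3c501.c actually writes; other combinations would be assumptions
+ * about the part):
+ *
+ *	outb(IE_CSR(base), IE_RESET);		reset the controller
+ *	outb(IE_CSR(base), IE_RIDE|IE_SYSBFR);	buffer to CPU, ints enabled
+ *	outb(IE_CSR(base), IE_RIDE|IE_RCVEDLC);	hand buffer to the receiver
+ *	outb(IE_CSR(base), IE_RIDE|IE_XMTEDLC);	hand buffer to the transmitter
+ */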
+
+/* XMTCSR Transmit Status Register (read)
+ *
+ * _______________________________________________________________________
+ * | | | | | | | | |
+ * | | | | | IDLE | 16 | JAM | UNDER |
+ * |________|________|________|________|________|________|________|________|
+ *
+ */
+
+/* XMTCSR Transmit Command Register (write) enables interrupts when written
+ *
+ * _______________________________________________________________________
+ * | | | | | | | | |
+ * | | | | | | | | |
+ * |________|________|________|________|________|________|________|________|
+ *
+ */
+
+#define EDLC_IDLE 0x08 /* transmit idle */
+#define EDLC_16 0x04 /* packet experienced 16 collisions */
+#define EDLC_JAM 0x02 /* packet experienced a collision */
+#define EDLC_UNDER 0x01 /* data underflow */
+
+/* RCVCSR Receive Status Register (read)
+ *
+ * _______________________________________________________________________
+ * | | | | | | | | |
+ * | STALE | | GOOD | ANY | SHORT | DRIBBLE| FCS | OVER |
+ * |________|________|________|________|________|________|________|________|
+ *
+ */
+
+/* RCVCSR Receive Command Register (write) enables interrupt when written
+ *
+ * _______________________________________________________________________
+ * | | | | | | | | |
+ * | ADDR MATCH MODE | GOOD | ANY | SHORT | DRIBBLE| FCS | OVER |
+ * |________|________|________|________|________|________|________|________|
+ *
+ */
+
+#define EDLC_STALE 0x80 /* receive CSR status previously read */
+#define EDLC_GOOD 0x20 /* well formed packets only */
+#define EDLC_ANY 0x10 /* any packet, even those with errors */
+#define EDLC_SHORT 0x08 /* short frame */
+#define EDLC_DRIBBLE 0x04 /* dribble error */
+#define EDLC_FCS 0x02 /* CRC error */
+#define EDLC_OVER 0x01 /* data overflow */
+
+/* Address Match Mode */
+
+#define EDLC_NONE 0x00 /* match mode in bits 5-6, write only */
+#define EDLC_ALL 0x40 /* promiscuous receive, write only */
+#define EDLC_BROAD 0x80 /* station address plus broadcast */
+#define EDLC_MULTI 0xc0 /* station address plus multicast */
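+
+/*
+ * Example receive command (a reference sketch, not a new definition):
+ * at3c501config() arms the receiver for station + broadcast traffic and
+ * requests an interrupt on both good and errored frames with
+ *
+ *	outb(EDLC_RCV(base),
+ *	     EDLC_BROAD|EDLC_SHORT|EDLC_GOOD|EDLC_DRIBBLE|EDLC_OVER);
+ *
+ * A promiscuous receive would use EDLC_ALL in the address-match field
+ * instead of EDLC_BROAD.
+ */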
+
+/* Packet Buffer size */
+
+#define BFRSIZ 2048
+
+#define NAT3C501 1
+#define ETHER_ADD_SIZE 6 /* size of a MAC address */
+
+#ifndef TRUE
+#define TRUE 1
+#endif TRUE
+#define HZ 100
+
+#define DSF_LOCK 1
+#define DSF_RUNNING 2
+
+#define MOD_ENAL 1
+#define MOD_PROM 2
diff --git a/i386/i386at/if_3c503.h b/i386/i386at/if_3c503.h
new file mode 100644
index 00000000..865882cb
--- /dev/null
+++ b/i386/i386at/if_3c503.h
@@ -0,0 +1,116 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie-Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran
+ out of available addresses on the first one... */
+
+#define OLD_3COM_ID 0x02608c
+#define NEW_3COM_ID 0x0020af
+
+/* Gate Array Description */
+
+#define PSTR 0x400
+#define PSPR 0x401
+#define DQTR 0x402
+#define BCFR 0x403
+#define PCFR 0x404
+#define GACFR 0x405
+#define CTLR 0x406
+#define STREG 0x407
+#define IDCFR 0x408
+#define DAMSB 0x409
+#define DALSB 0x40A
+#define VPTR2 0x40B
+#define VPTR1 0x40C
+#define VPTR0 0x40D
+#define RFMSB 0x40E
+#define RFLSB 0x40F
+
+ /* PSTR 400 */
+/* int */
+ /* PSPR 401 */
+/* int */
+ /* DQTR 402 */
+/* dma only */
+ /* BCFR 403 */
+#define B7_300 0x300
+#define B6_310 0x310
+#define B5_330 0x330
+#define B4_350 0x350
+#define B3_250 0x250
+#define B2_280 0x280
+#define B1_2A0 0x2a0
+#define B0_2E0 0x2e0
+ /* PCFR 404 */
+
+ /* GACFR 405 */
+#define GACFR_NIM 0x80
+#define GACFR_TCM 0x40
+#define GACFR_OWS 0x20
+#define GACFR_TEST 0x10
+#define GACFR_RSEL 0x08
+#define GACFR_MBS2 0x04
+#define GACFR_MBS1 0x02
+#define GACFR_MBS0 0x01
+ /*
+ * This definition is only for the std 8k window on an 8k board.
+	 * It is incorrect for a 32K board.  But they do not exist yet,
+	 * and I don't even know how to tell whether I am looking at one.
+ */
+#define GACFR_8K (GACFR_RSEL|0x1)
+ /* CTLR 406 */
+#define CTLR_START 0x80
+#define CTLR_DDIR 0x40
+#define CTLR_DBSEL 0x20
+#define CTLR_SHARE 0x10
+#define CTLR_EAHI 0x08
+#define CTLR_EALO 0x04
+#define CTLR_XSEL 0x02
+#define CTLR_RST 0x01
+#define CTLR_EA 0x0c
+#define CTLR_STA_ADDR 0x04
+#define CTLR_THIN 0x02
+#define CTLR_THICK 0x00
+ /* STREG 407 */
+/* DMA */
+ /* IDCFR 408 */
+#define IDCFR_IRQ5 0x80
+#define IDCFR_IRQ4 0x40
+#define IDCFR_IRQ3 0x20
+#define IDCFR_IRQ2 0x10
+#define IDCFR_DRQ3 0x04
+#define IDCFR_DRQ2 0x02
+#define IDCFR_DRQ1 0x01
+ /* DAMSB 409 */
+/* int & dma */
+ /* DALSB 40A */
+/* int & dma */
+ /* VPTR2 40B */
+ /* VPTR1 40C */
+ /* VPTR0 40D */
+ /* RFMSB 40E */
+/* what's a register file */
+ /* RFLSB 40F */
diff --git a/i386/i386at/if_de6c.c b/i386/i386at/if_de6c.c
new file mode 100644
index 00000000..0bad7803
--- /dev/null
+++ b/i386/i386at/if_de6c.c
@@ -0,0 +1,1777 @@
+#define DEBUG 1
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * HISTORY
+ * 17-Feb-94 David Golub (dbg) at Carnegie-Mellon University
+ * Fix from Bob Baron to fix transmitter problems.
+ *
+ * $Log: if_de6c.c,v $
+ * Revision 1.1.1.1 1996/10/30 01:39:26 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1994/11/08 20:47:24 baford
+ * merged in CMU's MK83-MK83a diffs
+ *
+ * Revision 2.2 93/11/17 18:29:25 dbg
+ * Moved source into kernel/i386at/DLINK/if_de6c.c, since we
+ * can't release it but don't want to lose it.
+ * [93/11/17 dbg]
+ *
+ * Removed u_long.
+ * [93/03/25 dbg]
+ *
+ * Created.
+ * I have used if_3c501.c as a typical driver template and
+ * spliced in the appropriate particulars for the
+ * d-link 600.
+ * [92/08/13 rvb]
+ *
+ *
+ * File: if_de6c.c
+ * Author: Robert V. Baron
+ */
+
+/*
+ * File: if_3c501.c
+ * Author: Philippe Bernadat
+ * Date: 1989
+ * Copyright (c) 1989 OSF Research Institute
+ *
+ * 	3COM Etherlink d-link "600" Mach Ethernet driver
+ */
+/*
+ Copyright 1990 by Open Software Foundation,
+Cambridge, MA.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appears in all copies and
+that both the copyright notice and this permission notice appear in
+supporting documentation, and that the name of OSF or Open Software
+Foundation not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+ OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * I have tried to make it clear what is device specific code
+ * and what code supports the general BSD ethernet interface. d-link
+ * specific code is preceded by a line or two of
+ * "d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON"
+ * and followed by a line or two of
+ * "d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF"
+ *
+ * The main routines that do device specific processing are:
+ * de6cintr - interrupt dispatcher
+ * de6crcv - rcv packets and switch to new buffers
+ * de6cxmt - xmt packet and wait for xmtbusy to clear
+ * de6calive - probe for device
+ * de6cinit - device initialization
+ * de6cintoff - turn it off.
+ * There are a couple of interesting macros at the head of this
+ * file and some support subroutines at the end.
+ *
+ * Lastly, to get decent performance on i386SX class machines, it
+ * was necessary to recode the read and write d-link memory routines in
+ * assembler. The deread and dewrite routines that are used are in
+ * if_de6s.s
+ *
+ */
+
+/* Questions:
+
+ Make sure that iopl maps 378, 278 and 3bc.
+
+ If you set command w/o MODE and page bit, what happens?
+
+ Could I get xmt interrupts?  Currently I spin, but this does not seem to be an issue.
+
+ enable promiscuous?
+
+ Can you assert TXEN and RXEN simultaneously?
+*/
+
+#include <de6c.h>
+#include <par.h>
+
+#ifdef MACH_KERNEL
+#include <kern/time_out.h>
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+#include <chips/busses.h>
+#else MACH_KERNEL
+#include <sys/param.h>
+#include <mach/machine/vm_param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/buf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/vmmac.h>
+#include <sys/ioctl.h>
+#include <sys/errno.h>
+#include <sys/syslog.h>
+
+#include <net/if.h>
+#include <net/netisr.h>
+#include <net/route.h>
+#include <i386at/atbus.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/if_ether.h>
+#endif
+
+#ifdef NS
+#include <netns/ns.h>
+#include <netns/ns_if.h>
+#endif
+#endif MACH_KERNEL
+
+#include <vm/vm_kern.h>
+#include <i386/ipl.h>
+#include <i386/pio.h>
+#include <i386at/if_de6c.h>
+
+#define SPLNET spl6
+
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+#define de6cwrite(sp, addr, buf, len) \
+ de6cwriteasm(addr, buf, len, DATA(sp->port), sp->latency)
+
+#define de6cread(sp, addr, buf, len) \
+ de6creadasm(addr, buf, len, DATA(sp->port), sp->latency)
+
+#define DATA_OUT(sp, p, f, z) \
+ de6coutb(sp, DATA(p), ((z)<<4) | f);\
+ de6coutb(sp, DATA(p), ((z)&0xf0)| f | STROBE)
+
+#define STAT_IN(sp, p, in) \
+ de6coutb(sp, DATA(p), STATUS); \
+ in = inb(STAT(port)); \
+ de6coutb(sp, DATA(p), NUL_CMD | STROBE)
+
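+
+/*
+ * A note on the nibble protocol (descriptive only, inferred from the
+ * macros above and from de6cread()/de6clen() below): every byte crosses
+ * the printer port four bits at a time.  DATA_OUT() first presents the
+ * low nibble of z in the high data bits together with the command code
+ * f, then the high nibble with STROBE asserted, so e.g.
+ *
+ *	DATA_OUT(sp, port, COMMAND, RESET);
+ *
+ * expands to two de6coutb() calls.  Reads come back through the printer
+ * status lines and are reassembled the same way, low nibble first.
+ */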
+#define XMTidx 3
+#define XMT_BSY_WAIT 10000
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+
+int de6cprobe();
+void de6cattach();
+int de6cintr();
+int de6cinit();
+int de6coutput();
+int de6cioctl();
+int de6creset();
+void de6cwatch();
+
+static vm_offset_t de6c_std[NDE6C] = { 0 };
+static struct bus_device *de6c_info[NDE6C];
+
+#ifdef MACH_KERNEL
+struct bus_driver de6cdriver =
+ {de6cprobe, 0, de6cattach, 0, de6c_std, "de", de6c_info, };
+extern struct bus_device *lprinfo[];
+
+#define MM io_req_t
+#define PIC sysdep1
+#define DEV bus_device
+#else MACH_KERNEL
+int (*de6cintrs[])() = { de6cintr, 0};
+struct isa_driver de6cdriver =
+ {de6cprobe, 0, de6cattach, "de", 0, 0, 0};
+extern struct isa_dev *lprinfo[];
+
+#define MM struct mbuf *
+#define PIC dev_pic
+#define DEV isa_dev
+#endif MACH_KERNEL
+
+int watchdog_id;
+
+typedef struct {
+#ifdef MACH_KERNEL
+ struct ifnet ds_if; /* generic interface header */
+ u_char ds_addr[6]; /* Ethernet hardware address */
+#else MACH_KERNEL
+ struct arpcom de6c_ac;
+#define ds_if de6c_ac.ac_if
+#define ds_addr de6c_ac.ac_enaddr
+#endif MACH_KERNEL
+ int flags;
+ int timer;
+ u_char address[6];
+ short mode;
+ int port;
+ int latency;
+ int xmt;
+ int rcv;
+ int rcvoff;
+ int rcvspin;
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ int produce;
+ int consume;
+ int rcvlen[XMTidx];
+ int alive;
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+ int (*oldvect)();
+ int oldunit;
+} de6c_softc_t;
+
+de6c_softc_t de6c_softc[NDE6C];
+
+int de6cactive[NDE6C];
+
+/*
+ * Patch to change latency value
+ */
+int de6c_latency = 30; /* works on NEC Versa (pottsylvania.mach) */
+
+#ifdef DEBUG
+int de6crcv0, de6crcv1, de6crcv2, de6crcv3;
+int de6cdo_rcvintr = 0, de6cdo_watch = 0;
+int de6cdo_xmt = 0;
+#define D(X) X
+#else /* DEBUG */
+#define D(X)
+#endif /* DEBUG */
+
+/*
+ * de6cprobe:
+ * We are not directly probed. The lprattach will call de6cattach.
+ * But what we have is plausible for a probe.
+ */
+de6cprobe(port, dev)
+struct DEV *dev;
+{
+#ifdef MACH_KERNEL
+ int unit = dev->unit;
+#else MACH_KERNEL
+ int unit = dev->dev_unit;
+#endif MACH_KERNEL
+
+ if ((unit < 0) || (unit >= NDE6C)) {
+ return(0);
+ }
+ return(1);
+}
+
+/*
+ * de6cattach:
+ *
+ * Called from lprattach
+ *
+ */
+void de6cattach(dev)
+#ifdef MACH_KERNEL
+struct bus_device *dev;
+#else MACH_KERNEL
+struct isa_dev *dev;
+#endif MACH_KERNEL
+{
+ de6c_softc_t *sp;
+ struct ifnet *ifp;
+#ifdef MACH_KERNEL
+ int unit = dev->unit;
+ int port = (int)dev->address;
+#else MACH_KERNEL
+ int unit = dev->dev_unit;
+ int port = (int)dev->dev_addr;
+#endif MACH_KERNEL
+
+ sp = &de6c_softc[unit];
+ sp->port = port;
+ sp->timer = -1;
+ sp->flags = 0;
+ sp->mode = 0;
+
+ ifp = &(sp->ds_if);
+ ifp->if_unit = unit;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST;
+
+#ifdef MACH_KERNEL
+ ifp->if_header_size = sizeof(struct ether_header);
+ ifp->if_header_format = HDR_ETHERNET;
+ ifp->if_address_size = 6;
+ ifp->if_address = (char *)&sp->address[0];
+ if_init_queues(ifp);
+#else MACH_KERNEL
+ ifp->if_name = "de";
+ ifp->if_init = de6cinit;
+ ifp->if_output = de6coutput;
+ ifp->if_ioctl = de6cioctl;
+ ifp->if_reset = de6creset;
+ ifp->if_next = NULL;
+ if_attach(ifp);
+#endif MACH_KERNEL
+
+ sp->alive = de6calive(sp);
+}
+
+de6calive(sp)
+de6c_softc_t *sp;
+{
+ int port = sp->port;
+ int unit = sp->ds_if.if_unit;
+ struct DEV *dev = lprinfo[unit];
+ int i;
+
+#ifdef MACH_KERNEL
+#else /* MACH_KERNEL */
+	extern int tcp_recvspace;	/* empirical measure */
+#endif /* MACH_KERNEL */
+
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ de6coutb(sp, CMD(port), SLT_NIC);
+ DATA_OUT(sp, port, COMMAND, RESET);
+ DATA_OUT(sp, port, COMMAND, STOP_RESET);
+ sp->latency = 101;
+ if (!de6cgetid(sp, sp->ds_addr)) {
+ de6coutb(sp, CMD(port), SLT_PRN);
+ return 0;
+ }
+
+#ifdef MACH_KERNEL
+#else /* MACH_KERNEL */
+	tcp_recvspace = 0x300;		/* empirical measure */
+#endif /* MACH_KERNEL */
+
+#ifdef de6cwrite
+ sp->latency = de6c_latency;
+#else /* de6cwrite */
+ sp->latency = 0;
+#endif /* de6cwrite */
+
+ for (i = 0; i++ < 10;) {
+ if (de6cmemcheck(sp))
+ break;
+ sp->latency += 10;
+ }
+
+ de6cgetid(sp, sp->ds_addr);
+ de6cgetid(sp, sp->address);
+ de6csetid(sp, sp->address);
+ de6coutb(sp, CMD(port), SLT_PRN);
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+
+#ifdef MACH_KERNEL
+#if NPAR > 0
+ printf("\n");
+#endif /* NPAR > 0 */
+ printf(" de%d: at lpr%d, port = %x, spl = %d, pic = %d. ",
+ unit, unit, dev->address, dev->sysdep, dev->sysdep1);
+
+ printf("ethernet id [%x:%x:%x:%x:%x:%x]",
+ sp->address[0],sp->address[1],sp->address[2],
+ sp->address[3],sp->address[4],sp->address[5]);
+
+ if (sp->latency > 1) {
+ printf("\n");
+ printf(" LATENCY = %d", sp->latency);
+ printf(" LATENCY = %d", sp->latency);
+ printf(" LATENCY = %d", sp->latency);
+ printf(" LATENCY = %d", sp->latency);
+ }
+#else MACH_KERNEL
+ printf("de%d: port = %x, spl = %d, pic = %d. ",
+ unit, dev->dev_addr, dev->dev_spl, dev->dev_pic);
+
+ printf("ethernet id [%x:%x:%x:%x:%x:%x]\n",
+ sp->address[0],sp->address[1],sp->address[2],
+ sp->address[3],sp->address[4],sp->address[5]);
+
+ if (sp->latency > 1) {
+ printf("de%d:", unit);
+ printf(" LATENCY = %d", sp->latency);
+ printf(" LATENCY = %d", sp->latency);
+ printf(" LATENCY = %d", sp->latency);
+ printf(" LATENCY = %d", sp->latency);
+ printf("\n");
+ }
+#endif MACH_KERNEL
+
+ return 1;
+}
+
+/*
+ * de6cwatch():
+ *
+ */
+void de6cwatch(b_ptr)
+short *b_ptr;
+{
+#ifdef DEBUG_MORE
+ int unit = *b_ptr;
+ de6c_softc_t *sp = &de6c_softc[unit];
+
+ if(!de6cdo_watch) return;
+ de6cintr(unit);
+ if (sp->ds_if.if_flags & IFF_RUNNING)
+ timeout(de6cwatch, b_ptr, de6cdo_watch);
+#endif /* DEBUG_MORE */
+}
+
+#ifdef MACH_KERNEL
+void de6cstart(int); /* forward */
+
+de6coutput(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NDE6C ||
+ de6c_softc[unit].port == 0)
+ return (ENXIO);
+
+ return (net_write(&de6c_softc[unit].ds_if, de6cstart, ior));
+}
+
+io_return_t
+de6csetinput(
+ dev_t dev,
+ mach_port_t receive_port,
+ int priority,
+ filter_t filter[],
+ natural_t filter_count)
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NDE6C ||
+ de6c_softc[unit].port == 0)
+ return ENXIO;
+
+ return net_set_filter(&de6c_softc[unit].ds_if,
+ receive_port, priority,
+ filter, filter_count);
+}
+
+#else MACH_KERNEL
+/*
+ * de6coutput:
+ *
+ * This routine is called by the "if" layer to output a packet to
+ * the network. This code resolves the local ethernet address, and
+ * puts it into the mbuf if there is room. If not, then a new mbuf
+ * is allocated with the header information and precedes the data
+ * to be transmitted.
+ *
+ * input: ifnet structure pointer, an mbuf with data, and address
+ * to be resolved
+ * output: mbuf is updated to hold enet address, or a new mbuf
+ * with the address is added
+ *
+ */
+de6coutput(ifp, m0, dst)
+struct ifnet *ifp;
+struct mbuf *m0;
+struct sockaddr *dst;
+{
+ register de6c_softc_t *sp = &de6c_softc[ifp->if_unit];
+ int type, opri, error;
+ u_char edst[6];
+ struct in_addr idst;
+ register struct mbuf *m = m0;
+ register struct ether_header *eh;
+ register int off;
+ int usetrailers;
+
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
+ de6cintoff(ifp->if_unit);
+ error = ENETDOWN;
+ goto bad;
+ }
+ switch (dst->sa_family) {
+
+#ifdef INET
+ case AF_INET:
+ idst = ((struct sockaddr_in *)dst)->sin_addr;
+ if (!arpresolve(&sp->de6c_ac, m, &idst, edst, &usetrailers)){
+ return (0); /* if not yet resolved */
+ }
+ off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
+
+ if (usetrailers && off > 0 && (off & 0x1ff) == 0 &&
+ m->m_off >= MMINOFF + 2 * sizeof (u_short)) {
+ type = ETHERTYPE_TRAIL + (off>>9);
+ m->m_off -= 2 * sizeof (u_short);
+ m->m_len += 2 * sizeof (u_short);
+ *mtod(m, u_short *) = htons((u_short)ETHERTYPE_IP);
+ *(mtod(m, u_short *) + 1) = htons((u_short)m->m_len);
+ goto gottrailertype;
+ }
+ type = ETHERTYPE_IP;
+ off = 0;
+ goto gottype;
+#endif
+#ifdef NS
+ case AF_NS:
+ type = ETHERTYPE_NS;
+ bcopy((caddr_t)&(((struct sockaddr_ns *)dst)->sns_addr.x_host),
+ (caddr_t)edst, sizeof (edst));
+ off = 0;
+ goto gottype;
+#endif
+
+ case AF_UNSPEC:
+ eh = (struct ether_header *)dst->sa_data;
+ bcopy((caddr_t)eh->ether_dhost, (caddr_t)edst, sizeof (edst));
+ type = eh->ether_type;
+ goto gottype;
+
+ default:
+ printf("de6c%d: can't handle af%d\n", ifp->if_unit,
+ dst->sa_family);
+ error = EAFNOSUPPORT;
+ goto bad;
+ }
+
+gottrailertype:
+ /*
+ * Packet to be sent as trailer: move first packet
+ * (control information) to end of chain.
+ */
+ while (m->m_next)
+ m = m->m_next;
+ m->m_next = m0;
+ m = m0->m_next;
+ m0->m_next = 0;
+ m0 = m;
+
+gottype:
+ /*
+ * Add local net header. If no space in first mbuf,
+ * allocate another.
+ */
+ if (m->m_off > MMAXOFF ||
+ MMINOFF + sizeof (struct ether_header) > m->m_off) {
+ m = m_get(M_DONTWAIT, MT_HEADER);
+ if (m == 0) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ m->m_next = m0;
+ m->m_off = MMINOFF;
+ m->m_len = sizeof (struct ether_header);
+ } else {
+ m->m_off -= sizeof (struct ether_header);
+ m->m_len += sizeof (struct ether_header);
+ }
+ eh = mtod(m, struct ether_header *);
+ eh->ether_type = htons((u_short)type);
+ bcopy((caddr_t)edst, (caddr_t)eh->ether_dhost, sizeof (edst));
+ bcopy((caddr_t)sp->address,(caddr_t)eh->ether_shost, sizeof(edst));
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ opri = SPLNET();
+ if (IF_QFULL(&ifp->if_snd)) {
+ IF_DROP(&ifp->if_snd);
+ splx(opri);
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ IF_ENQUEUE(&ifp->if_snd, m);
+ de6cstart(ifp->if_unit);
+ splx(opri);
+ return (0);
+
+bad:
+ m_freem(m0);
+ return (error);
+}
+#endif MACH_KERNEL
+
+/*
+ * de6creset:
+ *
+ * This routine is in part an entry point for the "if" code; most of
+ * the actual initialization has (we hope) already been done by
+ * calling de6cattach().
+ *
+ * input : unit number or board number to reset
+ * output : board is reset
+ *
+ */
+de6creset(unit)
+int unit;
+{
+ de6c_softc[unit].ds_if.if_flags &= ~IFF_RUNNING;
+ return(de6cinit(unit));
+}
+
+
+
+/*
+ * de6cinit:
+ *
+ * Another routine that interfaces the "if" layer to this driver.
+ * Simply resets the structures that are used by "upper layers".
+ *
+ * input : board number
+ * output : structures (if structs) and board are reset
+ *
+ */
+de6cinit(unit)
+int unit;
+{
+ de6c_softc_t *sp = &de6c_softc[unit];
+ struct ifnet *ifp = &(sp->ds_if);
+ int port = sp->port;
+ int pic = lprinfo[unit]->PIC;
+ spl_t oldpri;
+
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ if (ifp->if_addrlist == (struct ifaddr *)0) {
+ return;
+ }
+#endif MACH_KERNEL
+ oldpri = SPLNET();
+
+ if (ivect[pic] != de6cintr) {
+ sp->oldvect = ivect[pic];
+ ivect[pic] = de6cintr;
+ sp->oldunit = iunit[pic];
+ iunit[pic] = unit;
+ }
+
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ sp->consume = 0;
+ sp->produce = 0;
+ de6coutb(sp, CMD(port), SLT_NIC);
+ DATA_OUT(sp, port, COMMAND, RESET);
+ DATA_OUT(sp, port, COMMAND, STOP_RESET);
+ de6coutb(sp, CMD(port), IRQEN);
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4));
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4)|RXEN);
+ de6coutb(sp, CMD(port), SLT_PRN);
+#if 0
+ if (sp->mode & IFF_PROMISC) {
+ /* handle promiscuous case */;
+ }
+#endif 0
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+ sp->ds_if.if_flags |= IFF_RUNNING;
+ sp->flags |= DSF_RUNNING;
+ sp->timer = 5;
+ timeout(de6cwatch, &(ifp->if_unit), 3);
+ de6cstart(unit);
+ splx(oldpri);
+}
+
+#ifdef MACH_KERNEL
+/*ARGSUSED*/
+de6copen(dev, flag)
+ dev_t dev;
+ int flag;
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NDE6C ||
+ de6c_softc[unit].port == 0)
+ return (ENXIO);
+
+ de6c_softc[unit].ds_if.if_flags |= IFF_UP;
+ de6cinit(unit);
+ return(0);
+}
+#endif MACH_KERNEL
+
+/*
+ * de6cstart:
+ *
+ * This is yet another interface routine that simply tries to output any
+ * packets queued in mbufs after a reset.
+ *
+ * input : board number
+ * output : stuff sent to board if any there
+ *
+ */
+
+/* NOTE: called at SPLNET */
+void de6cstart(
+ int unit)
+{
+ struct ifnet *ifp = &(de6c_softc[unit].ds_if);
+ MM m;
+
+ for(;;) {
+ IF_DEQUEUE(&ifp->if_snd, m);
+ if (m != (MM) 0)
+ de6cxmt(unit, m);
+ else
+ return;
+ }
+}
+
+#ifdef MACH_KERNEL
+/*ARGSUSED*/
+io_return_t
+de6cgetstat(
+ dev_t dev,
+ int flavor,
+ dev_status_t status, /* pointer to OUT array */
+ natural_t *count) /* out */
+{
+ register int unit = minor(dev);
+ register de6c_softc_t *sp;
+
+ if (unit < 0 || unit >= NDE6C ||
+ de6c_softc[unit].port == 0)
+ return (ENXIO);
+
+ sp = &de6c_softc[unit];
+ if (! sp->alive)
+ if (! (sp->alive = de6calive(sp)))
+ return ENXIO;
+
+ return (net_getstat(&de6c_softc[unit].ds_if,
+ flavor,
+ status,
+ count));
+}
+
+io_return_t
+de6csetstat(
+ dev_t dev,
+ int flavor,
+ dev_status_t status,
+ natural_t count)
+{
+ register int unit = minor(dev);
+ register de6c_softc_t *sp;
+
+ if (unit < 0 || unit >= NDE6C ||
+ de6c_softc[unit].port == 0)
+ return (ENXIO);
+
+ sp = &de6c_softc[unit];
+ if (! sp->alive)
+ if (! (sp->alive = de6calive(sp)))
+ return ENXIO;
+
+
+ switch (flavor) {
+ case NET_STATUS:
+ {
+ /*
+ * All we can change are flags, and not many of those.
+ */
+ register struct net_status *ns = (struct net_status *)status;
+ int mode = 0;
+
+ if (count < NET_STATUS_COUNT)
+ return (D_INVALID_SIZE);
+
+ if (ns->flags & IFF_ALLMULTI)
+ mode |= MOD_ENAL;
+ if (ns->flags & IFF_PROMISC)
+ mode |= MOD_PROM;
+
+ /*
+	      * Force a complete reset if the receive mode changes
+ * so that these take effect immediately.
+ */
+ if (sp->mode != mode) {
+ sp->mode = mode;
+ if (sp->flags & DSF_RUNNING) {
+ sp->flags &= ~(DSF_LOCK | DSF_RUNNING);
+ de6cinit(unit);
+ }
+ }
+ break;
+ }
+ case NET_ADDRESS:
+ {
+ register union ether_cvt {
+ char addr[6];
+ int lwd[2];
+ } *ec = (union ether_cvt *)status;
+
+ if (count < sizeof(*ec)/sizeof(int))
+ return (D_INVALID_SIZE);
+
+ ec->lwd[0] = ntohl(ec->lwd[0]);
+ ec->lwd[1] = ntohl(ec->lwd[1]);
+	    de6csetid(sp, ec->addr);
+ break;
+ }
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+#else MACH_KERNEL
+
+/*
+ * de6cioctl:
+ *
+ * This routine processes an ioctl request from the "if" layer
+ * above.
+ *
+ * input  : pointer to the appropriate "if" struct, command, and data
+ * output : based on command appropriate action is taken on the
+ * de6c board(s) or related structures
+ * return : error is returned containing exit conditions
+ *
+ */
+de6cioctl(ifp, cmd, data)
+struct ifnet *ifp;
+int cmd;
+caddr_t data;
+{
+ register struct ifaddr *ifa = (struct ifaddr *)data;
+ register de6c_softc_t *sp = &de6c_softc[ifp->if_unit];
+ int opri, error;
+ short mode = 0;
+
+ if (! sp->alive)
+ if (! (sp->alive = de6calive(sp)))
+ return ENXIO;
+
+ opri = SPLNET();
+ error = 0;
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ de6cinit(ifp->if_unit);
+ switch (ifa->ifa_addr.sa_family) {
+#ifdef INET
+ case AF_INET:
+ ((struct arpcom *)ifp)->ac_ipaddr =
+ IA_SIN(ifa)->sin_addr;
+ arpwhohas((struct arpcom *)ifp,
+ &IA_SIN(ifa)->sin_addr);
+ break;
+#endif
+#ifdef NS
+ case AF_NS:
+ {
+ register struct ns_addr *ina =
+ &(IA_SNS(ifa)->sns_addr);
+ if (ns_nullhost(*ina))
+ ina->x_host =
+				*(union ns_host *)(sp->ds_addr);
+ else
+ de6cseteh(ina->x_host.c_host,
+ de6c_softc[ifp->if_unit].port);
+ break;
+ }
+#endif
+ }
+ break;
+ case SIOCSIFFLAGS:
+ if (ifp->if_flags & IFF_ALLMULTI)
+ mode |= MOD_ENAL;
+ if (ifp->if_flags & IFF_PROMISC)
+ mode |= MOD_PROM;
+ /*
+ * force a complete reset if the receive multicast/
+ * promiscuous mode changes so that these take
+ * effect immediately.
+ *
+ */
+ if (sp->mode != mode) {
+ sp->mode = mode;
+ if (sp->flags & DSF_RUNNING) {
+ sp->flags &=
+ ~(DSF_LOCK|DSF_RUNNING);
+ de6cinit(ifp->if_unit);
+ }
+ }
+ if ((ifp->if_flags & IFF_UP) == 0 &&
+ sp->flags & DSF_RUNNING) {
+ sp->timer = -1;
+ de6cintoff(ifp->if_unit);
+ } else
+ if (ifp->if_flags & IFF_UP &&
+ (sp->flags & DSF_RUNNING) == 0)
+ de6cinit(ifp->if_unit);
+ break;
+ default:
+ error = EINVAL;
+ }
+ splx(opri);
+ return (error);
+}
+#endif MACH_KERNEL
+
+/*
+ * de6cintr:
+ *
+ * This function is the interrupt handler for the de6c ethernet
+ * board. This routine will be called whenever either a packet
+ * is received, or a packet has successfully been transferred and
+ * the unit is ready to transmit another packet.
+ *
+ * input : board number that interrupted
+ * output : either a packet is received, or a packet is transferred
+ *
+ */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+#ifdef DEBUG_MORE
+de6crcvintr(unit)
+int unit;
+{
+ if(!de6cdo_rcvintr)
+ return;
+ de6cintr(unit);
+}
+#endif /* DEBUG_MORE */
+
+de6cintr(unit)
+int unit;
+{
+ register de6c_softc_t *sp = &de6c_softc[unit];
+ int port = sp->port;
+ int in;
+
+ if (de6cactive[unit] || !(sp->flags & DSF_RUNNING))
+ return;
+ de6cactive[unit]++;
+ de6coutb(sp, CMD(port), SLT_NIC);
+ STAT_IN(sp, port, in);
+
+ if ((in & (GOOD|TXBUSY)) == (GOOD|TXBUSY)) {
+ /* on L40's means that we are disconnected */
+ printf("de6intr%d: Card was disconnected; turning off network.\n", unit);
+ de6cintoff(unit);
+ de6cactive[unit]--;
+ return;
+ }
+
+ if (in & GOOD)
+ de6crcv(unit, in);
+ else
+/*rvb:tmp printf("intr: %x\n", in)*/;
+
+
+ de6coutb(sp, CMD(port), SLT_PRN);
+ de6cactive[unit]--;
+}
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+
+/*
+ * de6crcv:
+ *
+ * This routine is called by the interrupt handler to initiate a
+ * packet transfer from the board to the "if" layer above this
+ * driver. This routine checks if a buffer has been successfully
+ * received by the de6c. If so, the routine de6cread is called
+ * to do the actual transfer of the board data (including the
+ * ethernet header) into a packet (consisting of an mbuf chain).
+ *
+ * input : number of the board to check
+ * output : if a packet is available, it is "sent up"
+ *
+ */
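+/*
+ * Receive buffering, in brief (a descriptive sketch of the code below,
+ * not a hardware specification): on-board RAM is divided into BFRSIZ
+ * byte buffers; buffers 0..XMTidx-1 form a small receive ring indexed
+ * by sp->produce (next buffer handed to the NIC via RX_BP) and
+ * sp->consume (next buffer copied out with de6cread()), while buffer
+ * XMTidx is reserved for transmit by de6cxmt().  When the ring would
+ * wrap onto the consumer, "collision" is set and the producer is
+ * re-armed in the out: path instead.
+ */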
+de6crcv(unit, in)
+int unit, in;
+{
+ register de6c_softc_t *sp = &de6c_softc[unit];
+ register struct ifnet *ifp = &sp->ds_if;
+ int port = sp->port;
+ int bo;
+ int collision = 0;
+ int spins = 0;
+ u_short len;
+ struct ether_header header;
+ int tlen;
+ register struct ifqueue *inq;
+ int opri;
+ struct ether_header eh;
+#ifdef MACH_KERNEL
+ ipc_kmsg_t new_kmsg;
+ struct ether_header *ehp;
+ struct packet_header *pkt;
+#else MACH_KERNEL
+ struct mbuf *m, *tm;
+#endif MACH_KERNEL
+
+ sp->rcv++;
+
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ D(de6crcv0++);
+#define MT (sp->consume == sp->produce)
+ while (in & GOOD || !MT) {
+ spins++;
+ D(de6crcv1++);
+ if (in & GOOD) {
+ sp->rcvlen[sp->produce] = de6clen(sp);
+ if ( ((sp->produce + 1) % XMTidx) != sp->consume) {
+ if (++sp->produce == XMTidx)
+ sp->produce = 0;
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4));
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4)|RXEN);
+ } else collision = 1;
+ }
+ len = sp->rcvlen[sp->consume];
+ bo = sp->consume*BFRSIZ;
+ if (len < 60) {
+ printf("de%d: len(%d) < 60\n", unit, len);
+ goto out;
+ return;
+ }
+ de6cread(sp, bo, &eh, sizeof(struct ether_header));
+ bo += sizeof(struct ether_header);
+ len -= 18;
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+
+#ifdef MACH_KERNEL
+ new_kmsg = net_kmsg_get();
+ if (new_kmsg == IKM_NULL) {
+ /*
+ * Drop the packet.
+ */
+ sp->ds_if.if_rcvdrops++;
+ goto out;
+ return;
+ }
+
+ ehp = (struct ether_header *)
+ (&net_kmsg(new_kmsg)->header[0]);
+ pkt = (struct packet_header *)
+ (&net_kmsg(new_kmsg)->packet[0]);
+
+ /*
+ * Get header.
+ */
+ *ehp = eh;
+
+ /*
+ * Get body
+ */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ de6cread(sp, bo, (char *)(pkt + 1), len);
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+ pkt->type = ehp->ether_type;
+ pkt->length = len + sizeof(struct packet_header);
+
+ /*
+ * Hand the packet to the network module.
+ */
+ net_packet(ifp, new_kmsg, pkt->length,
+ ethernet_priority(new_kmsg));
+
+#else MACH_KERNEL
+ eh.ether_type = htons(eh.ether_type);
+ m =(struct mbuf *)0;
+ while ( len ) {
+ if (m == (struct mbuf *)0) {
+ m = m_get(M_DONTWAIT, MT_DATA);
+ if (m == (struct mbuf *)0) {
+ printf("de6crcv: Lost frame\n");
+ goto out;
+ return;
+ }
+ tm = m;
+ tm->m_off = MMINOFF;
+ /*
+ * first mbuf in the packet must contain a pointer to the
+ * ifnet structure. other mbufs that follow and make up
+ * the packet do not need this pointer in the mbuf.
+ *
+ */
+ *(mtod(tm, struct ifnet **)) = ifp;
+ tm->m_len = sizeof(struct ifnet **);
+ }
+ else {
+				tm->m_next = m_get(M_DONTWAIT, MT_DATA);
+				tm = tm->m_next;
+				if (tm == (struct mbuf *)0) {
+					m_freem(m);
+					printf("de6crcv: No mbufs, lost frame\n");
+					goto out;
+					return;
+				}
+				tm->m_off = MMINOFF;
+				tm->m_len = 0;
+ }
+ tlen = MIN( MLEN - tm->m_len, len );
+ tm->m_next = (struct mbuf *)0;
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ de6cread(sp, bo, mtod(tm, char *)+tm->m_len, tlen);
+ bo += tlen;
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+ tm->m_len += tlen;
+ len -= tlen;
+ }
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ STAT_IN(sp, port, in);
+ if (in & GOOD) { /* got another */
+ D(de6crcv2++);
+ sp->rcvlen[sp->produce] = de6clen(sp);
+ if ( ((sp->produce + 1) % XMTidx) != sp->consume) {
+ if (++sp->produce == XMTidx)
+ sp->produce = 0;
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4));
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4)|RXEN);
+ } else collision = 1;
+ }
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+
+ /*
+ * received packet is now in a chain of mbuf's. next step is
+ * to pass the packet upwards.
+ *
+ */
+ switch (eh.ether_type) {
+
+#ifdef INET
+ case ETHERTYPE_IP:
+ schednetisr(NETISR_IP);
+ inq = &ipintrq;
+ break;
+ case ETHERTYPE_ARP:
+ arpinput(&sp->de6c_ac, m);
+ goto out;
+ return;
+#endif
+#ifdef NS
+ case ETHERTYPE_NS:
+ schednetisr(NETISR_NS);
+ inq = &nsintrq;
+ break;
+#endif
+ default:
+ m_freem(m);
+ goto out;
+ return;
+ }
+ opri = SPLNET();
+ if (IF_QFULL(inq)) {
+ IF_DROP(inq);
+ splx(opri);
+ m_freem(m);
+ goto out;
+ return;
+ }
+ IF_ENQUEUE(inq, m);
+ splx(opri);
+#endif MACH_KERNEL
+out:
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ STAT_IN(sp, port, in);
+ if (in & GOOD) { /* got another */
+ D(de6crcv3++);
+ }
+/*2*/ /* implies wrap and pause */
+ if (collision) {
+ collision = 0;
+ D(printf("*C* "));
+ sp->rcvoff++;
+ if (++sp->produce == XMTidx)
+ sp->produce = 0;
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4));
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4)|RXEN);
+ }
+/*2*/ /* implies wrap and pause */
+ if (++sp->consume == XMTidx)
+ sp->consume = 0;
+ if (spins > 10) {
+ spins = 0;
+ D(printf("*R* "));
+ sp->rcvspin++;
+ /* how should we recover here ??? */;
+ /* return does not work */;
+ /* de6cinit(unit) gets ugly if we are called from
+ de6cxmt */;
+ }
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+ }
+}
+
+
+/*
+ * de6cxmt:
+ *
+ * This routine fills in the appropriate registers and memory
+ * locations on the d-link "600" board and starts the board off on
+ * the transmit.
+ *
+ * input : board number of interest, and a pointer to the mbuf
+ * output : board memory and registers are set for xfer and attention
+ *
+ */
+/* NOTE: called at SPLNET */
+/*
+ * This implies that rcv interrupts will be blocked.
+ */
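+/*
+ * Transmit path, in brief (a descriptive sketch of the code below): the
+ * frame is written at the end of the transmit buffer, i.e. at offset
+ * XMTidx*BFRSIZ + (BFRSIZ - bytes_in_msg), padded with de6mt[] up to the
+ * Ethernet minimum; TX_ADR is then loaded with that offset, low byte
+ * first, TXEN is asserted, and the code spins for up to XMT_BSY_WAIT
+ * status reads, handing any frames that arrive meanwhile to de6crcv().
+ */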
+#define max(a,b) (((a) > (b)) ? (a) : (b))
+
+char de6mt[ETHERMIN];
+de6cxmt(unit, m)
+int unit;
+MM m;
+{
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ register struct mbuf *tm_p;
+#endif MACH_KERNEL
+ int i;
+ int in;
+ int bo, boo;
+ de6c_softc_t *sp = &de6c_softc[unit];
+ int port = sp->port;
+ u_short count = 0;
+ u_short bytes_in_msg;
+ static int m_length();
+
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ if (de6cactive[unit] >= 2) /* a funny loop caused by */
+ return; /* a flood of arps */
+ if (de6cactive[unit]++ == 0)
+ de6coutb(sp, CMD(port), SLT_NIC);
+ STAT_IN(sp, port, in);
+
+ D(if (de6cdo_xmt) printf("xmt: stat[-] = %x\n", in));
+ if (in & GOOD) {
+ de6crcv(unit, in);
+ }
+ sp->xmt++;
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+
+#ifdef MACH_KERNEL
+ count = m->io_count;
+ bytes_in_msg = max(count, ETHERMIN + sizeof(struct ether_header));
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ boo = bo = XMTidx*BFRSIZ+(BFRSIZ-bytes_in_msg);
+ de6cwrite(sp, bo, m->io_data, count);
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+ bo += count;
+ iodone(m);
+#else MACH_KERNEL
+ bytes_in_msg = max(m_length(m), ETHERMIN + sizeof(struct ether_header));
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ boo = bo = XMTidx*BFRSIZ+(BFRSIZ-bytes_in_msg);
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+
+ for (tm_p = m; tm_p != (struct mbuf *)0; tm_p = tm_p->m_next) {
+ if (count + tm_p->m_len > ETHERMTU + sizeof(struct ether_header))
+ break;
+ if (tm_p->m_len == 0)
+ continue;
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ de6cwrite(sp, bo, mtod(tm_p, caddr_t), tm_p->m_len);
+ bo += tm_p->m_len;
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+ count += tm_p->m_len;
+ }
+ m_freem(m);
+#endif MACH_KERNEL
+
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ if (bytes_in_msg - count > 0)
+ de6cwrite(sp, bo, de6mt, bytes_in_msg - count);
+
+ DATA_OUT(sp, port, TX_ADR, boo & 0xff);
+ DATA_OUT(sp, port, TX_ADR, (boo >> 8) & 0xff);
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4));
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4)|TXEN);
+
+ for (i = 0; i < XMT_BSY_WAIT; i++) {
+ STAT_IN(sp, port, in);
+ D(if (de6cdo_xmt) printf("xmt: stat[%d] = %x\n", i, in));
+ if (in & GOOD) {
+ /*
+ * this does indeed happen
+ * printf("!#");
+ */
+ de6crcv(unit, in);
+ }
+ if (!(in & TXBUSY)) {
+ goto out;
+ return;
+ }
+ }
+	printf("dexmt: stat[%d] = %x\n", i, in);
+out:
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4));
+ DATA_OUT(sp, port, COMMAND, RX_BP|(sp->produce<<4)|RXEN);
+
+ if (--de6cactive[unit] == 0)
+ de6coutb(sp, CMD(port), SLT_PRN);
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+}
+
+/*
+ * de6cintoff:
+ *
+ * This function turns interrupts off for the de6c board indicated.
+ *
+ */
+de6cintoff(unit)
+int unit;
+{
+ de6c_softc_t *sp = &de6c_softc[unit];
+ int port = sp->port;
+ int pic = lprinfo[unit]->PIC;
+
+ printf("de%d: Turning off d-link \"600\"\n", unit);
+ sp->ds_if.if_flags &= ~(IFF_UP|IFF_RUNNING);
+ sp->flags &= ~(DSF_LOCK | DSF_RUNNING);
+
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+ de6coutb(sp, CMD(port), SLT_NIC);
+ DATA_OUT(sp, port, COMMAND, RESET);
+ DATA_OUT(sp, port, COMMAND, STOP_RESET);
+ de6coutb(sp, CMD(port), SLT_PRN);
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+
+ outb(CMD(sp->port), 0x07);
+ ivect[pic] = sp->oldvect;
+ iunit[pic] = sp->oldunit;
+}
+
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+/*
+ * The length of an mbuf chain
+ */
+static
+m_length(m)
+ register struct mbuf *m;
+{
+ register int len = 0;
+
+ while (m) {
+ len += m->m_len;
+ m = m->m_next;
+ }
+ return len;
+}
+#endif MACH_KERNEL
+
+
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+/* d-link 600 ON; d-link 600 ON; d-link 600 ON; d-link 600 ON */
+de6cgetid(sp, buf)
+de6c_softc_t *sp;
+u_char *buf;
+{
+ de6cread(sp, EADDR, buf, 6);
+ if ((buf[0] != 0x00) || (buf[1] != 0xde) || (buf[2] != 0x15))
+ return 0;
+ buf[0] = 0x80;
+ buf[1] = 0x00;
+ buf[2] = 0xc8;
+ /* for this model d-link we assert 0x70 as the high mfr's nibble. */
+ buf[3] = 0x70 | (buf[3] & 0xf);
+ return 1;
+}
+
+de6csetid(sp, buf)
+de6c_softc_t *sp;
+char *buf;
+{
+ de6cwrite(sp, EADDR, buf, 6);
+}
+
+/*
+ * get length of packet just rcv'd.
+ * includes ether header and crc
+ */
+de6clen(sp)
+de6c_softc_t *sp;
+{
+ int port = sp->port;
+ int in;
+ int i;
+
+ de6coutb(sp, DATA(port), RX_LEN);
+ in = inb(STAT(port));
+ de6coutb(sp, DATA(port), RX_LEN|STROBE);
+ i = ((in>>4) | (inb(STAT(port)) & 0xf0));
+
+ de6coutb(sp, DATA(port), RX_LEN);
+ in = inb(STAT(port));
+ de6coutb(sp, DATA(port), RX_LEN|STROBE);
+ i |= ((in>>4) | (inb(STAT(port)) & 0xf0)) << 8;
+
+ return i;
+}
+
+#if 0
+de6cread(sp, address, buf, len)
+de6c_softc_t *sp;
+unsigned char *buf;
+{
+ int port = sp->port;
+ u_char in;
+
+ DATA_OUT(sp, port, RW_ADR, address & 0xff);
+ DATA_OUT(sp, port, RW_ADR, (address >> 8) & 0xff);
+
+ while (len--) {
+ de6coutb(sp, DATA(port), READ);
+ in = inb(STAT(port));
+ de6coutb(sp, DATA(port), READ|STROBE);
+ *buf++ = ((in>>4) | (inb(STAT(port)) & 0xf0));
+ }
+}
+
+de6cwrite(sp, address, buf, len)
+de6c_softc_t *sp;
+unsigned char *buf;
+{
+ int port = sp->port;
+ int out;
+
+ DATA_OUT(sp, port, RW_ADR, address & 0xff);
+ DATA_OUT(sp, port, RW_ADR, (address >> 8) & 0xff);
+
+ while (len--) {
+ out = *buf++;
+ DATA_OUT(sp, port, WRITE, out);
+
+ }
+}
+#endif 0
+
+#ifndef de6cread
+de6cread(sp, address, buf, len)
+de6c_softc_t *sp;
+unsigned char *buf;
+{
+ int port = sp->port;
+ register volatile int i;
+ unsigned char in;
+
+ outb(port, ((address)<<4) | RW_ADR);
+ i = sp->latency; while (i-- > 0);
+ outb(port, ((address)&0xf0)| RW_ADR | 0x8);
+ i = sp->latency; while (i-- > 0);
+
+ outb(port, ((address>>8)<<4) | RW_ADR);
+ i = sp->latency; while (i-- > 0);
+ outb(port, ((address>>8)&0xf0)| RW_ADR | 0x8);
+ i = sp->latency; while (i-- > 0);
+
+ while (len--) {
+ outb(port, READ);
+ i = sp->latency; while (i-- > 0);
+ in = inb(STAT(port));
+ outb(port, READ|0x08);
+ i = sp->latency; while (i-- > 0);
+ *buf++ = ((in>>4) | (inb(STAT(port)) & 0xf0));
+ }
+}
+#endif /* de6cread */
+
+#ifndef de6cwrite
+de6cwrite(sp, address, buf, len)
+de6c_softc_t *sp;
+unsigned char *buf;
+{
+ int port = sp->port;
+ register volatile int i;
+ unsigned char out;
+
+ outb(port, ((address)<<4) | RW_ADR);
+ i = sp->latency; while (i-- > 0);
+ outb(port, ((address)&0xf0)| RW_ADR | 0x8);
+ i = sp->latency; while (i-- > 0);
+
+ outb(port, ((address>>8)<<4) | RW_ADR);
+ i = sp->latency; while (i-- > 0);
+ outb(port, ((address>>8)&0xf0)| RW_ADR | 0x8);
+ i = sp->latency; while (i-- > 0);
+
+ while (len--) {
+ out = *buf++;
+ outb(port, ((out)<<4) | WRITE);
+ i = sp->latency; while (i-- > 0);
+ outb(port, ((out)&0xf0)| WRITE | 0x8);
+ i = sp->latency; while (i-- > 0);
+ }
+}
+#endif /* de6cwrite */
+
+de6coutb(sp, p, v)
+de6c_softc_t *sp;
+{
+register volatile int i = sp->latency;
+
+ outb(p, v);
+ while (i-- > 0);
+}
+
+de6cmemcheck(sp)
+de6c_softc_t *sp;
+{
+ int i;
+ int off = 0;
+ int ret = 1;
+#ifdef MACH_KERNEL
+ unsigned short *memchk;
+ unsigned short *chkmem;
+ if (kmem_alloc(kernel_map, (vm_offset_t *)&memchk, BFRS * BFRSIZ) !=
+ KERN_SUCCESS ||
+ kmem_alloc(kernel_map, (vm_offset_t *)&chkmem, BFRS * BFRSIZ) !=
+ KERN_SUCCESS) {
+ printf("de6c: memory allocation failure!!\n");
+ return 0;
+ }
+#else /* MACH_KERNEL */
+ unsigned short *memchk = (unsigned short *) kmem_alloc(kernel_map, BFRS * BFRSIZ);
+ unsigned short *chkmem = (unsigned short *) kmem_alloc(kernel_map, BFRS * BFRSIZ);
+ if ( ! ((int) memchk) || ! ((int) chkmem)) {
+ printf("de6c: memory allocation failure!!\n");
+ return 0;
+ }
+#endif /* MACH_KERNEL */
+
+ for (i = 0; i < BFRS * BFRSIZ/sizeof (short); i++)
+ memchk[i] = i;
+ bzero(chkmem, BFRS * BFRSIZ);
+
+
+ for (off = 0; off < BFRS * BFRSIZ; off += BFRSIZ/2) {
+ de6cwrite(sp, off, memchk+(off/sizeof (short)), BFRSIZ/2);
+ de6cread (sp, off, chkmem+(off/sizeof (short)), BFRSIZ/2);
+ }
+
+ for (i = 0; i < BFRS * (BFRSIZ/sizeof (short)); i++)
+ if (memchk[i] != chkmem[i]) {
+ printf("de: tilt:seq [%x:%d] %x != %x\n",
+ i, i, memchk[i], chkmem[i]);
+ ret = 0;
+ break;
+ }
+
+ kmem_free(kernel_map, (vm_offset_t) memchk, BFRS * BFRSIZ);
+ kmem_free(kernel_map, (vm_offset_t) chkmem, BFRS * BFRSIZ);
+
+ return ret;
+}
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+/* d-link 600 OFF; d-link 600 OFF; d-link 600 OFF; d-link 600 OFF */
+
+#ifdef DEBUG
+#define STATIC
+STATIC int print_pkt(), print_bdy();
+STATIC int print_e_hdr(), print_ip_hdr(), print_ip();
+STATIC int print_ipa(), print_arp(), print_e(), print_chars();
+
+STATIC
+print_pkt(p, len)
+unsigned char *p;
+{
+ int j, k;
+ int type;
+
+ if (len < 18)
+ printf("print_pkt: too small %d\n", len);
+
+ type = print_e_hdr(p);
+
+ switch (type) {
+ case 0x806:
+ print_arp(p+14);
+ break;
+ case 0x800:
+ print_ip(p+14, len - 18);
+ break;
+ default:
+ for (j = 14; j < len; j +=20) {
+ for (k = 0; k < 20; k++)
+ printf("%2x ", p[j+k]);
+ printf("\n");
+ }
+ }
+}
+
+STATIC
+print_bdy(p, len, type)
+unsigned char *p;
+{
+ int j, k;
+
+ if (len < 18)
+		printf("print_bdy: too small %d\n", len);
+
+ switch (type) {
+ case 0x806:
+ print_arp(p);
+ break;
+ case 0x800:
+ print_ip(p, len);
+ break;
+ default:
+ for (j = 0; j < len; j +=20) {
+ for (k = 0; k < 20; k++)
+ printf("%2x ", p[j+k]);
+ printf("\n");
+ }
+ }
+}
+
+STATIC
+print_e_hdr(p)
+unsigned char *p;
+{
+ int type = ntohs(((unsigned short *)p)[6]);
+
+ printf("S=%x:%x:%x:%x:%x:%x, ", p[6], p[7], p[8], p[9], p[10], p[11]);
+ printf("D=%x:%x:%x:%x:%x:%x, ", p[0], p[1], p[2], p[3], p[4], p[5]);
+ printf("T=%x\n", type);
+
+ return type;
+}
+
+STATIC
+print_ip_hdr(u)
+u_char *u;
+{
+
+ int l = ntohs(*(u_short *)(u+2));
+
+ print_ipa(u+12);
+ printf(" -> ");
+ print_ipa(u+12+4);
+ printf(" L%d(0x%x)\n", l, l);
+}
+
+STATIC
+print_ip(p, len)
+unsigned char *p;
+{
+ int j,k;
+
+ print_ip_hdr(p);
+ for (k =0; k < 12; k++)
+ printf("%2x ", p[k]);
+ print_ipa(p+12);
+ printf(" ");
+ print_ipa(p+12+4);
+ printf("\n");
+ for (j = 20; j < len; j +=16) {
+ for (k = 0; k < 16; k++)
+ printf("%2x ", p[j+k]);
+ print_chars(&p[j], 16);
+ printf("\n");
+ }
+}
+
+STATIC
+print_ipa(u)
+u_char *u;
+{
+ printf("%d.%d.%d.%d", u[0], u[1], u[2], u[3]);
+}
+
+STATIC
+print_arp(p)
+#ifdef MACH_KERNEL
+{}
+#else MACH_KERNEL
+struct arphdr *p;
+{
+ u_char *u = (u_char *)(p+1);
+
+ printf("op = %x, pro = %x, hln = %x, pln = %x\n",
+ ntohs(p->ar_op), ntohs(p->ar_pro), p->ar_hln, p->ar_pln);
+
+ print_e(u);
+ print_ipa(u+p->ar_hln);
+ printf(" seeks\n");
+
+ print_e(u+p->ar_hln+p->ar_pln);
+ print_ipa(u+p->ar_hln+p->ar_pln+p->ar_hln);
+ printf("\n");
+
+}
+#endif MACH_KERNEL
+
+STATIC
+print_e(u)
+u_char *u;
+{
+ printf("%x:%x:%x:%x:%x:%x ", u[0], u[1], u[2], u[3], u[4], u[5]);
+}
+
+STATIC
+print_chars(u, len)
+u_char *u;
+{
+ int c;
+
+ printf("|>");
+ while (len--) {
+ c = *u++;
+ if (c < 0x7f && c > 0x1f)
+ printf("%c", c);
+ else
+ printf(" ");
+ }
+ printf("<|");
+}
+#endif DEBUG
diff --git a/i386/i386at/if_de6c.h b/i386/i386at/if_de6c.h
new file mode 100644
index 00000000..9d8e9b85
--- /dev/null
+++ b/i386/i386at/if_de6c.h
@@ -0,0 +1,113 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * HISTORY
+ * $Log: if_de6c.h,v $
+ * Revision 1.1.1.1 1996/10/30 01:39:26 thomas
+ * Imported from UK22
+ *
+ * Revision 1.2 1994/11/08 20:47:25 baford
+ * merged in CMU's MK83-MK83a diffs
+ *
+ * Revision 2.2 93/11/17 18:32:40 dbg
+ * Moved source into kernel/i386at/DLINK/if_de6c.c, since we
+ * can't release it but don't want to lose it.
+ * [93/11/17 dbg]
+ *
+ * Removed local declaration of HZ.
+ * [93/01/29 dbg]
+ *
+ * Created.
+ * [92/08/13 rvb]
+ *
+ */
+
+/* PC/FTP Packet Driver source, conforming to version 1.09 of the spec
+ * Portions (C) Copyright 1990 D-Link, Inc.
+ *
+ * Permission is granted to any individual or institution to use, copy,
+ * modify, or redistribute this software and its documentation provided
+ * this notice and the copyright notices are retained. This software may
+ * not be distributed for profit, either in original form or in derivative
+ * works. D-Link, inc. makes no representations about the suitability
+ * of this software for any purpose. D-LINK GIVES NO WARRANTY,
+ * EITHER EXPRESS OR IMPLIED, FOR THE PROGRAM AND/OR DOCUMENTATION
+ * PROVIDED, INCLUDING, WITHOUT LIMITATION, WARRANTY OF MERCHANTABILITY
+ * AND WARRANTY OF FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#define DATA(port) (port + 0)
+#define STAT(port) (port + 1)
+#define CMD(port) (port + 2)
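+
+/* For instance, with a printer-port base of 0x378 (illustrative only; the
+   real base comes from the bus configuration, since the DE-600 hangs off a
+   printer port -- cf. SLT_PRN/NML_PRN below) DATA is 0x378, STAT is 0x379
+   and CMD is 0x37a. */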
+
+/* DE-600's DATA port Command */
+#define WRITE 0x00 /* write memory */
+#define READ 0x01 /* read memory */
+#define STATUS 0x02 /* read status register */
+#define COMMAND 0x03 /* write command register */
+#define RX_NONE 0x00 /* M1=0, M0=0 (bit 1,0) */
+#define RX_ALL 0x01 /* M1=0, M0=1 */
+#define RX_BP 0x02 /* M1=1, M0=0 */
+#define RX_MBP 0x03 /* M1=1, M0=1 */
+#define TXEN 0x04 /* bit 2 */
+#define RXEN 0x08 /* bit 3 */
+#define LOOPBACK 0x0c /* RXEN=1, TXEN=1 */
+#define IRQINV 0x40 /* bit 6 -- IRQ inverse */
+#define RESET 0x80 /* set bit 7 high */
+#define STOP_RESET 0x00 /* set bit 7 low */
+#define NUL_CMD 0x04 /* null command */
+#define RX_LEN 0x05 /* read Rx packet length */
+#define TX_ADR 0x06 /* write Tx address */
+#define RW_ADR 0x07 /* write memory address */
+
+/* DE-600's CMD port Command */
+/* #define CMD(port) (port + 2) */
+#define SLT_NIC 0x04 /* select Network Interface Card */
+#define SLT_PRN 0x1c /* select Printer */
+#define NML_PRN 0xec /* normal Printer situation */
+#define IRQEN 0x10 /* enable IRQ line */
+
+/* DE-600's STAT port bits 7-4 */
+/* #define STAT(port) (port + 1) */
+#define RXBUSY 0x80
+#define GOOD 0x40
+#define RESET_FLAG 0x20
+#define T16 0x10
+#define TXBUSY 0x08
+
+#define STROBE 0x08
+#define EADDR 0x2000 /* HA13=0 => Mem, HA13=1 => Node Num */
+#define BFRSIZ 0x0800 /* number of bytes in a buffer */
+#define BFRS 4
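+
+/* So the board buffer memory driven here is BFRS * BFRSIZ = 4 * 0x800 bytes
+   (8 Kbytes), which is the range de6cmemcheck() patterns and verifies, and
+   EADDR (0x2000, i.e. HA13 set) is where de6cgetid() reads the node address
+   from instead of buffer memory. */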
+
+#define DSF_LOCK 1
+#define DSF_RUNNING 2
+
+#define MOD_ENAL 1
+#define MOD_PROM 2
+
diff --git a/i386/i386at/if_de6s.S b/i386/i386at/if_de6s.S
new file mode 100644
index 00000000..cc697a23
--- /dev/null
+++ b/i386/i386at/if_de6s.S
@@ -0,0 +1,278 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * HISTORY
+ * $Log: if_de6s.S,v $
+ * Revision 1.1.1.1 1996/10/30 01:39:26 thomas
+ * Imported from UK22
+ *
+# Revision 1.3 1995/04/26 19:22:12 baford
+# got alignment working right for both ELF and a.out
+#
+# Revision 1.2 1995/04/25 16:13:28 baford
+# got kernel working with ELF build tools
+#
+# Revision 1.1 1994/11/08 20:47:25 baford
+# merged in CMU's MK83-MK83a diffs
+#
+ * Revision 2.2 93/11/17 18:33:19 dbg
+ * Moved source into kernel/i386at/DLINK/if_de6c.c, since we
+ * can't release it but don't want to lose it.
+ * [93/11/17 dbg]
+ *
+ * Revision 2.2.2.1 93/09/21 21:00:39 dbg
+ * <no changes>
+ *
+ * Revision 2.2.1.1 93/09/03 15:06:26 dbg
+ * Created.
+ * [92/08/13 rvb]
+ *
+ *
+ * File: if_de6s.s
+ * Author: Robert V. Baron
+ */
+
+#include <mach/machine/asm.h>
+#undef DATA
+
+#include <i386at/if_de6c.h>
+
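+/*
+ * de6csetmemaddr: internal helper for the two routines below.  It clocks the
+ * 16-bit board-RAM address (8(%ebp) of the caller's frame) out through the
+ * RW_ADR command a nibble at a time -- low nibble first, then the high
+ * nibble with STROBE set -- spinning the delay count in %edi between outb's.
+ * %edx must already hold the DATA port.
+ */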
+ P2ALIGN(2)
+de6csetmemaddr:
+ movl 8(%ebp), %ebx /* addr */
+ movb %bl, %al /* low byte; low nibble */
+ salb $4, %al
+ orb $(RW_ADR), %al
+ outb %al, %dx
+ movl %edi, %ecx
+0: loop 0b
+
+ movb %bl, %al /* low byte; high nibble */
+ andb $0xf0, %al
+ orb $(RW_ADR|STROBE), %al
+ outb %al, %dx
+ movl %edi, %ecx
+0: loop 0b
+
+ movb %bh, %al /* high byte; low nibble */
+ salb $4, %al
+ orb $(RW_ADR), %al
+ outb %al, %dx
+ movl %edi, %ecx
+0: loop 0b
+
+ movb %bh, %al /* high byte; high nibble */
+ andb $0xf0, %al
+ orb $(RW_ADR|STROBE), %al
+ outb %al, %dx
+ movl %edi, %ecx
+0: loop 0b
+ ret
+
+/* de6cwriteasm(address, buf, len, port, delay) */
+ENTRY(de6cwriteasm)
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %edi
+ pushl %esi
+ pushl %ebx
+ movl 20(%ebp), %edx /* port */
+ movl 24(%ebp), %edi /* delay */
+
+ call de6csetmemaddr
+
+ cld
+ movl 16(%ebp), %ecx /* cnt */
+ movl 12(%ebp), %esi /* source */
+
+ cmpl $1, %edi /* no delay/latency */
+ je 2f
+1: lodsb /* leave delay/latency */
+ pushl %ecx
+ movb %al, %bl /* high byte; low nibble */
+ salb $4, %al
+ outb %al, %dx
+ movl %edi, %ecx
+0: loop 0b
+
+ movb %bl, %al /* high byte; high nibble */
+ andb $0xf0, %al
+ orb $(WRITE|STROBE), %al /* NB: WRITE == 0 */
+ outb %al, %dx
+ movl %edi, %ecx
+0: loop 0b
+ popl %ecx
+ loop 1b
+
+ popl %ebx
+ popl %esi
+ popl %edi
+ leave
+ ret
+ /* edi and ebx free */
+2: lodsb
+ movb %al, %bl /* high byte; low nibble */
+ salb $4, %al
+ outb %al, %dx
+
+ movb %bl, %al /* high byte; high nibble */
+ andb $0xf0, %al
+ orb $(WRITE|STROBE), %al /* NB: WRITE == 0 */
+ outb %al, %dx
+ loop 2b
+
+6: popl %ebx
+ popl %esi
+ popl %edi
+ leave
+ ret
+
+
+/* de6creadasm(address, buf, len, port, delay) */
+ENTRY(de6creadasm)
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %edi
+ pushl %esi
+ pushl %ebx
+ movl 20(%ebp), %edx /* port */
+ movl 24(%ebp), %edi /* delay for desetmemaddr*/
+ movl %edi, %esi /* delay; cause edi used by stosl */
+
+ call de6csetmemaddr
+
+ cld
+ movl 16(%ebp), %ecx /* cnt */
+ movl 12(%ebp), %edi /* destination */
+ movw $0x0901, %bx /* bl = 1 = READ; bh = READ|STROBE */
+
+#ifdef out_in
+ cmpl $1, %esi /* no delay/latency */
+ je 2f
+#endif /* out_in */
+1: xorw %ax, %ax /* leave delay/latency */
+ pushl %ecx
+ movb %bl, %al
+ outb %al, %dx
+ movl %esi, %ecx
+0: loop 0b
+ incw %dx /* inb from STAT == port + 1 */
+ inb %dx, %al /* first byte high nibble goes into */
+ decw %dx
+ salw $4, %ax /* ... low nibble formed byte */
+
+ movb %bh, %al
+ outb %al, %dx
+ movl %esi, %ecx
+0: loop 0b
+ incw %dx /* inb from STAT == port + 1 */
+ inb %dx, %al /* second byte high nibble goes into */
+ decw %dx
+ andb $0xf0, %al /* ... high nibble formed byte */
+ orb %ah, %al
+ stosb
+ popl %ecx
+ loop 1b
+
+ popl %ebx
+ popl %esi
+ popl %edi
+ leave
+ ret
+
+2: xorw %ax, %ax /* leave delay/latency */
+ movb %bl, %al
+ outb %al, %dx
+ incw %dx /* inb from STAT == port + 1 */
+ inb %dx, %al /* high nibble goes into low nibble */
+ decw %dx
+ salw $4, %ax
+
+ movb %bh, %al
+ outb %al, %dx
+ incw %dx /* inb from STAT == port + 1 */
+ inb %dx, %al
+ decw %dx
+ andb $0xf0, %al
+ orb %ah, %al
+ stosb
+ loop 2b
+
+ popl %ebx
+ popl %esi
+ popl %edi
+ leave
+ ret
+
+
+#ifdef unroll_wins
+ unrolled loop for write iff no delay
+2: lodsl
+ movl %eax, %ebx /* byte one; low nibble */
+ salb $4, %al
+ outb %al, %dx
+
+ movb %bl, %al /* byte one; high nibble */
+ andb $0xf0, %al
+ orb $8, %al
+ outb %al, %dx
+loop 3f
+jmp 6f
+3: sarl $8, %ebx
+ movb %bl, %al /* byte two; low nibble */
+ salb $4, %al
+ outb %al, %dx
+
+ movb %bl, %al /* byte two; high nibble */
+ andb $0xf0, %al
+ orb $8, %al
+ outb %al, %dx
+loop 4f
+jmp 6f
+4: sarl $8, %ebx
+ movb %bl, %al /* byte three; low nibble */
+ salb $4, %al
+ outb %al, %dx
+
+ movb %bl, %al /* byte three; high nibble */
+ andb $0xf0, %al
+ orb $8, %al
+ outb %al, %dx
+loop 5f
+jmp 6f
+5: sarl $8, %ebx
+	movb	%bl, %al	/* byte four; low nibble */
+ salb $4, %al
+ outb %al, %dx
+
+ movb %bl, %al /* byte four; high nibble */
+ andb $0xf0, %al
+ orb $8, %al
+ outb %al, %dx
+ loop 2b
+#endif /* unroll_wins */
+
diff --git a/i386/i386at/if_ne.c b/i386/i386at/if_ne.c
new file mode 100644
index 00000000..9a950d69
--- /dev/null
+++ b/i386/i386at/if_ne.c
@@ -0,0 +1,1081 @@
+/*-
+ * Copyright (c) 1990, 1991 William F. Jolitz.
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_ne.c 7.4 (Berkeley) 5/21/91
+ */
+
+/*
+ * NE2000 Ethernet driver
+ *
+ * Parts inspired from Tim Tucker's if_wd driver for the wd8003,
+ * insight on the ne2000 gained from Robert Clements PC/FTP driver.
+ */
+
+#include <ne.h>
+
+
+#if NNE > 0
+#ifdef MACH_KERNEL
+
+#include <kern/time_out.h>
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+#include <i386at/ds8390.h>
+#include <i386at/if_nereg.h>
+#include <i386/ipl.h>
+#include <chips/busses.h>
+#ifdef FIPC
+#include <ipc/fipc.h>
+#endif /* FIPC */
+
+#else MACH_KERNEL
+
+#include <sys/param.h>
+#include <mach/vm_param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/table.h>
+#include <sys/buf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/vmmac.h>
+#include <sys/ioctl.h>
+#include <sys/errno.h>
+#include <sys/syslog.h>
+#include <vm/vm_kern.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/route.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/if_ether.h>
+
+#include <netns/ns.h>
+#include <netns/ns_if.h>
+
+
+#include <i386/ipl.h>
+#include <i386at/atbus.h>
+#include <i386at/ds8390.h>
+#include <i386at/if_nereg.h>
+#include <i386/handler.h>
+#include <i386/dispatcher.h>
+int ether_output();
+int neioctl();
+
+#endif
+
+
+int neprobe();
+void neattach();
+int neintr();
+int nestart();
+int neinit();
+
+static vm_offset_t ne_std[NNE] = {0};
+static struct bus_device *ne_info[NNE];
+struct bus_driver nedriver =
+ { neprobe, 0, neattach, 0, ne_std, "ne", ne_info, 0, 0, 0 };
+
+#define ETHER_MIN_LEN 64
+#define ETHER_MAX_LEN 1536
+#define SPLNET spl6
+/*
+ * Ethernet software status per interface.
+ *
+ * Each interface is referenced by a network interface structure,
+ * ns_if, which the routing code uses to locate the interface.
+ * This structure contains the output queue for the interface, its address, ...
+ */
+typedef struct {
+
+#ifdef MACH_KERNEL
+ struct ifnet ds_if;
+ u_char ds_addr[6];
+#else MACH_KERNEL
+ struct arpcom ns_ac; /* Ethernet common part */
+#define ds_if ns_ac.ac_if /* network-visible interface */
+#define ds_addr ns_ac.ac_enaddr /* hardware Ethernet address */
+#endif MACH_KERNEL
+
+ int ns_flags;
+#define DSF_LOCK 1 /* block re-entering enstart */
+#define DSF_RUNNING 2
+ int ns_oactive ;
+ int ns_mask ;
+ int ns_ba; /* byte addr in buffer ram of inc pkt */
+ int ns_cur; /* current page being filled */
+ struct prhdr ns_ph; /* hardware header of incoming packet*/
+ struct ether_header ns_eh; /* header of incoming packet */
+ u_char ns_pb[2048 /*ETHERMTU+sizeof(long)*/];
+ short ns_txstart; /* transmitter buffer start */
+	short	ns_rxend;	/* receiver buffer end */
+	short	ns_rxbndry;	/* receiver buffer boundary */
+ caddr_t ns_port; /* i/o port base */
+ short ns_mode; /* word/byte mode */
+ int mode;
+ short card_present;
+#ifndef MACH_KERNEL
+ ihandler_t handler;
+ ihandler_id_t *handler_id;
+#endif MACH_KERNEL
+
+} ne_softc_t;
+ne_softc_t ne_softc[NNE];
+
+#define PAT(n) (0xa55a + 37*(n))
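+/* PAT(n) yields a distinct 16-bit test word for each location probed
+   (e.g. PAT(0) == 0xa55a, PAT(4) == 0xa5ee), so RAM that aliases or drops
+   bits shows up as a mismatch in the pattern loops below. */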
+
+u_short boarddata[16];
+
+/*
+ * Fetch from onboard ROM/RAM
+ */
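+/*
+ * (This is the DS8390 "remote DMA" read sequence: clear DSIS_RDC, load the
+ * byte count into rbcr0/1 and the board-RAM start address into rsar0/1,
+ * issue DSCM_RREAD, pull the data in through the ne_data port a byte or
+ * word at a time, then wait for DSIS_RDC to say the transfer finished.)
+ */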
+nefetch (ns, up, ad, len) ne_softc_t *ns; caddr_t up; {
+ u_char cmd;
+ caddr_t nec = ns->ns_port;
+ int counter = 10000;
+ int t_len;
+ unsigned char last_word[2];
+ char odd;
+
+ cmd = inb (nec + ds_cmd);
+ outb (nec + ds_cmd, DSCM_NODMA|DSCM_PG0|DSCM_START);
+
+ /* Setup remote dma */
+ outb (nec + ds0_isr, DSIS_RDC);
+
+ t_len = len;
+ if ((ns->ns_mode & DSDC_WTS) && len&1) {
+ odd=1;
+ t_len++; /* roundup to words */
+ } else odd=0;
+
+ outb (nec+ds0_rbcr0, t_len);
+ outb (nec+ds0_rbcr1, t_len>>8);
+ outb (nec+ds0_rsar0, ad);
+ outb (nec+ds0_rsar1, ad>>8);
+
+ /* Execute & extract from card */
+ outb (nec+ds_cmd, DSCM_RREAD|DSCM_PG0|DSCM_START);
+
+ if (ns->ns_mode & DSDC_WTS)
+ if (odd) {
+ linw (nec+ne_data, up, len/2);
+ *(last_word) = inw(nec+ne_data); /* get last word */
+ *(up+len-1) = last_word[0]; /* last byte */
+ } else {
+ linw (nec+ne_data, up, len/2);
+ }
+ else
+ linb (nec+ne_data, up, len);
+
+
+ /* Wait till done, then shutdown feature */
+ while ((inb (nec+ds0_isr) & DSIS_RDC) == 0 && counter-- > 0)
+ ;
+
+ outb (nec+ds0_isr, DSIS_RDC);
+ outb (nec+ds_cmd, cmd);
+}
+
+/*
+ * Put to onboard RAM
+ */
+neput (ns, up, ad, len) ne_softc_t *ns; caddr_t up; {
+ u_char cmd;
+ caddr_t nec = ns->ns_port;
+ int counter = 10000;
+ int t_len;
+ int odd;
+ unsigned char last_word[2];
+
+ cmd = inb(nec+ds_cmd);
+ outb (nec+ds_cmd, DSCM_NODMA|DSCM_PG0|DSCM_START);
+
+ /* Setup for remote dma */
+ outb (nec+ds0_isr, DSIS_RDC);
+
+ t_len = len;
+ if ((ns->ns_mode & DSDC_WTS) && len&1) {
+ odd = 1;
+ t_len++; /* roundup to words */
+ } else odd = 0;
+
+ outb (nec+ds0_rbcr0, t_len);
+ outb (nec+ds0_rbcr1, t_len>>8);
+ outb (nec+ds0_rsar0, ad);
+ outb (nec+ds0_rsar1, ad>>8);
+
+ /* Execute & stuff to card */
+ outb (nec+ds_cmd, DSCM_RWRITE|DSCM_PG0|DSCM_START);
+ if (ns->ns_mode & DSDC_WTS) {
+ if (odd) {
+ loutw (nec+ne_data, up, len/2);
+ last_word[0] = *(up+len-1);
+ outw (nec+ne_data, (unsigned short) *(last_word));
+ }
+ else {
+ loutw (nec+ne_data, up, len/2);
+ }
+ }
+ else
+ loutb (nec+ne_data, up, len);
+
+
+ /* Wait till done, then shutdown feature */
+ while ((inb (nec+ds0_isr) & DSIS_RDC) == 0 && counter-- > 0)
+ ;
+
+ outb (nec+ds0_isr, DSIS_RDC);
+ outb (nec+ds_cmd, cmd);
+}
+
+int
+neprobe(port, dev)
+struct bus_device *dev;
+{
+ int val, i, sum, bytemode = 1, pat;
+ int unit = dev->unit;
+ ne_softc_t *ns = &ne_softc[unit];
+ caddr_t nec;
+
+ if ((unsigned) unit >= NNE)
+ return(0);
+
+ nec = (caddr_t) ns->ns_port = dev->address;
+
+ if (ns->card_present) {
+	    printf("ne%d: card already present in port %x\n",
+ unit, nec);
+ return(0);
+ }
+
+ if (bytemode) {
+ /* Byte Transfers, Burst Mode Select, Fifo at 8 bytes */
+ ns->ns_mode = DSDC_BMS|DSDC_FT1;
+ ns->ns_txstart = TBUF8;
+ ns->ns_rxend = RBUFEND8;
+ } else {
+word:
+ /* Word Transfers, Burst Mode Select, Fifo at 8 bytes */
+ ns->ns_mode = DSDC_WTS|DSDC_BMS|DSDC_FT1;
+ ns->ns_txstart = TBUF16;
+ ns->ns_rxend = RBUFEND16;
+ bytemode = 0;
+ }
+
+ /* Reset the bastard */
+ val = inb(nec + ne_reset);
+ delay(200);
+ outb(nec + ne_reset, val);
+ delay(200);
+
+ outb(nec + ds_cmd, DSCM_STOP|DSCM_NODMA);
+
+ i = 10000;
+ while ((inb(nec + ds0_isr) & DSIS_RESET) == 0 && i-- > 0);
+ if (i < 0) return (0);
+
+ outb(nec + ds0_isr, 0xff);
+ outb(nec + ds0_dcr, ns->ns_mode);
+ outb(nec + ds_cmd, DSCM_NODMA|DSCM_PG0|DSCM_STOP);
+ delay(1000);
+
+ /* Check cmd reg and fail if not right */
+ if ((i = inb(nec + ds_cmd)) != (DSCM_NODMA|DSCM_PG0|DSCM_STOP))
+ return(0);
+
+ outb(nec + ds0_tcr, DSTC_LB0);
+ outb(nec + ds0_rcr, DSRC_MON);
+ outb(nec + ds0_pstart, ns->ns_txstart+PKTSZ);
+ outb(nec + ds0_pstop, ns->ns_rxend);
+ outb(nec + ds0_bnry, ns->ns_rxend);
+ outb(nec + ds0_imr, 0);
+ outb(nec + ds0_isr, 0);
+ outb(nec + ds_cmd, DSCM_NODMA|DSCM_PG1|DSCM_STOP);
+ outb(nec + ds1_curr, ns->ns_txstart+PKTSZ);
+ outb(nec + ds_cmd, DSCM_NODMA|DSCM_PG0|DSCM_STOP);
+
+#ifdef NEDEBUG
+#define RCON 37
+ { int i, rom;
+
+ rom=1;
+ printf("ne ram ");
+
+ for (i = 0; i < 0xfff0; i+=4) {
+ pat = PAT(i);
+ neput(ns, &pat,i,4);
+ nefetch(ns, &pat,i,4);
+ if (pat == PAT(i)) {
+ if (rom) {
+ rom=0;
+ printf(" %x", i);
+ }
+ } else {
+ if (!rom) {
+ rom=1;
+ printf("..%x ", i);
+ }
+ }
+ pat=0;
+ neput(ns, &pat,i,4);
+ }
+ printf("\n");
+ }
+#endif
+
+ /*
+ * <groan> detect difference between units
+ * solely by where the RAM is decoded.
+ */
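+	/* (We came through in byte mode first, i.e. assuming NE1000-style
+	   RAM at page TBUF8; if the test word does not read back from the
+	   transmit page, the RAM is evidently decoded at the NE2000
+	   word-mode location instead, so back up to "word:" and retry.) */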
+ pat = PAT(0);
+ neput (ns, &pat, ns->ns_txstart*DS_PGSIZE, 4);
+ nefetch(ns, &pat, ns->ns_txstart*DS_PGSIZE, 4);
+ if (pat != PAT(0)) {
+ if (bytemode)
+ goto word;
+ else return (0);
+ }
+
+
+ /* Extract board address */
+ nefetch (ns, (caddr_t)boarddata, 0, sizeof(boarddata));
+
+ for(i=0; i < 6; i++)
+ ns->ds_addr[i] = boarddata[i];
+ ns->card_present = 1;
+ return (1);
+}
+
+/*
+ * Interface exists: make available by filling in network interface
+ * record. System will initialize the interface when it is ready
+ * to accept packets. We get the ethernet address here.
+ */
+void
+neattach(dev)
+struct bus_device *dev;
+{
+ short unit = dev->unit;
+ ne_softc_t *ns = &ne_softc[unit];
+ register struct ifnet *ifp = &(ns->ds_if);
+
+ if ((unsigned) unit >= NNE)
+ return;
+
+#ifdef MACH_KERNEL
+ take_dev_irq(dev);
+#else MACH_KERNEL
+ /* setup intr handler */
+ ns->handler.ih_level = dev->dev_pic;
+ ns->handler.ih_handler = dev->dev_intr[0];
+ ns->handler.ih_resolver = i386_resolver;
+ ns->handler.ih_rdev = dev;
+ ns->handler.ih_stats.intr_type = INTR_DEVICE;
+ ns->handler.ih_stats.intr_cnt = 0;
+ ns->handler.ih_hparam[0].intparam = unit;
+ if ((ns->handler_id = handler_add(&ns->handler)) != NULL)
+ handler_enable(ns->handler_id);
+ else
+ panic("Unable to add NEx000 interrupt handler");
+#endif MACH_KERNEL
+ printf (", port = %x, spl = %d, pic = %d, [%s].",
+ dev->address, dev->sysdep, dev->sysdep1,
+ ether_sprintf(ns->ds_addr));
+#ifndef MACH_KERNEL
+ ns->ns_ac.ac_bcastaddr = (u_char *)etherbroadcastaddr;
+ ns->ns_ac.ac_arphrd = ARPHRD_ETHER;
+#endif MACH_KERNEL
+ ns->ns_flags = 0;
+ ns->mode = 0;
+ ifp->if_unit = unit;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST;
+#ifdef MACH_KERNEL
+ ifp->if_header_size = sizeof(struct ether_header);
+ ifp->if_header_format = HDR_ETHERNET;
+ ifp->if_address_size = 6;
+ ifp->if_address = (char *)&ns->ds_addr[0];
+ if_init_queues(ifp);
+#else MACH_KERNEL
+ ifp->if_name = nedriver.driver_dname;
+ ifp->if_init = neinit;
+ ifp->if_output = ether_output;
+ ifp->if_start = nestart;
+ ifp->if_ioctl = neioctl;
+ ifp->if_reset = nereset;
+ ifp->if_watchdog= 0;
+ if_attach(ifp);
+#endif MACH_KERNEL
+}
+
+/*
+ * Initialization of interface; set up initialization block
+ * and transmit/receive descriptor rings.
+ */
+neinit(unit)
+ int unit;
+{
+ ne_softc_t *ns = &ne_softc[unit];
+ struct ifnet *ifp = &ns->ds_if;
+ int i; char *cp;
+ int oldpri;
+ caddr_t nec = ns->ns_port;
+
+#ifndef MACH_KERNEL
+ if (ifp->if_addrlist == (struct ifaddr *)0) return;
+#endif MACH_KERNEL
+
+ oldpri = SPLNET();
+
+ outb(nec+ds_cmd, DSCM_NODMA|DSCM_PG0|DSCM_STOP);
+
+ /* Word Transfer select, Burst Mode Select, Fifo at 8 bytes */
+ outb(nec+ds0_dcr, ns->ns_mode);
+
+	/* clear remote byte count registers */
+ outb (nec+ds0_rbcr0, 0);
+ outb (nec+ds0_rbcr1, 0);
+
+ /* don't store incoming packets into memory for now */
+ outb (nec+ds0_rcr, DSRC_MON);
+
+ /* place NIC in internal loopback mode */
+ outb(nec+ds0_tcr, DSTC_LB0);
+
+	/* initialize transmit/receive (ring-buffer) Page Start */
+ outb (nec+ds0_tpsr, 0);
+ outb (nec+ds0_pstart, ns->ns_txstart+PKTSZ);
+
+	/* initialize receiver (ring-buffer) Page Stop and Boundary */
+ outb (nec+ds0_pstop, ns->ns_rxend);
+ outb (nec+ds0_bnry, ns->ns_txstart+PKTSZ);
+
+ /* clear all interrupts */
+ outb (nec+ds0_isr, 0xff);
+
+ /* enable the interrupts that we care about */
+ outb (nec+ds0_imr, IMR_ENABLE);
+
+ /* set physical address on ethernet */
+ outb (nec+ds_cmd, DSCM_NODMA|DSCM_PG1|DSCM_STOP);
+ for (i=0 ; i < 6 ; i++) outb(nec+ds1_par0+i,ns->ds_addr[i]);
+
+ ns->ns_cur = ns->ns_txstart+PKTSZ + 1;
+ outb (nec+ds1_curr, ns->ns_cur);
+
+	/* XXX deal with Receiver Configuration Register */
+ /* clr logical address hash filter for now */
+ for (i=0 ; i < 8 ; i++) outb(nec+ds1_mar0+i,0xff);
+
+ /* set page 0 registers */
+ outb (nec+ds_cmd, DSCM_NODMA|DSCM_PG0|DSCM_STOP);
+ outb (nec+ds0_rcr, DSRC_AB);
+
+ /* take unit out of loopback mode */
+ outb (nec+ds0_tcr, 0);
+
+ ns->ds_if.if_flags |= IFF_RUNNING;
+ ns->ns_flags &= ~(DSF_LOCK|DSF_RUNNING);
+ ns->ns_oactive = 0; ns->ns_mask = ~0;
+ splx(oldpri);
+ nestart(unit);
+ return(1);
+}
+
+/*
+ * Setup output on interface.
+ * Get another datagram to send off of the interface queue,
+ * and map it to the interface before starting the output.
+ * called only at splimp or interrupt level.
+ */
+nestart(unit)
+int unit;
+{
+ ne_softc_t *ns = &ne_softc[unit];
+ struct ifnet *ifp = &ns->ds_if;
+ int buffer;
+ int len, i, total,t;
+ caddr_t nec = ns->ns_port;
+#ifdef MACH_KERNEL
+ io_req_t m;
+
+#else MACH_KERNEL
+ struct mbuf *m0, *m;
+#endif MACH_KERNEL
+
+ /*
+ * The DS8390 has only one transmit buffer, if it is busy we
+ * must wait until the transmit interrupt completes.
+ */
+ outb(nec+ds_cmd,DSCM_NODMA|DSCM_START);
+
+ if (ns->ns_flags & DSF_LOCK)
+ goto done;
+
+ if (inb(nec+ds_cmd) & DSCM_TRANS)
+ goto done;
+
+ if ((ns->ds_if.if_flags & IFF_RUNNING) == 0)
+ goto done;
+
+ IF_DEQUEUE(&ns->ds_if.if_snd, m);
+ if (m == 0)
+ goto done;
+
+ /*
+ * Copy the mbuf chain into the transmit buffer
+ */
+
+ ns->ns_flags |= DSF_LOCK; /* prevent entering nestart */
+ buffer = ns->ns_txstart*DS_PGSIZE;
+#ifdef MACH_KERNEL
+ total = m->io_count;
+ neput(ns, m->io_data, buffer, total);
+#else MACH_KERNEL
+ t = 0; len = i = 0;
+ for (m0 = m; m != 0; m = m->m_next)
+ t += m->m_len;
+
+ m = m0;
+ total = t;
+ for (m0 = m; m != 0; ) {
+
+ if (m->m_len&1 && t > m->m_len) {
+ neput(ns, mtod(m, caddr_t), buffer, m->m_len - 1);
+ t -= m->m_len - 1;
+ buffer += m->m_len - 1;
+ m->m_data += m->m_len - 1;
+ m->m_len = 1;
+ m = m_pullup(m, 2);
+ } else {
+ neput(ns, mtod(m, caddr_t), buffer, m->m_len);
+ buffer += m->m_len;
+ t -= m->m_len;
+ MFREE(m, m0);
+ m = m0;
+ }
+ }
+#endif MACH_KERNEL
+ /*
+ * Init transmit length registers, and set transmit start flag.
+ */
+
+ len = total;
+ if (len < ETHER_MIN_LEN) len = ETHER_MIN_LEN;
+ outb(nec+ds0_tbcr0,len&0xff);
+ outb(nec+ds0_tbcr1,(len>>8)&0xff);
+ outb(nec+ds0_tpsr, ns->ns_txstart);
+ outb(nec+ds_cmd, DSCM_TRANS|DSCM_NODMA|DSCM_START);
+
+#ifdef MACH_KERNEL
+ iodone(m);
+ m = 0;
+done:
+#endif MACH_KERNEL
+}
+
+/* buffer successor/predecessor in ring? */
+#define succ(n) (((n)+1 >= ns->ns_rxend) ? (ns->ns_txstart+PKTSZ) : (n)+1)
+#define pred(n) (((n)-1 < (ns->ns_txstart+PKTSZ)) ? ns->ns_rxend-1 : (n)-1)
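+/* e.g. with the byte-mode (NE1000) sizes from if_nereg.h the receive ring
+   runs from page TBUF8+PKTSZ through RBUFEND8-1, i.e. 0x26 .. 0x3f, so
+   succ(0x3f) wraps to 0x26 and pred(0x26) wraps back to 0x3f. */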
+
+/*
+ * Controller interrupt.
+ */
+neintr(unit)
+{
+ ne_softc_t *ns = &ne_softc[unit];
+ u_char cmd,isr;
+ caddr_t nec = ns->ns_port;
+
+ /* Save cmd, clear interrupt */
+ cmd = inb (nec+ds_cmd);
+ isr = inb (nec+ds0_isr);
+loop:
+ outb(nec+ds_cmd,DSCM_NODMA|DSCM_START);
+ outb(nec+ds0_isr, isr);
+
+ /* Receiver error */
+ if (isr & DSIS_RXE) {
+ (void) inb(nec+ds0_rsr);
+ /* need to read these registers to clear status */
+ ns->ds_if.if_ierrors++;
+ }
+
+ /* Counters overflowed, reading the registers resets them */
+ if (isr & DSIS_CTRS) {
+ (void) inb(nec+ds0_cntr0);
+ (void) inb(nec+ds0_cntr1);
+ (void) inb(nec+ds0_cntr2);
+ }
+
+
+ /* We received something; rummage thru tiny ring buffer */
+ if (isr & (DSIS_RX|DSIS_RXE|DSIS_ROVRN)) {
+ u_char pend,lastfree;
+
+ outb(nec+ds_cmd, DSCM_START|DSCM_NODMA|DSCM_PG1);
+ pend = inb(nec+ds1_curr);
+ outb(nec+ds_cmd, DSCM_START|DSCM_NODMA|DSCM_PG0);
+
+ /* Something in the buffer? */
+ while (pend != ns->ns_cur) {
+ /* Extract header from microcephalic board */
+ nefetch(ns, &ns->ns_ph,ns->ns_cur*DS_PGSIZE,
+ sizeof(ns->ns_ph));
+ ns->ns_ba = ns->ns_cur*DS_PGSIZE+sizeof(ns->ns_ph);
+
+ /* Incipient paranoia */
+ if (ns->ns_ph.pr_status == DSRS_RPC ||
+ /* for dequna's */
+ ns->ns_ph.pr_status == 0x21) {
+ if (nerecv(ns))
+ ns->ns_cur = ns->ns_ph.pr_nxtpg ;
+ else {
+ outb(nec+ds0_bnry, pred(ns->ns_cur));
+ goto short_load;
+ }
+ }
+#ifdef NEDEBUG
+ else {
+ printf("cur %x pnd %x lfr %x ",
+ ns->ns_cur, pend, lastfree);
+ printf("nxt %x len %x ", ns->ns_ph.pr_nxtpg,
+ (ns->ns_ph.pr_sz1<<8)+ ns->ns_ph.pr_sz0);
+ printf("Bogus Sts %x\n", ns->ns_ph.pr_status);
+ ns->ns_cur = pend;
+ }
+#endif
+ outb(nec+ds0_bnry, pred(ns->ns_cur));
+ outb(nec+ds_cmd, DSCM_START|DSCM_NODMA|DSCM_PG1);
+ pend = inb(nec+ds1_curr);
+ outb(nec+ds_cmd, DSCM_START|DSCM_NODMA|DSCM_PG0);
+ }
+short_load:
+ outb(nec+ds_cmd, DSCM_START|DSCM_NODMA);
+ }
+
+ /* Transmit error */
+ if (isr & DSIS_TXE) {
+ ns->ns_flags &= ~DSF_LOCK;
+ /* Need to read these registers to clear status */
+ ns->ds_if.if_collisions += inb(nec+ds0_tbcr0);
+ ns->ds_if.if_oerrors++;
+ }
+
+ /* Packet Transmitted */
+ if (isr & DSIS_TX) {
+ ns->ns_flags &= ~DSF_LOCK;
+ ++ns->ds_if.if_opackets;
+ ns->ds_if.if_collisions += inb(nec+ds0_tbcr0);
+ }
+
+	/* Receiver overrun? */
+ if (isr & DSIS_ROVRN) {
+ outb(nec+ds0_rbcr0, 0);
+ outb(nec+ds0_rbcr1, 0);
+ outb(nec+ds0_tcr, DSTC_LB0);
+ outb(nec+ds0_rcr, DSRC_MON);
+ outb(nec+ds_cmd, DSCM_START|DSCM_NODMA);
+ outb(nec+ds0_rcr, DSRC_AB);
+ outb(nec+ds0_tcr, 0);
+ }
+
+ /* Any more to send? */
+ outb (nec+ds_cmd, DSCM_NODMA|DSCM_PG0|DSCM_START);
+ nestart(unit);
+ outb (nec+ds_cmd, cmd);
+ outb (nec+ds0_imr, IMR_ENABLE);
+
+ /* Still more to do? */
+ isr = inb (nec+ds0_isr);
+ if(isr) goto loop;
+
+ return 0;
+}
+
+/*
+ * Ethernet interface receiver interface.
+ * If input error just drop packet.
+ * Otherwise examine packet to determine type. If can't determine length
+ * from type, then have to drop packet. Otherwise decapsulate
+ * packet based on type and pass to type specific higher-level
+ * input routine.
+ */
+nerecv(ns)
+ ne_softc_t *ns;
+{
+#ifdef MACH_KERNEL
+ ipc_kmsg_t new_kmsg;
+ struct ether_header *ehp;
+ struct packet_header *pkt;
+ register struct ifnet *ifp = &ns->ds_if;
+#ifdef FIPC
+ char *fipc_buf;
+#endif
+#else MACH_KERNEL
+ struct mbuf *top, **mp, *m, *p;
+#endif MACH_KERNEL
+ int len, l;
+ int epkt;
+
+ ns->ds_if.if_ipackets++;
+ len = ns->ns_ph.pr_sz0 + (ns->ns_ph.pr_sz1<<8);
+ if(len < ETHER_MIN_LEN || len > ETHER_MAX_LEN)
+ return 0;
+
+ nefetch(ns, &ns->ns_eh, ns->ns_ba, sizeof(struct ether_header));
+
+#ifndef MACH_KERNEL
+ ns->ns_eh.ether_type = ntohs((u_short)ns->ns_eh.ether_type);
+#endif MACH_KERNEL
+ ns->ns_ba += sizeof(struct ether_header);
+
+ /* don't forget checksum! */
+ len -= (sizeof(struct ether_header) + sizeof(long));
+#ifdef MACH_KERNEL
+#ifdef FIPC
+ if (ns->ns_eh.ether_type == FIPC_MSG_TYPE) /* fipc packet */
+ {
+ /* We need to hand the whole packet to the handler. */
+
+ fipc_recvs++;
+
+ fipc_buf = get_fipc_buffer (len, TRUE, TRUE);
+
+ if (fipc_buf == NULL)
+ {
+ ns->ds_if.if_rcvdrops++;
+ return(0);
+ }
+ nefetch (ns, fipc_buf, ns->ns_ba, len);
+
+ fipc_packet (fipc_buf, ns->ns_eh);
+ }
+ else /* net_kmsg */
+ {
+#endif /* FIPC */
+ new_kmsg = net_kmsg_get();
+
+ if (new_kmsg == IKM_NULL) {
+ ns->ds_if.if_rcvdrops++;
+ return(0);
+ }
+
+ ehp = (struct ether_header *) (&net_kmsg(new_kmsg)->header[0]);
+ pkt = (struct packet_header *)(&net_kmsg(new_kmsg)->packet[0]);
+ *ehp = ns->ns_eh;
+
+ nefetch(ns, (char *) (pkt + 1), ns->ns_ba, len);
+
+ pkt->type = ehp->ether_type;
+
+ pkt->length = len + sizeof(struct packet_header);
+ net_packet(ifp, new_kmsg, pkt->length, ethernet_priority(new_kmsg));
+#ifdef FIPC
+ }
+#endif
+
+#else MACH_KERNEL
+/**/
+ epkt = ns->ns_ba + len;
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == 0)
+ return (0);
+
+ m->m_pkthdr.rcvif = &ns->ds_if;
+ m->m_pkthdr.len = len;
+ m->m_len = MHLEN;
+
+ top = 0;
+ mp = &top;
+ while (len > 0) {
+ if (top) {
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (m == 0) {
+ m_freem(top);
+ return (0);
+ }
+ m->m_len = MLEN;
+ }
+ l = min(len, epkt - ns->ns_ba);
+ if (l >= MINCLSIZE) {
+ MCLGET(m, M_DONTWAIT);
+ if (m->m_flags & M_EXT)
+ m->m_len = l = min(len, MCLBYTES);
+ else
+ l = m->m_len;
+ } else {
+ /*
+ * Place initial small packet/header at end of mbuf.
+ */
+ if (l < m->m_len) {
+ if (top == 0 && len + max_linkhdr <= m->m_len)
+ m->m_data += max_linkhdr;
+ m->m_len = l;
+ } else
+ l = m->m_len;
+ }
+ nefetch(ns, mtod(m, caddr_t), ns->ns_ba, l);
+ ns->ns_ba += l;
+ *mp = m;
+ mp = &m->m_next;
+ len -= l;
+ }
+/**/
+ if (top == 0) return 0; /* NEED MODIFY HERE !!! */
+
+ ether_input(&ns->ds_if, &ns->ns_eh, top);
+#endif MACH_KERNEL
+ return 1;
+}
+
+#ifdef MACH_KERNEL
+neopen(dev, flag)
+dev_t dev;
+int flag;
+{
+ register int unit = minor(dev);
+
+	if (unit < 0 || unit >= NNE || !ne_softc[unit].card_present)
+ return (ENXIO);
+
+ ne_softc[unit].ds_if.if_flags |= IFF_UP;
+ neinit(unit);
+ return(0);
+}
+
+#ifdef FIPC
+nefoutput(dev, ior)
+dev_t dev;
+io_req_t ior;
+{
+ register int unit = minor(dev);
+
+	if (unit < 0 || unit >= NNE || !ne_softc[unit].card_present)
+ return (ENXIO);
+
+ return (net_fwrite(&ne_softc[unit].ds_if, nestart, ior));
+}
+#endif
+
+neoutput(dev, ior)
+dev_t dev;
+io_req_t ior;
+{
+ register int unit = minor(dev);
+
+	if (unit < 0 || unit >= NNE || !ne_softc[unit].card_present)
+ return (ENXIO);
+
+ return (net_write(&ne_softc[unit].ds_if, nestart, ior));
+}
+
+nesetinput(dev, receive_port, priority, filter, filter_count)
+dev_t dev;
+mach_port_t receive_port;
+int priority;
+filter_t filter[];
+unsigned int filter_count;
+{
+ register int unit = minor(dev);
+
+	if (unit < 0 || unit >= NNE || !ne_softc[unit].card_present)
+ return (ENXIO);
+
+ return (net_set_filter(&ne_softc[unit].ds_if,
+ receive_port, priority,
+ filter, filter_count));
+}
+
+negetstat(dev, flavor, status, count)
+dev_t dev;
+int flavor;
+dev_status_t status;
+unsigned int *count;
+{
+ register int unit = minor(dev);
+
+	if (unit < 0 || unit >= NNE || !ne_softc[unit].card_present)
+ return (ENXIO);
+
+ return (net_getstat(&ne_softc[unit].ds_if,
+ flavor,
+ status,
+ count));
+}
+
+nesetstat(dev, flavor, status, count)
+dev_t dev;
+int flavor;
+dev_status_t status;
+unsigned int count;
+{
+ register int unit = minor(dev);
+ register ne_softc_t *ns;
+
+	if (unit < 0 || unit >= NNE || !ne_softc[unit].card_present)
+ return (ENXIO);
+
+ ns = &ne_softc[unit];
+
+ switch(flavor) {
+ case NET_STATUS: {
+ register struct net_status *s = (struct net_status *)status;
+ int mode = 0;
+ if (count < NET_STATUS_COUNT)
+ return(D_INVALID_SIZE);
+#define MOD_ENAL 1
+#define MOD_PROM 2
+ if (s->flags & IFF_ALLMULTI)
+ mode |= MOD_ENAL;
+ if (s->flags & IFF_PROMISC)
+ mode |= MOD_PROM;
+
+ if (ns->mode != mode) {
+ ns->mode = mode;
+ if (ns->ns_flags & DSF_RUNNING) {
+ ns->ns_flags &= ~(DSF_LOCK | DSF_RUNNING);
+ neinit(unit);
+ }
+ }
+ break;
+ }
+ default :
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+#else MACH_KERNEL
+
+/*
+ * Process an ioctl request.
+ */
+neioctl(ifp, cmd, data)
+ register struct ifnet *ifp;
+ int cmd;
+ caddr_t data;
+{
+ register struct ifaddr *ifa = (struct ifaddr *)data;
+ ne_softc_t *ns = &ne_softc[ifp->if_unit];
+ struct ifreq *ifr = (struct ifreq *)data;
+ int s = splimp(), error = 0;
+
+
+ switch (cmd) {
+
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+
+ switch (ifa->ifa_addr->sa_family) {
+ case AF_INET:
+ neinit(ifp->if_unit); /* before arpwhohas */
+ ((struct arpcom *)ifp)->ac_ipaddr =
+ IA_SIN(ifa)->sin_addr;
+ arpwhohas((struct arpcom *)ifp, &IA_SIN(ifa)->sin_addr);
+ break;
+ case AF_NS:
+ {
+ register struct ns_addr *ina = &(IA_SNS(ifa)->sns_addr);
+
+ if (ns_nullhost(*ina))
+ ina->x_host = *(union ns_host *)(ns->ds_addr);
+ else {
+ /*
+ * The manual says we can't change the address
+ * while the receiver is armed,
+ * so reset everything
+ */
+ ifp->if_flags &= ~IFF_RUNNING;
+ bcopy((caddr_t)ina->x_host.c_host,
+ (caddr_t)ns->ds_addr, sizeof(ns->ds_addr));
+ }
+ neinit(ifp->if_unit); /* does ne_setaddr() */
+ break;
+ }
+ default:
+ neinit(ifp->if_unit);
+ break;
+ }
+ break;
+
+ case SIOCSIFFLAGS:
+ if ((ifp->if_flags & IFF_UP) == 0 &&
+ ifp->if_flags & IFF_RUNNING) {
+ ifp->if_flags &= ~IFF_RUNNING;
+ outb(ns->ns_port + ds_cmd, DSCM_STOP|DSCM_NODMA);
+ } else if (ifp->if_flags & IFF_UP &&
+ (ifp->if_flags & IFF_RUNNING) == 0)
+ neinit(ifp->if_unit);
+ break;
+
+#ifdef notdef
+ case SIOCGHWADDR:
+ bcopy((caddr_t)ns->ds_addr, (caddr_t) &ifr->ifr_data,
+ sizeof(ns->ds_addr));
+ break;
+#endif
+
+ default:
+ error = EINVAL;
+ }
+ splx(s);
+ return (error);
+}
+
+/*
+ * Reset of interface.
+ */
+nereset(unit, uban)
+ int unit, uban;
+{
+ if (unit >= NNE)
+ return;
+ printf("ne%d: reset\n", unit);
+ ne_softc[unit].ns_flags &= ~DSF_LOCK;
+ neinit(unit);
+}
+#endif MACH_KERNEL
+#endif
diff --git a/i386/i386at/if_nereg.h b/i386/i386at/if_nereg.h
new file mode 100644
index 00000000..a6b5a82c
--- /dev/null
+++ b/i386/i386at/if_nereg.h
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_nereg.h 7.1 (Berkeley) 5/9/91
+ */
+
+/*
+ * NE1000/2000 Ethernet Card registers
+ */
+
+/* This card uses a DS8390 Ethernet controller at the beginning of
+ its i/o space */
+
+#define ne_data 0x10 /* Data Transfer port */
+#define ne_reset 0x1F /* Card Reset port */
+
+#define PKTSZ 6 /* Size of transmit buffer */
+
+/* Span of memory on an NE2000 */
+#define TBUF16 0x40 /* Starting location of Transmit Buffer */
+#define RBUFEND16 0x80 /* Ending location of Receive Buffer */
+
+/* Span of memory on an NE1000 */
+#define TBUF8 0x20 /* Starting location of Transmit Buffer */
+#define RBUFEND8 0x40 /* Ending location of Receive Buffer */
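+
+/* These are counted in 8390 buffer pages (256 bytes each); 0x40/0x80 pages
+   are the same 16K/32K NE2000 span, and 0x20/0x40 pages the same 8K/16K
+   NE1000 span, as the byte-count versions kept under "#if 0" below. */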
+
+#if 0
+#define PKTSZ 3*512 /* Size of transmit buffer */
+
+/* Span of memory on an NE2000 */
+#define TBUF16 (16*1024) /* Starting location of Transmit Buffer */
+#define RBUFEND16 (32*1024) /* Ending location of Receive Buffer */
+
+/* Span of memory on an NE1000 */
+#define TBUF8 (8*1024) /* Starting location of Transmit Buffer */
+#define RBUFEND8 (16*1024) /* Ending location of Receive Buffer */
+#endif
diff --git a/i386/i386at/if_ns8390.c b/i386/i386at/if_ns8390.c
new file mode 100644
index 00000000..15c94400
--- /dev/null
+++ b/i386/i386at/if_ns8390.c
@@ -0,0 +1,2578 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* NOTE:
+ * There are three outstanding bug/features in this implementation.
+ * They may even be hardware misfeatures. The conditions are registered
+ * by counters maintained by the software.
+ * 1: over_write is a condition that means that the board wants to store
+ * packets, but there is no room. So new packets are lost. What seems to
+ * be happening is that we get an over_write condition, but there are no
+ * or just a few packets in the board's ram. Also it seems that we get
+ * several over_writes in a row.
+ * 2: Since there is only one transmit buffer, we need a lock to indicate
+ * whether it is in use. We clear this lock when we get a transmit interrupt.
+ * Sometimes we go to transmit and although there is no transmit in progress,
+ * the lock is set. (In this case, we just ignore the lock.) It looks as
+ * though we can miss transmit interrupts.
+ * 3: We tried to clean up the unnecessary switches to bank 0.
+ * Unfortunately, when you do an ifconfig "down", the system tends to lock up
+ * a few seconds later (this was when DSF_RUNNING was not being set before).
+ * But even with DSF_RUNNING, on an EISA bus machine we ALWAYS lock up after
+ * a few seconds.
+ */
+
+/*
+ * Western Digital 8003E Mach Ethernet driver (for intel 80386)
+ * Copyright (c) 1990 by Open Software Foundation (OSF).
+ */
+
+/*
+ Copyright 1990 by Open Software Foundation,
+Cambridge, MA.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appears in all copies and
+that both the copyright notice and this permission notice appear in
+supporting documentation, and that the name of OSF or Open Software
+Foundation not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+ OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#define IF_CNTRS MACH
+
+#include <ns8390.h>
+#if NNS8390 > 0
+
+#include <mach_ttd.h>
+#include <kern/time_out.h>
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+#include "vm_param.h"
+#include <i386/ipl.h>
+#include <chips/busses.h>
+#include <i386at/ds8390.h>
+#include <i386at/if_wd8003.h>
+#include <i386at/if_3c503.h>
+
+#if MACH_TTD
+#include <ttd/ttd_stub.h>
+#endif /* MACH_TTD */
+
+
+#define SPLNET spl6
+
+int wd_debug = 0;
+
+int ns8390probe();
+void ns8390attach();
+int ns8390intr();
+int ns8390init();
+int ns8390output();
+int ns8390ioctl();
+int ns8390reset();
+int ns8390rcv();
+int ns8390watch();
+int ns8390get_CURR();
+int ns8390over_write();
+
+struct bus_device *ns8390info[NNS8390]; /* ???? */
+
+static vm_offset_t ns8390_std[NNS8390] = { 0 };
+static struct bus_device *ns8390_info[NNS8390];
+struct bus_driver ns8390driver =
+ {ns8390probe, 0, ns8390attach, 0, ns8390_std, "ns8390", ns8390_info, 0, 0, 0};
+
+int watchdog_id;
+
+char *wd8003_card = "wd";
+char *elii_card = "el";
+/* 2e0, 2a0, 280, 250, 350, 330, 310, 300*/
+int elii_irq[8] = {5, 2, 2, 5, 5, 0x711, 0x711, 5};
+int elii_bnc[8] = {1, 0, 1, 1, 0, 0x711, 0x711, 0};
+/*int elii_bnc[8] = {0, 1, 1, 1, 1, 1, 0, 1}; */
+
+typedef struct {
+#ifdef MACH_KERNEL
+ struct ifnet ds_if; /* generic interface header */
+ u_char ds_addr[6]; /* Ethernet hardware address */
+#else MACH_KERNEL
+ struct arpcom ns8390_ac;
+#define ds_if ns8390_ac.ac_if
+#define ds_addr ns8390_ac.ac_enaddr
+#endif MACH_KERNEL
+ int flags;
+ int timer;
+ int interrupt;
+ char *nic;
+ u_char address[ETHER_ADDR_SIZE];
+ short mode;
+ int tbusy;
+ char *sram; /* beginning of the shared memory RAM buffer */
+ int read_nxtpkt_ptr;/* pointer to next packet available */
+ int pstart; /* page start hold */
+ int pstop; /* page stop hold */
+ int tpsr; /* transmit page start hold */
+ int fifo_depth; /* NIC fifo threshold */
+ char *card;
+ int board_id;
+}
+ns8390_softc_t;
+
+ns8390_softc_t ns8390_softc[NNS8390];
+
+struct ns8390_cntrs {
+u_int ovw,
+ jabber,
+ crc,
+ frame,
+ miss,
+ fifo,
+ rcv;
+u_int xmt,
+ xmti,
+ busy,
+ heart;
+} ns8390_cntrs[NNS8390];
+
+#if MACH_TTD
+boolean_t ttd_poll_loop;
+
+int ns8390poll_receive();
+int ns8390transmit_ttd();
+#endif /* MACH_TTD */
+
+#ifdef IF_CNTRS
+int ns_narp = 1, ns_arp = 0;
+int ns_ein[32], ns_eout[32];
+int ns_lin[128/8], ns_lout[128/8];
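+/* log_2() below is floor(log2(no)), computed with the i386 "bsr" (bit scan
+   reverse) instruction; for no == 0 bsr sets ZF, the jne falls through and
+   the xorl makes the result 0. */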
+static
+log_2(no)
+unsigned long no;
+{
+ return ({ unsigned long _temp__;
+ asm("bsr %1, %0; jne 0f; xorl %0, %0; 0:" :
+ "=r" (_temp__) : "a" (no));
+ _temp__;});
+}
+#endif IF_CNTRS
+
+/* Interrupts mask bits */
+int imr_hold = DSIM_PRXE|DSIM_PTXE|DSIM_RXEE|DSIM_TXEE|DSIM_OVWE|DSIM_CNTE;
+
+/*
+ * ns8390probe:
+ *
+ * This function "probes" or checks for the wd8003 board on the bus to see
+ * if it is there. As far as I can tell, the best break between this
+ * routine and the attach code is to simply determine whether the board
+ * is configured in properly. Currently my approach to this is to test the
+ * base I/O special offset for the Western Digital unique byte sequence
+ * identifier. If the bytes match we assume board is there.
+ * The config code expects to see a successful return from the probe
+ * routine before attach will be called.
+ *
+ * input : address device is mapped to, and unit # being checked
+ * output : a '1' is returned if the board exists, and a 0 otherwise
+ *
+ */
+
+ns8390probe(port, dev)
+struct bus_device *dev;
+{
+ caddr_t hdwbase = (caddr_t)dev->address;
+ int unit = dev->unit;
+ ns8390_softc_t *sp = &ns8390_softc[unit];
+ int tmp;
+ int vendor_id;
+
+	if ((unit < 0) || (unit >= NNS8390)) {
+ printf("ns8390 ethernet unit %d out of range\n", unit);
+ return(0);
+ }
+ if (((u_char) inb(hdwbase+IFWD_LAR_0) == (u_char) WD_NODE_ADDR_0) &&
+ ((u_char) inb(hdwbase+IFWD_LAR_1) == (u_char) WD_NODE_ADDR_1) &&
+ ((u_char) inb(hdwbase+IFWD_LAR_2) == (u_char) WD_NODE_ADDR_2)) {
+ ns8390info[unit] = dev;
+ sp->card = wd8003_card;
+ dev->name = wd8003_card;
+ sp->nic = hdwbase + OFF_8390;
+ /* enable mem access to board */
+ sp->board_id = wd80xxget_board_id(dev);
+
+ *(sp->address) = inb(hdwbase+IFWD_LAR_0);
+ *(sp->address + 1) = inb(hdwbase+IFWD_LAR_1);
+ *(sp->address + 2) = inb(hdwbase+IFWD_LAR_2);
+ *(sp->address + 3) = inb(hdwbase+IFWD_LAR_3);
+ *(sp->address + 4) = inb(hdwbase+IFWD_LAR_4);
+ *(sp->address + 5) = inb(hdwbase+IFWD_LAR_5);
+ return (1);
+ } /* checks the address of the board to verify that it is a WD */
+
+ /* try to avoid any NE2000 pretending to be an el II */
+ if (inb(hdwbase + 0x408) == 0xff)
+ return 0;
+
+ /* check vendor id */
+ tmp = inb(hdwbase + CTLR);
+
+ outb(hdwbase + CTLR, CTLR_RST|CTLR_THIN); /* Reset it... */
+ outb(hdwbase + CTLR, CTLR_THIN);
+ /*
+ * Map the station addr PROM into the lower I/O ports. We now
+ * check for both the old and new 3Com prefix
+ */
+ outb(hdwbase + CTLR, CTLR_STA_ADDR|CTLR_THIN);
+ vendor_id = inb(hdwbase)*0x10000 + inb(hdwbase + 1)*0x100 +
+ inb(hdwbase + 2);
+ /* Restore the register we frobbed. */
+ outb(hdwbase + CTLR, tmp);
+ if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID))
+ return 0;
+
+ if ((tmp = inb(hdwbase+BCFR))) {
+ switch(tmp) {
+ case (1<<7): sp->board_id = 7; break; /*irq5 xvcr*/
+#ifdef not_currently_possible
+ case (1<<6): sp->board_id = 6; break;
+ case (1<<5): sp->board_id = 5; break;
+#endif not_currently_possible
+ case (1<<4): sp->board_id = 4; break;
+ case (1<<3): sp->board_id = 3; break;
+ case (1<<2): sp->board_id = 2; break; /*irq2 bnc*/
+ case (1<<1): sp->board_id = 1; break; /*irq2 xvcr*/
+ case (1<<0): sp->board_id = 0; break; /*irq5 bnc*/
+ default: return 0;
+ }
+ switch (inb(hdwbase+PCFR)) {
+ case (1<<7): dev->phys_address = 0xDC000; break;
+ case (1<<6): dev->phys_address = 0xD8000; break;
+#ifdef not_currently_possible
+ case (1<<5): dev->phys_address = 0xCC000; break;
+ case (1<<4): dev->phys_address = 0xC8000; break;
+#endif not_currently_possible
+ default:
+ printf("EtherLink II with NO memory configured\n");
+ return 0;
+ }
+ ns8390info[unit] = dev;
+ dev->sysdep1 = elii_irq[sp->board_id];
+ if (dev->sysdep1 == 2)
+ dev->sysdep1 = 9;
+ sp->card = elii_card;
+ dev->name = elii_card;
+ sp->nic = hdwbase;
+ return 1;
+ }
+
+ return(0);
+}
+
+/*
+ * ns8390attach:
+ *
+ * This function attaches a ns8390 board to the "system". The rest of
+ * runtime structures are initialized here (this routine is called after
+ * a successful probe of the board). Once the ethernet address is read
+ * and stored, the board's ifnet structure is attached and readied.
+ *
+ * input : bus_device structure setup in autoconfig
+ * output : board structs and ifnet is setup
+ *
+ */
+
+void ns8390attach(dev)
+struct bus_device *dev;
+{
+ ns8390_softc_t *sp;
+ struct ifnet *ifp;
+ u_char unit;
+ int temp;
+
+ take_dev_irq(dev);
+ unit = (u_char)dev->unit;
+ sp = &ns8390_softc[unit];
+ printf(", port = %x, spl = %d, pic = %d. ",
+ dev->address, dev->sysdep, dev->sysdep1);
+
+ if (sp->card == elii_card) {
+ if (elii_bnc[sp->board_id])
+ printf("cheapernet ");
+ else
+ printf("ethernet ");
+ } else
+ printf("ethernet ");
+
+ (volatile char *)sp->sram =
+ (volatile char *) phystokv(dev->phys_address);
+ dev->address = (vm_offset_t) phystokv(dev->address);
+ sp->timer = -1;
+ sp->flags = 0;
+ sp->mode = 0;
+
+ if (!ns8390hwrst(unit)) {
+ printf("%s%d: attach(): reset failed.\n",
+ sp->card, unit);
+ return;
+ }
+ /* N.B. sp->address is not determined till
+ * hwrst time. */
+ *(sp->ds_addr) = *(sp->address);
+ *(sp->ds_addr + 1) = *(sp->address + 1);
+ *(sp->ds_addr + 2) = *(sp->address + 2);
+ *(sp->ds_addr + 3) = *(sp->address + 3);
+ *(sp->ds_addr + 4) = *(sp->address + 4);
+ *(sp->ds_addr + 5) = *(sp->address + 5);
+
+ printf("id [%x:%x:%x:%x:%x:%x]",
+ sp->address[0],sp->address[1],sp->address[2],
+ sp->address[3],sp->address[4],sp->address[5]);
+ ifp = &(sp->ds_if);
+ ifp->if_unit = unit;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST;
+#ifdef MACH_KERNEL
+ ifp->if_header_size = sizeof(struct ether_header);
+ ifp->if_header_format = HDR_ETHERNET;
+ ifp->if_address_size = 6;
+ ifp->if_address = (char *)&sp->address[0];
+ if_init_queues(ifp);
+#else MACH_KERNEL
+ ifp->if_name = sp->card;
+ ifp->if_init = ns8390init;
+ ifp->if_output = ns8390output;
+ ifp->if_ioctl = ns8390ioctl;
+ ifp->if_reset = ns8390reset;
+ ifp->if_next = NULL;
+ if_attach(ifp);
+#ifdef notdef
+ watchdog_id = timeout(ns8390watch, &(ifp->if_unit), 20*HZ);
+#endif
+#endif MACH_KERNEL
+
+#ifdef MACH_KERNEL
+#if MACH_TTD
+ if (!ttd_get_packet) {
+ ttd_device_unit = unit;
+ ttd_get_packet = ns8390poll_receive;
+ ttd_send_packet = ns8390transmit_ttd;
+ ttd_host_ether_id.array[0] = *(sp->address);
+ ttd_host_ether_id.array[1] = *(sp->address + 1);
+ ttd_host_ether_id.array[2] = *(sp->address + 2);
+ ttd_host_ether_id.array[3] = *(sp->address + 3);
+ ttd_host_ether_id.array[4] = *(sp->address + 4);
+ ttd_host_ether_id.array[5] = *(sp->address + 5);
+ }
+#endif /* MACH_TTD */
+#endif /* MACH_KERNEL */
+}
+
+/*
+ * ns8390watch():
+ *
+ */
+
+int
+ns8390watch(b_ptr)
+caddr_t b_ptr;
+{
+ int x,
+ y,
+ opri,
+ unit;
+ int temp_cr;
+ caddr_t nic;
+
+ unit = *b_ptr;
+#ifdef MACH_KERNEL
+ timeout(ns8390watch,b_ptr,20*HZ);
+#else MACH_KERNEL
+ watchdog_id = timeout(ns8390watch,b_ptr,20*HZ);
+#endif MACH_KERNEL
+ nic = ns8390_softc[unit].nic;
+ temp_cr = inb(nic+ds_cmd);
+ outb(nic + ds_cmd, (temp_cr & 0x3f) | DSCM_PG0);
+ printf("<<< ISR=%x CURR=%x rdnxt=%x BNDY=%x>>> ",
+ inb(nic + ds0_isr),
+ ns8390get_CURR(unit), ns8390_softc[unit].read_nxtpkt_ptr,
+ inb(nic+ds0_bndy));
+ outb(nic+ds_cmd,temp_cr);
+}
+
+#ifdef MACH_KERNEL
+int ns8390start(); /* forward */
+
+/*ARGSUSED*/
+wd8003open(dev, flag)
+ dev_t dev;
+ int flag;
+{
+ register int unit = minor(dev);
+
+ if (ns8390_softc[unit].card != wd8003_card)
+ return (ENXIO);
+ if (unit < 0 || unit >= NNS8390 ||
+ ns8390_softc[unit].nic == 0)
+ return (ENXIO);
+
+ ns8390_softc[unit].ds_if.if_flags |= IFF_UP;
+ ns8390init(unit);
+ return(0);
+}
+
+eliiopen(dev, flag)
+ dev_t dev;
+ int flag;
+{
+ register int unit = minor(dev);
+
+ if (ns8390_softc[unit].card != elii_card)
+ return (ENXIO);
+ if (unit < 0 || unit >= NNS8390 ||
+ ns8390_softc[unit].nic == 0)
+ return (ENXIO);
+
+ ns8390_softc[unit].ds_if.if_flags |= IFF_UP;
+ ns8390init(unit);
+ return(0);
+}
+
+ns8390output(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NNS8390 ||
+ ns8390_softc[unit].nic == 0)
+ return (ENXIO);
+ return (net_write(&ns8390_softc[unit].ds_if, ns8390start, ior));
+}
+
+ns8390setinput(dev, receive_port, priority, filter, filter_count)
+ dev_t dev;
+ mach_port_t receive_port;
+ int priority;
+ filter_t filter[];
+ unsigned int filter_count;
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NNS8390 ||
+ ns8390_softc[unit].nic == 0)
+ return (ENXIO);
+
+ return (net_set_filter(&ns8390_softc[unit].ds_if,
+ receive_port, priority,
+ filter, filter_count));
+}
+
+#else MACH_KERNEL
+/*
+ * ns8390output:
+ *
+ * This routine is called by the "if" layer to output a packet to
+ * the network.  This code resolves the destination ethernet address,
+ * and puts the ethernet header into the mbuf if there is room.  If
+ * not, a new mbuf is allocated with the header information and is
+ * prepended to the data to be transmitted.  The routine ns8390xmt(),
+ * which actually transmits the data, expects the ethernet header to
+ * precede the data in the mbuf.
+ *
+ * input: ifnet structure pointer, an mbuf with data, and address
+ * to be resolved
+ * output: mbuf is updated to hold enet address, or a new mbuf
+ * with the address is added
+ *
+ */
+
+ns8390output(ifp, m0, dst)
+struct ifnet *ifp;
+struct mbuf *m0;
+struct sockaddr *dst;
+{
+ register ns8390_softc_t *is = &ns8390_softc[ifp->if_unit];
+ u_char edst[6];
+ struct in_addr idst;
+ register struct mbuf *m = m0;
+ register struct ether_header *eh;
+ register int off;
+ int usetrailers;
+ int type, error;
+ spl_t opri;
+
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
+		printf("%s%d output(): Turning off board\n",
+		       is->card, ifp->if_unit);
+ ns8390intoff(ifp->if_unit);
+ error = ENETDOWN;
+ goto bad;
+ }
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ idst = ((struct sockaddr_in *)dst)->sin_addr;
+ if (!arpresolve(&is->ns8390_ac, m, &idst, edst, &usetrailers)){
+ return (0); /* if not yet resolved */
+ }
+ off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
+ if (usetrailers && off > 0 && (off & 0x1ff) == 0 &&
+ m->m_off >= MMINOFF + 2 * sizeof (u_short)) {
+ type = ETHERTYPE_TRAIL + (off>>9);
+ m->m_off -= 2 * sizeof (u_short);
+ m->m_len += 2 * sizeof (u_short);
+ *mtod(m, u_short *) = htons((u_short)ETHERTYPE_IP);
+ *(mtod(m, u_short *) + 1) = htons((u_short)m->m_len);
+ goto gottrailertype;
+ }
+ type = ETHERTYPE_IP;
+ off = 0;
+ goto gottype;
+#endif
+#ifdef NS
+ case AF_NS:
+ type = ETHERTYPE_NS;
+ bcopy((caddr_t)&(((struct sockaddr_ns *)dst)->sns_addr.x_host),
+ (caddr_t)edst,
+ sizeof (edst));
+ off = 0;
+ goto gottype;
+#endif
+ case AF_UNSPEC:
+ eh = (struct ether_header *)dst->sa_data;
+ bcopy((caddr_t)eh->ether_dhost, (caddr_t)edst, sizeof (edst));
+ type = eh->ether_type;
+ goto gottype;
+ default:
+ printf("%s%d output(): can't handle af%d\n",
+ is->card, ifp->if_unit,
+ dst->sa_family);
+ error = EAFNOSUPPORT;
+ goto bad;
+ }
+gottrailertype:
+ /*
+ * Packet to be sent as trailer: move first packet
+ * (control information) to end of chain.
+ */
+ while (m->m_next)
+ m = m->m_next;
+ m->m_next = m0;
+ m = m0->m_next;
+ m0->m_next = 0;
+ m0 = m;
+gottype:
+ /*
+ * Add local net header. If no space in first mbuf,
+ * allocate another.
+ */
+ if (m->m_off > MMAXOFF ||
+ MMINOFF + sizeof (struct ether_header) > m->m_off) {
+ m = m_get(M_DONTWAIT, MT_HEADER);
+ if (m == 0) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ m->m_next = m0;
+ m->m_off = MMINOFF;
+ m->m_len = sizeof (struct ether_header);
+ } else {
+ m->m_off -= sizeof (struct ether_header);
+ m->m_len += sizeof (struct ether_header);
+ }
+ eh = mtod(m, struct ether_header *);
+ eh->ether_type = htons((u_short)type);
+ bcopy((caddr_t)edst, (caddr_t)eh->ether_dhost, sizeof (edst));
+ bcopy((caddr_t)is->address,
+ (caddr_t)eh->ether_shost,
+ sizeof(edst));
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ opri = SPLNET();
+ if (IF_QFULL(&ifp->if_snd)) {
+ IF_DROP(&ifp->if_snd);
+ splx(opri);
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ IF_ENQUEUE(&ifp->if_snd, m);
+ /*
+ * Some action needs to be added here for checking whether the
+ * board is already transmitting. If it is, we don't want to
+ * start it up (ie call ns8390start()). We will attempt to send
+ * packets that are queued up after an interrupt occurs. Some
+ * flag checking action has to happen here and/or in the start
+ * routine. This note is here to remind me that some thought
+ * is needed and there is a potential problem here.
+ *
+ */
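+	/*
+	 * Illustrative sketch only (not wired in): ns8390start() below
+	 * already returns early while is->tbusy is set and the NIC command
+	 * register still shows DSCM_TRANS, so a guarded form of this kick
+	 * could read
+	 *
+	 *	if (!ns8390_softc[ifp->if_unit].tbusy)
+	 *		ns8390start(ifp->if_unit);
+	 *
+	 * at the cost of not re-checking a possibly stale tbusy flag here.
+	 */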
+ ns8390start(ifp->if_unit);
+ splx(opri);
+ return (0);
+bad:
+ m_freem(m0);
+ return (error);
+}
+#endif MACH_KERNEL
+
+/*
+ * ns8390reset:
+ *
+ * This routine is in part an entry point for the "if" code.  Most of
+ * the actual initialization has (we hope) already been done by
+ * ns8390attach(), so this simply marks the interface as not running
+ * and reinitializes it.
+ *
+ * input : unit number or board number to reset
+ * output : board is reset
+ *
+ */
+
+int
+ns8390reset(unit)
+int unit;
+{
+
+ ns8390_softc[unit].ds_if.if_flags &= ~IFF_RUNNING;
+ return(ns8390init(unit));
+}
+
+/*
+ * ns8390init:
+ *
+ * Another routine that interfaces the "if" layer to this driver.
+ * It resets the structures that are used by the "upper layers",
+ * and calls ns8390hwrst(), which resets the ns8390 board itself.
+ *
+ * input : board number
+ * output : structures (if structs) and board are reset
+ *
+ */
+
+int
+ns8390init(unit)
+int unit;
+{
+ struct ifnet *ifp;
+ int stat;
+ spl_t oldpri;
+
+ ifp = &(ns8390_softc[unit].ds_if);
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ if (ifp->if_addrlist == (struct ifaddr *)0) {
+ return;
+ }
+#endif MACH_KERNEL
+ oldpri = SPLNET();
+ if ((stat = ns8390hwrst(unit)) == TRUE) {
+ ns8390_softc[unit].ds_if.if_flags |= IFF_RUNNING;
+ ns8390_softc[unit].flags |= DSF_RUNNING;
+ ns8390_softc[unit].tbusy = 0;
+ ns8390start(unit);
+ } else
+		printf("%s%d init(): trouble resetting board\n",
+		       ns8390_softc[unit].card, unit);
+ ns8390_softc[unit].timer = 5;
+ splx(oldpri);
+ return(stat);
+}
+
+/*
+ * ns8390start:
+ *
+ * This is yet another interface routine that simply tries to output a
+ * packet held in an mbuf after a reset.
+ *
+ * input : board number
+ * output : stuff sent to board if any there
+ *
+ */
+
+ns8390start(unit)
+int unit;
+{
+ register ns8390_softc_t *is = &ns8390_softc[unit];
+ struct ifnet *ifp;
+#ifdef MACH_KERNEL
+ io_req_t m;
+#else MACH_KERNEL
+ struct mbuf *m;
+#endif MACH_KERNEL
+
+ if (is->tbusy) {
+ caddr_t nic = ns8390_softc[unit].nic;
+ if (!(inb(nic+ds_cmd) & DSCM_TRANS)) {
+ is->tbusy = 0;
+ ns8390_cntrs[unit].busy++;
+ } else
+ return;
+ }
+
+ ifp = &(ns8390_softc[unit].ds_if);
+
+ IF_DEQUEUE(&ifp->if_snd, m);
+#ifdef MACH_KERNEL
+ if (m != 0)
+#else MACH_KERNEL
+ if (m != (struct mbuf *)0)
+#endif MACH_KERNEL
+ {
+ is->tbusy++;
+ ns8390_cntrs[unit].xmt++;
+ ns8390xmt(unit, m);
+ }
+}
+
+#ifdef MACH_KERNEL
+/*ARGSUSED*/
+ns8390getstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status; /* pointer to OUT array */
+ unsigned int *count; /* out */
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NNS8390 ||
+ ns8390_softc[unit].nic == 0)
+ return (ENXIO);
+
+ return (net_getstat(&ns8390_softc[unit].ds_if,
+ flavor,
+ status,
+ count));
+}
+ns8390setstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status;
+ unsigned int count;
+{
+ register int unit = minor(dev);
+ register ns8390_softc_t *sp;
+
+ if (unit < 0 || unit >= NNS8390 ||
+ ns8390_softc[unit].nic == 0)
+ return (ENXIO);
+
+ sp = &ns8390_softc[unit];
+
+ switch (flavor) {
+ case NET_STATUS:
+ {
+ /*
+ * All we can change are flags, and not many of those.
+ */
+ register struct net_status *ns = (struct net_status *)status;
+ int mode = 0;
+
+ if (count < NET_STATUS_COUNT)
+ return (D_INVALID_SIZE);
+
+ if (ns->flags & IFF_ALLMULTI)
+ mode |= MOD_ENAL;
+ if (ns->flags & IFF_PROMISC)
+ mode |= MOD_PROM;
+
+ /*
+ * Force a complete reset if the receive mode changes
+ * so that these take effect immediately.
+ */
+ if (sp->mode != mode) {
+ sp->mode = mode;
+ if (sp->flags & DSF_RUNNING) {
+ sp->flags &= ~(DSF_LOCK | DSF_RUNNING);
+ ns8390init(unit);
+ }
+ }
+ break;
+ }
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+#else MACH_KERNEL
+/*
+ * ns8390ioctl:
+ *
+ * This routine processes an ioctl request from the "if" layer
+ * above.
+ *
+ * input  : pointer to the appropriate "if" struct, command, and data
+ * output : based on the command, appropriate action is taken on the
+ *	     ns8390 board(s) or related structures
+ * return : error is returned containing exit conditions
+ *
+ */
+
+int
+ns8390ioctl(ifp, cmd, data)
+struct ifnet *ifp;
+int cmd;
+caddr_t data;
+{
+ register struct ifaddr *ifa = (struct ifaddr *)data;
+ register ns8390_softc_t *is;
+ int error;
+ spl_t opri;
+ short mode = 0;
+
+ is = &ns8390_softc[ifp->if_unit];
+ opri = SPLNET();
+ error = 0;
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ ns8390init(ifp->if_unit);
+ switch (ifa->ifa_addr.sa_family) {
+#ifdef INET
+ case AF_INET:
+ ((struct arpcom *)ifp)->ac_ipaddr =
+ IA_SIN(ifa)->sin_addr;
+ arpwhohas((struct arpcom *)ifp, &IA_SIN(ifa)->sin_addr);
+ break;
+#endif
+#ifdef NS
+ case AF_NS:
+ {
+ register struct ns_addr *ina =
+ &(IA_SNS(ifa)->sns_addr);
+ if (ns_nullhost(*ina))
+ ina->x_host =
+ *(union ns_host *)(ds->ds_addr);
+ else
+????
+ ns8390seteh(ina->x_host.c_host,
+ ns8390_softc[ifp->if_unit].base);
+ break;
+ }
+#endif
+ }
+ break;
+ case SIOCSIFFLAGS:
+ if (ifp->if_flags & IFF_ALLMULTI)
+ mode |= MOD_ENAL;
+ if (ifp->if_flags & IFF_PROMISC)
+ mode |= MOD_PROM;
+ /*
+ * force a complete reset if the receive multicast/
+ * promiscuous mode changes so that these take
+ * effect immediately.
+ *
+ */
+ if (is->mode != mode) {
+ is->mode = mode;
+ if (is->flags & DSF_RUNNING) {
+ is->flags &=
+ ~(DSF_LOCK|DSF_RUNNING);
+ ns8390init(ifp->if_unit);
+ }
+ }
+ if ((ifp->if_flags & IFF_UP) == 0 &&
+ is->flags & DSF_RUNNING) {
+			printf("%s%d ioctl(): turning off board\n",
+			       is->card, ifp->if_unit);
+ is->flags &= ~(DSF_LOCK | DSF_RUNNING);
+ is->timer = -1;
+ ns8390intoff(ifp->if_unit);
+ ns8390over_write(ifp->if_unit);
+ } else
+ if (ifp->if_flags & IFF_UP &&
+ (is->flags & DSF_RUNNING) == 0)
+ ns8390init(ifp->if_unit);
+ break;
+#ifdef IF_CNTRS
+ case SIOCCIFCNTRS:
+ if (!suser()) {
+ error = EPERM;
+ break;
+ }
+ bzero((caddr_t)ns_ein, sizeof (ns_ein));
+ bzero((caddr_t)ns_eout, sizeof (ns_eout));
+ bzero((caddr_t)ns_lin, sizeof (ns_lin));
+ bzero((caddr_t)ns_lout, sizeof (ns_lout));
+ bzero((caddr_t)&ns_arp, sizeof (int));
+ bzero((caddr_t)&ns8390_cntrs, sizeof (ns8390_cntrs));
+ break;
+#endif IF_CNTRS
+ default:
+ error = EINVAL;
+ }
+ splx(opri);
+ return (error);
+}
+#endif MACH_KERNEL
+
+/*
+ * ns8390hwrst:
+ *
+ * This routine resets the ns8390 board that corresponds to the
+ * board number passed in.
+ *
+ * input : board number to do a hardware reset
+ * output : board is reset
+ *
+ */
+
+int
+ns8390hwrst(unit)
+int unit;
+{
+ caddr_t nic = ns8390_softc[unit].nic;
+ int count;
+ u_char stat;
+ spl_t spl = SPLNET();
+
+ if (ns8390_softc[unit].card == wd8003_card &&
+ config_wd8003(unit) == FALSE) {
+ printf("%s%d hwrst(): config_wd8003 failed.\n",
+ ns8390_softc[unit].card, unit);
+ splx(spl);
+ return(FALSE);
+ }
+ if (ns8390_softc[unit].card == elii_card &&
+ config_3c503(unit) == FALSE) {
+ printf("%s%d hwrst(): config_3c503 failed.\n",
+ ns8390_softc[unit].card, unit);
+ splx(spl);
+ return(FALSE);
+ }
+ if (config_nic(unit) == FALSE) {
+ printf("%s%d hwrst(): config_nic failed.\n",
+ ns8390_softc[unit].card, unit);
+ splx(spl);
+ return(FALSE);
+ }
+ splx(spl);
+ return(TRUE);
+}
+
+/*
+ * ns8390intr:
+ *
+ * This function is the interrupt handler for the ns8390 ethernet
+ * board. This routine will be called whenever either a packet
+ * is received, or a packet has successfully been transferred and
+ * the unit is ready to transmit another packet.
+ *
+ * input : board number that interrupted
+ * output : either a packet is received, or a packet is transferred
+ *
+ */
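+/*
+ * Rough shape of the handler below, for reference: the IMR is cleared to
+ * hold off further board interrupts, each set of bits read from the ISR is
+ * acknowledged and then serviced in a loop (receive, overwrite, transmit,
+ * counter overflow), and finally the mask value imr_hold (also used by
+ * config_nic()) is written back to the IMR.
+ */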
+int
+ns8390intr(unit)
+{
+ int opri, i;
+ int isr_status;
+ int temp_cr;
+ caddr_t nic = ns8390_softc[unit].nic;
+
+ temp_cr = inb(nic+ds_cmd);
+ outb(nic+ds_cmd, (temp_cr & 0x3f) | DSCM_PG0);
+ outb(nic+ds0_imr, 0); /* stop board interrupts */
+ outb(nic+ds_cmd, temp_cr);
+ while (isr_status = inb(nic+ds0_isr)) {
+ outb(nic+ds0_isr, isr_status); /* clear interrupt status */
+
+ if ((isr_status & (DSIS_ROVRN|DSIS_RXE)) == DSIS_RXE) {
+ int rsr = inb(nic+ds0_rsr);
+ if (rsr & DSRS_DFR) ns8390_cntrs[unit].jabber++;
+ if (rsr & ~(DSRS_DFR|DSRS_PHY|DSRS_FAE|DSRS_CRC|DSIS_RX))
+ printf("%s%d intr(): isr = %x, RSR = %x\n",
+ ns8390_softc[unit].card, unit,
+ isr_status, rsr);
+ } else if (isr_status & DSIS_ROVRN) {
+ ns8390_cntrs[unit].ovw++;
+ ns8390over_write(unit);
+ }
+ if (isr_status & DSIS_RX) { /* DFR & PRX is possible */
+ ns8390rcv(unit);
+
+#if MACH_TTD
+ if (kttd_active)
+ ttd_poll_loop = FALSE;
+#endif /* MACH_TTD */
+ }
+
+ if (isr_status & DSIS_TXE) {
+ int tsr = inb(nic+ds0_tsr);
+ tsr &= ~0x2; /* unadvertised special */
+#if MACH_TTD
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ {
+ if (tsr == (DSTS_CDH|DSTS_ABT))
+ ns8390_cntrs[unit].heart++;
+ else
+ printf("%s%d intr(): isr = %x, TSR = %x\n",
+ ns8390_softc[unit].card, unit,
+ isr_status, tsr);
+ ns8390_softc[unit].tbusy = 0;
+ ns8390start(unit);
+ }
+ } else if (isr_status & DSIS_TX) {
+#if MACH_TTD
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ {
+ ns8390_cntrs[unit].xmti++;
+ ns8390_softc[unit].tbusy = 0;
+ ns8390start(unit);
+ }
+ }
+
+ if (isr_status & DSIS_CTRS) {
+ int c0 = inb(nic+ds0_cntr0);
+ int c1 = inb(nic+ds0_cntr1);
+ int c2 = inb(nic+ds0_cntr2);
+ ns8390_cntrs[unit].frame += c0;
+ ns8390_cntrs[unit].crc += c1;
+ ns8390_cntrs[unit].miss += c2;
+#ifdef COUNTERS
+ printf("%s%d intr(): isr = %x, FRAME %x, CRC %x, MISS %x\n",
+ ns8390_softc[unit].card, unit,
+ isr_status, c0, c1, c2);
+ printf("%s%d intr(): TOTAL , FRAME %x, CRC %x, MISS %x\n",
+ ns8390_softc[unit].card, unit,
+ ns8390_cntrs[unit].frame,
+ ns8390_cntrs[unit].crc,
+ ns8390_cntrs[unit].miss);
+#endif COUNTERS
+ outb(nic+ds0_isr, isr_status); /* clear interrupt status again */
+ }
+ }
+ temp_cr=inb(nic+ds_cmd);
+ outb(nic+ds_cmd, (temp_cr & 0x3f) | DSCM_PG0);
+ outb(nic+ds0_imr, imr_hold);
+ outb(nic+ds_cmd, temp_cr);
+ return(0);
+}
+
+/*
+ * Called when the on-board receive buffer has been completely filled
+ * (overwrite warning seen by ns8390intr).  It stops the board, reads out
+ * all of the packets currently in the buffer, and then restarts the board.
+ */
+ns8390over_write(unit)
+int unit;
+{
+ caddr_t nic = ns8390_softc[unit].nic;
+ int no;
+ int count = 0;
+
+ outb(nic+ds_cmd, DSCM_NODMA|DSCM_STOP|DSCM_PG0); /* clear the receive buffer */
+ outb(nic+ds0_rbcr0, 0);
+ outb(nic+ds0_rbcr1, 0);
+ while ((!(inb (nic + ds0_isr) & DSIS_RESET)) && (count < 10000))
+ count++;
+ if (count == 10000) {
+ printf("%s%d: over_write(): would not reset.\n",
+ ns8390_softc[unit].card, unit);
+ }
+ no = ns8390rcv(unit);
+#ifdef OVWBUG
+ printf("%s%d over_write(): ns8390 OVW ... %d.\n",
+ ns8390_softc[unit].card, unit, no);
+#endif OVWBUG
+ outb(nic+ds0_tcr, DSTC_LB0); /* External loopback mode */
+ outb(nic+ds_cmd, DSCM_NODMA|DSCM_START|DSCM_PG0);
+ outb(nic+ds0_tcr, 0);
+ return;
+}
+
+/*
+ * ns8390rcv:
+ *
+ * This routine is called by the interrupt handler to initiate a
+ * packet transfer from the board to the "if" layer above this
+ * driver. This routine checks if a buffer has been successfully
+ * received by the ns8390. If so, it does the actual transfer of the
+ * board data (including the ethernet header) into a packet (consisting
+ * of an mbuf chain) and enqueues it to a higher level.
+ * It then checks again whether there are any packets in the receive ring
+ * and, if so, reads the next packet, until there are no more.
+ *
+ * input : number of the board to check
+ * output : if a packet is available, it is "sent up"
+ */
+ns8390rcv(unit)
+int unit;
+{
+ register ns8390_softc_t *is = &ns8390_softc[unit];
+ register struct ifnet *ifp = &is->ds_if;
+ caddr_t nic = is->nic;
+ int packets = 0;
+ struct ether_header eh;
+ u_short mlen, len, bytes_in_mbuf, bytes;
+ u_short remaining;
+ int temp_cr;
+ u_char *mb_p;
+ int board_id = is->board_id;
+ vm_offset_t hdwbase = ns8390info[unit]->address;
+ spl_t s;
+
+ /* calculation of pkt size */
+ int nic_overcount; /* NIC says 1 or 2 more than we need */
+ int pkt_size; /* calculated size of received data */
+ int wrap_size; /* size of data before wrapping it */
+ int header_nxtpkt_ptr; /* NIC's next pkt ptr in rcv header */
+ int low_byte_count; /* low byte count of read from rcv header */
+ int high_byte_count; /* calculated high byte count */
+
+
+ volatile char *sram_nxtpkt_ptr; /* mem location of next packet */
+ volatile char *sram_getdata_ptr; /* next location to be read */
+#ifdef MACH_KERNEL
+ ipc_kmsg_t new_kmsg;
+ struct ether_header *ehp;
+ struct packet_header *pkt;
+#else MACH_KERNEL
+ struct mbuf *m, *tm; /* initial allocation of mem; temp */
+#endif MACH_KERNEL
+
+
+#if MACH_TTD
+ if (((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) &&
+ !kttd_active) {
+#else
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
+#endif /* MACH_TTD */
+ temp_cr = inb(nic+ds_cmd); /* get current CR value */
+ outb(nic+ds_cmd,((temp_cr & 0x3F)|DSCM_PG0|DSCM_STOP));
+ outb(nic+ds0_imr, 0); /* Interrupt Mask Register */
+ outb(nic+ds_cmd, temp_cr);
+ return -1;
+ }
+
+ while(is->read_nxtpkt_ptr != ns8390get_CURR(unit)) {
+
+ /* while there is a packet to read from the buffer */
+
+ if ((is->read_nxtpkt_ptr < is->pstart) ||
+ (is->read_nxtpkt_ptr >= is->pstop)) {
+ ns8390hwrst(unit);
+ return -1;
+ } /* if next packet pointer is out of receive ring bounds */
+
+#if MACH_TTD
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ {
+ packets++;
+ ns8390_cntrs[unit].rcv++;
+ }
+
+ sram_nxtpkt_ptr = (char *) (is->sram + (is->read_nxtpkt_ptr << 8));
+
+ /* get packet size and location of next packet */
+ header_nxtpkt_ptr = *(sram_nxtpkt_ptr + 1);
+ header_nxtpkt_ptr &= 0xFF;
+ low_byte_count = *(sram_nxtpkt_ptr + 2);
+ low_byte_count &= 0xFF;
+
+ if ((low_byte_count + NIC_HEADER_SIZE) > NIC_PAGE_SIZE)
+ nic_overcount = 2;
+ else
+ nic_overcount = 1;
+ if (header_nxtpkt_ptr > is->read_nxtpkt_ptr) {
+ wrap_size = 0;
+ high_byte_count = header_nxtpkt_ptr - is->read_nxtpkt_ptr -
+ nic_overcount;
+ } else {
+ wrap_size = (int) (is->pstop - is->read_nxtpkt_ptr - nic_overcount);
+ high_byte_count = is->pstop - is->read_nxtpkt_ptr +
+ header_nxtpkt_ptr - is->pstart - nic_overcount;
+ }
+ pkt_size = (high_byte_count << 8) | (low_byte_count & 0xFF);
+ /* does not seem to include NIC_HEADER_SIZE */
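+		/*
+		 * Illustrative, made-up numbers for the arithmetic above,
+		 * assuming the usual 4-byte NIC receive header and 256-byte
+		 * pages: with read_nxtpkt_ptr = 0x30, header_nxtpkt_ptr = 0x32
+		 * and low_byte_count = 0x60, nic_overcount is 1 (0x60 + 4 <= 256)
+		 * and there is no wrap, so high_byte_count = 0x32 - 0x30 - 1 = 1
+		 * and pkt_size = (1 << 8) | 0x60 = 352 bytes.
+		 */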
+ if (!pkt_size) {
+ printf("%s%d rcv(): zero length.\n",
+ ns8390_softc[unit].card, unit);
+ goto next_pkt;
+ }
+ len = pkt_size;
+
+ sram_getdata_ptr = sram_nxtpkt_ptr + NIC_HEADER_SIZE;
+ if (board_id & IFWD_SLOT_16BIT) {
+#if MACH_TTD
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ { s = splhi(); }
+
+ en_16bit_access(hdwbase, board_id);
+ bcopy16 (sram_getdata_ptr,
+ &eh,
+ sizeof(struct ether_header));
+ dis_16bit_access (hdwbase, board_id);
+#if MACH_TTD
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ { splx(s); }
+
+ } else {
+ bcopy16 (sram_getdata_ptr,
+ &eh,
+ sizeof(struct ether_header));
+ }
+ sram_getdata_ptr += sizeof(struct ether_header);
+ len -= (sizeof(struct ether_header) + 4); /* crc size */
+#ifdef MACH_KERNEL
+#if MACH_TTD
+ if (kttd_active) {
+ new_kmsg = (ipc_kmsg_t)ttd_request_msg;
+ }else
+#endif /* MACH_TTD */
+ {
+ new_kmsg = net_kmsg_get();
+ if (new_kmsg == IKM_NULL) {
+ /*
+ * Drop the packet.
+ */
+ is->ds_if.if_rcvdrops++;
+ /*
+ * not only do we want to return, we need to drop
+ * the packet on the floor to clear the interrupt.
+ */
+ ns8390lost_frame(unit);
+ return;/* packets;*/
+ }
+ }
+
+#if DEBUG_TTD
+ dump_ether_header("ns8390wire",&eh);
+#endif /* DEBUG_TTD */
+
+ ehp = (struct ether_header *) (&net_kmsg(new_kmsg)->header[0]);
+ pkt = (struct packet_header *) (&net_kmsg(new_kmsg)->packet[0]);
+
+#if DEBUG_TTD
+ printf("!ehp = 0x%x, pkt = 0x%x!",ehp, pkt);
+#endif /* DEBUG_TTD */
+
+ *ehp = eh;
+ if (len >
+ (wrap_size = (is->sram + (is->pstop << 8) - sram_getdata_ptr))) {
+ /* if needs to wrap */
+ if (board_id & IFWD_SLOT_16BIT) {
+#if MACH_TTD
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ { s = splhi(); }
+
+ en_16bit_access(hdwbase, board_id);
+ bcopy16 (sram_getdata_ptr, (char *) (pkt + 1),
+ wrap_size);
+ dis_16bit_access (hdwbase, board_id);
+#if MACH_TTD
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ { splx(s); }
+ } else {
+ bcopy (sram_getdata_ptr, (char *) (pkt + 1),
+ wrap_size);
+ }
+ sram_getdata_ptr = (volatile char *)
+ (is->sram + (is->pstart << 8));
+ } else { /* normal getting data from buffer */
+ wrap_size = 0;
+ }
+ if (board_id & IFWD_SLOT_16BIT) {
+#if MACH_TTD
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ { s = splhi(); }
+ en_16bit_access(hdwbase, board_id);
+ bcopy16 (sram_getdata_ptr,
+ (char *) (pkt + 1) + wrap_size,
+ len - wrap_size);
+ dis_16bit_access (hdwbase, board_id);
+#if MACH_TTD
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ { splx(s); }
+ } else {
+ bcopy (sram_getdata_ptr,
+ (char *) (pkt + 1) + wrap_size,
+ len - wrap_size);
+ }
+
+ pkt->type = ehp->ether_type;
+ pkt->length = len + sizeof(struct packet_header);
+
+#if MACH_TTD
+ /*
+ * Don't want to call net_packet if we are polling
+ * for a packet.
+ */
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ {
+ /*
+ * Hand the packet to the network module.
+ */
+ net_packet(ifp, new_kmsg, pkt->length,
+ ethernet_priority(new_kmsg));
+ }
+
+#else MACH_KERNEL
+#define NEW
+#ifdef NEW
+ m = (struct mbuf *) 0;
+ eh.ether_type = ntohs(eh.ether_type);
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (m == (struct mbuf *) 0) {
+ printf("%s%d rcv(): Lost frame\n",
+ ns8390_softc[unit].card, unit);
+ ns8390lost_frame(unit); /* update NIC pointers and registers */
+ return packets;
+ }
+ m->m_next = (struct mbuf *) 0;
+ tm = m;
+ m->m_len = MLEN;
+ if (len > 2 * MLEN - sizeof (struct ifnet **)) {
+ MCLGET(m);
+ }
+ *(mtod(tm, struct ifnet **)) = ifp;
+ mlen = sizeof (struct ifnet **);
+ bytes_in_mbuf = m->m_len - sizeof(struct ifnet **);
+ mb_p = mtod(tm, u_char *) + sizeof (struct ifnet **);
+ bytes = min(bytes_in_mbuf, len);
+ remaining = (int) (is->sram + (is->pstop << 8) -
+ sram_getdata_ptr);
+ bytes = min(bytes, remaining);
+ do {
+ if (board_id & IFWD_SLOT_16BIT) {
+ s = splhi();
+ en_16bit_access(hdwbase, board_id);
+ bcopy16 (sram_getdata_ptr, mb_p, bytes);
+ dis_16bit_access (hdwbase, board_id);
+ splx(s);
+ } else {
+ bcopy16 (sram_getdata_ptr, mb_p, bytes);
+ }
+
+ mlen += bytes;
+
+ if (!(bytes_in_mbuf -= bytes)) {
+ MGET(tm->m_next, M_DONTWAIT, MT_DATA);
+ tm = tm->m_next;
+ if (tm == (struct mbuf *)0) {
+ printf("%s%d rcv(): No mbufs, lost frame\n",
+ ns8390_softc[unit].card, unit);
+ m_freem(m); /* free the mbuf chain */
+ ns8390lost_frame(unit); /* update NIC pointers and registers */
+ return;
+ }
+ mlen = 0;
+ tm->m_len = MLEN;
+ bytes_in_mbuf = MLEN;
+ mb_p = mtod(tm, u_char *);
+ } else
+ mb_p += bytes;
+
+ if (!(len -= bytes)) {
+ tm->m_len = mlen;
+ break;
+ } else if (bytes == remaining) {
+ sram_getdata_ptr = (volatile char *) (is->sram +
+ (is->pstart << 8));
+ bytes = len;
+ remaining = ETHERMTU;
+ } else {
+ sram_getdata_ptr += bytes;
+ remaining -= bytes;
+ }
+
+ bytes = min(bytes_in_mbuf, len);
+ bytes = min(bytes, remaining);
+ } while(1);
+#else NEW
+ m = (struct mbuf *) 0;
+ eh.ether_type = ntohs(eh.ether_type);
+
+ while ( len ) {
+ if (m == (struct mbuf *) 0) {
+ m = m_get(M_DONTWAIT, MT_DATA);
+ if (m == (struct mbuf *) 0) {
+ printf("%s%d rcv(): Lost frame\n",
+ ns8390_softc[unit].card, unit);
+ ns8390lost_frame(unit); /* update NIC pointers and registers */
+ return packets;
+ }
+ tm = m;
+ tm->m_off = MMINOFF;
+
+
+ /*
+ * first mbuf in the packet must contain a pointer to the
+ * ifnet structure. other mbufs that follow and make up
+ * the packet do not need this pointer in the mbuf.
+ *
+ */
+
+ *(mtod(tm, struct ifnet **)) = ifp;
+ tm->m_len = sizeof(struct ifnet **);
+
+ /* end of first buffer of packet */
+ } else {
+ tm->m_next = m_get(M_DONTWAIT, MT_DATA);
+ tm = tm->m_next;
+ if (tm == (struct mbuf *) 0) {
+ printf("%s%d rcv(): No mbufs, lost frame\n",
+ ns8390_softc[unit].card, unit);
+ m_freem(m); /* free the mbuf chain */
+ ns8390lost_frame(unit); /* update NIC pointers and registers */
+ return packets;
+ }
+ tm->m_off = MMINOFF;
+ tm->m_len = 0;
+ }
+
+ tlen = MIN( MLEN - tm->m_len, len);
+ /* size of mbuf so you know how much you can copy from board */
+ tm->m_next = (struct mbuf *) 0;
+ if (sram_getdata_ptr + tlen >=
+ (volatile char *) (is->sram + (is->pstop << 8))) {
+ /* if needs to wrap */
+ wrap_size = (int) (is->sram + (is->pstop << 8) -
+ sram_getdata_ptr);
+ if (board_id & IFWD_SLOT_16BIT) {
+ s = splhi();
+ en_16bit_access(hdwbase, board_id);
+ bcopy16 (sram_getdata_ptr,
+ mtod(tm, char*) + tm->m_len,
+ wrap_size);
+ dis_16bit_access (hdwbase, board_id);
+ splx(s);
+ } else {
+ bcopy16 (sram_getdata_ptr,
+ mtod(tm, char*) + tm->m_len,
+ wrap_size);
+ }
+ tm->m_len += wrap_size;
+ len -= wrap_size;
+
+ sram_getdata_ptr = (volatile char *) (is->sram +
+ (is->pstart << 8));
+ } else { /* normal getting data from buffer */
+ if (board_id & IFWD_SLOT_16BIT) {
+ s = splhi();
+ en_16bit_access(hdwbase, board_id);
+ bcopy16 (sram_getdata_ptr,
+ mtod(tm, char*) + tm->m_len,
+ tlen);
+ dis_16bit_access (hdwbase, board_id);
+ splx(s);
+ } else {
+ bcopy16 (sram_getdata_ptr,
+ mtod(tm, char*) + tm->m_len,
+ tlen);
+ }
+ sram_getdata_ptr += tlen;
+ tm->m_len += tlen;
+ len -= tlen;
+
+ }
+ }
+
+#endif NEW
+ if (!ns8390send_packet_up(m, &eh, is))
+ m_freem(m);
+#ifdef IF_CNTRS
+ ns_ein[log_2(pkt_size)]++;
+ if (pkt_size < 128) ns_lin[(pkt_size)>>3]++;
+
+ if (eh.ether_type == ETHERTYPE_ARP) {
+ ns_arp++;
+ if (ns_narp) {
+ ns_ein[log_2(pkt_size)]--;
+ if (pkt_size < 128) ns_lin[(pkt_size)>>3]--;
+ }
+ }
+#endif IF_CNTRS
+#endif MACH_KERNEL
+
+next_pkt:
+ is->read_nxtpkt_ptr = *(sram_nxtpkt_ptr + 1);
+ is->read_nxtpkt_ptr &= 0xFF;
+
+#if MACH_TTD
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ {
+ temp_cr = inb(nic+ds_cmd);
+ outb(nic+ds_cmd, (temp_cr & 0x3f) | DSCM_PG0);
+ }
+
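+		/*
+		 * Keep the boundary register one page behind the next packet
+		 * to be read when the ring has been drained (read pointer has
+		 * caught up with CURR); leaving BNDY equal to CURR would
+		 * apparently make the NIC treat the empty ring as full.  While
+		 * unread packets remain, BNDY simply tracks read_nxtpkt_ptr.
+		 */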
+ if (is->read_nxtpkt_ptr == ns8390get_CURR(unit))
+ if (is->read_nxtpkt_ptr == is->pstart)
+ outb(nic+ds0_bndy, is->pstop - 1);
+ else
+ outb(nic+ds0_bndy, is->read_nxtpkt_ptr - 1);
+ else
+ outb(nic+ds0_bndy, is->read_nxtpkt_ptr);
+
+#if MACH_TTD
+ if (!kttd_active)
+#endif /* MACH_TTD */
+ { outb(nic+ds_cmd, temp_cr); }
+
+#if MACH_TTD
+ /*
+ * Hand the packet back to the TTD server, if active.
+ */
+ if (kttd_active && pkt_size)
+ return 1;
+#endif /* MACH_TTD */
+
+
+ }
+ return packets;
+
+}
+
+#ifdef MACH_KERNEL
+#if MACH_TTD
+/*
+ * Polling routines for the TTD debugger.
+ */
+int ns8390poll_receive(unit)
+ int unit;
+{
+ int s;
+ int orig_cr;
+ int orig_imr;
+ int isr_status;
+ int pkts;
+
+ ttd_poll_loop = TRUE;
+
+
+ /*
+	 * Should already be at splhigh.  Is this necessary? XXX
+ */
+ s = splhigh();
+
+#if 0
+ if (kttd_debug)
+ printf("ns8390poll_receive: beginning polling loop\n");
+#endif /* DEBUG_TTD */
+
+ /*
+ * Loop until packet arrives.
+ */
+ while(ttd_poll_loop) {
+
+ /*
+ * Call intr routine
+ */
+
+ ns8390intr(unit);
+ }
+
+#if 0
+ if (kttd_debug)
+ printf("ns8390poll_receive: got packet exiting loop\n");
+#endif /* DEBUG_TTD */
+
+ splx(s);
+}
+
+int ns8390transmit_ttd(unit, packet, len)
+ int unit;
+ char * packet;
+ int len;
+{
+ ns8390_softc_t *is = &ns8390_softc[unit];
+ caddr_t nic = is->nic;
+ u_short count = 0; /* amount of data already copied */
+ volatile char *sram_write_pkt;
+ int board_id = is->board_id;
+ caddr_t hdwbase = ns8390info[unit]->address;
+ int s;
+ int orig_cr;
+ int orig_imr;
+ int isr_status;
+ boolean_t loop = TRUE;
+
+#if 0
+ dump_ipudpbootp("Beg of xmit",packet);
+#endif
+
+ s = splhigh();
+
+	/* beginning of the physical address of the transmission buffer */
+
+ sram_write_pkt = is->sram + is->tpsr * 0x100;
+
+ count = len;
+ if (board_id & IFWD_SLOT_16BIT) {
+ en_16bit_access(hdwbase, board_id);
+ bcopy16 (packet, sram_write_pkt, count);
+ dis_16bit_access (hdwbase, board_id);
+ } else {
+ bcopy (packet, sram_write_pkt, count);
+ }
+
+ while (count < ETHERMIN+sizeof(struct ether_header)) {
+ *(sram_write_pkt + count) = 0;
+ count++;
+ }
+ outb(nic+ds_cmd, DSCM_NODMA|DSCM_START|DSCM_PG0); /* select page 0 */
+ outb(nic+ds0_tpsr, is->tpsr); /* xmt page start at 0 of RAM */
+ outb(nic+ds0_tbcr1, count >> 8); /* upper byte of count */
+ outb(nic+ds0_tbcr0, count & 0xFF); /* lower byte of count */
+ outb(nic+ds_cmd, DSCM_TRANS|DSCM_NODMA|DSCM_START); /* start transmission */
+
+ ns8390intr(unit);
+
+ splx(s);
+}
+#endif /* MACH_TTD */
+#endif /* MACH_KERNEL */
+
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+/*
+ * Send a packet composed of an mbuf chain to the higher levels
+ *
+ */
+ns8390send_packet_up(m, eh, is)
+struct mbuf *m;
+struct ether_header *eh;
+ns8390_softc_t *is;
+{
+ register struct ifqueue *inq;
+ spl_t opri;
+
+ switch (eh->ether_type) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ schednetisr(NETISR_IP);
+ inq = &ipintrq;
+ break;
+ case ETHERTYPE_ARP:
+ arpinput(&is->ns8390_ac, m);
+ return(TRUE);
+#endif
+#ifdef NS
+ case ETHERTYPE_NS:
+ schednetisr(NETISR_NS);
+ inq = &nsintrq;
+ break;
+#endif
+ default:
+ return(FALSE);
+ }
+ opri = SPLNET();
+ if (IF_QFULL(inq)) {
+ IF_DROP(inq);
+ splx(opri);
+ return(FALSE);
+ }
+ IF_ENQUEUE(inq, m);
+ splx(opri);
+ return(TRUE);
+}
+#endif MACH_KERNEL
+
+/*
+ * ns8390lost_frame:
+ * this routine is called by ns8390rcv() after memory for mbufs could not
+ * be allocated.  It sets the boundary pointers and registers to the next
+ * packet location.
+ */
+
+ns8390lost_frame(unit)
+int unit;
+{
+ ns8390_softc_t *is = &ns8390_softc[unit];
+ caddr_t nic = is->nic;
+ volatile char *sram_nxtpkt_ptr;
+ int temp_cr;
+
+
+
+ sram_nxtpkt_ptr = (volatile char *) (is->sram +
+ (is->read_nxtpkt_ptr << 8));
+
+ is->read_nxtpkt_ptr = *(sram_nxtpkt_ptr + 1);
+ is->read_nxtpkt_ptr &= 0xFF;
+
+ temp_cr = inb(nic+ds_cmd);
+ outb(nic+ds_cmd, (temp_cr & 0x3f) | DSCM_PG0);
+
+ /* update boundary register */
+ if (is->read_nxtpkt_ptr == ns8390get_CURR(unit))
+ if (is->read_nxtpkt_ptr == is->pstart)
+ outb(nic+ds0_bndy, is->pstop - 1);
+ else
+ outb(nic+ds0_bndy, is->read_nxtpkt_ptr - 1);
+ else
+ outb(nic+ds0_bndy, is->read_nxtpkt_ptr);
+
+ outb(nic+ds_cmd, temp_cr);
+
+ return;
+}
+
+/*
+ * ns8390get_CURR():
+ *
+ * Returns the value of the CURR register, which points to the next
+ * page available for the NIC to receive from the network into the
+ * receive ring.
+ *
+ */
+
+int
+ns8390get_CURR(unit)
+int unit;
+{
+ caddr_t nic = ns8390_softc[unit].nic;
+ int temp_cr;
+ int ret_val;
+ spl_t s;
+
+ s = SPLNET();
+
+ temp_cr = inb(nic+ds_cmd); /* get current CR value */
+ outb(nic+ds_cmd, ((temp_cr & 0x3F) | DSCM_PG1)); /* select page 1 registers */
+ ret_val = inb(nic+ds1_curr); /* read CURR value */
+ outb(nic+ds_cmd, temp_cr);
+ splx(s);
+ return (ret_val & 0xFF);
+}
+
+/*
+ * ns8390xmt:
+ *
+ * This routine fills in the appropriate registers and memory
+ * locations on the ns8390 board and starts the board off on
+ * the transmit.
+ *
+ * input : board number of interest, and a pointer to the mbuf
+ * output : board memory and registers are set for xfer and attention
+ *
+ */
+
+ns8390xmt(unit, m)
+int unit;
+#ifdef MACH_KERNEL
+io_req_t m;
+#else MACH_KERNEL
+struct mbuf *m;
+#endif MACH_KERNEL
+{
+ ns8390_softc_t *is = &ns8390_softc[unit];
+ caddr_t nic = is->nic;
+ struct ether_header *eh;
+ int i;
+ int opri;
+ u_short count = 0; /* amount of data already copied */
+ volatile char *sram_write_pkt;
+ int board_id = is->board_id;
+ vm_offset_t hdwbase = ns8390info[unit]->address;
+ spl_t s;
+
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ register struct mbuf *tm_p;
+#endif MACH_KERNEL
+	/* beginning of the physical address of the transmission buffer */
+
+ sram_write_pkt = is->sram + is->tpsr * 0x100;
+
+#ifdef MACH_KERNEL
+ count = m->io_count;
+ if (board_id & IFWD_SLOT_16BIT) {
+ s = splhi();
+ en_16bit_access(hdwbase, board_id);
+ bcopy16 (m->io_data, sram_write_pkt, count);
+ dis_16bit_access (hdwbase, board_id);
+ splx(s);
+ } else {
+ bcopy (m->io_data, sram_write_pkt, count);
+ }
+#else MACH_KERNEL
+ for(tm_p = m; tm_p != (struct mbuf *)0; tm_p = tm_p->m_next) {
+ if (count + tm_p->m_len > ETHERMTU + sizeof (struct ether_header))
+ break;
+ if (tm_p->m_len == 0)
+ continue;
+ if (board_id & IFWD_SLOT_16BIT) {
+ s = splhi();
+ en_16bit_access(hdwbase, board_id);
+ bcopy16 (mtod(tm_p, caddr_t),
+ sram_write_pkt + count,
+ tm_p->m_len);
+ dis_16bit_access (hdwbase, board_id);
+ splx(s);
+ } else {
+ bcopy16 (mtod(tm_p, caddr_t),
+ sram_write_pkt + count,
+ tm_p->m_len);
+ }
+ count += tm_p->m_len;
+ }
+#ifdef IF_CNTRS
+ ns_eout[log_2(count+4/*crc*/)]++;
+ if (count < 128) ns_lout[(count+4/*crc*/)>>3]++;
+#endif IF_CNTRS
+#endif MACH_KERNEL
+ while (count < ETHERMIN+sizeof(struct ether_header)) {
+ *(sram_write_pkt + count) = 0;
+ count++;
+ }
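+	/*
+	 * The loop above zero-pads short frames up to the minimum ethernet
+	 * frame size (ETHERMIN data bytes plus the 14-byte header); the CRC
+	 * is appended by the NIC itself and is not counted here.
+	 */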
+
+ /* select page 0 */
+ outb(nic+ds_cmd, DSCM_NODMA|DSCM_START|DSCM_PG0);
+ outb(nic+ds0_tpsr, is->tpsr); /* xmt page start at 0 of RAM */
+ outb(nic+ds0_tbcr1, count >> 8); /* upper byte of count */
+ outb(nic+ds0_tbcr0, count & 0xFF); /* lower byte of count */
+ /* start transmission */
+ outb(nic+ds_cmd, DSCM_TRANS|DSCM_NODMA|DSCM_START);
+
+#ifdef MACH_KERNEL
+ iodone(m);
+ m=0;
+#else MACH_KERNEL
+ /* If this is a broadcast packet, loop it back to rcv. */
+ eh = mtod( m, struct ether_header *);
+ for (i=0; ((i < 6) && (eh->ether_dhost[i] == 0xff)); i++) ;
+ if (i == 6) {
+ if (!ns8390send_packet_up(m, eh, is))
+ m_freem(m);
+ } else
+ m_freem(m);
+#endif MACH_KERNEL
+ return;
+}
+
+config_nic(unit)
+int unit;
+{
+ ns8390_softc_t *is = &ns8390_softc[unit];
+ caddr_t nic = is->nic;
+ int i;
+ int temp;
+ int count = 0;
+ spl_t s;
+
+ /* soft reset and page 0 */
+ outb (nic+ds_cmd, DSCM_PG0|DSCM_NODMA|DSCM_STOP);
+
+ while ((!(inb (nic + ds0_isr) & DSIS_RESET)) && (count < 10000))
+ count++;
+ if (count == 10000) {
+ printf("%s%d: config_nic(): would not reset.\n",
+ ns8390_softc[unit].card, unit);
+ }
+
+ /* fifo depth | not loopback */
+ temp = ((is->fifo_depth & 0x0c) << 3) | DSDC_BMS;
+
+ /* word xfer select (16 bit cards ) */
+ if (is->board_id & IFWD_SLOT_16BIT)
+ temp |= DSDC_WTS;
+
+ outb (nic+ds0_dcr, temp);
+ outb (nic+ds0_tcr, 0);
+ outb (nic+ds0_rcr, DSRC_MON); /* receive configuration register */
+	/* receive ring starts 2k into RAM */
+ outb (nic+ds0_pstart, is->pstart);
+ /* stop at last RAM buffer rcv location */
+ outb (nic+ds0_pstop, is->pstop);
+
+ /* boundary pointer for page 0 */
+ outb (nic+ds0_bndy, is->pstart);
+ s = SPLNET();
+
+ /* maintain rst | sel page 1 */
+ outb (nic+ds_cmd, DSCM_PG1|DSCM_NODMA|DSCM_STOP);
+
+ /* internal next packet pointer */
+ is->read_nxtpkt_ptr = is->pstart + 1;
+
+ outb (nic+ds1_curr, is->read_nxtpkt_ptr); /* Current page register */
+ for(i=0; i<ETHER_ADDR_SIZE; i++)
+ outb (nic+ds1_par0+i, is->address[i]);
+ for(i=0; i<8; i++)
+ outb (nic+ds1_mar0+i, 0);
+
+ outb (nic+ds_cmd, DSCM_PG0|DSCM_STOP|DSCM_NODMA);
+ splx(s);
+ outb (nic+ds0_isr, 0xff); /* clear all interrupt status bits */
+ outb (nic+ds0_imr, imr_hold); /* Enable interrupts */
+ outb (nic+ds0_rbcr0, 0); /* clear remote byte count */
+ outb (nic+ds0_rbcr1, 0);
+
+ /* start NIC | select page 0 */
+ outb (nic+ds_cmd, DSCM_PG0|DSCM_START|DSCM_NODMA);
+
+ outb (nic+ds0_rcr, DSRC_AB); /* receive configuration register */
+
+ return TRUE;
+}
+
+/*
+ * config_wd8003:
+ *
+ * This routine does a standard config of a wd8003 family board, with
+ * the proper modifications to different boards within this family.
+ *
+ */
+config_wd8003(unit)
+int unit;
+{
+ ns8390_softc_t *is = &ns8390_softc[unit];
+ vm_offset_t hdwbase = ns8390info[unit]->address;
+ int i;
+ int RAMsize;
+ volatile char *RAMbase;
+ int addr_temp;
+
+ is->tpsr = 0; /* transmit page start hold */
+ is->pstart = 0x06; /* receive page start hold */
+ is->read_nxtpkt_ptr = is->pstart + 1; /* internal next packet pointer */
+ is->fifo_depth = 0x08; /* NIC fifo threshold */
+ switch (is->board_id & IFWD_RAM_SIZE_MASK) {
+ case IFWD_RAM_SIZE_8K:
+ RAMsize = 0x2000; break;
+ case IFWD_RAM_SIZE_16K:
+ RAMsize = 0x4000; break;
+ case IFWD_RAM_SIZE_32K:
+ RAMsize = 0x8000; break;
+ case IFWD_RAM_SIZE_64K:
+ RAMsize = 0x10000; break;
+ default:
+ RAMsize = 0x2000; break;
+ }
+ is->pstop = (((int)RAMsize >> 8) & 0x0ff); /* rcv page stop hold */
+ RAMbase = (volatile char *)ns8390info[unit]->phys_address;
+ addr_temp = ((int)(RAMbase) >> 13) & 0x3f; /* convert to be written to MSR */
+ outb(hdwbase+IFWD_MSR, addr_temp | IFWD_MENB); /* initialize MSR */
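+	/*
+	 * The shift/mask above places shared-RAM address bits 13-18 into the
+	 * MSR (8 kB granularity).  Illustrative, hypothetical example: with
+	 * the board's RAM at physical 0xD0000, addr_temp = (0xD0000 >> 13)
+	 * & 0x3f = 0x28, and 0x28 | IFWD_MENB enables the memory window.
+	 */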
+ /* enable 16 bit access from lan controller */
+ if (is->board_id & IFWD_SLOT_16BIT) {
+ if (is->board_id & IFWD_INTERFACE_CHIP) {
+ outb(hdwbase+IFWD_REG_5,
+ (inb(hdwbase + IFWD_REG_5) & IFWD_REG5_MEM_MASK) |
+ IFWD_LAN16ENB);
+ } else {
+ outb(hdwbase+IFWD_REG_5, (IFWD_LAN16ENB | IFWD_LA19));
+ }
+ }
+ /*
+ outb(hdwbase+LAAR, LAN16ENB | LA19| MEM16ENB | SOFTINT);
+ */
+
+ return TRUE;
+}
+
+/*
+ * config_3c503:
+ *
+ * This routine does a standard config of a 3Com Etherlink II board.
+ *
+ */
+int
+config_3c503(unit)
+int unit;
+{
+ ns8390_softc_t *is = &ns8390_softc[unit];
+ struct bus_device *dev = ns8390info[unit];
+ vm_offset_t hdwbase = dev->address;
+ int RAMsize = dev->am;
+ int i;
+
+ is->tpsr = 0x20; /* transmit page start hold */
+ is->sram = (char *)phystokv(dev->phys_address) - is->tpsr * 0x100;
+					/* When the NIC says page 0x20, that means
+					   the beginning of the sram range */
+ is->pstart = 0x26; /* receive page start hold */
+ is->read_nxtpkt_ptr = is->pstart + 1; /* internal next packet pointer */
+ is->fifo_depth = 0x08; /* NIC fifo threshold */
+ is->pstop = is->tpsr + ((RAMsize >> 8) & 0x0ff); /* rcv page stop hold */
+
+ outb(hdwbase+CTLR, CTLR_RST|CTLR_THIN);
+ outb(hdwbase+CTLR, CTLR_THIN);
+ outb(hdwbase+CTLR, CTLR_STA_ADDR|CTLR_THIN);
+ for (i = 0; i < 6; i++)
+ is->address[i] = inb(hdwbase+i);
+ outb(hdwbase+CTLR, elii_bnc[is->board_id]?CTLR_THIN:CTLR_THICK);
+ outb(hdwbase+PSTR, is->pstart);
+ outb(hdwbase+PSPR, is->pstop);
+ outb(hdwbase+IDCFR, IDCFR_IRQ2 << (elii_irq[is->board_id] - 2));
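+	/*
+	 * The IDCFR apparently carries one enable bit per interrupt line
+	 * starting at IRQ2, so the write above selects the configured line
+	 * by shifting IDCFR_IRQ2 left; e.g. an elii_irq[] entry of 5 would
+	 * yield IDCFR_IRQ2 << 3.
+	 */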
+ outb(hdwbase+GACFR, GACFR_TCM|GACFR_8K);
+ /* BCFR & PCRFR ro */
+ /* STREG ro & dma */
+ outb(hdwbase+DQTR, 0);
+ outb(hdwbase+DAMSB, 0);
+ outb(hdwbase+DALSB, 0);
+ outb(hdwbase+VPTR2, 0);
+ outb(hdwbase+VPTR1, 0);
+ outb(hdwbase+VPTR0, 0);
+ outb(hdwbase+RFMSB, 0);
+ outb(hdwbase+RFLSB, 0);
+ return TRUE;
+}
+
+/*
+ * ns8390intoff:
+ *
+ * This function turns interrupts off for the ns8390 board indicated.
+ *
+ */
+void
+ns8390intoff(unit)
+int unit;
+{
+ caddr_t nic = ns8390_softc[unit].nic;
+ int temp_cr = inb(nic+ds_cmd); /* get current CR value */
+
+ outb(nic+ds_cmd,((temp_cr & 0x3F)|DSCM_PG0|DSCM_STOP));
+ outb(nic+ds0_imr, 0); /* Interrupt Mask Register */
+ outb(nic+ds_cmd, temp_cr|DSCM_STOP);
+
+}
+
+
+/*
+ * wd80xxget_board_id:
+ *
+ * determine which board is being used.
+ * Currently supports:
+ * wd8003E (tested)
+ * wd8003EBT
+ * wd8003EP (tested)
+ * wd8013EP (tested)
+ *
+ */
+wd80xxget_board_id(dev)
+struct bus_device *dev;
+{
+ vm_offset_t hdwbase = dev->address;
+ long unit = dev->unit;
+ long board_id = 0;
+ int reg_temp;
+ int rev_num; /* revision number */
+ int ram_flag;
+ int intr_temp;
+ int i;
+ boolean_t register_aliasing;
+
+ rev_num = (inb(hdwbase + IFWD_BOARD_ID) & IFWD_BOARD_REV_MASK) >> 1;
+ printf("%s%d: ", ns8390_softc[unit].card, unit);
+
+ if (rev_num == 0) {
+ printf("rev 0x00\n");
+ /* It must be 8000 board */
+ return 0;
+ }
+
+	/* Check whether register aliasing is in effect, that is, whether
+	   reading from register offsets 0-7 returns the contents of
+	   register offsets 8-f */
+
+ register_aliasing = TRUE;
+ for (i = 1; i < 5; i++) {
+ if (inb(hdwbase + IFWD_REG_0 + i) !=
+ inb(hdwbase + IFWD_LAR_0 + i))
+ register_aliasing = FALSE;
+ }
+ if (inb(hdwbase + IFWD_REG_7) != inb(hdwbase + IFWD_CHKSUM))
+ register_aliasing = FALSE;
+
+
+ if (register_aliasing == FALSE) {
+ /* Check if board has interface chip */
+
+ reg_temp = inb(hdwbase + IFWD_REG_7); /* save old */
+ outb(hdwbase + IFWD_REG_7, 0x35); /* write value */
+ inb(hdwbase + IFWD_REG_0); /* dummy read */
+ if ((inb(hdwbase + IFWD_REG_7) & 0xff) == 0x35) {
+ outb(hdwbase + IFWD_REG_7, 0x3a);/* Try another value*/
+ inb(hdwbase + IFWD_REG_0); /* dummy read */
+ if ((inb(hdwbase + IFWD_REG_7) & 0xff) == 0x3a) {
+ board_id |= IFWD_INTERFACE_CHIP;
+ outb(hdwbase + IFWD_REG_7, reg_temp);
+ /* restore old value */
+ }
+ }
+
+ /* Check if board is 16 bit by testing if bit zero in
+ register 1 is unchangeable by software. If so then
+ card has 16 bit capability */
+ reg_temp = inb(hdwbase + IFWD_REG_1);
+ outb(hdwbase + IFWD_REG_1, reg_temp ^ IFWD_16BIT);
+ inb(hdwbase + IFWD_REG_0); /* dummy read */
+ if ((inb(hdwbase + IFWD_REG_1) & IFWD_16BIT) ==
+ (reg_temp & IFWD_16BIT)) { /* Is bit unchanged */
+ board_id |= IFWD_BOARD_16BIT; /* Yes == 16 bit */
+ reg_temp &= 0xfe; /* For 16 bit board
+ always reset bit 0 */
+ }
+ outb(hdwbase + IFWD_REG_1, reg_temp); /* write value back */
+
+ /* Test if 16 bit card is in 16 bit slot by reading bit zero in
+ register 1. */
+ if (board_id & IFWD_BOARD_16BIT) {
+ if (inb(hdwbase + IFWD_REG_1) & IFWD_16BIT) {
+ board_id |= IFWD_SLOT_16BIT;
+ }
+ }
+ }
+
+ /* Get media type */
+
+ if (inb(hdwbase + IFWD_BOARD_ID) & IFWD_MEDIA_TYPE) {
+ board_id |= IFWD_ETHERNET_MEDIA;
+ } else if (rev_num == 1) {
+ board_id |= IFWD_STARLAN_MEDIA;
+ } else {
+ board_id |= IFWD_TWISTED_PAIR_MEDIA;
+ }
+
+ if (rev_num == 2) {
+ if (inb(hdwbase + IFWD_BOARD_ID) & IFWD_SOFT_CONFIG) {
+ if ((board_id & IFWD_STATIC_ID_MASK) == WD8003EB ||
+ (board_id & IFWD_STATIC_ID_MASK) == WD8003W) {
+ board_id |= IFWD_ALTERNATE_IRQ_BIT;
+ }
+ }
+ /* Check for memory size */
+
+ ram_flag = inb(hdwbase + IFWD_BOARD_ID) & IFWD_MEMSIZE;
+
+ switch (board_id & IFWD_STATIC_ID_MASK) {
+ case WD8003E: /* same as WD8003EBT */
+ case WD8003S: /* same as WD8003SH */
+ case WD8003WT:
+ case WD8003W:
+ case WD8003EB: /* same as WD8003EP */
+ if (ram_flag)
+ board_id |= IFWD_RAM_SIZE_32K;
+ else
+ board_id |= IFWD_RAM_SIZE_8K;
+ break;
+ case WD8003ETA:
+ case WD8003STA:
+ case WD8003EA:
+ case WD8003SHA:
+ case WD8003WA:
+ board_id |= IFWD_RAM_SIZE_16K;
+ break;
+ case WD8013EBT:
+ if (board_id & IFWD_SLOT_16BIT) {
+ if (ram_flag)
+ board_id |= IFWD_RAM_SIZE_64K;
+ else
+ board_id |= IFWD_RAM_SIZE_16K;
+ } else {
+ if (ram_flag)
+ board_id |= IFWD_RAM_SIZE_32K;
+ else
+ board_id |= IFWD_RAM_SIZE_8K;
+ }
+ break;
+ default:
+ board_id |= IFWD_RAM_SIZE_UNKNOWN;
+ break;
+ }
+ } else if (rev_num >= 3) {
+ board_id &= (long) ~IFWD_MEDIA_MASK; /* remove media info */
+ board_id |= IFWD_INTERFACE_584_CHIP;
+ board_id |= wd80xxget_eeprom_info(hdwbase, board_id);
+ } else {
+ /* Check for memory size */
+ if (board_id & IFWD_BOARD_16BIT) {
+ if (board_id & IFWD_SLOT_16BIT)
+ board_id |= IFWD_RAM_SIZE_16K;
+ else
+ board_id |= IFWD_RAM_SIZE_8K;
+ } else if (board_id & IFWD_MICROCHANNEL)
+ board_id |= IFWD_RAM_SIZE_16K;
+ else if (board_id & IFWD_INTERFACE_CHIP) {
+ if (inb(hdwbase + IFWD_REG_1) & IFWD_MEMSIZE)
+ board_id |= IFWD_RAM_SIZE_32K;
+ else
+ board_id |= IFWD_RAM_SIZE_8K;
+ } else
+ board_id |= IFWD_RAM_SIZE_UNKNOWN;
+
+ /* No support for 690 chip yet. It should be checked here */
+ }
+
+ switch (board_id & IFWD_STATIC_ID_MASK) {
+ case WD8003E: printf("WD8003E or WD8003EBT"); break;
+ case WD8003S: printf("WD8003S or WD8003SH"); break;
+ case WD8003WT: printf("WD8003WT"); break;
+ case WD8003W: printf("WD8003W"); break;
+ case WD8003EB:
+ if (board_id & IFWD_INTERFACE_584_CHIP)
+ printf("WD8003EP");
+ else
+ printf("WD8003EB");
+ break;
+ case WD8003EW: printf("WD8003EW"); break;
+ case WD8003ETA: printf("WD8003ETA"); break;
+ case WD8003STA: printf("WD8003STA"); break;
+ case WD8003EA: printf("WD8003EA"); break;
+ case WD8003SHA: printf("WD8003SHA"); break;
+ case WD8003WA: printf("WD8003WA"); break;
+ case WD8013EBT: printf("WD8013EBT"); break;
+ case WD8013EB:
+ if (board_id & IFWD_INTERFACE_584_CHIP)
+ printf("WD8013EP");
+ else
+ printf("WD8013EB");
+ break;
+ case WD8013W: printf("WD8013W"); break;
+ case WD8013EW: printf("WD8013EW"); break;
+ default: printf("unknown"); break;
+ }
+ printf(" rev 0x%02x", rev_num);
+ switch(board_id & IFWD_RAM_SIZE_RES_7) {
+ case IFWD_RAM_SIZE_UNKNOWN:
+ break;
+ case IFWD_RAM_SIZE_8K:
+ printf(" 8 kB ram");
+ break;
+ case IFWD_RAM_SIZE_16K:
+ printf(" 16 kB ram");
+ break;
+ case IFWD_RAM_SIZE_32K:
+ printf(" 32 kB ram");
+ break;
+ case IFWD_RAM_SIZE_64K:
+ printf(" 64 kB ram");
+ break;
+ default:
+ printf("wd: Internal error ram size value invalid %d\n",
+ (board_id & IFWD_RAM_SIZE_RES_7)>>16);
+ }
+
+ if (board_id & IFWD_BOARD_16BIT) {
+ if (board_id & IFWD_SLOT_16BIT) {
+ printf(", in 16 bit slot");
+ } else {
+ printf(", 16 bit board in 8 bit slot");
+ }
+ }
+ if (board_id & IFWD_INTERFACE_CHIP) {
+ if (board_id & IFWD_INTERFACE_584_CHIP) {
+ printf(", 584 chip");
+ } else {
+ printf(", 583 chip");
+ }
+ }
+ if ((board_id & IFWD_INTERFACE_CHIP) == IFWD_INTERFACE_CHIP) {
+ /* program the WD83C583 EEPROM registers */
+ int irr_temp, icr_temp;
+
+ icr_temp = inb(hdwbase + IFWD_ICR);
+ irr_temp = inb(hdwbase + IFWD_IRR);
+
+ irr_temp &= ~(IFWD_IR0 | IFWD_IR1);
+ irr_temp |= IFWD_IEN;
+
+ icr_temp &= IFWD_WTS;
+
+ if (!(board_id & IFWD_INTERFACE_584_CHIP)) {
+ icr_temp |= IFWD_DMAE | IFWD_IOPE;
+ if (ram_flag)
+ icr_temp |= IFWD_MSZ;
+ }
+
+ if (board_id & IFWD_INTERFACE_584_CHIP) {
+ switch(ns8390info[unit]->sysdep1) {
+ case 10:
+ icr_temp |= IFWD_DMAE;
+ break;
+ case 2:
+ case 9: /* Same as 2 */
+ break;
+ case 11:
+ icr_temp |= IFWD_DMAE;
+ /*FALLTHROUGH*/
+ case 3:
+ irr_temp |= IFWD_IR0;
+ break;
+ case 15:
+ icr_temp |= IFWD_DMAE;
+ /*FALLTHROUGH*/
+ case 5:
+ irr_temp |= IFWD_IR1;
+ break;
+ case 4:
+ icr_temp |= IFWD_DMAE;
+ /*FALLTHROUGH*/
+ case 7:
+ irr_temp |= IFWD_IR0 | IFWD_IR1;
+ break;
+ default:
+ printf("%s%d: wd80xx_get_board_id(): Could not set Interrupt Request Register according to pic(%d).\n",
+ ns8390_softc[unit].card, unit,
+ ns8390info[unit]->sysdep1);
+ break;
+ }
+ } else {
+ switch(ns8390info[unit]->sysdep1) {
+ /* attempt to set interrupt according to assigned pic */
+ case 2:
+ case 9: /* Same as 2 */
+ break;
+ case 3:
+ irr_temp |= IFWD_IR0;
+ break;
+ case 4:
+ irr_temp |= IFWD_IR1;
+ break;
+ case 5:
+ irr_temp |= IFWD_IR1 | IFWD_AINT;
+ break;
+ case 7:
+ irr_temp |= IFWD_IR0 | IFWD_IR1;
+ break;
+ default:
+ printf("%s%d: wd80xx_get_board_id(): Could not set Interrupt Request Register according to pic(%d).\n",
+ ns8390_softc[unit].card, unit,
+ ns8390info[unit]->sysdep1);
+ }
+ }
+ outb(hdwbase + IFWD_IRR, irr_temp);
+ outb(hdwbase + IFWD_ICR, icr_temp);
+ }
+ printf("\n");
+ return (board_id);
+}
+
+wd80xxget_eeprom_info(hdwbase, board_id)
+ caddr_t hdwbase;
+ long board_id;
+{
+ unsigned long new_bits = 0;
+ int reg_temp;
+
+ outb(hdwbase + IFWD_REG_1,
+ ((inb(hdwbase + IFWD_REG_1) & IFWD_ICR_MASK) | IFWD_OTHER_BIT));
+ outb(hdwbase + IFWD_REG_3,
+ ((inb(hdwbase + IFWD_REG_3) & IFWD_EAR_MASK) | IFWD_ENGR_PAGE));
+ outb(hdwbase + IFWD_REG_1,
+ ((inb(hdwbase + IFWD_REG_1) & IFWD_ICR_MASK) |
+ (IFWD_RLA | IFWD_OTHER_BIT)));
+ while (inb(hdwbase + IFWD_REG_1) & IFWD_RECALL_DONE_MASK)
+ ;
+
+ reg_temp = inb(hdwbase + IFWD_EEPROM_1);
+ switch (reg_temp & IFWD_EEPROM_BUS_TYPE_MASK) {
+ case IFWD_EEPROM_BUS_TYPE_AT:
+ if (wd_debug & 1) printf("wd: AT bus, ");
+ break;
+ case IFWD_EEPROM_BUS_TYPE_MCA:
+ if (wd_debug & 1) printf("wd: MICROCHANNEL, ");
+ new_bits |= IFWD_MICROCHANNEL;
+ break;
+ default:
+ break;
+ }
+ switch (reg_temp & IFWD_EEPROM_BUS_SIZE_MASK) {
+ case IFWD_EEPROM_BUS_SIZE_8BIT:
+ if (wd_debug & 1) printf("8 bit bus size, ");
+ break;
+ case IFWD_EEPROM_BUS_SIZE_16BIT:
+ if (wd_debug & 1) printf("16 bit bus size ");
+ new_bits |= IFWD_BOARD_16BIT;
+ if (inb(hdwbase + IFWD_REG_1) & IFWD_16BIT) {
+ new_bits |= IFWD_SLOT_16BIT;
+ if (wd_debug & 1)
+ printf("in 16 bit slot, ");
+ } else {
+ if (wd_debug & 1)
+ printf("in 8 bit slot (why?), ");
+ }
+ break;
+ default:
+ if (wd_debug & 1) printf("bus size other than 8 or 16 bit, ");
+ break;
+ }
+ reg_temp = inb(hdwbase + IFWD_EEPROM_0);
+ switch (reg_temp & IFWD_EEPROM_MEDIA_MASK) {
+ case IFWD_STARLAN_TYPE:
+ if (wd_debug & 1) printf("Starlan media, ");
+ new_bits |= IFWD_STARLAN_MEDIA;
+ break;
+ case IFWD_TP_TYPE:
+ if (wd_debug & 1) printf("Twisted pair media, ");
+ new_bits |= IFWD_TWISTED_PAIR_MEDIA;
+ break;
+ case IFWD_EW_TYPE:
+ if (wd_debug & 1) printf("Ethernet and twisted pair media, ");
+ new_bits |= IFWD_EW_MEDIA;
+ break;
+ case IFWD_ETHERNET_TYPE: /*FALLTHROUGH*/
+ default:
+ if (wd_debug & 1) printf("ethernet media, ");
+ new_bits |= IFWD_ETHERNET_MEDIA;
+ break;
+ }
+ switch (reg_temp & IFWD_EEPROM_IRQ_MASK) {
+ case IFWD_ALTERNATE_IRQ_1:
+ if (wd_debug & 1) printf("Alternate irq 1\n");
+ new_bits |= IFWD_ALTERNATE_IRQ_BIT;
+ break;
+ default:
+ if (wd_debug & 1) printf("\n");
+ break;
+ }
+ switch (reg_temp & IFWD_EEPROM_RAM_SIZE_MASK) {
+ case IFWD_EEPROM_RAM_SIZE_8K:
+ new_bits |= IFWD_RAM_SIZE_8K;
+ break;
+ case IFWD_EEPROM_RAM_SIZE_16K:
+ if ((new_bits & IFWD_BOARD_16BIT) && (new_bits & IFWD_SLOT_16BIT))
+ new_bits |= IFWD_RAM_SIZE_16K;
+ else
+ new_bits |= IFWD_RAM_SIZE_8K;
+ break;
+ case IFWD_EEPROM_RAM_SIZE_32K:
+ new_bits |= IFWD_RAM_SIZE_32K;
+ break;
+ case IFWD_EEPROM_RAM_SIZE_64K:
+ if ((new_bits & IFWD_BOARD_16BIT) && (new_bits & IFWD_SLOT_16BIT))
+ new_bits |= IFWD_RAM_SIZE_64K;
+ else
+ new_bits |= IFWD_RAM_SIZE_32K;
+ break;
+ default:
+ new_bits |= IFWD_RAM_SIZE_UNKNOWN;
+ break;
+ }
+ outb(hdwbase + IFWD_REG_1,
+ ((inb(hdwbase + IFWD_REG_1) & IFWD_ICR_MASK) | IFWD_OTHER_BIT));
+ outb(hdwbase + IFWD_REG_3,
+ ((inb(hdwbase + IFWD_REG_3) & IFWD_EAR_MASK) | IFWD_EA6));
+ outb(hdwbase + IFWD_REG_1,
+ ((inb(hdwbase + IFWD_REG_1) & IFWD_ICR_MASK) | IFWD_RLA));
+ return (new_bits);
+}
+
+wdpr(unit)
+{
+ caddr_t nic = ns8390_softc[unit].nic;
+ spl_t s;
+ int temp_cr;
+
+ s = SPLNET();
+ temp_cr = inb(nic+ds_cmd); /* get current CR value */
+
+ printf("CR %x, BNDRY %x, TSR %x, NCR %x, FIFO %x, ISR %x, RSR %x\n",
+ inb(nic+0x0), inb(nic+0x3), inb(nic+0x4), inb(nic+0x5),
+ inb(nic+0x6), inb(nic+0x7), inb(nic+0xc));
+ printf("CLD %x:%x, CRD %x:%x, FR %x, CRC %x, Miss %x\n",
+ inb(nic+0x1), inb(nic+0x2),
+ inb(nic+0x8), inb(nic+0x9),
+ inb(nic+0xd), inb(nic+0xe), inb(nic+0xf));
+
+
+ outb(nic, (temp_cr&0x3f)|DSCM_PG1); /* page 1 CR value */
+ printf("PHYS %x:%x:%x:%x:%x CUR %x\n",
+ inb(nic+0x1), inb(nic+0x2), inb(nic+0x3),
+ inb(nic+0x4), inb(nic+0x5), inb(nic+0x6),
+ inb(nic+0x7));
+ printf("MAR %x:%x:%x:%x:%x:%x:%x:%x\n",
+ inb(nic+0x8), inb(nic+0x9), inb(nic+0xa), inb(nic+0xb),
+ inb(nic+0xc), inb(nic+0xd), inb(nic+0xe), inb(nic+0xf));
+ outb(nic, temp_cr); /* restore current CR value */
+ splx(s);
+}
+
+
+/*
+  This sets bit 7 (0 justified) of register offset 0x05.  It enables
+  the host to access shared RAM 16 bits at a time and keeps the LAN16BIT
+  bit high.  In addition, this routine maintains address bit 19
+  (previous cards assumed this bit high...we must do it manually).
+
+ note 1: this is a write only register
+ note 2: this routine should be called only after interrupts are disabled
+ and they should remain disabled until after the routine 'dis_16bit_access'
+ is called
+*/
+
+en_16bit_access (hdwbase, board_id)
+ caddr_t hdwbase;
+ long board_id;
+{
+ if (board_id & IFWD_INTERFACE_CHIP)
+ outb(hdwbase+IFWD_REG_5,
+ (inb(hdwbase+IFWD_REG_5) & IFWD_REG5_MEM_MASK)
+ | IFWD_MEM16ENB | IFWD_LAN16ENB);
+ else
+ outb(hdwbase+IFWD_REG_5, (IFWD_MEM16ENB | IFWD_LAN16ENB |
+ IFWD_LA19));
+}
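+/*
+ * Typical usage of the pair above/below elsewhere in this driver (sketch
+ * only; see ns8390rcv() and ns8390xmt()): raise the spl, enable 16-bit
+ * access, do the wide copy, then disable it again before lowering the spl:
+ *
+ *	s = splhi();
+ *	en_16bit_access(hdwbase, board_id);
+ *	bcopy16(src, dst, len);
+ *	dis_16bit_access(hdwbase, board_id);
+ *	splx(s);
+ */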
+
+/*
+  This resets bit 7 (0 justified) of register offset 0x05.  It disables
+  16-bit host access to shared RAM while keeping the LAN16BIT bit high.
+  In addition, this routine maintains address bit 19
+  (previous cards assumed this bit high...we must do it manually).
+
+ note: this is a write only register
+*/
+
+dis_16bit_access (hdwbase, board_id)
+ caddr_t hdwbase;
+ long board_id;
+{
+ if (board_id & IFWD_INTERFACE_CHIP)
+ outb(hdwbase+IFWD_REG_5,
+ ((inb(hdwbase+IFWD_REG_5) & IFWD_REG5_MEM_MASK) |
+ IFWD_LAN16ENB));
+ else
+ outb(hdwbase+IFWD_REG_5, (IFWD_LAN16ENB | IFWD_LA19));
+}
+
+#endif
diff --git a/i386/i386at/if_ns8390.h b/i386/i386at/if_ns8390.h
new file mode 100644
index 00000000..9466c364
--- /dev/null
+++ b/i386/i386at/if_ns8390.h
@@ -0,0 +1,203 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Western Digital Mach Ethernet driver
+ * Copyright (c) 1990 OSF Research Institute
+ */
+/*
+ Copyright 1990 by Open Software Foundation,
+Cambridge, MA.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appears in all copies and
+that both the copyright notice and this permission notice appear in
+supporting documentation, and that the name of OSF or Open Software
+Foundation not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+ OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/********************************************/
+/* Defines for the NIC 8390 Lan Controller */
+/********************************************/
+
+
+/*-- page 0, rd --*/
+#define CR 0x00 /* Command Register */
+#define CLDA0 0x01 /* Current Local DMA Address 0 */
+#define CLDA1 0x02 /* Current Local DMA Address 1 */
+#define BNRY 0x03 /* Boundary Pointer */
+#define TSR 0x04 /* Transmit Status Register */
+#define NCR 0x05 /* Number of Collisions Register */
+#define FIFO 0x06 /* FIFO */
+#define ISR 0x07 /* Interrupt Status Register */
+#define CRDA0 0x08 /* Current Remote DMA Address 0 */
+#define CRDA1 0x09 /* Current Remote DMA Address 1 */
+/* 0x0A is reserved */
+/* 0x0B is reserved */
+#define RSR 0x0C /* Receive Status Register */
+#define CNTR0 0x0D /* Frame Alignment Errors */
+#define CNTR1 0x0E /* CRC Errors */
+#define CNTR2 0x0F /* Missed Packet Errors */
+
+/*-- page 0, wr --*/
+/* CR 0x00 Command Register */
+#define PSTART 0x01 /* Page Start Register */
+#define PSTOP 0x02 /* Page Stop Register */
+#define BNDY 0x03 /* Boundary Pointer */
+#define TPSR 0x04 /* Transmit Page Start Register */
+#define TBCR0 0x05 /* Transmit Byte Count Register 0*/
+#define TBCR1 0x06 /* Transmit Byte Count Register 1*/
+/* ISR 0x07 Interrupt Status Register */
+#define RSAR0 0x08 /* Remote Start Address Register 0 */
+#define RSAR1 0x09 /* Remote Start Address Register 1 */
+#define RBCR0 0x0A /* Remote Byte Count Register 0 */
+#define RBCR1 0x0B /* Remote Byte Count Register 1 */
+#define RCR 0x0C /* Receive Configuration Register */
+#define TCR 0x0D /* Transmit Configuration Register */
+#define DCR 0x0E /* Data Configuration Register */
+#define IMR 0x0F /* Interrupt Mask Register */
+
+/*-- page 1, rd and wr */
+/* CR 0x00 Control Register */
+#define PAR0 0x01 /* Physical Address Register 0 */
+#define PAR1 0x02 /* 1 */
+#define PAR2 0x03 /* 2 */
+#define PAR3 0x04 /* 3 */
+#define PAR4 0x05 /* 4 */
+#define PAR5 0x06 /* 5 */
+#define CURR 0x07 /* Current Page Register */
+#define MAR0 0x08 /* Multicast Address Register 0 */
+#define MAR1 0x09 /* 1 */
+#define MAR2 0x0A /* 2 */
+#define MAR3 0x0B /* 3 */
+#define MAR4 0x0C /* 4 */
+#define MAR5 0x0D /* 5 */
+#define MAR6 0x0E /* 6 */
+#define MAR7 0x0F /* 7 */
+
+/*-- page 2, rd --*/
+
+/*-- page 2, wr --*/
+
+/*-- Command Register CR description */
+#define STP 0x01 /* stop; software reset */
+#define STA 0x02 /* start */
+#define TXP 0x04 /* transmit packet */
+#define RD0 0x08
+#define RD1 0x10
+#define RD2 0x20
+#define RRD 0x08 /* remote DMA command - remote read */
+
+#define RWR 0x10 /* remote DMA command - remote write */
+#define SPK 0x18 /* remote DMA command - send packet */
+#define ABR 0x20 /* remote DMA command - abrt/cmplt remote DMA */
+
+#define PS0 0x00 /* register page select - 0 */
+#define PS1 0x40 /* register page select - 1 */
+#define PS2 0x80 /* register page select - 2 */
+
+#define PS0_STA 0x22 /* page select 0 with start bit maintained */
+#define PS1_STA 0x62 /* page select 1 with start bit maintained */
+#define PS2_STA 0x0A2 /* page select 2 with start bit maintained */
+
+/*-- Interrupt Status Register ISR description */
+#define PRX 0x01 /* packet received no error */
+#define PTX 0x02 /* packet transmitted no error */
+#define RXE 0x04 /* receive error */
+#define TXE 0x08 /* transmit error */
+#define OVW 0x10 /* overwrite warning */
+#define CNT 0x20 /* counter overflow */
+#define RDC 0x40 /* remote DMA complete */
+#define RST 0x80 /* reset status */
+
+/*-- Interrupt Mask Register IMR description */
+#define PRXE 0x01 /* packet received interrupt enable */
+#define PTXE 0x02 /* packet transmitted interrupt enable */
+#define RXEE 0x04 /* receive error interrupt enable */
+#define TXEE 0x08 /* transmit error interrupt enable */
+#define OVWE 0x10 /* overwrite warning interrupt enable */
+#define CNTE 0x20 /* counter overflow interrupt enable */
+#define RDCE 0x40 /* DMA complete interrupt enable */
+
+/*-- Data Configuration Register DCR description */
+#define WTS 0x01 /* word transfer select */
+#define BOS 0x02 /* byte order select */
+#define LAS 0x04 /* long address select */
+#define BMS 0x08 /* burst DMA select */
+#define AINIT 0x10 /* autoinitialize remote */
+
+#define FTB2 0x00 /* receive FIFO threshold select - 2 bytes */
+#define FTB4 0x20 /* receive FIFO threshold select - 4 bytes */
+#define FTB8 0x40 /* receive FIFO threshold select - 8 bytes */
+#define FTB12 0x60 /* receive FIFO threshold select - 12 bytes */
+
+/*-- Transmit Configuration Register TCR description */
+#define MCRC 0x01 /* manual crc generation */
+#define LB1 0x02 /* mode 1; internal loopback LPBK=0 */
+#define LB2 0x04 /* mode 2; internal loopback LPBK=1 */
+#define LB3 0x06 /* mode 3; internal loopback LPBK=0 */
+
+#define ATD 0x08 /* auto transmit disable */
+#define OFST 0x10 /* collision offset enable */
+
+/*-- Transmit Status Register TSR description --*/
+#define XMT 0x01 /* packet transmitted without error */
+#define COL 0x04 /* transmit collided */
+#define ABT 0x08 /* transmit aborted */
+#define CRS 0x10 /* carrier sense lost - xmit not aborted */
+#define FU 0x20 /* FIFO underrun */
+#define CDH 0x40 /* CD heartbeat */
+#define OWC 0x80 /* out of window collision - xmit not aborted */
+
+/*-- Receive Configuration Register RCR description --*/
+#define SEP 0x01 /* save error packets */
+#define AR 0x02 /* accept runt packet */
+#define AB 0x04 /* accept broadcast */
+#define AM 0x08 /* accept multicast */
+#define PRO 0x10 /* promiscuous physical */
+#define MON 0x20 /* monitor mode */
+
+/*--Receive Status Register RSR description --*/
+#define RCV 0x01 /* packet received intact */
+#define CRC 0x02 /* CRC error */
+#define FAE 0x04 /* frame alignment error */
+#define FO 0x08 /* FIFO overrun */
+#define MPA 0x10 /* missed packet */
+#define PHY 0x20 /* physical/multicast address */
+#define DIS 0x40 /* receiver disable */
+#define DFR 0x80 /* deferring */
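+
+/*
+ * Illustrative sketch (not part of the original sources): how the
+ * page-select bits in CR are typically used with the register offsets
+ * above.  "nic_base" is a hypothetical base I/O port, and inb()/outb()
+ * stand in for however the enclosing driver reaches the chip.
+ */
+#if 0	/* example only, never compiled */
+static void nic8390_example(u_short nic_base)
+{
+	u_char macaddr[6];
+	int i;
+
+	/* select register page 1, keeping the start bit, and read PAR0..PAR5 */
+	outb(nic_base + CR, PS1_STA);
+	for (i = 0; i < 6; i++)
+		macaddr[i] = inb(nic_base + PAR0 + i);
+
+	/* back to page 0; unmask the usual interrupt sources in IMR */
+	outb(nic_base + CR, PS0_STA);
+	outb(nic_base + IMR, PRXE | PTXE | RXEE | TXEE | OVWE | CNTE);
+
+	/* in the interrupt handler, ISR bits are acknowledged by writing them back */
+	if (inb(nic_base + ISR) & PRX)
+		outb(nic_base + ISR, PRX);
+}
+#endif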
diff --git a/i386/i386at/if_par.c b/i386/i386at/if_par.c
new file mode 100644
index 00000000..3995fadc
--- /dev/null
+++ b/i386/i386at/if_par.c
@@ -0,0 +1,456 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Parallel port network driver v1.1
+ * All rights reserved.
+ */
+
+/*
+ Subject: parallel network interface
+
+ The printer network driver has the following hardware requirements for the
+ interconnection cable:
+
+ Connections:
+ Side1 Side2 Function Side1 / Side2
+ Pin 5 Pin 10 Interrupt strobe: send status (w)/send status (r)
+ Pin 2 Pin 15 Data bits : write / read
+ Pin 3 Pin 13 Data bits : write / read
+ Pin 4 Pin 12 Data bits : write / read
+ Pin 6 Pin 11 Data bits : write / read
+ Pin 10 Pin 5
+ Pin 11 Pin 6
+ Pin 12 Pin 4
+ Pin 13 Pin 3
+ Pin 15 Pin 2
+ Pins 18-25 Pins 18-25 (ground interconnections)
+
+ The cable is "symmetric" in that either side can be plugged into either of the
+ computers.
+
+ The hardware requirements are as follows:
+ Port 0x378 must be writable with the following specifications:
+ Bit 4 -> pin 6
+ Bit 3 -> pin 5
+ Bit 2 -> pin 4
+ Bit 1 -> pin 3
+ Bit 0 -> pin 2
+ Port 0x379 must be readable with the following specifications:
+ Bit 7 <- pin 11
+ Bit 6 <- pin 10
+ Bit 5 <- pin 12
+ Bit 4 <- pin 13
+ Bit 3 <- pin 15
+ Port 0x37a must be readable and writable with the following specifications:
+ Bit 4 -> interrupt enable
+ So Port 0x378 connects to Port 0x379 as
+ Bit 3 -> pin 5 : pin 10 -> Bit 6 0x08 -> 0x40
+
+ Bit 4 -> pin 6 : pin 11 -> Bit 7 0x08<<1 -> ~ 0x80
+ Bit 2 -> pin 4 : pin 12 -> Bit 5 0x07 -> 0x38
+ Bit 1 -> pin 3 : pin 13 -> Bit 4 0x07 -> 0x38
+ Bit 0 -> pin 2 : pin 15 -> Bit 3 0x07 -> 0x38
+ [note: bit 0 is considered the least significant bit, pins on the connector
+ are numbered starting with 1, -> represents sending data out on the bus, <-
+ represents reading data from the bus]
+
+ Pins 1,7,8,9, and 16 are currently unused, and may be allowed to "float".
+
+ The data is sent in 4 bit "nybbles", with the highest 4 bits being sent first.
+
+ To bring up the interface, all that should be required is
+ ifconfig par0 <your ip address> <connected machine's ip address> up
+ and to bring down the interface
+ ifconfig par0 down
+ You may get a warning message (such as "printer out of paper") once you down
+ the interface: the port is monitored for either printer or network activity,
+ depending on whether par0 is up or down, so when you down the interface the
+ printer driver reads whatever is left on the port (which will be the last
+ message from the other computer).
+ */
+
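+/*
+ * Illustrative sketch (not part of the original driver): the bit
+ * shuffling that parstart() and parintr() below apply to one data byte,
+ * written out as pure functions.  Each nybble travels on output bits
+ * 4,2,1,0 (pins 6,4,3,2); output bit 3 (pin 5) is the strobe and also
+ * marks the high nybble.  On the receiving side the same bits appear as
+ * status bits 7,5,4,3, and status bit 7 (BUSY) reads inverted on a
+ * standard PC parallel port, hence the complements in the decode step.
+ */
+#if 0	/* example only, never compiled */
+/* value written to OUTPUT() for the high nybble of c (strobe set) */
+static u_char par_encode_hi(u_char c)
+{
+	return ((c & 0x80) >> 3) | ((c & 0x70) >> 4) | 0x08;
+}
+
+/* value written to OUTPUT() for the low nybble of c (strobe clear) */
+static u_char par_encode_lo(u_char c)
+{
+	return ((c & 0x08) << 1) | (c & 0x07);
+}
+
+/* rebuild the byte from the two status-port reads, as parintr() does */
+static u_char par_decode(u_char hi_status, u_char lo_status)
+{
+	return ((~hi_status) & 0x80) | ((hi_status & 0x38) << 1) |
+	       (((~lo_status) & 0x80) >> 4) | ((lo_status & 0x38) >> 3);
+}
+#endif
+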
+#include <par.h>
+#if NPAR > 0
+
+#include <kern/time_out.h>
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+
+#include <i386/ipl.h>
+#include <i386/pio.h>
+#include <chips/busses.h>
+#include <i386at/if_par.h>
+
+
+int parintr();
+int parioctl();
+int parattach();
+int paroutput();
+
+int (*oldvect)();
+int oldunit;
+
+extern struct bus_device *lprinfo[];
+
+int par_watch = 0;
+
+struct par_softc {
+ struct ifnet ds_if;
+ u_char ds_addr[6]; /* Ethernet hardware address */
+ u_char address[6];
+ char sc_buf[PARMTU+sizeof(struct ifnet *)];
+} par_softc[NPAR];
+
+void parintoff(unit)
+int unit;
+{
+	struct bus_device *lpdev = lprinfo[unit];
+
+ outb(INTR(lpdev->address), 0x07);
+ par_softc[unit].ds_if.if_flags &= ~IFF_RUNNING;
+ ivect[lpdev->sysdep1] = oldvect;
+ iunit[lpdev->sysdep1] = oldunit;
+}
+
+void parinit(unit)
+int unit;
+{
+	struct bus_device *lpdev = lprinfo[unit];
+
+ if (ivect[lpdev->sysdep1] != parintr) {
+ oldvect = ivect[lpdev->sysdep1];
+ oldunit = iunit[lpdev->sysdep1];
+ ivect[lpdev->sysdep1] = parintr;
+ iunit[lpdev->sysdep1] = unit;
+ }
+ outb(INTR(lpdev->address),0x11);
+ par_softc[unit].ds_if.if_flags |= IFF_RUNNING;
+ *(struct ifnet **)par_softc[unit].sc_buf = &par_softc[unit].ds_if;
+}
+
+struct ether_header par_eh;
+
+int parattach(dev)
+struct bus_device *dev;
+{
+ u_char unit = (u_char)dev->unit;
+ struct ifnet *ifp;
+ struct par_softc*sp;
+
+ if ((unit < 0) || (unit >= NPAR))
+ return(0);
+ printf("\n par%d: at lpr%d, port = %x, spl = %d, pic = %d. ",
+ unit, unit, dev->address, dev->sysdep, dev->sysdep1);
+
+ sp = &par_softc[unit];
+ ifp = &(sp->ds_if);
+
+ *(sp->ds_addr) = *(sp->address) = 0x11;
+ *(sp->ds_addr + 1) = *(sp->address + 1) = 0x22;
+ *(sp->ds_addr + 2) = *(sp->address + 2) = 0x33;
+ *(sp->ds_addr + 3) = *(sp->address + 3) = 0x44;
+ *(sp->ds_addr + 4) = *(sp->address + 4) = 0x55;
+ *(sp->ds_addr + 5) = *(sp->address + 5) = 0x66;
+
+ par_eh.ether_dhost[5] = par_eh.ether_shost[0] = 0x11;
+ par_eh.ether_dhost[4] = par_eh.ether_shost[1] = 0x22;
+ par_eh.ether_dhost[3] = par_eh.ether_shost[2] = 0x33;
+ par_eh.ether_dhost[2] = par_eh.ether_shost[3] = 0x44;
+ par_eh.ether_dhost[1] = par_eh.ether_shost[4] = 0x55;
+ par_eh.ether_dhost[0] = par_eh.ether_shost[5] = 0x66;
+ par_eh.ether_type = htons(0x0800);
+
+ printf("ethernet id [%x:%x:%x:%x:%x:%x]",
+ sp->address[0],sp->address[1],sp->address[2],
+ sp->address[3],sp->address[4],sp->address[5]);
+
+ ifp->if_unit = unit;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_POINTOPOINT;
+ ifp->if_header_size = sizeof(struct ether_header);
+ ifp->if_header_format = HDR_ETHERNET;
+ ifp->if_address_size = 6;
+ ifp->if_address = (char *)&par_softc[unit].address[0];
+ if_init_queues(ifp);
+ return(0);
+}
+
+int parstart(); /* forward */
+
+/*ARGSUSED*/
+paropen(dev, flag)
+ dev_t dev;
+ int flag;
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NPAR)
+ return (ENXIO);
+
+ par_softc[unit].ds_if.if_flags |= IFF_UP;
+ parinit(unit);
+ return(0);
+}
+
+paroutput(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NPAR)
+ return (ENXIO);
+ return (net_write(&par_softc[unit].ds_if, parstart, ior));
+}
+
+parsetinput(dev, receive_port, priority, filter, filter_count)
+ dev_t dev;
+ mach_port_t receive_port;
+ int priority;
+ filter_t filter[];
+ unsigned int filter_count;
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NPAR)
+ return (ENXIO);
+
+ return (net_set_filter(&par_softc[unit].ds_if,
+ receive_port, priority,
+ filter, filter_count));
+}
+
+int parstart(unit)
+{
+ struct ifnet *ifp = &(par_softc[unit].ds_if);
+ u_short addr = lprinfo[unit]->address;
+ struct sockaddr *dst;
+ int len, i;
+ spl_t s;
+ u_char *mcp, c;
+ io_req_t m;
+
+ if (!(ifp->if_flags & IFF_RUNNING)) {
+#ifdef WHY
+ m_free(m);
+ parintoff(unit);
+ return(ENETDOWN);
+#else WHY
+ parintoff(unit);
+ return(-1);
+#endif WHY
+ }
+ s = SPLNET();
+
+ IF_DEQUEUE(&ifp->if_snd, m);
+ if (m == 0) {
+ splx(s);
+ return 0;
+ }
+ len = m->io_count;
+ if (par_watch)
+ printf("O%d\n",len);
+ len -= 14 /* XXX */;
+ mcp = (u_char *)m->io_data + 14 /* XXX */;
+ while (len--) {
+ c=*mcp++;
+ outb(OUTPUT(addr),((c&0x80)>>3) | ((c&0x70)>>4) | 0x08);
+ i=MAXSPIN;
+ while (!(inb(INPUT(addr))&0x40) && --i);
+ outb(OUTPUT(addr),((c&0x08)<<1) | (c&0x07));
+ i=MAXSPIN;
+ while ((inb(INPUT(addr))&0x40) && --i);
+ }
+ outb(OUTPUT(addr),(((c&0x08)<<1) | (c&0x07))^0x17);
+ iodone(m);
+ splx(s);
+ return (0);
+}
+
+/*ARGSUSED*/
+pargetstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status; /* pointer to OUT array */
+ unsigned int *count; /* out */
+{
+ register int unit = minor(dev);
+
+ if (unit < 0 || unit >= NPAR)
+ return (ENXIO);
+
+
+ switch (flavor) {
+ case NET_DSTADDR:
+ return (D_SUCCESS);
+ break;
+ }
+
+ return (net_getstat(&par_softc[unit].ds_if,
+ flavor,
+ status,
+ count));
+}
+
+parsetstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status;
+ unsigned int count;
+{
+ register int unit = minor(dev);
+ register struct par_softc *sp;
+
+ if (unit < 0 || unit >= NPAR)
+ return (ENXIO);
+
+ sp = &par_softc[unit];
+
+ switch (flavor) {
+ case NET_STATUS:
+ {
+ /*
+ * All we can change are flags, and not many of those.
+ */
+ register struct net_status *ns = (struct net_status *)status;
+ int mode = 0;
+
+ if (count < NET_STATUS_COUNT)
+ return (D_INVALID_SIZE);
+
+#if 0
+ /* ha ha ha */
+ if (ns->flags & IFF_ALLMULTI)
+ mode |= MOD_ENAL;
+ if (ns->flags & IFF_PROMISC)
+ mode |= MOD_PROM;
+ /*
+ * Force a complete reset if the receive mode changes
+ * so that these take effect immediately.
+ */
+ if (sp->mode != mode) {
+ sp->mode = mode;
+ if (sp->flags & DSF_RUNNING) {
+ sp->flags &= ~(DSF_LOCK | DSF_RUNNING);
+ parinit(unit);
+ }
+ }
+#endif
+ break;
+ }
+ case NET_ADDRESS:
+ {
+ register union ether_cvt {
+ char addr[6];
+ int lwd[2];
+ } *ec = (union ether_cvt *)status;
+
+ if (count < sizeof(*ec)/sizeof(int))
+ return (D_INVALID_SIZE);
+
+ ec->lwd[0] = ntohl(ec->lwd[0]);
+ ec->lwd[1] = ntohl(ec->lwd[1]);
+/* at3c501seteh(sp->base, ec->addr);*/
+ break;
+ }
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+int parintr(unit)
+int unit;
+{
+ register struct par_softc *sp = &par_softc[unit];
+ u_short addr = lprinfo[unit]->address;
+ char *trav = sp->sc_buf;
+ short len = 0;
+ u_char c, c2;
+ int i;
+ ipc_kmsg_t new_kmsg;
+ struct ether_header *ehp;
+ struct packet_header *pkt;
+ struct ifnet *ifp = &(sp->ds_if);
+
+ do {
+ c2=inb(INPUT(addr));
+ outb(OUTPUT(addr),0x08);
+ i=MAXSPIN;
+ while(((c=inb(INPUT(addr)))&0x40) && --i);
+
+ c = inb(INPUT(addr));
+ outb(OUTPUT(addr),0x00);
+ if (!i)
+ break;
+
+ if (++len > ETHERMTU) {
+ trav = sp->sc_buf;
+ len = 0;
+ continue;
+ }
+ *trav++ = ((~c2)&0x80) | ((c2&0x38)<<1) | (((~c)&0x80)>>4) | ((c&0x38)>>3);
+ i=MAXSPIN;
+ while (!((c2=inb(INPUT(addr)))&0x40) && --i)
+ if (((c2^0xb8)&0xf8) == (c&0xf8))
+ goto end;
+ } while (i);
+end:
+ if (len < 20) /* line noise ? */
+ return;
+ if (par_watch)
+ printf("I%d\n",len);
+
+ new_kmsg = net_kmsg_get();
+ if (new_kmsg == IKM_NULL) {
+ /*
+ * Drop the packet.
+ */
+ sp->ds_if.if_rcvdrops++;
+ return;
+ }
+ ehp = (struct ether_header *) (&net_kmsg(new_kmsg)->header[0]);
+ pkt = (struct packet_header *) (&net_kmsg(new_kmsg)->packet[0]);
+ *ehp = par_eh;
+
+ bcopy (sp->sc_buf, (char *) (pkt + 1), len);
+
+ pkt->type = ehp->ether_type;
+ pkt->length = len + sizeof(struct packet_header);
+ /*
+ * Hand the packet to the network module.
+ */
+ net_packet(ifp, new_kmsg, pkt->length,
+ ethernet_priority(new_kmsg));
+ return(0);
+}
+#endif
diff --git a/i386/i386at/if_par.h b/i386/i386at/if_par.h
new file mode 100644
index 00000000..2cb7ed5d
--- /dev/null
+++ b/i386/i386at/if_par.h
@@ -0,0 +1,36 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Parallel port network driver v1.0
+ * All rights reserved.
+ */
+#define OUTPUT(addr) (addr + 0)
+#define INPUT(addr) (addr + 1)
+#define INTR(addr) (addr + 2)
+
+#define SPLNET spl6
+#define PARMTU 8192
+#define MAXSPIN 10000
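+
+/*
+ * Illustrative note (not part of the original header): OUTPUT, INPUT
+ * and INTR are simply the data, status and control registers of a PC
+ * parallel port.  For the first port described in if_par.c the base is
+ * 0x378; "lpbase" below is a hypothetical name for that base address.
+ */
+#if 0	/* example only, never compiled */
+static void if_par_port_example(void)
+{
+	u_short lpbase = 0x378;		/* first parallel port, per if_par.c */
+
+	outb(OUTPUT(lpbase), 0x00);	/* 0x378: data lines driving the cable */
+	(void) inb(INPUT(lpbase));	/* 0x379: status lines read from the cable */
+	outb(INTR(lpbase), 0x11);	/* 0x37a: control; bit 4 enables the
+					   interrupt (the value parinit() writes) */
+}
+#endif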
diff --git a/i386/i386at/if_pc586.c b/i386/i386at/if_pc586.c
new file mode 100644
index 00000000..195ce7d6
--- /dev/null
+++ b/i386/i386at/if_pc586.c
@@ -0,0 +1,2076 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Olivetti PC586 Mach Ethernet driver v1.0
+ * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989
+ * All rights reserved.
+ *
+ */
+
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * NOTE:
+ * by rvb:
+ * 1. The best book on the 82586 is:
+ * LAN Components User's Manual by Intel
+ * The copy I found was dated 1984. This really tells you
+ *	   what the state machines are doing.
+ * 2. In the current design, we only do one write at a time,
+ * though the hardware is capable of chaining and possibly
+ * even batching. The problem is that we only make one
+ * transmit buffer available in sram space.
+ * 3.
+ * n. Board Memory Map
+ RFA/FD 0 - 227 0x228 bytes
+ 226 = 0x19 * 0x16 bytes
+ RBD 228 - 3813 0x35ec bytes
+ 35e8 = 0x19 * 0x228 bytes
+ == 0x0a bytes (bd) + 2 bytes + 21c bytes
+ CU 3814 - 3913 0x100 bytes
+ TBD 3914 - 39a3 0x90 bytes
+ 90 = No 18 * 0x08 bytes
+ TBUF 39a4 - 3fdd 0x63a bytes (= 1594(10))
+ SCB 3fde - 3fed 0x10 bytes
+ ISCP 3fee - 3ff5 0x08 bytes
+ SCP 3ff6 - 3fff 0x0a bytes
+ *
+ */
+
+/*
+ * NOTE:
+ *
+ * Currently this driver doesn't support trailer protocols for
+ * packets. Once that is added, please remove this comment.
+ *
+ * Also, some lacking material includes the DLI code. If you
+ * are compiling this driver with DLI set, look out: that code
+ * has not been looked at.
+ *
+ */
+
+#define DEBUG
+#define IF_CNTRS MACH
+#define NDLI 0
+
+#include <pc586.h>
+
+#ifdef MACH_KERNEL
+#include <kern/time_out.h>
+#include <device/device_types.h>
+#include <device/errno.h>
+#include <device/io_req.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/net_status.h>
+#include <device/net_io.h>
+#else MACH_KERNEL
+#include <sys/param.h>
+#include <mach/machine/vm_param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/buf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/vmmac.h>
+#include <sys/ioctl.h>
+#include <sys/errno.h>
+#include <sys/syslog.h>
+
+#include <net/if.h>
+#include <net/netisr.h>
+#include <net/route.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/if_ether.h>
+#endif
+
+#ifdef NS
+#include <netns/ns.h>
+#include <netns/ns_if.h>
+#endif
+
+#if DLI
+#include <net/dli_var.h>
+struct dli_var de_dlv[NDE];
+#endif DLI
+#endif MACH_KERNEL
+
+#include <i386/ipl.h>
+#include <mach/vm_param.h>
+#include <vm/vm_kern.h>
+#include <chips/busses.h>
+#include <i386at/if_pc586.h>
+
+#define SPLNET spl6
+#if __STDC__
+#define CMD(x, y, unit) *(u_short *)(pc_softc[unit].prom + OFFSET_ ## x) = (u_short) (y)
+#else __STDC__
+#define CMD(x, y, unit) *(u_short *)(pc_softc[unit].prom + OFFSET_/**/x) = (u_short) (y)
+#endif __STDC__
+
+#define pc586chatt(unit) CMD(CHANATT, 0x0001, unit)
+#define pc586inton(unit) CMD(INTENAB, CMD_1, unit)
+#define pc586intoff(unit) CMD(INTENAB, CMD_0, unit)
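+
+/*
+ * Illustrative note (not part of the original source): with the ANSI
+ * (token-pasting) branch of CMD above, a call such as
+ *
+ *	CMD(CHANATT, 0x0001, unit);
+ *
+ * expands to
+ *
+ *	*(u_short *)(pc_softc[unit].prom + OFFSET_CHANATT) = (u_short) (0x0001);
+ *
+ * i.e. each "command" is just a 16-bit store into the board's
+ * memory-mapped control area at a fixed offset.
+ */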
+
+int pc586probe();
+void pc586attach();
+int pc586intr(), pc586init(), pc586output(), pc586ioctl(), pc586reset();
+int pc586watch(), pc586rcv(), pc586xmt(), pc586bldcu();
+int pc586diag(), pc586config();
+char *pc586bldru();
+char *ram_to_ptr();
+u_short ptr_to_ram();
+
+static vm_offset_t pc586_std[NPC586] = { 0 };
+static struct bus_device *pc586_info[NPC586];
+struct bus_driver pcdriver =
+ {pc586probe, 0, pc586attach, 0, pc586_std, "pc", pc586_info, 0, 0, 0};
+
+char t_packet[ETHERMTU + sizeof(struct ether_header) + sizeof(long)];
+int xmt_watch = 0;
+
+typedef struct {
+#ifdef MACH_KERNEL
+ struct ifnet ds_if; /* generic interface header */
+ u_char ds_addr[6]; /* Ethernet hardware address */
+#else MACH_KERNEL
+ struct arpcom pc586_ac;
+#define ds_if pc586_ac.ac_if
+#define ds_addr pc586_ac.ac_enaddr
+#endif MACH_KERNEL
+ int flags;
+ int seated;
+ int timer;
+ int open;
+ fd_t *begin_fd;
+ fd_t *end_fd;
+ rbd_t *end_rbd;
+ char *prom;
+ char *sram;
+ int tbusy;
+ short mode;
+} pc_softc_t;
+pc_softc_t pc_softc[NPC586];
+
+struct pc586_cntrs {
+ struct {
+ u_int xmt, xmti;
+ u_int defer;
+ u_int busy;
+ u_int sleaze, intrinsic, intrinsic_count;
+ u_int chain;
+ } xmt;
+ struct {
+ u_int rcv;
+ u_int ovw;
+ u_int crc;
+ u_int frame;
+ u_int rscerrs, ovrnerrs;
+ u_int partial, bad_chain, fill;
+ } rcv;
+ u_int watch;
+} pc586_cntrs[NPC586];
+
+
+#ifdef IF_CNTRS
+int pc586_narp = 1, pc586_arp = 0;
+int pc586_ein[32], pc586_eout[32];
+int pc586_lin[128/8], pc586_lout[128/8];
+/*
+ * Integer log base 2: returns the index of the highest set bit in "no"
+ * (0 when no == 0).
+ */
+static
+log_2(no)
+unsigned long no;
+{
+ return ({ unsigned long _temp__;
+ asm("bsr %1, %0; jne 0f; xorl %0, %0; 0:" :
+ "=r" (_temp__) : "a" (no));
+ _temp__;});
+}
+#endif IF_CNTRS
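+
+/*
+ * Illustrative note (not part of the original driver): the IF_CNTRS
+ * arrays above are packet-length histograms.  pc586_ein/pc586_eout
+ * bucket received and transmitted frames by power of two (indexed by
+ * log_2(length)); pc586_lin/pc586_lout count short frames in 8-byte
+ * bins.  A sketch of how the receive path fills them for one
+ * hypothetical frame:
+ */
+#if 0	/* example only, never compiled */
+	{
+		int clen = 1514 + 4;		/* a full-size frame + 4-byte CRC */
+
+		pc586_ein[log_2(clen)]++;	/* log_2(1518) == 10 */
+		if (clen < 128)
+			pc586_lin[clen>>3]++;	/* skipped here: 1518 >= 128 */
+	}
+#endif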
+
+/*
+ * pc586probe:
+ *
+ * This function "probes" or checks for the pc586 board on the bus to see
+ * if it is there. As far as I can tell, the best break between this
+ * routine and the attach code is to simply determine whether the board
+ * is configured in properly. Currently my approach to this is to write
+ * and read a word from the SRAM on the board being probed. If the word
+ * comes back properly then we assume the board is there. The config
+ * code expects to see a successful return from the probe routine before
+ * attach will be called.
+ *
+ * input : address device is mapped to, and unit # being checked
+ * output : a '1' is returned if the board exists, and a 0 otherwise
+ *
+ */
+pc586probe(port, dev)
+struct bus_device *dev;
+{
+ caddr_t addr = (caddr_t)dev->address;
+ int unit = dev->unit;
+ int len = round_page(0x4000);
+ int sram_len = round_page(0x4000);
+ extern vm_offset_t phys_last_addr;
+ int i;
+ volatile char *b_prom;
+ volatile char *b_sram;
+ volatile u_short*t_ps;
+
+	if ((unit < 0) || (unit >= NPC586)) {
+ printf("pc%d: board out of range [0..%d]\n",
+ unit, NPC586);
+ return(0);
+ }
+ if ((addr > (caddr_t)0x100000) && (addr < (caddr_t)phys_last_addr))
+ return 0;
+
+ if (kmem_alloc_pageable(kernel_map, (vm_offset_t *) &b_prom, len)
+ != KERN_SUCCESS) {
+ printf("pc%d: can not allocate memory for prom.\n", unit);
+ return 0;
+ }
+ if (kmem_alloc_pageable(kernel_map, (vm_offset_t *) &b_sram, sram_len)
+ != KERN_SUCCESS) {
+ printf("pc%d: can not allocate memory for sram.\n", unit);
+ return 0;
+ }
+ (void)pmap_map(b_prom, (vm_offset_t)addr,
+ (vm_offset_t)addr+len,
+ VM_PROT_READ | VM_PROT_WRITE);
+ if ((int)addr > 0x100000) /* stupid hardware */
+ addr += EXTENDED_ADDR;
+ addr += 0x4000; /* sram space */
+ (void)pmap_map(b_sram, (vm_offset_t)addr,
+ (vm_offset_t)addr+sram_len,
+ VM_PROT_READ | VM_PROT_WRITE);
+
+ *(b_prom + OFFSET_RESET) = 1;
+ { int i; for (i = 0; i < 1000; i++); /* 4 clocks at 6Mhz */}
+ *(b_prom + OFFSET_RESET) = 0;
+ t_ps = (u_short *)(b_sram + OFFSET_SCB);
+ *(t_ps) = (u_short)0x5a5a;
+ if (*(t_ps) != (u_short)0x5a5a) {
+ kmem_free(kernel_map, b_prom, len);
+ kmem_free(kernel_map, b_sram, sram_len);
+ return(0);
+ }
+	t_ps = (u_short *)(b_prom + OFFSET_PROM);
+#define ETHER0 0x00
+#define ETHER1 0xaa
+#define ETHER2 0x00
+ if ((t_ps[0]&0xff) == ETHER0 &&
+ (t_ps[1]&0xff) == ETHER1 &&
+ (t_ps[2]&0xff) == ETHER2)
+ pc_softc[unit].seated = TRUE;
+#undef ETHER0
+#undef ETHER1
+#undef ETHER2
+#define ETHER0 0x00
+#define ETHER1 0x00
+#define ETHER2 0x1c
+ if ((t_ps[0]&0xff) == ETHER0 ||
+ (t_ps[1]&0xff) == ETHER1 ||
+ (t_ps[2]&0xff) == ETHER2)
+ pc_softc[unit].seated = TRUE;
+#undef ETHER0
+#undef ETHER1
+#undef ETHER2
+ if (pc_softc[unit].seated != TRUE) {
+ kmem_free(kernel_map, b_prom, len);
+ kmem_free(kernel_map, b_sram, sram_len);
+ return(0);
+ }
+ (volatile char *)pc_softc[unit].prom = (volatile char *)b_prom;
+ (volatile char *)pc_softc[unit].sram = (volatile char *)b_sram;
+ return(1);
+}
+
+/*
+ * pc586attach:
+ *
+ * This function attaches a PC586 board to the "system". The rest of
+ * runtime structures are initialized here (this routine is called after
+ * a successful probe of the board). Once the ethernet address is read
+ * and stored, the board's ifnet structure is attached and readied.
+ *
+ * input : bus_device structure setup in autoconfig
+ * output : board structs and ifnet is setup
+ *
+ */
+void pc586attach(dev)
+ struct bus_device *dev;
+{
+ struct ifnet *ifp;
+ u_char *addr_p;
+ u_short *b_addr;
+ u_char unit = (u_char)dev->unit;
+ pc_softc_t *sp = &pc_softc[unit];
+ volatile scb_t *scb_p;
+
+ take_dev_irq(dev);
+ printf(", port = %x, spl = %d, pic = %d. ",
+ dev->address, dev->sysdep, dev->sysdep1);
+
+ sp->timer = -1;
+ sp->flags = 0;
+ sp->mode = 0;
+ sp->open = 0;
+ CMD(RESET, CMD_1, unit);
+ { int i; for (i = 0; i < 1000; i++); /* 4 clocks at 6Mhz */}
+ CMD(RESET, CMD_0, unit);
+ b_addr = (u_short *)(sp->prom + OFFSET_PROM);
+ addr_p = (u_char *)sp->ds_addr;
+ addr_p[0] = b_addr[0];
+ addr_p[1] = b_addr[1];
+ addr_p[2] = b_addr[2];
+ addr_p[3] = b_addr[3];
+ addr_p[4] = b_addr[4];
+ addr_p[5] = b_addr[5];
+ printf("ethernet id [%x:%x:%x:%x:%x:%x]",
+ addr_p[0], addr_p[1], addr_p[2],
+ addr_p[3], addr_p[4], addr_p[5]);
+
+ scb_p = (volatile scb_t *)(sp->sram + OFFSET_SCB);
+ scb_p->scb_crcerrs = 0; /* initialize counters */
+ scb_p->scb_alnerrs = 0;
+ scb_p->scb_rscerrs = 0;
+ scb_p->scb_ovrnerrs = 0;
+
+ ifp = &(sp->ds_if);
+ ifp->if_unit = unit;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST;
+#ifdef MACH_KERNEL
+ ifp->if_header_size = sizeof(struct ether_header);
+ ifp->if_header_format = HDR_ETHERNET;
+ ifp->if_address_size = 6;
+ ifp->if_address = (char *)&sp->ds_addr[0];
+ if_init_queues(ifp);
+#else MACH_KERNEL
+ ifp->if_name = "pc";
+ ifp->if_init = pc586init;
+ ifp->if_output = pc586output;
+ ifp->if_ioctl = pc586ioctl;
+ ifp->if_reset = pc586reset;
+ ifp->if_next = NULL;
+ if_attach(ifp);
+#endif MACH_KERNEL
+}
+
+/*
+ * pc586reset:
+ *
+ * This routine is in part an entry point for the "if" code.  Most of
+ * the actual initialization has already (we hope) been done by
+ * calling pc586attach().
+ *
+ * input : unit number or board number to reset
+ * output : board is reset
+ *
+ */
+pc586reset(unit)
+int unit;
+{
+ pc_softc[unit].ds_if.if_flags &= ~IFF_RUNNING;
+ pc_softc[unit].flags &= ~(DSF_LOCK|DSF_RUNNING);
+ return(pc586init(unit));
+
+}
+
+/*
+ * pc586init:
+ *
+ * Another routine that interfaces the "if" layer to this driver.
+ * It simply resets the structures that are used by "upper layers",
+ * as well as calling pc586hwrst(), which does reset the pc586 board.
+ *
+ * input : board number
+ * output : structures (if structs) and board are reset
+ *
+ */
+pc586init(unit)
+int unit;
+{
+ struct ifnet *ifp;
+ int stat;
+ spl_t oldpri;
+
+ ifp = &(pc_softc[unit].ds_if);
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ if (ifp->if_addrlist == (struct ifaddr *)0) {
+ return;
+ }
+#endif MACH_KERNEL
+ oldpri = SPLNET();
+ if ((stat = pc586hwrst(unit)) == TRUE) {
+#ifdef MACH_KERNEL
+#undef HZ
+#define HZ hz
+#endif MACH_KERNEL
+ timeout(pc586watch, &(ifp->if_unit), 5*HZ);
+ pc_softc[unit].timer = 5;
+
+ pc_softc[unit].ds_if.if_flags |= IFF_RUNNING;
+ pc_softc[unit].flags |= DSF_RUNNING;
+ pc_softc[unit].tbusy = 0;
+ pc586start(unit);
+#if DLI
+ dli_init();
+#endif DLI
+ } else
+ printf("pc%d init(): trouble resetting board.\n", unit);
+ splx(oldpri);
+ return(stat);
+}
+
+#ifdef MACH_KERNEL
+/*ARGSUSED*/
+pc586open(dev, flag)
+ dev_t dev;
+ int flag;
+{
+ register int unit;
+ pc_softc_t *sp;
+
+ unit = minor(dev); /* XXX */
+ if (unit < 0 || unit >= NPC586 || !pc_softc[unit].seated)
+ return (ENXIO);
+
+ pc_softc[unit].ds_if.if_flags |= IFF_UP;
+ pc586init(unit);
+ return (0);
+}
+#endif MACH_KERNEL
+
+/*
+ * pc586start:
+ *
+ * This is yet another interface routine that simply tries to output a
+ * packet held in an mbuf after a reset.
+ *
+ * input : board number
+ * output : stuff sent to board if any there
+ *
+ */
+pc586start(unit)
+int unit;
+{
+#ifdef MACH_KERNEL
+ io_req_t m;
+#else MACH_KERNEL
+ struct mbuf *m;
+#endif MACH_KERNEL
+ struct ifnet *ifp;
+ register pc_softc_t *is = &pc_softc[unit];
+ volatile scb_t *scb_p = (volatile scb_t *)(pc_softc[unit].sram + OFFSET_SCB);
+
+ if (is->tbusy) {
+ if (!(scb_p->scb_status & 0x0700)) { /* ! IDLE */
+ is->tbusy = 0;
+ pc586_cntrs[unit].xmt.busy++;
+ /*
+			 * This is probably just a race.  The xmt'r just
+			 * became idle, but WE have masked interrupts so ...
+ */
+ if (xmt_watch) printf("!!");
+ } else
+ return;
+ }
+
+ ifp = &(pc_softc[unit].ds_if);
+ IF_DEQUEUE(&ifp->if_snd, m);
+#ifdef MACH_KERNEL
+ if (m != 0)
+#else MACH_KERNEL
+ if (m != (struct mbuf *)0)
+#endif MACH_KERNEL
+ {
+ is->tbusy++;
+ pc586_cntrs[unit].xmt.xmt++;
+ pc586xmt(unit, m);
+ }
+ return;
+}
+
+/*
+ * pc586read:
+ *
+ * This routine does the actual copy of data (including ethernet header
+ * structure) from the pc586 to an mbuf chain that will be passed up
+ * to the "if" (network interface) layer. NOTE: we currently
+ * don't handle trailer protocols, so if that is needed, it will
+ * (at least in part) be added here.  For simplicity's sake, this
+ * routine copies the receive buffers from the board into a local (stack)
+ * buffer until the frame has been copied from the board. Once in
+ * the local buffer, the contents are copied to an mbuf chain that
+ * is then enqueued onto the appropriate "if" queue.
+ *
+ * input  : board number, and a frame descriptor pointer
+ * output : the packet is put into an mbuf chain, and passed up
+ * assumes : if any errors occur, packet is "dropped on the floor"
+ *
+ */
+pc586read(unit, fd_p)
+int unit;
+fd_t *fd_p;
+{
+ register pc_softc_t *is = &pc_softc[unit];
+ register struct ifnet *ifp = &is->ds_if;
+ struct ether_header eh;
+#ifdef MACH_KERNEL
+ ipc_kmsg_t new_kmsg;
+ struct ether_header *ehp;
+ struct packet_header *pkt;
+ char *dp;
+#else MACH_KERNEL
+ struct mbuf *m, *tm;
+#endif MACH_KERNEL
+ rbd_t *rbd_p;
+ u_char *buffer_p;
+ u_char *mb_p;
+ u_short mlen, len, clen;
+ u_short bytes_in_msg, bytes_in_mbuf, bytes;
+
+
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
+ printf("pc%d read(): board is not running.\n", ifp->if_unit);
+ pc586intoff(ifp->if_unit);
+ }
+ pc586_cntrs[unit].rcv.rcv++;
+#ifdef MACH_KERNEL
+ new_kmsg = net_kmsg_get();
+ if (new_kmsg == IKM_NULL) {
+ /*
+ * Drop the received packet.
+ */
+ is->ds_if.if_rcvdrops++;
+
+ /*
+ * not only do we want to return, we need to drop the packet on
+ * the floor to clear the interrupt.
+ */
+ return 1;
+ }
+ ehp = (struct ether_header *) (&net_kmsg(new_kmsg)->header[0]);
+ pkt = (struct packet_header *)(&net_kmsg(new_kmsg)->packet[0]);
+
+ /*
+ * Get ether header.
+ */
+ ehp->ether_type = fd_p->length;
+ len = sizeof(struct ether_header);
+ bcopy16(fd_p->source, ehp->ether_shost, ETHER_ADD_SIZE);
+ bcopy16(fd_p->destination, ehp->ether_dhost, ETHER_ADD_SIZE);
+
+ /*
+ * Get packet body.
+ */
+ dp = (char *)(pkt + 1);
+
+ rbd_p = (rbd_t *)ram_to_ptr(fd_p->rbd_offset, unit);
+ if (rbd_p == 0) {
+ printf("pc%d read(): Invalid buffer\n", unit);
+ if (pc586hwrst(unit) != TRUE) {
+ printf("pc%d read(): hwrst trouble.\n", unit);
+ }
+ net_kmsg_put(new_kmsg);
+ return 0;
+ }
+
+ do {
+ buffer_p = (u_char *)(pc_softc[unit].sram + rbd_p->buffer_addr);
+ bytes_in_msg = rbd_p->status & RBD_SW_COUNT;
+ bcopy16((u_short *)buffer_p,
+ (u_short *)dp,
+ (bytes_in_msg + 1) & ~1); /* but we know it's even */
+ len += bytes_in_msg;
+ dp += bytes_in_msg;
+ if (rbd_p->status & RBD_SW_EOF)
+ break;
+ rbd_p = (rbd_t *)ram_to_ptr(rbd_p->next_rbd_offset, unit);
+ } while ((int) rbd_p);
+
+ pkt->type = ehp->ether_type;
+ pkt->length =
+ len - sizeof(struct ether_header)
+ + sizeof(struct packet_header);
+
+ /*
+ * Send the packet to the network module.
+ */
+ net_packet(ifp, new_kmsg, pkt->length, ethernet_priority(new_kmsg));
+ return 1;
+#else MACH_KERNEL
+ eh.ether_type = ntohs(fd_p->length);
+ bcopy16(fd_p->source, eh.ether_shost, ETHER_ADD_SIZE);
+ bcopy16(fd_p->destination, eh.ether_dhost, ETHER_ADD_SIZE);
+
+ if ((rbd_p =(rbd_t *)ram_to_ptr(fd_p->rbd_offset, unit))== (rbd_t *)NULL) {
+ printf("pc%d read(): Invalid buffer\n", unit);
+ if (pc586hwrst(unit) != TRUE) {
+ printf("pc%d read(): hwrst trouble.\n", unit);
+ }
+ return 0;
+ }
+
+ bytes_in_msg = rbd_p->status & RBD_SW_COUNT;
+ buffer_p = (u_char *)(pc_softc[unit].sram + rbd_p->buffer_addr);
+ MGET(m, M_DONTWAIT, MT_DATA);
+ tm = m;
+ if (m == (struct mbuf *)0) {
+ /*
+ * not only do we want to return, we need to drop the packet on
+ * the floor to clear the interrupt.
+ *
+ */
+ printf("pc%d read(): No mbuf 1st\n", unit);
+ if (pc586hwrst(unit) != TRUE) {
+ pc586intoff(unit);
+ printf("pc%d read(): hwrst trouble.\n", unit);
+ pc_softc[unit].timer = 0;
+ }
+ return 0;
+ }
+	m->m_next = (struct mbuf *) 0;
+ m->m_len = MLEN;
+ if (bytes_in_msg > 2 * MLEN - sizeof (struct ifnet **)) {
+ MCLGET(m);
+ }
+ /*
+ * first mbuf in the packet must contain a pointer to the
+ * ifnet structure. other mbufs that follow and make up
+ * the packet do not need this pointer in the mbuf.
+ *
+ */
+ *(mtod(tm, struct ifnet **)) = ifp;
+ mlen = sizeof (struct ifnet **);
+ clen = mlen;
+ bytes_in_mbuf = m->m_len - sizeof(struct ifnet **);
+ mb_p = mtod(tm, u_char *) + sizeof (struct ifnet **);
+ bytes = min(bytes_in_mbuf, bytes_in_msg);
+ do {
+ if (bytes & 1)
+ len = bytes + 1;
+ else
+ len = bytes;
+ bcopy16(buffer_p, mb_p, len);
+ clen += bytes;
+ mlen += bytes;
+
+ if (!(bytes_in_mbuf -= bytes)) {
+ MGET(tm->m_next, M_DONTWAIT, MT_DATA);
+ tm = tm->m_next;
+ if (tm == (struct mbuf *)0) {
+ m_freem(m);
+ printf("pc%d read(): No mbuf nth\n", unit);
+ if (pc586hwrst(unit) != TRUE) {
+ pc586intoff(unit);
+ printf("pc%d read(): hwrst trouble.\n", unit);
+ pc_softc[unit].timer = 0;
+ }
+ return 0;
+ }
+ mlen = 0;
+ tm->m_len = MLEN;
+ bytes_in_mbuf = MLEN;
+ mb_p = mtod(tm, u_char *);
+ } else
+ mb_p += bytes;
+
+ if (!(bytes_in_msg -= bytes)) {
+ if (rbd_p->status & RBD_SW_EOF ||
+ (rbd_p = (rbd_t *)ram_to_ptr(rbd_p->next_rbd_offset, unit)) ==
+ NULL) {
+ tm->m_len = mlen;
+ break;
+ } else {
+ bytes_in_msg = rbd_p->status & RBD_SW_COUNT;
+ buffer_p = (u_char *)(pc_softc[unit].sram + rbd_p->buffer_addr);
+ }
+ } else
+ buffer_p += bytes;
+
+ bytes = min(bytes_in_mbuf, bytes_in_msg);
+ } while(1);
+#ifdef IF_CNTRS
+/* clen -= sizeof (struct ifnet **); */
+	clen += 4 /* crc */;
+ clen += sizeof (struct ether_header);
+ pc586_ein[log_2(clen)]++;
+ if (clen < 128) pc586_lin[clen>>3]++;
+
+ if (eh.ether_type == ETHERTYPE_ARP) {
+ pc586_arp++;
+ if (pc586_narp) {
+ pc586_ein[log_2(clen)]--;
+ if (clen < 128) pc586_lin[clen>>3]--;
+ }
+ }
+#endif IF_CNTRS
+ /*
+ * received packet is now in a chain of mbuf's. next step is
+ * to pass the packet upwards.
+ *
+ */
+ pc586send_packet_up(m, &eh, is);
+ return 1;
+#endif MACH_KERNEL
+}
+
+/*
+ * Send a packet composed of an mbuf chain to the higher levels
+ *
+ */
+#ifndef MACH_KERNEL
+pc586send_packet_up(m, eh, is)
+struct mbuf *m;
+struct ether_header *eh;
+pc_softc_t *is;
+{
+ register struct ifqueue *inq;
+ spl_t opri;
+
+ switch (eh->ether_type) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ schednetisr(NETISR_IP);
+ inq = &ipintrq;
+ break;
+ case ETHERTYPE_ARP:
+ arpinput(&is->pc586_ac, m);
+ return;
+#endif
+#ifdef NS
+ case ETHERTYPE_NS:
+ schednetisr(NETISR_NS);
+ inq = &nsintrq;
+ break;
+#endif
+ default:
+#if DLI
+ {
+ eh.ether_type = htons(eh.ether_type);
+ dli_input(m,eh.ether_type,&eh.ether_shost[0],
+ &de_dlv[ds->ds_if.if_unit], &eh);
+ }
+#else DLI
+ m_freem(m);
+#endif DLI
+ return;
+ }
+ opri = SPLNET();
+ if (IF_QFULL(inq)) {
+ IF_DROP(inq);
+ splx(opri);
+ m_freem(m);
+ return;
+ }
+ IF_ENQUEUE(inq, m);
+ splx(opri);
+ return;
+}
+#endif MACH_KERNEL
+
+#ifdef MACH_KERNEL
+pc586output(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ register int unit;
+
+ unit = minor(dev); /* XXX */
+ if (unit < 0 || unit >= NPC586 || !pc_softc[unit].seated)
+ return (ENXIO);
+
+ return (net_write(&pc_softc[unit].ds_if, pc586start, ior));
+}
+
+pc586setinput(dev, receive_port, priority, filter, filter_count)
+ dev_t dev;
+ mach_port_t receive_port;
+ int priority;
+ filter_t filter[];
+ unsigned int filter_count;
+{
+ register int unit = minor(dev);
+ if (unit < 0 || unit >= NPC586 || !pc_softc[unit].seated)
+ return (ENXIO);
+
+ return (net_set_filter(&pc_softc[unit].ds_if,
+ receive_port, priority,
+ filter, filter_count));
+}
+#else MACH_KERNEL
+/*
+ * pc586output:
+ *
+ * This routine is called by the "if" layer to output a packet to
+ * the network. This code resolves the local ethernet address, and
+ * puts it into the mbuf if there is room. If not, then a new mbuf
+ * is allocated with the header information and precedes the data
+ * to be transmitted. The routines that actually transmit the
+ * data (pc586xmt()) expect the ethernet structure to precede
+ * the data in the mbuf. This information is required by the
+ * 82586's transfer command segment, and thus mbuf's cannot
+ * be simply "slammed" out onto the network.
+ *
+ * input: ifnet structure pointer, an mbuf with data, and address
+ * to be resolved
+ * output: mbuf is updated to hold enet address, or a new mbuf
+ * with the address is added
+ *
+ */
+pc586output(ifp, m0, dst)
+struct ifnet *ifp;
+struct mbuf *m0;
+struct sockaddr *dst;
+{
+ register pc_softc_t *is = &pc_softc[ifp->if_unit];
+ register struct mbuf *m = m0;
+ int type, error;
+ spl_t opri;
+ u_char edst[6];
+ struct in_addr idst;
+ register struct ether_header *eh;
+ register int off;
+ int usetrailers;
+
+ if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
+ printf("pc%d output(): board is not running.\n", ifp->if_unit);
+ pc586intoff(ifp->if_unit);
+ error = ENETDOWN;
+ goto bad;
+ }
+ switch (dst->sa_family) {
+
+#ifdef INET
+ case AF_INET:
+ idst = ((struct sockaddr_in *)dst)->sin_addr;
+ if (!arpresolve(&is->pc586_ac, m, &idst, edst, &usetrailers)){
+ return (0); /* if not yet resolved */
+ }
+ off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
+
+ if (usetrailers && off > 0 && (off & 0x1ff) == 0 &&
+ m->m_off >= MMINOFF + 2 * sizeof (u_short)) {
+ type = ETHERTYPE_TRAIL + (off>>9);
+ m->m_off -= 2 * sizeof (u_short);
+ m->m_len += 2 * sizeof (u_short);
+ *mtod(m, u_short *) = htons((u_short)ETHERTYPE_IP);
+ *(mtod(m, u_short *) + 1) = htons((u_short)m->m_len);
+ goto gottrailertype;
+ }
+ type = ETHERTYPE_IP;
+ off = 0;
+ goto gottype;
+#endif
+#ifdef NS
+ case AF_NS:
+ type = ETHERTYPE_NS;
+ bcopy((caddr_t)&(((struct sockaddr_ns *)dst)->sns_addr.x_host),
+ (caddr_t)edst, sizeof (edst));
+ off = 0;
+ goto gottype;
+#endif
+
+#if DLI
+ case AF_DLI:
+ if (m->m_len < sizeof(struct ether_header))
+ {
+ error = EMSGSIZE;
+ goto bad;
+ }
+ eh = mtod(m, struct ether_header *);
+ bcopy(dst->sa_data, (caddr_t)eh->ether_dhost,
+ sizeof (eh->ether_dhost));
+ goto gotheader;
+#endif DLI
+
+ case AF_UNSPEC:
+ eh = (struct ether_header *)dst->sa_data;
+ bcopy((caddr_t)eh->ether_dhost, (caddr_t)edst, sizeof (edst));
+ type = eh->ether_type;
+ goto gottype;
+
+ default:
+ printf("pc%d output(): can't handle af%d\n",
+ ifp->if_unit, dst->sa_family);
+ error = EAFNOSUPPORT;
+ goto bad;
+ }
+
+gottrailertype:
+ /*
+ * Packet to be sent as trailer: move first packet
+ * (control information) to end of chain.
+ */
+ while (m->m_next)
+ m = m->m_next;
+ m->m_next = m0;
+ m = m0->m_next;
+ m0->m_next = 0;
+ m0 = m;
+
+gottype:
+ /*
+ * Add local net header. If no space in first mbuf,
+ * allocate another.
+ */
+ if (m->m_off > MMAXOFF ||
+ MMINOFF + sizeof (struct ether_header) > m->m_off) {
+ m = m_get(M_DONTWAIT, MT_HEADER);
+ if (m == 0) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ m->m_next = m0;
+ m->m_off = MMINOFF;
+ m->m_len = sizeof (struct ether_header);
+ } else {
+ m->m_off -= sizeof (struct ether_header);
+ m->m_len += sizeof (struct ether_header);
+ }
+ eh = mtod(m, struct ether_header *);
+ eh->ether_type = htons((u_short)type);
+ bcopy((caddr_t)edst, (caddr_t)eh->ether_dhost, sizeof (edst));
+ bcopy((caddr_t)is->ds_addr,(caddr_t)eh->ether_shost, sizeof(edst));
+#if DLI
+gotheader:
+#endif DLI
+
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ opri = SPLNET();
+ if (IF_QFULL(&ifp->if_snd)) {
+ IF_DROP(&ifp->if_snd);
+ splx(opri);
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ IF_ENQUEUE(&ifp->if_snd, m);
+ /*
+ * Some action needs to be added here for checking whether the
+ * board is already transmitting. If it is, we don't want to
+ * start it up (ie call pc586start()). We will attempt to send
+ * packets that are queued up after an interrupt occurs. Some
+ * flag checking action has to happen here and/or in the start
+ * routine. This note is here to remind me that some thought
+ * is needed and there is a potential problem here.
+ *
+ */
+ pc586start(ifp->if_unit);
+ splx(opri);
+ return (0);
+bad:
+ m_freem(m0);
+ return (error);
+}
+#endif MACH_KERNEL
+
+#ifdef MACH_KERNEL
+pc586getstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status; /* pointer to OUT array */
+ unsigned int *count; /* out */
+{
+ register int unit = minor(dev);
+ register pc_softc_t *sp;
+
+ if (unit < 0 || unit >= NPC586 || !pc_softc[unit].seated)
+ return (ENXIO);
+
+ sp = &pc_softc[unit];
+ return (net_getstat(&sp->ds_if, flavor, status, count));
+}
+
+pc586setstat(dev, flavor, status, count)
+ dev_t dev;
+ int flavor;
+ dev_status_t status;
+ unsigned int count;
+{
+ register int unit = minor(dev);
+ register pc_softc_t *sp;
+
+ if (unit < 0 || unit >= NPC586 || !pc_softc[unit].seated)
+ return (ENXIO);
+
+ sp = &pc_softc[unit];
+
+ switch (flavor) {
+ case NET_STATUS:
+ {
+ /*
+ * All we can change are flags, and not many of those.
+ */
+ register struct net_status *ns = (struct net_status *)status;
+ int mode = 0;
+
+ if (count < NET_STATUS_COUNT)
+ return (D_INVALID_OPERATION);
+
+ if (ns->flags & IFF_ALLMULTI)
+ mode |= MOD_ENAL;
+ if (ns->flags & IFF_PROMISC)
+ mode |= MOD_PROM;
+
+ /*
+ * Force a complete reset if the receive mode changes
+ * so that these take effect immediately.
+ */
+ if (sp->mode != mode) {
+ sp->mode = mode;
+ if (sp->flags & DSF_RUNNING) {
+ sp->flags &= ~(DSF_LOCK|DSF_RUNNING);
+ pc586init(unit);
+ }
+ }
+ break;
+ }
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+
+}
+#else MACH_KERNEL
+/*
+ * pc586ioctl:
+ *
+ * This routine processes an ioctl request from the "if" layer
+ * above.
+ *
+ * input : pointer the appropriate "if" struct, command, and data
+ * output : based on command appropriate action is taken on the
+ * pc586 board(s) or related structures
+ * return : error is returned containing exit conditions
+ *
+ */
+pc586ioctl(ifp, cmd, data)
+struct ifnet *ifp;
+int cmd;
+caddr_t data;
+{
+ register struct ifaddr *ifa = (struct ifaddr *)data;
+ int unit = ifp->if_unit;
+ register pc_softc_t *is = &pc_softc[unit];
+ short mode = 0;
+ int error = 0;
+ spl_t opri;
+
+ opri = SPLNET();
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ pc586init(unit);
+ switch (ifa->ifa_addr.sa_family) {
+#ifdef INET
+ case AF_INET:
+ ((struct arpcom *)ifp)->ac_ipaddr = IA_SIN(ifa)->sin_addr;
+ arpwhohas((struct arpcom *)ifp, &IA_SIN(ifa)->sin_addr);
+ break;
+#endif
+#ifdef NS
+ case AF_NS:
+ {
+ register struct ns_addr *ina =
+ &(IA_SNS(ifa)->sns_addr);
+ if (ns_nullhost(*ina))
+ ina->x_host = *(union ns_host *)(ds->ds_addr);
+ else
+ pc586setaddr(ina->x_host.c_host, unit);
+ break;
+ }
+#endif
+ }
+ break;
+ case SIOCSIFFLAGS:
+ if (ifp->if_flags & IFF_ALLMULTI)
+ mode |= MOD_ENAL;
+ if (ifp->if_flags & IFF_PROMISC)
+ mode |= MOD_PROM;
+ /*
+ * force a complete reset if the receive multicast/
+ * promiscuous mode changes so that these take
+ * effect immediately.
+ *
+ */
+ if (is->mode != mode) {
+ is->mode = mode;
+ if (is->flags & DSF_RUNNING) {
+ is->flags &= ~(DSF_LOCK|DSF_RUNNING);
+ pc586init(unit);
+ }
+ }
+ if ((ifp->if_flags & IFF_UP) == 0 && is->flags & DSF_RUNNING) {
+ printf("pc%d ioctl(): board is not running\n", unit);
+ is->flags &= ~(DSF_LOCK | DSF_RUNNING);
+ is->timer = -1;
+ pc586intoff(unit);
+ } else if (ifp->if_flags & IFF_UP && (is->flags & DSF_RUNNING) == 0) {
+ pc586init(unit);
+ }
+ break;
+#ifdef IF_CNTRS
+ case SIOCCIFCNTRS:
+ if (!suser()) {
+ error = EPERM;
+ break;
+ }
+ bzero((caddr_t)pc586_ein, sizeof (pc586_ein));
+ bzero((caddr_t)pc586_eout, sizeof (pc586_eout));
+ bzero((caddr_t)pc586_lin, sizeof (pc586_lin));
+ bzero((caddr_t)pc586_lout, sizeof (pc586_lout));
+ bzero((caddr_t)&pc586_arp, sizeof (int));
+ bzero((caddr_t)&pc586_cntrs, sizeof (pc586_cntrs));
+ break;
+#endif IF_CNTRS
+ default:
+ error = EINVAL;
+ }
+ splx(opri);
+ return (error);
+}
+#endif MACH_KERNEL
+
+/*
+ * pc586hwrst:
+ *
+ * This routine resets the pc586 board that corresponds to the
+ * board number passed in.
+ *
+ * input : board number to do a hardware reset
+ * output : board is reset
+ *
+ */
+pc586hwrst(unit)
+int unit;
+{
+ CMD(CHANATT, CMD_0, unit);
+ CMD(RESET, CMD_1, unit);
+ { int i; for (i = 0; i < 1000; i++); /* 4 clocks at 6Mhz */}
+ CMD(RESET,CMD_0, unit);
+
+/*
+ * for (i = 0; i < 1000000; i++);
+ * Even with this loop above, and with the reset toggle also looping
+ * to 1000000, we don't see the reset behaving as advertised.  DOES
+ * IT HAPPEN AT ALL?  In particular, NORMMODE, ENABLE, and XFER
+ * should all be zero, and they have not changed at all.
+ */
+ CMD(INTENAB, CMD_0, unit);
+ CMD(NORMMODE, CMD_0, unit);
+ CMD(XFERMODE, CMD_1, unit);
+
+ pc586bldcu(unit);
+
+ if (pc586diag(unit) == FALSE)
+ return(FALSE);
+
+ if (pc586config(unit) == FALSE)
+ return(FALSE);
+ /*
+ * insert code for loopback test here
+ *
+ */
+ pc586rustrt(unit);
+
+ pc586inton(unit);
+ CMD(NORMMODE, CMD_1, unit);
+ return(TRUE);
+}
+
+/*
+ * pc586watch():
+ *
+ * This routine is the watchdog timer routine for the pc586 chip. If
+ * This routine is the watchdog timer routine for the pc586 chip.  If
+ * the chip wedges, this routine will fire, cause a board reset, and
+ * begin again.
+ * input : which board is timing out
+ * output : potential board reset if wedged
+ *
+ */
+int watch_dead = 0;
+pc586watch(b_ptr)
+caddr_t b_ptr;
+{
+ spl_t opri;
+ int unit = *b_ptr;
+
+ if ((pc_softc[unit].ds_if.if_flags & IFF_UP) == 0) {
+ return;
+ }
+ if (pc_softc[unit].timer == -1) {
+ timeout(pc586watch, b_ptr, 5*HZ);
+ return;
+ }
+ if (--pc_softc[unit].timer != -1) {
+ timeout(pc586watch, b_ptr, 1*HZ);
+ return;
+ }
+
+ opri = SPLNET();
+#ifdef notdef
+ printf("pc%d watch(): 6sec timeout no %d\n", unit, ++watch_dead);
+#endif notdef
+ pc586_cntrs[unit].watch++;
+ if (pc586hwrst(unit) != TRUE) {
+ printf("pc%d watch(): hwrst trouble.\n", unit);
+ pc_softc[unit].timer = 0;
+ } else {
+ timeout(pc586watch, b_ptr, 1*HZ);
+ pc_softc[unit].timer = 5;
+ }
+ splx(opri);
+}
+
+/*
+ * pc586intr:
+ *
+ * This function is the interrupt handler for the pc586 ethernet
+ * board. This routine will be called whenever either a packet
+ * is received, or a packet has successfully been transferred and
+ * the unit is ready to transmit another packet.
+ *
+ * input : board number that interrupted
+ * output : either a packet is received, or a packet is transferred
+ *
+ */
+pc586intr(unit)
+int unit;
+{
+ volatile scb_t *scb_p = (volatile scb_t *)(pc_softc[unit].sram + OFFSET_SCB);
+ volatile ac_t *cb_p = (volatile ac_t *)(pc_softc[unit].sram + OFFSET_CU);
+ int next, x;
+ int i;
+ u_short int_type;
+
+ if (pc_softc[unit].seated == FALSE) {
+ printf("pc%d intr(): board not seated\n", unit);
+ return(-1);
+ }
+
+ while ((int_type = (scb_p->scb_status & SCB_SW_INT)) != 0) {
+ pc586ack(unit);
+ if (int_type & SCB_SW_FR) {
+ pc586rcv(unit);
+ watch_dead=0;
+ }
+ if (int_type & SCB_SW_RNR) {
+ pc586_cntrs[unit].rcv.ovw++;
+#ifdef notdef
+ printf("pc%d intr(): receiver overrun! begin_fd = %x\n",
+ unit, pc_softc[unit].begin_fd);
+#endif notdef
+ pc586rustrt(unit);
+ }
+ if (int_type & SCB_SW_CNA) {
+ /*
+ * At present, we don't care about CNA's. We
+ * believe they are a side effect of XMT.
+ */
+ }
+ if (int_type & SCB_SW_CX) {
+ /*
+ * At present, we only request Interrupt for
+ * XMT.
+ */
+ if ((!(cb_p->ac_status & AC_SW_OK)) ||
+ (cb_p->ac_status & (0xfff^TC_SQE))) {
+ if (cb_p->ac_status & TC_DEFER) {
+ if (xmt_watch) printf("DF");
+ pc586_cntrs[unit].xmt.defer++;
+ } else if (cb_p->ac_status & (TC_COLLISION|0xf)) {
+ if (xmt_watch) printf("%x",cb_p->ac_status & 0xf);
+ } else if (xmt_watch)
+ printf("pc%d XMT: %x %x\n",
+ unit, cb_p->ac_status, cb_p->ac_command);
+ }
+ pc586_cntrs[unit].xmt.xmti++;
+ pc_softc[unit].tbusy = 0;
+ pc586start(unit);
+ }
+ pc_softc[unit].timer = 5;
+ }
+ return(0);
+}
+
+/*
+ * pc586rcv:
+ *
+ * This routine is called by the interrupt handler to initiate a
+ * packet transfer from the board to the "if" layer above this
+ * driver. This routine checks if a buffer has been successfully
+ * received by the pc586. If so, the routine pc586read is called
+ * to do the actual transfer of the board data (including the
+ * ethernet header) into a packet (consisting of an mbuf chain).
+ *
+ * input : number of the board to check
+ * output : if a packet is available, it is "sent up"
+ *
+ */
+pc586rcv(unit)
+int unit;
+{
+ fd_t *fd_p;
+
+ for (fd_p = pc_softc[unit].begin_fd; fd_p != (fd_t *)NULL;
+ fd_p = pc_softc[unit].begin_fd) {
+ if (fd_p->status == 0xffff || fd_p->rbd_offset == 0xffff) {
+ if (pc586hwrst(unit) != TRUE)
+ printf("pc%d rcv(): hwrst ffff trouble.\n",
+ unit);
+ return;
+ } else if (fd_p->status & AC_SW_C) {
+ fd_t *bfd = (fd_t *)ram_to_ptr(fd_p->link_offset, unit);
+
+ if (fd_p->status == (RFD_DONE|RFD_RSC)) {
+ /* lost one */;
+#ifdef notdef
+ printf("pc%d RCV: RSC %x\n",
+ unit, fd_p->status);
+#endif notdef
+ pc586_cntrs[unit].rcv.partial++;
+ } else if (!(fd_p->status & RFD_OK))
+ printf("pc%d RCV: !OK %x\n",
+ unit, fd_p->status);
+ else if (fd_p->status & 0xfff)
+ printf("pc%d RCV: ERRs %x\n",
+ unit, fd_p->status);
+ else
+ if (!pc586read(unit, fd_p))
+ return;
+ if (!pc586requeue(unit, fd_p)) { /* abort on chain error */
+ if (pc586hwrst(unit) != TRUE)
+ printf("pc%d rcv(): hwrst trouble.\n", unit);
+ return;
+ }
+ pc_softc[unit].begin_fd = bfd;
+ } else
+ break;
+ }
+ return;
+}
+
+/*
+ * pc586requeue:
+ *
+ * This routine puts rbd's used in the last receive back onto the
+ * free list for the next receive.
+ *
+ */
+pc586requeue(unit, fd_p)
+int unit;
+fd_t *fd_p;
+{
+ rbd_t *l_rbdp;
+ rbd_t *f_rbdp;
+
+#ifndef REQUEUE_DBG
+ if (bad_rbd_chain(fd_p->rbd_offset, unit))
+ return 0;
+#endif REQUEUE_DBG
+ f_rbdp = (rbd_t *)ram_to_ptr(fd_p->rbd_offset, unit);
+ if (f_rbdp != NULL) {
+ l_rbdp = f_rbdp;
+ while ( (!(l_rbdp->status & RBD_SW_EOF)) &&
+ (l_rbdp->next_rbd_offset != 0xffff))
+ {
+ l_rbdp->status = 0;
+ l_rbdp = (rbd_t *)ram_to_ptr(l_rbdp->next_rbd_offset,
+ unit);
+ }
+ l_rbdp->next_rbd_offset = PC586NULL;
+ l_rbdp->status = 0;
+ l_rbdp->size |= AC_CW_EL;
+ pc_softc[unit].end_rbd->next_rbd_offset =
+ ptr_to_ram((char *)f_rbdp, unit);
+ pc_softc[unit].end_rbd->size &= ~AC_CW_EL;
+ pc_softc[unit].end_rbd= l_rbdp;
+ }
+
+ fd_p->status = 0;
+ fd_p->command = AC_CW_EL;
+ fd_p->link_offset = PC586NULL;
+ fd_p->rbd_offset = PC586NULL;
+
+ pc_softc[unit].end_fd->link_offset = ptr_to_ram((char *)fd_p, unit);
+ pc_softc[unit].end_fd->command = 0;
+ pc_softc[unit].end_fd = fd_p;
+
+ return 1;
+}
+
+/*
+ * pc586xmt:
+ *
+ * This routine fills in the appropriate registers and memory
+ * locations on the PC586 board and starts the board off on
+ * the transmit.
+ *
+ * input : board number of interest, and a pointer to the mbuf
+ * output : board memory and registers are set for xfer and attention
+ *
+ */
+#ifdef DEBUG
+int xmt_debug = 0;
+#endif DEBUG
+pc586xmt(unit, m)
+int unit;
+#ifdef MACH_KERNEL
+io_req_t m;
+#else MACH_KERNEL
+struct mbuf *m;
+#endif MACH_KERNEL
+{
+ pc_softc_t *is = &pc_softc[unit];
+ register u_char *xmtdata_p = (u_char *)(is->sram + OFFSET_TBUF);
+ register u_short *xmtshort_p;
+#ifdef MACH_KERNEL
+ register struct ether_header *eh_p = (struct ether_header *)m->io_data;
+#else MACH_KERNEL
+ struct mbuf *tm_p = m;
+ register struct ether_header *eh_p = mtod(m, struct ether_header *);
+ u_char *mb_p = mtod(m, u_char *) + sizeof(struct ether_header);
+ u_short count = m->m_len - sizeof(struct ether_header);
+#endif MACH_KERNEL
+ volatile scb_t *scb_p = (volatile scb_t *)(is->sram + OFFSET_SCB);
+ volatile ac_t *cb_p = (volatile ac_t *)(is->sram + OFFSET_CU);
+ tbd_t *tbd_p = (tbd_t *)(is->sram + OFFSET_TBD);
+ u_short tbd = OFFSET_TBD;
+ u_short len, clen = 0;
+
+ cb_p->ac_status = 0;
+ cb_p->ac_command = (AC_CW_EL|AC_TRANSMIT|AC_CW_I);
+ cb_p->ac_link_offset = PC586NULL;
+ cb_p->cmd.transmit.tbd_offset = OFFSET_TBD;
+
+ bcopy16(eh_p->ether_dhost, cb_p->cmd.transmit.dest_addr, ETHER_ADD_SIZE);
+ cb_p->cmd.transmit.length = (u_short)(eh_p->ether_type);
+
+#ifndef MACH_KERNEL
+#ifdef DEBUG
+ if (xmt_debug)
+ printf("XMT mbuf: L%d @%x ", count, mb_p);
+#endif DEBUG
+#endif MACH_KERNEL
+ tbd_p->act_count = 0;
+ tbd_p->buffer_base = 0;
+ tbd_p->buffer_addr = ptr_to_ram(xmtdata_p, unit);
+#ifdef MACH_KERNEL
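+	/*
+	 * Copy the payload with 16-bit moves: Rlen compensates for an odd
+	 * source start address and Llen for an odd byte count, so bcopy16
+	 * always transfers an even number of bytes into the board buffer.
+	 */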
+ { int Rlen, Llen;
+ clen = m->io_count - sizeof(struct ether_header);
+ Llen = clen & 1;
+ Rlen = ((int)(m->io_data + sizeof(struct ether_header))) & 1;
+
+ bcopy16(m->io_data + sizeof(struct ether_header) - Rlen,
+ xmtdata_p,
+ clen + (Rlen + Llen) );
+ xmtdata_p += clen + Llen;
+ tbd_p->act_count = clen;
+ tbd_p->buffer_addr += Rlen;
+ }
+#else MACH_KERNEL
+ do {
+ if (count) {
+ if (clen + count > ETHERMTU)
+ break;
+ if (count & 1)
+ len = count + 1;
+ else
+ len = count;
+ bcopy16(mb_p, xmtdata_p, len);
+ clen += count;
+ tbd_p->act_count += count;
+ xmtdata_p += len;
+ if ((tm_p = tm_p->m_next) == (struct mbuf *)0)
+ break;
+ if (count & 1) {
+ /* go to the next descriptor */
+ tbd_p++->next_tbd_offset = (tbd += sizeof (tbd_t));
+ tbd_p->act_count = 0;
+ tbd_p->buffer_base = 0;
+ tbd_p->buffer_addr = ptr_to_ram(xmtdata_p, unit);
+			/* at the end -> coalesce remaining mbufs */
+ if (tbd == OFFSET_TBD + (N_TBD-1) * sizeof (tbd_t)) {
+ pc586sftwsleaze(&count, &mb_p, &tm_p, unit);
+ continue;
+ }
+			/* next mbuf short -> coalesce as needed */
+ if ( (tm_p->m_next == (struct mbuf *) 0) ||
+#define HDW_THRESHOLD 55
+ tm_p->m_len > HDW_THRESHOLD)
+ /* ok */;
+ else {
+ pc586hdwsleaze(&count, &mb_p, &tm_p, unit);
+ continue;
+ }
+ }
+ } else if ((tm_p = tm_p->m_next) == (struct mbuf *)0)
+ break;
+ count = tm_p->m_len;
+ mb_p = mtod(tm_p, u_char *);
+#ifdef DEBUG
+ if (xmt_debug)
+ printf("mbuf+ L%d @%x ", count, mb_p);
+#endif DEBUG
+ } while (1);
+#endif MACH_KERNEL
+#ifdef DEBUG
+ if (xmt_debug)
+ printf("CLEN = %d\n", clen);
+#endif DEBUG
+ if (clen < ETHERMIN) {
+ tbd_p->act_count += ETHERMIN - clen;
+ for (xmtshort_p = (u_short *)xmtdata_p;
+ clen < ETHERMIN;
+ clen += 2) *xmtshort_p++ = 0;
+ }
+ tbd_p->act_count |= TBD_SW_EOF;
+ tbd_p->next_tbd_offset = PC586NULL;
+#ifdef IF_CNTRS
+ clen += sizeof (struct ether_header) + 4 /* crc */;
+ pc586_eout[log_2(clen)]++;
+ if (clen < 128) pc586_lout[clen>>3]++;
+#endif IF_CNTRS
+#ifdef DEBUG
+ if (xmt_debug) {
+ pc586tbd(unit);
+ printf("\n");
+ }
+#endif DEBUG
+
+ while (scb_p->scb_command) ;
+ scb_p->scb_command = SCB_CU_STRT;
+ pc586chatt(unit);
+
+#ifdef MACH_KERNEL
+ iodone(m);
+#else MACH_KERNEL
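+	/*
+	 * The chip does not hear its own transmissions, so frames sent to
+	 * the broadcast address (all ones) are handed back up locally.
+	 */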
+ for (count=0; ((count < 6) && (eh_p->ether_dhost[count] == 0xff)); count++) ;
+ if (count == 6) {
+ pc586send_packet_up(m, eh_p, is);
+ } else
+ m_freem(m);
+#endif MACH_KERNEL
+ return;
+}
+
+/*
+ * pc586bldcu:
+ *
+ * This function builds up the command unit structures. It inits
+ * the scp, iscp, scb, cb, tbd, and tbuf.
+ *
+ */
+pc586bldcu(unit)
+{
+ char *sram = pc_softc[unit].sram;
+ scp_t *scp_p = (scp_t *)(sram + OFFSET_SCP);
+ iscp_t *iscp_p = (iscp_t *)(sram + OFFSET_ISCP);
+ volatile scb_t *scb_p = (volatile scb_t *)(sram + OFFSET_SCB);
+ volatile ac_t *cb_p = (volatile ac_t *)(sram + OFFSET_CU);
+ tbd_t *tbd_p = (tbd_t *)(sram + OFFSET_TBD);
+ int i;
+
+ scp_p->scp_sysbus = 0;
+ scp_p->scp_iscp = OFFSET_ISCP;
+ scp_p->scp_iscp_base = 0;
+
+ iscp_p->iscp_busy = 1;
+ iscp_p->iscp_scb_offset = OFFSET_SCB;
+ iscp_p->iscp_scb = 0;
+ iscp_p->iscp_scb_base = 0;
+
+ pc586_cntrs[unit].rcv.crc += scb_p->scb_crcerrs;
+ pc586_cntrs[unit].rcv.frame += scb_p->scb_alnerrs;
+ pc586_cntrs[unit].rcv.rscerrs += scb_p->scb_rscerrs;
+ pc586_cntrs[unit].rcv.ovrnerrs += scb_p->scb_ovrnerrs;
+ scb_p->scb_status = 0;
+ scb_p->scb_command = 0;
+ scb_p->scb_cbl_offset = OFFSET_CU;
+ scb_p->scb_rfa_offset = OFFSET_RU;
+ scb_p->scb_crcerrs = 0;
+ scb_p->scb_alnerrs = 0;
+ scb_p->scb_rscerrs = 0;
+ scb_p->scb_ovrnerrs = 0;
+
+ scb_p->scb_command = SCB_RESET;
+ pc586chatt(unit);
+ for (i = 1000000; iscp_p->iscp_busy && (i-- > 0); );
+	if (i < 0) printf("pc%d bldcu(): iscp_busy timeout.\n", unit);
+ for (i = STATUS_TRIES; i-- > 0; ) {
+ if (scb_p->scb_status == (SCB_SW_CX|SCB_SW_CNA))
+ break;
+ }
+	if (i < 0)
+ printf("pc%d bldcu(): not ready after reset.\n", unit);
+ pc586ack(unit);
+
+ cb_p->ac_status = 0;
+ cb_p->ac_command = AC_CW_EL;
+ cb_p->ac_link_offset = OFFSET_CU;
+
+ tbd_p->act_count = 0;
+ tbd_p->next_tbd_offset = PC586NULL;
+ tbd_p->buffer_addr = 0;
+ tbd_p->buffer_base = 0;
+ return;
+}
+
+/*
+ * pc586bldru:
+ *
+ * This function builds the linear linked lists of fd's and
+ * rbd's. Based on page 4-32 of 1986 Intel microcom handbook.
+ *
+ */
+char *
+pc586bldru(unit)
+int unit;
+{
+ fd_t *fd_p = (fd_t *)(pc_softc[unit].sram + OFFSET_RU);
+ ru_t *rbd_p = (ru_t *)(pc_softc[unit].sram + OFFSET_RBD);
+ int i;
+
+ pc_softc[unit].begin_fd = fd_p;
+ for(i = 0; i < N_FD; i++, fd_p++) {
+ fd_p->status = 0;
+ fd_p->command = 0;
+ fd_p->link_offset = ptr_to_ram((char *)(fd_p + 1), unit);
+ fd_p->rbd_offset = PC586NULL;
+ }
+ pc_softc[unit].end_fd = --fd_p;
+ fd_p->link_offset = PC586NULL;
+ fd_p->command = AC_CW_EL;
+ fd_p = (fd_t *)(pc_softc[unit].sram + OFFSET_RU);
+
+ fd_p->rbd_offset = ptr_to_ram((char *)rbd_p, unit);
+ for(i = 0; i < N_RBD; i++, rbd_p = (ru_t *) &(rbd_p->rbuffer[RCVBUFSIZE])) {
+ rbd_p->r.status = 0;
+ rbd_p->r.buffer_addr = ptr_to_ram((char *)(rbd_p->rbuffer),
+ unit);
+ rbd_p->r.buffer_base = 0;
+ rbd_p->r.size = RCVBUFSIZE;
+ if (i != N_RBD-1) {
+ rbd_p->r.next_rbd_offset=ptr_to_ram(&(rbd_p->rbuffer[RCVBUFSIZE]),
+ unit);
+ } else {
+ rbd_p->r.next_rbd_offset = PC586NULL;
+ rbd_p->r.size |= AC_CW_EL;
+ pc_softc[unit].end_rbd = (rbd_t *)rbd_p;
+ }
+ }
+ return (char *)pc_softc[unit].begin_fd;
+}
+
+/*
+ * pc586rustrt:
+ *
+ * This routine starts the receive unit running. First checks if the
+ * board is actually ready, then the board is instructed to receive
+ * packets again.
+ *
+ */
+pc586rustrt(unit)
+int unit;
+{
+ volatile scb_t *scb_p = (volatile scb_t *)(pc_softc[unit].sram + OFFSET_SCB);
+ char *strt;
+
+ if ((scb_p->scb_status & SCB_RUS_READY) == SCB_RUS_READY)
+ return;
+
+ strt = pc586bldru(unit);
+ scb_p->scb_command = SCB_RU_STRT;
+ scb_p->scb_rfa_offset = ptr_to_ram(strt, unit);
+ pc586chatt(unit);
+ return;
+}
+
+/*
+ * pc586diag:
+ *
+ * This routine does a 586 op-code number 7, and obtains the
+ * diagnose status for the pc586.
+ *
+ */
+pc586diag(unit)
+int unit;
+{
+ volatile scb_t *scb_p = (volatile scb_t *)(pc_softc[unit].sram + OFFSET_SCB);
+ volatile ac_t *cb_p = (volatile ac_t *)(pc_softc[unit].sram + OFFSET_CU);
+ int i;
+
+ if (scb_p->scb_status & SCB_SW_INT) {
+ printf("pc%d diag(): bad initial state %\n",
+ unit, scb_p->scb_status);
+ pc586ack(unit);
+ }
+ cb_p->ac_status = 0;
+ cb_p->ac_command = (AC_DIAGNOSE|AC_CW_EL);
+ scb_p->scb_command = SCB_CU_STRT;
+ pc586chatt(unit);
+
+ for(i = 0; i < 0xffff; i++)
+ if ((cb_p->ac_status & AC_SW_C))
+ break;
+ if (i == 0xffff || !(cb_p->ac_status & AC_SW_OK)) {
+ printf("pc%d: diag failed; status = %x\n",
+ unit, cb_p->ac_status);
+ return(FALSE);
+ }
+
+ if ( (scb_p->scb_status & SCB_SW_INT) && (scb_p->scb_status != SCB_SW_CNA) ) {
+ printf("pc%d diag(): bad final state %x\n",
+ unit, scb_p->scb_status);
+ pc586ack(unit);
+ }
+ return(TRUE);
+}
+
+/*
+ * pc586config:
+ *
+ * This routine does a standard config of the pc586 board.
+ *
+ */
+pc586config(unit)
+int unit;
+{
+ volatile scb_t *scb_p = (volatile scb_t *)(pc_softc[unit].sram + OFFSET_SCB);
+ volatile ac_t *cb_p = (volatile ac_t *)(pc_softc[unit].sram + OFFSET_CU);
+ int i;
+
+
+/*
+ if ((scb_p->scb_status != SCB_SW_CNA) && (scb_p->scb_status & SCB_SW_INT) ) {
+ printf("pc%d config(): unexpected initial state %x\n",
+ unit, scb_p->scb_status);
+ }
+*/
+ pc586ack(unit);
+
+ cb_p->ac_status = 0;
+ cb_p->ac_command = (AC_CONFIGURE|AC_CW_EL);
+
+ /*
+	 * below is the default board configuration, from p. 2-28 of the 586 book
+ */
+ cb_p->cmd.configure.fifolim_bytecnt = 0x080c;
+ cb_p->cmd.configure.addrlen_mode = 0x2600;
+ cb_p->cmd.configure.linprio_interframe = 0x6000;
+ cb_p->cmd.configure.slot_time = 0xf200;
+ cb_p->cmd.configure.hardware = 0x0000;
+ cb_p->cmd.configure.min_frame_len = 0x0040;
+
+ scb_p->scb_command = SCB_CU_STRT;
+ pc586chatt(unit);
+
+ for(i = 0; i < 0xffff; i++)
+ if ((cb_p->ac_status & AC_SW_C))
+ break;
+ if (i == 0xffff || !(cb_p->ac_status & AC_SW_OK)) {
+ printf("pc%d: config-configure failed; status = %x\n",
+ unit, cb_p->ac_status);
+ return(FALSE);
+ }
+/*
+ if (scb_p->scb_status & SCB_SW_INT) {
+ printf("pc%d configure(): bad configure state %x\n",
+ unit, scb_p->scb_status);
+ pc586ack(unit);
+ }
+*/
+ cb_p->ac_status = 0;
+ cb_p->ac_command = (AC_IASETUP|AC_CW_EL);
+
+ bcopy16(pc_softc[unit].ds_addr, cb_p->cmd.iasetup, ETHER_ADD_SIZE);
+
+ scb_p->scb_command = SCB_CU_STRT;
+ pc586chatt(unit);
+
+ for (i = 0; i < 0xffff; i++)
+ if ((cb_p->ac_status & AC_SW_C))
+ break;
+ if (i == 0xffff || !(cb_p->ac_status & AC_SW_OK)) {
+ printf("pc%d: config-address failed; status = %x\n",
+ unit, cb_p->ac_status);
+ return(FALSE);
+ }
+/*
+ if ((scb_p->scb_status & SCB_SW_INT) != SCB_SW_CNA) {
+ printf("pc%d configure(): unexpected final state %x\n",
+ unit, scb_p->scb_status);
+ }
+*/
+ pc586ack(unit);
+
+ return(TRUE);
+}
+
+/*
+ * pc586ack:
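+ *
+ *	This routine acknowledges whatever interrupt bits are set in the
+ *	scb status, raises channel attention, and waits for the board to
+ *	accept the command.
+ *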
+ */
+pc586ack(unit)
+{
+ volatile scb_t *scb_p = (volatile scb_t *)(pc_softc[unit].sram + OFFSET_SCB);
+ int i;
+
+ if (!(scb_p->scb_command = scb_p->scb_status & SCB_SW_INT))
+ return;
+ CMD(CHANATT, 0x0001, unit);
+ for (i = 1000000; scb_p->scb_command && (i-- > 0); );
+	if (i < 0)
+ printf("pc%d pc586ack(): board not accepting command.\n", unit);
+}
+
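+/*
+ * ram_to_ptr:
+ *
+ *	This routine converts a board-relative offset into a kernel
+ *	virtual address within the unit's shared RAM.  PC586NULL maps
+ *	to NULL; offsets beyond the 16K window cause a panic.
+ */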
+char *
+ram_to_ptr(offset, unit)
+int unit;
+u_short offset;
+{
+ if (offset == PC586NULL)
+ return(NULL);
+ if (offset > 0x3fff) {
+ printf("ram_to_ptr(%x, %d)\n", offset, unit);
+ panic("range");
+ return(NULL);
+ }
+ return(pc_softc[unit].sram + offset);
+}
+
+#ifndef REQUEUE_DBG
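+/*
+ * bad_rbd_chain:
+ *
+ *	This routine sanity-walks an rbd chain before it is requeued and
+ *	returns 1 (counting the event) if an offset outside the shared
+ *	RAM window is found.
+ */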
+bad_rbd_chain(offset, unit)
+{
+ rbd_t *rbdp;
+ char *sram = pc_softc[unit].sram;
+
+ for (;;) {
+ if (offset == PC586NULL)
+ return 0;
+ if (offset > 0x3fff) {
+ printf("pc%d: bad_rbd_chain offset = %x\n",
+ unit, offset);
+ pc586_cntrs[unit].rcv.bad_chain++;
+ return 1;
+ }
+
+ rbdp = (rbd_t *)(sram + offset);
+ offset = rbdp->next_rbd_offset;
+ }
+}
+#endif REQUEUE_DBG
+
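+/*
+ * ptr_to_ram:
+ *
+ *	This routine is the inverse of ram_to_ptr: it converts a kernel
+ *	virtual address in shared RAM back into a board-relative offset.
+ */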
+u_short
+ptr_to_ram(k_va, unit)
+char *k_va;
+int unit;
+{
+ return((u_short)(k_va - pc_softc[unit].sram));
+}
+
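+/*
+ * pc586scb:
+ *
+ *	Debugging aid: prints the scb status and command fields, the
+ *	accumulated error counters, and the board command-register bits.
+ */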
+pc586scb(unit)
+{
+ volatile scb_t *scb = (volatile scb_t *)(pc_softc[unit].sram + OFFSET_SCB);
+ volatile u_short*cmd = (volatile u_short *)(pc_softc[unit].prom + OFFSET_NORMMODE);
+ u_short i;
+
+ i = scb->scb_status;
+ printf("stat: stat %x, cus %x, rus %x //",
+ (i&0xf000)>>12, (i&0x0700)>>8, (i&0x0070)>>4);
+ i = scb->scb_command;
+ printf(" cmd: ack %x, cuc %x, ruc %x\n",
+ (i&0xf000)>>12, (i&0x0700)>>8, (i&0x0070)>>4);
+
+ printf("crc %d[%d], align %d[%d], rsc %d[%d], ovr %d[%d]\n",
+ scb->scb_crcerrs, pc586_cntrs[unit].rcv.crc,
+ scb->scb_alnerrs, pc586_cntrs[unit].rcv.frame,
+ scb->scb_rscerrs, pc586_cntrs[unit].rcv.rscerrs,
+ scb->scb_ovrnerrs, pc586_cntrs[unit].rcv.ovrnerrs);
+
+ printf("cbl %x, rfa %x //", scb->scb_cbl_offset, scb->scb_rfa_offset);
+ printf(" norm %x, ena %x, xfer %x //",
+ cmd[0] & 1, cmd[3] & 1, cmd[4] & 1);
+ printf(" atn %x, reset %x, type %x, stat %x\n",
+ cmd[1] & 1, cmd[2] & 1, cmd[5] & 1, cmd[6] & 1);
+}
+
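+/*
+ * pc586tbd:
+ *
+ *	Debugging aid: walks the transmit buffer descriptor chain and
+ *	prints each descriptor along with a running byte count.
+ */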
+pc586tbd(unit)
+{
+ pc_softc_t *is = &pc_softc[unit];
+ tbd_t *tbd_p = (tbd_t *)(is->sram + OFFSET_TBD);
+ int i = 0;
+ int sum = 0;
+
+ do {
+ sum += (tbd_p->act_count & ~TBD_SW_EOF);
+ printf("%d: addr %x, count %d (%d), next %x, base %x\n",
+ i++, tbd_p->buffer_addr,
+ (tbd_p->act_count & ~TBD_SW_EOF), sum,
+ tbd_p->next_tbd_offset,
+ tbd_p->buffer_base);
+ if (tbd_p->act_count & TBD_SW_EOF)
+ break;
+ tbd_p = (tbd_t *)(is->sram + tbd_p->next_tbd_offset);
+ } while (1);
+}
+
+#ifndef MACH_KERNEL
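+/*
+ * pc586hdwsleaze:
+ *
+ *	Called when the next mbuf is short: if the run of even-sized
+ *	mbufs reaches the end of the chain or exceeds HDW_THRESHOLD, the
+ *	next mbuf is handed to the board as-is; otherwise several mbufs
+ *	are coalesced into the t_packet staging buffer to avoid burning
+ *	transmit buffer descriptors.
+ */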
+pc586hdwsleaze(countp, mb_pp, tm_pp, unit)
+struct mbuf **tm_pp;
+u_char **mb_pp;
+u_short *countp;
+{
+ struct mbuf *tm_p = *tm_pp;
+ u_char *mb_p = *mb_pp;
+ u_short count = 0;
+ u_char *cp;
+ int len;
+
+ pc586_cntrs[unit].xmt.sleaze++;
+ /*
+	 * can we get a run that will be coalesced or
+ * that terminates before breaking
+ */
+ do {
+ count += tm_p->m_len;
+ if (tm_p->m_len & 1)
+ break;
+ } while ((tm_p = tm_p->m_next) != (struct mbuf *)0);
+ if ( (tm_p == (struct mbuf *)0) ||
+ count > HDW_THRESHOLD) {
+ *countp = (*tm_pp)->m_len;
+ *mb_pp = mtod((*tm_pp), u_char *);
+ printf("\n");
+ return;
+ }
+
+ /* we need to copy */
+ pc586_cntrs[unit].xmt.intrinsic++;
+ tm_p = *tm_pp;
+ mb_p = *mb_pp;
+ count = 0;
+ cp = (u_char *) t_packet;
+ do {
+ bcopy(mtod(tm_p, u_char *), cp, len = tm_p->m_len);
+ count += len;
+ if (count > HDW_THRESHOLD)
+ break;
+ cp += len;
+ if (tm_p->m_next == (struct mbuf *)0)
+ break;
+ tm_p = tm_p->m_next;
+ } while (1);
+ pc586_cntrs[unit].xmt.intrinsic_count += count;
+ *countp = count;
+ *mb_pp = (u_char *) t_packet;
+ *tm_pp = tm_p;
+ return;
+}
+
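+/*
+ * pc586sftwsleaze:
+ *
+ *	Coalesces the remaining mbuf chain into the t_packet staging
+ *	buffer when the transmit has run out of buffer descriptors.
+ */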
+pc586sftwsleaze(countp, mb_pp, tm_pp, unit)
+struct mbuf **tm_pp;
+u_char **mb_pp;
+u_short *countp;
+{
+ struct mbuf *tm_p = *tm_pp;
+ u_char *mb_p = *mb_pp;
+ u_short count = 0;
+ u_char *cp = (u_char *) t_packet;
+ int len;
+
+ pc586_cntrs[unit].xmt.chain++;
+ /* we need to copy */
+ do {
+ bcopy(mtod(tm_p, u_char *), cp, len = tm_p->m_len);
+ count += len;
+ cp += len;
+ if (tm_p->m_next == (struct mbuf *)0)
+ break;
+ tm_p = tm_p->m_next;
+ } while (1);
+
+ *countp = count;
+ *mb_pp = (u_char *) t_packet;
+ *tm_pp = tm_p;
+ return;
+}
+#endif MACH_KERNEL
diff --git a/i386/i386at/if_pc586.h b/i386/i386at/if_pc586.h
new file mode 100644
index 00000000..59614067
--- /dev/null
+++ b/i386/i386at/if_pc586.h
@@ -0,0 +1,139 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Olivetti PC586 Mach Ethernet driver v1.0
+ * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989
+ * All rights reserved.
+ *
+ */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <i386at/i82586.h> /* chip/board specific defines */
+
+#define STATUS_TRIES 15000
+#define ETHER_ADD_SIZE 6 /* size of a MAC address */
+#define ETHER_PCK_SIZE 1500 /* maximum size of an ethernet packet */
+
+/*
+ * Board Specific Defines:
+ */
+
+#define OFFSET_NORMMODE 0x3000
+#define OFFSET_CHANATT 0x3002
+#define OFFSET_RESET 0x3004
+#define OFFSET_INTENAB 0x3006
+#define OFFSET_XFERMODE 0x3008
+#define OFFSET_SYSTYPE 0x300a
+#define OFFSET_INTSTAT 0x300c
+#define OFFSET_PROM 0x2000
+
+#define EXTENDED_ADDR 0x20000
+#define OFFSET_SCP (0x7ff6 - 0x4000)
+#define OFFSET_ISCP (0x7fee - 0x4000)
+#define OFFSET_SCB (0x7fde - 0x4000)
+#define OFFSET_RU (0x4000 - 0x4000)
+#define OFFSET_RBD (0x4228 - 0x4000)
+#define OFFSET_CU (0x7814 - 0x4000)
+
+#define OFFSET_TBD (0x7914 - 0x4000)
+#define OFFSET_TBUF (0x79a4 - 0x4000)
+#define N_FD 25
+#define N_RBD 25
+#define N_TBD 18
+#define RCVBUFSIZE 540
+#define DL_DEAD 0xffff
+
+#define CMD_0 0
+#define CMD_1 0xffff
+
+#define PC586NULL 0xffff /* pc586 NULL for lists */
+
+#define DSF_LOCK 1
+#define DSF_RUNNING 2
+
+#define MOD_ENAL 1
+#define MOD_PROM 2
+
+/*
+ * Driver (not board) specific defines and structures:
+ */
+
+typedef struct {
+ rbd_t r;
+ char rbd_pad[2];
+ char rbuffer[RCVBUFSIZE];
+} ru_t;
+
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+#ifndef TRUE
+#define TRUE 1
+#endif TRUE
+#define HZ 100
+#endif MACH_KERNEL
diff --git a/i386/i386at/if_wd8003.h b/i386/i386at/if_wd8003.h
new file mode 100644
index 00000000..3fa35ddc
--- /dev/null
+++ b/i386/i386at/if_wd8003.h
@@ -0,0 +1,315 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Western Digital Mach Ethernet driver
+ * Copyright (c) 1990 OSF Research Institute
+ */
+/*
+ Copyright 1990 by Open Software Foundation,
+Cambridge, MA.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appears in all copies and
+that both the copyright notice and this permission notice appear in
+supporting documentation, and that the name of OSF or Open Software
+Foundation not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+ OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/***********************************************************/
+/* Defines for the 583 chip. */
+/***********************************************************/
+
+/*--- 8390 Registers ---*/
+#define OFF_8390 0x10 /* offset of the 8390 chip */
+
+/* Register offsets */
+
+#define IFWD_REG_0 0x00
+#define IFWD_REG_1 0x01
+#define IFWD_REG_2 0x02
+#define IFWD_REG_3 0x03
+#define IFWD_REG_4 0x04
+#define IFWD_REG_5 0x05
+#define IFWD_REG_6 0x06
+#define IFWD_REG_7 0x07
+
+/* Register offset definitions for all boards */
+
+#define IFWD_LAR_0 0x08
+#define IFWD_LAR_1 0x09
+#define IFWD_LAR_2 0x0a
+#define IFWD_LAR_3 0x0b
+#define IFWD_LAR_4 0x0c
+#define IFWD_LAR_5 0x0d
+#define IFWD_BOARD_ID 0x0e
+#define IFWD_CHKSUM 0x0f
+
+/* revision number mask for BOARD_ID */
+#define IFWD_BOARD_REV_MASK 0x1e
+
+/* REG 1 */
+#define IFWD_MEMSIZE 0x08
+#define IFWD_16BIT 0x01
+
+/* REG 5 */
+#define IFWD_REG5_MEM_MASK 0x3f /* B23-B19 of address of the memory */
+#define IFWD_LA19 0x01 /* B19 of address of the memory */
+#define IFWD_MEM16ENB 0x80 /* Enable 16 bit memory access from bus */
+#define IFWD_LAN16ENB 0x40 /* Enable 16 bit memory access from chip*/
+#define IFWD_INIT_LAAR IFWD_LA19
+#define IFWD_SOFTINT 0x20 /* Enable interrupt from pc */
+
+/* Defs for board rev numbers > 1 */
+#define IFWD_MEDIA_TYPE 0x01
+#define IFWD_SOFT_CONFIG 0x20
+#define IFWD_RAM_SIZE 0x40
+#define IFWD_BUS_TYPE 0x80
+
+/* Register offsets for reading the EEPROM in the 584 chip */
+#define IFWD_EEPROM_0 0x08
+#define IFWD_EEPROM_1 0x09
+#define IFWD_EEPROM_2 0x0A
+#define IFWD_EEPROM_3 0x0B
+#define IFWD_EEPROM_4 0x0C
+#define IFWD_EEPROM_5 0x0D
+#define IFWD_EEPROM_6 0x0E
+#define IFWD_EEPROM_7 0x0F
+
+/**** defs for manipulating the 584 ****/
+#define IFWD_OTHER_BIT 0x02
+#define IFWD_ICR_MASK 0x0C
+#define IFWD_EAR_MASK 0x0F
+#define IFWD_ENGR_PAGE 0xA0
+/* #define IFWD_RLA 0x10 defined in ICR defs */
+#define IFWD_EA6 0x80
+#define IFWD_RECALL_DONE_MASK 0x10
+#define IFWD_EEPROM_MEDIA_MASK 0x07
+#define IFWD_STARLAN_TYPE 0x00
+#define IFWD_ETHERNET_TYPE 0x01
+#define IFWD_TP_TYPE 0x02
+#define IFWD_EW_TYPE 0x03
+#define IFWD_EEPROM_IRQ_MASK 0x18
+#define IFWD_PRIMARY_IRQ 0x00
+#define IFWD_ALTERNATE_IRQ_1 0x08
+#define IFWD_ALTERNATE_IRQ_2 0x10
+#define IFWD_ALTERNATE_IRQ_3 0x18
+#define IFWD_EEPROM_RAM_SIZE_MASK 0xE0
+#define IFWD_EEPROM_RAM_SIZE_RES1 0x00
+#define IFWD_EEPROM_RAM_SIZE_RES2 0x20
+#define IFWD_EEPROM_RAM_SIZE_8K 0x40
+#define IFWD_EEPROM_RAM_SIZE_16K 0x60
+#define IFWD_EEPROM_RAM_SIZE_32K 0x80
+#define IFWD_EEPROM_RAM_SIZE_64K 0xA0
+#define IFWD_EEPROM_RAM_SIZE_RES3 0xC0
+#define IFWD_EEPROM_RAM_SIZE_RES4 0xE0
+#define IFWD_EEPROM_BUS_TYPE_MASK 0x07
+#define IFWD_EEPROM_BUS_TYPE_AT 0x00
+#define IFWD_EEPROM_BUS_TYPE_MCA 0x01
+#define IFWD_EEPROM_BUS_TYPE_EISA 0x02
+#define IFWD_EEPROM_BUS_SIZE_MASK 0x18
+#define IFWD_EEPROM_BUS_SIZE_8BIT 0x00
+#define IFWD_EEPROM_BUS_SIZE_16BIT 0x08
+#define IFWD_EEPROM_BUS_SIZE_32BIT 0x10
+#define IFWD_EEPROM_BUS_SIZE_64BIT 0x18
+
+/*****************************************************************************
+ * *
+ * Definitions for board ID. *
+ * *
+ * note: board ID should be ANDed with the STATIC_ID_MASK *
+ * before comparing to a specific board ID *
+ * The high order 16 bits correspond to the Extra Bits which do not *
+ *	 change the board's ID.					     *
+ * *
+ * Note: not all are implemented. Rest are here for future enhancements...*
+ * *
+ *****************************************************************************/
+
+#define IFWD_STARLAN_MEDIA 0x00000001 /* StarLAN */
+#define IFWD_ETHERNET_MEDIA 0x00000002 /* Ethernet */
+#define IFWD_TWISTED_PAIR_MEDIA 0x00000003 /* Twisted Pair */
+#define IFWD_EW_MEDIA 0x00000004 /* Ethernet and Twisted Pair */
+#define IFWD_MICROCHANNEL 0x00000008 /* MicroChannel Adapter */
+#define IFWD_INTERFACE_CHIP 0x00000010 /* Soft Config Adapter */
+/* #define IFWD_UNUSED 0x00000020 */ /* used to be INTELLIGENT */
+#define IFWD_BOARD_16BIT 0x00000040 /* 16 bit capability */
+#define IFWD_RAM_SIZE_UNKNOWN 0x00000000 /* 000 => Unknown RAM Size */
+#define IFWD_RAM_SIZE_RES_1 0x00010000 /* 001 => Reserved */
+#define IFWD_RAM_SIZE_8K 0x00020000 /* 010 => 8k RAM */
+#define IFWD_RAM_SIZE_16K 0x00030000 /* 011 => 16k RAM */
+#define IFWD_RAM_SIZE_32K 0x00040000 /* 100 => 32k RAM */
+#define IFWD_RAM_SIZE_64K 0x00050000 /* 101 => 64k RAM */
+#define IFWD_RAM_SIZE_RES_6 0x00060000 /* 110 => Reserved */
+#define IFWD_RAM_SIZE_RES_7 0x00070000 /* 111 => Reserved */
+#define IFWD_SLOT_16BIT 0x00080000 /* 16 bit board - 16 bit slot*/
+#define IFWD_NIC_690_BIT 0x00100000 /* NIC is 690 */
+#define IFWD_ALTERNATE_IRQ_BIT 0x00200000 /* Alternate IRQ is used */
+#define IFWD_INTERFACE_584_CHIP 0x00400000 /* Interface chip is a 584 */
+
+#define IFWD_MEDIA_MASK 0x00000007 /* Isolates Media Type */
+#define IFWD_RAM_SIZE_MASK 0x00070000 /* Isolates RAM Size */
+#define IFWD_STATIC_ID_MASK 0x0000FFFF /* Isolates Board ID */
+
+/* Word definitions for board types */
+#define WD8003E IFWD_ETHERNET_MEDIA
+#define WD8003EBT WD8003E /* functionally identical to WD8003E */
+#define WD8003S IFWD_STARLAN_MEDIA
+#define WD8003SH WD8003S /* functionally identical to WD8003S */
+#define WD8003WT IFWD_TWISTED_PAIR_MEDIA
+#define WD8003W (IFWD_TWISTED_PAIR_MEDIA | IFWD_INTERFACE_CHIP)
+#define WD8003EB (IFWD_ETHERNET_MEDIA | IFWD_INTERFACE_CHIP)
+#define WD8003EP WD8003EB /* with IFWD_INTERFACE_584_CHIP bit set */
+#define WD8003EW (IFWD_EW_MEDIA | IFWD_INTERFACE_CHIP)
+#define WD8003ETA (IFWD_ETHERNET_MEDIA | IFWD_MICROCHANNEL)
+#define WD8003STA (IFWD_STARLAN_MEDIA | IFWD_MICROCHANNEL)
+#define WD8003EA (IFWD_ETHERNET_MEDIA | IFWD_MICROCHANNEL | \
+ IFWD_INTERFACE_CHIP)
+#define WD8003SHA (IFWD_STARLAN_MEDIA | IFWD_MICROCHANNEL | \
+ IFWD_INTERFACE_CHIP)
+#define WD8003WA (IFWD_TWISTED_PAIR_MEDIA | IFWD_MICROCHANNEL | \
+ IFWD_INTERFACE_CHIP)
+#define WD8013EBT (IFWD_ETHERNET_MEDIA | IFWD_BOARD_16BIT)
+#define WD8013EB (IFWD_ETHERNET_MEDIA | IFWD_BOARD_16BIT | \
+ IFWD_INTERFACE_CHIP)
+#define WD8013EP WD8013EB /* with IFWD_INTERFACE_584_CHIP bit set */
+#define WD8013W (IFWD_TWISTED_PAIR_MEDIA | IFWD_BOARD_16BIT | \
+ IFWD_INTERFACE_CHIP)
+#define WD8013EW (IFWD_EW_MEDIA | IFWD_BOARD_16BIT | \
+ IFWD_INTERFACE_CHIP)
+
+
+/**** Western digital node bytes ****/
+#define WD_NODE_ADDR_0 0x00
+#define WD_NODE_ADDR_1 0x00
+#define WD_NODE_ADDR_2 0xC0
+
+/*--- 83c583 registers ---*/
+#define IFWD_MSR 0x00 /* memory select register */
+ /* In 584 Board's command register */
+#define IFWD_ICR 0x01 /* interface configuration register */
+ /* In 584 8013 bus size register */
+#define IFWD_IAR 0x02 /* io address register */
+#define IFWD_BIO 0x03 /* bios ROM address register */
+#define IFWD_IRR 0x04 /* interrupt request register */
+#define IFWD_GP1 0x05 /* general purpose register 1 */
+#define IFWD_IOD 0x06 /* io data latch */
+#define IFWD_GP2 0x07 /* general purpose register 2 */
+#define IFWD_LAR 0x08 /* LAN address register */
+#define IFWD_LAR2 0x09 /* */
+#define IFWD_LAR3 0x0A /* */
+#define IFWD_LAR4 0x0B /* */
+#define IFWD_LAR5 0x0C /* */
+#define IFWD_LAR6 0x0D /* */
+#define IFWD_LAR7 0x0E /* */
+#define IFWD_LAR8 0x0F /* LAN address register */
+
+/********************* Register Bit Definitions **************************/
+/* MSR definitions */
+#define IFWD_RST 0x80 /* 1 => reset */
+#define IFWD_MENB 0x40 /* 1 => memory enable */
+#define IFWD_SA18 0x20 /* Memory enable bits */
+#define IFWD_SA17 0x10 /* telling where shared */
+#define IFWD_SA16 0x08 /* mem is to start. */
+#define IFWD_SA15 0x04 /* Assume SA19 = 1 */
+#define IFWD_SA14 0x02 /* */
+#define IFWD_SA13 0x01 /* */
+
+/* ICR definitions */
+#define IFWD_STR 0x80 /* Non-volatile EEPROM store */
+#define IFWD_RCL 0x40 /* Recall I/O Address from EEPROM */
+#define IFWD_RX7 0x20 /* Recall all but I/O and LAN address*/
+#define IFWD_RLA 0x10 /* Recall LAN Address */
+#define IFWD_MSZ 0x08 /* Shared Memory Size */
+#define IFWD_DMAE 0x04 /* DMA Enable */
+#define IFWD_IOPE 0x02 /* I/O Port Enable */
+#define IFWD_WTS 0x01 /* Word Transfer Select */
+
+/* IAR definitions */
+#define IFWD_IA15 0x80 /* I/O Address Bits */
+/* . */
+/* . */
+/* . */
+#define IFWD_IA5 0x01 /* */
+
+/* BIO definitions */
+#define IFWD_RS1 0x80 /* BIOS size bit 1 */
+#define IFWD_RS0 0x40 /* BIOS size bit 0 */
+#define IFWD_BA18 0x20 /* BIOS ROM Memory Address Bits */
+#define IFWD_BA17 0x10 /* */
+#define IFWD_BA16 0x08 /* */
+#define IFWD_BA15 0x04 /* */
+#define IFWD_BA14 0x02 /* BIOS ROM Memory Address Bits */
+#define IFWD_WINT 0x01 /* W8003 interrupt */
+
+/* IRR definitions */
+#define IFWD_IEN 0x80 /* Interrupt Enable */
+#define IFWD_IR1 0x40 /* Interrupt request bit 1 */
+#define IFWD_IR0 0x20 /* Interrupt request bit 0 */
+#define IFWD_AMD 0x10 /* Alternate mode */
+#define IFWD_AINT 0x08 /* Alternate interrupt */
+#define IFWD_BW1 0x04 /* BIOS Wait State Control bit 1 */
+#define IFWD_BW0 0x02 /* BIOS Wait State Control bit 0 */
+#define IFWD_OWS 0x01 /* Zero Wait State Enable */
+
+/* GP1 definitions */
+
+/* IOD definitions */
+
+/* GP2 definitions */
+
+/*************************************************************/
+/* Shared RAM buffer definitions */
+/*************************************************************/
+
+/**** NIC definitions ****/
+#define NIC_8003_SRAM_SIZE 0x2000 /* size of shared RAM buffer */
+#define NIC_HEADER_SIZE 4 /* size of receive header */
+#define NIC_PAGE_SIZE 0x100 /* each page of rcv ring is 256 bytes */
+
+#define ETHER_ADDR_SIZE 6 /* size of a MAC address */
+
+#ifdef MACH
+#define HZ 100
+#endif
+
+#define DSF_LOCK 1
+#define DSF_RUNNING 2
+
+#define MOD_ENAL 1
+#define MOD_PROM 2
diff --git a/i386/i386at/immc.c b/i386/i386at/immc.c
new file mode 100644
index 00000000..d6756e31
--- /dev/null
+++ b/i386/i386at/immc.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifdef ENABLE_IMMEDIATE_CONSOLE
+
+/* This is a special "feature" (read: kludge)
+ intended for use only for kernel debugging.
+ It enables an extremely simple console output mechanism
+ that sends text straight to CGA/EGA/VGA video memory.
+ It has the nice property of being functional right from the start,
+ so it can be used to debug things that happen very early
+ before any devices are initialized. */
+
+int immediate_console_enable = 1;
+
+void
+immc_cnputc(unsigned char c)
+{
+ static int ofs = -1;
+
+ if (!immediate_console_enable)
+ return;
+ if (ofs < 0)
+ {
+ ofs = 0;
+ immc_cnputc('\n');
+ }
+ else if (c == '\n')
+ {
+ bcopy(0xb8000+80*2, 0xb8000, 80*2*24);
+ bzero(0xb8000+80*2*24, 80*2);
+ ofs = 0;
+ }
+ else
+ {
+ volatile unsigned char *p;
+
+ if (ofs >= 80)
+ {
+ immc_cnputc('\r');
+ immc_cnputc('\n');
+ }
+
+ p = (void*)0xb8000 + 80*2*24 + ofs*2;
+ p[0] = c;
+ p[1] = 0x0f;
+ ofs++;
+ }
+}
+
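+/* The immediate console is output-only; polled input always reports
+   "no character available". */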
+int immc_cnmaygetc(void)
+{
+ return -1;
+}
+
+#endif ENABLE_IMMEDIATE_CONSOLE
+
diff --git a/i386/i386at/int_init.c b/i386/i386at/int_init.c
new file mode 100644
index 00000000..819201f3
--- /dev/null
+++ b/i386/i386at/int_init.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "idt.h"
+#include "gdt.h"
+
+/* defined in locore.S */
+extern vm_offset_t int_entry_table[];
+
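+/*
+ * Fill in interrupt gates for the 16 PIC-based external interrupt
+ * vectors, pointing them at the low-level stubs in int_entry_table.
+ */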
+void int_init()
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ fill_idt_gate(PIC_INT_BASE + i,
+ int_entry_table[i], KERNEL_CS,
+ ACC_PL_K|ACC_INTR_GATE, 0);
+}
+
diff --git a/i386/i386at/interrupt.S b/i386/i386at/interrupt.S
new file mode 100644
index 00000000..25cd66e7
--- /dev/null
+++ b/i386/i386at/interrupt.S
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 1995 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+#include <mach/machine/asm.h>
+
+#include "ipl.h"
+#include "pic.h"
+#include "i386asm.h"
+
+#define READ_ISR (OCW_TEMPLATE|READ_NEXT_RD|READ_IS_ONRD)
+
+/*
+ * Generic interrupt handler.
+ *
+ * On entry, %eax contains the irq number.
+ */
+ENTRY(interrupt)
+ movl %eax,%ecx /* save irq number */
+ movb $(NON_SPEC_EOI),%al /* non-specific EOI */
+ outb %al,$(PIC_MASTER_ICW) /* ack interrupt to master */
+ cmpl $8,%ecx /* do we need to ack slave? */
+ jl 1f /* no, skip it */
+ outb %al,$(PIC_SLAVE_ICW)
+1:
+ shll $2,%ecx /* irq * 4 */
+ movl EXT(intpri)(%ecx),%edx /* get new ipl */
+ call spl /* set ipl */
+ movl EXT(iunit)(%ecx),%edx /* get device unit number */
+ pushl %eax /* push previous ipl */
+ pushl %edx /* push unit number */
+ call *EXT(ivect)(%ecx) /* call interrupt handler */
+ addl $4,%esp /* pop unit number */
+ call splx_cli /* restore previous ipl */
+ cli /* XXX no more nested interrupts */
+ addl $4,%esp /* pop previous ipl */
+ ret /* return */
diff --git a/i386/i386at/iopl.c b/i386/i386at/iopl.c
new file mode 100644
index 00000000..ae67568c
--- /dev/null
+++ b/i386/i386at/iopl.c
@@ -0,0 +1,287 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/vm_prot.h>
+#include <mach/machine/vm_types.h>
+#include <mach/machine/vm_param.h>
+#include <mach/machine/eflags.h>
+
+#include <ipc/ipc_port.h>
+
+#include <device/io_req.h>
+
+#include <i386/io_port.h>
+#include <i386/pit.h>
+
+/*
+ * IOPL device.
+ */
+ipc_port_t iopl_device_port = IP_NULL;
+mach_device_t iopl_device = 0;
+
+/*
+ * Ports that we allow access to.
+ */
+io_reg_t iopl_port_list[] = {
+ /* timer 2 */
+ 0x42,
+ /* speaker output */
+ 0x61,
+ /* ATI - savage */
+ 0x1ce, 0x1cf,
+ /* game port */
+ 0x201,
+ /* sound board */
+ 0x220, 0x221, 0x222, 0x223, 0x224, 0x225, 0x226, 0x227,
+ 0x228, 0x229, 0x22a, 0x22b, 0x22c, 0x22d, 0x22e, 0x22f,
+ /* printer */
+ 0x278, 0x279, 0x27a,
+ 0x378, 0x379, 0x37a,
+ /* ega/vga */
+ 0x3b0, 0x3b1, 0x3b2, 0x3b3, 0x3b4, 0x3b5, 0x3b6, 0x3b7,
+ 0x3b8, 0x3b9, 0x3ba, 0x3bb, 0x3bc, 0x3bd, 0x3be, 0x3bf,
+ 0x3c0, 0x3c1, 0x3c2, 0x3c3, 0x3c4, 0x3c5, 0x3c6, 0x3c7,
+ 0x3c8, 0x3c9, 0x3ca, 0x3cb, 0x3cc, 0x3cd, 0x3ce, 0x3cf,
+ 0x3d0, 0x3d1, 0x3d2, 0x3d3, 0x3d4, 0x3d5, 0x3d6, 0x3d7,
+ 0x3d8, 0x3d9, 0x3da, 0x3db, 0x3dc, 0x3dd, 0x3de, 0x3df,
+ /* end of list */
+ IO_REG_NULL,
+ /* patch space */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+};
+
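+/*
+ * Opening the IOPL device records the device and its port, and
+ * creates the device's I/O port set from iopl_port_list above.
+ */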
+int
+ioplopen(dev, flag, ior)
+ int dev;
+ int flag;
+ io_req_t ior;
+{
+ iopl_device_port = ior->io_device->port;
+ iopl_device = ior->io_device;
+
+ io_port_create(iopl_device, iopl_port_list);
+ return (0);
+}
+
+
+/*ARGSUSED*/
+ioplclose(dev, flags)
+ int dev;
+ int flags;
+{
+ io_port_destroy(iopl_device);
+ iopl_device_port = IP_NULL;
+ iopl_device = 0;
+ return 0;
+}
+
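+/*
+ * Return the page frame to map for a given offset on the IOPL device.
+ * With iopl_all set, page 0 and anything outside real memory (low RAM
+ * below 0xa0000 and extended RAM up to phys_last_addr) may be mapped;
+ * otherwise offsets map into the 0xa0000 adapter window.
+ */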
+/*ARGSUSED*/
+int iopl_all = 1;
+ioplmmap(dev, off, prot)
+int dev;
+vm_offset_t off;
+vm_prot_t prot;
+{
+ extern vm_offset_t phys_last_addr;
+
+ if (iopl_all) {
+ if (off == 0)
+ return 0;
+ else if (off < 0xa0000)
+ return -1;
+ else if (off >= 0x100000 && off <= phys_last_addr)
+ return -1;
+ else
+ return i386_btop(off);
+
+ }
+ if (off > 0x60000)
+ return(-1);
+
+ /* Get page frame number for the page to be mapped. */
+
+ return(i386_btop(0xa0000 + off));
+}
+
+/*
+ * For DOS compatibility, it's easier to list the ports we don't
+ * allow access to.
+ */
+#define IOPL_PORTS_USED_MAX 256
+io_reg_t iopl_ports_used[IOPL_PORTS_USED_MAX] = {
+ IO_REG_NULL
+};
+
+boolean_t
+iopl_port_forbidden(io_port)
+ int io_port;
+{
+ int i;
+
+#if 0 /* we only read from these... it should be OK */
+
+ if (io_port <= 0xff)
+ return TRUE; /* system ports. 42,61,70,71 allowed above */
+
+ if (io_port >= 0x130 && io_port <= 0x137)
+ return TRUE; /* AHA disk */
+
+ if (io_port >= 0x170 && io_port <= 0x177)
+ return TRUE; /* HD disk */
+
+ if (io_port >= 0x1f0 && io_port <= 0x1f7)
+ return TRUE; /* HD disk */
+
+ if (io_port >= 0x230 && io_port <= 0x237)
+ return TRUE; /* AHA disk */
+
+ if (io_port >= 0x280 && io_port <= 0x2df)
+ return TRUE; /* 8390 network */
+
+ if (io_port >= 0x300 && io_port <= 0x31f)
+ return TRUE; /* 8390 network */
+
+ if (io_port >= 0x330 && io_port <= 0x337)
+ return TRUE; /* AHA disk */
+
+ if (io_port >= 0x370 && io_port <= 0x377)
+ return TRUE; /* FD disk */
+
+ if (io_port >= 0x3f0 && io_port <= 0x3f7)
+ return TRUE; /* FD disk */
+
+#endif
+
+ /*
+ * Must be OK, as far as we know...
+ * Record the port in the list, for
+ * curiosity seekers.
+ */
+ for (i = 0; i < IOPL_PORTS_USED_MAX; i++) {
+ if (iopl_ports_used[i] == io_port)
+ break; /* in list */
+ if (iopl_ports_used[i] == IO_REG_NULL) {
+ iopl_ports_used[i] = io_port;
+ iopl_ports_used[i+1] = IO_REG_NULL;
+ break;
+ }
+ }
+
+ return FALSE;
+}
+
+/*
+ * Emulate certain IO instructions for the AT bus.
+ *
+ * We emulate writes to the timer control port, 43.
+ * Only writes to timer 2 are allowed.
+ *
+ * Temporarily, we allow reads of any IO port,
+ * but ONLY if the thread has the IOPL device mapped
+ * and is not in V86 mode.
+ *
+ * This is a HACK and MUST go away when the DOS emulator
+ * emulates these IO ports, or when we decide that
+ * the DOS world can get access to all uncommitted IO
+ * ports. In that case, config() should remove the IO
+ * ports for devices that exist from the allowable list.
+ */
+boolean_t
+iopl_emulate(regs, opcode, io_port)
+ struct i386_saved_state *regs;
+ int opcode;
+ int io_port;
+{
+ iopb_tss_t iopb;
+
+ iopb = current_thread()->pcb->ims.io_tss;
+ if (iopb == 0)
+ return FALSE; /* no IO mapped */
+
+ /*
+ * Handle outb to the timer control port,
+ * for timer 2 only.
+ */
+ if (io_port == PITCTL_PORT) {
+
+ int io_byte = regs->eax & 0xff;
+
+ if (((iopb->bitmap[PITCTR2_PORT >> 3] & (1 << (PITCTR2_PORT & 0x7)))
+ == 0) /* allowed */
+ && (opcode == 0xe6 || opcode == 0xee) /* outb */
+ && (io_byte & 0xc0) == 0x80) /* timer 2 */
+ {
+ outb(io_port, io_byte);
+ return TRUE;
+ }
+ return FALSE; /* invalid IO to port 42 */
+ }
+
+ /*
+ * If the thread has the IOPL device mapped, and
+ * the io port is not on the 'forbidden' list, allow
+ * reads from it. Reject writes.
+ *
+ * Don`t do this for V86 mode threads
+ * (hack for DOS emulator XXX!)
+ */
+ if (!(regs->efl & EFL_VM) &&
+ iopb_check_mapping(current_thread(), iopl_device) &&
+ !iopl_port_forbidden(io_port))
+ {
+ /*
+ * handle inb, inw, inl
+ */
+ switch (opcode) {
+ case 0xE4: /* inb imm */
+ case 0xEC: /* inb dx */
+ regs->eax = (regs->eax & 0xffffff00)
+ | inb(io_port);
+ return TRUE;
+
+ case 0x66E5: /* inw imm */
+ case 0x66ED: /* inw dx */
+ regs->eax = (regs->eax & 0xffff0000)
+ | inw(io_port);
+ return TRUE;
+
+ case 0xE5: /* inl imm */
+ case 0xED: /* inl dx */
+ regs->eax = inl(io_port);
+ return TRUE;
+
+ default:
+ return FALSE; /* OUT not allowed */
+ }
+ }
+
+ /*
+ * Not OK.
+ */
+ return FALSE;
+}
+
diff --git a/i386/i386at/kd.c b/i386/i386at/kd.c
new file mode 100644
index 00000000..6c41ec56
--- /dev/null
+++ b/i386/i386at/kd.c
@@ -0,0 +1,2990 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Olivetti Mach Console driver v0.0
+ * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989
+ * All rights reserved.
+ *
+ */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/* $ Header: $ */
+
+#include <mach_kdb.h>
+
+#include <sys/types.h>
+#include <kern/time_out.h>
+#include <device/conf.h>
+#include <device/tty.h>
+#include <device/io_req.h>
+#include <device/buf.h> /* for struct uio (!) */
+#include <i386/io_port.h>
+#include <vm/vm_kern.h>
+#include "vm_param.h"
+#include <i386/machspl.h>
+#include <i386at/cram.h>
+#include <i386at/kd.h>
+#include <i386at/kdsoft.h>
+#include <cons.h>
+
+#include <blit.h>
+#if NBLIT > 0
+#include <i386at/blitvar.h>
+#else
+#define blit_present() FALSE
+#define blit_init() /* nothing */
+#endif
+
+#include <evc.h>
+#if NEVC > 0
+int evc1init();
+#else
+#define evc1init() FALSE
+#endif
+
+#define DEBUG 1 /* export feep() */
+
+#define DEFAULT -1 /* see kd_atoi */
+
+void kd_enqsc(); /* enqueues a scancode */
+
+void timeout();
+
+#define BROKEN_KEYBOARD_RESET
+
+
+struct tty kd_tty;
+extern int rebootflag;
+
+static void charput(), charmvup(), charmvdown(), charclear(), charsetcursor();
+static void kd_noopreset();
+boolean_t kdcheckmagic();
+
+int kdcnprobe(struct consdev *cp);
+int kdcninit(struct consdev *cp);
+int kdcngetc(dev_t dev, int wait);
+int kdcnputc(dev_t dev, int c);
+
+/*
+ * These routines define the interface to the device-specific layer.
+ * See kdsoft.h for a more complete description of what each routine does.
+ */
+void (*kd_dput)() = charput; /* put attributed char */
+void (*kd_dmvup)() = charmvup; /* block move up */
+void (*kd_dmvdown)() = charmvdown; /* block move down */
+void (*kd_dclear)() = charclear; /* block clear */
+void (*kd_dsetcursor)() = charsetcursor;
+ /* set cursor position on displayed page */
+void (*kd_dreset)() = kd_noopreset; /* prepare for reboot */
+
+/* forward declarations */
+unsigned char kd_getdata(), state2leds();
+
+
+/*
+ * Globals used for both character-based controllers and bitmap-based
+ * controllers. Default is EGA.
+ */
+
+vm_offset_t kd_bitmap_start = (vm_offset_t)0xa0000; /* XXX - put in kd.h */
+u_char *vid_start = (u_char *)EGA_START;
+ /* VM start of video RAM or frame buffer */
+csrpos_t kd_curpos = 0; /* set indirectly by kd_setpos--see kdsoft.h */
+short kd_lines = 25;
+short kd_cols = 80;
+char kd_attr = KA_NORMAL; /* current attribute */
+
+/*
+ * kd_state shows the state of the modifier keys (ctrl, caps lock,
+ * etc.) It should normally be changed by calling set_kd_state(), so
+ * that the keyboard status LEDs are updated correctly.
+ */
+int kd_state = KS_NORMAL;
+int kb_mode = KB_ASCII; /* event/ascii */
+
+/*
+ * State for the keyboard "mouse".
+ */
+int kd_kbd_mouse = 0;
+int kd_kbd_magic_scale = 6;
+int kd_kbd_magic_button = 0;
+
+/*
+ * Some keyboard commands work by sending a command, waiting for an
+ * ack (handled by kdintr), then sending data, which generates a
+ * second ack. If we are in the middle of such a sequence, kd_ack
+ * shows what the ack is for.
+ *
+ * When a byte is sent to the keyboard, it is kept around in last_sent
+ * in case it needs to be resent.
+ *
+ * The rest of the variables here hold the data required to complete
+ * the sequence.
+ *
+ * XXX - the System V driver keeps a command queue, I guess in case we
+ * want to start a command while another is in progress. Is this
+ * something we should worry about?
+ */
+enum why_ack {NOT_WAITING, SET_LEDS, DATA_ACK};
+enum why_ack kd_ack = NOT_WAITING;
+
+u_char last_sent = 0;
+
+u_char kd_nextled = 0;
+
+/*
+ * We don't provide any mutex protection for this flag because we know
+ * that this module will have been initialized by the time multiple
+ * threads are running.
+ */
+boolean_t kd_initialized = FALSE; /* driver initialized? */
+boolean_t kd_extended = FALSE;
+
+/* Array for processing escape sequences. */
+#define K_MAXESC 16
+u_char esc_seq[K_MAXESC];
+u_char *esc_spt = (u_char *)0;
+
+/*
+ * This array maps scancodes to Ascii characters (or character
+ * sequences).
+ * Each row corresponds to one key. There are NUMOUTPUT bytes per key
+ * state. The states are ordered: Normal, SHIFT, CTRL, ALT,
+ * SHIFT/ALT.
+ */
+unsigned char key_map[NUMKEYS][WIDTH_KMAP] = {
+{NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC},
+{K_ESC,NC,NC, K_ESC,NC,NC, K_ESC,NC,NC, K_ESC,NC,NC, K_ESC,NC,NC},
+{K_ONE,NC,NC, K_BANG,NC,NC, K_ONE,NC,NC, 0x1b,0x4e,0x31, 0x1b,0x4e,0x21},
+{K_TWO,NC,NC, K_ATSN,NC,NC, K_NUL,NC,NC, 0x1b,0x4e,0x32, 0x1b,0x4e,0x40},
+{K_THREE,NC,NC, K_POUND,NC,NC, K_THREE,NC,NC, 0x1b,0x4e,0x33, 0x1b,0x4e,0x23},
+{K_FOUR,NC,NC, K_DOLLAR,NC,NC, K_FOUR,NC,NC, 0x1b,0x4e,0x34, 0x1b,0x4e,0x24},
+{K_FIVE,NC,NC, K_PERC,NC,NC, K_FIVE,NC,NC, 0x1b,0x4e,0x35, 0x1b,0x4e,0x25},
+{K_SIX,NC,NC, K_CARET,NC,NC, K_RS,NC,NC, 0x1b,0x4e,0x36, 0x1b,0x4e,0x5e},
+{K_SEVEN,NC,NC, K_AMPER,NC,NC, K_SEVEN,NC,NC, 0x1b,0x4e,0x37, 0x1b,0x4e,0x26},
+{K_EIGHT,NC,NC, K_ASTER,NC,NC, K_EIGHT,NC,NC, 0x1b,0x4e,0x38, 0x1b,0x4e,0x2a},
+{K_NINE,NC,NC, K_LPAREN,NC,NC, K_NINE,NC,NC, 0x1b,0x4e,0x39,0x1b,0x4e,0x28},
+{K_ZERO,NC,NC, K_RPAREN,NC,NC, K_ZERO,NC,NC, 0x1b,0x4e,0x30,0x1b,0x4e,0x29},
+{K_MINUS,NC,NC, K_UNDSC,NC,NC, K_US,NC,NC, 0x1b,0x4e,0x2d, 0x1b,0x4e,0x5f},
+{K_EQL,NC,NC, K_PLUS,NC,NC, K_EQL,NC,NC, 0x1b,0x4e,0x3d, 0x1b,0x4e,0x2b},
+{K_BS,NC,NC, K_BS,NC,NC, K_BS,NC,NC, K_BS,NC,NC, K_BS,NC,NC},
+{K_HT,NC,NC, K_GS,NC,NC, K_HT,NC,NC, K_HT,NC,NC, K_GS,NC,NC},
+{K_q,NC,NC, K_Q,NC,NC, K_DC1,NC,NC, 0x1b,0x4e,0x71, 0x1b,0x4e,0x51},
+{K_w,NC,NC, K_W,NC,NC, K_ETB,NC,NC, 0x1b,0x4e,0x77, 0x1b,0x4e,0x57},
+{K_e,NC,NC, K_E,NC,NC, K_ENQ,NC,NC, 0x1b,0x4e,0x65, 0x1b,0x4e,0x45},
+{K_r,NC,NC, K_R,NC,NC, K_DC2,NC,NC, 0x1b,0x4e,0x72, 0x1b,0x4e,0x52},
+{K_t,NC,NC, K_T,NC,NC, K_DC4,NC,NC, 0x1b,0x4e,0x74, 0x1b,0x4e,0x54},
+{K_y,NC,NC, K_Y,NC,NC, K_EM,NC,NC, 0x1b,0x4e,0x79, 0x1b,0x4e,0x59},
+{K_u,NC,NC, K_U,NC,NC, K_NAK,NC,NC, 0x1b,0x4e,0x75, 0x1b,0x4e,0x55},
+{K_i,NC,NC, K_I,NC,NC, K_HT,NC,NC, 0x1b,0x4e,0x69, 0x1b,0x4e,0x49},
+{K_o,NC,NC, K_O,NC,NC, K_SI,NC,NC, 0x1b,0x4e,0x6f, 0x1b,0x4e,0x4f},
+{K_p,NC,NC, K_P,NC,NC, K_DLE,NC,NC, 0x1b,0x4e,0x70, 0x1b,0x4e,0x50},
+{K_LBRKT,NC,NC, K_LBRACE,NC,NC, K_ESC,NC,NC, 0x1b,0x4e,0x5b, 0x1b,0x4e,0x7b},
+{K_RBRKT,NC,NC, K_RBRACE,NC,NC, K_GS,NC,NC, 0x1b,0x4e,0x5d, 0x1b,0x4e,0x7d},
+{K_CR,NC,NC, K_CR,NC,NC, K_CR,NC,NC, K_CR,NC,NC, K_CR,NC,NC},
+{K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC,
+ K_SCAN,K_CTLSC,NC},
+{K_a,NC,NC, K_A,NC,NC, K_SOH,NC,NC, 0x1b,0x4e,0x61, 0x1b,0x4e,0x41},
+{K_s,NC,NC, K_S,NC,NC, K_DC3,NC,NC, 0x1b,0x4e,0x73, 0x1b,0x4e,0x53},
+{K_d,NC,NC, K_D,NC,NC, K_EOT,NC,NC, 0x1b,0x4e,0x65, 0x1b,0x4e,0x45},
+{K_f,NC,NC, K_F,NC,NC, K_ACK,NC,NC, 0x1b,0x4e,0x66, 0x1b,0x4e,0x46},
+{K_g,NC,NC, K_G,NC,NC, K_BEL,NC,NC, 0x1b,0x4e,0x67, 0x1b,0x4e,0x47},
+{K_h,NC,NC, K_H,NC,NC, K_BS,NC,NC, 0x1b,0x4e,0x68, 0x1b,0x4e,0x48},
+{K_j,NC,NC, K_J,NC,NC, K_LF,NC,NC, 0x1b,0x4e,0x6a, 0x1b,0x4e,0x4a},
+{K_k,NC,NC, K_K,NC,NC, K_VT,NC,NC, 0x1b,0x4e,0x6b, 0x1b,0x4e,0x4b},
+{K_l,NC,NC, K_L,NC,NC, K_FF,NC,NC, 0x1b,0x4e,0x6c, 0x1b,0x4e,0x4c},
+{K_SEMI,NC,NC, K_COLON,NC,NC, K_SEMI,NC,NC, 0x1b,0x4e,0x3b, 0x1b,0x4e,0x3a},
+{K_SQUOTE,NC,NC,K_DQUOTE,NC,NC,K_SQUOTE,NC,NC,0x1b,0x4e,0x27,0x1b,0x4e,0x22},
+{K_GRAV,NC,NC, K_TILDE,NC,NC, K_RS,NC,NC, 0x1b,0x4e,0x60, 0x1b,0x4e,0x7e},
+{K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC,
+ K_SCAN,K_LSHSC,NC},
+{K_BSLSH,NC,NC, K_PIPE,NC,NC, K_FS,NC,NC, 0x1b,0x4e,0x5c, 0x1b,0x4e,0x7c},
+{K_z,NC,NC, K_Z,NC,NC, K_SUB,NC,NC, 0x1b,0x4e,0x7a, 0x1b,0x4e,0x5a},
+{K_x,NC,NC, K_X,NC,NC, K_CAN,NC,NC, 0x1b,0x4e,0x78, 0x1b,0x4e,0x58},
+{K_c,NC,NC, K_C,NC,NC, K_ETX,NC,NC, 0x1b,0x4e,0x63, 0x1b,0x4e,0x43},
+{K_v,NC,NC, K_V,NC,NC, K_SYN,NC,NC, 0x1b,0x4e,0x76, 0x1b,0x4e,0x56},
+{K_b,NC,NC, K_B,NC,NC, K_STX,NC,NC, 0x1b,0x4e,0x62, 0x1b,0x4e,0x42},
+{K_n,NC,NC, K_N,NC,NC, K_SO,NC,NC, 0x1b,0x4e,0x6e, 0x1b,0x4e,0x4e},
+{K_m,NC,NC, K_M,NC,NC, K_CR,NC,NC, 0x1b,0x4e,0x6d, 0x1b,0x4e,0x4d},
+{K_COMMA,NC,NC, K_LTHN,NC,NC, K_COMMA,NC,NC, 0x1b,0x4e,0x2c, 0x1b,0x4e,0x3c},
+{K_PERIOD,NC,NC, K_GTHN,NC,NC, K_PERIOD,NC,NC,0x1b,0x4e,0x2e,0x1b,0x4e,0x3e},
+{K_SLASH,NC,NC, K_QUES,NC,NC, K_SLASH,NC,NC, 0x1b,0x4e,0x2f, 0x1b,0x4e,0x3f},
+{K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC,
+ K_SCAN,K_RSHSC,NC},
+{K_ASTER,NC,NC, K_ASTER,NC,NC, K_ASTER,NC,NC, 0x1b,0x4e,0x2a,0x1b,0x4e,0x2a},
+{K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC,
+ K_SCAN,K_ALTSC,NC},
+{K_SPACE,NC,NC, K_SPACE,NC,NC, K_NUL,NC,NC, K_SPACE,NC,NC, K_SPACE,NC,NC},
+{K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC,
+ K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC},
+{K_F1, K_F1S, K_F1, K_F1, K_F1S},
+{K_F2, K_F2S, K_F2, K_F2, K_F2S},
+{K_F3, K_F3S, K_F3, K_F3, K_F3S},
+{K_F4, K_F4S, K_F4, K_F4, K_F4S},
+{K_F5, K_F5S, K_F5, K_F5, K_F5S},
+{K_F6, K_F6S, K_F6, K_F6, K_F6S},
+{K_F7, K_F7S, K_F7, K_F7, K_F7S},
+{K_F8, K_F8S, K_F8, K_F8, K_F8S},
+{K_F9, K_F9S, K_F9, K_F9, K_F9S},
+{K_F10, K_F10S, K_F10, K_F10, K_F10S},
+{K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC,
+ K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC},
+{K_SCRL, K_NUL,NC,NC, K_SCRL, K_SCRL, K_NUL,NC,NC},
+{K_HOME, K_SEVEN,NC,NC, K_HOME, K_HOME, 0x1b,0x4e,0x37},
+{K_UA, K_EIGHT,NC,NC, K_UA, K_UA, 0x1b,0x4e,0x38},
+{K_PUP, K_NINE,NC,NC, K_PUP, K_PUP, 0x1b,0x4e,0x39},
+{0x1b,0x5b,0x53, K_MINUS,NC,NC, 0x1b,0x5b,0x53,0x1b,0x5b,0x53,0x1b,0x4e,0x2d},
+{K_LA, K_FOUR,NC,NC, K_LA, K_LA, 0x1b,0x4e,0x34},
+{0x1b,0x5b,0x47,K_FIVE,NC,NC,0x1b,0x5b,0x47, 0x1b,0x5b,0x47, 0x1b,0x4e,0x35},
+{K_RA, K_SIX,NC,NC, K_RA, K_RA, 0x1b,0x4e,0x36},
+{0x1b,0x5b,0x54,K_PLUS,NC,NC, 0x1b,0x5b,0x54, 0x1b,0x5b,0x54, 0x1b,0x4e,0x2b},
+{K_END, K_ONE,NC,NC, K_END, K_END, 0x1b,0x4e,0x31},
+{K_DA, K_TWO,NC,NC, K_DA, K_DA, 0x1b,0x4e,0x32},
+{K_PDN, K_THREE,NC,NC, K_PDN, K_PDN, 0x1b,0x4e,0x33},
+{K_INS, K_ZERO,NC,NC, K_INS, K_INS, 0x1b,0x4e,0x30},
+{K_DEL,NC,NC, K_PERIOD,NC,NC, K_DEL,NC,NC, K_DEL,NC,NC, 0x1b,0x4e,0x2e},
+{NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC},
+{NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC},
+{NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC},
+{K_F11, K_F11S, K_F11, K_F11, K_F11S},
+{K_F12, K_F12S, K_F12, K_F12, K_F12S}
+};
+
+
+/*
+ * Globals used only for character-based controllers.
+ */
+
+short kd_index_reg = EGA_IDX_REG;
+short kd_io_reg = EGA_IO_REG;
+
+/*
+ * IO port sets for different controllers.
+ */
+io_reg_t vga_port_list[] = {
+ 0x3b4, 0x3b5, 0x3b8, 0x3b9, 0x3ba, /* MDA/EGA */
+ 0x3d4, 0x3d5, 0x3d8, 0x3d9, 0x3da, /* CGA/EGA */
+ 0x3c0, 0x3c1, 0x3c2, 0x3c3, 0x3c4, 0x3c5, 0x3c6, 0x3c7,
+ 0x3c8, 0x3c9, 0x3ca, 0x3cb, 0x3cc, 0x3cd, 0x3ce, 0x3cf,
+ IO_REG_NULL
+};
+
+mach_device_t kd_io_device = 0;
+
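+/*
+ * Associate the standard VGA/EGA I/O port list with the console
+ * device on open; kd_io_map_close tears the association down.
+ */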
+kd_io_map_open(device)
+ mach_device_t device;
+{
+ kd_io_device = device;
+ io_port_create(device, vga_port_list);
+}
+
+kd_io_map_close()
+{
+ io_port_destroy(kd_io_device);
+ kd_io_device = 0;
+}
+
+/*
+ * Globals used only for bitmap-based controllers. See kdsoft.h for
+ * an explanation of what some of these variables are used for.
+ */
+
+u_char *font_start = 0; /* starting addr of font */
+
+short fb_width = 0; /* bits in frame buffer scan line */
+short fb_height = 0;		/* scan lines in frame buffer */
+short char_width = 0; /* bit width of 1 char */
+short char_height = 0; /* bit height of 1 char */
+short chars_in_font = 0;
+short cursor_height = 0; /* bit height of cursor */
+
+/* These initial values are simply guesses. */
+u_char char_black = 0;
+u_char char_white = 0xff;
+
+short xstart = 0;
+short ystart = 0;
+
+short char_byte_width = 0; /* char_width/NBBY */
+short fb_byte_width = 0; /* fb_width/NBBY */
+short font_byte_width = 0; /* num bytes in 1 scan line of font */
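+
+/*
+ * Illustrative example (assumed values, not defaults): with an
+ * 8-bit-wide font and a 1024-bit-wide frame buffer, NBBY == 8 gives
+ * char_byte_width = 8/8 = 1 and fb_byte_width = 1024/8 = 128.  The
+ * board-specific initialization code fills in the real values.
+ */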
+
+/*
+ * Switch for poll vs. interrupt.
+ */
+int kd_pollc = 0;
+
+#ifdef DEBUG
+/*
+ * feep:
+ *
+ * Ring the bell for a short time.
+ * Warning: uses outb(). You may prefer to use kd_debug_put.
+ */
+feep()
+{
+ int i;
+
+ kd_bellon();
+ for (i = 0; i < 50000; ++i)
+ ;
+ kd_belloff();
+}
+
+pause()
+{
+ int i;
+
+ for (i = 0; i < 50000; ++i)
+ ;
+}
+
+/*
+ * Put a debugging character on the screen.
+ * LOC=0 means put it in the bottom right corner, LOC=1 means put it
+ * one column to the left, etc.
+ */
+kd_debug_put(loc, c)
+int loc;
+char c;
+{
+ csrpos_t pos = ONE_PAGE - (loc+1) * ONE_SPACE;
+
+ (*kd_dput)(pos, c, KA_NORMAL);
+}
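+
+/*
+ * Example (illustrative): kd_debug_put(0, '1') paints '1' in the
+ * bottom-right cell (ONE_PAGE - ONE_SPACE); kd_debug_put(1, '2')
+ * paints '2' one cell to its left.
+ */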
+#endif /* DEBUG */
+
+
+extern int mouse_in_use;
+int old_kb_mode;
+
+cnpollc(on)
+boolean_t on;
+{
+ if (mouse_in_use) {
+ if (on) {
+ /* switch into X */
+ old_kb_mode = kb_mode;
+ kb_mode = KB_ASCII;
+ X_kdb_enter();
+
+ kd_pollc++;
+ } else {
+ --kd_pollc;
+
+ /* switch out of X */
+ X_kdb_exit();
+ kb_mode = old_kb_mode;
+ }
+ } else {
+ if (on) {
+ kd_pollc++;
+ } else {
+ --kd_pollc;
+ }
+ }
+}
+
+
+
+/*
+ * kdopen:
+ *
+ * This opens the console driver and sets up the tty, including
+ * calling the line discipline to set up the device-independent
+ * state for a tty driver.
+ *
+ * input: device number 'dev', and flag
+ *
+ * output: device is opened and setup
+ *
+ */
+kdopen(dev, flag, ior)
+ dev_t dev;
+ int flag;
+ io_req_t ior;
+{
+ struct tty *tp;
+ int kdstart();
+ spl_t o_pri;
+ int kdstop();
+
+ tp = &kd_tty;
+ o_pri = spltty();
+ simple_lock(&tp->t_lock);
+ if (!(tp->t_state & (TS_ISOPEN|TS_WOPEN))) {
+ /* XXX ttychars allocates memory */
+ simple_unlock(&tp->t_lock);
+ ttychars(tp);
+ simple_lock(&tp->t_lock);
+ /*
+ * Special support for boot-time rc scripts, which don't
+ * stty the console.
+ */
+ tp->t_oproc = kdstart;
+ tp->t_stop = kdstop;
+ tp->t_ospeed = tp->t_ispeed = B9600;
+ tp->t_flags = ODDP|EVENP|ECHO|CRMOD|XTABS;
+ kdinit();
+
+ /* XXX kd_io_map_open allocates memory */
+ simple_unlock(&tp->t_lock);
+ kd_io_map_open(ior->io_device);
+ simple_lock(&tp->t_lock);
+ }
+ tp->t_state |= TS_CARR_ON;
+ simple_unlock(&tp->t_lock);
+ splx(o_pri);
+ return (char_open(dev, tp, flag, ior));
+}
+
+
+/*
+ * kdclose:
+ *
+ * This function merely executes the device independent code for
+ * closing the line discipline.
+ *
+ * input: device number 'dev', and flag
+ *
+ * output: device is closed
+ *
+ */
+/*ARGSUSED*/
+kdclose(dev, flag)
+int dev;
+int flag;
+{
+ struct tty *tp;
+
+ tp = &kd_tty;
+ {
+ spl_t s = spltty();
+ simple_lock(&tp->t_lock);
+ ttyclose(tp);
+ simple_unlock(&tp->t_lock);
+ splx(s);
+ }
+
+ kd_io_map_close();
+
+ return;
+
+}
+
+
+/*
+ * kdread:
+ *
+ * This function executes the device independent code to read from
+ * the tty.
+ *
+ * input: device number 'dev'
+ *
+ * output: characters are read from tty clists
+ *
+ */
+/*ARGSUSED*/
+kdread(dev, uio)
+int dev;
+struct uio *uio;
+{
+ struct tty *tp;
+
+ tp = &kd_tty;
+ tp->t_state |= TS_CARR_ON;
+ return((*linesw[kd_tty.t_line].l_read)(tp, uio));
+}
+
+
+/*
+ * kdwrite:
+ *
+ * This function does the device independent write action for this
+ * console (tty) driver.
+ *
+ * input: device number 'dev'
+ *
+ * output: characters are written to tty clists
+ *
+ */
+/*ARGSUSED*/
+kdwrite(dev, uio)
+int dev;
+struct uio *uio;
+{
+ return((*linesw[kd_tty.t_line].l_write)(&kd_tty, uio));
+}
+
+/*
+ * Mmap.
+ */
+
+/*ARGSUSED*/
+int
+kdmmap(dev, off, prot)
+ dev_t dev;
+ off_t off;
+ int prot;
+{
+ if ((u_int) off >= (128*1024))
+ return(-1);
+
+ /* Get page frame number for the page to be mapped. */
+ return(i386_btop(kd_bitmap_start+off));
+}
+
+kdportdeath(dev, port)
+ dev_t dev;
+ mach_port_t port;
+{
+ return (tty_portdeath(&kd_tty, port));
+}
+
+/*ARGSUSED*/
+io_return_t kdgetstat(dev, flavor, data, count)
+ dev_t dev;
+ int flavor;
+ int * data; /* pointer to OUT array */
+ unsigned int *count; /* OUT */
+{
+ io_return_t result;
+
+ switch (flavor) {
+ case KDGSTATE:
+ if (*count < 1)
+ return (D_INVALID_OPERATION);
+ *data = kd_state;
+ *count = 1;
+ result = D_SUCCESS;
+ break;
+
+ case KDGKBENT:
+ result = kdgetkbent((struct kbentry *)data);
+ *count = sizeof(struct kbentry)/sizeof(int);
+ break;
+
+ default:
+ result = tty_get_status(&kd_tty, flavor, data, count);
+ break;
+ }
+ return (result);
+}
+
+/*ARGSUSED*/
+io_return_t kdsetstat(dev, flavor, data, count)
+ dev_t dev;
+ int flavor;
+ int * data;
+ unsigned int count;
+{
+ io_return_t result;
+
+ switch (flavor) {
+ case KDSKBENT:
+ if (count < sizeof(struct kbentry)/sizeof(int)) {
+ return (D_INVALID_OPERATION);
+ }
+ result = kdsetkbent((struct kbentry *)data, 0);
+ break;
+
+ case KDSETBELL:
+ if (count < 1)
+ return (D_INVALID_OPERATION);
+ result = kdsetbell(*data, 0);
+ break;
+
+ default:
+ result = tty_set_status(&kd_tty, flavor, data, count);
+ }
+ return (result);
+}
+
+
+
+/*
+ * kdsetbell:
+ *
+ * Turn the bell on or off. Returns error code, if given bogus
+ * on/off value.
+ */
+kdsetbell(val, flags)
+int val; /* on or off */
+int flags; /* flags set for console */
+{
+ int err = 0;
+
+
+ if (val == KD_BELLON)
+ kd_bellon();
+ else if (val == KD_BELLOFF)
+ kd_belloff();
+ else
+ err = D_INVALID_OPERATION;
+
+ return(err);
+}
+
+
+/*
+ * kdgetkbent:
+ *
+ * Get entry from key mapping table. Returns error code, if any.
+ */
+kdgetkbent(kbent)
+struct kbentry * kbent;
+{
+ u_char *cp;
+ spl_t o_pri = SPLKD(); /* probably superfluous */
+
+ cp = &key_map[kbent->kb_index][CHARIDX(kbent->kb_state)];
+ kbent->kb_value[0] = *cp++;
+ kbent->kb_value[1] = *cp++;
+ kbent->kb_value[2] = *cp;
+ (void)splx(o_pri);
+ return(0);
+}
+
+
+/*
+ * kdsetkbent:
+ *
+ * Set entry in key mapping table. Return error code, if any.
+ */
+int
+kdsetkbent(kbent, flags)
+struct kbentry * kbent;
+int flags; /* flags set for console */
+{
+ u_char *cp;
+ spl_t o_pri;
+
+ o_pri = SPLKD();
+ cp = &key_map[kbent->kb_index][CHARIDX(kbent->kb_state)];
+ *cp++ = kbent->kb_value[0];
+ *cp++ = kbent->kb_value[1];
+ *cp = kbent->kb_value[2];
+ (void)splx(o_pri);
+ return(0);
+}
+
+/*
+ * kdintr:
+ *
+ * This function is the interrupt code for the driver. Since this is
+ * a special tty (console), interrupts are only for input, so we read in
+ * the character. If in ascii mode, we then do the mapping translation
+ * from the keyboard mapping table (key_map) and place the characters on
+ * the tty's input queue. If in event mode, we create and queue a kd_event.
+ *
+ * input: interrupt vector 'vec'
+ *
+ * output: character or sequence is placed on appropriate queue
+ *
+ */
+/*ARGSUSED*/
+kdintr(vec, regs)
+int vec;
+int regs;
+{
+ struct tty *tp;
+ unsigned char c;
+ unsigned char scancode;
+ int o_pri;
+ int char_idx;
+ boolean_t up = FALSE; /* key-up event */
+ extern int mouse_in_use;
+ if (kd_pollc)
+ return; /* kdb polling kbd */
+
+ tp = &kd_tty;
+#ifdef old
+ while ((inb(K_STATUS) & K_OBUF_FUL) == 0); /* this should never loop */
+#else old
+ {
+ /*
+ * Allow for keyboards that raise interrupt before
+ * the character gets to the buffer. But don't wait
+ * forever if grabbing the character by polling leaves
+ * the interrupt on but buffer empty.
+ */
+ /*
+	 * A Micronics VLB motherboard with a 486DX2 can report the
+	 * keyboard interrupt before the K_STATUS register indicates that
+	 * the output buffer is full.  Moreover, the bus won't settle
+	 * while we poll K_STATUS at speed.  The temporary fix is to break
+	 * out after the safety counter runs out and pick up the keyboard
+	 * event anyway.  This should be fixed eventually by putting a 1us
+	 * timeout between inb's to K_STATUS and by fixing the PIC
+	 * initialization order to avoid wedging the keyboard at boot
+	 * (i.e. make kd a real device).
+ */
+ int safety = 1000;
+ while ((inb(K_STATUS) & K_OBUF_FUL) == 0)
+ if (!safety--) break; /* XXX */
+ }
+#endif old
+ /*
+ * We may have seen a mouse event.
+ */
+ if ((inb(K_STATUS) & 0x20) == 0x20) {
+ if (mouse_in_use) {
+ mouse_handle_byte((u_char)inb(K_RDWR));
+ return;
+ } else {
+ printf("M%xI", inb(K_RDWR));
+ return;
+ }
+ }
+
+ scancode = inb(K_RDWR);
+ if (scancode == K_EXTEND) {
+ if (kb_mode != KB_EVENT)
+ kd_extended = TRUE;
+ goto done;
+ } else if (scancode == K_RESEND) {
+ kd_resend();
+ goto done;
+ } else if (scancode == K_ACKSC) {
+ kd_handle_ack();
+ goto done;
+ } else if (kd_kbd_mouse && kd_kbd_magic(scancode)) {
+ goto done;
+ } else if (kdcheckmagic(scancode, &regs)) {
+ goto done;
+ } else if (kb_mode == KB_EVENT) {
+ kd_enqsc(scancode);
+ goto done;
+ } /* else... */
+
+ if (scancode & K_UP) {
+ up = TRUE;
+ scancode &= ~K_UP;
+ }
+ if (scancode < NUMKEYS) {
+ /* Lookup in map, then process. */
+ char_idx = kdstate2idx(kd_state, kd_extended);
+ c = key_map[scancode][char_idx];
+ if (c == K_SCAN) {
+ c = key_map[scancode][++char_idx];
+ set_kd_state(do_modifier(kd_state, c, up));
+ } else if (!up) {
+ /* regular key-down */
+ int max; /* max index for char sequence */
+
+ max = char_idx + NUMOUTPUT;
+ char_idx++;
+ if (!kd_extended) {
+ if (kd_state&KS_CLKED) {
+ if (kd_isupper(c)) {
+ c += ('a' - 'A');
+ max = char_idx;
+ }
+ else if (kd_islower(c)) {
+ c -= ('a' - 'A');
+ max = char_idx;
+ }
+ }
+ /*
+ * Notice that even if the keypad is remapped,
+				 * NumLock only affects the keys that are
+ * physically part of the keypad. Is this
+ * The Right Thing?
+ */
+ if ((kd_state&KS_NLKED) &&
+ (((K_HOMESC) <= scancode) &&
+ (scancode <= (K_DELSC)))) {
+ char_idx = CHARIDX(SHIFT_STATE);
+ c = key_map[scancode][char_idx];
+ max = char_idx + NUMOUTPUT;
+ char_idx++;
+ }
+ }
+
+ /*
+ * here's where we actually put the char (or
+ * char sequence, for function keys) onto the
+ * input queue.
+ */
+ for ( ; (c != K_DONE) && (char_idx <= max);
+ c = key_map[scancode][char_idx++]) {
+ (*linesw[tp->t_line].l_rint)(c, tp);
+ }
+ kd_extended = FALSE;
+ }
+ }
+
+ done:
+ return;
+}
+
+/*
+ * kd_handle_ack:
+ *
+ * For pending commands, complete the command. For data bytes,
+ * drop the ack on the floor.
+ */
+kd_handle_ack()
+{
+ switch (kd_ack) {
+ case SET_LEDS:
+ kd_setleds2();
+ kd_ack = DATA_ACK;
+ break;
+ case DATA_ACK:
+ kd_ack = NOT_WAITING;
+ break;
+ case NOT_WAITING:
+ printf("unexpected ACK from keyboard\n");
+ break;
+ default:
+ panic("bogus kd_ack\n");
+ break;
+ }
+}
+
+/*
+ * kd_resend:
+ *
+ * Resend a missed keyboard command or data byte.
+ */
+kd_resend()
+{
+ if (kd_ack == NOT_WAITING)
+ printf("unexpected RESEND from keyboard\n");
+ else
+ kd_senddata(last_sent);
+}
+
+
+/*
+ * do_modifier:
+ *
+ * Change keyboard state according to which modifier key and
+ * whether it went down or up.
+ *
+ * input: the current state, the key, and the key's direction.
+ * The key can be any key, not just a modifier key.
+ *
+ * output: the new state
+ */
+do_modifier(state, c, up)
+int state;
+Scancode c;
+boolean_t up;
+{
+ switch (c) {
+ case (K_ALTSC):
+ if (up)
+ state &= ~KS_ALTED;
+ else
+ state |= KS_ALTED;
+ kd_extended = FALSE;
+ break;
+#ifndef ORC
+ case (K_CLCKSC):
+#endif ORC
+ case (K_CTLSC):
+ if (up)
+ state &= ~KS_CTLED;
+ else
+ state |= KS_CTLED;
+ kd_extended = FALSE;
+ break;
+#ifdef ORC
+ case (K_CLCKSC):
+ if (!up)
+ state ^= KS_CLKED;
+ break;
+#endif ORC
+ case (K_NLCKSC):
+ if (!up)
+ state ^= KS_NLKED;
+ break;
+ case (K_LSHSC):
+ case (K_RSHSC):
+ if (up)
+ state &= ~KS_SHIFTED;
+ else
+ state |= KS_SHIFTED;
+ kd_extended = FALSE;
+ break;
+ }
+
+ return(state);
+}
+
+
+/*
+ * kdcheckmagic:
+ *
+ * Check for magic keystrokes for invoking the debugger or
+ * rebooting or ...
+ *
+ * input: an unprocessed scancode
+ *
+ * output: TRUE if a magic key combination was recognized and
+ * processed. FALSE otherwise.
+ *
+ * side effects:
+ * various actions possible, depending on which keys are
+ * pressed. If the debugger is called, steps are taken
+ * to ensure that the system doesn't think the magic keys
+ * are still held down.
+ */
+boolean_t
+kdcheckmagic(scancode, regs)
+Scancode scancode;
+int *regs;
+{
+ static int magic_state = KS_NORMAL; /* like kd_state */
+ boolean_t up = FALSE;
+ extern int rebootflag;
+
+ if (scancode == 0x46) /* scroll lock */
+/* if (scancode == 0x52) ** insert key */
+ {
+ kd_kbd_mouse = !kd_kbd_mouse;
+ kd_kbd_magic_button = 0;
+ return(TRUE);
+ }
+ if (scancode & K_UP) {
+ up = TRUE;
+ scancode &= ~K_UP;
+ }
+ magic_state = do_modifier(magic_state, scancode, up);
+
+ if ((magic_state&(KS_CTLED|KS_ALTED)) == (KS_CTLED|KS_ALTED)) {
+ switch (scancode) {
+#if MACH_KDB
+ case K_dSC: /* ctl-alt-d */
+ kdb_kintr(); /* invoke debugger */
+ /* Returned from debugger, so reset kbd state. */
+ (void)SPLKD();
+ magic_state = KS_NORMAL;
+ if (kb_mode == KB_ASCII)
+ kd_state = KS_NORMAL;
+ /* setting leds kills kbd */
+ else {
+ kd_enqsc(K_ALTSC | K_UP);
+ kd_enqsc(K_CTLSC | K_UP);
+ kd_enqsc(K_dSC | K_UP);
+ }
+ return(TRUE);
+ break;
+#endif MACH_KDB
+ case K_DELSC: /* ctl-alt-del */
+ /* if rebootflag is on, reboot the system */
+ if (rebootflag)
+ kdreboot();
+ break;
+ }
+ }
+ return(FALSE);
+}
+
+
+/*
+ * kdstate2idx:
+ *
+ * Return the value for the 2nd index into key_map that
+ * corresponds to the given state.
+ */
+kdstate2idx(state, extended)
+int state; /* bit vector, not a state index */
+boolean_t extended;
+{
+ int state_idx = NORM_STATE;
+
+ if ((!extended) && state != KS_NORMAL) {
+ if ((state&(KS_SHIFTED|KS_ALTED)) == (KS_SHIFTED|KS_ALTED))
+ state_idx = SHIFT_ALT;
+ else if (state&KS_SHIFTED)
+ state_idx = SHIFT_STATE;
+ else if (state&KS_ALTED)
+ state_idx = ALT_STATE;
+ else if (state&KS_CTLED)
+ state_idx = CTRL_STATE;
+ }
+
+ return (CHARIDX(state_idx));
+}
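+
+/*
+ * Worked example (illustrative): with only Shift held down,
+ * kd_state == KS_SHIFTED, so kdstate2idx() returns
+ * CHARIDX(SHIFT_STATE) == SHIFT_STATE * NUMOUTPUT == 1 * 3 == 3,
+ * and key_map[scancode][3..5] holds the bytes (up to NUMOUTPUT of
+ * them, terminated early by K_DONE) emitted for that key.
+ */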
+
+/*
+ * kdstart:
+ *
+ * This function does the general processing of characters and other
+ * operations for the device driver. The device independent portion of
+ * the tty driver calls this routine (it is installed as t_oproc in
+ * kdopen) to drain the output queue; the queued characters are
+ * processed and control is passed back to the kernel.
+ *
+ * input: tty pointer 'tp'
+ *
+ * output: queued characters are displayed
+ *
+ * Entered and left at spltty.  Drops priority to splsoftclock to display
+ * characters.
+ * ASSUMES that it is never called from interrupt-driven code.
+ */
+kdstart(tp)
+struct tty *tp;
+{
+ spl_t o_pri;
+ int ch;
+ unsigned char c;
+
+ if (tp->t_state & TS_TTSTOP)
+ return;
+ for ( ; ; ) {
+ tp->t_state &= ~TS_BUSY;
+ if (tp->t_state & TS_TTSTOP)
+ break;
+ if ((tp->t_outq.c_cc <= 0) || (ch = getc(&tp->t_outq)) == -1)
+ break;
+ c = ch;
+ /*
+ * Drop priority for long screen updates. ttstart() calls us at
+ * spltty.
+ */
+ o_pri = splsoftclock(); /* block timeout */
+ if (c == (K_ESC)) {
+ if (esc_spt == esc_seq) {
+ *(esc_spt++)=(K_ESC);
+ *(esc_spt) = '\0';
+ } else {
+ kd_putc((K_ESC));
+ esc_spt = esc_seq;
+ }
+ } else {
+ if (esc_spt - esc_seq) {
+ if (esc_spt - esc_seq > K_MAXESC - 1)
+ esc_spt = esc_seq;
+ else {
+ *(esc_spt++) = c;
+ *(esc_spt) = '\0';
+ kd_parseesc();
+ }
+ } else {
+ kd_putc(c);
+ }
+ }
+ splx(o_pri);
+ }
+ if (tp->t_outq.c_cc <= TTLOWAT(tp)) {
+ tt_write_wakeup(tp);
+ }
+}
+
+/*ARGSUSED*/
+kdstop(tp, flags)
+ register struct tty *tp;
+ int flags;
+{
+ /*
+ * do nothing - all characters are output by one call to
+ * kdstart.
+ */
+}
+
+/*
+ * kdinit:
+ *
+ * This code initializes the structures and sets up the port registers
+ * for the console driver.
+ *
+ * Each bitmap-based graphics card is likely to require a unique
+ * way to determine the card's presence. The driver runs through
+ * each "special" card that it knows about and uses the first one
+ * that it finds. If it doesn't find any, it assumes that an
+ * EGA-like card is installed.
+ *
+ * input : None. Interrupts are assumed to be disabled
+ * output : Driver is initialized
+ *
+ */
+kdinit()
+{
+ void kd_xga_init();
+ unsigned char k_comm; /* keyboard command byte */
+
+ if (kd_initialized)
+ return;
+
+ esc_spt = esc_seq;
+ kd_attr = KA_NORMAL;
+
+ /*
+ * board specific initialization: set up globals and kd_dxxx
+ * pointers, and synch displayed cursor with logical cursor.
+ */
+ if (!evc1init())
+ if (blit_present())
+ blit_init();
+ else
+ kd_xga_init();
+
+ /* get rid of any garbage in output buffer */
+ if (inb(K_STATUS) & K_OBUF_FUL)
+ (void)inb(K_RDWR);
+
+ kd_sendcmd(KC_CMD_READ); /* ask for the ctlr command byte */
+ k_comm = kd_getdata();
+ k_comm &= ~K_CB_DISBLE; /* clear keyboard disable bit */
+ k_comm |= K_CB_ENBLIRQ; /* enable interrupt */
+ kd_sendcmd(KC_CMD_WRITE); /* write new ctlr command byte */
+ kd_senddata(k_comm);
+ kd_initialized = TRUE;
+
+#ifdef ENABLE_IMMEDIATE_CONSOLE
+ /* Now that we're set up, we no longer need or want the
+ immediate console. */
+ {
+ extern int immediate_console_enable;
+ immediate_console_enable = 0;
+ }
+
+ /* The immediate console printed stuff at the bottom of the
+ screen rather than at the cursor position, so that's where
+ we should start. */
+ kd_setpos(ONE_PAGE - ONE_LINE); printf("\n");
+#endif
+
+ cnsetleds(kd_state = KS_NORMAL);
+ /* clear the LEDs AFTER we
+ enable the keyboard controller.
+ This keeps NUM-LOCK from being
+ set on the NEC Versa. */
+}
+
+/*
+ * kd_belloff:
+ *
+ * This routine shuts the bell off by sending the appropriate code
+ * to the speaker port.
+ *
+ * input : None
+ * output : bell is turned off
+ *
+ */
+static unsigned int kd_bellstate = 0;
+kd_belloff()
+{
+ unsigned char status;
+
+ status = (inb(K_PORTB) & ~(K_SPKRDATA | K_ENABLETMR2));
+ outb(K_PORTB, status);
+ kd_bellstate = 0;
+ return;
+}
+
+
+/*
+ * kd_bellon:
+ *
+ * This routine turns the bell on.
+ *
+ * input : None
+ * output : bell is turned on
+ *
+ */
+kd_bellon()
+{
+ unsigned char status;
+
+ /* program timer 2 */
+ outb(K_TMRCTL, K_SELTMR2 | K_RDLDTWORD | K_TSQRWAVE | K_TBINARY);
+ outb(K_TMR2, 1500 & 0xff); /* LSB */
+ outb(K_TMR2, (int)1500 >> 8); /* MSB */
+
+ /* start speaker - why must we turn on K_SPKRDATA? */
+ status = (inb(K_PORTB)| K_ENABLETMR2 | K_SPKRDATA);
+ outb(K_PORTB, status);
+ return;
+}
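+
+/*
+ * The divisor 1500 loaded into timer 2 above determines the bell
+ * pitch.  Assuming the usual PC/AT timer input clock of roughly
+ * 1.193182 MHz, the resulting square wave is about 1193182/1500,
+ * i.e. just under 800 Hz.
+ */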
+
+/*
+ *
+ * Function kd_putc():
+ *
+ * This function simply puts a character on the screen. It does some
+ * special processing for linefeed, carriage return, backspace and
+ * the bell.
+ *
+ * input : character to be displayed
+ * output : character is displayed, or some action is taken
+ *
+ */
+int sit_for_0 = 1;
+
+kd_putc(ch)
+u_char ch;
+{
+ if ((!ch) && sit_for_0)
+ return;
+
+ switch (ch) {
+ case ((K_LF)):
+ kd_down();
+ break;
+ case ((K_CR)):
+ kd_cr();
+ break;
+ case ((K_BS)):
+ kd_left();
+ break;
+ case ((K_HT)):
+ kd_tab();
+ break;
+ case ((K_BEL)):
+ /*
+ * Similar problem to K_BS here (behavior might depend
+ * on tty setting). Also check LF and CR.
+ */
+ if (!kd_bellstate)
+ {
+ kd_bellon();
+ timeout(kd_belloff, 0, hz/8 );
+ kd_bellstate = 1;
+ }
+ break;
+ default:
+ (*kd_dput)(kd_curpos, ch, kd_attr);
+ kd_right();
+ break;
+ }
+ return;
+}
+
+
+/*
+ * kd_setpos:
+ *
+ * This function sets the software and hardware cursor position
+ * on the screen, using device-specific code to actually move and
+ * display the cursor.
+ *
+ * input : position on (or off) screen to move the cursor to
+ * output : cursor position is updated, screen has been scrolled
+ * if necessary to bring cursor position back onto
+ * screen.
+ *
+ */
+kd_setpos(newpos)
+csrpos_t newpos;
+{
+ if (newpos > ONE_PAGE) {
+ kd_scrollup();
+ newpos = BOTTOM_LINE;
+ }
+ if (newpos < 0) {
+ kd_scrolldn();
+ newpos = 0;
+ }
+
+ (*kd_dsetcursor)(newpos);
+}
+
+
+/*
+ * kd_scrollup:
+ *
+ * This function scrolls the screen up one line using a fast block
+ * memory copy.
+ *
+ * input : None
+ * output : lines on screen appear to be shifted up one line
+ *
+ */
+kd_scrollup()
+{
+ csrpos_t to;
+ csrpos_t from;
+ int count;
+
+ /* scroll up */
+ to = 0;
+ from = ONE_LINE;
+ count = (ONE_PAGE - ONE_LINE)/ONE_SPACE;
+ (*kd_dmvup)(from, to, count);
+
+ /* clear bottom line */
+ to = BOTTOM_LINE;
+ count = ONE_LINE/ONE_SPACE;
+ (*kd_dclear)(to, count, kd_attr);
+ return;
+}
+
+
+/*
+ * kd_scrolldn:
+ *
+ * Scrolls the characters on the screen down one line.
+ *
+ * input : None
+ * output : Lines on screen appear to be moved down one line
+ *
+ */
+kd_scrolldn()
+{
+ csrpos_t to;
+ csrpos_t from;
+ int count;
+
+ /* move down */
+ to = ONE_PAGE - ONE_SPACE;
+ from = ONE_PAGE - ONE_LINE - ONE_SPACE;
+ count = (ONE_PAGE - ONE_LINE) / ONE_SPACE;
+ (*kd_dmvdown)(from, to, count);
+
+ /* clear top line */
+ to = 0;
+ count = ONE_LINE/ONE_SPACE;
+ (*kd_dclear)(to, count, kd_attr);
+ return;
+
+}
+
+
+/*
+ * kd_parseesc:
+ *
+ * This routine begins the parsing of an escape sequence. It uses the
+ * escape sequence array and the escape spot pointer to handle
+ * asynchronous parsing of escape sequences.
+ *
+ * input : String of characters beginning with an escape
+ * output : Appropriate actions are taken depending on the string as
+ * defined by the ansi terminal specification
+ *
+ */
+kd_parseesc()
+{
+ u_char *escp;
+
+ escp = esc_seq + 1; /* point to char following ESC */
+ switch(*(escp)) {
+ case 'c':
+ kd_cls();
+ kd_home();
+ esc_spt = esc_seq; /* reset spot in ESC sequence */
+ break;
+ case '[':
+ escp++;
+ kd_parserest(escp);
+ break;
+ case '\0':
+ break; /* not enough info yet */
+ default:
+ kd_putc(*escp);
+ esc_spt = esc_seq; /* inv sequence char, reset */
+ break;
+ }
+ return;
+
+}
+
+
+/*
+ * kd_parserest:
+ *
+ * This function will complete the parsing of an escape sequence and
+ * call the appropriate support routine if it matches a character. This
+ * function could be greatly improved by using a function jump table, and
+ * removing this bulky switch statement.
+ *
+ * input : A string
+ * output : Appropriate action based on whether the string matches a
+ * sequence acceptable to the ansi terminal specification
+ *
+ */
+kd_parserest(cp)
+u_char *cp;
+{
+ int number;
+ csrpos_t newpos;
+
+ cp += kd_atoi(cp, &number);
+ switch(*cp) {
+ case 'm':
+ switch(number) {
+ case DEFAULT:
+ case 0:
+ kd_attr = KA_NORMAL;
+ break;
+ case 7:
+ kd_attr = KA_REVERSE;
+ break;
+ default:
+ kd_attr = KA_NORMAL;
+ break;
+ }
+ esc_spt = esc_seq;
+ break;
+ case '@':
+ if (number == DEFAULT)
+ kd_insch(1);
+ else
+ kd_insch(number);
+ esc_spt = esc_seq;
+ break;
+ case 'H':
+ kd_home();
+ esc_spt = esc_seq;
+ break;
+ case 'A':
+ if (number == DEFAULT)
+ kd_up();
+ else
+ while (number--)
+ kd_up();
+ esc_spt = esc_seq;
+ break;
+ case 'B':
+ if (number == DEFAULT)
+ kd_down();
+ else
+ while (number--)
+ kd_down();
+ esc_spt = esc_seq;
+ break;
+ case 'C':
+ if (number == DEFAULT)
+ kd_right();
+ else
+ while (number--)
+ kd_right();
+ esc_spt = esc_seq;
+ break;
+ case 'D':
+ if (number == DEFAULT)
+ kd_left();
+ else
+ while (number--)
+ kd_left();
+ esc_spt = esc_seq;
+ break;
+ case 'E':
+ kd_cr();
+ if (number == DEFAULT)
+ kd_down();
+ else
+ while (number--)
+ kd_down();
+ esc_spt = esc_seq;
+ break;
+ case 'F':
+ kd_cr();
+ if (number == DEFAULT)
+ kd_up();
+ else
+ while (number--)
+ kd_up();
+ esc_spt = esc_seq;
+ break;
+ case 'G':
+ if (number == DEFAULT)
+ number = 0;
+ else
+ if (number > 0)
+ --number; /* because number is from 1 */
+ kd_setpos(BEG_OF_LINE(kd_curpos) + number * ONE_SPACE);
+ esc_spt = esc_seq;
+ break;
+ case ';':
+ ++cp;
+ if (*cp == '\0')
+ break; /* not ready yet */
+ if (number == DEFAULT)
+ number = 0;
+ else
+ if (number > 0)
+ --number; /* numbered from 1 */
+ newpos = (number * ONE_LINE); /* setup row */
+ cp += kd_atoi(cp, &number);
+ if (*cp == '\0')
+ break; /* not ready yet */
+ if (number == DEFAULT)
+ number = 0;
+ else if (number > 0)
+ number--;
+ newpos += (number * ONE_SPACE); /* setup column */
+ if (newpos < 0)
+ newpos = 0; /* upper left */
+ if (newpos > ONE_PAGE)
+ newpos = (ONE_PAGE - ONE_SPACE);
+ /* lower right */
+ if (*cp == '\0')
+ break; /* not ready yet */
+ if (*cp == 'H') {
+ kd_setpos(newpos);
+ esc_spt = esc_seq; /* done, reset */
+ }
+ else
+ esc_spt = esc_seq;
+ break; /* done or not ready */
+ case 'J':
+ switch(number) {
+ case DEFAULT:
+ case 0:
+ kd_cltobcur(); /* clears from current
+ pos to bottom.
+ */
+ break;
+ case 1:
+ kd_cltopcur(); /* clears from top to
+ current pos.
+ */
+ break;
+ case 2:
+ kd_cls();
+ break;
+ default:
+ break;
+ }
+ esc_spt = esc_seq; /* reset it */
+ break;
+ case 'K':
+ switch(number) {
+ case DEFAULT:
+ case 0:
+ kd_cltoecur(); /* clears from current
+ pos to eoln.
+ */
+ break;
+ case 1:
+ kd_clfrbcur(); /* clears from begin
+ of line to current
+ pos.
+ */
+ break;
+ case 2:
+ kd_eraseln(); /* clear entire line */
+ break;
+ default:
+ break;
+ }
+ esc_spt = esc_seq;
+ break;
+ case 'L':
+ if (number == DEFAULT)
+ kd_insln(1);
+ else
+ kd_insln(number);
+ esc_spt = esc_seq;
+ break;
+ case 'M':
+ if (number == DEFAULT)
+ kd_delln(1);
+ else
+ kd_delln(number);
+ esc_spt = esc_seq;
+ break;
+ case 'P':
+ if (number == DEFAULT)
+ kd_delch(1);
+ else
+ kd_delch(number);
+ esc_spt = esc_seq;
+ break;
+ case 'S':
+ if (number == DEFAULT)
+ kd_scrollup();
+ else
+ while (number--)
+ kd_scrollup();
+ esc_spt = esc_seq;
+ break;
+ case 'T':
+ if (number == DEFAULT)
+ kd_scrolldn();
+ else
+ while (number--)
+ kd_scrolldn();
+ esc_spt = esc_seq;
+ break;
+ case 'X':
+ if (number == DEFAULT)
+ kd_erase(1);
+ else
+ kd_erase(number);
+ esc_spt = esc_seq;
+ break;
+ case '\0':
+ break; /* not enough yet */
+ default:
+ kd_putc(*cp); /* show inv character */
+ esc_spt = esc_seq; /* inv entry, reset */
+ break;
+ }
+ return;
+}
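+
+/*
+ * A few example sequences handled above (illustrative):
+ *	ESC [ 7 m	switch to reverse video (kd_attr = KA_REVERSE)
+ *	ESC [ 2 J	clear the whole screen (kd_cls)
+ *	ESC [ 3 B	move the cursor down three lines
+ *	ESC [ 12 ; 40 H	move the cursor to row 12, column 40 (1-based)
+ */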
+
+/*
+ * kd_atoi:
+ *
+ * This function converts an ascii string into an integer, and
+ * returns DEFAULT if no digits were found.  This is why we don't
+ * use the regular atoi(): here ZERO is ZERO and not the DEFAULT
+ * in all cases.
+ *
+ * input : string
+ * output : a number or possibly DEFAULT, and the count of characters
+ * consumed by the conversion
+ *
+ */
+int
+kd_atoi(cp, nump)
+u_char *cp;
+int *nump;
+{
+ int number;
+ u_char *original;
+
+ original = cp;
+ for (number = 0; ('0' <= *cp) && (*cp <= '9'); cp++)
+ number = (number * 10) + (*cp - '0');
+ if (original == cp)
+ *nump = DEFAULT;
+ else
+ *nump = number;
+ return(cp - original);
+}
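+
+/*
+ * Example (illustrative): with cp pointing at "12;40H", kd_atoi()
+ * stores 12 in *nump and returns 2 (characters consumed); with cp
+ * pointing at ";40H" it stores DEFAULT and returns 0.
+ */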
+
+kd_tab()
+{
+ int i;
+
+ for (i = 8 - (CURRENT_COLUMN(kd_curpos) % 8); i > 0; i--) {
+ kd_putc(' ');
+ }
+
+}
+
+
+/*
+ * kd_cls:
+ *
+ * This function clears the screen with spaces and the current attribute.
+ *
+ * input : None
+ * output : Screen is cleared
+ *
+ */
+kd_cls()
+{
+ (*kd_dclear)(0, ONE_PAGE/ONE_SPACE, kd_attr);
+ return;
+}
+
+
+/*
+ * kd_home:
+ *
+ * This function will move the cursor to the home position on the screen,
+ * as well as set the internal cursor position (kd_curpos) to home.
+ *
+ * input : None
+ * output : Cursor position is moved
+ *
+ */
+kd_home()
+{
+ kd_setpos(0);
+ return;
+}
+
+
+/*
+ * kd_up:
+ *
+ * This function moves the cursor up one line position.
+ *
+ * input : None
+ * output : Cursor moves up one line, or screen is scrolled
+ *
+ */
+kd_up()
+{
+ if (kd_curpos < ONE_LINE)
+ kd_scrolldn();
+ else
+ kd_setpos(kd_curpos - ONE_LINE);
+ return;
+}
+
+
+/*
+ * kd_down:
+ *
+ * This function moves the cursor down one line position.
+ *
+ * input : None
+ * output : Cursor moves down one line or the screen is scrolled
+ *
+ */
+kd_down()
+{
+ if (kd_curpos >= (ONE_PAGE - ONE_LINE))
+ kd_scrollup();
+ else
+ kd_setpos(kd_curpos + ONE_LINE);
+ return;
+}
+
+
+/*
+ * kd_right:
+ *
+ * This function moves the cursor one position to the right.
+ *
+ * input : None
+ * output : Cursor moves one position to the right
+ *
+ */
+kd_right()
+{
+ if (kd_curpos < (ONE_PAGE - ONE_SPACE))
+ kd_setpos(kd_curpos + ONE_SPACE);
+ else {
+ kd_scrollup();
+ kd_setpos(BEG_OF_LINE(kd_curpos));
+ }
+ return;
+}
+
+
+/*
+ * kd_left:
+ *
+ * This function moves the cursor one position to the left.
+ *
+ * input : None
+ * output : Cursor moves one position to the left
+ *
+ */
+kd_left()
+{
+ if (0 < kd_curpos)
+ kd_setpos(kd_curpos - ONE_SPACE);
+ return;
+}
+
+
+/*
+ * kd_cr:
+ *
+ * This function moves the cursor to the beginning of the current
+ * line.
+ *
+ * input : None
+ * output : Cursor moves to the beginning of the current line
+ *
+ */
+kd_cr()
+{
+ kd_setpos(BEG_OF_LINE(kd_curpos));
+ return;
+}
+
+
+/*
+ * kd_cltobcur:
+ *
+ * This function clears from the current cursor position to the bottom
+ * of the screen.
+ *
+ * input : None
+ * output : Screen is cleared from current cursor position to bottom
+ *
+ */
+kd_cltobcur()
+{
+ csrpos_t start;
+ int count;
+
+ start = kd_curpos;
+ count = (ONE_PAGE - kd_curpos)/ONE_SPACE;
+ (*kd_dclear)(start, count, kd_attr);
+ return;
+}
+
+
+/*
+ * kd_cltopcur:
+ *
+ * This function clears from the current cursor position to the top
+ * of the screen.
+ *
+ * input : None
+ * output : Screen is cleared from current cursor position to top
+ *
+ */
+kd_cltopcur()
+{
+ int count;
+
+ count = (kd_curpos + ONE_SPACE) / ONE_SPACE;
+ (*kd_dclear)(0, count, kd_attr);
+ return;
+}
+
+
+/*
+ * kd_cltoecur:
+ *
+ * This function clears from the current cursor position to eoln.
+ *
+ * input : None
+ * output : Line is cleared from current cursor position to eoln
+ *
+ */
+kd_cltoecur()
+{
+ csrpos_t i;
+ csrpos_t hold;
+
+ hold = BEG_OF_LINE(kd_curpos) + ONE_LINE;
+ for (i = kd_curpos; i < hold; i += ONE_SPACE) {
+ (*kd_dput)(i, K_SPACE, kd_attr);
+ }
+}
+
+
+/*
+ * kd_clfrbcur:
+ *
+ * This function clears from the beginning of the line to the current
+ * cursor position.
+ *
+ * input : None
+ * output : Line is cleared from beginning to current position
+ *
+ */
+kd_clfrbcur()
+{
+ csrpos_t i;
+
+ for (i = BEG_OF_LINE(kd_curpos); i <= kd_curpos; i += ONE_SPACE) {
+ (*kd_dput)(i, K_SPACE, kd_attr);
+ }
+}
+
+
+/*
+ * kd_delln:
+ *
+ * This function deletes 'number' lines on the screen by effectively
+ * scrolling the lines up and replacing the old lines with spaces.
+ *
+ * input : number of lines to delete
+ * output : lines appear to be deleted
+ *
+ */
+kd_delln(number)
+int number;
+{
+ csrpos_t to;
+ csrpos_t from;
+ int delbytes; /* num of bytes to delete */
+ int count; /* num of words to move or fill */
+
+ if (number <= 0)
+ return;
+
+ delbytes = number * ONE_LINE;
+ to = BEG_OF_LINE(kd_curpos);
+ if (to + delbytes >= ONE_PAGE)
+ delbytes = ONE_PAGE - to;
+ if (to + delbytes < ONE_PAGE) {
+ from = to + delbytes;
+ count = (ONE_PAGE - from) / ONE_SPACE;
+ (*kd_dmvup)(from, to, count);
+ }
+
+ to = ONE_PAGE - delbytes;
+ count = delbytes / ONE_SPACE;
+ (*kd_dclear)(to, count, kd_attr);
+ return;
+}
+
+
+/*
+ * kd_insln:
+ *
+ * This function inserts 'number' blank lines above the current one by
+ * scrolling the current line and all the lines below it down.
+ *
+ * input : number of lines to insert
+ * output : New lines appear to be inserted
+ *
+ */
+kd_insln(number)
+int number;
+{
+ csrpos_t to;
+ csrpos_t from;
+ int count;
+ csrpos_t top; /* top of block to be moved */
+ int insbytes; /* num of bytes inserted */
+
+ if (number <= 0)
+ return;
+
+ top = BEG_OF_LINE(kd_curpos);
+ insbytes = number * ONE_LINE;
+ if (top + insbytes > ONE_PAGE)
+ insbytes = ONE_PAGE - top;
+ to = ONE_PAGE - ONE_SPACE;
+ from = to - insbytes;
+ if (from > top) {
+ count = (from - top + ONE_SPACE) / ONE_SPACE;
+ (*kd_dmvdown)(from, to, count);
+ }
+
+ count = insbytes / ONE_SPACE;
+ (*kd_dclear)(top, count, kd_attr);
+ return;
+}
+
+
+/*
+ * kd_delch:
+ *
+ * This function deletes a number of characters from the current
+ * position in the line.
+ *
+ * input : number of characters to delete
+ * output : characters appear to be deleted
+ *
+ */
+kd_delch(number)
+int number;
+{
+ int count; /* num words moved/filled */
+ int delbytes; /* bytes to delete */
+ register csrpos_t to;
+ csrpos_t from;
+ csrpos_t nextline; /* start of next line */
+
+ if (number <= 0)
+ return;
+
+ nextline = BEG_OF_LINE(kd_curpos) + ONE_LINE;
+ delbytes = number * ONE_SPACE;
+ if (kd_curpos + delbytes > nextline)
+ delbytes = nextline - kd_curpos;
+ if (kd_curpos + delbytes < nextline) {
+ from = kd_curpos + delbytes;
+ to = kd_curpos;
+ count = (nextline - from) / ONE_SPACE;
+ (*kd_dmvup)(from, to, count);
+ }
+
+ to = nextline - delbytes;
+ count = delbytes / ONE_SPACE;
+ (*kd_dclear)(to, count, kd_attr);
+ return;
+
+}
+
+
+/*
+ * kd_erase:
+ *
+ * This function overwrites characters with spaces, starting at the
+ * current cursor position and ending 'number' positions away.
+ *
+ * input : number of characters to erase
+ * output : characters appear to be blanked or erased
+ *
+ */
+kd_erase(number)
+int number;
+{
+ csrpos_t i;
+ csrpos_t stop;
+
+ stop = kd_curpos + (ONE_SPACE * number);
+ if (stop > BEG_OF_LINE(kd_curpos) + ONE_LINE)
+ stop = BEG_OF_LINE(kd_curpos) + ONE_LINE;
+ for (i = kd_curpos; i < stop; i += ONE_SPACE) {
+ (*kd_dput)(i, K_SPACE, kd_attr);
+ }
+ return;
+}
+
+
+/*
+ * kd_eraseln:
+ *
+ * This function erases the current line with spaces.
+ *
+ * input : None
+ * output : Current line is erased
+ *
+ */
+kd_eraseln()
+{
+ csrpos_t i;
+ csrpos_t stop;
+
+ stop = BEG_OF_LINE(kd_curpos) + ONE_LINE;
+ for (i = BEG_OF_LINE(kd_curpos); i < stop; i += ONE_SPACE) {
+ (*kd_dput)(i, K_SPACE, kd_attr);
+ }
+ return;
+}
+
+
+/*
+ * kd_insch:
+ *
+ * This function inserts 'number' blanks at the current cursor position
+ * and moves the rest of the characters on the line over.
+ *
+ * input : number of blanks to insert
+ * output : Blanks are inserted at cursor position
+ *
+ */
+kd_insch(number)
+int number;
+{
+ csrpos_t to;
+ csrpos_t from;
+ int count;
+ csrpos_t nextline; /* start of next line */
+ int insbytes; /* num of bytes inserted */
+
+ if (number <= 0)
+ return;
+
+ nextline = BEG_OF_LINE(kd_curpos) + ONE_LINE;
+ insbytes = number * ONE_SPACE;
+ if (kd_curpos + insbytes > nextline)
+ insbytes = nextline - kd_curpos;
+
+ to = nextline - ONE_SPACE;
+ from = to - insbytes;
+ if (from >= kd_curpos) {
+ count = (from - kd_curpos + ONE_SPACE) / ONE_SPACE;
+ (*kd_dmvdown)(from, to, count);
+ }
+
+ count = insbytes / ONE_SPACE;
+ (*kd_dclear)(kd_curpos, count, kd_attr);
+ return;
+}
+
+
+/*
+ * kd_isupper, kd_islower:
+ *
+ * We don't include ctype.h because it drags in stdio.h; all we need
+ * to know is whether the character is uppercase or lowercase.
+ *
+ * input : Character 'c'
+ * output : kd_isupper returns TRUE if the character is uppercase,
+ * kd_islower returns TRUE if it is lowercase
+ *
+ */
+kd_isupper(c)
+u_char c;
+{
+ if (('A' <= c) && (c <= 'Z'))
+ return(TRUE);
+ return(FALSE);
+}
+
+kd_islower(c)
+u_char c;
+{
+ if (('a' <= c) && (c <= 'z'))
+ return(TRUE);
+ return(FALSE);
+}
+
+/*
+ * kd_senddata:
+ *
+ * This function sends a byte to the keyboard RDWR port, but
+ * first waits until the input/output data buffer is clear before
+ * sending the data. Note that this byte can be either data or a
+ * keyboard command.
+ *
+ */
+kd_senddata(ch)
+unsigned char ch;
+{
+ while (inb(K_STATUS) & K_IBUF_FUL);
+ outb(K_RDWR, ch);
+ last_sent = ch;
+ return;
+}
+
+/*
+ * kd_sendcmd:
+ *
+ * This function sends a command byte to the keyboard command
+ * port, but first waits until the input/output data buffer is
+ * clear before sending the data.
+ *
+ */
+kd_sendcmd(ch)
+unsigned char ch;
+{
+ while (inb(K_STATUS) & K_IBUF_FUL);
+ outb(K_CMD, ch);
+ return;
+}
+
+
+/*
+ * kd_getdata:
+ *
+ * This function returns a data byte from the keyboard RDWR port,
+ * after waiting until the port is flagged as having something to
+ * read.
+ */
+unsigned char
+kd_getdata()
+{
+ while ((inb(K_STATUS) & K_OBUF_FUL) == 0);
+ return(inb(K_RDWR));
+}
+
+kd_cmdreg_read()
+{
+	int ch = KC_CMD_READ;
+
+ while (inb(K_STATUS) & K_IBUF_FUL);
+ outb(K_CMD, ch);
+
+ while ((inb(K_STATUS) & K_OBUF_FUL) == 0);
+ return(inb(K_RDWR));
+}
+
+kd_cmdreg_write(val)
+int val;
+{
+	int ch = KC_CMD_WRITE;
+
+ while (inb(K_STATUS) & K_IBUF_FUL);
+ outb(K_CMD, ch);
+
+ while (inb(K_STATUS) & K_IBUF_FUL);
+ outb(K_RDWR, val);
+}
+
+kd_mouse_drain()
+{
+ int i;
+ while(inb(K_STATUS) & K_IBUF_FUL);
+ while((i = inb(K_STATUS)) & K_OBUF_FUL)
+ printf("kbd: S = %x D = %x\n", i, inb(K_RDWR));
+}
+
+/*
+ * set_kd_state:
+ *
+ * Set kd_state and update the keyboard status LEDs.
+ */
+
+set_kd_state(newstate)
+int newstate;
+{
+ kd_state = newstate;
+ kd_setleds1(state2leds(newstate));
+}
+
+/*
+ * state2leds:
+ *
+ * Return a byte containing LED settings for the keyboard, given
+ * a state vector.
+ */
+u_char
+state2leds(state)
+int state;
+{
+ u_char result = 0;
+
+ if (state & KS_NLKED)
+ result |= K_LED_NUMLK;
+ if (state & KS_CLKED)
+ result |= K_LED_CAPSLK;
+ return(result);
+}
+
+/*
+ * kd_setleds[12]:
+ *
+ * Set the keyboard LEDs according to the given byte.
+ */
+kd_setleds1(val)
+u_char val;
+{
+ if (kd_ack != NOT_WAITING) {
+ printf("kd_setleds1: unexpected state (%d)\n", kd_ack);
+ return;
+ }
+
+ kd_ack = SET_LEDS;
+ kd_nextled = val;
+ kd_senddata(K_CMD_LEDS);
+}
+
+kd_setleds2()
+{
+ kd_senddata(kd_nextled);
+}
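+
+/*
+ * Summary of the LED handshake implemented above: kd_setleds1()
+ * records the desired LED byte in kd_nextled, sets kd_ack to
+ * SET_LEDS and sends K_CMD_LEDS.  When the keyboard ACKs, kdintr()
+ * sees K_ACKSC and calls kd_handle_ack(), which sends the saved
+ * byte via kd_setleds2() and then waits for the final data ACK.
+ */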
+
+
+/*
+ * cnsetleds:
+ *
+ * Like kd_setleds[12], but polled rather than interrupt-driven.
+ * (cngetc ignores caps lock and num lock anyway.)
+ */
+cnsetleds(val)
+u_char val;
+{
+ kd_senddata(K_CMD_LEDS);
+ (void)kd_getdata(); /* XXX - assume is ACK */
+ kd_senddata(val);
+ (void)kd_getdata(); /* XXX - assume is ACK */
+}
+
+kdreboot()
+{
+ (*kd_dreset)();
+
+#ifndef BROKEN_KEYBOARD_RESET
+	kd_sendcmd(0xFE);	/* 0xfe: pulse the reset line via the keyboard controller */
+ delay(1000000); /* wait to see if anything happens */
+#endif
+
+ /*
+ * If that didn't work, then we'll just have to try and
+ * do it the hard way.
+ */
+ cpu_shutdown();
+}
+
+static int which_button[] = {0, MOUSE_LEFT, MOUSE_MIDDLE, MOUSE_RIGHT};
+static struct mouse_motion moved;
+
+kd_kbd_magic(scancode)
+int scancode;
+{
+	int new_button = 0;
+
+ if (kd_kbd_mouse == 2)
+ printf("sc = %x\n", scancode);
+
+ switch (scancode) {
+/* f1 f2 f3 */
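+	/*
+	 * The next three cases fall through deliberately: F3 ends up
+	 * with new_button == 3 (right), F2 with 2 (middle) and F1
+	 * with 1 (left), indexing which_button[] above.
+	 */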
+ case 0x3d:
+ new_button++;
+ case 0x3c:
+ new_button++;
+ case 0x3b:
+ new_button++;
+ if (kd_kbd_magic_button && (new_button != kd_kbd_magic_button)) {
+ /* down w/o up */
+ mouse_button(which_button[kd_kbd_magic_button], 1);
+ }
+ /* normal */
+ if (kd_kbd_magic_button == new_button) {
+ mouse_button(which_button[new_button], 1);
+ kd_kbd_magic_button = 0;
+ } else {
+ mouse_button(which_button[new_button], 0);
+ kd_kbd_magic_button = new_button;
+ }
+ break;
+
+/* right left up down */
+ case 0x4d:
+ moved.mm_deltaX = kd_kbd_magic_scale;
+ moved.mm_deltaY = 0;
+ mouse_moved(moved);
+ break;
+ case 0x4b:
+ moved.mm_deltaX = -kd_kbd_magic_scale;
+ moved.mm_deltaY = 0;
+ mouse_moved(moved);
+ break;
+ case 0x48:
+ moved.mm_deltaX = 0;
+ moved.mm_deltaY = kd_kbd_magic_scale;
+ mouse_moved(moved);
+ break;
+ case 0x50:
+ moved.mm_deltaX = 0;
+ moved.mm_deltaY = -kd_kbd_magic_scale;
+ mouse_moved(moved);
+ break;
+/* home pageup end pagedown */
+ case 0x47:
+ moved.mm_deltaX = -2*kd_kbd_magic_scale;
+ moved.mm_deltaY = 2*kd_kbd_magic_scale;
+ mouse_moved(moved);
+ break;
+ case 0x49:
+ moved.mm_deltaX = 2*kd_kbd_magic_scale;
+ moved.mm_deltaY = 2*kd_kbd_magic_scale;
+ mouse_moved(moved);
+ break;
+ case 0x4f:
+ moved.mm_deltaX = -2*kd_kbd_magic_scale;
+ moved.mm_deltaY = -2*kd_kbd_magic_scale;
+ mouse_moved(moved);
+ break;
+ case 0x51:
+ moved.mm_deltaX = 2*kd_kbd_magic_scale;
+ moved.mm_deltaY = -2*kd_kbd_magic_scale;
+ mouse_moved(moved);
+ break;
+
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+
+
+/*
+ * Code specific to EGA/CGA/VGA boards. This code relies on the fact
+ * that the "slam" functions take a word count and ONE_SPACE takes up
+ * 1 word.
+ */
+#define SLAMBPW 2 /* bytes per word for "slam" fcns */
+
+
+/*
+ * kd_xga_init:
+ *
+ * Initialization specific to character-based graphics adapters.
+ */
+void
+kd_xga_init()
+{
+ csrpos_t xga_getpos();
+ unsigned char screen;
+
+ outb(CMOS_ADDR, CMOS_EB);
+ screen = inb(CMOS_DATA) & CM_SCRMSK;
+ switch(screen) {
+ case CM_EGA_VGA:
+ /*
+		 * Here we'll want to query the BIOS on the card
+		 * itself, because then we can figure out exactly
+		 * what type we have.  At this point we only
+		 * know that the card is NOT CGA or MONO.  For
+		 * now, however, we assume backwards compatibility,
+		 * with 0xb8000 as the starting screen memory
+		 * location for these cards.
+ *
+ */
+
+ vid_start = (u_char *)phystokv(EGA_START);
+ kd_index_reg = EGA_IDX_REG;
+ kd_io_reg = EGA_IO_REG;
+ kd_lines = 25;
+ kd_cols = 80;
+ kd_bitmap_start = 0xa0000; /* XXX - magic numbers */
+ { /* XXX - is there a cleaner way to do this? */
+ char *addr = (char *)phystokv(kd_bitmap_start);
+ int i;
+ for (i = 0; i < 200; i++)
+ addr[i] = 0x00;
+ }
+ break;
+ case CM_CGA_40:
+ vid_start = (u_char *)phystokv(CGA_START);
+ kd_index_reg = CGA_IDX_REG;
+ kd_io_reg = CGA_IO_REG;
+ kd_lines = 25;
+ kd_cols = 40;
+ break;
+ case CM_CGA_80:
+ vid_start = (u_char *)phystokv(CGA_START);
+ kd_index_reg = CGA_IDX_REG;
+ kd_io_reg = CGA_IO_REG;
+ kd_lines = 25;
+ kd_cols = 80;
+ break;
+ case CM_MONO_80:
+ vid_start = (u_char *)phystokv(MONO_START);
+ kd_index_reg = MONO_IDX_REG;
+ kd_io_reg = MONO_IO_REG;
+ kd_lines = 25;
+ kd_cols = 80;
+ break;
+ default:
+ printf("kd: unknown screen type, defaulting to EGA\n");
+ }
+
+ kd_setpos(xga_getpos());
+}
+
+
+/*
+ * xga_getpos:
+ *
+ * This function returns the current hardware cursor position on the
+ * screen, scaled for compatibility with kd_curpos.
+ *
+ * input : None
+ * output : returns the value of cursor position on screen
+ *
+ */
+csrpos_t
+xga_getpos()
+{
+ unsigned char low;
+ unsigned char high;
+ short pos;
+
+ outb(kd_index_reg, C_HIGH);
+ high = inb(kd_io_reg);
+ outb(kd_index_reg, C_LOW);
+ low = inb(kd_io_reg);
+ pos = (0xff&low) + ((unsigned short)high<<8);
+
+ return(ONE_SPACE * (csrpos_t)pos);
+}
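+
+/*
+ * Example (illustrative): if the CRT controller returns high == 0x01
+ * and low == 0xf4, then pos == 0x01f4 == 500, i.e. row 6, column 20
+ * of an 80-column screen, and the function returns 500 * ONE_SPACE
+ * == 1000.
+ */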
+
+
+/*
+ * charput:
+ *
+ * Put attributed character for EGA/CGA/etc.
+ */
+static void
+charput(pos, ch, chattr)
+csrpos_t pos; /* where to put it */
+char ch; /* the character */
+char chattr; /* its attribute */
+{
+ *(vid_start + pos) = ch;
+ *(vid_start + pos + 1) = chattr;
+}
+
+
+/*
+ * charsetcursor:
+ *
+ * Set hardware cursor position for EGA/CGA/etc.
+ */
+static void
+charsetcursor(newpos)
+csrpos_t newpos;
+{
+ short curpos; /* position, not scaled for attribute byte */
+
+ curpos = newpos / ONE_SPACE;
+ outb(kd_index_reg, C_HIGH);
+ outb(kd_io_reg, (u_char)(curpos>>8));
+ outb(kd_index_reg, C_LOW);
+ outb(kd_io_reg, (u_char)(curpos&0xff));
+
+ kd_curpos = newpos;
+}
+
+
+/*
+ * charmvup:
+ *
+ * Block move up for EGA/CGA/etc.
+ */
+static void
+charmvup(from, to, count)
+csrpos_t from, to;
+int count;
+{
+ kd_slmscu(vid_start+from, vid_start+to, count);
+}
+
+
+/*
+ * charmvdown:
+ *
+ * Block move down for EGA/CGA/etc.
+ */
+static void
+charmvdown(from, to, count)
+csrpos_t from, to;
+int count;
+{
+ kd_slmscd(vid_start+from, vid_start+to, count);
+}
+
+
+/*
+ * charclear:
+ *
+ * Fast clear for CGA/EGA/etc.
+ */
+static void
+charclear(to, count, chattr)
+csrpos_t to;
+int count;
+char chattr;
+{
+ kd_slmwd(vid_start+to, count, ((unsigned short)chattr<<8)+K_SPACE);
+}
+
+
+/*
+ * kd_noopreset:
+ *
+ * No-op reset routine for kd_dreset.
+ */
+static void
+kd_noopreset()
+{
+}
+
+
+
+/*
+ * Generic routines for bitmap devices (i.e., assume no hardware
+ * assist). Assumes a simple byte ordering (i.e., a byte at a lower
+ * address is to the left of the byte at the next higher address).
+ * For the 82786, this works anyway if the characters are 2 bytes
+ * wide. (more bubble gum and paper clips.)
+ *
+ * See the comments above about SLAMBPW.
+ */
+
+void bmpch2bit(), bmppaintcsr();
+u_char *bit2fbptr();
+
+
+/*
+ * bmpput: Copy a character from the font to the frame buffer.
+ */
+
+void
+bmpput(pos, ch, chattr)
+csrpos_t pos;
+char ch, chattr;
+{
+ short xbit, ybit; /* u/l corner of char pos */
+ register u_char *to, *from;
+ register short i, j;
+ u_char mask = (chattr == KA_REVERSE ? 0xff : 0);
+
+ if ((u_char)ch >= chars_in_font)
+ ch = K_QUES;
+
+ bmpch2bit(pos, &xbit, &ybit);
+ to = bit2fbptr(xbit, ybit);
+ from = font_start + ch * char_byte_width;
+ for (i = 0; i < char_height; ++i) {
+ for (j = 0; j < char_byte_width; ++j)
+ *(to+j) = *(from+j) ^ mask;
+ to += fb_byte_width;
+ from += font_byte_width;
+ }
+}
+
+/*
+ * bmpcp1char: copy 1 char from one place in the frame buffer to
+ * another.
+ */
+void
+bmpcp1char(from, to)
+csrpos_t from, to;
+{
+ short from_xbit, from_ybit;
+ short to_xbit, to_ybit;
+ register u_char *tp, *fp;
+ register short i, j;
+
+ bmpch2bit(from, &from_xbit, &from_ybit);
+ bmpch2bit(to, &to_xbit, &to_ybit);
+
+ tp = bit2fbptr(to_xbit, to_ybit);
+ fp = bit2fbptr(from_xbit, from_ybit);
+
+ for (i = 0; i < char_height; ++i) {
+ for (j = 0; j < char_byte_width; ++j)
+ *(tp+j) = *(fp+j);
+ tp += fb_byte_width;
+ fp += fb_byte_width;
+ }
+}
+
+/*
+ * bmpvmup: Copy a block of character positions upwards.
+ */
+void
+bmpmvup(from, to, count)
+csrpos_t from, to;
+int count;
+{
+ short from_xbit, from_ybit;
+ short to_xbit, to_ybit;
+ short i;
+
+ bmpch2bit(from, &from_xbit, &from_ybit);
+ bmpch2bit(to, &to_xbit, &to_ybit);
+
+ if (from_xbit == xstart && to_xbit == xstart && count%kd_cols == 0) {
+ /* fast case - entire lines */
+ from_xbit = to_xbit = 0;
+ bmppaintcsr(kd_curpos, char_black); /* don't copy cursor */
+ count /= kd_cols; /* num lines */
+ count *= fb_byte_width * (char_height+cursor_height);
+ kd_slmscu(bit2fbptr(from_xbit, from_ybit),
+ bit2fbptr(to_xbit, to_ybit),
+ count/SLAMBPW);
+ bmppaintcsr(kd_curpos, char_white);
+ } else {
+ /* slow case - everything else */
+ for (i=0; i < count; ++i) {
+ bmpcp1char(from, to);
+ from += ONE_SPACE;
+ to += ONE_SPACE;
+ }
+ }
+}
+
+/*
+ * bmpmvdown: copy a block of characters down.
+ */
+void
+bmpmvdown(from, to, count)
+csrpos_t from, to;
+int count;
+{
+ short from_xbit, from_ybit;
+ short to_xbit, to_ybit;
+ short i;
+
+ bmpch2bit(from, &from_xbit, &from_ybit);
+ bmpch2bit(to, &to_xbit, &to_ybit);
+
+ if (from_xbit == xstart + (kd_cols - 1) * char_width
+ && to_xbit == xstart + (kd_cols - 1) * char_width
+ && count%kd_cols == 0) {
+ /* fast case - entire lines*/
+ from_xbit = to_xbit = 8 * (fb_byte_width - 1);
+ /* last byte on line */
+ bmppaintcsr(kd_curpos, char_black); /* don't copy cursor */
+ count /= kd_cols; /* num lines */
+ count *= fb_byte_width * (char_height+cursor_height);
+ kd_slmscd(bit2fbptr(from_xbit, from_ybit),
+ bit2fbptr(to_xbit, to_ybit),
+ count/SLAMBPW);
+ bmppaintcsr(kd_curpos, char_white);
+ } else {
+ /* slow case - everything else */
+ for (i=0; i < count; ++i) {
+ bmpcp1char(from, to);
+ from -= ONE_SPACE;
+ to -= ONE_SPACE;
+ }
+ }
+}
+
+/*
+ * bmpclear: clear one or more character positions.
+ */
+void
+bmpclear(to, count, chattr)
+csrpos_t to; /* 1st char */
+int count; /* num chars */
+char chattr; /* reverse or normal */
+{
+ register short i;
+ u_short clearval;
+ u_short clearbyte = (chattr == KA_REVERSE ? char_white : char_black);
+
+ clearval = (u_short)(clearbyte<<8) + clearbyte;
+ if (to == 0 && count >= kd_lines * kd_cols) {
+ /* fast case - entire page */
+ kd_slmwd(vid_start, (fb_byte_width * fb_height)/SLAMBPW,
+ clearval);
+ } else
+ /* slow case */
+ for (i = 0; i < count; ++i) {
+ bmpput(to, K_SPACE, chattr);
+ to += ONE_SPACE;
+ }
+}
+
+/*
+ * bmpsetcursor: update the display and set the logical cursor.
+ */
+void
+bmpsetcursor(pos)
+csrpos_t pos;
+{
+ /* erase old cursor & paint new one */
+ bmppaintcsr(kd_curpos, char_black);
+ bmppaintcsr(pos, char_white);
+ kd_curpos = pos;
+}
+
+/*
+ * bmppaintcsr: paint cursor bits.
+ */
+void
+bmppaintcsr(pos, val)
+csrpos_t pos;
+u_char val;
+{
+ short xbit, ybit;
+ register u_char *cp;
+ register short line, byte;
+
+ bmpch2bit(pos, &xbit, &ybit);
+ ybit += char_height; /* position at bottom of line */
+ cp = bit2fbptr(xbit, ybit);
+ for (line = 0; line < cursor_height; ++line) {
+ for (byte = 0; byte < char_byte_width; ++byte)
+ *(cp+byte) = val;
+ cp += fb_byte_width;
+ }
+}
+
+/*
+ * bmpch2bit: convert character position to x and y bit addresses.
+ * (0, 0) is the upper left corner.
+ */
+void
+bmpch2bit(pos, xb, yb)
+csrpos_t pos;
+short *xb, *yb; /* x, y bit positions, u/l corner */
+{
+ register short xch, ych;
+
+ xch = (pos / ONE_SPACE) % kd_cols;
+ ych = pos / (ONE_SPACE * kd_cols);
+ *xb = xstart + xch * char_width;
+ *yb = ystart + ych * (char_height + cursor_height);
+}
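+
+/*
+ * Worked example (illustrative, assuming kd_cols == 80, char_width == 8,
+ * char_height == 16 and cursor_height == 3): the cell at row 1,
+ * column 2, i.e. pos == (1*80 + 2) * ONE_SPACE, maps to
+ * xb == xstart + 2*8 and yb == ystart + 1*(16 + 3).
+ */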
+
+/*
+ * bit2fbptr: return a pointer into the frame buffer corresponding to
+ * the bit address (x, y).
+ * Assumes that xb and yb don't point to the middle of a
+ * byte.
+ */
+u_char *
+bit2fbptr(xb, yb)
+short xb, yb;
+{
+ return(vid_start + yb * fb_byte_width + xb/8);
+}
+
+
+/*
+ * console stuff
+ */
+
+/*
+ * XXX we assume that PCs *always* have a console
+ */
+int
+kdcnprobe(struct consdev *cp)
+{
+ int maj, unit, pri;
+
+ maj = 0;
+ unit = 0;
+ pri = CN_INTERNAL;
+
+ cp->cn_dev = makedev(maj, unit);
+ cp->cn_pri = pri;
+}
+
+int
+kdcninit(struct consdev *cp)
+{
+ kdinit();
+ return 0;
+}
+
+int
+kdcngetc(dev_t dev, int wait)
+{
+ if (wait) {
+ int c;
+ while ((c = kdcnmaygetc()) < 0)
+ continue;
+ return c;
+ }
+ else
+ return kdcnmaygetc();
+}
+
+int
+kdcnputc(dev_t dev, int c)
+{
+ int i;
+
+ if (!kd_initialized)
+ return;
+
+ /* Note that tab is handled in kd_putc */
+ if (c == '\n')
+ kd_putc('\r');
+ kd_putc(c);
+}
+
+/*
+ * kdcnmaygetc:
+ *
+ * Get one character using polling, rather than interrupts. Used
+ * by the kernel debugger. Note that Caps Lock is ignored.
+ * Normally this routine is called with interrupts already
+ * disabled, but there is code in place so that it will be more
+ * likely to work even if interrupts are turned on.
+ */
+int
+kdcnmaygetc(void)
+{
+ unsigned char c;
+ unsigned char scancode;
+ unsigned int char_idx;
+#ifdef notdef
+ spl_t o_pri;
+#endif
+ boolean_t up;
+
+ if (! kd_initialized)
+ return -1;
+
+ kd_extended = FALSE;
+#ifdef notdef
+ o_pri = splhi();
+#endif
+ for ( ; ; ) {
+ if (!(inb(K_STATUS) & K_OBUF_FUL))
+ return -1;
+
+ up = FALSE;
+ /*
+ * We'd come here for mouse events in debugger, if
+ * the mouse were on.
+ */
+ if ((inb(K_STATUS) & 0x20) == 0x20) {
+ printf("M%xP", inb(K_RDWR));
+ continue;
+ }
+ scancode = inb(K_RDWR);
+ /*
+ * Handle extend modifier and
+ * ack/resend, otherwise we may never receive
+ * a key.
+ */
+ if (scancode == K_EXTEND) {
+ kd_extended = TRUE;
+ continue;
+ } else if (scancode == K_RESEND) {
+ printf("cngetc: resend");
+ kd_resend();
+ continue;
+ } else if (scancode == K_ACKSC) {
+ printf("cngetc: handle_ack");
+ kd_handle_ack();
+ continue;
+ }
+ if (scancode & K_UP) {
+ up = TRUE;
+ scancode &= ~K_UP;
+ }
+ if (kd_kbd_mouse)
+ kd_kbd_magic(scancode);
+ if (scancode < NUMKEYS) {
+ /* Lookup in map, then process. */
+ char_idx = kdstate2idx(kd_state, kd_extended);
+ c = key_map[scancode][char_idx];
+ if (c == K_SCAN) {
+ c = key_map[scancode][++char_idx];
+ kd_state = do_modifier(kd_state, c, up);
+#ifdef notdef
+ cnsetleds(state2leds(kd_state));
+#endif
+ } else if (!up) {
+ /* regular key-down */
+ if (c == K_CR)
+ c = K_LF;
+#ifdef notdef
+ splx(o_pri);
+#endif
+ return(c & 0177);
+ }
+ }
+ }
+}
diff --git a/i386/i386at/kd.h b/i386/i386at/kd.h
new file mode 100644
index 00000000..f2d3f5ec
--- /dev/null
+++ b/i386/i386at/kd.h
@@ -0,0 +1,663 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kd.h
+ Description: definitions for AT keyboard/display driver
+ Authors: Eugene Kuerner, Adrienne Jardetzky, Mike Kupfer
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * This file contains defines and structures that implement hardware
+ * keyboard mapping into ansi defined output codes. Note that this
+ * is structured so that "re-mapping" of actual keys is allowed at
+ * any time during execution of the console driver, and each scan code
+ * can be expanded into up to NUMOUTPUT characters.  The mapping is
+ * programmable at runtime.
+ *
+ * 02 Nov 1988 orc!eugene
+ *
+ */
+
+#ifndef _KD_H_
+#define _KD_H_
+
+#include <sys/ioctl.h>
+#include <mach/boolean.h>
+#include <sys/types.h>
+#include <sys/time.h>
+
+
+/*
+ * Where memory for various graphics adapters starts.
+ */
+#define EGA_START 0x0b8000
+#define CGA_START 0x0b8000
+#define MONO_START 0x0b0000
+
+/*
+ * Common I/O ports.
+ */
+#define K_TMR0 0x40 /* timer 0, 1, or 2 value (r/w) */
+#define K_TMR1 0x41
+#define K_TMR2 0x42
+#define K_TMRCTL 0x43 /* timer control (write-only) */
+#define K_RDWR 0x60 /* keyboard data & cmds (read/write) */
+#define K_PORTB 0x61 /* r/w. speaker & status lines */
+#define K_STATUS 0x64 /* keybd status (read-only) */
+#define K_CMD 0x64 /* keybd ctlr command (write-only) */
+
+/*
+ * I/O ports for various graphics adapters.
+ */
+#define EGA_IDX_REG 0x3d4
+#define EGA_IO_REG 0x3d5
+#define CGA_IDX_REG 0x3d4
+#define CGA_IO_REG 0x3d5
+#define MONO_IDX_REG 0x3b4
+#define MONO_IO_REG 0x3b5
+
+/*
+ * Commands sent to graphics adapter.
+ */
+#define C_LOW 0x0f /* return low byte of cursor addr */
+#define C_HIGH 0x0e /* high byte */
+
+/*
+ * Bit definitions for K_STATUS port.
+ */
+#define K_OBUF_FUL 0x01 /* output (from keybd) buffer full */
+#define K_IBUF_FUL 0x02 /* input (to keybd) buffer full */
+#define K_SYSFLAG 0x04 /* "System Flag" */
+#define K_CMD_DATA 0x08 /* 1 = input buf has cmd, 0 = data */
+#define K_KBD_INHBT 0x10 /* 0 if keyboard inhibited */
+
+/*
+ * Keyboard controller commands (sent to K_CMD port).
+ */
+#define KC_CMD_READ 0x20 /* read controller command byte */
+#define KC_CMD_WRITE 0x60 /* write controller command byte */
+#define KC_CMD_TEST 0xab /* test interface */
+#define KC_CMD_DUMP 0xac /* diagnostic dump */
+#define KC_CMD_DISBLE 0xad /* disable keyboard */
+#define KC_CMD_ENBLE 0xae /* enable keyboard */
+#define KC_CMD_RDKBD 0xc4 /* read keyboard ID */
+#define KC_CMD_ECHO 0xee /* used for diagnostic testing */
+
+/*
+ * Keyboard commands (send to K_RDWR).
+ */
+#define K_CMD_LEDS 0xed /* set status LEDs (caps lock, etc.) */
+
+/*
+ * Bit definitions for controller command byte (sent following
+ * K_CMD_WRITE command).
+ */
+#define K_CB_ENBLIRQ 0x01 /* enable data-ready intrpt */
+#define K_CB_SETSYSF 0x04 /* Set System Flag */
+#define K_CB_INHBOVR 0x08 /* Inhibit Override */
+#define K_CB_DISBLE 0x10 /* disable keyboard */
+
+/*
+ * Bit definitions for "Indicator Status Byte" (sent after a
+ * K_CMD_LEDS command). If the bit is on, the LED is on. Undefined
+ * bit positions must be 0.
+ */
+#define K_LED_SCRLLK 0x1 /* scroll lock */
+#define K_LED_NUMLK 0x2 /* num lock */
+#define K_LED_CAPSLK 0x4 /* caps lock */
+
+/*
+ * Bit definitions for "Miscellaneous port B" (K_PORTB).
+ */
+/* read/write */
+#define K_ENABLETMR2 0x01 /* enable output from timer 2 */
+#define K_SPKRDATA 0x02 /* direct input to speaker */
+#define K_ENABLEPRTB 0x04 /* "enable" port B */
+#define K_EIOPRTB 0x08 /* enable NMI on parity error */
+/* read-only */
+#define K_REFRESHB 0x10 /* refresh flag from INLTCONT PAL */
+#define K_OUT2B 0x20 /* timer 2 output */
+#define K_ICKB 0x40 /* I/O channel check (parity error) */
+
+/*
+ * Bit definitions for timer control port (K_TMRCTL).
+ */
+/* select timer 0, 1, or 2. Don't mess with 0 or 1. */
+#define K_SELTMRMASK 0xc0
+#define K_SELTMR0 0x00
+#define K_SELTMR1 0x40
+#define K_SELTMR2 0x80
+
+/* read/load control */
+#define K_RDLDTMRMASK 0x30
+#define K_HOLDTMR 0x00 /* freeze timer until read */
+#define K_RDLDTLSB 0x10 /* read/load LSB */
+#define K_RDLDTMSB 0x20 /* read/load MSB */
+#define K_RDLDTWORD 0x30 /* read/load LSB then MSB */
+
+/* mode control */
+#define K_TMDCTLMASK 0x0e
+#define K_TCOUNTINTR 0x00 /* "Term Count Intr" */
+#define K_TONESHOT 0x02 /* "Progr One-Shot" */
+#define K_TRATEGEN 0x04 /* "Rate Gen (/n)" */
+#define K_TSQRWAVE 0x06 /* "Sqr Wave Gen" */
+#define K_TSOFTSTRB 0x08 /* "Softw Trig Strob" */
+#define K_THARDSTRB 0x0a /* "Hardw Trig Strob" */
+
+/* count mode */
+#define K_TCNTMDMASK 0x01
+#define K_TBINARY 0x00 /* 16-bit binary counter */
+#define K_TBCD 0x01 /* 4-decade BCD counter */
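+
+/*
+ * Hedged, purely illustrative sketch (an assumption about typical PC/AT
+ * usage, not code copied from kd.c): these bits can be combined to drive
+ * the speaker by programming timer 2 as a square-wave generator and then
+ * gating its output onto the speaker via port B. The divisor 0x0533 below
+ * is an arbitrary example value.
+ *
+ *	outb(K_TMRCTL, K_SELTMR2 | K_RDLDTWORD | K_TSQRWAVE | K_TBINARY);
+ *	outb(K_TMR2, 0x33);				load divisor LSB
+ *	outb(K_TMR2, 0x05);				then MSB
+ *	outb(K_PORTB, inb(K_PORTB) | K_ENABLETMR2 | K_SPKRDATA);
+ *
+ * Clearing K_ENABLETMR2 and K_SPKRDATA in K_PORTB silences the speaker.
+ */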
+
+
+
+/*
+ * Fun definitions for displayed characters and characters read from
+ * the keyboard.
+ */
+
+/*
+ * Attributes for character sent to display.
+ */
+#define KA_NORMAL 0x07
+#define KA_REVERSE 0x70
+
+/*
+ * For an EGA-like display, each character takes two bytes, one for the
+ * actual character, followed by one for its attributes.
+ * Be very careful if you change ONE_SPACE, as these constants are also used
+ * to define the device-independent display implemented by kd.c.
+ * (See kdsoft.h for more details on the device-independent display.)
+ */
+#define ONE_SPACE 2 /* bytes in 1 char, EGA-like display */
+#define BOTTOM_LINE 3840 /* 1st byte in last line of display */
+#define ONE_PAGE 4000 /* number of bytes in page */
+#define ONE_LINE 160 /* number of bytes in line */
+
+#define BEG_OF_LINE(pos) ((pos) - (pos)%ONE_LINE)
+#define CURRENT_COLUMN(pos) (((pos) % ONE_LINE) / ONE_SPACE)
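+
+/*
+ * Purely illustrative arithmetic (not in the original header): with
+ * ONE_LINE = 160 and ONE_SPACE = 2, a display offset of 3362 lies on
+ * row 3362 / ONE_LINE = 21, BEG_OF_LINE(3362) = 3360, and
+ * CURRENT_COLUMN(3362) = (3362 % 160) / 2 = 1.
+ */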
+
+#define NUMKEYS 89
+#define NUMSTATES 5 /* NORMAL_STATE, ... */
+#define NUMOUTPUT 3 /* max size of byte seq from key */
+#define WIDTH_KMAP (NUMSTATES * NUMOUTPUT)
+
+/*
+ * Keyboard states. Used for KDGKBENT, KDSKBENT ioctl's. If you
+ * change these values, you should also rearrange the entries in
+ * key_map.
+ */
+/* "state indices" (for computing key_map index) */
+#define NORM_STATE 0
+#define SHIFT_STATE 1
+#define CTRL_STATE 2
+#define ALT_STATE 3
+#define SHIFT_ALT 4
+/* macro to convert from state index to actual key_map index */
+#define CHARIDX(sidx) ((sidx) * NUMOUTPUT)
+ /* where sidx is in [NORM_STATE ... SHIFT_ALT] */
+
+/* "state bits" for kd_state vector */
+#define KS_NORMAL 0x00
+#define KS_SLKED 0x01
+#define KS_NLKED 0x02
+#define KS_CLKED 0x04
+#define KS_ALTED 0x08
+#define KS_SHIFTED 0x10
+#define KS_CTLED 0x20
+
+
+/*
+ * Scancode values, not to be confused with Ascii values.
+ */
+typedef u_char Scancode;
+
+/* special codes */
+#define K_UP 0x80 /* OR'd in if key below is released */
+#define K_EXTEND 0xe0 /* marker for "extended" sequence */
+#define K_ACKSC 0xfa /* ack for keyboard command */
+#define K_RESEND 0xfe /* request to resend keybd cmd */
+
+/* modifier keys */
+#define K_CTLSC 0x1d /* control down */
+#define K_LSHSC 0x2a /* left shift down */
+#define K_RSHSC 0x36 /* right shift down */
+#define K_ALTSC 0x38 /* alt key down */
+#define K_CLCKSC 0x3a /* caps lock */
+#define K_NLCKSC 0x45 /* num lock down */
+
+/* "special keys" */
+#define K_BSSC 0x0e /* backspace */
+#define K_TABSC 0x0f /* tab */
+#define K_RETSC 0x1c /* return */
+#define K_SPSC 0x39 /* space */
+#define K_ESCSC 0x01 /* ESC */
+
+/* alphabetic keys */
+#define K_qSC 0x10
+#define K_wSC 0x11
+#define K_eSC 0x12
+#define K_rSC 0x13
+#define K_tSC 0x14
+#define K_ySC 0x15
+#define K_uSC 0x16
+#define K_iSC 0x17
+#define K_oSC 0x18
+#define K_pSC 0x19
+
+#define K_aSC 0x1e
+#define K_sSC 0x1f
+#define K_dSC 0x20
+#define K_fSC 0x21
+#define K_gSC 0x22
+#define K_hSC 0x23
+#define K_jSC 0x24
+#define K_kSC 0x25
+#define K_lSC 0x26
+
+#define K_zSC 0x2c
+#define K_xSC 0x2d
+#define K_cSC 0x2e
+#define K_vSC 0x2f
+#define K_bSC 0x30
+#define K_nSC 0x31
+#define K_mSC 0x32
+
+/* numbers and punctuation */
+#define K_ONESC 0x02 /* 1 */
+#define K_TWOSC 0x03 /* 2 */
+#define K_THREESC 0x04 /* 3 */
+#define K_FOURSC 0x05 /* 4 */
+#define K_FIVESC 0x06 /* 5 */
+#define K_SIXSC 0x07 /* 6 */
+#define K_SEVENSC 0x08 /* 7 */
+#define K_EIGHTSC 0x09 /* 8 */
+#define K_NINESC 0x0a /* 9 */
+#define K_ZEROSC 0x0b /* 0 */
+
+#define K_MINUSSC 0x0c /* - */
+#define K_EQLSC 0x0d /* = */
+#define K_LBRKTSC 0x1a /* [ */
+#define K_RBRKTSC 0x1b /* ] */
+#define K_SEMISC 0x27 /* ; */
+#define K_SQUOTESC 0x28 /* ' */
+#define K_GRAVSC 0x29 /* ` */
+#define K_BSLSHSC 0x2b /* \ */
+#define K_COMMASC 0x33 /* , */
+#define K_PERIODSC 0x34 /* . */
+#define K_SLASHSC 0x35 /* / */
+
+/* keypad keys */
+#define K_HOMESC 0x47 /* scancode for home */
+#define K_DELSC 0x53 /* scancode for del */
+
+/*
+ * Ascii values and flag characters for key map.
+ * A function key is represented by the 3-byte char sequence that it
+ * corresponds to.
+ * Other mappable non-Ascii keys (e.g., "ctrl") are represented by a
+ * two-byte sequence: K_SCAN, followed by the key's scan code.
+ */
+#define K_DONE 0xff /* must be same as NC */
+#define NC 0xff /* No character defined */
+
+#define K_SCAN 0xfe /* followed by scan code */
+
+/* ascii char set */
+#define K_NUL 0x00 /* Null character */
+#define K_SOH 0x01
+#define K_STX 0x02
+#define K_ETX 0x03
+#define K_EOT 0x04
+#define K_ENQ 0x05
+#define K_ACK 0x06
+#define K_BEL 0x07 /* bell character */
+#define K_BS 0x08 /* back space */
+#define K_HT 0x09
+#define K_LF 0x0a /* line feed */
+#define K_VT 0x0b
+#define K_FF 0x0c
+#define K_CR 0x0d /* carriage return */
+#define K_SO 0x0e
+#define K_SI 0x0f
+#define K_DLE 0x10
+#define K_DC1 0x11
+#define K_DC2 0x12
+#define K_DC3 0x13
+#define K_DC4 0x14
+#define K_NAK 0x15
+#define K_SYN 0x16
+#define K_ETB 0x17
+#define K_CAN 0x18
+#define K_EM 0x19
+#define K_SUB 0x1a
+#define K_ESC 0x1b /* escape character */
+#define K_FS 0x1c
+#define K_GS 0x1d
+#define K_RS 0x1e
+#define K_US 0x1f
+#define K_SPACE 0x20 /* space character */
+#define K_BANG 0x21 /* ! */
+#define K_DQUOTE 0x22 /* " */
+#define K_POUND 0x23 /* # */
+#define K_DOLLAR 0x24 /* $ */
+#define K_PERC 0x25 /* % */
+#define K_AMPER 0x26 /* & */
+#define K_SQUOTE 0x27 /* ' */
+#define K_LPAREN 0x28 /* ( */
+#define K_RPAREN 0x29 /* ) */
+#define K_ASTER 0x2a /* * */
+#define K_PLUS 0x2b /* + */
+#define K_COMMA 0x2c /* , */
+#define K_MINUS 0x2d /* - */
+#define K_PERIOD 0x2e /* . */
+#define K_SLASH 0x2f /* / */
+#define K_ZERO 0x30 /* 0 */
+#define K_ONE 0x31 /* 1 */
+#define K_TWO 0x32 /* 2 */
+#define K_THREE 0x33 /* 3 */
+#define K_FOUR 0x34 /* 4 */
+#define K_FIVE 0x35 /* 5 */
+#define K_SIX 0x36 /* 6 */
+#define K_SEVEN 0x37 /* 7 */
+#define K_EIGHT 0x38 /* 8 */
+#define K_NINE 0x39 /* 9 */
+#define K_COLON 0x3a /* : */
+#define K_SEMI 0x3b /* ; */
+#define K_LTHN 0x3c /* < */
+#define K_EQL 0x3d /* = */
+#define K_GTHN 0x3e /* > */
+#define K_QUES 0x3f /* ? */
+#define K_ATSN 0x40 /* @ */
+#define K_A 0x41 /* A */
+#define K_B 0x42 /* B */
+#define K_C 0x43 /* C */
+#define K_D 0x44 /* D */
+#define K_E 0x45 /* E */
+#define K_F 0x46 /* F */
+#define K_G 0x47 /* G */
+#define K_H 0x48 /* H */
+#define K_I 0x49 /* I */
+#define K_J 0x4a /* J */
+#define K_K 0x4b /* K */
+#define K_L 0x4c /* L */
+#define K_M 0x4d /* M */
+#define K_N 0x4e /* N */
+#define K_O 0x4f /* O */
+#define K_P 0x50 /* P */
+#define K_Q 0x51 /* Q */
+#define K_R 0x52 /* R */
+#define K_S 0x53 /* S */
+#define K_T 0x54 /* T */
+#define K_U 0x55 /* U */
+#define K_V 0x56 /* V */
+#define K_W 0x57 /* W */
+#define K_X 0x58 /* X */
+#define K_Y 0x59 /* Y */
+#define K_Z 0x5a /* Z */
+#define K_LBRKT 0x5b /* [ */
+#define K_BSLSH 0x5c /* \ */
+#define K_RBRKT 0x5d /* ] */
+#define K_CARET 0x5e /* ^ */
+#define K_UNDSC 0x5f /* _ */
+#define K_GRAV 0x60 /* ` */
+#define K_a 0x61 /* a */
+#define K_b 0x62 /* b */
+#define K_c 0x63 /* c */
+#define K_d 0x64 /* d */
+#define K_e 0x65 /* e */
+#define K_f 0x66 /* f */
+#define K_g 0x67 /* g */
+#define K_h 0x68 /* h */
+#define K_i 0x69 /* i */
+#define K_j 0x6a /* j */
+#define K_k 0x6b /* k */
+#define K_l 0x6c /* l */
+#define K_m 0x6d /* m */
+#define K_n 0x6e /* n */
+#define K_o 0x6f /* o */
+#define K_p 0x70 /* p */
+#define K_q 0x71 /* q */
+#define K_r 0x72 /* r */
+#define K_s 0x73 /* s */
+#define K_t 0x74 /* t */
+#define K_u 0x75 /* u */
+#define K_v 0x76 /* v */
+#define K_w 0x77 /* w */
+#define K_x 0x78 /* x */
+#define K_y 0x79 /* y */
+#define K_z 0x7a /* z */
+#define K_LBRACE 0x7b /* { */
+#define K_PIPE 0x7c /* | */
+#define K_RBRACE 0x7d /* } */
+#define K_TILDE 0x7e /* ~ */
+#define K_DEL 0x7f /* delete */
+
+/* Ascii sequences to be generated by the named key */
+#define K_F1 0x1b,0x4f,0x50
+#define K_F1S 0x1b,0x4f,0x70
+#define K_F2 0x1b,0x4f,0x51
+#define K_F2S 0x1b,0x4f,0x71
+#define K_F3 0x1b,0x4f,0x52
+#define K_F3S 0x1b,0x4f,0x72
+#define K_F4 0x1b,0x4f,0x53
+#define K_F4S 0x1b,0x4f,0x73
+#define K_F5 0x1b,0x4f,0x54
+#define K_F5S 0x1b,0x4f,0x74
+#define K_F6 0x1b,0x4f,0x55
+#define K_F6S 0x1b,0x4f,0x75
+#define K_F7 0x1b,0x4f,0x56
+#define K_F7S 0x1b,0x4f,0x76
+#define K_F8 0x1b,0x4f,0x57
+#define K_F8S 0x1b,0x4f,0x77
+#define K_F9 0x1b,0x4f,0x58
+#define K_F9S 0x1b,0x4f,0x78
+#define K_F10 0x1b,0x4f,0x59
+#define K_F10S 0x1b,0x4f,0x79
+#define K_F11 0x1b,0x4f,0x5a
+#define K_F11S 0x1b,0x4f,0x7a
+#define K_F12 0x1b,0x4f,0x41
+#define K_F12S 0x1b,0x4f,0x61
+
+#define K_SCRL 0x1b,0x5b,0x4d
+#define K_HOME 0x1b,0x5b,0x48
+#define K_UA 0x1b,0x5b,0x41
+#define K_PUP 0x1b,0x5b,0x56
+#define K_LA 0x1b,0x5b,0x44
+#define K_RA 0x1b,0x5b,0x43
+#define K_END 0x1b,0x5b,0x59
+#define K_DA 0x1b,0x5b,0x42
+#define K_PDN 0x1b,0x5b,0x55
+#define K_INS 0x1b,0x5b,0x40
+
+
+/*
+ * This array maps scancodes to Ascii characters (or character
+ * sequences).
+ * The first index is the scancode. The first NUMOUTPUT characters
+ * (accessed using the second index) correspond to the key's char
+ * sequence for the Normal state. The next NUMOUTPUT characters
+ * are for the Shift state, then Ctrl, then Alt, then Shift/Alt.
+ */
+#ifdef KERNEL
+extern u_char key_map[NUMKEYS][WIDTH_KMAP];
+#endif /* KERNEL */
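+
+/*
+ * Indexing example (illustrative, derived from the comment and CHARIDX
+ * above): key_map[sc][CHARIDX(SHIFT_STATE)] is the first byte of the
+ * Shift-state sequence for scan code sc; a sequence shorter than
+ * NUMOUTPUT bytes is presumably terminated by K_DONE (defined below).
+ */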
+
+
+
+/*
+ * These routines are declared here so that all the modules making
+ * up the kd driver agree on how to do locking.
+ */
+
+#ifdef KERNEL
+#include <i386/machspl.h>
+extern void splx();
+extern spl_t spltty();
+#define SPLKD spltty
+#endif /* KERNEL */
+
+
+/*
+ * Ioctl's on /dev/console.
+ */
+
+/*
+ * KDGKBENT, KDSKBENT - Get and set keyboard table entry. Useful for
+ * remapping keys.
+ *
+ * KDGSTATE - Get the keyboard state variable, which flags the
+ * modifier keys (shift, ctrl, etc.) that are down. See
+ * KS_NORMAL et al above. Used for debugging.
+ *
+ * KDSETBELL - Turns the bell on or off.
+ */
+
+#define KDGKBENT _IOWR('k', 1, struct kbentry) /* get keybd entry */
+
+#define KDSKBENT _IOW('k', 2, struct kbentry) /* set keybd entry */
+
+#define KDGSTATE _IOR('k', 3, int) /* get keybd state */
+
+#define KDSETBELL _IOW('k', 4, int) /* turn bell on or off */
+# define KD_BELLON 1
+# define KD_BELLOFF 0
+
+/*
+ * This struct is used for getting and setting key definitions. The
+ * values for kb_index are obtainable from the man page for
+ * keyboard(7) (though they should really be defined here!).
+ */
+struct kbentry {
+ u_char kb_state; /* which state to use */
+ u_char kb_index; /* which keycode */
+ u_char kb_value[NUMOUTPUT]; /* value to get/set */
+};
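+
+/*
+ * Hedged usage sketch (not part of the original interface description):
+ * a user program might remap a key's Normal-state output roughly like
+ * this, assuming kb_index takes a scan code such as K_aSC and that
+ * /dev/console is the right device node.
+ *
+ *	struct kbentry kbe;
+ *	int fd = open("/dev/console", O_RDONLY);
+ *
+ *	kbe.kb_state = NORM_STATE;
+ *	kbe.kb_index = K_aSC;			the key to remap
+ *	kbe.kb_value[0] = K_b;			emit 'b' instead of 'a'
+ *	kbe.kb_value[1] = K_DONE;
+ *	kbe.kb_value[2] = K_DONE;
+ *	ioctl(fd, KDSKBENT, &kbe);
+ */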
+
+
+/*
+ * Ioctl's on /dev/kbd.
+ */
+
+/*
+ * KDSKBDMODE - When the console is in "ascii" mode, keyboard events are
+ * converted to Ascii characters that are readable from /dev/console.
+ * When the console is in "event" mode, keyboard events are
+ * timestamped and queued up on /dev/kbd as kd_events. When the last
+ * close is done on /dev/kbd, the console automatically reverts to ascii
+ * mode.
+ * When /dev/mouse is opened, mouse events are timestamped and queued
+ * on /dev/mouse, again as kd_events.
+ *
+ * KDGKBDTYPE - Returns the type of keyboard installed. Currently
+ * there is only one type, KB_VANILLAKB, which is your standard PC-AT
+ * keyboard.
+ */
+
+#ifdef KERNEL
+extern int kb_mode;
+#endif
+
+#define KDSKBDMODE _IOW('K', 1, int) /* set keyboard mode */
+#define KB_EVENT 1
+#define KB_ASCII 2
+
+#define KDGKBDTYPE _IOR('K', 2, int) /* get keyboard type */
+#define KB_VANILLAKB 0
+
+struct X_kdb {
+ u_int *ptr;
+ u_int size;
+};
+
+#define K_X_KDB_ENTER _IOW('K', 16, struct X_kdb)
+#define K_X_KDB_EXIT _IOW('K', 17, struct X_kdb)
+
+#define K_X_IN 0x01000000
+#define K_X_OUT 0x02000000
+#define K_X_BYTE 0x00010000
+#define K_X_WORD 0x00020000
+#define K_X_LONG 0x00040000
+#define K_X_TYPE 0x03070000
+#define K_X_PORT 0x0000ffff
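+
+/*
+ * Illustrative encoding (inferred from kdb_in_out() in kd_event.c, not a
+ * documented example): each entry in an X_kdb string is a pair of words,
+ * the first holding direction, width and port, the second the data for an
+ * output operation. For instance
+ *
+ *	{ K_X_OUT | K_X_BYTE | K_CMD, KC_CMD_ENBLE }
+ *
+ * would write the enable-keyboard command (0xae) to the keyboard
+ * controller command port (0x64).
+ */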
+
+typedef u_short kev_type; /* kd event type */
+
+/* (used for event records) */
+struct mouse_motion {
+ short mm_deltaX; /* units? */
+ short mm_deltaY;
+};
+
+typedef struct {
+ kev_type type; /* see below */
+ struct timeval time; /* timestamp */
+ union { /* value associated with event */
+ boolean_t up; /* MOUSE_LEFT .. MOUSE_RIGHT */
+ Scancode sc; /* KEYBD_EVENT */
+ struct mouse_motion mmotion; /* MOUSE_MOTION */
+ } value;
+} kd_event;
+#define m_deltaX mmotion.mm_deltaX
+#define m_deltaY mmotion.mm_deltaY
+
+/*
+ * kd_event ID's.
+ */
+#define MOUSE_LEFT 1 /* mouse left button up/down */
+#define MOUSE_MIDDLE 2
+#define MOUSE_RIGHT 3
+#define MOUSE_MOTION 4 /* mouse motion */
+#define KEYBD_EVENT 5 /* key up/down */
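+
+/*
+ * Hedged usage sketch (not part of the original header): reading raw
+ * keyboard events might look roughly like this, assuming the /dev/kbd
+ * node described above; handle() is a hypothetical consumer.
+ *
+ *	int mode = KB_EVENT;
+ *	kd_event ev;
+ *	int kbd = open("/dev/kbd", O_RDONLY);
+ *
+ *	ioctl(kbd, KDSKBDMODE, &mode);
+ *	while (read(kbd, (char *)&ev, sizeof ev) == sizeof ev)
+ *		if (ev.type == KEYBD_EVENT)
+ *			handle(ev.value.sc);
+ */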
+
+#endif /* _KD_H_ */
diff --git a/i386/i386at/kd_event.c b/i386/i386at/kd_event.c
new file mode 100644
index 00000000..9ee43749
--- /dev/null
+++ b/i386/i386at/kd_event.c
@@ -0,0 +1,560 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kd_event.c
+ Description: Driver for event interface to keyboard.
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1989. All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <mach/boolean.h>
+#include <sys/types.h>
+#ifdef MACH_KERNEL
+#include <device/errno.h>
+#include <device/io_req.h>
+#else MACH_KERNEL
+#include <sys/file.h>
+#include <sys/errno.h>
+#include <kern/thread.h>
+#include <sys/user.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#endif MACH_KERNEL
+#include <i386/machspl.h>
+#include <i386at/kd.h>
+#include <i386at/kd_queue.h>
+
+/*
+ * Code for /dev/kbd. The interrupt processing is done in kd.c,
+ * which calls into this module to enqueue scancode events when
+ * the keyboard is in Event mode.
+ */
+
+/*
+ * Note: These globals are protected by raising the interrupt level
+ * via SPLKD.
+ */
+
+kd_event_queue kbd_queue; /* queue of keyboard events */
+#ifdef MACH_KERNEL
+queue_head_t kbd_read_queue = { &kbd_read_queue, &kbd_read_queue };
+#else MACH_KERNEL
+struct proc *kbd_sel = 0; /* selecting process, if any */
+short kbdpgrp = 0; /* process group leader when dev is open */
+
+int kbdflag = 0;
+#define KBD_COLL 1 /* select collision */
+#define KBD_ASYNC 2 /* user wants asynch notification */
+#define KBD_NBIO 4 /* user wants non-blocking I/O */
+#endif MACH_KERNEL
+
+
+void kbd_enqueue();
+#ifdef MACH_KERNEL
+io_return_t X_kdb_enter_init();
+io_return_t X_kdb_exit_init();
+#endif MACH_KERNEL
+
+static boolean_t initialized = FALSE;
+
+
+/*
+ * kbdinit - set up event queue.
+ */
+
+kbdinit()
+{
+ spl_t s = SPLKD();
+
+ if (!initialized) {
+ kdq_reset(&kbd_queue);
+ initialized = TRUE;
+ }
+ splx(s);
+}
+
+
+/*
+ * kbdopen - Verify that open is read-only and remember process
+ * group leader.
+ */
+
+/*ARGSUSED*/
+kbdopen(dev, flags)
+ dev_t dev;
+ int flags;
+{
+ kbdinit();
+
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ if (flags & FWRITE)
+ return(ENODEV);
+
+ if (kbdpgrp == 0)
+ kbdpgrp = u.u_procp->p_pgrp;
+#endif MACH_KERNEL
+ return(0);
+}
+
+
+/*
+ * kbdclose - Make sure that the kd driver is in Ascii mode and
+ * reset various flags.
+ */
+
+/*ARGSUSED*/
+kbdclose(dev, flags)
+ dev_t dev;
+ int flags;
+{
+ spl_t s = SPLKD();
+
+ kb_mode = KB_ASCII;
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ kbdpgrp = 0;
+ kbdflag = 0;
+ kbd_sel = 0;
+#endif MACH_KERNEL
+ kdq_reset(&kbd_queue);
+ splx(s);
+}
+
+
+#ifdef MACH_KERNEL
+io_return_t kbdgetstat(dev, flavor, data, count)
+ dev_t dev;
+ int flavor;
+ int * data; /* pointer to OUT array */
+ unsigned int *count; /* OUT */
+{
+ io_return_t result;
+
+ switch (flavor) {
+ case KDGKBDTYPE:
+ *data = KB_VANILLAKB;
+ *count = 1;
+ break;
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+io_return_t kbdsetstat(dev, flavor, data, count)
+ dev_t dev;
+ int flavor;
+ int * data;
+ unsigned int count;
+{
+ io_return_t result;
+
+ switch (flavor) {
+ case KDSKBDMODE:
+ kb_mode = *data;
+ /* XXX - what to do about unread events? */
+ /* XXX - should check that 'data' contains an OK value */
+ break;
+ case K_X_KDB_ENTER:
+ return X_kdb_enter_init(data, count);
+ case K_X_KDB_EXIT:
+ return X_kdb_exit_init(data, count);
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (D_SUCCESS);
+}
+
+#else MACH_KERNEL
+/*
+ * kbdioctl - handling for asynch & non-blocking I/O.
+ */
+
+/*ARGSUSED*/
+kbdioctl(dev, cmd, data, flag)
+ dev_t dev;
+ int cmd;
+ caddr_t data;
+ int flag;
+{
+ spl_t s = SPLKD();
+ int err = 0;
+
+ switch (cmd) {
+ case KDSKBDMODE:
+ kb_mode = *(int *)data;
+ /* XXX - what to do about unread events? */
+ /* XXX - should check that "data" contains an OK value */
+ break;
+ case KDGKBDTYPE:
+ *(int *)data = KB_VANILLAKB;
+ break;
+ case K_X_KDB_ENTER:
+ X_kdb_enter_init((struct X_kdb *) data);
+ break;
+ case K_X_KDB_EXIT:
+ X_kdb_exit_init( (struct X_kdb *) data);
+ break;
+ case FIONBIO:
+ if (*(int *)data)
+ kbdflag |= KBD_NBIO;
+ else
+ kbdflag &= ~KBD_NBIO;
+ break;
+ case FIOASYNC:
+ if (*(int *)data)
+ kbdflag |= KBD_ASYNC;
+ else
+ kbdflag &= ~KBD_ASYNC;
+ break;
+ default:
+ err = ENOTTY;
+ break;
+ }
+
+ splx(s);
+ return(err);
+}
+
+
+/*
+ * kbdselect
+ */
+
+/*ARGSUSED*/
+kbdselect(dev, rw)
+{
+ spl_t s = SPLKD();
+
+ if (!kdq_empty(&kbd_queue)) {
+ splx(s);
+ return(1);
+ }
+
+ if (kbd_sel)
+ kbdflag |= KBD_COLL;
+ else
+ kbd_sel = (struct proc *)current_thread();
+ /* eeeyuck */
+
+ splx(s);
+ return(0);
+}
+#endif MACH_KERNEL
+
+
+/*
+ * kbdread - dequeue and return any queued events.
+ */
+
+#ifdef MACH_KERNEL
+boolean_t kbd_read_done(); /* forward */
+
+kbdread(dev, ior)
+ dev_t dev;
+ register io_req_t ior;
+{
+ register int err, count;
+ register spl_t s;
+
+ err = device_read_alloc(ior, (vm_size_t)ior->io_count);
+ if (err != KERN_SUCCESS)
+ return (err);
+
+ s = SPLKD();
+ if (kdq_empty(&kbd_queue)) {
+ if (ior->io_mode & D_NOWAIT) {
+ splx(s);
+ return (D_WOULD_BLOCK);
+ }
+ ior->io_done = kbd_read_done;
+ enqueue_tail(&kbd_read_queue, (queue_entry_t) ior);
+ splx(s);
+ return (D_IO_QUEUED);
+ }
+ count = 0;
+ while (!kdq_empty(&kbd_queue) && count < ior->io_count) {
+ register kd_event *ev;
+
+ ev = kdq_get(&kbd_queue);
+ *(kd_event *)(&ior->io_data[count]) = *ev;
+ count += sizeof(kd_event);
+ }
+ splx(s);
+ ior->io_residual = ior->io_count - count;
+ return (D_SUCCESS);
+}
+
+boolean_t kbd_read_done(ior)
+ register io_req_t ior;
+{
+ register int count;
+ register spl_t s;
+
+ s = SPLKD();
+ if (kdq_empty(&kbd_queue)) {
+ ior->io_done = kbd_read_done;
+ enqueue_tail(&kbd_read_queue, (queue_entry_t)ior);
+ splx(s);
+ return (FALSE);
+ }
+
+ count = 0;
+ while (!kdq_empty(&kbd_queue) && count < ior->io_count) {
+ register kd_event *ev;
+
+ ev = kdq_get(&kbd_queue);
+ *(kd_event *)(&ior->io_data[count]) = *ev;
+ count += sizeof(kd_event);
+ }
+ splx(s);
+
+ ior->io_residual = ior->io_count - count;
+ ds_read_done(ior);
+
+ return (TRUE);
+}
+
+#else MACH_KERNEL
+/*ARGSUSED*/
+kbdread(dev, uio)
+ dev_t dev;
+ struct uio *uio;
+{
+ int s = SPLKD();
+ int err = 0;
+ kd_event *ev;
+ int i;
+ char *cp;
+
+ if (kdq_empty(&kbd_queue))
+ if (kbdflag & KBD_NBIO) {
+ err = EWOULDBLOCK;
+ goto done;
+ } else
+ while (kdq_empty(&kbd_queue)) {
+ splx(s);
+ sleep((caddr_t)&kbd_queue, TTIPRI);
+ s = SPLKD();
+ }
+
+ while (!kdq_empty(&kbd_queue) && uio->uio_resid >= sizeof(kd_event)) {
+ ev = kdq_get(&kbd_queue);
+ for (cp = (char *)ev, i = 0; i < sizeof(kd_event);
+ ++i, ++cp) {
+ err = ureadc(*cp, uio);
+ if (err)
+ goto done;
+ }
+ }
+
+done:
+ splx(s);
+ return(err);
+}
+#endif MACH_KERNEL
+
+
+/*
+ * kd_enqsc - enqueue a scancode. Should be called at SPLKD.
+ */
+
+void
+kd_enqsc(sc)
+ Scancode sc;
+{
+ kd_event ev;
+
+ ev.type = KEYBD_EVENT;
+ ev.time = time;
+ ev.value.sc = sc;
+ kbd_enqueue(&ev);
+}
+
+
+/*
+ * kbd_enqueue - enqueue an event and wake up selecting processes, if
+ * any. Should be called at SPLKD.
+ */
+
+void
+kbd_enqueue(ev)
+ kd_event *ev;
+{
+ if (kdq_full(&kbd_queue))
+ printf("kbd: queue full\n");
+ else
+ kdq_put(&kbd_queue, ev);
+
+#ifdef MACH_KERNEL
+ {
+ register io_req_t ior;
+ while ((ior = (io_req_t)dequeue_head(&kbd_read_queue)) != 0)
+ iodone(ior);
+ }
+#else MACH_KERNEL
+ if (kbd_sel) {
+ selwakeup(kbd_sel, kbdflag & KBD_COLL);
+ kbd_sel = 0;
+ kbdflag &= ~KBD_COLL;
+ }
+ if (kbdflag & KBD_ASYNC)
+ gsignal(kbdpgrp, SIGIO);
+ wakeup((caddr_t)&kbd_queue);
+#endif MACH_KERNEL
+}
+
+u_int X_kdb_enter_str[512], X_kdb_exit_str[512];
+int X_kdb_enter_len = 0, X_kdb_exit_len = 0;
+
+kdb_in_out(p)
+u_int *p;
+{
+register int t = p[0];
+
+ switch (t & K_X_TYPE) {
+ case K_X_IN|K_X_BYTE:
+ inb(t & K_X_PORT);
+ break;
+
+ case K_X_IN|K_X_WORD:
+ inw(t & K_X_PORT);
+ break;
+
+ case K_X_IN|K_X_LONG:
+ inl(t & K_X_PORT);
+ break;
+
+ case K_X_OUT|K_X_BYTE:
+ outb(t & K_X_PORT, p[1]);
+ break;
+
+ case K_X_OUT|K_X_WORD:
+ outw(t & K_X_PORT, p[1]);
+ break;
+
+ case K_X_OUT|K_X_LONG:
+ outl(t & K_X_PORT, p[1]);
+ break;
+ }
+}
+
+X_kdb_enter()
+{
+register u_int *u_ip, *endp;
+
+ for (u_ip = X_kdb_enter_str, endp = &X_kdb_enter_str[X_kdb_enter_len];
+ u_ip < endp;
+ u_ip += 2)
+ kdb_in_out(u_ip);
+}
+
+X_kdb_exit()
+{
+register u_int *u_ip, *endp;
+
+ for (u_ip = X_kdb_exit_str, endp = &X_kdb_exit_str[X_kdb_exit_len];
+ u_ip < endp;
+ u_ip += 2)
+ kdb_in_out(u_ip);
+}
+
+#ifdef MACH_KERNEL
+io_return_t
+X_kdb_enter_init(data, count)
+ u_int *data;
+ u_int count;
+{
+ if (count * sizeof X_kdb_enter_str[0] > sizeof X_kdb_enter_str)
+ return D_INVALID_OPERATION;
+
+ bcopy(data, X_kdb_enter_str, count * sizeof X_kdb_enter_str[0]);
+ X_kdb_enter_len = count;
+ return D_SUCCESS;
+}
+
+io_return_t
+X_kdb_exit_init(data, count)
+ u_int *data;
+ u_int count;
+{
+ if (count * sizeof X_kdb_exit_str[0] > sizeof X_kdb_exit_str)
+ return D_INVALID_OPERATION;
+
+ bcopy(data, X_kdb_exit_str, count * sizeof X_kdb_exit_str[0]);
+ X_kdb_exit_len = count;
+ return D_SUCCESS;
+}
+#else MACH_KERNEL
+X_kdb_enter_init(kp)
+struct X_kdb *kp;
+{
+ if (kp->size > sizeof X_kdb_enter_str)
+ u.u_error = ENOENT;
+ else if(copyin(kp->ptr, X_kdb_enter_str, kp->size) == EFAULT)
+ u.u_error = EFAULT;
+
+ X_kdb_enter_len = kp->size>>2;
+}
+
+X_kdb_exit_init(kp)
+struct X_kdb *kp;
+{
+ if (kp->size > sizeof X_kdb_exit_str)
+ u.u_error = ENOENT;
+ else if(copyin(kp->ptr, X_kdb_exit_str, kp->size) == EFAULT)
+ u.u_error = EFAULT;
+
+ X_kdb_exit_len = kp->size>>2;
+}
+#endif MACH_KERNEL
diff --git a/i386/i386at/kd_mouse.c b/i386/i386at/kd_mouse.c
new file mode 100644
index 00000000..8f4e09aa
--- /dev/null
+++ b/i386/i386at/kd_mouse.c
@@ -0,0 +1,899 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kd_mouse.c
+ Description: mouse driver as part of keyboard/display driver
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Hacked up support for serial mouse connected to COM1, using Mouse
+ * Systems 5-byte protocol at 1200 baud. This should work for
+ * Mouse Systems, SummaMouse, and Logitech C7 mice.
+ *
+ * The interface provided by /dev/mouse is a series of events as
+ * described in i386at/kd.h.
+ */
+
+#include <mach/boolean.h>
+#include <sys/types.h>
+#ifdef MACH_KERNEL
+#include <device/errno.h>
+#include <device/io_req.h>
+#else MACH_KERNEL
+#include <sys/file.h>
+#include <sys/errno.h>
+#include <kern/thread.h>
+#include <sys/user.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#endif MACH_KERNEL
+#include <i386/ipl.h>
+#include <chips/busses.h>
+#include <i386at/kd.h>
+#include <i386at/kd_queue.h>
+#include <i386at/i8250.h>
+
+static int (*oldvect)(); /* old interrupt vector */
+static int oldunit;
+static spl_t oldspl;
+extern struct bus_device *cominfo[];
+
+kd_event_queue mouse_queue; /* queue of mouse events */
+boolean_t mouse_in_use = FALSE;
+#ifdef MACH_KERNEL
+queue_head_t mouse_read_queue = { &mouse_read_queue, &mouse_read_queue };
+#else MACH_KERNEL
+struct proc *mouse_sel = 0; /* selecting process, if any */
+short mousepgrp = 0; /* process group leader when dev is open */
+#endif MACH_KERNEL
+
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+int mouseflag = 0;
+#define MOUSE_COLL 1 /* select collision */
+#define MOUSE_ASYNC 2 /* user wants asynch notification */
+#define MOUSE_NBIO 4 /* user wants non-blocking I/O */
+#endif MACH_KERNEL
+
+/*
+ * The state of the 3 buttons is encoded in the low-order 3 bits (both
+ * here and in other variables in the driver).
+ */
+u_char lastbuttons; /* previous state of mouse buttons */
+#define MOUSE_UP 1
+#define MOUSE_DOWN 0
+#define MOUSE_ALL_UP 0x7
+
+int mouseintr();
+void mouse_enqueue();
+int mouse_baud = BCNT1200;
+
+boolean_t mouse_char_cmd = FALSE; /* mouse response is to cmd */
+boolean_t mouse_char_wanted = FALSE; /* want mouse response */
+boolean_t mouse_char_in = FALSE; /* have mouse response */
+unsigned char mouse_char; /* mouse response */
+
+
+/*
+ * init_mouse_hw - initialize the serial port.
+ */
+init_mouse_hw(unit, mode)
+{
+ caddr_t base_addr = (caddr_t)cominfo[unit]->address;
+
+ outb(base_addr + RIE, 0);
+ outb(base_addr + RLC, LCDLAB);
+ outb(base_addr + RDLSB, mouse_baud & 0xff);
+ outb(base_addr + RDMSB, (mouse_baud >> 8) & 0xff);
+ outb(base_addr + RLC, mode);
+ outb(base_addr + RMC, MCDTR | MCRTS | MCOUT2);
+ outb(base_addr + RIE, IERD | IELS);
+}
+
+
+/*
+ * mouseopen - Verify that the request is read-only, initialize,
+ * and remember process group leader.
+ */
+/*
+ * Low 3 bits of minor are the com port #.
+ * The high 5 bits of minor are the mouse type
+ */
+#define MOUSE_SYSTEM_MOUSE 0
+#define MICROSOFT_MOUSE 1
+#define IBM_MOUSE 2
+#define NO_MOUSE 3
+#define LOGITECH_TRACKMAN 4
+#define MICROSOFT_MOUSE7 5
+static int mouse_type;
+static int mousebufsize;
+static int mousebufindex = 0;
+int track_man[10];
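+
+/*
+ * Illustrative example (an assumption, not from the original source):
+ * given the encoding above, a minor number of (MICROSOFT_MOUSE << 3) | 1,
+ * i.e. 0x09, would select the Microsoft protocol on com unit 1.
+ */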
+
+/*ARGSUSED*/
+mouseopen(dev, flags)
+ dev_t dev;
+ int flags;
+{
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ if (flags & FWRITE)
+ return(ENODEV);
+#endif MACH_KERNEL
+ if (mouse_in_use)
+ return(EBUSY);
+ mouse_in_use = TRUE; /* locking? */
+ kdq_reset(&mouse_queue);
+ lastbuttons = MOUSE_ALL_UP;
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ mousepgrp = u.u_procp->p_pgrp;
+#endif MACH_KERNEL
+
+ switch (mouse_type = ((minor(dev) & 0xf8) >> 3)) {
+ case MICROSOFT_MOUSE7:
+ mousebufsize = 3;
+ serial_mouse_open(dev);
+ init_mouse_hw(dev&7, LC7);
+ case MICROSOFT_MOUSE:
+ mousebufsize = 3;
+ serial_mouse_open(dev);
+ init_mouse_hw(dev&7, LC8);
+ break;
+ case MOUSE_SYSTEM_MOUSE:
+ mousebufsize = 5;
+ serial_mouse_open(dev);
+ init_mouse_hw(dev&7, LC8);
+ break;
+ case LOGITECH_TRACKMAN:
+ mousebufsize = 3;
+ serial_mouse_open(dev);
+ init_mouse_hw(dev&7, LC7);
+ track_man[0] = comgetc(dev&7);
+ track_man[1] = comgetc(dev&7);
+ if (track_man[0] != 0x4d &&
+ track_man[1] != 0x33) {
+ printf("LOGITECH_TRACKMAN: NOT M3");
+ }
+ break;
+ case IBM_MOUSE:
+ mousebufsize = 3;
+ kd_mouse_open(dev, 12);
+ ibm_ps2_mouse_open(dev);
+ break;
+ case NO_MOUSE:
+ break;
+ }
+ mousebufindex = 0;
+ return(0);
+}
+
+serial_mouse_open(dev)
+{
+ int unit = minor(dev) & 0x7;
+ int mouse_pic = cominfo[unit]->sysdep1;
+
+ spl_t s = splhi(); /* disable interrupts */
+
+ oldvect = ivect[mouse_pic];
+ ivect[mouse_pic] = mouseintr;
+
+ oldunit = iunit[mouse_pic];
+ iunit[mouse_pic] = unit;
+
+ /* XXX other arrays to init? */
+ splx(s); /* XXX - should come after init? */
+}
+
+int mouse_packets = 0;
+kd_mouse_open(dev, mouse_pic)
+{
+ spl_t s = splhi(); /* disable interrupts */
+ extern int kdintr();
+
+ oldvect = ivect[mouse_pic];
+ ivect[mouse_pic] = kdintr;
+ oldspl = intpri[mouse_pic];
+ intpri[mouse_pic] = SPL6;
+ form_pic_mask();
+ splx(s);
+}
+
+/*
+ * mouseclose - Disable interrupts on the serial port, reset driver flags,
+ * and restore the serial port interrupt vector.
+ */
+mouseclose(dev, flags)
+{
+ switch (mouse_type) {
+ case MICROSOFT_MOUSE:
+ case MICROSOFT_MOUSE7:
+ case MOUSE_SYSTEM_MOUSE:
+ case LOGITECH_TRACKMAN:
+ serial_mouse_close(dev);
+ break;
+ case IBM_MOUSE:
+ ibm_ps2_mouse_close(dev);
+ kd_mouse_close(dev, 12);
+ {int i = 20000; for (;i--;); }
+ kd_mouse_drain();
+ break;
+ case NO_MOUSE:
+ break;
+ }
+
+ kdq_reset(&mouse_queue); /* paranoia */
+ mouse_in_use = FALSE;
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ mousepgrp = 0;
+ mouseflag = 0;
+ mouse_sel = 0;
+#endif MACH_KERNEL
+}
+
+/*ARGSUSED*/
+serial_mouse_close(dev, flags)
+ dev_t dev;
+ int flags;
+{
+ spl_t o_pri = splhi(); /* mutex with open() */
+ int unit = minor(dev) & 0x7;
+ int mouse_pic = cominfo[unit]->sysdep1;
+ caddr_t base_addr = (caddr_t)cominfo[unit]->address;
+
+ assert(ivect[mouse_pic] == mouseintr);
+ outb(base_addr + RIE, 0); /* disable serial port */
+ outb(base_addr + RMC, 0); /* no rts */
+ ivect[mouse_pic] = oldvect;
+ iunit[mouse_pic] = oldunit;
+
+ (void)splx(o_pri);
+}
+
+kd_mouse_close(dev, mouse_pic)
+{
+ spl_t s = splhi();
+
+ ivect[mouse_pic] = oldvect;
+ intpri[mouse_pic] = oldspl;
+ form_pic_mask();
+ splx(s);
+}
+
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+/*
+ * mouseioctl - handling for asynch & non-blocking I/O.
+ */
+
+/*ARGSUSED*/
+mouseioctl(dev, cmd, data, flag)
+ dev_t dev;
+ int cmd;
+ caddr_t data;
+ int flag;
+{
+ int s = SPLKD();
+ int err = 0;
+
+ switch (cmd) {
+ case FIONBIO:
+ if (*(int *)data)
+ mouseflag |= MOUSE_NBIO;
+ else
+ mouseflag &= ~MOUSE_NBIO;
+ break;
+ case FIOASYNC:
+ if (*(int *)data)
+ mouseflag |= MOUSE_ASYNC;
+ else
+ mouseflag &= ~MOUSE_ASYNC;
+ break;
+ default:
+ err = ENOTTY;
+ break;
+ }
+
+ splx(s);
+ return(err);
+}
+
+
+/*
+ * mouseselect - check for pending events, etc.
+ */
+
+/*ARGSUSED*/
+mouseselect(dev, rw)
+{
+ int s = SPLKD();
+
+ if (!kdq_empty(&mouse_queue)) {
+ splx(s);
+ return(1);
+ }
+
+ if (mouse_sel)
+ mouseflag |= MOUSE_COLL;
+ else
+ mouse_sel = (struct proc *)current_thread();
+ /* eeeyuck */
+
+ splx(s);
+ return(0);
+}
+#endif MACH_KERNEL
+
+/*
+ * mouseread - dequeue and return any queued events.
+ */
+#ifdef MACH_KERNEL
+boolean_t mouse_read_done(); /* forward */
+
+mouseread(dev, ior)
+ dev_t dev;
+ register io_req_t ior;
+{
+ register int err, count;
+ register spl_t s;
+
+ err = device_read_alloc(ior, (vm_size_t)ior->io_count);
+ if (err != KERN_SUCCESS)
+ return (err);
+
+ s = SPLKD();
+ if (kdq_empty(&mouse_queue)) {
+ if (ior->io_mode & D_NOWAIT) {
+ splx(s);
+ return (D_WOULD_BLOCK);
+ }
+ ior->io_done = mouse_read_done;
+ enqueue_tail(&mouse_read_queue, (queue_entry_t)ior);
+ splx(s);
+ return (D_IO_QUEUED);
+ }
+ count = 0;
+ while (!kdq_empty(&mouse_queue) && count < ior->io_count) {
+ register kd_event *ev;
+
+ ev = kdq_get(&mouse_queue);
+ *(kd_event *)(&ior->io_data[count]) = *ev;
+ count += sizeof(kd_event);
+ }
+ splx(s);
+ ior->io_residual = ior->io_count - count;
+ return (D_SUCCESS);
+}
+
+boolean_t mouse_read_done(ior)
+ register io_req_t ior;
+{
+ register int count;
+ register spl_t s;
+
+ s = SPLKD();
+ if (kdq_empty(&mouse_queue)) {
+ ior->io_done = mouse_read_done;
+ enqueue_tail(&mouse_read_queue, (queue_entry_t)ior);
+ splx(s);
+ return (FALSE);
+ }
+
+ count = 0;
+ while (!kdq_empty(&mouse_queue) && count < ior->io_count) {
+ register kd_event *ev;
+
+ ev = kdq_get(&mouse_queue);
+ *(kd_event *)(&ior->io_data[count]) = *ev;
+ count += sizeof(kd_event);
+ }
+ splx(s);
+
+ ior->io_residual = ior->io_count - count;
+ ds_read_done(ior);
+
+ return (TRUE);
+}
+
+#else MACH_KERNEL
+/*ARGSUSED*/
+mouseread(dev, uio)
+ dev_t dev;
+ struct uio *uio;
+{
+ int s = SPLKD();
+ int err = 0;
+ kd_event *ev;
+ int i;
+ char *cp;
+
+ if (kdq_empty(&mouse_queue))
+ if (mouseflag & MOUSE_NBIO) {
+ err = EWOULDBLOCK;
+ goto done;
+ } else
+ while (kdq_empty(&mouse_queue)) {
+ splx(s);
+ sleep((caddr_t)&mouse_queue, TTIPRI);
+ s = SPLKD();
+ }
+
+ while (!kdq_empty(&mouse_queue) && uio->uio_resid >= sizeof(kd_event)) {
+ ev = kdq_get(&mouse_queue);
+ for (cp = (char *)ev, i = 0; i < sizeof(kd_event);
+ ++i, ++cp) {
+ err = ureadc(*cp, uio);
+ if (err)
+ goto done;
+ }
+ }
+
+done:
+ splx(s);
+ return(err);
+}
+#endif MACH_KERNEL
+
+
+/*
+ * mouseintr - Get a byte and pass it up for handling. Called at SPLKD.
+ */
+mouseintr(unit)
+{
+ caddr_t base_addr = (caddr_t)cominfo[unit]->address;
+ unsigned char id, ls;
+
+ /* get reason for interrupt and line status */
+ id = inb(base_addr + RID);
+ ls = inb(base_addr + RLS);
+
+ /* handle status changes */
+ if (id == IDLS) {
+ if (ls & LSDR) {
+ inb(base_addr + RDAT); /* flush bad character */
+ }
+ return; /* ignore status change */
+ }
+
+ if (id & IDRD) {
+ mouse_handle_byte((u_char)(inb(base_addr + RDAT) & 0xff));
+ }
+}
+
+
+/*
+ * mouse_handle_byte - Accumulate bytes until we have an entire packet.
+ * If the mouse has moved or any of the buttons have changed state (up
+ * or down), enqueue the corresponding events.
+ * Called at SPLKD.
+ * XXX - magic numbers.
+ */
+int show_mouse_byte = 0;
+/*
+ X down; middle down; middle up; X up 50 0 0; 50 0 0 22; 50 0 0 02; 40 0 0
+ X down; middle down; X up; middle up 50 0 0; 50 0 0 22; 40 0 0 22; 40 0 0 2
+ *
+ * The trick here is that while the middle button is down you get 4-byte
+ * packets whose last byte is 0x22. When the middle button goes up you get
+ * a final packet whose last byte is 0x02.
+ */
+int lastgitech = 0x40; /* figure whether the first 3 bytes imply */
+ /* it's time to expect a fourth */
+int fourthgitech = 0; /* look for the 4th byte; we must process it */
+int middlegitech = 0; /* what should the middle button be */
+
+#define MOUSEBUFSIZE 5 /* num bytes def'd by protocol */
+static u_char mousebuf[MOUSEBUFSIZE]; /* 5-byte packet from mouse */
+
+mouse_handle_byte(ch)
+ u_char ch;
+{
+ if (show_mouse_byte) {
+ printf("%x(%c) ", ch, ch);
+ }
+
+ if (mouse_char_cmd) {
+ /*
+ * Mouse character is response to command
+ */
+ mouse_char = ch;
+ mouse_char_in = TRUE;
+ if (mouse_char_wanted) {
+ mouse_char_wanted = FALSE;
+ wakeup(&mouse_char);
+ }
+ return;
+ }
+
+ if (mousebufindex == 0) {
+ switch (mouse_type) {
+ case MICROSOFT_MOUSE7:
+ if ((ch & 0x40) != 0x40)
+ return;
+ break;
+ case MICROSOFT_MOUSE:
+ if ((ch & 0xc0) != 0xc0)
+ return;
+ break;
+ case MOUSE_SYSTEM_MOUSE:
+ if ((ch & 0xf8) != 0x80)
+ return;
+ break;
+ case LOGITECH_TRACKMAN:
+ if (fourthgitech == 1) {
+ fourthgitech = 0;
+ if (ch & 0xf0)
+ middlegitech = 0x4;
+ else
+ middlegitech = 0x0;
+ mouse_packet_microsoft_mouse(mousebuf);
+ return;
+ } else if ((ch & 0xc0) != 0x40)
+ return;
+ break;
+ case IBM_MOUSE:
+ break;
+ }
+ }
+
+ mousebuf[mousebufindex++] = ch;
+ if (mousebufindex < mousebufsize)
+ return;
+
+ /* got a packet */
+ mousebufindex = 0;
+
+ switch (mouse_type) {
+ case MICROSOFT_MOUSE7:
+ case MICROSOFT_MOUSE:
+ mouse_packet_microsoft_mouse(mousebuf);
+ break;
+ case MOUSE_SYSTEM_MOUSE:
+ mouse_packet_mouse_system_mouse(mousebuf);
+ break;
+ case LOGITECH_TRACKMAN:
+ if ( mousebuf[1] || mousebuf[2] ||
+ mousebuf[0] != lastgitech) {
+ mouse_packet_microsoft_mouse(mousebuf);
+ lastgitech = mousebuf[0] & 0xf0;
+ } else {
+ fourthgitech = 1;
+ }
+ break;
+ case IBM_MOUSE:
+ mouse_packet_ibm_ps2_mouse(mousebuf);
+ break;
+ }
+}
+
+mouse_packet_mouse_system_mouse(mousebuf)
+u_char mousebuf[MOUSEBUFSIZE];
+{
+ u_char buttons, buttonchanges;
+ struct mouse_motion moved;
+
+ buttons = mousebuf[0] & 0x7; /* get current state of buttons */
+ buttonchanges = buttons ^ lastbuttons;
+ moved.mm_deltaX = (char)mousebuf[1] + (char)mousebuf[3];
+ moved.mm_deltaY = (char)mousebuf[2] + (char)mousebuf[4];
+
+ if (moved.mm_deltaX != 0 || moved.mm_deltaY != 0)
+ mouse_moved(moved);
+
+ if (buttonchanges != 0) {
+ lastbuttons = buttons;
+ if (buttonchanges & 1)
+ mouse_button(MOUSE_RIGHT, buttons & 1);
+ if (buttonchanges & 2)
+ mouse_button(MOUSE_MIDDLE, (buttons & 2) >> 1);
+ if (buttonchanges & 4)
+ mouse_button(MOUSE_LEFT, (buttons & 4) >> 2);
+ }
+}
+
+/* same as above for microsoft mouse */
+/*
+ * 3 byte microsoft format used
+ *
+ * 7 6 5 4 3 2 1 0
+ * 1 1 L R Y7 Y6 X7 X6
+ * 1 0 X5 X4 X3 X2 X1 X0
+ * 1 0 Y5 Y4 Y3 Y2 Y1 Y0
+ *
+ */
+mouse_packet_microsoft_mouse(mousebuf)
+u_char mousebuf[MOUSEBUFSIZE];
+{
+ u_char buttons, buttonchanges;
+ struct mouse_motion moved;
+
+ buttons = ((mousebuf[0] & 0x30) >> 4);
+ buttons |= middlegitech;
+ /* get current state of buttons */
+#ifdef gross_hack
+ if (buttons == 0x03) /* both buttons down */
+ buttons = 0x04;
+#endif /* gross_hack */
+ buttons = (~buttons) & 0x07; /* convert to not pressed */
+
+ buttonchanges = buttons ^ lastbuttons;
+ moved.mm_deltaX = ((mousebuf[0] & 0x03) << 6) | (mousebuf[1] & 0x3F);
+ moved.mm_deltaY = ((mousebuf[0] & 0x0c) << 4) | (mousebuf[2] & 0x3F);
+ if (moved.mm_deltaX & 0x80) /* negative, in fact */
+ moved.mm_deltaX = moved.mm_deltaX - 0x100;
+ if (moved.mm_deltaY & 0x80) /* negative, in fact */
+ moved.mm_deltaY = moved.mm_deltaY - 0x100;
+ /* and finally the Y orientation is different for the microsoft mouse */
+ moved.mm_deltaY = -moved.mm_deltaY;
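+ /*
+ * Illustration with assumed input: a packet of 0xc1, 0x05, 0x03
+ * decodes to mm_deltaX = 0x45 (69) and, after the sign flip above,
+ * mm_deltaY = -3, with all three buttons reported up.
+ */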
+
+ if (moved.mm_deltaX != 0 || moved.mm_deltaY != 0)
+ mouse_moved(moved);
+
+ if (buttonchanges != 0) {
+ lastbuttons = buttons;
+ if (buttonchanges & 1)
+ mouse_button(MOUSE_RIGHT, (buttons & 1) ?
+ MOUSE_UP : MOUSE_DOWN);
+ if (buttonchanges & 2)
+ mouse_button(MOUSE_LEFT, (buttons & 2) ?
+ MOUSE_UP : MOUSE_DOWN);
+ if (buttonchanges & 4)
+ mouse_button(MOUSE_MIDDLE, (buttons & 4) ?
+ MOUSE_UP : MOUSE_DOWN);
+ }
+}
+
+/*
+ * AUX device (PS2) open/close
+ */
+
+/*
+ * Write character to mouse. Called at spltty.
+ */
+void kd_mouse_write(
+ unsigned char ch)
+{
+ while (inb(K_STATUS) & K_IBUF_FUL)
+ continue; /* wait for 'input' port empty */
+ outb(K_CMD, 0xd4); /* send next character to mouse */
+
+ while (inb(K_STATUS) & K_IBUF_FUL)
+ continue; /* wait for 'input' port empty */
+ outb(K_RDWR, ch); /* send command to mouse */
+}
+
+/*
+ * Read next character from mouse, waiting for interrupt
+ * to deliver it. Called at spltty.
+ */
+int kd_mouse_read(void)
+{
+ int ch;
+
+ while (!mouse_char_in) {
+ mouse_char_wanted = TRUE;
+#ifdef MACH_KERNEL
+ assert_wait((event_t) &mouse_char, FALSE);
+ thread_block((void (*)()) 0);
+#else MACH_KERNEL
+ sleep(&mouse_char, PZERO);
+#endif MACH_KERNEL
+ }
+
+ ch = mouse_char;
+ mouse_char_in = FALSE;
+
+ return ch;
+}
+
+ibm_ps2_mouse_open(dev)
+{
+ spl_t s = spltty();
+
+ lastbuttons = 0;
+ mouse_char_cmd = TRUE; /* responses are to commands */
+
+ kd_sendcmd(0xa8); /* enable mouse in kbd */
+
+ kd_cmdreg_write(0x47); /* allow mouse interrupts */
+ /* magic number for ibm? */
+
+ kd_mouse_write(0xff); /* reset mouse */
+ if (kd_mouse_read() != 0xfa) {
+ splx(s);
+ return; /* need ACK */
+ }
+
+ (void) kd_mouse_read(); /* discard 2-character mouse ID */
+ (void) kd_mouse_read();
+
+ kd_mouse_write(0xea); /* set stream mode */
+ if (kd_mouse_read() != 0xfa) {
+ splx(s);
+ return; /* need ACK */
+ }
+
+ kd_mouse_write(0xf4); /* enable */
+ if (kd_mouse_read() != 0xfa) {
+ splx(s);
+ return; /* need ACK */
+ }
+
+ mouse_char_cmd = FALSE; /* now we get mouse packets */
+
+ splx(s);
+}
+
+ibm_ps2_mouse_close(dev)
+{
+ spl_t s = spltty();
+
+ mouse_char_cmd = TRUE; /* responses are to commands */
+
+ kd_mouse_write(0xff); /* reset mouse */
+ if (kd_mouse_read() == 0xfa) {
+ /* got ACK: discard 2-char mouse ID */
+ (void) kd_mouse_read();
+ (void) kd_mouse_read();
+ }
+
+ kd_sendcmd(0xa7); /* disable mouse in kbd */
+ kd_cmdreg_write(0x65); /* disallow mouse interrupts */
+ /* magic number for ibm? */
+
+ splx(s);
+}
+
+/*
+ * 3 byte ibm ps2 format used
+ *
+ * 7 6 5 4 3 2 1 0
+ * YO XO YS XS 1 M R L
+ * X7 X6 X5 X4 X3 X2 X1 X0
+ * Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+ *
+ */
+mouse_packet_ibm_ps2_mouse(mousebuf)
+u_char mousebuf[MOUSEBUFSIZE];
+{
+ u_char buttons, buttonchanges;
+ struct mouse_motion moved;
+
+ buttons = mousebuf[0] & 0x7; /* get current state of buttons */
+ buttonchanges = buttons ^ lastbuttons;
+ moved.mm_deltaX = ((mousebuf[0]&0x10) ? 0xffffff00 : 0 ) | (u_char)mousebuf[1];
+ moved.mm_deltaY = ((mousebuf[0]&0x20) ? 0xffffff00 : 0 ) | (u_char)mousebuf[2];
+ if (mouse_packets) {
+ printf("(%x:%x:%x)", mousebuf[0], mousebuf[1], mousebuf[2]);
+ return;
+ }
+
+ if (moved.mm_deltaX != 0 || moved.mm_deltaY != 0)
+ mouse_moved(moved);
+
+ if (buttonchanges != 0) {
+ lastbuttons = buttons;
+ if (buttonchanges & 1)
+ mouse_button(MOUSE_LEFT, !(buttons & 1));
+ if (buttonchanges & 2)
+ mouse_button(MOUSE_RIGHT, !((buttons & 2) >> 1));
+ if (buttonchanges & 4)
+ mouse_button(MOUSE_MIDDLE, !((buttons & 4) >> 2));
+ }
+}
+
+/*
+ * Enqueue a mouse-motion event. Called at SPLKD.
+ */
+mouse_moved(where)
+ struct mouse_motion where;
+{
+ kd_event ev;
+
+ ev.type = MOUSE_MOTION;
+ ev.time = time;
+ ev.value.mmotion = where;
+ mouse_enqueue(&ev);
+}
+
+
+/*
+ * Enqueue an event for mouse button press or release. Called at SPLKD.
+ */
+mouse_button(which, direction)
+ kev_type which;
+ u_char direction;
+{
+ kd_event ev;
+
+ ev.type = which;
+ ev.time = time;
+ ev.value.up = (direction == MOUSE_UP) ? TRUE : FALSE;
+ mouse_enqueue(&ev);
+}
+
+
+/*
+ * mouse_enqueue - enqueue an event and wake up selecting processes, if
+ * any. Called at SPLKD.
+ */
+
+void
+mouse_enqueue(ev)
+ kd_event *ev;
+{
+ if (kdq_full(&mouse_queue))
+ printf("mouse: queue full\n");
+ else
+ kdq_put(&mouse_queue, ev);
+
+#ifdef MACH_KERNEL
+ {
+ register io_req_t ior;
+ while ((ior = (io_req_t)dequeue_head(&mouse_read_queue)) != 0)
+ iodone(ior);
+ }
+#else MACH_KERNEL
+ if (mouse_sel) {
+ selwakeup(mouse_sel, mouseflag & MOUSE_COLL);
+ mouse_sel = 0;
+ mouseflag &= ~MOUSE_COLL;
+ }
+ if (mouseflag & MOUSE_ASYNC)
+ gsignal(mousepgrp, SIGIO);
+ wakeup((caddr_t)&mouse_queue);
+#endif MACH_KERNEL
+}
diff --git a/i386/i386at/kd_queue.c b/i386/i386at/kd_queue.c
new file mode 100644
index 00000000..2b83044a
--- /dev/null
+++ b/i386/i386at/kd_queue.c
@@ -0,0 +1,115 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kd_queue.c
+ Description: Event queue code for keyboard/display (and mouse) driver.
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+
+#include <i386at/kd_queue.h>
+
+/*
+ * Notice that when adding an entry to the queue, the caller provides
+ * its own storage, which is copied into the queue. However, when
+ * removing an entry from the queue, the caller is given a pointer to a
+ * queue element. This means that the caller must either process the
+ * element or copy it into its own storage before unlocking the queue.
+ *
+ * These routines should be called only at a protected SPL.
+ */
+
+#define q_next(index) (((index)+1) % KDQSIZE)
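+
+/*
+ * Hedged sketch of the intended calling pattern (illustration only, not
+ * code from this file); kbd_queue and SPLKD come from kd_event.c and kd.h:
+ *
+ *	spl_t s = SPLKD();
+ *	if (!kdq_empty(&kbd_queue)) {
+ *		kd_event ev = *kdq_get(&kbd_queue);
+ *		splx(s);
+ *		...use the private copy ev...
+ *	} else
+ *		splx(s);
+ */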
+
+boolean_t
+kdq_empty(q)
+ kd_event_queue *q;
+{
+ return(q->firstfree == q->firstout);
+}
+
+boolean_t
+kdq_full(q)
+ kd_event_queue *q;
+{
+ return(q_next(q->firstfree) == q->firstout);
+}
+
+void
+kdq_put(q, ev)
+ kd_event_queue *q;
+ kd_event *ev;
+{
+ kd_event *qp = q->events + q->firstfree;
+
+ qp->type = ev->type;
+ qp->time = ev->time;
+ qp->value = ev->value;
+ q->firstfree = q_next(q->firstfree);
+}
+
+kd_event *
+kdq_get(q)
+ kd_event_queue *q;
+{
+ kd_event *result = q->events + q->firstout;
+
+ q->firstout = q_next(q->firstout);
+ return(result);
+}
+
+void
+kdq_reset(q)
+ kd_event_queue *q;
+{
+ q->firstout = q->firstfree = 0;
+}
diff --git a/i386/i386at/kd_queue.h b/i386/i386at/kd_queue.h
new file mode 100644
index 00000000..1190e600
--- /dev/null
+++ b/i386/i386at/kd_queue.h
@@ -0,0 +1,79 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kd_queue.h
+ Description: definitions for keybd/display Event queue
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1989.
+ All rights reserved.
+********************************************************************** */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Definitions for keyboard/mouse events.
+ *
+ * The keyboard and mouse can be read as a stream of events. The event
+ * definition is the same in both cases, but only keyboard events will
+ * be generated by /dev/kbd, and only mouse events will be generated by
+ * /dev/mouse.
+ */
+
+#include <mach/std_types.h>
+#include <i386at/kd.h>
+
+#define KDQSIZE 100 /* is this a good size? */
+
+typedef struct {
+ kd_event events[KDQSIZE];
+ int firstfree, firstout;
+} kd_event_queue;
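+
+/*
+ * Note (inferred from kdq_full() in kd_queue.c): one slot is kept unused
+ * to distinguish a full queue from an empty one, so at most KDQSIZE - 1
+ * events are buffered at a time.
+ */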
+
+extern void kdq_put(), kdq_reset();
+extern boolean_t kdq_empty(), kdq_full();
+extern kd_event *kdq_get();
diff --git a/i386/i386at/kdasm.S b/i386/i386at/kdasm.S
new file mode 100644
index 00000000..46b1ee6b
--- /dev/null
+++ b/i386/i386at/kdasm.S
@@ -0,0 +1,145 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Some inline code to speed up major block copies to and from the
+ * screen buffer.
+ *
+ * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ * All rights reserved.
+ *
+ * orc!eugene 28 Oct 1988
+ *
+ */
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/* $ Header: $ */
+
+
+#include <mach/machine/asm.h>
+
+/*
+ * Function: kd_slmwd()
+ *
+ * This function "slams" a word (char/attr) into the screen memory using
+ * a block fill operation on the 386.
+ *
+ */
+
+#define start 0x08(%ebp)
+#define count 0x0c(%ebp)
+#define value 0x10(%ebp)
+
+ENTRY(kd_slmwd)
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %edi
+
+ movl start, %edi
+ movl count, %ecx
+ movw value, %ax
+ cld
+ rep
+ stosw
+
+ popl %edi
+ leave
+ ret
+#undef start
+#undef count
+#undef value
+
+/*
+ * "slam up"
+ */
+
+#define from 0x08(%ebp)
+#define to 0x0c(%ebp)
+#define count 0x10(%ebp)
+ENTRY(kd_slmscu)
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %esi
+ pushl %edi
+
+ movl from, %esi
+ movl to, %edi
+ movl count, %ecx
+ cmpl %edi, %esi
+ cld
+ rep
+ movsw
+
+ popl %edi
+ popl %esi
+ leave
+ ret
+
+/*
+ * "slam down"
+ */
+ENTRY(kd_slmscd)
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %esi
+ pushl %edi
+
+ movl from, %esi
+ movl to, %edi
+ movl count, %ecx
+ cmpl %edi, %esi
+ std
+ rep
+ movsw
+ cld
+
+ popl %edi
+ popl %esi
+ leave
+ ret
+#undef from
+#undef to
+#undef count
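+
+/*
+ * For reference, rough C equivalents of the three routines above
+ * (a sketch only; the assembly versions are what the driver uses).
+ * "u_short" stands for the 16-bit char/attribute cell moved by
+ * stosw/movsw.  kd_slmscu copies with ascending addresses (cld),
+ * kd_slmscd with descending addresses (std), so for the latter the
+ * arguments point at the last word of each region:
+ *
+ *	void kd_slmwd(start, count, value)
+ *	u_short *start; int count; u_short value;
+ *	{
+ *		while (count-- > 0)
+ *			*start++ = value;
+ *	}
+ *
+ *	void kd_slmscu(from, to, count)
+ *	u_short *from, *to; int count;
+ *	{
+ *		while (count-- > 0)
+ *			*to++ = *from++;
+ *	}
+ *
+ *	void kd_slmscd(from, to, count)
+ *	u_short *from, *to; int count;
+ *	{
+ *		while (count-- > 0)
+ *			*to-- = *from--;
+ *	}
+ */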
diff --git a/i386/i386at/kdsoft.h b/i386/i386at/kdsoft.h
new file mode 100644
index 00000000..2be21d04
--- /dev/null
+++ b/i386/i386at/kdsoft.h
@@ -0,0 +1,201 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* **********************************************************************
+ File: kdsoft.h
+ Description: Software structures for keyboard/display driver, shared with
+ drivers for specific graphics cards.
+
+ $ Header: $
+
+ Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989.
+ All rights reserved.
+********************************************************************** */
+
+/*
+ Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc.,
+Cupertino, California.
+
+ All Rights Reserved
+
+ Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Olivetti
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+ OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * This driver handles two types of graphics cards. The first type
+ * (e.g., EGA, CGA), treats the screen as a page of characters and
+ * has a hardware cursor. The second type (e.g., the Blit) treats the
+ * screen as a bitmap. A hardware cursor may be present, but it is
+ * ignored in favor of a software cursor.
+ *
+ *
+ * Most of the driver uses the following abstraction for the display:
+ *
+ * The cursor position is simply an index into a (logical) linear char
+ * array that wraps around at the end of each line. Each character
+ * takes up ONE_SPACE bytes. Values in [0..ONE_PAGE) are positions in
+ * the displayed page. Values < 0 and >= ONE_PAGE are off the page
+ * and require some scrolling to put the cursor back on the page.
+ *
+ * The kd_dxxx routines handle the conversion from this abstraction to
+ * what the hardware requires.
+ *
+ * (*kd_dput)(pos, ch, chattr)
+ * csrpos_t pos;
+ * char ch, chattr;
+ * Displays a character at "pos", where "ch" = the character to
+ * be displayed and "chattr" is its attribute byte.
+ *
+ * (*kd_dmvup)(from, to, count)
+ * csrpos_t from, to;
+ * int count;
+ * Does a (relatively) fast block transfer of characters upward.
+ * "count" is the number of character positions (not bytes) to move.
+ * "from" is the character position to start moving from (at the start
+ * of the block to be moved). "to" is the character position to start
+ * moving to.
+ *
+ * (*kd_dmvdown)(from, to, count)
+ * csrpos_t from, to;
+ * int count;
+ * "count" is the number of character positions (not bytes) to move.
+ * "from" is the character position to start moving from (at the end
+ * of the block to be moved). "to" is the character position to
+ * start moving to.
+ *
+ * (*kd_dclear)(to, count, chattr)
+ * csrpos_t to;
+ * int count;
+ * char chattr;
+ * Erases "count" character positions, starting with "to".
+ *
+ * (*kd_dsetcursor)(pos)
+ * Sets kd_curpos and moves the displayed cursor to track it. "pos"
+ * should be in the range [0..ONE_PAGE).
+ *
+ * (*kd_dreset)()
+ * In some cases, the boot program expects the display to be in a
+ * particular state, and doing a soft reset (i.e.,
+ * software-controlled reboot) doesn't put it into that state. For
+ * these cases, the machine-specific driver should provide a "reset"
+ * procedure, which will be called just before the kd code causes the
+ * system to reboot.
+ */
+
+extern void bmpput(), bmpmvup(), bmpmvdown(), bmpclear(), bmpsetcursor();
+
+extern void (*kd_dput)(); /* put attributed char */
+extern void (*kd_dmvup)(); /* block move up */
+extern void (*kd_dmvdown)(); /* block move down */
+extern void (*kd_dclear)(); /* block clear */
+extern void (*kd_dsetcursor)();
+ /* set cursor position on displayed page */
+extern void (*kd_dreset)(); /* prepare for reboot */
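+
+/*
+ * A specific display driver fills these hooks in at attach time.
+ * For a bitmap card the assignments would look roughly like the
+ * following (a sketch; a character-cell EGA/CGA configuration
+ * installs its own routines instead):
+ *
+ *	kd_dput       = bmpput;
+ *	kd_dmvup      = bmpmvup;
+ *	kd_dmvdown    = bmpmvdown;
+ *	kd_dclear     = bmpclear;
+ *	kd_dsetcursor = bmpsetcursor;
+ */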
+
+
+/*
+ * Globals used for both character-based controllers and bitmap-based
+ * controllers.
+ */
+typedef short csrpos_t; /* cursor position, ONE_SPACE bytes per char */
+
+extern u_char *vid_start; /* VM start of video RAM or frame buffer */
+extern csrpos_t kd_curpos; /* should be set only by kd_setpos */
+extern short kd_lines; /* num lines in tty display */
+extern short kd_cols;
+extern char kd_attr; /* current character attribute */
+
+
+/*
+ * Globals used only for bitmap-based controllers.
+ * XXX - probably needs reworking for color.
+ */
+
+/*
+ * The following font layout is assumed:
+ *
+ * The top scan line of all the characters comes first. Then the
+ * second scan line, then the third, etc.
+ *
+ * ------ ... ---------|-----N--------|-------------- ... -----------
+ * ------ ... ---------|-----N--------|-------------- ... -----------
+ * .
+ * .
+ * .
+ * ------ ... ---------|-----N--------|-------------- ... -----------
+ *
+ * In the picture, each line is a scan line from the font. Each scan
+ * line is stored in memory immediately after the previous one. The
+ * bits between the vertical lines are the bits for a single character
+ * (e.g., the letter "N").
+ * There are "char_height" scan lines. Each character is "char_width"
+ * bits wide. We make the simplifying assumption that characters are
+ * on byte boundaries. (We also assume that a byte is 8 bits.)
+ */
+
+extern u_char *font_start; /* starting addr of font */
+
+extern short fb_width; /* bits in frame buffer scan line */
+extern short fb_height; /* scan lines in frame buffer*/
+extern short char_width; /* bit width of 1 char */
+extern short char_height; /* bit height of 1 char */
+extern short chars_in_font;
+extern short cursor_height; /* bit height of cursor */
+ /* char_height + cursor_height = line_height */
+
+extern u_char char_black; /* 8 black (off) bits */
+extern u_char char_white; /* 8 white (on) bits */
+
+
+/*
+ * The tty emulation does not usually require the entire frame buffer.
+ * (xstart, ystart) is the bit address for the upper left corner of the
+ * tty "screen".
+ */
+
+extern short xstart, ystart;
+
+
+/*
+ * Accelerators for bitmap displays.
+ */
+
+extern short char_byte_width; /* char_width/8 */
+extern short fb_byte_width; /* fb_width/8 */
+extern short font_byte_width; /* num bytes in 1 scan line of font */
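+
+/*
+ * Putting the layout above together: the bytes for scan line "r" of
+ * font character "c" start at
+ *
+ *	font_start + r * font_byte_width + c * char_byte_width
+ *
+ * so a glyph is drawn by walking char_height such runs, e.g.
+ * (a sketch that ignores the frame-buffer side of the copy, which
+ * depends on the particular card):
+ *
+ *	u_char *src = font_start + c * char_byte_width;
+ *	for (r = 0; r < char_height; r++, src += font_byte_width)
+ *		copy char_byte_width bytes from src into the display;
+ */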
diff --git a/i386/i386at/lpr.c b/i386/i386at/lpr.c
new file mode 100644
index 00000000..09afbfc6
--- /dev/null
+++ b/i386/i386at/lpr.c
@@ -0,0 +1,419 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Parallel port printer driver v1.0
+ * All rights reserved.
+ */
+
+#include <lpr.h>
+#if NLPR > 0
+#include <par.h>
+#include <de6c.h>
+
+#ifdef MACH_KERNEL
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <device/conf.h>
+#include <device/errno.h>
+#include <device/tty.h>
+#include <device/io_req.h>
+#else MACH_KERNEL
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/dir.h>
+#include <sys/user.h>
+#include <sys/proc.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/file.h>
+#endif MACH_KERNEL
+
+#include <i386/ipl.h>
+#include <i386/pio.h>
+#include <chips/busses.h>
+#include <i386at/lprreg.h>
+
+#if NPAR > 0
+extern int parattach();
+#endif
+
+#if NDE6C > 0
+extern int de6cattach();
+#endif
+
+extern void splx();
+extern spl_t spltty();
+extern void timeout();
+extern void ttrstrt();
+
+/*
+ * Driver information for auto-configuration stuff.
+ */
+
+int lprprobe(), lprintr(), lprstart(), lprstop();
+void lprattach(struct bus_device *);
+#ifdef MACH_KERNEL
+int lprstop(), lprgetstat(), lprsetstat();
+#endif MACH_KERNEL
+
+struct bus_device *lprinfo[NLPR]; /* ??? */
+
+static vm_offset_t lpr_std[NLPR] = { 0 };
+static struct bus_device *lpr_info[NLPR];
+struct bus_driver lprdriver = {
+ lprprobe, 0, lprattach, 0, lpr_std, "lpr", lpr_info, 0, 0, 0};
+
+struct tty lpr_tty[NLPR];
+
+int lpr_alive[NLPR];
+
+lprprobe(port, dev)
+struct bus_device *dev;
+{
+ u_short addr = (u_short) dev->address;
+ int unit = dev->unit;
+ int ret;
+
+ if ((unit < 0) || (unit >= NLPR)) {
+ printf("lpr %d out of range\n", unit);
+ return(0);
+ }
+
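+ /*
+ * Probe by writing a test pattern to the data latch and reading it
+ * back; on AT-style parallel ports the data register normally reads
+ * back the last value written, so a mismatch means nothing answers
+ * at this address.
+ */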
+ outb(INTR_ENAB(addr),0x07);
+ outb(DATA(addr),0xaa);
+ ret = inb(DATA(addr)) == 0xaa;
+ if (ret) {
+ if (lpr_alive[unit]) {
+ printf("lpr: Multiple alive entries for unit %d.\n", unit);
+ printf("lpr: Ignoring entry with address = %x .\n", addr);
+ ret = 0;
+ } else
+ lpr_alive[unit]++;
+ }
+ return(ret);
+}
+
+void lprattach(struct bus_device *dev)
+{
+ u_char unit = dev->unit;
+ u_short addr = (u_short) dev->address;
+ struct tty *tp = &lpr_tty[unit];
+
+ take_dev_irq(dev);
+ printf(", port = %x, spl = %d, pic = %d.",
+ dev->address, dev->sysdep, dev->sysdep1);
+ lprinfo[unit] = dev;
+
+ outb(INTR_ENAB(addr), inb(INTR_ENAB(addr)) & 0x0f);
+
+#if NPAR > 0
+ parattach(dev);
+#endif
+
+#if NDE6C > 0 && !defined(LINUX_DEV)
+ de6cattach(dev);
+#endif
+ return;
+}
+
+lpropen(dev, flag, ior)
+int dev;
+int flag;
+#ifdef MACH_KERNEL
+io_req_t ior;
+#endif MACH_KERNEL
+{
+int unit = minor(dev);
+struct bus_device *isai;
+struct tty *tp;
+u_short addr;
+
+ if (unit >= NLPR || (isai = lprinfo[unit]) == 0 || isai->alive == 0)
+ return(ENXIO);
+ tp = &lpr_tty[unit];
+#ifndef MACH_KERNEL
+ if (tp->t_state & TS_XCLUDE && u.u_uid != 0)
+ return(EBUSY);
+#endif MACH_KERNEL
+ addr = (u_short) isai->address;
+ tp->t_dev = dev;
+ tp->t_addr = *(caddr_t *)&addr;
+ tp->t_oproc = lprstart;
+ tp->t_state |= TS_WOPEN;
+#ifdef MACH_KERNEL
+ tp->t_stop = lprstop;
+ tp->t_getstat = lprgetstat;
+ tp->t_setstat = lprsetstat;
+#endif MACH_KERNEL
+ if ((tp->t_state & TS_ISOPEN) == 0)
+ ttychars(tp);
+ outb(INTR_ENAB(addr), inb(INTR_ENAB(addr)) | 0x10);
+ tp->t_state |= TS_CARR_ON;
+ return (char_open(dev, tp, flag, ior));
+}
+
+lprclose(dev, flag)
+int dev;
+int flag;
+{
+int unit = minor(dev);
+struct tty *tp = &lpr_tty[unit];
+u_short addr = (u_short) lprinfo[unit]->address;
+
+#ifndef MACH_KERNEL
+ (*linesw[tp->t_line].l_close)(tp);
+#endif MACH_KERNEL
+ ttyclose(tp);
+ if (tp->t_state&TS_HUPCLS || (tp->t_state&TS_ISOPEN)==0) {
+ outb(INTR_ENAB(addr), inb(INTR_ENAB(addr)) & 0x0f);
+ tp->t_state &= ~TS_BUSY;
+ }
+}
+
+#ifdef MACH_KERNEL
+lprread(dev, ior)
+int dev;
+io_req_t ior;
+{
+ return char_read(&lpr_tty[minor(dev)], ior);
+}
+
+lprwrite(dev, ior)
+int dev;
+io_req_t ior;
+{
+ return char_write(&lpr_tty[minor(dev)], ior);
+}
+
+lprportdeath(dev, port)
+dev_t dev;
+mach_port_t port;
+{
+ return (tty_portdeath(&lpr_tty[minor(dev)], port));
+}
+
+io_return_t
+lprgetstat(dev, flavor, data, count)
+dev_t dev;
+int flavor;
+int *data; /* pointer to OUT array */
+unsigned int *count; /* out */
+{
+ io_return_t result = D_SUCCESS;
+ int unit = minor(dev);
+
+ switch (flavor) {
+ default:
+ result = tty_get_status(&lpr_tty[unit], flavor, data, count);
+ break;
+ }
+ return (result);
+}
+
+io_return_t
+lprsetstat(dev, flavor, data, count)
+dev_t dev;
+int flavor;
+int * data;
+unsigned int count;
+{
+ io_return_t result = D_SUCCESS;
+ int unit = minor(dev);
+ u_short dev_addr = (u_short) lprinfo[unit]->address;
+ int s;
+
+ switch (flavor) {
+ default:
+ result = tty_set_status(&lpr_tty[unit], flavor, data, count);
+/* if (result == D_SUCCESS && flavor == TTY_STATUS)
+ lprparam(unit);
+*/ return (result);
+ }
+ return (D_SUCCESS);
+}
+#else MACH_KERNEL
+int lprwrite(dev, uio)
+ int dev;
+ struct uio *uio;
+{
+ struct tty *tp= &lpr_tty[minor(dev)];
+
+ return ((*linesw[tp->t_line].l_write)(tp, uio));
+}
+
+int lprioctl(dev, cmd, addr, mode)
+ int dev;
+ int cmd;
+ caddr_t addr;
+ int mode;
+{
+ int error;
+ spl_t s;
+ int unit = minor(dev);
+ struct tty *tp = &lpr_tty[unit];
+
+ error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, addr,mode);
+ if (error >= 0)
+ return(error);
+ error = ttioctl(tp, cmd, addr,mode);
+ if (error >= 0)
+ return (error);
+ s = spltty();
+ switch (cmd) {
+ default:
+ splx(s);
+ return(ENOTTY);
+ }
+ splx(s);
+ return(0);
+}
+#endif MACH_KERNEL
+
+int lprintr(unit)
+int unit;
+{
+ register struct tty *tp = &lpr_tty[unit];
+
+ if ((tp->t_state & TS_ISOPEN) == 0)
+ return;
+
+ tp->t_state &= ~TS_BUSY;
+ if (tp->t_state&TS_FLUSH)
+ tp->t_state &=~TS_FLUSH;
+ tt_write_wakeup(tp);
+ lprstart(tp);
+}
+
+int lprstart(tp)
+struct tty *tp;
+{
+ spl_t s = spltty();
+ u_short addr = (natural_t) tp->t_addr;
+ int status = inb(STATUS(addr));
+ char nch;
+
+ if (tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) {
+ splx(s);
+ return(0);
+ }
+
+ if (status & 0x20) {
+ printf("Printer out of paper!\n");
+ splx(s);
+ return(0);
+ }
+
+ if (tp->t_outq.c_cc <= TTLOWAT(tp)) {
+#ifdef MACH_KERNEL
+ tt_write_wakeup(tp);
+#else MACH_KERNEL
+ if (tp->t_state & TS_ASLEEP) {
+ tp->t_state &= ~TS_ASLEEP;
+ wakeup ((caddr_t)&tp->t_outq);
+ }
+ if (tp->t_wsel) {
+ selwakeup(tp->t_wsel, tp->t_state & TS_WCOLL);
+ tp->t_wsel = 0;
+ tp->t_state &= ~TS_WCOLL;
+ }
+#endif MACH_KERNEL
+ }
+ if (tp->t_outq.c_cc == 0) {
+ splx(s);
+ return(0);
+ }
+#ifdef MACH_KERNEL
+ nch = getc(&tp->t_outq);
+ if ((tp->t_flags & LITOUT) == 0 && (nch & 0200)) {
+ timeout(ttrstrt, (char *)tp, (nch & 0x7f) + 6);
+ tp->t_state |= TS_TIMEOUT;
+ splx(s);
+ return(0);
+ }
+ outb(DATA(addr), nch);
+ outb(INTR_ENAB(addr),inb(INTR_ENAB(addr)) | 0x01);
+ outb(INTR_ENAB(addr),inb(INTR_ENAB(addr)) & 0x1e);
+ tp->t_state |= TS_BUSY;
+#else MACH_KERNEL
+ if (tp->t_flags & (RAW|LITOUT))
+ nch = ndqb(&tp->t_outq,0);
+ else {
+ nch = ndqb(&tp->t_outq, 0200);
+ if (nch == 0) {
+ nch = getc(&tp->t_outq);
+ timeout(ttrstrt,(caddr_t)tp,(nch&0x7f)+6);
+ tp->t_state |= TS_TIMEOUT;
+ splx(s);
+ return(0);
+ }
+ }
+ if (nch) {
+ nch=getc(&tp->t_outq);
+ outb(DATA(addr), nch);
+ outb(INTR_ENAB(addr),inb(INTR_ENAB(addr)) | 0x01);
+ outb(INTR_ENAB(addr),inb(INTR_ENAB(addr)) & 0x1e);
+ tp->t_state |= TS_BUSY;
+ }
+#endif MACH_KERNEL
+ splx(s);
+ return(0);
+}
+
+#ifdef MACH_KERNEL
+lprstop(tp, flags)
+register struct tty *tp;
+int flags;
+{
+ if ((tp->t_state & TS_BUSY) && (tp->t_state & TS_TTSTOP) == 0)
+ tp->t_state |= TS_FLUSH;
+}
+#else MACH_KERNEL
+int lprstop(tp, flag)
+struct tty *tp;
+{
+ int s = spltty();
+
+ if ((tp->t_state&TS_BUSY) && (!(tp->t_state&TS_TTSTOP)))
+ tp->t_state |= TS_FLUSH;
+ splx(s);
+}
+#endif MACH_KERNEL
+lprpr(unit)
+{
+ lprpr_addr(lprinfo[unit]->address);
+ return 0;
+}
+
+lprpr_addr(addr)
+{
+ printf("DATA(%x) %x, STATUS(%x) %x, INTR_ENAB(%x) %x\n",
+ DATA(addr), inb(DATA(addr)),
+ STATUS(addr), inb(STATUS(addr)),
+ INTR_ENAB(addr), inb(INTR_ENAB(addr)));
+}
+#endif NLPR
diff --git a/i386/i386at/lprreg.h b/i386/i386at/lprreg.h
new file mode 100644
index 00000000..c6fbed43
--- /dev/null
+++ b/i386/i386at/lprreg.h
@@ -0,0 +1,33 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Parallel port printer driver v1.0
+ * All rights reserved.
+ */
+
+#define DATA(addr) (addr + 0)
+#define STATUS(addr) (addr + 1)
+#define INTR_ENAB(addr) (addr + 2)
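+
+/*
+ * These are the three conventional AT parallel port registers at
+ * offsets 0-2 from the base I/O address (e.g. 0x378):
+ *
+ *	DATA		data latch, the byte sent to the printer
+ *	STATUS		printer status; bit 0x20 = out of paper
+ *	INTR_ENAB	control register; bit 0x10 enables the IRQ,
+ *			bit 0x01 is the data strobe
+ *
+ * (A sketch of the standard layout; lpr.c uses these bits.)
+ */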
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
new file mode 100644
index 00000000..3ef53a94
--- /dev/null
+++ b/i386/i386at/model_dep.c
@@ -0,0 +1,651 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: model_dep.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Basic initialization for I386 - ISA bus machines.
+ */
+
+#include <platforms.h>
+#include <mach_kdb.h>
+
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#include <mach/machine.h>
+#include <mach/machine/multiboot.h>
+
+#include "vm_param.h"
+#include <kern/time_out.h>
+#include <sys/time.h>
+#include <vm/vm_page.h>
+#include <i386/machspl.h>
+#include <i386/pmap.h>
+#include "proc_reg.h"
+
+/* Location of the kernel's symbol table.
+ Both of these are 0 if none is available. */
+#if MACH_KDB
+static vm_offset_t kern_sym_start, kern_sym_end;
+#else
+#define kern_sym_start 0
+#define kern_sym_end 0
+#endif
+
+/* These indicate the total extent of physical memory addresses we're using.
+ They are page-aligned. */
+vm_offset_t phys_first_addr = 0;
+vm_offset_t phys_last_addr;
+
+/* Virtual address of physical memory, for the kvtophys/phystokv macros. */
+vm_offset_t phys_mem_va;
+
+struct multiboot_info *boot_info;
+
+/* Command line supplied to kernel. */
+char *kernel_cmdline = "";
+
+/* This is used for memory initialization:
+ it gets bumped up through physical memory
+ that exists and is not occupied by boot gunk.
+ It is not necessarily page-aligned. */
+static vm_offset_t avail_next = 0x1000; /* XX end of BIOS data area */
+
+/* Possibly overestimated amount of available memory
+ still remaining to be handed to the VM system. */
+static vm_size_t avail_remaining;
+
+/* Configuration parameter:
+ if zero, only use physical memory in the low 16MB of addresses.
+ Only SCSI still has DMA problems. */
+#ifdef LINUX_DEV
+int use_all_mem = 1;
+#else
+#include "nscsi.h"
+#if NSCSI > 0
+int use_all_mem = 0;
+#else
+int use_all_mem = 1;
+#endif
+#endif
+
+extern char version[];
+
+extern void setup_main();
+
+void inittodr(); /* forward */
+
+int rebootflag = 0; /* exported to kdintr */
+
+/* XX interrupt stack pointer and highwater mark, for locore.S. */
+vm_offset_t int_stack_top, int_stack_high;
+
+#ifdef LINUX_DEV
+extern void linux_init(void);
+#endif
+
+/*
+ * Find devices. The system is alive.
+ */
+void machine_init()
+{
+ /*
+ * Initialize the console.
+ */
+ cninit();
+
+ /*
+ * Set up to use floating point.
+ */
+ init_fpu();
+
+#ifdef LINUX_DEV
+ /*
+ * Initialize Linux drivers.
+ */
+ linux_init();
+#endif
+
+ /*
+ * Find the devices
+ */
+ probeio();
+
+ /*
+ * Get the time
+ */
+ inittodr();
+
+ /*
+ * Tell the BIOS not to clear and test memory.
+ */
+ *(unsigned short *)phystokv(0x472) = 0x1234;
+
+ /*
+ * Unmap page 0 to trap NULL references.
+ */
+ pmap_unmap_page_zero();
+}
+
+/*
+ * Halt a cpu.
+ */
+halt_cpu()
+{
+ asm volatile("cli");
+ while(1);
+}
+
+/*
+ * Halt the system or reboot.
+ */
+halt_all_cpus(reboot)
+ boolean_t reboot;
+{
+ if (reboot) {
+ kdreboot();
+ }
+ else {
+ rebootflag = 1;
+ printf("In tight loop: hit ctl-alt-del to reboot\n");
+ (void) spl0();
+ }
+ for (;;)
+ continue;
+}
+
+void exit(int rc)
+{
+ halt_all_cpus(0);
+}
+
+void db_reset_cpu()
+{
+ halt_all_cpus(1);
+}
+
+
+/*
+ * Compute physical memory size and other parameters.
+ */
+void
+mem_size_init()
+{
+ /* Physical memory on all PCs starts at physical address 0.
+ XX make it a constant. */
+ phys_first_addr = 0;
+
+ phys_last_addr = 0x100000 + (boot_info->mem_upper * 0x400);
+ avail_remaining
+ = phys_last_addr - (0x100000 - (boot_info->mem_lower * 0x400)
+ - 0x1000);
+
+ printf("AT386 boot: physical memory from 0x%x to 0x%x\n",
+ phys_first_addr, phys_last_addr);
+
+ if ((!use_all_mem) && phys_last_addr > 16 * 1024*1024) {
+ printf("** Limiting useable memory to 16 Meg to avoid DMA problems.\n");
+ /* This is actually enforced below, in init_alloc_aligned. */
+ }
+
+ phys_first_addr = round_page(phys_first_addr);
+ phys_last_addr = trunc_page(phys_last_addr);
+}
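+
+/*
+ * Example: a machine whose boot loader reports mem_lower = 640 (KB)
+ * and mem_upper = 15360 (KB) gets
+ *
+ *	phys_last_addr = 0x100000 + 15360 * 0x400 = 0x1000000   (16 MB)
+ *
+ * and avail_remaining then covers roughly everything outside the
+ * 640K-1M I/O/ROM hole; init_alloc_aligned() below is what actually
+ * steers allocations around that hole and the boot data.
+ */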
+
+/*
+ * Basic PC VM initialization.
+ * Turns on paging and changes the kernel segments to use high linear addresses.
+ */
+i386at_init()
+{
+ /* XXX move to intel/pmap.h */
+ extern pt_entry_t *kernel_page_dir;
+
+ /*
+ * Initialize the PIC prior to any possible call to an spl.
+ */
+ picinit();
+
+ /*
+ * Find memory size parameters.
+ */
+ mem_size_init();
+
+ /*
+ * Initialize kernel physical map, mapping the
+ * region from loadpt to avail_start.
+ * Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS.
+ * XXX make the BIOS page (page 0) read-only.
+ */
+ pmap_bootstrap();
+
+ /*
+ * Turn paging on.
+ * We'll have to temporarily install a direct mapping
+ * between physical memory and low linear memory,
+ * until we start using our new kernel segment descriptors.
+ * One page table (4MB) should do the trick.
+ * Also, set the WP bit so that on 486 or better processors
+ * page-level write protection works in kernel mode.
+ */
+ kernel_page_dir[lin2pdenum(0)] =
+ kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS)];
+ set_cr3((unsigned)kernel_page_dir);
+ set_cr0(get_cr0() | CR0_PG | CR0_WP);
+ flush_instr_queue();
+
+ /*
+ * Initialize and activate the real i386 protected-mode structures.
+ */
+ gdt_init();
+ idt_init();
+ int_init();
+ ldt_init();
+ ktss_init();
+
+ /* Get rid of the temporary direct mapping and flush it out of the TLB. */
+ kernel_page_dir[lin2pdenum(0)] = 0;
+ set_cr3((unsigned)kernel_page_dir);
+
+
+
+ /* XXX We'll just use the initialization stack we're already running on
+ as the interrupt stack for now. Later this will have to change,
+ because the init stack will get freed after bootup. */
+ asm("movl %%esp,%0" : "=m" (int_stack_top));
+
+ /* Interrupt stacks are allocated in physical memory,
+ while kernel stacks are allocated in kernel virtual memory,
+ so phys_last_addr serves as a convenient dividing point. */
+ int_stack_high = phys_last_addr;
+}
+
+/*
+ * C boot entrypoint - called by boot_entry in boothdr.S.
+ * Running in 32-bit flat mode, but without paging yet.
+ */
+void c_boot_entry(vm_offset_t bi)
+{
+ /* Stash the boot_image_info pointer. */
+ boot_info = (struct multiboot_info*)phystokv(bi);
+
+ /* XXX we currently assume phys_mem_va is always 0 here -
+ if it isn't, we must tweak the pointers in the boot_info. */
+
+ /* Before we do _anything_ else, print the hello message.
+ If there are no initialized console devices yet,
+ it will be stored and printed at the first opportunity. */
+ printf(version);
+ printf("\n");
+
+ /* Find the kernel command line, if there is one. */
+ if (boot_info->flags & MULTIBOOT_CMDLINE)
+ kernel_cmdline = (char*)phystokv(boot_info->cmdline);
+
+#if MACH_KDB
+ /*
+ * Locate the kernel's symbol table, if the boot loader provided it.
+ * We need to do this before i386at_init()
+ * so that the symbol table's memory won't be stomped on.
+ */
+ if ((boot_info->flags & MULTIBOOT_AOUT_SYMS)
+ && boot_info->syms.a.addr)
+ {
+ vm_size_t symtab_size, strtab_size;
+
+ kern_sym_start = (vm_offset_t)phystokv(boot_info->syms.a.addr);
+ symtab_size = (vm_offset_t)phystokv(boot_info->syms.a.tabsize);
+ strtab_size = (vm_offset_t)phystokv(boot_info->syms.a.strsize);
+ kern_sym_end = kern_sym_start + 4 + symtab_size + strtab_size;
+
+ printf("kernel symbol table at %08x-%08x (%d,%d)\n",
+ kern_sym_start, kern_sym_end,
+ symtab_size, strtab_size);
+ }
+#endif MACH_KDB
+
+ /*
+ * Do basic VM initialization
+ */
+ i386at_init();
+
+#if MACH_KDB
+ /*
+ * Initialize the kernel debugger's kernel symbol table.
+ */
+ if (kern_sym_start)
+ {
+ aout_db_sym_init(kern_sym_start, kern_sym_end, "mach", 0);
+ }
+
+ /*
+ * Cause a breakpoint trap to the debugger before proceeding
+ * any further if the proper option flag was specified
+ * on the kernel's command line.
+ * XXX check for surrounding spaces.
+ */
+ if (strstr(kernel_cmdline, "-d ")) {
+ cninit(); /* need console for debugger */
+ Debugger();
+ }
+#endif MACH_KDB
+
+ machine_slot[0].is_cpu = TRUE;
+ machine_slot[0].running = TRUE;
+ machine_slot[0].cpu_type = CPU_TYPE_I386;
+ machine_slot[0].cpu_subtype = CPU_SUBTYPE_AT386;
+
+ /*
+ * Start the system.
+ */
+ setup_main();
+
+}
+
+#include <mach/vm_prot.h>
+#include <vm/pmap.h>
+#include <mach/time_value.h>
+
+timemmap(dev,off,prot)
+ vm_prot_t prot;
+{
+ extern time_value_t *mtime;
+
+#ifdef lint
+ dev++; off++;
+#endif lint
+
+ if (prot & VM_PROT_WRITE) return (-1);
+
+ return (i386_btop(pmap_extract(pmap_kernel(), (vm_offset_t) mtime)));
+}
+
+startrtclock()
+{
+ clkstart();
+}
+
+void
+inittodr()
+{
+ time_value_t new_time;
+
+ new_time.seconds = 0;
+ new_time.microseconds = 0;
+
+ (void) readtodc(&new_time.seconds);
+
+ {
+ spl_t s = splhigh();
+ time = new_time;
+ splx(s);
+ }
+}
+
+void
+resettodr()
+{
+ writetodc();
+}
+
+unsigned int pmap_free_pages()
+{
+ return atop(avail_remaining);
+}
+
+/* Always returns page-aligned regions. */
+boolean_t
+init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
+{
+ vm_offset_t addr;
+ extern char start[], end[];
+ int i;
+
+ /* Memory regions to skip. */
+ vm_offset_t boot_info_start_pa = kvtophys(boot_info);
+ vm_offset_t boot_info_end_pa = boot_info_start_pa + sizeof(*boot_info);
+ vm_offset_t cmdline_start_pa = boot_info->flags & MULTIBOOT_CMDLINE
+ ? boot_info->cmdline : 0;
+ vm_offset_t cmdline_end_pa = cmdline_start_pa
+ ? cmdline_start_pa+strlen((char*)phystokv(cmdline_start_pa))+1
+ : 0;
+ vm_offset_t mods_start_pa = boot_info->flags & MULTIBOOT_MODS
+ ? boot_info->mods_addr : 0;
+ vm_offset_t mods_end_pa = mods_start_pa
+ ? mods_start_pa
+ + boot_info->mods_count * sizeof(struct multiboot_module)
+ : 0;
+
+ retry:
+
+ /* Page-align the start address. */
+ avail_next = round_page(avail_next);
+
+ /* Check if we have reached the end of memory. */
+ if (avail_next == phys_last_addr)
+ return FALSE;
+
+ /* Tentatively assign the current location to the caller. */
+ addr = avail_next;
+
+ /* Bump the pointer past the newly allocated region
+ and see where that puts us. */
+ avail_next += size;
+
+ /* Skip past the I/O and ROM area. */
+ if ((avail_next > (boot_info->mem_lower * 0x400)) && (addr < 0x100000))
+ {
+ avail_next = 0x100000;
+ goto retry;
+ }
+
+ /* If we're only supposed to use the low 16 megs, enforce that. */
+ if ((!use_all_mem) && (addr >= 16 * 1024*1024)) {
+ return FALSE;
+ }
+
+ /* Skip our own kernel code, data, and bss. */
+ if ((avail_next >= (vm_offset_t)start) && (addr < (vm_offset_t)end))
+ {
+ avail_next = (vm_offset_t)end;
+ goto retry;
+ }
+
+ /* Skip any areas occupied by valuable boot_info data. */
+ if ((avail_next > boot_info_start_pa) && (addr < boot_info_end_pa))
+ {
+ avail_next = boot_info_end_pa;
+ goto retry;
+ }
+ if ((avail_next > cmdline_start_pa) && (addr < cmdline_end_pa))
+ {
+ avail_next = cmdline_end_pa;
+ goto retry;
+ }
+ if ((avail_next > mods_start_pa) && (addr < mods_end_pa))
+ {
+ avail_next = mods_end_pa;
+ goto retry;
+ }
+ if ((avail_next > kern_sym_start) && (addr < kern_sym_end))
+ {
+ avail_next = kern_sym_end;
+ goto retry;
+ }
+ if (boot_info->flags & MULTIBOOT_MODS)
+ {
+ struct multiboot_module *m = (struct multiboot_module *)
+ phystokv(boot_info->mods_addr);
+ for (i = 0; i < boot_info->mods_count; i++)
+ {
+ if ((avail_next > m[i].mod_start)
+ && (addr < m[i].mod_end))
+ {
+ avail_next = m[i].mod_end;
+ goto retry;
+ }
+ /* XXX string */
+ }
+ }
+
+ avail_remaining -= size;
+
+ *addrp = addr;
+ return TRUE;
+}
+
+boolean_t pmap_next_page(addrp)
+ vm_offset_t *addrp;
+{
+ return init_alloc_aligned(PAGE_SIZE, addrp);
+}
+
+/* Grab a physical page:
+ the standard memory allocation mechanism
+ during system initialization. */
+vm_offset_t
+pmap_grab_page()
+{
+ vm_offset_t addr;
+ if (!pmap_next_page(&addr))
+ panic("Not enough memory to initialize Mach");
+ return addr;
+}
+
+boolean_t pmap_valid_page(x)
+ vm_offset_t x;
+{
+ /* XXX is this OK? What does it matter for? */
+ return (((phys_first_addr <= x) && (x < phys_last_addr)) &&
+ !(((boot_info->mem_lower * 1024) <= x) && (x < 1024*1024)));
+}
+
+#ifndef NBBY
+#define NBBY 8
+#endif
+#ifndef NBPW
+#define NBPW (NBBY * sizeof(int))
+#endif
+#define DMA_MAX (16*1024*1024)
+
+/*
+ * Allocate contiguous pages below 16 MB
+ * starting at specified boundary for DMA.
+ */
+vm_offset_t
+alloc_dma_mem(size, align)
+ vm_size_t size;
+ vm_offset_t align;
+{
+ int *bits, i, j, k, n;
+ int npages, count, bit, mask;
+ int first_page, last_page;
+ vm_offset_t addr;
+ vm_page_t p, prevp;
+
+ npages = round_page(size) / PAGE_SIZE;
+ mask = align ? (align - 1) / PAGE_SIZE : 0;
+
+ /*
+ * Allocate bit array.
+ */
+ n = ((DMA_MAX / PAGE_SIZE) + NBPW - 1) / NBPW;
+ i = n * NBPW;
+ bits = (unsigned *)kalloc(i);
+ if (bits == 0) {
+ printf("alloc_dma_mem: unable alloc bit array\n");
+ return (0);
+ }
+ bzero((char *)bits, i);
+
+ /*
+ * Walk the page free list and set a bit for
+ * every usable page in bit array.
+ */
+ simple_lock(&vm_page_queue_free_lock);
+ for (p = vm_page_queue_free; p; p = (vm_page_t)p->pageq.next) {
+ if (p->phys_addr < DMA_MAX) {
+ i = p->phys_addr / PAGE_SIZE;
+ bits[i / NBPW] |= 1 << (i % NBPW);
+ }
+ }
+
+ /*
+ * Search for contiguous pages by scanning bit array.
+ */
+ for (i = 0, first_page = -1; i < n; i++) {
+ for (bit = 1, j = 0; j < NBPW; j++, bit <<= 1) {
+ if (bits[i] & bit) {
+ if (first_page < 0) {
+ k = i * NBPW + j;
+ if (!mask
+ || (((k & mask) + npages)
+ <= mask + 1)) {
+ first_page = k;
+ if (npages == 1)
+ goto found;
+ count = 1;
+ }
+ } else if (++count == npages)
+ goto found;
+ } else
+ first_page = -1;
+ }
+ }
+ addr = 0;
+ goto out;
+
+ found:
+ /*
+ * Remove pages from the free list.
+ */
+ addr = first_page * PAGE_SIZE;
+ last_page = first_page + npages;
+ vm_page_free_count -= npages;
+ p = vm_page_queue_free;
+ prevp = 0;
+ while (1) {
+ i = p->phys_addr / PAGE_SIZE;
+ if (i >= first_page && i < last_page) {
+ if (prevp)
+ prevp->pageq.next = p->pageq.next;
+ else
+ vm_page_queue_free = (vm_page_t)p->pageq.next;
+ p->free = FALSE;
+ if (--npages == 0)
+ break;
+ } else
+ prevp = p;
+ p = (vm_page_t)p->pageq.next;
+ }
+
+ out:
+ simple_unlock(&vm_page_queue_free_lock);
+ kfree((vm_offset_t)bits, n * NBPW);
+ return (addr);
+}
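+
+/*
+ * Example: the floppy driver below calls
+ *
+ *	alloc_dma_mem(18*1024, 64*1024)
+ *
+ * to get an 18K bounce buffer that lies below 16 MB and does not
+ * cross a 64K boundary; the ISA DMA controller's address counter
+ * cannot carry into the page register, so a transfer must stay
+ * within one 64K page.
+ */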
diff --git a/i386/i386at/nfd.c b/i386/i386at/nfd.c
new file mode 100644
index 00000000..950f8964
--- /dev/null
+++ b/i386/i386at/nfd.c
@@ -0,0 +1,1484 @@
+/*
+ * Copyright (c) 1994 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+#include <fd.h>
+#if NFD > 0
+/*
+ * Floppy disk driver.
+ *
+ * Supports:
+ * 1 controller and 2 drives.
+ * Media change and automatic media detection.
+ * Arbitrarily sized read/write requests.
+ * Misaligned requests
+ * DMA above 16 Meg
+ *
+ * TODO:
+ * 1) Real probe routines for controller and drives.
+ * 2) Support for multiple controllers. The driver does
+ * not assume a single controller since all functions
+ * take the controller and/or device structure as an
+ * argument, however the probe routines limit the
+ * number of controllers and drives to 1 and 2 respectively.
+ * 3) V_VERIFY ioctl.
+ * 4) User defined diskette parameters.
+ * 5) Detect Intel 82077 or compatible and use its FIFO mode.
+ *
+ * Shantanu Goel (goel@cs.columbia.edu)
+ */
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include "vm_param.h"
+#include <kern/time_out.h>
+#include <vm/pmap.h>
+#include <device/param.h>
+#include <device/buf.h>
+#include <device/errno.h>
+#include <chips/busses.h>
+#include <i386/machspl.h>
+#include <i386/pio.h>
+#include <i386at/cram.h>
+#include <i386at/disk.h>
+#include <i386at/nfdreg.h>
+
+/*
+ * Number of drives supported by an FDC.
+ * The controller is actually capable of
+ * supporting 4 drives, however, most (all?)
+ * board implementations only support 2.
+ */
+#define NDRIVES_PER_FDC 2
+#define NFDC ((NFD + NDRIVES_PER_FDC - 1) / NDRIVES_PER_FDC)
+
+#define fdunit(dev) (((int)(dev) >> 6) & 3)
+#define fdmedia(dev) ((int)(dev) & 3)
+
+#define b_cylin b_resid
+#define B_FORMAT B_MD1
+
+#define SECSIZE 512
+
+#define DMABSIZE (18*1024) /* size of DMA bounce buffer */
+
+#define OP_TIMEOUT 5 /* time to wait (secs) for an
+ operation before giving up */
+#define MOTOR_TIMEOUT 5 /* time to wait (secs) before turning
+ off an idle drive motor */
+#define MAX_RETRIES 48 /* number of times to try
+ an I/O operation */
+
+#define SRTHUT 0xdf /* step rate/head unload time */
+#define HLTND 0x02 /* head load time/dma mode */
+
+/*
+ * DMA controller.
+ *
+ * XXX: There should be a generic <i386/dma.h> file.
+ */
+
+/*
+ * Ports
+ */
+#define DMA2_PAGE 0x81 /* channel 2, page register */
+#define DMA2_ADDR 0x04 /* channel 2, addr register */
+#define DMA2_COUNT 0x05 /* channel 2, count register */
+#define DMA_STATUS 0x08 /* status register */
+#define DMA_COMMAND 0x08 /* command register */
+#define DMA_WREQ 0x09 /* request register */
+#define DMA_SINGLEMSK 0x0a /* single mask register */
+#define DMA_MODE 0x0b /* mode register */
+#define DMA_FLIPFLOP 0x0c /* pointer flip/flop */
+#define DMA_TEMP 0x0d /* temporary register */
+#define DMA_MASTERCLR 0x0d /* master clear */
+#define DMA_CLRMASK 0x0e /* clear mask register */
+#define DMA_ALLMASK 0x0f /* all mask register */
+
+/*
+ * Commands
+ */
+#define DMA_WRITE 0x46 /* write on channel 2 */
+#define DMA_READ 0x4a /* read on channel 2 */
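+
+/*
+ * Programming channel 2 for a transfer (see fdstate() below) follows
+ * the usual 8237 sequence: mask the channel, clear the byte-pointer
+ * flip-flop, write the mode (DMA_READ or DMA_WRITE), write the low
+ * and high address bytes to DMA2_ADDR and bits 16-23 to DMA2_PAGE,
+ * write (count - 1) low and high to DMA2_COUNT, then unmask the
+ * channel.
+ */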
+
+/*
+ * Autoconfiguration stuff.
+ */
+struct bus_ctlr *fdminfo[NFDC];
+struct bus_device *fddinfo[NFD];
+int fdstd[] = { 0 };
+int fdprobe(), fdslave(), fdintr();
+void fdattach();
+struct bus_driver fddriver = {
+ fdprobe, fdslave, fdattach, 0, fdstd, "fd", fddinfo, "fdc", fdminfo
+};
+
+/*
+ * Per-controller state.
+ */
+struct fdcsoftc {
+ int sc_flags;
+#define FDF_WANT 0x01 /* someone needs direct controller access */
+#define FDF_RESET 0x02 /* controller needs reset */
+#define FDF_LIMIT 0x04 /* limit transfer to a single sector */
+#define FDF_BOUNCE 0x08 /* using bounce buffer */
+ int sc_state; /* transfer fsm */
+ caddr_t sc_addr; /* buffer address */
+ int sc_resid; /* amount left to transfer */
+ int sc_amt; /* amount currently being transferred */
+ int sc_op; /* operation being performed */
+ int sc_mode; /* DMA mode */
+ int sc_sn; /* sector number */
+ int sc_tn; /* track number */
+ int sc_cn; /* cylinder number */
+ int sc_recalerr; /* # recalibration errors */
+ int sc_seekerr; /* # seek errors */
+ int sc_ioerr; /* # i/o errors */
+ int sc_dor; /* copy of digital output register */
+ int sc_rate; /* copy of transfer rate register */
+ int sc_wticks; /* watchdog */
+ u_int sc_buf; /* buffer for transfers > 16 Meg */
+ u_char sc_cmd[9]; /* command buffer */
+ u_char sc_results[7]; /* operation results */
+} fdcsoftc[NFDC];
+
+#define sc_st0 sc_results[0]
+#define sc_st3 sc_results[0]
+#define sc_st1 sc_results[1]
+#define sc_pcn sc_results[1]
+#define sc_st2 sc_results[2]
+#define sc_c sc_results[3]
+#define sc_h sc_results[4]
+#define sc_r sc_results[5]
+#define sc_n sc_results[6]
+
+/*
+ * Transfer states.
+ */
+#define IDLE 0 /* controller is idle */
+#define RESET 1 /* reset controller */
+#define RESETDONE 2 /* reset completion interrupt */
+#define RECAL 3 /* recalibrate drive */
+#define RECALDONE 4 /* recalibration complete interrupt */
+#define SEEK 5 /* perform seek on drive */
+#define SEEKDONE 6 /* seek completion interrupt */
+#define TRANSFER 7 /* perform transfer on drive */
+#define TRANSFERDONE 8 /* transfer completion interrupt */
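+
+/*
+ * A request normally walks these states in order: RESET/RESETDONE
+ * only if the controller needs a reset, RECAL/RECALDONE if the drive
+ * needs recalibration, SEEK/SEEKDONE if the head is on the wrong
+ * cylinder, then TRANSFER/TRANSFERDONE.  On errors fdstate() backs
+ * up to TRANSFER, RECAL or RESET depending on the retry count.
+ */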
+
+/*
+ * Per-drive state.
+ */
+struct fdsoftc {
+ int sc_flags;
+#define FDF_RECAL 0x02 /* drive needs recalibration */
+#define FDF_SEEK 0x04 /* force seek during auto-detection */
+#define FDF_AUTO 0x08 /* performing auto-density */
+#define FDF_AUTOFORCE 0x10 /* force auto-density */
+#define FDF_INIT 0x20 /* drive is being initialized */
+ int sc_type; /* drive type */
+ struct fddk *sc_dk; /* diskette type */
+ int sc_cyl; /* current head position */
+ int sc_mticks; /* motor timeout */
+} fdsoftc[NFD];
+
+struct buf fdtab[NFDC]; /* controller queues */
+struct buf fdutab[NFD]; /* drive queues */
+
+/*
+ * Floppy drive type names.
+ */
+char *fdnames[] = { "360K", "1.2 Meg", "720K", "1.44 Meg" };
+#define NTYPES (sizeof(fdnames) / sizeof(fdnames[0]))
+
+/*
+ * Floppy diskette parameters.
+ */
+struct fddk {
+ int dk_nspu; /* sectors/unit */
+ int dk_nspc; /* sectors/cylinder */
+ int dk_ncyl; /* cylinders/unit */
+ int dk_nspt; /* sectors/track */
+ int dk_step; /* !=0 means double track steps */
+ int dk_gap; /* read/write gap length */
+ int dk_fgap; /* format gap length */
+ int dk_rate; /* transfer rate */
+ int dk_drives; /* bit mask of drives that accept diskette */
+ char *dk_name; /* type name */
+} fddk[] = {
+ /*
+ * NOTE: largest density for each drive type must be first so
+ * fdauto() tries it before any lower ones.
+ */
+ { 2880, 36, 80, 18, 0, 0x1b, 0x6c, 0x00, 0x08, "1.44 Meg" },
+ { 2400, 30, 80, 15, 0, 0x1b, 0x54, 0x00, 0x02, "1.2 Meg" },
+ { 1440, 18, 80, 9, 0, 0x2a, 0x50, 0x02, 0x0c, "720K" },
+ { 720, 18, 40, 9, 1, 0x23, 0x50, 0x01, 0x02, "360K" },
+ { 720, 18, 40, 9, 0, 0x2a, 0x50, 0x02, 0x01, "360K PC" }
+};
+#define NDKTYPES (sizeof(fddk) / sizeof(fddk[0]))
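+
+/*
+ * For example, the first entry describes a 1.44 Meg diskette:
+ * 80 cylinders x 2 heads x 18 sectors/track = 2880 sectors of 512
+ * bytes, so dk_nspc = 2 * dk_nspt = 36; dk_drives = 0x08 means only
+ * drive type 3 ("1.44 Meg") accepts it, while the 720K entry's 0x0c
+ * covers both 720K and 1.44 Meg drives.
+ */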
+
+/*
+ * For compatibility with old driver.
+ * This array is indexed by the old floppy type codes
+ * and points to the corresponding entry for that
+ * type in fddk[] above.
+ */
+struct fddk *fdcompat[NDKTYPES];
+
+int fdwstart = 0;
+int fdstrategy(), fdformat();
+char *fderrmsg();
+void fdwatch(), fdminphys(), fdspinup(), wakeup();
+
+#define FDDEBUG
+#ifdef FDDEBUG
+int fddebug = 0;
+#define DEBUGF(n, stmt) { if (fddebug >= (n)) stmt; }
+#else
+#define DEBUGF(n, stmt)
+#endif
+
+/*
+ * Probe for a controller.
+ */
+int
+fdprobe(xxx, um)
+ int xxx;
+ struct bus_ctlr *um;
+{
+ struct fdcsoftc *fdc;
+
+ if (um->unit >= NFDC) {
+ printf("fdc%d: not configured\n", um->unit);
+ return (0);
+ }
+ if (um->unit > 0) /* XXX: only 1 controller */
+ return (0);
+
+ /*
+ * XXX: need real probe
+ */
+ take_ctlr_irq(um);
+ printf("%s%d: port 0x%x, spl %d, pic %d.\n",
+ um->name, um->unit, um->address, um->sysdep, um->sysdep1);
+
+ /*
+ * Set up compatibility array.
+ */
+ fdcompat[0] = &fddk[2];
+ fdcompat[1] = &fddk[0];
+ fdcompat[2] = &fddk[3];
+ fdcompat[3] = &fddk[1];
+
+ fdc = &fdcsoftc[um->unit];
+ fdc->sc_rate = -1;
+ if (!fdc->sc_buf) {
+ fdc->sc_buf = alloc_dma_mem(DMABSIZE, 64*1024);
+ if (fdc->sc_buf == 0)
+ panic("fd: alloc_dma_mem() failed");
+ }
+ fdc->sc_dor = DOR_RSTCLR | DOR_IENABLE;
+ outb(FD_DOR(um->address), fdc->sc_dor);
+ return (1);
+}
+
+/*
+ * Probe for a drive.
+ */
+int
+fdslave(ui)
+ struct bus_device *ui;
+{
+ struct fdsoftc *sc;
+
+ if (ui->unit >= NFD) {
+ printf("fd%d: not configured\n", ui->unit);
+ return (0);
+ }
+ if (ui->unit > 1) /* XXX: only 2 drives */
+ return (0);
+
+ /*
+ * Find out from CMOS if drive exists.
+ */
+ sc = &fdsoftc[ui->unit];
+ outb(CMOS_ADDR, 0x10);
+ sc->sc_type = inb(CMOS_DATA);
+ if (ui->unit == 0)
+ sc->sc_type >>= 4;
+ sc->sc_type &= 0x0f;
+ return (sc->sc_type);
+}
+
+/*
+ * Attach a drive to the system.
+ */
+void
+fdattach(ui)
+ struct bus_device *ui;
+{
+ struct fdsoftc *sc;
+
+ sc = &fdsoftc[ui->unit];
+ if (--sc->sc_type >= NTYPES) {
+ printf(": unknown drive type %d", sc->sc_type);
+ ui->alive = 0;
+ return;
+ }
+ printf(": %s", fdnames[sc->sc_type]);
+ sc->sc_flags = FDF_RECAL | FDF_SEEK | FDF_AUTOFORCE;
+}
+
+int
+fdopen(dev, mode)
+ dev_t dev;
+ int mode;
+{
+ int unit = fdunit(dev), error;
+ struct bus_device *ui;
+ struct fdsoftc *sc;
+
+ if (unit >= NFD || (ui = fddinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+
+ /*
+ * Start watchdog.
+ */
+ if (!fdwstart) {
+ fdwstart++;
+ timeout(fdwatch, 0, hz);
+ }
+ /*
+ * Do media detection if drive is being opened for the
+ * first time or diskette has been changed since the last open.
+ */
+ sc = &fdsoftc[unit];
+ if ((sc->sc_flags & FDF_AUTOFORCE) || fddskchg(ui)) {
+ if (error = fdauto(dev))
+ return (error);
+ sc->sc_flags &= ~FDF_AUTOFORCE;
+ }
+ return (0);
+}
+
+int
+fdclose(dev)
+ dev_t dev;
+{
+ int s, unit = fdunit(dev);
+ struct fdsoftc *sc = &fdsoftc[unit];
+
+ /*
+ * Wait for pending operations to complete.
+ */
+ s = splbio();
+ while (fdutab[unit].b_active) {
+ sc->sc_flags |= FDF_WANT;
+ assert_wait((event_t)sc, FALSE);
+ thread_block((void (*)())0);
+ }
+ splx(s);
+ return (0);
+}
+
+int
+fdread(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ return (block_io(fdstrategy, fdminphys, ior));
+}
+
+int
+fdwrite(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ return (block_io(fdstrategy, fdminphys, ior));
+}
+
+int
+fdgetstat(dev, flavor, status, status_count)
+ dev_t dev;
+ dev_flavor_t flavor;
+ dev_status_t status;
+ mach_msg_type_number_t *status_count;
+{
+ switch (flavor) {
+
+ case DEV_GET_SIZE:
+ {
+ int *info;
+ io_return_t error;
+ struct disk_parms dp;
+
+ if (error = fdgetparms(dev, &dp))
+ return (error);
+ info = (int *)status;
+ info[DEV_GET_SIZE_DEVICE_SIZE] = dp.dp_pnumsec * SECSIZE;
+ info[DEV_GET_SIZE_RECORD_SIZE] = SECSIZE;
+ *status_count = DEV_GET_SIZE_COUNT;
+ return (D_SUCCESS);
+ }
+ case V_GETPARMS:
+ if (*status_count < (sizeof(struct disk_parms) / sizeof(int)))
+ return (D_INVALID_OPERATION);
+ *status_count = sizeof(struct disk_parms) / sizeof(int);
+ return (fdgetparms(dev, (struct disk_parms *)status));
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+}
+
+int
+fdsetstat(dev, flavor, status, status_count)
+ dev_t dev;
+ dev_flavor_t flavor;
+ dev_status_t status;
+ mach_msg_type_number_t status_count;
+{
+ switch (flavor) {
+
+ case V_SETPARMS:
+ return (fdsetparms(dev, *(int *)status));
+
+ case V_FORMAT:
+ return (fdformat(dev, (union io_arg *)status));
+
+ case V_VERIFY:
+ /*
+ * XXX: needs to be implemented
+ */
+ return (D_SUCCESS);
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+}
+
+int
+fddevinfo(dev, flavor, info)
+ dev_t dev;
+ int flavor;
+ char *info;
+{
+ switch (flavor) {
+
+ case D_INFO_BLOCK_SIZE:
+ *(int *)info = SECSIZE;
+ return (D_SUCCESS);
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+}
+
+/*
+ * Allow arbitrary transfers. Standard minphys restricts
+ * transfers to a maximum of 256K, preventing us from reading
+ * an entire diskette in a single system call.
+ */
+void
+fdminphys(ior)
+ io_req_t ior;
+{
+}
+
+/*
+ * Return current media parameters.
+ */
+int
+fdgetparms(dev, dp)
+ dev_t dev;
+ struct disk_parms *dp;
+{
+ struct fddk *dk = fdsoftc[fdunit(dev)].sc_dk;
+
+ dp->dp_type = DPT_FLOPPY;
+ dp->dp_heads = 2;
+ dp->dp_sectors = dk->dk_nspt;
+ dp->dp_pstartsec = 0;
+ dp->dp_cyls = dk->dk_ncyl;
+ dp->dp_pnumsec = dk->dk_nspu;
+ return (0);
+}
+
+/*
+ * Set media parameters.
+ */
+int
+fdsetparms(dev, type)
+ dev_t dev;
+ int type;
+{
+ struct fdsoftc *sc;
+ struct fddk *dk;
+
+ if (type < 0 || type >= NDKTYPES)
+ return (EINVAL);
+ dk = fdcompat[type];
+ sc = &fdsoftc[fdunit(dev)];
+ if ((dk->dk_drives & (1 << sc->sc_type)) == 0)
+ return (EINVAL);
+ sc->sc_dk = dk;
+ return (D_SUCCESS);
+}
+
+/*
+ * Format a floppy.
+ */
+int
+fdformat(dev, arg)
+ dev_t dev;
+ union io_arg *arg;
+{
+ int i, j, sect, error = 0;
+ unsigned track, num_trks;
+ struct buf *bp;
+ struct fddk *dk;
+ struct format_info *fmt;
+
+ dk = fdsoftc[fdunit(dev)].sc_dk;
+ num_trks = arg->ia_fmt.num_trks;
+ track = arg->ia_fmt.start_trk;
+ if (num_trks == 0 || track + num_trks > (dk->dk_ncyl << 1)
+ || arg->ia_fmt.intlv >= dk->dk_nspt)
+ return (EINVAL);
+
+ bp = (struct buf *)geteblk(SECSIZE);
+ bp->b_dev = dev;
+ bp->b_bcount = dk->dk_nspt * sizeof(struct format_info);
+ bp->b_blkno = track * dk->dk_nspt;
+
+ while (num_trks-- > 0) {
+ /*
+ * Set up format information.
+ */
+ fmt = (struct format_info *)bp->b_un.b_addr;
+ for (i = 0; i < dk->dk_nspt; i++)
+ fmt[i].sector = 0;
+ for (i = 0, j = 0, sect = 1; i < dk->dk_nspt; i++) {
+ fmt[j].cyl = track >> 1;
+ fmt[j].head = track & 1;
+ fmt[j].sector = sect++;
+ fmt[j].secsize = 2;
+ if ((j += arg->ia_fmt.intlv) < dk->dk_nspt)
+ continue;
+ for (j -= dk->dk_nspt; j < dk->dk_nspt; j++)
+ if (fmt[j].sector == 0)
+ break;
+ }
+ bp->b_flags = B_FORMAT;
+ fdstrategy(bp);
+ biowait(bp);
+ if (bp->b_flags & B_ERROR) {
+ error = bp->b_error;
+ break;
+ }
+ bp->b_blkno += dk->dk_nspt;
+ track++;
+ }
+ bp->b_flags &= ~B_FORMAT;
+ brelse(bp);
+ return (error);
+}
+
+/*
+ * Strategy routine.
+ * Enqueue a request on drive queue.
+ */
+int
+fdstrategy(bp)
+ struct buf *bp;
+{
+ int unit = fdunit(bp->b_dev), s;
+ int bn, sz, maxsz;
+ struct buf *dp;
+ struct bus_device *ui = fddinfo[unit];
+ struct fddk *dk = fdsoftc[unit].sc_dk;
+
+ bn = bp->b_blkno;
+ sz = (bp->b_bcount + SECSIZE - 1) / SECSIZE;
+ maxsz = dk->dk_nspu;
+ if (bn < 0 || bn + sz > maxsz) {
+ if (bn == maxsz) {
+ bp->b_resid = bp->b_bcount;
+ goto done;
+ }
+ sz = maxsz - bn;
+ if (sz <= 0) {
+ bp->b_error = EINVAL;
+ bp->b_flags |= B_ERROR;
+ goto done;
+ }
+ bp->b_bcount = sz * SECSIZE;
+ }
+ bp->b_cylin = bn / dk->dk_nspc;
+ dp = &fdutab[unit];
+ s = splbio();
+ disksort(dp, bp);
+ if (!dp->b_active) {
+ fdustart(ui);
+ if (!fdtab[ui->mi->unit].b_active)
+ fdstart(ui->mi);
+ }
+ splx(s);
+ return;
+ done:
+ biodone(bp);
+ return;
+}
+
+/*
+ * Unit start routine.
+ * Move request from drive to controller queue.
+ */
+int
+fdustart(ui)
+ struct bus_device *ui;
+{
+ struct buf *bp;
+ struct buf *dp;
+
+ bp = &fdutab[ui->unit];
+ if (bp->b_actf == 0)
+ return;
+ dp = &fdtab[ui->mi->unit];
+ if (dp->b_actf == 0)
+ dp->b_actf = bp;
+ else
+ dp->b_actl->b_forw = bp;
+ bp->b_forw = 0;
+ dp->b_actl = bp;
+ bp->b_active++;
+}
+
+/*
+ * Start output on controller.
+ */
+int
+fdstart(um)
+ struct bus_ctlr *um;
+{
+ struct buf *bp;
+ struct buf *dp;
+ struct fdsoftc *sc;
+ struct fdcsoftc *fdc;
+ struct bus_device *ui;
+ struct fddk *dk;
+
+ /*
+ * Pull a request from the controller queue.
+ */
+ dp = &fdtab[um->unit];
+ if ((bp = dp->b_actf) == 0)
+ return;
+ bp = bp->b_actf;
+
+ fdc = &fdcsoftc[um->unit];
+ ui = fddinfo[fdunit(bp->b_dev)];
+ sc = &fdsoftc[ui->unit];
+ dk = sc->sc_dk;
+
+ /*
+ * Mark controller busy.
+ */
+ dp->b_active++;
+
+ /*
+ * Figure out where this request is going.
+ */
+ fdc->sc_cn = bp->b_cylin;
+ fdc->sc_sn = bp->b_blkno % dk->dk_nspc;
+ fdc->sc_tn = fdc->sc_sn / dk->dk_nspt;
+ fdc->sc_sn %= dk->dk_nspt;
+
+ /*
+ * Set up for multi-sector transfer.
+ */
+ fdc->sc_op = ((bp->b_flags & B_FORMAT) ? CMD_FORMAT
+ : ((bp->b_flags & B_READ) ? CMD_READ : CMD_WRITE));
+ fdc->sc_mode = (bp->b_flags & B_READ) ? DMA_WRITE : DMA_READ;
+ fdc->sc_addr = bp->b_un.b_addr;
+ fdc->sc_resid = bp->b_bcount;
+ fdc->sc_wticks = 0;
+ fdc->sc_recalerr = 0;
+ fdc->sc_seekerr = 0;
+ fdc->sc_ioerr = 0;
+
+ /*
+ * Set initial transfer state.
+ */
+ if (fdc->sc_flags & FDF_RESET)
+ fdc->sc_state = RESET;
+ else if (sc->sc_flags & FDF_RECAL)
+ fdc->sc_state = RECAL;
+ else if (sc->sc_cyl != fdc->sc_cn)
+ fdc->sc_state = SEEK;
+ else
+ fdc->sc_state = TRANSFER;
+
+ /*
+ * Set transfer rate.
+ */
+ if (fdc->sc_rate != dk->dk_rate) {
+ fdc->sc_rate = dk->dk_rate;
+ outb(FD_RATE(um->address), fdc->sc_rate);
+ }
+ /*
+ * Turn on drive motor.
+ * Don't start I/O if drive is spinning up.
+ */
+ if (fdmotoron(ui)) {
+ timeout(fdspinup, (void *)um, hz / 2);
+ return;
+ }
+ /*
+ * Call transfer state routine to do the actual I/O.
+ */
+ fdstate(um);
+}
+
+/*
+ * Interrupt routine.
+ */
+int
+fdintr(ctlr)
+ int ctlr;
+{
+ int timedout;
+ u_char results[7];
+ struct buf *bp;
+ struct bus_device *ui;
+ struct fdsoftc *sc;
+ struct buf *dp = &fdtab[ctlr];
+ struct fdcsoftc *fdc = &fdcsoftc[ctlr];
+ struct bus_ctlr *um = fdminfo[ctlr];
+
+ if (!dp->b_active) {
+ printf("fdc%d: stray interrupt\n", ctlr);
+ return;
+ }
+ timedout = fdc->sc_wticks >= OP_TIMEOUT;
+ fdc->sc_wticks = 0;
+ bp = dp->b_actf->b_actf;
+ ui = fddinfo[fdunit(bp->b_dev)];
+ sc = &fdsoftc[ui->unit];
+
+ /*
+ * Operation timed out, terminate request.
+ */
+ if (timedout) {
+ fderror("timed out", ui);
+ fdmotoroff(ui);
+ sc->sc_flags |= FDF_RECAL;
+ bp->b_flags |= B_ERROR;
+ bp->b_error = ENXIO;
+ fddone(ui, bp);
+ return;
+ }
+ /*
+ * Read results from FDC.
+ * For transfer completion they can be read immediately.
+ * For anything else, we must issue a Sense Interrupt
+ * Status Command. We keep issuing this command till
+ * FDC returns invalid command status. The Controller Busy
+ * bit in the status register indicates completion of a
+ * read/write/format operation.
+ */
+ if (inb(FD_STATUS(um->address)) & ST_CB) {
+ if (!fdresults(um, fdc->sc_results))
+ return;
+ } else {
+ while (1) {
+ fdc->sc_cmd[0] = CMD_SENSEI;
+ if (!fdcmd(um, 1)) {
+ DEBUGF(2, printf("fd%d: SENSEI failed\n", ui->unit));
+ return;
+ }
+ if (!fdresults(um, results))
+ return;
+ if ((results[0] & ST0_IC) == 0x80)
+ break;
+ if ((results[0] & ST0_US) == ui->slave) {
+ fdc->sc_results[0] = results[0];
+ fdc->sc_results[1] = results[1];
+ }
+ }
+ }
+ /*
+ * Let transfer state routine handle the rest.
+ */
+ fdstate(um);
+}
+
+/*
+ * Transfer finite state machine driver.
+ */
+int
+fdstate(um)
+ struct bus_ctlr *um;
+{
+ int unit, max, pa, s;
+ struct buf *bp;
+ struct fdsoftc *sc;
+ struct bus_device *ui;
+ struct fddk *dk;
+ struct fdcsoftc *fdc = &fdcsoftc[um->unit];
+
+ bp = fdtab[um->unit].b_actf->b_actf;
+ ui = fddinfo[fdunit(bp->b_dev)];
+ sc = &fdsoftc[ui->unit];
+ dk = sc->sc_dk;
+
+ while (1) switch (fdc->sc_state) {
+
+ case RESET:
+ /*
+ * Reset the controller.
+ */
+ fdreset(um);
+ return;
+
+ case RESETDONE:
+ /*
+ * Reset complete.
+ * Mark all drives as needing recalibration
+ * and issue specify command.
+ */
+ for (unit = 0; unit < NFD; unit++)
+ if (fddinfo[unit] && fddinfo[unit]->alive
+ && fddinfo[unit]->mi == um)
+ fdsoftc[unit].sc_flags |= FDF_RECAL;
+ fdc->sc_cmd[0] = CMD_SPECIFY;
+ fdc->sc_cmd[1] = SRTHUT;
+ fdc->sc_cmd[2] = HLTND;
+ if (!fdcmd(um, 3))
+ return;
+ fdc->sc_flags &= ~FDF_RESET;
+ fdc->sc_state = RECAL;
+ break;
+
+ case RECAL:
+ /*
+ * Recalibrate drive.
+ */
+ fdc->sc_state = RECALDONE;
+ fdc->sc_cmd[0] = CMD_RECAL;
+ fdc->sc_cmd[1] = ui->slave;
+ fdcmd(um, 2);
+ return;
+
+ case RECALDONE:
+ /*
+ * Recalibration complete.
+ */
+ if ((fdc->sc_st0 & ST0_IC) || (fdc->sc_st0 & ST0_EC)) {
+ if (++fdc->sc_recalerr == 2) {
+ fderror("recalibrate failed", ui);
+ goto bad;
+ }
+ fdc->sc_state = RESET;
+ break;
+ }
+ sc->sc_flags &= ~FDF_RECAL;
+ fdc->sc_recalerr = 0;
+ sc->sc_cyl = -1;
+ fdc->sc_state = SEEK;
+ break;
+
+ case SEEK:
+ /*
+ * Perform seek operation.
+ */
+ fdc->sc_state = SEEKDONE;
+ fdc->sc_cmd[0] = CMD_SEEK;
+ fdc->sc_cmd[1] = (fdc->sc_tn << 2) | ui->slave;
+ fdc->sc_cmd[2] = fdc->sc_cn;
+ if (dk->dk_step)
+ fdc->sc_cmd[2] <<= 1;
+ fdcmd(um, 3);
+ return;
+
+ case SEEKDONE:
+ /*
+ * Seek complete.
+ */
+ if (dk->dk_step)
+ fdc->sc_pcn >>= 1;
+ if ((fdc->sc_st0 & ST0_IC) || (fdc->sc_st0 & ST0_SE) == 0
+ || fdc->sc_pcn != fdc->sc_cn) {
+ if (++fdc->sc_seekerr == 2) {
+ fderror("seek failed", ui);
+ goto bad;
+ }
+ fdc->sc_state = RESET;
+ break;
+ }
+ fdc->sc_seekerr = 0;
+ sc->sc_cyl = fdc->sc_pcn;
+ fdc->sc_state = TRANSFER;
+ break;
+
+ case TRANSFER:
+ /*
+ * Perform I/O transfer.
+ */
+ fdc->sc_flags &= ~FDF_BOUNCE;
+ pa = pmap_extract(kernel_pmap, fdc->sc_addr);
+ if (fdc->sc_op == CMD_FORMAT) {
+ max = sizeof(struct format_info) * dk->dk_nspt;
+ } else if (fdc->sc_flags & FDF_LIMIT) {
+ fdc->sc_flags &= ~FDF_LIMIT;
+ max = SECSIZE;
+ } else {
+ max = (dk->dk_nspc - dk->dk_nspt * fdc->sc_tn
+ - fdc->sc_sn) * SECSIZE;
+ }
+ if (max > fdc->sc_resid)
+ max = fdc->sc_resid;
+ if (pa >= 16*1024*1024) {
+ fdc->sc_flags |= FDF_BOUNCE;
+ pa = fdc->sc_buf;
+ if (max < DMABSIZE)
+ fdc->sc_amt = max;
+ else
+ fdc->sc_amt = DMABSIZE;
+ } else {
+ int prevpa, curpa, omax;
+ vm_offset_t va;
+
+ omax = max;
+ if (max > 65536 - (pa & 0xffff))
+ max = 65536 - (pa & 0xffff);
+ fdc->sc_amt = I386_PGBYTES - (pa & (I386_PGBYTES - 1));
+ va = (vm_offset_t)fdc->sc_addr + fdc->sc_amt;
+ prevpa = pa & ~(I386_PGBYTES - 1);
+ while (fdc->sc_amt < max) {
+ curpa = pmap_extract(kernel_pmap, va);
+ if (curpa >= 16*1024*1024
+ || curpa != prevpa + I386_PGBYTES)
+ break;
+ fdc->sc_amt += I386_PGBYTES;
+ va += I386_PGBYTES;
+ prevpa = curpa;
+ }
+ if (fdc->sc_amt > max)
+ fdc->sc_amt = max;
+ if (fdc->sc_op == CMD_FORMAT) {
+ if (fdc->sc_amt != omax) {
+ fdc->sc_flags |= FDF_BOUNCE;
+ pa = fdc->sc_buf;
+ fdc->sc_amt = omax;
+ }
+ } else if (fdc->sc_amt != fdc->sc_resid) {
+ if (fdc->sc_amt < SECSIZE) {
+ fdc->sc_flags |= FDF_BOUNCE;
+ pa = fdc->sc_buf;
+ if (omax > DMABSIZE)
+ fdc->sc_amt = DMABSIZE;
+ else
+ fdc->sc_amt = omax;
+ } else
+ fdc->sc_amt &= ~(SECSIZE - 1);
+ }
+ }
+
+ DEBUGF(2, printf("fd%d: TRANSFER: amt %d cn %d tn %d sn %d\n",
+ ui->unit, fdc->sc_amt, fdc->sc_cn,
+ fdc->sc_tn, fdc->sc_sn + 1));
+
+ if ((fdc->sc_flags & FDF_BOUNCE) && fdc->sc_op != CMD_READ) {
+ fdc->sc_flags &= ~FDF_BOUNCE;
+ bcopy(fdc->sc_addr, (caddr_t)phystokv(fdc->sc_buf),
+ fdc->sc_amt);
+ }
+ /*
+ * Set up DMA.
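+		 * The FDC uses ISA DMA channel 2: mask the channel, clear
+		 * the byte flip-flop, program the mode, address, page and
+		 * count registers, then unmask the channel.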
+ */
+ s = sploff();
+ outb(DMA_SINGLEMSK, 0x04 | 0x02);
+ outb(DMA_FLIPFLOP, 0);
+ outb(DMA_MODE, fdc->sc_mode);
+ outb(DMA2_ADDR, pa);
+ outb(DMA2_ADDR, pa >> 8);
+ outb(DMA2_PAGE, pa >> 16);
+ outb(DMA2_COUNT, fdc->sc_amt - 1);
+ outb(DMA2_COUNT, (fdc->sc_amt - 1) >> 8);
+ outb(DMA_SINGLEMSK, 0x02);
+ splon(s);
+
+ /*
+ * Issue command to FDC.
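+		 * For read/write the parameter bytes are cylinder, head,
+		 * sector, sector-size code (2 = 512 bytes), end-of-track,
+		 * gap length and data length; for format they are the
+		 * sector-size code, sectors/track, gap length and filler.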
+ */
+ fdc->sc_state = TRANSFERDONE;
+ fdc->sc_cmd[0] = fdc->sc_op;
+ fdc->sc_cmd[1] = (fdc->sc_tn << 2) | ui->slave;
+ if (fdc->sc_op == CMD_FORMAT) {
+ fdc->sc_cmd[2] = 0x02;
+ fdc->sc_cmd[3] = dk->dk_nspt;
+ fdc->sc_cmd[4] = dk->dk_fgap;
+ fdc->sc_cmd[5] = 0xda;
+ fdcmd(um, 6);
+ } else {
+ fdc->sc_cmd[2] = fdc->sc_cn;
+ fdc->sc_cmd[3] = fdc->sc_tn;
+ fdc->sc_cmd[4] = fdc->sc_sn + 1;
+ fdc->sc_cmd[5] = 0x02;
+ fdc->sc_cmd[6] = dk->dk_nspt;
+ fdc->sc_cmd[7] = dk->dk_gap;
+ fdc->sc_cmd[8] = 0xff;
+ fdcmd(um, 9);
+ }
+ return;
+
+ case TRANSFERDONE:
+ /*
+ * Transfer complete.
+ */
+ if (fdc->sc_st0 & ST0_IC) {
+ fdc->sc_ioerr++;
+ if (sc->sc_flags & FDF_AUTO) {
+ /*
+ * Give up on second try if
+ * media detection is in progress.
+ */
+ if (fdc->sc_ioerr == 2)
+ goto bad;
+ fdc->sc_state = RECAL;
+ break;
+ }
+ if (fdc->sc_ioerr == MAX_RETRIES) {
+ fderror(fderrmsg(ui), ui);
+ goto bad;
+ }
+ /*
+ * Give up immediately on write-protected diskettes.
+ */
+ if (fdc->sc_st1 & ST1_NW) {
+ fderror("write-protected diskette", ui);
+ goto bad;
+ }
+ /*
+ * Limit transfer to a single sector.
+ */
+ fdc->sc_flags |= FDF_LIMIT;
+ /*
+ * Every fourth attempt recalibrate the drive.
+			 * Every eighth attempt reset the controller
+			 * and inform the user about the error.
+ */
+ if (fdc->sc_ioerr & 3)
+ fdc->sc_state = TRANSFER;
+ else if (fdc->sc_ioerr & 7)
+ fdc->sc_state = RECAL;
+ else {
+ fdc->sc_state = RESET;
+ fderror(fderrmsg(ui), ui);
+ }
+ break;
+ }
+ /*
+ * Transfer completed successfully.
+ * Advance counters/pointers, and if more
+ * is left, initiate I/O.
+ */
+ if (fdc->sc_flags & FDF_BOUNCE) {
+ fdc->sc_flags &= ~FDF_BOUNCE;
+ bcopy((caddr_t)phystokv(fdc->sc_buf), fdc->sc_addr,
+ fdc->sc_amt);
+ }
+ if ((fdc->sc_resid -= fdc->sc_amt) == 0) {
+ bp->b_resid = 0;
+ fddone(ui, bp);
+ return;
+ }
+ fdc->sc_state = TRANSFER;
+ fdc->sc_ioerr = 0;
+ fdc->sc_addr += fdc->sc_amt;
+ if (fdc->sc_op == CMD_FORMAT) {
+ fdc->sc_sn = 0;
+ if (fdc->sc_tn == 1) {
+ fdc->sc_tn = 0;
+ fdc->sc_cn++;
+ fdc->sc_state = SEEK;
+ } else
+ fdc->sc_tn = 1;
+ } else {
+ fdc->sc_sn += fdc->sc_amt / SECSIZE;
+ while (fdc->sc_sn >= dk->dk_nspt) {
+ fdc->sc_sn -= dk->dk_nspt;
+ if (fdc->sc_tn == 1) {
+ fdc->sc_tn = 0;
+ fdc->sc_cn++;
+ fdc->sc_state = SEEK;
+ } else
+ fdc->sc_tn = 1;
+ }
+ }
+ break;
+
+ default:
+ printf("fd%d: invalid state\n", ui->unit);
+ panic("fdstate");
+ /*NOTREACHED*/
+ }
+ bad:
+ bp->b_flags |= B_ERROR;
+ bp->b_error = EIO;
+ sc->sc_flags |= FDF_RECAL;
+ fddone(ui, bp);
+}
+
+/*
+ * Terminate current request and start
+ * any others that are queued.
+ */
+int
+fddone(ui, bp)
+ struct bus_device *ui;
+ struct buf *bp;
+{
+ struct bus_ctlr *um = ui->mi;
+ struct fdsoftc *sc = &fdsoftc[ui->unit];
+ struct fdcsoftc *fdc = &fdcsoftc[um->unit];
+ struct buf *dp = &fdtab[um->unit];
+
+ DEBUGF(1, printf("fd%d: fddone()\n", ui->unit));
+
+ /*
+ * Remove this request from queue.
+ */
+ if (bp) {
+ fdutab[ui->unit].b_actf = bp->b_actf;
+ biodone(bp);
+ bp = &fdutab[ui->unit];
+ dp->b_actf = bp->b_forw;
+ } else
+ bp = &fdutab[ui->unit];
+
+ /*
+ * Mark controller and drive idle.
+ */
+ dp->b_active = 0;
+ bp->b_active = 0;
+ fdc->sc_state = IDLE;
+ sc->sc_mticks = 0;
+ fdc->sc_flags &= ~(FDF_LIMIT|FDF_BOUNCE);
+
+ /*
+ * Start up other requests.
+ */
+ fdustart(ui);
+ fdstart(um);
+
+ /*
+ * Wakeup anyone waiting for drive or controller.
+ */
+ if (sc->sc_flags & FDF_WANT) {
+ sc->sc_flags &= ~FDF_WANT;
+ wakeup((void *)sc);
+ }
+ if (fdc->sc_flags & FDF_WANT) {
+ fdc->sc_flags &= ~FDF_WANT;
+ wakeup((void *)fdc);
+ }
+}
+
+/*
+ * Check if a diskette change has occurred since the last open.
+ */
+int
+fddskchg(ui)
+ struct bus_device *ui;
+{
+ int s, dir;
+ struct fdsoftc *sc = &fdsoftc[ui->unit];
+ struct bus_ctlr *um = ui->mi;
+ struct fdcsoftc *fdc = &fdcsoftc[um->unit];
+
+ /*
+ * Get access to controller.
+ */
+ s = splbio();
+ while (fdtab[um->unit].b_active) {
+ fdc->sc_flags |= FDF_WANT;
+ assert_wait((event_t)fdc, FALSE);
+ thread_block((void (*)())0);
+ }
+ fdtab[um->unit].b_active = 1;
+ fdutab[ui->unit].b_active = 1;
+
+ /*
+ * Turn on drive motor and read digital input register.
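+	 * If the motor was just turned on, wait half a second for it
+	 * to spin up before sampling the register.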
+ */
+ if (fdmotoron(ui)) {
+ timeout(wakeup, (void *)fdc, hz / 2);
+ assert_wait((event_t)fdc, FALSE);
+ thread_block((void (*)())0);
+ }
+ dir = inb(FD_DIR(um->address));
+ fddone(ui, NULL);
+ splx(s);
+
+ if (dir & DIR_DSKCHG) {
+ printf("fd%d: diskette change detected\n", ui->unit);
+ sc->sc_flags |= FDF_SEEK;
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Do media detection.
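+ * Try each diskette geometry the drive supports and attempt a
+ * single-sector read; the first one that succeeds selects the type.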
+ */
+int
+fdauto(dev)
+ dev_t dev;
+{
+ int i, error = 0;
+ struct buf *bp;
+ struct bus_device *ui = fddinfo[fdunit(dev)];
+ struct fdsoftc *sc = &fdsoftc[ui->unit];
+ struct fddk *dk, *def = 0;
+
+ sc->sc_flags |= FDF_AUTO;
+ bp = (struct buf *)geteblk(SECSIZE);
+ for (i = 0, dk = fddk; i < NDKTYPES; i++, dk++) {
+ if ((dk->dk_drives & (1 << sc->sc_type)) == 0)
+ continue;
+ if (def == 0)
+ def = dk;
+ sc->sc_dk = dk;
+ bp->b_flags = B_READ;
+ bp->b_dev = dev;
+ bp->b_bcount = SECSIZE;
+ if (sc->sc_flags & FDF_SEEK) {
+ sc->sc_flags &= ~FDF_SEEK;
+ bp->b_blkno = 100;
+ } else
+ bp->b_blkno = 0;
+ fdstrategy(bp);
+ biowait(bp);
+ if ((bp->b_flags & B_ERROR) == 0 || bp->b_error == ENXIO)
+ break;
+ }
+ if (i == NDKTYPES) {
+ printf("fd%d: couldn't detect type, using %s\n",
+ ui->unit, def->dk_name);
+ sc->sc_dk = def;
+ } else if ((bp->b_flags & B_ERROR) == 0)
+ printf("fd%d: detected %s\n", ui->unit, sc->sc_dk->dk_name);
+ else
+ error = ENXIO;
+ sc->sc_flags &= ~FDF_AUTO;
+ brelse(bp);
+ return (error);
+}
+
+/*
+ * Turn on drive motor and select drive.
+ */
+int
+fdmotoron(ui)
+ struct bus_device *ui;
+{
+ int bit;
+ struct bus_ctlr *um = ui->mi;
+ struct fdcsoftc *fdc = &fdcsoftc[um->unit];
+
+ bit = 1 << (ui->slave + 4);
+ if ((fdc->sc_dor & bit) == 0) {
+ fdc->sc_dor &= ~3;
+ fdc->sc_dor |= bit | ui->slave;
+ outb(FD_DOR(um->address), fdc->sc_dor);
+ return (1);
+ }
+ if ((fdc->sc_dor & 3) != ui->slave) {
+ fdc->sc_dor &= ~3;
+ fdc->sc_dor |= ui->slave;
+ outb(FD_DOR(um->address), fdc->sc_dor);
+ }
+ return (0);
+}
+
+/*
+ * Turn off drive motor.
+ */
+int
+fdmotoroff(ui)
+ struct bus_device *ui;
+{
+ struct bus_ctlr *um = ui->mi;
+ struct fdcsoftc *fdc = &fdcsoftc[um->unit];
+
+ fdc->sc_dor &= ~(1 << (ui->slave + 4));
+ outb(FD_DOR(um->address), fdc->sc_dor);
+}
+
+/*
+ * This routine is invoked via timeout() by fdstart()
+ * to call fdstate() at splbio.
+ */
+void
+fdspinup(um)
+ struct bus_ctlr *um;
+{
+ int s;
+
+ s = splbio();
+ fdstate(um);
+ splx(s);
+}
+
+/*
+ * Watchdog routine.
+ * Check for hung operations.
+ * Turn off motor of idle drives.
+ */
+void
+fdwatch()
+{
+ int unit, s;
+ struct bus_device *ui;
+
+ timeout(fdwatch, 0, hz);
+ s = splbio();
+ for (unit = 0; unit < NFDC; unit++)
+ if (fdtab[unit].b_active
+ && ++fdcsoftc[unit].sc_wticks == OP_TIMEOUT)
+ fdintr(unit);
+ for (unit = 0; unit < NFD; unit++) {
+ if ((ui = fddinfo[unit]) == 0 || ui->alive == 0)
+ continue;
+ if (fdutab[unit].b_active == 0
+ && (fdcsoftc[ui->mi->unit].sc_dor & (1 << (ui->slave + 4)))
+ && ++fdsoftc[unit].sc_mticks == MOTOR_TIMEOUT)
+ fdmotoroff(ui);
+ }
+ splx(s);
+}
+
+/*
+ * Print an error message.
+ */
+int
+fderror(msg, ui)
+ char *msg;
+ struct bus_device *ui;
+{
+ struct fdcsoftc *fdc = &fdcsoftc[ui->mi->unit];
+
+ printf("fd%d: %s, %sing cn %d tn %d sn %d\n", ui->unit, msg,
+ (fdc->sc_op == CMD_READ ? "read"
+ : (fdc->sc_op == CMD_WRITE ? "writ" : "formatt")),
+ fdc->sc_cn, fdc->sc_tn, fdc->sc_sn + 1);
+}
+
+/*
+ * Return an error message for an I/O error.
+ */
+char *
+fderrmsg(ui)
+ struct bus_device *ui;
+{
+ struct fdcsoftc *fdc = &fdcsoftc[ui->mi->unit];
+
+ if (fdc->sc_st1 & ST1_EC)
+ return ("invalid sector");
+ if (fdc->sc_st1 & ST1_DE)
+ return ("CRC error");
+ if (fdc->sc_st1 & ST1_OR)
+ return ("DMA overrun");
+ if (fdc->sc_st1 & ST1_ND)
+ return ("sector not found");
+ if (fdc->sc_st1 & ST1_NW)
+ return ("write-protected diskette");
+ if (fdc->sc_st1 & ST1_MA)
+ return ("missing address mark");
+ return ("hard error");
+}
+
+/*
+ * Output a command to FDC.
+ */
+int
+fdcmd(um, n)
+ struct bus_ctlr *um;
+ int n;
+{
+ int i, j;
+ struct fdcsoftc *fdc = &fdcsoftc[um->unit];
+
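+	/*
+	 * Wait for the FDC to raise RQM with DIO clear (ready for
+	 * input) before writing each command byte.
+	 */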
+ for (i = j = 0; i < 200; i++) {
+ if ((inb(FD_STATUS(um->address)) & (ST_RQM|ST_DIO)) != ST_RQM)
+ continue;
+ outb(FD_DATA(um->address), fdc->sc_cmd[j++]);
+ if (--n == 0)
+ return (1);
+ }
+ /*
+ * Controller is not responding, reset it.
+ */
+ DEBUGF(1, printf("fdc%d: fdcmd() failed\n", um->unit));
+ fdreset(um);
+ return (0);
+}
+
+/*
+ * Read results from FDC.
+ */
+int
+fdresults(um, rp)
+ struct bus_ctlr *um;
+ u_char *rp;
+{
+ int i, j, status;
+
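+	/*
+	 * Read up to 7 result bytes while RQM is set and DIO
+	 * indicates FDC-to-CPU transfer.
+	 */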
+ for (i = j = 0; i < 200; i++) {
+ status = inb(FD_STATUS(um->address));
+ if ((status & ST_RQM) == 0)
+ continue;
+ if ((status & ST_DIO) == 0)
+ return (j);
+ if (j == 7)
+ break;
+ *rp++ = inb(FD_DATA(um->address));
+ j++;
+ }
+ /*
+ * Controller is not responding, reset it.
+ */
+ DEBUGF(1, printf("fdc%d: fdresults() failed\n", um->unit));
+ fdreset(um);
+ return (0);
+}
+
+/*
+ * Reset controller.
+ */
+int
+fdreset(um)
+ struct bus_ctlr *um;
+{
+ struct fdcsoftc *fdc = &fdcsoftc[um->unit];
+
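+	/*
+	 * Pulse the reset bit in the DOR: dropping DOR_RSTCLR holds the
+	 * controller in reset, writing back sc_dor releases it and the
+	 * completion interrupt drives the RESETDONE state.
+	 */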
+ outb(FD_DOR(um->address), fdc->sc_dor & ~(DOR_RSTCLR|DOR_IENABLE));
+ fdc->sc_state = RESETDONE;
+ fdc->sc_flags |= FDF_RESET;
+ outb(FD_DOR(um->address), fdc->sc_dor);
+}
+
+#endif /* NFD > 0 */
diff --git a/i386/i386at/nfdreg.h b/i386/i386at/nfdreg.h
new file mode 100644
index 00000000..9bdf44d3
--- /dev/null
+++ b/i386/i386at/nfdreg.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 1994 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+/*
+ * NEC 765/Intel 8272 floppy disk controller.
+ */
+
+/*
+ * Ports
+ */
+#define FD_DOR(p) (p) /* digital output register */
+#define FD_STATUS(p) ((p) + 2) /* status register */
+#define FD_DATA(p) ((p) + 3) /* data register */
+#define FD_RATE(p) ((p) + 5) /* transfer rate register */
+#define FD_DIR(p) ((p) + 5) /* digital input register */
+
+/*
+ * Digital output register.
+ */
+#define DOR_IENABLE 0x08 /* enable interrupts and DMA */
+#define DOR_RSTCLR 0x04 /* clear reset */
+
+/*
+ * Status register.
+ */
+#define ST_RQM 0x80 /* request for master */
+#define ST_DIO 0x40 /* direction of data transfer
+ 1 = fdc to cpu, 0 = cpu to fdc */
+#define ST_NDM 0x20 /* non DMA mode */
+#define ST_CB 0x10 /* controller busy */
+
+/*
+ * Digital input register.
+ */
+#define DIR_DSKCHG	0x80		/* diskette change has occurred */
+
+/*
+ * ST0
+ */
+#define ST0_IC 0xc0 /* interrupt code */
+#define ST0_SE 0x20 /* seek end */
+#define ST0_EC 0x10 /* equipment check */
+#define ST0_NR 0x08 /* not ready */
+#define ST0_HD 0x04 /* head address */
+#define ST0_US 0x03 /* unit select */
+
+/*
+ * ST1
+ */
+#define ST1_EC 0x80 /* end of cylinder */
+#define ST1_DE 0x20 /* CRC data error */
+#define ST1_OR 0x10 /* DMA overrun */
+#define ST1_ND 0x04 /* sector not found */
+#define ST1_NW 0x02 /* write-protected diskette */
+#define ST1_MA 0x01 /* missing address mark */
+
+/*
+ * ST2
+ */
+#define ST2_CM 0x40 /* control mark */
+#define ST2_DD 0x20 /* data error */
+#define ST2_WC 0x10 /* wrong cylinder */
+#define ST2_SH 0x08 /* scan equal hit */
+#define ST2_SN 0x04 /* scan not satisfied */
+#define ST2_BC 0x02 /* bad cylinder */
+#define ST2_MD 0x01 /* missing address mark */
+
+/*
+ * ST3
+ */
+#define ST3_FT 0x80 /* fault */
+#define ST3_WP 0x40 /* write protect */
+#define ST3_RY 0x20 /* ready */
+#define ST3_T0 0x10 /* track 0 */
+#define ST3_TS 0x08 /* two side */
+#define ST3_HD 0x04 /* head address */
+#define ST3_US 0x03 /* unit select */
+
+/*
+ * Commands.
+ */
+#define CMD_SPECIFY 0x03
+#define CMD_RECAL 0x07
+#define CMD_SENSEI 0x08
+#define CMD_SEEK 0x0f
+#define CMD_FORMAT 0x4d
+#define CMD_WRITE 0xc5
+#define CMD_READ 0xe6
+
+/*
+ * Information required by FDC when formatting a diskette.
+ */
+struct format_info {
+ unsigned char cyl;
+ unsigned char head;
+ unsigned char sector;
+ unsigned char secsize;
+};
diff --git a/i386/i386at/nhd.c b/i386/i386at/nhd.c
new file mode 100644
index 00000000..72b4cfc3
--- /dev/null
+++ b/i386/i386at/nhd.c
@@ -0,0 +1,1430 @@
+/*
+ * Copyright (c) 1994 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * MODIFIED BY KEVIN T. VAN MAREN, University of Utah, CSL
+ * Copyright (c) 1996, University of Utah, CSL
+ *
+ * Uses partition code shared ('unified') with the SCSI driver.
+ * Reading/writing disklabels through the kernel is NOT recommended.
+ * (The preferred method is through the raw device (wd0), with no
+ * open partitions.)  setdisklabel() should work for the in-core
+ * fudged disklabel, but will not change the partitioning; the driver
+ * *never* sees the disklabel on the disk.
+ *
+ */
+
+
+#include <hd.h>
+#if NHD > 0 && !defined(LINUX_DEV)
+/*
+ * Hard disk driver.
+ *
+ * Supports:
+ * 1 controller and 2 drives.
+ * Arbitrarily sized read/write requests.
+ * Misaligned requests.
+ * Multiple sector transfer mode (not tested extensively).
+ *
+ * TODO:
+ * 1) Real probe routines for controller and drives.
+ * 2) Support for multiple controllers. The driver does
+ * not assume a single controller since all functions
+ * take the controller and/or device structure as an
+ * argument, however the probe routines limit the
+ * number of controllers and drives to 1 and 2 respectively.
+ *
+ * Shantanu Goel (goel@cs.columbia.edu)
+ */
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include "vm_param.h"
+#include <kern/time_out.h>
+#include <vm/vm_kern.h>
+#include <vm/pmap.h>
+#include <device/param.h>
+#include <device/buf.h>
+#include <device/errno.h>
+#include <device/device_types.h>
+#include <device/disk_status.h>
+#include <chips/busses.h>
+#include <i386/machspl.h>
+#include <i386/pio.h>
+#include <i386at/cram.h>
+#include <i386at/disk.h>
+#include <i386at/nhdreg.h>
+
+#include <scsi/rz_labels.h>
+
+
+/* this is for the partition code */
+typedef struct ide_driver_info {
+ dev_t dev;
+/* struct buf *bp; */
+ int sectorsize;
+} ide_driver_info;
+
+#define MAX_IDE_PARTS 32 /* max partitions per drive */
+static char *drive_name[4]={"wd0: ","wd1: ","xxx ","yyy "};
+
+/*
+ * XXX: This will have to be fixed for controllers that
+ * can support up to 4 drives.
+ */
+#define NDRIVES_PER_HDC 2
+#define NHDC ((NHD + NDRIVES_PER_HDC - 1) / NDRIVES_PER_HDC)
+
+#define b_cylin b_resid
+
+#define B_ABS B_MD1
+#define B_IDENTIFY (B_MD1 << 1)
+
+/* shift right SLICE_BITS + PARTITION_BITS. Note: 2^10 = 1024 sub-parts */
+#define hdunit(dev) (((dev) >> 10) & 3)
+#define hdpart(dev) ((dev) & 0x3ff)
+
+#define MAX_RETRIES 12 /* maximum number of retries */
+#define OP_TIMEOUT 7 /* time to wait (secs) for an operation */
+
+/*
+ * Autoconfiguration stuff.
+ */
+struct bus_ctlr *hdminfo[NHDC];
+struct bus_device *hddinfo[NHD];
+int hdstd[] = { 0 };
+int hdprobe(), hdslave(), hdstrategy();
+void hdattach();
+struct bus_driver hddriver = {
+ hdprobe, hdslave, hdattach, 0, hdstd, "hd", hddinfo, "hdc", hdminfo, 0
+};
+
+/*
+ * BIOS geometry.
+ */
+struct hdbios {
+ int bg_ncyl; /* cylinders/unit */
+ int bg_ntrk; /* tracks/cylinder */
+ int bg_precomp; /* write precomp cylinder */
+ int bg_nsect; /* sectors/track */
+} hdbios[NHD];
+
+/*
+ * Controller state.
+ */
+struct hdcsoftc {
+ int sc_state; /* transfer fsm */
+ caddr_t sc_addr; /* buffer address */
+ int sc_resid; /* amount left to transfer */
+ int sc_amt; /* amount currently being transferred */
+ int sc_cnt; /* amount transferred per interrupt */
+ int sc_sn; /* sector number */
+ int sc_tn; /* track number */
+ int sc_cn; /* cylinder number */
+ int sc_recalerr; /* # recalibration errors */
+ int sc_ioerr; /* # i/o errors */
+ int sc_wticks; /* watchdog */
+ caddr_t sc_buf; /* buffer for unaligned requests */
+} hdcsoftc[NHDC];
+
+/*
+ * Transfer states.
+ */
+#define IDLE 0 /* controller is idle */
+#define SETPARAM 1 /* set disk parameters */
+#define SETPARAMDONE 2 /* set parameters done */
+#define RESTORE 3 /* recalibrate drive */
+#define RESTOREDONE 4 /* recalibrate done */
+#define TRANSFER 5 /* perform I/O transfer */
+#define TRANSFERDONE 6 /* transfer done */
+#define IDENTIFY 7 /* get drive info */
+#define IDENTIFYDONE 8 /* get drive info done */
+#define SETMULTI 9 /* set multiple mode count */
+#define SETMULTIDONE 10 /* set multiple mode count done */
+
+/*
+ * Drive state.
+ */
+struct hdsoftc {
+ int sc_flags;
+#define HDF_SETPARAM 0x001 /* set drive parameters before I/O operation */
+#define HDF_RESTORE 0x002 /* drive needs recalibration */
+#define HDF_WANT	0x004	/* someone is waiting for the drive */
+#define HDF_UNALIGNED 0x008 /* request is not a multiple of sector size */
+#define HDF_SETMULTI 0x010 /* set multiple count before I/O operation */
+#define HDF_MULTIDONE 0x020 /* multicnt field is valid */
+#define HDF_IDENTDONE 0x040 /* identify command done */
+#define HDF_LBA 0x080 /* use LBA mode */
+ int sc_multicnt; /* current multiple count */
+ int sc_abssn; /* absolute sector number (for {RD,WR}ABS) */
+ int sc_abscnt; /* absolute sector count */
+ int sc_openpart; /* bit mask of open partitions */
+ struct hdident sc_id; /* info returned by identify */
+} hdsoftc[NHD];
+
+struct buf hdtab[NHDC]; /* controller queues */
+struct buf hdutab[NHD]; /* drive queues */
+struct disklabel hdlabel[NHD]; /* disklabels -- incorrect info! */
+struct diskpart array[NHD*MAX_IDE_PARTS]; /* partition info */
+
+/*
+ * To enable multiple mode,
+ * set this, recompile, and reboot the machine.
+ */
+int hdenmulti = 0;
+
+char *hderrchk();
+struct buf *geteblk();
+int hdwstart = 0;
+void hdwatch();
+
+/*
+ * Probe for a controller.
+ */
+int
+hdprobe(xxx, um)
+ int xxx;
+ struct bus_ctlr *um;
+{
+ struct hdcsoftc *hdc;
+
+ if (um->unit >= NHDC) {
+ printf("hdc%d: not configured\n", um->unit);
+ return (0);
+ }
+ if (um->unit > 0) { /* XXX: only 1 controller */
+
+		printf("nhd: probe for 2+ controllers -- not implemented\n");
+ return (0);
+ }
+
+ /*
+ * XXX: need real probe
+ */
+ hdc = &hdcsoftc[um->unit];
+ if (!hdc->sc_buf)
+ kmem_alloc(kernel_map,
+ (vm_offset_t *)&hdc->sc_buf, I386_PGBYTES);
+ take_ctlr_irq(um);
+ return (1);
+}
+
+/*
+ * Probe for a drive.
+ */
+int
+hdslave(ui)
+ struct bus_device *ui;
+{
+ int type;
+
+ if (ui->unit >= NHD) {
+ printf("hd%d: not configured\n", ui->unit);
+ return (0);
+ }
+ if (ui->unit > 1) /* XXX: only 2 drives */
+ return (0);
+
+ /*
+ * Find out if drive exists by reading CMOS.
+ */
+ outb(CMOS_ADDR, 0x12);
+ type = inb(CMOS_DATA);
+ if (ui->unit == 0)
+ type >>= 4;
+ type &= 0x0f;
+ return (type);
+}
+
+/*
+ * Attach a drive to the system.
+ */
+void
+hdattach(ui)
+ struct bus_device *ui;
+{
+ char *tbl;
+ unsigned n;
+ /* struct hdsoftc *sc = &hdsoftc[ui->unit]; */
+ struct disklabel *lp = &hdlabel[ui->unit];
+ struct hdbios *bg = &hdbios[ui->unit];
+
+ /*
+ * Set up a temporary disklabel from BIOS parameters.
+ * The actual partition table will be read during open.
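+	 * ui->address holds the physical address of a real-mode far
+	 * pointer (presumably the BIOS fixed disk parameter table
+	 * vector); the segment:offset pair read there is converted
+	 * to a linear address below.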
+ */
+ n = *(unsigned *)phystokv(ui->address);
+ tbl = (unsigned char *)phystokv((n & 0xffff) + ((n >> 12) & 0xffff0));
+ bg->bg_ncyl = *(unsigned short *)tbl;
+ bg->bg_ntrk = *(unsigned char *)(tbl + 2);
+ bg->bg_precomp = *(unsigned short *)(tbl + 5);
+ bg->bg_nsect = *(unsigned char *)(tbl + 14);
+ fudge_bsd_label(lp, DTYPE_ESDI, bg->bg_ncyl*bg->bg_ntrk*bg->bg_nsect,
+ bg->bg_ntrk, bg->bg_nsect, SECSIZE, 3);
+
+ /* FORCE sector size to 512... */
+
+	printf(": ntracks(heads) %d, ncyl %d, nsec %d, size %u MB",
+ lp->d_ntracks, lp->d_ncylinders, lp->d_nsectors,
+ lp->d_secperunit * lp->d_secsize / (1024*1024));
+}
+
+int
+hdopen(dev, mode)
+ dev_t dev;
+ int mode;
+{
+ int unit = hdunit(dev), part = hdpart(dev) /*, error */;
+ struct bus_device *ui;
+ struct hdsoftc *sc;
+ struct diskpart *label;
+
+ if (unit >= NHD || (ui = hddinfo[unit]) == 0 || ui->alive == 0)
+ return (ENXIO);
+ if (!hdwstart) {
+ hdwstart++;
+ timeout(hdwatch, 0, hz);
+ }
+ sc = &hdsoftc[unit];
+	/* should this be changed so it only gets called once, even if all
+ partitions are closed and re-opened? */
+ if (sc->sc_openpart == 0) {
+ hdinit(dev);
+ if (sc->sc_flags & HDF_LBA)
+ printf("hd%d: Using LBA mode\n", ui->unit);
+ }
+
+/* Note: should set a bit in the label structure to ensure that
+   aliasing does not allow multiple instances to be opened. */
+#if 0
+ if (part >= MAXPARTITIONS || lp->d_partitions[part].p_size == 0)
+ return (ENXIO);
+#endif
+
+ label=lookup_part(&array[MAX_IDE_PARTS*unit], hdpart(dev));
+ if (!label)
+ return (ENXIO);
+
+
+ sc->sc_openpart |= 1 << part;
+ return (0);
+}
+
+int
+hdclose(dev)
+ dev_t dev;
+{
+ int unit = hdunit(dev), s;
+ struct hdsoftc *sc = &hdsoftc[unit];
+
+ sc->sc_openpart &= ~(1 << hdpart(dev));
+ if (sc->sc_openpart == 0) {
+ s = splbio();
+ while (hdutab[unit].b_active) {
+ sc->sc_flags |= HDF_WANT;
+ assert_wait((event_t)sc, FALSE);
+ thread_block((void (*)())0);
+ }
+ splx(s);
+ }
+ return (0);
+}
+
+int
+hdread(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ return (block_io(hdstrategy, minphys, ior));
+}
+
+int
+hdwrite(dev, ior)
+ dev_t dev;
+ io_req_t ior;
+{
+ return (block_io(hdstrategy, minphys, ior));
+}
+
+int
+hdgetstat(dev, flavor, data, count)
+ dev_t dev;
+ dev_flavor_t flavor;
+ dev_status_t data;
+ mach_msg_type_number_t *count;
+{
+ int unit = hdunit(dev), part = hdpart(dev);
+ struct hdsoftc *sc = &hdsoftc[unit];
+ struct disklabel *lp = &hdlabel[unit];
+ struct buf *bp;
+ struct diskpart *label;
+
+ label=lookup_part(&array[MAX_IDE_PARTS*unit], hdpart(dev));
+ switch (flavor) {
+
+ case DEV_GET_SIZE:
+ if (label) {
+ data[DEV_GET_SIZE_DEVICE_SIZE] = (label->size * lp->d_secsize);
+ data[DEV_GET_SIZE_RECORD_SIZE] = lp->d_secsize;
+ *count = DEV_GET_SIZE_COUNT;
+ } else { /* Kevin: added checking here */
+ data[DEV_GET_SIZE_DEVICE_SIZE] = 0;
+ data[DEV_GET_SIZE_RECORD_SIZE] = 0;
+ *count = 0;
+ }
+ break;
+
+ case DIOCGDINFO:
+ case DIOCGDINFO - (0x10 << 16):
+ dkgetlabel(lp, flavor, data, count);
+ break;
+
+ case V_GETPARMS:
+ {
+ struct disk_parms *dp;
+ struct hdbios *bg = &hdbios[unit];
+
+ if (*count < (sizeof(struct disk_parms) / sizeof(int)))
+ return (D_INVALID_OPERATION);
+ dp = (struct disk_parms *)data;
+ dp->dp_type = DPT_WINI;
+ dp->dp_heads = lp->d_ntracks;
+ dp->dp_cyls = lp->d_ncylinders;
+ dp->dp_sectors = lp->d_nsectors;
+ dp->dp_dosheads = bg->bg_ntrk;
+ dp->dp_doscyls = bg->bg_ncyl;
+ dp->dp_dossectors = bg->bg_nsect;
+ dp->dp_secsiz = lp->d_secsize;
+ dp->dp_ptag = 0;
+ dp->dp_pflag = 0;
+ if (label) {
+ dp->dp_pstartsec = label->start;
+ dp->dp_pnumsec = label->size;
+ } else { /* added by Kevin */
+ dp->dp_pstartsec = -1;
+ dp->dp_pnumsec = -1;
+ }
+
+ *count = sizeof(struct disk_parms) / sizeof(int);
+ break;
+ }
+ case V_RDABS:
+ if (*count < lp->d_secsize / sizeof(int)) {
+ printf("hd%d: RDABS, bad size %d\n", unit, *count);
+ return (EINVAL);
+ }
+ bp = geteblk(lp->d_secsize);
+ bp->b_flags = B_READ | B_ABS;
+ bp->b_blkno = sc->sc_abssn;
+ bp->b_dev = dev;
+ bp->b_bcount = lp->d_secsize;
+ hdstrategy(bp);
+ biowait(bp);
+ if (bp->b_flags & B_ERROR) {
+ printf("hd%d: RDABS failed\n", unit);
+ brelse(bp);
+ return (EIO);
+ }
+ bcopy(bp->b_un.b_addr, (caddr_t)data, lp->d_secsize);
+ brelse(bp);
+ *count = lp->d_secsize / sizeof(int);
+ break;
+
+ case V_VERIFY:
+ {
+ int i, amt, n, error = 0;
+
+ bp = geteblk(I386_PGBYTES);
+ bp->b_blkno = sc->sc_abssn;
+ bp->b_dev = dev;
+ amt = sc->sc_abscnt;
+ n = I386_PGBYTES / lp->d_secsize;
+ while (amt > 0) {
+ i = (amt > n) ? n : amt;
+ bp->b_bcount = i * lp->d_secsize;
+ bp->b_flags = B_READ | B_ABS;
+ hdstrategy(bp);
+ biowait(bp);
+ if (bp->b_flags & B_ERROR) {
+ error = BAD_BLK;
+ break;
+ }
+ amt -= bp->b_bcount;
+ bp->b_blkno += i;
+ }
+ brelse(bp);
+ data[0] = error;
+ *count = 1;
+ break;
+ }
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (0);
+}
+
+int
+hdsetstat(dev, flavor, data, count)
+ dev_t dev;
+ dev_flavor_t flavor;
+ dev_status_t data;
+ mach_msg_type_number_t count;
+{
+ int unit = hdunit(dev); /* , part = hdpart(dev); */
+ int error = 0 /*, s */;
+ struct hdsoftc *sc = &hdsoftc[unit];
+ struct disklabel *lp = &hdlabel[unit];
+ struct buf *bp;
+
+ switch (flavor) {
+
+ case DIOCWLABEL:
+ case DIOCWLABEL - (0x10 << 16):
+ break;
+
+ case DIOCSDINFO:
+ case DIOCSDINFO - (0x10 << 16):
+ if (count != (sizeof(struct disklabel) / sizeof(int)))
+ return (D_INVALID_SIZE);
+ error = setdisklabel(lp, (struct disklabel *)data);
+ if (error == 0 && (sc->sc_flags & HDF_LBA) == 0)
+ sc->sc_flags |= HDF_SETPARAM;
+ break;
+
+ case DIOCWDINFO:
+ case DIOCWDINFO - (0x10 << 16):
+ if (count != (sizeof(struct disklabel) / sizeof(int)))
+ return (D_INVALID_SIZE);
+ error = setdisklabel(lp, (struct disklabel *)data);
+ if (error == 0) {
+ if ((sc->sc_flags & HDF_LBA) == 0)
+ sc->sc_flags |= HDF_SETPARAM;
+ error = hdwritelabel(dev);
+ }
+ break;
+
+ case V_REMOUNT:
+ hdinit(dev);
+ break;
+
+ case V_ABS:
+ if (count != 1 && count != 2)
+ return (D_INVALID_OPERATION);
+ sc->sc_abssn = *(int *)data;
+ if (sc->sc_abssn < 0 || sc->sc_abssn >= lp->d_secperunit)
+ return (D_INVALID_OPERATION);
+ if (count == 2)
+ sc->sc_abscnt = *((int *)data + 1);
+ else
+ sc->sc_abscnt = 1;
+ if (sc->sc_abscnt <= 0
+ || sc->sc_abssn + sc->sc_abscnt > lp->d_secperunit)
+ return (D_INVALID_OPERATION);
+ break;
+
+ case V_WRABS:
+ if (count < (lp->d_secsize / sizeof(int))) {
+ printf("hd%d: WRABS, bad size %d\n", unit, count);
+ return (D_INVALID_OPERATION);
+ }
+ bp = geteblk(lp->d_secsize);
+ bcopy((caddr_t)data, bp->b_un.b_addr, lp->d_secsize);
+ bp->b_flags = B_WRITE | B_ABS;
+ bp->b_blkno = sc->sc_abssn;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_dev = dev;
+ hdstrategy(bp);
+ biowait(bp);
+ if (bp->b_flags & B_ERROR) {
+ printf("hd%d: WRABS failed\n", unit);
+ error = EIO;
+ }
+ brelse(bp);
+ break;
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (error);
+}
+
+int
+hddevinfo(dev, flavor, info)
+ dev_t dev;
+ int flavor;
+ char *info;
+{
+ switch (flavor) {
+
+ case D_INFO_BLOCK_SIZE:
+ *((int *)info) = SECSIZE; /* #defined to 512 */
+ break;
+
+ default:
+ return (D_INVALID_OPERATION);
+ }
+ return (0);
+}
+
+
+
+
+/* Kevin T. Van Maren: Added this low-level routine for the unified
+ partition code. A pointer to this routine is passed, along with param* */
+int
+ide_read_fun(struct ide_driver_info *param, int sectornum, char *buff)
+{
+ struct buf *bp;
+
+ bp = geteblk(param->sectorsize);
+ bp->b_flags = B_READ | B_ABS;
+
+ bp->b_bcount = param->sectorsize;
+ bp->b_blkno = sectornum;
+
+ /* WARNING: DEPENDS ON NUMBER OF BITS FOR PARTITIONS */
+ bp->b_dev = param->dev & ~0x3ff;
+ hdstrategy(bp);
+ biowait(bp);
+ if ((bp->b_flags & B_ERROR) == 0)
+ bcopy((char *)bp->b_un.b_addr, buff, param->sectorsize);
+ else {
+ printf("ERROR!\n");
+ return(B_ERROR);
+ }
+
+ brelse(bp);
+ return(0);
+}
+
+
+
+/*
+ * Initialize drive.
+ */
+int
+hdinit(dev)
+ dev_t dev;
+{
+ int unit = hdunit(dev);
+ struct hdsoftc *sc = &hdsoftc[unit];
+ struct disklabel *lp = &hdlabel[unit], *dlp;
+ struct buf *bp = 0;
+ int numpart;
+
+ struct ide_driver_info ide_param = { dev, /* bp, */ lp->d_secsize };
+
+ /*
+ * Issue identify command.
+ */
+ if ((sc->sc_flags & HDF_IDENTDONE) == 0) {
+ sc->sc_flags |= HDF_IDENTDONE;
+ bp = geteblk(lp->d_secsize);
+ /* sector size #defined to 512 */
+ bp->b_flags = B_IDENTIFY;
+ bp->b_dev = dev;
+ hdstrategy(bp);
+ biowait(bp);
+ if ((bp->b_flags & B_ERROR) == 0) {
+ bcopy((char *)bp->b_un.b_addr,
+ (char *)&sc->sc_id, sizeof(struct hdident));
+
+ /*
+ * Check if drive supports LBA mode.
+ */
+ if (sc->sc_id.id_capability & 2)
+ sc->sc_flags |= HDF_LBA;
+ }
+ }
+
+ /*
+ * Check if drive supports multiple read/write mode.
+ */
+ hdmulti(dev);
+
+ /* Note: label was fudged during attach! */
+
+ /* ensure the 'raw disk' can be accessed reliably */
+ array[MAX_IDE_PARTS*unit].start=0;
+ array[MAX_IDE_PARTS*unit].size=lp->d_secperunit; /* fill in root for MY reads */
+#if 0
+ array[MAX_IDE_PARTS*unit].subs=0;
+ array[MAX_IDE_PARTS*unit].nsubs=0;
+ array[MAX_IDE_PARTS*unit].type=0;
+ array[MAX_IDE_PARTS*unit].fsys=0;
+#endif
+
+ numpart=get_only_partition(&ide_param, (*ide_read_fun),
+ &array[MAX_IDE_PARTS*unit],MAX_IDE_PARTS,lp->d_secperunit,
+ drive_name[unit]);
+
+ printf("%s %d partitions found\n",drive_name[unit],numpart);
+
+ if ((sc->sc_flags & HDF_LBA) == 0)
+ sc->sc_flags |= HDF_SETPARAM;
+
+	if (bp)
+		brelse(bp);
+	return (0);
+}
+
+
+
+
+/*
+ * Check if drive supports multiple read/write mode.
+ */
+int
+hdmulti(dev)
+ dev_t dev;
+{
+ int unit = hdunit(dev);
+ struct hdsoftc *sc = &hdsoftc[unit];
+ struct buf *bp;
+ struct hdident *id;
+
+ if (sc->sc_flags & HDF_MULTIDONE)
+ return(0);
+
+ sc->sc_flags |= HDF_MULTIDONE;
+
+ if (hdenmulti == 0)
+ return(0);
+
+ /*
+ * Get drive information by issuing IDENTIFY command.
+ */
+ bp = geteblk(DEV_BSIZE);
+ bp->b_flags = B_IDENTIFY;
+ bp->b_dev = dev;
+ hdstrategy(bp);
+ biowait(bp);
+ id = (struct hdident *)bp->b_un.b_addr;
+
+ /*
+ * If controller does not recognise IDENTIFY command,
+ * or does not support multiple mode, clear count.
+ */
+ if ((bp->b_flags & B_ERROR) || !id->id_multisize)
+ sc->sc_multicnt = 0;
+ else {
+ sc->sc_multicnt = id->id_multisize;
+ printf("hd%d: max multiple size %u", unit, sc->sc_multicnt);
+ /*
+ * Use 4096 since it is the minimum block size in FFS.
+ */
+ if (sc->sc_multicnt > 4096 / 512)
+ sc->sc_multicnt = 4096 / 512;
+ printf(", using %u\n", sc->sc_multicnt);
+ sc->sc_flags |= HDF_SETMULTI;
+ }
+	brelse(bp);
+	return (0);
+}
+
+/*
+ * Write label to disk.
+ */
+int
+hdwritelabel(dev)
+ dev_t dev;
+{
+ int unit = hdunit(dev), error = 0;
+ long labelsect;
+ struct buf *bp;
+ struct disklabel *lp = &hdlabel[unit];
+
+ printf("hdwritelabel: no longer implemented\n");
+
+#if 0
+ bp = geteblk(lp->d_secsize);
+ bp->b_flags = B_READ | B_ABS;
+ bp->b_blkno = LBLLOC + lp->d_partitions[PART_DISK].p_offset;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_dev = dev;
+ hdstrategy(bp);
+ biowait(bp);
+ if (bp->b_flags & B_ERROR) {
+ printf("hd%d: hdwritelabel(), error reading disklabel\n",unit);
+ error = EIO;
+ goto out;
+ }
+ *(struct disklabel *)bp->b_un.b_addr = *lp; /* copy disk label */
+ bp->b_flags = B_WRITE | B_ABS;
+ hdstrategy(bp);
+ biowait(bp);
+ if (bp->b_flags & B_ERROR) {
+ printf("hd%d: hdwritelabel(), error writing disklabel\n",unit);
+ error = EIO;
+ }
+ out:
+ brelse(bp);
+#endif
+
+ return (error);
+}
+
+/*
+ * Strategy routine.
+ * Enqueue request on drive.
+ */
+int
+hdstrategy(bp)
+ struct buf *bp;
+{
+ int unit = hdunit(bp->b_dev), part = hdpart(bp->b_dev), s;
+ long bn, sz, maxsz;
+ struct buf *dp;
+ struct hdsoftc *sc = &hdsoftc[unit];
+ struct bus_device *ui = hddinfo[unit];
+ struct disklabel *lp = &hdlabel[unit];
+ struct diskpart *label;
+
+ if (bp->b_flags & B_IDENTIFY) {
+ bp->b_cylin = 0;
+ goto q;
+ }
+ bn = bp->b_blkno;
+ if (bp->b_flags & B_ABS)
+ goto q1;
+ sz = (bp->b_bcount + lp->d_secsize - 1) / lp->d_secsize;
+ label=lookup_part(&array[MAX_IDE_PARTS*unit], hdpart(bp->b_dev));
+ if (label) {
+ maxsz = label->size;
+ } else {
+ bp->b_flags |= B_ERROR;
+ bp->b_error = EINVAL;
+ goto done;
+ }
+
+ if (bn < 0 || bn + sz > maxsz) {
+ if (bn == maxsz) {
+ bp->b_resid = bp->b_bcount;
+ goto done;
+ }
+ sz = maxsz - bn;
+ if (sz <= 0) {
+ bp->b_flags |= B_ERROR;
+ bp->b_error = EINVAL;
+ goto done;
+ }
+ bp->b_bcount = sz * lp->d_secsize;
+ }
+ bn += lp->d_partitions[part].p_offset;
+ bn += label->start;
+
+ q1:
+ bp->b_cylin = (sc->sc_flags & HDF_LBA) ? bn : bn / lp->d_secpercyl;
+ q:
+ dp = &hdutab[unit];
+ s = splbio();
+ disksort(dp, bp);
+ if (!dp->b_active) {
+ hdustart(ui);
+ if (!hdtab[ui->mi->unit].b_active)
+ hdstart(ui->mi);
+ }
+ splx(s);
+ return(0);
+ done:
+ biodone(bp);
+ return(0);
+}
+
+/*
+ * Unit start routine.
+ * Move request from drive to controller queue.
+ */
+int
+hdustart(ui)
+ struct bus_device *ui;
+{
+ struct buf *bp;
+ struct buf *dp;
+
+ bp = &hdutab[ui->unit];
+ if (bp->b_actf == 0)
+ return(0);
+ dp = &hdtab[ui->mi->unit];
+ if (dp->b_actf == 0)
+ dp->b_actf = bp;
+ else
+ dp->b_actl->b_forw = bp;
+ bp->b_forw = 0;
+ dp->b_actl = bp;
+ bp->b_active++;
+}
+
+/*
+ * Start output on controller.
+ */
+int
+hdstart(um)
+ struct bus_ctlr *um;
+{
+ long bn;
+ struct buf *bp;
+ struct buf *dp;
+ struct hdsoftc *sc;
+ struct hdcsoftc *hdc;
+ struct bus_device *ui;
+ struct disklabel *lp;
+ struct diskpart *label;
+
+ /*
+ * Pull a request from the controller queue.
+ */
+ dp = &hdtab[um->unit];
+ if ((bp = dp->b_actf) == 0)
+ return(0);
+ bp = bp->b_actf;
+
+ hdc = &hdcsoftc[um->unit];
+ ui = hddinfo[hdunit(bp->b_dev)];
+ sc = &hdsoftc[ui->unit];
+ lp = &hdlabel[ui->unit];
+
+ label = lookup_part(&array[MAX_IDE_PARTS*hdunit(bp->b_dev)], hdpart(bp->b_dev));
+
+ /*
+ * Mark controller busy.
+ */
+ dp->b_active++;
+
+ if (bp->b_flags & B_IDENTIFY) {
+ hdc->sc_state = IDENTIFY;
+ goto doit;
+ }
+
+ /*
+ * Figure out where this request is going.
+ */
+ if (sc->sc_flags & HDF_LBA)
+ hdc->sc_cn = bp->b_cylin;
+ else {
+ bn = bp->b_blkno;
+ if ((bp->b_flags & B_ABS) == 0) {
+ bn += label->start; /* partition must be valid */
+ }
+ hdc->sc_cn = bp->b_cylin;
+ hdc->sc_sn = bn % lp->d_secpercyl;
+ hdc->sc_tn = hdc->sc_sn / lp->d_nsectors;
+ hdc->sc_sn %= lp->d_nsectors;
+ }
+
+ /*
+ * Set up for multi-sector transfer.
+ */
+ hdc->sc_addr = bp->b_un.b_addr;
+ hdc->sc_resid = bp->b_bcount;
+ hdc->sc_wticks = 0;
+ hdc->sc_recalerr = 0;
+ hdc->sc_ioerr = 0;
+
+ /*
+ * Set initial transfer state.
+ */
+ if (sc->sc_flags & HDF_SETPARAM)
+ hdc->sc_state = SETPARAM;
+ else if (sc->sc_flags & HDF_RESTORE)
+ hdc->sc_state = RESTORE;
+ else if (sc->sc_flags & HDF_SETMULTI)
+ hdc->sc_state = SETMULTI;
+ else
+ hdc->sc_state = TRANSFER;
+
+ doit:
+ /*
+ * Call transfer state routine to do the actual I/O.
+ */
+ hdstate(um);
+}
+
+/*
+ * Interrupt routine.
+ */
+int
+hdintr(ctlr)
+ int ctlr;
+{
+ int timedout;
+ struct bus_ctlr *um = hdminfo[ctlr];
+ struct bus_device *ui;
+ struct buf *bp;
+ struct buf *dp = &hdtab[ctlr];
+ struct hdcsoftc *hdc = &hdcsoftc[ctlr];
+
+ if (!dp->b_active) {
+ (void) inb(HD_STATUS(um->address));
+ printf("hdc%d: stray interrupt\n", ctlr);
+ return(0);
+ }
+ timedout = hdc->sc_wticks >= OP_TIMEOUT;
+ hdc->sc_wticks = 0;
+
+ /*
+ * Operation timed out, terminate request.
+ */
+ if (timedout) {
+ bp = dp->b_actf->b_actf;
+ ui = hddinfo[hdunit(bp->b_dev)];
+ hderror("timed out", ui);
+ hdsoftc[ui->unit].sc_flags |= HDF_RESTORE;
+ bp->b_flags |= B_ERROR;
+ bp->b_error = EIO;
+ hddone(ui, bp);
+ return(0);
+ }
+
+ /*
+ * Let transfer state routine handle the rest.
+ */
+ hdstate(um);
+}
+
+/*
+ * Transfer finite state machine driver.
+ */
+int
+hdstate(um)
+ struct bus_ctlr *um;
+{
+ char *msg;
+ int op;
+ struct buf *bp;
+ struct hdsoftc *sc;
+ struct bus_device *ui;
+ struct disklabel *lp;
+ struct hdcsoftc *hdc = &hdcsoftc[um->unit];
+ struct hdbios *bg;
+
+ bp = hdtab[um->unit].b_actf->b_actf;
+ ui = hddinfo[hdunit(bp->b_dev)];
+ lp = &hdlabel[ui->unit];
+ sc = &hdsoftc[ui->unit];
+ bg = &hdbios[ui->unit];
+
+ /*
+ * Ensure controller is not busy.
+ */
+ if (!hdwait(um))
+ goto ctlr_err;
+
+ while (1) switch (hdc->sc_state) {
+
+ case SETPARAM:
+ /*
+ * Set drive parameters.
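+		 * The drive/head register takes the head count minus one
+		 * and the sector count register the sectors per track.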
+ */
+ outb(HD_DRVHD(um->address),
+ 0xa0 | (ui->slave << 4) | (lp->d_ntracks - 1));
+ outb(HD_SECTCNT(um->address), lp->d_nsectors);
+ outb(HD_CMD(um->address), CMD_SETPARAM);
+ hdc->sc_state = SETPARAMDONE;
+ return(0);
+
+ case SETPARAMDONE:
+ /*
+ * Set parameters complete.
+ */
+ if (msg = hderrchk(um))
+ goto bad;
+ sc->sc_flags &= ~HDF_SETPARAM;
+ hdc->sc_state = RESTORE;
+ break;
+
+ case RESTORE:
+ /*
+ * Recalibrate drive.
+ */
+ outb(HD_DRVHD(um->address), 0xa0 | (ui->slave << 4));
+ outb(HD_CMD(um->address), CMD_RESTORE);
+ hdc->sc_state = RESTOREDONE;
+ return(0);
+
+ case RESTOREDONE:
+ /*
+ * Recalibration complete.
+ */
+ if (msg = hderrchk(um)) {
+ if (++hdc->sc_recalerr == 2)
+ goto bad;
+ hdc->sc_state = RESTORE;
+ break;
+ }
+ sc->sc_flags &= ~HDF_RESTORE;
+ hdc->sc_recalerr = 0;
+ if (sc->sc_flags & HDF_SETMULTI)
+ hdc->sc_state = SETMULTI;
+ else
+ hdc->sc_state = TRANSFER;
+ break;
+
+ case TRANSFER:
+ /*
+ * Perform I/O transfer.
+ */
+ sc->sc_flags &= ~HDF_UNALIGNED;
+ hdc->sc_state = TRANSFERDONE;
+ hdc->sc_amt = hdc->sc_resid / lp->d_secsize;
+ if (hdc->sc_amt == 0) {
+ sc->sc_flags |= HDF_UNALIGNED;
+ hdc->sc_amt = 1;
+ } else if (hdc->sc_amt > 256)
+ hdc->sc_amt = 256;
+ if (sc->sc_multicnt > 1 && hdc->sc_amt >= sc->sc_multicnt) {
+ hdc->sc_cnt = sc->sc_multicnt;
+ hdc->sc_amt -= hdc->sc_amt % hdc->sc_cnt;
+ if (bp->b_flags & B_READ)
+ op = CMD_READMULTI;
+ else
+ op = CMD_WRITEMULTI;
+ } else {
+ hdc->sc_cnt = 1;
+ if (bp->b_flags & B_READ)
+ op = CMD_READ;
+ else
+ op = CMD_WRITE;
+ }
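+		/*
+		 * In LBA mode the 28-bit block number is split across the
+		 * sector, cylinder-low, cylinder-high and drive/head
+		 * registers; otherwise program cylinder, head and sector.
+		 */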
+ if (sc->sc_flags & HDF_LBA) {
+ outb(HD_DRVHD(um->address),
+ (0xe0 | (ui->slave << 4)
+ | ((hdc->sc_cn >> 24) & 0x0f)));
+ outb(HD_SECT(um->address), hdc->sc_cn);
+ outb(HD_CYLLO(um->address), hdc->sc_cn >> 8);
+ outb(HD_CYLHI(um->address), hdc->sc_cn >> 16);
+ } else {
+ outb(HD_DRVHD(um->address),
+ 0xa0 | (ui->slave << 4) | hdc->sc_tn);
+ outb(HD_SECT(um->address), hdc->sc_sn + 1);
+ outb(HD_CYLLO(um->address), hdc->sc_cn);
+ outb(HD_CYLHI(um->address), hdc->sc_cn >> 8);
+ }
+ outb(HD_SECTCNT(um->address), hdc->sc_amt & 0xff);
+ outb(HD_PRECOMP(um->address), bg->bg_precomp / 4);
+ outb(HD_CMD(um->address), op);
+ if ((bp->b_flags & B_READ) == 0) {
+ int i;
+ caddr_t buf;
+
+ if (sc->sc_flags & HDF_UNALIGNED) {
+ buf = hdc->sc_buf;
+ bcopy(hdc->sc_addr, buf, hdc->sc_resid);
+ bzero(buf + hdc->sc_resid,
+ lp->d_secsize - hdc->sc_resid);
+ } else
+ buf = hdc->sc_addr;
+ for (i = 0; i < 1000000; i++)
+ if (inb(HD_STATUS(um->address)) & ST_DREQ) {
+ loutw(HD_DATA(um->address), buf,
+ hdc->sc_cnt * lp->d_secsize / 2);
+ return(0);
+ }
+ goto ctlr_err;
+ }
+ return(0);
+
+ case TRANSFERDONE:
+ /*
+ * Transfer complete.
+ */
+ if (msg = hderrchk(um)) {
+ if (++hdc->sc_ioerr == MAX_RETRIES)
+ goto bad;
+ /*
+ * Every fourth attempt print a message
+ * and recalibrate the drive.
+ */
+ if (hdc->sc_ioerr & 3)
+ hdc->sc_state = TRANSFER;
+ else {
+ hderror(msg, ui);
+ hdc->sc_state = RESTORE;
+ }
+ break;
+ }
+ if (bp->b_flags & B_READ) {
+ if (sc->sc_flags & HDF_UNALIGNED) {
+ linw(HD_DATA(um->address), hdc->sc_buf,
+ lp->d_secsize / 2);
+ bcopy(hdc->sc_buf, hdc->sc_addr,
+ hdc->sc_resid);
+ } else
+ linw(HD_DATA(um->address), hdc->sc_addr,
+ hdc->sc_cnt * lp->d_secsize / 2);
+ }
+ hdc->sc_resid -= hdc->sc_cnt * lp->d_secsize;
+ if (hdc->sc_resid <= 0) {
+ bp->b_resid = 0;
+ hddone(ui, bp);
+ return(0);
+ }
+ if (sc->sc_flags & HDF_LBA)
+ hdc->sc_cn += hdc->sc_cnt;
+ else {
+ hdc->sc_sn += hdc->sc_cnt;
+ while (hdc->sc_sn >= lp->d_nsectors) {
+ hdc->sc_sn -= lp->d_nsectors;
+ if (++hdc->sc_tn == lp->d_ntracks) {
+ hdc->sc_tn = 0;
+ hdc->sc_cn++;
+ }
+ }
+ }
+ hdc->sc_ioerr = 0;
+ hdc->sc_addr += hdc->sc_cnt * lp->d_secsize;
+ hdc->sc_amt -= hdc->sc_cnt;
+ if (hdc->sc_amt == 0) {
+ hdc->sc_state = TRANSFER;
+ break;
+ }
+ if ((bp->b_flags & B_READ) == 0) {
+ int i;
+
+ for (i = 0; i < 1000000; i++)
+ if (inb(HD_STATUS(um->address)) & ST_DREQ) {
+ loutw(HD_DATA(um->address),
+ hdc->sc_addr,
+ hdc->sc_cnt * lp->d_secsize / 2);
+ return(0);
+ }
+ goto ctlr_err;
+ }
+ return(0);
+
+ case IDENTIFY:
+ /*
+ * Get drive info.
+ */
+ hdc->sc_state = IDENTIFYDONE;
+ outb(HD_DRVHD(um->address), 0xa0 | (ui->slave << 4));
+ outb(HD_CMD(um->address), CMD_IDENTIFY);
+ return(0);
+
+ case IDENTIFYDONE:
+ /*
+ * Get drive info complete.
+ */
+ if (msg = hderrchk(um))
+ goto bad;
+ linw(HD_DATA(um->address), (u_short *)bp->b_un.b_addr, 256);
+ hddone(ui, bp);
+ return(0);
+
+ case SETMULTI:
+ /*
+ * Set multiple mode count.
+ */
+ hdc->sc_state = SETMULTIDONE;
+ outb(HD_DRVHD(um->address), 0xa0 | (ui->slave << 4));
+ outb(HD_SECTCNT(um->address), sc->sc_multicnt);
+ outb(HD_CMD(um->address), CMD_SETMULTI);
+ return(0);
+
+ case SETMULTIDONE:
+ /*
+ * Set multiple mode count complete.
+ */
+ sc->sc_flags &= ~HDF_SETMULTI;
+ if (msg = hderrchk(um)) {
+ sc->sc_multicnt = 0;
+ goto bad;
+ }
+ hdc->sc_state = TRANSFER;
+ break;
+
+ default:
+ printf("hd%d: invalid state\n", ui->unit);
+ panic("hdstate");
+ /*NOTREACHED*/
+ }
+
+ ctlr_err:
+ msg = "controller error";
+
+ bad:
+ hderror(msg, ui);
+ bp->b_flags |= B_ERROR;
+ bp->b_error = EIO;
+ sc->sc_flags |= HDF_RESTORE;
+ hddone(ui, bp);
+}
+
+/*
+ * Terminate current request and start
+ * any others that are queued.
+ */
+int
+hddone(ui, bp)
+ struct bus_device *ui;
+ struct buf *bp;
+{
+ struct bus_ctlr *um = ui->mi;
+ struct hdsoftc *sc = &hdsoftc[ui->unit];
+ struct hdcsoftc *hdc = &hdcsoftc[um->unit];
+ struct buf *dp = &hdtab[um->unit];
+
+ sc->sc_flags &= ~HDF_UNALIGNED;
+
+ /*
+ * Remove this request from queue.
+ */
+ hdutab[ui->unit].b_actf = bp->b_actf;
+ biodone(bp);
+ bp = &hdutab[ui->unit];
+ dp->b_actf = bp->b_forw;
+
+ /*
+ * Mark controller and drive idle.
+ */
+ dp->b_active = 0;
+ bp->b_active = 0;
+ hdc->sc_state = IDLE;
+
+ /*
+ * Start up other requests.
+ */
+ hdustart(ui);
+ hdstart(um);
+
+ /*
+ * Wakeup anyone waiting for drive.
+ */
+ if (sc->sc_flags & HDF_WANT) {
+ sc->sc_flags &= ~HDF_WANT;
+ wakeup((caddr_t)sc);
+ }
+}
+
+/*
+ * Wait for controller to be idle.
+ */
+int
+hdwait(um)
+ struct bus_ctlr *um;
+{
+ int i, status;
+
+ for (i = 0; i < 1000000; i++) {
+ status = inb(HD_STATUS(um->address));
+ if ((status & ST_BUSY) == 0 && (status & ST_READY))
+ return (status);
+ }
+ return (0);
+}
+
+/*
+ * Check for errors on completion of an operation.
+ */
+char *
+hderrchk(um)
+ struct bus_ctlr *um;
+{
+ int status;
+
+ status = inb(HD_STATUS(um->address));
+ if (status & ST_WRTFLT)
+ return ("write fault");
+ if (status & ST_ERROR) {
+ status = inb(HD_ERROR(um->address));
+ if (status & ERR_DAM)
+ return ("data address mark not found");
+ if (status & ERR_TR0)
+ return ("track 0 not found");
+ if (status & ERR_ID)
+ return ("sector not found");
+ if (status & ERR_ECC)
+ return ("uncorrectable ECC error");
+ if (status & ERR_BADBLK)
+ return ("bad block detected");
+ if (status & ERR_ABORT)
+ return ("command aborted");
+ return ("hard error");
+ }
+ return (NULL);
+}
+
+/*
+ * Print an error message.
+ */
+int
+hderror(msg, ui)
+ char *msg;
+ struct bus_device *ui;
+{
+ char *op;
+ int prn_sn = 0;
+ struct hdcsoftc *hdc = &hdcsoftc[ui->mi->unit];
+
+ switch (hdc->sc_state) {
+
+ case SETPARAM:
+ case SETPARAMDONE:
+ op = "SETPARAM: ";
+ break;
+
+ case RESTORE:
+ case RESTOREDONE:
+ op = "RESTORE: ";
+ break;
+
+ case TRANSFER:
+ case TRANSFERDONE:
+ if (hdutab[ui->unit].b_actf->b_flags & B_READ)
+ op = "READ: ";
+ else
+ op = "WRITE: ";
+ prn_sn = 1;
+ break;
+
+ case IDENTIFY:
+ case IDENTIFYDONE:
+ op = "IDENTIFY: ";
+ break;
+
+ case SETMULTI:
+ case SETMULTIDONE:
+ op = "SETMULTI: ";
+ break;
+
+ default:
+ op = "";
+ break;
+ }
+ printf("hd%d: %s%s", ui->unit, op, msg);
+ if (prn_sn) {
+ if (hdsoftc[ui->unit].sc_flags & HDF_LBA)
+ printf(", bn %d", hdc->sc_cn);
+ else
+ printf(", cn %d tn %d sn %d",
+ hdc->sc_cn, hdc->sc_tn, hdc->sc_sn + 1);
+ }
+ printf("\n");
+}
+
+/*
+ * Watchdog routine.
+ * Check for any hung operations.
+ */
+void
+hdwatch()
+{
+ int unit, s;
+
+ timeout(hdwatch, 0, hz);
+ s = splbio();
+ for (unit = 0; unit < NHDC; unit++)
+ if (hdtab[unit].b_active
+ && ++hdcsoftc[unit].sc_wticks >= OP_TIMEOUT)
+ hdintr(unit);
+ splx(s);
+}
+
+#endif /* NHD > 0 && !LINUX_DEV */
diff --git a/i386/i386at/nhdreg.h b/i386/i386at/nhdreg.h
new file mode 100644
index 00000000..d0ef1975
--- /dev/null
+++ b/i386/i386at/nhdreg.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 1994 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+/*
+ * Hard disk controller.
+ */
+
+#define HD_DATA(p) (p) /* data register */
+#define HD_ERROR(p) ((p) + 1) /* error register */
+#define HD_PRECOMP(p) ((p) + 1) /* precomp register */
+#define HD_SECTCNT(p) ((p) + 2) /* sector count register */
+#define HD_SECT(p) ((p) + 3) /* sector number register */
+#define HD_CYLLO(p) ((p) + 4) /* cylinder number low */
+#define HD_CYLHI(p) ((p) + 5) /* cylinder number high */
+#define HD_DRVHD(p) ((p) + 6) /* drive head register */
+#define HD_STATUS(p) ((p) + 7) /* status register */
+#define HD_CMD(p) ((p) + 7) /* command register */
+
+/*
+ * Status register
+ */
+#define ST_BUSY 0x80 /* controller is busy */
+#define ST_READY 0x40 /* drive is ready */
+#define ST_WRTFLT 0x20 /* write fault */
+#define ST_SEEK 0x10 /* seek complete */
+#define ST_DREQ 0x08 /* data request */
+#define ST_ECC 0x04 /* ECC corrected data */
+#define ST_INDEX 0x02 /* index pulse */
+#define ST_ERROR 0x01 /* an operation resulted in error */
+
+/*
+ * Error register
+ */
+#define ERR_DAM 0x01 /* data address mark not found */
+#define ERR_TR0 0x02 /* track 0 not found */
+#define ERR_ABORT 0x04 /* command aborted */
+#define ERR_ID 0x10 /* sector not found */
+#define ERR_ECC 0x40 /* uncorrectable ECC error */
+#define ERR_BADBLK 0x80 /* bad block detected */
+
+/*
+ * Commands
+ */
+#define CMD_RESTORE 0x10
+#define CMD_READ 0x20
+#define CMD_WRITE 0x30
+#define CMD_SETPARAM 0x91
+#define CMD_READMULTI 0xc4
+#define CMD_WRITEMULTI 0xc5
+#define CMD_SETMULTI 0xc6
+#define CMD_IDENTIFY 0xec
+
+#if 0
+#define PDLOCATION 29 /* XXX: belongs in <i386at/disk.h> */
+#endif
+
+#define BAD_BLK 0x80
+#define SECSIZE 512
+
+/*
+ * Information returned by IDENTIFY command.
+ */
+struct hdident {
+ u_short id_config; /* flags */
+ u_short id_npcyl; /* # physical cylinders */
+ u_short id_rsvd2; /* reserved (word 2) */
+ u_short id_nptrk; /* # physical tracks */
+ u_short id_bptrk; /* unformatted bytes/track */
+ u_short id_bpsect; /* unformatted bytes/sector */
+ u_short id_npsect; /* # physical sectors/track */
+ u_short id_vendor0; /* vendor unique */
+ u_short id_vendor1; /* vendor unique */
+ u_short id_vendor2; /* vendor unique */
+ u_char id_serno[20]; /* serial #: 0 = unspecified */
+ u_short id_buftype; /* ??? */
+ u_short id_bufsize; /* 512 byte increments: 0 = unspecified */
+ u_short id_eccbytes; /* for R/W LONG commands: 0 = unspecified */
+ u_char id_rev[8]; /* firmware revision: 0 = unspecified */
+ u_char id_model[40]; /* model name: 0 = unspecified */
+ u_char id_multisize; /* max multiple I/O size: 0 = unsupported */
+ u_char id_vendor3; /* vendor unique */
+ u_short id_dwordio; /* 0 = unsupported; 1 = implemented */
+ u_char id_vendor4; /* vendor unique */
+ u_char id_capability; /* 0:DMA 1:LBA 2:IORDYsw 3:IORDY:sup */
+ u_short id_rsvd50; /* reserved (word 50) */
+ u_char id_vendor5; /* vendor unique */
+ u_char id_pio; /* 0=slow, 1=medium, 2=fast */
+ u_char id_vendor6; /* vendor unique */
+ u_char id_dma; /* 0=slow, 1=medium, 2=fast */
+ u_short id_valid; /* 0:logical 1:eide */
+ u_short id_nlcyl; /* # logical cylinders */
+ u_short id_nltrk; /* # logical tracks */
+ u_short id_nlsect; /* # logical sectors/track */
+ u_short id_capacity0; /* logical total sectors on drive */
+ u_short id_capacity1; /* (2 words, misaligned int) */
+ u_char id_multisect; /* current multiple sector count */
+ u_char id_multivalid; /* bit 0=1, multisect field is valid */
+ u_short id_totsect; /* total number of sectors */
+ u_short id_dma1; /* single word DMA info */
+ u_short id_dmamulti; /* multiple word DMA info */
+ u_short id_eidepiomode; /* 0:mode3 1:mode4 */
+ u_short id_eidedmamin; /* min multiple word DMA cycle time (ns) */
+	u_short id_eidedmatime;	/* recommended DMA cycle time (ns) */
+ u_short id_eidepio; /* min cycle time (ns), no IORDY */
+	u_short id_eidepioiordy;/* min cycle time (ns), with IORDY */
+ u_short id_rsvd69; /* reserved (word 69) */
+ u_short id_rsvd70; /* reserved (word 70) */
+};
diff --git a/i386/i386at/phys_mem_grab_page.c b/i386/i386at/phys_mem_grab_page.c
new file mode 100644
index 00000000..8ceaca60
--- /dev/null
+++ b/i386/i386at/phys_mem_grab_page.c
@@ -0,0 +1 @@
+/*XXX bogus kludge */
diff --git a/i386/i386at/pic_isa.c b/i386/i386at/pic_isa.c
new file mode 100644
index 00000000..49eff4d3
--- /dev/null
+++ b/i386/i386at/pic_isa.c
@@ -0,0 +1,68 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <sys/types.h>
+#include <i386/ipl.h>
+#include <i386/pic.h>
+#include <rc.h>
+
+
+/* These interrupts are always present */
+extern intnull(), fpintr(), hardclock(), kdintr();
+extern prtnull();
+
+int (*ivect[NINTR])() = {
+ /* 00 */ hardclock, /* always */
+#if RCLINE < 0
+ /* 01 */ kdintr, /* kdintr, ... */
+#else
+ /* 01 */ intnull, /* kdintr, ... */
+#endif
+ /* 02 */ intnull,
+ /* 03 */ intnull, /* lnpoll, comintr, ... */
+
+ /* 04 */ intnull, /* comintr, ... */
+ /* 05 */ intnull, /* comintr, wtintr, ... */
+ /* 06 */ intnull, /* fdintr, ... */
+ /* 07 */ prtnull, /* qdintr, ... */
+
+ /* 08 */ intnull,
+ /* 09 */ intnull, /* ether */
+ /* 10 */ intnull,
+ /* 11 */ intnull,
+
+ /* 12 */ intnull,
+ /* 13 */ fpintr, /* always */
+ /* 14 */ intnull, /* hdintr, ... */
+ /* 15 */ intnull, /* ??? */
+};
+
+int intpri[NINTR] = {
+ /* 00 */ 0, SPL6, 0, 0,
+ /* 04 */ 0, 0, 0, 0,
+ /* 08 */ 0, 0, 0, 0,
+ /* 12 */ 0, SPL1, 0, 0,
+};
diff --git a/i386/i386at/rtc.c b/i386/i386at/rtc.c
new file mode 100644
index 00000000..7a8d1d7b
--- /dev/null
+++ b/i386/i386at/rtc.c
@@ -0,0 +1,237 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <sys/types.h>
+#include <sys/time.h>
+#include <kern/time_out.h>
+#include <i386/machspl.h>
+#include <i386at/rtc.h>
+
+static unsigned char rtc[RTC_NREG];
+static int first_rtcopen_ever = 1;
+
+rtcinit()
+{
+ outb(RTC_ADDR, RTC_A);
+ outb(RTC_DATA, RTC_DIV2 | RTC_RATE6);
+ outb(RTC_ADDR, RTC_B);
+ outb(RTC_DATA, RTC_HM);
+}
+
+
+int
+rtcget(regs)
+unsigned char *regs;
+{
+ if (first_rtcopen_ever) {
+ rtcinit();
+ first_rtcopen_ever = 0;
+ }
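+	/*
+	 * Register D's valid RAM and time (VRT) bit is clear when the
+	 * battery has failed, i.e. the clock contents are suspect.
+	 */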
+ outb(RTC_ADDR, RTC_D);
+	if ((inb(RTC_DATA) & RTC_VRT) == 0) return(-1);
+ outb(RTC_ADDR, RTC_A);
+ while (inb(RTC_DATA) & RTC_UIP) /* busy wait */
+ outb(RTC_ADDR, RTC_A);
+ load_rtc(regs);
+ return(0);
+}
+
+rtcput(regs)
+unsigned char *regs;
+{
+ register unsigned char x;
+
+ if (first_rtcopen_ever) {
+ rtcinit();
+ first_rtcopen_ever = 0;
+ }
+ outb(RTC_ADDR, RTC_B);
+ x = inb(RTC_DATA);
+ outb(RTC_ADDR, RTC_B);
+ outb(RTC_DATA, x | RTC_SET);
+ save_rtc(regs);
+ outb(RTC_ADDR, RTC_B);
+ outb(RTC_DATA, x & ~RTC_SET);
+}
+
+
+extern struct timeval time;
+extern struct timezone tz;
+
+static int month[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+
+yeartoday(year)
+int year;
+{
+ return((year%4) ? 365 : 366);
+}
+
+hexdectodec(n)
+char n;
+{
+ return(((n>>4)&0x0F)*10 + (n&0x0F));
+}
+
+char
+dectohexdec(n)
+int n;
+{
+ return((char)(((n/10)<<4)&0xF0) | ((n%10)&0x0F));
+}
+
+
+readtodc(tp)
+ u_int *tp;
+{
+ struct rtc_st rtclk;
+ time_t n;
+ int sec, min, hr, dom, mon, yr;
+ int i, days = 0;
+ spl_t ospl;
+
+#ifdef MACH_KERNEL
+ ospl = splclock();
+#else MACH_KERNEL
+ ospl = spl5();
+#endif MACH_KERNEL
+ if (rtcget(&rtclk)) {
+ splx(ospl);
+ return(-1);
+ }
+ splx (ospl);
+
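+	/*
+	 * Convert the BCD clock registers into calendar fields, then
+	 * into seconds since the epoch.
+	 */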
+ sec = hexdectodec(rtclk.rtc_sec);
+ min = hexdectodec(rtclk.rtc_min);
+ hr = hexdectodec(rtclk.rtc_hr);
+ dom = hexdectodec(rtclk.rtc_dom);
+ mon = hexdectodec(rtclk.rtc_mon);
+ yr = hexdectodec(rtclk.rtc_yr);
+ yr = (yr < 70) ? yr+100 : yr;
+
+ n = sec + 60 * min + 3600 * hr;
+ n += (dom - 1) * 3600 * 24;
+
+ if (yeartoday(yr) == 366)
+ month[1] = 29;
+ for (i = mon - 2; i >= 0; i--)
+ days += month[i];
+ month[1] = 28;
+ for (i = 70; i < yr; i++)
+ days += yeartoday(i);
+ n += days * 3600 * 24;
+
+#ifdef MACH_KERNEL
+#else MACH_KERNEL
+ n += tz.tz_minuteswest * 60;
+ if (tz.tz_dsttime)
+ n -= 3600;
+#endif MACH_KERNEL
+
+ *tp = n;
+
+ return(0);
+}
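+
+/*
+ * Worked example of the conversion above: 00:01:30 on Jan 2, 1970
+ * (yr == 70, mon == 1, dom == 2) gives
+ *
+ *	n = 30 + 60*1 + 3600*0 = 90, then n += (2 - 1)*86400 = 86490,
+ *
+ * and neither the month loop (i starts at mon - 2 == -1) nor the year
+ * loop (from 70 up to yr - 1) adds any further days.
+ */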
+
+writetodc()
+{
+ struct rtc_st rtclk;
+ time_t n;
+ int diff, i, j;
+ spl_t ospl;
+
+#ifdef MACH_KERNEL
+ ospl = splclock();
+#else MACH_KERNEL
+ ospl = spl5();
+#endif MACH_KERNEL
+ if (rtcget(&rtclk)) {
+ splx(ospl);
+ return(-1);
+ }
+ splx(ospl);
+
+#ifdef MACH_KERNEL
+ diff = 0;
+#else MACH_KERNEL
+ diff = tz.tz_minuteswest * 60;
+ if (tz.tz_dsttime)
+ diff -= 3600;
+#endif MACH_KERNEL
+ n = (time.tv_sec - diff) % (3600 * 24); /* hrs+mins+secs */
+ rtclk.rtc_sec = dectohexdec(n%60);
+ n /= 60;
+ rtclk.rtc_min = dectohexdec(n%60);
+ rtclk.rtc_hr = dectohexdec(n/60);
+
+ n = (time.tv_sec - diff) / (3600 * 24); /* days */
+ rtclk.rtc_dow = (n + 4) % 7; /* 1/1/70 is Thursday */
+
+ for (j = 1970, i = yeartoday(j); n >= i; j++, i = yeartoday(j))
+ n -= i;
+
+ rtclk.rtc_yr = dectohexdec(j - 1900);
+
+ if (i == 366)
+ month[1] = 29;
+ for (i = 0; n >= month[i]; i++)
+ n -= month[i];
+ month[1] = 28;
+ rtclk.rtc_mon = dectohexdec(++i);
+
+ rtclk.rtc_dom = dectohexdec(++n);
+
+#ifdef MACH_KERNEL
+ ospl = splclock();
+#else MACH_KERNEL
+ ospl = spl5();
+#endif MACH_KERNEL
+ rtcput(&rtclk);
+ splx(ospl);
+
+ return(0);
+}
diff --git a/i386/i386at/rtc.h b/i386/i386at/rtc.h
new file mode 100644
index 00000000..e8d19670
--- /dev/null
+++ b/i386/i386at/rtc.h
@@ -0,0 +1,137 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#define RTC_ADDR 0x70 /* I/O port address for register select */
+#define RTC_DATA 0x71 /* I/O port address for data read/write */
+
+/*
+ * Register A definitions
+ */
+#define RTC_A 0x0a /* register A address */
+#define RTC_UIP 0x80 /* Update in progress bit */
+#define RTC_DIV0 0x00 /* Time base of 4.194304 MHz */
+#define RTC_DIV1 0x10 /* Time base of 1.048576 MHz */
+#define RTC_DIV2 0x20 /* Time base of 32.768 KHz */
+#define	 RTC_RATE6	0x06	/* 976.562-us period (1.024 kHz interrupt rate) */
+
+/*
+ * Register B definitions
+ */
+#define RTC_B 0x0b /* register B address */
+#define RTC_SET 0x80 /* stop updates for time set */
+#define RTC_PIE 0x40 /* Periodic interrupt enable */
+#define RTC_AIE 0x20 /* Alarm interrupt enable */
+#define RTC_UIE 0x10 /* Update ended interrupt enable */
+#define RTC_SQWE 0x08 /* Square wave enable */
+#define RTC_DM 0x04 /* Date mode, 1 = binary, 0 = BCD */
+#define RTC_HM 0x02 /* hour mode, 1 = 24 hour, 0 = 12 hour */
+#define RTC_DSE 0x01 /* Daylight savings enable */
+
+/*
+ * Register C definitions
+ */
+#define RTC_C 0x0c /* register C address */
+#define RTC_IRQF 0x80 /* IRQ flag */
+#define RTC_PF 0x40 /* PF flag bit */
+#define RTC_AF 0x20 /* AF flag bit */
+#define RTC_UF 0x10 /* UF flag bit */
+
+/*
+ * Register D definitions
+ */
+#define RTC_D 0x0d /* register D address */
+#define RTC_VRT 0x80 /* Valid RAM and time bit */
+
+#define RTC_NREG 0x0e /* number of RTC registers */
+#define RTC_NREGP 0x0a /* number of RTC registers to set time */
+
+#define RTCRTIME _IOR('c', 0x01, struct rtc_st) /* Read time from RTC */
+#define RTCSTIME _IOW('c', 0x02, struct rtc_st) /* Set time into RTC */
+
+struct rtc_st {
+ char rtc_sec;
+ char rtc_asec;
+ char rtc_min;
+ char rtc_amin;
+ char rtc_hr;
+ char rtc_ahr;
+ char rtc_dow;
+ char rtc_dom;
+ char rtc_mon;
+ char rtc_yr;
+ char rtc_statusa;
+ char rtc_statusb;
+ char rtc_statusc;
+ char rtc_statusd;
+};
+
+/*
+ * This macro reads the contents of the real-time clock into the
+ * specified buffer.
+ */
+#define load_rtc(regs) \
+{\
+ register int i; \
+ \
+ for (i = 0; i < RTC_NREG; i++) { \
+ outb(RTC_ADDR, i); \
+ regs[i] = inb(RTC_DATA); \
+ } \
+}
+
+/*
+ * This macro writes the contents of the specified buffer to the
+ * real-time clock.
+ */
+#define save_rtc(regs) \
+{ \
+ register int i; \
+ for (i = 0; i < RTC_NREGP; i++) { \
+ outb(RTC_ADDR, i); \
+ outb(RTC_DATA, regs[i]);\
+ } \
+}
+
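+/*
+ * Usage sketch (illustrative only): a caller typically fills a
+ * struct rtc_st via rtcget() and decodes the packed-BCD fields:
+ *
+ *	struct rtc_st clk;
+ *
+ *	if (rtcget((unsigned char *) &clk) == 0) {
+ *		int sec = hexdectodec(clk.rtc_sec);	(0..59)
+ *		int min = hexdectodec(clk.rtc_min);	(0..59)
+ *	}
+ *
+ * rtcget() and hexdectodec() are defined in the accompanying rtc.c,
+ * not in this header.
+ */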
+
diff --git a/i386/imps/Makefile.in b/i386/imps/Makefile.in
new file mode 100644
index 00000000..bf353f1d
--- /dev/null
+++ b/i386/imps/Makefile.in
@@ -0,0 +1,66 @@
+#
+# Copyright (c) 1994 The University of Utah and
+# the Computer Systems Laboratory (CSL). All rights reserved.
+#
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+# IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+# ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+#
+# CSL requests users of this software to return to csl-dist@cs.utah.edu any
+# improvements that they make and grant CSL redistribution rights.
+#
+# Author: Bryan Ford, University of Utah CSL
+#
+
+
+# This makefile creates a microkernel
+# that supports Intel MP specification-compliant machines.
+#### Start of configuration section ####
+
+GSRCDIR = @top_gsrcdir@
+MSRCDIR = @top_srcdir@
+OBJDIR = @top_objdir@
+
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+
+include $(OBJDIR)/Makeconf
+
+##### End of configuration section #####
+
+
+# Only build an MP kernel if the user asked for one.
+ifeq ($(ENABLE_MP),yes)
+
+
+# First define what we're trying to accomplish.
+TARGET = kernel-imps.bmod
+
+
+# For Intel MP spec support add the imps directory.
+SRCDIRS += $(MSRCDIR)/kernel/imps
+
+
+# Everything else is done in here.
+include $(MSRCDIR)/kernel/Makerules
+
+
+# Be sure to build the asm symbol file before anything else.
+# This is sort of a bogus dependency, but it does the job.
+i386asm.h: impsasm.h
+CLEAN_FILES += impsasm.h
+
+
+else
+
+all:
+clean:
+install:
+
+endif
diff --git a/i386/imps/apic.h b/i386/imps/apic.h
new file mode 100644
index 00000000..24a161af
--- /dev/null
+++ b/i386/imps/apic.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _IMPS_APIC_
+#define _IMPS_APIC_
+
+typedef struct ApicReg
+{
+ unsigned r; /* the actual register */
+ unsigned p[3]; /* pad to the next 128-bit boundary */
+} ApicReg;
+
+typedef struct ApicIoUnit
+{
+ ApicReg select;
+ ApicReg window;
+} ApicIoUnit;
+#define APIC_IO_UNIT_ID 0x00
+#define APIC_IO_VERSION 0x01
+#define APIC_IO_REDIR_LOW(int_pin) (0x10+(int_pin)*2)
+#define APIC_IO_REDIR_HIGH(int_pin) (0x11+(int_pin)*2)
+
+typedef struct ApicLocalUnit
+{
+ ApicReg reserved0;
+ ApicReg reserved1;
+ ApicReg unit_id;
+ ApicReg version;
+ ApicReg reserved4;
+ ApicReg reserved5;
+ ApicReg reserved6;
+ ApicReg reserved7;
+ ApicReg task_pri;
+ ApicReg reservedb;
+ ApicReg reservedc;
+ ApicReg eoi;
+ ApicReg remote;
+ ApicReg logical_dest;
+ ApicReg dest_format;
+ ApicReg spurious_vector;
+ ApicReg isr[8];
+ ApicReg tmr[8];
+ ApicReg irr[8];
+ ApicReg reserved28[8];
+ ApicReg int_command[2];
+ ApicReg timer_vector;
+ ApicReg reserved33;
+ ApicReg reserved34;
+ ApicReg lint0_vector;
+ ApicReg lint1_vector;
+ ApicReg reserved37;
+ ApicReg init_count;
+ ApicReg cur_count;
+ ApicReg reserved3a;
+ ApicReg reserved3b;
+ ApicReg reserved3c;
+ ApicReg reserved3d;
+ ApicReg divider_config;
+ ApicReg reserved3f;
+} ApicLocalUnit;
+
+
+/* Address at which the local unit is mapped in kernel virtual memory.
+ Must be constant. */
+#define APIC_LOCAL_VA 0xc1000000
+
+#define apic_local_unit (*((volatile ApicLocalUnit*)APIC_LOCAL_VA))
+
+
+/* Set or clear a bit in a 255-bit APIC mask register.
+ These registers are spread through eight 32-bit registers. */
+#define APIC_SET_MASK_BIT(reg, bit) \
+ ((reg)[(bit) >> 5].r |= 1 << ((bit) & 0x1f))
+#define APIC_CLEAR_MASK_BIT(reg, bit) \
+ ((reg)[(bit) >> 5].r &= ~(1 << ((bit) & 0x1f)))
+
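+/*
+ * Example: bit 0x41 lives in word 0x41 >> 5 == 2 of the array, at bit
+ * position 0x41 & 0x1f == 1, so APIC_SET_MASK_BIT(reg, 0x41) ORs
+ * reg[2].r with (1 << 1).
+ */
+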
+#endif _IMPS_APIC_
diff --git a/i386/imps/cpu_number.h b/i386/imps/cpu_number.h
new file mode 100644
index 00000000..cae267b1
--- /dev/null
+++ b/i386/imps/cpu_number.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _IMPS_CPU_NUMBER_
+#define _IMPS_CPU_NUMBER_
+
+
+#ifndef ASSEMBLER
+
+#include "apic.h"
+
+static inline int
+cpu_number()
+{
+ return apic_local_unit.unit_id.r >> 24;
+}
+
+#else ASSEMBLER
+
+#include "impsasm.h"
+
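+/*
+ * The local APIC ID sits in the top byte (bits 24-31) of the unit ID
+ * register, so the +3 byte offset below picks up that byte on the
+ * little-endian i386, mirroring the ">> 24" in the C version above.
+ * APIC_LOCAL_UNIT_ID is the structure offset generated from
+ * impsasm.sym.
+ */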
+#define CPU_NUMBER(reg) \
+ movzbl APIC_LOCAL_VA+APIC_LOCAL_UNIT_ID+3,reg
+
+#endif ASSEMBLER
+
+
+#include "i386/cpu_number.h"
+
+
+#endif _IMPS_CPU_NUMBER_
diff --git a/i386/imps/cpus.h b/i386/imps/cpus.h
new file mode 100644
index 00000000..f49d85e6
--- /dev/null
+++ b/i386/imps/cpus.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#define NCPUS 2 /* XXX make it unlimited */
+#define MULTIPROCESSOR 1
diff --git a/i386/imps/imps.c b/i386/imps/imps.c
new file mode 100644
index 00000000..279ca3df
--- /dev/null
+++ b/i386/imps/imps.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/kern_return.h>
+
+void
+interrupt_processor(int which_cpu)
+{
+ panic("interrupt_processor");
+}
+
+void
+start_other_cpus()
+{
+	printf("start other CPUs please!!!\n");
+}
+
+kern_return_t
+cpu_control(int cpu, int *info, int count)
+{
+	printf("cpu_control %d\n", cpu);
+ return KERN_FAILURE;
+}
+
+kern_return_t
+cpu_start(int cpu)
+{
+	printf("cpu_start %d\n", cpu);
+ return KERN_FAILURE;
+}
+
diff --git a/i386/imps/impsasm.sym b/i386/imps/impsasm.sym
new file mode 100644
index 00000000..4093d41e
--- /dev/null
+++ b/i386/imps/impsasm.sym
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "apic.h"
+
+expr APIC_LOCAL_VA
+
+offset ApicLocalUnit apic_local unit_id
+
diff --git a/i386/include/Makefile.in b/i386/include/Makefile.in
new file mode 100644
index 00000000..d801f79e
--- /dev/null
+++ b/i386/include/Makefile.in
@@ -0,0 +1,36 @@
+#
+# Copyright (c) 1994 The University of Utah and
+# the Computer Systems Laboratory (CSL). All rights reserved.
+#
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+# IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+# ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+#
+# CSL requests users of this software to return to csl-dist@cs.utah.edu any
+# improvements that they make and grant CSL redistribution rights.
+#
+# Author: Bryan Ford, University of Utah CSL
+#
+
+
+#### Start of configuration section ####
+
+GSRCDIR = @top_gsrcdir@
+MSRCDIR = @top_srcdir@
+OBJDIR = @top_objdir@
+
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+
+include $(OBJDIR)/Makeconf
+
+##### End of configuration section #####
+
+include $(MSRCDIR)/include/Makerules
+
diff --git a/i386/include/Makerules b/i386/include/Makerules
new file mode 100644
index 00000000..4cd47478
--- /dev/null
+++ b/i386/include/Makerules
@@ -0,0 +1,47 @@
+#
+# Copyright (c) 1994 The University of Utah and
+# the Center for Software Science (CSS). All rights reserved.
+#
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+# IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+# ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+#
+# CSS requests users of this software to return to css-dist@cs.utah.edu any
+# improvements that they make and grant CSS redistribution rights.
+#
+# Author: Bryan Ford, University of Utah CSS
+
+
+all:
+
+clean:
+
+HFILES += $(patsubst $(GSRCDIR)/include/%,%, \
+ $(wildcard $(addprefix $(GSRCDIR)/include/, \
+ * */* */*/* */*/*/*))) \
+ $(patsubst $(MSRCDIR)/include/%,%, \
+ $(wildcard $(addprefix $(MSRCDIR)/include/, \
+ * */* */*/* */*/*/*)))
+
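+# The wildcard patterns above match header files up to four directory
+# levels below each include directory.
+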
+# Only install .defs and .h files.
+HFILES := $(filter %.h,$(HFILES)) $(filter %.defs,$(HFILES))
+
+$(INSTALL_INCDIR)/%: $(GSRCDIR)/include/%
+ $(MKDIR) $(patsubst %/,%,$(dir $@))
+ $(INSTALL) -m 644 $< $@
+$(INSTALL_INCDIR)/%: $(MSRCDIR)/include/%
+ $(MKDIR) $(patsubst %/,%,$(dir $@))
+ $(INSTALL) -m 644 $< $@
+
+include $(GSRCDIR)/Makerules
+
+install: $(addprefix $(INSTALL_INCDIR)/,$(HFILES)) $(INSTALL_INCDIR)/mach/i386
+
+$(INSTALL_INCDIR)/mach/i386:
+ ln -s machine $@
diff --git a/i386/include/mach/i386/asm.h b/i386/include/mach/i386/asm.h
new file mode 100644
index 00000000..c7755359
--- /dev/null
+++ b/i386/include/mach/i386/asm.h
@@ -0,0 +1,114 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+
+#define S_ARG0 4(%esp)
+#define S_ARG1 8(%esp)
+#define S_ARG2 12(%esp)
+#define S_ARG3 16(%esp)
+
+#define FRAME pushl %ebp; movl %esp, %ebp
+#define EMARF leave
+
+#define B_ARG0 8(%ebp)
+#define B_ARG1 12(%ebp)
+#define B_ARG2 16(%ebp)
+#define B_ARG3 20(%ebp)
+
+#ifdef i486
+#define TEXT_ALIGN 4
+#else
+#define TEXT_ALIGN 2
+#endif
+#define DATA_ALIGN 2
+#define ALIGN TEXT_ALIGN
+
+#define P2ALIGN(p2) .p2align p2 /* gas-specific */
+
+#define LCL(x) x
+
+#define LB(x,n) n
+#ifdef __STDC__
+#ifndef __ELF__
+#define EXT(x) _ ## x
+#define LEXT(x) _ ## x ## :
+#define SEXT(x) "_"#x
+#else
+#define EXT(x) x
+#define LEXT(x) x ## :
+#define SEXT(x) #x
+#endif
+#define LCLL(x) x ## :
+#define gLB(n) n ## :
+#define LBb(x,n) n ## b
+#define LBf(x,n) n ## f
+#else __STDC__
+#error XXX elf
+#define EXT(x) _/**/x
+#define LEXT(x) _/**/x/**/:
+#define LCLL(x) x/**/:
+#define gLB(n) n/**/:
+#define LBb(x,n) n/**/b
+#define LBf(x,n) n/**/f
+#endif __STDC__
+#define SVC .byte 0x9a; .long 0; .word 0x7
+
+#define String .ascii
+#define Value .word
+#define Times(a,b) (a*b)
+#define Divide(a,b) (a/b)
+
+#define INB inb %dx, %al
+#define OUTB outb %al, %dx
+#define INL inl %dx, %eax
+#define OUTL outl %eax, %dx
+
+#define data16 .byte 0x66
+#define addr16 .byte 0x67
+
+
+
+#ifdef GPROF
+
+#define MCOUNT .data; gLB(9) .long 0; .text; lea LBb(x, 9),%edx; call mcount
+#define ENTRY(x) .globl EXT(x); .p2align TEXT_ALIGN; LEXT(x) ; \
+ pushl %ebp; movl %esp, %ebp; MCOUNT; popl %ebp;
+#define ENTRY2(x,y) .globl EXT(x); .globl EXT(y); \
+ .p2align TEXT_ALIGN; LEXT(x) LEXT(y)
+#define ASENTRY(x) .globl x; .p2align TEXT_ALIGN; gLB(x) ; \
+ pushl %ebp; movl %esp, %ebp; MCOUNT; popl %ebp;
+
+#else GPROF
+
+#define MCOUNT
+#define ENTRY(x) .globl EXT(x); .p2align TEXT_ALIGN; LEXT(x)
+#define ENTRY2(x,y) .globl EXT(x); .globl EXT(y); \
+ .p2align TEXT_ALIGN; LEXT(x) LEXT(y)
+#define ASENTRY(x) .globl x; .p2align TEXT_ALIGN; gLB(x)
+#endif GPROF
+
+#define Entry(x) .globl EXT(x); .p2align TEXT_ALIGN; LEXT(x)
+#define DATA(x) .globl EXT(x); .p2align DATA_ALIGN; LEXT(x)
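+
+/*
+ * Typical use in an assembly source (illustrative; my_func is a
+ * placeholder name):
+ *
+ *	ENTRY(my_func)
+ *		FRAME
+ *		movl	B_ARG0, %eax
+ *		EMARF
+ *		ret
+ *
+ * ENTRY() emits the global, aligned label (plus the mcount hook under
+ * GPROF); FRAME and EMARF set up and tear down the %ebp stack frame.
+ */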
diff --git a/i386/include/mach/i386/bios.h b/i386/include/mach/i386/bios.h
new file mode 100644
index 00000000..6ad44457
--- /dev/null
+++ b/i386/include/mach/i386/bios.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifndef _MACH_MACHINE_BIOS_
+#define _MACH_MACHINE_BIOS_
+
+/*
+ * To make a call to a 16-bit BIOS entrypoint,
+ * fill in one of these structures and call bios_call().
+ */
+struct bios_call_params
+{
+ union
+ {
+ struct
+ {
+ unsigned short ax;
+ unsigned short bx;
+ unsigned short cx;
+ unsigned short dx;
+ } w;
+ struct
+ {
+ unsigned char al;
+ unsigned char ah;
+ unsigned char bl;
+ unsigned char bh;
+ unsigned char cl;
+ unsigned char ch;
+ unsigned char dl;
+ unsigned char dh;
+ } b;
+ } u;
+ unsigned short si;
+ unsigned short di;
+ unsigned short bp;
+ unsigned short ds;
+ unsigned short es;
+ unsigned short flags;
+};
+
+void bios_call(unsigned char int_num, struct bios_call_params *bcp);
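+
+/*
+ * Illustrative sketch (not part of the interface above): INT 0x12
+ * returns the conventional memory size in KB in AX, so a caller could
+ * do
+ *
+ *	struct bios_call_params p = { 0 };
+ *
+ *	bios_call(0x12, &p);
+ *	size_in_kb = p.u.w.ax;
+ */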
+
+#endif _MACH_MACHINE_BIOS_
diff --git a/i386/include/mach/i386/boolean.h b/i386/include/mach/i386/boolean.h
new file mode 100644
index 00000000..a33d007b
--- /dev/null
+++ b/i386/include/mach/i386/boolean.h
@@ -0,0 +1,37 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: boolean.h
+ *
+ * Boolean type, for I386.
+ */
+
+#ifndef _MACH_I386_BOOLEAN_H_
+#define _MACH_I386_BOOLEAN_H_
+
+typedef int boolean_t;
+
+#endif /* _MACH_I386_BOOLEAN_H_ */
diff --git a/i386/include/mach/i386/code16.h b/i386/include/mach/i386/code16.h
new file mode 100644
index 00000000..7bc2dc14
--- /dev/null
+++ b/i386/include/mach/i386/code16.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSS
+ */
+#ifndef _MACH_I386_CODE16_H_
+#define _MACH_I386_CODE16_H_
+
+/* Switch GAS into 16-bit mode. */
+#define CODE16 asm(".code16");
+
+/* Switch back to 32-bit mode. */
+#define CODE32 asm(".code32");
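+
+/*
+ * Usage sketch (illustrative; i16_stub is a placeholder): code that
+ * must run in 16-bit mode within a 32-bit kernel image is bracketed as
+ *
+ *	CODE16
+ *	void i16_stub(void) { ... }
+ *	CODE32
+ *
+ * so the assembler emits 16-bit operand/address encodings for it.
+ */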
+
+#endif _MACH_I386_CODE16_H_
diff --git a/i386/include/mach/i386/cthreads.h b/i386/include/mach/i386/cthreads.h
new file mode 100644
index 00000000..62a29cbe
--- /dev/null
+++ b/i386/include/mach/i386/cthreads.h
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _MACHINE_CTHREADS_H_
+#define _MACHINE_CTHREADS_H_
+
+typedef volatile int spin_lock_t;
+#define SPIN_LOCK_INITIALIZER 0
+#define spin_lock_init(s) (*(s) = 0)
+#define spin_lock_locked(s) (*(s) != 0)
+
+#ifdef __GNUC__
+
+#define spin_unlock(p) \
+ ({ register int _u__ ; \
+ __asm__ volatile("xorl %0, %0; \n\
+ xchgl %0, %1" \
+ : "=&r" (_u__), "=m" (*(p)) ); \
+ 0; })
+
+#define spin_try_lock(p)\
+ (!({ boolean_t _r__; \
+ __asm__ volatile("movl $1, %0; \n\
+ xchgl %0, %1" \
+ : "=&r" (_r__), "=m" (*(p)) ); \
+ _r__; }))
+
+#define cthread_sp() \
+ ({ int _sp__; \
+ __asm__("movl %%esp, %0" \
+ : "=g" (_sp__) ); \
+ _sp__; })
+
+#endif /* __GNUC__ */
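+
+/*
+ * Usage sketch (illustrative): spin_try_lock() evaluates true when the
+ * xchg acquires the lock, so a simple busy-wait acquire/release is
+ *
+ *	spin_lock_t lock = SPIN_LOCK_INITIALIZER;
+ *
+ *	while (!spin_try_lock(&lock))
+ *		;			(spin until the xchg wins)
+ *	... critical section ...
+ *	spin_unlock(&lock);
+ */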
+
+#endif _MACHINE_CTHREADS_H_
diff --git a/i386/include/mach/i386/debug_reg.h b/i386/include/mach/i386/debug_reg.h
new file mode 100644
index 00000000..ab4293a0
--- /dev/null
+++ b/i386/include/mach/i386/debug_reg.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_I386_DEBUG_REG_H_
+#define _MACH_I386_DEBUG_REG_H_
+
+/* Bits in DR7 - debug control register */
+#define DR7_LEN3 0xc0000000
+#define DR7_RW3 0x30000000
+#define DR7_LEN2 0x0c000000
+#define DR7_RW2 0x03000000
+#define DR7_LEN1 0x00c00000
+#define DR7_RW1 0x00300000
+#define DR7_LEN0 0x000c0000
+#define DR7_RW0 0x00030000
+#define DR7_GD 0x00002000
+#define DR7_GE 0x00000200
+#define DR7_LE 0x00000100
+#define DR7_G3 0x00000080
+#define DR7_L3 0x00000040
+#define DR7_G2 0x00000020
+#define DR7_L2 0x00000010
+#define DR7_G1 0x00000008
+#define DR7_L1 0x00000004
+#define DR7_G0 0x00000002
+#define DR7_L0 0x00000001
+
+/* Shift values for multibit fields in DR7 */
+#define DR7_LEN3_SHIFT 30
+#define DR7_RW3_SHIFT 28
+#define DR7_LEN2_SHIFT 26
+#define DR7_RW2_SHIFT 24
+#define DR7_LEN1_SHIFT 22
+#define DR7_RW1_SHIFT 20
+#define DR7_LEN0_SHIFT 18
+#define DR7_RW0_SHIFT 16
+
+/* Values for LEN fields in DR7 */
+#define DR7_LEN_1 0
+#define DR7_LEN_2 1
+#define DR7_LEN_4 3
+
+/* Values for RW fields in DR7 */
+#define DR7_RW_INST 0 /* Break on instruction execution */
+#define DR7_RW_WRITE 1 /* Break on data writes */
+#define DR7_RW_IO 2 /* Break on I/O reads and writes (Pentium only) */
+#define DR7_RW_DATA 3 /* Break on data reads and writes */
+
+
+/* Bits in DR6 - debug status register */
+#define DR6_BT 0x00008000
+#define DR6_BS 0x00004000
+#define DR6_BD 0x00002000
+#define DR6_B3 0x00000008
+#define DR6_B2 0x00000004
+#define DR6_B1 0x00000002
+#define DR6_B0 0x00000001
+
+
+#include <mach/inline.h>
+
+/* Functions to read debug registers. */
+
+MACH_INLINE unsigned get_dr0()
+{
+ unsigned val;
+ asm volatile("movl %%dr0,%0" : "=r" (val));
+ return val;
+}
+
+MACH_INLINE unsigned get_dr1()
+{
+ unsigned val;
+ asm volatile("movl %%dr1,%0" : "=r" (val));
+ return val;
+}
+
+MACH_INLINE unsigned get_dr2()
+{
+ unsigned val;
+ asm volatile("movl %%dr2,%0" : "=r" (val));
+ return val;
+}
+
+MACH_INLINE unsigned get_dr3()
+{
+ unsigned val;
+ asm volatile("movl %%dr3,%0" : "=r" (val));
+ return val;
+}
+
+MACH_INLINE unsigned get_dr6()
+{
+ unsigned val;
+ asm volatile("movl %%dr6,%0" : "=r" (val));
+ return val;
+}
+
+MACH_INLINE unsigned get_dr7()
+{
+ unsigned val;
+ asm volatile("movl %%dr7,%0" : "=r" (val));
+ return val;
+}
+
+/* Functions to set debug registers. */
+
+MACH_INLINE void set_dr0(unsigned val)
+{
+	asm volatile("movl %0,%%dr0" : : "r" (val));
+}
+
+MACH_INLINE void set_dr1(unsigned val)
+{
+ asm volatile("movl %0,%%dr1" : : "r" (val));
+}
+
+MACH_INLINE void set_dr2(unsigned val)
+{
+ asm volatile("movl %0,%%dr2" : : "r" (val));
+}
+
+MACH_INLINE void set_dr3(unsigned val)
+{
+ asm volatile("movl %0,%%dr3" : : "r" (val));
+}
+
+MACH_INLINE void set_dr6(unsigned val)
+{
+ asm volatile("movl %0,%%dr6" : : "r" (val));
+}
+
+MACH_INLINE void set_dr7(unsigned val)
+{
+ asm volatile("movl %0,%%dr7" : : "r" (val));
+}
+
+
+/* Functions to set global breakpoints. */
+
+MACH_INLINE void set_b0(unsigned addr, unsigned len, unsigned rw)
+{
+ set_dr0(addr);
+ addr = ((get_dr7() & ~(DR7_LEN0 | DR7_RW0))
+ | (len << DR7_LEN0_SHIFT) | (rw << DR7_RW0_SHIFT)
+ | DR7_GE | DR7_G0);
+ set_dr7(addr);
+}
+
+MACH_INLINE void set_b1(unsigned addr, unsigned len, unsigned rw)
+{
+ set_dr1(addr);
+ set_dr7((get_dr7() & ~(DR7_LEN1 | DR7_RW1))
+ | (len << DR7_LEN1_SHIFT) | (rw << DR7_RW1_SHIFT)
+ | DR7_GE | DR7_G1);
+}
+
+MACH_INLINE void set_b2(unsigned addr, unsigned len, unsigned rw)
+{
+ set_dr2(addr);
+ set_dr7((get_dr7() & ~(DR7_LEN2 | DR7_RW2))
+ | (len << DR7_LEN2_SHIFT) | (rw << DR7_RW2_SHIFT)
+ | DR7_GE | DR7_G2);
+}
+
+MACH_INLINE void set_b3(unsigned addr, unsigned len, unsigned rw)
+{
+ set_dr3(addr);
+ set_dr7((get_dr7() & ~(DR7_LEN3 | DR7_RW3))
+ | (len << DR7_LEN3_SHIFT) | (rw << DR7_RW3_SHIFT)
+ | DR7_GE | DR7_G3);
+}
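+
+/*
+ * Usage sketch (illustrative; `watched' is a placeholder): to trap all
+ * reads and writes of a 4-byte, 4-byte-aligned variable with hardware
+ * breakpoint 0:
+ *
+ *	static unsigned watched;
+ *	...
+ *	set_b0((unsigned) &watched, DR7_LEN_4, DR7_RW_DATA);
+ *
+ * DR6_B0 is set in DR6 when the breakpoint fires.
+ */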
+
+
+
+#endif /* _MACH_I386_DEBUG_REG_H_ */
diff --git a/i386/include/mach/i386/disk.h b/i386/include/mach/i386/disk.h
new file mode 100644
index 00000000..40ed4fa8
--- /dev/null
+++ b/i386/include/mach/i386/disk.h
@@ -0,0 +1,120 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * disk.h
+ */
+
+#if defined(__linux__) || defined(__masix__)
+#define PART_DISK 4 /* partition number for entire disk */
+#else
+#define PART_DISK 2 /* partition number for entire disk */
+#endif
+
+
+/* driver ioctl() commands */
+
+#define V_CONFIG _IOW('v',1,union io_arg)/* Configure Drive */
+#define V_REMOUNT _IO('v',2) /* Remount Drive */
+#define V_ADDBAD _IOW('v',3,union io_arg)/* Add Bad Sector */
+#define V_GETPARMS _IOR('v',4,struct disk_parms) /* Get drive/partition parameters */
+#define V_FORMAT _IOW('v',5,union io_arg)/* Format track(s) */
+#define V_PDLOC _IOR('v',6,int) /* Ask driver where pdinfo is on disk */
+
+#define V_ABS _IOW('v',9,int) /* set a sector for an absolute addr */
+#define V_RDABS _IOW('v',10,struct absio)/* Read a sector at an absolute addr */
+#define V_WRABS _IOW('v',11,struct absio)/* Write a sector to absolute addr */
+#define V_VERIFY _IOWR('v',12,union vfy_io)/* Read verify sector(s) */
+#define V_XFORMAT _IO('v',13) /* Selectively mark sectors as bad */
+#define V_SETPARMS _IOW('v',14,int) /* Set drivers parameters */
+
+
+/*
+ * Data structure for the V_VERIFY ioctl
+ */
+union vfy_io {
+ struct {
+ long abs_sec; /* absolute sector number */
+ u_short num_sec; /* number of sectors to verify */
+ u_short time_flg; /* flag to indicate time the ops */
+ }vfy_in;
+ struct {
+ long deltatime; /* duration of operation */
+ u_short err_code; /* reason for failure */
+ }vfy_out;
+};
+
+
+/* data structure returned by the Get Parameters ioctl: */
+struct disk_parms {
+/*00*/ char dp_type; /* Disk type (see below) */
+ u_char dp_heads; /* Number of heads */
+ u_short dp_cyls; /* Number of cylinders */
+/*04*/ u_char dp_sectors; /* Number of sectors/track */
+ u_short dp_secsiz; /* Number of bytes/sector */
+ /* for this partition: */
+/*08*/ u_short dp_ptag; /* Partition tag */
+ u_short dp_pflag; /* Partition flag */
+/*0c*/ long dp_pstartsec; /* Starting absolute sector number */
+/*10*/ long dp_pnumsec; /* Number of sectors */
+/*14*/ u_char dp_dosheads; /* Number of heads */
+ u_short dp_doscyls; /* Number of cylinders */
+/*18*/ u_char dp_dossectors; /* Number of sectors/track */
+};
+
+/* Disk types for disk_parms.dp_type: */
+#define DPT_WINI 1 /* Winchester disk */
+#define DPT_FLOPPY 2 /* Floppy */
+#define DPT_OTHER 3 /* Other type of disk */
+#define DPT_NOTDISK 0 /* Not a disk device */
+
+/* Data structure for V_RDABS/V_WRABS ioctl's */
+struct absio {
+ long abs_sec; /* Absolute sector number (from 0) */
+ char *abs_buf; /* Sector buffer */
+};
+
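+/*
+ * Usage sketch (illustrative; the device path is a placeholder):
+ *
+ *	struct disk_parms dp;
+ *	int fd = open("/dev/hd0", O_RDONLY);
+ *
+ *	if (ioctl(fd, V_GETPARMS, &dp) == 0 && dp.dp_type == DPT_WINI)
+ *		printf("%d cyls, %d heads, %d sectors/track\n",
+ *		       dp.dp_cyls, dp.dp_heads, dp.dp_sectors);
+ */
+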
diff --git a/i386/include/mach/i386/dpmi.h b/i386/include/mach/i386/dpmi.h
new file mode 100644
index 00000000..51a638c8
--- /dev/null
+++ b/i386/include/mach/i386/dpmi.h
@@ -0,0 +1,537 @@
+/*
+ * Copyright (c) 1996-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _FLUX_KERNEL_I386_DOS_I16_DPMI_H_
+#define _FLUX_KERNEL_I386_DOS_I16_DPMI_H_
+
+#include <mach/inline.h>
+#include <mach/machine/seg.h>
+
+typedef unsigned short dpmi_error_t;
+
+#define DPMI_UNSUPPORTED_FUNCTION 0x8001
+#define DPMI_OBJECT_WRONG_STATE 0x8002
+#define DPMI_SYSTEM_INTEGRITY 0x8003
+#define DPMI_DEADLOCK 0x8004
+#define DPMI_SERIALIZATION_CANCELLED 0x8005
+#define DPMI_OUT_OF_RESOURCES 0x8010
+#define DPMI_DESCRIPTOR_UNAVAILABLE 0x8011
+#define DPMI_LINEAR_MEMORY_UNAVAILABLE 0x8012
+#define DPMI_PHYSICAL_MEMORY_UNAVAILABLE 0x8013
+#define DPMI_BACKING_STORE_UNAVAILABLE 0x8014
+#define DPMI_CALLBACK_UNAVAILABLE 0x8015
+#define DPMI_HANDLE_UNAVAILABLE 0x8016
+#define DPMI_MAX_LOCK_COUNT_EXCEEDED 0x8017
+#define DPMI_ALREADY_SERIALIZED_EXCLUSIVELY 0x8018
+#define DPMI_ALREADY_SERIALIZED_SHARED 0x8019
+#define DPMI_INVALID_VALUE 0x8021
+#define DPMI_INVALID_SELECTOR 0x8022
+#define DPMI_INVALID_HANDLE 0x8023
+#define DPMI_INVALID_CALLBACK 0x8024
+#define DPMI_INVALID_LINEAR_ADDRESS 0x8025
+#define DPMI_NOT_SUPPORTED_BY_HARDWARE 0x8026
+
+struct real_call_data; /*XXX*/
+
+MACH_INLINE dpmi_error_t dpmi_switch_to_pmode(
+ struct far_pointer_16 *pmode_entry_vector,
+ unsigned short host_data_seg)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ movw %3,%%es
+ lcallw %2
+ jc 1f
+ xorw %%ax,%%ax
+ 1: pushw %%ds
+ popw %%es
+ " : "=a" (err)
+ : "a" (1),
+ "m" (*pmode_entry_vector),
+ "rm" (host_data_seg));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_allocate_descriptors(
+ unsigned short count,
+ unsigned short *out_selector)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ movw %%ax,%1
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err),
+ "=rm" (*out_selector)
+ : "a" (0x0000),
+ "c" (count));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_get_segment_base(
+	unsigned short selector,
+	unsigned long *out_base)
+{
+	dpmi_error_t err;
+
+	asm volatile("
+		int	$0x31
+		jc	1f
+		xorw	%%ax,%%ax
+		shll	$16,%%ecx
+		movw	%%dx,%%cx
+	1:
+	" : "=a" (err),
+	    "=c" (*out_base)
+	  : "a" (0x0006),
+	    "b" (selector)
+	  : "edx");
+
+	return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_set_segment_base(
+ unsigned short selector,
+ unsigned long base)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x0007),
+ "b" (selector),
+ "c" (base >> 16),
+ "d" (base));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_set_segment_limit(
+ unsigned short selector,
+ unsigned limit)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x0008),
+ "b" (selector),
+ "c" (limit >> 16),
+ "d" (limit));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_create_code_segment_alias(
+ unsigned short code_selector,
+ unsigned short *out_data_selector)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ movw %%ax,%1
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err),
+ "=rm" (*out_data_selector)
+ : "a" (0x000a),
+ "b" (code_selector));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_get_descriptor(
+ unsigned short selector,
+ struct i386_descriptor *out_descriptor)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x000b),
+ "b" (selector),
+ "D" (out_descriptor));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_set_descriptor(
+ unsigned short selector,
+ struct i386_descriptor *descriptor)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x000c),
+ "b" (selector),
+ "D" (descriptor));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_allocate_specific_descriptor(
+ unsigned short selector)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x000d),
+ "b" (selector));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_get_exception_handler(
+ unsigned char trapno,
+ struct far_pointer_32 *out_vector)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err),
+ "=c" (out_vector->seg),
+ "=d" (out_vector->ofs)
+ : "a" (0x0202),
+ "b" (trapno));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_set_exception_handler(
+ unsigned char trapno,
+ struct far_pointer_32 *vector)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x0203),
+ "b" (trapno),
+ "c" (vector->seg),
+ "d" (vector->ofs));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_get_interrupt_handler(
+ unsigned char intvec,
+ struct far_pointer_32 *out_vector)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err),
+ "=c" (out_vector->seg),
+ "=d" (out_vector->ofs)
+ : "a" (0x0204),
+ "b" (intvec));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_set_interrupt_handler(
+ unsigned char intvec,
+ struct far_pointer_32 *vector)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x0205),
+ "b" (intvec),
+ "c" (vector->seg),
+ "d" (vector->ofs));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_simulate_real_mode_interrupt(
+ unsigned char intnum,
+ struct real_call_data *call_data)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x0300),
+ "b" ((unsigned short)intnum),
+ "c" (0),
+ "D" (call_data));
+
+ return err;
+}
+
+struct dpmi_version_status
+{
+ unsigned char minor_version;
+ unsigned char major_version;
+ unsigned short flags;
+ unsigned char slave_pic_base;
+ unsigned char master_pic_base;
+ unsigned char processor_type;
+};
+
+MACH_INLINE void dpmi_get_version(struct dpmi_version_status *status)
+{
+ asm volatile("
+ int $0x31
+ " : "=a" (*((short*)&status->minor_version)),
+ "=b" (status->flags),
+ "=c" (status->processor_type),
+ "=d" (*((short*)&status->slave_pic_base))
+ : "a" (0x0400));
+}
+
+MACH_INLINE dpmi_error_t dpmi_allocate_memory(
+ unsigned size,
+ unsigned *out_linear_addr,
+ unsigned *out_mem_handle)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ shll $16,%%ebx
+ movw %%cx,%%bx
+ shll $16,%%esi
+ movw %%di,%%si
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err),
+ "=b" (*out_linear_addr),
+ "=S" (*out_mem_handle)
+ : "a" (0x0501),
+ "b" (size >> 16),
+ "c" (size)
+ : "ebx", "ecx", "esi", "edi");
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_free_memory(
+ unsigned mem_handle)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x0502),
+ "S" (mem_handle >> 16),
+ "D" (mem_handle));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_allocate_linear_memory(
+ unsigned linear_addr,
+ unsigned size,
+ unsigned flags,
+ unsigned *out_linear_addr,
+ unsigned *out_mem_handle)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err),
+ "=b" (*out_linear_addr),
+ "=S" (*out_mem_handle)
+ : "a" (0x0504),
+ "b" (linear_addr),
+ "c" (size),
+ "d" (flags));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_resize_linear_memory(
+ unsigned handle,
+ unsigned new_size,
+ unsigned flags,
+ unsigned short *update_selector_array,
+ unsigned update_selector_count,
+ unsigned *out_new_linear_addr)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err),
+ "=b" (*out_new_linear_addr)
+ : "a" (0x0505),
+ "b" (update_selector_array),
+ "c" (new_size),
+ "d" (flags),
+ "S" (handle),
+ "D" (update_selector_count));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_map_conventional_memory(
+ unsigned handle,
+ vm_offset_t offset,
+ vm_offset_t low_addr,
+ vm_size_t page_count)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x0509),
+ "S" (handle),
+ "b" (offset),
+ "c" (page_count),
+ "d" (low_addr));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_lock_linear_region(
+ vm_offset_t start_la,
+ vm_size_t size)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x0600),
+ "b" (start_la >> 16),
+ "c" (start_la),
+ "S" (size >> 16),
+ "D" (size));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_unlock_linear_region(
+ vm_offset_t start_la,
+ vm_size_t size)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err)
+ : "a" (0x0601),
+ "b" (start_la >> 16),
+ "c" (start_la),
+ "S" (size >> 16),
+ "D" (size));
+
+ return err;
+}
+
+MACH_INLINE dpmi_error_t dpmi_get_page_size(
+ unsigned *out_page_size)
+{
+ dpmi_error_t err;
+
+ asm volatile("
+ int $0x31
+ jc 1f
+ shll $16,%%ebx
+ movw %%cx,%%bx
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (err),
+ "=b" (*out_page_size)
+ : "a" (0x0604)
+ : "ecx");
+
+ return err;
+}
+
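+/*
+ * Usage sketch (illustrative): DPMI function 0501h returns both a
+ * linear address and an opaque handle split across register halves,
+ * which the wrapper above reassembles into single 32-bit values:
+ *
+ *	unsigned addr, handle;
+ *
+ *	if (dpmi_allocate_memory(0x10000, &addr, &handle) == 0) {
+ *		... use the 64KB block at linear address addr ...
+ *		dpmi_free_memory(handle);
+ *	}
+ *
+ * A zero return indicates success; otherwise one of the DPMI_* error
+ * codes at the top of this file is returned.
+ */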
+
+#endif /* _FLUX_KERNEL_I386_DOS_I16_DPMI_H_ */
diff --git a/i386/include/mach/i386/eflags.h b/i386/include/mach/i386/eflags.h
new file mode 100644
index 00000000..d7d54b2d
--- /dev/null
+++ b/i386/include/mach/i386/eflags.h
@@ -0,0 +1,49 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_EFLAGS_H_
+#define _MACH_I386_EFLAGS_H_
+
+/*
+ * i386 flags register
+ */
+#define EFL_CF 0x00000001 /* carry */
+#define EFL_PF 0x00000004 /* parity of low 8 bits */
+#define EFL_AF 0x00000010 /* carry out of bit 3 */
+#define EFL_ZF 0x00000040 /* zero */
+#define EFL_SF 0x00000080 /* sign */
+#define EFL_TF 0x00000100 /* trace trap */
+#define EFL_IF 0x00000200 /* interrupt enable */
+#define EFL_DF 0x00000400 /* direction */
+#define EFL_OF 0x00000800 /* overflow */
+#define EFL_IOPL 0x00003000 /* IO privilege level: */
+#define EFL_IOPL_KERNEL 0x00000000 /* kernel */
+#define EFL_IOPL_USER 0x00003000 /* user */
+#define EFL_NT 0x00004000 /* nested task */
+#define EFL_RF 0x00010000 /* resume without tracing */
+#define EFL_VM 0x00020000 /* virtual 8086 mode */
+
+#endif _MACH_I386_EFLAGS_H_
diff --git a/i386/include/mach/i386/exception.h b/i386/include/mach/i386/exception.h
new file mode 100644
index 00000000..1aaf6c75
--- /dev/null
+++ b/i386/include/mach/i386/exception.h
@@ -0,0 +1,85 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Codes and subcodes for 80386 exceptions.
+ */
+
+/*
+ * EXC_BAD_INSTRUCTION
+ */
+
+#ifndef _MACH_I386_EXCEPTION_H_
+#define _MACH_I386_EXCEPTION_H_
+
+#define EXC_I386_INVOP 1
+
+/*
+ * EXC_ARITHMETIC
+ */
+
+#define EXC_I386_DIV 1
+#define EXC_I386_INTO 2
+#define EXC_I386_NOEXT 3
+#define EXC_I386_EXTOVR 4
+#define EXC_I386_EXTERR 5
+#define EXC_I386_EMERR 6
+#define EXC_I386_BOUND 7
+
+/*
+ * EXC_SOFTWARE
+ */
+
+/*
+ * EXC_BAD_ACCESS
+ */
+
+/*
+ * EXC_BREAKPOINT
+ */
+
+#define EXC_I386_SGL 1
+#define EXC_I386_BPT 2
+
+#define EXC_I386_DIVERR		0	/* divide by 0 error */
+#define EXC_I386_SGLSTP 1 /* single step */
+#define EXC_I386_NMIFLT 2 /* NMI */
+#define EXC_I386_BPTFLT 3 /* breakpoint fault */
+#define EXC_I386_INTOFLT 4 /* INTO overflow fault */
+#define EXC_I386_BOUNDFLT 5 /* BOUND instruction fault */
+#define EXC_I386_INVOPFLT 6 /* invalid opcode fault */
+#define EXC_I386_NOEXTFLT 7 /* extension not available fault*/
+#define EXC_I386_DBLFLT 8 /* double fault */
+#define EXC_I386_EXTOVRFLT 9 /* extension overrun fault */
+#define EXC_I386_INVTSSFLT 10 /* invalid TSS fault */
+#define EXC_I386_SEGNPFLT 11 /* segment not present fault */
+#define EXC_I386_STKFLT 12 /* stack fault */
+#define EXC_I386_GPFLT 13 /* general protection fault */
+#define EXC_I386_PGFLT 14 /* page fault */
+#define EXC_I386_EXTERRFLT 16 /* extension error fault */
+#define EXC_I386_ENDPERR 33 /* emulated extension error flt */
+#define EXC_I386_ENOEXTFLT 32 /* emulated ext not present */
+
+#endif /* _MACH_I386_EXCEPTION_H_ */
diff --git a/i386/include/mach/i386/exec/elf.h b/i386/include/mach/i386/exec/elf.h
new file mode 100644
index 00000000..5155b3d5
--- /dev/null
+++ b/i386/include/mach/i386/exec/elf.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_I386_EXEC_ELF_H_
+#define _MACH_I386_EXEC_ELF_H_
+
+typedef unsigned long Elf32_Addr;
+typedef unsigned short Elf32_Half;
+typedef unsigned long Elf32_Off;
+typedef signed long Elf32_Sword;
+typedef unsigned long Elf32_Word;
+
+/* Architecture identification parameters for i386. */
+#define MY_EI_DATA ELFDATA2LSB
+#define MY_E_MACHINE EM_386
+
+#endif /* _MACH_I386_EXEC_ELF_H_ */
diff --git a/i386/include/mach/i386/far_ptr.h b/i386/include/mach/i386/far_ptr.h
new file mode 100644
index 00000000..e85962d9
--- /dev/null
+++ b/i386/include/mach/i386/far_ptr.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSS
+ */
+#ifndef _MACH_I386_FAR_PTR_H_
+#define _MACH_I386_FAR_PTR_H_
+
+struct far_pointer_16
+{
+ unsigned short ofs;
+ unsigned short seg;
+};
+
+struct far_pointer_32
+{
+ unsigned long ofs;
+ unsigned short seg;
+};
+
+#endif /* _MACH_I386_FAR_PTR_H_ */
diff --git a/i386/include/mach/i386/fp_reg.h b/i386/include/mach/i386/fp_reg.h
new file mode 100644
index 00000000..6fe7af56
--- /dev/null
+++ b/i386/include/mach/i386/fp_reg.h
@@ -0,0 +1,108 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_FP_REG_H_
+#define _MACH_I386_FP_REG_H_
+/*
+ * Floating point registers and status, as saved
+ * and restored by FP save/restore instructions.
+ */
+struct i386_fp_save {
+ unsigned short fp_control; /* control */
+ unsigned short fp_unused_1;
+ unsigned short fp_status; /* status */
+ unsigned short fp_unused_2;
+ unsigned short fp_tag; /* register tags */
+ unsigned short fp_unused_3;
+ unsigned int fp_eip; /* eip at failed instruction */
+ unsigned short fp_cs; /* cs at failed instruction */
+ unsigned short fp_opcode; /* opcode of failed instruction */
+ unsigned int fp_dp; /* data address */
+ unsigned short fp_ds; /* data segment */
+ unsigned short fp_unused_4;
+};
+
+struct i386_fp_regs {
+ unsigned short fp_reg_word[5][8];
+ /* space for 8 80-bit FP registers */
+};
+
+/*
+ * Control register
+ */
+#define FPC_IE 0x0001 /* enable invalid operation
+ exception */
+#define FPC_IM FPC_IE
+#define FPC_DE 0x0002 /* enable denormalized operation
+ exception */
+#define FPC_DM FPC_DE
+#define FPC_ZE 0x0004 /* enable zero-divide exception */
+#define FPC_ZM FPC_ZE
+#define FPC_OE 0x0008 /* enable overflow exception */
+#define FPC_OM FPC_OE
+#define FPC_UE 0x0010 /* enable underflow exception */
+#define FPC_PE 0x0020 /* enable precision exception */
+#define FPC_PC 0x0300 /* precision control: */
+#define FPC_PC_24 0x0000 /* 24 bits */
+#define FPC_PC_53 0x0200 /* 53 bits */
+#define FPC_PC_64 0x0300 /* 64 bits */
+#define FPC_RC 0x0c00 /* rounding control: */
+#define FPC_RC_RN 0x0000 /* round to nearest or even */
+#define FPC_RC_RD 0x0400 /* round down */
+#define FPC_RC_RU 0x0800 /* round up */
+#define FPC_RC_CHOP 0x0c00 /* chop */
+#define FPC_IC 0x1000 /* infinity control (obsolete) */
+#define FPC_IC_PROJ 0x0000 /* projective infinity */
+#define FPC_IC_AFF 0x1000 /* affine infinity (std) */
+
+/*
+ * Status register
+ */
+#define FPS_IE 0x0001 /* invalid operation */
+#define FPS_DE 0x0002 /* denormalized operand */
+#define FPS_ZE 0x0004 /* divide by zero */
+#define FPS_OE 0x0008 /* overflow */
+#define FPS_UE 0x0010 /* underflow */
+#define FPS_PE 0x0020 /* precision */
+#define FPS_SF 0x0040 /* stack flag */
+#define FPS_ES 0x0080 /* error summary */
+#define FPS_C0 0x0100 /* condition code bit 0 */
+#define FPS_C1 0x0200 /* condition code bit 1 */
+#define FPS_C2 0x0400 /* condition code bit 2 */
+#define FPS_TOS 0x3800 /* top-of-stack pointer */
+#define FPS_TOS_SHIFT 11
+#define FPS_C3 0x4000 /* condition code bit 3 */
+#define FPS_BUSY 0x8000 /* FPU busy */
+
+/*
+ * Kind of floating-point support provided by kernel.
+ */
+#define FP_NO 0 /* no floating point */
+#define FP_SOFT 1 /* software FP emulator */
+#define FP_287 2 /* 80287 */
+#define FP_387 3 /* 80387 or 80486 */
+
+#endif /* _MACH_I386_FP_REG_H_ */
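The FPC_* and FPS_* masks apply to the fp_control and fp_status words captured in struct i386_fp_save. A hedged sketch of typical manipulations, assuming a save area captured elsewhere (the function and its policy are illustrative, not part of the interface):

/* Select 53-bit precision and round-to-nearest in a saved control word,
   then report which register is currently the top of the FP stack. */
static int
example_fp_tune(struct i386_fp_save *save)
{
	int tos;

	save->fp_control = (save->fp_control & ~(FPC_PC | FPC_RC))
			   | FPC_PC_53 | FPC_RC_RN;

	tos = (save->fp_status & FPS_TOS) >> FPS_TOS_SHIFT;
	return tos;
}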
diff --git a/i386/include/mach/i386/ioccom.h b/i386/include/mach/i386/ioccom.h
new file mode 100644
index 00000000..17566a39
--- /dev/null
+++ b/i386/include/mach/i386/ioccom.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1982, 1986 Regents of the University of California.
+ * All rights reserved. The Berkeley software License Agreement
+ * specifies the terms and conditions for redistribution.
+ */
+
+#ifndef __sys_ioccom_h
+#define __sys_ioccom_h
+
+/*
+ * Ioctl's have the command encoded in the lower word,
+ * and the size of any in or out parameters in the upper
+ * word. The high 2 bits of the upper word are used
+ * to encode the in/out status of the parameter; for now
+ * we restrict parameters to at most 255 bytes.
+ */
+#define _IOCPARM_MASK 0xff /* parameters must be < 256 bytes */
+#define _IOC_VOID 0x20000000 /* no parameters */
+#define _IOC_OUT 0x40000000 /* copy out parameters */
+#define _IOC_IN 0x80000000 /* copy in parameters */
+#define _IOC_INOUT (_IOC_IN|_IOC_OUT)
+/* the 0x20000000 is so we can distinguish new ioctl's from old */
+#define _IO(x,y) (_IOC_VOID|((x)<<8)|(y))
+#define _IOR(x,y,t) (_IOC_OUT|((sizeof(t)&_IOCPARM_MASK)<<16)|((x)<<8)|(y))
+#define _IORN(x,y,t) (_IOC_OUT|(((t)&_IOCPARM_MASK)<<16)|((x)<<8)|(y))
+#define _IOW(x,y,t) (_IOC_IN|((sizeof(t)&_IOCPARM_MASK)<<16)|((x)<<8)|(y))
+#define _IOWN(x,y,t) (_IOC_IN|(((t)&_IOCPARM_MASK)<<16)|((x)<<8)|(y))
+/* this should be _IORW, but stdio got there first */
+#define _IOWR(x,y,t) (_IOC_INOUT|((sizeof(t)&_IOCPARM_MASK)<<16)|((x)<<8)|(y))
+#define _IOWRN(x,y,t) (_IOC_INOUT|(((t)&_IOCPARM_MASK)<<16)|((x)<<8)|(y))
+
+#endif /* !__sys_ioccom_h */
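As a usage sketch, a driver header builds its command codes from these macros, passing the group letter as a character constant and the parameter structure as the type. Everything below except _IOR, _IOW and _IOCPARM_MASK is hypothetical:

struct ex_param {
	int	value;
};

#define EXIOCGET	_IOR('E', 1, struct ex_param)	/* copy value out */
#define EXIOCSET	_IOW('E', 2, struct ex_param)	/* copy value in  */

/* Decoding helpers a driver might apply to a received command word. */
#define EXIOC_LEN(cmd)		(((cmd) >> 16) & _IOCPARM_MASK)
#define EXIOC_GROUP(cmd)	(((cmd) >> 8) & 0xff)
#define EXIOC_NUM(cmd)		((cmd) & 0xff)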
diff --git a/i386/include/mach/i386/kern_return.h b/i386/include/mach/i386/kern_return.h
new file mode 100644
index 00000000..c51915de
--- /dev/null
+++ b/i386/include/mach/i386/kern_return.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern_return.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Machine-dependent kernel return definitions.
+ */
+
+#ifndef _MACH_I386_KERN_RETURN_H_
+#define _MACH_I386_KERN_RETURN_H_
+
+#ifndef ASSEMBLER
+typedef int kern_return_t;
+#endif /* ASSEMBLER */
+#endif /* _MACH_I386_KERN_RETURN_H_ */
diff --git a/i386/include/mach/i386/mach_i386.defs b/i386/include/mach/i386/mach_i386.defs
new file mode 100644
index 00000000..5c30b67a
--- /dev/null
+++ b/i386/include/mach/i386/mach_i386.defs
@@ -0,0 +1,68 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Special functions for i386.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+ mach_i386 3800;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <device/device_types.defs>
+
+type device_list_t = ^array[] of device_t;
+
+type descriptor_t = struct[2] of int;
+type descriptor_list_t = array[*] of descriptor_t;
+
+import <mach/machine/mach_i386_types.h>;
+
+routine i386_io_port_add(
+ target_thread : thread_t;
+ device : device_t);
+
+routine i386_io_port_remove(
+ target_thread : thread_t;
+ device : device_t);
+
+routine i386_io_port_list(
+ target_thread : thread_t;
+ out device_list : device_list_t);
+
+routine i386_set_ldt(
+ target_thread : thread_t;
+ first_selector : int;
+ desc_list : descriptor_list_t, serverCopy);
+
+routine i386_get_ldt(
+ target_thread : thread_t;
+ first_selector : int;
+ selector_count : int;
+ out desc_list : descriptor_list_t);
diff --git a/i386/include/mach/i386/mach_i386_types.h b/i386/include/mach/i386/mach_i386_types.h
new file mode 100644
index 00000000..43780a3f
--- /dev/null
+++ b/i386/include/mach/i386/mach_i386_types.h
@@ -0,0 +1,49 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Type definitions for i386 interface routines.
+ */
+
+#ifndef _MACH_MACH_I386_TYPES_H_
+#define _MACH_MACH_I386_TYPES_H_
+
+/*
+ * Array of devices.
+ */
+typedef device_t *device_list_t;
+
+/*
+ * i386 segment descriptor.
+ */
+struct descriptor {
+ unsigned int low_word;
+ unsigned int high_word;
+};
+
+typedef struct descriptor descriptor_t;
+typedef struct descriptor *descriptor_list_t;
+
+#endif /* _MACH_MACH_I386_TYPES_H_ */
diff --git a/i386/include/mach/i386/machine_types.defs b/i386/include/mach/i386/machine_types.defs
new file mode 100755
index 00000000..991b7400
--- /dev/null
+++ b/i386/include/mach/i386/machine_types.defs
@@ -0,0 +1,71 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/machine/machine_types.defs
+ * Author: Alessandro Forin
+ * Date: 7/92
+ *
+ * Header file for the basic, machine-dependent data types.
+ * Version for 32 bit architectures.
+ *
+ */
+
+#ifndef _MACHINE_MACHINE_TYPES_DEFS_
+#define _MACHINE_MACHINE_TYPES_DEFS_ 1
+
+/*
+ * A natural_t is the type for the native
+ * integer type, e.g. 32 or 64 or.. whatever
+ * register size the machine has. Unsigned, it is
+ * used for entities that might be either
+ * unsigned integers or pointers, and for
+ * type-casting between the two.
+ * For instance, the IPC system represents
+ * a port in user space as an integer and
+ * in kernel space as a pointer.
+ */
+type natural_t = unsigned32;
+
+/*
+ * An integer_t is the signed counterpart
+ * of the natural_t type. Both types are
+ * only supposed to be used to define
+ * other types in a machine-independent
+ * way.
+ */
+type integer_t = int32;
+
+
+#if MACH_IPC_COMPAT
+/*
+ * For the old IPC interface
+ */
+#define MSG_TYPE_PORT_NAME MACH_MSG_TYPE_INTEGER_32
+
+#endif /* MACH_IPC_COMPAT */
+
+
+#endif /* _MACHINE_MACHINE_TYPES_DEFS_ */
diff --git a/i386/include/mach/i386/multiboot.h b/i386/include/mach/i386/multiboot.h
new file mode 100644
index 00000000..e0db8237
--- /dev/null
+++ b/i386/include/mach/i386/multiboot.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_I386_MULTIBOOT_H_
+#define _MACH_I386_MULTIBOOT_H_
+
+#include <mach/machine/vm_types.h>
+
+/* For a.out kernel boot images, the following header must appear
+ somewhere in the first 8192 bytes of the kernel image file. */
+struct multiboot_header
+{
+ /* Must be MULTIBOOT_MAGIC */
+ unsigned magic;
+
+ /* Feature flags - see below. */
+ unsigned flags;
+
+ /*
+ * Checksum
+ *
+ * The above fields plus this one must equal 0 mod 2^32.
+ */
+ unsigned checksum;
+
+ /* These are only valid if MULTIBOOT_AOUT_KLUDGE is set. */
+ vm_offset_t header_addr;
+ vm_offset_t load_addr;
+ vm_offset_t load_end_addr;
+ vm_offset_t bss_end_addr;
+ vm_offset_t entry;
+};
+
+/* The entire multiboot_header must be contained
+ within the first MULTIBOOT_SEARCH bytes of the kernel image. */
+#define MULTIBOOT_SEARCH 8192
+
+/* Magic value identifying the multiboot_header. */
+#define MULTIBOOT_MAGIC 0x1badb002
+
+/* Features flags for 'flags'.
+ If a boot loader sees a flag in MULTIBOOT_MUSTKNOW set
+ and it doesn't understand it, it must fail. */
+#define MULTIBOOT_MUSTKNOW 0x0000ffff
+
+/* Align all boot modules on page (4KB) boundaries. */
+#define MULTIBOOT_PAGE_ALIGN 0x00000001
+
+/* Memory information must be provided in the multiboot_info structure. */
+#define MULTIBOOT_MEMORY_INFO 0x00000002
+
+/* Use the load address fields above instead of the ones in the a.out header
+ to figure out what to load where, and what to do afterwards.
+ This should only be needed for a.out kernel images
+ (ELF and other formats can generally provide the needed information). */
+#define MULTIBOOT_AOUT_KLUDGE 0x00010000
+
+/* The boot loader passes this value in register EAX to signal the kernel
+ that the multiboot method is being used */
+#define MULTIBOOT_VALID 0x2badb002
+
+/* The boot loader passes this data structure to the kernel in
+ register EBX on entry. */
+struct multiboot_info
+{
+ /* These flags indicate which parts of the multiboot_info are valid;
+ see below for the actual flag bit definitions. */
+ unsigned flags;
+
+ /* Lower/Upper memory installed in the machine.
+ Valid only if MULTIBOOT_MEMORY is set in flags word above. */
+ vm_size_t mem_lower;
+ vm_size_t mem_upper;
+
+ /* BIOS disk device the kernel was loaded from.
+ Valid only if MULTIBOOT_BOOT_DEVICE is set in flags word above. */
+ unsigned char boot_device[4];
+
+ /* Command-line for the OS kernel: a null-terminated ASCII string.
+ Valid only if MULTIBOOT_CMDLINE is set in flags word above. */
+ vm_offset_t cmdline;
+
+ /* List of boot modules loaded with the kernel.
+ Valid only if MULTIBOOT_MODS is set in flags word above. */
+ unsigned mods_count;
+ vm_offset_t mods_addr;
+
+ /* Symbol information for a.out or ELF executables. */
+ union
+ {
+ struct
+ {
+ /* a.out symbol information valid only if MULTIBOOT_AOUT_SYMS
+ is set in flags word above. */
+ vm_size_t tabsize;
+ vm_size_t strsize;
+ vm_offset_t addr;
+ unsigned reserved;
+ } a;
+
+ struct
+ {
+ /* ELF section header information valid only if
+ MULTIBOOT_ELF_SHDR is set in flags word above. */
+ unsigned num;
+ vm_size_t size;
+ vm_offset_t addr;
+ unsigned shndx;
+ } e;
+ } syms;
+
+ /* Memory map buffer.
+ Valid only if MULTIBOOT_MEM_MAP is set in flags word above. */
+ vm_size_t mmap_count;
+ vm_offset_t mmap_addr;
+};
+
+#define MULTIBOOT_MEMORY 0x00000001
+#define MULTIBOOT_BOOT_DEVICE 0x00000002
+#define MULTIBOOT_CMDLINE 0x00000004
+#define MULTIBOOT_MODS 0x00000008
+#define MULTIBOOT_AOUT_SYMS 0x00000010
+#define MULTIBOOT_ELF_SHDR 0x00000020
+#define MULTIBOOT_MEM_MAP 0x00000040
+
+
+/* The mods_addr field above contains the physical address of the first
+ of 'mods_count' multiboot_module structures. */
+struct multiboot_module
+{
+ /* Physical start and end addresses of the module data itself. */
+ vm_offset_t mod_start;
+ vm_offset_t mod_end;
+
+ /* Arbitrary ASCII string associated with the module. */
+ vm_offset_t string;
+
+ /* Boot loader must set to 0; OS must ignore. */
+ unsigned reserved;
+};
+
+
+/* The mmap_addr field above contains the physical address of the first
+ of the AddrRangeDesc structure. "size" represents the size of the
+ rest of the structure and optional padding. The offset to the beginning
+ of the next structure is therefore "size + 4". */
+struct AddrRangeDesc
+{
+ unsigned long size;
+ unsigned long BaseAddrLow;
+ unsigned long BaseAddrHigh;
+ unsigned long LengthLow;
+ unsigned long LengthHigh;
+ unsigned long Type;
+
+ /* unspecified optional padding... */
+};
+
+/* usable memory "Type", all others are reserved. */
+#define MB_ARD_MEMORY 1
+
+
+#endif /* _MACH_I386_MULTIBOOT_H_ */
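To make the "size + 4" stepping rule concrete, here is a hedged sketch of scanning the memory map. It treats mmap_count as the byte length of the buffer (the usual multiboot convention) and assumes the buffer is directly addressable at mmap_addr, which the kernel has to arrange separately:

static void
example_scan_mmap(struct multiboot_info *mi)
{
	vm_offset_t p, end;

	if (!(mi->flags & MULTIBOOT_MEM_MAP))
		return;

	p = mi->mmap_addr;
	end = mi->mmap_addr + mi->mmap_count;
	while (p < end) {
		struct AddrRangeDesc *d = (struct AddrRangeDesc *) p;

		if (d->Type == MB_ARD_MEMORY) {
			/* usable RAM: [BaseAddrLow, BaseAddrLow + LengthLow),
			   ignoring the high words on a 32-bit machine */
		}
		/* "size" covers the rest of the record, so step by size + 4. */
		p += d->size + 4;
	}
}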
diff --git a/i386/include/mach/i386/paging.h b/i386/include/mach/i386/paging.h
new file mode 100644
index 00000000..e24136c7
--- /dev/null
+++ b/i386/include/mach/i386/paging.h
@@ -0,0 +1,134 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Definitions relating to i386 page directories and page tables.
+ */
+#ifndef _MACH_I386_PAGING_H_
+#define _MACH_I386_PAGING_H_
+
+
+#define INTEL_OFFMASK 0xfff /* offset within page */
+#define PDESHIFT 22 /* page descriptor shift */
+#define PDEMASK 0x3ff /* mask for page descriptor index */
+#define PTESHIFT 12 /* page table shift */
+#define PTEMASK 0x3ff /* mask for page table index */
+
+/*
+ * Convert linear offset to page descriptor/page table index
+ */
+#define lin2pdenum(a) (((a) >> PDESHIFT) & PDEMASK)
+#define lin2ptenum(a) (((a) >> PTESHIFT) & PTEMASK)
+
+/*
+ * Convert page descriptor/page table index to linear address
+ */
+#define pdenum2lin(a) ((vm_offset_t)(a) << PDESHIFT)
+#define ptenum2lin(a) ((vm_offset_t)(a) << PTESHIFT)
+
+/*
+ * Number of ptes/pdes in a page table/directory.
+ */
+#define NPTES (i386_ptob(1)/sizeof(pt_entry_t))
+#define NPDES (i386_ptob(1)/sizeof(pt_entry_t))
+
+/*
+ * Hardware pte bit definitions (to be used directly on the ptes
+ * without using the bit fields).
+ */
+#define INTEL_PTE_VALID 0x00000001
+#define INTEL_PTE_WRITE 0x00000002
+#define INTEL_PTE_USER 0x00000004
+#define INTEL_PTE_WTHRU 0x00000008
+#define INTEL_PTE_NCACHE 0x00000010
+#define INTEL_PTE_REF 0x00000020
+#define INTEL_PTE_MOD 0x00000040
+#define INTEL_PTE_AVAIL 0x00000e00
+#define INTEL_PTE_PFN 0xfffff000
+
+/*
+ * Macros to translate between page table entry values
+ * and physical addresses.
+ */
+#define pa_to_pte(a) ((a) & INTEL_PTE_PFN)
+#define pte_to_pa(p) ((p) & INTEL_PTE_PFN)
+#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
+
+
+#ifndef ASSEMBLER
+
+#include <mach/inline.h>
+#include <mach/machine/vm_param.h>
+#include <mach/machine/proc_reg.h>
+
+/*
+ * i386/i486/i860 Page Table Entry
+ */
+typedef unsigned int pt_entry_t;
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+
+
+/*
+ * Load page directory 'pdir' and turn paging on.
+ * Assumes that 'pdir' equivalently maps the physical memory
+ * that contains the currently executing code,
+ * the currently loaded GDT and IDT, etc.
+ */
+MACH_INLINE void i386_paging_enable(vm_offset_t pdir)
+{
+	/* Load the page directory. */
+	set_cr3(pdir);
+
+	/* Turn on paging. */
+	asm volatile(
+		"movl	%0,%%cr0\n"
+		"jmp	1f\n"
+		"1:"
+		: : "r" (get_cr0() | CR0_PG));
+}
+
+/*
+ * Turn paging off.
+ * Assumes that the currently loaded page directory
+ * equivalently maps the physical memory
+ * that contains the currently executing code,
+ * the currently loaded GDT and IDT, etc.
+ */
+MACH_INLINE void i386_paging_disable(void)
+{
+	/* Turn paging off. */
+	asm volatile(
+		"movl	%0,%%cr0\n"
+		"jmp	1f\n"
+		"1:"
+		: : "r" (get_cr0() & ~CR0_PG));
+
+	/* Flush the TLB. */
+	set_cr3(0);
+}
+
+#endif /* !ASSEMBLER */
+
+#endif /* _MACH_I386_PAGING_H_ */
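The shift and mask macros split a 32-bit linear address into a directory index (bits 22-31), a table index (bits 12-21) and a page offset (bits 0-11). A minimal sketch of a software walk built on them, under the simplifying assumption that the directory and the page tables are mapped at their physical addresses (a real kernel must arrange an equivalent mapping):

static vm_offset_t
example_lookup(pt_entry_t *pdir_va, vm_offset_t lin)
{
	pt_entry_t pde, pte, *ptab_va;

	pde = pdir_va[lin2pdenum(lin)];
	if (!(pde & INTEL_PTE_VALID))
		return 0;				/* directory slot empty */

	ptab_va = (pt_entry_t *) pte_to_pa(pde);	/* assumes phys == virt */
	pte = ptab_va[lin2ptenum(lin)];
	if (!(pte & INTEL_PTE_VALID))
		return 0;				/* page not mapped */

	return pte_to_pa(pte) + (lin & INTEL_OFFMASK);
}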
diff --git a/i386/include/mach/i386/pio.h b/i386/include/mach/i386/pio.h
new file mode 100644
index 00000000..06311dbf
--- /dev/null
+++ b/i386/include/mach/i386/pio.h
@@ -0,0 +1,70 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_PIO_H_
+#define _MACH_I386_PIO_H_
+
+#ifdef __GNUC__
+
+#define inl(port) \
+({ unsigned long _tmp__; \
+ asm volatile("inl %1, %0" : "=a" (_tmp__) : "d" ((unsigned short)(port))); \
+ _tmp__; })
+
+#define inw(port) \
+({ unsigned short _tmp__; \
+ asm volatile(".byte 0x66; inl %1, %0" : "=a" (_tmp__) : "d" ((unsigned short)(port))); \
+ _tmp__; })
+
+#define inb(port) \
+({ unsigned char _tmp__; \
+ asm volatile("inb %1, %0" : "=a" (_tmp__) : "d" ((unsigned short)(port))); \
+ _tmp__; })
+
+
+#define outl(port, val) \
+({ asm volatile("outl %0, %1" : : "a" (val) , "d" ((unsigned short)(port))); })
+
+
+#define outw(port, val) \
+({asm volatile(".byte 0x66; outl %0, %1" : : "a" ((unsigned short)(val)) , "d" ((unsigned short)(port))); })
+
+
+#define outb(port, val) \
+({ asm volatile("outb %0, %1" : : "a" ((unsigned char)(val)) , "d" ((unsigned short)(port))); })
+
+
+/* Inline code works just as well for 16-bit code as for 32-bit. */
+#define i16_inl(port) inl(port)
+#define i16_inw(port) inw(port)
+#define i16_inb(port) inb(port)
+#define i16_outl(port, val) outl(port, val)
+#define i16_outw(port, val) outw(port, val)
+#define i16_outb(port, val) outb(port, val)
+
+#endif /* __GNUC__ */
+
+#endif /* _MACH_I386_PIO_H_ */
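A small usage sketch: reading a CMOS RTC register through the conventional PC index/data ports 0x70/0x71. The port numbers are standard PC facts, not something this header defines:

static unsigned char
example_read_cmos(unsigned char reg)
{
	outb(0x70, reg);	/* select the CMOS register */
	return inb(0x71);	/* read its value */
}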
diff --git a/i386/include/mach/i386/pmode.h b/i386/include/mach/i386/pmode.h
new file mode 100644
index 00000000..99b7d26b
--- /dev/null
+++ b/i386/include/mach/i386/pmode.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSS
+ */
+#ifndef _MACH_I386_PMODE_H_
+#define _MACH_I386_PMODE_H_
+
+#include <mach/inline.h>
+#include <mach/macro_help.h>
+#include <mach/machine/proc_reg.h>
+
+
+
+/* Enter protected mode on i386 machines.
+ Assumes:
+ * Running in real mode.
+ * Interrupts are turned off.
+ * A20 is enabled (if on a PC).
+ * A suitable GDT is already loaded.
+
+ You must supply a 16-bit code segment
+ equivalent to the real-mode code segment currently in use.
+
+ You must reload all segment registers except CS
+ immediately after invoking this macro.
+*/
+#define i16_enter_pmode(prot_cs) \
+MACRO_BEGIN \
+	/* Switch to protected mode. */ \
+	asm volatile( \
+		"movl	%0,%%cr0\n" \
+		"ljmp	%1,$1f\n" \
+		"1:" \
+		: : "r" (i16_get_cr0() | CR0_PE), "i" (KERNEL_16_CS)); \
+MACRO_END
+
+
+
+/* Leave protected mode and return to real mode.
+ Assumes:
+ * Running in protected mode
+ * Interrupts are turned off.
+ * Paging is turned off.
+ * All currently loaded segment registers
+ contain 16-bit segments with limits of 0xffff.
+
+ You must supply a real-mode code segment
+ equivalent to the protected-mode code segment currently in use.
+
+ You must reload all segment registers except CS
+ immediately after invoking this function.
+*/
+MACH_INLINE void i16_leave_pmode(int real_cs)
+{
+	/* Switch back to real mode.
+	   Note: switching to the real-mode code segment
+	   _must_ be done with an _immediate_ far jump,
+	   not an indirect far jump.  At least on my Am386DX/40,
+	   an indirect far jump leaves the code segment read-only.  */
+	{
+		extern unsigned short real_jmp[];
+
+		real_jmp[3] = real_cs;
+		asm volatile(
+			"movl	%0,%%cr0\n"
+			"jmp	1f\n"
+			"1:\n"
+			"real_jmp:\n"
+			"_real_jmp:\n"
+			"ljmp	$0,$1f\n"
+			"1:"
+			: : "r" (i16_get_cr0() & ~CR0_PE));
+	}
+}
+
+
+
+#endif /* _MACH_I386_PMODE_H_ */
diff --git a/i386/include/mach/i386/proc_reg.h b/i386/include/mach/i386/proc_reg.h
new file mode 100644
index 00000000..20196edd
--- /dev/null
+++ b/i386/include/mach/i386/proc_reg.h
@@ -0,0 +1,340 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Processor registers for i386 and i486.
+ */
+#ifndef _MACH_I386_PROC_REG_H_
+#define _MACH_I386_PROC_REG_H_
+
+/*
+ * CR0
+ */
+#define CR0_PG 0x80000000 /* enable paging */
+#define CR0_CD 0x40000000 /* i486: cache disable */
+#define CR0_NW 0x20000000 /* i486: no write-through */
+#define CR0_AM 0x00040000 /* i486: alignment check mask */
+#define CR0_WP 0x00010000 /* i486: write-protect kernel access */
+#define CR0_NE 0x00000020 /* i486: handle numeric exceptions */
+#define CR0_ET 0x00000010 /* extension type is 80387 */
+ /* (not official) */
+#define CR0_TS 0x00000008 /* task switch */
+#define CR0_EM 0x00000004 /* emulate coprocessor */
+#define CR0_MP 0x00000002 /* monitor coprocessor */
+#define CR0_PE 0x00000001 /* enable protected mode */
+
+#ifndef ASSEMBLER
+#ifdef __GNUC__
+
+#include <mach/inline.h>
+
+
+/* Some processors, notably my Am386DX/40,
+   seem to have some rather subtle pipeline- or timing-related bugs
+   which cause really weird things to happen with pushf's and popf's
+   that come too close together... */
+
+MACH_INLINE unsigned get_eflags()
+{
+	unsigned eflags;
+	asm volatile(
+		"jmp	1f\n"
+		"1:	jmp	1f\n"
+		"1:	jmp	1f\n"
+		"1:	pushf\n"
+		"	jmp	1f\n"
+		"1:	jmp	1f\n"
+		"1:	jmp	1f\n"
+		"1:	popl	%0" : "=r" (eflags));
+	return eflags;
+}
+
+MACH_INLINE void set_eflags(unsigned eflags)
+{
+	asm volatile(
+		"pushl	%0\n"
+		"	jmp	1f\n"
+		"1:	jmp	1f\n"
+		"1:	jmp	1f\n"
+		"1:	popf\n"
+		"	jmp	1f\n"
+		"1:	jmp	1f\n"
+		"1:	jmp	1f\n"
+		"1:" : : "r" (eflags));
+}
+
+MACH_INLINE void cli() { asm volatile("cli"); }
+MACH_INLINE void sti() { asm volatile("sti"); }
+MACH_INLINE void cld() { asm volatile("cld"); }
+MACH_INLINE void std() { asm volatile("std"); }
+MACH_INLINE void clts() { asm volatile("clts"); }
+
+MACH_INLINE unsigned short get_cs()
+{
+ unsigned short cs;
+ asm volatile("movw %%cs,%w0" : "=r" (cs));
+ return cs;
+}
+
+MACH_INLINE unsigned short get_ds()
+{
+ unsigned short ds;
+ asm volatile("movw %%ds,%w0" : "=r" (ds));
+ return ds;
+}
+MACH_INLINE void set_ds(unsigned short ds)
+{
+ asm volatile("movw %w0,%%ds" : : "r" (ds));
+}
+
+MACH_INLINE unsigned short get_es()
+{
+ unsigned short es;
+ asm volatile("movw %%es,%w0" : "=r" (es));
+ return es;
+}
+MACH_INLINE void set_es(unsigned short es)
+{
+ asm volatile("movw %w0,%%es" : : "r" (es));
+}
+
+MACH_INLINE unsigned short get_fs()
+{
+ unsigned short fs;
+ asm volatile("movw %%fs,%w0" : "=r" (fs));
+ return fs;
+}
+MACH_INLINE void set_fs(unsigned short fs)
+{
+ asm volatile("movw %w0,%%fs" : : "r" (fs));
+}
+
+MACH_INLINE unsigned short get_gs()
+{
+ unsigned short gs;
+ asm volatile("movw %%gs,%w0" : "=r" (gs));
+ return gs;
+}
+MACH_INLINE void set_gs(unsigned short gs)
+{
+ asm volatile("movw %w0,%%gs" : : "r" (gs));
+}
+
+MACH_INLINE unsigned short get_ss()
+{
+ unsigned short ss;
+ asm volatile("movw %%ss,%w0" : "=r" (ss));
+ return ss;
+}
+MACH_INLINE void set_ss(unsigned short ss)
+{
+ asm volatile("movw %w0,%%ss" : : "r" (ss));
+}
+
+#define get_eax() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("movl %%eax, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_ebx() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("movl %%ebx, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_ecx() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("movl %%ecx, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_edx() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("movl %%edx, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_esi() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("movl %%esi, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_edi() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("movl %%edi, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_ebp() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("movl %%ebp, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_esp() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("movl %%esp, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_eflags() \
+ ({ \
+ register unsigned int _temp__; \
+ asm volatile("pushf; popl %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_cr0() \
+ ({ \
+ register unsigned int _temp__; \
+ asm volatile("mov %%cr0, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr0(value) \
+ ({ \
+ register unsigned int _temp__ = (value); \
+ asm volatile("mov %0, %%cr0" : : "r" (_temp__)); \
+ })
+
+MACH_INLINE unsigned short get_msw()
+{
+ unsigned short msw;
+ asm volatile("smsw %0" : "=r" (msw));
+ return msw;
+}
+
+#define get_cr2() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("mov %%cr2, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define get_cr3() \
+ ({ \
+ register unsigned int _temp__; \
+ asm("mov %%cr3, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr3(value) \
+ ({ \
+ register unsigned int _temp__ = (value); \
+ asm volatile("mov %0, %%cr3" : : "r" (_temp__)); \
+ })
+
+#define set_ts() \
+ set_cr0(get_cr0() | CR0_TS)
+
+#define clear_ts() \
+ asm volatile("clts")
+
+#define get_tr() \
+ ({ \
+ unsigned short _seg__; \
+ asm volatile("str %0" : "=rm" (_seg__) ); \
+ _seg__; \
+ })
+
+#define set_tr(seg) \
+ asm volatile("ltr %0" : : "rm" ((unsigned short)(seg)) )
+
+#define set_gdt(pseudo_desc) \
+ ({ \
+ asm volatile("lgdt %0" : : "m" ((pseudo_desc)->limit)); \
+ })
+
+#define set_idt(pseudo_desc) \
+ ({ \
+ asm volatile("lidt %0" : : "m" ((pseudo_desc)->limit)); \
+ })
+
+#define get_ldt() \
+ ({ \
+ unsigned short _seg__; \
+ asm volatile("sldt %0" : "=rm" (_seg__) ); \
+ _seg__; \
+ })
+
+#define set_ldt(seg) \
+ asm volatile("lldt %0" : : "rm" ((unsigned short)(seg)) )
+
+/* This doesn't set a processor register,
+ but it's often used immediately after setting one,
+ to flush the instruction queue. */
+#define flush_instr_queue() \
+	asm volatile( \
+		"jmp	0f\n" \
+		"0:" \
+		)
+
+/* Inline functions work fine for 16-bit code as well. */
+#ifdef CODE16
+#define i16_get_eflags get_eflags
+#define i16_set_eflags set_eflags
+#define i16_cli cli
+#define i16_sti sti
+#define i16_cld cld
+#define i16_std std
+#define i16_clts clts
+#define i16_get_cs get_cs
+#define i16_set_cs set_cs
+#define i16_get_ds get_ds
+#define i16_set_ds set_ds
+#define i16_get_es get_es
+#define i16_set_es set_es
+#define i16_get_fs get_fs
+#define i16_set_fs set_fs
+#define i16_get_gs get_gs
+#define i16_set_gs set_gs
+#define i16_get_ss get_ss
+#define i16_set_ss set_ss
+#define i16_get_cr0 get_cr0
+#define i16_set_cr0 set_cr0
+#define i16_get_cr3 get_cr3
+#define i16_set_cr3 set_cr3
+#define i16_get_msw get_msw
+#define i16_set_gdt set_gdt
+#define i16_set_idt set_idt
+#define i16_set_ldt set_ldt
+#define i16_set_tr set_tr
+#define i16_flush_instr_queue flush_instr_queue
+#endif
+
+#endif /* __GNUC__ */
+#endif /* ASSEMBLER */
+
+#endif /* _MACH_I386_PROC_REG_H_ */
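set_ts() and clear_ts() are the usual building blocks for lazy FPU switching: set TS when switching away from a thread so its successor's first FP instruction traps, and clear TS in that trap handler before loading the new thread's FP state. The two routines below are only an outline; the surrounding context-switch and trap plumbing is assumed:

static void
example_fpu_switch_away(void)
{
	set_ts();	/* next FP instruction raises the "no coprocessor" trap */
}

static void
example_fpu_trap(void)
{
	clear_ts();	/* allow FP again... */
	/* ...then restore the incoming thread's i386_fp_save/i386_fp_regs. */
}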
diff --git a/i386/include/mach/i386/rpc.h b/i386/include/mach/i386/rpc.h
new file mode 100644
index 00000000..71d31fb9
--- /dev/null
+++ b/i386/include/mach/i386/rpc.h
@@ -0,0 +1,9 @@
+
+struct rpc_csig_action
+{
+};
+
+struct rpc_csig_entry
+{
+};
+
diff --git a/i386/include/mach/i386/seg.h b/i386/include/mach/i386/seg.h
new file mode 100644
index 00000000..31ada9ec
--- /dev/null
+++ b/i386/include/mach/i386/seg.h
@@ -0,0 +1,190 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_SEG_H_
+#define _MACH_I386_SEG_H_
+
+
+/*
+ * i386 segmentation.
+ */
+
+#ifndef ASSEMBLER
+
+/*
+ * Real segment descriptor.
+ */
+struct i386_descriptor {
+ unsigned int limit_low:16, /* limit 0..15 */
+ base_low:16, /* base 0..15 */
+ base_med:8, /* base 16..23 */
+ access:8, /* access byte */
+ limit_high:4, /* limit 16..19 */
+ granularity:4, /* granularity */
+ base_high:8; /* base 24..31 */
+};
+
+struct i386_gate {
+ unsigned int offset_low:16, /* offset 0..15 */
+ selector:16,
+ word_count:8,
+ access:8,
+ offset_high:16; /* offset 16..31 */
+};
+
+#endif /* !ASSEMBLER */
+
+#define SZ_32 0x4 /* 32-bit segment */
+#define SZ_16 0x0 /* 16-bit segment */
+#define SZ_G 0x8 /* 4K limit field */
+
+#define ACC_A 0x01 /* accessed */
+#define ACC_TYPE 0x1e /* type field: */
+
+#define ACC_TYPE_SYSTEM 0x00 /* system descriptors: */
+
+#define ACC_LDT 0x02 /* LDT */
+#define ACC_CALL_GATE_16 0x04 /* 16-bit call gate */
+#define ACC_TASK_GATE 0x05 /* task gate */
+#define ACC_TSS 0x09 /* task segment */
+#define ACC_CALL_GATE 0x0c /* call gate */
+#define ACC_INTR_GATE 0x0e /* interrupt gate */
+#define ACC_TRAP_GATE 0x0f /* trap gate */
+
+#define ACC_TSS_BUSY 0x02 /* task busy */
+
+#define ACC_TYPE_USER 0x10 /* user descriptors */
+
+#define ACC_DATA 0x10 /* data */
+#define ACC_DATA_W 0x12 /* data, writable */
+#define ACC_DATA_E 0x14 /* data, expand-down */
+#define ACC_DATA_EW 0x16 /* data, expand-down,
+ writable */
+#define ACC_CODE 0x18 /* code */
+#define ACC_CODE_R 0x1a /* code, readable */
+#define ACC_CODE_C 0x1c /* code, conforming */
+#define ACC_CODE_CR 0x1e /* code, conforming,
+ readable */
+#define ACC_PL 0x60 /* access rights: */
+#define ACC_PL_K 0x00 /* kernel access only */
+#define ACC_PL_U 0x60 /* user access */
+#define ACC_P 0x80 /* segment present */
+
+/*
+ * Components of a selector
+ */
+#define SEL_LDT 0x04 /* local selector */
+#define SEL_PL 0x03 /* privilege level: */
+#define SEL_PL_K 0x00 /* kernel selector */
+#define SEL_PL_U 0x03 /* user selector */
+
+/*
+ * Convert selector to descriptor table index.
+ */
+#define sel_idx(sel) ((sel)>>3)
+
+
+#ifndef ASSEMBLER
+
+#include <mach/inline.h>
+
+
+/* Format of a "pseudo-descriptor", used for loading the IDT and GDT. */
+struct pseudo_descriptor
+{
+ short pad;
+ unsigned short limit;
+ unsigned long linear_base;
+};
+
+
+/* Fill a segment descriptor. */
+MACH_INLINE void
+fill_descriptor(struct i386_descriptor *desc, unsigned base, unsigned limit,
+ unsigned char access, unsigned char sizebits)
+{
+ if (limit > 0xfffff)
+ {
+ limit >>= 12;
+ sizebits |= SZ_G;
+ }
+ desc->limit_low = limit & 0xffff;
+ desc->base_low = base & 0xffff;
+ desc->base_med = (base >> 16) & 0xff;
+ desc->access = access | ACC_P;
+ desc->limit_high = limit >> 16;
+ desc->granularity = sizebits;
+ desc->base_high = base >> 24;
+}
+
+/* Set the base address in a segment descriptor. */
+MACH_INLINE void
+fill_descriptor_base(struct i386_descriptor *desc, unsigned base)
+{
+ desc->base_low = base & 0xffff;
+ desc->base_med = (base >> 16) & 0xff;
+ desc->base_high = base >> 24;
+}
+
+/* Set the limit in a segment descriptor. */
+MACH_INLINE void
+fill_descriptor_limit(struct i386_descriptor *desc, unsigned limit)
+{
+ if (limit > 0xfffff)
+ {
+ limit >>= 12;
+ desc->granularity |= SZ_G;
+ }
+ else
+ desc->granularity &= ~SZ_G;
+ desc->limit_low = limit & 0xffff;
+ desc->limit_high = limit >> 16;
+}
+
+/* Fill a gate with particular values. */
+MACH_INLINE void
+fill_gate(struct i386_gate *gate, unsigned offset, unsigned short selector,
+ unsigned char access, unsigned char word_count)
+{
+ gate->offset_low = offset & 0xffff;
+ gate->selector = selector;
+ gate->word_count = word_count;
+ gate->access = access | ACC_P;
+ gate->offset_high = (offset >> 16) & 0xffff;
+}
+
+#ifdef CODE16
+#define i16_fill_descriptor fill_descriptor
+#define i16_fill_gate fill_gate
+#endif
+
+#endif /* !ASSEMBLER */
+
+#endif /* _MACH_I386_SEG_H_ */
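A sketch of fill_descriptor() in use, building flat 4 GB kernel code and data segments. The gdt array and its slot numbers are hypothetical; fill_descriptor() itself supplies ACC_P and sets SZ_G when the limit exceeds 20 bits:

static struct i386_descriptor example_gdt[8];

static void
example_init_flat_segments(void)
{
	/* Kernel code: base 0, limit 4 GB, readable, 32-bit. */
	fill_descriptor(&example_gdt[1], 0, 0xffffffff,
			ACC_PL_K | ACC_CODE_R, SZ_32);

	/* Kernel data: base 0, limit 4 GB, writable, 32-bit. */
	fill_descriptor(&example_gdt[2], 0, 0xffffffff,
			ACC_PL_K | ACC_DATA_W, SZ_32);
}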
diff --git a/i386/include/mach/i386/syscall_sw.h b/i386/include/mach/i386/syscall_sw.h
new file mode 100644
index 00000000..6b937d9b
--- /dev/null
+++ b/i386/include/mach/i386/syscall_sw.h
@@ -0,0 +1,47 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_SYSCALL_SW_H_
+#define _MACH_I386_SYSCALL_SW_H_
+
+#include <mach/machine/asm.h>
+
+#if BSD_TRAP
+#define kernel_trap(trap_name,trap_number,number_args) \
+ENTRY(trap_name) \
+ movl $ trap_number,%eax; \
+ SVC; \
+ jb LCL(cerror); \
+ ret;
+#else
+#define kernel_trap(trap_name,trap_number,number_args) \
+ENTRY(trap_name) \
+ movl $ trap_number,%eax; \
+ SVC; \
+ ret;
+#endif
+
+#endif /* _MACH_I386_SYSCALL_SW_H_ */
diff --git a/i386/include/mach/i386/thread_status.h b/i386/include/mach/i386/thread_status.h
new file mode 100644
index 00000000..cc3dc663
--- /dev/null
+++ b/i386/include/mach/i386/thread_status.h
@@ -0,0 +1,147 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: thread_status.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * This file contains the structure definitions for the thread
+ * state as applied to I386 processors.
+ */
+
+#ifndef _MACH_I386_THREAD_STATUS_H_
+#define _MACH_I386_THREAD_STATUS_H_
+
+#include <mach/machine/fp_reg.h>
+/*
+ * i386_thread_state this is the structure that is exported
+ * to user threads for use in status/mutate
+ * calls. This structure should never
+ * change.
+ *
+ * i386_float_state exported to user threads for access to
+ * floating point registers. Try not to
+ * change this one, either.
+ *
+ * i386_isa_port_map_state exported to user threads to allow
+ * selective in/out operations
+ *
+ */
+
+#define i386_THREAD_STATE 1
+#define i386_FLOAT_STATE 2
+#define i386_ISA_PORT_MAP_STATE 3
+#define i386_V86_ASSIST_STATE 4
+#define i386_REGS_SEGS_STATE 5
+
+/*
+ * This structure is used for both
+ * i386_THREAD_STATE and i386_REGS_SEGS_STATE.
+ */
+struct i386_thread_state {
+ unsigned int gs;
+ unsigned int fs;
+ unsigned int es;
+ unsigned int ds;
+ unsigned int edi;
+ unsigned int esi;
+ unsigned int ebp;
+ unsigned int esp;
+ unsigned int ebx;
+ unsigned int edx;
+ unsigned int ecx;
+ unsigned int eax;
+ unsigned int eip;
+ unsigned int cs;
+ unsigned int efl;
+ unsigned int uesp;
+ unsigned int ss;
+};
+#define i386_THREAD_STATE_COUNT (sizeof (struct i386_thread_state)/sizeof(unsigned int))
+
+/*
+ * Floating point state.
+ *
+ * fpkind tells in what way floating point operations are supported.
+ * See the values for fp_kind in <mach/i386/fp_reg.h>.
+ *
+ * If the kind is FP_NO, then calls to set the state will fail, and
+ * thread_getstatus will return garbage for the rest of the state.
+ * If "initialized" is false, then the rest of the state is garbage.
+ * Clients can set "initialized" to false to force the coprocessor to
+ * be reset.
+ * "exc_status" is non-zero if the thread has noticed (but not
+ * proceeded from) a coprocessor exception. It contains the status
+ * word with the exception bits set. The status word in "fp_status"
+ * will have the exception bits turned off. If an exception bit in
+ * "fp_status" is turned on, then "exc_status" should be zero. This
+ * happens when the coprocessor exception is noticed after the system
+ * has context switched to some other thread.
+ *
+ * If kind is FP_387, then "state" is an i387_state. Other kinds might
+ * also use i387_state, but somebody will have to verify it (XXX).
+ * Note that the registers are ordered from top-of-stack down, not
+ * according to physical register number.
+ */
+
+#define FP_STATE_BYTES \
+ (sizeof (struct i386_fp_save) + sizeof (struct i386_fp_regs))
+
+struct i386_float_state {
+ int fpkind; /* FP_NO..FP_387 (readonly) */
+ int initialized;
+ unsigned char hw_state[FP_STATE_BYTES]; /* actual "hardware" state */
+ int exc_status; /* exception status (readonly) */
+};
+#define i386_FLOAT_STATE_COUNT (sizeof(struct i386_float_state)/sizeof(unsigned int))
+
+
+#define PORT_MAP_BITS 0x400
+struct i386_isa_port_map_state {
+ unsigned char pm[PORT_MAP_BITS>>3];
+};
+
+#define i386_ISA_PORT_MAP_STATE_COUNT (sizeof(struct i386_isa_port_map_state)/sizeof(unsigned int))
+
+/*
+ * V8086 assist supplies a pointer to an interrupt
+ * descriptor table in task space.
+ */
+struct i386_v86_assist_state {
+ unsigned int int_table; /* interrupt table address */
+ int int_count; /* interrupt table size */
+};
+
+struct v86_interrupt_table {
+ unsigned int count; /* count of pending interrupts */
+ unsigned short mask; /* ignore this interrupt if true */
+ unsigned short vec; /* vector to take */
+};
+
+#define i386_V86_ASSIST_STATE_COUNT \
+ (sizeof(struct i386_v86_assist_state)/sizeof(unsigned int))
+
+#endif /* _MACH_I386_THREAD_STATUS_H_ */
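A hedged user-space sketch of fetching this state with the standard Mach thread_get_state() call; the <mach.h> include and the exact type of the count argument differ between Mach environments and are assumptions here, not something this header specifies:

#include <mach.h>

kern_return_t
example_get_eip(thread_t thread, unsigned int *eip)
{
	struct i386_thread_state regs;
	mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
	kern_return_t kr;

	kr = thread_get_state(thread, i386_THREAD_STATE,
			      (thread_state_t) &regs, &count);
	if (kr == KERN_SUCCESS)
		*eip = regs.eip;
	return kr;
}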
diff --git a/i386/include/mach/i386/time_stamp.h b/i386/include/mach/i386/time_stamp.h
new file mode 100644
index 00000000..d6488e26
--- /dev/null
+++ b/i386/include/mach/i386/time_stamp.h
@@ -0,0 +1,29 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * The I386 timestamp implementation uses the default, so we don't
+ * need to do anything here.
+ */
diff --git a/i386/include/mach/i386/trap.h b/i386/include/mach/i386/trap.h
new file mode 100644
index 00000000..58dc266c
--- /dev/null
+++ b/i386/include/mach/i386/trap.h
@@ -0,0 +1,60 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_TRAP_H_
+#define _MACH_I386_TRAP_H_
+
+/*
+ * Hardware trap vectors for i386.
+ */
+#define T_DIVIDE_ERROR 0
+#define T_DEBUG 1
+#define T_NMI 2 /* non-maskable interrupt */
+#define T_INT3 3 /* int 3 instruction */
+#define T_OVERFLOW 4 /* overflow test */
+#define T_OUT_OF_BOUNDS 5 /* bounds check */
+#define T_INVALID_OPCODE 6 /* invalid op code */
+#define T_NO_FPU 7 /* no floating point */
+#define T_DOUBLE_FAULT 8 /* double fault */
+#define T_FPU_FAULT 9
+/* 10 */
+#define T_SEGMENT_NOT_PRESENT 11
+#define T_STACK_FAULT 12
+#define T_GENERAL_PROTECTION 13
+#define T_PAGE_FAULT 14
+/* 15 */
+#define T_FLOATING_POINT_ERROR 16
+#define T_WATCHPOINT 17
+
+/*
+ * Page-fault trap codes.
+ */
+#define T_PF_PROT 0x1 /* protection violation */
+#define T_PF_WRITE 0x2 /* write access */
+#define T_PF_USER 0x4 /* from user state */
+
+
+#endif /* _MACH_I386_TRAP_H_ */
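For T_PAGE_FAULT the processor also pushes an error code, which the T_PF_* bits decode; a tiny illustrative helper:

static void
example_decode_pf(unsigned err, int *prot, int *write, int *user)
{
	*prot  = (err & T_PF_PROT)  != 0;  /* protection violation (else not-present) */
	*write = (err & T_PF_WRITE) != 0;  /* write access */
	*user  = (err & T_PF_USER)  != 0;  /* fault taken from user mode */
}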
diff --git a/i386/include/mach/i386/tss.h b/i386/include/mach/i386/tss.h
new file mode 100644
index 00000000..c9a82980
--- /dev/null
+++ b/i386/include/mach/i386/tss.h
@@ -0,0 +1,67 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_I386_TSS_H_
+#define _MACH_I386_TSS_H_
+
+/*
+ * i386 Task State Segment
+ */
+struct i386_tss {
+ int back_link; /* segment number of previous task,
+ if nested */
+ int esp0; /* initial stack pointer ... */
+ int ss0; /* and segment for ring 0 */
+ int esp1; /* initial stack pointer ... */
+ int ss1; /* and segment for ring 1 */
+ int esp2; /* initial stack pointer ... */
+ int ss2; /* and segment for ring 2 */
+ int cr3; /* CR3 - page table directory
+ physical address */
+ int eip;
+ int eflags;
+ int eax;
+ int ecx;
+ int edx;
+ int ebx;
+ int esp; /* current stack pointer */
+ int ebp;
+ int esi;
+ int edi;
+ int es;
+ int cs;
+ int ss; /* current stack segment */
+ int ds;
+ int fs;
+ int gs;
+ int ldt; /* local descriptor table segment */
+ unsigned short trace_trap; /* trap on switch to this task */
+ unsigned short io_bit_map_offset;
+ /* offset to start of IO permission
+ bit map */
+};
+
+#endif /* _MACH_I386_TSS_H_ */
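A brief sketch of the fields a kernel typically initializes: the ring-0 stack (ss0/esp0), and an io_bit_map_offset pointing past the end of the segment, which disables the I/O permission bitmap provided the TSS limit is exactly sizeof (struct i386_tss) - 1. The helper and its parameters are illustrative:

static void
example_init_tss(struct i386_tss *tss, int kernel_ss, int kstack_top)
{
	tss->ss0  = kernel_ss;
	tss->esp0 = kstack_top;
	/* An offset beyond the TSS limit means "no I/O bitmap". */
	tss->io_bit_map_offset = sizeof (struct i386_tss);
}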
diff --git a/i386/include/mach/i386/vcpi.h b/i386/include/mach/i386/vcpi.h
new file mode 100644
index 00000000..0c09ed09
--- /dev/null
+++ b/i386/include/mach/i386/vcpi.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1996-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _FLUX_INCLUDE_FLUX_I386_VCPI_H_
+#define _FLUX_INCLUDE_FLUX_I386_VCPI_H_
+
+struct vcpi_switch_data
+{
+ vm_offset_t phys_pdir;
+ vm_offset_t lin_gdt;
+ vm_offset_t lin_idt;
+ unsigned short ldt_sel;
+ unsigned short tss_sel;
+ unsigned long entry_eip;
+ unsigned short entry_cs;
+};
+
+#endif /* _FLUX_INCLUDE_FLUX_I386_VCPI_H_ */
diff --git a/i386/include/mach/i386/vm_param.h b/i386/include/mach/i386/vm_param.h
new file mode 100644
index 00000000..a6b1efd5
--- /dev/null
+++ b/i386/include/mach/i386/vm_param.h
@@ -0,0 +1,73 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm_param.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * I386 machine dependent virtual memory parameters.
+ * Most of the declarations are preceded by I386_ (or i386_)
+ * which is OK because only I386 specific code will be using
+ * them.
+ */
+
+#ifndef _MACH_I386_VM_PARAM_H_
+#define _MACH_I386_VM_PARAM_H_
+
+#include <mach/machine/vm_types.h>
+
+#define BYTE_SIZE 8 /* byte size in bits */
+
+#define I386_PGBYTES 4096 /* bytes per 80386 page */
+#define I386_PGSHIFT 12 /* number of bits to shift for pages */
+
+/* Virtual page size is the same as real page size - 4K is big enough. */
+#define PAGE_SHIFT I386_PGSHIFT
+
+/*
+ * Convert bytes to pages and convert pages to bytes.
+ * No rounding is used.
+ */
+
+#define i386_btop(x) (((unsigned)(x)) >> I386_PGSHIFT)
+#define i386_ptob(x) (((unsigned)(x)) << I386_PGSHIFT)
+
+/*
+ * Round off or truncate to the nearest page. These will work
+ * for either addresses or counts. (e.g. a length of 1 byte rounds
+ * up to one full page of I386_PGBYTES bytes.)
+ */
+
+#define i386_round_page(x) ((((unsigned)(x)) + I386_PGBYTES - 1) & \
+ ~(I386_PGBYTES-1))
+#define i386_trunc_page(x) (((unsigned)(x)) & ~(I386_PGBYTES-1))
+
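+/*
+ * Editorial worked example (not part of the original comment): with
+ * I386_PGBYTES == 4096 and I386_PGSHIFT == 12,
+ *
+ *	i386_btop(0x3456)	== 3
+ *	i386_ptob(3)		== 0x3000
+ *	i386_round_page(1)	== 0x1000	(1 byte occupies a whole page)
+ *	i386_round_page(0x1000)	== 0x1000	(already page-aligned)
+ *	i386_trunc_page(0x1fff)	== 0x1000
+ */
+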
+/* User address spaces are 3GB each,
+ starting at virtual and linear address 0. */
+#define VM_MIN_ADDRESS ((vm_offset_t) 0)
+#define VM_MAX_ADDRESS ((vm_offset_t) 0xc0000000)
+
+#endif /* _MACH_I386_VM_PARAM_H_ */
diff --git a/i386/include/mach/i386/vm_types.h b/i386/include/mach/i386/vm_types.h
new file mode 100644
index 00000000..51fdca87
--- /dev/null
+++ b/i386/include/mach/i386/vm_types.h
@@ -0,0 +1,108 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm_types.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Header file for VM data types. I386 version.
+ */
+
+#ifndef _MACHINE_VM_TYPES_H_
+#define _MACHINE_VM_TYPES_H_ 1
+
+#ifdef ASSEMBLER
+#else /* ASSEMBLER */
+
+/*
+ * A natural_t is the type for the native
+ * integer type, e.g. 32 or 64 or.. whatever
+ * register size the machine has. Unsigned, it is
+ * used for entities that might be either
+ * unsigned integers or pointers, and for
+ * type-casting between the two.
+ * For instance, the IPC system represents
+ * a port in user space as an integer and
+ * in kernel space as a pointer.
+ */
+typedef unsigned int natural_t;
+
+/*
+ * An integer_t is the signed counterpart
+ * of the natural_t type. Both types are
+ * only supposed to be used to define
+ * other types in a machine-independent
+ * way.
+ */
+typedef int integer_t;
+
+#ifndef _POSIX_SOURCE
+
+/*
+ * An int32 is an integer that is at least 32 bits wide
+ */
+typedef int int32;
+typedef unsigned int uint32;
+
+#endif /* _POSIX_SOURCE */
+
+/*
+ * A vm_offset_t is a type-neutral pointer,
+ * e.g. an offset into a virtual memory space.
+ */
+typedef natural_t vm_offset_t;
+
+/*
+ * A vm_size_t is the proper type for e.g.
+ * expressing the difference between two
+ * vm_offset_t entities.
+ */
+typedef natural_t vm_size_t;
+
+/*
+ * These types are _exactly_ as wide as indicated in their names.
+ */
+typedef signed char signed8_t;
+typedef signed short signed16_t;
+typedef signed long signed32_t;
+typedef signed long long signed64_t;
+typedef unsigned char unsigned8_t;
+typedef unsigned short unsigned16_t;
+typedef unsigned long unsigned32_t;
+typedef unsigned long long unsigned64_t;
+typedef float float32_t;
+typedef double float64_t;
+
+#endif /* ASSEMBLER */
+
+/*
+ * If composing messages by hand (please don't)
+ */
+
+#define MACH_MSG_TYPE_INTEGER_T MACH_MSG_TYPE_INTEGER_32
+
+#endif /* _MACHINE_VM_TYPES_H_ */
+
diff --git a/i386/include/mach/proc_ops.h b/i386/include/mach/proc_ops.h
new file mode 100644
index 00000000..7c379cfc
--- /dev/null
+++ b/i386/include/mach/proc_ops.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSS
+ */
+#ifndef _MACH_I386_PROC_OPS_H_
+#define _MACH_I386_PROC_OPS_H_
+
+#include <mach/machine/vm_types.h>
+#include <mach/inline.h>
+
+/* Returns the bit number of the most-significant set bit in `val',
+ e.g. 0 for 1, 1 for 2-3, 2 for 4-7, etc.
+ If `val' is 0 (i.e. no bits are set), the behavior is undefined. */
+MACH_INLINE int find_msb_set(natural_t val)
+{
+ int msb;
+
+ asm("
+ bsr %0,%0
+ " : "=r" (msb) : "0" (val));
+
+ return msb;
+}
+
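+#if 0	/* editorial example -- not part of the original Mach sources */
+/*
+ * Minimal sketch of a portable C equivalent of find_msb_set() above,
+ * showing what the `bsr' instruction computes: the index of the
+ * highest set bit.  As with the inline above, the result for val == 0
+ * is undefined.  The function name is hypothetical.
+ */
+MACH_INLINE int
+example_find_msb_set(natural_t val)
+{
+	int msb = 0;
+
+	while (val >>= 1)
+		msb++;
+	return msb;
+}
+#endif
+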
+#endif /* _MACH_I386_PROC_OPS_H_ */
diff --git a/i386/include/mach/sa/stdarg.h b/i386/include/mach/sa/stdarg.h
new file mode 100644
index 00000000..3288b932
--- /dev/null
+++ b/i386/include/mach/sa/stdarg.h
@@ -0,0 +1,46 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University.
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _MACH_SA_STDARG_H_
+#define _MACH_SA_STDARG_H_
+
+#define __va_size(type) ((sizeof(type)+3) & ~0x3)
+
+#ifndef _VA_LIST_
+#define _VA_LIST_
+typedef char *va_list;
+#endif
+
+#define va_start(pvar, lastarg) \
+ ((pvar) = (char*)(void*)&(lastarg) + __va_size(lastarg))
+#define va_end(pvar)
+#define va_arg(pvar,type) \
+ ((pvar) += __va_size(type), \
+ *((type *)((pvar) - __va_size(type))))
+
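+#if 0	/* editorial example -- not part of the original Mach sources */
+/*
+ * Minimal sketch of a caller of the macros above: sums `count' int
+ * arguments passed after the fixed argument.  The function name is
+ * hypothetical.
+ */
+static int
+example_sum(int count, ...)
+{
+	va_list ap;
+	int i, total = 0;
+
+	va_start(ap, count);
+	for (i = 0; i < count; i++)
+		total += va_arg(ap, int);
+	va_end(ap);
+	return total;
+}
+#endif
+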
+#endif /* _MACH_SA_STDARG_H_ */
diff --git a/i386/include/mach/sa/sys/varargs.h b/i386/include/mach/sa/sys/varargs.h
new file mode 100644
index 00000000..ff953ea7
--- /dev/null
+++ b/i386/include/mach/sa/sys/varargs.h
@@ -0,0 +1,41 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon rights
+ * to redistribute these changes.
+ */
+/*
+ * varargs support for i386
+ */
+#ifndef _MACH_SYS_VARARGS_H_
+#define _MACH_SYS_VARARGS_H_
+
+#define va_dcl int va_alist;
+typedef char * va_list;
+
+#define va_start(pvar) (pvar) = (va_list)&va_alist
+#define va_end(pvar)
+#define va_arg(pvar,type) ( \
+ (pvar) += ((sizeof(type)+3) & ~0x3), \
+ *((type *)((pvar) - ((sizeof(type)+3) & ~0x3))) )
+
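+#if 0	/* editorial example -- not part of the original Mach sources */
+/*
+ * Minimal sketch of the old-style <varargs.h> convention, for
+ * contrast with <stdarg.h>: the function is declared with va_alist
+ * and va_dcl, and every argument (including the count) is fetched
+ * with va_arg.  The function name is hypothetical.
+ */
+static int
+example_sum(va_alist)
+	va_dcl
+{
+	va_list ap;
+	int i, count, total = 0;
+
+	va_start(ap);
+	count = va_arg(ap, int);	/* first argument: how many follow */
+	for (i = 0; i < count; i++)
+		total += va_arg(ap, int);
+	va_end(ap);
+	return total;
+}
+#endif
+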
+#endif /* _MACH_SYS_VARARGS_H_ */
diff --git a/i386/include/mach/setjmp.h b/i386/include/mach/setjmp.h
new file mode 100644
index 00000000..6ea42c8e
--- /dev/null
+++ b/i386/include/mach/setjmp.h
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Setjmp/longjmp buffer for i386.
+ */
+#ifndef _MACH_SETJMP_H_PROCESSED_
+#define _MACH_SETJMP_H_PROCESSED_ 1
+
+/* XXX prefix these with mach_ so they don't interfere with higher layers?
+ This stuff is included by cthreads.h. */
+
+/* XXX The original definition of jmp_buf[] causes problems using
+ * libthreads when linked against NetBSD and FreeBSD's libc because
+ * it's too small. When cthreads calls _setjmp, it gets the libc
+ * version which saves more state than it's expecting and overwrites
+ * important cthread data. =( This definition is big enough for all
+ * known systems so far (Linux's is 6, FreeBSD's is 9 and NetBSD's is
+ * 10). This file really shouldn't even be here, since we should be
+ * using libc's setjmp.h.
+ */
+#if 0
+#define _JBLEN 6
+#else
+#define _JBLEN 10
+#endif
+
+typedef int jmp_buf[_JBLEN]; /* ebx, esi, edi, ebp, esp, eip */
+
+
+extern int setjmp (jmp_buf);
+extern void longjmp (jmp_buf, int);
+extern int _setjmp (jmp_buf);
+extern void _longjmp (jmp_buf, int);
+
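+#if 0	/* editorial example -- not part of the original Mach sources */
+/*
+ * Minimal sketch of the control flow these declarations support, as
+ * cthreads uses them: _setjmp returns 0 on the initial call and the
+ * value passed to _longjmp when the jump is taken.  Names prefixed
+ * with example_ are hypothetical.
+ */
+static jmp_buf example_env;
+
+static void
+example(void)
+{
+	if (_setjmp(example_env) == 0) {
+		/* normal path: transfer control back to the saved context */
+		_longjmp(example_env, 1);
+	} else {
+		/* reached via _longjmp; _setjmp returned 1 */
+	}
+}
+#endif
+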
+#endif /* _MACH_SETJMP_H_PROCESSED_ */
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
new file mode 100644
index 00000000..3c2493b9
--- /dev/null
+++ b/i386/intel/pmap.c
@@ -0,0 +1,2563 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: pmap.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * (These guys wrote the Vax version)
+ *
+ * Physical Map management code for Intel i386, i486, and i860.
+ *
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidation or protection-reduction
+ * operations until they are actually necessary.
+ * This module is given full information as to which
+ * processors are currently using which maps, and as to
+ * when physical maps must be made correct.
+ */
+
+#include <cpus.h>
+
+#include <mach/machine/vm_types.h>
+
+#include <mach/boolean.h>
+#include <kern/thread.h>
+#include <kern/zalloc.h>
+
+#include <kern/lock.h>
+
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include "vm_param.h"
+#include <mach/vm_prot.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_user.h>
+
+#include <mach/machine/vm_param.h>
+#include <machine/thread.h>
+#include "cpu_number.h"
+#if i860
+#include <i860ipsc/nodehw.h>
+#endif
+
+#ifdef ORC
+#define OLIVETTICACHE 1
+#endif /* ORC */
+
+#ifndef OLIVETTICACHE
+#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry);
+#define WRITE_PTE_FAST(pte_p, pte_entry) *(pte_p) = (pte_entry);
+#else /* OLIVETTICACHE */
+#error might not work anymore
+
+/* This gross kludgery is needed for Olivetti XP7 & XP9 boxes to get
+ * around an apparent hardware bug. Other than at startup it doesn't
+ * affect run-time performance very much, so we leave it in for all
+ * machines.
+ */
+extern unsigned *pstart();
+#define CACHE_LINE 8
+#define CACHE_SIZE 512
+#define CACHE_PAGE 0x1000
+
+#define WRITE_PTE(pte_p, pte_entry) { write_pte(pte_p, pte_entry); }
+
+write_pte(pte_p, pte_entry)
+pt_entry_t *pte_p, pte_entry;
+{
+ unsigned long count;
+ volatile unsigned long hold, *addr1, *addr2;
+
+ if ( pte_entry != *pte_p )
+ *pte_p = pte_entry;
+ else {
+ /* This isn't necessarily the optimal algorithm */
+ addr1 = (unsigned long *)pstart;
+ for (count = 0; count < CACHE_SIZE; count++) {
+ addr2 = addr1 + CACHE_PAGE;
+ hold = *addr1; /* clear cache bank - A - */
+ hold = *addr2; /* clear cache bank - B - */
+ addr1 += CACHE_LINE;
+ }
+ }
+}
+
+#define WRITE_PTE_FAST(pte_p, pte_entry)*pte_p = pte_entry;
+
+#endif /* OLIVETTICACHE */
+
+/*
+ * Private data structures.
+ */
+
+/*
+ * For each vm_page_t, there is a list of all currently
+ * valid virtual mappings of that page. An entry is
+ * a pv_entry_t; the list is the pv_table.
+ */
+
+typedef struct pv_entry {
+ struct pv_entry *next; /* next pv_entry */
+ pmap_t pmap; /* pmap where mapping lies */
+ vm_offset_t va; /* virtual address for mapping */
+} *pv_entry_t;
+
+#define PV_ENTRY_NULL ((pv_entry_t) 0)
+
+pv_entry_t pv_head_table; /* array of entries, one per page */
+
+/*
+ * pv_list entries are kept on a list that can only be accessed
+ * with the pmap system locked (at SPLVM, not in the cpus_active set).
+ * The list is refilled from the pv_list_zone if it becomes empty.
+ */
+pv_entry_t pv_free_list; /* free list at SPLVM */
+decl_simple_lock_data(, pv_free_list_lock)
+
+#define PV_ALLOC(pv_e) { \
+ simple_lock(&pv_free_list_lock); \
+ if ((pv_e = pv_free_list) != 0) { \
+ pv_free_list = pv_e->next; \
+ } \
+ simple_unlock(&pv_free_list_lock); \
+}
+
+#define PV_FREE(pv_e) { \
+ simple_lock(&pv_free_list_lock); \
+ pv_e->next = pv_free_list; \
+ pv_free_list = pv_e; \
+ simple_unlock(&pv_free_list_lock); \
+}
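+
+/*
+ * Editorial usage note (mirroring pmap_enter below): PV_ALLOC leaves
+ * its argument PV_ENTRY_NULL when the free list is empty, so callers
+ * refill from the zone, roughly:
+ *
+ *	PV_ALLOC(pv_e);
+ *	if (pv_e == PV_ENTRY_NULL)
+ *		pv_e = (pv_entry_t) zalloc(pv_list_zone);
+ */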
+
+zone_t pv_list_zone; /* zone of pv_entry structures */
+
+/*
+ * Each entry in the pv_head_table is locked by a bit in the
+ * pv_lock_table. The lock bits are accessed by the physical
+ * address of the page they lock.
+ */
+
+char *pv_lock_table; /* pointer to array of bits */
+#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE)
+
+/* Has pmap_init completed? */
+boolean_t pmap_initialized = FALSE;
+
+/*
+ * More-specific code provides these;
+ * they indicate the total extent of physical memory
+ * that we know about and might ever have to manage.
+ */
+extern vm_offset_t phys_first_addr, phys_last_addr;
+
+/*
+ * Range of kernel virtual addresses available for kernel memory mapping.
+ * Does not include the virtual addresses used to map physical memory 1-1.
+ * Initialized by pmap_bootstrap.
+ */
+vm_offset_t kernel_virtual_start;
+vm_offset_t kernel_virtual_end;
+
+/* XXX stupid fixed limit - get rid */
+vm_size_t morevm = 40 * 1024 * 1024; /* VM space for kernel map */
+
+/*
+ * Index into pv_head table, its lock bits, and the modify/reference
+ * bits starting at phys_first_addr.
+ */
+#define pa_index(pa) (atop(pa - phys_first_addr))
+
+#define pai_to_pvh(pai) (&pv_head_table[pai])
+#define lock_pvh_pai(pai) (bit_lock(pai, pv_lock_table))
+#define unlock_pvh_pai(pai) (bit_unlock(pai, pv_lock_table))
+
+/*
+ * Array of physical page attributes for managed pages.
+ * One byte per physical page.
+ */
+char *pmap_phys_attributes;
+
+/*
+ * Physical page attributes. Copy bits from PTE definition.
+ */
+#define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */
+#define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */
+
+/*
+ * Amount of virtual memory mapped by one
+ * page-directory entry.
+ */
+#define PDE_MAPPED_SIZE (pdenum2lin(1))
+
+/*
+ * We allocate page table pages directly from the VM system
+ * through this object. It maps physical memory.
+ */
+vm_object_t pmap_object = VM_OBJECT_NULL;
+
+/*
+ * Locking and TLB invalidation
+ */
+
+/*
+ * Locking Protocols:
+ *
+ * There are two structures in the pmap module that need locking:
+ * the pmaps themselves, and the per-page pv_lists (which are locked
+ * by locking the pv_lock_table entry that corresponds to the pv_head
+ * for the list in question.) Most routines want to lock a pmap and
+ * then do operations in it that require pv_list locking -- however
+ * pmap_remove_all and pmap_copy_on_write operate on a physical page
+ * basis and want to do the locking in the reverse order, i.e. lock
+ * a pv_list and then go through all the pmaps referenced by that list.
+ * To protect against deadlock between these two cases, the pmap_lock
+ * is used. There are three different locking protocols as a result:
+ *
+ * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
+ * the pmap.
+ *
+ * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
+ * lock on the pmap_lock (shared read), then lock the pmap
+ * and finally the pv_lists as needed [i.e. pmap lock before
+ * pv_list lock.]
+ *
+ * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
+ * Get a write lock on the pmap_lock (exclusive write); this
+ * also guarantees exclusive access to the pv_lists. Lock the
+ * pmaps as needed.
+ *
+ * At no time may any routine hold more than one pmap lock or more than
+ * one pv_list lock. Because interrupt level routines can allocate
+ * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
+ * kernel_pmap can only be held at splvm.
+ */
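+
+/*
+ * Editorial sketch of protocol 2 above, in the order the pmap-based
+ * routines below (pmap_remove, pmap_enter, ...) actually take the
+ * locks:
+ *
+ *	PMAP_READ_LOCK(pmap, spl);	read lock on pmap_system_lock,
+ *					then the pmap's simple lock
+ *	...
+ *	LOCK_PVH(pai);			per-page pv_list lock last
+ *	...
+ *	UNLOCK_PVH(pai);
+ *	PMAP_READ_UNLOCK(pmap, spl);
+ */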
+
+#if NCPUS > 1
+/*
+ * We raise the interrupt level to splvm, to block interprocessor
+ * interrupts during pmap operations. We must take the CPU out of
+ * the cpus_active set while interrupts are blocked.
+ */
+#define SPLVM(spl) { \
+ spl = splvm(); \
+ i_bit_clear(cpu_number(), &cpus_active); \
+}
+
+#define SPLX(spl) { \
+ i_bit_set(cpu_number(), &cpus_active); \
+ splx(spl); \
+}
+
+/*
+ * Lock on pmap system
+ */
+lock_data_t pmap_system_lock;
+
+#define PMAP_READ_LOCK(pmap, spl) { \
+ SPLVM(spl); \
+ lock_read(&pmap_system_lock); \
+ simple_lock(&(pmap)->lock); \
+}
+
+#define PMAP_WRITE_LOCK(spl) { \
+ SPLVM(spl); \
+ lock_write(&pmap_system_lock); \
+}
+
+#define PMAP_READ_UNLOCK(pmap, spl) { \
+ simple_unlock(&(pmap)->lock); \
+ lock_read_done(&pmap_system_lock); \
+ SPLX(spl); \
+}
+
+#define PMAP_WRITE_UNLOCK(spl) { \
+ lock_write_done(&pmap_system_lock); \
+ SPLX(spl); \
+}
+
+#define PMAP_WRITE_TO_READ_LOCK(pmap) { \
+ simple_lock(&(pmap)->lock); \
+ lock_write_to_read(&pmap_system_lock); \
+}
+
+#define LOCK_PVH(index) (lock_pvh_pai(index))
+
+#define UNLOCK_PVH(index) (unlock_pvh_pai(index))
+
+#define PMAP_UPDATE_TLBS(pmap, s, e) \
+{ \
+ cpu_set cpu_mask = 1 << cpu_number(); \
+ cpu_set users; \
+ \
+ /* Since the pmap is locked, other updates are locked */ \
+ /* out, and any pmap_activate has finished. */ \
+ \
+ /* find other cpus using the pmap */ \
+ users = (pmap)->cpus_using & ~cpu_mask; \
+ if (users) { \
+ /* signal them, and wait for them to finish */ \
+ /* using the pmap */ \
+ signal_cpus(users, (pmap), (s), (e)); \
+ while ((pmap)->cpus_using & cpus_active & ~cpu_mask) \
+ continue; \
+ } \
+ \
+ /* invalidate our own TLB if pmap is in use */ \
+ if ((pmap)->cpus_using & cpu_mask) { \
+ INVALIDATE_TLB((s), (e)); \
+ } \
+}
+
+#else /* NCPUS > 1 */
+
+#define SPLVM(spl)
+#define SPLX(spl)
+
+#define PMAP_READ_LOCK(pmap, spl) SPLVM(spl)
+#define PMAP_WRITE_LOCK(spl) SPLVM(spl)
+#define PMAP_READ_UNLOCK(pmap, spl) SPLX(spl)
+#define PMAP_WRITE_UNLOCK(spl) SPLX(spl)
+#define PMAP_WRITE_TO_READ_LOCK(pmap)
+
+#define LOCK_PVH(index)
+#define UNLOCK_PVH(index)
+
+#define PMAP_UPDATE_TLBS(pmap, s, e) { \
+ /* invalidate our own TLB if pmap is in use */ \
+ if ((pmap)->cpus_using) { \
+ INVALIDATE_TLB((s), (e)); \
+ } \
+}
+
+#endif /* NCPUS > 1 */
+
+#define MAX_TBIS_SIZE 32 /* > this -> TBIA */ /* XXX */
+
+#if i860
+/* Do a data cache flush until we find the caching bug XXX prp */
+#define INVALIDATE_TLB(s, e) { \
+ flush(); \
+ flush_tlb(); \
+}
+#else /* i860 */
+#define INVALIDATE_TLB(s, e) { \
+ flush_tlb(); \
+}
+#endif /* i860 */
+
+
+#if NCPUS > 1
+/*
+ * Structures to keep track of pending TLB invalidations
+ */
+
+#define UPDATE_LIST_SIZE 4
+
+struct pmap_update_item {
+ pmap_t pmap; /* pmap to invalidate */
+ vm_offset_t start; /* start address to invalidate */
+ vm_offset_t end; /* end address to invalidate */
+} ;
+
+typedef struct pmap_update_item *pmap_update_item_t;
+
+/*
+ * List of pmap updates. If the list overflows,
+ * the last entry is changed to invalidate all.
+ */
+struct pmap_update_list {
+ decl_simple_lock_data(, lock)
+ int count;
+ struct pmap_update_item item[UPDATE_LIST_SIZE];
+} ;
+typedef struct pmap_update_list *pmap_update_list_t;
+
+struct pmap_update_list cpu_update_list[NCPUS];
+
+#endif /* NCPUS > 1 */
+
+/*
+ * Other useful macros.
+ */
+#define current_pmap() (vm_map_pmap(current_thread()->task->map))
+#define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0)
+
+struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+
+struct zone *pmap_zone; /* zone of pmap structures */
+
+int pmap_debug = 0; /* flag for debugging prints */
+
+#if 0
+int ptes_per_vm_page; /* number of hardware ptes needed
+ to map one VM page. */
+#else
+#define ptes_per_vm_page 1
+#endif
+
+unsigned int inuse_ptepages_count = 0; /* debugging */
+
+extern char end;
+
+/*
+ * Pointer to the basic page directory for the kernel.
+ * Initialized by pmap_bootstrap().
+ */
+pt_entry_t *kernel_page_dir;
+
+void pmap_remove_range(); /* forward */
+#if NCPUS > 1
+void signal_cpus(); /* forward */
+#endif /* NCPUS > 1 */
+
+#if i860
+/*
+ * Paging flag
+ */
+int paging_enabled = 0;
+#endif
+
+static inline pt_entry_t *
+pmap_pde(pmap_t pmap, vm_offset_t addr)
+{
+ if (pmap == kernel_pmap)
+ addr = kvtolin(addr);
+ return &pmap->dirbase[lin2pdenum(addr)];
+}
+
+/*
+ * Given an offset and a map, compute the address of the
+ * pte. If the address is invalid with respect to the map
+ * then PT_ENTRY_NULL is returned (and the map may need to grow).
+ *
+ * This is only used internally.
+ */
+pt_entry_t *
+pmap_pte(pmap_t pmap, vm_offset_t addr)
+{
+ pt_entry_t *ptp;
+ pt_entry_t pte;
+
+ if (pmap->dirbase == 0)
+ return(PT_ENTRY_NULL);
+ pte = *pmap_pde(pmap, addr);
+ if ((pte & INTEL_PTE_VALID) == 0)
+ return(PT_ENTRY_NULL);
+ ptp = (pt_entry_t *)ptetokv(pte);
+ return(&ptp[ptenum(addr)]);
+}
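+
+/*
+ * Editorial worked example (not part of the original file): with 4KB
+ * pages and 1024-entry tables, the two-level lookup above splits a
+ * 32-bit linear address as bits 31..22 = page-directory index
+ * (lin2pdenum), bits 21..12 = page-table index (ptenum), and bits
+ * 11..0 = byte offset within the page.  For instance, linear address
+ * 0x00403025 resolves through directory entry 1 and table entry 3 to
+ * offset 0x025 in the mapped page.
+ */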
+
+#define DEBUG_PTE_PAGE 0
+
+#if DEBUG_PTE_PAGE
+void ptep_check(ptep)
+ ptep_t ptep;
+{
+ register pt_entry_t *pte, *epte;
+ int ctu, ctw;
+
+ /* check the use and wired counts */
+ if (ptep == PTE_PAGE_NULL)
+ return;
+ pte = pmap_pte(ptep->pmap, ptep->va);
+ epte = pte + INTEL_PGBYTES/sizeof(pt_entry_t);
+ ctu = 0;
+ ctw = 0;
+ while (pte < epte) {
+ if (pte->pfn != 0) {
+ ctu++;
+ if (pte->wired)
+ ctw++;
+ }
+ pte += ptes_per_vm_page;
+ }
+
+ if (ctu != ptep->use_count || ctw != ptep->wired_count) {
+ printf("use %d wired %d - actual use %d wired %d\n",
+ ptep->use_count, ptep->wired_count, ctu, ctw);
+ panic("pte count");
+ }
+}
+#endif /* DEBUG_PTE_PAGE */
+
+/*
+ * Map memory at initialization. The physical addresses being
+ * mapped are not managed and are never unmapped.
+ *
+ * For now, VM is already on, we only need to map the
+ * specified memory.
+ */
+vm_offset_t pmap_map(virt, start, end, prot)
+ register vm_offset_t virt;
+ register vm_offset_t start;
+ register vm_offset_t end;
+ register int prot;
+{
+ register int ps;
+
+ ps = PAGE_SIZE;
+ while (start < end) {
+ pmap_enter(kernel_pmap, virt, start, prot, FALSE);
+ virt += ps;
+ start += ps;
+ }
+ return(virt);
+}
+
+/*
+ * Back-door routine for mapping kernel VM at initialization.
+ * Useful for mapping memory outside the range
+ * [phys_first_addr, phys_last_addr) (i.e., devices).
+ * Otherwise like pmap_map.
+#if i860
+ * Sets no-cache bit.
+#endif
+ */
+vm_offset_t pmap_map_bd(virt, start, end, prot)
+ register vm_offset_t virt;
+ register vm_offset_t start;
+ register vm_offset_t end;
+ vm_prot_t prot;
+{
+ register pt_entry_t template;
+ register pt_entry_t *pte;
+
+ template = pa_to_pte(start)
+#if i860
+ | INTEL_PTE_NCACHE
+#endif
+ | INTEL_PTE_VALID;
+ if (prot & VM_PROT_WRITE)
+ template |= INTEL_PTE_WRITE;
+
+ while (start < end) {
+ pte = pmap_pte(kernel_pmap, virt);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_map_bd: Invalid kernel address\n");
+ WRITE_PTE_FAST(pte, template)
+ pte_increment_pa(template);
+ virt += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ return(virt);
+}
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ * Allocate the kernel page directory and page tables,
+ * and direct-map all physical memory.
+ * Called with mapping off.
+ */
+void pmap_bootstrap()
+{
+ /*
+ * Mapping is turned off; we must reference only physical addresses.
+ * The load image of the system is to be mapped 1-1 physical = virtual.
+ */
+
+ /*
+ * Set ptes_per_vm_page for general use.
+ */
+#if 0
+ ptes_per_vm_page = PAGE_SIZE / INTEL_PGBYTES;
+#endif
+
+ /*
+ * The kernel's pmap is statically allocated so we don't
+ * have to use pmap_create, which is unlikely to work
+ * correctly at this part of the boot sequence.
+ */
+
+ kernel_pmap = &kernel_pmap_store;
+
+#if NCPUS > 1
+ lock_init(&pmap_system_lock, FALSE); /* NOT a sleep lock */
+#endif /* NCPUS > 1 */
+
+ simple_lock_init(&kernel_pmap->lock);
+
+ kernel_pmap->ref_count = 1;
+
+ /*
+ * Determine the kernel virtual address range.
+ * It starts at the end of the physical memory
+ * mapped into the kernel address space,
+ * and extends to a stupid arbitrary limit beyond that.
+ */
+ kernel_virtual_start = phys_last_addr;
+ kernel_virtual_end = phys_last_addr + morevm;
+
+ /*
+ * Allocate and clear a kernel page directory.
+ */
+ kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)pmap_grab_page();
+ {
+ int i;
+ for (i = 0; i < NPDES; i++)
+ kernel_pmap->dirbase[i] = 0;
+ }
+
+ /*
+ * Allocate and set up the kernel page tables.
+ */
+ {
+ vm_offset_t va;
+
+ /*
+ * Map virtual memory for all known physical memory, 1-1,
+ * from phys_first_addr to phys_last_addr.
+ * Make any mappings that lie completely within the kernel's text segment read-only.
+ *
+ * Also allocate some additional all-null page tables afterwards
+ * for kernel virtual memory allocation,
+ * because this PMAP module is too stupid
+ * to allocate new kernel page tables later.
+ * XX fix this
+ */
+ for (va = phys_first_addr; va < phys_last_addr + morevm; )
+ {
+ pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va));
+ pt_entry_t *ptable = (pt_entry_t*)pmap_grab_page();
+ pt_entry_t *pte;
+ vm_offset_t pteva;
+
+ /* Initialize the page directory entry. */
+ *pde = pa_to_pte((vm_offset_t)ptable)
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE;
+
+ /* Initialize the page table. */
+ for (pte = ptable; (va < phys_last_addr) && (pte < ptable+NPTES); pte++)
+ {
+ if ((pte - ptable) < ptenum(va))
+ {
+ WRITE_PTE_FAST(pte, 0);
+ }
+ else
+ {
+ extern char start[], etext[];
+
+ if ((va >= (vm_offset_t)start)
+ && (va + INTEL_PGBYTES <= (vm_offset_t)etext))
+ {
+ WRITE_PTE_FAST(pte, pa_to_pte(va)
+ | INTEL_PTE_VALID);
+ }
+ else
+ {
+ WRITE_PTE_FAST(pte, pa_to_pte(va)
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+ }
+ va += INTEL_PGBYTES;
+ }
+ }
+ for (; pte < ptable+NPTES; pte++)
+ {
+ WRITE_PTE_FAST(pte, 0);
+ va += INTEL_PGBYTES;
+ }
+ }
+ }
+
+#if i860
+#error probably doesnt work anymore
+ XXX move to architecture-specific code just after the pmap_bootstrap call.
+
+ /* kvtophys should now work in phys range */
+
+ /*
+ * Mark page table pages non-cacheable
+ */
+
+ pt_pte = (pt_entry_t *)pte_to_pa(*(kpde + pdenum(sva))) + ptenum(sva);
+
+ for (va = load_start; va < tva; va += INTEL_PGBYTES*NPTES) {
+ /* Mark page table non-cacheable */
+ *pt_pte |= INTEL_PTE_NCACHE;
+ pt_pte++;
+ }
+
+ /*
+ * Map I/O space
+ */
+
+ ppde = kpde;
+ ppde += pdenum(IO_BASE);
+
+ if (pte_to_pa(*ppde) == 0) {
+ /* This pte has not been allocated */
+ ppte = (pt_entry_t *)kvtophys(virtual_avail);
+ ptend = ppte + NPTES;
+ virtual_avail = phystokv((vm_offset_t)ptend);
+ *ppde = pa_to_pte((vm_offset_t)ppte)
+ | INTEL_PTE_VALID
+ | INTEL_PTE_WRITE;
+ pte = ptend;
+
+ /* Mark page table non-cacheable */
+ *pt_pte |= INTEL_PTE_NCACHE;
+ pt_pte++;
+
+ bzero((char *)ppte, INTEL_PGBYTES);
+ } else {
+ ppte = (pt_entry_t *)(*ppde); /* first pte of page */
+ }
+ *ppde |= INTEL_PTE_USER;
+
+
+ WRITE_PTE(ppte + ptenum(FIFO_ADDR),
+ pa_to_pte(FIFO_ADDR_PH)
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_NCACHE);
+
+ WRITE_PTE(ppte + ptenum(FIFO_ADDR + XEOD_OFF),
+ pa_to_pte(FIFO_ADDR_PH + XEOD_OFF_PH)
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_NCACHE);
+
+/* XXX Allowed user access to control reg - cfj */
+ WRITE_PTE(ppte + ptenum(CSR_ADDR),
+ pa_to_pte(CSR_ADDR_PH)
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_NCACHE | INTEL_PTE_USER);
+
+/* XXX Allowed user access to perf reg - cfj */
+ WRITE_PTE(ppte + ptenum(PERFCNT_ADDR),
+ pa_to_pte(PERFCNT_ADDR_PH)
+ | INTEL_PTE_VALID | INTEL_PTE_USER | INTEL_PTE_NCACHE | INTEL_PTE_USER);
+
+ WRITE_PTE(ppte + ptenum(UART_ADDR),
+ pa_to_pte(UART_ADDR_PH)
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_NCACHE);
+
+ WRITE_PTE(ppte + ptenum(0xFFFFF000),
+ pa_to_pte(avail_end)
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+ avail_start = kvtophys(virtual_avail);
+
+/*
+ * Turn on mapping
+ */
+
+ flush_and_ctxsw(kernel_pmap->dirbase);
+ paging_enabled = 1;
+
+ printf("Paging enabled.\n");
+#endif
+
+ /* Architecture-specific code will turn on paging
+ soon after we return from here. */
+}
+
+void pmap_virtual_space(startp, endp)
+ vm_offset_t *startp;
+ vm_offset_t *endp;
+{
+ *startp = kernel_virtual_start;
+ *endp = kernel_virtual_end;
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void pmap_init()
+{
+ register long npages;
+ vm_offset_t addr;
+ register vm_size_t s;
+ int i;
+
+ /*
+ * Allocate memory for the pv_head_table and its lock bits,
+ * the modify bit array, and the pte_page table.
+ */
+
+ npages = atop(phys_last_addr - phys_first_addr);
+ s = (vm_size_t) (sizeof(struct pv_entry) * npages
+ + pv_lock_table_size(npages)
+ + npages);
+
+ s = round_page(s);
+ if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
+ panic("pmap_init");
+ bzero((char *) addr, s);
+
+ /*
+ * Allocate the structures first to preserve word-alignment.
+ */
+ pv_head_table = (pv_entry_t) addr;
+ addr = (vm_offset_t) (pv_head_table + npages);
+
+ pv_lock_table = (char *) addr;
+ addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
+
+ pmap_phys_attributes = (char *) addr;
+
+ /*
+ * Create the zone of physical maps,
+ * and of the physical-to-virtual entries.
+ */
+ s = (vm_size_t) sizeof(struct pmap);
+ pmap_zone = zinit(s, 400*s, 4096, 0, "pmap"); /* XXX */
+ s = (vm_size_t) sizeof(struct pv_entry);
+ pv_list_zone = zinit(s, 10000*s, 4096, 0, "pv_list"); /* XXX */
+
+#if NCPUS > 1
+ /*
+ * Set up the pmap request lists
+ */
+ for (i = 0; i < NCPUS; i++) {
+ pmap_update_list_t up = &cpu_update_list[i];
+
+ simple_lock_init(&up->lock);
+ up->count = 0;
+ }
+#endif /* NCPUS > 1 */
+
+ /*
+ * Indicate that the PMAP module is now fully initialized.
+ */
+ pmap_initialized = TRUE;
+}
+
+#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
+
+boolean_t pmap_verify_free(phys)
+ vm_offset_t phys;
+{
+ pv_entry_t pv_h;
+ int pai;
+ int spl;
+ boolean_t result;
+
+ assert(phys != vm_page_fictitious_addr);
+ if (!pmap_initialized)
+ return(TRUE);
+
+ if (!pmap_valid_page(phys))
+ return(FALSE);
+
+ PMAP_WRITE_LOCK(spl);
+
+ pai = pa_index(phys);
+ pv_h = pai_to_pvh(pai);
+
+ result = (pv_h->pmap == PMAP_NULL);
+ PMAP_WRITE_UNLOCK(spl);
+
+ return(result);
+}
+
+/*
+ * Routine: pmap_page_table_page_alloc
+ *
+ * Allocates a new physical page to be used as a page-table page.
+ *
+ * Must be called with the pmap system and the pmap unlocked,
+ * since these must be unlocked to use vm_page_grab.
+ */
+vm_offset_t
+pmap_page_table_page_alloc()
+{
+ register vm_page_t m;
+ register vm_offset_t pa;
+
+ check_simple_locks();
+
+ /*
+ * We cannot allocate the pmap_object in pmap_init,
+ * because it is called before the zone package is up.
+ * Allocate it now if it is missing.
+ */
+ if (pmap_object == VM_OBJECT_NULL)
+ pmap_object = vm_object_allocate(phys_last_addr - phys_first_addr);
+
+ /*
+ * Allocate a VM page for the level 2 page table entries.
+ */
+ while ((m = vm_page_grab()) == VM_PAGE_NULL)
+ VM_PAGE_WAIT((void (*)()) 0);
+
+ /*
+ * Map the page to its physical address so that it
+ * can be found later.
+ */
+ pa = m->phys_addr;
+ vm_object_lock(pmap_object);
+ vm_page_insert(m, pmap_object, pa);
+ vm_page_lock_queues();
+ vm_page_wire(m);
+ inuse_ptepages_count++;
+ vm_page_unlock_queues();
+ vm_object_unlock(pmap_object);
+
+ /*
+ * Zero the page.
+ */
+ bzero(phystokv(pa), PAGE_SIZE);
+
+#if i860
+ /*
+ * Mark the page table page(s) non-cacheable.
+ */
+ {
+ int i = ptes_per_vm_page;
+ pt_entry_t *pdp;
+
+ pdp = pmap_pte(kernel_pmap, pa);
+ do {
+ *pdp |= INTEL_PTE_NCACHE;
+ pdp++;
+ } while (--i > 0);
+ }
+#endif
+ return pa;
+}
+
+/*
+ * Deallocate a page-table page.
+ * The page-table page must have all mappings removed,
+ * and be removed from its page directory.
+ */
+void
+pmap_page_table_page_dealloc(pa)
+ vm_offset_t pa;
+{
+ vm_page_t m;
+
+ vm_object_lock(pmap_object);
+ m = vm_page_lookup(pmap_object, pa);
+ vm_page_lock_queues();
+ vm_page_free(m);
+ inuse_ptepages_count--;
+ vm_page_unlock_queues();
+ vm_object_unlock(pmap_object);
+}
+
+/*
+ * Create and return a physical map.
+ *
+ * If the size specified for the map
+ * is zero, the map is an actual physical
+ * map, and may be referenced by the
+ * hardware.
+ *
+ * If the size specified is non-zero,
+ * the map will be used in software only, and
+ * is bounded by that size.
+ */
+pmap_t pmap_create(size)
+ vm_size_t size;
+{
+ register pmap_t p;
+ register pmap_statistics_t stats;
+
+ /*
+ * A software use-only map doesn't even need a map.
+ */
+
+ if (size != 0) {
+ return(PMAP_NULL);
+ }
+
+/*
+ * Allocate a pmap struct from the pmap_zone. Then allocate
+ * the page descriptor table from the pd_zone.
+ */
+
+ p = (pmap_t) zalloc(pmap_zone);
+ if (p == PMAP_NULL)
+ panic("pmap_create");
+
+ if (kmem_alloc_wired(kernel_map,
+ (vm_offset_t *)&p->dirbase, INTEL_PGBYTES)
+ != KERN_SUCCESS)
+ panic("pmap_create");
+
+ bcopy(kernel_page_dir, p->dirbase, INTEL_PGBYTES);
+ p->ref_count = 1;
+
+ simple_lock_init(&p->lock);
+ p->cpus_using = 0;
+
+ /*
+ * Initialize statistics.
+ */
+
+ stats = &p->stats;
+ stats->resident_count = 0;
+ stats->wired_count = 0;
+
+ return(p);
+}
+
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains
+ * no valid mappings.
+ */
+
+void pmap_destroy(p)
+ register pmap_t p;
+{
+ register pt_entry_t *pdep;
+ register vm_offset_t pa;
+ register int c, s;
+ register vm_page_t m;
+
+ if (p == PMAP_NULL)
+ return;
+
+ SPLVM(s);
+ simple_lock(&p->lock);
+ c = --p->ref_count;
+ simple_unlock(&p->lock);
+ SPLX(s);
+
+ if (c != 0) {
+ return; /* still in use */
+ }
+
+ /*
+ * Free the memory maps, then the
+ * pmap structure.
+ */
+ for (pdep = p->dirbase;
+ pdep < &p->dirbase[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS)];
+ pdep += ptes_per_vm_page) {
+ if (*pdep & INTEL_PTE_VALID) {
+ pa = pte_to_pa(*pdep);
+ vm_object_lock(pmap_object);
+ m = vm_page_lookup(pmap_object, pa);
+ if (m == VM_PAGE_NULL)
+ panic("pmap_destroy: pte page not in object");
+ vm_page_lock_queues();
+ vm_page_free(m);
+ inuse_ptepages_count--;
+ vm_page_unlock_queues();
+ vm_object_unlock(pmap_object);
+ }
+ }
+ kmem_free(kernel_map, p->dirbase, INTEL_PGBYTES);
+ zfree(pmap_zone, (vm_offset_t) p);
+}
+
+/*
+ * Add a reference to the specified pmap.
+ */
+
+void pmap_reference(p)
+ register pmap_t p;
+{
+ int s;
+ if (p != PMAP_NULL) {
+ SPLVM(s);
+ simple_lock(&p->lock);
+ p->ref_count++;
+ simple_unlock(&p->lock);
+ SPLX(s);
+ }
+}
+
+/*
+ * Remove a range of hardware page-table entries.
+ * The entries given are the first (inclusive)
+ * and last (exclusive) entries for the VM pages.
+ * The virtual address is the va for the first pte.
+ *
+ * The pmap must be locked.
+ * If the pmap is not the kernel pmap, the range must lie
+ * entirely within one pte-page. This is NOT checked.
+ * Assumes that the pte-page exists.
+ */
+
+/* static */
+void pmap_remove_range(pmap, va, spte, epte)
+ pmap_t pmap;
+ vm_offset_t va;
+ pt_entry_t *spte;
+ pt_entry_t *epte;
+{
+ register pt_entry_t *cpte;
+ int num_removed, num_unwired;
+ int pai;
+ vm_offset_t pa;
+
+#if DEBUG_PTE_PAGE
+ if (pmap != kernel_pmap)
+ ptep_check(get_pte_page(spte));
+#endif /* DEBUG_PTE_PAGE */
+ num_removed = 0;
+ num_unwired = 0;
+
+ for (cpte = spte; cpte < epte;
+ cpte += ptes_per_vm_page, va += PAGE_SIZE) {
+
+ if (*cpte == 0)
+ continue;
+ pa = pte_to_pa(*cpte);
+
+ num_removed++;
+ if (*cpte & INTEL_PTE_WIRED)
+ num_unwired++;
+
+ if (!valid_page(pa)) {
+
+ /*
+ * Outside range of managed physical memory.
+ * Just remove the mappings.
+ */
+ register int i = ptes_per_vm_page;
+ register pt_entry_t *lpte = cpte;
+ do {
+ *lpte = 0;
+ lpte++;
+ } while (--i > 0);
+ continue;
+ }
+
+ pai = pa_index(pa);
+ LOCK_PVH(pai);
+
+ /*
+ * Get the modify and reference bits.
+ */
+ {
+ register int i;
+ register pt_entry_t *lpte;
+
+ i = ptes_per_vm_page;
+ lpte = cpte;
+ do {
+ pmap_phys_attributes[pai] |=
+ *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
+ *lpte = 0;
+ lpte++;
+ } while (--i > 0);
+ }
+
+ /*
+ * Remove the mapping from the pvlist for
+ * this physical page.
+ */
+ {
+ register pv_entry_t pv_h, prev, cur;
+
+ pv_h = pai_to_pvh(pai);
+ if (pv_h->pmap == PMAP_NULL) {
+ panic("pmap_remove: null pv_list!");
+ }
+ if (pv_h->va == va && pv_h->pmap == pmap) {
+ /*
+ * Header is the pv_entry. Copy the next one
+ * to header and free the next one (we cannot
+ * free the header)
+ */
+ cur = pv_h->next;
+ if (cur != PV_ENTRY_NULL) {
+ *pv_h = *cur;
+ PV_FREE(cur);
+ }
+ else {
+ pv_h->pmap = PMAP_NULL;
+ }
+ }
+ else {
+ cur = pv_h;
+ do {
+ prev = cur;
+ if ((cur = prev->next) == PV_ENTRY_NULL) {
+ panic("pmap-remove: mapping not in pv_list!");
+ }
+ } while (cur->va != va || cur->pmap != pmap);
+ prev->next = cur->next;
+ PV_FREE(cur);
+ }
+ UNLOCK_PVH(pai);
+ }
+ }
+
+ /*
+ * Update the counts
+ */
+ pmap->stats.resident_count -= num_removed;
+ pmap->stats.wired_count -= num_unwired;
+}
+
+/*
+ * Remove the given range of addresses
+ * from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the hardware page size.
+ */
+
+void pmap_remove(map, s, e)
+ pmap_t map;
+ vm_offset_t s, e;
+{
+ int spl;
+ register pt_entry_t *pde;
+ register pt_entry_t *spte, *epte;
+ vm_offset_t l;
+
+ if (map == PMAP_NULL)
+ return;
+
+ PMAP_READ_LOCK(map, spl);
+
+ /*
+ * Invalidate the translation buffer first
+ */
+ PMAP_UPDATE_TLBS(map, s, e);
+
+ pde = pmap_pde(map, s);
+ while (s < e) {
+ l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
+ if (l > e)
+ l = e;
+ if (*pde & INTEL_PTE_VALID) {
+ spte = (pt_entry_t *)ptetokv(*pde);
+ spte = &spte[ptenum(s)];
+ epte = &spte[intel_btop(l-s)];
+ pmap_remove_range(map, s, spte, epte);
+ }
+ s = l;
+ pde++;
+ }
+
+ PMAP_READ_UNLOCK(map, spl);
+}
+
+/*
+ * Routine: pmap_page_protect
+ *
+ * Function:
+ * Lower the permission for all mappings to a given
+ * page.
+ */
+void pmap_page_protect(phys, prot)
+ vm_offset_t phys;
+ vm_prot_t prot;
+{
+ pv_entry_t pv_h, prev;
+ register pv_entry_t pv_e;
+ register pt_entry_t *pte;
+ int pai;
+ register pmap_t pmap;
+ int spl;
+ boolean_t remove;
+
+ assert(phys != vm_page_fictitious_addr);
+ if (!valid_page(phys)) {
+ /*
+ * Not a managed page.
+ */
+ return;
+ }
+
+ /*
+ * Determine the new protection.
+ */
+ switch (prot) {
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ remove = FALSE;
+ break;
+ case VM_PROT_ALL:
+ return; /* nothing to do */
+ default:
+ remove = TRUE;
+ break;
+ }
+
+ /*
+ * Lock the pmap system first, since we will be changing
+ * several pmaps.
+ */
+
+ PMAP_WRITE_LOCK(spl);
+
+ pai = pa_index(phys);
+ pv_h = pai_to_pvh(pai);
+
+ /*
+ * Walk down PV list, changing or removing all mappings.
+ * We do not have to lock the pv_list because we have
+ * the entire pmap system locked.
+ */
+ if (pv_h->pmap != PMAP_NULL) {
+
+ prev = pv_e = pv_h;
+ do {
+ pmap = pv_e->pmap;
+ /*
+ * Lock the pmap to block pmap_extract and similar routines.
+ */
+ simple_lock(&pmap->lock);
+
+ {
+ register vm_offset_t va;
+
+ va = pv_e->va;
+ pte = pmap_pte(pmap, va);
+
+ /*
+ * Consistency checks.
+ */
+ /* assert(*pte & INTEL_PTE_VALID); XXX */
+ /* assert(pte_to_phys(*pte) == phys); */
+
+ /*
+ * Invalidate TLBs for all CPUs using this mapping.
+ */
+ PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+ }
+
+ /*
+ * Remove the mapping if new protection is NONE
+ * or if write-protecting a kernel mapping.
+ */
+ if (remove || pmap == kernel_pmap) {
+ /*
+ * Remove the mapping, collecting any modify bits.
+ */
+ if (*pte & INTEL_PTE_WIRED)
+ panic("pmap_remove_all removing a wired page");
+
+ {
+ register int i = ptes_per_vm_page;
+
+ do {
+ pmap_phys_attributes[pai] |=
+ *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
+ *pte++ = 0;
+ } while (--i > 0);
+ }
+
+ pmap->stats.resident_count--;
+
+ /*
+ * Remove the pv_entry.
+ */
+ if (pv_e == pv_h) {
+ /*
+ * Fix up head later.
+ */
+ pv_h->pmap = PMAP_NULL;
+ }
+ else {
+ /*
+ * Delete this entry.
+ */
+ prev->next = pv_e->next;
+ PV_FREE(pv_e);
+ }
+ }
+ else {
+ /*
+ * Write-protect.
+ */
+ register int i = ptes_per_vm_page;
+
+ do {
+ *pte &= ~INTEL_PTE_WRITE;
+ pte++;
+ } while (--i > 0);
+
+ /*
+ * Advance prev.
+ */
+ prev = pv_e;
+ }
+
+ simple_unlock(&pmap->lock);
+
+ } while ((pv_e = prev->next) != PV_ENTRY_NULL);
+
+ /*
+ * If pv_head mapping was removed, fix it up.
+ */
+ if (pv_h->pmap == PMAP_NULL) {
+ pv_e = pv_h->next;
+ if (pv_e != PV_ENTRY_NULL) {
+ *pv_h = *pv_e;
+ PV_FREE(pv_e);
+ }
+ }
+ }
+
+ PMAP_WRITE_UNLOCK(spl);
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ * Will not increase permissions.
+ */
+void pmap_protect(map, s, e, prot)
+ pmap_t map;
+ vm_offset_t s, e;
+ vm_prot_t prot;
+{
+ register pt_entry_t *pde;
+ register pt_entry_t *spte, *epte;
+ vm_offset_t l;
+ int spl;
+
+ if (map == PMAP_NULL)
+ return;
+
+ /*
+ * Determine the new protection.
+ */
+ switch (prot) {
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ break;
+ case VM_PROT_READ|VM_PROT_WRITE:
+ case VM_PROT_ALL:
+ return; /* nothing to do */
+ default:
+ pmap_remove(map, s, e);
+ return;
+ }
+
+ /*
+ * If write-protecting in the kernel pmap,
+ * remove the mappings; the i386 ignores
+ * the write-permission bit in kernel mode.
+ *
+ * XXX should be #if'd for i386
+ */
+ if (map == kernel_pmap) {
+ pmap_remove(map, s, e);
+ return;
+ }
+
+ SPLVM(spl);
+ simple_lock(&map->lock);
+
+ /*
+ * Invalidate the translation buffer first
+ */
+ PMAP_UPDATE_TLBS(map, s, e);
+
+ pde = pmap_pde(map, s);
+ while (s < e) {
+ l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
+ if (l > e)
+ l = e;
+ if (*pde & INTEL_PTE_VALID) {
+ spte = (pt_entry_t *)ptetokv(*pde);
+ spte = &spte[ptenum(s)];
+ epte = &spte[intel_btop(l-s)];
+
+ while (spte < epte) {
+ if (*spte & INTEL_PTE_VALID)
+ *spte &= ~INTEL_PTE_WRITE;
+ spte++;
+ }
+ }
+ s = l;
+ pde++;
+ }
+
+ simple_unlock(&map->lock);
+ SPLX(spl);
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+void pmap_enter(pmap, v, pa, prot, wired)
+ register pmap_t pmap;
+ vm_offset_t v;
+ register vm_offset_t pa;
+ vm_prot_t prot;
+ boolean_t wired;
+{
+ register pt_entry_t *pte;
+ register pv_entry_t pv_h;
+ register int i, pai;
+ pv_entry_t pv_e;
+ pt_entry_t template;
+ int spl;
+ vm_offset_t old_pa;
+
+ assert(pa != vm_page_fictitious_addr);
+if (pmap_debug) printf("pmap(%x, %x)\n", v, pa);
+ if (pmap == PMAP_NULL)
+ return;
+
+ if (pmap == kernel_pmap && (prot & VM_PROT_WRITE) == 0
+ && !wired /* hack for io_wire */ ) {
+ /*
+ * Because the 386 ignores write protection in kernel mode,
+ * we cannot enter a read-only kernel mapping, and must
+ * remove an existing mapping if changing it.
+ *
+ * XXX should be #if'd for i386
+ */
+ PMAP_READ_LOCK(pmap, spl);
+
+ pte = pmap_pte(pmap, v);
+ if (pte != PT_ENTRY_NULL && *pte != 0) {
+ /*
+ * Invalidate the translation buffer,
+ * then remove the mapping.
+ */
+ PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+ pmap_remove_range(pmap, v, pte,
+ pte + ptes_per_vm_page);
+ }
+ PMAP_READ_UNLOCK(pmap, spl);
+ return;
+ }
+
+ /*
+ * Must allocate a new pvlist entry while we're unlocked;
+ * zalloc may cause pageout (which will lock the pmap system).
+ * If we determine we need a pvlist entry, we will unlock
+ * and allocate one. Then we will retry, throwing away
+ * the allocated entry later (if we no longer need it).
+ */
+ pv_e = PV_ENTRY_NULL;
+Retry:
+ PMAP_READ_LOCK(pmap, spl);
+
+ /*
+ * Expand pmap to include this pte. Assume that
+ * pmap is always expanded to include enough hardware
+ * pages to map one VM page.
+ */
+
+ while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) {
+ /*
+ * Need to allocate a new page-table page.
+ */
+ vm_offset_t ptp;
+ pt_entry_t *pdp;
+ int i;
+
+ if (pmap == kernel_pmap) {
+ /*
+ * Would have to enter the new page-table page in
+ * EVERY pmap.
+ */
+ panic("pmap_expand kernel pmap to %#x", v);
+ }
+
+ /*
+ * Unlock the pmap and allocate a new page-table page.
+ */
+ PMAP_READ_UNLOCK(pmap, spl);
+
+ ptp = pmap_page_table_page_alloc();
+
+ /*
+ * Re-lock the pmap and check that another thread has
+ * not already allocated the page-table page. If it
+ * has, discard the new page-table page (and try
+ * again to make sure).
+ */
+ PMAP_READ_LOCK(pmap, spl);
+
+ if (pmap_pte(pmap, v) != PT_ENTRY_NULL) {
+ /*
+ * Oops...
+ */
+ PMAP_READ_UNLOCK(pmap, spl);
+ pmap_page_table_page_dealloc(ptp);
+ PMAP_READ_LOCK(pmap, spl);
+ continue;
+ }
+
+ /*
+ * Enter the new page table page in the page directory.
+ */
+ i = ptes_per_vm_page;
+ /*XX pdp = &pmap->dirbase[pdenum(v) & ~(i-1)];*/
+ pdp = pmap_pde(pmap, v);
+ do {
+ *pdp = pa_to_pte(ptp) | INTEL_PTE_VALID
+ | INTEL_PTE_USER
+ | INTEL_PTE_WRITE;
+ pdp++;
+ ptp += INTEL_PGBYTES;
+ } while (--i > 0);
+#if i860
+ /*
+ * Flush the data cache.
+ */
+ flush();
+#endif /* i860 */
+
+ /*
+ * Now, get the address of the page-table entry.
+ */
+ continue;
+ }
+
+ /*
+ * Special case if the physical page is already mapped
+ * at this address.
+ */
+ old_pa = pte_to_pa(*pte);
+ if (*pte && old_pa == pa) {
+ /*
+ * May be changing its wired attribute or protection
+ */
+
+ if (wired && !(*pte & INTEL_PTE_WIRED))
+ pmap->stats.wired_count++;
+ else if (!wired && (*pte & INTEL_PTE_WIRED))
+ pmap->stats.wired_count--;
+
+ template = pa_to_pte(pa) | INTEL_PTE_VALID;
+ if (pmap != kernel_pmap)
+ template |= INTEL_PTE_USER;
+ if (prot & VM_PROT_WRITE)
+ template |= INTEL_PTE_WRITE;
+ if (wired)
+ template |= INTEL_PTE_WIRED;
+ PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+ i = ptes_per_vm_page;
+ do {
+ if (*pte & INTEL_PTE_MOD)
+ template |= INTEL_PTE_MOD;
+ WRITE_PTE(pte, template)
+ pte++;
+ pte_increment_pa(template);
+ } while (--i > 0);
+ }
+ else {
+
+ /*
+ * Remove old mapping from the PV list if necessary.
+ */
+ if (*pte) {
+ /*
+ * Invalidate the translation buffer,
+ * then remove the mapping.
+ */
+ PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+
+ /*
+ * Don't free the pte page if removing last
+ * mapping - we will immediately replace it.
+ */
+ pmap_remove_range(pmap, v, pte,
+ pte + ptes_per_vm_page);
+ }
+
+ if (valid_page(pa)) {
+
+ /*
+ * Enter the mapping in the PV list for this
+ * physical page.
+ */
+
+ pai = pa_index(pa);
+ LOCK_PVH(pai);
+ pv_h = pai_to_pvh(pai);
+
+ if (pv_h->pmap == PMAP_NULL) {
+ /*
+ * No mappings yet
+ */
+ pv_h->va = v;
+ pv_h->pmap = pmap;
+ pv_h->next = PV_ENTRY_NULL;
+ }
+ else {
+#if DEBUG
+ {
+ /* check that this mapping is not already there */
+ pv_entry_t e = pv_h;
+ while (e != PV_ENTRY_NULL) {
+ if (e->pmap == pmap && e->va == v)
+ panic("pmap_enter: already in pv_list");
+ e = e->next;
+ }
+ }
+#endif /* DEBUG */
+
+ /*
+ * Add new pv_entry after header.
+ */
+ if (pv_e == PV_ENTRY_NULL) {
+ PV_ALLOC(pv_e);
+ if (pv_e == PV_ENTRY_NULL) {
+ UNLOCK_PVH(pai);
+ PMAP_READ_UNLOCK(pmap, spl);
+
+ /*
+ * Refill from zone.
+ */
+ pv_e = (pv_entry_t) zalloc(pv_list_zone);
+ goto Retry;
+ }
+ }
+ pv_e->va = v;
+ pv_e->pmap = pmap;
+ pv_e->next = pv_h->next;
+ pv_h->next = pv_e;
+ /*
+ * Remember that we used the pvlist entry.
+ */
+ pv_e = PV_ENTRY_NULL;
+ }
+ UNLOCK_PVH(pai);
+ }
+
+ /*
+ * And count the mapping.
+ */
+
+ pmap->stats.resident_count++;
+ if (wired)
+ pmap->stats.wired_count++;
+
+ /*
+ * Build a template to speed up entering -
+ * only the pfn changes.
+ */
+ template = pa_to_pte(pa) | INTEL_PTE_VALID;
+ if (pmap != kernel_pmap)
+ template |= INTEL_PTE_USER;
+ if (prot & VM_PROT_WRITE)
+ template |= INTEL_PTE_WRITE;
+ if (wired)
+ template |= INTEL_PTE_WIRED;
+ i = ptes_per_vm_page;
+ do {
+ WRITE_PTE(pte, template)
+ pte++;
+ pte_increment_pa(template);
+ } while (--i > 0);
+ }
+
+ if (pv_e != PV_ENTRY_NULL) {
+ PV_FREE(pv_e);
+ }
+
+ PMAP_READ_UNLOCK(pmap, spl);
+}
+
+/*
+ * Routine: pmap_change_wiring
+ * Function: Change the wiring attribute for a map/virtual-address
+ * pair.
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void pmap_change_wiring(map, v, wired)
+ register pmap_t map;
+ vm_offset_t v;
+ boolean_t wired;
+{
+ register pt_entry_t *pte;
+ register int i;
+ int spl;
+
+ /*
+ * We must grab the pmap system lock because we may
+ * change a pte_page queue.
+ */
+ PMAP_READ_LOCK(map, spl);
+
+ if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
+ panic("pmap_change_wiring: pte missing");
+
+ if (wired && !(*pte & INTEL_PTE_WIRED)) {
+ /*
+ * wiring down mapping
+ */
+ map->stats.wired_count++;
+ i = ptes_per_vm_page;
+ do {
+ *pte++ |= INTEL_PTE_WIRED;
+ } while (--i > 0);
+ }
+ else if (!wired && (*pte & INTEL_PTE_WIRED)) {
+ /*
+ * unwiring mapping
+ */
+ map->stats.wired_count--;
+ i = ptes_per_vm_page;
+ do {
+ *pte &= ~INTEL_PTE_WIRED;
+ } while (--i > 0);
+ }
+
+ PMAP_READ_UNLOCK(map, spl);
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+
+vm_offset_t pmap_extract(pmap, va)
+ register pmap_t pmap;
+ vm_offset_t va;
+{
+ register pt_entry_t *pte;
+ register vm_offset_t pa;
+ int spl;
+
+ SPLVM(spl);
+ simple_lock(&pmap->lock);
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t) 0;
+ else if (!(*pte & INTEL_PTE_VALID))
+ pa = (vm_offset_t) 0;
+ else
+ pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
+ simple_unlock(&pmap->lock);
+ SPLX(spl);
+ return(pa);
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+#if 0
+void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+ pmap_t dst_pmap;
+ pmap_t src_pmap;
+ vm_offset_t dst_addr;
+ vm_size_t len;
+ vm_offset_t src_addr;
+{
+#ifdef lint
+ dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
+#endif /* lint */
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_collect
+ * Function:
+ * Garbage collects the physical map system for
+ * pages which are no longer used.
+ * Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but
+ * others may be collected.
+ * Usage:
+ * Called by the pageout daemon when pages are scarce.
+ */
+void pmap_collect(p)
+ pmap_t p;
+{
+ register pt_entry_t *pdp, *ptp;
+ pt_entry_t *eptp;
+ vm_offset_t pa;
+ int spl, wired;
+
+ if (p == PMAP_NULL)
+ return;
+
+ if (p == kernel_pmap)
+ return;
+
+ /*
+ * Garbage collect map.
+ */
+ PMAP_READ_LOCK(p, spl);
+ PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
+
+ for (pdp = p->dirbase;
+ pdp < &p->dirbase[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS)];
+ pdp += ptes_per_vm_page)
+ {
+ if (*pdp & INTEL_PTE_VALID) {
+
+ pa = pte_to_pa(*pdp);
+ ptp = (pt_entry_t *)phystokv(pa);
+ eptp = ptp + NPTES*ptes_per_vm_page;
+
+ /*
+ * If the pte page has any wired mappings, we cannot
+ * free it.
+ */
+ wired = 0;
+ {
+ register pt_entry_t *ptep;
+ for (ptep = ptp; ptep < eptp; ptep++) {
+ if (*ptep & INTEL_PTE_WIRED) {
+ wired = 1;
+ break;
+ }
+ }
+ }
+ if (!wired) {
+ /*
+ * Remove the virtual addresses mapped by this pte page.
+ */
+ { /*XXX big hack*/
+ vm_offset_t va = pdenum2lin(pdp - p->dirbase);
+ if (p == kernel_pmap)
+ va = lintokv(va);
+ pmap_remove_range(p,
+ va,
+ ptp,
+ eptp);
+ }
+
+ /*
+ * Invalidate the page directory pointer.
+ */
+ {
+ register int i = ptes_per_vm_page;
+ register pt_entry_t *pdep = pdp;
+ do {
+ *pdep++ = 0;
+ } while (--i > 0);
+ }
+
+ PMAP_READ_UNLOCK(p, spl);
+
+ /*
+ * And free the pte page itself.
+ */
+ {
+ register vm_page_t m;
+
+ vm_object_lock(pmap_object);
+ m = vm_page_lookup(pmap_object, pa);
+ if (m == VM_PAGE_NULL)
+ panic("pmap_collect: pte page not in object");
+ vm_page_lock_queues();
+ vm_page_free(m);
+ inuse_ptepages_count--;
+ vm_page_unlock_queues();
+ vm_object_unlock(pmap_object);
+ }
+
+ PMAP_READ_LOCK(p, spl);
+ }
+ }
+ }
+ PMAP_READ_UNLOCK(p, spl);
+ return;
+
+}
+
+/*
+ * Routine: pmap_activate
+ * Function:
+ * Binds the given physical map to the given
+ * processor, and returns a hardware map description.
+ */
+#if 0
+void pmap_activate(my_pmap, th, my_cpu)
+ register pmap_t my_pmap;
+ thread_t th;
+ int my_cpu;
+{
+ PMAP_ACTIVATE(my_pmap, th, my_cpu);
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_deactivate
+ * Function:
+ * Indicates that the given physical map is no longer
+ * in use on the specified processor. (This is a macro
+ * in pmap.h)
+ */
+#if 0
+void pmap_deactivate(pmap, th, which_cpu)
+ pmap_t pmap;
+ thread_t th;
+ int which_cpu;
+{
+#ifdef lint
+ pmap++; th++; which_cpu++;
+#endif /* lint */
+ PMAP_DEACTIVATE(pmap, th, which_cpu);
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_kernel
+ * Function:
+ * Returns the physical map handle for the kernel.
+ */
+#if 0
+pmap_t pmap_kernel()
+{
+ return (kernel_pmap);
+}
+#endif /* 0 */
+
+/*
+ * pmap_zero_page zeros the specified (machine independent) page.
+ * See machine/phys.c or machine/phys.s for implementation.
+ */
+#if 0
+pmap_zero_page(phys)
+ register vm_offset_t phys;
+{
+ register int i;
+
+ assert(phys != vm_page_fictitious_addr);
+ i = PAGE_SIZE / INTEL_PGBYTES;
+ phys = intel_pfn(phys);
+
+ while (i--)
+ zero_phys(phys++);
+}
+#endif /* 0 */
+
+/*
+ * pmap_copy_page copies the specified (machine independent) page.
+ * See machine/phys.c or machine/phys.s for implementation.
+ */
+#if 0
+pmap_copy_page(src, dst)
+ vm_offset_t src, dst;
+{
+ int i;
+
+ assert(src != vm_page_fictitious_addr);
+ assert(dst != vm_page_fictitious_addr);
+ i = PAGE_SIZE / INTEL_PGBYTES;
+
+ while (i--) {
+ copy_phys(intel_pfn(src), intel_pfn(dst));
+ src += INTEL_PGBYTES;
+ dst += INTEL_PGBYTES;
+ }
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_pageable
+ * Function:
+ * Make the specified pages (by pmap, offset)
+ * pageable (or not) as requested.
+ *
+ * A page which is not pageable may not take
+ * a fault; therefore, its page table entry
+ * must remain valid for the duration.
+ *
+ * This routine is merely advisory; pmap_enter
+ * will specify that these pages are to be wired
+ * down (or not) as appropriate.
+ */
+void pmap_pageable(pmap, start, end, pageable)
+ pmap_t pmap;
+ vm_offset_t start;
+ vm_offset_t end;
+ boolean_t pageable;
+{
+#ifdef lint
+ pmap++; start++; end++; pageable++;
+#endif /* lint */
+}
+
+/*
+ * Clear specified attribute bits.
+ */
+void
+phys_attribute_clear(phys, bits)
+ vm_offset_t phys;
+ int bits;
+{
+ pv_entry_t pv_h;
+ register pv_entry_t pv_e;
+ register pt_entry_t *pte;
+ int pai;
+ register pmap_t pmap;
+ int spl;
+
+ assert(phys != vm_page_fictitious_addr);
+ if (!valid_page(phys)) {
+ /*
+ * Not a managed page.
+ */
+ return;
+ }
+
+ /*
+ * Lock the pmap system first, since we will be changing
+ * several pmaps.
+ */
+
+ PMAP_WRITE_LOCK(spl);
+
+ pai = pa_index(phys);
+ pv_h = pai_to_pvh(pai);
+
+ /*
+ * Walk down PV list, clearing all modify or reference bits.
+ * We do not have to lock the pv_list because we have
+ * the entire pmap system locked.
+ */
+ if (pv_h->pmap != PMAP_NULL) {
+ /*
+ * There are some mappings.
+ */
+ for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+
+ pmap = pv_e->pmap;
+ /*
+ * Lock the pmap to block pmap_extract and similar routines.
+ */
+ simple_lock(&pmap->lock);
+
+ {
+ register vm_offset_t va;
+
+ va = pv_e->va;
+ pte = pmap_pte(pmap, va);
+
+#if 0
+ /*
+ * Consistency checks.
+ */
+ assert(*pte & INTEL_PTE_VALID);
+ /* assert(pte_to_phys(*pte) == phys); */
+#endif
+
+ /*
+ * Invalidate TLBs for all CPUs using this mapping.
+ */
+ PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+ }
+
+ /*
+ * Clear modify or reference bits.
+ */
+ {
+ register int i = ptes_per_vm_page;
+ do {
+ *pte &= ~bits;
+ } while (--i > 0);
+ }
+ simple_unlock(&pmap->lock);
+ }
+ }
+
+ pmap_phys_attributes[pai] &= ~bits;
+
+ PMAP_WRITE_UNLOCK(spl);
+}
+
+/*
+ * Check specified attribute bits.
+ */
+boolean_t
+phys_attribute_test(phys, bits)
+ vm_offset_t phys;
+ int bits;
+{
+ pv_entry_t pv_h;
+ register pv_entry_t pv_e;
+ register pt_entry_t *pte;
+ int pai;
+ register pmap_t pmap;
+ int spl;
+
+ assert(phys != vm_page_fictitious_addr);
+ if (!valid_page(phys)) {
+ /*
+ * Not a managed page.
+ */
+ return (FALSE);
+ }
+
+ /*
+ * Lock the pmap system first, since we will be checking
+ * several pmaps.
+ */
+
+ PMAP_WRITE_LOCK(spl);
+
+ pai = pa_index(phys);
+ pv_h = pai_to_pvh(pai);
+
+ if (pmap_phys_attributes[pai] & bits) {
+ PMAP_WRITE_UNLOCK(spl);
+ return (TRUE);
+ }
+
+ /*
+ * Walk down PV list, checking all mappings.
+ * We do not have to lock the pv_list because we have
+ * the entire pmap system locked.
+ */
+ if (pv_h->pmap != PMAP_NULL) {
+ /*
+ * There are some mappings.
+ */
+ for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+
+ pmap = pv_e->pmap;
+ /*
+ * Lock the pmap to block pmap_extract and similar routines.
+ */
+ simple_lock(&pmap->lock);
+
+ {
+ register vm_offset_t va;
+
+ va = pv_e->va;
+ pte = pmap_pte(pmap, va);
+
+#if 0
+ /*
+ * Consistency checks.
+ */
+ assert(*pte & INTEL_PTE_VALID);
+ /* assert(pte_to_phys(*pte) == phys); */
+#endif
+ }
+
+ /*
+ * Check modify or reference bits.
+ */
+ {
+ register int i = ptes_per_vm_page;
+
+ do {
+ if (*pte & bits) {
+ simple_unlock(&pmap->lock);
+ PMAP_WRITE_UNLOCK(spl);
+ return (TRUE);
+ }
+ } while (--i > 0);
+ }
+ simple_unlock(&pmap->lock);
+ }
+ }
+ PMAP_WRITE_UNLOCK(spl);
+ return (FALSE);
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+
+void pmap_clear_modify(phys)
+ register vm_offset_t phys;
+{
+ phys_attribute_clear(phys, PHYS_MODIFIED);
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
+ */
+
+boolean_t pmap_is_modified(phys)
+ register vm_offset_t phys;
+{
+ return (phys_attribute_test(phys, PHYS_MODIFIED));
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+
+void pmap_clear_reference(phys)
+ vm_offset_t phys;
+{
+ phys_attribute_clear(phys, PHYS_REFERENCED);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page is referenced
+ * by any physical maps.
+ */
+
+boolean_t pmap_is_referenced(phys)
+ vm_offset_t phys;
+{
+ return (phys_attribute_test(phys, PHYS_REFERENCED));
+}
+
+#if NCPUS > 1
+/*
+* TLB Coherence Code (TLB "shootdown" code)
+*
+* Threads that belong to the same task share the same address space and
+* hence share a pmap. However, they may run on distinct cpus and thus
+* have distinct TLBs that cache page table entries. In order to guarantee
+* the TLBs are consistent, whenever a pmap is changed, all threads that
+* are active in that pmap must have their TLB updated. To keep track of
+* this information, the set of cpus that are currently using a pmap is
+* maintained within each pmap structure (cpus_using). Pmap_activate() and
+* pmap_deactivate add and remove, respectively, a cpu from this set.
+* Since the TLBs are not addressable over the bus, each processor must
+* flush its own TLB; a processor that needs to invalidate another TLB
+* needs to interrupt the processor that owns that TLB to signal the
+* update.
+*
+* Whenever a pmap is updated, the lock on that pmap is locked, and all
+* cpus using the pmap are signaled to invalidate. All threads that need
+* to activate a pmap must wait for the lock to clear to await any updates
+* in progress before using the pmap. They must ACQUIRE the lock to add
+* their cpu to the cpus_using set. An implicit assumption made
+* throughout the TLB code is that all kernel code that runs at or higher
+* than splvm blocks out update interrupts, and that such code does not
+* touch pageable pages.
+*
+* A shootdown interrupt serves another function besides signaling a
+* processor to invalidate. The interrupt routine (pmap_update_interrupt)
+* waits for both the pmap lock and the kernel pmap lock to clear,
+* preventing user code from making implicit pmap updates while the
+* sending processor is performing its update. (This could happen via a
+* user data write reference that turns on the modify bit in the page
+* table). It must wait for any kernel updates that may have started
+* concurrently with a user pmap update because the IPC code
+* changes mappings.
+* Spinning on the VALUES of the locks is sufficient (rather than
+* having to acquire the locks) because any updates that occur subsequent
+* to finding the lock unlocked will be signaled via another interrupt.
+* (This assumes the interrupt is cleared before the low level interrupt code
+* calls pmap_update_interrupt()).
+*
+* The signaling processor must wait for any implicit updates in progress
+* to terminate before continuing with its update. Thus it must wait for an
+* acknowledgement of the interrupt from each processor for which such
+* references could be made. For maintaining this information, a set
+* cpus_active is used. A cpu is in this set if and only if it can
+* use a pmap. When pmap_update_interrupt() is entered, a cpu is removed from
+* this set; when all such cpus are removed, it is safe to update.
+*
+* Before attempting to acquire the update lock on a pmap, a cpu (A) must
+* be at least at the priority of the interprocessor interrupt
+* (splip<=splvm). Otherwise, A could grab a lock and be interrupted by a
+* kernel update; it would spin forever in pmap_update_interrupt() trying
+* to acquire the user pmap lock it had already acquired. Furthermore A
+* must remove itself from cpus_active. Otherwise, another cpu holding
+* the lock (B) could be in the process of sending an update signal to A,
+* and thus be waiting for A to remove itself from cpus_active. If A is
+* spinning on the lock at interrupt priority (with the interprocessor
+* interrupt blocked), this will never happen and a deadlock will result.
+*/
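+
+/*
+ * Illustrative sketch (added; not part of the original source): roughly
+ * what a TLB-update macro such as PMAP_UPDATE_TLBS must do on the
+ * sending side, expressed with the primitives defined in this file.
+ * The real macro lives in a header and may differ in detail.
+ *
+ *	// pmap->lock is already held, at splvm or above
+ *	cpu_set others = pmap->cpus_using & ~(1 << cpu_number());
+ *	if (others != 0) {
+ *		signal_cpus(others, pmap, start, end);
+ *		// wait until each signaled cpu has entered
+ *		// pmap_update_interrupt (leaving cpus_active) or gone idle
+ *		while (cpus_active & others)
+ *			continue;
+ *	}
+ *	if (pmap->cpus_using & (1 << cpu_number()))
+ *		INVALIDATE_TLB(start, end);	// flush our own TLB as well
+ */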
+
+/*
+ * Signal another CPU that it must flush its TLB
+ */
+void signal_cpus(use_list, pmap, start, end)
+ cpu_set use_list;
+ pmap_t pmap;
+ vm_offset_t start, end;
+{
+ register int which_cpu, j;
+ register pmap_update_list_t update_list_p;
+
+ while ((which_cpu = ffs(use_list)) != 0) {
+ which_cpu -= 1; /* convert to 0 origin */
+
+ update_list_p = &cpu_update_list[which_cpu];
+ simple_lock(&update_list_p->lock);
+
+ j = update_list_p->count;
+ if (j >= UPDATE_LIST_SIZE) {
+ /*
+ * list overflowed. Change last item to
+ * indicate overflow.
+ */
+ update_list_p->item[UPDATE_LIST_SIZE-1].pmap = kernel_pmap;
+ update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS;
+ update_list_p->item[UPDATE_LIST_SIZE-1].end = VM_MAX_KERNEL_ADDRESS;
+ }
+ else {
+ update_list_p->item[j].pmap = pmap;
+ update_list_p->item[j].start = start;
+ update_list_p->item[j].end = end;
+ update_list_p->count = j+1;
+ }
+ cpu_update_needed[which_cpu] = TRUE;
+ simple_unlock(&update_list_p->lock);
+
+ if ((cpus_idle & (1 << which_cpu)) == 0)
+ interrupt_processor(which_cpu);
+ use_list &= ~(1 << which_cpu);
+ }
+}
+
+void process_pmap_updates(my_pmap)
+ register pmap_t my_pmap;
+{
+ register int my_cpu = cpu_number();
+ register pmap_update_list_t update_list_p;
+ register int j;
+ register pmap_t pmap;
+
+ update_list_p = &cpu_update_list[my_cpu];
+ simple_lock(&update_list_p->lock);
+
+ for (j = 0; j < update_list_p->count; j++) {
+ pmap = update_list_p->item[j].pmap;
+ if (pmap == my_pmap ||
+ pmap == kernel_pmap) {
+
+ INVALIDATE_TLB(update_list_p->item[j].start,
+ update_list_p->item[j].end);
+ }
+ }
+ update_list_p->count = 0;
+ cpu_update_needed[my_cpu] = FALSE;
+ simple_unlock(&update_list_p->lock);
+}
+
+/*
+ * Interrupt routine for TBIA requested from other processor.
+ */
+void pmap_update_interrupt()
+{
+ register int my_cpu;
+ register pmap_t my_pmap;
+ int s;
+
+ my_cpu = cpu_number();
+
+ /*
+ * Exit now if we're idle. We'll pick up the update request
+ * when we go active, and we must not put ourselves back in
+ * the active set because we'll never process the interrupt
+ * while we're idle (thus hanging the system).
+ */
+ if (cpus_idle & (1 << my_cpu))
+ return;
+
+ if (current_thread() == THREAD_NULL)
+ my_pmap = kernel_pmap;
+ else {
+ my_pmap = current_pmap();
+ if (!pmap_in_use(my_pmap, my_cpu))
+ my_pmap = kernel_pmap;
+ }
+
+ /*
+ * Raise spl to splvm (above splip) to block out pmap_extract
+ * from IO code (which would put this cpu back in the active
+ * set).
+ */
+ s = splvm();
+
+ do {
+
+ /*
+ * Indicate that we're not using either user or kernel
+ * pmap.
+ */
+ i_bit_clear(my_cpu, &cpus_active);
+
+ /*
+ * Wait for any pmap updates in progress, on either user
+ * or kernel pmap.
+ */
+ while (*(volatile int *)&my_pmap->lock.lock_data ||
+ *(volatile int *)&kernel_pmap->lock.lock_data)
+ continue;
+
+ process_pmap_updates(my_pmap);
+
+ i_bit_set(my_cpu, &cpus_active);
+
+ } while (cpu_update_needed[my_cpu]);
+
+ splx(s);
+}
+#else /* NCPUS > 1 */
+/*
+ * Dummy routine to satisfy external reference.
+ */
+void pmap_update_interrupt()
+{
+ /* should never be called. */
+}
+#endif /* NCPUS > 1 */
+
+#if i860 /* akp */
+void set_dirbase(dirbase)
+ register vm_offset_t dirbase;
+{
+ /*flush();*/
+ /*flush_tlb();*/
+ flush_and_ctxsw(dirbase);
+}
+#endif /* i860 */
+
+#ifdef i386
+/* Unmap page 0 to trap NULL references. */
+void
+pmap_unmap_page_zero ()
+{
+ int *pte;
+
+ pte = (int *) pmap_pte (kernel_pmap, 0);
+ assert (pte);
+ *pte = 0;
+ asm volatile ("movl %%cr3,%%eax; movl %%eax,%%cr3" ::: "ax");
+}
+#endif /* i386 */
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
new file mode 100644
index 00000000..6e759a6d
--- /dev/null
+++ b/i386/intel/pmap.h
@@ -0,0 +1,401 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: pmap.h
+ *
+ * Authors: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Machine-dependent structures for the physical map module.
+ */
+
+#ifndef _PMAP_MACHINE_
+#define _PMAP_MACHINE_ 1
+
+#ifndef ASSEMBLER
+
+#include <kern/zalloc.h>
+#include <kern/lock.h>
+#include <mach/machine/vm_param.h>
+#include <mach/vm_statistics.h>
+#include <mach/kern_return.h>
+
+/*
+ * Define the generic in terms of the specific
+ */
+
+#if i386
+#define INTEL_PGBYTES I386_PGBYTES
+#define INTEL_PGSHIFT I386_PGSHIFT
+#define intel_btop(x) i386_btop(x)
+#define intel_ptob(x) i386_ptob(x)
+#define intel_round_page(x) i386_round_page(x)
+#define intel_trunc_page(x) i386_trunc_page(x)
+#define trunc_intel_to_vm(x) trunc_i386_to_vm(x)
+#define round_intel_to_vm(x) round_i386_to_vm(x)
+#define vm_to_intel(x) vm_to_i386(x)
+#endif /* i386 */
+#if i860
+#define INTEL_PGBYTES I860_PGBYTES
+#define INTEL_PGSHIFT I860_PGSHIFT
+#define intel_btop(x) i860_btop(x)
+#define intel_ptob(x) i860_ptob(x)
+#define intel_round_page(x) i860_round_page(x)
+#define intel_trunc_page(x) i860_trunc_page(x)
+#define trunc_intel_to_vm(x) trunc_i860_to_vm(x)
+#define round_intel_to_vm(x) round_i860_to_vm(x)
+#define vm_to_intel(x) vm_to_i860(x)
+#endif /* i860 */
+
+/*
+ * i386/i486/i860 Page Table Entry
+ */
+
+typedef unsigned int pt_entry_t;
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+
+#endif /* ASSEMBLER */
+
+#define INTEL_OFFMASK 0xfff /* offset within page */
+#define PDESHIFT 22 /* page descriptor shift */
+#define PDEMASK 0x3ff /* mask for page descriptor index */
+#define PTESHIFT 12 /* page table shift */
+#define PTEMASK 0x3ff /* mask for page table index */
+
+/*
+ * Convert linear offset to page descriptor index
+ */
+#define lin2pdenum(a) (((a) >> PDESHIFT) & PDEMASK)
+
+/*
+ * Convert page descriptor index to linear address
+ */
+#define pdenum2lin(a) ((vm_offset_t)(a) << PDESHIFT)
+
+/*
+ * Convert linear offset to page table index
+ */
+#define ptenum(a) (((a) >> PTESHIFT) & PTEMASK)
+
+#define NPTES (intel_ptob(1)/sizeof(pt_entry_t))
+#define NPDES (intel_ptob(1)/sizeof(pt_entry_t))
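+
+/*
+ * Worked example (added for illustration): for the linear address
+ * 0xc0101234 the macros above give
+ *	lin2pdenum(0xc0101234) = (0xc0101234 >> 22) & 0x3ff = 0x300
+ *	ptenum(0xc0101234)     = (0xc0101234 >> 12) & 0x3ff = 0x101
+ *	0xc0101234 & INTEL_OFFMASK                          = 0x234
+ * and pdenum2lin(0x300) recovers the 4Mb-aligned base 0xc0000000.
+ */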
+
+/*
+ * Hardware pte bit definitions (to be used directly on the ptes
+ * without using the bit fields).
+ */
+
+#if i860
+#define INTEL_PTE_valid 0x00000001
+#else
+#define INTEL_PTE_VALID 0x00000001
+#endif
+#define INTEL_PTE_WRITE 0x00000002
+#define INTEL_PTE_USER 0x00000004
+#define INTEL_PTE_WTHRU 0x00000008
+#define INTEL_PTE_NCACHE 0x00000010
+#define INTEL_PTE_REF 0x00000020
+#define INTEL_PTE_MOD 0x00000040
+#define INTEL_PTE_WIRED 0x00000200
+#define INTEL_PTE_PFN 0xfffff000
+
+#if i860
+#if NOCACHE
+#define INTEL_PTE_VALID (INTEL_PTE_valid \
+ |INTEL_PTE_WTHRU \
+ |INTEL_PTE_NCACHE \
+ |INTEL_PTE_REF \
+ |INTEL_PTE_MOD \
+ )
+#else /* NOCACHE */
+#define INTEL_PTE_VALID (INTEL_PTE_valid \
+ |INTEL_PTE_REF \
+ |INTEL_PTE_MOD \
+ )
+#endif /* NOCACHE */
+#endif /* i860 */
+
+#define pa_to_pte(a) ((a) & INTEL_PTE_PFN)
+#define pte_to_pa(p) ((p) & INTEL_PTE_PFN)
+#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
+
+/*
+ * Convert page table entry to kernel virtual address
+ */
+#define ptetokv(a) (phystokv(pte_to_pa(a)))
+
+#ifndef ASSEMBLER
+typedef volatile long cpu_set; /* set of CPUs - must be <= 32 */
+ /* changed by other processors */
+
+struct pmap {
+ pt_entry_t *dirbase; /* page directory pointer register */
+ int ref_count; /* reference count */
+ decl_simple_lock_data(,lock)
+ /* lock on map */
+ struct pmap_statistics stats; /* map statistics */
+ cpu_set cpus_using; /* bitmap of cpus using pmap */
+};
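+
+/* Note (added for clarity): the physical address of dirbase is what the
+   hardware ultimately uses; see set_dirbase() and the PMAP_ACTIVATE_*
+   macros below, which pass kvtophys(pmap->dirbase). */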
+
+typedef struct pmap *pmap_t;
+
+#define PMAP_NULL ((pmap_t) 0)
+
+#if i860
+/*#define set_dirbase(dirbase) flush_and_ctxsw(dirbase)*//*akp*/
+#else
+#define set_dirbase(dirbase) set_cr3(dirbase)
+#endif
+
+#if NCPUS > 1
+/*
+ * List of cpus that are actively using mapped memory. Any
+ * pmap update operation must wait for all cpus in this list.
+ * Update operations must still be queued to cpus not in this
+ * list.
+ */
+cpu_set cpus_active;
+
+/*
+ * List of cpus that are idle, but still operating, and will want
+ * to see any kernel pmap updates when they become active.
+ */
+cpu_set cpus_idle;
+
+/*
+ * Quick test for pmap update requests.
+ */
+volatile
+boolean_t cpu_update_needed[NCPUS];
+
+/*
+ * External declarations for PMAP_ACTIVATE.
+ */
+
+void process_pmap_updates();
+void pmap_update_interrupt();
+extern pmap_t kernel_pmap;
+
+#endif /* NCPUS > 1 */
+
+/*
+ * Machine dependent routines that are used only for i386/i486/i860.
+ */
+
+pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t addr);
+
+/*
+ * Macros for speed.
+ */
+
+#if NCPUS > 1
+
+/*
+ * For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
+ * fields to control TLB invalidation on other CPUS.
+ */
+
+#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+ \
+ /* \
+ * Let pmap updates proceed while we wait for this pmap. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_active); \
+ \
+ /* \
+ * Lock the pmap to put this cpu in its active set. \
+ * Wait for updates here. \
+ */ \
+ simple_lock(&kernel_pmap->lock); \
+ \
+ /* \
+ * Process invalidate requests for the kernel pmap. \
+ */ \
+ if (cpu_update_needed[(my_cpu)]) \
+ process_pmap_updates(kernel_pmap); \
+ \
+ /* \
+ * Mark that this cpu is using the pmap. \
+ */ \
+ i_bit_set((my_cpu), &kernel_pmap->cpus_using); \
+ \
+ /* \
+ * Mark this cpu active - IPL will be lowered by \
+ * load_context(). \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ \
+ simple_unlock(&kernel_pmap->lock); \
+}
+
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+ /* \
+ * Mark pmap no longer in use by this cpu even if \
+ * pmap is locked against updates. \
+ */ \
+ i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \
+}
+
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+ register pmap_t tpmap = (pmap); \
+ \
+ if (tpmap == kernel_pmap) { \
+ /* \
+ * If this is the kernel pmap, switch to its page tables. \
+ */ \
+ set_dirbase(kvtophys(tpmap->dirbase)); \
+ } \
+ else { \
+ /* \
+ * Let pmap updates proceed while we wait for this pmap. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_active); \
+ \
+ /* \
+ * Lock the pmap to put this cpu in its active set. \
+ * Wait for updates here. \
+ */ \
+ simple_lock(&tpmap->lock); \
+ \
+ /* \
+ * No need to invalidate the TLB - the entire user pmap \
+ * will be invalidated by reloading dirbase. \
+ */ \
+ set_dirbase(kvtophys(tpmap->dirbase)); \
+ \
+ /* \
+ * Mark that this cpu is using the pmap. \
+ */ \
+ i_bit_set((my_cpu), &tpmap->cpus_using); \
+ \
+ /* \
+ * Mark this cpu active - IPL will be lowered by \
+ * load_context(). \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ \
+ simple_unlock(&tpmap->lock); \
+ } \
+}
+
+#define PMAP_DEACTIVATE_USER(pmap, thread, my_cpu) { \
+ register pmap_t tpmap = (pmap); \
+ \
+ /* \
+ * Do nothing if this is the kernel pmap. \
+ */ \
+ if (tpmap != kernel_pmap) { \
+ /* \
+ * Mark pmap no longer in use by this cpu even if \
+ * pmap is locked against updates. \
+ */ \
+ i_bit_clear((my_cpu), &(pmap)->cpus_using); \
+ } \
+}
+
+#define MARK_CPU_IDLE(my_cpu) { \
+ /* \
+ * Mark this cpu idle, and remove it from the active set, \
+ * since it is not actively using any pmap. Signal_cpus \
+ * will notice that it is idle, and avoid signaling it, \
+ * but will queue the update request for when the cpu \
+ * becomes active. \
+ */ \
+ int s = splvm(); \
+ i_bit_set((my_cpu), &cpus_idle); \
+ i_bit_clear((my_cpu), &cpus_active); \
+ splx(s); \
+}
+
+#define MARK_CPU_ACTIVE(my_cpu) { \
+ \
+ int s = splvm(); \
+ /* \
+ * If a kernel_pmap update was requested while this cpu \
+ * was idle, process it as if we got the interrupt. \
+ * Before doing so, remove this cpu from the idle set. \
+ * Since we do not grab any pmap locks while we flush \
+ * our TLB, another cpu may start an update operation \
+ * before we finish. Removing this cpu from the idle \
+ * set assures that we will receive another update \
+ * interrupt if this happens. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_idle); \
+ \
+ if (cpu_update_needed[(my_cpu)]) \
+ pmap_update_interrupt(); \
+ \
+ /* \
+ * Mark that this cpu is now active. \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ splx(s); \
+}
+
+#else /* NCPUS > 1 */
+
+/*
+ * With only one CPU, we just have to indicate whether the pmap is
+ * in use.
+ */
+
+#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+ kernel_pmap->cpus_using = TRUE; \
+}
+
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+ kernel_pmap->cpus_using = FALSE; \
+}
+
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+ register pmap_t tpmap = (pmap); \
+ \
+ set_dirbase(kvtophys(tpmap->dirbase)); \
+ if (tpmap != kernel_pmap) { \
+ tpmap->cpus_using = TRUE; \
+ } \
+}
+
+#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) { \
+ if ((pmap) != kernel_pmap) \
+ (pmap)->cpus_using = FALSE; \
+}
+
+#endif /* NCPUS > 1 */
+
+#define PMAP_CONTEXT(pmap, thread)
+
+#define pmap_kernel() (kernel_pmap)
+#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
+#define pmap_phys_address(frame) ((vm_offset_t) (intel_ptob(frame)))
+#define pmap_phys_to_frame(phys) ((int) (intel_btop(phys)))
+#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
+#define pmap_attribute(pmap,addr,size,attr,value) \
+ (KERN_INVALID_ADDRESS)
+
+#endif /* ASSEMBLER */
+
+#endif /* _PMAP_MACHINE_ */
diff --git a/i386/intel/read_fault.c b/i386/intel/read_fault.c
new file mode 100644
index 00000000..d0c03e83
--- /dev/null
+++ b/i386/intel/read_fault.c
@@ -0,0 +1,178 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <vm/vm_fault.h>
+#include <mach/kern_return.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+
+#include <kern/macro_help.h>
+
+/*
+ * Expansion of vm_fault for read fault in kernel mode.
+ * Must enter the mapping as writable, since the i386
+ * (and i860 in i386 compatibility mode) ignores write
+ * protection in kernel mode.
+ */
+kern_return_t
+intel_read_fault(map, vaddr)
+ vm_map_t map;
+ vm_offset_t vaddr;
+{
+ vm_map_version_t version; /* Map version for
+ verification */
+ vm_object_t object; /* Top-level object */
+ vm_offset_t offset; /* Top-level offset */
+ vm_prot_t prot; /* Protection for mapping */
+ vm_page_t result_page; /* Result of vm_fault_page */
+ vm_page_t top_page; /* Placeholder page */
+ boolean_t wired; /* Is map region wired? */
+ boolean_t su;
+ kern_return_t result;
+ register vm_page_t m;
+
+ RetryFault:
+
+ /*
+ * Find the backing store object and offset into it
+ * to begin search.
+ */
+ result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
+ &object, &offset, &prot, &wired, &su);
+ if (result != KERN_SUCCESS)
+ return (result);
+
+ /*
+ * Make a reference to this object to prevent its
+ * disposal while we are playing with it.
+ */
+ assert(object->ref_count > 0);
+ object->ref_count++;
+ vm_object_paging_begin(object);
+
+ result = vm_fault_page(object, offset, VM_PROT_READ, FALSE, TRUE,
+ &prot, &result_page, &top_page,
+ FALSE, (void (*)()) 0);
+
+ if (result != VM_FAULT_SUCCESS) {
+ vm_object_deallocate(object);
+
+ switch (result) {
+ case VM_FAULT_RETRY:
+ goto RetryFault;
+ case VM_FAULT_INTERRUPTED:
+ return (KERN_SUCCESS);
+ case VM_FAULT_MEMORY_SHORTAGE:
+ VM_PAGE_WAIT((void (*)()) 0);
+ goto RetryFault;
+ case VM_FAULT_FICTITIOUS_SHORTAGE:
+ vm_page_more_fictitious();
+ goto RetryFault;
+ case VM_FAULT_MEMORY_ERROR:
+ return (KERN_MEMORY_ERROR);
+ }
+ }
+
+ m = result_page;
+
+ /*
+ * How to clean up the result of vm_fault_page. This
+ * happens whether the mapping is entered or not.
+ */
+
+#define UNLOCK_AND_DEALLOCATE \
+ MACRO_BEGIN \
+ vm_fault_cleanup(m->object, top_page); \
+ vm_object_deallocate(object); \
+ MACRO_END
+
+ /*
+ * What to do with the resulting page from vm_fault_page
+ * if it doesn't get entered into the physical map:
+ */
+
+#define RELEASE_PAGE(m) \
+ MACRO_BEGIN \
+ PAGE_WAKEUP_DONE(m); \
+ vm_page_lock_queues(); \
+ if (!m->active && !m->inactive) \
+ vm_page_activate(m); \
+ vm_page_unlock_queues(); \
+ MACRO_END
+
+ /*
+ * We must verify that the maps have not changed.
+ */
+ vm_object_unlock(m->object);
+ while (!vm_map_verify(map, &version)) {
+ vm_object_t retry_object;
+ vm_offset_t retry_offset;
+ vm_prot_t retry_prot;
+
+ result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
+ &retry_object, &retry_offset, &retry_prot,
+ &wired, &su);
+ if (result != KERN_SUCCESS) {
+ vm_object_lock(m->object);
+ RELEASE_PAGE(m);
+ UNLOCK_AND_DEALLOCATE;
+ return (result);
+ }
+
+ vm_object_unlock(retry_object);
+
+ if (retry_object != object || retry_offset != offset) {
+ vm_object_lock(m->object);
+ RELEASE_PAGE(m);
+ UNLOCK_AND_DEALLOCATE;
+ goto RetryFault;
+ }
+ }
+
+ /*
+ * Put the page in the physical map.
+ */
+ PMAP_ENTER(map->pmap, vaddr, m, VM_PROT_READ|VM_PROT_WRITE, wired);
+
+ vm_object_lock(m->object);
+ vm_page_lock_queues();
+ if (!m->active && !m->inactive)
+ vm_page_activate(m);
+ m->reference = TRUE;
+ vm_page_unlock_queues();
+
+ vm_map_verify_done(map, &version);
+ PAGE_WAKEUP_DONE(m);
+
+ UNLOCK_AND_DEALLOCATE;
+
+#undef UNLOCK_AND_DEALLOCATE
+#undef RELEASE_PAGE
+
+ return (KERN_SUCCESS);
+}
diff --git a/i386/pc/Makerules b/i386/pc/Makerules
new file mode 100644
index 00000000..eaa1f24d
--- /dev/null
+++ b/i386/pc/Makerules
@@ -0,0 +1,30 @@
+#
+# Copyright (c) 1994 The University of Utah and
+# the Center for Software Science (CSS). All rights reserved.
+#
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+# IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+# ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+#
+# CSS requests users of this software to return to css-dist@cs.utah.edu any
+# improvements that they make and grant CSS redistribution rights.
+#
+# Author: Bryan Ford, University of Utah CSS
+#
+ifndef _mach4_i386_kernel_pc_makerules_
+_mach4_i386_kernel_pc_makerules = yes
+
+
+CLEAN_FILES += pc_asm.h
+i16_real_int.o: pc_asm.h
+
+include $(MSRCDIR)/kernel/util/Makerules
+
+
+endif
diff --git a/i386/pc/NOTES b/i386/pc/NOTES
new file mode 100644
index 00000000..d165aa41
--- /dev/null
+++ b/i386/pc/NOTES
@@ -0,0 +1,34 @@
+
+
+i16_raw.c:
+ Provides a default implementation
+ of real/pmode switching code.
+ Assumes that, as far as it's concerned,
+	low linear addresses always map to physical addresses.
+ (The low linear mappings can be changed,
+ but must be changed back before switching back to real mode.)
+
+ Provides:
+ i16_raw_switch_to_pmode()
+ i16_raw_switch_to_real_mode()
+
+ i16_raw_start()
+ Called in real mode.
+ Initializes the pmode switching system,
+ switches to pmode for the first time,
+ and calls the 32-bit function raw_start().
+
+ Depends on:
+
+ paging.h:
+ raw_paging_enable()
+ raw_paging_disable()
+ raw_paging_init()
+
+ a20.h:
+ i16_enable_a20()
+ i16_disable_a20()
+
+ real.h:
+ real_cs
+
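+Startup sequence, for reference (see i16/i16_main.c in this tree):
+
+	i16_main()
+		i16_init()		set up the 16-bit runtime environment
+		i16_ext_mem_check()	probe for extended memory
+		i16_raw_start()		switch to pmode (see above)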
diff --git a/i386/pc/debug.h b/i386/pc/debug.h
new file mode 100644
index 00000000..62650820
--- /dev/null
+++ b/i386/pc/debug.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _KUKM_I386_PC_DEBUG_H_
+#define _KUKM_I386_PC_DEBUG_H_
+
+#ifdef ASSEMBLER
+#ifdef DEBUG
+
+
+/* Poke a character directly onto the VGA text display,
+ as a very quick, mostly-reliable status indicator.
+ Assumes ss is a kernel data segment register. */
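+/* Added note: the offset 0xb8000 + 80*2*13 + 40*2 used below is row 13,
+   column 40 of the 80-column colour text buffer, two bytes per cell
+   (character and attribute). */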
+#define POKE_STATUS(char,scratch) \
+ ss/*XXX gas bug */ ;\
+ movl %ss:_phys_mem_va,scratch ;\
+ addl $0xb8000+80*2*13+40*2,scratch ;\
+ movb char,%ss:(scratch) ;\
+ movb $0xf0,%ss:1(scratch)
+
+
+#else /* !DEBUG */
+
+#define POKE_STATUS(char,scratch)
+
+#endif /* !DEBUG */
+#else /* !ASSEMBLER */
+#ifdef DEBUG
+
+#include <mach/machine/vm_types.h>
+
+
+#define POKE_STATUS(string) \
+ ({ unsigned char *s = (string); \
+ extern vm_offset_t phys_mem_va; \
+ short *d = (short*)(phys_mem_va+0xb8000+80*2*13+40*2); \
+ while (*s) { (*d++) = 0x3000 | (*s++); } \
+ *d = ' '; \
+ })
+
+
+#else /* !DEBUG */
+
+#define POKE_STATUS(char)
+
+#endif /* !DEBUG */
+#endif /* !ASSEMBLER */
+
+
+#include_next "debug.h"
+
+#endif /* _KUKM_I386_PC_DEBUG_H_ */
diff --git a/i386/pc/exit.c b/i386/pc/exit.c
new file mode 100644
index 00000000..59c93838
--- /dev/null
+++ b/i386/pc/exit.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "real.h"
+
+void _exit(int rc)
+{
+ about_to_die(rc);
+
+ (*real_exit)(rc);
+}
+
diff --git a/i386/pc/gdt.h b/i386/pc/gdt.h
new file mode 100644
index 00000000..8dae857f
--- /dev/null
+++ b/i386/pc/gdt.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include_next "gdt.h"
+
+/* If we have a REAL_TSS, use that as our DEFAULT_TSS if necessary.
+ (The DEFAULT_TSS gets loaded by cpu_tables_load() upon switching to pmode.) */
+#if defined(ENABLE_REAL_TSS) && !defined(DEFAULT_TSS)
+#define DEFAULT_TSS REAL_TSS
+#define DEFAULT_TSS_IDX REAL_TSS_IDX
+#endif
+
diff --git a/i386/pc/gdt_sels.h b/i386/pc/gdt_sels.h
new file mode 100644
index 00000000..18e09072
--- /dev/null
+++ b/i386/pc/gdt_sels.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include_next "gdt_sels.h"
+
+#ifdef ENABLE_REAL_TSS
+
+/* This is a special TSS with a full IO bitmap
+ that allows access to all I/O ports in v86 mode.
+ It's used for making calls to the real-mode BIOS (or DOS). */
+gdt_sel(REAL_TSS)
+
+#endif
+
diff --git a/i386/pc/i16/i16_a20.c b/i386/pc/i16/i16_a20.c
new file mode 100644
index 00000000..5e91f86c
--- /dev/null
+++ b/i386/pc/i16/i16_a20.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+
+#include <mach/machine/pio.h>
+#include <mach/machine/code16.h>
+
+#include "i16_a20.h"
+
+
+/* Keyboard stuff for turning on the A20 address line (gak!). */
+#define K_RDWR 0x60 /* keyboard data & cmds (read/write) */
+#define K_STATUS 0x64 /* keyboard status (read-only) */
+#define K_CMD 0x64 /* keybd ctlr command (write-only) */
+
+#define K_OBUF_FUL 0x01 /* output buffer full */
+#define K_IBUF_FUL 0x02 /* input buffer full */
+
+#define KC_CMD_WIN 0xd0 /* read output port */
+#define KC_CMD_WOUT 0xd1 /* write output port */
+
+#define KB_ENABLE_A20 0xdf /* Linux and my BIOS use this,
+ and I trust them more than Mach 3.0,
+ but I'd like to know what the difference is
+ and if it matters. */
+ /*0x9f*/ /* enable A20,
+ enable output buffer full interrupt
+ enable data line
+ disable clock line */
+#define KB_DISABLE_A20 0xdd
+
+
+CODE16
+
+
+/*
+ This routine ensures that the keyboard command queue is empty
+ (after emptying the output buffers)
+
+ No timeout is used - if this hangs there is something wrong with
+ the machine, and we probably couldn't proceed anyway.
+ XXX should at least die properly
+*/
+static void i16_empty_8042(void)
+{
+ int status;
+
+retry:
+ i16_nanodelay(1000);
+ status = i16_inb(K_STATUS);
+
+ if (status & K_OBUF_FUL)
+ {
+ i16_nanodelay(1000);
+ i16_inb(K_RDWR);
+ goto retry;
+ }
+
+ if (status & K_IBUF_FUL)
+ goto retry;
+}
+
+int i16_raw_test_a20(void);
+
+/* Enable the A20 address line. */
+void i16_raw_enable_a20(void)
+{
+ int v;
+
+ /* XXX try int 15h function 24h */
+
+ if (i16_raw_test_a20())
+ return;
+
+ /* PS/2 */
+ v = i16_inb(0x92);
+ i16_nanodelay(1000);
+ i16_outb(0x92,v | 2);
+
+ if (i16_raw_test_a20())
+ return;
+
+ /* AT */
+ i16_empty_8042();
+ i16_outb(K_CMD, KC_CMD_WOUT);
+ i16_empty_8042();
+ i16_outb(K_RDWR, KB_ENABLE_A20);
+ i16_empty_8042();
+
+ /* Wait until the a20 line gets enabled. */
+ while (!i16_raw_test_a20());
+}
+
+/* Disable the A20 address line. */
+void i16_raw_disable_a20(void)
+{
+ int v;
+
+ if (!i16_raw_test_a20())
+ return;
+
+ /* PS/2 */
+ v = i16_inb(0x92);
+ i16_nanodelay(1000);
+ i16_outb(0x92, v & ~2);
+
+ if (!i16_raw_test_a20())
+ return;
+
+ /* AT */
+ i16_empty_8042();
+ i16_outb(K_CMD, KC_CMD_WOUT);
+ i16_empty_8042();
+ i16_outb(K_RDWR, KB_DISABLE_A20);
+ i16_empty_8042();
+
+ /* Wait until the a20 line gets disabled. */
+ while (i16_raw_test_a20());
+}
+
+
+void (*i16_enable_a20)(void) = i16_raw_enable_a20;
+void (*i16_disable_a20)(void) = i16_raw_disable_a20;
+
diff --git a/i386/pc/i16/i16_a20.h b/i386/pc/i16/i16_a20.h
new file mode 100644
index 00000000..afe124b9
--- /dev/null
+++ b/i386/pc/i16/i16_a20.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_PC_A20_H_
+#define _I386_PC_A20_H_
+
+/* By default these point to the "raw" routines in i16_a20.c.
+ They can be revectored to other routines,
+ e.g. to use a HIMEM driver's facilities. */
+extern void (*i16_enable_a20)(void);
+extern void (*i16_disable_a20)(void);
+
+#endif /* _I386_PC_A20_H_ */
diff --git a/i386/pc/i16/i16_bios.h b/i386/pc/i16/i16_bios.h
new file mode 100644
index 00000000..29dc2d87
--- /dev/null
+++ b/i386/pc/i16/i16_bios.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I16_BIOS_H_
+#define _I16_BIOS_H_
+
+#include <mach/inline.h>
+
+
+MACH_INLINE void i16_bios_putchar(int c)
+{
+ asm volatile("int $0x10" : : "a" (0x0e00 | (c & 0xff)), "b" (0x07));
+}
+
+MACH_INLINE int i16_bios_getchar()
+{
+ int c;
+ asm volatile("int $0x16" : "=a" (c) : "a" (0x0000));
+ c &= 0xff;
+ return c;
+}
+
+MACH_INLINE void i16_bios_warm_boot(void)
+{
+ asm volatile("
+ cli
+ movw $0x40,%ax
+ movw %ax,%ds
+ movw $0x1234,0x72
+ ljmp $0xffff,$0x0000
+ ");
+}
+
+MACH_INLINE void i16_bios_cold_boot(void)
+{
+ asm volatile("
+ cli
+ movw $0x40,%ax
+ movw %ax,%ds
+ movw $0x0000,0x72
+ ljmp $0xffff,$0x0000
+ ");
+}
+
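+/* Copy `word_count' 16-bit words from linear address src_la to dest_la
+   via BIOS INT 15h function 87h ("move extended memory block").  The
+   BIOS expects a 48-byte GDT image at %es:%si, with the descriptors at
+   offsets 0x10 and 0x18 describing the source and destination, and the
+   word count in %cx; the status comes back in %ah (0 means success).
+   (Comment added for reference.) */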
+MACH_INLINE unsigned char i16_bios_copy_ext_mem(
+ unsigned src_la, unsigned dest_la, unsigned short word_count)
+{
+ char buf[48];
+ unsigned short i, rc;
+
+ /* Initialize the descriptor structure. */
+ for (i = 0; i < sizeof(buf); i++)
+ buf[i] = 0;
+ *((unsigned short*)(buf+0x10)) = 0xffff; /* source limit */
+ *((unsigned long*)(buf+0x12)) = src_la; /* source linear address */
+ *((unsigned char*)(buf+0x15)) = 0x93; /* source access rights */
+ *((unsigned short*)(buf+0x18)) = 0xffff; /* dest limit */
+ *((unsigned long*)(buf+0x1a)) = dest_la; /* dest linear address */
+ *((unsigned char*)(buf+0x1d)) = 0x93; /* dest access rights */
+
+#if 0
+ i16_puts("buf:");
+ for (i = 0; i < sizeof(buf); i++)
+ i16_writehexb(buf[i]);
+ i16_puts("");
+#endif
+
+ /* Make the BIOS call to perform the copy. */
+ asm volatile("
+ int $0x15
+ " : "=a" (rc)
+ : "a" ((unsigned short)0x8700),
+ "c" (word_count),
+ "S" ((unsigned short)(unsigned)buf));
+
+ return rc >> 8;
+}
+
+#endif /* _I16_BIOS_H_ */
diff --git a/i386/pc/i16/i16_exit.c b/i386/pc/i16/i16_exit.c
new file mode 100644
index 00000000..674033ad
--- /dev/null
+++ b/i386/pc/i16/i16_exit.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+
+#include "i16_bios.h"
+
+
+CODE16
+
+void i16_exit(int rc)
+{
+ i16_puts("Press any key to reboot.");
+ i16_bios_getchar();
+ i16_bios_warm_boot();
+}
+
diff --git a/i386/pc/i16/i16_ext_mem.c b/i386/pc/i16/i16_ext_mem.c
new file mode 100644
index 00000000..08cbecfa
--- /dev/null
+++ b/i386/pc/i16/i16_ext_mem.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+#include <mach/machine/vm_types.h>
+#include <mach/machine/far_ptr.h>
+#include <mach/machine/proc_reg.h>
+
+#include "i16_bios.h"
+#include "phys_mem.h"
+#include "vm_param.h"
+#include "debug.h"
+
+
+static vm_offset_t ext_mem_phys_free_mem;
+static vm_size_t ext_mem_phys_free_size;
+
+
+CODE32
+
+int ext_mem_collect(void)
+{
+ if (ext_mem_phys_free_mem)
+ {
+ phys_mem_add(ext_mem_phys_free_mem, ext_mem_phys_free_size);
+ ext_mem_phys_free_mem = 0;
+ }
+}
+
+CODE16
+
+void i16_ext_mem_check()
+{
+ vm_offset_t ext_mem_top, ext_mem_bot;
+ unsigned short ext_mem_k;
+
+ /* Find the top of available extended memory. */
+ asm volatile("
+ int $0x15
+ jnc 1f
+ xorw %%ax,%%ax
+ 1:
+ " : "=a" (ext_mem_k)
+ : "a" (0x8800));
+ ext_mem_top = 0x100000 + (vm_offset_t)ext_mem_k * 1024;
+
+ /* XXX check for >16MB memory using function 0xc7 */
+
+ ext_mem_bot = 0x100000;
+
+ /* Check for extended memory allocated bottom-up: method 1.
+ This uses the technique (and, loosely, the code)
+ described in the VCPI spec, version 1.0. */
+ if (ext_mem_top > ext_mem_bot)
+ {
+ asm volatile("
+ pushw %%es
+
+ xorw %%ax,%%ax
+ movw %%ax,%%es
+ movw %%es:0x19*4+2,%%ax
+ movw %%ax,%%es
+
+ movw $0x12,%%di
+ movw $7,%%cx
+ rep
+ cmpsb
+ jne 1f
+
+ xorl %%edx,%%edx
+ movb %%es:0x2e,%%dl
+ shll $16,%%edx
+ movw %%es:0x2c,%%dx
+
+ 1:
+ popw %%es
+ " : "=d" (ext_mem_bot)
+ : "d" (ext_mem_bot),
+ "S" ((unsigned short)(vm_offset_t)"VDISK V")
+ : "eax", "ecx", "esi", "edi");
+ }
+ i16_assert(ext_mem_bot >= 0x100000);
+
+ /* Check for extended memory allocated bottom-up: method 2.
+ This uses the technique (and, loosely, the code)
+ described in the VCPI spec, version 1.0. */
+ if (ext_mem_top > ext_mem_bot)
+ {
+ struct {
+ char pad1[3];
+ char V;
+ long DISK;
+ char pad2[30-8];
+ unsigned short addr;
+ } buf;
+ unsigned char rc;
+
+ i16_assert(sizeof(buf) == 0x20);
+ rc = i16_bios_copy_ext_mem(0x100000, kvtolin((vm_offset_t)&buf), sizeof(buf)/2);
+ if ((rc == 0) && (buf.V == 'V') && (buf.DISK == 'DISK'))
+ {
+ vm_offset_t new_bot = (vm_offset_t)buf.addr << 10;
+ i16_assert(new_bot > 0x100000);
+ if (new_bot > ext_mem_bot)
+ ext_mem_bot = new_bot;
+ }
+ }
+ i16_assert(ext_mem_bot >= 0x100000);
+
+ if (ext_mem_top > ext_mem_bot)
+ {
+ ext_mem_phys_free_mem = ext_mem_bot;
+ ext_mem_phys_free_size = ext_mem_top - ext_mem_bot;
+
+ /* We need to update phys_mem_max here
+ instead of just letting phys_mem_add() do it
+ when the memory is collected with phys_mem_collect(),
+ because VCPI initialization needs to know the top of physical memory
+ before phys_mem_collect() is called.
+ See i16_vcpi.c for the gross details. */
+ if (ext_mem_top > phys_mem_max)
+ phys_mem_max = ext_mem_top;
+ }
+}
+
+void i16_ext_mem_shutdown()
+{
+ /* We didn't actually allocate the memory,
+ so no need to deallocate it... */
+}
+
diff --git a/i386/pc/i16/i16_init.c b/i386/pc/i16/i16_init.c
new file mode 100644
index 00000000..23a51df5
--- /dev/null
+++ b/i386/pc/i16/i16_init.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+#include <mach/machine/proc_reg.h>
+
+#include "vm_param.h"
+
+
+/* Code segment we originally had when we started in real mode. */
+unsigned short real_cs;
+
+/* Virtual address of physical memory. */
+vm_offset_t phys_mem_va;
+
+/* Physical address of start of boot image. */
+vm_offset_t boot_image_pa;
+
+/* Upper limit of known physical memory. */
+vm_offset_t phys_mem_max;
+
+
+CODE16
+
+#include "i16_bios.h"
+
+/* Called by i16_crt0 (or the equivalent)
+ to set up our basic 16-bit runtime environment
+ before calling i16_main(). */
+void i16_init(void)
+{
+ /* Find our code/data/everything segment. */
+ real_cs = get_cs();
+
+ /* Find out where in physical memory we got loaded. */
+ boot_image_pa = real_cs << 4;
+
+	/* Find the virtual address at which the bottom of physical memory
+	   appears.  (We won't be able to directly use it for 32-bit accesses
+ until we actually get into 32-bit mode.) */
+ phys_mem_va = -boot_image_pa;
+
+ /* The base of linear memory is at the same place,
+ at least until we turn paging on. */
+ linear_base_va = phys_mem_va;
+}
+
diff --git a/i386/pc/i16/i16_main.c b/i386/pc/i16/i16_main.c
new file mode 100644
index 00000000..328ccebf
--- /dev/null
+++ b/i386/pc/i16/i16_main.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+
+
+CODE16
+
+void i16_main(void)
+{
+ i16_init();
+
+ /* Grab all the memory we can find. */
+ i16_ext_mem_check();
+
+ i16_raw_start();
+}
+
diff --git a/i386/pc/i16/i16_pic.c b/i386/pc/i16/i16_pic.c
new file mode 100644
index 00000000..3ff26973
--- /dev/null
+++ b/i386/pc/i16/i16_pic.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/pio.h>
+
+#include "pic.h"
+#include "i16.h"
+
+CODE16
+
+/* Program the PICs to use a different set of interrupt vectors.
+ Assumes the processor interrupt flag (IF) is off. */
+void i16_pic_set_master(int base)
+{
+ unsigned char old_mask;
+
+ /* Save the original interrupt mask. */
+ old_mask = inb(MASTER_OCW); PIC_DELAY();
+
+ /* Initialize the master PIC. */
+ outb(MASTER_ICW, PICM_ICW1); PIC_DELAY();
+ outb(MASTER_OCW, base); PIC_DELAY();
+ outb(MASTER_OCW, PICM_ICW3); PIC_DELAY();
+ outb(MASTER_OCW, PICM_ICW4); PIC_DELAY();
+
+ /* Restore the original interrupt mask. */
+ outb(MASTER_OCW, old_mask); PIC_DELAY();
+}
+
+void i16_pic_set_slave(int base)
+{
+ unsigned char old_mask;
+
+ /* Save the original interrupt mask. */
+ old_mask = inb(SLAVES_OCW); PIC_DELAY();
+
+ /* Initialize the slave PIC. */
+ outb(SLAVES_ICW, PICS_ICW1); PIC_DELAY();
+ outb(SLAVES_OCW, base); PIC_DELAY();
+ outb(SLAVES_OCW, PICS_ICW3); PIC_DELAY();
+ outb(SLAVES_OCW, PICS_ICW4); PIC_DELAY();
+
+ /* Restore the original interrupt mask. */
+ outb(SLAVES_OCW, old_mask); PIC_DELAY();
+}
+
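As a usage sketch (assuming the i16 real-mode environment above; these calls are not part of this file), the remapping routines might be driven like this to keep IRQ vectors clear of the CPU exception range 0x00-0x1f and then restore the BIOS defaults before returning to DOS:

i16_pic_set_master(0x20);	/* IRQs 0-7  -> vectors 0x20-0x27 */
i16_pic_set_slave(0x28);	/* IRQs 8-15 -> vectors 0x28-0x2f */
/* ... run with the new vector bases ... */
i16_pic_set_master(0x08);	/* back to the BIOS defaults */
i16_pic_set_slave(0x70);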
diff --git a/i386/pc/i16/i16_putchar.c b/i386/pc/i16/i16_putchar.c
new file mode 100644
index 00000000..365f4f83
--- /dev/null
+++ b/i386/pc/i16/i16_putchar.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+
+#include "i16_bios.h"
+
+CODE16
+
+void i16_putchar(int ch)
+{
+ if (ch == '\n')
+ i16_bios_putchar('\r');
+ i16_bios_putchar(ch);
+}
+
diff --git a/i386/pc/i16/i16_raw.c b/i386/pc/i16/i16_raw.c
new file mode 100644
index 00000000..1f705d3b
--- /dev/null
+++ b/i386/pc/i16/i16_raw.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ This file provides a default implementation
+ of real/pmode switching code.
+ Assumes that, as far as it's concerned,
+ low linear addresses always map to physical addresses.
+ (The low linear mappings can be changed,
+ but must be changed back before switching back to real mode.)
+
+ Provides:
+ i16_raw_switch_to_pmode()
+ i16_raw_switch_to_real_mode()
+
+ i16_raw_start()
+ Called in real mode.
+ Initializes the pmode switching system,
+ switches to pmode for the first time,
+ and calls the 32-bit function raw_start().
+
+ Depends on:
+
+ paging.h:
+ raw_paging_enable()
+ raw_paging_disable()
+ raw_paging_init()
+
+ a20.h:
+ i16_enable_a20()
+ i16_disable_a20()
+
+ real.h:
+ real_cs
+*/
+
+#include <mach/boolean.h>
+#include <mach/machine/code16.h>
+#include <mach/machine/vm_param.h>
+#include <mach/machine/proc_reg.h>
+#include <mach/machine/pio.h>
+#include <mach/machine/seg.h>
+#include <mach/machine/eflags.h>
+#include <mach/machine/pmode.h>
+
+#include "config.h"
+#include "cpu.h"
+#include "i16.h"
+#include "vm_param.h"
+#include "pic.h"
+#include "debug.h"
+#include "i16_a20.h"
+#include "i16_switch.h"
+
+int irq_master_base, irq_slave_base;
+
+/* Set to true when everything is initialized properly. */
+static boolean_t inited;
+
+/* Saved value of eflags register for real mode. */
+static unsigned real_eflags;
+
+
+
+#ifdef ENABLE_PAGING
+#define RAW_PAGING_ENABLE() raw_paging_enable()
+#define RAW_PAGING_DISABLE() raw_paging_disable()
+#define RAW_PAGING_INIT() raw_paging_init()
+#else
+#define RAW_PAGING_ENABLE() ((void)0)
+#define RAW_PAGING_DISABLE() ((void)0)
+#define RAW_PAGING_INIT() ((void)0)
+#endif
+
+
+CODE16
+
+void i16_raw_switch_to_pmode()
+{
+ /* No interrupts from now on please. */
+ i16_cli();
+
+ /* Save the eflags register for switching back later. */
+ real_eflags = get_eflags();
+
+ /* Enable the A20 address line. */
+ i16_enable_a20();
+
+ /* Load the GDT.
+ Note that we have to do this each time we enter pmode,
+ not just the first,
+ because other real-mode programs may have switched to pmode
+ and back again in the meantime, trashing the GDT pointer. */
+ {
+ struct pseudo_descriptor pdesc;
+
+ pdesc.limit = sizeof(cpu[0].tables.gdt)-1;
+ pdesc.linear_base = boot_image_pa
+ + (vm_offset_t)&cpu[0].tables.gdt;
+ i16_set_gdt(&pdesc);
+ }
+
+ /* Switch into protected mode. */
+ i16_enter_pmode(KERNEL_16_CS);
+
+ /* Reload all the segment registers from the new GDT. */
+ set_ds(KERNEL_DS);
+ set_es(KERNEL_DS);
+ set_fs(0);
+ set_gs(0);
+ set_ss(KERNEL_DS);
+
+ i16_do_32bit(
+
+ if (inited)
+ {
+ /* Turn paging on if necessary. */
+ RAW_PAGING_ENABLE();
+
+ /* Load the CPU tables into the processor. */
+ cpu_tables_load(&cpu[0]);
+
+ /* Program the PIC so the interrupt vectors won't
+ conflict with the processor exception vectors. */
+ pic_init(PICM_VECTBASE, PICS_VECTBASE);
+ }
+
+ /* Make sure our flags register is appropriate. */
+ set_eflags((get_eflags()
+ & ~(EFL_IF | EFL_DF | EFL_NT))
+ | EFL_IOPL_USER);
+ );
+}
+
+void i16_raw_switch_to_real_mode()
+{
+ /* Make sure interrupts are disabled. */
+ cli();
+
+ /* Avoid sending DOS bogus coprocessor exceptions.
+ XXX should we save/restore all of CR0? */
+ i16_clts();
+
+ i16_do_32bit(
+ /* Turn paging off if necessary. */
+ RAW_PAGING_DISABLE();
+
+ /* Reprogram the PIC back to the settings DOS expects. */
+ pic_init(0x08, 0x70);
+ );
+
+ /* Make sure all the segment registers are 16-bit.
+ The code segment definitely is already,
+ because we're running 16-bit code. */
+ set_ds(KERNEL_16_DS);
+ set_es(KERNEL_16_DS);
+ set_fs(KERNEL_16_DS);
+ set_gs(KERNEL_16_DS);
+ set_ss(KERNEL_16_DS);
+
+ /* Switch back to real mode. */
+ i16_leave_pmode(real_cs);
+
+ /* Load the real-mode segment registers. */
+ set_ds(real_cs);
+ set_es(real_cs);
+ set_fs(real_cs);
+ set_gs(real_cs);
+ set_ss(real_cs);
+
+ /* Load the real-mode IDT. */
+ {
+ struct pseudo_descriptor pdesc;
+
+ pdesc.limit = 0xffff;
+ pdesc.linear_base = 0;
+ i16_set_idt(&pdesc);
+ }
+
+ /* Disable the A20 address line. */
+ i16_disable_a20();
+
+ /* Restore the eflags register to its original real-mode state.
+ Note that this will leave interrupts disabled
+ since it was saved after the cli() above. */
+ set_eflags(real_eflags);
+}
+
+void i16_raw_start()
+{
+ /* Make sure we're not already in protected mode. */
+ if (i16_get_msw() & CR0_PE)
+ i16_die("The processor is in an unknown "
+ "protected mode environment.");
+
+ do_debug(i16_puts("Real mode detected"));
+
+ /* Minimally initialize the GDT. */
+ i16_gdt_init_temp();
+
+ /* Switch to protected mode for the first time.
+ This won't load all the processor tables and everything yet,
+ since they're not fully initialized. */
+ i16_raw_switch_to_pmode();
+
+ /* We can now hop in and out of 32-bit mode at will. */
+ i16_do_32bit(
+
+ /* Now that we can access all physical memory,
+ collect the memory regions we discovered while in 16-bit mode
+ and add them to our free memory list.
+ We can't do this before now because the free list nodes
+ are stored in the free memory itself,
+ which is probably out of reach of our 16-bit segments. */
+ phys_mem_collect();
+
+ /* Initialize paging if necessary.
+ Do it before initializing the other processor tables
+ because they might have to be located
+ somewhere in high linear memory. */
+ RAW_PAGING_INIT();
+
+ /* Initialize the processor tables. */
+ cpu_init(&cpu[0]);
+
+ /* Initialize the hardware interrupt vectors in the IDT. */
+ irq_master_base = PICM_VECTBASE;
+ irq_slave_base = PICS_VECTBASE;
+ idt_irq_init();
+
+ inited = TRUE;
+
+ /* Switch to real mode and back again once more,
+ to make sure everything's loaded properly. */
+ do_16bit(
+ i16_raw_switch_to_real_mode();
+ i16_raw_switch_to_pmode();
+ );
+
+ raw_start();
+ );
+}
+
+void (*i16_switch_to_real_mode)() = i16_raw_switch_to_real_mode;
+void (*i16_switch_to_pmode)() = i16_raw_switch_to_pmode;
+
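The two function pointers above exist so that other 16-bit environments can substitute their own switch code. A hedged sketch of how such an override might look (the i16_vcpi_* names are hypothetical, suggested only by the i16_vcpi.c reference earlier in this patch):

/* e.g. from a VCPI-specific init routine: */
extern void i16_vcpi_switch_to_real_mode(void);
extern void i16_vcpi_switch_to_pmode(void);

void i16_vcpi_hook_switch(void)
{
	i16_switch_to_real_mode = i16_vcpi_switch_to_real_mode;
	i16_switch_to_pmode = i16_vcpi_switch_to_pmode;
}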
diff --git a/i386/pc/i16/i16_raw_test_a20.S b/i386/pc/i16/i16_raw_test_a20.S
new file mode 100644
index 00000000..a934e12f
--- /dev/null
+++ b/i386/pc/i16/i16_raw_test_a20.S
@@ -0,0 +1,35 @@
+
+#include <mach/machine/asm.h>
+
+ .text
+ .code16
+
+/*
+ * Test the A20 address line; return true if it is enabled.
+ */
+ENTRY(i16_raw_test_a20)
+ xorw %ax,%ax
+ movw %ax,%fs
+ notw %ax
+ movw %ax,%gs
+
+ /* See if the values already in the corresponding locations
+ are the same. */
+ movw %fs:0,%ax
+ cmpw %gs:16,%ax
+ jnz 1f
+
+ /* Yes; try changing one and see if they're still the same. */
+ movw %ax,%dx
+ notw %ax
+ movw %ax,%fs:0
+ cmpw %gs:16,%ax
+ movw %dx,%fs:0
+ jnz 1f
+
+ xorl %eax,%eax
+ ret
+1:
+ movl $1,%eax
+ ret
+
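A C rendition (illustrative only; far_peek16()/far_poke16() are hypothetical helpers for far real-mode accesses, not functions in this tree) of the aliasing test the assembly above performs: with A20 disabled, 0xFFFF:0010 wraps around to linear address 0 and therefore aliases 0000:0000.

int a20_enabled(void)
{
	unsigned short lo = far_peek16(0x0000, 0x0000);
	unsigned short hi = far_peek16(0xffff, 0x0010);

	if (lo != hi)
		return 1;			/* no aliasing: A20 is on */

	/* Same value there now; flip the low copy and see if the alias follows. */
	far_poke16(0x0000, 0x0000, (unsigned short)~lo);
	hi = far_peek16(0xffff, 0x0010);
	far_poke16(0x0000, 0x0000, lo);		/* restore the original word */

	return hi != (unsigned short)~lo;	/* alias did not follow => A20 is on */
}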
diff --git a/i386/pc/i16/i16_real_int.S b/i386/pc/i16/i16_real_int.S
new file mode 100644
index 00000000..f05077e2
--- /dev/null
+++ b/i386/pc/i16/i16_real_int.S
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/asm.h>
+
+#include "pc_asm.h"
+
+ .text
+ .code16
+
+ENTRY(i16_real_int)
+ pushf
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+ pushl %ebp
+
+ cli
+
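+	/* Patch the requested interrupt number into the INT instruction below. */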
+ movl 6*4(%esp),%eax
+ movb %al,1f+1
+
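+	/* Load the caller's register image from the real_call_data structure. */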
+ movl 7*4(%esp),%ebp
+ movl RCD_EAX(%ebp),%eax
+ movl RCD_EBX(%ebp),%ebx
+ movl RCD_ECX(%ebp),%ecx
+ movl RCD_EDX(%ebp),%edx
+ movl RCD_ESI(%ebp),%esi
+ movl RCD_EDI(%ebp),%edi
+ movw RCD_DS(%ebp),%ds
+ movw RCD_ES(%ebp),%es
+ /* XXX flags */
+ movl RCD_EBP(%ebp),%ebp
+
+1: int $0
+
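+	/* Store the registers as returned by the real-mode handler back into it. */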
+ pushl %ebp
+ movl 8*4(%esp),%ebp
+ popl RCD_EBP(%ebp)
+
+ movl %eax,RCD_EAX(%ebp)
+ movl %ebx,RCD_EBX(%ebp)
+ movl %ecx,RCD_ECX(%ebp)
+ movl %edx,RCD_EDX(%ebp)
+ movl %esi,RCD_ESI(%ebp)
+ movl %edi,RCD_EDI(%ebp)
+ movw %ds,RCD_DS(%ebp)
+ movw %es,RCD_ES(%ebp)
+
+ pushf
+ popl %eax
+ movw %ax,RCD_FLAGS(%ebp)
+
+ movw %ss,%ax
+ movw %ax,%ds
+ movw %ax,%es
+
+ popl %ebp
+ popl %edi
+ popl %esi
+ popl %ebx
+ popf
+ ret
+
diff --git a/i386/pc/i16/i16_switch.h b/i386/pc/i16/i16_switch.h
new file mode 100644
index 00000000..b7ecf182
--- /dev/null
+++ b/i386/pc/i16/i16_switch.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_PC_SWITCH_H_
+#define _I386_PC_SWITCH_H_
+
+/* Vectors to routines to switch between real and protected mode. */
+extern void (*i16_switch_to_real_mode)();
+extern void (*i16_switch_to_pmode)();
+
+#endif /* _I386_PC_SWITCH_H_ */
diff --git a/i386/pc/i16/phys_mem_collect.c b/i386/pc/i16/phys_mem_collect.c
new file mode 100644
index 00000000..bcb0c09d
--- /dev/null
+++ b/i386/pc/i16/phys_mem_collect.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/vm_param.h>
+
+#include "phys_mem.h"
+
+void phys_mem_collect(void)
+{
+#define pms(name) name##_collect();
+#include "phys_mem_sources.h"
+#undef pms
+}
+
diff --git a/i386/pc/i16/phys_mem_sources.h b/i386/pc/i16/phys_mem_sources.h
new file mode 100644
index 00000000..788910d3
--- /dev/null
+++ b/i386/pc/i16/phys_mem_sources.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+pms(ext_mem)
+
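Since phys_mem_sources.h currently contains only the one pms() line, the preprocessor turns phys_mem_collect() (previous file) into exactly the following; adding another memory source later is just another pms() line here, with no change to the collector itself:

void phys_mem_collect(void)
{
	ext_mem_collect();
}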
diff --git a/i386/pc/i16/raw_exit.c b/i386/pc/i16/raw_exit.c
new file mode 100644
index 00000000..5ccb69a5
--- /dev/null
+++ b/i386/pc/i16/raw_exit.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/seg.h>
+#include <mach/machine/proc_reg.h>
+
+#include "i16.h"
+#include "debug.h"
+
+
+void raw_exit(int rc)
+{
+ do_16bit(
+ i16_raw_switch_to_real_mode();
+ i16_exit(rc);
+ while (1);
+ );
+}
+
+void (*real_exit)(int rc) = raw_exit;
+
diff --git a/i386/pc/i16/raw_real_int.c b/i386/pc/i16/raw_real_int.c
new file mode 100644
index 00000000..a76d8ca7
--- /dev/null
+++ b/i386/pc/i16/raw_real_int.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/seg.h>
+#include <mach/machine/proc_reg.h>
+
+#include "real.h"
+#include "i16.h"
+#include "debug.h"
+
+
+void raw_real_int(int intnum, struct real_call_data *rcd)
+{
+ assert((get_cs() & 3) == 0);
+
+ do_16bit(
+
+ i16_raw_switch_to_real_mode();
+ i16_real_int(intnum, rcd);
+ i16_raw_switch_to_pmode();
+ );
+}
+
+void (*real_int)(int intnum, struct real_call_data *rcd) = raw_real_int;
+
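A hedged usage sketch of the real_int vector wired up above (struct real_call_data and real_call_data_init() are declared in real.h later in this patch; INT 11h, the BIOS equipment-list service, is chosen here only for illustration):

#include "real.h"

static unsigned short bios_equipment_word(void)
{
	struct real_call_data rcd = { 0 };	/* zero every register image */

	real_call_data_init(&rcd);		/* flags/ss/sp, as real.h prescribes */
	real_int(0x11, &rcd);			/* BIOS returns the word in AX */
	return (unsigned short)rcd.eax;
}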
diff --git a/i386/pc/ipl.h b/i386/pc/ipl.h
new file mode 100644
index 00000000..5f40660c
--- /dev/null
+++ b/i386/pc/ipl.h
@@ -0,0 +1,74 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+
+#define SPL0 0
+#define SPL1 1
+#define SPL2 2
+#define SPL3 3
+#define SPL4 4
+#define SPL5 5
+#define SPL6 6
+
+#define SPLPP 5
+#define SPLTTY 6
+#define SPLNI 6
+
+#define IPLHI 8
+#define SPL7 IPLHI
+#define SPLHI IPLHI
+
+#ifndef ASSEMBLER
+extern int (*ivect[])();
+extern int iunit[];
+extern unsigned char intpri[];
+#endif /* ASSEMBLER */
+
diff --git a/i386/pc/irq.h b/i386/pc/irq.h
new file mode 100644
index 00000000..1f9da9ae
--- /dev/null
+++ b/i386/pc/irq.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_PC_IRQ_H_
+#define _I386_PC_IRQ_H_
+
+/* On normal PCs, there are always 16 IRQ lines. */
+#define IRQ_COUNT 16
+
+/* Start of hardware interrupt vectors in the IDT. */
+#define IDT_IRQ_BASE 0x20
+
+/* Variables storing the master and slave PIC interrupt vector base. */
+extern int irq_master_base, irq_slave_base;
+
+/* Routine called just after entering protected mode for the first time,
+ to set up the IRQ interrupt vectors in the protected-mode IDT.
+ It should initialize IDT entries irq_master_base through irq_master_base+7,
+ and irq_slave_base through irq_slave_base+7. */
+extern void idt_irq_init(void);
+
+/* Fill an IRQ gate in a CPU's IDT.
+ Always uses an interrupt gate; just set `access' to the privilege level. */
+#define fill_irq_gate(cpu, irq_num, entry, selector, access) \
+ fill_idt_gate(cpu, (irq_num) < 8 \
+ ? irq_master_base+(irq_num) \
+ : irq_slave_base+(irq_num)-8, \
+ entry, selector, ACC_INTR_GATE | (access))
+
+#endif /* _I386_PC_IRQ_H_ */
diff --git a/i386/pc/irq_list.h b/i386/pc/irq_list.h
new file mode 100644
index 00000000..bfd216e9
--- /dev/null
+++ b/i386/pc/irq_list.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+/* This is just a handy file listing all the IRQs on the PC, for whatever purpose... */
+
+irq(master,0,0)
+irq(master,1,1)
+irq(master,2,2)
+irq(master,3,3)
+irq(master,4,4)
+irq(master,5,5)
+irq(master,6,6)
+irq(master,7,7)
+
+irq(slave,0,8)
+irq(slave,1,9)
+irq(slave,2,10)
+irq(slave,3,11)
+irq(slave,4,12)
+irq(slave,5,13)
+irq(slave,6,14)
+irq(slave,7,15)
+
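irq_list.h is meant to be expanded with a caller-supplied irq() macro. One plausible (purely hypothetical) shape for the idt_irq_init() hook declared in irq.h, assuming per-IRQ assembly stubs named irq_entry_N, the usual KERNEL_CS selector, and a fill_idt_gate() that takes the cpu by pointer:

#define irq(pic, line, n)	extern void irq_entry_##n(void);
#include "irq_list.h"
#undef irq

void idt_irq_init(void)
{
#define irq(pic, line, n) \
	fill_irq_gate(&cpu[0], (n), (unsigned)&irq_entry_##n, KERNEL_CS, 0);
#include "irq_list.h"
#undef irq
}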
diff --git a/i386/pc/pc_asm.sym b/i386/pc/pc_asm.sym
new file mode 100644
index 00000000..e05677e1
--- /dev/null
+++ b/i386/pc/pc_asm.sym
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "real.h"
+#include "real_tss.h"
+
+offset real_call_data rcd eax
+offset real_call_data rcd ebx
+offset real_call_data rcd ecx
+offset real_call_data rcd edx
+offset real_call_data rcd esi
+offset real_call_data rcd edi
+offset real_call_data rcd ebp
+offset real_call_data rcd flags
+offset real_call_data rcd ds
+offset real_call_data rcd es
+offset real_call_data rcd fs
+offset real_call_data rcd gs
+size real_call_data rcd
+
+expr REAL_TSS_SIZE
+
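The offset/size/expr lines above are presumably fed through the build's symbol-generation step to produce the RCD_* assembler constants used by i16_real_int.S. With struct real_call_data laid out as in real.h (later in this patch), the generated values would amount to something like the following; the names are confirmed by their uses, the numeric values follow from the struct layout, and the generation mechanism itself is assumed:

#define RCD_EDI		0
#define RCD_ESI		4
#define RCD_EBP		8
#define RCD_EBX		16
#define RCD_EDX		20
#define RCD_ECX		24
#define RCD_EAX		28
#define RCD_FLAGS	32
#define RCD_ES		34
#define RCD_DS		36
#define RCD_FS		38
#define RCD_GS		40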
diff --git a/i386/pc/phys_mem.h b/i386/pc/phys_mem.h
new file mode 100644
index 00000000..558267cd
--- /dev/null
+++ b/i386/pc/phys_mem.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * PC-specific flag bits and priority values
+ * for the List Memory Manager (LMM)
+ * relevant for kernels managing physical memory.
+ */
+#ifndef _I386_PC_PHYS_MEM_H_
+#define _I386_PC_PHYS_MEM_H_
+
+#include_next "phys_mem.h"
+
+/* <1MB memory is most precious, then <16MB memory, then high memory.
+ Assign priorities to each region accordingly
+ so that high memory will be used first when possible,
+ then 16MB memory, then 1MB memory. */
+#define LMM_PRI_1MB -2
+#define LMM_PRI_16MB -1
+#define LMM_PRI_HIGH 0
+
+/* For memory <1MB, both LMMF_1MB and LMMF_16MB will be set.
+ For memory from 1MB to 16MB, only LMMF_16MB will be set.
+ For all memory higher than that, neither will be set. */
+#define LMMF_1MB 0x01
+#define LMMF_16MB 0x02
+
+
+/* Call one of these routines to add a discovered chunk of physical memory
+ to the malloc_lmm free list.
+ They assign the appropriate flags and priority to the region,
+ as defined above, breaking up the region if necessary. */
+void phys_mem_add(vm_offset_t min, vm_size_t size);
+void i16_phys_mem_add(vm_offset_t min, vm_size_t size);
+
+#endif /* _I386_PC_PHYS_MEM_H_ */
diff --git a/i386/pc/phys_mem_add.c b/i386/pc/phys_mem_add.c
new file mode 100644
index 00000000..79fd3ce4
--- /dev/null
+++ b/i386/pc/phys_mem_add.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/vm_types.h>
+#include <mach/lmm.h>
+#include <malloc.h>
+
+#include "vm_param.h"
+#include "phys_mem.h"
+
+/* Note that this routine takes _physical_ addresses, not virtual. */
+void phys_mem_add(vm_offset_t min, vm_size_t size)
+{
+ vm_offset_t max = min + size;
+
+ /* Add the memory region with the proper flags and priority. */
+ if (max <= 1*1024*1024)
+ {
+ lmm_add(&malloc_lmm, phystokv(min), size,
+ LMMF_1MB | LMMF_16MB, LMM_PRI_1MB);
+ }
+ else
+ {
+ if (min < 16*1024*1024)
+ {
+ vm_offset_t nmax = max;
+ if (nmax > 16*1024*1024) nmax = 16*1024*1024;
+ lmm_add(&malloc_lmm, phystokv(min), nmax - min,
+ LMMF_16MB, LMM_PRI_16MB);
+ }
+ if (max > 16*1024*1024)
+ {
+ vm_offset_t nmin = min;
+ if (nmin < 16*1024*1024) nmin = 16*1024*1024;
+ lmm_add(&malloc_lmm, phystokv(nmin), max - nmin, 0, 0);
+ }
+ }
+
+ if (max > phys_mem_max)
+ phys_mem_max = max;
+}
+
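A worked example of the splitting above (the numbers are illustrative): a 4MB chunk discovered at the 15MB mark straddles the 16MB boundary, so phys_mem_add(0xF00000, 0x400000) issues two lmm_add() calls and then raises phys_mem_max:

lmm_add(&malloc_lmm, phystokv(0x00f00000), 0x00100000,
	LMMF_16MB, LMM_PRI_16MB);		/* 15MB..16MB */
lmm_add(&malloc_lmm, phystokv(0x01000000), 0x00300000,
	0, 0 /* == LMM_PRI_HIGH */);		/* 16MB..19MB */
/* phys_mem_max becomes 0x01300000 if it was smaller. */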
diff --git a/i386/pc/pic.c b/i386/pc/pic.c
new file mode 100644
index 00000000..eaf7b567
--- /dev/null
+++ b/i386/pc/pic.c
@@ -0,0 +1,283 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <mach/machine/pio.h>
+
+#include <sys/types.h>
+
+#include "ipl.h"
+#include "pic.h"
+
+
+u_short pic_mask[SPLHI+1];
+
+int curr_ipl;
+u_short curr_pic_mask;
+
+u_short orig_pic_mask;
+int orig_pic_mask_initialized;
+
+u_char intpri[NINTR];
+
+/*
+** pic_init() - This routine
+** * Establishes a table of interrupt vectors
+** * Establishes a table of interrupt priority levels
+** * Establishes a table of interrupt masks to be put
+** in the PICs.
+** * Establishes location of PICs in the system
+** * Initialises them
+**
+** At this stage the interrupt functionality of this system should be
+** complete.
+**
+*/
+
+
+/*
+** 1. First we form a table of PIC masks - rather than calling form_pic_mask()
+** each time there is a change of interrupt level, we form a table
+** of pic masks, as there are only 7 interrupt priority levels.
+**
+** 2. The next thing we must do is to determine which of the PIC interrupt
+** request lines have to be masked out.  This is done by calling
+** form_pic_mask() with an (int_lev) of zero, which will find all the
+** interrupt lines that have priority 0 (i.e. to be ignored).
+** Then we split this up for the master/slave PICs.
+**
+** 3. Initialise the PICs, master first, then the slave.
+** All the register field definitions are described in pic.h, also
+** the settings of these fields for the various registers are selected.
+**
+*/
+
+pic_init(int master_base, int slave_base)
+{
+ u_short PICM_OCW1, PICS_OCW1 ;
+ u_short PICM_OCW2, PICS_OCW2 ;
+ u_short PICM_OCW3, PICS_OCW3 ;
+ u_short i;
+
+ if (!orig_pic_mask_initialized)
+ {
+ unsigned omaster, oslave;
+
+ omaster = inb(MASTER_OCW);
+ PIC_DELAY();
+ oslave = inb(SLAVES_OCW);
+ PIC_DELAY();
+
+ orig_pic_mask = omaster | (oslave << 8);
+ orig_pic_mask_initialized = 1;
+ }
+
+
+ /*
+ ** 1. Form pic mask table
+ */
+
+ form_pic_mask();
+
+ /*
+ ** 1a. Select current SPL.
+ */
+
+ curr_ipl = SPLHI;
+ curr_pic_mask = pic_mask[SPLHI];
+
+ /*
+ ** 3. Select options for each ICW and each OCW for each PIC.
+ */
+
+#if 0
+ PICM_ICW1 = (ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8
+ | CASCADE_MODE | ICW4__NEEDED);
+
+ PICS_ICW1 = (ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8
+ | CASCADE_MODE | ICW4__NEEDED);
+
+ PICM_ICW2 = master_base;
+ PICS_ICW2 = slave_base;
+
+ PICM_ICW3 = ( SLAVE_ON_IR2 );
+ PICS_ICW3 = ( I_AM_SLAVE_2 );
+
+ PICM_ICW4 = (SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD
+ | I8086_EMM_MOD);
+ PICS_ICW4 = (SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD
+ | I8086_EMM_MOD);
+#endif
+
+ PICM_OCW1 = (curr_pic_mask & 0x00FF);
+ PICS_OCW1 = ((curr_pic_mask & 0xFF00)>>8);
+
+ PICM_OCW2 = NON_SPEC_EOI;
+ PICS_OCW2 = NON_SPEC_EOI;
+
+ PICM_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );
+ PICS_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );
+
+
+ /*
+ ** 4. Initialise master - send commands to master PIC
+ */
+
+ outb ( MASTER_ICW, PICM_ICW1 );
+ PIC_DELAY();
+ outb ( MASTER_OCW, master_base );
+ PIC_DELAY();
+ outb ( MASTER_OCW, PICM_ICW3 );
+ PIC_DELAY();
+ outb ( MASTER_OCW, PICM_ICW4 );
+ PIC_DELAY();
+
+#if 0
+ outb ( MASTER_OCW, PICM_MASK );
+ PIC_DELAY();
+ outb ( MASTER_ICW, PICM_OCW3 );
+ PIC_DELAY();
+#endif
+
+ /*
+ ** 5. Initialise slave - send commands to slave PIC
+ */
+
+ outb ( SLAVES_ICW, PICS_ICW1 );
+ PIC_DELAY();
+ outb ( SLAVES_OCW, slave_base );
+ PIC_DELAY();
+ outb ( SLAVES_OCW, PICS_ICW3 );
+ PIC_DELAY();
+ outb ( SLAVES_OCW, PICS_ICW4 );
+ PIC_DELAY();
+
+#if 0
+ outb ( SLAVES_OCW, PICS_OCW1 );
+ PIC_DELAY();
+ outb ( SLAVES_ICW, PICS_OCW3 );
+ PIC_DELAY();
+
+ /*
+ ** 6. Initialise interrupts
+ */
+ outb ( MASTER_OCW, PICM_OCW1 );
+ PIC_DELAY();
+#endif
+
+ outb(MASTER_OCW, orig_pic_mask);
+ PIC_DELAY();
+ outb(SLAVES_OCW, orig_pic_mask >> 8);
+ PIC_DELAY();
+
+#if 0
+ /* XXX */
+ if (master_base != 8)
+ {
+ outb(0x21, 0xff);
+ PIC_DELAY();
+ outb(0xa1, 0xff);
+ PIC_DELAY();
+ }
+#endif
+
+ outb(MASTER_ICW, NON_SPEC_EOI);
+ PIC_DELAY();
+ outb(SLAVES_ICW, NON_SPEC_EOI);
+ PIC_DELAY();
+
+ inb(0x60);
+
+}
+
+/*
+** form_pic_mask()
+**
+** For each interrupt priority level, this routine scans through the
+** interrupt priority table (intpri[]) and forms a mask of the interrupt
+** lines whose priority is the same as or lower than that level, storing
+** the result in pic_mask[].  Each entry is a 16-bit mask which will have
+** to be split up between the 2 pics.
+**
+*/
+
+#define SLAVEMASK (0xFFFF ^ SLAVE_ON_IR2)
+#define SLAVEACTV 0xFF00
+
+form_pic_mask()
+{
+ unsigned short i, j, bit, mask;
+
+ for (i=SPL0; i <= SPLHI; i++) {
+ for (j=0x00, bit=0x01, mask = 0; j < NINTR; j++, bit<<=1)
+ if (intpri[j] <= i)
+ mask |= bit;
+
+ if ((mask & SLAVEACTV) != SLAVEACTV )
+ mask &= SLAVEMASK;
+
+ pic_mask[i] = mask;
+ }
+}
+
+#if 0
+
+intnull(unit_dev)
+{
+ printf("intnull(%d)\n", unit_dev);
+}
+
+int prtnull_count = 0;
+prtnull(unit)
+{
+ ++prtnull_count;
+}
+
+#endif /* 0 */
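A worked example of the table form_pic_mask() builds (the priority assignment is hypothetical): suppose a tty driver claims IRQ4 at SPLTTY and every other intpri[] entry is left at 0, i.e. ignored. Then:

/* pic_mask[SPL0] .. pic_mask[SPL5]  == 0xFFEF   (only IRQ4 unmasked)  */
/* pic_mask[SPL6] .. pic_mask[SPLHI] == 0xFFFF   (IRQ4 masked as well) */
/* so the device may interrupt only while running below SPLTTY.       */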
diff --git a/i386/pc/pic.h b/i386/pc/pic.h
new file mode 100644
index 00000000..51911cc2
--- /dev/null
+++ b/i386/pc/pic.h
@@ -0,0 +1,204 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _I386_PIC_H_
+#define _I386_PIC_H_
+
+#include "irq.h"
+
+#define NINTR 0x10
+#define NPICS 0x02
+
+/*
+** The following are definitions used to locate the PICs in the system
+*/
+
+#define ADDR_PIC_BASE 0x20
+#define OFF_ICW 0x00
+#define OFF_OCW 0x01
+#define SIZE_PIC 0x80
+
+#define MASTER_ICW (ADDR_PIC_BASE + OFF_ICW)
+#define MASTER_OCW (ADDR_PIC_BASE + OFF_OCW)
+#define SLAVES_ICW (ADDR_PIC_BASE + OFF_ICW + SIZE_PIC)
+#define SLAVES_OCW (ADDR_PIC_BASE + OFF_OCW + SIZE_PIC)
+
+/*
+** The following banks of definitions ICW1, ICW2, ICW3, and ICW4 are used
+** to define the fields of the various ICWs for initialisation of the PICs
+*/
+
+/*
+** ICW1
+*/
+
+#define ICW_TEMPLATE 0x10
+
+#define LEVL_TRIGGER 0x08
+#define EDGE_TRIGGER 0x00
+#define ADDR_INTRVL4 0x04
+#define ADDR_INTRVL8 0x00
+#define SINGLE__MODE 0x02
+#define CASCADE_MODE 0x00
+#define ICW4__NEEDED 0x01
+#define NO_ICW4_NEED 0x00
+
+/*
+** ICW2
+*/
+
+#define PICM_VECTBASE IDT_IRQ_BASE
+#define PICS_VECTBASE (PICM_VECTBASE + 0x08)
+
+/*
+** ICW3
+*/
+
+#define SLAVE_ON_IR0 0x01
+#define SLAVE_ON_IR1 0x02
+#define SLAVE_ON_IR2 0x04
+#define SLAVE_ON_IR3 0x08
+#define SLAVE_ON_IR4 0x10
+#define SLAVE_ON_IR5 0x20
+#define SLAVE_ON_IR6 0x40
+#define SLAVE_ON_IR7 0x80
+
+#define I_AM_SLAVE_0 0x00
+#define I_AM_SLAVE_1 0x01
+#define I_AM_SLAVE_2 0x02
+#define I_AM_SLAVE_3 0x03
+#define I_AM_SLAVE_4 0x04
+#define I_AM_SLAVE_5 0x05
+#define I_AM_SLAVE_6 0x06
+#define I_AM_SLAVE_7 0x07
+
+/*
+** ICW4
+*/
+
+#define SNF_MODE_ENA 0x10
+#define SNF_MODE_DIS 0x00
+#define BUFFERD_MODE 0x08
+#define NONBUFD_MODE 0x00
+#define AUTO_EOI_MOD 0x02
+#define NRML_EOI_MOD 0x00
+#define I8086_EMM_MOD 0x01
+#define SET_MCS_MODE 0x00
+
+/*
+** OCW1
+*/
+
+#define PICM_MASK 0xFF
+#define PICS_MASK 0xFF
+
+/*
+** OCW2
+*/
+
+#define NON_SPEC_EOI 0x20
+#define SPECIFIC_EOI 0x30
+#define ROT_NON_SPEC 0x50
+#define SET_ROT_AEOI 0x40
+#define RSET_ROTAEOI 0x00
+#define ROT_SPEC_EOI 0x70
+#define SET_PRIORITY 0x60
+#define NO_OPERATION 0x20
+
+#define SEND_EOI_IR0 0x00
+#define SEND_EOI_IR1 0x01
+#define SEND_EOI_IR2 0x02
+#define SEND_EOI_IR3 0x03
+#define SEND_EOI_IR4 0x04
+#define SEND_EOI_IR5 0x05
+#define SEND_EOI_IR6 0x06
+#define SEND_EOI_IR7 0x07
+
+/*
+** OCW3
+*/
+
+#define OCW_TEMPLATE 0x08
+#define SPECIAL_MASK 0x40
+#define MASK_MDE_SET 0x20
+#define MASK_MDE_RST 0x00
+#define POLL_COMMAND 0x04
+#define NO_POLL_CMND 0x00
+#define READ_NEXT_RD 0x02
+#define READ_IR_ONRD 0x00
+#define READ_IS_ONRD 0x01
+
+
+/*
+** Standard PIC initialization values for PCs.
+*/
+#define PICM_ICW1 (ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8 \
+ | CASCADE_MODE | ICW4__NEEDED)
+#define PICM_ICW3 (SLAVE_ON_IR2)
+#define PICM_ICW4 (SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD \
+ | I8086_EMM_MOD)
+
+#define PICS_ICW1 (ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8 \
+ | CASCADE_MODE | ICW4__NEEDED)
+#define PICS_ICW3 (I_AM_SLAVE_2)
+#define PICS_ICW4 (SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD \
+ | I8086_EMM_MOD)
+
+/* Some systems need a little bit of delay
+ while fiddling with PIC registers. */
+#ifndef ASSEMBLER
+#define PIC_DELAY() asm volatile("jmp 1f; 1: jmp 1f; 1:")
+#else
+#define PIC_DELAY jmp 9f; 9: jmp 9f; 9:
+#endif
+
+
+#endif /* _I386_PIC_H_ */
diff --git a/i386/pc/putchar.c b/i386/pc/putchar.c
new file mode 100644
index 00000000..d5b106c2
--- /dev/null
+++ b/i386/pc/putchar.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/eflags.h>
+
+#include "real.h"
+
+#ifndef ENABLE_IMMCONSOLE
+
+#include <rc.h>
+int putchar(int c)
+{
+
+ if (c == '\n')
+ putchar('\r');
+
+ {
+#if RCLINE >= 0
+ static int serial_inited = 0;
+ if (! serial_inited) {
+ init_serial();
+ serial_inited = 1;
+ }
+ serial_putc(c);
+#else
+ struct real_call_data rcd;
+ rcd.eax = 0x0e00 | (c & 0xff);
+ rcd.ebx = 0x07;
+ rcd.flags = 0;
+ real_int(0x10, &rcd);
+#endif
+ }
+
+ return 0;
+}
+
+#else /* ENABLE_IMMCONSOLE */
+
+void
+putchar(unsigned char c)
+{
+ static int ofs = -1;
+
+ if (ofs < 0)
+ {
+ ofs = 0;
+ putchar('\n');
+ }
+ if (c == '\r')
+ {
+ ofs = 0;
+ }
+ else if (c == '\n')
+ {
+ bcopy((void *)(0xb8000+80*2), (void *)0xb8000, 80*2*24);
+ bzero((void *)(0xb8000+80*2*24), 80*2);
+ ofs = 0;
+ }
+ else
+ {
+ volatile unsigned char *p;
+
+ if (ofs >= 80)
+ {
+ putchar('\r');
+ putchar('\n');
+ }
+
+ p = (void*)0xb8000 + 80*2*24 + ofs*2;
+ p[0] = c;
+ p[1] = 0x0f;
+ ofs++;
+ }
+}
+
+#endif /* ENABLE_IMMCONSOLE */
diff --git a/i386/pc/real.h b/i386/pc/real.h
new file mode 100644
index 00000000..4206231c
--- /dev/null
+++ b/i386/pc/real.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_PC_REAL_CALL_H_
+#define _I386_PC_REAL_CALL_H_
+
+/* This structure happens to correspond to the DPMI real-call structure. */
+struct real_call_data
+{
+ unsigned edi;
+ unsigned esi;
+ unsigned ebp;
+ unsigned reserved;
+ unsigned ebx;
+ unsigned edx;
+ unsigned ecx;
+ unsigned eax;
+ unsigned short flags;
+ unsigned short es;
+ unsigned short ds;
+ unsigned short fs;
+ unsigned short gs;
+ unsigned short ip;
+ unsigned short cs;
+ unsigned short sp;
+ unsigned short ss;
+};
+
+/* Code segment we originally had when we started in real mode. */
+extern unsigned short real_cs;
+
+extern void (*real_int)(int intnum, struct real_call_data *rcd);
+extern void (*real_exit)(int rc);
+
+#define real_call_data_init(rcd) \
+ ({ (rcd)->flags = 0; \
+ (rcd)->ss = 0; \
+ (rcd)->sp = 0; \
+ })
+
+#endif /* _I386_PC_REAL_CALL_H_ */
diff --git a/i386/pc/real_tss.c b/i386/pc/real_tss.c
new file mode 100644
index 00000000..745a6113
--- /dev/null
+++ b/i386/pc/real_tss.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/tss.h>
+#include <mach/machine/proc_reg.h>
+
+#include "cpu.h"
+#include "real_tss.h"
+#include "vm_param.h"
+#include "config.h"
+
+#ifdef ENABLE_REAL_TSS
+
+static void real_tss_init()
+{
+ /* Only initialize once. */
+ if (!real_tss.ss0)
+ {
+ /* Initialize the real-mode TSS. */
+ real_tss.ss0 = KERNEL_DS;
+ real_tss.esp0 = get_esp();
+ real_tss.io_bit_map_offset = sizeof(real_tss);
+
+ /* Set the last byte in the I/O bitmap to all 1's. */
+ ((unsigned char*)&real_tss)[REAL_TSS_SIZE] = 0xff;
+ }
+}
+
+void
+cpu_gdt_init_REAL_TSS(struct cpu *cpu)
+{
+ real_tss_init();
+
+ fill_gdt_descriptor(cpu, REAL_TSS,
+ kvtolin(&real_tss), REAL_TSS_SIZE-1,
+ ACC_PL_K|ACC_TSS, 0);
+}
+
+#endif /* ENABLE_REAL_TSS */
diff --git a/i386/pc/real_tss.h b/i386/pc/real_tss.h
new file mode 100644
index 00000000..2f895475
--- /dev/null
+++ b/i386/pc/real_tss.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_PC_REAL_TSS_
+#define _I386_PC_REAL_TSS_
+
+#include <mach/machine/tss.h>
+
+extern struct i386_tss real_tss;
+
+/* This size doesn't include the extra required 0xff byte
+ just past the end of the real IOPB. */
+#define REAL_TSS_SIZE (sizeof(real_tss)+65536/8)
+
+#endif /* _I386_PC_REAL_TSS_ */
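Spelling out the size arithmetic: the full I/O permission bitmap covers 65536 ports at one bit each, and real_tss_def.S (next file) reserves one byte beyond that for the mandatory trailing 0xff set in real_tss.c:

/* REAL_TSS_SIZE == sizeof(real_tss) + 65536/8
                 == sizeof(real_tss) + 8192 bytes,
   and the storage reserved is REAL_TSS_SIZE + 1 so that the required
   0xff terminator byte fits just past the end of the IOPB. */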
diff --git a/i386/pc/real_tss_def.S b/i386/pc/real_tss_def.S
new file mode 100644
index 00000000..18712505
--- /dev/null
+++ b/i386/pc/real_tss_def.S
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/asm.h>
+
+#include "config.h"
+#include "pc_asm.h"
+
+#ifdef ENABLE_REAL_TSS
+
+ .data
+ .globl EXT(real_tss)
+ .comm EXT(real_tss),REAL_TSS_SIZE+1
+
+#endif /* ENABLE_REAL_TSS */
diff --git a/i386/pc/rv86/config.h b/i386/pc/rv86/config.h
new file mode 100644
index 00000000..5f971ced
--- /dev/null
+++ b/i386/pc/rv86/config.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include_next "config.h"
+
+#ifndef ENABLE_REAL_TSS
+#define ENABLE_REAL_TSS
+#endif
+
diff --git a/i386/pc/rv86/gdt_sels.h b/i386/pc/rv86/gdt_sels.h
new file mode 100644
index 00000000..89a13c7a
--- /dev/null
+++ b/i386/pc/rv86/gdt_sels.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include_next "gdt_sels.h"
+
+/* This is a special TSS with a full IO bitmap
+ that allows access to all I/O ports in v86 mode.
+ It's used for making calls to the real-mode BIOS (or DOS). */
+gdt_sel(RV86_TSS)
+
diff --git a/i386/pc/rv86/idt_irq_init.c b/i386/pc/rv86/idt_irq_init.c
new file mode 100644
index 00000000..f46882a5
--- /dev/null
+++ b/i386/pc/rv86/idt_irq_init.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "cpu.h"
+#include "idt.h"
+
+void idt_irq_init()
+{
+
+#define irq(pic,picnum,irqnum) \
+ { extern void rv86_reflect_irq##irqnum(); \
+ fill_idt_gate(&cpu[0], irq_##pic##_base + picnum, \
+ (vm_offset_t)rv86_reflect_irq##irqnum, KERNEL_CS, \
+ ACC_PL_K|ACC_INTR_GATE); \
+ }
+#include "irq_list.h"
+#undef irq
+}
+
diff --git a/i386/pc/rv86/rv86_real_int.c b/i386/pc/rv86/rv86_real_int.c
new file mode 100644
index 00000000..d9c35b68
--- /dev/null
+++ b/i386/pc/rv86/rv86_real_int.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/seg.h>
+#include <mach/machine/proc_reg.h>
+#include <mach/machine/far_ptr.h>
+#include <mach/machine/eflags.h>
+
+#include "vm_param.h"
+#include "real.h"
+#include "real_tss.h"
+#include "cpu.h"
+#include "debug.h"
+
+
+/*
+
+ There seem to be three main ways to handle v86 mode:
+
+ * The v86 environment is just an extension of the normal kernel environment:
+ you can switch to and from v86 mode just as you can change any other processor state.
+ You always keep running on the separate "logical" stack,
+ which is the kernel stack when running in protected mode,
+ or the user stack when running in v86 mode.
+ When in v86 mode, the "actual" kernel stack is just a stub
+ big enough to switch back to the "normal" kernel stack,
+ which was being used as the user stack while running in v86 mode.
+ Thus, v86 and protected-mode "segments" of stack data
+ can be interleaved together on the same logical stack.
+
+ - To make a real int call from kernel pmode,
+ switch to v86 mode and execute an int instruction,
+ then switch back to protected mode.
+
+ - To reflect an interrupt to v86 mode:
+
+ > If the processor was running in v86 mode,
+ just adjust the kernel and user stacks
+ to emulate a real-mode interrupt, and return.
+
+ > If the processor was running in pmode,
+ switch to v86 mode and re-trigger the interrupt
+ with a software int instruction.
+
+ - To handle an interrupt in pmode:
+
+ > If the processor was running in v86 mode,
+ switch from the stub stack to the user stack that was in use
+ (could be different from the stack we set originally,
+ because BIOS/DOS code might have switched stacks!),
+ call the interrupt handler, switch back, and return.
+
+ > If the processor was running in pmode,
+ just call the interrupt handler and return.
+
+ This method only works if the whole "kernel" is <64KB
+ and generally compatible with real-mode execution.
+ This is the model my DOS extender currently uses.
+
+ One major disadvantage of this method
+ is that interrupt handlers can't run "general" protected-mode code,
+ such as typical code compiled by GCC.
+ This is because, if an interrupt occurs while in v86 mode,
+ the v86-mode ss:sp may point basically anywhere in the low 1MB,
+ and therefore it can't be used directly as a pmode stack;
+ and the only other stack available is the minuscule stub stack.
+ Since "general" protected-mode code expects a full-size stack
+ with an SS equal to the normal protected-mode DS,
+ neither of these available stacks will suffice.
+ It is impossible to switch back to the original kernel stack
+ because arbitrary DOS or BIOS code might have switched from it
+ to a different stack somewhere else in the low 1MB,
+ and we have no way of telling where the SP was when that happened.
+ The upshot is that interrupt handlers must be extremely simple;
+ in MOSS, all they do is post a signal to "the process,"
+ and return immediately without actually handling the interrupt.
+
+ * The v86 environment is a separate "task" with its own user and kernel stacks;
+ you switch back and forth as if between multiple ordinary tasks,
+ the tasks can preempt each other, go idle waiting for events, etc.
+
+ - To make a real int call from kernel pmode,
+ the task making the call essentially does a synchronous IPC to the v86 task.
+ If the v86 task is busy with another request or a reflected interrupt,
+ the calling task will go idle until the v86 task is available.
+
+ - Reflecting an interrupt to v86 mode
+ basically amounts to sending a Unix-like "signal" to the v86 task:
+
+ > If the processor was running in the v86 task,
+ just adjust the kernel and user stacks
+ to emulate a real-mode interrupt, and return.
+
+ > If the processor was running in a protected-mode task
+ (or another v86-mode task),
+ post a signal to the v86 task, wake it up if it's asleep,
+ and invoke the scheduler to switch to the v86 task
+ if it has a higher priority than the currently running task.
+
+ - To handle an interrupt in pmode,
+ just call the interrupt handler and return.
+ It doesn't matter whether the interrupt was from v86 or pmode,
+ because the kernel stacks look the same in either case.
+
+ One big problem with this method is that if interrupts are to be handled in v86 mode,
+ all the typical problems of handling interrupts in user-mode tasks pop up.
+ In particular, an interrupt can now cause preemption,
+ so this will break an interruptible but nonpreemptible environment.
+ (The problem is not that the interrupted task is "preempted"
+ to switch temporarily to the v86 task to handle the interrupt;
+ the problem is that when the v86 task is done handling the interrupt,
+ the scheduler will be invoked and some task other than the interrupted task may be run.)
+
+ Of course, this is undoubtedly the right solution
+ if that's the interrupt model the OS is using anyway
+ (i.e. if the OS already supports user-level protected-mode interrupts).
+
+ * A bastardization of the two above approaches:
+ treat the v86 environment as a separate "task",
+ but a special one that doesn't behave at all like other tasks.
+ The v86 "task" in this case is more of an "interrupt co-stack"
+ that grows and shrinks alongside the normal interrupt stack
+ (or the current kernel stack, if interrupts are handled on the kernel stack).
+ Interrupts and real calls can cause switches between these two interrupt stacks,
+ but they can't cause preemption in the normal sense.
+ The route taken while building the stacks is exactly the opposite
+ of the route taken while tearing them down.
+
+ Now two "kernel stack pointers" have to be maintained all the time instead of one.
+ When running in protected mode:
+
+ - The ESP register contains the pmode stack pointer.
+ - Some global variable contains the v86 stack pointer.
+
+ When running in v86 mode:
+
+ - The ESP register contains the v86 stack pointer.
+ (Note that BIOS/DOS code can switch stacks,
+ so at any given time it may point practically anywhere!)
+ - The current tss's esp0 contains the pmode stack pointer.
+
+ Whenever a switch is made, a stack frame is placed on the new co-stack
+ indicating that the switch was performed.
+
+ - To make a real int call from kernel pmode,
+ build a real-mode interrupt stack frame on the v86 interrupt stack,
+ build a v86-mode trap stack frame on the pmode stack,
+ set the tss's esp0 to point to the end of that stack frame,
+ and iret from it.
+ Then when the magic "done-with-real-call" int instruction is hit,
+ the pmode interrupt handler will see it
+ and know to simply destroy the v86 trap stack on the pmode stack.
+
+ - Handling an interrupt can always be thought of as going "through" pmode:
+ switching from the v86 stack to the pmode stack
+ if the processor was in v86 mode when the interrupt was taken,
+ and switching from the pmode stack back to the v86 stack as described above
+ if the interrupt is to be reflected to v86 mode.
+
+ Of course, optimized paths are possible:
+
+ - To reflect an interrupt to v86 mode:
+
+ > If the processor was running in v86 mode,
+ just adjust the kernel and user stack frames and return.
+
+ > If the processor was running in pmode,
+ do as described above for explicit real int calls.
+
+ - To handle an interrupt in pmode:
+
+ > If the processor was running in v86 mode,
+ switch to the pmode stack,
+ stash the old v86 stack pointer variable on the pmode stack,
+ and set the v86 stack pointer variable to the new location.
+ Call the interrupt handler,
+ then tear down everything and return to v86 mode.
+
+ Observation:
+ In the first and third models,
+ explicit real int calls are entirely symmetrical
+ to hardware interrupts from pmode to v86 mode.
+ This is valid because of the interruptible but nonpreemptible model:
+ no scheduling is involved, and the stack(s) will always be torn down
+ in exactly the opposite order in which they were built up.
+ In the second model,
+ explicit real calls are quite different,
+ because the BIOS is interruptible but nonpreemptible:
+ you can reflect an interrupt into the v86 task at any time,
+ but you can only make an explicit request to that task when it's ready
+ (i.e. no other requests or interrupts are outstanding).
+
+*/
+
+
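+/*
+ * Illustrative sketch only, not part of the original source: the code in
+ * this file follows the third model above, which boils down to keeping two
+ * "kernel stack pointers" and saving/restoring the inactive one around
+ * every nested switch.  The names below (v86_sp_demo, pmode_sp_demo, and
+ * the *_demo functions) are hypothetical stand-ins for rv86_usp and
+ * real_tss.esp0; the code merely simulates, in plain C, the save/adjust/
+ * call/restore pattern that rv86_reflect_irq.S performs in registers.
+ */
+#if 0	/* illustration only, never compiled */
+static unsigned v86_sp_demo;	/* plays the role of rv86_usp        */
+static unsigned pmode_sp_demo;	/* plays the role of real_tss.esp0   */
+
+static void handle_in_pmode_demo(void)
+{
+	/* Ordinary protected-mode interrupt handling would go here;
+	   a nested real int call would overwrite both pointers. */
+}
+
+static void interrupt_from_v86_demo(unsigned trapped_v86_sp)
+{
+	/* Save both co-stack pointers, as int_from_v86 does. */
+	unsigned saved_v86_sp = v86_sp_demo;
+	unsigned saved_pmode_sp = pmode_sp_demo;
+
+	/* Point the v86 co-stack at the spot where the interrupt arrived,
+	   leaving room for a 6-byte real-mode interrupt frame. */
+	v86_sp_demo = trapped_v86_sp - 6;
+
+	/* Handle the interrupt "through" pmode... */
+	handle_in_pmode_demo();
+
+	/* ...and tear everything down in exactly the reverse order. */
+	v86_sp_demo = saved_v86_sp;
+	pmode_sp_demo = saved_pmode_sp;
+}
+#endif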
+
+#define RV86_USTACK_SIZE 1024
+
+vm_offset_t rv86_ustack_pa;
+vm_offset_t rv86_return_int_pa;
+struct far_pointer_32 rv86_usp;
+struct far_pointer_16 rv86_rp;
+
+void rv86_real_int(int intnum, struct real_call_data *rcd)
+{
+ unsigned short old_tr;
+ unsigned int old_eflags;
+
+ /* If this is the first time this routine is being called,
+ initialize the real-mode (v86) stack and return trampoline. */
+ if (!rv86_ustack_pa)
+ {
+ rv86_ustack_pa = 0xa0000 - RV86_USTACK_SIZE; /* XXX */
+
+ assert(rv86_ustack_pa < 0x100000);
+
+ /* Use the top two bytes of the ustack for an 'int $0xff' instruction. */
+ rv86_return_int_pa = rv86_ustack_pa + RV86_USTACK_SIZE - 2;
+ *(short*)phystokv(rv86_return_int_pa) = 0xffcd;
+
+ /* Set up the v86 stack pointer. */
+ rv86_usp.seg = rv86_rp.seg = rv86_ustack_pa >> 4;
+ rv86_usp.ofs = rv86_rp.ofs = (rv86_ustack_pa & 0xf) + RV86_USTACK_SIZE - 2;
+
+ /* Pre-allocate a real-mode interrupt stack frame. */
+ rv86_usp.ofs -= 6;
+ }
+
+ /* Save the processor flags so they can be restored on the way out. */
+ old_eflags = get_eflags();
+
+ /* Switch to the TSS to use in v86 mode. */
+ old_tr = get_tr();
+ cpu[0].tables.gdt[REAL_TSS_IDX].access &= ~ACC_TSS_BUSY;
+ set_tr(REAL_TSS);
+
+ asm volatile("
+ pushl %%ebp
+ pushl %%eax
+ call rv86_real_int_asm
+ popl %%eax
+ popl %%ebp
+ " :
+ : "a" (rcd), "S" (intnum)
+ : "eax", "ebx", "ecx", "edx", "esi", "edi");
+
+ /* Switch to the original TSS. */
+ cpu[0].tables.gdt[old_tr/8].access &= ~ACC_TSS_BUSY;
+ set_tr(old_tr);
+
+ /* Restore the original processor flags. */
+ set_eflags(old_eflags);
+}
+
+void (*real_int)(int intnum, struct real_call_data *rcd) = rv86_real_int;
+
diff --git a/i386/pc/rv86/rv86_real_int_asm.S b/i386/pc/rv86/rv86_real_int_asm.S
new file mode 100644
index 00000000..54b1b9b2
--- /dev/null
+++ b/i386/pc/rv86/rv86_real_int_asm.S
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/asm.h>
+#include <mach/machine/eflags.h>
+
+#include "trap.h"
+#include "i386_asm.h"
+#include "pc_asm.h"
+#include "trap_asm.h"
+
+ .text
+ .globl rv86_real_int_asm
+rv86_real_int_asm:
+
+ /* Stash our protected-mode stack pointer. */
+ movl %esp,EXT(real_tss)+TSS_ESP0
+
+ /* Load the linear/physical-address data segment into ES,
+ for easy access to real-mode memory. */
+ movl $LINEAR_DS,%edx
+ movw %dx,%es
+
+ /* Find the physical address of the real-mode interrupt stack (es:ebx).
+ A 6-byte stack frame has already been allocated on it. */
+ movl EXT(rv86_usp)+4,%ebx
+ shll $4,%ebx
+ addl EXT(rv86_usp),%ebx
+
+ /* Store the magic return pointer into the real-mode interrupt stack frame. */
+ movl EXT(rv86_rp),%edx
+ movl %edx,%es:(%ebx)
+ movw RCD_FLAGS(%eax),%dx
+ movw %dx,%es:4(%ebx)
+
+ /* Find the address of the real mode interrupt vector (es:esi). */
+ shll $2,%esi
+
+ /* Build the v86 trap frame. */
+ xorl %edx,%edx
+ movw RCD_GS(%eax),%dx
+ pushl %edx
+ movw RCD_FS(%eax),%dx
+ pushl %edx
+ movw RCD_DS(%eax),%dx
+ pushl %edx
+ movw RCD_ES(%eax),%dx
+ pushl %edx
+ pushl EXT(rv86_usp)+4
+ pushl EXT(rv86_usp)
+ movl $EFL_VM+EFL_IOPL_USER,%ecx
+ orw RCD_FLAGS(%eax),%cx
+ andl $-1-EFL_IF-EFL_TF,%ecx
+ pushl %ecx
+ movw %es:2(%esi),%dx
+ pushl %edx
+ movw %es:(%esi),%dx
+ pushl %edx
+
+ /* Load the requested register state. */
+ movl RCD_EDI(%eax),%edi
+ movl RCD_ESI(%eax),%esi
+ movl RCD_EBP(%eax),%ebp
+ movl RCD_EBX(%eax),%ebx
+ movl RCD_EDX(%eax),%edx
+ movl RCD_ECX(%eax),%ecx
+ movl RCD_EAX(%eax),%eax
+
+ /* Drop into v86 mode. */
+ iret
+
+ENTRY(rv86_return)
+
+ /* Restore the kernel segment registers. */
+ movw %ss,%ax
+ movw %ax,%ds
+ movw %ax,%es
+
+ /* Retrieve the real_call_data pointer from rv86_real_int_asm's stack frame. */
+ movl TR_V86SIZE+4(%esp),%eax
+
+ /* Stash the final register state. */
+ movl TR_EDI(%esp),%edx; movl %edx,RCD_EDI(%eax)
+ movl TR_ESI(%esp),%edx; movl %edx,RCD_ESI(%eax)
+ movl TR_EBP(%esp),%edx; movl %edx,RCD_EBP(%eax)
+ movl TR_EBX(%esp),%edx; movl %edx,RCD_EBX(%eax)
+ movl TR_EDX(%esp),%edx; movl %edx,RCD_EDX(%eax)
+ movl TR_ECX(%esp),%edx; movl %edx,RCD_ECX(%eax)
+ movl TR_EAX(%esp),%edx; movl %edx,RCD_EAX(%eax)
+ movl TR_EFLAGS(%esp),%edx; movw %dx,RCD_FLAGS(%eax)
+ movl TR_V86_ES(%esp),%edx; movw %dx,RCD_ES(%eax)
+ movl TR_V86_DS(%esp),%edx; movw %dx,RCD_DS(%eax)
+ movl TR_V86_FS(%esp),%edx; movw %dx,RCD_FS(%eax)
+ movl TR_V86_GS(%esp),%edx; movw %dx,RCD_GS(%eax)
+
+ /* Return from the call to rv86_real_int_asm. */
+ lea TR_V86SIZE(%esp),%esp
+ ret
+
diff --git a/i386/pc/rv86/rv86_reflect_irq.S b/i386/pc/rv86/rv86_reflect_irq.S
new file mode 100644
index 00000000..5d68fae8
--- /dev/null
+++ b/i386/pc/rv86/rv86_reflect_irq.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/asm.h>
+#include <mach/machine/eflags.h>
+
+#include "trap.h"
+#include "trap_asm.h"
+#include "pc_asm.h"
+#include "i386_asm.h"
+
+/*
+ * Define a set of interrupt handlers to reflect interrupts to v86 mode.
+ */
+
+ .text
+
+#define master_base 0x08
+#define slave_base 0x70
+
+#define irq(pic,picnum,irqnum) \
+ENTRY(rv86_reflect_irq##irqnum) ;\
+ pushl $pic##_base+picnum ;\
+ pushl $0x80000000+irqnum /* (for debug) */ ;\
+ jmp allintrs
+
+#include "irq_list.h"
+
+
+allintrs:
+ pusha
+ pushl %ds
+ pushl %es
+ pushl %fs
+ pushl %gs
+
+ /* Load the normal kernel segment registers. */
+ movw %ss,%ax
+ movw %ax,%ds
+ movw %ax,%es
+
+ /* See if we came from v86 mode. */
+ testl $EFL_VM,TR_EFLAGS(%esp)
+ jnz int_from_v86
+
+ movl TR_ERR(%esp),%eax
+ pushl $dummy_rcd
+ pushl %eax
+ call EXT(rv86_real_int)
+ addl $2*4,%esp
+
+ popl %gs
+ popl %fs
+ popl %es
+ popl %ds
+ popa
+ addl $2*4,%esp
+ iret
+
+int_from_v86:
+
+ /* Save the v86 stack pointer before handling the interrupt.
+ We need this in order to handle recursive reflected interrupts
+ possibly interspersed with protected-mode interrupts. */
+ movl EXT(rv86_usp),%esi
+ movl EXT(rv86_usp)+4,%edi
+ movl EXT(real_tss)+TSS_ESP0,%ebx
+
+ movl TR_ESP(%esp),%eax
+ subw $6,%ax /* allocate a real-mode interrupt stack frame. */
+ movl %eax,EXT(rv86_usp)
+ movl TR_SS(%esp),%eax
+ movw %ax,EXT(rv86_usp)+4
+
+ movl TR_ERR(%esp),%eax
+ pushl $dummy_rcd
+ pushl %eax
+ call EXT(rv86_real_int)
+ addl $2*4,%esp
+
+ movl %esi,EXT(rv86_usp)
+ movl %edi,EXT(rv86_usp)+4
+ movl %ebx,EXT(real_tss)+TSS_ESP0
+
+ addl $4*4,%esp
+ popa
+ addl $2*4,%esp
+ iret
+
+ /* Dummy real_call_data structure (always all zero)
+ to use when reflecting hardware interrupts. */
+ .comm dummy_rcd,RCD_SIZE
+
diff --git a/i386/pc/rv86/rv86_trap_handler.S b/i386/pc/rv86/rv86_trap_handler.S
new file mode 100644
index 00000000..793f6b65
--- /dev/null
+++ b/i386/pc/rv86/rv86_trap_handler.S
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/asm.h>
+#include <mach/machine/eflags.h>
+
+#include "i386_asm.h"
+#include "trap.h"
+#include "trap_asm.h"
+
+ .text
+
+ENTRY(rv86_trap_handler)
+ cmpl $T_GENERAL_PROTECTION,TR_TRAPNO(%esp)
+ jz gpf_from_v86
+
+ UNEXPECTED_TRAP
+
+gpf_from_v86:
+
+ /* Load the linear/physical-address data segment,
+ for easy access to real-mode memory. */
+ movl $LINEAR_DS,%eax
+ movw %ax,%ds
+
+ /* Find the physical address of the trapping instruction (ebx). */
+ movzwl TR_CS(%esp),%ebx
+ shll $4,%ebx
+ addl TR_EIP(%esp),%ebx
+
+ /* See if we're just trying to get out of v86 mode. */
+ cmpl %ss:EXT(rv86_return_int_pa),%ebx
+ je EXT(rv86_return)
+
+ /* Check the instruction (al). */
+ movb (%ebx),%al
+ cmpb $0xcd,%al
+ je gpf_int_n
+
+ UNEXPECTED_TRAP
+
+
+gpf_int_n:
+
+ /* Bump the ip past the int instruction. */
+ addw $2,TR_EIP(%esp)
+
+ /* Find the real mode interrupt vector number (esi). */
+ movzbl 1(%ebx),%esi
+
+ /* See if it's a copy-extended-memory interrupt request;
+ if so, just handle it here. */
+ cmpl $0x15,%esi
+ jne 1f
+ cmpb $0x87,TR_EAX+1(%esp)
+ je copy_ext_mem
+1:
+
+ /* XXX The stuff down here is essentially the same as in moss. */
+
+reflect_v86_intr:
+
+ /* Find the address of the real mode interrupt vector (esi). */
+ shll $2,%esi
+
+ /* Make room for the real-mode interrupt stack frame. */
+ subw $6,TR_ESP(%esp)
+
+ /* Find the physical address of the v86 stack (ebx). */
+ movzwl TR_SS(%esp),%ebx
+ shll $4,%ebx
+ addl TR_ESP(%esp),%ebx
+
+ /* Store the return information into the v86 stack frame. */
+ movl TR_EIP(%esp),%eax
+ movw %ax,(%ebx)
+ movl TR_CS(%esp),%eax
+ movw %ax,2(%ebx)
+ movl TR_EFLAGS(%esp),%eax
+ movw %ax,4(%ebx)
+
+ /* Find the real-mode interrupt vector to invoke,
+ and set up the real_call_thread's kernel stack frame
+ to point to it. */
+ movl (%esi),%eax
+ movw %ax,TR_EIP(%esp)
+ shrl $16,%eax
+ movw %ax,TR_CS(%esp)
+ andl $-1-EFL_IF-EFL_TF,TR_EFLAGS(%esp)
+
+ /* Restore saved state and return. */
+ addl $4*4,%esp
+ popa
+ addl $4*2,%esp
+ iret
+
+
+
+/* We intercepted a copy-extended-memory software interrupt
+ (int 0x15 function 0x87).
+ This is used by HIMEM.SYS, for example, to manage extended memory.
+ The BIOS's routine isn't going to work in v86 mode,
+ so do it ourselves. */
+copy_ext_mem:
+
+ /* Find the parameter block provided by the caller (ebx). */
+ movzwl TR_V86_ES(%esp),%ebx
+ movzwl TR_ESI(%esp),%eax
+ shll $4,%ebx
+ addl %eax,%ebx
+
+ /* Source address (esi). */
+ movl 0x12(%ebx),%esi
+ andl $0x00ffffff,%esi
+
+ /* Destination address (edi). */
+ movl 0x1a(%ebx),%edi
+ andl $0x00ffffff,%edi
+
+ /* Number of bytes (ecx). */
+ movzwl TR_ECX(%esp),%ecx
+ addl %ecx,%ecx
+
+ /* Use the standard i386 bcopy routine to copy the data.
+ This assumes it's "friendly" in its use of segment registers
+ (i.e. always uses ss for stack data and ds/es for the data to copy).
+ The bcopy is simple enough that this should always be true. */
+ movw %ds,%ax
+ movw %ax,%es
+ cld
+ pushl %ecx
+ pushl %edi
+ pushl %esi
+ call EXT(bcopy)
+ addl $3*4,%esp
+
+ /* Clear the carry flag to indicate that the copy was successful.
+ AH is also cleared, below. */
+ andl $-1-EFL_CF,TR_EFLAGS(%esp)
+
+ /* Restore saved state and return. */
+ addl $4*4,%esp
+ popa
+ xorb %ah,%ah
+ addl $4*2,%esp
+ iret
+
diff --git a/i386/pc/rv86/trap_handler.S b/i386/pc/rv86/trap_handler.S
new file mode 100644
index 00000000..99254eef
--- /dev/null
+++ b/i386/pc/rv86/trap_handler.S
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/asm.h>
+#include <mach/machine/eflags.h>
+
+#include "trap.h"
+#include "trap_asm.h"
+
+ .text
+
+ENTRY(trap_handler)
+
+ /* See if we came from v86 mode. */
+ testl $EFL_VM,TR_EFLAGS(%esp)
+ jnz EXT(rv86_trap_handler)
+
+ UNEXPECTED_TRAP
+
diff --git a/i386/util/Makerules b/i386/util/Makerules
new file mode 100644
index 00000000..8bdb020a
--- /dev/null
+++ b/i386/util/Makerules
@@ -0,0 +1,38 @@
+#
+# Copyright (c) 1995 The University of Utah and
+# the Computer Systems Laboratory (CSL). All rights reserved.
+#
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+# IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+# ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+#
+# CSS requests users of this software to return to css-dist@cs.utah.edu any
+# improvements that they make and grant CSS redistribution rights.
+#
+# Author: Bryan Ford, University of Utah CSS
+#
+ifndef _mach4_i386_kernel_util_makerules_
+_mach4_i386_kernel_util_makerules_ = yes
+
+
+CLEAN_FILES += i386_asm.h
+
+
+include $(MSRCDIR)/Makerules
+include $(GSRCDIR)/kernel/util/Makerules
+
+
+# Handy rule to show what's in the GDT...
+gdt-list:
+ echo '#include "gdt_sels.h"' >gdt-list.c
+ $(CC) -E $(CFLAGS) $(TARGET_CFLAGS) gdt-list.c | grep ^gdt_sel
+ rm -f gdt-list.c
+
+
+endif
diff --git a/i386/util/NOTES b/i386/util/NOTES
new file mode 100644
index 00000000..770bbef7
--- /dev/null
+++ b/i386/util/NOTES
@@ -0,0 +1,35 @@
+
+raw_switch:
+
+ Provides the code to switch between real and protected mode.
+ Switches between the environments "completely":
+ e.g. when switching to protected mode,
+ all the normal protected-mode state for that environment is set up.
+
+raw_pmode:
+
+ i16_raw_enter_pmode()
+ Enters protected mode from real mode.
+ Does not initialize IDT or TSS or anything else;
+ just gets the system into protected mode
+ with a simple temporary GDT.
+ Returns with interrupts turned off
+ (and they'd better stay off until there's a valid pmode IDT!)
+
+ i16_raw_leave_pmode()
+ Assumes paging is turned off.
+ Returns with interrupts turned off;
+ they can probably be turned back on at any time.
+
+ Depends on:
+ i16_die()
+ A20 enable/disable code (e.g. raw_a20).
+ gdt.h: KERNEL_16_CS, KERNEL_16_DS
+
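+ A minimal sketch of the intended call sequence (only the two
+ i16_raw_*_pmode() calls are real; the comments stand in for whatever
+ the caller does in between):
+
+    i16_raw_enter_pmode();  /* now in pmode, interrupts off */
+    /* install a valid pmode IDT before enabling interrupts */
+    /* ... run 32-bit code ... */
+    /* disable interrupts again before dropping back */
+    i16_raw_leave_pmode();  /* back in real mode, interrupts off */
+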
+
+
+vm_param.h:
+
+ Must export kvtolin(), lintokv()
+
+
diff --git a/i386/util/anno.c b/i386/util/anno.c
new file mode 100644
index 00000000..0137fe85
--- /dev/null
+++ b/i386/util/anno.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "anno.h"
+#include "debug.h"
+
+#ifdef ENABLE_ANNO
+
+void anno_init()
+{
+ extern struct anno_entry __ANNO_START__[], __ANNO_END__[];
+ struct anno_entry *base;
+
+ /* Sort the tables using a slow, simple selection sort;
+ it only needs to be done once. */
+ for (base = __ANNO_START__; base < __ANNO_END__; base++)
+ {
+ struct anno_entry *cur, *low, tmp;
+
+ /* Select the lowermost remaining entry,
+ and swap it into the base slot.
+ Sort by table first, then by val1, val2, val3. */
+ low = base;
+ for (cur = base+1; cur < __ANNO_END__; cur++)
+ if ((cur->table < low->table)
+ || ((cur->table == low->table)
+ && ((cur->val1 < low->val1)
+ || ((cur->val1 == low->val1)
+ && ((cur->val2 < low->val2)
+ || ((cur->val2 == low->val2)
+ && (cur->val3 < low->val3)))))))
+ low = cur;
+ tmp = *base;
+ *base = *low;
+ *low = tmp;
+ }
+
+ /* Initialize each anno_table structure with entries in the array. */
+ for (base = __ANNO_START__; base < __ANNO_END__; )
+ {
+ struct anno_entry *end;
+
+ for (end = base;
+ (end < __ANNO_END__) && (end->table == base->table);
+ end++);
+ base->table->start = base;
+ base->table->end = end;
+
+ base = end;
+ }
+
+#if 0 /* debugging code */
+ {
+ struct anno_table *t = 0;
+
+ for (base = __ANNO_START__; base < __ANNO_END__; base++)
+ {
+ if (t != base->table)
+ {
+ t = base->table;
+ printf("table %08x: %08x-%08x (%d entries)\n",
+ t, t->start, t->end, t->end - t->start);
+ assert(t->start == base);
+ }
+ printf(" vals %08x %08x %08x\n",
+ base->table, base->val1, base->val2, base->val3);
+ }
+ }
+#endif
+}
+
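+#if 0	/* illustrative sketch, not part of the original file */
+/* anno_init() leaves every table sorted by val1 (then val2, val3), so an
+   exact-match lookup -- e.g. the anno_trap search for a faulting EIP
+   described in anno.h -- can be a simple binary search on val1.  This
+   hypothetical helper only shows how the sorted tables are meant to be
+   used; if several entries share the same val1 it returns an arbitrary
+   one of them. */
+static struct anno_entry *anno_find_exact(struct anno_table *t, int val1)
+{
+	struct anno_entry *lo = t->start, *hi = t->end;
+
+	while (lo < hi)
+	{
+		struct anno_entry *mid = lo + (hi - lo) / 2;
+
+		if (mid->val1 < val1)
+			lo = mid + 1;
+		else if (val1 < mid->val1)
+			hi = mid;
+		else
+			return mid;
+	}
+	return 0;
+}
+#endif
+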
+#endif /* ENABLE_ANNO */
diff --git a/i386/util/anno.h b/i386/util/anno.h
new file mode 100644
index 00000000..67e2778a
--- /dev/null
+++ b/i386/util/anno.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_KERNEL_UTIL_ANNO_H_
+#define _I386_KERNEL_UTIL_ANNO_H_
+
+#ifndef ASSEMBLER
+
+
+struct anno_table
+{
+ struct anno_entry *start;
+ struct anno_entry *end;
+};
+
+struct anno_entry
+{
+ int val1;
+ int val2;
+ int val3;
+ struct anno_table *table;
+};
+
+
+#else /* ASSEMBLER */
+
+
+/* Create an arbitrary annotation entry.
+ Must switch back to an appropriate segment afterward. */
+#define ANNO_ENTRY(table, val1, val2, val3) \
+ .section .anno,"aw",@progbits ;\
+ .long val1,val2,val3,table
+
+/* Create an annotation entry for code in a text segment. */
+#define ANNO_TEXT(table, val2, val3) \
+9: ANNO_ENTRY(table, 9b, val2, val3) ;\
+ .text
+
+
+
+/* The following are for common annotation tables.
+ These don't have to be used in any given kernel,
+ and others can be defined as convenient. */
+
+
+/* The anno_intr table is generally accessed
+ on hardware interrupts that occur while running in kernel mode.
+ The value is a routine for the trap handler in interrupt.S
+ to jump to before processing the hardware interrupt.
+ This routine applies to all code from this address
+ up to but not including the address of the next ANNO_INTR.
+ To disable interrupt redirection for a piece of code,
+ place an ANNO_INTR(0) before it. */
+
+#define ANNO_INTR(routine) \
+ ANNO_TEXT(anno_intr, routine, 0)
+
+
+/* The anno_trap table is accessed
+ on processor traps that occur in kernel mode.
+ If a match is found in this table,
+ the specified alternate handler is run instead of the generic handler.
+ A match is found only if the EIP exactly matches an ANNO_TRAP entry
+ (i.e. these entries apply to individual instructions, not groups),
+ and if the trap type that occurred matches the type specified. */
+
+#define ANNO_TRAP(type, routine) \
+ ANNO_TEXT(anno_trap, type, routine)
+
+
+#endif /* ASSEMBLER */
+
+#endif /* _I386_KERNEL_UTIL_ANNO_H_ */
diff --git a/i386/util/cpu.h b/i386/util/cpu.h
new file mode 100644
index 00000000..d90c6f8c
--- /dev/null
+++ b/i386/util/cpu.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_UTIL_CPU_H_
+#define _I386_UTIL_CPU_H_
+
+#include <mach/machine/tss.h>
+
+#include "config.h"
+#include "gdt.h"
+#include "ldt.h"
+#include "idt.h"
+
+/*
+ * Multiprocessor i386/i486 systems use a separate copy of the
+ * GDT, IDT, LDT, and kernel TSS per processor. The first three
+ * are separate to avoid lock contention: the i386 uses locked
+ * memory cycles to access the descriptor tables. The TSS is
+ * separate since each processor needs its own kernel stack,
+ * and since using a TSS marks it busy.
+ */
+
+/* This structure holds the processor tables for this cpu. */
+struct cpu_tables
+{
+ struct i386_gate idt[IDTSZ];
+ struct i386_descriptor gdt[GDTSZ];
+#ifdef ENABLE_KERNEL_LDT
+ struct i386_descriptor ldt[LDTSZ];
+#endif
+#ifdef ENABLE_KERNEL_TSS
+ struct i386_tss tss;
+#endif
+};
+
+#include_next "cpu.h"
+
+#endif /* _I386_UTIL_CPU_H_ */
diff --git a/i386/util/cpu_subs.h b/i386/util/cpu_subs.h
new file mode 100644
index 00000000..f814d933
--- /dev/null
+++ b/i386/util/cpu_subs.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+cpu_sub(tables)
+
+#include_next "cpu_subs.h"
diff --git a/i386/util/cpu_tables_init.c b/i386/util/cpu_tables_init.c
new file mode 100644
index 00000000..48e840de
--- /dev/null
+++ b/i386/util/cpu_tables_init.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "cpu.h"
+
+void cpu_tables_init(struct cpu *cpu)
+{
+ cpu_idt_init(cpu);
+ cpu_gdt_init(cpu);
+#ifdef ENABLE_KERNEL_LDT
+ cpu_ldt_init(cpu);
+#endif
+#ifdef ENABLE_KERNEL_TSS
+ cpu_tss_init(cpu);
+#endif
+}
+
diff --git a/i386/util/cpu_tables_load.c b/i386/util/cpu_tables_load.c
new file mode 100644
index 00000000..2237b334
--- /dev/null
+++ b/i386/util/cpu_tables_load.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/proc_reg.h>
+
+#include "cpu.h"
+#include "vm_param.h"
+
+void cpu_tables_load(struct cpu *cpu)
+{
+ struct pseudo_descriptor pdesc;
+
+ /* Load the final GDT.
+ If paging is now on,
+ then this will point the processor to the GDT
+ at its new linear address in the kernel linear space. */
+ pdesc.limit = sizeof(cpu->tables.gdt)-1;
+ pdesc.linear_base = kvtolin(&cpu->tables.gdt);
+ set_gdt(&pdesc);
+
+ /* Reload all the segment registers from the new GDT. */
+ asm volatile("
+ ljmp %0,$1f
+ 1:
+ " : : "i" (KERNEL_CS));
+ set_ds(KERNEL_DS);
+ set_es(KERNEL_DS);
+ set_fs(0);
+ set_gs(0);
+ set_ss(KERNEL_DS);
+
+ /* Load the IDT. */
+ pdesc.limit = sizeof(cpu->tables.idt)-1;
+ pdesc.linear_base = kvtolin(&cpu->tables.idt);
+ set_idt(&pdesc);
+
+#ifdef DEFAULT_LDT
+ /* Load the default LDT. */
+ set_ldt(DEFAULT_LDT);
+#endif
+
+#ifdef DEFAULT_TSS
+ /* Make sure it isn't marked busy. */
+ cpu->tables.gdt[DEFAULT_TSS_IDX].access &= ~ACC_TSS_BUSY;
+
+ /* Load the default TSS. */
+ set_tr(DEFAULT_TSS);
+#endif
+}
+
diff --git a/i386/util/crtn.S b/i386/util/crtn.S
new file mode 100644
index 00000000..6a891503
--- /dev/null
+++ b/i386/util/crtn.S
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifdef __ELF__
+
+#include <mach/machine/asm.h>
+
+
+ .section .anno,"aw",@progbits
+ .globl __ANNO_END__
+__ANNO_END__:
+
+#endif /* __ELF__ */
diff --git a/i386/util/debug.h b/i386/util/debug.h
new file mode 100644
index 00000000..15ebac52
--- /dev/null
+++ b/i386/util/debug.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _UTIL_I386_DEBUG_H_
+#define _UTIL_I386_DEBUG_H_
+
+#ifdef ASSEMBLER
+#ifdef DEBUG
+
+
+#define A(cond,a,b) \
+ a,b ;\
+ j##cond 8f ;\
+ int $0xda ;\
+8:
+
+
+#else /* !DEBUG */
+
+
+#define A(cond,a,b)
+
+
+#endif /* !DEBUG */
+#else /* !ASSEMBLER */
+
+#include_next "debug.h"
+
+#endif /* !ASSEMBLER */
+
+#endif /* _UTIL_I386_DEBUG_H_ */
diff --git a/i386/util/gdt.c b/i386/util/gdt.c
new file mode 100644
index 00000000..7bfa6aa2
--- /dev/null
+++ b/i386/util/gdt.c
@@ -0,0 +1,80 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include "cpu.h"
+#include "vm_param.h"
+
+
+/* Initialize the 32-bit kernel code and data segment descriptors
+ to point to the base of the kernel linear space region. */
+gdt_desc_initializer(KERNEL_CS,
+ kvtolin(0), 0xffffffff,
+ ACC_PL_K|ACC_CODE_R, SZ_32);
+gdt_desc_initializer(KERNEL_DS,
+ kvtolin(0), 0xffffffff,
+ ACC_PL_K|ACC_DATA_W, SZ_32);
+
+/* Initialize the 16-bit real-mode code and data segment descriptors. */
+gdt_desc_initializer(KERNEL_16_CS,
+ kvtolin(0), 0xffff,
+ ACC_PL_K|ACC_CODE_R, SZ_16);
+gdt_desc_initializer(KERNEL_16_DS,
+ kvtolin(0), 0xffff,
+ ACC_PL_K|ACC_DATA_W, SZ_16);
+
+/* Initialize the linear-space code and data segment descriptors. */
+gdt_desc_initializer(LINEAR_CS,
+ 0, 0xffffffff,
+ ACC_PL_K|ACC_CODE_R, SZ_32);
+gdt_desc_initializer(LINEAR_DS,
+ 0, 0xffffffff,
+ ACC_PL_K|ACC_DATA_W, SZ_32);
+
+/* Initialize the master LDT and TSS descriptors. */
+#ifdef ENABLE_KERNEL_LDT
+gdt_desc_initializer(KERNEL_LDT,
+ kvtolin(&cpu->tables.ldt), sizeof(cpu->tables.ldt)-1,
+ ACC_PL_K|ACC_LDT, 0);
+#endif
+#ifdef ENABLE_KERNEL_TSS
+gdt_desc_initializer(KERNEL_TSS,
+ kvtolin(&cpu->tables.tss), sizeof(cpu->tables.tss)-1,
+ ACC_PL_K|ACC_TSS, 0);
+#endif
+
+
+void cpu_gdt_init(struct cpu *cpu)
+{
+ /* Initialize all the selectors of the GDT. */
+#define gdt_sel(name) cpu_gdt_init_##name(cpu);
+#include "gdt_sels.h"
+#undef gdt_sel
+}
+
diff --git a/i386/util/gdt.h b/i386/util/gdt.h
new file mode 100644
index 00000000..5b422ae0
--- /dev/null
+++ b/i386/util/gdt.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_GDT_
+#define _I386_GDT_
+
+#include <mach/machine/seg.h>
+
+/*
+ * Collect and define the GDT segment selectors.
+ * xxx_IDX is the index number of the selector;
+ * xxx is the actual selector value (index * 8).
+ */
+enum gdt_idx
+{
+ GDT_NULL_IDX = 0,
+
+#define gdt_sel(name) name##_IDX,
+#include "gdt_sels.h"
+#undef gdt_sel
+
+ GDT_FIRST_FREE_IDX
+};
+
+enum gdt_sel
+{
+ GDT_NULL = 0,
+
+#define gdt_sel(name) name = name##_IDX * 8,
+#include "gdt_sels.h"
+#undef gdt_sel
+};
+
+#define GDTSZ GDT_FIRST_FREE_IDX
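+
+/* Worked example (assuming gdt_sels.h lists KERNEL_CS first, as the
+   version in this directory does): GDT_NULL_IDX == 0, KERNEL_CS_IDX == 1,
+   and therefore KERNEL_CS == 1 * 8 == 0x08. */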
+
+
+/* If we have a KERNEL_TSS, use that as our DEFAULT_TSS if none is defined yet.
+ (The DEFAULT_TSS gets loaded by cpu_tables_load() upon switching to pmode.)
+ Similarly with DEFAULT_LDT. */
+#if defined(ENABLE_KERNEL_TSS) && !defined(DEFAULT_TSS)
+#define DEFAULT_TSS KERNEL_TSS
+#define DEFAULT_TSS_IDX KERNEL_TSS_IDX
+#endif
+#if defined(ENABLE_KERNEL_LDT) && !defined(DEFAULT_LDT)
+#define DEFAULT_LDT KERNEL_LDT
+#define DEFAULT_LDT_IDX KERNEL_LDT_IDX
+#endif
+
+
+/* Fill a segment descriptor in a CPU's GDT. */
+#define fill_gdt_descriptor(cpu, segment, base, limit, access, sizebits) \
+ fill_descriptor(&(cpu)->tables.gdt[segment/8], \
+ base, limit, access, sizebits)
+
+#define i16_fill_gdt_descriptor(cpu, segment, base, limit, access, sizebits) \
+ i16_fill_descriptor(&(cpu)->tables.gdt[segment/8], \
+ base, limit, access, sizebits)
+
+
+/* This automatically defines GDT descriptor initialization functions. */
+#define gdt_desc_initializer(segment, base, limit, access, sizebits) \
+ void cpu_gdt_init_##segment(struct cpu *cpu) \
+ { \
+ fill_gdt_descriptor(cpu, segment, base, limit, \
+ access, sizebits); \
+ }
+
+
+#endif /* _I386_GDT_ */
diff --git a/i386/util/gdt_sels.h b/i386/util/gdt_sels.h
new file mode 100644
index 00000000..e9b33fe5
--- /dev/null
+++ b/i386/util/gdt_sels.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 1994-1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "config.h"
+
+/* Kernel segment descriptors (32-bit flat address space). */
+gdt_sel(KERNEL_CS)
+gdt_sel(KERNEL_DS)
+
+/* Corresponding 16-bit descriptors for protected-mode entry and exit. */
+gdt_sel(KERNEL_16_CS)
+gdt_sel(KERNEL_16_DS)
+
+/* Code and data segments that always map directly to flat, linear memory. */
+gdt_sel(LINEAR_CS)
+gdt_sel(LINEAR_DS)
+
+/* Standard LDT and TSS descriptors. */
+#ifdef ENABLE_KERNEL_LDT
+gdt_sel(KERNEL_LDT)
+#endif
+#ifdef ENABLE_KERNEL_TSS
+gdt_sel(KERNEL_TSS)
+#endif
+
diff --git a/i386/util/i16/debug.h b/i386/util/i16/debug.h
new file mode 100644
index 00000000..6bce9d2c
--- /dev/null
+++ b/i386/util/i16/debug.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _UTIL_I386_I16_DEBUG_H_
+#define _UTIL_I386_I16_DEBUG_H_
+
+#include_next "debug.h"
+
+#ifdef DEBUG
+
+#define i16_assert(v) \
+ MACRO_BEGIN \
+ if (!(v)) \
+ i16_die(__FILE__":?: failed assertion `"#v"'"); \
+ MACRO_END
+
+#else /* !DEBUG */
+
+#define i16_assert(v) (0)
+
+#endif /* !DEBUG */
+
+#endif /* _UTIL_I386_I16_DEBUG_H_ */
diff --git a/i386/util/i16/i16.h b/i386/util/i16/i16.h
new file mode 100644
index 00000000..0eae59ab
--- /dev/null
+++ b/i386/util/i16/i16.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_I16_
+#define _I386_I16_
+
+#include <mach/machine/code16.h>
+
+#include "gdt.h"
+
+
+/* Macros to switch between 16-bit and 32-bit code
+ in the middle of a C function.
+ Be careful with these! */
+#define i16_switch_to_32bit() asm volatile("
+ ljmp %0,$1f
+ .code32
+ 1:
+ " : : "i" (KERNEL_CS));
+#define switch_to_16bit() asm volatile("
+ ljmp %0,$1f
+ .code16
+ 1:
+ " : : "i" (KERNEL_16_CS));
+
+
+/* From within one type of code, execute 'stmt' in the other.
+ These are safer and harder to screw up with than the above macros. */
+#define i16_do_32bit(stmt) \
+ ({ i16_switch_to_32bit(); \
+ { stmt; } \
+ switch_to_16bit(); })
+#define do_16bit(stmt) \
+ ({ switch_to_16bit(); \
+ { stmt; } \
+ i16_switch_to_32bit(); })
+
+
+#endif /* _I386_I16_ */
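
A usage sketch of the i16_do_32bit() wrapper defined above. This only builds inside the CODE16 real-mode environment these files target, and both i16_probe_hook() and count_memory() are hypothetical names invented for the illustration, not functions from this tree.

/* Usage sketch only: compiles only in the CODE16 boot environment that
   i16.h is written for.  i16_probe_hook() and count_memory() are made-up
   names used purely for illustration. */
#include <mach/machine/code16.h>
#include "i16.h"

extern void count_memory(void);		/* hypothetical 32-bit routine */

CODE16

void i16_probe_hook(void)
{
	/* Running as 16-bit code here.  Temporarily switch to 32-bit mode
	   for one statement that needs 32-bit addressing; the macro then
	   switches back to 16-bit code automatically. */
	i16_do_32bit(count_memory());
}
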
diff --git a/i386/util/i16/i16_die.c b/i386/util/i16/i16_die.c
new file mode 100644
index 00000000..e3cd533a
--- /dev/null
+++ b/i386/util/i16/i16_die.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+
+CODE16
+
+void i16_die(char *mes)
+{
+ if (mes)
+ i16_puts(mes);
+
+ i16_exit(mes != 0);
+}
diff --git a/i386/util/i16/i16_gdt_init_temp.c b/i386/util/i16/i16_gdt_init_temp.c
new file mode 100644
index 00000000..14f2d0e4
--- /dev/null
+++ b/i386/util/i16/i16_gdt_init_temp.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+#include <mach/machine/seg.h>
+
+#include "vm_param.h"
+#include "cpu.h"
+
+
+CODE16
+
+/* This 16-bit function initializes CPU 0's GDT
+ just enough to get in and out of protected (and possibly paged) mode,
+ with all addresses assuming identity-mapped memory. */
+void i16_gdt_init_temp()
+{
+ /* Create temporary kernel code and data segment descriptors.
+ (They'll be reinitialized later after paging is enabled.) */
+ i16_fill_gdt_descriptor(&cpu[0], KERNEL_CS,
+ boot_image_pa, 0xffffffff,
+ ACC_PL_K|ACC_CODE_R, SZ_32);
+ i16_fill_gdt_descriptor(&cpu[0], KERNEL_DS,
+ boot_image_pa, 0xffffffff,
+ ACC_PL_K|ACC_DATA_W, SZ_32);
+ i16_fill_gdt_descriptor(&cpu[0], KERNEL_16_CS,
+ boot_image_pa, 0xffff,
+ ACC_PL_K|ACC_CODE_R, SZ_16);
+ i16_fill_gdt_descriptor(&cpu[0], KERNEL_16_DS,
+ boot_image_pa, 0xffff,
+ ACC_PL_K|ACC_DATA_W, SZ_16);
+}
+
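
What i16_fill_gdt_descriptor() ultimately stores is a standard 8-byte i386 segment descriptor. The sketch below uses a local fill_desc() as a stand-in for the real fill_descriptor() from mach/machine/seg.h, and the raw 0x9a access byte and 0x40 size flag are the usual architectural values for a present ring-0 readable code segment with 32-bit default operand size; the kernel's ACC_ and SZ_ constants encode the same fields.

/* Standalone sketch of the descriptor bytes built for KERNEL_CS above. */
#include <stdio.h>
#include <stdint.h>

struct x86_desc {			/* 8 bytes, little-endian fields */
	uint16_t limit_low;		/* limit bits 15..0 */
	uint16_t base_low;		/* base  bits 15..0 */
	uint8_t  base_med;		/* base  bits 23..16 */
	uint8_t  access;		/* present, DPL, type */
	uint8_t  limit_high;		/* limit bits 19..16 + G/D flags */
	uint8_t  base_high;		/* base  bits 31..24 */
};

static void fill_desc(struct x86_desc *d, uint32_t base, uint32_t limit,
		      uint8_t access, uint8_t sizebits)
{
	if (limit > 0xfffff) {		/* switch to 4K (page) granularity */
		limit >>= 12;
		sizebits |= 0x80;	/* G bit */
	}
	d->limit_low  = limit & 0xffff;
	d->base_low   = base & 0xffff;
	d->base_med   = (base >> 16) & 0xff;
	d->access     = access;
	d->limit_high = ((limit >> 16) & 0x0f) | sizebits;
	d->base_high  = base >> 24;
}

int main(void)
{
	struct x86_desc d;

	/* 32-bit kernel code segment for a boot image loaded at 1MB,
	   analogous to the KERNEL_CS setup in i16_gdt_init_temp(). */
	fill_desc(&d, 0x00100000, 0xffffffff, 0x9a, 0x40);
	printf("limit=%04x base=%02x%02x%04x access=%02x flags+limit=%02x\n",
	       d.limit_low, d.base_high, d.base_med, d.base_low,
	       d.access, d.limit_high);
	return 0;
}
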
diff --git a/i386/util/i16/i16_nanodelay.c b/i386/util/i16/i16_nanodelay.c
new file mode 100644
index 00000000..7b23cb5b
--- /dev/null
+++ b/i386/util/i16/i16_nanodelay.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+
+CODE16
+
+/* XXX */
+void i16_nanodelay(int ns)
+{
+ asm("jmp 1f; 1:");
+ asm("jmp 1f; 1:");
+ asm("jmp 1f; 1:");
+}
+
diff --git a/i386/util/i16/i16_puts.c b/i386/util/i16/i16_puts.c
new file mode 100644
index 00000000..c43a3e1e
--- /dev/null
+++ b/i386/util/i16/i16_puts.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+
+CODE16
+
+void i16_puts(const char *mes)
+{
+ while (*mes)
+ i16_putchar(*mes++);
+ i16_putchar('\n');
+}
+
diff --git a/i386/util/i16/i16_writehex.c b/i386/util/i16/i16_writehex.c
new file mode 100644
index 00000000..4a1b2af3
--- /dev/null
+++ b/i386/util/i16/i16_writehex.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/code16.h>
+
+CODE16
+
+void i16_writehexdigit(unsigned char digit)
+{
+ digit &= 0xf;
+ i16_putchar(digit < 10 ? digit+'0' : digit-10+'A');
+}
+
+void i16_writehexb(unsigned char val)
+{
+ i16_writehexdigit(val >> 4);
+ i16_writehexdigit(val);
+}
+
+void i16_writehexw(unsigned short val)
+{
+ i16_writehexb(val >> 8);
+ i16_writehexb(val);
+}
+
+void i16_writehexl(unsigned long val)
+{
+ i16_writehexw(val >> 16);
+ i16_writehexw(val);
+}
+
+void i16_writehexll(unsigned long long val)
+{
+ i16_writehexl(val >> 32);
+ i16_writehexl(val);
+}
+
diff --git a/i386/util/i386_asm.sym b/i386/util/i386_asm.sym
new file mode 100644
index 00000000..9de12c38
--- /dev/null
+++ b/i386/util/i386_asm.sym
@@ -0,0 +1,36 @@
+/*
+ * MOSS - DOS extender built from the Mach 4 source tree
+ * Copyright (C) 1995-1994 Sleepless Software
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Bryan Ford
+ */
+
+#include <mach/machine/tss.h>
+
+#include "gdt.h"
+
+offset i386_tss tss esp0
+offset i386_tss tss ss
+size i386_tss tss
+
+expr KERNEL_CS
+expr KERNEL_DS
+expr KERNEL_16_CS
+expr KERNEL_16_DS
+expr LINEAR_CS
+expr LINEAR_DS
+
diff --git a/i386/util/idt.c b/i386/util/idt.c
new file mode 100644
index 00000000..234b60e3
--- /dev/null
+++ b/i386/util/idt.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/seg.h>
+#include <mach/machine/proc_reg.h>
+
+#include "vm_param.h"
+#include "cpu.h"
+
+struct idt_init_entry
+{
+ unsigned entrypoint;
+ unsigned short vector;
+ unsigned short type;
+};
+extern struct idt_init_entry idt_inittab[];
+
+void cpu_idt_init(struct cpu *cpu)
+{
+ struct idt_init_entry *iie = idt_inittab;
+
+ /* Initialize the trap/interrupt vectors from the idt_inittab. */
+ while (iie->entrypoint)
+ {
+ fill_idt_gate(cpu, iie->vector, iie->entrypoint,
+ KERNEL_CS, iie->type);
+ iie++;
+ }
+}
+
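
The idt_inittab walked by cpu_idt_init() above is emitted by the IDT_ENTRY() assembler macro (see idt_inittab.S further down) and terminated by a zero entrypoint. The standalone sketch below mimics that table in C with a printing stand-in for fill_idt_gate(); the entrypoint addresses and the 0x8e type byte (present, DPL 0, 32-bit interrupt gate) are illustrative values, not ones taken from this tree.

/* Standalone sketch of the table format consumed by cpu_idt_init(). */
#include <stdio.h>

struct idt_init_entry {
	unsigned entrypoint;
	unsigned short vector;
	unsigned short type;
};

static const struct idt_init_entry idt_inittab[] = {
	{ 0x1000, 0x00, 0x8e },		/* divide error */
	{ 0x1010, 0x0d, 0x8e },		/* general protection fault */
	{ 0x1020, 0x0e, 0x8e },		/* page fault */
	{ 0, 0, 0 }			/* terminator: entrypoint == 0 */
};

int main(void)
{
	const struct idt_init_entry *iie = idt_inittab;

	/* Same walk as cpu_idt_init(), but printing instead of filling
	   gates in a real IDT. */
	while (iie->entrypoint) {
		printf("vector 0x%02x -> entry 0x%04x, type 0x%02x\n",
		       iie->vector, iie->entrypoint, iie->type);
		iie++;
	}
	return 0;
}
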
diff --git a/i386/util/idt.h b/i386/util/idt.h
new file mode 100644
index 00000000..a94019f0
--- /dev/null
+++ b/i386/util/idt.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_IDT_
+#define _I386_IDT_
+
+#include <mach/vm_param.h>
+#include <mach/machine/seg.h>
+
+#include "irq.h"
+
+/* On a standard PC, we only need 16 hardware interrupt (IRQ) vectors
+ beyond the exception vectors, because that's all the PIC hardware supports. */
+#ifndef IDTSZ
+#define IDTSZ (IDT_IRQ_BASE+IRQ_COUNT)
+#endif
+
+
+/* Fill a gate in a CPU's IDT. */
+#define fill_idt_gate(cpu, int_num, entry, selector, access) \
+ fill_gate(&(cpu)->tables.idt[int_num], \
+ entry, selector, access, 0)
+
+#endif /* _I386_IDT_ */
diff --git a/i386/util/idt_inittab.S b/i386/util/idt_inittab.S
new file mode 100644
index 00000000..3fc6b5c7
--- /dev/null
+++ b/i386/util/idt_inittab.S
@@ -0,0 +1,128 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/machine/asm.h>
+#include <mach/machine/eflags.h>
+#include <mach/machine/trap.h>
+#include <mach/machine/seg.h>
+
+#include "trap.h"
+#include "idt_inittab.h"
+#include "debug.h"
+
+/*
+ * This is a default idt_inittab
+ * that simply invokes panic_trap on any trap.
+ * All gates are interrupt gates,
+ * so that interrupts will be immediately turned off.
+ */
+
+
+/*
+ * No error code. Clear error code and push trap number.
+ */
+#define EXCEPTION(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_INTR_GATE);\
+ENTRY(name) ;\
+ pushl $(0) ;\
+ pushl $(n) ;\
+ jmp alltraps
+
+/*
+ * User-accessible exception. Otherwise, same as above.
+ */
+#define EXCEP_USR(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_U|ACC_INTR_GATE);\
+ENTRY(name) ;\
+ pushl $(0) ;\
+ pushl $(n) ;\
+ jmp alltraps
+
+/*
+ * Error code has been pushed. Just push trap number.
+ */
+#define EXCEP_ERR(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_INTR_GATE);\
+ENTRY(name) ;\
+ pushl $(n) ;\
+ jmp alltraps
+
+/*
+ * Special interrupt code: dispatches to a unique entrypoint,
+ * not defined automatically here.
+ */
+#define EXCEP_SPC(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_INTR_GATE)
+
+
+IDT_INITTAB_BEGIN
+
+EXCEPTION(0x00,t_zero_div)
+EXCEPTION(0x01,t_debug)
+EXCEP_USR(0x03,t_int3)
+EXCEP_USR(0x04,t_into)
+EXCEP_USR(0x05,t_bounds)
+EXCEPTION(0x06,t_invop)
+EXCEPTION(0x07,t_nofpu)
+EXCEPTION(0x08,a_dbl_fault)
+EXCEPTION(0x09,a_fpu_over)
+EXCEP_ERR(0x0a,a_inv_tss)
+EXCEP_ERR(0x0b,t_segnp)
+EXCEP_ERR(0x0c,t_stack_fault)
+EXCEP_ERR(0x0d,t_gen_prot)
+EXCEP_ERR(0x0e,t_page_fault)
+EXCEPTION(0x0f,t_trap_0f)
+EXCEPTION(0x10,t_fpu_err)
+EXCEPTION(0x11,t_trap_11)
+EXCEPTION(0x12,t_trap_12)
+EXCEPTION(0x13,t_trap_13)
+EXCEPTION(0x14,t_trap_14)
+EXCEPTION(0x15,t_trap_15)
+EXCEPTION(0x16,t_trap_16)
+EXCEPTION(0x17,t_trap_17)
+EXCEPTION(0x18,t_trap_18)
+EXCEPTION(0x19,t_trap_19)
+EXCEPTION(0x1a,t_trap_1a)
+EXCEPTION(0x1b,t_trap_1b)
+EXCEPTION(0x1c,t_trap_1c)
+EXCEPTION(0x1d,t_trap_1d)
+EXCEPTION(0x1e,t_trap_1e)
+EXCEPTION(0x1f,t_trap_1f)
+
+IDT_INITTAB_END
+
+alltraps:
+ pusha
+ pushl %ds
+ pushl %es
+ pushl %fs
+ pushl %gs
+ jmp EXT(trap_handler)
+
diff --git a/i386/util/idt_inittab.h b/i386/util/idt_inittab.h
new file mode 100644
index 00000000..9cb994fa
--- /dev/null
+++ b/i386/util/idt_inittab.h
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _I386_UTIL_IDT_INITTAB_H_
+#define _I386_UTIL_IDT_INITTAB_H_
+
+
+/* We'll be using macros to fill in a table in data hunk 2
+ while writing trap entrypoint routines at the same time.
+ Here's the header that comes before everything else. */
+#define IDT_INITTAB_BEGIN \
+ .data 2 ;\
+ENTRY(idt_inittab) ;\
+ .text
+
+/*
+ * Interrupt descriptor table and code vectors for it.
+ */
+#define IDT_ENTRY(n,entry,type) \
+ .data 2 ;\
+ .long entry ;\
+ .word n ;\
+ .word type ;\
+ .text
+
+/*
+ * Terminator for the end of the table.
+ */
+#define IDT_INITTAB_END \
+ .data 2 ;\
+ .long 0 ;\
+ .text
+
+
+#endif /* _I386_UTIL_IDT_INITTAB_H_ */
diff --git a/i386/util/ldt.h b/i386/util/ldt.h
new file mode 100644
index 00000000..135632fc
--- /dev/null
+++ b/i386/util/ldt.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_UTIL_LDT_
+#define _I386_UTIL_LDT_
+
+#include "config.h"
+
+/* If more-specific code wants a standard LDT,
+ then it should define ENABLE_KERNEL_LDT in config.h. */
+#ifdef ENABLE_KERNEL_LDT
+
+#include <mach/machine/seg.h>
+
+/* Fill a segment descriptor in a CPU's master LDT. */
+#define fill_ldt_descriptor(cpu, selector, base, limit, access, sizebits) \
+ fill_descriptor(&(cpu)->tables.ldt[(selector)/8], \
+ base, limit, access, sizebits)
+
+#define fill_ldt_gate(cpu, selector, offset, dest_selector, access, word_count) \
+ fill_gate((struct i386_gate*)&(cpu)->tables.ldt[(selector)/8], \
+ offset, dest_selector, access, word_count)
+
+#endif /* ENABLE_KERNEL_LDT */
+
+#endif /* _I386_UTIL_LDT_ */
diff --git a/i386/util/trap.h b/i386/util/trap.h
new file mode 100644
index 00000000..98d586c3
--- /dev/null
+++ b/i386/util/trap.h
@@ -0,0 +1,100 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _I386_MOSS_TRAP_H_
+#define _I386_MOSS_TRAP_H_
+
+#ifndef ASSEMBLER
+
+
+/* This structure corresponds to the state of user registers
+ as saved upon kernel trap/interrupt entry.
+ As always, it is only a default implementation;
+ a well-optimized microkernel will probably want to override it
+ with something that allows better optimization. */
+
+struct trap_state {
+
+ /* Saved segment registers */
+ unsigned int gs;
+ unsigned int fs;
+ unsigned int es;
+ unsigned int ds;
+
+ /* PUSHA register state frame */
+ unsigned int edi;
+ unsigned int esi;
+ unsigned int ebp;
+ unsigned int cr2; /* we save cr2 over esp for page faults */
+ unsigned int ebx;
+ unsigned int edx;
+ unsigned int ecx;
+ unsigned int eax;
+
+ unsigned int trapno;
+ unsigned int err;
+
+ /* Processor state frame */
+ unsigned int eip;
+ unsigned int cs;
+ unsigned int eflags;
+ unsigned int esp;
+ unsigned int ss;
+
+ /* Virtual 8086 segment registers */
+ unsigned int v86_es;
+ unsigned int v86_ds;
+ unsigned int v86_fs;
+ unsigned int v86_gs;
+};
+
+/* The actual trap_state frame pushed by the processor
+ varies in size depending on where the trap came from. */
+#define TR_KSIZE ((int)&((struct trap_state*)0)->esp)
+#define TR_USIZE ((int)&((struct trap_state*)0)->v86_es)
+#define TR_V86SIZE sizeof(struct trap_state)
+
+
+#else /* ASSEMBLER */
+
+#include <mach/machine/asm.h>
+
+#define UNEXPECTED_TRAP \
+ movw %ss,%ax ;\
+ movw %ax,%ds ;\
+ movw %ax,%es ;\
+ movl %esp,%eax ;\
+ pushl %eax ;\
+ call EXT(trap_dump_die) ;\
+
+
+#endif /* ASSEMBLER */
+
+#include <mach/machine/trap.h>
+
+#endif /* _I386_MOSS_TRAP_H_ */
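
The TR_KSIZE/TR_USIZE/TR_V86SIZE macros above measure how much of struct trap_state is actually valid for a kernel-mode, user-mode, or v86-mode trap, using the offset of the first field the processor does not push in each case. The sketch below reproduces the layout so it compiles on its own and expresses the same computation with offsetof(), which is the portable spelling of the null-pointer cast used in the header.

/* Standalone sketch of the variable-size trap frame measurement. */
#include <stdio.h>
#include <stddef.h>

struct trap_state {
	unsigned int gs, fs, es, ds;
	unsigned int edi, esi, ebp, cr2, ebx, edx, ecx, eax;
	unsigned int trapno, err;
	unsigned int eip, cs, eflags, esp, ss;
	unsigned int v86_es, v86_ds, v86_fs, v86_gs;
};

#define TR_KSIZE   ((int)offsetof(struct trap_state, esp))
#define TR_USIZE   ((int)offsetof(struct trap_state, v86_es))
#define TR_V86SIZE ((int)sizeof(struct trap_state))

int main(void)
{
	/* Kernel traps stop short of esp/ss, user traps stop short of the
	   v86 segment registers, and v86 traps use the whole frame. */
	printf("kernel=%d user=%d v86=%d bytes\n",
	       TR_KSIZE, TR_USIZE, TR_V86SIZE);	/* prints 68 76 92 */
	return 0;
}
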
diff --git a/i386/util/trap_asm.sym b/i386/util/trap_asm.sym
new file mode 100644
index 00000000..ef956386
--- /dev/null
+++ b/i386/util/trap_asm.sym
@@ -0,0 +1,45 @@
+/*
+ * MOSS - DOS extender built from the Mach 4 source tree
+ * Copyright (C) 1995-1994 Sleepless Software
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Bryan Ford
+ */
+
+#include "trap.h"
+
+offset trap_state tr edi
+offset trap_state tr esi
+offset trap_state tr ebp
+offset trap_state tr ebx
+offset trap_state tr edx
+offset trap_state tr ecx
+offset trap_state tr eax
+offset trap_state tr trapno
+offset trap_state tr err
+offset trap_state tr eip
+offset trap_state tr cs
+offset trap_state tr eflags
+offset trap_state tr esp
+offset trap_state tr ss
+offset trap_state tr v86_es
+offset trap_state tr v86_ds
+offset trap_state tr v86_fs
+offset trap_state tr v86_gs
+expr TR_KSIZE
+expr TR_USIZE
+expr TR_V86SIZE
+
diff --git a/i386/util/trap_dump.c b/i386/util/trap_dump.c
new file mode 100644
index 00000000..62540d58
--- /dev/null
+++ b/i386/util/trap_dump.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSS
+ */
+
+#include <mach/machine/eflags.h>
+#include <mach/machine/proc_reg.h>
+
+#include "vm_param.h"
+#include "trap.h"
+
+void trap_dump(struct trap_state *st)
+{
+ short flags;
+ int from_user = (st->cs & 3) || (st->eflags & EFL_VM);
+ unsigned *dump_sp = 0;
+ int i;
+
+ printf("Dump of trap_state at %08x:\n", st);
+ printf("EAX %08x EBX %08x ECX %08x EDX %08x\n",
+ st->eax, st->ebx, st->ecx, st->edx);
+ printf("ESI %08x EDI %08x EBP %08x ESP %08x\n",
+ st->esi, st->edi, st->ebp,
+ from_user ? st->esp : (unsigned)&st->esp);
+ printf("EIP %08x EFLAGS %08x\n", st->eip, st->eflags);
+ printf("CS %04x SS %04x DS %04x ES %04x FS %04x GS %04x\n",
+ st->cs & 0xffff, from_user ? st->ss & 0xffff : get_ss(),
+ st->ds & 0xffff, st->es & 0xffff,
+ st->fs & 0xffff, st->gs & 0xffff);
+ printf("v86: DS %04x ES %04x FS %04x GS %04x\n",
+ st->v86_ds & 0xffff, st->v86_es & 0xffff,
+	       st->v86_fs & 0xffff, st->v86_gs & 0xffff);
+ printf("trapno %d, error %08x, from %s mode\n",
+ st->trapno, st->err, from_user ? "user" : "kernel");
+ if (st->trapno == T_PAGE_FAULT)
+ printf("page fault linear address %08x\n", st->cr2);
+
+ /* Dump the top of the stack too. */
+ if (!from_user)
+ {
+ for (i = 0; i < 32; i++)
+ {
+ printf("%08x%c", (&st->esp)[i],
+ ((i & 7) == 7) ? '\n' : ' ');
+ }
+ }
+}
+
diff --git a/i386/util/trap_dump_die.c b/i386/util/trap_dump_die.c
new file mode 100644
index 00000000..0407657e
--- /dev/null
+++ b/i386/util/trap_dump_die.c
@@ -0,0 +1,12 @@
+
+#include "trap.h"
+
+void trap_dump_die(struct trap_state *st)
+{
+ about_to_die(1);
+
+ trap_dump(st);
+
+ die("terminated due to trap\n");
+}
+
diff --git a/i386/util/trap_handler.S b/i386/util/trap_handler.S
new file mode 100644
index 00000000..f11ba100
--- /dev/null
+++ b/i386/util/trap_handler.S
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/asm.h>
+
+#include "trap.h"
+
+ .text
+
+ENTRY(trap_handler)
+ UNEXPECTED_TRAP
+
diff --git a/i386/util/trap_return.S b/i386/util/trap_return.S
new file mode 100644
index 00000000..b1223129
--- /dev/null
+++ b/i386/util/trap_return.S
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/asm.h>
+
+#include "trap.h"
+
+ .text
+
+ENTRY(trap_return)
+ popl %gs
+ popl %fs
+ popl %es
+ popl %ds
+ popa
+	addl	$4*2,%esp	/* discard trap number and error code */
+ iret
+
+
diff --git a/i386/util/tss.c b/i386/util/tss.c
new file mode 100644
index 00000000..a9be7bfa
--- /dev/null
+++ b/i386/util/tss.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <mach/machine/tss.h>
+#include <mach/machine/proc_reg.h>
+
+#include "cpu.h"
+
+#ifdef ENABLE_KERNEL_TSS
+
+void
+cpu_tss_init(struct cpu *cpu)
+{
+ /* Only initialize once. */
+ if (!cpu->tables.tss.ss0)
+ {
+ /* Initialize the master TSS. */
+ cpu->tables.tss.ss0 = KERNEL_DS;
+ cpu->tables.tss.esp0 = get_esp(); /* only temporary */
+ cpu->tables.tss.io_bit_map_offset = sizeof(cpu->tables.tss);
+ }
+}
+
+#endif
+
diff --git a/i386/util/tss.h b/i386/util/tss.h
new file mode 100644
index 00000000..8ecabf5f
--- /dev/null
+++ b/i386/util/tss.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_KTSS_
+#define _I386_KTSS_
+
+#include <mach/machine/tss.h>
+
+extern struct i386_tss ktss;
+
+#endif /* _I386_KTSS_ */
diff --git a/i386/util/tss_dump.c b/i386/util/tss_dump.c
new file mode 100644
index 00000000..037774e8
--- /dev/null
+++ b/i386/util/tss_dump.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSS
+ */
+
+#include <mach/machine/tss.h>
+
+void tss_dump(struct i386_tss *tss)
+{
+ printf("Dump of TSS at %08x:\n", tss);
+ printf("back_link %04x\n", tss->back_link & 0xffff);
+ printf("ESP0 %08x SS0 %04x\n", tss->esp0, tss->ss0 & 0xffff);
+ printf("ESP1 %08x SS1 %04x\n", tss->esp1, tss->ss1 & 0xffff);
+ printf("ESP2 %08x SS2 %04x\n", tss->esp2, tss->ss2 & 0xffff);
+ printf("CR3 %08x\n", tss->cr3);
+ printf("EIP %08x EFLAGS %08x\n", tss->eip, tss->eflags);
+ printf("EAX %08x EBX %08x ECX %08x EDX %08x\n",
+ tss->eax, tss->ebx, tss->ecx, tss->edx);
+ printf("ESI %08x EDI %08x EBP %08x ESP %08x\n",
+ tss->esi, tss->edi, tss->ebp, tss->esp);
+ printf("CS %04x SS %04x DS %04x ES %04x FS %04x GS %04x\n",
+ tss->cs & 0xffff, tss->ss & 0xffff,
+ tss->ds & 0xffff, tss->es & 0xffff,
+ tss->fs & 0xffff, tss->gs & 0xffff);
+ printf("LDT %04x\n", tss->ldt & 0xffff);
+ printf("trace_trap %04x\n", tss->trace_trap);
+ printf("IOPB offset %04x\n", tss->io_bit_map_offset);
+}
+
diff --git a/i386/util/vm_param.h b/i386/util/vm_param.h
new file mode 100644
index 00000000..c24ed074
--- /dev/null
+++ b/i386/util/vm_param.h
@@ -0,0 +1,89 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm_param.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * I386 machine dependent virtual memory parameters.
+ * Most of the declarations are preceded by I386_ (or i386_)
+ * which is OK because only I386 specific code will be using
+ * them.
+ */
+
+#ifndef _I386_KERNEL_UTIL_VM_PARAM_H_
+#define _I386_KERNEL_UTIL_VM_PARAM_H_
+
+#include <mach/vm_param.h>
+
+
+/* This variable is expected always to contain
+ the kernel virtual address at which physical memory is mapped.
+ It may change as paging is turned on or off. */
+extern vm_offset_t phys_mem_va;
+
+
+/* Calculate a kernel virtual address from a physical address. */
+#define phystokv(pa) ((vm_offset_t)(pa) + phys_mem_va)
+
+/* Same, but in reverse.
+ This only works for the region of kernel virtual addresses
+ that directly map physical addresses. */
+#define kvtophys(va) ((vm_offset_t)(va) - phys_mem_va)
+
+
+/* This variable contains the kernel virtual address
+ corresponding to linear address 0.
+ In the absence of paging,
+ linear addresses are always the same as physical addresses. */
+#ifndef linear_base_va
+#define linear_base_va phys_mem_va
+#endif
+
+/* Convert between linear and kernel virtual addresses. */
+#define lintokv(la) ((vm_offset_t)(la) + linear_base_va)
+#define kvtolin(va) ((vm_offset_t)(va) - linear_base_va)
+
+
+/* This variable keeps track of where in physical memory
+ our boot image was loaded.
+ It holds the physical address
+ corresponding to the boot image's virtual address 0.
+ When paging is disabled, this is simply -phys_mem_va.
+ However, when paging is enabled,
+ phys_mem_va points to the place physical memory is mapped into exec space,
+ and has no relationship to where in physical memory the boot image is.
+ Thus, this variable always contains the location of the boot image
+ whether or not paging is enabled. */
+extern vm_offset_t boot_image_pa;
+
+/* Code segment we originally had when we started in real mode.
+ Always equal to boot_image_pa >> 4. */
+extern unsigned short real_cs;
+
+
+
+#endif /* _I386_KERNEL_UTIL_VM_PARAM_H_ */
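
A quick demonstration of the phystokv/kvtophys and lintokv/kvtolin conversions defined above. The macros are copied from the header; the phys_mem_va value and the example physical address are arbitrary so the round trip can be shown outside the kernel.

/* Standalone sketch of the physical/linear/kernel-virtual conversions. */
#include <stdio.h>

typedef unsigned long vm_offset_t;

static vm_offset_t phys_mem_va = 0xc0000000;	/* example mapping base */
#define linear_base_va phys_mem_va

#define phystokv(pa) ((vm_offset_t)(pa) + phys_mem_va)
#define kvtophys(va) ((vm_offset_t)(va) - phys_mem_va)
#define lintokv(la)  ((vm_offset_t)(la) + linear_base_va)
#define kvtolin(va)  ((vm_offset_t)(va) - linear_base_va)

int main(void)
{
	vm_offset_t pa = 0x00100000;		/* some physical page */
	vm_offset_t kv = phystokv(pa);

	printf("pa %08lx <-> kv %08lx (round trip %08lx)\n",
	       pa, kv, kvtophys(kv));
	printf("linear 0 maps to kv %08lx\n", lintokv(0));
	return 0;
}
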
diff --git a/include/device/audio_status.h b/include/device/audio_status.h
new file mode 100644
index 00000000..26bd6c73
--- /dev/null
+++ b/include/device/audio_status.h
@@ -0,0 +1,168 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Copyright (c) 1991, 1992 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Computer Systems
+ * Engineering Group at Lawrence Berkeley Laboratory.
+ * 4. The name of the Laboratory may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEVICE_AUDIO_STATUS_H_
+#define _DEVICE_AUDIO_STATUS_H_
+
+/*
+ * Access to ADC devices, such as the AMD 79C30A/32A.
+ */
+
+/*
+ * Programmable gains, see tables in device drivers
+ * for detailed mapping to device specifics.
+ */
+#define AUDIO_MIN_GAIN (0)
+#define AUDIO_MAX_GAIN (255)
+
+/*
+ * Encoding of audio samples
+ */
+#define AUDIO_ENCODING_ULAW (1)
+#define AUDIO_ENCODING_ALAW (2)
+
+/*
+ * Selection of input/output jack
+ */
+#define AUDIO_MIKE 1
+
+#define AUDIO_SPEAKER 1
+#define AUDIO_HEADPHONE 2
+
+/*
+ * Programming information from/to user application.
+ * Only portions of this might be available on any given chip.
+ */
+struct audio_prinfo {
+ unsigned int sample_rate;
+ unsigned int channels;
+ unsigned int precision;
+ unsigned int encoding;
+ unsigned int gain;
+ unsigned int port; /* input/output jack */
+ unsigned int seek; /* BSD extension */
+ unsigned int ispare[3];
+ unsigned int samples;
+ unsigned int eof;
+
+ unsigned char pause;
+ unsigned char error;
+ unsigned char waiting;
+ unsigned char cspare[3];
+ unsigned char open;
+ unsigned char active;
+
+};
+
+struct audio_info {
+ struct audio_prinfo play;
+ struct audio_prinfo record;
+ unsigned int monitor_gain;
+ /* BSD extensions */
+ unsigned int blocksize; /* input blocking threshold */
+ unsigned int hiwat; /* output high water mark */
+ unsigned int lowat; /* output low water mark */
+	unsigned int backlog;	/* samples of output backlog to generate */
+};
+
+typedef struct audio_info audio_info_t;
+
+#define AUDIO_INITINFO(p)\
+ (void)memset((void *)(p), 0xff, sizeof(struct audio_info))
+
+#define AUDIO_GETINFO _IOR('A', 21, audio_info_t)
+#define AUDIO_SETINFO _IOWR('A', 22, audio_info_t)
+#define AUDIO_DRAIN _IO('A', 23)
+#define AUDIO_FLUSH _IO('A', 24)
+#define AUDIO_WSEEK _IOR('A', 25, unsigned int)
+#define AUDIO_RERROR _IOR('A', 26, int)
+#define AUDIO_WERROR _IOR('A', 27, int)
+
+/*
+ * Low level interface to the amd79c30.
+ * Internal registers of the MAP block,
+ * the Main Audio Processor.
+ */
+struct mapreg {
+ unsigned short mr_x[8];
+ unsigned short mr_r[8];
+ unsigned short mr_gx;
+ unsigned short mr_gr;
+ unsigned short mr_ger;
+ unsigned short mr_stgr;
+ unsigned short mr_ftgr;
+ unsigned short mr_atgr;
+ unsigned char mr_mmr1;
+ unsigned char mr_mmr2;
+};
+
+#define AUDIO_GETMAP _IOR('A', 27, struct mapreg)
+#define AUDIO_SETMAP _IOW('A', 28, struct mapreg)
+
+/*
+ * Compatibility with Sun interface
+ */
+struct audio_ioctl {
+ short control;
+ unsigned char data[46];
+};
+
+#define AUDIOGETREG _IOWR('i',1,struct audio_ioctl)
+#define AUDIOSETREG _IOW('i',2,struct audio_ioctl)
+
+#endif /* _DEVICE_AUDIO_STATUS_H_ */
diff --git a/include/device/bpf.h b/include/device/bpf.h
new file mode 100644
index 00000000..05cbf9bd
--- /dev/null
+++ b/include/device/bpf.h
@@ -0,0 +1,249 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Berkeley Packet Filter Definitions from Berkeley
+ */
+
+/*-
+ * Copyright (c) 1990-1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf.h 7.1 (Berkeley) 5/7/91
+ *
+ */
+
+#ifndef _DEVICE_BPF_H_
+#define _DEVICE_BPF_H_
+
+#if 0 /* not used in MK now */
+/*
+ * Alignment macros. BPF_WORDALIGN rounds up to the next
+ * even multiple of BPF_ALIGNMENT.
+ */
+#define BPF_ALIGNMENT sizeof(int)
+#define BPF_WORDALIGN(x) (((x)+(BPF_ALIGNMENT-1))&~(BPF_ALIGNMENT-1))
+
+/*
+ * Struct returned by BIOCVERSION. This represents the version number of
+ * the filter language described by the instruction encodings below.
+ * bpf understands a program iff kernel_major == filter_major &&
+ * kernel_minor >= filter_minor, that is, if the value returned by the
+ * running kernel has the same major number and a minor number equal
+ * to or less than the filter being downloaded. Otherwise, the
+ * results are undefined, meaning an error may be returned or packets
+ * may be accepted haphazardly.
+ * It has nothing to do with the source code version.
+ */
+struct bpf_version {
+ u_short bv_major;
+ u_short bv_minor;
+};
+/* Current version number. */
+#define BPF_MAJOR_VERSION 1
+#define BPF_MINOR_VERSION 1
+
+/*
+ * Data-link level type codes.
+ * Currently, only DLT_EN10MB and DLT_SLIP are supported.
+ */
+#define DLT_NULL 0 /* no link-layer encapsulation */
+#define DLT_EN10MB 1 /* Ethernet (10Mb) */
+#define DLT_EN3MB 2 /* Experimental Ethernet (3Mb) */
+#define DLT_AX25 3 /* Amateur Radio AX.25 */
+#define DLT_PRONET 4 /* Proteon ProNET Token Ring */
+#define DLT_CHAOS 5 /* Chaos */
+#define DLT_IEEE802 6 /* IEEE 802 Networks */
+#define DLT_ARCNET 7 /* ARCNET */
+#define DLT_SLIP 8 /* Serial Line IP */
+#define DLT_PPP 9 /* Point-to-point Protocol */
+#define DLT_FDDI 10 /* FDDI */
+
+#endif /* 0 */
+
+/*
+ * The instruction encodings.
+ */
+
+/* Magic number for the first instruction */
+#define BPF_BEGIN NETF_BPF
+
+/* instruction classes */
+#define BPF_CLASS(code) ((code) & 0x07)
+#define BPF_LD 0x00
+#define BPF_LDX 0x01
+#define BPF_ST 0x02
+#define BPF_STX 0x03
+#define BPF_ALU 0x04
+#define BPF_JMP 0x05
+#define BPF_RET 0x06
+#define BPF_MISC 0x07
+
+/* ld/ldx fields */
+#define BPF_SIZE(code) ((code) & 0x18)
+#define BPF_W 0x00
+#define BPF_H 0x08
+#define BPF_B 0x10
+#define BPF_MODE(code) ((code) & 0xe0)
+#define BPF_IMM 0x00
+#define BPF_ABS 0x20
+#define BPF_IND 0x40
+#define BPF_MEM 0x60
+#define BPF_LEN 0x80
+#define BPF_MSH 0xa0
+
+/* alu/jmp fields */
+#define BPF_OP(code) ((code) & 0xf0)
+#define BPF_ADD 0x00
+#define BPF_SUB 0x10
+#define BPF_MUL 0x20
+#define BPF_DIV 0x30
+#define BPF_OR 0x40
+#define BPF_AND 0x50
+#define BPF_LSH 0x60
+#define BPF_RSH 0x70
+#define BPF_NEG 0x80
+#define BPF_JA 0x00
+#define BPF_JEQ 0x10
+#define BPF_JGT 0x20
+#define BPF_JGE 0x30
+#define BPF_JSET 0x40
+#define BPF_CKMATCH_IMM 0x50
+#define BPF_SRC(code) ((code) & 0x08)
+#define BPF_K 0x00
+#define BPF_X 0x08
+
+/* ret - BPF_K and BPF_X also apply */
+#define BPF_RVAL(code) ((code) & 0x38)
+#define BPF_A 0x10
+#define BPF_MATCH_IMM 0x18
+#define BPF_MATCH_DATA 0x20
+
+/* misc */
+#define BPF_MISCOP(code) ((code) & 0xf8)
+#define BPF_TAX 0x00
+#define BPF_TXA 0x80
+#define BPF_KEY 0x10
+#define BPF_REG_DATA 0x18
+#define BPF_POSTPONE 0x20
+
+/*
+ * The instruction data structure.
+ */
+struct bpf_insn {
+ unsigned short code;
+ unsigned char jt;
+ unsigned char jf;
+ int k;
+};
+typedef struct bpf_insn *bpf_insn_t;
+
+/*
+ * largest bpf program size
+ */
+#define NET_MAX_BPF ((NET_MAX_FILTER*sizeof(filter_t))/sizeof(struct bpf_insn))
+
+/*
+ * Macros for insn array initializers.
+ */
+#define BPF_STMT(code, k) { (unsigned short)(code), 0, 0, k }
+#define BPF_JUMP(code, k, jt, jf) { (unsigned short)(code), jt, jf, k }
+#define BPF_RETMATCH(code, k, nkey) { (unsigned short)(code), nkey, 0, k }
+
+#define BPF_INSN_STMT(pc, c, n) {\
+ (pc)->code = (c); \
+ (pc)->jt = (pc)->jf = 0; \
+ (pc)->k = (n); \
+ (pc)++; \
+}
+
+#define BPF_INSN_JUMP(pc, c, n, jtrue, jfalse) {\
+ (pc)->code = (c); \
+ (pc)->jt = (jtrue); \
+ (pc)->jf = (jfalse); \
+ (pc)->k = (n); \
+ (pc)++; \
+}
+
+#define BPF_INSN_RETMATCH(pc, c, n, nkey) {\
+ (pc)->code = (c); \
+ (pc)->jt = (nkey); \
+ (pc)->jf = 0; \
+ (pc)->k = (n); \
+ (pc)++; \
+}
+
+/*
+ * Number of scratch memory words (for BPF_LD|BPF_MEM and BPF_ST).
+ */
+#define BPF_MEMWORDS 16
+
+/*
+ * Link level header can be accessed by adding BPF_DLBASE to an offset.
+ */
+#define BPF_DLBASE (1<<30)
+
+#define BPF_BYTES(n) ((n) * sizeof (struct bpf_insn))
+#define BPF_BYTES2LEN(n) ((n) / sizeof (struct bpf_insn))
+#define BPF_INSN_EQ(p,q) ((p)->code == (q)->code && \
+ (p)->jt == (q)->jt && \
+ (p)->jf == (q)->jf && \
+ (p)->k == (q)->k)
+
+#endif /* _DEVICE_BPF_H_ */
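
A small example of a filter built with the BPF_STMT/BPF_JUMP macros above: accept whole IP-over-Ethernet packets and reject everything else. The struct and the few opcode constants are repeated from the header so the table can stand alone; offset 12 is the Ethernet type field and 0x0800 is ETHERTYPE_IP, following the classic BPF convention that jt/jf are offsets relative to the next instruction.

/* Standalone sketch of a classic BPF program using the encodings above. */
#include <stdio.h>

struct bpf_insn {
	unsigned short code;
	unsigned char jt;
	unsigned char jf;
	int k;
};

#define BPF_LD  0x00
#define BPF_H   0x08
#define BPF_ABS 0x20
#define BPF_JMP 0x05
#define BPF_JEQ 0x10
#define BPF_K   0x00
#define BPF_RET 0x06

#define BPF_STMT(code, k)         { (unsigned short)(code), 0, 0, k }
#define BPF_JUMP(code, k, jt, jf) { (unsigned short)(code), jt, jf, k }

static struct bpf_insn ip_filter[] = {
	/* A = halfword at byte offset 12 (Ethernet type field) */
	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
	/* if (A == 0x0800) fall through to accept, else skip to reject */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),
	/* accept: return a large byte count (whole packet) */
	BPF_STMT(BPF_RET | BPF_K, 0xffff),
	/* reject: return 0 bytes */
	BPF_STMT(BPF_RET | BPF_K, 0),
};

int main(void)
{
	unsigned i;

	for (i = 0; i < sizeof ip_filter / sizeof ip_filter[0]; i++)
		printf("insn %u: code=0x%04x jt=%d jf=%d k=0x%x\n", i,
		       ip_filter[i].code, ip_filter[i].jt,
		       ip_filter[i].jf, (unsigned)ip_filter[i].k);
	return 0;
}
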
diff --git a/include/device/device.defs b/include/device/device.defs
new file mode 100644
index 00000000..2bbd5563
--- /dev/null
+++ b/include/device/device.defs
@@ -0,0 +1,151 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: device/device.defs
+ * Author: Douglas Orr
+ * Feb 10, 1988
+ * Abstract:
+ * Mach device support. Mach devices are accessed through
+ * block and character device interfaces to the kernel.
+ */
+
+#ifdef MACH_KERNEL
+simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
+#endif
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif
+ device 2800;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <device/device_types.defs>
+
+serverprefix ds_;
+
+type reply_port_t = MACH_MSG_TYPE_MAKE_SEND_ONCE | polymorphic
+ ctype: mach_port_t;
+
+routine device_open(
+ master_port : mach_port_t;
+ sreplyport reply_port : reply_port_t;
+ mode : dev_mode_t;
+ name : dev_name_t;
+ out device : device_t
+ );
+
+routine device_close(
+ device : device_t
+ );
+
+routine device_write(
+ device : device_t;
+ sreplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in data : io_buf_ptr_t;
+ out bytes_written : int
+ );
+
+routine device_write_inband(
+ device : device_t;
+ sreplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in data : io_buf_ptr_inband_t;
+ out bytes_written : int
+ );
+
+routine device_read(
+ device : device_t;
+ sreplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in bytes_wanted : int;
+ out data : io_buf_ptr_t
+ );
+
+routine device_read_inband(
+ device : device_t;
+ sreplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in bytes_wanted : int;
+ out data : io_buf_ptr_inband_t
+ );
+
+/* obsolete */
+routine xxx_device_set_status(
+ device : device_t;
+ in flavor : dev_flavor_t;
+ in status : dev_status_t, IsLong
+ );
+
+/* obsolete */
+routine xxx_device_get_status(
+ device : device_t;
+ in flavor : dev_flavor_t;
+ out status : dev_status_t, IsLong
+ );
+
+/* obsolete */
+routine xxx_device_set_filter(
+ device : device_t;
+ in receive_port : mach_port_send_t;
+ in priority : int;
+ in filter : filter_array_t, IsLong
+ );
+
+routine device_map(
+ device : device_t;
+ in prot : vm_prot_t;
+ in offset : vm_offset_t;
+ in size : vm_size_t;
+ out pager : memory_object_t;
+ in unmap : int
+ );
+
+routine device_set_status(
+ device : device_t;
+ in flavor : dev_flavor_t;
+ in status : dev_status_t
+ );
+
+routine device_get_status(
+ device : device_t;
+ in flavor : dev_flavor_t;
+ out status : dev_status_t, CountInOut
+ );
+
+routine device_set_filter(
+ device : device_t;
+ in receive_port : mach_port_send_t;
+ in priority : int;
+ in filter : filter_array_t
+ );
+
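For orientation, a hedged sketch of a client call through the MIG-generated user stubs for the routines above; the stub names and argument order follow the usual MIG mapping (sreplyport arguments are supplied internally), the device name "hd0a" is purely illustrative, and the master device port is assumed to be obtained elsewhere. D_READ, D_WRITE and D_SUCCESS come from device_types.h later in this patch.

/* Sketch only: open a device, write five bytes in-band, close it. */
kern_return_t write_hello(mach_port_t master_port)
{
	device_t dev;
	int wrote;
	kern_return_t kr;

	kr = device_open(master_port, D_READ | D_WRITE, "hd0a", &dev);
	if (kr != D_SUCCESS)
		return kr;
	kr = device_write_inband(dev, 0, 0, "hello", 5, &wrote);
	device_close(dev);
	return kr;
}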
diff --git a/include/device/device_reply.defs b/include/device/device_reply.defs
new file mode 100644
index 00000000..03c17f5d
--- /dev/null
+++ b/include/device/device_reply.defs
@@ -0,0 +1,104 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ *
+ * Reply-only side of device interface.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif
+ device_reply 2900;
+ /* to match reply numbers for device.defs */
+
+/*
+ * Device_write_reply (only user of this data type) deallocates
+ * the data.
+ */
+
+
+#include <mach/std_types.defs>
+#include <device/device_types.defs>
+
+userprefix ds_;
+
+#if SEQNOS
+serverprefix seqnos_;
+serverdemux seqnos_device_reply_server;
+#endif /* SEQNOS */
+
+type reply_port_t = polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
+ ctype: mach_port_t;
+
+simpleroutine device_open_reply(
+ reply_port : reply_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ in return_code : kern_return_t;
+ in device_port : mach_port_make_send_t
+ );
+
+skip; /* device_close */
+
+simpleroutine device_write_reply(
+ reply_port : reply_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ in return_code : kern_return_t;
+ in bytes_written : int
+ );
+
+simpleroutine device_write_reply_inband(
+ reply_port : reply_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ in return_code : kern_return_t;
+ in bytes_written : int
+ );
+
+simpleroutine device_read_reply(
+ reply_port : reply_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ in return_code : kern_return_t;
+ in data : io_buf_ptr_t, dealloc
+ );
+
+simpleroutine device_read_reply_inband(
+ reply_port : reply_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif /* SEQNOS */
+ in return_code : kern_return_t;
+ in data : io_buf_ptr_inband_t
+ );
diff --git a/include/device/device_request.defs b/include/device/device_request.defs
new file mode 100644
index 00000000..e8aab2a6
--- /dev/null
+++ b/include/device/device_request.defs
@@ -0,0 +1,81 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 8/89
+ *
+ * Request-only side of device interface.
+ */
+
+subsystem device_request 2800; /* to match device.defs */
+
+#include <device/device_types.defs>
+
+serverprefix ds_;
+
+type reply_port_t = MACH_MSG_TYPE_MAKE_SEND_ONCE
+ ctype: mach_port_t;
+
+simpleroutine device_open_request(
+ device_server_port : mach_port_t;
+ ureplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in name : dev_name_t
+ );
+
+skip; /* device_close */
+
+simpleroutine device_write_request(
+ device : device_t;
+ ureplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in data : io_buf_ptr_t
+ );
+
+simpleroutine device_write_request_inband(
+ device : device_t;
+ ureplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in data : io_buf_ptr_inband_t
+ );
+
+simpleroutine device_read_request(
+ device : device_t;
+ ureplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in bytes_wanted : int
+ );
+
+simpleroutine device_read_request_inband(
+ device : device_t;
+ ureplyport reply_port : reply_port_t;
+ in mode : dev_mode_t;
+ in recnum : recnum_t;
+ in bytes_wanted : int
+ );
diff --git a/include/device/device_types.defs b/include/device/device_types.defs
new file mode 100644
index 00000000..c5d8e9d5
--- /dev/null
+++ b/include/device/device_types.defs
@@ -0,0 +1,64 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ *
+ * Common definitions for device interface types.
+ */
+
+#ifndef _DEVICE_DEVICE_TYPES_DEFS_
+#define _DEVICE_DEVICE_TYPES_DEFS_
+
+/*
+ * Basic types
+ */
+
+#include <mach/std_types.defs>
+
+type recnum_t = unsigned32;
+type dev_mode_t = unsigned32;
+type dev_flavor_t = unsigned32;
+type dev_name_t = (MACH_MSG_TYPE_STRING_C, 8*128);
+type dev_status_t = array[*:1024] of int;
+type io_buf_ptr_t = ^array[] of MACH_MSG_TYPE_INTEGER_8;
+type io_buf_ptr_inband_t= array[*:128] of char;
+type filter_t = short;
+type filter_array_t = array[*:128] of filter_t;
+
+type device_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: device_t dev_port_lookup(mach_port_t)
+ outtran: mach_port_t convert_device_to_port(device_t)
+ destructor: device_deallocate(device_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+import <device/device_types.h>;
+import <device/net_status.h>;
+
+#endif /* _DEVICE_DEVICE_TYPES_DEFS_ */
diff --git a/include/device/device_types.h b/include/device/device_types.h
new file mode 100644
index 00000000..d02b8f1d
--- /dev/null
+++ b/include/device/device_types.h
@@ -0,0 +1,133 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ */
+
+#ifndef DEVICE_TYPES_H
+#define DEVICE_TYPES_H
+
+/*
+ * Types for device interface.
+ */
+#include <mach/std_types.h>
+
+#ifdef MACH_KERNEL
+/*
+ * Get kernel-only type definitions.
+ */
+#include <device/device_types_kernel.h>
+
+#else /* MACH_KERNEL */
+/*
+ * Device handle.
+ */
+typedef mach_port_t device_t;
+
+#endif /* MACH_KERNEL */
+
+/*
+ * Device name string
+ */
+typedef char dev_name_t[128]; /* must match device_types.defs */
+
+/*
+ * Mode for open/read/write
+ */
+typedef unsigned int dev_mode_t;
+#define D_READ 0x1 /* read */
+#define D_WRITE 0x2 /* write */
+#define D_NODELAY 0x4 /* no delay on open */
+#define D_NOWAIT 0x8 /* do not wait if data not available */
+
+/*
+ * IO buffer - out-of-line array of characters.
+ */
+typedef char * io_buf_ptr_t;
+
+/*
+ * IO buffer - in-line array of characters.
+ */
+#define IO_INBAND_MAX (128) /* must match device_types.defs */
+typedef char io_buf_ptr_inband_t[IO_INBAND_MAX];
+
+/*
+ * IO buffer vector - for scatter/gather IO.
+ */
+typedef struct {
+ vm_offset_t data;
+ vm_size_t count;
+} io_buf_vec_t;
+
+/*
+ * Record number for random-access devices
+ */
+typedef unsigned int recnum_t;
+
+/*
+ * Flavors of set/get statuses
+ */
+typedef unsigned int dev_flavor_t;
+
+/*
+ * Generic array for get/set status
+ */
+typedef int *dev_status_t; /* Variable-length array of integers */
+#define DEV_STATUS_MAX (1024) /* Maximum array size */
+
+typedef int dev_status_data_t[DEV_STATUS_MAX];
+
+/*
+ * Mandatory get/set status operations
+ */
+
+/* size a device: op code and indexes for returned values */
+#define DEV_GET_SIZE 0
+# define DEV_GET_SIZE_DEVICE_SIZE 0 /* 0 if unknown */
+# define DEV_GET_SIZE_RECORD_SIZE 1 /* 1 if sequential */
+#define DEV_GET_SIZE_COUNT 2
+
+/*
+ * Device error codes
+ */
+typedef int io_return_t;
+
+#define D_IO_QUEUED (-1) /* IO queued - do not return result */
+#define D_SUCCESS 0
+
+#define D_IO_ERROR 2500 /* hardware IO error */
+#define D_WOULD_BLOCK 2501 /* would block, but D_NOWAIT set */
+#define D_NO_SUCH_DEVICE 2502 /* no such device */
+#define D_ALREADY_OPEN 2503 /* exclusive-use device already open */
+#define D_DEVICE_DOWN 2504 /* device has been shut down */
+#define D_INVALID_OPERATION 2505 /* bad operation for device */
+#define D_INVALID_RECNUM 2506 /* invalid record (block) number */
+#define D_INVALID_SIZE 2507 /* invalid IO size */
+#define D_NO_MEMORY 2508 /* memory allocation failure */
+#define D_READ_ONLY 2509 /* device cannot be written to */
+
+#endif /* DEVICE_TYPES_H */
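The DEV_GET_SIZE flavor above returns two integers indexed by the DEV_GET_SIZE_* constants. A sketch, assuming the conventional MIG-generated prototype of device_get_status (declared in device.defs earlier in this patch), where the status count is passed in/out:

/* Sketch only: query device and record size; `dev' is an open device port. */
kern_return_t get_device_size(device_t dev,
			      unsigned int *bytes, unsigned int *recsize)
{
	int info[DEV_GET_SIZE_COUNT];
	mach_msg_type_number_t count = DEV_GET_SIZE_COUNT;
	kern_return_t kr;

	kr = device_get_status(dev, DEV_GET_SIZE, info, &count);
	if (kr != D_SUCCESS)
		return kr;
	*bytes   = info[DEV_GET_SIZE_DEVICE_SIZE];	/* 0 if unknown */
	*recsize = info[DEV_GET_SIZE_RECORD_SIZE];	/* 1 if sequential */
	return D_SUCCESS;
}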
diff --git a/include/device/disk_status.h b/include/device/disk_status.h
new file mode 100644
index 00000000..868e06ef
--- /dev/null
+++ b/include/device/disk_status.h
@@ -0,0 +1,306 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Copyright (c) 1987, 1988 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * @(#)disklabel.h 7.10 (Berkeley) 6/27/88
+ */
+
+#ifndef _DISK_STATUS_H_
+#define _DISK_STATUS_H_
+
+/*
+ * Each disk has a label which includes information about the hardware
+ * disk geometry, filesystem partitions, and drive specific information.
+ * The label is in block 0 or 1, possibly offset from the beginning
+ * to leave room for a bootstrap, etc.
+ */
+
+#define LABELSECTOR 0 /* sector containing label */
+#define LABELOFFSET 64 /* offset of label in sector */
+#define DISKMAGIC ((unsigned int) 0x82564557U) /* The disk magic number */
+#ifndef MAXPARTITIONS
+#define MAXPARTITIONS 8
+#endif
+
+
+#ifndef LOCORE
+struct disklabel {
+ unsigned int d_magic; /* the magic number */
+ short d_type; /* drive type */
+ short d_subtype; /* controller/d_type specific */
+ char d_typename[16]; /* type name, e.g. "eagle" */
+ /*
+ * d_packname contains the pack identifier and is returned when
+ * the disklabel is read off the disk or in-core copy.
+ * d_boot0 and d_boot1 are the (optional) names of the
+ * primary (block 0) and secondary (block 1-15) bootstraps
+ * as found in /usr/mdec. These are returned when using
+ * getdiskbyname(3) to retrieve the values from /etc/disktab.
+ */
+#if defined(MACH_KERNEL) || defined(STANDALONE)
+ char d_packname[16]; /* pack identifier */
+#else
+ union {
+ char un_d_packname[16]; /* pack identifier */
+ struct {
+ char *un_d_boot0; /* primary bootstrap name */
+ char *un_d_boot1; /* secondary bootstrap name */
+ } un_b;
+ } d_un;
+#define d_packname d_un.un_d_packname
+#define d_boot0 d_un.un_b.un_d_boot0
+#define d_boot1 d_un.un_b.un_d_boot1
+#endif /* ! MACH_KERNEL or STANDALONE */
+ /* disk geometry: */
+ unsigned int d_secsize; /* # of bytes per sector */
+ unsigned int d_nsectors; /* # of data sectors per track */
+ unsigned int d_ntracks; /* # of tracks per cylinder */
+ unsigned int d_ncylinders; /* # of data cylinders per unit */
+ unsigned int d_secpercyl; /* # of data sectors per cylinder */
+ unsigned int d_secperunit; /* # of data sectors per unit */
+ /*
+ * Spares (bad sector replacements) below
+ * are not counted in d_nsectors or d_secpercyl.
+ * Spare sectors are assumed to be physical sectors
+ * which occupy space at the end of each track and/or cylinder.
+ */
+ unsigned short d_sparespertrack; /* # of spare sectors per track */
+ unsigned short d_sparespercyl; /* # of spare sectors per cylinder */
+ /*
+ * Alternate cylinders include maintenance, replacement,
+ * configuration description areas, etc.
+ */
+ unsigned int d_acylinders; /* # of alt. cylinders per unit */
+
+ /* hardware characteristics: */
+ /*
+ * d_interleave, d_trackskew and d_cylskew describe perturbations
+ * in the media format used to compensate for a slow controller.
+ * Interleave is physical sector interleave, set up by the formatter
+ * or controller when formatting. When interleaving is in use,
+ * logically adjacent sectors are not physically contiguous,
+ * but instead are separated by some number of sectors.
+ * It is specified as the ratio of physical sectors traversed
+ * per logical sector. Thus an interleave of 1:1 implies contiguous
+ * layout, while 2:1 implies that logical sector 0 is separated
+ * by one sector from logical sector 1.
+ * d_trackskew is the offset of sector 0 on track N
+ * relative to sector 0 on track N-1 on the same cylinder.
+ * Finally, d_cylskew is the offset of sector 0 on cylinder N
+ * relative to sector 0 on cylinder N-1.
+ */
+ unsigned short d_rpm; /* rotational speed */
+ unsigned short d_interleave; /* hardware sector interleave */
+ unsigned short d_trackskew; /* sector 0 skew, per track */
+ unsigned short d_cylskew; /* sector 0 skew, per cylinder */
+ unsigned int d_headswitch; /* head switch time, usec */
+ unsigned int d_trkseek; /* track-to-track seek, usec */
+ unsigned int d_flags; /* generic flags */
+#define NDDATA 5
+ unsigned int d_drivedata[NDDATA]; /* drive-type specific information */
+#define NSPARE 5
+ unsigned int d_spare[NSPARE]; /* reserved for future use */
+ unsigned int d_magic2; /* the magic number (again) */
+ unsigned short d_checksum; /* xor of data incl. partitions */
+
+ /* filesystem and partition information: */
+ unsigned short d_npartitions; /* number of partitions in following */
+ unsigned int d_bbsize; /* size of boot area at sn0, bytes */
+ unsigned int d_sbsize; /* max size of fs superblock, bytes */
+ struct partition { /* the partition table */
+ unsigned int p_size; /* number of sectors in partition */
+ unsigned int p_offset; /* starting sector */
+ unsigned int p_fsize; /* filesystem basic fragment size */
+ unsigned char p_fstype; /* filesystem type, see below */
+ unsigned char p_frag; /* filesystem fragments per block */
+ unsigned short p_cpg; /* filesystem cylinders per group */
+ } d_partitions[MAXPARTITIONS+1]; /* actually may be more */
+
+#if defined(alpha) && defined(MACH_KERNEL)
+ /*
+ * Disgusting hack. If this structure contains a pointer,
+ * as it does for non-kernel, then the compiler rounds
+ * the size to make it pointer-sized properly (arrays of..).
+ * But if I define the pointer for the kernel then instances
+ * of this structure better be aligned otherwise picking
+ * up a short might be done by too-smart compilers (GCC) with
+ * a load-long instruction expecting the short to be aligned.
+ * I bet the OSF folks stomped into this too, since they use
+	 * the same disgusting hack below.. [what else can I do ??]
+ */
+ int bugfix;
+#endif
+};
+#else /* LOCORE */
+ /*
+ * offsets for asm boot files.
+ */
+ .set d_secsize,40
+ .set d_nsectors,44
+ .set d_ntracks,48
+ .set d_ncylinders,52
+ .set d_secpercyl,56
+ .set d_secperunit,60
+ .set d_end_,276 /* size of disk label */
+#endif /* LOCORE */
+
+/* d_type values: */
+#define DTYPE_SMD 1 /* SMD, XSMD; VAX hp/up */
+#define DTYPE_MSCP 2 /* MSCP */
+#define DTYPE_DEC 3 /* other DEC (rk, rl) */
+#define DTYPE_SCSI 4 /* SCSI */
+#define DTYPE_ESDI 5 /* ESDI interface */
+#define DTYPE_ST506 6 /* ST506 etc. */
+#define DTYPE_FLOPPY 10 /* floppy */
+
+#ifdef DKTYPENAMES
+static char *dktypenames[] = {
+ "unknown",
+ "SMD",
+ "MSCP",
+ "old DEC",
+ "SCSI",
+ "ESDI",
+ "type 6",
+ "type 7",
+ "type 8",
+ "type 9",
+ "floppy",
+ 0
+};
+#define DKMAXTYPES (sizeof(dktypenames) / sizeof(dktypenames[0]) - 1)
+#endif
+
+/*
+ * Filesystem type and version.
+ * Used to interpret other filesystem-specific
+ * per-partition information.
+ */
+#define FS_UNUSED 0 /* unused */
+#define FS_SWAP 1 /* swap */
+#define FS_V6 2 /* Sixth Edition */
+#define FS_V7 3 /* Seventh Edition */
+#define FS_SYSV 4 /* System V */
+#define FS_V71K 5 /* V7 with 1K blocks (4.1, 2.9) */
+#define FS_V8 6 /* Eighth Edition, 4K blocks */
+#define FS_BSDFFS 7 /* 4.2BSD fast file system */
+#define FS_LINUXFS 8 /* Linux file system */
+
+#ifdef DKTYPENAMES
+static char *fstypenames[] = {
+ "unused",
+ "swap",
+ "Version 6",
+ "Version 7",
+ "System V",
+ "4.1BSD",
+ "Eighth Edition",
+ "4.2BSD",
+ "Linux",
+ 0
+};
+#define FSMAXTYPES (sizeof(fstypenames) / sizeof(fstypenames[0]) - 1)
+#endif
+
+/*
+ * flags shared by various drives:
+ */
+#define D_REMOVABLE 0x01 /* removable media */
+#define D_ECC 0x02 /* supports ECC */
+#define D_BADSECT 0x04 /* supports bad sector forw. */
+#define D_RAMDISK 0x08 /* disk emulator */
+#define D_CHAIN 0x10 /* can do back-back transfers */
+
+/*
+ * Drive data for SMD.
+ */
+#define d_smdflags d_drivedata[0]
+#define D_SSE 0x1 /* supports skip sectoring */
+#define d_mindist d_drivedata[1]
+#define d_maxdist d_drivedata[2]
+#define d_sdist d_drivedata[3]
+
+/*
+ * Drive data for ST506.
+ */
+#define d_precompcyl d_drivedata[0]
+#define d_gap3 d_drivedata[1] /* used only when formatting */
+
+/*
+ * IBM controller info (d_precompcyl used, too)
+ */
+#define d_step d_drivedata[2]
+
+#ifndef LOCORE
+/*
+ * Structure used to perform a format
+ * or other raw operation, returning data
+ * and/or register values.
+ * Register identification and format
+ * are device- and driver-dependent.
+ */
+struct format_op {
+ char *df_buf;
+ int df_count; /* value-result */
+ recnum_t df_startblk;
+ int df_reg[8]; /* result */
+};
+
+/*
+ * Disk-specific ioctls.
+ */
+ /* get and set disklabel; DIOCGPART used internally */
+#define DIOCGDINFO _IOR('d', 101, struct disklabel)/* get */
+#define DIOCSDINFO _IOW('d', 102, struct disklabel)/* set */
+#define DIOCWDINFO _IOW('d', 103, struct disklabel)/* set, update disk */
+
+/* do format operation, read or write */
+#define DIOCRFORMAT _IOWR('d', 105, struct format_op)
+#define DIOCWFORMAT _IOWR('d', 106, struct format_op)
+
+#define DIOCSSTEP _IOW('d', 107, int) /* set step rate */
+#define DIOCSRETRIES _IOW('d', 108, int) /* set # of retries */
+#define DIOCWLABEL _IOW('d', 109, int) /* write en/disable label */
+
+#define DIOCSBAD _IOW('d', 110, struct dkbad) /* set kernel dkbad */
+
+#endif /* LOCORE */
+
+#endif /* _DISK_STATUS_H_ */
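The d_checksum comment above ("xor of data incl. partitions") is conventionally implemented as the 4.3BSD dkcksum(): an xor of the label's 16-bit words from the start of the structure through the last valid partition entry. The exact extent covered is an assumption beyond what this header states.

/* Sketch only.  To write a label: clear d_checksum, store this value into it.
 * A label read back is consistent when the same xor (now including the
 * stored d_checksum) comes out zero. */
static unsigned short
label_cksum(const struct disklabel *lp)
{
	const unsigned short *w = (const unsigned short *) lp;
	const unsigned short *end =
	    (const unsigned short *) &lp->d_partitions[lp->d_npartitions];
	unsigned short sum = 0;

	while (w < end)
		sum ^= *w++;
	return sum;
}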
diff --git a/include/device/net_status.h b/include/device/net_status.h
new file mode 100644
index 00000000..2b29b327
--- /dev/null
+++ b/include/device/net_status.h
@@ -0,0 +1,187 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 3/89
+ *
+ * Status information for network interfaces.
+ */
+
+#ifndef _DEVICE_NET_STATUS_H_
+#define _DEVICE_NET_STATUS_H_
+
+#include <device/device_types.h>
+#include <mach/message.h>
+
+/*
+ * General interface status
+ */
+struct net_status {
+ int min_packet_size; /* minimum size, including header */
+ int max_packet_size; /* maximum size, including header */
+ int header_format; /* format of network header */
+ int header_size; /* size of network header */
+ int address_size; /* size of network address */
+ int flags; /* interface status */
+ int mapped_size; /* if mappable, virtual mem needed */
+};
+#define NET_STATUS_COUNT (sizeof(struct net_status)/sizeof(int))
+#define NET_STATUS (('n'<<16) + 1)
+
+/*
+ * Header formats, as given by RFC 826/1010 for ARP:
+ */
+#define HDR_ETHERNET 1 /* Ethernet hardware address */
+#define HDR_EXP_ETHERNET 2	/* 3Mb experimental Ethernet
+ hardware address */
+#define HDR_PRO_NET 4 /* Proteon ProNET Token Ring */
+#define HDR_CHAOS 5 /* Chaosnet */
+#define HDR_802 6 /* IEEE 802 networks */
+
+
+/*
+ * A network address is an array of bytes. In order to return
+ * this in an array of (long) integers, it is returned in net order.
+ * Use 'ntohl' on each element of the array to retrieve the original
+ * ordering.
+ */
+#define NET_ADDRESS (('n'<<16) + 2)
+
+#define NET_DSTADDR (('n'<<16) + 3)
+
+
+/*
+ * Input packet filter definition
+ */
+#define NET_MAX_FILTER 128 /* was 64, bpf programs are big */
+#define NET_FILTER_STACK_DEPTH 32
+
+/*
+ * We allow specification of up to NET_MAX_FILTER (short) words of a filter
+ * command list to be applied to incoming packets to determine if
+ * those packets should be given to a particular network input filter.
+ *
+ * Each network filter specifies the filter command list via net_add_filter.
+ * Each filter command list specifies a sequence of actions which leaves a
+ * boolean value on the top of an internal stack. Each word of the
+ * command list specifies an action from the set {PUSHLIT, PUSHZERO,
+ * PUSHWORD+N} which respectively push the next word of the filter, zero,
+ * or word N of the incoming packet on the stack, and a binary operator
+ * from the set {EQ, LT, LE, GT, GE, AND, OR, XOR} which operates on the
+ * top two elements of the stack and replaces them with its result. The
+ * special action NOPUSH and the special operator NOP can be used to only
+ * perform the binary operation or to only push a value on the stack.
+ *
+ * If the final value of the filter operation is true, then the packet is
+ * accepted for the filter.
+ *
+ */
+
+typedef unsigned short filter_t;
+typedef filter_t *filter_array_t;
+
+#define CSPF_BYTES(n) ((n) * sizeof (filter_t))
+
+/* these must sum to 16! */
+#define NETF_NBPA 10 /* # bits / argument */
+#define NETF_NBPO 6 /* # bits / operator */
+
+#define NETF_ARG(word) ((word) & 0x3ff)
+#define NETF_OP(word) (((word)>>NETF_NBPA)&0x3f)
+
+/* binary operators */
+#define NETF_NOP (0<<NETF_NBPA)
+#define NETF_EQ (1<<NETF_NBPA)
+#define NETF_LT (2<<NETF_NBPA)
+#define NETF_LE (3<<NETF_NBPA)
+#define NETF_GT (4<<NETF_NBPA)
+#define NETF_GE (5<<NETF_NBPA)
+#define NETF_AND (6<<NETF_NBPA)
+#define NETF_OR (7<<NETF_NBPA)
+#define NETF_XOR (8<<NETF_NBPA)
+#define NETF_COR (9<<NETF_NBPA)
+#define NETF_CAND (10<<NETF_NBPA)
+#define NETF_CNOR (11<<NETF_NBPA)
+#define NETF_CNAND (12<<NETF_NBPA)
+#define NETF_NEQ (13<<NETF_NBPA)
+#define NETF_LSH (14<<NETF_NBPA)
+#define NETF_RSH (15<<NETF_NBPA)
+#define NETF_ADD (16<<NETF_NBPA)
+#define NETF_SUB (17<<NETF_NBPA)
+#define NETF_BPF (((1 << NETF_NBPO) - 1) << NETF_NBPA)
+
+
+/* stack arguments */
+#define NETF_NOPUSH	0	/* don't push */
+#define NETF_PUSHLIT 1 /* next word in filter */
+#define NETF_PUSHZERO 2 /* 0 */
+#define NETF_PUSHIND 14 /* word indexed by stack top */
+#define NETF_PUSHHDRIND 15 /* header word indexed by stack top */
+#define NETF_PUSHWORD 16 /* word 0 .. 944 in packet */
+#define NETF_PUSHHDR 960 /* word 0 .. 31 in header */
+#define NETF_PUSHSTK 992 /* word 0 .. 31 in stack */
+
+/* priorities */
+#define NET_HI_PRI 100
+#define NET_PRI_MAX 255
+
+/*
+ * BPF support.
+ */
+#include <device/bpf.h>
+
+/*
+ * Net receive message format.
+ *
+ * The header and data are packaged separately, since some hardware
+ * supports variable-length headers. We prefix the packet with
+ * a packet_hdr structure so that the real data portion begins
+ * on a long-word boundary, and so that packet filters can address
+ * the type field and packet size uniformly.
+ */
+#define NET_RCV_MAX 4095
+#define NET_HDW_HDR_MAX 64
+
+#define NET_RCV_MSG_ID 2999 /* in device.defs reply range */
+
+struct packet_header {
+ unsigned short length;
+ unsigned short type; /* network order */
+};
+
+struct net_rcv_msg {
+ mach_msg_header_t msg_hdr;
+ mach_msg_type_t header_type;
+ char header[NET_HDW_HDR_MAX];
+ mach_msg_type_t packet_type;
+ char packet[NET_RCV_MAX];
+};
+typedef struct net_rcv_msg *net_rcv_msg_t;
+#define net_rcv_msg_packet_count packet_type.msgt_number
+
+
+
+#endif /* _DEVICE_NET_STATUS_H_ */
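The stack language described above is easiest to see as a concrete word list. Because of the packet_header prefix (see the net_rcv_msg comment), the type field is word 1 of the packet as a filter sees it, so a three-word program suffices to accept exactly one packet type; pairing it with device_set_filter from device.defs earlier in this patch is the assumed installation path.

/* Sketch only: accept packets whose type word equals the literal below.
 * NETF_PUSHWORD+1 pushes the packet_header type word; NETF_PUSHLIT|NETF_EQ
 * pushes the next filter word and compares the top two stack entries,
 * leaving the boolean that decides acceptance. */
filter_t type_filter[] = {
	NETF_PUSHWORD + 1,
	NETF_PUSHLIT | NETF_EQ,
	0x0800			/* value to match, in network byte order */
};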
diff --git a/include/device/tape_status.h b/include/device/tape_status.h
new file mode 100644
index 00000000..e14479d2
--- /dev/null
+++ b/include/device/tape_status.h
@@ -0,0 +1,128 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Copyright (c) 1982, 1986 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * @(#)mtio.h 7.4 (Berkeley) 8/31/88
+ */
+
+#ifndef _TAPE_STATUS_H_
+#define _TAPE_STATUS_H_
+
+/*
+ * Tape status
+ */
+
+struct tape_status {
+ unsigned int mt_type;
+ unsigned int speed;
+ unsigned int density;
+ unsigned int flags;
+# define TAPE_FLG_REWIND 0x1
+# define TAPE_FLG_WP 0x2
+};
+#define TAPE_STATUS_COUNT (sizeof(struct tape_status)/sizeof(int))
+#define TAPE_STATUS (('m'<<16) + 1)
+
+/*
+ * Constants for mt_type. These are the same
+ * for controllers compatible with the types listed.
+ */
+#define MT_ISTS 0x01 /* TS-11 */
+#define MT_ISHT 0x02 /* TM03 Massbus: TE16, TU45, TU77 */
+#define MT_ISTM 0x03 /* TM11/TE10 Unibus */
+#define MT_ISMT 0x04 /* TM78/TU78 Massbus */
+#define MT_ISUT 0x05 /* SI TU-45 emulation on Unibus */
+#define MT_ISCPC 0x06 /* SUN */
+#define MT_ISAR 0x07 /* SUN */
+#define MT_ISTMSCP 0x08 /* DEC TMSCP protocol (TU81, TK50) */
+#define MT_ISCY 0x09 /* CCI Cipher */
+#define MT_ISSCSI 0x0a /* SCSI tape (all brands) */
+
+
+/*
+ * Set status parameters
+ */
+
+struct tape_params {
+ unsigned int mt_operation;
+ unsigned int mt_repeat_count;
+};
+
+/* operations */
+#define MTWEOF 0 /* write an end-of-file record */
+#define MTFSF 1 /* forward space file */
+#define MTBSF 2 /* backward space file */
+#define MTFSR 3 /* forward space record */
+#define MTBSR 4 /* backward space record */
+#define MTREW 5 /* rewind */
+#define MTOFFL 6 /* rewind and put the drive offline */
+#define MTNOP 7 /* no operation, sets status only */
+#define MTCACHE 8 /* enable controller cache */
+#define MTNOCACHE 9 /* disable controller cache */
+
+
+/*
+ * U*x compatibility
+ */
+
+/* structure for MTIOCGET - mag tape get status command */
+
+struct mtget {
+ short mt_type; /* type of magtape device */
+/* the following two registers are grossly device dependent */
+ short mt_dsreg; /* ``drive status'' register */
+ short mt_erreg; /* ``error'' register */
+/* end device-dependent registers */
+ short mt_resid; /* residual count */
+/* the following two are not yet implemented */
+ unsigned long mt_fileno; /* file number of current position */
+ unsigned long mt_blkno; /* block number of current position */
+/* end not yet implemented */
+};
+
+
+/* mag tape io control commands */
+#define MTIOCTOP _IOW('m', 1, struct tape_params)/* do a mag tape op */
+#define MTIOCGET _IOR('m', 2, struct mtget) /* get tape status */
+#define MTIOCIEOT _IO('m', 3) /* ignore EOT error */
+#define MTIOCEEOT _IO('m', 4) /* enable EOT error */
+
+
+#endif /* _TAPE_STATUS_H_ */
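A sketch of reading the native tape status defined above through device_get_status (declared in device.defs earlier in this patch); the cast of the struct to dev_status_t follows the usual get/set-status convention and is otherwise an assumption.

/* Sketch only: fetch the drive flags; `dev' is an open tape device port. */
kern_return_t get_tape_flags(device_t dev, unsigned int *flags)
{
	struct tape_status ts;
	mach_msg_type_number_t count = TAPE_STATUS_COUNT;
	kern_return_t kr;

	kr = device_get_status(dev, TAPE_STATUS, (dev_status_t) &ts, &count);
	if (kr == D_SUCCESS)
		*flags = ts.flags;	/* TAPE_FLG_REWIND / TAPE_FLG_WP */
	return kr;
}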
diff --git a/include/device/tty_status.h b/include/device/tty_status.h
new file mode 100644
index 00000000..9f5eab81
--- /dev/null
+++ b/include/device/tty_status.h
@@ -0,0 +1,127 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * 	Date:	11/90
+ *
+ * Status information for tty.
+ */
+
+struct tty_status {
+ int tt_ispeed; /* input speed */
+ int tt_ospeed; /* output speed */
+ int tt_breakc; /* character to deliver when break
+ detected on line */
+ int tt_flags; /* mode flags */
+};
+#define TTY_STATUS_COUNT (sizeof(struct tty_status)/sizeof(int))
+#define TTY_STATUS (dev_flavor_t)(('t'<<16) + 1)
+
+/*
+ * Speeds
+ */
+#define B0 0
+#define B50 1
+#define B75 2
+#define B110 3
+#define B134 4
+#define B150 5
+#define B200 6
+#define B300 7
+#define B600 8
+#define B1200 9
+#define B1800 10
+#define B2400 11
+#define B4800 12
+#define B9600 13
+#define EXTA 14 /* XX can we just get rid of EXTA and EXTB? */
+#define EXTB 15
+#define B19200 EXTA
+#define B38400 EXTB
+
+#define NSPEEDS 16
+
+/*
+ * Flags
+ */
+#define TF_TANDEM 0x00000001 /* send stop character when input
+ queue full */
+#define TF_ODDP 0x00000002 /* get/send odd parity */
+#define TF_EVENP 0x00000004 /* get/send even parity */
+#define TF_ANYP (TF_ODDP|TF_EVENP)
+ /* get any parity/send none */
+#define TF_LITOUT 0x00000008 /* output all 8 bits
+ otherwise, characters >= 0x80
+ are time delays XXX */
+#define TF_MDMBUF 0x00000010 /* start/stop output on carrier
+ interrupt
+ otherwise, dropping carrier
+ hangs up line */
+#define TF_NOHANG 0x00000020 /* no hangup signal on carrier drop */
+#define TF_HUPCLS 0x00000040 /* hang up (outgoing) on last close */
+
+/*
+ * Read-only flags - information about device
+ */
+#define TF_ECHO 0x00000080 /* device wants user to echo input */
+#define TF_CRMOD 0x00000100 /* device wants \r\n, not \n */
+#define TF_XTABS 0x00000200 /* device does not understand tabs */
+
+/*
+ * Modem control
+ */
+#define TTY_MODEM_COUNT (1) /* one integer */
+#define TTY_MODEM (dev_flavor_t)(('t'<<16) + 2)
+
+#define TM_LE 0x0001 /* line enable */
+#define TM_DTR 0x0002 /* data terminal ready */
+#define TM_RTS 0x0004 /* request to send */
+#define TM_ST 0x0008 /* secondary transmit */
+#define TM_SR 0x0010 /* secondary receive */
+#define TM_CTS 0x0020 /* clear to send */
+#define TM_CAR 0x0040 /* carrier detect */
+#define TM_RNG 0x0080 /* ring */
+#define TM_DSR 0x0100 /* data set ready */
+
+#define TM_BRK 0x0200 /* set line break (internal) */
+#define TM_HUP 0x0000 /* close line (internal) */
+
+/*
+ * Other controls
+ */
+#define TTY_FLUSH_COUNT (1) /* one integer - D_READ|D_WRITE */
+#define TTY_FLUSH (dev_flavor_t)(('t'<<16) + 3)
+ /* flush input or output */
+#define TTY_STOP (dev_flavor_t)(('t'<<16) + 4)
+ /* stop output */
+#define TTY_START (dev_flavor_t)(('t'<<16) + 5)
+ /* start output */
+#define TTY_SET_BREAK (dev_flavor_t)(('t'<<16) + 6)
+ /* set break condition */
+#define TTY_CLEAR_BREAK (dev_flavor_t)(('t'<<16) + 7)
+ /* clear break condition */
+#define TTY_SET_TRANSLATION (dev_flavor_t)(('t'<<16) + 8)
+ /* set translation table */
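A sketch of the usual read-modify-write cycle on the tty status above, through the device_get_status/device_set_status routines from device.defs earlier in this patch; the tty device port and the cast to dev_status_t are the conventional usage, assumed rather than specified here.

/* Sketch only: change both line speeds on an open tty-class device. */
kern_return_t set_tty_speed(device_t tty, int speed /* e.g. B9600 */)
{
	struct tty_status ts;
	mach_msg_type_number_t count = TTY_STATUS_COUNT;
	kern_return_t kr;

	kr = device_get_status(tty, TTY_STATUS, (dev_status_t) &ts, &count);
	if (kr != D_SUCCESS)
		return kr;
	ts.tt_ispeed = speed;
	ts.tt_ospeed = speed;
	return device_set_status(tty, TTY_STATUS,
				 (dev_status_t) &ts, TTY_STATUS_COUNT);
}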
diff --git a/include/mach.h b/include/mach.h
new file mode 100644
index 00000000..71975d9c
--- /dev/null
+++ b/include/mach.h
@@ -0,0 +1,39 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Includes all the types that a normal user
+ * of Mach programs should need
+ */
+
+#ifndef _MACH_H_
+#define _MACH_H_
+
+#include <mach/mach_types.h>
+#include <mach/mach_interface.h>
+#include <mach/mach_port.h>
+#include <mach_init.h>
+
+#endif /* _MACH_H_ */
diff --git a/include/mach/alert.h b/include/mach/alert.h
new file mode 100644
index 00000000..8232f9ef
--- /dev/null
+++ b/include/mach/alert.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: mach/alert.h
+ *
+ * Standard alert definitions
+ *
+ */
+
+#ifndef _MACH_ALERT_H_
+#define _MACH_ALERT_H_
+
+#define ALERT_BITS 32 /* Minimum; more may actually be available */
+
+#define ALERT_ABORT_STRONG 0x00000001 /* Request to abort _all_ operations */
+#define ALERT_ABORT_SAFE 0x00000002 /* Request to abort restartable operations */
+
+#define ALERT_USER 0xffff0000 /* User-defined alert bits */
+
+#endif /* _MACH_ALERT_H_ */
diff --git a/include/mach/boolean.h b/include/mach/boolean.h
new file mode 100644
index 00000000..54028ad7
--- /dev/null
+++ b/include/mach/boolean.h
@@ -0,0 +1,63 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/boolean.h
+ *
+ * Boolean data type.
+ *
+ */
+
+#ifndef _MACH_BOOLEAN_H_
+#define _MACH_BOOLEAN_H_
+
+/*
+ * Pick up "boolean_t" type definition
+ */
+
+#ifndef ASSEMBLER
+#include <mach/machine/boolean.h>
+#endif /* ASSEMBLER */
+
+#endif /* _MACH_BOOLEAN_H_ */
+
+/*
+ * Define TRUE and FALSE, only if they haven't been before,
+ * and not if they're explicitly refused. Note that we're
+ * outside the BOOLEAN_H_ conditional, to avoid ordering
+ * problems.
+ */
+
+#if !defined(NOBOOL)
+
+#ifndef TRUE
+#define TRUE ((boolean_t) 1)
+#endif /* TRUE */
+
+#ifndef FALSE
+#define FALSE ((boolean_t) 0)
+#endif /* FALSE */
+
+#endif /* !defined(NOBOOL) */
diff --git a/include/mach/boot.h b/include/mach/boot.h
new file mode 100644
index 00000000..2f3e6c80
--- /dev/null
+++ b/include/mach/boot.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_BOOT_
+#define _MACH_BOOT_
+
+#include <mach/machine/boot.h>
+
+#ifndef ASSEMBLER
+
+#include <mach/machine/vm_types.h>
+
+struct boot_image_info
+{
+ /* First of the chain of boot modules in the boot image. */
+ struct boot_module *first_bmod;
+
+ /* List of rendezvous points:
+ starts out 0; and bmods can add nodes as needed. */
+ struct boot_rendezvous *first_rzv;
+
+ /* These register the total virtual address extent of the boot image. */
+ vm_offset_t start, end;
+
+ /* Machine-dependent boot information. */
+ struct machine_boot_image_info mboot;
+};
+
+struct boot_module
+{
+ int magic;
+ int (*init)(struct boot_image_info *bii);
+ vm_offset_t text;
+ vm_offset_t etext;
+ vm_offset_t data;
+ vm_offset_t edata;
+ vm_offset_t bss;
+ vm_offset_t ebss;
+};
+#define BMOD_VALID(bmod) ((bmod)->magic == BMOD_MAGIC)
+#define BMOD_NEXT(bmod) ((struct boot_module*)((bmod)->edata))
+
+struct boot_rendezvous
+{
+ struct boot_rendezvous *next;
+ int code;
+};
+
+#endif /* !ASSEMBLER */
+
+
+/* This is the magic value that must appear in boot_module.magic. */
+#define BMOD_MAGIC 0x424d4f44 /* 'BMOD' */
+
+
+/* Following are the codes for boot_rendezvous.code. */
+
+/* This rendezvous is used for choosing a microkernel to start.
+ XX not used yet */
+#define BRZV_KERNEL 'K'
+
+/* Once the microkernel is fully initialized,
+ it starts one or more bootstrap services... */
+#define BRZV_BOOTSTRAP 'B'
+
+/* The bootstrap services might need other OS-dependent data,
+ such as initial programs to run, filesystem snapshots, etc.
+ These generic chunks of data are packaged up by the microkernel
+ and provided to the bootstrap services upon request.
+ XX When can they be deallocated? */
+#define BRZV_DATA 'D'
+
+
+#endif /* _MACH_BOOT_ */
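A hedged sketch of walking the bmod chain described above with BMOD_VALID/BMOD_NEXT; terminating on an invalid magic or on running past bii->end is an assumption, not something this header specifies.

/* Sketch only: count the boot modules reachable from a boot_image_info. */
static int count_bmods(struct boot_image_info *bii)
{
	struct boot_module *bmod = bii->first_bmod;
	int n = 0;

	while (bmod != 0
	       && (vm_offset_t) bmod < bii->end
	       && BMOD_VALID(bmod)) {
		n++;
		bmod = BMOD_NEXT(bmod);
	}
	return n;
}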
diff --git a/include/mach/bootstrap.defs b/include/mach/bootstrap.defs
new file mode 100644
index 00000000..0b233e4a
--- /dev/null
+++ b/include/mach/bootstrap.defs
@@ -0,0 +1,49 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+subsystem bootstrap 999999;
+
+#include <mach/std_types.defs>
+
+ServerPrefix do_;
+
+/*
+ * The old form of the bootstrap_privileged_ports RPC
+ * is not expressible in Mig syntax, because the reply
+ * message doesn't have a return code.
+ */
+
+skip; /* old bootstrap_privileged_ports */
+
+/*
+ * The startup task can make this call on its bootstrap port
+ * to get the privileged ports.
+ */
+
+routine bootstrap_privileged_ports(
+ bootstrap : mach_port_t;
+ out priv_host : mach_port_t;
+ out priv_device : mach_port_t);
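A sketch of how a startup task would reach the routine above; the user stub name follows the usual MIG convention, and task_get_bootstrap_port()/mach_task_self() are standard Mach calls, so only the overall pairing is an assumption.

/* Sketch only: obtain the privileged host and device master ports. */
kern_return_t get_priv_ports(mach_port_t *host_priv, mach_port_t *device_master)
{
	mach_port_t bootstrap;
	kern_return_t kr;

	kr = task_get_bootstrap_port(mach_task_self(), &bootstrap);
	if (kr != KERN_SUCCESS)
		return kr;
	return bootstrap_privileged_ports(bootstrap, host_priv, device_master);
}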
diff --git a/include/mach/cthreads.h b/include/mach/cthreads.h
new file mode 100644
index 00000000..d44fa200
--- /dev/null
+++ b/include/mach/cthreads.h
@@ -0,0 +1,424 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: cthreads.h
+ * Author: Eric Cooper, Carnegie Mellon University
+ * Date: Jul, 1987
+ *
+ * Definitions for the C Threads package.
+ *
+ */
+
+
+#ifndef _CTHREADS_
+#define _CTHREADS_ 1
+
+#include <mach/machine/cthreads.h>
+#include <mach.h>
+#include <mach/macro_help.h>
+#include <mach/machine/vm_param.h>
+
+#ifdef __STDC__
+extern void *malloc();
+#else
+extern char *malloc();
+#endif
+
+typedef void *any_t; /* XXX - obsolete, should be deleted. */
+
+#if defined(TRUE)
+#else /* not defined(TRUE) */
+#define TRUE 1
+#define FALSE 0
+#endif
+
+/*
+ * C Threads package initialization.
+ */
+
+extern vm_offset_t cthread_init(void);
+
+
+/*
+ * Queues.
+ */
+typedef struct cthread_queue {
+ struct cthread_queue_item *head;
+ struct cthread_queue_item *tail;
+} *cthread_queue_t;
+
+typedef struct cthread_queue_item {
+ struct cthread_queue_item *next;
+} *cthread_queue_item_t;
+
+#define NO_QUEUE_ITEM ((cthread_queue_item_t) 0)
+
+#define QUEUE_INITIALIZER { NO_QUEUE_ITEM, NO_QUEUE_ITEM }
+
+#define cthread_queue_alloc() ((cthread_queue_t) calloc(1, sizeof(struct cthread_queue)))
+#define cthread_queue_init(q) ((q)->head = (q)->tail = 0)
+#define cthread_queue_free(q) free((q))
+
+#define cthread_queue_enq(q, x) \
+ MACRO_BEGIN \
+ (x)->next = 0; \
+ if ((q)->tail == 0) \
+ (q)->head = (cthread_queue_item_t) (x); \
+ else \
+ (q)->tail->next = (cthread_queue_item_t) (x); \
+ (q)->tail = (cthread_queue_item_t) (x); \
+ MACRO_END
+
+#define cthread_queue_preq(q, x) \
+ MACRO_BEGIN \
+ if ((q)->tail == 0) \
+ (q)->tail = (cthread_queue_item_t) (x); \
+ ((cthread_queue_item_t) (x))->next = (q)->head; \
+ (q)->head = (cthread_queue_item_t) (x); \
+ MACRO_END
+
+#define cthread_queue_head(q, t) ((t) ((q)->head))
+
+#define cthread_queue_deq(q, t, x) \
+ MACRO_BEGIN \
+ if (((x) = (t) ((q)->head)) != 0 && \
+ ((q)->head = (cthread_queue_item_t) ((x)->next)) == 0) \
+ (q)->tail = 0; \
+ MACRO_END
+
+#define cthread_queue_map(q, t, f) \
+ MACRO_BEGIN \
+ register cthread_queue_item_t x, next; \
+ for (x = (cthread_queue_item_t) ((q)->head); x != 0; x = next){\
+ next = x->next; \
+ (*(f))((t) x); \
+ } \
+ MACRO_END
+
+/*
+ * Spin locks.
+ */
+extern void spin_lock_solid(spin_lock_t *_lock);
+
+#if defined(spin_unlock)
+#else /* not defined(spin_unlock) */
+extern void spin_unlock(spin_lock_t *_lock);
+#endif
+
+#if defined(spin_try_lock)
+#else /* not defined(spin_try_lock) */
+extern boolean_t spin_try_lock(spin_lock_t *_lock);
+#endif
+
+#define spin_lock(p) \
+ MACRO_BEGIN \
+ if (!spin_try_lock(p)) { \
+ spin_lock_solid(p); \
+ } \
+ MACRO_END
+
+/*
+ * Mutex objects.
+ */
+typedef struct mutex {
+ spin_lock_t lock;
+ const char *name;
+ struct cthread_queue queue;
+ spin_lock_t held;
+ /* holder is for WAIT_DEBUG. Not ifdeffed to keep size constant. */
+ struct cthread *holder;
+} *mutex_t;
+
+#define MUTEX_INITIALIZER { SPIN_LOCK_INITIALIZER, 0, QUEUE_INITIALIZER, SPIN_LOCK_INITIALIZER}
+#define MUTEX_NAMED_INITIALIZER(Name) { SPIN_LOCK_INITIALIZER, Name, QUEUE_INITIALIZER, SPIN_LOCK_INITIALIZER}
+
+#ifdef WAIT_DEBUG
+#define mutex_set_holder(m,h) ((m)->holder = (h))
+#else
+#define mutex_set_holder(m,h) (0)
+#endif
+
+#define mutex_alloc() ((mutex_t) calloc(1, sizeof(struct mutex)))
+#define mutex_init(m) \
+ MACRO_BEGIN \
+ spin_lock_init(&(m)->lock); \
+ cthread_queue_init(&(m)->queue); \
+ spin_lock_init(&(m)->held); \
+ mutex_set_holder(m, 0); \
+ MACRO_END
+#define mutex_set_name(m, x) ((m)->name = (x))
+#define mutex_name(m) ((m)->name != 0 ? (m)->name : "?")
+#define mutex_clear(m)		/* nop */
+#define mutex_free(m) free((m))
+
+extern void mutex_lock_solid(mutex_t _mutex); /* blocking */
+
+extern void mutex_unlock_solid(mutex_t _mutex);
+
+#define mutex_try_lock(m) \
+ (spin_try_lock(&(m)->held) ? mutex_set_holder((m), cthread_self()), TRUE : FALSE)
+#define mutex_lock(m) \
+ MACRO_BEGIN \
+ if (!spin_try_lock(&(m)->held)) { \
+ mutex_lock_solid(m); \
+ } \
+ mutex_set_holder(m, cthread_self()); \
+ MACRO_END
+#define mutex_unlock(m) \
+ MACRO_BEGIN \
+ mutex_set_holder(m, 0); \
+ if (spin_unlock(&(m)->held), \
+ cthread_queue_head(&(m)->queue, vm_offset_t) != 0) { \
+ mutex_unlock_solid(m); \
+ } \
+ MACRO_END
+
+/*
+ * Condition variables.
+ */
+typedef struct condition {
+ spin_lock_t lock;
+ struct cthread_queue queue;
+ const char *name;
+} *condition_t;
+
+#define CONDITION_INITIALIZER { SPIN_LOCK_INITIALIZER, QUEUE_INITIALIZER, 0 }
+#define CONDITION_NAMED_INITIALIZER(Name) { SPIN_LOCK_INITIALIZER, QUEUE_INITIALIZER, Name }
+
+#define condition_alloc() \
+ ((condition_t) calloc(1, sizeof(struct condition)))
+#define condition_init(c) \
+ MACRO_BEGIN \
+ spin_lock_init(&(c)->lock); \
+ cthread_queue_init(&(c)->queue); \
+ MACRO_END
+#define condition_set_name(c, x) ((c)->name = (x))
+#define condition_name(c) ((c)->name != 0 ? (c)->name : "?")
+#define condition_clear(c) \
+ MACRO_BEGIN \
+ condition_broadcast(c); \
+ spin_lock(&(c)->lock); \
+ MACRO_END
+#define condition_free(c) \
+ MACRO_BEGIN \
+ condition_clear(c); \
+ free((c)); \
+ MACRO_END
+
+#define condition_signal(c) \
+ MACRO_BEGIN \
+ if ((c)->queue.head) { \
+ cond_signal(c); \
+ } \
+ MACRO_END
+
+#define condition_broadcast(c) \
+ MACRO_BEGIN \
+ if ((c)->queue.head) { \
+ cond_broadcast(c); \
+ } \
+ MACRO_END
+
+extern void cond_signal(condition_t _cond);
+
+extern void cond_broadcast(condition_t _cond);
+
+extern void condition_wait(condition_t _cond, mutex_t _mutex);
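+
+/*
+ * Illustrative usage sketch (editorial addition): the canonical wait loop.
+ * condition_wait() releases the mutex while the caller sleeps and
+ * re-acquires it before returning, so the predicate is always re-tested
+ * under the lock.
+ */
+static struct mutex state_lock = MUTEX_INITIALIZER;
+static struct condition state_nonempty = CONDITION_NAMED_INITIALIZER("nonempty");
+static int state_count;
+
+static void
+example_consume(void)
+{
+	mutex_lock(&state_lock);
+	while (state_count == 0)
+		condition_wait(&state_nonempty, &state_lock);
+	state_count--;
+	mutex_unlock(&state_lock);
+}
+
+static void
+example_produce(void)
+{
+	mutex_lock(&state_lock);
+	state_count++;
+	condition_signal(&state_nonempty);	/* no-op when nobody is queued */
+	mutex_unlock(&state_lock);
+}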
+
+/*
+ * Threads.
+ */
+
+typedef void * (*cthread_fn_t)(void *arg);
+
+/* XXX We really should be using the setjmp.h that goes with the libc
+ * that we're planning on using, since that's where the setjmp()
+ * functions are going to be coming from.
+ */
+#include <mach/setjmp.h>
+
+typedef struct cthread {
+ struct cthread *next;
+ struct mutex lock;
+ struct condition done;
+ int state;
+ jmp_buf catch_exit;
+ cthread_fn_t func;
+ void *arg;
+ void *result;
+ const char *name;
+ void *data;
+ void *ldata;
+ void *private_data;
+ struct ur_cthread *ur;
+} *cthread_t;
+
+#define NO_CTHREAD ((cthread_t) 0)
+
+extern cthread_t cthread_fork(cthread_fn_t _func, void *_arg);
+
+extern void cthread_detach(cthread_t _thread);
+
+extern any_t cthread_join(cthread_t _thread);
+
+extern void cthread_yield(void);
+
+extern void cthread_exit(void *_result);
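+
+/*
+ * Illustrative sketch (editorial addition): forking a thread and collecting
+ * its result.  The value returned by the thread's top-level function (or
+ * passed to cthread_exit()) is what cthread_join() hands back.
+ */
+static void *
+example_worker(void *arg)
+{
+	long n = (long) arg;
+
+	return (void *) (n * n);	/* becomes the join result */
+}
+
+static long
+example_square(long n)
+{
+	cthread_t t = cthread_fork(example_worker, (void *) n);
+
+	return (long) cthread_join(t);	/* blocks until the worker finishes */
+}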
+
+/*
+ * This structure must agree with struct cproc in cthread_internals.h
+ */
+typedef struct ur_cthread {
+ struct ur_cthread *next;
+ cthread_t incarnation;
+} *ur_cthread_t;
+
+#ifndef cthread_sp
+extern vm_offset_t
+cthread_sp(void);
+#endif
+
+extern vm_offset_t cthread_stack_mask;
+
+#if defined(STACK_GROWTH_UP)
+#define ur_cthread_ptr(sp) \
+ (* (ur_cthread_t *) ((sp) & cthread_stack_mask))
+#else /* not defined(STACK_GROWTH_UP) */
+#define ur_cthread_ptr(sp) \
+ (* (ur_cthread_t *) ( ((sp) | cthread_stack_mask) + 1 \
+ - sizeof(ur_cthread_t *)) )
+#endif /* defined(STACK_GROWTH_UP) */
+
+#define ur_cthread_self() (ur_cthread_ptr(cthread_sp()))
+
+#define cthread_assoc(id, t) ((((ur_cthread_t) (id))->incarnation = (t)), \
+ ((t) ? ((t)->ur = (ur_cthread_t)(id)) : 0))
+#define cthread_self() (ur_cthread_self()->incarnation)
+
+extern void cthread_set_name(cthread_t _thread, const char *_name);
+
+extern const char * cthread_name(cthread_t _thread);
+
+extern int cthread_count(void);
+
+extern void cthread_set_limit(int _limit);
+
+extern int cthread_limit(void);
+
+extern void cthread_set_kernel_limit(int _n);
+
+extern int cthread_kernel_limit(void);
+
+extern void cthread_wire(void);
+
+extern void cthread_unwire(void);
+
+extern void cthread_msg_busy(mach_port_t _port, int _min, int _max);
+
+extern void cthread_msg_active(mach_port_t _port, int _min, int _max);
+
+extern mach_msg_return_t cthread_mach_msg(mach_msg_header_t *_header,
+ mach_msg_option_t _option,
+ mach_msg_size_t _send_size,
+ mach_msg_size_t _rcv_size,
+ mach_port_t _rcv_name,
+ mach_msg_timeout_t _timeout,
+ mach_port_t _notify,
+ int _min, int _max);
+
+extern void cthread_fork_prepare(void);
+
+extern void cthread_fork_parent(void);
+
+extern void cthread_fork_child(void);
+
+#if defined(THREAD_CALLS)
+/*
+ * Routines to replace thread_*.
+ */
+extern kern_return_t cthread_get_state(cthread_t _thread);
+
+extern kern_return_t cthread_set_state(cthread_t _thread);
+
+extern kern_return_t cthread_abort(cthread_t _thread);
+
+extern kern_return_t cthread_resume(cthread_t _thread);
+
+extern kern_return_t cthread_suspend(cthread_t _thread);
+
+extern kern_return_t cthread_call_on(cthread_t _thread);
+#endif /* defined(THREAD_CALLS) */
+
+#if defined(CTHREAD_DATA_XX)
+/*
+ * Set or get thread specific "global" variable
+ *
+ * The thread given must be the calling thread (i.e., thread_self).
+ * XXX This is for compatibility with the old cthread_data. XXX
+ */
+extern int cthread_set_data(cthread_t _thread, void *_val);
+
+extern void * cthread_data(cthread_t _thread);
+#else /* defined(CTHREAD_DATA_XX) */
+
+#define cthread_set_data(_thread, _val) ((_thread)->data) = (void *)(_val);
+#define cthread_data(_thread) ((_thread)->data)
+
+#define cthread_set_ldata(_thread, _val) ((_thread)->ldata) = (void *)(_val);
+#define cthread_ldata(_thread) ((_thread)->ldata)
+
+#endif /* defined(CTHREAD_DATA_XX) */
+
+
+/*
+ * Support for POSIX thread specific data
+ *
+ * Multiplexes a thread specific "global" variable
+ * into many thread specific "global" variables.
+ */
+#define CTHREAD_DATA_VALUE_NULL (void *)0
+#define CTHREAD_KEY_INVALID (cthread_key_t)-1
+
+typedef int cthread_key_t;
+
+/*
+ * Create key to private data visible to all threads in task.
+ * Different threads may use same key, but the values bound to the key are
+ * maintained on a thread specific basis.
+ */
+extern int cthread_keycreate(cthread_key_t *_key);
+
+/*
+ * Get value currently bound to key for calling thread
+ */
+extern int cthread_getspecific(cthread_key_t _key, void **_value);
+
+/*
+ * Bind value to given key for calling thread
+ */
+extern int cthread_setspecific(cthread_key_t _key, void *_value);
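+
+/*
+ * Illustrative sketch (editorial addition): one key, created once per task,
+ * multiplexing a per-thread context pointer.  Key creation is left
+ * unsynchronized here purely for brevity; real code would guard it with a
+ * mutex or create the key during startup.
+ */
+static cthread_key_t example_key = CTHREAD_KEY_INVALID;
+
+static void
+example_set_context(void *context)
+{
+	if (example_key == CTHREAD_KEY_INVALID)
+		(void) cthread_keycreate(&example_key);
+	(void) cthread_setspecific(example_key, context);
+}
+
+static void *
+example_get_context(void)
+{
+	void *context = CTHREAD_DATA_VALUE_NULL;
+
+	(void) cthread_getspecific(example_key, &context);
+	return context;
+}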
+
+#endif /* not defined(_CTHREADS_) */
diff --git a/include/mach/default_pager.defs b/include/mach/default_pager.defs
new file mode 100644
index 00000000..e2154e2e
--- /dev/null
+++ b/include/mach/default_pager.defs
@@ -0,0 +1,65 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+subsystem default_pager 2275;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <mach/default_pager_types.defs>
+
+routine default_pager_object_create(
+ default_pager : mach_port_t;
+ out memory_object : memory_object_t =
+ MACH_MSG_TYPE_MAKE_SEND;
+ object_size : vm_size_t);
+
+routine default_pager_info(
+ default_pager : mach_port_t;
+ out info : default_pager_info_t);
+
+routine default_pager_objects(
+ default_pager : mach_port_t;
+ out objects : default_pager_object_array_t,
+ CountInOut, Dealloc;
+ out ports : mach_port_array_t =
+ array[] of mach_port_move_send_t,
+ CountInOut, Dealloc);
+
+routine default_pager_object_pages(
+ default_pager : mach_port_t;
+ memory_object : memory_object_name_t;
+ out pages : default_pager_page_array_t,
+ CountInOut, Dealloc);
+
+routine default_pager_paging_file(
+ default_pager : mach_port_t;
+ master_device_port : mach_port_t;
+ filename : default_pager_filename_t;
+ add : boolean_t);
+
+routine default_pager_register_fileserver(
+ default_pager : mach_port_t;
+ fileserver_port : mach_port_t);
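+
+/*
+ * Editorial note (illustrative, not part of the original interface file):
+ * MiG turns each routine above into a C client stub.  default_pager_info,
+ * for instance, is expected to come out roughly as
+ *
+ *	kern_return_t default_pager_info(mach_port_t default_pager,
+ *					 default_pager_info_t *info);
+ *
+ * so a task holding the default pager's port would query it with
+ *
+ *	default_pager_info_t info;
+ *	kern_return_t kr = default_pager_info(default_pager_port, &info);
+ *	if (kr == KERN_SUCCESS)
+ *		report(info.dpi_free_space, info.dpi_total_space);
+ *
+ * where default_pager_port and report() are hypothetical names used only
+ * for this sketch.
+ */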
diff --git a/include/mach/default_pager_helper.defs b/include/mach/default_pager_helper.defs
new file mode 100644
index 00000000..a8a9f78d
--- /dev/null
+++ b/include/mach/default_pager_helper.defs
@@ -0,0 +1,53 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+subsystem dp_helper 888888;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+ServerPrefix do_;
+
+/*
+ * Message that the default pager sends to
+ * a fileserver that has registered itself
+ * with the default pager as a "paging helper",
+ * notifying it that more paging space is or
+ * is not needed.  The same message lets the
+ * fileserver know it can (ask to) reclaim space.
+ *
+ * This message is only sent to a port that
+ * has been previously registered via
+ * default_pager_register_fileserver.
+ * The (optional) reply from the fileserver
+ * is a call to default_pager_paging_file.
+ */
+
+simpleroutine dp_helper_paging_space(
+ dp_helper : mach_port_t;
+ space_shortage : boolean_t;
+ approx_amount : vm_size_t);
+
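+/*
+ * Editorial note (illustrative, not part of the original interface file):
+ * with "ServerPrefix do_", a fileserver implementing this interface supplies
+ * a server routine expected to look roughly like
+ *
+ *	kern_return_t
+ *	do_dp_helper_paging_space(mach_port_t dp_helper,
+ *				  boolean_t space_shortage,
+ *				  vm_size_t approx_amount)
+ *	{
+ *		return default_pager_paging_file(default_pager_port,
+ *						 master_device_port,
+ *						 paging_file_name,
+ *						 space_shortage);
+ *	}
+ *
+ * i.e. on a shortage it offers a paging file, and otherwise withdraws one,
+ * via the default_pager_paging_file call mentioned above.  The names
+ * default_pager_port, master_device_port and paging_file_name are
+ * hypothetical globals the fileserver would maintain; they are not defined
+ * by this interface.
+ */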
diff --git a/include/mach/default_pager_types.defs b/include/mach/default_pager_types.defs
new file mode 100644
index 00000000..3164f047
--- /dev/null
+++ b/include/mach/default_pager_types.defs
@@ -0,0 +1,44 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_DEFAULT_PAGER_TYPES_DEFS_
+#define _MACH_DEFAULT_PAGER_TYPES_DEFS_
+
+#include <mach/std_types.defs>
+
+type default_pager_info_t = struct[3] of natural_t;
+
+type default_pager_object_t = struct[2] of natural_t;
+type default_pager_object_array_t = array[] of default_pager_object_t;
+
+type default_pager_page_t = struct[1] of natural_t;
+type default_pager_page_array_t = array[] of default_pager_page_t;
+
+type default_pager_filename_t = (MACH_MSG_TYPE_STRING_C, 8*256);
+
+import <mach/default_pager_types.h>;
+
+#endif /* _MACH_DEFAULT_PAGER_TYPES_DEFS_ */
diff --git a/include/mach/default_pager_types.h b/include/mach/default_pager_types.h
new file mode 100644
index 00000000..99e43ce3
--- /dev/null
+++ b/include/mach/default_pager_types.h
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_DEFAULT_PAGER_TYPES_H_
+#define _MACH_DEFAULT_PAGER_TYPES_H_
+
+/*
+ * Remember to update the mig type definitions
+ * in default_pager_types.defs when adding/removing fields.
+ */
+
+typedef struct default_pager_info {
+ vm_size_t dpi_total_space; /* size of backing store */
+ vm_size_t dpi_free_space; /* how much of it is unused */
+ vm_size_t dpi_page_size; /* the pager's vm page size */
+} default_pager_info_t;
+
+
+typedef struct default_pager_object {
+ vm_offset_t dpo_object; /* object managed by the pager */
+ vm_size_t dpo_size; /* backing store used for the object */
+} default_pager_object_t;
+
+typedef default_pager_object_t *default_pager_object_array_t;
+
+
+typedef struct default_pager_page {
+ vm_offset_t dpp_offset; /* offset of the page in its object */
+} default_pager_page_t;
+
+typedef default_pager_page_t *default_pager_page_array_t;
+
+typedef char default_pager_filename_t[256];
+
+#endif /* _MACH_DEFAULT_PAGER_TYPES_H_ */
diff --git a/include/mach/error.h b/include/mach/error.h
new file mode 100644
index 00000000..1aa6a841
--- /dev/null
+++ b/include/mach/error.h
@@ -0,0 +1,95 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/error.h
+ * Purpose:
+ * error module definitions
+ *
+ */
+
+#ifndef _MACH_ERROR_H_
+#define _MACH_ERROR_H_
+#include <mach/kern_return.h>
+
+/*
+ * error number layout as follows:
+ *
+ * hi lo
+ * | system(6) | subsystem(12) | code(14) |
+ */
+
+
+#define err_none (mach_error_t)0
+#define ERR_SUCCESS (mach_error_t)0
+#define ERR_ROUTINE_NIL (mach_error_fn_t)0
+
+
+#define err_system(x) (((x)&0x3f)<<26)
+#define err_sub(x) (((x)&0xfff)<<14)
+
+#define err_get_system(err) (((err)>>26)&0x3f)
+#define err_get_sub(err) (((err)>>14)&0xfff)
+#define err_get_code(err) ((err)&0x3fff)
+
+#define system_emask (err_system(0x3f))
+#define sub_emask (err_sub(0xfff))
+#define code_emask (0x3fff)
+
+
+/* Mach error systems */
+#define err_kern err_system(0x0) /* kernel */
+#define err_us err_system(0x1) /* user space library */
+#define err_server err_system(0x2) /* user space servers */
+#define err_ipc err_system(0x3) /* old ipc errors */
+#define err_mach_ipc err_system(0x4) /* mach-ipc errors */
+#define err_bootstrap err_system(0x5) /* bootstrap errors */
+#define err_hurd err_system(0x10) /* GNU Hurd server errors */
+#define err_local err_system(0x3e) /* user defined errors */
+#define err_ipc_compat err_system(0x3f) /* (compatibility) mach-ipc errors */
+
+#define err_max_system 0x3f
+
+
+/* special old "subsystems" that don't really follow the above rules */
+#define err_mig -300
+#define err_exec 6000
+
+/* unix errors get lumped into one subsystem */
+#define err_unix (err_kern|err_sub(3))
+#define unix_err(errno) (err_kern|err_sub(3)|errno)
+
+/* MS-DOS extended error codes */
+#define err_dos (err_kern|err_sub(0xd05))
+
+/* Flux OS error systems */
+#define err_fluke err_system(0x20) /* Fluke API */
+
+#ifndef ASSEMBLER
+typedef kern_return_t mach_error_t;
+typedef mach_error_t (* mach_error_fn_t)();
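+
+/*
+ * Illustrative sketch (editorial addition): composing an error value for the
+ * user-space server system and pulling the three fields back out with the
+ * accessor macros above.  The subsystem and code numbers are arbitrary.
+ */
+static int
+mach_error_layout_example(void)
+{
+	mach_error_t err = err_server | err_sub(1) | 42;
+
+	return err_get_system(err) == 0x2	/* the raw system number */
+	    && err_get_sub(err) == 1
+	    && err_get_code(err) == 42;
+}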
+#endif /* ASSEMBLER */
+
+#endif /* _MACH_ERROR_H_ */
diff --git a/include/mach/errorlib.h b/include/mach/errorlib.h
new file mode 100644
index 00000000..6c4d2bfb
--- /dev/null
+++ b/include/mach/errorlib.h
@@ -0,0 +1,69 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * File: errorlib.h
+ * Author: Douglas Orr, Carnegie Mellon University
+ * Date: Mar. 1988
+ *
+ * Error bases for subsystem errors.
+ */
+
+#include <mach/error.h>
+
+#define KERN_DEVICE_MOD (err_kern|err_sub(1))
+
+#define BOOTSTRAP_FS_MOD (err_bootstrap|err_sub(0))
+
+#define MACH_IPC_SEND_MOD (err_mach_ipc|err_sub(0))
+#define MACH_IPC_RCV_MOD (err_mach_ipc|err_sub(1))
+#define MACH_IPC_MIG_MOD (err_mach_ipc|err_sub(2))
+
+#define IPC_SEND_MOD (err_ipc|err_sub(0))
+#define IPC_RCV_MOD (err_ipc|err_sub(1))
+#define IPC_MIG_MOD (err_ipc|err_sub(2))
+
+#define SERV_NETNAME_MOD (err_server|err_sub(0))
+#define SERV_ENV_MOD (err_server|err_sub(1))
+#define SERV_EXECD_MOD (err_server|err_sub(2))
+
+
+#define NO_SUCH_ERROR "unknown error code"
+
+struct error_subsystem {
+ char * subsys_name;
+ int max_code;
+ char * * codes;
+};
+
+struct error_system {
+ int max_sub;
+ char * bad_sub;
+ struct error_subsystem * subsystem;
+};
+
+extern struct error_system errors[err_max_system+1];
+
+#define errlib_count(s) (sizeof(s)/sizeof(s[0]))
diff --git a/include/mach/exc.defs b/include/mach/exc.defs
new file mode 100644
index 00000000..94af828c
--- /dev/null
+++ b/include/mach/exc.defs
@@ -0,0 +1,47 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Abstract:
+ * MiG definitions file for Mach exception interface.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+ exc 2400;
+
+#include <mach/std_types.defs>
+
+ServerPrefix catch_;
+
+routine exception_raise(
+ exception_port : mach_port_t;
+ thread : mach_port_t;
+ task : mach_port_t;
+ exception : integer_t;
+ code : integer_t;
+ subcode : integer_t);
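+
+/*
+ * Editorial note (illustrative, not part of the original interface file):
+ * with "ServerPrefix catch_", an exception handler links in the server side
+ * of this subsystem and provides a routine expected to be shaped roughly as
+ *
+ *	kern_return_t
+ *	catch_exception_raise(mach_port_t exception_port,
+ *			      mach_port_t thread, mach_port_t task,
+ *			      integer_t exception, integer_t code,
+ *			      integer_t subcode)
+ *	{
+ *		if (exception == EXC_BREAKPOINT)
+ *			return KERN_SUCCESS;	   handled: thread resumes
+ *		return KERN_FAILURE;		   unhandled
+ *	}
+ *
+ * Returning KERN_SUCCESS from the handler is how it tells the kernel that
+ * the exception was dealt with.
+ */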
diff --git a/include/mach/exception.h b/include/mach/exception.h
new file mode 100644
index 00000000..c44fd538
--- /dev/null
+++ b/include/mach/exception.h
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_EXCEPTION_H_
+#define _MACH_EXCEPTION_H_
+
+#include <mach/machine/exception.h>
+
+/*
+ * Machine-independent exception definitions.
+ */
+
+#define EXC_BAD_ACCESS 1 /* Could not access memory */
+ /* Code contains kern_return_t describing error. */
+ /* Subcode contains bad memory address. */
+
+#define EXC_BAD_INSTRUCTION 2 /* Instruction failed */
+ /* Illegal or undefined instruction or operand */
+
+#define EXC_ARITHMETIC 3 /* Arithmetic exception */
+ /* Exact nature of exception is in code field */
+
+#define EXC_EMULATION 4 /* Emulation instruction */
+ /* Emulation support instruction encountered */
+ /* Details in code and subcode fields */
+
+#define EXC_SOFTWARE 5 /* Software generated exception */
+ /* Exact exception is in code field. */
+ /* Codes 0 - 0xFFFF reserved to hardware */
+ /* Codes 0x10000 - 0x1FFFF reserved for OS emulation (Unix) */
+
+#define EXC_BREAKPOINT 6 /* Trace, breakpoint, etc. */
+ /* Details in code field. */
+
+#endif /* _MACH_EXCEPTION_H_ */
diff --git a/include/mach/exec/a.out.h b/include/mach/exec/a.out.h
new file mode 100644
index 00000000..c6dcaff3
--- /dev/null
+++ b/include/mach/exec/a.out.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_A_OUT_
+#define _MACH_A_OUT_
+
+struct exec
+{
+ unsigned long a_magic; /* magic number */
+ unsigned long a_text; /* size of text segment */
+ unsigned long a_data; /* size of initialized data */
+ unsigned long a_bss; /* size of uninitialized data */
+ unsigned long a_syms; /* size of symbol table */
+ unsigned long a_entry; /* entry point */
+ unsigned long a_trsize; /* size of text relocation */
+ unsigned long a_drsize; /* size of data relocation */
+};
+
+struct nlist {
+ long n_strx;
+ unsigned char n_type;
+ char n_other;
+ short n_desc;
+ unsigned long n_value;
+};
+
+#define OMAGIC 0407
+#define NMAGIC 0410
+#define ZMAGIC 0413
+#define QMAGIC 0314
+
+#define N_GETMAGIC(ex) \
+ ( (ex).a_magic & 0xffff )
+#define N_GETMAGIC_NET(ex) \
+ (ntohl((ex).a_magic) & 0xffff)
+
+/* Valid magic number check. */
+#define N_BADMAG(ex) \
+ (N_GETMAGIC(ex) != OMAGIC && N_GETMAGIC(ex) != NMAGIC && \
+ N_GETMAGIC(ex) != ZMAGIC && N_GETMAGIC(ex) != QMAGIC && \
+ N_GETMAGIC_NET(ex) != OMAGIC && N_GETMAGIC_NET(ex) != NMAGIC && \
+ N_GETMAGIC_NET(ex) != ZMAGIC && N_GETMAGIC_NET(ex) != QMAGIC)
+
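+/*
+ * Illustrative sketch (editorial addition): accepting only the magic numbers
+ * defined above, in host byte order.  N_GETMAGIC_NET would cover a
+ * byte-swapped header but needs an ntohl() from elsewhere.
+ */
+static int
+aout_magic_ok(const struct exec *ex)
+{
+	unsigned long m = N_GETMAGIC(*ex);
+
+	return m == OMAGIC || m == NMAGIC || m == ZMAGIC || m == QMAGIC;
+}
+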
+/* We don't provide any N_???OFF macros here
+ because they vary too much between the different a.out variants;
+ it's practically impossible to create one set of macros
+ that works for UX, FreeBSD, NetBSD, Linux, etc. */
+
+#endif /* _MACH_A_OUT_ */
diff --git a/include/mach/exec/elf.h b/include/mach/exec/elf.h
new file mode 100644
index 00000000..81989309
--- /dev/null
+++ b/include/mach/exec/elf.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ *
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+#ifndef _MACH_EXEC_ELF_H_
+#define _MACH_EXEC_ELF_H_
+
+#include <mach/machine/exec/elf.h>
+
+/* ELF Header - figure 4-3, page 4-4 */
+
+#define EI_NIDENT 16
+
+typedef struct {
+ unsigned char e_ident[EI_NIDENT];
+ Elf32_Half e_type;
+ Elf32_Half e_machine;
+ Elf32_Word e_version;
+ Elf32_Addr e_entry;
+ Elf32_Off e_phoff;
+ Elf32_Off e_shoff;
+ Elf32_Word e_flags;
+ Elf32_Half e_ehsize;
+ Elf32_Half e_phentsize;
+ Elf32_Half e_phnum;
+ Elf32_Half e_shentsize;
+ Elf32_Half e_shnum;
+ Elf32_Half e_shstrndx;
+} Elf32_Ehdr;
+
+
+/* e_ident[] identification indexes - figure 4-4, page 4-7 */
+
+#define EI_MAG0 0
+#define EI_MAG1 1
+#define EI_MAG2 2
+#define EI_MAG3 3
+#define EI_CLASS 4
+#define EI_DATA 5
+#define EI_VERSION 6
+#define EI_PAD 7
+
+/* magic number - pg 4-8 */
+
+#define ELFMAG0 0x7f
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+
+/* file class or capacity - page 4-8 */
+
+#define ELFCLASSNONE 0
+#define ELFCLASS32 1
+#define ELFCLASS64 2
+
+/* date encoding - page 4-9 */
+
+#define ELFDATANONE 0
+#define ELFDATA2LSB 1
+#define ELFDATA2MSB 2
+
+/* object file types - page 4-5 */
+
+#define ET_NONE 0
+#define ET_REL 1
+#define ET_EXEC 2
+#define ET_DYN 3
+#define ET_CORE 4
+
+#define ET_LOPROC 0xff00
+#define ET_HIPROC 0xffff
+
+/* architecture - page 4-5 */
+
+#define EM_NONE 0
+#define EM_M32 1
+#define EM_SPARC 2
+#define EM_386 3
+#define EM_68K 4
+#define EM_88K 5
+#define EM_860 7
+#define EM_MIPS 8
+#define EM_MIPS_RS4_BE 10
+#define EM_SPARC64 11
+#define EM_PARISC 15
+#define EM_PPC 20
+
+/* version - page 4-6 */
+
+#define EV_NONE 0
+#define EV_CURRENT 1
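+
+/*
+ * Illustrative sketch (editorial addition): the identification checks a
+ * 32-bit little-endian loader would make before trusting the rest of the
+ * header.  The class, byte order and ET_EXEC type are fixed here purely
+ * for the example.
+ */
+static int
+elf32_header_ok(const Elf32_Ehdr *eh)
+{
+	return eh->e_ident[EI_MAG0] == ELFMAG0
+	    && eh->e_ident[EI_MAG1] == ELFMAG1
+	    && eh->e_ident[EI_MAG2] == ELFMAG2
+	    && eh->e_ident[EI_MAG3] == ELFMAG3
+	    && eh->e_ident[EI_CLASS] == ELFCLASS32
+	    && eh->e_ident[EI_DATA] == ELFDATA2LSB
+	    && eh->e_ident[EI_VERSION] == EV_CURRENT
+	    && eh->e_type == ET_EXEC
+	    && eh->e_version == EV_CURRENT;
+}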
+
+/* special section indexes - page 4-11, figure 4-7 */
+
+#define SHN_UNDEF 0
+#define SHN_LORESERVE 0xff00
+#define SHN_LOPROC 0xff00
+#define SHN_HIPROC 0xff1f
+#define SHN_ABS 0xfff1
+#define SHN_COMMON 0xfff2
+#define SHN_HIRESERVE 0xffff
+
+/* section header - page 4-13, figure 4-8 */
+
+typedef struct {
+ Elf32_Word sh_name;
+ Elf32_Word sh_type;
+ Elf32_Word sh_flags;
+ Elf32_Addr sh_addr;
+ Elf32_Off sh_offset;
+ Elf32_Word sh_size;
+ Elf32_Word sh_link;
+ Elf32_Word sh_info;
+ Elf32_Word sh_addralign;
+ Elf32_Word sh_entsize;
+} Elf32_Shdr;
+
+/* section types - page 4-15, figure 4-9 */
+
+#define SHT_NULL 0
+#define SHT_PROGBITS 1
+#define SHT_SYMTAB 2
+#define SHT_STRTAB 3
+#define SHT_RELA 4
+#define SHT_HASH 5
+#define SHT_DYNAMIC 6
+#define SHT_NOTE 7
+#define SHT_NOBITS 8
+#define SHT_REL 9
+#define SHT_SHLIB 10
+#define SHT_DYNSYM 11
+
+#define SHT_LOPROC 0x70000000
+#define SHT_HIPROC 0x7fffffff
+#define SHT_LOUSER 0x80000000
+#define SHT_HIUSER 0xffffffff
+
+/* section attribute flags - page 4-18, figure 4-11 */
+
+#define SHF_WRITE 0x1
+#define SHF_ALLOC 0x2
+#define SHF_EXECINSTR 0x4
+#define SHF_MASKPROC 0xf0000000
+
+/* symbol table - page 4-25, figure 4-15 */
+typedef struct
+{
+ Elf32_Word st_name;
+ Elf32_Addr st_value;
+ Elf32_Word st_size;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf32_Half st_shndx;
+} Elf32_Sym;
+
+/* symbol type and binding attributes - page 4-26 */
+
+#define ELF32_ST_BIND(i) ((i) >> 4)
+#define ELF32_ST_TYPE(i) ((i) & 0xf)
+#define ELF32_ST_INFO(b,t) (((b)<<4)+((t)&0xf))
+
+/* symbol binding - page 4-26, figure 4-16 */
+
+#define STB_LOCAL 0
+#define STB_GLOBAL 1
+#define STB_WEAK 2
+#define STB_LOPROC 13
+#define STB_HIPROC 15
+
+/* symbol types - page 4-28, figure 4-17 */
+
+#define STT_NOTYPE 0
+#define STT_OBJECT 1
+#define STT_FUNC 2
+#define STT_SECTION 3
+#define STT_FILE 4
+#define STT_LOPROC 13
+#define STT_HIPROC 15
+
+
+/* relocation entries - page 4-31, figure 4-19 */
+
+typedef struct
+{
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+} Elf32_Rel;
+
+typedef struct
+{
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+ Elf32_Sword r_addend;
+} Elf32_Rela;
+
+/* Macros to split/combine relocation type and symbol page 4-32 */
+
+#define ELF32_R_SYM(__i) ((__i)>>8)
+#define ELF32_R_TYPE(__i) ((unsigned char) (__i))
+#define ELF32_R_INFO(__s, __t) (((__s)<<8) + (unsigned char) (__t))
+
+
+/* program header - page 5-2, figure 5-1 */
+
+typedef struct {
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+ Elf32_Addr p_vaddr;
+ Elf32_Addr p_paddr;
+ Elf32_Word p_filesz;
+ Elf32_Word p_memsz;
+ Elf32_Word p_flags;
+ Elf32_Word p_align;
+} Elf32_Phdr;
+
+/* segment types - page 5-3, figure 5-2 */
+
+#define PT_NULL 0
+#define PT_LOAD 1
+#define PT_DYNAMIC 2
+#define PT_INTERP 3
+#define PT_NOTE 4
+#define PT_SHLIB 5
+#define PT_PHDR 6
+
+#define PT_LOPROC 0x70000000
+#define PT_HIPROC 0x7fffffff
+
+/* segment permissions - page 5-6 */
+
+#define PF_X 0x1
+#define PF_W 0x2
+#define PF_R 0x4
+#define PF_MASKPROC 0xf0000000
+
+
+/* dynamic structure - page 5-15, figure 5-9 */
+
+typedef struct {
+ Elf32_Sword d_tag;
+ union {
+ Elf32_Word d_val;
+ Elf32_Addr d_ptr;
+ } d_un;
+} Elf32_Dyn;
+
+/* Dynamic array tags - page 5-16, figure 5-10. */
+
+#define DT_NULL 0
+#define DT_NEEDED 1
+#define DT_PLTRELSZ 2
+#define DT_PLTGOT 3
+#define DT_HASH 4
+#define DT_STRTAB 5
+#define DT_SYMTAB 6
+#define DT_RELA 7
+#define DT_RELASZ 8
+#define DT_RELAENT 9
+#define DT_STRSZ 10
+#define DT_SYMENT 11
+#define DT_INIT 12
+#define DT_FINI 13
+#define DT_SONAME 14
+#define DT_RPATH 15
+#define DT_SYMBOLIC 16
+#define DT_REL 17
+#define DT_RELSZ 18
+#define DT_RELENT 19
+#define DT_PLTREL 20
+#define DT_DEBUG 21
+#define DT_TEXTREL 22
+#define DT_JMPREL 23
+
+/*
+ * Bootstrap doesn't need machine dependent extensions.
+ */
+
+#endif /* _MACH_EXEC_ELF_H_ */
diff --git a/include/mach/exec/exec.h b/include/mach/exec/exec.h
new file mode 100644
index 00000000..94b234b0
--- /dev/null
+++ b/include/mach/exec/exec.h
@@ -0,0 +1,130 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#ifndef _MACH_EXEC_H_
+#define _MACH_EXEC_H_
+
+#include <mach/machine/vm_types.h>
+#include <mach/vm_prot.h>
+
+/* XXX */
+typedef enum
+{
+ EXEC_ELF = 1,
+ EXEC_AOUT = 2,
+} exec_format_t;
+
+typedef struct exec_info
+{
+ /* Format of executable loaded - see above. */
+ exec_format_t format;
+
+ /* Program entrypoint. */
+ vm_offset_t entry;
+
+ /* Initial data pointer - only some architectures use this. */
+ vm_offset_t init_dp;
+
+ /* (ELF) Address of interpreter string for loading shared libraries, null if none. */
+ vm_offset_t interp;
+
+} exec_info_t;
+
+typedef int exec_sectype_t;
+#define EXEC_SECTYPE_READ VM_PROT_READ
+#define EXEC_SECTYPE_WRITE VM_PROT_WRITE
+#define EXEC_SECTYPE_EXECUTE VM_PROT_EXECUTE
+#define EXEC_SECTYPE_PROT_MASK VM_PROT_ALL
+#define EXEC_SECTYPE_ALLOC ((exec_sectype_t)0x000100)
+#define EXEC_SECTYPE_LOAD ((exec_sectype_t)0x000200)
+#define EXEC_SECTYPE_DEBUG ((exec_sectype_t)0x010000)
+#define EXEC_SECTYPE_AOUT_SYMTAB ((exec_sectype_t)0x020000)
+#define EXEC_SECTYPE_AOUT_STRTAB ((exec_sectype_t)0x040000)
+
+typedef int exec_read_func_t(void *handle, vm_offset_t file_ofs,
+ void *buf, vm_size_t size,
+ vm_size_t *out_actual);
+
+typedef int exec_read_exec_func_t(void *handle,
+ vm_offset_t file_ofs, vm_size_t file_size,
+ vm_offset_t mem_addr, vm_size_t mem_size,
+ exec_sectype_t section_type);
+
+/*
+ * Routines exported from libmach_exec.a
+ */
+
+/* Generic function to interpret an executable "file"
+ and "load" it into "memory".
+ Doesn't really know about files, loading, or memory;
+ all file I/O and destination memory accesses
+ go through provided functions.
+ Thus, this is a very generic loading mechanism.
+
+ The read() function is used to read metadata from the file
+ into the local address space.
+
+ The read_exec() function is used to load the actual sections.
+ It is used for all kinds of sections - code, data, bss, debugging data.
+ The 'section_type' parameter specifies what type of section is being loaded.
+
+ For code, data, and bss, the EXEC_SECTYPE_ALLOC flag will be set.
+ For code and data (i.e. stuff that's actually loaded from the file),
+ EXEC_SECTYPE_LOAD will also be set.
+ The EXEC_SECTYPE_PROT_MASK contains the intended access permissions
+ for the section.
+ 'file_size' may be less than 'mem_size';
+ the remaining data must be zero-filled.
+ 'mem_size' is always greater than zero, but 'file_size' may be zero
+ (e.g. in the case of a bss section).
+ No two read_exec() calls for one executable
+ will load data into the same virtual memory page,
+ although they may load from arbitrary (possibly overlapping) file positions.
+
+ For sections that aren't normally loaded into the process image
+ (e.g. debug sections), EXEC_SECTYPE_ALLOC isn't set,
+ but some other appropriate flag is set to indicate the type of section.
+
+ The 'handle' is an opaque pointer which is simply passed on
+ to the read() and read_exec() functions.
+
+ On return, the specified info structure is filled in
+ with information about the loaded executable.
+*/
+int exec_load(exec_read_func_t *read, exec_read_exec_func_t *read_exec,
+ void *handle, exec_info_t *out_info);
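+
+/*
+ * Illustrative sketch (editorial addition): the simplest exec_read_func_t,
+ * serving exec_load() from an executable image already sitting in memory.
+ * "struct mem_image" is a hypothetical caller-defined handle; a return
+ * value of 0 is assumed to mean success, with any nonzero value propagated
+ * as an error by exec_load().
+ */
+struct mem_image {
+	const char *base;	/* start of the raw image */
+	vm_size_t size;		/* its length in bytes */
+};
+
+static int
+mem_image_read(void *handle, vm_offset_t file_ofs,
+	       void *buf, vm_size_t size, vm_size_t *out_actual)
+{
+	struct mem_image *im = (struct mem_image *) handle;
+	char *dst = (char *) buf;
+	vm_size_t n = size, i;
+
+	if (file_ofs >= im->size)
+		n = 0;
+	else if (file_ofs + n > im->size)
+		n = im->size - file_ofs;	/* short read at end of image */
+	for (i = 0; i < n; i++)
+		dst[i] = im->base[file_ofs + i];
+	*out_actual = n;
+	return 0;
+}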
+
+/*
+ * Error codes
+ */
+
+#define EX_NOT_EXECUTABLE 6000 /* not a recognized executable format */
+#define EX_WRONG_ARCH 6001 /* valid executable, but wrong arch. */
+#define EX_CORRUPT 6002 /* recognized executable, but mangled */
+#define EX_BAD_LAYOUT 6003 /* something wrong with the memory or file image layout */
+
+
+#endif /* _MACH_EXEC_H_ */
diff --git a/include/mach/flick_mach3.h b/include/mach/flick_mach3.h
new file mode 100644
index 00000000..7d7675db
--- /dev/null
+++ b/include/mach/flick_mach3.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * Glue for Flick's Mach 3 backend. (Flick := Flexible IDL Compiler Kit.)
+ * This file is included from every header file generated by that backend.
+ * It provides standard MOM types and routines.
+ */
+#ifndef _MACH_FLICK_MACH3MIG_H_
+#define _MACH_FLICK_MACH3MIG_H_
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <mach/message.h>
+
+typedef char mom_char8_t;
+typedef unsigned8_t mom_unsigned8_t;
+typedef unsigned16_t mom_unsigned16_t;
+typedef unsigned32_t mom_unsigned32_t;
+typedef signed8_t mom_signed8_t;
+typedef signed16_t mom_signed16_t;
+typedef signed32_t mom_signed32_t;
+
+typedef int mom_key_t;
+
+struct mom_ref
+{
+ mom_refcount_t count;
+ mach_port_t port;
+
+ /* Chain on hash mom_ref hash table,
+ for canonicalization. */
+ struct mom_ref *hash_next;
+
+ /* Array of associations for this reference. */
+ int assoc_count;
+ void *assoc[0];
+};
+typedef struct mom_ref *mom_ref_t;
+
+struct mom_obj
+{
+ void *handle;
+
+ int port_count;
+ mach_port_t port[0];
+};
+typedef struct mom_obj *mom_obj_t;
+
+#define MOM__LABEL_BITS 8
+#define MOM__MAX_LABELS (1 << MOM__LABEL_BITS)
+
+#define mom_get_label_bits() MOM__LABEL_BITS
+
+
+#endif /* _MACH_FLICK_MACH3MIG_H_ */
diff --git a/include/mach/flick_mach3_glue.h b/include/mach/flick_mach3_glue.h
new file mode 100644
index 00000000..7deb6203
--- /dev/null
+++ b/include/mach/flick_mach3_glue.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * Glue for Flick's Mach 3 backend. (Flick := Flexible IDL Compiler Kit.)
+ * This file is included from every stub source code (.c) file generated by that backend.
+ * Stubs are built primarily out of invocations of these macros.
+ */
+#ifndef _MACH_FLICK_MACH3_GLUE_H_
+#define _MACH_FLICK_MACH3_GLUE_H_
+
+#include <stdlib.h>
+#include <string.h>
+#include <mach/flick_mach3.h>
+
+
+/*** Internal Flick data types ***/
+
+/* Each client stub allocates one of these on its stack first thing,
+ and holds all the important generic state throughout RPC processing. */
+struct flick_mach3_rpc_desc
+{
+ /* This is initially set to point to init_buf,
+ but is dynamically re-allocated if more space is needed. */
+ char *msg_buf;
+ vm_size_t msg_buf_size;
+
+ /* Before calling flick_mach3_rpc(),
+ the client stub sets this to the offset of the end of the data it marshaled.
+ It always starts marshaling at offset 4*4, to leave room for a mach_msg_header
+ (which is actually 6*4 bytes, but overwrites the 2*4-byte marshaled IDL ID). */
+ vm_size_t send_end_ofs;
+
+ /* flick_mach3_rpc() sets these to the offset of the data to unmarshal,
+ and the offset of the end of the data to unmarshal, respectively. */
+ vm_size_t rcv_ofs, rcv_end_ofs;
+
+ /* The size of this buffer varies from stub to stub. */
+ char init_buf[0];
+};
+
+/* Each server stub allocates one of these on its stack first thing,
+ and holds all the important generic state throughout RPC processing. */
+struct flick_mach3_rpc_serv_desc
+{
+ /* This is initially set to point to init_buf,
+ but is dynamically re-allocated if more space is needed. */
+ char *msg_buf;
+ vm_size_t msg_buf_size;
+
+ /* Before calling flick_mach3_rpc(),
+ the client stub sets this to the offset of the end of the data it marshaled.
+ It always starts marshaling at offset 4*4, to leave room for a mach_msg_header
+ (which is actually 6*4 bytes, but overwrites the 2*4-byte marshaled IDL ID). */
+ vm_size_t send_end_ofs;
+
+ /* flick_mach3_rpc() sets these to the offset of the data to unmarshal,
+ and the offset of the end of the data to unmarshal, respectively. */
+ vm_size_t rcv_ofs, rcv_end_ofs;
+
+ /* The size of this buffer varies from stub to stub. */
+ char init_buf[0];
+};
+
+
+/*** Encoding ***/
+
+#define flick_mach3_encode_new_glob(max_size) \
+{ \
+ while (_desc.d.send_end_ofs + (max_size) > _desc.d.msg_buf_size) \
+ { \
+ mach_msg_return_t result = flick_mach3_rpc_grow_buf(&_desc); \
+ /*XXX result */ \
+ } \
+ _e_chunk = _desc.d.msg_buf + _desc.d.send_end_ofs; \
+}
+#define flick_mach3_encode_end_glob(max_size) \
+ _desc.d.send_end_ofs += (max_size);
+
+#define flick_mach3_encode_new_chunk(size) /* do nothing */
+#define flick_mach3_encode_end_chunk(size) (_e_chunk += (size))
+
+#define flick_mach3_encode_prim(_ofs, _data, _name, _bits, _ctype) \
+{ \
+ struct { mach_msg_type_t _t; _ctype _v; } *_p = (void*)(_e_chunk + _ofs); \
+ mach_msg_type_t _tmpl = { _name, _bits, 1, 1, 0, 0 }; \
+ _p->_t = _tmpl; _p->_v = (_data); \
+}
+
+#define flick_mach3_encode_boolean(_ofs, _data) \
+ flick_mach3_encode_prim(_ofs, _data, MACH_MSG_TYPE_BOOLEAN, 32, signed32_t);
+
+#define flick_mach3_encode_char8(_ofs, _data) \
+ flick_mach3_encode_prim(_ofs, _data, MACH_MSG_TYPE_CHAR, 8, signed8_t);
+#define flick_mach3_encode_char16(_ofs, _data) \
+ flick_mach3_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 8, signed16_t);
+
+#define flick_mach3_encode_signed8(_ofs, _data) \
+ flick_mach3_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, signed8_t);
+#define flick_mach3_encode_unsigned8(_ofs, _data) \
+ flick_mach3_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, unsigned8_t);
+#define flick_mach3_encode_signed16(_ofs, _data) \
+ flick_mach3_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, signed16_t);
+#define flick_mach3_encode_unsigned16(_ofs, _data) \
+ flick_mach3_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, unsigned16_t);
+#define flick_mach3_encode_signed32(_ofs, _data) \
+ flick_mach3_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, signed32_t);
+#define flick_mach3_encode_unsigned32(_ofs, _data) \
+ flick_mach3_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, unsigned32_t);
+
+#define flick_mach3_encode_port(_ofs, _data, _adjust) \
+{ \
+ if (_adjust > 1) { \
+ kern_return_t res = mach_port_mod_refs(mach_task_self(), (_data), \
+ MACH_PORT_RIGHT_SEND, -(_adjust-1)); \
+ } \
+ flick_mach3_encode_prim(_ofs, _data, \
+ _adjust ? MACH_MSG_TYPE_MOVE_SEND : MACH_MSG_TYPE_COPY_SEND, \
+ 32, mach_port_t); \
+}
+
+
+/*** Decoding ***/
+
+#if TypeCheck
+#define flick_iftypecheck(code) code
+#else
+#define flick_iftypecheck(code)
+#endif
+
+#define flick_mach3_decode_new_glob(max_size)
+#define flick_mach3_decode_end_glob(max_size)
+
+#define flick_mach3_decode_new_chunk(size) \
+{ \
+ flick_iftypecheck( \
+ if (_desc.d.rcv_ofs + (size) > _d_msgsize) \
+ XXX throw MIG_TYPE_ERROR; \
+ ); \
+ _d_chunk = _desc.d.msg_buf + _desc.d.rcv_ofs; \
+}
+#define flick_mach3_decode_end_chunk(size) \
+ _desc.d.rcv_ofs += (size);
+
+#define flick_mach3_decode_prim(_ofs, _data, _name, _bits, _ctype) \
+{ \
+ struct { mach_msg_type_t _t; _ctype _v; } *_p = (void*)(_d_chunk + _ofs); \
+ flick_iftypecheck( ({ \
+ mach_msg_type_t _tmpl = { _name, _bits, 1, 1, 0, 0 }; \
+		if (*((signed32_t*)&_tmpl) != *((signed32_t*)&_p->_t)) \
+			XXX throw MIG_TYPE_ERROR; \
+	}) ) \
+ (_data) = _p->_v; \
+}
+
+#define flick_mach3_decode_boolean(_ofs, _data) \
+ flick_mach3_decode_prim(_ofs, _data, MACH_MSG_TYPE_BOOLEAN, 32, signed32_t);
+
+#define flick_mach3_decode_char8(_ofs, _data) \
+ flick_mach3_decode_prim(_ofs, _data, MACH_MSG_TYPE_CHAR, 8, signed8_t);
+#define flick_mach3_decode_char16(_ofs, _data) \
+ flick_mach3_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 8, signed16_t);
+
+#define flick_mach3_decode_signed8(_ofs, _data) \
+ flick_mach3_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, signed8_t);
+#define flick_mach3_decode_unsigned8(_ofs, _data) \
+ flick_mach3_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, unsigned8_t);
+#define flick_mach3_decode_signed16(_ofs, _data) \
+ flick_mach3_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, signed16_t);
+#define flick_mach3_decode_unsigned16(_ofs, _data) \
+ flick_mach3_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, unsigned16_t);
+#define flick_mach3_decode_signed32(_ofs, _data) \
+ flick_mach3_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, signed32_t);
+#define flick_mach3_decode_unsigned32(_ofs, _data) \
+ flick_mach3_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, unsigned32_t);
+
+#define flick_mach3_decode_port(_ofs, _data, _adjust) \
+{ \
+ flick_mach3_decode_prim(_ofs, _data, MACH_MSG_TYPE_PORT_SEND, 32, mach_port_t); \
+ if (_adjust != 1) { \
+ kern_return_t res = mach_port_mod_refs(mach_task_self(), (_data), \
+ MACH_PORT_RIGHT_SEND, _adjust-1); \
+ } \
+}
+
+
+/*** Client-side support ***/
+
+mach_msg_return_t flick_mach3_rpc(struct flick_mach3_rpc_desc *rpc,
+ mach_port_t send_target, mach_msg_bits_t send_msgh_bits);
+
+#define flick_mach3_rpc_macro(iscomplex) \
+{ \
+ kern_return_t result = flick_mach3_rpc(&_desc.d, _msg_request/*XXX*/, \
+ (iscomplex ? MACH_MSGH_BITS_COMPLEX : 0) \
+ | MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND_ONCE));\
+}
+
+#endif /* _MACH_FLICK_MACH3_GLUE_H_ */
diff --git a/include/mach/flick_mach3mig_glue.h b/include/mach/flick_mach3mig_glue.h
new file mode 100644
index 00000000..c1ed04ac
--- /dev/null
+++ b/include/mach/flick_mach3mig_glue.h
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * Glue for Flick's Mach 3 backend. (Flick := Flexible IDL Compiler Kit.)
+ * This file is included from every stub source code (.c) file generated by that backend.
+ * Stubs are built primarily out of invocations of these macros.
+ */
+#ifndef _MACH_FLICK_MACH3MIG_GLUE_H_
+#define _MACH_FLICK_MACH3MIG_GLUE_H_
+
+#include <stdlib.h>
+#include <string.h>
+#include <mach/mig_errors.h>
+#include <mach/flick_mach3mig.h>
+
+
+#define FLICK_NO_MEMORY 499 /*XXX*/
+
+
+/*** Internal Flick data types ***/
+
+/* Each client stub allocates one of these on its stack first thing,
+ and holds all the important generic state throughout RPC processing. */
+struct flick_mach3mig_rpc_desc
+{
+ /* This is initially set to point to init_buf,
+ but is dynamically re-allocated if more space is needed. */
+ mig_reply_header_t *msg_buf;
+ vm_size_t msg_buf_size;
+
+ /* Before calling flick_mach3mig_rpc(),
+ the client stub sets this to the offset of the end of the data it marshaled.
+ It always starts marshaling just after the Mach message header. */
+ vm_size_t send_end_ofs;
+
+ /* flick_mach3mig_rpc() sets these to the offset of the data to unmarshal,
+ and the offset of the end of the data to unmarshal, respectively. */
+ vm_size_t rcv_ofs, rcv_end_ofs;
+
+ /* The actual size of this buffer varies from stub to stub. */
+ mig_reply_header_t init_buf;
+};
+
+/* Each server stub allocates one of these on its stack first thing,
+ and holds all the important generic state throughout RPC processing. */
+struct flick_mach3mig_rpc_serv_desc
+{
+ /* During decoding msg_buf is InHeadP;
+ during encoding msg_buf is OutHeadP.
+ msg_buf_size is always simply a "very large" constant -
+ i.e. we don't know how big the buffer is; we just assume it's big enough. */
+ mig_reply_header_t *msg_buf;
+ vm_size_t msg_buf_size;
+
+ /* flick_mach3mig_serv_start_encode() sets these
+ to the offset of the data to unmarshal,
+ and the offset of the end of the data to unmarshal, respectively. */
+ vm_size_t rcv_ofs, rcv_end_ofs;
+
+ /* After the reply message has been encoded,
+ this contains the offset of the end of the data it marshaled. */
+ vm_size_t send_end_ofs;
+};
+
+
+
+/*** Memory allocation/deallocation ***/
+
+#define flick_alloc_mach_vm(size) \
+({ \
+ vm_address_t addr; \
+ if (_err = vm_allocate(mach_task_self(), &addr, (size), 1)) return _err; \
+ (void*)addr; \
+})
+#define flick_free_mach_vm(addr, size) \
+ if (_err = vm_deallocate(mach_task_self(), (addr), (size))) return _err;
+
+
+/*** Encoding ***/
+
+#define flick_mach3mig_encode_target(_data, _adjust) \
+{ \
+ if (_adjust > 1) { \
+ if (_err = mach_port_mod_refs(mach_task_self(), (_data), \
+ MACH_PORT_RIGHT_SEND, -(_adjust-1))) return _err; \
+ } \
+ _desc.d.msg_buf->Head.msgh_remote_port = (_data); \
+ _desc.d.msg_buf->Head.msgh_bits = MACH_MSGH_BITS( \
+ _adjust ? MACH_MSG_TYPE_MOVE_SEND : MACH_MSG_TYPE_COPY_SEND, 0); \
+}
+
+/* Primitive types with individual type descriptors. */
+#define flick_mach3mig_encode_new_glob(max_size) \
+{ \
+ while (_desc.d.send_end_ofs + (max_size) > _desc.d.msg_buf_size) \
+ if (_err = flick_mach3mig_rpc_grow_buf(&_desc)) return _err; \
+ _e_chunk = (void*)_desc.d.msg_buf + _desc.d.send_end_ofs; \
+}
+#define flick_mach3mig_encode_end_glob(max_size) \
+ _desc.d.send_end_ofs += (max_size);
+
+#define flick_mach3mig_encode_new_chunk(size) /* do nothing */
+#define flick_mach3mig_encode_end_chunk(size) (_e_chunk += (size))
+
+#define flick_mach3mig_encode_prim(_ofs, _data, _name, _bits, _ctype) \
+{ \
+ struct { mach_msg_type_t _t; _ctype _v; } *_p = (void*)(_e_chunk + _ofs); \
+ mach_msg_type_t _tmpl = { _name, _bits, 1, 1, 0, 0 }; \
+ _p->_t = _tmpl; _p->_v = (_data); \
+}
+#define flick_mach3mig_encode_boolean(_ofs, _data) \
+ flick_mach3mig_encode_prim(_ofs, _data, MACH_MSG_TYPE_BOOLEAN, 32, signed32_t);
+#define flick_mach3mig_encode_char8(_ofs, _data) \
+ flick_mach3mig_encode_prim(_ofs, _data, MACH_MSG_TYPE_CHAR, 8, signed8_t);
+#define flick_mach3mig_encode_char16(_ofs, _data) \
+ flick_mach3mig_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 8, signed16_t);
+#define flick_mach3mig_encode_signed8(_ofs, _data) \
+ flick_mach3mig_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, signed8_t);
+#define flick_mach3mig_encode_unsigned8(_ofs, _data) \
+ flick_mach3mig_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, unsigned8_t);
+#define flick_mach3mig_encode_signed16(_ofs, _data) \
+ flick_mach3mig_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, signed16_t);
+#define flick_mach3mig_encode_unsigned16(_ofs, _data) \
+ flick_mach3mig_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, unsigned16_t);
+#define flick_mach3mig_encode_signed32(_ofs, _data) \
+ flick_mach3mig_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, signed32_t);
+#define flick_mach3mig_encode_unsigned32(_ofs, _data) \
+ flick_mach3mig_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, unsigned32_t);
+#define flick_mach3mig_encode_port(_ofs, _data, _adjust) \
+{ \
+ if (_adjust > 1) { \
+ if (_err = mach_port_mod_refs(mach_task_self(), (_data), \
+ MACH_PORT_RIGHT_SEND, -(_adjust-1))) return _err; \
+ } \
+ flick_mach3mig_encode_prim(_ofs, _data, \
+ _adjust ? MACH_MSG_TYPE_MOVE_SEND : MACH_MSG_TYPE_COPY_SEND, \
+ 32, mach_port_t); \
+}
+
+/* Array type descriptors. */
+#define flick_mach3mig_array_encode_type(_ofs, _name, _bits, _ctype, _num, _inl) \
+{ \
+ mach_msg_type_t *_p = (void*)(_e_chunk + _ofs); \
+ mach_msg_type_t _tmpl = { _name, _bits, _num, _inl, 0, 0 }; \
+ *_p = _tmpl; \
+}
+#define flick_mach3mig_array_encode_long_type(_ofs, _name, _bits, _ctype, _num, _inl) \
+{ \
+ mach_msg_type_long_t *_p = (void*)(_e_chunk + _ofs); \
+ mach_msg_type_long_t _tmpl = { { 0, 0, 0, _inl, 1, 0 }, _name, _bits, _num}; \
+ *_p = _tmpl; \
+}
+#define flick_mach3mig_array_encode_boolean_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_encode##_long(_ofs, MACH_MSG_TYPE_BOOLEAN, 32, signed32_t, _num, _inl);
+#define flick_mach3mig_array_encode_char8_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_encode##_long(_ofs, MACH_MSG_TYPE_CHAR, 8, signed8_t, _num, _inl);
+#define flick_mach3mig_array_encode_char16_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_encode##_long(_ofs, MACH_MSG_TYPE_INTEGER_16, 8, signed16_t, _num, _inl);
+#define flick_mach3mig_array_encode_signed8_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_encode##_long(_ofs, MACH_MSG_TYPE_INTEGER_8, 8, signed8_t, _num, _inl);
+#define flick_mach3mig_array_encode_unsigned8_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_encode##_long(_ofs, MACH_MSG_TYPE_INTEGER_8, 8, unsigned8_t, _num, _inl);
+#define flick_mach3mig_array_encode_signed16_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_encode##_long(_ofs, MACH_MSG_TYPE_INTEGER_16, 16, signed16_t, _num, _inl);
+#define flick_mach3mig_array_encode_unsigned16_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_encode##_long(_ofs, MACH_MSG_TYPE_INTEGER_16, 16, unsigned16_t, _num, _inl);
+#define flick_mach3mig_array_encode_signed32_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_encode##_long(_ofs, MACH_MSG_TYPE_INTEGER_32, 32, signed32_t, _num, _inl);
+#define flick_mach3mig_array_encode_unsigned32_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_encode##_long(_ofs, MACH_MSG_TYPE_INTEGER_32, 32, unsigned32_t, _num, _inl);
+
+/* Array elements. */
+#define flick_mach3mig_array_encode_new_glob(max_size) flick_mach3mig_encode_new_glob(max_size)
+#define flick_mach3mig_array_encode_end_glob(max_size) flick_mach3mig_encode_end_glob(max_size)
+#define flick_mach3mig_array_encode_new_chunk(size) flick_mach3mig_encode_new_chunk(size)
+#define flick_mach3mig_array_encode_end_chunk(size) flick_mach3mig_encode_end_chunk(size)
+
+#define flick_mach3mig_array_encode_prim(_ofs, _data, _name, _bits, _ctype) \
+{ \
+ _ctype *_p = (void*)(_e_chunk + _ofs); \
+ *_p = (_data); \
+}
+#define flick_mach3mig_array_encode_boolean(_ofs, _data) \
+ flick_mach3mig_array_encode_prim(_ofs, _data, MACH_MSG_TYPE_BOOLEAN, 32, signed32_t);
+#define flick_mach3mig_array_encode_char8(_ofs, _data) \
+ flick_mach3mig_array_encode_prim(_ofs, _data, MACH_MSG_TYPE_CHAR, 8, signed8_t);
+#define flick_mach3mig_array_encode_char16(_ofs, _data) \
+ flick_mach3mig_array_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 8, signed16_t);
+#define flick_mach3mig_array_encode_signed8(_ofs, _data) \
+ flick_mach3mig_array_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, signed8_t);
+#define flick_mach3mig_array_encode_unsigned8(_ofs, _data) \
+ flick_mach3mig_array_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, unsigned8_t);
+#define flick_mach3mig_array_encode_signed16(_ofs, _data) \
+ flick_mach3mig_array_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, signed16_t);
+#define flick_mach3mig_array_encode_unsigned16(_ofs, _data) \
+ flick_mach3mig_array_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, unsigned16_t);
+#define flick_mach3mig_array_encode_signed32(_ofs, _data) \
+ flick_mach3mig_array_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, signed32_t);
+#define flick_mach3mig_array_encode_unsigned32(_ofs, _data) \
+ flick_mach3mig_array_encode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, unsigned32_t);
+#define flick_mach3mig_array_encode_port(_ofs, _data, _adjust) \
+{ \
+ if (_adjust > 1) { \
+ if (_err = mach_port_mod_refs(mach_task_self(), (_data), \
+ MACH_PORT_RIGHT_SEND, -(_adjust-1))) return _err; \
+ } \
+ flick_mach3mig_array_encode_prim(_ofs, _data, \
+ _adjust ? MACH_MSG_TYPE_MOVE_SEND : MACH_MSG_TYPE_COPY_SEND, \
+ 32, mach_port_t); \
+}
+
+/* Out-of-line buffer support. */
+#define flick_mach3mig_array_encode_ool_start(_ofs, _size) \
+{ \
+ vm_address_t *_p = (void*)(_e_chunk + _ofs); \
+ struct { struct { void *msg_buf; vm_size_t msg_buf_size, send_end_ofs; } d; } _desc; \
+ void *_e_chunk; \
+ \
+ _desc.d.msg_buf_size = (_size); \
+ if (_err = vm_allocate(mach_task_self(), _p, _desc.d.msg_buf_size, 1)) \
+ return _err; \
+ _desc.d.msg_buf = (void*)*_p; _desc.d.send_end_ofs = 0;
+
+#define flick_mach3mig_array_encode_ool_end() \
+}
+
+
+
+/*** Decoding ***/
+
+#if TypeCheck
+#define flick_iftypecheck(code) code
+#else
+#define flick_iftypecheck(code)
+#endif
+
+/* Primitive types with individual type descriptors. */
+#define flick_mach3mig_decode_new_glob(max_size)
+#define flick_mach3mig_decode_end_glob(max_size)
+
+#define flick_mach3mig_decode_new_chunk(size) \
+{ \
+ flick_iftypecheck( \
+ if (_desc.d.rcv_ofs + (size) > _desc.d.rcv_end_ofs) \
+ return MIG_TYPE_ERROR; \
+ ); \
+ _d_chunk = (void*)_desc.d.msg_buf + _desc.d.rcv_ofs; \
+}
+#define flick_mach3mig_decode_end_chunk(size) \
+ _desc.d.rcv_ofs += (size);
+
+#define flick_mach3mig_decode_prim(_ofs, _data, _name, _bits, _ctype) \
+{ \
+ struct { mach_msg_type_t _t; _ctype _v; } *_p = (void*)(_d_chunk + _ofs); \
+ flick_iftypecheck( ({ \
+ mach_msg_type_t _tmpl = { _name, _bits, 1, 1, 0, 0 }; \
+ if (*((signed32_t*)&_tmpl) != *((signed32_t*)&_p->_t)) \
+ return MIG_TYPE_ERROR; \
+ }) ) \
+ (_data) = _p->_v; \
+}
+#define flick_mach3mig_decode_boolean(_ofs, _data) \
+ flick_mach3mig_decode_prim(_ofs, _data, MACH_MSG_TYPE_BOOLEAN, 32, signed32_t);
+#define flick_mach3mig_decode_char8(_ofs, _data) \
+ flick_mach3mig_decode_prim(_ofs, _data, MACH_MSG_TYPE_CHAR, 8, signed8_t);
+#define flick_mach3mig_decode_char16(_ofs, _data) \
+ flick_mach3mig_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 8, signed16_t);
+#define flick_mach3mig_decode_signed8(_ofs, _data) \
+ flick_mach3mig_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, signed8_t);
+#define flick_mach3mig_decode_unsigned8(_ofs, _data) \
+ flick_mach3mig_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, unsigned8_t);
+#define flick_mach3mig_decode_signed16(_ofs, _data) \
+ flick_mach3mig_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, signed16_t);
+#define flick_mach3mig_decode_unsigned16(_ofs, _data) \
+ flick_mach3mig_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, unsigned16_t);
+#define flick_mach3mig_decode_signed32(_ofs, _data) \
+ flick_mach3mig_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, signed32_t);
+#define flick_mach3mig_decode_unsigned32(_ofs, _data) \
+ flick_mach3mig_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, unsigned32_t);
+#define flick_mach3mig_decode_port(_ofs, _data, _adjust) \
+{ \
+ flick_mach3mig_decode_prim(_ofs, _data, MACH_MSG_TYPE_PORT_SEND, 32, mach_port_t); \
+ if (_adjust != 1) { \
+ if (_err = mach_port_mod_refs(mach_task_self(), (_data), \
+ MACH_PORT_RIGHT_SEND, _adjust-1)) return _err; \
+ } \
+}
+
+/* Array type descriptors. */
+#define flick_mach3mig_array_decode_type(_ofs, _name, _bits, _ctype, _num, _inl) \
+{ \
+ mach_msg_type_t *_p = (void*)(_e_chunk + _ofs); \
+ flick_iftypecheck( ({ \
+ mach_msg_type_t _tmpl = { _name, _bits, _num, _inl, 0, 0 }; \
+ if (*((signed32_t*)&_tmpl) != *((signed32_t*)_p)) \
+ return MIG_TYPE_ERROR; \
+ }) ) \
+}
+#define flick_mach3mig_array_decode_long_type(_ofs, _name, _bits, _ctype, _num, _inl) \
+{ \
+ mach_msg_type_long_t *_p = (void*)(_e_chunk + _ofs); \
+ flick_iftypecheck( ({ \
+ mach_msg_type_long_t _tmpl = { { 0, 0, 0, _inl, 1, 0 }, _name, _bits, _num}; \
+ if (memcmp(&_tmpl, _p, sizeof(_tmpl))) \
+ return MIG_TYPE_ERROR; \
+ }) ) \
+}
+#define flick_mach3mig_array_decode_boolean_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_decode##_long(_ofs, MACH_MSG_TYPE_BOOLEAN, 32, signed32_t, _num, _inl);
+#define flick_mach3mig_array_decode_char8_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_decode##_long(_ofs, MACH_MSG_TYPE_CHAR, 8, signed8_t, _num, _inl);
+#define flick_mach3mig_array_decode_char16_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_decode##_long(_ofs, MACH_MSG_TYPE_INTEGER_16, 8, signed16_t, _num, _inl);
+#define flick_mach3mig_array_decode_signed8_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_decode##_long(_ofs, MACH_MSG_TYPE_INTEGER_8, 8, signed8_t, _num, _inl);
+#define flick_mach3mig_array_decode_unsigned8_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_decode##_long(_ofs, MACH_MSG_TYPE_INTEGER_8, 8, unsigned8_t, _num, _inl);
+#define flick_mach3mig_array_decode_signed16_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_decode##_long(_ofs, MACH_MSG_TYPE_INTEGER_16, 16, signed16_t, _num, _inl);
+#define flick_mach3mig_array_decode_unsigned16_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_decode##_long(_ofs, MACH_MSG_TYPE_INTEGER_16, 16, unsigned16_t, _num, _inl);
+#define flick_mach3mig_array_decode_signed32_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_decode##_long(_ofs, MACH_MSG_TYPE_INTEGER_32, 32, signed32_t, _num, _inl);
+#define flick_mach3mig_array_decode_unsigned32_type(_ofs, _num, _inl, _long) \
+ flick_mach3mig_array_decode##_long(_ofs, MACH_MSG_TYPE_INTEGER_32, 32, unsigned32_t, _num, _inl);
+
+/* Array elements. */
+#define flick_mach3mig_array_decode_new_glob(max_size) flick_mach3mig_decode_new_glob(max_size)
+#define flick_mach3mig_array_decode_end_glob(max_size) flick_mach3mig_decode_end_glob(max_size)
+#define flick_mach3mig_array_decode_new_chunk(size) flick_mach3mig_decode_new_chunk(size)
+#define flick_mach3mig_array_decode_end_chunk(size) flick_mach3mig_decode_end_chunk(size)
+
+#define flick_mach3mig_array_decode_prim(_ofs, _data, _name, _bits, _ctype) \
+{ \
+ _ctype *_p = (void*)(_d_chunk + _ofs); \
+ (_data) = *_p; \
+}
+#define flick_mach3mig_array_decode_boolean(_ofs, _data) \
+ flick_mach3mig_array_decode_prim(_ofs, _data, MACH_MSG_TYPE_BOOLEAN, 32, signed32_t);
+#define flick_mach3mig_array_decode_char8(_ofs, _data) \
+ flick_mach3mig_array_decode_prim(_ofs, _data, MACH_MSG_TYPE_CHAR, 8, signed8_t);
+#define flick_mach3mig_array_decode_char16(_ofs, _data) \
+ flick_mach3mig_array_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 8, signed16_t);
+#define flick_mach3mig_array_decode_signed8(_ofs, _data) \
+ flick_mach3mig_array_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, signed8_t);
+#define flick_mach3mig_array_decode_unsigned8(_ofs, _data) \
+ flick_mach3mig_array_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_8, 8, unsigned8_t);
+#define flick_mach3mig_array_decode_signed16(_ofs, _data) \
+ flick_mach3mig_array_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, signed16_t);
+#define flick_mach3mig_array_decode_unsigned16(_ofs, _data) \
+ flick_mach3mig_array_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_16, 16, unsigned16_t);
+#define flick_mach3mig_array_decode_signed32(_ofs, _data) \
+ flick_mach3mig_array_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, signed32_t);
+#define flick_mach3mig_array_decode_unsigned32(_ofs, _data) \
+ flick_mach3mig_array_decode_prim(_ofs, _data, MACH_MSG_TYPE_INTEGER_32, 32, unsigned32_t);
+#define flick_mach3mig_array_decode_port(_ofs, _data, _adjust) \
+{ \
+ flick_mach3mig_array_decode_prim(_ofs, _data, MACH_MSG_TYPE_PORT_SEND, 32, mach_port_t); \
+ if (_adjust != 1) { \
+ kern_return_t res = mach_port_mod_refs(mach_task_self(), (_data), \
+ MACH_PORT_RIGHT_SEND, _adjust-1); \
+ } \
+}
+
+/* Out-of-line buffer support. */
+#define flick_mach3mig_array_decode_ool_start(_ofs, _size) \
+{ \
+ vm_address_t *_p = (void*)(_e_chunk + _ofs); \
+ struct { struct { void *msg_buf; vm_size_t rcv_ofs, rcv_end_ofs; } d; } _desc; \
+ void *_e_chunk; \
+ \
+ _desc.d.msg_buf = (void*)*_p; _desc.d.rcv_ofs = 0; _desc.d.rcv_end_ofs = (_size);\
+
+#define flick_mach3mig_array_decode_ool_end() \
+ if (_err = vm_deallocate(mach_task_self(), *_p, _desc.d.rcv_end_ofs)) \
+ return _err; \
+}
+
+
+/*** Client-side support ***/
+
+mach_msg_return_t flick_mach3mig_rpc(struct flick_mach3mig_rpc_desc *rpc);
+
+#define flick_mach3mig_rpc_macro(iscomplex) \
+{ \
+ _desc.d.msg_buf->Head.msgh_bits |= \
+ MACH_MSGH_BITS(0, MACH_MSG_TYPE_MAKE_SEND_ONCE) \
+ | (iscomplex ? MACH_MSGH_BITS_COMPLEX : 0); \
+ if (_err = flick_mach3mig_rpc(&_desc.d)) return _err; \
+}
+
+#define flick_mach3mig_send_macro(iscomplex) \
+{ \
+ _desc.d.msg_buf->Head.msgh_bits |= (iscomplex ? MACH_MSGH_BITS_COMPLEX : 0); \
+ if (_err = flick_mach3mig_rpc(&_desc.d)) return _err; \
+}
+
+
+/*** Server-side support ***/
+
+#define flick_mach3mig_serv_start_decode() \
+{ \
+ _desc.d.msg_buf = (mig_reply_header_t*)InHeadP; \
+ _desc.d.msg_buf_size = 0x7fffffff; \
+ _desc.d.rcv_ofs = sizeof(mach_msg_header_t); \
+ _desc.d.rcv_end_ofs = InHeadP->msgh_size; \
+}
+
+#define flick_mach3mig_serv_end_decode() /* do nothing */
+
+#define flick_mach3mig_serv_start_encode() \
+{ \
+ _desc.d.msg_buf = (mig_reply_header_t*)OutHeadP; \
+ _desc.d.send_end_ofs = sizeof(mig_reply_header_t); \
+}
+
+#define flick_mach3mig_serv_end_encode() \
+{ \
+ mach_msg_type_t _ret_tmpl = { MACH_MSG_TYPE_INTEGER_32, 32, 1, 1, 0, 0 }; \
+ OutHeadP->msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REPLY(InHeadP->msgh_bits), 0); \
+ OutHeadP->msgh_size = _desc.d.send_end_ofs; \
+ OutHeadP->msgh_remote_port = InHeadP->msgh_remote_port; \
+ OutHeadP->msgh_local_port = MACH_PORT_NULL; \
+ _desc.d.msg_buf->RetCodeType = _ret_tmpl; \
+ _desc.d.msg_buf->RetCode = _return; \
+}
+
+
+#endif /* _MACH_FLICK_MACH3MIG_GLUE_H_ */
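The decode-side type checks above build a mach_msg_type_t template with positional initializers and compare it against the descriptor found in the message as a single 32-bit word. A minimal sketch of the same check as a plain function, assuming the standard Mach 3 mach_msg_type_t layout (name, size, number, inline, longform, deallocate):

#include <mach/message.h>
#include <string.h>

/* Sketch only: mirrors the check in flick_mach3mig_decode_prim, but uses
   memcmp on the 32-bit descriptor instead of the signed32_t casts. */
static int
type_matches(const mach_msg_type_t *found, unsigned name, unsigned bits)
{
	mach_msg_type_t tmpl = { name, bits, 1, 1, 0, 0 };
	return memcmp(&tmpl, found, sizeof tmpl) == 0;
}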
diff --git a/include/mach/host_info.h b/include/mach/host_info.h
new file mode 100644
index 00000000..60a6aefd
--- /dev/null
+++ b/include/mach/host_info.h
@@ -0,0 +1,93 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/host_info.h
+ *
+ * Definitions for host_info call.
+ */
+
+#ifndef _MACH_HOST_INFO_H_
+#define _MACH_HOST_INFO_H_
+
+#include <mach/machine.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ * Generic information structure to allow for expansion.
+ */
+typedef integer_t *host_info_t; /* varying array of integers */
+
+#define HOST_INFO_MAX (1024) /* max array size */
+typedef integer_t host_info_data_t[HOST_INFO_MAX];
+
+#define KERNEL_VERSION_MAX (512)
+typedef char kernel_version_t[KERNEL_VERSION_MAX];
+
+#define KERNEL_BOOT_INFO_MAX (4096)
+typedef char kernel_boot_info_t[KERNEL_BOOT_INFO_MAX];
+
+/*
+ * Currently defined information.
+ */
+#define HOST_BASIC_INFO 1 /* basic info */
+#define HOST_PROCESSOR_SLOTS 2 /* processor slot numbers */
+#define HOST_SCHED_INFO 3 /* scheduling info */
+#define HOST_LOAD_INFO 4 /* avenrun/mach_factor info */
+
+struct host_basic_info {
+ integer_t max_cpus; /* max number of cpus possible */
+ integer_t avail_cpus; /* number of cpus now available */
+ vm_size_t memory_size; /* size of memory in bytes */
+ cpu_type_t cpu_type; /* cpu type */
+ cpu_subtype_t cpu_subtype; /* cpu subtype */
+};
+
+typedef struct host_basic_info host_basic_info_data_t;
+typedef struct host_basic_info *host_basic_info_t;
+#define HOST_BASIC_INFO_COUNT \
+ (sizeof(host_basic_info_data_t)/sizeof(integer_t))
+
+struct host_sched_info {
+ integer_t min_timeout; /* minimum timeout in milliseconds */
+ integer_t min_quantum; /* minimum quantum in milliseconds */
+};
+
+typedef struct host_sched_info host_sched_info_data_t;
+typedef struct host_sched_info *host_sched_info_t;
+#define HOST_SCHED_INFO_COUNT \
+ (sizeof(host_sched_info_data_t)/sizeof(integer_t))
+
+struct host_load_info {
+ integer_t avenrun[3]; /* scaled by LOAD_SCALE */
+ integer_t mach_factor[3]; /* scaled by LOAD_SCALE */
+};
+
+typedef struct host_load_info host_load_info_data_t;
+typedef struct host_load_info *host_load_info_t;
+#define HOST_LOAD_INFO_COUNT \
+ (sizeof(host_load_info_data_t)/sizeof(integer_t))
+
+#endif /* _MACH_HOST_INFO_H_ */
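A minimal sketch of how host_basic_info is typically obtained, assuming the MIG-generated host_info() stub, mach_host_self(), and a mach_msg_type_number_t count (standard Mach 3 interfaces that are not defined in this header):

#include <stdio.h>
#include <mach.h>		/* assumed umbrella header: mach_host_self(), host_info() */
#include <mach/host_info.h>

int
main(void)
{
	host_basic_info_data_t info;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	kern_return_t kr;

	kr = host_info(mach_host_self(), HOST_BASIC_INFO,
		       (host_info_t) &info, &count);
	if (kr != KERN_SUCCESS)
		return 1;

	printf("%d/%d cpus, %lu bytes of memory\n",
	       (int) info.avail_cpus, (int) info.max_cpus,
	       (unsigned long) info.memory_size);
	return 0;
}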
diff --git a/include/mach/inline.h b/include/mach/inline.h
new file mode 100644
index 00000000..35f5c5d2
--- /dev/null
+++ b/include/mach/inline.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSS
+ */
+#ifndef _MACH_INLINE_H_
+#define _MACH_INLINE_H_
+
+#ifndef MACH_INLINE
+#define MACH_INLINE extern __inline
+#endif
+
+#endif /* _MACH_INLINE_H_ */
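Under GNU C (gnu89 semantics), "extern __inline" emits no standalone definition, so a header can define small functions this way without multiple-definition conflicts. A minimal sketch with a hypothetical helper:

#include <mach/inline.h>

/* Sketch only: add1 is a made-up example, not a Mach interface. */
MACH_INLINE int
add1(int x)
{
	return x + 1;	/* inlined at call sites; no out-of-line copy is emitted */
}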
diff --git a/include/mach/kern_return.h b/include/mach/kern_return.h
new file mode 100644
index 00000000..2274328b
--- /dev/null
+++ b/include/mach/kern_return.h
@@ -0,0 +1,160 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: h/kern_return.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Kernel return codes.
+ *
+ */
+
+#ifndef _MACH_KERN_RETURN_H_
+#define _MACH_KERN_RETURN_H_
+
+#include <mach/machine/kern_return.h>
+
+#define KERN_SUCCESS 0
+
+#define KERN_INVALID_ADDRESS 1
+ /* Specified address is not currently valid.
+ */
+
+#define KERN_PROTECTION_FAILURE 2
+ /* Specified memory is valid, but does not permit the
+ * required forms of access.
+ */
+
+#define KERN_NO_SPACE 3
+ /* The address range specified is already in use, or
+ * no address range of the size specified could be
+ * found.
+ */
+
+#define KERN_INVALID_ARGUMENT 4
+ /* The function requested was not applicable to this
+ * type of argument, or an argument is invalid.
+ */
+
+#define KERN_FAILURE 5
+ /* The function could not be performed. A catch-all.
+ */
+
+#define KERN_RESOURCE_SHORTAGE 6
+ /* A system resource could not be allocated to fulfill
+ * this request. This failure may not be permanent.
+ */
+
+#define KERN_NOT_RECEIVER 7
+ /* The task in question does not hold receive rights
+ * for the port argument.
+ */
+
+#define KERN_NO_ACCESS 8
+ /* Bogus access restriction.
+ */
+
+#define KERN_MEMORY_FAILURE 9
+ /* During a page fault, the target address refers to a
+ * memory object that has been destroyed. This
+ * failure is permanent.
+ */
+
+#define KERN_MEMORY_ERROR 10
+ /* During a page fault, the memory object indicated
+ * that the data could not be returned. This failure
+ * may be temporary; future attempts to access this
+ * same data may succeed, as defined by the memory
+ * object.
+ */
+
+/* KERN_ALREADY_IN_SET 11 obsolete */
+
+#define KERN_NOT_IN_SET 12
+ /* The receive right is not a member of a port set.
+ */
+
+#define KERN_NAME_EXISTS 13
+ /* The name already denotes a right in the task.
+ */
+
+#define KERN_ABORTED 14
+ /* The operation was aborted. Ipc code will
+ * catch this and reflect it as a message error.
+ */
+
+#define KERN_INVALID_NAME 15
+ /* The name doesn't denote a right in the task.
+ */
+
+#define KERN_INVALID_TASK 16
+ /* Target task isn't an active task.
+ */
+
+#define KERN_INVALID_RIGHT 17
+ /* The name denotes a right, but not an appropriate right.
+ */
+
+#define KERN_INVALID_VALUE 18
+ /* A blatant range error.
+ */
+
+#define KERN_UREFS_OVERFLOW 19
+ /* Operation would overflow limit on user-references.
+ */
+
+#define KERN_INVALID_CAPABILITY 20
+ /* The supplied (port) capability is improper.
+ */
+
+#define KERN_RIGHT_EXISTS 21
+ /* The task already has send or receive rights
+ * for the port under another name.
+ */
+
+#define KERN_INVALID_HOST 22
+ /* Target host isn't actually a host.
+ */
+
+#define KERN_MEMORY_PRESENT 23
+ /* An attempt was made to supply "precious" data
+ * for memory that is already present in a
+ * memory object.
+ */
+
+#define KERN_WRITE_PROTECTION_FAILURE 24
+ /*
+ * A page was marked as VM_PROT_NOTIFY and an attempt was
+ * made to write it.
+ */
+
+#define KERN_TERMINATED 26
+ /* Object has been terminated and is no longer available.
+ */
+
+#endif /* _MACH_KERN_RETURN_H_ */
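A small caller-side sketch built on the codes above; it treats KERN_RESOURCE_SHORTAGE as the one retryable case, per its "may not be permanent" description (the policy itself is just an example):

#include <mach/kern_return.h>

/* Sketch only: decide whether a failed kernel call is worth retrying. */
static int
should_retry(kern_return_t kr)
{
	switch (kr) {
	case KERN_SUCCESS:
		return 0;		/* nothing to retry */
	case KERN_RESOURCE_SHORTAGE:
		return 1;		/* documented as possibly transient */
	default:
		return 0;		/* treat everything else as final */
	}
}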
diff --git a/include/mach/lmm.h b/include/mach/lmm.h
new file mode 100644
index 00000000..f350329c
--- /dev/null
+++ b/include/mach/lmm.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * Public header file for the List Memory Manager.
+ */
+#ifndef _MACH_LMM_H_
+#define _MACH_LMM_H_
+
+#include <mach/machine/vm_types.h>
+
+/* The contents of this structure are opaque to users. */
+typedef struct lmm
+{
+ struct lmm_region *regions;
+} lmm_t;
+
+#define LMM_INITIALIZER { 0 }
+
+typedef natural_t lmm_flags_t;
+typedef integer_t lmm_pri_t;
+
+void lmm_init(lmm_t *lmm);
+void lmm_add(lmm_t *lmm, vm_offset_t addr, vm_size_t size,
+ lmm_flags_t flags, lmm_pri_t pri);
+void *lmm_alloc(lmm_t *lmm, vm_size_t size, lmm_flags_t flags);
+void *lmm_alloc_aligned(lmm_t *lmm, vm_size_t size, lmm_flags_t flags,
+ int align_bits, vm_offset_t align_ofs);
+void *lmm_alloc_page(lmm_t *lmm, lmm_flags_t flags);
+void *lmm_alloc_gen(lmm_t *lmm, vm_size_t size, lmm_flags_t flags,
+ int align_bits, vm_offset_t align_ofs,
+ vm_offset_t bounds_min, vm_offset_t bounds_max);
+vm_size_t lmm_avail(lmm_t *lmm, lmm_flags_t flags);
+void lmm_find_free(lmm_t *lmm, vm_offset_t *inout_addr,
+ vm_size_t *out_size, lmm_flags_t *out_flags);
+void lmm_free(lmm_t *lmm, void *block, vm_size_t size);
+
+/* Only available if debugging turned on. */
+void lmm_dump(lmm_t *lmm);
+
+#endif /* _MACH_LMM_H_ */
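A minimal usage sketch of the interface above; the pool name, region base/size, and the zero flags and priority values are illustrative only:

#include <mach/lmm.h>

/* Sketch only: a single statically-initialized pool.  lmm_init(&pool)
   could presumably be used instead of the static initializer. */
static lmm_t pool = LMM_INITIALIZER;

void
pool_setup(vm_offset_t base, vm_size_t size)
{
	/* Hand one region to the pool: no special flags, priority 0. */
	lmm_add(&pool, base, size, 0, 0);
}

void *
pool_get(vm_size_t size)
{
	return lmm_alloc(&pool, size, 0);	/* expected to return 0 when nothing fits */
}

void
pool_put(void *block, vm_size_t size)
{
	lmm_free(&pool, block, size);
}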
diff --git a/include/mach/mach.defs b/include/mach/mach.defs
new file mode 100644
index 00000000..1e4429d6
--- /dev/null
+++ b/include/mach/mach.defs
@@ -0,0 +1,958 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Matchmaker definitions file for Mach kernel interface.
+ */
+
+#ifdef MACH_KERNEL
+#include <mach_ipc_compat.h>
+
+simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
+#endif /* MACH_KERNEL */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+ mach 2000;
+
+#ifdef KERNEL_USER
+userprefix r_;
+#endif /* KERNEL_USER */
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+skip; /* old port_allocate */
+skip; /* old port_deallocate */
+skip; /* old port_enable */
+skip; /* old port_disable */
+skip; /* old port_select */
+skip; /* old port_set_backlog */
+skip; /* old port_status */
+
+/*
+ * Create a new task with an empty set of IPC rights,
+ * and having an address space constructed from the
+ * target task (or empty, if inherit_memory is FALSE).
+ */
+routine task_create(
+ target_task : task_t;
+ inherit_memory : boolean_t;
+ out child_task : task_t);
+
+/*
+ * Destroy the target task, causing all of its threads
+ * to be destroyed, all of its IPC rights to be deallocated,
+ * and all of its address space to be deallocated.
+ */
+routine task_terminate(
+ target_task : task_t);
+
+/*
+ * Get user-level handler entry points for all
+ * emulated system calls.
+ */
+routine task_get_emulation_vector(
+ task : task_t;
+ out vector_start : int;
+ out emulation_vector: emulation_vector_t);
+
+/*
+ * Establish user-level handlers for the specified
+ * system calls. Non-emulated system calls are specified
+ * with emulation_vector[i] == EML_ROUTINE_NULL.
+ */
+routine task_set_emulation_vector(
+ task : task_t;
+ vector_start : int;
+ emulation_vector: emulation_vector_t);
+
+
+/*
+ * Returns the set of threads belonging to the target task.
+ */
+routine task_threads(
+ target_task : task_t;
+ out thread_list : thread_array_t);
+
+/*
+ * Returns information about the target task.
+ */
+routine task_info(
+ target_task : task_t;
+ flavor : int;
+ out task_info_out : task_info_t, CountInOut);
+
+
+skip; /* old task_status */
+skip; /* old task_set_notify */
+skip; /* old thread_create */
+
+/*
+ * Destroy the target thread.
+ */
+routine thread_terminate(
+ target_thread : thread_t);
+
+/*
+ * Return the selected state information for the target
+ * thread. If the thread is currently executing, the results
+ * may be stale. [Flavor THREAD_STATE_FLAVOR_LIST provides a
+ * list of valid flavors for the target thread.]
+ */
+routine thread_get_state(
+ target_thread : thread_t;
+ flavor : int;
+ out old_state : thread_state_t, CountInOut);
+
+/*
+ * Set the selected state information for the target thread.
+ * If the thread is currently executing, the state change
+ * may be ill-defined.
+ */
+routine thread_set_state(
+ target_thread : thread_t;
+ flavor : int;
+ new_state : thread_state_t);
+
+/*
+ * Returns information about the target thread.
+ */
+routine thread_info(
+ target_thread : thread_t;
+ flavor : int;
+ out thread_info_out : thread_info_t, CountInOut);
+
+skip; /* old thread_mutate */
+
+/*
+ * Allocate zero-filled memory in the address space
+ * of the target task, either at the specified address,
+ * or wherever space can be found (if anywhere is TRUE),
+ * of the specified size. The address at which the
+ * allocation actually took place is returned.
+ */
+#ifdef EMULATOR
+skip; /* the emulator redefines vm_allocate using vm_map */
+#else EMULATOR
+routine vm_allocate(
+ target_task : vm_task_t;
+ inout address : vm_address_t;
+ size : vm_size_t;
+ anywhere : boolean_t);
+#endif EMULATOR
+
+skip; /* old vm_allocate_with_pager */
+
+/*
+ * Deallocate the specified range from the virtual
+ * address space of the target task.
+ */
+routine vm_deallocate(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t);
+
+/*
+ * Set the current or maximum protection attribute
+ * for the specified range of the virtual address
+ * space of the target task. The current protection
+ * limits the memory access rights of threads within
+ * the task; the maximum protection limits the accesses
+ * that may be given in the current protection.
+ * Protections are specified as a set of {read, write, execute}
+ * *permissions*.
+ */
+routine vm_protect(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ set_maximum : boolean_t;
+ new_protection : vm_prot_t);
+
+/*
+ * Set the inheritance attribute for the specified range
+ * of the virtual address space of the target task.
+ * The inheritance value is one of {none, copy, share}, and
+ * specifies how the child address space should acquire
+ * this memory at the time of a task_create call.
+ */
+routine vm_inherit(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ new_inheritance : vm_inherit_t);
+
+/*
+ * Returns the contents of the specified range of the
+ * virtual address space of the target task. [The
+ * range must be aligned on a virtual page boundary,
+ * and must be a multiple of pages in extent. The
+ * protection on the specified range must permit reading.]
+ */
+routine vm_read(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ out data : pointer_t);
+
+/*
+ * Writes the contents of the specified range of the
+ * virtual address space of the target task. [The
+ * range must be aligned on a virtual page boundary,
+ * and must be a multiple of pages in extent. The
+ * protection on the specified range must permit writing.]
+ */
+routine vm_write(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ data : pointer_t);
+
+/*
+ * Copy the contents of the source range of the virtual
+ * address space of the target task to the destination
+ * range in that same address space. [Both of the
+ * ranges must be aligned on a virtual page boundary,
+ * and must be multiples of pages in extent. The
+ * protection on the source range must permit reading,
+ * and the protection on the destination range must
+ * permit writing.]
+ */
+routine vm_copy(
+ target_task : vm_task_t;
+ source_address : vm_address_t;
+ size : vm_size_t;
+ dest_address : vm_address_t);
+
+/*
+ * Returns information about the contents of the virtual
+ * address space of the target task at the specified
+ * address. The returned protection, inheritance, sharing
+ * and memory object values apply to the entire range described
+ * by the address range returned; the memory object offset
+ * corresponds to the beginning of the address range.
+ * [If the specified address is not allocated, the next
+ * highest address range is described. If no addresses beyond
+ * the one specified are allocated, the call returns KERN_NO_SPACE.]
+ */
+routine vm_region(
+ target_task : vm_task_t;
+ inout address : vm_address_t;
+ out size : vm_size_t;
+ out protection : vm_prot_t;
+ out max_protection : vm_prot_t;
+ out inheritance : vm_inherit_t;
+ out is_shared : boolean_t;
+ /* avoid out-translation of the argument */
+ out object_name : memory_object_name_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+ out offset : vm_offset_t);
+
+/*
+ * Return virtual memory statistics for the host
+ * on which the target task resides. [Note that the
+ * statistics are not specific to the target task.]
+ */
+routine vm_statistics(
+ target_task : vm_task_t;
+ out vm_stats : vm_statistics_data_t);
+
+skip; /* old task_by_u*x_pid */
+skip; /* old vm_pageable */
+
+/*
+ * Stash a handful of ports for the target task; child
+ * tasks inherit this stash at task_create time.
+ */
+routine mach_ports_register(
+ target_task : task_t;
+ init_port_set : mach_port_array_t =
+ ^array[] of mach_port_t);
+
+/*
+ * Retrieve the stashed ports for the target task.
+ */
+routine mach_ports_lookup(
+ target_task : task_t;
+ out init_port_set : mach_port_array_t =
+ ^array[] of mach_port_t);
+
+skip; /* old u*x_pid */
+skip; /* old netipc_listen */
+skip; /* old netipc_ignore */
+
+/*
+ * Provide the data contents of a range of the given memory
+ * object, with the access restriction specified. [Only
+ * whole virtual pages of data can be accepted; partial pages
+ * will be discarded. Data should be provided on request, but
+ * may be provided in advance as desired. When data already
+ * held by this kernel is provided again, the new data is ignored.
+ * The access restriction is the subset of {read, write, execute}
+ * which are prohibited. The kernel may not provide any data (or
+ * protection) consistency among pages with different virtual page
+ * alignments within the same object.]
+ */
+simpleroutine memory_object_data_provided(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ data : pointer_t;
+ lock_value : vm_prot_t);
+
+/*
+ * Indicate that a range of the given temporary memory object does
+ * not exist, and that the backing memory object should be used
+ * instead (or zero-fill memory be used, if no backing object exists).
+ * [This call is intended for use only by the default memory manager.
+ * It should not be used to indicate a real error --
+ * memory_object_data_error should be used for that purpose.]
+ */
+simpleroutine memory_object_data_unavailable(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ size : vm_size_t);
+
+/*
+ * Retrieves the attributes currently associated with
+ * a memory object.
+ */
+routine memory_object_get_attributes(
+ memory_control : memory_object_control_t;
+ out object_ready : boolean_t;
+ out may_cache : boolean_t;
+ out copy_strategy : memory_object_copy_strategy_t);
+
+/*
+ * Sets the default memory manager, the port to which
+ * newly-created temporary memory objects are delivered.
+ * [See (memory_object_default)memory_object_create.]
+ * The old memory manager port is returned.
+ */
+routine vm_set_default_memory_manager(
+ host_priv : host_priv_t;
+ inout default_manager : mach_port_make_send_t);
+
+skip; /* old pager_flush_request */
+
+/*
+ * Control use of the data associated with the given
+ * memory object. For each page in the given range,
+ * perform the following operations, in order:
+ * 1) restrict access to the page (disallow
+ * forms specified by "prot");
+ * 2) write back modifications (if "should_return"
+ * is RETURN_DIRTY and the page is dirty, or
+ * "should_return" is RETURN_ALL and the page
+ * is either dirty or precious); and,
+ * 3) flush the cached copy (if "should_flush"
+ * is asserted).
+ * The set of pages is defined by a starting offset
+ * ("offset") and size ("size"). Only pages with the
+ * same page alignment as the starting offset are
+ * considered.
+ *
+ * A single acknowledgement is sent (to the "reply_to"
+ * port) when these actions are complete.
+ *
+ * There are two versions of this routine because IPC distinguishes
+ * between booleans and integers (a 2-valued integer is NOT a
+ * boolean). The new routine is backwards compatible at the C
+ * language interface.
+ */
+simpleroutine xxx_memory_object_lock_request(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ size : vm_size_t;
+ should_clean : boolean_t;
+ should_flush : boolean_t;
+ lock_value : vm_prot_t;
+ reply_to : mach_port_t =
+ MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+
+
+simpleroutine memory_object_lock_request(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ size : vm_size_t;
+ should_return : memory_object_return_t;
+ should_flush : boolean_t;
+ lock_value : vm_prot_t;
+ reply_to : mach_port_t =
+ MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+
+/* obsolete */
+routine xxx_task_get_emulation_vector(
+ task : task_t;
+ out vector_start : int;
+ out emulation_vector: xxx_emulation_vector_t, IsLong);
+
+/* obsolete */
+routine xxx_task_set_emulation_vector(
+ task : task_t;
+ vector_start : int;
+ emulation_vector: xxx_emulation_vector_t, IsLong);
+
+/*
+ * Returns information about the host on which the
+ * target object resides. [This object may be
+ * a task, thread, or memory_object_control port.]
+ */
+routine xxx_host_info(
+ target_task : mach_port_t;
+ out info : machine_info_data_t);
+
+/*
+ * Returns information about a particular processor on
+ * the host on which the target task resides.
+ */
+routine xxx_slot_info(
+ target_task : task_t;
+ slot : int;
+ out info : machine_slot_data_t);
+
+/*
+ * Performs control operations (currently only
+ * turning off or on) on a particular processor on
+ * the host on which the target task resides.
+ */
+routine xxx_cpu_control(
+ target_task : task_t;
+ cpu : int;
+ running : boolean_t);
+
+skip; /* old thread_statistics */
+skip; /* old task_statistics */
+skip; /* old netport_init */
+skip; /* old netport_enter */
+skip; /* old netport_remove */
+skip; /* old thread_set_priority */
+
+/*
+ * Increment the suspend count for the target task.
+ * No threads within a task may run when the suspend
+ * count for that task is non-zero.
+ */
+routine task_suspend(
+ target_task : task_t);
+
+/*
+ * Decrement the suspend count for the target task,
+ * if the count is currently non-zero. If the resulting
+ * suspend count is zero, then threads within the task
+ * that also have non-zero suspend counts may execute.
+ */
+routine task_resume(
+ target_task : task_t);
+
+/*
+ * Returns the current value of the selected special port
+ * associated with the target task.
+ */
+routine task_get_special_port(
+ task : task_t;
+ which_port : int;
+ out special_port : mach_port_t);
+
+/*
+ * Set one of the special ports associated with the
+ * target task.
+ */
+routine task_set_special_port(
+ task : task_t;
+ which_port : int;
+ special_port : mach_port_t);
+
+/* obsolete */
+routine xxx_task_info(
+ target_task : task_t;
+ flavor : int;
+ out task_info_out : task_info_t, IsLong);
+
+
+/*
+ * Create a new thread within the target task, returning
+ * the port representing that new thread. The
+ * initial execution state of the thread is undefined.
+ */
+routine thread_create(
+ parent_task : task_t;
+ out child_thread : thread_t);
+
+/*
+ * Increment the suspend count for the target thread.
+ * Once this call has completed, the thread will not
+ * execute any further user or meta- instructions.
+ * Once suspended, a thread may not execute again until
+ * its suspend count is zero, and the suspend count
+ * for its task is also zero.
+ */
+routine thread_suspend(
+ target_thread : thread_t);
+
+/*
+ * Decrement the suspend count for the target thread,
+ * if that count is not already zero.
+ */
+routine thread_resume(
+ target_thread : thread_t);
+
+/*
+ * Cause any user or meta- instructions currently being
+ * executed by the target thread to be aborted. [Meta-
+ * instructions consist of the basic traps for IPC
+ * (e.g., msg_send, msg_receive) and self-identification
+ * (e.g., task_self, thread_self, thread_reply). Calls
+ * described by MiG interfaces are not meta-instructions
+ * themselves.]
+ */
+routine thread_abort(
+ target_thread : thread_t);
+
+/* obsolete */
+routine xxx_thread_get_state(
+ target_thread : thread_t;
+ flavor : int;
+ out old_state : thread_state_t, IsLong);
+
+/* obsolete */
+routine xxx_thread_set_state(
+ target_thread : thread_t;
+ flavor : int;
+ new_state : thread_state_t, IsLong);
+
+/*
+ * Returns the current value of the selected special port
+ * associated with the target thread.
+ */
+routine thread_get_special_port(
+ thread : thread_t;
+ which_port : int;
+ out special_port : mach_port_t);
+
+/*
+ * Set one of the special ports associated with the
+ * target thread.
+ */
+routine thread_set_special_port(
+ thread : thread_t;
+ which_port : int;
+ special_port : mach_port_t);
+
+/* obsolete */
+routine xxx_thread_info(
+ target_thread : thread_t;
+ flavor : int;
+ out thread_info_out : thread_info_t, IsLong);
+
+/*
+ * Establish a user-level handler for the specified
+ * system call.
+ */
+routine task_set_emulation(
+ target_port : task_t;
+ routine_entry_pt: vm_address_t;
+ routine_number : int);
+
+/*
+ * Establish restart pc for interrupted atomic sequences.
+ * This reuses the message number for the old task_get_io_port.
+ * See task_info.h for description of flavors.
+ *
+ */
+routine task_ras_control(
+ target_task : task_t;
+ basepc : vm_address_t;
+ boundspc : vm_address_t;
+ flavor : int);
+
+
+
+skip; /* old host_ipc_statistics */
+
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Returns the set of port and port set names
+ * to which the target task has access, along with
+ * the type (set or port) for each name.
+ */
+routine port_names(
+ task : ipc_space_t;
+ out port_names_p : port_name_array_t;
+ out port_types : port_type_array_t);
+
+/*
+ * Returns the type (set or port) for the port name
+ * within the target task.
+ */
+routine port_type(
+ task : ipc_space_t;
+ port_name : port_name_t;
+ out port_type_p : port_type_t);
+
+/*
+ * Changes the name by which a port (or port set) is known to
+ * the target task.
+ */
+routine port_rename(
+ task : ipc_space_t;
+ old_name : port_name_t;
+ new_name : port_name_t);
+
+/*
+ * Allocate a new port (with all rights) in the target task.
+ * The port name in that task is returned.
+ */
+routine port_allocate(
+ task : ipc_space_t;
+ out port_name : port_name_t);
+
+/*
+ * Deallocate the port with the given name from the target task.
+ */
+routine port_deallocate(
+ task : ipc_space_t;
+ port_name : port_name_t);
+
+/*
+ * Set the number of messages that may be queued to
+ * the port in the target task with the given name
+ * before further message queueing operations block.
+ * The target task must hold receive rights for the
+ * port named.
+ */
+routine port_set_backlog(
+ task : ipc_space_t;
+ port_name : port_name_t;
+ backlog : int);
+
+/*
+ * Return information about the port with the given
+ * name in the target task. Only the ownership and
+ * receive_rights results are meaningful unless the
+ * target task holds receive rights for the port.
+ */
+routine port_status(
+ task : ipc_space_t;
+ port_name : port_name_t;
+ out enabled : port_set_name_t;
+ out num_msgs : int;
+ out backlog : int;
+ out ownership : boolean_t;
+ out receive_rights : boolean_t);
+
+/*
+ * Allocate a new port set in the target task, returning
+ * the name of that new port set. [The new set is
+ * initially empty.]
+ */
+routine port_set_allocate(
+ task : ipc_space_t;
+ out set_name : port_set_name_t);
+
+/*
+ * Deallocate the named port set from the target task.
+ * Ports that are currently members of the named port
+ * set are first removed from the set.
+ */
+routine port_set_deallocate(
+ task : ipc_space_t;
+ set_name : port_set_name_t);
+
+/*
+ * Add the named port to the port set named within
+ * the target task. [If the port currently is a member
+ * of another port set, it is removed from that set.]
+ */
+routine port_set_add(
+ task : ipc_space_t;
+ set_name : port_set_name_t;
+ port_name : port_name_t);
+
+/*
+ * Remove the named port from the port set named within
+ * the target task.
+ */
+routine port_set_remove(
+ task : ipc_space_t;
+ port_name : port_name_t);
+
+/*
+ * Returns the current set of ports that are members
+ * of the named port set in the target task.
+ */
+routine port_set_status(
+ task : ipc_space_t;
+ set_name : port_set_name_t;
+ out members : port_name_array_t);
+
+/*
+ * Insert send rights for the specified port into
+ * the target task with the specified port name.
+ * [If the name is in use, or the target task already
+ * has another name for the specified port, then
+ * the operation will fail.]
+ */
+routine port_insert_send(
+ task : ipc_space_t;
+ my_port : port_t;
+ his_name : port_name_t);
+
+/*
+ * Returns send rights for the named port in the
+ * target task, removing that port name and port
+ * send rights from the target task. [If the
+ * target task holds receive rights for this port,
+ * the operation will fail.]
+ */
+routine port_extract_send(
+ task : ipc_space_t;
+ his_name : port_name_t;
+ out his_port : port_t);
+
+/*
+ * Insert receive rights for the specified port into
+ * the target task with the specified port name.
+ * [If the name is in use, or the target task already
+ * has another name for the specified port, then
+ * the operation will fail.]
+ */
+routine port_insert_receive(
+ task : ipc_space_t;
+ my_port : port_all_t;
+ his_name : port_name_t);
+
+/*
+ * Returns receive rights for the named port in the
+ * target task, removing that port name and all port
+ * rights from the target task.
+ */
+routine port_extract_receive(
+ task : ipc_space_t;
+ his_name : port_name_t;
+ out his_port : port_all_t);
+
+#else MACH_IPC_COMPAT
+
+skip; /* old port_names */
+skip; /* old port_type */
+skip; /* old port_rename */
+skip; /* old port_allocate */
+skip; /* old port_deallocate */
+skip; /* old port_set_backlog */
+skip; /* old port_status */
+skip; /* old port_set_allocate */
+skip; /* old port_set_deallocate */
+skip; /* old port_set_add */
+skip; /* old port_set_remove */
+skip; /* old port_set_status */
+skip; /* old port_insert_send */
+skip; /* old port_extract_send */
+skip; /* old port_insert_receive */
+skip; /* old port_extract_receive */
+
+#endif MACH_IPC_COMPAT
+
+/*
+ * Map a user-defined memory object into the virtual address
+ * space of the target task. If desired (anywhere is TRUE),
+ * the kernel will find a suitable address range of the
+ * specified size; else, the specific address will be allocated.
+ *
+ * The beginning address of the range will be aligned on a virtual
+ * page boundary, be at or beyond the address specified, and
+ * meet the mask requirements (bits turned on in the mask must not
+ * be turned on in the result); the size of the range, in bytes,
+ * will be rounded up to an integral number of virtual pages.
+ *
+ * The memory in the resulting range will be associated with the
+ * specified memory object, with the beginning of the memory range
+ * referring to the specified offset into the memory object.
+ *
+ * The mapping will take the current and maximum protections and
+ * the inheritance attributes specified; see the vm_protect and
+ * vm_inherit calls for a description of these attributes.
+ *
+ * If desired (copy is TRUE), the memory range will be filled
+ * with a copy of the data from the memory object; this copy will
+ * be private to this mapping in this target task. Otherwise,
+ * the memory in this mapping will be shared with other mappings
+ * of the same memory object at the same offset (in this task or
+ * in other tasks). [The Mach kernel only enforces shared memory
+ * consistency among mappings on one host with similar page alignments.
+ * The user-defined memory manager for this object is responsible
+ * for further consistency.]
+ */
+#ifdef EMULATOR
+routine htg_vm_map(
+ target_task : vm_task_t;
+ ureplyport reply_port : mach_port_make_send_once_t;
+ inout address : vm_address_t;
+ size : vm_size_t;
+ mask : vm_address_t;
+ anywhere : boolean_t;
+ memory_object : memory_object_t;
+ offset : vm_offset_t;
+ copy : boolean_t;
+ cur_protection : vm_prot_t;
+ max_protection : vm_prot_t;
+ inheritance : vm_inherit_t);
+#else EMULATOR
+routine vm_map(
+ target_task : vm_task_t;
+ inout address : vm_address_t;
+ size : vm_size_t;
+ mask : vm_address_t;
+ anywhere : boolean_t;
+ memory_object : memory_object_t;
+ offset : vm_offset_t;
+ copy : boolean_t;
+ cur_protection : vm_prot_t;
+ max_protection : vm_prot_t;
+ inheritance : vm_inherit_t);
+#endif EMULATOR
+
+/*
+ * Indicate that a range of the specified memory object cannot
+ * be provided at this time. [Threads waiting for memory pages
+ * specified by this call will experience a memory exception.
+ * Only threads waiting at the time of the call are affected.]
+ */
+simpleroutine memory_object_data_error(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ size : vm_size_t;
+ error_value : kern_return_t);
+
+/*
+ * Make decisions regarding the use of the specified
+ * memory object.
+ */
+simpleroutine memory_object_set_attributes(
+ memory_control : memory_object_control_t;
+ object_ready : boolean_t;
+ may_cache : boolean_t;
+ copy_strategy : memory_object_copy_strategy_t);
+
+/*
+ */
+simpleroutine memory_object_destroy(
+ memory_control : memory_object_control_t;
+ reason : kern_return_t);
+
+/*
+ * Provide the data contents of a range of the given memory
+ * object, with the access restriction specified, optional
+ * precious attribute, and reply message. [Only
+ * whole virtual pages of data can be accepted; partial pages
+ * will be discarded. Data should be provided on request, but
+ * may be provided in advance as desired. When data already
+ * held by this kernel is provided again, the new data is ignored.
+ * The access restriction is the subset of {read, write, execute}
+ * which are prohibited. The kernel may not provide any data (or
+ * protection) consistency among pages with different virtual page
+ * alignments within the same object. The precious value controls
+ * how the kernel treats the data. If it is FALSE, the kernel treats
+ * its copy as a temporary and may throw it away if it hasn't been
+ * changed. If the precious value is TRUE, the kernel treats its
+ * copy as a data repository and promises to return it to the manager;
+ * the manager may tell the kernel to throw it away instead by flushing
+ * and not cleaning the data -- see memory_object_lock_request. The
+ * reply_to port is for a completion message; it will be
+ * memory_object_supply_completed.]
+ */
+
+simpleroutine memory_object_data_supply(
+ memory_control : memory_object_control_t;
+ offset : vm_offset_t;
+ data : pointer_t, Dealloc[];
+ lock_value : vm_prot_t;
+ precious : boolean_t;
+ reply_to : mach_port_t =
+ MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+
+simpleroutine memory_object_ready(
+ memory_control : memory_object_control_t;
+ may_cache : boolean_t;
+ copy_strategy : memory_object_copy_strategy_t);
+
+simpleroutine memory_object_change_attributes(
+ memory_control : memory_object_control_t;
+ may_cache : boolean_t;
+ copy_strategy : memory_object_copy_strategy_t;
+ reply_to : mach_port_t =
+ MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+
+skip; /* old host_callout_statistics_reset */
+skip; /* old port_set_select */
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Sets a backup port for the named port. The task
+ * must have receive rights for the named port.
+ * Returns the previous backup port, if any.
+ */
+
+routine port_set_backup(
+ task : ipc_space_t;
+ port_name : port_name_t;
+ backup : port_t = MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ out previous : port_t);
+
+#else MACH_IPC_COMPAT
+
+skip; /* old port_set_backup */
+
+#endif MACH_IPC_COMPAT
+
+/*
+ * Set/Get special properties of memory associated
+ * to some virtual address range, such as cachability,
+ * migrability, replicability. Machine-dependent.
+ */
+routine vm_machine_attribute(
+ target_task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ attribute : vm_machine_attribute_t;
+ inout value : vm_machine_attribute_val_t);
+
+skip; /* old host_fpa_counters_reset */
+
+/*
+ * There is no more room in this interface for additional calls.
+ */
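For reference, the vm_allocate/vm_deallocate pair declared above as used from C through the MIG-generated stubs; a minimal sketch assuming the usual <mach.h> umbrella header:

#include <mach.h>	/* assumed: mach_task_self(), vm_allocate(), vm_deallocate() */

/* Sketch only: allocate, use, and release a scratch region. */
kern_return_t
scratch_region(vm_size_t size)
{
	vm_address_t addr = 0;
	kern_return_t kr;

	/* anywhere == TRUE: let the kernel pick a zero-filled range. */
	kr = vm_allocate(mach_task_self(), &addr, size, TRUE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the memory at addr ... */

	return vm_deallocate(mach_task_self(), addr, size);
}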
diff --git a/include/mach/mach4.defs b/include/mach/mach4.defs
new file mode 100644
index 00000000..e4f363fc
--- /dev/null
+++ b/include/mach/mach4.defs
@@ -0,0 +1,82 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Matchmaker definitions file for Mach4 kernel interface.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+ mach4 4000;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+
+#ifdef MACH_PCSAMPLE
+type sampled_pc_t = struct[3] of natural_t;
+type sampled_pc_array_t = array[*:512] of sampled_pc_t;
+type sampled_pc_seqno_t = unsigned;
+type sampled_pc_flavor_t = natural_t;
+
+routine task_enable_pc_sampling(
+ host : task_t;
+ out tick : int; /* sample frequency in usecs */
+ flavor : sampled_pc_flavor_t );
+
+routine task_disable_pc_sampling(
+ host : task_t;
+ out samplecnt : int);
+
+routine task_get_sampled_pcs(
+ host : task_t;
+ inout seqno : sampled_pc_seqno_t;
+ out sampled_pcs : sampled_pc_array_t);
+
+routine thread_enable_pc_sampling(
+ host : thread_t;
+ out tick : int; /* sample frequency in usecs*/
+ flavor : sampled_pc_flavor_t );
+
+routine thread_disable_pc_sampling(
+ host : thread_t;
+ out samplecnt : int);
+
+routine thread_get_sampled_pcs(
+ host : thread_t;
+ inout seqno : sampled_pc_seqno_t;
+ out sampled_pcs : sampled_pc_array_t);
+
+
+skip /* pc_sampling reserved 1*/;
+skip /* pc_sampling reserved 2*/;
+skip /* pc_sampling reserved 3*/;
+skip /* pc_sampling reserved 4*/;
+#endif
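A minimal sketch of driving the PC-sampling calls above from C; the prototypes are assumed from the .defs signatures (out scalars become pointers) and the stubs exist only when MACH_PCSAMPLE is configured:

#include <mach.h>
#include <mach/pc_sample.h>	/* assumed home of sampled_pc_flavor_t */

/* Sketch only: enable sampling, let the task run, then read the count. */
kern_return_t
sample_task_for_a_while(task_t task, sampled_pc_flavor_t flavor)
{
	int tick, samplecnt;
	kern_return_t kr;

	kr = task_enable_pc_sampling(task, &tick, flavor);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... let the task run; tick is the sample period in usecs ... */

	return task_disable_pc_sampling(task, &samplecnt);
}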
diff --git a/include/mach/mach_host.defs b/include/mach/mach_host.defs
new file mode 100644
index 00000000..85ee4dc5
--- /dev/null
+++ b/include/mach/mach_host.defs
@@ -0,0 +1,379 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/mach_host.defs
+ *
+ * Abstract:
+ * Mach host operations support. Includes processor allocation and
+ * control.
+ */
+
+#ifdef MACH_KERNEL
+simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
+#endif
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif
+ mach_host 2600;
+
+/*
+ * Basic types
+ */
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+/*
+ * Get list of processors on this host.
+ */
+
+routine host_processors(
+ host_priv : host_priv_t;
+ out processor_list : processor_array_t);
+
+/* obsolete */
+routine yyy_host_info(
+ host : host_t;
+ flavor : int;
+ out host_info_out : host_info_t, IsLong);
+
+
+/* obsolete */
+routine yyy_processor_info(
+ processor : processor_t;
+ flavor : int;
+ out host : host_t;
+ out processor_info_out: processor_info_t, IsLong);
+
+/*
+ * Start processor.
+ */
+
+routine processor_start(
+ processor : processor_t);
+
+/*
+ * Exit processor -- may not be restartable.
+ */
+
+routine processor_exit(
+ processor : processor_t);
+
+/* obsolete */
+routine yyy_processor_control(
+ processor : processor_t;
+ processor_cmd : processor_info_t, IsLong);
+
+/*
+ * Get default processor set for host.
+ */
+routine processor_set_default(
+ host : host_t;
+ out default_set : processor_set_name_t);
+
+/*
+ * Get rights to default processor set for host.
+ * Replaced by host_processor_set_priv.
+ */
+routine xxx_processor_set_default_priv(
+ host : host_priv_t;
+ out default_set : processor_set_t);
+
+/*
+ * Create new processor set. Returns real port for manipulations,
+ * and name port for obtaining information.
+ */
+routine processor_set_create(
+ host : host_t;
+ out new_set : processor_set_t;
+ out new_name : processor_set_name_t);
+
+/*
+ * Destroy processor set.
+ */
+routine processor_set_destroy(
+ set : processor_set_t);
+
+/* obsolete */
+routine yyy_processor_set_info(
+ set_name : processor_set_name_t;
+ flavor : int;
+ out host : host_t;
+ out info_out : processor_set_info_t, IsLong);
+
+/*
+ * Assign processor to processor set.
+ */
+routine processor_assign(
+ processor : processor_t;
+ new_set : processor_set_t;
+ wait : boolean_t);
+
+/*
+ * Get current assignment for processor.
+ */
+
+routine processor_get_assignment(
+ processor : processor_t;
+ out assigned_set : processor_set_name_t);
+
+/*
+ * Assign thread to processor set.
+ */
+routine thread_assign(
+ thread : thread_t;
+ new_set : processor_set_t);
+
+/*
+ * Assign thread to default set.
+ */
+routine thread_assign_default(
+ thread : thread_t);
+
+/*
+ * Get current assignment for thread.
+ */
+routine thread_get_assignment(
+ thread : thread_t;
+ out assigned_set : processor_set_name_t);
+
+/*
+ * Assign task to processor set.
+ */
+routine task_assign(
+ task : task_t;
+ new_set : processor_set_t;
+ assign_threads : boolean_t);
+/*
+ * Assign task to default set.
+ */
+routine task_assign_default(
+ task : task_t;
+ assign_threads : boolean_t);
+
+/*
+ * Get current assignment for task.
+ */
+routine task_get_assignment(
+ task : task_t;
+ out assigned_set : processor_set_name_t);
+
+/*
+ * Get string describing current kernel version.
+ */
+routine host_kernel_version(
+ host : host_t;
+ out kernel_version : kernel_version_t);
+
+/*
+ * Set priority for thread.
+ */
+routine thread_priority(
+ thread : thread_t;
+ priority : int;
+ set_max : boolean_t);
+
+/*
+ * Set max priority for thread.
+ */
+routine thread_max_priority(
+ thread : thread_t;
+ processor_set : processor_set_t;
+ max_priority : int);
+
+/*
+ * Set task priority.
+ */
+routine task_priority(
+ task : task_t;
+ priority : int;
+ change_threads : boolean_t);
+
+/*
+ * Set max priority for processor_set.
+ */
+routine processor_set_max_priority(
+ processor_set : processor_set_t;
+ max_priority : int;
+ change_threads : boolean_t);
+
+/*
+ * Set policy for thread
+ */
+routine thread_policy(
+ thread : thread_t;
+ policy : int;
+ data : int);
+
+/*
+ * Enable policy for processor set
+ */
+routine processor_set_policy_enable(
+ processor_set : processor_set_t;
+ policy : int);
+
+/*
+ * Disable policy for processor set
+ */
+routine processor_set_policy_disable(
+ processor_set : processor_set_t;
+ policy : int;
+ change_threads : boolean_t);
+/*
+ * List all tasks in processor set.
+ */
+routine processor_set_tasks(
+ processor_set : processor_set_t;
+ out task_list : task_array_t);
+
+/*
+ * List all threads in processor set.
+ */
+routine processor_set_threads(
+ processor_set : processor_set_t;
+ out thread_list : thread_array_t);
+
+/*
+ * List all processor sets on host.
+ */
+routine host_processor_sets(
+ host : host_t;
+ out processor_sets : processor_set_name_array_t);
+
+/*
+ * Get control port for a processor set.
+ */
+routine host_processor_set_priv(
+ host_priv : host_priv_t;
+ set_name : processor_set_name_t;
+ out set : processor_set_t);
+
+routine thread_depress_abort(
+ thread : thread_t);
+
+/*
+ * Set the time on this host.
+ * Only available to privileged users.
+ */
+routine host_set_time(
+ host_priv : host_priv_t;
+ new_time : time_value_t);
+
+/*
+ * Arrange for the time on this host to be gradually changed
+ * by an adjustment value, and return the old value.
+ * Only available to privileged users.
+ */
+routine host_adjust_time(
+ host_priv : host_priv_t;
+ in new_adjustment : time_value_t;
+ out old_adjustment : time_value_t);
+
+/*
+ * Get the time on this host.
+ * Available to all.
+ */
+routine host_get_time(
+ host : host_t;
+ out current_time : time_value_t);
+
+/*
+ * Reboot this host.
+ * Only available to privileged users.
+ */
+routine host_reboot(
+ host_priv : host_priv_t;
+ options : int);
+
+/*
+ * Specify that the range of the virtual address space
+ * of the target task must not cause page faults for
+ * the indicated accesses.
+ *
+ * [ To unwire the pages, specify VM_PROT_NONE. ]
+ */
+routine vm_wire(
+ host_priv : host_priv_t;
+ task : vm_task_t;
+ address : vm_address_t;
+ size : vm_size_t;
+ access : vm_prot_t);
+
+/*
+ * Specify that the target thread must always be able
+ * to run and to allocate memory.
+ */
+routine thread_wire(
+ host_priv : host_priv_t;
+ thread : thread_t;
+ wired : boolean_t);
+
+/*
+ * Return information about this host.
+ */
+
+routine host_info(
+ host : host_t;
+ flavor : int;
+ out host_info_out : host_info_t, CountInOut);
+
+
+/*
+ * Return information about this processor.
+ */
+routine processor_info(
+ processor : processor_t;
+ flavor : int;
+ out host : host_t;
+ out processor_info_out: processor_info_t, CountInOut);
+
+/*
+ * Get information about processor set.
+ */
+routine processor_set_info(
+ set_name : processor_set_name_t;
+ flavor : int;
+ out host : host_t;
+ out info_out : processor_set_info_t, CountInOut);
+
+/*
+ * Do something machine-dependent to processor.
+ */
+routine processor_control(
+ processor : processor_t;
+ processor_cmd : processor_info_t);
+
+/*
+ * Get boot configuration information from kernel.
+ */
+routine host_get_boot_info(
+ host_priv : host_priv_t;
+ out boot_info : kernel_boot_info_t);
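As a point of reference, a hedged user-side sketch of two of the informational calls above: host_info with the CountInOut convention, and host_get_time. The HOST_BASIC_INFO flavor, its _COUNT macro, and the struct field names are assumed from <mach/host_info.h>.

    #include <stdio.h>
    #include <mach.h>
    #include <mach/host_info.h>

    void show_host(host_t host)
    {
        host_basic_info_data_t basic;
        mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
        time_value_t now;

        /* CountInOut: pass the buffer size in, get the filled-in size back. */
        if (host_info(host, HOST_BASIC_INFO, (host_info_t) &basic, &count)
                == KERN_SUCCESS)
            printf("%d of %d cpus, %u bytes of memory\n",
                   (int) basic.avail_cpus, (int) basic.max_cpus,
                   (unsigned) basic.memory_size);

        if (host_get_time(host, &now) == KERN_SUCCESS)
            printf("host time: %d.%06d\n",
                   (int) now.seconds, (int) now.microseconds);
    }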
diff --git a/include/mach/mach_norma.defs b/include/mach/mach_norma.defs
new file mode 100644
index 00000000..01b38723
--- /dev/null
+++ b/include/mach/mach_norma.defs
@@ -0,0 +1,120 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif
+#if KERNEL_SERVER
+ KernelServer
+#endif
+ mach_norma 555000;
+#ifdef KERNEL_USER
+userprefix r_;
+#endif
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+skip;
+
+/*
+ * Specify a node upon which child tasks will be created.
+ * This call exists only to allow testing with unmodified servers.
+ * Server developers should use norma_task_create instead.
+ */
+routine task_set_child_node(
+ target_task : task_t;
+ child_node : int);
+
+/*
+ * THIS CALL WILL BE ELIMINATED.
+ * Use norma_port_location_hint(,mach_task_self(),) instead.
+ */
+routine norma_node_self(
+ host : host_t;
+ out node : int);
+
+skip;
+
+skip;
+
+skip;
+
+/*
+ * (Used to be called task_create_remote.)
+ * Create a task on the given node, possibly inheriting memory.
+ * Same inheritance semantics as task_create, including inheritance
+ * of initial ports and emulation library.
+ * Setting child_node to node_self forces local task creation.
+ */
+routine norma_task_create(
+ target_task : task_t;
+ inherit_memory : boolean_t;
+ child_node : int;
+ out child_task : task_t);
+
+/*
+ * Get a given special port for a given node.
+ * Norma special ports are defined in norma_special_ports.h;
+ * examples include the master device port.
+ * There are a limited number of slots available for system servers.
+ *
+ * XXX MAX_SPECIAL_ID should be defined in norma_special_ports.h,
+ * XXX not just in norma/ipc_special.c!
+ * (MAX_SPECIAL_ID specifies total number of slots available)
+ */
+routine norma_get_special_port(
+ host_priv : host_priv_t;
+ node : int;
+ which : int;
+ out port : mach_port_t);
+
+/*
+ * Set a given special port for a given node.
+ * See norma_get_special_port.
+ */
+routine norma_set_special_port(
+ host_priv : host_priv_t;
+ which : int;
+ port : mach_port_t);
+
+skip;
+
+skip;
+
+skip;
+
+/*
+ * Return best guess of port's current location.
+ * Guaranteed to be a node where the port once was.
+ * Guaranteed to be accurate if port has never moved.
+ * Can be used to determine residence node for hosts, tasks, threads, etc.
+ */
+routine norma_port_location_hint(
+ task : task_t;
+ port : mach_port_t;
+ out node : int);
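A hedged sketch of the remote task creation call above, assuming the usual MIG-generated user prototype; the node number is whatever the NORMA cluster configuration assigns.

    /* Create an empty task on the given node and return its port. */
    kern_return_t create_task_on_node(int node, task_t *child)
    {
        /* inherit_memory == FALSE: the child starts with an empty address space. */
        return norma_task_create(mach_task_self(), FALSE, node, child);
    }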
diff --git a/include/mach/mach_param.h b/include/mach/mach_param.h
new file mode 100644
index 00000000..ce02ed83
--- /dev/null
+++ b/include/mach/mach_param.h
@@ -0,0 +1,53 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/mach_param.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1986
+ *
+ * Mach system sizing parameters
+ */
+
+#ifndef _MACH_MACH_PARAM_H_
+#define _MACH_MACH_PARAM_H_
+
+#ifdef MACH_KERNEL
+#include <mach_ipc_compat.h>
+#endif /* MACH_KERNEL */
+
+#define TASK_PORT_REGISTER_MAX 4 /* Number of "registered" ports */
+
+
+/* Definitions for the old IPC interface. */
+
+#if MACH_IPC_COMPAT
+
+#define PORT_BACKLOG_DEFAULT 5
+#define PORT_BACKLOG_MAX 16
+
+#endif /* MACH_IPC_COMPAT */
+
+#endif /* _MACH_MACH_PARAM_H_ */
diff --git a/include/mach/mach_port.defs b/include/mach/mach_port.defs
new file mode 100644
index 00000000..e1f45e3c
--- /dev/null
+++ b/include/mach/mach_port.defs
@@ -0,0 +1,346 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/mach_port.defs
+ * Author: Rich Draves
+ *
+ * Copyright (c) 1989 Richard P. Draves, Jr.
+ *
+ * Exported kernel calls.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif
+ mach_port 3200;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+/*
+ * Returns the set of port and port set names
+ * to which the target task has access, along with
+ * the type (set or port) for each name.
+ */
+
+routine mach_port_names(
+ task : ipc_space_t;
+ out names : mach_port_name_array_t =
+ ^array[] of mach_port_name_t
+ ctype: mach_port_array_t;
+ out types : mach_port_type_array_t =
+ ^array[] of mach_port_type_t);
+
+/*
+ * Returns the type (set or port) for the port name
+ * within the target task. Also indicates whether
+ * there is a dead-name request for the name.
+ */
+
+routine mach_port_type(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out ptype : mach_port_type_t);
+
+/*
+ * Changes the name by which a port (or port set) is known to
+ * the target task. The new name can't be in use. The
+ * old name becomes available for recycling.
+ */
+
+routine mach_port_rename(
+ task : ipc_space_t;
+ old_name : mach_port_name_t;
+ new_name : mach_port_name_t);
+
+/*
+ * Allocates the specified kind of object, with the given name.
+ * The right must be one of
+ * MACH_PORT_RIGHT_RECEIVE
+ * MACH_PORT_RIGHT_PORT_SET
+ * MACH_PORT_RIGHT_DEAD_NAME
+ * New port sets are empty. New ports don't have any
+ * send/send-once rights or queued messages. The make-send
+ * count is zero and their queue limit is MACH_PORT_QLIMIT_DEFAULT.
+ * New sets, ports, and dead names have one user reference.
+ */
+
+routine mach_port_allocate_name(
+ task : ipc_space_t;
+ right : mach_port_right_t;
+ name : mach_port_name_t);
+
+/*
+ * Allocates the specified kind of object.
+ * The right must be one of
+ * MACH_PORT_RIGHT_RECEIVE
+ * MACH_PORT_RIGHT_PORT_SET
+ * MACH_PORT_RIGHT_DEAD_NAME
+ * Like port_allocate_name, but the kernel picks a name.
+ * It can use any name not associated with a right.
+ */
+
+routine mach_port_allocate(
+ task : ipc_space_t;
+ right : mach_port_right_t;
+ out name : mach_port_name_t);
+
+/*
+ * Destroys all rights associated with the name and makes it
+ * available for recycling immediately. The name can be a
+ * port (possibly with multiple user refs), a port set, or
+ * a dead name (again, with multiple user refs).
+ */
+
+routine mach_port_destroy(
+ task : ipc_space_t;
+ name : mach_port_name_t);
+
+/*
+ * Releases one send/send-once/dead-name user ref.
+ * Just like mach_port_mod_refs -1, but deduces the
+ * correct type of right. This allows a user task
+ * to release a ref for a port without worrying
+ * about whether the port has died or not.
+ */
+
+routine mach_port_deallocate(
+ task : ipc_space_t;
+ name : mach_port_name_t);
+
+/*
+ * A port set always has one user ref.
+ * A send-once right always has one user ref.
+ * A dead name always has one or more user refs.
+ * A send right always has one or more user refs.
+ * A receive right always has one user ref.
+ * The right must be one of
+ * MACH_PORT_RIGHT_RECEIVE
+ * MACH_PORT_RIGHT_PORT_SET
+ * MACH_PORT_RIGHT_DEAD_NAME
+ * MACH_PORT_RIGHT_SEND
+ * MACH_PORT_RIGHT_SEND_ONCE
+ */
+
+routine mach_port_get_refs(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ right : mach_port_right_t;
+ out refs : mach_port_urefs_t);
+
+/*
+ * The delta is a signed change to the task's
+ * user ref count for the right. Only dead names
+ * and send rights can have a positive delta.
+ * The resulting user ref count can't be negative.
+ * If it is zero, the right is deallocated.
+ * If the name isn't a composite right, it becomes
+ * available for recycling. The right must be one of
+ * MACH_PORT_RIGHT_RECEIVE
+ * MACH_PORT_RIGHT_PORT_SET
+ * MACH_PORT_RIGHT_DEAD_NAME
+ * MACH_PORT_RIGHT_SEND
+ * MACH_PORT_RIGHT_SEND_ONCE
+ */
+
+routine mach_port_mod_refs(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ right : mach_port_right_t;
+ delta : mach_port_delta_t);
+
+/*
+ * Temporary compatibility call.
+ */
+
+routine old_mach_port_get_receive_status(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out status : old_mach_port_status_t);
+
+/*
+ * Only valid for receive rights.
+ * Sets the queue-limit for the port.
+ * The limit must be
+ * 1 <= qlimit <= MACH_PORT_QLIMIT_MAX
+ */
+
+routine mach_port_set_qlimit(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ qlimit : mach_port_msgcount_t);
+
+/*
+ * Only valid for receive rights.
+ * Sets the make-send count for the port.
+ */
+
+routine mach_port_set_mscount(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ mscount : mach_port_mscount_t);
+
+/*
+ * Only valid for port sets. Returns a list of
+ * the members.
+ */
+
+routine mach_port_get_set_status(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out members : mach_port_name_array_t =
+ ^array[] of mach_port_name_t
+ ctype: mach_port_array_t);
+
+/*
+ * Puts the member port (the task must have receive rights)
+ * into the after port set. (Or removes it from any port set
+ * if after is MACH_PORT_NULL.) If the port is already in
+ * a set, does an atomic move.
+ */
+
+routine mach_port_move_member(
+ task : ipc_space_t;
+ member : mach_port_name_t;
+ after : mach_port_name_t);
+
+/*
+ * Requests a notification from the kernel. The request
+ * must supply the send-once right which is used for
+ * the notification. If a send-once right was previously
+ * registered, it is returned. The msg_id must be one of
+ * MACH_NOTIFY_PORT_DESTROYED (receive rights)
+ * MACH_NOTIFY_DEAD_NAME (send/receive/send-once rights)
+ * MACH_NOTIFY_NO_SENDERS (receive rights)
+ *
+ * The sync value specifies whether a notification should
+ * get sent immediately, if appropriate. The exact meaning
+ * depends on the notification:
+ * MACH_NOTIFY_PORT_DESTROYED: must be zero.
+ * MACH_NOTIFY_DEAD_NAME: if non-zero, then name can be dead,
+ * and the notification gets sent immediately.
+ * If zero, then name can't be dead.
+ * MACH_NOTIFY_NO_SENDERS: the notification gets sent
+ * immediately if the current mscount is greater
+ * than or equal to the sync value and there are no
+ * extant send rights.
+ */
+
+routine mach_port_request_notification(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ id : mach_msg_id_t;
+ sync : mach_port_mscount_t;
+ notify : mach_port_send_once_t;
+ out previous : mach_port_send_once_t);
+
+/*
+ * Inserts the specified rights into the target task,
+ * using the specified name. If inserting send/receive
+ * rights and the task already has send/receive rights
+ * for the port, then the names must agree. In any case,
+ * the task gains a user ref for the port.
+ */
+
+routine mach_port_insert_right(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ poly : mach_port_poly_t);
+
+/*
+ * Returns the specified right for the named port
+ * in the target task, extracting that right from
+ * the target task. The target task loses a user
+ * ref and the name may be available for recycling.
+ * msgt_name must be one of
+ * MACH_MSG_TYPE_MOVE_RECEIVE
+ * MACH_MSG_TYPE_COPY_SEND
+ * MACH_MSG_TYPE_MAKE_SEND
+ * MACH_MSG_TYPE_MOVE_SEND
+ * MACH_MSG_TYPE_MAKE_SEND_ONCE
+ * MACH_MSG_TYPE_MOVE_SEND_ONCE
+ */
+
+routine mach_port_extract_right(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ msgt_name : mach_msg_type_name_t;
+ out poly : mach_port_poly_t);
+
+/*
+ * The task must have receive rights for the named port.
+ * Returns a status structure (see mach/port.h).
+ */
+
+routine mach_port_get_receive_status(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out status : mach_port_status_t);
+
+/*
+ * Only valid for receive rights.
+ * Sets the sequence number for the port.
+ */
+
+routine mach_port_set_seqno(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ seqno : mach_port_seqno_t);
+
+#ifdef MIGRATING_THREADS
+/*
+ * Only valid for receive rights.
+ * Set the user-mode entry info for RPCs coming through this port.
+ * Do this BEFORE attaching an ActPool to this port,
+ * unless you can be sure no RPCs will be coming through it yet.
+ */
+
+routine mach_port_set_rpcinfo(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ rpc_info : thread_info_t); /* XXX */
+
+/*
+ * Only valid for receive rights.
+ * Create a new activation for migrating RPC, and attach it to the port's ActPool.
+ * Create an ActPool for the port if it doesn't already have one.
+ * Supply a stack and receive memory buffer.
+ */
+
+routine mach_port_create_act(
+ task : task_t;
+ name : mach_port_name_t;
+ user_stack : vm_offset_t;
+ user_rbuf : vm_offset_t;
+ user_rbuf_size : vm_size_t;
+ out new_act : thread_t);
+
+#endif /* MIGRATING_THREADS */
+
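For concreteness, a hedged sketch of the most common sequence built from the calls above: allocate a receive right, put it in a port set, and manufacture a send right under the same name. The right-plus-disposition expansion of the mach_port_poly_t parameter in mach_port_insert_right is assumed from the conventional MIG output.

    #include <mach.h>

    kern_return_t make_service_port(mach_port_t *portp, mach_port_t *psetp)
    {
        mach_port_t task = mach_task_self();
        kern_return_t kr;

        kr = mach_port_allocate(task, MACH_PORT_RIGHT_RECEIVE, portp);
        if (kr != KERN_SUCCESS)
            return kr;

        kr = mach_port_allocate(task, MACH_PORT_RIGHT_PORT_SET, psetp);
        if (kr == KERN_SUCCESS)
            kr = mach_port_move_member(task, *portp, *psetp);

        /* Give ourselves a send right under the same name as the receive right. */
        if (kr == KERN_SUCCESS)
            kr = mach_port_insert_right(task, *portp, *portp,
                                        MACH_MSG_TYPE_MAKE_SEND);
        return kr;
    }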
diff --git a/include/mach/mach_traps.h b/include/mach/mach_traps.h
new file mode 100644
index 00000000..3303e320
--- /dev/null
+++ b/include/mach/mach_traps.h
@@ -0,0 +1,132 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Definitions of general Mach system traps.
+ *
+ * IPC traps are defined in <mach/message.h>.
+ * Kernel RPC functions are defined in <mach/mach_interface.h>.
+ */
+
+#ifndef _MACH_MACH_TRAPS_H_
+#define _MACH_MACH_TRAPS_H_
+
+#ifdef MACH_KERNEL
+#include <mach_ipc_compat.h>
+#endif /* MACH_KERNEL */
+
+#include <mach/port.h>
+
+mach_port_t mach_reply_port
+#ifdef LINTLIBRARY
+ ()
+ { return MACH_PORT_NULL; }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+
+mach_port_t mach_thread_self
+#ifdef LINTLIBRARY
+ ()
+ { return MACH_PORT_NULL; }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+
+#ifdef __386BSD__
+#undef mach_task_self
+#endif
+mach_port_t mach_task_self
+#ifdef LINTLIBRARY
+ ()
+ { return MACH_PORT_NULL; }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+
+mach_port_t mach_host_self
+#ifdef LINTLIBRARY
+ ()
+ { return MACH_PORT_NULL; }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+
+
+/* Definitions for the old IPC interface. */
+
+#if MACH_IPC_COMPAT
+
+port_t task_self
+#ifdef LINTLIBRARY
+ ()
+ { return(PORT_NULL); }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+
+port_t task_notify
+#ifdef LINTLIBRARY
+ ()
+ { return(PORT_NULL); }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+
+port_t thread_self
+#ifdef LINTLIBRARY
+ ()
+ { return(PORT_NULL); }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+
+port_t thread_reply
+#ifdef LINTLIBRARY
+ ()
+ { return(PORT_NULL); }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+
+port_t host_self
+#ifdef LINTLIBRARY
+ ()
+ { return(PORT_NULL); }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+
+port_t host_priv_self
+#ifdef LINTLIBRARY
+ ()
+ { return(PORT_NULL); }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+
+#endif /* MACH_IPC_COMPAT */
+
+#endif /* _MACH_MACH_TRAPS_H_ */
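A hedged usage note for the traps above: mach_task_self() is commonly treated as a borrowed name, while mach_thread_self(), mach_host_self() and mach_reply_port() hand back rights the caller is expected to release when finished (a convention assumed here, not spelled out in this header).

    #include <mach.h>

    void identify_self(void)
    {
        mach_port_t task   = mach_task_self();    /* borrowed; nothing to release */
        mach_port_t thread = mach_thread_self();  /* new send right */
        mach_port_t host   = mach_host_self();    /* new send right */
        mach_port_t reply  = mach_reply_port();   /* new receive right */

        /* ... use the ports ... */

        mach_port_deallocate(task, thread);
        mach_port_deallocate(task, host);
        mach_port_destroy(task, reply);   /* receive right: destroy, not deallocate */
    }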
diff --git a/include/mach/mach_types.defs b/include/mach/mach_types.defs
new file mode 100644
index 00000000..69522723
--- /dev/null
+++ b/include/mach/mach_types.defs
@@ -0,0 +1,249 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994-1988 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach kernel interface type declarations
+ */
+
+#ifndef _MACH_MACH_TYPES_DEFS_
+#define _MACH_MACH_TYPES_DEFS_
+
+/*
+ * For KernelServer and KernelUser interfaces, Mig will
+ * automagically use ipc_port_t instead of mach_port_t
+ * on the kernel side of the interface. For example,
+ * convert_task_to_port really returns ipc_port_t.
+ * Doing this in Mig saves many explicit conditional
+ * cusertype/cservertype declarations.
+ *
+ * Mig doesn't translate the components of an array.
+ * For example, Mig won't use the thread_t translations
+ * to translate a thread_array_t argument.
+ */
+
+#include <mach/std_types.defs>
+#if KERNEL_SERVER
+#include <norma_vm.h>
+#endif /* KERNEL_SERVER */
+
+type mach_port_status_t = struct[9] of integer_t;
+
+type old_mach_port_status_t = struct[8] of integer_t; /* compatibility */
+
+type task_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: task_t convert_port_to_task(mach_port_t)
+ outtran: mach_port_t convert_task_to_port(task_t)
+ destructor: task_deallocate(task_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+#ifdef MIGRATING_THREADS
+#if KERNEL
+/* What the conventional external Mach interfaces see as a thread_t
+ is really an act_t within the kernel. */
+#define thread_t act_t
+#define convert_port_to_thread convert_port_to_act
+#define convert_thread_to_port convert_act_to_port
+#define thread_deallocate act_deallocate
+#endif /* KERNEL */
+#endif /* MIGRATING_THREADS */
+
+type thread_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: thread_t convert_port_to_thread(mach_port_t)
+ outtran: mach_port_t convert_thread_to_port(thread_t)
+ destructor: thread_deallocate(thread_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type thread_state_t = array[*:1024] of natural_t;
+
+type task_array_t = ^array[] of task_t;
+type thread_array_t = ^array[] of thread_t;
+
+type vm_task_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: vm_map_t convert_port_to_map(mach_port_t)
+ destructor: vm_map_deallocate(vm_map_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type ipc_space_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: ipc_space_t convert_port_to_space(mach_port_t)
+ destructor: space_deallocate(ipc_space_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type vm_address_t = natural_t;
+type vm_offset_t = natural_t;
+type vm_size_t = natural_t;
+type vm_prot_t = int;
+type vm_inherit_t = int;
+type vm_statistics_data_t = struct[13] of integer_t;
+type vm_machine_attribute_t = int;
+type vm_machine_attribute_val_t = int;
+
+type thread_info_t = array[*:1024] of natural_t;
+type thread_basic_info_data_t = struct[11] of integer_t;
+type thread_sched_info_data_t = struct[7] of integer_t;
+
+type task_info_t = array[*:1024] of natural_t;
+type task_basic_info_data_t = struct[8] of integer_t;
+type task_events_info = struct[7] of natural_t;
+type task_thread_times_info_data_t = struct[4] of integer_t;
+
+
+type memory_object_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: ipc_port_t null_conversion(mach_port_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type memory_object_control_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+#if NORMA_VM
+ intran: mach_xmm_obj_t xmm_kobj_lookup(mach_port_t)
+#else /* NORMA_VM */
+ intran: vm_object_t vm_object_lookup(mach_port_t)
+#endif /* NORMA_VM */
+#endif /* KERNEL_SERVER */
+ ;
+
+type memory_object_name_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: vm_object_t vm_object_lookup_name(mach_port_t)
+ destructor: vm_object_deallocate(vm_object_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type memory_object_copy_strategy_t = int;
+type memory_object_return_t = int;
+
+type machine_info_data_t = struct[5] of integer_t;
+type machine_slot_data_t = struct[8] of integer_t;
+
+type host_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: host_t convert_port_to_host(mach_port_t)
+ outtran: mach_port_t convert_host_to_port(host_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type host_priv_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: host_t convert_port_to_host_priv(mach_port_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type host_info_t = array[*:1024] of natural_t;
+type host_basic_info_data_t = struct[5] of integer_t;
+type host_sched_info_data_t = struct[2] of integer_t;
+type host_load_info_data_t = struct[6] of integer_t;
+
+
+type processor_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: processor_t convert_port_to_processor(mach_port_t)
+ outtran: mach_port_t convert_processor_to_port(processor_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type processor_array_t = ^array[] of processor_t;
+type processor_info_t = array[*:1024] of natural_t;
+type processor_basic_info_data_t = struct[5] of integer_t;
+
+
+type processor_set_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: processor_set_t convert_port_to_pset(mach_port_t)
+ outtran: mach_port_t convert_pset_to_port(processor_set_t)
+ destructor: pset_deallocate(processor_set_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type processor_set_array_t = ^array[] of processor_set_t;
+
+type processor_set_name_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: processor_set_t convert_port_to_pset_name(mach_port_t)
+ outtran: mach_port_t convert_pset_name_to_port(processor_set_t)
+ destructor: pset_deallocate(processor_set_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+type processor_set_name_array_t = ^array[] of processor_set_name_t;
+
+type processor_set_info_t = array[*:1024] of natural_t;
+type processor_set_basic_info_data_t = struct[5] of integer_t;
+type processor_set_sched_info_data_t = struct[2] of integer_t;
+
+
+type kernel_version_t = (MACH_MSG_TYPE_STRING, 512*8);
+
+type kernel_boot_info_t = (MACH_MSG_TYPE_STRING, 4096*8);
+
+type time_value_t = struct[2] of integer_t;
+
+type emulation_vector_t = ^array[] of vm_offset_t;
+
+type xxx_emulation_vector_t = array[*:1024] of vm_offset_t
+ ctype: emulation_vector_t;
+ /* XXX compatibility */
+
+type rpc_signature_info_t = array[*:1024] of int;
+
+#if KERNEL_SERVER
+simport <kern/ipc_kobject.h>; /* for null conversion */
+simport <kern/ipc_tt.h>; /* for task/thread conversion */
+simport <kern/ipc_host.h>; /* for host/processor/pset conversions */
+simport <kern/task.h>; /* for task_t */
+simport <kern/thread.h>; /* for thread_t */
+simport <kern/host.h>; /* for host_t */
+simport <kern/processor.h>; /* for processor_t, processor_set_t */
+simport <vm/vm_object.h>; /* for vm_object_t */
+simport <vm/vm_map.h>; /* for vm_map_t */
+simport <ipc/ipc_space.h>; /* for ipc_space_t */
+#endif /* KERNEL_SERVER */
+
+import <mach/mach_types.h>;
+
+#endif /* _MACH_MACH_TYPES_DEFS_ */
diff --git a/include/mach/mach_types.h b/include/mach/mach_types.h
new file mode 100644
index 00000000..1fa32923
--- /dev/null
+++ b/include/mach/mach_types.h
@@ -0,0 +1,93 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/mach_types.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1986
+ *
+ * Mach external interface definitions.
+ *
+ */
+
+#ifndef _MACH_MACH_TYPES_H_
+#define _MACH_MACH_TYPES_H_
+
+#include <mach/host_info.h>
+#include <mach/machine.h>
+#include <mach/machine/vm_types.h>
+#include <mach/memory_object.h>
+#include <mach/pc_sample.h>
+#include <mach/port.h>
+#include <mach/processor_info.h>
+#include <mach/task_info.h>
+#include <mach/task_special_ports.h>
+#include <mach/thread_info.h>
+#include <mach/thread_special_ports.h>
+#include <mach/thread_status.h>
+#include <mach/time_value.h>
+#include <mach/vm_attributes.h>
+#include <mach/vm_inherit.h>
+#include <mach/vm_prot.h>
+#include <mach/vm_statistics.h>
+
+#ifdef MACH_KERNEL
+#include <kern/task.h> /* for task_array_t */
+#include <kern/thread.h> /* for thread_array_t */
+#include <kern/processor.h> /* for processor_array_t,
+ processor_set_array_t,
+ processor_set_name_array_t */
+#include <kern/syscall_emulation.h>
+ /* for emulation_vector_t */
+#include <norma_vm.h>
+#if NORMA_VM
+typedef struct xmm_obj *mach_xmm_obj_t;
+extern mach_xmm_obj_t xmm_kobj_lookup();
+#endif /* NORMA_VM */
+#else /* MACH_KERNEL */
+typedef mach_port_t task_t;
+typedef task_t *task_array_t;
+typedef task_t vm_task_t;
+typedef task_t ipc_space_t;
+typedef mach_port_t thread_t;
+typedef thread_t *thread_array_t;
+typedef mach_port_t host_t;
+typedef mach_port_t host_priv_t;
+typedef mach_port_t processor_t;
+typedef mach_port_t *processor_array_t;
+typedef mach_port_t processor_set_t;
+typedef mach_port_t processor_set_name_t;
+typedef mach_port_t *processor_set_array_t;
+typedef mach_port_t *processor_set_name_array_t;
+typedef vm_offset_t *emulation_vector_t;
+#endif /* MACH_KERNEL */
+
+/*
+ * Backwards compatibility, for those programs written
+ * before mach/{std,mach}_types.{defs,h} were set up.
+ */
+#include <mach/std_types.h>
+
+#endif /* _MACH_MACH_TYPES_H_ */
diff --git a/include/mach/machine.h b/include/mach/machine.h
new file mode 100644
index 00000000..a6100a24
--- /dev/null
+++ b/include/mach/machine.h
@@ -0,0 +1,267 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* File: machine.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Machine independent machine abstraction.
+ */
+
+#ifndef _MACH_MACHINE_H_
+#define _MACH_MACHINE_H_
+
+#ifdef MACH_KERNEL
+#include <cpus.h>
+#endif /* MACH_KERNEL */
+
+#include <mach/machine/vm_types.h>
+#include <mach/boolean.h>
+
+/*
+ * For each host, there is a maximum possible number of
+ * cpus that may be available in the system. This is the
+ * compile-time constant NCPUS, which is defined in cpus.h.
+ *
+ * In addition, there is a machine_slot specifier for each
+ * possible cpu in the system.
+ */
+
+struct machine_info {
+ integer_t major_version; /* kernel major version id */
+ integer_t minor_version; /* kernel minor version id */
+ integer_t max_cpus; /* max number of cpus compiled */
+ integer_t avail_cpus; /* number actually available */
+ vm_size_t memory_size; /* size of memory in bytes */
+};
+
+typedef struct machine_info *machine_info_t;
+typedef struct machine_info machine_info_data_t; /* bogus */
+
+typedef integer_t cpu_type_t;
+typedef integer_t cpu_subtype_t;
+
+#define CPU_STATE_MAX 3
+
+#define CPU_STATE_USER 0
+#define CPU_STATE_SYSTEM 1
+#define CPU_STATE_IDLE 2
+
+struct machine_slot {
+/*boolean_t*/integer_t is_cpu; /* is there a cpu in this slot? */
+ cpu_type_t cpu_type; /* type of cpu */
+ cpu_subtype_t cpu_subtype; /* subtype of cpu */
+/*boolean_t*/integer_t running; /* is cpu running */
+ integer_t cpu_ticks[CPU_STATE_MAX];
+ integer_t clock_freq; /* clock interrupt frequency */
+};
+
+typedef struct machine_slot *machine_slot_t;
+typedef struct machine_slot machine_slot_data_t; /* bogus */
+
+#ifdef MACH_KERNEL
+extern struct machine_info machine_info;
+extern struct machine_slot machine_slot[NCPUS];
+#endif /* MACH_KERNEL */
+
+/*
+ * Machine types known by all.
+ *
+ * When adding new types & subtypes, please also update slot_name.c
+ * in the libmach sources.
+ */
+
+#define CPU_TYPE_VAX ((cpu_type_t) 1)
+#define CPU_TYPE_ROMP ((cpu_type_t) 2)
+#define CPU_TYPE_MC68020 ((cpu_type_t) 3)
+#define CPU_TYPE_NS32032 ((cpu_type_t) 4)
+#define CPU_TYPE_NS32332 ((cpu_type_t) 5)
+#define CPU_TYPE_NS32532 ((cpu_type_t) 6)
+#define CPU_TYPE_I386 ((cpu_type_t) 7)
+#define CPU_TYPE_MIPS ((cpu_type_t) 8)
+#define CPU_TYPE_MC68030 ((cpu_type_t) 9)
+#define CPU_TYPE_MC68040 ((cpu_type_t) 10)
+#define CPU_TYPE_HPPA ((cpu_type_t) 11)
+#define CPU_TYPE_ARM ((cpu_type_t) 12)
+#define CPU_TYPE_MC88000 ((cpu_type_t) 13)
+#define CPU_TYPE_SPARC ((cpu_type_t) 14)
+#define CPU_TYPE_I860 ((cpu_type_t) 15)
+#define CPU_TYPE_ALPHA ((cpu_type_t) 16)
+
+/*
+ * Machine subtypes (these are defined here, instead of in a machine
+ * dependent directory, so that any program can get all definitions
+ * regardless of where it is compiled).
+ */
+
+/*
+ * VAX subtypes (these do *not* necessarily conform to the actual cpu
+ * ID assigned by DEC available via the SID register).
+ */
+
+#define CPU_SUBTYPE_VAX780 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_VAX785 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_VAX750 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_VAX730 ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_UVAXI ((cpu_subtype_t) 5)
+#define CPU_SUBTYPE_UVAXII ((cpu_subtype_t) 6)
+#define CPU_SUBTYPE_VAX8200 ((cpu_subtype_t) 7)
+#define CPU_SUBTYPE_VAX8500 ((cpu_subtype_t) 8)
+#define CPU_SUBTYPE_VAX8600 ((cpu_subtype_t) 9)
+#define CPU_SUBTYPE_VAX8650 ((cpu_subtype_t) 10)
+#define CPU_SUBTYPE_VAX8800 ((cpu_subtype_t) 11)
+#define CPU_SUBTYPE_UVAXIII ((cpu_subtype_t) 12)
+
+/*
+ * ROMP subtypes.
+ */
+
+#define CPU_SUBTYPE_RT_PC ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_RT_APC ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_RT_135 ((cpu_subtype_t) 3)
+
+/*
+ * 68020 subtypes.
+ */
+
+#define CPU_SUBTYPE_SUN3_50 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_SUN3_160 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_SUN3_260 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_SUN3_110 ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_SUN3_60 ((cpu_subtype_t) 5)
+
+#define CPU_SUBTYPE_HP_320 ((cpu_subtype_t) 6)
+ /* 16.67 Mhz HP 300 series, custom MMU [HP 320] */
+#define CPU_SUBTYPE_HP_330 ((cpu_subtype_t) 7)
+ /* 16.67 Mhz HP 300 series, MC68851 MMU [HP 318,319,330,349] */
+#define CPU_SUBTYPE_HP_350 ((cpu_subtype_t) 8)
+ /* 25.00 Mhz HP 300 series, custom MMU [HP 350] */
+
+/*
+ * 32032/32332/32532 subtypes.
+ */
+
+#define CPU_SUBTYPE_MMAX_DPC ((cpu_subtype_t) 1) /* 032 CPU */
+#define CPU_SUBTYPE_SQT ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_MMAX_APC_FPU ((cpu_subtype_t) 3) /* 32081 FPU */
+#define CPU_SUBTYPE_MMAX_APC_FPA ((cpu_subtype_t) 4) /* Weitek FPA */
+#define CPU_SUBTYPE_MMAX_XPC ((cpu_subtype_t) 5) /* 532 CPU */
+#define CPU_SUBTYPE_PC532 ((cpu_subtype_t) 6) /* pc532 board */
+
+/*
+ * 80386/80486 subtypes.
+ */
+
+#define CPU_SUBTYPE_AT386 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_EXL ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_iPSC386 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_SYMMETRY ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_PS2 ((cpu_subtype_t) 5) /* PS/2 w/ MCA */
+
+/*
+ * Mips subtypes.
+ */
+
+#define CPU_SUBTYPE_MIPS_R2300 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_MIPS_R2600 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_MIPS_R2800 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_MIPS_R2000a ((cpu_subtype_t) 4) /* pmax */
+#define CPU_SUBTYPE_MIPS_R2000 ((cpu_subtype_t) 5)
+#define CPU_SUBTYPE_MIPS_R3000a ((cpu_subtype_t) 6) /* 3max */
+#define CPU_SUBTYPE_MIPS_R3000 ((cpu_subtype_t) 7)
+
+/*
+ * MC68030 subtypes.
+ */
+
+#define CPU_SUBTYPE_NeXT ((cpu_subtype_t) 1)
+ /* NeXT thinks MC68030 is 6 rather than 9 */
+#define CPU_SUBTYPE_HP_340 ((cpu_subtype_t) 2)
+ /* 16.67 Mhz HP 300 series [HP 332,340] */
+#define CPU_SUBTYPE_HP_360 ((cpu_subtype_t) 3)
+ /* 25.00 Mhz HP 300 series [HP 360] */
+#define CPU_SUBTYPE_HP_370 ((cpu_subtype_t) 4)
+ /* 33.33 Mhz HP 300 series [HP 370] */
+
+/*
+ * HPPA subtypes.
+ */
+
+#define CPU_SUBTYPE_HPPA_825 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_HPPA_835 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_HPPA_840 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_HPPA_850 ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_HPPA_855 ((cpu_subtype_t) 5)
+
+/*
+ * ARM subtypes.
+ */
+
+#define CPU_SUBTYPE_ARM_A500_ARCH ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_ARM_A500 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_ARM_A440 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_ARM_M4 ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_ARM_A680 ((cpu_subtype_t) 5)
+
+/*
+ * MC88000 subtypes.
+ */
+
+#define CPU_SUBTYPE_MMAX_JPC ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_LUNA88K ((cpu_subtype_t) 2)
+
+/*
+ * Sparc subtypes.
+ */
+
+#define CPU_SUBTYPE_SUN4_260 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_SUN4_110 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_SUN4_330 ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_SUN4C_60 ((cpu_subtype_t) 4)
+#define CPU_SUBTYPE_SUN4C_65 ((cpu_subtype_t) 5)
+#define CPU_SUBTYPE_SUN4C_20 ((cpu_subtype_t) 6)
+#define CPU_SUBTYPE_SUN4C_30 ((cpu_subtype_t) 7)
+#define CPU_SUBTYPE_SUN4C_40 ((cpu_subtype_t) 8)
+#define CPU_SUBTYPE_SUN4C_50 ((cpu_subtype_t) 9)
+#define CPU_SUBTYPE_SUN4C_75 ((cpu_subtype_t) 10)
+
+/*
+ * i860 subtypes.
+ */
+
+#define CPU_SUBTYPE_iPSC860 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_OKI860 ((cpu_subtype_t) 2)
+
+/*
+ * Alpha subtypes.
+ */
+
+#define CPU_SUBTYPE_ALPHA_EV3 ((cpu_subtype_t) 1)
+#define CPU_SUBTYPE_ALPHA_EV4 ((cpu_subtype_t) 2)
+#define CPU_SUBTYPE_ALPHA_ISP ((cpu_subtype_t) 3)
+#define CPU_SUBTYPE_ALPHA_21064 ((cpu_subtype_t) 4)
+
+
+#endif /* _MACH_MACHINE_H_ */
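Purely as an illustration of how these constants get consumed, a small hypothetical helper (this is not the libmach slot_name() routine the comment above refers to):

    #include <mach/machine.h>

    const char *cpu_type_name(cpu_type_t type)
    {
        switch (type) {
        case CPU_TYPE_VAX:    return "vax";
        case CPU_TYPE_I386:   return "i386";
        case CPU_TYPE_MIPS:   return "mips";
        case CPU_TYPE_SPARC:  return "sparc";
        case CPU_TYPE_ALPHA:  return "alpha";
        default:              return "unknown";
        }
    }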
diff --git a/include/mach/macro_help.h b/include/mach/macro_help.h
new file mode 100644
index 00000000..f041e40b
--- /dev/null
+++ b/include/mach/macro_help.h
@@ -0,0 +1,18 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1988 Carnegie-Mellon University
+ * All rights reserved. The CMU software License Agreement specifies
+ * the terms and conditions for use and redistribution.
+ */
+
+#ifndef _MACRO_HELP_H_
+#define _MACRO_HELP_H_ 1
+
+#define MACRO_BEGIN do {
+#define MACRO_END } while (0)
+
+#define MACRO_RETURN if (1) return
+
+#endif /* _MACRO_HELP_H_ */
+
+
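The do { ... } while (0) wrappers above exist so that multi-statement macros behave as a single statement; a minimal illustration follows (QUEUE_RESET and its queue type are made up for the example):

    #include <mach/macro_help.h>

    struct example_queue { int head; int count; };

    #define QUEUE_RESET(q)        \
    MACRO_BEGIN                   \
        (q)->head  = 0;           \
        (q)->count = 0;           \
    MACRO_END

    /*
     * Safe in an unbraced if/else precisely because of the wrapper:
     *     if (emptying) QUEUE_RESET(q); else queue_dump(q);
     */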
diff --git a/include/mach/memory_object.defs b/include/mach/memory_object.defs
new file mode 100644
index 00000000..e64be877
--- /dev/null
+++ b/include/mach/memory_object.defs
@@ -0,0 +1,313 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/memory_object.defs
+ *
+ * Abstract:
+ * Basic Mach external memory management interface declaration.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif KERNEL_USER
+#if KERNEL_SERVER
+ KernelServer
+#endif KERNEL_SERVER
+ memory_object 2200;
+
+#ifdef KERNEL
+#include <norma_vm.h>
+#if NORMA_VM
+userprefix k_;
+#endif NORMA_VM
+#endif KERNEL
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+#if SEQNOS
+serverprefix seqnos_;
+serverdemux seqnos_memory_object_server;
+#endif SEQNOS
+
+/*
+ * Initialize the specified memory object, providing
+ * a request port on which control calls can be made, and
+ * a name port that identifies this object to callers of
+ * vm_regions.
+ * [To allow the mapping of this object to be used, the
+ * memory manager must call memory_object_set_attributes,
+ * specifying the "ready" parameter as TRUE. To reject
+ * all mappings of this object, the memory manager may
+ * use memory_object_destroy.]
+ */
+simpleroutine memory_object_init(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ memory_object_name : memory_object_name_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ memory_object_page_size : vm_size_t);
+
+/*
+ * Indicates that the specified memory object is no longer
+ * mapped (or cached -- see memory_object_set_attributes),
+ * and that further mappings will cause another memory_object_init
+ * call to be made. No further calls will be made on
+ * the memory object by this kernel.
+ *
+ * [All rights to the control and name ports are included
+ * in this call. The memory manager should use port_deallocate
+ * to release them once they are no longer needed.]
+ */
+simpleroutine memory_object_terminate(
+ memory_object : memory_object_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t
+#if KERNEL_USER
+ /* for compatibility with Mach 2.5 kernels */
+ , dealloc
+#endif KERNEL_USER
+ ;
+ memory_object_name : memory_object_name_t =
+ MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t
+#if KERNEL_USER
+ /* for compatibility with Mach 2.5 kernels */
+ , dealloc
+#endif KERNEL_USER
+ );
+
+/*
+ * Indicates that a copy has been made of the specified range of
+ * the given original memory object. The kernel will use the new
+ * memory object, control and name ports to refer to the new copy
+ * (once the memory manager has asserted its "ready" attribute).
+ *
+ * Cached pages from the original memory object at the time of
+ * the copy operation are handled as follows:
+ * Readable pages may be silently copied to the new
+ * memory object (with all access permissions).
+ * Pages not copied are locked to prevent write access.
+ *
+ * This call includes only the new memory object itself; a
+ * memory_object_init call will be made on the new memory
+ * object after the actions above are completed.
+ *
+ * The new memory object is *temporary*, meaning that the
+ * memory manager should not change its contents or allow
+ * the memory object to be mapped in another client. The
+ * memory manager may use the memory_object_data_unavailable
+ * call to indicate that the appropriate page of the original
+ * memory object may be used to fulfill a data request.
+ *
+ * [Reply should be memory_object_set_attributes on the
+ * new memory object control port to indicate readiness.]
+ */
+simpleroutine memory_object_copy(
+ old_memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ old_memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ length : vm_size_t;
+ new_memory_object : memory_object_t =
+ MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t
+#if KERNEL_USER
+ /* for compatibility with Mach 2.5 kernels */
+ , dealloc
+#endif KERNEL_USER
+ );
+
+/*
+ * Request data from this memory object. At least
+ * the specified data should be returned with at
+ * least the specified access permitted.
+ *
+ * [Reply should be memory_object_data_provided.]
+ */
+simpleroutine memory_object_data_request(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ length : vm_size_t;
+ desired_access : vm_prot_t);
+
+/*
+ * Request that the specified portion of this
+ * memory object be unlocked to allow the specified
+ * forms of access; the kernel already has the data.
+ *
+ * [Reply should be memory_object_lock_request.]
+ */
+simpleroutine memory_object_data_unlock(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ length : vm_size_t;
+ desired_access : vm_prot_t);
+
+/*
+ * Write back modifications made to this portion of
+ * the memory object while in memory.
+ *
+ * Unless explicitly requested by a memory_object_lock_request
+ * (clean, but not flush), the kernel will not retain
+ * the data.
+ *
+ * [Reply should be vm_deallocate to release the data.]
+ */
+simpleroutine memory_object_data_write(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ data : pointer_t);
+
+/*
+ * Indicate that a previous memory_object_lock_request has been
+ * completed. Note that this call is made on whatever
+ * port is specified in the memory_object_lock_request; that port
+ * need not be the memory object port itself.
+ *
+ * [No reply expected.]
+ */
+simpleroutine memory_object_lock_completed(
+ memory_object : memory_object_t =
+ polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
+ ctype: mach_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ length : vm_size_t);
+
+/*
+ * Indicate that a previous memory_object_data_supply has been
+ * completed. Note that this call is made on whatever
+ * port is specified in the memory_object_data_supply; that port
+ * need not be the memory object port itself.
+ *
+ * The result parameter indicates what happened during the supply.
+ * If it is not KERN_SUCCESS, then error_offset identifies the
+ * first offset at which a problem occurred. The pagein operation
+ * stopped at this point. Note that the only failures reported
+ * by this mechanism are KERN_MEMORY_PRESENT. All other failures
+ * (invalid argument, error on pagein of supplied data in manager's
+ * address space) cause the entire operation to fail.
+ *
+ * XXX Check what actually happens in latter case!
+ *
+ * [No reply expected.]
+ */
+simpleroutine memory_object_supply_completed(
+ memory_object : memory_object_t =
+ polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
+ ctype: mach_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ length : vm_size_t;
+ result : kern_return_t;
+ error_offset : vm_offset_t);
+
+/*
+ * Return data to manager. This call is used in place of data_write
+ * for objects initialized by object_ready instead of set_attributes.
+ * This call indicates whether the returned data is dirty and whether
+ * the kernel kept a copy. Precious data remains precious if the
+ * kernel keeps a copy. The indication that the kernel kept a copy
+ * is only a hint if the data is not precious; the cleaned copy may
+ * be discarded without further notifying the manager.
+ *
+ * [Reply should be vm_deallocate to release the data.]
+ */
+simpleroutine memory_object_data_return(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ data : pointer_t;
+ dirty : boolean_t;
+ kernel_copy : boolean_t);
+
+/*
+ * XXX Warning: This routine does NOT contain a memory_object_control_t
+ * XXX because the memory_object_change_attributes call may cause
+ * XXX memory object termination (by uncaching the object). This would
+ * XXX yield an invalid port.
+ */
+
+simpleroutine memory_object_change_completed(
+ memory_object : memory_object_t =
+ polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
+ ctype: mach_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ may_cache : boolean_t;
+ copy_strategy : memory_object_copy_strategy_t);
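For orientation, a hedged sketch of the manager side of the first routine in this file: the MIG server stub calls a memory_object_init() supplied by the pager, which conventionally answers by marking the object ready through the kernel's memory_object_set_attributes call (that call and its argument order are assumed from the Mach 3 kernel interface, not from this diff).

    #include <mach.h>
    #include <mach/memory_object.h>

    /* Accept every mapping of the object: declare it ready and cacheable. */
    kern_return_t memory_object_init(
        memory_object_t         object,
        memory_object_control_t control,
        memory_object_name_t    name,
        vm_size_t               page_size)
    {
        return memory_object_set_attributes(control,
                                            TRUE,    /* object_ready */
                                            TRUE,    /* may_cache    */
                                            MEMORY_OBJECT_COPY_DELAY);
    }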
diff --git a/include/mach/memory_object.h b/include/mach/memory_object.h
new file mode 100644
index 00000000..b4dd71f7
--- /dev/null
+++ b/include/mach/memory_object.h
@@ -0,0 +1,83 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: memory_object.h
+ * Author: Michael Wayne Young
+ *
+ * External memory management interface definition.
+ */
+
+#ifndef _MACH_MEMORY_OBJECT_H_
+#define _MACH_MEMORY_OBJECT_H_
+
+/*
+ * User-visible types used in the external memory
+ * management interface:
+ */
+
+#include <mach/port.h>
+
+typedef mach_port_t memory_object_t;
+ /* Represents a memory object ... */
+ /* Used by user programs to specify */
+ /* the object to map; used by the */
+ /* kernel to retrieve or store data */
+
+typedef mach_port_t memory_object_control_t;
+ /* Provided to a memory manager; ... */
+ /* used to control a memory object */
+
+typedef mach_port_t memory_object_name_t;
+ /* Used to describe the memory ... */
+ /* object in vm_regions() calls */
+
+typedef int memory_object_copy_strategy_t;
+ /* How memory manager handles copy: */
+#define MEMORY_OBJECT_COPY_NONE 0
+ /* ... No special support */
+#define MEMORY_OBJECT_COPY_CALL 1
+ /* ... Make call on memory manager */
+#define MEMORY_OBJECT_COPY_DELAY 2
+ /* ... Memory manager doesn't ... */
+ /* change data externally. */
+#define MEMORY_OBJECT_COPY_TEMPORARY 3
+ /* ... Memory manager doesn't ... */
+ /* change data externally, and */
+ /* doesn't need to see changes. */
+
+typedef int memory_object_return_t;
+ /* Which pages to return to manager
+ this time (lock_request) */
+#define MEMORY_OBJECT_RETURN_NONE 0
+ /* ... don't return any. */
+#define MEMORY_OBJECT_RETURN_DIRTY 1
+ /* ... only dirty pages. */
+#define MEMORY_OBJECT_RETURN_ALL 2
+ /* ... dirty and precious pages. */
+
+#define MEMORY_OBJECT_NULL MACH_PORT_NULL
+
+#endif /* _MACH_MEMORY_OBJECT_H_ */
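As an illustrative sketch (not part of the patch), a memory manager might use the constants above when deciding what copy strategy to report for an object it backs. The helper below is hypothetical and only exercises types declared in this header; boolean_t comes from <mach/boolean.h>.

#include <mach/memory_object.h>
#include <mach/boolean.h>

/* One plausible mapping: if the manager never changes the data behind the
 * kernel's back, the DELAY strategy is safe; if it may modify the data
 * externally, ask the kernel to call back (COPY_CALL); a null object gets
 * no special support. */
memory_object_copy_strategy_t
choose_copy_strategy(memory_object_t object, boolean_t manager_modifies)
{
	if (object == MEMORY_OBJECT_NULL)
		return MEMORY_OBJECT_COPY_NONE;
	if (manager_modifies)
		return MEMORY_OBJECT_COPY_CALL;
	return MEMORY_OBJECT_COPY_DELAY;
}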
diff --git a/include/mach/memory_object_default.defs b/include/mach/memory_object_default.defs
new file mode 100644
index 00000000..eb2aea7a
--- /dev/null
+++ b/include/mach/memory_object_default.defs
@@ -0,0 +1,121 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/memory_object_default.defs
+ *
+ * Abstract:
+ * Mach external memory management interface declaration; subset
+ * that is applicable to managers of kernel-created memory objects.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif KERNEL_USER
+ memory_object_default 2250;
+
+#ifdef MACH_KERNEL
+#include <norma_vm.h>
+#if NORMA_VM
+userprefix k_;
+#endif NORMA_VM
+#endif MACH_KERNEL
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+#if SEQNOS
+serverprefix seqnos_;
+serverdemux seqnos_memory_object_default_server;
+#endif SEQNOS
+
+/*
+ * Pass on responsibility for the new kernel-created memory
+ * object. The port on which this request is received is the port
+ * (possibly a memory object itself) registered as the "default
+ * pager". Other arguments are as described for memory_object_init.
+ * [No reply required.]
+ */
+simpleroutine memory_object_create(
+ old_memory_object : memory_object_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ new_memory_object : memory_object_t =
+ MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t
+#if KERNEL_USER
+ /* for compatibility with Mach 2.5 kernels */
+ , dealloc
+#endif KERNEL_USER
+ ;
+ new_object_size : vm_size_t;
+ new_control_port : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ new_name : memory_object_name_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ new_page_size : vm_size_t);
+
+/*
+ * Provide initial data contents for this region of
+ * the memory object. If data has already been written
+ * to the object, this value must be discarded; otherwise,
+ * this call acts identically to memory_object_data_write.
+ */
+simpleroutine memory_object_data_initialize(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control_port : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ data : pointer_t);
+
+#if 0
+/*
+ * Indicate that the specified range of data in this memory object
+ * will not be requested again until it is reinitialized with
+ * memory_object_data_write or memory_object_data_initialize.
+ */
+simpleroutine memory_object_data_terminate(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control_port : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : vm_offset_t;
+ size : vm_size_t);
+#else 0
+skip; /* memory_object_data_terminate */
+#endif 0
diff --git a/include/mach/message.h b/include/mach/message.h
new file mode 100644
index 00000000..8f1860f8
--- /dev/null
+++ b/include/mach/message.h
@@ -0,0 +1,750 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/message.h
+ *
+ * Mach IPC message and primitive function definitions.
+ */
+
+#ifndef _MACH_MESSAGE_H_
+#define _MACH_MESSAGE_H_
+
+#ifdef MACH_KERNEL
+#include <mach_ipc_compat.h>
+#endif /* MACH_KERNEL */
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+
+
+/*
+ * The timeout mechanism uses mach_msg_timeout_t values,
+ * passed by value. The timeout units are milliseconds.
+ * It is controlled with the MACH_SEND_TIMEOUT
+ * and MACH_RCV_TIMEOUT options.
+ */
+
+typedef natural_t mach_msg_timeout_t;
+
+/*
+ * The value to be used when there is no timeout.
+ * (No MACH_SEND_TIMEOUT/MACH_RCV_TIMEOUT option.)
+ */
+
+#define MACH_MSG_TIMEOUT_NONE ((mach_msg_timeout_t) 0)
+
+/*
+ * The kernel uses MACH_MSGH_BITS_COMPLEX as a hint. If it isn't on, it
+ * assumes the body of the message doesn't contain port rights or OOL
+ * data. The field is set in received messages. A user task must
+ * use caution in interpreting the body of a message if the bit isn't
+ * on, because the mach_msg_type's in the body might "lie" about the
+ * contents. If the bit isn't on, but the mach_msg_types
+ * in the body specify rights or OOL data, the behaviour is undefined.
+ * (I.e., an error may or may not be produced.)
+ *
+ * The value of MACH_MSGH_BITS_REMOTE determines the interpretation
+ * of the msgh_remote_port field. It is handled like a msgt_name.
+ *
+ * The value of MACH_MSGH_BITS_LOCAL determines the interpretation
+ * of the msgh_local_port field. It is handled like a msgt_name.
+ *
+ * MACH_MSGH_BITS() combines two MACH_MSG_TYPE_* values, for the remote
+ * and local fields, into a single value suitable for msgh_bits.
+ *
+ * MACH_MSGH_BITS_COMPLEX_PORTS, MACH_MSGH_BITS_COMPLEX_DATA, and
+ * MACH_MSGH_BITS_CIRCULAR should be zero; they are used internally.
+ *
+ * The unused bits should be zero.
+ */
+
+#define MACH_MSGH_BITS_ZERO 0x00000000
+#define MACH_MSGH_BITS_REMOTE_MASK 0x000000ff
+#define MACH_MSGH_BITS_LOCAL_MASK 0x0000ff00
+#define MACH_MSGH_BITS_COMPLEX 0x80000000U
+#define MACH_MSGH_BITS_CIRCULAR 0x40000000 /* internal use only */
+#define MACH_MSGH_BITS_COMPLEX_PORTS 0x20000000 /* internal use only */
+#define MACH_MSGH_BITS_COMPLEX_DATA 0x10000000 /* internal use only */
+#define MACH_MSGH_BITS_MIGRATED 0x08000000 /* internal use only */
+#define MACH_MSGH_BITS_UNUSED 0x07ff0000
+
+#define MACH_MSGH_BITS_PORTS_MASK \
+ (MACH_MSGH_BITS_REMOTE_MASK|MACH_MSGH_BITS_LOCAL_MASK)
+
+#define MACH_MSGH_BITS(remote, local) \
+ ((remote) | ((local) << 8))
+#define MACH_MSGH_BITS_REMOTE(bits) \
+ ((bits) & MACH_MSGH_BITS_REMOTE_MASK)
+#define MACH_MSGH_BITS_LOCAL(bits) \
+ (((bits) & MACH_MSGH_BITS_LOCAL_MASK) >> 8)
+#define MACH_MSGH_BITS_PORTS(bits) \
+ ((bits) & MACH_MSGH_BITS_PORTS_MASK)
+#define MACH_MSGH_BITS_OTHER(bits) \
+ ((bits) &~ MACH_MSGH_BITS_PORTS_MASK)
+
+/*
+ * Every message starts with a message header.
+ * Following the message header are zero or more pairs of
+ * type descriptors (mach_msg_type_t/mach_msg_type_long_t) and
+ * data values. The size of the message must be specified in bytes,
+ * and includes the message header, type descriptors, inline
+ * data, and inline pointer for out-of-line data.
+ *
+ * The msgh_remote_port field specifies the destination of the message.
+ * It must specify a valid send or send-once right for a port.
+ *
+ * The msgh_local_port field specifies a "reply port". Normally,
+ * this field carries a send-once right that the receiver will use
+ * to reply to the message. It may carry the values MACH_PORT_NULL,
+ * MACH_PORT_DEAD, a send-once right, or a send right.
+ *
+ * The msgh_seqno field carries a sequence number associated with the
+ * received-from port. A port's sequence number is incremented every
+ * time a message is received from it. In sent messages, the field's
+ * value is ignored.
+ *
+ * The msgh_id field is uninterpreted by the message primitives.
+ * It normally carries information specifying the format
+ * or meaning of the message.
+ */
+
+typedef unsigned int mach_msg_bits_t;
+typedef unsigned int mach_msg_size_t;
+typedef natural_t mach_msg_seqno_t;
+typedef integer_t mach_msg_id_t;
+
+typedef struct {
+ mach_msg_bits_t msgh_bits;
+ mach_msg_size_t msgh_size;
+ mach_port_t msgh_remote_port;
+ mach_port_t msgh_local_port;
+ mach_port_seqno_t msgh_seqno;
+ mach_msg_id_t msgh_id;
+} mach_msg_header_t;
+
+/*
+ * There is no fixed upper bound to the size of Mach messages.
+ */
+
+#define MACH_MSG_SIZE_MAX ((mach_msg_size_t) ~0)
+
+/*
+ * Compatibility definitions, for code written
+ * when there was a msgh_kind instead of msgh_seqno.
+ */
+
+#define MACH_MSGH_KIND_NORMAL 0x00000000
+#if 0
+/* code using this is likely to break, so better not to have it defined */
+#define MACH_MSGH_KIND_NOTIFICATION 0x00000001
+#endif
+#define msgh_kind msgh_seqno
+#define mach_msg_kind_t mach_port_seqno_t
+
+/*
+ * The msgt_number field specifies the number of data elements.
+ * The msgt_size field specifies the size of each data element, in bits.
+ * The msgt_name field specifies the type of each data element.
+ * If msgt_inline is TRUE, the data follows the type descriptor
+ * in the body of the message. If msgt_inline is FALSE, then a pointer
+ * to the data should follow the type descriptor, and the data is
+ * sent out-of-line. In this case, if msgt_deallocate is TRUE,
+ * then the out-of-line data is moved (instead of copied) into the message.
+ * If msgt_longform is TRUE, then the type descriptor is actually
+ * a mach_msg_type_long_t.
+ *
+ * The actual amount of inline data following the descriptor must be
+ * a multiple of the word size. For out-of-line data, this is a
+ * pointer. For inline data, the supplied data size (calculated
+ * from msgt_number/msgt_size) is rounded up. This guarantees
+ * that type descriptors always fall on word boundaries.
+ *
+ * For port rights, msgt_size must be 8*sizeof(mach_port_t).
+ * If the data is inline, msgt_deallocate should be FALSE.
+ * The msgt_unused bit should be zero.
+ * The msgt_name, msgt_size, msgt_number fields in
+ * a mach_msg_type_long_t should be zero.
+ */
+
+typedef unsigned int mach_msg_type_name_t;
+typedef unsigned int mach_msg_type_size_t;
+typedef natural_t mach_msg_type_number_t;
+
+typedef struct {
+ unsigned int msgt_name : 8,
+ msgt_size : 8,
+ msgt_number : 12,
+ msgt_inline : 1,
+ msgt_longform : 1,
+ msgt_deallocate : 1,
+ msgt_unused : 1;
+} mach_msg_type_t;
+
+typedef struct {
+ mach_msg_type_t msgtl_header;
+ unsigned short msgtl_name;
+ unsigned short msgtl_size;
+ natural_t msgtl_number;
+} mach_msg_type_long_t;
+
+
+/*
+ * Known values for the msgt_name field.
+ *
+ * The only types known to the Mach kernel are
+ * the port types, and those types used in the
+ * kernel RPC interface.
+ */
+
+#define MACH_MSG_TYPE_UNSTRUCTURED 0
+#define MACH_MSG_TYPE_BIT 0
+#define MACH_MSG_TYPE_BOOLEAN 0
+#define MACH_MSG_TYPE_INTEGER_16 1
+#define MACH_MSG_TYPE_INTEGER_32 2
+#define MACH_MSG_TYPE_CHAR 8
+#define MACH_MSG_TYPE_BYTE 9
+#define MACH_MSG_TYPE_INTEGER_8 9
+#define MACH_MSG_TYPE_REAL 10
+#define MACH_MSG_TYPE_INTEGER_64 11
+#define MACH_MSG_TYPE_STRING 12
+#define MACH_MSG_TYPE_STRING_C 12
+
+/*
+ * Values used when sending a port right.
+ */
+
+#define MACH_MSG_TYPE_MOVE_RECEIVE 16 /* Must hold receive rights */
+#define MACH_MSG_TYPE_MOVE_SEND 17 /* Must hold send rights */
+#define MACH_MSG_TYPE_MOVE_SEND_ONCE 18 /* Must hold sendonce rights */
+#define MACH_MSG_TYPE_COPY_SEND 19 /* Must hold send rights */
+#define MACH_MSG_TYPE_MAKE_SEND 20 /* Must hold receive rights */
+#define MACH_MSG_TYPE_MAKE_SEND_ONCE 21 /* Must hold receive rights */
+
+/*
+ * Values received/carried in messages. Tells the receiver what
+ * sort of port right he now has.
+ *
+ * MACH_MSG_TYPE_PORT_NAME is used to transfer a port name
+ * which should remain uninterpreted by the kernel. (Port rights
+ * are not transferred, just the port name.)
+ */
+
+#define MACH_MSG_TYPE_PORT_NAME 15
+#define MACH_MSG_TYPE_PORT_RECEIVE MACH_MSG_TYPE_MOVE_RECEIVE
+#define MACH_MSG_TYPE_PORT_SEND MACH_MSG_TYPE_MOVE_SEND
+#define MACH_MSG_TYPE_PORT_SEND_ONCE MACH_MSG_TYPE_MOVE_SEND_ONCE
+
+#define MACH_MSG_TYPE_LAST 22 /* Last assigned */
+
+/*
+ * A dummy value. Mostly used to indicate that the actual value
+ * will be filled in later, dynamically.
+ */
+
+#define MACH_MSG_TYPE_POLYMORPHIC ((mach_msg_type_name_t) -1)
+
+/*
+ * Is a given item a port type?
+ */
+
+#define MACH_MSG_TYPE_PORT_ANY(x) \
+ (((x) >= MACH_MSG_TYPE_MOVE_RECEIVE) && \
+ ((x) <= MACH_MSG_TYPE_MAKE_SEND_ONCE))
+
+#define MACH_MSG_TYPE_PORT_ANY_SEND(x) \
+ (((x) >= MACH_MSG_TYPE_MOVE_SEND) && \
+ ((x) <= MACH_MSG_TYPE_MAKE_SEND_ONCE))
+
+#define MACH_MSG_TYPE_PORT_ANY_RIGHT(x) \
+ (((x) >= MACH_MSG_TYPE_MOVE_RECEIVE) && \
+ ((x) <= MACH_MSG_TYPE_MOVE_SEND_ONCE))
+
+typedef integer_t mach_msg_option_t;
+
+#define MACH_MSG_OPTION_NONE 0x00000000
+
+#define MACH_SEND_MSG 0x00000001
+#define MACH_RCV_MSG 0x00000002
+
+#define MACH_SEND_TIMEOUT 0x00000010
+#define MACH_SEND_NOTIFY 0x00000020
+#define MACH_SEND_INTERRUPT 0x00000040 /* libmach implements */
+#define MACH_SEND_CANCEL 0x00000080
+#define MACH_RCV_TIMEOUT 0x00000100
+#define MACH_RCV_NOTIFY 0x00000200
+#define MACH_RCV_INTERRUPT 0x00000400 /* libmach implements */
+#define MACH_RCV_LARGE 0x00000800
+
+#define MACH_SEND_ALWAYS 0x00010000 /* internal use only */
+
+
+/*
+ * Much code assumes that mach_msg_return_t == kern_return_t.
+ * This definition is useful for descriptive purposes.
+ *
+ * See <mach/error.h> for the format of error codes.
+ * IPC errors are system 4. Send errors are subsystem 0;
+ * receive errors are subsystem 1. The code field is always non-zero.
+ * The high bits of the code field communicate extra information
+ * for some error codes. MACH_MSG_MASK masks off these special bits.
+ */
+
+typedef kern_return_t mach_msg_return_t;
+
+#define MACH_MSG_SUCCESS 0x00000000
+
+#define MACH_MSG_MASK 0x00003c00
+ /* All special error code bits defined below. */
+#define MACH_MSG_IPC_SPACE 0x00002000
+ /* No room in IPC name space for another capability name. */
+#define MACH_MSG_VM_SPACE 0x00001000
+ /* No room in VM address space for out-of-line memory. */
+#define MACH_MSG_IPC_KERNEL 0x00000800
+ /* Kernel resource shortage handling an IPC capability. */
+#define MACH_MSG_VM_KERNEL 0x00000400
+ /* Kernel resource shortage handling out-of-line memory. */
+
+#define MACH_SEND_IN_PROGRESS 0x10000001
+ /* Thread is waiting to send. (Internal use only.) */
+#define MACH_SEND_INVALID_DATA 0x10000002
+ /* Bogus in-line data. */
+#define MACH_SEND_INVALID_DEST 0x10000003
+ /* Bogus destination port. */
+#define MACH_SEND_TIMED_OUT 0x10000004
+ /* Message not sent before timeout expired. */
+#define MACH_SEND_WILL_NOTIFY 0x10000005
+ /* Msg-accepted notification will be generated. */
+#define MACH_SEND_NOTIFY_IN_PROGRESS 0x10000006
+ /* Msg-accepted notification already pending. */
+#define MACH_SEND_INTERRUPTED 0x10000007
+ /* Software interrupt. */
+#define MACH_SEND_MSG_TOO_SMALL 0x10000008
+ /* Data doesn't contain a complete message. */
+#define MACH_SEND_INVALID_REPLY 0x10000009
+ /* Bogus reply port. */
+#define MACH_SEND_INVALID_RIGHT 0x1000000a
+ /* Bogus port rights in the message body. */
+#define MACH_SEND_INVALID_NOTIFY 0x1000000b
+ /* Bogus notify port argument. */
+#define MACH_SEND_INVALID_MEMORY 0x1000000c
+ /* Invalid out-of-line memory pointer. */
+#define MACH_SEND_NO_BUFFER 0x1000000d
+ /* No message buffer is available. */
+#define MACH_SEND_NO_NOTIFY 0x1000000e
+ /* Resource shortage; can't request msg-accepted notif. */
+#define MACH_SEND_INVALID_TYPE 0x1000000f
+ /* Invalid msg-type specification. */
+#define MACH_SEND_INVALID_HEADER 0x10000010
+ /* A field in the header had a bad value. */
+
+#define MACH_RCV_IN_PROGRESS 0x10004001
+ /* Thread is waiting for receive. (Internal use only.) */
+#define MACH_RCV_INVALID_NAME 0x10004002
+ /* Bogus name for receive port/port-set. */
+#define MACH_RCV_TIMED_OUT 0x10004003
+ /* Didn't get a message within the timeout value. */
+#define MACH_RCV_TOO_LARGE 0x10004004
+ /* Message buffer is not large enough for inline data. */
+#define MACH_RCV_INTERRUPTED 0x10004005
+ /* Software interrupt. */
+#define MACH_RCV_PORT_CHANGED 0x10004006
+ /* Port moved into a set during the receive. */
+#define MACH_RCV_INVALID_NOTIFY 0x10004007
+ /* Bogus notify port argument. */
+#define MACH_RCV_INVALID_DATA 0x10004008
+ /* Bogus message buffer for inline data. */
+#define MACH_RCV_PORT_DIED 0x10004009
+ /* Port/set was sent away/died during receive. */
+#define MACH_RCV_IN_SET 0x1000400a
+ /* Port is a member of a port set. */
+#define MACH_RCV_HEADER_ERROR 0x1000400b
+ /* Error receiving message header. See special bits. */
+#define MACH_RCV_BODY_ERROR 0x1000400c
+ /* Error receiving message body. See special bits. */
+
+
+extern mach_msg_return_t
+mach_msg_trap
+#if defined(c_plusplus) || defined(__STDC__)
+ (mach_msg_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t send_size,
+ mach_msg_size_t rcv_size,
+ mach_port_t rcv_name,
+ mach_msg_timeout_t timeout,
+ mach_port_t notify);
+#else /* c_plusplus || __STDC__ */
+#ifdef LINTLIBRARY
+ (msg, option, send_size, rcv_size, rcv_name, timeout, notify)
+ mach_msg_header_t *msg;
+ mach_msg_option_t option;
+ mach_msg_size_t send_size;
+ mach_msg_size_t rcv_size;
+ mach_port_t rcv_name;
+ mach_msg_timeout_t timeout;
+ mach_port_t notify;
+{ return MACH_MSG_SUCCESS; }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+#endif /* c_plusplus || __STDC__ */
+
+extern mach_msg_return_t
+mach_msg
+#if defined(c_plusplus) || defined(__STDC__)
+ (mach_msg_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t send_size,
+ mach_msg_size_t rcv_size,
+ mach_port_t rcv_name,
+ mach_msg_timeout_t timeout,
+ mach_port_t notify);
+#else /* c_plusplus || __STDC__ */
+#ifdef LINTLIBRARY
+ (msg, option, send_size, rcv_size, rcv_name, timeout, notify)
+ mach_msg_header_t *msg;
+ mach_msg_option_t option;
+ mach_msg_size_t send_size;
+ mach_msg_size_t rcv_size;
+ mach_port_t rcv_name;
+ mach_msg_timeout_t timeout;
+ mach_port_t notify;
+{ return MACH_MSG_SUCCESS; }
+#else /* LINTLIBRARY */
+ ();
+#endif /* LINTLIBRARY */
+#endif /* c_plusplus || __STDC__ */
+
+
+/* Definitions for the old IPC interface. */
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Message data structures.
+ *
+ * Messages consist of two parts: a fixed-size header, immediately
+ * followed by a variable-size array of typed data items.
+ *
+ */
+
+typedef unsigned int msg_size_t;
+
+typedef struct {
+ unsigned int msg_unused : 24,
+ msg_simple : 8;
+ msg_size_t msg_size;
+ integer_t msg_type;
+ port_t msg_local_port;
+ port_t msg_remote_port;
+ integer_t msg_id;
+} msg_header_t;
+
+#define MSG_SIZE_MAX 8192
+
+/*
+ * Known values for the msg_type field.
+ * These are Accent holdovers, which should be purged when possible.
+ *
+ * Only one bit in the msg_type field is used by the kernel.
+ * Others are available to user applications. See <msg_type.h>
+ * for system application-assigned values.
+ */
+
+#define MSG_TYPE_NORMAL 0
+#define MSG_TYPE_EMERGENCY 1
+
+/*
+ * Each data item is preceded by a description of that
+ * item, including what type of data, how big it is, and
+ * how many of them are present.
+ *
+ * The actual data will either follow this type
+ * descriptor ("inline") or will be specified by a pointer.
+ *
+ * If the type name, size, or number is too large to be encoded
+ * in this structure, the "longform" option may be selected,
+ * and those fields must immediately follow in full integer fields.
+ *
+ * For convenience, out-of-line data regions or port rights may
+ * be deallocated when the message is sent by specifying the
+ * "deallocate" field. Beware: if the data item in question is both
+ * out-of-line and contains port rights, then both will be deallocated.
+ */
+
+typedef struct {
+ unsigned int msg_type_name : 8, /* What kind of data */
+ msg_type_size : 8, /* How many bits is each item */
+ msg_type_number : 12, /* How many items are there */
+ msg_type_inline : 1, /* If true, data follows; else a pointer */
+ msg_type_longform : 1, /* Name, size, number follow: see above */
+ msg_type_deallocate : 1, /* Deallocate port rights or memory */
+ msg_type_unused : 1;
+} msg_type_t;
+
+typedef struct {
+ msg_type_t msg_type_header;
+ unsigned short msg_type_long_name;
+ unsigned short msg_type_long_size;
+ natural_t msg_type_long_number;
+} msg_type_long_t;
+
+/*
+ * Known values for the msg_type_name field.
+ *
+ * The only types known to the Mach kernel are
+ * the port types, and those types used in the
+ * kernel RPC interface.
+ */
+
+#define MSG_TYPE_UNSTRUCTURED 0
+#define MSG_TYPE_BIT 0
+#define MSG_TYPE_BOOLEAN 0
+#define MSG_TYPE_INTEGER_16 1
+#define MSG_TYPE_INTEGER_32 2
+#define MSG_TYPE_PORT_OWNERSHIP 3 /* obsolete */
+#define MSG_TYPE_PORT_RECEIVE 4 /* obsolete */
+#define MSG_TYPE_PORT_ALL 5
+#define MSG_TYPE_PORT 6
+#define MSG_TYPE_CHAR 8
+#define MSG_TYPE_BYTE 9
+#define MSG_TYPE_INTEGER_8 9
+#define MSG_TYPE_REAL 10
+#define MSG_TYPE_STRING 12
+#define MSG_TYPE_STRING_C 12
+/* MSG_TYPE_INVALID 13 unused */
+
+#define MSG_TYPE_INTERNAL_MEMORY MSG_TYPE_INTEGER_8
+
+#define MSG_TYPE_PORT_NAME 15 /* A capability name */
+#define MSG_TYPE_LAST 16 /* Last assigned */
+
+#define MSG_TYPE_POLYMORPHIC ((unsigned int) -1)
+
+/*
+ * Is a given item a port type?
+ */
+
+#define MSG_TYPE_PORT_ANY(x) \
+ (((x) == MSG_TYPE_PORT) || ((x) == MSG_TYPE_PORT_ALL))
+
+/*
+ * Other basic types
+ */
+
+typedef natural_t msg_timeout_t;
+
+/*
+ * Options to IPC primitives.
+ *
+ * These can be combined by or'ing; the combination RPC call
+ * uses both SEND_ and RCV_ options at once.
+ */
+
+typedef integer_t msg_option_t;
+
+#define MSG_OPTION_NONE 0x0000 /* Terminate only when message op works */
+
+#define SEND_TIMEOUT 0x0001 /* Terminate on timeout elapsed */
+#define SEND_NOTIFY 0x0002 /* Terminate with reply message if need be */
+
+#define SEND_INTERRUPT 0x0004 /* Terminate on software interrupt */
+
+#define RCV_TIMEOUT 0x0100 /* Terminate on timeout elapsed */
+#define RCV_NO_SENDERS 0x0200 /* Terminate if I'm the only sender left */
+#define RCV_INTERRUPT 0x0400 /* Terminate on software interrupt */
+
+/*
+ * Returns from IPC primitives.
+ *
+ * Values are separate in order to allow RPC users to
+ * distinguish which operation failed; for successful completion,
+ * this doesn't matter.
+ */
+
+typedef int msg_return_t;
+
+#define SEND_SUCCESS 0
+
+#define SEND_ERRORS_START -100
+#define SEND_INVALID_MEMORY -101 /* Message or OOL data invalid */
+#define SEND_INVALID_PORT -102 /* Reference to inaccessible port */
+#define SEND_TIMED_OUT -103 /* Terminated due to timeout */
+#define SEND_WILL_NOTIFY -105 /* Msg accepted provisionally */
+#define SEND_NOTIFY_IN_PROGRESS -106 /* Already awaiting a notification */
+#define SEND_KERNEL_REFUSED -107 /* Message to the kernel refused */
+#define SEND_INTERRUPTED -108 /* Software interrupt during send */
+#define SEND_MSG_TOO_LARGE -109 /* Message specified was too large */
+#define SEND_MSG_TOO_SMALL -110 /* Data specified exceeds msg size */
+/* SEND_MSG_SIZE_CHANGE -111 Msg size changed during copy */
+#define SEND_ERRORS_END -111
+
+#define msg_return_send(x) ((x) < SEND_ERRORS_START && (x) > SEND_ERRORS_END)
+
+#define RCV_SUCCESS 0
+
+#define RCV_ERRORS_START -200
+#define RCV_INVALID_MEMORY -201
+#define RCV_INVALID_PORT -202
+#define RCV_TIMED_OUT -203
+#define RCV_TOO_LARGE -204 /* Msg structure too small for data */
+#define RCV_NOT_ENOUGH_MEMORY -205 /* Can't find space for OOL data */
+#define RCV_ONLY_SENDER -206 /* Receiver is only sender */
+#define RCV_INTERRUPTED -207
+#define RCV_PORT_CHANGE -208 /* Port was put in a set */
+#define RCV_ERRORS_END -209
+
+#define msg_return_rcv(x) ((x) < RCV_ERRORS_START && (x) > RCV_ERRORS_END)
+
+#define RPC_SUCCESS 0
+
+/*
+ * The IPC primitive functions themselves
+ */
+
+msg_return_t msg_send(
+#if defined(c_plusplus) || defined(__STDC__)
+ msg_header_t *header,
+ msg_option_t option,
+ msg_timeout_t timeout);
+#else /* c_plusplus || __STDC__ */
+#if LINTLIBRARY
+ header, option, timeout)
+ msg_header_t *header;
+ msg_option_t option;
+ msg_timeout_t timeout;
+ { return(SEND_SUCCESS); }
+#else /* LINTLIBRARY */
+ );
+#endif /* LINTLIBRARY */
+#endif /* c_plusplus || __STDC__ */
+
+msg_return_t msg_receive(
+#if defined(c_plusplus) || defined(__STDC__)
+ msg_header_t *header,
+ msg_option_t option,
+ msg_timeout_t timeout);
+#else /* c_plusplus || __STDC__ */
+#if LINTLIBRARY
+ header, option, timeout)
+ msg_header_t *header;
+ msg_option_t option;
+ msg_timeout_t timeout;
+ { return(RCV_SUCCESS); }
+#else /* LINTLIBRARY */
+ );
+#endif /* LINTLIBRARY */
+#endif /* c_plusplus || __STDC__ */
+
+msg_return_t msg_rpc(
+#if defined(c_plusplus) || defined(__STDC__)
+ msg_header_t *header, /* in/out */
+ msg_option_t option,
+ msg_size_t rcv_size,
+ msg_timeout_t send_timeout,
+ msg_timeout_t rcv_timeout);
+#else /* c_plusplus || __STDC__ */
+#if LINTLIBRARY
+ header, option, rcv_size,
+ send_timeout, rcv_timeout)
+ msg_header_t *header; /* in/out */
+ msg_option_t option;
+ msg_size_t rcv_size;
+ msg_timeout_t send_timeout;
+ msg_timeout_t rcv_timeout;
+ { return(RPC_SUCCESS); }
+#else /* LINTLIBRARY */
+ );
+#endif /* LINTLIBRARY */
+#endif /* c_plusplus || __STDC__ */
+
+msg_return_t msg_send_trap(
+#if defined(c_plusplus) || defined(__STDC__)
+ msg_header_t *header,
+ msg_option_t option,
+ msg_size_t send_size,
+ msg_timeout_t timeout);
+#else /* c_plusplus || __STDC__ */
+#if LINTLIBRARY
+ header, option, send_size, timeout)
+ msg_header_t *header;
+ msg_option_t option;
+ msg_size_t send_size;
+ msg_timeout_t timeout;
+ { return(SEND_SUCCESS); }
+#else /* LINTLIBRARY */
+ );
+#endif /* LINTLIBRARY */
+#endif /* c_plusplus || __STDC__ */
+
+msg_return_t msg_receive_trap(
+#if defined(c_plusplus) || defined(__STDC__)
+ msg_header_t *header,
+ msg_option_t option,
+ msg_size_t rcv_size,
+ port_name_t rcv_name,
+ msg_timeout_t timeout);
+#else /* c_plusplus || __STDC__ */
+#if LINTLIBRARY
+ header, option, rcv_size, rcv_name, timeout)
+ msg_header_t *header;
+ msg_option_t option;
+ msg_size_t rcv_size;
+ port_name_t rcv_name;
+ msg_timeout_t timeout;
+ { return(RCV_SUCCESS); }
+#else /* LINTLIBRARY */
+ );
+#endif /* LINTLIBRARY */
+#endif /* c_plusplus || __STDC__ */
+
+msg_return_t msg_rpc_trap(
+#if defined(c_plusplus) || defined(__STDC__)
+ msg_header_t *header, /* in/out */
+ msg_option_t option,
+ msg_size_t send_size,
+ msg_size_t rcv_size,
+ msg_timeout_t send_timeout,
+ msg_timeout_t rcv_timeout);
+#else /* c_plusplus || __STDC__ */
+#if LINTLIBRARY
+ header, option, send_size, rcv_size,
+ send_timeout, rcv_timeout)
+ msg_header_t *header; /* in/out */
+ msg_option_t option;
+ msg_size_t send_size;
+ msg_size_t rcv_size;
+ msg_timeout_t send_timeout;
+ msg_timeout_t rcv_timeout;
+ { return(RPC_SUCCESS); }
+#else /* LINTLIBRARY */
+ );
+#endif /* LINTLIBRARY */
+#endif /* c_plusplus || __STDC__ */
+
+#endif /* MACH_IPC_COMPAT */
+
+#endif /* _MACH_MESSAGE_H_ */
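As an illustrative sketch (not part of the patch), a user task could send a message carrying a single inline 32-bit integer using only the definitions from <mach/message.h> above. The destination port, the message id 400, and the function name are assumptions made for the example.

#include <mach/message.h>
#include <mach/port.h>

struct simple_request {
	mach_msg_header_t	head;
	mach_msg_type_t		value_type;	/* describes the inline item */
	int			value;		/* the inline data itself */
};

mach_msg_return_t
send_example(mach_port_t dest, int value)
{
	struct simple_request msg;

	/* Header: send to `dest' (caller must hold a send right), no reply. */
	msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	msg.head.msgh_size = sizeof msg;
	msg.head.msgh_remote_port = dest;
	msg.head.msgh_local_port = MACH_PORT_NULL;
	msg.head.msgh_seqno = 0;		/* ignored in sent messages */
	msg.head.msgh_id = 400;			/* arbitrary example id */

	/* One inline 32-bit integer follows the type descriptor. */
	msg.value_type.msgt_name = MACH_MSG_TYPE_INTEGER_32;
	msg.value_type.msgt_size = 32;		/* bits per element */
	msg.value_type.msgt_number = 1;
	msg.value_type.msgt_inline = 1;
	msg.value_type.msgt_longform = 0;
	msg.value_type.msgt_deallocate = 0;
	msg.value_type.msgt_unused = 0;
	msg.value = value;

	return mach_msg(&msg.head, MACH_SEND_MSG,
			sizeof msg, 0, MACH_PORT_NULL,
			MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}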
diff --git a/include/mach/mig_errors.h b/include/mach/mig_errors.h
new file mode 100644
index 00000000..eec4c197
--- /dev/null
+++ b/include/mach/mig_errors.h
@@ -0,0 +1,105 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach Interface Generator errors
+ *
+ */
+
+#ifndef _MACH_MIG_ERRORS_H_
+#define _MACH_MIG_ERRORS_H_
+
+#ifdef MACH_KERNEL
+#include <mach_ipc_compat.h>
+#endif /* MACH_KERNEL */
+
+#include <mach/kern_return.h>
+#include <mach/message.h>
+
+/*
+ * These error codes should be specified as system 4, subsystem 2.
+ * But alas, backwards compatibility makes that impossible.
+ * The problem is old clients of new servers (e.g., the kernel)
+ * which get strange large error codes when there is a MiG problem
+ * in the server. Unfortunately, the IPC system doesn't have
+ * the knowledge to convert the codes in this situation.
+ */
+
+#define MIG_TYPE_ERROR -300 /* client type check failure */
+#define MIG_REPLY_MISMATCH -301 /* wrong reply message ID */
+#define MIG_REMOTE_ERROR -302 /* server detected error */
+#define MIG_BAD_ID -303 /* bad request message ID */
+#define MIG_BAD_ARGUMENTS -304 /* server type check failure */
+#define MIG_NO_REPLY -305 /* no reply should be sent */
+#define MIG_EXCEPTION -306 /* server raised exception */
+#define MIG_ARRAY_TOO_LARGE -307 /* array not large enough */
+#define MIG_SERVER_DIED -308 /* server died */
+#define MIG_DESTROY_REQUEST -309 /* destroy request with no reply */
+
+typedef struct {
+ mach_msg_header_t Head;
+ mach_msg_type_t RetCodeType;
+ kern_return_t RetCode;
+} mig_reply_header_t;
+
+typedef struct mig_symtab {
+ char *ms_routine_name;
+ int ms_routine_number;
+#if defined(__STDC__) || defined(c_plus_plus) || defined(hc)
+ void
+#else
+ int
+#endif
+ (*ms_routine)();
+} mig_symtab_t;
+
+/*
+ * Definition for server stub routines. These routines
+ * unpack the request message, call the server procedure,
+ * and pack the reply message.
+ */
+#if defined(__STDC__) || defined(c_plus_plus)
+typedef void (*mig_routine_t)(mach_msg_header_t *, mach_msg_header_t *);
+#else
+#if defined(hc)
+typedef void (*mig_routine_t)();
+#else
+typedef int (*mig_routine_t)(); /* PCC cannot handle void (*)() */
+#endif
+#endif
+
+/* Definitions for the old IPC interface. */
+
+#if MACH_IPC_COMPAT
+
+typedef struct {
+ msg_header_t Head;
+ msg_type_t RetCodeType;
+ kern_return_t RetCode;
+} death_pill_t;
+
+#endif /* MACH_IPC_COMPAT */
+
+#endif /* _MACH_MIG_ERRORS_H_ */
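As a small illustration (not part of the patch), a client can view a reply buffer as a mig_reply_header_t and distinguish a mismatched reply from the server's own result. The helper below is hypothetical; the caller supplies whatever reply id it expects (for MiG-generated interfaces this is conventionally the request id plus 100, a convention not defined in this header).

#include <mach/mig_errors.h>

/* Hypothetical client-side check on a received MiG reply. */
kern_return_t
check_reply(mig_reply_header_t *reply, mach_msg_id_t expected_id)
{
	if (reply->Head.msgh_id != expected_id)
		return MIG_REPLY_MISMATCH;	/* wrong reply message ID */
	return reply->RetCode;			/* server's own result */
}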
diff --git a/include/mach/mig_support.h b/include/mach/mig_support.h
new file mode 100644
index 00000000..e19de97a
--- /dev/null
+++ b/include/mach/mig_support.h
@@ -0,0 +1,67 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Abstract:
+ * Header file for support routines called by MiG generated interfaces.
+ *
+ */
+
+#ifndef _MACH_MIG_SUPPORT_H_
+#define _MACH_MIG_SUPPORT_H_
+
+#include <mach/message.h>
+#include <mach/mach_types.h>
+
+#if defined(MACH_KERNEL)
+
+#if defined(bcopy)
+#else /* not defined(bcopy) */
+extern void bcopy(const void *, void *, vm_size_t);
+#define memcpy(_dst,_src,_len) bcopy((_src),(_dst),(_len))
+#endif /* defined(bcopy) */
+
+#endif /* defined(MACH_KERNEL) */
+
+extern void mig_init(void *_first);
+
+extern void mig_allocate(vm_address_t *_addr_p, vm_size_t _size);
+
+extern void mig_deallocate(vm_address_t _addr, vm_size_t _size);
+
+extern void mig_dealloc_reply_port(mach_port_t);
+
+extern void mig_put_reply_port(mach_port_t);
+
+extern mach_port_t mig_get_reply_port(void);
+
+extern void mig_reply_setup(const mach_msg_header_t *_request,
+ mach_msg_header_t *reply);
+
+#ifndef MACH_KERNEL
+extern vm_size_t mig_strncpy(char *_dest, const char *_src, vm_size_t _len);
+#endif
+
+#endif /* not defined(_MACH_MIG_SUPPORT_H_) */
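As an illustrative sketch (not part of the patch), the routines above support the reply-port discipline that generated stubs roughly follow: obtain a (typically per-thread, cached) reply port, and after an interrupted exchange discard it rather than reuse it. The do_rpc() placeholder standing in for the actual mach_msg() exchange is an assumption of the sketch.

#include <mach/mig_support.h>
#include <mach/message.h>

extern mach_msg_return_t do_rpc(mach_port_t reply_port);	/* placeholder */

mach_msg_return_t
rpc_with_reply_port(void)
{
	mach_port_t reply = mig_get_reply_port();
	mach_msg_return_t mr = do_rpc(reply);

	if (mr == MACH_SEND_INTERRUPTED || mr == MACH_RCV_INTERRUPTED)
		mig_dealloc_reply_port(reply);	/* state unknown: discard it */
	else
		mig_put_reply_port(reply);	/* return it for reuse */
	return mr;
}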
diff --git a/include/mach/msg_type.h b/include/mach/msg_type.h
new file mode 100644
index 00000000..3298fd54
--- /dev/null
+++ b/include/mach/msg_type.h
@@ -0,0 +1,42 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * This file defines user msg types that may be OR'ed into
+ * the msg_type field in a msg header. Values 0-5 are reserved
+ * for use by the kernel and are defined in message.h.
+ *
+ */
+
+#ifndef _MACH_MSG_TYPE_H_
+#define _MACH_MSG_TYPE_H_
+
+#define MSG_TYPE_CAMELOT (1 << 6)
+#define MSG_TYPE_ENCRYPTED (1 << 7)
+#define MSG_TYPE_RPC (1 << 8) /* Reply expected */
+
+#include <mach/message.h>
+
+#endif /* _MACH_MSG_TYPE_H_ */
diff --git a/include/mach/multiboot.h b/include/mach/multiboot.h
new file mode 100644
index 00000000..4ae6b2f5
--- /dev/null
+++ b/include/mach/multiboot.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_MULTIBOOT_H_
+#define _MACH_MULTIBOOT_H_
+
+#include <mach/machine/vm_types.h>
+#include <mach/machine/multiboot.h>
+
+struct multiboot_region
+{
+ vm_offset_t start;
+ vm_offset_t end;
+};
+
+struct multiboot_rlist
+{
+ int count;
+ vm_offset_t regions;
+};
+
+struct multiboot_module
+{
+ /* Location and size of the module. */
+ struct multiboot_region region;
+
+ /* Command-line associated with this boot module:
+ a null-terminated ASCII string.
+ Both start and end are 0 if there is no command line.
+ The end pointer points at least one byte past the terminating null. */
+ struct multiboot_region cmdline;
+
+ /* Reserved; boot loader must initialize to zero. */
+ natural_t pad[4];
+};
+
+struct multiboot_info
+{
+ /* List of available physical memory regions.
+ Can (and probably does) include the memory containing
+ the kernel, boot modules, this structure, etc. */
+ struct multiboot_rlist avail;
+
+ /* Physical memory region occupied by things the boot loader set up
+ and the OS shouldn't clobber at least until it's all done initializing itself.
+ This includes the kernel image, boot modules, these structures,
+ initial processor tables, etc. */
+ struct multiboot_rlist occupied;
+
+ /* Command-line for the OS kernel: a null-terminated ASCII string.
+ Both start and end are 0 if there is no command line.
+ The end pointer points at least one byte past the terminating null. */
+ struct multiboot_region cmdline;
+
+ /* Secondary boot modules loaded with this kernel image. */
+ int nmods;
+ vm_offset_t mods;
+
+ /* Reserved; boot loader must initialize to zero. */
+ natural_t pad[4];
+};
+
+#endif /* _MACH_MULTIBOOT_H_ */
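As an illustrative sketch (not part of the patch), early boot code might walk the secondary boot modules described by a multiboot_info structure. The sketch assumes the `mods' array is already addressable at the address given; real boot code may first have to map or translate it.

#include <mach/multiboot.h>

/* Hypothetical walk over the boot modules, applying `fn' to each. */
static void
for_each_module(struct multiboot_info *bi,
		void (*fn)(struct multiboot_module *))
{
	struct multiboot_module *mods = (struct multiboot_module *) bi->mods;
	int i;

	for (i = 0; i < bi->nmods; i++)
		fn(&mods[i]);
}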
diff --git a/include/mach/norma_special_ports.h b/include/mach/norma_special_ports.h
new file mode 100644
index 00000000..e9114673
--- /dev/null
+++ b/include/mach/norma_special_ports.h
@@ -0,0 +1,84 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/norma_special_ports.h
+ *
+ * Defines codes for remote access to special ports. These are NOT
+ * port identifiers - they are only used for the norma_get_special_port
+ * and norma_set_special_port routines.
+ */
+
+#ifndef _MACH_NORMA_SPECIAL_PORTS_H_
+#define _MACH_NORMA_SPECIAL_PORTS_H_
+
+#define MAX_SPECIAL_KERNEL_ID 3
+#define MAX_SPECIAL_ID 32
+
+/*
+ * Provided by kernel
+ */
+#define NORMA_DEVICE_PORT 1
+#define NORMA_HOST_PORT 2
+#define NORMA_HOST_PRIV_PORT 3
+
+/*
+ * Not provided by kernel
+ */
+#define NORMA_NAMESERVER_PORT (1 + MAX_SPECIAL_KERNEL_ID)
+
+/*
+ * Definitions for ease of use.
+ *
+ * In the get call, the host parameter can be any host, but will generally
+ * be the local node host port. In the set call, the host must be the per-node
+ * host port for the node being affected.
+ */
+
+#define norma_get_device_port(host, node, port) \
+ (norma_get_special_port((host), (node), NORMA_DEVICE_PORT, (port)))
+
+#define norma_set_device_port(host, port) \
+ (norma_set_special_port((host), NORMA_DEVICE_PORT, (port)))
+
+#define norma_get_host_port(host, node, port) \
+ (norma_get_special_port((host), (node), NORMA_HOST_PORT, (port)))
+
+#define norma_set_host_port(host, port) \
+ (norma_set_special_port((host), NORMA_HOST_PORT, (port)))
+
+#define norma_get_host_priv_port(host, node, port) \
+ (norma_get_special_port((host), (node), NORMA_HOST_PRIV_PORT, (port)))
+
+#define norma_set_host_priv_port(host, port) \
+ (norma_set_special_port((host), NORMA_HOST_PRIV_PORT, (port)))
+
+#define norma_get_nameserver_port(host, node, port) \
+ (norma_get_special_port((host), (node), NORMA_NAMESERVER_PORT, (port)))
+
+#define norma_set_nameserver_port(host, port) \
+ (norma_set_special_port((host), NORMA_NAMESERVER_PORT, (port)))
+
+#endif /* _MACH_NORMA_SPECIAL_PORTS_H_ */
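As a small, hypothetical usage sketch (not part of the patch), the convenience macros above expand to calls on norma_get_special_port()/norma_set_special_port(), which are not declared in this header; the prototype and the int node parameter below are assumptions.

#include <mach/norma_special_ports.h>
#include <mach/port.h>
#include <mach/kern_return.h>

/* Assumed prototype for the underlying call (not declared in this header). */
extern kern_return_t norma_get_special_port(mach_port_t, int, int,
					    mach_port_t *);

kern_return_t
lookup_node_host(mach_port_t local_host, int node, mach_port_t *host)
{
	return norma_get_host_port(local_host, node, host);
}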
diff --git a/include/mach/norma_task.defs b/include/mach/norma_task.defs
new file mode 100644
index 00000000..1ae5972a
--- /dev/null
+++ b/include/mach/norma_task.defs
@@ -0,0 +1,43 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif KERNEL_USER
+#if KERNEL_SERVER
+ KernelServer
+#endif KERNEL_SERVER
+ norma_task 666000;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+routine norma_task_create(
+ norma_task_server : mach_port_t;
+ target_task : mach_port_t;
+ inherit_memory : boolean_t;
+ out child_task : mach_port_t);
diff --git a/include/mach/notify.defs b/include/mach/notify.defs
new file mode 100644
index 00000000..e06f6b41
--- /dev/null
+++ b/include/mach/notify.defs
@@ -0,0 +1,95 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+subsystem notify 64;
+
+#include <mach/std_types.defs>
+
+#if SEQNOS
+serverprefix do_seqnos_;
+serverdemux seqnos_notify_server;
+#else SEQNOS
+serverprefix do_;
+serverdemux notify_server;
+#endif SEQNOS
+
+type notify_port_t = MACH_MSG_TYPE_MOVE_SEND_ONCE
+ ctype: mach_port_t;
+
+/* MACH_NOTIFY_FIRST: 0100 */
+skip;
+
+/* MACH_NOTIFY_PORT_DELETED: 0101 */
+simpleroutine mach_notify_port_deleted(
+ notify : notify_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ name : mach_port_name_t);
+
+/* MACH_NOTIFY_MSG_ACCEPTED: 0102 */
+simpleroutine mach_notify_msg_accepted(
+ notify : notify_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ name : mach_port_name_t);
+
+skip; /* was NOTIFY_OWNERSHIP_RIGHTS: 0103 */
+
+skip; /* was NOTIFY_RECEIVE_RIGHTS: 0104 */
+
+/* MACH_NOTIFY_PORT_DESTROYED: 0105 */
+simpleroutine mach_notify_port_destroyed(
+ notify : notify_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ rights : mach_port_receive_t);
+
+/* MACH_NOTIFY_NO_SENDERS: 0106 */
+simpleroutine mach_notify_no_senders(
+ notify : notify_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ mscount : mach_port_mscount_t);
+
+/* MACH_NOTIFY_SEND_ONCE: 0107 */
+simpleroutine mach_notify_send_once(
+ notify : notify_port_t
+#if SEQNOS
+; msgseqno seqno : mach_port_seqno_t
+#endif SEQNOS
+ );
+
+/* MACH_NOTIFY_DEAD_NAME: 0110 */
+simpleroutine mach_notify_dead_name(
+ notify : notify_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ name : mach_port_name_t);
diff --git a/include/mach/notify.h b/include/mach/notify.h
new file mode 100644
index 00000000..9fa65ad0
--- /dev/null
+++ b/include/mach/notify.h
@@ -0,0 +1,122 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/notify.h
+ *
+ * Kernel notification message definitions.
+ */
+
+#ifndef _MACH_NOTIFY_H_
+#define _MACH_NOTIFY_H_
+
+#ifdef MACH_KERNEL
+#include <mach_ipc_compat.h>
+#endif /* MACH_KERNEL */
+
+#include <mach/port.h>
+#include <mach/message.h>
+
+/*
+ * An alternative specification of the notification interface
+ * may be found in mach/notify.defs.
+ */
+
+#define MACH_NOTIFY_FIRST 0100
+#define MACH_NOTIFY_PORT_DELETED (MACH_NOTIFY_FIRST + 001)
+ /* A send or send-once right was deleted. */
+#define MACH_NOTIFY_MSG_ACCEPTED (MACH_NOTIFY_FIRST + 002)
+ /* A MACH_SEND_NOTIFY msg was accepted */
+#define MACH_NOTIFY_PORT_DESTROYED (MACH_NOTIFY_FIRST + 005)
+ /* A receive right was (would have been) deallocated */
+#define MACH_NOTIFY_NO_SENDERS (MACH_NOTIFY_FIRST + 006)
+ /* Receive right has no extant send rights */
+#define MACH_NOTIFY_SEND_ONCE (MACH_NOTIFY_FIRST + 007)
+ /* An extant send-once right died */
+#define MACH_NOTIFY_DEAD_NAME (MACH_NOTIFY_FIRST + 010)
+ /* Send or send-once right died, leaving a dead-name */
+#define MACH_NOTIFY_LAST (MACH_NOTIFY_FIRST + 015)
+
+typedef struct {
+ mach_msg_header_t not_header;
+ mach_msg_type_t not_type; /* MACH_MSG_TYPE_PORT_NAME */
+ mach_port_t not_port;
+} mach_port_deleted_notification_t;
+
+typedef struct {
+ mach_msg_header_t not_header;
+ mach_msg_type_t not_type; /* MACH_MSG_TYPE_PORT_NAME */
+ mach_port_t not_port;
+} mach_msg_accepted_notification_t;
+
+typedef struct {
+ mach_msg_header_t not_header;
+ mach_msg_type_t not_type; /* MACH_MSG_TYPE_PORT_RECEIVE */
+ mach_port_t not_port;
+} mach_port_destroyed_notification_t;
+
+typedef struct {
+ mach_msg_header_t not_header;
+ mach_msg_type_t not_type; /* MACH_MSG_TYPE_INTEGER_32 */
+ unsigned int not_count;
+} mach_no_senders_notification_t;
+
+typedef struct {
+ mach_msg_header_t not_header;
+} mach_send_once_notification_t;
+
+typedef struct {
+ mach_msg_header_t not_header;
+ mach_msg_type_t not_type; /* MACH_MSG_TYPE_PORT_NAME */
+ mach_port_t not_port;
+} mach_dead_name_notification_t;
+
+
+/* Definitions for the old IPC interface. */
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Notifications sent upon interesting system events.
+ */
+
+#define NOTIFY_FIRST 0100
+#define NOTIFY_PORT_DELETED ( NOTIFY_FIRST + 001 )
+#define NOTIFY_MSG_ACCEPTED ( NOTIFY_FIRST + 002 )
+#define NOTIFY_OWNERSHIP_RIGHTS ( NOTIFY_FIRST + 003 )
+#define NOTIFY_RECEIVE_RIGHTS ( NOTIFY_FIRST + 004 )
+#define NOTIFY_PORT_DESTROYED ( NOTIFY_FIRST + 005 )
+#define NOTIFY_NO_MORE_SENDERS ( NOTIFY_FIRST + 006 )
+#define NOTIFY_LAST ( NOTIFY_FIRST + 015 )
+
+typedef struct {
+ msg_header_t notify_header;
+ msg_type_t notify_type;
+ port_t notify_port;
+} notification_t;
+
+#endif /* MACH_IPC_COMPAT */
+
+#endif /* _MACH_NOTIFY_H_ */
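As an illustrative sketch (not part of the patch), a task that has requested notifications can dispatch on the msgh_id values defined above and cast the message to the matching structure. The handler name and the assumption that `msg' was received on a notification port are made up for the example.

#include <mach/notify.h>

void
handle_notification(mach_msg_header_t *msg)
{
	switch (msg->msgh_id) {
	case MACH_NOTIFY_NO_SENDERS: {
		mach_no_senders_notification_t *n =
			(mach_no_senders_notification_t *) msg;
		/* n->not_count is the port's make-send count at the time
		   the last send right went away. */
		(void) n;
		break;
	}
	case MACH_NOTIFY_DEAD_NAME: {
		mach_dead_name_notification_t *n =
			(mach_dead_name_notification_t *) msg;
		/* n->not_port names the right that became a dead name. */
		(void) n;
		break;
	}
	default:
		break;
	}
}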
diff --git a/include/mach/pc_sample.h b/include/mach/pc_sample.h
new file mode 100644
index 00000000..662addb9
--- /dev/null
+++ b/include/mach/pc_sample.h
@@ -0,0 +1,66 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_PC_SAMPLE_H_
+#define _MACH_PC_SAMPLE_H_
+
+#include <mach/machine/vm_types.h>
+
+typedef natural_t sampled_pc_flavor_t;
+
+
+#define SAMPLED_PC_PERIODIC 0x1 /* default */
+
+
+#define SAMPLED_PC_VM_ZFILL_FAULTS 0x10
+#define SAMPLED_PC_VM_REACTIVATION_FAULTS 0x20
+#define SAMPLED_PC_VM_PAGEIN_FAULTS 0x40
+#define SAMPLED_PC_VM_COW_FAULTS 0x80
+#define SAMPLED_PC_VM_FAULTS_ANY 0x100
+#define SAMPLED_PC_VM_FAULTS \
+ (SAMPLED_PC_VM_ZFILL_FAULTS | \
+ SAMPLED_PC_VM_REACTIVATION_FAULTS |\
+ SAMPLED_PC_VM_PAGEIN_FAULTS |\
+ SAMPLED_PC_VM_COW_FAULTS )
+
+
+
+
+/*
+ * Definitions for the PC sampling interface.
+ */
+
+typedef struct sampled_pc {
+ natural_t id;
+ vm_offset_t pc;
+ sampled_pc_flavor_t sampletype;
+} sampled_pc_t;
+
+typedef sampled_pc_t *sampled_pc_array_t;
+typedef unsigned int sampled_pc_seqno_t;
+
+
+#endif /* _MACH_PC_SAMPLE_H_ */
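As a small illustration (not part of the patch), a profiler that has obtained a buffer of samples (for instance from the kernel's PC-sampling call, which is not declared in this header) could summarize them by flavor using only the types above.

#include <mach/pc_sample.h>

/* Count how many samples were attributed to any kind of VM fault. */
unsigned int
count_vm_fault_samples(sampled_pc_array_t samples, unsigned int count)
{
	unsigned int i, nfaults = 0;

	for (i = 0; i < count; i++)
		if (samples[i].sampletype & SAMPLED_PC_VM_FAULTS)
			nfaults++;
	return nfaults;
}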
diff --git a/include/mach/policy.h b/include/mach/policy.h
new file mode 100644
index 00000000..da776c98
--- /dev/null
+++ b/include/mach/policy.h
@@ -0,0 +1,45 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_POLICY_H_
+#define _MACH_POLICY_H_
+
+/*
+ * mach/policy.h
+ *
+ * Definitions for scheduling policy.
+ */
+
+/*
+ * Policy definitions. Policies must be powers of 2.
+ */
+#define POLICY_TIMESHARE 1
+#define POLICY_FIXEDPRI 2
+#define POLICY_LAST 2
+
+#define invalid_policy(policy) (((policy) <= 0) || ((policy) > POLICY_LAST))
+
+#endif /* _MACH_POLICY_H_ */
diff --git a/include/mach/port.h b/include/mach/port.h
new file mode 100644
index 00000000..6b9de370
--- /dev/null
+++ b/include/mach/port.h
@@ -0,0 +1,189 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/port.h
+ *
+ * Definition of a port
+ *
+ * [The basic mach_port_t type should probably be machine-dependent,
+ * as it must be represented by a 32-bit integer.]
+ */
+
+#ifndef _MACH_PORT_H_
+#define _MACH_PORT_H_
+
+#ifdef MACH_KERNEL
+#include <mach_ipc_compat.h>
+#endif /* MACH_KERNEL */
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+
+
+typedef natural_t mach_port_t;
+typedef mach_port_t *mach_port_array_t;
+typedef int *rpc_signature_info_t;
+
+/*
+ * MACH_PORT_NULL is a legal value that can be carried in messages.
+ * It indicates the absence of any port or port rights. (A port
+ * argument keeps the message from being "simple", even if the
+ * value is MACH_PORT_NULL.) The value MACH_PORT_DEAD is also
+ * a legal value that can be carried in messages. It indicates
+ * that a port right was present, but it died.
+ */
+
+#define MACH_PORT_NULL ((mach_port_t) 0)
+#define MACH_PORT_DEAD ((mach_port_t) ~0)
+
+#define MACH_PORT_VALID(name) \
+ (((name) != MACH_PORT_NULL) && ((name) != MACH_PORT_DEAD))
+
+/*
+ * These are the different rights a task may have.
+ * The MACH_PORT_RIGHT_* definitions are used as arguments
+ * to mach_port_allocate, mach_port_get_refs, etc, to specify
+ * a particular right to act upon. The mach_port_names and
+ * mach_port_type calls return bitmasks using the MACH_PORT_TYPE_*
+ * definitions. This is because a single name may denote
+ * multiple rights.
+ */
+
+typedef natural_t mach_port_right_t;
+
+#define MACH_PORT_RIGHT_SEND ((mach_port_right_t) 0)
+#define MACH_PORT_RIGHT_RECEIVE ((mach_port_right_t) 1)
+#define MACH_PORT_RIGHT_SEND_ONCE ((mach_port_right_t) 2)
+#define MACH_PORT_RIGHT_PORT_SET ((mach_port_right_t) 3)
+#define MACH_PORT_RIGHT_DEAD_NAME ((mach_port_right_t) 4)
+#define MACH_PORT_RIGHT_NUMBER ((mach_port_right_t) 5)
+
+typedef natural_t mach_port_type_t;
+typedef mach_port_type_t *mach_port_type_array_t;
+
+#define MACH_PORT_TYPE(right) ((mach_port_type_t)(1 << ((right)+16)))
+#define MACH_PORT_TYPE_NONE ((mach_port_type_t) 0)
+#define MACH_PORT_TYPE_SEND MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND)
+#define MACH_PORT_TYPE_RECEIVE MACH_PORT_TYPE(MACH_PORT_RIGHT_RECEIVE)
+#define MACH_PORT_TYPE_SEND_ONCE MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND_ONCE)
+#define MACH_PORT_TYPE_PORT_SET MACH_PORT_TYPE(MACH_PORT_RIGHT_PORT_SET)
+#define MACH_PORT_TYPE_DEAD_NAME MACH_PORT_TYPE(MACH_PORT_RIGHT_DEAD_NAME)
+
+/* Convenient combinations. */
+
+#define MACH_PORT_TYPE_SEND_RECEIVE \
+ (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_RECEIVE)
+#define MACH_PORT_TYPE_SEND_RIGHTS \
+ (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE)
+#define MACH_PORT_TYPE_PORT_RIGHTS \
+ (MACH_PORT_TYPE_SEND_RIGHTS|MACH_PORT_TYPE_RECEIVE)
+#define MACH_PORT_TYPE_PORT_OR_DEAD \
+ (MACH_PORT_TYPE_PORT_RIGHTS|MACH_PORT_TYPE_DEAD_NAME)
+#define MACH_PORT_TYPE_ALL_RIGHTS \
+ (MACH_PORT_TYPE_PORT_OR_DEAD|MACH_PORT_TYPE_PORT_SET)
+
+/* Dummy type bits that mach_port_type/mach_port_names can return. */
+
+#define MACH_PORT_TYPE_DNREQUEST 0x80000000U
+#define MACH_PORT_TYPE_MAREQUEST 0x40000000
+#define MACH_PORT_TYPE_COMPAT 0x20000000
+
+/* User-references for capabilities. */
+
+typedef natural_t mach_port_urefs_t;
+typedef integer_t mach_port_delta_t; /* change in urefs */
+
+/* Attributes of ports. (See mach_port_get_receive_status.) */
+
+typedef natural_t mach_port_seqno_t; /* sequence number */
+typedef unsigned int mach_port_mscount_t; /* make-send count */
+typedef unsigned int mach_port_msgcount_t; /* number of msgs */
+typedef unsigned int mach_port_rights_t; /* number of rights */
+
+typedef struct mach_port_status {
+ mach_port_t mps_pset; /* containing port set */
+ mach_port_seqno_t mps_seqno; /* sequence number */
+/*mach_port_mscount_t*/natural_t mps_mscount; /* make-send count */
+/*mach_port_msgcount_t*/natural_t mps_qlimit; /* queue limit */
+/*mach_port_msgcount_t*/natural_t mps_msgcount; /* number in the queue */
+/*mach_port_rights_t*/natural_t mps_sorights; /* how many send-once rights */
+/*boolean_t*/natural_t mps_srights; /* do send rights exist? */
+/*boolean_t*/natural_t mps_pdrequest; /* port-deleted requested? */
+/*boolean_t*/natural_t mps_nsrequest; /* no-senders requested? */
+} mach_port_status_t;
+
+#define MACH_PORT_QLIMIT_DEFAULT ((mach_port_msgcount_t) 5)
+#define MACH_PORT_QLIMIT_MAX ((mach_port_msgcount_t) 16)
+
+/*
+ * Compatibility definitions, for code written
+ * before there was an mps_seqno field.
+ */
+
+typedef struct old_mach_port_status {
+ mach_port_t mps_pset; /* containing port set */
+/*mach_port_mscount_t*/natural_t mps_mscount; /* make-send count */
+/*mach_port_msgcount_t*/natural_t mps_qlimit; /* queue limit */
+/*mach_port_msgcount_t*/natural_t mps_msgcount; /* number in the queue */
+/*mach_port_rights_t*/natural_t mps_sorights; /* how many send-once rights */
+/*boolean_t*/natural_t mps_srights; /* do send rights exist? */
+/*boolean_t*/natural_t mps_pdrequest; /* port-deleted requested? */
+/*boolean_t*/natural_t mps_nsrequest; /* no-senders requested? */
+} old_mach_port_status_t;
+
+
+/* Definitions for the old IPC interface. */
+
+#if MACH_IPC_COMPAT
+
+typedef integer_t port_name_t; /* A capability's name */
+typedef port_name_t port_set_name_t; /* Descriptive alias */
+typedef port_name_t *port_name_array_t;
+
+typedef integer_t port_type_t; /* What kind of capability? */
+typedef port_type_t *port_type_array_t;
+
+ /* Values for port_type_t */
+
+#define PORT_TYPE_NONE 0 /* No rights */
+#define PORT_TYPE_SEND 1 /* Send rights */
+#define PORT_TYPE_RECEIVE 3 /* obsolete */
+#define PORT_TYPE_OWN 5 /* obsolete */
+#define PORT_TYPE_RECEIVE_OWN 7 /* Send, receive, ownership */
+#define PORT_TYPE_SET 9 /* Set ownership */
+#define PORT_TYPE_LAST 10 /* Last assigned */
+
+typedef port_name_t port_t; /* Port with send rights */
+typedef port_t port_rcv_t; /* Port with receive rights */
+typedef port_t port_own_t; /* Port with ownership rights */
+typedef port_t port_all_t; /* Port with receive and ownership */
+typedef port_t *port_array_t;
+
+#define PORT_NULL ((port_name_t) 0) /* Used to denote no port; legal value */
+
+#endif /* MACH_IPC_COMPAT */
+
+#endif /* _MACH_PORT_H_ */
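
A small worked example of the right/type mapping defined above: a right
constant n becomes type bit (1 << (n + 16)), and the two distinguished names
fail MACH_PORT_VALID even though both are legal message values (the use of
assert() here is only for illustration):

    #include <assert.h>

    static void port_type_example(void)
    {
            /* MACH_PORT_TYPE(n) == 1 << (n + 16) */
            assert(MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND)    == 0x00010000);
            assert(MACH_PORT_TYPE(MACH_PORT_RIGHT_RECEIVE) == 0x00020000);
            assert(MACH_PORT_TYPE_SEND_RECEIVE             == 0x00030000);

            assert(!MACH_PORT_VALID(MACH_PORT_NULL));
            assert(!MACH_PORT_VALID(MACH_PORT_DEAD));
    }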
diff --git a/include/mach/proc_ops.h b/include/mach/proc_ops.h
new file mode 100644
index 00000000..a453ec88
--- /dev/null
+++ b/include/mach/proc_ops.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSS
+ */
+/*
+ * Simple operations that don't exist as primitives in C,
+ * but which processors often implement directly.
+ * This file contains default, "dumb" implementations;
+ * machine-dependent code can override these with smarter implementations.
+ */
+#ifndef _MACH_PROC_OPS_H_
+#define _MACH_PROC_OPS_H_
+
+#include <mach/machine/vm_types.h>
+#include <mach/inline.h>
+
+/* Returns the bit number of the most-significant set bit in `val',
+ e.g. 0 for 1, 1 for 2-3, 2 for 4-7, etc.
+ If `val' is 0 (i.e. no bits are set), the behavior is undefined. */
+MACH_INLINE int find_msb_set(natural_t val)
+{
+ int msb;
+ for (msb = sizeof(val)*8-1; (val & ((natural_t)1 << msb)) == 0; msb--);
+ return msb;
+}
+
+/* Returns the bit number of the least-significant set bit in `val'.
+ If `val' is 0 (i.e. no bits are set), the behavior is undefined. */
+MACH_INLINE int find_lsb_set(natural_t val)
+{
+ int lsb;
+ for (lsb = 0; (val & ((natural_t)1 << lsb)) == 0; lsb++);
+ return lsb;
+}
+
+#endif /* _MACH_PROC_OPS_H_ */
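
The loops above follow directly from the definitions; a quick sanity check
(the surrounding function and the assert() calls are just a sketch):

    static void proc_ops_example(void)
    {
            assert(find_msb_set(1) == 0);
            assert(find_msb_set(0x90) == 7);   /* bits 7 and 4 set */
            assert(find_lsb_set(0x90) == 4);
            assert(find_lsb_set(1) == 0);
    }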
diff --git a/include/mach/processor_info.h b/include/mach/processor_info.h
new file mode 100644
index 00000000..5f761ea8
--- /dev/null
+++ b/include/mach/processor_info.h
@@ -0,0 +1,104 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/processor_info.h
+ * Author: David L. Black
+ * Date: 1988
+ *
+ * Data structure definitions for processor_info, processor_set_info
+ */
+
+#ifndef _MACH_PROCESSOR_INFO_H_
+#define _MACH_PROCESSOR_INFO_H_
+
+#include <mach/machine.h>
+
+/*
+ * Generic information structure to allow for expansion.
+ */
+typedef integer_t *processor_info_t; /* varying array of int. */
+
+#define PROCESSOR_INFO_MAX (1024) /* max array size */
+typedef integer_t processor_info_data_t[PROCESSOR_INFO_MAX];
+
+
+typedef integer_t *processor_set_info_t; /* varying array of int. */
+
+#define PROCESSOR_SET_INFO_MAX (1024) /* max array size */
+typedef integer_t processor_set_info_data_t[PROCESSOR_SET_INFO_MAX];
+
+/*
+ * Currently defined information.
+ */
+#define PROCESSOR_BASIC_INFO 1 /* basic information */
+
+struct processor_basic_info {
+ cpu_type_t cpu_type; /* type of cpu */
+ cpu_subtype_t cpu_subtype; /* subtype of cpu */
+/*boolean_t*/integer_t running; /* is processor running */
+ integer_t slot_num; /* slot number */
+/*boolean_t*/integer_t is_master; /* is this the master processor */
+};
+
+typedef struct processor_basic_info processor_basic_info_data_t;
+typedef struct processor_basic_info *processor_basic_info_t;
+#define PROCESSOR_BASIC_INFO_COUNT \
+ (sizeof(processor_basic_info_data_t)/sizeof(integer_t))
+
+
+#define PROCESSOR_SET_BASIC_INFO 1 /* basic information */
+
+struct processor_set_basic_info {
+ integer_t processor_count; /* How many processors */
+ integer_t task_count; /* How many tasks */
+ integer_t thread_count; /* How many threads */
+ integer_t load_average; /* Scaled */
+ integer_t mach_factor; /* Scaled */
+};
+
+/*
+ * Scaling factor for load_average, mach_factor.
+ */
+#define LOAD_SCALE 1000
+
+typedef struct processor_set_basic_info processor_set_basic_info_data_t;
+typedef struct processor_set_basic_info *processor_set_basic_info_t;
+#define PROCESSOR_SET_BASIC_INFO_COUNT \
+ (sizeof(processor_set_basic_info_data_t)/sizeof(integer_t))
+
+#define PROCESSOR_SET_SCHED_INFO 2 /* scheduling info */
+
+struct processor_set_sched_info {
+ integer_t policies; /* allowed policies */
+ integer_t max_priority; /* max priority for new threads */
+};
+
+typedef struct processor_set_sched_info processor_set_sched_info_data_t;
+typedef struct processor_set_sched_info *processor_set_sched_info_t;
+#define PROCESSOR_SET_SCHED_INFO_COUNT \
+ (sizeof(processor_set_sched_info_data_t)/sizeof(integer_t))
+
+#endif /* _MACH_PROCESSOR_INFO_H_ */
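
A hedged sketch of the *_COUNT idiom: each count macro gives the size of its
info structure in integer_t units, which is how callers size the
variable-length array they hand to the info calls.  The call itself is only
indicated in a comment; its exact signature is not defined by this header:

    static void pset_info_sketch(void)
    {
            processor_set_basic_info_data_t basic;
            natural_t count = PROCESSOR_SET_BASIC_INFO_COUNT;

            /* ... pass (processor_set_info_t) &basic together with &count
               to the processor-set info call; on return the structure's
               fields (processor_count, load_average, ...) are filled in
               and count holds the number of integer_t elements used ... */
            (void) basic;
            (void) count;
    }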
diff --git a/include/mach/profil.h b/include/mach/profil.h
new file mode 100644
index 00000000..0eb4ce47
--- /dev/null
+++ b/include/mach/profil.h
@@ -0,0 +1,212 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Copyright 1991 by Open Software Foundation,
+ * Grenoble, FRANCE
+ *
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OSF or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior
+ * permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef _MACH_PROFIL_H_
+#define _MACH_PROFIL_H_
+
+#include <mach/boolean.h>
+#include <ipc/ipc_object.h>
+#include <vm/vm_kern.h>
+
+
+#define NB_PROF_BUFFER 2 /* number of buffers servicing a
+ * profiled thread */
+#define SIZE_PROF_BUFFER 100 /* size of a profil buffer (in int)
+ * This value is also defined in
+ * the server (ugly), so be careful! */
+
+
+struct prof_data {
+ ipc_object_t prof_port; /* where to send a full buffer */
+
+ struct buffer {
+ int *p_zone; /* points to the actual storage area */
+ int p_index;/* next slot to be filled */
+ boolean_t p_full; /* is the current buffer full ? */
+ } prof_area[NB_PROF_BUFFER];
+
+ int prof_index; /* index of the buffer structure
+ * currently in use */
+
+};
+typedef struct prof_data *prof_data_t;
+#define NULLPBUF ((prof_data_t) 0)
+typedef struct buffer *buffer_t;
+
+/* Macros */
+
+#define set_pbuf_nb(pbuf, nb) \
+ (((nb) >= 0 && (nb) < NB_PROF_BUFFER) \
+ ? (pbuf)->prof_index = (nb), 1 \
+ : 0)
+
+
+#define get_pbuf_nb(pbuf) \
+ (pbuf)->prof_index
+
+
+extern vm_map_t kernel_map;
+
+#define dealloc_pbuf_area(pbuf) \
+ { \
+ register int i; \
+ \
+ for(i=0; i < NB_PROF_BUFFER ; i++) \
+ kmem_free(kernel_map, \
+ (vm_offset_t) (pbuf)->prof_area[i].p_zone, \
+ SIZE_PROF_BUFFER*sizeof(int)); \
+ kmem_free(kernel_map, \
+ (vm_offset_t)(pbuf), \
+ sizeof(struct prof_data)); \
+ }
+
+
+#define alloc_pbuf_area(pbuf, vmpbuf) \
+ (vmpbuf) = (vm_offset_t) 0; \
+ if (kmem_alloc(kernel_map, &(vmpbuf) , sizeof(struct prof_data)) == \
+ KERN_SUCCESS) { \
+ register int i; \
+ register boolean_t end; \
+ \
+ (pbuf) = (prof_data_t) (vmpbuf); \
+ for(i=0, end=FALSE; i < NB_PROF_BUFFER && end == FALSE; i++) { \
+ (vmpbuf) = (vm_offset_t) 0; \
+ if (kmem_alloc(kernel_map,&(vmpbuf),SIZE_PROF_BUFFER*sizeof(int)) == KERN_SUCCESS) { \
+ (pbuf)->prof_area[i].p_zone = (int *) (vmpbuf); \
+ (pbuf)->prof_area[i].p_full = FALSE; \
+ } \
+ else { \
+ (pbuf) = NULLPBUF; \
+ end = TRUE; \
+ } \
+ } \
+ } \
+ else \
+ (pbuf) = NULLPBUF;
+
+
+
+/* MACRO set_pbuf_value
+**
+** Enters the value 'val' in the buffer 'pbuf' and returns one of the
+** following indications:
+**   0: a fatal error occurred: the buffer was already full
+**      (and it hasn't been sent yet)
+**   1: the value has been inserted successfully
+**   2: we've just entered the last value, causing the current buffer
+**      to become full (must switch to another buffer and signal the
+**      sender to send it)
+*/
+
+#define set_pbuf_value(pbuf, val) \
+ { \
+ register buffer_t a = &((pbuf)->prof_area[(pbuf)->prof_index]); \
+ register int i = a->p_index++; \
+ register boolean_t f = a->p_full; \
+ \
+ if (f == TRUE ) \
+ *(val) = 0; \
+ else { \
+ a->p_zone[i] = *(val); \
+ if (i == SIZE_PROF_BUFFER-1) { \
+ a->p_full = TRUE; \
+ *(val) = 2; \
+ } \
+ else \
+ *(val) = 1; \
+ } \
+ }
+
+
+#define reset_pbuf_area(pbuf) \
+ { \
+ register int *i = &((pbuf)->prof_index); \
+ \
+ *i = (*i == NB_PROF_BUFFER-1) ? 0 : *i + 1; \
+ (pbuf)->prof_area[*i].p_index = 0; \
+ }
+
+
+/**************************************************************/
+/* Structure, elements used for queuing operations on buffers */
+/**************************************************************/
+
+#define thread_t int *
+/*
+** This must be done in order to avoid a circular inclusion
+** with the file kern/thread.h.
+** When using this data structure, one must cast to the actual
+** type, i.e. (int *) or (thread_t).
+*/
+
+struct buf_to_send {
+ queue_chain_t list;
+ thread_t thread;
+ int number; /* the number of the buffer to be sent */
+ char wakeme; /* do wakeup when buffer has been sent */
+ } ;
+
+#undef thread_t
+
+
+
+typedef struct buf_to_send *buf_to_send_t;
+
+#define NULLBTS ((buf_to_send_t) 0)
+
+/*
+** Global variable: the head of the queue of buffers to send.
+** It is a queue with locks (uses macros from queue.h) and it
+** is shared by hardclock() and the sender_thread().
+*/
+
+mpqueue_head_t prof_queue;
+
+#endif /* _MACH_PROFIL_H_ */
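
A usage sketch for the buffer macros above (the caller and the value being
recorded are assumptions; the 0/1/2 indications are the ones documented for
set_pbuf_value):

    static void record_sample(prof_data_t pbuf, int value)
    {
            int val = value;

            set_pbuf_value(pbuf, &val);
            if (val == 0) {
                    /* fatal: the current buffer was already full
                       and has not been sent yet */
            } else if (val == 2) {
                    /* buffer just became full: switch buffers, then
                       queue a struct buf_to_send and wake the sender
                       so the full one goes out to prof_port */
                    reset_pbuf_area(pbuf);
            }
            /* val == 1: stored, nothing further to do */
    }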
diff --git a/include/mach/profilparam.h b/include/mach/profilparam.h
new file mode 100644
index 00000000..20a8aaff
--- /dev/null
+++ b/include/mach/profilparam.h
@@ -0,0 +1,62 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Copyright 1991 by Open Software Foundation,
+ * Grenoble, FRANCE
+ *
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OSF or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior
+ * permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACH_PROFILPARAM_H_
+#define _MACH_PROFILPARAM_H_
+
+/*
+ * These values are also used when compiling the server; be careful!
+ */
+
+#define NB_PROF_BUFFER 2 /* number of buffers servicing a
+ * profiled thread */
+#define SIZE_PROF_BUFFER 100 /* size of a profil buffer (in int) */
+
+#endif /* _MACH_PROFILPARAM_H_ */
diff --git a/include/mach/rpc.h b/include/mach/rpc.h
new file mode 100644
index 00000000..d3098f80
--- /dev/null
+++ b/include/mach/rpc.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ */
+
+#ifndef MACH_RPC_H
+#define MACH_RPC_H
+
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <mach/machine/rpc.h>
+
+/*
+ * Description of a port passed up by the leaky-register RPC path
+ * when it needs to perform translation.
+ */
+struct rpc_port_desc {
+ mach_port_t name;
+ mach_msg_type_name_t msgt_name;
+};
+
+#endif /* MACH_RPC_H */
diff --git a/include/mach/sa/a.out.h b/include/mach/sa/a.out.h
new file mode 100644
index 00000000..8ab8ba87
--- /dev/null
+++ b/include/mach/sa/a.out.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_SA_A_OUT_H_
+#define _MACH_SA_A_OUT_H_
+
+#include <mach/exec/a.out.h>
+
+#endif /* _MACH_SA_A_OUT_H_ */
diff --git a/include/mach/sa/alloca.h b/include/mach/sa/alloca.h
new file mode 100644
index 00000000..0a476b4f
--- /dev/null
+++ b/include/mach/sa/alloca.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_ALLOCA_H_
+#define _MACH_ALLOCA_H_
+
+#define alloca(size) __builtin_alloca(size)
+
+#endif /* _MACH_ALLOCA_H_ */
diff --git a/include/mach/sa/assert.h b/include/mach/sa/assert.h
new file mode 100644
index 00000000..8c12f1cb
--- /dev/null
+++ b/include/mach/sa/assert.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _ASSERT_H_
+#define _ASSERT_H_
+
+#ifdef NDEBUG
+
+#define assert(ignore) ((void)0)
+
+#else
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+extern void panic(const char *format, ...);
+__END_DECLS
+
+#define assert(expression) \
+ ((void)((expression) ? 0 : (panic("%s:%u: failed assertion `%s'", \
+ __FILE__, __LINE__, #expression), 0)))
+
+#endif
+
+#endif /* _ASSERT_H_ */
diff --git a/include/mach/sa/ctype.h b/include/mach/sa/ctype.h
new file mode 100644
index 00000000..40b53662
--- /dev/null
+++ b/include/mach/sa/ctype.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_CTYPE_H_
+#define _MACH_CTYPE_H_
+
+#include <sys/cdefs.h>
+
+__INLINE_FUNC int isdigit(char c)
+{
+ return ((c) >= '0') && ((c) <= '9');
+}
+
+__INLINE_FUNC int isspace(char c)
+{
+ return ((c) == ' ') || ((c) == '\f')
+ || ((c) == '\n') || ((c) == '\r')
+ || ((c) == '\t') || ((c) == '\v');
+}
+
+__INLINE_FUNC int isalpha(char c)
+{
+ return (((c) >= 'a') && ((c) <= 'z'))
+ || (((c) >= 'A') && ((c) <= 'Z'));
+}
+
+__INLINE_FUNC int isalnum(char c)
+{
+ return isalpha(c) || isdigit(c);
+}
+
+__INLINE_FUNC int toupper(char c)
+{
+ return ((c >= 'a') && (c <= 'z')) ? (c - 'a' + 'A') : c;
+}
+
+__INLINE_FUNC int tolower(char c)
+{
+ return ((c >= 'A') && (c <= 'Z')) ? (c - 'A' + 'a') : c;
+}
+
+
+#endif /* _MACH_CTYPE_H_ */
diff --git a/include/mach/sa/errno.h b/include/mach/sa/errno.h
new file mode 100644
index 00000000..1e8be239
--- /dev/null
+++ b/include/mach/sa/errno.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * This header file defines a set of POSIX errno values
+ * that fits consistently into the Mach error code "space" -
+ * i.e. these error code values can be mixed with kern_return_t's
+ * and mach_msg_return_t's and such without conflict.
+ * Higher-level services are not required to use these values
+ * (or, for that matter, any of the mach/sa header files),
+ * but if they use other values of their own choosing,
+ * those values may conflict with values in the Mach error code space,
+ * making it necessary to keep the different types of error codes separate.
+ *
+ * (For example, Lites uses BSD's errno values,
+ * which conflict with Mach's kern_return_t values,
+ * and therefore must carefully distinguish between BSD and Mach error codes
+ * and never return one type when the other is expected, etc. -
+ * we've found this to be a frequent source of bugs.)
+ *
+ * One (probably the main) disadvantage of using these error codes
+ * is that, since they don't start from around 0 like typical Unix errno values,
+ * it's impossible to provide a conventional Unix-style sys_errlist table for them.
+ * However, they are compatible with the POSIX-blessed strerror and perror routines.
+ */
+#ifndef _MACH_SA_ERRNO_H_
+#define _MACH_SA_ERRNO_H_
+
+extern int errno; /* global error number */
+
+/* ISO/ANSI C-1990 errors */
+#define EDOM 0xc001 /* Numerical argument out of domain */
+#define ERANGE 0xc002 /* Result too large */
+
+/* POSIX-1990 errors */
+#define E2BIG 0xc003 /* Argument list too long */
+#define EACCES 0xc004 /* Permission denied */
+#define EAGAIN 0xc005 /* Resource temporarily unavailable */
+#define EBADF 0xc006 /* Bad file descriptor */
+#define EBUSY 0xc007 /* Device busy */
+#define ECHILD 0xc008 /* No child processes */
+#define EDEADLK 0xc009 /* Resource deadlock avoided */
+#define EEXIST 0xc00a /* File exists */
+#define EFAULT 0xc00b /* Bad address */
+#define EFBIG 0xc00c /* File too large */
+#define EINTR 0xc00d /* Interrupted system call */
+#define EINVAL 0xc00e /* Invalid argument */
+#define EIO 0xc00f /* Input/output error */
+#define EISDIR 0xc010 /* Is a directory */
+#define EMFILE 0xc011 /* Too many open files */
+#define EMLINK 0xc012 /* Too many links */
+#define ENAMETOOLONG 0xc013 /* File name too long */
+#define ENFILE 0xc014 /* Too many open files in system */
+#define ENODEV 0xc015 /* Operation not supported by device */
+#define ENOENT 0xc016 /* No such file or directory */
+#define ENOEXEC 0xc017 /* Exec format error */
+#define ENOLCK 0xc018 /* No locks available */
+#define ENOMEM 0xc019 /* Cannot allocate memory */
+#define ENOSPC 0xc01a /* No space left on device */
+#define ENOSYS 0xc01b /* Function not implemented */
+#define ENOTDIR 0xc01c /* Not a directory */
+#define ENOTEMPTY 0xc01d /* Directory not empty */
+#define ENOTTY 0xc01e /* Inappropriate ioctl for device */
+#define ENXIO 0xc01f /* Device not configured */
+#define EPERM 0xc020 /* Operation not permitted */
+#define EPIPE 0xc021 /* Broken pipe */
+#define EROFS 0xc022 /* Read-only file system */
+#define ESPIPE 0xc023 /* Illegal seek */
+#define ESRCH 0xc024 /* No such process */
+#define EXDEV 0xc025 /* Cross-device link */
+
+/* POSIX-1993 errors */
+#define EBADMSG 0xc026
+#define ECANCELED 0xc027
+#define EINPROGRESS 0xc028
+#define EMSGSIZE 0xc029
+#define ENOTSUP 0xc02a
+
+#endif /* _MACH_SA_ERRNO_H_ */
diff --git a/include/mach/sa/fcntl.h b/include/mach/sa/fcntl.h
new file mode 100644
index 00000000..ac86fe37
--- /dev/null
+++ b/include/mach/sa/fcntl.h
@@ -0,0 +1,22 @@
+#ifndef _MACH_SA_FCNTL_H_
+#define _MACH_SA_FCNTL_H_
+
+#include <sys/cdefs.h>
+
+#define O_ACCMODE 0x0003
+#define O_RDONLY 0x0000
+#define O_WRONLY 0x0001
+#define O_RDWR 0x0002
+
+#define O_CREAT 0x0010
+#define O_TRUNC 0x0020
+#define O_APPEND 0x0040
+#define O_EXCL 0x0080
+
+__BEGIN_DECLS
+
+int open(const char *__name, int __mode, ...);
+
+__END_DECLS
+
+#endif /* _MACH_SA_FCNTL_H_ */
diff --git a/include/mach/sa/limits.h b/include/mach/sa/limits.h
new file mode 100644
index 00000000..f8dd03af
--- /dev/null
+++ b/include/mach/sa/limits.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_LIMITS_H_
+#define _MACH_LIMITS_H_
+
+/* This file is valid for typical 32-bit machines;
+ it should be overridden on 64-bit machines. */
+
+#define INT_MIN ((signed int)0x80000000)
+#define INT_MAX ((signed int)0x7fffffff)
+
+#define UINT_MIN ((unsigned int)0x00000000)
+#define UINT_MAX ((unsigned int)0xffffffff)
+
+#endif /* _MACH_LIMITS_H_ */
diff --git a/include/mach/sa/malloc.h b/include/mach/sa/malloc.h
new file mode 100644
index 00000000..36690468
--- /dev/null
+++ b/include/mach/sa/malloc.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_SA_MALLOC_H_
+#define _MACH_SA_MALLOC_H_
+
+#include <mach/machine/vm_types.h>
+#include <sys/cdefs.h>
+
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef natural_t size_t;
+#endif
+
+/* The malloc package in the base C library
+ is implemented on top of the List Memory Manager,
+ and the underlying memory pool can be manipulated
+ directly with the LMM primitives using this lmm structure. */
+extern struct lmm malloc_lmm;
+
+__BEGIN_DECLS
+
+void *malloc(size_t size);
+void *calloc(size_t nelt, size_t eltsize);
+void *realloc(void *buf, size_t new_size);
+void free(void *buf);
+
+/* malloc() and realloc() call this routine when they're about to fail;
+ it should try to scare up more memory and add it to the malloc_lmm.
+ Returns nonzero if it succeeds in finding more memory. */
+int morecore(size_t size);
+
+__END_DECLS
+
+#endif /* _MACH_SA_MALLOC_H_ */
diff --git a/include/mach/sa/memory.h b/include/mach/sa/memory.h
new file mode 100644
index 00000000..e2060eaf
--- /dev/null
+++ b/include/mach/sa/memory.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_MEMORY_H_
+#define _MACH_MEMORY_H_
+
+#include <string.h>
+
+#endif /* _MACH_MEMORY_H_ */
diff --git a/include/mach/sa/stddef.h b/include/mach/sa/stddef.h
new file mode 100644
index 00000000..9da5de0f
--- /dev/null
+++ b/include/mach/sa/stddef.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_STDDEF_H_
+#define _MACH_STDDEF_H_
+
+
+#endif /* _MACH_STDDEF_H_ */
diff --git a/include/mach/sa/stdio.h b/include/mach/sa/stdio.h
new file mode 100644
index 00000000..d8f7201b
--- /dev/null
+++ b/include/mach/sa/stdio.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_SA_STDIO_H
+#define _MACH_SA_STDIO_H
+
+#include <sys/cdefs.h>
+
+/* This is a very naive standard I/O implementation
+ which simply chains to the low-level I/O routines
+ without doing any buffering or anything. */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+typedef struct
+{
+ int fd;
+} FILE;
+
+#define SEEK_SET 0
+#define SEEK_CUR 1
+#define SEEK_END 2
+
+#ifndef EOF
+#define EOF -1
+#endif
+
+__BEGIN_DECLS
+
+int putchar(int c);
+int puts(const char *str);
+int printf(const char *format, ...);
+int sprintf(char *dest, const char *format, ...);
+FILE *fopen(const char *path, const char *mode);
+int fclose(FILE *stream);
+int fread(void *buf, int size, int count, FILE *stream);
+int fwrite(void *buf, int size, int count, FILE *stream);
+int fputc(int c, FILE *stream);
+int fgetc(FILE *stream);
+int fprintf(FILE *stream, const char *format, ...);
+int fscanf(FILE *stream, const char *format, ...);
+int feof(FILE *stream);
+long ftell(FILE *stream);
+void rewind(FILE *stream);
+int rename(const char *from, const char *to);
+
+#define putc(c, stream) fputc(c, stream)
+
+__END_DECLS
+
+#endif /* _MACH_SA_STDIO_H */
diff --git a/include/mach/sa/stdlib.h b/include/mach/sa/stdlib.h
new file mode 100644
index 00000000..29d3eafb
--- /dev/null
+++ b/include/mach/sa/stdlib.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_SA_STDLIB_H_
+#define _MACH_SA_STDLIB_H_
+
+#include <mach/machine/vm_types.h>
+#include <sys/cdefs.h>
+
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef natural_t size_t;
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+__BEGIN_DECLS
+
+int rand(void);
+
+long atol(const char *str);
+#define atoi(str) ((int)atol(str))
+
+#define abs(n) __builtin_abs(n)
+
+void exit(int status);
+
+void srand(unsigned seed);
+int rand(void);
+
+void *malloc(size_t size);
+void *calloc(size_t nelt, size_t eltsize);
+void *realloc(void *buf, size_t new_size);
+void free(void *buf);
+
+__END_DECLS
+
+#endif /* _MACH_SA_STDLIB_H_ */
diff --git a/include/mach/sa/string.h b/include/mach/sa/string.h
new file mode 100644
index 00000000..45fc137b
--- /dev/null
+++ b/include/mach/sa/string.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_STRING_H_
+#define _MACH_STRING_H_
+
+#include <sys/cdefs.h>
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+__BEGIN_DECLS
+
+__DECL(char *,strdup(const char *s));
+__DECL(char *,strcat(char *dest, const char *src));
+__DECL(int,strcmp(const char *a, const char *b));
+__DECL(int,strncpy(char *dest, const char *src, int n));
+__DECL(int,strncmp(const char *a, const char *b, int n));
+
+__DECL(char *,strchr(const char *s, int c));
+__DECL(char *,strrchr(const char *s, int c));
+__DECL(char *,index(const char *s, int c));
+__DECL(char *,rindex(const char *s, int c));
+__DECL(void *,strstr(const char *haystack, const char *needle));
+
+#ifndef __GNUC__
+__DECL(void *,memcpy(void *to, const void *from, unsigned int n));
+#endif
+__DECL(void *,memset(void *to, int ch, unsigned int n));
+
+__DECL(void,bcopy(const void *from, void *to, unsigned int n));
+__DECL(void,bzero(void *to, unsigned int n));
+
+__END_DECLS
+
+#endif /* _MACH_STRING_H_ */
diff --git a/include/mach/sa/strings.h b/include/mach/sa/strings.h
new file mode 100644
index 00000000..67b502e9
--- /dev/null
+++ b/include/mach/sa/strings.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_STRINGS_H_
+#define _MACH_STRINGS_H_
+
+#include <string.h>
+
+#endif /* _MACH_STRINGS_H_ */
diff --git a/include/mach/sa/sys/cdefs.h b/include/mach/sa/sys/cdefs.h
new file mode 100644
index 00000000..1e804ad6
--- /dev/null
+++ b/include/mach/sa/sys/cdefs.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * Common private header file used by the mach/sa headers.
+ * This header file does not cause any non-POSIX-reserved symbols to be defined.
+ */
+#ifndef _MACH_SA_SYS_CDEFS_H_
+#define _MACH_SA_SYS_CDEFS_H_
+
+#ifdef __cplusplus
+#define __BEGIN_DECLS extern "C" {
+#define __END_DECLS }
+#else
+#define __BEGIN_DECLS
+#define __END_DECLS
+#endif
+
+#ifndef __DECL
+#define __DECL(rettype, decl) \
+ extern rettype __##decl; \
+ extern rettype decl;
+#endif
+
+#ifndef __INLINE_FUNC
+#define __INLINE_FUNC static __inline
+#endif
+
+#endif /* _MACH_SA_SYS_CDEFS_H_ */
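
A worked expansion of the __DECL helper, as the mach/sa string.h above uses it
(both the implementation-reserved __name and the plain name get declared):

    __DECL(char *, strcat(char *dest, const char *src))
    /* expands to:
       extern char * __strcat(char *dest, const char *src);
       extern char * strcat(char *dest, const char *src);  */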
diff --git a/include/mach/sa/sys/ioctl.h b/include/mach/sa/sys/ioctl.h
new file mode 100644
index 00000000..732494dc
--- /dev/null
+++ b/include/mach/sa/sys/ioctl.h
@@ -0,0 +1,52 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon rights
+ * to redistribute these changes.
+ */
+/*
+ * Format definitions for 'ioctl' commands in device definitions.
+ *
+ * From BSD4.4.
+ */
+
+#ifndef _MACH_SYS_IOCTL_H_
+#define _MACH_SYS_IOCTL_H_
+/*
+ * Ioctl's have the command encoded in the lower word, and the size of
+ * any in or out parameters in the upper word. The high 3 bits of the
+ * upper word are used to encode the in/out status of the parameter.
+ */
+#define IOCPARM_MASK 0x1fff /* parameter length, at most 13 bits */
+#define IOC_VOID 0x20000000 /* no parameters */
+#define IOC_OUT 0x40000000 /* copy out parameters */
+#define IOC_IN 0x80000000U /* copy in parameters */
+#define IOC_INOUT (IOC_IN|IOC_OUT)
+
+#define _IOC(inout,group,num,len) \
+ (inout | ((len & IOCPARM_MASK) << 16) | ((group) << 8) | (num))
+#define _IO(g,n) _IOC(IOC_VOID, (g), (n), 0)
+#define _IOR(g,n,t) _IOC(IOC_OUT, (g), (n), sizeof(t))
+#define _IOW(g,n,t) _IOC(IOC_IN, (g), (n), sizeof(t))
+#define _IOWR(g,n,t) _IOC(IOC_INOUT, (g), (n), sizeof(t))
+
+#endif /* _MACH_SYS_IOCTL_H_ */
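
A worked example of the encoding: for a hypothetical read-style command in
group 't', number 3, whose argument structure is 8 bytes on a typical 32-bit
machine, _IOR packs the pieces exactly as described in the comment above:

    struct hypo_arg { int a, b; };              /* sizeof == 8 assumed */

    #define HYPO_GET  _IOR('t', 3, struct hypo_arg)
    /* == IOC_OUT | ((8 & IOCPARM_MASK) << 16) | ('t' << 8) | 3
       == 0x40000000 | 0x00080000 | 0x7400 | 0x03
       == 0x40087403                                              */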
diff --git a/include/mach/sa/sys/mman.h b/include/mach/sa/sys/mman.h
new file mode 100644
index 00000000..3400d302
--- /dev/null
+++ b/include/mach/sa/sys/mman.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_SA_SYS_MMAN_H_
+#define _MACH_SA_SYS_MMAN_H_
+
+/*
+ * Protections are chosen from these bits, or-ed together.
+ * NB: These are the same values as the VM_PROT_xxx definitions,
+ * and they can be used interchangeably.
+ */
+#define PROT_READ 0x01 /* pages can be read */
+#define PROT_WRITE 0x02 /* pages can be written */
+#define PROT_EXEC 0x04 /* pages can be executed */
+
+/*
+ * Flags for the mlockall() call.
+ */
+#define MCL_CURRENT 0x0001 /* lock all currently mapped memory */
+#define MCL_FUTURE 0x0002 /* lock all memory mapped in the future */
+
+#endif /* _MACH_SA_SYS_MMAN_H_ */
diff --git a/include/mach/sa/sys/reboot.h b/include/mach/sa/sys/reboot.h
new file mode 100644
index 00000000..d74ed98d
--- /dev/null
+++ b/include/mach/sa/sys/reboot.h
@@ -0,0 +1,123 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * @(#)reboot.h 7.5 (Berkeley) 6/27/88
+ */
+/*
+ * Warning: The contents of this file are deprecated;
+ * it should only ever be used for BSD and Mach 3 compatibility.
+ * As the above copyright notice suggests, this file originated in BSD;
+ * it is mostly the same, except the flags after RB_DFLTROOT
+ * have diverged from BSD.
+ */
+#ifndef _MACH_SYS_REBOOT_H_
+#define _MACH_SYS_REBOOT_H_
+
+/*
+ * Arguments to reboot system call.
+ * These are converted to switches, and passed to startup program,
+ * and on to init.
+ */
+#define RB_AUTOBOOT 0 /* flags for system auto-booting itself */
+
+#define RB_ASKNAME 0x01 /* -a: ask for file name to reboot from */
+#define RB_SINGLE 0x02 /* -s: reboot to single user only */
+#define RB_KDB 0x04 /* -d: kernel debugger symbols loaded */
+#define RB_HALT 0x08 /* -h: enter KDB at bootup */
+ /* for host_reboot(): don't reboot,
+ just halt */
+#define RB_INITNAME 0x10 /* -i: name given for /etc/init (unused) */
+#define RB_DFLTROOT 0x20 /* use compiled-in rootdev */
+#define RB_NOBOOTRC 0x20 /* -b: don't run /etc/rc.boot */
+#define RB_ALTBOOT 0x40 /* use /boot.old vs /boot */
+#define RB_UNIPROC 0x80 /* -u: start only one processor */
+
+#define RB_SHIFT 8 /* second byte is for ux */
+
+#define RB_DEBUGGER 0x1000 /* for host_reboot(): enter kernel
+ debugger from user level */
+
+/* Corresponding BSD definitions, where they disagree with the Mach flags. */
+#define BSD_RB_NOSYNC 0x04 /* don't sync before reboot */
+#define BSD_RB_KDB 0x40 /* give control to kernel debugger */
+#define BSD_RB_RDONLY 0x80 /* mount root fs read-only */
+#define BSD_RB_DUMP 0x100 /* dump kernel memory before reboot */
+#define BSD_RB_MINIROOT 0x200 /* mini-root present in memory at boot time */
+#define BSD_RB_CONFIG 0x400 /* invoke user configuration routine */
+
+
+/*
+ * Constants for converting boot-style device number to type,
+ * adaptor (uba, mba, etc), unit number and partition number.
+ * Type (== major device number) is in the low byte
+ * for backward compatibility. Except for that of the "magic
+ * number", each mask applies to the shifted value.
+ * Format:
+ * (4) (4) (4) (4) (8) (8)
+ * --------------------------------
+ * |MA | AD| CT| UN| PART | TYPE |
+ * --------------------------------
+ */
+#define B_ADAPTORSHIFT 24
+#define B_ADAPTORMASK 0x0f
+#define B_ADAPTOR(val) (((val) >> B_ADAPTORSHIFT) & B_ADAPTORMASK)
+#define B_CONTROLLERSHIFT 20
+#define B_CONTROLLERMASK 0xf
+#define B_CONTROLLER(val) (((val)>>B_CONTROLLERSHIFT) & B_CONTROLLERMASK)
+#define B_UNITSHIFT 16
+#define B_UNITMASK 0xf
+#define B_UNIT(val) (((val) >> B_UNITSHIFT) & B_UNITMASK)
+#define B_PARTITIONSHIFT 8
+#define B_PARTITIONMASK 0xff
+#define B_PARTITION(val) (((val) >> B_PARTITIONSHIFT) & B_PARTITIONMASK)
+#define B_TYPESHIFT 0
+#define B_TYPEMASK 0xff
+#define B_TYPE(val) (((val) >> B_TYPESHIFT) & B_TYPEMASK)
+
+#define B_MAGICMASK ((u_int)0xf0000000U)
+#define B_DEVMAGIC ((u_int)0xa0000000U)
+
+#define MAKEBOOTDEV(type, adaptor, controller, unit, partition) \
+ (((type) << B_TYPESHIFT) | ((adaptor) << B_ADAPTORSHIFT) | \
+ ((controller) << B_CONTROLLERSHIFT) | ((unit) << B_UNITSHIFT) | \
+ ((partition) << B_PARTITIONSHIFT) | B_DEVMAGIC)
+
+#endif /* _MACH_SYS_REBOOT_H_ */
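
The MAKEBOOTDEV()/B_*() macros above are pure bit manipulation, so they can be sanity-checked in isolation; a small self-contained round-trip (nothing beyond this header and sys/types.h, which supplies the u_int used by B_DEVMAGIC, is assumed):

#include <sys/types.h>          /* u_int, used by B_MAGICMASK/B_DEVMAGIC */
#include <sys/reboot.h>

/* Pack a boot device (type 2, adaptor 0, controller 1, unit 3,
 * partition 0) and check that the extraction macros recover it. */
int bootdev_roundtrip_ok(void)
{
        u_int dev = MAKEBOOTDEV(2, 0, 1, 3, 0);

        return (dev & B_MAGICMASK) == B_DEVMAGIC
            && B_TYPE(dev) == 2
            && B_ADAPTOR(dev) == 0
            && B_CONTROLLER(dev) == 1
            && B_UNIT(dev) == 3
            && B_PARTITION(dev) == 0;
}
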
diff --git a/include/mach/sa/sys/signal.h b/include/mach/sa/sys/signal.h
new file mode 100644
index 00000000..c3c12069
--- /dev/null
+++ b/include/mach/sa/sys/signal.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 1982, 1986, 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)signal.h 8.2 (Berkeley) 1/21/94
+ * signal.h,v 1.2 1994/08/02 07:53:32 davidg Exp
+ */
+
+#ifndef _MACH_SA_SYS_SIGNAL_H_
+#define _MACH_SA_SYS_SIGNAL_H_
+
+#define NSIG 32 /* counting 0; could be 33 (mask is 1-32) */
+
+#define SIGHUP 1 /* hangup */
+#define SIGINT 2 /* interrupt */
+#define SIGQUIT 3 /* quit */
+#define SIGILL 4 /* illegal instruction (not reset when caught) */
+#ifndef _POSIX_SOURCE
+#define SIGTRAP 5 /* trace trap (not reset when caught) */
+#endif
+#define SIGABRT 6 /* abort() */
+#ifndef _POSIX_SOURCE
+#define SIGIOT SIGABRT /* compatibility */
+#define SIGEMT 7 /* EMT instruction */
+#endif
+#define SIGFPE 8 /* floating point exception */
+#define SIGKILL 9 /* kill (cannot be caught or ignored) */
+#ifndef _POSIX_SOURCE
+#define SIGBUS 10 /* bus error */
+#endif
+#define SIGSEGV 11 /* segmentation violation */
+#ifndef _POSIX_SOURCE
+#define SIGSYS 12 /* bad argument to system call */
+#endif
+#define SIGPIPE 13 /* write on a pipe with no one to read it */
+#define SIGALRM 14 /* alarm clock */
+#define SIGTERM 15 /* software termination signal from kill */
+#ifndef _POSIX_SOURCE
+#define SIGURG 16 /* urgent condition on IO channel */
+#endif
+#define SIGSTOP 17 /* sendable stop signal not from tty */
+#define SIGTSTP 18 /* stop signal from tty */
+#define SIGCONT 19 /* continue a stopped process */
+#define SIGCHLD 20 /* to parent on child stop or exit */
+#define SIGTTIN 21 /* to readers pgrp upon background tty read */
+#define SIGTTOU 22 /* like TTIN for output if (tp->t_local&LTOSTOP) */
+#ifndef _POSIX_SOURCE
+#define SIGIO 23 /* input/output possible signal */
+#define SIGXCPU 24 /* exceeded CPU time limit */
+#define SIGXFSZ 25 /* exceeded file size limit */
+#define SIGVTALRM 26 /* virtual time alarm */
+#define SIGPROF 27 /* profiling time alarm */
+#define SIGWINCH 28 /* window size changes */
+#define SIGINFO 29 /* information request */
+#endif
+#define SIGUSR1 30 /* user defined signal 1 */
+#define SIGUSR2 31 /* user defined signal 2 */
+
+#if defined(_ANSI_SOURCE) || defined(__cplusplus)
+/*
+ * Language spec sez we must list exactly one parameter, even though we
+ * actually supply three. Ugh!
+ */
+#define SIG_DFL (void (*)(int))0
+#define SIG_IGN (void (*)(int))1
+#define SIG_ERR (void (*)(int))-1
+#else
+#define SIG_DFL (void (*)())0
+#define SIG_IGN (void (*)())1
+#define SIG_ERR (void (*)())-1
+#endif
+
+#ifndef _ANSI_SOURCE
+
+typedef unsigned int sigset_t;
+
+/*
+ * POSIX 1003.1b: Generic value to pass back to an application.
+ */
+union sigval
+{
+ int sival_int;
+ void *sival_ptr;
+};
+
+/*
+ * This structure is passed to signal handlers
+ * that use the new SA_SIGINFO calling convention (see below).
+ */
+typedef struct
+{
+ int si_signo;
+ int si_code;
+ union sigval si_value;
+} siginfo_t;
+
+/* Values for si_code, indicating the source of the signal */
+#define SI_USER 0 /* sent by kill(), raise(), or abort() */
+#define SI_QUEUE 1 /* sent by sigqueue() */
+#define SI_TIMER 2 /* generated by an expired timer */
+#define SI_ASYNCIO 3 /* generated by completion of an async i/o */
+#define SI_MESGQ 4 /* generated by the arrival of a message */
+#define SI_IRQ 5 /* hardware int dispatched to application */
+
+/*
+ * Signal vector "template" used in sigaction call.
+ */
+struct sigaction {
+ union { /* signal handler */
+ void (*sa_u_handler)();
+ void (*sa_u_sigaction)(int, siginfo_t *, void *);
+ } sa_u;
+ sigset_t sa_mask; /* signal mask to apply */
+ int sa_flags; /* see signal options below */
+};
+#define sa_handler sa_u.sa_u_handler
+#define sa_sigaction sa_u.sa_u_sigaction
+
+#ifndef _POSIX_SOURCE
+#define SA_ONSTACK 0x0001 /* take signal on signal stack */
+#define SA_RESTART 0x0002 /* restart system on signal return */
+#define SA_DISABLE 0x0004 /* disable taking signals on alternate stack */
+#ifdef COMPAT_SUNOS
+#define SA_USERTRAMP 0x0100 /* do not bounce off kernel's sigtramp */
+#endif
+#endif
+#define SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */
+#define SA_SIGINFO 0x0010 /* use sa_sigaction calling convention */
+
+/*
+ * Flags for sigprocmask:
+ */
+#define SIG_BLOCK 1 /* block specified signal set */
+#define SIG_UNBLOCK 2 /* unblock specified signal set */
+#define SIG_SETMASK 3 /* set specified signal set */
+
+/*
+ * POSIX 1003.1b:
+ * Used when requesting queued notification of an event,
+ * such as a timer expiration or a message arrival.
+ */
+struct sigevent
+{
+ int sigev_notify;
+ union
+ {
+ struct
+ {
+ int __signo;
+ union sigval __value;
+ } __sig;
+ struct
+ {
+ void (*__handler)(void);
+ void *__stack;
+ } __fastint;
+ } __u;
+};
+
+#define sigev_signo __u.__sig.__signo
+#define sigev_value __u.__sig.__value
+
+#define sigev_handler __u.__fastint.__handler
+#define sigev_stack __u.__fastint.__stack
+
+/* Values for sigev_notify */
+#define SIGEV_NONE 0
+#define SIGEV_SIGNAL 1
+#define SIGEV_FASTINT 2
+
+#endif /* !_ANSI_SOURCE */
+
+#endif /* !_MACH_SA_SYS_SIGNAL_H_ */
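
A hedged sketch of the SA_SIGINFO calling convention declared above; only the structures and constants come from this header, and sigaction() itself is assumed to be supplied by whatever C library or emulation layer hosts the standalone code:

#include <sys/signal.h>

/* Assumed to be provided elsewhere; only its argument structures are
 * defined by the header above. */
extern int sigaction(int sig, const struct sigaction *act,
                     struct sigaction *oact);

static void on_int(int signo, siginfo_t *info, void *ctx)
{
        /* info->si_code says who raised the signal (SI_USER, SI_TIMER, ...) */
        (void) signo;
        (void) info;
        (void) ctx;
}

int install_handler(void)
{
        struct sigaction sa;

        sa.sa_sigaction = on_int;       /* really sa.sa_u.sa_u_sigaction */
        sa.sa_mask = 0;                 /* sigset_t here is a plain bit mask */
        sa.sa_flags = SA_SIGINFO;       /* three-argument handler convention */
        return sigaction(SIGINT, &sa, (struct sigaction *) 0);
}
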
diff --git a/include/mach/sa/sys/stat.h b/include/mach/sa/sys/stat.h
new file mode 100644
index 00000000..81ca25d1
--- /dev/null
+++ b/include/mach/sa/sys/stat.h
@@ -0,0 +1,126 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)stat.h 8.6 (Berkeley) 3/8/94
+ * stat.h,v 1.5 1994/10/02 17:24:57 phk Exp
+ */
+
+#ifndef _MACH_SA_SYS_STAT_H_
+#define _MACH_SA_SYS_STAT_H_
+
+#include <sys/types.h>
+
+/*
+ * XXX we need this for struct timespec. We get miscellaneous namespace
+ * pollution with it. struct timespec itself is namespace pollution if
+ * _POSIX_SOURCE is defined.
+ */
+#include <sys/time.h>
+
+struct stat {
+ dev_t st_dev; /* inode's device */
+ ino_t st_ino; /* inode's number */
+ mode_t st_mode; /* inode protection mode */
+ nlink_t st_nlink; /* number of hard links */
+ uid_t st_uid; /* user ID of the file's owner */
+ gid_t st_gid; /* group ID of the file's group */
+ dev_t st_rdev; /* device type */
+ time_t st_atime; /* time of last access */
+ time_t st_mtime; /* time of last data modification */
+ time_t st_ctime; /* time of last file status change */
+ off_t st_size; /* file size, in bytes */
+ unsigned long st_blocks; /* blocks allocated for file */
+ unsigned long st_blksize; /* optimal blocksize for I/O */
+};
+
+#define S_ISUID 0004000 /* set user id on execution */
+#define S_ISGID 0002000 /* set group id on execution */
+#ifndef _POSIX_SOURCE
+#define S_ISTXT 0001000 /* sticky bit */
+#endif
+
+#define S_IRWXU 0000700 /* RWX mask for owner */
+#define S_IRUSR 0000400 /* R for owner */
+#define S_IWUSR 0000200 /* W for owner */
+#define S_IXUSR 0000100 /* X for owner */
+
+#define S_IRWXG 0000070 /* RWX mask for group */
+#define S_IRGRP 0000040 /* R for group */
+#define S_IWGRP 0000020 /* W for group */
+#define S_IXGRP 0000010 /* X for group */
+
+#define S_IRWXO 0000007 /* RWX mask for other */
+#define S_IROTH 0000004 /* R for other */
+#define S_IWOTH 0000002 /* W for other */
+#define S_IXOTH 0000001 /* X for other */
+
+#ifndef _POSIX_SOURCE
+#define S_IFMT 0170000 /* type of file mask */
+#define S_IFIFO 0010000 /* named pipe (fifo) */
+#define S_IFCHR 0020000 /* character special */
+#define S_IFDIR 0040000 /* directory */
+#define S_IFBLK 0060000 /* block special */
+#define S_IFREG 0100000 /* regular */
+#define S_IFLNK 0120000 /* symbolic link */
+#define S_IFSOCK 0140000 /* socket */
+#define S_ISVTX 0001000 /* save swapped text even after use */
+#endif
+
+#define S_ISDIR(m) (((m) & 0170000) == 0040000) /* directory */
+#define S_ISCHR(m) (((m) & 0170000) == 0020000) /* char special */
+#define S_ISBLK(m) (((m) & 0170000) == 0060000) /* block special */
+#define S_ISREG(m) (((m) & 0170000) == 0100000) /* regular file */
+#define S_ISFIFO(m) (((m) & 0170000) == 0010000 || \
+ ((m) & 0170000) == 0140000) /* fifo or socket */
+#ifndef _POSIX_SOURCE
+#define S_ISLNK(m) (((m) & 0170000) == 0120000) /* symbolic link */
+#define S_ISSOCK(m) (((m) & 0170000) == 0010000 || \
+ ((m) & 0170000) == 0140000) /* fifo or socket */
+#endif
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+int chmod(const char *, mode_t);
+int fstat(int, struct stat *);
+int mkdir(const char *, mode_t);
+int mkfifo(const char *, mode_t);
+int stat(const char *, struct stat *);
+mode_t umask(mode_t);
+__END_DECLS
+
+#endif /* !_MACH_SA_SYS_STAT_H_ */
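
A short usage sketch for the prototypes and S_IS*() type tests above, assuming the standalone library's stat() is actually backed by a filesystem:

#include <sys/stat.h>

/* Classify a path using the stat() prototype and S_IS*() macros above.
 * Returns 1 for a directory, 2 for a regular file, 0 for anything else,
 * and -1 if stat() fails. */
int classify(const char *path)
{
        struct stat st;

        if (stat(path, &st) < 0)
                return -1;
        if (S_ISDIR(st.st_mode))
                return 1;
        if (S_ISREG(st.st_mode))
                return 2;
        return 0;
}
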
diff --git a/include/mach/sa/sys/termios.h b/include/mach/sa/sys/termios.h
new file mode 100644
index 00000000..2d2e4bd8
--- /dev/null
+++ b/include/mach/sa/sys/termios.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 1988, 1989, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)termios.h 8.3 (Berkeley) 3/28/94
+ * termios.h,v 1.3 1994/08/02 07:53:46 davidg Exp
+ */
+
+#ifndef _MACH_SA_SYS_TERMIOS_H_
+#define _MACH_SA_SYS_TERMIOS_H_
+
+/*
+ * Special Control Characters
+ *
+ * Index into c_cc[] character array.
+ *
+ * Name Subscript Enabled by
+ */
+#define VEOF 0 /* ICANON */
+#define VEOL 1 /* ICANON */
+#define VERASE 3 /* ICANON */
+#define VKILL 5 /* ICANON */
+#define VINTR 8 /* ISIG */
+#define VQUIT 9 /* ISIG */
+#define VSUSP 10 /* ISIG */
+#define VSTART 12 /* IXON, IXOFF */
+#define VSTOP 13 /* IXON, IXOFF */
+#define VMIN 16 /* !ICANON */
+#define VTIME 17 /* !ICANON */
+#define NCCS 20
+
+#define _POSIX_VDISABLE ((unsigned char)'\377')
+
+/*
+ * Input flags - software input processing
+ */
+#define IGNBRK 0x00000001 /* ignore BREAK condition */
+#define BRKINT 0x00000002 /* map BREAK to SIGINTR */
+#define IGNPAR 0x00000004 /* ignore (discard) parity errors */
+#define PARMRK 0x00000008 /* mark parity and framing errors */
+#define INPCK 0x00000010 /* enable checking of parity errors */
+#define ISTRIP 0x00000020 /* strip 8th bit off chars */
+#define INLCR 0x00000040 /* map NL into CR */
+#define IGNCR 0x00000080 /* ignore CR */
+#define ICRNL 0x00000100 /* map CR to NL (ala CRMOD) */
+#define IXON 0x00000200 /* enable output flow control */
+#define IXOFF 0x00000400 /* enable input flow control */
+#ifndef _POSIX_SOURCE
+#define IXANY 0x00000800 /* any char will restart after stop */
+#define IMAXBEL 0x00002000 /* ring bell on input queue full */
+#endif /*_POSIX_SOURCE */
+
+/*
+ * Output flags - software output processing
+ */
+#define OPOST 0x00000001 /* enable following output processing */
+
+/*
+ * Control flags - hardware control of terminal
+ */
+#ifndef _POSIX_SOURCE
+#define CIGNORE 0x00000001 /* ignore control flags */
+#endif
+#define CSIZE 0x00000300 /* character size mask */
+#define CS5 0x00000000 /* 5 bits (pseudo) */
+#define CS6 0x00000100 /* 6 bits */
+#define CS7 0x00000200 /* 7 bits */
+#define CS8 0x00000300 /* 8 bits */
+#define CSTOPB 0x00000400 /* send 2 stop bits */
+#define CREAD 0x00000800 /* enable receiver */
+#define PARENB 0x00001000 /* parity enable */
+#define PARODD 0x00002000 /* odd parity, else even */
+#define HUPCL 0x00004000 /* hang up on last close */
+#define CLOCAL 0x00008000 /* ignore modem status lines */
+
+
+/*
+ * "Local" flags - dumping ground for other state
+ *
+ * Warning: some flags in this structure begin with
+ * the letter "I" and look like they belong in the
+ * input flag.
+ */
+
+#define ECHOE 0x00000002 /* visually erase chars */
+#define ECHOK 0x00000004 /* echo NL after line kill */
+#define ECHO 0x00000008 /* enable echoing */
+#define ECHONL 0x00000010 /* echo NL even if ECHO is off */
+#define ISIG 0x00000080 /* enable signals INTR, QUIT, [D]SUSP */
+#define ICANON 0x00000100 /* canonicalize input lines */
+#define IEXTEN 0x00000400 /* enable DISCARD and LNEXT */
+#define EXTPROC 0x00000800 /* external processing */
+#define TOSTOP 0x00400000 /* stop background jobs from output */
+#ifndef _POSIX_SOURCE
+#define FLUSHO 0x00800000 /* output being flushed (state) */
+#define NOKERNINFO 0x02000000 /* no kernel output from VSTATUS */
+#define PENDIN 0x20000000 /* XXX retype pending input (state) */
+#endif /*_POSIX_SOURCE */
+#define NOFLSH 0x80000000 /* don't flush after interrupt */
+
+typedef unsigned long tcflag_t;
+typedef unsigned char cc_t;
+typedef long speed_t;
+
+struct termios {
+ tcflag_t c_iflag; /* input flags */
+ tcflag_t c_oflag; /* output flags */
+ tcflag_t c_cflag; /* control flags */
+ tcflag_t c_lflag; /* local flags */
+ cc_t c_cc[NCCS]; /* control chars */
+ long c_ispeed; /* input speed */
+ long c_ospeed; /* output speed */
+};
+
+/*
+ * Commands passed to tcsetattr() for setting the termios structure.
+ */
+#define TCSANOW 0 /* make change immediate */
+#define TCSADRAIN 1 /* drain output, then change */
+#define TCSAFLUSH 2 /* drain output, flush input */
+#ifndef _POSIX_SOURCE
+#define TCSASOFT 0x10 /* flag - don't alter h.w. state */
+#endif
+
+/*
+ * Standard speeds
+ */
+#define B0 0
+#define B50 50
+#define B75 75
+#define B110 110
+#define B134 134
+#define B150 150
+#define B200 200
+#define B300 300
+#define B600 600
+#define B1200 1200
+#define B1800 1800
+#define B2400 2400
+#define B4800 4800
+#define B9600 9600
+#define B19200 19200
+#define B38400 38400
+#ifndef _POSIX_SOURCE
+#define B7200 7200
+#define B14400 14400
+#define B28800 28800
+#define B57600 57600
+#define B76800 76800
+#define B115200 115200
+#define B230400 230400
+#define EXTA 19200
+#define EXTB 38400
+#endif /* !_POSIX_SOURCE */
+
+#define TCIFLUSH 1
+#define TCOFLUSH 2
+#define TCIOFLUSH 3
+#define TCOOFF 1
+#define TCOON 2
+#define TCIOFF 3
+#define TCION 4
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+speed_t cfgetispeed(const struct termios *);
+speed_t cfgetospeed(const struct termios *);
+int cfsetispeed(struct termios *, speed_t);
+int cfsetospeed(struct termios *, speed_t);
+int tcgetattr(int, struct termios *);
+int tcsetattr(int, int, const struct termios *);
+int tcdrain(int);
+int tcflow(int, int);
+int tcflush(int, int);
+int tcsendbreak(int, int);
+__END_DECLS
+
+#endif /* !_MACH_SA_SYS_TERMIOS_H_ */
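
The flag words above combine in the usual POSIX fashion; a sketch of switching a descriptor to non-canonical input, assuming tcgetattr()/tcsetattr() are actually wired to a terminal driver in this environment:

#include <sys/termios.h>

/* Put fd into non-canonical ("raw-ish") input mode: no line editing,
 * no echo, no signal characters, return after at least one byte. */
int make_raw(int fd)
{
        struct termios t;

        if (tcgetattr(fd, &t) < 0)
                return -1;

        t.c_lflag &= ~(ICANON | ECHO | ISIG);   /* no canon, echo, signals */
        t.c_iflag &= ~(IXON | ICRNL);           /* no ^S/^Q, no CR->NL map */
        t.c_oflag &= ~OPOST;                    /* raw output */
        t.c_cc[VMIN] = 1;                       /* block for one byte */
        t.c_cc[VTIME] = 0;                      /* no read timeout */

        return tcsetattr(fd, TCSAFLUSH, &t);
}
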
diff --git a/include/mach/sa/sys/time.h b/include/mach/sa/sys/time.h
new file mode 100644
index 00000000..ab96678e
--- /dev/null
+++ b/include/mach/sa/sys/time.h
@@ -0,0 +1,53 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon rights
+ * to redistribute these changes.
+ */
+/*
+ * Time-keeper for kernel IO devices.
+ *
+ * May or may not have any relation to wall-clock time.
+ */
+
+#ifndef _MACH_SA_SYS_TIME_H_
+#define _MACH_SA_SYS_TIME_H_
+
+#include <mach/time_value.h>
+
+extern time_value_t time;
+
+/*
+ * Definitions to keep old code happy.
+ */
+#define timeval_t time_value_t
+#define timeval time_value
+#define tv_sec seconds
+#define tv_usec microseconds
+
+#define timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
+#define timercmp(tvp, uvp, cmp) \
+ ((tvp)->tv_sec cmp (uvp)->tv_sec || \
+ (tvp)->tv_sec == (uvp)->tv_sec && (tvp)->tv_usec cmp (uvp)->tv_usec)
+#define timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0
+
+#endif /* _MACH_SA_SYS_TIME_H_ */
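
The compatibility defines above let old BSD timeval code operate on time_value_t unchanged; a small illustration using only the macros in this header:

#include <sys/time.h>

/* timercmp() compares the second fields first, then microseconds. */
int earlier_than(const struct timeval *a, const struct timeval *b)
{
        return timercmp(a, b, <);
}

/* tv_sec/tv_usec are just aliases for time_value_t's seconds and
 * microseconds fields. */
void set_timer(struct timeval *tv)
{
        timerclear(tv);                 /* both fields become zero        */
        tv->tv_sec = 1;                 /* really writes tv->seconds      */
        tv->tv_usec = 500000;           /* really writes tv->microseconds */
}
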
diff --git a/include/mach/sa/sys/types.h b/include/mach/sa/sys/types.h
new file mode 100644
index 00000000..6973f892
--- /dev/null
+++ b/include/mach/sa/sys/types.h
@@ -0,0 +1,91 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _MACH_SA_SYS_TYPES_H_
+#define _MACH_SA_SYS_TYPES_H_
+
+#include <mach/machine/vm_types.h>
+
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef natural_t size_t;
+#endif
+
+#ifndef _SSIZE_T
+#define _SSIZE_T
+typedef integer_t ssize_t;
+#endif
+
+typedef unsigned short dev_t; /* device id */
+typedef unsigned long gid_t; /* group id */
+typedef unsigned long ino_t; /* inode number */
+typedef unsigned short mode_t; /* permissions */
+typedef unsigned short nlink_t; /* link count */
+typedef natural_t off_t; /* file offset */
+typedef unsigned long uid_t; /* user id */
+
+
+/* Symbols allowed but not required by POSIX */
+
+typedef char * caddr_t; /* address of a (signed) char */
+
+#ifndef _TIME_T
+#define _TIME_T
+typedef unsigned int time_t;
+#endif
+
+#define RAND_MAX 0x7fffffff
+
+/* Symbols not allowed by POSIX */
+#ifndef _POSIX_SOURCE
+
+/*
+ * Common type definitions that lots of old files seem to want.
+ */
+
+typedef unsigned char u_char; /* unsigned char */
+typedef unsigned short u_short; /* unsigned short */
+typedef unsigned int u_int; /* unsigned int */
+typedef unsigned long u_long; /* unsigned long */
+
+typedef struct _quad_ {
+ unsigned int val[2]; /* 2 32-bit values make... */
+} quad; /* an 8-byte item */
+
+typedef unsigned int daddr_t; /* disk address */
+
+#define major(i) (((i) >> 8) & 0xFF)
+#define minor(i) ((i) & 0xFF)
+#define makedev(i,j) ((((i) & 0xFF) << 8) | ((j) & 0xFF))
+
+#define NBBY 8
+
+#ifndef NULL
+#define NULL ((void *) 0) /* the null pointer */
+#endif
+
+#endif /* _POSIX_SOURCE */
+
+#endif /* _MACH_SA_SYS_TYPES_H_ */
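
A tiny round-trip check of the device-number helpers above (8-bit major number in the high byte, 8-bit minor number in the low byte):

#include <sys/types.h>

/* Pack and unpack a dev_t with the macros above.  Returns 1 if the
 * major/minor fields survive intact, 0 otherwise. */
int devmacros_ok(void)
{
        dev_t d = makedev(3, 7);        /* major 3, minor 7 -> 0x0307 */

        return major(d) == 3 && minor(d) == 7;
}
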
diff --git a/include/mach/sa/time.h b/include/mach/sa/time.h
new file mode 100644
index 00000000..2f026226
--- /dev/null
+++ b/include/mach/sa/time.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_SA_TIME_H
+#define _MACH_SA_TIME_H
+
+#endif /* _MACH_SA_TIME_H */
diff --git a/include/mach/sa/unistd.h b/include/mach/sa/unistd.h
new file mode 100644
index 00000000..d3c313da
--- /dev/null
+++ b/include/mach/sa/unistd.h
@@ -0,0 +1,18 @@
+#ifndef _UNISTD_H_
+#define _UNISTD_H_
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+__BEGIN_DECLS
+
+__DECL(int,close(int fd));
+__DECL(int,read(int fd, void *buf, unsigned int n));
+__DECL(int,write(int fd, const void *buf, unsigned int n));
+__DECL(off_t,lseek(int fd, off_t offset, int whence));
+__DECL(int,rename(const char *oldpath, const char *newpath));
+__DECL(void *,sbrk(int size));
+
+__END_DECLS
+
+#endif /* _UNISTD_H_ */
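
The __DECL() wrappers above expand to ordinary prototypes; a short sketch that uses read() and write() to copy one descriptor to another, assuming the standalone library connects them to real devices or files:

#include <unistd.h>

/* Copy everything from in_fd to out_fd.  Returns 0 on success, -1 on error. */
int copy_fd(int in_fd, int out_fd)
{
        char buf[1024];
        int n;

        while ((n = read(in_fd, buf, sizeof buf)) > 0) {
                int off = 0;

                while (off < n) {
                        int w = write(out_fd, buf + off, n - off);
                        if (w <= 0)
                                return -1;
                        off += w;
                }
        }
        return n < 0 ? -1 : 0;
}
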
diff --git a/include/mach/std_types.defs b/include/mach/std_types.defs
new file mode 100644
index 00000000..aef7dba8
--- /dev/null
+++ b/include/mach/std_types.defs
@@ -0,0 +1,131 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach kernel standard interface type declarations
+ */
+
+#ifndef _MACH_STD_TYPES_DEFS_
+#define _MACH_STD_TYPES_DEFS_
+
+#ifdef MACH_KERNEL
+#include <mach_ipc_compat.h>
+#endif
+
+type char = MACH_MSG_TYPE_CHAR;
+type short = MACH_MSG_TYPE_INTEGER_16;
+type int = MACH_MSG_TYPE_INTEGER_32;
+type int32 = MACH_MSG_TYPE_INTEGER_32;
+type int64 = MACH_MSG_TYPE_INTEGER_64;
+type boolean_t = MACH_MSG_TYPE_BOOLEAN;
+type unsigned = MACH_MSG_TYPE_INTEGER_32;
+type unsigned32 = MACH_MSG_TYPE_INTEGER_32;
+type unsigned64 = MACH_MSG_TYPE_INTEGER_64;
+
+/* Get the definitions for natural_t and integer_t */
+#include <mach/machine/machine_types.defs>
+
+type kern_return_t = int;
+
+type pointer_t = ^array[] of MACH_MSG_TYPE_BYTE
+ ctype: vm_offset_t;
+
+
+type mach_port_t = MACH_MSG_TYPE_COPY_SEND;
+type mach_port_array_t = array[] of mach_port_t;
+
+type mach_port_name_t = MACH_MSG_TYPE_PORT_NAME
+ ctype: mach_port_t;
+type mach_port_name_array_t = array[] of mach_port_name_t
+ ctype: mach_port_array_t;
+
+type mach_port_right_t = natural_t;
+
+type mach_port_type_t = natural_t;
+type mach_port_type_array_t = array[] of mach_port_type_t;
+
+type mach_port_urefs_t = natural_t;
+type mach_port_delta_t = integer_t;
+type mach_port_seqno_t = natural_t;
+type mach_port_mscount_t = unsigned;
+type mach_port_msgcount_t = unsigned;
+type mach_port_rights_t = unsigned;
+type mach_msg_id_t = integer_t;
+type mach_msg_type_name_t = unsigned;
+type mach_msg_type_number_t = natural_t;
+
+type mach_port_move_receive_t = MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t;
+type mach_port_copy_send_t = MACH_MSG_TYPE_COPY_SEND
+ ctype: mach_port_t;
+type mach_port_make_send_t = MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+type mach_port_move_send_t = MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+type mach_port_make_send_once_t = MACH_MSG_TYPE_MAKE_SEND_ONCE
+ ctype: mach_port_t;
+type mach_port_move_send_once_t = MACH_MSG_TYPE_MOVE_SEND_ONCE
+ ctype: mach_port_t;
+
+type mach_port_receive_t = MACH_MSG_TYPE_PORT_RECEIVE
+ ctype: mach_port_t;
+type mach_port_send_t = MACH_MSG_TYPE_PORT_SEND
+ ctype: mach_port_t;
+type mach_port_send_once_t = MACH_MSG_TYPE_PORT_SEND_ONCE
+ ctype: mach_port_t;
+
+type mach_port_poly_t = polymorphic
+ ctype: mach_port_t;
+
+
+/* Definitions for the old IPC interface. */
+
+#if MACH_IPC_COMPAT
+
+type port_name_t = MSG_TYPE_PORT_NAME
+ ctype: mach_port_t;
+type port_name_array_t = ^array[] of port_name_t
+ ctype: mach_port_array_t;
+type port_type_t = int
+ ctype: mach_port_type_t;
+type port_type_array_t = ^array[] of port_type_t
+ ctype: mach_port_type_array_t;
+type port_set_name_t = port_name_t
+ ctype: mach_port_t;
+
+type port_t = MACH_MSG_TYPE_COPY_SEND
+ ctype: mach_port_t;
+type port_all_t = MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t;
+type port_rcv_t = MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t;
+type port_array_t = ^array[] of port_t
+ ctype: mach_port_array_t;
+
+#endif /* MACH_IPC_COMPAT */
+
+import <mach/std_types.h>;
+
+#endif /* _MACH_STD_TYPES_DEFS_ */
diff --git a/include/mach/std_types.h b/include/mach/std_types.h
new file mode 100644
index 00000000..f78e236a
--- /dev/null
+++ b/include/mach/std_types.h
@@ -0,0 +1,48 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach standard external interface type definitions.
+ *
+ */
+
+#ifndef _MACH_STD_TYPES_H_
+#define _MACH_STD_TYPES_H_
+
+#define EXPORT_BOOLEAN
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/machine/vm_types.h>
+
+typedef vm_offset_t pointer_t;
+typedef vm_offset_t vm_address_t;
+
+#ifdef MACH_KERNEL
+#include <ipc/ipc_port.h>
+#endif /* MACH_KERNEL */
+
+#endif /* _MACH_STD_TYPES_H_ */
diff --git a/include/mach/syscall_sw.h b/include/mach/syscall_sw.h
new file mode 100644
index 00000000..03527a53
--- /dev/null
+++ b/include/mach/syscall_sw.h
@@ -0,0 +1,140 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_SYSCALL_SW_H_
+#define _MACH_SYSCALL_SW_H_
+
+/*
+ * The machine-dependent "syscall_sw.h" file should
+ * define a macro for
+ * kernel_trap(trap_name, trap_number, arg_count)
+ * which will expand into assembly code for the
+ * trap.
+ *
+ * N.B.: When adding calls, do not put spaces in the macros.
+ */
+
+#include <mach/machine/syscall_sw.h>
+
+/*
+ * These trap numbers should be taken from the
+ * table in <kern/syscall_sw.c>.
+ */
+
+kernel_trap(evc_wait,-17,1)
+kernel_trap(evc_wait_clear,-18,1)
+
+kernel_trap(mach_msg_trap,-25,7)
+kernel_trap(mach_reply_port,-26,0)
+kernel_trap(mach_thread_self,-27,0)
+kernel_trap(mach_task_self,-28,0)
+kernel_trap(mach_host_self,-29,0)
+
+kernel_trap(swtch_pri,-59,1)
+kernel_trap(swtch,-60,0)
+kernel_trap(thread_switch,-61,3)
+kernel_trap(nw_update,-80,3)
+kernel_trap(nw_lookup,-81,2)
+kernel_trap(nw_endpoint_allocate,-82,4)
+kernel_trap(nw_endpoint_deallocate,-83,1)
+kernel_trap(nw_buffer_allocate,-84,2)
+kernel_trap(nw_buffer_deallocate,-85,2)
+kernel_trap(nw_connection_open,-86,4)
+kernel_trap(nw_connection_accept,-87,3)
+kernel_trap(nw_connection_close,-88,1)
+kernel_trap(nw_multicast_add,-89,4)
+kernel_trap(nw_multicast_drop,-90,4)
+kernel_trap(nw_endpoint_status,-91,3)
+kernel_trap(nw_send,-92,3)
+kernel_trap(nw_receive,-93,2)
+kernel_trap(nw_rpc,-94,4)
+kernel_trap(nw_select,-95,3)
+
+
+/*
+ * These are syscall versions of Mach kernel calls.
+ * They only work on local tasks.
+ */
+
+kernel_trap(syscall_vm_map,-64,11)
+kernel_trap(syscall_vm_allocate,-65,4)
+kernel_trap(syscall_vm_deallocate,-66,3)
+
+kernel_trap(syscall_task_create,-68,3)
+kernel_trap(syscall_task_terminate,-69,1)
+kernel_trap(syscall_task_suspend,-70,1)
+kernel_trap(syscall_task_set_special_port,-71,3)
+
+kernel_trap(syscall_mach_port_allocate,-72,3)
+kernel_trap(syscall_mach_port_deallocate,-73,2)
+kernel_trap(syscall_mach_port_insert_right,-74,4)
+kernel_trap(syscall_mach_port_allocate_name,-75,3)
+kernel_trap(syscall_thread_depress_abort,-76,1)
+
+/* These are screwing up glibc somehow. */
+/*kernel_trap(syscall_device_writev_request,-39,6)*/
+/*kernel_trap(syscall_device_write_request,-40,6)*/
+
+/*
+ * These "Mach" traps are not implemented by the kernel;
+ * the emulation library and Unix server implement them.
+ * But they are traditionally part of libmach, and use
+ * the Mach trap calling conventions and numbering.
+ */
+
+#if UNIXOID_TRAPS
+
+kernel_trap(task_by_pid,-33,1)
+kernel_trap(pid_by_task,-34,4)
+kernel_trap(init_process,-41,0)
+kernel_trap(map_fd,-43,5)
+kernel_trap(rfs_make_symlink,-44,3)
+kernel_trap(htg_syscall,-52,3)
+kernel_trap(set_ras_address,-53,2)
+
+#endif /* UNIXOID_TRAPS */
+
+/* Traps for the old IPC interface. */
+
+#if MACH_IPC_COMPAT
+
+kernel_trap(task_self,-10,0)
+kernel_trap(thread_reply,-11,0)
+kernel_trap(task_notify,-12,0)
+kernel_trap(thread_self,-13,0)
+kernel_trap(msg_send_trap,-20,4)
+kernel_trap(msg_receive_trap,-21,5)
+kernel_trap(msg_rpc_trap,-22,6)
+kernel_trap(host_self,-55,0)
+
+#endif /* MACH_IPC_COMPAT */
+
+#ifdef FIPC
+kernel_trap(fipc_send,-96,4)
+kernel_trap(fipc_recv,-97,5)
+#endif
+
+#endif /* _MACH_SYSCALL_SW_H_ */
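
Once a machine-dependent kernel_trap() has turned the lines above into assembly stubs (normally packaged in libmach), the traps are called like plain C functions. An illustrative sketch, with the assumed prototypes spelled out rather than taken from any particular header:

#include <mach/mach_types.h>            /* mach_port_t, boolean_t */

/* Prototypes for a few of the stubs generated from the kernel_trap()
 * lines above; shown here as assumptions about libmach. */
extern mach_port_t mach_task_self(void);
extern mach_port_t mach_host_self(void);
extern boolean_t swtch_pri(int pri);

mach_port_t describe_self(mach_port_t *host)
{
        *host = mach_host_self();       /* trap -29: host name port        */
        (void) swtch_pri(0);            /* trap -59: voluntarily yield CPU */
        return mach_task_self();        /* trap -28: the task kernel port  */
}
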
diff --git a/include/mach/task_info.h b/include/mach/task_info.h
new file mode 100644
index 00000000..fdcbeb11
--- /dev/null
+++ b/include/mach/task_info.h
@@ -0,0 +1,111 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-independent task information structures and definitions.
+ *
+ * The definitions in this file are exported to the user. The kernel
+ * will translate its internal data structures to these structures
+ * as appropriate.
+ *
+ */
+
+#ifndef _MACH_TASK_INFO_H_
+#define _MACH_TASK_INFO_H_
+
+#include <mach/machine/vm_types.h>
+#include <mach/time_value.h>
+
+/*
+ * Generic information structure to allow for expansion.
+ */
+typedef integer_t *task_info_t; /* varying array of int */
+
+#define TASK_INFO_MAX (1024) /* maximum array size */
+typedef integer_t task_info_data_t[TASK_INFO_MAX];
+
+/*
+ * Currently defined information structures.
+ */
+#define TASK_BASIC_INFO 1 /* basic information */
+
+struct task_basic_info {
+ integer_t suspend_count; /* suspend count for task */
+ integer_t base_priority; /* base scheduling priority */
+ vm_size_t virtual_size; /* number of virtual pages */
+ vm_size_t resident_size; /* number of resident pages */
+ time_value_t user_time; /* total user run time for
+ terminated threads */
+ time_value_t system_time; /* total system run time for
+ terminated threads */
+};
+
+typedef struct task_basic_info task_basic_info_data_t;
+typedef struct task_basic_info *task_basic_info_t;
+#define TASK_BASIC_INFO_COUNT \
+ (sizeof(task_basic_info_data_t) / sizeof(natural_t))
+
+
+#define TASK_EVENTS_INFO 2 /* various event counts */
+
+struct task_events_info {
+ natural_t faults; /* number of page faults */
+ natural_t zero_fills; /* number of zero fill pages */
+ natural_t reactivations; /* number of reactivated pages */
+ natural_t pageins; /* number of actual pageins */
+ natural_t cow_faults; /* number of copy-on-write faults */
+ natural_t messages_sent; /* number of messages sent */
+ natural_t messages_received; /* number of messages received */
+};
+typedef struct task_events_info task_events_info_data_t;
+typedef struct task_events_info *task_events_info_t;
+#define TASK_EVENTS_INFO_COUNT \
+ (sizeof(task_events_info_data_t) / sizeof(natural_t))
+
+#define TASK_THREAD_TIMES_INFO 3 /* total times for live threads -
+ only accurate if suspended */
+
+struct task_thread_times_info {
+ time_value_t user_time; /* total user run time for
+ live threads */
+ time_value_t system_time; /* total system run time for
+ live threads */
+};
+
+typedef struct task_thread_times_info task_thread_times_info_data_t;
+typedef struct task_thread_times_info *task_thread_times_info_t;
+#define TASK_THREAD_TIMES_INFO_COUNT \
+ (sizeof(task_thread_times_info_data_t) / sizeof(natural_t))
+
+/*
+ * Flavor definitions for task_ras_control
+ */
+#define TASK_RAS_CONTROL_PURGE_ALL 0
+#define TASK_RAS_CONTROL_PURGE_ONE 1
+#define TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE 2
+#define TASK_RAS_CONTROL_INSTALL_ONE 3
+
+#endif /* _MACH_TASK_INFO_H_ */
+
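
A hedged sketch of fetching the TASK_BASIC_INFO flavor defined above for the calling task, assuming the task_info() MIG stub and the mach_task_self() trap from libmach:

#include <mach.h>                       /* task_info() stub, mach_task_self() */
#include <mach/task_info.h>

/* Fetch the caller's TASK_BASIC_INFO.  Returns KERN_SUCCESS or the error
 * from task_info(); *info is only valid on success. */
kern_return_t get_basic_info(struct task_basic_info *info)
{
        natural_t count = TASK_BASIC_INFO_COUNT;

        return task_info(mach_task_self(), TASK_BASIC_INFO,
                         (task_info_t) info, &count);
}
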
diff --git a/include/mach/task_special_ports.h b/include/mach/task_special_ports.h
new file mode 100644
index 00000000..682c7f61
--- /dev/null
+++ b/include/mach/task_special_ports.h
@@ -0,0 +1,86 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/task_special_ports.h
+ *
+ * Defines codes for special-purpose task ports. These are NOT
+ * port identifiers - they are only used for the task_get_special_port
+ * and task_set_special_port routines.
+ *
+ */
+
+#ifndef _MACH_TASK_SPECIAL_PORTS_H_
+#define _MACH_TASK_SPECIAL_PORTS_H_
+
+#ifdef MACH_KERNEL
+#include <mach_ipc_compat.h>
+#endif /* MACH_KERNEL */
+
+#define TASK_KERNEL_PORT 1 /* Represents task to the outside
+ world.*/
+#define TASK_EXCEPTION_PORT 3 /* Exception messages for task are
+ sent to this port. */
+#define TASK_BOOTSTRAP_PORT 4 /* Bootstrap environment for task. */
+
+/*
+ * Definitions for ease of use
+ */
+
+#define task_get_kernel_port(task, port) \
+ (task_get_special_port((task), TASK_KERNEL_PORT, (port)))
+
+#define task_set_kernel_port(task, port) \
+ (task_set_special_port((task), TASK_KERNEL_PORT, (port)))
+
+#define task_get_exception_port(task, port) \
+ (task_get_special_port((task), TASK_EXCEPTION_PORT, (port)))
+
+#define task_set_exception_port(task, port) \
+ (task_set_special_port((task), TASK_EXCEPTION_PORT, (port)))
+
+#define task_get_bootstrap_port(task, port) \
+ (task_get_special_port((task), TASK_BOOTSTRAP_PORT, (port)))
+
+#define task_set_bootstrap_port(task, port) \
+ (task_set_special_port((task), TASK_BOOTSTRAP_PORT, (port)))
+
+
+/* Definitions for the old IPC interface. */
+
+#if MACH_IPC_COMPAT
+
+#define TASK_NOTIFY_PORT 2 /* Task receives kernel IPC
+ notifications here. */
+
+#define task_get_notify_port(task, port) \
+ (task_get_special_port((task), TASK_NOTIFY_PORT, (port)))
+
+#define task_set_notify_port(task, port) \
+ (task_set_special_port((task), TASK_NOTIFY_PORT, (port)))
+
+#endif /* MACH_IPC_COMPAT */
+
+#endif /* _MACH_TASK_SPECIAL_PORTS_H_ */
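
A minimal sketch of the convenience macros above; the underlying task_get_special_port() RPC stub is assumed to come from libmach:

#include <mach.h>                       /* task_get_special_port() stub */
#include <mach/task_special_ports.h>

/* Fetch the caller's bootstrap port; the macro expands to
 * task_get_special_port(task, TASK_BOOTSTRAP_PORT, &port). */
kern_return_t get_my_bootstrap(mach_port_t *bootstrap)
{
        return task_get_bootstrap_port(mach_task_self(), bootstrap);
}
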
diff --git a/include/mach/thread_info.h b/include/mach/thread_info.h
new file mode 100644
index 00000000..2c79829a
--- /dev/null
+++ b/include/mach/thread_info.h
@@ -0,0 +1,116 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/thread_info.h
+ *
+ * Thread information structure and definitions.
+ *
+ * The definitions in this file are exported to the user. The kernel
+ * will translate its internal data structures to these structures
+ * as appropriate.
+ *
+ */
+
+#ifndef _MACH_THREAD_INFO_H_
+#define _MACH_THREAD_INFO_H_
+
+#include <mach/boolean.h>
+#include <mach/policy.h>
+#include <mach/time_value.h>
+
+/*
+ * Generic information structure to allow for expansion.
+ */
+typedef integer_t *thread_info_t; /* varying array of ints */
+
+#define THREAD_INFO_MAX (1024) /* maximum array size */
+typedef integer_t thread_info_data_t[THREAD_INFO_MAX];
+
+/*
+ * Currently defined information.
+ */
+#define THREAD_BASIC_INFO 1 /* basic information */
+
+struct thread_basic_info {
+ time_value_t user_time; /* user run time */
+ time_value_t system_time; /* system run time */
+ integer_t cpu_usage; /* scaled cpu usage percentage */
+ integer_t base_priority; /* base scheduling priority */
+ integer_t cur_priority; /* current scheduling priority */
+ integer_t run_state; /* run state (see below) */
+ integer_t flags; /* various flags (see below) */
+ integer_t suspend_count; /* suspend count for thread */
+ integer_t sleep_time; /* number of seconds that thread
+ has been sleeping */
+};
+
+typedef struct thread_basic_info thread_basic_info_data_t;
+typedef struct thread_basic_info *thread_basic_info_t;
+#define THREAD_BASIC_INFO_COUNT \
+ (sizeof(thread_basic_info_data_t) / sizeof(natural_t))
+
+/*
+ * Scale factor for usage field.
+ */
+
+#define TH_USAGE_SCALE 1000
+
+/*
+ * Thread run states (state field).
+ */
+
+#define TH_STATE_RUNNING 1 /* thread is running normally */
+#define TH_STATE_STOPPED 2 /* thread is stopped */
+#define TH_STATE_WAITING 3 /* thread is waiting normally */
+#define TH_STATE_UNINTERRUPTIBLE 4 /* thread is in an uninterruptible
+ wait */
+#define TH_STATE_HALTED 5 /* thread is halted at a
+ clean point */
+
+/*
+ * Thread flags (flags field).
+ */
+#define TH_FLAGS_SWAPPED 0x1 /* thread is swapped out */
+#define TH_FLAGS_IDLE 0x2 /* thread is an idle thread */
+
+#define THREAD_SCHED_INFO 2
+
+struct thread_sched_info {
+ integer_t policy; /* scheduling policy */
+ integer_t data; /* associated data */
+ integer_t base_priority; /* base priority */
+ integer_t max_priority; /* max priority */
+ integer_t cur_priority; /* current priority */
+/*boolean_t*/integer_t depressed; /* depressed ? */
+ integer_t depress_priority; /* priority depressed from */
+};
+
+typedef struct thread_sched_info thread_sched_info_data_t;
+typedef struct thread_sched_info *thread_sched_info_t;
+#define THREAD_SCHED_INFO_COUNT \
+ (sizeof(thread_sched_info_data_t) / sizeof(natural_t))
+
+#endif /* _MACH_THREAD_INFO_H_ */
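
A sketch of reading THREAD_BASIC_INFO for the calling thread and interpreting cpu_usage against TH_USAGE_SCALE; the thread_info() stub and the mach_thread_self() trap are assumed from libmach:

#include <mach.h>                       /* thread_info() stub, mach_thread_self() */
#include <mach/thread_info.h>

/* Return the calling thread's CPU usage in tenths of a percent
 * (cpu_usage is scaled so that TH_USAGE_SCALE means 100%), or -1 on error.
 * Note: mach_thread_self() returns a new send right; a real program
 * would deallocate it afterwards. */
int cpu_usage_tenths(void)
{
        struct thread_basic_info info;
        natural_t count = THREAD_BASIC_INFO_COUNT;

        if (thread_info(mach_thread_self(), THREAD_BASIC_INFO,
                        (thread_info_t) &info, &count) != KERN_SUCCESS)
                return -1;

        return (int) (info.cpu_usage * 1000 / TH_USAGE_SCALE);
}
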
diff --git a/include/mach/thread_special_ports.h b/include/mach/thread_special_ports.h
new file mode 100644
index 00000000..0ab36468
--- /dev/null
+++ b/include/mach/thread_special_ports.h
@@ -0,0 +1,79 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/thread_special_ports.h
+ *
+ * Defines codes for special-purpose thread ports. These are NOT
+ * port identifiers - they are only used for the thread_get_special_port
+ * and thread_set_special_port routines.
+ *
+ */
+
+#ifndef _MACH_THREAD_SPECIAL_PORTS_H_
+#define _MACH_THREAD_SPECIAL_PORTS_H_
+
+#ifdef MACH_KERNEL
+#include <mach_ipc_compat.h>
+#endif /* MACH_KERNEL */
+
+#define THREAD_KERNEL_PORT 1 /* Represents the thread to the outside
+ world.*/
+#define THREAD_EXCEPTION_PORT 3 /* Exception messages for the thread
+ are sent to this port. */
+
+/*
+ * Definitions for ease of use
+ */
+
+#define thread_get_kernel_port(thread, port) \
+ (thread_get_special_port((thread), THREAD_KERNEL_PORT, (port)))
+
+#define thread_set_kernel_port(thread, port) \
+ (thread_set_special_port((thread), THREAD_KERNEL_PORT, (port)))
+
+#define thread_get_exception_port(thread, port) \
+ (thread_get_special_port((thread), THREAD_EXCEPTION_PORT, (port)))
+
+#define thread_set_exception_port(thread, port) \
+ (thread_set_special_port((thread), THREAD_EXCEPTION_PORT, (port)))
+
+
+/* Definitions for the old IPC interface. */
+
+#if MACH_IPC_COMPAT
+
+#define THREAD_REPLY_PORT 2 /* Default reply port for the thread's
+ use. */
+
+#define thread_get_reply_port(thread, port) \
+ (thread_get_special_port((thread), THREAD_REPLY_PORT, (port)))
+
+#define thread_set_reply_port(thread, port) \
+ (thread_set_special_port((thread), THREAD_REPLY_PORT, (port)))
+
+#endif /* MACH_IPC_COMPAT */
+
+#endif /* _MACH_THREAD_SPECIAL_PORTS_H_ */
diff --git a/include/mach/thread_status.h b/include/mach/thread_status.h
new file mode 100644
index 00000000..b02f5b4c
--- /dev/null
+++ b/include/mach/thread_status.h
@@ -0,0 +1,55 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ *
+ * This file contains the structure definitions for the user-visible
+ * thread state. This thread state is examined with the thread_get_state
+ * kernel call and may be changed with the thread_set_state kernel call.
+ *
+ */
+
+#ifndef _MACH_THREAD_STATUS_H_
+#define _MACH_THREAD_STATUS_H_
+
+/*
+ * The actual structure that comprises the thread state is defined
+ * in the machine dependent module.
+ */
+#include <mach/machine/vm_types.h>
+#include <mach/machine/thread_status.h>
+
+/*
+ * Generic definition for machine-dependent thread status.
+ */
+
+typedef natural_t *thread_state_t; /* Variable-length array */
+
+#define THREAD_STATE_MAX (1024) /* Maximum array size */
+typedef natural_t thread_state_data_t[THREAD_STATE_MAX];
+
+#define THREAD_STATE_FLAVOR_LIST 0 /* List of valid flavors */
+
+#endif /* _MACH_THREAD_STATUS_H_ */
diff --git a/include/mach/thread_switch.h b/include/mach/thread_switch.h
new file mode 100644
index 00000000..5235b874
--- /dev/null
+++ b/include/mach/thread_switch.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_THREAD_SWITCH_H_
+#define _MACH_THREAD_SWITCH_H_
+
+/*
+ * Constant definitions for thread_switch trap.
+ */
+
+#define SWITCH_OPTION_NONE 0
+#define SWITCH_OPTION_DEPRESS 1
+#define SWITCH_OPTION_WAIT 2
+
+#define valid_switch_option(opt) ((0 <= (opt)) && ((opt) <= 2))
+
+#endif /* _MACH_THREAD_SWITCH_H_ */
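As a usage sketch, the macro above can gate a call to the thread_switch trap; the trap prototype shown here follows the customary Mach interface and is an assumption rather than part of this header.

/* Hypothetical sketch: validate an option, then yield the processor. */
#include <mach.h>

kern_return_t
yield_cpu(int option, mach_msg_timeout_t depress_ms)
{
        if (!valid_switch_option(option))
                return KERN_INVALID_ARGUMENT;
        /* MACH_PORT_NULL hint: let the scheduler pick the next thread. */
        return thread_switch(MACH_PORT_NULL, option, depress_ms);
}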
diff --git a/include/mach/time_value.h b/include/mach/time_value.h
new file mode 100644
index 00000000..2a2f0911
--- /dev/null
+++ b/include/mach/time_value.h
@@ -0,0 +1,80 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_TIME_VALUE_H_
+#define _MACH_TIME_VALUE_H_
+
+#include <mach/machine/vm_types.h>
+
+/*
+ * Time value returned by kernel.
+ */
+
+struct time_value {
+ integer_t seconds;
+ integer_t microseconds;
+};
+typedef struct time_value time_value_t;
+
+/*
+ * Macros to manipulate time values. Assume that time values
+ * are normalized (microseconds <= 999999).
+ */
+#define TIME_MICROS_MAX (1000000)
+
+#define time_value_add_usec(val, micros) { \
+ if (((val)->microseconds += (micros)) \
+ >= TIME_MICROS_MAX) { \
+ (val)->microseconds -= TIME_MICROS_MAX; \
+ (val)->seconds++; \
+ } \
+}
+
+#define time_value_add(result, addend) { \
+ (result)->microseconds += (addend)->microseconds; \
+ (result)->seconds += (addend)->seconds; \
+ if ((result)->microseconds >= TIME_MICROS_MAX) { \
+ (result)->microseconds -= TIME_MICROS_MAX; \
+ (result)->seconds++; \
+ } \
+}
+
+/*
+ * Time value available through the mapped-time interface.
+ * Read this mapped value with
+ * do {
+ * secs = mtime->seconds;
+ * usecs = mtime->microseconds;
+ * } while (secs != mtime->check_seconds);
+ */
+
+typedef struct mapped_time_value {
+ integer_t seconds;
+ integer_t microseconds;
+ integer_t check_seconds;
+} mapped_time_value_t;
+
+#endif /* _MACH_TIME_VALUE_H_ */
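A small sketch expanding the read protocol documented in the comment above: retry until seconds and check_seconds agree, so a concurrent kernel update is never observed half-written. It assumes only the types defined in this header.

/* Sketch of the mapped-time read protocol. */
static void
read_mapped_time(volatile mapped_time_value_t *mtime, time_value_t *tv)
{
        do {
                tv->seconds = mtime->seconds;
                tv->microseconds = mtime->microseconds;
        } while (tv->seconds != mtime->check_seconds);
}

/* time_value_add() can then accumulate intervals while keeping
 * microseconds normalized below TIME_MICROS_MAX. */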
diff --git a/include/mach/version.h b/include/mach/version.h
new file mode 100644
index 00000000..ec12ea74
--- /dev/null
+++ b/include/mach/version.h
@@ -0,0 +1,68 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon rights
+ * to redistribute these changes.
+ */
+/*
+ * Each kernel has a major and minor version number. Changes in
+ * the major number in general indicate a change in exported features.
+ * Changes in minor number usually correspond to internal-only
+ * changes that the user need not be aware of (in general). These
+ * values are stored at boot time in the machine_info structure and
+ * can be obtained by user programs with the host_info kernel call.
+ * This mechanism is intended to be the formal way for Mach programs
+ * to provide for backward compatibility in future releases.
+ *
+ * [ This needs to be reconciled somehow with the major/minor version
+ * number stuffed into the version string - mja, 5/8/87 ]
+ *
+ * Following is an informal history of the numbers:
+ *
+ * 25-March-87 Avadis Tevanian, Jr.
+ * Created version numbering scheme. Started with major 1,
+ * minor 0.
+ */
+
+#define KERNEL_MAJOR_VERSION 4
+#define KERNEL_MINOR_VERSION 0
+
+/*
+ * Version number of the kernel include files.
+ *
+ * This number must be changed whenever an incompatible change is made to one
+ * or more of our include files which are used by application programs that
+ * delve into kernel memory. The number should normally be simply incremented
+ * but may actually be changed in any manner so long as it differs from the
+ * numbers previously assigned to any other versions with which the current
+ * version is incompatible. It is used at boot time to determine which
+ * versions of the system programs to install.
+ *
+ * Note that the symbol _INCLUDE_VERSION must be set to this in the symbol
+ * table. On the VAX for example, this is done in locore.s.
+ */
+
+/*
+ * Current allocation strategy: bump either branch by 2, until non-MACH is
+ * excised from the CSD environment.
+ */
+#define INCLUDE_VERSION 0
diff --git a/include/mach/vm_attributes.h b/include/mach/vm_attributes.h
new file mode 100644
index 00000000..9ca3ef5c
--- /dev/null
+++ b/include/mach/vm_attributes.h
@@ -0,0 +1,63 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/vm_attributes.h
+ * Author: Alessandro Forin
+ *
+ * Virtual memory attributes definitions.
+ *
+ * These definitions are in addition to the machine-independent
+ * ones (e.g. protection), and are only selectively supported
+ * on specific machine architectures.
+ *
+ */
+
+#ifndef _MACH_VM_ATTRIBUTES_H_
+#define _MACH_VM_ATTRIBUTES_H_
+
+/*
+ * Types of machine-dependent attributes
+ */
+typedef unsigned int vm_machine_attribute_t;
+
+#define MATTR_CACHE 1 /* cachability */
+#define MATTR_MIGRATE 2 /* migrability */
+#define MATTR_REPLICATE 4 /* replicability */
+
+/*
+ * Values for the above, e.g. operations on attribute
+ */
+typedef int vm_machine_attribute_val_t;
+
+#define MATTR_VAL_OFF 0 /* (generic) turn attribute off */
+#define MATTR_VAL_ON 1 /* (generic) turn attribute on */
+#define MATTR_VAL_GET 2 /* (generic) return current value */
+
+#define MATTR_VAL_CACHE_FLUSH 6 /* flush from all caches */
+#define MATTR_VAL_DCACHE_FLUSH 7 /* flush from data caches */
+#define MATTR_VAL_ICACHE_FLUSH 8 /* flush from instruction caches */
+
+#endif /* _MACH_VM_ATTRIBUTES_H_ */
diff --git a/include/mach/vm_inherit.h b/include/mach/vm_inherit.h
new file mode 100644
index 00000000..2899290e
--- /dev/null
+++ b/include/mach/vm_inherit.h
@@ -0,0 +1,55 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/vm_inherit.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Virtual memory map inheritance definitions.
+ *
+ */
+
+#ifndef _MACH_VM_INHERIT_H_
+#define _MACH_VM_INHERIT_H_
+
+/*
+ * Types defined:
+ *
+ * vm_inherit_t inheritance codes.
+ */
+
+typedef int vm_inherit_t; /* might want to change this */
+
+/*
+ * Enumeration of valid values for vm_inherit_t.
+ */
+
+#define VM_INHERIT_SHARE ((vm_inherit_t) 0) /* share with child */
+#define VM_INHERIT_COPY ((vm_inherit_t) 1) /* copy into child */
+#define VM_INHERIT_NONE ((vm_inherit_t) 2) /* absent from child */
+
+#define VM_INHERIT_DEFAULT VM_INHERIT_COPY
+
+#endif /* _MACH_VM_INHERIT_H_ */
diff --git a/include/mach/vm_param.h b/include/mach/vm_param.h
new file mode 100644
index 00000000..03609815
--- /dev/null
+++ b/include/mach/vm_param.h
@@ -0,0 +1,98 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/vm_param.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Machine independent virtual memory parameters.
+ *
+ */
+
+#ifndef _MACH_VM_PARAM_H_
+#define _MACH_VM_PARAM_H_
+
+#include <mach/machine/vm_param.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ * The machine independent pages are referred to as PAGES. A page
+ * is some number of hardware pages, depending on the target machine.
+ *
+ * All references to the size of a page should be done
+ * with PAGE_SIZE, PAGE_SHIFT, or PAGE_MASK.
+ * They may be implemented as either constants or variables,
+ * depending on more-specific code.
+ * If they're variables, they had better be initialized
+ * by the time system-independent code starts getting called.
+ *
+ * Regardless of whether it is implemented as a constant or a variable,
+ * the PAGE_SIZE is assumed to be a power of two throughout the
+ * virtual memory system implementation.
+ *
+ * More-specific code must at least provide PAGE_SHIFT;
+ * we can calculate the others if necessary.
+ * (However, if PAGE_SHIFT really refers to a variable,
+ * PAGE_SIZE and PAGE_MASK should also be variables
+ * so their values don't have to be constantly recomputed.)
+ */
+#ifndef PAGE_SHIFT
+#error mach/machine/vm_param.h needs to define PAGE_SHIFT.
+#endif
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#endif
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (PAGE_SIZE-1)
+#endif
+
+/*
+ * Convert addresses to pages and vice versa.
+ * No rounding is used.
+ */
+
+#define atop(x) (((vm_size_t)(x)) >> PAGE_SHIFT)
+#define ptoa(x) ((vm_offset_t)((x) << PAGE_SHIFT))
+
+/*
+ * Round off or truncate to the nearest page. These will work
+ * for either addresses or counts (e.g. 1 byte rounds up to 1 page's
+ * worth of bytes).
+ */
+
+#define round_page(x) ((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) & ~PAGE_MASK))
+#define trunc_page(x) ((vm_offset_t)(((vm_offset_t)(x)) & ~PAGE_MASK))
+
+/*
+ * Determine whether an address is page-aligned, or a count is
+ * an exact page multiple.
+ */
+
+#define page_aligned(x) ((((vm_offset_t) (x)) & PAGE_MASK) == 0)
+
+#endif /* _MACH_VM_PARAM_H_ */
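For a concrete illustration of the macros above, assume PAGE_SHIFT is 12 (4 KB pages, a common but machine-dependent choice):

/* Worked example (assuming PAGE_SHIFT == 12):
 *
 *      PAGE_SIZE  = 1 << 12         = 4096
 *      PAGE_MASK  = 4096 - 1        = 0x0fff
 *      trunc_page(0x1005) = 0x1000
 *      round_page(0x1005) = 0x2000
 *      atop(0x2000)       = 2,   ptoa(2) = 0x2000
 *      page_aligned(0x3000) is true, page_aligned(0x3004) is false
 */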
diff --git a/include/mach/vm_prot.h b/include/mach/vm_prot.h
new file mode 100644
index 00000000..22a76a8f
--- /dev/null
+++ b/include/mach/vm_prot.h
@@ -0,0 +1,79 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/vm_prot.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Virtual memory protection definitions.
+ *
+ */
+
+#ifndef _MACH_VM_PROT_H_
+#define _MACH_VM_PROT_H_
+
+/*
+ * Types defined:
+ *
+ * vm_prot_t VM protection values.
+ */
+
+typedef int vm_prot_t;
+
+/*
+ * Protection values, defined as bits within the vm_prot_t type
+ */
+
+#define VM_PROT_NONE ((vm_prot_t) 0x00)
+
+#define VM_PROT_READ ((vm_prot_t) 0x01) /* read permission */
+#define VM_PROT_WRITE ((vm_prot_t) 0x02) /* write permission */
+#define VM_PROT_EXECUTE ((vm_prot_t) 0x04) /* execute permission */
+
+/*
+ * The default protection for newly-created virtual memory
+ */
+
+#define VM_PROT_DEFAULT (VM_PROT_READ|VM_PROT_WRITE)
+
+/*
+ * The maximum privileges possible, for parameter checking.
+ */
+
+#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
+
+/*
+ * An invalid protection value.
+ * Used only by memory_object_lock_request to indicate no change
+ * to page locks. Using -1 here is a bad idea because it
+ * looks like VM_PROT_ALL and then some.
+ */
+#define VM_PROT_NO_CHANGE ((vm_prot_t) 0x08)
+
+/*
+ * This protection value says whether special notification is to be used.
+ */
+#define VM_PROT_NOTIFY ((vm_prot_t) 0x10)
+#endif /* _MACH_VM_PROT_H_ */
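A brief sketch of how these protection bits are typically consumed; the vm_protect() prototype is assumed from the standard Mach VM interface and is not defined in this header.

/* Hypothetical sketch: make a region of the caller's address space
 * read-only using the bits defined above. */
#include <mach.h>

kern_return_t
make_read_only(vm_address_t addr, vm_size_t size)
{
        /* FALSE: change the current protection, not the maximum. */
        return vm_protect(mach_task_self(), addr, size, FALSE, VM_PROT_READ);
}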
diff --git a/include/mach/vm_statistics.h b/include/mach/vm_statistics.h
new file mode 100644
index 00000000..2039a822
--- /dev/null
+++ b/include/mach/vm_statistics.h
@@ -0,0 +1,75 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/vm_statistics.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
+ *
+ * Virtual memory statistics structure.
+ *
+ */
+
+#ifndef _MACH_VM_STATISTICS_H_
+#define _MACH_VM_STATISTICS_H_
+
+#include <mach/machine/vm_types.h>
+
+struct vm_statistics {
+ integer_t pagesize; /* page size in bytes */
+ integer_t free_count; /* # of pages free */
+ integer_t active_count; /* # of pages active */
+ integer_t inactive_count; /* # of pages inactive */
+ integer_t wire_count; /* # of pages wired down */
+ integer_t zero_fill_count; /* # of zero fill pages */
+ integer_t reactivations; /* # of pages reactivated */
+ integer_t pageins; /* # of pageins */
+ integer_t pageouts; /* # of pageouts */
+ integer_t faults; /* # of faults */
+ integer_t cow_faults; /* # of copy-on-writes */
+ integer_t lookups; /* object cache lookups */
+ integer_t hits; /* object cache hits */
+};
+
+typedef struct vm_statistics *vm_statistics_t;
+typedef struct vm_statistics vm_statistics_data_t;
+
+#ifdef MACH_KERNEL
+extern vm_statistics_data_t vm_stat;
+#endif /* MACH_KERNEL */
+
+/*
+ * Each machine dependent implementation is expected to
+ * keep certain statistics. They may do this any way they
+ * so choose, but are expected to return the statistics
+ * in the following structure.
+ */
+
+struct pmap_statistics {
+ integer_t resident_count; /* # of pages mapped (total)*/
+ integer_t wired_count; /* # of pages wired */
+};
+
+typedef struct pmap_statistics *pmap_statistics_t;
+#endif /* _MACH_VM_STATISTICS_H_ */
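A hedged usage sketch: the vm_statistics() call that fills this structure is assumed from the standard Mach interface; the derived figures below use only the fields defined above.

/* Hypothetical sketch: print a few memory figures from vm_statistics. */
#include <stdio.h>
#include <mach.h>

void
show_memory(void)
{
        vm_statistics_data_t st;

        if (vm_statistics(mach_task_self(), &st) != KERN_SUCCESS)
                return;
        printf("free:   %ld KB\n", (long) st.free_count * st.pagesize / 1024);
        printf("active: %ld KB\n", (long) st.active_count * st.pagesize / 1024);
        printf("wired:  %ld KB\n", (long) st.wire_count * st.pagesize / 1024);
}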
diff --git a/include/mach_debug/hash_info.h b/include/mach_debug/hash_info.h
new file mode 100644
index 00000000..19a039f4
--- /dev/null
+++ b/include/mach_debug/hash_info.h
@@ -0,0 +1,41 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_DEBUG_HASH_INFO_H_
+#define _MACH_DEBUG_HASH_INFO_H_
+
+/*
+ * Remember to update the mig type definitions
+ * in mach_debug_types.defs when adding/removing fields.
+ */
+
+typedef struct hash_info_bucket {
+ natural_t hib_count; /* number of records in bucket */
+} hash_info_bucket_t;
+
+typedef hash_info_bucket_t *hash_info_bucket_array_t;
+
+#endif /* _MACH_DEBUG_HASH_INFO_H_ */
diff --git a/include/mach_debug/ipc_info.h b/include/mach_debug/ipc_info.h
new file mode 100644
index 00000000..8d2cafeb
--- /dev/null
+++ b/include/mach_debug/ipc_info.h
@@ -0,0 +1,100 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach_debug/ipc_info.h
+ * Author: Rich Draves
+ * Date: March, 1990
+ *
+ * Definitions for the IPC debugging interface.
+ */
+
+#ifndef _MACH_DEBUG_IPC_INFO_H_
+#define _MACH_DEBUG_IPC_INFO_H_
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ * Remember to update the mig type definitions
+ * in mach_debug_types.defs when adding/removing fields.
+ */
+
+
+typedef struct ipc_info_space {
+ natural_t iis_genno_mask; /* generation number mask */
+ natural_t iis_table_size; /* size of table */
+ natural_t iis_table_next; /* next possible size of table */
+ natural_t iis_tree_size; /* size of tree */
+ natural_t iis_tree_small; /* # of small entries in tree */
+ natural_t iis_tree_hash; /* # of hashed entries in tree */
+} ipc_info_space_t;
+
+
+typedef struct ipc_info_name {
+ mach_port_t iin_name; /* port name, including gen number */
+/*boolean_t*/integer_t iin_collision; /* collision at this entry? */
+/*boolean_t*/integer_t iin_compat; /* is this a compat-mode entry? */
+/*boolean_t*/integer_t iin_marequest; /* extant msg-accepted request? */
+ mach_port_type_t iin_type; /* straight port type */
+ mach_port_urefs_t iin_urefs; /* user-references */
+ vm_offset_t iin_object; /* object pointer */
+ natural_t iin_next; /* marequest/next in free list */
+ natural_t iin_hash; /* hash index */
+} ipc_info_name_t;
+
+typedef ipc_info_name_t *ipc_info_name_array_t;
+
+
+typedef struct ipc_info_tree_name {
+ ipc_info_name_t iitn_name;
+ mach_port_t iitn_lchild; /* name of left child */
+ mach_port_t iitn_rchild; /* name of right child */
+} ipc_info_tree_name_t;
+
+typedef ipc_info_tree_name_t *ipc_info_tree_name_array_t;
+
+/*
+ * Type definitions for mach_port_kernel_object.
+ * By remarkable coincidence, these closely resemble
+ * the IKOT_* definitions in ipc/ipc_kobject.h.
+ */
+
+#define IPC_INFO_TYPE_NONE 0
+#define IPC_INFO_TYPE_THREAD 1
+#define IPC_INFO_TYPE_TASK 2
+#define IPC_INFO_TYPE_HOST 3
+#define IPC_INFO_TYPE_HOST_PRIV 4
+#define IPC_INFO_TYPE_PROCESSOR 5
+#define IPC_INFO_TYPE_PSET 6
+#define IPC_INFO_TYPE_PSET_NAME 7
+#define IPC_INFO_TYPE_PAGER 8
+#define IPC_INFO_TYPE_PAGING_REQUEST 9
+#define IPC_INFO_TYPE_DEVICE 10
+#define IPC_INFO_TYPE_XMM_PAGER 11
+#define IPC_INFO_TYPE_PAGING_NAME 12
+
+#endif /* _MACH_DEBUG_IPC_INFO_H_ */
diff --git a/include/mach_debug/mach_debug.defs b/include/mach_debug/mach_debug.defs
new file mode 100644
index 00000000..72210346
--- /dev/null
+++ b/include/mach_debug/mach_debug.defs
@@ -0,0 +1,241 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Matchmaker definitions file for Mach kernel debugging interface.
+ */
+
+#ifdef MACH_KERNEL
+#include <mach_ipc_debug.h>
+#include <mach_vm_debug.h>
+#include <mach_kdb.h>
+#endif
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+ mach_debug 3000;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <mach_debug/mach_debug_types.defs>
+
+skip; /* host_ipc_statistics */
+skip; /* host_ipc_statistics_reset */
+skip; /* host_callout_info */
+skip; /* host_callout_statistics */
+skip; /* host_callout_statistics_reset */
+
+/*
+ * Returns information about the memory allocation zones.
+ */
+routine host_zone_info(
+ host : host_t;
+ out names : zone_name_array_t,
+ CountInOut, Dealloc;
+ out info : zone_info_array_t,
+ CountInOut, Dealloc);
+
+skip; /* host_ipc_bucket_info */
+
+#if !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG
+
+/*
+ * Returns the exact number of extant send rights
+ * for the given receive right.
+ */
+
+routine mach_port_get_srights(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out srights : mach_port_rights_t);
+
+/*
+ * Returns information about the global reverse hash table.
+ */
+
+routine host_ipc_hash_info(
+ host : host_t;
+ out info : hash_info_bucket_array_t,
+ CountInOut, Dealloc);
+
+/*
+ * Returns information about the marequest hash table.
+ */
+
+routine host_ipc_marequest_info(
+ host : host_t;
+ out max_requests : unsigned;
+ out info : hash_info_bucket_array_t,
+ CountInOut, Dealloc);
+
+/*
+ * Returns information about an IPC space.
+ */
+
+routine mach_port_space_info(
+ task : ipc_space_t;
+ out info : ipc_info_space_t;
+ out table_info : ipc_info_name_array_t,
+ CountInOut, Dealloc;
+ out tree_info : ipc_info_tree_name_array_t,
+ CountInOut, Dealloc);
+
+/*
+ * Returns information about the dead-name requests
+ * registered with the named receive right.
+ */
+
+routine mach_port_dnrequest_info(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out total : unsigned; /* total size of table */
+ out used : unsigned); /* amount used */
+
+#else /* !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG */
+skip; /* mach_port_get_srights */
+skip; /* host_ipc_hash_info */
+skip; /* host_ipc_marequest_info */
+skip; /* mach_port_space_info */
+skip; /* mach_port_dnrequest_info */
+#endif /* !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG */
+
+skip; /* mach_vm_region_info */
+skip; /* vm_mapped_pages_info */
+
+/*
+ * Returns stack usage information:
+ * reserved Amount of stack space reserved for pcb.
+ * total Number of stacks.
+ * space Total VM space for stacks.
+ * resident Resident VM space for stacks.
+ * maxusage Maximum amount of stack used.
+ * maxstack Address in the kernel of the largest stack.
+ */
+
+routine host_stack_usage(
+ host : host_t;
+ out reserved : vm_size_t;
+ out total : unsigned;
+ out space : vm_size_t;
+ out resident : vm_size_t;
+ out maxusage : vm_size_t;
+ out maxstack : vm_offset_t);
+
+routine processor_set_stack_usage(
+ pset : processor_set_name_t;
+ out total : unsigned;
+ out space : vm_size_t;
+ out resident : vm_size_t;
+ out maxusage : vm_size_t;
+ out maxstack : vm_offset_t);
+
+#if !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG
+
+/*
+ * Returns information about the global VP table.
+ */
+
+routine host_virtual_physical_table_info(
+ host : host_t;
+ out info : hash_info_bucket_array_t,
+ CountInOut, Dealloc);
+
+#else /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
+skip; /* host_virtual_physical_table_info */
+#endif /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
+
+#if !defined(MACH_KDB) || MACH_KDB
+/*
+ * Loads a symbol table for an external file into the kernel debugger.
+ * The symbol table data is an array of characters. It is assumed that
+ * the caller and the kernel debugger agree on its format.
+ */
+
+routine host_load_symbol_table(
+ host : host_priv_t;
+ task : task_t;
+ name : symtab_name_t;
+ symtab : pointer_t);
+
+#else /* !defined(MACH_KDB) || MACH_KDB */
+skip; /* host_load_symbol_table */
+#endif /* !defined(MACH_KDB) || MACH_KDB */
+
+#if !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG
+
+/*
+ * Return the type and address of the kernel object
+ * that the given send/receive right represents.
+ */
+
+routine mach_port_kernel_object(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ out object_type : unsigned;
+ out object_addr : vm_offset_t);
+
+#else /* !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG */
+skip; /* mach_port_kernel_object */
+#endif /* !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG */
+
+#if !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG
+
+/*
+ * Returns information about a region of memory.
+ */
+
+routine mach_vm_region_info(
+ task : vm_task_t;
+ address : vm_address_t;
+ out region : vm_region_info_t;
+ /* avoid out-translation of the argument */
+ out object : memory_object_name_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t);
+
+routine mach_vm_object_info(
+ object : memory_object_name_t;
+ out info : vm_object_info_t;
+ /* avoid out-translation of the argument */
+ out shadow : memory_object_name_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+ /* avoid out-translation of the argument */
+ out copy : memory_object_name_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t);
+
+routine mach_vm_object_pages(
+ object : memory_object_name_t;
+ out pages : vm_page_info_array_t,
+ CountInOut, Dealloc);
+
+#else /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
+skip; /* mach_vm_region_info */
+skip; /* mach_vm_object_info */
+skip; /* mach_vm_object_pages */
+#endif /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
diff --git a/include/mach_debug/mach_debug_types.defs b/include/mach_debug/mach_debug_types.defs
new file mode 100644
index 00000000..64564ab2
--- /dev/null
+++ b/include/mach_debug/mach_debug_types.defs
@@ -0,0 +1,65 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach kernel debugging interface type declarations
+ */
+
+#ifndef _MACH_DEBUG_MACH_DEBUG_TYPES_DEFS_
+#define _MACH_DEBUG_MACH_DEBUG_TYPES_DEFS_
+
+#include <mach/std_types.defs>
+
+type zone_name_t = struct[80] of char;
+type zone_name_array_t = array[] of zone_name_t;
+
+type zone_info_t = struct[9] of integer_t;
+type zone_info_array_t = array[] of zone_info_t;
+
+type hash_info_bucket_t = struct[1] of natural_t;
+type hash_info_bucket_array_t = array[] of hash_info_bucket_t;
+
+type ipc_info_space_t = struct[6] of natural_t;
+
+type ipc_info_name_t = struct[9] of natural_t;
+type ipc_info_name_array_t = array[] of ipc_info_name_t;
+
+type ipc_info_tree_name_t = struct[11] of natural_t;
+type ipc_info_tree_name_array_t = array[] of ipc_info_tree_name_t;
+
+type vm_region_info_t = struct[11] of natural_t;
+type vm_region_info_array_t = array[] of vm_region_info_t;
+
+type vm_object_info_t = struct[14] of natural_t;
+type vm_object_info_array_t = array[] of vm_object_info_t;
+
+type vm_page_info_t = struct[6] of natural_t;
+type vm_page_info_array_t = array[] of vm_page_info_t;
+
+type symtab_name_t = (MACH_MSG_TYPE_STRING_C, 8*32);
+
+import <mach_debug/mach_debug_types.h>;
+
+#endif /* _MACH_DEBUG_MACH_DEBUG_TYPES_DEFS_ */
diff --git a/include/mach_debug/mach_debug_types.h b/include/mach_debug/mach_debug_types.h
new file mode 100644
index 00000000..9eb89548
--- /dev/null
+++ b/include/mach_debug/mach_debug_types.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach kernel debugging interface type declarations
+ */
+
+#ifndef _MACH_DEBUG_MACH_DEBUG_TYPES_H_
+#define _MACH_DEBUG_MACH_DEBUG_TYPES_H_
+
+#include <mach_debug/ipc_info.h>
+#include <mach_debug/vm_info.h>
+#include <mach_debug/zone_info.h>
+#include <mach_debug/hash_info.h>
+
+typedef char symtab_name_t[32];
+
+#endif /* _MACH_DEBUG_MACH_DEBUG_TYPES_H_ */
diff --git a/include/mach_debug/pc_info.h b/include/mach_debug/pc_info.h
new file mode 100644
index 00000000..bc43fa8d
--- /dev/null
+++ b/include/mach_debug/pc_info.h
@@ -0,0 +1,43 @@
+/*
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach_debug/pc_info.h
+ * Author: Brian Bershad
+ * Date: January 1992
+ *
+ * Definitions for the PC sampling interface.
+ */
+
+#ifndef _MACH_DEBUG_PC_INFO_H_
+#define _MACH_DEBUG_PC_INFO_H_
+
+
+typedef struct sampled_pc {
+ task_t task;
+ thread_t thread;
+ vm_offset_t pc;
+} sampled_pc_t;
+
+typedef sampled_pc_t *sampled_pc_array_t;
+typedef unsigned int sampled_pc_seqno_t;
+
+#endif /* _MACH_DEBUG_PC_INFO_H_ */
diff --git a/include/mach_debug/vm_info.h b/include/mach_debug/vm_info.h
new file mode 100644
index 00000000..3800cbd6
--- /dev/null
+++ b/include/mach_debug/vm_info.h
@@ -0,0 +1,132 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach_debug/vm_info.h
+ * Author: Rich Draves
+ * Date: March, 1990
+ *
+ * Definitions for the VM debugging interface.
+ */
+
+#ifndef _MACH_DEBUG_VM_INFO_H_
+#define _MACH_DEBUG_VM_INFO_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_inherit.h>
+#include <mach/vm_prot.h>
+#include <mach/memory_object.h>
+
+/*
+ * Remember to update the mig type definitions
+ * in mach_debug_types.defs when adding/removing fields.
+ */
+
+typedef struct vm_region_info {
+ vm_offset_t vri_start; /* start of region */
+ vm_offset_t vri_end; /* end of region */
+
+/*vm_prot_t*/natural_t vri_protection; /* protection code */
+/*vm_prot_t*/natural_t vri_max_protection; /* maximum protection */
+/*vm_inherit_t*/natural_t vri_inheritance; /* inheritance */
+ natural_t vri_wired_count; /* number of times wired */
+ natural_t vri_user_wired_count; /* number of times user has wired */
+
+ vm_offset_t vri_object; /* the mapped object */
+ vm_offset_t vri_offset; /* offset into object */
+/*boolean_t*/integer_t vri_needs_copy; /* does object need to be copied? */
+ natural_t vri_sharing; /* share map references */
+} vm_region_info_t;
+
+typedef vm_region_info_t *vm_region_info_array_t;
+
+
+typedef natural_t vm_object_info_state_t;
+
+#define VOI_STATE_PAGER_CREATED 0x00000001
+#define VOI_STATE_PAGER_INITIALIZED 0x00000002
+#define VOI_STATE_PAGER_READY 0x00000004
+#define VOI_STATE_CAN_PERSIST 0x00000008
+#define VOI_STATE_INTERNAL 0x00000010
+#define VOI_STATE_TEMPORARY 0x00000020
+#define VOI_STATE_ALIVE 0x00000040
+#define VOI_STATE_LOCK_IN_PROGRESS 0x00000080
+#define VOI_STATE_LOCK_RESTART 0x00000100
+#define VOI_STATE_USE_OLD_PAGEOUT 0x00000200
+
+typedef struct vm_object_info {
+ vm_offset_t voi_object; /* this object */
+ vm_size_t voi_pagesize; /* object's page size */
+ vm_size_t voi_size; /* object size (valid if internal) */
+ natural_t voi_ref_count; /* number of references */
+ natural_t voi_resident_page_count; /* number of resident pages */
+ natural_t voi_absent_count; /* number requested but not filled */
+ vm_offset_t voi_copy; /* copy object */
+ vm_offset_t voi_shadow; /* shadow object */
+ vm_offset_t voi_shadow_offset; /* offset into shadow object */
+ vm_offset_t voi_paging_offset; /* offset into memory object */
+/*memory_object_copy_strategy_t*/integer_t voi_copy_strategy;
+ /* how to handle data copy */
+ vm_offset_t voi_last_alloc; /* offset of last allocation */
+ natural_t voi_paging_in_progress; /* paging references */
+ vm_object_info_state_t voi_state; /* random state bits */
+} vm_object_info_t;
+
+typedef vm_object_info_t *vm_object_info_array_t;
+
+
+typedef natural_t vm_page_info_state_t;
+
+#define VPI_STATE_BUSY 0x00000001
+#define VPI_STATE_WANTED 0x00000002
+#define VPI_STATE_TABLED 0x00000004
+#define VPI_STATE_FICTITIOUS 0x00000008
+#define VPI_STATE_PRIVATE 0x00000010
+#define VPI_STATE_ABSENT 0x00000020
+#define VPI_STATE_ERROR 0x00000040
+#define VPI_STATE_DIRTY 0x00000080
+#define VPI_STATE_PRECIOUS 0x00000100
+#define VPI_STATE_OVERWRITING 0x00000200
+#define VPI_STATE_INACTIVE 0x00000400
+#define VPI_STATE_ACTIVE 0x00000800
+#define VPI_STATE_LAUNDRY 0x00001000
+#define VPI_STATE_FREE 0x00002000
+#define VPI_STATE_REFERENCE 0x00004000
+
+#define VPI_STATE_PAGER 0x80000000 /* pager has the page */
+
+typedef struct vm_page_info {
+ vm_offset_t vpi_offset; /* offset in object */
+ vm_offset_t vpi_phys_addr; /* physical address */
+ natural_t vpi_wire_count; /* number of times wired */
+/*vm_prot_t*/natural_t vpi_page_lock; /* XP access restrictions */
+/*vm_prot_t*/natural_t vpi_unlock_request; /* outstanding unlock requests */
+ vm_page_info_state_t vpi_state; /* random state bits */
+} vm_page_info_t;
+
+typedef vm_page_info_t *vm_page_info_array_t;
+
+#endif /* _MACH_DEBUG_VM_INFO_H_ */
diff --git a/include/mach_debug/zone_info.h b/include/mach_debug/zone_info.h
new file mode 100644
index 00000000..623bd58d
--- /dev/null
+++ b/include/mach_debug/zone_info.h
@@ -0,0 +1,61 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_DEBUG_ZONE_INFO_H_
+#define _MACH_DEBUG_ZONE_INFO_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ * Remember to update the mig type definitions
+ * in mach_debug_types.defs when adding/removing fields.
+ */
+
+#define ZONE_NAME_MAX_LEN 80
+
+typedef struct zone_name {
+ char zn_name[ZONE_NAME_MAX_LEN];
+} zone_name_t;
+
+typedef zone_name_t *zone_name_array_t;
+
+
+typedef struct zone_info {
+ integer_t zi_count; /* Number of elements used now */
+ vm_size_t zi_cur_size; /* current memory utilization */
+ vm_size_t zi_max_size; /* how large can this zone grow */
+ vm_size_t zi_elem_size; /* size of an element */
+ vm_size_t zi_alloc_size; /* size used for more memory */
+/*boolean_t*/integer_t zi_pageable; /* zone pageable? */
+/*boolean_t*/integer_t zi_sleepable; /* sleep if empty? */
+/*boolean_t*/integer_t zi_exhaustible; /* merely return if empty? */
+/*boolean_t*/integer_t zi_collectable; /* garbage collect elements? */
+} zone_info_t;
+
+typedef zone_info_t *zone_info_array_t;
+
+#endif /* _MACH_DEBUG_ZONE_INFO_H_ */
diff --git a/include/mach_error.h b/include/mach_error.h
new file mode 100644
index 00000000..4ed6d1b1
--- /dev/null
+++ b/include/mach_error.h
@@ -0,0 +1,67 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * File: mach_error.h
+ * Author: Douglas Orr, Carnegie Mellon University
+ * Date: Mar. 1988
+ *
+ * Definitions of routines in mach_error.c
+ */
+
+#ifndef _MACH_ERROR_
+#define _MACH_ERROR_ 1
+
+#include <mach/error.h>
+
+char *mach_error_string(
+/*
+ * Returns a string appropriate to the error argument given
+ */
+#if c_plusplus
+ mach_error_t error_value
+#endif /* c_plusplus */
+ );
+
+void mach_error(
+/*
+ * Prints an appropriate message on the standard error stream
+ */
+#if c_plusplus
+ char *str,
+ mach_error_t error_value
+#endif /* c_plusplus */
+ );
+
+char *mach_error_type(
+/*
+ * Returns a string with the error system, subsystem and code
+ */
+#if c_plusplus
+ mach_error_t error_value
+#endif c_plusplus
+ );
+
+#endif /* _MACH_ERROR_ */
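A minimal usage sketch of the three routines declared above; KERN_SUCCESS is assumed to come in via the usual Mach headers.

/* Hypothetical sketch: report a Mach error on stderr, or format it by hand. */
#include <stdio.h>
#include <mach.h>
#include <mach_error.h>

void
report(mach_error_t kr)
{
        if (kr != KERN_SUCCESS)
                mach_error("operation failed:", kr);
        /* or: printf("%s (%s)\n", mach_error_string(kr), mach_error_type(kr)); */
}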
diff --git a/include/mach_init.h b/include/mach_init.h
new file mode 100644
index 00000000..19d69d0c
--- /dev/null
+++ b/include/mach_init.h
@@ -0,0 +1,84 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987,1986 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Items provided by the Mach environment initialization.
+ */
+
+#ifndef _MACH_INIT_
+#define _MACH_INIT_ 1
+
+#include <mach/mach_types.h>
+
+/*
+ * Calls to the Unix emulation to supply privileged ports:
+ * the privileged host port and the master device port.
+ */
+
+#define mach_host_priv_self() task_by_pid(-1)
+#define mach_master_device_port() task_by_pid(-2)
+
+/*
+ * Kernel-related ports; how a task/thread controls itself
+ */
+
+extern mach_port_t mach_task_self_;
+
+#define mach_task_self() mach_task_self_
+
+#define current_task() mach_task_self()
+
+/*
+ * Other important ports in the Mach user environment
+ */
+
+extern mach_port_t name_server_port;
+extern mach_port_t environment_port;
+extern mach_port_t service_port;
+
+/*
+ * Where these ports occur in the "mach_ports_register"
+ * collection... only servers or the runtime library need know.
+ */
+
+#if MACH_INIT_SLOTS
+#define NAME_SERVER_SLOT 0
+#define ENVIRONMENT_SLOT 1
+#define SERVICE_SLOT 2
+
+#define MACH_PORTS_SLOTS_USED 3
+#endif /* MACH_INIT_SLOTS */
+
+/*
+ * Globally interesting numbers.
+ * These macros assume vm_page_size is a power-of-2.
+ */
+
+extern vm_size_t vm_page_size;
+
+#define trunc_page(x) ((x) &~ (vm_page_size - 1))
+#define round_page(x) trunc_page((x) + (vm_page_size - 1))
+
+#endif /* _MACH_INIT_ */
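A hedged sketch of the user-level round_page() macro above in action, sizing a vm_allocate() request to whole pages; the vm_allocate() prototype is assumed from the standard Mach interface.

/* Hypothetical sketch: allocate a page-rounded buffer anywhere in the
 * caller's address space. */
#include <mach.h>
#include <mach_init.h>

kern_return_t
alloc_buffer(vm_size_t bytes, vm_offset_t *addr)
{
        *addr = 0;
        /* round up to a multiple of the run-time vm_page_size */
        return vm_allocate(mach_task_self(), addr, round_page(bytes), TRUE);
}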
diff --git a/include/servers/machid.defs b/include/servers/machid.defs
new file mode 100644
index 00000000..185f251a
--- /dev/null
+++ b/include/servers/machid.defs
@@ -0,0 +1,550 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+subsystem machid 9829283;
+
+userprefix machid_;
+serverprefix do_;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <servers/machid_types.defs>
+
+routine mach_type(
+ server : mach_port_t;
+ auth : mach_port_t;
+ id : mach_id_t;
+ out mtype : mach_type_t);
+
+routine mach_register(
+ server : mach_port_t;
+ auth : mach_port_t;
+ port : mach_port_t;
+ mtype : mach_type_t;
+ out id : mach_id_t);
+
+routine mach_lookup(
+ server : mach_port_t;
+ auth : mach_port_t;
+ name : mach_id_t;
+ atype : mach_type_t;
+ out aname : mach_id_t);
+
+routine mach_port(
+ server : mach_port_t;
+ auth : mach_port_t;
+ name : mach_id_t;
+ out port : mach_port_move_send_t);
+
+routine host_ports(
+ server : mach_port_t;
+ auth : mach_port_t;
+ out host : mhost_t;
+ out phost : mhost_priv_t);
+
+routine host_processor_sets(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_priv_t;
+ out sets : mprocessor_set_array_t,
+ CountInOut, Dealloc);
+
+routine host_tasks(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_priv_t;
+ out tasks : mtask_array_t,
+ CountInOut, Dealloc);
+
+routine host_threads(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_priv_t;
+ out threads : mthread_array_t,
+ CountInOut, Dealloc);
+
+routine host_processors(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_priv_t;
+ out procs : mprocessor_array_t,
+ CountInOut, Dealloc);
+
+routine processor_set_threads(
+ server : mach_port_t;
+ auth : mach_port_t;
+ pset : mprocessor_set_t;
+ out threads : mthread_array_t,
+ CountInOut, Dealloc);
+
+routine processor_set_tasks(
+ server : mach_port_t;
+ auth : mach_port_t;
+ pset : mprocessor_set_t;
+ out tasks : mtask_array_t,
+ CountInOut, Dealloc);
+
+routine task_threads(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ out threads : mthread_array_t,
+ CountInOut, Dealloc);
+
+routine host_basic_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_t;
+ out info : host_basic_info_data_t);
+
+routine host_sched_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_t;
+ out info : host_sched_info_data_t);
+
+routine host_load_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_t;
+ out info : host_load_info_data_t);
+
+routine processor_set_default(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_t;
+ out pset : mprocessor_set_name_t);
+
+routine host_kernel_version(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_t;
+ out kernel_version : kernel_version_t);
+
+routine processor_basic_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ proc : mprocessor_t;
+ out host : mhost_t;
+ out info : processor_basic_info_data_t);
+
+routine processor_set_basic_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ pset : mprocessor_set_name_t;
+ out host : mhost_t;
+ out info : processor_set_basic_info_data_t);
+
+routine processor_set_sched_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ pset : mprocessor_set_name_t;
+ out host : mhost_t;
+ out info : processor_set_sched_info_data_t);
+
+routine task_unix_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ out pid : unix_pid_t;
+ out comm : unix_command_t);
+
+routine task_basic_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ out info : task_basic_info_data_t);
+
+routine task_thread_times_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ out times : task_thread_times_info_data_t);
+
+routine thread_basic_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ out info : thread_basic_info_data_t);
+
+routine thread_sched_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ out info : thread_sched_info_data_t);
+
+#ifdef mips
+routine mips_thread_state(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ out state : mips_thread_state_t);
+#else
+skip;
+#endif
+
+#ifdef sun3
+routine sun3_thread_state(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ out state : sun3_thread_state_t);
+#else
+skip;
+#endif
+
+#ifdef vax
+routine vax_thread_state(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ out state : vax_thread_state_t);
+#else
+skip;
+#endif
+
+#ifdef i386
+routine i386_thread_state(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ out state : i386_thread_state_t);
+#else
+skip;
+#endif
+
+routine task_terminate(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t);
+
+routine task_suspend(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t);
+
+routine task_resume(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t);
+
+routine thread_terminate(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t);
+
+routine thread_suspend(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t);
+
+routine thread_resume(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t);
+
+routine thread_abort(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t);
+
+skip; /* was thread_depress_abort */
+
+routine processor_set_destroy(
+ server : mach_port_t;
+ auth : mach_port_t;
+ pset : mprocessor_set_t);
+
+routine processor_start(
+ server : mach_port_t;
+ auth : mach_port_t;
+ processor : mprocessor_t);
+
+routine processor_exit(
+ server : mach_port_t;
+ auth : mach_port_t;
+ processor : mprocessor_t);
+
+routine vm_region(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ addr : vm_offset_t;
+ out info : vm_region_t);
+
+routine vm_read(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ addr : vm_offset_t;
+ size : vm_size_t;
+ out data : pointer_t, Dealloc);
+
+routine thread_priority(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ priority : int;
+ set_max : boolean_t);
+
+routine thread_max_priority(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ pset : mprocessor_set_t;
+ max_pri : int);
+
+routine task_priority(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ priority : int;
+ change_threads : boolean_t);
+
+routine processor_set_max_priority(
+ server : mach_port_t;
+ auth : mach_port_t;
+ pset : mprocessor_set_t;
+ max_pri : int;
+ change_threads : boolean_t);
+
+routine port_names(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ out names : mach_port_name_array_t,
+ CountInOut, Dealloc;
+ out types : mach_port_type_array_t,
+ CountInOut, Dealloc);
+
+routine port_type(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ name : mach_port_name_t;
+ out ptype : mach_port_type_t);
+
+routine port_get_refs(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ name : mach_port_name_t;
+ right : mach_port_right_t;
+ out refs : mach_port_urefs_t);
+
+routine port_get_receive_status(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ name : mach_port_name_t;
+ out status : mach_port_status_t);
+
+routine port_get_set_status(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ name : mach_port_name_t;
+ out members : mach_port_name_array_t,
+ CountInOut, Dealloc);
+
+routine processor_get_assignment(
+ server : mach_port_t;
+ auth : mach_port_t;
+ proc : mprocessor_t;
+ out pset : mprocessor_set_name_t);
+
+routine thread_get_assignment(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ out pset : mprocessor_set_name_t);
+
+routine task_get_assignment(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ out pset : mprocessor_set_name_t);
+
+routine host_processor_set_priv(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_priv_t;
+ psetn : mprocessor_set_name_t;
+ out pset : mprocessor_set_t);
+
+routine host_processor_set_names(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_t;
+ out sets : mprocessor_set_name_array_t,
+ CountInOut, Dealloc);
+
+routine processor_set_create(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_t;
+ out pset : mprocessor_set_t;
+ out psetn : mprocessor_set_name_t);
+
+routine task_create(
+ server : mach_port_t;
+ auth : mach_port_t;
+ parent : mtask_t;
+ inherit : boolean_t;
+ out task : mtask_t);
+
+routine thread_create(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ out thread : mthread_t);
+
+routine processor_assign(
+ server : mach_port_t;
+ auth : mach_port_t;
+ processor : mprocessor_t;
+ pset : mprocessor_set_t;
+ wait : boolean_t);
+
+routine thread_assign(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ pset : mprocessor_set_t);
+
+routine thread_assign_default(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t);
+
+routine task_assign(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ pset : mprocessor_set_t;
+ assign_threads : boolean_t);
+
+routine task_assign_default(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ assign_threads : boolean_t);
+
+routine thread_policy(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ policy : int;
+ data : int);
+
+routine processor_set_policy_enable(
+ server : mach_port_t;
+ auth : mach_port_t;
+ processor_set : mprocessor_set_t;
+ policy : int);
+
+routine processor_set_policy_disable(
+ server : mach_port_t;
+ auth : mach_port_t;
+ processor_set : mprocessor_set_t;
+ policy : int;
+ change_threads : boolean_t);
+
+routine host_default_pager(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_priv_t;
+ out default_pager : mdefault_pager_t);
+
+skip; /* was default_pager_info */
+
+routine vm_statistics(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ out data : vm_statistics_data_t);
+
+routine host_kernel_task(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_priv_t;
+ out kernel_task : mtask_t);
+
+routine task_host(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ out host : mhost_t);
+
+routine thread_host(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ out host : mhost_t);
+
+routine processor_host(
+ server : mach_port_t;
+ auth : mach_port_t;
+ proc : mprocessor_t;
+ out host : mhost_t);
+
+
+#ifdef sun4
+routine sparc_thread_state(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ out state : sparc_thread_state_t);
+#else /* sun4 */
+skip;
+#endif /* sun4 */
+
+#ifdef alpha
+routine alpha_thread_state(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ out state : alpha_thread_state_t);
+#else
+skip;
+#endif
+
+#ifdef parisc
+routine parisc_thread_state(
+ server : mach_port_t;
+ auth : mach_port_t;
+ thread : mthread_t;
+ out state : parisc_thread_state_t);
+#else
+skip;
+#endif
+
+routine task_set_unix_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ pid : unix_pid_t;
+ comm : unix_command_t);
diff --git a/include/servers/machid_debug.defs b/include/servers/machid_debug.defs
new file mode 100644
index 00000000..63bae71c
--- /dev/null
+++ b/include/servers/machid_debug.defs
@@ -0,0 +1,127 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+subsystem machid_debug 2398925;
+
+userprefix machid_;
+serverprefix do_;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <mach_debug/mach_debug_types.defs>
+#include <servers/machid_types.defs>
+
+routine port_get_srights(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ name : mach_port_name_t;
+ out srights : mach_port_rights_t);
+
+routine port_space_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ out info : ipc_info_space_t;
+ out table_info : ipc_info_name_array_t,
+ CountInOut, Dealloc;
+ out tree_info : ipc_info_tree_name_array_t,
+ CountInOut, Dealloc);
+
+routine port_dnrequest_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ name : mach_port_name_t;
+ out total : unsigned;
+ out used : unsigned);
+
+skip; /* vm_region_info */
+
+routine host_stack_usage(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_t;
+ out reserved : vm_size_t;
+ out total : unsigned;
+ out space : vm_size_t;
+ out resident : vm_size_t;
+ out maxusage : vm_size_t;
+ out maxstack : vm_offset_t);
+
+routine processor_set_stack_usage(
+ server : mach_port_t;
+ auth : mach_port_t;
+ pset : mprocessor_set_t;
+ out total : unsigned;
+ out space : vm_size_t;
+ out resident : vm_size_t;
+ out maxusage : vm_size_t;
+ out maxstack : vm_offset_t);
+
+routine host_zone_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ host : mhost_t;
+ out names : zone_name_array_t,
+ CountInOut, Dealloc;
+ out info : zone_info_array_t,
+ CountInOut, Dealloc);
+
+routine port_kernel_object(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ name : mach_port_name_t;
+ out object_type : unsigned;
+ out object_addr : vm_offset_t);
+
+routine mach_kernel_object(
+ server : mach_port_t;
+ auth : mach_port_t;
+ id : mach_id_t;
+ out object_type : mach_type_t;
+ out object_addr : vm_offset_t);
+
+routine vm_region_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ task : mtask_t;
+ addr : vm_offset_t;
+ out region : vm_region_info_t);
+
+routine vm_object_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ object : mobject_name_t;
+ out info : vm_object_info_t);
+
+routine vm_object_pages(
+ server : mach_port_t;
+ auth : mach_port_t;
+ object : mobject_name_t;
+ out pages : vm_page_info_array_t,
+ CountInOut, Dealloc);
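+
+/*
+ * Illustrative only, not generated from this file: with the
+ * "userprefix machid_" directive above, MIG emits a C client stub for
+ * each routine, taking the server and auth ports first and returning
+ * "out" arguments through pointers. Using the ports declared in
+ * <servers/machid_lib.h>, a call to port_get_srights would look
+ * roughly like:
+ *
+ *	mach_port_rights_t srights;
+ *	kern_return_t kr;
+ *
+ *	kr = machid_port_get_srights(machid_server_port, machid_auth_port,
+ *				     task, name, &srights);
+ */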
diff --git a/include/servers/machid_dpager.defs b/include/servers/machid_dpager.defs
new file mode 100644
index 00000000..00fa09c6
--- /dev/null
+++ b/include/servers/machid_dpager.defs
@@ -0,0 +1,56 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+subsystem machid_dpager 6357721;
+
+userprefix machid_;
+serverprefix do_;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <mach/default_pager_types.defs>
+#include <servers/machid_types.defs>
+
+routine default_pager_info(
+ server : mach_port_t;
+ auth : mach_port_t;
+ default_pager : mdefault_pager_t;
+ out info : default_pager_info_t);
+
+routine default_pager_objects(
+ server : mach_port_t;
+ auth : mach_port_t;
+ default_pager : mdefault_pager_t;
+ out objects : default_pager_object_array_t,
+ CountInOut, Dealloc);
+
+routine default_pager_object_pages(
+ server : mach_port_t;
+ auth : mach_port_t;
+ default_pager : mdefault_pager_t;
+ object : mobject_name_t;
+ out pages : default_pager_page_array_t,
+ CountInOut, Dealloc);
diff --git a/include/servers/machid_lib.h b/include/servers/machid_lib.h
new file mode 100644
index 00000000..035195f9
--- /dev/null
+++ b/include/servers/machid_lib.h
@@ -0,0 +1,172 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACHID_LIB_H_
+#define _MACHID_LIB_H_
+
+#include <mach/machine/vm_types.h>
+#include <mach/default_pager_types.h>
+#include <mach_debug/vm_info.h>
+#include <servers/machid_types.h>
+
+/* values for mach_type_t */
+
+#define MACH_TYPE_NONE 0
+#define MACH_TYPE_TASK 1
+#define MACH_TYPE_THREAD 2
+#define MACH_TYPE_PROCESSOR_SET 3
+#define MACH_TYPE_PROCESSOR_SET_NAME 4
+#define MACH_TYPE_PROCESSOR 5
+#define MACH_TYPE_HOST 6
+#define MACH_TYPE_HOST_PRIV 7
+#define MACH_TYPE_OBJECT 8
+#define MACH_TYPE_OBJECT_CONTROL 9
+#define MACH_TYPE_OBJECT_NAME 10
+#define MACH_TYPE_MASTER_DEVICE 11
+#define MACH_TYPE_DEFAULT_PAGER 12
+
+/* convert a mach_type_t to a string */
+
+extern char *mach_type_string(/* mach_type_t */);
+
+/* at the moment, mach/kern_return.h doesn't define these,
+ but maybe it will define some of them someday */
+
+#ifndef KERN_INVALID_THREAD
+#define KERN_INVALID_THREAD KERN_INVALID_ARGUMENT
+#endif /* KERN_INVALID_THREAD */
+
+#ifndef KERN_INVALID_PROCESSOR_SET
+#define KERN_INVALID_PROCESSOR_SET KERN_INVALID_ARGUMENT
+#endif /* KERN_INVALID_PROCESSOR_SET */
+
+#ifndef KERN_INVALID_PROCESSOR_SET_NAME
+#define KERN_INVALID_PROCESSOR_SET_NAME KERN_INVALID_ARGUMENT
+#endif /* KERN_INVALID_PROCESSOR_SET_NAME */
+
+#ifndef KERN_INVALID_HOST_PRIV
+#define KERN_INVALID_HOST_PRIV KERN_INVALID_HOST
+#endif /* KERN_INVALID_HOST_PRIV */
+
+#ifndef KERN_INVALID_PROCESSOR
+#define KERN_INVALID_PROCESSOR KERN_INVALID_ARGUMENT
+#endif /* KERN_INVALID_PROCESSOR */
+
+#ifndef KERN_INVALID_DEFAULT_PAGER
+#define KERN_INVALID_DEFAULT_PAGER KERN_INVALID_ARGUMENT
+#endif /* KERN_INVALID_DEFAULT_PAGER */
+
+#ifndef KERN_INVALID_MEMORY_OBJECT
+#define KERN_INVALID_MEMORY_OBJECT KERN_INVALID_ARGUMENT
+#endif /* KERN_INVALID_MEMORY_OBJECT */
+
+/*
+ * Some machid library functions access the machid server
+ * using these two ports.
+ */
+
+extern mach_port_t machid_server_port; /* machid server */
+extern mach_port_t machid_auth_port; /* machid authentication port */
+
+/*
+ * The kernel and default pager provide several functions
+ * for accessing the internal VM data structures.
+ * The machid server provides access to these functions.
+ * However, they are inconvenient to use directly.
+ * These library functions present this capability
+ * in an easier-to-use form.
+ */
+
+typedef struct object {
+ struct object *o_link; /* hash table link */
+
+ vm_object_info_t o_info; /* object name and attributes */
+ /* vpi_offset fields are biased by voi_paging_offset */
+ vm_page_info_t *o_pages; /* object pages */
+ unsigned int o_num_pages; /* number of pages */
+ vm_page_info_t *o_hint; /* hint pointer into o_pages */
+ mdefault_pager_t o_dpager; /* default pager for the object */
+ default_pager_object_t o_dpager_info; /* default pager info */
+ struct object *o_shadow; /* pointer to shadow object */
+
+ unsigned int o_flag;
+} object_t;
+
+/* get object chain, optionally getting default-pager and resident-page info */
+
+extern object_t *get_object(/* mobject_name_t object,
+ boolean_t dpager, pages */);
+
+/* convert object to privileged host */
+
+extern mhost_priv_t get_object_host(/* mobject_name_t object */);
+
+/* convert privileged host to default pager */
+
+extern mdefault_pager_t get_host_dpager(/* mhost_priv_t host */);
+
+/* convert object to default pager */
+
+extern mdefault_pager_t get_object_dpager(/* mobject_name_t object */);
+
+/* get object/size info from the default pager */
+
+extern void get_dpager_objects(/* mdefault_pager_t dpager,
+ default_pager_object_t **objectsp,
+ unsigned int *numobjectsp */);
+
+/* find a particular object in array from get_dpager_objects */
+
+extern default_pager_object_t *
+find_dpager_object(/* mobject_name_t object,
+ default_pager_object_t *objects,
+ unsigned int count */);
+
+/* the object offset is already biased by voi_paging_offset */
+
+extern vm_page_info_t *
+lookup_page_object_prim(/* object_t *object, vm_offset_t offset */);
+
+/* the object offset is already biased by voi_paging_offset */
+
+extern void
+lookup_page_object(/* object_t *chain, vm_offset_t offset,
+ object_t **objectp, vm_page_info_t **infop */);
+
+/* the object offset is not biased; follows shadow pointers */
+
+extern void
+lookup_page_chain(/* object_t *chain, vm_offset_t offset,
+ object_t **objectp, vm_page_info_t **infop */);
+
+/* returns range (inclusive/exclusive) for pages in the object,
+ biased by voi_paging_offset */
+
+extern void
+get_object_bounds(/* object_t *object,
+ vm_offset_t *startp, vm_offset_t *endp */);
+
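+/*
+ * Illustrative sketch only (not part of the library): a caller might
+ * combine the helpers above to find the page backing an unbiased
+ * offset, following shadow objects; "name" and "offset" stand for
+ * values obtained elsewhere:
+ *
+ *	object_t *chain, *obj;
+ *	vm_page_info_t *info;
+ *
+ *	chain = get_object(name, TRUE, TRUE);
+ *	lookup_page_chain(chain, offset, &obj, &info);
+ *	if (info != 0)
+ *		... the page for "offset" lives in "obj" ...
+ */
+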
+#endif /* _MACHID_LIB_H_ */
diff --git a/include/servers/machid_types.defs b/include/servers/machid_types.defs
new file mode 100644
index 00000000..b744ffce
--- /dev/null
+++ b/include/servers/machid_types.defs
@@ -0,0 +1,73 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#ifndef _MACHID_TYPES_DEFS_
+#define _MACHID_TYPES_DEFS_
+
+#include <mach/mach_types.defs>
+
+type mach_id_t = unsigned;
+type mach_type_t = unsigned;
+
+type mhost_t = mach_id_t;
+type mhost_priv_t = mach_id_t;
+
+type mdefault_pager_t = mach_id_t;
+
+type mprocessor_t = mach_id_t;
+type mprocessor_array_t = array[] of mprocessor_t;
+
+type mprocessor_set_t = mach_id_t;
+type mprocessor_set_array_t = array[] of mprocessor_set_t;
+type mprocessor_set_name_t = mach_id_t;
+type mprocessor_set_name_array_t = array[] of mprocessor_set_name_t;
+
+type mtask_t = mach_id_t;
+type mtask_array_t = array[] of mtask_t;
+
+type mthread_t = mach_id_t;
+type mthread_array_t = array[] of mthread_t;
+
+type mobject_t = mach_id_t;
+type mobject_control_t = mach_id_t;
+type mobject_name_t = mach_id_t;
+
+type mips_thread_state_t = struct[34] of unsigned;
+type sun3_thread_state_t = struct[47] of unsigned;
+type sparc_thread_state_t = struct[73] of unsigned;
+type vax_thread_state_t = struct[17] of unsigned;
+type i386_thread_state_t = struct[17] of unsigned;
+type alpha_thread_state_t = struct[32] of natural_t;
+type parisc_thread_state_t = struct[128] of natural_t;
+
+type vm_region_t = struct[8] of natural_t;
+
+type unix_pid_t = int;
+type unix_command_t = array[*:1024] of char;
+
+import <servers/machid_types.h>;
+
+#endif /* _MACHID_TYPES_DEFS_ */
diff --git a/include/servers/machid_types.h b/include/servers/machid_types.h
new file mode 100644
index 00000000..e118754b
--- /dev/null
+++ b/include/servers/machid_types.h
@@ -0,0 +1,110 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#ifndef _MACHID_TYPES_H_
+#define _MACHID_TYPES_H_
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/task_info.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_prot.h>
+#include <mach/vm_inherit.h>
+
+/* define types for machid_types.defs */
+
+typedef unsigned int mach_id_t;
+typedef unsigned int mach_type_t;
+
+typedef mach_id_t mhost_t;
+typedef mach_id_t mhost_priv_t;
+
+typedef mach_id_t mdefault_pager_t;
+
+typedef mach_id_t mprocessor_t;
+typedef mprocessor_t *mprocessor_array_t;
+
+typedef mach_id_t mprocessor_set_t;
+typedef mprocessor_set_t *mprocessor_set_array_t;
+typedef mach_id_t mprocessor_set_name_t;
+typedef mprocessor_set_name_t *mprocessor_set_name_array_t;
+
+typedef mach_id_t mtask_t;
+typedef mtask_t *mtask_array_t;
+
+typedef mach_id_t mthread_t;
+typedef mthread_t *mthread_array_t;
+
+typedef mach_id_t mobject_t;
+typedef mach_id_t mobject_control_t;
+typedef mach_id_t mobject_name_t;
+
+typedef struct vm_region {
+ vm_offset_t vr_address;
+ vm_size_t vr_size;
+/*vm_prot_t*/integer_t vr_prot;
+/*vm_prot_t*/integer_t vr_max_prot;
+/*vm_inherit_t*/integer_t vr_inherit;
+/*boolean_t*/integer_t vr_shared;
+/*mobject_name_t*/integer_t vr_name;
+ vm_offset_t vr_offset;
+} vm_region_t;
+
+#include <mach/machine/thread_status.h>
+
+#ifdef mips
+typedef struct mips_thread_state mips_thread_state_t;
+#endif /* mips */
+
+#ifdef sun3
+typedef struct sun_thread_state sun3_thread_state_t;
+#endif /* sun3 */
+
+#ifdef sun4
+typedef struct sparc_thread_state sparc_thread_state_t;
+#endif /* sun4 */
+
+#ifdef vax
+typedef struct vax_thread_state vax_thread_state_t;
+#endif /* vax */
+
+#ifdef i386
+typedef struct i386_thread_state i386_thread_state_t;
+#endif /* i386 */
+
+#ifdef alpha
+typedef struct alpha_thread_state alpha_thread_state_t;
+#endif /* alpha */
+
+#ifdef parisc
+typedef struct parisc_thread_state parisc_thread_state_t;
+#endif /* parisc */
+
+typedef int unix_pid_t;
+typedef char *unix_command_t;
+
+#endif /* _MACHID_TYPES_H_ */
diff --git a/include/servers/netname.defs b/include/servers/netname.defs
new file mode 100644
index 00000000..2f7c2c84
--- /dev/null
+++ b/include/servers/netname.defs
@@ -0,0 +1,63 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * File: netname.defs
+ * Author: Dan Julin, Carnegie Mellon University
+ * Date: May 1989
+ *
+ * Mig definitions for Network Name Service.
+ */
+
+subsystem netname 1040;
+
+serverprefix do_;
+
+#include <mach/std_types.defs>
+
+type netname_name_t = (MACH_MSG_TYPE_STRING,8*80);
+
+import <servers/netname_defs.h>;
+
+routine netname_check_in(
+ server_port : mach_port_t;
+ port_name : netname_name_t;
+ signature : mach_port_copy_send_t;
+ port_id : mach_port_make_send_t);
+
+routine netname_look_up(
+ server_port : mach_port_t;
+ host_name : netname_name_t;
+ port_name : netname_name_t;
+ out port_id : mach_port_move_send_t);
+
+routine netname_check_out(
+ server_port : mach_port_t;
+ port_name : netname_name_t;
+ signature : mach_port_copy_send_t);
+
+routine netname_version(
+ server_port : mach_port_t;
+ out version : netname_name_t);
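+
+/*
+ * Illustrative only: a typical client-side use of this interface is a
+ * name lookup through the MIG-generated stub; by convention an empty
+ * host name refers to the local host, and name_server_port is assumed
+ * to be supplied by the runtime:
+ *
+ *	mach_port_t port;
+ *	kern_return_t kr;
+ *
+ *	kr = netname_look_up(name_server_port, "", "SomeService", &port);
+ */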
diff --git a/include/servers/netname_defs.h b/include/servers/netname_defs.h
new file mode 100644
index 00000000..a065f3ac
--- /dev/null
+++ b/include/servers/netname_defs.h
@@ -0,0 +1,50 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * File: netname_defs.h
+ * Author: Dan Julin, Carnegie Mellon University
+ * Date: Dec. 1986
+ *
+ * Definitions for the mig interface to the network name service.
+ */
+
+#ifndef _NETNAME_DEFS_
+#define _NETNAME_DEFS_
+
+#define NETNAME_SUCCESS (0)
+#define NETNAME_PENDING (-1)
+#define NETNAME_NOT_YOURS (1000)
+#define NAME_NOT_YOURS (1000)
+#define NETNAME_NOT_CHECKED_IN (1001)
+#define NAME_NOT_CHECKED_IN (1001)
+#define NETNAME_NO_SUCH_HOST (1002)
+#define NETNAME_HOST_NOT_FOUND (1003)
+#define NETNAME_INVALID_PORT (1004)
+
+typedef char netname_name_t[80];
+
+#endif /* _NETNAME_DEFS_ */
diff --git a/ipc/fipc.c b/ipc/fipc.c
new file mode 100644
index 00000000..ebab640c
--- /dev/null
+++ b/ipc/fipc.c
@@ -0,0 +1,795 @@
+/*
+ * Copyright (c) 1996-1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Utah $Hdr: fipc.c 1.1 96/2/29$
+ * Author: Linus Kamb
+ */
+
+#ifdef FIPC
+
+#include <mach/kern_return.h>
+
+#include <device/device_types.h>
+#include <device/device.h>
+#include <device/dev_hdr.h>
+#include <device/device_port.h>
+#include <device/io_req.h>
+#include <device/if_ether.h>
+#include <net_io.h>
+#include <spl.h>
+#include <kern/lock.h>
+
+#include "fipc.h"
+
+void fipc_packet();
+void allocate_fipc_buffers(boolean_t);
+int fipc_lookup(unsigned short port);
+int fipc_lookup_table_enter(unsigned short port);
+int fipc_lookup_table_remove(unsigned short port);
+int f_lookup_hash(unsigned short port);
+int fipc_done(io_req_t ior);
+
+
+/********************************************************************
+ * fipc variables
+ ********************************************************************/
+
+fipc_port_t fports[N_MAX_OPEN_FIPC_PORTS];
+fipc_lookup_table_ent fipc_lookup_table[N_MAX_OPEN_FIPC_PORTS];
+
+int n_free_recv_bufs = 0;
+int n_free_send_bufs = 0;
+int n_fipc_recv_ports_used = 0;
+
+int fipc_sends = 0;
+int fipc_recvs = 0;
+
+fipc_stat_t fipc_stats;
+
+char *fipc_recv_free_list = NULL;
+char *fipc_recv_free_list_tail = NULL;
+char *fipc_send_free_list = NULL;
+char *fipc_send_free_list_tail = NULL;
+
+/* fipc locks */
+decl_simple_lock_data(, fipc_lock);
+decl_simple_lock_data(, fipc_buf_q_lock);
+
+
+/*
+ * Routine: fipc_init(): initializes the fipc data structures.
+ */
+
+void fipc_init(void)
+{
+ int i;
+
+ allocate_fipc_buffers(TRUE); /* recv buffers */
+ allocate_fipc_buffers(FALSE); /* send buffers */
+
+ fipc_stats.dropped_msgs = 0;
+
+ bzero (&fports, sizeof(fports));
+ for (i=0; i<N_MAX_OPEN_FIPC_PORTS; i++)
+ {
+ simple_lock_init(&(fports[i].lock));
+ fipc_lookup_table[i].fpt_num = INVALID;
+ fipc_lookup_table[i].fipc_port = INVALID;
+ }
+}
+
+
+/*
+ * Routine: allocate_fipc_buffers(): allocate more buffers
+ * Currently we are only allocating 1500 byte (ETHERMTU) buffers.
+ * We use the first word in the buffer as the pointer to the next.
+ */
+
+void allocate_fipc_buffers(boolean_t r_buf)
+{
+ char *new_pg;
+ char **free_list, **free_list_tail;
+ int *free_count, min_count, max_count;
+ int total_buffer_size;
+
+ if (r_buf)
+ {
+ free_count = &n_free_recv_bufs;
+ min_count = N_MIN_RECV_BUFS;
+ max_count = N_MAX_RECV_BUFS;
+ free_list = &fipc_recv_free_list;
+ free_list_tail = &fipc_recv_free_list_tail;
+ total_buffer_size = (N_MAX_RECV_BUFS * FIPC_BUFFER_SIZE);
+ total_buffer_size = round_page(total_buffer_size);
+ }
+ else
+ {
+ free_count = &n_free_send_bufs;
+ min_count = N_MIN_SEND_BUFS;
+ max_count = N_MAX_SEND_BUFS;
+ free_list = &fipc_send_free_list;
+ free_list_tail = &fipc_send_free_list_tail;
+ total_buffer_size = (N_MAX_SEND_BUFS * FIPC_BUFFER_SIZE);
+ total_buffer_size = round_page(total_buffer_size);
+ }
+
+ if (!(*free_count)) /* empty buffer pool */
+ {
+#ifdef FI_DEBUG
+ printf ("Allocating new fipc ");
+ if (r_buf)
+ printf ("recv buffer pool.\n");
+ else
+ printf ("send buffer pool.\n");
+#endif
+ *free_list = (char*)kalloc (total_buffer_size);
+ if (!*free_list) /* bummer */
+ panic("allocate_fipc_buffers: no memory");
+ *free_list_tail = *free_list;
+ for (*free_count=1; *free_count<max_count; (*free_count)++)
+ {
+ *(char**)*free_list_tail = *free_list_tail + FIPC_BUFFER_SIZE;
+ *free_list_tail += FIPC_BUFFER_SIZE;
+ }
+ *(char**)*free_list_tail = NULL;
+ }
+ else /* Request to grow the buffer pool. */
+ {
+#ifdef FI_DEBUG
+ printf ("Growing fipc ");
+ if (r_buf)
+ printf ("recv buffer pool.\n");
+ else
+ printf ("send buffer pool.\n");
+#endif
+
+#define GROW_SIZE 8192
+ new_pg = (char*)kalloc (round_page(GROW_SIZE));
+ if (new_pg)
+ {
+ int new_cnt, n_new = GROW_SIZE / FIPC_BUFFER_SIZE;
+
+ /* Link the new page after the current tail, then chain
+ * the new page's buffers together starting from new_pg. */
+ if (*free_list_tail != NULL)
+ *(char**)*free_list_tail = new_pg;
+ *free_list_tail = new_pg;
+ for (new_cnt = 1; new_cnt < n_new; new_cnt++)
+ {
+ *(char**)*free_list_tail = *free_list_tail + FIPC_BUFFER_SIZE;
+ *free_list_tail += FIPC_BUFFER_SIZE;
+ }
+ *(char**)*free_list_tail = NULL;
+ *free_count += n_new;
+ }
+#ifdef FI_DEBUG
+ else
+ printf ("### kalloc failed in allocate_fipc_buffers()\n");
+#endif
+
+ }
+}
+
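+/*
+ * Free-list layout used above (illustrative): the first word of each
+ * buffer is the link to the next free buffer, so a pool is threaded as
+ *
+ *	free_list -> [next|...] -> [next|...] -> ... -> [NULL|...]
+ *
+ * and popping the head (as get_fipc_buffer does below) is simply
+ *
+ *	head = *free_list;
+ *	*free_list = *(char **)head;
+ */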
+
+/*
+ * Routine: get_fipc_buffer (): returns a free buffer
+ * Takes a size (currently not used), a boolean flag telling whether a
+ * receive buffer is wanted, and a boolean flag telling whether the
+ * request is made at interrupt level.
+ */
+
+inline
+char* get_fipc_buffer(int size, boolean_t r_buf, boolean_t at_int_lvl)
+{
+ /* we currently don't care about size, since there is only one
+ * buffer pool. */
+
+ char* head;
+ char **free_list;
+ int *free_count, min_count;
+
+ if (r_buf)
+ {
+ free_count = &n_free_recv_bufs;
+ free_list = &fipc_recv_free_list;
+ min_count = N_MIN_RECV_BUFS;
+ }
+ else
+ {
+ free_count = &n_free_send_bufs;
+ free_list = &fipc_send_free_list;
+ min_count = N_MIN_SEND_BUFS;
+ }
+
+ /*
+ * Since we currently allocate a full complement of receive buffers,
+ * there is no need to allocate more receive buffers. But that is likely
+ * to change, I'm sure.
+ */
+
+ if (*free_count < min_count)
+ {
+ if (!at_int_lvl)
+ allocate_fipc_buffers(r_buf);
+ }
+
+ if (*free_count)
+ {
+ head = *free_list;
+ *free_list = *(char**)*free_list;
+ (*free_count)--;
+ return head;
+ }
+ else
+ return NULL;
+}
+
+
+/*
+ * Routine: return_fipc_buffer (): puts a used buffer back in the pool.
+ */
+
+inline
+void return_fipc_buffer(char* buf, int size,
+ boolean_t r_buf, boolean_t at_int_lvl)
+{
+ /* return the buffer to the free pool */
+ char **free_list, **free_list_tail;
+ int *free_count, min_count;
+
+ if (r_buf)
+ {
+ free_count = &n_free_recv_bufs;
+ free_list = &fipc_recv_free_list;
+ free_list_tail = &fipc_recv_free_list_tail;
+ min_count = N_MIN_RECV_BUFS;
+ }
+ else
+ {
+ free_count = &n_free_send_bufs;
+ free_list = &fipc_send_free_list;
+ free_list_tail = &fipc_send_free_list_tail;
+ min_count = N_MIN_SEND_BUFS;
+ }
+
+#ifdef FI_SECURE
+ bzero(buf, FIPC_BUFFER_SIZE);
+#endif
+
+ if (*free_list_tail != NULL)
+ *(char**)*free_list_tail = buf;
+ *free_list_tail = buf;
+ (*free_count)++;
+ *(char**)buf = NULL;
+
+ if (!at_int_lvl)
+ if (*free_count < min_count)
+ allocate_fipc_buffers(r_buf);
+
+ return;
+}
+
+inline
+int f_lookup_hash(unsigned short port)
+{
+ /* Ok, so it's not really a hash function */
+ int bail=0;
+ int chk=0;
+
+ if (n_fipc_recv_ports_used == N_MAX_OPEN_FIPC_PORTS ||
+ port > MAX_FIPC_PORT_NUM)
+ return INVALID;
+
+ while (fipc_lookup_table[chk].fipc_port != port &&
+ fipc_lookup_table[chk].fpt_num != INVALID &&
+ bail < N_MAX_OPEN_FIPC_PORTS)
+ {
+ chk = (chk+1) % N_MAX_OPEN_FIPC_PORTS;
+ bail++;
+ }
+
+ /* This is redundant, but better safe than sorry */
+ if (bail<N_MAX_OPEN_FIPC_PORTS)
+ return chk;
+ else
+ return INVALID;
+}
+
+inline
+int fipc_lookup_table_enter(unsigned short port)
+{
+ int cfpn = n_fipc_recv_ports_used;
+ int lu_tbl_num = f_lookup_hash(port);
+
+ if (lu_tbl_num == INVALID)
+ return INVALID;
+
+ fipc_lookup_table[lu_tbl_num].fipc_port = port;
+ fipc_lookup_table[lu_tbl_num].fpt_num = cfpn;
+ n_fipc_recv_ports_used += 1;
+ return cfpn;
+}
+
+inline
+int fipc_lookup(unsigned short port)
+{
+ int chk = f_lookup_hash(port);
+
+ if (chk == INVALID)
+ return INVALID;
+
+ if (fipc_lookup_table[chk].fpt_num == INVALID)
+ return fipc_lookup_table_enter(port);
+ else
+ return fipc_lookup_table[chk].fpt_num;
+}
+
+inline
+int fipc_lookup_table_remove(unsigned short port)
+{
+ int chk = f_lookup_hash(port);
+
+ if (chk == INVALID)
+ return 0;
+
+ if (fipc_lookup_table[chk].fipc_port == port)
+ {
+ fports[fipc_lookup_table[chk].fpt_num].valid_msg = 0;
+ fports[fipc_lookup_table[chk].fpt_num].bound = FALSE;
+ fipc_lookup_table[chk].fpt_num = INVALID;
+ fipc_lookup_table[chk].fipc_port = INVALID;
+ n_fipc_recv_ports_used -=1;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Routine: fipc_packet(): handles incoming fipc packets.
+ * Does some simple packet handling and wakes up the receiving thread,
+ * if any. Called by the device controller (currently nerecv only),
+ * at interrupt level and at splimp.
+ * Messages are dropped if the recv queue is full.
+ */
+
+ void fipc_packet( char* msg_buf, struct ether_header sender)
+ {
+ int to_port = ((fipc_header_t*)msg_buf)->dest_port;
+ int from_port = ((fipc_header_t*)msg_buf)->send_port;
+ int f_tbl_num;
+ fipc_port_t *cfp;
+ fipc_buffer_q_ent *crqe;
+ int *tail;
+
+#ifdef FI_DEBUG
+ printf ("fipc_packet :(0x%x) %s", msg_buf,
+ msg_buf+sizeof(fipc_header_t));
+#endif
+
+ f_tbl_num = fipc_lookup(to_port);
+ if (f_tbl_num == INVALID)
+ {
+#ifdef FI_DEBUG
+ printf ("Lookup failed.\n");
+#endif
+ fipc_stats.dropped_msgs += 1;
+ return_fipc_buffer (msg_buf, FIPC_BUFFER_SIZE, TRUE, TRUE);
+ return;
+ }
+
+ cfp = &fports[f_tbl_num];
+ tail = &cfp->rq_tail;
+ crqe = &cfp->recv_q[*tail];
+
+ if (cfp->valid_msg == FIPC_RECV_Q_SIZE)
+ {
+ /* Queue full.
+ * Drop packet, return buffer, and return. */
+#ifdef FI_DEBUG
+ printf ("Port %d queue is full: valid_msg count: %d\n",
+ to_port, cfp->valid_msg);
+#endif
+ fipc_stats.dropped_msgs += 1;
+ return_fipc_buffer (msg_buf, FIPC_BUFFER_SIZE, TRUE, TRUE);
+ return;
+ }
+
+ /* "enqueue" at "tail" */
+ crqe->buffer = msg_buf;
+ crqe->size = ((fipc_header_t*)msg_buf)->msg_size;
+ /* This could certainly be done faster... */
+ bcopy(&(sender.ether_shost), &(crqe->sender.hwaddr), ETHER_HWADDR_SIZE);
+ /* This is actually useless, since there _is_ no sender port. */
+ crqe->sender.port = from_port;
+
+ *tail = ((*tail)+1) % FIPC_RECV_Q_SIZE;
+
+ if (cfp->bound)
+ thread_wakeup(&(cfp->valid_msg));
+ cfp->valid_msg++;
+#ifdef FI_DEBUG
+ printf ("valid_msg: %d\n", cfp->valid_msg);
+#endif
+
+ return;
+ }
+
+
+/*
+ * loopback(): for fipc_sends to the local host.
+ */
+
+inline
+kern_return_t loopback(char *packet)
+{
+ fipc_packet(packet+sizeof(struct ether_header),
+ *(struct ether_header*)packet);
+ return KERN_SUCCESS;
+}
+
+
+/********************************************************************
+ * Routine: fipc_send
+ ********************************************************************/
+
+kern_return_t syscall_fipc_send(fipc_endpoint_t dest,
+ char *user_buffer, int len)
+{
+#ifdef i386
+ static mach_device_t eth_device = 0;
+#else
+ static device_t eth_device = 0;
+#endif
+ static unsigned char hwaddr[ETHER_HWADDR_SIZE+2];
+
+ io_return_t rc;
+ kern_return_t open_res, kr;
+ dev_mode_t mode = D_WRITE;
+ /* register */ io_req_t ior = NULL;
+ struct ether_header *ehdr;
+ fipc_header_t *fhdr;
+ int *d_addr;
+ int data_count;
+ char *fipc_buf, *data_buffer;
+#ifdef FIPC_LOOPBACK
+ boolean_t local_send = FALSE;
+#endif
+
+#ifdef FI_DEBUG
+ printf("fipc_send(dest: %s, port:%d, len:%d, buf:x%x) !!!\n",
+ ether_sprintf(dest.hwaddr), dest.port, len, user_buffer);
+#endif
+
+ if (dest.port > MAX_FIPC_PORT_NUM ||
+ len > FIPC_MSG_SIZE)
+ {
+#ifdef FI_DEBUG
+ printf ("len: %d, dest.port: %u\n", len, dest.port);
+#endif
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /* We should only need to probe the device once. */
+
+ if (!eth_device)
+ {
+ unsigned char net_hwaddr[ETHER_HWADDR_SIZE+2];
+ int stat_count = sizeof(net_hwaddr)/sizeof(int);
+
+ /* XXX Automatic lookup for ne0 or ne1 was failing... */
+ eth_device = device_lookup(ETHER_DEVICE_NAME);
+#ifdef i386
+ if (eth_device == (mach_device_t) DEVICE_NULL ||
+ eth_device == (mach_device_t)D_NO_SUCH_DEVICE)
+#else
+ if (eth_device == DEVICE_NULL ||
+ eth_device == (device_t)D_NO_SUCH_DEVICE)
+#endif
+ {
+#ifdef FI_DEBUG
+ printf ("FIPC: Couldn't find ethernet device %s.\n",
+ ETHER_DEVICE_NAME);
+#endif
+ return (KERN_FAILURE);
+ }
+
+ /* The device should be open! */
+ if (eth_device->state != DEV_STATE_OPEN)
+ {
+#ifdef FI_DEBUG
+ printf ("Opening ethernet device.\n");
+#endif
+
+ io_req_alloc (ior, 0);
+
+ ior->io_device = eth_device;
+ ior->io_unit = eth_device->dev_number;
+ ior->io_op = IO_OPEN | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_error = 0;
+ ior->io_done = 0;
+ ior->io_reply_port = MACH_PORT_NULL;
+ ior->io_reply_port_type = 0;
+
+ /* open the device */
+ open_res =
+ (*eth_device->dev_ops->d_open)
+ (eth_device->dev_number,
+ (int)mode, ior);
+ if (ior->io_error != D_SUCCESS)
+ {
+#ifdef FI_DEBUG
+ printf ("Failed to open device ne0\n");
+#endif
+ return open_res;
+ }
+ }
+#ifdef i386
+ rc = mach_device_get_status(eth_device, NET_ADDRESS,
+ net_hwaddr, &stat_count);
+#else
+ rc = ds_device_get_status(eth_device, NET_ADDRESS, net_hwaddr,
+ &stat_count);
+#endif
+ if (rc != D_SUCCESS)
+ {
+#ifdef FI_DEBUG
+ printf("FIPC: Couldn't determine hardware ethernet address: %d\n",
+ rc);
+#endif
+ return KERN_FAILURE;
+ }
+ *(int*)hwaddr = ntohl(*(int*)net_hwaddr);
+ *(int*)(hwaddr+4) = ntohl(*(int*)(net_hwaddr+4));
+#ifdef FI_DEBUG
+ printf ("host: %s\n", ether_sprintf(hwaddr));
+#endif
+ }
+
+#ifdef FIPC_LOOPBACK
+ if (!memcmp(dest.hwaddr, hwaddr, ETHER_HWADDR_SIZE))
+/*
+ if ((*(int*)dest.hwaddr == *(int*)hwaddr) &&
+ ((*(int*)(((char*)dest.hwaddr+4) >> 16)) ==
+ ((*(int*)(((char*)hwaddr+4) >> 16)))))
+*/
+ {
+ local_send = TRUE;
+#ifdef FI_DEBUG
+ printf ("loopback: \n");
+ printf ("host: %s, ", ether_sprintf(hwaddr));
+ printf ("dest: %s\n", ether_sprintf(dest.hwaddr));
+#endif
+ }
+#endif
+
+ data_count = len + sizeof (struct ether_header)
+ + sizeof (fipc_header_t);
+
+#ifdef FIPC_LOOPBACK
+ fipc_buf = get_fipc_buffer(data_count, local_send, FALSE) ;
+#else
+ fipc_buf = get_fipc_buffer(data_count, FALSE, FALSE) ;
+#endif
+
+ if (fipc_buf == NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ ehdr = (struct ether_header *)fipc_buf;
+ d_addr = (int *)ehdr->ether_dhost;
+
+ *(int *)ehdr->ether_dhost = *(int*)dest.hwaddr;
+ *(int *)(ehdr->ether_dhost+4) = *(int*)(dest.hwaddr+4);
+ *(int *)ehdr->ether_shost = *(int *)hwaddr;
+ *(int *)(ehdr->ether_shost+4) = *(int *)(hwaddr+4);
+ ehdr->ether_type = 0x1234; /* Yep. */
+
+#ifdef FIPC_LOOPBACK
+ if (!local_send)
+ {
+#endif
+ if (!ior)
+ io_req_alloc (ior, 0);
+
+ /* Set up the device information. */
+ ior->io_device = eth_device;
+ ior->io_unit = eth_device->dev_number;
+ ior->io_op = IO_WRITE | IO_INBAND | IO_INTERNAL;
+ ior->io_mode = D_WRITE;
+ ior->io_recnum = 0;
+ ior->io_data = fipc_buf;
+ ior->io_count = data_count;
+ ior->io_total = data_count;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = fipc_done;
+ ior->io_reply_port = MACH_PORT_NULL;
+ ior->io_reply_port_type = 0;
+ ior->io_copy = VM_MAP_COPY_NULL;
+#ifdef FIPC_LOOPBACK
+ }
+#endif
+
+#ifdef FI_DEBUG
+ printf("sending from %s ", ether_sprintf(ehdr->ether_shost));
+ printf("to %s, type x%x, user_port x%x\n",
+ ether_sprintf(ehdr->ether_dhost),
+ (int)ehdr->ether_type,
+ (int)dest.port);
+#endif
+
+ if (len <= FIPC_MSG_SIZE)
+ {
+ fhdr = (fipc_header_t*)(fipc_buf+sizeof(struct ether_header));
+ fhdr->dest_port = dest.port;
+ fhdr->msg_size = len;
+ data_buffer = (char*)fhdr+sizeof(fipc_header_t);
+
+ copyin (user_buffer, data_buffer,
+ min (FIPC_BUFFER_SIZE-sizeof(fipc_header_t), len));
+
+#ifdef FIPC_LOOPBACK
+ /*
+ * Sending to same node. Queue on dest.port of this node.
+ * We just call fipc_packet after setting up the necessary info
+ * and return. fipc_packet queues the packet on the receive
+ * queue for the destination port.
+ */
+ if (local_send)
+ return (loopback(fipc_buf));
+#endif
+
+ /* Now write to the device */
+ /* d_port_death has been co-opted for fipc stuff.
+ * It maps to nefoutput(). */
+
+ rc = (*eth_device->dev_ops->d_port_death) /* that's the one */
+ (eth_device->dev_number, ior);
+ }
+#ifdef FI_DEBUG
+ else /* len > ETHERMTU: multi-packet request */
+ printf ("### multi-packet messages are not supported.\n");
+#endif
+
+ if (rc == D_IO_QUEUED)
+ return KERN_SUCCESS;
+ else
+ return KERN_FAILURE;
+}
+
+/********************************************************************
+ * syscall_fipc_recv()
+ ********************************************************************/
+
+kern_return_t syscall_fipc_recv(unsigned short user_port,
+ char *user_buffer, int *user_size, fipc_endpoint_t *user_sender)
+{
+ char* f_buffer;
+ fipc_port_t *cfp;
+ fipc_buffer_q_ent *crqe;
+ int *head;
+ int msg_size;
+ int fport_num = fipc_lookup(user_port);
+ spl_t spl;
+
+#ifdef FI_DEBUG
+ printf("fipc_recv(0x%x, 0x%x) !!!\n", user_port, user_buffer);
+#endif
+
+ if (user_port > MAX_FIPC_PORT_NUM)
+ {
+#ifdef FI_DEBUG
+ printf ("Invalid FIPC port: %u\n", user_port);
+#endif
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (fport_num == INVALID)
+ return KERN_RESOURCE_SHORTAGE;
+
+ cfp = &fports[fport_num];
+ head = &cfp->rq_head;
+ crqe = &cfp->recv_q[*head];
+
+ if (cfp->bound != FALSE)
+ {
+#ifdef FI_DEBUG
+ printf ("FIPC Port %u is currently bound.\n", user_port);
+#endif
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ copyin(user_size, &msg_size, sizeof(int));
+
+ spl = splimp();
+
+ cfp->bound = TRUE;
+ while (!(cfp->valid_msg))
+ {
+ assert_wait(&(cfp->valid_msg), TRUE);
+ splx(spl);
+ thread_block ((void(*)())0);
+ if (current_thread()->wait_result != THREAD_AWAKENED)
+ {
+ cfp->bound = FALSE;
+ return KERN_FAILURE;
+ }
+ spl = splimp();
+ }
+
+ cfp->valid_msg--;
+ f_buffer = crqe->buffer;
+ msg_size = min (crqe->size, msg_size);
+
+ crqe->buffer = NULL;
+ crqe->size = 0;
+ *head = ((*head)+1) % FIPC_RECV_Q_SIZE;
+ cfp->bound = FALSE;
+
+ splx(spl);
+
+ copyout(f_buffer+sizeof(fipc_header_t), user_buffer, msg_size);
+ copyout(&(crqe->sender), user_sender, sizeof(fipc_endpoint_t));
+ copyout(&msg_size, user_size, sizeof(msg_size));
+
+ return_fipc_buffer(f_buffer, FIPC_BUFFER_SIZE, TRUE, FALSE);
+
+ return KERN_SUCCESS;
+}
+
+
+/*
+ * Final clean-up after the packet has been sent off.
+ */
+int fipc_done(io_req_t ior)
+{
+ return_fipc_buffer(ior->io_data, FIPC_BUFFER_SIZE, FALSE, FALSE);
+
+ return 1;
+}
+
+#endif /* FIPC */
diff --git a/ipc/fipc.h b/ipc/fipc.h
new file mode 100644
index 00000000..2b545c4a
--- /dev/null
+++ b/ipc/fipc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 1996-1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Utah $Hdr: fipc.h 1.1 96/2/29$
+ * Author: Linus Kamb
+ */
+
+#include <kern/lock.h>
+#include <device/if_ether.h>
+
+
+#define N_MAX_OPEN_FIPC_PORTS 32 /* In practice,
+ * this should be much larger */
+#define MAX_FIPC_PORT_NUM 4095 /* ditto */
+
+#define FIPC_MSG_TYPE 0x1234
+
+#define FIPC_BUFFER_SIZE ETHERMTU
+#define FIPC_MSG_SIZE (FIPC_BUFFER_SIZE - sizeof(fipc_header_t))
+
+#define FIPC_RECV_Q_SIZE 4
+#define N_MIN_RECV_BUFS 5 /* 2 pages worth */
+#define N_MAX_RECV_BUFS (N_MAX_OPEN_FIPC_PORTS * FIPC_RECV_Q_SIZE)
+#define N_MIN_SEND_BUFS 2
+#define N_MAX_SEND_BUFS 5
+
+#define INVALID -1
+
+#define ETHER_HWADDR_SIZE 6
+#define ETHER_DEVICE_NAME "ne0"
+
+typedef struct fipc_endpoint_structure
+{
+ unsigned char hwaddr[ETHER_HWADDR_SIZE];
+ unsigned short port;
+} fipc_endpoint_t;
+
+typedef struct fipc_buffer_structure
+{
+ char *buffer;
+ unsigned short size;
+ fipc_endpoint_t sender;
+} fipc_buffer_q_ent;
+
+typedef struct fipc_port_structure
+{
+ simple_lock_data_t lock;
+ boolean_t bound;
+ int valid_msg;
+ fipc_buffer_q_ent recv_q[FIPC_RECV_Q_SIZE];
+ int rq_head, rq_tail;
+} fipc_port_t;
+
+typedef struct fipc_header_structure
+{
+ unsigned short dest_port;
+ unsigned short send_port;
+ unsigned int msg_size;
+} fipc_header_t;
+
+typedef struct fipc_lookup_table_ent_structure
+{
+ int fipc_port;
+ int fpt_num; /* f_ports[] entry number */
+} fipc_lookup_table_ent;
+
+typedef struct fipc_stat_structure
+{
+ int dropped_msgs;
+} fipc_stat_t;
+
+#define min(a,b) (((a)<=(b)?(a):(b)))
+
+char* get_fipc_buffer(int, boolean_t, boolean_t);
+void fipc_packet(char*, struct ether_header);
+
+extern int fipc_sends;
+extern int fipc_recvs;
+
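+/*
+ * Illustrative only: the kernel entry points declared in ipc/fipc.c are
+ * syscall_fipc_send() and syscall_fipc_recv(); user-level trap stubs are
+ * not part of this patch. Assuming stubs with matching signatures named
+ * fipc_send()/fipc_recv(), a round trip on a placeholder port 4000 would
+ * look roughly like:
+ *
+ *	fipc_endpoint_t dest, sender;
+ *	char buf[FIPC_MSG_SIZE];
+ *	int size = sizeof buf;
+ *
+ *	... fill in dest.hwaddr and dest.port ...
+ *	fipc_send(dest, "hello", sizeof "hello");
+ *
+ *	fipc_recv(4000, buf, &size, &sender);
+ */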
diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
new file mode 100644
index 00000000..305c98ec
--- /dev/null
+++ b/ipc/ipc_entry.c
@@ -0,0 +1,858 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_entry.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Primitive functions to manipulate translation entries.
+ */
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/sched_prim.h>
+#include <kern/zalloc.h>
+#include <ipc/port.h>
+#include <ipc/ipc_types.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_splay.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_object.h>
+
+zone_t ipc_tree_entry_zone;
+
+/*
+ * Routine: ipc_entry_tree_collision
+ * Purpose:
+ * Checks if "name" collides with an allocated name
+ * in the space's tree. That is, returns TRUE
+ * if the splay tree contains a name with the same
+ * index as "name".
+ * Conditions:
+ * The space is locked (read or write) and active.
+ */
+
+boolean_t
+ipc_entry_tree_collision(
+ ipc_space_t space,
+ mach_port_t name)
+{
+ mach_port_index_t index;
+ mach_port_t lower, upper;
+
+ assert(space->is_active);
+
+ /*
+ * Check if we collide with the next smaller name
+ * or the next larger name.
+ */
+
+ ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);
+
+ index = MACH_PORT_INDEX(name);
+ return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) ||
+ ((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
+}
+
+/*
+ * Routine: ipc_entry_lookup
+ * Purpose:
+ * Searches for an entry, given its name.
+ * Conditions:
+ * The space must be read or write locked throughout.
+ * The space must be active.
+ */
+
+ipc_entry_t
+ipc_entry_lookup(space, name)
+ ipc_space_t space;
+ mach_port_t name;
+{
+ mach_port_index_t index;
+ ipc_entry_t entry;
+
+ assert(space->is_active);
+
+ index = MACH_PORT_INDEX(name);
+ if (index < space->is_table_size) {
+ entry = &space->is_table[index];
+ if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
+ if (entry->ie_bits & IE_BITS_COLLISION) {
+ assert(space->is_tree_total > 0);
+ goto tree_lookup;
+ } else
+ entry = IE_NULL;
+ else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ entry = IE_NULL;
+ } else if (space->is_tree_total == 0)
+ entry = IE_NULL;
+ else
+ tree_lookup:
+ entry = (ipc_entry_t)
+ ipc_splay_tree_lookup(&space->is_tree, name);
+
+ assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
+ return entry;
+}
+
+/*
+ * Routine: ipc_entry_get
+ * Purpose:
+ * Tries to allocate an entry out of the space.
+ * Conditions:
+ * The space is write-locked and active throughout.
+ * An object may be locked. Will not allocate memory.
+ * Returns:
+ * KERN_SUCCESS A free entry was found.
+ * KERN_NO_SPACE No entry allocated.
+ */
+
+kern_return_t
+ipc_entry_get(space, namep, entryp)
+ ipc_space_t space;
+ mach_port_t *namep;
+ ipc_entry_t *entryp;
+{
+ ipc_entry_t table;
+ mach_port_index_t first_free;
+ mach_port_t new_name;
+ ipc_entry_t free_entry;
+
+ assert(space->is_active);
+
+ table = space->is_table;
+ first_free = table->ie_next;
+
+ if (first_free == 0)
+ return KERN_NO_SPACE;
+
+ free_entry = &table[first_free];
+ table->ie_next = free_entry->ie_next;
+
+ /*
+ * Initialize the new entry. We need only
+ * increment the generation number and clear ie_request.
+ */
+
+ {
+ mach_port_gen_t gen;
+
+ assert((free_entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
+ free_entry->ie_bits = gen;
+ free_entry->ie_request = 0;
+ new_name = MACH_PORT_MAKE(first_free, gen);
+ }
+
+ /*
+ * The new name can't be MACH_PORT_NULL because index
+ * is non-zero. It can't be MACH_PORT_DEAD because
+ * the table isn't allowed to grow big enough.
+ * (See comment in ipc/ipc_table.h.)
+ */
+
+ assert(MACH_PORT_VALID(new_name));
+ assert(free_entry->ie_object == IO_NULL);
+
+ *namep = new_name;
+ *entryp = free_entry;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_entry_alloc
+ * Purpose:
+ * Allocate an entry out of the space.
+ * Conditions:
+ * The space is not locked before, but it is write-locked after
+ * if the call is successful. May allocate memory.
+ * Returns:
+ * KERN_SUCCESS An entry was allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory for an entry.
+ */
+
+kern_return_t
+ipc_entry_alloc(
+ ipc_space_t space,
+ mach_port_t *namep,
+ ipc_entry_t *entryp)
+{
+ kern_return_t kr;
+
+ is_write_lock(space);
+
+ for (;;) {
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return KERN_INVALID_TASK;
+ }
+
+ kr = ipc_entry_get(space, namep, entryp);
+ if (kr == KERN_SUCCESS)
+ return kr;
+
+ kr = ipc_entry_grow_table(space);
+ if (kr != KERN_SUCCESS)
+ return kr; /* space is unlocked */
+ }
+}
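+
+/*
+ * Illustrative caller pattern (not part of this file): on success the
+ * space comes back write-locked, so a caller fills in the entry and
+ * then drops the lock itself, e.g.
+ *
+ *	kr = ipc_entry_alloc(space, &name, &entry);
+ *	if (kr != KERN_SUCCESS)
+ *		return kr;
+ *	entry->ie_object = ...;
+ *	entry->ie_bits |= ...;
+ *	is_write_unlock(space);
+ */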
+
+/*
+ * Routine: ipc_entry_alloc_name
+ * Purpose:
+ * Allocates/finds an entry with a specific name.
+ * If an existing entry is returned, its type will be nonzero.
+ * Conditions:
+ * The space is not locked before, but it is write-locked after
+ * if the call is successful. May allocate memory.
+ * Returns:
+ * KERN_SUCCESS Found existing entry with same name.
+ * KERN_SUCCESS Allocated a new entry.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_entry_alloc_name(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t *entryp)
+{
+ mach_port_index_t index = MACH_PORT_INDEX(name);
+ mach_port_gen_t gen = MACH_PORT_GEN(name);
+ ipc_tree_entry_t tree_entry = ITE_NULL;
+
+ assert(MACH_PORT_VALID(name));
+
+
+ is_write_lock(space);
+
+ for (;;) {
+ ipc_entry_t entry;
+ ipc_tree_entry_t tentry;
+ ipc_table_size_t its;
+
+ if (!space->is_active) {
+ is_write_unlock(space);
+ if (tree_entry) ite_free(tree_entry);
+ return KERN_INVALID_TASK;
+ }
+
+ /*
+ * If we are under the table cutoff,
+ * there are three cases:
+ * 1) The entry is inuse, for the same name
+ * 2) The entry is inuse, for a different name
+ * 3) The entry is free
+ */
+
+ if ((0 < index) && (index < space->is_table_size)) {
+ ipc_entry_t table = space->is_table;
+
+ entry = &table[index];
+
+ if (IE_BITS_TYPE(entry->ie_bits)) {
+ if (IE_BITS_GEN(entry->ie_bits) == gen) {
+ *entryp = entry;
+ if (tree_entry) ite_free(tree_entry);
+ return KERN_SUCCESS;
+ }
+ } else {
+ mach_port_index_t free_index, next_index;
+
+ /*
+ * Rip the entry out of the free list.
+ */
+
+ for (free_index = 0;
+ (next_index = table[free_index].ie_next)
+ != index;
+ free_index = next_index)
+ continue;
+
+ table[free_index].ie_next =
+ table[next_index].ie_next;
+
+ entry->ie_bits = gen;
+ assert(entry->ie_object == IO_NULL);
+ entry->ie_request = 0;
+
+ *entryp = entry;
+ if (tree_entry) ite_free(tree_entry);
+ return KERN_SUCCESS;
+ }
+ }
+
+ /*
+ * Before trying to allocate any memory,
+ * check if the entry already exists in the tree.
+ * This avoids spurious resource errors.
+ * The splay tree makes a subsequent lookup/insert
+ * of the same name cheap, so this costs little.
+ */
+
+ if ((space->is_tree_total > 0) &&
+ ((tentry = ipc_splay_tree_lookup(&space->is_tree, name))
+ != ITE_NULL)) {
+ assert(tentry->ite_space == space);
+ assert(IE_BITS_TYPE(tentry->ite_bits));
+
+ *entryp = &tentry->ite_entry;
+ if (tree_entry) ite_free(tree_entry);
+ return KERN_SUCCESS;
+ }
+
+ its = space->is_table_next;
+
+ /*
+ * Check if the table should be grown.
+ *
+ * Note that if space->is_table_size == its->its_size,
+ * then we won't ever try to grow the table.
+ *
+ * Note that we are optimistically assuming that name
+ * doesn't collide with any existing names. (So if
+ * it were entered into the tree, is_tree_small would
+ * be incremented.) This is OK, because even in that
+ * case, we don't lose memory by growing the table.
+ */
+
+ if ((space->is_table_size <= index) &&
+ (index < its->its_size) &&
+ (((its->its_size - space->is_table_size) *
+ sizeof(struct ipc_entry)) <
+ ((space->is_tree_small + 1) *
+ sizeof(struct ipc_tree_entry)))) {
+ kern_return_t kr;
+
+ /*
+ * Can save space by growing the table.
+ * Because the space will be unlocked,
+ * we must restart.
+ */
+
+ kr = ipc_entry_grow_table(space);
+ assert(kr != KERN_NO_SPACE);
+ if (kr != KERN_SUCCESS) {
+ /* space is unlocked */
+ if (tree_entry) ite_free(tree_entry);
+ return kr;
+ }
+
+ continue;
+ }
+
+ /*
+ * If a splay-tree entry was allocated previously,
+ * go ahead and insert it into the tree.
+ */
+
+ if (tree_entry != ITE_NULL) {
+ space->is_tree_total++;
+
+ if (index < space->is_table_size)
+ space->is_table[index].ie_bits |=
+ IE_BITS_COLLISION;
+ else if ((index < its->its_size) &&
+ !ipc_entry_tree_collision(space, name))
+ space->is_tree_small++;
+
+ ipc_splay_tree_insert(&space->is_tree,
+ name, tree_entry);
+
+ tree_entry->ite_bits = 0;
+ tree_entry->ite_object = IO_NULL;
+ tree_entry->ite_request = 0;
+ tree_entry->ite_space = space;
+ *entryp = &tree_entry->ite_entry;
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * Allocate a tree entry and try again.
+ */
+
+ is_write_unlock(space);
+ tree_entry = ite_alloc();
+ if (tree_entry == ITE_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+ is_write_lock(space);
+ }
+}
+
+/*
+ * Routine: ipc_entry_dealloc
+ * Purpose:
+ * Deallocates an entry from a space.
+ * Conditions:
+ * The space must be write-locked throughout.
+ * The space must be active.
+ */
+
+void
+ipc_entry_dealloc(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry)
+{
+ ipc_entry_t table;
+ ipc_entry_num_t size;
+ mach_port_index_t index;
+
+ assert(space->is_active);
+ assert(entry->ie_object == IO_NULL);
+ assert(entry->ie_request == 0);
+
+ index = MACH_PORT_INDEX(name);
+ table = space->is_table;
+ size = space->is_table_size;
+
+ if ((index < size) && (entry == &table[index])) {
+ assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
+
+ if (entry->ie_bits & IE_BITS_COLLISION) {
+ struct ipc_splay_tree small, collisions;
+ ipc_tree_entry_t tentry;
+ mach_port_t tname;
+ boolean_t pick;
+ ipc_entry_bits_t bits;
+ ipc_object_t obj;
+
+ /* must move an entry from tree to table */
+
+ ipc_splay_tree_split(&space->is_tree,
+ MACH_PORT_MAKE(index+1, 0),
+ &collisions);
+ ipc_splay_tree_split(&collisions,
+ MACH_PORT_MAKE(index, 0),
+ &small);
+
+ pick = ipc_splay_tree_pick(&collisions,
+ &tname, &tentry);
+ assert(pick);
+ assert(MACH_PORT_INDEX(tname) == index);
+
+ bits = tentry->ite_bits;
+ entry->ie_bits = bits | MACH_PORT_GEN(tname);
+ entry->ie_object = obj = tentry->ite_object;
+ entry->ie_request = tentry->ite_request;
+ assert(tentry->ite_space == space);
+
+ if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND) {
+ ipc_hash_global_delete(space, obj,
+ tname, tentry);
+ ipc_hash_local_insert(space, obj,
+ index, entry);
+ }
+
+ ipc_splay_tree_delete(&collisions, tname, tentry);
+
+ assert(space->is_tree_total > 0);
+ space->is_tree_total--;
+
+ /* check if collision bit should still be on */
+
+ pick = ipc_splay_tree_pick(&collisions,
+ &tname, &tentry);
+ if (pick) {
+ entry->ie_bits |= IE_BITS_COLLISION;
+ ipc_splay_tree_join(&space->is_tree,
+ &collisions);
+ }
+
+ ipc_splay_tree_join(&space->is_tree, &small);
+ } else {
+ entry->ie_bits &= IE_BITS_GEN_MASK;
+ entry->ie_next = table->ie_next;
+ table->ie_next = index;
+ }
+ } else {
+ ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;
+
+ assert(tentry->ite_space == space);
+
+ ipc_splay_tree_delete(&space->is_tree, name, tentry);
+
+ assert(space->is_tree_total > 0);
+ space->is_tree_total--;
+
+ if (index < size) {
+ ipc_entry_t ientry = &table[index];
+
+ assert(ientry->ie_bits & IE_BITS_COLLISION);
+
+ if (!ipc_entry_tree_collision(space, name))
+ ientry->ie_bits &= ~IE_BITS_COLLISION;
+ } else if ((index < space->is_table_next->its_size) &&
+ !ipc_entry_tree_collision(space, name)) {
+ assert(space->is_tree_small > 0);
+ space->is_tree_small--;
+ }
+ }
+}
+
+/*
+ * Routine: ipc_entry_grow_table
+ * Purpose:
+ * Grows the table in a space.
+ * Conditions:
+ * The space must be write-locked and active before.
+ * If successful, it is also returned locked.
+ * Allocates memory.
+ * Returns:
+ * KERN_SUCCESS Grew the table.
+ * KERN_SUCCESS Somebody else grew the table.
+ * KERN_SUCCESS The space died.
+ * KERN_NO_SPACE Table has maximum size already.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate a new table.
+ */
+
+kern_return_t
+ipc_entry_grow_table(space)
+ ipc_space_t space;
+{
+ ipc_entry_num_t osize, size, nsize;
+
+ do {
+ ipc_entry_t otable, table;
+ ipc_table_size_t oits, its, nits;
+ mach_port_index_t i, free_index;
+
+ assert(space->is_active);
+
+ if (space->is_growing) {
+ /*
+ * Somebody else is growing the table.
+ * We just wait for them to finish.
+ */
+
+ assert_wait((event_t) space, FALSE);
+ is_write_unlock(space);
+ thread_block((void (*)()) 0);
+ is_write_lock(space);
+ return KERN_SUCCESS;
+ }
+
+ otable = space->is_table;
+ its = space->is_table_next;
+ size = its->its_size;
+ oits = its - 1;
+ osize = oits->its_size;
+ nits = its + 1;
+ nsize = nits->its_size;
+
+ if (osize == size) {
+ is_write_unlock(space);
+ return KERN_NO_SPACE;
+ }
+
+ assert((osize < size) && (size <= nsize));
+
+ /*
+ * OK, we'll attempt to grow the table.
+ * The realloc requires that the old table
+ * remain in existence.
+ */
+
+ space->is_growing = TRUE;
+ is_write_unlock(space);
+ if (it_entries_reallocable(oits))
+ table = it_entries_realloc(oits, otable, its);
+ else
+ table = it_entries_alloc(its);
+ is_write_lock(space);
+ space->is_growing = FALSE;
+
+ /*
+ * We need to do a wakeup on the space,
+ * to rouse waiting threads. We defer
+ * this until the space is unlocked,
+ * because we don't want them to spin.
+ */
+
+ if (table == IE_NULL) {
+ is_write_unlock(space);
+ thread_wakeup((event_t) space);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ if (!space->is_active) {
+ /*
+ * The space died while it was unlocked.
+ */
+
+ is_write_unlock(space);
+ thread_wakeup((event_t) space);
+ it_entries_free(its, table);
+ is_write_lock(space);
+ return KERN_SUCCESS;
+ }
+
+ assert(space->is_table == otable);
+ assert(space->is_table_next == its);
+ assert(space->is_table_size == osize);
+
+ space->is_table = table;
+ space->is_table_size = size;
+ space->is_table_next = nits;
+
+ /*
+ * If we did a realloc, it remapped the data.
+ * Otherwise we copy by hand first. Then we have
+ * to clear the index fields in the old part and
+ * zero the new part.
+ */
+
+ if (!it_entries_reallocable(oits))
+ (void) memcpy((void *) table, (const void *) otable,
+ osize * sizeof(struct ipc_entry));
+
+ for (i = 0; i < osize; i++)
+ table[i].ie_index = 0;
+
+ (void) memset((void *) (table + osize), 0,
+ (size - osize) * sizeof(struct ipc_entry));
+
+ /*
+ * Put old entries into the reverse hash table.
+ */
+
+ for (i = 0; i < osize; i++) {
+ ipc_entry_t entry = &table[i];
+
+ if (IE_BITS_TYPE(entry->ie_bits) ==
+ MACH_PORT_TYPE_SEND)
+ ipc_hash_local_insert(space, entry->ie_object,
+ i, entry);
+ }
+
+ /*
+ * If there are entries in the splay tree,
+ * then we have work to do:
+ * 1) transfer entries to the table
+ * 2) update is_tree_small
+ */
+
+ if (space->is_tree_total > 0) {
+ mach_port_index_t index;
+ boolean_t delete;
+ struct ipc_splay_tree ignore;
+ struct ipc_splay_tree move;
+ struct ipc_splay_tree small;
+ ipc_entry_num_t nosmall;
+ ipc_tree_entry_t tentry;
+
+ /*
+ * The splay tree divides into four regions,
+ * based on the index of the entries:
+ * 1) 0 <= index < osize
+ * 2) osize <= index < size
+ * 3) size <= index < nsize
+ * 4) nsize <= index
+ *
+ * Entries in the first part are ignored.
+ * Entries in the second part, that don't
+ * collide, are moved into the table.
+ * Entries in the third part, that don't
+ * collide, are counted for is_tree_small.
+ * Entries in the fourth part are ignored.
+ */
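+ /*
+ * For example, growing from osize = 64 to size = 128 with
+ * nsize = 256 (hypothetical table sizes): a tree entry whose
+ * index is 50 stays in the tree (region 1), one with index 100
+ * is moved into the newly added table slots if it doesn't
+ * collide (region 2), one with index 200 is merely counted
+ * toward is_tree_small (region 3), and one with index 300 is
+ * left alone (region 4).
+ */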
+
+ ipc_splay_tree_split(&space->is_tree,
+ MACH_PORT_MAKE(nsize, 0),
+ &small);
+ ipc_splay_tree_split(&small,
+ MACH_PORT_MAKE(size, 0),
+ &move);
+ ipc_splay_tree_split(&move,
+ MACH_PORT_MAKE(osize, 0),
+ &ignore);
+
+ /* move entries into the table */
+
+ for (tentry = ipc_splay_traverse_start(&move);
+ tentry != ITE_NULL;
+ tentry = ipc_splay_traverse_next(&move, delete)) {
+ mach_port_t name;
+ mach_port_gen_t gen;
+ mach_port_type_t type;
+ ipc_entry_bits_t bits;
+ ipc_object_t obj;
+ ipc_entry_t entry;
+
+ name = tentry->ite_name;
+ gen = MACH_PORT_GEN(name);
+ index = MACH_PORT_INDEX(name);
+
+ assert(tentry->ite_space == space);
+ assert((osize <= index) && (index < size));
+
+ entry = &table[index];
+
+ /* collision with previously moved entry? */
+
+ bits = entry->ie_bits;
+ if (bits != 0) {
+ assert(IE_BITS_TYPE(bits));
+ assert(IE_BITS_GEN(bits) != gen);
+
+ entry->ie_bits =
+ bits | IE_BITS_COLLISION;
+ delete = FALSE;
+ continue;
+ }
+
+ bits = tentry->ite_bits;
+ type = IE_BITS_TYPE(bits);
+ assert(type != MACH_PORT_TYPE_NONE);
+
+ entry->ie_bits = bits | gen;
+ entry->ie_object = obj = tentry->ite_object;
+ entry->ie_request = tentry->ite_request;
+
+ if (type == MACH_PORT_TYPE_SEND) {
+ ipc_hash_global_delete(space, obj,
+ name, tentry);
+ ipc_hash_local_insert(space, obj,
+ index, entry);
+ }
+
+ space->is_tree_total--;
+ delete = TRUE;
+ }
+ ipc_splay_traverse_finish(&move);
+
+ /* count entries for is_tree_small */
+
+ nosmall = 0; index = 0;
+ for (tentry = ipc_splay_traverse_start(&small);
+ tentry != ITE_NULL;
+ tentry = ipc_splay_traverse_next(&small, FALSE)) {
+ mach_port_index_t nindex;
+
+ nindex = MACH_PORT_INDEX(tentry->ite_name);
+
+ if (nindex != index) {
+ nosmall++;
+ index = nindex;
+ }
+ }
+ ipc_splay_traverse_finish(&small);
+
+ assert(nosmall <= (nsize - size));
+ assert(nosmall <= space->is_tree_total);
+ space->is_tree_small = nosmall;
+
+ /* put the splay tree back together */
+
+ ipc_splay_tree_join(&space->is_tree, &small);
+ ipc_splay_tree_join(&space->is_tree, &move);
+ ipc_splay_tree_join(&space->is_tree, &ignore);
+ }
+
+ /*
+ * Add entries in the new part which still aren't used
+ * to the free list. Add them in reverse order,
+ * and set the generation number to -1, so that
+ * early allocations produce "natural" names.
+ */
+
+ free_index = table[0].ie_next;
+ for (i = size-1; i >= osize; --i) {
+ ipc_entry_t entry = &table[i];
+
+ if (entry->ie_bits == 0) {
+ entry->ie_bits = IE_BITS_GEN_MASK;
+ entry->ie_next = free_index;
+ free_index = i;
+ }
+ }
+ table[0].ie_next = free_index;
+
+ /*
+ * Now we need to free the old table.
+ * If the space dies or grows while unlocked,
+ * then we can quit here.
+ */
+
+ is_write_unlock(space);
+ thread_wakeup((event_t) space);
+ it_entries_free(oits, otable);
+ is_write_lock(space);
+ if (!space->is_active || (space->is_table_next != nits))
+ return KERN_SUCCESS;
+
+ /*
+ * We might have moved enough entries from
+ * the splay tree into the table that
+ * the table can be profitably grown again.
+ *
+ * Note that if size == nsize, then
+ * space->is_tree_small == 0.
+ */
+ } while ((space->is_tree_small > 0) &&
+ (((nsize - size) * sizeof(struct ipc_entry)) <
+ (space->is_tree_small * sizeof(struct ipc_tree_entry))));
+
+ return KERN_SUCCESS;
+}
+
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+#define printf kdbprintf
+
+ipc_entry_t db_ipc_object_by_name(
+ task_t task,
+ mach_port_t name);
+
+
+ipc_entry_t
+db_ipc_object_by_name(
+ task_t task,
+ mach_port_t name)
+{
+ ipc_space_t space = task->itk_space;
+ ipc_entry_t entry;
+
+
+ entry = ipc_entry_lookup(space, name);
+ if (entry != IE_NULL) {
+ iprintf("(task 0x%x, name 0x%x) ==> object 0x%x",
+ task, name, entry->ie_object);
+ return (ipc_entry_t) entry->ie_object;
+ }
+ return entry;
+}
+#endif /* MACH_KDB */
diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h
new file mode 100644
index 00000000..ea0c0a24
--- /dev/null
+++ b/ipc/ipc_entry.h
@@ -0,0 +1,158 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_entry.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for translation entries, which represent
+ * tasks' capabilities for ports and port sets.
+ */
+
+#ifndef _IPC_IPC_ENTRY_H_
+#define _IPC_IPC_ENTRY_H_
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <kern/zalloc.h>
+#include <ipc/port.h>
+#include <ipc/ipc_table.h>
+
+/*
+ * Spaces hold capabilities for ipc_object_t's (ports and port sets).
+ * Each ipc_entry_t records a capability. Most capabilities have
+ * small names, and the entries are elements of a table.
+ * Capabilities can have large names, and a splay tree holds
+ * those entries. The cutoff point between the table and the tree
+ * is adjusted dynamically to minimize memory consumption.
+ *
+ * The ie_index field of entries in the table implements
+ * an ordered hash table with open addressing and linear probing.
+ * This hash table converts (space, object) -> name.
+ * It is used independently of the other fields.
+ *
+ * Free (unallocated) entries in the table have null ie_object
+ * fields. The ie_bits field is zero except for IE_BITS_GEN.
+ * The ie_next (ie_request) field links free entries into a free list.
+ *
+ * The first entry in the table (index 0) is always free.
+ * It is used as the head of the free list.
+ */
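+
+/*
+ * A name is a packed (index, generation) pair: the code uses
+ * MACH_PORT_INDEX(name) and MACH_PORT_GEN(name) to take it apart
+ * and MACH_PORT_MAKE(index, gen) to build one. Names whose index
+ * falls below is_table_size normally occupy that slot of is_table;
+ * larger names, and names that collide with an occupied slot of a
+ * different generation, live in the splay tree.
+ */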
+
+typedef unsigned int ipc_entry_bits_t;
+typedef ipc_table_elems_t ipc_entry_num_t; /* number of entries */
+
+typedef struct ipc_entry {
+ ipc_entry_bits_t ie_bits;
+ struct ipc_object *ie_object;
+ union {
+ mach_port_index_t next;
+ /*XXX ipc_port_request_index_t request;*/
+ unsigned int request;
+ } index;
+ union {
+ mach_port_index_t table;
+ struct ipc_tree_entry *tree;
+ } hash;
+} *ipc_entry_t;
+
+#define IE_NULL ((ipc_entry_t) 0)
+
+#define ie_request index.request
+#define ie_next index.next
+#define ie_index hash.table
+
+#define IE_BITS_UREFS_MASK 0x0000ffff /* 16 bits of user-reference */
+#define IE_BITS_UREFS(bits) ((bits) & IE_BITS_UREFS_MASK)
+
+#define IE_BITS_TYPE_MASK 0x001f0000 /* 5 bits of capability type */
+#define IE_BITS_TYPE(bits) ((bits) & IE_BITS_TYPE_MASK)
+
+#define IE_BITS_MAREQUEST 0x00200000 /* 1 bit for msg-accepted */
+
+#define IE_BITS_COMPAT 0x00400000 /* 1 bit for compatibility */
+
+#define IE_BITS_COLLISION 0x00800000 /* 1 bit for collisions */
+#define IE_BITS_RIGHT_MASK 0x007fffff /* relevant to the right */
+
+#if PORT_GENERATIONS
+#define IE_BITS_GEN_MASK 0xff000000U /* 8 bits for generation */
+#define IE_BITS_GEN(bits) ((bits) & IE_BITS_GEN_MASK)
+#define IE_BITS_GEN_ONE 0x01000000 /* low bit of generation */
+#else
+#define IE_BITS_GEN_MASK 0
+#define IE_BITS_GEN(bits) 0
+#define IE_BITS_GEN_ONE 0
+#endif
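+
+/*
+ * Layout of ie_bits, from low to high bits:
+ * bits 0-15 user references (IE_BITS_UREFS_MASK)
+ * bits 16-20 capability type (IE_BITS_TYPE_MASK)
+ * bit 21 msg-accepted request (IE_BITS_MAREQUEST)
+ * bit 22 compatibility (IE_BITS_COMPAT)
+ * bit 23 collision (IE_BITS_COLLISION)
+ * bits 24-31 generation (IE_BITS_GEN_MASK, if PORT_GENERATIONS)
+ */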
+
+
+typedef struct ipc_tree_entry {
+ struct ipc_entry ite_entry;
+ mach_port_t ite_name;
+ struct ipc_space *ite_space;
+ struct ipc_tree_entry *ite_lchild;
+ struct ipc_tree_entry *ite_rchild;
+} *ipc_tree_entry_t;
+
+#define ITE_NULL ((ipc_tree_entry_t) 0)
+
+#define ite_bits ite_entry.ie_bits
+#define ite_object ite_entry.ie_object
+#define ite_request ite_entry.ie_request
+#define ite_next ite_entry.hash.tree
+
+extern zone_t ipc_tree_entry_zone;
+
+#define ite_alloc() ((ipc_tree_entry_t) zalloc(ipc_tree_entry_zone))
+#define ite_free(ite) zfree(ipc_tree_entry_zone, (vm_offset_t) (ite))
+
+
+extern ipc_entry_t
+ipc_entry_lookup(/* ipc_space_t space, mach_port_t name */);
+
+extern kern_return_t
+ipc_entry_get(/* ipc_space_t space,
+ mach_port_t *namep, ipc_entry_t *entryp */);
+
+extern kern_return_t
+ipc_entry_alloc(/* ipc_space_t space,
+ mach_port_t *namep, ipc_entry_t *entryp */);
+
+extern kern_return_t
+ipc_entry_alloc_name(/* ipc_space_t space, mach_port_t name,
+ ipc_entry_t *entryp */);
+
+extern void
+ipc_entry_dealloc(/* ipc_space_t space, mach_port_t name,
+ ipc_entry_t entry */);
+
+extern kern_return_t
+ipc_entry_grow_table(/* ipc_space_t space */);
+
+#endif /* _IPC_IPC_ENTRY_H_ */
diff --git a/ipc/ipc_hash.c b/ipc/ipc_hash.c
new file mode 100644
index 00000000..50024b5f
--- /dev/null
+++ b/ipc/ipc_hash.c
@@ -0,0 +1,626 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_hash.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Entry hash table operations.
+ */
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/kalloc.h>
+#include <ipc/port.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_init.h>
+#include <ipc/ipc_types.h>
+
+#include <mach_ipc_debug.h>
+#if MACH_IPC_DEBUG
+#include <mach/kern_return.h>
+#include <mach_debug/hash_info.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#endif
+
+
+
+/*
+ * Routine: ipc_hash_lookup
+ * Purpose:
+ * Converts (space, obj) -> (name, entry).
+ * Returns TRUE if an entry was found.
+ * Conditions:
+ * The space must be locked (read or write) throughout.
+ */
+
+boolean_t
+ipc_hash_lookup(space, obj, namep, entryp)
+ ipc_space_t space;
+ ipc_object_t obj;
+ mach_port_t *namep;
+ ipc_entry_t *entryp;
+{
+ return (ipc_hash_local_lookup(space, obj, namep, entryp) ||
+ ((space->is_tree_hash > 0) &&
+ ipc_hash_global_lookup(space, obj, namep,
+ (ipc_tree_entry_t *) entryp)));
+}
+
+/*
+ * Routine: ipc_hash_insert
+ * Purpose:
+ * Inserts an entry into the appropriate reverse hash table,
+ * so that ipc_hash_lookup will find it.
+ * Conditions:
+ * The space must be write-locked.
+ */
+
+void
+ipc_hash_insert(
+ ipc_space_t space,
+ ipc_object_t obj,
+ mach_port_t name,
+ ipc_entry_t entry)
+{
+ mach_port_index_t index;
+
+ index = MACH_PORT_INDEX(name);
+ if ((index < space->is_table_size) &&
+ (entry == &space->is_table[index]))
+ ipc_hash_local_insert(space, obj, index, entry);
+ else
+ ipc_hash_global_insert(space, obj, name,
+ (ipc_tree_entry_t) entry);
+}
+
+/*
+ * Routine: ipc_hash_delete
+ * Purpose:
+ * Deletes an entry from the appropriate reverse hash table.
+ * Conditions:
+ * The space must be write-locked.
+ */
+
+void
+ipc_hash_delete(
+ ipc_space_t space,
+ ipc_object_t obj,
+ mach_port_t name,
+ ipc_entry_t entry)
+{
+ mach_port_index_t index;
+
+ index = MACH_PORT_INDEX(name);
+ if ((index < space->is_table_size) &&
+ (entry == &space->is_table[index]))
+ ipc_hash_local_delete(space, obj, index, entry);
+ else
+ ipc_hash_global_delete(space, obj, name,
+ (ipc_tree_entry_t) entry);
+}
+
+/*
+ * The global reverse hash table holds splay tree entries.
+ * It is a simple open-chaining hash table with singly-linked buckets.
+ * Each bucket is locked separately, with an exclusive lock.
+ * Within each bucket, move-to-front is used.
+ */
+
+typedef natural_t ipc_hash_index_t;
+
+ipc_hash_index_t ipc_hash_global_size;
+ipc_hash_index_t ipc_hash_global_mask;
+
+#define IH_GLOBAL_HASH(space, obj) \
+ (((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) + \
+ (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) & \
+ ipc_hash_global_mask)
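+
+/*
+ * The hash simply mixes the two pointers: the space pointer is
+ * shifted right by 4 and the object pointer by 6 (their low bits
+ * carry little information, since both are aligned allocations),
+ * the shifted values are added, and the sum is masked down to the
+ * table size, which ipc_hash_init keeps at a power of two.
+ */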
+
+typedef struct ipc_hash_global_bucket {
+ decl_simple_lock_data(, ihgb_lock_data)
+ ipc_tree_entry_t ihgb_head;
+} *ipc_hash_global_bucket_t;
+
+#define IHGB_NULL ((ipc_hash_global_bucket_t) 0)
+
+#define ihgb_lock_init(ihgb) simple_lock_init(&(ihgb)->ihgb_lock_data)
+#define ihgb_lock(ihgb) simple_lock(&(ihgb)->ihgb_lock_data)
+#define ihgb_unlock(ihgb) simple_unlock(&(ihgb)->ihgb_lock_data)
+
+ipc_hash_global_bucket_t ipc_hash_global_table;
+
+/*
+ * Routine: ipc_hash_global_lookup
+ * Purpose:
+ * Converts (space, obj) -> (name, entry).
+ * Looks in the global table, for splay tree entries.
+ * Returns TRUE if an entry was found.
+ * Conditions:
+ * The space must be locked (read or write) throughout.
+ */
+
+boolean_t
+ipc_hash_global_lookup(
+ ipc_space_t space,
+ ipc_object_t obj,
+ mach_port_t *namep,
+ ipc_tree_entry_t *entryp)
+{
+ ipc_hash_global_bucket_t bucket;
+ ipc_tree_entry_t this, *last;
+
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+
+ bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+ ihgb_lock(bucket);
+
+ if ((this = bucket->ihgb_head) != ITE_NULL) {
+ if ((this->ite_object == obj) &&
+ (this->ite_space == space)) {
+ /* found it at front; no need to move */
+
+ *namep = this->ite_name;
+ *entryp = this;
+ } else for (last = &this->ite_next;
+ (this = *last) != ITE_NULL;
+ last = &this->ite_next) {
+ if ((this->ite_object == obj) &&
+ (this->ite_space == space)) {
+ /* found it; move to front */
+
+ *last = this->ite_next;
+ this->ite_next = bucket->ihgb_head;
+ bucket->ihgb_head = this;
+
+ *namep = this->ite_name;
+ *entryp = this;
+ break;
+ }
+ }
+ }
+
+ ihgb_unlock(bucket);
+ return this != ITE_NULL;
+}
+
+/*
+ * Routine: ipc_hash_global_insert
+ * Purpose:
+ * Inserts an entry into the global reverse hash table.
+ * Conditions:
+ * The space must be write-locked.
+ */
+
+void
+ipc_hash_global_insert(
+ ipc_space_t space,
+ ipc_object_t obj,
+ mach_port_t name,
+ ipc_tree_entry_t entry)
+{
+ ipc_hash_global_bucket_t bucket;
+
+
+ assert(entry->ite_name == name);
+ assert(space != IS_NULL);
+ assert(entry->ite_space == space);
+ assert(obj != IO_NULL);
+ assert(entry->ite_object == obj);
+
+ space->is_tree_hash++;
+ assert(space->is_tree_hash <= space->is_tree_total);
+
+ bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+ ihgb_lock(bucket);
+
+ /* insert at front of bucket */
+
+ entry->ite_next = bucket->ihgb_head;
+ bucket->ihgb_head = entry;
+
+ ihgb_unlock(bucket);
+}
+
+/*
+ * Routine: ipc_hash_global_delete
+ * Purpose:
+ * Deletes an entry from the global reverse hash table.
+ * Conditions:
+ * The space must be write-locked.
+ */
+
+void
+ipc_hash_global_delete(
+ ipc_space_t space,
+ ipc_object_t obj,
+ mach_port_t name,
+ ipc_tree_entry_t entry)
+{
+ ipc_hash_global_bucket_t bucket;
+ ipc_tree_entry_t this, *last;
+
+ assert(entry->ite_name == name);
+ assert(space != IS_NULL);
+ assert(entry->ite_space == space);
+ assert(obj != IO_NULL);
+ assert(entry->ite_object == obj);
+
+ assert(space->is_tree_hash > 0);
+ space->is_tree_hash--;
+
+ bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+ ihgb_lock(bucket);
+
+ for (last = &bucket->ihgb_head;
+ (this = *last) != ITE_NULL;
+ last = &this->ite_next) {
+ if (this == entry) {
+ /* found it; remove from bucket */
+
+ *last = this->ite_next;
+ break;
+ }
+ }
+ assert(this != ITE_NULL);
+
+ ihgb_unlock(bucket);
+}
+
+/*
+ * Each space has a local reverse hash table, which holds
+ * entries from the space's table. In fact, the hash table
+ * just uses a field (ie_index) in the table itself.
+ *
+ * The local hash table is an open-addressing hash table,
+ * which means that when a collision occurs, instead of
+ * throwing the entry into a bucket, the entry is rehashed
+ * to another position in the table. In this case the rehash
+ * is very simple: linear probing (ie, just increment the position).
+ * This simple rehash makes deletions tractable (they're still a pain),
+ * but it means that collisions tend to build up into clumps.
+ *
+ * Because at least one entry in the table (index 0) is always unused,
+ * there will always be room in the reverse hash table. If a table
+ * with n slots gets completely full, the reverse hash table will
+ * have one giant clump of n-1 slots and one free slot somewhere.
+ * Because entries are only entered into the reverse table if they
+ * are pure send rights (not receive, send-once, port-set,
+ * or dead-name rights), and free entries of course aren't entered,
+ * I expect the reverse hash table won't get unreasonably full.
+ *
+ * Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
+ * pp. 135-142.) may be desirable here. They can dramatically help
+ * unsuccessful lookups. But unsuccessful lookups are almost always
+ * followed by insertions, and those slow down somewhat. They
+ * also can help deletions somewhat. Successful lookups aren't affected.
+ * So possibly a small win; probably nothing significant.
+ */
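+
+/*
+ * For example, if three objects all hash to slot 5, they come to
+ * occupy slots 5, 6 and 7 (a clump of three). A lookup for the
+ * third probes 5 and 6 before finding its index at 7, and an
+ * unsuccessful lookup keeps probing until it reaches a free slot.
+ */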
+
+#define IH_LOCAL_HASH(obj, size) \
+ ((((mach_port_index_t) (obj)) >> 6) % (size))
+
+/*
+ * Routine: ipc_hash_local_lookup
+ * Purpose:
+ * Converts (space, obj) -> (name, entry).
+ * Looks in the space's local table, for table entries.
+ * Returns TRUE if an entry was found.
+ * Conditions:
+ * The space must be locked (read or write) throughout.
+ */
+
+boolean_t
+ipc_hash_local_lookup(
+ ipc_space_t space,
+ ipc_object_t obj,
+ mach_port_t *namep,
+ ipc_entry_t *entryp)
+{
+ ipc_entry_t table;
+ ipc_entry_num_t size;
+ mach_port_index_t hindex, index;
+
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+
+ table = space->is_table;
+ size = space->is_table_size;
+ hindex = IH_LOCAL_HASH(obj, size);
+
+ /*
+ * Ideally, table[hindex].ie_index is the name we want.
+ * However, we must check ie_object to verify this,
+ * because collisions can happen. In case of a collision,
+ * search farther along in the clump.
+ */
+
+ while ((index = table[hindex].ie_index) != 0) {
+ ipc_entry_t entry = &table[index];
+
+ if (entry->ie_object == obj) {
+ *namep = MACH_PORT_MAKEB(index, entry->ie_bits);
+ *entryp = entry;
+ return TRUE;
+ }
+
+ if (++hindex == size)
+ hindex = 0;
+ }
+
+ return FALSE;
+}
+
+/*
+ * Routine: ipc_hash_local_insert
+ * Purpose:
+ * Inserts an entry into the space's reverse hash table.
+ * Conditions:
+ * The space must be write-locked.
+ */
+
+void
+ipc_hash_local_insert(
+ ipc_space_t space,
+ ipc_object_t obj,
+ mach_port_index_t index,
+ ipc_entry_t entry)
+{
+ ipc_entry_t table;
+ ipc_entry_num_t size;
+ mach_port_index_t hindex;
+
+ assert(index != 0);
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+
+ table = space->is_table;
+ size = space->is_table_size;
+ hindex = IH_LOCAL_HASH(obj, size);
+
+ assert(entry == &table[index]);
+ assert(entry->ie_object == obj);
+
+ /*
+ * We want to insert at hindex, but there may be collisions.
+ * If a collision occurs, search for the end of the clump
+ * and insert there.
+ */
+
+ while (table[hindex].ie_index != 0) {
+ if (++hindex == size)
+ hindex = 0;
+ }
+
+ table[hindex].ie_index = index;
+}
+
+/*
+ * Routine: ipc_hash_local_delete
+ * Purpose:
+ * Deletes an entry from the space's reverse hash table.
+ * Conditions:
+ * The space must be write-locked.
+ */
+
+void
+ipc_hash_local_delete(
+ ipc_space_t space,
+ ipc_object_t obj,
+ mach_port_index_t index,
+ ipc_entry_t entry)
+{
+ ipc_entry_t table;
+ ipc_entry_num_t size;
+ mach_port_index_t hindex, dindex;
+
+ assert(index != MACH_PORT_NULL);
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+
+ table = space->is_table;
+ size = space->is_table_size;
+ hindex = IH_LOCAL_HASH(obj, size);
+
+ assert(entry == &table[index]);
+ assert(entry->ie_object == obj);
+
+ /*
+ * First check we have the right hindex for this index.
+ * In case of collision, we have to search farther
+ * along in this clump.
+ */
+
+ while (table[hindex].ie_index != index) {
+ if (table[hindex].ie_index == 0)
+ {
+ static int gak = 0;
+ if (gak == 0)
+ {
+ printf("gak! entry wasn't in hash table!\n");
+ gak = 1;
+ }
+ return;
+ }
+ if (++hindex == size)
+ hindex = 0;
+ }
+
+ /*
+ * Now we want to set table[hindex].ie_index = 0.
+ * But if we aren't the last index in a clump,
+ * this might cause problems for lookups of objects
+ * farther along in the clump that are displaced
+ * due to collisions. Searches for them would fail
+ * at hindex instead of succeeding.
+ *
+ * So we must check the clump after hindex for objects
+ * that are so displaced, and move one up to the new hole.
+ *
+ * hindex - index of new hole in the clump
+ * dindex - index we are checking for a displaced object
+ *
+ * When we move a displaced object up into the hole,
+ * it creates a new hole, and we have to repeat the process
+ * until we get to the end of the clump.
+ */
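+ /*
+ * For illustration: suppose slots 5, 6 and 7 form a clump and the
+ * index recorded at slot 5 is being deleted, leaving a hole there.
+ * If the object recorded at slot 6 really hashes to 5, a later
+ * lookup for it would stop at the new hole and fail; so it is
+ * moved up into slot 5, and the scan continues from the hole it
+ * leaves behind until the end of the clump is reached.
+ */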
+
+ for (dindex = hindex; index != 0; hindex = dindex) {
+ for (;;) {
+ mach_port_index_t tindex;
+ ipc_object_t tobj;
+
+ if (++dindex == size)
+ dindex = 0;
+ assert(dindex != hindex);
+
+ /* are we at the end of the clump? */
+
+ index = table[dindex].ie_index;
+ if (index == 0)
+ break;
+
+ /* is this a displaced object? */
+
+ tobj = table[index].ie_object;
+ assert(tobj != IO_NULL);
+ tindex = IH_LOCAL_HASH(tobj, size);
+
+ if ((dindex < hindex) ?
+ ((dindex < tindex) && (tindex <= hindex)) :
+ ((dindex < tindex) || (tindex <= hindex)))
+ break;
+ }
+
+ table[hindex].ie_index = index;
+ }
+}
+
+/*
+ * Routine: ipc_hash_init
+ * Purpose:
+ * Initialize the reverse hash table implementation.
+ */
+
+void
+ipc_hash_init(void)
+{
+ ipc_hash_index_t i;
+
+ /* if not configured, initialize ipc_hash_global_size */
+
+ if (ipc_hash_global_size == 0) {
+ ipc_hash_global_size = ipc_tree_entry_max >> 8;
+ if (ipc_hash_global_size < 32)
+ ipc_hash_global_size = 32;
+ }
+
+ /* make sure it is a power of two */
+
+ ipc_hash_global_mask = ipc_hash_global_size - 1;
+ if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
+ natural_t bit;
+
+ /* round up to closest power of two */
+
+ for (bit = 1;; bit <<= 1) {
+ ipc_hash_global_mask |= bit;
+ ipc_hash_global_size = ipc_hash_global_mask + 1;
+
+ if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
+ break;
+ }
+ }
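+ /*
+ * For example, a configured size of 40 ends up with mask 0x3f
+ * after the loop above, so the table is rounded up to 64 buckets
+ * and IH_GLOBAL_HASH can mask rather than divide.
+ */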
+
+ /* allocate ipc_hash_global_table */
+
+ ipc_hash_global_table = (ipc_hash_global_bucket_t)
+ kalloc((vm_size_t) (ipc_hash_global_size *
+ sizeof(struct ipc_hash_global_bucket)));
+ assert(ipc_hash_global_table != IHGB_NULL);
+
+ /* and initialize it */
+
+ for (i = 0; i < ipc_hash_global_size; i++) {
+ ipc_hash_global_bucket_t bucket;
+
+ bucket = &ipc_hash_global_table[i];
+ ihgb_lock_init(bucket);
+ bucket->ihgb_head = ITE_NULL;
+ }
+}
+
+#if MACH_IPC_DEBUG
+
+/*
+ * Routine: ipc_hash_info
+ * Purpose:
+ * Return information about the global reverse hash table.
+ * Fills the buffer with as much information as possible
+ * and returns the desired size of the buffer.
+ * Conditions:
+ * Nothing locked. The caller should provide
+ * possibly-pageable memory.
+ */
+
+
+ipc_hash_index_t
+ipc_hash_info(
+ hash_info_bucket_t *info,
+ mach_msg_type_number_t count)
+{
+ ipc_hash_index_t i;
+
+ if (ipc_hash_global_size < count)
+ count = ipc_hash_global_size;
+
+ for (i = 0; i < count; i++) {
+ ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
+ unsigned int bucket_count = 0;
+ ipc_tree_entry_t entry;
+
+ ihgb_lock(bucket);
+ for (entry = bucket->ihgb_head;
+ entry != ITE_NULL;
+ entry = entry->ite_next)
+ bucket_count++;
+ ihgb_unlock(bucket);
+
+ /* don't touch pageable memory while holding locks */
+ info[i].hib_count = bucket_count;
+ }
+
+ return ipc_hash_global_size;
+}
+
+#endif /* MACH_IPC_DEBUG */
diff --git a/ipc/ipc_hash.h b/ipc/ipc_hash.h
new file mode 100644
index 00000000..f4c2f55f
--- /dev/null
+++ b/ipc/ipc_hash.h
@@ -0,0 +1,94 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_hash.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of entry hash table operations.
+ */
+
+#ifndef _IPC_IPC_HASH_H_
+#define _IPC_IPC_HASH_H_
+
+#include <mach_ipc_debug.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+
+extern void
+ipc_hash_init();
+
+#if MACH_IPC_DEBUG
+
+extern unsigned int
+ipc_hash_info(/* hash_info_bucket_t *, unsigned int */);
+
+#endif /* MACH_IPC_DEBUG */
+
+extern boolean_t
+ipc_hash_lookup(/* ipc_space_t space, ipc_object_t obj,
+ mach_port_t *namep, ipc_entry_t *entryp */);
+
+extern void
+ipc_hash_insert(/* ipc_space_t space, ipc_object_t obj,
+ mach_port_t name, ipc_entry_t entry */);
+
+extern void
+ipc_hash_delete(/* ipc_space_t space, ipc_object_t obj,
+ mach_port_t name, ipc_entry_t entry */);
+
+/*
+ * For use by functions that know what they're doing:
+ * the global primitives, for splay tree entries,
+ * and the local primitives, for table entries.
+ */
+
+extern boolean_t
+ipc_hash_global_lookup(/* ipc_space_t space, ipc_object_t obj,
+ mach_port_t *namep, ipc_tree_entry_t *entryp */);
+
+extern void
+ipc_hash_global_insert(/* ipc_space_t space, ipc_object_t obj,
+ mach_port_t name, ipc_tree_entry_t entry */);
+
+extern void
+ipc_hash_global_delete(/* ipc_space_t space, ipc_object_t obj,
+ mach_port_t name, ipc_tree_entry_t entry */);
+
+extern boolean_t
+ipc_hash_local_lookup(/* ipc_space_t space, ipc_object_t obj,
+ mach_port_t *namep, ipc_entry_t *entryp */);
+
+extern void
+ipc_hash_local_insert(/* ipc_space_t space, ipc_object_t obj,
+ mach_port_index_t index, ipc_entry_t entry */);
+
+extern void
+ipc_hash_local_delete(/* ipc_space_t space, ipc_object_t obj,
+ mach_port_index_t index, ipc_entry_t entry */);
+
+#endif /* _IPC_IPC_HASH_H_ */
diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
new file mode 100644
index 00000000..29b08190
--- /dev/null
+++ b/ipc/ipc_init.c
@@ -0,0 +1,139 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_init.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to initialize the IPC system.
+ */
+
+#include <mach/kern_return.h>
+#include <kern/mach_param.h>
+#include <kern/ipc_host.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_init.h>
+
+
+
+vm_map_t ipc_kernel_map;
+vm_size_t ipc_kernel_map_size = 1024 * 1024;
+
+int ipc_space_max = SPACE_MAX;
+int ipc_tree_entry_max = ITE_MAX;
+int ipc_port_max = PORT_MAX;
+int ipc_pset_max = SET_MAX;
+
+/*
+ * Routine: ipc_bootstrap
+ * Purpose:
+ * Initialization needed before the kernel task
+ * can be created.
+ */
+
+void
+ipc_bootstrap(void)
+{
+ kern_return_t kr;
+
+ ipc_port_multiple_lock_init();
+
+ ipc_port_timestamp_lock_init();
+ ipc_port_timestamp_data = 0;
+
+ ipc_space_zone = zinit(sizeof(struct ipc_space),
+ ipc_space_max * sizeof(struct ipc_space),
+ sizeof(struct ipc_space),
+ IPC_ZONE_TYPE, "ipc spaces");
+
+ ipc_tree_entry_zone =
+ zinit(sizeof(struct ipc_tree_entry),
+ ipc_tree_entry_max * sizeof(struct ipc_tree_entry),
+ sizeof(struct ipc_tree_entry),
+ IPC_ZONE_TYPE, "ipc tree entries");
+
+ ipc_object_zones[IOT_PORT] =
+ zinit(sizeof(struct ipc_port),
+ ipc_port_max * sizeof(struct ipc_port),
+ sizeof(struct ipc_port),
+ ZONE_EXHAUSTIBLE, "ipc ports");
+
+ ipc_object_zones[IOT_PORT_SET] =
+ zinit(sizeof(struct ipc_pset),
+ ipc_pset_max * sizeof(struct ipc_pset),
+ sizeof(struct ipc_pset),
+ IPC_ZONE_TYPE, "ipc port sets");
+
+ /* create special spaces */
+
+ kr = ipc_space_create_special(&ipc_space_kernel);
+ assert(kr == KERN_SUCCESS);
+
+ kr = ipc_space_create_special(&ipc_space_reply);
+ assert(kr == KERN_SUCCESS);
+
+#if NORMA_IPC
+ kr = ipc_space_create_special(&ipc_space_remote);
+ assert(kr == KERN_SUCCESS);
+#endif /* NORMA_IPC */
+
+ /* initialize modules with hidden data structures */
+
+ ipc_table_init();
+ ipc_notify_init();
+ ipc_hash_init();
+ ipc_marequest_init();
+}
+
+/*
+ * Routine: ipc_init
+ * Purpose:
+ * Final initialization of the IPC system.
+ */
+
+void
+ipc_init()
+{
+ vm_offset_t min, max;
+
+ ipc_kernel_map = kmem_suballoc(kernel_map, &min, &max,
+ ipc_kernel_map_size, TRUE);
+
+ ipc_host_init();
+}
diff --git a/ipc/ipc_init.h b/ipc/ipc_init.h
new file mode 100644
index 00000000..b2f1dd4b
--- /dev/null
+++ b/ipc/ipc_init.h
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_init.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of functions to initialize the IPC system.
+ */
+
+#ifndef _IPC_IPC_INIT_H_
+#define _IPC_IPC_INIT_H_
+
+/* all IPC zones should be exhaustible */
+#define IPC_ZONE_TYPE ZONE_EXHAUSTIBLE
+
+extern int ipc_space_max;
+extern int ipc_tree_entry_max;
+extern int ipc_port_max;
+extern int ipc_pset_max;
+
+/*
+ * Exported interfaces
+ */
+
+/* IPC initialization needed before creation of kernel task */
+extern void ipc_bootstrap(void);
+
+/* Remaining IPC initialization */
+extern void ipc_init(void);
+
+#endif /* _IPC_IPC_INIT_H_ */
diff --git a/ipc/ipc_kmsg.c b/ipc/ipc_kmsg.c
new file mode 100644
index 00000000..d860fd18
--- /dev/null
+++ b/ipc/ipc_kmsg.c
@@ -0,0 +1,3484 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_kmsg.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Operations on kernel messages.
+ */
+
+#include <cpus.h>
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+#include <norma_vm.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/kalloc.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_kern.h>
+#include <ipc/port.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_right.h>
+
+#include <ipc/ipc_machdep.h>
+
+extern int copyinmap();
+extern int copyoutmap();
+void ipc_msg_print(); /* forward */
+
+#define is_misaligned(x) ( ((vm_offset_t)(x)) & (sizeof(vm_offset_t)-1) )
+#define ptr_align(x) \
+ ( ( ((vm_offset_t)(x)) + (sizeof(vm_offset_t)-1) ) & ~(sizeof(vm_offset_t)-1) )
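+
+/*
+ * For example, with a 4-byte vm_offset_t, ptr_align(0x1001) through
+ * ptr_align(0x1004) all yield 0x1004, and is_misaligned() is nonzero
+ * exactly for addresses that are not a multiple of sizeof(vm_offset_t).
+ */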
+
+ipc_kmsg_t ipc_kmsg_cache[NCPUS];
+
+/*
+ * Routine: ipc_kmsg_enqueue
+ * Purpose:
+ * Enqueue a kmsg.
+ */
+
+void
+ipc_kmsg_enqueue(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg)
+{
+ ipc_kmsg_enqueue_macro(queue, kmsg);
+}
+
+/*
+ * Routine: ipc_kmsg_dequeue
+ * Purpose:
+ * Dequeue and return a kmsg.
+ */
+
+ipc_kmsg_t
+ipc_kmsg_dequeue(
+ ipc_kmsg_queue_t queue)
+{
+ ipc_kmsg_t first;
+
+ first = ipc_kmsg_queue_first(queue);
+
+ if (first != IKM_NULL)
+ ipc_kmsg_rmqueue_first_macro(queue, first);
+
+ return first;
+}
+
+/*
+ * Routine: ipc_kmsg_rmqueue
+ * Purpose:
+ * Pull a kmsg out of a queue.
+ */
+
+void
+ipc_kmsg_rmqueue(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg)
+{
+ ipc_kmsg_t next, prev;
+
+ assert(queue->ikmq_base != IKM_NULL);
+
+ next = kmsg->ikm_next;
+ prev = kmsg->ikm_prev;
+
+ if (next == kmsg) {
+ assert(prev == kmsg);
+ assert(queue->ikmq_base == kmsg);
+
+ queue->ikmq_base = IKM_NULL;
+ } else {
+ if (queue->ikmq_base == kmsg)
+ queue->ikmq_base = next;
+
+ next->ikm_prev = prev;
+ prev->ikm_next = next;
+ }
+ /* XXX Temporary debug logic */
+ kmsg->ikm_next = IKM_BOGUS;
+ kmsg->ikm_prev = IKM_BOGUS;
+}
+
+/*
+ * Routine: ipc_kmsg_queue_next
+ * Purpose:
+ * Return the kmsg following the given kmsg.
+ * (Or IKM_NULL if it is the last one in the queue.)
+ */
+
+ipc_kmsg_t
+ipc_kmsg_queue_next(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg)
+{
+ ipc_kmsg_t next;
+
+ assert(queue->ikmq_base != IKM_NULL);
+
+ next = kmsg->ikm_next;
+ if (queue->ikmq_base == next)
+ next = IKM_NULL;
+
+ return next;
+}
+
+/*
+ * Routine: ipc_kmsg_destroy
+ * Purpose:
+ * Destroys a kernel message. Releases all rights,
+ * references, and memory held by the message.
+ * Frees the message.
+ * Conditions:
+ * No locks held.
+ */
+
+void
+ipc_kmsg_destroy(
+ ipc_kmsg_t kmsg)
+{
+ ipc_kmsg_queue_t queue;
+ boolean_t empty;
+
+ /*
+ * ipc_kmsg_clean can cause more messages to be destroyed.
+ * Curtail recursion by queueing messages. If a message
+ * is already queued, then this is a recursive call.
+ */
+
+ queue = &current_thread()->ith_messages;
+ empty = ipc_kmsg_queue_empty(queue);
+ ipc_kmsg_enqueue(queue, kmsg);
+
+ if (empty) {
+ /* must leave kmsg in queue while cleaning it */
+
+ while ((kmsg = ipc_kmsg_queue_first(queue)) != IKM_NULL) {
+ ipc_kmsg_clean(kmsg);
+ ipc_kmsg_rmqueue(queue, kmsg);
+ ikm_free(kmsg);
+ }
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_clean_body
+ * Purpose:
+ * Cleans the body of a kernel message.
+ * Releases all rights, references, and memory.
+ *
+ * The last type/data pair might stretch past eaddr.
+ * (See the usage in ipc_kmsg_copyout.)
+ * Conditions:
+ * No locks held.
+ */
+
+void
+ipc_kmsg_clean_body(saddr, eaddr)
+ vm_offset_t saddr;
+ vm_offset_t eaddr;
+{
+ while (saddr < eaddr) {
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, is_port;
+ vm_size_t length;
+
+ type = (mach_msg_type_long_t *) saddr;
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ if (((mach_msg_type_t*)type)->msgt_longform) {
+ /* This must be aligned */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ (is_misaligned(type))) {
+ saddr = ptr_align(saddr);
+ continue;
+ }
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ }
+
+ /* padding (ptrs and ports) ? */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ ((size >> 3) == sizeof(natural_t)))
+ saddr = ptr_align(saddr);
+
+ /* calculate length of data in bytes, rounding up */
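+ /*
+ * The size field counts bits, so for example 3 elements of
+ * 32 bits give (96 + 7) >> 3 = 12 bytes, and 5 elements of
+ * 1 bit round up to a single byte.
+ */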
+
+ length = ((number * size) + 7) >> 3;
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ if (is_port) {
+ ipc_object_t *objects;
+ mach_msg_type_number_t i;
+
+ if (is_inline) {
+ objects = (ipc_object_t *) saddr;
+ /* sanity check */
+ while (eaddr < (vm_offset_t)&objects[number]) number--;
+ } else {
+ objects = (ipc_object_t *)
+ * (vm_offset_t *) saddr;
+ }
+
+ /* destroy port rights carried in the message */
+
+ for (i = 0; i < number; i++) {
+ ipc_object_t object = objects[i];
+
+ if (!IO_VALID(object))
+ continue;
+
+ ipc_object_destroy(object, name);
+ }
+ }
+
+ if (is_inline) {
+ /* inline data sizes round up to int boundaries */
+
+ saddr += (length + 3) &~ 3;
+ } else {
+ vm_offset_t data = * (vm_offset_t *) saddr;
+
+ /* destroy memory carried in the message */
+
+ if (length == 0)
+ assert(data == 0);
+ else if (is_port)
+ kfree(data, length);
+ else
+ vm_map_copy_discard((vm_map_copy_t) data);
+
+ saddr += sizeof(vm_offset_t);
+ }
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_clean
+ * Purpose:
+ * Cleans a kernel message. Releases all rights,
+ * references, and memory held by the message.
+ * Conditions:
+ * No locks held.
+ */
+
+void
+ipc_kmsg_clean(kmsg)
+ ipc_kmsg_t kmsg;
+{
+ ipc_marequest_t marequest;
+ ipc_object_t object;
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+
+ marequest = kmsg->ikm_marequest;
+ if (marequest != IMAR_NULL)
+ ipc_marequest_destroy(marequest);
+
+ object = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ if (IO_VALID(object))
+ ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits));
+
+ object = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ if (IO_VALID(object))
+ ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
+
+ if (mbits & MACH_MSGH_BITS_COMPLEX) {
+ vm_offset_t saddr, eaddr;
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header +
+ kmsg->ikm_header.msgh_size;
+
+ ipc_kmsg_clean_body(saddr, eaddr);
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_clean_partial
+ * Purpose:
+ * Cleans a partially-acquired kernel message.
+ * eaddr is the address of the type specification
+ * in the body of the message that contained the error.
+ * If dolast, the memory and port rights in this last
+ * type spec are also cleaned. In that case, number
+ * specifies the number of port rights to clean.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_clean_partial(kmsg, eaddr, dolast, number)
+ ipc_kmsg_t kmsg;
+ vm_offset_t eaddr;
+ boolean_t dolast;
+ mach_msg_type_number_t number;
+{
+ ipc_object_t object;
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+ vm_offset_t saddr;
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+
+ object = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ assert(IO_VALID(object));
+ ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits));
+
+ object = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ if (IO_VALID(object))
+ ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ ipc_kmsg_clean_body(saddr, eaddr);
+
+ if (dolast) {
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t rnumber;
+ boolean_t is_inline, is_port;
+ vm_size_t length;
+
+xxx: type = (mach_msg_type_long_t *) eaddr;
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ if (((mach_msg_type_t*)type)->msgt_longform) {
+ /* This must be aligned */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ (is_misaligned(type))) {
+ eaddr = ptr_align(eaddr);
+ goto xxx;
+ }
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ rnumber = type->msgtl_number;
+ eaddr += sizeof(mach_msg_type_long_t);
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ rnumber = ((mach_msg_type_t*)type)->msgt_number;
+ eaddr += sizeof(mach_msg_type_t);
+ }
+
+ /* padding (ptrs and ports) ? */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ ((size >> 3) == sizeof(natural_t)))
+ eaddr = ptr_align(eaddr);
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = ((rnumber * size) + 7) >> 3;
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ if (is_port) {
+ ipc_object_t *objects;
+ mach_msg_type_number_t i;
+
+ objects = (ipc_object_t *)
+ (is_inline ? eaddr : * (vm_offset_t *) eaddr);
+
+ /* destroy port rights carried in the message */
+
+ for (i = 0; i < number; i++) {
+ ipc_object_t obj = objects[i];
+
+ if (!IO_VALID(obj))
+ continue;
+
+ ipc_object_destroy(obj, name);
+ }
+ }
+
+ if (!is_inline) {
+ vm_offset_t data = * (vm_offset_t *) eaddr;
+
+ /* destroy memory carried in the message */
+
+ if (length == 0)
+ assert(data == 0);
+ else if (is_port)
+ kfree(data, length);
+ else
+ vm_map_copy_discard((vm_map_copy_t) data);
+ }
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_free
+ * Purpose:
+ * Free a kernel message buffer.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_free(kmsg)
+ ipc_kmsg_t kmsg;
+{
+ vm_size_t size = kmsg->ikm_size;
+
+ switch (size) {
+#if NORMA_IPC
+ case IKM_SIZE_NORMA:
+ /* return it to the norma ipc code */
+ norma_kmsg_put(kmsg);
+ break;
+#endif /* NORMA_IPC */
+
+ case IKM_SIZE_NETWORK:
+ /* return it to the network code */
+ net_kmsg_put(kmsg);
+ break;
+
+ default:
+ kfree((vm_offset_t) kmsg, size);
+ break;
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_get
+ * Purpose:
+ * Allocates a kernel message buffer.
+ * Copies a user message to the message buffer.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Acquired a message buffer.
+ * MACH_SEND_MSG_TOO_SMALL Message smaller than a header.
+ * MACH_SEND_MSG_TOO_SMALL Message size not long-word multiple.
+ * MACH_SEND_NO_BUFFER Couldn't allocate a message buffer.
+ * MACH_SEND_INVALID_DATA Couldn't copy message data.
+ */
+
+mach_msg_return_t
+ipc_kmsg_get(msg, size, kmsgp)
+ mach_msg_header_t *msg;
+ mach_msg_size_t size;
+ ipc_kmsg_t *kmsgp;
+{
+ ipc_kmsg_t kmsg;
+
+ if ((size < sizeof(mach_msg_header_t)) || (size & 3))
+ return MACH_SEND_MSG_TOO_SMALL;
+
+ if (size <= IKM_SAVED_MSG_SIZE) {
+ kmsg = ikm_cache();
+ if (kmsg != IKM_NULL) {
+ ikm_cache() = IKM_NULL;
+ ikm_check_initialized(kmsg, IKM_SAVED_KMSG_SIZE);
+ } else {
+ kmsg = ikm_alloc(IKM_SAVED_MSG_SIZE);
+ if (kmsg == IKM_NULL)
+ return MACH_SEND_NO_BUFFER;
+ ikm_init(kmsg, IKM_SAVED_MSG_SIZE);
+ }
+ } else {
+ kmsg = ikm_alloc(size);
+ if (kmsg == IKM_NULL)
+ return MACH_SEND_NO_BUFFER;
+ ikm_init(kmsg, size);
+ }
+
+ if (copyinmsg((char *) msg, (char *) &kmsg->ikm_header, size)) {
+ ikm_free(kmsg);
+ return MACH_SEND_INVALID_DATA;
+ }
+
+ kmsg->ikm_header.msgh_size = size;
+ *kmsgp = kmsg;
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_get_from_kernel
+ * Purpose:
+ * Allocates a kernel message buffer.
+ * Copies a kernel message to the message buffer.
+ * Only resource errors are allowed.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Acquired a message buffer.
+ * MACH_SEND_NO_BUFFER Couldn't allocate a message buffer.
+ */
+
+extern mach_msg_return_t
+ipc_kmsg_get_from_kernel(msg, size, kmsgp)
+ mach_msg_header_t *msg;
+ mach_msg_size_t size;
+ ipc_kmsg_t *kmsgp;
+{
+ ipc_kmsg_t kmsg;
+
+ assert(size >= sizeof(mach_msg_header_t));
+ assert((size & 3) == 0);
+
+ kmsg = ikm_alloc(size);
+ if (kmsg == IKM_NULL)
+ return MACH_SEND_NO_BUFFER;
+ ikm_init(kmsg, size);
+
+ bcopy((char *) msg, (char *) &kmsg->ikm_header, size);
+
+ kmsg->ikm_header.msgh_size = size;
+ *kmsgp = kmsg;
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_put
+ * Purpose:
+ * Copies a message buffer to a user message.
+ * Copies only the specified number of bytes.
+ * Frees the message buffer.
+ * Conditions:
+ * Nothing locked. The message buffer must have clean
+ * header (ikm_marequest) fields.
+ * Returns:
+ * MACH_MSG_SUCCESS Copied data out of message buffer.
+ * MACH_RCV_INVALID_DATA Couldn't copy to user message.
+ */
+
+mach_msg_return_t
+ipc_kmsg_put(msg, kmsg, size)
+ mach_msg_header_t *msg;
+ ipc_kmsg_t kmsg;
+ mach_msg_size_t size;
+{
+ mach_msg_return_t mr;
+
+ ikm_check_initialized(kmsg, kmsg->ikm_size);
+
+ if (copyoutmsg((char *) &kmsg->ikm_header, (char *) msg, size))
+ mr = MACH_RCV_INVALID_DATA;
+ else
+ mr = MACH_MSG_SUCCESS;
+
+ if ((kmsg->ikm_size == IKM_SAVED_KMSG_SIZE) &&
+ (ikm_cache() == IKM_NULL))
+ ikm_cache() = kmsg;
+ else
+ ikm_free(kmsg);
+
+ return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_put_to_kernel
+ * Purpose:
+ * Copies a message buffer to a kernel message.
+ * Frees the message buffer.
+ * No errors allowed.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_put_to_kernel(
+ mach_msg_header_t *msg,
+ ipc_kmsg_t kmsg,
+ mach_msg_size_t size)
+{
+#if DIPC
+ assert(!KMSG_IN_DIPC(kmsg));
+#endif /* DIPC */
+
+ (void) memcpy((void *) msg, (const void *) &kmsg->ikm_header, size);
+
+ ikm_free(kmsg);
+}
+
+/*
+ * Routine: ipc_kmsg_copyin_header
+ * Purpose:
+ * "Copy-in" port rights in the header of a message.
+ * Operates atomically; if it doesn't succeed the
+ * message header and the space are left untouched.
+ * If it does succeed the remote/local port fields
+ * contain object pointers instead of port names,
+ * and the bits field is updated. The destination port
+ * will be a valid port pointer.
+ *
+ * The notify argument implements the MACH_SEND_CANCEL option.
+ * If it is not MACH_PORT_NULL, it should name a receive right.
+ * If the processing of the destination port would generate
+ * a port-deleted notification (because the right for the
+ * destination port is destroyed and it had a request for
+ * a dead-name notification registered), and the port-deleted
+ * notification would be sent to the named receive right,
+ * then it isn't sent and the send-once right for the notify
+ * port is quietly destroyed.
+ *
+ * [MACH_IPC_COMPAT] There is an atomicity problem if the
+ * reply port is a compat entry and dies at an inopportune
+ * time. This doesn't have any serious consequences
+ * (an observant user task might conceivably notice that
+ * the destination and reply ports were handled inconsistently),
+ * only happens in compat mode, and is extremely unlikely.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyin.
+ * MACH_SEND_INVALID_HEADER
+ * Illegal value in the message header bits.
+ * MACH_SEND_INVALID_DEST The space is dead.
+ * MACH_SEND_INVALID_NOTIFY
+ * Notify is non-null and doesn't name a receive right.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyin_header(msg, space, notify)
+ mach_msg_header_t *msg;
+ ipc_space_t space;
+ mach_port_t notify;
+{
+ mach_msg_bits_t mbits = msg->msgh_bits &~ MACH_MSGH_BITS_CIRCULAR;
+ mach_port_t dest_name = msg->msgh_remote_port;
+ mach_port_t reply_name = msg->msgh_local_port;
+ kern_return_t kr;
+
+#ifndef MIGRATING_THREADS
+ /* first check for common cases */
+
+ if (notify == MACH_PORT_NULL) switch (MACH_MSGH_BITS_PORTS(mbits)) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0): {
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_port_t dest_port;
+
+ /* sending an asynchronous message */
+
+ if (reply_name != MACH_PORT_NULL)
+ break;
+
+ is_read_lock(space);
+ if (!space->is_active)
+ goto abort_async;
+
+ /* optimized ipc_entry_lookup */
+
+ {
+ mach_port_index_t index = MACH_PORT_INDEX(dest_name);
+ mach_port_gen_t gen = MACH_PORT_GEN(dest_name);
+
+ if (index >= space->is_table_size)
+ goto abort_async;
+
+ entry = &space->is_table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
+ (gen | MACH_PORT_TYPE_SEND))
+ goto abort_async;
+ }
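+
+	/*
+	 *	The single comparison above checks both that the name's
+	 *	generation matches the entry and that the entry holds a
+	 *	send right; anything else bails out to the general
+	 *	(unoptimized) path below.
+	 */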
+
+ /* optimized ipc_right_copyin */
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ ip_lock(dest_port);
+ /* can unlock space now without compromising atomicity */
+ is_read_unlock(space);
+
+ if (!ip_active(dest_port)) {
+ ip_unlock(dest_port);
+ break;
+ }
+
+ assert(dest_port->ip_srights > 0);
+ dest_port->ip_srights++;
+ ip_reference(dest_port);
+ ip_unlock(dest_port);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ return MACH_MSG_SUCCESS;
+
+ abort_async:
+ is_read_unlock(space);
+ break;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE): {
+ ipc_entry_num_t size;
+ ipc_entry_t table;
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_port_t dest_port, reply_port;
+
+ /* sending a request message */
+
+ is_read_lock(space);
+ if (!space->is_active)
+ goto abort_request;
+
+ size = space->is_table_size;
+ table = space->is_table;
+
+ /* optimized ipc_entry_lookup of dest_name */
+
+ {
+ mach_port_index_t index = MACH_PORT_INDEX(dest_name);
+ mach_port_gen_t gen = MACH_PORT_GEN(dest_name);
+
+ if (index >= size)
+ goto abort_request;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
+ (gen | MACH_PORT_TYPE_SEND))
+ goto abort_request;
+ }
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ /* optimized ipc_entry_lookup of reply_name */
+
+ {
+ mach_port_index_t index = MACH_PORT_INDEX(reply_name);
+ mach_port_gen_t gen = MACH_PORT_GEN(reply_name);
+
+ if (index >= size)
+ goto abort_request;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_RECEIVE)) !=
+ (gen | MACH_PORT_TYPE_RECEIVE))
+ goto abort_request;
+ }
+
+ reply_port = (ipc_port_t) entry->ie_object;
+ assert(reply_port != IP_NULL);
+
+ /*
+ * To do an atomic copyin, need simultaneous
+ * locks on both ports and the space. If
+ * dest_port == reply_port, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port) || !ip_lock_try(reply_port)) {
+ ip_unlock(dest_port);
+ goto abort_request;
+ }
+ /* can unlock space now without compromising atomicity */
+ is_read_unlock(space);
+
+ assert(dest_port->ip_srights > 0);
+ dest_port->ip_srights++;
+ ip_reference(dest_port);
+ ip_unlock(dest_port);
+
+ assert(ip_active(reply_port));
+ assert(reply_port->ip_receiver_name == reply_name);
+ assert(reply_port->ip_receiver == space);
+
+ reply_port->ip_sorights++;
+ ip_reference(reply_port);
+ ip_unlock(reply_port);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ msg->msgh_local_port = (mach_port_t) reply_port;
+ return MACH_MSG_SUCCESS;
+
+ abort_request:
+ is_read_unlock(space);
+ break;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
+ mach_port_index_t index;
+ mach_port_gen_t gen;
+ ipc_entry_t table;
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_port_t dest_port;
+
+ /* sending a reply message */
+
+ if (reply_name != MACH_PORT_NULL)
+ break;
+
+ is_write_lock(space);
+ if (!space->is_active)
+ goto abort_reply;
+
+ /* optimized ipc_entry_lookup */
+
+ table = space->is_table;
+
+ index = MACH_PORT_INDEX(dest_name);
+ gen = MACH_PORT_GEN(dest_name);
+
+ if (index >= space->is_table_size)
+ goto abort_reply;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number, collision bit, and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|IE_BITS_COLLISION|
+ MACH_PORT_TYPE_SEND_ONCE)) !=
+ (gen | MACH_PORT_TYPE_SEND_ONCE))
+ goto abort_reply;
+
+ /* optimized ipc_right_copyin */
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(bits) == 1);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+
+ if (entry->ie_request != 0)
+ goto abort_reply;
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port)) {
+ ip_unlock(dest_port);
+ goto abort_reply;
+ }
+
+ assert(dest_port->ip_sorights > 0);
+ ip_unlock(dest_port);
+
+ /* optimized ipc_entry_dealloc */
+
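+	/*
+	 *	Push the now-empty slot back onto the free list threaded
+	 *	through the ie_next fields (headed at table entry 0),
+	 *	keeping only the generation bits so the old name goes stale.
+	 */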
+ entry->ie_next = table->ie_next;
+ table->ie_next = index;
+ entry->ie_bits = gen;
+ entry->ie_object = IO_NULL;
+ is_write_unlock(space);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ 0));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ return MACH_MSG_SUCCESS;
+
+ abort_reply:
+ is_write_unlock(space);
+ break;
+ }
+
+ default:
+ /* don't bother optimizing */
+ break;
+ }
+#endif /* MIGRATING_THREADS */
+
+ {
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+ ipc_object_t dest_port, reply_port;
+ ipc_port_t dest_soright, reply_soright;
+ ipc_port_t notify_port = 0; /* '=0' to quiet gcc warnings */
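+
+	/*
+	 *	msgh_bits carries the disposition for the destination
+	 *	(remote) port and the reply (local) port in separate
+	 *	fields; MACH_MSGH_BITS_OTHER() preserves the remaining
+	 *	flag bits, such as MACH_MSGH_BITS_COMPLEX.
+	 */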
+
+ if (!MACH_MSG_TYPE_PORT_ANY_SEND(dest_type))
+ return MACH_SEND_INVALID_HEADER;
+
+ if ((reply_type == 0) ?
+ (reply_name != MACH_PORT_NULL) :
+ !MACH_MSG_TYPE_PORT_ANY_SEND(reply_type))
+ return MACH_SEND_INVALID_HEADER;
+
+ is_write_lock(space);
+ if (!space->is_active)
+ goto invalid_dest;
+
+ if (notify != MACH_PORT_NULL) {
+ ipc_entry_t entry;
+
+ if (((entry = ipc_entry_lookup(space, notify)) == IE_NULL) ||
+ ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)) {
+ is_write_unlock(space);
+ return MACH_SEND_INVALID_NOTIFY;
+ }
+
+ notify_port = (ipc_port_t) entry->ie_object;
+ }
+
+ if (dest_name == reply_name) {
+ ipc_entry_t entry;
+ mach_port_t name = dest_name;
+
+ /*
+ * Destination and reply ports are the same!
+ * This is a little tedious to make atomic, because
+ * there are 25 combinations of dest_type/reply_type.
+ * However, most are easy. If either is move-sonce,
+	 * then there must be an error. If either is
+ * make-send or make-sonce, then we must be looking
+ * at a receive right so the port can't die.
+ * The hard cases are the combinations of
+ * copy-send and make-send.
+ */
+
+ entry = ipc_entry_lookup(space, name);
+ if (entry == IE_NULL)
+ goto invalid_dest;
+
+ assert(reply_type != 0); /* because name not null */
+
+ if (!ipc_right_copyin_check(space, name, entry, reply_type))
+ goto invalid_reply;
+
+ if ((dest_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) ||
+ (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE)) {
+ /*
+ * Why must there be an error? To get a valid
+ * destination, this entry must name a live
+ * port (not a dead name or dead port). However
+ * a successful move-sonce will destroy a
+ * live entry. Therefore the other copyin,
+ * whatever it is, would fail. We've already
+ * checked for reply port errors above,
+ * so report a destination error.
+ */
+
+ goto invalid_dest;
+ } else if ((dest_type == MACH_MSG_TYPE_MAKE_SEND) ||
+ (dest_type == MACH_MSG_TYPE_MAKE_SEND_ONCE) ||
+ (reply_type == MACH_MSG_TYPE_MAKE_SEND) ||
+ (reply_type == MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
+ kr = ipc_right_copyin(space, name, entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /*
+ * Either dest or reply needs a receive right.
+ * We know the receive right is there, because
+ * of the copyin_check and copyin calls. Hence
+ * the port is not in danger of dying. If dest
+ * used the receive right, then the right needed
+ * by reply (and verified by copyin_check) will
+ * still be there.
+ */
+
+ assert(IO_VALID(dest_port));
+ assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
+ assert(dest_soright == IP_NULL);
+
+ kr = ipc_right_copyin(space, name, entry,
+ reply_type, TRUE,
+ &reply_port, &reply_soright);
+
+ assert(kr == KERN_SUCCESS);
+ assert(reply_port == dest_port);
+ assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
+ assert(reply_soright == IP_NULL);
+ } else if ((dest_type == MACH_MSG_TYPE_COPY_SEND) &&
+ (reply_type == MACH_MSG_TYPE_COPY_SEND)) {
+ /*
+ * To make this atomic, just do one copy-send,
+ * and dup the send right we get out.
+ */
+
+ kr = ipc_right_copyin(space, name, entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
+ assert(dest_soright == IP_NULL);
+
+ /*
+ * It's OK if the port we got is dead now,
+ * so reply_port is IP_DEAD, because the msg
+ * won't go anywhere anyway.
+ */
+
+ reply_port = (ipc_object_t)
+ ipc_port_copy_send((ipc_port_t) dest_port);
+ reply_soright = IP_NULL;
+ } else if ((dest_type == MACH_MSG_TYPE_MOVE_SEND) &&
+ (reply_type == MACH_MSG_TYPE_MOVE_SEND)) {
+ /*
+ * This is an easy case. Just use our
+ * handy-dandy special-purpose copyin call
+ * to get two send rights for the price of one.
+ */
+
+ kr = ipc_right_copyin_two(space, name, entry,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /* the entry might need to be deallocated */
+
+ if (IE_BITS_TYPE(entry->ie_bits)
+ == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, name, entry);
+
+ reply_port = dest_port;
+ reply_soright = IP_NULL;
+ } else {
+ ipc_port_t soright;
+
+ assert(((dest_type == MACH_MSG_TYPE_COPY_SEND) &&
+ (reply_type == MACH_MSG_TYPE_MOVE_SEND)) ||
+ ((dest_type == MACH_MSG_TYPE_MOVE_SEND) &&
+ (reply_type == MACH_MSG_TYPE_COPY_SEND)));
+
+ /*
+ * To make this atomic, just do a move-send,
+ * and dup the send right we get out.
+ */
+
+ kr = ipc_right_copyin(space, name, entry,
+ MACH_MSG_TYPE_MOVE_SEND, FALSE,
+ &dest_port, &soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /* the entry might need to be deallocated */
+
+ if (IE_BITS_TYPE(entry->ie_bits)
+ == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, name, entry);
+
+ /*
+ * It's OK if the port we got is dead now,
+ * so reply_port is IP_DEAD, because the msg
+ * won't go anywhere anyway.
+ */
+
+ reply_port = (ipc_object_t)
+ ipc_port_copy_send((ipc_port_t) dest_port);
+
+ if (dest_type == MACH_MSG_TYPE_MOVE_SEND) {
+ dest_soright = soright;
+ reply_soright = IP_NULL;
+ } else {
+ dest_soright = IP_NULL;
+ reply_soright = soright;
+ }
+ }
+ } else if (!MACH_PORT_VALID(reply_name)) {
+ ipc_entry_t entry;
+
+ /*
+ * No reply port! This is an easy case
+ * to make atomic. Just copyin the destination.
+ */
+
+ entry = ipc_entry_lookup(space, dest_name);
+ if (entry == IE_NULL)
+ goto invalid_dest;
+
+ kr = ipc_right_copyin(space, dest_name, entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /* the entry might need to be deallocated */
+
+ if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, dest_name, entry);
+
+ reply_port = (ipc_object_t) reply_name;
+ reply_soright = IP_NULL;
+ } else {
+ ipc_entry_t dest_entry, reply_entry;
+ ipc_port_t saved_reply;
+
+ /*
+ * This is the tough case to make atomic.
+ * The difficult problem is serializing with port death.
+ * At the time we copyin dest_port, it must be alive.
+ * If reply_port is alive when we copyin it, then
+ * we are OK, because we serialize before the death
+ * of both ports. Assume reply_port is dead at copyin.
+ * Then if dest_port dies/died after reply_port died,
+ * we are OK, because we serialize between the death
+ * of the two ports. So the bad case is when dest_port
+ * dies after its copyin, reply_port dies before its
+ * copyin, and dest_port dies before reply_port. Then
+ * the copyins operated as if dest_port was alive
+ * and reply_port was dead, which shouldn't have happened
+ * because they died in the other order.
+ *
+ * We handle the bad case by undoing the copyins
+ * (which is only possible because the ports are dead)
+ * and failing with MACH_SEND_INVALID_DEST, serializing
+ * after the death of the ports.
+ *
+ * Note that it is easy for a user task to tell if
+ * a copyin happened before or after a port died.
+ * For example, suppose both dest and reply are
+ * send-once rights (types are both move-sonce) and
+ * both rights have dead-name requests registered.
+ * If a port dies before copyin, a dead-name notification
+ * is generated and the dead name's urefs are incremented,
+ * and if the copyin happens first, a port-deleted
+ * notification is generated.
+ *
+ * Note that although the entries are different,
+ * dest_port and reply_port might still be the same.
+ */
+
+ dest_entry = ipc_entry_lookup(space, dest_name);
+ if (dest_entry == IE_NULL)
+ goto invalid_dest;
+
+ reply_entry = ipc_entry_lookup(space, reply_name);
+ if (reply_entry == IE_NULL)
+ goto invalid_reply;
+
+ assert(dest_entry != reply_entry); /* names are not equal */
+ assert(reply_type != 0); /* because reply_name not null */
+
+ if (!ipc_right_copyin_check(space, reply_name, reply_entry,
+ reply_type))
+ goto invalid_reply;
+
+ kr = ipc_right_copyin(space, dest_name, dest_entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ assert(IO_VALID(dest_port));
+
+ saved_reply = (ipc_port_t) reply_entry->ie_object;
+ /* might be IP_NULL, if this is a dead name */
+ if (saved_reply != IP_NULL)
+ ipc_port_reference(saved_reply);
+
+ kr = ipc_right_copyin(space, reply_name, reply_entry,
+ reply_type, TRUE,
+ &reply_port, &reply_soright);
+#if MACH_IPC_COMPAT
+ if (kr != KERN_SUCCESS) {
+ assert(kr == KERN_INVALID_NAME);
+
+ /*
+ * Oops. This must have been a compat entry
+ * and the port died after the check above.
+ * We should back out the copyin of dest_port,
+ * and report MACH_SEND_INVALID_REPLY, but
+ * if dest_port is alive we can't always do that.
+ * Punt and pretend we got IO_DEAD, skipping
+ * further hairy atomicity problems.
+ */
+
+ reply_port = IO_DEAD;
+ reply_soright = IP_NULL;
+ goto skip_reply_checks;
+ }
+#else /* MACH_IPC_COMPAT */
+ assert(kr == KERN_SUCCESS);
+#endif /* MACH_IPC_COMPAT */
+
+ if ((saved_reply != IP_NULL) && (reply_port == IO_DEAD)) {
+ ipc_port_t dest = (ipc_port_t) dest_port;
+ ipc_port_timestamp_t timestamp;
+ boolean_t must_undo;
+
+ /*
+ * The reply port died before copyin.
+ * Check if dest port died before reply.
+ */
+
+ ip_lock(saved_reply);
+ assert(!ip_active(saved_reply));
+ timestamp = saved_reply->ip_timestamp;
+ ip_unlock(saved_reply);
+
+ ip_lock(dest);
+ must_undo = (!ip_active(dest) &&
+ IP_TIMESTAMP_ORDER(dest->ip_timestamp,
+ timestamp));
+ ip_unlock(dest);
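+
+			/*
+			 *	must_undo is true exactly when dest is also
+			 *	dead and died before the reply port did,
+			 *	i.e. the bad ordering described above.
+			 */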
+
+ if (must_undo) {
+ /*
+ * Our worst nightmares are realized.
+ * Both destination and reply ports
+ * are dead, but in the wrong order,
+ * so we must undo the copyins and
+ * possibly generate a dead-name notif.
+ */
+
+ ipc_right_copyin_undo(
+ space, dest_name, dest_entry,
+ dest_type, dest_port,
+ dest_soright);
+ /* dest_entry may be deallocated now */
+
+ ipc_right_copyin_undo(
+ space, reply_name, reply_entry,
+ reply_type, reply_port,
+ reply_soright);
+ /* reply_entry may be deallocated now */
+
+ is_write_unlock(space);
+
+ if (dest_soright != IP_NULL)
+ ipc_notify_dead_name(dest_soright,
+ dest_name);
+ assert(reply_soright == IP_NULL);
+
+ ipc_port_release(saved_reply);
+ return MACH_SEND_INVALID_DEST;
+ }
+ }
+
+ /* the entries might need to be deallocated */
+
+ if (IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, reply_name, reply_entry);
+
+#if MACH_IPC_COMPAT
+ skip_reply_checks:
+ /*
+ * We jump here if the reply entry was a compat entry
+ * and the port died on us. In this case, the copyin
+ * code already deallocated reply_entry.
+ */
+#endif /* MACH_IPC_COMPAT */
+
+ if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, dest_name, dest_entry);
+
+ if (saved_reply != IP_NULL)
+ ipc_port_release(saved_reply);
+ }
+
+ /*
+ * At this point, dest_port, reply_port,
+ * dest_soright, reply_soright are all initialized.
+ * Any defunct entries have been deallocated.
+ * The space is still write-locked, and we need to
+ * make the MACH_SEND_CANCEL check. The notify_port pointer
+ * is still usable, because the copyin code above won't ever
+ * deallocate a receive right, so its entry still exists
+ * and holds a ref. Note notify_port might even equal
+ * dest_port or reply_port.
+ */
+
+ if ((notify != MACH_PORT_NULL) &&
+ (dest_soright == notify_port)) {
+ ipc_port_release_sonce(dest_soright);
+ dest_soright = IP_NULL;
+ }
+
+ is_write_unlock(space);
+
+ if (dest_soright != IP_NULL)
+ ipc_notify_port_deleted(dest_soright, dest_name);
+
+ if (reply_soright != IP_NULL)
+ ipc_notify_port_deleted(reply_soright, reply_name);
+
+ dest_type = ipc_object_copyin_type(dest_type);
+ reply_type = ipc_object_copyin_type(reply_type);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(dest_type, reply_type));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ msg->msgh_local_port = (mach_port_t) reply_port;
+ }
+
+ return MACH_MSG_SUCCESS;
+
+ invalid_dest:
+ is_write_unlock(space);
+ return MACH_SEND_INVALID_DEST;
+
+ invalid_reply:
+ is_write_unlock(space);
+ return MACH_SEND_INVALID_REPLY;
+}
+
+mach_msg_return_t
+ipc_kmsg_copyin_body(kmsg, space, map)
+ ipc_kmsg_t kmsg;
+ ipc_space_t space;
+ vm_map_t map;
+{
+ ipc_object_t dest;
+ vm_offset_t saddr, eaddr;
+ boolean_t complex;
+ mach_msg_return_t mr;
+ boolean_t use_page_lists, steal_pages;
+
+ dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ complex = FALSE;
+ use_page_lists = ipc_kobject_vm_page_list(ip_kotype((ipc_port_t)dest));
+ steal_pages = ipc_kobject_vm_page_steal(ip_kotype((ipc_port_t)dest));
+
+#if NORMA_IPC
+ if (IP_NORMA_IS_PROXY((ipc_port_t) dest)) {
+ use_page_lists = TRUE;
+ steal_pages = TRUE;
+ }
+#endif /* NORMA_IPC */
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;
+
+ while (saddr < eaddr) {
+ vm_offset_t taddr = saddr;
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, longform, dealloc, is_port;
+ vm_offset_t data;
+ vm_size_t length;
+ kern_return_t kr;
+
+ type = (mach_msg_type_long_t *) saddr;
+
+ if (((eaddr - saddr) < sizeof(mach_msg_type_t)) ||
+ ((longform = ((mach_msg_type_t*)type)->msgt_longform) &&
+ ((eaddr - saddr) < sizeof(mach_msg_type_long_t)))) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_MSG_TOO_SMALL;
+ }
+
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ dealloc = ((mach_msg_type_t*)type)->msgt_deallocate;
+ if (longform) {
+ /* This must be aligned */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ (is_misaligned(type))) {
+ saddr = ptr_align(saddr);
+ continue;
+ }
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ }
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ if ((is_port && (size != PORT_T_SIZE_IN_BITS)) ||
+ (longform && ((type->msgtl_header.msgt_name != 0) ||
+ (type->msgtl_header.msgt_size != 0) ||
+ (type->msgtl_header.msgt_number != 0))) ||
+ (((mach_msg_type_t*)type)->msgt_unused != 0) ||
+ (dealloc && is_inline)) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_INVALID_TYPE;
+ }
+
+ /* padding (ptrs and ports) ? */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ ((size >> 3) == sizeof(natural_t)))
+ saddr = ptr_align(saddr);
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = ((number * size) + 7) >> 3;
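+
+		/*
+		 *	For example, number = 3 items of size = 32 bits
+		 *	gives length = (96 + 7) >> 3 = 12 bytes; the +7
+		 *	rounds any sub-byte remainder up to a whole byte.
+		 *	Inline data is then padded to a 4-byte boundary
+		 *	((length + 3) &~ 3).
+		 */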
+
+ if (is_inline) {
+ vm_size_t amount;
+
+ /* inline data sizes round up to int boundaries */
+
+ amount = (length + 3) &~ 3;
+ if ((eaddr - saddr) < amount) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_MSG_TOO_SMALL;
+ }
+
+ data = saddr;
+ saddr += amount;
+ } else {
+ vm_offset_t addr;
+
+ if (sizeof(vm_offset_t) > sizeof(mach_msg_type_t))
+ saddr = ptr_align(saddr);
+
+ if ((eaddr - saddr) < sizeof(vm_offset_t)) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_MSG_TOO_SMALL;
+ }
+
+ /* grab the out-of-line data */
+
+ addr = * (vm_offset_t *) saddr;
+
+ if (length == 0)
+ data = 0;
+ else if (is_port) {
+ data = kalloc(length);
+ if (data == 0)
+ goto invalid_memory;
+
+ if (copyinmap(map, (char *) addr,
+ (char *) data, length) ||
+ (dealloc &&
+ (vm_deallocate(map, addr, length) !=
+ KERN_SUCCESS))) {
+ kfree(data, length);
+ goto invalid_memory;
+ }
+ } else {
+ vm_map_copy_t copy;
+
+ if (use_page_lists) {
+ kr = vm_map_copyin_page_list(map,
+ addr, length, dealloc,
+ steal_pages, &copy, FALSE);
+ } else {
+ kr = vm_map_copyin(map, addr, length,
+ dealloc, &copy);
+ }
+ if (kr != KERN_SUCCESS) {
+ invalid_memory:
+ ipc_kmsg_clean_partial(kmsg, taddr,
+ FALSE, 0);
+ return MACH_SEND_INVALID_MEMORY;
+ }
+
+ data = (vm_offset_t) copy;
+ }
+
+ * (vm_offset_t *) saddr = data;
+ saddr += sizeof(vm_offset_t);
+ complex = TRUE;
+ }
+
+ if (is_port) {
+ mach_msg_type_name_t newname =
+ ipc_object_copyin_type(name);
+ ipc_object_t *objects = (ipc_object_t *) data;
+ mach_msg_type_number_t i;
+
+ if (longform)
+ type->msgtl_name = newname;
+ else
+ ((mach_msg_type_t*)type)->msgt_name = newname;
+
+ for (i = 0; i < number; i++) {
+ mach_port_t port = (mach_port_t) objects[i];
+ ipc_object_t object;
+
+ if (!MACH_PORT_VALID(port))
+ continue;
+
+ kr = ipc_object_copyin(space, port,
+ name, &object);
+ if (kr != KERN_SUCCESS) {
+ ipc_kmsg_clean_partial(kmsg, taddr,
+ TRUE, i);
+ return MACH_SEND_INVALID_RIGHT;
+ }
+
+ if ((newname == MACH_MSG_TYPE_PORT_RECEIVE) &&
+ ipc_port_check_circularity(
+ (ipc_port_t) object,
+ (ipc_port_t) dest))
+ kmsg->ikm_header.msgh_bits |=
+ MACH_MSGH_BITS_CIRCULAR;
+
+ objects[i] = object;
+ }
+
+ complex = TRUE;
+ }
+ }
+
+ if (!complex)
+ kmsg->ikm_header.msgh_bits &= ~MACH_MSGH_BITS_COMPLEX;
+
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_copyin
+ * Purpose:
+ * "Copy-in" port rights and out-of-line memory
+ * in the message.
+ *
+ * In all failure cases, the message is left holding
+ * no rights or memory. However, the message buffer
+ * is not deallocated. If successful, the message
+ * contains a valid destination port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyin.
+ * MACH_SEND_INVALID_HEADER
+ * Illegal value in the message header bits.
+ * MACH_SEND_INVALID_NOTIFY Bad notify port.
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
+ * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
+ * MACH_SEND_INVALID_TYPE Bad type specification.
+ * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyin(kmsg, space, map, notify)
+ ipc_kmsg_t kmsg;
+ ipc_space_t space;
+ vm_map_t map;
+ mach_port_t notify;
+{
+ mach_msg_return_t mr;
+
+ mr = ipc_kmsg_copyin_header(&kmsg->ikm_header, space, notify);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ if ((kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0)
+ return MACH_MSG_SUCCESS;
+
+ return ipc_kmsg_copyin_body(kmsg, space, map);
+}
+
+/*
+ * Routine: ipc_kmsg_copyin_from_kernel
+ * Purpose:
+ * "Copy-in" port rights and out-of-line memory
+ * in a message sent from the kernel.
+ *
+ * Because the message comes from the kernel,
+ * the implementation assumes there are no errors
+ * or peculiarities in the message.
+ *
+ *	Marks the message with MACH_MSGH_BITS_CIRCULAR if
+ *	queueing it would result in a circularity.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_copyin_from_kernel(
+ ipc_kmsg_t kmsg)
+{
+ mach_msg_bits_t bits = kmsg->ikm_header.msgh_bits;
+ mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
+ mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
+ ipc_object_t remote = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ ipc_object_t local = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ vm_offset_t saddr, eaddr;
+
+ /* translate the destination and reply ports */
+
+ ipc_object_copyin_from_kernel(remote, rname);
+ if (IO_VALID(local))
+ ipc_object_copyin_from_kernel(local, lname);
+
+ /*
+ * The common case is a complex message with no reply port,
+ * because that is what the memory_object interface uses.
+ */
+
+ if (bits == (MACH_MSGH_BITS_COMPLEX |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
+ bits = (MACH_MSGH_BITS_COMPLEX |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));
+
+ kmsg->ikm_header.msgh_bits = bits;
+ } else {
+ bits = (MACH_MSGH_BITS_OTHER(bits) |
+ MACH_MSGH_BITS(ipc_object_copyin_type(rname),
+ ipc_object_copyin_type(lname)));
+
+ kmsg->ikm_header.msgh_bits = bits;
+ if ((bits & MACH_MSGH_BITS_COMPLEX) == 0)
+ return;
+ }
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;
+
+ while (saddr < eaddr) {
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, longform, is_port;
+ vm_offset_t data;
+ vm_size_t length;
+
+ type = (mach_msg_type_long_t *) saddr;
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ longform = ((mach_msg_type_t*)type)->msgt_longform;
+ /* type->msgtl_header.msgt_deallocate not used */
+ if (longform) {
+ /* This must be aligned */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ (is_misaligned(type))) {
+ saddr = ptr_align(saddr);
+ continue;
+ }
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ }
+
+ /* padding (ptrs and ports) ? */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ ((size >> 3) == sizeof(natural_t)))
+ saddr = ptr_align(saddr);
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = ((number * size) + 7) >> 3;
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ if (is_inline) {
+ /* inline data sizes round up to int boundaries */
+
+ data = saddr;
+ saddr += (length + 3) &~ 3;
+ } else {
+ /*
+ * The sender should supply ready-made memory
+ * for us, so we don't need to do anything.
+ */
+
+ data = * (vm_offset_t *) saddr;
+ saddr += sizeof(vm_offset_t);
+ }
+
+ if (is_port) {
+ mach_msg_type_name_t newname =
+ ipc_object_copyin_type(name);
+ ipc_object_t *objects = (ipc_object_t *) data;
+ mach_msg_type_number_t i;
+
+ if (longform)
+ type->msgtl_name = newname;
+ else
+ ((mach_msg_type_t*)type)->msgt_name = newname;
+ for (i = 0; i < number; i++) {
+ ipc_object_t object = objects[i];
+
+ if (!IO_VALID(object))
+ continue;
+
+ ipc_object_copyin_from_kernel(object, name);
+
+ if ((newname == MACH_MSG_TYPE_PORT_RECEIVE) &&
+ ipc_port_check_circularity(
+ (ipc_port_t) object,
+ (ipc_port_t) remote))
+ kmsg->ikm_header.msgh_bits |=
+ MACH_MSGH_BITS_CIRCULAR;
+ }
+ }
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_header
+ * Purpose:
+ * "Copy-out" port rights in the header of a message.
+ * Operates atomically; if it doesn't succeed the
+ * message header and the space are left untouched.
+ * If it does succeed the remote/local port fields
+ * contain port names instead of object pointers,
+ * and the bits field is updated.
+ *
+ * The notify argument implements the MACH_RCV_NOTIFY option.
+ * If it is not MACH_PORT_NULL, it should name a receive right.
+ * If the process of receiving the reply port creates a
+ * new right in the receiving task, then the new right is
+ * automatically registered for a dead-name notification,
+ * with the notify port supplying the send-once right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Copied out port rights.
+ * MACH_RCV_INVALID_NOTIFY
+ * Notify is non-null and doesn't name a receive right.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
+ * The space is dead.
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
+ * No room in space for another name.
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
+ * Couldn't allocate memory for the reply port.
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
+ * Couldn't allocate memory for the dead-name request.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_header(msg, space, notify)
+ mach_msg_header_t *msg;
+ ipc_space_t space;
+ mach_port_t notify;
+{
+ mach_msg_bits_t mbits = msg->msgh_bits;
+ ipc_port_t dest = (ipc_port_t) msg->msgh_remote_port;
+
+ assert(IP_VALID(dest));
+
+#ifndef MIGRATING_THREADS
+ /* first check for common cases */
+
+ if (notify == MACH_PORT_NULL) switch (MACH_MSGH_BITS_PORTS(mbits)) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0): {
+ mach_port_t dest_name;
+ ipc_port_t nsrequest;
+
+ /* receiving an asynchronous message */
+
+ ip_lock(dest);
+ if (!ip_active(dest)) {
+ ip_unlock(dest);
+ break;
+ }
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest->ip_srights > 0);
+ ip_release(dest);
+
+ if (dest->ip_receiver == space)
+ dest_name = dest->ip_receiver_name;
+ else
+ dest_name = MACH_PORT_NULL;
+
+ if ((--dest->ip_srights == 0) &&
+ ((nsrequest = dest->ip_nsrequest) != IP_NULL)) {
+ mach_port_mscount_t mscount;
+
+ dest->ip_nsrequest = IP_NULL;
+ mscount = dest->ip_mscount;
+ ip_unlock(dest);
+
+ ipc_notify_no_senders(nsrequest, mscount);
+ } else
+ ip_unlock(dest);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND));
+ msg->msgh_local_port = dest_name;
+ msg->msgh_remote_port = MACH_PORT_NULL;
+ return MACH_MSG_SUCCESS;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE): {
+ ipc_entry_t table;
+ mach_port_index_t index;
+ ipc_entry_t entry;
+ ipc_port_t reply = (ipc_port_t) msg->msgh_local_port;
+ mach_port_t dest_name, reply_name;
+ ipc_port_t nsrequest;
+
+ /* receiving a request message */
+
+ if (!IP_VALID(reply))
+ break;
+
+ is_write_lock(space);
+ if (!space->is_active ||
+ ((index = (table = space->is_table)->ie_next) == 0)) {
+ is_write_unlock(space);
+ break;
+ }
+
+ /*
+ * To do an atomic copyout, need simultaneous
+ * locks on both ports and the space. If
+ * dest == reply, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest);
+ if (!ip_active(dest) || !ip_lock_try(reply)) {
+ ip_unlock(dest);
+ is_write_unlock(space);
+ break;
+ }
+
+ if (!ip_active(reply)) {
+ ip_unlock(reply);
+ ip_unlock(dest);
+ is_write_unlock(space);
+ break;
+ }
+
+ assert(reply->ip_sorights > 0);
+ ip_unlock(reply);
+
+ /* optimized ipc_entry_get */
+
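+		/*
+		 *	Pop the free slot found above (index was read from
+		 *	table entry 0's ie_next) off the free list.
+		 */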
+ entry = &table[index];
+ table->ie_next = entry->ie_next;
+ entry->ie_request = 0;
+
+ {
+ mach_port_gen_t gen;
+
+ assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = entry->ie_bits + IE_BITS_GEN_ONE;
+
+ reply_name = MACH_PORT_MAKE(index, gen);
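+
+			/*
+			 *	The name handed to the user packs the table
+			 *	index together with the slot's new generation
+			 *	number, so a stale name left over from a
+			 *	previous use of this slot will fail the
+			 *	generation check on later lookups.
+			 */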
+
+ /* optimized ipc_right_copyout */
+
+ entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
+ }
+
+ assert(MACH_PORT_VALID(reply_name));
+ entry->ie_object = (ipc_object_t) reply;
+ is_write_unlock(space);
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest->ip_srights > 0);
+ ip_release(dest);
+
+ if (dest->ip_receiver == space)
+ dest_name = dest->ip_receiver_name;
+ else
+ dest_name = MACH_PORT_NULL;
+
+ if ((--dest->ip_srights == 0) &&
+ ((nsrequest = dest->ip_nsrequest) != IP_NULL)) {
+ mach_port_mscount_t mscount;
+
+ dest->ip_nsrequest = IP_NULL;
+ mscount = dest->ip_mscount;
+ ip_unlock(dest);
+
+ ipc_notify_no_senders(nsrequest, mscount);
+ } else
+ ip_unlock(dest);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND));
+ msg->msgh_local_port = dest_name;
+ msg->msgh_remote_port = reply_name;
+ return MACH_MSG_SUCCESS;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ mach_port_t dest_name;
+
+ /* receiving a reply message */
+
+ ip_lock(dest);
+ if (!ip_active(dest)) {
+ ip_unlock(dest);
+ break;
+ }
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest->ip_sorights > 0);
+
+ if (dest->ip_receiver == space) {
+ ip_release(dest);
+ dest->ip_sorights--;
+ dest_name = dest->ip_receiver_name;
+ ip_unlock(dest);
+ } else {
+ ip_unlock(dest);
+
+ ipc_notify_send_once(dest);
+ dest_name = MACH_PORT_NULL;
+ }
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND_ONCE));
+ msg->msgh_local_port = dest_name;
+ msg->msgh_remote_port = MACH_PORT_NULL;
+ return MACH_MSG_SUCCESS;
+ }
+
+ default:
+ /* don't bother optimizing */
+ break;
+ }
+#endif /* MIGRATING_THREADS */
+
+ {
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+ ipc_port_t reply = (ipc_port_t) msg->msgh_local_port;
+ mach_port_t dest_name, reply_name;
+
+ if (IP_VALID(reply)) {
+ ipc_port_t notify_port;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ /*
+ * Handling notify (for MACH_RCV_NOTIFY) is tricky.
+ * The problem is atomically making a send-once right
+ * from the notify port and installing it for a
+ * dead-name request in the new entry, because this
+ * requires two port locks (on the notify port and
+ * the reply port). However, we can safely make
+ * and consume send-once rights for the notify port
+ * as long as we hold the space locked. This isn't
+ * an atomicity problem, because the only way
+		 *	to detect that a send-once right has been created
+		 *	and then consumed (because it wasn't needed) is by
+		 *	getting at the receive right to look at ip_sorights,
+		 *	and because the space is write-locked, status calls can't
+ * lookup the notify port receive right. When we make
+ * the send-once right, we lock the notify port,
+ * so any status calls in progress will be done.
+ */
+
+ is_write_lock(space);
+
+ for (;;) {
+ ipc_port_request_index_t request;
+
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_SPACE);
+ }
+
+ if (notify != MACH_PORT_NULL) {
+ notify_port = ipc_port_lookup_notify(space,
+ notify);
+ if (notify_port == IP_NULL) {
+ is_write_unlock(space);
+ return MACH_RCV_INVALID_NOTIFY;
+ }
+ } else
+ notify_port = IP_NULL;
+
+ if ((reply_type != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+ ipc_right_reverse(space, (ipc_object_t) reply,
+ &reply_name, &entry)) {
+ /* reply port is locked and active */
+
+ /*
+ * We don't need the notify_port
+ * send-once right, but we can't release
+ * it here because reply port is locked.
+ * Wait until after the copyout to
+ * release the notify port right.
+ */
+
+ assert(entry->ie_bits &
+ MACH_PORT_TYPE_SEND_RECEIVE);
+ break;
+ }
+
+ ip_lock(reply);
+ if (!ip_active(reply)) {
+ ip_release(reply);
+ ip_check_unlock(reply);
+
+ if (notify_port != IP_NULL)
+ ipc_port_release_sonce(notify_port);
+
+ ip_lock(dest);
+ is_write_unlock(space);
+
+ reply = IP_DEAD;
+ reply_name = MACH_PORT_DEAD;
+ goto copyout_dest;
+ }
+
+ kr = ipc_entry_get(space, &reply_name, &entry);
+ if (kr != KERN_SUCCESS) {
+ ip_unlock(reply);
+
+ if (notify_port != IP_NULL)
+ ipc_port_release_sonce(notify_port);
+
+ /* space is locked */
+ kr = ipc_entry_grow_table(space);
+ if (kr != KERN_SUCCESS) {
+ /* space is unlocked */
+
+ if (kr == KERN_RESOURCE_SHORTAGE)
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_KERNEL);
+ else
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_SPACE);
+ }
+ /* space is locked again; start over */
+
+ continue;
+ }
+
+ assert(IE_BITS_TYPE(entry->ie_bits)
+ == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+
+ if (notify_port == IP_NULL) {
+ /* not making a dead-name request */
+
+ entry->ie_object = (ipc_object_t) reply;
+ break;
+ }
+
+ kr = ipc_port_dnrequest(reply, reply_name,
+ notify_port, &request);
+ if (kr != KERN_SUCCESS) {
+ ip_unlock(reply);
+
+ ipc_port_release_sonce(notify_port);
+
+ ipc_entry_dealloc(space, reply_name, entry);
+ is_write_unlock(space);
+
+ ip_lock(reply);
+ if (!ip_active(reply)) {
+ /* will fail next time around loop */
+
+ ip_unlock(reply);
+ is_write_lock(space);
+ continue;
+ }
+
+ kr = ipc_port_dngrow(reply);
+ /* port is unlocked */
+ if (kr != KERN_SUCCESS)
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_KERNEL);
+
+ is_write_lock(space);
+ continue;
+ }
+
+ notify_port = IP_NULL; /* don't release right below */
+
+ entry->ie_object = (ipc_object_t) reply;
+ entry->ie_request = request;
+ break;
+ }
+
+ /* space and reply port are locked and active */
+
+ ip_reference(reply); /* hold onto the reply port */
+
+ kr = ipc_right_copyout(space, reply_name, entry,
+ reply_type, TRUE, (ipc_object_t) reply);
+ /* reply port is unlocked */
+ assert(kr == KERN_SUCCESS);
+
+ if (notify_port != IP_NULL)
+ ipc_port_release_sonce(notify_port);
+
+ ip_lock(dest);
+ is_write_unlock(space);
+ } else {
+ /*
+ * No reply port! This is an easy case.
+ * We only need to have the space locked
+ * when checking notify and when locking
+ * the destination (to ensure atomicity).
+ */
+
+ is_read_lock(space);
+ if (!space->is_active) {
+ is_read_unlock(space);
+ return MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE;
+ }
+
+ if (notify != MACH_PORT_NULL) {
+ ipc_entry_t entry;
+
+ /* must check notify even though it won't be used */
+
+ if (((entry = ipc_entry_lookup(space, notify))
+ == IE_NULL) ||
+ ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)) {
+ is_read_unlock(space);
+ return MACH_RCV_INVALID_NOTIFY;
+ }
+ }
+
+ ip_lock(dest);
+ is_read_unlock(space);
+
+ reply_name = (mach_port_t) reply;
+ }
+
+ /*
+ * At this point, the space is unlocked and the destination
+ * port is locked. (Lock taken while space was locked.)
+ * reply_name is taken care of; we still need dest_name.
+ * We still hold a ref for reply (if it is valid).
+ *
+ * If the space holds receive rights for the destination,
+ * we return its name for the right. Otherwise the task
+ * managed to destroy or give away the receive right between
+ * receiving the message and this copyout. If the destination
+ * is dead, return MACH_PORT_DEAD, and if the receive right
+ * exists somewhere else (another space, in transit)
+ * return MACH_PORT_NULL.
+ *
+ * Making this copyout operation atomic with the previous
+ * copyout of the reply port is a bit tricky. If there was
+ * no real reply port (it wasn't IP_VALID) then this isn't
+ * an issue. If the reply port was dead at copyout time,
+ * then we are OK, because if dest is dead we serialize
+ * after the death of both ports and if dest is alive
+ * we serialize after reply died but before dest's (later) death.
+ * So assume reply was alive when we copied it out. If dest
+ * is alive, then we are OK because we serialize before
+ * the ports' deaths. So assume dest is dead when we look at it.
+ * If reply dies/died after dest, then we are OK because
+ * we serialize after dest died but before reply dies.
+ * So the hard case is when reply is alive at copyout,
+ * dest is dead at copyout, and reply died before dest died.
+ * In this case pretend that dest is still alive, so
+ * we serialize while both ports are alive.
+ *
+ * Because the space lock is held across the copyout of reply
+ * and locking dest, the receive right for dest can't move
+ * in or out of the space while the copyouts happen, so
+ * that isn't an atomicity problem. In the last hard case
+ * above, this implies that when dest is dead that the
+ * space couldn't have had receive rights for dest at
+ * the time reply was copied-out, so when we pretend
+ * that dest is still alive, we can return MACH_PORT_NULL.
+ *
+ * If dest == reply, then we have to make it look like
+ * either both copyouts happened before the port died,
+ * or both happened after the port died. This special
+ * case works naturally if the timestamp comparison
+ * is done correctly.
+ */
+
+ copyout_dest:
+
+ if (ip_active(dest)) {
+ ipc_object_copyout_dest(space, (ipc_object_t) dest,
+ dest_type, &dest_name);
+ /* dest is unlocked */
+ } else {
+ ipc_port_timestamp_t timestamp;
+
+ timestamp = dest->ip_timestamp;
+ ip_release(dest);
+ ip_check_unlock(dest);
+
+ if (IP_VALID(reply)) {
+ ip_lock(reply);
+ if (ip_active(reply) ||
+ IP_TIMESTAMP_ORDER(timestamp,
+ reply->ip_timestamp))
+ dest_name = MACH_PORT_DEAD;
+ else
+ dest_name = MACH_PORT_NULL;
+ ip_unlock(reply);
+ } else
+ dest_name = MACH_PORT_DEAD;
+ }
+
+ if (IP_VALID(reply))
+ ipc_port_release(reply);
+
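+	/*
+	 *	Note the swap: the receiver finds the destination's name
+	 *	in msgh_local_port and the reply right's name in
+	 *	msgh_remote_port, ready to be used for sending a reply.
+	 */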
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(reply_type, dest_type));
+ msg->msgh_local_port = dest_name;
+ msg->msgh_remote_port = reply_name;
+ }
+
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_object
+ * Purpose:
+ * Copy-out a port right. Always returns a name,
+ * even for unsuccessful return codes. Always
+ * consumes the supplied object.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS The space acquired the right
+ * (name is valid) or the object is dead (MACH_PORT_DEAD).
+ * MACH_MSG_IPC_SPACE No room in space for the right,
+ * or the space is dead. (Name is MACH_PORT_NULL.)
+ * MACH_MSG_IPC_KERNEL Kernel resource shortage.
+ * (Name is MACH_PORT_NULL.)
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_object(space, object, msgt_name, namep)
+ ipc_space_t space;
+ ipc_object_t object;
+ mach_msg_type_name_t msgt_name;
+ mach_port_t *namep;
+{
+ if (!IO_VALID(object)) {
+ *namep = (mach_port_t) object;
+ return MACH_MSG_SUCCESS;
+ }
+
+#ifndef MIGRATING_THREADS
+ /*
+ * Attempt quick copyout of send rights. We optimize for a
+ * live port for which the receiver holds send (and not
+	 * receive) rights in its local table.
+ */
+
+ if (msgt_name != MACH_MSG_TYPE_PORT_SEND)
+ goto slow_copyout;
+
+ {
+ register ipc_port_t port = (ipc_port_t) object;
+ ipc_entry_t entry;
+
+ is_write_lock(space);
+ if (!space->is_active) {
+ is_write_unlock(space);
+ goto slow_copyout;
+ }
+
+ ip_lock(port);
+ if (!ip_active(port) ||
+ !ipc_hash_local_lookup(space, (ipc_object_t) port,
+ namep, &entry)) {
+ ip_unlock(port);
+ is_write_unlock(space);
+ goto slow_copyout;
+ }
+
+ /*
+ * Copyout the send right, incrementing urefs
+ * unless it would overflow, and consume the right.
+ */
+
+ assert(port->ip_srights > 1);
+ port->ip_srights--;
+ ip_release(port);
+ ip_unlock(port);
+
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
+ assert(IE_BITS_UREFS(entry->ie_bits) > 0);
+ assert(IE_BITS_UREFS(entry->ie_bits) < MACH_PORT_UREFS_MAX);
+
+ {
+ register ipc_entry_bits_t bits = entry->ie_bits + 1;
+
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ entry->ie_bits = bits;
+ }
+
+ is_write_unlock(space);
+ return MACH_MSG_SUCCESS;
+ }
+
+ slow_copyout:
+#endif /* MIGRATING_THREADS */
+
+ {
+ kern_return_t kr;
+
+ kr = ipc_object_copyout(space, object, msgt_name, TRUE, namep);
+ if (kr != KERN_SUCCESS) {
+ ipc_object_destroy(object, msgt_name);
+
+ if (kr == KERN_INVALID_CAPABILITY)
+ *namep = MACH_PORT_DEAD;
+ else {
+ *namep = MACH_PORT_NULL;
+
+ if (kr == KERN_RESOURCE_SHORTAGE)
+ return MACH_MSG_IPC_KERNEL;
+ else
+ return MACH_MSG_IPC_SPACE;
+ }
+ }
+
+ return MACH_MSG_SUCCESS;
+ }
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_body
+ * Purpose:
+ * "Copy-out" port rights and out-of-line memory
+ * in the body of a message.
+ *
+ * The error codes are a combination of special bits.
+ * The copyout proceeds despite errors.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyout.
+ * MACH_MSG_IPC_SPACE No room for port right in name space.
+ * MACH_MSG_VM_SPACE No room for memory in address space.
+ * MACH_MSG_IPC_KERNEL Resource shortage handling port right.
+ * MACH_MSG_VM_KERNEL Resource shortage handling memory.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_body(saddr, eaddr, space, map)
+ vm_offset_t saddr, eaddr;
+ ipc_space_t space;
+ vm_map_t map;
+{
+ mach_msg_return_t mr = MACH_MSG_SUCCESS;
+ kern_return_t kr;
+
+ while (saddr < eaddr) {
+ vm_offset_t taddr = saddr;
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, longform, is_port;
+ vm_size_t length;
+ vm_offset_t addr;
+
+ type = (mach_msg_type_long_t *) saddr;
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ longform = ((mach_msg_type_t*)type)->msgt_longform;
+ if (longform) {
+ /* This must be aligned */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ (is_misaligned(type))) {
+ saddr = ptr_align(saddr);
+ continue;
+ }
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ }
+
+ /* padding (ptrs and ports) ? */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ ((size >> 3) == sizeof(natural_t)))
+ saddr = ptr_align(saddr);
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = ((number * size) + 7) >> 3;
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ if (is_port) {
+ mach_port_t *objects;
+ mach_msg_type_number_t i;
+
+ if (!is_inline && (length != 0)) {
+ /* first allocate memory in the map */
+
+ kr = vm_allocate(map, &addr, length, TRUE);
+ if (kr != KERN_SUCCESS) {
+ ipc_kmsg_clean_body(taddr, saddr);
+ goto vm_copyout_failure;
+ }
+ }
+
+ objects = (mach_port_t *)
+ (is_inline ? saddr : * (vm_offset_t *) saddr);
+
+ /* copyout port rights carried in the message */
+
+ for (i = 0; i < number; i++) {
+ ipc_object_t object =
+ (ipc_object_t) objects[i];
+
+ mr |= ipc_kmsg_copyout_object(space, object,
+ name, &objects[i]);
+ }
+ }
+
+ if (is_inline) {
+ /* inline data sizes round up to int boundaries */
+
+ ((mach_msg_type_t*)type)->msgt_deallocate = FALSE;
+ saddr += (length + 3) &~ 3;
+ } else {
+ vm_offset_t data;
+
+ if (sizeof(vm_offset_t) > sizeof(mach_msg_type_t))
+ saddr = ptr_align(saddr);
+
+ data = * (vm_offset_t *) saddr;
+
+ /* copyout memory carried in the message */
+
+ if (length == 0) {
+ assert(data == 0);
+ addr = 0;
+ } else if (is_port) {
+ /* copyout to memory allocated above */
+
+ (void) copyoutmap(map, (char *) data,
+ (char *) addr, length);
+ kfree(data, length);
+ } else {
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+
+ kr = vm_map_copyout(map, &addr, copy);
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard(copy);
+
+ vm_copyout_failure:
+
+ addr = 0;
+ if (longform)
+ type->msgtl_size = 0;
+ else
+ ((mach_msg_type_t*)type)->msgt_size = 0;
+
+ if (kr == KERN_RESOURCE_SHORTAGE)
+ mr |= MACH_MSG_VM_KERNEL;
+ else
+ mr |= MACH_MSG_VM_SPACE;
+ }
+ }
+
+ ((mach_msg_type_t*)type)->msgt_deallocate = TRUE;
+ * (vm_offset_t *) saddr = addr;
+ saddr += sizeof(vm_offset_t);
+ }
+ }
+
+ return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout
+ * Purpose:
+ * "Copy-out" port rights and out-of-line memory
+ * in the message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Copied out all rights and memory.
+ * MACH_RCV_INVALID_NOTIFY Bad notify port.
+ * Rights and memory in the message are intact.
+ * MACH_RCV_HEADER_ERROR + special bits
+ * Rights and memory in the message are intact.
+ * MACH_RCV_BODY_ERROR + special bits
+ * The message header was successfully copied out.
+ * As much of the body was handled as possible.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout(kmsg, space, map, notify)
+ ipc_kmsg_t kmsg;
+ ipc_space_t space;
+ vm_map_t map;
+ mach_port_t notify;
+{
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+ mach_msg_return_t mr;
+
+ mr = ipc_kmsg_copyout_header(&kmsg->ikm_header, space, notify);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ if (mbits & MACH_MSGH_BITS_COMPLEX) {
+ vm_offset_t saddr, eaddr;
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header +
+ kmsg->ikm_header.msgh_size;
+
+ mr = ipc_kmsg_copyout_body(saddr, eaddr, space, map);
+ if (mr != MACH_MSG_SUCCESS)
+ mr |= MACH_RCV_BODY_ERROR;
+ }
+
+ return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_pseudo
+ * Purpose:
+ * Does a pseudo-copyout of the message.
+ * This is like a regular copyout, except
+ * that the ports in the header are handled
+ *	as if they are in the body; the remote/local fields
+ *	are not reversed (swapped) the way they would be for
+ *	a normal receive-side copyout.
+ *
+ * The error codes are a combination of special bits.
+ * The copyout proceeds despite errors.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyout.
+ * MACH_MSG_IPC_SPACE No room for port right in name space.
+ * MACH_MSG_VM_SPACE No room for memory in address space.
+ * MACH_MSG_IPC_KERNEL Resource shortage handling port right.
+ * MACH_MSG_VM_KERNEL Resource shortage handling memory.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_pseudo(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space,
+ vm_map_t map)
+{
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+ ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+ mach_port_t dest_name, reply_name;
+ mach_msg_return_t mr;
+
+ assert(IO_VALID(dest));
+
+ mr = (ipc_kmsg_copyout_object(space, dest, dest_type, &dest_name) |
+ ipc_kmsg_copyout_object(space, reply, reply_type, &reply_name));
+
+ kmsg->ikm_header.msgh_bits = mbits &~ MACH_MSGH_BITS_CIRCULAR;
+ kmsg->ikm_header.msgh_remote_port = dest_name;
+ kmsg->ikm_header.msgh_local_port = reply_name;
+
+ if (mbits & MACH_MSGH_BITS_COMPLEX) {
+ vm_offset_t saddr, eaddr;
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header +
+ kmsg->ikm_header.msgh_size;
+
+ mr |= ipc_kmsg_copyout_body(saddr, eaddr, space, map);
+ }
+
+ return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_dest
+ * Purpose:
+ * Copies out the destination port in the message.
+ * Destroys all other rights and memory in the message.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_copyout_dest(kmsg, space)
+ ipc_kmsg_t kmsg;
+ ipc_space_t space;
+{
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+ ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+ mach_port_t dest_name, reply_name;
+
+ assert(IO_VALID(dest));
+
+ io_lock(dest);
+ if (io_active(dest)) {
+ ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
+ /* dest is unlocked */
+ } else {
+ io_release(dest);
+ io_check_unlock(dest);
+ dest_name = MACH_PORT_DEAD;
+ }
+
+ if (IO_VALID(reply)) {
+ ipc_object_destroy(reply, reply_type);
+ reply_name = MACH_PORT_NULL;
+ } else
+ reply_name = (mach_port_t) reply;
+
+ kmsg->ikm_header.msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(reply_type, dest_type));
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ kmsg->ikm_header.msgh_remote_port = reply_name;
+
+ if (mbits & MACH_MSGH_BITS_COMPLEX) {
+ vm_offset_t saddr, eaddr;
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header +
+ kmsg->ikm_header.msgh_size;
+
+ ipc_kmsg_clean_body(saddr, eaddr);
+ }
+}
+
+#if NORMA_IPC || NORMA_VM
+/*
+ * Routine: ipc_kmsg_copyout_to_kernel
+ * Purpose:
+ * Copies out the destination and reply ports in the message.
+ * Leaves all other rights and memory in the message alone.
+ * Conditions:
+ * Nothing locked.
+ *
+ * Derived from ipc_kmsg_copyout_dest.
+ * Used by mach_msg_rpc_from_kernel (which used to use copyout_dest).
+ * We really do want to save rights and memory.
+ */
+
+void
+ipc_kmsg_copyout_to_kernel(kmsg, space)
+ ipc_kmsg_t kmsg;
+ ipc_space_t space;
+{
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+ ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+ mach_port_t dest_name, reply_name;
+
+ assert(IO_VALID(dest));
+
+ io_lock(dest);
+ if (io_active(dest)) {
+ ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
+ /* dest is unlocked */
+ } else {
+ io_release(dest);
+ io_check_unlock(dest);
+ dest_name = MACH_PORT_DEAD;
+ }
+
+ reply_name = (mach_port_t) reply;
+
+ kmsg->ikm_header.msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(reply_type, dest_type));
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ kmsg->ikm_header.msgh_remote_port = reply_name;
+}
+#endif /* NORMA_IPC || NORMA_VM */
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: ipc_kmsg_copyin_compat
+ * Purpose:
+ * "Copy-in" port rights and out-of-line memory
+ * in the message.
+ *
+ * In all failure cases, the message is left holding
+ * no rights or memory. However, the message buffer
+ * is not deallocated. If successful, the message
+ * contains a valid destination port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyin.
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
+ * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
+ * MACH_SEND_INVALID_TYPE Bad type specification.
+ * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyin_compat(kmsg, space, map)
+ ipc_kmsg_t kmsg;
+ ipc_space_t space;
+ vm_map_t map;
+{
+ msg_header_t msg;
+ mach_port_t dest_name;
+ mach_port_t reply_name;
+ ipc_object_t dest, reply;
+ mach_msg_type_name_t dest_type, reply_type;
+ vm_offset_t saddr, eaddr;
+ boolean_t complex;
+ kern_return_t kr;
+ boolean_t use_page_lists, steal_pages;
+
+ msg = * (msg_header_t *) &kmsg->ikm_header;
+ dest_name = (mach_port_t) msg.msg_remote_port;
+ reply_name = (mach_port_t) msg.msg_local_port;
+
+ /* translate the destination and reply ports */
+
+ kr = ipc_object_copyin_header(space, dest_name, &dest, &dest_type);
+ if (kr != KERN_SUCCESS)
+ return MACH_SEND_INVALID_DEST;
+
+ if (reply_name == MACH_PORT_NULL) {
+ reply = IO_NULL;
+ reply_type = 0;
+ } else {
+ kr = ipc_object_copyin_header(space, reply_name,
+ &reply, &reply_type);
+ if (kr != KERN_SUCCESS) {
+ ipc_object_destroy(dest, dest_type);
+ return MACH_SEND_INVALID_REPLY;
+ }
+ }
+
+ kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(dest_type, reply_type);
+ kmsg->ikm_header.msgh_size = (mach_msg_size_t) msg.msg_size;
+ kmsg->ikm_header.msgh_remote_port = (mach_port_t) dest;
+ kmsg->ikm_header.msgh_local_port = (mach_port_t) reply;
+ kmsg->ikm_header.msgh_seqno = (mach_msg_kind_t) msg.msg_type;
+ kmsg->ikm_header.msgh_id = (mach_msg_id_t) msg.msg_id;
+
+ if (msg.msg_simple)
+ return MACH_MSG_SUCCESS;
+
+ complex = FALSE;
+ use_page_lists = ipc_kobject_vm_page_list(ip_kotype((ipc_port_t)dest));
+ steal_pages = ipc_kobject_vm_page_steal(ip_kotype((ipc_port_t)dest));
+
+#if NORMA_IPC
+ if (IP_NORMA_IS_PROXY((ipc_port_t) dest)) {
+ use_page_lists = TRUE;
+ steal_pages = TRUE;
+ }
+#endif /* NORMA_IPC */
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;
+
+ while (saddr < eaddr) {
+ vm_offset_t taddr = saddr;
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, longform, dealloc, is_port;
+ vm_offset_t data;
+ vm_size_t length;
+
+ type = (mach_msg_type_long_t *) saddr;
+
+ if (((eaddr - saddr) < sizeof(mach_msg_type_t)) ||
+ ((longform = ((mach_msg_type_t*)type)->msgt_longform) &&
+ ((eaddr - saddr) < sizeof(mach_msg_type_long_t)))) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_MSG_TOO_SMALL;
+ }
+
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ dealloc = ((mach_msg_type_t*)type)->msgt_deallocate;
+ if (longform) {
+ /* This must be aligned */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ (is_misaligned(type))) {
+ saddr = ptr_align(saddr);
+ continue;
+ }
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ }
+
+ is_port = MSG_TYPE_PORT_ANY(name);
+
+ if (is_port && (size != PORT_T_SIZE_IN_BITS)) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_INVALID_TYPE;
+ }
+
+ /*
+ * New IPC says these should be zero, but old IPC
+ * tasks often leave them with random values. So
+ * we have to clear them.
+ */
+
+ ((mach_msg_type_t*)type)->msgt_unused = 0;
+ if (longform) {
+ type->msgtl_header.msgt_name = 0;
+ type->msgtl_header.msgt_size = 0;
+ type->msgtl_header.msgt_number = 0;
+ }
+
+ /* padding (ptrs and ports) ? */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ ((size >> 3) == sizeof(natural_t)))
+ saddr = ptr_align(saddr);
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = ((number * size) + 7) >> 3;
+
+ if (is_inline) {
+ vm_size_t amount;
+
+ /* inline data sizes round up to int boundaries */
+
+ amount = (length + 3) &~ 3;
+ if ((eaddr - saddr) < amount) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_MSG_TOO_SMALL;
+ }
+
+ data = saddr;
+ saddr += amount;
+ } else {
+ vm_offset_t addr;
+
+ if ((eaddr - saddr) < sizeof(vm_offset_t)) {
+ ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+ return MACH_SEND_MSG_TOO_SMALL;
+ }
+
+ /* grab the out-of-line data */
+
+ addr = * (vm_offset_t *) saddr;
+
+ if (length == 0)
+ data = 0;
+ else if (is_port) {
+ data = kalloc(length);
+ if (data == 0)
+ goto invalid_memory;
+
+ if (copyinmap(map, (char *) addr,
+ (char *) data, length) ||
+ (dealloc &&
+ (vm_deallocate(map, addr, length) !=
+ KERN_SUCCESS))) {
+ kfree(data, length);
+ goto invalid_memory;
+ }
+ } else {
+ vm_map_copy_t copy;
+
+ if (use_page_lists) {
+ kr = vm_map_copyin_page_list(map,
+ addr, length, dealloc,
+ steal_pages, &copy, FALSE);
+ } else {
+ kr = vm_map_copyin(map, addr, length,
+ dealloc,
+ &copy);
+ }
+ if (kr != KERN_SUCCESS) {
+ invalid_memory:
+ ipc_kmsg_clean_partial(kmsg, taddr,
+ FALSE, 0);
+ return MACH_SEND_INVALID_MEMORY;
+ }
+
+ data = (vm_offset_t) copy;
+ }
+
+ * (vm_offset_t *) saddr = data;
+ saddr += sizeof(vm_offset_t);
+ complex = TRUE;
+ }
+
+ if (is_port) {
+ mach_msg_type_name_t newname =
+ ipc_object_copyin_type(name);
+ ipc_object_t *objects = (ipc_object_t *) data;
+ mach_msg_type_number_t i;
+
+ if (longform)
+ type->msgtl_name = newname;
+ else
+ ((mach_msg_type_t*)type)->msgt_name = newname;
+
+ for (i = 0; i < number; i++) {
+ mach_port_t port = (mach_port_t) objects[i];
+ ipc_object_t object;
+
+ if (!MACH_PORT_VALID(port))
+ continue;
+
+ kr = ipc_object_copyin_compat(space, port,
+ name, dealloc, &object);
+ if (kr != KERN_SUCCESS) {
+ ipc_kmsg_clean_partial(kmsg, taddr,
+ TRUE, i);
+ return MACH_SEND_INVALID_RIGHT;
+ }
+
+ if ((newname == MACH_MSG_TYPE_PORT_RECEIVE) &&
+ ipc_port_check_circularity(
+ (ipc_port_t) object,
+ (ipc_port_t) dest))
+ kmsg->ikm_header.msgh_bits |=
+ MACH_MSGH_BITS_CIRCULAR;
+
+ objects[i] = object;
+ }
+
+ complex = TRUE;
+ }
+ }
+
+ if (complex)
+ kmsg->ikm_header.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
+
+ return MACH_MSG_SUCCESS;
+}
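/*
 * A small standalone check (not from this commit) of the two rounding rules
 * the copyin loop above relies on: data lengths are computed in bits and
 * rounded up to bytes, and inline data advances the scan pointer in whole
 * int (4-byte) steps.
 */
#include <assert.h>

static unsigned long type_length(unsigned long number, unsigned long size_in_bits)
{
    return ((number * size_in_bits) + 7) >> 3;      /* as in the loop above */
}

static unsigned long inline_amount(unsigned long length)
{
    return (length + 3) & ~3UL;                     /* round to an int boundary */
}

int main(void)
{
    assert(type_length(3, 8) == 3);     /* three 8-bit chars occupy 3 bytes...   */
    assert(inline_amount(3) == 4);      /* ...but consume one full word inline   */
    assert(type_length(2, 32) == 8);    /* two 32-bit integers occupy 8 bytes    */
    assert(inline_amount(8) == 8);
    return 0;
}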
+
+/*
+ * Routine: ipc_kmsg_copyout_compat
+ * Purpose:
+ * "Copy-out" port rights and out-of-line memory
+ * in the message, producing an old IPC message.
+ *
+ * Doesn't bother to handle the header atomically.
+ * Skips over errors. Problem ports produce MACH_PORT_NULL
+ * (MACH_PORT_DEAD is never produced), and problem memory
+ * produces a zero address.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Copied out rights and memory.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_compat(kmsg, space, map)
+ ipc_kmsg_t kmsg;
+ ipc_space_t space;
+ vm_map_t map;
+{
+ msg_header_t msg;
+ mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+ ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ mach_port_t dest_name, reply_name;
+ vm_offset_t saddr, eaddr;
+ kern_return_t kr;
+
+ assert(IO_VALID(dest));
+
+ io_lock(dest);
+ if (io_active(dest)) {
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+
+ ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
+ /* dest is unlocked */
+ } else {
+ io_release(dest);
+ io_check_unlock(dest);
+ dest_name = MACH_PORT_NULL;
+ }
+
+ if (IO_VALID(reply)) {
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+
+ kr = ipc_object_copyout_compat(space, reply, reply_type,
+ &reply_name);
+ if (kr != KERN_SUCCESS) {
+ ipc_object_destroy(reply, reply_type);
+ reply_name = MACH_PORT_NULL;
+ }
+ } else
+ reply_name = MACH_PORT_NULL;
+
+ msg.msg_unused = 0;
+ msg.msg_simple = (mbits & MACH_MSGH_BITS_COMPLEX) ? FALSE : TRUE;
+ msg.msg_size = (msg_size_t) kmsg->ikm_header.msgh_size;
+ msg.msg_type = (integer_t) kmsg->ikm_header.msgh_seqno;
+ msg.msg_local_port = (port_name_t) dest_name;
+ msg.msg_remote_port = (port_name_t) reply_name;
+ msg.msg_id = (integer_t) kmsg->ikm_header.msgh_id;
+ * (msg_header_t *) &kmsg->ikm_header = msg;
+
+ if (msg.msg_simple)
+ return MACH_MSG_SUCCESS;
+
+ saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+ eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;
+
+ while (saddr < eaddr) {
+ vm_offset_t taddr = saddr;
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, longform, is_port;
+ vm_size_t length;
+ vm_offset_t addr;
+
+ type = (mach_msg_type_long_t *) saddr;
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ longform = ((mach_msg_type_t*)type)->msgt_longform;
+ if (longform) {
+ /* This must be aligned */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ (is_misaligned(type))) {
+ saddr = ptr_align(saddr);
+ continue;
+ }
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ }
+
+ /* padding (ptrs and ports) ? */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ ((size >> 3) == sizeof(natural_t)))
+ saddr = ptr_align(saddr);
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = ((number * size) + 7) >> 3;
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ if (is_port) {
+ mach_port_t *objects;
+ mach_msg_type_number_t i;
+ mach_msg_type_name_t newname;
+
+ if (!is_inline && (length != 0)) {
+ /* first allocate memory in the map */
+
+ kr = vm_allocate(map, &addr, length, TRUE);
+ if (kr != KERN_SUCCESS) {
+ ipc_kmsg_clean_body(taddr, saddr);
+ goto vm_copyout_failure;
+ }
+ }
+
+ newname = ipc_object_copyout_type_compat(name);
+ if (longform)
+ type->msgtl_name = newname;
+ else
+ ((mach_msg_type_t*)type)->msgt_name = newname;
+
+ objects = (mach_port_t *)
+ (is_inline ? saddr : * (vm_offset_t *) saddr);
+
+ /* copyout port rights carried in the message */
+
+ for (i = 0; i < number; i++) {
+ ipc_object_t object =
+ (ipc_object_t) objects[i];
+
+ if (!IO_VALID(object)) {
+ objects[i] = MACH_PORT_NULL;
+ continue;
+ }
+
+ kr = ipc_object_copyout_compat(space, object,
+ name, &objects[i]);
+ if (kr != KERN_SUCCESS) {
+ ipc_object_destroy(object, name);
+ objects[i] = MACH_PORT_NULL;
+ }
+ }
+ }
+
+ if (is_inline) {
+ /* inline data sizes round up to int boundaries */
+
+ saddr += (length + 3) &~ 3;
+ } else {
+ vm_offset_t data = * (vm_offset_t *) saddr;
+
+ /* copyout memory carried in the message */
+
+ if (length == 0) {
+ assert(data == 0);
+ addr = 0;
+ } else if (is_port) {
+ /* copyout to memory allocated above */
+
+ (void) copyoutmap(map, (char *) data,
+ (char *) addr, length);
+ kfree(data, length);
+ } else {
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+
+ kr = vm_map_copyout(map, &addr, copy);
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard(copy);
+
+ vm_copyout_failure:
+
+ addr = 0;
+ }
+ }
+
+ * (vm_offset_t *) saddr = addr;
+ saddr += sizeof(vm_offset_t);
+ }
+ }
+
+ return MACH_MSG_SUCCESS;
+}
+
+#endif /* MACH_IPC_COMPAT */
+
+#include <mach_kdb.h>
+#if MACH_KDB
+
+char *
+ipc_type_name(type_name, received)
+ int type_name;
+ boolean_t received;
+{
+ switch (type_name) {
+ case MACH_MSG_TYPE_BOOLEAN:
+ return "boolean";
+
+ case MACH_MSG_TYPE_INTEGER_16:
+ return "short";
+
+ case MACH_MSG_TYPE_INTEGER_32:
+ return "int32";
+
+ case MACH_MSG_TYPE_INTEGER_64:
+ return "int64";
+
+ case MACH_MSG_TYPE_CHAR:
+ return "char";
+
+ case MACH_MSG_TYPE_BYTE:
+ return "byte";
+
+ case MACH_MSG_TYPE_REAL:
+ return "real";
+
+ case MACH_MSG_TYPE_STRING:
+ return "string";
+
+ case MACH_MSG_TYPE_PORT_NAME:
+ return "port_name";
+
+ case MACH_MSG_TYPE_MOVE_RECEIVE:
+ if (received) {
+ return "port_receive";
+ } else {
+ return "move_receive";
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND:
+ if (received) {
+ return "port_send";
+ } else {
+ return "move_send";
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND_ONCE:
+ if (received) {
+ return "port_send_once";
+ } else {
+ return "move_send_once";
+ }
+
+ case MACH_MSG_TYPE_COPY_SEND:
+ return "copy_send";
+
+ case MACH_MSG_TYPE_MAKE_SEND:
+ return "make_send";
+
+ case MACH_MSG_TYPE_MAKE_SEND_ONCE:
+ return "make_send_once";
+
+ default:
+ return (char *) 0;
+ }
+}
+
+void
+ipc_print_type_name(
+ int type_name)
+{
+ char *name = ipc_type_name(type_name, TRUE);
+ if (name) {
+ printf("%s", name);
+ } else {
+ printf("type%d", type_name);
+ }
+}
+
+/*
+ * ipc_kmsg_print [ debug ]
+ */
+void
+ipc_kmsg_print(kmsg)
+ ipc_kmsg_t kmsg;
+{
+ db_printf("kmsg=0x%x\n", kmsg);
+ db_printf("ikm_next=0x%x,prev=0x%x,size=%d,marequest=0x%x",
+ kmsg->ikm_next,
+ kmsg->ikm_prev,
+ kmsg->ikm_size,
+ kmsg->ikm_marequest);
+#if NORMA_IPC
+ db_printf(",page=0x%x,copy=0x%x\n",
+ kmsg->ikm_page,
+ kmsg->ikm_copy);
+#else /* NORMA_IPC */
+ db_printf("\n");
+#endif /* NORMA_IPC */
+ ipc_msg_print(&kmsg->ikm_header);
+}
+
+/*
+ * ipc_msg_print [ debug ]
+ */
+void
+ipc_msg_print(msgh)
+ mach_msg_header_t *msgh;
+{
+ vm_offset_t saddr, eaddr;
+
+ db_printf("msgh_bits=0x%x: ", msgh->msgh_bits);
+ if (msgh->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
+ db_printf("complex,");
+ }
+ if (msgh->msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
+ db_printf("circular,");
+ }
+ if (msgh->msgh_bits & MACH_MSGH_BITS_COMPLEX_PORTS) {
+ db_printf("complex_ports,");
+ }
+ if (msgh->msgh_bits & MACH_MSGH_BITS_COMPLEX_DATA) {
+ db_printf("complex_data,");
+ }
+ if (msgh->msgh_bits & MACH_MSGH_BITS_MIGRATED) {
+ db_printf("migrated,");
+ }
+ if (msgh->msgh_bits & MACH_MSGH_BITS_UNUSED) {
+ db_printf("unused=0x%x,",
+ msgh->msgh_bits & MACH_MSGH_BITS_UNUSED);
+ }
+ db_printf("l=0x%x,r=0x%x\n",
+ MACH_MSGH_BITS_LOCAL(msgh->msgh_bits),
+ MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
+
+ db_printf("msgh_id=%d,size=%d,seqno=%d,",
+ msgh->msgh_id,
+ msgh->msgh_size,
+ msgh->msgh_seqno);
+
+ if (msgh->msgh_remote_port) {
+ db_printf("remote=0x%x(", msgh->msgh_remote_port);
+ ipc_print_type_name(MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
+ db_printf("),");
+ } else {
+ db_printf("remote=null,\n");
+ }
+
+ if (msgh->msgh_local_port) {
+ db_printf("local=0x%x(", msgh->msgh_local_port);
+ ipc_print_type_name(MACH_MSGH_BITS_LOCAL(msgh->msgh_bits));
+ db_printf(")\n");
+ } else {
+ db_printf("local=null\n");
+ }
+
+ saddr = (vm_offset_t) (msgh + 1);
+ eaddr = (vm_offset_t) msgh + msgh->msgh_size;
+
+ while (saddr < eaddr) {
+ mach_msg_type_long_t *type;
+ mach_msg_type_name_t name;
+ mach_msg_type_size_t size;
+ mach_msg_type_number_t number;
+ boolean_t is_inline, longform, dealloc, is_port;
+ vm_size_t length;
+
+ type = (mach_msg_type_long_t *) saddr;
+
+ if (((eaddr - saddr) < sizeof(mach_msg_type_t)) ||
+ ((longform = ((mach_msg_type_t*)type)->msgt_longform) &&
+ ((eaddr - saddr) < sizeof(mach_msg_type_long_t)))) {
+ db_printf("*** msg too small\n");
+ return;
+ }
+
+ is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+ dealloc = ((mach_msg_type_t*)type)->msgt_deallocate;
+ if (longform) {
+ /* This must be aligned */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ (is_misaligned(type))) {
+ saddr = ptr_align(saddr);
+ continue;
+ }
+ name = type->msgtl_name;
+ size = type->msgtl_size;
+ number = type->msgtl_number;
+ saddr += sizeof(mach_msg_type_long_t);
+ } else {
+ name = ((mach_msg_type_t*)type)->msgt_name;
+ size = ((mach_msg_type_t*)type)->msgt_size;
+ number = ((mach_msg_type_t*)type)->msgt_number;
+ saddr += sizeof(mach_msg_type_t);
+ }
+
+ db_printf("-- type=");
+ ipc_print_type_name(name);
+ if (! is_inline) {
+ db_printf(",ool");
+ }
+ if (dealloc) {
+ db_printf(",dealloc");
+ }
+ if (longform) {
+ db_printf(",longform");
+ }
+ db_printf(",size=%d,number=%d,addr=0x%x\n",
+ size,
+ number,
+ saddr);
+
+ is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+ if ((is_port && (size != PORT_T_SIZE_IN_BITS)) ||
+ (longform && ((type->msgtl_header.msgt_name != 0) ||
+ (type->msgtl_header.msgt_size != 0) ||
+ (type->msgtl_header.msgt_number != 0))) ||
+ (((mach_msg_type_t*)type)->msgt_unused != 0) ||
+ (dealloc && is_inline)) {
+ db_printf("*** invalid type\n");
+ return;
+ }
+
+ /* padding (ptrs and ports) ? */
+ if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+ ((size >> 3) == sizeof(natural_t)))
+ saddr = ptr_align(saddr);
+
+ /* calculate length of data in bytes, rounding up */
+
+ length = ((number * size) + 7) >> 3;
+
+ if (is_inline) {
+ vm_size_t amount;
+ int i, numwords;
+
+ /* inline data sizes round up to int boundaries */
+ amount = (length + 3) &~ 3;
+ if ((eaddr - saddr) < amount) {
+ db_printf("*** too small\n");
+ return;
+ }
+ numwords = amount / sizeof(int);
+ if (numwords > 8) {
+ numwords = 8;
+ }
+ for (i = 0; i < numwords; i++) {
+ db_printf("0x%x\n", ((int *) saddr)[i]);
+ }
+ if (numwords < amount / sizeof(int)) {
+ db_printf("...\n");
+ }
+ saddr += amount;
+ } else {
+ if ((eaddr - saddr) < sizeof(vm_offset_t)) {
+ db_printf("*** too small\n");
+ return;
+ }
+ db_printf("0x%x\n", * (vm_offset_t *) saddr);
+ saddr += sizeof(vm_offset_t);
+ }
+ }
+}
+#endif /* MACH_KDB */
diff --git a/ipc/ipc_kmsg.h b/ipc/ipc_kmsg.h
new file mode 100644
index 00000000..8fdbeb5a
--- /dev/null
+++ b/ipc/ipc_kmsg.h
@@ -0,0 +1,291 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_kmsg.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for kernel messages.
+ */
+
+#ifndef _IPC_IPC_KMSG_H_
+#define _IPC_IPC_KMSG_H_
+
+#include <cpus.h>
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+
+#include <mach/machine/vm_types.h>
+#include <mach/message.h>
+#include <kern/assert.h>
+#include "cpu_number.h"
+#include <kern/macro_help.h>
+#include <kern/kalloc.h>
+#include <ipc/ipc_marequest.h>
+#if NORMA_IPC
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#endif /* NORMA_IPC */
+
+/*
+ * This structure is only the header for a kmsg buffer;
+ * the actual buffer is normally larger. The rest of the buffer
+ * holds the body of the message.
+ *
+ * In a kmsg, the port fields hold pointers to ports instead
+ * of port names. These pointers hold references.
+ *
+ * The ikm_header.msgh_remote_port field is the destination
+ * of the message.
+ */
+
+typedef struct ipc_kmsg {
+ struct ipc_kmsg *ikm_next, *ikm_prev;
+ vm_size_t ikm_size;
+ ipc_marequest_t ikm_marequest;
+#if NORMA_IPC
+ vm_page_t ikm_page;
+ vm_map_copy_t ikm_copy;
+ unsigned long ikm_source_node;
+#endif /* NORMA_IPC */
+ mach_msg_header_t ikm_header;
+} *ipc_kmsg_t;
+
+#define IKM_NULL ((ipc_kmsg_t) 0)
+
+#define IKM_OVERHEAD \
+ (sizeof(struct ipc_kmsg) - sizeof(mach_msg_header_t))
+
+#define ikm_plus_overhead(size) ((vm_size_t)((size) + IKM_OVERHEAD))
+#define ikm_less_overhead(size) ((mach_msg_size_t)((size) - IKM_OVERHEAD))
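/*
 * A standalone sketch (not from this commit) of the overhead arithmetic,
 * with the real types replaced by stand-ins: the buffer allocated for a
 * kmsg must cover the queue links and bookkeeping fields that precede
 * ikm_header as well as the message itself, and the two macros are
 * inverses of one another.
 */
#include <assert.h>
#include <stddef.h>

typedef struct { unsigned int msgh_bits, msgh_size; } header_stub_t;
typedef struct kmsg_stub {
    struct kmsg_stub *next, *prev;      /* stand-ins for ikm_next/ikm_prev */
    unsigned long     size;             /* stand-in for ikm_size           */
    header_stub_t     header;           /* stand-in for ikm_header         */
} kmsg_stub_t;

#define OVERHEAD          (sizeof(kmsg_stub_t) - sizeof(header_stub_t))
#define plus_overhead(s)  ((s) + OVERHEAD)
#define less_overhead(s)  ((s) - OVERHEAD)

int main(void)
{
    unsigned long msg_size = 256;

    assert(less_overhead(plus_overhead(msg_size)) == msg_size);
    assert(plus_overhead(msg_size) >= offsetof(kmsg_stub_t, header) + msg_size);
    return 0;
}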
+
+/*
+ * XXX For debugging.
+ */
+#define IKM_BOGUS ((ipc_kmsg_t) 0xffffff10)
+
+/*
+ * We keep a per-processor cache of kernel message buffers.
+ * The cache saves the overhead/locking of using kalloc/kfree.
+ * The per-processor cache seems to miss less than a per-thread cache,
+ * and it also uses less memory. Access to the cache doesn't
+ * require locking.
+ */
+
+extern ipc_kmsg_t ipc_kmsg_cache[NCPUS];
+
+#define ikm_cache() ipc_kmsg_cache[cpu_number()]
+
+/*
+ * The size of the kernel message buffers that will be cached.
+ * IKM_SAVED_KMSG_SIZE includes overhead; IKM_SAVED_MSG_SIZE doesn't.
+ */
+
+#define IKM_SAVED_KMSG_SIZE ((vm_size_t) 256)
+#define IKM_SAVED_MSG_SIZE ikm_less_overhead(IKM_SAVED_KMSG_SIZE)
+
+#define ikm_alloc(size) \
+ ((ipc_kmsg_t) kalloc(ikm_plus_overhead(size)))
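/*
 * A rough user-space sketch (not from this commit) of the fast path the
 * cache comment above describes: try the per-processor slot first and fall
 * back to the general allocator.  malloc/free stand in for kalloc/kfree,
 * and a single-CPU cpu_number() is assumed.
 */
#include <stdlib.h>

#define NCPUS            1
#define SAVED_KMSG_SIZE  256

static void *kmsg_cache[NCPUS];

static int cpu_number(void) { return 0; }

static void *kmsg_alloc(unsigned long size)
{
    void *kmsg = kmsg_cache[cpu_number()];

    if (kmsg != NULL && size <= SAVED_KMSG_SIZE) {
        kmsg_cache[cpu_number()] = NULL;    /* take the cached buffer */
        return kmsg;
    }
    return malloc(size);                    /* slow path */
}

static void kmsg_release(void *kmsg, unsigned long size)
{
    /* only buffers of the saved size are worth caching */
    if (size == SAVED_KMSG_SIZE && kmsg_cache[cpu_number()] == NULL) {
        kmsg_cache[cpu_number()] = kmsg;    /* refill the per-CPU slot */
        return;
    }
    free(kmsg);
}

int main(void)
{
    void *a = kmsg_alloc(SAVED_KMSG_SIZE);
    kmsg_release(a, SAVED_KMSG_SIZE);
    void *b = kmsg_alloc(128);              /* request fits: served from the cache */
    int reused = (a == b);
    kmsg_release(b, SAVED_KMSG_SIZE);       /* the buffer is still SAVED_KMSG_SIZE bytes */
    return reused ? 0 : 1;
}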
+
+#define ikm_init(kmsg, size) \
+MACRO_BEGIN \
+ ikm_init_special((kmsg), ikm_plus_overhead(size)); \
+MACRO_END
+
+#define ikm_init_special(kmsg, size) \
+MACRO_BEGIN \
+ (kmsg)->ikm_size = (size); \
+ (kmsg)->ikm_marequest = IMAR_NULL; \
+MACRO_END
+
+#define ikm_check_initialized(kmsg, size) \
+MACRO_BEGIN \
+ assert((kmsg)->ikm_size == (size)); \
+ assert((kmsg)->ikm_marequest == IMAR_NULL); \
+MACRO_END
+
+/*
+ * Non-positive message sizes are special. They indicate that
+ * the message buffer doesn't come from ikm_alloc and
+ * requires some special handling to free.
+ *
+ * ipc_kmsg_free is the non-macro form of ikm_free.
+ * It frees kmsgs of all varieties.
+ */
+
+#define IKM_SIZE_NORMA 0
+#define IKM_SIZE_NETWORK -1
+
+#define ikm_free(kmsg) \
+MACRO_BEGIN \
+ register vm_size_t _size = (kmsg)->ikm_size; \
+ \
+ if ((integer_t)_size > 0) \
+ kfree((vm_offset_t) (kmsg), _size); \
+ else \
+ ipc_kmsg_free(kmsg); \
+MACRO_END
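/*
 * A tiny standalone illustration (not from this commit) of the dispatch in
 * ikm_free: only buffers that came from ikm_alloc carry a positive size,
 * so the non-positive sentinels route to the out-of-line ipc_kmsg_free path.
 */
#include <assert.h>

#define SIZE_NORMA    0L
#define SIZE_NETWORK  (-1L)

enum free_path { FREE_KALLOC, FREE_SPECIAL };

static enum free_path pick_free_path(long ikm_size)
{
    return (ikm_size > 0) ? FREE_KALLOC : FREE_SPECIAL;
}

int main(void)
{
    assert(pick_free_path(256) == FREE_KALLOC);
    assert(pick_free_path(SIZE_NORMA) == FREE_SPECIAL);
    assert(pick_free_path(SIZE_NETWORK) == FREE_SPECIAL);
    return 0;
}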
+
+/*
+ * struct ipc_kmsg_queue is defined in kern/thread.h instead of here,
+ * so that kern/thread.h doesn't have to include ipc/ipc_kmsg.h.
+ */
+
+#include <ipc/ipc_kmsg_queue.h>
+
+typedef struct ipc_kmsg_queue *ipc_kmsg_queue_t;
+
+#define IKMQ_NULL ((ipc_kmsg_queue_t) 0)
+
+
+#define ipc_kmsg_queue_init(queue) \
+MACRO_BEGIN \
+ (queue)->ikmq_base = IKM_NULL; \
+MACRO_END
+
+#define ipc_kmsg_queue_empty(queue) ((queue)->ikmq_base == IKM_NULL)
+
+/* Enqueue a kmsg */
+extern void ipc_kmsg_enqueue(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg);
+
+/* Dequeue and return a kmsg */
+extern ipc_kmsg_t ipc_kmsg_dequeue(
+ ipc_kmsg_queue_t queue);
+
+/* Pull a kmsg out of a queue */
+extern void ipc_kmsg_rmqueue(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg);
+
+#define ipc_kmsg_queue_first(queue) ((queue)->ikmq_base)
+
+/* Return the kmsg following the given kmsg */
+extern ipc_kmsg_t ipc_kmsg_queue_next(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg);
+
+#define ipc_kmsg_rmqueue_first_macro(queue, kmsg) \
+MACRO_BEGIN \
+ register ipc_kmsg_t _next; \
+ \
+ assert((queue)->ikmq_base == (kmsg)); \
+ \
+ _next = (kmsg)->ikm_next; \
+ if (_next == (kmsg)) { \
+ assert((kmsg)->ikm_prev == (kmsg)); \
+ (queue)->ikmq_base = IKM_NULL; \
+ } else { \
+ register ipc_kmsg_t _prev = (kmsg)->ikm_prev; \
+ \
+ (queue)->ikmq_base = _next; \
+ _next->ikm_prev = _prev; \
+ _prev->ikm_next = _next; \
+ } \
+ /* XXX Debug paranoia */ \
+ kmsg->ikm_next = IKM_BOGUS; \
+ kmsg->ikm_prev = IKM_BOGUS; \
+MACRO_END
+
+#define ipc_kmsg_enqueue_macro(queue, kmsg) \
+MACRO_BEGIN \
+ register ipc_kmsg_t _first = (queue)->ikmq_base; \
+ \
+ if (_first == IKM_NULL) { \
+ (queue)->ikmq_base = (kmsg); \
+ (kmsg)->ikm_next = (kmsg); \
+ (kmsg)->ikm_prev = (kmsg); \
+ } else { \
+ register ipc_kmsg_t _last = _first->ikm_prev; \
+ \
+ (kmsg)->ikm_next = _first; \
+ (kmsg)->ikm_prev = _last; \
+ _first->ikm_prev = (kmsg); \
+ _last->ikm_next = (kmsg); \
+ } \
+MACRO_END
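/*
 * A self-contained miniature (not from this commit) of the circular,
 * doubly linked queue that ipc_kmsg_enqueue_macro and
 * ipc_kmsg_rmqueue_first_macro maintain: the queue holds only a base
 * pointer, and each element links to its neighbours.
 */
#include <assert.h>
#include <stddef.h>

struct node  { struct node *next, *prev; int id; };
struct queue { struct node *base; };

static void enqueue(struct queue *q, struct node *n)      /* insert at the tail */
{
    struct node *first = q->base;

    if (first == NULL) {
        q->base = n;
        n->next = n->prev = n;
    } else {
        struct node *last = first->prev;

        n->next = first;
        n->prev = last;
        first->prev = n;
        last->next = n;
    }
}

static struct node *dequeue(struct queue *q)              /* remove the head */
{
    struct node *n = q->base;

    if (n == NULL)
        return NULL;
    if (n->next == n) {
        q->base = NULL;
    } else {
        struct node *next = n->next, *prev = n->prev;

        q->base = next;
        next->prev = prev;
        prev->next = next;
    }
    n->next = n->prev = NULL;   /* the macro above poisons these with IKM_BOGUS */
    return n;
}

int main(void)
{
    struct queue q = { NULL };
    struct node a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };

    enqueue(&q, &a);
    enqueue(&q, &b);
    assert(dequeue(&q)->id == 1);       /* FIFO order */
    assert(dequeue(&q)->id == 2);
    assert(dequeue(&q) == NULL);
    return 0;
}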
+
+extern void
+ipc_kmsg_destroy(/* ipc_kmsg_t */);
+
+extern void
+ipc_kmsg_clean(/* ipc_kmsg_t */);
+
+extern void
+ipc_kmsg_free(/* ipc_kmsg_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_get(/* mach_msg_header_t *, mach_msg_size_t, ipc_kmsg_t * */);
+
+extern mach_msg_return_t
+ipc_kmsg_get_from_kernel(/* mach_msg_header_t *, mach_msg_size_t,
+ ipc_kmsg_t * */);
+
+extern mach_msg_return_t
+ipc_kmsg_put(/* mach_msg_header_t *, ipc_kmsg_t, mach_msg_size_t */);
+
+extern void
+ipc_kmsg_put_to_kernel(/* mach_msg_header_t *, ipc_kmsg_t, mach_msg_size_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyin_header(/* mach_msg_header_t *, ipc_space_t, mach_port_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyin(/* ipc_kmsg_t, ipc_space_t, vm_map_t, mach_port_t */);
+
+extern void
+ipc_kmsg_copyin_from_kernel(/* ipc_kmsg_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_header(/* mach_msg_header_t *, ipc_space_t, mach_port_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_object(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, mach_port_t * */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_body(/* vm_offset_t, vm_offset_t, ipc_space_t, vm_map_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout(/* ipc_kmsg_t, ipc_space_t, vm_map_t, mach_port_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_pseudo(/* ipc_kmsg_t, ipc_space_t, vm_map_t */);
+
+extern void
+ipc_kmsg_copyout_dest(/* ipc_kmsg_t, ipc_space_t */);
+
+#if MACH_IPC_COMPAT
+
+extern mach_msg_return_t
+ipc_kmsg_copyin_compat(/* ipc_kmsg_t, ipc_space_t, vm_map_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_compat(/* ipc_kmsg_t, ipc_space_t, vm_map_t */);
+
+#endif /* MACH_IPC_COMPAT */
+#endif /* _IPC_IPC_KMSG_H_ */
diff --git a/ipc/ipc_kmsg_queue.h b/ipc/ipc_kmsg_queue.h
new file mode 100644
index 00000000..51ccbe24
--- /dev/null
+++ b/ipc/ipc_kmsg_queue.h
@@ -0,0 +1,31 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _IPC_KMSG_QUEUE_H_
+#define _IPC_KMSG_QUEUE_H_
+struct ipc_kmsg_queue {
+ struct ipc_kmsg *ikmq_base;
+};
+#endif
+
diff --git a/ipc/ipc_machdep.h b/ipc/ipc_machdep.h
new file mode 100755
index 00000000..e864c4b0
--- /dev/null
+++ b/ipc/ipc_machdep.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * At times, we need to know the size of a port in bits.
+ */
+
+/* 64 bit machines */
+#if defined(__alpha)
+#define PORT_T_SIZE_IN_BITS 64
+#endif
+
+/* default, 32 bit machines */
+#if !defined(PORT_T_SIZE_IN_BITS)
+#define PORT_T_SIZE_IN_BITS 32
+#endif
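/*
 * A trivial standalone sanity check (not from this commit) of what this
 * constant asserts: the declared in-message size of a port element must
 * match the width of a port name, which is what the copyin/copyout paths
 * in ipc_kmsg.c test against PORT_T_SIZE_IN_BITS.  A plain 32-bit unsigned
 * int stands in for mach_port_t here.
 */
#include <assert.h>
#include <limits.h>

#define PORT_T_SIZE_IN_BITS 32

typedef unsigned int fake_port_t;       /* stand-in for mach_port_t */

int main(void)
{
    assert(PORT_T_SIZE_IN_BITS == sizeof(fake_port_t) * CHAR_BIT);
    return 0;
}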
+
diff --git a/ipc/ipc_marequest.c b/ipc/ipc_marequest.c
new file mode 100644
index 00000000..6ddffa04
--- /dev/null
+++ b/ipc/ipc_marequest.c
@@ -0,0 +1,485 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_marequest.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to handle msg-accepted requests.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/message.h>
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/mach_param.h>
+#include <kern/kalloc.h>
+#include <kern/zalloc.h>
+#include <ipc/port.h>
+#include <ipc/ipc_init.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_notify.h>
+
+#include <mach_ipc_debug.h>
+#if MACH_IPC_DEBUG
+#include <mach/kern_return.h>
+#include <mach_debug/hash_info.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#endif
+
+
+zone_t ipc_marequest_zone;
+int ipc_marequest_max = IMAR_MAX;
+
+#define imar_alloc() ((ipc_marequest_t) zalloc(ipc_marequest_zone))
+#define imar_free(imar) zfree(ipc_marequest_zone, (vm_offset_t) (imar))
+
+typedef unsigned int ipc_marequest_index_t;
+
+ipc_marequest_index_t ipc_marequest_size;
+ipc_marequest_index_t ipc_marequest_mask;
+
+#define IMAR_HASH(space, name) \
+ ((((ipc_marequest_index_t)((vm_offset_t)space) >> 4) + \
+ MACH_PORT_INDEX(name) + MACH_PORT_NGEN(name)) & \
+ ipc_marequest_mask)
+
+typedef struct ipc_marequest_bucket {
+ decl_simple_lock_data(, imarb_lock_data)
+ ipc_marequest_t imarb_head;
+} *ipc_marequest_bucket_t;
+
+#define IMARB_NULL ((ipc_marequest_bucket_t) 0)
+
+#define imarb_lock_init(imarb) simple_lock_init(&(imarb)->imarb_lock_data)
+#define imarb_lock(imarb) simple_lock(&(imarb)->imarb_lock_data)
+#define imarb_unlock(imarb) simple_unlock(&(imarb)->imarb_lock_data)
+
+ipc_marequest_bucket_t ipc_marequest_table;
+
+
+
+/*
+ * Routine: ipc_marequest_init
+ * Purpose:
+ * Initialize the msg-accepted request module.
+ */
+
+void
+ipc_marequest_init()
+{
+ ipc_marequest_index_t i;
+
+ /* if not configured, initialize ipc_marequest_size */
+
+ if (ipc_marequest_size == 0) {
+ ipc_marequest_size = ipc_marequest_max >> 8;
+ if (ipc_marequest_size < 16)
+ ipc_marequest_size = 16;
+ }
+
+ /* make sure it is a power of two */
+
+ ipc_marequest_mask = ipc_marequest_size - 1;
+ if ((ipc_marequest_size & ipc_marequest_mask) != 0) {
+ unsigned int bit;
+
+ /* round up to closest power of two */
+
+ for (bit = 1;; bit <<= 1) {
+ ipc_marequest_mask |= bit;
+ ipc_marequest_size = ipc_marequest_mask + 1;
+
+ if ((ipc_marequest_size & ipc_marequest_mask) == 0)
+ break;
+ }
+ }
+
+ /* allocate ipc_marequest_table */
+
+ ipc_marequest_table = (ipc_marequest_bucket_t)
+ kalloc((vm_size_t) (ipc_marequest_size *
+ sizeof(struct ipc_marequest_bucket)));
+ assert(ipc_marequest_table != IMARB_NULL);
+
+ /* and initialize it */
+
+ for (i = 0; i < ipc_marequest_size; i++) {
+ ipc_marequest_bucket_t bucket;
+
+ bucket = &ipc_marequest_table[i];
+ imarb_lock_init(bucket);
+ bucket->imarb_head = IMAR_NULL;
+ }
+
+ ipc_marequest_zone =
+ zinit(sizeof(struct ipc_marequest),
+ ipc_marequest_max * sizeof(struct ipc_marequest),
+ sizeof(struct ipc_marequest),
+ IPC_ZONE_TYPE, "ipc msg-accepted requests");
+}
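/*
 * A standalone copy (not from this commit) of the round-up-to-a-power-of-two
 * loop used above, which lets IMAR_HASH mask with ipc_marequest_mask instead
 * of dividing.
 */
#include <assert.h>

static unsigned int round_up_pow2(unsigned int size)
{
    unsigned int mask = size - 1;
    unsigned int bit;

    if ((size & mask) == 0)
        return size;                    /* already a power of two */

    for (bit = 1;; bit <<= 1) {
        mask |= bit;
        size = mask + 1;
        if ((size & mask) == 0)
            return size;
    }
}

int main(void)
{
    assert(round_up_pow2(16) == 16);
    assert(round_up_pow2(17) == 32);
    assert(round_up_pow2(100) == 128);
    return 0;
}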
+
+/*
+ * Routine: ipc_marequest_create
+ * Purpose:
+ * Create a msg-accepted request, because
+ * a sender is forcing a message with MACH_SEND_NOTIFY.
+ *
+ * The "notify" argument should name a receive right
+ * that is used to create the send-once notify port.
+ *
+ * [MACH_IPC_COMPAT] If "notify" is MACH_PORT_NULL,
+ * then an old-style msg-accepted request is created.
+ * Conditions:
+ * Nothing locked; refs held for space and port.
+ * Returns:
+ * MACH_MSG_SUCCESS Msg-accepted request created.
+ * MACH_SEND_INVALID_NOTIFY The space is dead.
+ * MACH_SEND_INVALID_NOTIFY The notify port is bad.
+ * MACH_SEND_NOTIFY_IN_PROGRESS
+ * This space has already forced a message to this port.
+ * MACH_SEND_NO_NOTIFY Can't allocate a msg-accepted request.
+ */
+
+mach_msg_return_t
+ipc_marequest_create(space, port, notify, marequestp)
+ ipc_space_t space;
+ ipc_port_t port;
+ mach_port_t notify;
+ ipc_marequest_t *marequestp;
+{
+ mach_port_t name;
+ ipc_entry_t entry;
+ ipc_port_t soright;
+ ipc_marequest_t marequest;
+ ipc_marequest_bucket_t bucket;
+
+#if !MACH_IPC_COMPAT
+ assert(notify != MACH_PORT_NULL);
+#endif /* !MACH_IPC_COMPAT */
+
+ marequest = imar_alloc();
+ if (marequest == IMAR_NULL)
+ return MACH_SEND_NO_NOTIFY;
+
+ /*
+ * Delay creating the send-once right until
+ * we know there will be no errors. Otherwise,
+ * we would have to worry about disposing of it
+ * when it turned out it wasn't needed.
+ */
+
+ is_write_lock(space);
+ if (!space->is_active) {
+ is_write_unlock(space);
+ imar_free(marequest);
+ return MACH_SEND_INVALID_NOTIFY;
+ }
+
+ if (ipc_right_reverse(space, (ipc_object_t) port, &name, &entry)) {
+ ipc_entry_bits_t bits;
+
+ /* port is locked and active */
+ ip_unlock(port);
+ bits = entry->ie_bits;
+
+ assert(port == (ipc_port_t) entry->ie_object);
+ assert(bits & MACH_PORT_TYPE_SEND_RECEIVE);
+
+ if (bits & IE_BITS_MAREQUEST) {
+ is_write_unlock(space);
+ imar_free(marequest);
+ return MACH_SEND_NOTIFY_IN_PROGRESS;
+ }
+
+#if MACH_IPC_COMPAT
+ if (notify == MACH_PORT_NULL)
+ soright = IP_NULL;
+ else
+#endif /* MACH_IPC_COMPAT */
+ if ((soright = ipc_port_lookup_notify(space, notify))
+ == IP_NULL) {
+ is_write_unlock(space);
+ imar_free(marequest);
+ return MACH_SEND_INVALID_NOTIFY;
+ }
+
+ entry->ie_bits = bits | IE_BITS_MAREQUEST;
+
+ is_reference(space);
+ marequest->imar_space = space;
+ marequest->imar_name = name;
+ marequest->imar_soright = soright;
+
+ bucket = &ipc_marequest_table[IMAR_HASH(space, name)];
+ imarb_lock(bucket);
+
+ marequest->imar_next = bucket->imarb_head;
+ bucket->imarb_head = marequest;
+
+ imarb_unlock(bucket);
+ } else {
+#if MACH_IPC_COMPAT
+ if (notify == MACH_PORT_NULL)
+ soright = IP_NULL;
+ else
+#endif /* MACH_IPC_COMPAT */
+ if ((soright = ipc_port_lookup_notify(space, notify))
+ == IP_NULL) {
+ is_write_unlock(space);
+ imar_free(marequest);
+ return MACH_SEND_INVALID_NOTIFY;
+ }
+
+ is_reference(space);
+ marequest->imar_space = space;
+ marequest->imar_name = MACH_PORT_NULL;
+ marequest->imar_soright = soright;
+ }
+
+ is_write_unlock(space);
+ *marequestp = marequest;
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_marequest_cancel
+ * Purpose:
+ * Cancel a msg-accepted request, because
+ * the space's entry is being destroyed.
+ * Conditions:
+ * The space is write-locked and active.
+ */
+
+void
+ipc_marequest_cancel(space, name)
+ ipc_space_t space;
+ mach_port_t name;
+{
+ ipc_marequest_bucket_t bucket;
+ ipc_marequest_t marequest, *last;
+
+ assert(space->is_active);
+
+ bucket = &ipc_marequest_table[IMAR_HASH(space, name)];
+ imarb_lock(bucket);
+
+ for (last = &bucket->imarb_head;
+ (marequest = *last) != IMAR_NULL;
+ last = &marequest->imar_next)
+ if ((marequest->imar_space == space) &&
+ (marequest->imar_name == name))
+ break;
+
+ assert(marequest != IMAR_NULL);
+ *last = marequest->imar_next;
+ imarb_unlock(bucket);
+
+ marequest->imar_name = MACH_PORT_NULL;
+}
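/*
 * The bucket scans in ipc_marequest_cancel, _rename and _destroy all use the
 * same pointer-to-pointer idiom, so unlinking needs no special case for the
 * head of the chain.  A minimal standalone sketch (not from this commit)
 * with a hypothetical node type:
 */
#include <assert.h>
#include <stddef.h>

struct req { struct req *next; int name; };

static struct req *bucket_remove(struct req **head, int name)
{
    struct req *r, **last;

    for (last = head; (r = *last) != NULL; last = &r->next)
        if (r->name == name)
            break;

    if (r != NULL)
        *last = r->next;        /* unlink, head or interior alike */
    return r;
}

int main(void)
{
    struct req c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
    struct req *head = &a;

    assert(bucket_remove(&head, 2) == &b);      /* interior node */
    assert(head == &a && a.next == &c);
    assert(bucket_remove(&head, 1) == &a);      /* head node */
    assert(head == &c);
    return 0;
}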
+
+/*
+ * Routine: ipc_marequest_rename
+ * Purpose:
+ * Rename a msg-accepted request, because the entry
+ * in the space is being renamed.
+ * Conditions:
+ * The space is write-locked and active.
+ */
+
+void
+ipc_marequest_rename(space, old, new)
+ ipc_space_t space;
+ mach_port_t old, new;
+{
+ ipc_marequest_bucket_t bucket;
+ ipc_marequest_t marequest, *last;
+
+ assert(space->is_active);
+
+ bucket = &ipc_marequest_table[IMAR_HASH(space, old)];
+ imarb_lock(bucket);
+
+ for (last = &bucket->imarb_head;
+ (marequest = *last) != IMAR_NULL;
+ last = &marequest->imar_next)
+ if ((marequest->imar_space == space) &&
+ (marequest->imar_name == old))
+ break;
+
+ assert(marequest != IMAR_NULL);
+ *last = marequest->imar_next;
+ imarb_unlock(bucket);
+
+ marequest->imar_name = new;
+
+ bucket = &ipc_marequest_table[IMAR_HASH(space, new)];
+ imarb_lock(bucket);
+
+ marequest->imar_next = bucket->imarb_head;
+ bucket->imarb_head = marequest;
+
+ imarb_unlock(bucket);
+}
+
+/*
+ * Routine: ipc_marequest_destroy
+ * Purpose:
+ * Destroy a msg-accepted request, because
+ * the kernel message is being received/destroyed.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_marequest_destroy(marequest)
+ ipc_marequest_t marequest;
+{
+ ipc_space_t space = marequest->imar_space;
+ mach_port_t name;
+ ipc_port_t soright;
+#if MACH_IPC_COMPAT
+ ipc_port_t sright = IP_NULL;
+#endif /* MACH_IPC_COMPAT */
+
+ is_write_lock(space);
+
+ name = marequest->imar_name;
+ soright = marequest->imar_soright;
+
+ if (name != MACH_PORT_NULL) {
+ ipc_marequest_bucket_t bucket;
+ ipc_marequest_t this, *last;
+
+ bucket = &ipc_marequest_table[IMAR_HASH(space, name)];
+ imarb_lock(bucket);
+
+ for (last = &bucket->imarb_head;
+ (this = *last) != IMAR_NULL;
+ last = &this->imar_next)
+ if ((this->imar_space == space) &&
+ (this->imar_name == name))
+ break;
+
+ assert(this == marequest);
+ *last = this->imar_next;
+ imarb_unlock(bucket);
+
+ if (space->is_active) {
+ ipc_entry_t entry;
+
+ entry = ipc_entry_lookup(space, name);
+ assert(entry != IE_NULL);
+ assert(entry->ie_bits & IE_BITS_MAREQUEST);
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
+
+ entry->ie_bits &= ~IE_BITS_MAREQUEST;
+
+#if MACH_IPC_COMPAT
+ if (soright == IP_NULL)
+ sright = ipc_space_make_notify(space);
+#endif /* MACH_IPC_COMPAT */
+ } else
+ name = MACH_PORT_NULL;
+ }
+
+ is_write_unlock(space);
+ is_release(space);
+
+ imar_free(marequest);
+
+#if MACH_IPC_COMPAT
+ if (soright == IP_NULL) {
+ if (IP_VALID(sright)) {
+ assert(name != MACH_PORT_NULL);
+ ipc_notify_msg_accepted_compat(sright, name);
+ }
+
+ return;
+ }
+ assert(sright == IP_NULL);
+#endif /* MACH_IPC_COMPAT */
+
+ assert(soright != IP_NULL);
+ ipc_notify_msg_accepted(soright, name);
+}
+
+#if MACH_IPC_DEBUG
+
+
+/*
+ * Routine: ipc_marequest_info
+ * Purpose:
+ * Return information about the marequest hash table.
+ * Fills the buffer with as much information as possible
+ * and returns the desired size of the buffer.
+ * Conditions:
+ * Nothing locked. The caller should provide
+ * possibly-pageable memory.
+ */
+
+unsigned int
+ipc_marequest_info(maxp, info, count)
+ unsigned int *maxp;
+ hash_info_bucket_t *info;
+ unsigned int count;
+{
+ ipc_marequest_index_t i;
+
+ if (ipc_marequest_size < count)
+ count = ipc_marequest_size;
+
+ for (i = 0; i < count; i++) {
+ ipc_marequest_bucket_t bucket = &ipc_marequest_table[i];
+ unsigned int bucket_count = 0;
+ ipc_marequest_t marequest;
+
+ imarb_lock(bucket);
+ for (marequest = bucket->imarb_head;
+ marequest != IMAR_NULL;
+ marequest = marequest->imar_next)
+ bucket_count++;
+ imarb_unlock(bucket);
+
+ /* don't touch pageable memory while holding locks */
+ info[i].hib_count = bucket_count;
+ }
+
+ *maxp = ipc_marequest_max;
+ return ipc_marequest_size;
+}
+
+#endif /* MACH_IPC_DEBUG */
diff --git a/ipc/ipc_marequest.h b/ipc/ipc_marequest.h
new file mode 100644
index 00000000..0e0380ed
--- /dev/null
+++ b/ipc/ipc_marequest.h
@@ -0,0 +1,98 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_marequest.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for msg-accepted requests.
+ */
+
+#ifndef _IPC_IPC_MAREQUEST_H_
+#define _IPC_IPC_MAREQUEST_H_
+
+#include <mach_ipc_debug.h>
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+
+/*
+ * A msg-accepted request is made when MACH_SEND_NOTIFY is used
+ * to force a message to a send right. The IE_BITS_MAREQUEST bit
+ * in an entry indicates the entry is blocked because MACH_SEND_NOTIFY
+ * has already been used to force a message. The kmsg holds
+ * a pointer to the marequest; it is destroyed when the kmsg
+ * is received/destroyed. (If the send right is destroyed,
+ * this just changes imar_name. If the space is destroyed,
+ * the marequest is left unchanged.)
+ *
+ * Locking considerations: The imar_space field is read-only and
+ * points to the space which locks the imar_name field. imar_soright
+ * is read-only. Normally it is a non-null send-once right for
+ * the msg-accepted notification, but in compat mode it is null
+ * and the notification goes to the space's notify port. Normally
+ * imar_name is non-null, but if the send right is destroyed then
+ * it is changed to be null. imar_next is locked by a bucket lock;
+ * imar_name is read-only when the request is in a bucket. (So lookups
+ * in the bucket can safely check imar_space and imar_name.)
+ * imar_space and imar_soright both hold references.
+ */
+
+typedef struct ipc_marequest {
+ struct ipc_space *imar_space;
+ mach_port_t imar_name;
+ struct ipc_port *imar_soright;
+ struct ipc_marequest *imar_next;
+} *ipc_marequest_t;
+
+#define IMAR_NULL ((ipc_marequest_t) 0)
+
+
+extern void
+ipc_marequest_init();
+
+#if MACH_IPC_DEBUG
+
+extern unsigned int
+ipc_marequest_info(/* unsigned int *, hash_info_bucket_t *, unsigned int */);
+
+#endif /* MACH_IPC_DEBUG */
+
+extern mach_msg_return_t
+ipc_marequest_create(/* ipc_space_t space, ipc_port_t port,
+ mach_port_t notify, ipc_marequest_t *marequestp */);
+
+extern void
+ipc_marequest_cancel(/* ipc_space_t space, mach_port_t name */);
+
+extern void
+ipc_marequest_rename(/* ipc_space_t space,
+ mach_port_t old, mach_port_t new */);
+
+extern void
+ipc_marequest_destroy(/* ipc_marequest_t marequest */);
+
+#endif /* _IPC_IPC_MAREQUEST_H_ */
diff --git a/ipc/ipc_mqueue.c b/ipc/ipc_mqueue.c
new file mode 100644
index 00000000..5447c49b
--- /dev/null
+++ b/ipc/ipc_mqueue.c
@@ -0,0 +1,754 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_mqueue.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC message queues.
+ */
+
+#include <norma_ipc.h>
+
+#include <mach/port.h>
+#include <mach/message.h>
+#include <kern/assert.h>
+#include <kern/counters.h>
+#include <kern/sched_prim.h>
+#include <kern/ipc_sched.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_marequest.h>
+
+
+
+#if NORMA_IPC
+extern ipc_mqueue_t norma_ipc_handoff_mqueue;
+extern ipc_kmsg_t norma_ipc_handoff_msg;
+extern mach_msg_size_t norma_ipc_handoff_max_size;
+extern mach_msg_size_t norma_ipc_handoff_msg_size;
+extern ipc_kmsg_t norma_ipc_kmsg_accept();
+#endif /* NORMA_IPC */
+
+/*
+ * Routine: ipc_mqueue_init
+ * Purpose:
+ * Initialize a newly-allocated message queue.
+ */
+
+void
+ipc_mqueue_init(
+ ipc_mqueue_t mqueue)
+{
+ imq_lock_init(mqueue);
+ ipc_kmsg_queue_init(&mqueue->imq_messages);
+ ipc_thread_queue_init(&mqueue->imq_threads);
+}
+
+/*
+ * Routine: ipc_mqueue_move
+ * Purpose:
+ * Move messages from one queue (source) to another (dest).
+ * Only moves messages sent to the specified port.
+ * Conditions:
+ * Both queues must be locked.
+ * (This is sufficient to manipulate port->ip_seqno.)
+ */
+
+void
+ipc_mqueue_move(
+ ipc_mqueue_t dest,
+ ipc_mqueue_t source,
+ ipc_port_t port)
+{
+ ipc_kmsg_queue_t oldq, newq;
+ ipc_thread_queue_t blockedq;
+ ipc_kmsg_t kmsg, next;
+ ipc_thread_t th;
+
+ oldq = &source->imq_messages;
+ newq = &dest->imq_messages;
+ blockedq = &dest->imq_threads;
+
+ for (kmsg = ipc_kmsg_queue_first(oldq);
+ kmsg != IKM_NULL; kmsg = next) {
+ next = ipc_kmsg_queue_next(oldq, kmsg);
+
+ /* only move messages sent to port */
+
+ if (kmsg->ikm_header.msgh_remote_port != (mach_port_t) port)
+ continue;
+
+ ipc_kmsg_rmqueue(oldq, kmsg);
+
+ /* before adding kmsg to newq, check for a blocked receiver */
+
+ while ((th = ipc_thread_dequeue(blockedq)) != ITH_NULL) {
+ assert(ipc_kmsg_queue_empty(newq));
+
+ thread_go(th);
+
+ /* check if the receiver can handle the message */
+
+ if (kmsg->ikm_header.msgh_size <= th->ith_msize) {
+ th->ith_state = MACH_MSG_SUCCESS;
+ th->ith_kmsg = kmsg;
+ th->ith_seqno = port->ip_seqno++;
+
+ goto next_kmsg;
+ }
+
+ th->ith_state = MACH_RCV_TOO_LARGE;
+ th->ith_msize = kmsg->ikm_header.msgh_size;
+ }
+
+ /* didn't find a receiver to handle the message */
+
+ ipc_kmsg_enqueue(newq, kmsg);
+ next_kmsg:;
+ }
+}
+
+/*
+ * Routine: ipc_mqueue_changed
+ * Purpose:
+ * Wake up receivers waiting in a message queue.
+ * Conditions:
+ * The message queue is locked.
+ */
+
+void
+ipc_mqueue_changed(
+ ipc_mqueue_t mqueue,
+ mach_msg_return_t mr)
+{
+ ipc_thread_t th;
+
+ while ((th = ipc_thread_dequeue(&mqueue->imq_threads)) != ITH_NULL) {
+ th->ith_state = mr;
+ thread_go(th);
+ }
+}
+
+/*
+ * Routine: ipc_mqueue_send
+ * Purpose:
+ * Send a message to a port. The message holds a reference
+ * for the destination port in the msgh_remote_port field.
+ *
+ * If unsuccessful, the caller still has possession of
+ * the message and must do something with it. If successful,
+ * the message is queued, given to a receiver, destroyed,
+ * or handled directly by the kernel via mach_msg.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS The message was accepted.
+ * MACH_SEND_TIMED_OUT Caller still has message.
+ * MACH_SEND_INTERRUPTED Caller still has message.
+ */
+
+mach_msg_return_t
+ipc_mqueue_send(kmsg, option, time_out)
+ ipc_kmsg_t kmsg;
+ mach_msg_option_t option;
+ mach_msg_timeout_t time_out;
+{
+ ipc_port_t port;
+
+ port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ assert(IP_VALID(port));
+
+ ip_lock(port);
+
+ if (port->ip_receiver == ipc_space_kernel) {
+ ipc_kmsg_t reply;
+
+ /*
+ * We can check ip_receiver == ipc_space_kernel
+ * before checking that the port is active because
+ * ipc_port_dealloc_kernel clears ip_receiver
+ * before destroying a kernel port.
+ */
+
+ assert(ip_active(port));
+ ip_unlock(port);
+
+ reply = ipc_kobject_server(kmsg);
+ if (reply != IKM_NULL)
+ ipc_mqueue_send_always(reply);
+
+ return MACH_MSG_SUCCESS;
+ }
+
+#if NORMA_IPC
+ if (IP_NORMA_IS_PROXY(port)) {
+ mach_msg_return_t mr;
+
+ mr = norma_ipc_send(kmsg);
+ ip_unlock(port);
+ return mr;
+ }
+#endif /* NORMA_IPC */
+
+ for (;;) {
+ ipc_thread_t self;
+
+ /*
+ * Can't deliver to a dead port.
+ * However, we can pretend it got sent
+ * and was then immediately destroyed.
+ */
+
+ if (!ip_active(port)) {
+ /*
+ * We can't let ipc_kmsg_destroy deallocate
+ * the port right, because we might end up
+ * in an infinite loop trying to deliver
+ * a send-once notification.
+ */
+
+ ip_release(port);
+ ip_check_unlock(port);
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+#if NORMA_IPC
+ /* XXX until ipc_kmsg_destroy is fixed... */
+ norma_ipc_finish_receiving(&kmsg);
+#endif /* NORMA_IPC */
+ ipc_kmsg_destroy(kmsg);
+ return MACH_MSG_SUCCESS;
+ }
+
+ /*
+ * Don't block if:
+ * 1) We're under the queue limit.
+ * 2) Caller used the MACH_SEND_ALWAYS internal option.
+ * 3) Message is sent to a send-once right.
+ */
+
+ if ((port->ip_msgcount < port->ip_qlimit) ||
+ (option & MACH_SEND_ALWAYS) ||
+ (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
+ MACH_MSG_TYPE_PORT_SEND_ONCE))
+ break;
+
+ /* must block waiting for queue to clear */
+
+ self = current_thread();
+
+ if (option & MACH_SEND_TIMEOUT) {
+ if (time_out == 0) {
+ ip_unlock(port);
+ return MACH_SEND_TIMED_OUT;
+ }
+
+ thread_will_wait_with_timeout(self, time_out);
+ } else
+ thread_will_wait(self);
+
+ ipc_thread_enqueue(&port->ip_blocked, self);
+ self->ith_state = MACH_SEND_IN_PROGRESS;
+
+ ip_unlock(port);
+ counter(c_ipc_mqueue_send_block++);
+ thread_block((void (*)(void)) 0);
+ ip_lock(port);
+
+ /* why did we wake up? */
+
+ if (self->ith_state == MACH_MSG_SUCCESS)
+ continue;
+ assert(self->ith_state == MACH_SEND_IN_PROGRESS);
+
+ /* take ourselves off blocked queue */
+
+ ipc_thread_rmqueue(&port->ip_blocked, self);
+
+ /*
+ * Thread wakeup-reason field tells us why
+ * the wait was interrupted.
+ */
+
+ switch (self->ith_wait_result) {
+ case THREAD_INTERRUPTED:
+ /* send was interrupted - give up */
+
+ ip_unlock(port);
+ return MACH_SEND_INTERRUPTED;
+
+ case THREAD_TIMED_OUT:
+ /* timeout expired */
+
+ assert(option & MACH_SEND_TIMEOUT);
+ time_out = 0;
+ break;
+
+ case THREAD_RESTART:
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_mqueue_send");
+#else
+ panic("ipc_mqueue_send");
+#endif
+ }
+ }
+
+ if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
+ ip_unlock(port);
+
+ /* don't allow the creation of a circular loop */
+
+#if NORMA_IPC
+ /* XXX until ipc_kmsg_destroy is fixed... */
+ norma_ipc_finish_receiving(&kmsg);
+#endif /* NORMA_IPC */
+ ipc_kmsg_destroy(kmsg);
+ return MACH_MSG_SUCCESS;
+ }
+
+ {
+ ipc_mqueue_t mqueue;
+ ipc_pset_t pset;
+ ipc_thread_t receiver;
+ ipc_thread_queue_t receivers;
+
+ port->ip_msgcount++;
+ assert(port->ip_msgcount > 0);
+
+ pset = port->ip_pset;
+ if (pset == IPS_NULL)
+ mqueue = &port->ip_messages;
+ else
+ mqueue = &pset->ips_messages;
+
+ imq_lock(mqueue);
+ receivers = &mqueue->imq_threads;
+
+ /*
+ * Can unlock the port now that the msg queue is locked
+ * and we know the port is active. While the msg queue
+ * is locked, we have control of the kmsg, so the ref in
+ * it for the port is still good. If the msg queue is in
+ * a set (dead or alive), then we're OK because the port
+ * is still a member of the set and the set won't go away
+ * until the port is taken out, which tries to lock the
+ * set's msg queue to remove the port's msgs.
+ */
+
+ ip_unlock(port);
+
+ /* check for a receiver for the message */
+
+#if NORMA_IPC
+ if (mqueue == norma_ipc_handoff_mqueue) {
+ norma_ipc_handoff_msg = kmsg;
+ if (kmsg->ikm_header.msgh_size <= norma_ipc_handoff_max_size) {
+ imq_unlock(mqueue);
+ return MACH_MSG_SUCCESS;
+ }
+ norma_ipc_handoff_msg_size = kmsg->ikm_header.msgh_size;
+ }
+#endif /* NORMA_IPC */
+ for (;;) {
+ receiver = ipc_thread_queue_first(receivers);
+ if (receiver == ITH_NULL) {
+ /* no receivers; queue kmsg */
+
+ ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
+ imq_unlock(mqueue);
+ break;
+ }
+
+ ipc_thread_rmqueue_first_macro(receivers, receiver);
+ assert(ipc_kmsg_queue_empty(&mqueue->imq_messages));
+
+ if (kmsg->ikm_header.msgh_size <= receiver->ith_msize) {
+ /* got a successful receiver */
+
+ receiver->ith_state = MACH_MSG_SUCCESS;
+ receiver->ith_kmsg = kmsg;
+ receiver->ith_seqno = port->ip_seqno++;
+ imq_unlock(mqueue);
+
+ thread_go(receiver);
+ break;
+ }
+
+ receiver->ith_state = MACH_RCV_TOO_LARGE;
+ receiver->ith_msize = kmsg->ikm_header.msgh_size;
+ thread_go(receiver);
+ }
+ }
+
+ return MACH_MSG_SUCCESS;
+}
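/*
 * A standalone restatement (not from this commit) of the three "don't block"
 * cases tested in ipc_mqueue_send above, written as a pure predicate.  The
 * option flag and disposition values are stand-ins, not the real Mach ones.
 */
#include <assert.h>

#define SEND_ALWAYS     0x1     /* stand-in for MACH_SEND_ALWAYS */
#define PORT_SEND       17      /* stand-in dispositions (values assumed) */
#define PORT_SEND_ONCE  18

static int send_may_proceed(unsigned int msgcount, unsigned int qlimit,
                            unsigned int option, unsigned int remote_type)
{
    return (msgcount < qlimit) ||           /* room under the queue limit */
           (option & SEND_ALWAYS) ||        /* internal override          */
           (remote_type == PORT_SEND_ONCE); /* send-once right            */
}

int main(void)
{
    assert(send_may_proceed(3, 5, 0, PORT_SEND));               /* queue has room   */
    assert(!send_may_proceed(5, 5, 0, PORT_SEND));              /* full: would block */
    assert(send_may_proceed(5, 5, SEND_ALWAYS, PORT_SEND));     /* forced send      */
    assert(send_may_proceed(5, 5, 0, PORT_SEND_ONCE));          /* send-once right  */
    return 0;
}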
+
+/*
+ * Routine: ipc_mqueue_copyin
+ * Purpose:
+ * Convert a name in a space to a message queue.
+ * Conditions:
+ * Nothing locked. If successful, the message queue
+ * is returned locked and caller gets a ref for the object.
+ * This ref ensures the continued existence of the queue.
+ * Returns:
+ * MACH_MSG_SUCCESS Found a message queue.
+ * MACH_RCV_INVALID_NAME The space is dead.
+ * MACH_RCV_INVALID_NAME The name doesn't denote a right.
+ * MACH_RCV_INVALID_NAME
+ * The denoted right is not receive or port set.
+ * MACH_RCV_IN_SET Receive right is a member of a set.
+ */
+
+mach_msg_return_t
+ipc_mqueue_copyin(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_mqueue_t *mqueuep,
+ ipc_object_t *objectp)
+{
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_object_t object;
+ ipc_mqueue_t mqueue;
+
+ is_read_lock(space);
+ if (!space->is_active) {
+ is_read_unlock(space);
+ return MACH_RCV_INVALID_NAME;
+ }
+
+ entry = ipc_entry_lookup(space, name);
+ if (entry == IE_NULL) {
+ is_read_unlock(space);
+ return MACH_RCV_INVALID_NAME;
+ }
+
+ bits = entry->ie_bits;
+ object = entry->ie_object;
+
+ if (bits & MACH_PORT_TYPE_RECEIVE) {
+ ipc_port_t port;
+ ipc_pset_t pset;
+
+ port = (ipc_port_t) object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+ is_read_unlock(space);
+
+ pset = port->ip_pset;
+ if (pset != IPS_NULL) {
+ ips_lock(pset);
+ if (ips_active(pset)) {
+ ips_unlock(pset);
+ ip_unlock(port);
+ return MACH_RCV_IN_SET;
+ }
+
+ ipc_pset_remove(pset, port);
+ ips_check_unlock(pset);
+ assert(port->ip_pset == IPS_NULL);
+ }
+
+ mqueue = &port->ip_messages;
+ } else if (bits & MACH_PORT_TYPE_PORT_SET) {
+ ipc_pset_t pset;
+
+ pset = (ipc_pset_t) object;
+ assert(pset != IPS_NULL);
+
+ ips_lock(pset);
+ assert(ips_active(pset));
+ assert(pset->ips_local_name == name);
+ is_read_unlock(space);
+
+ mqueue = &pset->ips_messages;
+ } else {
+ is_read_unlock(space);
+ return MACH_RCV_INVALID_NAME;
+ }
+
+ /*
+ * At this point, the object is locked and active,
+ * the space is unlocked, and mqueue is initialized.
+ */
+
+ io_reference(object);
+ imq_lock(mqueue);
+ io_unlock(object);
+
+ *objectp = object;
+ *mqueuep = mqueue;
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_mqueue_receive
+ * Purpose:
+ * Receive a message from a message queue.
+ *
+ * If continuation is non-zero, then we might discard
+ * our kernel stack when we block. We will continue
+ * after unblocking by executing continuation.
+ *
+ * If resume is true, then we are resuming a receive
+ * operation after a blocked receive discarded our stack.
+ * Conditions:
+ * The message queue is locked; it will be returned unlocked.
+ *
+ * Our caller must hold a reference for the port or port set
+ * to which this queue belongs, to keep the queue
+ * from being deallocated. Furthermore, the port or set
+ * must have been active when the queue was locked.
+ *
+ * The kmsg is returned with clean header fields
+ * and with the circular bit turned off.
+ * Returns:
+ * MACH_MSG_SUCCESS Message returned in kmsgp.
+ * MACH_RCV_TOO_LARGE Message size returned in kmsgp.
+ * MACH_RCV_TIMED_OUT No message obtained.
+ * MACH_RCV_INTERRUPTED No message obtained.
+ * MACH_RCV_PORT_DIED Port/set died; no message.
+ * MACH_RCV_PORT_CHANGED Port moved into set; no msg.
+ *
+ */
+
+mach_msg_return_t
+ipc_mqueue_receive(
+ ipc_mqueue_t mqueue,
+ mach_msg_option_t option,
+ mach_msg_size_t max_size,
+ mach_msg_timeout_t time_out,
+ boolean_t resume,
+ void (*continuation)(void),
+ ipc_kmsg_t *kmsgp,
+ mach_port_seqno_t *seqnop)
+{
+ ipc_port_t port;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+
+ {
+ ipc_kmsg_queue_t kmsgs = &mqueue->imq_messages;
+ ipc_thread_t self = current_thread();
+
+ if (resume)
+ goto after_thread_block;
+
+ for (;;) {
+ kmsg = ipc_kmsg_queue_first(kmsgs);
+#if NORMA_IPC
+ /*
+ * It may be possible to make this work even when a timeout
+ * is specified.
+ *
+ * Netipc_replenish should be moved somewhere else.
+ */
+ if (kmsg == IKM_NULL && ! (option & MACH_RCV_TIMEOUT)) {
+ netipc_replenish(FALSE);
+ *kmsgp = IKM_NULL;
+ kmsg = norma_ipc_kmsg_accept(mqueue, max_size,
+ (mach_msg_size_t *)kmsgp);
+ if (kmsg != IKM_NULL) {
+ port = (ipc_port_t)
+ kmsg->ikm_header.msgh_remote_port;
+ seqno = port->ip_seqno++;
+ break;
+ }
+ if (*kmsgp) {
+ imq_unlock(mqueue);
+ return MACH_RCV_TOO_LARGE;
+ }
+ }
+#endif /* NORMA_IPC */
+ if (kmsg != IKM_NULL) {
+ /* check space requirements */
+
+ if (kmsg->ikm_header.msgh_size > max_size) {
+ * (mach_msg_size_t *) kmsgp =
+ kmsg->ikm_header.msgh_size;
+ imq_unlock(mqueue);
+ return MACH_RCV_TOO_LARGE;
+ }
+
+ ipc_kmsg_rmqueue_first_macro(kmsgs, kmsg);
+ port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ seqno = port->ip_seqno++;
+ break;
+ }
+
+ /* must block waiting for a message */
+
+ if (option & MACH_RCV_TIMEOUT) {
+ if (time_out == 0) {
+ imq_unlock(mqueue);
+ return MACH_RCV_TIMED_OUT;
+ }
+
+ thread_will_wait_with_timeout(self, time_out);
+ } else
+ thread_will_wait(self);
+
+ ipc_thread_enqueue_macro(&mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = max_size;
+
+ imq_unlock(mqueue);
+ if (continuation != (void (*)(void)) 0) {
+ counter(c_ipc_mqueue_receive_block_user++);
+ } else {
+ counter(c_ipc_mqueue_receive_block_kernel++);
+ }
+ thread_block(continuation);
+ after_thread_block:
+ imq_lock(mqueue);
+
+ /* why did we wake up? */
+
+ if (self->ith_state == MACH_MSG_SUCCESS) {
+ /* pick up the message that was handed to us */
+
+ kmsg = self->ith_kmsg;
+ seqno = self->ith_seqno;
+ port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ break;
+ }
+
+ switch (self->ith_state) {
+ case MACH_RCV_TOO_LARGE:
+ /* pick up size of the too-large message */
+
+ * (mach_msg_size_t *) kmsgp = self->ith_msize;
+ /* fall-through */
+
+ case MACH_RCV_PORT_DIED:
+ case MACH_RCV_PORT_CHANGED:
+ /* something bad happened to the port/set */
+
+ imq_unlock(mqueue);
+ return self->ith_state;
+
+ case MACH_RCV_IN_PROGRESS:
+ /*
+ * Awakened for other than IPC completion.
+ * Remove ourselves from the waiting queue,
+ * then check the wakeup cause.
+ */
+
+ ipc_thread_rmqueue(&mqueue->imq_threads, self);
+
+ switch (self->ith_wait_result) {
+ case THREAD_INTERRUPTED:
+ /* receive was interrupted - give up */
+
+ imq_unlock(mqueue);
+ return MACH_RCV_INTERRUPTED;
+
+ case THREAD_TIMED_OUT:
+ /* timeout expired */
+
+ assert(option & MACH_RCV_TIMEOUT);
+ time_out = 0;
+ break;
+
+ case THREAD_RESTART:
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_mqueue_receive");
+#else
+ panic("ipc_mqueue_receive");
+#endif
+ }
+ break;
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_mqueue_receive: strange ith_state");
+#else
+ panic("ipc_mqueue_receive: strange ith_state");
+#endif
+ }
+ }
+
+ /* we have a kmsg; unlock the msg queue */
+
+ imq_unlock(mqueue);
+ assert(kmsg->ikm_header.msgh_size <= max_size);
+ }
+
+ {
+ ipc_marequest_t marequest;
+
+ marequest = kmsg->ikm_marequest;
+ if (marequest != IMAR_NULL) {
+ ipc_marequest_destroy(marequest);
+ kmsg->ikm_marequest = IMAR_NULL;
+ }
+ assert((kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR) == 0);
+
+ assert(port == (ipc_port_t) kmsg->ikm_header.msgh_remote_port);
+ ip_lock(port);
+
+ if (ip_active(port)) {
+ ipc_thread_queue_t senders;
+ ipc_thread_t sender;
+
+ assert(port->ip_msgcount > 0);
+ port->ip_msgcount--;
+
+ senders = &port->ip_blocked;
+ sender = ipc_thread_queue_first(senders);
+
+ if ((sender != ITH_NULL) &&
+ (port->ip_msgcount < port->ip_qlimit)) {
+ ipc_thread_rmqueue(senders, sender);
+ sender->ith_state = MACH_MSG_SUCCESS;
+ thread_go(sender);
+ }
+ }
+
+ ip_unlock(port);
+ }
+
+#if NORMA_IPC
+ norma_ipc_finish_receiving(&kmsg);
+#endif /* NORMA_IPC */
+ *kmsgp = kmsg;
+ *seqnop = seqno;
+ return MACH_MSG_SUCCESS;
+}
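+
+/*
+ * Illustrative sketch, not part of the original sources: a typical
+ * kernel-internal caller translates a receive right into its message
+ * queue with ipc_mqueue_copyin (which returns the queue locked and a
+ * reference for the object), receives from the queue, and then drops
+ * the reference. The helper name example_receive_one and the exact
+ * argument order of ipc_mqueue_copyin are assumptions.
+ */
+#if 0
+mach_msg_return_t
+example_receive_one(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_msg_size_t max_size,
+ ipc_kmsg_t *kmsgp,
+ mach_port_seqno_t *seqnop)
+{
+ ipc_object_t object;
+ ipc_mqueue_t mqueue;
+ mach_msg_return_t mr;
+
+ mr = ipc_mqueue_copyin(space, name, &mqueue, &object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ /* mqueue is locked; we hold a ref for object */
+
+ mr = ipc_mqueue_receive(mqueue, MACH_MSG_OPTION_NONE, max_size,
+ MACH_MSG_TIMEOUT_NONE, FALSE,
+ IMQ_NULL_CONTINUE, kmsgp, seqnop);
+ /* mqueue is returned unlocked */
+
+ ipc_object_release(object);
+ return mr;
+}
+#endif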
diff --git a/ipc/ipc_mqueue.h b/ipc/ipc_mqueue.h
new file mode 100644
index 00000000..690fe28c
--- /dev/null
+++ b/ipc/ipc_mqueue.h
@@ -0,0 +1,108 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_mqueue.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for message queues.
+ */
+
+#ifndef _IPC_IPC_MQUEUE_H_
+#define _IPC_IPC_MQUEUE_H_
+
+#include <mach/message.h>
+#include <kern/assert.h>
+#include <kern/lock.h>
+#include <kern/macro_help.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_thread.h>
+
+typedef struct ipc_mqueue {
+ decl_simple_lock_data(, imq_lock_data)
+ struct ipc_kmsg_queue imq_messages;
+ struct ipc_thread_queue imq_threads;
+} *ipc_mqueue_t;
+
+#define IMQ_NULL ((ipc_mqueue_t) 0)
+
+#define imq_lock_init(mq) simple_lock_init(&(mq)->imq_lock_data)
+#define imq_lock(mq) simple_lock(&(mq)->imq_lock_data)
+#define imq_lock_try(mq) simple_lock_try(&(mq)->imq_lock_data)
+#define imq_unlock(mq) simple_unlock(&(mq)->imq_lock_data)
+
+extern void
+ipc_mqueue_init(/* ipc_mqueue_t */);
+
+extern void
+ipc_mqueue_move(/* ipc_mqueue_t, ipc_mqueue_t, ipc_port_t */);
+
+extern void
+ipc_mqueue_changed(/* ipc_mqueue_t, mach_msg_return_t */);
+
+extern mach_msg_return_t
+ipc_mqueue_send(/* ipc_kmsg_t, mach_msg_option_t, mach_msg_timeout_t */);
+
+#define IMQ_NULL_CONTINUE ((void (*)()) 0)
+
+extern mach_msg_return_t
+ipc_mqueue_receive(/* ipc_mqueue_t, mach_msg_option_t,
+ mach_msg_size_t, mach_msg_timeout_t,
+ boolean_t, void (*)(),
+ ipc_kmsg_t *, mach_port_seqno_t * */);
+
+/*
+ * extern void
+ * ipc_mqueue_send_always(ipc_kmsg_t);
+ *
+ * Unfortunately, to avoid warnings/lint about unused variables
+ * when assertions are turned off, we need two versions of this.
+ */
+
+#include <kern/assert.h>
+
+#if MACH_ASSERT
+
+#define ipc_mqueue_send_always(kmsg) \
+MACRO_BEGIN \
+ mach_msg_return_t mr; \
+ \
+ mr = ipc_mqueue_send((kmsg), MACH_SEND_ALWAYS, \
+ MACH_MSG_TIMEOUT_NONE); \
+ assert(mr == MACH_MSG_SUCCESS); \
+MACRO_END
+
+#else /* MACH_ASSERT */
+
+#define ipc_mqueue_send_always(kmsg) \
+MACRO_BEGIN \
+ (void) ipc_mqueue_send((kmsg), MACH_SEND_ALWAYS, \
+ MACH_MSG_TIMEOUT_NONE); \
+MACRO_END
+
+#endif /* MACH_ASSERT */
+
+#endif /* _IPC_IPC_MQUEUE_H_ */
diff --git a/ipc/ipc_notify.c b/ipc/ipc_notify.c
new file mode 100644
index 00000000..870f301f
--- /dev/null
+++ b/ipc/ipc_notify.c
@@ -0,0 +1,593 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_notify.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Notification-sending functions.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/notify.h>
+#include <kern/assert.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_port.h>
+
+#include <ipc/ipc_machdep.h>
+
+mach_port_deleted_notification_t ipc_notify_port_deleted_template;
+mach_msg_accepted_notification_t ipc_notify_msg_accepted_template;
+mach_port_destroyed_notification_t ipc_notify_port_destroyed_template;
+mach_no_senders_notification_t ipc_notify_no_senders_template;
+mach_send_once_notification_t ipc_notify_send_once_template;
+mach_dead_name_notification_t ipc_notify_dead_name_template;
+
+#if MACH_IPC_COMPAT
+/*
+ * When notification messages are received via the old
+ * msg_receive trap, the msg_type field should contain
+ * MSG_TYPE_EMERGENCY. We arrange for this by putting
+ * MSG_TYPE_EMERGENCY into msgh_seqno, which
+ * ipc_kmsg_copyout_compat copies to msg_type.
+ */
+
+#define NOTIFY_MSGH_SEQNO MSG_TYPE_EMERGENCY
+#else /* MACH_IPC_COMPAT */
+#define NOTIFY_MSGH_SEQNO 0
+#endif /* MACH_IPC_COMPAT */
+
+/*
+ * Routine: ipc_notify_init_port_deleted
+ * Purpose:
+ * Initialize a template for port-deleted notifications.
+ */
+
+void
+ipc_notify_init_port_deleted(n)
+ mach_port_deleted_notification_t *n;
+{
+ mach_msg_header_t *m = &n->not_header;
+ mach_msg_type_t *t = &n->not_type;
+
+ m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+ m->msgh_size = sizeof *n;
+ m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+ m->msgh_local_port = MACH_PORT_NULL;
+ m->msgh_remote_port = MACH_PORT_NULL;
+ m->msgh_id = MACH_NOTIFY_PORT_DELETED;
+
+ t->msgt_name = MACH_MSG_TYPE_PORT_NAME;
+ t->msgt_size = PORT_T_SIZE_IN_BITS;
+ t->msgt_number = 1;
+ t->msgt_inline = TRUE;
+ t->msgt_longform = FALSE;
+ t->msgt_deallocate = FALSE;
+ t->msgt_unused = 0;
+
+ n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init_msg_accepted
+ * Purpose:
+ * Initialize a template for msg-accepted notifications.
+ */
+
+void
+ipc_notify_init_msg_accepted(n)
+ mach_msg_accepted_notification_t *n;
+{
+ mach_msg_header_t *m = &n->not_header;
+ mach_msg_type_t *t = &n->not_type;
+
+ m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+ m->msgh_size = sizeof *n;
+ m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+ m->msgh_local_port = MACH_PORT_NULL;
+ m->msgh_remote_port = MACH_PORT_NULL;
+ m->msgh_id = MACH_NOTIFY_MSG_ACCEPTED;
+
+ t->msgt_name = MACH_MSG_TYPE_PORT_NAME;
+ t->msgt_size = PORT_T_SIZE_IN_BITS;
+ t->msgt_number = 1;
+ t->msgt_inline = TRUE;
+ t->msgt_longform = FALSE;
+ t->msgt_deallocate = FALSE;
+ t->msgt_unused = 0;
+
+ n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init_port_destroyed
+ * Purpose:
+ * Initialize a template for port-destroyed notifications.
+ */
+
+void
+ipc_notify_init_port_destroyed(
+ mach_port_destroyed_notification_t *n)
+{
+ mach_msg_header_t *m = &n->not_header;
+ mach_msg_type_t *t = &n->not_type;
+
+ m->msgh_bits = MACH_MSGH_BITS_COMPLEX |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+ m->msgh_size = sizeof *n;
+ m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+ m->msgh_local_port = MACH_PORT_NULL;
+ m->msgh_remote_port = MACH_PORT_NULL;
+ m->msgh_id = MACH_NOTIFY_PORT_DESTROYED;
+
+ t->msgt_name = MACH_MSG_TYPE_PORT_RECEIVE;
+ t->msgt_size = PORT_T_SIZE_IN_BITS;
+ t->msgt_number = 1;
+ t->msgt_inline = TRUE;
+ t->msgt_longform = FALSE;
+ t->msgt_deallocate = FALSE;
+ t->msgt_unused = 0;
+
+ n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init_no_senders
+ * Purpose:
+ * Initialize a template for no-senders notifications.
+ */
+
+void
+ipc_notify_init_no_senders(
+ mach_no_senders_notification_t *n)
+{
+ mach_msg_header_t *m = &n->not_header;
+ mach_msg_type_t *t = &n->not_type;
+
+ m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+ m->msgh_size = sizeof *n;
+ m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+ m->msgh_local_port = MACH_PORT_NULL;
+ m->msgh_remote_port = MACH_PORT_NULL;
+ m->msgh_id = MACH_NOTIFY_NO_SENDERS;
+
+ t->msgt_name = MACH_MSG_TYPE_INTEGER_32;
+ t->msgt_size = PORT_T_SIZE_IN_BITS;
+ t->msgt_number = 1;
+ t->msgt_inline = TRUE;
+ t->msgt_longform = FALSE;
+ t->msgt_deallocate = FALSE;
+ t->msgt_unused = 0;
+
+ n->not_count = 0;
+}
+
+/*
+ * Routine: ipc_notify_init_send_once
+ * Purpose:
+ * Initialize a template for send-once notifications.
+ */
+
+void
+ipc_notify_init_send_once(
+ mach_send_once_notification_t *n)
+{
+ mach_msg_header_t *m = &n->not_header;
+
+ m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+ m->msgh_size = sizeof *n;
+ m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+ m->msgh_local_port = MACH_PORT_NULL;
+ m->msgh_remote_port = MACH_PORT_NULL;
+ m->msgh_id = MACH_NOTIFY_SEND_ONCE;
+}
+
+/*
+ * Routine: ipc_notify_init_dead_name
+ * Purpose:
+ * Initialize a template for dead-name notifications.
+ */
+
+void
+ipc_notify_init_dead_name(
+ mach_dead_name_notification_t *n)
+{
+ mach_msg_header_t *m = &n->not_header;
+ mach_msg_type_t *t = &n->not_type;
+
+ m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+ m->msgh_size = sizeof *n;
+ m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+ m->msgh_local_port = MACH_PORT_NULL;
+ m->msgh_remote_port = MACH_PORT_NULL;
+ m->msgh_id = MACH_NOTIFY_DEAD_NAME;
+
+ t->msgt_name = MACH_MSG_TYPE_PORT_NAME;
+ t->msgt_size = PORT_T_SIZE_IN_BITS;
+ t->msgt_number = 1;
+ t->msgt_inline = TRUE;
+ t->msgt_longform = FALSE;
+ t->msgt_deallocate = FALSE;
+ t->msgt_unused = 0;
+
+ n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init
+ * Purpose:
+ * Initialize the notification subsystem.
+ */
+
+void
+ipc_notify_init(void)
+{
+ ipc_notify_init_port_deleted(&ipc_notify_port_deleted_template);
+ ipc_notify_init_msg_accepted(&ipc_notify_msg_accepted_template);
+ ipc_notify_init_port_destroyed(&ipc_notify_port_destroyed_template);
+ ipc_notify_init_no_senders(&ipc_notify_no_senders_template);
+ ipc_notify_init_send_once(&ipc_notify_send_once_template);
+ ipc_notify_init_dead_name(&ipc_notify_dead_name_template);
+}
+
+/*
+ * Routine: ipc_notify_port_deleted
+ * Purpose:
+ * Send a port-deleted notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_port_deleted(port, name)
+ ipc_port_t port;
+ mach_port_t name;
+{
+ ipc_kmsg_t kmsg;
+ mach_port_deleted_notification_t *n;
+
+ kmsg = ikm_alloc(sizeof *n);
+ if (kmsg == IKM_NULL) {
+ printf("dropped port-deleted (0x%08x, 0x%x)\n", port, name);
+ ipc_port_release_sonce(port);
+ return;
+ }
+
+ ikm_init(kmsg, sizeof *n);
+ n = (mach_port_deleted_notification_t *) &kmsg->ikm_header;
+ *n = ipc_notify_port_deleted_template;
+
+ n->not_header.msgh_remote_port = (mach_port_t) port;
+ n->not_port = name;
+
+ ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_msg_accepted
+ * Purpose:
+ * Send a msg-accepted notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_msg_accepted(port, name)
+ ipc_port_t port;
+ mach_port_t name;
+{
+ ipc_kmsg_t kmsg;
+ mach_msg_accepted_notification_t *n;
+
+ kmsg = ikm_alloc(sizeof *n);
+ if (kmsg == IKM_NULL) {
+ printf("dropped msg-accepted (0x%08x, 0x%x)\n", port, name);
+ ipc_port_release_sonce(port);
+ return;
+ }
+
+ ikm_init(kmsg, sizeof *n);
+ n = (mach_msg_accepted_notification_t *) &kmsg->ikm_header;
+ *n = ipc_notify_msg_accepted_template;
+
+ n->not_header.msgh_remote_port = (mach_port_t) port;
+ n->not_port = name;
+
+ ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_port_destroyed
+ * Purpose:
+ * Send a port-destroyed notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ * Consumes a ref for right, which should be a receive right
+ * prepped for placement into a message. (In-transit,
+ * or in-limbo if a circularity was detected.)
+ */
+
+void
+ipc_notify_port_destroyed(port, right)
+ ipc_port_t port;
+ ipc_port_t right;
+{
+ ipc_kmsg_t kmsg;
+ mach_port_destroyed_notification_t *n;
+
+ kmsg = ikm_alloc(sizeof *n);
+ if (kmsg == IKM_NULL) {
+ printf("dropped port-destroyed (0x%08x, 0x%08x)\n",
+ port, right);
+ ipc_port_release_sonce(port);
+ ipc_port_release_receive(right);
+ return;
+ }
+
+ ikm_init(kmsg, sizeof *n);
+ n = (mach_port_destroyed_notification_t *) &kmsg->ikm_header;
+ *n = ipc_notify_port_destroyed_template;
+
+ n->not_header.msgh_remote_port = (mach_port_t) port;
+ n->not_port = (mach_port_t) right;
+
+ ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_no_senders
+ * Purpose:
+ * Send a no-senders notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_no_senders(port, mscount)
+ ipc_port_t port;
+ mach_port_mscount_t mscount;
+{
+ ipc_kmsg_t kmsg;
+ mach_no_senders_notification_t *n;
+
+#if NORMA_IPC
+ if (ip_nsproxyp(port)) {
+ assert(mscount == 0);
+ norma_ipc_notify_no_senders(ip_nsproxy(port));
+ return;
+ }
+#endif /* NORMA_IPC */
+ kmsg = ikm_alloc(sizeof *n);
+ if (kmsg == IKM_NULL) {
+ printf("dropped no-senders (0x%08x, %u)\n", port, mscount);
+ ipc_port_release_sonce(port);
+ return;
+ }
+
+ ikm_init(kmsg, sizeof *n);
+ n = (mach_no_senders_notification_t *) &kmsg->ikm_header;
+ *n = ipc_notify_no_senders_template;
+
+ n->not_header.msgh_remote_port = (mach_port_t) port;
+ n->not_count = mscount;
+
+ ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_send_once
+ * Purpose:
+ * Send a send-once notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_send_once(port)
+ ipc_port_t port;
+{
+ ipc_kmsg_t kmsg;
+ mach_send_once_notification_t *n;
+
+ kmsg = ikm_alloc(sizeof *n);
+ if (kmsg == IKM_NULL) {
+ printf("dropped send-once (0x%08x)\n", port);
+ ipc_port_release_sonce(port);
+ return;
+ }
+
+ ikm_init(kmsg, sizeof *n);
+ n = (mach_send_once_notification_t *) &kmsg->ikm_header;
+ *n = ipc_notify_send_once_template;
+
+ n->not_header.msgh_remote_port = (mach_port_t) port;
+
+ ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_dead_name
+ * Purpose:
+ * Send a dead-name notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_dead_name(port, name)
+ ipc_port_t port;
+ mach_port_t name;
+{
+ ipc_kmsg_t kmsg;
+ mach_dead_name_notification_t *n;
+
+ kmsg = ikm_alloc(sizeof *n);
+ if (kmsg == IKM_NULL) {
+ printf("dropped dead-name (0x%08x, 0x%x)\n", port, name);
+ ipc_port_release_sonce(port);
+ return;
+ }
+
+ ikm_init(kmsg, sizeof *n);
+ n = (mach_dead_name_notification_t *) &kmsg->ikm_header;
+ *n = ipc_notify_dead_name_template;
+
+ n->not_header.msgh_remote_port = (mach_port_t) port;
+ n->not_port = name;
+
+ ipc_mqueue_send_always(kmsg);
+}
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: ipc_notify_port_deleted_compat
+ * Purpose:
+ * Send a port-deleted notification.
+ * Sends it to a send right instead of a send-once right.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/sright for port.
+ */
+
+void
+ipc_notify_port_deleted_compat(port, name)
+ ipc_port_t port;
+ mach_port_t name;
+{
+ ipc_kmsg_t kmsg;
+ mach_port_deleted_notification_t *n;
+
+ kmsg = ikm_alloc(sizeof *n);
+ if (kmsg == IKM_NULL) {
+ printf("dropped port-deleted-compat (0x%08x, 0x%x)\n",
+ port, name);
+ ipc_port_release_send(port);
+ return;
+ }
+
+ ikm_init(kmsg, sizeof *n);
+ n = (mach_port_deleted_notification_t *) &kmsg->ikm_header;
+ *n = ipc_notify_port_deleted_template;
+
+ n->not_header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0);
+ n->not_header.msgh_remote_port = (mach_port_t) port;
+ n->not_port = name;
+
+ ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_msg_accepted_compat
+ * Purpose:
+ * Send a msg-accepted notification.
+ * Sends it to a send right instead of a send-once right.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/sright for port.
+ */
+
+void
+ipc_notify_msg_accepted_compat(port, name)
+ ipc_port_t port;
+ mach_port_t name;
+{
+ ipc_kmsg_t kmsg;
+ mach_msg_accepted_notification_t *n;
+
+ kmsg = ikm_alloc(sizeof *n);
+ if (kmsg == IKM_NULL) {
+ printf("dropped msg-accepted-compat (0x%08x, 0x%x)\n",
+ port, name);
+ ipc_port_release_send(port);
+ return;
+ }
+
+ ikm_init(kmsg, sizeof *n);
+ n = (mach_msg_accepted_notification_t *) &kmsg->ikm_header;
+ *n = ipc_notify_msg_accepted_template;
+
+ n->not_header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0);
+ n->not_header.msgh_remote_port = (mach_port_t) port;
+ n->not_port = name;
+
+ ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_port_destroyed_compat
+ * Purpose:
+ * Send a port-destroyed notification.
+ * Sends it to a send right instead of a send-once right.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/sright for port.
+ * Consumes a ref for right, which should be a receive right
+ * prepped for placement into a message. (In-transit,
+ * or in-limbo if a circularity was detected.)
+ */
+
+void
+ipc_notify_port_destroyed_compat(port, right)
+ ipc_port_t port;
+ ipc_port_t right;
+{
+ ipc_kmsg_t kmsg;
+ mach_port_destroyed_notification_t *n;
+
+ kmsg = ikm_alloc(sizeof *n);
+ if (kmsg == IKM_NULL) {
+ printf("dropped port-destroyed-compat (0x%08x, 0x%08x)\n",
+ port, right);
+ ipc_port_release_send(port);
+ ipc_port_release_receive(right);
+ return;
+ }
+
+ ikm_init(kmsg, sizeof *n);
+ n = (mach_port_destroyed_notification_t *) &kmsg->ikm_header;
+ *n = ipc_notify_port_destroyed_template;
+
+ n->not_header.msgh_bits = MACH_MSGH_BITS_COMPLEX |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0);
+ n->not_header.msgh_remote_port = (mach_port_t) port;
+ n->not_port = (mach_port_t) right;
+
+ ipc_mqueue_send_always(kmsg);
+}
+
+#endif /* MACH_IPC_COMPAT */
diff --git a/ipc/ipc_notify.h b/ipc/ipc_notify.h
new file mode 100644
index 00000000..66e0633d
--- /dev/null
+++ b/ipc/ipc_notify.h
@@ -0,0 +1,72 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_notify.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of notification-sending functions.
+ */
+
+#ifndef _IPC_IPC_NOTIFY_H_
+#define _IPC_IPC_NOTIFY_H_
+
+#include <mach_ipc_compat.h>
+
+extern void
+ipc_notify_init();
+
+extern void
+ipc_notify_port_deleted(/* ipc_port_t, mach_port_t */);
+
+extern void
+ipc_notify_msg_accepted(/* ipc_port_t, mach_port_t */);
+
+extern void
+ipc_notify_port_destroyed(/* ipc_port_t, ipc_port_t */);
+
+extern void
+ipc_notify_no_senders(/* ipc_port_t, mach_port_mscount_t */);
+
+extern void
+ipc_notify_send_once(/* ipc_port_t */);
+
+extern void
+ipc_notify_dead_name(/* ipc_port_t, mach_port_t */);
+
+#if MACH_IPC_COMPAT
+
+extern void
+ipc_notify_port_deleted_compat(/* ipc_port_t, mach_port_t */);
+
+extern void
+ipc_notify_msg_accepted_compat(/* ipc_port_t, mach_port_t */);
+
+extern void
+ipc_notify_port_destroyed_compat(/* ipc_port_t, ipc_port_t */);
+
+#endif /* MACH_IPC_COMPAT */
+#endif /* _IPC_IPC_NOTIFY_H_ */
diff --git a/ipc/ipc_object.c b/ipc/ipc_object.c
new file mode 100644
index 00000000..cdef3cde
--- /dev/null
+++ b/ipc/ipc_object.c
@@ -0,0 +1,1346 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_object.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC objects.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <ipc/port.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_pset.h>
+
+zone_t ipc_object_zones[IOT_NUMBER];
+
+
+
+/*
+ * Routine: ipc_object_reference
+ * Purpose:
+ * Take a reference to an object.
+ */
+
+void
+ipc_object_reference(
+ ipc_object_t object)
+{
+ io_lock(object);
+ assert(object->io_references > 0);
+ io_reference(object);
+ io_unlock(object);
+}
+
+/*
+ * Routine: ipc_object_release
+ * Purpose:
+ * Release a reference to an object.
+ */
+
+void
+ipc_object_release(
+ ipc_object_t object)
+{
+ io_lock(object);
+ assert(object->io_references > 0);
+ io_release(object);
+ io_check_unlock(object);
+}
+
+/*
+ * Routine: ipc_object_translate
+ * Purpose:
+ * Look up an object in a space.
+ * Conditions:
+ * Nothing locked before. If successful, the object
+ * is returned locked. The caller doesn't get a ref.
+ * Returns:
+ * KERN_SUCCESS Object returned locked.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote the correct right.
+ */
+
+kern_return_t
+ipc_object_translate(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_port_right_t right,
+ ipc_object_t *objectp)
+{
+ ipc_entry_t entry;
+ ipc_object_t object;
+ kern_return_t kr;
+
+ kr = ipc_right_lookup_read(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is read-locked and active */
+
+ if ((entry->ie_bits & MACH_PORT_TYPE(right)) == (mach_port_right_t) 0) {
+ is_read_unlock(space);
+ return KERN_INVALID_RIGHT;
+ }
+
+ object = entry->ie_object;
+ assert(object != IO_NULL);
+
+ io_lock(object);
+ is_read_unlock(space);
+
+ *objectp = object;
+ return KERN_SUCCESS;
+}
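+
+/*
+ * Illustrative sketch, not part of the original sources: looking up a
+ * name that must denote a receive right. The object comes back locked
+ * and without an extra reference, so the caller simply unlocks it when
+ * done. The helper name example_inspect_receive_right is hypothetical.
+ */
+#if 0
+kern_return_t
+example_inspect_receive_right(
+ ipc_space_t space,
+ mach_port_t name)
+{
+ ipc_object_t object;
+ kern_return_t kr;
+
+ kr = ipc_object_translate(space, name,
+ MACH_PORT_RIGHT_RECEIVE, &object);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* object is locked and active; no ref is held */
+
+ /* ... examine the port here ... */
+
+ io_unlock(object);
+ return KERN_SUCCESS;
+}
+#endif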
+
+/*
+ * Routine: ipc_object_alloc_dead
+ * Purpose:
+ * Allocate a dead-name entry.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The dead name is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc_dead(
+ ipc_space_t space,
+ mach_port_t *namep)
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+
+ kr = ipc_entry_alloc(space, namep, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked */
+
+ /* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */
+
+ assert(entry->ie_object == IO_NULL);
+ entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
+
+ is_write_unlock(space);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_alloc_dead_name
+ * Purpose:
+ * Allocate a dead-name entry, with a specific name.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The dead name is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc_dead_name(
+ ipc_space_t space,
+ mach_port_t name)
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+
+ kr = ipc_entry_alloc_name(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked */
+
+ if (ipc_right_inuse(space, name, entry))
+ return KERN_NAME_EXISTS;
+
+ /* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */
+
+ assert(entry->ie_object == IO_NULL);
+ entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
+
+ is_write_unlock(space);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_alloc
+ * Purpose:
+ * Allocate an object.
+ * Conditions:
+ * Nothing locked. If successful, the object is returned locked.
+ * The caller doesn't get a reference for the object.
+ * Returns:
+ * KERN_SUCCESS The object is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc(
+ ipc_space_t space,
+ ipc_object_type_t otype,
+ mach_port_type_t type,
+ mach_port_urefs_t urefs,
+ mach_port_t *namep,
+ ipc_object_t *objectp)
+{
+ ipc_object_t object;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ assert(otype < IOT_NUMBER);
+ assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
+ assert(type != MACH_PORT_TYPE_NONE);
+ assert(urefs <= MACH_PORT_UREFS_MAX);
+
+ object = io_alloc(otype);
+ if (object == IO_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ if (otype == IOT_PORT) {
+ ipc_port_t port = (ipc_port_t)object;
+
+ bzero((char *)port, sizeof(*port));
+ } else if (otype == IOT_PORT_SET) {
+ ipc_pset_t pset = (ipc_pset_t)object;
+
+ bzero((char *)pset, sizeof(*pset));
+ }
+ *namep = (mach_port_t)object;
+ kr = ipc_entry_alloc(space, namep, &entry);
+ if (kr != KERN_SUCCESS) {
+ io_free(otype, object);
+ return kr;
+ }
+ /* space is write-locked */
+
+ entry->ie_bits |= type | urefs;
+ entry->ie_object = object;
+
+ io_lock_init(object);
+ io_lock(object);
+ is_write_unlock(space);
+
+ object->io_references = 1; /* for entry, not caller */
+ object->io_bits = io_makebits(TRUE, otype, 0);
+
+ *objectp = object;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_alloc_name
+ * Purpose:
+ * Allocate an object, with a specific name.
+ * Conditions:
+ * Nothing locked. If successful, the object is returned locked.
+ * The caller doesn't get a reference for the object.
+ * Returns:
+ * KERN_SUCCESS The object is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc_name(
+ ipc_space_t space,
+ ipc_object_type_t otype,
+ mach_port_type_t type,
+ mach_port_urefs_t urefs,
+ mach_port_t name,
+ ipc_object_t *objectp)
+{
+ ipc_object_t object;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ assert(otype < IOT_NUMBER);
+ assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
+ assert(type != MACH_PORT_TYPE_NONE);
+ assert(urefs <= MACH_PORT_UREFS_MAX);
+
+ object = io_alloc(otype);
+ if (object == IO_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ if (otype == IOT_PORT) {
+ ipc_port_t port = (ipc_port_t)object;
+
+ bzero((char *)port, sizeof(*port));
+ } else if (otype == IOT_PORT_SET) {
+ ipc_pset_t pset = (ipc_pset_t)object;
+
+ bzero((char *)pset, sizeof(*pset));
+ }
+
+ kr = ipc_entry_alloc_name(space, name, &entry);
+ if (kr != KERN_SUCCESS) {
+ io_free(otype, object);
+ return kr;
+ }
+ /* space is write-locked */
+
+ if (ipc_right_inuse(space, name, entry)) {
+ io_free(otype, object);
+ return KERN_NAME_EXISTS;
+ }
+
+ entry->ie_bits |= type | urefs;
+ entry->ie_object = object;
+
+ io_lock_init(object);
+ io_lock(object);
+ is_write_unlock(space);
+
+ object->io_references = 1; /* for entry, not caller */
+ object->io_bits = io_makebits(TRUE, otype, 0);
+
+ *objectp = object;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_copyin_type
+ * Purpose:
+ * Convert a send type name to a received type name.
+ */
+
+mach_msg_type_name_t
+ipc_object_copyin_type(
+ mach_msg_type_name_t msgt_name)
+{
+ switch (msgt_name) {
+ case 0:
+ return 0;
+
+ case MACH_MSG_TYPE_MOVE_RECEIVE:
+ return MACH_MSG_TYPE_PORT_RECEIVE;
+
+ case MACH_MSG_TYPE_MOVE_SEND_ONCE:
+ case MACH_MSG_TYPE_MAKE_SEND_ONCE:
+ return MACH_MSG_TYPE_PORT_SEND_ONCE;
+
+ case MACH_MSG_TYPE_MOVE_SEND:
+ case MACH_MSG_TYPE_MAKE_SEND:
+ case MACH_MSG_TYPE_COPY_SEND:
+ return MACH_MSG_TYPE_PORT_SEND;
+
+#if MACH_IPC_COMPAT
+ case MSG_TYPE_PORT:
+ return MACH_MSG_TYPE_PORT_SEND;
+
+ case MSG_TYPE_PORT_ALL:
+ return MACH_MSG_TYPE_PORT_RECEIVE;
+#endif /* MACH_IPC_COMPAT */
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_object_copyin_type: strange rights");
+#else
+ panic("ipc_object_copyin_type: strange rights");
+#endif
+ return 0; /* in case assert/panic returns */
+ }
+}
+
+/*
+ * Routine: ipc_object_copyin
+ * Purpose:
+ * Copyin a capability from a space.
+ * If successful, the caller gets a ref
+ * for the resulting object, unless it is IO_DEAD.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Acquired an object, possibly IO_DEAD.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_object_copyin(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_msg_type_name_t msgt_name,
+ ipc_object_t *objectp)
+{
+ ipc_entry_t entry;
+ ipc_port_t soright;
+ kern_return_t kr;
+
+ /*
+ * Could first try a read lock when doing
+ * MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND,
+ * and MACH_MSG_TYPE_MAKE_SEND_ONCE.
+ */
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ kr = ipc_right_copyin(space, name, entry,
+ msgt_name, TRUE,
+ objectp, &soright);
+ if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+ if ((kr == KERN_SUCCESS) && (soright != IP_NULL))
+ ipc_notify_port_deleted(soright, name);
+
+ return kr;
+}
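+
+/*
+ * Illustrative sketch, not part of the original sources: copying in a
+ * send right named in a message header. Copyin can succeed and still
+ * yield IO_DEAD (for a dead name), so callers check IO_VALID before
+ * using the object. The variable names here are hypothetical.
+ */
+#if 0
+ ipc_object_t object;
+ kern_return_t kr;
+
+ kr = ipc_object_copyin(space, name, MACH_MSG_TYPE_COPY_SEND, &object);
+ if ((kr != KERN_SUCCESS) || !IO_VALID(object)) {
+ /* no usable capability was acquired */
+ } else {
+ /* we hold a ref for object, carried as MACH_MSG_TYPE_PORT_SEND */
+ }
+#endif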
+
+/*
+ * Routine: ipc_object_copyin_from_kernel
+ * Purpose:
+ * Copyin a naked capability from the kernel.
+ *
+ * MACH_MSG_TYPE_MOVE_RECEIVE
+ * The receiver must be ipc_space_kernel.
+ * Consumes the naked receive right.
+ * MACH_MSG_TYPE_COPY_SEND
+ * A naked send right must be supplied.
+ * The port gains a reference, and a send right
+ * if the port is still active.
+ * MACH_MSG_TYPE_MAKE_SEND
+ * The receiver must be ipc_space_kernel.
+ * The port gains a reference and a send right.
+ * MACH_MSG_TYPE_MOVE_SEND
+ * Consumes a naked send right.
+ * MACH_MSG_TYPE_MAKE_SEND_ONCE
+ * The receiver must be ipc_space_kernel.
+ * The port gains a reference and a send-once right.
+ * MACH_MSG_TYPE_MOVE_SEND_ONCE
+ * Consumes a naked send-once right.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_object_copyin_from_kernel(
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name)
+{
+ assert(IO_VALID(object));
+
+ switch (msgt_name) {
+ case MACH_MSG_TYPE_MOVE_RECEIVE: {
+ ipc_port_t port = (ipc_port_t) object;
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name != MACH_PORT_NULL);
+ assert(port->ip_receiver == ipc_space_kernel);
+
+ /* relevant part of ipc_port_clear_receiver */
+ ipc_port_set_mscount(port, 0);
+
+ port->ip_receiver_name = MACH_PORT_NULL;
+ port->ip_destination = IP_NULL;
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_MSG_TYPE_COPY_SEND: {
+ ipc_port_t port = (ipc_port_t) object;
+
+ ip_lock(port);
+ if (ip_active(port)) {
+ assert(port->ip_srights > 0);
+ port->ip_srights++;
+ }
+ ip_reference(port);
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_MSG_TYPE_MAKE_SEND: {
+ ipc_port_t port = (ipc_port_t) object;
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name != MACH_PORT_NULL);
+ assert(port->ip_receiver == ipc_space_kernel);
+
+ ip_reference(port);
+ port->ip_mscount++;
+ port->ip_srights++;
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND:
+ /* move naked send right into the message */
+ break;
+
+ case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
+ ipc_port_t port = (ipc_port_t) object;
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name != MACH_PORT_NULL);
+ assert(port->ip_receiver == ipc_space_kernel);
+
+ ip_reference(port);
+ port->ip_sorights++;
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND_ONCE:
+ /* move naked send-once right into the message */
+ break;
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_object_copyin_from_kernel: strange rights");
+#else
+ panic("ipc_object_copyin_from_kernel: strange rights");
+#endif
+ }
+}
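+
+/*
+ * Illustrative sketch, not part of the original sources: kernel code
+ * that holds the receive right for one of its own ports (the receiver
+ * is ipc_space_kernel) can stamp out a naked send right destined for a
+ * message via the MAKE_SEND case. The kernel_port variable is
+ * hypothetical.
+ */
+#if 0
+ ipc_object_copyin_from_kernel((ipc_object_t) kernel_port,
+ MACH_MSG_TYPE_MAKE_SEND);
+ /* kernel_port gained a reference and a send right */
+#endif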
+
+/*
+ * Routine: ipc_object_destroy
+ * Purpose:
+ * Destroys a naked capability.
+ * Consumes a ref for the object.
+ *
+ * A receive right should be in limbo or in transit.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_object_destroy(
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name)
+{
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+
+ switch (msgt_name) {
+ case MACH_MSG_TYPE_PORT_SEND:
+ ipc_port_release_send((ipc_port_t) object);
+ break;
+
+ case MACH_MSG_TYPE_PORT_SEND_ONCE:
+ ipc_notify_send_once((ipc_port_t) object);
+ break;
+
+ case MACH_MSG_TYPE_PORT_RECEIVE:
+ ipc_port_release_receive((ipc_port_t) object);
+ break;
+
+ default:
+ panic("ipc_object_destroy: strange rights");
+ }
+}
+
+/*
+ * Routine: ipc_object_copyout
+ * Purpose:
+ * Copyout a capability, placing it into a space.
+ * If successful, consumes a ref for the object.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Copied out object, consumed ref.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_CAPABILITY The object is dead.
+ * KERN_NO_SPACE No room in space for another right.
+ * KERN_RESOURCE_SHORTAGE No memory available.
+ * KERN_UREFS_OVERFLOW Urefs limit exceeded
+ * and overflow wasn't specified.
+ */
+
+kern_return_t
+ipc_object_copyout(
+ ipc_space_t space,
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name,
+ boolean_t overflow,
+ mach_port_t *namep)
+{
+ mach_port_t name;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+
+ is_write_lock(space);
+
+ for (;;) {
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return KERN_INVALID_TASK;
+ }
+
+ if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+ ipc_right_reverse(space, object, &name, &entry)) {
+ /* object is locked and active */
+
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
+ break;
+ }
+
+ kr = ipc_entry_get(space, &name, &entry);
+ if (kr != KERN_SUCCESS) {
+ /* unlocks/locks space, so must start again */
+
+ kr = ipc_entry_grow_table(space);
+ if (kr != KERN_SUCCESS)
+ return kr; /* space is unlocked */
+
+ continue;
+ }
+
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+
+ io_lock(object);
+ if (!io_active(object)) {
+ io_unlock(object);
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ entry->ie_object = object;
+ break;
+ }
+
+ /* space is write-locked and active, object is locked and active */
+
+ kr = ipc_right_copyout(space, name, entry,
+ msgt_name, overflow, object);
+ /* object is unlocked */
+ is_write_unlock(space);
+
+ if (kr == KERN_SUCCESS)
+ *namep = name;
+ return kr;
+}
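+
+/*
+ * Illustrative sketch, not part of the original sources: giving a task
+ * a send right by copying a naked right out into its space. On success
+ * the caller's ref is consumed and name denotes the new right; on
+ * failure the ref survives and must be disposed of, e.g. with
+ * ipc_object_destroy. The port and space variables are hypothetical.
+ */
+#if 0
+ mach_port_t name;
+ kern_return_t kr;
+
+ kr = ipc_object_copyout(space, (ipc_object_t) port,
+ MACH_MSG_TYPE_PORT_SEND, FALSE, &name);
+ if (kr != KERN_SUCCESS)
+ ipc_object_destroy((ipc_object_t) port, MACH_MSG_TYPE_PORT_SEND);
+#endif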
+
+#if 0
+/* XXX same, but don't check for already-existing send rights */
+kern_return_t
+ipc_object_copyout_multiname(space, object, namep)
+ ipc_space_t space;
+ ipc_object_t object;
+ mach_port_t *namep;
+{
+ mach_port_t name;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+
+ is_write_lock(space);
+
+ for (;;) {
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return KERN_INVALID_TASK;
+ }
+
+ kr = ipc_entry_get(space, &name, &entry);
+ if (kr != KERN_SUCCESS) {
+ /* unlocks/locks space, so must start again */
+
+ kr = ipc_entry_grow_table(space);
+ if (kr != KERN_SUCCESS)
+ return kr; /* space is unlocked */
+
+ continue;
+ }
+
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+
+ io_lock(object);
+ if (!io_active(object)) {
+ io_unlock(object);
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ entry->ie_object = object;
+ break;
+ }
+
+ /* space is write-locked and active, object is locked and active */
+
+ kr = ipc_right_copyout_multiname(space, name, entry, object);
+ /* object is unlocked */
+ is_write_unlock(space);
+
+ if (kr == KERN_SUCCESS)
+ *namep = name;
+ return kr;
+}
+#endif /* 0 */
+
+/*
+ * Routine: ipc_object_copyout_name
+ * Purpose:
+ * Copyout a capability, placing it into a space.
+ * The specified name is used for the capability.
+ * If successful, consumes a ref for the object.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Copied out object, consumed ref.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_CAPABILITY The object is dead.
+ * KERN_RESOURCE_SHORTAGE No memory available.
+ * KERN_UREFS_OVERFLOW Urefs limit exceeded
+ * and overflow wasn't specified.
+ * KERN_RIGHT_EXISTS Space has rights under another name.
+ * KERN_NAME_EXISTS Name is already used.
+ */
+
+kern_return_t
+ipc_object_copyout_name(
+ ipc_space_t space,
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name,
+ boolean_t overflow,
+ mach_port_t name)
+{
+ mach_port_t oname;
+ ipc_entry_t oentry;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+
+ kr = ipc_entry_alloc_name(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+ ipc_right_reverse(space, object, &oname, &oentry)) {
+ /* object is locked and active */
+
+ if (name != oname) {
+ io_unlock(object);
+
+ if (IE_BITS_TYPE(entry->ie_bits)
+ == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, name, entry);
+
+ is_write_unlock(space);
+ return KERN_RIGHT_EXISTS;
+ }
+
+ assert(entry == oentry);
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
+ } else {
+ if (ipc_right_inuse(space, name, entry))
+ return KERN_NAME_EXISTS;
+
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+
+ io_lock(object);
+ if (!io_active(object)) {
+ io_unlock(object);
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ entry->ie_object = object;
+ }
+
+ /* space is write-locked and active, object is locked and active */
+
+ kr = ipc_right_copyout(space, name, entry,
+ msgt_name, overflow, object);
+ /* object is unlocked */
+ is_write_unlock(space);
+ return kr;
+}
+
+/*
+ * Routine: ipc_object_copyout_dest
+ * Purpose:
+ * Translates/consumes the destination right of a message.
+ * This is unlike normal copyout because the right is consumed
+ * in a funny way instead of being given to the receiving space.
+ * The receiver gets its own name for the port if it holds the
+ * receive right; otherwise it gets MACH_PORT_NULL.
+ * Conditions:
+ * The object is locked and active. Nothing else locked.
+ * The object is unlocked and loses a reference.
+ */
+
+void
+ipc_object_copyout_dest(
+ ipc_space_t space,
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name,
+ mach_port_t *namep)
+{
+ mach_port_t name;
+
+ assert(IO_VALID(object));
+ assert(io_active(object));
+
+ io_release(object);
+
+ /*
+ * If the space is the receiver/owner of the object,
+ * then we quietly consume the right and return
+ * the space's name for the object. Otherwise
+ * we destroy the right and return MACH_PORT_NULL.
+ */
+
+ switch (msgt_name) {
+ case MACH_MSG_TYPE_PORT_SEND: {
+ ipc_port_t port = (ipc_port_t) object;
+ ipc_port_t nsrequest = IP_NULL;
+ mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+ assert(port->ip_srights > 0);
+ if (--port->ip_srights == 0) {
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL) {
+ port->ip_nsrequest = IP_NULL;
+ mscount = port->ip_mscount;
+ }
+ }
+
+ if (port->ip_receiver == space)
+ name = port->ip_receiver_name;
+ else
+ name = MACH_PORT_NULL;
+
+ ip_unlock(port);
+
+ if (nsrequest != IP_NULL)
+ ipc_notify_no_senders(nsrequest, mscount);
+
+ break;
+ }
+
+ case MACH_MSG_TYPE_PORT_SEND_ONCE: {
+ ipc_port_t port = (ipc_port_t) object;
+
+ assert(port->ip_sorights > 0);
+
+ if (port->ip_receiver == space) {
+ /* quietly consume the send-once right */
+
+ port->ip_sorights--;
+ name = port->ip_receiver_name;
+ ip_unlock(port);
+ } else {
+ /*
+ * A very bizarre case. The message
+ * was received, but before this copyout
+ * happened the space lost receive rights.
+ * We can't quietly consume the soright
+ * out from underneath some other task,
+ * so generate a send-once notification.
+ */
+
+ ip_reference(port); /* restore ref */
+ ip_unlock(port);
+
+ ipc_notify_send_once(port);
+ name = MACH_PORT_NULL;
+ }
+
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_object_copyout_dest: strange rights");
+#else
+ panic("ipc_object_copyout_dest: strange rights");
+#endif
+ name = MACH_PORT_NULL; /* in case assert/panic returns */
+ }
+
+ *namep = name;
+}
+
+/*
+ * Routine: ipc_object_rename
+ * Purpose:
+ * Rename an entry in a space.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Renamed the entry.
+ * KERN_INVALID_TASK The space was dead.
+ * KERN_INVALID_NAME oname didn't denote an entry.
+ * KERN_NAME_EXISTS nname already denoted an entry.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate new entry.
+ */
+
+kern_return_t
+ipc_object_rename(
+ ipc_space_t space,
+ mach_port_t oname,
+ mach_port_t nname)
+{
+ ipc_entry_t oentry, nentry;
+ kern_return_t kr;
+
+ kr = ipc_entry_alloc_name(space, nname, &nentry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ if (ipc_right_inuse(space, nname, nentry)) {
+ /* space is unlocked */
+ return KERN_NAME_EXISTS;
+ }
+
+ /* don't let ipc_entry_lookup see the uninitialized new entry */
+
+ if ((oname == nname) ||
+ ((oentry = ipc_entry_lookup(space, oname)) == IE_NULL)) {
+ ipc_entry_dealloc(space, nname, nentry);
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+ }
+
+ kr = ipc_right_rename(space, oname, oentry, nname, nentry);
+ /* space is unlocked */
+ return kr;
+}
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: ipc_object_copyout_type_compat
+ * Purpose:
+ * Convert a carried type name to an old type name.
+ */
+
+mach_msg_type_name_t
+ipc_object_copyout_type_compat(msgt_name)
+ mach_msg_type_name_t msgt_name;
+{
+ switch (msgt_name) {
+ case MACH_MSG_TYPE_PORT_SEND:
+ case MACH_MSG_TYPE_PORT_SEND_ONCE:
+ return MSG_TYPE_PORT;
+
+ case MACH_MSG_TYPE_PORT_RECEIVE:
+ return MSG_TYPE_PORT_ALL;
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_object_copyout_type_compat: strange rights");
+#else
+ panic("ipc_object_copyout_type_compat: strange rights");
+#endif
+ return 0; /* in case assert/panic returns */
+ }
+}
+
+/*
+ * Routine: ipc_object_copyin_compat
+ * Purpose:
+ * Copyin a capability from a space.
+ * If successful, the caller gets a ref
+ * for the resulting object, which is always valid.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Acquired a valid object.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_object_copyin_compat(space, name, msgt_name, dealloc, objectp)
+ ipc_space_t space;
+ mach_port_t name;
+ mach_msg_type_name_t msgt_name;
+ boolean_t dealloc;
+ ipc_object_t *objectp;
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ kr = ipc_right_copyin_compat(space, name, entry,
+ msgt_name, dealloc, objectp);
+ /* space is unlocked */
+ return kr;
+}
+
+/*
+ * Routine: ipc_object_copyin_header
+ * Purpose:
+ * Copyin a capability from a space.
+ * If successful, the caller gets a ref
+ * for the resulting object, which is always valid.
+ * The type of the acquired capability is returned.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Acquired a valid object.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_object_copyin_header(space, name, objectp, msgt_namep)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_object_t *objectp;
+ mach_msg_type_name_t *msgt_namep;
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ kr = ipc_right_copyin_header(space, name, entry,
+ objectp, msgt_namep);
+ /* space is unlocked */
+ return kr;
+}
+
+/*
+ * Routine: ipc_object_copyout_compat
+ * Purpose:
+ * Copyout a capability, placing it into a space.
+ * If successful, consumes a ref for the object.
+ *
+ * Marks new entries with IE_BITS_COMPAT.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Copied out object, consumed ref.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_CAPABILITY The object is dead.
+ * KERN_NO_SPACE No room in space for another right.
+ * KERN_RESOURCE_SHORTAGE No memory available.
+ */
+
+kern_return_t
+ipc_object_copyout_compat(space, object, msgt_name, namep)
+ ipc_space_t space;
+ ipc_object_t object;
+ mach_msg_type_name_t msgt_name;
+ mach_port_t *namep;
+{
+ mach_port_t name;
+ ipc_entry_t entry;
+ ipc_port_t port;
+ kern_return_t kr;
+
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+ port = (ipc_port_t) object;
+
+ is_write_lock(space);
+
+ for (;;) {
+ ipc_port_request_index_t request;
+
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return KERN_INVALID_TASK;
+ }
+
+ if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+ ipc_right_reverse(space, (ipc_object_t) port,
+ &name, &entry)) {
+ /* port is locked and active */
+
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
+ break;
+ }
+
+ kr = ipc_entry_get(space, &name, &entry);
+ if (kr != KERN_SUCCESS) {
+ /* unlocks/locks space, so must start again */
+
+ kr = ipc_entry_grow_table(space);
+ if (kr != KERN_SUCCESS)
+ return kr; /* space is unlocked */
+
+ continue;
+ }
+
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+
+ ip_lock(port);
+ if (!ip_active(port)) {
+ ip_unlock(port);
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ kr = ipc_port_dnrequest(port, name, ipr_spacem(space),
+ &request);
+ if (kr != KERN_SUCCESS) {
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+ kr = ipc_port_dngrow(port);
+ /* port is unlocked */
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ is_write_lock(space);
+ continue;
+ }
+
+ is_reference(space); /* for dnrequest */
+ entry->ie_object = (ipc_object_t) port;
+ entry->ie_request = request;
+ entry->ie_bits |= IE_BITS_COMPAT;
+ break;
+ }
+
+ /* space is write-locked and active, port is locked and active */
+
+ kr = ipc_right_copyout(space, name, entry,
+ msgt_name, TRUE, (ipc_object_t) port);
+ /* object is unlocked */
+ is_write_unlock(space);
+
+ if (kr == KERN_SUCCESS)
+ *namep = name;
+ return kr;
+}
+
+/*
+ * Routine: ipc_object_copyout_name_compat
+ * Purpose:
+ * Copyout a capability, placing it into a space.
+ * The specified name is used for the capability.
+ * If successful, consumes a ref for the object.
+ *
+ * Like ipc_object_copyout_name, except that
+ * the name can't be in use at all, even for the same
+ * port, and IE_BITS_COMPAT gets turned on.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Copied out object, consumed ref.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_CAPABILITY The object is dead.
+ * KERN_RESOURCE_SHORTAGE No memory available.
+ * KERN_RIGHT_EXISTS Space has rights under another name.
+ * KERN_NAME_EXISTS Name is already used.
+ */
+
+kern_return_t
+ipc_object_copyout_name_compat(space, object, msgt_name, name)
+ ipc_space_t space;
+ ipc_object_t object;
+ mach_msg_type_name_t msgt_name;
+ mach_port_t name;
+{
+ ipc_entry_t entry;
+ ipc_port_t port;
+ kern_return_t kr;
+
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+ port = (ipc_port_t) object;
+
+ for (;;) {
+ mach_port_t oname;
+ ipc_entry_t oentry;
+ ipc_port_request_index_t request;
+
+ kr = ipc_entry_alloc_name(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ if (ipc_right_inuse(space, name, entry))
+ return KERN_NAME_EXISTS; /* space is unlocked */
+
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+
+ if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+ ipc_right_reverse(space, (ipc_object_t) port,
+ &oname, &oentry)) {
+ /* port is locked and active */
+
+ ip_unlock(port);
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+ return KERN_RIGHT_EXISTS;
+ }
+
+ ip_lock(port);
+ if (!ip_active(port)) {
+ ip_unlock(port);
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ kr = ipc_port_dnrequest(port, name, ipr_spacem(space),
+ &request);
+ if (kr != KERN_SUCCESS) {
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+ kr = ipc_port_dngrow(port);
+ /* port is unlocked */
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ continue;
+ }
+
+ is_reference(space); /* for dnrequest */
+ entry->ie_object = (ipc_object_t) port;
+ entry->ie_request = request;
+ entry->ie_bits |= IE_BITS_COMPAT;
+ break;
+ }
+
+ /* space is write-locked and active, port is locked and active */
+
+ kr = ipc_right_copyout(space, name, entry,
+ msgt_name, TRUE, (ipc_object_t) port);
+ /* object is unlocked */
+ is_write_unlock(space);
+
+ assert(kr == KERN_SUCCESS);
+ return kr;
+}
+
+#endif /* MACH_IPC_COMPAT */
+
+#include <mach_kdb.h>
+
+
+#if MACH_KDB
+#define printf kdbprintf
+
+/*
+ * Routine: ipc_object_print
+ * Purpose:
+ * Pretty-print an object for kdb.
+ */
+
+char *ikot_print_array[IKOT_MAX_TYPE] = {
+ "(NONE) ",
+ "(THREAD) ",
+ "(TASK) ",
+ "(HOST) ",
+ "(HOST_PRIV) ",
+ "(PROCESSOR) ",
+ "(PSET) ",
+ "(PSET_NAME) ",
+ "(PAGER) ",
+ "(PAGER_REQUEST) ",
+ "(DEVICE) ", /* 10 */
+ "(XMM_OBJECT) ",
+ "(XMM_PAGER) ",
+ "(XMM_KERNEL) ",
+ "(XMM_REPLY) ",
+ "(PAGER_TERMINATING)",
+ "(PAGING_NAME) ",
+ "(HOST_SECURITY) ",
+ "(LEDGER) ",
+ "(MASTER_DEVICE) ",
+ "(ACTIVATION) ", /* 20 */
+ "(SUBSYSTEM) ",
+ "(IO_DONE_QUEUE) ",
+ "(SEMAPHORE) ",
+ "(LOCK_SET) ",
+ "(CLOCK) ",
+ "(CLOCK_CTRL) ", /* 26 */
+ /* << new entries here */
+ "(UNKNOWN) " /* magic catchall */
+}; /* Please keep in sync with kern/ipc_kobject.h */
+
+void
+ipc_object_print(
+ ipc_object_t object)
+{
+ int kotype;
+
+ iprintf("%s", io_active(object) ? "active" : "dead");
+ printf(", refs=%d", object->io_references);
+ printf(", otype=%d", io_otype(object));
+ kotype = io_kotype(object);
+ if (kotype >= 0 && kotype < IKOT_MAX_TYPE)
+ printf(", kotype=%d %s\n", io_kotype(object),
+ ikot_print_array[kotype]);
+ else
+ printf(", kotype=0x%x %s\n", io_kotype(object),
+ ikot_print_array[IKOT_UNKNOWN]);
+}
+
+#endif /* MACH_KDB */
diff --git a/ipc/ipc_object.h b/ipc/ipc_object.h
new file mode 100644
index 00000000..dccec59c
--- /dev/null
+++ b/ipc/ipc_object.h
@@ -0,0 +1,192 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_object.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for IPC objects, for which tasks have capabilities.
+ */
+
+#ifndef _IPC_IPC_OBJECT_H_
+#define _IPC_IPC_OBJECT_H_
+
+#include <mach_ipc_compat.h>
+
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <kern/lock.h>
+#include <kern/macro_help.h>
+#include <kern/zalloc.h>
+
+typedef unsigned int ipc_object_refs_t;
+typedef unsigned int ipc_object_bits_t;
+typedef unsigned int ipc_object_type_t;
+
+typedef struct ipc_object {
+ decl_simple_lock_data(,io_lock_data)
+ ipc_object_refs_t io_references;
+ ipc_object_bits_t io_bits;
+} *ipc_object_t;
+
+#define IO_NULL ((ipc_object_t) 0)
+#define IO_DEAD ((ipc_object_t) -1)
+
+#define IO_VALID(io) (((io) != IO_NULL) && ((io) != IO_DEAD))
+
+#define IO_BITS_KOTYPE 0x0000ffff /* used by the object */
+#define IO_BITS_OTYPE 0x7fff0000 /* determines a zone */
+#define IO_BITS_ACTIVE 0x80000000U /* is object alive? */
+
+#define io_active(io) ((int)(io)->io_bits < 0) /* hack */
+
+#define io_otype(io) (((io)->io_bits & IO_BITS_OTYPE) >> 16)
+#define io_kotype(io) ((io)->io_bits & IO_BITS_KOTYPE)
+
+#define io_makebits(active, otype, kotype) \
+ (((active) ? IO_BITS_ACTIVE : 0) | ((otype) << 16) | (kotype))
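The io_bits word packs liveness, zone selector, and kernel-object type into one 32-bit value, and io_active() leans on the sign bit set by IO_BITS_ACTIVE. The following stand-alone sketch (the ex_ names are illustrative, not part of the kernel) shows the packing round-trip these macros perform:

	/* Stand-alone sketch of the io_bits packing; ex_ names are illustrative. */
	#include <assert.h>
	#include <stdio.h>

	#define EX_BITS_KOTYPE	0x0000ffff	/* used by the object */
	#define EX_BITS_OTYPE	0x7fff0000	/* determines a zone */
	#define EX_BITS_ACTIVE	0x80000000U	/* is object alive? */

	#define ex_makebits(active, otype, kotype) \
		(((active) ? EX_BITS_ACTIVE : 0) | ((otype) << 16) | (kotype))

	int
	main(void)
	{
		unsigned int bits = ex_makebits(1, 1 /* IOT_PORT_SET */, 3);

		assert((bits & EX_BITS_KOTYPE) == 3);		/* io_kotype() */
		assert(((bits & EX_BITS_OTYPE) >> 16) == 1);	/* io_otype() */
		assert((int) bits < 0);				/* io_active(): the sign-bit hack */
		printf("bits = 0x%08x\n", bits);		/* prints bits = 0x80010003 */
		return 0;
	}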
+
+/*
+ * Object types: ports, port sets, kernel-loaded ports
+ */
+#define IOT_PORT 0
+#define IOT_PORT_SET 1
+#define IOT_NUMBER 2 /* number of types used */
+
+extern zone_t ipc_object_zones[IOT_NUMBER];
+
+#define io_alloc(otype) \
+ ((ipc_object_t) zalloc(ipc_object_zones[(otype)]))
+
+#define io_free(otype, io) \
+ zfree(ipc_object_zones[(otype)], (vm_offset_t) (io))
+
+#define io_lock_init(io) simple_lock_init(&(io)->io_lock_data)
+#define io_lock(io) simple_lock(&(io)->io_lock_data)
+#define io_lock_try(io) simple_lock_try(&(io)->io_lock_data)
+#define io_unlock(io) simple_unlock(&(io)->io_lock_data)
+
+#define io_check_unlock(io) \
+MACRO_BEGIN \
+ ipc_object_refs_t _refs = (io)->io_references; \
+ \
+ io_unlock(io); \
+ if (_refs == 0) \
+ io_free(io_otype(io), io); \
+MACRO_END
+
+#define io_reference(io) \
+MACRO_BEGIN \
+ (io)->io_references++; \
+MACRO_END
+
+#define io_release(io) \
+MACRO_BEGIN \
+ (io)->io_references--; \
+MACRO_END
+
+extern void
+ipc_object_reference(/* ipc_object_t */);
+
+extern void
+ipc_object_release(/* ipc_object_t */);
+
+extern kern_return_t
+ipc_object_translate(/* ipc_space_t, mach_port_t,
+ mach_port_right_t, ipc_object_t * */);
+
+extern kern_return_t
+ipc_object_alloc_dead(/* ipc_space_t, mach_port_t * */);
+
+extern kern_return_t
+ipc_object_alloc_dead_name(/* ipc_space_t, mach_port_t */);
+
+extern kern_return_t
+ipc_object_alloc(/* ipc_space_t, ipc_object_type_t,
+ mach_port_type_t, mach_port_urefs_t,
+ mach_port_t *, ipc_object_t * */);
+
+extern kern_return_t
+ipc_object_alloc_name(/* ipc_space_t, ipc_object_type_t,
+ mach_port_type_t, mach_port_urefs_t,
+ mach_port_t, ipc_object_t * */);
+
+extern mach_msg_type_name_t
+ipc_object_copyin_type(/* mach_msg_type_name_t */);
+
+extern kern_return_t
+ipc_object_copyin(/* ipc_space_t, mach_port_t,
+ mach_msg_type_name_t, ipc_object_t * */);
+
+extern void
+ipc_object_copyin_from_kernel(/* ipc_object_t, mach_msg_type_name_t */);
+
+extern void
+ipc_object_destroy(/* ipc_object_t, mach_msg_type_name_t */);
+
+extern kern_return_t
+ipc_object_copyout(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, boolean_t, mach_port_t * */);
+
+extern kern_return_t
+ipc_object_copyout_name(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, boolean_t, mach_port_t */);
+
+extern void
+ipc_object_copyout_dest(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, mach_port_t * */);
+
+extern kern_return_t
+ipc_object_rename(/* ipc_space_t, mach_port_t, mach_port_t */);
+
+#if MACH_IPC_COMPAT
+
+extern mach_msg_type_name_t
+ipc_object_copyout_type_compat(/* mach_msg_type_name_t */);
+
+extern kern_return_t
+ipc_object_copyin_compat(/* ipc_space_t, mach_port_t,
+ mach_msg_type_name_t, boolean_t,
+ ipc_object_t * */);
+
+extern kern_return_t
+ipc_object_copyin_header(/* ipc_space_t, mach_port_t,
+ ipc_object_t *, mach_msg_type_name_t * */);
+
+extern kern_return_t
+ipc_object_copyout_compat(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, mach_port_t * */);
+
+extern kern_return_t
+ipc_object_copyout_name_compat(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, mach_port_t */);
+
+#endif /* MACH_IPC_COMPAT */
+
+extern void
+ipc_object_print(/* ipc_object_t */);
+
+#endif /* _IPC_IPC_OBJECT_H_ */
diff --git a/ipc/ipc_port.c b/ipc/ipc_port.c
new file mode 100644
index 00000000..770e7807
--- /dev/null
+++ b/ipc/ipc_port.c
@@ -0,0 +1,1545 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_port.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC ports.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <kern/lock.h>
+#include <kern/ipc_sched.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_notify.h>
+#if NORMA_IPC
+#include <norma/ipc_node.h>
+#endif /* NORMA_IPC */
+
+
+
+decl_simple_lock_data(, ipc_port_multiple_lock_data)
+
+decl_simple_lock_data(, ipc_port_timestamp_lock_data)
+ipc_port_timestamp_t ipc_port_timestamp_data;
+
+/*
+ * Routine: ipc_port_timestamp
+ * Purpose:
+ * Retrieve a timestamp value.
+ */
+
+ipc_port_timestamp_t
+ipc_port_timestamp(void)
+{
+ ipc_port_timestamp_t timestamp;
+
+ ipc_port_timestamp_lock();
+ timestamp = ipc_port_timestamp_data++;
+ ipc_port_timestamp_unlock();
+
+ return timestamp;
+}
+
+/*
+ * Routine: ipc_port_dnrequest
+ * Purpose:
+ * Try to allocate a dead-name request slot.
+ * If successful, returns the request index.
+ * Otherwise returns zero.
+ * Conditions:
+ * The port is locked and active.
+ * Returns:
+ * KERN_SUCCESS A request index was found.
+ * KERN_NO_SPACE No index allocated.
+ */
+
+kern_return_t
+ipc_port_dnrequest(port, name, soright, indexp)
+ ipc_port_t port;
+ mach_port_t name;
+ ipc_port_t soright;
+ ipc_port_request_index_t *indexp;
+{
+ ipc_port_request_t ipr, table;
+ ipc_port_request_index_t index;
+
+ assert(ip_active(port));
+ assert(name != MACH_PORT_NULL);
+ assert(soright != IP_NULL);
+
+ table = port->ip_dnrequests;
+ if (table == IPR_NULL)
+ return KERN_NO_SPACE;
+
+ index = table->ipr_next;
+ if (index == 0)
+ return KERN_NO_SPACE;
+
+ ipr = &table[index];
+ assert(ipr->ipr_name == MACH_PORT_NULL);
+
+ table->ipr_next = ipr->ipr_next;
+ ipr->ipr_name = name;
+ ipr->ipr_soright = soright;
+
+ *indexp = index;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_dngrow
+ * Purpose:
+ * Grow a port's table of dead-name requests.
+ * Conditions:
+ * The port must be locked and active.
+ * Nothing else locked; will allocate memory.
+ * Upon return the port is unlocked.
+ * Returns:
+ * KERN_SUCCESS Grew the table.
+ * KERN_SUCCESS Somebody else grew the table.
+ * KERN_SUCCESS The port died.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate new table.
+ */
+
+kern_return_t
+ipc_port_dngrow(port)
+ ipc_port_t port;
+{
+ ipc_table_size_t its;
+ ipc_port_request_t otable, ntable;
+
+ assert(ip_active(port));
+
+ otable = port->ip_dnrequests;
+ if (otable == IPR_NULL)
+ its = &ipc_table_dnrequests[0];
+ else
+ its = otable->ipr_size + 1;
+
+ ip_reference(port);
+ ip_unlock(port);
+
+ if ((its->its_size == 0) ||
+ ((ntable = it_dnrequests_alloc(its)) == IPR_NULL)) {
+ ipc_port_release(port);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ ip_lock(port);
+ ip_release(port);
+
+ /*
+ * Check that port is still active and that nobody else
+ * has slipped in and grown the table on us. Note that
+ * just checking port->ip_dnrequests == otable isn't
+ * sufficient; must check ipr_size.
+ */
+
+ if (ip_active(port) &&
+ (port->ip_dnrequests == otable) &&
+ ((otable == IPR_NULL) || (otable->ipr_size+1 == its))) {
+ ipc_table_size_t oits = 0; /* '=0' to shut up lint */
+ ipc_table_elems_t osize, nsize;
+ ipc_port_request_index_t free, i;
+
+ /* copy old table to new table */
+
+ if (otable != IPR_NULL) {
+ oits = otable->ipr_size;
+ osize = oits->its_size;
+ free = otable->ipr_next;
+
+ bcopy((char *)(otable + 1), (char *)(ntable + 1),
+ (osize - 1) * sizeof(struct ipc_port_request));
+ } else {
+ osize = 1;
+ free = 0;
+ }
+
+ nsize = its->its_size;
+ assert(nsize > osize);
+
+ /* add new elements to the new table's free list */
+
+ for (i = osize; i < nsize; i++) {
+ ipc_port_request_t ipr = &ntable[i];
+
+ ipr->ipr_name = MACH_PORT_NULL;
+ ipr->ipr_next = free;
+ free = i;
+ }
+
+ ntable->ipr_next = free;
+ ntable->ipr_size = its;
+ port->ip_dnrequests = ntable;
+ ip_unlock(port);
+
+ if (otable != IPR_NULL)
+ it_dnrequests_free(oits, otable);
+ } else {
+ ip_check_unlock(port);
+ it_dnrequests_free(its, ntable);
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_dncancel
+ * Purpose:
+ * Cancel a dead-name request and return the send-once right.
+ * Conditions:
+ * The port must be locked and active.
+ */
+
+ipc_port_t
+ipc_port_dncancel(
+ ipc_port_t port,
+ mach_port_t name,
+ ipc_port_request_index_t index)
+{
+ ipc_port_request_t ipr, table;
+ ipc_port_t dnrequest;
+
+ assert(ip_active(port));
+ assert(name != MACH_PORT_NULL);
+ assert(index != 0);
+
+ table = port->ip_dnrequests;
+ assert(table != IPR_NULL);
+
+ ipr = &table[index];
+ dnrequest = ipr->ipr_soright;
+ assert(ipr->ipr_name == name);
+
+ /* return ipr to the free list inside the table */
+
+ ipr->ipr_name = MACH_PORT_NULL;
+ ipr->ipr_next = table->ipr_next;
+ table->ipr_next = index;
+
+ return dnrequest;
+}
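The dead-name request table managed by ipc_port_dnrequest and ipc_port_dncancel is an array whose slot 0 carries the head of an index-linked free list in ipr_next: allocation pops the head, cancellation pushes the slot back. A minimal user-space sketch of that free-list discipline (simplified struct and hypothetical ex_ names) follows:

	/* Simplified sketch of the index-linked free list in the dnrequest table. */
	#include <assert.h>

	#define EX_TABLE_SIZE	8
	#define EX_NAME_NULL	0

	struct ex_request {
		unsigned int	name;	/* EX_NAME_NULL when the slot is free */
		unsigned int	next;	/* free-list link (an index), like ipr_next */
	};

	/* Pop a free slot; returns 0 (like KERN_NO_SPACE) when the list is empty. */
	static unsigned int
	ex_alloc(struct ex_request *table, unsigned int name)
	{
		unsigned int index = table[0].next;

		if (index == 0)
			return 0;
		table[0].next = table[index].next;
		table[index].name = name;
		return index;
	}

	/* Push a slot back onto the free list, like ipc_port_dncancel does. */
	static void
	ex_cancel(struct ex_request *table, unsigned int index)
	{
		table[index].name = EX_NAME_NULL;
		table[index].next = table[0].next;
		table[0].next = index;
	}

	int
	main(void)
	{
		struct ex_request table[EX_TABLE_SIZE];
		unsigned int i, index;

		/* Thread slots 1..N-1 onto the free list headed at slot 0. */
		for (i = 1; i < EX_TABLE_SIZE; i++) {
			table[i].name = EX_NAME_NULL;
			table[i].next = (i + 1 < EX_TABLE_SIZE) ? i + 1 : 0;
		}
		table[0].next = 1;

		index = ex_alloc(table, 0x1234);
		assert(index == 1 && table[0].next == 2);
		ex_cancel(table, index);
		assert(table[0].next == 1);
		return 0;
	}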
+
+/*
+ * Routine: ipc_port_pdrequest
+ * Purpose:
+ * Make a port-deleted request, returning the
+ * previously registered send-once right.
+ * Just cancels the previous request if notify is IP_NULL.
+ * Conditions:
+ * The port is locked and active. It is unlocked.
+ * Consumes a ref for notify (if non-null), and
+ * returns previous with a ref (if non-null).
+ */
+
+void
+ipc_port_pdrequest(
+ ipc_port_t port,
+ ipc_port_t notify,
+ ipc_port_t *previousp)
+{
+ ipc_port_t previous;
+
+ assert(ip_active(port));
+
+ previous = port->ip_pdrequest;
+ port->ip_pdrequest = notify;
+ ip_unlock(port);
+
+ *previousp = previous;
+}
+
+/*
+ * Routine: ipc_port_nsrequest
+ * Purpose:
+ * Make a no-senders request, returning the
+ * previously registered send-once right.
+ * Just cancels the previous request if notify is IP_NULL.
+ * Conditions:
+ * The port is locked and active. It is unlocked.
+ * Consumes a ref for notify (if non-null), and
+ * returns previous with a ref (if non-null).
+ */
+
+void
+ipc_port_nsrequest(
+ ipc_port_t port,
+ mach_port_mscount_t sync,
+ ipc_port_t notify,
+ ipc_port_t *previousp)
+{
+ ipc_port_t previous;
+ mach_port_mscount_t mscount;
+
+ assert(ip_active(port));
+
+ previous = port->ip_nsrequest;
+ mscount = port->ip_mscount;
+
+ if ((port->ip_srights == 0) &&
+ (sync <= mscount) &&
+ (notify != IP_NULL)) {
+ port->ip_nsrequest = IP_NULL;
+ ip_unlock(port);
+ ipc_notify_no_senders(notify, mscount);
+ } else {
+ port->ip_nsrequest = notify;
+ ip_unlock(port);
+ }
+
+ *previousp = previous;
+}
+
+/*
+ * Routine: ipc_port_set_qlimit
+ * Purpose:
+ * Changes a port's queue limit: the maximum number
+ * of messages that may be queued to the port.
+ * Conditions:
+ * The port is locked and active.
+ */
+
+void
+ipc_port_set_qlimit(
+ ipc_port_t port,
+ mach_port_msgcount_t qlimit)
+{
+ assert(ip_active(port));
+
+ /* wake up senders allowed by the new qlimit */
+
+ if (qlimit > port->ip_qlimit) {
+ mach_port_msgcount_t i, wakeup;
+
+ /* caution: wakeup, qlimit are unsigned */
+
+ wakeup = qlimit - port->ip_qlimit;
+
+ for (i = 0; i < wakeup; i++) {
+ ipc_thread_t th;
+
+ th = ipc_thread_dequeue(&port->ip_blocked);
+ if (th == ITH_NULL)
+ break;
+
+ th->ith_state = MACH_MSG_SUCCESS;
+ thread_go(th);
+ }
+ }
+
+ port->ip_qlimit = qlimit;
+}
+
+/*
+ * Routine: ipc_port_lock_mqueue
+ * Purpose:
+ * Locks and returns the message queue that the port is using.
+ * The message queue may be in the port or in its port set.
+ * Conditions:
+ * The port is locked and active.
+ * Port set, message queue locks may be taken.
+ */
+
+ipc_mqueue_t
+ipc_port_lock_mqueue(port)
+ ipc_port_t port;
+{
+ if (port->ip_pset != IPS_NULL) {
+ ipc_pset_t pset = port->ip_pset;
+
+ ips_lock(pset);
+ if (ips_active(pset)) {
+ imq_lock(&pset->ips_messages);
+ ips_unlock(pset);
+ return &pset->ips_messages;
+ }
+
+ ipc_pset_remove(pset, port);
+ ips_check_unlock(pset);
+ }
+
+ imq_lock(&port->ip_messages);
+ return &port->ip_messages;
+}
+
+/*
+ * Routine: ipc_port_set_seqno
+ * Purpose:
+ * Changes a port's sequence number.
+ * Conditions:
+ * The port is locked and active.
+ * Port set, message queue locks may be taken.
+ */
+
+void
+ipc_port_set_seqno(port, seqno)
+ ipc_port_t port;
+ mach_port_seqno_t seqno;
+{
+ ipc_mqueue_t mqueue;
+
+ mqueue = ipc_port_lock_mqueue(port);
+ port->ip_seqno = seqno;
+ imq_unlock(mqueue);
+}
+
+/*
+ * Routine: ipc_port_clear_receiver
+ * Purpose:
+ * Prepares a receive right for transmission/destruction.
+ * Conditions:
+ * The port is locked and active.
+ */
+
+void
+ipc_port_clear_receiver(
+ ipc_port_t port)
+{
+ ipc_pset_t pset;
+
+ assert(ip_active(port));
+
+ pset = port->ip_pset;
+ if (pset != IPS_NULL) {
+ /* No threads receiving from port, but must remove from set. */
+
+ ips_lock(pset);
+ ipc_pset_remove(pset, port);
+ ips_check_unlock(pset);
+ } else {
+ /* Else, wake up all receivers, indicating why. */
+
+ imq_lock(&port->ip_messages);
+ ipc_mqueue_changed(&port->ip_messages, MACH_RCV_PORT_DIED);
+ imq_unlock(&port->ip_messages);
+ }
+
+ ipc_port_set_mscount(port, 0);
+ imq_lock(&port->ip_messages);
+ port->ip_seqno = 0;
+ imq_unlock(&port->ip_messages);
+}
+
+/*
+ * Routine: ipc_port_init
+ * Purpose:
+ * Initializes a newly-allocated port.
+ * Doesn't touch the ip_object fields.
+ */
+
+void
+ipc_port_init(
+ ipc_port_t port,
+ ipc_space_t space,
+ mach_port_t name)
+{
+ /* port->ip_kobject doesn't have to be initialized */
+
+ ipc_target_init(&port->ip_target, name);
+
+ port->ip_receiver = space;
+
+ port->ip_mscount = 0;
+ port->ip_srights = 0;
+ port->ip_sorights = 0;
+
+ port->ip_nsrequest = IP_NULL;
+ port->ip_pdrequest = IP_NULL;
+ port->ip_dnrequests = IPR_NULL;
+
+ port->ip_pset = IPS_NULL;
+ port->ip_cur_target = &port->ip_target;
+ port->ip_seqno = 0;
+ port->ip_msgcount = 0;
+ port->ip_qlimit = MACH_PORT_QLIMIT_DEFAULT;
+
+#if NORMA_IPC
+ port->ip_norma_uid = 0;
+ port->ip_norma_dest_node = 0;
+ port->ip_norma_stransit = 0;
+ port->ip_norma_sotransit = 0;
+ port->ip_norma_xmm_object_refs = 0;
+ port->ip_norma_is_proxy = FALSE;
+ port->ip_norma_is_special = FALSE;
+ port->ip_norma_atrium = IP_NULL;
+ port->ip_norma_queue_next = port;
+ port->ip_norma_xmm_object = IP_NULL;
+ port->ip_norma_next = port;
+ port->ip_norma_spare1 = 0L;
+ port->ip_norma_spare2 = 0L;
+ port->ip_norma_spare3 = 0L;
+ port->ip_norma_spare4 = 0L;
+#endif /* NORMA_IPC */
+
+ ipc_mqueue_init(&port->ip_messages);
+ ipc_thread_queue_init(&port->ip_blocked);
+}
+
+/*
+ * Routine: ipc_port_alloc
+ * Purpose:
+ * Allocate a port.
+ * Conditions:
+ * Nothing locked. If successful, the port is returned
+ * locked. (The caller doesn't have a reference.)
+ * Returns:
+ * KERN_SUCCESS The port is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_port_alloc(
+ ipc_space_t space,
+ mach_port_t *namep,
+ ipc_port_t *portp)
+{
+ ipc_port_t port;
+ mach_port_t name;
+ kern_return_t kr;
+
+ kr = ipc_object_alloc(space, IOT_PORT,
+ MACH_PORT_TYPE_RECEIVE, 0,
+ &name, (ipc_object_t *) &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ /* port is locked */
+
+ ipc_port_init(port, space, name);
+
+ *namep = name;
+ *portp = port;
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_alloc_name
+ * Purpose:
+ * Allocate a port, with a specific name.
+ * Conditions:
+ * Nothing locked. If successful, the port is returned
+ * locked. (The caller doesn't have a reference.)
+ * Returns:
+ * KERN_SUCCESS The port is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_port_alloc_name(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_port_t *portp)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ kr = ipc_object_alloc_name(space, IOT_PORT,
+ MACH_PORT_TYPE_RECEIVE, 0,
+ name, (ipc_object_t *) &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked */
+
+ ipc_port_init(port, space, name);
+
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
+#if MACH_IPC_COMPAT
+/*
+ * Routine: ipc_port_delete_compat
+ * Purpose:
+ * Find and destroy a compat entry for a dead port.
+ * If successful, generate a port-deleted notification.
+ * Conditions:
+ * Nothing locked; the port is dead.
+ * Frees a ref for the space.
+ */
+
+void
+ipc_port_delete_compat(port, space, name)
+ ipc_port_t port;
+ ipc_space_t space;
+ mach_port_t name;
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ assert(!ip_active(port));
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr == KERN_SUCCESS) {
+ ipc_port_t sright;
+
+ /* space is write-locked and active */
+
+ if ((ipc_port_t) entry->ie_object == port) {
+ assert(entry->ie_bits & IE_BITS_COMPAT);
+
+ sright = ipc_space_make_notify(space);
+
+ kr = ipc_right_destroy(space, name, entry);
+ /* space is unlocked */
+ assert(kr == KERN_INVALID_NAME);
+ } else {
+ is_write_unlock(space);
+ sright = IP_NULL;
+ }
+
+ if (IP_VALID(sright))
+ ipc_notify_port_deleted_compat(sright, name);
+ }
+
+ is_release(space);
+}
+#endif /* MACH_IPC_COMPAT */
+
+/*
+ * Routine: ipc_port_destroy
+ * Purpose:
+ * Destroys a port. Cleans up queued messages.
+ *
+ * If the port has a backup, it doesn't get destroyed,
+ * but is sent in a port-destroyed notification to the backup.
+ * Conditions:
+ * The port is locked and alive; nothing else locked.
+ * The caller has a reference, which is consumed.
+ * Afterwards, the port is unlocked and dead.
+ */
+
+void
+ipc_port_destroy(
+ ipc_port_t port)
+{
+ ipc_port_t pdrequest, nsrequest;
+ ipc_mqueue_t mqueue;
+ ipc_kmsg_queue_t kmqueue;
+ ipc_kmsg_t kmsg;
+ ipc_thread_t sender;
+ ipc_port_request_t dnrequests;
+
+ assert(ip_active(port));
+ /* port->ip_receiver_name is garbage */
+ /* port->ip_receiver/port->ip_destination is garbage */
+ assert(port->ip_pset == IPS_NULL);
+ assert(port->ip_mscount == 0);
+ assert(port->ip_seqno == 0);
+
+ /* first check for a backup port */
+
+ pdrequest = port->ip_pdrequest;
+ if (pdrequest != IP_NULL) {
+ /* we assume the ref for pdrequest */
+ port->ip_pdrequest = IP_NULL;
+
+ /* make port be in limbo */
+ port->ip_receiver_name = MACH_PORT_NULL;
+ port->ip_destination = IP_NULL;
+ ip_unlock(port);
+
+#if MACH_IPC_COMPAT
+ /*
+ * pdrequest might actually be a send right instead
+ * of a send-once right, indicated by the low bit
+ * of the pointer value. If this is the case,
+ * we must use ipc_notify_port_destroyed_compat.
+ */
+
+ if (ip_pdsendp(pdrequest)) {
+ ipc_port_t sright = ip_pdsend(pdrequest);
+
+ if (!ipc_port_check_circularity(port, sright)) {
+ /* consumes our refs for port and sright */
+ ipc_notify_port_destroyed_compat(sright, port);
+ return;
+ } else {
+ /* consume sright and destroy port */
+ ipc_port_release_send(sright);
+ }
+ } else
+#endif /* MACH_IPC_COMPAT */
+
+ if (!ipc_port_check_circularity(port, pdrequest)) {
+ /* consumes our refs for port and pdrequest */
+ ipc_notify_port_destroyed(pdrequest, port);
+ return;
+ } else {
+ /* consume pdrequest and destroy port */
+ ipc_port_release_sonce(pdrequest);
+ }
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_pset == IPS_NULL);
+ assert(port->ip_mscount == 0);
+ assert(port->ip_seqno == 0);
+ assert(port->ip_pdrequest == IP_NULL);
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ assert(port->ip_destination == IP_NULL);
+
+ /* fall through and destroy the port */
+ }
+
+#if NORMA_IPC
+ /*
+ * destroy any NORMA_IPC state associated with port
+ */
+ norma_ipc_port_destroy(port);
+#endif /* NORMA_IPC */
+
+ /*
+ * rouse all blocked senders
+ *
+ * This must be done with the port locked, because
+ * ipc_mqueue_send can play with the ip_blocked queue
+ * of a dead port.
+ */
+
+ while ((sender = ipc_thread_dequeue(&port->ip_blocked)) != ITH_NULL) {
+ sender->ith_state = MACH_MSG_SUCCESS;
+ thread_go(sender);
+ }
+
+ /* once port is dead, we don't need to keep it locked */
+
+ port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
+ port->ip_timestamp = ipc_port_timestamp();
+ ip_unlock(port);
+
+ /* throw away no-senders request */
+
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL)
+ ipc_notify_send_once(nsrequest); /* consumes ref */
+
+ /* destroy any queued messages */
+
+ mqueue = &port->ip_messages;
+ imq_lock(mqueue);
+ assert(ipc_thread_queue_empty(&mqueue->imq_threads));
+ kmqueue = &mqueue->imq_messages;
+
+ while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
+ imq_unlock(mqueue);
+
+ assert(kmsg->ikm_header.msgh_remote_port ==
+ (mach_port_t) port);
+
+ ipc_port_release(port);
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ ipc_kmsg_destroy(kmsg);
+
+ imq_lock(mqueue);
+ }
+
+ imq_unlock(mqueue);
+
+ /* generate dead-name notifications */
+
+ dnrequests = port->ip_dnrequests;
+ if (dnrequests != IPR_NULL) {
+ ipc_table_size_t its = dnrequests->ipr_size;
+ ipc_table_elems_t size = its->its_size;
+ ipc_port_request_index_t index;
+
+ for (index = 1; index < size; index++) {
+ ipc_port_request_t ipr = &dnrequests[index];
+ mach_port_t name = ipr->ipr_name;
+ ipc_port_t soright;
+
+ if (name == MACH_PORT_NULL)
+ continue;
+
+ soright = ipr->ipr_soright;
+ assert(soright != IP_NULL);
+
+#if MACH_IPC_COMPAT
+ if (ipr_spacep(soright)) {
+ ipc_port_delete_compat(port,
+ ipr_space(soright), name);
+ continue;
+ }
+#endif /* MACH_IPC_COMPAT */
+
+ ipc_notify_dead_name(soright, name);
+ }
+
+ it_dnrequests_free(its, dnrequests);
+ }
+
+ if (ip_kotype(port) != IKOT_NONE)
+ ipc_kobject_destroy(port);
+
+ /* Common destruction for the IPC target. */
+ ipc_target_terminate(&port->ip_target);
+
+ ipc_port_release(port); /* consume caller's ref */
+}
+
+/*
+ * Routine: ipc_port_check_circularity
+ * Purpose:
+ * Check if queueing "port" in a message for "dest"
+ * would create a circular group of ports and messages.
+ *
+ * If no circularity (FALSE returned), then "port"
+ * is changed from "in limbo" to "in transit".
+ *
+ * That is, we want to set port->ip_destination == dest,
+ * but guaranteeing that this doesn't create a circle
+ * port->ip_destination->ip_destination->... == port
+ * Conditions:
+ * No ports locked. References held for "port" and "dest".
+ */
+
+boolean_t
+ipc_port_check_circularity(
+ ipc_port_t port,
+ ipc_port_t dest)
+{
+ ipc_port_t base;
+
+ assert(port != IP_NULL);
+ assert(dest != IP_NULL);
+
+ if (port == dest)
+ return TRUE;
+ base = dest;
+
+ /*
+ * First try a quick check that can run in parallel.
+ * No circularity if dest is not in transit.
+ */
+
+ ip_lock(port);
+ if (ip_lock_try(dest)) {
+ if (!ip_active(dest) ||
+ (dest->ip_receiver_name != MACH_PORT_NULL) ||
+ (dest->ip_destination == IP_NULL))
+ goto not_circular;
+
+ /* dest is in transit; further checking necessary */
+
+ ip_unlock(dest);
+ }
+ ip_unlock(port);
+
+ ipc_port_multiple_lock(); /* massive serialization */
+
+ /*
+ * Search for the end of the chain (a port not in transit),
+ * acquiring locks along the way.
+ */
+
+ for (;;) {
+ ip_lock(base);
+
+ if (!ip_active(base) ||
+ (base->ip_receiver_name != MACH_PORT_NULL) ||
+ (base->ip_destination == IP_NULL))
+ break;
+
+ base = base->ip_destination;
+ }
+
+ /* all ports in chain from dest to base, inclusive, are locked */
+
+ if (port == base) {
+ /* circularity detected! */
+
+ ipc_port_multiple_unlock();
+
+ /* port (== base) is in limbo */
+
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ assert(port->ip_destination == IP_NULL);
+
+ while (dest != IP_NULL) {
+ ipc_port_t next;
+
+ /* dest is in transit or in limbo */
+
+ assert(ip_active(dest));
+ assert(dest->ip_receiver_name == MACH_PORT_NULL);
+
+ next = dest->ip_destination;
+ ip_unlock(dest);
+ dest = next;
+ }
+
+ return TRUE;
+ }
+
+ /*
+ * The guarantee: lock port while the entire chain is locked.
+ * Once port is locked, we can take a reference to dest,
+ * add port to the chain, and unlock everything.
+ */
+
+ ip_lock(port);
+ ipc_port_multiple_unlock();
+
+ not_circular:
+
+ /* port is in limbo */
+
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ assert(port->ip_destination == IP_NULL);
+
+ ip_reference(dest);
+ port->ip_destination = dest;
+
+ /* now unlock chain */
+
+ while (port != base) {
+ ipc_port_t next;
+
+ /* port is in transit */
+
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ assert(port->ip_destination != IP_NULL);
+
+ next = port->ip_destination;
+ ip_unlock(port);
+ port = next;
+ }
+
+ /* base is not in transit */
+
+ assert(!ip_active(base) ||
+ (base->ip_receiver_name != MACH_PORT_NULL) ||
+ (base->ip_destination == IP_NULL));
+ ip_unlock(base);
+
+ return FALSE;
+}
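Stripped of its locking protocol, the circularity test above is a walk down the ip_destination chain from dest: if the walk reaches port before it reaches a port that is not in transit, queueing port in a message for dest would close a cycle. A lock-free toy model of that walk (hypothetical ex_ names, not kernel code):

	/* Toy model of the destination-chain walk, without any of the locking. */
	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct ex_port {
		struct ex_port *dest;	/* like ip_destination; NULL when not in transit */
	};

	static bool
	ex_would_be_circular(struct ex_port *port, struct ex_port *dest)
	{
		struct ex_port *p;

		if (port == dest)
			return true;
		for (p = dest; p != NULL; p = p->dest)	/* walk to the base of the chain */
			if (p == port)
				return true;
		return false;
	}

	int
	main(void)
	{
		struct ex_port a = { NULL };	/* base: not in transit */
		struct ex_port b = { &a };	/* in transit to a */
		struct ex_port c = { &b };	/* in transit to b */
		struct ex_port p = { NULL };	/* in limbo */

		assert(!ex_would_be_circular(&p, &c));	/* p -> c -> b -> a: no cycle */
		assert(ex_would_be_circular(&a, &c));	/* a -> c -> b -> a: cycle */
		return 0;
	}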
+
+/*
+ * Routine: ipc_port_lookup_notify
+ * Purpose:
+ * Make a send-once notify port from a receive right.
+ * Returns IP_NULL if name doesn't denote a receive right.
+ * Conditions:
+ * The space must be locked (read or write) and active.
+ */
+
+ipc_port_t
+ipc_port_lookup_notify(
+ ipc_space_t space,
+ mach_port_t name)
+{
+ ipc_port_t port;
+ ipc_entry_t entry;
+
+ assert(space->is_active);
+
+ entry = ipc_entry_lookup(space, name);
+ if (entry == IE_NULL)
+ return IP_NULL;
+
+ if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)
+ return IP_NULL;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+
+ ip_reference(port);
+ port->ip_sorights++;
+ ip_unlock(port);
+
+ return port;
+}
+
+/*
+ * Routine: ipc_port_make_send
+ * Purpose:
+ * Make a naked send right from a receive right.
+ * Conditions:
+ * The port is not locked but it is active.
+ */
+
+ipc_port_t
+ipc_port_make_send(
+ ipc_port_t port)
+{
+ assert(IP_VALID(port));
+
+ ip_lock(port);
+ assert(ip_active(port));
+ port->ip_mscount++;
+ port->ip_srights++;
+ ip_reference(port);
+ ip_unlock(port);
+
+ return port;
+}
+
+/*
+ * Routine: ipc_port_copy_send
+ * Purpose:
+ * Make a naked send right from another naked send right.
+ * IP_NULL -> IP_NULL
+ * IP_DEAD -> IP_DEAD
+ * dead port -> IP_DEAD
+ * live port -> port + ref
+ * Conditions:
+ * Nothing locked except possibly a space.
+ */
+
+ipc_port_t
+ipc_port_copy_send(
+ ipc_port_t port)
+{
+ ipc_port_t sright;
+
+ if (!IP_VALID(port))
+ return port;
+
+ ip_lock(port);
+ if (ip_active(port)) {
+ assert(port->ip_srights > 0);
+
+ ip_reference(port);
+ port->ip_srights++;
+ sright = port;
+ } else
+ sright = IP_DEAD;
+ ip_unlock(port);
+
+ return sright;
+}
+
+/*
+ * Routine: ipc_port_copyout_send
+ * Purpose:
+ * Copyout a naked send right (possibly null/dead),
+ * or if that fails, destroy the right.
+ * Conditions:
+ * Nothing locked.
+ */
+
+mach_port_t
+ipc_port_copyout_send(
+ ipc_port_t sright,
+ ipc_space_t space)
+{
+ mach_port_t name;
+
+ if (IP_VALID(sright)) {
+ kern_return_t kr;
+
+ kr = ipc_object_copyout(space, (ipc_object_t) sright,
+ MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
+ if (kr != KERN_SUCCESS) {
+ ipc_port_release_send(sright);
+
+ if (kr == KERN_INVALID_CAPABILITY)
+ name = MACH_PORT_DEAD;
+ else
+ name = MACH_PORT_NULL;
+ }
+ } else
+ name = (mach_port_t) sright;
+
+ return name;
+}
+
+/*
+ * Routine: ipc_port_release_send
+ * Purpose:
+ * Release a (valid) naked send right.
+ * Consumes a ref for the port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_port_release_send(
+ ipc_port_t port)
+{
+ ipc_port_t nsrequest = IP_NULL;
+ mach_port_mscount_t mscount;
+
+ assert(IP_VALID(port));
+
+ ip_lock(port);
+ ip_release(port);
+
+ if (!ip_active(port)) {
+ ip_check_unlock(port);
+ return;
+ }
+
+ assert(port->ip_srights > 0);
+
+ if (--port->ip_srights == 0) {
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL) {
+ port->ip_nsrequest = IP_NULL;
+ mscount = port->ip_mscount;
+ }
+ }
+
+ ip_unlock(port);
+
+ if (nsrequest != IP_NULL)
+ ipc_notify_no_senders(nsrequest, mscount);
+}
+
+/*
+ * Routine: ipc_port_make_sonce
+ * Purpose:
+ * Make a naked send-once right from a receive right.
+ * Conditions:
+ * The port is not locked but it is active.
+ */
+
+ipc_port_t
+ipc_port_make_sonce(
+ ipc_port_t port)
+{
+ assert(IP_VALID(port));
+
+ ip_lock(port);
+ assert(ip_active(port));
+ port->ip_sorights++;
+ ip_reference(port);
+ ip_unlock(port);
+
+ return port;
+}
+
+/*
+ * Routine: ipc_port_release_sonce
+ * Purpose:
+ * Release a naked send-once right.
+ * Consumes a ref for the port.
+ *
+ * In normal situations, this is never used.
+ * Send-once rights are only consumed when
+ * a message (possibly a send-once notification)
+ * is sent to them.
+ * Conditions:
+ * Nothing locked except possibly a space.
+ */
+
+void
+ipc_port_release_sonce(
+ ipc_port_t port)
+{
+ assert(IP_VALID(port));
+
+ ip_lock(port);
+
+ assert(port->ip_sorights > 0);
+
+ port->ip_sorights--;
+
+ ip_release(port);
+
+ if (!ip_active(port)) {
+ ip_check_unlock(port);
+ return;
+ }
+
+ ip_unlock(port);
+}
+
+/*
+ * Routine: ipc_port_release_receive
+ * Purpose:
+ * Release a naked (in limbo or in transit) receive right.
+ * Consumes a ref for the port; destroys the port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_port_release_receive(
+ ipc_port_t port)
+{
+ ipc_port_t dest;
+
+ assert(IP_VALID(port));
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ dest = port->ip_destination;
+
+ ipc_port_destroy(port); /* consumes ref, unlocks */
+
+ if (dest != IP_NULL)
+ ipc_port_release(dest);
+}
+
+/*
+ * Routine: ipc_port_alloc_special
+ * Purpose:
+ * Allocate a port in a special space.
+ * The new port is returned with one ref.
+ * If unsuccessful, IP_NULL is returned.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+ipc_port_alloc_special(space)
+ ipc_space_t space;
+{
+#if NORMA_IPC
+#if i386
+ int ret = (&ret)[2]; /* where we were called from */
+#else
+ int ret = (int) ipc_port_alloc_special;
+#endif
+ extern int input_msgh_id;
+#endif /* NORMA_IPC */
+ ipc_port_t port;
+
+ port = (ipc_port_t) io_alloc(IOT_PORT);
+ if (port == IP_NULL)
+ return IP_NULL;
+
+ io_lock_init(&port->ip_object);
+ port->ip_references = 1;
+ port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);
+
+ /*
+ * The actual values of ip_receiver_name aren't important,
+ * as long as they are valid (not null/dead).
+ *
+ * Mach4: we set it to the internal port structure address
+ * so we can always just pass on ip_receiver_name during
+ * an rpc regardless of whether the destination is user or
+ * kernel (i.e. no special-casing code for the kernel along
+ * the fast rpc path).
+ */
+
+ ipc_port_init(port, space, (mach_port_t)port);
+
+#if NORMA_IPC
+ port->ip_norma_spare1 = ret;
+ port->ip_norma_spare2 = input_msgh_id;
+#endif /* NORMA_IPC */
+ return port;
+}
+
+/*
+ * Routine: ipc_port_dealloc_special
+ * Purpose:
+ * Deallocate a port in a special space.
+ * Consumes one ref for the port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_port_dealloc_special(
+ ipc_port_t port,
+ ipc_space_t space)
+{
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name != MACH_PORT_NULL);
+ assert(port->ip_receiver == space);
+
+ /*
+ * We clear ip_receiver_name and ip_receiver to simplify
+ * the ipc_space_kernel check in ipc_mqueue_send.
+ */
+
+ port->ip_receiver_name = MACH_PORT_NULL;
+ port->ip_receiver = IS_NULL;
+
+ /*
+ * For ipc_space_kernel, all ipc_port_clear_receiver does
+ * is clean things up for the assertions in ipc_port_destroy.
+ * For ipc_space_reply, there might be a waiting receiver.
+ */
+
+ ipc_port_clear_receiver(port);
+ ipc_port_destroy(port);
+}
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: ipc_port_alloc_compat
+ * Purpose:
+ * Allocate a port.
+ * Conditions:
+ * Nothing locked. If successful, the port is returned
+ * locked. (The caller doesn't have a reference.)
+ *
+ * Like ipc_port_alloc, except that the new entry
+ * is IE_BITS_COMPAT.
+ * Returns:
+ * KERN_SUCCESS The port is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_port_alloc_compat(space, namep, portp)
+ ipc_space_t space;
+ mach_port_t *namep;
+ ipc_port_t *portp;
+{
+ ipc_port_t port;
+ ipc_entry_t entry;
+ mach_port_t name;
+ ipc_table_size_t its;
+ ipc_port_request_t table;
+ ipc_table_elems_t size;
+ ipc_port_request_index_t free, i;
+ kern_return_t kr;
+
+ port = ip_alloc();
+ if (port == IP_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ its = &ipc_table_dnrequests[0];
+ table = it_dnrequests_alloc(its);
+ if (table == IPR_NULL) {
+ ip_free(port);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ kr = ipc_entry_alloc(space, &name, &entry);
+ if (kr != KERN_SUCCESS) {
+ ip_free(port);
+ it_dnrequests_free(its, table);
+ return kr;
+ }
+ /* space is write-locked */
+
+ entry->ie_object = (ipc_object_t) port;
+ entry->ie_request = 1;
+ entry->ie_bits |= IE_BITS_COMPAT|MACH_PORT_TYPE_RECEIVE;
+
+ ip_lock_init(port);
+ ip_lock(port);
+ is_write_unlock(space);
+
+ port->ip_references = 1; /* for entry, not caller */
+ port->ip_bits = io_makebits(TRUE, IOT_PORT, 0);
+
+ ipc_port_init(port, space, name);
+
+ size = its->its_size;
+ assert(size > 1);
+ free = 0;
+
+ for (i = 2; i < size; i++) {
+ ipc_port_request_t ipr = &table[i];
+
+ ipr->ipr_name = MACH_PORT_NULL;
+ ipr->ipr_next = free;
+ free = i;
+ }
+
+ table->ipr_next = free;
+ table->ipr_size = its;
+ port->ip_dnrequests = table;
+
+ table[1].ipr_name = name;
+ table[1].ipr_soright = ipr_spacem(space);
+ is_reference(space);
+
+ *namep = name;
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_copyout_send_compat
+ * Purpose:
+ * Copyout a naked send right (possibly null/dead),
+ * or if that fails, destroy the right.
+ * Like ipc_port_copyout_send, except that if a
+ * new translation is created it has the compat bit.
+ * Conditions:
+ * Nothing locked.
+ */
+
+mach_port_t
+ipc_port_copyout_send_compat(sright, space)
+ ipc_port_t sright;
+ ipc_space_t space;
+{
+ mach_port_t name;
+
+ if (IP_VALID(sright)) {
+ kern_return_t kr;
+
+ kr = ipc_object_copyout_compat(space, (ipc_object_t) sright,
+ MACH_MSG_TYPE_PORT_SEND, &name);
+ if (kr != KERN_SUCCESS) {
+ ipc_port_release_send(sright);
+ name = MACH_PORT_NULL;
+ }
+ } else
+ name = (mach_port_t) sright;
+
+ return name;
+}
+
+/*
+ * Routine: ipc_port_copyout_receiver
+ * Purpose:
+ * Copyout a port reference (possibly null)
+ * by giving the caller his name for the port,
+ * if he is the receiver.
+ * Conditions:
+ * Nothing locked. Consumes a ref for the port.
+ */
+
+mach_port_t
+ipc_port_copyout_receiver(port, space)
+ ipc_port_t port;
+ ipc_space_t space;
+{
+ mach_port_t name;
+
+ if (!IP_VALID(port))
+ return MACH_PORT_NULL;
+
+ ip_lock(port);
+ if (port->ip_receiver == space) {
+ name = port->ip_receiver_name;
+ assert(MACH_PORT_VALID(name));
+ } else
+ name = MACH_PORT_NULL;
+
+ ip_release(port);
+ ip_check_unlock(port);
+
+ return name;
+}
+
+#endif /* MACH_IPC_COMPAT */
+
+#include <mach_kdb.h>
+
+
+#if MACH_KDB
+#define printf kdbprintf
+
+/*
+ * Routine: ipc_port_print
+ * Purpose:
+ * Pretty-print a port for kdb.
+ */
+
+void
+ipc_port_print(port)
+ ipc_port_t port;
+{
+ extern int indent;
+
+ printf("port 0x%x\n", port);
+
+ indent += 2;
+
+ ipc_object_print(&port->ip_object);
+ iprintf("receiver=0x%x", port->ip_receiver);
+ printf(", receiver_name=0x%x\n", port->ip_receiver_name);
+
+ iprintf("mscount=%d", port->ip_mscount);
+ printf(", srights=%d", port->ip_srights);
+ printf(", sorights=%d\n", port->ip_sorights);
+
+ iprintf("nsrequest=0x%x", port->ip_nsrequest);
+ printf(", pdrequest=0x%x", port->ip_pdrequest);
+ printf(", dnrequests=0x%x\n", port->ip_dnrequests);
+
+ iprintf("pset=0x%x", port->ip_pset);
+ printf(", seqno=%d", port->ip_seqno);
+ printf(", msgcount=%d", port->ip_msgcount);
+ printf(", qlimit=%d\n", port->ip_qlimit);
+
+ iprintf("kmsgs=0x%x", port->ip_messages.imq_messages.ikmq_base);
+ printf(", rcvrs=0x%x", port->ip_messages.imq_threads.ithq_base);
+ printf(", sndrs=0x%x", port->ip_blocked.ithq_base);
+ printf(", kobj=0x%x\n", port->ip_kobject);
+
+#if NORMA_IPC
+ iprintf("norma_uid=%x", port->ip_norma_uid);
+ printf(", dest_node=%d", port->ip_norma_dest_node);
+ printf(", stransit=%d", port->ip_norma_stransit);
+ printf(", xorefs=%d", port->ip_norma_xmm_object_refs);
+ printf(", sotransit=%d\n", port->ip_norma_sotransit);
+
+ iprintf("norma_is_proxy=%d", port->ip_norma_is_proxy);
+ printf(", is_special=%d\n", port->ip_norma_is_special);
+
+ iprintf("norma_atrium=0x%x", port->ip_norma_atrium);
+ printf(", queue_next=0x%x", port->ip_norma_queue_next);
+ printf(", xmm_object=0x%x", port->ip_norma_xmm_object);
+ printf(", next=0x%x\n", port->ip_norma_next);
+
+ iprintf("norma_spare1=0x%x", port->ip_norma_spare1);
+ printf(", norma_spare2=0x%x", port->ip_norma_spare2);
+ printf(", norma_spare3=0x%x", port->ip_norma_spare3);
+ printf(", norma_spare4=0x%x\n", port->ip_norma_spare4);
+#endif /* NORMA_IPC */
+
+ indent -= 2;
+}
+
+#endif /* MACH_KDB */
diff --git a/ipc/ipc_port.h b/ipc/ipc_port.h
new file mode 100644
index 00000000..21d4309e
--- /dev/null
+++ b/ipc/ipc_port.h
@@ -0,0 +1,407 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_port.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for ports.
+ */
+
+#ifndef _IPC_IPC_PORT_H_
+#define _IPC_IPC_PORT_H_
+
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/macro_help.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_thread.h>
+#include "ipc_target.h"
+#include <mach/rpc.h>
+
+/*
+ * A receive right (port) can be in four states:
+ * 1) dead (not active, ip_timestamp has death time)
+ * 2) in a space (ip_receiver_name != 0, ip_receiver points
+ * to the space but doesn't hold a ref for it)
+ * 3) in transit (ip_receiver_name == 0, ip_destination points
+ * to the destination port and holds a ref for it)
+ * 4) in limbo (ip_receiver_name == 0, ip_destination == IP_NULL)
+ *
+ * If the port is active, and ip_receiver points to some space,
+ * then ip_receiver_name != 0, and that space holds receive rights.
+ * If the port is not active, then ip_timestamp contains a timestamp
+ * taken when the port was destroyed.
+ */
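Given those invariants, the state of a receive right can be recovered from three fields alone. A hypothetical stand-alone classifier (ex_ names are illustrative and mirror ip_active, ip_receiver_name, and ip_destination) makes the decision table explicit:

	/* Hypothetical stand-alone classifier for the four states above. */
	#include <assert.h>
	#include <stddef.h>

	struct ex_port {
		int		active;		/* like ip_active() */
		unsigned int	receiver_name;	/* like ip_receiver_name */
		struct ex_port	*destination;	/* like ip_destination */
	};

	enum ex_state { EX_DEAD, EX_IN_SPACE, EX_IN_TRANSIT, EX_IN_LIMBO };

	static enum ex_state
	ex_classify(const struct ex_port *p)
	{
		if (!p->active)
			return EX_DEAD;		/* 1) ip_timestamp holds the death time */
		if (p->receiver_name != 0)
			return EX_IN_SPACE;	/* 2) ip_receiver points to the space */
		if (p->destination != NULL)
			return EX_IN_TRANSIT;	/* 3) holds a ref on the destination */
		return EX_IN_LIMBO;		/* 4) neither receiver nor destination */
	}

	int
	main(void)
	{
		struct ex_port limbo = { 1, 0, NULL };
		struct ex_port dead = { 0, 0, NULL };

		assert(ex_classify(&limbo) == EX_IN_LIMBO);
		assert(ex_classify(&dead) == EX_DEAD);
		return 0;
	}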
+
+typedef unsigned int ipc_port_timestamp_t;
+
+struct ipc_port {
+ struct ipc_target ip_target;
+
+ /* This points to the ip_target above if this port isn't on a port set;
+ otherwise it points to the port set's ips_target. */
+ struct ipc_target *ip_cur_target;
+
+ union {
+ struct ipc_space *receiver;
+ struct ipc_port *destination;
+ ipc_port_timestamp_t timestamp;
+ } data;
+
+ ipc_kobject_t ip_kobject;
+
+ mach_port_mscount_t ip_mscount;
+ mach_port_rights_t ip_srights;
+ mach_port_rights_t ip_sorights;
+
+ struct ipc_port *ip_nsrequest;
+ struct ipc_port *ip_pdrequest;
+ struct ipc_port_request *ip_dnrequests;
+
+ struct ipc_pset *ip_pset;
+ mach_port_seqno_t ip_seqno; /* locked by message queue */
+ mach_port_msgcount_t ip_msgcount;
+ mach_port_msgcount_t ip_qlimit;
+ struct ipc_thread_queue ip_blocked;
+
+#if NORMA_IPC
+ unsigned long ip_norma_uid;
+ unsigned long ip_norma_dest_node;
+ long ip_norma_stransit;
+ long ip_norma_sotransit;
+ long ip_norma_xmm_object_refs;
+ boolean_t ip_norma_is_proxy;
+ boolean_t ip_norma_is_special;
+ struct ipc_port *ip_norma_atrium;
+ struct ipc_port *ip_norma_queue_next;
+ struct ipc_port *ip_norma_xmm_object;
+ struct ipc_port *ip_norma_next;
+ long ip_norma_spare1;
+ long ip_norma_spare2;
+ long ip_norma_spare3;
+ long ip_norma_spare4;
+#endif /* NORMA_IPC */
+};
+
+#define ip_object ip_target.ipt_object
+#define ip_receiver_name ip_target.ipt_name
+#define ip_messages ip_target.ipt_messages
+#define ip_references ip_object.io_references
+#define ip_bits ip_object.io_bits
+#define ip_receiver data.receiver
+#define ip_destination data.destination
+#define ip_timestamp data.timestamp
+
+#define IP_NULL ((ipc_port_t) IO_NULL)
+#define IP_DEAD ((ipc_port_t) IO_DEAD)
+
+#define IP_VALID(port) IO_VALID(&(port)->ip_object)
+
+#define ip_active(port) io_active(&(port)->ip_object)
+#define ip_lock_init(port) io_lock_init(&(port)->ip_object)
+#define ip_lock(port) io_lock(&(port)->ip_object)
+#define ip_lock_try(port) io_lock_try(&(port)->ip_object)
+#define ip_unlock(port) io_unlock(&(port)->ip_object)
+#define ip_check_unlock(port) io_check_unlock(&(port)->ip_object)
+#define ip_reference(port) io_reference(&(port)->ip_object)
+#define ip_release(port) io_release(&(port)->ip_object)
+
+#define ip_alloc() ((ipc_port_t) io_alloc(IOT_PORT))
+#define ip_free(port) io_free(IOT_PORT, &(port)->ip_object)
+
+#define ip_kotype(port) io_kotype(&(port)->ip_object)
+
+typedef ipc_table_index_t ipc_port_request_index_t;
+
+typedef struct ipc_port_request {
+ union {
+ struct ipc_port *port;
+ ipc_port_request_index_t index;
+ } notify;
+
+ union {
+ mach_port_t name;
+ struct ipc_table_size *size;
+ } name;
+} *ipc_port_request_t;
+
+#define ipr_next notify.index
+#define ipr_size name.size
+
+#define ipr_soright notify.port
+#define ipr_name name.name
+
+#define IPR_NULL ((ipc_port_request_t) 0)
+
+#if MACH_IPC_COMPAT
+/*
+ * For backwards compatibility, the ip_pdrequest field can hold a
+ * send right instead of a send-once right. This is indicated by
+ * the low bit of the pointer. This works because the zone package
+ * guarantees that the two low bits of port pointers are zero.
+ */
+
+#define ip_pdsendp(soright) ((unsigned int)(soright) & 1)
+#define ip_pdsend(soright) ((ipc_port_t)((unsigned int)(soright) &~ 1))
+#define ip_pdsendm(sright) ((ipc_port_t)((unsigned int)(sright) | 1))
+
+/*
+ * For backwards compatibility, the ipr_soright field can hold
+ * a space pointer. This is indicated by the low bit of the pointer.
+ * This works because the zone package guarantees that the two low
+ * bits of port and space pointers are zero.
+ */
+
+#define ipr_spacep(soright) ((unsigned int)(soright) & 1)
+#define ipr_space(soright) ((ipc_space_t)((unsigned int)(soright) &~ 1))
+#define ipr_spacem(space) ((ipc_port_t)((unsigned int)(space) | 1))
+#endif /* MACH_IPC_COMPAT */
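Both compatibility encodings above rely on the same trick: because port and space pointers are at least 4-byte aligned, bit 0 is free to act as a type tag. A small stand-alone sketch of tag, test, and untag (illustrative ex_ names; unsigned long is used here instead of the kernel's unsigned int so the sketch also builds on 64-bit hosts):

	/* Stand-alone sketch of low-bit pointer tagging; ex_ names are illustrative. */
	#include <assert.h>
	#include <stdio.h>

	#define ex_tagged(p)	((unsigned long)(p) & 1)
	#define ex_untag(p)	((void *)((unsigned long)(p) & ~1UL))
	#define ex_tag(p)	((void *)((unsigned long)(p) | 1))

	int
	main(void)
	{
		int object;			/* stands in for a port or space */
		void *plain = &object;
		void *tagged = ex_tag(&object);

		assert(!ex_tagged(plain));
		assert(ex_tagged(tagged));
		assert(ex_untag(tagged) == (void *) &object);
		printf("plain %p, tagged %p\n", plain, tagged);
		return 0;
	}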
+
+/*
+ * Taking the ipc_port_multiple lock grants the privilege
+ * to lock multiple ports at once. No ports may be locked
+ * when it is taken.
+ */
+
+decl_simple_lock_data(extern, ipc_port_multiple_lock_data)
+
+#define ipc_port_multiple_lock_init() \
+ simple_lock_init(&ipc_port_multiple_lock_data)
+
+#define ipc_port_multiple_lock() \
+ simple_lock(&ipc_port_multiple_lock_data)
+
+#define ipc_port_multiple_unlock() \
+ simple_unlock(&ipc_port_multiple_lock_data)
+
+/*
+ * The port timestamp facility provides timestamps
+ * for port destruction. It is used to serialize
+ * mach_port_names with port death.
+ */
+
+decl_simple_lock_data(extern, ipc_port_timestamp_lock_data)
+extern ipc_port_timestamp_t ipc_port_timestamp_data;
+
+#define ipc_port_timestamp_lock_init() \
+ simple_lock_init(&ipc_port_timestamp_lock_data)
+
+#define ipc_port_timestamp_lock() \
+ simple_lock(&ipc_port_timestamp_lock_data)
+
+#define ipc_port_timestamp_unlock() \
+ simple_unlock(&ipc_port_timestamp_lock_data)
+
+extern ipc_port_timestamp_t
+ipc_port_timestamp();
+
+/*
+ * Compares two timestamps, and returns TRUE if one
+ * happened before two. Note that this formulation
+ * works when the timestamp wraps around at 2^32,
+ * as long as one and two aren't too far apart.
+ */
+
+#define IP_TIMESTAMP_ORDER(one, two) ((int) ((one) - (two)) < 0)
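The subtract-then-test-the-sign comparison keeps working across the 2^32 wraparound because unsigned subtraction is modular; it only misorders stamps that are roughly 2^31 apart. A quick stand-alone check of both the ordinary and the wrapped case (EX_ name is illustrative):

	/* Stand-alone check of the wraparound-tolerant timestamp comparison. */
	#include <assert.h>

	#define EX_TIMESTAMP_ORDER(one, two)	((int) ((one) - (two)) < 0)

	int
	main(void)
	{
		unsigned int a = 5, b = 7;
		unsigned int near_wrap = 0xfffffffeU, wrapped = 2;

		assert(EX_TIMESTAMP_ORDER(a, b));		/* 5 happened before 7 */
		assert(!EX_TIMESTAMP_ORDER(b, a));
		assert(EX_TIMESTAMP_ORDER(near_wrap, wrapped));	/* still ordered across 2^32 */
		return 0;
	}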
+
+#define ipc_port_translate_receive(space, name, portp) \
+ ipc_object_translate((space), (name), \
+ MACH_PORT_RIGHT_RECEIVE, \
+ (ipc_object_t *) (portp))
+
+#define ipc_port_translate_send(space, name, portp) \
+ ipc_object_translate((space), (name), \
+ MACH_PORT_RIGHT_SEND, \
+ (ipc_object_t *) (portp))
+
+extern kern_return_t
+ipc_port_dnrequest(/* ipc_port_t, mach_port_t, ipc_port_t,
+ ipc_port_request_index_t * */);
+
+extern kern_return_t
+ipc_port_dngrow(/* ipc_port_t */);
+
+extern ipc_port_t
+ipc_port_dncancel(/* ipc_port_t, mach_port_t, ipc_port_request_index_t */);
+
+#define ipc_port_dnrename(port, index, oname, nname) \
+MACRO_BEGIN \
+ ipc_port_request_t ipr, table; \
+ \
+ assert(ip_active(port)); \
+ \
+ table = port->ip_dnrequests; \
+ assert(table != IPR_NULL); \
+ \
+ ipr = &table[index]; \
+ assert(ipr->ipr_name == oname); \
+ \
+ ipr->ipr_name = nname; \
+MACRO_END
+
+/* Make a port-deleted request */
+extern void ipc_port_pdrequest(
+ ipc_port_t port,
+ ipc_port_t notify,
+ ipc_port_t *previousp);
+
+/* Make a no-senders request */
+extern void ipc_port_nsrequest(
+ ipc_port_t port,
+ mach_port_mscount_t sync,
+ ipc_port_t notify,
+ ipc_port_t *previousp);
+
+/* Change a port's queue limit */
+extern void ipc_port_set_qlimit(
+ ipc_port_t port,
+ mach_port_msgcount_t qlimit);
+
+#define ipc_port_set_mscount(port, mscount) \
+MACRO_BEGIN \
+ assert(ip_active(port)); \
+ \
+ (port)->ip_mscount = (mscount); \
+MACRO_END
+
+extern struct ipc_mqueue *
+ipc_port_lock_mqueue(/* ipc_port_t */);
+
+extern void
+ipc_port_set_seqno(/* ipc_port_t, mach_port_seqno_t */);
+
+extern void
+ipc_port_clear_receiver(/* ipc_port_t */);
+
+extern void
+ipc_port_init(/* ipc_port_t, ipc_space_t, mach_port_t */);
+
+extern kern_return_t
+ipc_port_alloc(/* ipc_space_t, mach_port_t *, ipc_port_t * */);
+
+extern kern_return_t
+ipc_port_alloc_name(/* ipc_space_t, mach_port_t, ipc_port_t * */);
+
+extern void
+ipc_port_destroy(/* ipc_port_t */);
+
+extern boolean_t
+ipc_port_check_circularity(/* ipc_port_t, ipc_port_t */);
+
+extern ipc_port_t
+ipc_port_lookup_notify(/* ipc_space_t, mach_port_t */);
+
+extern ipc_port_t
+ipc_port_make_send(/* ipc_port_t */);
+
+extern ipc_port_t
+ipc_port_copy_send(/* ipc_port_t */);
+
+extern mach_port_t
+ipc_port_copyout_send(/* ipc_port_t, ipc_space_t */);
+
+extern void
+ipc_port_release_send(/* ipc_port_t */);
+
+extern ipc_port_t
+ipc_port_make_sonce(/* ipc_port_t */);
+
+extern void
+ipc_port_release_sonce(/* ipc_port_t */);
+
+extern void
+ipc_port_release_receive(/* ipc_port_t */);
+
+extern ipc_port_t
+ipc_port_alloc_special(/* ipc_space_t */);
+
+extern void
+ipc_port_dealloc_special(/* ipc_port_t */);
+
+#define ipc_port_alloc_kernel() \
+ ipc_port_alloc_special(ipc_space_kernel)
+#define ipc_port_dealloc_kernel(port) \
+ ipc_port_dealloc_special((port), ipc_space_kernel)
+
+#define ipc_port_alloc_reply() \
+ ipc_port_alloc_special(ipc_space_reply)
+#define ipc_port_dealloc_reply(port) \
+ ipc_port_dealloc_special((port), ipc_space_reply)
+
+#define ipc_port_reference(port) \
+ ipc_object_reference(&(port)->ip_object)
+
+#define ipc_port_release(port) \
+ ipc_object_release(&(port)->ip_object)
+
+#if MACH_IPC_COMPAT
+
+extern kern_return_t
+ipc_port_alloc_compat(/* ipc_space_t, mach_port_t *, ipc_port_t * */);
+
+extern mach_port_t
+ipc_port_copyout_send_compat(/* ipc_port_t, ipc_space_t */);
+
+extern mach_port_t
+ipc_port_copyout_receiver(/* ipc_port_t, ipc_space_t */);
+
+#endif /* MACH_IPC_COMPAT */
+
+extern void
+ipc_port_print(/* ipc_port_t */);
+
+#if NORMA_IPC
+
+#define IP_NORMA_IS_PROXY(port) ((port)->ip_norma_is_proxy)
+
+/*
+ * A proxy never has a real nsrequest, but it always has a fake
+ * nsrequest so that the norma ipc system is notified when there
+ * are no send rights for a proxy. A fake nsrequest is indicated by
+ * the low bit of the pointer. This works because the zone package
+ * guarantees that the two low bits of port pointers are zero.
+ */
+
+#define ip_nsproxyp(nsrequest) ((unsigned int)(nsrequest) & 1)
+#define ip_nsproxy(nsrequest) ((ipc_port_t)((unsigned int)(nsrequest) &~ 1))
+#define ip_nsproxym(proxy) ((ipc_port_t)((unsigned int)(proxy) | 1))
+
+#endif /* NORMA_IPC */
+
+#endif /* _IPC_IPC_PORT_H_ */
diff --git a/ipc/ipc_pset.c b/ipc/ipc_pset.c
new file mode 100644
index 00000000..57705d6c
--- /dev/null
+++ b/ipc/ipc_pset.c
@@ -0,0 +1,349 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_pset.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC port sets.
+ */
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_space.h>
+
+
+/*
+ * Routine: ipc_pset_alloc
+ * Purpose:
+ * Allocate a port set.
+ * Conditions:
+ * Nothing locked. If successful, the port set is returned
+ * locked. (The caller doesn't have a reference.)
+ * Returns:
+ * KERN_SUCCESS The port set is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_pset_alloc(
+ ipc_space_t space,
+ mach_port_t *namep,
+ ipc_pset_t *psetp)
+{
+ ipc_pset_t pset;
+ mach_port_t name;
+ kern_return_t kr;
+
+ kr = ipc_object_alloc(space, IOT_PORT_SET,
+ MACH_PORT_TYPE_PORT_SET, 0,
+ &name, (ipc_object_t *) &pset);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* pset is locked */
+
+ ipc_target_init(&pset->ips_target, name);
+
+ *namep = name;
+ *psetp = pset;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_pset_alloc_name
+ * Purpose:
+ * Allocate a port set, with a specific name.
+ * Conditions:
+ * Nothing locked. If successful, the port set is returned
+ * locked. (The caller doesn't have a reference.)
+ * Returns:
+ * KERN_SUCCESS The port set is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_pset_alloc_name(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_pset_t *psetp)
+{
+ ipc_pset_t pset;
+ kern_return_t kr;
+
+
+ kr = ipc_object_alloc_name(space, IOT_PORT_SET,
+ MACH_PORT_TYPE_PORT_SET, 0,
+ name, (ipc_object_t *) &pset);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* pset is locked */
+
+ ipc_target_init(&pset->ips_target, name);
+
+ *psetp = pset;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_pset_add
+ * Purpose:
+ * Puts a port into a port set.
+ * The port set gains a reference.
+ * Conditions:
+ * Both port and port set are locked and active.
+ * The port isn't already in a set.
+ * The owner of the port set is also the receiver for the port.
+ */
+
+void
+ipc_pset_add(
+ ipc_pset_t pset,
+ ipc_port_t port)
+{
+ assert(ips_active(pset));
+ assert(ip_active(port));
+ assert(port->ip_pset == IPS_NULL);
+
+ port->ip_pset = pset;
+ port->ip_cur_target = &pset->ips_target;
+ ips_reference(pset);
+
+ imq_lock(&port->ip_messages);
+ imq_lock(&pset->ips_messages);
+
+ /* move messages from port's queue to the port set's queue */
+
+ ipc_mqueue_move(&pset->ips_messages, &port->ip_messages, port);
+ imq_unlock(&pset->ips_messages);
+ assert(ipc_kmsg_queue_empty(&port->ip_messages.imq_messages));
+
+ /* wake up threads waiting to receive from the port */
+
+ ipc_mqueue_changed(&port->ip_messages, MACH_RCV_PORT_CHANGED);
+ assert(ipc_thread_queue_empty(&port->ip_messages.imq_threads));
+ imq_unlock(&port->ip_messages);
+}
+
+/*
+ * Routine: ipc_pset_remove
+ * Purpose:
+ * Removes a port from a port set.
+ * The port set loses a reference.
+ * Conditions:
+ * Both port and port set are locked.
+ * The port must be active.
+ */
+
+void
+ipc_pset_remove(
+ ipc_pset_t pset,
+ ipc_port_t port)
+{
+ assert(ip_active(port));
+ assert(port->ip_pset == pset);
+
+ port->ip_pset = IPS_NULL;
+ port->ip_cur_target = &port->ip_target;
+ ips_release(pset);
+
+ imq_lock(&port->ip_messages);
+ imq_lock(&pset->ips_messages);
+
+ /* move messages from port set's queue to the port's queue */
+
+ ipc_mqueue_move(&port->ip_messages, &pset->ips_messages, port);
+
+ imq_unlock(&pset->ips_messages);
+ imq_unlock(&port->ip_messages);
+}
+
+/*
+ * Routine: ipc_pset_move
+ * Purpose:
+ * If nset is IPS_NULL, removes port
+ * from the port set it is in. Otherwise, adds
+ * port to nset, removing it from any set
+ * it might already be in.
+ * Conditions:
+ * The space is read-locked.
+ * Returns:
+ * KERN_SUCCESS Moved the port.
+ * KERN_NOT_IN_SET nset is null and port isn't in a set.
+ */
+
+kern_return_t
+ipc_pset_move(
+ ipc_space_t space,
+ ipc_port_t port,
+ ipc_pset_t nset)
+{
+ ipc_pset_t oset;
+
+ /*
+ * While we've got the space locked, it holds refs for
+ * the port and nset (because of the entries). Also,
+ * they must be alive. While we've got port locked, it
+ * holds a ref for oset, which might not be alive.
+ */
+
+ ip_lock(port);
+ assert(ip_active(port));
+
+ oset = port->ip_pset;
+
+ if (oset == nset) {
+ /* the port is already in the new set: a noop */
+
+ is_read_unlock(space);
+ } else if (oset == IPS_NULL) {
+ /* just add port to the new set */
+
+ ips_lock(nset);
+ assert(ips_active(nset));
+ is_read_unlock(space);
+
+ ipc_pset_add(nset, port);
+
+ ips_unlock(nset);
+ } else if (nset == IPS_NULL) {
+ /* just remove port from the old set */
+
+ is_read_unlock(space);
+ ips_lock(oset);
+
+ ipc_pset_remove(oset, port);
+
+ if (ips_active(oset))
+ ips_unlock(oset);
+ else {
+ ips_check_unlock(oset);
+ oset = IPS_NULL; /* trigger KERN_NOT_IN_SET */
+ }
+ } else {
+ /* atomically move port from oset to nset */
+
+ if (oset < nset) {
+ ips_lock(oset);
+ ips_lock(nset);
+ } else {
+ ips_lock(nset);
+ ips_lock(oset);
+ }
+
+ is_read_unlock(space);
+ assert(ips_active(nset));
+
+ ipc_pset_remove(oset, port);
+ ipc_pset_add(nset, port);
+
+ ips_unlock(nset);
+ ips_check_unlock(oset); /* KERN_NOT_IN_SET not a possibility */
+ }
+
+ ip_unlock(port);
+
+ return (((nset == IPS_NULL) && (oset == IPS_NULL)) ?
+ KERN_NOT_IN_SET : KERN_SUCCESS);
+}
+
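
The oset/nset comparison in ipc_pset_move above is the standard discipline for taking two locks of the same class without deadlock: pick one global order (here, by address) and always acquire in that order. A standalone sketch of the idea, using POSIX mutexes instead of the kernel's locks (names are illustrative only):

#include <pthread.h>
#include <stdint.h>

struct pset { pthread_mutex_t lock; };

/* Acquire two locks in a single global order -- by object address -- so two
 * threads locking the same pair can never each hold one lock while waiting
 * for the other. */
static void lock_pair(struct pset *a, struct pset *b)
{
	if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void unlock_pair(struct pset *a, struct pset *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct pset x = { PTHREAD_MUTEX_INITIALIZER };
	struct pset y = { PTHREAD_MUTEX_INITIALIZER };

	lock_pair(&x, &y);
	/* ... move a port from one set to the other ... */
	unlock_pair(&x, &y);
	return 0;
}
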
+/*
+ * Routine: ipc_pset_destroy
+ * Purpose:
+ * Destroys a port_set.
+ *
+ * Doesn't remove members from the port set;
+ * that happens lazily. As members are removed,
+ * their messages are removed from the queue.
+ * Conditions:
+ * The port_set is locked and alive.
+ * The caller has a reference, which is consumed.
+ * Afterwards, the port_set is unlocked and dead.
+ */
+
+void
+ipc_pset_destroy(
+ ipc_pset_t pset)
+{
+ assert(ips_active(pset));
+
+ pset->ips_object.io_bits &= ~IO_BITS_ACTIVE;
+
+ imq_lock(&pset->ips_messages);
+ ipc_mqueue_changed(&pset->ips_messages, MACH_RCV_PORT_DIED);
+ imq_unlock(&pset->ips_messages);
+
+ /* Common destruction for the IPC target. */
+ ipc_target_terminate(&pset->ips_target);
+
+ ips_release(pset); /* consume the ref our caller gave us */
+ ips_check_unlock(pset);
+}
+
+#include <mach_kdb.h>
+
+
+#if MACH_KDB
+#define printf kdbprintf
+
+/*
+ * Routine: ipc_pset_print
+ * Purpose:
+ * Pretty-print a port set for kdb.
+ */
+
+void
+ipc_pset_print(
+ ipc_pset_t pset)
+{
+ extern int indent;
+
+ printf("pset 0x%x\n", pset);
+
+ indent += 2;
+
+ ipc_object_print(&pset->ips_object);
+ iprintf("local_name = 0x%x\n", pset->ips_local_name);
+ iprintf("kmsgs = 0x%x", pset->ips_messages.imq_messages.ikmq_base);
+ printf(",rcvrs = 0x%x\n", pset->ips_messages.imq_threads.ithq_base);
+
+	indent -= 2;
+}
+
+#endif /* MACH_KDB */
diff --git a/ipc/ipc_pset.h b/ipc/ipc_pset.h
new file mode 100644
index 00000000..23e3e25d
--- /dev/null
+++ b/ipc/ipc_pset.h
@@ -0,0 +1,95 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_pset.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for port sets.
+ */
+
+#ifndef _IPC_IPC_PSET_H_
+#define _IPC_IPC_PSET_H_
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_mqueue.h>
+#include "ipc_target.h"
+
+typedef struct ipc_pset {
+ struct ipc_target ips_target;
+
+} *ipc_pset_t;
+
+#define ips_object ips_target.ipt_object
+#define ips_local_name ips_target.ipt_name
+#define ips_messages ips_target.ipt_messages
+#define ips_references ips_object.io_references
+
+#define IPS_NULL ((ipc_pset_t) IO_NULL)
+
+#define ips_active(pset) io_active(&(pset)->ips_object)
+#define ips_lock(pset) io_lock(&(pset)->ips_object)
+#define ips_lock_try(pset) io_lock_try(&(pset)->ips_object)
+#define ips_unlock(pset) io_unlock(&(pset)->ips_object)
+#define ips_check_unlock(pset) io_check_unlock(&(pset)->ips_object)
+#define ips_reference(pset) io_reference(&(pset)->ips_object)
+#define ips_release(pset) io_release(&(pset)->ips_object)
+
+extern kern_return_t
+ipc_pset_alloc(/* ipc_space_t, mach_port_t *, ipc_pset_t * */);
+
+extern kern_return_t
+ipc_pset_alloc_name(/* ipc_space_t, mach_port_t, ipc_pset_t * */);
+
+extern void
+ipc_pset_add(/* ipc_pset_t, ipc_port_t */);
+
+extern void
+ipc_pset_remove(/* ipc_pset_t, ipc_port_t */);
+
+extern kern_return_t
+ipc_pset_move(/* ipc_space_t, ipc_port_t, ipc_pset_t */);
+
+extern void
+ipc_pset_destroy(/* ipc_pset_t */);
+
+#define ipc_pset_reference(pset) \
+ ipc_object_reference(&(pset)->ips_object)
+
+#define ipc_pset_release(pset) \
+ ipc_object_release(&(pset)->ips_object)
+
+extern void
+ipc_pset_print(/* ipc_pset_t */);
+
+#endif /* _IPC_IPC_PSET_H_ */
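
The ips_* macros above implement nothing themselves; they forward to the ipc_object embedded (through ips_target) in every port set, so a set reuses the generic object locking and reference counting. A toy sketch of that embed-and-forward pattern -- the types and macro names below are invented for illustration, not the real Mach declarations:

#include <assert.h>

/* A common header shared by every object type... */
struct obj_header { int io_refs; int io_active; };

/* ...embedded inside each subtype... */
struct toy_pset {
	struct obj_header hdr;
	int member_count;
};

/* ...with per-type macros that simply delegate to the header, the way
 * ips_reference() and ips_active() delegate to ips_object. */
#define tps_active(p)		((p)->hdr.io_active)
#define tps_reference(p)	((p)->hdr.io_refs++)
#define tps_release(p)		((p)->hdr.io_refs--)

int main(void)
{
	struct toy_pset s = { { 1, 1 }, 0 };

	tps_reference(&s);
	assert(tps_active(&s) && s.hdr.io_refs == 2);
	tps_release(&s);
	return 0;
}
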
diff --git a/ipc/ipc_right.c b/ipc/ipc_right.c
new file mode 100644
index 00000000..54cd99f5
--- /dev/null
+++ b/ipc/ipc_right.c
@@ -0,0 +1,2762 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_right.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC capabilities.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <kern/assert.h>
+#include <ipc/port.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_notify.h>
+
+
+
+/*
+ * Routine: ipc_right_lookup_write
+ * Purpose:
+ * Finds an entry in a space, given the name.
+ * Conditions:
+ * Nothing locked. If successful, the space is write-locked.
+ * Returns:
+ * KERN_SUCCESS Found an entry.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ */
+
+kern_return_t
+ipc_right_lookup_write(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t *entryp)
+{
+ ipc_entry_t entry;
+
+ assert(space != IS_NULL);
+
+ is_write_lock(space);
+
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return KERN_INVALID_TASK;
+ }
+
+ if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+ }
+
+ *entryp = entry;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_reverse
+ * Purpose:
+ * Translate (space, object) -> (name, entry).
+ * Only finds send/receive rights.
+ * Returns TRUE if an entry is found; if so,
+ * the object is locked and active.
+ * Conditions:
+ * The space must be locked (read or write) and active.
+ * Nothing else locked.
+ */
+
+boolean_t
+ipc_right_reverse(
+ ipc_space_t space,
+ ipc_object_t object,
+ mach_port_t *namep,
+ ipc_entry_t *entryp)
+{
+ ipc_port_t port;
+ mach_port_t name;
+ ipc_entry_t entry;
+
+ /* would switch on io_otype to handle multiple types of object */
+
+ assert(space->is_active);
+ assert(io_otype(object) == IOT_PORT);
+
+ port = (ipc_port_t) object;
+
+ ip_lock(port);
+ if (!ip_active(port)) {
+ ip_unlock(port);
+
+ return FALSE;
+ }
+
+ if (port->ip_receiver == space) {
+ name = port->ip_receiver_name;
+ assert(name != MACH_PORT_NULL);
+
+ entry = ipc_entry_lookup(space, name);
+
+ assert(entry != IE_NULL);
+ assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
+ assert(port == (ipc_port_t) entry->ie_object);
+
+ *namep = name;
+ *entryp = entry;
+ return TRUE;
+ }
+
+ if (ipc_hash_lookup(space, (ipc_object_t) port, namep, entryp)) {
+ assert((entry = *entryp) != IE_NULL);
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
+ assert(port == (ipc_port_t) entry->ie_object);
+
+ return TRUE;
+ }
+
+ ip_unlock(port);
+ return FALSE;
+}
+
+/*
+ * Routine: ipc_right_dnrequest
+ * Purpose:
+ * Make a dead-name request, returning the previously
+ * registered send-once right. If notify is IP_NULL,
+ * just cancels the previously registered request.
+ *
+ * This interacts with the IE_BITS_COMPAT, because they
+ * both use ie_request. If this is a compat entry, then
+ * previous always gets IP_NULL. If notify is IP_NULL,
+ * then the entry remains a compat entry. Otherwise
+ * the real dead-name request is registered and the entry
+ * is no longer a compat entry.
+ * Conditions:
+ * Nothing locked. May allocate memory.
+ * Only consumes/returns refs if successful.
+ * Returns:
+ * KERN_SUCCESS Made/canceled dead-name request.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ * KERN_INVALID_RIGHT Name doesn't denote port/dead rights.
+ * KERN_INVALID_ARGUMENT Name denotes dead name, but
+ * immediate is FALSE or notify is IP_NULL.
+ * KERN_UREFS_OVERFLOW Name denotes dead name, but
+ * generating immediate notif. would overflow urefs.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_right_dnrequest(
+ ipc_space_t space,
+ mach_port_t name,
+ boolean_t immediate,
+ ipc_port_t notify,
+ ipc_port_t *previousp)
+{
+ ipc_port_t previous;
+
+ for (;;) {
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ kern_return_t kr;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ bits = entry->ie_bits;
+ if (bits & MACH_PORT_TYPE_PORT_RIGHTS) {
+ ipc_port_t port;
+ ipc_port_request_index_t request;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (!ipc_right_check(space, port, name, entry)) {
+ /* port is locked and active */
+
+ if (notify == IP_NULL) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT) {
+ assert(entry->ie_request != 0);
+
+ previous = IP_NULL;
+ } else
+#endif /* MACH_IPC_COMPAT */
+ previous = ipc_right_dncancel_macro(
+ space, port, name, entry);
+
+ ip_unlock(port);
+ is_write_unlock(space);
+ break;
+ }
+
+ /*
+ * If a registered soright exists,
+ * want to atomically switch with it.
+ * If ipc_port_dncancel finds us a
+ * soright, then the following
+ * ipc_port_dnrequest will reuse
+ * that slot, so we are guaranteed
+ * not to unlock and retry.
+ */
+
+ previous = ipc_right_dncancel_macro(space,
+ port, name, entry);
+
+ kr = ipc_port_dnrequest(port, name, notify,
+ &request);
+ if (kr != KERN_SUCCESS) {
+ assert(previous == IP_NULL);
+ is_write_unlock(space);
+
+ kr = ipc_port_dngrow(port);
+ /* port is unlocked */
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ continue;
+ }
+
+ assert(request != 0);
+ ip_unlock(port);
+
+ entry->ie_request = request;
+#if MACH_IPC_COMPAT
+ entry->ie_bits = bits &~ IE_BITS_COMPAT;
+#endif /* MACH_IPC_COMPAT */
+ is_write_unlock(space);
+ break;
+ }
+
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT) {
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+ }
+#endif /* MACH_IPC_COMPAT */
+
+ bits = entry->ie_bits;
+ assert(bits & MACH_PORT_TYPE_DEAD_NAME);
+ }
+
+ if ((bits & MACH_PORT_TYPE_DEAD_NAME) &&
+ immediate && (notify != IP_NULL)) {
+ mach_port_urefs_t urefs = IE_BITS_UREFS(bits);
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+ assert(urefs > 0);
+
+ if (MACH_PORT_UREFS_OVERFLOW(urefs, 1)) {
+ is_write_unlock(space);
+ return KERN_UREFS_OVERFLOW;
+ }
+
+ entry->ie_bits = bits + 1; /* increment urefs */
+ is_write_unlock(space);
+
+ ipc_notify_dead_name(notify, name);
+ previous = IP_NULL;
+ break;
+ }
+
+ is_write_unlock(space);
+ if (bits & MACH_PORT_TYPE_PORT_OR_DEAD)
+ return KERN_INVALID_ARGUMENT;
+ else
+ return KERN_INVALID_RIGHT;
+ }
+
+ *previousp = previous;
+ return KERN_SUCCESS;
+}
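
When ipc_port_dnrequest reports no free slot above, the routine drops its locks, grows the port's request table with ipc_port_dngrow, and then redoes the whole lookup, since anything may have changed while the locks were down. A self-contained sketch of that unlock/allocate/re-validate/retry shape (plain C and POSIX threads; table, table_grow and reserve_slot are invented names, not Mach interfaces):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct table {
	pthread_mutex_t lock;
	int *slots;
	int nslots;
	int used;
};

/* Allocate the bigger array unlocked (allocation may block), then install
 * it under the lock, tolerating the case where another thread grew the
 * table while we slept. */
static int table_grow(struct table *t, int old_nslots)
{
	int newsize = old_nslots + 16;
	int *bigger = calloc((size_t)newsize, sizeof *bigger);

	if (bigger == NULL)
		return -1;

	pthread_mutex_lock(&t->lock);
	if (t->nslots == old_nslots) {
		if (t->used > 0)
			memcpy(bigger, t->slots, (size_t)t->used * sizeof *bigger);
		free(t->slots);
		t->slots = bigger;
		t->nslots = newsize;
		pthread_mutex_unlock(&t->lock);
	} else {
		pthread_mutex_unlock(&t->lock);
		free(bigger);		/* lost the race: someone else grew it */
	}
	return 0;
}

/* Mirrors the retry loop above: lock, try, and if the resource is
 * exhausted, unlock, grow, and start the whole lookup over. */
static int reserve_slot(struct table *t)
{
	for (;;) {
		int capacity;

		pthread_mutex_lock(&t->lock);
		if (t->used < t->nslots) {
			int slot = t->used++;
			pthread_mutex_unlock(&t->lock);
			return slot;
		}
		capacity = t->nslots;
		pthread_mutex_unlock(&t->lock);	/* drop the lock before allocating */

		if (table_grow(t, capacity) != 0)
			return -1;	/* the KERN_RESOURCE_SHORTAGE case */
	}
}

int main(void)
{
	struct table t = { PTHREAD_MUTEX_INITIALIZER, NULL, 0, 0 };
	int slot = reserve_slot(&t);

	free(t.slots);
	return slot == 0 ? 0 : 1;
}
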
+
+/*
+ * Routine: ipc_right_dncancel
+ * Purpose:
+ * Cancel a dead-name request and return the send-once right.
+ * Afterwards, entry->ie_request == 0.
+ * Conditions:
+ * The space must be write-locked; the port must be locked.
+ * The port must be active; the space doesn't have to be.
+ */
+
+ipc_port_t
+ipc_right_dncancel(
+ ipc_space_t space,
+ ipc_port_t port,
+ mach_port_t name,
+ ipc_entry_t entry)
+{
+ ipc_port_t dnrequest;
+
+ assert(ip_active(port));
+ assert(port == (ipc_port_t) entry->ie_object);
+
+ dnrequest = ipc_port_dncancel(port, name, entry->ie_request);
+ entry->ie_request = 0;
+
+#if MACH_IPC_COMPAT
+ assert(!ipr_spacep(dnrequest) == !(entry->ie_bits & IE_BITS_COMPAT));
+
+ /* if this is actually a space ptr, just release the ref */
+
+ if (entry->ie_bits & IE_BITS_COMPAT) {
+ assert(space == ipr_space(dnrequest));
+
+ is_release(space);
+ dnrequest = IP_NULL;
+ }
+#endif /* MACH_IPC_COMPAT */
+
+ return dnrequest;
+}
+
+/*
+ * Routine: ipc_right_inuse
+ * Purpose:
+ * Check if an entry is being used.
+ * Returns TRUE if it is.
+ * Conditions:
+ * The space is write-locked and active.
+ *		It is unlocked if the entry is in use.
+ */
+
+boolean_t
+ipc_right_inuse(space, name, entry)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_entry_t entry;
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
+#if MACH_IPC_COMPAT
+ mach_port_type_t type = IE_BITS_TYPE(bits);
+
+ /*
+ * There is yet hope. If the port has died, we
+ * must clean up the entry so it's as good as new.
+ */
+
+ if ((bits & IE_BITS_COMPAT) &&
+ ((type == MACH_PORT_TYPE_SEND) ||
+ (type == MACH_PORT_TYPE_SEND_ONCE))) {
+ ipc_port_t port;
+ boolean_t active;
+
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert(entry->ie_request != 0);
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ active = ip_active(port);
+ ip_unlock(port);
+
+ if (!active) {
+ if (type == MACH_PORT_TYPE_SEND) {
+ /* clean up msg-accepted request */
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(
+ space, name);
+
+ ipc_hash_delete(
+ space, (ipc_object_t) port,
+ name, entry);
+ } else {
+ assert(IE_BITS_UREFS(bits) == 1);
+ assert(!(bits & IE_BITS_MAREQUEST));
+ }
+
+ ipc_port_release(port);
+
+ entry->ie_request = 0;
+ entry->ie_object = IO_NULL;
+ entry->ie_bits &= ~IE_BITS_RIGHT_MASK;
+
+ return FALSE;
+ }
+ }
+#endif /* MACH_IPC_COMPAT */
+
+ is_write_unlock(space);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/*
+ * Routine: ipc_right_check
+ * Purpose:
+ * Check if the port has died. If it has,
+ * clean up the entry and return TRUE.
+ * Conditions:
+ * The space is write-locked; the port is not locked.
+ * If returns FALSE, the port is also locked and active.
+ * Otherwise, entry is converted to a dead name, freeing
+ * a reference to port.
+ *
+ * [MACH_IPC_COMPAT] If the port is dead, and this is a
+ * compat mode entry, then the port reference is released
+ * and the entry is destroyed. The call returns TRUE,
+ * and the space is left locked.
+ */
+
+boolean_t
+ipc_right_check(space, port, name, entry)
+ ipc_space_t space;
+ ipc_port_t port;
+ mach_port_t name;
+ ipc_entry_t entry;
+{
+ ipc_entry_bits_t bits;
+
+ assert(space->is_active);
+ assert(port == (ipc_port_t) entry->ie_object);
+
+ ip_lock(port);
+ if (ip_active(port))
+ return FALSE;
+ ip_unlock(port);
+
+ /* this was either a pure send right or a send-once right */
+
+ bits = entry->ie_bits;
+ assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ if (bits & MACH_PORT_TYPE_SEND) {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+
+ /* clean up msg-accepted request */
+
+ if (bits & IE_BITS_MAREQUEST) {
+ bits &= ~IE_BITS_MAREQUEST;
+
+ ipc_marequest_cancel(space, name);
+ }
+
+ ipc_hash_delete(space, (ipc_object_t) port, name, entry);
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(bits) == 1);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+ }
+
+ ipc_port_release(port);
+
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT) {
+ assert(entry->ie_request != 0);
+ entry->ie_request = 0;
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+
+ return TRUE;
+ }
+#endif /* MACH_IPC_COMPAT */
+
+ /* convert entry to dead name */
+
+ bits = (bits &~ IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;
+
+ if (entry->ie_request != 0) {
+ assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
+
+ entry->ie_request = 0;
+ bits++; /* increment urefs */
+ }
+
+ entry->ie_bits = bits;
+ entry->ie_object = IO_NULL;
+
+ return TRUE;
+}
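
The entry's ie_bits word packs the right's type and its user-reference count together, which is why the code above can write "bits++" to add a uref and mask in a new type to turn the entry into a dead name. A toy layout that makes the trick explicit (the constants are illustrative, not the real IE_BITS_* values):

#include <assert.h>
#include <stdint.h>

typedef uint32_t entry_bits_t;

/* Illustrative layout: urefs in the low 16 bits, type bits above them. */
#define EB_UREFS_MASK	0x0000ffffu
#define EB_TYPE_MASK	0xffff0000u
#define EB_TYPE_SEND	0x00010000u
#define EB_TYPE_DEAD	0x00020000u

#define EB_UREFS(b)	((b) & EB_UREFS_MASK)
#define EB_TYPE(b)	((b) & EB_TYPE_MASK)

int main(void)
{
	entry_bits_t bits = EB_TYPE_SEND | 3;	/* a send right, three urefs */

	/* "bits++" bumps the uref count without touching the type field. */
	bits++;
	assert(EB_UREFS(bits) == 4 && EB_TYPE(bits) == EB_TYPE_SEND);

	/* Converting to a dead name swaps only the type field, as in the
	 * tail of the routine above. */
	bits = (bits & ~EB_TYPE_MASK) | EB_TYPE_DEAD;
	assert(EB_UREFS(bits) == 4 && EB_TYPE(bits) == EB_TYPE_DEAD);
	return 0;
}
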
+
+/*
+ * Routine: ipc_right_clean
+ * Purpose:
+ * Cleans up an entry in a dead space.
+ * The entry isn't deallocated or removed
+ * from reverse hash tables.
+ * Conditions:
+ * The space is dead and unlocked.
+ */
+
+void
+ipc_right_clean(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+ mach_port_type_t type = IE_BITS_TYPE(bits);
+
+ assert(!space->is_active);
+
+ /*
+ * We can't clean up IE_BITS_MAREQUEST when the space is dead.
+ * This is because ipc_marequest_destroy can't turn off
+ * the bit if the space is dead. Hence, it might be on
+ * even though the marequest has been destroyed. It's OK
+ * not to cancel the marequest, because ipc_marequest_destroy
+ * cancels for us if the space is dead.
+ *
+ * IE_BITS_COMPAT/ipc_right_dncancel doesn't have this
+ * problem, because we check that the port is active. If
+ * we didn't cancel IE_BITS_COMPAT, ipc_port_destroy
+ * would still work, but dead space refs would accumulate
+ * in ip_dnrequests. They would use up slots in
+ * ip_dnrequests and keep the spaces from being freed.
+ */
+
+ switch (type) {
+ case MACH_PORT_TYPE_DEAD_NAME:
+ assert(entry->ie_request == 0);
+ assert(entry->ie_object == IO_NULL);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+ break;
+
+ case MACH_PORT_TYPE_PORT_SET: {
+ ipc_pset_t pset = (ipc_pset_t) entry->ie_object;
+
+ assert(entry->ie_request == 0);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+ assert(pset != IPS_NULL);
+
+ ips_lock(pset);
+ assert(ips_active(pset));
+
+ ipc_pset_destroy(pset); /* consumes ref, unlocks */
+ break;
+ }
+
+ case MACH_PORT_TYPE_SEND:
+ case MACH_PORT_TYPE_RECEIVE:
+ case MACH_PORT_TYPE_SEND_RECEIVE:
+ case MACH_PORT_TYPE_SEND_ONCE: {
+ ipc_port_t port = (ipc_port_t) entry->ie_object;
+ ipc_port_t dnrequest;
+ ipc_port_t nsrequest = IP_NULL;
+ mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+ assert(port != IP_NULL);
+ ip_lock(port);
+
+ if (!ip_active(port)) {
+ ip_release(port);
+ ip_check_unlock(port);
+ break;
+ }
+
+ dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+
+ if (type & MACH_PORT_TYPE_SEND) {
+ assert(port->ip_srights > 0);
+ if (--port->ip_srights == 0) {
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL) {
+ port->ip_nsrequest = IP_NULL;
+ mscount = port->ip_mscount;
+ }
+ }
+ }
+
+ if (type & MACH_PORT_TYPE_RECEIVE) {
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+
+ ipc_port_clear_receiver(port);
+ ipc_port_destroy(port); /* consumes our ref, unlocks */
+ } else if (type & MACH_PORT_TYPE_SEND_ONCE) {
+ assert(port->ip_sorights > 0);
+ ip_unlock(port);
+
+ ipc_notify_send_once(port); /* consumes our ref */
+ } else {
+ assert(port->ip_receiver != space);
+
+ ip_release(port);
+ ip_unlock(port); /* port is active */
+ }
+
+ if (nsrequest != IP_NULL)
+ ipc_notify_no_senders(nsrequest, mscount);
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_clean: strange type");
+#else
+ panic("ipc_right_clean: strange type");
+#endif
+ }
+}
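
Note that the routine above never calls ipc_notify_no_senders or ipc_notify_port_deleted while a lock is held: it records nsrequest/mscount and dnrequest under the port lock and delivers the notifications only after unlocking. A minimal sketch of that decide-under-the-lock, act-after-unlock discipline (toy types and POSIX mutexes, not the Mach notification interfaces):

#include <pthread.h>
#include <stdio.h>

struct port {
	pthread_mutex_t lock;
	int srights;
	int want_no_senders;	/* stand-in for a registered ip_nsrequest */
};

/* Delivering a notification may block or take other locks, so it must run
 * with no port lock held. */
static void send_no_senders_notification(struct port *p)
{
	printf("no more senders for %p\n", (void *)p);
}

static void drop_send_right(struct port *p)
{
	int notify = 0;

	pthread_mutex_lock(&p->lock);
	if (--p->srights == 0 && p->want_no_senders) {
		p->want_no_senders = 0;	/* claim the request under the lock... */
		notify = 1;		/* ...but only record the decision */
	}
	pthread_mutex_unlock(&p->lock);

	if (notify)			/* act once every lock is dropped */
		send_no_senders_notification(p);
}

int main(void)
{
	struct port p = { PTHREAD_MUTEX_INITIALIZER, 1, 1 };

	drop_send_right(&p);
	return 0;
}
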
+
+/*
+ * Routine: ipc_right_destroy
+ * Purpose:
+ * Destroys an entry in a space.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return.
+ * The space must be active.
+ * Returns:
+ * KERN_SUCCESS The entry was destroyed.
+ */
+
+kern_return_t
+ipc_right_destroy(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+ mach_port_type_t type = IE_BITS_TYPE(bits);
+
+ assert(space->is_active);
+
+ switch (type) {
+ case MACH_PORT_TYPE_DEAD_NAME:
+ assert(entry->ie_request == 0);
+ assert(entry->ie_object == IO_NULL);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+ break;
+
+ case MACH_PORT_TYPE_PORT_SET: {
+ ipc_pset_t pset = (ipc_pset_t) entry->ie_object;
+
+ assert(entry->ie_request == 0);
+ assert(pset != IPS_NULL);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+
+ ips_lock(pset);
+ assert(ips_active(pset));
+ is_write_unlock(space);
+
+ ipc_pset_destroy(pset); /* consumes ref, unlocks */
+ break;
+ }
+
+ case MACH_PORT_TYPE_SEND:
+ case MACH_PORT_TYPE_RECEIVE:
+ case MACH_PORT_TYPE_SEND_RECEIVE:
+ case MACH_PORT_TYPE_SEND_ONCE: {
+ ipc_port_t port = (ipc_port_t) entry->ie_object;
+ ipc_port_t nsrequest = IP_NULL;
+ mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+ ipc_port_t dnrequest;
+
+ assert(port != IP_NULL);
+
+ if (bits & IE_BITS_MAREQUEST) {
+ assert(type & MACH_PORT_TYPE_SEND_RECEIVE);
+
+ ipc_marequest_cancel(space, name);
+ }
+
+ if (type == MACH_PORT_TYPE_SEND)
+ ipc_hash_delete(space, (ipc_object_t) port,
+ name, entry);
+
+ ip_lock(port);
+
+ if (!ip_active(port)) {
+ assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
+
+ ip_release(port);
+ ip_check_unlock(port);
+
+ entry->ie_request = 0;
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ return KERN_INVALID_NAME;
+#endif /* MACH_IPC_COMPAT */
+ break;
+ }
+
+ dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+ if (type & MACH_PORT_TYPE_SEND) {
+ assert(port->ip_srights > 0);
+ if (--port->ip_srights == 0) {
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL) {
+ port->ip_nsrequest = IP_NULL;
+ mscount = port->ip_mscount;
+ }
+ }
+ }
+
+ if (type & MACH_PORT_TYPE_RECEIVE) {
+ assert(ip_active(port));
+ assert(port->ip_receiver == space);
+
+ ipc_port_clear_receiver(port);
+ ipc_port_destroy(port); /* consumes our ref, unlocks */
+ } else if (type & MACH_PORT_TYPE_SEND_ONCE) {
+ assert(port->ip_sorights > 0);
+ ip_unlock(port);
+
+ ipc_notify_send_once(port); /* consumes our ref */
+ } else {
+ assert(port->ip_receiver != space);
+
+ ip_release(port);
+ ip_unlock(port);
+ }
+
+ if (nsrequest != IP_NULL)
+ ipc_notify_no_senders(nsrequest, mscount);
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_destroy: strange type");
+#else
+ panic("ipc_right_destroy: strange type");
+#endif
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_dealloc
+ * Purpose:
+ * Releases a send/send-once/dead-name user ref.
+ * Like ipc_right_delta with a delta of -1,
+ * but looks at the entry to determine the right.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return.
+ * The space must be active.
+ * Returns:
+ * KERN_SUCCESS A user ref was released.
+ * KERN_INVALID_RIGHT Entry has wrong type.
+ * KERN_INVALID_NAME [MACH_IPC_COMPAT]
+ * Caller should pretend lookup of entry failed.
+ */
+
+kern_return_t
+ipc_right_dealloc(space, name, entry)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_entry_t entry;
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+ mach_port_type_t type = IE_BITS_TYPE(bits);
+
+ assert(space->is_active);
+
+ switch (type) {
+ case MACH_PORT_TYPE_DEAD_NAME: {
+ dead_name:
+
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert(entry->ie_request == 0);
+ assert(entry->ie_object == IO_NULL);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+
+ if (IE_BITS_UREFS(bits) == 1)
+ ipc_entry_dealloc(space, name, entry);
+ else
+ entry->ie_bits = bits-1; /* decrement urefs */
+
+ is_write_unlock(space);
+ break;
+ }
+
+ case MACH_PORT_TYPE_SEND_ONCE: {
+ ipc_port_t port, dnrequest;
+
+ assert(IE_BITS_UREFS(bits) == 1);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+ bits = entry->ie_bits;
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+ goto dead_name;
+ }
+ /* port is locked and active */
+
+ assert(port->ip_sorights > 0);
+
+ dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+ ip_unlock(port);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+ ipc_notify_send_once(port);
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+ break;
+ }
+
+ case MACH_PORT_TYPE_SEND: {
+ ipc_port_t port;
+ ipc_port_t dnrequest = IP_NULL;
+ ipc_port_t nsrequest = IP_NULL;
+ mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+ bits = entry->ie_bits;
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+ goto dead_name;
+ }
+ /* port is locked and active */
+
+ assert(port->ip_srights > 0);
+
+ if (IE_BITS_UREFS(bits) == 1) {
+ if (--port->ip_srights == 0) {
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL) {
+ port->ip_nsrequest = IP_NULL;
+ mscount = port->ip_mscount;
+ }
+ }
+
+ dnrequest = ipc_right_dncancel_macro(space, port,
+ name, entry);
+
+ ipc_hash_delete(space, (ipc_object_t) port,
+ name, entry);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+
+ ip_release(port);
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ } else
+ entry->ie_bits = bits-1; /* decrement urefs */
+
+ ip_unlock(port); /* even if dropped a ref, port is active */
+ is_write_unlock(space);
+
+ if (nsrequest != IP_NULL)
+ ipc_notify_no_senders(nsrequest, mscount);
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+ break;
+ }
+
+ case MACH_PORT_TYPE_SEND_RECEIVE: {
+ ipc_port_t port;
+ ipc_port_t nsrequest = IP_NULL;
+ mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+ assert(port->ip_srights > 0);
+
+ if (IE_BITS_UREFS(bits) == 1) {
+ if (--port->ip_srights == 0) {
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL) {
+ port->ip_nsrequest = IP_NULL;
+ mscount = port->ip_mscount;
+ }
+ }
+
+ entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK|
+ MACH_PORT_TYPE_SEND);
+ } else
+ entry->ie_bits = bits-1; /* decrement urefs */
+
+ ip_unlock(port);
+ is_write_unlock(space);
+
+ if (nsrequest != IP_NULL)
+ ipc_notify_no_senders(nsrequest, mscount);
+ break;
+ }
+
+ default:
+ is_write_unlock(space);
+ return KERN_INVALID_RIGHT;
+ }
+
+ return KERN_SUCCESS;
+
+#if MACH_IPC_COMPAT
+ invalid_name:
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+#endif /* MACH_IPC_COMPAT */
+}
+
+/*
+ * Routine: ipc_right_delta
+ * Purpose:
+ * Modifies the user-reference count for a right.
+ * May deallocate the right, if the count goes to zero.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return.
+ * The space must be active.
+ * Returns:
+ * KERN_SUCCESS Count was modified.
+ * KERN_INVALID_RIGHT Entry has wrong type.
+ * KERN_INVALID_VALUE Bad delta for the right.
+ * KERN_UREFS_OVERFLOW OK delta, except would overflow.
+ * KERN_INVALID_NAME [MACH_IPC_COMPAT]
+ * Caller should pretend lookup of entry failed.
+ */
+
+kern_return_t
+ipc_right_delta(space, name, entry, right, delta)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_entry_t entry;
+ mach_port_right_t right;
+ mach_port_delta_t delta;
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ assert(space->is_active);
+ assert(right < MACH_PORT_RIGHT_NUMBER);
+
+ /* Rights-specific restrictions and operations. */
+
+ switch (right) {
+ case MACH_PORT_RIGHT_PORT_SET: {
+ ipc_pset_t pset;
+
+ if ((bits & MACH_PORT_TYPE_PORT_SET) == 0)
+ goto invalid_right;
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
+ assert(IE_BITS_UREFS(bits) == 0);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+ assert(entry->ie_request == 0);
+
+ if (delta == 0)
+ goto success;
+
+ if (delta != -1)
+ goto invalid_value;
+
+ pset = (ipc_pset_t) entry->ie_object;
+ assert(pset != IPS_NULL);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+
+ ips_lock(pset);
+ assert(ips_active(pset));
+ is_write_unlock(space);
+
+ ipc_pset_destroy(pset); /* consumes ref, unlocks */
+ break;
+ }
+
+ case MACH_PORT_RIGHT_RECEIVE: {
+ ipc_port_t port;
+ ipc_port_t dnrequest = IP_NULL;
+
+ if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+ goto invalid_right;
+
+ if (delta == 0)
+ goto success;
+
+ if (delta != -1)
+ goto invalid_value;
+
+ if (bits & IE_BITS_MAREQUEST) {
+ bits &= ~IE_BITS_MAREQUEST;
+
+ ipc_marequest_cancel(space, name);
+ }
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ /*
+ * The port lock is needed for ipc_right_dncancel;
+ * otherwise, we wouldn't have to take the lock
+ * until just before dropping the space lock.
+ */
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT) {
+ assert(entry->ie_request != 0);
+ dnrequest = ipc_right_dncancel(space, port,
+ name, entry);
+ assert(dnrequest == IP_NULL);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ } else
+#endif /* MACH_IPC_COMPAT */
+ if (bits & MACH_PORT_TYPE_SEND) {
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_SEND_RECEIVE);
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
+ assert(port->ip_srights > 0);
+
+ /*
+ * The remaining send right turns into a
+ * dead name. Notice we don't decrement
+ * ip_srights, generate a no-senders notif,
+ * or use ipc_right_dncancel, because the
+ * port is destroyed "first".
+ */
+
+ bits &= ~IE_BITS_TYPE_MASK;
+ bits |= MACH_PORT_TYPE_DEAD_NAME;
+
+ if (entry->ie_request != 0) {
+ entry->ie_request = 0;
+ bits++; /* increment urefs */
+ }
+
+ entry->ie_bits = bits;
+ entry->ie_object = IO_NULL;
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ dnrequest = ipc_right_dncancel_macro(space, port,
+ name, entry);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ }
+ is_write_unlock(space);
+
+ ipc_port_clear_receiver(port);
+ ipc_port_destroy(port); /* consumes ref, unlocks */
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_SEND_ONCE: {
+ ipc_port_t port, dnrequest;
+
+ if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0)
+ goto invalid_right;
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(bits) == 1);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+
+ if ((delta > 0) || (delta < -1))
+ goto invalid_value;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+ assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
+ goto invalid_right;
+ }
+ /* port is locked and active */
+
+ assert(port->ip_sorights > 0);
+
+ if (delta == 0) {
+ ip_unlock(port);
+ goto success;
+ }
+
+ dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+ ip_unlock(port);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+ ipc_notify_send_once(port);
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_DEAD_NAME: {
+ mach_port_urefs_t urefs;
+
+ if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
+ ipc_port_t port;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (!ipc_right_check(space, port, name, entry)) {
+ /* port is locked and active */
+ ip_unlock(port);
+ goto invalid_right;
+ }
+
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+ bits = entry->ie_bits;
+ } else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0)
+ goto invalid_right;
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+ assert(entry->ie_object == IO_NULL);
+ assert(entry->ie_request == 0);
+
+ urefs = IE_BITS_UREFS(bits);
+ if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta))
+ goto invalid_value;
+ if (MACH_PORT_UREFS_OVERFLOW(urefs, delta))
+ goto urefs_overflow;
+
+ if ((urefs + delta) == 0)
+ ipc_entry_dealloc(space, name, entry);
+ else
+ entry->ie_bits = bits + delta;
+
+ is_write_unlock(space);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_SEND: {
+ mach_port_urefs_t urefs;
+ ipc_port_t port;
+ ipc_port_t dnrequest = IP_NULL;
+ ipc_port_t nsrequest = IP_NULL;
+ mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+ if ((bits & MACH_PORT_TYPE_SEND) == 0)
+ goto invalid_right;
+
+ /* maximum urefs for send is MACH_PORT_UREFS_MAX-1 */
+
+ urefs = IE_BITS_UREFS(bits);
+ if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta))
+ goto invalid_value;
+ if (MACH_PORT_UREFS_OVERFLOW(urefs+1, delta))
+ goto urefs_overflow;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+ assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
+ goto invalid_right;
+ }
+ /* port is locked and active */
+
+ assert(port->ip_srights > 0);
+
+ if ((urefs + delta) == 0) {
+ if (--port->ip_srights == 0) {
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL) {
+ port->ip_nsrequest = IP_NULL;
+ mscount = port->ip_mscount;
+ }
+ }
+
+ if (bits & MACH_PORT_TYPE_RECEIVE) {
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_SEND_RECEIVE);
+
+ entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK|
+ MACH_PORT_TYPE_SEND);
+ } else {
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_SEND);
+
+ dnrequest = ipc_right_dncancel_macro(
+ space, port, name, entry);
+
+ ipc_hash_delete(space, (ipc_object_t) port,
+ name, entry);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+
+ ip_release(port);
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ }
+ } else
+ entry->ie_bits = bits + delta;
+
+ ip_unlock(port); /* even if dropped a ref, port is active */
+ is_write_unlock(space);
+
+ if (nsrequest != IP_NULL)
+ ipc_notify_no_senders(nsrequest, mscount);
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_delta: strange right");
+#else
+ panic("ipc_right_delta: strange right");
+#endif
+ }
+
+ return KERN_SUCCESS;
+
+ success:
+ is_write_unlock(space);
+ return KERN_SUCCESS;
+
+ invalid_right:
+ is_write_unlock(space);
+ return KERN_INVALID_RIGHT;
+
+ invalid_value:
+ is_write_unlock(space);
+ return KERN_INVALID_VALUE;
+
+ urefs_overflow:
+ is_write_unlock(space);
+ return KERN_UREFS_OVERFLOW;
+
+#if MACH_IPC_COMPAT
+ invalid_name:
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+#endif /* MACH_IPC_COMPAT */
+}
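
The MACH_PORT_UREFS_UNDERFLOW and MACH_PORT_UREFS_OVERFLOW checks above validate a signed delta against an unsigned reference count before it is folded into ie_bits. A standalone sketch of such bounds checks -- the macros here are illustrative stand-ins, not the real Mach definitions:

#include <assert.h>

typedef unsigned int urefs_t;
typedef int          delta_t;

#define UREFS_MAX	((urefs_t)0xffff)	/* illustrative limit */

/* Underflow: a negative delta larger in magnitude than the current count.
 * Overflow: a positive delta that would push the count past the maximum. */
#define UREFS_UNDERFLOW(urefs, delta) \
	((delta) < 0 && (urefs_t)(-(delta)) > (urefs))
#define UREFS_OVERFLOW(urefs, delta) \
	((delta) > 0 && (urefs_t)(delta) > UREFS_MAX - (urefs))

static int apply_delta(urefs_t *urefs, delta_t delta)
{
	if (UREFS_UNDERFLOW(*urefs, delta))
		return -1;		/* the KERN_INVALID_VALUE case */
	if (UREFS_OVERFLOW(*urefs, delta))
		return -2;		/* the KERN_UREFS_OVERFLOW case */
	*urefs = (urefs_t)((delta_t)*urefs + delta);
	return 0;
}

int main(void)
{
	urefs_t u = 5;

	assert(apply_delta(&u, -3) == 0 && u == 2);
	assert(apply_delta(&u, -3) == -1);			/* would underflow */
	assert(apply_delta(&u, (delta_t)UREFS_MAX) == -2);	/* would overflow */
	return 0;
}
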
+
+/*
+ * Routine: ipc_right_info
+ * Purpose:
+ * Retrieves information about the right.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return
+ * if the call is unsuccessful. The space must be active.
+ * Returns:
+ * KERN_SUCCESS Retrieved info; space still locked.
+ */
+
+kern_return_t
+ipc_right_info(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry,
+ mach_port_type_t *typep,
+ mach_port_urefs_t *urefsp)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+ ipc_port_request_index_t request;
+ mach_port_type_t type;
+
+ if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
+ ipc_port_t port = (ipc_port_t) entry->ie_object;
+
+ if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT) {
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+ }
+#endif /* MACH_IPC_COMPAT */
+
+ bits = entry->ie_bits;
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+ } else
+ ip_unlock(port);
+ }
+
+ type = IE_BITS_TYPE(bits);
+ request = entry->ie_request;
+
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ type |= MACH_PORT_TYPE_COMPAT;
+ else
+#endif /* MACH_IPC_COMPAT */
+ if (request != 0)
+ type |= MACH_PORT_TYPE_DNREQUEST;
+ if (bits & IE_BITS_MAREQUEST)
+ type |= MACH_PORT_TYPE_MAREQUEST;
+
+ *typep = type;
+ *urefsp = IE_BITS_UREFS(bits);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_copyin_check
+ * Purpose:
+ * Check if a subsequent ipc_right_copyin would succeed.
+ * Conditions:
+ * The space is locked (read or write) and active.
+ */
+
+boolean_t
+ipc_right_copyin_check(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry,
+ mach_msg_type_name_t msgt_name)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ assert(space->is_active);
+
+ switch (msgt_name) {
+ case MACH_MSG_TYPE_MAKE_SEND:
+ case MACH_MSG_TYPE_MAKE_SEND_ONCE:
+ case MACH_MSG_TYPE_MOVE_RECEIVE:
+ if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+ return FALSE;
+
+ break;
+
+ case MACH_MSG_TYPE_COPY_SEND:
+ case MACH_MSG_TYPE_MOVE_SEND:
+ case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
+ ipc_port_t port;
+ boolean_t active;
+
+ if (bits & MACH_PORT_TYPE_DEAD_NAME)
+ break;
+
+ if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+ return FALSE;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ active = ip_active(port);
+ ip_unlock(port);
+
+ if (!active) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ return FALSE;
+#endif /* MACH_IPC_COMPAT */
+
+ break;
+ }
+
+ if (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
+ if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0)
+ return FALSE;
+ } else {
+ if ((bits & MACH_PORT_TYPE_SEND) == 0)
+ return FALSE;
+ }
+
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_copyin_check: strange rights");
+#else
+ panic("ipc_right_copyin_check: strange rights");
+#endif
+ }
+
+ return TRUE;
+}
+
+/*
+ * Routine: ipc_right_copyin
+ * Purpose:
+ * Copyin a capability from a space.
+ * If successful, the caller gets a ref
+ * for the resulting object, unless it is IO_DEAD,
+ * and possibly a send-once right which should
+ * be used in a port-deleted notification.
+ *
+ * If deadok is not TRUE, the copyin operation
+ * will fail instead of producing IO_DEAD.
+ *
+ * The entry is never deallocated (except
+ * when KERN_INVALID_NAME), so the caller
+ * should deallocate the entry if its type
+ * is MACH_PORT_TYPE_NONE.
+ * Conditions:
+ * The space is write-locked and active.
+ * Returns:
+ * KERN_SUCCESS Acquired an object, possibly IO_DEAD.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_right_copyin(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry,
+ mach_msg_type_name_t msgt_name,
+ boolean_t deadok,
+ ipc_object_t *objectp,
+ ipc_port_t *sorightp)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ assert(space->is_active);
+
+ switch (msgt_name) {
+ case MACH_MSG_TYPE_MAKE_SEND: {
+ ipc_port_t port;
+
+ if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+ goto invalid_right;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+
+ port->ip_mscount++;
+ port->ip_srights++;
+ ip_reference(port);
+ ip_unlock(port);
+
+ *objectp = (ipc_object_t) port;
+ *sorightp = IP_NULL;
+ break;
+ }
+
+ case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
+ ipc_port_t port;
+
+ if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+ goto invalid_right;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+
+ port->ip_sorights++;
+ ip_reference(port);
+ ip_unlock(port);
+
+ *objectp = (ipc_object_t) port;
+ *sorightp = IP_NULL;
+ break;
+ }
+
+ case MACH_MSG_TYPE_MOVE_RECEIVE: {
+ ipc_port_t port;
+ ipc_port_t dnrequest = IP_NULL;
+
+ if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+ goto invalid_right;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+
+ if (bits & MACH_PORT_TYPE_SEND) {
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_SEND_RECEIVE);
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert(port->ip_srights > 0);
+
+ ipc_hash_insert(space, (ipc_object_t) port,
+ name, entry);
+
+ ip_reference(port);
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ dnrequest = ipc_right_dncancel_macro(space, port,
+ name, entry);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+
+ entry->ie_object = IO_NULL;
+ }
+ entry->ie_bits = bits &~ MACH_PORT_TYPE_RECEIVE;
+
+ ipc_port_clear_receiver(port);
+
+ port->ip_receiver_name = MACH_PORT_NULL;
+ port->ip_destination = IP_NULL;
+ ip_unlock(port);
+
+ *objectp = (ipc_object_t) port;
+ *sorightp = dnrequest;
+ break;
+ }
+
+ case MACH_MSG_TYPE_COPY_SEND: {
+ ipc_port_t port;
+
+ if (bits & MACH_PORT_TYPE_DEAD_NAME)
+ goto copy_dead;
+
+ /* allow for dead send-once rights */
+
+ if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+ goto invalid_right;
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+ bits = entry->ie_bits;
+ goto copy_dead;
+ }
+ /* port is locked and active */
+
+ if ((bits & MACH_PORT_TYPE_SEND) == 0) {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+ assert(port->ip_sorights > 0);
+
+ ip_unlock(port);
+ goto invalid_right;
+ }
+
+ assert(port->ip_srights > 0);
+
+ port->ip_srights++;
+ ip_reference(port);
+ ip_unlock(port);
+
+ *objectp = (ipc_object_t) port;
+ *sorightp = IP_NULL;
+ break;
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND: {
+ ipc_port_t port;
+ ipc_port_t dnrequest = IP_NULL;
+
+ if (bits & MACH_PORT_TYPE_DEAD_NAME)
+ goto move_dead;
+
+ /* allow for dead send-once rights */
+
+ if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+ goto invalid_right;
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+ bits = entry->ie_bits;
+ goto move_dead;
+ }
+ /* port is locked and active */
+
+ if ((bits & MACH_PORT_TYPE_SEND) == 0) {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+ assert(port->ip_sorights > 0);
+
+ ip_unlock(port);
+ goto invalid_right;
+ }
+
+ assert(port->ip_srights > 0);
+
+ if (IE_BITS_UREFS(bits) == 1) {
+ if (bits & MACH_PORT_TYPE_RECEIVE) {
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_SEND_RECEIVE);
+
+ ip_reference(port);
+ } else {
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_SEND);
+
+ dnrequest = ipc_right_dncancel_macro(
+ space, port, name, entry);
+
+ ipc_hash_delete(space, (ipc_object_t) port,
+ name, entry);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+
+ entry->ie_object = IO_NULL;
+ }
+ entry->ie_bits = bits &~
+ (IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND);
+ } else {
+ port->ip_srights++;
+ ip_reference(port);
+ entry->ie_bits = bits-1; /* decrement urefs */
+ }
+
+ ip_unlock(port);
+
+ *objectp = (ipc_object_t) port;
+ *sorightp = dnrequest;
+ break;
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
+ ipc_port_t port;
+ ipc_port_t dnrequest;
+
+ if (bits & MACH_PORT_TYPE_DEAD_NAME)
+ goto move_dead;
+
+ /* allow for dead send rights */
+
+ if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+ goto invalid_right;
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+ bits = entry->ie_bits;
+ goto move_dead;
+ }
+ /* port is locked and active */
+
+ if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
+ assert(bits & MACH_PORT_TYPE_SEND);
+ assert(port->ip_srights > 0);
+
+ ip_unlock(port);
+ goto invalid_right;
+ }
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(bits) == 1);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+ assert(port->ip_sorights > 0);
+
+ dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+ ip_unlock(port);
+
+ entry->ie_object = IO_NULL;
+ entry->ie_bits = bits &~ MACH_PORT_TYPE_SEND_ONCE;
+
+ *objectp = (ipc_object_t) port;
+ *sorightp = dnrequest;
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_copyin: strange rights");
+#else
+ panic("ipc_right_copyin: strange rights");
+#endif
+ }
+
+ return KERN_SUCCESS;
+
+ copy_dead:
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+ assert(entry->ie_request == 0);
+ assert(entry->ie_object == 0);
+
+ if (!deadok)
+ goto invalid_right;
+
+ *objectp = IO_DEAD;
+ *sorightp = IP_NULL;
+ return KERN_SUCCESS;
+
+ move_dead:
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+ assert(entry->ie_request == 0);
+ assert(entry->ie_object == 0);
+
+ if (!deadok)
+ goto invalid_right;
+
+ if (IE_BITS_UREFS(bits) == 1)
+ entry->ie_bits = bits &~ MACH_PORT_TYPE_DEAD_NAME;
+ else
+ entry->ie_bits = bits-1; /* decrement urefs */
+
+ *objectp = IO_DEAD;
+ *sorightp = IP_NULL;
+ return KERN_SUCCESS;
+
+ invalid_right:
+ return KERN_INVALID_RIGHT;
+
+#if MACH_IPC_COMPAT
+ invalid_name:
+ return KERN_INVALID_NAME;
+#endif /* MACH_IPC_COMPAT */
+}
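
The case analysis above reduces to two transfer semantics: COPY_SEND leaves the caller's entry alone and takes an extra reference for the message, while MOVE_SEND consumes a uref and, when that was the last uref of a pure send entry, hands the entry's own reference to the message. A toy model of the distinction on a reference-counted handle (invented types, no Mach calls):

#include <assert.h>
#include <stddef.h>

struct object { int refs; };
struct entry  { struct object *obj; unsigned urefs; };

/* "Copy" semantics: the entry keeps its reference; the message gets a new one. */
static struct object *copyin_copy(struct entry *e)
{
	e->obj->refs++;
	return e->obj;
}

/* "Move" semantics: the last uref hands the entry's own reference to the
 * message; otherwise behave like copy and just decrement the urefs. */
static struct object *copyin_move(struct entry *e)
{
	struct object *o = e->obj;

	if (--e->urefs == 0)
		e->obj = NULL;		/* entry gives up its reference */
	else
		o->refs++;
	return o;
}

int main(void)
{
	struct object o = { 1 };
	struct entry  e = { &o, 2 };

	assert(copyin_copy(&e) == &o && o.refs == 2 && e.urefs == 2);
	assert(copyin_move(&e) == &o && o.refs == 3 && e.urefs == 1);
	assert(copyin_move(&e) == &o && o.refs == 3 && e.obj == NULL);
	return 0;
}
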
+
+/*
+ * Routine: ipc_right_copyin_undo
+ * Purpose:
+ * Undoes the effects of an ipc_right_copyin
+ * of a send/send-once right that is dead.
+ * (Object is either IO_DEAD or a dead port.)
+ * Conditions:
+ * The space is write-locked and active.
+ */
+
+void
+ipc_right_copyin_undo(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry,
+ mach_msg_type_name_t msgt_name,
+ ipc_object_t object,
+ ipc_port_t soright)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ assert(space->is_active);
+
+ assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+ (msgt_name == MACH_MSG_TYPE_COPY_SEND) ||
+ (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE));
+
+ if (soright != IP_NULL) {
+ assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+ (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE));
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+ assert(object != IO_DEAD);
+
+ entry->ie_bits = ((bits &~ IE_BITS_RIGHT_MASK) |
+ MACH_PORT_TYPE_DEAD_NAME | 2);
+ } else if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE) {
+ assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+ (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE));
+ assert(entry->ie_object == IO_NULL);
+
+ entry->ie_bits = ((bits &~ IE_BITS_RIGHT_MASK) |
+ MACH_PORT_TYPE_DEAD_NAME | 1);
+ } else if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME) {
+ assert(entry->ie_object == IO_NULL);
+ assert(object == IO_DEAD);
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ if (msgt_name != MACH_MSG_TYPE_COPY_SEND) {
+ assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
+
+ entry->ie_bits = bits+1; /* increment urefs */
+ }
+ } else {
+ assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+ (msgt_name == MACH_MSG_TYPE_COPY_SEND));
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+ assert(object != IO_DEAD);
+ assert(entry->ie_object == object);
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ if (msgt_name != MACH_MSG_TYPE_COPY_SEND) {
+ assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX-1);
+
+ entry->ie_bits = bits+1; /* increment urefs */
+ }
+
+ /*
+ * May as well convert the entry to a dead name.
+ * (Or if it is a compat entry, destroy it.)
+ */
+
+ (void) ipc_right_check(space, (ipc_port_t) object,
+ name, entry);
+ /* object is dead so it is not locked */
+ }
+
+ /* release the reference acquired by copyin */
+
+ if (object != IO_DEAD)
+ ipc_object_release(object);
+}
+
+/*
+ * Routine: ipc_right_copyin_two
+ * Purpose:
+ * Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND
+ * and deadok == FALSE, except that this moves two
+ * send rights at once.
+ * Conditions:
+ * The space is write-locked and active.
+ * The object is returned with two refs/send rights.
+ * Returns:
+ * KERN_SUCCESS Acquired an object.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_right_copyin_two(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry,
+ ipc_object_t *objectp,
+ ipc_port_t *sorightp)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+ mach_port_urefs_t urefs;
+ ipc_port_t port;
+ ipc_port_t dnrequest = IP_NULL;
+
+ assert(space->is_active);
+
+ if ((bits & MACH_PORT_TYPE_SEND) == 0)
+ goto invalid_right;
+
+ urefs = IE_BITS_UREFS(bits);
+ if (urefs < 2)
+ goto invalid_right;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+ goto invalid_right;
+ }
+ /* port is locked and active */
+
+ assert(port->ip_srights > 0);
+
+ if (urefs == 2) {
+ if (bits & MACH_PORT_TYPE_RECEIVE) {
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_SEND_RECEIVE);
+
+ port->ip_srights++;
+ ip_reference(port);
+ ip_reference(port);
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+
+ dnrequest = ipc_right_dncancel_macro(space, port,
+ name, entry);
+
+ ipc_hash_delete(space, (ipc_object_t) port,
+ name, entry);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+
+ port->ip_srights++;
+ ip_reference(port);
+ entry->ie_object = IO_NULL;
+ }
+ entry->ie_bits = bits &~
+ (IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND);
+ } else {
+ port->ip_srights += 2;
+ ip_reference(port);
+ ip_reference(port);
+ entry->ie_bits = bits-2; /* decrement urefs */
+ }
+ ip_unlock(port);
+
+ *objectp = (ipc_object_t) port;
+ *sorightp = dnrequest;
+ return KERN_SUCCESS;
+
+ invalid_right:
+ return KERN_INVALID_RIGHT;
+
+#if MACH_IPC_COMPAT
+ invalid_name:
+ return KERN_INVALID_NAME;
+#endif /* MACH_IPC_COMPAT */
+}
+
+/*
+ * Routine: ipc_right_copyout
+ * Purpose:
+ * Copyout a capability to a space.
+ * If successful, consumes a ref for the object.
+ *
+ * Always succeeds when given a newly-allocated entry,
+ * because user-reference overflow isn't a possibility.
+ *
+ * If copying out the object would cause the user-reference
+ * count in the entry to overflow, and overflow is TRUE,
+ * then instead the user-reference count is left pegged
+ * to its maximum value and the copyout succeeds anyway.
+ * Conditions:
+ * The space is write-locked and active.
+ * The object is locked and active.
+ * The object is unlocked; the space isn't.
+ * Returns:
+ * KERN_SUCCESS Copied out capability.
+ * KERN_UREFS_OVERFLOW User-refs would overflow;
+ * guaranteed not to happen with a fresh entry
+ * or if overflow=TRUE was specified.
+ */
+
+kern_return_t
+ipc_right_copyout(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry,
+ mach_msg_type_name_t msgt_name,
+ boolean_t overflow,
+ ipc_object_t object)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+ ipc_port_t port;
+
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+ assert(io_active(object));
+ assert(entry->ie_object == object);
+
+ port = (ipc_port_t) object;
+
+ switch (msgt_name) {
+ case MACH_MSG_TYPE_PORT_SEND_ONCE:
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+ assert(port->ip_sorights > 0);
+
+ /* transfer send-once right and ref to entry */
+ ip_unlock(port);
+
+ entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1);
+ break;
+
+ case MACH_MSG_TYPE_PORT_SEND:
+ assert(port->ip_srights > 0);
+
+ if (bits & MACH_PORT_TYPE_SEND) {
+ mach_port_urefs_t urefs = IE_BITS_UREFS(bits);
+
+ assert(port->ip_srights > 1);
+ assert(urefs > 0);
+ assert(urefs < MACH_PORT_UREFS_MAX);
+
+ if (urefs+1 == MACH_PORT_UREFS_MAX) {
+ if (overflow) {
+ /* leave urefs pegged to maximum */
+
+ port->ip_srights--;
+ ip_release(port);
+ ip_unlock(port);
+ return KERN_SUCCESS;
+ }
+
+ ip_unlock(port);
+ return KERN_UREFS_OVERFLOW;
+ }
+
+ port->ip_srights--;
+ ip_release(port);
+ ip_unlock(port);
+ } else if (bits & MACH_PORT_TYPE_RECEIVE) {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ /* transfer send right to entry */
+ ip_release(port);
+ ip_unlock(port);
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ /* transfer send right and ref to entry */
+ ip_unlock(port);
+
+ /* entry is locked holding ref, so can use port */
+
+ ipc_hash_insert(space, (ipc_object_t) port,
+ name, entry);
+ }
+
+ entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1;
+ break;
+
+ case MACH_MSG_TYPE_PORT_RECEIVE: {
+ ipc_port_t dest;
+
+ assert(port->ip_mscount == 0);
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ dest = port->ip_destination;
+
+ port->ip_receiver_name = name;
+ port->ip_receiver = space;
+
+ assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
+
+ if (bits & MACH_PORT_TYPE_SEND) {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert(port->ip_srights > 0);
+
+ ip_release(port);
+ ip_unlock(port);
+
+ /* entry is locked holding ref, so can use port */
+
+ ipc_hash_delete(space, (ipc_object_t) port,
+ name, entry);
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ /* transfer ref to entry */
+ ip_unlock(port);
+ }
+
+ entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
+
+ if (dest != IP_NULL)
+ ipc_port_release(dest);
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_copyout: strange rights");
+#else
+ panic("ipc_right_copyout: strange rights");
+#endif
+ }
+
+ return KERN_SUCCESS;
+}
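
With overflow == TRUE, the send-right case above does not fail when the reference count is full; it still consumes the incoming right and leaves the entry's urefs pegged at the maximum. The effect is a saturating counter, sketched here in isolation (the ceiling value is illustrative):

#include <assert.h>

#define UREFS_MAX	0xffffu		/* illustrative ceiling, not the real one */

/* Saturating add: once the count reaches the ceiling it stays there, which
 * is what "leave urefs pegged to maximum" amounts to in the overflow branch. */
static unsigned urefs_saturating_add(unsigned urefs, unsigned delta)
{
	if (delta >= UREFS_MAX - urefs)
		return UREFS_MAX;
	return urefs + delta;
}

int main(void)
{
	assert(urefs_saturating_add(3, 1) == 4);
	assert(urefs_saturating_add(UREFS_MAX - 1, 1) == UREFS_MAX);
	assert(urefs_saturating_add(UREFS_MAX, 5) == UREFS_MAX);	/* stays pegged */
	return 0;
}
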
+
+#if 0
+/* XXX same, but allows multiple duplicate send rights */
+kern_return_t
+ipc_right_copyout_multiname(space, name, entry, object)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_entry_t entry;
+ ipc_object_t object;
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+ ipc_port_t port;
+
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+ assert(io_active(object));
+ assert(entry->ie_object == object);
+
+ port = (ipc_port_t) object;
+
+ assert(port->ip_srights > 0);
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ /* transfer send right and ref to entry */
+ ip_unlock(port);
+
+ /* entry is locked holding ref, so can use port */
+
+ entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1;
+
+ return KERN_SUCCESS;
+}
+#endif
+
+/*
+ * Routine: ipc_right_rename
+ * Purpose:
+ * Transfer an entry from one name to another.
+ * The old entry is deallocated.
+ * Conditions:
+ * The space is write-locked and active.
+ * The new entry is unused. Upon return,
+ * the space is unlocked.
+ * Returns:
+ * KERN_SUCCESS Moved entry to new name.
+ */
+
+kern_return_t
+ipc_right_rename(
+ ipc_space_t space,
+ mach_port_t oname,
+ ipc_entry_t oentry,
+ mach_port_t nname,
+ ipc_entry_t nentry)
+{
+ ipc_entry_bits_t bits = oentry->ie_bits;
+ ipc_port_request_index_t request = oentry->ie_request;
+ ipc_object_t object = oentry->ie_object;
+
+ assert(space->is_active);
+ assert(oname != nname);
+
+ /*
+ * If IE_BITS_COMPAT, we can't allow the entry to be renamed
+ * if the port is dead. (This would foil ipc_port_destroy.)
+ * Instead we should fail because oentry shouldn't exist.
+ * Note IE_BITS_COMPAT implies ie_request != 0.
+ */
+
+ if (request != 0) {
+ ipc_port_t port;
+
+ assert(bits & MACH_PORT_TYPE_PORT_RIGHTS);
+ port = (ipc_port_t) object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, oname, oentry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT) {
+ ipc_entry_dealloc(space, nname, nentry);
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+ }
+#endif /* MACH_IPC_COMPAT */
+
+ bits = oentry->ie_bits;
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+ assert(oentry->ie_request == 0);
+ request = 0;
+ assert(oentry->ie_object == IO_NULL);
+ object = IO_NULL;
+ } else {
+ /* port is locked and active */
+
+ ipc_port_dnrename(port, request, oname, nname);
+ ip_unlock(port);
+ oentry->ie_request = 0;
+ }
+ }
+
+ if (bits & IE_BITS_MAREQUEST) {
+ assert(bits & MACH_PORT_TYPE_SEND_RECEIVE);
+
+ ipc_marequest_rename(space, oname, nname);
+ }
+
+ /* initialize nentry before letting ipc_hash_insert see it */
+
+ assert((nentry->ie_bits & IE_BITS_RIGHT_MASK) == 0);
+ nentry->ie_bits |= bits & IE_BITS_RIGHT_MASK;
+ nentry->ie_request = request;
+ nentry->ie_object = object;
+
+ switch (IE_BITS_TYPE(bits)) {
+ case MACH_PORT_TYPE_SEND: {
+ ipc_port_t port;
+
+ port = (ipc_port_t) object;
+ assert(port != IP_NULL);
+
+ ipc_hash_delete(space, (ipc_object_t) port, oname, oentry);
+ ipc_hash_insert(space, (ipc_object_t) port, nname, nentry);
+ break;
+ }
+
+ case MACH_PORT_TYPE_RECEIVE:
+ case MACH_PORT_TYPE_SEND_RECEIVE: {
+ ipc_port_t port;
+
+ port = (ipc_port_t) object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == oname);
+ assert(port->ip_receiver == space);
+
+ port->ip_receiver_name = nname;
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_PORT_TYPE_PORT_SET: {
+ ipc_pset_t pset;
+
+ pset = (ipc_pset_t) object;
+ assert(pset != IPS_NULL);
+
+ ips_lock(pset);
+ assert(ips_active(pset));
+ assert(pset->ips_local_name == oname);
+
+ pset->ips_local_name = nname;
+ ips_unlock(pset);
+ break;
+ }
+
+ case MACH_PORT_TYPE_SEND_ONCE:
+ case MACH_PORT_TYPE_DEAD_NAME:
+ break;
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_rename: strange rights");
+#else
+ panic("ipc_right_rename: strange rights");
+#endif
+ }
+
+ assert(oentry->ie_request == 0);
+ oentry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, oname, oentry);
+ is_write_unlock(space);
+
+ return KERN_SUCCESS;
+}
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: ipc_right_copyin_compat
+ * Purpose:
+ * Copyin a capability from a space.
+ * If successful, the caller gets a ref
+ * for the resulting object, which is always valid.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return.
+ * The space must be active.
+ * Returns:
+ * KERN_SUCCESS Acquired a valid object.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ * KERN_INVALID_NAME [MACH_IPC_COMPAT]
+ * Caller should pretend lookup of entry failed.
+ */
+
+kern_return_t
+ipc_right_copyin_compat(space, name, entry, msgt_name, dealloc, objectp)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_entry_t entry;
+ mach_msg_type_name_t msgt_name;
+ boolean_t dealloc;
+ ipc_object_t *objectp;
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ assert(space->is_active);
+
+ switch (msgt_name) {
+ case MSG_TYPE_PORT:
+ if (dealloc) {
+ ipc_port_t port;
+ ipc_port_t dnrequest;
+
+ /*
+ * Pulls a send right out of the space,
+ * leaving the space with no rights.
+ * Not allowed to destroy the port,
+ * so the space can't have receive rights.
+ * Doesn't operate on dead names.
+ */
+
+ if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_SEND)
+ goto invalid_right;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+
+ goto invalid_right;
+ }
+ /* port is locked and active */
+
+ dnrequest = ipc_right_dncancel_macro(space, port,
+ name, entry);
+
+ assert(port->ip_srights > 0);
+ ip_unlock(port);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+
+ *objectp = (ipc_object_t) port;
+ break;
+ } else {
+ ipc_port_t port;
+
+ /*
+ * Pulls a send right out of the space,
+ * making a send right if necessary.
+ * Doesn't operate on dead names.
+ */
+
+ if ((bits & MACH_PORT_TYPE_SEND_RECEIVE) == 0)
+ goto invalid_right;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+
+ goto invalid_right;
+ }
+ /* port is locked and active */
+
+ is_write_unlock(space);
+
+ if ((bits & MACH_PORT_TYPE_SEND) == 0) {
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_RECEIVE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ port->ip_mscount++;
+ }
+
+ port->ip_srights++;
+ ip_reference(port);
+ ip_unlock(port);
+
+ *objectp = (ipc_object_t) port;
+ break;
+ }
+
+ case MSG_TYPE_PORT_ALL:
+ if (dealloc) {
+ ipc_port_t port;
+ ipc_port_t dnrequest = IP_NULL;
+ ipc_port_t nsrequest = IP_NULL;
+ mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+ /*
+ * Like MACH_MSG_TYPE_MOVE_RECEIVE, except that
+ * the space is always left without rights,
+ * so we kill send rights if necessary.
+ */
+
+ if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+ goto invalid_right;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+
+ dnrequest = ipc_right_dncancel_macro(space, port,
+ name, entry);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+ if (bits & MACH_PORT_TYPE_SEND) {
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_SEND_RECEIVE);
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert(port->ip_srights > 0);
+
+ if (--port->ip_srights == 0) {
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL) {
+ port->ip_nsrequest = IP_NULL;
+ mscount = port->ip_mscount;
+ }
+ }
+ }
+
+ ipc_port_clear_receiver(port);
+
+ port->ip_receiver_name = MACH_PORT_NULL;
+ port->ip_destination = IP_NULL;
+ ip_unlock(port);
+
+ if (nsrequest != IP_NULL)
+ ipc_notify_no_senders(nsrequest, mscount);
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+
+ *objectp = (ipc_object_t) port;
+ break;
+ } else {
+ ipc_port_t port;
+
+ /*
+ * Like MACH_MSG_TYPE_MOVE_RECEIVE, except that
+ * the space is always left with send rights,
+ * so we make a send right if necessary.
+ */
+
+ if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+ goto invalid_right;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+
+ if ((bits & MACH_PORT_TYPE_SEND) == 0) {
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_RECEIVE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ /* ip_mscount will be cleared below */
+ port->ip_srights++;
+ bits |= MACH_PORT_TYPE_SEND | 1;
+ }
+
+ ipc_hash_insert(space, (ipc_object_t) port,
+ name, entry);
+
+ entry->ie_bits = bits &~ MACH_PORT_TYPE_RECEIVE;
+ is_write_unlock(space);
+
+ ipc_port_clear_receiver(port); /* clears ip_mscount */
+
+ port->ip_receiver_name = MACH_PORT_NULL;
+ port->ip_destination = IP_NULL;
+ ip_reference(port);
+ ip_unlock(port);
+
+ *objectp = (ipc_object_t) port;
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_copyin_compat: strange rights");
+#else
+ panic("ipc_right_copyin_compat: strange rights");
+#endif
+ }
+
+ return KERN_SUCCESS;
+
+ invalid_right:
+ is_write_unlock(space);
+ return KERN_INVALID_RIGHT;
+
+ invalid_name:
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+}
+
+/*
+ * Routine: ipc_right_copyin_header
+ * Purpose:
+ * Copyin a capability from a space.
+ * If successful, the caller gets a ref
+ * for the resulting object, which is always valid.
+ * The type of the acquired capability is returned.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return.
+ * The space must be active.
+ * Returns:
+ * KERN_SUCCESS Acquired a valid object.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ * KERN_INVALID_NAME [MACH_IPC_COMPAT]
+ * Caller should pretend lookup of entry failed.
+ */
+
+kern_return_t
+ipc_right_copyin_header(space, name, entry, objectp, msgt_namep)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_entry_t entry;
+ ipc_object_t *objectp;
+ mach_msg_type_name_t *msgt_namep;
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+ mach_port_type_t type = IE_BITS_TYPE(bits);
+
+ assert(space->is_active);
+
+ switch (type) {
+ case MACH_PORT_TYPE_PORT_SET:
+ case MACH_PORT_TYPE_DEAD_NAME:
+ goto invalid_right;
+
+ case MACH_PORT_TYPE_RECEIVE: {
+ ipc_port_t port;
+
+ /*
+ * Like MACH_MSG_TYPE_MAKE_SEND.
+ */
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+ is_write_unlock(space);
+
+ port->ip_mscount++;
+ port->ip_srights++;
+ ip_reference(port);
+ ip_unlock(port);
+
+ *objectp = (ipc_object_t) port;
+ *msgt_namep = MACH_MSG_TYPE_PORT_SEND;
+ break;
+ }
+
+ case MACH_PORT_TYPE_SEND:
+ case MACH_PORT_TYPE_SEND_RECEIVE: {
+ ipc_port_t port;
+
+ /*
+ * Like MACH_MSG_TYPE_COPY_SEND,
+ * except that the port must be alive.
+ */
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+
+ goto invalid_right;
+ }
+ /* port is locked and active */
+
+ assert(port->ip_srights > 0);
+ is_write_unlock(space);
+
+ port->ip_srights++;
+ ip_reference(port);
+ ip_unlock(port);
+
+ *objectp = (ipc_object_t) port;
+ *msgt_namep = MACH_MSG_TYPE_PORT_SEND;
+ break;
+ }
+
+ case MACH_PORT_TYPE_SEND_ONCE: {
+ ipc_port_t port;
+ ipc_port_t dnrequest, notify;
+
+ /*
+ * Like MACH_MSG_TYPE_MOVE_SEND_ONCE,
+ * except that the port must be alive
+ * and a port-deleted notification is generated.
+ */
+
+ assert(IE_BITS_UREFS(bits) == 1);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+
+ goto invalid_right;
+ }
+ /* port is locked and active */
+
+ assert(port->ip_sorights > 0);
+
+ dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+ ip_unlock(port);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+
+ notify = ipc_space_make_notify(space);
+ is_write_unlock(space);
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+
+ if (IP_VALID(notify))
+ ipc_notify_port_deleted_compat(notify, name);
+
+ *objectp = (ipc_object_t) port;
+ *msgt_namep = MACH_MSG_TYPE_PORT_SEND_ONCE;
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_copyin_header: strange rights");
+#else
+ panic("ipc_right_copyin_header: strange rights");
+#endif
+ }
+
+ return KERN_SUCCESS;
+
+ invalid_right:
+ is_write_unlock(space);
+ return KERN_INVALID_RIGHT;
+
+ invalid_name:
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+}
+
+#endif /* MACH_IPC_COMPAT */
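A pattern that recurs throughout the routines above: dead-name requests are cancelled and entries torn down while the port and space locks are still held, but the resulting notifications are posted only after every lock has been dropped. The following minimal sketch shows that ordering using helpers declared in ipc_right.h below; the function itself is hypothetical and deliberately omits the send-right and no-senders bookkeeping the real paths perform.

	static void
	example_remove_right(space, name, entry, port)
		ipc_space_t space;
		mach_port_t name;
		ipc_entry_t entry;
		ipc_port_t port;
	{
		ipc_port_t dnrequest;

		/* assumed on entry: space is write-locked, port is locked
		   and active, and entry denotes a right on port */

		dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
		ip_unlock(port);

		/* the entry's reference on the port is assumed to be
		   consumed elsewhere by the (hypothetical) caller */

		entry->ie_object = IO_NULL;
		ipc_entry_dealloc(space, name, entry);
		is_write_unlock(space);

		/* notifications go out only after all locks are dropped */
		if (dnrequest != IP_NULL)
			ipc_notify_port_deleted(dnrequest, name);
	}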
diff --git a/ipc/ipc_right.h b/ipc/ipc_right.h
new file mode 100644
index 00000000..7c0f2a39
--- /dev/null
+++ b/ipc/ipc_right.h
@@ -0,0 +1,124 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_right.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of functions to manipulate IPC capabilities.
+ */
+
+#ifndef _IPC_IPC_RIGHT_H_
+#define _IPC_IPC_RIGHT_H_
+
+#include <mach_ipc_compat.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <ipc/ipc_port.h>
+
+#define ipc_right_lookup_read ipc_right_lookup_write
+
+extern kern_return_t
+ipc_right_lookup_write(/* ipc_space_t, mach_port_t, ipc_entry_t * */);
+
+extern boolean_t
+ipc_right_reverse(/* ipc_space_t, ipc_object_t,
+ mach_port_t *, ipc_entry_t * */);
+
+extern kern_return_t
+ipc_right_dnrequest(/* ipc_space_t, mach_port_t, boolean_t,
+ ipc_port_t, ipc_port_t * */);
+
+extern ipc_port_t
+ipc_right_dncancel(/* ipc_space_t, ipc_port_t, mach_port_t, ipc_entry_t */);
+
+#define ipc_right_dncancel_macro(space, port, name, entry) \
+ (((entry)->ie_request == 0) ? IP_NULL : \
+ ipc_right_dncancel((space), (port), (name), (entry)))
+
+extern boolean_t
+ipc_right_inuse(/* ipc_space_t, mach_port_t, ipc_entry_t */);
+
+extern boolean_t
+ipc_right_check(/* ipc_space_t, ipc_port_t, mach_port_t, ipc_entry_t */);
+
+extern void
+ipc_right_clean(/* ipc_space_t, mach_port_t, ipc_entry_t */);
+
+extern kern_return_t
+ipc_right_destroy(/* ipc_space_t, mach_port_t, ipc_entry_t */);
+
+extern kern_return_t
+ipc_right_dealloc(/* ipc_space_t, mach_port_t, ipc_entry_t */);
+
+extern kern_return_t
+ipc_right_delta(/* ipc_space_t, mach_port_t, ipc_entry_t,
+ mach_port_right_t, mach_port_delta_t */);
+
+extern kern_return_t
+ipc_right_info(/* ipc_space_t, mach_port_t, ipc_entry_t,
+ mach_port_type_t *, mach_port_urefs_t * */);
+
+extern boolean_t
+ipc_right_copyin_check(/* ipc_space_t, mach_port_t, ipc_entry_t,
+ mach_msg_type_name_t */);
+
+extern kern_return_t
+ipc_right_copyin(/* ipc_space_t, mach_port_t, ipc_entry_t,
+ mach_msg_type_name_t, boolean_t,
+ ipc_object_t *, ipc_port_t * */);
+
+extern void
+ipc_right_copyin_undo(/* ipc_space_t, mach_port_t, ipc_entry_t,
+ mach_msg_type_name_t, ipc_object_t, ipc_port_t */);
+
+extern kern_return_t
+ipc_right_copyin_two(/* ipc_space_t, mach_port_t, ipc_entry_t,
+ ipc_object_t *, ipc_port_t * */);
+
+extern kern_return_t
+ipc_right_copyout(/* ipc_space_t, mach_port_t, ipc_entry_t,
+ mach_msg_type_name_t, boolean_t, ipc_object_t */);
+
+extern kern_return_t
+ipc_right_rename(/* ipc_space_t, mach_port_t, ipc_entry_t,
+ mach_port_t, ipc_entry_t */);
+
+#if MACH_IPC_COMPAT
+
+extern kern_return_t
+ipc_right_copyin_compat(/* ipc_space_t, mach_port_t, ipc_entry_t,
+ mach_msg_type_name_t, boolean_t, ipc_object_t * */);
+
+extern kern_return_t
+ipc_right_copyin_header(/* ipc_space_t, mach_port_t, ipc_entry_t,
+ ipc_object_t *, mach_msg_type_name_t * */);
+
+#endif /* MACH_IPC_COMPAT */
+#endif /* _IPC_IPC_RIGHT_H_ */
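The declarations above all follow the same locking contract spelled out in the routine headers: the caller write-locks the space, and the copyin/copyout routines unlock it on every return path. Here is a hypothetical caller of ipc_right_copyin_compat, sketched under the assumption that ipc_entry_lookup from ipc_entry.h (added elsewhere in this change) is used to find the entry; the wrapper name and its error handling are illustrative only.

	kern_return_t
	example_extract_send(space, name, objectp)
		ipc_space_t space;
		mach_port_t name;
		ipc_object_t *objectp;
	{
		ipc_entry_t entry;

		is_write_lock(space);
		if (!space->is_active) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		entry = ipc_entry_lookup(space, name);
		if (entry == IE_NULL) {
			is_write_unlock(space);
			return KERN_INVALID_NAME;
		}

		/* unlocks the space on all paths */
		return ipc_right_copyin_compat(space, name, entry,
					       MSG_TYPE_PORT, FALSE, objectp);
	}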
diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
new file mode 100644
index 00000000..7e3cba96
--- /dev/null
+++ b/ipc/ipc_space.c
@@ -0,0 +1,317 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_space.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC capability spaces.
+ */
+
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/sched_prim.h>
+#include <kern/zalloc.h>
+#include <ipc/port.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_splay.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_right.h>
+
+
+
+zone_t ipc_space_zone;
+ipc_space_t ipc_space_kernel;
+ipc_space_t ipc_space_reply;
+#if NORMA_IPC
+ipc_space_t ipc_space_remote;
+#endif /* NORMA_IPC */
+
+/*
+ * Routine: ipc_space_reference
+ * Routine: ipc_space_release
+ * Purpose:
+ * Function versions of the IPC space macros.
+ * The "is_" cover macros can be defined to use the
+ * macros or the functions, as desired.
+ */
+
+void
+ipc_space_reference(
+ ipc_space_t space)
+{
+ ipc_space_reference_macro(space);
+}
+
+void
+ipc_space_release(
+ ipc_space_t space)
+{
+ ipc_space_release_macro(space);
+}
+
+/*
+ * Routine: ipc_space_create
+ * Purpose:
+ * Creates a new IPC space.
+ *
+ * The new space has two references, one for the caller
+ * and one because it is active.
+ * Conditions:
+ * Nothing locked. Allocates memory.
+ * Returns:
+ * KERN_SUCCESS Created a space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_space_create(
+ ipc_table_size_t initial,
+ ipc_space_t *spacep)
+{
+ ipc_space_t space;
+ ipc_entry_t table;
+ ipc_entry_num_t new_size;
+ mach_port_index_t index;
+
+ space = is_alloc();
+ if (space == IS_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ table = it_entries_alloc(initial);
+ if (table == IE_NULL) {
+ is_free(space);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ new_size = initial->its_size;
+ memset((void *) table, 0, new_size * sizeof(struct ipc_entry));
+
+ /*
+ * Initialize the free list in the table.
+ * Add the entries in reverse order, and
+ * set the generation number to -1, so that
+ * initial allocations produce "natural" names.
+ */
+
+ for (index = 0; index < new_size; index++) {
+ ipc_entry_t entry = &table[index];
+
+ entry->ie_bits = IE_BITS_GEN_MASK;
+ entry->ie_next = index+1;
+ }
+ table[new_size-1].ie_next = 0;
+
+ is_ref_lock_init(space);
+ space->is_references = 2;
+
+ is_lock_init(space);
+ space->is_active = TRUE;
+ space->is_growing = FALSE;
+ space->is_table = table;
+ space->is_table_size = new_size;
+ space->is_table_next = initial+1;
+
+ ipc_splay_tree_init(&space->is_tree);
+ space->is_tree_total = 0;
+ space->is_tree_small = 0;
+ space->is_tree_hash = 0;
+
+#if MACH_IPC_COMPAT
+ {
+ mach_port_t name;
+ ipc_port_t port;
+ kern_return_t kr;
+
+ /*
+ * ipc_port_alloc_compat probably won't look at is_notify,
+ * but make sure all fields have sane values anyway.
+ */
+
+ space->is_notify = IP_NULL;
+
+ kr = ipc_port_alloc_compat(space, &name, &port);
+ if (kr != KERN_SUCCESS) {
+ ipc_space_destroy(space);
+ is_release(space);
+ return kr;
+ }
+
+ ip_reference(port);
+ port->ip_srights++;
+ ip_unlock(port);
+ space->is_notify = port;
+ }
+#endif /* MACH_IPC_COMPAT */
+
+ *spacep = space;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_space_create_special
+ * Purpose:
+ * Create a special space. A special space
+ * doesn't hold rights in the normal way.
+ *	Instead it is a place-holder for holding
+ * disembodied (naked) receive rights.
+ * See ipc_port_alloc_special/ipc_port_dealloc_special.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Created a space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_space_create_special(
+ ipc_space_t *spacep)
+{
+ ipc_space_t space;
+
+ space = is_alloc();
+ if (space == IS_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ is_ref_lock_init(space);
+ space->is_references = 1;
+
+ is_lock_init(space);
+ space->is_active = FALSE;
+
+ *spacep = space;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_space_destroy
+ * Purpose:
+ * Marks the space as dead and cleans up the entries.
+ * Does nothing if the space is already dead.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_space_destroy(
+ ipc_space_t space)
+{
+ ipc_tree_entry_t tentry;
+ ipc_entry_t table;
+ ipc_entry_num_t size;
+ mach_port_index_t index;
+ boolean_t active;
+
+ assert(space != IS_NULL);
+
+ is_write_lock(space);
+ active = space->is_active;
+ space->is_active = FALSE;
+ is_write_unlock(space);
+
+ if (!active)
+ return;
+
+ /*
+ * If somebody is trying to grow the table,
+ * we must wait until they finish and figure
+	 * out that the space died.
+ */
+
+ is_read_lock(space);
+ while (space->is_growing) {
+ assert_wait((event_t) space, FALSE);
+ is_read_unlock(space);
+ thread_block((void (*)(void)) 0);
+ is_read_lock(space);
+ }
+ is_read_unlock(space);
+
+ /*
+ * Now we can futz with it without having it locked.
+ */
+
+ table = space->is_table;
+ size = space->is_table_size;
+
+ for (index = 0; index < size; index++) {
+ ipc_entry_t entry = &table[index];
+ mach_port_type_t type = IE_BITS_TYPE(entry->ie_bits);
+
+ if (type != MACH_PORT_TYPE_NONE) {
+ mach_port_t name =
+ MACH_PORT_MAKEB(index, entry->ie_bits);
+
+ ipc_right_clean(space, name, entry);
+ }
+ }
+
+ it_entries_free(space->is_table_next-1, table);
+
+ for (tentry = ipc_splay_traverse_start(&space->is_tree);
+ tentry != ITE_NULL;
+ tentry = ipc_splay_traverse_next(&space->is_tree, TRUE)) {
+ mach_port_type_t type = IE_BITS_TYPE(tentry->ite_bits);
+ mach_port_t name = tentry->ite_name;
+
+ assert(type != MACH_PORT_TYPE_NONE);
+
+ /* use object before ipc_right_clean releases ref */
+
+ if (type == MACH_PORT_TYPE_SEND)
+ ipc_hash_global_delete(space, tentry->ite_object,
+ name, tentry);
+
+ ipc_right_clean(space, name, &tentry->ite_entry);
+ }
+ ipc_splay_traverse_finish(&space->is_tree);
+
+#if MACH_IPC_COMPAT
+ if (IP_VALID(space->is_notify))
+ ipc_port_release_send(space->is_notify);
+#endif /* MACH_IPC_COMPAT */
+
+ /*
+ * Because the space is now dead,
+ * we must release the "active" reference for it.
+ * Our caller still has his reference.
+ */
+
+ is_release(space);
+}
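The free-list initialization in ipc_space_create above chains the table entries through their ie_next fields, with index 0 (the never-allocated MACH_PORT_NULL slot) serving as the list head, so the first allocations hand out indices 1, 2, 3, ... in order. The toy program below is a self-contained illustration of the same chaining scheme; the struct and the pop step are made up for the example and only mirror how the allocator elsewhere in this change appears to use the list.

	#include <stdio.h>

	#define TABLE_SIZE 8

	struct toy_entry {
		unsigned int next;	/* index of next free entry, 0 ends the list */
		int in_use;
	};

	int main(void)
	{
		struct toy_entry table[TABLE_SIZE];
		unsigned int i, index;

		/* chain 0 -> 1 -> 2 -> ... -> TABLE_SIZE-1 -> 0 (end) */
		for (i = 0; i < TABLE_SIZE; i++) {
			table[i].next = i + 1;
			table[i].in_use = 0;
		}
		table[TABLE_SIZE - 1].next = 0;

		/* pop three entries off the free list headed at slot 0 */
		for (i = 0; i < 3; i++) {
			index = table[0].next;
			table[0].next = table[index].next;
			table[index].in_use = 1;
			printf("allocated index %u\n", index);	/* 1, then 2, then 3 */
		}
		return 0;
	}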
diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
new file mode 100644
index 00000000..430971f7
--- /dev/null
+++ b/ipc/ipc_space.h
@@ -0,0 +1,164 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_space.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for IPC spaces of capabilities.
+ */
+
+#ifndef _IPC_IPC_SPACE_H_
+#define _IPC_IPC_SPACE_H_
+
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <kern/macro_help.h>
+#include <kern/lock.h>
+#include <ipc/ipc_splay.h>
+
+/*
+ * Every task has a space of IPC capabilities.
+ * IPC operations like send and receive use this space.
+ * IPC kernel calls manipulate the space of the target task.
+ *
+ * Every space has a non-NULL is_table with is_table_size entries.
+ * A space may have a NULL is_tree. is_tree_small records the
+ * number of entries in the tree that, if the table were to grow
+ * to the next larger size, would move from the tree to the table.
+ *
+ * is_growing marks when the table is in the process of growing.
+ * When the table is growing, it can't be freed or grown by another
+ * thread, because of krealloc/kmem_realloc's requirements.
+ */
+
+typedef unsigned int ipc_space_refs_t;
+
+struct ipc_space {
+ decl_simple_lock_data(,is_ref_lock_data)
+ ipc_space_refs_t is_references;
+
+ decl_simple_lock_data(,is_lock_data)
+ boolean_t is_active; /* is the space alive? */
+ boolean_t is_growing; /* is the space growing? */
+ ipc_entry_t is_table; /* an array of entries */
+ ipc_entry_num_t is_table_size; /* current size of table */
+ struct ipc_table_size *is_table_next; /* info for larger table */
+ struct ipc_splay_tree is_tree; /* a splay tree of entries */
+ ipc_entry_num_t is_tree_total; /* number of entries in the tree */
+ ipc_entry_num_t is_tree_small; /* # of small entries in the tree */
+ ipc_entry_num_t is_tree_hash; /* # of hashed entries in the tree */
+
+#if MACH_IPC_COMPAT
+ struct ipc_port *is_notify; /* notification port */
+#endif /* MACH_IPC_COMPAT */
+};
+
+#define IS_NULL ((ipc_space_t) 0)
+
+extern zone_t ipc_space_zone;
+
+#define is_alloc() ((ipc_space_t) zalloc(ipc_space_zone))
+#define is_free(is) zfree(ipc_space_zone, (vm_offset_t) (is))
+
+extern struct ipc_space *ipc_space_kernel;
+extern struct ipc_space *ipc_space_reply;
+#if NORMA_IPC
+extern struct ipc_space *ipc_space_remote;
+#endif /* NORMA_IPC */
+
+#define is_ref_lock_init(is) simple_lock_init(&(is)->is_ref_lock_data)
+
+#define ipc_space_reference_macro(is) \
+MACRO_BEGIN \
+ simple_lock(&(is)->is_ref_lock_data); \
+ assert((is)->is_references > 0); \
+ (is)->is_references++; \
+ simple_unlock(&(is)->is_ref_lock_data); \
+MACRO_END
+
+#define ipc_space_release_macro(is) \
+MACRO_BEGIN \
+ ipc_space_refs_t _refs; \
+ \
+ simple_lock(&(is)->is_ref_lock_data); \
+ assert((is)->is_references > 0); \
+ _refs = --(is)->is_references; \
+ simple_unlock(&(is)->is_ref_lock_data); \
+ \
+ if (_refs == 0) \
+ is_free(is); \
+MACRO_END
+
+#define is_lock_init(is) simple_lock_init(&(is)->is_lock_data)
+
+#define is_read_lock(is) simple_lock(&(is)->is_lock_data)
+#define is_read_unlock(is) simple_unlock(&(is)->is_lock_data)
+
+#define is_write_lock(is) simple_lock(&(is)->is_lock_data)
+#define is_write_lock_try(is) simple_lock_try(&(is)->is_lock_data)
+#define is_write_unlock(is) simple_unlock(&(is)->is_lock_data)
+
+#define is_write_to_read_lock(is)
+
+extern void ipc_space_reference(struct ipc_space *space);
+extern void ipc_space_release(struct ipc_space *space);
+
+#define is_reference(is) ipc_space_reference(is)
+#define is_release(is) ipc_space_release(is)
+
+kern_return_t ipc_space_create(/* ipc_table_size_t, ipc_space_t * */);
+kern_return_t ipc_space_create_special(struct ipc_space **);
+void ipc_space_destroy(struct ipc_space *);
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: ipc_space_make_notify
+ * Purpose:
+ * Given a space, return a send right for a notification.
+ * May return IP_NULL/IP_DEAD.
+ * Conditions:
+ * The space is locked (read or write) and active.
+ *
+ * ipc_port_t
+ * ipc_space_make_notify(space)
+ * ipc_space_t space;
+ */
+
+#define ipc_space_make_notify(space) \
+ ipc_port_copy_send(space->is_notify)
+
+#endif /* MACH_IPC_COMPAT */
+#endif /* _IPC_IPC_SPACE_H_ */
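A small usage sketch of the reference-counting cover macros above; the routine is hypothetical and simply shows the discipline the macros encode: hold a reference for as long as the space pointer may be used, and let the last release free the structure.

	void
	example_keep_space(space)
		ipc_space_t space;
	{
		is_reference(space);	/* space can't be freed while we hold this */

		/* ... use the space, possibly across blocking operations ... */

		is_release(space);	/* frees the space if this was the last reference */
	}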
diff --git a/ipc/ipc_splay.c b/ipc/ipc_splay.c
new file mode 100644
index 00000000..6fb5bcbc
--- /dev/null
+++ b/ipc/ipc_splay.c
@@ -0,0 +1,920 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_splay.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Primitive splay tree operations.
+ */
+
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/macro_help.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_splay.h>
+
+/*
+ * Splay trees are self-adjusting binary search trees.
+ * They have the following attractive properties:
+ * 1) Space efficient; only two pointers per entry.
+ * 2) Robust performance; amortized O(log n) per operation.
+ * 3) Recursion not needed.
+ * This makes them a good fall-back data structure for those
+ * entries that don't fit into the lookup table.
+ *
+ * The paper by Sleator and Tarjan, JACM v. 32, no. 3, pp. 652-686,
+ * describes the splaying operation. ipc_splay_prim_lookup
+ * and ipc_splay_prim_assemble implement the top-down splay
+ * described on p. 669.
+ *
+ * The tree is stored in an unassembled form. If ist_root is null,
+ * then the tree has no entries. Otherwise, ist_name records
+ * the value used for the last lookup. ist_root points to the
+ * middle tree obtained from the top-down splay. ist_ltree and
+ * ist_rtree point to left and right subtrees, whose entries
+ * are all smaller (larger) than those in the middle tree.
+ * ist_ltreep and ist_rtreep are pointers to fields in the
+ * left and right subtrees. ist_ltreep points to the rchild field
+ * of the largest entry in ltree, and ist_rtreep points to the
+ * lchild field of the smallest entry in rtree. The pointed-to
+ * fields aren't initialized. If the left (right) subtree is null,
+ * then ist_ltreep (ist_rtreep) points to the ist_ltree (ist_rtree)
+ * field in the splay structure itself.
+ *
+ * The primary advantage of the unassembled form is that repeated
+ * unsuccessful lookups are efficient. In particular, an unsuccessful
+ * lookup followed by an insert only requires one splaying operation.
+ *
+ * The traversal algorithm works via pointer inversion.
+ * When descending down the tree, child pointers are reversed
+ * to point back to the parent entry. When ascending,
+ * the pointers are restored to their original value.
+ *
+ * The biggest potential problem with the splay tree implementation
+ * is that the operations, even lookup, require an exclusive lock.
+ * If IPC spaces are protected with exclusive locks, then
+ * the splay tree doesn't require its own lock, and ist_lock/ist_unlock
+ * needn't do anything. If IPC spaces are protected with read/write
+ * locks then ist_lock/ist_unlock should provide exclusive access.
+ *
+ * If it becomes important to let lookups run in parallel,
+ * or if the restructuring makes lookups too expensive, then
+ * there is hope. Use a read/write lock on the splay tree.
+ * Keep track of the number of entries in the tree. When doing
+ * a lookup, first try a non-restructuring lookup with a read lock held,
+ * with a bound (based on log of size of the tree) on the number of
+ * entries to traverse. If the lookup runs up against the bound,
+ * then take a write lock and do a reorganizing lookup.
+ * This way, if lookups only access roughly balanced parts
+ * of the tree, then lookups run in parallel and do no restructuring.
+ *
+ * The traversal algorithm currently requires an exclusive lock.
+ * If that is a problem, the tree could be changed from an lchild/rchild
+ * representation to a leftmost child/right sibling representation.
+ * In conjunction with non-restructuring lookups, this would let
+ * lookups and traversals all run in parallel. But this representation
+ * is more complicated and would slow down the operations.
+ */
+
+/*
+ * Boundary values to hand to ipc_splay_prim_lookup:
+ */
+
+#define MACH_PORT_SMALLEST ((mach_port_t) 0)
+#define MACH_PORT_LARGEST ((mach_port_t) ~0)
+
+/*
+ * Routine: ipc_splay_prim_lookup
+ * Purpose:
+ * Searches for the node labeled name in the splay tree.
+ * Returns three nodes (treep, ltreep, rtreep) and
+ * two pointers to nodes (ltreepp, rtreepp).
+ *
+ * ipc_splay_prim_lookup splits the supplied tree into
+ * three subtrees, left, middle, and right, returned
+ * in ltreep, treep, and rtreep.
+ *
+ * If name is present in the tree, then it is at
+ * the root of the middle tree. Otherwise, the root
+ * of the middle tree is the last node traversed.
+ *
+ * ipc_splay_prim_lookup returns a pointer into
+ * the left subtree, to the rchild field of its
+ * largest node, in ltreepp. It returns a pointer
+ * into the right subtree, to the lchild field of its
+ * smallest node, in rtreepp.
+ */
+
+static void
+ipc_splay_prim_lookup(
+ mach_port_t name,
+ ipc_tree_entry_t tree,
+ ipc_tree_entry_t *treep,
+ ipc_tree_entry_t *ltreep,
+ ipc_tree_entry_t **ltreepp,
+ ipc_tree_entry_t *rtreep,
+ ipc_tree_entry_t **rtreepp)
+{
+ mach_port_t tname; /* temp name */
+ ipc_tree_entry_t lchild, rchild; /* temp child pointers */
+
+ assert(tree != ITE_NULL);
+
+#define link_left \
+MACRO_BEGIN \
+ *ltreep = tree; \
+ ltreep = &tree->ite_rchild; \
+ tree = *ltreep; \
+MACRO_END
+
+#define link_right \
+MACRO_BEGIN \
+ *rtreep = tree; \
+ rtreep = &tree->ite_lchild; \
+ tree = *rtreep; \
+MACRO_END
+
+#define rotate_left \
+MACRO_BEGIN \
+ ipc_tree_entry_t temp = tree; \
+ \
+ tree = temp->ite_rchild; \
+ temp->ite_rchild = tree->ite_lchild; \
+ tree->ite_lchild = temp; \
+MACRO_END
+
+#define rotate_right \
+MACRO_BEGIN \
+ ipc_tree_entry_t temp = tree; \
+ \
+ tree = temp->ite_lchild; \
+ temp->ite_lchild = tree->ite_rchild; \
+ tree->ite_rchild = temp; \
+MACRO_END
+
+ while (name != (tname = tree->ite_name)) {
+ if (name < tname) {
+ /* descend to left */
+
+ lchild = tree->ite_lchild;
+ if (lchild == ITE_NULL)
+ break;
+ tname = lchild->ite_name;
+
+ if ((name < tname) &&
+ (lchild->ite_lchild != ITE_NULL))
+ rotate_right;
+ link_right;
+ if ((name > tname) &&
+ (lchild->ite_rchild != ITE_NULL))
+ link_left;
+ } else {
+ /* descend to right */
+
+ rchild = tree->ite_rchild;
+ if (rchild == ITE_NULL)
+ break;
+ tname = rchild->ite_name;
+
+ if ((name > tname) &&
+ (rchild->ite_rchild != ITE_NULL))
+ rotate_left;
+ link_left;
+ if ((name < tname) &&
+ (rchild->ite_lchild != ITE_NULL))
+ link_right;
+ }
+
+ assert(tree != ITE_NULL);
+ }
+
+ *treep = tree;
+ *ltreepp = ltreep;
+ *rtreepp = rtreep;
+
+#undef link_left
+#undef link_right
+#undef rotate_left
+#undef rotate_right
+}
+
+/*
+ * Routine: ipc_splay_prim_assemble
+ * Purpose:
+ * Assembles the results of ipc_splay_prim_lookup
+ * into a splay tree with the found node at the root.
+ *
+ * ltree and rtree are by-reference so storing
+ * through ltreep and rtreep can change them.
+ */
+
+static void
+ipc_splay_prim_assemble(
+ ipc_tree_entry_t tree,
+ ipc_tree_entry_t *ltree,
+ ipc_tree_entry_t *ltreep,
+ ipc_tree_entry_t *rtree,
+ ipc_tree_entry_t *rtreep)
+{
+ assert(tree != ITE_NULL);
+
+ *ltreep = tree->ite_lchild;
+ *rtreep = tree->ite_rchild;
+
+ tree->ite_lchild = *ltree;
+ tree->ite_rchild = *rtree;
+}
+
+/*
+ * Routine: ipc_splay_tree_init
+ * Purpose:
+ * Initialize a raw splay tree for use.
+ */
+
+void
+ipc_splay_tree_init(
+ ipc_splay_tree_t splay)
+{
+ splay->ist_root = ITE_NULL;
+}
+
+/*
+ * Routine: ipc_splay_tree_pick
+ * Purpose:
+ * Picks and returns a random entry in a splay tree.
+ * Returns FALSE if the splay tree is empty.
+ */
+
+boolean_t
+ipc_splay_tree_pick(
+ ipc_splay_tree_t splay,
+ mach_port_t *namep,
+ ipc_tree_entry_t *entryp)
+{
+ ipc_tree_entry_t root;
+
+ ist_lock(splay);
+
+ root = splay->ist_root;
+ if (root != ITE_NULL) {
+ *namep = root->ite_name;
+ *entryp = root;
+ }
+
+ ist_unlock(splay);
+
+ return root != ITE_NULL;
+}
+
+/*
+ * Routine: ipc_splay_tree_lookup
+ * Purpose:
+ * Finds an entry in a splay tree.
+ * Returns ITE_NULL if not found.
+ */
+
+ipc_tree_entry_t
+ipc_splay_tree_lookup(
+ ipc_splay_tree_t splay,
+ mach_port_t name)
+{
+ ipc_tree_entry_t root;
+
+ ist_lock(splay);
+
+ root = splay->ist_root;
+ if (root != ITE_NULL) {
+ if (splay->ist_name != name) {
+ ipc_splay_prim_assemble(root,
+ &splay->ist_ltree, splay->ist_ltreep,
+ &splay->ist_rtree, splay->ist_rtreep);
+ ipc_splay_prim_lookup(name, root, &root,
+ &splay->ist_ltree, &splay->ist_ltreep,
+ &splay->ist_rtree, &splay->ist_rtreep);
+ splay->ist_name = name;
+ splay->ist_root = root;
+ }
+
+ if (name != root->ite_name)
+ root = ITE_NULL;
+ }
+
+ ist_unlock(splay);
+
+ return root;
+}
+
+/*
+ * Routine: ipc_splay_tree_insert
+ * Purpose:
+ * Inserts a new entry into a splay tree.
+ * The caller supplies a new entry.
+ * The name can't already be present in the tree.
+ */
+
+void
+ipc_splay_tree_insert(
+ ipc_splay_tree_t splay,
+ mach_port_t name,
+ ipc_tree_entry_t entry)
+{
+ ipc_tree_entry_t root;
+
+ assert(entry != ITE_NULL);
+
+ ist_lock(splay);
+
+ root = splay->ist_root;
+ if (root == ITE_NULL) {
+ entry->ite_lchild = ITE_NULL;
+ entry->ite_rchild = ITE_NULL;
+ } else {
+ if (splay->ist_name != name) {
+ ipc_splay_prim_assemble(root,
+ &splay->ist_ltree, splay->ist_ltreep,
+ &splay->ist_rtree, splay->ist_rtreep);
+ ipc_splay_prim_lookup(name, root, &root,
+ &splay->ist_ltree, &splay->ist_ltreep,
+ &splay->ist_rtree, &splay->ist_rtreep);
+ }
+
+ assert(root->ite_name != name);
+
+ if (name < root->ite_name) {
+ assert(root->ite_lchild == ITE_NULL);
+
+ *splay->ist_ltreep = ITE_NULL;
+ *splay->ist_rtreep = root;
+ } else {
+ assert(root->ite_rchild == ITE_NULL);
+
+ *splay->ist_ltreep = root;
+ *splay->ist_rtreep = ITE_NULL;
+ }
+
+ entry->ite_lchild = splay->ist_ltree;
+ entry->ite_rchild = splay->ist_rtree;
+ }
+
+ entry->ite_name = name;
+ splay->ist_root = entry;
+ splay->ist_name = name;
+ splay->ist_ltreep = &splay->ist_ltree;
+ splay->ist_rtreep = &splay->ist_rtree;
+
+ ist_unlock(splay);
+}
+
+/*
+ * Routine: ipc_splay_tree_delete
+ * Purpose:
+ * Deletes an entry from a splay tree.
+ * The name must be present in the tree.
+ * Frees the entry.
+ *
+ * The "entry" argument isn't currently used.
+ * Other implementations might want it, though.
+ */
+
+void
+ipc_splay_tree_delete(
+ ipc_splay_tree_t splay,
+ mach_port_t name,
+ ipc_tree_entry_t entry)
+{
+ ipc_tree_entry_t root, saved;
+
+ ist_lock(splay);
+
+ root = splay->ist_root;
+ assert(root != ITE_NULL);
+
+ if (splay->ist_name != name) {
+ ipc_splay_prim_assemble(root,
+ &splay->ist_ltree, splay->ist_ltreep,
+ &splay->ist_rtree, splay->ist_rtreep);
+ ipc_splay_prim_lookup(name, root, &root,
+ &splay->ist_ltree, &splay->ist_ltreep,
+ &splay->ist_rtree, &splay->ist_rtreep);
+ }
+
+ assert(root->ite_name == name);
+ assert(root == entry);
+
+ *splay->ist_ltreep = root->ite_lchild;
+ *splay->ist_rtreep = root->ite_rchild;
+ ite_free(root);
+
+ root = splay->ist_ltree;
+ saved = splay->ist_rtree;
+
+ if (root == ITE_NULL)
+ root = saved;
+ else if (saved != ITE_NULL) {
+ /*
+ * Find the largest node in the left subtree, and splay it
+ * to the root. Then add the saved right subtree.
+ */
+
+ ipc_splay_prim_lookup(MACH_PORT_LARGEST, root, &root,
+ &splay->ist_ltree, &splay->ist_ltreep,
+ &splay->ist_rtree, &splay->ist_rtreep);
+ ipc_splay_prim_assemble(root,
+ &splay->ist_ltree, splay->ist_ltreep,
+ &splay->ist_rtree, splay->ist_rtreep);
+
+ assert(root->ite_rchild == ITE_NULL);
+ root->ite_rchild = saved;
+ }
+
+ splay->ist_root = root;
+ if (root != ITE_NULL) {
+ splay->ist_name = root->ite_name;
+ splay->ist_ltreep = &splay->ist_ltree;
+ splay->ist_rtreep = &splay->ist_rtree;
+ }
+
+ ist_unlock(splay);
+}
+
+/*
+ * Routine: ipc_splay_tree_split
+ * Purpose:
+ * Split a splay tree. Puts all entries smaller than "name"
+ * into a new tree, "small".
+ *
+ * Doesn't do locking on "small", because nobody else
+ * should be fiddling with the uninitialized tree.
+ */
+
+void
+ipc_splay_tree_split(
+ ipc_splay_tree_t splay,
+ mach_port_t name,
+ ipc_splay_tree_t small)
+{
+ ipc_tree_entry_t root;
+
+ ipc_splay_tree_init(small);
+
+ ist_lock(splay);
+
+ root = splay->ist_root;
+ if (root != ITE_NULL) {
+ /* lookup name, to get it (or last traversed) to the top */
+
+ if (splay->ist_name != name) {
+ ipc_splay_prim_assemble(root,
+ &splay->ist_ltree, splay->ist_ltreep,
+ &splay->ist_rtree, splay->ist_rtreep);
+ ipc_splay_prim_lookup(name, root, &root,
+ &splay->ist_ltree, &splay->ist_ltreep,
+ &splay->ist_rtree, &splay->ist_rtreep);
+ }
+
+ if (root->ite_name < name) {
+ /* root goes into small */
+
+ *splay->ist_ltreep = root->ite_lchild;
+ *splay->ist_rtreep = ITE_NULL;
+ root->ite_lchild = splay->ist_ltree;
+ assert(root->ite_rchild == ITE_NULL);
+
+ small->ist_root = root;
+ small->ist_name = root->ite_name;
+ small->ist_ltreep = &small->ist_ltree;
+ small->ist_rtreep = &small->ist_rtree;
+
+ /* rtree goes into splay */
+
+ root = splay->ist_rtree;
+ splay->ist_root = root;
+ if (root != ITE_NULL) {
+ splay->ist_name = root->ite_name;
+ splay->ist_ltreep = &splay->ist_ltree;
+ splay->ist_rtreep = &splay->ist_rtree;
+ }
+ } else {
+ /* root stays in splay */
+
+ *splay->ist_ltreep = root->ite_lchild;
+ root->ite_lchild = ITE_NULL;
+
+ splay->ist_root = root;
+ splay->ist_name = name;
+ splay->ist_ltreep = &splay->ist_ltree;
+
+ /* ltree goes into small */
+
+ root = splay->ist_ltree;
+ small->ist_root = root;
+ if (root != ITE_NULL) {
+ small->ist_name = root->ite_name;
+ small->ist_ltreep = &small->ist_ltree;
+ small->ist_rtreep = &small->ist_rtree;
+ }
+ }
+ }
+
+ ist_unlock(splay);
+}
+
+/*
+ * Routine: ipc_splay_tree_join
+ * Purpose:
+ * Joins two splay trees. Merges the entries in "small",
+ * which must all be smaller than the entries in "splay",
+ * into "splay".
+ */
+
+void
+ipc_splay_tree_join(
+ ipc_splay_tree_t splay,
+ ipc_splay_tree_t small)
+{
+ ipc_tree_entry_t sroot;
+
+ /* pull entries out of small */
+
+ ist_lock(small);
+
+ sroot = small->ist_root;
+ if (sroot != ITE_NULL) {
+ ipc_splay_prim_assemble(sroot,
+ &small->ist_ltree, small->ist_ltreep,
+ &small->ist_rtree, small->ist_rtreep);
+ small->ist_root = ITE_NULL;
+ }
+
+ ist_unlock(small);
+
+ /* put entries, if any, into splay */
+
+ if (sroot != ITE_NULL) {
+ ipc_tree_entry_t root;
+
+ ist_lock(splay);
+
+ root = splay->ist_root;
+ if (root == ITE_NULL) {
+ root = sroot;
+ } else {
+ /* get smallest entry in splay tree to top */
+
+ if (splay->ist_name != MACH_PORT_SMALLEST) {
+ ipc_splay_prim_assemble(root,
+ &splay->ist_ltree, splay->ist_ltreep,
+ &splay->ist_rtree, splay->ist_rtreep);
+ ipc_splay_prim_lookup(MACH_PORT_SMALLEST,
+ root, &root,
+ &splay->ist_ltree, &splay->ist_ltreep,
+ &splay->ist_rtree, &splay->ist_rtreep);
+ }
+
+ ipc_splay_prim_assemble(root,
+ &splay->ist_ltree, splay->ist_ltreep,
+ &splay->ist_rtree, splay->ist_rtreep);
+
+ assert(root->ite_lchild == ITE_NULL);
+ assert(sroot->ite_name < root->ite_name);
+ root->ite_lchild = sroot;
+ }
+
+ splay->ist_root = root;
+ splay->ist_name = root->ite_name;
+ splay->ist_ltreep = &splay->ist_ltree;
+ splay->ist_rtreep = &splay->ist_rtree;
+
+ ist_unlock(splay);
+ }
+}
+
+/*
+ * Routine: ipc_splay_tree_bounds
+ * Purpose:
+ * Given a name, returns the largest value present
+ * in the tree that is smaller than or equal to the name,
+ * or ~0 if no such value exists. Similarly, returns
+ * the smallest value present that is greater than or
+ * equal to the name, or 0 if no such value exists.
+ *
+ * Hence, if
+ * lower = upper, then lower = name = upper
+ * and name is present in the tree
+ * lower = ~0 and upper = 0,
+ * then the tree is empty
+ * lower = ~0 and upper > 0, then name < upper
+ * and upper is smallest value in tree
+ * lower < ~0 and upper = 0, then lower < name
+ * and lower is largest value in tree
+ * lower < ~0 and upper > 0, then lower < name < upper
+ * and they are tight bounds on name
+ *
+ * (Note MACH_PORT_SMALLEST = 0 and MACH_PORT_LARGEST = ~0.)
+ */
+
+void
+ipc_splay_tree_bounds(
+ ipc_splay_tree_t splay,
+ mach_port_t name,
+ mach_port_t *lowerp,
+ mach_port_t *upperp)
+{
+ ipc_tree_entry_t root;
+
+ ist_lock(splay);
+
+ root = splay->ist_root;
+ if (root == ITE_NULL) {
+ *lowerp = MACH_PORT_LARGEST;
+ *upperp = MACH_PORT_SMALLEST;
+ } else {
+ mach_port_t rname;
+
+ if (splay->ist_name != name) {
+ ipc_splay_prim_assemble(root,
+ &splay->ist_ltree, splay->ist_ltreep,
+ &splay->ist_rtree, splay->ist_rtreep);
+ ipc_splay_prim_lookup(name, root, &root,
+ &splay->ist_ltree, &splay->ist_ltreep,
+ &splay->ist_rtree, &splay->ist_rtreep);
+ splay->ist_name = name;
+ splay->ist_root = root;
+ }
+
+ rname = root->ite_name;
+
+ /*
+ * OK, it's a hack. We convert the ltreep and rtreep
+ * pointers back into real entry pointers,
+ * so we can pick the names out of the entries.
+ */
+
+ if (rname <= name)
+ *lowerp = rname;
+ else if (splay->ist_ltreep == &splay->ist_ltree)
+ *lowerp = MACH_PORT_LARGEST;
+ else {
+ ipc_tree_entry_t entry;
+
+ entry = (ipc_tree_entry_t)
+ ((char *)splay->ist_ltreep -
+ ((char *)&root->ite_rchild -
+ (char *)root));
+ *lowerp = entry->ite_name;
+ }
+
+ if (rname >= name)
+ *upperp = rname;
+ else if (splay->ist_rtreep == &splay->ist_rtree)
+ *upperp = MACH_PORT_SMALLEST;
+ else {
+ ipc_tree_entry_t entry;
+
+ entry = (ipc_tree_entry_t)
+ ((char *)splay->ist_rtreep -
+ ((char *)&root->ite_lchild -
+ (char *)root));
+ *upperp = entry->ite_name;
+ }
+ }
+
+ ist_unlock(splay);
+}
+
+/*
+ * Routine: ipc_splay_traverse_start
+ * Routine: ipc_splay_traverse_next
+ * Routine: ipc_splay_traverse_finish
+ * Purpose:
+ * Perform a symmetric order traversal of a splay tree.
+ * Usage:
+ * for (entry = ipc_splay_traverse_start(splay);
+ * entry != ITE_NULL;
+ * entry = ipc_splay_traverse_next(splay, delete)) {
+ * do something with entry
+ * }
+ * ipc_splay_traverse_finish(splay);
+ *
+ * If "delete" is TRUE, then the current entry
+ * is removed from the tree and deallocated.
+ *
+ * During the traversal, the splay tree is locked.
+ */
+
+ipc_tree_entry_t
+ipc_splay_traverse_start(
+ ipc_splay_tree_t splay)
+{
+ ipc_tree_entry_t current, parent;
+
+ ist_lock(splay);
+
+ current = splay->ist_root;
+ if (current != ITE_NULL) {
+ ipc_splay_prim_assemble(current,
+ &splay->ist_ltree, splay->ist_ltreep,
+ &splay->ist_rtree, splay->ist_rtreep);
+
+ parent = ITE_NULL;
+
+ while (current->ite_lchild != ITE_NULL) {
+ ipc_tree_entry_t next;
+
+ next = current->ite_lchild;
+ current->ite_lchild = parent;
+ parent = current;
+ current = next;
+ }
+
+ splay->ist_ltree = current;
+ splay->ist_rtree = parent;
+ }
+
+ return current;
+}
+
+ipc_tree_entry_t
+ipc_splay_traverse_next(
+ ipc_splay_tree_t splay,
+ boolean_t delete)
+{
+ ipc_tree_entry_t current, parent;
+
+ /* pick up where traverse_entry left off */
+
+ current = splay->ist_ltree;
+ parent = splay->ist_rtree;
+ assert(current != ITE_NULL);
+
+ if (!delete)
+ goto traverse_right;
+
+ /* we must delete current and patch the tree */
+
+ if (current->ite_lchild == ITE_NULL) {
+ if (current->ite_rchild == ITE_NULL) {
+ /* like traverse_back, but with deletion */
+
+ if (parent == ITE_NULL) {
+ ite_free(current);
+
+ splay->ist_root = ITE_NULL;
+ return ITE_NULL;
+ }
+
+ if (current->ite_name < parent->ite_name) {
+ ite_free(current);
+
+ current = parent;
+ parent = current->ite_lchild;
+ current->ite_lchild = ITE_NULL;
+ goto traverse_entry;
+ } else {
+ ite_free(current);
+
+ current = parent;
+ parent = current->ite_rchild;
+ current->ite_rchild = ITE_NULL;
+ goto traverse_back;
+ }
+ } else {
+ ipc_tree_entry_t prev;
+
+ prev = current;
+ current = current->ite_rchild;
+ ite_free(prev);
+ goto traverse_left;
+ }
+ } else {
+ if (current->ite_rchild == ITE_NULL) {
+ ipc_tree_entry_t prev;
+
+ prev = current;
+ current = current->ite_lchild;
+ ite_free(prev);
+ goto traverse_back;
+ } else {
+ ipc_tree_entry_t prev;
+ ipc_tree_entry_t ltree, rtree;
+ ipc_tree_entry_t *ltreep, *rtreep;
+
+ /* replace current with largest of left children */
+
+ prev = current;
+ ipc_splay_prim_lookup(MACH_PORT_LARGEST,
+ current->ite_lchild, &current,
+ &ltree, &ltreep, &rtree, &rtreep);
+ ipc_splay_prim_assemble(current,
+ &ltree, ltreep, &rtree, rtreep);
+
+ assert(current->ite_rchild == ITE_NULL);
+ current->ite_rchild = prev->ite_rchild;
+ ite_free(prev);
+ goto traverse_right;
+ }
+ }
+ /*NOTREACHED*/
+
+ /*
+ * A state machine: for each entry, we
+ * 1) traverse left subtree
+ * 2) traverse the entry
+ * 3) traverse right subtree
+ * 4) traverse back to parent
+ */
+
+ traverse_left:
+ if (current->ite_lchild != ITE_NULL) {
+ ipc_tree_entry_t next;
+
+ next = current->ite_lchild;
+ current->ite_lchild = parent;
+ parent = current;
+ current = next;
+ goto traverse_left;
+ }
+
+ traverse_entry:
+ splay->ist_ltree = current;
+ splay->ist_rtree = parent;
+ return current;
+
+ traverse_right:
+ if (current->ite_rchild != ITE_NULL) {
+ ipc_tree_entry_t next;
+
+ next = current->ite_rchild;
+ current->ite_rchild = parent;
+ parent = current;
+ current = next;
+ goto traverse_left;
+ }
+
+ traverse_back:
+ if (parent == ITE_NULL) {
+ splay->ist_root = current;
+ return ITE_NULL;
+ }
+
+ if (current->ite_name < parent->ite_name) {
+ ipc_tree_entry_t prev;
+
+ prev = current;
+ current = parent;
+ parent = current->ite_lchild;
+ current->ite_lchild = prev;
+ goto traverse_entry;
+ } else {
+ ipc_tree_entry_t prev;
+
+ prev = current;
+ current = parent;
+ parent = current->ite_rchild;
+ current->ite_rchild = prev;
+ goto traverse_back;
+ }
+}
+
+void
+ipc_splay_traverse_finish(
+ ipc_splay_tree_t splay)
+{
+ ipc_tree_entry_t root;
+
+ root = splay->ist_root;
+ if (root != ITE_NULL) {
+ splay->ist_name = root->ite_name;
+ splay->ist_ltreep = &splay->ist_ltree;
+ splay->ist_rtreep = &splay->ist_rtree;
+ }
+
+ ist_unlock(splay);
+}
+
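For readers unfamiliar with the top-down splay that ipc_splay_prim_lookup and ipc_splay_prim_assemble implement, here is a self-contained sketch on a plain int-keyed tree, following the Sleator-Tarjan formulation cited in the comment at the top of the file. The node layout and all names are illustrative only, and unlike the kernel code this toy reassembles the left, middle, and right trees on every call instead of keeping them split between operations.

	#include <stdio.h>

	struct node { int key; struct node *l, *r; };

	static struct node *
	splay(struct node *t, int key)
	{
		struct node N, *l, *r, *y;

		if (t == 0)
			return 0;
		N.l = N.r = 0;
		l = r = &N;

		for (;;) {
			if (key < t->key) {
				if (t->l == 0)
					break;
				if (key < t->l->key) {
					y = t->l;		/* rotate right */
					t->l = y->r;
					y->r = t;
					t = y;
					if (t->l == 0)
						break;
				}
				r->l = t;			/* link right */
				r = t;
				t = t->l;
			} else if (key > t->key) {
				if (t->r == 0)
					break;
				if (key > t->r->key) {
					y = t->r;		/* rotate left */
					t->r = y->l;
					y->l = t;
					t = y;
					if (t->r == 0)
						break;
				}
				l->r = t;			/* link left */
				l = t;
				t = t->r;
			} else
				break;
		}

		/* assemble left, middle, and right trees around the found node */
		l->r = t->l;
		r->l = t->r;
		t->l = N.r;
		t->r = N.l;
		return t;
	}

	int main(void)
	{
		struct node a = { 1, 0, 0 }, b = { 2, 0, 0 }, c = { 3, 0, 0 };
		struct node *root;

		/* b is the root with children a and c; splaying for key 1
		   brings a to the root and keeps the tree a valid BST */
		b.l = &a;
		b.r = &c;

		root = splay(&b, 1);
		printf("root key = %d\n", root->key);	/* prints 1 */
		return 0;
	}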
diff --git a/ipc/ipc_splay.h b/ipc/ipc_splay.h
new file mode 100644
index 00000000..d3316ef8
--- /dev/null
+++ b/ipc/ipc_splay.h
@@ -0,0 +1,114 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_splay.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of primitive splay tree operations.
+ */
+
+#ifndef _IPC_IPC_SPLAY_H_
+#define _IPC_IPC_SPLAY_H_
+
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/macro_help.h>
+#include <ipc/ipc_entry.h>
+
+typedef struct ipc_splay_tree {
+ mach_port_t ist_name; /* name used in last lookup */
+ ipc_tree_entry_t ist_root; /* root of middle tree */
+ ipc_tree_entry_t ist_ltree; /* root of left tree */
+ ipc_tree_entry_t *ist_ltreep; /* pointer into left tree */
+ ipc_tree_entry_t ist_rtree; /* root of right tree */
+ ipc_tree_entry_t *ist_rtreep; /* pointer into right tree */
+} *ipc_splay_tree_t;
+
+#define ist_lock(splay) /* no locking */
+#define ist_unlock(splay) /* no locking */
+
+/* Initialize a raw splay tree */
+extern void ipc_splay_tree_init(
+ ipc_splay_tree_t splay);
+
+/* Pick a random entry in a splay tree */
+extern boolean_t ipc_splay_tree_pick(
+ ipc_splay_tree_t splay,
+ mach_port_t *namep,
+ ipc_tree_entry_t *entryp);
+
+/* Find an entry in a splay tree */
+extern ipc_tree_entry_t ipc_splay_tree_lookup(
+ ipc_splay_tree_t splay,
+ mach_port_t name);
+
+/* Insert a new entry into a splay tree */
+extern void ipc_splay_tree_insert(
+ ipc_splay_tree_t splay,
+ mach_port_t name,
+ ipc_tree_entry_t entry);
+
+/* Delete an entry from a splay tree */
+extern void ipc_splay_tree_delete(
+ ipc_splay_tree_t splay,
+ mach_port_t name,
+ ipc_tree_entry_t entry);
+
+/* Split a splay tree */
+extern void ipc_splay_tree_split(
+	ipc_splay_tree_t splay,
+	mach_port_t name,
+	ipc_splay_tree_t small);
+
+/* Join two splay trees */
+extern void ipc_splay_tree_join(
+ ipc_splay_tree_t splay,
+ ipc_splay_tree_t small);
+
+/* Do a bounded splay tree lookup */
+extern void ipc_splay_tree_bounds(
+ ipc_splay_tree_t splay,
+ mach_port_t name,
+ mach_port_t *lowerp,
+ mach_port_t *upperp);
+
+/* Initialize a symmetric order traversal of a splay tree */
+extern ipc_tree_entry_t ipc_splay_traverse_start(
+ ipc_splay_tree_t splay);
+
+/* Return the next entry in a symmetric order traversal of a splay tree */
+extern ipc_tree_entry_t ipc_splay_traverse_next(
+ ipc_splay_tree_t splay,
+ boolean_t delete);
+
+/* Terminate a symmetric order traversal of a splay tree */
+extern void ipc_splay_traverse_finish(
+ ipc_splay_tree_t splay);
+
+#endif /* _IPC_IPC_SPLAY_H_ */
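A hypothetical caller of the traversal interface above, pruning every entry whose name falls below a cutoff; the "delete" flag decides, per entry, whether ipc_splay_traverse_next removes and frees the entry that was just visited.

	void
	example_prune(splay, cutoff)
		ipc_splay_tree_t splay;
		mach_port_t cutoff;
	{
		ipc_tree_entry_t entry;
		boolean_t delete = FALSE;

		for (entry = ipc_splay_traverse_start(splay);
		     entry != ITE_NULL;
		     entry = ipc_splay_traverse_next(splay, delete)) {
			/* decide, while the entry is still valid, whether
			   the next step should remove and free it */
			delete = entry->ite_name < cutoff;
		}
		ipc_splay_traverse_finish(splay);
	}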
diff --git a/ipc/ipc_table.c b/ipc/ipc_table.c
new file mode 100644
index 00000000..e5723586
--- /dev/null
+++ b/ipc/ipc_table.c
@@ -0,0 +1,205 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_table.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate tables of IPC capabilities.
+ */
+
+#include <mach/kern_return.h>
+#include <mach/vm_param.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_entry.h>
+#include <kern/kalloc.h>
+#include <vm/vm_kern.h>
+
+/*
+ * Forward declarations
+ */
+void ipc_table_fill(
+ ipc_table_size_t its,
+ unsigned int num,
+ unsigned int min,
+ vm_size_t elemsize);
+
+/*
+ * We borrow the kalloc map, rather than creating
+ * yet another submap of the kernel map.
+ */
+
+extern vm_map_t kalloc_map;
+
+ipc_table_size_t ipc_table_entries;
+unsigned int ipc_table_entries_size = 512;
+
+ipc_table_size_t ipc_table_dnrequests;
+unsigned int ipc_table_dnrequests_size = 64;
+
+void
+ipc_table_fill(
+ ipc_table_size_t its, /* array to fill */
+ unsigned int num, /* size of array */
+ unsigned int min, /* at least this many elements */
+ vm_size_t elemsize) /* size of elements */
+{
+ unsigned int index;
+ vm_size_t minsize = min * elemsize;
+ vm_size_t size;
+ vm_size_t incrsize;
+
+ /* first use powers of two, up to the page size */
+
+ for (index = 0, size = 1;
+ (index < num) && (size < PAGE_SIZE);
+ size <<= 1) {
+ if (size >= minsize) {
+ its[index].its_size = size / elemsize;
+ index++;
+ }
+ }
+
+ /* then increments of a page, then two pages, etc. */
+
+ for (incrsize = PAGE_SIZE; index < num;) {
+ unsigned int period;
+
+ for (period = 0;
+ (period < 15) && (index < num);
+ period++, size += incrsize) {
+ if (size >= minsize) {
+ its[index].its_size = size / elemsize;
+ index++;
+ }
+ }
+ if (incrsize < (PAGE_SIZE << 3))
+ incrsize <<= 1;
+ }
+}
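
The growth schedule that ipc_table_fill() computes is easiest to see by replaying the same loop in user space. The sketch below is illustrative only: the 4096-byte page, 8-byte element size, and minimum of 4 are assumptions made for the example, not values taken from the kernel configuration.

/* Illustrative user-space replay of the ipc_table_fill() growth
 * schedule.  The page size, element size and minimum are assumed
 * values for the example. */
#include <stdio.h>

#define EX_PAGE_SIZE	4096u

int main(void)
{
	unsigned int sizes[24];
	const unsigned int num = 24, min = 4, elemsize = 8;
	const unsigned int minsize = min * elemsize;
	unsigned int index = 0, size, incrsize, period;

	/* first use powers of two, up to the page size */
	for (size = 1; index < num && size < EX_PAGE_SIZE; size <<= 1)
		if (size >= minsize)
			sizes[index++] = size / elemsize;

	/* then increments of a page, then two pages, etc. */
	for (incrsize = EX_PAGE_SIZE; index < num; ) {
		for (period = 0; period < 15 && index < num;
		     period++, size += incrsize)
			if (size >= minsize)
				sizes[index++] = size / elemsize;
		if (incrsize < (EX_PAGE_SIZE << 3))
			incrsize <<= 1;
	}

	/* prints 4 8 16 32 64 128 256 512 1024 1536 2048 ... */
	for (index = 0; index < num; index++)
		printf("%u%c", sizes[index], index + 1 == num ? '\n' : ' ');
	return 0;
}

ipc_table_init() below then patches the sentinels by hand: the entries array repeats its last size so growth stops there, and the dead-name-request array ends with a zero-size element.
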
+
+void
+ipc_table_init(void)
+{
+ ipc_table_entries = (ipc_table_size_t)
+ kalloc(sizeof(struct ipc_table_size) *
+ ipc_table_entries_size);
+ assert(ipc_table_entries != ITS_NULL);
+
+ ipc_table_fill(ipc_table_entries, ipc_table_entries_size - 1,
+ 4, sizeof(struct ipc_entry));
+
+ /* the last two elements should have the same size */
+
+ ipc_table_entries[ipc_table_entries_size - 1].its_size =
+ ipc_table_entries[ipc_table_entries_size - 2].its_size;
+
+
+ ipc_table_dnrequests = (ipc_table_size_t)
+ kalloc(sizeof(struct ipc_table_size) *
+ ipc_table_dnrequests_size);
+ assert(ipc_table_dnrequests != ITS_NULL);
+
+ ipc_table_fill(ipc_table_dnrequests, ipc_table_dnrequests_size - 1,
+ 2, sizeof(struct ipc_port_request));
+
+ /* the last element should have zero size */
+
+ ipc_table_dnrequests[ipc_table_dnrequests_size - 1].its_size = 0;
+}
+
+/*
+ * Routine: ipc_table_alloc
+ * Purpose:
+ * Allocate a table.
+ * Conditions:
+ * May block.
+ */
+
+vm_offset_t
+ipc_table_alloc(
+ vm_size_t size)
+{
+ vm_offset_t table;
+
+ if (size < PAGE_SIZE)
+ table = kalloc(size);
+ else
+ if (kmem_alloc(kalloc_map, &table, size) != KERN_SUCCESS)
+ table = 0;
+
+ return table;
+}
+
+/*
+ * Routine: ipc_table_realloc
+ * Purpose:
+ * Reallocate a big table.
+ *
+ * The new table remaps the old table,
+ * so copying is not necessary.
+ * Conditions:
+ * Only works for page-size or bigger tables.
+ * May block.
+ */
+
+vm_offset_t
+ipc_table_realloc(
+ vm_size_t old_size,
+ vm_offset_t old_table,
+ vm_size_t new_size)
+{
+ vm_offset_t new_table;
+
+ if (kmem_realloc(kalloc_map, old_table, old_size,
+ &new_table, new_size) != KERN_SUCCESS)
+ new_table = 0;
+
+ return new_table;
+}
+
+/*
+ * Routine: ipc_table_free
+ * Purpose:
+ * Free a table allocated with ipc_table_alloc or
+ * ipc_table_realloc.
+ * Conditions:
+ * May block.
+ */
+
+void
+ipc_table_free(
+ vm_size_t size,
+ vm_offset_t table)
+{
+ if (size < PAGE_SIZE)
+ kfree(table, size);
+ else
+ kmem_free(kalloc_map, table, size);
+}
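
The allocation split above (kalloc() for sub-page tables, kmem_alloc() on the borrowed kalloc map for anything page-sized or larger) has a familiar user-space analogue, sketched below with malloc() and mmap() standing in for the kernel allocators; the helper names are invented for the example.

/* User-space analogue of the ipc_table_alloc()/ipc_table_free() size
 * split: sub-page tables from the general-purpose allocator,
 * page-sized or larger ones straight from the page allocator.
 * table_alloc/table_free are hypothetical names. */
#define _DEFAULT_SOURCE		/* MAP_ANONYMOUS on glibc */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void *table_alloc(size_t size)
{
	void *p;

	if (size < (size_t) sysconf(_SC_PAGESIZE))
		return malloc(size);		/* kalloc() path */

	p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);	/* kmem_alloc() path */
	return p == MAP_FAILED ? NULL : p;
}

static void table_free(void *table, size_t size)
{
	if (size < (size_t) sysconf(_SC_PAGESIZE))
		free(table);
	else
		munmap(table, size);
}

int main(void)
{
	void *small = table_alloc(256);
	void *big = table_alloc(16 * 4096);

	memset(big, 0, 16 * 4096);
	table_free(big, 16 * 4096);
	table_free(small, 256);
	return 0;
}

The realloc path matters because big tables are grown while unlocked: kmem_realloc() remaps the old pages into a larger region instead of copying them, which is why ipc_table_realloc() is documented as working only for page-size or bigger tables.
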
diff --git a/ipc/ipc_table.h b/ipc/ipc_table.h
new file mode 100644
index 00000000..3bfcc46c
--- /dev/null
+++ b/ipc/ipc_table.h
@@ -0,0 +1,138 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_table.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for tables, used for IPC capabilities (ipc_entry_t)
+ * and dead-name requests (ipc_port_request_t).
+ */
+
+#ifndef _IPC_IPC_TABLE_H_
+#define _IPC_IPC_TABLE_H_
+
+#include <mach/boolean.h>
+#include <mach/vm_param.h>
+
+/*
+ * The is_table_next field of an ipc_space_t points to
+ * an ipc_table_size structure. These structures must
+ * be elements of an array, ipc_table_entries.
+ *
+ * The array must end with two elements with the same its_size value.
+ * Except for the terminating element, the its_size values must
+ * be strictly increasing. The largest (last) its_size value
+ * must be less than or equal to MACH_PORT_INDEX(MACH_PORT_DEAD).
+ * This ensures that
+ * 1) MACH_PORT_INDEX(MACH_PORT_DEAD) isn't a valid index
+ * in the table, so ipc_entry_get won't allocate it.
+ * 2) MACH_PORT_MAKE(index+1, 0) and MACH_PORT_MAKE(size, 0)
+ * won't ever overflow.
+ *
+ *
+ * The ipr_size field of the first element in a table of
+ * dead-name requests (ipc_port_request_t) points to the
+ * ipc_table_size structure. The structures must be elements
+ * of ipc_table_dnrequests. ipc_table_dnrequests must end
+ * with an element with zero its_size, and except for this last
+ * element, the its_size values must be strictly increasing.
+ *
+ * The is_table_next field points to the ipc_table_size structure
+ * for the next larger size of table, not the one currently in use.
+ * The ipr_size field points to the currently used ipc_table_size.
+ */
+
+typedef unsigned int ipc_table_index_t; /* index into tables */
+typedef unsigned int ipc_table_elems_t; /* size of tables */
+
+typedef struct ipc_table_size {
+ ipc_table_elems_t its_size; /* number of elements in table */
+} *ipc_table_size_t;
+
+#define ITS_NULL ((ipc_table_size_t) 0)
+
+extern ipc_table_size_t ipc_table_entries;
+extern ipc_table_size_t ipc_table_dnrequests;
+
+extern void
+ipc_table_init(void);
+
+/*
+ * Note that ipc_table_alloc, ipc_table_realloc, and ipc_table_free
+ * all potentially use the VM system. Hence simple locks can't
+ * be held across them.
+ *
+ * We can't use a copying realloc, because the realloc happens
+ * with the data unlocked. ipc_table_realloc remaps the data,
+ * so it is OK.
+ */
+
+/* Allocate a table */
+extern vm_offset_t ipc_table_alloc(
+ vm_size_t size);
+
+/* Reallocate a big table */
+extern vm_offset_t ipc_table_realloc(
+ vm_size_t old_size,
+ vm_offset_t old_table,
+ vm_size_t new_size);
+
+/* Free a table */
+extern void ipc_table_free(
+ vm_size_t size,
+ vm_offset_t table);
+
+#define it_entries_alloc(its) \
+ ((ipc_entry_t) \
+ ipc_table_alloc((its)->its_size * sizeof(struct ipc_entry)))
+
+#define it_entries_reallocable(its) \
+ (((its)->its_size * sizeof(struct ipc_entry)) >= PAGE_SIZE)
+
+#define it_entries_realloc(its, table, nits) \
+ ((ipc_entry_t) \
+ ipc_table_realloc((its)->its_size * sizeof(struct ipc_entry), \
+ (vm_offset_t)(table), \
+ (nits)->its_size * sizeof(struct ipc_entry)))
+
+#define it_entries_free(its, table) \
+ ipc_table_free((its)->its_size * sizeof(struct ipc_entry), \
+ (vm_offset_t)(table))
+
+#define it_dnrequests_alloc(its) \
+ ((ipc_port_request_t) \
+ ipc_table_alloc((its)->its_size * \
+ sizeof(struct ipc_port_request)))
+
+#define it_dnrequests_free(its, table) \
+ ipc_table_free((its)->its_size * \
+ sizeof(struct ipc_port_request), \
+ (vm_offset_t)(table))
+
+#endif /* _IPC_IPC_TABLE_H_ */
diff --git a/ipc/ipc_target.c b/ipc/ipc_target.c
new file mode 100644
index 00000000..b791db24
--- /dev/null
+++ b/ipc/ipc_target.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ */
+/*
+ * File: ipc_target.c
+ *
+ * Implementation for common part of IPC ports and port sets
+ * representing a target of messages and migrating RPCs.
+ */
+
+#include "sched_prim.h"
+#include "ipc_target.h"
+
+void
+ipc_target_init(struct ipc_target *ipt, mach_port_t name)
+{
+ ipt->ipt_name = name;
+ ipc_mqueue_init(&ipt->ipt_messages);
+
+#ifdef MIGRATING_THREADS
+ ipt->ipt_type = IPT_TYPE_MESSAGE_RPC;
+ ipt->ipt_acts = 0;
+
+ ipc_target_machine_init(ipt);
+#endif
+}
+
+void
+ipc_target_terminate(struct ipc_target *ipt)
+{
+}
+
+#ifdef MIGRATING_THREADS
+struct Act *
+ipc_target_block(struct ipc_target *ipt)
+{
+ struct Act *act;
+
+ ipt_lock(ipt);
+ while ((act = ipt->ipt_acts) == 0) {
+ /* XXX mp unsafe */
+ ipt->ipt_waiting = 1;
+ ipt_unlock(ipt);
+ thread_wait((int)&ipt->ipt_acts, FALSE);
+ ipt_lock(ipt);
+ }
+ ipt->ipt_acts = act->ipt_next;
+ ipt_unlock(ipt);
+
+ return act;
+}
+
+void
+ipc_target_wakeup(struct ipc_target *ipt)
+{
+ ipt_lock(ipt);
+ if (ipt->ipt_waiting) {
+ thread_wakeup((int)&ipt->ipt_acts);
+ ipt->ipt_waiting = 0;
+ }
+ ipt_unlock(ipt);
+}
+#endif /* MIGRATING_THREADS */
+
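
In the MIGRATING_THREADS branch above, ipc_target_block() sleeps until the activation pool is non-empty and ipc_target_wakeup() wakes the sleeper. A minimal pthread analogue of that discipline is sketched below, with a mutex and condition variable standing in for ipt_lock()/thread_wait()/thread_wakeup(); all names in the sketch are invented.

/* pthread analogue of the activation pool in ipc_target_block() and
 * ipc_target_wakeup(); the names here are hypothetical. */
#include <assert.h>
#include <pthread.h>
#include <stddef.h>

struct act { struct act *next; };

struct pool {
	pthread_mutex_t lock;		/* stands in for ipt_lock() */
	pthread_cond_t nonempty;	/* stands in for ipt_waiting + thread_wait() */
	struct act *acts;		/* available activations */
};

static struct act *pool_get(struct pool *p)
{
	struct act *a;

	pthread_mutex_lock(&p->lock);
	while ((a = p->acts) == NULL)	/* nothing available: sleep */
		pthread_cond_wait(&p->nonempty, &p->lock);
	p->acts = a->next;		/* pop one activation */
	pthread_mutex_unlock(&p->lock);
	return a;
}

static void pool_put(struct pool *p, struct act *a)
{
	pthread_mutex_lock(&p->lock);
	a->next = p->acts;		/* return it to the pool... */
	p->acts = a;
	pthread_cond_signal(&p->nonempty);	/* ...and wake a waiter */
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER,
			  PTHREAD_COND_INITIALIZER, NULL };
	struct act a = { NULL };

	pool_put(&p, &a);
	assert(pool_get(&p) == &a);
	return 0;
}

The kernel version keeps an explicit ipt_waiting flag instead of a condition variable, and its own comment flags the flag handling as not MP-safe.
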
diff --git a/ipc/ipc_target.h b/ipc/ipc_target.h
new file mode 100644
index 00000000..a66e6875
--- /dev/null
+++ b/ipc/ipc_target.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ */
+/*
+ * File: ipc_target.h
+ *
+ * Common part of IPC ports and port sets
+ * representing a target of messages and migrating RPCs.
+ */
+
+#ifndef _IPC_IPC_RECEIVER_H_
+#define _IPC_IPC_RECEIVER_H_
+
+#include "ipc_mqueue.h"
+#include "ipc_object.h"
+#include <mach/rpc.h>
+
+typedef struct ipc_target {
+
+ struct ipc_object ipt_object;
+
+ mach_port_t ipt_name;
+ struct ipc_mqueue ipt_messages;
+
+#ifdef MIGRATING_THREADS
+ /*** Migrating RPC stuff ***/
+
+ int ipt_type;
+
+ /* User entry info for migrating RPC */
+ rpc_info_t ipt_rpcinfo;
+
+ /* List of available activations, all active but not in use. */
+ struct Act *ipt_acts;
+
+ /* TRUE if someone is waiting for an activation from this pool. */
+ int ipt_waiting;
+#endif /* MIGRATING_THREADS */
+
+} *ipc_target_t;
+
+#define IPT_TYPE_MESSAGE_RPC 1
+#define IPT_TYPE_MIGRATE_RPC 2
+
+void ipc_target_init(struct ipc_target *ipt, mach_port_t name);
+void ipc_target_terminate(struct ipc_target *ipt);
+
+#define ipt_lock(ipt) io_lock(&(ipt)->ipt_object)
+#define ipt_unlock(ipt) io_unlock(&(ipt)->ipt_object)
+#define ipt_reference(ipt) io_reference(&(ipt)->ipt_object)
+#define ipt_release(ipt) io_release(&(ipt)->ipt_object)
+#define ipt_check_unlock(ipt) io_check_unlock(&(ipt)->ipt_object)
+
+#endif /* _IPC_IPC_RECEIVER_H_ */
diff --git a/ipc/ipc_thread.c b/ipc/ipc_thread.c
new file mode 100644
index 00000000..1e738a59
--- /dev/null
+++ b/ipc/ipc_thread.c
@@ -0,0 +1,107 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_thread.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * IPC operations on threads.
+ */
+
+#include <kern/assert.h>
+#include <ipc/ipc_thread.h>
+
+/*
+ * Routine: ipc_thread_enqueue
+ * Purpose:
+ * Enqueue a thread.
+ */
+
+void
+ipc_thread_enqueue(
+ ipc_thread_queue_t queue,
+ ipc_thread_t thread)
+{
+ ipc_thread_enqueue_macro(queue, thread);
+}
+
+/*
+ * Routine: ipc_thread_dequeue
+ * Purpose:
+ * Dequeue and return a thread.
+ */
+
+ipc_thread_t
+ipc_thread_dequeue(
+ ipc_thread_queue_t queue)
+{
+ ipc_thread_t first;
+
+ first = ipc_thread_queue_first(queue);
+
+ if (first != ITH_NULL)
+ ipc_thread_rmqueue_first_macro(queue, first);
+
+ return first;
+}
+
+/*
+ * Routine: ipc_thread_rmqueue
+ * Purpose:
+ * Pull a thread out of a queue.
+ */
+
+void
+ipc_thread_rmqueue(
+ ipc_thread_queue_t queue,
+ ipc_thread_t thread)
+{
+ ipc_thread_t next, prev;
+
+ assert(queue->ithq_base != ITH_NULL);
+
+ next = thread->ith_next;
+ prev = thread->ith_prev;
+
+ if (next == thread) {
+ assert(prev == thread);
+ assert(queue->ithq_base == thread);
+
+ queue->ithq_base = ITH_NULL;
+ } else {
+ if (queue->ithq_base == thread)
+ queue->ithq_base = next;
+
+ next->ith_prev = prev;
+ prev->ith_next = next;
+ ipc_thread_links_init(thread);
+ }
+}
diff --git a/ipc/ipc_thread.h b/ipc/ipc_thread.h
new file mode 100644
index 00000000..e8bfe4a8
--- /dev/null
+++ b/ipc/ipc_thread.h
@@ -0,0 +1,123 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_thread.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for the IPC component of threads.
+ */
+
+#ifndef _IPC_IPC_THREAD_H_
+#define _IPC_IPC_THREAD_H_
+
+#include <kern/thread.h>
+
+typedef thread_t ipc_thread_t;
+
+#define ITH_NULL THREAD_NULL
+
+#define ith_lock_init(thread) simple_lock_init(&(thread)->ith_lock_data)
+#define ith_lock(thread) simple_lock(&(thread)->ith_lock_data)
+#define ith_unlock(thread) simple_unlock(&(thread)->ith_lock_data)
+
+typedef struct ipc_thread_queue {
+ ipc_thread_t ithq_base;
+} *ipc_thread_queue_t;
+
+#define ITHQ_NULL ((ipc_thread_queue_t) 0)
+
+
+#define ipc_thread_links_init(thread) \
+MACRO_BEGIN \
+ (thread)->ith_next = (thread); \
+ (thread)->ith_prev = (thread); \
+MACRO_END
+
+#define ipc_thread_queue_init(queue) \
+MACRO_BEGIN \
+ (queue)->ithq_base = ITH_NULL; \
+MACRO_END
+
+#define ipc_thread_queue_empty(queue) ((queue)->ithq_base == ITH_NULL)
+
+#define ipc_thread_queue_first(queue) ((queue)->ithq_base)
+
+#define ipc_thread_rmqueue_first_macro(queue, thread) \
+MACRO_BEGIN \
+ register ipc_thread_t _next; \
+ \
+ assert((queue)->ithq_base == (thread)); \
+ \
+ _next = (thread)->ith_next; \
+ if (_next == (thread)) { \
+ assert((thread)->ith_prev == (thread)); \
+ (queue)->ithq_base = ITH_NULL; \
+ } else { \
+ register ipc_thread_t _prev = (thread)->ith_prev; \
+ \
+ (queue)->ithq_base = _next; \
+ _next->ith_prev = _prev; \
+ _prev->ith_next = _next; \
+ ipc_thread_links_init(thread); \
+ } \
+MACRO_END
+
+#define ipc_thread_enqueue_macro(queue, thread) \
+MACRO_BEGIN \
+ register ipc_thread_t _first = (queue)->ithq_base; \
+ \
+ if (_first == ITH_NULL) { \
+ (queue)->ithq_base = (thread); \
+ assert((thread)->ith_next == (thread)); \
+ assert((thread)->ith_prev == (thread)); \
+ } else { \
+ register ipc_thread_t _last = _first->ith_prev; \
+ \
+ (thread)->ith_next = _first; \
+ (thread)->ith_prev = _last; \
+ _first->ith_prev = (thread); \
+ _last->ith_next = (thread); \
+ } \
+MACRO_END
+
+/* Enqueue a thread on a message queue */
+extern void ipc_thread_enqueue(
+ ipc_thread_queue_t queue,
+ ipc_thread_t thread);
+
+/* Dequeue a thread from a message queue */
+extern ipc_thread_t ipc_thread_dequeue(
+ ipc_thread_queue_t queue);
+
+/* Remove a thread from a message queue */
+extern void ipc_thread_rmqueue(
+ ipc_thread_queue_t queue,
+ ipc_thread_t thread);
+
+#endif /* _IPC_IPC_THREAD_H_ */
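
The queue macros above keep threads on a circular, doubly-linked ring: an idle element points at itself, and the queue header points at the first element, whose ith_prev link reaches the last. The self-contained sketch below reproduces that shape with plain structs; the type and function names are invented for illustration.

/* Self-contained sketch of the circular queue discipline used by
 * ipc_thread_enqueue_macro() and ipc_thread_rmqueue().  Names are
 * invented; this only illustrates the list shape. */
#include <assert.h>
#include <stddef.h>

struct elem { struct elem *next, *prev; };
struct queue { struct elem *base; };

static void links_init(struct elem *e) { e->next = e->prev = e; }

static void enqueue(struct queue *q, struct elem *e)
{
	if (q->base == NULL) {
		q->base = e;			/* first element: stays self-linked */
	} else {
		struct elem *first = q->base, *last = first->prev;

		e->next = first;
		e->prev = last;
		first->prev = e;
		last->next = e;
	}
}

static void rmqueue(struct queue *q, struct elem *e)
{
	if (e->next == e) {			/* only element */
		q->base = NULL;
	} else {
		if (q->base == e)
			q->base = e->next;
		e->next->prev = e->prev;
		e->prev->next = e->next;
		links_init(e);			/* leave it self-linked */
	}
}

int main(void)
{
	struct queue q = { NULL };
	struct elem a, b;

	links_init(&a);
	links_init(&b);
	enqueue(&q, &a);
	enqueue(&q, &b);
	rmqueue(&q, &a);
	assert(q.base == &b && b.next == &b);
	rmqueue(&q, &b);
	assert(q.base == NULL);
	return 0;
}
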
diff --git a/ipc/ipc_types.h b/ipc/ipc_types.h
new file mode 100644
index 00000000..c8f0d0b0
--- /dev/null
+++ b/ipc/ipc_types.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+
+#ifndef _IPC_TYPES_H_
+#define _IPC_TYPES_H_
+
+typedef struct ipc_space *ipc_space_t;
+typedef struct ipc_port *ipc_port_t;
+
+#endif /* _IPC_TYPES_H_ */
diff --git a/ipc/mach_debug.c b/ipc/mach_debug.c
new file mode 100644
index 00000000..cd8fad04
--- /dev/null
+++ b/ipc/mach_debug.c
@@ -0,0 +1,618 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_debug.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Exported kernel calls. See mach_debug/mach_debug.defs.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_param.h>
+#include <mach_debug/ipc_info.h>
+#include <mach_debug/hash_info.h>
+#include <kern/host.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_right.h>
+
+
+
+/*
+ * Routine: mach_port_get_srights [kernel call]
+ * Purpose:
+ * Retrieve the number of extant send rights
+ * that a receive right has.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved number of send rights.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_get_srights(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_port_rights_t *srightsp)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+ mach_port_rights_t srights;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ srights = port->ip_srights;
+ ip_unlock(port);
+
+ *srightsp = srights;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: host_ipc_hash_info
+ * Purpose:
+ * Return information about the global reverse hash table.
+ * Conditions:
+ * Nothing locked. Obeys CountInOut protocol.
+ * Returns:
+ * KERN_SUCCESS Returned information.
+ * KERN_INVALID_HOST The host is null.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+host_ipc_hash_info(
+ host_t host,
+ hash_info_bucket_array_t *infop,
+ mach_msg_type_number_t *countp)
+{
+ vm_offset_t addr;
+ vm_size_t size;
+ hash_info_bucket_t *info;
+ unsigned int potential, actual;
+ kern_return_t kr;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ /* start with in-line data */
+
+ info = *infop;
+ potential = *countp;
+
+ for (;;) {
+ actual = ipc_hash_info(info, potential);
+ if (actual <= potential)
+ break;
+
+ /* allocate more memory */
+
+ if (info != *infop)
+ kmem_free(ipc_kernel_map, addr, size);
+
+ size = round_page(actual * sizeof *info);
+ kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
+ if (kr != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+
+ info = (hash_info_bucket_t *) addr;
+ potential = size/sizeof *info;
+ }
+
+ if (info == *infop) {
+ /* data fit in-line; nothing to deallocate */
+
+ *countp = actual;
+ } else if (actual == 0) {
+ kmem_free(ipc_kernel_map, addr, size);
+
+ *countp = 0;
+ } else {
+ vm_map_copy_t copy;
+ vm_size_t used;
+
+ used = round_page(actual * sizeof *info);
+
+ if (used != size)
+ kmem_free(ipc_kernel_map, addr + used, size - used);
+
+ kr = vm_map_copyin(ipc_kernel_map, addr, used,
+ TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *infop = (hash_info_bucket_t *) copy;
+ *countp = actual;
+ }
+
+ return KERN_SUCCESS;
+}
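
The loop in host_ipc_hash_info() is the CountInOut pattern named in its header comment: try the caller's in-line buffer, and if the producer reports more records than fit, allocate a larger buffer and ask again. The user-space sketch below shows the same loop shape; get_records() and collect() are hypothetical stand-ins for ipc_hash_info() and the surrounding buffer management.

/* User-space sketch of the CountInOut retry loop.  get_records()
 * reports how many records exist and fills in at most 'room' of
 * them; both helpers are hypothetical. */
#include <assert.h>
#include <stdlib.h>

static unsigned int get_records(int *buf, unsigned int room)
{
	const unsigned int total = 10;	/* pretend there are 10 records */
	unsigned int i;

	for (i = 0; i < total && i < room; i++)
		buf[i] = (int) i;
	return total;
}

static int *collect(int *inline_buf, unsigned int inline_room,
		    unsigned int *countp)
{
	int *buf = inline_buf;
	unsigned int room = inline_room, actual;

	for (;;) {
		actual = get_records(buf, room);
		if (actual <= room)
			break;			/* everything fit */
		if (buf != inline_buf)
			free(buf);		/* previous attempt too small */
		room = actual;
		buf = malloc(room * sizeof *buf);
		if (buf == NULL)
			return NULL;
	}
	*countp = actual;
	return buf;		/* either inline_buf or a heap buffer */
}

int main(void)
{
	int small[4];
	unsigned int count;
	int *result = collect(small, 4, &count);

	assert(result != NULL && result != small && count == 10);
	free(result);
	return 0;
}

In the kernel version the grown buffer lives in ipc_kernel_map and is handed back to the caller as a vm_map_copy_t rather than returned directly; host_ipc_marequest_info() and mach_port_space_info() below repeat the same pattern.
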
+
+/*
+ * Routine: host_ipc_marequest_info
+ * Purpose:
+ * Return information about the marequest hash table.
+ * Conditions:
+ * Nothing locked. Obeys CountInOut protocol.
+ * Returns:
+ * KERN_SUCCESS Returned information.
+ * KERN_INVALID_HOST The host is null.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+host_ipc_marequest_info(
+ host_t host,
+ unsigned int *maxp,
+ hash_info_bucket_array_t *infop,
+ unsigned int *countp)
+{
+ vm_offset_t addr;
+ vm_size_t size = 0; /* '=0' to shut up lint */
+ hash_info_bucket_t *info;
+ unsigned int potential, actual;
+ kern_return_t kr;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ /* start with in-line data */
+
+ info = *infop;
+ potential = *countp;
+
+ for (;;) {
+ actual = ipc_marequest_info(maxp, info, potential);
+ if (actual <= potential)
+ break;
+
+ /* allocate more memory */
+
+ if (info != *infop)
+ kmem_free(ipc_kernel_map, addr, size);
+
+ size = round_page(actual * sizeof *info);
+ kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
+ if (kr != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+
+ info = (hash_info_bucket_t *) addr;
+ potential = size/sizeof *info;
+ }
+
+ if (info == *infop) {
+ /* data fit in-line; nothing to deallocate */
+
+ *countp = actual;
+ } else if (actual == 0) {
+ kmem_free(ipc_kernel_map, addr, size);
+
+ *countp = 0;
+ } else {
+ vm_map_copy_t copy;
+ vm_size_t used;
+
+ used = round_page(actual * sizeof *info);
+
+ if (used != size)
+ kmem_free(ipc_kernel_map, addr + used, size - used);
+
+ kr = vm_map_copyin(ipc_kernel_map, addr, used,
+ TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *infop = (hash_info_bucket_t *) copy;
+ *countp = actual;
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_space_info
+ * Purpose:
+ * Returns information about an IPC space.
+ * Conditions:
+ * Nothing locked. Obeys CountInOut protocol.
+ * Returns:
+ * KERN_SUCCESS Returned information.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_space_info(
+ ipc_space_t space,
+ ipc_info_space_t *infop,
+ ipc_info_name_array_t *tablep,
+ mach_msg_type_number_t *tableCntp,
+ ipc_info_tree_name_array_t *treep,
+ mach_msg_type_number_t *treeCntp)
+{
+ ipc_info_name_t *table_info;
+ unsigned int table_potential, table_actual;
+ vm_offset_t table_addr;
+ vm_size_t table_size;
+ ipc_info_tree_name_t *tree_info;
+ unsigned int tree_potential, tree_actual;
+ vm_offset_t tree_addr;
+ vm_size_t tree_size;
+ ipc_tree_entry_t tentry;
+ ipc_entry_t table;
+ ipc_entry_num_t tsize;
+ mach_port_index_t index;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ /* start with in-line memory */
+
+ table_info = *tablep;
+ table_potential = *tableCntp;
+ tree_info = *treep;
+ tree_potential = *treeCntp;
+
+ for (;;) {
+ is_read_lock(space);
+ if (!space->is_active) {
+ is_read_unlock(space);
+ if (table_info != *tablep)
+ kmem_free(ipc_kernel_map,
+ table_addr, table_size);
+ if (tree_info != *treep)
+ kmem_free(ipc_kernel_map,
+ tree_addr, tree_size);
+ return KERN_INVALID_TASK;
+ }
+
+ table_actual = space->is_table_size;
+ tree_actual = space->is_tree_total;
+
+ if ((table_actual <= table_potential) &&
+ (tree_actual <= tree_potential))
+ break;
+
+ is_read_unlock(space);
+
+ if (table_actual > table_potential) {
+ if (table_info != *tablep)
+ kmem_free(ipc_kernel_map,
+ table_addr, table_size);
+
+ table_size = round_page(table_actual *
+ sizeof *table_info);
+ kr = kmem_alloc(ipc_kernel_map,
+ &table_addr, table_size);
+ if (kr != KERN_SUCCESS) {
+ if (tree_info != *treep)
+ kmem_free(ipc_kernel_map,
+ tree_addr, tree_size);
+
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ table_info = (ipc_info_name_t *) table_addr;
+ table_potential = table_size/sizeof *table_info;
+ }
+
+ if (tree_actual > tree_potential) {
+ if (tree_info != *treep)
+ kmem_free(ipc_kernel_map,
+ tree_addr, tree_size);
+
+ tree_size = round_page(tree_actual *
+ sizeof *tree_info);
+ kr = kmem_alloc(ipc_kernel_map,
+ &tree_addr, tree_size);
+ if (kr != KERN_SUCCESS) {
+ if (table_info != *tablep)
+ kmem_free(ipc_kernel_map,
+ table_addr, table_size);
+
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ tree_info = (ipc_info_tree_name_t *) tree_addr;
+ tree_potential = tree_size/sizeof *tree_info;
+ }
+ }
+ /* space is read-locked and active; we have enough wired memory */
+
+ infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
+ infop->iis_table_size = space->is_table_size;
+ infop->iis_table_next = space->is_table_next->its_size;
+ infop->iis_tree_size = space->is_tree_total;
+ infop->iis_tree_small = space->is_tree_small;
+ infop->iis_tree_hash = space->is_tree_hash;
+
+ table = space->is_table;
+ tsize = space->is_table_size;
+
+ for (index = 0; index < tsize; index++) {
+ ipc_info_name_t *iin = &table_info[index];
+ ipc_entry_t entry = &table[index];
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ iin->iin_name = MACH_PORT_MAKEB(index, bits);
+ iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
+#if MACH_IPC_COMPAT
+ iin->iin_compat = (bits & IE_BITS_COMPAT) ? TRUE : FALSE;
+#else /* MACH_IPC_COMPAT */
+ iin->iin_compat = FALSE;
+#endif /* MACH_IPC_COMPAT */
+ iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
+ iin->iin_type = IE_BITS_TYPE(bits);
+ iin->iin_urefs = IE_BITS_UREFS(bits);
+ iin->iin_object = (vm_offset_t) entry->ie_object;
+ iin->iin_next = entry->ie_next;
+ iin->iin_hash = entry->ie_index;
+ }
+
+ for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0;
+ tentry != ITE_NULL;
+ tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
+ ipc_info_tree_name_t *iitn = &tree_info[index++];
+ ipc_info_name_t *iin = &iitn->iitn_name;
+ ipc_entry_t entry = &tentry->ite_entry;
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);
+
+ iin->iin_name = tentry->ite_name;
+ iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
+#if MACH_IPC_COMPAT
+ iin->iin_compat = (bits & IE_BITS_COMPAT) ? TRUE : FALSE;
+#else /* MACH_IPC_COMPAT */
+ iin->iin_compat = FALSE;
+#endif /* MACH_IPC_COMPAT */
+ iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
+ iin->iin_type = IE_BITS_TYPE(bits);
+ iin->iin_urefs = IE_BITS_UREFS(bits);
+ iin->iin_object = (vm_offset_t) entry->ie_object;
+ iin->iin_next = entry->ie_next;
+ iin->iin_hash = entry->ie_index;
+
+ if (tentry->ite_lchild == ITE_NULL)
+ iitn->iitn_lchild = MACH_PORT_NULL;
+ else
+ iitn->iitn_lchild = tentry->ite_lchild->ite_name;
+
+ if (tentry->ite_rchild == ITE_NULL)
+ iitn->iitn_rchild = MACH_PORT_NULL;
+ else
+ iitn->iitn_rchild = tentry->ite_rchild->ite_name;
+
+ }
+ ipc_splay_traverse_finish(&space->is_tree);
+ is_read_unlock(space);
+
+ if (table_info == *tablep) {
+ /* data fit in-line; nothing to deallocate */
+
+ *tableCntp = table_actual;
+ } else if (table_actual == 0) {
+ kmem_free(ipc_kernel_map, table_addr, table_size);
+
+ *tableCntp = 0;
+ } else {
+ vm_size_t size_used, rsize_used;
+ vm_map_copy_t copy;
+
+ /* kmem_alloc doesn't zero memory */
+
+ size_used = table_actual * sizeof *table_info;
+ rsize_used = round_page(size_used);
+
+ if (rsize_used != table_size)
+ kmem_free(ipc_kernel_map,
+ table_addr + rsize_used,
+ table_size - rsize_used);
+
+ if (size_used != rsize_used)
+ bzero((char *) (table_addr + size_used),
+ rsize_used - size_used);
+
+ kr = vm_map_copyin(ipc_kernel_map, table_addr, rsize_used,
+ TRUE, &copy);
+
+ assert(kr == KERN_SUCCESS);
+
+ *tablep = (ipc_info_name_t *) copy;
+ *tableCntp = table_actual;
+ }
+
+ if (tree_info == *treep) {
+ /* data fit in-line; nothing to deallocate */
+
+ *treeCntp = tree_actual;
+ } else if (tree_actual == 0) {
+ kmem_free(ipc_kernel_map, tree_addr, tree_size);
+
+ *treeCntp = 0;
+ } else {
+ vm_size_t size_used, rsize_used;
+ vm_map_copy_t copy;
+
+ /* kmem_alloc doesn't zero memory */
+
+ size_used = tree_actual * sizeof *tree_info;
+ rsize_used = round_page(size_used);
+
+ if (rsize_used != tree_size)
+ kmem_free(ipc_kernel_map,
+ tree_addr + rsize_used,
+ tree_size - rsize_used);
+
+ if (size_used != rsize_used)
+ bzero((char *) (tree_addr + size_used),
+ rsize_used - size_used);
+
+ kr = vm_map_copyin(ipc_kernel_map, tree_addr, rsize_used,
+ TRUE, &copy);
+
+ assert(kr == KERN_SUCCESS);
+
+ *treep = (ipc_info_tree_name_t *) copy;
+ *treeCntp = tree_actual;
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_dnrequest_info
+ * Purpose:
+ * Returns information about the dead-name requests
+ * registered with the named receive right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved information.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_dnrequest_info(
+ ipc_space_t space,
+ mach_port_t name,
+ unsigned int *totalp,
+ unsigned int *usedp)
+{
+ unsigned int total, used;
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ if (port->ip_dnrequests == IPR_NULL) {
+ total = 0;
+ used = 0;
+ } else {
+ ipc_port_request_t dnrequests = port->ip_dnrequests;
+ ipc_port_request_index_t index;
+
+ total = dnrequests->ipr_size->its_size;
+
+ for (index = 1, used = 0;
+ index < total; index++) {
+ ipc_port_request_t ipr = &dnrequests[index];
+
+ if (ipr->ipr_name != MACH_PORT_NULL)
+ used++;
+ }
+ }
+ ip_unlock(port);
+
+ *totalp = total;
+ *usedp = used;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_kernel_object [kernel call]
+ * Purpose:
+ * Retrieve the type and address of the kernel object
+ * represented by a send or receive right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved kernel object info.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote
+ * send or receive rights.
+ */
+
+kern_return_t
+mach_port_kernel_object(
+ ipc_space_t space,
+ mach_port_t name,
+ unsigned int *typep,
+ vm_offset_t *addrp)
+{
+ ipc_entry_t entry;
+ ipc_port_t port;
+ kern_return_t kr;
+
+ kr = ipc_right_lookup_read(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is read-locked and active */
+
+ if ((entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE) == 0) {
+ is_read_unlock(space);
+ return KERN_INVALID_RIGHT;
+ }
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ is_read_unlock(space);
+
+ if (!ip_active(port)) {
+ ip_unlock(port);
+ return KERN_INVALID_RIGHT;
+ }
+
+ *typep = (unsigned int) ip_kotype(port);
+ *addrp = (vm_offset_t) port->ip_kobject;
+ ip_unlock(port);
+ return KERN_SUCCESS;
+}
diff --git a/ipc/mach_msg.c b/ipc/mach_msg.c
new file mode 100644
index 00000000..ffcccf4a
--- /dev/null
+++ b/ipc/mach_msg.c
@@ -0,0 +1,2279 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_msg.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Exported message traps. See mach/message.h.
+ */
+
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <kern/assert.h>
+#include <kern/counters.h>
+#include <kern/lock.h>
+#include <kern/sched_prim.h>
+#include <kern/ipc_sched.h>
+#include <vm/vm_map.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/mach_msg.h>
+
+
+
+extern void exception_raise_continue();
+extern void exception_raise_continue_fast();
+#ifndef CONTINUATIONS
+#define mach_msg_receive_continue 0
+#define msg_receive_continue 0
+#endif
+
+/*
+ * Routine: mach_msg_send
+ * Purpose:
+ * Send a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Sent the message.
+ * MACH_SEND_MSG_TOO_SMALL Message smaller than a header.
+ * MACH_SEND_NO_BUFFER Couldn't allocate buffer.
+ * MACH_SEND_INVALID_DATA Couldn't copy message data.
+ * MACH_SEND_INVALID_HEADER
+ * Illegal value in the message header bits.
+ * MACH_SEND_INVALID_DEST The space is dead.
+ * MACH_SEND_INVALID_NOTIFY Bad notify port.
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * MACH_SEND_TIMED_OUT Timeout expired without delivery.
+ * MACH_SEND_INTERRUPTED Delivery interrupted.
+ * MACH_SEND_NO_NOTIFY Can't allocate a msg-accepted request.
+ * MACH_SEND_WILL_NOTIFY Msg-accepted notif. requested.
+ * MACH_SEND_NOTIFY_IN_PROGRESS
+ * This space has already forced a message to this port.
+ */
+
+mach_msg_return_t
+mach_msg_send(msg, option, send_size, time_out, notify)
+ mach_msg_header_t *msg;
+ mach_msg_option_t option;
+ mach_msg_size_t send_size;
+ mach_msg_timeout_t time_out;
+ mach_port_t notify;
+{
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_kmsg_t kmsg;
+ mach_msg_return_t mr;
+
+ mr = ipc_kmsg_get(msg, send_size, &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ if (option & MACH_SEND_CANCEL) {
+ if (notify == MACH_PORT_NULL)
+ mr = MACH_SEND_INVALID_NOTIFY;
+ else
+ mr = ipc_kmsg_copyin(kmsg, space, map, notify);
+ } else
+ mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ return mr;
+ }
+
+ if (option & MACH_SEND_NOTIFY) {
+ mr = ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT,
+ ((option & MACH_SEND_TIMEOUT) ?
+ time_out : MACH_MSG_TIMEOUT_NONE));
+ if (mr == MACH_SEND_TIMED_OUT) {
+ ipc_port_t dest = (ipc_port_t)
+ kmsg->ikm_header.msgh_remote_port;
+
+ if (notify == MACH_PORT_NULL)
+ mr = MACH_SEND_INVALID_NOTIFY;
+ else
+ mr = ipc_marequest_create(space, dest,
+ notify, &kmsg->ikm_marequest);
+ if (mr == MACH_MSG_SUCCESS) {
+ ipc_mqueue_send_always(kmsg);
+ return MACH_SEND_WILL_NOTIFY;
+ }
+ }
+ } else
+ mr = ipc_mqueue_send(kmsg, option & MACH_SEND_TIMEOUT,
+ time_out);
+
+ if (mr != MACH_MSG_SUCCESS) {
+ mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map);
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ (void) ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+ }
+
+ return mr;
+}
+
+/*
+ * Routine: mach_msg_receive
+ * Purpose:
+ * Receive a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Received a message.
+ * MACH_RCV_INVALID_NAME The name doesn't denote a right,
+ * or the denoted right is not receive or port set.
+ * MACH_RCV_IN_SET Receive right is a member of a set.
+ * MACH_RCV_TOO_LARGE Message wouldn't fit into buffer.
+ * MACH_RCV_TIMED_OUT Timeout expired without a message.
+ * MACH_RCV_INTERRUPTED Reception interrupted.
+ * MACH_RCV_PORT_DIED Port/set died while receiving.
+ * MACH_RCV_PORT_CHANGED Port moved into set while receiving.
+ * MACH_RCV_INVALID_DATA Couldn't copy to user buffer.
+ * MACH_RCV_INVALID_NOTIFY Bad notify port.
+ * MACH_RCV_HEADER_ERROR
+ */
+
+mach_msg_return_t
+mach_msg_receive(msg, option, rcv_size, rcv_name, time_out, notify)
+ mach_msg_header_t *msg;
+ mach_msg_option_t option;
+ mach_msg_size_t rcv_size;
+ mach_port_t rcv_name;
+ mach_msg_timeout_t time_out;
+ mach_port_t notify;
+{
+ ipc_thread_t self = current_thread();
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_object_t object;
+ ipc_mqueue_t mqueue;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ /* hold ref for object; mqueue is locked */
+
+#ifdef CONTINUATIONS
+ /*
+ * ipc_mqueue_receive may not return, because if we block
+ * then our kernel stack may be discarded. So we save
+ * state here for mach_msg_receive_continue to pick up.
+ */
+
+ self->ith_msg = msg;
+ self->ith_option = option;
+ self->ith_rcv_size = rcv_size;
+ self->ith_timeout = time_out;
+ self->ith_notify = notify;
+ self->ith_object = object;
+ self->ith_mqueue = mqueue;
+#endif
+
+ if (option & MACH_RCV_LARGE) {
+ mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+ rcv_size, time_out,
+ FALSE, mach_msg_receive_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS) {
+ if (mr == MACH_RCV_TOO_LARGE) {
+ mach_msg_size_t real_size =
+ (mach_msg_size_t) (natural_t) kmsg;
+
+ assert(real_size > rcv_size);
+
+ (void) copyout((vm_offset_t) &real_size,
+ (vm_offset_t) &msg->msgh_size,
+ sizeof(mach_msg_size_t));
+ }
+
+ return mr;
+ }
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ assert(kmsg->ikm_header.msgh_size <= rcv_size);
+ } else {
+ mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+ MACH_MSG_SIZE_MAX, time_out,
+ FALSE, mach_msg_receive_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ if (kmsg->ikm_header.msgh_size > rcv_size) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ return MACH_RCV_TOO_LARGE;
+ }
+ }
+
+ if (option & MACH_RCV_NOTIFY) {
+ if (notify == MACH_PORT_NULL)
+ mr = MACH_RCV_INVALID_NOTIFY;
+ else
+ mr = ipc_kmsg_copyout(kmsg, space, map, notify);
+ } else
+ mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ }
+
+ return mr;
+ }
+
+ return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+}
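
From user space, the send and receive halves above are normally reached together through mach_msg(), whose arguments mirror mach_msg_trap() later in this file. The sketch below shows a client RPC with a COPY_SEND destination and a MAKE_SEND_ONCE reply port, which is also the combination the trap's fast path optimizes; the message id, the reuse of one fixed-size buffer for the reply, and the mach_msg() library call itself are assumptions made for the example.

/* Illustrative user-level RPC: send a request to 'dest' and wait for
 * the reply on 'reply'.  The message id and the single fixed-size
 * buffer are assumptions for the example. */
#include <mach/message.h>
#include <mach/port.h>

mach_msg_return_t
do_rpc(mach_port_t dest, mach_port_t reply)
{
	mach_msg_header_t msg;

	msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
				       MACH_MSG_TYPE_MAKE_SEND_ONCE);
	msg.msgh_size = sizeof msg;
	msg.msgh_remote_port = dest;	/* request goes to dest */
	msg.msgh_local_port = reply;	/* send-once right made from reply */
	msg.msgh_seqno = 0;
	msg.msgh_id = 1000;		/* hypothetical message id */

	return mach_msg(&msg, MACH_SEND_MSG | MACH_RCV_MSG,
			sizeof msg, sizeof msg, reply,
			MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
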
+
+#ifdef CONTINUATIONS
+/*
+ * Routine: mach_msg_receive_continue
+ * Purpose:
+ * Continue after blocking for a message.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack,
+ * with the receive state saved in the thread. From here
+ * control goes back to user space.
+ */
+
+void
+mach_msg_receive_continue()
+{
+ ipc_thread_t self = current_thread();
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ mach_msg_header_t *msg = self->ith_msg;
+ mach_msg_option_t option = self->ith_option;
+ mach_msg_size_t rcv_size = self->ith_rcv_size;
+ mach_msg_timeout_t time_out = self->ith_timeout;
+ mach_port_t notify = self->ith_notify;
+ ipc_object_t object = self->ith_object;
+ ipc_mqueue_t mqueue = self->ith_mqueue;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ if (option & MACH_RCV_LARGE) {
+ mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+ rcv_size, time_out,
+ TRUE, mach_msg_receive_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS) {
+ if (mr == MACH_RCV_TOO_LARGE) {
+ mach_msg_size_t real_size =
+ (mach_msg_size_t) (natural_t) kmsg;
+
+ assert(real_size > rcv_size);
+
+ (void) copyout((vm_offset_t) &real_size,
+ (vm_offset_t) &msg->msgh_size,
+ sizeof(mach_msg_size_t));
+ }
+
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ assert(kmsg->ikm_header.msgh_size <= rcv_size);
+ } else {
+ mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+ MACH_MSG_SIZE_MAX, time_out,
+ TRUE, mach_msg_receive_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ if (kmsg->ikm_header.msgh_size > rcv_size) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ thread_syscall_return(MACH_RCV_TOO_LARGE);
+ /*NOTREACHED*/
+ }
+ }
+
+ if (option & MACH_RCV_NOTIFY) {
+ if (notify == MACH_PORT_NULL)
+ mr = MACH_RCV_INVALID_NOTIFY;
+ else
+ mr = ipc_kmsg_copyout(kmsg, space, map, notify);
+ } else
+ mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ }
+
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ mr = ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+}
+#endif /* CONTINUATIONS */
+
+/*
+ * Routine: mach_msg_trap [mach trap]
+ * Purpose:
+ * Possibly send a message; possibly receive a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * All of mach_msg_send and mach_msg_receive error codes.
+ */
+
+mach_msg_return_t
+mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
+ mach_msg_header_t *msg;
+ mach_msg_option_t option;
+ mach_msg_size_t send_size;
+ mach_msg_size_t rcv_size;
+ mach_port_t rcv_name;
+ mach_msg_timeout_t time_out;
+ mach_port_t notify;
+{
+ mach_msg_return_t mr;
+
+#ifdef CONTINUATIONS
+ /* first check for common cases */
+
+ if (option == (MACH_SEND_MSG|MACH_RCV_MSG)) {
+ register ipc_thread_t self = current_thread();
+ ipc_space_t space = self->task->itk_space;
+ register ipc_kmsg_t kmsg;
+ register ipc_port_t dest_port;
+ ipc_object_t rcv_object;
+ register ipc_mqueue_t rcv_mqueue;
+ mach_msg_size_t reply_size;
+
+ /*
+ * This case is divided into ten sections, each
+ * with a label. There are five optimized
+ * sections and six unoptimized sections, which
+ * do the same thing but handle all possible
+ * cases and are slower.
+ *
+ * The five sections for an RPC are
+ * 1) Get request message into a buffer.
+ * (fast_get or slow_get)
+ * 2) Copyin request message and rcv_name.
+ * (fast_copyin or slow_copyin)
+ * 3) Enqueue request and dequeue reply.
+ * (fast_send_receive or
+ * slow_send and slow_receive)
+ * 4) Copyout reply message.
+ * (fast_copyout or slow_copyout)
+ * 5) Put reply message to user's buffer.
+ * (fast_put or slow_put)
+ *
+ * Keep the locking hierarchy firmly in mind.
+ * (First spaces, then ports, then port sets,
+ * then message queues.) Only a non-blocking
+ * attempt can be made to acquire locks out of
+ * order, or acquire two locks on the same level.
+ * Acquiring two locks on the same level will
+ * fail if the objects are really the same,
+ * unless simple locking is disabled. This is OK,
+ * because then the extra unlock does nothing.
+ *
+ * There are two major reasons these RPCs can't use
+ * ipc_thread_switch, and use slow_send/slow_receive:
+ * 1) Kernel RPCs.
+ * 2) Servers fall behind clients, so
+ * client doesn't find a blocked server thread and
+ * server finds waiting messages and can't block.
+ */
+
+ /*
+ fast_get:
+ */
+ /*
+ * optimized ipc_kmsg_get
+ *
+ * No locks, references, or messages held.
+ * We must clear ikm_cache before copyinmsg.
+ */
+
+ if ((send_size > IKM_SAVED_MSG_SIZE) ||
+ (send_size < sizeof(mach_msg_header_t)) ||
+ (send_size & 3) ||
+ ((kmsg = ikm_cache()) == IKM_NULL))
+ goto slow_get;
+
+ ikm_cache() = IKM_NULL;
+ ikm_check_initialized(kmsg, IKM_SAVED_KMSG_SIZE);
+
+ if (copyinmsg((vm_offset_t) msg, (vm_offset_t) &kmsg->ikm_header,
+ send_size)) {
+ ikm_free(kmsg);
+ goto slow_get;
+ }
+
+ kmsg->ikm_header.msgh_size = send_size;
+
+ fast_copyin:
+ /*
+ * optimized ipc_kmsg_copyin/ipc_mqueue_copyin
+ *
+ * We have the request message data in kmsg.
+ * Must still do copyin, send, receive, etc.
+ *
+ * If the message isn't simple, we can't combine
+ * ipc_kmsg_copyin_header and ipc_mqueue_copyin,
+ * because copyin of the message body might
+ * affect rcv_name.
+ */
+
+ switch (kmsg->ikm_header.msgh_bits) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE): {
+ register ipc_entry_t table;
+ register ipc_entry_num_t size;
+ register ipc_port_t reply_port;
+
+ /* sending a request message */
+
+ {
+ register mach_port_index_t index;
+ register mach_port_gen_t gen;
+
+ {
+ register mach_port_t reply_name =
+ kmsg->ikm_header.msgh_local_port;
+
+ if (reply_name != rcv_name)
+ goto slow_copyin;
+
+ /* optimized ipc_entry_lookup of reply_name */
+
+ index = MACH_PORT_INDEX(reply_name);
+ gen = MACH_PORT_GEN(reply_name);
+ }
+
+ is_read_lock(space);
+ assert(space->is_active);
+
+ size = space->is_table_size;
+ table = space->is_table;
+
+ if (index >= size)
+ goto abort_request_copyin;
+
+ {
+ register ipc_entry_t entry;
+ register ipc_entry_bits_t bits;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|
+ MACH_PORT_TYPE_RECEIVE)) !=
+ (gen | MACH_PORT_TYPE_RECEIVE))
+ goto abort_request_copyin;
+
+ reply_port = (ipc_port_t) entry->ie_object;
+ assert(reply_port != IP_NULL);
+ }
+ }
+
+ /* optimized ipc_entry_lookup of dest_name */
+
+ {
+ register mach_port_index_t index;
+ register mach_port_gen_t gen;
+
+ {
+ register mach_port_t dest_name =
+ kmsg->ikm_header.msgh_remote_port;
+
+ index = MACH_PORT_INDEX(dest_name);
+ gen = MACH_PORT_GEN(dest_name);
+ }
+
+ if (index >= size)
+ goto abort_request_copyin;
+
+ {
+ register ipc_entry_t entry;
+ register ipc_entry_bits_t bits;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
+ (gen | MACH_PORT_TYPE_SEND))
+ goto abort_request_copyin;
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+ }
+ }
+
+ /*
+ * To do an atomic copyin, need simultaneous
+ * locks on both ports and the space. If
+ * dest_port == reply_port, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port) ||
+ !ip_lock_try(reply_port)) {
+ ip_unlock(dest_port);
+ goto abort_request_copyin;
+ }
+ is_read_unlock(space);
+
+ assert(dest_port->ip_srights > 0);
+ dest_port->ip_srights++;
+ ip_reference(dest_port);
+
+ assert(ip_active(reply_port));
+ assert(reply_port->ip_receiver_name ==
+ kmsg->ikm_header.msgh_local_port);
+ assert(reply_port->ip_receiver == space);
+
+ reply_port->ip_sorights++;
+ ip_reference(reply_port);
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+ kmsg->ikm_header.msgh_remote_port =
+ (mach_port_t) dest_port;
+ kmsg->ikm_header.msgh_local_port =
+ (mach_port_t) reply_port;
+
+ /* make sure we can queue to the destination */
+
+ if (dest_port->ip_receiver == ipc_space_kernel) {
+ /*
+ * The kernel server has a reference to
+ * the reply port, which it hands back
+ * to us in the reply message. We do
+ * not need to keep another reference to
+ * it.
+ */
+ ip_unlock(reply_port);
+
+ assert(ip_active(dest_port));
+ ip_unlock(dest_port);
+ goto kernel_send;
+ }
+
+#if NORMA_IPC
+ if (IP_NORMA_IS_PROXY(dest_port)) {
+ ip_unlock(dest_port);
+ ip_unlock(reply_port);
+ goto norma_send;
+ }
+#endif /* NORMA_IPC */
+
+ if (dest_port->ip_msgcount >= dest_port->ip_qlimit)
+ goto abort_request_send_receive;
+
+ /* optimized ipc_mqueue_copyin */
+
+ if (reply_port->ip_pset != IPS_NULL)
+ goto abort_request_send_receive;
+
+ rcv_object = (ipc_object_t) reply_port;
+ io_reference(rcv_object);
+ rcv_mqueue = &reply_port->ip_messages;
+ imq_lock(rcv_mqueue);
+ io_unlock(rcv_object);
+ goto fast_send_receive;
+
+ abort_request_copyin:
+ is_read_unlock(space);
+ goto slow_copyin;
+
+ abort_request_send_receive:
+ ip_unlock(dest_port);
+ ip_unlock(reply_port);
+ goto slow_send;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
+ register ipc_entry_num_t size;
+ register ipc_entry_t table;
+
+ /* sending a reply message */
+
+ {
+ register mach_port_t reply_name =
+ kmsg->ikm_header.msgh_local_port;
+
+ if (reply_name != MACH_PORT_NULL)
+ goto slow_copyin;
+ }
+
+ is_write_lock(space);
+ assert(space->is_active);
+
+ /* optimized ipc_entry_lookup */
+
+ size = space->is_table_size;
+ table = space->is_table;
+
+ {
+ register ipc_entry_t entry;
+ register mach_port_gen_t gen;
+ register mach_port_index_t index;
+
+ {
+ register mach_port_t dest_name =
+ kmsg->ikm_header.msgh_remote_port;
+
+ index = MACH_PORT_INDEX(dest_name);
+ gen = MACH_PORT_GEN(dest_name);
+ }
+
+ if (index >= size)
+ goto abort_reply_dest_copyin;
+
+ entry = &table[index];
+
+ /* check generation, collision bit, and type bit */
+
+ if ((entry->ie_bits & (IE_BITS_GEN_MASK|
+ IE_BITS_COLLISION|
+ MACH_PORT_TYPE_SEND_ONCE)) !=
+ (gen | MACH_PORT_TYPE_SEND_ONCE))
+ goto abort_reply_dest_copyin;
+
+ /* optimized ipc_right_copyin */
+
+ assert(IE_BITS_TYPE(entry->ie_bits) ==
+ MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(entry->ie_bits) == 1);
+ assert((entry->ie_bits & IE_BITS_MAREQUEST) == 0);
+
+ if (entry->ie_request != 0)
+ goto abort_reply_dest_copyin;
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port)) {
+ ip_unlock(dest_port);
+ goto abort_reply_dest_copyin;
+ }
+
+ assert(dest_port->ip_sorights > 0);
+
+ /* optimized ipc_entry_dealloc */
+
+ entry->ie_next = table->ie_next;
+ table->ie_next = index;
+ entry->ie_bits = gen;
+ entry->ie_object = IO_NULL;
+ }
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ 0);
+ kmsg->ikm_header.msgh_remote_port =
+ (mach_port_t) dest_port;
+
+ /* make sure we can queue to the destination */
+
+ assert(dest_port->ip_receiver != ipc_space_kernel);
+#if NORMA_IPC
+ if (IP_NORMA_IS_PROXY(dest_port)) {
+ is_write_unlock(space);
+ ip_unlock(dest_port);
+ goto norma_send;
+ }
+#endif /* NORMA_IPC */
+
+ /* optimized ipc_entry_lookup/ipc_mqueue_copyin */
+
+ {
+ register ipc_entry_t entry;
+ register ipc_entry_bits_t bits;
+
+ {
+ register mach_port_index_t index;
+ register mach_port_gen_t gen;
+
+ index = MACH_PORT_INDEX(rcv_name);
+ gen = MACH_PORT_GEN(rcv_name);
+
+ if (index >= size)
+ goto abort_reply_rcv_copyin;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number */
+
+ if ((bits & IE_BITS_GEN_MASK) != gen)
+ goto abort_reply_rcv_copyin;
+ }
+
+ /* check type bits; looking for receive or set */
+
+ if (bits & MACH_PORT_TYPE_PORT_SET) {
+ register ipc_pset_t rcv_pset;
+
+ rcv_pset = (ipc_pset_t) entry->ie_object;
+ assert(rcv_pset != IPS_NULL);
+
+ ips_lock(rcv_pset);
+ assert(ips_active(rcv_pset));
+
+ rcv_object = (ipc_object_t) rcv_pset;
+ rcv_mqueue = &rcv_pset->ips_messages;
+ } else if (bits & MACH_PORT_TYPE_RECEIVE) {
+ register ipc_port_t rcv_port;
+
+ rcv_port = (ipc_port_t) entry->ie_object;
+ assert(rcv_port != IP_NULL);
+
+ if (!ip_lock_try(rcv_port))
+ goto abort_reply_rcv_copyin;
+ assert(ip_active(rcv_port));
+
+ if (rcv_port->ip_pset != IPS_NULL) {
+ ip_unlock(rcv_port);
+ goto abort_reply_rcv_copyin;
+ }
+
+ rcv_object = (ipc_object_t) rcv_port;
+ rcv_mqueue = &rcv_port->ip_messages;
+ } else
+ goto abort_reply_rcv_copyin;
+ }
+
+ is_write_unlock(space);
+ io_reference(rcv_object);
+ imq_lock(rcv_mqueue);
+ io_unlock(rcv_object);
+ goto fast_send_receive;
+
+ abort_reply_dest_copyin:
+ is_write_unlock(space);
+ goto slow_copyin;
+
+ abort_reply_rcv_copyin:
+ ip_unlock(dest_port);
+ is_write_unlock(space);
+ goto slow_send;
+ }
+
+ default:
+ goto slow_copyin;
+ }
+ /*NOTREACHED*/
+
+ fast_send_receive:
+ /*
+ * optimized ipc_mqueue_send/ipc_mqueue_receive
+ *
+ * Finished get/copyin of kmsg and copyin of rcv_name.
+ * space is unlocked, dest_port is locked,
+ * we can queue kmsg to dest_port,
+ * rcv_mqueue is locked, rcv_object holds a ref,
+ * if rcv_object is a port it isn't in a port set
+ *
+ * Note that if simple locking is turned off,
+ * then we could have dest_mqueue == rcv_mqueue
+ * and not abort when we try to lock dest_mqueue.
+ */
+
+ assert(ip_active(dest_port));
+ assert(dest_port->ip_receiver != ipc_space_kernel);
+#if NORMA_IPC
+ assert(! IP_NORMA_IS_PROXY(dest_port));
+#endif /* NORMA_IPC */
+ assert((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
+ (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
+ MACH_MSG_TYPE_PORT_SEND_ONCE));
+ assert((kmsg->ikm_header.msgh_bits &
+ MACH_MSGH_BITS_CIRCULAR) == 0);
+
+ {
+ register ipc_mqueue_t dest_mqueue;
+ register ipc_thread_t receiver;
+
+ {
+ register ipc_pset_t dest_pset;
+
+ dest_pset = dest_port->ip_pset;
+ if (dest_pset == IPS_NULL)
+ dest_mqueue = &dest_port->ip_messages;
+ else
+ dest_mqueue = &dest_pset->ips_messages;
+ }
+
+ if (!imq_lock_try(dest_mqueue)) {
+ abort_send_receive:
+ ip_unlock(dest_port);
+ imq_unlock(rcv_mqueue);
+ ipc_object_release(rcv_object);
+ goto slow_send;
+ }
+
+ receiver = ipc_thread_queue_first(&dest_mqueue->imq_threads);
+ if ((receiver == ITH_NULL) ||
+ (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
+ != IKM_NULL)) {
+ imq_unlock(dest_mqueue);
+ goto abort_send_receive;
+ }
+
+ /*
+ * There is a receiver thread waiting, and
+ * there is no reply message for us to pick up.
+ * We have hope of hand-off, so save state.
+ */
+
+ self->ith_msg = msg;
+ self->ith_rcv_size = rcv_size;
+ self->ith_object = rcv_object;
+ self->ith_mqueue = rcv_mqueue;
+
+ if ((receiver->swap_func == (void (*)()) mach_msg_continue) &&
+ thread_handoff(self, mach_msg_continue, receiver)) {
+ assert(current_thread() == receiver);
+
+ /*
+ * We can use the optimized receive code,
+ * because the receiver is using no options.
+ */
+ } else if ((receiver->swap_func ==
+ (void (*)()) exception_raise_continue) &&
+ thread_handoff(self, mach_msg_continue, receiver)) {
+ counter(c_mach_msg_trap_block_exc++);
+ assert(current_thread() == receiver);
+
+ /*
+ * We are a reply message coming back through
+ * the optimized exception-handling path.
+ * Finish with rcv_mqueue and dest_mqueue,
+ * and then jump to exception code with
+ * dest_port still locked. We don't bother
+ * with a sequence number in this case.
+ */
+
+ ipc_thread_enqueue_macro(
+ &rcv_mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = MACH_MSG_SIZE_MAX;
+ imq_unlock(rcv_mqueue);
+
+ ipc_thread_rmqueue_first_macro(
+ &dest_mqueue->imq_threads, receiver);
+ imq_unlock(dest_mqueue);
+
+ exception_raise_continue_fast(dest_port, kmsg);
+ /*NOTREACHED*/
+ return MACH_MSG_SUCCESS;
+ } else if ((send_size <= receiver->ith_msize) &&
+ thread_handoff(self, mach_msg_continue, receiver)) {
+ assert(current_thread() == receiver);
+
+ if ((receiver->swap_func ==
+ (void (*)()) mach_msg_receive_continue) &&
+ ((receiver->ith_option & MACH_RCV_NOTIFY) == 0)) {
+ /*
+ * We can still use the optimized code.
+ */
+ } else {
+ counter(c_mach_msg_trap_block_slow++);
+ /*
+ * We are running as the receiver,
+ * but we can't use the optimized code.
+ * Finish send/receive processing.
+ */
+
+ dest_port->ip_msgcount++;
+ ip_unlock(dest_port);
+
+ ipc_thread_enqueue_macro(
+ &rcv_mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = MACH_MSG_SIZE_MAX;
+ imq_unlock(rcv_mqueue);
+
+ ipc_thread_rmqueue_first_macro(
+ &dest_mqueue->imq_threads, receiver);
+ receiver->ith_state = MACH_MSG_SUCCESS;
+ receiver->ith_kmsg = kmsg;
+ receiver->ith_seqno = dest_port->ip_seqno++;
+ imq_unlock(dest_mqueue);
+
+ /*
+ * Call the receiver's continuation.
+ */
+
+ receiver->wait_result = THREAD_AWAKENED;
+ (*receiver->swap_func)();
+ /*NOTREACHED*/
+ return MACH_MSG_SUCCESS;
+ }
+ } else {
+ /*
+ * The receiver can't accept the message,
+ * or we can't switch to the receiver.
+ */
+
+ imq_unlock(dest_mqueue);
+ goto abort_send_receive;
+ }
+ counter(c_mach_msg_trap_block_fast++);
+
+ /*
+ * Safe to unlock dest_port now that we are
+ * committed to this path, because we hold
+ * dest_mqueue locked. We never bother changing
+ * dest_port->ip_msgcount.
+ */
+
+ ip_unlock(dest_port);
+
+ /*
+ * We need to finish preparing self for its
+ * time asleep in rcv_mqueue.
+ */
+
+ ipc_thread_enqueue_macro(&rcv_mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = MACH_MSG_SIZE_MAX;
+ imq_unlock(rcv_mqueue);
+
+ /*
+ * Finish extracting receiver from dest_mqueue.
+ */
+
+ ipc_thread_rmqueue_first_macro(
+ &dest_mqueue->imq_threads, receiver);
+ kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
+ imq_unlock(dest_mqueue);
+
+ /*
+ * We don't have to do any post-dequeue processing of
+ * the message. We never incremented ip_msgcount, we
+ * know it has no msg-accepted request, and blocked
+ * senders aren't a worry because we found the port
+ * with a receiver waiting.
+ */
+
+ self = receiver;
+ space = self->task->itk_space;
+
+ msg = self->ith_msg;
+ rcv_size = self->ith_rcv_size;
+ rcv_object = self->ith_object;
+
+ /* inline ipc_object_release */
+ io_lock(rcv_object);
+ io_release(rcv_object);
+ io_check_unlock(rcv_object);
+ }
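+
+	/*
+	 *	The block above is the heart of the RPC fast path: when a
+	 *	thread is already parked on the destination queue, the
+	 *	sender deposits the kmsg into that thread and switches to
+	 *	it with thread_handoff instead of enqueueing and going
+	 *	through the scheduler; the sender then takes the receiver's
+	 *	place asleep on rcv_mqueue.  A rough user-space sketch of
+	 *	the direct-deposit half (it cannot show the processor
+	 *	hand-off itself), assuming POSIX threads:
+	 */
+#if 0	/* illustrative sketch only; a user-space rendezvous, not thread_handoff */
+#include <pthread.h>
+#include <stddef.h>
+
+struct waiter {
+	pthread_cond_t	wake;		/* initialized by the receiver */
+	void		*msg;		/* filled in by the sender */
+	struct waiter	*next;
+};
+
+static pthread_mutex_t	q_lock = PTHREAD_MUTEX_INITIALIZER;
+static struct waiter	*waiters;	/* receivers blocked on this queue */
+
+/* If a receiver is already waiting, hand the message straight to that
+   thread and wake it; otherwise report failure so the caller can fall
+   back to enqueueing the message normally. */
+static int try_handoff(void *msg)
+{
+	struct waiter *w;
+
+	pthread_mutex_lock(&q_lock);
+	w = waiters;
+	if (w == NULL) {
+		pthread_mutex_unlock(&q_lock);
+		return 0;
+	}
+	waiters = w->next;
+	w->msg = msg;			/* deposit directly into the waiter */
+	pthread_cond_signal(&w->wake);
+	pthread_mutex_unlock(&q_lock);
+	return 1;
+}
+#endif	/* illustration */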
+
+ fast_copyout:
+ /*
+ * Nothing locked and no references held, except
+ * we have kmsg with msgh_seqno filled in. Must
+ * still check against rcv_size and do
+ * ipc_kmsg_copyout/ipc_kmsg_put.
+ */
+
+ assert((ipc_port_t) kmsg->ikm_header.msgh_remote_port
+ == dest_port);
+
+ reply_size = kmsg->ikm_header.msgh_size;
+ if (rcv_size < reply_size)
+ goto slow_copyout;
+
+ /* optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header */
+
+ switch (kmsg->ikm_header.msgh_bits) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE): {
+ ipc_port_t reply_port =
+ (ipc_port_t) kmsg->ikm_header.msgh_local_port;
+ mach_port_t dest_name, reply_name;
+
+ /* receiving a request message */
+
+ if (!IP_VALID(reply_port))
+ goto slow_copyout;
+
+ is_write_lock(space);
+ assert(space->is_active);
+
+ /*
+ * To do an atomic copyout, need simultaneous
+ * locks on both ports and the space. If
+ * dest_port == reply_port, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port) ||
+ !ip_lock_try(reply_port))
+ goto abort_request_copyout;
+
+ if (!ip_active(reply_port)) {
+ ip_unlock(reply_port);
+ goto abort_request_copyout;
+ }
+
+ assert(reply_port->ip_sorights > 0);
+ ip_unlock(reply_port);
+
+ {
+ register ipc_entry_t table;
+ register ipc_entry_t entry;
+ register mach_port_index_t index;
+
+ /* optimized ipc_entry_get */
+
+ table = space->is_table;
+ index = table->ie_next;
+
+ if (index == 0)
+ goto abort_request_copyout;
+
+ entry = &table[index];
+ table->ie_next = entry->ie_next;
+ entry->ie_request = 0;
+
+ {
+ register mach_port_gen_t gen;
+
+ assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = entry->ie_bits + IE_BITS_GEN_ONE;
+
+ reply_name = MACH_PORT_MAKE(index, gen);
+
+ /* optimized ipc_right_copyout */
+
+ entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
+ }
+
+ assert(MACH_PORT_VALID(reply_name));
+ entry->ie_object = (ipc_object_t) reply_port;
+ is_write_unlock(space);
+ }
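+
+	/*
+	 *	The block above is the optimized ipc_entry_get and
+	 *	ipc_right_copyout: free entries are threaded through the
+	 *	table's ie_next fields with entry 0 as the list head, and
+	 *	each reuse of a slot bumps its generation, which is folded
+	 *	into the name handed back so stale names fail the lookup
+	 *	check.  A self-contained sketch of that allocation scheme;
+	 *	the handle layout and masks are invented and differ from
+	 *	the real MACH_PORT_MAKE/IE_BITS encoding.
+	 */
+#if 0	/* illustrative sketch only; handle layout is invented */
+#include <assert.h>
+
+#define NSLOTS	8
+#define GEN_ONE	0x01000000u		/* assumed generation increment */
+
+struct slot {
+	unsigned	gen;		/* current generation of this slot */
+	unsigned	next_free;	/* free list threaded through the table */
+	void		*object;
+};
+
+static struct slot table[NSLOTS];
+
+/* Slot 0 is never handed out; its next_free heads the free list. */
+static void table_init(void)
+{
+	unsigned i;
+
+	for (i = 0; i < NSLOTS - 1; i++)
+		table[i].next_free = i + 1;
+	table[NSLOTS - 1].next_free = 0;	/* 0 terminates the list */
+}
+
+/* Pop a slot off the free list and return a handle encoding both the
+   index and the slot's new generation, so stale handles can be caught. */
+static unsigned handle_alloc(void *object)
+{
+	unsigned index = table[0].next_free;
+
+	if (index == 0)
+		return 0;			/* table full */
+	table[0].next_free = table[index].next_free;
+	table[index].gen += GEN_ONE;
+	table[index].object = object;
+	return table[index].gen | index;	/* assumed: index in low bits */
+}
+
+static void *handle_lookup(unsigned handle)
+{
+	unsigned index = handle & 0x00ffffffu;	/* assumed index mask */
+
+	if (index == 0 || index >= NSLOTS ||
+	    table[index].gen != (handle & ~0x00ffffffu))
+		return 0;			/* bad or stale handle */
+	return table[index].object;
+}
+
+int main(void)
+{
+	int x;
+	unsigned h;
+
+	table_init();
+	h = handle_alloc(&x);
+	assert(handle_lookup(h) == &x);
+	assert(handle_lookup(h + GEN_ONE) == 0);	/* stale generation */
+	return 0;
+}
+#endif	/* illustration */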
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest_port->ip_srights > 0);
+ ip_release(dest_port);
+
+ if (dest_port->ip_receiver == space)
+ dest_name = dest_port->ip_receiver_name;
+ else
+ dest_name = MACH_PORT_NULL;
+
+ if ((--dest_port->ip_srights == 0) &&
+ (dest_port->ip_nsrequest != IP_NULL)) {
+ ipc_port_t nsrequest;
+ mach_port_mscount_t mscount;
+
+ /* a rather rare case */
+
+ nsrequest = dest_port->ip_nsrequest;
+ mscount = dest_port->ip_mscount;
+ dest_port->ip_nsrequest = IP_NULL;
+ ip_unlock(dest_port);
+
+ ipc_notify_no_senders(nsrequest, mscount);
+ } else
+ ip_unlock(dest_port);
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND);
+ kmsg->ikm_header.msgh_remote_port = reply_name;
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ goto fast_put;
+
+ abort_request_copyout:
+ ip_unlock(dest_port);
+ is_write_unlock(space);
+ goto slow_copyout;
+ }
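+
+	/*
+	 *	Just above, releasing the send right that named the
+	 *	destination may leave the port with no send rights at all;
+	 *	if a no-senders request is registered, the notification is
+	 *	sent and the request is consumed.  A minimal sketch of that
+	 *	last-reference-notification pattern (locking omitted):
+	 */
+#if 0	/* illustrative sketch only; locking omitted */
+#include <stddef.h>
+
+struct port {
+	unsigned	srights;			/* outstanding send rights */
+	unsigned	mscount;			/* make-send count */
+	void		(*no_senders)(unsigned);	/* registered request */
+};
+
+/* Dropping the last send right fires the registered no-senders
+   notification exactly once; the registration is consumed. */
+static void send_right_release(struct port *p)
+{
+	void (*notify)(unsigned);
+	unsigned mscount;
+
+	if (--p->srights != 0 || p->no_senders == NULL)
+		return;
+	notify = p->no_senders;
+	mscount = p->mscount;
+	p->no_senders = NULL;
+	notify(mscount);	/* plays the role of ipc_notify_no_senders */
+}
+#endif	/* illustration */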
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ register mach_port_t dest_name;
+
+ /* receiving a reply message */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port))
+ goto slow_copyout;
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest_port->ip_sorights > 0);
+
+ if (dest_port->ip_receiver == space) {
+ ip_release(dest_port);
+ dest_port->ip_sorights--;
+ dest_name = dest_port->ip_receiver_name;
+ ip_unlock(dest_port);
+ } else {
+ ip_unlock(dest_port);
+
+ ipc_notify_send_once(dest_port);
+ dest_name = MACH_PORT_NULL;
+ }
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ goto fast_put;
+ }
+
+ case MACH_MSGH_BITS_COMPLEX|
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ register mach_port_t dest_name;
+
+ /* receiving a complex reply message */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port))
+ goto slow_copyout;
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest_port->ip_sorights > 0);
+
+ if (dest_port->ip_receiver == space) {
+ ip_release(dest_port);
+ dest_port->ip_sorights--;
+ dest_name = dest_port->ip_receiver_name;
+ ip_unlock(dest_port);
+ } else {
+ ip_unlock(dest_port);
+
+ ipc_notify_send_once(dest_port);
+ dest_name = MACH_PORT_NULL;
+ }
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS_COMPLEX |
+ MACH_MSGH_BITS(0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ kmsg->ikm_header.msgh_local_port = dest_name;
+
+ mr = ipc_kmsg_copyout_body(
+ (vm_offset_t) (&kmsg->ikm_header + 1),
+ (vm_offset_t) &kmsg->ikm_header
+ + kmsg->ikm_header.msgh_size,
+ space,
+ current_map());
+
+ if (mr != MACH_MSG_SUCCESS) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ return mr | MACH_RCV_BODY_ERROR;
+ }
+ goto fast_put;
+ }
+
+ default:
+ goto slow_copyout;
+ }
+ /*NOTREACHED*/
+
+ fast_put:
+ /*
+ * We have the reply message data in kmsg,
+ * and the reply message size in reply_size.
+ * Just need to copy it out to the user and free kmsg.
+ * We must check ikm_cache after copyoutmsg.
+ */
+
+ ikm_check_initialized(kmsg, kmsg->ikm_size);
+
+ if ((kmsg->ikm_size != IKM_SAVED_KMSG_SIZE) ||
+ copyoutmsg((vm_offset_t) &kmsg->ikm_header, (vm_offset_t) msg,
+ reply_size) ||
+ (ikm_cache() != IKM_NULL))
+ goto slow_put;
+
+ ikm_cache() = kmsg;
+ thread_syscall_return(MACH_MSG_SUCCESS);
+ /*NOTREACHED*/
+ return MACH_MSG_SUCCESS; /* help for the compiler */
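+
+	/*
+	 *	fast_put recycles the kmsg through ikm_cache(), a
+	 *	one-element cache of standard-sized message buffers, rather
+	 *	than handing it back to the allocator.  A user-space sketch
+	 *	of the same single-slot cache idea; the real cache is per
+	 *	processor and keyed on IKM_SAVED_KMSG_SIZE, and locking is
+	 *	omitted here.
+	 */
+#if 0	/* illustrative sketch only; single-threaded, no locking shown */
+#include <stdlib.h>
+
+#define BUF_SIZE	256		/* only this size is cached */
+
+static void *buf_cache;			/* one-element cache, like ikm_cache() */
+
+static void *buf_alloc(size_t size)
+{
+	if (size == BUF_SIZE && buf_cache != NULL) {
+		void *p = buf_cache;
+		buf_cache = NULL;	/* take the cached buffer */
+		return p;
+	}
+	return malloc(size);
+}
+
+static void buf_free(void *p, size_t size)
+{
+	if (size == BUF_SIZE && buf_cache == NULL) {
+		buf_cache = p;		/* keep one buffer for next time */
+		return;
+	}
+	free(p);
+}
+#endif	/* illustration */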
+
+ /*
+ * The slow path has a few non-register temporary
+ * variables used only for call-by-reference.
+ */
+
+ {
+ ipc_kmsg_t temp_kmsg;
+ mach_port_seqno_t temp_seqno;
+ ipc_object_t temp_rcv_object;
+ ipc_mqueue_t temp_rcv_mqueue;
+
+ slow_get:
+ /*
+ * No locks, references, or messages held.
+ * Still have to get the request, send it,
+ * receive reply, etc.
+ */
+
+ mr = ipc_kmsg_get(msg, send_size, &temp_kmsg);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+ kmsg = temp_kmsg;
+
+ /* try to get back on optimized path */
+ goto fast_copyin;
+
+ slow_copyin:
+ /*
+ * We have the message data in kmsg, but
+ * we still need to copyin, send it,
+ * receive a reply, and do copyout.
+ */
+
+ mr = ipc_kmsg_copyin(kmsg, space, current_map(),
+ MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ /* try to get back on optimized path */
+
+ if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR)
+ goto slow_send;
+
+ dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ assert(IP_VALID(dest_port));
+
+ ip_lock(dest_port);
+ if (dest_port->ip_receiver == ipc_space_kernel) {
+ assert(ip_active(dest_port));
+ ip_unlock(dest_port);
+ goto kernel_send;
+ }
+
+ if (ip_active(dest_port) &&
+#if NORMA_IPC
+ (! IP_NORMA_IS_PROXY(dest_port)) &&
+#endif /* NORMA_IPC */
+ ((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
+ (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
+ MACH_MSG_TYPE_PORT_SEND_ONCE)))
+ {
+ /*
+ * Try an optimized ipc_mqueue_copyin.
+ * It will work if this is a request message.
+ */
+
+ register ipc_port_t reply_port;
+
+ reply_port = (ipc_port_t)
+ kmsg->ikm_header.msgh_local_port;
+ if (IP_VALID(reply_port)) {
+ if (ip_lock_try(reply_port)) {
+ if (ip_active(reply_port) &&
+ reply_port->ip_receiver == space &&
+ reply_port->ip_receiver_name == rcv_name &&
+ reply_port->ip_pset == IPS_NULL)
+ {
+ /* Grab a reference to the reply port. */
+ rcv_object = (ipc_object_t) reply_port;
+ io_reference(rcv_object);
+ rcv_mqueue = &reply_port->ip_messages;
+ imq_lock(rcv_mqueue);
+ io_unlock(rcv_object);
+ goto fast_send_receive;
+ }
+ ip_unlock(reply_port);
+ }
+ }
+ }
+
+ ip_unlock(dest_port);
+ goto slow_send;
+
+#if NORMA_IPC
+ norma_send:
+ /*
+ * Nothing is locked. We have acquired kmsg, but
+ * we still need to send it and receive a reply.
+ */
+
+ mr = norma_ipc_send(kmsg);
+ if (mr != MACH_MSG_SUCCESS) {
+ mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
+ current_map());
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ goto slow_get_rcv_port;
+#endif /* NORMA_IPC */
+
+ kernel_send:
+ /*
+ * Special case: send message to kernel services.
+ * The request message has been copied into the
+ * kmsg. Nothing is locked.
+ */
+
+ {
+ register ipc_port_t reply_port;
+
+ /*
+ * Perform the kernel function.
+ */
+
+ kmsg = ipc_kobject_server(kmsg);
+ if (kmsg == IKM_NULL) {
+ /*
+ * No reply. Take the
+ * slow receive path.
+ */
+ goto slow_get_rcv_port;
+ }
+
+ /*
+ * Check that:
+ * the reply port is alive
+ * we hold the receive right
+	 *	the name has not changed
+ * the port is not in a set
+ * If any of these are not true,
+ * we cannot directly receive the reply
+ * message.
+ */
+ reply_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ ip_lock(reply_port);
+
+ if ((!ip_active(reply_port)) ||
+ (reply_port->ip_receiver != space) ||
+ (reply_port->ip_receiver_name != rcv_name) ||
+ (reply_port->ip_pset != IPS_NULL))
+ {
+ ip_unlock(reply_port);
+ ipc_mqueue_send_always(kmsg);
+ goto slow_get_rcv_port;
+ }
+
+ rcv_mqueue = &reply_port->ip_messages;
+ imq_lock(rcv_mqueue);
+	/* keep port locked, and don't change ref count yet */
+
+ /*
+ * If there are messages on the port
+ * or other threads waiting for a message,
+ * we cannot directly receive the reply.
+ */
+ if ((ipc_thread_queue_first(&rcv_mqueue->imq_threads)
+ != ITH_NULL) ||
+ (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
+ != IKM_NULL))
+ {
+ imq_unlock(rcv_mqueue);
+ ip_unlock(reply_port);
+ ipc_mqueue_send_always(kmsg);
+ goto slow_get_rcv_port;
+ }
+
+ /*
+ * We can directly receive this reply.
+ * Since the kernel reply never blocks,
+ * it holds no message_accepted request.
+ * Since there were no messages queued
+ * on the reply port, there should be
+ * no threads blocked waiting to send.
+ */
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ assert(ipc_thread_queue_first(&reply_port->ip_blocked)
+ == ITH_NULL);
+
+ dest_port = reply_port;
+ kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
+ imq_unlock(rcv_mqueue);
+
+ /*
+ * inline ipc_object_release.
+ * Port is still locked.
+ * Reference count was not incremented.
+ */
+ ip_check_unlock(reply_port);
+
+ /* copy out the kernel reply */
+ goto fast_copyout;
+ }
+
+ slow_send:
+ /*
+ * Nothing is locked. We have acquired kmsg, but
+ * we still need to send it and receive a reply.
+ */
+
+ mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
+ MACH_MSG_TIMEOUT_NONE);
+ if (mr != MACH_MSG_SUCCESS) {
+ mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
+ current_map());
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ slow_get_rcv_port:
+ /*
+ * We have sent the message. Copy in the receive port.
+ */
+ mr = ipc_mqueue_copyin(space, rcv_name,
+ &temp_rcv_mqueue, &temp_rcv_object);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+ rcv_mqueue = temp_rcv_mqueue;
+ rcv_object = temp_rcv_object;
+ /* hold ref for rcv_object; rcv_mqueue is locked */
+
+ /*
+ slow_receive:
+ */
+ /*
+ * Now we have sent the request and copied in rcv_name,
+	 * so rcv_mqueue is locked and we hold a ref for rcv_object.
+ * Just receive a reply and try to get back to fast path.
+ *
+ * ipc_mqueue_receive may not return, because if we block
+ * then our kernel stack may be discarded. So we save
+ * state here for mach_msg_continue to pick up.
+ */
+
+ self->ith_msg = msg;
+ self->ith_rcv_size = rcv_size;
+ self->ith_object = rcv_object;
+ self->ith_mqueue = rcv_mqueue;
+
+ mr = ipc_mqueue_receive(rcv_mqueue,
+ MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, mach_msg_continue,
+ &temp_kmsg, &temp_seqno);
+ /* rcv_mqueue is unlocked */
+ ipc_object_release(rcv_object);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ (kmsg = temp_kmsg)->ikm_header.msgh_seqno = temp_seqno;
+ dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ goto fast_copyout;
+
+ slow_copyout:
+ /*
+ * Nothing locked and no references held, except
+ * we have kmsg with msgh_seqno filled in. Must
+ * still check against rcv_size and do
+ * ipc_kmsg_copyout/ipc_kmsg_put.
+ */
+
+ reply_size = kmsg->ikm_header.msgh_size;
+ if (rcv_size < reply_size) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ thread_syscall_return(MACH_RCV_TOO_LARGE);
+ /*NOTREACHED*/
+ }
+
+ mr = ipc_kmsg_copyout(kmsg, space, current_map(),
+ MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ }
+
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ /* try to get back on optimized path */
+
+ goto fast_put;
+
+ slow_put:
+ mr = ipc_kmsg_put(msg, kmsg, reply_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+ } else if (option == MACH_SEND_MSG) {
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_kmsg_t kmsg;
+
+ mr = ipc_kmsg_get(msg, send_size, &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ return mr;
+ }
+
+ mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
+ MACH_MSG_TIMEOUT_NONE);
+ if (mr != MACH_MSG_SUCCESS) {
+ mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map);
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ }
+
+ return mr;
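+
+		/*
+		 *	For contrast with this kernel-side send path, the
+		 *	sketch below shows roughly what a send-only caller
+		 *	looks like from user space.  It is a hedged
+		 *	illustration: header locations and the mach_msg
+		 *	prototype vary between Mach derivatives, `dest' is
+		 *	an assumed send right, and a header-only message is
+		 *	used so no body type descriptors are needed.
+		 */
+#if 0	/* illustrative sketch only; user-space caller */
+#include <mach/message.h>	/* assumed home of mach_msg_header_t et al. */
+#include <string.h>
+
+/* Send an empty (header-only) message to a port we hold a send right
+   for; the reply port field is left null since no reply is wanted. */
+static mach_msg_return_t send_empty(mach_port_t dest)
+{
+	mach_msg_header_t msg;
+
+	memset(&msg, 0, sizeof msg);
+	msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
+	msg.msgh_size = sizeof msg;
+	msg.msgh_remote_port = dest;
+	msg.msgh_local_port = MACH_PORT_NULL;
+	msg.msgh_id = 1000;	/* arbitrary illustrative id */
+
+	return mach_msg(&msg, MACH_SEND_MSG, sizeof msg, 0,
+			MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+}
+#endif	/* illustration */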
+ } else if (option == MACH_RCV_MSG) {
+ ipc_thread_t self = current_thread();
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_object_t object;
+ ipc_mqueue_t mqueue;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+
+ mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ /* hold ref for object; mqueue is locked */
+
+ /*
+ * ipc_mqueue_receive may not return, because if we block
+ * then our kernel stack may be discarded. So we save
+ * state here for mach_msg_continue to pick up.
+ */
+
+ self->ith_msg = msg;
+ self->ith_rcv_size = rcv_size;
+ self->ith_object = object;
+ self->ith_mqueue = mqueue;
+
+ mr = ipc_mqueue_receive(mqueue,
+ MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, mach_msg_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ if (rcv_size < kmsg->ikm_header.msgh_size) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ return MACH_RCV_TOO_LARGE;
+ }
+
+ mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ }
+
+ return mr;
+ }
+
+ return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+ } else if (option == MACH_MSG_OPTION_NONE) {
+ /*
+ * We can measure the "null mach_msg_trap"
+ * (syscall entry and thread_syscall_return exit)
+ * with this path.
+ */
+
+ thread_syscall_return(MACH_MSG_SUCCESS);
+ /*NOTREACHED*/
+ }
+#endif /* CONTINUATIONS */
+
+ if (option & MACH_SEND_MSG) {
+ mr = mach_msg_send(msg, option, send_size,
+ time_out, notify);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ }
+
+ if (option & MACH_RCV_MSG) {
+ mr = mach_msg_receive(msg, option, rcv_size, rcv_name,
+ time_out, notify);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ }
+
+ return MACH_MSG_SUCCESS;
+}
+
+#ifdef CONTINUATIONS
+/*
+ * Routine: mach_msg_continue
+ * Purpose:
+ * Continue after blocking for a message.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack,
+ * with the receive state saved in the thread. From here
+ * control goes back to user space.
+ */
+
+void
+mach_msg_continue()
+{
+ ipc_thread_t thread = current_thread();
+ task_t task = thread->task;
+ ipc_space_t space = task->itk_space;
+ vm_map_t map = task->map;
+ mach_msg_header_t *msg = thread->ith_msg;
+ mach_msg_size_t rcv_size = thread->ith_rcv_size;
+ ipc_object_t object = thread->ith_object;
+ ipc_mqueue_t mqueue = thread->ith_mqueue;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ mr = ipc_mqueue_receive(mqueue, MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX, MACH_MSG_TIMEOUT_NONE,
+ TRUE, mach_msg_continue, &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ if (kmsg->ikm_header.msgh_size > rcv_size) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ thread_syscall_return(MACH_RCV_TOO_LARGE);
+ /*NOTREACHED*/
+ }
+
+ mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ }
+
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ mr = ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+}
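+
+/*
+ *	mach_msg_continue exists because a blocked receiver's kernel stack
+ *	may be discarded: everything the resumed code needs was parked in
+ *	the thread's ith_* fields before blocking, and the continuation
+ *	re-derives the rest.  A rough sketch of the general continuation
+ *	idea follows, with invented field names standing in for the ith_*
+ *	state.
+ */
+#if 0	/* illustrative sketch only; invented names, not the real ith_* fields */
+#include <stddef.h>
+
+/* Continuation style: instead of keeping a blocked caller's whole
+   stack alive, save just the values it will need plus a function to
+   resume with; once the wait is satisfied the continuation can run
+   on any fresh stack. */
+struct continuation {
+	void	(*resume)(struct continuation *);
+	void	*user_buffer;		/* plays the role of ith_msg */
+	size_t	user_size;		/* plays the role of ith_rcv_size */
+	void	*wait_object;		/* plays the role of ith_object/ith_mqueue */
+};
+
+static void receive_resume(struct continuation *c)
+{
+	/* re-derive everything from the saved state, copy the arrived
+	   message out to c->user_buffer, and return to the caller */
+	(void) c;
+}
+
+static void block_with_continuation(struct continuation *c, void *buffer,
+				    size_t size, void *object)
+{
+	c->user_buffer = buffer;
+	c->user_size = size;
+	c->wait_object = object;
+	c->resume = receive_resume;
+	/* ...park c on the wait object; the current stack can now be
+	   discarded, and c->resume(c) runs after wakeup... */
+}
+#endif	/* illustration */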
+
+/*
+ * Routine: mach_msg_interrupt
+ * Purpose:
+ * Attempts to force a thread waiting at mach_msg_continue or
+ * mach_msg_receive_continue into a clean point. Returns TRUE
+ * if this was possible.
+ * Conditions:
+ * Nothing locked. The thread must NOT be runnable.
+ */
+
+boolean_t
+mach_msg_interrupt(thread)
+ thread_t thread;
+{
+ ipc_mqueue_t mqueue;
+
+ assert((thread->swap_func == (void (*)()) mach_msg_continue) ||
+ (thread->swap_func == (void (*)()) mach_msg_receive_continue));
+
+ mqueue = thread->ith_mqueue;
+ imq_lock(mqueue);
+ if (thread->ith_state != MACH_RCV_IN_PROGRESS) {
+ /*
+ * The thread is no longer waiting for a message.
+ * It may have a message sitting in ith_kmsg.
+ * We can't clean this up.
+ */
+
+ imq_unlock(mqueue);
+ return FALSE;
+ }
+ ipc_thread_rmqueue(&mqueue->imq_threads, thread);
+ imq_unlock(mqueue);
+
+ ipc_object_release(thread->ith_object);
+
+ thread_set_syscall_return(thread, MACH_RCV_INTERRUPTED);
+ thread->swap_func = thread_exception_return;
+ return TRUE;
+}
+#endif /* CONTINUATIONS */
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: msg_return_translate
+ * Purpose:
+ * Translate from new error code to old error code.
+ */
+
+msg_return_t
+msg_return_translate(mr)
+ mach_msg_return_t mr;
+{
+ switch (mr &~ MACH_MSG_MASK) {
+ case MACH_MSG_SUCCESS:
+ return 0; /* SEND_SUCCESS/RCV_SUCCESS/RPC_SUCCESS */
+
+ case MACH_SEND_NO_BUFFER:
+ case MACH_SEND_NO_NOTIFY:
+ printf("msg_return_translate: %x -> interrupted\n", mr);
+ return SEND_INTERRUPTED;
+
+ case MACH_SEND_MSG_TOO_SMALL:
+ return SEND_MSG_TOO_SMALL;
+ case MACH_SEND_INVALID_DATA:
+ case MACH_SEND_INVALID_MEMORY:
+ return SEND_INVALID_MEMORY;
+ case MACH_SEND_TIMED_OUT:
+ return SEND_TIMED_OUT;
+ case MACH_SEND_INTERRUPTED:
+ return SEND_INTERRUPTED;
+ case MACH_SEND_INVALID_DEST:
+ case MACH_SEND_INVALID_REPLY:
+ case MACH_SEND_INVALID_RIGHT:
+ case MACH_SEND_INVALID_TYPE:
+ return SEND_INVALID_PORT;
+ case MACH_SEND_WILL_NOTIFY:
+ return SEND_WILL_NOTIFY;
+ case MACH_SEND_NOTIFY_IN_PROGRESS:
+ return SEND_NOTIFY_IN_PROGRESS;
+
+ case MACH_RCV_INVALID_NAME:
+ case MACH_RCV_IN_SET:
+ case MACH_RCV_PORT_DIED:
+ return RCV_INVALID_PORT;
+ case MACH_RCV_TOO_LARGE:
+ return RCV_TOO_LARGE;
+ case MACH_RCV_TIMED_OUT:
+ return RCV_TIMED_OUT;
+ case MACH_RCV_INTERRUPTED:
+ return RCV_INTERRUPTED;
+ case MACH_RCV_PORT_CHANGED:
+ return RCV_PORT_CHANGE;
+ case MACH_RCV_INVALID_DATA:
+ return RCV_INVALID_MEMORY;
+
+ case MACH_SEND_IN_PROGRESS:
+ case MACH_SEND_INVALID_NOTIFY:
+ case MACH_SEND_INVALID_HEADER:
+ case MACH_RCV_IN_PROGRESS:
+ case MACH_RCV_INVALID_NOTIFY:
+ case MACH_RCV_HEADER_ERROR:
+ case MACH_RCV_BODY_ERROR:
+ default:
+#if MACH_ASSERT
+ assert(!"msg_return_translate");
+#else
+ panic("msg_return_translate");
+#endif
+ }
+}
+
+/*
+ * Routine: msg_send_trap [mach trap]
+ * Purpose:
+ * Send a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ */
+
+msg_return_t
+msg_send_trap(msg, option, send_size, time_out)
+ msg_header_t *msg;
+ msg_option_t option;
+ msg_size_t send_size;
+ msg_timeout_t time_out;
+{
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_kmsg_t kmsg;
+ mach_msg_return_t mr;
+
+ send_size = (send_size + 3) & ~3; /* round up */
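+
+	/*
+	 *	The expression above rounds the size up to the next
+	 *	multiple of 4.  The general form for any power-of-two
+	 *	alignment is (x + align - 1) & ~(align - 1); a tiny
+	 *	standalone check:
+	 */
+#if 0	/* illustrative sketch only */
+#include <assert.h>
+
+/* With align == 4 this is exactly (send_size + 3) & ~3:
+   5 -> 8, 8 -> 8, 0 -> 0. */
+static unsigned round_up(unsigned x, unsigned align)
+{
+	return (x + align - 1) & ~(align - 1);
+}
+
+int main(void)
+{
+	assert(round_up(5, 4) == 8);
+	assert(round_up(8, 4) == 8);
+	assert(round_up(0, 4) == 0);
+	return 0;
+}
+#endif	/* illustration */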
+
+ if (send_size > MSG_SIZE_MAX)
+ return SEND_MSG_TOO_LARGE;
+
+ mr = ipc_kmsg_get((mach_msg_header_t *) msg,
+ (mach_msg_size_t) send_size,
+ &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ return msg_return_translate(mr);
+
+ mr = ipc_kmsg_copyin_compat(kmsg, space, map);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ return msg_return_translate(mr);
+ }
+
+ if (option & SEND_NOTIFY) {
+ mr = ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT,
+ ((option & SEND_TIMEOUT) ?
+ (mach_msg_timeout_t) time_out :
+ MACH_MSG_TIMEOUT_NONE));
+ if (mr == MACH_SEND_TIMED_OUT) {
+ ipc_port_t dest = (ipc_port_t)
+ kmsg->ikm_header.msgh_remote_port;
+
+ mr = ipc_marequest_create(space, dest, MACH_PORT_NULL,
+ &kmsg->ikm_marequest);
+ if (mr == MACH_MSG_SUCCESS) {
+ ipc_mqueue_send_always(kmsg);
+ return SEND_WILL_NOTIFY;
+ }
+ }
+ } else
+ mr = ipc_mqueue_send(kmsg,
+ ((option & SEND_TIMEOUT) ?
+ MACH_SEND_TIMEOUT :
+ MACH_MSG_OPTION_NONE),
+ (mach_msg_timeout_t) time_out);
+
+ if (mr != MACH_MSG_SUCCESS)
+ ipc_kmsg_destroy(kmsg);
+
+ return msg_return_translate(mr);
+}
+
+/*
+ * Routine: msg_receive_trap [mach trap]
+ * Purpose:
+ * Receive a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ */
+
+msg_return_t
+msg_receive_trap(msg, option, rcv_size, rcv_name, time_out)
+ msg_header_t *msg;
+ msg_option_t option;
+ msg_size_t rcv_size;
+ port_name_t rcv_name;
+ msg_timeout_t time_out;
+{
+ ipc_thread_t self;
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_object_t object;
+ ipc_mqueue_t mqueue;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ mr = ipc_mqueue_copyin(space, (mach_port_t) rcv_name,
+ &mqueue, &object);
+ if (mr != MACH_MSG_SUCCESS)
+ return msg_return_translate(mr);
+ /* hold ref for object; mqueue is locked */
+
+#ifdef CONTINUATIONS
+ /*
+ * ipc_mqueue_receive may not return, because if we block
+ * then our kernel stack may be discarded. So we save
+ * state here for msg_receive_continue to pick up.
+ */
+
+ self = current_thread();
+ self->ith_msg = (mach_msg_header_t *) msg;
+ self->ith_option = (mach_msg_option_t) option;
+ self->ith_rcv_size = (mach_msg_size_t) rcv_size;
+ self->ith_timeout = (mach_msg_timeout_t) time_out;
+ self->ith_object = object;
+ self->ith_mqueue = mqueue;
+#endif /* CONTINUATIONS */
+
+ mr = ipc_mqueue_receive(mqueue,
+ (option & RCV_TIMEOUT) ?
+ MACH_RCV_TIMEOUT : MACH_MSG_OPTION_NONE,
+ (mach_msg_size_t) rcv_size,
+ (mach_msg_timeout_t) time_out,
+ FALSE, msg_receive_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS) {
+ if (mr == MACH_RCV_TOO_LARGE) {
+ msg_size_t real_size =
+ (msg_size_t) (mach_msg_size_t) kmsg;
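+			/*
+			 * When ipc_mqueue_receive fails with
+			 * MACH_RCV_TOO_LARGE, the kmsg out-parameter does
+			 * not point at a message: it appears to carry the
+			 * offending message's size instead, which the
+			 * double cast above recovers so it can be reported
+			 * back to the caller through msg->msg_size below.
+			 */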
+
+ assert(real_size > rcv_size);
+
+ (void) copyout((vm_offset_t) &real_size,
+ (vm_offset_t) &msg->msg_size,
+ sizeof(msg_size_t));
+ }
+
+ return msg_return_translate(mr);
+ }
+
+ assert(kmsg->ikm_header.msgh_size <= (mach_msg_size_t) rcv_size);
+
+ mr = ipc_kmsg_copyout_compat(kmsg, space, map);
+ assert(mr == MACH_MSG_SUCCESS);
+
+ mr = ipc_kmsg_put((mach_msg_header_t *) msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ return msg_return_translate(mr);
+}
+
+/*
+ * Routine: msg_rpc_trap [mach trap]
+ * Purpose:
+ * Send and receive a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ */
+
+msg_return_t
+msg_rpc_trap(msg, option, send_size, rcv_size, send_timeout, rcv_timeout)
+ msg_header_t *msg;
+ msg_option_t option;
+ msg_size_t send_size;
+ msg_size_t rcv_size;
+ msg_timeout_t send_timeout;
+ msg_timeout_t rcv_timeout;
+{
+ ipc_thread_t self;
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_port_t reply;
+ ipc_pset_t pset;
+ ipc_mqueue_t mqueue;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ /*
+ * Instead of using msg_send_trap and msg_receive_trap,
+ * we implement msg_rpc_trap directly. The difference
+ * is how the reply port is handled. Instead of using
+	 * ipc_mqueue_copyin, we save a reference for the reply
+	 * port carried in the sent message, so the receive still
+	 * works even if the request changes that port's name in
+	 * our space (consider, for example, a rename kernel call
+	 * applied to the call's own reply port). This matches the
+	 * behaviour of the Mach 2.5 msg_rpc_trap.
+ */
+
+ send_size = (send_size + 3) & ~3; /* round up */
+
+ if (send_size > MSG_SIZE_MAX)
+ return SEND_MSG_TOO_LARGE;
+
+ mr = ipc_kmsg_get((mach_msg_header_t *) msg,
+ (mach_msg_size_t) send_size,
+ &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ return msg_return_translate(mr);
+
+ mr = ipc_kmsg_copyin_compat(kmsg, space, map);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ return msg_return_translate(mr);
+ }
+
+ reply = (ipc_port_t) kmsg->ikm_header.msgh_local_port;
+ if (IP_VALID(reply))
+ ipc_port_reference(reply);
+
+ if (option & SEND_NOTIFY) {
+ mr = ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT,
+ ((option & SEND_TIMEOUT) ?
+ (mach_msg_timeout_t) send_timeout :
+ MACH_MSG_TIMEOUT_NONE));
+ if (mr == MACH_SEND_TIMED_OUT) {
+ ipc_port_t dest = (ipc_port_t)
+ kmsg->ikm_header.msgh_remote_port;
+
+ mr = ipc_marequest_create(space, dest, MACH_PORT_NULL,
+ &kmsg->ikm_marequest);
+ if (mr == MACH_MSG_SUCCESS) {
+ ipc_mqueue_send_always(kmsg);
+ if (IP_VALID(reply))
+ ipc_port_release(reply);
+ return SEND_WILL_NOTIFY;
+ }
+ }
+ } else
+ mr = ipc_mqueue_send(kmsg,
+ ((option & SEND_TIMEOUT) ?
+ MACH_SEND_TIMEOUT :
+ MACH_MSG_OPTION_NONE),
+ (mach_msg_timeout_t) send_timeout);
+
+ if (mr != MACH_MSG_SUCCESS) {
+ ipc_kmsg_destroy(kmsg);
+ if (IP_VALID(reply))
+ ipc_port_release(reply);
+ return msg_return_translate(mr);
+ }
+
+ if (!IP_VALID(reply))
+ return RCV_INVALID_PORT;
+
+ ip_lock(reply);
+ if (reply->ip_receiver != space) {
+ ip_release(reply);
+ ip_check_unlock(reply);
+ return RCV_INVALID_PORT;
+ }
+
+ assert(ip_active(reply));
+ pset = reply->ip_pset;
+
+ if (pset != IPS_NULL) {
+ ips_lock(pset);
+ if (ips_active(pset)) {
+ ips_unlock(pset);
+ ip_release(reply);
+ ip_unlock(reply);
+ return RCV_INVALID_PORT;
+ }
+
+ ipc_pset_remove(pset, reply);
+ ips_check_unlock(pset);
+ assert(reply->ip_pset == IPS_NULL);
+ }
+
+ mqueue = &reply->ip_messages;
+ imq_lock(mqueue);
+ ip_unlock(reply);
+
+#ifdef CONTINUATIONS
+ /*
+ * ipc_mqueue_receive may not return, because if we block
+ * then our kernel stack may be discarded. So we save
+ * state here for msg_receive_continue to pick up.
+ */
+
+ self = current_thread();
+ self->ith_msg = (mach_msg_header_t *) msg;
+ self->ith_option = (mach_msg_option_t) option;
+ self->ith_rcv_size = (mach_msg_size_t) rcv_size;
+ self->ith_timeout = (mach_msg_timeout_t) rcv_timeout;
+ self->ith_object = (ipc_object_t) reply;
+ self->ith_mqueue = mqueue;
+#endif /* CONTINUATIONS */
+
+ mr = ipc_mqueue_receive(mqueue,
+ (option & RCV_TIMEOUT) ?
+ MACH_RCV_TIMEOUT : MACH_MSG_OPTION_NONE,
+ (mach_msg_size_t) rcv_size,
+ (mach_msg_timeout_t) rcv_timeout,
+ FALSE, msg_receive_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_port_release(reply);
+ if (mr != MACH_MSG_SUCCESS) {
+ if (mr == MACH_RCV_TOO_LARGE) {
+ msg_size_t real_size =
+ (msg_size_t) (mach_msg_size_t) kmsg;
+
+ assert(real_size > rcv_size);
+
+ (void) copyout((vm_offset_t) &real_size,
+ (vm_offset_t) &msg->msg_size,
+ sizeof(msg_size_t));
+ }
+
+ return msg_return_translate(mr);
+ }
+
+ assert(kmsg->ikm_header.msgh_size <= (mach_msg_size_t) rcv_size);
+
+ mr = ipc_kmsg_copyout_compat(kmsg, space, map);
+ assert(mr == MACH_MSG_SUCCESS);
+
+ mr = ipc_kmsg_put((mach_msg_header_t *) msg,
+ kmsg, kmsg->ikm_header.msgh_size);
+ return msg_return_translate(mr);
+}
+
+#ifdef CONTINUATIONS
+/*
+ * Routine: msg_receive_continue
+ * Purpose:
+ * Continue after blocking for a message.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack,
+ * with the receive state saved in the thread. From here
+ * control goes back to user space.
+ */
+
+void
+msg_receive_continue()
+{
+ ipc_thread_t self = current_thread();
+ msg_header_t *msg = (msg_header_t *) self->ith_msg;
+ msg_option_t option = (msg_option_t) self->ith_option;
+ msg_size_t rcv_size = (msg_size_t) self->ith_rcv_size;
+ msg_timeout_t time_out = (msg_timeout_t) self->ith_timeout;
+ ipc_object_t object = self->ith_object;
+ ipc_mqueue_t mqueue = self->ith_mqueue;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ mr = ipc_mqueue_receive(mqueue,
+ (option & RCV_TIMEOUT) ?
+ MACH_RCV_TIMEOUT : MACH_MSG_OPTION_NONE,
+ (mach_msg_size_t) rcv_size,
+ (mach_msg_timeout_t) time_out,
+ TRUE, msg_receive_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS) {
+ if (mr == MACH_RCV_TOO_LARGE) {
+ msg_size_t real_size =
+ (msg_size_t) (mach_msg_size_t) kmsg;
+
+ assert(real_size > rcv_size);
+
+ (void) copyout((vm_offset_t) &real_size,
+ (vm_offset_t) &msg->msg_size,
+ sizeof(msg_size_t));
+ }
+
+ thread_syscall_return(msg_return_translate(mr));
+ /*NOTREACHED*/
+ }
+
+ assert(kmsg->ikm_header.msgh_size <= (mach_msg_size_t) rcv_size);
+
+ mr = ipc_kmsg_copyout_compat(kmsg, current_space(), current_map());
+ assert(mr == MACH_MSG_SUCCESS);
+
+ mr = ipc_kmsg_put((mach_msg_header_t *) msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ thread_syscall_return(msg_return_translate(mr));
+ /*NOTREACHED*/
+}
+#endif /* CONTINUATIONS */
+
+#endif /* MACH_IPC_COMPAT */
diff --git a/ipc/mach_msg.h b/ipc/mach_msg.h
new file mode 100644
index 00000000..55c3526f
--- /dev/null
+++ b/ipc/mach_msg.h
@@ -0,0 +1,68 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_msg.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of internal messaging primitives.
+ */
+
+#ifndef _IPC_MACH_MSG_H_
+#define _IPC_MACH_MSG_H_
+
+#include <mach_ipc_compat.h>
+
+#include <mach/boolean.h>
+#include <mach/message.h>
+
+extern mach_msg_return_t
+mach_msg_send(/* mach_msg_header_t *, mach_msg_option_t,
+ mach_msg_size_t, mach_msg_timeout_t, mach_port_t */);
+
+extern mach_msg_return_t
+mach_msg_receive(/* mach_msg_header_t *, mach_msg_option_t,
+ mach_msg_size_t, mach_port_t,
+ mach_msg_timeout_t, mach_port_t */);
+
+extern void
+mach_msg_receive_continue();
+
+extern void
+mach_msg_continue();
+
+extern boolean_t
+mach_msg_interrupt(/* thread_t */);
+
+#if MACH_IPC_COMPAT
+
+extern void
+msg_receive_continue();
+
+#endif /* MACH_IPC_COMPAT */
+#endif /* _IPC_MACH_MSG_H_ */
diff --git a/ipc/mach_port.c b/ipc/mach_port.c
new file mode 100644
index 00000000..b26c96be
--- /dev/null
+++ b/ipc/mach_port.c
@@ -0,0 +1,2505 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_port.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Exported kernel calls. See mach/mach_port.defs.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <mach/notify.h>
+#include <mach/mach_param.h>
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#ifdef MIGRATING_THREADS
+#include <mach/rpc.h>
+#include <kern/task.h>
+#include <kern/act.h>
+#endif /* MIGRATING_THREADS */
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_right.h>
+
+
+
+/*
+ * Routine: mach_port_names_helper
+ * Purpose:
+ * A helper function for mach_port_names.
+ */
+
+void
+mach_port_names_helper(
+ ipc_port_timestamp_t timestamp,
+ ipc_entry_t entry,
+ mach_port_t name,
+ mach_port_t *names,
+ mach_port_type_t *types,
+ ipc_entry_num_t *actualp)
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+ ipc_port_request_index_t request = entry->ie_request;
+ mach_port_type_t type;
+ ipc_entry_num_t actual;
+
+ if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
+ ipc_port_t port;
+ boolean_t died;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ /*
+ * The timestamp serializes mach_port_names
+ * with ipc_port_destroy. If the port died,
+ * but after mach_port_names started, pretend
+ * that it isn't dead.
+ */
+
+ ip_lock(port);
+ died = (!ip_active(port) &&
+ IP_TIMESTAMP_ORDER(port->ip_timestamp, timestamp));
+ ip_unlock(port);
+
+ if (died) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ return;
+#endif /* MACH_IPC_COMPAT */
+
+ /* pretend this is a dead-name entry */
+
+ bits &= ~(IE_BITS_TYPE_MASK|IE_BITS_MAREQUEST);
+ bits |= MACH_PORT_TYPE_DEAD_NAME;
+ if (request != 0)
+ bits++;
+ request = 0;
+ }
+ }
+
+ type = IE_BITS_TYPE(bits);
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ type |= MACH_PORT_TYPE_COMPAT;
+ else
+#endif /* MACH_IPC_COMPAT */
+ if (request != 0)
+ type |= MACH_PORT_TYPE_DNREQUEST;
+ if (bits & IE_BITS_MAREQUEST)
+ type |= MACH_PORT_TYPE_MAREQUEST;
+
+ actual = *actualp;
+ names[actual] = name;
+ types[actual] = type;
+ *actualp = actual+1;
+}
+
+/*
+ * Routine: mach_port_names [kernel call]
+ * Purpose:
+ * Retrieves a list of the rights present in the space,
+ * along with type information. (Same as returned
+ * by mach_port_type.) The names are returned in
+ * no particular order, but they (and the type info)
+ * are an accurate snapshot of the space.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Arrays of names and types returned.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_names(
+ ipc_space_t space,
+ mach_port_t **namesp,
+ mach_msg_type_number_t *namesCnt,
+ mach_port_type_t **typesp,
+ mach_msg_type_number_t *typesCnt)
+{
+ ipc_tree_entry_t tentry;
+ ipc_entry_t table;
+ ipc_entry_num_t tsize;
+ mach_port_index_t index;
+ ipc_entry_num_t actual; /* this many names */
+ ipc_port_timestamp_t timestamp; /* logical time of this operation */
+ mach_port_t *names;
+ mach_port_type_t *types;
+ kern_return_t kr;
+
+ vm_size_t size; /* size of allocated memory */
+ vm_offset_t addr1; /* allocated memory, for names */
+ vm_offset_t addr2; /* allocated memory, for types */
+ vm_map_copy_t memory1; /* copied-in memory, for names */
+ vm_map_copy_t memory2; /* copied-in memory, for types */
+
+ /* safe simplifying assumption */
+ assert_static(sizeof(mach_port_t) == sizeof(mach_port_type_t));
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ size = 0;
+
+ for (;;) {
+ ipc_entry_num_t bound;
+ vm_size_t size_needed;
+
+ is_read_lock(space);
+ if (!space->is_active) {
+ is_read_unlock(space);
+ if (size != 0) {
+ kmem_free(ipc_kernel_map, addr1, size);
+ kmem_free(ipc_kernel_map, addr2, size);
+ }
+ return KERN_INVALID_TASK;
+ }
+
+ /* upper bound on number of names in the space */
+
+ bound = space->is_table_size + space->is_tree_total;
+ size_needed = round_page(bound * sizeof(mach_port_t));
+
+ if (size_needed <= size)
+ break;
+
+ is_read_unlock(space);
+
+ if (size != 0) {
+ kmem_free(ipc_kernel_map, addr1, size);
+ kmem_free(ipc_kernel_map, addr2, size);
+ }
+ size = size_needed;
+
+ kr = vm_allocate(ipc_kernel_map, &addr1, size, TRUE);
+ if (kr != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+
+ kr = vm_allocate(ipc_kernel_map, &addr2, size, TRUE);
+ if (kr != KERN_SUCCESS) {
+ kmem_free(ipc_kernel_map, addr1, size);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /* can't fault while we hold locks */
+
+ kr = vm_map_pageable(ipc_kernel_map, addr1, addr1 + size,
+ VM_PROT_READ|VM_PROT_WRITE);
+ assert(kr == KERN_SUCCESS);
+
+ kr = vm_map_pageable(ipc_kernel_map, addr2, addr2 + size,
+ VM_PROT_READ|VM_PROT_WRITE);
+ assert(kr == KERN_SUCCESS);
+ }
+ /* space is read-locked and active */
+
+ names = (mach_port_t *) addr1;
+ types = (mach_port_type_t *) addr2;
+ actual = 0;
+
+ timestamp = ipc_port_timestamp();
+
+ table = space->is_table;
+ tsize = space->is_table_size;
+
+ for (index = 0; index < tsize; index++) {
+ ipc_entry_t entry = &table[index];
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
+ mach_port_t name = MACH_PORT_MAKEB(index, bits);
+
+ mach_port_names_helper(timestamp, entry, name,
+ names, types, &actual);
+ }
+ }
+
+ for (tentry = ipc_splay_traverse_start(&space->is_tree);
+ tentry != ITE_NULL;
+ tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
+ ipc_entry_t entry = &tentry->ite_entry;
+ mach_port_t name = tentry->ite_name;
+
+ assert(IE_BITS_TYPE(tentry->ite_bits) != MACH_PORT_TYPE_NONE);
+
+ mach_port_names_helper(timestamp, entry, name,
+ names, types, &actual);
+ }
+ ipc_splay_traverse_finish(&space->is_tree);
+ is_read_unlock(space);
+
+ if (actual == 0) {
+ memory1 = VM_MAP_COPY_NULL;
+ memory2 = VM_MAP_COPY_NULL;
+
+ if (size != 0) {
+ kmem_free(ipc_kernel_map, addr1, size);
+ kmem_free(ipc_kernel_map, addr2, size);
+ }
+ } else {
+ vm_size_t size_used;
+
+ size_used = round_page(actual * sizeof(mach_port_t));
+
+ /*
+ * Make used memory pageable and get it into
+ * copied-in form. Free any unused memory.
+ */
+
+ kr = vm_map_pageable(ipc_kernel_map,
+ addr1, addr1 + size_used,
+ VM_PROT_NONE);
+ assert(kr == KERN_SUCCESS);
+
+ kr = vm_map_pageable(ipc_kernel_map,
+ addr2, addr2 + size_used,
+ VM_PROT_NONE);
+ assert(kr == KERN_SUCCESS);
+
+ kr = vm_map_copyin(ipc_kernel_map, addr1, size_used,
+ TRUE, &memory1);
+ assert(kr == KERN_SUCCESS);
+
+ kr = vm_map_copyin(ipc_kernel_map, addr2, size_used,
+ TRUE, &memory2);
+ assert(kr == KERN_SUCCESS);
+
+ if (size_used != size) {
+ kmem_free(ipc_kernel_map,
+ addr1 + size_used, size - size_used);
+ kmem_free(ipc_kernel_map,
+ addr2 + size_used, size - size_used);
+ }
+ }
+
+ *namesp = (mach_port_t *) memory1;
+ *namesCnt = actual;
+ *typesp = (mach_port_type_t *) memory2;
+ *typesCnt = actual;
+ return KERN_SUCCESS;
+}
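+
+/*
+ *	mach_port_names illustrates a pattern worth calling out: the space
+ *	can grow while its lock is dropped, so the buffers are sized under
+ *	the lock, allocated (and wired, so they cannot fault while locked)
+ *	outside it, and the loop re-checks and retries until they are big
+ *	enough.  A user-space sketch of that size/allocate/re-check loop,
+ *	assuming POSIX threads and omitting the wiring step.
+ */
+#if 0	/* illustrative sketch only; pthread locks stand in for the space lock */
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+
+static pthread_mutex_t	set_lock = PTHREAD_MUTEX_INITIALIZER;
+static int		*set_items;		/* protected by set_lock */
+static size_t		set_count;		/* protected by set_lock */
+
+/* Snapshot a set whose size can change whenever set_lock is dropped:
+   size it under the lock, allocate outside the lock (allocation may
+   block), then re-check under the lock and retry if it grew. */
+static int *snapshot_set(size_t *countp)
+{
+	int *buf = NULL;
+	size_t have = 0;
+
+	for (;;) {
+		size_t need;
+
+		pthread_mutex_lock(&set_lock);
+		need = set_count;
+		if (need <= have) {
+			if (need != 0)
+				memcpy(buf, set_items, need * sizeof *buf);
+			pthread_mutex_unlock(&set_lock);
+			*countp = need;
+			return buf;
+		}
+		pthread_mutex_unlock(&set_lock);
+
+		free(buf);
+		buf = malloc(need * sizeof *buf);
+		if (buf == NULL)
+			return NULL;
+		have = need;
+	}
+}
+#endif	/* illustration */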
+
+/*
+ * Routine: mach_port_type [kernel call]
+ * Purpose:
+ * Retrieves the type of a right in the space.
+ * The type is a bitwise combination of one or more
+ * of the following type bits:
+ * MACH_PORT_TYPE_SEND
+ * MACH_PORT_TYPE_RECEIVE
+ * MACH_PORT_TYPE_SEND_ONCE
+ * MACH_PORT_TYPE_PORT_SET
+ * MACH_PORT_TYPE_DEAD_NAME
+ * In addition, the following pseudo-type bits may be present:
+ * MACH_PORT_TYPE_DNREQUEST
+ * A dead-name notification is requested.
+ * MACH_PORT_TYPE_MAREQUEST
+ * The send/receive right is blocked;
+ * a msg-accepted notification is outstanding.
+ * MACH_PORT_TYPE_COMPAT
+ * This is a compatibility-mode right;
+ * when the port dies, it will disappear
+ * instead of turning into a dead-name.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Type is returned.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ */
+
+kern_return_t
+mach_port_type(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_port_type_t *typep)
+{
+ mach_port_urefs_t urefs;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ kr = ipc_right_info(space, name, entry, typep, &urefs);
+ if (kr == KERN_SUCCESS)
+ is_write_unlock(space);
+ /* space is unlocked */
+ return kr;
+}
+
+/*
+ * Routine: mach_port_rename [kernel call]
+ * Purpose:
+ * Changes the name denoting a right,
+ * from oname to nname.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The right is renamed.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The oname doesn't denote a right.
+ * KERN_INVALID_VALUE The nname isn't a legal name.
+ * KERN_NAME_EXISTS The nname already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_rename(
+ ipc_space_t space,
+ mach_port_t oname,
+ mach_port_t nname)
+{
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (!MACH_PORT_VALID(nname))
+ return KERN_INVALID_VALUE;
+
+ return ipc_object_rename(space, oname, nname);
+}
+
+/*
+ * Routine: mach_port_allocate_name [kernel call]
+ * Purpose:
+ * Allocates a right in a space, using a specific name
+ * for the new right. Possible rights:
+ * MACH_PORT_RIGHT_RECEIVE
+ * MACH_PORT_RIGHT_PORT_SET
+ * MACH_PORT_RIGHT_DEAD_NAME
+ *
+ * A new port (allocated with MACH_PORT_RIGHT_RECEIVE)
+ * has no extant send or send-once rights and no queued
+ * messages. Its queue limit is MACH_PORT_QLIMIT_DEFAULT
+ * and its make-send count is 0. It is not a member of
+ * a port set. It has no registered no-senders or
+ * port-destroyed notification requests.
+ *
+ * A new port set has no members.
+ *
+ * A new dead name has one user reference.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The right is allocated.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE The name isn't a legal name.
+ * KERN_INVALID_VALUE "right" isn't a legal kind of right.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_allocate_name(space, right, name)
+ ipc_space_t space;
+ mach_port_right_t right;
+ mach_port_t name;
+{
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (!MACH_PORT_VALID(name))
+ return KERN_INVALID_VALUE;
+
+ switch (right) {
+ case MACH_PORT_RIGHT_RECEIVE: {
+ ipc_port_t port;
+
+ kr = ipc_port_alloc_name(space, name, &port);
+ if (kr == KERN_SUCCESS)
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_PORT_SET: {
+ ipc_pset_t pset;
+
+ kr = ipc_pset_alloc_name(space, name, &pset);
+ if (kr == KERN_SUCCESS)
+ ips_unlock(pset);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_DEAD_NAME:
+ kr = ipc_object_alloc_dead_name(space, name);
+ break;
+
+ default:
+ kr = KERN_INVALID_VALUE;
+ break;
+ }
+
+ return kr;
+}
+
+/*
+ * Routine: mach_port_allocate [kernel call]
+ * Purpose:
+ * Allocates a right in a space. Like mach_port_allocate_name,
+ * except that the implementation picks a name for the right.
+ * The name may be any legal name in the space that doesn't
+ * currently denote a right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The right is allocated.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE "right" isn't a legal kind of right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ * KERN_NO_SPACE No room in space for another right.
+ */
+
+kern_return_t
+mach_port_allocate(space, right, namep)
+ ipc_space_t space;
+ mach_port_right_t right;
+ mach_port_t *namep;
+{
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ switch (right) {
+ case MACH_PORT_RIGHT_RECEIVE: {
+ ipc_port_t port;
+
+ kr = ipc_port_alloc(space, namep, &port);
+ if (kr == KERN_SUCCESS)
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_PORT_SET: {
+ ipc_pset_t pset;
+
+ kr = ipc_pset_alloc(space, namep, &pset);
+ if (kr == KERN_SUCCESS)
+ ips_unlock(pset);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_DEAD_NAME:
+ kr = ipc_object_alloc_dead(space, namep);
+ break;
+
+ default:
+ kr = KERN_INVALID_VALUE;
+ break;
+ }
+
+ return (kr);
+}
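+
+/*
+ *	From the caller's side, the usual way into this routine is to let
+ *	the kernel choose the name for a fresh receive right in the
+ *	caller's own space.  A hedged user-space sketch: the prototype for
+ *	mach_port_allocate normally comes from a MIG-generated header
+ *	whose exact path varies between Mach derivatives, and
+ *	mach_task_self() is assumed to name the caller's IPC space.
+ */
+#if 0	/* illustrative sketch only; user-space caller */
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <mach/mach_port.h>	/* assumed MIG-generated user header */
+#include <mach/mach_init.h>	/* assumed home of mach_task_self() */
+
+/* Ask the kernel to pick a name for a new receive right in our space. */
+static kern_return_t make_receive_right(mach_port_t *namep)
+{
+	return mach_port_allocate(mach_task_self(),
+				  MACH_PORT_RIGHT_RECEIVE, namep);
+}
+#endif	/* illustration */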
+
+/*
+ * Routine: mach_port_destroy [kernel call]
+ * Purpose:
+ * Cleans up and destroys all rights denoted by a name
+ * in a space. The destruction of a receive right
+ * destroys the port, unless a port-destroyed request
+ * has been made for it; the destruction of a port-set right
+ * destroys the port set.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The name is destroyed.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ */
+
+kern_return_t
+mach_port_destroy(
+ ipc_space_t space,
+ mach_port_t name)
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ kr = ipc_right_destroy(space, name, entry); /* unlocks space */
+ return kr;
+}
+
+/*
+ * Routine: mach_port_deallocate [kernel call]
+ * Purpose:
+ * Deallocates a user reference from a send right,
+ * send-once right, or a dead-name right. May
+ * deallocate the right, if this is the last uref,
+ * and destroy the name, if it doesn't denote
+ * other rights.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The uref is deallocated.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT The right isn't correct.
+ */
+
+kern_return_t
+mach_port_deallocate(
+ ipc_space_t space,
+ mach_port_t name)
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked */
+
+ kr = ipc_right_dealloc(space, name, entry); /* unlocks space */
+ return kr;
+}
+
+/*
+ * Routine: mach_port_get_refs [kernel call]
+ * Purpose:
+ * Retrieves the number of user references held by a right.
+ * Receive rights, port-set rights, and send-once rights
+ * always have one user reference. Returns zero if the
+ * name denotes a right, but not the queried right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Number of urefs returned.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE "right" isn't a legal value.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ */
+
+kern_return_t
+mach_port_get_refs(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_port_right_t right,
+ mach_port_urefs_t *urefsp)
+{
+ mach_port_type_t type;
+ mach_port_urefs_t urefs;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (right >= MACH_PORT_RIGHT_NUMBER)
+ return KERN_INVALID_VALUE;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ kr = ipc_right_info(space, name, entry, &type, &urefs); /* unlocks */
+ if (kr != KERN_SUCCESS)
+ return kr; /* space is unlocked */
+ is_write_unlock(space);
+
+ if (type & MACH_PORT_TYPE(right))
+ switch (right) {
+ case MACH_PORT_RIGHT_SEND_ONCE:
+ assert(urefs == 1);
+ /* fall-through */
+
+ case MACH_PORT_RIGHT_PORT_SET:
+ case MACH_PORT_RIGHT_RECEIVE:
+ *urefsp = 1;
+ break;
+
+ case MACH_PORT_RIGHT_DEAD_NAME:
+ case MACH_PORT_RIGHT_SEND:
+ assert(urefs > 0);
+ *urefsp = urefs;
+ break;
+
+ default:
+ panic("mach_port_get_refs: strange rights");
+ }
+ else
+ *urefsp = 0;
+
+ return kr;
+}
+
+/*
+ * Routine: mach_port_mod_refs
+ * Purpose:
+ * Modifies the number of user references held by a right.
+ * The resulting number of user references must be non-negative.
+ * If it is zero, the right is deallocated. If the name
+ * doesn't denote other rights, it is destroyed.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Modified number of urefs.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE "right" isn't a legal value.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote specified right.
+ * KERN_INVALID_VALUE Impossible modification to urefs.
+ * KERN_UREFS_OVERFLOW Urefs would overflow.
+ */
+
+kern_return_t
+mach_port_mod_refs(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_port_right_t right,
+ mach_port_delta_t delta)
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (right >= MACH_PORT_RIGHT_NUMBER)
+ return KERN_INVALID_VALUE;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ kr = ipc_right_delta(space, name, entry, right, delta); /* unlocks */
+ return kr;
+}
+
+/*
+ * Routine: old_mach_port_get_receive_status [kernel call]
+ * Purpose:
+ * Compatibility for code written before sequence numbers.
+ * Retrieves mucho info about a receive right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved status.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+old_mach_port_get_receive_status(space, name, statusp)
+ ipc_space_t space;
+ mach_port_t name;
+ old_mach_port_status_t *statusp;
+{
+ mach_port_status_t status;
+ kern_return_t kr;
+
+ kr = mach_port_get_receive_status(space, name, &status);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ statusp->mps_pset = status.mps_pset;
+ statusp->mps_mscount = status.mps_mscount;
+ statusp->mps_qlimit = status.mps_qlimit;
+ statusp->mps_msgcount = status.mps_msgcount;
+ statusp->mps_sorights = status.mps_sorights;
+ statusp->mps_srights = status.mps_srights;
+ statusp->mps_pdrequest = status.mps_pdrequest;
+ statusp->mps_nsrequest = status.mps_nsrequest;
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_set_qlimit [kernel call]
+ * Purpose:
+ * Changes a receive right's queue limit.
+ * The new queue limit must be between 0 and
+ * MACH_PORT_QLIMIT_MAX, inclusive.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set queue limit.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ * KERN_INVALID_VALUE Illegal queue limit.
+ */
+
+kern_return_t
+mach_port_set_qlimit(space, name, qlimit)
+ ipc_space_t space;
+ mach_port_t name;
+ mach_port_msgcount_t qlimit;
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (qlimit > MACH_PORT_QLIMIT_MAX)
+ return KERN_INVALID_VALUE;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_set_qlimit(port, qlimit);
+
+ ip_unlock(port);
+ return KERN_SUCCESS;
+}
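+
+/*
+ * Example (user-space sketch, assuming the MIG-generated interface):
+ * a server expecting bursts of requests might raise the queue limit
+ * on its service port to the maximum:
+ *
+ *	kr = mach_port_set_qlimit(mach_task_self(), service_port,
+ *				  MACH_PORT_QLIMIT_MAX);
+ *
+ * Anything larger than MACH_PORT_QLIMIT_MAX is rejected with
+ * KERN_INVALID_VALUE by the check above; service_port is just an
+ * illustrative name for a receive right the task holds.
+ */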
+
+/*
+ * Routine: mach_port_set_mscount [kernel call]
+ * Purpose:
+ * Changes a receive right's make-send count.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set make-send count.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_set_mscount(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_port_mscount_t mscount)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_set_mscount(port, mscount);
+
+ ip_unlock(port);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_set_seqno [kernel call]
+ * Purpose:
+ * Changes a receive right's sequence number.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set sequence number.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_set_seqno(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_port_seqno_t seqno)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_set_seqno(port, seqno);
+
+ ip_unlock(port);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_gst_helper
+ * Purpose:
+ * A helper function for mach_port_get_set_status.
+ */
+
+void
+mach_port_gst_helper(
+ ipc_pset_t pset,
+ ipc_port_t port,
+ ipc_entry_num_t maxnames,
+ mach_port_t *names,
+ ipc_entry_num_t *actualp)
+{
+ ipc_pset_t ip_pset;
+ mach_port_t name;
+
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ assert(ip_active(port));
+
+ name = port->ip_receiver_name;
+ assert(name != MACH_PORT_NULL);
+ ip_pset = port->ip_pset;
+
+ ip_unlock(port);
+
+ if (pset == ip_pset) {
+ ipc_entry_num_t actual = *actualp;
+
+ if (actual < maxnames)
+ names[actual] = name;
+
+ *actualp = actual+1;
+ }
+}
+
+/*
+ * Routine: mach_port_get_set_status [kernel call]
+ * Purpose:
+ * Retrieves a list of members in a port set.
+ * Returns the space's name for each receive right member.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved list of members.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote a port set.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_get_set_status(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_port_t **members,
+ mach_msg_type_number_t *membersCnt)
+{
+ ipc_entry_num_t actual; /* this many members */
+ ipc_entry_num_t maxnames; /* space for this many members */
+ kern_return_t kr;
+
+ vm_size_t size; /* size of allocated memory */
+ vm_offset_t addr; /* allocated memory */
+ vm_map_copy_t memory; /* copied-in memory */
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ size = PAGE_SIZE; /* initial guess */
+
+ for (;;) {
+ ipc_tree_entry_t tentry;
+ ipc_entry_t entry, table;
+ ipc_entry_num_t tsize;
+ mach_port_index_t index;
+ mach_port_t *names;
+ ipc_pset_t pset;
+
+ kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
+ if (kr != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /* can't fault while we hold locks */
+
+ kr = vm_map_pageable(ipc_kernel_map, addr, addr + size,
+ VM_PROT_READ|VM_PROT_WRITE);
+ assert(kr == KERN_SUCCESS);
+
+ kr = ipc_right_lookup_read(space, name, &entry);
+ if (kr != KERN_SUCCESS) {
+ kmem_free(ipc_kernel_map, addr, size);
+ return kr;
+ }
+ /* space is read-locked and active */
+
+ if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_PORT_SET) {
+ is_read_unlock(space);
+ kmem_free(ipc_kernel_map, addr, size);
+ return KERN_INVALID_RIGHT;
+ }
+
+ pset = (ipc_pset_t) entry->ie_object;
+ assert(pset != IPS_NULL);
+ /* the port set must be active */
+
+ names = (mach_port_t *) addr;
+ maxnames = size / sizeof(mach_port_t);
+ actual = 0;
+
+ table = space->is_table;
+ tsize = space->is_table_size;
+
+ for (index = 0; index < tsize; index++) {
+ ipc_entry_t ientry = &table[index];
+ ipc_entry_bits_t bits = ientry->ie_bits;
+
+ if (bits & MACH_PORT_TYPE_RECEIVE) {
+ ipc_port_t port =
+ (ipc_port_t) ientry->ie_object;
+
+ mach_port_gst_helper(pset, port, maxnames,
+ names, &actual);
+ }
+ }
+
+ for (tentry = ipc_splay_traverse_start(&space->is_tree);
+ tentry != ITE_NULL;
+ tentry = ipc_splay_traverse_next(&space->is_tree,FALSE)) {
+ ipc_entry_bits_t bits = tentry->ite_bits;
+
+ assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);
+
+ if (bits & MACH_PORT_TYPE_RECEIVE) {
+ ipc_port_t port =
+ (ipc_port_t) tentry->ite_object;
+
+ mach_port_gst_helper(pset, port, maxnames,
+ names, &actual);
+ }
+ }
+ ipc_splay_traverse_finish(&space->is_tree);
+ is_read_unlock(space);
+
+ if (actual <= maxnames)
+ break;
+
+ /* didn't have enough memory; allocate more */
+
+ kmem_free(ipc_kernel_map, addr, size);
+ size = round_page(actual * sizeof(mach_port_t)) + PAGE_SIZE;
+ }
+
+ if (actual == 0) {
+ memory = VM_MAP_COPY_NULL;
+
+ kmem_free(ipc_kernel_map, addr, size);
+ } else {
+ vm_size_t size_used;
+
+ size_used = round_page(actual * sizeof(mach_port_t));
+
+ /*
+ * Make used memory pageable and get it into
+ * copied-in form. Free any unused memory.
+ */
+
+ kr = vm_map_pageable(ipc_kernel_map,
+ addr, addr + size_used,
+ VM_PROT_NONE);
+ assert(kr == KERN_SUCCESS);
+
+ kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
+ TRUE, &memory);
+ assert(kr == KERN_SUCCESS);
+
+ if (size_used != size)
+ kmem_free(ipc_kernel_map,
+ addr + size_used, size - size_used);
+ }
+
+ *members = (mach_port_t *) memory;
+ *membersCnt = actual;
+ return KERN_SUCCESS;
+}
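+
+/*
+ * Example (user-space sketch, assuming the MIG-generated interface):
+ * the member list is returned as out-of-line memory, so the caller
+ * is expected to release it with vm_deallocate when done:
+ *
+ *	mach_port_t *members;
+ *	mach_msg_type_number_t count;
+ *
+ *	kr = mach_port_get_set_status(mach_task_self(), pset_name,
+ *				      &members, &count);
+ *	...
+ *	(void) vm_deallocate(mach_task_self(), (vm_offset_t) members,
+ *			     count * sizeof *members);
+ *
+ * pset_name is an illustrative name for a port-set right held by
+ * the task.
+ */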
+
+/*
+ * Routine: mach_port_move_member [kernel call]
+ * Purpose:
+ * If after is MACH_PORT_NULL, removes member
+ * from the port set it is in. Otherwise, adds
+ * member to after, removing it from any set
+ * it might already be in.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Moved the port.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Member didn't denote a right.
+ * KERN_INVALID_RIGHT Member didn't denote a receive right.
+ * KERN_INVALID_NAME After didn't denote a right.
+ * KERN_INVALID_RIGHT After didn't denote a port set right.
+ * KERN_NOT_IN_SET
+ * After is MACH_PORT_NULL and Member isn't in a port set.
+ */
+
+kern_return_t
+mach_port_move_member(
+ ipc_space_t space,
+ mach_port_t member,
+ mach_port_t after)
+{
+ ipc_entry_t entry;
+ ipc_port_t port;
+ ipc_pset_t nset;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_right_lookup_read(space, member, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is read-locked and active */
+
+ if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
+ is_read_unlock(space);
+ return KERN_INVALID_RIGHT;
+ }
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (after == MACH_PORT_NULL)
+ nset = IPS_NULL;
+ else {
+ entry = ipc_entry_lookup(space, after);
+ if (entry == IE_NULL) {
+ is_read_unlock(space);
+ return KERN_INVALID_NAME;
+ }
+
+ if ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) {
+ is_read_unlock(space);
+ return KERN_INVALID_RIGHT;
+ }
+
+ nset = (ipc_pset_t) entry->ie_object;
+ assert(nset != IPS_NULL);
+ }
+
+ kr = ipc_pset_move(space, port, nset);
+ /* space is unlocked */
+ return kr;
+}
+
+/*
+ * Routine: mach_port_request_notification [kernel call]
+ * Purpose:
+ * Requests a notification. The caller supplies
+ * a send-once right for the notification to use,
+ * and the call returns the previously registered
+ * send-once right, if any. Possible types:
+ *
+ * MACH_NOTIFY_PORT_DESTROYED
+ * Requests a port-destroyed notification
+ * for a receive right. Sync should be zero.
+ * MACH_NOTIFY_NO_SENDERS
+ * Requests a no-senders notification for a
+ * receive right. If there are currently no
+ * senders, sync is less than or equal to the
+ * current make-send count, and a send-once right
+ * is supplied, then an immediate no-senders
+ * notification is generated.
+ * MACH_NOTIFY_DEAD_NAME
+ * Requests a dead-name notification for a send
+ * or receive right. If the name is already a
+ * dead name, sync is non-zero, and a send-once
+ * right is supplied, then an immediate dead-name
+ * notification is generated.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Requested a notification.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE Bad id value.
+ * KERN_INVALID_NAME Name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote appropriate right.
+ * KERN_INVALID_CAPABILITY The notify port is dead.
+ * MACH_NOTIFY_PORT_DESTROYED:
+ * KERN_INVALID_VALUE Sync isn't zero.
+ * MACH_NOTIFY_DEAD_NAME:
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ * KERN_INVALID_ARGUMENT Name denotes dead name, but
+ * sync is zero or notify is IP_NULL.
+ * KERN_UREFS_OVERFLOW Name denotes dead name, but
+ * generating immediate notif. would overflow urefs.
+ */
+
+kern_return_t
+mach_port_request_notification(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_msg_id_t id,
+ mach_port_mscount_t sync,
+ ipc_port_t notify,
+ ipc_port_t *previousp)
+{
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (notify == IP_DEAD)
+ return KERN_INVALID_CAPABILITY;
+
+ switch (id) {
+ case MACH_NOTIFY_PORT_DESTROYED: {
+ ipc_port_t port, previous;
+
+ if (sync != 0)
+ return KERN_INVALID_VALUE;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_pdrequest(port, notify, &previous);
+ /* port is unlocked */
+
+#if MACH_IPC_COMPAT
+ /*
+ * If previous was a send right instead of a send-once
+ * right, we can't return it in the reply message.
+ * So destroy it instead.
+ */
+
+ if ((previous != IP_NULL) && ip_pdsendp(previous)) {
+ ipc_port_release_send(ip_pdsend(previous));
+ previous = IP_NULL;
+ }
+#endif /* MACH_IPC_COMPAT */
+
+ *previousp = previous;
+ break;
+ }
+
+ case MACH_NOTIFY_NO_SENDERS: {
+ ipc_port_t port;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_nsrequest(port, sync, notify, previousp);
+ /* port is unlocked */
+ break;
+ }
+
+ case MACH_NOTIFY_DEAD_NAME:
+ kr = ipc_right_dnrequest(space, name, sync != 0,
+ notify, previousp);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ break;
+
+ default:
+ return KERN_INVALID_VALUE;
+ }
+
+ return KERN_SUCCESS;
+}
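+
+/*
+ * Example (user-space sketch; the MIG-generated interface in
+ * mach/mach_port.defs also carries a disposition for the notify
+ * port, assumed here to be MACH_MSG_TYPE_MAKE_SEND_ONCE): asking
+ * for a dead-name notification on "name" to be sent to notify_port:
+ *
+ *	mach_port_t previous;
+ *
+ *	kr = mach_port_request_notification(mach_task_self(), name,
+ *		MACH_NOTIFY_DEAD_NAME, 0, notify_port,
+ *		MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
+ *
+ * Any previously registered send-once right comes back in
+ * "previous"; notify_port is an illustrative receive right owned
+ * by the caller.
+ */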
+
+/*
+ * Routine: mach_port_insert_right [kernel call]
+ * Purpose:
+ * Inserts a right into a space, as if the space
+ * voluntarily received the right in a message,
+ * except that the right gets the specified name.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Inserted the right.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE The name isn't a legal name.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_INVALID_VALUE Message doesn't carry a port right.
+ * KERN_INVALID_CAPABILITY Port is null or dead.
+ * KERN_UREFS_OVERFLOW Urefs limit would be exceeded.
+ * KERN_RIGHT_EXISTS Space has rights under another name.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_insert_right(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_port_t poly,
+ mach_msg_type_name_t polyPoly)
+{
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (!MACH_PORT_VALID(name) ||
+ !MACH_MSG_TYPE_PORT_ANY_RIGHT(polyPoly))
+ return KERN_INVALID_VALUE;
+
+ if (!IO_VALID(poly))
+ return KERN_INVALID_CAPABILITY;
+
+ return ipc_object_copyout_name(space, poly, polyPoly, FALSE, name);
+}
+
+/*
+ * Routine: mach_port_extract_right [kernel call]
+ * Purpose:
+ * Extracts a right from a space, as if the space
+ * voluntarily sent the right to the caller.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted the right.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE Requested type isn't a port right.
+ * KERN_INVALID_NAME Name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote appropriate right.
+ */
+
+kern_return_t
+mach_port_extract_right(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_msg_type_name_t msgt_name,
+ ipc_port_t *poly,
+ mach_msg_type_name_t *polyPoly)
+{
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ if (!MACH_MSG_TYPE_PORT_ANY(msgt_name))
+ return KERN_INVALID_VALUE;
+
+ kr = ipc_object_copyin(space, name, msgt_name, (ipc_object_t *) poly);
+
+ if (kr == KERN_SUCCESS)
+ *polyPoly = ipc_object_copyin_type(msgt_name);
+ return kr;
+}
+
+/*
+ * Routine: mach_port_get_receive_status [kernel call]
+ * Purpose:
+ * Retrieves mucho info about a receive right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved status.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_get_receive_status(space, name, statusp)
+ ipc_space_t space;
+ mach_port_t name;
+ mach_port_status_t *statusp;
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ if (port->ip_pset != IPS_NULL) {
+ ipc_pset_t pset = port->ip_pset;
+
+ ips_lock(pset);
+ if (!ips_active(pset)) {
+ ipc_pset_remove(pset, port);
+ ips_check_unlock(pset);
+ goto no_port_set;
+ } else {
+ statusp->mps_pset = pset->ips_local_name;
+ imq_lock(&pset->ips_messages);
+ statusp->mps_seqno = port->ip_seqno;
+ imq_unlock(&pset->ips_messages);
+ ips_unlock(pset);
+ assert(MACH_PORT_VALID(statusp->mps_pset));
+ }
+ } else {
+ no_port_set:
+ statusp->mps_pset = MACH_PORT_NULL;
+ imq_lock(&port->ip_messages);
+ statusp->mps_seqno = port->ip_seqno;
+ imq_unlock(&port->ip_messages);
+ }
+
+ statusp->mps_mscount = port->ip_mscount;
+ statusp->mps_qlimit = port->ip_qlimit;
+ statusp->mps_msgcount = port->ip_msgcount;
+ statusp->mps_sorights = port->ip_sorights;
+ statusp->mps_srights = port->ip_srights > 0;
+ statusp->mps_pdrequest = port->ip_pdrequest != IP_NULL;
+ statusp->mps_nsrequest = port->ip_nsrequest != IP_NULL;
+ ip_unlock(port);
+
+ return KERN_SUCCESS;
+}
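+
+/*
+ * Example (user-space sketch, assuming the MIG-generated interface):
+ * a receiver can inspect how many messages are queued on one of its
+ * ports:
+ *
+ *	mach_port_status_t status;
+ *
+ *	kr = mach_port_get_receive_status(mach_task_self(), name,
+ *					  &status);
+ *	if (kr == KERN_SUCCESS && status.mps_msgcount > 0)
+ *		printf("%u messages queued\n", status.mps_msgcount);
+ */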
+
+#ifdef MIGRATING_THREADS
+kern_return_t
+mach_port_set_rpcinfo(space, name, rpc_info, rpc_info_count)
+ ipc_space_t space;
+ mach_port_t name;
+ void *rpc_info;
+ unsigned int rpc_info_count;
+{
+ ipc_target_t target;
+ ipc_object_t object;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_object_translate(space, name,
+ MACH_PORT_RIGHT_PORT_SET, &object);
+ if (kr == KERN_SUCCESS)
+ target = &((ipc_pset_t)object)->ips_target;
+ else {
+ kr = ipc_object_translate(space, name,
+ MACH_PORT_RIGHT_RECEIVE, &object);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ target = &((ipc_port_t)object)->ip_target;
+ }
+
+ /* port/pset is locked and active */
+
+ kr = port_machine_set_rpcinfo(target, rpc_info, rpc_info_count);
+
+ io_unlock(object);
+
+ return kr;
+}
+
+#if 1
+int sacts, maxsacts;
+#endif
+
+sact_count()
+{
+ printf("%d server activations in use, %d max\n", sacts, maxsacts);
+}
+
+kern_return_t
+mach_port_create_act(task, name, user_stack, user_rbuf, user_rbuf_size, out_act)
+ task_t task;
+ mach_port_t name;
+ vm_offset_t user_stack;
+ vm_offset_t user_rbuf;
+ vm_size_t user_rbuf_size;
+ Act **out_act;
+{
+ ipc_target_t target;
+ ipc_space_t space;
+ ipc_object_t object;
+ kern_return_t kr;
+ Act *act;
+
+ if (task == 0)
+ return KERN_INVALID_TASK;
+
+ /* First create the new activation. */
+ kr = act_create(task, user_stack, user_rbuf, user_rbuf_size, &act);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ space = task->itk_space;
+
+ kr = ipc_object_translate(space, name,
+ MACH_PORT_RIGHT_PORT_SET, &object);
+ if (kr == KERN_SUCCESS)
+ target = &((ipc_pset_t)object)->ips_target;
+ else {
+ kr = ipc_object_translate(space, name,
+ MACH_PORT_RIGHT_RECEIVE, &object);
+ if (kr != KERN_SUCCESS) {
+ act_terminate(act);
+ act_deallocate(act);
+ return kr;
+ }
+ target = &((ipc_port_t)object)->ip_target;
+ }
+
+ /* port/pset is locked and active */
+#if 0
+ printf("act port/pset %08x ipc_target %08x stack %08x act %08x\n",
+ object, target, user_stack, act);
+#endif
+
+ /* Assign the activation to the port's actpool. */
+ kr = act_set_target(act, target);
+ if (kr != KERN_SUCCESS) {
+ io_unlock(object);
+ act_terminate(act);
+ act_deallocate(act);
+ return kr;
+ }
+#if 0
+ printf(" actpool %08x act %08x\n", target->ip_actpool, act);
+#endif
+
+ io_unlock(object);
+
+ /* Pass our reference to the activation back to the user. */
+ *out_act = act;
+
+#if 1
+ sacts++;
+ if (sacts > maxsacts)
+ maxsacts = sacts;
+ act->mact.pcb->ss.mpsfu_high = 0x69;
+#endif
+ return KERN_SUCCESS;
+}
+
+#ifdef RPCKERNELSIG
+kern_return_t
+mach_port_set_syscall_right(task, name)
+ task_t task;
+ mach_port_t name;
+{
+ ipc_space_t space;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (task == 0)
+ return KERN_INVALID_TASK;
+
+ space = task->itk_space;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+ /* space is write-locked and active */
+
+ if (!(entry->ie_bits & MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND))) {
+ is_write_unlock(space);
+ return KERN_INVALID_RIGHT;
+ }
+
+ task->syscall_ipc_entry = *entry;
+
+ is_write_unlock(space);
+
+ return KERN_SUCCESS;
+}
+#endif
+#endif /* MIGRATING_THREADS */
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: port_translate_compat
+ * Purpose:
+ * Converts a name to a receive right.
+ * Conditions:
+ * Nothing locked. If successful, the port
+ * is returned locked and active.
+ * Returns:
+ * KERN_SUCCESS Port is returned.
+ * KERN_INVALID_ARGUMENT The space is dead.
+ * KERN_INVALID_ARGUMENT Name doesn't denote port rights.
+ * KERN_NOT_RECEIVER Name denotes send, not receive, rights.
+ * KERN_NOT_RECEIVER Name denotes a send-once right.
+ * KERN_NOT_RECEIVER Name denotes a dead name.
+ */
+
+kern_return_t
+port_translate_compat(space, name, portp)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_port_t *portp;
+{
+ ipc_entry_t entry;
+ mach_port_type_t type;
+ mach_port_urefs_t urefs;
+ ipc_port_t port;
+ kern_return_t kr;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return KERN_INVALID_ARGUMENT;
+ /* space is write-locked and active */
+
+ kr = ipc_right_info(space, name, entry, &type, &urefs);
+ if (kr != KERN_SUCCESS)
+ return KERN_INVALID_ARGUMENT; /* space is unlocked */
+
+ if ((type & (MACH_PORT_TYPE_RECEIVE)) == 0) {
+ is_write_unlock(space);
+ if (type & MACH_PORT_TYPE_PORT_OR_DEAD)
+ return KERN_NOT_RECEIVER;
+ else
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ is_write_unlock(space);
+ assert(ip_active(port));
+
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: convert_port_type
+ * Purpose:
+ * Convert a new mach_port_type_t to an old value.
+ * Note send-once rights and dead names get
+ * represented as send rights. The extra info
+ * bits get dropped.
+ */
+
+mach_port_type_t
+convert_port_type(type)
+ mach_port_type_t type;
+{
+ switch (type & MACH_PORT_TYPE_ALL_RIGHTS) {
+ case MACH_PORT_TYPE_SEND:
+ case MACH_PORT_TYPE_SEND_ONCE:
+ case MACH_PORT_TYPE_DEAD_NAME:
+ return PORT_TYPE_SEND;
+
+ case MACH_PORT_TYPE_RECEIVE:
+ case MACH_PORT_TYPE_SEND_RECEIVE:
+ return PORT_TYPE_RECEIVE_OWN;
+
+ case MACH_PORT_TYPE_PORT_SET:
+ return PORT_TYPE_SET;
+
+ default:
+#if MACH_ASSERT
+ assert(!"convert_port_type: strange port type");
+#else
+ panic("convert_port_type: strange port type");
+#endif
+ }
+}
+
+/*
+ * Routine: port_names [kernel call]
+ * Purpose:
+ * Retrieve all the names in the task's port name space.
+ * As a (major) convenience, return port type information.
+ * The port name space includes port sets.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved names.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * Additions:
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_names(space, namesp, namesCnt, typesp, typesCnt)
+ ipc_space_t space;
+ mach_port_t **namesp;
+ mach_msg_type_number_t *namesCnt;
+ mach_port_type_t **typesp;
+ mach_msg_type_number_t *typesCnt;
+{
+ kern_return_t kr;
+
+ kr = mach_port_names(space, namesp, namesCnt, typesp, typesCnt);
+ if (kr == KERN_SUCCESS) {
+ ipc_entry_num_t actual = (ipc_entry_num_t) *typesCnt;
+ mach_port_type_t *types;
+ ipc_entry_num_t i;
+
+ vm_map_copy_t copy = (vm_map_copy_t) *typesp;
+ vm_offset_t addr;
+ vm_size_t size = round_page(actual * sizeof(mach_port_type_t));
+
+ /* convert copy object back to something we can use */
+
+ kr = vm_map_copyout(ipc_kernel_map, &addr, copy);
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard((vm_map_copy_t) *typesp);
+ vm_map_copy_discard((vm_map_copy_t) *namesp);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ types = (mach_port_type_t *) addr;
+
+ for (i = 0; i < actual; i++)
+ types[i] = convert_port_type(types[i]);
+
+ /* convert memory back into a copy object */
+
+ kr = vm_map_copyin(ipc_kernel_map, addr, size,
+ TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *typesp = (mach_port_type_t *) copy;
+ } else if (kr != KERN_RESOURCE_SHORTAGE)
+ kr = KERN_INVALID_ARGUMENT;
+
+ return kr;
+}
+
+/*
+ * Routine: port_type [kernel call]
+ * Purpose:
+ * Return type of the capability named.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved type.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT The name doesn't denote a right.
+ */
+
+kern_return_t
+port_type(space, name, typep)
+ ipc_space_t space;
+ mach_port_t name;
+ mach_port_type_t *typep;
+{
+ mach_port_type_t type;
+ kern_return_t kr;
+
+ kr = mach_port_type(space, name, &type);
+ if (kr != KERN_SUCCESS)
+ return KERN_INVALID_ARGUMENT;
+
+ *typep = convert_port_type(type);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: port_rename [kernel call]
+ * Purpose:
+ * Change the name of a capability.
+ * The new name can't be in use.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved type.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT The new name is reserved.
+ * KERN_NAME_EXISTS The new name already denotes a right.
+ * KERN_INVALID_ARGUMENT The old name doesn't denote a right.
+ */
+
+kern_return_t
+port_rename(space, old_name, new_name)
+ ipc_space_t space;
+ mach_port_t old_name;
+ mach_port_t new_name;
+{
+ kern_return_t kr;
+
+ kr = mach_port_rename(space, old_name, new_name);
+ if ((kr != KERN_SUCCESS) && (kr != KERN_NAME_EXISTS))
+ kr = KERN_INVALID_ARGUMENT;
+
+ return kr;
+}
+
+/*
+ * Routine: port_allocate [kernel call]
+ * Purpose:
+ * Allocate a new port, giving all rights to "task".
+ *
+ * Returns in "port_name" the task's local name for the port.
+ * Doesn't return a reference to the port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Allocated a port.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_allocate(space, namep)
+ ipc_space_t space;
+ mach_port_t *namep;
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ kr = ipc_port_alloc_compat(space, namep, &port);
+ if (kr == KERN_SUCCESS)
+ ip_unlock(port);
+ else if (kr != KERN_RESOURCE_SHORTAGE)
+ kr = KERN_INVALID_ARGUMENT;
+
+ return kr;
+}
+
+/*
+ * Routine: port_deallocate [kernel call]
+ * Purpose:
+ * Delete port rights (send and receive) from a task.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Deallocated the port right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port right.
+ * Additions:
+ * KERN_SUCCESS Deallocated a send-once right.
+ * KERN_SUCCESS Destroyed a dead name.
+ */
+
+kern_return_t
+port_deallocate(space, name)
+ ipc_space_t space;
+ mach_port_t name;
+{
+ ipc_entry_t entry;
+ mach_port_type_t type;
+ mach_port_urefs_t urefs;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return KERN_INVALID_ARGUMENT;
+ /* space is write-locked and active */
+
+ /*
+ * We serialize with port destruction with the
+ * ipc_right_info call, not ipc_right_destroy.
+ * After ipc_right_info, we pretend that the
+ * port doesn't get destroyed.
+ */
+
+ kr = ipc_right_info(space, name, entry, &type, &urefs);
+ if (kr != KERN_SUCCESS)
+ return KERN_INVALID_ARGUMENT; /* space is unlocked */
+
+ if ((type & (MACH_PORT_TYPE_PORT_OR_DEAD)) == 0) {
+ is_write_unlock(space);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ (void) ipc_right_destroy(space, name, entry);
+ /* space is unlocked */
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: port_set_backlog [kernel call]
+ * Purpose:
+ * Change the queueing backlog on "port_name" to "backlog";
+ * the specified "task" must be the current receiver.
+ *
+ * Valid backlog values are 0 < backlog <= PORT_BACKLOG_MAX.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set the backlog.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port right.
+ * KERN_NOT_RECEIVER Name denotes send rights, not receive.
+ * KERN_INVALID_ARGUMENT Backlog value is invalid.
+ * Additions:
+ * KERN_NOT_RECEIVER Name denotes a send-once right.
+ * KERN_NOT_RECEIVER Name denotes a dead name.
+ */
+
+kern_return_t
+port_set_backlog(space, name, backlog)
+ ipc_space_t space;
+ mach_port_t name;
+ int backlog;
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if ((space == IS_NULL) ||
+ (backlog <= 0) ||
+ (backlog > PORT_BACKLOG_MAX))
+ return KERN_INVALID_ARGUMENT;
+
+ kr = port_translate_compat(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_set_qlimit(port, (mach_port_msgcount_t) backlog);
+
+ ip_unlock(port);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: port_set_backup [kernel call]
+ * Purpose:
+ * Changes the backup port for the named port.
+ * The specified "task" must be the current receiver.
+ * Returns the old backup port, if any.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set the backup.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port right.
+ * KERN_NOT_RECEIVER Name denotes send rights, not receive.
+ * Additions:
+ * KERN_NOT_RECEIVER Name denotes a send-once right.
+ * KERN_NOT_RECEIVER Name denotes a dead name.
+ */
+
+kern_return_t
+port_set_backup(space, name, backup, previousp)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_port_t backup;
+ ipc_port_t *previousp;
+{
+ ipc_port_t port, previous;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (backup == IP_DEAD)
+ backup = IP_NULL;
+ else if (backup != IP_NULL)
+ backup = ip_pdsendm(backup);
+
+ kr = port_translate_compat(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_pdrequest(port, backup, &previous);
+ /* port is unlocked */
+
+ /*
+ * If previous was a send-once right instead of a send
+ * right, we can't return it in the reply message.
+ * So get rid of it in a notification instead.
+ */
+
+ if (previous != IP_NULL) {
+ if (ip_pdsendp(previous))
+ previous = ip_pdsend(previous);
+ else {
+ ipc_notify_send_once(previous);
+ previous = IP_NULL;
+ }
+ }
+
+ *previousp = previous;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: port_status [kernel call]
+ * Purpose:
+ * Returns statistics related to "port_name", as seen by "task".
+ * Only the receiver for a given port will see true message
+ * counts.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved status.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port right.
+ * Additions:
+ * KERN_SUCCESS Send-once right.
+ * KERN_SUCCESS Dead name.
+ */
+
+kern_return_t
+port_status(space, name, enabledp, num_msgs, backlog,
+ ownership, receive_rights)
+ ipc_space_t space;
+ mach_port_t name;
+ mach_port_t *enabledp;
+ int *num_msgs;
+ int *backlog;
+ boolean_t *ownership;
+ boolean_t *receive_rights;
+{
+ ipc_entry_t entry;
+ mach_port_type_t type;
+ mach_port_urefs_t urefs;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return KERN_INVALID_ARGUMENT;
+ /* space is write-locked and active */
+
+ kr = ipc_right_info(space, name, entry, &type, &urefs);
+ if (kr != KERN_SUCCESS)
+ return KERN_INVALID_ARGUMENT; /* space is unlocked */
+
+ if ((type & MACH_PORT_TYPE_PORT_OR_DEAD) == 0) {
+ is_write_unlock(space);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (type & MACH_PORT_TYPE_RECEIVE) {
+ mach_port_t enabled;
+ mach_port_msgcount_t qlimit;
+ mach_port_msgcount_t msgcount;
+ ipc_port_t port;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ ip_lock(port);
+ is_write_unlock(space);
+ assert(ip_active(port));
+
+ if (port->ip_pset != IPS_NULL) {
+ ipc_pset_t pset = port->ip_pset;
+
+ ips_lock(pset);
+ if (!ips_active(pset)) {
+ ipc_pset_remove(pset, port);
+ ips_check_unlock(pset);
+ enabled = MACH_PORT_NULL;
+ } else {
+ enabled = pset->ips_local_name;
+ ips_unlock(pset);
+ assert(MACH_PORT_VALID(enabled));
+ }
+ } else
+ enabled = MACH_PORT_NULL;
+
+ qlimit = port->ip_qlimit;
+ msgcount = port->ip_msgcount;
+ ip_unlock(port);
+
+ *ownership = TRUE;
+ *receive_rights = TRUE;
+ *enabledp = enabled;
+ *num_msgs = (int) msgcount;
+ *backlog = (int) qlimit;
+ } else {
+ is_write_unlock(space);
+
+ *ownership = FALSE;
+ *receive_rights = FALSE;
+ *enabledp = MACH_PORT_NULL;
+ *num_msgs = -1;
+ *backlog = 0;
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: port_set_allocate [kernel call]
+ * Purpose:
+ * Create a new port set, give rights to task, and
+ * return task's local name for the set.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Allocated a port set.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_set_allocate(space, namep)
+ ipc_space_t space;
+ mach_port_t *namep;
+{
+ ipc_pset_t pset;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ kr = ipc_pset_alloc(space, namep, &pset);
+ if (kr == KERN_SUCCESS)
+ ips_unlock(pset);
+ else if (kr != KERN_RESOURCE_SHORTAGE)
+ kr = KERN_INVALID_ARGUMENT;
+
+ return kr;
+}
+
+/*
+ * Routine: port_set_deallocate [kernel call]
+ * Purpose:
+ * Destroys the task's port set. If there are any
+ * receive rights in the set, they are removed.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Deallocated the port set.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port set.
+ */
+
+kern_return_t
+port_set_deallocate(space, name)
+ ipc_space_t space;
+ mach_port_t name;
+{
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ if ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) {
+ is_write_unlock(space);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = ipc_right_destroy(space, name, entry);
+ /* space is unlocked */
+ assert(kr == KERN_SUCCESS);
+ return kr;
+}
+
+/*
+ * Routine: port_set_add [kernel call]
+ * Purpose:
+ * Moves receive rights into the port set.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Moved the receive right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT port_name doesn't denote port rights.
+ * KERN_NOT_RECEIVER port_name doesn't denote receive right.
+ * KERN_INVALID_ARGUMENT set_name doesn't denote a port set.
+ * Additions:
+ * KERN_NOT_RECEIVER port_name denotes a send-once right.
+ * KERN_NOT_RECEIVER port_name denotes a dead name.
+ */
+
+kern_return_t
+port_set_add(space, set_name, port_name)
+ ipc_space_t space;
+ mach_port_t set_name;
+ mach_port_t port_name;
+{
+ ipc_entry_t entry;
+ mach_port_type_t type;
+ mach_port_urefs_t urefs;
+ ipc_port_t port;
+ ipc_pset_t pset;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ kr = ipc_right_lookup_write(space, port_name, &entry);
+ if (kr != KERN_SUCCESS)
+ return KERN_INVALID_ARGUMENT;
+ /* space is write-locked and active */
+
+ /* use ipc_right_info to check for dead compat entries */
+
+ kr = ipc_right_info(space, port_name, entry, &type, &urefs);
+ if (kr != KERN_SUCCESS)
+ return KERN_INVALID_ARGUMENT; /* space is unlocked */
+
+ if ((type & MACH_PORT_TYPE_RECEIVE) == 0) {
+ is_write_unlock(space);
+ if (type & MACH_PORT_TYPE_PORT_OR_DEAD)
+ return KERN_NOT_RECEIVER;
+ else
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ is_write_to_read_lock(space);
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ entry = ipc_entry_lookup(space, set_name);
+ if ((entry == IE_NULL) ||
+ ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0)) {
+ is_read_unlock(space);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ pset = (ipc_pset_t) entry->ie_object;
+ assert(pset != IPS_NULL);
+
+ kr = ipc_pset_move(space, port, pset);
+ /* space is unlocked */
+ assert(kr == KERN_SUCCESS);
+ return kr;
+}
+
+/*
+ * Routine: port_set_remove [kernel call]
+ * Purpose:
+ * Removes the receive rights from the set they are in.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Removed the receive right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port right.
+ * KERN_NOT_RECEIVER Name denotes send rights, not receive.
+ * KERN_NOT_IN_SET Port isn't in a port set.
+ * Additions:
+ * KERN_NOT_RECEIVER Name denotes a send-once right.
+ * KERN_NOT_RECEIVER Name denotes a dead name.
+ */
+
+kern_return_t
+port_set_remove(space, name)
+ ipc_space_t space;
+ mach_port_t name;
+{
+ ipc_entry_t entry;
+ mach_port_type_t type;
+ mach_port_urefs_t urefs;
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return KERN_INVALID_ARGUMENT;
+ /* space is write-locked and active */
+
+ /* use ipc_right_info to check for dead compat entries */
+
+ kr = ipc_right_info(space, name, entry, &type, &urefs);
+ if (kr != KERN_SUCCESS)
+ return KERN_INVALID_ARGUMENT; /* space is unlocked */
+
+ if ((type & (MACH_PORT_TYPE_RECEIVE)) == 0) {
+ is_write_unlock(space);
+ if (type & MACH_PORT_TYPE_PORT_OR_DEAD)
+ return KERN_NOT_RECEIVER;
+ else
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ is_write_to_read_lock(space);
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ kr = ipc_pset_move(space, port, IPS_NULL);
+ /* space is unlocked */
+ return kr;
+}
+
+/*
+ * Routine: port_set_status [kernel call]
+ * Purpose:
+ * Retrieve list of members of a port set.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved port set status.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port set.
+ * Additions:
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_set_status(space, name, members, membersCnt)
+ ipc_space_t space;
+ mach_port_t name;
+ mach_port_t **members;
+ mach_msg_type_number_t *membersCnt;
+{
+ kern_return_t kr;
+
+ kr = mach_port_get_set_status(space, name, members, membersCnt);
+ if ((kr != KERN_SUCCESS) && (kr != KERN_RESOURCE_SHORTAGE))
+ kr = KERN_INVALID_ARGUMENT;
+
+ return kr;
+}
+
+/*
+ * Routine: port_insert_send [kernel call]
+ * Purpose:
+ * Inserts send rights to a port into a task,
+ * at a given name. The name must not be in use.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Inserted send right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Port is null or dead.
+ * KERN_INVALID_ARGUMENT Name is reserved.
+ * KERN_NAME_EXISTS Name already denotes a right.
+ * KERN_FAILURE Task already has rights for the port.
+ * Additions:
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_insert_send(space, port, name)
+ ipc_space_t space;
+ ipc_port_t port;
+ mach_port_t name;
+{
+ kern_return_t kr;
+
+ if ((space == IS_NULL) ||
+ !MACH_PORT_VALID(name) ||
+ !IP_VALID(port))
+ return KERN_INVALID_ARGUMENT;
+
+ kr = ipc_object_copyout_name_compat(space, (ipc_object_t) port,
+ MACH_MSG_TYPE_PORT_SEND, name);
+ switch (kr) {
+ case KERN_SUCCESS:
+ case KERN_NAME_EXISTS:
+ case KERN_RESOURCE_SHORTAGE:
+ break;
+
+ case KERN_RIGHT_EXISTS:
+ kr = KERN_FAILURE;
+ break;
+
+ default:
+ kr = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ return kr;
+}
+
+/*
+ * Routine: port_extract_send [kernel call]
+ * Purpose:
+ * Extracts send rights from "task"'s "his_name" port.
+ * The task is left with no rights for the port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted send right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote pure send rights.
+ */
+
+kern_return_t
+port_extract_send(space, name, portp)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_port_t *portp;
+{
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ kr = ipc_object_copyin_compat(space, name,
+ MSG_TYPE_PORT, TRUE,
+ (ipc_object_t *) portp);
+ if (kr != KERN_SUCCESS)
+ kr = KERN_INVALID_ARGUMENT;
+
+ return kr;
+}
+
+/*
+ * Routine: port_insert_receive [kernel call]
+ * Purpose:
+ * Inserts receive/ownership rights to a port into a task,
+ * at a given name.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Inserted receive right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Port is null. (Can't be dead.)
+ * KERN_INVALID_ARGUMENT Name is reserved.
+ * KERN_NAME_EXISTS Name already denotes a right.
+ * KERN_FAILURE Task already has rights for the port.
+ * Additions:
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_insert_receive(space, port, name)
+ ipc_space_t space;
+ ipc_port_t port;
+ mach_port_t name;
+{
+ kern_return_t kr;
+
+ if ((space == IS_NULL) ||
+ !MACH_PORT_VALID(name) ||
+ !IP_VALID(port))
+ return KERN_INVALID_ARGUMENT;
+
+ kr = ipc_object_copyout_name_compat(space, (ipc_object_t) port,
+ MACH_MSG_TYPE_PORT_RECEIVE, name);
+ switch (kr) {
+ case KERN_SUCCESS:
+ case KERN_NAME_EXISTS:
+ case KERN_RESOURCE_SHORTAGE:
+ break;
+
+ case KERN_RIGHT_EXISTS:
+ kr = KERN_FAILURE;
+ break;
+
+ default:
+ kr = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+ return kr;
+}
+
+/*
+ * Routine: port_extract_receive [kernel call]
+ * Purpose:
+ * Extracts receive/ownership rights
+ * from "task"'s "his_name" port.
+ *
+ * The task is left with no rights for the port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted receive right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+port_extract_receive(space, name, portp)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_port_t *portp;
+{
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ kr = ipc_object_copyin_compat(space, name,
+ MSG_TYPE_PORT_ALL, TRUE,
+ (ipc_object_t *) portp);
+ if (kr != KERN_SUCCESS)
+ kr = KERN_INVALID_ARGUMENT;
+
+ return kr;
+}
+
+#endif /* MACH_IPC_COMPAT */
diff --git a/ipc/mach_port.srv b/ipc/mach_port.srv
new file mode 100644
index 00000000..c4f85363
--- /dev/null
+++ b/ipc/mach_port.srv
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#include <mach/mach_port.defs>
diff --git a/ipc/mach_rpc.c b/ipc/mach_rpc.c
new file mode 100644
index 00000000..0ceeeb46
--- /dev/null
+++ b/ipc/mach_rpc.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ */
+
+#ifdef MIGRATING_THREADS
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/rpc.h>
+#include <mach/notify.h>
+#include <mach/mach_param.h>
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#include <kern/task.h>
+#include <kern/act.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_right.h>
+
+#undef DEBUG_MPRC
+
+/*
+ * XXX need to identify if one endpoint of an RPC is the kernel to
+ * ensure proper port name translation (or lack of). This is bogus.
+ */
+#define ISKERNELACT(act) ((act)->task == kernel_task)
+
+/*
+ * Copy the indicated port from the task associated with the source
+ * activation into the task associated with the destination activation.
+ *
+ * XXX on errors we should probably clear the portp to avoid leaking
+ * info to the other side.
+ */
+kern_return_t
+mach_port_rpc_copy(portp, sact, dact)
+ struct rpc_port_desc *portp;
+ struct Act *sact, *dact;
+{
+ ipc_space_t sspace, dspace;
+ mach_msg_type_name_t tname;
+ ipc_object_t iname;
+ kern_return_t kr;
+
+#ifdef DEBUG_MPRC
+ printf("m_p_rpc_copy(portp=%x/%x, sact=%x, dact=%x): ",
+ portp->name, portp->msgt_name, sact, dact);
+#endif
+ sspace = sact->task->itk_space;
+ dspace = dact->task->itk_space;
+ if (sspace == IS_NULL || dspace == IS_NULL) {
+#ifdef DEBUG_MPRC
+ printf("bogus src (%x) or dst (%x) space\n", sspace, dspace);
+#endif
+ return KERN_INVALID_TASK;
+ }
+
+ if (!MACH_MSG_TYPE_PORT_ANY(portp->msgt_name)) {
+#ifdef DEBUG_MPRC
+ printf("invalid port type\n");
+#endif
+ return KERN_INVALID_VALUE;
+ }
+
+ if (ISKERNELACT(sact)) {
+ iname = (ipc_object_t) portp->name;
+ ipc_object_copyin_from_kernel(iname, portp->msgt_name);
+ kr = KERN_SUCCESS;
+ } else {
+ kr = ipc_object_copyin(sspace, portp->name, portp->msgt_name,
+ &iname);
+ }
+ if (kr != KERN_SUCCESS) {
+#ifdef DEBUG_MPRC
+ printf("copyin returned %x\n", kr);
+#endif
+ return kr;
+ }
+
+ tname = ipc_object_copyin_type(portp->msgt_name);
+ if (!IO_VALID(iname)) {
+ portp->name = (mach_port_t) iname;
+ portp->msgt_name = tname;
+#ifdef DEBUG_MPRC
+ printf("iport %x invalid\n", iname);
+#endif
+ return KERN_SUCCESS;
+ }
+
+ if (ISKERNELACT(dact)) {
+ portp->name = (mach_port_t) iname;
+ kr = KERN_SUCCESS;
+ } else {
+ kr = ipc_object_copyout(dspace, iname, tname, TRUE,
+ &portp->name);
+ }
+ if (kr != KERN_SUCCESS) {
+ ipc_object_destroy(iname, tname);
+
+ if (kr == KERN_INVALID_CAPABILITY)
+ portp->name = MACH_PORT_DEAD;
+ else {
+ portp->name = MACH_PORT_NULL;
+#ifdef DEBUG_MPRC
+ printf("copyout iport %x returned %x\n", iname);
+#endif
+ return kr;
+ }
+ }
+
+ portp->msgt_name = tname;
+#ifdef DEBUG_MPRC
+ printf("portp=%x/%x, iname=%x\n", portp->name, portp->msgt_name, iname);
+#endif
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+mach_port_rpc_sig(space, name, buffer, buflen)
+{
+ return KERN_FAILURE;
+}
+
+#endif /* MIGRATING_THREADS */
diff --git a/ipc/port.h b/ipc/port.h
new file mode 100644
index 00000000..6e9f77b4
--- /dev/null
+++ b/ipc/port.h
@@ -0,0 +1,90 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/port.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Implementation specific complement to mach/port.h.
+ */
+
+#ifndef _IPC_PORT_H_
+#define _IPC_PORT_H_
+
+#include <mach/port.h>
+
+/*
+ * mach_port_t must be an unsigned type. Port values
+ * have two parts, a generation number and an index.
+ * These macros encapsulate all knowledge of how
+ * a mach_port_t is laid out. However, ipc/ipc_entry.c
+ * implicitly assumes when it uses the splay tree functions
+ * that the generation number is in the low bits, so that
+ * names are ordered first by index and then by generation.
+ *
+ * If the size of generation numbers changes,
+ * be sure to update IE_BITS_GEN_MASK and friends
+ * in ipc/ipc_entry.h.
+ */
+
+#if PORT_GENERATIONS
+#define MACH_PORT_INDEX(name) ((name) >> 8)
+#define MACH_PORT_GEN(name) (((name) & 0xff) << 24)
+#define MACH_PORT_MAKE(index, gen) (((index) << 8) | ((gen) >> 24))
+#else
+#define MACH_PORT_INDEX(name) (name)
+#define MACH_PORT_GEN(name) 0
+#define MACH_PORT_MAKE(index, gen) (index)
+#endif
+
+#define MACH_PORT_NGEN(name) MACH_PORT_MAKE(0, MACH_PORT_GEN(name))
+#define MACH_PORT_MAKEB(index, bits) MACH_PORT_MAKE(index, IE_BITS_GEN(bits))
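+
+/*
+ * Example (with PORT_GENERATIONS enabled): the name 0x305 carries
+ * index 3 in its high bits and generation byte 0x05 in its low bits:
+ *
+ *	MACH_PORT_INDEX(0x305)		== 0x3
+ *	MACH_PORT_GEN(0x305)		== 0x05000000
+ *	MACH_PORT_MAKE(0x3, 0x05000000)	== 0x305
+ *
+ * Because the generation lives in the low bits, names sort first by
+ * index and then by generation, as the splay tree code assumes.
+ */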
+
+/*
+ * Typedefs for code cleanliness. These must all have
+ * the same (unsigned) type as mach_port_t.
+ */
+
+typedef mach_port_t mach_port_index_t; /* index values */
+typedef mach_port_t mach_port_gen_t; /* generation numbers */
+
+
+#define MACH_PORT_UREFS_MAX ((mach_port_urefs_t) ((1 << 16) - 1))
+
+#define MACH_PORT_UREFS_OVERFLOW(urefs, delta) \
+ (((delta) > 0) && \
+ ((((urefs) + (delta)) <= (urefs)) || \
+ (((urefs) + (delta)) > MACH_PORT_UREFS_MAX)))
+
+#define MACH_PORT_UREFS_UNDERFLOW(urefs, delta) \
+ (((delta) < 0) && (-(delta) > (urefs)))
+
+#endif /* _IPC_PORT_H_ */
diff --git a/kern/act.c b/kern/act.c
new file mode 100644
index 00000000..697804fb
--- /dev/null
+++ b/kern/act.c
@@ -0,0 +1,1134 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: act.c
+ *
+ * Activation management routines
+ *
+ */
+
+#ifdef MIGRATING_THREADS
+
+#include <mach_ipc_compat.h> /* XXX */
+#include <mach/kern_return.h>
+#include <mach/alert.h>
+#include <kern/mach_param.h> /* XXX INCALL_... */
+#include <kern/zalloc.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/act.h>
+#include <kern/current.h>
+#include "ipc_target.h"
+
+static void special_handler(ReturnHandler *rh, struct Act *act);
+
+#ifdef ACT_STATIC_KLUDGE
+#undef ACT_STATIC_KLUDGE
+#define ACT_STATIC_KLUDGE 300
+#endif
+
+#ifndef ACT_STATIC_KLUDGE
+static zone_t act_zone;
+#else
+static Act *act_freelist;
+static Act free_acts[ACT_STATIC_KLUDGE];
+#endif
+
+/* This is a rather special activation
+ which resides at the top and bottom of every thread.
+ When the last "real" activation on a thread is destroyed,
+ the null_act on the bottom gets invoked, destroying the thread.
+ At the top, the null_act acts as an "invalid" cached activation,
+ which will always fail the cached-activation test on RPC paths.
+
+ As you might expect, most of its members have no particular value.
+ alerts is zero. */
+Act null_act;
+
+void
+global_act_init()
+{
+#ifndef ACT_STATIC_KLUDGE
+ act_zone = zinit(
+ sizeof(struct Act),
+ ACT_MAX * sizeof(struct Act), /* XXX */
+ ACT_CHUNK * sizeof(struct Act),
+ 0, "activations");
+#else
+ int i;
+
+printf("activations: [%x-%x]\n", &free_acts[0], &free_acts[ACT_STATIC_KLUDGE]);
+ act_freelist = &free_acts[0];
+ free_acts[0].ipt_next = 0;
+ for (i = 1; i < ACT_STATIC_KLUDGE; i++) {
+ free_acts[i].ipt_next = act_freelist;
+ act_freelist = &free_acts[i];
+ }
+ /* XXX simple_lock_init(&act_freelist->lock); */
+#endif
+
+#if 0
+ simple_lock_init(&null_act.lock);
+ refcount_init(&null_act.ref_count, 1);
+#endif
+
+ act_machine_init();
+}
+
+/* Create a new activation in a specific task.
+ Locking: Task */
+kern_return_t act_create(task_t task, vm_offset_t user_stack,
+ vm_offset_t user_rbuf, vm_size_t user_rbuf_size,
+ struct Act **new_act)
+{
+ Act *act;
+ int rc;
+
+#ifndef ACT_STATIC_KLUDGE
+ act = (Act*)zalloc(act_zone);
+ if (act == 0)
+ return(KERN_RESOURCE_SHORTAGE);
+#else
+ /* XXX ipt_lock(act_freelist); */
+ act = act_freelist;
+ if (act == 0) panic("out of activations");
+ act_freelist = act->ipt_next;
+ /* XXX ipt_unlock(act_freelist); */
+ act->ipt_next = 0;
+#endif
+ bzero(act, sizeof(*act)); /*XXX shouldn't be needed */
+
+#ifdef DEBUG
+ act->lower = act->higher = 0;
+#endif
+
+ /* Start with one reference for being active, another for the caller */
+ simple_lock_init(&act->lock);
+ refcount_init(&act->ref_count, 2);
+
+ /* Latch onto the task. */
+ act->task = task;
+ task_reference(task);
+
+ /* Other simple setup */
+ act->ipt = 0;
+ act->thread = 0;
+ act->suspend_count = 0;
+ act->active = 1;
+ act->handlers = 0;
+
+ /* The special_handler will always be last on the returnhandlers list. */
+ act->special_handler.next = 0;
+ act->special_handler.handler = special_handler;
+
+ ipc_act_init(task, act);
+ act_machine_create(task, act, user_stack, user_rbuf, user_rbuf_size);
+
+ task_lock(task);
+
+ /* Chain the act onto the task's list */
+ act->task_links.next = task->acts.next;
+ act->task_links.prev = &task->acts;
+ task->acts.next->prev = &act->task_links;
+ task->acts.next = &act->task_links;
+ task->act_count++;
+
+ task_unlock(task);
+
+ *new_act = act;
+ return KERN_SUCCESS;
+}
+
+/* This is called when an act's ref_count drops to zero.
+ This can only happen when thread is zero (not in use),
+ ipt is zero (not attached to any ipt),
+ and active is false (terminated). */
+static void act_free(Act *inc)
+{
+ act_machine_destroy(inc);
+ ipc_act_destroy(inc);
+
+ /* Drop the task reference. */
+ task_deallocate(inc->task);
+
+ /* Put the act back on the act zone */
+#ifndef ACT_STATIC_KLUDGE
+ zfree(act_zone, (vm_offset_t)inc);
+#else
+ /* XXX ipt_lock(act_freelist); */
+ inc->ipt_next = act_freelist;
+ act_freelist = inc;
+ /* XXX ipt_unlock(act_freelist); */
+#endif
+}
+
+void act_deallocate(Act *inc)
+{
+ refcount_drop(&inc->ref_count, act_free(inc));
+}
+
+/* Attach an act to the top of a thread ("push the stack").
+ The thread must be either the current one or a brand-new one.
+ Assumes the act is active but not in use.
+ Assumes that if it is attached to an ipt (i.e. the ipt pointer is nonzero),
+ the act has already been taken off the ipt's list.
+
+ Already locked: cur_thread, act */
+void act_attach(Act *act, thread_t thread, unsigned init_alert_mask)
+{
+ Act *lower;
+
+ act->thread = thread;
+
+ /* The thread holds a reference to the activation while using it. */
+ refcount_take(&act->ref_count);
+
+ /* XXX detach any cached activations from above the target */
+
+ /* Chain the act onto the thread's act stack. */
+ lower = thread->top_act;
+ act->lower = lower;
+ lower->higher = act;
+ thread->top_act = act;
+
+ act->alert_mask = init_alert_mask;
+ act->alerts = lower->alerts & init_alert_mask;
+}
+
+/* Remove the current act from the top of the current thread ("pop the stack").
+ Return it to the ipt it lives on, if any.
+ Locking: Thread > Act(not on ipt) > ipc_target */
+void act_detach(Act *cur_act)
+{
+ thread_t cur_thread = cur_act->thread;
+
+ thread_lock(cur_thread);
+ act_lock(cur_act);
+
+ /* Unlink the act from the thread's act stack */
+ cur_thread->top_act = cur_act->lower;
+ cur_act->thread = 0;
+#ifdef DEBUG
+ cur_act->lower = cur_act->higher = 0;
+#endif
+
+ thread_unlock(cur_thread);
+
+ /* Return it to the ipt's list */
+ if (cur_act->ipt)
+ {
+ ipt_lock(cur_act->ipt);
+ cur_act->ipt_next = cur_act->ipt->ipt_acts;
+ cur_act->ipt->ipt_acts = cur_act;
+ ipt_unlock(cur_act->ipt);
+#if 0
+ printf(" return to ipt %x\n", cur_act->ipt);
+#endif
+ }
+
+ act_unlock(cur_act);
+
+ /* Drop the act reference taken for being in use. */
+ refcount_drop(&cur_act->ref_count, act_free(cur_act));
+}
+
+
+
+/*** Activation control support routines ***/
+
+/* This is called by system-dependent code
+ when it detects that act->handlers is non-null
+ while returning into user mode.
+ Activations linked onto an ipt always have null act->handlers,
+ so RPC entry paths need not check it.
+
+ Locking: Act */
+void act_execute_returnhandlers()
+{
+ Act *act = current_act();
+
+#if 0
+ printf("execute_returnhandlers\n");
+#endif
+ while (1) {
+ ReturnHandler *rh;
+
+ /* Grab the next returnhandler */
+ act_lock(act);
+ rh = act->handlers;
+ if (!rh) {
+ act_unlock(act);
+ return;
+ }
+ act->handlers = rh->next;
+ act_unlock(act);
+
+ /* Execute it */
+ (*rh->handler)(rh, act);
+ }
+}
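+
+/* A ReturnHandler is queued by pushing it onto act->handlers under the
+   act lock and then nudging the act, as get_set_state() below does
+   (illustrative sketch only, not a separate entry point):
+
+	act_lock(act);
+	rh->next = act->handlers;
+	act->handlers = rh;
+	act_nudge(act);
+	act_unlock(act);
+ */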
+
+/* Try to nudge an act into executing its returnhandler chain.
+ Ensures that the activation will execute its returnhandlers
+ before it next executes any of its user-level code.
+ Also ensures that it is safe to break the thread's activation chain
+ immediately above this activation,
+ by rolling out of any outstanding two-way-optimized RPC.
+
+ The target activation is not necessarily active
+ or even in use by a thread.
+ If it isn't, this routine does nothing.
+
+ Already locked: Act */
+static void act_nudge(struct Act *act)
+{
+ /* If it's suspended, wake it up. */
+ thread_wakeup(&act->suspend_count);
+
+ /* Do a machine-dependent low-level nudge.
+ If we're on a multiprocessor,
+ this may mean sending an interprocessor interrupt.
+ In any case, it means rolling out of two-way-optimized RPC paths. */
+ act_machine_nudge(act);
+}
+
+/* Install the special returnhandler that handles suspension and termination,
+ if it hasn't been installed already.
+
+ Already locked: Act */
+static void install_special_handler(struct Act *act)
+{
+ ReturnHandler **rh;
+
+	/* The special handler must always be the last ReturnHandler on the list,
+	   because it can do tricky things like detach the act. */
+ for (rh = &act->handlers; *rh; rh = &(*rh)->next);
+ if (rh != &act->special_handler.next) {
+ *rh = &act->special_handler;
+ }
+
+ /* Nudge the target activation,
+ to ensure that it will see the returnhandler we're adding. */
+ act_nudge(act);
+}
+
+/* Locking: Act */
+static void special_handler(ReturnHandler *rh, struct Act *cur_act)
+{
+ retry:
+
+ act_lock(cur_act);
+
+ /* If someone has killed this invocation,
+ invoke the return path with a terminated exception. */
+ if (!cur_act->active) {
+ act_unlock(cur_act);
+ act_machine_return(KERN_TERMINATED);
+ /* XXX should just set the activation's reentry_routine
+ and then return from special_handler().
+ The magic reentry_routine should just pop its own activation
+ and chain to the reentry_routine of the _lower_ activation.
+ If that lower activation is the null_act,
+ the thread will then be terminated. */
+ }
+
+ /* If we're suspended, go to sleep and wait for someone to wake us up. */
+ if (cur_act->suspend_count) {
+ act_unlock(cur_act);
+ /* XXX mp unsafe */
+ thread_wait((int)&cur_act->suspend_count, FALSE);
+
+ act_lock(cur_act);
+
+ /* If we're still (or again) suspended,
+ go to sleep again after executing any new returnhandlers that may have appeared. */
+ if (cur_act->suspend_count)
+ install_special_handler(cur_act);
+ }
+
+ act_unlock(cur_act);
+}
+
+#if 0 /************************ OLD SEMI-OBSOLETE CODE *********************/
+static __dead void act_throughcall_return(Act *act)
+{
+ /* Done - destroy the act and return */
+ act_detach(act);
+ act_terminate(act);
+ act_deallocate(act);
+
+ /* XXX */
+ thread_terminate_self();
+}
+
+__dead void act_throughcall(task_t task, void (*infunc)())
+{
+ thread_t thread = current_thread();
+ Act *act;
+ ReturnHandler rh;
+ int rc;
+
+ rc = act_create(task, 0, 0, 0, &act);
+ if (rc) return rc;
+
+ act->return_routine = act_throughcall_return;
+
+ thread_lock(thread);
+ act_lock(act);
+
+ act_attach(thread, act, 0);
+
+ rh.handler = infunc;
+ rh.next = act->handlers;
+ act->handlers = &rh;
+
+ act_unlock(act);
+ thread_unlock(thread);
+
+ /* Call through the act into the returnhandler list */
+ act_machine_throughcall(act);
+}
+
+
+/* Grab an act from the specified pool, to pass to act_upcall.
+ Returns with the act locked, since it's in an inconsistent state
+ (not on its ipt but not on a thread either).
+ Returns null if no acts are available on the ipt.
+
+ Locking: ipc_target > Act(on ipt) */
+Act *act_grab(struct ipc_target *ipt)
+{
+ Act *act;
+
+ ipt_lock(ipt);
+
+ retry:
+
+ /* Pull an act off the ipt's list. */
+ act = ipt->acts;
+ if (!act)
+ goto none_avail;
+ ipt->acts = act->ipt_next;
+
+ act_lock(act);
+
+ /* If it's been terminated, drop it and get another one. */
+ if (!act->active) {
+#if 0
+ printf("dropping terminated act %08x\n", act);
+#endif
+ /* XXX ipt_deallocate(ipt); */
+ act->ipt = 0;
+ act_unlock(act);
+ act_deallocate(act);
+ goto retry;
+ }
+
+none_avail:
+ ipt_unlock(ipt);
+
+ return act;
+}
+
+/* Try to make an upcall with an act on the specified ipt.
+ If the ipt is empty, returns KERN_RESOURCE_SHORTAGE. XXX???
+
+ Locking: ipc_target > Act > Thread */
+kern_return_t act_upcall(struct Act *act, unsigned init_alert_mask,
+ vm_offset_t user_entrypoint, vm_offset_t user_data)
+{
+ thread_t cur_thread = current_thread();
+ int rc;
+
+ /* XXX locking */
+
+ act_attach(cur_thread, act, init_alert_mask);
+
+ /* Make the upcall into the destination task */
+ rc = act_machine_upcall(act, user_entrypoint, user_data);
+
+ /* Done - detach the act and return */
+ act_detach(act);
+
+ return rc;
+}
+#endif /************************ END OF OLD SEMI-OBSOLETE CODE *********************/
+
+
+
+
+/*** Act service routines ***/
+
+/* Lock this act and its current thread.
+ We can only find the thread from the act
+ and the thread must be locked before the act,
+ requiring a little icky juggling.
+
+   If the act is not currently in use by any thread,
+ returns with only the act locked.
+
+ Note that this routine is not called on any performance-critical path.
+ It is only for explicit act operations
+ which don't happen often.
+
+ Locking: Thread > Act */
+static thread_t act_lock_thread(Act *act)
+{
+ thread_t thread;
+
+ retry:
+
+ /* Find the thread */
+ act_lock(act);
+ thread = act->thread;
+ if (thread == 0)
+ {
+ act_unlock(act);
+ return 0;
+ }
+ thread_reference(thread);
+ act_unlock(act);
+
+ /* Lock the thread and re-lock the act,
+ and make sure the thread didn't change. */
+ thread_lock(thread);
+ act_lock(act);
+ if (act->thread != thread)
+ {
+ act_unlock(act);
+ thread_unlock(thread);
+ thread_deallocate(thread);
+ goto retry;
+ }
+
+ thread_deallocate(thread);
+
+ return thread;
+}
+
+/* Already locked: act->task
+ Locking: Task > Act */
+kern_return_t act_terminate_task_locked(struct Act *act)
+{
+ act_lock(act);
+
+ if (act->active)
+ {
+ /* Unlink the act from the task's act list,
+ so it doesn't appear in calls to task_acts and such.
+ The act still keeps its ref on the task, however,
+ until it loses all its own references and is freed. */
+ act->task_links.next->prev = act->task_links.prev;
+ act->task_links.prev->next = act->task_links.next;
+ act->task->act_count--;
+
+ /* Remove it from any ipc_target. XXX is this right? */
+ act_set_target(act, 0);
+
+ /* This will allow no more control operations on this act. */
+ act->active = 0;
+
+ /* When the special_handler gets executed,
+ it will see the terminated condition and exit immediately. */
+ install_special_handler(act);
+
+ /* Drop the act reference taken for being active.
+ (There is still at least one reference left: the one we were passed.) */
+ act_deallocate(act);
+ }
+
+ act_unlock(act);
+
+ return KERN_SUCCESS;
+}
+
+/* Locking: Task > Act */
+kern_return_t act_terminate(struct Act *act)
+{
+ task_t task = act->task;
+ kern_return_t rc;
+
+ /* act->task never changes,
+ so we can read it before locking the act. */
+	task_lock(task);
+
+	rc = act_terminate_task_locked(act);
+
+	task_unlock(task);
+
+ return rc;
+}
+
+/* If this Act is on a Thread and is not the topmost,
+ yank it and everything below it off of the thread's stack
+ and put it all on a new thread forked from the original one.
+ May fail due to resource shortage, but can always be retried.
+
+ Locking: Thread > Act */
+kern_return_t act_yank(Act *act)
+{
+ thread_t thread = act_lock_thread(act);
+
+#if 0
+ printf("act_yank inc %08x thread %08x\n", act, thread);
+#endif
+ if (thread)
+ {
+ if (thread->top_act != act)
+ {
+ printf("detaching act %08x from thread %08x\n", act, thread);
+
+ /* Nudge the activation into a clean point for detachment. */
+ act_nudge(act);
+
+ /* Now detach the activation
+ and give the orphan its own flow of control. */
+ /*XXX*/
+ }
+
+ thread_unlock(thread);
+ }
+ act_unlock(act);
+
+ /* Ask the thread to return as quickly as possible,
+ because its results are now useless. */
+ act_abort(act);
+
+ return KERN_SUCCESS;
+}
+
+/* Assign an activation to a specific ipc_target.
+ Fails if the activation is already assigned to another pool.
+   If ipt == 0, we remove the act from its ipt.
+
+ Locking: Act(not on ipt) > ipc_target > Act(on ipt) */
+kern_return_t act_set_target(Act *act, struct ipc_target *ipt)
+{
+ act_lock(act);
+
+ if (ipt == 0)
+ {
+ Act **lact;
+
+ ipt = act->ipt;
+		if (ipt == 0)
+		{
+			act_unlock(act);
+			return KERN_SUCCESS;
+		}
+
+ /* XXX This is a violation of the locking order. */
+ ipt_lock(ipt);
+ for (lact = &ipt->ipt_acts; *lact; lact = &((*lact)->ipt_next))
+ if (act == *lact)
+ {
+ *lact = act->ipt_next;
+ break;
+ }
+ ipt_unlock(ipt);
+
+		act->ipt = 0;
+		/* XXX ipt_deallocate(ipt); */
+		act_unlock(act);
+		act_deallocate(act);
+		return KERN_SUCCESS;
+ }
+ if (act->ipt != ipt)
+ {
+ if (act->ipt != 0)
+ {
+ act_unlock(act);
+ return KERN_FAILURE; /*XXX*/
+ }
+ act->ipt = ipt;
+ ipt->ipt_type |= IPT_TYPE_MIGRATE_RPC;
+
+ /* They get references to each other. */
+ act_reference(act);
+ ipt_reference(ipt);
+
+ /* If it is available,
+ add it to the ipt's available-activation list. */
+ if ((act->thread == 0) && (act->suspend_count == 0))
+ {
+ ipt_lock(ipt);
+ act->ipt_next = ipt->ipt_acts;
+ act->ipt->ipt_acts = act;
+ ipt_unlock(ipt);
+ }
+ }
+ act_unlock(act);
+
+ return KERN_SUCCESS;
+}
+
+/* Register an alert from this activation.
+ Each set bit is propagated upward from (but not including) this activation,
+ until the top of the chain is reached or the bit is masked.
+
+ Locking: Thread > Act */
+kern_return_t act_alert(struct Act *act, unsigned alerts)
+{
+ thread_t thread = act_lock_thread(act);
+
+#if 0
+ printf("act_alert %08x: %08x\n", act, alerts);
+#endif
+ if (thread)
+ {
+ struct Act *act_up = act;
+ while ((alerts) && (act_up != thread->top_act))
+ {
+ act_up = act_up->higher;
+ alerts &= act_up->alert_mask;
+ act_up->alerts |= alerts;
+ }
+
+ /* XXX If we reach the top, and it is blocked in glue code, do something. */
+
+ thread_unlock(thread);
+ }
+ act_unlock(act);
+
+ return KERN_SUCCESS;
+}
+
+/* Locking: Thread > Act */
+kern_return_t act_abort(struct Act *act)
+{
+ return act_alert(act, ALERT_ABORT_STRONG);
+}
+
+/* Locking: Thread > Act */
+kern_return_t act_abort_safely(struct Act *act)
+{
+ return act_alert(act, ALERT_ABORT_SAFE);
+}
+
+/* Locking: Thread > Act */
+kern_return_t act_alert_mask(struct Act *act, unsigned alert_mask)
+{
+ panic("act_alert_mask\n");
+ return KERN_SUCCESS;
+}
+
+/* Locking: Thread > Act */
+kern_return_t act_suspend(struct Act *act)
+{
+ thread_t thread = act_lock_thread(act);
+ kern_return_t rc = KERN_SUCCESS;
+
+#if 0
+ printf("act_suspend %08x\n", act);
+#endif
+ if (act->active)
+ {
+ if (act->suspend_count++ == 0)
+ {
+ /* XXX remove from ipt */
+ install_special_handler(act);
+ act_nudge(act);
+ }
+ }
+ else
+ rc = KERN_TERMINATED;
+
+ if (thread)
+ thread_unlock(thread);
+ act_unlock(act);
+
+ return rc;
+}
+
+/* Locking: Act */
+kern_return_t act_resume(struct Act *act)
+{
+#if 0
+ printf("act_resume %08x from %d\n", act, act->suspend_count);
+#endif
+
+ act_lock(act);
+ if (!act->active)
+ {
+ act_unlock(act);
+ return KERN_TERMINATED;
+ }
+
+ if (act->suspend_count > 0) {
+ if (--act->suspend_count == 0) {
+ thread_wakeup(&act->suspend_count);
+ /* XXX return to ipt */
+ }
+ }
+
+ act_unlock(act);
+
+ return KERN_SUCCESS;
+}
+
+typedef struct GetSetState {
+ struct ReturnHandler rh;
+ int flavor;
+ void *state;
+ int *pcount;
+ int result;
+} GetSetState;
+
+/* Locking: Thread */
+kern_return_t get_set_state(struct Act *act, int flavor, void *state, int *pcount,
+ void (*handler)(ReturnHandler *rh, struct Act *act))
+{
+ GetSetState gss;
+
+ /* Initialize a small parameter structure */
+ gss.rh.handler = handler;
+ gss.flavor = flavor;
+ gss.state = state;
+ gss.pcount = pcount;
+
+ /* Add it to the act's return handler list */
+ act_lock(act);
+ gss.rh.next = act->handlers;
+ act->handlers = &gss.rh;
+
+ act_nudge(act);
+
+ act_unlock(act);
+ /* XXX mp unsafe */
+ thread_wait((int)&gss, 0); /* XXX could be interruptible */
+
+ return gss.result;
+}
+
+static void get_state_handler(ReturnHandler *rh, struct Act *act)
+{
+ GetSetState *gss = (GetSetState*)rh;
+
+ gss->result = act_machine_get_state(act, gss->flavor, gss->state, gss->pcount);
+ thread_wakeup((int)gss);
+}
+
+/* Locking: Thread */
+kern_return_t act_get_state(struct Act *act, int flavor, natural_t *state, natural_t *pcount)
+{
+ return get_set_state(act, flavor, state, pcount, get_state_handler);
+}
+
+static void set_state_handler(ReturnHandler *rh, struct Act *act)
+{
+ GetSetState *gss = (GetSetState*)rh;
+
+ gss->result = act_machine_set_state(act, gss->flavor, gss->state, *gss->pcount);
+ thread_wakeup((int)gss);
+}
+
+/* Locking: Thread */
+kern_return_t act_set_state(struct Act *act, int flavor, natural_t *state, natural_t count)
+{
+ return get_set_state(act, flavor, state, &count, set_state_handler);
+}
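+
+/* Sketch of the protocol above: get_set_state() queues a GetSetState
+   ReturnHandler on the target activation and sleeps on &gss; the next
+   time the target heads back toward user mode it runs the handler
+   (get_state_handler or set_state_handler) in its own context via
+   act_execute_returnhandlers(), fills in gss->result, and wakes the
+   caller. */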
+
+
+
+/*** backward compatibility hacks ***/
+
+#include <mach/thread_info.h>
+#include <mach/thread_special_ports.h>
+#include <ipc/ipc_port.h>
+
+kern_return_t act_thread_info(Act *act, int flavor,
+ thread_info_t thread_info_out, unsigned *thread_info_count)
+{
+ return thread_info(act->thread, flavor, thread_info_out, thread_info_count);
+}
+
+kern_return_t
+act_thread_assign(Act *act, processor_set_t new_pset)
+{
+ return thread_assign(act->thread, new_pset);
+}
+
+kern_return_t
+act_thread_assign_default(Act *act)
+{
+ return thread_assign_default(act->thread);
+}
+
+kern_return_t
+act_thread_get_assignment(Act *act, processor_set_t *pset)
+{
+ return thread_get_assignment(act->thread, pset);
+}
+
+kern_return_t
+act_thread_priority(Act *act, int priority, boolean_t set_max)
+{
+ return thread_priority(act->thread, priority, set_max);
+}
+
+kern_return_t
+act_thread_max_priority(Act *act, processor_set_t *pset, int max_priority)
+{
+ return thread_max_priority(act->thread, pset, max_priority);
+}
+
+kern_return_t
+act_thread_policy(Act *act, int policy, int data)
+{
+ return thread_policy(act->thread, policy, data);
+}
+
+kern_return_t
+act_thread_wire(struct host *host, Act *act, boolean_t wired)
+{
+ return thread_wire(host, act->thread, wired);
+}
+
+kern_return_t
+act_thread_depress_abort(Act *act)
+{
+ return thread_depress_abort(act->thread);
+}
+
+/*
+ * Routine: act_get_special_port [kernel call]
+ * Purpose:
+ * Clones a send right for one of the thread's
+ * special ports.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted a send right.
+ * KERN_INVALID_ARGUMENT The thread is null.
+ * KERN_FAILURE The thread is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+act_get_special_port(Act *act, int which, ipc_port_t *portp)
+{
+ ipc_port_t *whichp;
+ ipc_port_t port;
+
+#if 0
+ printf("act_get_special_port\n");
+#endif
+ if (act == 0)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+#if MACH_IPC_COMPAT
+ case THREAD_REPLY_PORT:
+ whichp = &act->reply_port;
+ break;
+#endif MACH_IPC_COMPAT
+
+ case THREAD_KERNEL_PORT:
+ whichp = &act->self_port;
+ break;
+
+ case THREAD_EXCEPTION_PORT:
+ whichp = &act->exception_port;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ thread_lock(act->thread);
+
+ if (act->self_port == IP_NULL) {
+ thread_unlock(act->thread);
+ return KERN_FAILURE;
+ }
+
+ port = ipc_port_copy_send(*whichp);
+ thread_unlock(act->thread);
+
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: act_set_special_port [kernel call]
+ * Purpose:
+ * Changes one of the thread's special ports,
+ * setting it to the supplied send right.
+ * Conditions:
+ * Nothing locked. If successful, consumes
+ * the supplied send right.
+ * Returns:
+ * KERN_SUCCESS Changed the special port.
+ * KERN_INVALID_ARGUMENT The thread is null.
+ * KERN_FAILURE The thread is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+act_set_special_port(Act *act, int which, ipc_port_t port)
+{
+ ipc_port_t *whichp;
+ ipc_port_t old;
+
+#if 0
+ printf("act_set_special_port\n");
+#endif
+ if (act == 0)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+#if MACH_IPC_COMPAT
+ case THREAD_REPLY_PORT:
+ whichp = &act->reply_port;
+ break;
+#endif MACH_IPC_COMPAT
+
+ case THREAD_KERNEL_PORT:
+ whichp = &act->self_port;
+ break;
+
+ case THREAD_EXCEPTION_PORT:
+ whichp = &act->exception_port;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ thread_lock(act->thread);
+ if (act->self_port == IP_NULL) {
+ thread_unlock(act->thread);
+ return KERN_FAILURE;
+ }
+
+ old = *whichp;
+ *whichp = port;
+ thread_unlock(act->thread);
+
+ if (IP_VALID(old))
+ ipc_port_release_send(old);
+ return KERN_SUCCESS;
+}
+
+/*
+ * XXX lame, non-blocking ways to get/set state.
+ * Return thread's machine-dependent state.
+ */
+kern_return_t
+act_get_state_immediate(act, flavor, old_state, old_state_count)
+ register Act *act;
+ int flavor;
+ void *old_state; /* pointer to OUT array */
+ unsigned int *old_state_count; /*IN/OUT*/
+{
+ kern_return_t ret;
+
+ act_lock(act);
+ /* not the top activation, return current state */
+ if (act->thread && act->thread->top_act != act) {
+ ret = act_machine_get_state(act, flavor,
+ old_state, old_state_count);
+ act_unlock(act);
+ return ret;
+ }
+ act_unlock(act);
+
+ /* not sure this makes sense */
+ return act_get_state(act, flavor, old_state, old_state_count);
+}
+
+/*
+ * Change thread's machine-dependent state.
+ */
+kern_return_t
+act_set_state_immediate(act, flavor, new_state, new_state_count)
+ register Act *act;
+ int flavor;
+ void *new_state;
+ unsigned int new_state_count;
+{
+ kern_return_t ret;
+
+ act_lock(act);
+ /* not the top activation, set it now */
+ if (act->thread && act->thread->top_act != act) {
+ ret = act_machine_set_state(act, flavor,
+ new_state, new_state_count);
+ act_unlock(act);
+ return ret;
+ }
+ act_unlock(act);
+
+ /* not sure this makes sense */
+ return act_set_state(act, flavor, new_state, new_state_count);
+}
+
+void act_count()
+{
+ int i;
+ Act *act;
+ static int amin = ACT_STATIC_KLUDGE;
+
+ i = 0;
+ for (act = act_freelist; act; act = act->ipt_next)
+ i++;
+ if (i < amin)
+ amin = i;
+ printf("%d of %d activations in use, %d max\n",
+ ACT_STATIC_KLUDGE-i, ACT_STATIC_KLUDGE, ACT_STATIC_KLUDGE-amin);
+}
+
+dump_act(act)
+ Act *act;
+{
+ act_count();
+ kact_count();
+ while (act) {
+ printf("%08.8x: thread=%x, task=%x, hi=%x, lo=%x, ref=%x\n",
+ act, act->thread, act->task,
+ act->higher, act->lower, act->ref_count);
+ printf("\talerts=%x, mask=%x, susp=%x, active=%x\n",
+ act->alerts, act->alert_mask,
+ act->suspend_count, act->active);
+ machine_dump_act(&act->mact);
+ if (act == act->lower)
+ break;
+ act = act->lower;
+ }
+}
+
+#ifdef ACTWATCH
+Act *
+get_next_act(sp)
+ int sp;
+{
+ static int i;
+ Act *act;
+
+ while (1) {
+ if (i == ACT_STATIC_KLUDGE) {
+ i = 0;
+ return 0;
+ }
+ act = &free_acts[i];
+ i++;
+ if (act->mact.space == sp)
+ return act;
+ }
+}
+#endif
+
+#endif /* MIGRATING_THREADS */
diff --git a/kern/act.h b/kern/act.h
new file mode 100644
index 00000000..236e6b35
--- /dev/null
+++ b/kern/act.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: act.h
+ *
+ * This defines the Act structure,
+ * which is the kernel representation of a user-space activation.
+ *
+ */
+
+#ifndef _KERN_ACT_H_
+#define _KERN_ACT_H_
+
+#ifdef MIGRATING_THREADS
+
+#ifndef __dead /* XXX */
+#define __dead
+#endif
+
+#include <mach_ipc_compat.h>
+#include <mach/vm_param.h>
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/refcount.h>
+#include <kern/queue.h>
+
+#include "act.h"/*XXX*/
+
+struct task;
+struct thread;
+struct Act;
+
+
+struct ReturnHandler {
+ struct ReturnHandler *next;
+ void (*handler)(struct ReturnHandler *rh, struct Act *act);
+};
+typedef struct ReturnHandler ReturnHandler;
+
+
+
+struct Act {
+
+ /*** Task linkage ***/
+
+ /* Links for task's circular list of activations.
+ The activation is only on the task's activation list while active.
+ Must be first. */
+ queue_chain_t task_links;
+
+ /* Reference to the task this activation is in.
+ This is constant as long as the activation is allocated. */
+ struct task *task;
+
+
+
+ /*** Machine-dependent state ***/
+ /* XXX should be first to allow maximum flexibility to MD code */
+ MachineAct mact;
+
+
+
+ /*** Consistency ***/
+ RefCount ref_count;
+ decl_simple_lock_data(,lock)
+
+
+
+ /*** ipc_target-related stuff ***/
+
+	/* ipc_target (activation pool) this activation normally lives on, zero if none.
+	   The activation and the ipc_target hold references to each other as long as this is nonzero
+	   (even when the activation isn't actually on the ipc_target's list). */
+ struct ipc_target *ipt;
+
+ /* Link on the ipt's list of activations.
+ The activation is only actually on the ipt's list (and hence this is valid)
+ when we're not in use (thread == 0) and not suspended (suspend_count == 0). */
+ struct Act *ipt_next;
+
+
+
+ /*** Thread linkage ***/
+
+ /* Thread this activation is in, zero if not in use.
+ The thread holds a reference on the activation while this is nonzero. */
+ struct thread *thread;
+
+ /* The rest in this section is only valid when thread is nonzero. */
+
+ /* Next higher and next lower activation on the thread's activation stack.
+ For a topmost activation or the null_act, higher is undefined.
+ The bottommost activation is always the null_act. */
+ struct Act *higher, *lower;
+
+ /* Alert bits pending at this activation;
+ some of them may have propagated from lower activations. */
+ unsigned alerts;
+
+ /* Mask of alert bits to be allowed to pass through from lower levels. */
+ unsigned alert_mask;
+
+
+
+ /*** Control information ***/
+
+ /* Number of outstanding suspensions on this activation. */
+ int suspend_count;
+
+ /* This is normally true, but is set to false when the activation is terminated. */
+ int active;
+
+ /* Chain of return handlers to be called
+ before the thread is allowed to return to this invocation */
+ ReturnHandler *handlers;
+
+ /* A special ReturnHandler attached to the above chain to handle suspension and such */
+ ReturnHandler special_handler;
+
+
+
+ /* Special ports attached to this activation */
+ struct ipc_port *self; /* not a right, doesn't hold ref */
+ struct ipc_port *self_port; /* a send right */
+ struct ipc_port *exception_port; /* a send right */
+ struct ipc_port *syscall_port; /* a send right */
+#if MACH_IPC_COMPAT
+ struct ipc_port *reply_port; /* a send right */
+ struct task *reply_task;
+#endif MACH_IPC_COMPAT
+};
+typedef struct Act Act;
+typedef struct Act *act_t;
+typedef mach_port_t *act_array_t;
+
+#define ACT_NULL ((Act*)0)
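+
+/* Shape of a thread's activation stack, as described above (sketch):
+
+	thread->top_act --> act_N --> ... --> act_1 --> null_act
+
+   Each act's "lower" pointer leads toward the null_act at the bottom;
+   "higher" leads back toward the top of the stack. */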
+
+
+/* Exported to world */
+kern_return_t act_create(struct task *task, vm_offset_t user_stack, vm_offset_t user_rbuf, vm_size_t user_rbuf_size, struct Act **new_act);
+kern_return_t act_alert_mask(struct Act *act, unsigned alert_mask);
+kern_return_t act_alert(struct Act *act, unsigned alerts);
+kern_return_t act_abort(struct Act *act);
+kern_return_t act_abort_safely(struct Act *act);
+kern_return_t act_terminate(struct Act *act);
+kern_return_t act_suspend(struct Act *act);
+kern_return_t act_resume(struct Act *act);
+kern_return_t act_get_state(struct Act *act, int flavor,
+ natural_t *state, natural_t *pcount);
+kern_return_t act_set_state(struct Act *act, int flavor,
+ natural_t *state, natural_t count);
+
+#define act_lock(act) simple_lock(&(act)->lock)
+#define act_unlock(act) simple_unlock(&(act)->lock)
+
+#define act_reference(act) refcount_take(&(act)->ref_count)
+void act_deallocate(struct Act *act);
+
+/* Exported to startup.c */
+void act_init(void);
+
+/* Exported to task.c */
+kern_return_t act_terminate_task_locked(struct Act *act);
+
+/* Exported to thread.c */
+extern Act null_act;
+kern_return_t act_create_kernel(Act **out_act);
+
+/* Exported to machine-dependent activation code */
+void act_execute_returnhandlers(void);
+
+
+
+/* System-dependent functions */
+kern_return_t act_machine_create(struct task *task, Act *inc, vm_offset_t user_stack, vm_offset_t user_rbuf, vm_size_t user_rbuf_size);
+void act_machine_destroy(Act *inc);
+kern_return_t act_machine_set_state(Act *inc, int flavor, int *tstate, unsigned count);
+kern_return_t act_machine_get_state(Act *inc, int flavor, int *tstate, unsigned *count);
+
+
+
+#endif /* MIGRATING_THREADS */
+#endif _KERN_ACT_H_
diff --git a/kern/assert.h b/kern/assert.h
new file mode 100644
index 00000000..f98662ba
--- /dev/null
+++ b/kern/assert.h
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_ASSERT_H_
+#define _KERN_ASSERT_H_
+
+/* assert.h 4.2 85/01/21 */
+
+#include <kern/macro_help.h>
+
+#ifdef DEBUG
+#define MACH_ASSERT 1
+#endif
+
+#if MACH_ASSERT
+extern void Assert(char *exp, char *filename, int line);
+
+#define assert(ex) \
+MACRO_BEGIN \
+ if (!(ex)) \
+ Assert(#ex, __FILE__, __LINE__); \
+MACRO_END
+
+#ifdef lint
+#define assert_static(x)
+#else lint
+#define assert_static(x) assert(x)
+#endif lint
+
+#else MACH_ASSERT
+#define assert(ex)
+#define assert_static(ex)
+#endif MACH_ASSERT
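+
+/* Illustrative use (hypothetical condition, not from the original source):
+
+	assert(refcount > 0);
+
+   The check compiles away entirely unless MACH_ASSERT is defined. */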
+
+#endif _KERN_ASSERT_H_
diff --git a/kern/ast.c b/kern/ast.c
new file mode 100644
index 00000000..fc26f943
--- /dev/null
+++ b/kern/ast.c
@@ -0,0 +1,242 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ *
+ * This file contains routines to check whether an ast is needed.
+ *
+ * ast_check() - check whether ast is needed for interrupt or context
+ * switch. Usually called by clock interrupt handler.
+ *
+ */
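+
+/*
+ * ast_taken() - called by machine-dependent code instead of returning
+ *		 to user mode when need_ast is set (see kern/ast.h).
+ */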
+
+#include <cpus.h>
+#include <mach_fixpri.h>
+#include <norma_ipc.h>
+
+#include <kern/ast.h>
+#include <kern/counters.h>
+#include "cpu_number.h"
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+#include <kern/processor.h>
+
+#include <machine/machspl.h> /* for splsched */
+
+#if MACH_FIXPRI
+#include <mach/policy.h>
+#endif MACH_FIXPRI
+
+
+volatile ast_t need_ast[NCPUS];
+
+void
+ast_init()
+{
+#ifndef MACHINE_AST
+ register int i;
+
+ for (i=0; i<NCPUS; i++)
+ need_ast[i] = 0;
+#endif MACHINE_AST
+}
+
+void
+ast_taken()
+{
+ register thread_t self = current_thread();
+ register ast_t reasons;
+
+ /*
+ * Interrupts are still disabled.
+ * We must clear need_ast and then enable interrupts.
+ */
+
+ reasons = need_ast[cpu_number()];
+ need_ast[cpu_number()] = AST_ZILCH;
+ (void) spl0();
+
+ /*
+ * These actions must not block.
+ */
+
+ if (reasons & AST_NETWORK)
+ net_ast();
+
+#if NORMA_IPC
+ if (reasons & AST_NETIPC)
+ netipc_ast();
+#endif NORMA_IPC
+
+ /*
+ * Make darn sure that we don't call thread_halt_self
+ * or thread_block from the idle thread.
+ */
+
+ if (self != current_processor()->idle_thread) {
+#ifndef MIGRATING_THREADS
+ while (thread_should_halt(self))
+ thread_halt_self();
+#endif
+
+ /*
+ * One of the previous actions might well have
+ * woken a high-priority thread, so we use
+ * csw_needed in addition to AST_BLOCK.
+ */
+
+ if ((reasons & AST_BLOCK) ||
+ csw_needed(self, current_processor())) {
+ counter(c_ast_taken_block++);
+ thread_block(thread_exception_return);
+ }
+ }
+}
+
+void
+ast_check()
+{
+ register int mycpu = cpu_number();
+ register processor_t myprocessor;
+ register thread_t thread = current_thread();
+ register run_queue_t rq;
+ spl_t s = splsched();
+
+ /*
+ * Check processor state for ast conditions.
+ */
+ myprocessor = cpu_to_processor(mycpu);
+ switch(myprocessor->state) {
+ case PROCESSOR_OFF_LINE:
+ case PROCESSOR_IDLE:
+ case PROCESSOR_DISPATCHING:
+ /*
+ * No ast.
+ */
+ break;
+
+#if NCPUS > 1
+ case PROCESSOR_ASSIGN:
+ case PROCESSOR_SHUTDOWN:
+ /*
+ * Need ast to force action thread onto processor.
+ *
+ * XXX Should check if action thread is already there.
+ */
+ ast_on(mycpu, AST_BLOCK);
+ break;
+#endif NCPUS > 1
+
+ case PROCESSOR_RUNNING:
+
+ /*
+ * Propagate thread ast to processor. If we already
+ * need an ast, don't look for more reasons.
+ */
+ ast_propagate(thread, mycpu);
+ if (ast_needed(mycpu))
+ break;
+
+ /*
+ * Context switch check. The csw_needed macro isn't
+ * used here because the rq->low hint may be wrong,
+ * and fixing it here avoids an extra ast.
+ * First check the easy cases.
+ */
+ if (thread->state & TH_SUSP || myprocessor->runq.count > 0) {
+ ast_on(mycpu, AST_BLOCK);
+ break;
+ }
+
+ /*
+ * Update lazy evaluated runq->low if only timesharing.
+ */
+#if MACH_FIXPRI
+ if (myprocessor->processor_set->policies & POLICY_FIXEDPRI) {
+ if (csw_needed(thread,myprocessor)) {
+ ast_on(mycpu, AST_BLOCK);
+ break;
+ }
+ else {
+ /*
+ * For fixed priority threads, set first_quantum
+ * so entire new quantum is used.
+ */
+ if (thread->policy == POLICY_FIXEDPRI)
+ myprocessor->first_quantum = TRUE;
+ }
+ }
+ else {
+#endif MACH_FIXPRI
+ rq = &(myprocessor->processor_set->runq);
+ if (!(myprocessor->first_quantum) && (rq->count > 0)) {
+ register queue_t q;
+ /*
+ * This is not the first quantum, and there may
+ * be something in the processor_set runq.
+ * Check whether low hint is accurate.
+ */
+ q = rq->runq + *(volatile int *)&rq->low;
+ if (queue_empty(q)) {
+ register int i;
+
+ /*
+ * Need to recheck and possibly update hint.
+ */
+ simple_lock(&rq->lock);
+ q = rq->runq + rq->low;
+ if (rq->count > 0) {
+ for (i = rq->low; i < NRQS; i++) {
+ if(!(queue_empty(q)))
+ break;
+ q++;
+ }
+ rq->low = i;
+ }
+ simple_unlock(&rq->lock);
+ }
+
+ if (rq->low <= thread->sched_pri) {
+ ast_on(mycpu, AST_BLOCK);
+ break;
+ }
+ }
+#if MACH_FIXPRI
+ }
+#endif MACH_FIXPRI
+ break;
+
+ default:
+ panic("ast_check: Bad processor state (cpu %d processor %08x) state: %d",
+ mycpu, myprocessor, myprocessor->state);
+ }
+
+ (void) splx(s);
+}
diff --git a/kern/ast.h b/kern/ast.h
new file mode 100644
index 00000000..a7b8586c
--- /dev/null
+++ b/kern/ast.h
@@ -0,0 +1,132 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * kern/ast.h: Definitions for Asynchronous System Traps.
+ */
+
+#ifndef _KERN_AST_H_
+#define _KERN_AST_H_
+
+/*
+ * A CPU takes an AST when it is about to return to user code.
+ * Instead of going back to user code, it calls ast_taken.
+ * Machine-dependent code is responsible for maintaining
+ * a set of reasons for an AST, and passing this set to ast_taken.
+ */
+
+#include <cpus.h>
+
+#include "cpu_number.h"
+#include <kern/macro_help.h>
+#include <machine/ast.h>
+
+/*
+ * Bits for reasons
+ */
+
+#define AST_ZILCH 0x0
+#define AST_HALT 0x1
+#define AST_TERMINATE 0x2
+#define AST_BLOCK 0x4
+#define AST_NETWORK 0x8
+#define AST_NETIPC 0x10
+
+#define AST_SCHEDULING (AST_HALT|AST_TERMINATE|AST_BLOCK)
+
+/*
+ * Per-thread ASTs are reset at context-switch time.
+ * machine/ast.h can define MACHINE_AST_PER_THREAD.
+ */
+
+#ifndef MACHINE_AST_PER_THREAD
+#define MACHINE_AST_PER_THREAD 0
+#endif
+
+#define AST_PER_THREAD (AST_HALT | AST_TERMINATE | MACHINE_AST_PER_THREAD)
+
+typedef unsigned int ast_t;
+
+extern volatile ast_t need_ast[NCPUS];
+
+#ifdef MACHINE_AST
+/*
+ * machine/ast.h is responsible for defining aston and astoff.
+ */
+#else MACHINE_AST
+
+#define aston(mycpu)
+#define astoff(mycpu)
+
+#endif MACHINE_AST
+
+extern void ast_taken();
+
+/*
+ * ast_needed, ast_on, ast_off, ast_context, and ast_propagate
+ * assume splsched. mycpu is always cpu_number(). It is an
+ * argument in case cpu_number() is expensive.
+ */
+
+#define ast_needed(mycpu) need_ast[mycpu]
+
+#define ast_on(mycpu, reasons) \
+MACRO_BEGIN \
+ if ((need_ast[mycpu] |= (reasons)) != AST_ZILCH) \
+ { aston(mycpu); } \
+MACRO_END
+
+#define ast_off(mycpu, reasons) \
+MACRO_BEGIN \
+ if ((need_ast[mycpu] &= ~(reasons)) == AST_ZILCH) \
+ { astoff(mycpu); } \
+MACRO_END
+
+#define ast_propagate(thread, mycpu) ast_on((mycpu), (thread)->ast)
+
+#define ast_context(thread, mycpu) \
+MACRO_BEGIN \
+ if ((need_ast[mycpu] = \
+ (need_ast[mycpu] &~ AST_PER_THREAD) | (thread)->ast) \
+ != AST_ZILCH) \
+ { aston(mycpu); } \
+ else \
+ { astoff(mycpu); } \
+MACRO_END
+
+
+#define thread_ast_set(thread, reason) (thread)->ast |= (reason)
+#define thread_ast_clear(thread, reason) (thread)->ast &= ~(reason)
+#define thread_ast_clear_all(thread) (thread)->ast = AST_ZILCH
+
+/*
+ * NOTE: if thread is the current thread, thread_ast_set should
+ * be followed by ast_propagate().
+ */
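+
+/*
+ * Illustrative use of the above (hypothetical caller, at splsched):
+ *
+ *	thread_ast_set(thread, AST_BLOCK);
+ *	if (thread == current_thread())
+ *		ast_propagate(thread, cpu_number());
+ */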
+
+#endif _KERN_AST_H_
diff --git a/kern/bootstrap.c b/kern/bootstrap.c
new file mode 100644
index 00000000..f1e3c43d
--- /dev/null
+++ b/kern/bootstrap.c
@@ -0,0 +1,489 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992-1989 Carnegie Mellon University.
+ * Copyright (c) 1995-1993 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Bootstrap the various built-in servers.
+ */
+#include <mach_kdb.h>
+#include <bootstrap_symbols.h>
+
+#include <mach/port.h>
+#include <mach/message.h>
+#include "vm_param.h"
+#include <ipc/ipc_port.h>
+#include <kern/host.h>
+#include <kern/strings.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <vm/vm_kern.h>
+#include <device/device_port.h>
+
+#include <sys/varargs.h>
+
+#include <mach/machine/multiboot.h>
+#include <mach/exec/exec.h>
+
+#if MACH_KDB
+#include <machine/db_machdep.h>
+#include <ddb/db_sym.h>
+#endif
+
+
+static mach_port_t boot_device_port; /* local name */
+static mach_port_t boot_host_port; /* local name */
+
+extern struct multiboot_info *boot_info;
+extern char *kernel_cmdline;
+
+static void user_bootstrap(); /* forward */
+static void bootstrap_exec(void *exec_data);
+
+static mach_port_t
+task_insert_send_right(
+ task_t task,
+ ipc_port_t port)
+{
+ mach_port_t name;
+
+ for (name = 1;; name++) {
+ kern_return_t kr;
+
+ kr = mach_port_insert_right(task->itk_space, name,
+ (ipc_object_t)port, MACH_MSG_TYPE_PORT_SEND);
+ if (kr == KERN_SUCCESS)
+ break;
+ assert(kr == KERN_NAME_EXISTS);
+ }
+
+ return name;
+}
+
+void bootstrap_create()
+{
+ struct multiboot_module *bmod;
+
+ if (!(boot_info->flags & MULTIBOOT_MODS)
+ || (boot_info->mods_count == 0))
+ panic("No bootstrap code loaded with the kernel!");
+ if (boot_info->mods_count > 1)
+ printf("Warning: only one boot module currently used by Mach\n");
+ bmod = (struct multiboot_module *)phystokv(boot_info->mods_addr);
+ bootstrap_exec((void*)phystokv(bmod->mod_start));
+
+ /* XXX at this point, we could free all the memory used
+ by the boot modules and the boot loader's descriptors and such. */
+}
+
+/* XXX won't work with more than one bootstrap service */
+static void *boot_exec;
+
+static void
+bootstrap_exec(void *e)
+{
+ task_t bootstrap_task;
+ thread_t bootstrap_thread;
+
+ /*
+ * Create the bootstrap task.
+ */
+
+ (void) task_create(TASK_NULL, FALSE, &bootstrap_task);
+ (void) thread_create(bootstrap_task, &bootstrap_thread);
+
+ /*
+ * Insert send rights to the master host and device ports.
+ */
+
+ boot_host_port =
+ task_insert_send_right(bootstrap_task,
+ ipc_port_make_send(realhost.host_priv_self));
+
+ boot_device_port =
+ task_insert_send_right(bootstrap_task,
+ ipc_port_make_send(master_device_port));
+
+ /*
+ * Start the bootstrap thread.
+ */
+ boot_exec = e;
+ thread_start(bootstrap_thread, user_bootstrap);
+ (void) thread_resume(bootstrap_thread);
+}
+
+/*
+ * The following code runs as the kernel mode portion of the
+ * first user thread.
+ */
+
+/*
+ * Convert an unsigned integer to its decimal representation.
+ */
+static void
+itoa(
+ char *str,
+ vm_size_t num)
+{
+ char buf[sizeof(vm_size_t)*2+3];
+ register char *np;
+
+ np = buf + sizeof(buf);
+ *--np = 0;
+
+ do {
+ *--np = '0' + num % 10;
+ num /= 10;
+ } while (num != 0);
+
+ strcpy(str, np);
+}
+
+/*
+ * Collect the boot flags into a single argument string,
+ * for compatibility with existing bootstrap and startup code.
+ * Format as a standard flag argument: '-qsdn...'
+ */
+static void get_compat_strings(char *flags_str, char *root_str)
+{
+ register char *ip, *cp;
+
+ cp = flags_str;
+ *cp++ = '-';
+
+ for (ip = kernel_cmdline; *ip; )
+ {
+ if (*ip == ' ')
+ {
+ ip++;
+ }
+ else if (*ip == '-')
+ {
+ ip++;
+ while (*ip > ' ')
+ *cp++ = *ip++;
+ }
+ else if (strncmp(ip, "root=", 5) == 0)
+ {
+ char *rp = root_str;
+
+ ip += 5;
+ if (strncmp(ip, "/dev/", 5) == 0)
+ ip += 5;
+ while (*ip > ' ')
+ *rp++ = *ip++;
+ *rp = '\0';
+ }
+ else
+ {
+ while (*ip > ' ')
+ ip++;
+ }
+ }
+
+ if (cp == &flags_str[1]) /* no flags */
+ *cp++ = 'x';
+ *cp = '\0';
+}
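+
+/* For example, a kernel command line of "-s root=/dev/hd0a" leaves "-s"
+   in flags_str and "hd0a" in root_str; with no flags at all, flags_str
+   becomes the placeholder "-x". */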
+
+/*
+ * Copy boot_data (executable) to the user portion of this task.
+ */
+static boolean_t load_protect_text = TRUE;
+#if MACH_KDB
+ /* if set, fault in the text segment */
+static boolean_t load_fault_in_text = TRUE;
+#endif
+
+static vm_offset_t
+boot_map(
+ void * data, /* private data */
+ vm_offset_t offset) /* offset to map */
+{
+ vm_offset_t start_offset = (vm_offset_t) data;
+
+ return pmap_extract(kernel_pmap, start_offset + offset);
+}
+
+
+#if BOOTSTRAP_SYMBOLS
+static boolean_t load_bootstrap_symbols = TRUE;
+#else
+static boolean_t load_bootstrap_symbols = FALSE;
+#endif
+
+
+
+static int boot_read(void *handle, vm_offset_t file_ofs, void *buf, vm_size_t size,
+ vm_size_t *out_actual)
+{
+ memcpy(buf, handle + file_ofs, size);
+ *out_actual = size;
+ return 0;
+}
+
+static int read_exec(void *handle, vm_offset_t file_ofs, vm_size_t file_size,
+ vm_offset_t mem_addr, vm_size_t mem_size,
+ exec_sectype_t sec_type)
+{
+ vm_map_t user_map = current_task()->map;
+ vm_offset_t start_page, end_page;
+ vm_prot_t mem_prot = sec_type & EXEC_SECTYPE_PROT_MASK;
+ int err;
+
+ if (!(sec_type & EXEC_SECTYPE_ALLOC))
+ return 0;
+
+ assert(mem_size > 0);
+ assert(mem_size >= file_size);
+
+ start_page = trunc_page(mem_addr);
+ end_page = round_page(mem_addr + mem_size);
+
+ /*
+ printf("reading bootstrap section %08x-%08x-%08x prot %d pages %08x-%08x\n",
+ mem_addr, mem_addr+file_size, mem_addr+mem_size, mem_prot, start_page, end_page);
+ */
+
+ err = vm_allocate(user_map, &start_page, end_page - start_page, FALSE);
+ assert(err == 0);
+ assert(start_page == trunc_page(mem_addr));
+
+ if (file_size > 0)
+ {
+ err = copyout(handle + file_ofs, mem_addr, file_size);
+ assert(err == 0);
+ }
+
+ if (mem_prot != VM_PROT_ALL)
+ {
+ err = vm_protect(user_map, start_page, end_page - start_page, FALSE, mem_prot);
+ assert(err == 0);
+ }
+
+	return 0;
+}
+
+static void copy_bootstrap(void *e, struct exec_info *boot_exec_info)
+{
+ register vm_map_t user_map = current_task()->map;
+ int err;
+
+printf("loading...\n");
+ if (err = exec_load(boot_read, read_exec, e, boot_exec_info))
+ panic("Cannot load user-bootstrap image: error code %d", err);
+
+#if MACH_KDB
+ /*
+ * Enter the bootstrap symbol table.
+ */
+
+#if 0 /*XXX*/
+ if (load_bootstrap_symbols)
+ (void) X_db_sym_init(
+ (char*) boot_start+lp->sym_offset,
+ (char*) boot_start+lp->sym_offset+lp->sym_size,
+ "bootstrap",
+ (char *) user_map);
+#endif
+
+#if 0 /*XXX*/
+ if (load_fault_in_text)
+ {
+ vm_offset_t lenp = round_page(lp->text_start+lp->text_size) -
+ trunc_page(lp->text_start);
+ vm_offset_t i = 0;
+
+ while (i < lenp)
+ {
+ vm_fault(user_map, text_page_start +i,
+ load_protect_text ?
+ VM_PROT_READ|VM_PROT_EXECUTE :
+ VM_PROT_READ|VM_PROT_EXECUTE | VM_PROT_WRITE,
+ 0,0,0);
+ i = round_page (i+1);
+ }
+ }
+#endif
+#endif MACH_KDB
+}
+
+/*
+ * Allocate the stack, and build the argument list.
+ */
+extern vm_offset_t user_stack_low();
+extern vm_offset_t set_user_regs();
+
+static void
+build_args_and_stack(boot_exec_info, va_alist)
+ struct exec_info *boot_exec_info;
+ va_dcl
+{
+ vm_offset_t stack_base;
+ vm_size_t stack_size;
+ va_list argv_ptr;
+ register
+ char * arg_ptr;
+ int arg_len;
+ int arg_count;
+ register
+ char * arg_pos;
+ int arg_item_len;
+ char * string_pos;
+ char * zero = (char *)0;
+
+#define STACK_SIZE (64*1024)
+
+ /*
+ * Calculate the size of the argument list.
+ */
+ va_start(argv_ptr);
+ arg_len = 0;
+ arg_count = 0;
+ for (;;) {
+ arg_ptr = va_arg(argv_ptr, char *);
+ if (arg_ptr == 0)
+ break;
+ arg_count++;
+ arg_len += strlen(arg_ptr) + 1;
+ }
+ va_end(argv_ptr);
+
+ /*
+ * Add space for:
+ * arg count
+ * pointers to arguments
+ * trailing 0 pointer
+ * dummy 0 pointer to environment variables
+ * and align to integer boundary
+ */
+ arg_len += sizeof(integer_t)
+ + (2 + arg_count) * sizeof(char *);
+ arg_len = (arg_len + sizeof(integer_t) - 1) & ~(sizeof(integer_t)-1);
+
+ /*
+ * Allocate the stack.
+ */
+ stack_size = round_page(STACK_SIZE);
+ stack_base = user_stack_low(stack_size);
+ (void) vm_allocate(current_task()->map,
+ &stack_base,
+ stack_size,
+ FALSE);
+
+ arg_pos = (char *)
+ set_user_regs(stack_base, stack_size, boot_exec_info, arg_len);
+
+ /*
+ * Start the strings after the arg-count and pointers
+ */
+ string_pos = arg_pos
+ + sizeof(integer_t)
+ + arg_count * sizeof(char *)
+ + 2 * sizeof(char *);
+
+ /*
+ * first the argument count
+ */
+ (void) copyout((char *)&arg_count,
+ arg_pos,
+ sizeof(integer_t));
+ arg_pos += sizeof(integer_t);
+
+ /*
+ * Then the strings and string pointers for each argument
+ */
+ va_start(argv_ptr);
+ while (--arg_count >= 0) {
+ arg_ptr = va_arg(argv_ptr, char *);
+ arg_item_len = strlen(arg_ptr) + 1; /* include trailing 0 */
+
+ /* set string pointer */
+ (void) copyout((char *)&string_pos,
+ arg_pos,
+ sizeof (char *));
+ arg_pos += sizeof(char *);
+
+ /* copy string */
+ (void) copyout(arg_ptr, string_pos, arg_item_len);
+ string_pos += arg_item_len;
+ }
+ va_end(argv_ptr);
+
+ /*
+ * last, the trailing 0 argument and a null environment pointer.
+ */
+ (void) copyout((char *)&zero, arg_pos, sizeof(char *));
+ arg_pos += sizeof(char *);
+ (void) copyout((char *)&zero, arg_pos, sizeof(char *));
+}
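+
+/* Resulting user stack image laid out by build_args_and_stack()
+   (derived from the copyouts above):
+
+	arg_pos:	argument count			(integer_t)
+			argv[0] .. argv[argc-1]		(char * each)
+			0				(argv terminator)
+			0				(empty environment)
+	string_pos:	argument strings, NUL-terminated, back to back
+ */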
+
+static void user_bootstrap()
+{
+ struct exec_info boot_exec_info;
+
+ char host_string[12];
+ char device_string[12];
+ char flag_string[12];
+ char root_string[12];
+
+ /*
+ * Copy the bootstrap code from boot_exec into the user task.
+ */
+ copy_bootstrap(boot_exec, &boot_exec_info);
+
+ /*
+ * Convert the host and device ports to strings,
+ * to put in the argument list.
+ */
+ itoa(host_string, boot_host_port);
+ itoa(device_string, boot_device_port);
+
+ /*
+ * Get the (compatibility) boot flags and root name strings.
+ */
+ get_compat_strings(flag_string, root_string);
+
+ /*
+ * Build the argument list and insert in the user task.
+ * Argument list is
+ * "bootstrap -<boothowto> <host_port> <device_port> <root_name>"
+ */
+ build_args_and_stack(&boot_exec_info,
+ "bootstrap",
+ flag_string,
+ host_string,
+ device_string,
+ root_string,
+ (char *)0);
+
+printf("Starting bootstrap at %x\n", boot_exec_info.entry);
+
+ /*
+ * Exit to user thread.
+ */
+ thread_bootstrap_return();
+ /*NOTREACHED*/
+}
+
diff --git a/kern/compat_xxx_defs.h b/kern/compat_xxx_defs.h
new file mode 100644
index 00000000..1878bb22
--- /dev/null
+++ b/kern/compat_xxx_defs.h
@@ -0,0 +1,64 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Compatibility definitions for the MiG-related changes
+ * to various routines.
+ *
+ * When all user code has been relinked, this file and the xxx_
+ * and yyy_ routines MUST be removed!
+ */
+
+/* from mach.defs */
+
+#define xxx_task_info task_info
+#ifdef MIGRATING_THREADS
+#define xxx_thread_get_state act_get_state
+#define xxx_thread_set_state act_set_state
+#define xxx_thread_info act_info
+#else
+#define xxx_thread_get_state thread_get_state
+#define xxx_thread_set_state thread_set_state
+#define xxx_thread_info thread_info
+#endif /* MIGRATING_THREADS */
+
+/* from mach_host.defs */
+
+#define yyy_host_info host_info
+#define yyy_processor_info processor_info
+#define yyy_processor_set_info processor_set_info
+#define yyy_processor_control processor_control
+
+/* from device.defs */
+
+#define ds_xxx_device_set_status ds_device_set_status
+#define ds_xxx_device_get_status ds_device_get_status
+#define ds_xxx_device_set_filter ds_device_set_filter
+
+
+
diff --git a/kern/counters.c b/kern/counters.c
new file mode 100644
index 00000000..5b606f50
--- /dev/null
+++ b/kern/counters.c
@@ -0,0 +1,82 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach_counters.h>
+
+#include <kern/counters.h>
+
+/*
+ * We explicitly initialize the counters to make
+ * them contiguous in the kernel's data space.
+ * This makes them easier to examine with ddb.
+ */
+
+mach_counter_t c_thread_invoke_hits = 0;
+mach_counter_t c_thread_invoke_misses = 0;
+mach_counter_t c_thread_invoke_csw = 0;
+mach_counter_t c_thread_handoff_hits = 0;
+mach_counter_t c_thread_handoff_misses = 0;
+
+#if MACH_COUNTERS
+mach_counter_t c_threads_current = 0;
+mach_counter_t c_threads_max = 0;
+mach_counter_t c_threads_min = 0;
+mach_counter_t c_threads_total = 0;
+mach_counter_t c_stacks_current = 0;
+mach_counter_t c_stacks_max = 0;
+mach_counter_t c_stacks_min = 0;
+mach_counter_t c_stacks_total = 0;
+mach_counter_t c_clock_ticks = 0;
+mach_counter_t c_ipc_mqueue_send_block = 0;
+mach_counter_t c_ipc_mqueue_receive_block_user = 0;
+mach_counter_t c_ipc_mqueue_receive_block_kernel = 0;
+mach_counter_t c_mach_msg_trap_block_fast = 0;
+mach_counter_t c_mach_msg_trap_block_slow = 0;
+mach_counter_t c_mach_msg_trap_block_exc = 0;
+mach_counter_t c_exception_raise_block = 0;
+mach_counter_t c_swtch_block = 0;
+mach_counter_t c_swtch_pri_block = 0;
+mach_counter_t c_thread_switch_block = 0;
+mach_counter_t c_thread_switch_handoff = 0;
+mach_counter_t c_ast_taken_block = 0;
+mach_counter_t c_thread_halt_self_block = 0;
+mach_counter_t c_vm_fault_page_block_busy_user = 0;
+mach_counter_t c_vm_fault_page_block_busy_kernel = 0;
+mach_counter_t c_vm_fault_page_block_backoff_user = 0;
+mach_counter_t c_vm_fault_page_block_backoff_kernel = 0;
+mach_counter_t c_vm_page_wait_block_user = 0;
+mach_counter_t c_vm_page_wait_block_kernel = 0;
+mach_counter_t c_vm_pageout_block = 0;
+mach_counter_t c_vm_pageout_scan_block = 0;
+mach_counter_t c_idle_thread_block = 0;
+mach_counter_t c_idle_thread_handoff = 0;
+mach_counter_t c_sched_thread_block = 0;
+mach_counter_t c_io_done_thread_block = 0;
+mach_counter_t c_net_thread_block = 0;
+mach_counter_t c_reaper_thread_block = 0;
+mach_counter_t c_swapin_thread_block = 0;
+mach_counter_t c_action_thread_block = 0;
+#endif MACH_COUNTERS
diff --git a/kern/counters.h b/kern/counters.h
new file mode 100644
index 00000000..1f13ac57
--- /dev/null
+++ b/kern/counters.h
@@ -0,0 +1,107 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_COUNTERS_
+#define _KERN_COUNTERS_
+
+#include <mach_counters.h>
+
+/*
+ * We can count various interesting events and paths.
+ *
+ * Use counter() to change the counters, eg:
+ * counter(c_idle_thread_block++);
+ * Use counter_always() for non-conditional counters.
+ */
+
+#define counter_always(code) code
+
+#if MACH_COUNTERS
+
+#define counter(code) counter_always(code)
+
+#else MACH_COUNTERS
+
+#define counter(code)
+
+#endif MACH_COUNTERS
+
+/*
+ * We define the counters with individual integers,
+ * instead of a big structure, so that ddb
+ * will know the addresses of the counters.
+ */
+
+typedef unsigned int mach_counter_t;
+
+extern mach_counter_t c_thread_invoke_hits;
+extern mach_counter_t c_thread_invoke_misses;
+extern mach_counter_t c_thread_invoke_csw;
+extern mach_counter_t c_thread_handoff_hits;
+extern mach_counter_t c_thread_handoff_misses;
+
+#if MACH_COUNTERS
+extern mach_counter_t c_threads_current;
+extern mach_counter_t c_threads_max;
+extern mach_counter_t c_threads_min;
+extern mach_counter_t c_threads_total;
+extern mach_counter_t c_stacks_current;
+extern mach_counter_t c_stacks_max;
+extern mach_counter_t c_stacks_min;
+extern mach_counter_t c_stacks_total;
+extern mach_counter_t c_clock_ticks;
+extern mach_counter_t c_ipc_mqueue_send_block;
+extern mach_counter_t c_ipc_mqueue_receive_block_user;
+extern mach_counter_t c_ipc_mqueue_receive_block_kernel;
+extern mach_counter_t c_mach_msg_trap_block_fast;
+extern mach_counter_t c_mach_msg_trap_block_slow;
+extern mach_counter_t c_mach_msg_trap_block_exc;
+extern mach_counter_t c_exception_raise_block;
+extern mach_counter_t c_swtch_block;
+extern mach_counter_t c_swtch_pri_block;
+extern mach_counter_t c_thread_switch_block;
+extern mach_counter_t c_thread_switch_handoff;
+extern mach_counter_t c_ast_taken_block;
+extern mach_counter_t c_thread_halt_self_block;
+extern mach_counter_t c_vm_fault_page_block_busy_user;
+extern mach_counter_t c_vm_fault_page_block_busy_kernel;
+extern mach_counter_t c_vm_fault_page_block_backoff_user;
+extern mach_counter_t c_vm_fault_page_block_backoff_kernel;
+extern mach_counter_t c_vm_page_wait_block_user;
+extern mach_counter_t c_vm_page_wait_block_kernel;
+extern mach_counter_t c_vm_pageout_block;
+extern mach_counter_t c_vm_pageout_scan_block;
+extern mach_counter_t c_idle_thread_block;
+extern mach_counter_t c_idle_thread_handoff;
+extern mach_counter_t c_sched_thread_block;
+extern mach_counter_t c_io_done_thread_block;
+extern mach_counter_t c_net_thread_block;
+extern mach_counter_t c_reaper_thread_block;
+extern mach_counter_t c_swapin_thread_block;
+extern mach_counter_t c_action_thread_block;
+#endif	/* MACH_COUNTERS */
+
+#endif	/* _KERN_COUNTERS_ */
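
The counter()/counter_always() split above compiles the optional bookkeeping away entirely when MACH_COUNTERS is 0, while the handful of unconditional counters keep working. A minimal user-space sketch of the same pattern, with EXAMPLE_COUNTERS standing in for the MACH_COUNTERS configuration option (all names here are illustrative, not kernel symbols):

#include <stdio.h>

#define EXAMPLE_COUNTERS 1		/* plays the role of MACH_COUNTERS */

typedef unsigned int mach_counter_t;

#define counter_always(code)	code
#if EXAMPLE_COUNTERS
#define counter(code)		counter_always(code)
#else
#define counter(code)
#endif

static mach_counter_t c_example_block = 0;

int main(void)
{
	counter(c_example_block++);		/* compiled out when EXAMPLE_COUNTERS is 0 */
	counter_always(c_example_block++);	/* always present */
	printf("c_example_block = %u\n", c_example_block);
	return 0;
}
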
diff --git a/kern/cpu_number.h b/kern/cpu_number.h
new file mode 100644
index 00000000..32d83239
--- /dev/null
+++ b/kern/cpu_number.h
@@ -0,0 +1,43 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_CPU_NUMBER_H_
+#define _KERN_CPU_NUMBER_H_
+
+#include <cpus.h>
+
+/*
+ * Definitions for cpu identification in multi-processors.
+ */
+
+int master_cpu; /* 'master' processor - keeps time */
+
+#if (NCPUS == 1)
+ /* cpu number is always 0 on a single processor system */
+#define cpu_number() (0)
+
+#endif /* NCPUS == 1 */
+#endif /* _KERN_CPU_NUMBER_H_ */
diff --git a/kern/debug.c b/kern/debug.c
new file mode 100644
index 00000000..eda5b2a3
--- /dev/null
+++ b/kern/debug.c
@@ -0,0 +1,192 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach_kdb.h>
+#include <norma_ipc.h>
+#include <cpus.h>
+
+#include "cpu_number.h"
+#include <kern/lock.h>
+#include <sys/varargs.h>
+#include <kern/thread.h>
+
+
+
+extern void cnputc();
+void Debugger();
+
+#if MACH_KDB
+extern int db_breakpoints_inserted;
+#endif
+
+#if NCPUS>1
+simple_lock_data_t Assert_print_lock; /* uninited, we take our chances */
+#endif
+
+void
+Assert(char *exp, char *file, int line)
+{
+#if NCPUS > 1
+ simple_lock(&Assert_print_lock);
+ printf("{%d} Assertion failed: file \"%s\", line %d\n",
+ cpu_number(), file, line);
+ simple_unlock(&Assert_print_lock);
+#else
+ printf("Assertion `%s' failed in file \"%s\", line %d\n",
+ exp, file, line);
+#endif
+
+#if MACH_KDB
+ if (db_breakpoints_inserted)
+#endif
+ Debugger("assertion failure");
+}
+
+void Debugger(message)
+ char * message;
+{
+#if !MACH_KDB
+ panic("Debugger invoked, but there isn't one!");
+#endif
+
+#ifdef lint
+ message++;
+#endif /* lint */
+
+#if defined(vax) || defined(PC532)
+ asm("bpt");
+#endif	/* vax || PC532 */
+
+#ifdef sun3
+ current_thread()->pcb->flag |= TRACE_KDB;
+ asm("orw #0x00008000,sr");
+#endif /* sun3 */
+#ifdef sun4
+ current_thread()->pcb->pcb_flag |= TRACE_KDB;
+ asm("ta 0x81");
+#endif /* sun4 */
+
+#if	defined(mips) || defined(luna88k) || defined(i860) || defined(alpha)
+ gimmeabreak();
+#endif
+
+#ifdef i386
+ asm("int3");
+#endif
+}
+
+/* Be prepared to panic anytime,
+ even before panic_init() gets called from the "normal" place in kern/startup.c.
+ (panic_init() still needs to be called from there
+ to make sure we get initialized before starting multiple processors.) */
+boolean_t panic_lock_initialized = FALSE;
+decl_simple_lock_data(, panic_lock)
+
+char *panicstr;
+int paniccpu;
+
+void
+panic_init()
+{
+ if (!panic_lock_initialized)
+ {
+ panic_lock_initialized = TRUE;
+ simple_lock_init(&panic_lock);
+ }
+}
+
+/*VARARGS1*/
+void
+panic(s, va_alist)
+ char * s;
+ va_dcl
+{
+ va_list listp;
+#if NORMA_IPC
+ extern int _node_self; /* node_self() may not be callable yet */
+#endif /* NORMA_IPC */
+
+ panic_init();
+
+ simple_lock(&panic_lock);
+ if (panicstr) {
+ if (cpu_number() != paniccpu) {
+ simple_unlock(&panic_lock);
+ halt_cpu();
+ /* NOTREACHED */
+ }
+ }
+ else {
+ panicstr = s;
+ paniccpu = cpu_number();
+ }
+ simple_unlock(&panic_lock);
+ printf("panic");
+#if NORMA_IPC
+ printf("(node %U)", _node_self);
+#endif
+#if NCPUS > 1
+ printf("(cpu %U)", paniccpu);
+#endif
+ printf(": ");
+ va_start(listp);
+ _doprnt(s, &listp, cnputc, 0);
+ va_end(listp);
+ printf("\n");
+
+ /* Give the user time to see the message */
+ {
+ int i = 60; /* seconds */
+ while (i--)
+ delay (1000000); /* microseconds */
+ }
+
+#if MACH_KDB
+ Debugger("panic");
+#else
+ halt_all_cpus (1);
+#endif
+}
+
+/*
+ * We'd like to use BSD's log routines here...
+ */
+/*VARARGS2*/
+void
+log(level, fmt, va_alist)
+ int level;
+ char * fmt;
+ va_dcl
+{
+ va_list listp;
+
+#ifdef lint
+ level++;
+#endif
+ va_start(listp);
+ _doprnt(fmt, &listp, cnputc, 0);
+ va_end(listp);
+}
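
panic() and log() above use the old <sys/varargs.h> va_dcl interface and hand the format and argument list straight to _doprnt() with cnputc as the output routine. A rough user-space sketch of the same forward-the-varargs pattern, restated with the modern <stdarg.h> API; example_panic() is a hypothetical stand-in, not the kernel routine:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for panic(): format the message, print it, stop. */
static void example_panic(const char *fmt, ...)
{
	va_list listp;

	fputs("panic: ", stderr);
	va_start(listp, fmt);
	vfprintf(stderr, fmt, listp);	/* plays the role of _doprnt(fmt, &listp, cnputc, 0) */
	va_end(listp);
	fputc('\n', stderr);
	abort();			/* stands in for Debugger()/halt_all_cpus() */
}

int main(void)
{
	example_panic("unexpected state %d in %s", 3, "example");
	return 0;
}
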
diff --git a/kern/debug.h b/kern/debug.h
new file mode 100644
index 00000000..35201408
--- /dev/null
+++ b/kern/debug.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: debug.h
+ * Author: Bryan Ford
+ *
+ * This file contains definitions for kernel debugging,
+ * which are compiled in on the DEBUG symbol.
+ *
+ */
+#ifndef _mach_debug__debug_
+#define _mach_debug__debug_
+
+#include <kern/assert.h> /*XXX*/
+
+#ifdef DEBUG
+
+#define here() printf("@ %s:%d\n", __FILE__, __LINE__)
+#define message(args) ({ printf("@ %s:%d: ", __FILE__, __LINE__); printf args; printf("\n"); })
+
+#define otsan() panic("%s:%d: off the straight and narrow!", __FILE__, __LINE__)
+
+#define struct_id_decl unsigned struct_id;
+#define struct_id_init(p,id) ((p)->struct_id = (id))
+#define struct_id_denit(p) ((p)->struct_id = 0)
+#define struct_id_verify(p,id) \
+ ({ if ((p)->struct_id != (id)) \
+ panic("%s:%d: "#p" (%08x) struct_id should be "#id" (%08x), is %08x\n", \
+ __FILE__, __LINE__, (p), (id), (p->struct_id)); \
+ })
+
+#else	/* !DEBUG */
+
+#define otsan()
+
+#define struct_id_decl
+#define struct_id_init(p,id)
+#define struct_id_denit(p)
+#define struct_id_verify(p,id)
+
+#endif	/* DEBUG */
+
+#endif	/* _mach_debug__debug_ */
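
The struct_id_* macros above stamp a magic value into a structure when it is initialized and verify it before use, so a stale or trampled object is caught at the point of misuse instead of much later. A small stand-alone sketch of the same idea; every name below is hypothetical and not part of the kernel:

#include <assert.h>
#include <stdio.h>

#define WIDGET_ID 0x57494447u		/* arbitrary magic value */

struct widget {
	unsigned struct_id;		/* plays the role of struct_id_decl */
	int value;
};

static void widget_init(struct widget *w, int v)
{
	w->struct_id = WIDGET_ID;	/* struct_id_init() */
	w->value = v;
}

static int widget_get(const struct widget *w)
{
	assert(w->struct_id == WIDGET_ID);	/* struct_id_verify() */
	return w->value;
}

int main(void)
{
	struct widget w;

	widget_init(&w, 42);
	printf("%d\n", widget_get(&w));
	return 0;
}
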
diff --git a/kern/elf-load.c b/kern/elf-load.c
new file mode 100644
index 00000000..1d103d3c
--- /dev/null
+++ b/kern/elf-load.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+
+#include <alloca.h>
+#include <mach/machine/vm_types.h>
+#include <mach/exec/elf.h>
+#include <mach/exec/exec.h>
+
+int exec_load(exec_read_func_t *read, exec_read_exec_func_t *read_exec,
+ void *handle, exec_info_t *out_info)
+{
+ vm_size_t actual;
+ Elf32_Ehdr x;
+ Elf32_Phdr *phdr, *ph;
+ vm_size_t phsize;
+ int i;
+ int result;
+
+ /* Read the ELF header. */
+ if ((result = (*read)(handle, 0, &x, sizeof(x), &actual)) != 0)
+ return result;
+ if (actual < sizeof(x))
+ return EX_NOT_EXECUTABLE;
+
+ if ((x.e_ident[EI_MAG0] != ELFMAG0) ||
+ (x.e_ident[EI_MAG1] != ELFMAG1) ||
+ (x.e_ident[EI_MAG2] != ELFMAG2) ||
+ (x.e_ident[EI_MAG3] != ELFMAG3))
+ return EX_NOT_EXECUTABLE;
+
+ /* Make sure the file is of the right architecture. */
+ if ((x.e_ident[EI_CLASS] != ELFCLASS32) ||
+ (x.e_ident[EI_DATA] != MY_EI_DATA) ||
+ (x.e_machine != MY_E_MACHINE))
+ return EX_WRONG_ARCH;
+
+ /* XXX others */
+ out_info->entry = (vm_offset_t) x.e_entry;
+
+ phsize = x.e_phnum * x.e_phentsize;
+ phdr = (Elf32_Phdr *)alloca(phsize);
+
+ result = (*read)(handle, x.e_phoff, phdr, phsize, &actual);
+ if (result)
+ return result;
+ if (actual < phsize)
+ return EX_CORRUPT;
+
+ for (i = 0; i < x.e_phnum; i++)
+ {
+ ph = (Elf32_Phdr *)((vm_offset_t)phdr + i * x.e_phentsize);
+ if (ph->p_type == PT_LOAD)
+ {
+ exec_sectype_t type = EXEC_SECTYPE_ALLOC |
+ EXEC_SECTYPE_LOAD;
+ if (ph->p_flags & PF_R) type |= EXEC_SECTYPE_READ;
+ if (ph->p_flags & PF_W) type |= EXEC_SECTYPE_WRITE;
+ if (ph->p_flags & PF_X) type |= EXEC_SECTYPE_EXECUTE;
+ result = (*read_exec)(handle,
+ ph->p_offset, ph->p_filesz,
+ ph->p_vaddr, ph->p_memsz, type);
+ }
+ }
+
+ return 0;
+}
+
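exec_load() above validates the ELF identification bytes, then walks the program headers and hands every PT_LOAD segment to the caller-supplied read_exec callback along with its file size, memory size, and access flags. A rough user-space analogue, assuming a system <elf.h> is available; it only lists the loadable segments of a 32-bit ELF file instead of mapping them:

#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	FILE *f;
	Elf32_Ehdr eh;
	Elf32_Phdr ph;
	int i;

	if (argc != 2 || (f = fopen(argv[1], "rb")) == NULL)
		return 1;
	if (fread(&eh, sizeof eh, 1, f) != 1 ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh.e_ident[EI_CLASS] != ELFCLASS32)
		return 1;		/* not a 32-bit ELF image */

	for (i = 0; i < eh.e_phnum; i++) {
		if (fseek(f, (long)eh.e_phoff + (long)i * eh.e_phentsize, SEEK_SET) != 0 ||
		    fread(&ph, sizeof ph, 1, f) != 1)
			return 1;
		if (ph.p_type == PT_LOAD)
			printf("load: vaddr 0x%lx filesz 0x%lx memsz 0x%lx %c%c%c\n",
			       (unsigned long)ph.p_vaddr,
			       (unsigned long)ph.p_filesz,
			       (unsigned long)ph.p_memsz,
			       (ph.p_flags & PF_R) ? 'r' : '-',
			       (ph.p_flags & PF_W) ? 'w' : '-',
			       (ph.p_flags & PF_X) ? 'x' : '-');
	}
	fclose(f);
	return 0;
}
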
diff --git a/kern/eventcount.c b/kern/eventcount.c
new file mode 100644
index 00000000..9121386d
--- /dev/null
+++ b/kern/eventcount.c
@@ -0,0 +1,372 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: eventcount.c
+ * Author: Alessandro Forin
+ * Date: 10/91
+ *
+ *	Eventcounters, for user-level driver synchronization
+ *
+ */
+
+
+#include <cpus.h>
+
+#include <mach/machine.h>
+#include <kern/ast.h>
+#include "cpu_number.h"
+#include <kern/lock.h>
+#include <kern/processor.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+
+#include <machine/machspl.h> /* For def'n of splsched() */
+
+#include <kern/eventcount.h>
+
+
+#if NCPUS <= 1
+void simpler_thread_setrun(
+ thread_t th,
+ boolean_t may_preempt); /* forward */
+#endif
+
+#define MAX_EVCS 10 /* xxx for now */
+evc_t all_eventcounters[MAX_EVCS];
+
+/*
+ * Initialization
+ */
+void
+evc_init(evc_t ev)
+{
+ int i;
+
+ bzero((char*)ev, sizeof(*ev));
+
+ /* keep track of who is who */
+ for (i = 0; i < MAX_EVCS; i++)
+ if (all_eventcounters[i] == 0) break;
+ if (i == MAX_EVCS) {
+ printf("Too many eventcounters\n");
+ return;
+ }
+
+ all_eventcounters[i] = ev;
+ ev->ev_id = i;
+ ev->sanity = ev;
+ ev->waiting_thread = THREAD_NULL;
+ simple_lock_init(&ev->lock);
+}
+
+/*
+ * Finalization
+ */
+void
+evc_destroy(evc_t ev)
+{
+ evc_signal(ev);
+ ev->sanity = 0;
+ if (all_eventcounters[ev->ev_id] == ev)
+ all_eventcounters[ev->ev_id] = 0;
+ ev->ev_id = -1;
+}
+
+/*
+ * Thread termination.
+ * HORRIBLE. This stuff needs to be fixed.
+ */
+void evc_notify_abort(thread_t thread)
+{
+ int i;
+ evc_t ev;
+ int s = splsched();
+ for (i = 0; i < MAX_EVCS; i++) {
+ ev = all_eventcounters[i];
+ if (ev) {
+ simple_lock(&ev->lock);
+ if (ev->waiting_thread == thread)
+ {
+ ev->waiting_thread = 0;
+ /* Removal of a waiting thread has to bump the count by one */
+ ev->count++;
+ }
+ simple_unlock(&ev->lock);
+ }
+ }
+ splx(s);
+}
+
+#ifdef CONTINUATIONS
+/*
+ * Just so that we return success, and give
+ * up the stack while blocked
+ */
+static void
+evc_continue(void)
+{
+ thread_syscall_return(KERN_SUCCESS);
+ /* NOTREACHED */
+}
+#else /* not CONTINUATIONS */
+#define evc_continue 0
+#endif /* not CONTINUATIONS */
+
+/*
+ * User-trappable
+ */
+kern_return_t evc_wait(natural_t ev_id)
+{
+ spl_t s;
+ kern_return_t ret;
+ evc_t ev;
+
+ if ((ev_id >= MAX_EVCS) ||
+ ((ev = all_eventcounters[ev_id]) == 0) ||
+ (ev->ev_id != ev_id) || (ev->sanity != ev))
+ return KERN_INVALID_ARGUMENT;
+
+ s = splsched();
+ simple_lock(&ev->lock);
+ /*
+ * The values assumed by the "count" field are
+ * as follows:
+ * 0 At initialization time, and with no
+ * waiting thread means no events pending;
+ * with waiting thread means the event
+ * was signalled and the thread not yet resumed
+ * -1 no events, there must be a waiting thread
+ * N>0 no waiting thread means N pending,
+ * with waiting thread N-1 pending.
+ *
+ */
+ if (ev->count > 0) {
+ ev->count--;
+ ret = KERN_SUCCESS;
+ } else {
+ if (ev->waiting_thread == THREAD_NULL) {
+ ev->count--;
+ ev->waiting_thread = current_thread();
+ assert_wait((event_t) 0, TRUE); /* ifnot race */
+ simple_unlock(&ev->lock);
+ thread_block(evc_continue);
+ return KERN_SUCCESS;
+ }
+ ret = KERN_NO_SPACE; /* XX */
+ }
+ simple_unlock(&ev->lock);
+ splx(s);
+ return ret;
+}
+
+/*
+ * User-trappable
+ */
+kern_return_t evc_wait_clear(natural_t ev_id)
+{
+ spl_t s;
+ kern_return_t ret;
+ evc_t ev;
+
+ if ((ev_id >= MAX_EVCS) ||
+ ((ev = all_eventcounters[ev_id]) == 0) ||
+ (ev->ev_id != ev_id) || (ev->sanity != ev))
+ return KERN_INVALID_ARGUMENT;
+
+ s = splsched();
+ simple_lock(&ev->lock);
+
+ /*
+ * The values assumed by the "count" field are
+ * as follows:
+ * 0 At initialization time, and with no
+ * waiting thread means no events pending;
+ * with waiting thread means the event
+ * was signalled and the thread not yet resumed
+ * -1 no events, there must be a waiting thread
+ * N>0 no waiting thread means N pending,
+ * with waiting thread N-1 pending.
+ *
+ */
+ /*
+ * Note that we always clear count before blocking.
+ */
+ if (ev->waiting_thread == THREAD_NULL) {
+ ev->count = -1;
+ ev->waiting_thread = current_thread();
+ assert_wait((event_t) 0, TRUE); /* ifnot race */
+ simple_unlock(&ev->lock);
+ thread_block(evc_continue);
+ /* NOTREACHED */
+ }
+
+ simple_unlock(&ev->lock);
+ splx(s);
+ ret = KERN_NO_SPACE; /* XX */
+	return ret;
+}
+
+/*
+ * Called exclusively from interrupt context
+ */
+void
+evc_signal(evc_t ev)
+{
+ register volatile thread_t thread;
+ register int state;
+ spl_t s;
+ if (ev->sanity != ev)
+ return;
+
+ s = splsched();
+ simple_lock(&ev->lock);
+ ev->count++;
+ if (thread = ev->waiting_thread, thread != THREAD_NULL)
+ {
+ ev->waiting_thread = 0;
+
+#if (NCPUS > 1)
+ retry:
+ while((thread->state & TH_RUN) || thread->lock.lock_data)
+ ;
+#endif
+ thread_lock(thread);
+
+ /* make thread runnable on this processor */
+ /* taken from clear_wait */
+ switch ((state = thread->state) & TH_SCHED_STATE)
+ {
+ case TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_WAIT | TH_UNINT:
+ case TH_WAIT:
+ /*
+ * Sleeping and not suspendable - put
+ * on run queue.
+ */
+ thread->state = (state &~ TH_WAIT) | TH_RUN;
+ thread_unlock(thread);
+#if NCPUS > 1
+ thread_setrun(thread, TRUE);
+#else
+ simpler_thread_setrun(thread, TRUE);
+#endif
+ break;
+
+ case TH_RUN | TH_WAIT:
+#if (NCPUS > 1)
+ /*
+ * Legal on MP: between assert_wait()
+ * and thread_block(), in evc_wait() above.
+ *
+ * Mmm. Maybe don't need now that the while(..) check is
+ * done before the thread lock is grabbed.....
+ */
+ thread_unlock(thread);
+ goto retry;
+#else
+ /*FALLTHROUGH*/
+#endif
+ case TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+
+ /*
+ * Either already running, or suspended.
+ * Just clear the wait.
+ */
+ thread->state = state &~ TH_WAIT;
+ thread_unlock(thread);
+ break;
+
+ default:
+ /*
+ * Not waiting.
+ */
+ panic("evc_signal.3");
+ thread_unlock(thread);
+ break;
+ }
+ }
+
+ simple_unlock(&ev->lock);
+ splx(s);
+}
+
+#if NCPUS <= 1
+/*
+ * The scheduler is too messy for my old little brain
+ */
+void
+simpler_thread_setrun(
+ thread_t th,
+ boolean_t may_preempt)
+{
+ register struct run_queue *rq;
+ register whichq;
+
+ /*
+ * XXX should replace queue with a boolean in this case.
+ */
+ if (default_pset.idle_count > 0) {
+ processor_t processor;
+
+ processor = (processor_t) queue_first(&default_pset.idle_queue);
+ queue_remove(&default_pset.idle_queue, processor,
+ processor_t, processor_queue);
+ default_pset.idle_count--;
+ processor->next_thread = th;
+ processor->state = PROCESSOR_DISPATCHING;
+ return;
+ }
+ rq = &(master_processor->runq);
+ ast_on(cpu_number(), AST_BLOCK);
+
+ whichq = (th)->sched_pri;
+ simple_lock(&(rq)->lock); /* lock the run queue */
+ enqueue_head(&(rq)->runq[whichq], (queue_entry_t) (th));
+
+ if (whichq < (rq)->low || (rq)->count == 0)
+ (rq)->low = whichq; /* minimize */
+ (rq)->count++;
+#ifdef MIGRATING_THREADS
+ (th)->shuttle.runq = (rq);
+#else
+ (th)->runq = (rq);
+#endif
+ simple_unlock(&(rq)->lock);
+
+ /*
+ * Turn off first_quantum to allow context switch.
+ */
+ current_processor()->first_quantum = FALSE;
+}
+#endif	/* NCPUS <= 1 */
+
diff --git a/kern/eventcount.h b/kern/eventcount.h
new file mode 100644
index 00000000..e2001de1
--- /dev/null
+++ b/kern/eventcount.h
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File:	eventcount.h
+ * Author: Alessandro Forin
+ * Date: 10/91
+ *
+ *	Eventcounters, for user-level driver synchronization
+ *
+ */
+
+#ifndef _KERN_EVENTCOUNT_H_
+#define _KERN_EVENTCOUNT_H_ 1
+
+/* kernel visible only */
+
+typedef struct evc {
+ int count;
+ thread_t waiting_thread;
+ natural_t ev_id;
+ struct evc *sanity;
+ decl_simple_lock_data(, lock)
+} *evc_t;
+
+extern void evc_init(evc_t ev),
+ evc_destroy(evc_t ev),
+ evc_signal(evc_t ev),
+ evc_notify_abort(thread_t thread);
+
+/* kernel and user visible */
+
+extern kern_return_t evc_wait(natural_t ev_id);
+
+#endif /* _KERN_EVENTCOUNT_H_ */
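
The comment in evc_wait() spells out the protocol for the count field: a positive value means pending events to consume, -1 means a single waiter is blocked, and evc_signal() bumps the count and wakes the waiter. As a rough analogue only (not the kernel code), the same consume-or-block semantics can be written with POSIX threads:

#include <pthread.h>

/* Hypothetical user-space analogue of struct evc; not the kernel type. */
struct example_evc {
	pthread_mutex_t lock;
	pthread_cond_t wakeup;
	int count;			/* pending, unconsumed events */
};

#define EXAMPLE_EVC_INITIALIZER \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 }

static void example_evc_wait(struct example_evc *ev)
{
	pthread_mutex_lock(&ev->lock);
	while (ev->count <= 0)		/* nothing pending: block */
		pthread_cond_wait(&ev->wakeup, &ev->lock);
	ev->count--;			/* consume one event */
	pthread_mutex_unlock(&ev->lock);
}

static void example_evc_signal(struct example_evc *ev)
{
	pthread_mutex_lock(&ev->lock);
	ev->count++;			/* post one event */
	pthread_cond_signal(&ev->wakeup);
	pthread_mutex_unlock(&ev->lock);
}
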
diff --git a/kern/exception.c b/kern/exception.c
new file mode 100644
index 00000000..ebd9e5b6
--- /dev/null
+++ b/kern/exception.c
@@ -0,0 +1,1003 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <norma_ipc.h>
+#include <mach_kdb.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <mach/port.h>
+#include <mach/mig_errors.h>
+#include <ipc/port.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/mach_msg.h>
+#include <ipc/ipc_machdep.h>
+#include <kern/counters.h>
+#include <kern/ipc_tt.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/processor.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <mach/machine/vm_types.h>
+
+
+
+extern void exception();
+extern void exception_try_task();
+extern void exception_no_server();
+
+extern void exception_raise();
+extern kern_return_t exception_parse_reply();
+extern void exception_raise_continue();
+extern void exception_raise_continue_slow();
+extern void exception_raise_continue_fast();
+
+#if MACH_KDB
+extern void thread_kdb_return();
+extern void db_printf();
+
+boolean_t debug_user_with_kdb = FALSE;
+#endif /* MACH_KDB */
+
+#ifdef KEEP_STACKS
+/*
+ * Some obsolete architectures don't support kernel stack discarding
+ * or the thread_exception_return, thread_syscall_return continuations.
+ * For these architectures, the NOTREACHED comments below are incorrect.
+ * The exception function is expected to return.
+ * So the return statements along the slow paths are important.
+ */
+#endif	/* KEEP_STACKS */
+
+/*
+ * Routine: exception
+ * Purpose:
+ * The current thread caught an exception.
+ * We make an up-call to the thread's exception server.
+ * Conditions:
+ * Nothing locked and no resources held.
+ * Called from an exception context, so
+ * thread_exception_return and thread_kdb_return
+ * are possible.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception(_exception, code, subcode)
+ integer_t _exception, code, subcode;
+{
+ register ipc_thread_t self = current_thread();
+ register ipc_port_t exc_port;
+
+ if (_exception == KERN_SUCCESS)
+ panic("exception");
+
+ /*
+ * Optimized version of retrieve_thread_exception.
+ */
+
+ ith_lock(self);
+ assert(self->ith_self != IP_NULL);
+ exc_port = self->ith_exception;
+ if (!IP_VALID(exc_port)) {
+ ith_unlock(self);
+ exception_try_task(_exception, code, subcode);
+ /*NOTREACHED*/
+ return;
+ }
+
+ ip_lock(exc_port);
+ ith_unlock(self);
+ if (!ip_active(exc_port)) {
+ ip_unlock(exc_port);
+ exception_try_task(_exception, code, subcode);
+ /*NOTREACHED*/
+ return;
+ }
+
+ /*
+ * Make a naked send right for the exception port.
+ */
+
+ ip_reference(exc_port);
+ exc_port->ip_srights++;
+ ip_unlock(exc_port);
+
+ /*
+ * If this exception port doesn't work,
+ * we will want to try the task's exception port.
+ * Indicate this by saving the exception state.
+ */
+
+ self->ith_exc = _exception;
+ self->ith_exc_code = code;
+ self->ith_exc_subcode = subcode;
+
+ exception_raise(exc_port,
+ retrieve_thread_self_fast(self),
+ retrieve_task_self_fast(self->task),
+ _exception, code, subcode);
+ /*NOTREACHED*/
+}
+
+/*
+ * Routine: exception_try_task
+ * Purpose:
+ * The current thread caught an exception.
+ * We make an up-call to the task's exception server.
+ * Conditions:
+ * Nothing locked and no resources held.
+ * Called from an exception context, so
+ * thread_exception_return and thread_kdb_return
+ * are possible.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception_try_task(_exception, code, subcode)
+ integer_t _exception, code, subcode;
+{
+ ipc_thread_t self = current_thread();
+ register task_t task = self->task;
+ register ipc_port_t exc_port;
+
+ /*
+ * Optimized version of retrieve_task_exception.
+ */
+
+ itk_lock(task);
+ assert(task->itk_self != IP_NULL);
+ exc_port = task->itk_exception;
+ if (!IP_VALID(exc_port)) {
+ itk_unlock(task);
+ exception_no_server();
+ /*NOTREACHED*/
+ return;
+ }
+
+ ip_lock(exc_port);
+ itk_unlock(task);
+ if (!ip_active(exc_port)) {
+ ip_unlock(exc_port);
+ exception_no_server();
+ /*NOTREACHED*/
+ return;
+ }
+
+ /*
+ * Make a naked send right for the exception port.
+ */
+
+ ip_reference(exc_port);
+ exc_port->ip_srights++;
+ ip_unlock(exc_port);
+
+ /*
+ * This is the thread's last chance.
+ * Clear the saved exception state.
+ */
+
+ self->ith_exc = KERN_SUCCESS;
+
+ exception_raise(exc_port,
+ retrieve_thread_self_fast(self),
+ retrieve_task_self_fast(task),
+ _exception, code, subcode);
+ /*NOTREACHED*/
+}
+
+/*
+ * Routine: exception_no_server
+ * Purpose:
+ * The current thread took an exception,
+ * and no exception server took responsibility
+ * for the exception. So good bye, charlie.
+ * Conditions:
+ * Nothing locked and no resources held.
+ * Called from an exception context, so
+ * thread_kdb_return is possible.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception_no_server()
+{
+ register ipc_thread_t self = current_thread();
+
+ /*
+ * If this thread is being terminated, cooperate.
+ */
+
+ while (thread_should_halt(self))
+ thread_halt_self();
+
+#if MACH_KDB
+ if (debug_user_with_kdb) {
+ /*
+ * Debug the exception with kdb.
+ * If kdb handles the exception,
+ * then thread_kdb_return won't return.
+ */
+
+ db_printf("No exception server, calling kdb...\n");
+ thread_kdb_return();
+ }
+#endif	/* MACH_KDB */
+
+ /*
+ * All else failed; terminate task.
+ */
+
+ (void) task_terminate(self->task);
+ thread_halt_self();
+ /*NOTREACHED*/
+}
+
+#define MACH_EXCEPTION_ID 2400 /* from mach/exc.defs */
+#define MACH_EXCEPTION_REPLY_ID (MACH_EXCEPTION_ID + 100)
+
+struct mach_exception {
+ mach_msg_header_t Head;
+ mach_msg_type_t threadType;
+ mach_port_t thread;
+ mach_msg_type_t taskType;
+ mach_port_t task;
+ mach_msg_type_t exceptionType;
+ integer_t exception;
+ mach_msg_type_t codeType;
+ integer_t code;
+ mach_msg_type_t subcodeType;
+ integer_t subcode;
+};
+
+#define INTEGER_T_SIZE_IN_BITS (8 * sizeof(integer_t))
+#define INTEGER_T_TYPE MACH_MSG_TYPE_INTEGER_T
+ /* in mach/machine/vm_types.h */
+
+mach_msg_type_t exc_port_proto = {
+ /* msgt_name = */ MACH_MSG_TYPE_PORT_SEND,
+ /* msgt_size = */ PORT_T_SIZE_IN_BITS,
+ /* msgt_number = */ 1,
+ /* msgt_inline = */ TRUE,
+ /* msgt_longform = */ FALSE,
+ /* msgt_deallocate = */ FALSE,
+ /* msgt_unused = */ 0
+};
+
+mach_msg_type_t exc_code_proto = {
+ /* msgt_name = */ INTEGER_T_TYPE,
+ /* msgt_size = */ INTEGER_T_SIZE_IN_BITS,
+ /* msgt_number = */ 1,
+ /* msgt_inline = */ TRUE,
+ /* msgt_longform = */ FALSE,
+ /* msgt_deallocate = */ FALSE,
+ /* msgt_unused = */ 0
+};
+
+/*
+ * Routine: exception_raise
+ * Purpose:
+ * Make an exception_raise up-call to an exception server.
+ *
+ * dest_port must be a valid naked send right.
+ * thread_port and task_port are naked send rights.
+ * All three are always consumed.
+ *
+ * self->ith_exc, self->ith_exc_code, self->ith_exc_subcode
+ * must be appropriately initialized.
+ * Conditions:
+ * Nothing locked. We are being called in an exception context,
+ * so thread_exception_return may be called.
+ * Returns:
+ * Doesn't return.
+ */
+
+int exception_raise_misses = 0;
+
+void
+exception_raise(dest_port, thread_port, task_port,
+ _exception, code, subcode)
+ ipc_port_t dest_port;
+ ipc_port_t thread_port;
+ ipc_port_t task_port;
+ integer_t _exception, code, subcode;
+{
+ ipc_thread_t self = current_thread();
+ ipc_thread_t receiver;
+ ipc_port_t reply_port;
+ ipc_mqueue_t dest_mqueue;
+ ipc_mqueue_t reply_mqueue;
+ ipc_kmsg_t kmsg;
+ mach_msg_return_t mr;
+
+ assert(IP_VALID(dest_port));
+
+ /*
+ * We will eventually need a message buffer.
+ * Grab the buffer now, while nothing is locked.
+ * This buffer will get handed to the exception server,
+ * and it will give the buffer back with its reply.
+ */
+
+ kmsg = ikm_cache();
+ if (kmsg != IKM_NULL) {
+ ikm_cache() = IKM_NULL;
+ ikm_check_initialized(kmsg, IKM_SAVED_KMSG_SIZE);
+ } else {
+ kmsg = ikm_alloc(IKM_SAVED_MSG_SIZE);
+ if (kmsg == IKM_NULL)
+ panic("exception_raise");
+ ikm_init(kmsg, IKM_SAVED_MSG_SIZE);
+ }
+
+ /*
+ * We need a reply port for the RPC.
+ * Check first for a cached port.
+ */
+
+ ith_lock(self);
+ assert(self->ith_self != IP_NULL);
+
+ reply_port = self->ith_rpc_reply;
+ if (reply_port == IP_NULL) {
+ ith_unlock(self);
+ reply_port = ipc_port_alloc_reply();
+ ith_lock(self);
+ if ((reply_port == IP_NULL) ||
+ (self->ith_rpc_reply != IP_NULL))
+ panic("exception_raise");
+ self->ith_rpc_reply = reply_port;
+ }
+
+ ip_lock(reply_port);
+ assert(ip_active(reply_port));
+ ith_unlock(self);
+
+ /*
+ * Make a naked send-once right for the reply port,
+ * to hand to the exception server.
+ * Make an extra reference for the reply port,
+ * to receive on. This protects us against
+ * mach_msg_abort_rpc.
+ */
+
+ reply_port->ip_sorights++;
+ ip_reference(reply_port);
+
+ ip_reference(reply_port);
+ self->ith_port = reply_port;
+
+ reply_mqueue = &reply_port->ip_messages;
+ imq_lock(reply_mqueue);
+ assert(ipc_kmsg_queue_empty(&reply_mqueue->imq_messages));
+ ip_unlock(reply_port);
+
+ /*
+ * Make sure we can queue to the destination port.
+ */
+
+ if (!ip_lock_try(dest_port)) {
+ imq_unlock(reply_mqueue);
+ goto slow_exception_raise;
+ }
+
+ if (!ip_active(dest_port) ||
+#if NORMA_IPC
+ IP_NORMA_IS_PROXY(dest_port) ||
+#endif	/* NORMA_IPC */
+ (dest_port->ip_receiver == ipc_space_kernel)) {
+ imq_unlock(reply_mqueue);
+ ip_unlock(dest_port);
+ goto slow_exception_raise;
+ }
+
+ /*
+ * Find the destination message queue.
+ */
+
+ {
+ register ipc_pset_t dest_pset;
+
+ dest_pset = dest_port->ip_pset;
+ if (dest_pset == IPS_NULL)
+ dest_mqueue = &dest_port->ip_messages;
+ else
+ dest_mqueue = &dest_pset->ips_messages;
+ }
+
+ if (!imq_lock_try(dest_mqueue)) {
+ imq_unlock(reply_mqueue);
+ ip_unlock(dest_port);
+ goto slow_exception_raise;
+ }
+
+ /*
+ * Safe to unlock dest_port, because we hold
+ * dest_mqueue locked. We never bother changing
+ * dest_port->ip_msgcount.
+ */
+
+ ip_unlock(dest_port);
+
+ receiver = ipc_thread_queue_first(&dest_mqueue->imq_threads);
+ if ((receiver == ITH_NULL) ||
+ !((receiver->swap_func == (void (*)()) mach_msg_continue) ||
+ ((receiver->swap_func ==
+ (void (*)()) mach_msg_receive_continue) &&
+ (sizeof(struct mach_exception) <= receiver->ith_msize) &&
+ ((receiver->ith_option & MACH_RCV_NOTIFY) == 0))) ||
+ !thread_handoff(self, exception_raise_continue, receiver)) {
+ imq_unlock(reply_mqueue);
+ imq_unlock(dest_mqueue);
+ goto slow_exception_raise;
+ }
+ counter(c_exception_raise_block++);
+
+ assert(current_thread() == receiver);
+
+ /*
+ * We need to finish preparing self for its
+ * time asleep in reply_mqueue. self is left
+ * holding the extra ref for reply_port.
+ */
+
+ ipc_thread_enqueue_macro(&reply_mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = MACH_MSG_SIZE_MAX;
+ imq_unlock(reply_mqueue);
+
+ /*
+ * Finish extracting receiver from dest_mqueue.
+ */
+
+ ipc_thread_rmqueue_first_macro(
+ &dest_mqueue->imq_threads, receiver);
+ imq_unlock(dest_mqueue);
+
+ /*
+ * Release the receiver's reference for his object.
+ */
+ {
+ register ipc_object_t object = receiver->ith_object;
+
+ io_lock(object);
+ io_release(object);
+ io_check_unlock(object);
+ }
+
+ {
+ register struct mach_exception *exc =
+ (struct mach_exception *) &kmsg->ikm_header;
+ ipc_space_t space = receiver->task->itk_space;
+
+ /*
+ * We are running as the receiver now. We hold
+ * the following resources, which must be consumed:
+ * kmsg, send-once right for reply_port
+ * send rights for dest_port, thread_port, task_port
+ * Synthesize a kmsg for copyout to the receiver.
+ */
+
+ exc->Head.msgh_bits = (MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND) |
+ MACH_MSGH_BITS_COMPLEX);
+ exc->Head.msgh_size = sizeof *exc;
+ /* exc->Head.msgh_remote_port later */
+ /* exc->Head.msgh_local_port later */
+ exc->Head.msgh_seqno = 0;
+ exc->Head.msgh_id = MACH_EXCEPTION_ID;
+ exc->threadType = exc_port_proto;
+ /* exc->thread later */
+ exc->taskType = exc_port_proto;
+ /* exc->task later */
+ exc->exceptionType = exc_code_proto;
+ exc->exception = _exception;
+ exc->codeType = exc_code_proto;
+ exc->code = code;
+ exc->subcodeType = exc_code_proto;
+ exc->subcode = subcode;
+
+ /*
+ * Check that the receiver can handle the message.
+ */
+
+ if (receiver->ith_rcv_size < sizeof(struct mach_exception)) {
+ /*
+ * ipc_kmsg_destroy is a handy way to consume
+ * the resources we hold, but it requires setup.
+ */
+
+ exc->Head.msgh_bits =
+ (MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE) |
+ MACH_MSGH_BITS_COMPLEX);
+ exc->Head.msgh_remote_port = (mach_port_t) dest_port;
+ exc->Head.msgh_local_port = (mach_port_t) reply_port;
+ exc->thread = (mach_port_t) thread_port;
+ exc->task = (mach_port_t) task_port;
+
+ ipc_kmsg_destroy(kmsg);
+ thread_syscall_return(MACH_RCV_TOO_LARGE);
+ /*NOTREACHED*/
+ }
+
+ is_write_lock(space);
+ assert(space->is_active);
+
+ /*
+ * To do an atomic copyout, need simultaneous
+ * locks on both ports and the space.
+ */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port) ||
+ !ip_lock_try(reply_port)) {
+ abort_copyout:
+ ip_unlock(dest_port);
+ is_write_unlock(space);
+
+ /*
+ * Oh well, we have to do the header the slow way.
+ * First make it look like it's in-transit.
+ */
+
+ exc->Head.msgh_bits =
+ (MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE) |
+ MACH_MSGH_BITS_COMPLEX);
+ exc->Head.msgh_remote_port = (mach_port_t) dest_port;
+ exc->Head.msgh_local_port = (mach_port_t) reply_port;
+
+ mr = ipc_kmsg_copyout_header(&exc->Head, space,
+ MACH_PORT_NULL);
+ if (mr == MACH_MSG_SUCCESS)
+ goto copyout_body;
+
+ /*
+ * Ack! Prepare for ipc_kmsg_copyout_dest.
+ * It will consume thread_port and task_port.
+ */
+
+ exc->thread = (mach_port_t) thread_port;
+ exc->task = (mach_port_t) task_port;
+
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(receiver->ith_msg, kmsg,
+ sizeof(mach_msg_header_t));
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ if (!ip_active(reply_port)) {
+ ip_unlock(reply_port);
+ goto abort_copyout;
+ }
+
+ assert(reply_port->ip_sorights > 0);
+ ip_unlock(reply_port);
+
+ {
+ register ipc_entry_t table;
+ register ipc_entry_t entry;
+ register mach_port_index_t index;
+
+ /* optimized ipc_entry_get */
+
+ table = space->is_table;
+ index = table->ie_next;
+
+ if (index == 0)
+ goto abort_copyout;
+
+ entry = &table[index];
+ table->ie_next = entry->ie_next;
+ entry->ie_request = 0;
+
+ {
+ register mach_port_gen_t gen;
+
+ assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = entry->ie_bits + IE_BITS_GEN_ONE;
+
+ exc->Head.msgh_remote_port = MACH_PORT_MAKE(index, gen);
+
+ /* optimized ipc_right_copyout */
+
+ entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
+ }
+
+ entry->ie_object = (ipc_object_t) reply_port;
+ is_write_unlock(space);
+ }
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest_port->ip_srights > 0);
+ ip_release(dest_port);
+
+ exc->Head.msgh_local_port =
+ ((dest_port->ip_receiver == space) ?
+ dest_port->ip_receiver_name : MACH_PORT_NULL);
+
+ if ((--dest_port->ip_srights == 0) &&
+ (dest_port->ip_nsrequest != IP_NULL)) {
+ ipc_port_t nsrequest;
+ mach_port_mscount_t mscount;
+
+ /* a rather rare case */
+
+ nsrequest = dest_port->ip_nsrequest;
+ mscount = dest_port->ip_mscount;
+ dest_port->ip_nsrequest = IP_NULL;
+ ip_unlock(dest_port);
+
+ ipc_notify_no_senders(nsrequest, mscount);
+ } else
+ ip_unlock(dest_port);
+
+ copyout_body:
+ /*
+ * Optimized version of ipc_kmsg_copyout_body,
+ * to handle the two ports in the body.
+ */
+
+ mr = (ipc_kmsg_copyout_object(space, (ipc_object_t) thread_port,
+ MACH_MSG_TYPE_PORT_SEND, &exc->thread) |
+ ipc_kmsg_copyout_object(space, (ipc_object_t) task_port,
+ MACH_MSG_TYPE_PORT_SEND, &exc->task));
+ if (mr != MACH_MSG_SUCCESS) {
+ (void) ipc_kmsg_put(receiver->ith_msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr | MACH_RCV_BODY_ERROR);
+ /*NOTREACHED*/
+ }
+ }
+
+ /*
+ * Optimized version of ipc_kmsg_put.
+ * We must check ikm_cache after copyoutmsg.
+ */
+
+ ikm_check_initialized(kmsg, kmsg->ikm_size);
+ assert(kmsg->ikm_size == IKM_SAVED_KMSG_SIZE);
+
+ if (copyoutmsg((vm_offset_t) &kmsg->ikm_header, (vm_offset_t)receiver->ith_msg,
+ sizeof(struct mach_exception)) ||
+ (ikm_cache() != IKM_NULL)) {
+ mr = ipc_kmsg_put(receiver->ith_msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ ikm_cache() = kmsg;
+ thread_syscall_return(MACH_MSG_SUCCESS);
+ /*NOTREACHED*/
+#ifndef __GNUC__
+ return; /* help for the compiler */
+#endif
+
+ slow_exception_raise: {
+ register struct mach_exception *exc =
+ (struct mach_exception *) &kmsg->ikm_header;
+ ipc_kmsg_t reply_kmsg;
+ mach_port_seqno_t reply_seqno;
+
+ exception_raise_misses++;
+
+ /*
+ * We hold the following resources, which must be consumed:
+ * kmsg, send-once right and ref for reply_port
+ * send rights for dest_port, thread_port, task_port
+ * Synthesize a kmsg to send.
+ */
+
+ exc->Head.msgh_bits = (MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE) |
+ MACH_MSGH_BITS_COMPLEX);
+ exc->Head.msgh_size = sizeof *exc;
+ exc->Head.msgh_remote_port = (mach_port_t) dest_port;
+ exc->Head.msgh_local_port = (mach_port_t) reply_port;
+ exc->Head.msgh_seqno = 0;
+ exc->Head.msgh_id = MACH_EXCEPTION_ID;
+ exc->threadType = exc_port_proto;
+ exc->thread = (mach_port_t) thread_port;
+ exc->taskType = exc_port_proto;
+ exc->task = (mach_port_t) task_port;
+ exc->exceptionType = exc_code_proto;
+ exc->exception = _exception;
+ exc->codeType = exc_code_proto;
+ exc->code = code;
+ exc->subcodeType = exc_code_proto;
+ exc->subcode = subcode;
+
+ ipc_mqueue_send_always(kmsg);
+
+ /*
+ * We are left with a ref for reply_port,
+ * which we use to receive the reply message.
+ */
+
+ ip_lock(reply_port);
+ if (!ip_active(reply_port)) {
+ ip_unlock(reply_port);
+ exception_raise_continue_slow(MACH_RCV_PORT_DIED, IKM_NULL, /*dummy*/0);
+ /*NOTREACHED*/
+ return;
+ }
+
+ imq_lock(reply_mqueue);
+ ip_unlock(reply_port);
+
+ mr = ipc_mqueue_receive(reply_mqueue, MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, exception_raise_continue,
+ &reply_kmsg, &reply_seqno);
+ /* reply_mqueue is unlocked */
+
+ exception_raise_continue_slow(mr, reply_kmsg, reply_seqno);
+ /*NOTREACHED*/
+ }
+}
+
+mach_msg_type_t exc_RetCode_proto = {
+ /* msgt_name = */ MACH_MSG_TYPE_INTEGER_32,
+ /* msgt_size = */ 32,
+ /* msgt_number = */ 1,
+ /* msgt_inline = */ TRUE,
+ /* msgt_longform = */ FALSE,
+ /* msgt_deallocate = */ FALSE,
+ /* msgt_unused = */ 0
+};
+
+/*
+ * Routine: exception_parse_reply
+ * Purpose:
+ * Parse and consume an exception reply message.
+ * Conditions:
+ * The destination port right has already been consumed.
+ * The message buffer and anything else in it is consumed.
+ * Returns:
+ * The reply return code.
+ */
+
+kern_return_t
+exception_parse_reply(kmsg)
+ ipc_kmsg_t kmsg;
+{
+ register mig_reply_header_t *msg =
+ (mig_reply_header_t *) &kmsg->ikm_header;
+ kern_return_t kr;
+
+ if ((msg->Head.msgh_bits !=
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0)) ||
+ (msg->Head.msgh_size != sizeof *msg) ||
+ (msg->Head.msgh_id != MACH_EXCEPTION_REPLY_ID) ||
+ (* (int *) &msg->RetCodeType != * (int *) &exc_RetCode_proto)) {
+ /*
+ * Bozo user sent us a misformatted reply.
+ */
+
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ ipc_kmsg_destroy(kmsg);
+ return MIG_REPLY_MISMATCH;
+ }
+
+ kr = msg->RetCode;
+
+ if ((kmsg->ikm_size == IKM_SAVED_KMSG_SIZE) &&
+ (ikm_cache() == IKM_NULL))
+ ikm_cache() = kmsg;
+ else
+ ikm_free(kmsg);
+
+ return kr;
+}
+
+/*
+ * Routine: exception_raise_continue
+ * Purpose:
+ * Continue after blocking for an exception.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack,
+ * with the exception state saved in the thread. From here
+ * control goes back to user space.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception_raise_continue()
+{
+ ipc_thread_t self = current_thread();
+ ipc_port_t reply_port = self->ith_port;
+ ipc_mqueue_t reply_mqueue = &reply_port->ip_messages;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ mr = ipc_mqueue_receive(reply_mqueue, MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ TRUE, exception_raise_continue,
+ &kmsg, &seqno);
+ /* reply_mqueue is unlocked */
+
+ exception_raise_continue_slow(mr, kmsg, seqno);
+ /*NOTREACHED*/
+}
+
+/*
+ * Routine: exception_raise_continue_slow
+ * Purpose:
+ * Continue after finishing an ipc_mqueue_receive
+ * for an exception reply message.
+ * Conditions:
+ * Nothing locked. We hold a ref for reply_port.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception_raise_continue_slow(mr, kmsg, seqno)
+ mach_msg_return_t mr;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+{
+ ipc_thread_t self = current_thread();
+ ipc_port_t reply_port = self->ith_port;
+ ipc_mqueue_t reply_mqueue = &reply_port->ip_messages;
+
+ while (mr == MACH_RCV_INTERRUPTED) {
+ /*
+ * Somebody is trying to force this thread
+ * to a clean point. We must cooperate
+ * and then resume the receive.
+ */
+
+ while (thread_should_halt(self)) {
+ /* don't terminate while holding a reference */
+ if (self->ast & AST_TERMINATE)
+ ipc_port_release(reply_port);
+ thread_halt_self();
+ }
+
+ ip_lock(reply_port);
+ if (!ip_active(reply_port)) {
+ ip_unlock(reply_port);
+ mr = MACH_RCV_PORT_DIED;
+ break;
+ }
+
+ imq_lock(reply_mqueue);
+ ip_unlock(reply_port);
+
+ mr = ipc_mqueue_receive(reply_mqueue, MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, exception_raise_continue,
+ &kmsg, &seqno);
+ /* reply_mqueue is unlocked */
+ }
+ ipc_port_release(reply_port);
+
+ assert((mr == MACH_MSG_SUCCESS) ||
+ (mr == MACH_RCV_PORT_DIED));
+
+ if (mr == MACH_MSG_SUCCESS) {
+ /*
+ * Consume the reply message.
+ */
+
+ ipc_port_release_sonce(reply_port);
+ mr = exception_parse_reply(kmsg);
+ }
+
+ if ((mr == KERN_SUCCESS) ||
+ (mr == MACH_RCV_PORT_DIED)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ return;
+ }
+
+ if (self->ith_exc != KERN_SUCCESS) {
+ exception_try_task(self->ith_exc,
+ self->ith_exc_code,
+ self->ith_exc_subcode);
+ /*NOTREACHED*/
+ return;
+ }
+
+ exception_no_server();
+ /*NOTREACHED*/
+}
+
+/*
+ * Routine: exception_raise_continue_fast
+ * Purpose:
+ * Special-purpose fast continuation for exceptions.
+ * Conditions:
+ * reply_port is locked and alive.
+ * kmsg is our reply message.
+ * Returns:
+ * Doesn't return.
+ */
+
+void
+exception_raise_continue_fast(reply_port, kmsg)
+ ipc_port_t reply_port;
+ ipc_kmsg_t kmsg;
+{
+ ipc_thread_t self = current_thread();
+ kern_return_t kr;
+
+ assert(ip_active(reply_port));
+ assert(reply_port == self->ith_port);
+ assert(reply_port == (ipc_port_t) kmsg->ikm_header.msgh_remote_port);
+ assert(MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+
+ /*
+ * Release the send-once right (from the message header)
+ * and the saved reference (from self->ith_port).
+ */
+
+ reply_port->ip_sorights--;
+ ip_release(reply_port);
+ ip_release(reply_port);
+ ip_unlock(reply_port);
+
+ /*
+ * Consume the reply message.
+ */
+
+ kr = exception_parse_reply(kmsg);
+ if (kr == KERN_SUCCESS) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ return; /* help for the compiler */
+ }
+
+ if (self->ith_exc != KERN_SUCCESS) {
+ exception_try_task(self->ith_exc,
+ self->ith_exc_code,
+ self->ith_exc_subcode);
+ /*NOTREACHED*/
+ }
+
+ exception_no_server();
+ /*NOTREACHED*/
+}
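
Both the fast and slow paths above lean on the usual MIG conventions: the reply message carries the request's msgh_id plus 100 and a single return code, which is exactly what exception_parse_reply() checks before trusting the RetCode. A toy illustration of that reply check with hypothetical, simplified user-space types (not the kernel's mig_reply_header_t):

#include <stdint.h>

#define EXAMPLE_REQUEST_ID	2400			/* cf. MACH_EXCEPTION_ID */
#define EXAMPLE_REPLY_ID	(EXAMPLE_REQUEST_ID + 100)

/* Hypothetical, simplified reply layout for illustration only. */
struct example_reply {
	uint32_t msgh_id;	/* must be the request id + 100 */
	int32_t  ret_code;	/* 0 on success, else an error code */
};

/* Returns 0 and stores the server's return code, or -1 on a
 * misformatted reply (the MIG_REPLY_MISMATCH case above). */
static int example_parse_reply(const struct example_reply *r, int32_t *code)
{
	if (r->msgh_id != EXAMPLE_REPLY_ID)
		return -1;
	*code = r->ret_code;
	return 0;
}
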
diff --git a/kern/host.c b/kern/host.c
new file mode 100644
index 00000000..062f923e
--- /dev/null
+++ b/kern/host.c
@@ -0,0 +1,380 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * host.c
+ *
+ * Non-ipc host functions.
+ */
+
+#include <cpus.h>
+#include <mach_host.h>
+
+#include <kern/assert.h>
+#include <kern/kalloc.h>
+#include <kern/host.h>
+#include <mach/host_info.h>
+#include <mach/kern_return.h>
+#include <mach/machine.h>
+#include <mach/port.h>
+#include <kern/processor.h>
+#include <kern/ipc_host.h>
+
+#include <mach/vm_param.h>
+
+
+
+host_data_t realhost;
+
+kern_return_t host_processors(
+ host_t host,
+ processor_array_t *processor_list,
+ natural_t *countp)
+{
+ register int i;
+ register processor_t *tp;
+ vm_offset_t addr;
+ unsigned int count;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Determine how many processors we have.
+ * (This number shouldn't change.)
+ */
+
+ count = 0;
+ for (i = 0; i < NCPUS; i++)
+ if (machine_slot[i].is_cpu)
+ count++;
+
+ if (count == 0)
+ panic("host_processors");
+
+ addr = kalloc((vm_size_t) (count * sizeof(mach_port_t)));
+ if (addr == 0)
+ return KERN_RESOURCE_SHORTAGE;
+
+ tp = (processor_t *) addr;
+ for (i = 0; i < NCPUS; i++)
+ if (machine_slot[i].is_cpu)
+ *tp++ = cpu_to_processor(i);
+
+ *countp = count;
+ *processor_list = (mach_port_t *) addr;
+
+ /* do the conversion that Mig should handle */
+
+ tp = (processor_t *) addr;
+ for (i = 0; i < count; i++)
+ ((mach_port_t *) tp)[i] =
+ (mach_port_t)convert_processor_to_port(tp[i]);
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t host_info(
+ host_t host,
+ int flavor,
+ host_info_t info,
+ natural_t *count)
+{
+ register integer_t i, *slot_ptr;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ switch(flavor) {
+
+ case HOST_BASIC_INFO:
+ {
+ register host_basic_info_t basic_info;
+
+ /*
+ * Basic information about this host.
+ */
+ if (*count < HOST_BASIC_INFO_COUNT)
+ return KERN_FAILURE;
+
+ basic_info = (host_basic_info_t) info;
+
+ basic_info->max_cpus = machine_info.max_cpus;
+ basic_info->avail_cpus = machine_info.avail_cpus;
+ basic_info->memory_size = machine_info.memory_size;
+ basic_info->cpu_type =
+ machine_slot[master_processor->slot_num].cpu_type;
+ basic_info->cpu_subtype =
+ machine_slot[master_processor->slot_num].cpu_subtype;
+
+ *count = HOST_BASIC_INFO_COUNT;
+ return KERN_SUCCESS;
+ }
+
+ case HOST_PROCESSOR_SLOTS:
+ /*
+ * Return numbers of slots with active processors
+ * in them.
+ */
+ if (*count < NCPUS)
+ return KERN_INVALID_ARGUMENT;
+
+ slot_ptr = (integer_t *)info;
+ *count = 0;
+ for (i = 0; i < NCPUS; i++) {
+ if (machine_slot[i].is_cpu &&
+ machine_slot[i].running) {
+ *slot_ptr++ = i;
+ (*count)++;
+ }
+ }
+ return KERN_SUCCESS;
+
+ case HOST_SCHED_INFO:
+ {
+ register host_sched_info_t sched_info;
+ extern int tick; /* microseconds per clock tick */
+ extern int min_quantum;
+ /* minimum quantum, in microseconds */
+
+ /*
+ * Return scheduler information.
+ */
+ if (*count < HOST_SCHED_INFO_COUNT)
+ return(KERN_FAILURE);
+
+ sched_info = (host_sched_info_t) info;
+
+ sched_info->min_timeout = tick / 1000;
+ sched_info->min_quantum = min_quantum / 1000;
+ /* convert microseconds to milliseconds */
+
+ *count = HOST_SCHED_INFO_COUNT;
+ return KERN_SUCCESS;
+ }
+
+ case HOST_LOAD_INFO:
+ {
+ register host_load_info_t load_info;
+ extern long avenrun[3], mach_factor[3];
+
+ if (*count < HOST_LOAD_INFO_COUNT)
+ return KERN_FAILURE;
+
+ load_info = (host_load_info_t) info;
+
+ bcopy((char *) avenrun,
+ (char *) load_info->avenrun,
+ sizeof avenrun);
+ bcopy((char *) mach_factor,
+ (char *) load_info->mach_factor,
+ sizeof mach_factor);
+
+ *count = HOST_LOAD_INFO_COUNT;
+ return KERN_SUCCESS;
+ }
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+}
+
+/*
+ * Return kernel version string (more than you ever
+ * wanted to know about what version of the kernel this is).
+ */
+
+kern_return_t host_kernel_version(
+ host_t host,
+ kernel_version_t out_version)
+{
+ extern char version[];
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ (void) strncpy(out_version, version, sizeof(kernel_version_t));
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * host_processor_sets:
+ *
+ * List all processor sets on the host.
+ */
+#if MACH_HOST
+kern_return_t
+host_processor_sets(
+ host_t host,
+ processor_set_name_array_t *pset_list,
+ natural_t *count)
+{
+ unsigned int actual; /* this many psets */
+ processor_set_t pset;
+ processor_set_t *psets;
+ int i;
+
+ vm_size_t size;
+ vm_size_t size_needed;
+ vm_offset_t addr;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ size = 0; addr = 0;
+
+ for (;;) {
+ simple_lock(&all_psets_lock);
+ actual = all_psets_count;
+
+ /* do we have the memory we need? */
+
+ size_needed = actual * sizeof(mach_port_t);
+ if (size_needed <= size)
+ break;
+
+ /* unlock and allocate more memory */
+ simple_unlock(&all_psets_lock);
+
+ if (size != 0)
+ kfree(addr, size);
+
+ assert(size_needed > 0);
+ size = size_needed;
+
+ addr = kalloc(size);
+ if (addr == 0)
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /* OK, have memory and the all_psets_lock */
+
+ psets = (processor_set_t *) addr;
+
+ for (i = 0, pset = (processor_set_t) queue_first(&all_psets);
+ i < actual;
+ i++, pset = (processor_set_t) queue_next(&pset->all_psets)) {
+ /* take ref for convert_pset_name_to_port */
+ pset_reference(pset);
+ psets[i] = pset;
+ }
+ assert(queue_end(&all_psets, (queue_entry_t) pset));
+
+ /* can unlock now that we've got the pset refs */
+ simple_unlock(&all_psets_lock);
+
+ /*
+ * Always have default port.
+ */
+
+ assert(actual > 0);
+
+ /* if we allocated too much, must copy */
+
+ if (size_needed < size) {
+ vm_offset_t newaddr;
+
+ newaddr = kalloc(size_needed);
+ if (newaddr == 0) {
+ for (i = 0; i < actual; i++)
+ pset_deallocate(psets[i]);
+ kfree(addr, size);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ bcopy((char *) addr, (char *) newaddr, size_needed);
+ kfree(addr, size);
+ psets = (processor_set_t *) newaddr;
+ }
+
+ *pset_list = (mach_port_t *) psets;
+ *count = actual;
+
+ /* do the conversion that Mig should handle */
+
+ for (i = 0; i < actual; i++)
+ ((mach_port_t *) psets)[i] =
+ (mach_port_t)convert_pset_name_to_port(psets[i]);
+
+ return KERN_SUCCESS;
+}
+#else /* MACH_HOST */
+/*
+ * Only one processor set, the default processor set, in this case.
+ */
+kern_return_t
+host_processor_sets(
+ host_t host,
+ processor_set_name_array_t *pset_list,
+ natural_t *count)
+{
+ vm_offset_t addr;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Allocate memory. Can be pageable because it won't be
+ * touched while holding a lock.
+ */
+
+ addr = kalloc((vm_size_t) sizeof(mach_port_t));
+ if (addr == 0)
+ return KERN_RESOURCE_SHORTAGE;
+
+	/* take ref for convert_pset_name_to_port */
+ pset_reference(&default_pset);
+ /* do the conversion that Mig should handle */
+ *((mach_port_t *) addr) =
+ (mach_port_t) convert_pset_name_to_port(&default_pset);
+
+ *pset_list = (mach_port_t *) addr;
+ *count = 1;
+
+ return KERN_SUCCESS;
+}
+#endif /* MACH_HOST */
+
+/*
+ * host_processor_set_priv:
+ *
+ * Return control port for given processor set.
+ */
+kern_return_t
+host_processor_set_priv(
+ host_t host,
+ processor_set_t pset_name,
+ processor_set_t *pset)
+{
+ if ((host == HOST_NULL) || (pset_name == PROCESSOR_SET_NULL)) {
+ *pset = PROCESSOR_SET_NULL;
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *pset = pset_name;
+ pset_reference(*pset);
+ return KERN_SUCCESS;
+}
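
host_processor_sets() uses a common idiom: size the result while holding the lock, and if the buffer in hand is too small, drop the lock, grow the buffer, and retry, because the count may change while the lock is not held. A stand-alone sketch of that retry loop with malloc and a pthread mutex; item_count and the int payload are placeholders, not kernel data:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static size_t item_count = 8;		/* placeholder: may change under list_lock */

/* Allocate a buffer big enough for the current items without holding
 * the lock across the allocation, retrying if the count grew. */
static int *snapshot_items(size_t *count_out)
{
	int *buf = NULL;
	size_t size = 0, needed;

	for (;;) {
		pthread_mutex_lock(&list_lock);
		needed = item_count * sizeof *buf;
		if (needed <= size)
			break;			/* buffer is big enough; keep the lock */
		pthread_mutex_unlock(&list_lock);

		free(buf);			/* too small: grow and try again */
		size = needed;
		buf = malloc(size);
		if (buf == NULL)
			return NULL;
	}
	/* ... copy the items here, still under list_lock ... */
	pthread_mutex_unlock(&list_lock);
	*count_out = needed / sizeof *buf;
	return buf;
}
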
diff --git a/kern/host.h b/kern/host.h
new file mode 100644
index 00000000..0807f99f
--- /dev/null
+++ b/kern/host.h
@@ -0,0 +1,48 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * kern/host.h
+ *
+ * Definitions for host data structures.
+ *
+ */
+
+#ifndef _KERN_HOST_H_
+#define _KERN_HOST_H_
+
+struct host {
+ struct ipc_port *host_self;
+ struct ipc_port *host_priv_self;
+};
+
+typedef struct host *host_t;
+typedef struct host host_data_t;
+
+#define HOST_NULL ((host_t)0)
+
+extern host_data_t realhost;
+
+#endif /* _KERN_HOST_H_ */
diff --git a/kern/ipc_host.c b/kern/ipc_host.c
new file mode 100644
index 00000000..4dbf9fc9
--- /dev/null
+++ b/kern/ipc_host.c
@@ -0,0 +1,488 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * kern/ipc_host.c
+ *
+ * Routines to implement host ports.
+ */
+
+#include <mach/message.h>
+#include <kern/host.h>
+#include <kern/processor.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/ipc_host.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <machine/machspl.h> /* for spl */
+
+
+
+/*
+ * ipc_host_init: set up various things.
+ */
+
+void ipc_host_init(void)
+{
+ ipc_port_t port;
+ /*
+ * Allocate and set up the two host ports.
+ */
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("ipc_host_init");
+
+ ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST);
+ realhost.host_self = port;
+
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("ipc_host_init");
+
+ ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_PRIV);
+ realhost.host_priv_self = port;
+
+ /*
+ * Set up ipc for default processor set.
+ */
+ ipc_pset_init(&default_pset);
+ ipc_pset_enable(&default_pset);
+
+ /*
+ * And for master processor
+ */
+ ipc_processor_init(master_processor);
+}
+
+/*
+ * Routine: mach_host_self [mach trap]
+ * Purpose:
+ * Give the caller send rights for his own host port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+mach_port_t
+mach_host_self(void)
+{
+ ipc_port_t sright;
+
+ sright = ipc_port_make_send(realhost.host_self);
+ return ipc_port_copyout_send(sright, current_space());
+}
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: host_self [mach trap]
+ * Purpose:
+ * Give the caller send rights for his own host port.
+ * If new, the send right is marked with IE_BITS_COMPAT.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+port_name_t
+host_self(void)
+{
+ ipc_port_t sright;
+
+ sright = ipc_port_make_send(realhost.host_self);
+ return (port_name_t)
+ ipc_port_copyout_send_compat(sright, current_space());
+}
+
+#endif /* MACH_IPC_COMPAT */
+
+/*
+ * ipc_processor_init:
+ *
+ * Initialize ipc access to processor by allocating port.
+ * Enable ipc control of processor by setting port object.
+ */
+
+void
+ipc_processor_init(
+ processor_t processor)
+{
+ ipc_port_t port;
+
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("ipc_processor_init");
+ processor->processor_self = port;
+ ipc_kobject_set(port, (ipc_kobject_t) processor, IKOT_PROCESSOR);
+}
+
+
+/*
+ * ipc_pset_init:
+ *
+ * Initialize ipc control of a processor set by allocating its ports.
+ */
+
+void
+ipc_pset_init(
+ processor_set_t pset)
+{
+ ipc_port_t port;
+
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("ipc_pset_init");
+ pset->pset_self = port;
+
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("ipc_pset_init");
+ pset->pset_name_self = port;
+}
+
+/*
+ * ipc_pset_enable:
+ *
+ * Enable ipc access to a processor set.
+ */
+void
+ipc_pset_enable(
+ processor_set_t pset)
+{
+ pset_lock(pset);
+ if (pset->active) {
+ ipc_kobject_set(pset->pset_self,
+ (ipc_kobject_t) pset, IKOT_PSET);
+ ipc_kobject_set(pset->pset_name_self,
+ (ipc_kobject_t) pset, IKOT_PSET_NAME);
+ pset_ref_lock(pset);
+ pset->ref_count += 2;
+ pset_ref_unlock(pset);
+ }
+ pset_unlock(pset);
+}
+
+/*
+ * ipc_pset_disable:
+ *
+ * Disable ipc access to a processor set by clearing the port objects.
+ * Caller must hold pset lock and a reference to the pset. Ok to
+ * just decrement pset reference count as a result.
+ */
+void
+ipc_pset_disable(
+ processor_set_t pset)
+{
+ ipc_kobject_set(pset->pset_self, IKO_NULL, IKOT_NONE);
+ ipc_kobject_set(pset->pset_name_self, IKO_NULL, IKOT_NONE);
+ pset->ref_count -= 2;
+}
+
+/*
+ * ipc_pset_terminate:
+ *
+ * Processor set is dead. Deallocate the ipc control structures.
+ */
+void
+ipc_pset_terminate(
+ processor_set_t pset)
+{
+ ipc_port_dealloc_kernel(pset->pset_self);
+ ipc_port_dealloc_kernel(pset->pset_name_self);
+}
+
+/*
+ * processor_set_default, processor_set_default_priv:
+ *
+ * Return ports for manipulating the default processor set. MiG code
+ * differentiates between these two routines.
+ */
+kern_return_t
+processor_set_default(
+ host_t host,
+ processor_set_t *pset)
+{
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ *pset = &default_pset;
+ pset_reference(*pset);
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+xxx_processor_set_default_priv(
+ host_t host,
+ processor_set_t *pset)
+{
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ *pset = &default_pset;
+ pset_reference(*pset);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: convert_port_to_host
+ * Purpose:
+ * Convert from a port to a host.
+ * Doesn't consume the port ref; the host produced may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+host_t
+convert_port_to_host(
+ ipc_port_t port)
+{
+ host_t host = HOST_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ ((ip_kotype(port) == IKOT_HOST) ||
+ (ip_kotype(port) == IKOT_HOST_PRIV)))
+ host = (host_t) port->ip_kobject;
+ ip_unlock(port);
+ }
+
+ return host;
+}
+
+/*
+ * Routine: convert_port_to_host_priv
+ * Purpose:
+ * Convert from a port to a host.
+ * Doesn't consume the port ref; the host produced may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+host_t
+convert_port_to_host_priv(
+ ipc_port_t port)
+{
+ host_t host = HOST_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_HOST_PRIV))
+ host = (host_t) port->ip_kobject;
+ ip_unlock(port);
+ }
+
+ return host;
+}
+
+/*
+ * Routine: convert_port_to_processor
+ * Purpose:
+ * Convert from a port to a processor.
+ * Doesn't consume the port ref;
+ * the processor produced may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+processor_t
+convert_port_to_processor(
+ ipc_port_t port)
+{
+ processor_t processor = PROCESSOR_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_PROCESSOR))
+ processor = (processor_t) port->ip_kobject;
+ ip_unlock(port);
+ }
+
+ return processor;
+}
+
+/*
+ * Routine: convert_port_to_pset
+ * Purpose:
+ * Convert from a port to a pset.
+ * Doesn't consume the port ref; produces a pset ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+processor_set_t
+convert_port_to_pset(
+ ipc_port_t port)
+{
+ processor_set_t pset = PROCESSOR_SET_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_PSET)) {
+ pset = (processor_set_t) port->ip_kobject;
+ pset_reference(pset);
+ }
+ ip_unlock(port);
+ }
+
+ return pset;
+}
+
+/*
+ * Routine: convert_port_to_pset_name
+ * Purpose:
+ * Convert from a port to a pset.
+ * Doesn't consume the port ref; produces a pset ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+processor_set_t
+convert_port_to_pset_name(
+ ipc_port_t port)
+{
+ processor_set_t pset = PROCESSOR_SET_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ ((ip_kotype(port) == IKOT_PSET) ||
+ (ip_kotype(port) == IKOT_PSET_NAME))) {
+ pset = (processor_set_t) port->ip_kobject;
+ pset_reference(pset);
+ }
+ ip_unlock(port);
+ }
+
+ return pset;
+}
+
+/*
+ * Routine: convert_host_to_port
+ * Purpose:
+ * Convert from a host to a port.
+ * Produces a naked send right which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_host_to_port(
+ host_t host)
+{
+ ipc_port_t port;
+
+ port = ipc_port_make_send(host->host_self);
+
+ return port;
+}
+
+/*
+ * Routine: convert_processor_to_port
+ * Purpose:
+ * Convert from a processor to a port.
+ * Produces a naked send right which is always valid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_processor_to_port(processor_t processor)
+{
+ ipc_port_t port;
+
+ port = ipc_port_make_send(processor->processor_self);
+
+ return port;
+}
+
+/*
+ * Routine: convert_pset_to_port
+ * Purpose:
+ * Convert from a pset to a port.
+ * Consumes a pset ref; produces a naked send right
+ * which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_pset_to_port(
+ processor_set_t pset)
+{
+ ipc_port_t port;
+
+ pset_lock(pset);
+ if (pset->active)
+ port = ipc_port_make_send(pset->pset_self);
+ else
+ port = IP_NULL;
+ pset_unlock(pset);
+
+ pset_deallocate(pset);
+ return port;
+}
+
+/*
+ * Routine: convert_pset_name_to_port
+ * Purpose:
+ * Convert from a pset to a port.
+ * Consumes a pset ref; produces a naked send right
+ * which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_pset_name_to_port(
+ processor_set_t pset)
+{
+ ipc_port_t port;
+
+ pset_lock(pset);
+ if (pset->active)
+ port = ipc_port_make_send(pset->pset_name_self);
+ else
+ port = IP_NULL;
+ pset_unlock(pset);
+
+ pset_deallocate(pset);
+ return port;
+}
diff --git a/kern/ipc_host.h b/kern/ipc_host.h
new file mode 100644
index 00000000..13c54cf9
--- /dev/null
+++ b/kern/ipc_host.h
@@ -0,0 +1,72 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_IPC_HOST_H_
+#define _KERN_IPC_HOST_H_
+
+#include <mach/port.h>
+#include <kern/processor.h>
+
+extern void ipc_host_init(void);
+
+extern void ipc_processor_init(processor_t);
+
+extern void ipc_pset_init(processor_set_t);
+extern void ipc_pset_enable(processor_set_t);
+extern void ipc_pset_disable(processor_set_t);
+extern void ipc_pset_terminate(processor_set_t);
+
+extern struct host *
+convert_port_to_host(struct ipc_port *);
+
+extern struct ipc_port *
+convert_host_to_port(struct host *);
+
+extern struct host *
+convert_port_to_host_priv(struct ipc_port *);
+
+extern processor_t
+convert_port_to_processor(struct ipc_port *);
+
+extern struct ipc_port *
+convert_processor_to_port(processor_t);
+
+extern processor_set_t
+convert_port_to_pset(struct ipc_port *);
+
+extern struct ipc_port *
+convert_pset_to_port(processor_set_t);
+
+extern processor_set_t
+convert_port_to_pset_name(struct ipc_port *);
+
+extern struct ipc_port *
+convert_pset_name_to_port(processor_set_t);
+
+#endif /* _KERN_IPC_HOST_H_ */
diff --git a/kern/ipc_kobject.c b/kern/ipc_kobject.c
new file mode 100644
index 00000000..2b372053
--- /dev/null
+++ b/kern/ipc_kobject.c
@@ -0,0 +1,391 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: kern/ipc_kobject.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions for letting a port represent a kernel object.
+ */
+
+#include <mach_debug.h>
+#include <mach_ipc_test.h>
+#include <mach_machine_routines.h>
+#include <norma_task.h>
+#include <norma_vm.h>
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <mach/mig_errors.h>
+#include <mach/notify.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_thread.h>
+
+#if MACH_MACHINE_ROUTINES
+#include <machine/machine_routines.h>
+#endif
+
+
+/*
+ * Routine: ipc_kobject_server
+ * Purpose:
+ * Handle a message sent to the kernel.
+ * Generates a reply message.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_kmsg_t
+ipc_kobject_server(request)
+ ipc_kmsg_t request;
+{
+ mach_msg_size_t reply_size = ikm_less_overhead(8192);
+ ipc_kmsg_t reply;
+ kern_return_t kr;
+ mig_routine_t routine;
+ ipc_port_t *destp;
+
+ reply = ikm_alloc(reply_size);
+ if (reply == IKM_NULL) {
+ printf("ipc_kobject_server: dropping request\n");
+ ipc_kmsg_destroy(request);
+ return IKM_NULL;
+ }
+ ikm_init(reply, reply_size);
+
+ /*
+ * Initialize reply message.
+ */
+ {
+#define InP ((mach_msg_header_t *) &request->ikm_header)
+#define OutP ((mig_reply_header_t *) &reply->ikm_header)
+
+ static mach_msg_type_t RetCodeType = {
+ /* msgt_name = */ MACH_MSG_TYPE_INTEGER_32,
+ /* msgt_size = */ 32,
+ /* msgt_number = */ 1,
+ /* msgt_inline = */ TRUE,
+ /* msgt_longform = */ FALSE,
+ /* msgt_unused = */ 0
+ };
+ OutP->Head.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0);
+ OutP->Head.msgh_size = sizeof(mig_reply_header_t);
+ OutP->Head.msgh_remote_port = InP->msgh_local_port;
+ OutP->Head.msgh_local_port = MACH_PORT_NULL;
+ OutP->Head.msgh_seqno = 0;
+ OutP->Head.msgh_id = InP->msgh_id + 100;
+#if 0
+ if (InP->msgh_id) {
+ static long _calls;
+ static struct { long id, count; } _counts[512];
+ int i, id;
+
+ id = InP->msgh_id;
+ for (i = 0; i < 511; i++) {
+ if (_counts[i].id == 0) {
+ _counts[i].id = id;
+ _counts[i].count++;
+ break;
+ }
+ if (_counts[i].id == id) {
+ _counts[i].count++;
+ break;
+ }
+ }
+ if (i == 511) {
+ _counts[i].id = id;
+ _counts[i].count++;
+ }
+ if ((++_calls & 0x7fff) == 0)
+ for (i = 0; i < 512; i++) {
+ if (_counts[i].id == 0)
+ break;
+ printf("%d: %d\n",
+ _counts[i].id, _counts[i].count);
+ }
+ }
+#endif
+
+ OutP->RetCodeType = RetCodeType;
+
+#undef InP
+#undef OutP
+ }
+
+ /*
+ * Find the server routine to call, and call it
+ * to perform the kernel function
+ */
+ {
+ extern mig_routine_t mach_server_routine(),
+ mach_port_server_routine(),
+ mach_host_server_routine(),
+ device_server_routine(),
+ device_pager_server_routine(),
+ mach4_server_routine();
+#if MACH_DEBUG
+ extern mig_routine_t mach_debug_server_routine();
+#endif
+#if NORMA_TASK
+ extern mig_routine_t mach_norma_server_routine();
+ extern mig_routine_t norma_internal_server_routine();
+#endif
+#if NORMA_VM
+ extern mig_routine_t proxy_server_routine();
+#endif
+
+#if MACH_MACHINE_ROUTINES
+ extern mig_routine_t MACHINE_SERVER_ROUTINE();
+#endif
+
+ check_simple_locks();
+ if ((routine = mach_server_routine(&request->ikm_header)) != 0
+ || (routine = mach_port_server_routine(&request->ikm_header)) != 0
+ || (routine = mach_host_server_routine(&request->ikm_header)) != 0
+ || (routine = device_server_routine(&request->ikm_header)) != 0
+ || (routine = device_pager_server_routine(&request->ikm_header)) != 0
+#if MACH_DEBUG
+ || (routine = mach_debug_server_routine(&request->ikm_header)) != 0
+#endif /* MACH_DEBUG */
+#if NORMA_TASK
+ || (routine = mach_norma_server_routine(&request->ikm_header)) != 0
+ || (routine = norma_internal_server_routine(&request->ikm_header)) != 0
+#endif /* NORMA_TASK */
+#if NORMA_VM
+ || (routine = proxy_server_routine(&request->ikm_header)) != 0
+#endif /* NORMA_VM */
+ || (routine = mach4_server_routine(&request->ikm_header)) != 0
+#if MACH_MACHINE_ROUTINES
+ || (routine = MACHINE_SERVER_ROUTINE(&request->ikm_header)) != 0
+#endif /* MACH_MACHINE_ROUTINES */
+ ) {
+ (*routine)(&request->ikm_header, &reply->ikm_header);
+ }
+ else if (!ipc_kobject_notify(&request->ikm_header,&reply->ikm_header)){
+ ((mig_reply_header_t *) &reply->ikm_header)->RetCode
+ = MIG_BAD_ID;
+#if MACH_IPC_TEST
+ printf("ipc_kobject_server: bogus kernel message, id=%d\n",
+ request->ikm_header.msgh_id);
+#endif /* MACH_IPC_TEST */
+ }
+ }
+ check_simple_locks();
+
+ /*
+ * Destroy destination. The following code differs from
+ * ipc_object_destroy in that we release the send-once
+ * right instead of generating a send-once notification
+ * (which would bring us here again, creating a loop).
+ * It also differs in that we only expect send or
+ * send-once rights, never receive rights.
+ *
+ * We set msgh_remote_port to IP_NULL so that the kmsg
+ * destroy routines don't try to destroy the port twice.
+ */
+ destp = (ipc_port_t *) &request->ikm_header.msgh_remote_port;
+ switch (MACH_MSGH_BITS_REMOTE(request->ikm_header.msgh_bits)) {
+ case MACH_MSG_TYPE_PORT_SEND:
+ ipc_port_release_send(*destp);
+ break;
+
+ case MACH_MSG_TYPE_PORT_SEND_ONCE:
+ ipc_port_release_sonce(*destp);
+ break;
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_object_destroy: strange destination rights");
+#else
+ panic("ipc_object_destroy: strange destination rights");
+#endif
+ }
+ *destp = IP_NULL;
+
+ kr = ((mig_reply_header_t *) &reply->ikm_header)->RetCode;
+ if ((kr == KERN_SUCCESS) || (kr == MIG_NO_REPLY)) {
+ /*
+ * The server function is responsible for the contents
+ * of the message. The reply port right is moved
+ * to the reply message, and we have deallocated
+ * the destination port right, so we just need
+ * to free the kmsg.
+ */
+
+ /* like ipc_kmsg_put, but without the copyout */
+
+ ikm_check_initialized(request, request->ikm_size);
+ if ((request->ikm_size == IKM_SAVED_KMSG_SIZE) &&
+ (ikm_cache() == IKM_NULL))
+ ikm_cache() = request;
+ else
+ ikm_free(request);
+ } else {
+ /*
+ * The message contents of the request are intact.
+		 * Destroy everything except the reply port right,
+ * which is needed in the reply message.
+ */
+
+ request->ikm_header.msgh_local_port = MACH_PORT_NULL;
+ ipc_kmsg_destroy(request);
+ }
+
+ if (kr == MIG_NO_REPLY) {
+ /*
+ * The server function will send a reply message
+ * using the reply port right, which it has saved.
+ */
+
+ ikm_free(reply);
+ return IKM_NULL;
+ } else if (!IP_VALID((ipc_port_t)reply->ikm_header.msgh_remote_port)) {
+ /*
+ * Can't queue the reply message if the destination
+ * (the reply port) isn't valid.
+ */
+
+ ipc_kmsg_destroy(reply);
+ return IKM_NULL;
+ }
+
+ return reply;
+}
+
+/*
+ * Routine: ipc_kobject_set
+ * Purpose:
+ * Make a port represent a kernel object of the given type.
+ * The caller is responsible for handling refs for the
+ * kernel object, if necessary.
+ * Conditions:
+ * Nothing locked. The port must be active.
+ */
+
+void
+ipc_kobject_set(port, kobject, type)
+ ipc_port_t port;
+ ipc_kobject_t kobject;
+ ipc_kobject_type_t type;
+{
+ ip_lock(port);
+ assert(ip_active(port));
+ port->ip_bits = (port->ip_bits &~ IO_BITS_KOTYPE) | type;
+ port->ip_kobject = kobject;
+ ip_unlock(port);
+}
+
+/*
+ * Routine: ipc_kobject_destroy
+ * Purpose:
+ * Release any kernel object resources associated
+ * with the port, which is being destroyed.
+ *
+ * This should only be needed when resources are
+ * associated with a user's port. In the normal case,
+ * when the kernel is the receiver, the code calling
+ * ipc_port_dealloc_kernel should clean up the resources.
+ * Conditions:
+ * The port is not locked, but it is dead.
+ */
+
+void
+ipc_kobject_destroy(
+ ipc_port_t port)
+{
+ switch (ip_kotype(port)) {
+ case IKOT_PAGER:
+ vm_object_destroy(port);
+ break;
+
+ case IKOT_PAGER_TERMINATING:
+ vm_object_pager_wakeup(port);
+ break;
+
+ default:
+#if MACH_ASSERT
+ printf("ipc_kobject_destroy: port 0x%x, kobj 0x%x, type %d\n",
+ port, port->ip_kobject, ip_kotype(port));
+#endif /* MACH_ASSERT */
+ break;
+ }
+}
+
+/*
+ * Routine: ipc_kobject_notify
+ * Purpose:
+ * Deliver notifications to kobjects that care about them.
+ */
+
+boolean_t
+ipc_kobject_notify(request_header, reply_header)
+ mach_msg_header_t *request_header;
+ mach_msg_header_t *reply_header;
+{
+ ipc_port_t port = (ipc_port_t) request_header->msgh_remote_port;
+
+ ((mig_reply_header_t *) reply_header)->RetCode = MIG_NO_REPLY;
+ switch (request_header->msgh_id) {
+ case MACH_NOTIFY_PORT_DELETED:
+ case MACH_NOTIFY_MSG_ACCEPTED:
+ case MACH_NOTIFY_PORT_DESTROYED:
+ case MACH_NOTIFY_NO_SENDERS:
+ case MACH_NOTIFY_SEND_ONCE:
+ case MACH_NOTIFY_DEAD_NAME:
+ break;
+
+ default:
+ return FALSE;
+ }
+ switch (ip_kotype(port)) {
+#if NORMA_VM
+ case IKOT_XMM_OBJECT:
+ return xmm_object_notify(request_header);
+
+ case IKOT_XMM_PAGER:
+ return xmm_pager_notify(request_header);
+
+ case IKOT_XMM_KERNEL:
+ return xmm_kernel_notify(request_header);
+
+ case IKOT_XMM_REPLY:
+ return xmm_reply_notify(request_header);
+#endif /* NORMA_VM */
+
+ case IKOT_DEVICE:
+ return ds_notify(request_header);
+
+ default:
+ return FALSE;
+ }
+}
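
The demux chain at the heart of ipc_kobject_server() tries each generated
server-routine lookup in turn until one recognizes the request id, and marks
the reply as a bad id otherwise; the reply's msgh_id is always the request id
plus 100.  A stand-alone sketch of that dispatch shape, with local stand-ins
for the kmsg and MiG types:

    /* Sketch only: msg, handler_t and BAD_ID stand in for kmsg headers,
     * MiG server routines and MIG_BAD_ID. */
    #include <stddef.h>

    struct msg { int id; int retcode; };

    typedef void (*handler_t)(const struct msg *request, struct msg *reply);

    /* Each demux returns the handler for the request id, or NULL if the
     * id belongs to some other subsystem. */
    typedef handler_t (*demux_t)(const struct msg *request);

    #define BAD_ID (-1)                     /* stand-in for MIG_BAD_ID */

    void serve_one(demux_t *demuxes, size_t n,
                   const struct msg *request, struct msg *reply)
    {
        size_t i;

        reply->id = request->id + 100;      /* reply-id convention */

        for (i = 0; i < n; i++) {
            handler_t h = demuxes[i](request);
            if (h != NULL) {
                h(request, reply);          /* subsystem fills in the reply */
                return;
            }
        }
        reply->retcode = BAD_ID;            /* no subsystem claimed the id */
    }

After dispatch the real routine also consumes the destination right and either
caches, frees or destroys the request kmsg depending on the RetCode, as shown
above.
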
diff --git a/kern/ipc_kobject.h b/kern/ipc_kobject.h
new file mode 100644
index 00000000..91eb30f6
--- /dev/null
+++ b/kern/ipc_kobject.h
@@ -0,0 +1,118 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: kern/ipc_kobject.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations for letting a port represent a kernel object.
+ */
+
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_types.h>
+
+#ifndef _KERN_IPC_KOBJECT_H_
+#define _KERN_IPC_KOBJECT_H_
+
+#include <mach/machine/vm_types.h>
+
+typedef vm_offset_t ipc_kobject_t;
+
+#define IKO_NULL ((ipc_kobject_t) 0)
+
+typedef unsigned int ipc_kobject_type_t;
+
+#define IKOT_NONE 0
+#define IKOT_THREAD 1
+#define IKOT_TASK 2
+#define IKOT_HOST 3
+#define IKOT_HOST_PRIV 4
+#define IKOT_PROCESSOR 5
+#define IKOT_PSET 6
+#define IKOT_PSET_NAME 7
+#define IKOT_PAGER 8
+#define IKOT_PAGING_REQUEST 9
+#define IKOT_DEVICE 10
+#define IKOT_XMM_OBJECT 11
+#define IKOT_XMM_PAGER 12
+#define IKOT_XMM_KERNEL 13
+#define IKOT_XMM_REPLY 14
+#define IKOT_PAGER_TERMINATING 15
+#define IKOT_PAGING_NAME 16
+#define IKOT_HOST_SECURITY 17
+#define IKOT_LEDGER 18
+#define IKOT_MASTER_DEVICE 19
+#define IKOT_ACT 20
+#define IKOT_SUBSYSTEM 21
+#define IKOT_IO_DONE_QUEUE 22
+#define IKOT_SEMAPHORE 23
+#define IKOT_LOCK_SET 24
+#define IKOT_CLOCK 25
+#define IKOT_CLOCK_CTRL 26
+ /* << new entries here */
+#define IKOT_UNKNOWN 27 /* magic catchall */
+#define IKOT_MAX_TYPE 28 /* # of IKOT_ types */
+ /* Please keep ipc/ipc_object.c:ikot_print_array up to date */
+
+#define is_ipc_kobject(ikot) (ikot != IKOT_NONE)
+
+/*
+ * Define types of kernel objects that use page lists instead
+ * of entry lists for copyin of out of line memory.
+ */
+
+#define ipc_kobject_vm_page_list(ikot) \
+ ((ikot == IKOT_PAGING_REQUEST) || (ikot == IKOT_DEVICE))
+
+#define ipc_kobject_vm_page_steal(ikot) (ikot == IKOT_PAGING_REQUEST)
+
+/* Initialize kernel server dispatch table */
+/* XXX
+extern void mig_init(void);
+*/
+
+/* Dispatch a kernel server function */
+extern ipc_kmsg_t ipc_kobject_server(
+ ipc_kmsg_t request);
+
+/* Make a port represent a kernel object of the given type */
+extern void ipc_kobject_set(
+ ipc_port_t port,
+ ipc_kobject_t kobject,
+ ipc_kobject_type_t type);
+
+/* Release any kernel object resources associated with a port */
+extern void ipc_kobject_destroy(
+ ipc_port_t port);
+
+#define null_conversion(port) (port)
+
+#endif /* _KERN_IPC_KOBJECT_H_ */
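
ipc_kobject_set() (implemented earlier in this diff) records the IKOT_* tag by
masking the old type out of the port's bits word and or-ing the new one in.  A
small stand-alone illustration of that masking step; the mask value here is an
arbitrary stand-in, not IO_BITS_KOTYPE:

    /* Sketch only: KOTYPE_MASK stands in for IO_BITS_KOTYPE. */
    #include <assert.h>
    #include <stdio.h>

    #define KOTYPE_MASK 0x0000ffffu

    static unsigned set_kotype(unsigned bits, unsigned type)
    {
        assert((type & ~KOTYPE_MASK) == 0); /* type must fit in the tag field */
        return (bits & ~KOTYPE_MASK) | type;
    }

    int main(void)
    {
        unsigned bits = 0x80000000u | 3;    /* unrelated flag + IKOT_HOST (3) */

        bits = set_kotype(bits, 6);         /* retag as IKOT_PSET (6) */
        printf("bits = 0x%08x, type = %u\n", bits, bits & KOTYPE_MASK);
        assert((bits & KOTYPE_MASK) == 6);  /* tag replaced */
        assert(bits & 0x80000000u);         /* unrelated flag untouched */
        return 0;
    }
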
diff --git a/kern/ipc_mig.c b/kern/ipc_mig.c
new file mode 100644
index 00000000..ed5df1fe
--- /dev/null
+++ b/kern/ipc_mig.c
@@ -0,0 +1,1134 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <norma_vm.h>
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/thread_status.h>
+#include <kern/ast.h>
+#include <kern/ipc_tt.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/ipc_kobject.h>
+#include <vm/vm_map.h>
+#include <vm/vm_user.h>
+#include <ipc/port.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_thread.h>
+#include <device/device_types.h>
+
+
+/*
+ * Routine: mach_msg_send_from_kernel
+ * Purpose:
+ * Send a message from the kernel.
+ *
+ * This is used by the client side of KernelUser interfaces
+ * to implement SimpleRoutines. Currently, this includes
+ * device_reply and memory_object messages.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Sent the message.
+ * MACH_SEND_INVALID_DATA Bad destination port.
+ */
+
+mach_msg_return_t
+mach_msg_send_from_kernel(
+ mach_msg_header_t *msg,
+ mach_msg_size_t send_size)
+{
+ ipc_kmsg_t kmsg;
+ mach_msg_return_t mr;
+
+ if (!MACH_PORT_VALID(msg->msgh_remote_port))
+ return MACH_SEND_INVALID_DEST;
+
+ mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ panic("mach_msg_send_from_kernel");
+
+ ipc_kmsg_copyin_from_kernel(kmsg);
+ ipc_mqueue_send_always(kmsg);
+
+ return MACH_MSG_SUCCESS;
+}
+
+mach_msg_return_t
+mach_msg_rpc_from_kernel(msg, send_size, reply_size)
+ mach_msg_header_t *msg;
+ mach_msg_size_t send_size;
+ mach_msg_size_t reply_size;
+{
+ panic("mach_msg_rpc_from_kernel"); /*XXX*/
+}
+
+#if NORMA_VM
+/*
+ * Routine: mach_msg_rpc_from_kernel
+ * Purpose:
+ * Send a message from the kernel and receive a reply.
+ * Uses ith_rpc_reply for the reply port.
+ *
+ * This is used by the client side of KernelUser interfaces
+ * to implement Routines.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Sent the message.
+ * MACH_RCV_PORT_DIED The reply port was deallocated.
+ */
+
+mach_msg_return_t
+mach_msg_rpc_from_kernel(
+ mach_msg_header_t *msg,
+ mach_msg_size_t send_size,
+ mach_msg_size_t rcv_size)
+{
+ ipc_thread_t self = current_thread();
+ ipc_port_t reply;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ assert(MACH_PORT_VALID(msg->msgh_remote_port));
+ assert(msg->msgh_local_port == MACH_PORT_NULL);
+
+ mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ panic("mach_msg_rpc_from_kernel");
+
+ ipc_kmsg_copyin_from_kernel(kmsg);
+
+ ith_lock(self);
+ assert(self->ith_self != IP_NULL);
+
+ reply = self->ith_rpc_reply;
+ if (reply == IP_NULL) {
+ ith_unlock(self);
+ reply = ipc_port_alloc_reply();
+ ith_lock(self);
+ if ((reply == IP_NULL) ||
+ (self->ith_rpc_reply != IP_NULL))
+ panic("mach_msg_rpc_from_kernel");
+ self->ith_rpc_reply = reply;
+ }
+
+ /* insert send-once right for the reply port */
+ kmsg->ikm_header.msgh_local_port =
+ (mach_port_t) ipc_port_make_sonce(reply);
+
+ ipc_port_reference(reply);
+ ith_unlock(self);
+
+ ipc_mqueue_send_always(kmsg);
+
+ for (;;) {
+ ipc_mqueue_t mqueue;
+
+ ip_lock(reply);
+ if (!ip_active(reply)) {
+ ip_unlock(reply);
+ ipc_port_release(reply);
+ return MACH_RCV_PORT_DIED;
+ }
+
+ assert(reply->ip_pset == IPS_NULL);
+ mqueue = &reply->ip_messages;
+ imq_lock(mqueue);
+ ip_unlock(reply);
+
+ mr = ipc_mqueue_receive(mqueue, MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, IMQ_NULL_CONTINUE,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ if (mr == MACH_MSG_SUCCESS)
+ break;
+
+ assert((mr == MACH_RCV_INTERRUPTED) ||
+ (mr == MACH_RCV_PORT_DIED));
+
+ while (thread_should_halt(self)) {
+ /* don't terminate while holding a reference */
+ if (self->ast & AST_TERMINATE)
+ ipc_port_release(reply);
+ thread_halt_self();
+ }
+ }
+ ipc_port_release(reply);
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+
+ if (rcv_size < kmsg->ikm_header.msgh_size) {
+ ipc_kmsg_copyout_dest(kmsg, ipc_space_reply);
+ ipc_kmsg_put_to_kernel(msg, kmsg, kmsg->ikm_header.msgh_size);
+ return MACH_RCV_TOO_LARGE;
+ }
+
+ /*
+ * We want to preserve rights and memory in reply!
+ * We don't have to put them anywhere; just leave them
+ * as they are.
+ */
+
+ ipc_kmsg_copyout_to_kernel(kmsg, ipc_space_reply);
+ ipc_kmsg_put_to_kernel(msg, kmsg, kmsg->ikm_header.msgh_size);
+ return MACH_MSG_SUCCESS;
+}
+#endif /* NORMA_VM */
+
+/*
+ * Routine: mach_msg_abort_rpc
+ * Purpose:
+ * Destroy the thread's ith_rpc_reply port.
+ * This will interrupt a mach_msg_rpc_from_kernel
+ * with a MACH_RCV_PORT_DIED return code.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+mach_msg_abort_rpc(thread)
+ ipc_thread_t thread;
+{
+ ipc_port_t reply = IP_NULL;
+
+ ith_lock(thread);
+ if (thread->ith_self != IP_NULL) {
+ reply = thread->ith_rpc_reply;
+ thread->ith_rpc_reply = IP_NULL;
+ }
+ ith_unlock(thread);
+
+ if (reply != IP_NULL)
+ ipc_port_dealloc_reply(reply);
+}
+
+/*
+ * Routine: mach_msg
+ * Purpose:
+ * Like mach_msg_trap except that message buffers
+ * live in kernel space. Doesn't handle any options.
+ *
+ * This is used by in-kernel server threads to make
+ * kernel calls, to receive request messages, and
+ * to send reply messages.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ */
+
+mach_msg_return_t
+mach_msg(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
+ mach_msg_header_t *msg;
+ mach_msg_option_t option;
+ mach_msg_size_t send_size;
+ mach_msg_size_t rcv_size;
+ mach_port_t rcv_name;
+ mach_msg_timeout_t time_out;
+ mach_port_t notify;
+{
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+ mach_msg_return_t mr;
+
+ if (option & MACH_SEND_MSG) {
+ mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ panic("mach_msg");
+
+ mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ return mr;
+ }
+
+ do
+ mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
+ MACH_MSG_TIMEOUT_NONE);
+ while (mr == MACH_SEND_INTERRUPTED);
+ assert(mr == MACH_MSG_SUCCESS);
+ }
+
+ if (option & MACH_RCV_MSG) {
+ do {
+ ipc_object_t object;
+ ipc_mqueue_t mqueue;
+
+ mr = ipc_mqueue_copyin(space, rcv_name,
+ &mqueue, &object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ /* hold ref for object; mqueue is locked */
+
+ mr = ipc_mqueue_receive(mqueue, MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, IMQ_NULL_CONTINUE,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ } while (mr == MACH_RCV_INTERRUPTED);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+
+ if (rcv_size < kmsg->ikm_header.msgh_size) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ ipc_kmsg_put_to_kernel(msg, kmsg, sizeof *msg);
+ return MACH_RCV_TOO_LARGE;
+ }
+
+ mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ ipc_kmsg_put_to_kernel(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ ipc_kmsg_put_to_kernel(msg, kmsg, sizeof *msg);
+ }
+
+ return mr;
+ }
+
+ ipc_kmsg_put_to_kernel(msg, kmsg, kmsg->ikm_header.msgh_size);
+ }
+
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: mig_get_reply_port
+ * Purpose:
+ * Called by client side interfaces living in the kernel
+ * to get a reply port. This port is used for
+ * mach_msg() calls which are kernel calls.
+ */
+
+mach_port_t
+mig_get_reply_port(void)
+{
+ ipc_thread_t self = current_thread();
+
+ if (self->ith_mig_reply == MACH_PORT_NULL)
+ self->ith_mig_reply = mach_reply_port();
+
+ return self->ith_mig_reply;
+}
+
+/*
+ * Routine: mig_dealloc_reply_port
+ * Purpose:
+ * Called by client side interfaces to get rid of a reply port.
+ * Shouldn't ever be called inside the kernel, because
+ * kernel calls shouldn't prompt Mig to call it.
+ */
+
+void
+mig_dealloc_reply_port(
+ mach_port_t reply_port)
+{
+ panic("mig_dealloc_reply_port");
+}
+
+/*
+ * Routine: mig_put_reply_port
+ * Purpose:
+ * Called by client side interfaces after each RPC to
+ * let the client recycle the reply port if it wishes.
+ */
+void
+mig_put_reply_port(
+ mach_port_t reply_port)
+{
+}
+
+/*
+ * mig_strncpy.c - by Joshua Block
+ *
+ * mig_strncpy -- Bounded string copy. Does what the library routine strncpy
+ * OUGHT to do: Copies the (null terminated) string in src into dest, a
+ * buffer of length len. Assures that the copy is still null terminated
+ * and doesn't overflow the buffer, truncating the copy if necessary.
+ *
+ * Parameters:
+ *
+ * dest - Pointer to destination buffer.
+ *
+ * src - Pointer to source string.
+ *
+ * len - Length of destination buffer.
+ */
+void mig_strncpy(dest, src, len)
+char *dest, *src;
+int len;
+{
+ int i;
+
+ if (len <= 0)
+ return;
+
+ for (i=1; i<len; i++)
+ if (! (*dest++ = *src++))
+ return;
+
+ *dest = '\0';
+ return;
+}
+
+#define fast_send_right_lookup(name, port, abort) \
+MACRO_BEGIN \
+ register ipc_space_t space = current_space(); \
+ register ipc_entry_t entry; \
+ register mach_port_index_t index = MACH_PORT_INDEX(name); \
+ \
+ is_read_lock(space); \
+ assert(space->is_active); \
+ \
+ if ((index >= space->is_table_size) || \
+ (((entry = &space->is_table[index])->ie_bits & \
+ (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) != \
+ (MACH_PORT_GEN(name) | MACH_PORT_TYPE_SEND))) { \
+ is_read_unlock(space); \
+ abort; \
+ } \
+ \
+ port = (ipc_port_t) entry->ie_object; \
+ assert(port != IP_NULL); \
+ \
+ ip_lock(port); \
+ /* can safely unlock space now that port is locked */ \
+ is_read_unlock(space); \
+MACRO_END
+
+device_t
+port_name_to_device(name)
+ mach_port_t name;
+{
+ register ipc_port_t port;
+ register device_t device;
+
+ fast_send_right_lookup(name, port, goto abort);
+ /* port is locked */
+
+ /*
+ * Now map the port object to a device object.
+ * This is an inline version of dev_port_lookup().
+ */
+ if (ip_active(port) && (ip_kotype(port) == IKOT_DEVICE)) {
+ device = (device_t) port->ip_kobject;
+ device_reference(device);
+ ip_unlock(port);
+ return device;
+ }
+
+ ip_unlock(port);
+ return DEVICE_NULL;
+
+ /*
+ * The slow case. The port wasn't easily accessible.
+ */
+ abort: {
+ ipc_port_t kern_port;
+ kern_return_t kr;
+
+ kr = ipc_object_copyin(current_space(), name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &kern_port);
+ if (kr != KERN_SUCCESS)
+ return DEVICE_NULL;
+
+ device = dev_port_lookup(kern_port);
+ if (IP_VALID(kern_port))
+ ipc_port_release_send(kern_port);
+ return device;
+ }
+}
+
+thread_t
+port_name_to_thread(name)
+ mach_port_t name;
+{
+ register ipc_port_t port;
+
+ fast_send_right_lookup(name, port, goto abort);
+ /* port is locked */
+
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_THREAD)) {
+ register thread_t thread;
+
+ thread = (thread_t) port->ip_kobject;
+ assert(thread != THREAD_NULL);
+
+ /* thread referencing is a bit complicated,
+ so don't bother to expand inline */
+ thread_reference(thread);
+ ip_unlock(port);
+
+ return thread;
+ }
+
+ ip_unlock(port);
+ return THREAD_NULL;
+
+ abort: {
+ thread_t thread;
+ ipc_port_t kern_port;
+ kern_return_t kr;
+
+ kr = ipc_object_copyin(current_space(), name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &kern_port);
+ if (kr != KERN_SUCCESS)
+ return THREAD_NULL;
+
+ thread = convert_port_to_thread(kern_port);
+ if (IP_VALID(kern_port))
+ ipc_port_release_send(kern_port);
+
+ return thread;
+ }
+}
+
+task_t
+port_name_to_task(name)
+ mach_port_t name;
+{
+ register ipc_port_t port;
+
+ fast_send_right_lookup(name, port, goto abort);
+ /* port is locked */
+
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ register task_t task;
+
+ task = (task_t) port->ip_kobject;
+ assert(task != TASK_NULL);
+
+ task_lock(task);
+ /* can safely unlock port now that task is locked */
+ ip_unlock(port);
+
+ task->ref_count++;
+ task_unlock(task);
+
+ return task;
+ }
+
+ ip_unlock(port);
+ return TASK_NULL;
+
+ abort: {
+ task_t task;
+ ipc_port_t kern_port;
+ kern_return_t kr;
+
+ kr = ipc_object_copyin(current_space(), name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &kern_port);
+ if (kr != KERN_SUCCESS)
+ return TASK_NULL;
+
+ task = convert_port_to_task(kern_port);
+ if (IP_VALID(kern_port))
+ ipc_port_release_send(kern_port);
+
+ return task;
+ }
+}
+
+vm_map_t
+port_name_to_map(
+ mach_port_t name)
+{
+ register ipc_port_t port;
+
+ fast_send_right_lookup(name, port, goto abort);
+ /* port is locked */
+
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ register vm_map_t map;
+
+ map = ((task_t) port->ip_kobject)->map;
+ assert(map != VM_MAP_NULL);
+
+ simple_lock(&map->ref_lock);
+ /* can safely unlock port now that map is locked */
+ ip_unlock(port);
+
+ map->ref_count++;
+ simple_unlock(&map->ref_lock);
+
+ return map;
+ }
+
+ ip_unlock(port);
+ return VM_MAP_NULL;
+
+ abort: {
+ vm_map_t map;
+ ipc_port_t kern_port;
+ kern_return_t kr;
+
+ kr = ipc_object_copyin(current_space(), name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &kern_port);
+ if (kr != KERN_SUCCESS)
+ return VM_MAP_NULL;
+
+ map = convert_port_to_map(kern_port);
+ if (IP_VALID(kern_port))
+ ipc_port_release_send(kern_port);
+
+ return map;
+ }
+}
+
+ipc_space_t
+port_name_to_space(name)
+ mach_port_t name;
+{
+ register ipc_port_t port;
+
+ fast_send_right_lookup(name, port, goto abort);
+ /* port is locked */
+
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ register ipc_space_t space;
+
+ space = ((task_t) port->ip_kobject)->itk_space;
+ assert(space != IS_NULL);
+
+ simple_lock(&space->is_ref_lock_data);
+ /* can safely unlock port now that space is locked */
+ ip_unlock(port);
+
+ space->is_references++;
+ simple_unlock(&space->is_ref_lock_data);
+
+ return space;
+ }
+
+ ip_unlock(port);
+ return IS_NULL;
+
+ abort: {
+ ipc_space_t space;
+ ipc_port_t kern_port;
+ kern_return_t kr;
+
+ kr = ipc_object_copyin(current_space(), name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &kern_port);
+ if (kr != KERN_SUCCESS)
+ return IS_NULL;
+
+ space = convert_port_to_space(kern_port);
+ if (IP_VALID(kern_port))
+ ipc_port_release_send(kern_port);
+
+ return space;
+ }
+}
+
+/*
+ * Hack to translate a thread port to a thread pointer for calling
+ * thread_get_state and thread_set_state. This is only necessary
+ * because the IPC message for these two operations overflows the
+ * kernel stack.
+ *
+ * AARGH!
+ */
+
+kern_return_t thread_get_state_KERNEL(thread_port, flavor,
+ old_state, old_state_count)
+ mach_port_t thread_port; /* port right for thread */
+ int flavor;
+ thread_state_t old_state; /* pointer to OUT array */
+ natural_t *old_state_count; /* IN/OUT */
+{
+ thread_t thread;
+ kern_return_t result;
+
+ thread = port_name_to_thread(thread_port);
+ result = thread_get_state(thread, flavor, old_state, old_state_count);
+ thread_deallocate(thread);
+
+ return result;
+}
+
+kern_return_t thread_set_state_KERNEL(thread_port, flavor,
+ new_state, new_state_count)
+ mach_port_t thread_port; /* port right for thread */
+ int flavor;
+ thread_state_t new_state;
+ natural_t new_state_count;
+{
+ thread_t thread;
+ kern_return_t result;
+
+ thread = port_name_to_thread(thread_port);
+ result = thread_set_state(thread, flavor, new_state, new_state_count);
+ thread_deallocate(thread);
+
+ return result;
+}
+
+/*
+ * Things to keep in mind:
+ *
+ * The idea here is to duplicate the semantics of the true kernel RPC.
+ * The destination port/object should be checked first, before anything
+ * that the user might notice (like ipc_object_copyin). Return
+ * MACH_SEND_INTERRUPTED if it isn't correct, so that the user stub
+ * knows to fall back on an RPC. For other return values, it won't
+ * retry with an RPC. The retry might get a different (incorrect) rc.
+ * Return values are only set (and should only be set, with copyout)
+ * on successful calls.
+ */
+
+kern_return_t
+syscall_vm_map(
+ mach_port_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ boolean_t anywhere,
+ mach_port_t memory_object,
+ vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_map_t map;
+ ipc_port_t port;
+ vm_offset_t addr;
+ kern_return_t result;
+
+ map = port_name_to_map(target_map);
+ if (map == VM_MAP_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ if (MACH_PORT_VALID(memory_object)) {
+ result = ipc_object_copyin(current_space(), memory_object,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &port);
+ if (result != KERN_SUCCESS) {
+ vm_map_deallocate(map);
+ return result;
+ }
+ } else
+ port = (ipc_port_t) memory_object;
+
+ copyin((char *)address, (char *)&addr, sizeof(vm_offset_t));
+ result = vm_map(map, &addr, size, mask, anywhere,
+ port, offset, copy,
+ cur_protection, max_protection, inheritance);
+ if (result == KERN_SUCCESS)
+ copyout((char *)&addr, (char *)address, sizeof(vm_offset_t));
+ if (IP_VALID(port))
+ ipc_port_release_send(port);
+ vm_map_deallocate(map);
+
+ return result;
+}
+
+kern_return_t syscall_vm_allocate(target_map, address, size, anywhere)
+ mach_port_t target_map;
+ vm_offset_t *address;
+ vm_size_t size;
+ boolean_t anywhere;
+{
+ vm_map_t map;
+ vm_offset_t addr;
+ kern_return_t result;
+
+ map = port_name_to_map(target_map);
+ if (map == VM_MAP_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ copyin((char *)address, (char *)&addr, sizeof(vm_offset_t));
+ result = vm_allocate(map, &addr, size, anywhere);
+ if (result == KERN_SUCCESS)
+ copyout((char *)&addr, (char *)address, sizeof(vm_offset_t));
+ vm_map_deallocate(map);
+
+ return result;
+}
+
+kern_return_t syscall_vm_deallocate(target_map, start, size)
+ mach_port_t target_map;
+ vm_offset_t start;
+ vm_size_t size;
+{
+ vm_map_t map;
+ kern_return_t result;
+
+ map = port_name_to_map(target_map);
+ if (map == VM_MAP_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ result = vm_deallocate(map, start, size);
+ vm_map_deallocate(map);
+
+ return result;
+}
+
+kern_return_t syscall_task_create(parent_task, inherit_memory, child_task)
+ mach_port_t parent_task;
+ boolean_t inherit_memory;
+ mach_port_t *child_task; /* OUT */
+{
+ task_t t, c;
+ ipc_port_t port;
+ mach_port_t name;
+ kern_return_t result;
+
+ t = port_name_to_task(parent_task);
+ if (t == TASK_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ result = task_create(t, inherit_memory, &c);
+ if (result == KERN_SUCCESS) {
+ port = (ipc_port_t) convert_task_to_port(c);
+ /* always returns a name, even for non-success return codes */
+ (void) ipc_kmsg_copyout_object(current_space(),
+ (ipc_object_t) port,
+ MACH_MSG_TYPE_PORT_SEND, &name);
+ copyout((char *)&name, (char *)child_task,
+ sizeof(mach_port_t));
+ }
+ task_deallocate(t);
+
+ return result;
+}
+
+kern_return_t syscall_task_terminate(task)
+ mach_port_t task;
+{
+ task_t t;
+ kern_return_t result;
+
+ t = port_name_to_task(task);
+ if (t == TASK_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ result = task_terminate(t);
+ task_deallocate(t);
+
+ return result;
+}
+
+kern_return_t syscall_task_suspend(task)
+ mach_port_t task;
+{
+ task_t t;
+ kern_return_t result;
+
+ t = port_name_to_task(task);
+ if (t == TASK_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ result = task_suspend(t);
+ task_deallocate(t);
+
+ return result;
+}
+
+kern_return_t syscall_task_set_special_port(task, which_port, port_name)
+ mach_port_t task;
+ int which_port;
+ mach_port_t port_name;
+{
+ task_t t;
+ ipc_port_t port;
+ kern_return_t result;
+
+ t = port_name_to_task(task);
+ if (t == TASK_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ if (MACH_PORT_VALID(port_name)) {
+ result = ipc_object_copyin(current_space(), port_name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *) &port);
+ if (result != KERN_SUCCESS) {
+ task_deallocate(t);
+ return result;
+ }
+ } else
+ port = (ipc_port_t) port_name;
+
+ result = task_set_special_port(t, which_port, port);
+ if ((result != KERN_SUCCESS) && IP_VALID(port))
+ ipc_port_release_send(port);
+ task_deallocate(t);
+
+ return result;
+}
+
+kern_return_t
+syscall_mach_port_allocate(task, right, namep)
+ mach_port_t task;
+ mach_port_right_t right;
+ mach_port_t *namep;
+{
+ ipc_space_t space;
+ mach_port_t name;
+ kern_return_t kr;
+
+ space = port_name_to_space(task);
+ if (space == IS_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ kr = mach_port_allocate(space, right, &name);
+ if (kr == KERN_SUCCESS)
+ copyout((char *)&name, (char *)namep, sizeof(mach_port_t));
+ is_release(space);
+
+ return kr;
+}
+
+kern_return_t
+syscall_mach_port_allocate_name(task, right, name)
+ mach_port_t task;
+ mach_port_right_t right;
+ mach_port_t name;
+{
+ ipc_space_t space;
+ kern_return_t kr;
+
+ space = port_name_to_space(task);
+ if (space == IS_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ kr = mach_port_allocate_name(space, right, name);
+ is_release(space);
+
+ return kr;
+}
+
+kern_return_t
+syscall_mach_port_deallocate(task, name)
+ mach_port_t task;
+ mach_port_t name;
+{
+ ipc_space_t space;
+ kern_return_t kr;
+
+ space = port_name_to_space(task);
+ if (space == IS_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ kr = mach_port_deallocate(space, name);
+ is_release(space);
+
+ return kr;
+}
+
+kern_return_t
+syscall_mach_port_insert_right(task, name, right, rightType)
+ mach_port_t task;
+ mach_port_t name;
+ mach_port_t right;
+ mach_msg_type_name_t rightType;
+{
+ ipc_space_t space;
+ ipc_object_t object;
+ mach_msg_type_name_t newtype;
+ kern_return_t kr;
+
+ space = port_name_to_space(task);
+ if (space == IS_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ if (!MACH_MSG_TYPE_PORT_ANY(rightType)) {
+ is_release(space);
+ return KERN_INVALID_VALUE;
+ }
+
+ if (MACH_PORT_VALID(right)) {
+ kr = ipc_object_copyin(current_space(), right, rightType,
+ &object);
+ if (kr != KERN_SUCCESS) {
+ is_release(space);
+ return kr;
+ }
+ } else
+ object = (ipc_object_t) right;
+ newtype = ipc_object_copyin_type(rightType);
+
+ kr = mach_port_insert_right(space, name, (ipc_port_t) object, newtype);
+ if ((kr != KERN_SUCCESS) && IO_VALID(object))
+ ipc_object_destroy(object, newtype);
+ is_release(space);
+
+ return kr;
+}
+
+kern_return_t syscall_thread_depress_abort(thread)
+ mach_port_t thread;
+{
+ thread_t t;
+ kern_return_t result;
+
+ t = port_name_to_thread(thread);
+ if (t == THREAD_NULL)
+ return MACH_SEND_INTERRUPTED;
+
+ result = thread_depress_abort(t);
+ thread_deallocate(t);
+
+ return result;
+}
+
+/*
+ * Device traps -- these are way experimental.
+ */
+
+extern io_return_t ds_device_write_trap();
+extern io_return_t ds_device_writev_trap();
+
+io_return_t
+syscall_device_write_request(mach_port_t device_name,
+ mach_port_t reply_name,
+ dev_mode_t mode,
+ recnum_t recnum,
+ vm_offset_t data,
+ vm_size_t data_count)
+{
+ device_t dev;
+ ipc_port_t reply_port;
+ io_return_t res;
+
+ /*
+ * First try to translate the device name.
+ *
+ * If this fails, return KERN_INVALID_CAPABILITY.
+ * Caller knows that this most likely means that
+ * device is not local to node and IPC should be used.
+ *
+ * If kernel doesn't do device traps, kern_invalid()
+ * will be called instead of this function which will
+ * return KERN_INVALID_ARGUMENT.
+ */
+ dev = port_name_to_device(device_name);
+ if (dev == DEVICE_NULL)
+ return KERN_INVALID_CAPABILITY;
+
+ /*
+ * Translate reply port.
+ */
+ if (reply_name == MACH_PORT_NULL)
+ reply_port = IP_NULL;
+ else {
+ /* Homey don't play that. */
+ device_deallocate(dev);
+ return KERN_INVALID_RIGHT;
+ }
+
+ /* note: doesn't take reply_port arg yet. */
+ res = ds_device_write_trap(dev, /*reply_port,*/
+ mode, recnum,
+ data, data_count);
+
+ /*
+ * Give up reference from port_name_to_device.
+ */
+ device_deallocate(dev);
+ return res;
+}
+
+io_return_t
+syscall_device_writev_request(mach_port_t device_name,
+ mach_port_t reply_name,
+ dev_mode_t mode,
+ recnum_t recnum,
+ io_buf_vec_t *iovec,
+ vm_size_t iocount)
+{
+ device_t dev;
+ ipc_port_t reply_port;
+ io_return_t res;
+
+ /*
+ * First try to translate the device name.
+ *
+ * If this fails, return KERN_INVALID_CAPABILITY.
+ * Caller knows that this most likely means that
+ * device is not local to node and IPC should be used.
+ *
+ * If kernel doesn't do device traps, kern_invalid()
+ * will be called instead of this function which will
+ * return KERN_INVALID_ARGUMENT.
+ */
+ dev = port_name_to_device(device_name);
+ if (dev == DEVICE_NULL)
+ return KERN_INVALID_CAPABILITY;
+
+ /*
+ * Translate reply port.
+ */
+ if (reply_name == MACH_PORT_NULL)
+ reply_port = IP_NULL;
+ else {
+ /* Homey don't play that. */
+ device_deallocate(dev);
+ return KERN_INVALID_RIGHT;
+ }
+
+ /* note: doesn't take reply_port arg yet. */
+ res = ds_device_writev_trap(dev, /*reply_port,*/
+ mode, recnum,
+ iovec, iocount);
+
+ /*
+ * Give up reference from port_name_to_device.
+ */
+ device_deallocate(dev);
+ return res;
+}
+
+
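
The "Things to keep in mind" comment above fixes the contract for these
syscall_* shortcuts: only MACH_SEND_INTERRUPTED tells the user-side stub to
retry the operation as a full RPC, and any other code is final.  A rough
stand-alone sketch of that fallback convention, with local stand-ins for the
trap, the RPC and the return codes:

    /* Sketch only: the names and codes are stand-ins, not Mach constants. */
    #include <stdio.h>

    #define OK               0
    #define RETRY_OVER_IPC (-1)     /* plays the role of MACH_SEND_INTERRUPTED */

    /* Fast path: succeeds only when the target resolves locally. */
    static int fast_trap(int target, int resolved_locally)
    {
        (void)target;
        if (!resolved_locally)
            return RETRY_OVER_IPC;  /* tell the stub to fall back */
        return OK;                  /* did the work without building a message */
    }

    /* Slow path: stands in for the full message-based RPC. */
    static int full_rpc(int target)
    {
        (void)target;
        return OK;
    }

    /* User-side stub: only RETRY_OVER_IPC triggers the retry. */
    static int call_stub(int target, int resolved_locally)
    {
        int kr = fast_trap(target, resolved_locally);
        if (kr == RETRY_OVER_IPC)
            kr = full_rpc(target);
        return kr;
    }

    int main(void)
    {
        printf("local: %d, remote: %d\n", call_stub(1, 1), call_stub(2, 0));
        return 0;
    }

This is why the vm and task shortcuts above return MACH_SEND_INTERRUPTED,
rather than a real error, when port_name_to_map() or port_name_to_task()
fails: the stub falls back and the message path produces the authoritative
result.
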
diff --git a/kern/ipc_sched.c b/kern/ipc_sched.c
new file mode 100644
index 00000000..a2f4c356
--- /dev/null
+++ b/kern/ipc_sched.c
@@ -0,0 +1,287 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993, 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <cpus.h>
+#include <mach_host.h>
+
+#include <mach/message.h>
+#include <kern/counters.h>
+#include "cpu_number.h"
+#include <kern/lock.h>
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#include <kern/processor.h>
+#include <kern/time_out.h>
+#include <kern/thread_swap.h>
+#include <kern/ipc_sched.h>
+#include <machine/machspl.h> /* for splsched/splx */
+#include <machine/pmap.h>
+
+
+
+/*
+ * These functions really belong in kern/sched_prim.c.
+ */
+
+/*
+ * Routine: thread_go
+ * Purpose:
+ * Start a thread running.
+ * Conditions:
+ * IPC locks may be held.
+ */
+
+void
+thread_go(
+ thread_t thread)
+{
+ int state;
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+
+ reset_timeout_check(&thread->timer);
+
+ state = thread->state;
+ switch (state & TH_SCHED_STATE) {
+
+ case TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_WAIT | TH_UNINT:
+ case TH_WAIT:
+ /*
+ * Sleeping and not suspendable - put
+ * on run queue.
+ */
+ thread->state = (state &~ TH_WAIT) | TH_RUN;
+ thread->wait_result = THREAD_AWAKENED;
+ thread_setrun(thread, TRUE);
+ break;
+
+ case TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ /*
+ * Either already running, or suspended.
+ */
+ thread->state = state & ~TH_WAIT;
+ thread->wait_result = THREAD_AWAKENED;
+ break;
+
+ default:
+ /*
+ * Not waiting.
+ */
+ break;
+ }
+
+ thread_unlock(thread);
+ splx(s);
+}
+
+/*
+ * Routine: thread_will_wait
+ * Purpose:
+ * Assert that the thread intends to block.
+ */
+
+void
+thread_will_wait(
+ thread_t thread)
+{
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+
+ assert(thread->wait_result = -1); /* for later assertions */
+ thread->state |= TH_WAIT;
+
+ thread_unlock(thread);
+ splx(s);
+}
+
+/*
+ * Routine: thread_will_wait_with_timeout
+ * Purpose:
+ * Assert that the thread intends to block,
+ * with a timeout.
+ */
+
+void
+thread_will_wait_with_timeout(
+ thread_t thread,
+ mach_msg_timeout_t msecs)
+{
+ natural_t ticks = convert_ipc_timeout_to_ticks(msecs);
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+
+ assert(thread->wait_result = -1); /* for later assertions */
+ thread->state |= TH_WAIT;
+
+ set_timeout(&thread->timer, ticks);
+
+ thread_unlock(thread);
+ splx(s);
+}
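
The three routines above are the two halves of the blocking handshake used by the IPC path: the side that is about to block declares its intent with thread_will_wait (or thread_will_wait_with_timeout) and then gives up the processor, while the side that delivers the event calls thread_go. A minimal sketch of that pairing follows; it assumes the continuation-style thread_block interface and is illustration only, not code from this change.

    #include <kern/thread.h>
    #include <kern/sched_prim.h>
    #include <kern/ipc_sched.h>

    /* Waiting side: declare intent to block, then block through the scheduler. */
    void example_wait_side(void)
    {
        thread_t self = current_thread();

        thread_will_wait(self);           /* sets TH_WAIT under the thread lock */
        thread_block((void (*)()) 0);     /* assumed continuation-style interface */
        /* When we run again, self->wait_result says why (e.g. THREAD_AWAKENED). */
    }

    /* Waking side: some other thread, holding a reference to the waiter. */
    void example_wake_side(thread_t waiter)
    {
        thread_go(waiter);                /* clears TH_WAIT, makes the waiter runnable */
    }
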
+
+#if MACH_HOST
+#define check_processor_set(thread) \
+ (current_processor()->processor_set == (thread)->processor_set)
+#else /* MACH_HOST */
+#define check_processor_set(thread) TRUE
+#endif /* MACH_HOST */
+
+#if NCPUS > 1
+#define check_bound_processor(thread) \
+ ((thread)->bound_processor == PROCESSOR_NULL || \
+ (thread)->bound_processor == current_processor())
+#else /* NCPUS > 1 */
+#define check_bound_processor(thread) TRUE
+#endif /* NCPUS > 1 */
+
+#ifdef CONTINUATIONS
+/*
+ * Routine: thread_handoff
+ * Purpose:
+ * Switch to a new thread (new), leaving the current
+ * thread (old) blocked. If successful, moves the
+ * kernel stack from old to new and returns as the
+ * new thread. An explicit continuation for the old thread
+ * must be supplied.
+ *
+ * NOTE: Although we wakeup new, we don't set new->wait_result.
+ * Returns:
+ * TRUE if the handoff happened.
+ */
+
+boolean_t
+thread_handoff(
+ register thread_t old,
+ register continuation_t continuation,
+ register thread_t new)
+{
+ spl_t s;
+
+ assert(current_thread() == old);
+
+ /*
+ * XXX Dubious things here:
+ * I don't check the idle_count on the processor set.
+ * No scheduling priority or policy checks.
+ * I assume the new thread is interruptible.
+ */
+
+ s = splsched();
+ thread_lock(new);
+
+ /*
+ * The first thing we must do is check the state
+ * of the threads, to ensure we can handoff.
+ * This check uses current_processor()->processor_set,
+ * which we can read without locking.
+ */
+
+ if ((old->stack_privilege == current_stack()) ||
+ (new->state != (TH_WAIT|TH_SWAPPED)) ||
+ !check_processor_set(new) ||
+ !check_bound_processor(new)) {
+ thread_unlock(new);
+ (void) splx(s);
+
+ counter_always(c_thread_handoff_misses++);
+ return FALSE;
+ }
+
+ reset_timeout_check(&new->timer);
+
+ new->state = TH_RUN;
+ thread_unlock(new);
+
+#if NCPUS > 1
+ new->last_processor = current_processor();
+#endif /* NCPUS > 1 */
+
+ ast_context(new, cpu_number());
+ timer_switch(&new->system_timer);
+
+ /*
+ * stack_handoff is machine-dependent. It does the
+ * machine-dependent components of a context-switch, like
+ * changing address spaces. It updates active_threads.
+ */
+
+ stack_handoff(old, new);
+
+ /*
+ * Now we must dispose of the old thread.
+ * This is like thread_continue, except
+ * that the old thread isn't waiting yet.
+ */
+
+ thread_lock(old);
+ old->swap_func = continuation;
+ assert(old->wait_result = -1); /* assignment intended: set for later assertions */
+
+ if (old->state == TH_RUN) {
+ /*
+ * This is our fast path.
+ */
+
+ old->state = TH_WAIT|TH_SWAPPED;
+ }
+ else if (old->state == (TH_RUN|TH_SUSP)) {
+ /*
+ * Somebody is trying to suspend the thread.
+ */
+
+ old->state = TH_WAIT|TH_SUSP|TH_SWAPPED;
+ if (old->wake_active) {
+ /*
+ * Someone wants to know when the thread
+ * really stops.
+ */
+ old->wake_active = FALSE;
+ thread_unlock(old);
+ thread_wakeup((event_t)&old->wake_active);
+ goto after_old_thread;
+ }
+ } else
+ panic("thread_handoff");
+
+ thread_unlock(old);
+ after_old_thread:
+ (void) splx(s);
+
+ counter_always(c_thread_handoff_hits++);
+ return TRUE;
+}
+#endif /* CONTINUATIONS */
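
thread_handoff either switches the kernel stack straight to the target or returns FALSE with the caller still running, so callers need a fallback through the ordinary wakeup-and-block path. The sketch below (only meaningful when CONTINUATIONS is configured) shows that pattern with hypothetical names; note that the current thread must still be plain TH_RUN when the handoff is attempted, which is why thread_will_wait is only called on the fallback path.

    #include <kern/thread.h>
    #include <kern/sched_prim.h>
    #include <kern/ipc_sched.h>

    /* Sketch: try to hand the processor directly to `receiver`. */
    void example_handoff_or_block(thread_t receiver, continuation_t cont)
    {
        thread_t self = current_thread();

        if (thread_handoff(self, cont, receiver)) {
            /*
             * Success: execution continues here, but as `receiver`;
             * `self` is now TH_WAIT|TH_SWAPPED and resumes later in `cont`.
             */
            return;
        }

        /*
         * Refused (wrong processor set, bound processor, or the receiver
         * was not in TH_WAIT|TH_SWAPPED): wake it normally and block.
         */
        thread_go(receiver);
        thread_will_wait(self);
        thread_block(cont);
    }
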
diff --git a/kern/ipc_sched.h b/kern/ipc_sched.h
new file mode 100644
index 00000000..bdee832a
--- /dev/null
+++ b/kern/ipc_sched.h
@@ -0,0 +1,32 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_IPC_SCHED_H_
+#define _KERN_IPC_SCHED_H_
+
+#include <kern/sched_prim.h>
+
+#endif /* _KERN_IPC_SCHED_H_ */
diff --git a/kern/ipc_tt.c b/kern/ipc_tt.c
new file mode 100644
index 00000000..b2e02d8c
--- /dev/null
+++ b/kern/ipc_tt.c
@@ -0,0 +1,1398 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc_tt.c
+ * Purpose:
+ * Task and thread related IPC functions.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/mach_param.h>
+#include <mach/task_special_ports.h>
+#include <mach/thread_special_ports.h>
+#include <vm/vm_kern.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/ipc_kobject.h>
+#include <kern/ipc_tt.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_object.h>
+
+
+
+/*
+ * Routine: ipc_task_init
+ * Purpose:
+ * Initialize a task's IPC state.
+ *
+ * If non-null, some state will be inherited from the parent.
+ * The parent must be appropriately initialized.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_task_init(
+ task_t task,
+ task_t parent)
+{
+ ipc_space_t space;
+ ipc_port_t kport;
+ kern_return_t kr;
+ int i;
+
+
+ kr = ipc_space_create(&ipc_table_entries[0], &space);
+ if (kr != KERN_SUCCESS)
+ panic("ipc_task_init");
+
+
+ kport = ipc_port_alloc_kernel();
+ if (kport == IP_NULL)
+ panic("ipc_task_init");
+
+ itk_lock_init(task);
+ task->itk_self = kport;
+ task->itk_sself = ipc_port_make_send(kport);
+ task->itk_space = space;
+
+ if (parent == TASK_NULL) {
+ task->itk_exception = IP_NULL;
+ task->itk_bootstrap = IP_NULL;
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+ task->itk_registered[i] = IP_NULL;
+ } else {
+ itk_lock(parent);
+ assert(parent->itk_self != IP_NULL);
+
+ /* inherit registered ports */
+
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+ task->itk_registered[i] =
+ ipc_port_copy_send(parent->itk_registered[i]);
+
+ /* inherit exception and bootstrap ports */
+
+ task->itk_exception =
+ ipc_port_copy_send(parent->itk_exception);
+ task->itk_bootstrap =
+ ipc_port_copy_send(parent->itk_bootstrap);
+
+ itk_unlock(parent);
+ }
+}
+
+/*
+ * Routine: ipc_task_enable
+ * Purpose:
+ * Enable a task for IPC access.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_task_enable(
+ task_t task)
+{
+ ipc_port_t kport;
+
+ itk_lock(task);
+ kport = task->itk_self;
+ if (kport != IP_NULL)
+ ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
+ itk_unlock(task);
+}
+
+/*
+ * Routine: ipc_task_disable
+ * Purpose:
+ * Disable IPC access to a task.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_task_disable(
+ task_t task)
+{
+ ipc_port_t kport;
+
+ itk_lock(task);
+ kport = task->itk_self;
+ if (kport != IP_NULL)
+ ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
+ itk_unlock(task);
+}
+
+/*
+ * Routine: ipc_task_terminate
+ * Purpose:
+ * Clean up and destroy a task's IPC state.
+ * Conditions:
+ * Nothing locked. The task must be suspended.
+ * (Or the current thread must be in the task.)
+ */
+
+void
+ipc_task_terminate(
+ task_t task)
+{
+ ipc_port_t kport;
+ int i;
+
+ itk_lock(task);
+ kport = task->itk_self;
+
+ if (kport == IP_NULL) {
+ /* the task is already terminated (can this happen?) */
+ itk_unlock(task);
+ return;
+ }
+
+ task->itk_self = IP_NULL;
+ itk_unlock(task);
+
+ /* release the naked send rights */
+
+ if (IP_VALID(task->itk_sself))
+ ipc_port_release_send(task->itk_sself);
+ if (IP_VALID(task->itk_exception))
+ ipc_port_release_send(task->itk_exception);
+ if (IP_VALID(task->itk_bootstrap))
+ ipc_port_release_send(task->itk_bootstrap);
+
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+ if (IP_VALID(task->itk_registered[i]))
+ ipc_port_release_send(task->itk_registered[i]);
+
+ /* destroy the space, leaving just a reference for it */
+
+ ipc_space_destroy(task->itk_space);
+
+ /* destroy the kernel port */
+
+ ipc_port_dealloc_kernel(kport);
+}
+
+/*
+ * Routine: ipc_thread_init
+ * Purpose:
+ * Initialize a thread's IPC state.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_thread_init(thread)
+ thread_t thread;
+{
+ ipc_port_t kport;
+
+ kport = ipc_port_alloc_kernel();
+ if (kport == IP_NULL)
+ panic("ipc_thread_init");
+
+ ipc_thread_links_init(thread);
+ ipc_kmsg_queue_init(&thread->ith_messages);
+
+ ith_lock_init(thread);
+ thread->ith_self = kport;
+ thread->ith_sself = ipc_port_make_send(kport);
+ thread->ith_exception = IP_NULL;
+
+ thread->ith_mig_reply = MACH_PORT_NULL;
+ thread->ith_rpc_reply = IP_NULL;
+
+#if MACH_IPC_COMPAT
+ {
+ ipc_space_t space = thread->task->itk_space;
+ ipc_port_t port;
+ mach_port_t name;
+ kern_return_t kr;
+
+ kr = ipc_port_alloc_compat(space, &name, &port);
+ if (kr != KERN_SUCCESS)
+ panic("ipc_thread_init");
+ /* port is locked and active */
+
+ /*
+ * Now we have a reply port. We need to make a naked
+ * send right to stash in ith_reply. We can't use
+ * ipc_port_make_send, because we can't unlock the port
+ * before making the right. Also we don't want to
+ * increment ip_mscount. The net effect of all this
+ * is the same as doing
+ * ipc_port_alloc_kernel get the port
+ * ipc_port_make_send make the send right
+ * ipc_object_copyin_from_kernel grab receive right
+ * ipc_object_copyout_compat and give to user
+ */
+
+ port->ip_srights++;
+ ip_reference(port);
+ ip_unlock(port);
+
+ thread->ith_reply = port;
+ }
+#endif /* MACH_IPC_COMPAT */
+}
+
+/*
+ * Routine: ipc_thread_enable
+ * Purpose:
+ * Enable a thread for IPC access.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_thread_enable(thread)
+ thread_t thread;
+{
+ ipc_port_t kport;
+
+ ith_lock(thread);
+ kport = thread->ith_self;
+ if (kport != IP_NULL)
+ ipc_kobject_set(kport, (ipc_kobject_t) thread, IKOT_THREAD);
+ ith_unlock(thread);
+}
+
+/*
+ * Routine: ipc_thread_disable
+ * Purpose:
+ * Disable IPC access to a thread.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_thread_disable(thread)
+ thread_t thread;
+{
+ ipc_port_t kport;
+
+ ith_lock(thread);
+ kport = thread->ith_self;
+ if (kport != IP_NULL)
+ ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
+ ith_unlock(thread);
+}
+
+/*
+ * Routine: ipc_thread_terminate
+ * Purpose:
+ * Clean up and destroy a thread's IPC state.
+ * Conditions:
+ * Nothing locked. The thread must be suspended.
+ * (Or be the current thread.)
+ */
+
+void
+ipc_thread_terminate(thread)
+ thread_t thread;
+{
+ ipc_port_t kport;
+
+ ith_lock(thread);
+ kport = thread->ith_self;
+
+ if (kport == IP_NULL) {
+ /* the thread is already terminated (can this happen?) */
+ ith_unlock(thread);
+ return;
+ }
+
+ thread->ith_self = IP_NULL;
+ ith_unlock(thread);
+
+ assert(ipc_kmsg_queue_empty(&thread->ith_messages));
+
+ /* release the naked send rights */
+
+ if (IP_VALID(thread->ith_sself))
+ ipc_port_release_send(thread->ith_sself);
+ if (IP_VALID(thread->ith_exception))
+ ipc_port_release_send(thread->ith_exception);
+
+#if MACH_IPC_COMPAT
+ if (IP_VALID(thread->ith_reply)) {
+ ipc_space_t space = thread->task->itk_space;
+ ipc_port_t port = thread->ith_reply;
+ ipc_entry_t entry;
+ mach_port_t name;
+
+ /* destroy any rights the task may have for the port */
+
+ is_write_lock(space);
+ if (space->is_active &&
+ ipc_right_reverse(space, (ipc_object_t) port,
+ &name, &entry)) {
+ /* reply port is locked and active */
+ ip_unlock(port);
+
+ (void) ipc_right_destroy(space, name, entry);
+ /* space is unlocked */
+ } else
+ is_write_unlock(space);
+
+ ipc_port_release_send(port);
+ }
+
+ /*
+ * Note we do *not* destroy any rights the space may have
+ * for the thread's kernel port. The old IPC code did this,
+ * to avoid generating a notification when the port is
+ * destroyed. However, this isn't a good idea when
+ * the kernel port is interposed, because then the notification
+ * doesn't happen, exposing the interposition to the task.
+ * Because we don't need the efficiency hack, I flushed
+ * this behaviour, introducing a small incompatibility
+ * with the old IPC code.
+ */
+#endif /* MACH_IPC_COMPAT */
+
+ /* destroy the kernel port */
+
+ ipc_port_dealloc_kernel(kport);
+}
+
+#if 0
+/*
+ * Routine: retrieve_task_self
+ * Purpose:
+ * Return a send right (possibly null/dead)
+ * for the task's user-visible self port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_task_self(task)
+ task_t task;
+{
+ ipc_port_t port;
+
+ assert(task != TASK_NULL);
+
+ itk_lock(task);
+ if (task->itk_self != IP_NULL)
+ port = ipc_port_copy_send(task->itk_sself);
+ else
+ port = IP_NULL;
+ itk_unlock(task);
+
+ return port;
+}
+
+/*
+ * Routine: retrieve_thread_self
+ * Purpose:
+ * Return a send right (possibly null/dead)
+ * for the thread's user-visible self port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_thread_self(thread)
+ thread_t thread;
+{
+ ipc_port_t port;
+
+ assert(thread != ITH_NULL);
+
+ ith_lock(thread);
+ if (thread->ith_self != IP_NULL)
+ port = ipc_port_copy_send(thread->ith_sself);
+ else
+ port = IP_NULL;
+ ith_unlock(thread);
+
+ return port;
+}
+#endif /* 0 */
+
+/*
+ * Routine: retrieve_task_self_fast
+ * Purpose:
+ * Optimized version of retrieve_task_self,
+ * that only works for the current task.
+ *
+ * Return a send right (possibly null/dead)
+ * for the task's user-visible self port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_task_self_fast(
+ register task_t task)
+{
+ register ipc_port_t port;
+
+ assert(task == current_task());
+
+ itk_lock(task);
+ assert(task->itk_self != IP_NULL);
+
+ if ((port = task->itk_sself) == task->itk_self) {
+ /* no interposing */
+
+ ip_lock(port);
+ assert(ip_active(port));
+ ip_reference(port);
+ port->ip_srights++;
+ ip_unlock(port);
+ } else
+ port = ipc_port_copy_send(port);
+ itk_unlock(task);
+
+ return port;
+}
+
+/*
+ * Routine: retrieve_thread_self_fast
+ * Purpose:
+ * Optimized version of retrieve_thread_self,
+ * that only works for the current thread.
+ *
+ * Return a send right (possibly null/dead)
+ * for the thread's user-visible self port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_thread_self_fast(thread)
+ register thread_t thread;
+{
+ register ipc_port_t port;
+
+ assert(thread == current_thread());
+
+ ith_lock(thread);
+ assert(thread->ith_self != IP_NULL);
+
+ if ((port = thread->ith_sself) == thread->ith_self) {
+ /* no interposing */
+
+ ip_lock(port);
+ assert(ip_active(port));
+ ip_reference(port);
+ port->ip_srights++;
+ ip_unlock(port);
+ } else
+ port = ipc_port_copy_send(port);
+ ith_unlock(thread);
+
+ return port;
+}
+
+#if 0
+/*
+ * Routine: retrieve_task_exception
+ * Purpose:
+ * Return a send right (possibly null/dead)
+ * for the task's exception port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_task_exception(task)
+ task_t task;
+{
+ ipc_port_t port;
+
+ assert(task != TASK_NULL);
+
+ itk_lock(task);
+ if (task->itk_self != IP_NULL)
+ port = ipc_port_copy_send(task->itk_exception);
+ else
+ port = IP_NULL;
+ itk_unlock(task);
+
+ return port;
+}
+
+/*
+ * Routine: retrieve_thread_exception
+ * Purpose:
+ * Return a send right (possibly null/dead)
+ * for the thread's exception port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_thread_exception(thread)
+ thread_t thread;
+{
+ ipc_port_t port;
+
+ assert(thread != ITH_NULL);
+
+ ith_lock(thread);
+ if (thread->ith_self != IP_NULL)
+ port = ipc_port_copy_send(thread->ith_exception);
+ else
+ port = IP_NULL;
+ ith_unlock(thread);
+
+ return port;
+}
+#endif /* 0 */
+
+/*
+ * Routine: mach_task_self [mach trap]
+ * Purpose:
+ * Give the caller send rights for his own task port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+mach_port_t
+mach_task_self(void)
+{
+ task_t task = current_task();
+ ipc_port_t sright;
+
+ sright = retrieve_task_self_fast(task);
+ return ipc_port_copyout_send(sright, task->itk_space);
+}
+
+/*
+ * Routine: mach_thread_self [mach trap]
+ * Purpose:
+ * Give the caller send rights for his own thread port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+mach_port_t
+mach_thread_self()
+{
+ thread_t thread = current_thread();
+ task_t task = thread->task;
+ ipc_port_t sright;
+
+ sright = retrieve_thread_self_fast(thread);
+ return ipc_port_copyout_send(sright, task->itk_space);
+}
+
+/*
+ * Routine: mach_reply_port [mach trap]
+ * Purpose:
+ * Allocate a port for the caller.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+mach_port_t
+mach_reply_port(void)
+{
+ ipc_port_t port;
+ mach_port_t name;
+ kern_return_t kr;
+
+ kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
+ if (kr == KERN_SUCCESS)
+ ip_unlock(port);
+ else
+ name = MACH_PORT_NULL;
+
+ return name;
+}
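
From user space these three traps are how a program names itself and obtains a scratch receive right. A small user-level sketch, assuming the standard <mach.h> bindings; the cleanup call at the end is only there to show that the reply port is an ordinary right the task owns.

    #include <mach.h>
    #include <stdio.h>

    int main(void)
    {
        mach_port_t task   = mach_task_self();    /* send right for our task port */
        mach_port_t thread = mach_thread_self();  /* send right for our thread port */
        mach_port_t reply  = mach_reply_port();   /* fresh receive right */

        if (reply == MACH_PORT_NULL)
            return 1;                             /* resource failure */

        printf("task %u thread %u reply %u\n",
               (unsigned) task, (unsigned) thread, (unsigned) reply);

        /* Release the scratch receive right when done with it. */
        (void) mach_port_destroy(task, reply);
        return 0;
    }
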
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: retrieve_task_notify
+ * Purpose:
+ * Return a reference (or null) for
+ * the task's notify port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_task_notify(task)
+ task_t task;
+{
+ ipc_space_t space = task->itk_space;
+ ipc_port_t port;
+
+ is_read_lock(space);
+ if (space->is_active) {
+ port = space->is_notify;
+ if (IP_VALID(port))
+ ipc_port_reference(port);
+ } else
+ port = IP_NULL;
+ is_read_unlock(space);
+
+ return port;
+}
+
+/*
+ * Routine: retrieve_thread_reply
+ * Purpose:
+ * Return a reference (or null) for
+ * the thread's reply port.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+retrieve_thread_reply(thread)
+ thread_t thread;
+{
+ ipc_port_t port;
+
+ ith_lock(thread);
+ if (thread->ith_self != IP_NULL) {
+ port = thread->ith_reply;
+ if (IP_VALID(port))
+ ipc_port_reference(port);
+ } else
+ port = IP_NULL;
+ ith_unlock(thread);
+
+ return port;
+}
+
+/*
+ * Routine: task_self [mach trap]
+ * Purpose:
+ * Give the caller send rights for his task port.
+ * If new, the send right is marked with IE_BITS_COMPAT.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+port_name_t
+task_self()
+{
+ task_t task = current_task();
+ ipc_port_t sright;
+ mach_port_t name;
+
+ sright = retrieve_task_self_fast(task);
+ name = ipc_port_copyout_send_compat(sright, task->itk_space);
+ return (port_name_t) name;
+}
+
+/*
+ * Routine: task_notify [mach trap]
+ * Purpose:
+ * Give the caller the name of his own notify port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there isn't a notify port,
+ * if it is dead, or if the caller doesn't hold
+ * receive rights for it.
+ */
+
+port_name_t
+task_notify()
+{
+ task_t task = current_task();
+ ipc_port_t notify;
+ mach_port_t name;
+
+ notify = retrieve_task_notify(task);
+ name = ipc_port_copyout_receiver(notify, task->itk_space);
+ return (port_name_t) name;
+}
+
+/*
+ * Routine: thread_self [mach trap]
+ * Purpose:
+ * Give the caller send rights for his own thread port.
+ * If new, the send right is marked with IE_BITS_COMPAT.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+port_name_t
+thread_self()
+{
+ thread_t thread = current_thread();
+ task_t task = thread->task;
+ ipc_port_t sright;
+ mach_port_t name;
+
+ sright = retrieve_thread_self_fast(thread);
+ name = ipc_port_copyout_send_compat(sright, task->itk_space);
+ return (port_name_t) name;
+}
+
+/*
+ * Routine: thread_reply [mach trap]
+ * Purpose:
+ * Give the caller the name of his own reply port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there isn't a reply port,
+ * if it is dead, or if the caller doesn't hold
+ * receive rights for it.
+ */
+
+port_name_t
+thread_reply()
+{
+ task_t task = current_task();
+ thread_t thread = current_thread();
+ ipc_port_t reply;
+ mach_port_t name;
+
+ reply = retrieve_thread_reply(thread);
+ name = ipc_port_copyout_receiver(reply, task->itk_space);
+ return (port_name_t) name;
+}
+
+#endif /* MACH_IPC_COMPAT */
+
+/*
+ * Routine: task_get_special_port [kernel call]
+ * Purpose:
+ * Clones a send right for one of the task's
+ * special ports.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted a send right.
+ * KERN_INVALID_ARGUMENT The task is null.
+ * KERN_FAILURE The task/space is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+task_get_special_port(
+ task_t task,
+ int which,
+ ipc_port_t *portp)
+{
+ ipc_port_t *whichp;
+ ipc_port_t port;
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+#if MACH_IPC_COMPAT
+ case TASK_NOTIFY_PORT: {
+ ipc_space_t space = task->itk_space;
+
+ is_read_lock(space);
+ if (!space->is_active) {
+ is_read_unlock(space);
+ return KERN_FAILURE;
+ }
+
+ port = ipc_port_copy_send(space->is_notify);
+ is_read_unlock(space);
+
+ *portp = port;
+ return KERN_SUCCESS;
+ }
+#endif /* MACH_IPC_COMPAT */
+
+ case TASK_KERNEL_PORT:
+ whichp = &task->itk_sself;
+ break;
+
+ case TASK_EXCEPTION_PORT:
+ whichp = &task->itk_exception;
+ break;
+
+ case TASK_BOOTSTRAP_PORT:
+ whichp = &task->itk_bootstrap;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ itk_lock(task);
+ if (task->itk_self == IP_NULL) {
+ itk_unlock(task);
+ return KERN_FAILURE;
+ }
+
+ port = ipc_port_copy_send(*whichp);
+ itk_unlock(task);
+
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: task_set_special_port [kernel call]
+ * Purpose:
+ * Changes one of the task's special ports,
+ * setting it to the supplied send right.
+ * Conditions:
+ * Nothing locked. If successful, consumes
+ * the supplied send right.
+ * Returns:
+ * KERN_SUCCESS Changed the special port.
+ * KERN_INVALID_ARGUMENT The task is null.
+ * KERN_FAILURE The task/space is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+task_set_special_port(
+ task_t task,
+ int which,
+ ipc_port_t port)
+{
+ ipc_port_t *whichp;
+ ipc_port_t old;
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+#if MACH_IPC_COMPAT
+ case TASK_NOTIFY_PORT: {
+ ipc_space_t space = task->itk_space;
+
+ is_write_lock(space);
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return KERN_FAILURE;
+ }
+
+ old = space->is_notify;
+ space->is_notify = port;
+ is_write_unlock(space);
+
+ if (IP_VALID(old))
+ ipc_port_release_send(old);
+ return KERN_SUCCESS;
+ }
+#endif /* MACH_IPC_COMPAT */
+
+ case TASK_KERNEL_PORT:
+ whichp = &task->itk_sself;
+ break;
+
+ case TASK_EXCEPTION_PORT:
+ whichp = &task->itk_exception;
+ break;
+
+ case TASK_BOOTSTRAP_PORT:
+ whichp = &task->itk_bootstrap;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ itk_lock(task);
+ if (task->itk_self == IP_NULL) {
+ itk_unlock(task);
+ return KERN_FAILURE;
+ }
+
+ old = *whichp;
+ *whichp = port;
+ itk_unlock(task);
+
+ if (IP_VALID(old))
+ ipc_port_release_send(old);
+ return KERN_SUCCESS;
+}
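
Seen from a client, the pair of kernel calls above just reads or replaces one of the three named slots, with the send right consumed on a successful set. A hedged user-level sketch (variable names are illustrative):

    #include <mach.h>
    #include <mach/task_special_ports.h>

    kern_return_t example_special_ports(mach_port_t new_exc_port)
    {
        mach_port_t self = mach_task_self();
        mach_port_t bootstrap;
        kern_return_t kr;

        /* Clone a send right for the inherited bootstrap port. */
        kr = task_get_special_port(self, TASK_BOOTSTRAP_PORT, &bootstrap);
        if (kr != KERN_SUCCESS)
            return kr;

        /* Install a new exception port; on success the kernel consumes
         * the send right we pass in. */
        return task_set_special_port(self, TASK_EXCEPTION_PORT, new_exc_port);
    }
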
+
+/*
+ * Routine: thread_get_special_port [kernel call]
+ * Purpose:
+ * Clones a send right for one of the thread's
+ * special ports.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted a send right.
+ * KERN_INVALID_ARGUMENT The thread is null.
+ * KERN_FAILURE The thread is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+thread_get_special_port(thread, which, portp)
+ thread_t thread;
+ int which;
+ ipc_port_t *portp;
+{
+ ipc_port_t *whichp;
+ ipc_port_t port;
+
+ if (thread == ITH_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+#if MACH_IPC_COMPAT
+ case THREAD_REPLY_PORT:
+ whichp = &thread->ith_reply;
+ break;
+#endif /* MACH_IPC_COMPAT */
+
+ case THREAD_KERNEL_PORT:
+ whichp = &thread->ith_sself;
+ break;
+
+ case THREAD_EXCEPTION_PORT:
+ whichp = &thread->ith_exception;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ ith_lock(thread);
+ if (thread->ith_self == IP_NULL) {
+ ith_unlock(thread);
+ return KERN_FAILURE;
+ }
+
+ port = ipc_port_copy_send(*whichp);
+ ith_unlock(thread);
+
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: thread_set_special_port [kernel call]
+ * Purpose:
+ * Changes one of the thread's special ports,
+ * setting it to the supplied send right.
+ * Conditions:
+ * Nothing locked. If successful, consumes
+ * the supplied send right.
+ * Returns:
+ * KERN_SUCCESS Changed the special port.
+ * KERN_INVALID_ARGUMENT The thread is null.
+ * KERN_FAILURE The thread is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ */
+
+kern_return_t
+thread_set_special_port(thread, which, port)
+ thread_t thread;
+ int which;
+ ipc_port_t port;
+{
+ ipc_port_t *whichp;
+ ipc_port_t old;
+
+ if (thread == ITH_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (which) {
+#if MACH_IPC_COMPAT
+ case THREAD_REPLY_PORT:
+ whichp = &thread->ith_reply;
+ break;
+#endif /* MACH_IPC_COMPAT */
+
+ case THREAD_KERNEL_PORT:
+ whichp = &thread->ith_sself;
+ break;
+
+ case THREAD_EXCEPTION_PORT:
+ whichp = &thread->ith_exception;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ ith_lock(thread);
+ if (thread->ith_self == IP_NULL) {
+ ith_unlock(thread);
+ return KERN_FAILURE;
+ }
+
+ old = *whichp;
+ *whichp = port;
+ ith_unlock(thread);
+
+ if (IP_VALID(old))
+ ipc_port_release_send(old);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_ports_register [kernel call]
+ * Purpose:
+ * Stash a handful of port send rights in the task.
+ * Child tasks will inherit these rights, but they
+ * must use mach_ports_lookup to acquire them.
+ *
+ * The rights are supplied in a (wired) kalloc'd segment.
+ * Rights which aren't supplied are assumed to be null.
+ * Conditions:
+ * Nothing locked. If successful, consumes
+ * the supplied rights and memory.
+ * Returns:
+ * KERN_SUCCESS Stashed the port rights.
+ * KERN_INVALID_ARGUMENT The task is null.
+ * KERN_INVALID_ARGUMENT The task is dead.
+ * KERN_INVALID_ARGUMENT Too many port rights supplied.
+ */
+
+kern_return_t
+mach_ports_register(
+ task_t task,
+ mach_port_array_t memory,
+ mach_msg_type_number_t portsCnt)
+{
+ ipc_port_t ports[TASK_PORT_REGISTER_MAX];
+ int i;
+
+ if ((task == TASK_NULL) ||
+ (portsCnt > TASK_PORT_REGISTER_MAX))
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Pad the port rights with nulls.
+ */
+
+ for (i = 0; i < portsCnt; i++)
+ ports[i] = memory[i];
+ for (; i < TASK_PORT_REGISTER_MAX; i++)
+ ports[i] = IP_NULL;
+
+ itk_lock(task);
+ if (task->itk_self == IP_NULL) {
+ itk_unlock(task);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Replace the old send rights with the new.
+ * Release the old rights after unlocking.
+ */
+
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
+ ipc_port_t old;
+
+ old = task->itk_registered[i];
+ task->itk_registered[i] = ports[i];
+ ports[i] = old;
+ }
+
+ itk_unlock(task);
+
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+ if (IP_VALID(ports[i]))
+ ipc_port_release_send(ports[i]);
+
+ /*
+ * Now that the operation is known to be successful,
+ * we can free the memory.
+ */
+
+ if (portsCnt != 0)
+ kfree((vm_offset_t) memory,
+ (vm_size_t) (portsCnt * sizeof(mach_port_t)));
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_ports_lookup [kernel call]
+ * Purpose:
+ * Retrieves (clones) the stashed port send rights.
+ * Conditions:
+ * Nothing locked. If successful, the caller gets
+ * rights and memory.
+ * Returns:
+ * KERN_SUCCESS Retrieved the send rights.
+ * KERN_INVALID_ARGUMENT The task is null.
+ * KERN_INVALID_ARGUMENT The task is dead.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_ports_lookup(task, portsp, portsCnt)
+ task_t task;
+ ipc_port_t **portsp;
+ mach_msg_type_number_t *portsCnt;
+{
+ vm_offset_t memory;
+ vm_size_t size;
+ ipc_port_t *ports;
+ int i;
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
+
+ memory = kalloc(size);
+ if (memory == 0)
+ return KERN_RESOURCE_SHORTAGE;
+
+ itk_lock(task);
+ if (task->itk_self == IP_NULL) {
+ itk_unlock(task);
+
+ kfree(memory, size);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ ports = (ipc_port_t *) memory;
+
+ /*
+ * Clone port rights. Because kalloc'd memory
+ * is wired, we won't fault while holding the task lock.
+ */
+
+ for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+ ports[i] = ipc_port_copy_send(task->itk_registered[i]);
+
+ itk_unlock(task);
+
+ *portsp = (mach_port_array_t) ports;
+ *portsCnt = TASK_PORT_REGISTER_MAX;
+ return KERN_SUCCESS;
+}
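
Together, mach_ports_register and mach_ports_lookup let a parent stash up to TASK_PORT_REGISTER_MAX well-known send rights that its children can clone back out. A user-level sketch of both sides, assuming the usual MIG-generated stubs; the looked-up array arrives as out-of-line memory that the caller is expected to vm_deallocate.

    #include <mach.h>

    /* Parent side: register one port; the remaining slots default to null. */
    kern_return_t example_register(mach_port_t port)
    {
        return mach_ports_register(mach_task_self(), &port, 1);
    }

    /* Child side: fetch whatever was registered or inherited. */
    kern_return_t example_lookup(mach_port_t *first_port)
    {
        mach_port_array_t ports;
        mach_msg_type_number_t count;
        kern_return_t kr;

        kr = mach_ports_lookup(mach_task_self(), &ports, &count);
        if (kr != KERN_SUCCESS)
            return kr;

        *first_port = ports[0];           /* possibly MACH_PORT_NULL */

        /* The array itself now belongs to us. */
        (void) vm_deallocate(mach_task_self(), (vm_address_t) ports,
                             count * sizeof(mach_port_t));
        return KERN_SUCCESS;
    }
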
+
+/*
+ * Routine: convert_port_to_task
+ * Purpose:
+ * Convert from a port to a task.
+ * Doesn't consume the port ref; produces a task ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+task_t
+convert_port_to_task(
+ ipc_port_t port)
+{
+ task_t task = TASK_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ task = (task_t) port->ip_kobject;
+ task_reference(task);
+ }
+ ip_unlock(port);
+ }
+
+ return task;
+}
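
The convert_port_to_* routines all follow the same reference convention: the port reference is untouched and a new object reference is produced, so every successful conversion has to be balanced by the matching deallocate. A small in-kernel sketch of that pairing (the surrounding routine is hypothetical):

    #include <mach/kern_return.h>
    #include <kern/task.h>
    #include <kern/ipc_tt.h>
    #include <ipc/ipc_port.h>

    /* Sketch: use a task converted from a port, then drop our reference. */
    kern_return_t example_use_task_port(ipc_port_t port)
    {
        task_t task = convert_port_to_task(port);

        if (task == TASK_NULL)
            return KERN_INVALID_ARGUMENT;

        /* ... operate on `task` while holding our reference ... */

        task_deallocate(task);            /* balance the task_reference above */
        return KERN_SUCCESS;
    }
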
+
+/*
+ * Routine: convert_port_to_space
+ * Purpose:
+ * Convert from a port to a space.
+ * Doesn't consume the port ref; produces a space ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_space_t
+convert_port_to_space(
+ ipc_port_t port)
+{
+ ipc_space_t space = IS_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ space = ((task_t) port->ip_kobject)->itk_space;
+ is_reference(space);
+ }
+ ip_unlock(port);
+ }
+
+ return space;
+}
+
+/*
+ * Routine: convert_port_to_map
+ * Purpose:
+ * Convert from a port to a map.
+ * Doesn't consume the port ref; produces a map ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+vm_map_t
+convert_port_to_map(port)
+ ipc_port_t port;
+{
+ vm_map_t map = VM_MAP_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK)) {
+ map = ((task_t) port->ip_kobject)->map;
+ vm_map_reference(map);
+ }
+ ip_unlock(port);
+ }
+
+ return map;
+}
+
+/*
+ * Routine: convert_port_to_thread
+ * Purpose:
+ * Convert from a port to a thread.
+ * Doesn't consume the port ref; produces a thread ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+
+thread_t
+convert_port_to_thread(port)
+ ipc_port_t port;
+{
+ thread_t thread = THREAD_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_THREAD)) {
+ thread = (thread_t) port->ip_kobject;
+ thread_reference(thread);
+ }
+ ip_unlock(port);
+ }
+
+ return thread;
+}
+
+/*
+ * Routine: convert_task_to_port
+ * Purpose:
+ * Convert from a task to a port.
+ * Consumes a task ref; produces a naked send right
+ * which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_task_to_port(task)
+ task_t task;
+{
+ ipc_port_t port;
+
+ itk_lock(task);
+ if (task->itk_self != IP_NULL)
+ port = ipc_port_make_send(task->itk_self);
+ else
+ port = IP_NULL;
+ itk_unlock(task);
+
+ task_deallocate(task);
+ return port;
+}
+
+/*
+ * Routine: convert_thread_to_port
+ * Purpose:
+ * Convert from a thread to a port.
+ * Consumes a thread ref; produces a naked send right
+ * which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_thread_to_port(thread)
+ thread_t thread;
+{
+ ipc_port_t port;
+
+ ith_lock(thread);
+ if (thread->ith_self != IP_NULL)
+ port = ipc_port_make_send(thread->ith_self);
+ else
+ port = IP_NULL;
+ ith_unlock(thread);
+
+ thread_deallocate(thread);
+ return port;
+}
+
+/*
+ * Routine: space_deallocate
+ * Purpose:
+ * Deallocate a space ref produced by convert_port_to_space.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+space_deallocate(space)
+ ipc_space_t space;
+{
+ if (space != IS_NULL)
+ is_release(space);
+}
diff --git a/kern/ipc_tt.h b/kern/ipc_tt.h
new file mode 100644
index 00000000..d53fb7ca
--- /dev/null
+++ b/kern/ipc_tt.h
@@ -0,0 +1,88 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_IPC_TT_H_
+#define _KERN_IPC_TT_H_
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+
+extern void ipc_task_init();
+extern void ipc_task_enable();
+extern void ipc_task_disable();
+extern void ipc_task_terminate();
+
+extern void ipc_thread_init();
+extern void ipc_thread_enable();
+extern void ipc_thread_disable();
+extern void ipc_thread_terminate();
+
+extern struct ipc_port *
+retrieve_task_self(/* task_t */);
+
+extern struct ipc_port *
+retrieve_task_self_fast(/* task_t */);
+
+extern struct ipc_port *
+retrieve_thread_self(/* thread_t */);
+
+extern struct ipc_port *
+retrieve_thread_self_fast(/* thread_t */);
+
+extern struct ipc_port *
+retrieve_task_exception(/* task_t */);
+
+extern struct ipc_port *
+retrieve_thread_exception(/* thread_t */);
+
+extern struct task *
+convert_port_to_task(/* struct ipc_port * */);
+
+extern struct ipc_port *
+convert_task_to_port(/* task_t */);
+
+extern void
+task_deallocate(/* task_t */);
+
+extern struct thread *
+convert_port_to_thread(/* struct ipc_port * */);
+
+extern struct ipc_port *
+convert_thread_to_port(/* thread_t */);
+
+extern void
+thread_deallocate(/* thread_t */);
+
+extern struct vm_map *
+convert_port_to_map(/* struct ipc_port * */);
+
+extern struct ipc_space *
+convert_port_to_space(/* struct ipc_port * */);
+
+extern void
+space_deallocate(/* ipc_space_t */);
+
+#endif /* _KERN_IPC_TT_H_ */
diff --git a/kern/kalloc.c b/kern/kalloc.c
new file mode 100644
index 00000000..53901399
--- /dev/null
+++ b/kern/kalloc.c
@@ -0,0 +1,237 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/kalloc.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * General kernel memory allocator. This allocator is designed
+ * to be used by the kernel to manage dynamic memory fast.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <mach/vm_param.h>
+
+#include <kern/zalloc.h>
+#include <kern/kalloc.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_map.h>
+
+
+
+vm_map_t kalloc_map;
+vm_size_t kalloc_map_size = 8 * 1024 * 1024;
+vm_size_t kalloc_max;
+
+/*
+ * All allocations of size less than kalloc_max are rounded to the
+ * next highest power of 2. This allocator is built on top of
+ * the zone allocator. A zone is created for each potential size
+ * that we are willing to get in small blocks.
+ *
+ * We assume that kalloc_max is not greater than 64K;
+ * thus 16 is a safe array size for k_zone and k_zone_name.
+ */
+
+int first_k_zone = -1;
+struct zone *k_zone[16];
+static char *k_zone_name[16] = {
+ "kalloc.1", "kalloc.2",
+ "kalloc.4", "kalloc.8",
+ "kalloc.16", "kalloc.32",
+ "kalloc.64", "kalloc.128",
+ "kalloc.256", "kalloc.512",
+ "kalloc.1024", "kalloc.2048",
+ "kalloc.4096", "kalloc.8192",
+ "kalloc.16384", "kalloc.32768"
+};
+
+/*
+ * Max number of elements per zone. zinit rounds things up correctly.
+ * Doing things this way permits each zone to have a different maximum size
+ * based on need, rather than just guessing; it also
+ * means it's patchable in case you're wrong!
+ */
+unsigned long k_zone_max[16] = {
+ 1024, /* 1 Byte */
+ 1024, /* 2 Byte */
+ 1024, /* 4 Byte */
+ 1024, /* 8 Byte */
+ 1024, /* 16 Byte */
+ 4096, /* 32 Byte */
+ 4096, /* 64 Byte */
+ 4096, /* 128 Byte */
+ 4096, /* 256 Byte */
+ 1024, /* 512 Byte */
+ 1024, /* 1024 Byte */
+ 1024, /* 2048 Byte */
+ 1024, /* 4096 Byte */
+ 4096, /* 8192 Byte */
+ 64, /* 16384 Byte */
+ 64, /* 32768 Byte */
+};
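
The comments above describe the size classes: any request below kalloc_max is rounded up to a power of two and served from the matching k_zone entry, starting at MINSIZE (16 bytes, index 4). The standalone sketch below mirrors the loop kalloc itself uses further down and works a few values through it; it is illustration only.

    /*
     * Sketch: map a request size to the zone kalloc would use.
     *
     *   size = 16  -> allocsize 16,   index 4  ("kalloc.16")
     *   size = 48  -> allocsize 64,   index 6  ("kalloc.64")
     *   size = 600 -> allocsize 1024, index 10 ("kalloc.1024")
     */
    static int example_zone_index(vm_size_t size, vm_size_t *allocsizep)
    {
        vm_size_t allocsize = MINSIZE;    /* 16 */
        int zindex = first_k_zone;        /* zone holding MINSIZE-byte blocks */

        while (allocsize < size) {
            allocsize <<= 1;
            zindex++;
        }
        *allocsizep = allocsize;
        return zindex;
    }
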
+
+/*
+ * Initialize the memory allocator. This should be called only
+ * once on a system wide basis (i.e. first processor to get here
+ * does the initialization).
+ *
+ * This initializes all of the zones.
+ */
+
+void kalloc_init()
+{
+ vm_offset_t min, max;
+ vm_size_t size;
+ register int i;
+
+ kalloc_map = kmem_suballoc(kernel_map, &min, &max,
+ kalloc_map_size, FALSE);
+
+ /*
+ * Ensure that zones up to size 8192 bytes exist.
+ * This is desirable because messages are allocated
+ * with kalloc, and messages up through size 8192 are common.
+ */
+
+ if (PAGE_SIZE < 16*1024)
+ kalloc_max = 16*1024;
+ else
+ kalloc_max = PAGE_SIZE;
+
+ /*
+ * Allocate a zone for each size we are going to handle.
+ * We specify non-paged memory.
+ */
+ for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
+ if (size < MINSIZE) {
+ k_zone[i] = 0;
+ continue;
+ }
+ if (size == MINSIZE) {
+ first_k_zone = i;
+ }
+ k_zone[i] = zinit(size, k_zone_max[i] * size, size,
+ size >= PAGE_SIZE ? ZONE_COLLECTABLE : 0,
+ k_zone_name[i]);
+ }
+}
+
+vm_offset_t kalloc(size)
+ vm_size_t size;
+{
+ register int zindex;
+ register vm_size_t allocsize;
+ vm_offset_t addr;
+
+ /* compute the size of the block that we will actually allocate */
+
+ allocsize = size;
+ if (size < kalloc_max) {
+ allocsize = MINSIZE;
+ zindex = first_k_zone;
+ while (allocsize < size) {
+ allocsize <<= 1;
+ zindex++;
+ }
+ }
+
+ /*
+ * If our size is still small enough, check the queue for that size
+ * and allocate.
+ */
+
+ if (allocsize < kalloc_max) {
+ addr = zalloc(k_zone[zindex]);
+ } else {
+ if (kmem_alloc_wired(kalloc_map, &addr, allocsize)
+ != KERN_SUCCESS)
+ addr = 0;
+ }
+ return(addr);
+}
+
+vm_offset_t kget(size)
+ vm_size_t size;
+{
+ register int zindex;
+ register vm_size_t allocsize;
+ vm_offset_t addr;
+
+ /* compute the size of the block that we will actually allocate */
+
+ allocsize = size;
+ if (size < kalloc_max) {
+ allocsize = MINSIZE;
+ zindex = first_k_zone;
+ while (allocsize < size) {
+ allocsize <<= 1;
+ zindex++;
+ }
+ }
+
+ /*
+ * If our size is still small enough, check the queue for that size
+ * and allocate.
+ */
+
+ if (allocsize < kalloc_max) {
+ addr = zget(k_zone[zindex]);
+ } else {
+ /* This will never work, so we might as well panic */
+ panic("kget");
+ }
+ return(addr);
+}
+
+void
+kfree(data, size)
+ vm_offset_t data;
+ vm_size_t size;
+{
+ register int zindex;
+ register vm_size_t freesize;
+
+ freesize = size;
+ if (size < kalloc_max) {
+ freesize = MINSIZE;
+ zindex = first_k_zone;
+ while (freesize < size) {
+ freesize <<= 1;
+ zindex++;
+ }
+ }
+
+ if (freesize < kalloc_max) {
+ zfree(k_zone[zindex], data);
+ } else {
+ kmem_free(kalloc_map, data, freesize);
+ }
+}
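
Neither kalloc nor kfree records the block size anywhere, so the caller must hand the original request size back to kfree (as the kfree call in mach_ports_register earlier in this change does for its port array). A short in-kernel usage sketch; the structure is made up for the example.

    #include <kern/kalloc.h>

    struct example_record {
        int  value;
        char name[32];
    };

    void example_kalloc_usage(void)
    {
        vm_size_t size = sizeof(struct example_record);
        struct example_record *rec;

        rec = (struct example_record *) kalloc(size);
        if (rec == 0)
            return;                       /* allocation failed */

        rec->value = 42;

        /* The same size must come back; kfree uses it to find the
         * power-of-two zone (or the kalloc_map for large blocks). */
        kfree((vm_offset_t) rec, size);
    }
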
diff --git a/kern/kalloc.h b/kern/kalloc.h
new file mode 100644
index 00000000..f36e4dc6
--- /dev/null
+++ b/kern/kalloc.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_KALLOC_H_
+#define _KERN_KALLOC_H_
+
+#include <mach/machine/vm_types.h>
+
+#define MINSIZE 16
+
+extern vm_offset_t kalloc();
+extern vm_offset_t kget();
+extern void kfree();
+
+extern void kalloc_init();
+
+#endif /* _KERN_KALLOC_H_ */
diff --git a/kern/kern_types.h b/kern/kern_types.h
new file mode 100644
index 00000000..f715cb14
--- /dev/null
+++ b/kern/kern_types.h
@@ -0,0 +1,70 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_KERN_TYPES_H_
+#define _KERN_KERN_TYPES_H_
+
+#include <mach/port.h> /* for mach_port_t */
+
+/*
+ * Common kernel type declarations.
+ * These are handles to opaque data structures defined elsewhere.
+ *
+ * These types are recursively included in each other's definitions.
+ * This file exists to export the common declarations to each
+ * of the definitions, and to other files that need only the
+ * type declarations.
+ */
+
+/*
+ * Task structure, from kern/task.h
+ */
+typedef struct task * task_t;
+#define TASK_NULL ((task_t) 0)
+
+typedef mach_port_t * task_array_t; /* should be task_t * */
+
+/*
+ * Thread structure, from kern/thread.h
+ */
+typedef struct thread * thread_t;
+#define THREAD_NULL ((thread_t) 0)
+
+typedef mach_port_t * thread_array_t; /* should be thread_t * */
+
+/*
+ * Processor structure, from kern/processor.h
+ */
+typedef struct processor * processor_t;
+#define PROCESSOR_NULL ((processor_t) 0)
+
+/*
+ * Processor set structure, from kern/processor.h
+ */
+typedef struct processor_set * processor_set_t;
+#define PROCESSOR_SET_NULL ((processor_set_t) 0)
+
+#endif /* _KERN_KERN_TYPES_H_ */
diff --git a/kern/lock.c b/kern/lock.c
new file mode 100644
index 00000000..4d881537
--- /dev/null
+++ b/kern/lock.c
@@ -0,0 +1,637 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/lock.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Locking primitives implementation
+ */
+
+#include <cpus.h>
+#include <mach_kdb.h>
+
+#include <kern/lock.h>
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#if MACH_KDB
+#include <machine/db_machdep.h>
+#include <ddb/db_sym.h>
+#endif
+
+
+#if NCPUS > 1
+
+/*
+ * Module: lock
+ * Function:
+ * Provide reader/writer synchronization.
+ * Implementation:
+ * Simple interlock on a bit. Readers first interlock,
+ * increment the reader count, then let go. Writers hold
+ * the interlock (thus preventing further readers), and
+ * wait for already-accepted readers to go away.
+ */
+
+/*
+ * The simple-lock routines are the primitives out of which
+ * the lock package is built. The implementation is left
+ * to the machine-dependent code.
+ */
+
+#ifdef notdef
+/*
+ * A sample implementation of simple locks.
+ * assumes:
+ * boolean_t test_and_set(boolean_t *)
+ * indivisibly sets the boolean to TRUE
+ * and returns its old value
+ * and that setting a boolean to FALSE is indivisible.
+ */
+/*
+ * simple_lock_init initializes a simple lock. A simple lock
+ * may only be used for exclusive locks.
+ */
+
+void simple_lock_init(simple_lock_t l)
+{
+ *(boolean_t *)l = FALSE;
+}
+
+void simple_lock(simple_lock_t l)
+{
+ while (test_and_set((boolean_t *)l))
+ continue;
+}
+
+void simple_unlock(simple_lock_t l)
+{
+ *(boolean_t *)l = FALSE;
+}
+
+boolean_t simple_lock_try(simple_lock_t l)
+{
+ return (!test_and_set((boolean_t *)l));
+}
+#endif /* notdef */
+#endif /* NCPUS > 1 */
+
+#if NCPUS > 1
+int lock_wait_time = 100;
+#else /* NCPUS > 1 */
+
+ /*
+ * It is silly to spin on a uni-processor as if we
+ * thought something magical would happen to the
+ * want_write bit while we are executing.
+ */
+int lock_wait_time = 0;
+#endif /* NCPUS > 1 */
+
+#if MACH_SLOCKS && NCPUS == 1
+/*
+ * This code does not protect simple_locks_taken and simple_locks_info.
+ * It works despite the fact that interrupt code does use simple locks.
+ * This is because interrupts use locks in a stack-like manner.
+ * Each interrupt releases all the locks it acquires, so the data
+ * structures end up in the same state after the interrupt as before.
+ * The only precaution necessary is that simple_locks_taken be
+ * incremented first and decremented last, so that interrupt handlers
+ * don't over-write active slots in simple_locks_info.
+ */
+
+unsigned int simple_locks_taken = 0;
+
+#define NSLINFO 1000 /* maximum number of locks held */
+
+struct simple_locks_info {
+ simple_lock_t l;
+ unsigned int ra;
+} simple_locks_info[NSLINFO];
+
+void check_simple_locks(void)
+{
+ assert(simple_locks_taken == 0);
+}
+
+/* Need simple lock sanity checking code if simple locks are being
+ compiled in, and we are compiling for a uniprocessor. */
+
+void simple_lock_init(
+ simple_lock_t l)
+{
+ l->lock_data = 0;
+}
+
+void simple_lock(
+ simple_lock_t l)
+{
+ struct simple_locks_info *info;
+
+ assert(l->lock_data == 0);
+
+ l->lock_data = 1;
+
+ info = &simple_locks_info[simple_locks_taken++];
+ info->l = l;
+ /* XXX we want our return address, if possible */
+#ifdef i386
+ info->ra = *((unsigned int *)&l - 1);
+#endif /* i386 */
+}
+
+boolean_t simple_lock_try(
+ simple_lock_t l)
+{
+ struct simple_locks_info *info;
+
+ if (l->lock_data != 0)
+ return FALSE;
+
+ l->lock_data = 1;
+
+ info = &simple_locks_info[simple_locks_taken++];
+ info->l = l;
+ /* XXX we want our return address, if possible */
+#ifdef i386
+ info->ra = *((unsigned int *)&l - 1);
+#endif /* i386 */
+
+ return TRUE;
+}
+
+void simple_unlock(
+ simple_lock_t l)
+{
+ assert(l->lock_data != 0);
+
+ l->lock_data = 0;
+
+ if (simple_locks_info[simple_locks_taken-1].l != l) {
+ unsigned int i = simple_locks_taken;
+
+ /* out-of-order unlocking */
+
+ do
+ if (i == 0)
+ panic("simple_unlock");
+ while (simple_locks_info[--i].l != l);
+
+ simple_locks_info[i] = simple_locks_info[simple_locks_taken-1];
+ }
+ simple_locks_taken--;
+}
+
+#endif /* MACH_SLOCKS && NCPUS == 1 */
+
+/*
+ * Routine: lock_init
+ * Function:
+ * Initialize a lock; required before use.
+ * Note that clients declare the "struct lock"
+ * variables and then initialize them, rather
+ * than getting a new one from this module.
+ */
+void lock_init(
+ lock_t l,
+ boolean_t can_sleep)
+{
+ bzero((char *)l, sizeof(lock_data_t));
+ simple_lock_init(&l->interlock);
+ l->want_write = FALSE;
+ l->want_upgrade = FALSE;
+ l->read_count = 0;
+ l->can_sleep = can_sleep;
+ l->thread = (struct thread *)-1; /* XXX */
+ l->recursion_depth = 0;
+}
+
+void lock_sleepable(
+ lock_t l,
+ boolean_t can_sleep)
+{
+ simple_lock(&l->interlock);
+ l->can_sleep = can_sleep;
+ simple_unlock(&l->interlock);
+}
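
As the lock_init comment says, clients embed the lock in their own data structures and initialize it in place rather than allocating one from this module. A minimal sketch of the declare/init/use pattern for a sleepable read-write lock; the structure is hypothetical.

    #include <mach/boolean.h>
    #include <kern/lock.h>

    struct example_table {
        lock_data_t lock;                 /* embedded, initialized in place */
        int         nentries;
    };

    void example_table_init(struct example_table *t)
    {
        lock_init(&t->lock, TRUE);        /* TRUE: OK to sleep while waiting */
        t->nentries = 0;
    }

    int example_table_count(struct example_table *t)
    {
        int n;

        lock_read(&t->lock);              /* shared access */
        n = t->nentries;
        lock_done(&t->lock);              /* lock_done releases any mode */
        return n;
    }
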
+
+
+/*
+ * Sleep locks. These use the same data structure and algorithm
+ * as the spin locks, but the process sleeps while it is waiting
+ * for the lock. These work on uniprocessor systems.
+ */
+
+void lock_write(
+ register lock_t l)
+{
+ register int i;
+
+ check_simple_locks();
+ simple_lock(&l->interlock);
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock.
+ */
+ l->recursion_depth++;
+ simple_unlock(&l->interlock);
+ return;
+ }
+
+ /*
+ * Try to acquire the want_write bit.
+ */
+ while (l->want_write) {
+ if ((i = lock_wait_time) > 0) {
+ simple_unlock(&l->interlock);
+ while (--i > 0 && l->want_write)
+ continue;
+ simple_lock(&l->interlock);
+ }
+
+ if (l->can_sleep && l->want_write) {
+ l->waiting = TRUE;
+ thread_sleep(l,
+ simple_lock_addr(l->interlock), FALSE);
+ simple_lock(&l->interlock);
+ }
+ }
+ l->want_write = TRUE;
+
+ /* Wait for readers (and upgrades) to finish */
+
+ while ((l->read_count != 0) || l->want_upgrade) {
+ if ((i = lock_wait_time) > 0) {
+ simple_unlock(&l->interlock);
+ while (--i > 0 && (l->read_count != 0 ||
+ l->want_upgrade))
+ continue;
+ simple_lock(&l->interlock);
+ }
+
+ if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
+ l->waiting = TRUE;
+ thread_sleep(l,
+ simple_lock_addr(l->interlock), FALSE);
+ simple_lock(&l->interlock);
+ }
+ }
+ simple_unlock(&l->interlock);
+}
+
+void lock_done(
+ register lock_t l)
+{
+ simple_lock(&l->interlock);
+
+ if (l->read_count != 0)
+ l->read_count--;
+ else
+ if (l->recursion_depth != 0)
+ l->recursion_depth--;
+ else
+ if (l->want_upgrade)
+ l->want_upgrade = FALSE;
+ else
+ l->want_write = FALSE;
+
+ /*
+ * There is no reason to wakeup a waiting thread
+ * if the read-count is non-zero. Consider:
+ * we must be dropping a read lock
+ * threads are waiting only if one wants a write lock
+ * if there are still readers, they can't proceed
+ */
+
+ if (l->waiting && (l->read_count == 0)) {
+ l->waiting = FALSE;
+ thread_wakeup(l);
+ }
+
+ simple_unlock(&l->interlock);
+}
+
+void lock_read(
+ register lock_t l)
+{
+ register int i;
+
+ check_simple_locks();
+ simple_lock(&l->interlock);
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock.
+ */
+ l->read_count++;
+ simple_unlock(&l->interlock);
+ return;
+ }
+
+ while (l->want_write || l->want_upgrade) {
+ if ((i = lock_wait_time) > 0) {
+ simple_unlock(&l->interlock);
+ while (--i > 0 && (l->want_write || l->want_upgrade))
+ continue;
+ simple_lock(&l->interlock);
+ }
+
+ if (l->can_sleep && (l->want_write || l->want_upgrade)) {
+ l->waiting = TRUE;
+ thread_sleep(l,
+ simple_lock_addr(l->interlock), FALSE);
+ simple_lock(&l->interlock);
+ }
+ }
+
+ l->read_count++;
+ simple_unlock(&l->interlock);
+}
+
+/*
+ * Routine: lock_read_to_write
+ * Function:
+ * Improves a read-only lock to one with
+ * write permission. If another reader has
+ * already requested an upgrade to a write lock,
+ * no lock is held upon return.
+ *
+ * Returns TRUE if the upgrade *failed*.
+ */
+boolean_t lock_read_to_write(
+ register lock_t l)
+{
+ register int i;
+
+ check_simple_locks();
+ simple_lock(&l->interlock);
+
+ l->read_count--;
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock.
+ */
+ l->recursion_depth++;
+ simple_unlock(&l->interlock);
+ return(FALSE);
+ }
+
+ if (l->want_upgrade) {
+ /*
+ * Someone else has requested upgrade.
+ * Since we've released a read lock, wake
+ * him up.
+ */
+ if (l->waiting && (l->read_count == 0)) {
+ l->waiting = FALSE;
+ thread_wakeup(l);
+ }
+
+ simple_unlock(&l->interlock);
+ return TRUE;
+ }
+
+ l->want_upgrade = TRUE;
+
+ while (l->read_count != 0) {
+ if ((i = lock_wait_time) > 0) {
+ simple_unlock(&l->interlock);
+ while (--i > 0 && l->read_count != 0)
+ continue;
+ simple_lock(&l->interlock);
+ }
+
+ if (l->can_sleep && l->read_count != 0) {
+ l->waiting = TRUE;
+ thread_sleep(l,
+ simple_lock_addr(l->interlock), FALSE);
+ simple_lock(&l->interlock);
+ }
+ }
+
+ simple_unlock(&l->interlock);
+ return FALSE;
+}
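
Because lock_read_to_write drops the read lock entirely when it loses the upgrade race (TRUE means the upgrade failed), callers must be ready to reacquire the lock and revalidate whatever they examined. A hedged sketch of the usual pattern; the counter is a stand-in for real protected state.

    #include <kern/lock.h>

    /* Sketch: upgrade a read lock to write, coping with a failed upgrade. */
    void example_upgrade(lock_t l, int *counter)
    {
        lock_read(l);

        /* ... examine state under the read lock ... */

        if (lock_read_to_write(l)) {
            /*
             * The upgrade failed and no lock is held any more: take the
             * write lock from scratch and re-check what was decided above.
             */
            lock_write(l);
        }

        /* Write access is held either way from here. */
        (*counter)++;
        lock_done(l);
    }
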
+
+void lock_write_to_read(
+ register lock_t l)
+{
+ simple_lock(&l->interlock);
+
+ l->read_count++;
+ if (l->recursion_depth != 0)
+ l->recursion_depth--;
+ else
+ if (l->want_upgrade)
+ l->want_upgrade = FALSE;
+ else
+ l->want_write = FALSE;
+
+ if (l->waiting) {
+ l->waiting = FALSE;
+ thread_wakeup(l);
+ }
+
+ simple_unlock(&l->interlock);
+}
+
+
+/*
+ * Routine: lock_try_write
+ * Function:
+ * Tries to get a write lock.
+ *
+ * Returns FALSE if the lock is not held on return.
+ */
+
+boolean_t lock_try_write(
+ register lock_t l)
+{
+ simple_lock(&l->interlock);
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock
+ */
+ l->recursion_depth++;
+ simple_unlock(&l->interlock);
+ return TRUE;
+ }
+
+ if (l->want_write || l->want_upgrade || l->read_count) {
+ /*
+ * Can't get lock.
+ */
+ simple_unlock(&l->interlock);
+ return FALSE;
+ }
+
+ /*
+ * Have lock.
+ */
+
+ l->want_write = TRUE;
+ simple_unlock(&l->interlock);
+ return TRUE;
+}
+
+/*
+ * Routine: lock_try_read
+ * Function:
+ * Tries to get a read lock.
+ *
+ * Returns FALSE if the lock is not held on return.
+ */
+
+boolean_t lock_try_read(
+ register lock_t l)
+{
+ simple_lock(&l->interlock);
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock
+ */
+ l->read_count++;
+ simple_unlock(&l->interlock);
+ return TRUE;
+ }
+
+ if (l->want_write || l->want_upgrade) {
+ simple_unlock(&l->interlock);
+ return FALSE;
+ }
+
+ l->read_count++;
+ simple_unlock(&l->interlock);
+ return TRUE;
+}
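The try variants support a non-blocking pattern along these lines (hypothetical caller, sketch only): poll for the lock and defer the work instead of sleeping.

    boolean_t try_update(lock_t l)          /* hypothetical */
    {
        if (!lock_try_write(l))
            return FALSE;       /* contended: caller retries later */
        /* ... short critical section ... */
        lock_write_done(l);
        return TRUE;
    }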
+
+/*
+ * Routine: lock_try_read_to_write
+ * Function:
+ * Improves a read-only lock to one with
+ * write permission. If another reader has
+ * already requested an upgrade to a write lock,
+ * the read lock is still held upon return.
+ *
+ * Returns FALSE if the upgrade *failed*.
+ */
+boolean_t lock_try_read_to_write(
+ register lock_t l)
+{
+ check_simple_locks();
+ simple_lock(&l->interlock);
+
+ if (l->thread == current_thread()) {
+ /*
+ * Recursive lock
+ */
+ l->read_count--;
+ l->recursion_depth++;
+ simple_unlock(&l->interlock);
+ return TRUE;
+ }
+
+ if (l->want_upgrade) {
+ simple_unlock(&l->interlock);
+ return FALSE;
+ }
+ l->want_upgrade = TRUE;
+ l->read_count--;
+
+ while (l->read_count != 0) {
+ l->waiting = TRUE;
+ thread_sleep(l,
+ simple_lock_addr(l->interlock), FALSE);
+ simple_lock(&l->interlock);
+ }
+
+ simple_unlock(&l->interlock);
+ return TRUE;
+}
+
+/*
+ * Allow a process that has a lock for write to acquire it
+ * recursively (for read, write, or update).
+ */
+void lock_set_recursive(
+ lock_t l)
+{
+ simple_lock(&l->interlock);
+ if (!l->want_write) {
+ panic("lock_set_recursive: don't have write lock");
+ }
+ l->thread = current_thread();
+ simple_unlock(&l->interlock);
+}
+
+/*
+ * Prevent a lock from being re-acquired.
+ */
+void lock_clear_recursive(
+ lock_t l)
+{
+ simple_lock(&l->interlock);
+ if (l->thread != current_thread()) {
+ panic("lock_clear_recursive: wrong thread");
+ }
+ if (l->recursion_depth == 0)
+ l->thread = (struct thread *)-1; /* XXX */
+ simple_unlock(&l->interlock);
+}
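Taken together, the expected call sequence looks like this sketch (hypothetical caller): the write lock must already be held before lock_set_recursive(), and recursion is cleared again before the final release.

    lock_write(l);             /* acquire for write first */
    lock_set_recursive(l);     /* this thread may now re-lock l */
    lock_read(l);              /* nested acquire: bumps read_count */
    lock_read_done(l);
    lock_clear_recursive(l);
    lock_write_done(l);        /* finally release the write lock */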
+
+#if MACH_KDB
+#if MACH_SLOCKS && NCPUS == 1
+void db_show_all_slocks(void)
+{
+ int i;
+ struct simple_locks_info *info;
+ simple_lock_t l;
+
+ for (i = 0; i < simple_locks_taken; i++) {
+ info = &simple_locks_info[i];
+ db_printf("%d: ", i);
+ db_printsym(info->l, DB_STGY_ANY);
+#if i386
+ db_printf(" locked by ");
+ db_printsym(info->ra, DB_STGY_PROC);
+#endif
+ db_printf("\n");
+ }
+}
+#else /* MACH_SLOCKS && NCPUS == 1 */
+void db_show_all_slocks(void)
+{
+ db_printf("simple lock info not available\n");
+}
+#endif /* MACH_SLOCKS && NCPUS == 1 */
+#endif /* MACH_KDB */
diff --git a/kern/lock.h b/kern/lock.h
new file mode 100644
index 00000000..9be63c5c
--- /dev/null
+++ b/kern/lock.h
@@ -0,0 +1,177 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/lock.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Locking primitives definitions
+ */
+
+#ifndef _KERN_LOCK_H_
+#define _KERN_LOCK_H_
+
+#include <cpus.h>
+#include <mach_ldebug.h>
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+
+#if NCPUS > 1
+#include <machine/lock.h>/*XXX*/
+#endif
+
+#define MACH_SLOCKS ((NCPUS > 1) || MACH_LDEBUG)
+
+/*
+ * A simple spin lock.
+ */
+
+struct slock {
+ volatile natural_t lock_data; /* in general 1 bit is sufficient */
+};
+
+typedef struct slock simple_lock_data_t;
+typedef struct slock *simple_lock_t;
+
+#if MACH_SLOCKS
+/*
+ * Use the locks.
+ */
+
+#define decl_simple_lock_data(class,name) \
+class simple_lock_data_t name;
+
+#define simple_lock_addr(lock) (&(lock))
+
+#if (NCPUS > 1)
+
+/*
+ * The single-CPU debugging routines are not valid
+ * on a multiprocessor.
+ */
+#define simple_lock_taken(lock) (1) /* always succeeds */
+#define check_simple_locks()
+
+#else /* NCPUS > 1 */
+/*
+ * Use our single-CPU locking test routines.
+ */
+
+extern void simple_lock_init(simple_lock_t);
+extern void simple_lock(simple_lock_t);
+extern void simple_unlock(simple_lock_t);
+extern boolean_t simple_lock_try(simple_lock_t);
+
+#define simple_lock_pause()
+#define simple_lock_taken(lock) ((lock)->lock_data)
+
+extern void check_simple_locks(void);
+
+#endif /* NCPUS > 1 */
+
+#else /* MACH_SLOCKS */
+/*
+ * Do not allocate storage for locks if not needed.
+ */
+#define decl_simple_lock_data(class,name)
+#define simple_lock_addr(lock) ((simple_lock_t)0)
+
+/*
+ * No multiprocessor locking is necessary.
+ */
+#define simple_lock_init(l)
+#define simple_lock(l)
+#define simple_unlock(l)
+#define simple_lock_try(l) (TRUE) /* always succeeds */
+#define simple_lock_taken(l) (1) /* always succeeds */
+#define check_simple_locks()
+#define simple_lock_pause()
+
+#endif /* MACH_SLOCKS */
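The same caller source therefore compiles to a real spin lock when MACH_SLOCKS is non-zero and to nothing at all otherwise; a sketch (map_lock is a hypothetical name):

    decl_simple_lock_data(static, map_lock)  /* storage only if MACH_SLOCKS */

    simple_lock(&map_lock);      /* real lock, or an empty statement */
    /* ... critical section ... */
    simple_unlock(&map_lock);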
+
+
+#define decl_mutex_data(class,name) decl_simple_lock_data(class,name)
+#define mutex_try(l) simple_lock_try(l)
+#define mutex_lock(l) simple_lock(l)
+#define mutex_unlock(l) simple_unlock(l)
+#define mutex_init(l) simple_lock_init(l)
+
+
+/*
+ * The general lock structure. Provides for multiple readers,
+ * upgrading from read to write, and sleeping until the lock
+ * can be gained.
+ *
+ * On some architectures, assembly language code in the 'inline'
+ * program fiddles the lock structures. It must be changed in
+ * concert with the structure layout.
+ *
+ * Only the "interlock" field is used for hardware exclusion;
+ * other fields are modified with normal instructions after
+ * acquiring the interlock bit.
+ */
+struct lock {
+ struct thread *thread; /* Thread that has lock, if
+ recursive locking allowed */
+ unsigned int read_count:16, /* Number of accepted readers */
+ /* boolean_t */ want_upgrade:1, /* Read-to-write upgrade waiting */
+ /* boolean_t */ want_write:1, /* Writer is waiting, or
+ locked for write */
+ /* boolean_t */ waiting:1, /* Someone is sleeping on lock */
+ /* boolean_t */ can_sleep:1, /* Can attempts to lock go to sleep? */
+ recursion_depth:12, /* Depth of recursion */
+ :0;
+ decl_simple_lock_data(,interlock)
+ /* Hardware interlock field.
+ Last in the structure so that
+ field offsets are the same whether
+ or not it is present. */
+};
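The field widths are chosen so that all of the lock state packs into one 32-bit word: 16 (read_count) + 1 + 1 + 1 + 1 (the four booleans) + 12 (recursion_depth) = 32 bits, and the unnamed ":0" member closes out that word so the interlock that follows starts on its own boundary.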
+
+typedef struct lock lock_data_t;
+typedef struct lock *lock_t;
+
+/* Sleep locks must work even if no multiprocessing */
+
+extern void lock_init(lock_t, boolean_t);
+extern void lock_sleepable(lock_t, boolean_t);
+extern void lock_write(lock_t);
+extern void lock_read(lock_t);
+extern void lock_done(lock_t);
+extern boolean_t lock_read_to_write(lock_t);
+extern void lock_write_to_read(lock_t);
+extern boolean_t lock_try_write(lock_t);
+extern boolean_t lock_try_read(lock_t);
+extern boolean_t lock_try_read_to_write(lock_t);
+
+#define lock_read_done(l) lock_done(l)
+#define lock_write_done(l) lock_done(l)
+
+extern void lock_set_recursive(lock_t);
+extern void lock_clear_recursive(lock_t);
+
+#endif /* _KERN_LOCK_H_ */
diff --git a/kern/lock_mon.c b/kern/lock_mon.c
new file mode 100644
index 00000000..ef443295
--- /dev/null
+++ b/kern/lock_mon.c
@@ -0,0 +1,375 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1990 Carnegie-Mellon University
+ * Copyright (c) 1989 Carnegie-Mellon University
+ * All rights reserved. The CMU software License Agreement specifies
+ * the terms and conditions for use and redistribution.
+ */
+/*
+ * Copyright 1990 by Open Software Foundation,
+ * Grenoble, FRANCE
+ *
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OSF or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior
+ * permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Support For MP Debugging
+ * if MACH_MP_DEBUG is on, we use alternate locking
+ * routines to detect deadlocks.
+ * Support for MP lock monitoring (MACH_LOCK_MON):
+ * registers use of locks and contention, and depending on the
+ * hardware also records time spent with locks held.
+ */
+
+#include <cpus.h>
+#include <mach_mp_debug.h>
+#include <mach_lock_mon.h>
+#include <time_stamp.h>
+
+#include <sys/types.h>
+#include <mach/machine/vm_types.h>
+#include <mach/boolean.h>
+#include <kern/thread.h>
+#include <kern/lock.h>
+#include <kern/time_stamp.h>
+
+
+decl_simple_lock_data(extern , kdb_lock)
+decl_simple_lock_data(extern , printf_lock)
+
+#if NCPUS > 1 && MACH_LOCK_MON
+
+#if TIME_STAMP
+extern time_stamp_t time_stamp;
+#else TIME_STAMP
+typedef unsigned int time_stamp_t;
+#define time_stamp 0
+#endif TIME_STAMP
+
+#define LOCK_INFO_MAX (1024*32)
+#define LOCK_INFO_HASH_COUNT 1024
+#define LOCK_INFO_PER_BUCKET (LOCK_INFO_MAX/LOCK_INFO_HASH_COUNT)
+
+
+#define HASH_LOCK(lock) ((long)lock>>5 & (LOCK_INFO_HASH_COUNT-1))
+
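For example (hypothetical address), a lock at 0x1000 hashes to (0x1000 >> 5) & (1024 - 1) = 128, so locks are spread across the 1024 buckets by their address at 32-byte granularity.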
+struct lock_info {
+ unsigned int success;
+ unsigned int fail;
+ unsigned int masked;
+ unsigned int stack;
+ time_stamp_t time;
+ decl_simple_lock_data(, *lock)
+ vm_offset_t caller;
+};
+
+struct lock_info_bucket {
+ struct lock_info info[LOCK_INFO_PER_BUCKET];
+};
+
+struct lock_info_bucket lock_info[LOCK_INFO_HASH_COUNT];
+struct lock_info default_lock_info;
+unsigned default_lock_stack = 0;
+
+extern int curr_ipl[];
+
+
+
+struct lock_info *
+locate_lock_info(lock)
+decl_simple_lock_data(, **lock)
+{
+ struct lock_info *li = &(lock_info[HASH_LOCK(*lock)].info[0]);
+ register i;
+ register my_cpu = cpu_number();
+
+ for (i=0; i < LOCK_INFO_PER_BUCKET; i++, li++)
+ if (li->lock) {
+ if (li->lock == *lock)
+ return(li);
+ } else {
+ li->lock = *lock;
+ li->caller = *((vm_offset_t *)lock - 1);
+ return(li);
+ }
+ db_printf("out of lock_info slots\n");
+ li = &default_lock_info;
+ return(li);
+}
+
+
+simple_lock(lock)
+decl_simple_lock_data(, *lock)
+{
+ register struct lock_info *li = locate_lock_info(&lock);
+ register my_cpu = cpu_number();
+
+ if (current_thread())
+ li->stack = current_thread()->lock_stack++;
+ if (curr_ipl[my_cpu])
+ li->masked++;
+ if (_simple_lock_try(lock))
+ li->success++;
+ else {
+ _simple_lock(lock);
+ li->fail++;
+ }
+ li->time = time_stamp - li->time;
+}
+
+simple_lock_try(lock)
+decl_simple_lock_data(, *lock)
+{
+ register struct lock_info *li = locate_lock_info(&lock);
+ register my_cpu = cpu_number();
+
+ if (curr_ipl[my_cpu])
+ li->masked++;
+ if (_simple_lock_try(lock)) {
+ li->success++;
+ li->time = time_stamp - li->time;
+ if (current_thread())
+ li->stack = current_thread()->lock_stack++;
+ return(1);
+ } else {
+ li->fail++;
+ return(0);
+ }
+}
+
+simple_unlock(lock)
+decl_simple_lock_data(, *lock)
+{
+ register time_stamp_t stamp = time_stamp;
+ register time_stamp_t *time = &locate_lock_info(&lock)->time;
+ register unsigned *lock_stack;
+
+ *time = stamp - *time;
+ _simple_unlock(lock);
+ if (current_thread()) {
+ lock_stack = &current_thread()->lock_stack;
+ if (*lock_stack)
+ (*lock_stack)--;
+ }
+}
+
+lip() {
+ lis(4, 1, 0);
+}
+
+#define lock_info_sort lis
+
+unsigned scurval, ssum;
+struct lock_info *sli;
+
+lock_info_sort(arg, abs, count)
+{
+ struct lock_info *li, mean;
+ int bucket = 0;
+ int i;
+ unsigned max_val;
+ unsigned old_val = (unsigned)-1;
+ struct lock_info *target_li = &lock_info[0].info[0];
+ unsigned sum;
+ unsigned empty, total;
+ unsigned curval;
+
+ printf("\nSUCCESS FAIL MASKED STACK TIME LOCK/CALLER\n");
+ if (!count)
+ count = 8 ;
+ while (count && target_li) {
+ empty = LOCK_INFO_HASH_COUNT;
+ target_li = 0;
+ total = 0;
+ max_val = 0;
+ mean.success = 0;
+ mean.fail = 0;
+ mean.masked = 0;
+ mean.stack = 0;
+ mean.time = 0;
+ mean.lock = (simple_lock_data_t *) &lock_info;
+ mean.caller = (vm_offset_t) &lock_info;
+ for (bucket = 0; bucket < LOCK_INFO_HASH_COUNT; bucket++) {
+ li = &lock_info[bucket].info[0];
+ if (li->lock)
+ empty--;
+ for (i= 0; i< LOCK_INFO_PER_BUCKET && li->lock; i++, li++) {
+ if (li->lock == &kdb_lock || li->lock == &printf_lock)
+ continue;
+ total++;
+ curval = *((int *)li + arg);
+ sum = li->success + li->fail;
+ if(!sum && !abs)
+ continue;
+ scurval = curval;
+ ssum = sum;
+ sli = li;
+ if (!abs) switch(arg) {
+ case 0:
+ break;
+ case 1:
+ case 2:
+ curval = (curval*100) / sum;
+ break;
+ case 3:
+ case 4:
+ curval = curval / sum;
+ break;
+ }
+ if (curval > max_val && curval < old_val) {
+ max_val = curval;
+ target_li = li;
+ }
+ if (curval == old_val && count != 0) {
+ print_lock_info(li);
+ count--;
+ }
+ mean.success += li->success;
+ mean.fail += li->fail;
+ mean.masked += li->masked;
+ mean.stack += li->stack;
+ mean.time += li->time;
+ }
+ }
+ if (target_li)
+ old_val = max_val;
+ }
+ db_printf("\n%d total locks, %d empty buckets", total, empty );
+ if (default_lock_info.success)
+ db_printf(", default: %d", default_lock_info.success + default_lock_info.fail);
+ db_printf("\n");
+ print_lock_info(&mean);
+}
+
+#define lock_info_clear lic
+
+lock_info_clear()
+{
+ struct lock_info *li;
+ int bucket = 0;
+ int i;
+ for (bucket = 0; bucket < LOCK_INFO_HASH_COUNT; bucket++) {
+ li = &lock_info[bucket].info[0];
+ for (i= 0; i< LOCK_INFO_PER_BUCKET; i++, li++) {
+ bzero(li, sizeof(struct lock_info));
+ }
+ }
+ bzero(&default_lock_info, sizeof(struct lock_info));
+}
+
+print_lock_info(li)
+struct lock_info *li;
+{
+ int off;
+ int sum = li->success + li->fail;
+ db_printf("%d %d/%d %d/%d %d/%d %d/%d ", li->success,
+ li->fail, (li->fail*100)/sum,
+ li->masked, (li->masked*100)/sum,
+ li->stack, li->stack/sum,
+ li->time, li->time/sum);
+ db_search_symbol(li->lock, 0, &off);
+ if (off < 1024)
+ db_printsym(li->lock, 0);
+ else {
+ db_printsym(li->caller, 0);
+ db_printf("(%X)", li->lock);
+ }
+ db_printf("\n");
+}
+
+#endif NCPUS > 1 && MACH_LOCK_MON
+
+#if TIME_STAMP
+
+/*
+ * Measure lock/unlock operations
+ */
+
+time_lock(loops)
+{
+ decl_simple_lock_data(, lock)
+ register time_stamp_t stamp;
+ register int i;
+
+
+ if (!loops)
+ loops = 1000;
+ simple_lock_init(&lock);
+ stamp = time_stamp;
+ for (i = 0; i < loops; i++) {
+ simple_lock(&lock);
+ simple_unlock(&lock);
+ }
+ stamp = time_stamp - stamp;
+ db_printf("%d stamps for simple_locks\n", stamp/loops);
+#if MACH_LOCK_MON
+ stamp = time_stamp;
+ for (i = 0; i < loops; i++) {
+ _simple_lock(&lock);
+ _simple_unlock(&lock);
+ }
+ stamp = time_stamp - stamp;
+ db_printf("%d stamps for _simple_locks\n", stamp/loops);
+#endif MACH_LOCK_MON
+}
+#endif TIME_STAMP
+
+#if MACH_MP_DEBUG
+
+/*
+ * The lock routines arrange to call the following routines only
+ * when a lock is found to be contended, so there is no performance
+ * penalty when locks are free.
+ */
+
+void
+retry_simple_lock(lock)
+decl_simple_lock_data(, *lock)
+{
+ register count = 0;
+
+ while(!simple_lock_try(lock))
+ if (count++ > 1000000 && lock != &kdb_lock) {
+ if (lock == &printf_lock)
+ return;
+ db_printf("cpu %d looping on simple_lock(%x) called by %x\n",
+ cpu_number(), lock, *(((int *)&lock) -1));
+ Debugger();
+ count = 0;
+ }
+}
+
+void
+retry_bit_lock(index, addr)
+{
+ register count = 0;
+
+ while(!bit_lock_try(index, addr))
+ if (count++ > 1000000) {
+ db_printf("cpu %d looping on bit_lock(%x, %x) called by %x\n",
+ cpu_number(), index, addr, *(((int *)&index) -1));
+ Debugger();
+ count = 0;
+ }
+}
+#endif MACH_MP_DEBUG
+
+
+
diff --git a/kern/mach.srv b/kern/mach.srv
new file mode 100644
index 00000000..3ed92593
--- /dev/null
+++ b/kern/mach.srv
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#ifdef MIGRATING_THREADS
+#define task_threads task_acts
+#define thread_terminate act_terminate
+#define thread_set_state act_set_state_immediate
+#define thread_get_state act_get_state_immediate
+#define thread_info act_thread_info
+#define thread_suspend act_suspend
+#define thread_resume act_resume
+#define thread_abort act_abort
+#define thread_set_special_port act_set_special_port
+#define thread_get_special_port act_get_special_port
+#endif /* MIGRATING_THREADS */
+
+simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
+
+#include <mach/mach.defs>
diff --git a/kern/mach4.srv b/kern/mach4.srv
new file mode 100644
index 00000000..ead54844
--- /dev/null
+++ b/kern/mach4.srv
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#ifdef MIGRATING_THREADS
+#define thread_enable_pc_sampling act_enable_pc_sampling
+#define thread_disable_pc_sampling act_disable_pc_sampling
+#define thread_get_sampled_pcs act_get_sampled_pcs
+#endif /* MIGRATING_THREADS */
+
+#include <mach/mach4.defs>
diff --git a/kern/mach_clock.c b/kern/mach_clock.c
new file mode 100644
index 00000000..667b211d
--- /dev/null
+++ b/kern/mach_clock.c
@@ -0,0 +1,569 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994-1988 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/mach_clock.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Clock primitives.
+ */
+#include <cpus.h>
+#include <mach_pcsample.h>
+#include <stat_time.h>
+
+#include <mach/boolean.h>
+#include <mach/machine.h>
+#include <mach/time_value.h>
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#include <kern/counters.h>
+#include "cpu_number.h"
+#include <kern/host.h>
+#include <kern/lock.h>
+#include <kern/mach_param.h>
+#include <kern/processor.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+#include <kern/time_out.h>
+#include <kern/time_stamp.h>
+#include <vm/vm_kern.h>
+#include <sys/time.h>
+#include <machine/mach_param.h> /* HZ */
+#include <machine/machspl.h>
+
+#if MACH_PCSAMPLE
+#include <kern/pc_sample.h>
+#endif
+
+
+void softclock(); /* forward */
+
+int hz = HZ; /* number of ticks per second */
+int tick = (1000000 / HZ); /* number of usec per tick */
+time_value_t time = { 0, 0 }; /* time since bootup (uncorrected) */
+unsigned long elapsed_ticks = 0; /* ticks elapsed since bootup */
+
+int timedelta = 0;
+int tickdelta = 0;
+
+#if HZ > 500
+int tickadj = 1; /* can adjust HZ usecs per second */
+#else
+int tickadj = 500 / HZ; /* can adjust about 500 usecs per second */
+#endif
+int bigadj = 1000000; /* adjust 10*tickadj if adjustment
+ > bigadj */
+
+/*
+ * This update protocol, with a check value, allows
+ * do {
+ * secs = mtime->seconds;
+ * usecs = mtime->microseconds;
+ * } while (secs != mtime->check_seconds);
+ * to read the time correctly. (On a multiprocessor this assumes
+ * that processors see each other's writes in the correct order.
+ * We may have to insert fence operations.)
+ */
+
+mapped_time_value_t *mtime = 0;
+
+#define update_mapped_time(time) \
+MACRO_BEGIN \
+ if (mtime != 0) { \
+ mtime->check_seconds = (time)->seconds; \
+ mtime->microseconds = (time)->microseconds; \
+ mtime->seconds = (time)->seconds; \
+ } \
+MACRO_END
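The write order matters here: check_seconds is stored before the new microseconds and seconds is stored last, so a reader whose first read of seconds matches its final read of check_seconds cannot have straddled a seconds rollover (modulo the memory-ordering caveat above). A user-side reader of the mapped page would look like this sketch (hypothetical function name):

    time_value_t
    read_mapped_time(volatile mapped_time_value_t *mtime)  /* hypothetical */
    {
        time_value_t t;

        do {
            t.seconds = mtime->seconds;
            t.microseconds = mtime->microseconds;
        } while (t.seconds != mtime->check_seconds);
        return t;
    }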
+
+decl_simple_lock_data(, timer_lock) /* lock for ... */
+timer_elt_data_t timer_head; /* ordered list of timeouts */
+ /* (doubles as end-of-list) */
+
+/*
+ * Handle clock interrupts.
+ *
+ * The clock interrupt is assumed to be called at a (more or less)
+ * constant rate. The rate must be identical on all CPUS (XXX - fix).
+ *
+ * Usec is the number of microseconds that have elapsed since the
+ * last clock tick. It may be constant or computed, depending on
+ * the accuracy of the hardware clock.
+ *
+ */
+void clock_interrupt(usec, usermode, basepri)
+ register int usec; /* microseconds per tick */
+ boolean_t usermode; /* executing user code */
+ boolean_t basepri; /* at base priority */
+{
+ register int my_cpu = cpu_number();
+ register thread_t thread = current_thread();
+
+ counter(c_clock_ticks++);
+ counter(c_threads_total += c_threads_current);
+ counter(c_stacks_total += c_stacks_current);
+
+#if STAT_TIME
+ /*
+ * Increment the thread time, if using
+ * statistical timing.
+ */
+ if (usermode) {
+ timer_bump(&thread->user_timer, usec);
+ }
+ else {
+ timer_bump(&thread->system_timer, usec);
+ }
+#endif STAT_TIME
+
+ /*
+ * Increment the CPU time statistics.
+ */
+ {
+ extern void thread_quantum_update(); /* in priority.c */
+ register int state;
+
+ if (usermode)
+ state = CPU_STATE_USER;
+ else if (!cpu_idle(my_cpu))
+ state = CPU_STATE_SYSTEM;
+ else
+ state = CPU_STATE_IDLE;
+
+ machine_slot[my_cpu].cpu_ticks[state]++;
+
+ /*
+ * Adjust the thread's priority and check for
+ * quantum expiration.
+ */
+
+ thread_quantum_update(my_cpu, thread, 1, state);
+ }
+
+#if MACH_PCSAMPLE
+ /*
+ * Take a sample of pc for the user if required.
+ * This had better be MP safe. It might be interesting
+ * to keep track of cpu in the sample.
+ */
+ if (usermode) {
+ take_pc_sample_macro(thread, SAMPLED_PC_PERIODIC);
+ }
+#endif /* MACH_PCSAMPLE */
+
+ /*
+ * Time-of-day and time-out list are updated only
+ * on the master CPU.
+ */
+ if (my_cpu == master_cpu) {
+
+ register spl_t s;
+ register timer_elt_t telt;
+ boolean_t needsoft = FALSE;
+
+#if TS_FORMAT == 1
+ /*
+ * Increment the tick count for the timestamping routine.
+ */
+ ts_tick_count++;
+#endif TS_FORMAT == 1
+
+ /*
+ * Update the tick count since bootup, and handle
+ * timeouts.
+ */
+
+ s = splsched();
+ simple_lock(&timer_lock);
+
+ elapsed_ticks++;
+
+ telt = (timer_elt_t)queue_first(&timer_head.chain);
+ if (telt->ticks <= elapsed_ticks)
+ needsoft = TRUE;
+ simple_unlock(&timer_lock);
+ splx(s);
+
+ /*
+ * Increment the time-of-day clock.
+ */
+ if (timedelta == 0) {
+ time_value_add_usec(&time, usec);
+ }
+ else {
+ register int delta;
+
+ if (timedelta < 0) {
+ delta = usec - tickdelta;
+ timedelta += tickdelta;
+ }
+ else {
+ delta = usec + tickdelta;
+ timedelta -= tickdelta;
+ }
+ time_value_add_usec(&time, delta);
+ }
+ update_mapped_time(&time);
+
+ /*
+ * Schedule a soft interrupt for timeouts if needed
+ */
+ if (needsoft) {
+ if (basepri) {
+ (void) splsoftclock();
+ softclock();
+ }
+ else {
+ setsoftclock();
+ }
+ }
+ }
+}
+
+/*
+ * There is a nasty race between softclock and reset_timeout.
+ * For example, scheduling code looks at timer_set and calls
+ * reset_timeout, thinking the timer is set. However, softclock
+ * has already removed the timer but hasn't called thread_timeout
+ * yet.
+ *
+ * Interim solution: We initialize timers after pulling
+ * them out of the queue, so a race with reset_timeout won't
+ * hurt. The timeout functions (eg, thread_timeout,
+ * thread_depress_timeout) check timer_set/depress_priority
+ * to see if the timer has been cancelled and if so do nothing.
+ *
+ * This still isn't correct. For example, softclock pulls a
+ * timer off the queue, then thread_go resets timer_set (but
+ * reset_timeout does nothing), then thread_set_timeout puts the
+ * timer back on the queue and sets timer_set, then
+ * thread_timeout finally runs and clears timer_set, then
+ * thread_set_timeout tries to put the timer on the queue again
+ * and corrupts it.
+ */
+
+void softclock()
+{
+ /*
+ * Handle timeouts.
+ */
+ spl_t s;
+ register timer_elt_t telt;
+ register int (*fcn)();
+ register char *param;
+
+ while (TRUE) {
+ s = splsched();
+ simple_lock(&timer_lock);
+ telt = (timer_elt_t) queue_first(&timer_head.chain);
+ if (telt->ticks > elapsed_ticks) {
+ simple_unlock(&timer_lock);
+ splx(s);
+ break;
+ }
+ fcn = telt->fcn;
+ param = telt->param;
+
+ remqueue(&timer_head.chain, (queue_entry_t)telt);
+ telt->set = TELT_UNSET;
+ simple_unlock(&timer_lock);
+ splx(s);
+
+ assert(fcn != 0);
+ (*fcn)(param);
+ }
+}
+
+/*
+ * Set timeout.
+ *
+ * Parameters:
+ * telt timer element. Function and param are already set.
+ * interval time-out interval, in hz.
+ */
+void set_timeout(telt, interval)
+ register timer_elt_t telt; /* already loaded */
+ register unsigned int interval;
+{
+ spl_t s;
+ register timer_elt_t next;
+
+ s = splsched();
+ simple_lock(&timer_lock);
+
+ interval += elapsed_ticks;
+
+ for (next = (timer_elt_t)queue_first(&timer_head.chain);
+ ;
+ next = (timer_elt_t)queue_next((queue_entry_t)next)) {
+
+ if (next->ticks > interval)
+ break;
+ }
+ telt->ticks = interval;
+ /*
+ * Insert new timer element before 'next'
+ * (after 'next'->prev)
+ */
+ insque((queue_entry_t) telt, ((queue_entry_t)next)->prev);
+ telt->set = TELT_SET;
+ simple_unlock(&timer_lock);
+ splx(s);
+}
+
+boolean_t reset_timeout(telt)
+ register timer_elt_t telt;
+{
+ spl_t s;
+
+ s = splsched();
+ simple_lock(&timer_lock);
+ if (telt->set) {
+ remqueue(&timer_head.chain, (queue_entry_t)telt);
+ telt->set = TELT_UNSET;
+ simple_unlock(&timer_lock);
+ splx(s);
+ return TRUE;
+ }
+ else {
+ simple_unlock(&timer_lock);
+ splx(s);
+ return FALSE;
+ }
+}
+
+void init_timeout()
+{
+ simple_lock_init(&timer_lock);
+ queue_init(&timer_head.chain);
+ timer_head.ticks = ~0; /* MAXUINT - sentinel */
+
+ elapsed_ticks = 0;
+}
+
+/*
+ * Read the time.
+ */
+kern_return_t
+host_get_time(host, current_time)
+ host_t host;
+ time_value_t *current_time; /* OUT */
+{
+ if (host == HOST_NULL)
+ return(KERN_INVALID_HOST);
+
+ do {
+ current_time->seconds = mtime->seconds;
+ current_time->microseconds = mtime->microseconds;
+ } while (current_time->seconds != mtime->check_seconds);
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Set the time. Only available to privileged users.
+ */
+kern_return_t
+host_set_time(host, new_time)
+ host_t host;
+ time_value_t new_time;
+{
+ spl_t s;
+
+ if (host == HOST_NULL)
+ return(KERN_INVALID_HOST);
+
+#if NCPUS > 1
+ /*
+ * Switch to the master CPU to synchronize correctly.
+ */
+ thread_bind(current_thread(), master_processor);
+ if (current_processor() != master_processor)
+ thread_block((void (*)) 0);
+#endif NCPUS > 1
+
+ s = splhigh();
+ time = new_time;
+ update_mapped_time(&time);
+ resettodr();
+ splx(s);
+
+#if NCPUS > 1
+ /*
+ * Switch off the master CPU.
+ */
+ thread_bind(current_thread(), PROCESSOR_NULL);
+#endif NCPUS > 1
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Adjust the time gradually.
+ */
+kern_return_t
+host_adjust_time(host, new_adjustment, old_adjustment)
+ host_t host;
+ time_value_t new_adjustment;
+ time_value_t *old_adjustment; /* OUT */
+{
+ time_value_t oadj;
+ unsigned int ndelta;
+ spl_t s;
+
+ if (host == HOST_NULL)
+ return (KERN_INVALID_HOST);
+
+ ndelta = new_adjustment.seconds * 1000000
+ + new_adjustment.microseconds;
+
+#if NCPUS > 1
+ thread_bind(current_thread(), master_processor);
+ if (current_processor() != master_processor)
+ thread_block((void (*)) 0);
+#endif NCPUS > 1
+
+ s = splclock();
+
+ oadj.seconds = timedelta / 1000000;
+ oadj.microseconds = timedelta % 1000000;
+
+ if (timedelta == 0) {
+ if (ndelta > bigadj)
+ tickdelta = 10 * tickadj;
+ else
+ tickdelta = tickadj;
+ }
+ if (ndelta % tickdelta)
+ ndelta = ndelta / tickdelta * tickdelta;
+
+ timedelta = ndelta;
+
+ splx(s);
+#if NCPUS > 1
+ thread_bind(current_thread(), PROCESSOR_NULL);
+#endif NCPUS > 1
+
+ *old_adjustment = oadj;
+
+ return (KERN_SUCCESS);
+}
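A worked example of the resulting slew rate, assuming HZ = 100 (so tickadj = 500/HZ = 5): a requested adjustment of +0.5 s gives ndelta = 500000 usec, which is below bigadj, so tickdelta = tickadj = 5. Each clock tick then adds 5 extra microseconds, the clock slews by about 500 usec per second of real time, and the half-second correction completes after roughly 1000 seconds. Adjustments larger than bigadj (1 s) use tickdelta = 10 * tickadj and slew ten times faster.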
+
+void mapable_time_init()
+{
+ if (kmem_alloc_wired(kernel_map, (vm_offset_t *) &mtime, PAGE_SIZE)
+ != KERN_SUCCESS)
+ panic("mapable_time_init");
+ bzero((char *)mtime, PAGE_SIZE);
+ update_mapped_time(&time);
+}
+
+int timeopen()
+{
+ return(0);
+}
+int timeclose()
+{
+ return(0);
+}
+
+/*
+ * Compatibility for device drivers.
+ * New code should use set_timeout/reset_timeout and private timers.
+ * This code can't use a zone to allocate timers, because
+ * it can be called from interrupt handlers.
+ */
+
+#define NTIMERS 20
+
+timer_elt_data_t timeout_timers[NTIMERS];
+
+/*
+ * Set timeout.
+ *
+ * fcn: function to call
+ * param: parameter to pass to function
+ * interval: timeout interval, in hz.
+ */
+void timeout(fcn, param, interval)
+ int (*fcn)(/* char * param */);
+ char * param;
+ int interval;
+{
+ spl_t s;
+ register timer_elt_t elt;
+
+ s = splsched();
+ simple_lock(&timer_lock);
+ for (elt = &timeout_timers[0]; elt < &timeout_timers[NTIMERS]; elt++)
+ if (elt->set == TELT_UNSET)
+ break;
+ if (elt == &timeout_timers[NTIMERS])
+ panic("timeout");
+ elt->fcn = fcn;
+ elt->param = param;
+ elt->set = TELT_ALLOC;
+ simple_unlock(&timer_lock);
+ splx(s);
+
+ set_timeout(elt, (unsigned int)interval);
+}
+
+/*
+ * Returns a boolean indicating whether the timeout element was found
+ * and removed.
+ */
+boolean_t untimeout(fcn, param)
+ register int (*fcn)();
+ register char * param;
+{
+ spl_t s;
+ register timer_elt_t elt;
+
+ s = splsched();
+ simple_lock(&timer_lock);
+ queue_iterate(&timer_head.chain, elt, timer_elt_t, chain) {
+
+ if ((fcn == elt->fcn) && (param == elt->param)) {
+ /*
+ * Found it.
+ */
+ remqueue(&timer_head.chain, (queue_entry_t)elt);
+ elt->set = TELT_UNSET;
+
+ simple_unlock(&timer_lock);
+ splx(s);
+ return (TRUE);
+ }
+ }
+ simple_unlock(&timer_lock);
+ splx(s);
+ return (FALSE);
+}
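A sketch of how a driver would use this compatibility interface (the callback and the sc softc pointer are hypothetical names):

    /* hypothetical driver callback */
    int watchdog_expire(param)
        char *param;
    {
        /* ... handle the expiry ... */
        return 0;
    }

    /* elsewhere in the driver: arm for 5 seconds (interval is in ticks) */
    timeout(watchdog_expire, (char *) sc, 5 * hz);

    /* disarm: FALSE means the callback already ran or was never armed */
    if (!untimeout(watchdog_expire, (char *) sc))
        ;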
diff --git a/kern/mach_debug.srv b/kern/mach_debug.srv
new file mode 100644
index 00000000..c78b9a4d
--- /dev/null
+++ b/kern/mach_debug.srv
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#include <mach_debug/mach_debug.defs>
diff --git a/kern/mach_factor.c b/kern/mach_factor.c
new file mode 100644
index 00000000..1a17213c
--- /dev/null
+++ b/kern/mach_factor.c
@@ -0,0 +1,153 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/mach_factor.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Compute the Mach Factor.
+ */
+
+#include <cpus.h>
+
+#include <mach/machine.h>
+#include <mach/processor_info.h>
+#include <kern/sched.h>
+#include <kern/processor.h>
+#include <kern/time_out.h>
+#if MACH_KERNEL
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#endif MACH_KERNEL
+
+
+long avenrun[3] = {0, 0, 0};
+long mach_factor[3] = {0, 0, 0};
+
+/*
+ * Values are scaled by LOAD_SCALE, defined in processor_info.h
+ */
+static long fract[3] = {
+ 800, /* (4.0/5.0) 5 second average */
+ 966, /* (29.0/30.0) 30 second average */
+ 983, /* (59.0/60.) 1 minute average */
+};
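Each update is thus an exponentially-weighted moving average, e.g. avenrun[i] = (avenrun[i]*fract[i] + average_now*(LOAD_SCALE - fract[i])) / LOAD_SCALE. With fract[0] = 800, i.e. 4/5 of LOAD_SCALE (1000), the previous value keeps 4/5 of its weight per pass, and since (4/5)^5 is about 0.33 a sample's influence decays to roughly a third after five passes; assuming one pass per second that is the "5 second average" of the comment, and the 966 and 983 entries give the 30-second and 1-minute averages the same way.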
+
+void compute_mach_factor()
+{
+ register processor_set_t pset;
+ register processor_t processor;
+ register int ncpus;
+ register int nthreads;
+ register long factor_now;
+ register long average_now;
+ register long load_now;
+
+ simple_lock(&all_psets_lock);
+ pset = (processor_set_t) queue_first(&all_psets);
+ while (!queue_end(&all_psets, (queue_entry_t)pset)) {
+
+ /*
+ * If no processors, this pset is in suspended animation.
+ * No load calculations are performed.
+ */
+ pset_lock(pset);
+ if((ncpus = pset->processor_count) > 0) {
+
+ /*
+ * Count number of threads.
+ */
+ nthreads = pset->runq.count;
+ processor = (processor_t) queue_first(&pset->processors);
+ while (!queue_end(&pset->processors,
+ (queue_entry_t)processor)) {
+ nthreads += processor->runq.count;
+ processor =
+ (processor_t) queue_next(&processor->processors);
+ }
+
+ /*
+ * account for threads on cpus.
+ */
+ nthreads += ncpus - pset->idle_count;
+
+ /*
+ * The current thread (running this calculation)
+ * doesn't count; it's always in the default pset.
+ */
+ if (pset == &default_pset)
+ nthreads -= 1;
+
+ if (nthreads > ncpus) {
+ factor_now = (ncpus * LOAD_SCALE) / (nthreads + 1);
+ load_now = (nthreads << SCHED_SHIFT) / ncpus;
+ }
+ else {
+ factor_now = (ncpus - nthreads) * LOAD_SCALE;
+ load_now = SCHED_SCALE;
+ }
+
+ /*
+ * Load average and mach factor calculations for
+ * those that ask about these things.
+ */
+
+ average_now = nthreads * LOAD_SCALE;
+
+ pset->mach_factor =
+ ((pset->mach_factor << 2) + factor_now)/5;
+ pset->load_average =
+ ((pset->load_average << 2) + average_now)/5;
+
+ /*
+ * And some ugly stuff to keep w happy.
+ */
+ if (pset == &default_pset) {
+ register int i;
+
+ for (i = 0; i < 3; i++) {
+ mach_factor[i] = ( (mach_factor[i]*fract[i])
+ + (factor_now*(LOAD_SCALE-fract[i])) )
+ / LOAD_SCALE;
+ avenrun[i] = ( (avenrun[i]*fract[i])
+ + (average_now*(LOAD_SCALE-fract[i])) )
+ / LOAD_SCALE;
+ }
+ }
+
+ /*
+ * sched_load is the only thing used by scheduler.
+ * It is always at least 1 (i.e. SCHED_SCALE).
+ */
+ pset->sched_load = (pset->sched_load + load_now) >> 1;
+ }
+
+ pset_unlock(pset);
+ pset = (processor_set_t) queue_next(&pset->all_psets);
+ }
+
+ simple_unlock(&all_psets_lock);
+}
diff --git a/kern/mach_host.srv b/kern/mach_host.srv
new file mode 100644
index 00000000..30d78db2
--- /dev/null
+++ b/kern/mach_host.srv
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+simport <kern/compat_xxx_defs.h>; /* for obsolete routines */
+
+#ifdef MIGRATING_THREADS
+#define thread_assign act_thread_assign
+#define thread_assign_default act_thread_assign_default
+#define thread_get_assignment act_thread_get_assignment
+#define thread_priority act_thread_priority
+#define thread_max_priority act_thread_max_priority
+#define thread_policy act_thread_policy
+#define thread_depress_abort act_thread_depress_abort
+#define thread_wire act_thread_wire
+#endif /* MIGRATING_THREADS */
+
+#include <mach/mach_host.defs>
diff --git a/kern/mach_param.h b/kern/mach_param.h
new file mode 100644
index 00000000..5fc20634
--- /dev/null
+++ b/kern/mach_param.h
@@ -0,0 +1,67 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/mach_param.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1986
+ *
+ * Mach system sizing parameters
+ *
+ */
+
+#ifndef _KERN_MACH_PARAM_H_
+#define _KERN_MACH_PARAM_H_
+
+#define THREAD_MAX 1024 /* Max number of threads */
+#define THREAD_CHUNK 64 /* Allocation chunk */
+
+#define TASK_MAX 1024 /* Max number of tasks */
+#define TASK_CHUNK 64 /* Allocation chunk */
+
+#define ACT_MAX 1024 /* Max number of acts */
+#define ACT_CHUNK 64 /* Allocation chunk */
+
+#define ACTPOOL_MAX 1024
+#define ACTPOOL_CHUNK 64
+
+#define PORT_MAX ((TASK_MAX * 3 + THREAD_MAX) /* kernel */ \
+ + (THREAD_MAX * 2) /* user */ \
+ + 40000) /* slop for objects */
+ /* Number of ports, system-wide */
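With the defaults above this works out to (1024*3 + 1024) + (1024*2) + 40000 = 4096 + 2048 + 40000 = 46144 ports system-wide.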
+
+#define SET_MAX (TASK_MAX + THREAD_MAX + 200)
+ /* Max number of port sets */
+
+#define ITE_MAX (1 << 16) /* Max number of splay tree entries */
+
+#define SPACE_MAX (TASK_MAX + 5) /* Max number of IPC spaces */
+
+#define IMAR_MAX (1 << 10) /* Max number of msg-accepted reqs */
+
+#endif _KERN_MACH_PARAM_H_
diff --git a/kern/machine.c b/kern/machine.c
new file mode 100644
index 00000000..fef541e9
--- /dev/null
+++ b/kern/machine.c
@@ -0,0 +1,765 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/machine.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1987
+ *
+ * Support for machine independent machine abstraction.
+ */
+
+#include <norma_ether.h>
+#include <cpus.h>
+#include <mach_host.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/mach_types.h>
+#include <mach/machine.h>
+#include <mach/host_info.h>
+#include <kern/counters.h>
+#include <kern/ipc_host.h>
+#include <kern/host.h>
+#include <kern/lock.h>
+#include <kern/processor.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <machine/machspl.h> /* for splsched */
+#include <sys/reboot.h>
+
+
+
+/*
+ * Exported variables:
+ */
+
+struct machine_info machine_info;
+struct machine_slot machine_slot[NCPUS];
+
+queue_head_t action_queue; /* assign/shutdown queue */
+decl_simple_lock_data(,action_lock);
+
+/*
+ * xxx_host_info:
+ *
+ * Return the host_info structure. This routine is exported to the
+ * user level.
+ */
+kern_return_t xxx_host_info(task, info)
+ task_t task;
+ machine_info_t info;
+{
+#ifdef lint
+ task++;
+#endif /* lint */
+ *info = machine_info;
+ return(KERN_SUCCESS);
+}
+
+/*
+ * xxx_slot_info:
+ *
+ * Return the slot_info structure for the specified slot. This routine
+ * is exported to the user level.
+ */
+kern_return_t xxx_slot_info(task, slot, info)
+ task_t task;
+ int slot;
+ machine_slot_t info;
+{
+#ifdef lint
+ task++;
+#endif /* lint */
+ if ((slot < 0) || (slot >= NCPUS))
+ return(KERN_INVALID_ARGUMENT);
+ *info = machine_slot[slot];
+ return(KERN_SUCCESS);
+}
+
+/*
+ * xxx_cpu_control:
+ *
+ * Support for user control of cpus. The user indicates which cpu
+ * he is interested in, and whether or not that cpu should be running.
+ */
+kern_return_t xxx_cpu_control(task, cpu, runnable)
+ task_t task;
+ int cpu;
+ boolean_t runnable;
+{
+#ifdef lint
+ task++; cpu++; runnable++;
+#endif /* lint */
+ return(KERN_FAILURE);
+}
+
+/*
+ * cpu_up:
+ *
+ * Flag specified cpu as up and running. Called when a processor comes
+ * online.
+ */
+void cpu_up(cpu)
+ int cpu;
+{
+ register struct machine_slot *ms;
+ register processor_t processor;
+ register spl_t s;
+
+ processor = cpu_to_processor(cpu);
+ pset_lock(&default_pset);
+ s = splsched();
+ processor_lock(processor);
+#if NCPUS > 1
+ init_ast_check(processor);
+#endif /* NCPUS > 1 */
+ ms = &machine_slot[cpu];
+ ms->running = TRUE;
+ machine_info.avail_cpus++;
+ pset_add_processor(&default_pset, processor);
+ processor->state = PROCESSOR_RUNNING;
+ processor_unlock(processor);
+ splx(s);
+ pset_unlock(&default_pset);
+}
+
+/*
+ * cpu_down:
+ *
+ * Flag specified cpu as down. Called when a processor is about to
+ * go offline.
+ */
+void cpu_down(cpu)
+ int cpu;
+{
+ register struct machine_slot *ms;
+ register processor_t processor;
+ register spl_t s;
+
+ s = splsched();
+ processor = cpu_to_processor(cpu);
+ processor_lock(processor);
+ ms = &machine_slot[cpu];
+ ms->running = FALSE;
+ machine_info.avail_cpus--;
+ /*
+ * processor has already been removed from pset.
+ */
+ processor->processor_set_next = PROCESSOR_SET_NULL;
+ processor->state = PROCESSOR_OFF_LINE;
+ processor_unlock(processor);
+ splx(s);
+}
+
+kern_return_t
+host_reboot(host, options)
+ host_t host;
+ int options;
+{
+ if (host == HOST_NULL)
+ return (KERN_INVALID_HOST);
+
+ if (options & RB_DEBUGGER) {
+ extern void Debugger();
+ Debugger("Debugger");
+ } else {
+#ifdef parisc
+/* XXX this could be made common */
+ halt_all_cpus(options);
+#else
+ halt_all_cpus(!(options & RB_HALT));
+#endif
+ }
+ return (KERN_SUCCESS);
+}
+
+#if NCPUS > 1
+/*
+ * processor_request_action - common internals of processor_assign
+ * and processor_shutdown. If new_pset is null, this is
+ * a shutdown, else it's an assign and caller must donate
+ * a reference.
+ */
+void
+processor_request_action(processor, new_pset)
+processor_t processor;
+processor_set_t new_pset;
+{
+ register processor_set_t pset;
+
+ /*
+ * Processor must be in a processor set. Must lock its idle lock to
+ * get at processor state.
+ */
+ pset = processor->processor_set;
+ simple_lock(&pset->idle_lock);
+
+ /*
+ * If the processor is dispatching, let it finish - it will set its
+ * state to running very soon.
+ */
+ while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING)
+ continue;
+
+ /*
+ * Now lock the action queue and do the dirty work.
+ */
+ simple_lock(&action_lock);
+
+ switch (processor->state) {
+ case PROCESSOR_IDLE:
+ /*
+ * Remove from idle queue.
+ */
+ queue_remove(&pset->idle_queue, processor, processor_t,
+ processor_queue);
+ pset->idle_count--;
+
+ /* fall through ... */
+ case PROCESSOR_RUNNING:
+ /*
+ * Put it on the action queue.
+ */
+ queue_enter(&action_queue, processor, processor_t,
+ processor_queue);
+
+ /* fall through ... */
+ case PROCESSOR_ASSIGN:
+ /*
+ * And ask the action_thread to do the work.
+ */
+
+ if (new_pset == PROCESSOR_SET_NULL) {
+ processor->state = PROCESSOR_SHUTDOWN;
+ }
+ else {
+ assert(processor->state != PROCESSOR_ASSIGN);
+ processor->state = PROCESSOR_ASSIGN;
+ processor->processor_set_next = new_pset;
+ }
+ break;
+
+ default:
+ printf("state: %d\n", processor->state);
+ panic("processor_request_action: bad state");
+ }
+ simple_unlock(&action_lock);
+ simple_unlock(&pset->idle_lock);
+
+ thread_wakeup((event_t)&action_queue);
+}
+
+#if MACH_HOST
+/*
+ * processor_assign() changes the processor set that a processor is
+ * assigned to. Any previous assignment in progress is overridden.
+ * Synchronizes with assignment completion if wait is TRUE.
+ */
+kern_return_t
+processor_assign(processor, new_pset, wait)
+processor_t processor;
+processor_set_t new_pset;
+boolean_t wait;
+{
+ spl_t s;
+
+ /*
+ * Check for null arguments.
+ * XXX Can't assign master processor.
+ */
+ if (processor == PROCESSOR_NULL || new_pset == PROCESSOR_SET_NULL ||
+ processor == master_processor) {
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ /*
+ * Get pset reference to donate to processor_request_action.
+ */
+ pset_reference(new_pset);
+
+ /*
+ * Check processor status.
+ * If shutdown or being shutdown, can't reassign.
+ * If being assigned, wait for assignment to finish.
+ */
+Retry:
+ s = splsched();
+ processor_lock(processor);
+ if(processor->state == PROCESSOR_OFF_LINE ||
+ processor->state == PROCESSOR_SHUTDOWN) {
+ /*
+ * Already shutdown or being shutdown -- Can't reassign.
+ */
+ processor_unlock(processor);
+ (void) splx(s);
+ pset_deallocate(new_pset);
+ return(KERN_FAILURE);
+ }
+
+ if (processor->state == PROCESSOR_ASSIGN) {
+ assert_wait((event_t) processor, TRUE);
+ processor_unlock(processor);
+ splx(s);
+ thread_block((void(*)()) 0);
+ goto Retry;
+ }
+
+ /*
+ * Avoid work if processor is already in this processor set.
+ */
+ if (processor->processor_set == new_pset) {
+ processor_unlock(processor);
+ (void) splx(s);
+ /* clean up dangling ref */
+ pset_deallocate(new_pset);
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * OK to start processor assignment.
+ */
+ processor_request_action(processor, new_pset);
+
+ /*
+ * Synchronization with completion.
+ */
+ if (wait) {
+ while (processor->state == PROCESSOR_ASSIGN ||
+ processor->state == PROCESSOR_SHUTDOWN) {
+ assert_wait((event_t)processor, TRUE);
+ processor_unlock(processor);
+ splx(s);
+ thread_block((void (*)()) 0);
+ s = splsched();
+ processor_lock(processor);
+ }
+ }
+ processor_unlock(processor);
+ splx(s);
+
+ return(KERN_SUCCESS);
+}
+
+#else /* MACH_HOST */
+
+kern_return_t
+processor_assign(processor, new_pset, wait)
+processor_t processor;
+processor_set_t new_pset;
+boolean_t wait;
+{
+#ifdef lint
+ processor++; new_pset++; wait++;
+#endif
+ return KERN_FAILURE;
+}
+
+#endif /* MACH_HOST */
+
+/*
+ * processor_shutdown() queues a processor up for shutdown.
+ * Any assignment in progress is overridden. It does not synchronize
+ * with the shutdown (can be called from interrupt level).
+ */
+kern_return_t
+processor_shutdown(processor)
+processor_t processor;
+{
+ spl_t s;
+
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ s = splsched();
+ processor_lock(processor);
+ if(processor->state == PROCESSOR_OFF_LINE ||
+ processor->state == PROCESSOR_SHUTDOWN) {
+ /*
+ * Already shutdown or being shutdown -- nothing to do.
+ */
+ processor_unlock(processor);
+ splx(s);
+ return(KERN_SUCCESS);
+ }
+
+ processor_request_action(processor, PROCESSOR_SET_NULL);
+ processor_unlock(processor);
+ splx(s);
+
+ return(KERN_SUCCESS);
+}
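A sketch of the expected use (hypothetical caller): the call only queues the request, and the action thread later migrates any threads and takes the cpu down.

    /* hypothetical: ask for cpu 2 to be taken offline;
       the action thread completes the shutdown asynchronously */
    (void) processor_shutdown(cpu_to_processor(2));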
+
+/*
+ * action_thread() shuts down processors or changes their assignment.
+ */
+void processor_doaction(); /* forward */
+
+void action_thread_continue()
+{
+ register processor_t processor;
+ register spl_t s;
+
+ while (TRUE) {
+ s = splsched();
+ simple_lock(&action_lock);
+ while ( !queue_empty(&action_queue)) {
+ processor = (processor_t) queue_first(&action_queue);
+ queue_remove(&action_queue, processor, processor_t,
+ processor_queue);
+ simple_unlock(&action_lock);
+ (void) splx(s);
+
+ processor_doaction(processor);
+
+ s = splsched();
+ simple_lock(&action_lock);
+ }
+
+ assert_wait((event_t) &action_queue, FALSE);
+ simple_unlock(&action_lock);
+ (void) splx(s);
+ counter(c_action_thread_block++);
+ thread_block(action_thread_continue);
+ }
+}
+
+void action_thread()
+{
+ action_thread_continue();
+ /*NOTREACHED*/
+}
+
+/*
+ * processor_doaction actually does the shutdown. The trick here
+ * is to schedule ourselves onto a cpu and then save our
+ * context back into the runqs before taking out the cpu.
+ */
+#ifdef __GNUC__
+__volatile__
+#endif
+void processor_doshutdown(); /* forward */
+
+void processor_doaction(processor)
+register processor_t processor;
+{
+ thread_t this_thread;
+ spl_t s;
+ register processor_set_t pset;
+#if MACH_HOST
+ register processor_set_t new_pset;
+ register thread_t thread;
+ register thread_t prev_thread = THREAD_NULL;
+ boolean_t have_pset_ref = FALSE;
+#endif /* MACH_HOST */
+
+ /*
+ * Get onto the processor to shutdown
+ */
+ this_thread = current_thread();
+ thread_bind(this_thread, processor);
+ thread_block((void (*)()) 0);
+
+ pset = processor->processor_set;
+#if MACH_HOST
+ /*
+ * If this is the last processor in the processor_set,
+ * stop all the threads first.
+ */
+ pset_lock(pset);
+ if (pset->processor_count == 1) {
+ /*
+ * First suspend all of them.
+ */
+ queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
+ thread_hold(thread);
+ }
+ pset->empty = TRUE;
+ /*
+ * Now actually stop them. Need a pset reference.
+ */
+ pset->ref_count++;
+ have_pset_ref = TRUE;
+
+Restart_thread:
+ prev_thread = THREAD_NULL;
+ queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
+ thread_reference(thread);
+ pset_unlock(pset);
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread);
+
+ /*
+ * Only wait for threads still in the pset.
+ */
+ thread_freeze(thread);
+ if (thread->processor_set != pset) {
+ /*
+ * It got away - start over.
+ */
+ thread_unfreeze(thread);
+ thread_deallocate(thread);
+ pset_lock(pset);
+ goto Restart_thread;
+ }
+
+ (void) thread_dowait(thread, TRUE);
+ prev_thread = thread;
+ pset_lock(pset);
+ thread_unfreeze(prev_thread);
+ }
+ }
+ pset_unlock(pset);
+
+ /*
+ * At this point, it is ok to remove the processor from the pset.
+ * We can use processor->processor_set_next without locking the
+ * processor, since it cannot change while processor->state is
+ * PROCESSOR_ASSIGN or PROCESSOR_SHUTDOWN.
+ */
+
+ new_pset = processor->processor_set_next;
+
+Restart_pset:
+ if (new_pset) {
+ /*
+ * Reassigning processor.
+ */
+
+ if ((integer_t) pset < (integer_t) new_pset) {
+ pset_lock(pset);
+ pset_lock(new_pset);
+ }
+ else {
+ pset_lock(new_pset);
+ pset_lock(pset);
+ }
+ if (!(new_pset->active)) {
+ pset_unlock(new_pset);
+ pset_unlock(pset);
+ pset_deallocate(new_pset);
+ new_pset = &default_pset;
+ pset_reference(new_pset);
+ goto Restart_pset;
+ }
+
+ /*
+ * Handle remove last / assign first race.
+ * Only happens if there is more than one action thread.
+ */
+ while (new_pset->empty && new_pset->processor_count > 0) {
+ pset_unlock(new_pset);
+ pset_unlock(pset);
+ while (*(volatile boolean_t *)&new_pset->empty &&
+ *(volatile int *)&new_pset->processor_count > 0)
+ /* spin */;
+ goto Restart_pset;
+ }
+
+ /*
+ * Lock the processor. new_pset should not have changed.
+ */
+ s = splsched();
+ processor_lock(processor);
+ assert(processor->processor_set_next == new_pset);
+
+ /*
+ * Shutdown may have been requested while this assignment
+ * was in progress.
+ */
+ if (processor->state == PROCESSOR_SHUTDOWN) {
+ processor->processor_set_next = PROCESSOR_SET_NULL;
+ pset_unlock(new_pset);
+ goto shutdown; /* releases pset reference */
+ }
+
+ /*
+ * Do assignment, then wakeup anyone waiting for it.
+ */
+ pset_remove_processor(pset, processor);
+ pset_unlock(pset);
+
+ pset_add_processor(new_pset, processor);
+ if (new_pset->empty) {
+ /*
+ * Set all the threads loose.
+ *
+ * NOTE: this appears to violate the locking
+ * order, since the processor lock should
+ * be taken AFTER a thread lock. However,
+ * thread_setrun (called by thread_release)
+ * only takes the processor lock if the
+ * processor is idle. The processor is
+ * not idle here.
+ */
+ queue_iterate(&new_pset->threads, thread, thread_t,
+ pset_threads) {
+ thread_release(thread);
+ }
+ new_pset->empty = FALSE;
+ }
+ processor->processor_set_next = PROCESSOR_SET_NULL;
+ processor->state = PROCESSOR_RUNNING;
+ thread_wakeup((event_t)processor);
+ processor_unlock(processor);
+ splx(s);
+ pset_unlock(new_pset);
+
+ /*
+ * Clean up dangling references, and release our binding.
+ */
+ pset_deallocate(new_pset);
+ if (have_pset_ref)
+ pset_deallocate(pset);
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread);
+ thread_bind(this_thread, PROCESSOR_NULL);
+
+ thread_block((void (*)()) 0);
+ return;
+ }
+
+#endif /* MACH_HOST */
+
+ /*
+ * Do shutdown, make sure we live when processor dies.
+ */
+ if (processor->state != PROCESSOR_SHUTDOWN) {
+ printf("state: %d\n", processor->state);
+ panic("action_thread -- bad processor state");
+ }
+
+ s = splsched();
+ processor_lock(processor);
+
+ shutdown:
+ pset_remove_processor(pset, processor);
+ processor_unlock(processor);
+ pset_unlock(pset);
+ splx(s);
+
+ /*
+ * Clean up dangling references, and release our binding.
+ */
+#if MACH_HOST
+ if (new_pset != PROCESSOR_SET_NULL)
+ pset_deallocate(new_pset);
+ if (have_pset_ref)
+ pset_deallocate(pset);
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread);
+#endif /* MACH_HOST */
+
+ thread_bind(this_thread, PROCESSOR_NULL);
+ switch_to_shutdown_context(this_thread,
+ processor_doshutdown,
+ processor);
+
+}
+
+/*
+ * Actually do the processor shutdown. This is called at splsched,
+ * running on the processor's shutdown stack.
+ */
+
+#ifdef __GNUC__
+extern __volatile__ void halt_cpu();
+#endif
+
+#ifdef __GNUC__
+__volatile__
+#endif
+void processor_doshutdown(processor)
+register processor_t processor;
+{
+ register int cpu = processor->slot_num;
+
+ timer_switch(&kernel_timer[cpu]);
+
+ /*
+ * Ok, now exit this cpu.
+ */
+ PMAP_DEACTIVATE_KERNEL(cpu);
+#ifndef MIGRATING_THREADS
+ active_threads[cpu] = THREAD_NULL;
+#endif
+ cpu_down(cpu);
+ thread_wakeup((event_t)processor);
+ halt_cpu();
+ /*
+ * The action thread returns to life after the call to
+ * switch_to_shutdown_context above, on some other cpu.
+ */
+
+ /*NOTREACHED*/
+}
+#else /* NCPUS > 1 */
+
+kern_return_t
+processor_assign(processor, new_pset, wait)
+processor_t processor;
+processor_set_t new_pset;
+boolean_t wait;
+{
+#ifdef lint
+ processor++; new_pset++; wait++;
+#endif /* lint */
+ return(KERN_FAILURE);
+}
+
+#endif /* NCPUS > 1 */
+
+kern_return_t
+host_get_boot_info(priv_host, boot_info)
+ host_t priv_host;
+ kernel_boot_info_t boot_info;
+{
+ char *src = "";
+
+ if (priv_host == HOST_NULL) {
+ return KERN_INVALID_HOST;
+ }
+
+#if NORMA_ETHER
+{
+ extern char *norma_ether_boot_info();
+ src = norma_ether_boot_info();
+}
+#endif /* NORMA_ETHER */
+#if defined(iPSC386) || defined(iPSC860)
+{
+ extern char *ipsc_boot_environ();
+ src = ipsc_boot_environ();
+}
+#endif /* defined(iPSC386) || defined(iPSC860) */
+
+ (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
+ return KERN_SUCCESS;
+}
diff --git a/kern/macro_help.h b/kern/macro_help.h
new file mode 100644
index 00000000..e13b01d9
--- /dev/null
+++ b/kern/macro_help.h
@@ -0,0 +1,55 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/macro_help.h
+ *
+ * Provide help in making lint-free macro routines
+ *
+ */
+
+#ifndef _KERN_MACRO_HELP_H_
+#define _KERN_MACRO_HELP_H_
+
+#if !defined(MACRO_BEGIN)
+
+#include <mach/boolean.h>
+
+#ifdef lint
+boolean_t NEVER;
+boolean_t ALWAYS;
+#else /* lint */
+#define NEVER FALSE
+#define ALWAYS TRUE
+#endif /* lint */
+
+#define MACRO_BEGIN do {
+#define MACRO_END } while (NEVER)
+
+#define MACRO_RETURN if (ALWAYS) return
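+
+/*
+ * For instance, a hypothetical swap macro written with these helpers
+ * expands to a single statement, so it remains safe in an unbraced
+ * if/else:
+ *
+ *	#define swap_ints(a, b)		\
+ *	MACRO_BEGIN			\
+ *		int _tmp = (a);		\
+ *		(a) = (b);		\
+ *		(b) = _tmp;		\
+ *	MACRO_END
+ */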
+
+#endif /* !MACRO_BEGIN */
+
+#endif /* _KERN_MACRO_HELP_H_ */
diff --git a/kern/pc_sample.c b/kern/pc_sample.c
new file mode 100644
index 00000000..01b9acbd
--- /dev/null
+++ b/kern/pc_sample.c
@@ -0,0 +1,299 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+
+
+#include <mach_pcsample.h>
+
+#include <mach/mach_types.h> /* vm_address_t */
+#include <mach/std_types.h> /* pointer_t */
+#include <mach/pc_sample.h>
+#include <kern/host.h>
+#include <kern/thread.h>
+#include <kern/pc_sample.h>
+
+#if MACH_PCSAMPLE
+
+#define MAX_PC_SAMPLES 512
+
+typedef sampled_pc_t sampled_pcs[MAX_PC_SAMPLES];
+
+int pc_sampling_enabled = 0;
+decl_simple_lock_data(, pc_sampling_lock) /* lock for enabling */
+
+void take_pc_sample(
+ register thread_t t,
+ register sample_control_t *cp,
+ sampled_pc_flavor_t flavor)
+{
+ vm_offset_t pc;
+ struct sampled_pc *sample;
+
+ pc = interrupted_pc(t);
+ cp->seqno++;
+ sample = &((sampled_pc_t *)cp->buffer)[cp->seqno % MAX_PC_SAMPLES];
+ sample->id = (natural_t)t;
+ sample->pc = pc;
+ sample->sampletype = flavor;
+}
+
+kern_return_t
+thread_enable_pc_sampling(
+ thread_t thread,
+ int *tickp,
+ sampled_pc_flavor_t flavors)
+{
+ vm_offset_t buf;
+ extern int tick;
+
+ if (thread == THREAD_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+ if (thread->pc_sample.buffer == 0) {
+ buf = (vm_offset_t) kalloc(sizeof (sampled_pcs));
+ if (buf == 0) {
+ printf("thread_enable_pc_sampling: kalloc failed\n");
+ return KERN_INVALID_ARGUMENT;
+ }
+ thread->pc_sample.buffer = buf;
+ thread->pc_sample.seqno = 0;
+ }
+ *tickp = tick;
+ thread->pc_sample.sampletypes = flavors;
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+task_enable_pc_sampling(
+ task_t task,
+ int *tickp,
+ sampled_pc_flavor_t flavors)
+{
+ vm_offset_t buf;
+ extern int tick;
+
+ if (task == TASK_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+ if (task->pc_sample.buffer == 0) {
+ buf = (vm_offset_t) kalloc(sizeof (sampled_pcs));
+ if (buf == 0) {
+ printf("task_enable_pc_sampling: kalloc failed\n");
+ return KERN_INVALID_ARGUMENT;
+ }
+ task->pc_sample.buffer = buf;
+ task->pc_sample.seqno = 0;
+ }
+ *tickp = tick;
+ task->pc_sample.sampletypes = flavors;
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+thread_disable_pc_sampling(
+ thread_t thread,
+ int *samplecntp)
+{
+ vm_offset_t buf;
+
+ if (thread == THREAD_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+ if ((buf = thread->pc_sample.buffer) != 0)
+ kfree(buf, sizeof (sampled_pcs));
+ thread->pc_sample.buffer = (vm_offset_t) 0;
+ thread->pc_sample.seqno = 0;
+ thread->pc_sample.sampletypes = 0; /* shut off sampling */
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+task_disable_pc_sampling(
+ task_t task,
+ int *samplecntp)
+{
+ vm_offset_t buf;
+
+ if (task == TASK_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+ if ((buf = task->pc_sample.buffer) != 0)
+ kfree(buf, sizeof (sampled_pcs));
+ task->pc_sample.buffer = (vm_offset_t) 0;
+ task->pc_sample.seqno = 0;
+ task->pc_sample.sampletypes = 0; /* shut off sampling */
+
+ return KERN_SUCCESS;
+}
+
+static kern_return_t
+get_sampled_pcs(
+ sample_control_t *cp,
+ sampled_pc_seqno_t *seqnop,
+ sampled_pc_array_t sampled_pcs_out,
+ int *sampled_pcs_cntp)
+{
+ int nsamples;
+ sampled_pc_seqno_t seqidx1, seqidx2;
+
+ nsamples = cp->seqno - *seqnop;
+ seqidx1 = *seqnop % MAX_PC_SAMPLES; /* index of *seqnop */
+ seqidx2 = cp->seqno % MAX_PC_SAMPLES; /* index of cp->seqno */
+
+ if (nsamples > MAX_PC_SAMPLES) {
+ nsamples = MAX_PC_SAMPLES;
+ seqidx1 = (seqidx2 + 1) % MAX_PC_SAMPLES;
+ }
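+
+ /*
+ * For example (no overflow): with *seqnop == 510 and cp->seqno == 515,
+ * nsamples is 5, seqidx1 is 510 and seqidx2 is 3, so the wraparound
+ * branch below copies sample 511 followed by samples 0 through 3.
+ */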
+
+ if (nsamples > 0) {
+ /*
+ * Carefully copy the samples into sampled_pcs_out IN ORDER.
+ */
+ if (seqidx1 < seqidx2) {
+ /*
+ * Simple case: no wraparound.
+ * Copy from seqidx1 to seqidx2.
+ */
+ bcopy((sampled_pc_array_t)cp->buffer + seqidx1 + 1,
+ sampled_pcs_out,
+ nsamples * sizeof(sampled_pc_t));
+ } else {
+ /* seqidx1 >= seqidx2 -- Handle wraparound. */
+
+ bcopy((sampled_pc_array_t)cp->buffer + seqidx1 + 1,
+ sampled_pcs_out,
+ (MAX_PC_SAMPLES - seqidx1 - 1) * sizeof(sampled_pc_t));
+
+ bcopy((sampled_pc_array_t)cp->buffer,
+ sampled_pcs_out + (MAX_PC_SAMPLES - seqidx1 - 1),
+ (seqidx2 + 1) * sizeof(sampled_pc_t));
+ }
+ } else {
+ /* nsamples can be zero because of overflow, or because
+ * we are being lied to. In either case, return nothing.
+ * If overflow, only once in a blue moon. If being lied to,
+ * then we have no obligation to return anything useful anyway.
+ */
+ ;
+ }
+
+ *sampled_pcs_cntp = nsamples;
+ *seqnop = cp->seqno;
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+thread_get_sampled_pcs(
+ thread_t thread,
+ sampled_pc_seqno_t *seqnop,
+ sampled_pc_array_t sampled_pcs_out,
+ int *sampled_pcs_cntp)
+{
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (thread->pc_sample.buffer == 0)
+ return KERN_FAILURE;
+
+ return get_sampled_pcs(&thread->pc_sample, seqnop, sampled_pcs_out,
+ sampled_pcs_cntp);
+}
+
+kern_return_t
+task_get_sampled_pcs(
+ task_t task,
+ sampled_pc_seqno_t *seqnop,
+ sampled_pc_array_t sampled_pcs_out,
+ int *sampled_pcs_cntp)
+{
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (task->pc_sample.buffer == 0)
+ return KERN_FAILURE;
+
+ return get_sampled_pcs(&task->pc_sample, seqnop, sampled_pcs_out,
+ sampled_pcs_cntp);
+}
+
+#else /* MACH_PCSAMPLE */
+
+kern_return_t
+thread_enable_pc_sampling(
+ thread_t thread,
+ int *tickp,
+ sampled_pc_flavor_t flavors)
+{
+ return KERN_FAILURE; /* not implemented */
+}
+
+kern_return_t
+task_enable_pc_sampling(
+ task_t task,
+ int *tickp,
+ sampled_pc_flavor_t flavors)
+{
+ return KERN_FAILURE; /* not implemented */
+}
+
+kern_return_t
+thread_disable_pc_sampling(
+ thread_t thread,
+ int *samplecntp)
+{
+ return KERN_FAILURE; /* not implemented */
+}
+
+kern_return_t
+task_disable_pc_sampling(
+ task_t task,
+ int *samplecntp)
+{
+ return KERN_FAILURE; /* not implemented */
+}
+
+kern_return_t
+thread_get_sampled_pcs(
+ thread_t thread,
+ sampled_pc_seqno_t *seqnop,
+ sampled_pc_array_t sampled_pcs_out,
+ int *sampled_pcs_cntp)
+{
+ return KERN_FAILURE; /* not implemented */
+}
+
+kern_return_t
+task_get_sampled_pcs(
+ task_t task,
+ sampled_pc_seqno_t *seqnop,
+ sampled_pc_array_t sampled_pcs_out,
+ int *sampled_pcs_cntp)
+{
+ return KERN_FAILURE; /* not implemented */
+}
+
+#endif /* MACH_PCSAMPLE */
diff --git a/kern/pc_sample.h b/kern/pc_sample.h
new file mode 100644
index 00000000..02891e0f
--- /dev/null
+++ b/kern/pc_sample.h
@@ -0,0 +1,90 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: pc_sample.h,v $
+ * Revision 1.1.1.1 1996/10/30 01:38:13 thomas
+ * Imported from UK22
+ *
+ * Revision 1.1 1994/11/02 02:24:15 law
+ * Initial revision
+ *
+ * Revision 2.2 93/11/17 19:06:01 dbg
+ * Moved kernel internal definitions here from mach/pc_sample.h.
+ * [93/09/24 dbg]
+ *
+ */
+
+/*
+ * Kernel definitions for PC sampling.
+ */
+#ifndef _KERN_PC_SAMPLE_H_
+#define _KERN_PC_SAMPLE_H_
+
+#include <mach/pc_sample.h>
+#include <mach/machine/vm_types.h>
+#include <kern/kern_types.h>
+#include <kern/macro_help.h>
+
+/*
+ * Control structure for sampling, included in
+ * threads and tasks. If sampletypes is 0, no
+ * sampling is done.
+ */
+
+struct sample_control {
+ vm_offset_t buffer;
+ unsigned int seqno;
+ sampled_pc_flavor_t sampletypes;
+};
+
+typedef struct sample_control sample_control_t;
+
+/*
+ * Routines to take PC samples.
+ */
+extern void take_pc_sample(
+ thread_t thread,
+ sample_control_t *cp,
+ sampled_pc_flavor_t flavor);
+
+/*
+ * Macro to do quick flavor check for sampling,
+ * on both threads and tasks.
+ */
+#define take_pc_sample_macro(thread, flavor) \
+ MACRO_BEGIN \
+ task_t task; \
+ \
+ if ((thread)->pc_sample.sampletypes & (flavor)) \
+ take_pc_sample((thread), &(thread)->pc_sample, (flavor)); \
+ \
+ task = (thread)->task; \
+ if (task->pc_sample.sampletypes & (flavor)) \
+ take_pc_sample((thread), &task->pc_sample, (flavor)); \
+ MACRO_END
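+
+/*
+ * A typical caller is the clock interrupt or trap path, e.g. (flavor
+ * name assumed from <mach/pc_sample.h>):
+ *
+ *	take_pc_sample_macro(current_thread(), SAMPLED_PC_PERIODIC);
+ */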
+
+#endif /* _KERN_PC_SAMPLE_H_ */
diff --git a/kern/printf.c b/kern/printf.c
new file mode 100644
index 00000000..693c6605
--- /dev/null
+++ b/kern/printf.c
@@ -0,0 +1,637 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Common code for printf et al.
+ *
+ * The calling routine typically takes a variable number of arguments,
+ * and passes the address of the first one. This implementation
+ * assumes a straightforward stack implementation, aligned to the
+ * machine's wordsize. Increasing addresses are assumed to point to
+ * successive arguments (left-to-right), as is the case for a machine
+ * with a downward-growing stack with arguments pushed right-to-left.
+ *
+ * To write, for example, fprintf() using this routine, the code
+ *
+ * fprintf(fd, format, args)
+ * FILE *fd;
+ * char *format;
+ * {
+ * _doprnt(format, &args, fd);
+ * }
+ *
+ * would suffice. (This example does not handle fprintf's "return
+ * value" correctly, but who looks at the return value of fprintf
+ * anyway?)
+ *
+ * This version implements the following printf features:
+ *
+ * %d decimal conversion
+ * %u unsigned conversion
+ * %x hexadecimal conversion
+ * %X hexadecimal conversion with capital letters
+ * %o octal conversion
+ * %c character
+ * %s string
+ * %m.n field width, precision
+ * %-m.n left adjustment
+ * %0m.n zero-padding
+ * %*.* width and precision taken from arguments
+ *
+ * This version does not implement %f, %e, or %g. It accepts, but
+ * ignores, an `l' as in %ld, %lo, %lx, and %lu, and therefore will not
+ * work correctly on machines for which sizeof(long) != sizeof(int).
+ * It does not even parse %D, %O, or %U; you should be using %ld, %o and
+ * %lu if you mean long conversion.
+ *
+ * As mentioned, this version does not return any reasonable value.
+ *
+ * Permission is granted to use, modify, or propagate this code as
+ * long as this notice is incorporated.
+ *
+ * Steve Summit 3/25/87
+ */
+
+/*
+ * Added formats for decoding device registers:
+ *
+ * printf("reg = %b", regval, "<base><arg>*")
+ *
+ * where <base> is the output base expressed as a control character:
+ * i.e. '\10' gives octal, '\20' gives hex. Each <arg> is a sequence of
+ * characters, the first of which gives the bit number to be inspected
+ * (origin 1), and the rest (up to a control character (<= 32)) give the
+ * name of the register. Thus
+ * printf("reg = %b\n", 3, "\10\2BITTWO\1BITONE")
+ * would produce
+ * reg = 3<BITTWO,BITONE>
+ *
+ * If the second character in <arg> is also a control character, it
+ * indicates the last bit of a bit field. In this case, printf will extract
+ * bits <1> to <2> and print it. Characters following the second control
+ * character are printed before the bit field.
+ * printf("reg = %b\n", 0xb, "\10\4\3FIELD1=\2BITTWO\1BITONE")
+ * would produce
+ * reg = b<FIELD1=2,BITONE>
+ */
+/*
+ * Added for general use:
+ * # prefix for alternate format:
+ * 0x (0X) for hex
+ * leading 0 for octal
+ * + print '+' if positive
+ * blank print ' ' if positive
+ *
+ * z signed hexadecimal
+ * r signed, 'radix'
+ * n unsigned, 'radix'
+ *
+ * D,U,O,Z same as corresponding lower-case versions
+ * (compatibility)
+ */
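+/*
+ * For example, since the console printf below passes 16 as the default
+ * radix to _doprnt, printf("%r", 255) prints "ff" (signed, default
+ * radix) and printf("%#x", 255) prints "0xff".
+ */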
+
+#include <mach/boolean.h>
+#include <kern/lock.h>
+#include <kern/strings.h>
+#include <sys/varargs.h>
+
+#define isdigit(d) ((d) >= '0' && (d) <= '9')
+#define Ctod(c) ((c) - '0')
+
+#define MAXBUF (sizeof(long int) * 8) /* enough for binary */
+
+
+void printnum(
+ register unsigned long u,
+ register int base,
+ void (*putc)( char, vm_offset_t ),
+ vm_offset_t putc_arg)
+{
+ char buf[MAXBUF]; /* build number here */
+ register char * p = &buf[MAXBUF-1];
+ static char digs[] = "0123456789abcdef";
+
+ do {
+ *p-- = digs[u % base];
+ u /= base;
+ } while (u != 0);
+
+ while (++p != &buf[MAXBUF])
+ (*putc)(*p, putc_arg);
+
+}
+
+boolean_t _doprnt_truncates = FALSE;
+
+/* printf could be called at _any_ point during system initialization,
+ including before printf_init() gets called from the "normal" place
+ in kern/startup.c. */
+boolean_t _doprnt_lock_initialized = FALSE;
+decl_simple_lock_data(,_doprnt_lock)
+
+void printf_init()
+{
+ if (!_doprnt_lock_initialized)
+ {
+ _doprnt_lock_initialized = TRUE;
+ simple_lock_init(&_doprnt_lock);
+ }
+}
+
+void _doprnt(
+ register char *fmt,
+ va_list *argp,
+ /* character output routine */
+ void (*putc)( char, vm_offset_t),
+ int radix, /* default radix - for '%r' */
+ vm_offset_t putc_arg)
+{
+ int length;
+ int prec;
+ boolean_t ladjust;
+ char padc;
+ long n;
+ unsigned long u;
+ int plus_sign;
+ int sign_char;
+ boolean_t altfmt, truncate;
+ int base;
+ register char c;
+
+ printf_init();
+
+#if 0
+ /* Make sure that we get *some* printout, no matter what */
+ simple_lock(&_doprnt_lock);
+#else
+ {
+ register int i = 0;
+ while (i < 1*1024*1024) {
+ if (simple_lock_try(&_doprnt_lock))
+ break;
+ i++;
+ }
+ }
+#endif
+
+ while ((c = *fmt) != '\0') {
+ if (c != '%') {
+ (*putc)(c, putc_arg);
+ fmt++;
+ continue;
+ }
+
+ fmt++;
+
+ length = 0;
+ prec = -1;
+ ladjust = FALSE;
+ padc = ' ';
+ plus_sign = 0;
+ sign_char = 0;
+ altfmt = FALSE;
+
+ while (TRUE) {
+ c = *fmt;
+ if (c == '#') {
+ altfmt = TRUE;
+ }
+ else if (c == '-') {
+ ladjust = TRUE;
+ }
+ else if (c == '+') {
+ plus_sign = '+';
+ }
+ else if (c == ' ') {
+ if (plus_sign == 0)
+ plus_sign = ' ';
+ }
+ else
+ break;
+ fmt++;
+ }
+
+ if (c == '0') {
+ padc = '0';
+ c = *++fmt;
+ }
+
+ if (isdigit(c)) {
+ while(isdigit(c)) {
+ length = 10 * length + Ctod(c);
+ c = *++fmt;
+ }
+ }
+ else if (c == '*') {
+ length = va_arg(*argp, int);
+ c = *++fmt;
+ if (length < 0) {
+ ladjust = !ladjust;
+ length = -length;
+ }
+ }
+
+ if (c == '.') {
+ c = *++fmt;
+ if (isdigit(c)) {
+ prec = 0;
+ while(isdigit(c)) {
+ prec = 10 * prec + Ctod(c);
+ c = *++fmt;
+ }
+ }
+ else if (c == '*') {
+ prec = va_arg(*argp, int);
+ c = *++fmt;
+ }
+ }
+
+ if (c == 'l')
+ c = *++fmt; /* need it if sizeof(int) < sizeof(long) */
+
+ truncate = FALSE;
+
+ switch(c) {
+ case 'b':
+ case 'B':
+ {
+ register char *p;
+ boolean_t any;
+ register int i;
+
+ u = va_arg(*argp, unsigned long);
+ p = va_arg(*argp, char *);
+ base = *p++;
+ printnum(u, base, putc, putc_arg);
+
+ if (u == 0)
+ break;
+
+ any = FALSE;
+ while ((i = *p++) != 0) {
+ /* NOTE: 32 is ASCII space; chars <= 32 end a field name */
+ if (*p <= 32) {
+ /*
+ * Bit field
+ */
+ register int j;
+ if (any)
+ (*putc)(',', putc_arg);
+ else {
+ (*putc)('<', putc_arg);
+ any = TRUE;
+ }
+ j = *p++;
+ for (; (c = *p) > 32; p++)
+ (*putc)(c, putc_arg);
+ printnum((unsigned)( (u>>(j-1)) & ((2<<(i-j))-1)),
+ base, putc, putc_arg);
+ }
+ else if (u & (1<<(i-1))) {
+ if (any)
+ (*putc)(',', putc_arg);
+ else {
+ (*putc)('<', putc_arg);
+ any = TRUE;
+ }
+ for (; (c = *p) > 32; p++)
+ (*putc)(c, putc_arg);
+ }
+ else {
+ for (; *p > 32; p++)
+ continue;
+ }
+ }
+ if (any)
+ (*putc)('>', putc_arg);
+ break;
+ }
+
+ case 'c':
+ c = va_arg(*argp, int);
+ (*putc)(c, putc_arg);
+ break;
+
+ case 's':
+ {
+ register char *p;
+ register char *p2;
+
+ if (prec == -1)
+ prec = 0x7fffffff; /* MAXINT */
+
+ p = va_arg(*argp, char *);
+
+ if (p == (char *)0)
+ p = "";
+
+ if (length > 0 && !ladjust) {
+ n = 0;
+ p2 = p;
+
+ for (; *p != '\0' && n < prec; p++)
+ n++;
+
+ p = p2;
+
+ while (n < length) {
+ (*putc)(' ', putc_arg);
+ n++;
+ }
+ }
+
+ n = 0;
+
+ while (*p != '\0') {
+ if (++n > prec)
+ break;
+
+ (*putc)(*p++, putc_arg);
+ }
+
+ if (n < length && ladjust) {
+ while (n < length) {
+ (*putc)(' ', putc_arg);
+ n++;
+ }
+ }
+
+ break;
+ }
+
+ case 'o':
+ truncate = _doprnt_truncates;
+ case 'O':
+ base = 8;
+ goto print_unsigned;
+
+ case 'd':
+ truncate = _doprnt_truncates;
+ case 'D':
+ base = 10;
+ goto print_signed;
+
+ case 'u':
+ truncate = _doprnt_truncates;
+ case 'U':
+ base = 10;
+ goto print_unsigned;
+
+ case 'x':
+ truncate = _doprnt_truncates;
+ case 'X':
+ base = 16;
+ goto print_unsigned;
+
+ case 'z':
+ truncate = _doprnt_truncates;
+ case 'Z':
+ base = 16;
+ goto print_signed;
+
+ case 'r':
+ truncate = _doprnt_truncates;
+ case 'R':
+ base = radix;
+ goto print_signed;
+
+ case 'n':
+ truncate = _doprnt_truncates;
+ case 'N':
+ base = radix;
+ goto print_unsigned;
+
+ print_signed:
+ n = va_arg(*argp, long);
+ if (n >= 0) {
+ u = n;
+ sign_char = plus_sign;
+ }
+ else {
+ u = -n;
+ sign_char = '-';
+ }
+ goto print_num;
+
+ print_unsigned:
+ u = va_arg(*argp, unsigned long);
+ goto print_num;
+
+ print_num:
+ {
+ char buf[MAXBUF]; /* build number here */
+ register char * p = &buf[MAXBUF-1];
+ static char digits[] = "0123456789abcdef";
+ char *prefix = 0;
+
+ if (truncate) u = (long)((int)(u));
+
+ if (u != 0 && altfmt) {
+ if (base == 8)
+ prefix = "0";
+ else if (base == 16)
+ prefix = "0x";
+ }
+
+ do {
+ *p-- = digits[u % base];
+ u /= base;
+ } while (u != 0);
+
+ length -= (&buf[MAXBUF-1] - p);
+ if (sign_char)
+ length--;
+ if (prefix)
+ length -= strlen(prefix);
+
+ if (padc == ' ' && !ladjust) {
+ /* blank padding goes before prefix */
+ while (--length >= 0)
+ (*putc)(' ', putc_arg);
+ }
+ if (sign_char)
+ (*putc)(sign_char, putc_arg);
+ if (prefix)
+ while (*prefix)
+ (*putc)(*prefix++, putc_arg);
+ if (padc == '0') {
+ /* zero padding goes after sign and prefix */
+ while (--length >= 0)
+ (*putc)('0', putc_arg);
+ }
+ while (++p != &buf[MAXBUF])
+ (*putc)(*p, putc_arg);
+
+ if (ladjust) {
+ while (--length >= 0)
+ (*putc)(' ', putc_arg);
+ }
+ break;
+ }
+
+ case '\0':
+ fmt--;
+ break;
+
+ default:
+ (*putc)(c, putc_arg);
+ }
+ fmt++;
+ }
+
+ simple_unlock(&_doprnt_lock);
+}
+
+/*
+ * Printing (to console)
+ */
+extern void cnputc( char, /*not really*/vm_offset_t);
+
+void vprintf(fmt, listp)
+ char * fmt;
+ va_list listp;
+{
+ _doprnt(fmt, &listp, cnputc, 16, 0);
+}
+
+/*VARARGS1*/
+void printf(fmt, va_alist)
+ char * fmt;
+ va_dcl
+{
+ va_list listp;
+ va_start(listp);
+ vprintf(fmt, listp);
+ va_end(listp);
+}
+
+int indent = 0;
+
+/*
+ * Printing (to console) with indentation.
+ */
+/*VARARGS1*/
+void iprintf(fmt, va_alist)
+ char * fmt;
+ va_dcl
+{
+ va_list listp;
+ register int i;
+
+ for (i = indent; i > 0; ){
+ if (i >= 8) {
+ printf("\t");
+ i -= 8;
+ }
+ else {
+ printf(" ");
+ i--;
+ }
+ }
+ va_start(listp);
+ _doprnt(fmt, &listp, cnputc, 16, 0);
+ va_end(listp);
+}
+
+/*
+ * Printing to generic buffer
+ * Returns #bytes printed.
+ * Strings are zero-terminated.
+ */
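+/*
+ * For example,
+ *
+ *	char buf[32];
+ *	int n = sprintf(buf, "%d/%x", 10, 10);
+ *
+ * leaves "10/a" in buf and sprintf returns 4.
+ */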
+static void
+sputc(
+ char c,
+ vm_offset_t arg)
+{
+ register char **bufp = (char **) arg;
+ register char *p = *bufp;
+ *p++ = c;
+ *bufp = p;
+}
+
+int
+sprintf( buf, fmt, va_alist)
+ char *buf;
+ char *fmt;
+ va_dcl
+{
+ va_list listp;
+ char *start = buf;
+
+ va_start(listp);
+ _doprnt(fmt, &listp, sputc, 16, (vm_offset_t)&buf);
+ va_end(listp);
+
+ *buf = 0;
+ return (buf - start);
+}
+
+
+void safe_gets(str, maxlen)
+ char *str;
+ int maxlen;
+{
+ register char *lp;
+ register int c;
+ char *strmax = str + maxlen - 1; /* allow space for trailing 0 */
+
+ lp = str;
+ for (;;) {
+ c = cngetc();
+ switch (c) {
+ case '\n':
+ case '\r':
+ printf("\n");
+ *lp++ = 0;
+ return;
+
+ case '\b':
+ case '#':
+ case '\177':
+ if (lp > str) {
+ printf("\b \b");
+ lp--;
+ }
+ continue;
+
+ case '@':
+ case 'u'&037:
+ lp = str;
+ printf("\n\r");
+ continue;
+
+ default:
+ if (c >= ' ' && c < '\177') {
+ if (lp < strmax) {
+ *lp++ = c;
+ printf("%c", c);
+ }
+ else {
+ printf("%c", '\007'); /* beep */
+ }
+ }
+ }
+ }
+}
diff --git a/kern/priority.c b/kern/priority.c
new file mode 100644
index 00000000..f9a40912
--- /dev/null
+++ b/kern/priority.c
@@ -0,0 +1,225 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: clock_prim.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Clock primitives.
+ */
+
+#include <cpus.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/machine.h>
+#include <kern/host.h>
+#include <kern/mach_param.h>
+#include <kern/sched.h>
+#include <kern/thread.h>
+#include <kern/processor.h>
+#include <kern/timer.h>
+#include <kern/time_out.h>
+#include <kern/time_stamp.h>
+#include <machine/machspl.h>
+
+
+
+/*
+ * USAGE_THRESHOLD is the amount by which usage must change to
+ * cause a priority shift that moves a thread between run queues.
+ */
+
+#ifdef PRI_SHIFT_2
+#if PRI_SHIFT_2 > 0
+#define USAGE_THRESHOLD (((1 << PRI_SHIFT) + (1 << PRI_SHIFT_2)) << (2 + SCHED_SHIFT))
+#else /* PRI_SHIFT_2 > 0 */
+#define USAGE_THRESHOLD (((1 << PRI_SHIFT) - (1 << -(PRI_SHIFT_2))) << (2 + SCHED_SHIFT))
+#endif /* PRI_SHIFT_2 > 0 */
+#else /* PRI_SHIFT_2 */
+#define USAGE_THRESHOLD (1 << (PRI_SHIFT + 2 + SCHED_SHIFT))
+#endif /* PRI_SHIFT_2 */
+
+/*
+ * thread_quantum_update:
+ *
+ * Recalculate the quantum and priority for a thread.
+ * The number of ticks that has elapsed since we were last called
+ * is passed as "nticks."
+ *
+ * Called only from clock_interrupt().
+ */
+
+void thread_quantum_update(mycpu, thread, nticks, state)
+ register int mycpu;
+ register thread_t thread;
+ int nticks;
+ int state;
+{
+ register int quantum;
+ register processor_t myprocessor;
+#if NCPUS > 1
+ register processor_set_t pset;
+#endif
+ spl_t s;
+
+ myprocessor = cpu_to_processor(mycpu);
+#if NCPUS > 1
+ pset = myprocessor->processor_set;
+ if (pset == 0) {
+ /*
+ * Processor is being reassigned.
+ * Should rewrite processor assignment code to
+ * block clock interrupts.
+ */
+ return;
+ }
+#endif /* NCPUS > 1 */
+
+ /*
+ * Account for thread's utilization of these ticks.
+ * This assumes that there is *always* a current thread.
+ * When the processor is idle, it should be the idle thread.
+ */
+
+ /*
+ * Update set_quantum and calculate the current quantum.
+ */
+#if NCPUS > 1
+ pset->set_quantum = pset->machine_quantum[
+ ((pset->runq.count > pset->processor_count) ?
+ pset->processor_count : pset->runq.count)];
+
+ if (myprocessor->runq.count != 0)
+ quantum = min_quantum;
+ else
+ quantum = pset->set_quantum;
+#else /* NCPUS > 1 */
+ quantum = min_quantum;
+ default_pset.set_quantum = quantum;
+#endif /* NCPUS > 1 */
+
+ /*
+ * Now recompute the priority of the thread if appropriate.
+ */
+
+ if (state != CPU_STATE_IDLE) {
+ myprocessor->quantum -= nticks;
+#if NCPUS > 1
+ /*
+ * Runtime quantum adjustment. Use quantum_adj_index
+ * to avoid synchronizing quantum expirations.
+ */
+ if ((quantum != myprocessor->last_quantum) &&
+ (pset->processor_count > 1)) {
+ myprocessor->last_quantum = quantum;
+ simple_lock(&pset->quantum_adj_lock);
+ quantum = min_quantum + (pset->quantum_adj_index *
+ (quantum - min_quantum)) /
+ (pset->processor_count - 1);
+ if (++(pset->quantum_adj_index) >=
+ pset->processor_count)
+ pset->quantum_adj_index = 0;
+ simple_unlock(&pset->quantum_adj_lock);
+ }
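+ /*
+ * For example, with min_quantum of 10 ticks, a set quantum of 40
+ * and four processors, successive adjustments hand out 10, 20, 30
+ * and 40 ticks as quantum_adj_index cycles, staggering quantum
+ * expirations across the processors.
+ */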
+#endif /* NCPUS > 1 */
+ if (myprocessor->quantum <= 0) {
+ s = splsched();
+ thread_lock(thread);
+ if (thread->sched_stamp != sched_tick) {
+ update_priority(thread);
+ }
+ else {
+ if (
+#if MACH_FIXPRI
+ (thread->policy == POLICY_TIMESHARE) &&
+#endif /* MACH_FIXPRI */
+ (thread->depress_priority < 0)) {
+ thread_timer_delta(thread);
+ thread->sched_usage +=
+ thread->sched_delta;
+ thread->sched_delta = 0;
+ compute_my_priority(thread);
+ }
+ }
+ thread_unlock(thread);
+ (void) splx(s);
+ /*
+ * This quantum is up, give this thread another.
+ */
+ myprocessor->first_quantum = FALSE;
+#if MACH_FIXPRI
+ if (thread->policy == POLICY_TIMESHARE) {
+#endif /* MACH_FIXPRI */
+ myprocessor->quantum += quantum;
+#if MACH_FIXPRI
+ }
+ else {
+ /*
+ * Fixed priority has per-thread quantum.
+ *
+ */
+ myprocessor->quantum += thread->sched_data;
+ }
+#endif /* MACH_FIXPRI */
+ }
+ /*
+ * Recompute priority if appropriate.
+ */
+ else {
+ s = splsched();
+ thread_lock(thread);
+ if (thread->sched_stamp != sched_tick) {
+ update_priority(thread);
+ }
+ else {
+ if (
+#if MACH_FIXPRI
+ (thread->policy == POLICY_TIMESHARE) &&
+#endif /* MACH_FIXPRI */
+ (thread->depress_priority < 0)) {
+ thread_timer_delta(thread);
+ if (thread->sched_delta >= USAGE_THRESHOLD) {
+ thread->sched_usage +=
+ thread->sched_delta;
+ thread->sched_delta = 0;
+ compute_my_priority(thread);
+ }
+ }
+ }
+ thread_unlock(thread);
+ (void) splx(s);
+ }
+ /*
+ * Check for and schedule ast if needed.
+ */
+ ast_check();
+ }
+}
+
diff --git a/kern/processor.c b/kern/processor.c
new file mode 100644
index 00000000..ad788a92
--- /dev/null
+++ b/kern/processor.c
@@ -0,0 +1,1039 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * processor.c: processor and processor_set manipulation routines.
+ */
+
+#include <cpus.h>
+#include <mach_fixpri.h>
+#include <mach_host.h>
+
+#include <mach/boolean.h>
+#include <mach/policy.h>
+#include <mach/processor_info.h>
+#include <mach/vm_param.h>
+#include <kern/cpu_number.h>
+#include <kern/lock.h>
+#include <kern/host.h>
+#include <kern/processor.h>
+#include <kern/sched.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/ipc_host.h>
+#include <ipc/ipc_port.h>
+
+#if MACH_HOST
+#include <kern/zalloc.h>
+zone_t pset_zone;
+#endif /* MACH_HOST */
+
+
+/*
+ * Exported variables.
+ */
+struct processor_set default_pset;
+struct processor processor_array[NCPUS];
+
+queue_head_t all_psets;
+int all_psets_count;
+decl_simple_lock_data(, all_psets_lock);
+
+processor_t master_processor;
+processor_t processor_ptr[NCPUS];
+
+/*
+ * Forward declarations.
+ */
+void quantum_set(processor_set_t);
+void pset_init(processor_set_t);
+void processor_init(processor_t, int);
+
+/*
+ * Bootstrap the processor/pset system so the scheduler can run.
+ */
+void pset_sys_bootstrap(void)
+{
+ register int i;
+
+ pset_init(&default_pset);
+ default_pset.empty = FALSE;
+ for (i = 0; i < NCPUS; i++) {
+ /*
+ * Initialize processor data structures.
+ * Note that cpu_to_processor(i) is processor_ptr[i].
+ */
+ processor_ptr[i] = &processor_array[i];
+ processor_init(processor_ptr[i], i);
+ }
+ master_processor = cpu_to_processor(master_cpu);
+ queue_init(&all_psets);
+ simple_lock_init(&all_psets_lock);
+ queue_enter(&all_psets, &default_pset, processor_set_t, all_psets);
+ all_psets_count = 1;
+ default_pset.active = TRUE;
+ default_pset.empty = FALSE;
+
+ /*
+ * Note: the default_pset has a max_priority of BASEPRI_USER.
+ * Internal kernel threads override this in kernel_thread.
+ */
+}
+
+#if MACH_HOST
+/*
+ * Rest of pset system initializations.
+ */
+void pset_sys_init(void)
+{
+ register int i;
+ register processor_t processor;
+
+ /*
+ * Allocate the zone for processor sets.
+ */
+ pset_zone = zinit(sizeof(struct processor_set), 128*PAGE_SIZE,
+ PAGE_SIZE, 0, "processor sets");
+
+ /*
+ * Give each processor a control port.
+ * The master processor already has one.
+ */
+ for (i = 0; i < NCPUS; i++) {
+ processor = cpu_to_processor(i);
+ if (processor != master_processor &&
+ machine_slot[i].is_cpu)
+ {
+ ipc_processor_init(processor);
+ }
+ }
+}
+#endif /* MACH_HOST */
+
+/*
+ * Initialize the given processor_set structure.
+ */
+
+void pset_init(
+ register processor_set_t pset)
+{
+ int i;
+
+ simple_lock_init(&pset->runq.lock);
+ pset->runq.low = 0;
+ pset->runq.count = 0;
+ for (i = 0; i < NRQS; i++) {
+ queue_init(&(pset->runq.runq[i]));
+ }
+ queue_init(&pset->idle_queue);
+ pset->idle_count = 0;
+ simple_lock_init(&pset->idle_lock);
+ queue_init(&pset->processors);
+ pset->processor_count = 0;
+ pset->empty = TRUE;
+ queue_init(&pset->tasks);
+ pset->task_count = 0;
+ queue_init(&pset->threads);
+ pset->thread_count = 0;
+ pset->ref_count = 1;
+ simple_lock_init(&pset->ref_lock);
+ queue_init(&pset->all_psets);
+ pset->active = FALSE;
+ simple_lock_init(&pset->lock);
+ pset->pset_self = IP_NULL;
+ pset->pset_name_self = IP_NULL;
+ pset->max_priority = BASEPRI_USER;
+#if MACH_FIXPRI
+ pset->policies = POLICY_TIMESHARE;
+#endif /* MACH_FIXPRI */
+ pset->set_quantum = min_quantum;
+#if NCPUS > 1
+ pset->quantum_adj_index = 0;
+ simple_lock_init(&pset->quantum_adj_lock);
+
+ for (i = 0; i <= NCPUS; i++) {
+ pset->machine_quantum[i] = min_quantum;
+ }
+#endif /* NCPUS > 1 */
+ pset->mach_factor = 0;
+ pset->load_average = 0;
+ pset->sched_load = SCHED_SCALE; /* i.e. 1 */
+}
+
+/*
+ * Initialize the given processor structure for the processor in
+ * the slot specified by slot_num.
+ */
+
+void processor_init(
+ register processor_t pr,
+ int slot_num)
+{
+ int i;
+
+ simple_lock_init(&pr->runq.lock);
+ pr->runq.low = 0;
+ pr->runq.count = 0;
+ for (i = 0; i < NRQS; i++) {
+ queue_init(&(pr->runq.runq[i]));
+ }
+ queue_init(&pr->processor_queue);
+ pr->state = PROCESSOR_OFF_LINE;
+ pr->next_thread = THREAD_NULL;
+ pr->idle_thread = THREAD_NULL;
+ pr->quantum = 0;
+ pr->first_quantum = FALSE;
+ pr->last_quantum = 0;
+ pr->processor_set = PROCESSOR_SET_NULL;
+ pr->processor_set_next = PROCESSOR_SET_NULL;
+ queue_init(&pr->processors);
+ simple_lock_init(&pr->lock);
+ pr->processor_self = IP_NULL;
+ pr->slot_num = slot_num;
+}
+
+/*
+ * pset_remove_processor() removes a processor from a processor_set.
+ * It can only be called on the current processor. Caller must
+ * hold lock on current processor and processor set.
+ */
+
+void pset_remove_processor(
+ processor_set_t pset,
+ processor_t processor)
+{
+ if (pset != processor->processor_set)
+ panic("pset_remove_processor: wrong pset");
+
+ queue_remove(&pset->processors, processor, processor_t, processors);
+ processor->processor_set = PROCESSOR_SET_NULL;
+ pset->processor_count--;
+ quantum_set(pset);
+}
+
+/*
+ * pset_add_processor() adds a processor to a processor_set.
+ * It can only be called on the current processor. Caller must
+ * hold lock on current processor and on pset. No reference counting on
+ * processors. Processor reference to pset is implicit.
+ */
+
+void pset_add_processor(
+ processor_set_t pset,
+ processor_t processor)
+{
+ queue_enter(&pset->processors, processor, processor_t, processors);
+ processor->processor_set = pset;
+ pset->processor_count++;
+ quantum_set(pset);
+}
+
+/*
+ * pset_remove_task() removes a task from a processor_set.
+ * Caller must hold locks on pset and task. Pset reference count
+ * is not decremented; caller must explicitly pset_deallocate.
+ */
+
+void pset_remove_task(
+ processor_set_t pset,
+ task_t task)
+{
+ if (pset != task->processor_set)
+ return;
+
+ queue_remove(&pset->tasks, task, task_t, pset_tasks);
+ task->processor_set = PROCESSOR_SET_NULL;
+ pset->task_count--;
+}
+
+/*
+ * pset_add_task() adds a task to a processor_set.
+ * Caller must hold locks on pset and task. Pset references to
+ * tasks are implicit.
+ */
+
+void pset_add_task(
+ processor_set_t pset,
+ task_t task)
+{
+ queue_enter(&pset->tasks, task, task_t, pset_tasks);
+ task->processor_set = pset;
+ pset->task_count++;
+}
+
+/*
+ * pset_remove_thread() removes a thread from a processor_set.
+ * Caller must hold locks on pset and thread. Pset reference count
+ * is not decremented; caller must explicitly pset_deallocate.
+ */
+
+void pset_remove_thread(
+ processor_set_t pset,
+ thread_t thread)
+{
+ queue_remove(&pset->threads, thread, thread_t, pset_threads);
+ thread->processor_set = PROCESSOR_SET_NULL;
+ pset->thread_count--;
+}
+
+/*
+ * pset_add_thread() adds a thread to a processor_set.
+ * Caller must hold locks on pset and thread. Pset references to
+ * threads are implicit.
+ */
+
+void pset_add_thread(
+ processor_set_t pset,
+ thread_t thread)
+{
+ queue_enter(&pset->threads, thread, thread_t, pset_threads);
+ thread->processor_set = pset;
+ pset->thread_count++;
+}
+
+/*
+ * thread_change_psets() changes the pset of a thread. Caller must
+ * hold locks on both psets and thread. The old pset must be
+ * explicitly pset_deallocate()'d by the caller.
+ */
+
+void thread_change_psets(
+ thread_t thread,
+ processor_set_t old_pset,
+ processor_set_t new_pset)
+{
+ queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
+ old_pset->thread_count--;
+ queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
+ thread->processor_set = new_pset;
+ new_pset->thread_count++;
+}
+
+/*
+ * pset_deallocate:
+ *
+ * Remove one reference to the processor set. Destroy processor_set
+ * if this was the last reference.
+ */
+void pset_deallocate(
+ processor_set_t pset)
+{
+ if (pset == PROCESSOR_SET_NULL)
+ return;
+
+ pset_ref_lock(pset);
+ if (--pset->ref_count > 0) {
+ pset_ref_unlock(pset);
+ return;
+ }
+#if !MACH_HOST
+ panic("pset_deallocate: default_pset destroyed");
+#endif /* !MACH_HOST */
+
+#if MACH_HOST
+ /*
+ * Reference count is zero, however the all_psets list
+ * holds an implicit reference and may make new ones.
+ * Its lock also dominates the pset lock. To check for this,
+ * temporarily restore one reference, and then lock the
+ * other structures in the right order.
+ */
+ pset->ref_count = 1;
+ pset_ref_unlock(pset);
+
+ simple_lock(&all_psets_lock);
+ pset_ref_lock(pset);
+ if (--pset->ref_count > 0) {
+ /*
+ * Made an extra reference.
+ */
+ pset_ref_unlock(pset);
+ simple_unlock(&all_psets_lock);
+ return;
+ }
+
+ /*
+ * Ok to destroy pset. Make a few paranoia checks.
+ */
+
+ if ((pset == &default_pset) || (pset->thread_count > 0) ||
+ (pset->task_count > 0) || pset->processor_count > 0) {
+ panic("pset_deallocate: destroy default or active pset");
+ }
+ /*
+ * Remove from all_psets queue.
+ */
+ queue_remove(&all_psets, pset, processor_set_t, all_psets);
+ all_psets_count--;
+
+ pset_ref_unlock(pset);
+ simple_unlock(&all_psets_lock);
+
+ /*
+ * That's it, free data structure.
+ */
+ zfree(pset_zone, (vm_offset_t)pset);
+#endif /* MACH_HOST */
+}
+
+/*
+ * pset_reference:
+ *
+ * Add one reference to the processor set.
+ */
+void pset_reference(
+ processor_set_t pset)
+{
+ pset_ref_lock(pset);
+ pset->ref_count++;
+ pset_ref_unlock(pset);
+}
+
+kern_return_t
+processor_info(
+ register processor_t processor,
+ int flavor,
+ host_t *host,
+ processor_info_t info,
+ natural_t *count)
+{
+ register int slot_num, state;
+ register processor_basic_info_t basic_info;
+
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (flavor != PROCESSOR_BASIC_INFO ||
+ *count < PROCESSOR_BASIC_INFO_COUNT)
+ return KERN_FAILURE;
+
+ basic_info = (processor_basic_info_t) info;
+
+ slot_num = processor->slot_num;
+ basic_info->cpu_type = machine_slot[slot_num].cpu_type;
+ basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
+ state = processor->state;
+ if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
+ basic_info->running = FALSE;
+ else
+ basic_info->running = TRUE;
+ basic_info->slot_num = slot_num;
+ if (processor == master_processor)
+ basic_info->is_master = TRUE;
+ else
+ basic_info->is_master = FALSE;
+
+ *count = PROCESSOR_BASIC_INFO_COUNT;
+ *host = &realhost;
+ return KERN_SUCCESS;
+}
+
+kern_return_t processor_start(
+ processor_t processor)
+{
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
+#if NCPUS > 1
+ return cpu_start(processor->slot_num);
+#else /* NCPUS > 1 */
+ return KERN_FAILURE;
+#endif /* NCPUS > 1 */
+}
+
+kern_return_t processor_exit(
+ processor_t processor)
+{
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+#if NCPUS > 1
+ return processor_shutdown(processor);
+#else /* NCPUS > 1 */
+ return KERN_FAILURE;
+#endif /* NCPUS > 1 */
+}
+
+kern_return_t
+processor_control(
+ processor_t processor,
+ processor_info_t info,
+ natural_t count)
+{
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+#if NCPUS > 1
+ return cpu_control(processor->slot_num, (int *)info, count);
+#else /* NCPUS > 1 */
+ return KERN_FAILURE;
+#endif /* NCPUS > 1 */
+}
+
+/*
+ * Precalculate the appropriate system quanta based on load. The
+ * index into machine_quantum is the number of threads on the
+ * processor set queue. It is limited to the number of processors in
+ * the set.
+ */
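+/*
+ * For example, with min_quantum of 10 ticks and four processors in
+ * the set, machine_quantum[] becomes { 80, 40, 20, 13, 10 } for 0
+ * through 4 runnable threads respectively.
+ */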
+
+void quantum_set(
+ processor_set_t pset)
+{
+#if NCPUS > 1
+ register int i,ncpus;
+
+ ncpus = pset->processor_count;
+
+ for ( i=1 ; i <= ncpus ; i++) {
+ pset->machine_quantum[i] =
+ ((min_quantum * ncpus) + (i/2)) / i ;
+ }
+ pset->machine_quantum[0] = 2 * pset->machine_quantum[1];
+
+ i = ((pset->runq.count > pset->processor_count) ?
+ pset->processor_count : pset->runq.count);
+ pset->set_quantum = pset->machine_quantum[i];
+#else /* NCPUS > 1 */
+ default_pset.set_quantum = min_quantum;
+#endif /* NCPUS > 1 */
+}
+
+#if MACH_HOST
+/*
+ * processor_set_create:
+ *
+ * Create and return a new processor set.
+ */
+
+kern_return_t
+processor_set_create(
+ host_t host,
+ processor_set_t *new_set,
+ processor_set_t *new_name)
+{
+ processor_set_t pset;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ pset = (processor_set_t) zalloc(pset_zone);
+ pset_init(pset);
+ pset_reference(pset); /* for new_set out argument */
+ pset_reference(pset); /* for new_name out argument */
+ ipc_pset_init(pset);
+ pset->active = TRUE;
+
+ simple_lock(&all_psets_lock);
+ queue_enter(&all_psets, pset, processor_set_t, all_psets);
+ all_psets_count++;
+ simple_unlock(&all_psets_lock);
+
+ ipc_pset_enable(pset);
+
+ *new_set = pset;
+ *new_name = pset;
+ return KERN_SUCCESS;
+}
+
+/*
+ * processor_set_destroy:
+ *
+ * destroy a processor set. Any tasks, threads or processors
+ * currently assigned to it are reassigned to the default pset.
+ */
+kern_return_t processor_set_destroy(
+ processor_set_t pset)
+{
+ register queue_entry_t elem;
+ register queue_head_t *list;
+
+ if (pset == PROCESSOR_SET_NULL || pset == &default_pset)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Handle multiple termination race. First one through sets
+ * active to FALSE and disables ipc access.
+ */
+ pset_lock(pset);
+ if (!(pset->active)) {
+ pset_unlock(pset);
+ return KERN_FAILURE;
+ }
+
+ pset->active = FALSE;
+ ipc_pset_disable(pset);
+
+
+ /*
+ * Now reassign everything in this set to the default set.
+ */
+
+ if (pset->task_count > 0) {
+ list = &pset->tasks;
+ while (!queue_empty(list)) {
+ elem = queue_first(list);
+ task_reference((task_t) elem);
+ pset_unlock(pset);
+ task_assign((task_t) elem, &default_pset, FALSE);
+ task_deallocate((task_t) elem);
+ pset_lock(pset);
+ }
+ }
+
+ if (pset->thread_count > 0) {
+ list = &pset->threads;
+ while (!queue_empty(list)) {
+ elem = queue_first(list);
+ thread_reference((thread_t) elem);
+ pset_unlock(pset);
+ thread_assign((thread_t) elem, &default_pset);
+ thread_deallocate((thread_t) elem);
+ pset_lock(pset);
+ }
+ }
+
+ if (pset->processor_count > 0) {
+ list = &pset->processors;
+ while(!queue_empty(list)) {
+ elem = queue_first(list);
+ pset_unlock(pset);
+ processor_assign((processor_t) elem, &default_pset, TRUE);
+ pset_lock(pset);
+ }
+ }
+
+ pset_unlock(pset);
+
+ /*
+ * Destroy ipc state.
+ */
+ ipc_pset_terminate(pset);
+
+ /*
+ * Deallocate pset's reference to itself.
+ */
+ pset_deallocate(pset);
+ return KERN_SUCCESS;
+}
+
+#else /* MACH_HOST */
+
+kern_return_t
+processor_set_create(
+ host_t host,
+ processor_set_t *new_set,
+ processor_set_t *new_name)
+{
+#ifdef lint
+ host++; new_set++; new_name++;
+#endif /* lint */
+ return KERN_FAILURE;
+}
+
+kern_return_t processor_set_destroy(
+ processor_set_t pset)
+{
+#ifdef lint
+ pset++;
+#endif /* lint */
+ return KERN_FAILURE;
+}
+
+#endif /* MACH_HOST */
+
+kern_return_t
+processor_get_assignment(
+ processor_t processor,
+ processor_set_t *pset)
+{
+ int state;
+
+ state = processor->state;
+ if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
+ return KERN_FAILURE;
+
+ *pset = processor->processor_set;
+ pset_reference(*pset);
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+processor_set_info(
+ processor_set_t pset,
+ int flavor,
+ host_t *host,
+ processor_set_info_t info,
+ natural_t *count)
+{
+ if (pset == PROCESSOR_SET_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (flavor == PROCESSOR_SET_BASIC_INFO) {
+ register processor_set_basic_info_t basic_info;
+
+ if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
+ return KERN_FAILURE;
+
+ basic_info = (processor_set_basic_info_t) info;
+
+ pset_lock(pset);
+ basic_info->processor_count = pset->processor_count;
+ basic_info->task_count = pset->task_count;
+ basic_info->thread_count = pset->thread_count;
+ basic_info->mach_factor = pset->mach_factor;
+ basic_info->load_average = pset->load_average;
+ pset_unlock(pset);
+
+ *count = PROCESSOR_SET_BASIC_INFO_COUNT;
+ *host = &realhost;
+ return KERN_SUCCESS;
+ }
+ else if (flavor == PROCESSOR_SET_SCHED_INFO) {
+ register processor_set_sched_info_t sched_info;
+
+ if (*count < PROCESSOR_SET_SCHED_INFO_COUNT)
+ return KERN_FAILURE;
+
+ sched_info = (processor_set_sched_info_t) info;
+
+ pset_lock(pset);
+#if MACH_FIXPRI
+ sched_info->policies = pset->policies;
+#else /* MACH_FIXPRI */
+ sched_info->policies = POLICY_TIMESHARE;
+#endif /* MACH_FIXPRI */
+ sched_info->max_priority = pset->max_priority;
+ pset_unlock(pset);
+
+ *count = PROCESSOR_SET_SCHED_INFO_COUNT;
+ *host = &realhost;
+ return KERN_SUCCESS;
+ }
+
+ *host = HOST_NULL;
+ return KERN_INVALID_ARGUMENT;
+}
+
+/*
+ * processor_set_max_priority:
+ *
+ * Specify max priority permitted on processor set. This affects
+ * newly created and assigned threads. Optionally change existing
+ * ones.
+ */
+kern_return_t
+processor_set_max_priority(
+ processor_set_t pset,
+ int max_priority,
+ boolean_t change_threads)
+{
+ if (pset == PROCESSOR_SET_NULL || invalid_pri(max_priority))
+ return KERN_INVALID_ARGUMENT;
+
+ pset_lock(pset);
+ pset->max_priority = max_priority;
+
+ if (change_threads) {
+ register queue_head_t *list;
+ register thread_t thread;
+
+ list = &pset->threads;
+ queue_iterate(list, thread, thread_t, pset_threads) {
+ if (thread->max_priority < max_priority)
+ thread_max_priority(thread, pset, max_priority);
+ }
+ }
+
+ pset_unlock(pset);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * processor_set_policy_enable:
+ *
+ * Allow indicated policy on processor set.
+ */
+
+kern_return_t
+processor_set_policy_enable(
+ processor_set_t pset,
+ int policy)
+{
+ if ((pset == PROCESSOR_SET_NULL) || invalid_policy(policy))
+ return KERN_INVALID_ARGUMENT;
+
+#if MACH_FIXPRI
+ pset_lock(pset);
+ pset->policies |= policy;
+ pset_unlock(pset);
+
+ return KERN_SUCCESS;
+#else /* MACH_FIXPRI */
+ if (policy == POLICY_TIMESHARE)
+ return KERN_SUCCESS;
+ else
+ return KERN_FAILURE;
+#endif /* MACH_FIXPRI */
+}
+
+/*
+ * processor_set_policy_disable:
+ *
+ * Forbid indicated policy on processor set. Time sharing cannot
+ * be forbidden.
+ */
+
+kern_return_t
+processor_set_policy_disable(
+ processor_set_t pset,
+ int policy,
+ boolean_t change_threads)
+{
+ if ((pset == PROCESSOR_SET_NULL) || policy == POLICY_TIMESHARE ||
+ invalid_policy(policy))
+ return KERN_INVALID_ARGUMENT;
+
+#if MACH_FIXPRI
+ pset_lock(pset);
+
+ /*
+ * Check if policy enabled. Disable if so, then handle
+ * change_threads.
+ */
+ if (pset->policies & policy) {
+ pset->policies &= ~policy;
+
+ if (change_threads) {
+ register queue_head_t *list;
+ register thread_t thread;
+
+ list = &pset->threads;
+ queue_iterate(list, thread, thread_t, pset_threads) {
+ if (thread->policy == policy)
+ thread_policy(thread, POLICY_TIMESHARE, 0);
+ }
+ }
+ }
+ pset_unlock(pset);
+#endif /* MACH_FIXPRI */
+
+ return KERN_SUCCESS;
+}
+
+#define THING_TASK 0
+#define THING_THREAD 1
+
+/*
+ * processor_set_things:
+ *
+ * Common internals for processor_set_{threads,tasks}
+ */
+kern_return_t
+processor_set_things(
+ processor_set_t pset,
+ mach_port_t **thing_list,
+ natural_t *count,
+ int type)
+{
+ unsigned int actual; /* this many things */
+ int i;
+
+ vm_size_t size, size_needed;
+ vm_offset_t addr;
+
+ if (pset == PROCESSOR_SET_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ size = 0; addr = 0;
+
+ for (;;) {
+ pset_lock(pset);
+ if (!pset->active) {
+ pset_unlock(pset);
+ return KERN_FAILURE;
+ }
+
+ if (type == THING_TASK)
+ actual = pset->task_count;
+ else
+ actual = pset->thread_count;
+
+ /* do we have the memory we need? */
+
+ size_needed = actual * sizeof(mach_port_t);
+ if (size_needed <= size)
+ break;
+
+ /* unlock the pset and allocate more memory */
+ pset_unlock(pset);
+
+ if (size != 0)
+ kfree(addr, size);
+
+ assert(size_needed > 0);
+ size = size_needed;
+
+ addr = kalloc(size);
+ if (addr == 0)
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /* OK, have memory and the processor_set is locked & active */
+
+ switch (type) {
+ case THING_TASK: {
+ task_t *tasks = (task_t *) addr;
+ task_t task;
+
+ for (i = 0, task = (task_t) queue_first(&pset->tasks);
+ i < actual;
+ i++, task = (task_t) queue_next(&task->pset_tasks)) {
+ /* take ref for convert_task_to_port */
+ task_reference(task);
+ tasks[i] = task;
+ }
+ assert(queue_end(&pset->tasks, (queue_entry_t) task));
+ break;
+ }
+
+ case THING_THREAD: {
+ thread_t *threads = (thread_t *) addr;
+ thread_t thread;
+
+ for (i = 0, thread = (thread_t) queue_first(&pset->threads);
+ i < actual;
+ i++,
+ thread = (thread_t) queue_next(&thread->pset_threads)) {
+ /* take ref for convert_thread_to_port */
+ thread_reference(thread);
+ threads[i] = thread;
+ }
+ assert(queue_end(&pset->threads, (queue_entry_t) thread));
+ break;
+ }
+ }
+
+ /* can unlock processor set now that we have the task/thread refs */
+ pset_unlock(pset);
+
+ if (actual == 0) {
+ /* no things, so return null pointer and deallocate memory */
+ *thing_list = 0;
+ *count = 0;
+
+ if (size != 0)
+ kfree(addr, size);
+ } else {
+ /* if we allocated too much, must copy */
+
+ if (size_needed < size) {
+ vm_offset_t newaddr;
+
+ newaddr = kalloc(size_needed);
+ if (newaddr == 0) {
+ switch (type) {
+ case THING_TASK: {
+ task_t *tasks = (task_t *) addr;
+
+ for (i = 0; i < actual; i++)
+ task_deallocate(tasks[i]);
+ break;
+ }
+
+ case THING_THREAD: {
+ thread_t *threads = (thread_t *) addr;
+
+ for (i = 0; i < actual; i++)
+ thread_deallocate(threads[i]);
+ break;
+ }
+ }
+ kfree(addr, size);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ bcopy((char *) addr, (char *) newaddr, size_needed);
+ kfree(addr, size);
+ addr = newaddr;
+ }
+
+ *thing_list = (mach_port_t *) addr;
+ *count = actual;
+
+ /* do the conversion that Mig should handle */
+
+ switch (type) {
+ case THING_TASK: {
+ task_t *tasks = (task_t *) addr;
+
+ for (i = 0; i < actual; i++)
+ ((mach_port_t *) tasks)[i] =
+ (mach_port_t)convert_task_to_port(tasks[i]);
+ break;
+ }
+
+ case THING_THREAD: {
+ thread_t *threads = (thread_t *) addr;
+
+ for (i = 0; i < actual; i++)
+ ((mach_port_t *) threads)[i] =
+ (mach_port_t)convert_thread_to_port(threads[i]);
+ break;
+ }
+ }
+ }
+
+ return KERN_SUCCESS;
+}
+
+
+/*
+ * processor_set_tasks:
+ *
+ * List all tasks in the processor set.
+ */
+kern_return_t
+processor_set_tasks(
+ processor_set_t pset,
+ task_array_t *task_list,
+ natural_t *count)
+{
+ return processor_set_things(pset, task_list, count, THING_TASK);
+}
+
+/*
+ * processor_set_threads:
+ *
+ * List all threads in the processor set.
+ */
+kern_return_t
+processor_set_threads(
+ processor_set_t pset,
+ thread_array_t *thread_list,
+ natural_t *count)
+{
+ return processor_set_things(pset, thread_list, count, THING_THREAD);
+}
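
Annotation (not part of the diff): the loop in processor_set_things() above follows a common pattern, size the result under the pset lock, drop the lock to allocate, then relock and re-check because the count may have changed. The sketch below reduces that pattern to portable C; a pthread mutex and malloc() stand in for pset_lock() and kalloc(), and struct item_source, snapshot_items() and their fields are names invented for this sketch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct item_source {
    pthread_mutex_t lock;    /* plays the role of pset_lock */
    int             count;   /* how many items exist right now */
    int            *items;   /* the data to be copied out */
};

/* Returns a malloc'd copy of src->items; *out_count receives its length. */
int *snapshot_items(struct item_source *src, int *out_count)
{
    size_t size = 0, size_needed;
    int *buf = NULL;

    for (;;) {
        pthread_mutex_lock(&src->lock);
        size_needed = (size_t) src->count * sizeof(int);
        if (size_needed <= size)
            break;                      /* buffer is big enough; keep the lock */

        /* Too small: drop the lock, grow the buffer and retry, because
           the count may change while we are unlocked. */
        pthread_mutex_unlock(&src->lock);
        free(buf);
        size = size_needed;
        buf = malloc(size);
        if (buf == NULL) {              /* KERN_RESOURCE_SHORTAGE analogue */
            *out_count = 0;
            return NULL;
        }
    }

    /* Locked, and the buffer is known to be large enough. */
    if (size_needed > 0)
        memcpy(buf, src->items, size_needed);
    *out_count = src->count;
    pthread_mutex_unlock(&src->lock);
    return buf;
}

int main(void)
{
    int data[3] = { 10, 20, 30 };
    struct item_source src = { PTHREAD_MUTEX_INITIALIZER, 3, data };
    int i, n, *copy = snapshot_items(&src, &n);

    for (i = 0; i < n; i++)
        printf("%d\n", copy[i]);
    free(copy);
    return 0;
}

Like the kernel routine, this may allocate more than it finally needs if the count shrinks between iterations; processor_set_things() handles that by copying into a right-sized buffer afterwards.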
diff --git a/kern/processor.h b/kern/processor.h
new file mode 100644
index 00000000..8de7a688
--- /dev/null
+++ b/kern/processor.h
@@ -0,0 +1,327 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * processor.h: Processor and processor-set definitions.
+ */
+
+#ifndef _KERN_PROCESSOR_H_
+#define _KERN_PROCESSOR_H_
+
+/*
+ * Data structures for managing processors and sets of processors.
+ */
+
+#include <cpus.h>
+#include <mach_fixpri.h>
+#include <mach_host.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/processor_info.h>
+#include <kern/cpu_number.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/kern_types.h>
+#include <kern/host.h>
+
+#if NCPUS > 1
+#include <machine/ast_types.h>
+#endif /* NCPUS > 1 */
+
+struct processor_set {
+ struct run_queue runq; /* runq for this set */
+ queue_head_t idle_queue; /* idle processors */
+ int idle_count; /* how many ? */
+ decl_simple_lock_data(, idle_lock) /* lock for above */
+ queue_head_t processors; /* all processors here */
+ int processor_count; /* how many ? */
+ boolean_t empty; /* true if no processors */
+ queue_head_t tasks; /* tasks assigned */
+ int task_count; /* how many */
+ queue_head_t threads; /* threads in this set */
+ int thread_count; /* how many */
+ int ref_count; /* structure ref count */
+ decl_simple_lock_data(, ref_lock) /* lock for ref count */
+ queue_chain_t all_psets; /* link for all_psets */
+ boolean_t active; /* is pset in use */
+ decl_simple_lock_data(, lock) /* lock for everything else */
+ struct ipc_port * pset_self; /* port for operations */
+ struct ipc_port * pset_name_self; /* port for information */
+ int max_priority; /* maximum priority */
+#if MACH_FIXPRI
+ int policies; /* bit vector for policies */
+#endif /* MACH_FIXPRI */
+ int set_quantum; /* current default quantum */
+#if NCPUS > 1
+ int quantum_adj_index; /* runtime quantum adj. */
+ decl_simple_lock_data(, quantum_adj_lock) /* lock for above */
+ int machine_quantum[NCPUS+1]; /* ditto */
+#endif /* NCPUS > 1 */
+ long mach_factor; /* mach_factor */
+ long load_average; /* load_average */
+ long sched_load; /* load avg for scheduler */
+};
+extern struct processor_set default_pset;
+
+struct processor {
+ struct run_queue runq; /* local runq for this processor */
+ /* XXX want to do this round robin eventually */
+ queue_chain_t processor_queue; /* idle/assign/shutdown queue link */
+ int state; /* See below */
+ struct thread *next_thread; /* next thread to run if dispatched */
+ struct thread *idle_thread; /* this processor's idle thread. */
+ int quantum; /* quantum for current thread */
+ boolean_t first_quantum; /* first quantum in succession */
+ int last_quantum; /* last quantum assigned */
+
+ processor_set_t processor_set; /* processor set I belong to */
+ processor_set_t processor_set_next; /* set I will belong to */
+ queue_chain_t processors; /* all processors in set */
+ decl_simple_lock_data(, lock)
+ struct ipc_port *processor_self; /* port for operations */
+ int slot_num; /* machine-indep slot number */
+#if NCPUS > 1
+ ast_check_t ast_check_data; /* for remote ast_check invocation */
+#endif /* NCPUS > 1 */
+ /* punt id data temporarily */
+};
+typedef struct processor Processor;
+extern struct processor processor_array[NCPUS];
+
+/*
+ * Chain of all processor sets.
+ */
+extern queue_head_t all_psets;
+extern int all_psets_count;
+decl_simple_lock_data(extern, all_psets_lock);
+
+/*
+ * The lock ordering is:
+ *
+ * all_psets_lock
+ * |
+ * |
+ * V
+ * pset_lock
+ * |
+ * +-----------+---------------+-------------------+
+ * | | | |
+ * | | | |
+ * | | V V
+ * | | task_lock pset_self->ip_lock
+ * | | | |
+ * | | +-----------+---------------+ |
+ * | | | | |
+ * | V V V V
+ * | thread_lock* pset_ref_lock
+ * | |
+ * | +-------+
+ * | | |
+ * | | V
+ * | | runq_lock*
+ * | |
+ * V V
+ * processor_lock*
+ * |
+ * |
+ * V
+ * pset_idle_lock*
+ * |
+ * |
+ * V
+ * action_lock*
+ *
+ * Locks marked with "*" are taken at splsched.
+ */
+
+/*
+ * XXX need a pointer to the master processor structure
+ */
+
+extern processor_t master_processor;
+
+/*
+ * NOTE: The processor->processor_set link is needed in one of the
+ * scheduler's critical paths. [Figure out where to look for another
+ * thread to run on this processor.] It is accessed without locking.
+ * The following access protocol controls this field.
+ *
+ * Read from own processor - just read.
+ * Read from another processor - lock processor structure during read.
+ * Write from own processor - lock processor structure during write.
+ * Write from another processor - NOT PERMITTED.
+ *
+ */
+
+/*
+ * Processor state locking:
+ *
+ * Values for the processor state are defined below. If the processor
+ * is off-line or being shutdown, then it is only necessary to lock
+ * the processor to change its state. Otherwise it is only necessary
+ * to lock its processor set's idle_lock. Scheduler code will
+ * typically lock only the idle_lock, but processor manipulation code
+ * will often lock both.
+ */
+
+#define PROCESSOR_OFF_LINE 0 /* Not in system */
+#define PROCESSOR_RUNNING 1 /* Running normally */
+#define PROCESSOR_IDLE 2 /* idle */
+#define PROCESSOR_DISPATCHING 3 /* dispatching (idle -> running) */
+#define PROCESSOR_ASSIGN 4 /* Assignment is changing */
+#define PROCESSOR_SHUTDOWN 5 /* Being shutdown */
+
+/*
+ * Use processor ptr array to find current processor's data structure.
+ * This replaces a multiplication (index into processor_array) with
+ * an array lookup and a memory reference. It also allows us to save
+ * space if processor numbering gets too sparse.
+ */
+
+extern processor_t processor_ptr[NCPUS];
+
+#define cpu_to_processor(i) (processor_ptr[i])
+
+#define current_processor() (processor_ptr[cpu_number()])
+#define current_processor_set() (current_processor()->processor_set)
+
+/* Compatibility -- will go away */
+
+#define cpu_state(slot_num) (processor_ptr[slot_num]->state)
+#define cpu_idle(slot_num) (cpu_state(slot_num) == PROCESSOR_IDLE)
+
+/* Useful lock macros */
+
+#define pset_lock(pset) simple_lock(&(pset)->lock)
+#define pset_unlock(pset) simple_unlock(&(pset)->lock)
+#define pset_ref_lock(pset) simple_lock(&(pset)->ref_lock)
+#define pset_ref_unlock(pset) simple_unlock(&(pset)->ref_lock)
+
+#define processor_lock(pr) simple_lock(&(pr)->lock)
+#define processor_unlock(pr) simple_unlock(&(pr)->lock)
+
+typedef mach_port_t *processor_array_t;
+typedef mach_port_t *processor_set_array_t;
+typedef mach_port_t *processor_set_name_array_t;
+
+
+/*
+ * Exported functions
+ */
+
+/* Initialization */
+
+#ifdef KERNEL
+#if MACH_HOST
+extern void pset_sys_bootstrap(void);
+extern void pset_sys_init(void);
+#endif /* MACH_HOST */
+
+/* Pset internal functions */
+
+extern void pset_reference(processor_set_t);
+extern void pset_deallocate(processor_set_t);
+extern void pset_remove_processor(processor_set_t, processor_t);
+extern void pset_add_processor(processor_set_t, processor_t);
+extern void pset_remove_task(processor_set_t, struct task *);
+extern void pset_add_task(processor_set_t, struct task *);
+extern void pset_remove_thread(processor_set_t, struct thread *);
+extern void pset_add_thread(processor_set_t, struct thread *);
+extern void thread_change_psets(struct thread *,
+ processor_set_t, processor_set_t);
+
+/* Processor interface */
+
+extern kern_return_t processor_get_assignment(
+ processor_t processor,
+ processor_set_t *processor_set);
+
+extern kern_return_t processor_info(
+ processor_t processor,
+ int flavor,
+ host_t * host,
+ processor_info_t info,
+ natural_t * count);
+
+extern kern_return_t processor_start(
+ processor_t processor);
+
+extern kern_return_t processor_exit(
+ processor_t processor);
+
+extern kern_return_t processor_control(
+ processor_t processor,
+ processor_info_t info,
+ natural_t count);
+
+/* Pset interface */
+
+extern kern_return_t processor_set_create(
+ host_t host,
+ processor_set_t *new_set,
+ processor_set_t *new_name);
+
+extern kern_return_t processor_set_destroy(
+ processor_set_t pset);
+
+extern kern_return_t processor_set_info(
+ processor_set_t pset,
+ int flavor,
+ host_t *host,
+ processor_set_info_t info,
+ natural_t *count);
+
+extern kern_return_t processor_set_max_priority(
+ processor_set_t pset,
+ int max_priority,
+ boolean_t change_threads);
+
+extern kern_return_t processor_set_policy_enable(
+ processor_set_t pset,
+ int policy);
+
+extern kern_return_t processor_set_policy_disable(
+ processor_set_t pset,
+ int policy,
+ boolean_t change_threads);
+
+extern kern_return_t processor_set_tasks(
+ processor_set_t pset,
+ task_array_t *task_list,
+ natural_t *count);
+
+extern kern_return_t processor_set_threads(
+ processor_set_t pset,
+ thread_array_t *thread_list,
+ natural_t *count);
+#endif
+
+#endif /* _KERN_PROCESSOR_H_ */
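
Annotation (not part of the diff): the lock ordering diagram above is the part of this header that is easiest to get wrong. The sketch below shows the discipline in portable C with two pthread mutexes; list_lock stands in for all_psets_lock, struct set and count_processors() are invented for the sketch, and the only point being made is that every path takes the outer lock before the inner one.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;  /* all_psets_lock */

struct set {
    pthread_mutex_t lock;       /* per-set lock, ordered after list_lock */
    int processor_count;
};

/* Inspects one set while (conceptually) walking the global list:
   level 1 is taken first, then level 2, never the reverse, so two
   threads can never each hold one lock while waiting for the other. */
int count_processors(struct set *s)
{
    int n;

    pthread_mutex_lock(&list_lock);   /* level 1: all_psets_lock */
    pthread_mutex_lock(&s->lock);     /* level 2: pset_lock */
    n = s->processor_count;
    pthread_mutex_unlock(&s->lock);
    pthread_mutex_unlock(&list_lock);
    return n;
}

int main(void)
{
    struct set s = { PTHREAD_MUTEX_INITIALIZER, 4 };

    printf("%d processors\n", count_processors(&s));
    return 0;
}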
diff --git a/kern/profile.c b/kern/profile.c
new file mode 100644
index 00000000..75139347
--- /dev/null
+++ b/kern/profile.c
@@ -0,0 +1,413 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Copyright 1991 by Open Software Foundation,
+ * Grenoble, FRANCE
+ *
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OSF or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior
+ * permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if 0
+
+#include <kern/thread.h>
+#include <kern/queue.h>
+#include <mach/profil.h>
+#include <kern/sched_prim.h>
+#include <ipc/ipc_space.h>
+
+extern vm_map_t kernel_map; /* can be discarded, defined in <vm/vm_kern.h> */
+
+thread_t profile_thread_id = THREAD_NULL;
+
+
+void profile_thread()
+{
+ struct message {
+ mach_msg_header_t head;
+ mach_msg_type_t type;
+ int arg[SIZE_PROF_BUFFER+1];
+ } msg;
+
+ register spl_t s;
+ buf_to_send_t buf_entry;
+ queue_entry_t prof_queue_entry;
+ prof_data_t pbuf;
+ simple_lock_t lock;
+ msg_return_t mr;
+ int j;
+
+ /* Initialise the queue header for the prof_queue */
+ mpqueue_init(&prof_queue);
+
+ /* Template initialisation of header and type structures */
+ msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND_ONCE);
+ msg.head.msgh_size = sizeof(msg);
+ msg.head.msgh_local_port = MACH_PORT_NULL;
+ msg.head.msgh_kind = MACH_MSGH_KIND_NORMAL;
+ msg.head.msgh_id = 666666;
+
+ msg.type.msgt_name = MACH_MSG_TYPE_INTEGER_32;
+ msg.type.msgt_size = 32;
+ msg.type.msgt_number = SIZE_PROF_BUFFER+1;
+ msg.type.msgt_inline = TRUE;
+ msg.type.msgt_longform = FALSE;
+ msg.type.msgt_deallocate = FALSE;
+ msg.type.msgt_unused = 0;
+
+ while (TRUE) {
+
+ /* Dequeue the first buffer. */
+ s = splsched();
+ mpdequeue_head(&prof_queue, &prof_queue_entry);
+ splx(s);
+
+ if ((buf_entry = (buf_to_send_t) prof_queue_entry) == NULLBTS)
+ {
+ thread_sleep((event_t) profile_thread, lock, TRUE);
+ if (current_thread()->wait_result != THREAD_AWAKENED)
+ break;
+ }
+ else {
+ task_t curr_task;
+ thread_t curr_th;
+ register int *sample;
+ int curr_buf;
+ int imax;
+
+ curr_th = (thread_t) buf_entry->thread;
+ curr_buf = (int) buf_entry->number;
+ pbuf = curr_th->profil_buffer;
+
+ /* Set the remote port */
+ msg.head.msgh_remote_port = (mach_port_t) pbuf->prof_port;
+
+
+ sample = pbuf->prof_area[curr_buf].p_zone;
+ imax = pbuf->prof_area[curr_buf].p_index;
+ for (j = 0; j < imax; j++, sample++)
+ msg.arg[j] = *sample;
+
+ /* Let hardclock() know you've finished the dirty job */
+ pbuf->prof_area[curr_buf].p_full = FALSE;
+
+ /*
+ * Store the number of samples actually sent
+ * as the last element of the array.
+ */
+ msg.arg[SIZE_PROF_BUFFER] = imax;
+
+ mr = mach_msg(&(msg.head), MACH_SEND_MSG,
+ sizeof(struct message), 0,
+ MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
+ MACH_PORT_NULL);
+
+ if (mr != MACH_MSG_SUCCESS) {
+printf("profile_thread: mach_msg failed returned %x\n",(int)mr);
+ }
+
+ if (buf_entry->wakeme)
+ thread_wakeup((event_t) &buf_entry->wakeme);
+ kmem_free(kernel_map, (buf_to_send_t) buf_entry,
+ sizeof(struct buf_to_send));
+
+ }
+
+ }
+ /* The profile thread has been signalled to exit. There may still
+ be sample data queued for us, which we must now throw away.
+ Once we set profile_thread_id to null, hardclock() will stop
+ queueing any additional samples, so we do not need to alter
+ the interrupt level. */
+ profile_thread_id = THREAD_NULL;
+ while (1) {
+ mpdequeue_head(&prof_queue, &prof_queue_entry);
+ if ((buf_entry = (buf_to_send_t) prof_queue_entry) == NULLBTS)
+ break;
+ if (buf_entry->wakeme)
+ thread_wakeup((event_t) &buf_entry->wakeme);
+ kmem_free(kernel_map, (buf_to_send_t) buf_entry,
+ sizeof(struct buf_to_send));
+ }
+
+ thread_halt_self();
+}
+
+
+
+#include <mach/message.h>
+
+void
+send_last_sample_buf(th)
+thread_t th;
+{
+ register spl_t s;
+ buf_to_send_t buf_entry;
+ vm_offset_t vm_buf_entry;
+
+ if (th->profil_buffer == NULLPBUF)
+ return;
+
+ /* Ask for the sending of the last PC buffer.
+ * Make a request to the profile_thread by inserting
+ * the buffer in the send queue, and wake it up.
+ * The last buffer must be inserted at the head of the
+ * send queue, so the profile_thread handles it immediately.
+ */
+ if (kmem_alloc( kernel_map, &vm_buf_entry,
+ sizeof(struct buf_to_send)) != KERN_SUCCESS)
+ return;
+ buf_entry = (buf_to_send_t) vm_buf_entry;
+ buf_entry->thread = (int *) th;
+ buf_entry->number = th->profil_buffer->prof_index;
+
+ /* Watch out in case profile thread exits while we are about to
+ queue data for it. */
+ s = splsched();
+ if (profile_thread_id != THREAD_NULL) {
+ simple_lock_t lock;
+ buf_entry->wakeme = 1;
+ mpenqueue_tail( &prof_queue, &(buf_entry->list));
+ thread_wakeup((event_t) profile_thread);
+ assert_wait((event_t) &buf_entry->wakeme, TRUE);
+ splx(s);
+ thread_block((void (*)()) 0);
+ } else {
+ splx(s);
+ kmem_free(kernel_map, vm_buf_entry, sizeof(struct buf_to_send));
+ }
+}
+
+/*
+ * Profile current thread
+ */
+
+void
+profile(pc)
+ int pc;
+{
+
+ /* Find out which thread has been interrupted. */
+ thread_t it_thread = current_thread();
+ int inout_val = pc;
+ buf_to_send_t buf_entry;
+ vm_offset_t vm_buf_entry;
+ int *val;
+ /*
+ * Test if the current thread is to be sampled
+ */
+ if (it_thread->thread_profiled) {
+ /* Inserts the PC value in the buffer of the thread */
+ set_pbuf_value(it_thread->profil_buffer, &inout_val);
+ switch(inout_val) {
+ case 0:
+ if (profile_thread_id == THREAD_NULL) {
+ reset_pbuf_area(it_thread->profil_buffer);
+ } else printf("ERROR : hardclock : full buffer unsent\n");
+ break;
+ case 1:
+ /* Normal case, value successfully inserted */
+ break;
+ case 2 :
+ /*
+ * The value we have just inserted caused the
+ * buffer to be full, and ready to be sent.
+ * If profile_thread_id is null, the profile
+ * thread has been killed. Since this generally
+ * happens only when the O/S server task of which
+ * it is a part is killed, it is not a great loss
+ * to throw away the data.
+ */
+ if (profile_thread_id == THREAD_NULL ||
+ kmem_alloc(kernel_map,
+ &vm_buf_entry ,
+ sizeof(struct buf_to_send)) !=
+ KERN_SUCCESS) {
+ reset_pbuf_area(it_thread->profil_buffer);
+ break;
+ }
+ buf_entry = (buf_to_send_t) vm_buf_entry;
+ buf_entry->thread = (int *)it_thread;
+ buf_entry->number =
+ (it_thread->profil_buffer)->prof_index;
+ mpenqueue_tail(&prof_queue, &(buf_entry->list));
+
+ /* Switch to another buffer */
+ reset_pbuf_area(it_thread->profil_buffer);
+
+ /* Wake up the profile thread */
+ if (profile_thread_id != THREAD_NULL)
+ thread_wakeup((event_t) profile_thread);
+ break;
+
+ default:
+ printf("ERROR: profile : unexpected case\n");
+ }
+ }
+}
+
+
+/* The task parameter in this and the subsequent routine is needed for
+ MiG, even though it is not used in the function itself. */
+
+kern_return_t
+mach_sample_thread (task, reply, cur_thread)
+ipc_space_t task;
+ipc_object_t reply;
+thread_t cur_thread;
+{
+/*
+ * This routine is called every time that a new thread has made
+ * a request for the sampling service. We must keep track of the
+ * correspondence between its identity (cur_thread) and the port
+ * we are going to use as a reply port to send out the samples resulting
+ * from its execution.
+ */
+ prof_data_t pbuf;
+ vm_offset_t vmpbuf;
+
+ if (reply != MACH_PORT_NULL) {
+ if (cur_thread->thread_profiled && cur_thread->thread_profiled_own) {
+ if (reply == cur_thread->profil_buffer->prof_port)
+ return KERN_SUCCESS;
+ (void) mach_sample_thread(task, MACH_PORT_NULL, cur_thread);
+ }
+ /* Start profiling this thread , do the initialization. */
+ alloc_pbuf_area(pbuf, vmpbuf);
+ if ((cur_thread->profil_buffer = pbuf) == NULLPBUF) {
+printf("ERROR:mach_sample_thread:cannot allocate pbuf\n");
+ return KERN_RESOURCE_SHORTAGE;
+ } else {
+ if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
+printf("ERROR:mach_sample_thread:cannot set pbuf_nb\n");
+ return KERN_FAILURE;
+ }
+ reset_pbuf_area(pbuf);
+ }
+
+ pbuf->prof_port = reply;
+ cur_thread->thread_profiled = TRUE;
+ cur_thread->thread_profiled_own = TRUE;
+ if (profile_thread_id == THREAD_NULL)
+ profile_thread_id = kernel_thread(current_task(), profile_thread);
+ } else {
+ if (!cur_thread->thread_profiled_own)
+ cur_thread->thread_profiled = FALSE;
+ if (!cur_thread->thread_profiled)
+ return KERN_SUCCESS;
+
+ send_last_sample_buf(cur_thread);
+
+ /* Stop profiling this thread, do the cleanup. */
+
+ cur_thread->thread_profiled_own = FALSE;
+ cur_thread->thread_profiled = FALSE;
+ dealloc_pbuf_area(cur_thread->profil_buffer);
+ cur_thread->profil_buffer = NULLPBUF;
+ }
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+mach_sample_task (task, reply, cur_task)
+ipc_space_t task;
+ipc_object_t reply;
+task_t cur_task;
+{
+ prof_data_t pbuf=cur_task->profil_buffer;
+ vm_offset_t vmpbuf;
+ int turnon = (reply != MACH_PORT_NULL);
+
+ if (turnon) {
+ if (cur_task->task_profiled) {
+ if (cur_task->profil_buffer->prof_port == reply)
+ return KERN_SUCCESS;
+ (void) mach_sample_task(task, MACH_PORT_NULL, cur_task);
+ }
+ if (pbuf == NULLPBUF) {
+ alloc_pbuf_area(pbuf, vmpbuf);
+ if (pbuf == NULLPBUF) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+ cur_task->profil_buffer = pbuf;
+ }
+ if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
+ return KERN_FAILURE;
+ }
+ reset_pbuf_area(pbuf);
+ pbuf->prof_port = reply;
+ }
+
+ if (turnon != cur_task->task_profiled) {
+ int actual,i,sentone;
+ thread_t thread;
+
+ if (turnon && profile_thread_id == THREAD_NULL)
+ profile_thread_id =
+ kernel_thread(current_task(), profile_thread);
+ cur_task->task_profiled = turnon;
+ actual = cur_task->thread_count;
+ sentone = 0;
+ for (i=0, thread=(thread_t) queue_first(&cur_task->thread_list);
+ i < actual;
+ i++, thread=(thread_t) queue_next(&thread->thread_list)) {
+ if (!thread->thread_profiled_own) {
+ thread->thread_profiled = turnon;
+ if (turnon)
+ thread->profil_buffer = cur_task->profil_buffer;
+ else if (!sentone) {
+ send_last_sample_buf(thread);
+ sentone = 1;
+ }
+ }
+ }
+ if (!turnon) {
+ dealloc_pbuf_area(pbuf);
+ cur_task->profil_buffer = NULLPBUF;
+ }
+ }
+
+ return KERN_SUCCESS;
+}
+
+#endif /* 0 */
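
Annotation (not part of the diff): profile() above fills per-thread sample buffers at clock-interrupt level and hands full ones to profile_thread over a locked queue, switching to another buffer so sampling never blocks. Below is a portable sketch of that double-buffering; record_sample(), flush_buf(), SAMPLES_PER_BUF and NBUFS are invented here, and the kernel's buffer layout and queueing are richer than this.

#include <stdio.h>

#define SAMPLES_PER_BUF 64
#define NBUFS           2

struct sample_buf {
    unsigned long pc[SAMPLES_PER_BUF];
    int index;                   /* next free slot, like p_index */
};

static struct sample_buf bufs[NBUFS];
static int cur;                  /* buffer currently being filled */
static int flushes;              /* how many full buffers were handed off */

/* Stand-in for queueing the buffer to profile_thread: here we just count
   the hand-off and recycle the buffer immediately. */
static void flush_buf(struct sample_buf *b)
{
    flushes++;
    b->index = 0;
}

/* Plays the role of profile(): append a sample, and when the current
   buffer fills, hand it off and switch to the other buffer. */
void record_sample(unsigned long pc)
{
    struct sample_buf *b = &bufs[cur];

    b->pc[b->index++] = pc;
    if (b->index == SAMPLES_PER_BUF) {
        flush_buf(b);                /* in the kernel: mpenqueue + wakeup */
        cur = (cur + 1) % NBUFS;     /* like reset_pbuf_area: next buffer */
    }
}

int main(void)
{
    unsigned long pc;

    for (pc = 0; pc < 200; pc++)
        record_sample(pc);
    printf("%d buffers handed off\n", flushes);   /* prints 3 */
    return 0;
}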
diff --git a/kern/queue.c b/kern/queue.c
new file mode 100644
index 00000000..98b74c20
--- /dev/null
+++ b/kern/queue.c
@@ -0,0 +1,131 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Routines to implement queue package.
+ */
+
+#include <kern/queue.h>
+
+
+
+/*
+ * Insert element at head of queue.
+ */
+void enqueue_head(
+ register queue_t que,
+ register queue_entry_t elt)
+{
+ elt->next = que->next;
+ elt->prev = que;
+ elt->next->prev = elt;
+ que->next = elt;
+}
+
+/*
+ * Insert element at tail of queue.
+ */
+void enqueue_tail(
+ register queue_t que,
+ register queue_entry_t elt)
+{
+ elt->next = que;
+ elt->prev = que->prev;
+ elt->prev->next = elt;
+ que->prev = elt;
+}
+
+/*
+ * Remove and return element at head of queue.
+ */
+queue_entry_t dequeue_head(
+ register queue_t que)
+{
+ register queue_entry_t elt;
+
+ if (que->next == que)
+ return((queue_entry_t)0);
+
+ elt = que->next;
+ elt->next->prev = que;
+ que->next = elt->next;
+ return(elt);
+}
+
+/*
+ * Remove and return element at tail of queue.
+ */
+queue_entry_t dequeue_tail(
+ register queue_t que)
+{
+ register queue_entry_t elt;
+
+ if (que->prev == que)
+ return((queue_entry_t)0);
+
+ elt = que->prev;
+ elt->prev->next = que;
+ que->prev = elt->prev;
+ return(elt);
+}
+
+/*
+ * Remove arbitrary element from queue.
+ * Does not check whether element is on queue - the world
+ * will go haywire if it isn't.
+ */
+
+/*ARGSUSED*/
+void remqueue(
+ queue_t que,
+ register queue_entry_t elt)
+{
+ elt->next->prev = elt->prev;
+ elt->prev->next = elt->next;
+}
+
+/*
+ * Routines to directly imitate the VAX hardware queue
+ * package.
+ */
+void insque(
+ register struct queue_entry *entry,
+ register struct queue_entry *pred)
+{
+ entry->next = pred->next;
+ entry->prev = pred;
+ (pred->next)->prev = entry;
+ pred->next = entry;
+}
+
+struct queue_entry
+*remque(
+ register struct queue_entry *elt)
+{
+ (elt->next)->prev = elt->prev;
+ (elt->prev)->next = elt->next;
+ return(elt);
+}
+
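Annotation (not part of the diff): the routines above operate on bare queue_entry structures, and the kernel relies on the chain links being the first field of the element (as with threads on wait queues) so an element pointer can be cast directly to queue_entry_t. The standalone sketch below mirrors that usage; the q_* names and struct job are local stand-ins so it compiles outside the kernel tree, with bodies matching enqueue_tail() and dequeue_head() above.

#include <stdio.h>
#include <stddef.h>

struct q_entry {
    struct q_entry *next, *prev;
};

static void q_init(struct q_entry *q) { q->next = q->prev = q; }

static void q_enqueue_tail(struct q_entry *q, struct q_entry *e)
{
    e->next = q;
    e->prev = q->prev;
    e->prev->next = e;
    q->prev = e;
}

static struct q_entry *q_dequeue_head(struct q_entry *q)
{
    struct q_entry *e;

    if (q->next == q)
        return NULL;            /* empty */
    e = q->next;
    e->next->prev = q;
    q->next = e->next;
    return e;
}

struct job {
    struct q_entry links;       /* must come first for the casts below */
    int id;
};

int main(void)
{
    struct q_entry head;
    struct job a = { { NULL, NULL }, 1 }, b = { { NULL, NULL }, 2 };
    struct q_entry *e;

    q_init(&head);
    q_enqueue_tail(&head, (struct q_entry *) &a);
    q_enqueue_tail(&head, (struct q_entry *) &b);

    while ((e = q_dequeue_head(&head)) != NULL)
        printf("job %d\n", ((struct job *) e)->id);   /* prints 1 then 2 */
    return 0;
}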
diff --git a/kern/queue.h b/kern/queue.h
new file mode 100644
index 00000000..2f8f792d
--- /dev/null
+++ b/kern/queue.h
@@ -0,0 +1,369 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon rights
+ * to redistribute these changes.
+ */
+/*
+ * File: queue.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Type definitions for generic queues.
+ *
+ */
+
+#ifndef _KERN_QUEUE_H_
+#define _KERN_QUEUE_H_
+
+#include <kern/lock.h>
+
+/*
+ * Queue of abstract objects. Queue is maintained
+ * within that object.
+ *
+ * Supports fast removal from within the queue.
+ *
+ * How to declare a queue of elements of type "foo_t":
+ * In the "*foo_t" type, you must have a field of
+ * type "queue_chain_t" to hold together this queue.
+ * There may be more than one chain through a
+ * "foo_t", for use by different queues.
+ *
+ * Declare the queue as a "queue_t" type.
+ *
+ * Elements of the queue (of type "foo_t", that is)
+ * are referred to by reference, and cast to type
+ * "queue_entry_t" within this module.
+ */
+
+/*
+ * A generic doubly-linked list (queue).
+ */
+
+struct queue_entry {
+ struct queue_entry *next; /* next element */
+ struct queue_entry *prev; /* previous element */
+};
+
+typedef struct queue_entry *queue_t;
+typedef struct queue_entry queue_head_t;
+typedef struct queue_entry queue_chain_t;
+typedef struct queue_entry *queue_entry_t;
+
+/*
+ * enqueue puts "elt" on the "queue".
+ * dequeue returns the first element in the "queue".
+ * remqueue removes the specified "elt" from the specified "queue".
+ */
+
+#define enqueue(queue,elt) enqueue_tail(queue, elt)
+#define dequeue(queue) dequeue_head(queue)
+
+void enqueue_head(queue_t, queue_entry_t);
+void enqueue_tail(queue_t, queue_entry_t);
+queue_entry_t dequeue_head(queue_t);
+queue_entry_t dequeue_tail(queue_t);
+void remqueue(queue_t, queue_entry_t);
+
+/*
+ * Macro: queue_init
+ * Function:
+ * Initialize the given queue.
+ * Header:
+ * void queue_init(q)
+ * queue_t q; *MODIFIED*
+ */
+#define queue_init(q) ((q)->next = (q)->prev = q)
+
+/*
+ * Macro: queue_first
+ * Function:
+ * Returns the first entry in the queue.
+ * Header:
+ * queue_entry_t queue_first(q)
+ * queue_t q; *IN*
+ */
+#define queue_first(q) ((q)->next)
+
+/*
+ * Macro: queue_next
+ * Function:
+ * Returns the entry after an item in the queue.
+ * Header:
+ * queue_entry_t queue_next(qc)
+ * queue_t qc;
+ */
+#define queue_next(qc) ((qc)->next)
+
+/*
+ * Macro: queue_last
+ * Function:
+ * Returns the last entry in the queue.
+ * Header:
+ * queue_entry_t queue_last(q)
+ * queue_t q; *IN*
+ */
+#define queue_last(q) ((q)->prev)
+
+/*
+ * Macro: queue_prev
+ * Function:
+ * Returns the entry before an item in the queue.
+ * Header:
+ * queue_entry_t queue_prev(qc)
+ * queue_t qc;
+ */
+#define queue_prev(qc) ((qc)->prev)
+
+/*
+ * Macro: queue_end
+ * Function:
+ * Tests whether a new entry is really the end of
+ * the queue.
+ * Header:
+ * boolean_t queue_end(q, qe)
+ * queue_t q;
+ * queue_entry_t qe;
+ */
+#define queue_end(q, qe) ((q) == (qe))
+
+/*
+ * Macro: queue_empty
+ * Function:
+ * Tests whether a queue is empty.
+ * Header:
+ * boolean_t queue_empty(q)
+ * queue_t q;
+ */
+#define queue_empty(q) queue_end((q), queue_first(q))
+
+
+/*----------------------------------------------------------------*/
+/*
+ * Macros that operate on generic structures. The queue
+ * chain may be at any location within the structure, and there
+ * may be more than one chain.
+ */
+
+/*
+ * Macro: queue_enter
+ * Function:
+ * Insert a new element at the tail of the queue.
+ * Header:
+ * void queue_enter(q, elt, type, field)
+ * queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ */
+#define queue_enter(head, elt, type, field) \
+{ \
+ register queue_entry_t prev; \
+ \
+ prev = (head)->prev; \
+ if ((head) == prev) { \
+ (head)->next = (queue_entry_t) (elt); \
+ } \
+ else { \
+ ((type)prev)->field.next = (queue_entry_t)(elt);\
+ } \
+ (elt)->field.prev = prev; \
+ (elt)->field.next = head; \
+ (head)->prev = (queue_entry_t) elt; \
+}
+
+/*
+ * Macro: queue_enter_first
+ * Function:
+ * Insert a new element at the head of the queue.
+ * Header:
+ * void queue_enter_first(q, elt, type, field)
+ * queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ */
+#define queue_enter_first(head, elt, type, field) \
+{ \
+ register queue_entry_t next; \
+ \
+ next = (head)->next; \
+ if ((head) == next) { \
+ (head)->prev = (queue_entry_t) (elt); \
+ } \
+ else { \
+ ((type)next)->field.prev = (queue_entry_t)(elt);\
+ } \
+ (elt)->field.next = next; \
+ (elt)->field.prev = head; \
+ (head)->next = (queue_entry_t) elt; \
+}
+
+/*
+ * Macro: queue_field [internal use only]
+ * Function:
+ * Find the queue_chain_t (or queue_t) for the
+ * given element (thing) in the given queue (head)
+ */
+#define queue_field(head, thing, type, field) \
+ (((head) == (thing)) ? (head) : &((type)(thing))->field)
+
+/*
+ * Macro: queue_remove
+ * Function:
+ * Remove an arbitrary item from the queue.
+ * Header:
+ * void queue_remove(q, qe, type, field)
+ * arguments as in queue_enter
+ */
+#define queue_remove(head, elt, type, field) \
+{ \
+ register queue_entry_t next, prev; \
+ \
+ next = (elt)->field.next; \
+ prev = (elt)->field.prev; \
+ \
+ if ((head) == next) \
+ (head)->prev = prev; \
+ else \
+ ((type)next)->field.prev = prev; \
+ \
+ if ((head) == prev) \
+ (head)->next = next; \
+ else \
+ ((type)prev)->field.next = next; \
+}
+
+/*
+ * Macro: queue_remove_first
+ * Function:
+ * Remove and return the entry at the head of
+ * the queue.
+ * Header:
+ * queue_remove_first(head, entry, type, field)
+ * entry is returned by reference
+ */
+#define queue_remove_first(head, entry, type, field) \
+{ \
+ register queue_entry_t next; \
+ \
+ (entry) = (type) ((head)->next); \
+ next = (entry)->field.next; \
+ \
+ if ((head) == next) \
+ (head)->prev = (head); \
+ else \
+ ((type)(next))->field.prev = (head); \
+ (head)->next = next; \
+}
+
+/*
+ * Macro: queue_remove_last
+ * Function:
+ * Remove and return the entry at the tail of
+ * the queue.
+ * Header:
+ * queue_remove_last(head, entry, type, field)
+ * entry is returned by reference
+ */
+#define queue_remove_last(head, entry, type, field) \
+{ \
+ register queue_entry_t prev; \
+ \
+ (entry) = (type) ((head)->prev); \
+ prev = (entry)->field.prev; \
+ \
+ if ((head) == prev) \
+ (head)->next = (head); \
+ else \
+ ((type)(prev))->field.next = (head); \
+ (head)->prev = prev; \
+}
+
+/*
+ * Macro: queue_assign
+ */
+#define queue_assign(to, from, type, field) \
+{ \
+ ((type)((from)->prev))->field.next = (to); \
+ ((type)((from)->next))->field.prev = (to); \
+ *to = *from; \
+}
+
+/*
+ * Macro: queue_iterate
+ * Function:
+ * iterate over each item in the queue.
+ * Generates a 'for' loop, setting elt to
+ * each item in turn (by reference).
+ * Header:
+ * queue_iterate(q, elt, type, field)
+ * queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ */
+#define queue_iterate(head, elt, type, field) \
+ for ((elt) = (type) queue_first(head); \
+ !queue_end((head), (queue_entry_t)(elt)); \
+ (elt) = (type) queue_next(&(elt)->field))
+
+
+
+/*----------------------------------------------------------------*/
+/*
+ * Define macros for queues with locks.
+ */
+struct mpqueue_head {
+ struct queue_entry head; /* header for queue */
+ struct slock lock; /* lock for queue */
+};
+
+typedef struct mpqueue_head mpqueue_head_t;
+
+#define round_mpq(size) (size)
+
+#define mpqueue_init(q) \
+ { \
+ queue_init(&(q)->head); \
+ simple_lock_init(&(q)->lock); \
+ }
+
+#define mpenqueue_tail(q, elt) \
+ simple_lock(&(q)->lock); \
+ enqueue_tail(&(q)->head, elt); \
+ simple_unlock(&(q)->lock);
+
+#define mpdequeue_head(q, elt) \
+ simple_lock(&(q)->lock); \
+ if (queue_empty(&(q)->head)) \
+ *(elt) = 0; \
+ else \
+ *(elt) = dequeue_head(&(q)->head); \
+ simple_unlock(&(q)->lock);
+
+/*
+ * Old queue stuff, will go away soon.
+ */
+
+#endif /* _KERN_QUEUE_H_ */
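
Annotation (not part of the diff): unlike the raw routines in queue.c, the typed macros above let the chain field sit at any offset in the element. The sketch below copies just enough of the header (types, queue_enter, queue_iterate) to compile on its own and walks a queue the way the kernel walks pset->tasks through task->pset_tasks; struct task here is a toy, not the kernel's.

#include <stdio.h>

/* Local copies of the kern/queue.h types and two of its macros, trimmed
   slightly, so the sketch builds outside the kernel tree. */
struct queue_entry { struct queue_entry *next, *prev; };
typedef struct queue_entry *queue_t, *queue_entry_t;
typedef struct queue_entry queue_head_t, queue_chain_t;

#define queue_init(q)           ((q)->next = (q)->prev = (q))
#define queue_first(q)          ((q)->next)
#define queue_next(qc)          ((qc)->next)
#define queue_end(q, qe)        ((q) == (qe))

#define queue_enter(head, elt, type, field) {                   \
        queue_entry_t prev = (head)->prev;                      \
        if ((head) == prev)                                     \
                (head)->next = (queue_entry_t)(elt);            \
        else                                                    \
                ((type)prev)->field.next = (queue_entry_t)(elt);\
        (elt)->field.prev = prev;                               \
        (elt)->field.next = (head);                             \
        (head)->prev = (queue_entry_t)(elt);                    \
}

#define queue_iterate(head, elt, type, field)                   \
        for ((elt) = (type) queue_first(head);                  \
             !queue_end((head), (queue_entry_t)(elt));          \
             (elt) = (type) queue_next(&(elt)->field))

struct task {
    int id;
    queue_chain_t pset_tasks;   /* the chain field need not come first */
};

int main(void)
{
    queue_head_t tasks;
    struct task t1 = { 1 }, t2 = { 2 };
    struct task *t;

    queue_init(&tasks);
    queue_enter(&tasks, &t1, struct task *, pset_tasks);
    queue_enter(&tasks, &t2, struct task *, pset_tasks);

    queue_iterate(&tasks, t, struct task *, pset_tasks)
        printf("task %d\n", t->id);     /* prints 1 then 2 */
    return 0;
}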
diff --git a/kern/refcount.h b/kern/refcount.h
new file mode 100644
index 00000000..7fd6cdfb
--- /dev/null
+++ b/kern/refcount.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: refcount.h
+ *
+ * This defines the system-independent part of the atomic reference count data type.
+ *
+ */
+
+#ifndef _KERN_REFCOUNT_H_
+#define _KERN_REFCOUNT_H_
+
+#include <kern/macro_help.h>
+
+#include "refcount.h" /*XXX*/
+
+/* Unless the above include file specified otherwise,
+ use the system-independent (unoptimized) atomic reference counter. */
+#ifndef MACHINE_REFCOUNT
+
+#include <kern/lock.h>
+
+struct RefCount {
+ decl_simple_lock_data(,lock) /* lock for reference count */
+ int ref_count; /* number of references */
+};
+typedef struct RefCount RefCount;
+
+#define refcount_init(refcount, refs) \
+ MACRO_BEGIN \
+ simple_lock_init(&(refcount)->lock); \
+ ((refcount)->ref_count = (refs)); \
+ MACRO_END
+
+#define refcount_take(refcount) \
+ MACRO_BEGIN \
+ simple_lock(&(refcount)->lock); \
+ (refcount)->ref_count++; \
+ simple_unlock(&(refcount)->lock); \
+ MACRO_END
+
+#define refcount_drop(refcount, func) \
+ MACRO_BEGIN \
+ int new_value; \
+ simple_lock(&(refcount)->lock); \
+ new_value = --(refcount)->ref_count; \
+ simple_unlock(&(refcount)->lock); \
+ if (new_value == 0) { func; } \
+ MACRO_END
+
+#endif
+
+#endif /* _KERN_REFCOUNT_H_ */
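
Annotation (not part of the diff): the macros above implement take/drop with the object destroyed by whoever drops the last reference. Below is a portable sketch of the same idea, with a pthread mutex in place of the simple lock; unlike refcount_drop() in the header, which runs its func argument at zero, this version returns the new count and lets the caller free the object. struct buffer is invented for the sketch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct refcount {
    pthread_mutex_t lock;       /* plays the simple lock */
    int ref_count;
};

static void refcount_init(struct refcount *rc, int refs)
{
    pthread_mutex_init(&rc->lock, NULL);
    rc->ref_count = refs;
}

static void refcount_take(struct refcount *rc)
{
    pthread_mutex_lock(&rc->lock);
    rc->ref_count++;
    pthread_mutex_unlock(&rc->lock);
}

/* Returns the new count; the caller destroys the object when it reaches 0. */
static int refcount_drop(struct refcount *rc)
{
    int new_value;

    pthread_mutex_lock(&rc->lock);
    new_value = --rc->ref_count;
    pthread_mutex_unlock(&rc->lock);
    return new_value;
}

struct buffer {
    struct refcount ref;
    char data[128];
};

int main(void)
{
    struct buffer *b = malloc(sizeof *b);

    if (b == NULL)
        return 1;
    refcount_init(&b->ref, 1);          /* creator holds the first reference */
    refcount_take(&b->ref);             /* a second user takes its own reference */

    if (refcount_drop(&b->ref) == 0)    /* one reference still outstanding */
        free(b);
    if (refcount_drop(&b->ref) == 0)    /* last reference: freed here */
        free(b);
    printf("released\n");
    return 0;
}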
diff --git a/kern/sched.h b/kern/sched.h
new file mode 100644
index 00000000..756384b0
--- /dev/null
+++ b/kern/sched.h
@@ -0,0 +1,181 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: sched.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Header file for scheduler.
+ *
+ */
+
+#ifndef _KERN_SCHED_H_
+#define _KERN_SCHED_H_
+
+#include <cpus.h>
+#include <mach_fixpri.h>
+#include <simple_clock.h>
+#include <stat_time.h>
+
+#include <kern/queue.h>
+#include <kern/lock.h>
+#include <kern/macro_help.h>
+
+#if MACH_FIXPRI
+#include <mach/policy.h>
+#endif /* MACH_FIXPRI */
+
+#if STAT_TIME
+
+/*
+ * Statistical timing uses microseconds as timer units. 18 bit shift
+ * yields priorities. PRI_SHIFT_2 isn't needed.
+ */
+#define PRI_SHIFT 18
+
+#else /* STAT_TIME */
+
+/*
+ * Otherwise machine provides shift(s) based on time units it uses.
+ */
+#include <machine/sched_param.h>
+
+#endif /* STAT_TIME */
+#define NRQS 32 /* 32 run queues per cpu */
+
+struct run_queue {
+ queue_head_t runq[NRQS]; /* one for each priority */
+ decl_simple_lock_data(, lock) /* one lock for all queues */
+ int low; /* low queue value */
+ int count; /* count of threads runnable */
+};
+
+typedef struct run_queue *run_queue_t;
+#define RUN_QUEUE_NULL ((run_queue_t) 0)
+
+#if MACH_FIXPRI
+/*
+ * NOTE: For fixed priority threads, first_quantum indicates
+ * whether context switch at same priority is ok. For timesharing
+ * it indicates whether preempt is ok.
+ */
+
+#define csw_needed(thread, processor) ((thread)->state & TH_SUSP || \
+ ((processor)->runq.count > 0) || \
+ ((thread)->policy == POLICY_TIMESHARE && \
+ (processor)->first_quantum == FALSE && \
+ (processor)->processor_set->runq.count > 0 && \
+ (processor)->processor_set->runq.low <= \
+ (thread)->sched_pri) || \
+ ((thread)->policy == POLICY_FIXEDPRI && \
+ (processor)->processor_set->runq.count > 0 && \
+ ((((processor)->first_quantum == FALSE) && \
+ ((processor)->processor_set->runq.low <= \
+ (thread)->sched_pri)) || \
+ ((processor)->processor_set->runq.low < \
+ (thread)->sched_pri))))
+
+#else /* MACH_FIXPRI */
+#define csw_needed(thread, processor) ((thread)->state & TH_SUSP || \
+ ((processor)->runq.count > 0) || \
+ ((processor)->first_quantum == FALSE && \
+ ((processor)->processor_set->runq.count > 0 && \
+ (processor)->processor_set->runq.low <= \
+ ((thread)->sched_pri))))
+#endif /* MACH_FIXPRI */
+
+/*
+ * Scheduler routines.
+ */
+
+extern struct run_queue *rem_runq();
+extern struct thread *choose_thread();
+extern queue_head_t action_queue; /* assign/shutdown queue */
+decl_simple_lock_data(extern,action_lock);
+
+extern int min_quantum; /* defines max context switch rate */
+
+/*
+ * Default base priorities for threads.
+ */
+#define BASEPRI_SYSTEM 6
+#define BASEPRI_USER 12
+
+/*
+ * Macro to check for invalid priorities.
+ */
+
+#define invalid_pri(pri) (((pri) < 0) || ((pri) >= NRQS))
+
+/*
+ * Shift structures for holding update shifts. Actual computation
+ * is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the
+ * +/- is determined by the sign of shift 2.
+ */
+struct shift {
+ int shift1;
+ int shift2;
+};
+
+typedef struct shift *shift_t, shift_data_t;
+
+/*
+ * sched_tick increments once a second. Used to age priorities.
+ */
+
+extern unsigned sched_tick;
+
+#define SCHED_SCALE 128
+#define SCHED_SHIFT 7
+
+/*
+ * thread_timer_delta macro takes care of both thread timers.
+ */
+
+#define thread_timer_delta(thread) \
+MACRO_BEGIN \
+ register unsigned delta; \
+ \
+ delta = 0; \
+ TIMER_DELTA((thread)->system_timer, \
+ (thread)->system_timer_save, delta); \
+ TIMER_DELTA((thread)->user_timer, \
+ (thread)->user_timer_save, delta); \
+ (thread)->cpu_delta += delta; \
+ (thread)->sched_delta += delta * \
+ (thread)->processor_set->sched_load; \
+MACRO_END
+
+#if SIMPLE_CLOCK
+/*
+ * sched_usec is an exponential average of number of microseconds
+ * in a second for clock drift compensation.
+ */
+
+extern int sched_usec;
+#endif /* SIMPLE_CLOCK */
+
+#endif /* _KERN_SCHED_H_ */
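
Annotation (not part of the diff): the struct shift comment above packs a multiply-by-fraction into two shifts, with the sign of shift2 choosing add or subtract. Below is a small standalone check of that encoding; the two example pairs are made up for this sketch, not taken from the kernel's tables.

#include <stdio.h>

struct shift {
    int shift1;
    int shift2;     /* negative means "subtract" instead of "add" */
};

/* usage = (usage >> shift1) +/- (usage >> abs(shift2)) */
static unsigned apply_shift(unsigned usage, struct shift s)
{
    if (s.shift2 > 0)
        return (usage >> s.shift1) + (usage >> s.shift2);
    else
        return (usage >> s.shift1) - (usage >> -s.shift2);
}

int main(void)
{
    struct shift five_eighths  = { 1,  3 };   /* 1/2 + 1/8 = 5/8 */
    struct shift seven_eighths = { 0, -3 };   /* 1   - 1/8 = 7/8 */

    printf("%u\n", apply_shift(800, five_eighths));   /* prints 500 */
    printf("%u\n", apply_shift(800, seven_eighths));  /* prints 700 */
    return 0;
}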
diff --git a/kern/sched_prim.c b/kern/sched_prim.c
new file mode 100644
index 00000000..b17e612f
--- /dev/null
+++ b/kern/sched_prim.c
@@ -0,0 +1,2062 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: sched_prim.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Scheduling primitives
+ *
+ */
+
+#include <cpus.h>
+#include <simple_clock.h>
+#include <mach_fixpri.h>
+#include <mach_host.h>
+#include <hw_footprint.h>
+#include <fast_tas.h>
+#include <power_save.h>
+
+#include <mach/machine.h>
+#include <kern/ast.h>
+#include <kern/counters.h>
+#include <kern/cpu_number.h>
+#include <kern/lock.h>
+#include <kern/macro_help.h>
+#include <kern/processor.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/syscall_subr.h>
+#include <kern/thread.h>
+#include <kern/thread_swap.h>
+#include <kern/time_out.h>
+#include <vm/pmap.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <machine/machspl.h> /* For def'n of splsched() */
+
+#if MACH_FIXPRI
+#include <mach/policy.h>
+#endif /* MACH_FIXPRI */
+
+
+extern int hz;
+
+int min_quantum; /* defines max context switch rate */
+
+unsigned sched_tick;
+
+#if SIMPLE_CLOCK
+int sched_usec;
+#endif /* SIMPLE_CLOCK */
+
+thread_t sched_thread_id;
+
+void recompute_priorities(void); /* forward */
+void update_priority(thread_t);
+void set_pri(thread_t, int, boolean_t);
+void do_thread_scan(void);
+
+thread_t choose_pset_thread();
+
+timer_elt_data_t recompute_priorities_timer;
+
+#if DEBUG
+void checkrq(run_queue_t, char *);
+void thread_check(thread_t, run_queue_t);
+#endif
+
+/*
+ * State machine
+ *
+ * states are combinations of:
+ * R running
+ * W waiting (or on wait queue)
+ * S suspended (or will suspend)
+ * N non-interruptible
+ *
+ * init action
+ * assert_wait thread_block clear_wait suspend resume
+ *
+ * R RW, RWN R; setrun - RS -
+ * RS RWS, RWNS S; wake_active - - R
+ * RN RWN RN; setrun - RNS -
+ * RNS RWNS RNS; setrun - - RN
+ *
+ * RW W R RWS -
+ * RWN WN RN RWNS -
+ * RWS WS; wake_active RS - RW
+ * RWNS WNS RNS - RWN
+ *
+ * W R; setrun WS -
+ * WN RN; setrun WNS -
+ * WNS RNS; setrun - WN
+ *
+ * S - - R
+ * WS S - W
+ *
+ */
+
+/*
+ * Waiting protocols and implementation:
+ *
+ * Each thread may be waiting for exactly one event; this event
+ * is set using assert_wait(). That thread may be awakened either
+ * by performing a thread_wakeup_prim() on its event,
+ * or by directly waking that thread up with clear_wait().
+ *
+ * The implementation of wait events uses a hash table. Each
+ * bucket is queue of threads having the same hash function
+ * value; the chain for the queue (linked list) is the run queue
+ * field. [It is not possible to be waiting and runnable at the
+ * same time.]
+ *
+ * Locks on both the thread and on the hash buckets govern the
+ * wait event field and the queue chain field. Because wakeup
+ * operations only have the event as an argument, the event hash
+ * bucket must be locked before any thread.
+ *
+ * Scheduling operations may also occur at interrupt level; therefore,
+ * interrupts below splsched() must be prevented when holding
+ * thread or hash bucket locks.
+ *
+ * The wait event hash table declarations are as follows:
+ */
+
+#define NUMQUEUES 59
+
+queue_head_t wait_queue[NUMQUEUES];
+decl_simple_lock_data(, wait_lock[NUMQUEUES])
+
+/* NOTE: we want a small positive integer out of this */
+#define wait_hash(event) \
+ ((((int)(event) < 0) ? ~(int)(event) : (int)(event)) % NUMQUEUES)
+
+void wait_queue_init(void)
+{
+ register int i;
+
+ for (i = 0; i < NUMQUEUES; i++) {
+ queue_init(&wait_queue[i]);
+ simple_lock_init(&wait_lock[i]);
+ }
+}
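
Annotation (not part of the diff): wait_hash() above maps an event (an address cast to int, so possibly negative) onto a small bucket index; the one's complement keeps the operand of % non-negative, since the sign of % with a negative operand was implementation-defined in the C of the day. A standalone check, with the two sample values made up for this sketch:

#include <stdio.h>

#define NUMQUEUES 59
#define wait_hash(event) \
        ((((int)(event) < 0) ? ~(int)(event) : (int)(event)) % NUMQUEUES)

int main(void)
{
    int positive = 0x00701234;
    int negative = -0x3FFFEDCC;   /* what a high kernel address can look like as int */

    printf("%d\n", wait_hash(positive));   /* some value in 0..58 */
    printf("%d\n", wait_hash(negative));   /* also in 0..58 */
    return 0;
}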
+
+void sched_init(void)
+{
+ recompute_priorities_timer.fcn = (int (*)())recompute_priorities;
+ recompute_priorities_timer.param = (char *)0;
+
+ min_quantum = hz / 10; /* context switch 10 times/second */
+ wait_queue_init();
+ pset_sys_bootstrap(); /* initialize processor mgmt. */
+ queue_init(&action_queue);
+ simple_lock_init(&action_lock);
+ sched_tick = 0;
+#if SIMPLE_CLOCK
+ sched_usec = 0;
+#endif /* SIMPLE_CLOCK */
+ ast_init();
+}
+
+/*
+ * Thread timeout routine, called when timer expires.
+ * Called at splsoftclock.
+ */
+void thread_timeout(
+ thread_t thread)
+{
+ assert(thread->timer.set == TELT_UNSET);
+
+ clear_wait(thread, THREAD_TIMED_OUT, FALSE);
+}
+
+/*
+ * thread_set_timeout:
+ *
+ * Set a timer for the current thread, if the thread
+ * is ready to wait. Must be called between assert_wait()
+ * and thread_block().
+ */
+
+void thread_set_timeout(
+ int t) /* timeout interval in ticks */
+{
+ register thread_t thread = current_thread();
+ register spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+ if ((thread->state & TH_WAIT) != 0) {
+ set_timeout(&thread->timer, t);
+ }
+ thread_unlock(thread);
+ splx(s);
+}
+
+/*
+ * Set up thread timeout element when thread is created.
+ */
+void thread_timeout_setup(
+ register thread_t thread)
+{
+ thread->timer.fcn = (int (*)())thread_timeout;
+ thread->timer.param = (char *)thread;
+ thread->depress_timer.fcn = (int (*)())thread_depress_timeout;
+ thread->depress_timer.param = (char *)thread;
+}
+
+/*
+ * assert_wait:
+ *
+ * Assert that the current thread is about to go to
+ * sleep until the specified event occurs.
+ */
+void assert_wait(
+ event_t event,
+ boolean_t interruptible)
+{
+ register queue_t q;
+ register int index;
+ register thread_t thread;
+#if MACH_SLOCKS
+ register simple_lock_t lock;
+#endif /* MACH_SLOCKS */
+ spl_t s;
+
+ thread = current_thread();
+ if (thread->wait_event != 0) {
+ panic("assert_wait: already asserted event %#x\n",
+ thread->wait_event);
+ }
+ s = splsched();
+ if (event != 0) {
+ index = wait_hash(event);
+ q = &wait_queue[index];
+#if MACH_SLOCKS
+ lock = &wait_lock[index];
+#endif /* MACH_SLOCKS */
+ simple_lock(lock);
+ thread_lock(thread);
+ enqueue_tail(q, (queue_entry_t) thread);
+ thread->wait_event = event;
+ if (interruptible)
+ thread->state |= TH_WAIT;
+ else
+ thread->state |= TH_WAIT | TH_UNINT;
+ thread_unlock(thread);
+ simple_unlock(lock);
+ }
+ else {
+ thread_lock(thread);
+ if (interruptible)
+ thread->state |= TH_WAIT;
+ else
+ thread->state |= TH_WAIT | TH_UNINT;
+ thread_unlock(thread);
+ }
+ splx(s);
+}
+
+/*
+ * clear_wait:
+ *
+ * Clear the wait condition for the specified thread. Start the thread
+ * executing if that is appropriate.
+ *
+ * parameters:
+ * thread thread to awaken
+ * result Wakeup result the thread should see
+ * interrupt_only Don't wake up the thread if it isn't
+ * interruptible.
+ */
+void clear_wait(
+ register thread_t thread,
+ int result,
+ boolean_t interrupt_only)
+{
+ register int index;
+ register queue_t q;
+#if MACH_SLOCKS
+ register simple_lock_t lock;
+#endif /* MACH_SLOCKS */
+ register event_t event;
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+ if (interrupt_only && (thread->state & TH_UNINT)) {
+ /*
+ * can`t interrupt thread
+ */
+ thread_unlock(thread);
+ splx(s);
+ return;
+ }
+
+ event = thread->wait_event;
+ if (event != 0) {
+ thread_unlock(thread);
+ index = wait_hash(event);
+ q = &wait_queue[index];
+#if MACH_SLOCKS
+ lock = &wait_lock[index];
+#endif /* MACH_SLOCKS */
+ simple_lock(lock);
+ /*
+ * If the thread is still waiting on that event,
+ * then remove it from the list. If it is waiting
+ * on a different event, or no event at all, then
+ * someone else did our job for us.
+ */
+ thread_lock(thread);
+ if (thread->wait_event == event) {
+ remqueue(q, (queue_entry_t)thread);
+ thread->wait_event = 0;
+ event = 0; /* so the wakeup code below runs */
+ }
+ simple_unlock(lock);
+ }
+ if (event == 0) {
+ register int state = thread->state;
+
+ reset_timeout_check(&thread->timer);
+
+ switch (state & TH_SCHED_STATE) {
+ case TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_WAIT | TH_UNINT:
+ case TH_WAIT:
+ /*
+ * Sleeping and not suspendable - put
+ * on run queue.
+ */
+ thread->state = (state &~ TH_WAIT) | TH_RUN;
+ thread->wait_result = result;
+ thread_setrun(thread, TRUE);
+ break;
+
+ case TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ /*
+ * Either already running, or suspended.
+ */
+ thread->state = state &~ TH_WAIT;
+ thread->wait_result = result;
+ break;
+
+ default:
+ /*
+ * Not waiting.
+ */
+ break;
+ }
+ }
+ thread_unlock(thread);
+ splx(s);
+}
+
+/*
+ * thread_wakeup_prim:
+ *
+ * Common routine for thread_wakeup, thread_wakeup_with_result,
+ * and thread_wakeup_one.
+ *
+ */
+void thread_wakeup_prim(
+ event_t event,
+ boolean_t one_thread,
+ int result)
+{
+ register queue_t q;
+ register int index;
+ register thread_t thread, next_th;
+#if MACH_SLOCKS
+ register simple_lock_t lock;
+#endif /* MACH_SLOCKS */
+ spl_t s;
+ register int state;
+
+ index = wait_hash(event);
+ q = &wait_queue[index];
+ s = splsched();
+#if MACH_SLOCKS
+ lock = &wait_lock[index];
+#endif /* MACH_SLOCKS */
+ simple_lock(lock);
+ thread = (thread_t) queue_first(q);
+ while (!queue_end(q, (queue_entry_t)thread)) {
+ next_th = (thread_t) queue_next((queue_t) thread);
+
+ if (thread->wait_event == event) {
+ thread_lock(thread);
+ remqueue(q, (queue_entry_t) thread);
+ thread->wait_event = 0;
+ reset_timeout_check(&thread->timer);
+
+ state = thread->state;
+ switch (state & TH_SCHED_STATE) {
+
+ case TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_WAIT | TH_UNINT:
+ case TH_WAIT:
+ /*
+ * Sleeping and not suspendable - put
+ * on run queue.
+ */
+ thread->state = (state &~ TH_WAIT) | TH_RUN;
+ thread->wait_result = result;
+ thread_setrun(thread, TRUE);
+ break;
+
+ case TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ /*
+ * Either already running, or suspended.
+ */
+ thread->state = state &~ TH_WAIT;
+ thread->wait_result = result;
+ break;
+
+ default:
+ panic("thread_wakeup");
+ break;
+ }
+ thread_unlock(thread);
+ if (one_thread)
+ break;
+ }
+ thread = next_th;
+ }
+ simple_unlock(lock);
+ splx(s);
+}
+
+/*
+ * thread_sleep:
+ *
+ * Cause the current thread to wait until the specified event
+ * occurs. The specified lock is unlocked before releasing
+ * the cpu. (This is a convenient way to sleep without manually
+ * calling assert_wait).
+ */
+void thread_sleep(
+ event_t event,
+ simple_lock_t lock,
+ boolean_t interruptible)
+{
+ assert_wait(event, interruptible); /* assert event */
+ simple_unlock(lock); /* release the lock */
+ thread_block((void (*)()) 0); /* block ourselves */
+}
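
Annotation (not part of the diff): the point of thread_sleep() above is ordering, the wait is asserted before the lock protecting the condition is released, so a wakeup that races with going to sleep cannot be lost. Below is the same discipline written with pthreads, where pthread_cond_wait() drops the mutex atomically; data_ready, producer() and consumer() are invented for the sketch, and the condition variable is only a stand-in for the kernel's event hash.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER; /* plays the simple lock */
static pthread_cond_t  event = PTHREAD_COND_INITIALIZER;  /* plays the wait event */
static int data_ready;

static void *consumer(void *arg)
{
    (void) arg;
    pthread_mutex_lock(&lock);
    while (!data_ready)                      /* like assert_wait + thread_block, */
        pthread_cond_wait(&event, &lock);    /* atomically dropping the lock */
    printf("consumer: woke up\n");
    pthread_mutex_unlock(&lock);
    return NULL;
}

static void *producer(void *arg)
{
    (void) arg;
    pthread_mutex_lock(&lock);               /* like taking the simple lock */
    data_ready = 1;
    pthread_mutex_unlock(&lock);
    pthread_cond_broadcast(&event);          /* like thread_wakeup(event) */
    return NULL;
}

int main(void)
{
    pthread_t c, p;

    pthread_create(&c, NULL, consumer, NULL);
    pthread_create(&p, NULL, producer, NULL);
    pthread_join(c, NULL);
    pthread_join(p, NULL);
    return 0;
}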
+
+/*
+ * thread_bind:
+ *
+ * Force a thread to execute on the specified processor.
+ * If the thread is currently executing, it may wait until its
+ * time slice is up before switching onto the specified processor.
+ *
+ * A processor of PROCESSOR_NULL causes the thread to be unbound.
+ * xxx - DO NOT export this to users.
+ */
+void thread_bind(
+ register thread_t thread,
+ processor_t processor)
+{
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+ thread->bound_processor = processor;
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ * Select a thread for this processor (the current processor) to run.
+ * May select the current thread.
+ * Assumes splsched.
+ */
+
+thread_t thread_select(
+ register processor_t myprocessor)
+{
+ register thread_t thread;
+
+ myprocessor->first_quantum = TRUE;
+ /*
+ * Check for obvious simple case; local runq is
+ * empty and global runq has entry at hint.
+ */
+ if (myprocessor->runq.count > 0) {
+ thread = choose_thread(myprocessor);
+ myprocessor->quantum = min_quantum;
+ }
+ else {
+ register processor_set_t pset;
+
+#if MACH_HOST
+ pset = myprocessor->processor_set;
+#else /* MACH_HOST */
+ pset = &default_pset;
+#endif /* MACH_HOST */
+ simple_lock(&pset->runq.lock);
+#if DEBUG
+ checkrq(&pset->runq, "thread_select");
+#endif /* DEBUG */
+ if (pset->runq.count == 0) {
+ /*
+ * Nothing else runnable. Return if this
+ * thread is still runnable on this processor.
+ * Check for priority update if required.
+ */
+ thread = current_thread();
+ if ((thread->state == TH_RUN) &&
+#if MACH_HOST
+ (thread->processor_set == pset) &&
+#endif /* MACH_HOST */
+ ((thread->bound_processor == PROCESSOR_NULL) ||
+ (thread->bound_processor == myprocessor))) {
+
+ simple_unlock(&pset->runq.lock);
+ thread_lock(thread);
+ if (thread->sched_stamp != sched_tick)
+ update_priority(thread);
+ thread_unlock(thread);
+ }
+ else {
+ thread = choose_pset_thread(myprocessor, pset);
+ }
+ }
+ else {
+ register queue_t q;
+
+ /*
+ * If there is a thread at hint, grab it,
+ * else call choose_pset_thread.
+ */
+ q = pset->runq.runq + pset->runq.low;
+
+ if (queue_empty(q)) {
+ pset->runq.low++;
+ thread = choose_pset_thread(myprocessor, pset);
+ }
+ else {
+ thread = (thread_t) dequeue_head(q);
+ thread->runq = RUN_QUEUE_NULL;
+ pset->runq.count--;
+#if MACH_FIXPRI
+ /*
+ * Cannot lazy evaluate pset->runq.low for
+ * fixed priority policy
+ */
+ if ((pset->runq.count > 0) &&
+ (pset->policies & POLICY_FIXEDPRI)) {
+ while (queue_empty(q)) {
+ pset->runq.low++;
+ q++;
+ }
+ }
+#endif /* MACH_FIXPRI */
+#if DEBUG
+ checkrq(&pset->runq, "thread_select: after");
+#endif /* DEBUG */
+ simple_unlock(&pset->runq.lock);
+ }
+ }
+
+#if MACH_FIXPRI
+ if (thread->policy == POLICY_TIMESHARE) {
+#endif /* MACH_FIXPRI */
+ myprocessor->quantum = pset->set_quantum;
+#if MACH_FIXPRI
+ }
+ else {
+ /*
+ * POLICY_FIXEDPRI
+ */
+ myprocessor->quantum = thread->sched_data;
+ }
+#endif /* MACH_FIXPRI */
+ }
+
+ return thread;
+}
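
Annotation (not part of the diff): thread_select() above keeps a low-water hint (runq.low) into the per-priority queues and walks forward from it, advancing the hint lazily rather than on every dequeue. Below is a reduced sketch of that selection; struct run_queue here holds plain counters instead of thread lists, and choose_pri() is an invented name.

#include <stdio.h>

#define NRQS 32

struct run_queue {
    int runq[NRQS];     /* stand-in: number of runnable threads at each priority */
    int low;            /* hint: first queue worth looking at */
    int count;          /* total runnable threads */
};

/* Returns the priority queue to dequeue from, or -1 if the set is empty.
   Advances the hint past queues found empty, as thread_select() does. */
static int choose_pri(struct run_queue *rq)
{
    int q;

    if (rq->count == 0)
        return -1;
    for (q = rq->low; q < NRQS; q++) {
        if (rq->runq[q] > 0) {
            rq->low = q;        /* remember where work was found */
            return q;
        }
    }
    return -1;                  /* unreachable if count is kept accurate */
}

int main(void)
{
    struct run_queue rq = { {0}, 0, 0 };

    rq.runq[12] = 1;
    rq.count = 1;                                   /* one thread at priority 12 */
    printf("run priority %d\n", choose_pri(&rq));   /* prints 12 */
    return 0;
}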
+
+/*
+ * Stop running the current thread and start running the new thread.
+ * If continuation is non-zero, and the current thread is blocked,
+ * then it will resume by executing continuation on a new stack.
+ * Returns TRUE if the hand-off succeeds.
+ * Assumes splsched.
+ */
+
+boolean_t thread_invoke(
+ register thread_t old_thread,
+ continuation_t continuation,
+ register thread_t new_thread)
+{
+ /*
+ * Check for invoking the same thread.
+ */
+ if (old_thread == new_thread) {
+ /*
+ * Mark thread interruptible.
+ * Run continuation if there is one.
+ */
+ thread_lock(new_thread);
+ new_thread->state &= ~TH_UNINT;
+ thread_unlock(new_thread);
+
+ if (continuation != (void (*)()) 0) {
+ (void) spl0();
+ call_continuation(continuation);
+ /*NOTREACHED*/
+ }
+ return TRUE;
+ }
+
+ /*
+ * Check for stack-handoff.
+ */
+ thread_lock(new_thread);
+ if ((old_thread->stack_privilege != current_stack()) &&
+ (continuation != (void (*)()) 0))
+ {
+ switch (new_thread->state & TH_SWAP_STATE) {
+ case TH_SWAPPED:
+
+ new_thread->state &= ~(TH_SWAPPED | TH_UNINT);
+ thread_unlock(new_thread);
+
+#if NCPUS > 1
+ new_thread->last_processor = current_processor();
+#endif /* NCPUS > 1 */
+
+ /*
+ * Set up ast context of new thread and
+ * switch to its timer.
+ */
+ ast_context(new_thread, cpu_number());
+ timer_switch(&new_thread->system_timer);
+
+ stack_handoff(old_thread, new_thread);
+
+ /*
+ * We can dispatch the old thread now.
+ * This is like thread_dispatch, except
+ * that the old thread is left swapped
+ * *without* freeing its stack.
+ * This path is also much more frequent
+ * than actual calls to thread_dispatch.
+ */
+
+ thread_lock(old_thread);
+ old_thread->swap_func = continuation;
+
+ switch (old_thread->state) {
+ case TH_RUN | TH_SUSP:
+ case TH_RUN | TH_SUSP | TH_HALTED:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ /*
+ * Suspend the thread
+ */
+ old_thread->state = (old_thread->state & ~TH_RUN)
+ | TH_SWAPPED;
+ if (old_thread->wake_active) {
+ old_thread->wake_active = FALSE;
+ thread_unlock(old_thread);
+ thread_wakeup((event_t)&old_thread->wake_active);
+
+ goto after_old_thread;
+ }
+ break;
+
+ case TH_RUN | TH_SUSP | TH_UNINT:
+ case TH_RUN | TH_UNINT:
+ case TH_RUN:
+ /*
+ * We can`t suspend the thread yet,
+ * or it`s still running.
+ * Put back on a run queue.
+ */
+ old_thread->state |= TH_SWAPPED;
+ thread_setrun(old_thread, FALSE);
+ break;
+
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT:
+ /*
+ * Waiting, and not suspendable.
+ */
+ old_thread->state = (old_thread->state & ~TH_RUN)
+ | TH_SWAPPED;
+ break;
+
+ case TH_RUN | TH_IDLE:
+ /*
+ * Drop idle thread -- it is already in
+ * idle_thread_array.
+ */
+ old_thread->state = TH_RUN | TH_IDLE | TH_SWAPPED;
+ break;
+
+ default:
+ panic("thread_invoke");
+ }
+ thread_unlock(old_thread);
+ after_old_thread:
+
+ /*
+ * call_continuation calls the continuation
+ * after resetting the current stack pointer
+ * to recover stack space. If we called
+ * the continuation directly, we would risk
+ * running out of stack.
+ */
+
+ counter_always(c_thread_invoke_hits++);
+ (void) spl0();
+ call_continuation(new_thread->swap_func);
+ /*NOTREACHED*/
+ return TRUE; /* help for the compiler */
+
+ case TH_SW_COMING_IN:
+ /*
+ * Waiting for a stack
+ */
+ thread_swapin(new_thread);
+ thread_unlock(new_thread);
+ counter_always(c_thread_invoke_misses++);
+ return FALSE;
+
+ case 0:
+ /*
+ * Already has a stack - can`t handoff.
+ */
+ break;
+ }
+ }
+
+ else {
+ /*
+ * Check that the thread is swapped-in.
+ */
+ if (new_thread->state & TH_SWAPPED) {
+ if ((new_thread->state & TH_SW_COMING_IN) ||
+ !stack_alloc_try(new_thread, thread_continue))
+ {
+ thread_swapin(new_thread);
+ thread_unlock(new_thread);
+ counter_always(c_thread_invoke_misses++);
+ return FALSE;
+ }
+ }
+ }
+
+ new_thread->state &= ~(TH_SWAPPED | TH_UNINT);
+ thread_unlock(new_thread);
+
+ /*
+ * Thread is now interruptible.
+ */
+#if NCPUS > 1
+ new_thread->last_processor = current_processor();
+#endif /* NCPUS > 1 */
+
+ /*
+ * Set up ast context of new thread and switch to its timer.
+ */
+ ast_context(new_thread, cpu_number());
+ timer_switch(&new_thread->system_timer);
+
+ /*
+ * switch_context is machine-dependent. It does the
+ * machine-dependent components of a context-switch, like
+ * changing address spaces. It updates active_threads.
+ * It returns only if a continuation is not supplied.
+ */
+ counter_always(c_thread_invoke_csw++);
+ old_thread = switch_context(old_thread, continuation, new_thread);
+
+ /*
+ * We're back. Now old_thread is the thread that resumed
+ * us, and we have to dispatch it.
+ */
+ thread_dispatch(old_thread);
+
+ return TRUE;
+}
+
+/*
+ * thread_continue:
+ *
+ * Called when the current thread is given a new stack.
+ * Called at splsched.
+ */
+void thread_continue(
+ register thread_t old_thread)
+{
+ register continuation_t continuation = current_thread()->swap_func;
+
+ /*
+ * We must dispatch the old thread and then
+ * call the current thread's continuation.
+ * There might not be an old thread, if we are
+ * the first thread to run on this processor.
+ */
+
+ if (old_thread != THREAD_NULL)
+ thread_dispatch(old_thread);
+ (void) spl0();
+ (*continuation)();
+ /*NOTREACHED*/
+}
+
+
+/*
+ * thread_block:
+ *
+ * Block the current thread. If the thread is runnable
+ * then someone must have woken it up between its request
+ * to sleep and now. In this case, it goes back on a
+ * run queue.
+ *
+ * If a continuation is specified, then thread_block will
+ * attempt to discard the thread's kernel stack. When the
+ * thread resumes, it will execute the continuation function
+ * on a new kernel stack.
+ */
+
+void thread_block(
+ continuation_t continuation)
+{
+ register thread_t thread = current_thread();
+ register processor_t myprocessor = cpu_to_processor(cpu_number());
+ register thread_t new_thread;
+ spl_t s;
+
+ check_simple_locks();
+
+ s = splsched();
+
+#if FAST_TAS
+ {
+ extern void recover_ras();
+
+ if (csw_needed(thread, myprocessor))
+ recover_ras(thread);
+ }
+#endif /* FAST_TAS */
+
+ ast_off(cpu_number(), AST_BLOCK);
+
+ do
+ new_thread = thread_select(myprocessor);
+ while (!thread_invoke(thread, continuation, new_thread));
+
+ splx(s);
+}
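+
+/*
+ * Editor's note (illustrative sketch, not part of the original source):
+ * a typical continuation-style sleep built on the primitives above
+ * looks roughly like
+ *
+ *	assert_wait(event, FALSE);
+ *	thread_block(my_continuation);
+ *
+ * where my_continuation is a hypothetical routine that is entered on a
+ * fresh kernel stack once the thread is awakened.  sched_thread() and
+ * sched_thread_continue() below use exactly this pattern.
+ */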
+
+/*
+ * thread_run:
+ *
+ * Switch directly from the current thread to a specified
+ * thread. Both the current and new threads must be
+ * runnable.
+ *
+ * If a continuation is specified, then thread_run will
+ * attempt to discard the current thread's kernel stack. When the
+ * thread resumes, it will execute the continuation function
+ * on a new kernel stack.
+ */
+void thread_run(
+ continuation_t continuation,
+ register thread_t new_thread)
+{
+ register thread_t thread = current_thread();
+ register processor_t myprocessor = cpu_to_processor(cpu_number());
+ spl_t s;
+
+ check_simple_locks();
+
+ s = splsched();
+
+ while (!thread_invoke(thread, continuation, new_thread))
+ new_thread = thread_select(myprocessor);
+
+ splx(s);
+}
+
+/*
+ * Dispatches a running thread that is not on a runq.
+ * Called at splsched.
+ */
+
+void thread_dispatch(
+ register thread_t thread)
+{
+ /*
+ * If we are discarding the thread's stack, we must do it
+ * before the thread has a chance to run.
+ */
+
+ thread_lock(thread);
+
+ if (thread->swap_func != (void (*)()) 0) {
+ assert((thread->state & TH_SWAP_STATE) == 0);
+ thread->state |= TH_SWAPPED;
+ stack_free(thread);
+ }
+
+ switch (thread->state &~ TH_SWAP_STATE) {
+ case TH_RUN | TH_SUSP:
+ case TH_RUN | TH_SUSP | TH_HALTED:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ /*
+ * Suspend the thread
+ */
+ thread->state &= ~TH_RUN;
+ if (thread->wake_active) {
+ thread->wake_active = FALSE;
+ thread_unlock(thread);
+ thread_wakeup((event_t)&thread->wake_active);
+ return;
+ }
+ break;
+
+ case TH_RUN | TH_SUSP | TH_UNINT:
+ case TH_RUN | TH_UNINT:
+ case TH_RUN:
+ /*
+ * No reason to stop. Put back on a run queue.
+ */
+ thread_setrun(thread, FALSE);
+ break;
+
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_UNINT:
+ case TH_RUN | TH_WAIT:
+ /*
+ * Waiting, and not suspendable.
+ */
+ thread->state &= ~TH_RUN;
+ break;
+
+ case TH_RUN | TH_IDLE:
+ /*
+ * Drop idle thread -- it is already in
+ * idle_thread_array.
+ */
+ break;
+
+ default:
+ panic("thread_dispatch");
+ }
+ thread_unlock(thread);
+}
+
+
+/*
+ * Define shifts for simulating (5/8)**n
+ */
+
+shift_data_t wait_shift[32] = {
+ {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
+ {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
+ {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
+ {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}};
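+
+/*
+ * Editor's note (worked example, not part of the original source):
+ * wait_shift[n] approximates multiplication by (5/8)**n using two
+ * shifts.  For n = 1, {1,3} gives x>>1 + x>>3 = 0.625x, exactly
+ * (5/8)x; for n = 2, {1,-3} gives x>>1 - x>>3 = 0.375x, close to
+ * (5/8)**2 = 0.39x (a negative second shift means "subtract", see
+ * update_priority below).
+ */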
+
+/*
+ * do_priority_computation:
+ *
+ * Calculate new priority for thread based on its base priority plus
+ * accumulated usage. PRI_SHIFT and PRI_SHIFT_2 convert from
+ * usage to priorities. SCHED_SHIFT converts for the scaling
+ * of the sched_usage field by SCHED_SCALE. This scaling comes
+ * from the multiplication by sched_load (thread_timer_delta)
+ * in sched.h. sched_load is calculated as a scaled overload
+ * factor in compute_mach_factor (mach_factor.c).
+ */
+
+#ifdef PRI_SHIFT_2
+#if PRI_SHIFT_2 > 0
+#define do_priority_computation(th, pri) \
+ MACRO_BEGIN \
+ (pri) = (th)->priority /* start with base priority */ \
+ + ((th)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)) \
+ + ((th)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT)); \
+ if ((pri) > 31) (pri) = 31; \
+ MACRO_END
+#else /* PRI_SHIFT_2 */
+#define do_priority_computation(th, pri) \
+ MACRO_BEGIN \
+ (pri) = (th)->priority /* start with base priority */ \
+ + ((th)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)) \
+ - ((th)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2)); \
+ if ((pri) > 31) (pri) = 31; \
+ MACRO_END
+#endif /* PRI_SHIFT_2 */
+#else /* defined(PRI_SHIFT_2) */
+#define do_priority_computation(th, pri) \
+ MACRO_BEGIN \
+ (pri) = (th)->priority /* start with base priority */ \
+ + ((th)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)); \
+ if ((pri) > 31) (pri) = 31; \
+ MACRO_END
+#endif /* defined(PRI_SHIFT_2) */
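+
+/*
+ * Editor's note (illustrative numbers only, not taken from any real
+ * configuration): with, say, PRI_SHIFT = 8, SCHED_SHIFT = 8 and no
+ * PRI_SHIFT_2, a thread with base priority 12 and sched_usage = 3 << 16
+ * gets pri = 12 + ((3 << 16) >> 16) = 15.  The clamp keeps pri <= 31,
+ * the numerically largest and therefore lowest priority.
+ */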
+
+/*
+ * compute_priority:
+ *
+ * Compute the effective priority of the specified thread.
+ * The effective priority computation is as follows:
+ *
+ * Take the base priority for this thread and add
+ * to it an increment derived from its cpu_usage.
+ *
+ * The thread *must* be locked by the caller.
+ */
+
+void compute_priority(
+ register thread_t thread,
+ boolean_t resched)
+{
+ register int pri;
+
+#if MACH_FIXPRI
+ if (thread->policy == POLICY_TIMESHARE) {
+#endif /* MACH_FIXPRI */
+ do_priority_computation(thread, pri);
+ if (thread->depress_priority < 0)
+ set_pri(thread, pri, resched);
+ else
+ thread->depress_priority = pri;
+#if MACH_FIXPRI
+ }
+ else {
+ set_pri(thread, thread->priority, resched);
+ }
+#endif /* MACH_FIXPRI */
+}
+
+/*
+ * compute_my_priority:
+ *
+ * Version of compute_priority for the current thread or a thread
+ * being manipulated by scheduler (going on or off a runq).
+ * Only used for priority updates. Policy or priority changes
+ * must call compute_priority above. Caller must have thread
+ * locked and know it is timesharing and not depressed.
+ */
+
+void compute_my_priority(
+ register thread_t thread)
+{
+ register int temp_pri;
+
+ do_priority_computation(thread,temp_pri);
+ thread->sched_pri = temp_pri;
+}
+
+/*
+ * recompute_priorities:
+ *
+ * Update the priorities of all threads periodically.
+ */
+void recompute_priorities(void)
+{
+#if SIMPLE_CLOCK
+ int new_usec;
+#endif /* SIMPLE_CLOCK */
+
+ sched_tick++; /* age usage one more time */
+ set_timeout(&recompute_priorities_timer, hz);
+#if SIMPLE_CLOCK
+ /*
+ * Compensate for clock drift. sched_usec is an
+ * exponential average of the number of microseconds in
+ * a second. It decays in the same fashion as cpu_usage.
+ */
+ new_usec = sched_usec_elapsed();
+ sched_usec = (5*sched_usec + 3*new_usec)/8;
+#endif /* SIMPLE_CLOCK */
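+ /*
+ * Editor's note (not in the original source): the SIMPLE_CLOCK
+ * average above gives the old estimate weight 5/8 and the new
+ * measurement weight 3/8, so a one-time clock hiccup decays as
+ * (5/8)**k after k ticks, the same rate at which cpu_usage ages.
+ */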
+ /*
+ * Wakeup scheduler thread.
+ */
+ if (sched_thread_id != THREAD_NULL) {
+ clear_wait(sched_thread_id, THREAD_AWAKENED, FALSE);
+ }
+}
+
+/*
+ * update_priority
+ *
+ * Cause the priority computation of a thread that has been
+ * sleeping or suspended to "catch up" with the system. Thread
+ * *MUST* be locked by caller. If thread is running, then this
+ * can only be called by the thread on itself.
+ */
+void update_priority(
+ register thread_t thread)
+{
+ register unsigned int ticks;
+ register shift_t shiftp;
+ register int temp_pri;
+
+ ticks = sched_tick - thread->sched_stamp;
+
+ assert(ticks != 0);
+
+ /*
+ * If asleep for more than 30 seconds forget all
+ * cpu_usage, else catch up on missed aging.
+ * 5/8 ** n is approximated by the two shifts
+ * in the wait_shift array.
+ */
+ thread->sched_stamp += ticks;
+ thread_timer_delta(thread);
+ if (ticks > 30) {
+ thread->cpu_usage = 0;
+ thread->sched_usage = 0;
+ }
+ else {
+ thread->cpu_usage += thread->cpu_delta;
+ thread->sched_usage += thread->sched_delta;
+ shiftp = &wait_shift[ticks];
+ if (shiftp->shift2 > 0) {
+ thread->cpu_usage =
+ (thread->cpu_usage >> shiftp->shift1) +
+ (thread->cpu_usage >> shiftp->shift2);
+ thread->sched_usage =
+ (thread->sched_usage >> shiftp->shift1) +
+ (thread->sched_usage >> shiftp->shift2);
+ }
+ else {
+ thread->cpu_usage =
+ (thread->cpu_usage >> shiftp->shift1) -
+ (thread->cpu_usage >> -(shiftp->shift2));
+ thread->sched_usage =
+ (thread->sched_usage >> shiftp->shift1) -
+ (thread->sched_usage >> -(shiftp->shift2));
+ }
+ }
+ thread->cpu_delta = 0;
+ thread->sched_delta = 0;
+ /*
+ * Recompute priority if appropriate.
+ */
+ if (
+#if MACH_FIXPRI
+ (thread->policy == POLICY_TIMESHARE) &&
+#endif /* MACH_FIXPRI */
+ (thread->depress_priority < 0)) {
+ do_priority_computation(thread, temp_pri);
+ thread->sched_pri = temp_pri;
+ }
+}
+
+/*
+ * run_queue_enqueue macro for thread_setrun().
+ */
+#if DEBUG
+#define run_queue_enqueue(rq, th) \
+ MACRO_BEGIN \
+ register unsigned int whichq; \
+ \
+ whichq = (th)->sched_pri; \
+ if (whichq >= NRQS) { \
+ printf("thread_setrun: pri too high (%d)\n", (th)->sched_pri); \
+ whichq = NRQS - 1; \
+ } \
+ \
+ simple_lock(&(rq)->lock); /* lock the run queue */ \
+ checkrq((rq), "thread_setrun: before adding thread"); \
+ enqueue_tail(&(rq)->runq[whichq], (queue_entry_t) (th)); \
+ \
+ if (whichq < (rq)->low || (rq)->count == 0) \
+ (rq)->low = whichq; /* minimize */ \
+ \
+ (rq)->count++; \
+ (th)->runq = (rq); \
+ thread_check((th), (rq)); \
+ checkrq((rq), "thread_setrun: after adding thread"); \
+ simple_unlock(&(rq)->lock); \
+ MACRO_END
+#else /* DEBUG */
+#define run_queue_enqueue(rq, th) \
+ MACRO_BEGIN \
+ register unsigned int whichq; \
+ \
+ whichq = (th)->sched_pri; \
+ if (whichq >= NRQS) { \
+ printf("thread_setrun: pri too high (%d)\n", (th)->sched_pri); \
+ whichq = NRQS - 1; \
+ } \
+ \
+ simple_lock(&(rq)->lock); /* lock the run queue */ \
+ enqueue_tail(&(rq)->runq[whichq], (queue_entry_t) (th)); \
+ \
+ if (whichq < (rq)->low || (rq)->count == 0) \
+ (rq)->low = whichq; /* minimize */ \
+ \
+ (rq)->count++; \
+ (th)->runq = (rq); \
+ simple_unlock(&(rq)->lock); \
+ MACRO_END
+#endif /* DEBUG */
+/*
+ * thread_setrun:
+ *
+ * Make thread runnable; dispatch directly onto an idle processor
+ * if possible. Else put on appropriate run queue (processor
+ * if bound, else processor set). Caller must have lock on thread.
+ * This is always called at splsched.
+ */
+
+void thread_setrun(
+ register thread_t th,
+ boolean_t may_preempt)
+{
+ register processor_t processor;
+ register run_queue_t rq;
+#if NCPUS > 1
+ register processor_set_t pset;
+#endif /* NCPUS > 1 */
+
+ /*
+ * Update priority if needed.
+ */
+ if (th->sched_stamp != sched_tick) {
+ update_priority(th);
+ }
+
+ assert(th->runq == RUN_QUEUE_NULL);
+
+#if NCPUS > 1
+ /*
+ * Try to dispatch the thread directly onto an idle processor.
+ */
+ if ((processor = th->bound_processor) == PROCESSOR_NULL) {
+ /*
+ * Not bound, any processor in the processor set is ok.
+ */
+ pset = th->processor_set;
+#if HW_FOOTPRINT
+ /*
+ * But first check the last processor it ran on.
+ */
+ processor = th->last_processor;
+ if (processor->state == PROCESSOR_IDLE) {
+ simple_lock(&processor->lock);
+ simple_lock(&pset->idle_lock);
+ if ((processor->state == PROCESSOR_IDLE)
+#if MACH_HOST
+ && (processor->processor_set == pset)
+#endif /* MACH_HOST */
+ ) {
+ queue_remove(&pset->idle_queue, processor,
+ processor_t, processor_queue);
+ pset->idle_count--;
+ processor->next_thread = th;
+ processor->state = PROCESSOR_DISPATCHING;
+ simple_unlock(&pset->idle_lock);
+ simple_unlock(&processor->lock);
+ return;
+ }
+ simple_unlock(&pset->idle_lock);
+ simple_unlock(&processor->lock);
+ }
+#endif /* HW_FOOTPRINT */
+
+ if (pset->idle_count > 0) {
+ simple_lock(&pset->idle_lock);
+ if (pset->idle_count > 0) {
+ processor = (processor_t) queue_first(&pset->idle_queue);
+ queue_remove(&(pset->idle_queue), processor, processor_t,
+ processor_queue);
+ pset->idle_count--;
+ processor->next_thread = th;
+ processor->state = PROCESSOR_DISPATCHING;
+ simple_unlock(&pset->idle_lock);
+ return;
+ }
+ simple_unlock(&pset->idle_lock);
+ }
+ rq = &(pset->runq);
+ run_queue_enqueue(rq,th);
+ /*
+ * Preempt check
+ */
+ if (may_preempt &&
+#if MACH_HOST
+ (pset == current_processor()->processor_set) &&
+#endif /* MACH_HOST */
+ (current_thread()->sched_pri > th->sched_pri)) {
+ /*
+ * Turn off first_quantum to allow csw.
+ */
+ current_processor()->first_quantum = FALSE;
+ ast_on(cpu_number(), AST_BLOCK);
+ }
+ }
+ else {
+ /*
+ * Bound, can only run on bound processor. Have to lock
+ * processor here because it may not be the current one.
+ */
+ if (processor->state == PROCESSOR_IDLE) {
+ simple_lock(&processor->lock);
+ pset = processor->processor_set;
+ simple_lock(&pset->idle_lock);
+ if (processor->state == PROCESSOR_IDLE) {
+ queue_remove(&pset->idle_queue, processor,
+ processor_t, processor_queue);
+ pset->idle_count--;
+ processor->next_thread = th;
+ processor->state = PROCESSOR_DISPATCHING;
+ simple_unlock(&pset->idle_lock);
+ simple_unlock(&processor->lock);
+ return;
+ }
+ simple_unlock(&pset->idle_lock);
+ simple_unlock(&processor->lock);
+ }
+ rq = &(processor->runq);
+ run_queue_enqueue(rq,th);
+
+ /*
+ * Cause ast on processor if processor is on line.
+ *
+ * XXX Don't do this remotely to master because this will
+ * XXX send an interprocessor interrupt, and that's too
+ * XXX expensive for all the unparallelized U*x code.
+ */
+ if (processor == current_processor()) {
+ ast_on(cpu_number(), AST_BLOCK);
+ }
+ else if ((processor != master_processor) &&
+ (processor->state != PROCESSOR_OFF_LINE)) {
+ cause_ast_check(processor);
+ }
+ }
+#else /* NCPUS > 1 */
+ /*
+ * XXX should replace queue with a boolean in this case.
+ */
+ if (default_pset.idle_count > 0) {
+ processor = (processor_t) queue_first(&default_pset.idle_queue);
+ queue_remove(&default_pset.idle_queue, processor,
+ processor_t, processor_queue);
+ default_pset.idle_count--;
+ processor->next_thread = th;
+ processor->state = PROCESSOR_DISPATCHING;
+ return;
+ }
+ if (th->bound_processor == PROCESSOR_NULL) {
+ rq = &(default_pset.runq);
+ }
+ else {
+ rq = &(master_processor->runq);
+ ast_on(cpu_number(), AST_BLOCK);
+ }
+ run_queue_enqueue(rq,th);
+
+ /*
+ * Preempt check
+ */
+ if (may_preempt && (current_thread()->sched_pri > th->sched_pri)) {
+ /*
+ * Turn off first_quantum to allow context switch.
+ */
+ current_processor()->first_quantum = FALSE;
+ ast_on(cpu_number(), AST_BLOCK);
+ }
+#endif /* NCPUS > 1 */
+}
+
+/*
+ * set_pri:
+ *
+ * Set the priority of the specified thread to the specified
+ * priority. This may cause the thread to change queues.
+ *
+ * The thread *must* be locked by the caller.
+ */
+
+void set_pri(
+ thread_t th,
+ int pri,
+ boolean_t resched)
+{
+ register struct run_queue *rq;
+
+ rq = rem_runq(th);
+ th->sched_pri = pri;
+ if (rq != RUN_QUEUE_NULL) {
+ if (resched)
+ thread_setrun(th, TRUE);
+ else
+ run_queue_enqueue(rq, th);
+ }
+}
+
+/*
+ * rem_runq:
+ *
+ * Remove a thread from its run queue.
+ * The run queue that the process was on is returned
+ * (or RUN_QUEUE_NULL if not on a run queue). Thread *must* be locked
+ * before calling this routine. Unusual locking protocol on runq
+ * field in thread structure makes this code interesting; see thread.h.
+ */
+
+struct run_queue *rem_runq(
+ thread_t th)
+{
+ register struct run_queue *rq;
+
+ rq = th->runq;
+ /*
+ * If rq is RUN_QUEUE_NULL, the thread will stay out of the
+ * run_queues because the caller locked the thread. Otherwise
+ * the thread is on a runq, but could leave.
+ */
+ if (rq != RUN_QUEUE_NULL) {
+ simple_lock(&rq->lock);
+#if DEBUG
+ checkrq(rq, "rem_runq: at entry");
+#endif /* DEBUG */
+ if (rq == th->runq) {
+ /*
+ * Thread is in a runq and we have a lock on
+ * that runq.
+ */
+#if DEBUG
+ checkrq(rq, "rem_runq: before removing thread");
+ thread_check(th, rq);
+#endif /* DEBUG */
+ remqueue(&rq->runq[0], (queue_entry_t) th);
+ rq->count--;
+#if DEBUG
+ checkrq(rq, "rem_runq: after removing thread");
+#endif /* DEBUG */
+ th->runq = RUN_QUEUE_NULL;
+ simple_unlock(&rq->lock);
+ }
+ else {
+ /*
+ * The thread left the runq before we could
+ * lock the runq. It is not on a runq now, and
+ * can't move again because this routine's
+ * caller locked the thread.
+ */
+ simple_unlock(&rq->lock);
+ rq = RUN_QUEUE_NULL;
+ }
+ }
+
+ return rq;
+}
+
+
+/*
+ * choose_thread:
+ *
+ * Choose a thread to execute. The thread chosen is removed
+ * from its run queue. Note that this requires only that the runq
+ * lock be held.
+ *
+ * Strategy:
+ * Check processor runq first; if anything found, run it.
+ * Else check pset runq; if nothing found, return idle thread.
+ *
+ * Second line of strategy is implemented by choose_pset_thread.
+ * This is only called on processor startup and when thread_block
+ * thinks there's something in the processor runq.
+ */
+
+thread_t choose_thread(
+ processor_t myprocessor)
+{
+ thread_t th;
+ register queue_t q;
+ register run_queue_t runq;
+ register int i;
+ register processor_set_t pset;
+
+ runq = &myprocessor->runq;
+
+ simple_lock(&runq->lock);
+ if (runq->count > 0) {
+ q = runq->runq + runq->low;
+ for (i = runq->low; i < NRQS ; i++, q++) {
+ if (!queue_empty(q)) {
+ th = (thread_t) dequeue_head(q);
+ th->runq = RUN_QUEUE_NULL;
+ runq->count--;
+ runq->low = i;
+ simple_unlock(&runq->lock);
+ return th;
+ }
+ }
+ panic("choose_thread");
+ /*NOTREACHED*/
+ }
+ simple_unlock(&runq->lock);
+
+ pset = myprocessor->processor_set;
+
+ simple_lock(&pset->runq.lock);
+ return choose_pset_thread(myprocessor,pset);
+}
+
+/*
+ * choose_pset_thread: choose a thread from processor_set runq or
+ * set processor idle and choose its idle thread.
+ *
+ * Caller must be at splsched and have a lock on the runq. This
+ * lock is released by this routine. myprocessor is always the current
+ * processor, and pset must be its processor set.
+ * This routine chooses and removes a thread from the runq if there
+ * is one (and returns it), else it sets the processor idle and
+ * returns its idle thread.
+ */
+
+thread_t choose_pset_thread(
+ register processor_t myprocessor,
+ processor_set_t pset)
+{
+ register run_queue_t runq;
+ register thread_t th;
+ register queue_t q;
+ register int i;
+
+ runq = &pset->runq;
+
+ if (runq->count > 0) {
+ q = runq->runq + runq->low;
+ for (i = runq->low; i < NRQS ; i++, q++) {
+ if (!queue_empty(q)) {
+ th = (thread_t) dequeue_head(q);
+ th->runq = RUN_QUEUE_NULL;
+ runq->count--;
+ /*
+ * For POLICY_FIXEDPRI, runq->low must be
+ * accurate!
+ */
+#if MACH_FIXPRI
+ if ((runq->count > 0) &&
+ (pset->policies & POLICY_FIXEDPRI)) {
+ while (queue_empty(q)) {
+ q++;
+ i++;
+ }
+ }
+#endif /* MACH_FIXPRI */
+ runq->low = i;
+#if DEBUG
+ checkrq(runq, "choose_pset_thread");
+#endif /* DEBUG */
+ simple_unlock(&runq->lock);
+ return th;
+ }
+ }
+ panic("choose_pset_thread");
+ /*NOTREACHED*/
+ }
+ simple_unlock(&runq->lock);
+
+ /*
+ * Nothing is runnable, so set this processor idle if it
+ * was running. If it was in an assignment or shutdown,
+ * leave it alone. Return its idle thread.
+ */
+ simple_lock(&pset->idle_lock);
+ if (myprocessor->state == PROCESSOR_RUNNING) {
+ myprocessor->state = PROCESSOR_IDLE;
+ /*
+ * XXX Until it goes away, put master on end of queue, others
+ * XXX on front so master gets used last.
+ */
+ if (myprocessor == master_processor) {
+ queue_enter(&(pset->idle_queue), myprocessor,
+ processor_t, processor_queue);
+ }
+ else {
+ queue_enter_first(&(pset->idle_queue), myprocessor,
+ processor_t, processor_queue);
+ }
+
+ pset->idle_count++;
+ }
+ simple_unlock(&pset->idle_lock);
+
+ return myprocessor->idle_thread;
+}
+
+/*
+ * no_dispatch_count counts number of times processors go non-idle
+ * without being dispatched. This should be very rare.
+ */
+int no_dispatch_count = 0;
+
+/*
+ * This is the idle thread, which just looks for other threads
+ * to execute.
+ */
+
+void idle_thread_continue(void)
+{
+ register processor_t myprocessor;
+ register volatile thread_t *threadp;
+ register volatile int *gcount;
+ register volatile int *lcount;
+ register thread_t new_thread;
+ register int state;
+ int mycpu;
+ spl_t s;
+
+ mycpu = cpu_number();
+ myprocessor = current_processor();
+ threadp = (volatile thread_t *) &myprocessor->next_thread;
+ lcount = (volatile int *) &myprocessor->runq.count;
+
+ while (TRUE) {
+#ifdef MARK_CPU_IDLE
+ MARK_CPU_IDLE(mycpu);
+#endif /* MARK_CPU_IDLE */
+
+#if MACH_HOST
+ gcount = (volatile int *)
+ &myprocessor->processor_set->runq.count;
+#else /* MACH_HOST */
+ gcount = (volatile int *) &default_pset.runq.count;
+#endif /* MACH_HOST */
+
+/*
+ * This cpu will be dispatched (by thread_setrun) by setting next_thread
+ * to the value of the thread to run next. Also check runq counts.
+ */
+ while ((*threadp == (volatile thread_t)THREAD_NULL) &&
+ (*gcount == 0) && (*lcount == 0)) {
+
+ /* check for ASTs while we wait */
+
+ if (need_ast[mycpu] &~ AST_SCHEDULING) {
+ (void) splsched();
+ /* don't allow scheduling ASTs */
+ need_ast[mycpu] &= ~AST_SCHEDULING;
+ ast_taken();
+ /* back at spl0 */
+ }
+
+ /*
+ * machine_idle is a machine dependent function,
+ * to conserve power.
+ */
+#if POWER_SAVE
+ machine_idle(mycpu);
+#endif /* POWER_SAVE */
+ }
+
+#ifdef MARK_CPU_ACTIVE
+ MARK_CPU_ACTIVE(mycpu);
+#endif /* MARK_CPU_ACTIVE */
+
+ s = splsched();
+
+ /*
+ * This is not a switch statement to avoid the
+ * bounds checking code in the common case.
+ */
+retry:
+ state = myprocessor->state;
+ if (state == PROCESSOR_DISPATCHING) {
+ /*
+ * Common case -- cpu dispatched.
+ */
+ new_thread = (thread_t) *threadp;
+ *threadp = (volatile thread_t) THREAD_NULL;
+ myprocessor->state = PROCESSOR_RUNNING;
+ /*
+ * set up quantum for new thread.
+ */
+#if MACH_FIXPRI
+ if (new_thread->policy == POLICY_TIMESHARE) {
+#endif /* MACH_FIXPRI */
+ /*
+ * Just use set quantum. No point in
+ * checking for shorter local runq quantum;
+ * csw_needed will handle correctly.
+ */
+#if MACH_HOST
+ myprocessor->quantum = new_thread->
+ processor_set->set_quantum;
+#else /* MACH_HOST */
+ myprocessor->quantum =
+ default_pset.set_quantum;
+#endif /* MACH_HOST */
+
+#if MACH_FIXPRI
+ }
+ else {
+ /*
+ * POLICY_FIXEDPRI
+ */
+ myprocessor->quantum = new_thread->sched_data;
+ }
+#endif /* MACH_FIXPRI */
+ myprocessor->first_quantum = TRUE;
+ counter(c_idle_thread_handoff++);
+ thread_run(idle_thread_continue, new_thread);
+ }
+ else if (state == PROCESSOR_IDLE) {
+ register processor_set_t pset;
+
+ pset = myprocessor->processor_set;
+ simple_lock(&pset->idle_lock);
+ if (myprocessor->state != PROCESSOR_IDLE) {
+ /*
+ * Something happened, try again.
+ */
+ simple_unlock(&pset->idle_lock);
+ goto retry;
+ }
+ /*
+ * Processor was not dispatched (Rare).
+ * Set it running again.
+ */
+ no_dispatch_count++;
+ pset->idle_count--;
+ queue_remove(&pset->idle_queue, myprocessor,
+ processor_t, processor_queue);
+ myprocessor->state = PROCESSOR_RUNNING;
+ simple_unlock(&pset->idle_lock);
+ counter(c_idle_thread_block++);
+ thread_block(idle_thread_continue);
+ }
+ else if ((state == PROCESSOR_ASSIGN) ||
+ (state == PROCESSOR_SHUTDOWN)) {
+ /*
+ * Changing processor sets, or going off-line.
+ * Release next_thread if there is one. Actual
+ * thread to run is on a runq.
+ */
+ if ((new_thread = (thread_t)*threadp)!= THREAD_NULL) {
+ *threadp = (volatile thread_t) THREAD_NULL;
+ thread_setrun(new_thread, FALSE);
+ }
+
+ counter(c_idle_thread_block++);
+ thread_block(idle_thread_continue);
+ }
+ else {
+ printf(" Bad processor state %d (Cpu %d)\n",
+ cpu_state(mycpu), mycpu);
+ panic("idle_thread");
+ }
+
+ (void) splx(s);
+ }
+}
+
+void idle_thread(void)
+{
+ register thread_t self = current_thread();
+ spl_t s;
+
+ stack_privilege(self);
+
+ s = splsched();
+ self->priority = 31;
+ self->sched_pri = 31;
+
+ /*
+ * Set the idle flag to indicate that this is an idle thread,
+ * enter ourselves in the idle array, and thread_block() to get
+ * out of the run queues (and set the processor idle when we
+ * run next time).
+ */
+ thread_lock(self);
+ self->state |= TH_IDLE;
+ thread_unlock(self);
+ current_processor()->idle_thread = self;
+ (void) splx(s);
+
+ counter(c_idle_thread_block++);
+ thread_block(idle_thread_continue);
+ idle_thread_continue();
+ /*NOTREACHED*/
+}
+
+/*
+ * sched_thread: scheduler thread.
+ *
+ * This thread handles periodic calculations in the scheduler that
+ * we don't want to do at interrupt level. This allows us to
+ * avoid blocking.
+ */
+void sched_thread_continue(void)
+{
+ while (TRUE) {
+ (void) compute_mach_factor();
+
+ /*
+ * Check for stuck threads. This can't be done off of
+ * the callout queue because it requires operations that
+ * can't be used from interrupt level.
+ */
+ if (sched_tick & 1)
+ do_thread_scan();
+
+ assert_wait((event_t) 0, FALSE);
+ counter(c_sched_thread_block++);
+ thread_block(sched_thread_continue);
+ }
+}
+
+void sched_thread(void)
+{
+ sched_thread_id = current_thread();
+
+ /*
+ * Sleep on event 0, recompute_priorities() will awaken
+ * us by calling clear_wait().
+ */
+ assert_wait((event_t) 0, FALSE);
+ counter(c_sched_thread_block++);
+ thread_block(sched_thread_continue);
+ sched_thread_continue();
+ /*NOTREACHED*/
+}
+
+#define MAX_STUCK_THREADS 16
+
+/*
+ * do_thread_scan: scan for stuck threads. A thread is stuck if
+ * it is runnable but its priority is so low that it has not
+ * run for several seconds. Its priority should be higher, but
+ * won't be until it runs and calls update_priority. The scanner
+ * finds these threads and does the updates.
+ *
+ * Scanner runs in two passes. Pass one squirrels likely
+ * thread ids away in an array, and removes them from the run queue.
+ * Pass two does the priority updates. This is necessary because
+ * the run queue lock is required for the candidate scan, but
+ * cannot be held during updates [set_pri will deadlock].
+ *
+ * Array length should be enough so that restart isn't necessary,
+ * but restart logic is included. Scans the pset runqs and the master
+ * processor's runq, but not the other per-processor runqs.
+ *
+ */
+
+boolean_t do_thread_scan_debug = FALSE;
+
+thread_t stuck_threads[MAX_STUCK_THREADS];
+int stuck_count = 0;
+
+/*
+ * do_runq_scan is the guts of pass 1. It scans a runq for
+ * stuck threads. A boolean is returned indicating whether
+ * it ran out of space.
+ */
+
+boolean_t
+do_runq_scan(
+ run_queue_t runq)
+{
+ register spl_t s;
+ register queue_t q;
+ register thread_t thread;
+ register int count;
+
+ s = splsched();
+ simple_lock(&runq->lock);
+ if((count = runq->count) > 0) {
+ q = runq->runq + runq->low;
+ while (count > 0) {
+ thread = (thread_t) queue_first(q);
+ while (!queue_end(q, (queue_entry_t) thread)) {
+ /*
+ * Get the next thread now, since we may
+ * remove this thread from the run queue.
+ */
+ thread_t next = (thread_t) queue_next(&thread->links);
+
+ if ((thread->state & TH_SCHED_STATE) == TH_RUN &&
+ sched_tick - thread->sched_stamp > 1) {
+ /*
+ * Stuck, save its id for later.
+ */
+ if (stuck_count == MAX_STUCK_THREADS) {
+ /*
+ * !@#$% No more room.
+ */
+ simple_unlock(&runq->lock);
+ splx(s);
+ return TRUE;
+ }
+ /*
+ * We can`t take the thread_lock here,
+ * since we already have the runq lock.
+ * So we can`t grab a reference to the
+ * thread. However, a thread that is
+ * in RUN state cannot be deallocated
+ * until it stops running. If it isn`t
+ * on the runq, then thread_halt cannot
+ * see it. So we remove the thread
+ * from the runq to make it safe.
+ */
+ remqueue(q, (queue_entry_t) thread);
+ runq->count--;
+ thread->runq = RUN_QUEUE_NULL;
+
+ stuck_threads[stuck_count++] = thread;
+ if (do_thread_scan_debug)
+ printf("do_runq_scan: adding thread %#x\n", thread);
+ }
+ count--;
+ thread = next;
+ }
+ q++;
+ }
+ }
+ simple_unlock(&runq->lock);
+ splx(s);
+
+ return FALSE;
+}
+
+void do_thread_scan(void)
+{
+ register spl_t s;
+ register boolean_t restart_needed = 0;
+ register thread_t thread;
+#if MACH_HOST
+ register processor_set_t pset;
+#endif /* MACH_HOST */
+
+ do {
+#if MACH_HOST
+ simple_lock(&all_psets_lock);
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ if (restart_needed = do_runq_scan(&pset->runq))
+ break;
+ }
+ simple_unlock(&all_psets_lock);
+#else /* MACH_HOST */
+ restart_needed = do_runq_scan(&default_pset.runq);
+#endif /* MACH_HOST */
+ if (!restart_needed)
+ restart_needed = do_runq_scan(&master_processor->runq);
+
+ /*
+ * Ok, we now have a collection of candidates -- fix them.
+ */
+
+ while (stuck_count > 0) {
+ thread = stuck_threads[--stuck_count];
+ stuck_threads[stuck_count] = THREAD_NULL;
+ s = splsched();
+ thread_lock(thread);
+ if ((thread->state & TH_SCHED_STATE) == TH_RUN) {
+ /*
+ * Do the priority update. Call
+ * thread_setrun because thread is
+ * off the run queues.
+ */
+ update_priority(thread);
+ thread_setrun(thread, TRUE);
+ }
+ thread_unlock(thread);
+ splx(s);
+ }
+ } while (restart_needed);
+}
+
+#if DEBUG
+void checkrq(
+ run_queue_t rq,
+ char *msg)
+{
+ register queue_t q1;
+ register int i, j;
+ register queue_entry_t e;
+ register int low;
+
+ low = -1;
+ j = 0;
+ q1 = rq->runq;
+ for (i = 0; i < NRQS; i++) {
+ if (q1->next == q1) {
+ if (q1->prev != q1)
+ panic("checkrq: empty at %s", msg);
+ }
+ else {
+ if (low == -1)
+ low = i;
+
+ for (e = q1->next; e != q1; e = e->next) {
+ j++;
+ if (e->next->prev != e)
+ panic("checkrq-2 at %s", msg);
+ if (e->prev->next != e)
+ panic("checkrq-3 at %s", msg);
+ }
+ }
+ q1++;
+ }
+ if (j != rq->count)
+ panic("checkrq: count wrong at %s", msg);
+ if (rq->count != 0 && low < rq->low)
+ panic("checkrq: low wrong at %s", msg);
+}
+
+void thread_check(
+ register thread_t th,
+ register run_queue_t rq)
+{
+ register unsigned int whichq;
+
+ whichq = th->sched_pri;
+ if (whichq >= NRQS) {
+ printf("thread_check: priority too high\n");
+ whichq = NRQS-1;
+ }
+ if ((th->links.next == &rq->runq[whichq]) &&
+ (rq->runq[whichq].prev != (queue_entry_t)th))
+ panic("thread_check");
+}
+#endif /* DEBUG */
diff --git a/kern/sched_prim.h b/kern/sched_prim.h
new file mode 100644
index 00000000..ef895144
--- /dev/null
+++ b/kern/sched_prim.h
@@ -0,0 +1,163 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: sched_prim.h
+ * Author: David Golub
+ *
+ * Scheduling primitive definitions file
+ *
+ */
+
+#ifndef _KERN_SCHED_PRIM_H_
+#define _KERN_SCHED_PRIM_H_
+
+#include <mach/boolean.h>
+#include <mach/message.h> /* for mach_msg_timeout_t */
+#include <kern/lock.h>
+#include <kern/kern_types.h> /* for thread_t */
+
+/*
+ * Possible results of assert_wait - returned in
+ * current_thread()->wait_result.
+ */
+#define THREAD_AWAKENED 0 /* normal wakeup */
+#define THREAD_TIMED_OUT 1 /* timeout expired */
+#define THREAD_INTERRUPTED 2 /* interrupted by clear_wait */
+#define THREAD_RESTART 3 /* restart operation entirely */
+
+typedef void *event_t; /* wait event */
+
+typedef void (*continuation_t)(void); /* continuation */
+
+/*
+ * Exported interface to sched_prim.c.
+ */
+
+extern void sched_init(void);
+
+extern void assert_wait(
+ event_t event,
+ boolean_t interruptible);
+extern void clear_wait(
+ thread_t thread,
+ int result,
+ boolean_t interrupt_only);
+extern void thread_sleep(
+ event_t event,
+ simple_lock_t lock,
+ boolean_t interruptible);
+extern void thread_wakeup(); /* for function pointers */
+extern void thread_wakeup_prim(
+ event_t event,
+ boolean_t one_thread,
+ int result);
+extern boolean_t thread_invoke(
+ thread_t old_thread,
+ continuation_t continuation,
+ thread_t new_thread);
+extern void thread_block(
+ continuation_t continuation);
+extern void thread_run(
+ continuation_t continuation,
+ thread_t new_thread);
+extern void thread_set_timeout(
+ int t);
+extern void thread_setrun(
+ thread_t thread,
+ boolean_t may_preempt);
+extern void thread_dispatch(
+ thread_t thread);
+extern void thread_continue(
+ thread_t old_thread);
+extern void thread_go(
+ thread_t thread);
+extern void thread_will_wait(
+ thread_t thread);
+extern void thread_will_wait_with_timeout(
+ thread_t thread,
+ mach_msg_timeout_t msecs);
+extern boolean_t thread_handoff(
+ thread_t old_thread,
+ continuation_t continuation,
+ thread_t new_thread);
+extern void recompute_priorities();
+
+/*
+ * Routines defined as macros
+ */
+
+#define thread_wakeup(x) \
+ thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
+#define thread_wakeup_with_result(x, z) \
+ thread_wakeup_prim((x), FALSE, (z))
+#define thread_wakeup_one(x) \
+ thread_wakeup_prim((x), TRUE, THREAD_AWAKENED)
+
+/*
+ * Machine-dependent code must define these functions.
+ */
+
+extern void thread_bootstrap_return(void);
+extern void thread_exception_return(void);
+#ifdef __GNUC__
+extern void __volatile__ thread_syscall_return(kern_return_t);
+#else
+extern void thread_syscall_return(kern_return_t);
+#endif
+extern thread_t switch_context(
+ thread_t old_thread,
+ continuation_t continuation,
+ thread_t new_thread);
+extern void stack_handoff(
+ thread_t old_thread,
+ thread_t new_thread);
+
+/*
+ * These functions are either defined in kern/thread.c
+ * via machine-dependent stack_attach and stack_detach functions,
+ * or are defined directly by machine-dependent code.
+ */
+
+extern void stack_alloc(
+ thread_t thread,
+ void (*resume)(thread_t));
+extern boolean_t stack_alloc_try(
+ thread_t thread,
+ void (*resume)(thread_t));
+extern void stack_free(
+ thread_t thread);
+
+/*
+ * Convert a timeout in milliseconds (mach_msg_timeout_t)
+ * to a timeout in ticks (for use by set_timeout).
+ * This conversion rounds UP so that small timeouts
+ * at least wait for one tick instead of not waiting at all.
+ */
+
+#define convert_ipc_timeout_to_ticks(millis) \
+ (((millis) * hz + 999) / 1000)
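+
+/*
+ * Editor's note (worked example, hz value assumed): with hz = 100
+ * ticks per second, convert_ipc_timeout_to_ticks(5) is
+ * (5*100 + 999)/1000 = 1, so a 5 millisecond timeout still waits for
+ * one tick instead of rounding down to zero.
+ */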
+
+#endif /* _KERN_SCHED_PRIM_H_ */
diff --git a/kern/server_loop.ch b/kern/server_loop.ch
new file mode 100644
index 00000000..5a0c69ce
--- /dev/null
+++ b/kern/server_loop.ch
@@ -0,0 +1,102 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/server_loop.c
+ *
+ * A common server loop for builtin tasks.
+ */
+
+/*
+ * Must define symbols for:
+ * SERVER_NAME String name of this module
+ * SERVER_LOOP Routine name for the loop
+ * SERVER_DISPATCH MiG function(s) to handle message
+ *
+ * Must redefine symbols for pager_server functions.
+ */
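+
+/*
+ * Editor's note (hypothetical example, names made up for illustration):
+ * a client of this file is expected to wrap it along the lines of
+ *
+ *	#define SERVER_NAME	"foo_server"
+ *	#define SERVER_LOOP	foo_server_loop
+ *	#define SERVER_DISPATCH	foo_server
+ *	#include <kern/server_loop.ch>
+ *
+ * so that the loop below is instantiated under the chosen names.
+ */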
+
+#include <mach/port.h>
+#include <mach/message.h>
+#include <vm/vm_kern.h> /* for kernel_map */
+
+void SERVER_LOOP(rcv_set, max_size)
+{
+ register mach_msg_header_t *in_msg;
+ register mach_msg_header_t *out_msg;
+ register mach_msg_header_t *tmp_msg;
+ vm_offset_t messages;
+ mach_msg_return_t r;
+
+ /*
+ * Allocate our message buffers.
+ */
+
+ messages = kalloc(2 * max_size);
+ if (messages == 0)
+ panic(SERVER_NAME);
+ in_msg = (mach_msg_header_t *) messages;
+ out_msg = (mach_msg_header_t *) (messages + max_size);
+
+ /*
+ * Service loop... receive messages and process them.
+ */
+
+ for (;;) {
+ /* receive first message */
+
+ receive_msg:
+ r = mach_msg(in_msg, MACH_RCV_MSG, 0, max_size, rcv_set,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+ if (r == MACH_MSG_SUCCESS)
+ break;
+
+ printf("%s: receive failed, 0x%x.\n", SERVER_NAME, r);
+ }
+
+ for (;;) {
+ /* process request message */
+
+ (void) SERVER_DISPATCH(in_msg, out_msg);
+
+ /* send reply and receive next request */
+
+ if (out_msg->msgh_remote_port == MACH_PORT_NULL)
+ goto receive_msg;
+
+ r = mach_msg(out_msg, MACH_SEND_MSG|MACH_RCV_MSG,
+ out_msg->msgh_size, max_size, rcv_set,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+ if (r != MACH_MSG_SUCCESS) {
+ printf("%s: send/receive failed, 0x%x.\n",
+ SERVER_NAME, r);
+ goto receive_msg;
+ }
+
+ /* swap message buffers */
+
+ tmp_msg = in_msg; in_msg = out_msg; out_msg = tmp_msg;
+ }
+}
diff --git a/kern/shuttle.h b/kern/shuttle.h
new file mode 100644
index 00000000..e8e574b6
--- /dev/null
+++ b/kern/shuttle.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: shuttle.h
+ * Author: Bryan Ford
+ *
+ * This file contains definitions for shuttles,
+ * which handle microscheduling for individual threads.
+ *
+ */
+
+#ifndef _KERN_SHUTTLE_H_
+#define _KERN_SHUTTLE_H_
+
+#include <kern/lock.h>
+
+
+
+struct Shuttle {
+ /* XXX must be first in thread */
+/*
+ * NOTE: The runq field in the thread structure has an unusual
+ * locking protocol. If its value is RUN_QUEUE_NULL, then it is
+ * locked by the thread_lock, but if its value is something else
+ * (i.e. a run_queue) then it is locked by that run_queue's lock.
+ */
+ queue_chain_t links; /* current run queue links */
+ run_queue_t runq; /* run queue p is on SEE BELOW */
+
+ /* Next pointer when on a queue */
+ struct Shuttle *next;
+
+ /* Micropriority level */
+ int priority;
+
+ /* General-purpose pointer field whose use depends on what the
+ thread happens to be doing */
+ void *message;
+
+ int foobar[1];
+};
+typedef struct Shuttle Shuttle;
+
+
+
+/* Exported functions */
+
+
+
+/* Machine-dependent code must define the following functions */
+
+
+
+#endif /* _KERN_SHUTTLE_H_ */
diff --git a/kern/startup.c b/kern/startup.c
new file mode 100644
index 00000000..dc0d5a01
--- /dev/null
+++ b/kern/startup.c
@@ -0,0 +1,305 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Mach kernel startup.
+ */
+
+
+#include <xpr_debug.h>
+#include <cpus.h>
+#include <mach_host.h>
+#include <norma_ipc.h>
+#include <norma_vm.h>
+
+#include <mach/boolean.h>
+#include <mach/machine.h>
+#include <mach/task_special_ports.h>
+#include <mach/vm_param.h>
+#include <ipc/ipc_init.h>
+#include <kern/cpu_number.h>
+#include <kern/processor.h>
+#include <kern/sched_prim.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/thread_swap.h>
+#include <kern/time_out.h>
+#include <kern/timer.h>
+#include <kern/zalloc.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <machine/machspl.h>
+#include <machine/pmap.h>
+#include <mach/version.h>
+
+
+
+extern void vm_mem_init();
+extern void vm_mem_bootstrap();
+extern void init_timeout();
+extern void machine_init();
+
+extern void idle_thread();
+extern void vm_pageout();
+extern void reaper_thread();
+extern void swapin_thread();
+extern void sched_thread();
+
+extern void bootstrap_create();
+extern void device_service_create();
+
+void cpu_launch_first_thread(); /* forward */
+void start_kernel_threads(); /* forward */
+
+#if NCPUS > 1
+extern void start_other_cpus();
+extern void action_thread();
+#endif /* NCPUS > 1 */
+
+/* XX */
+extern vm_offset_t phys_first_addr, phys_last_addr;
+
+/*
+ * Running in virtual memory, on the interrupt stack.
+ * Does not return. Dispatches initial thread.
+ *
+ * Assumes that master_cpu is set.
+ */
+void setup_main()
+{
+ thread_t startup_thread;
+
+ panic_init();
+ printf_init();
+
+ sched_init();
+ vm_mem_bootstrap();
+ ipc_bootstrap();
+ vm_mem_init();
+ ipc_init();
+
+ /*
+ * As soon as the virtual memory system is up, we record
+ * that this CPU is using the kernel pmap.
+ */
+ PMAP_ACTIVATE_KERNEL(master_cpu);
+
+ init_timers();
+ init_timeout();
+
+#if XPR_DEBUG
+ xprbootstrap();
+#endif /* XPR_DEBUG */
+
+ timestamp_init();
+
+ mapable_time_init();
+
+ machine_init();
+
+ machine_info.max_cpus = NCPUS;
+ machine_info.memory_size = phys_last_addr - phys_first_addr; /* XXX mem_size */
+ machine_info.avail_cpus = 0;
+ machine_info.major_version = KERNEL_MAJOR_VERSION;
+ machine_info.minor_version = KERNEL_MINOR_VERSION;
+
+ /*
+ * Initialize the IPC, task, and thread subsystems.
+ */
+ task_init();
+ thread_init();
+ swapper_init();
+#if MACH_HOST
+ pset_sys_init();
+#endif /* MACH_HOST */
+
+ /*
+ * Kick off the time-out driven routines by calling
+ * them the first time.
+ */
+ recompute_priorities();
+ compute_mach_factor();
+
+ /*
+ * Create a kernel thread to start the other kernel
+ * threads. Thread_resume (from kernel_thread) calls
+ * thread_setrun, which may look at current thread;
+ * we must avoid this, since there is no current thread.
+ */
+
+ /*
+ * Create the thread, and point it at the routine.
+ */
+ (void) thread_create(kernel_task, &startup_thread);
+ thread_start(startup_thread, start_kernel_threads);
+
+ /*
+ * Give it a kernel stack.
+ */
+ thread_doswapin(startup_thread);
+
+ /*
+ * Pretend it is already running, and resume it.
+ * Since it looks as if it is running, thread_resume
+ * will not try to put it on the run queues.
+ *
+ * We can do all of this without locking, because nothing
+ * else is running yet.
+ */
+ startup_thread->state |= TH_RUN;
+ (void) thread_resume(startup_thread);
+
+ /*
+ * Start the thread.
+ */
+ cpu_launch_first_thread(startup_thread);
+ /*NOTREACHED*/
+}
+
+/*
+ * Now running in a thread. Create the rest of the kernel threads
+ * and the bootstrap task.
+ */
+void start_kernel_threads()
+{
+ register int i;
+
+ /*
+ * Create the idle threads and the other
+ * service threads.
+ */
+ for (i = 0; i < NCPUS; i++) {
+ if (machine_slot[i].is_cpu) {
+ thread_t th;
+
+ (void) thread_create(kernel_task, &th);
+ thread_bind(th, cpu_to_processor(i));
+ thread_start(th, idle_thread);
+ thread_doswapin(th);
+ (void) thread_resume(th);
+ }
+ }
+
+ (void) kernel_thread(kernel_task, reaper_thread, (char *) 0);
+ (void) kernel_thread(kernel_task, swapin_thread, (char *) 0);
+ (void) kernel_thread(kernel_task, sched_thread, (char *) 0);
+
+#if NCPUS > 1
+ /*
+ * Create the shutdown thread.
+ */
+ (void) kernel_thread(kernel_task, action_thread, (char *) 0);
+
+ /*
+ * Allow other CPUs to run.
+ */
+ start_other_cpus();
+#endif /* NCPUS > 1 */
+
+ /*
+ * Create the device service.
+ */
+ device_service_create();
+
+ /*
+ * Initialize NORMA ipc system.
+ */
+#if NORMA_IPC
+ norma_ipc_init();
+#endif /* NORMA_IPC */
+
+ /*
+ * Initialize NORMA vm system.
+ */
+#if NORMA_VM
+ norma_vm_init();
+#endif /* NORMA_VM */
+
+ /*
+ * Start the user bootstrap.
+ */
+ bootstrap_create();
+
+#if XPR_DEBUG
+ xprinit(); /* XXX */
+#endif /* XPR_DEBUG */
+
+ /*
+ * Become the pageout daemon.
+ */
+ (void) spl0();
+ vm_pageout();
+ /*NOTREACHED*/
+}
+
+#if NCPUS > 1
+void slave_main()
+{
+ cpu_launch_first_thread(THREAD_NULL);
+}
+#endif /* NCPUS > 1 */
+
+/*
+ * Start up the first thread on a CPU.
+ * First thread is specified for the master CPU.
+ */
+void cpu_launch_first_thread(th)
+ register thread_t th;
+{
+ register int mycpu;
+
+ mycpu = cpu_number();
+
+ cpu_up(mycpu);
+
+ start_timer(&kernel_timer[mycpu]);
+
+ /*
+ * Block all interrupts for choose_thread.
+ */
+ (void) splhigh();
+
+ if (th == THREAD_NULL)
+ th = choose_thread(cpu_to_processor(mycpu));
+ if (th == THREAD_NULL)
+ panic("cpu_launch_first_thread");
+
+ startrtclock(); /* needs an active thread */
+ PMAP_ACTIVATE_KERNEL(mycpu);
+
+ active_threads[mycpu] = th;
+ active_stacks[mycpu] = th->kernel_stack;
+ thread_lock(th);
+ th->state &= ~TH_UNINT;
+ thread_unlock(th);
+ timer_switch(&th->system_timer);
+
+ PMAP_ACTIVATE_USER(vm_map_pmap(th->task->map), th, mycpu);
+
+ load_context(th);
+ /*NOTREACHED*/
+}
diff --git a/kern/strings.c b/kern/strings.c
new file mode 100644
index 00000000..89563cd2
--- /dev/null
+++ b/kern/strings.c
@@ -0,0 +1,174 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: strings.c
+ * Author: Robert V. Baron, Carnegie Mellon University
+ * Date: ??/92
+ *
+ * String functions.
+ */
+
+#include <kern/strings.h> /* make sure we tell the truth */
+
+#ifdef strcpy
+#undef strcmp
+#undef strncmp
+#undef strcpy
+#undef strncpy
+#undef strlen
+#endif
+
+/*
+ * Abstract:
+ * strcmp (s1, s2) compares the strings "s1" and "s2".
+ * It returns 0 if the strings are identical. It returns
+ * > 0 if the first character that differs in the two strings
+ * is larger in s1 than in s2 or if s1 is longer than s2 and
+ * the contents are identical up to the length of s2.
+ * It returns < 0 if the first differing character is smaller
+ * in s1 than in s2 or if s1 is shorter than s2 and the
+ * contents are identical up to the length of s1.
+ */
+
+int
+strcmp(
+ register const char *s1,
+ register const char *s2)
+{
+ register unsigned int a, b;
+
+ do {
+ a = *s1++;
+ b = *s2++;
+ if (a != b)
+ return a-b; /* includes case when
+ 'a' is zero and 'b' is not zero
+ or vice versa */
+ } while (a != '\0');
+
+ return 0; /* both are zero */
+}
+
+
+/*
+ * Abstract:
+ * strncmp (s1, s2, n) compares the strings "s1" and "s2"
+ * in exactly the same way as strcmp does, except that the
+ * comparison runs for at most "n" characters.
+ */
+
+int
+strncmp(
+ register const char *s1,
+ register const char *s2,
+ unsigned long n)
+{
+ register unsigned int a, b;
+
+ while (n != 0) {
+ a = *s1++;
+ b = *s2++;
+ if (a != b)
+ return a-b; /* includes case when
+ 'a' is zero and 'b' is not zero
+ or vice versa */
+ if (a == '\0')
+ return 0; /* both are zero */
+ n--;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Abstract:
+ * strcpy copies the contents of the string "from" including
+ * the null terminator to the string "to". A pointer to "to"
+ * is returned.
+ */
+
+char *
+strcpy(
+ register char *to,
+ register const char *from)
+{
+ register char *ret = to;
+
+ while ((*to++ = *from++) != '\0')
+ continue;
+
+ return ret;
+}
+
+/*
+ * Abstract:
+ * strncpy copies "count" characters from the "from" string to
+ * the "to" string. If "from" contains less than "count" characters
+ * "to" will be padded with null characters until exactly "count"
+ * characters have been written. The return value is a pointer
+ * to the "to" string.
+ */
+
+char *
+strncpy(
+ register char *to,
+ register const char *from,
+ register unsigned long count)
+{
+ register char *ret = to;
+
+ while (count != 0) {
+ count--;
+ if ((*to++ = *from++) == '\0')
+ break;
+ }
+
+ while (count != 0) {
+ *to++ = '\0';
+ count--;
+ }
+
+ return ret;
+}
+
+/*
+ * Abstract:
+ * strlen returns the number of characters in "string" preceding
+ * the terminating null character.
+ */
+
+unsigned long
+strlen(
+ register const char *string)
+{
+ register const char *ret = string;
+
+ while (*string++ != '\0')
+ continue;
+
+ return string - 1 - ret;
+}
diff --git a/kern/strings.h b/kern/strings.h
new file mode 100644
index 00000000..b71a7b3f
--- /dev/null
+++ b/kern/strings.h
@@ -0,0 +1,53 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: strings.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 3/93
+ *
+ * Prototypes for string functions. The way GCC wants them.
+ */
+
+extern int strcmp(
+ const char *,
+ const char * );
+
+extern int strncmp(
+ const char *,
+ const char *,
+ unsigned long);
+
+extern char *strcpy(
+ char *,
+ const char *);
+
+extern char *strncpy(
+ char *,
+ const char *,
+ unsigned long);
+
+extern unsigned long strlen(
+ const char *);
diff --git a/kern/syscall_emulation.c b/kern/syscall_emulation.c
new file mode 100644
index 00000000..5443a33a
--- /dev/null
+++ b/kern/syscall_emulation.c
@@ -0,0 +1,518 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/error.h>
+#include <mach/vm_param.h>
+#include <kern/syscall_emulation.h>
+#include <kern/task.h>
+#include <kern/kalloc.h>
+#include <vm/vm_kern.h>
+
+/* XXX */
+#define syscall_emulation_sync(task)
+
+
+
+/*
+ * WARNING:
+ * This code knows that kalloc() allocates memory most efficiently
+ * in sizes that are powers of 2, and asks for those sizes.
+ */
+
+/*
+ * Go from number of entries to size of struct eml_dispatch and back.
+ */
+#define base_size (sizeof(struct eml_dispatch) - sizeof(eml_routine_t))
+#define count_to_size(count) \
+ (base_size + sizeof(vm_offset_t) * (count))
+
+#define size_to_count(size) \
+ ( ((size) - base_size) / sizeof(vm_offset_t) )
+
+/*
+ * eml_init: initialize user space emulation code
+ */
+void eml_init()
+{
+}
+
+/*
+ * eml_task_reference() [Exported]
+ *
+ * Bumps the reference count on the common emulation
+ * vector.
+ */
+
+void eml_task_reference(task, parent)
+ task_t task, parent;
+{
+ register eml_dispatch_t eml;
+
+ if (parent == TASK_NULL)
+ eml = EML_DISPATCH_NULL;
+ else
+ eml = parent->eml_dispatch;
+
+ if (eml != EML_DISPATCH_NULL) {
+ simple_lock(&eml->lock);
+ eml->ref_count++;
+ simple_unlock(&eml->lock);
+ }
+ task->eml_dispatch = eml;
+}
+
+
+/*
+ * eml_task_deallocate() [Exported]
+ *
+ * Cleans up after the emulation code when a process exits.
+ */
+
+void eml_task_deallocate(task)
+ task_t task;
+{
+ register eml_dispatch_t eml;
+
+ eml = task->eml_dispatch;
+ if (eml != EML_DISPATCH_NULL) {
+ int count;
+
+ simple_lock(&eml->lock);
+ count = --eml->ref_count;
+ simple_unlock(&eml->lock);
+
+ if (count == 0)
+ kfree((vm_offset_t)eml, count_to_size(eml->disp_count));
+ }
+}
+
+/*
+ * task_set_emulation_vector_internal:
+ * Common code to set the list of emulated system calls for a task.
+ */
+kern_return_t
+task_set_emulation_vector_internal(task, vector_start, emulation_vector,
+ emulation_vector_count)
+ task_t task;
+ int vector_start;
+ emulation_vector_t emulation_vector;
+ unsigned int emulation_vector_count;
+{
+ eml_dispatch_t cur_eml, new_eml, old_eml;
+ vm_size_t new_size;
+ int cur_start, cur_end;
+ int new_start = 0, new_end = 0;
+ int vector_end;
+
+ if (task == TASK_NULL)
+ return EML_BAD_TASK;
+
+ vector_end = vector_start + emulation_vector_count;
+
+ /*
+ * We try to re-use the existing emulation vector
+ * if possible. We can reuse the vector if it
+ * is not shared with another task and if it is
+ * large enough to contain the entries we are
+ * supplying.
+ *
+ * We must grab the lock on the task to check whether
+ * there is an emulation vector.
+ * If the vector is shared or not large enough, we
+ * need to drop the lock and allocate a new emulation
+ * vector.
+ *
+ * While the lock is dropped, the emulation vector
+ * may be released by all other tasks (giving us
+ * exclusive use), or may be enlarged by another
+ * task_set_emulation_vector call. Therefore,
+ * after allocating the new emulation vector, we
+ * must grab the lock again to check whether we
+ * really need the new vector we just allocated.
+ *
+ * Since an emulation vector cannot be altered
+ * if it is in use by more than one task, the
+ * task lock is sufficient to protect the vector`s
+ * start, count, and contents. The lock in the
+ * vector protects only the reference count.
+ */
+
+ old_eml = EML_DISPATCH_NULL; /* vector to discard */
+ new_eml = EML_DISPATCH_NULL; /* new vector */
+
+ for (;;) {
+ /*
+ * Find the current emulation vector.
+ * See whether we can overwrite it.
+ */
+ task_lock(task);
+ cur_eml = task->eml_dispatch;
+ if (cur_eml != EML_DISPATCH_NULL) {
+ cur_start = cur_eml->disp_min;
+ cur_end = cur_eml->disp_count + cur_start;
+
+ simple_lock(&cur_eml->lock);
+ if (cur_eml->ref_count == 1 &&
+ cur_start <= vector_start &&
+ cur_end >= vector_end)
+ {
+ /*
+ * Can use the existing emulation vector.
+ * Discard any new one we allocated.
+ */
+ simple_unlock(&cur_eml->lock);
+ old_eml = new_eml;
+ break;
+ }
+
+ if (new_eml != EML_DISPATCH_NULL &&
+ new_start <= cur_start &&
+ new_end >= cur_end)
+ {
+ /*
+ * A new vector was allocated, and it is large enough
+ * to hold all the entries from the current vector.
+ * Copy the entries to the new emulation vector,
+ * deallocate the current one, and use the new one.
+ */
+ bcopy((char *)&cur_eml->disp_vector[0],
+ (char *)&new_eml->disp_vector[cur_start-new_start],
+ cur_eml->disp_count * sizeof(vm_offset_t));
+
+ if (--cur_eml->ref_count == 0)
+ old_eml = cur_eml; /* discard old vector */
+ simple_unlock(&cur_eml->lock);
+
+ task->eml_dispatch = new_eml;
+ syscall_emulation_sync(task);
+ cur_eml = new_eml;
+ break;
+ }
+ simple_unlock(&cur_eml->lock);
+
+ /*
+ * Need a new emulation vector.
+ * Ensure it will hold all the entries from
+ * both the old and new emulation vectors.
+ */
+ new_start = vector_start;
+ if (new_start > cur_start)
+ new_start = cur_start;
+ new_end = vector_end;
+ if (new_end < cur_end)
+ new_end = cur_end;
+ }
+ else {
+ /*
+ * There is no current emulation vector.
+ * If a new one was allocated, use it.
+ */
+ if (new_eml != EML_DISPATCH_NULL) {
+ task->eml_dispatch = new_eml;
+ cur_eml = new_eml;
+ break;
+ }
+
+ /*
+ * Compute the size needed for the new vector.
+ */
+ new_start = vector_start;
+ new_end = vector_end;
+ }
+
+ /*
+ * Have no vector (or one that is no longer large enough).
+ * Drop all the locks and allocate a new vector.
+ * Repeat the loop to check whether the old vector was
+ * changed while we didn`t hold the locks.
+ */
+
+ task_unlock(task);
+
+ if (new_eml != EML_DISPATCH_NULL)
+ kfree((vm_offset_t)new_eml, count_to_size(new_eml->disp_count));
+
+ new_size = count_to_size(new_end - new_start);
+ new_eml = (eml_dispatch_t) kalloc(new_size);
+
+ bzero((char *)new_eml, new_size);
+ simple_lock_init(&new_eml->lock);
+ new_eml->ref_count = 1;
+ new_eml->disp_min = new_start;
+ new_eml->disp_count = new_end - new_start;
+
+ continue;
+ }
+
+ /*
+ * We have the emulation vector.
+ * Install the new emulation entries.
+ */
+ bcopy((char *)&emulation_vector[0],
+ (char *)&cur_eml->disp_vector[vector_start - cur_eml->disp_min],
+ emulation_vector_count * sizeof(vm_offset_t));
+
+ task_unlock(task);
+
+ /*
+ * Discard any old emulation vector we don`t need.
+ */
+ if (old_eml)
+ kfree((vm_offset_t) old_eml, count_to_size(old_eml->disp_count));
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * task_set_emulation_vector: [Server Entry]
+ *
+ * Set the list of emulated system calls for this task.
+ * The list is out-of-line.
+ */
+kern_return_t
+task_set_emulation_vector(task, vector_start, emulation_vector,
+ emulation_vector_count)
+ task_t task;
+ int vector_start;
+ emulation_vector_t emulation_vector;
+ unsigned int emulation_vector_count;
+{
+ kern_return_t kr;
+ vm_offset_t emul_vector_addr;
+
+ if (task == TASK_NULL)
+ return EML_BAD_TASK; /* XXX sb KERN_INVALID_ARGUMENT */
+
+ /*
+ * The emulation vector is really a vm_map_copy_t.
+ */
+ kr = vm_map_copyout(ipc_kernel_map, &emul_vector_addr,
+ (vm_map_copy_t) emulation_vector);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ /*
+ * Do the work.
+ */
+ kr = task_set_emulation_vector_internal(
+ task,
+ vector_start,
+ (emulation_vector_t) emul_vector_addr,
+ emulation_vector_count);
+
+ /*
+ * Discard the memory
+ */
+ (void) kmem_free(ipc_kernel_map,
+ emul_vector_addr,
+ emulation_vector_count * sizeof(eml_dispatch_t));
+
+ return kr;
+}
+
+/*
+ * Compatibility entry. Vector is passed inline.
+ */
+kern_return_t
+xxx_task_set_emulation_vector(task, vector_start, emulation_vector,
+ emulation_vector_count)
+ task_t task;
+ int vector_start;
+ emulation_vector_t emulation_vector;
+ unsigned int emulation_vector_count;
+{
+ return task_set_emulation_vector_internal(
+ task,
+ vector_start,
+ emulation_vector,
+ emulation_vector_count);
+}
+
+/*
+ * task_get_emulation_vector: [Server Entry]
+ *
+ * Get the list of emulated system calls for this task.
+ * List is returned out-of-line.
+ */
+kern_return_t
+task_get_emulation_vector(task, vector_start, emulation_vector,
+ emulation_vector_count)
+ task_t task;
+ int *vector_start; /* out */
+ emulation_vector_t *emulation_vector; /* out */
+ unsigned int *emulation_vector_count; /* out */
+{
+ eml_dispatch_t eml;
+ vm_size_t vector_size, size;
+ vm_offset_t addr;
+
+ if (task == TASK_NULL)
+ return EML_BAD_TASK;
+
+ addr = 0;
+ size = 0;
+
+ for(;;) {
+ vm_size_t size_needed;
+
+ task_lock(task);
+ eml = task->eml_dispatch;
+ if (eml == EML_DISPATCH_NULL) {
+ task_unlock(task);
+ if (addr)
+ (void) kmem_free(ipc_kernel_map, addr, size);
+ *vector_start = 0;
+ *emulation_vector = 0;
+ *emulation_vector_count = 0;
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * Do we have the memory we need?
+ */
+ vector_size = eml->disp_count * sizeof(vm_offset_t);
+
+ size_needed = round_page(vector_size);
+ if (size_needed <= size)
+ break;
+
+ /*
+ * If not, unlock the task and allocate more memory.
+ */
+ task_unlock(task);
+
+ if (size != 0)
+ kmem_free(ipc_kernel_map, addr, size);
+
+ size = size_needed;
+ if (kmem_alloc(ipc_kernel_map, &addr, size) != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /*
+ * Copy out the dispatch addresses
+ */
+ *vector_start = eml->disp_min;
+ *emulation_vector_count = eml->disp_count;
+ bcopy((char *)eml->disp_vector,
+ (char *)addr,
+ vector_size);
+
+ /*
+ * Unlock the task and free any memory we did not need
+ */
+ task_unlock(task);
+
+ {
+ vm_size_t size_used, size_left;
+ vm_map_copy_t memory;
+
+ /*
+ * Free any unused memory beyond the end of the last page used
+ */
+ size_used = round_page(vector_size);
+ if (size_used != size)
+ (void) kmem_free(ipc_kernel_map,
+ addr + size_used,
+ size - size_used);
+
+ /*
+ * Zero the remainder of the page being returned.
+ */
+ size_left = size_used - vector_size;
+ if (size_left > 0)
+ bzero((char *)addr + vector_size, size_left);
+
+ /*
+ * Make memory into copyin form - this unwires it.
+ */
+ (void) vm_map_copyin(ipc_kernel_map, addr, vector_size, TRUE, &memory);
+
+ *emulation_vector = (emulation_vector_t) memory;
+ }
+
+ return KERN_SUCCESS;
+}
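
/*
 * (Illustrative note, not part of this file.)  Worked example of the
 * sizing above, assuming 4096-byte pages and 4-byte vm_offset_t
 * entries -- both machine dependent:
 *
 *	disp_count  = 40 emulated calls
 *	vector_size = 40 * 4           = 160 bytes
 *	size        = round_page(160)  = 4096 bytes from kmem_alloc
 *	size_left   = 4096 - 160       = 3936 bytes zeroed before
 *	              vm_map_copyin hands the page back to the caller
 *	              as out-of-line memory.
 */
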
+
+/*
+ * xxx_task_get_emulation_vector: [Server Entry]
+ * Get the list of emulated system calls for this task.
+ * Compatibility code: return list in-line.
+ */
+kern_return_t
+xxx_task_get_emulation_vector(task, vector_start, emulation_vector,
+ emulation_vector_count)
+ task_t task;
+ int *vector_start;
+ emulation_vector_t emulation_vector; /* pointer to OUT array */
+ unsigned int *emulation_vector_count; /*IN/OUT*/
+{
+ register eml_dispatch_t eml;
+
+ if (task == TASK_NULL)
+ return( EML_BAD_TASK );
+
+ task_lock(task);
+
+ eml = task->eml_dispatch;
+ if (eml == EML_DISPATCH_NULL) {
+ task_unlock(task);
+ *vector_start = 0;
+ *emulation_vector_count = 0;
+ return( KERN_SUCCESS );
+ }
+
+ simple_lock(&eml->lock);
+
+ if (*emulation_vector_count < eml->disp_count) {
+ simple_unlock(&eml->lock);
+ task_unlock(task);
+ return( EML_BAD_CNT );
+ }
+
+ *vector_start = eml->disp_min;
+ *emulation_vector_count = eml->disp_count;
+ bcopy((char *)eml->disp_vector, (char *)emulation_vector,
+ *emulation_vector_count * sizeof(vm_offset_t));
+ simple_unlock(&eml->lock);
+
+ task_unlock(task);
+
+ return( KERN_SUCCESS );
+}
+
+/*
+ * task_set_emulation: [Server Entry]
+ * set up for user space emulation of syscalls within this task.
+ */
+kern_return_t task_set_emulation(task, routine_entry_pt, routine_number)
+ task_t task;
+ vm_offset_t routine_entry_pt;
+ int routine_number;
+{
+ return task_set_emulation_vector_internal(task, routine_number,
+ &routine_entry_pt, 1);
+}
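
The loop in task_set_emulation_vector_internal above never calls kalloc while
holding the task lock: it drops the lock, allocates, then retakes the lock and
re-checks whether the allocation is still needed. A hedged, user-space sketch of
that basic pattern, with pthreads standing in for simple_lock and every name
(vec, grow, cur) hypothetical; the kernel version additionally merges the old
entries into the new vector and reference-counts the vector it replaces.

    #include <pthread.h>
    #include <stdlib.h>

    struct vec { unsigned cap; /* ...entries would follow... */ };

    static struct vec *cur;
    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    struct vec *grow(unsigned needed)
    {
    	struct vec *fresh = NULL;

    	for (;;) {
    		pthread_mutex_lock(&m);
    		if (cur != NULL && cur->cap >= needed) {
    			struct vec *keep = cur;	/* existing one suffices */
    			pthread_mutex_unlock(&m);
    			free(fresh);		/* allocated in vain (may be NULL) */
    			return keep;
    		}
    		if (fresh != NULL) {
    			/* Install what we allocated while unlocked.  (The
    			 * kernel code also copies the old entries over and
    			 * reference-counts the vector it replaces.) */
    			cur = fresh;
    			pthread_mutex_unlock(&m);
    			return fresh;
    		}
    		pthread_mutex_unlock(&m);	/* never allocate under the lock */
    		fresh = malloc(sizeof *fresh);
    		if (fresh == NULL)
    			return NULL;
    		fresh->cap = needed;
    	}
    }
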
diff --git a/kern/syscall_emulation.h b/kern/syscall_emulation.h
new file mode 100644
index 00000000..91b3ed71
--- /dev/null
+++ b/kern/syscall_emulation.h
@@ -0,0 +1,61 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_SYSCALL_EMULATION_H_
+#define _KERN_SYSCALL_EMULATION_H_
+
+#ifndef ASSEMBLER
+#include <mach/machine/vm_types.h>
+#include <kern/lock.h>
+
+typedef vm_offset_t eml_routine_t;
+
+typedef struct eml_dispatch {
+ decl_simple_lock_data(, lock) /* lock for reference count */
+ int ref_count; /* reference count */
+ int disp_count; /* count of entries in vector */
+ int disp_min; /* index of lowest entry in vector */
+ eml_routine_t disp_vector[1]; /* first entry in array of dispatch */
+ /* routines (array has disp_count */
+ /* elements) */
+} *eml_dispatch_t;
+
+typedef vm_offset_t *emulation_vector_t; /* Variable-length array */
+
+#define EML_ROUTINE_NULL (eml_routine_t)0
+#define EML_DISPATCH_NULL (eml_dispatch_t)0
+
+#define EML_SUCCESS (0)
+
+#define EML_MOD (err_kern|err_sub(2))
+#define EML_BAD_TASK (EML_MOD|0x0001)
+#define EML_BAD_CNT (EML_MOD|0x0002)
+#endif ASSEMBLER
+
+#endif _KERN_SYSCALL_EMULATION_H_
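
The disp_vector[1] member above is the classic variable-length trailer: the
struct is over-allocated so the one-element array extends past its declared
size, which is exactly what the count_to_size() macro in syscall_emulation.c
computes. A hedged sketch of an allocation following that layout; eml_alloc is
hypothetical, while kalloc and simple_lock_init are used as in the .c file above.

    #include <kern/syscall_emulation.h>
    #include <kern/kalloc.h>

    eml_dispatch_t eml_alloc(int nentries)
    {
    	vm_size_t size;
    	eml_dispatch_t eml;
    	int i;

    	/* header without the dummy element, plus nentries real elements */
    	size = sizeof(struct eml_dispatch) - sizeof(eml_routine_t)
    	     + nentries * sizeof(eml_routine_t);

    	eml = (eml_dispatch_t) kalloc(size);
    	simple_lock_init(&eml->lock);
    	eml->ref_count  = 1;
    	eml->disp_min   = 0;
    	eml->disp_count = nentries;
    	for (i = 0; i < nentries; i++)
    		eml->disp_vector[i] = EML_ROUTINE_NULL;
    	return eml;
    }
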
diff --git a/kern/syscall_subr.c b/kern/syscall_subr.c
new file mode 100644
index 00000000..a040d71d
--- /dev/null
+++ b/kern/syscall_subr.c
@@ -0,0 +1,399 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach_fixpri.h>
+#include <cpus.h>
+
+#include <mach/boolean.h>
+#include <mach/thread_switch.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <kern/counters.h>
+#include <kern/ipc_kobject.h>
+#include <kern/processor.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/ipc_sched.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/time_out.h>
+#include <machine/machspl.h> /* for splsched */
+
+#if MACH_FIXPRI
+#include <mach/policy.h>
+#endif MACH_FIXPRI
+
+
+
+/*
+ * swtch and swtch_pri both attempt to context switch (logic in
+ * thread_block no-ops the context switch if nothing would happen).
+ * A boolean is returned that indicates whether there is anything
+ * else runnable.
+ *
+ * This boolean can be used by a thread waiting on a
+ * lock or condition: If FALSE is returned, the thread is justified
+ * in becoming a resource hog by continuing to spin because there's
+ * nothing else useful that the processor could do. If TRUE is
+ * returned, the thread should make one more check on the
+ * lock and then be a good citizen and really suspend.
+ */
+
+extern void thread_depress_priority();
+extern kern_return_t thread_depress_abort();
+
+#ifdef CONTINUATIONS
+void swtch_continue()
+{
+ register processor_t myprocessor;
+
+ myprocessor = current_processor();
+ thread_syscall_return(myprocessor->runq.count > 0 ||
+ myprocessor->processor_set->runq.count > 0);
+ /*NOTREACHED*/
+}
+#else /* not CONTINUATIONS */
+#define swtch_continue 0
+#endif /* not CONTINUATIONS */
+
+boolean_t swtch()
+{
+ register processor_t myprocessor;
+
+#if NCPUS > 1
+ myprocessor = current_processor();
+ if (myprocessor->runq.count == 0 &&
+ myprocessor->processor_set->runq.count == 0)
+ return(FALSE);
+#endif NCPUS > 1
+
+ counter(c_swtch_block++);
+ thread_block(swtch_continue);
+ myprocessor = current_processor();
+ return(myprocessor->runq.count > 0 ||
+ myprocessor->processor_set->runq.count > 0);
+}
+
+#ifdef CONTINUATIONS
+void swtch_pri_continue()
+{
+ register thread_t thread = current_thread();
+ register processor_t myprocessor;
+
+ if (thread->depress_priority >= 0)
+ (void) thread_depress_abort(thread);
+ myprocessor = current_processor();
+ thread_syscall_return(myprocessor->runq.count > 0 ||
+ myprocessor->processor_set->runq.count > 0);
+ /*NOTREACHED*/
+}
+#else /* not CONTINUATIONS */
+#define swtch_pri_continue 0
+#endif /* not CONTINUATIONS */
+
+boolean_t swtch_pri(pri)
+ int pri;
+{
+ register thread_t thread = current_thread();
+ register processor_t myprocessor;
+
+#ifdef lint
+ pri++;
+#endif lint
+
+#if NCPUS > 1
+ myprocessor = current_processor();
+ if (myprocessor->runq.count == 0 &&
+ myprocessor->processor_set->runq.count == 0)
+ return(FALSE);
+#endif NCPUS > 1
+
+ /*
+ * XXX need to think about depression duration.
+ * XXX currently using min quantum.
+ */
+ thread_depress_priority(thread, min_quantum);
+
+ counter(c_swtch_pri_block++);
+ thread_block(swtch_pri_continue);
+
+ if (thread->depress_priority >= 0)
+ (void) thread_depress_abort(thread);
+ myprocessor = current_processor();
+ return(myprocessor->runq.count > 0 ||
+ myprocessor->processor_set->runq.count > 0);
+}
+
+extern int hz;
+
+#ifdef CONTINUATIONS
+void thread_switch_continue()
+{
+ register thread_t cur_thread = current_thread();
+
+ /*
+ * Restore depressed priority
+ */
+ if (cur_thread->depress_priority >= 0)
+ (void) thread_depress_abort(cur_thread);
+ thread_syscall_return(KERN_SUCCESS);
+ /*NOTREACHED*/
+}
+#else /* not CONTINUATIONS */
+#define thread_switch_continue 0
+#endif /* not CONTINUATIONS */
+
+/*
+ * thread_switch:
+ *
+ * Context switch. User may supply thread hint.
+ *
+ * Fixed priority threads that call this get what they asked for
+ * even if that violates priority order.
+ */
+kern_return_t thread_switch(thread_name, option, option_time)
+mach_port_t thread_name;
+int option;
+mach_msg_timeout_t option_time;
+{
+ register thread_t cur_thread = current_thread();
+ register processor_t myprocessor;
+ ipc_port_t port;
+
+ /*
+ * Process option.
+ */
+ switch (option) {
+ case SWITCH_OPTION_NONE:
+ /*
+ * Nothing to do.
+ */
+ break;
+
+ case SWITCH_OPTION_DEPRESS:
+ /*
+ * Depress priority for given time.
+ */
+ thread_depress_priority(cur_thread, option_time);
+ break;
+
+ case SWITCH_OPTION_WAIT:
+ thread_will_wait_with_timeout(cur_thread, option_time);
+ break;
+
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+#ifndef MIGRATING_THREADS /* XXX thread_run defunct */
+ /*
+ * Check and act on thread hint if appropriate.
+ */
+ if ((thread_name != 0) &&
+ (ipc_port_translate_send(cur_thread->task->itk_space,
+ thread_name, &port) == KERN_SUCCESS)) {
+ /* port is locked, but it might not be active */
+
+ /*
+ * Get corresponding thread.
+ */
+ if (ip_active(port) && (ip_kotype(port) == IKOT_THREAD)) {
+ register thread_t thread;
+ register spl_t s;
+
+ thread = (thread_t) port->ip_kobject;
+ /*
+ * Check if the thread is in the right pset. Then
+ * pull it off its run queue. If it
+ * doesn't come, then it's not eligible.
+ */
+ s = splsched();
+ thread_lock(thread);
+ if ((thread->processor_set == cur_thread->processor_set)
+ && (rem_runq(thread) != RUN_QUEUE_NULL)) {
+ /*
+ * Hah, got it!!
+ */
+ thread_unlock(thread);
+ (void) splx(s);
+ ip_unlock(port);
+ /* XXX thread might disappear on us now? */
+#if MACH_FIXPRI
+ if (thread->policy == POLICY_FIXEDPRI) {
+ myprocessor = current_processor();
+ myprocessor->quantum = thread->sched_data;
+ myprocessor->first_quantum = TRUE;
+ }
+#endif MACH_FIXPRI
+ counter(c_thread_switch_handoff++);
+ thread_run(thread_switch_continue, thread);
+ /*
+ * Restore depressed priority
+ */
+ if (cur_thread->depress_priority >= 0)
+ (void) thread_depress_abort(cur_thread);
+
+ return(KERN_SUCCESS);
+ }
+ thread_unlock(thread);
+ (void) splx(s);
+ }
+ ip_unlock(port);
+ }
+#endif /* not MIGRATING_THREADS */
+
+ /*
+ * No handoff hint supplied, or hint was wrong. Call thread_block() in
+ * hopes of running something else. If nothing else is runnable,
+ * thread_block will detect this. WARNING: thread_switch with no
+ * option will not do anything useful if the thread calling it is the
+ * highest priority thread (can easily happen with a collection
+ * of timesharing threads).
+ */
+#if NCPUS > 1
+ myprocessor = current_processor();
+ if (myprocessor->processor_set->runq.count > 0 ||
+ myprocessor->runq.count > 0)
+#endif NCPUS > 1
+ {
+ counter(c_thread_switch_block++);
+ thread_block(thread_switch_continue);
+ }
+
+ /*
+ * Restore depressed priority
+ */
+ if (cur_thread->depress_priority >= 0)
+ (void) thread_depress_abort(cur_thread);
+ return(KERN_SUCCESS);
+}
+
+/*
+ * thread_depress_priority
+ *
+ * Depress thread's priority to lowest possible for specified period.
+ * Intended for use when thread wants a lock but doesn't know which
+ * other thread is holding it. As with thread_switch, fixed
+ * priority threads get exactly what they asked for. Users access
+ * this through the SWITCH_OPTION_DEPRESS option to thread_switch.
+ * A time of zero results in no timeout being scheduled.
+ */
+void
+thread_depress_priority(thread, depress_time)
+register thread_t thread;
+mach_msg_timeout_t depress_time;
+{
+ unsigned int ticks;
+ spl_t s;
+
+ /* convert from milliseconds to ticks */
+ ticks = convert_ipc_timeout_to_ticks(depress_time);
+
+ s = splsched();
+ thread_lock(thread);
+
+ /*
+ * If thread is already depressed, override previous depression.
+ */
+ reset_timeout_check(&thread->depress_timer);
+
+ /*
+ * Save current priority, then set priority and
+ * sched_pri to their lowest possible values.
+ */
+ thread->depress_priority = thread->priority;
+ thread->priority = 31;
+ thread->sched_pri = 31;
+ if (ticks != 0)
+ set_timeout(&thread->depress_timer, ticks);
+
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ * thread_depress_timeout:
+ *
+ * Timeout routine for priority depression.
+ */
+void
+thread_depress_timeout(thread)
+register thread_t thread;
+{
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+
+ /*
+ * If we lose a race with thread_depress_abort,
+ * then depress_priority might be -1.
+ */
+
+ if (thread->depress_priority >= 0) {
+ thread->priority = thread->depress_priority;
+ thread->depress_priority = -1;
+ compute_priority(thread, FALSE);
+ }
+
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ * thread_depress_abort:
+ *
+ * Prematurely abort priority depression if there is one.
+ */
+kern_return_t
+thread_depress_abort(thread)
+register thread_t thread;
+{
+ spl_t s;
+
+ if (thread == THREAD_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ s = splsched();
+ thread_lock(thread);
+
+ /*
+ * Only restore priority if thread is depressed.
+ */
+ if (thread->depress_priority >= 0) {
+ reset_timeout_check(&thread->depress_timer);
+ thread->priority = thread->depress_priority;
+ thread->depress_priority = -1;
+ compute_priority(thread, FALSE);
+ }
+
+ thread_unlock(thread);
+ (void) splx(s);
+ return(KERN_SUCCESS);
+}
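
The comment at the top of this file describes how a spinning thread should use
the swtch() return value; below is a hedged user-level sketch of that advice.
try_lock, struct my_lock and lock_politely are hypothetical placeholders, the
extern declarations stand in for the real trap stubs, and the 10 ms depression
time is arbitrary.

    #include <mach/boolean.h>
    #include <mach/kern_return.h>
    #include <mach/port.h>
    #include <mach/thread_switch.h>

    extern boolean_t swtch();			/* trap stubs, declared loosely */
    extern kern_return_t thread_switch();
    extern boolean_t try_lock();		/* hypothetical test-and-set */

    struct my_lock;				/* hypothetical lock type */

    void lock_politely(l)
    	struct my_lock *l;
    {
    	while (!try_lock(l)) {
    		if (swtch()) {
    			/* Something else is runnable: probe once more,
    			 * then be a good citizen and really back off. */
    			if (try_lock(l))
    				return;
    			(void) thread_switch(MACH_PORT_NULL,
    					     SWITCH_OPTION_DEPRESS,
    					     10);	/* ms, arbitrary */
    		}
    		/* FALSE: nothing else to run, spinning is justified. */
    	}
    }
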
diff --git a/kern/syscall_subr.h b/kern/syscall_subr.h
new file mode 100644
index 00000000..921c2b23
--- /dev/null
+++ b/kern/syscall_subr.h
@@ -0,0 +1,35 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_SYSCALL_SUBR_H_
+#define _KERN_SYSCALL_SUBR_H_
+
+extern int swtch();
+extern int swtch_pri();
+extern int thread_switch();
+extern void thread_depress_timeout();
+
+#endif _KERN_SYSCALL_SUBR_H_
diff --git a/kern/syscall_sw.c b/kern/syscall_sw.c
new file mode 100644
index 00000000..41c8b2f1
--- /dev/null
+++ b/kern/syscall_sw.c
@@ -0,0 +1,289 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach_ipc_compat.h>
+#include <net_atm.h>
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <kern/syscall_sw.h>
+
+/* Include declarations of the trap functions. */
+#include <mach/mach_traps.h>
+#include <mach/message.h>
+#include <kern/syscall_subr.h>
+#include <chips/nw_mk.h>
+
+
+/*
+ * To add a new entry:
+ * Add an "MACH_TRAP(routine, arg count)" to the table below.
+ *
+ * Add trap definition to mach/syscall_sw.h and
+ * recompile user library.
+ *
+ * WARNING: If you add a trap which requires more than 7
+ * parameters, mach/ca/syscall_sw.h and ca/trap.c both need
+ * to be modified for it to work successfully on an
+ * RT. Similarly, mach/mips/syscall_sw.h and mips/locore.s
+ * need to be modified before it will work on Pmaxen.
+ *
+ * WARNING: Don't use numbers 0 through -9. They (along with
+ * the positive numbers) are reserved for Unix.
+ */
+
+int kern_invalid_debug = 0;
+
+mach_port_t null_port()
+{
+ if (kern_invalid_debug) Debugger("null_port mach trap");
+ return(MACH_PORT_NULL);
+}
+
+kern_return_t kern_invalid()
+{
+ if (kern_invalid_debug) Debugger("kern_invalid mach trap");
+ return(KERN_INVALID_ARGUMENT);
+}
+
+extern kern_return_t syscall_vm_map();
+extern kern_return_t syscall_vm_allocate();
+extern kern_return_t syscall_vm_deallocate();
+
+extern kern_return_t syscall_task_create();
+extern kern_return_t syscall_task_terminate();
+extern kern_return_t syscall_task_suspend();
+extern kern_return_t syscall_task_set_special_port();
+
+extern kern_return_t syscall_mach_port_allocate();
+extern kern_return_t syscall_mach_port_deallocate();
+extern kern_return_t syscall_mach_port_insert_right();
+extern kern_return_t syscall_mach_port_allocate_name();
+
+extern kern_return_t syscall_thread_depress_abort();
+extern kern_return_t evc_wait();
+extern kern_return_t evc_wait_clear();
+
+extern kern_return_t syscall_device_write_request();
+extern kern_return_t syscall_device_writev_request();
+
+#ifdef FIPC
+extern kern_return_t syscall_fipc_send();
+extern kern_return_t syscall_fipc_recv();
+#endif FIPC
+
+mach_trap_t mach_trap_table[] = {
+ MACH_TRAP(kern_invalid, 0), /* 0 */ /* Unix */
+ MACH_TRAP(kern_invalid, 0), /* 1 */ /* Unix */
+ MACH_TRAP(kern_invalid, 0), /* 2 */ /* Unix */
+ MACH_TRAP(kern_invalid, 0), /* 3 */ /* Unix */
+ MACH_TRAP(kern_invalid, 0), /* 4 */ /* Unix */
+ MACH_TRAP(kern_invalid, 0), /* 5 */ /* Unix */
+ MACH_TRAP(kern_invalid, 0), /* 6 */ /* Unix */
+ MACH_TRAP(kern_invalid, 0), /* 7 */ /* Unix */
+ MACH_TRAP(kern_invalid, 0), /* 8 */ /* Unix */
+ MACH_TRAP(kern_invalid, 0), /* 9 */ /* Unix */
+
+#if MACH_IPC_COMPAT
+ MACH_TRAP(task_self, 0), /* 10 */ /* obsolete */
+ MACH_TRAP(thread_reply, 0), /* 11 */ /* obsolete */
+ MACH_TRAP(task_notify, 0), /* 12 */ /* obsolete */
+ MACH_TRAP(thread_self, 0), /* 13 */ /* obsolete */
+#else /* MACH_IPC_COMPAT */
+ MACH_TRAP(null_port, 0), /* 10 */
+ MACH_TRAP(null_port, 0), /* 11 */
+ MACH_TRAP(null_port, 0), /* 12 */
+ MACH_TRAP(null_port, 0), /* 13 */
+#endif /* MACH_IPC_COMPAT */
+ MACH_TRAP(kern_invalid, 0), /* 14 */
+ MACH_TRAP(kern_invalid, 0), /* 15 */
+ MACH_TRAP(kern_invalid, 0), /* 16 */
+ MACH_TRAP_STACK(evc_wait, 1), /* 17 */
+ MACH_TRAP_STACK(evc_wait_clear, 1), /* 18 */
+ MACH_TRAP(kern_invalid, 0), /* 19 */
+
+#if MACH_IPC_COMPAT
+ MACH_TRAP(msg_send_trap, 4), /* 20 */ /* obsolete */
+ MACH_TRAP_STACK(msg_receive_trap, 5), /* 21 */ /* obsolete */
+ MACH_TRAP_STACK(msg_rpc_trap, 6), /* 22 */ /* obsolete */
+#else /* MACH_IPC_COMPAT */
+ MACH_TRAP(kern_invalid, 0), /* 20 */
+ MACH_TRAP(kern_invalid, 0), /* 21 */
+ MACH_TRAP(kern_invalid, 0), /* 22 */
+#endif /* MACH_IPC_COMPAT */
+ MACH_TRAP(kern_invalid, 0), /* 23 */
+ MACH_TRAP(kern_invalid, 0), /* 24 */
+ MACH_TRAP_STACK(mach_msg_trap, 7), /* 25 */
+ MACH_TRAP(mach_reply_port, 0), /* 26 */
+ MACH_TRAP(mach_thread_self, 0), /* 27 */
+ MACH_TRAP(mach_task_self, 0), /* 28 */
+ MACH_TRAP(mach_host_self, 0), /* 29 */
+
+ MACH_TRAP(kern_invalid, 0), /* 30 */
+ MACH_TRAP(kern_invalid, 0), /* 31 */
+ MACH_TRAP(kern_invalid, 0), /* 32 */
+ MACH_TRAP(kern_invalid, 0), /* 33 emul: task_by_pid */
+ MACH_TRAP(kern_invalid, 0), /* 34 emul: pid_by_task */
+ MACH_TRAP(kern_invalid, 0), /* 35 */
+ MACH_TRAP(kern_invalid, 0), /* 36 */
+ MACH_TRAP(kern_invalid, 0), /* 37 */
+ MACH_TRAP(kern_invalid, 0), /* 38 */
+
+ MACH_TRAP(syscall_device_writev_request, 6), /* 39 */
+ MACH_TRAP(syscall_device_write_request, 6), /* 40 */
+
+ MACH_TRAP(kern_invalid, 0), /* 41 emul: init_process */
+ MACH_TRAP(kern_invalid, 0), /* 42 */
+ MACH_TRAP(kern_invalid, 0), /* 43 emul: map_fd */
+ MACH_TRAP(kern_invalid, 0), /* 44 emul: rfs_make_symlink */
+ MACH_TRAP(kern_invalid, 0), /* 45 */
+ MACH_TRAP(kern_invalid, 0), /* 46 */
+ MACH_TRAP(kern_invalid, 0), /* 47 */
+ MACH_TRAP(kern_invalid, 0), /* 48 */
+ MACH_TRAP(kern_invalid, 0), /* 49 */
+
+ MACH_TRAP(kern_invalid, 0), /* 50 */
+ MACH_TRAP(kern_invalid, 0), /* 51 */
+ MACH_TRAP(kern_invalid, 0), /* 52 emul: htg_syscall */
+ MACH_TRAP(kern_invalid, 0), /* 53 emul: set_ras_address */
+ MACH_TRAP(kern_invalid, 0), /* 54 */
+#if MACH_IPC_COMPAT
+ MACH_TRAP(host_self, 0), /* 55 */
+#else /* MACH_IPC_COMPAT */
+ MACH_TRAP(null_port, 0), /* 55 */
+#endif /* MACH_IPC_COMPAT */
+ MACH_TRAP(null_port, 0), /* 56 */
+ MACH_TRAP(kern_invalid, 0), /* 57 */
+ MACH_TRAP(kern_invalid, 0), /* 58 */
+ MACH_TRAP_STACK(swtch_pri, 1), /* 59 */
+
+ MACH_TRAP_STACK(swtch, 0), /* 60 */
+ MACH_TRAP_STACK(thread_switch, 3), /* 61 */
+ MACH_TRAP(kern_invalid, 0), /* 62 */
+ MACH_TRAP(kern_invalid, 0), /* 63 */
+ MACH_TRAP(syscall_vm_map, 11), /* 64 */
+ MACH_TRAP(syscall_vm_allocate, 4), /* 65 */
+ MACH_TRAP(syscall_vm_deallocate, 3), /* 66 */
+ MACH_TRAP(kern_invalid, 0), /* 67 */
+ MACH_TRAP(syscall_task_create, 3), /* 68 */
+ MACH_TRAP(syscall_task_terminate, 1), /* 69 */
+
+ MACH_TRAP(syscall_task_suspend, 1), /* 70 */
+ MACH_TRAP(syscall_task_set_special_port, 3), /* 71 */
+ MACH_TRAP(syscall_mach_port_allocate, 3), /* 72 */
+ MACH_TRAP(syscall_mach_port_deallocate, 2), /* 73 */
+ MACH_TRAP(syscall_mach_port_insert_right, 4), /* 74 */
+ MACH_TRAP(syscall_mach_port_allocate_name, 3), /* 75 */
+ MACH_TRAP(syscall_thread_depress_abort, 1), /* 76 */
+ MACH_TRAP(kern_invalid, 0), /* 77 */
+ MACH_TRAP(kern_invalid, 0), /* 78 */
+ MACH_TRAP(kern_invalid, 0), /* 79 */
+
+#if NET_ATM
+ MACH_TRAP(mk_update,3), /* 80 */
+ MACH_TRAP(mk_lookup,2), /* 81 */
+ MACH_TRAP_STACK(mk_endpoint_allocate,4), /* 82 */
+ MACH_TRAP_STACK(mk_endpoint_deallocate,1), /* 83 */
+ MACH_TRAP(mk_buffer_allocate,2), /* 84 */
+ MACH_TRAP(mk_buffer_deallocate,2), /* 85 */
+ MACH_TRAP_STACK(mk_connection_open,4), /* 86 */
+ MACH_TRAP_STACK(mk_connection_accept,3), /* 87 */
+ MACH_TRAP_STACK(mk_connection_close,1), /* 88 */
+ MACH_TRAP_STACK(mk_multicast_add,4), /* 89 */
+ MACH_TRAP_STACK(mk_multicast_drop,4), /* 90 */
+ MACH_TRAP(mk_endpoint_status,3), /* 91 */
+ MACH_TRAP_STACK(mk_send,3), /* 92 */
+ MACH_TRAP_STACK(mk_receive,2), /* 93 */
+ MACH_TRAP_STACK(mk_rpc,4), /* 94 */
+ MACH_TRAP_STACK(mk_select,3), /* 95 */
+#else /* NET_ATM */
+ MACH_TRAP(kern_invalid, 0), /* 80 */
+ MACH_TRAP(kern_invalid, 0), /* 81 */
+ MACH_TRAP(kern_invalid, 0), /* 82 */
+ MACH_TRAP(kern_invalid, 0), /* 83 */
+ MACH_TRAP(kern_invalid, 0), /* 84 */
+ MACH_TRAP(kern_invalid, 0), /* 85 */
+ MACH_TRAP(kern_invalid, 0), /* 86 */
+ MACH_TRAP(kern_invalid, 0), /* 87 */
+ MACH_TRAP(kern_invalid, 0), /* 88 */
+ MACH_TRAP(kern_invalid, 0), /* 89 */
+ MACH_TRAP(kern_invalid, 0), /* 90 */
+ MACH_TRAP(kern_invalid, 0), /* 91 */
+ MACH_TRAP(kern_invalid, 0), /* 92 */
+ MACH_TRAP(kern_invalid, 0), /* 93 */
+ MACH_TRAP(kern_invalid, 0), /* 94 */
+ MACH_TRAP(kern_invalid, 0), /* 95 */
+#endif /* NET_ATM */
+
+#ifdef FIPC
+ MACH_TRAP(syscall_fipc_send, 4), /* 96 */
+ MACH_TRAP(syscall_fipc_recv, 5), /* 97 */
+#else
+ MACH_TRAP(kern_invalid, 0), /* 96 */
+ MACH_TRAP(kern_invalid, 0), /* 97 */
+#endif FIPC
+
+ MACH_TRAP(kern_invalid, 0), /* 98 */
+ MACH_TRAP(kern_invalid, 0), /* 99 */
+
+ MACH_TRAP(kern_invalid, 0), /* 100 */
+ MACH_TRAP(kern_invalid, 0), /* 101 */
+ MACH_TRAP(kern_invalid, 0), /* 102 */
+ MACH_TRAP(kern_invalid, 0), /* 103 */
+ MACH_TRAP(kern_invalid, 0), /* 104 */
+ MACH_TRAP(kern_invalid, 0), /* 105 */
+ MACH_TRAP(kern_invalid, 0), /* 106 */
+ MACH_TRAP(kern_invalid, 0), /* 107 */
+ MACH_TRAP(kern_invalid, 0), /* 108 */
+ MACH_TRAP(kern_invalid, 0), /* 109 */
+
+ MACH_TRAP(kern_invalid, 0), /* 110 */
+ MACH_TRAP(kern_invalid, 0), /* 111 */
+ MACH_TRAP(kern_invalid, 0), /* 112 */
+ MACH_TRAP(kern_invalid, 0), /* 113 */
+ MACH_TRAP(kern_invalid, 0), /* 114 */
+ MACH_TRAP(kern_invalid, 0), /* 115 */
+ MACH_TRAP(kern_invalid, 0), /* 116 */
+ MACH_TRAP(kern_invalid, 0), /* 117 */
+ MACH_TRAP(kern_invalid, 0), /* 118 */
+ MACH_TRAP(kern_invalid, 0), /* 119 */
+
+ MACH_TRAP(kern_invalid, 0), /* 120 */
+ MACH_TRAP(kern_invalid, 0), /* 121 */
+ MACH_TRAP(kern_invalid, 0), /* 122 */
+ MACH_TRAP(kern_invalid, 0), /* 123 */
+ MACH_TRAP(kern_invalid, 0), /* 124 */
+ MACH_TRAP(kern_invalid, 0), /* 125 */
+ MACH_TRAP(kern_invalid, 0), /* 126 */
+ MACH_TRAP(kern_invalid, 0), /* 127 */
+ MACH_TRAP(kern_invalid, 0), /* 128 */
+ MACH_TRAP(kern_invalid, 0), /* 129 */
+};
+
+int mach_trap_count = (sizeof(mach_trap_table) / sizeof(mach_trap_table[0]));
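
A hedged sketch of how a machine-dependent trap handler consumes this table.
The negation follows the warning above that 0 through -9 (and the positive
numbers) belong to Unix, so Mach traps arrive as negative numbers; argument
fetching is machine specific and elided, and mach_trap_dispatch and args are
hypothetical names.

    #include <mach/kern_return.h>
    #include <kern/syscall_sw.h>

    int mach_trap_dispatch(int trapno, int *args)
    {
    	mach_trap_t *entry;

    	trapno = -trapno;			/* Mach trap numbers are negative */
    	if (trapno < 0 || trapno >= mach_trap_count)
    		return KERN_INVALID_ARGUMENT;

    	entry = &mach_trap_table[trapno];
    	switch (entry->mach_trap_arg_count) {
    	case 0:  return (*entry->mach_trap_function)();
    	case 1:  return (*entry->mach_trap_function)(args[0]);
    	case 2:  return (*entry->mach_trap_function)(args[0], args[1]);
    	/* ...the real dispatchers go up to 11 arguments (syscall_vm_map)... */
    	default: return KERN_INVALID_ARGUMENT;
    	}
    }
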
diff --git a/kern/syscall_sw.h b/kern/syscall_sw.h
new file mode 100644
index 00000000..6a21ff56
--- /dev/null
+++ b/kern/syscall_sw.h
@@ -0,0 +1,51 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_SYSCALL_SW_H_
+#define _KERN_SYSCALL_SW_H_
+
+/*
+ * mach_trap_stack indicates the trap may discard
+ * its kernel stack. Some architectures may need
+ * to save more state in the pcb for these traps.
+ */
+
+typedef struct {
+ int mach_trap_arg_count;
+ int (*mach_trap_function)();
+ boolean_t mach_trap_stack;
+ int mach_trap_unused;
+} mach_trap_t;
+
+extern mach_trap_t mach_trap_table[];
+extern int mach_trap_count;
+
+#define MACH_TRAP(name, arg_count) \
+ { (arg_count), (int (*)()) (name), FALSE, 0 }
+#define MACH_TRAP_STACK(name, arg_count) \
+ { (arg_count), (int (*)()) (name), TRUE, 0 }
+
+#endif _KERN_SYSCALL_SW_H_
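
A purely mechanical illustration of what one syscall_sw.c table entry expands
to under the macros above (extra parentheses elided; field order follows
mach_trap_t):

    /* MACH_TRAP_STACK(mach_msg_trap, 7) becomes: */
    { 7, (int (*)()) mach_msg_trap, TRUE, 0 }
    /* arg count, trap function, may-discard-kernel-stack flag, unused pad;
       plain MACH_TRAP(...) differs only in passing FALSE. */
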
diff --git a/kern/task.c b/kern/task.c
new file mode 100644
index 00000000..f72bb0f1
--- /dev/null
+++ b/kern/task.c
@@ -0,0 +1,1238 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/task.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
+ * David Black
+ *
+ * Task management primitives implementation.
+ */
+
+#include <mach_host.h>
+#include <mach_pcsample.h>
+#include <norma_task.h>
+#include <fast_tas.h>
+#include <net_atm.h>
+
+#include <mach/machine/vm_types.h>
+#include <mach/vm_param.h>
+#include <mach/task_info.h>
+#include <mach/task_special_ports.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_types.h>
+#include <kern/mach_param.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <kern/zalloc.h>
+#include <kern/kalloc.h>
+#include <kern/processor.h>
+#include <kern/sched_prim.h> /* for thread_wakeup */
+#include <kern/ipc_tt.h>
+#include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
+#include <machine/machspl.h> /* for splsched */
+
+#if NET_ATM
+#include <chips/nw_mk.h>
+#endif
+
+#if NORMA_TASK
+#define task_create task_create_local
+#endif /* NORMA_TASK */
+
+task_t kernel_task = TASK_NULL;
+zone_t task_zone;
+
+extern void eml_init(void);
+extern void eml_task_reference(task_t, task_t);
+extern void eml_task_deallocate(task_t);
+
+void task_init(void)
+{
+ task_zone = zinit(
+ sizeof(struct task),
+ TASK_MAX * sizeof(struct task),
+ TASK_CHUNK * sizeof(struct task),
+ 0, "tasks");
+
+ eml_init();
+
+ /*
+ * Create the kernel task as the first task.
+ * Task_create must assign to kernel_task as a side effect,
+ * for other initialization. (:-()
+ */
+ (void) task_create(TASK_NULL, FALSE, &kernel_task);
+}
+
+/*
+ * Create a task running in the kernel address space. It may
+ * have its own map of size map_size (if 0, it uses the kernel map),
+ * and may have ipc privileges.
+ */
+task_t kernel_task_create(
+ task_t parent_task,
+ vm_size_t map_size)
+{
+ task_t new_task;
+ vm_offset_t min, max;
+
+ /*
+ * Create the task.
+ */
+ (void) task_create(parent_task, FALSE, &new_task);
+
+ /*
+ * Task_create creates the task with a user-space map.
+ * Remove the map and replace it with the kernel map
+ * or a submap of the kernel map.
+ */
+ vm_map_deallocate(new_task->map);
+ if (map_size == 0)
+ new_task->map = kernel_map;
+ else
+ new_task->map = kmem_suballoc(kernel_map, &min, &max,
+ map_size, FALSE);
+
+ return new_task;
+}
+
+kern_return_t task_create(
+ task_t parent_task,
+ boolean_t inherit_memory,
+ task_t *child_task) /* OUT */
+{
+ register task_t new_task;
+ register processor_set_t pset;
+ int i;
+
+ new_task = (task_t) zalloc(task_zone);
+ if (new_task == TASK_NULL) {
+ panic("task_create: no memory for task structure");
+ }
+
+ /* one ref for just being alive; one for our caller */
+ new_task->ref_count = 2;
+
+ if (child_task == &kernel_task) {
+ new_task->map = kernel_map;
+ } else if (inherit_memory) {
+ new_task->map = vm_map_fork(parent_task->map);
+ } else {
+ new_task->map = vm_map_create(pmap_create(0),
+ round_page(VM_MIN_ADDRESS),
+ trunc_page(VM_MAX_ADDRESS), TRUE);
+ }
+
+ simple_lock_init(&new_task->lock);
+ queue_init(&new_task->thread_list);
+ new_task->suspend_count = 0;
+ new_task->active = TRUE;
+ new_task->user_stop_count = 0;
+ new_task->thread_count = 0;
+
+ eml_task_reference(new_task, parent_task);
+
+ ipc_task_init(new_task, parent_task);
+
+#if NET_ATM
+ new_task->nw_ep_owned = 0;
+#endif
+
+ new_task->total_user_time.seconds = 0;
+ new_task->total_user_time.microseconds = 0;
+ new_task->total_system_time.seconds = 0;
+ new_task->total_system_time.microseconds = 0;
+
+ if (parent_task != TASK_NULL) {
+ task_lock(parent_task);
+ pset = parent_task->processor_set;
+ if (!pset->active)
+ pset = &default_pset;
+ pset_reference(pset);
+ new_task->priority = parent_task->priority;
+ task_unlock(parent_task);
+ }
+ else {
+ pset = &default_pset;
+ pset_reference(pset);
+ new_task->priority = BASEPRI_USER;
+ }
+ pset_lock(pset);
+ pset_add_task(pset, new_task);
+ pset_unlock(pset);
+
+ new_task->may_assign = TRUE;
+ new_task->assign_active = FALSE;
+
+#if MACH_PCSAMPLE
+ new_task->pc_sample.buffer = 0;
+ new_task->pc_sample.seqno = 0;
+ new_task->pc_sample.sampletypes = 0;
+#endif /* MACH_PCSAMPLE */
+
+#if FAST_TAS
+ for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
+ if (inherit_memory) {
+ new_task->fast_tas_base[i] = parent_task->fast_tas_base[i];
+ new_task->fast_tas_end[i] = parent_task->fast_tas_end[i];
+ } else {
+ new_task->fast_tas_base[i] = (vm_offset_t)0;
+ new_task->fast_tas_end[i] = (vm_offset_t)0;
+ }
+ }
+#endif /* FAST_TAS */
+
+ ipc_task_enable(new_task);
+
+#if NORMA_TASK
+ new_task->child_node = -1;
+#endif /* NORMA_TASK */
+
+ *child_task = new_task;
+ return KERN_SUCCESS;
+}
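
/*
 * (Illustrative note, not part of this file.)  The reference discipline
 * implied by the "one ref for just being alive; one for our caller"
 * comment above, from a hypothetical in-kernel caller:
 *
 *	task_t t;
 *
 *	(void) task_create(parent, FALSE, &t);	-- t arrives holding our ref
 *	...use t...
 *	task_deallocate(t);	-- drops our ref; the task's own ref is
 *				   dropped later by task_terminate()
 */
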
+
+/*
+ * task_deallocate:
+ *
+ * Give up a reference to the specified task and destroy it if there
+ * are no other references left. It is assumed that the current thread
+ * is never in this task.
+ */
+void task_deallocate(
+ register task_t task)
+{
+ register int c;
+ register processor_set_t pset;
+
+ if (task == TASK_NULL)
+ return;
+
+ task_lock(task);
+ c = --(task->ref_count);
+ task_unlock(task);
+ if (c != 0)
+ return;
+
+#if NORMA_TASK
+ if (task->map == VM_MAP_NULL) {
+ /* norma placeholder task */
+ zfree(task_zone, (vm_offset_t) task);
+ return;
+ }
+#endif /* NORMA_TASK */
+
+ eml_task_deallocate(task);
+
+ pset = task->processor_set;
+ pset_lock(pset);
+ pset_remove_task(pset,task);
+ pset_unlock(pset);
+ pset_deallocate(pset);
+ vm_map_deallocate(task->map);
+ is_release(task->itk_space);
+ zfree(task_zone, (vm_offset_t) task);
+}
+
+void task_reference(
+ register task_t task)
+{
+ if (task == TASK_NULL)
+ return;
+
+ task_lock(task);
+ task->ref_count++;
+ task_unlock(task);
+}
+
+/*
+ * task_terminate:
+ *
+ * Terminate the specified task. See comments on thread_terminate
+ * (kern/thread.c) about problems with terminating the "current task."
+ */
+kern_return_t task_terminate(
+ register task_t task)
+{
+ register thread_t thread, cur_thread;
+ register queue_head_t *list;
+ register task_t cur_task;
+ spl_t s;
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ list = &task->thread_list;
+ cur_task = current_task();
+ cur_thread = current_thread();
+
+#if NET_ATM
+ /*
+ * Shut down networking.
+ */
+ mk_endpoint_collect(task);
+#endif
+
+ /*
+ * Deactivate task so that it can't be terminated again,
+ * and so lengthy operations in progress will abort.
+ *
+ * If the current thread is in this task, remove it from
+ * the task's thread list to keep the thread-termination
+ * loop simple.
+ */
+ if (task == cur_task) {
+ task_lock(task);
+ if (!task->active) {
+ /*
+ * Task is already being terminated.
+ */
+ task_unlock(task);
+ return KERN_FAILURE;
+ }
+ /*
+ * Make sure current thread is not being terminated.
+ */
+ s = splsched();
+ thread_lock(cur_thread);
+ if (!cur_thread->active) {
+ thread_unlock(cur_thread);
+ (void) splx(s);
+ task_unlock(task);
+ thread_terminate(cur_thread);
+ return KERN_FAILURE;
+ }
+ task->active = FALSE;
+ queue_remove(list, cur_thread, thread_t, thread_list);
+ thread_unlock(cur_thread);
+ (void) splx(s);
+ task_unlock(task);
+
+ /*
+ * Shut down this thread's ipc now because it must
+ * be left alone to terminate the task.
+ */
+ ipc_thread_disable(cur_thread);
+ ipc_thread_terminate(cur_thread);
+ }
+ else {
+ /*
+ * Lock both current and victim task to check for
+ * potential deadlock.
+ */
+ if ((vm_offset_t)task < (vm_offset_t)cur_task) {
+ task_lock(task);
+ task_lock(cur_task);
+ }
+ else {
+ task_lock(cur_task);
+ task_lock(task);
+ }
+ /*
+ * Check if current thread or task is being terminated.
+ */
+ s = splsched();
+ thread_lock(cur_thread);
+ if ((!cur_task->active) ||(!cur_thread->active)) {
+ /*
+ * Current task or thread is being terminated.
+ */
+ thread_unlock(cur_thread);
+ (void) splx(s);
+ task_unlock(task);
+ task_unlock(cur_task);
+ thread_terminate(cur_thread);
+ return KERN_FAILURE;
+ }
+ thread_unlock(cur_thread);
+ (void) splx(s);
+ task_unlock(cur_task);
+
+ if (!task->active) {
+ /*
+ * Task is already being terminated.
+ */
+ task_unlock(task);
+ return KERN_FAILURE;
+ }
+ task->active = FALSE;
+ task_unlock(task);
+ }
+
+ /*
+ * Prevent further execution of the task. ipc_task_disable
+ * prevents further task operations via the task port.
+ * If this is the current task, the current thread will
+ * be left running.
+ */
+ ipc_task_disable(task);
+ (void) task_hold(task);
+ (void) task_dowait(task,TRUE); /* may block */
+
+ /*
+ * Terminate each thread in the task.
+ *
+ * The task_port is closed down, so no more thread_create
+ * operations can be done. Thread_force_terminate closes the
+ * thread port for each thread; when that is done, the
+ * thread will eventually disappear. Thus the loop will
+ * terminate. Call thread_force_terminate instead of
+ * thread_terminate to avoid deadlock checks. Need
+ * to call thread_block() inside loop because some other
+ * thread (e.g., the reaper) may have to run to get rid
+ * of all references to the thread; it won't vanish from
+ * the task's thread list until the last one is gone.
+ */
+ task_lock(task);
+ while (!queue_empty(list)) {
+ thread = (thread_t) queue_first(list);
+ thread_reference(thread);
+ task_unlock(task);
+ thread_force_terminate(thread);
+ thread_deallocate(thread);
+ thread_block((void (*)()) 0);
+ task_lock(task);
+ }
+ task_unlock(task);
+
+ /*
+ * Shut down IPC.
+ */
+ ipc_task_terminate(task);
+
+
+ /*
+ * Deallocate the task's reference to itself.
+ */
+ task_deallocate(task);
+
+ /*
+ * If the current thread is in this task, it has not yet
+ * been terminated (since it was removed from the task's
+ * thread-list). Put it back in the thread list (for
+ * completeness), and terminate it. Since it holds the
+ * last reference to the task, terminating it will deallocate
+ * the task.
+ */
+ if (cur_thread->task == task) {
+ task_lock(task);
+ s = splsched();
+ queue_enter(list, cur_thread, thread_t, thread_list);
+ (void) splx(s);
+ task_unlock(task);
+ (void) thread_terminate(cur_thread);
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * task_hold:
+ *
+ * Suspend execution of the specified task.
+ * This is a recursive-style suspension of the task; a count of
+ * suspends is maintained.
+ */
+kern_return_t task_hold(
+ register task_t task)
+{
+ register queue_head_t *list;
+ register thread_t thread, cur_thread;
+
+ cur_thread = current_thread();
+
+ task_lock(task);
+ if (!task->active) {
+ task_unlock(task);
+ return KERN_FAILURE;
+ }
+
+ task->suspend_count++;
+
+ /*
+ * Iterate through all the threads and hold them.
+ * Do not hold the current thread if it is within the
+ * task.
+ */
+ list = &task->thread_list;
+ queue_iterate(list, thread, thread_t, thread_list) {
+ if (thread != cur_thread)
+ thread_hold(thread);
+ }
+ task_unlock(task);
+ return KERN_SUCCESS;
+}
+
+/*
+ * task_dowait:
+ *
+ * Wait until the task has really been suspended (all of the threads
+ * are stopped). Skip the current thread if it is within the task.
+ *
+ * If task is deactivated while waiting, return a failure code unless
+ * must_wait is true.
+ */
+kern_return_t task_dowait(
+ register task_t task,
+ boolean_t must_wait)
+{
+ register queue_head_t *list;
+ register thread_t thread, cur_thread, prev_thread;
+ register kern_return_t ret = KERN_SUCCESS;
+
+ /*
+ * Iterate through all the threads.
+ * While waiting for each thread, we gain a reference to it
+ * to prevent it from going away on us. This guarantees
+ * that the "next" thread in the list will be a valid thread.
+ *
+ * We depend on the fact that if threads are created while
+ * we are looping through the threads, they will be held
+ * automatically. We don't care about threads that get
+ * deallocated along the way (the reference prevents it
+ * from happening to the thread we are working with).
+ *
+ * If the current thread is in the affected task, it is skipped.
+ *
+ * If the task is deactivated before we're done, and we don't
+ * have to wait for it (must_wait is FALSE), just bail out.
+ */
+ cur_thread = current_thread();
+
+ list = &task->thread_list;
+ prev_thread = THREAD_NULL;
+ task_lock(task);
+ queue_iterate(list, thread, thread_t, thread_list) {
+ if (!(task->active) && !(must_wait)) {
+ ret = KERN_FAILURE;
+ break;
+ }
+ if (thread != cur_thread) {
+ thread_reference(thread);
+ task_unlock(task);
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread);
+ /* may block */
+ (void) thread_dowait(thread, TRUE); /* may block */
+ prev_thread = thread;
+ task_lock(task);
+ }
+ }
+ task_unlock(task);
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread); /* may block */
+ return ret;
+}
+
+kern_return_t task_release(
+ register task_t task)
+{
+ register queue_head_t *list;
+ register thread_t thread, next;
+
+ task_lock(task);
+ if (!task->active) {
+ task_unlock(task);
+ return KERN_FAILURE;
+ }
+
+ task->suspend_count--;
+
+ /*
+ * Iterate through all the threads and release them
+ */
+ list = &task->thread_list;
+ thread = (thread_t) queue_first(list);
+ while (!queue_end(list, (queue_entry_t) thread)) {
+ next = (thread_t) queue_next(&thread->thread_list);
+ thread_release(thread);
+ thread = next;
+ }
+ task_unlock(task);
+ return KERN_SUCCESS;
+}
+
+kern_return_t task_threads(
+ task_t task,
+ thread_array_t *thread_list,
+ natural_t *count)
+{
+ unsigned int actual; /* this many threads */
+ thread_t thread;
+ thread_t *threads;
+ int i;
+
+ vm_size_t size, size_needed;
+ vm_offset_t addr;
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ size = 0; addr = 0;
+
+ for (;;) {
+ task_lock(task);
+ if (!task->active) {
+ task_unlock(task);
+ return KERN_FAILURE;
+ }
+
+ actual = task->thread_count;
+
+ /* do we have the memory we need? */
+
+ size_needed = actual * sizeof(mach_port_t);
+ if (size_needed <= size)
+ break;
+
+ /* unlock the task and allocate more memory */
+ task_unlock(task);
+
+ if (size != 0)
+ kfree(addr, size);
+
+ assert(size_needed > 0);
+ size = size_needed;
+
+ addr = kalloc(size);
+ if (addr == 0)
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /* OK, have memory and the task is locked & active */
+
+ threads = (thread_t *) addr;
+
+ for (i = 0, thread = (thread_t) queue_first(&task->thread_list);
+ i < actual;
+ i++, thread = (thread_t) queue_next(&thread->thread_list)) {
+ /* take ref for convert_thread_to_port */
+ thread_reference(thread);
+ threads[i] = thread;
+ }
+ assert(queue_end(&task->thread_list, (queue_entry_t) thread));
+
+ /* can unlock task now that we've got the thread refs */
+ task_unlock(task);
+
+ if (actual == 0) {
+ /* no threads, so return null pointer and deallocate memory */
+
+ *thread_list = 0;
+ *count = 0;
+
+ if (size != 0)
+ kfree(addr, size);
+ } else {
+ /* if we allocated too much, must copy */
+
+ if (size_needed < size) {
+ vm_offset_t newaddr;
+
+ newaddr = kalloc(size_needed);
+ if (newaddr == 0) {
+ for (i = 0; i < actual; i++)
+ thread_deallocate(threads[i]);
+ kfree(addr, size);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ bcopy((char *) addr, (char *) newaddr, size_needed);
+ kfree(addr, size);
+ threads = (thread_t *) newaddr;
+ }
+
+ *thread_list = (mach_port_t *) threads;
+ *count = actual;
+
+ /* do the conversion that Mig should handle */
+
+ for (i = 0; i < actual; i++)
+ ((ipc_port_t *) threads)[i] =
+ convert_thread_to_port(threads[i]);
+ }
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t task_suspend(
+ register task_t task)
+{
+ register boolean_t hold;
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ hold = FALSE;
+ task_lock(task);
+ if ((task->user_stop_count)++ == 0)
+ hold = TRUE;
+ task_unlock(task);
+
+ /*
+ * If the stop count was positive, the task is
+ * already stopped and we can exit.
+ */
+ if (!hold) {
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * Hold all of the threads in the task, and wait for
+ * them to stop. If the current thread is within
+ * this task, hold it separately so that all of the
+ * other threads can stop first.
+ */
+
+ if (task_hold(task) != KERN_SUCCESS)
+ return KERN_FAILURE;
+
+ if (task_dowait(task, FALSE) != KERN_SUCCESS)
+ return KERN_FAILURE;
+
+ if (current_task() == task) {
+ spl_t s;
+
+ thread_hold(current_thread());
+ /*
+ * We want to call thread_block on our way out,
+ * to stop running.
+ */
+ s = splsched();
+ ast_on(cpu_number(), AST_BLOCK);
+ (void) splx(s);
+ }
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t task_resume(
+ register task_t task)
+{
+ register boolean_t release;
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ release = FALSE;
+ task_lock(task);
+ if (task->user_stop_count > 0) {
+ if (--(task->user_stop_count) == 0)
+ release = TRUE;
+ }
+ else {
+ task_unlock(task);
+ return KERN_FAILURE;
+ }
+ task_unlock(task);
+
+ /*
+ * Release the task if necessary.
+ */
+ if (release)
+ return task_release(task);
+
+ return KERN_SUCCESS;
+}
+
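+/*
+ *	task_info:
+ *
+ *	Return information about the task, selected by flavor
+ *	(TASK_BASIC_INFO or TASK_THREAD_TIMES_INFO).
+ */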
+kern_return_t task_info(
+ task_t task,
+ int flavor,
+ task_info_t task_info_out, /* pointer to OUT array */
+ natural_t *task_info_count) /* IN/OUT */
+{
+ vm_map_t map;
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ switch (flavor) {
+ case TASK_BASIC_INFO:
+ {
+ register task_basic_info_t basic_info;
+
+ if (*task_info_count < TASK_BASIC_INFO_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ basic_info = (task_basic_info_t) task_info_out;
+
+ map = (task == kernel_task) ? kernel_map : task->map;
+
+ basic_info->virtual_size = map->size;
+ basic_info->resident_size = pmap_resident_count(map->pmap)
+ * PAGE_SIZE;
+
+ task_lock(task);
+ basic_info->base_priority = task->priority;
+ basic_info->suspend_count = task->user_stop_count;
+ basic_info->user_time.seconds
+ = task->total_user_time.seconds;
+ basic_info->user_time.microseconds
+ = task->total_user_time.microseconds;
+ basic_info->system_time.seconds
+ = task->total_system_time.seconds;
+ basic_info->system_time.microseconds
+ = task->total_system_time.microseconds;
+ task_unlock(task);
+
+ *task_info_count = TASK_BASIC_INFO_COUNT;
+ break;
+ }
+
+ case TASK_THREAD_TIMES_INFO:
+ {
+ register task_thread_times_info_t times_info;
+ register thread_t thread;
+
+ if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ times_info = (task_thread_times_info_t) task_info_out;
+ times_info->user_time.seconds = 0;
+ times_info->user_time.microseconds = 0;
+ times_info->system_time.seconds = 0;
+ times_info->system_time.microseconds = 0;
+
+ task_lock(task);
+ queue_iterate(&task->thread_list, thread,
+ thread_t, thread_list)
+ {
+ time_value_t user_time, system_time;
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+
+ thread_read_times(thread, &user_time, &system_time);
+
+ thread_unlock(thread);
+ splx(s);
+
+ time_value_add(&times_info->user_time, &user_time);
+ time_value_add(&times_info->system_time, &system_time);
+ }
+ task_unlock(task);
+
+ *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
+ break;
+ }
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return KERN_SUCCESS;
+}
+
+#if MACH_HOST
+/*
+ * task_assign:
+ *
+ * Change the assigned processor set for the task
+ */
+kern_return_t
+task_assign(
+ task_t task,
+ processor_set_t new_pset,
+ boolean_t assign_threads)
+{
+ kern_return_t ret = KERN_SUCCESS;
+ register thread_t thread, prev_thread;
+ register queue_head_t *list;
+ register processor_set_t pset;
+
+ if (task == TASK_NULL || new_pset == PROCESSOR_SET_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Freeze task`s assignment. Prelude to assigning
+ * task. Only one freeze may be held per task.
+ */
+
+ task_lock(task);
+ while (task->may_assign == FALSE) {
+ task->assign_active = TRUE;
+ assert_wait((event_t)&task->assign_active, TRUE);
+ task_unlock(task);
+ thread_block((void (*)()) 0);
+ task_lock(task);
+ }
+
+ /*
+ * Avoid work if task already in this processor set.
+ */
+ if (task->processor_set == new_pset) {
+ /*
+ * No need for task->assign_active wakeup:
+ * task->may_assign is still TRUE.
+ */
+ task_unlock(task);
+ return KERN_SUCCESS;
+ }
+
+ task->may_assign = FALSE;
+ task_unlock(task);
+
+ /*
+ * Safe to get the task`s pset: it cannot change while
+ * task is frozen.
+ */
+ pset = task->processor_set;
+
+ /*
+ * Lock both psets now. Use ordering to avoid deadlock.
+ */
+ Restart:
+ if ((vm_offset_t) pset < (vm_offset_t) new_pset) {
+ pset_lock(pset);
+ pset_lock(new_pset);
+ }
+ else {
+ pset_lock(new_pset);
+ pset_lock(pset);
+ }
+
+ /*
+ * Check if new_pset is ok to assign to. If not,
+ * reassign to default_pset.
+ */
+ if (!new_pset->active) {
+ pset_unlock(pset);
+ pset_unlock(new_pset);
+ new_pset = &default_pset;
+ goto Restart;
+ }
+
+ pset_reference(new_pset);
+
+ /*
+ * Now grab the task lock and move the task.
+ */
+
+ task_lock(task);
+ pset_remove_task(pset, task);
+ pset_add_task(new_pset, task);
+
+ pset_unlock(pset);
+ pset_unlock(new_pset);
+
+ if (assign_threads == FALSE) {
+ /*
+ * We leave existing threads at their
+ * old assignments. Unfreeze task`s
+ * assignment.
+ */
+ task->may_assign = TRUE;
+ if (task->assign_active) {
+ task->assign_active = FALSE;
+ thread_wakeup((event_t) &task->assign_active);
+ }
+ task_unlock(task);
+ pset_deallocate(pset);
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * If current thread is in task, freeze its assignment.
+ */
+ if (current_thread()->task == task) {
+ task_unlock(task);
+ thread_freeze(current_thread());
+ task_lock(task);
+ }
+
+ /*
+ * Iterate down the thread list reassigning all the threads.
+ * New threads pick up task's new processor set automatically.
+ * Do current thread last because new pset may be empty.
+ */
+ list = &task->thread_list;
+ prev_thread = THREAD_NULL;
+ queue_iterate(list, thread, thread_t, thread_list) {
+ if (!(task->active)) {
+ ret = KERN_FAILURE;
+ break;
+ }
+ if (thread != current_thread()) {
+ thread_reference(thread);
+ task_unlock(task);
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread); /* may block */
+ thread_assign(thread,new_pset); /* may block */
+ prev_thread = thread;
+ task_lock(task);
+ }
+ }
+
+ /*
+ * Done, wakeup anyone waiting for us.
+ */
+ task->may_assign = TRUE;
+ if (task->assign_active) {
+ task->assign_active = FALSE;
+ thread_wakeup((event_t)&task->assign_active);
+ }
+ task_unlock(task);
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread); /* may block */
+
+ /*
+ * Finish assignment of current thread.
+ */
+ if (current_thread()->task == task)
+ thread_doassign(current_thread(), new_pset, TRUE);
+
+ pset_deallocate(pset);
+
+ return ret;
+}
+#else /* MACH_HOST */
+/*
+ * task_assign:
+ *
+ * Change the assigned processor set for the task
+ */
+kern_return_t
+task_assign(
+ task_t task,
+ processor_set_t new_pset,
+ boolean_t assign_threads)
+{
+ return KERN_FAILURE;
+}
+#endif /* MACH_HOST */
+
+
+/*
+ * task_assign_default:
+ *
+ * Version of task_assign to assign to default processor set.
+ */
+kern_return_t
+task_assign_default(
+ task_t task,
+ boolean_t assign_threads)
+{
+ return task_assign(task, &default_pset, assign_threads);
+}
+
+/*
+ * task_get_assignment
+ *
+ * Return name of processor set that task is assigned to.
+ */
+kern_return_t task_get_assignment(
+ task_t task,
+ processor_set_t *pset)
+{
+ if (!task->active)
+ return KERN_FAILURE;
+
+ *pset = task->processor_set;
+ pset_reference(*pset);
+ return KERN_SUCCESS;
+}
+
+/*
+ * task_priority
+ *
+ * Set priority of task; used only for newly created threads.
+ * Optionally change priorities of threads.
+ */
+kern_return_t
+task_priority(
+ task_t task,
+ int priority,
+ boolean_t change_threads)
+{
+ kern_return_t ret = KERN_SUCCESS;
+
+ if (task == TASK_NULL || invalid_pri(priority))
+ return KERN_INVALID_ARGUMENT;
+
+ task_lock(task);
+ task->priority = priority;
+
+ if (change_threads) {
+ register thread_t thread;
+ register queue_head_t *list;
+
+ list = &task->thread_list;
+ queue_iterate(list, thread, thread_t, thread_list) {
+ if (thread_priority(thread, priority, FALSE)
+ != KERN_SUCCESS)
+ ret = KERN_FAILURE;
+ }
+ }
+
+ task_unlock(task);
+ return ret;
+}
+
+/*
+ * task_collect_scan:
+ *
+ * Attempt to free resources owned by tasks.
+ */
+
+void task_collect_scan(void)
+{
+ register task_t task, prev_task;
+ processor_set_t pset, prev_pset;
+
+ prev_task = TASK_NULL;
+ prev_pset = PROCESSOR_SET_NULL;
+
+ simple_lock(&all_psets_lock);
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ pset_lock(pset);
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
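+			/*
+			 *	Take references so the task and pset
+			 *	survive while the locks are dropped for
+			 *	pmap_collect(); the previous pair is
+			 *	deallocated only after the locks are
+			 *	released, since deallocation may block.
+			 */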
+ task_reference(task);
+ pset_reference(pset);
+ pset_unlock(pset);
+ simple_unlock(&all_psets_lock);
+
+ pmap_collect(task->map->pmap);
+
+ if (prev_task != TASK_NULL)
+ task_deallocate(prev_task);
+ prev_task = task;
+
+ if (prev_pset != PROCESSOR_SET_NULL)
+ pset_deallocate(prev_pset);
+ prev_pset = pset;
+
+ simple_lock(&all_psets_lock);
+ pset_lock(pset);
+ }
+ pset_unlock(pset);
+ }
+ simple_unlock(&all_psets_lock);
+
+ if (prev_task != TASK_NULL)
+ task_deallocate(prev_task);
+ if (prev_pset != PROCESSOR_SET_NULL)
+ pset_deallocate(prev_pset);
+}
+
+boolean_t task_collect_allowed = TRUE;
+unsigned task_collect_last_tick = 0;
+unsigned task_collect_max_rate = 0; /* in ticks */
+
+/*
+ * consider_task_collect:
+ *
+ * Called by the pageout daemon when the system needs more free pages.
+ */
+
+void consider_task_collect(void)
+{
+ /*
+ * By default, don't attempt task collection more frequently
+ * than once a second.
+ */
+
+ if (task_collect_max_rate == 0)
+ task_collect_max_rate = hz;
+
+ if (task_collect_allowed &&
+ (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
+ task_collect_last_tick = sched_tick;
+ task_collect_scan();
+ }
+}
+
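+/*
+ *	task_ras_control:
+ *
+ *	Install or purge the task's registered RAS (restartable
+ *	atomic sequence) regions used by the FAST_TAS support.
+ *	Fails unless FAST_TAS is configured.
+ */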
+kern_return_t
+task_ras_control(
+ task_t task,
+ vm_offset_t pc,
+ vm_offset_t endpc,
+ int flavor)
+{
+ kern_return_t ret = KERN_FAILURE;
+
+#if FAST_TAS
+ int i;
+
+ ret = KERN_SUCCESS;
+ task_lock(task);
+ switch (flavor) {
+ case TASK_RAS_CONTROL_PURGE_ALL: /* remove all RAS */
+ for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
+ task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
+ }
+ break;
+ case TASK_RAS_CONTROL_PURGE_ONE: /* remove this RAS, collapse remaining */
+ for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
+ if ( (task->fast_tas_base[i] == pc)
+ && (task->fast_tas_end[i] == endpc)) {
+ while (i < TASK_FAST_TAS_NRAS-1) {
+ task->fast_tas_base[i] = task->fast_tas_base[i+1];
+ task->fast_tas_end[i] = task->fast_tas_end[i+1];
+ i++;
+ }
+ task->fast_tas_base[TASK_FAST_TAS_NRAS-1] = 0;
+ task->fast_tas_end[TASK_FAST_TAS_NRAS-1] = 0;
+ break;
+ }
+ }
+ if (i == TASK_FAST_TAS_NRAS) {
+ ret = KERN_INVALID_ADDRESS;
+ }
+ break;
+ case TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE:
+		/* remove all RAS and install this RAS */
+ for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
+ task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
+ }
+ /* FALL THROUGH */
+ case TASK_RAS_CONTROL_INSTALL_ONE: /* install this RAS */
+ for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
+ if ( (task->fast_tas_base[i] == pc)
+ && (task->fast_tas_end[i] == endpc)) {
+ /* already installed */
+ break;
+ }
+ if ((task->fast_tas_base[i] == 0) && (task->fast_tas_end[i] == 0)){
+ task->fast_tas_base[i] = pc;
+ task->fast_tas_end[i] = endpc;
+ break;
+ }
+ }
+ if (i == TASK_FAST_TAS_NRAS) {
+ ret = KERN_RESOURCE_SHORTAGE;
+ }
+ break;
+ default: ret = KERN_INVALID_VALUE;
+ break;
+ }
+ task_unlock(task);
+#endif
+ return ret;
+}
diff --git a/kern/task.h b/kern/task.h
new file mode 100644
index 00000000..52733b9d
--- /dev/null
+++ b/kern/task.h
@@ -0,0 +1,183 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: task.h
+ * Author: Avadis Tevanian, Jr.
+ *
+ * This file contains the structure definitions for tasks.
+ *
+ */
+
+#ifndef _KERN_TASK_H_
+#define _KERN_TASK_H_
+
+#include <norma_task.h>
+#include <fast_tas.h>
+#include <net_atm.h>
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <mach/time_value.h>
+#include <mach/mach_param.h>
+#include <mach/task_info.h>
+#include <kern/kern_types.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <kern/pc_sample.h>
+#include <kern/processor.h>
+#include <kern/syscall_emulation.h>
+#include <vm/vm_map.h>
+
+#if NET_ATM
+typedef struct nw_ep_owned {
+ unsigned int ep;
+ struct nw_ep_owned *next;
+} nw_ep_owned_s, *nw_ep_owned_t;
+#endif
+
+struct task {
+ /* Synchronization/destruction information */
+ decl_simple_lock_data(,lock) /* Task's lock */
+ int ref_count; /* Number of references to me */
+ boolean_t active; /* Task has not been terminated */
+
+ /* Miscellaneous */
+ vm_map_t map; /* Address space description */
+ queue_chain_t pset_tasks; /* list of tasks assigned to pset */
+ int suspend_count; /* Internal scheduling only */
+
+ /* Thread information */
+ queue_head_t thread_list; /* list of threads */
+ int thread_count; /* number of threads */
+ processor_set_t processor_set; /* processor set for new threads */
+ boolean_t may_assign; /* can assigned pset be changed? */
+ boolean_t assign_active; /* waiting for may_assign */
+
+ /* User-visible scheduling information */
+ int user_stop_count; /* outstanding stops */
+ int priority; /* for new threads */
+
+ /* Statistics */
+ time_value_t total_user_time;
+ /* total user time for dead threads */
+ time_value_t total_system_time;
+ /* total system time for dead threads */
+
+ /* IPC structures */
+ decl_simple_lock_data(, itk_lock_data)
+ struct ipc_port *itk_self; /* not a right, doesn't hold ref */
+ struct ipc_port *itk_sself; /* a send right */
+ struct ipc_port *itk_exception; /* a send right */
+ struct ipc_port *itk_bootstrap; /* a send right */
+ struct ipc_port *itk_registered[TASK_PORT_REGISTER_MAX];
+ /* all send rights */
+
+ struct ipc_space *itk_space;
+
+ /* User space system call emulation support */
+ struct eml_dispatch *eml_dispatch;
+
+ sample_control_t pc_sample;
+
+#if NORMA_TASK
+ long child_node; /* if != -1, node for new children */
+#endif /* NORMA_TASK */
+
+#if FAST_TAS
+#define TASK_FAST_TAS_NRAS 8
+ vm_offset_t fast_tas_base[TASK_FAST_TAS_NRAS];
+ vm_offset_t fast_tas_end[TASK_FAST_TAS_NRAS];
+#endif /* FAST_TAS */
+
+#if NET_ATM
+ nw_ep_owned_t nw_ep_owned;
+#endif /* NET_ATM */
+};
+
+#define task_lock(task) simple_lock(&(task)->lock)
+#define task_unlock(task) simple_unlock(&(task)->lock)
+
+#define itk_lock_init(task) simple_lock_init(&(task)->itk_lock_data)
+#define itk_lock(task) simple_lock(&(task)->itk_lock_data)
+#define itk_unlock(task) simple_unlock(&(task)->itk_lock_data)
+
+/*
+ * Exported routines/macros
+ */
+
+extern kern_return_t task_create(
+ task_t parent_task,
+ boolean_t inherit_memory,
+ task_t *child_task);
+extern kern_return_t task_terminate(
+ task_t task);
+extern kern_return_t task_suspend(
+ task_t task);
+extern kern_return_t task_resume(
+ task_t task);
+extern kern_return_t task_threads(
+ task_t task,
+ thread_array_t *thread_list,
+ natural_t *count);
+extern kern_return_t task_info(
+ task_t task,
+ int flavor,
+ task_info_t task_info_out,
+ natural_t *task_info_count);
+extern kern_return_t task_get_special_port(
+ task_t task,
+ int which,
+ struct ipc_port **portp);
+extern kern_return_t task_set_special_port(
+ task_t task,
+ int which,
+ struct ipc_port *port);
+extern kern_return_t task_assign(
+ task_t task,
+ processor_set_t new_pset,
+ boolean_t assign_threads);
+extern kern_return_t task_assign_default(
+ task_t task,
+ boolean_t assign_threads);
+
+/*
+ * Internal only routines
+ */
+
+extern void task_init();
+extern void task_reference();
+extern void task_deallocate();
+extern kern_return_t task_hold();
+extern kern_return_t task_dowait();
+extern kern_return_t task_release();
+extern kern_return_t task_halt();
+
+extern kern_return_t task_suspend_nowait();
+extern task_t kernel_task_create();
+
+extern task_t kernel_task;
+
+#endif	/* _KERN_TASK_H_ */
diff --git a/kern/thread.c b/kern/thread.c
new file mode 100644
index 00000000..02969f30
--- /dev/null
+++ b/kern/thread.c
@@ -0,0 +1,2575 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/thread.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
+ * Date: 1986
+ *
+ * Thread management primitives implementation.
+ */
+
+#include <cpus.h>
+#include <hw_footprint.h>
+#include <mach_host.h>
+#include <mach_fixpri.h>
+#include <mach_pcsample.h>
+#include <simple_clock.h>
+#include <mach_debug.h>
+#include <net_atm.h>
+
+#include <mach/std_types.h>
+#include <mach/policy.h>
+#include <mach/thread_info.h>
+#include <mach/thread_special_ports.h>
+#include <mach/thread_status.h>
+#include <mach/time_value.h>
+#include "vm_param.h"
+#include <kern/ast.h>
+#include <kern/counters.h>
+#include <kern/ipc_tt.h>
+#include <kern/mach_param.h>
+#include <kern/processor.h>
+#include <kern/queue.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+#include <kern/thread_swap.h>
+#include <kern/host.h>
+#include <kern/zalloc.h>
+#include <vm/vm_kern.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_port.h>
+#include <ipc/mach_msg.h>
+#include <machine/machspl.h> /* for splsched */
+#include <machine/thread.h> /* for MACHINE_STACK */
+
+#if NET_ATM
+#include <chips/nw_mk.h>
+#endif
+
+thread_t active_threads[NCPUS];
+vm_offset_t active_stacks[NCPUS];
+
+struct zone *thread_zone;
+
+queue_head_t reaper_queue;
+decl_simple_lock_data(, reaper_lock)
+
+extern int tick;
+
+extern void pcb_module_init(void);
+
+/* private */
+struct thread thread_template;
+
+#if MACH_DEBUG
+void stack_init(vm_offset_t stack); /* forward */
+void stack_finalize(vm_offset_t stack); /* forward */
+
+#define STACK_MARKER 0xdeadbeefU
+boolean_t stack_check_usage = FALSE;
+decl_simple_lock_data(, stack_usage_lock)
+vm_size_t stack_max_usage = 0;
+#endif /* MACH_DEBUG */
+
+/*
+ * Machine-dependent code must define:
+ * pcb_init
+ * pcb_terminate
+ * pcb_collect
+ *
+ * The thread->pcb field is reserved for machine-dependent code.
+ */
+
+#ifdef MACHINE_STACK
+/*
+ * Machine-dependent code must define:
+ * stack_alloc_try
+ * stack_alloc
+ * stack_free
+ * stack_handoff
+ * stack_collect
+ * and if MACH_DEBUG:
+ * stack_statistics
+ */
+#else /* MACHINE_STACK */
+/*
+ * We allocate stacks from generic kernel VM.
+ * Machine-dependent code must define:
+ * stack_attach
+ * stack_detach
+ * stack_handoff
+ *
+ * The stack_free_list can only be accessed at splsched,
+ * because stack_alloc_try/thread_invoke operate at splsched.
+ */
+
+decl_simple_lock_data(, stack_lock_data)/* splsched only */
+#define stack_lock() simple_lock(&stack_lock_data)
+#define stack_unlock() simple_unlock(&stack_lock_data)
+
+vm_offset_t stack_free_list; /* splsched only */
+unsigned int stack_free_count = 0; /* splsched only */
+unsigned int stack_free_limit = 1; /* patchable */
+
+unsigned int stack_alloc_hits = 0; /* debugging */
+unsigned int stack_alloc_misses = 0; /* debugging */
+unsigned int stack_alloc_max = 0; /* debugging */
+
+/*
+ * The next field is at the base of the stack,
+ * so the low end is left unsullied.
+ */
+
+#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
+
+/*
+ * stack_alloc_try:
+ *
+ * Non-blocking attempt to allocate a kernel stack.
+ * Called at splsched with the thread locked.
+ */
+
+boolean_t stack_alloc_try(
+ thread_t thread,
+ void (*resume)(thread_t))
+{
+ register vm_offset_t stack;
+
+ stack_lock();
+ stack = stack_free_list;
+ if (stack != 0) {
+ stack_free_list = stack_next(stack);
+ stack_free_count--;
+ } else {
+ stack = thread->stack_privilege;
+ }
+ stack_unlock();
+
+ if (stack != 0) {
+ stack_attach(thread, stack, resume);
+ stack_alloc_hits++;
+ return TRUE;
+ } else {
+ stack_alloc_misses++;
+ return FALSE;
+ }
+}
+
+/*
+ * stack_alloc:
+ *
+ * Allocate a kernel stack for a thread.
+ * May block.
+ */
+
+void stack_alloc(
+ thread_t thread,
+ void (*resume)(thread_t))
+{
+ vm_offset_t stack;
+ spl_t s;
+
+ /*
+ * We first try the free list. It is probably empty,
+ * or stack_alloc_try would have succeeded, but possibly
+ * a stack was freed before the swapin thread got to us.
+ */
+
+ s = splsched();
+ stack_lock();
+ stack = stack_free_list;
+ if (stack != 0) {
+ stack_free_list = stack_next(stack);
+ stack_free_count--;
+ }
+ stack_unlock();
+ (void) splx(s);
+
+ if (stack == 0) {
+ /*
+ * Kernel stacks should be naturally aligned,
+ * so that it is easy to find the starting/ending
+ * addresses of a stack given an address in the middle.
+ */
+
+ if (kmem_alloc_aligned(kernel_map, &stack, KERNEL_STACK_SIZE)
+ != KERN_SUCCESS)
+ panic("stack_alloc");
+
+#if MACH_DEBUG
+ stack_init(stack);
+#endif /* MACH_DEBUG */
+ }
+
+ stack_attach(thread, stack, resume);
+}
+
+/*
+ * stack_free:
+ *
+ * Free a thread's kernel stack.
+ * Called at splsched with the thread locked.
+ */
+
+void stack_free(
+ thread_t thread)
+{
+ register vm_offset_t stack;
+
+ stack = stack_detach(thread);
+
+ if (stack != thread->stack_privilege) {
+ stack_lock();
+ stack_next(stack) = stack_free_list;
+ stack_free_list = stack;
+ if (++stack_free_count > stack_alloc_max)
+ stack_alloc_max = stack_free_count;
+ stack_unlock();
+ }
+}
+
+/*
+ * stack_collect:
+ *
+ * Free excess kernel stacks.
+ * May block.
+ */
+
+void stack_collect(void)
+{
+ register vm_offset_t stack;
+ spl_t s;
+
+ s = splsched();
+ stack_lock();
+ while (stack_free_count > stack_free_limit) {
+ stack = stack_free_list;
+ stack_free_list = stack_next(stack);
+ stack_free_count--;
+ stack_unlock();
+ (void) splx(s);
+
+#if MACH_DEBUG
+ stack_finalize(stack);
+#endif /* MACH_DEBUG */
+ kmem_free(kernel_map, stack, KERNEL_STACK_SIZE);
+
+ s = splsched();
+ stack_lock();
+ }
+ stack_unlock();
+ (void) splx(s);
+}
+#endif /* MACHINE_STACK */
+
+/*
+ * stack_privilege:
+ *
+ * stack_alloc_try on this thread must always succeed.
+ */
+
+void stack_privilege(
+ register thread_t thread)
+{
+ /*
+ * This implementation only works for the current thread.
+ */
+
+ if (thread != current_thread())
+ panic("stack_privilege");
+
+ if (thread->stack_privilege == 0)
+ thread->stack_privilege = current_stack();
+}
+
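+/*
+ *	thread_init:
+ *
+ *	Initialize the thread module: create the zone of thread
+ *	structures, fill in the template used for fast thread
+ *	initialization, and set up the reaper queue and locks.
+ */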
+void thread_init(void)
+{
+ thread_zone = zinit(
+ sizeof(struct thread),
+ THREAD_MAX * sizeof(struct thread),
+ THREAD_CHUNK * sizeof(struct thread),
+ 0, "threads");
+
+ /*
+ * Fill in a template thread for fast initialization.
+ * [Fields that must be (or are typically) reset at
+ * time of creation are so noted.]
+ */
+
+ /* thread_template.links (none) */
+ thread_template.runq = RUN_QUEUE_NULL;
+
+ /* thread_template.task (later) */
+ /* thread_template.thread_list (later) */
+ /* thread_template.pset_threads (later) */
+
+ /* thread_template.lock (later) */
+ /* one ref for being alive; one for the guy who creates the thread */
+ thread_template.ref_count = 2;
+
+ thread_template.pcb = (pcb_t) 0; /* (reset) */
+ thread_template.kernel_stack = (vm_offset_t) 0;
+ thread_template.stack_privilege = (vm_offset_t) 0;
+
+ thread_template.wait_event = 0;
+ /* thread_template.suspend_count (later) */
+ thread_template.wait_result = KERN_SUCCESS;
+ thread_template.wake_active = FALSE;
+ thread_template.state = TH_SUSP | TH_SWAPPED;
+ thread_template.swap_func = thread_bootstrap_return;
+
+/* thread_template.priority (later) */
+ thread_template.max_priority = BASEPRI_USER;
+/* thread_template.sched_pri (later - compute_priority) */
+#if MACH_FIXPRI
+ thread_template.sched_data = 0;
+ thread_template.policy = POLICY_TIMESHARE;
+#endif /* MACH_FIXPRI */
+ thread_template.depress_priority = -1;
+ thread_template.cpu_usage = 0;
+ thread_template.sched_usage = 0;
+ /* thread_template.sched_stamp (later) */
+
+ thread_template.recover = (vm_offset_t) 0;
+ thread_template.vm_privilege = FALSE;
+
+ thread_template.user_stop_count = 1;
+
+ /* thread_template.<IPC structures> (later) */
+
+ timer_init(&(thread_template.user_timer));
+ timer_init(&(thread_template.system_timer));
+ thread_template.user_timer_save.low = 0;
+ thread_template.user_timer_save.high = 0;
+ thread_template.system_timer_save.low = 0;
+ thread_template.system_timer_save.high = 0;
+ thread_template.cpu_delta = 0;
+ thread_template.sched_delta = 0;
+
+ thread_template.active = FALSE; /* reset */
+ thread_template.ast = AST_ZILCH;
+
+ /* thread_template.processor_set (later) */
+ thread_template.bound_processor = PROCESSOR_NULL;
+#if MACH_HOST
+ thread_template.may_assign = TRUE;
+ thread_template.assign_active = FALSE;
+#endif /* MACH_HOST */
+
+#if NCPUS > 1
+ /* thread_template.last_processor (later) */
+#endif /* NCPUS > 1 */
+
+ /*
+ * Initialize other data structures used in
+ * this module.
+ */
+
+ queue_init(&reaper_queue);
+ simple_lock_init(&reaper_lock);
+
+#ifndef MACHINE_STACK
+ simple_lock_init(&stack_lock_data);
+#endif /* MACHINE_STACK */
+
+#if MACH_DEBUG
+ simple_lock_init(&stack_usage_lock);
+#endif /* MACH_DEBUG */
+
+ /*
+ * Initialize any machine-dependent
+ * per-thread structures necessary.
+ */
+
+ pcb_module_init();
+}
+
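+/*
+ *	thread_create:
+ *
+ *	Create a thread in the parent task.  The new thread starts
+ *	suspended and swapped, and is added to the task's processor
+ *	set.
+ */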
+kern_return_t thread_create(
+ register task_t parent_task,
+ thread_t *child_thread) /* OUT */
+{
+ register thread_t new_thread;
+ register processor_set_t pset;
+
+ if (parent_task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Allocate a thread and initialize static fields
+ */
+
+ new_thread = (thread_t) zalloc(thread_zone);
+
+ if (new_thread == THREAD_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ *new_thread = thread_template;
+
+ /*
+ * Initialize runtime-dependent fields
+ */
+
+ new_thread->task = parent_task;
+ simple_lock_init(&new_thread->lock);
+ new_thread->sched_stamp = sched_tick;
+ thread_timeout_setup(new_thread);
+
+ /*
+ * Create a pcb. The kernel stack is created later,
+ * when the thread is swapped-in.
+ */
+ pcb_init(new_thread);
+
+ ipc_thread_init(new_thread);
+
+#if NET_ATM
+ new_thread->nw_ep_waited = 0;
+#endif
+
+ /*
+ * Find the processor set for the parent task.
+ */
+ task_lock(parent_task);
+ pset = parent_task->processor_set;
+ pset_reference(pset);
+ task_unlock(parent_task);
+
+ /*
+ * Lock both the processor set and the task,
+ * so that the thread can be added to both
+ * simultaneously. Processor set must be
+ * locked first.
+ */
+
+ Restart:
+ pset_lock(pset);
+ task_lock(parent_task);
+
+ /*
+ * If the task has changed processor sets,
+ * catch up (involves lots of lock juggling).
+ */
+ {
+ processor_set_t cur_pset;
+
+ cur_pset = parent_task->processor_set;
+ if (!cur_pset->active)
+ cur_pset = &default_pset;
+
+ if (cur_pset != pset) {
+ pset_reference(cur_pset);
+ task_unlock(parent_task);
+ pset_unlock(pset);
+ pset_deallocate(pset);
+ pset = cur_pset;
+ goto Restart;
+ }
+ }
+
+ /*
+ * Set the thread`s priority from the pset and task.
+ */
+
+ new_thread->priority = parent_task->priority;
+ if (pset->max_priority > new_thread->max_priority)
+ new_thread->max_priority = pset->max_priority;
+ if (new_thread->max_priority > new_thread->priority)
+ new_thread->priority = new_thread->max_priority;
+ /*
+ * Don't need to lock thread here because it can't
+ * possibly execute and no one else knows about it.
+ */
+ compute_priority(new_thread, TRUE);
+
+ /*
+ * Thread is suspended if the task is. Add 1 to
+ * suspend count since thread is created in suspended
+ * state.
+ */
+ new_thread->suspend_count = parent_task->suspend_count + 1;
+
+ /*
+ * Add the thread to the processor set.
+ * If the pset is empty, suspend the thread again.
+ */
+
+ pset_add_thread(pset, new_thread);
+ if (pset->empty)
+ new_thread->suspend_count++;
+
+#if HW_FOOTPRINT
+ /*
+ * Need to set last_processor, idle processor would be best, but
+ * that requires extra locking nonsense. Go for tail of
+ * processors queue to avoid master.
+ */
+ if (!pset->empty) {
+ new_thread->last_processor =
+ (processor_t)queue_first(&pset->processors);
+ }
+ else {
+ /*
+ * Thread created in empty processor set. Pick
+ * master processor as an acceptable legal value.
+ */
+ new_thread->last_processor = master_processor;
+ }
+#else /* HW_FOOTPRINT */
+ /*
+ * Don't need to initialize because the context switch
+ * code will set it before it can be used.
+ */
+#endif /* HW_FOOTPRINT */
+
+#if MACH_PCSAMPLE
+ new_thread->pc_sample.buffer = 0;
+ new_thread->pc_sample.seqno = 0;
+ new_thread->pc_sample.sampletypes = 0;
+#endif /* MACH_PCSAMPLE */
+
+	new_thread->pc_sample.buffer = 0;
+
+ /*
+ * Add the thread to the task`s list of threads.
+ * The new thread holds another reference to the task.
+ */
+
+ parent_task->ref_count++;
+
+ parent_task->thread_count++;
+ queue_enter(&parent_task->thread_list, new_thread, thread_t,
+ thread_list);
+
+ /*
+ * Finally, mark the thread active.
+ */
+
+ new_thread->active = TRUE;
+
+ if (!parent_task->active) {
+ task_unlock(parent_task);
+ pset_unlock(pset);
+ (void) thread_terminate(new_thread);
+ /* release ref we would have given our caller */
+ thread_deallocate(new_thread);
+ return KERN_FAILURE;
+ }
+ task_unlock(parent_task);
+ pset_unlock(pset);
+
+ ipc_thread_enable(new_thread);
+
+ *child_thread = new_thread;
+ return KERN_SUCCESS;
+}
+
+unsigned int thread_deallocate_stack = 0;
+
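+/*
+ *	thread_deallocate:
+ *
+ *	Drop a reference to the thread.  When the last reference
+ *	is released, remove the thread from its task and processor
+ *	set and free its resources.
+ */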
+void thread_deallocate(
+ register thread_t thread)
+{
+ spl_t s;
+ register task_t task;
+ register processor_set_t pset;
+
+ time_value_t user_time, system_time;
+
+ if (thread == THREAD_NULL)
+ return;
+
+ /*
+ * First, check for new count > 0 (the common case).
+ * Only the thread needs to be locked.
+ */
+ s = splsched();
+ thread_lock(thread);
+ if (--thread->ref_count > 0) {
+ thread_unlock(thread);
+ (void) splx(s);
+ return;
+ }
+
+ /*
+ * Count is zero. However, the task's and processor set's
+ * thread lists have implicit references to
+ * the thread, and may make new ones. Their locks also
+ * dominate the thread lock. To check for this, we
+ * temporarily restore the one thread reference, unlock
+ * the thread, and then lock the other structures in
+ * the proper order.
+ */
+ thread->ref_count = 1;
+ thread_unlock(thread);
+ (void) splx(s);
+
+ pset = thread->processor_set;
+ pset_lock(pset);
+
+#if MACH_HOST
+ /*
+ * The thread might have moved.
+ */
+ while (pset != thread->processor_set) {
+ pset_unlock(pset);
+ pset = thread->processor_set;
+ pset_lock(pset);
+ }
+#endif /* MACH_HOST */
+
+ task = thread->task;
+ task_lock(task);
+
+ s = splsched();
+ thread_lock(thread);
+
+ if (--thread->ref_count > 0) {
+ /*
+ * Task or processor_set made extra reference.
+ */
+ thread_unlock(thread);
+ (void) splx(s);
+ task_unlock(task);
+ pset_unlock(pset);
+ return;
+ }
+
+ /*
+ * Thread has no references - we can remove it.
+ */
+
+ /*
+ * Remove pending timeouts.
+ */
+ reset_timeout_check(&thread->timer);
+
+ reset_timeout_check(&thread->depress_timer);
+ thread->depress_priority = -1;
+
+ /*
+ * Accumulate times for dead threads in task.
+ */
+ thread_read_times(thread, &user_time, &system_time);
+ time_value_add(&task->total_user_time, &user_time);
+ time_value_add(&task->total_system_time, &system_time);
+
+ /*
+ * Remove thread from task list and processor_set threads list.
+ */
+ task->thread_count--;
+ queue_remove(&task->thread_list, thread, thread_t, thread_list);
+
+ pset_remove_thread(pset, thread);
+
+ thread_unlock(thread); /* no more references - safe */
+ (void) splx(s);
+ task_unlock(task);
+ pset_unlock(pset);
+ pset_deallocate(pset);
+
+ /*
+ * A couple of quick sanity checks
+ */
+
+ if (thread == current_thread()) {
+ panic("thread deallocating itself");
+ }
+ if ((thread->state & ~(TH_RUN | TH_HALTED | TH_SWAPPED)) != TH_SUSP)
+ panic("unstopped thread destroyed!");
+
+ /*
+ * Deallocate the task reference, since we know the thread
+ * is not running.
+ */
+ task_deallocate(thread->task); /* may block */
+
+ /*
+ * Clean up any machine-dependent resources.
+ */
+ if ((thread->state & TH_SWAPPED) == 0) {
+ spl_t _s_ = splsched();
+ stack_free(thread);
+		(void) splx(_s_);
+ thread_deallocate_stack++;
+ }
+ /*
+ * Rattle the event count machinery (gag)
+ */
+ evc_notify_abort(thread);
+
+ pcb_terminate(thread);
+ zfree(thread_zone, (vm_offset_t) thread);
+}
+
+void thread_reference(
+ register thread_t thread)
+{
+ spl_t s;
+
+ if (thread == THREAD_NULL)
+ return;
+
+ s = splsched();
+ thread_lock(thread);
+ thread->ref_count++;
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ * thread_terminate:
+ *
+ * Permanently stop execution of the specified thread.
+ *
+ * A thread to be terminated must be allowed to clean up any state
+ * that it has before it exits. The thread is broken out of any
+ * wait condition that it is in, and signalled to exit. It then
+ * cleans up its state and calls thread_halt_self on its way out of
+ * the kernel. The caller waits for the thread to halt, terminates
+ * its IPC state, and then deallocates it.
+ *
+ * If the caller is the current thread, it must still exit the kernel
+ * to clean up any state (thread and port references, messages, etc).
+ * When it exits the kernel, it then terminates its IPC state and
+ * queues itself for the reaper thread, which will wait for the thread
+ * to stop and then deallocate it. (A thread cannot deallocate itself,
+ * since it needs a kernel stack to execute.)
+ */
+kern_return_t thread_terminate(
+ register thread_t thread)
+{
+ register thread_t cur_thread = current_thread();
+ register task_t cur_task;
+ spl_t s;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Break IPC control over the thread.
+ */
+ ipc_thread_disable(thread);
+
+ if (thread == cur_thread) {
+
+ /*
+ * Current thread will queue itself for reaper when
+ * exiting kernel.
+ */
+ s = splsched();
+ thread_lock(thread);
+ if (thread->active) {
+ thread->active = FALSE;
+ thread_ast_set(thread, AST_TERMINATE);
+ }
+ thread_unlock(thread);
+ ast_on(cpu_number(), AST_TERMINATE);
+ splx(s);
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * Lock both threads and the current task
+ * to check termination races and prevent deadlocks.
+ */
+ cur_task = current_task();
+ task_lock(cur_task);
+ s = splsched();
+ if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
+ thread_lock(thread);
+ thread_lock(cur_thread);
+ }
+ else {
+ thread_lock(cur_thread);
+ thread_lock(thread);
+ }
+
+ /*
+ * If the current thread is being terminated, help out.
+ */
+ if ((!cur_task->active) || (!cur_thread->active)) {
+ thread_unlock(cur_thread);
+ thread_unlock(thread);
+ (void) splx(s);
+ task_unlock(cur_task);
+ thread_terminate(cur_thread);
+ return KERN_FAILURE;
+ }
+
+ thread_unlock(cur_thread);
+ task_unlock(cur_task);
+
+ /*
+ * Terminate victim thread.
+ */
+ if (!thread->active) {
+ /*
+ * Someone else got there first.
+ */
+ thread_unlock(thread);
+ (void) splx(s);
+ return KERN_FAILURE;
+ }
+
+ thread->active = FALSE;
+
+ thread_unlock(thread);
+ (void) splx(s);
+
+#if MACH_HOST
+ /*
+ * Reassign thread to default pset if needed.
+ */
+ thread_freeze(thread);
+ if (thread->processor_set != &default_pset) {
+ thread_doassign(thread, &default_pset, FALSE);
+ }
+#endif /* MACH_HOST */
+
+ /*
+ * Halt the victim at the clean point.
+ */
+ (void) thread_halt(thread, TRUE);
+#if MACH_HOST
+ thread_unfreeze(thread);
+#endif /* MACH_HOST */
+ /*
+	 * Shut down the victim's IPC and deallocate its
+ * reference to itself.
+ */
+ ipc_thread_terminate(thread);
+#if NET_ATM
+ mk_waited_collect(thread);
+#endif
+ thread_deallocate(thread);
+ return KERN_SUCCESS;
+}
+
+/*
+ * thread_force_terminate:
+ *
+ * Version of thread_terminate called by task_terminate. thread is
+ * not the current thread. task_terminate is the dominant operation,
+ * so we can force this thread to stop.
+ */
+void
+thread_force_terminate(
+ register thread_t thread)
+{
+ boolean_t deallocate_here = FALSE;
+ spl_t s;
+
+ ipc_thread_disable(thread);
+
+#if MACH_HOST
+ /*
+ * Reassign thread to default pset if needed.
+ */
+ thread_freeze(thread);
+ if (thread->processor_set != &default_pset)
+ thread_doassign(thread, &default_pset, FALSE);
+#endif /* MACH_HOST */
+
+ s = splsched();
+ thread_lock(thread);
+ deallocate_here = thread->active;
+ thread->active = FALSE;
+ thread_unlock(thread);
+ (void) splx(s);
+
+ (void) thread_halt(thread, TRUE);
+ ipc_thread_terminate(thread);
+#if NET_ATM
+ mk_waited_collect(thread);
+#endif
+
+#if MACH_HOST
+ thread_unfreeze(thread);
+#endif /* MACH_HOST */
+
+ if (deallocate_here)
+ thread_deallocate(thread);
+}
+
+
+/*
+ * Halt a thread at a clean point, leaving it suspended.
+ *
+ * must_halt indicates whether thread must halt.
+ *
+ */
+kern_return_t thread_halt(
+ register thread_t thread,
+ boolean_t must_halt)
+{
+ register thread_t cur_thread = current_thread();
+ register kern_return_t ret;
+ spl_t s;
+
+ if (thread == cur_thread)
+ panic("thread_halt: trying to halt current thread.");
+ /*
+ * If must_halt is FALSE, then a check must be made for
+ * a cycle of halt operations.
+ */
+ if (!must_halt) {
+ /*
+ * Grab both thread locks.
+ */
+ s = splsched();
+ if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
+ thread_lock(thread);
+ thread_lock(cur_thread);
+ }
+ else {
+ thread_lock(cur_thread);
+ thread_lock(thread);
+ }
+
+ /*
+ * If target thread is already halted, grab a hold
+ * on it and return.
+ */
+ if (thread->state & TH_HALTED) {
+ thread->suspend_count++;
+ thread_unlock(cur_thread);
+ thread_unlock(thread);
+ (void) splx(s);
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * If someone is trying to halt us, we have a potential
+ * halt cycle. Break the cycle by interrupting anyone
+ * who is trying to halt us, and causing this operation
+ * to fail; retry logic will only retry operations
+ * that cannot deadlock. (If must_halt is TRUE, this
+ * operation can never cause a deadlock.)
+ */
+ if (cur_thread->ast & AST_HALT) {
+ thread_wakeup_with_result((event_t)&cur_thread->wake_active,
+ THREAD_INTERRUPTED);
+ thread_unlock(thread);
+ thread_unlock(cur_thread);
+ (void) splx(s);
+ return KERN_FAILURE;
+ }
+
+ thread_unlock(cur_thread);
+
+ }
+ else {
+ /*
+ * Lock thread and check whether it is already halted.
+ */
+ s = splsched();
+ thread_lock(thread);
+ if (thread->state & TH_HALTED) {
+ thread->suspend_count++;
+ thread_unlock(thread);
+ (void) splx(s);
+ return KERN_SUCCESS;
+ }
+ }
+
+ /*
+ * Suspend thread - inline version of thread_hold() because
+ * thread is already locked.
+ */
+ thread->suspend_count++;
+ thread->state |= TH_SUSP;
+
+ /*
+ * If someone else is halting it, wait for that to complete.
+ * Fail if wait interrupted and must_halt is false.
+ */
+ while ((thread->ast & AST_HALT) && (!(thread->state & TH_HALTED))) {
+ thread->wake_active = TRUE;
+ thread_sleep((event_t) &thread->wake_active,
+ simple_lock_addr(thread->lock), TRUE);
+
+ if (thread->state & TH_HALTED) {
+ (void) splx(s);
+ return KERN_SUCCESS;
+ }
+ if ((current_thread()->wait_result != THREAD_AWAKENED)
+ && !(must_halt)) {
+ (void) splx(s);
+ thread_release(thread);
+ return KERN_FAILURE;
+ }
+ thread_lock(thread);
+ }
+
+ /*
+ * Otherwise, have to do it ourselves.
+ */
+
+ thread_ast_set(thread, AST_HALT);
+
+ while (TRUE) {
+ /*
+ * Wait for thread to stop.
+ */
+ thread_unlock(thread);
+ (void) splx(s);
+
+ ret = thread_dowait(thread, must_halt);
+
+ /*
+ * If the dowait failed, so do we. Drop AST_HALT, and
+ * wake up anyone else who might be waiting for it.
+ */
+ if (ret != KERN_SUCCESS) {
+ s = splsched();
+ thread_lock(thread);
+ thread_ast_clear(thread, AST_HALT);
+ thread_wakeup_with_result((event_t)&thread->wake_active,
+ THREAD_INTERRUPTED);
+ thread_unlock(thread);
+ (void) splx(s);
+
+ thread_release(thread);
+ return ret;
+ }
+
+ /*
+ * Clear any interruptible wait.
+ */
+ clear_wait(thread, THREAD_INTERRUPTED, TRUE);
+
+ /*
+ * If the thread's at a clean point, we're done.
+ * Don't need a lock because it really is stopped.
+ */
+ if (thread->state & TH_HALTED) {
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * If the thread is at a nice continuation,
+ * or a continuation with a cleanup routine,
+ * call the cleanup routine.
+ */
+ if ((((thread->swap_func == mach_msg_continue) ||
+ (thread->swap_func == mach_msg_receive_continue)) &&
+ mach_msg_interrupt(thread)) ||
+ (thread->swap_func == thread_exception_return) ||
+ (thread->swap_func == thread_bootstrap_return)) {
+ s = splsched();
+ thread_lock(thread);
+ thread->state |= TH_HALTED;
+ thread_ast_clear(thread, AST_HALT);
+ thread_unlock(thread);
+ splx(s);
+
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * Force the thread to stop at a clean
+ * point, and arrange to wait for it.
+ *
+ * Set it running, so it can notice. Override
+ * the suspend count. We know that the thread
+ * is suspended and not waiting.
+ *
+ * Since the thread may hit an interruptible wait
+ * before it reaches a clean point, we must force it
+ * to wake us up when it does so. This involves some
+ * trickery:
+ * We mark the thread SUSPENDED so that thread_block
+ * will suspend it and wake us up.
+ * We mark the thread RUNNING so that it will run.
+ * We mark the thread UN-INTERRUPTIBLE (!) so that
+ * some other thread trying to halt or suspend it won't
+ * take it off the run queue before it runs. Since
+ * dispatching a thread (the tail of thread_invoke) marks
+ * the thread interruptible, it will stop at the next
+ * context switch or interruptible wait.
+ */
+
+ s = splsched();
+ thread_lock(thread);
+ if ((thread->state & TH_SCHED_STATE) != TH_SUSP)
+ panic("thread_halt");
+ thread->state |= TH_RUN | TH_UNINT;
+ thread_setrun(thread, FALSE);
+
+ /*
+ * Continue loop and wait for thread to stop.
+ */
+ }
+}
+
+void walking_zombie(void)
+{
+ panic("the zombie walks!");
+}
+
+/*
+ * Thread calls this routine on exit from the kernel when it
+ * notices a halt request.
+ */
+void thread_halt_self(void)
+{
+ register thread_t thread = current_thread();
+ spl_t s;
+
+ if (thread->ast & AST_TERMINATE) {
+ /*
+ * Thread is terminating itself. Shut
+ * down IPC, then queue it up for the
+ * reaper thread.
+ */
+ ipc_thread_terminate(thread);
+#if NET_ATM
+ mk_waited_collect(thread);
+#endif
+
+ thread_hold(thread);
+
+ s = splsched();
+ simple_lock(&reaper_lock);
+ enqueue_tail(&reaper_queue, (queue_entry_t) thread);
+ simple_unlock(&reaper_lock);
+
+ thread_lock(thread);
+ thread->state |= TH_HALTED;
+ thread_unlock(thread);
+ (void) splx(s);
+
+ thread_wakeup((event_t)&reaper_queue);
+ counter(c_thread_halt_self_block++);
+ thread_block(walking_zombie);
+ /*NOTREACHED*/
+ } else {
+ /*
+ * Thread was asked to halt - show that it
+ * has done so.
+ */
+ s = splsched();
+ thread_lock(thread);
+ thread->state |= TH_HALTED;
+ thread_ast_clear(thread, AST_HALT);
+ thread_unlock(thread);
+ splx(s);
+ counter(c_thread_halt_self_block++);
+ thread_block(thread_exception_return);
+ /*
+ * thread_release resets TH_HALTED.
+ */
+ }
+}
+
+/*
+ * thread_hold:
+ *
+ * Suspend execution of the specified thread.
+ * This is a recursive-style suspension of the thread, a count of
+ * suspends is maintained.
+ */
+void thread_hold(
+ register thread_t thread)
+{
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+ thread->suspend_count++;
+ thread->state |= TH_SUSP;
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ * thread_dowait:
+ *
+ * Wait for a thread to actually enter stopped state.
+ *
+ * must_halt argument indicates if this may fail on interruption.
+ * This is FALSE only if called from thread_abort via thread_halt.
+ */
+kern_return_t
+thread_dowait(
+ register thread_t thread,
+ boolean_t must_halt)
+{
+ register boolean_t need_wakeup;
+ register kern_return_t ret = KERN_SUCCESS;
+ spl_t s;
+
+ if (thread == current_thread())
+ panic("thread_dowait");
+
+ /*
+ * If a thread is not interruptible, it may not be suspended
+ * until it becomes interruptible. In this case, we wait for
+ * the thread to stop itself, and indicate that we are waiting
+ * for it to stop so that it can wake us up when it does stop.
+ *
+ * If the thread is interruptible, we may be able to suspend
+ * it immediately. There are several cases:
+ *
+ * 1) The thread is already stopped (trivial)
+ * 2) The thread is runnable (marked RUN and on a run queue).
+ * We pull it off the run queue and mark it stopped.
+ * 3) The thread is running. We wait for it to stop.
+ */
+
+ need_wakeup = FALSE;
+ s = splsched();
+ thread_lock(thread);
+
+ for (;;) {
+ switch (thread->state & TH_SCHED_STATE) {
+ case TH_SUSP:
+ case TH_WAIT | TH_SUSP:
+ /*
+ * Thread is already suspended, or sleeping in an
+ * interruptible wait. We win!
+ */
+ break;
+
+ case TH_RUN | TH_SUSP:
+ /*
+ * The thread is interruptible. If we can pull
+ * it off a runq, stop it here.
+ */
+ if (rem_runq(thread) != RUN_QUEUE_NULL) {
+ thread->state &= ~TH_RUN;
+ need_wakeup = thread->wake_active;
+ thread->wake_active = FALSE;
+ break;
+ }
+#if NCPUS > 1
+ /*
+ * The thread must be running, so make its
+ * processor execute ast_check(). This
+ * should cause the thread to take an ast and
+ * context switch to suspend for us.
+ */
+ cause_ast_check(thread->last_processor);
+#endif /* NCPUS > 1 */
+
+ /*
+ * Fall through to wait for thread to stop.
+ */
+
+ case TH_RUN | TH_SUSP | TH_UNINT:
+ case TH_RUN | TH_WAIT | TH_SUSP:
+ case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
+ case TH_WAIT | TH_SUSP | TH_UNINT:
+ /*
+ * Wait for the thread to stop, or sleep interruptibly
+ * (thread_block will stop it in the latter case).
+ * Check for failure if interrupted.
+ */
+ thread->wake_active = TRUE;
+ thread_sleep((event_t) &thread->wake_active,
+ simple_lock_addr(thread->lock), TRUE);
+ thread_lock(thread);
+ if ((current_thread()->wait_result != THREAD_AWAKENED) &&
+ !must_halt) {
+ ret = KERN_FAILURE;
+ break;
+ }
+
+ /*
+ * Repeat loop to check thread`s state.
+ */
+ continue;
+ }
+ /*
+ * Thread is stopped at this point.
+ */
+ break;
+ }
+
+ thread_unlock(thread);
+ (void) splx(s);
+
+ if (need_wakeup)
+ thread_wakeup((event_t) &thread->wake_active);
+
+ return ret;
+}
+
+void thread_release(
+ register thread_t thread)
+{
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+ if (--thread->suspend_count == 0) {
+ thread->state &= ~(TH_SUSP | TH_HALTED);
+ if ((thread->state & (TH_WAIT | TH_RUN)) == 0) {
+ /* was only suspended */
+ thread->state |= TH_RUN;
+ thread_setrun(thread, TRUE);
+ }
+ }
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
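+/*
+ *	thread_suspend:
+ *
+ *	User-visible suspend.  On the first user stop, hold the
+ *	thread and, unless it is the current thread, wait for it
+ *	to actually stop.
+ */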
+kern_return_t thread_suspend(
+ register thread_t thread)
+{
+ register boolean_t hold;
+ spl_t spl;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ hold = FALSE;
+ spl = splsched();
+ thread_lock(thread);
+ if (thread->user_stop_count++ == 0) {
+ hold = TRUE;
+ thread->suspend_count++;
+ thread->state |= TH_SUSP;
+ }
+ thread_unlock(thread);
+ (void) splx(spl);
+
+ /*
+ * Now wait for the thread if necessary.
+ */
+ if (hold) {
+ if (thread == current_thread()) {
+ /*
+ * We want to call thread_block on our way out,
+ * to stop running.
+ */
+ spl = splsched();
+ ast_on(cpu_number(), AST_BLOCK);
+ (void) splx(spl);
+ } else
+ (void) thread_dowait(thread, TRUE);
+ }
+ return KERN_SUCCESS;
+}
+
+
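+/*
+ *	thread_resume:
+ *
+ *	User-visible resume.  Undo one thread_suspend; the thread
+ *	runs again once both the user stop count and the suspend
+ *	count drop to zero.
+ */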
+kern_return_t thread_resume(
+ register thread_t thread)
+{
+ register kern_return_t ret;
+ spl_t s;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ ret = KERN_SUCCESS;
+
+ s = splsched();
+ thread_lock(thread);
+ if (thread->user_stop_count > 0) {
+ if (--thread->user_stop_count == 0) {
+ if (--thread->suspend_count == 0) {
+ thread->state &= ~(TH_SUSP | TH_HALTED);
+ if ((thread->state & (TH_WAIT | TH_RUN)) == 0) {
+ /* was only suspended */
+ thread->state |= TH_RUN;
+ thread_setrun(thread, TRUE);
+ }
+ }
+ }
+ }
+ else {
+ ret = KERN_FAILURE;
+ }
+
+ thread_unlock(thread);
+ (void) splx(s);
+
+ return ret;
+}
+
+/*
+ * Return thread's machine-dependent state.
+ */
+kern_return_t thread_get_state(
+ register thread_t thread,
+ int flavor,
+ thread_state_t old_state, /* pointer to OUT array */
+ natural_t *old_state_count) /*IN/OUT*/
+{
+ kern_return_t ret;
+
+ if (thread == THREAD_NULL || thread == current_thread()) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ thread_hold(thread);
+ (void) thread_dowait(thread, TRUE);
+
+ ret = thread_getstatus(thread, flavor, old_state, old_state_count);
+
+ thread_release(thread);
+ return ret;
+}
+
+/*
+ * Change thread's machine-dependent state.
+ */
+kern_return_t thread_set_state(
+ register thread_t thread,
+ int flavor,
+ thread_state_t new_state,
+ natural_t new_state_count)
+{
+ kern_return_t ret;
+
+ if (thread == THREAD_NULL || thread == current_thread()) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ thread_hold(thread);
+ (void) thread_dowait(thread, TRUE);
+
+ ret = thread_setstatus(thread, flavor, new_state, new_state_count);
+
+ thread_release(thread);
+ return ret;
+}
+
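+/*
+ *	thread_info:
+ *
+ *	Return information about the thread, selected by flavor
+ *	(THREAD_BASIC_INFO or THREAD_SCHED_INFO).
+ */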
+kern_return_t thread_info(
+ register thread_t thread,
+ int flavor,
+ thread_info_t thread_info_out, /* pointer to OUT array */
+ natural_t *thread_info_count) /*IN/OUT*/
+{
+ int state, flags;
+ spl_t s;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (flavor == THREAD_BASIC_INFO) {
+ register thread_basic_info_t basic_info;
+
+ if (*thread_info_count < THREAD_BASIC_INFO_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ basic_info = (thread_basic_info_t) thread_info_out;
+
+ s = splsched();
+ thread_lock(thread);
+
+ /*
+ * Update lazy-evaluated scheduler info because someone wants it.
+ */
+ if ((thread->state & TH_RUN) == 0 &&
+ thread->sched_stamp != sched_tick)
+ update_priority(thread);
+
+ /* fill in info */
+
+ thread_read_times(thread,
+ &basic_info->user_time,
+ &basic_info->system_time);
+ basic_info->base_priority = thread->priority;
+ basic_info->cur_priority = thread->sched_pri;
+
+ /*
+ * To calculate cpu_usage, first correct for timer rate,
+ * then for 5/8 ageing. The correction factor [3/5] is
+ * (1/(5/8) - 1).
+ */
+ basic_info->cpu_usage = thread->cpu_usage /
+ (TIMER_RATE/TH_USAGE_SCALE);
+ basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
+#if SIMPLE_CLOCK
+ /*
+ * Clock drift compensation.
+ */
+ basic_info->cpu_usage =
+ (basic_info->cpu_usage * 1000000)/sched_usec;
+#endif /* SIMPLE_CLOCK */
+
+ if (thread->state & TH_SWAPPED)
+ flags = TH_FLAGS_SWAPPED;
+ else if (thread->state & TH_IDLE)
+ flags = TH_FLAGS_IDLE;
+ else
+ flags = 0;
+
+ if (thread->state & TH_HALTED)
+ state = TH_STATE_HALTED;
+ else
+ if (thread->state & TH_RUN)
+ state = TH_STATE_RUNNING;
+ else
+ if (thread->state & TH_UNINT)
+ state = TH_STATE_UNINTERRUPTIBLE;
+ else
+ if (thread->state & TH_SUSP)
+ state = TH_STATE_STOPPED;
+ else
+ if (thread->state & TH_WAIT)
+ state = TH_STATE_WAITING;
+ else
+ state = 0; /* ? */
+
+ basic_info->run_state = state;
+ basic_info->flags = flags;
+ basic_info->suspend_count = thread->user_stop_count;
+ if (state == TH_STATE_RUNNING)
+ basic_info->sleep_time = 0;
+ else
+ basic_info->sleep_time = sched_tick - thread->sched_stamp;
+
+ thread_unlock(thread);
+ splx(s);
+
+ *thread_info_count = THREAD_BASIC_INFO_COUNT;
+ return KERN_SUCCESS;
+ }
+ else if (flavor == THREAD_SCHED_INFO) {
+ register thread_sched_info_t sched_info;
+
+ if (*thread_info_count < THREAD_SCHED_INFO_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ sched_info = (thread_sched_info_t) thread_info_out;
+
+ s = splsched();
+ thread_lock(thread);
+
+#if MACH_FIXPRI
+ sched_info->policy = thread->policy;
+ if (thread->policy == POLICY_FIXEDPRI) {
+ sched_info->data = (thread->sched_data * tick)/1000;
+ }
+ else {
+ sched_info->data = 0;
+ }
+#else /* MACH_FIXPRI */
+ sched_info->policy = POLICY_TIMESHARE;
+ sched_info->data = 0;
+#endif /* MACH_FIXPRI */
+
+ sched_info->base_priority = thread->priority;
+ sched_info->max_priority = thread->max_priority;
+ sched_info->cur_priority = thread->sched_pri;
+
+ sched_info->depressed = (thread->depress_priority >= 0);
+ sched_info->depress_priority = thread->depress_priority;
+
+ thread_unlock(thread);
+ splx(s);
+
+ *thread_info_count = THREAD_SCHED_INFO_COUNT;
+ return KERN_SUCCESS;
+ }
+
+ return KERN_INVALID_ARGUMENT;
+}
+
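+/*
+ *	thread_abort:
+ *
+ *	Interrupt the thread: clear any event wait, force it to
+ *	a clean point, abort any exception RPC in progress, and
+ *	undo any priority depression.
+ */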
+kern_return_t thread_abort(
+ register thread_t thread)
+{
+ if (thread == THREAD_NULL || thread == current_thread()) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+	/*
+	 *	Clear it of an event wait.
+	 */
+ evc_notify_abort(thread);
+
+ /*
+ * Try to force the thread to a clean point
+ * If the halt operation fails return KERN_ABORTED.
+ * ipc code will convert this to an ipc interrupted error code.
+ */
+ if (thread_halt(thread, FALSE) != KERN_SUCCESS)
+ return KERN_ABORTED;
+
+ /*
+ * If the thread was in an exception, abort that too.
+ */
+ mach_msg_abort_rpc(thread);
+
+ /*
+ * Then set it going again.
+ */
+ thread_release(thread);
+
+ /*
+ * Also abort any depression.
+ */
+ if (thread->depress_priority != -1)
+ thread_depress_abort(thread);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * thread_start:
+ *
+ * Start a thread at the specified routine.
+ * The thread must be in a swapped state.
+ */
+
+void
+thread_start(
+ thread_t thread,
+ continuation_t start)
+{
+ thread->swap_func = start;
+}
+
+/*
+ * kernel_thread:
+ *
+ * Start up a kernel thread in the specified task.
+ */
+
+thread_t kernel_thread(
+ task_t task,
+ continuation_t start,
+ void * arg)
+{
+ thread_t thread;
+
+ (void) thread_create(task, &thread);
+ /* release "extra" ref that thread_create gave us */
+ thread_deallocate(thread);
+ thread_start(thread, start);
+ thread->ith_other = arg;
+
+ /*
+ * We ensure that the kernel thread starts with a stack.
+ * The swapin mechanism might not be operational yet.
+ */
+ thread_doswapin(thread);
+ thread->max_priority = BASEPRI_SYSTEM;
+ thread->priority = BASEPRI_SYSTEM;
+ thread->sched_pri = BASEPRI_SYSTEM;
+ (void) thread_resume(thread);
+ return thread;
+}
+
+/*
+ * reaper_thread:
+ *
+ * This kernel thread runs forever looking for threads to destroy
+ * (when they request that they be destroyed, of course).
+ */
+void reaper_thread_continue(void)
+{
+ for (;;) {
+ register thread_t thread;
+ spl_t s;
+
+ s = splsched();
+ simple_lock(&reaper_lock);
+
+ while ((thread = (thread_t) dequeue_head(&reaper_queue))
+ != THREAD_NULL) {
+ simple_unlock(&reaper_lock);
+ (void) splx(s);
+
+ (void) thread_dowait(thread, TRUE); /* may block */
+ thread_deallocate(thread); /* may block */
+
+ s = splsched();
+ simple_lock(&reaper_lock);
+ }
+
+ assert_wait((event_t) &reaper_queue, FALSE);
+ simple_unlock(&reaper_lock);
+ (void) splx(s);
+ counter(c_reaper_thread_block++);
+ thread_block(reaper_thread_continue);
+ }
+}
+
+void reaper_thread(void)
+{
+ reaper_thread_continue();
+ /*NOTREACHED*/
+}
+
+#if MACH_HOST
+/*
+ * thread_assign:
+ *
+ * Change processor set assignment.
+ * Caller must hold an extra reference to the thread (if this is
+ * called directly from the ipc interface, this is an operation
+ * in progress reference). Caller must hold no locks -- this may block.
+ */
+
+kern_return_t
+thread_assign(
+ thread_t thread,
+ processor_set_t new_pset)
+{
+ if (thread == THREAD_NULL || new_pset == PROCESSOR_SET_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ thread_freeze(thread);
+ thread_doassign(thread, new_pset, TRUE);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * thread_freeze:
+ *
+ * Freeze thread's assignment. Prelude to assigning thread.
+ * Only one freeze may be held per thread.
+ */
+void
+thread_freeze(
+ thread_t thread)
+{
+ spl_t s;
+ /*
+ * Freeze the assignment, deferring to a prior freeze.
+ */
+ s = splsched();
+ thread_lock(thread);
+ while (thread->may_assign == FALSE) {
+ thread->assign_active = TRUE;
+ thread_sleep((event_t) &thread->assign_active,
+ simple_lock_addr(thread->lock), FALSE);
+ thread_lock(thread);
+ }
+ thread->may_assign = FALSE;
+ thread_unlock(thread);
+ (void) splx(s);
+
+}
+
+/*
+ * thread_unfreeze: release freeze on thread's assignment.
+ */
+void
+thread_unfreeze(
+ thread_t thread)
+{
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
+ thread->may_assign = TRUE;
+ if (thread->assign_active) {
+ thread->assign_active = FALSE;
+ thread_wakeup((event_t)&thread->assign_active);
+ }
+ thread_unlock(thread);
+ splx(s);
+}
+
+/*
+ * thread_doassign:
+ *
+ *	Actually do thread assignment.  thread_freeze must have been
+ *	called on the thread.  The release_freeze argument indicates whether
+ * to release freeze on thread.
+ */
+
+void
+thread_doassign(
+ register thread_t thread,
+ register processor_set_t new_pset,
+ boolean_t release_freeze)
+{
+ register processor_set_t pset;
+ register boolean_t old_empty, new_empty;
+ boolean_t recompute_pri = FALSE;
+ spl_t s;
+
+ /*
+ * Check for silly no-op.
+ */
+ pset = thread->processor_set;
+ if (pset == new_pset) {
+ if (release_freeze)
+ thread_unfreeze(thread);
+ return;
+ }
+ /*
+ * Suspend the thread and stop it if it's not the current thread.
+ */
+ thread_hold(thread);
+ if (thread != current_thread())
+ (void) thread_dowait(thread, TRUE);
+
+ /*
+ * Lock both psets now, use ordering to avoid deadlocks.
+ */
+Restart:
+ if ((vm_offset_t)pset < (vm_offset_t)new_pset) {
+ pset_lock(pset);
+ pset_lock(new_pset);
+ }
+ else {
+ pset_lock(new_pset);
+ pset_lock(pset);
+ }
+
+ /*
+ * Check if new_pset is ok to assign to. If not, reassign
+ * to default_pset.
+ */
+ if (!new_pset->active) {
+ pset_unlock(pset);
+ pset_unlock(new_pset);
+ new_pset = &default_pset;
+ goto Restart;
+ }
+
+ pset_reference(new_pset);
+
+ /*
+ * Grab the thread lock and move the thread.
+ * Then drop the lock on the old pset and the thread's
+ * reference to it.
+ */
+ s = splsched();
+ thread_lock(thread);
+
+ thread_change_psets(thread, pset, new_pset);
+
+ old_empty = pset->empty;
+ new_empty = new_pset->empty;
+
+ pset_unlock(pset);
+
+ /*
+ * Reset policy and priorities if needed.
+ */
+#if MACH_FIXPRI
+	if ((thread->policy & new_pset->policies) == 0) {
+ thread->policy = POLICY_TIMESHARE;
+ recompute_pri = TRUE;
+ }
+#endif /* MACH_FIXPRI */
+
+ if (thread->max_priority < new_pset->max_priority) {
+ thread->max_priority = new_pset->max_priority;
+ if (thread->priority < thread->max_priority) {
+ thread->priority = thread->max_priority;
+ recompute_pri = TRUE;
+ }
+ else {
+ if ((thread->depress_priority >= 0) &&
+ (thread->depress_priority < thread->max_priority)) {
+ thread->depress_priority = thread->max_priority;
+ }
+ }
+ }
+
+ pset_unlock(new_pset);
+
+ if (recompute_pri)
+ compute_priority(thread, TRUE);
+
+ if (release_freeze) {
+ thread->may_assign = TRUE;
+ if (thread->assign_active) {
+ thread->assign_active = FALSE;
+ thread_wakeup((event_t)&thread->assign_active);
+ }
+ }
+
+ thread_unlock(thread);
+ splx(s);
+
+ pset_deallocate(pset);
+
+ /*
+ * Figure out hold status of thread. Threads assigned to empty
+ * psets must be held. Therefore:
+ * If old pset was empty release its hold.
+ * Release our hold from above unless new pset is empty.
+ */
+
+ if (old_empty)
+ thread_release(thread);
+ if (!new_empty)
+ thread_release(thread);
+
+ /*
+ * If current_thread is assigned, context switch to force
+ * assignment to happen. This also causes hold to take
+ * effect if the new pset is empty.
+ */
+ if (thread == current_thread()) {
+ s = splsched();
+ ast_on(cpu_number(), AST_BLOCK);
+ (void) splx(s);
+ }
+}
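+
+/*
+ * The pset locking in thread_doassign() relies on a fixed global order
+ * (ascending address) so that two concurrent assignments involving the
+ * same pair of processor sets cannot deadlock.  A minimal sketch of the
+ * idiom, with hypothetical names:
+ *
+ *	void lock_psets_ordered(processor_set_t a, processor_set_t b)
+ *	{
+ *		if ((vm_offset_t) a < (vm_offset_t) b) {
+ *			pset_lock(a);
+ *			pset_lock(b);
+ *		}
+ *		else {
+ *			pset_lock(b);
+ *			pset_lock(a);
+ *		}
+ *	}
+ *
+ * Every contender takes the lower-addressed lock first, so no one can
+ * hold one of the pair while waiting for the other in reverse order.
+ */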
+#else /* MACH_HOST */
+kern_return_t
+thread_assign(
+ thread_t thread,
+ processor_set_t new_pset)
+{
+ return KERN_FAILURE;
+}
+#endif /* MACH_HOST */
+
+/*
+ * thread_assign_default:
+ *
+ * Special version of thread_assign for assigning threads to default
+ * processor set.
+ */
+kern_return_t
+thread_assign_default(
+ thread_t thread)
+{
+ return thread_assign(thread, &default_pset);
+}
+
+/*
+ * thread_get_assignment
+ *
+ * Return current assignment for this thread.
+ */
+kern_return_t thread_get_assignment(
+ thread_t thread,
+ processor_set_t *pset)
+{
+ *pset = thread->processor_set;
+ pset_reference(*pset);
+ return KERN_SUCCESS;
+}
+
+/*
+ * thread_priority:
+ *
+ * Set priority (and possibly max priority) for thread.
+ */
+kern_return_t
+thread_priority(
+ thread_t thread,
+ int priority,
+ boolean_t set_max)
+{
+ spl_t s;
+ kern_return_t ret = KERN_SUCCESS;
+
+ if ((thread == THREAD_NULL) || invalid_pri(priority))
+ return KERN_INVALID_ARGUMENT;
+
+ s = splsched();
+ thread_lock(thread);
+
+ /*
+ * Check for violation of max priority
+ */
+ if (priority < thread->max_priority) {
+ ret = KERN_FAILURE;
+ }
+ else {
+ /*
+ * Set priorities. If a depression is in progress,
+ * change the priority to restore.
+ */
+ if (thread->depress_priority >= 0) {
+ thread->depress_priority = priority;
+ }
+ else {
+ thread->priority = priority;
+ compute_priority(thread, TRUE);
+ }
+
+ if (set_max)
+ thread->max_priority = priority;
+ }
+ thread_unlock(thread);
+ (void) splx(s);
+
+ return ret;
+}
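+
+/*
+ * Worked example of the priority convention used above (numerically
+ * lower values are more urgent): with thread->max_priority == 25, a
+ * request to set priority 12 returns KERN_FAILURE because 12 < 25
+ * would exceed the thread's limit, while a request for priority 30 is
+ * accepted and folded into sched_pri by compute_priority().
+ */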
+
+/*
+ * thread_set_own_priority:
+ *
+ * Internal use only; sets the priority of the calling thread.
+ * Will adjust max_priority if necessary.
+ */
+void
+thread_set_own_priority(
+ int priority)
+{
+ spl_t s;
+ thread_t thread = current_thread();
+
+ s = splsched();
+ thread_lock(thread);
+
+ if (priority < thread->max_priority)
+ thread->max_priority = priority;
+ thread->priority = priority;
+ compute_priority(thread, TRUE);
+
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ * thread_max_priority:
+ *
+ * Reset the max priority for a thread.
+ */
+kern_return_t
+thread_max_priority(
+ thread_t thread,
+ processor_set_t pset,
+ int max_priority)
+{
+ spl_t s;
+ kern_return_t ret = KERN_SUCCESS;
+
+ if ((thread == THREAD_NULL) || (pset == PROCESSOR_SET_NULL) ||
+ invalid_pri(max_priority))
+ return KERN_INVALID_ARGUMENT;
+
+ s = splsched();
+ thread_lock(thread);
+
+#if MACH_HOST
+ /*
+ * Check for wrong processor set.
+ */
+ if (pset != thread->processor_set) {
+ ret = KERN_FAILURE;
+ }
+ else {
+#endif /* MACH_HOST */
+ thread->max_priority = max_priority;
+
+ /*
+ * Reset priority if it violates new max priority
+ */
+ if (max_priority > thread->priority) {
+ thread->priority = max_priority;
+
+ compute_priority(thread, TRUE);
+ }
+ else {
+ if (thread->depress_priority >= 0 &&
+ max_priority > thread->depress_priority)
+ thread->depress_priority = max_priority;
+ }
+#if MACH_HOST
+ }
+#endif /* MACH_HOST */
+
+ thread_unlock(thread);
+ (void) splx(s);
+
+ return ret;
+}
+
+/*
+ * thread_policy:
+ *
+ * Set scheduling policy for thread.
+ */
+kern_return_t
+thread_policy(
+ thread_t thread,
+ int policy,
+ int data)
+{
+#if MACH_FIXPRI
+ register kern_return_t ret = KERN_SUCCESS;
+ register int temp;
+ spl_t s;
+#endif /* MACH_FIXPRI */
+
+ if ((thread == THREAD_NULL) || invalid_policy(policy))
+ return KERN_INVALID_ARGUMENT;
+
+#if MACH_FIXPRI
+ s = splsched();
+ thread_lock(thread);
+
+ /*
+ * Check if changing policy.
+ */
+ if (policy == thread->policy) {
+ /*
+		 * Just changing the scheduling data.  This is meaningless
+		 * for timesharing; for fixed priority it is the quantum,
+		 * but the change has no effect until the current quantum
+		 * runs out.
+ */
+ if (policy == POLICY_FIXEDPRI) {
+ temp = data * 1000;
+ if (temp % tick)
+ temp += tick;
+ thread->sched_data = temp/tick;
+ }
+ }
+ else {
+ /*
+ * Changing policy. Check if new policy is allowed.
+ */
+ if ((thread->processor_set->policies & policy) == 0) {
+ ret = KERN_FAILURE;
+ }
+ else {
+ /*
+ * Changing policy. Save data and calculate new
+ * priority.
+ */
+ thread->policy = policy;
+ if (policy == POLICY_FIXEDPRI) {
+ temp = data * 1000;
+ if (temp % tick)
+ temp += tick;
+ thread->sched_data = temp/tick;
+ }
+ compute_priority(thread, TRUE);
+ }
+ }
+ thread_unlock(thread);
+ (void) splx(s);
+
+ return ret;
+#else /* MACH_FIXPRI */
+ if (policy == POLICY_TIMESHARE)
+ return KERN_SUCCESS;
+ else
+ return KERN_FAILURE;
+#endif /* MACH_FIXPRI */
+}
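+
+/*
+ * Worked example of the quantum conversion above, assuming data is a
+ * quantum in milliseconds and tick is the tick length in microseconds
+ * (as declared in kern/time_out.h): with data == 15 and tick == 10000
+ * (a 100 Hz clock), temp == 15000, which is not a multiple of tick, so
+ * temp becomes 25000 and sched_data == 25000/10000 == 2, i.e. the
+ * requested quantum is rounded up to a whole number of ticks.
+ */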
+
+/*
+ * thread_wire:
+ *
+ * Specify that the target thread must always be able
+ * to run and to allocate memory.
+ */
+kern_return_t
+thread_wire(
+ host_t host,
+ thread_t thread,
+ boolean_t wired)
+{
+ spl_t s;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * This implementation only works for the current thread.
+ * See stack_privilege.
+ */
+ if (thread != current_thread())
+ return KERN_INVALID_ARGUMENT;
+
+ s = splsched();
+ thread_lock(thread);
+
+ if (wired) {
+ thread->vm_privilege = TRUE;
+ stack_privilege(thread);
+ }
+ else {
+ thread->vm_privilege = FALSE;
+/*XXX stack_unprivilege(thread); */
+ thread->stack_privilege = 0;
+ }
+
+ thread_unlock(thread);
+ splx(s);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * thread_collect_scan:
+ *
+ * Attempt to free resources owned by threads.
+ * pcb_collect doesn't do anything yet.
+ */
+
+void thread_collect_scan(void)
+{
+#if 0
+ register thread_t thread, prev_thread;
+ processor_set_t pset, prev_pset;
+
+ prev_thread = THREAD_NULL;
+ prev_pset = PROCESSOR_SET_NULL;
+
+ simple_lock(&all_psets_lock);
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
+ pset_lock(pset);
+ queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
+ spl_t s = splsched();
+ thread_lock(thread);
+
+ /*
+ * Only collect threads which are
+ * not runnable and are swapped.
+ */
+
+ if ((thread->state & (TH_RUN|TH_SWAPPED))
+ == TH_SWAPPED) {
+ thread->ref_count++;
+ thread_unlock(thread);
+ (void) splx(s);
+ pset->ref_count++;
+ pset_unlock(pset);
+ simple_unlock(&all_psets_lock);
+
+ pcb_collect(thread);
+
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread);
+ prev_thread = thread;
+
+ if (prev_pset != PROCESSOR_SET_NULL)
+ pset_deallocate(prev_pset);
+ prev_pset = pset;
+
+ simple_lock(&all_psets_lock);
+ pset_lock(pset);
+ } else {
+ thread_unlock(thread);
+ (void) splx(s);
+ }
+ }
+ pset_unlock(pset);
+ }
+ simple_unlock(&all_psets_lock);
+
+ if (prev_thread != THREAD_NULL)
+ thread_deallocate(prev_thread);
+ if (prev_pset != PROCESSOR_SET_NULL)
+ pset_deallocate(prev_pset);
+#endif /* 0 */
+}
+
+boolean_t thread_collect_allowed = TRUE;
+unsigned thread_collect_last_tick = 0;
+unsigned thread_collect_max_rate = 0; /* in ticks */
+
+/*
+ * consider_thread_collect:
+ *
+ * Called by the pageout daemon when the system needs more free pages.
+ */
+
+void consider_thread_collect(void)
+{
+ /*
+ * By default, don't attempt thread collection more frequently
+ * than once a second.
+ */
+
+ if (thread_collect_max_rate == 0)
+ thread_collect_max_rate = hz;
+
+ if (thread_collect_allowed &&
+ (sched_tick >
+ (thread_collect_last_tick + thread_collect_max_rate))) {
+ thread_collect_last_tick = sched_tick;
+ thread_collect_scan();
+ }
+}
+
+#if MACH_DEBUG
+
+vm_size_t stack_usage(
+ register vm_offset_t stack)
+{
+ int i;
+
+ for (i = 0; i < KERNEL_STACK_SIZE/sizeof(unsigned int); i++)
+ if (((unsigned int *)stack)[i] != STACK_MARKER)
+ break;
+
+ return KERNEL_STACK_SIZE - i * sizeof(unsigned int);
+}
+
+/*
+ * Machine-dependent code should call stack_init
+ * before doing its own initialization of the stack.
+ */
+
+void stack_init(
+ register vm_offset_t stack)
+{
+ if (stack_check_usage) {
+ int i;
+
+ for (i = 0; i < KERNEL_STACK_SIZE/sizeof(unsigned int); i++)
+ ((unsigned int *)stack)[i] = STACK_MARKER;
+ }
+}
+
+/*
+ * Machine-dependent code should call stack_finalize
+ * before releasing the stack memory.
+ */
+
+void stack_finalize(
+ register vm_offset_t stack)
+{
+ if (stack_check_usage) {
+ vm_size_t used = stack_usage(stack);
+
+ simple_lock(&stack_usage_lock);
+ if (used > stack_max_usage)
+ stack_max_usage = used;
+ simple_unlock(&stack_usage_lock);
+ }
+}
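+
+/*
+ * Taken together, stack_init(), stack_usage() and stack_finalize()
+ * implement a simple high-water mark when stack_check_usage is set:
+ * every word of a fresh stack is painted with STACK_MARKER, and the
+ * first overwritten word (scanning from the low end) bounds how deep
+ * the stack ever grew.  Intended life cycle, with hypothetical callers:
+ *
+ *	stack = <allocate KERNEL_STACK_SIZE bytes>;
+ *	stack_init(stack);		-- paint the markers
+ *	... stack is attached to a thread and used ...
+ *	stack_finalize(stack);		-- fold usage into stack_max_usage
+ *	<release the memory>
+ */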
+
+#ifndef MACHINE_STACK
+/*
+ * stack_statistics:
+ *
+ * Return statistics on cached kernel stacks.
+ * *maxusagep must be initialized by the caller.
+ */
+
+void stack_statistics(
+ natural_t *totalp,
+ vm_size_t *maxusagep)
+{
+ spl_t s;
+
+ s = splsched();
+ stack_lock();
+ if (stack_check_usage) {
+ vm_offset_t stack;
+
+ /*
+ * This is pretty expensive to do at splsched,
+ * but it only happens when someone makes
+ * a debugging call, so it should be OK.
+ */
+
+ for (stack = stack_free_list; stack != 0;
+ stack = stack_next(stack)) {
+ vm_size_t usage = stack_usage(stack);
+
+ if (usage > *maxusagep)
+ *maxusagep = usage;
+ }
+ }
+
+ *totalp = stack_free_count;
+ stack_unlock();
+ (void) splx(s);
+}
+#endif /* MACHINE_STACK */
+
+kern_return_t host_stack_usage(
+ host_t host,
+ vm_size_t *reservedp,
+ unsigned int *totalp,
+ vm_size_t *spacep,
+ vm_size_t *residentp,
+ vm_size_t *maxusagep,
+ vm_offset_t *maxstackp)
+{
+ unsigned int total;
+ vm_size_t maxusage;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ simple_lock(&stack_usage_lock);
+ maxusage = stack_max_usage;
+ simple_unlock(&stack_usage_lock);
+
+ stack_statistics(&total, &maxusage);
+
+ *reservedp = 0;
+ *totalp = total;
+ *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
+ *maxusagep = maxusage;
+ *maxstackp = 0;
+ return KERN_SUCCESS;
+}
+
+kern_return_t processor_set_stack_usage(
+ processor_set_t pset,
+ unsigned int *totalp,
+ vm_size_t *spacep,
+ vm_size_t *residentp,
+ vm_size_t *maxusagep,
+ vm_offset_t *maxstackp)
+{
+ unsigned int total;
+ vm_size_t maxusage;
+ vm_offset_t maxstack;
+
+ register thread_t *threads;
+ register thread_t tmp_thread;
+
+ unsigned int actual; /* this many things */
+ unsigned int i;
+
+ vm_size_t size, size_needed;
+ vm_offset_t addr;
+
+ if (pset == PROCESSOR_SET_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ size = 0; addr = 0;
+
+ for (;;) {
+ pset_lock(pset);
+ if (!pset->active) {
+ pset_unlock(pset);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ actual = pset->thread_count;
+
+ /* do we have the memory we need? */
+
+ size_needed = actual * sizeof(thread_t);
+ if (size_needed <= size)
+ break;
+
+ /* unlock the pset and allocate more memory */
+ pset_unlock(pset);
+
+ if (size != 0)
+ kfree(addr, size);
+
+ assert(size_needed > 0);
+ size = size_needed;
+
+ addr = kalloc(size);
+ if (addr == 0)
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /* OK, have memory and the processor_set is locked & active */
+
+ threads = (thread_t *) addr;
+ for (i = 0, tmp_thread = (thread_t) queue_first(&pset->threads);
+ i < actual;
+ i++,
+ tmp_thread = (thread_t) queue_next(&tmp_thread->pset_threads)) {
+ thread_reference(tmp_thread);
+ threads[i] = tmp_thread;
+ }
+ assert(queue_end(&pset->threads, (queue_entry_t) tmp_thread));
+
+ /* can unlock processor set now that we have the thread refs */
+ pset_unlock(pset);
+
+ /* calculate maxusage and free thread references */
+
+ total = 0;
+ maxusage = 0;
+ maxstack = 0;
+ for (i = 0; i < actual; i++) {
+ thread_t thread = threads[i];
+ vm_offset_t stack = 0;
+
+ /*
+ * thread->kernel_stack is only accurate if the
+ * thread isn't swapped and is not executing.
+ *
+ * Of course, we don't have the appropriate locks
+ * for these shenanigans.
+ */
+
+ if ((thread->state & TH_SWAPPED) == 0) {
+ int cpu;
+
+ stack = thread->kernel_stack;
+
+ for (cpu = 0; cpu < NCPUS; cpu++)
+ if (active_threads[cpu] == thread) {
+ stack = active_stacks[cpu];
+ break;
+ }
+ }
+
+ if (stack != 0) {
+ total++;
+
+ if (stack_check_usage) {
+ vm_size_t usage = stack_usage(stack);
+
+ if (usage > maxusage) {
+ maxusage = usage;
+ maxstack = (vm_offset_t) thread;
+ }
+ }
+ }
+
+ thread_deallocate(thread);
+ }
+
+ if (size != 0)
+ kfree(addr, size);
+
+ *totalp = total;
+ *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
+ *maxusagep = maxusage;
+ *maxstackp = maxstack;
+ return KERN_SUCCESS;
+}
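+
+/*
+ * The allocation loop above is a common Mach idiom for snapshotting a
+ * variable-sized set without holding its lock across a (possibly
+ * blocking) allocation.  A sketch of the shape, with hypothetical
+ * names:
+ *
+ *	size = 0; addr = 0;
+ *	for (;;) {
+ *		lock(object);
+ *		size_needed = object->count * sizeof(item_t);
+ *		if (size_needed <= size)
+ *			break;			-- locked, buffer is big enough
+ *		unlock(object);
+ *		if (size != 0)
+ *			kfree(addr, size);
+ *		size = size_needed;
+ *		addr = kalloc(size);
+ *		if (addr == 0)
+ *			return KERN_RESOURCE_SHORTAGE;
+ *	}
+ *
+ * On exit the object is locked and the buffer is large enough; if the
+ * count shrank in the meantime, the extra space is simply unused.
+ */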
+
+/*
+ * Useful in the debugger:
+ */
+void
+thread_stats(void)
+{
+ register thread_t thread;
+ int total = 0, rpcreply = 0;
+
+ queue_iterate(&default_pset.threads, thread, thread_t, pset_threads) {
+ total++;
+ if (thread->ith_rpc_reply != IP_NULL)
+ rpcreply++;
+ }
+
+ printf("%d total threads.\n", total);
+ printf("%d using rpc_reply.\n", rpcreply);
+}
+#endif /* MACH_DEBUG */
diff --git a/kern/thread.h b/kern/thread.h
new file mode 100644
index 00000000..07b7463e
--- /dev/null
+++ b/kern/thread.h
@@ -0,0 +1,371 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: thread.h
+ * Author: Avadis Tevanian, Jr.
+ *
+ * This file contains the structure definitions for threads.
+ *
+ */
+
+#ifndef _KERN_THREAD_H_
+#define _KERN_THREAD_H_
+
+#include <mach_ipc_compat.h>
+#include <hw_footprint.h>
+#include <mach_fixpri.h>
+#include <mach_host.h>
+#include <net_atm.h>
+
+#include <mach/boolean.h>
+#include <mach/thread_info.h>
+#include <mach/thread_status.h>
+#include <mach/machine/vm_types.h>
+#include <mach/message.h>
+#include <mach/port.h>
+#include <mach/vm_prot.h>
+#include <kern/ast.h>
+#include <kern/cpu_number.h>
+#include <kern/queue.h>
+#include <kern/pc_sample.h>
+#include <kern/processor.h>
+#include <kern/sched_prim.h> /* event_t, continuation_t */
+#include <kern/time_out.h>
+#include <kern/timer.h>
+#include <kern/lock.h>
+#include <kern/sched.h>
+#include <kern/task.h> /* for current_space(), current_map() */
+#include <machine/thread.h>
+#include <ipc/ipc_kmsg_queue.h>
+
+struct thread {
+ /* Run queues */
+ queue_chain_t links; /* current run queue links */
+	run_queue_t	runq;		/* run queue this thread is on, SEE BELOW */
+/*
+ * NOTE: The runq field in the thread structure has an unusual
+ * locking protocol. If its value is RUN_QUEUE_NULL, then it is
+ * locked by the thread_lock, but if its value is something else
+ * (i.e. a run_queue) then it is locked by that run_queue's lock.
+ */
+
+ /* Task information */
+ task_t task; /* Task to which I belong */
+ queue_chain_t thread_list; /* list of threads in task */
+
+ /* Thread bookkeeping */
+ queue_chain_t pset_threads; /* list of all threads in proc set*/
+
+ /* Self-preservation */
+ decl_simple_lock_data(,lock)
+ int ref_count; /* number of references to me */
+
+ /* Hardware state */
+ pcb_t pcb; /* hardware pcb & machine state */
+ vm_offset_t kernel_stack; /* accurate only if the thread is
+ not swapped and not executing */
+ vm_offset_t stack_privilege;/* reserved kernel stack */
+
+ /* Swapping information */
+ void (*swap_func)(); /* start here after swapin */
+
+ /* Blocking information */
+ event_t wait_event; /* event we are waiting on */
+ int suspend_count; /* internal use only */
+ kern_return_t wait_result; /* outcome of wait -
+ may be examined by this thread
+ WITHOUT locking */
+ boolean_t wake_active; /* someone is waiting for this
+ thread to become suspended */
+ int state; /* Thread state: */
+/*
+ * Thread states [bits or'ed]
+ */
+#define TH_WAIT 0x01 /* thread is queued for waiting */
+#define TH_SUSP 0x02 /* thread has been asked to stop */
+#define TH_RUN 0x04 /* thread is running or on runq */
+#define TH_UNINT		0x08	/* thread is waiting uninterruptibly */
+#define TH_HALTED 0x10 /* thread is halted at clean point ? */
+
+#define TH_IDLE 0x80 /* thread is an idle thread */
+
+#define TH_SCHED_STATE (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT)
+
+#define TH_SWAPPED 0x0100 /* thread has no kernel stack */
+#define TH_SW_COMING_IN 0x0200 /* thread is waiting for kernel stack */
+
+#define TH_SWAP_STATE (TH_SWAPPED | TH_SW_COMING_IN)
+
+ /* Scheduling information */
+ int priority; /* thread's priority */
+ int max_priority; /* maximum priority */
+ int sched_pri; /* scheduled (computed) priority */
+#if MACH_FIXPRI
+ int sched_data; /* for use by policy */
+ int policy; /* scheduling policy */
+#endif /* MACH_FIXPRI */
+ int depress_priority; /* depressed from this priority */
+ unsigned int cpu_usage; /* exp. decaying cpu usage [%cpu] */
+ unsigned int sched_usage; /* load-weighted cpu usage [sched] */
+ unsigned int sched_stamp; /* last time priority was updated */
+
+ /* VM global variables */
+
+ vm_offset_t recover; /* page fault recovery (copyin/out) */
+ boolean_t vm_privilege; /* Can use reserved memory? */
+
+ /* User-visible scheduling state */
+ int user_stop_count; /* outstanding stops */
+
+ /* IPC data structures */
+ struct thread *ith_next, *ith_prev;
+ mach_msg_return_t ith_state;
+ union {
+ mach_msg_size_t msize; /* max size for recvd msg */
+ struct ipc_kmsg *kmsg; /* received message */
+ } data;
+ mach_port_seqno_t ith_seqno; /* seqno of recvd message */
+
+ /* This queue is used only when destroying messages:
+ it prevents nasty recursion problems when destroying one message
+ causes other messages to be destroyed.
+ This queue should always be empty under normal circumstances.
+ See ipc_kmsg_destroy() for more details. */
+ struct ipc_kmsg_queue ith_messages;
+
+ decl_simple_lock_data(, ith_lock_data)
+ struct ipc_port *ith_self; /* not a right, doesn't hold ref */
+ struct ipc_port *ith_sself; /* a send right */
+ struct ipc_port *ith_exception; /* a send right */
+#if MACH_IPC_COMPAT
+ struct ipc_port *ith_reply; /* a send right */
+#endif /* MACH_IPC_COMPAT */
+
+ mach_port_t ith_mig_reply; /* reply port for mig */
+ struct ipc_port *ith_rpc_reply; /* reply port for kernel RPCs */
+
+ /* State saved when thread's stack is discarded */
+ union {
+ struct {
+ mach_msg_header_t *msg;
+ mach_msg_option_t option;
+ mach_msg_size_t rcv_size;
+ mach_msg_timeout_t timeout;
+ mach_port_t notify;
+ struct ipc_object *object;
+ struct ipc_mqueue *mqueue;
+ } receive;
+ struct {
+ struct ipc_port *port;
+ int exc;
+ int code;
+ int subcode;
+ } exception;
+ void *other; /* catch-all for other state */
+ } saved;
+
+ /* Timing data structures */
+ timer_data_t user_timer; /* user mode timer */
+ timer_data_t system_timer; /* system mode timer */
+ timer_save_data_t user_timer_save; /* saved user timer value */
+ timer_save_data_t system_timer_save; /* saved sys timer val. */
+ unsigned int cpu_delta; /* cpu usage since last update */
+ unsigned int sched_delta; /* weighted cpu usage since update */
+
+ /* Time-outs */
+ timer_elt_data_t timer; /* timer for thread */
+ timer_elt_data_t depress_timer; /* timer for priority depression */
+
+ /* Ast/Halt data structures */
+ boolean_t active; /* how alive is the thread */
+ int ast; /* ast's needed. See ast.h */
+
+ /* Processor data structures */
+ processor_set_t processor_set; /* assigned processor set */
+ processor_t bound_processor; /* bound to processor ?*/
+
+ sample_control_t pc_sample;
+
+#if MACH_HOST
+ boolean_t may_assign; /* may assignment change? */
+ boolean_t assign_active; /* someone waiting for may_assign */
+#endif /* MACH_HOST */
+
+#if NCPUS > 1
+ processor_t last_processor; /* processor this last ran on */
+#endif /* NCPUS > 1 */
+
+#if NET_ATM
+ nw_ep_owned_t nw_ep_waited;
+#endif /* NET_ATM */
+};
+
+/* typedef of thread_t is in kern/kern_types.h */
+typedef struct thread_shuttle *thread_shuttle_t;
+#define THREAD_NULL ((thread_t) 0)
+#define THREAD_SHUTTLE_NULL ((thread_shuttle_t)0)
+
+#define ith_msize data.msize
+#define ith_kmsg data.kmsg
+#define ith_wait_result wait_result
+
+#define ith_msg saved.receive.msg
+#define ith_option saved.receive.option
+#define ith_rcv_size saved.receive.rcv_size
+#define ith_timeout saved.receive.timeout
+#define ith_notify saved.receive.notify
+#define ith_object saved.receive.object
+#define ith_mqueue saved.receive.mqueue
+
+#define ith_port saved.exception.port
+#define ith_exc saved.exception.exc
+#define ith_exc_code saved.exception.code
+#define ith_exc_subcode saved.exception.subcode
+
+#define ith_other saved.other
+
+#ifndef _KERN_KERN_TYPES_H_
+typedef struct thread *thread_t;
+
+#define THREAD_NULL ((thread_t) 0)
+
+typedef mach_port_t *thread_array_t;
+#endif /* _KERN_KERN_TYPES_H_ */
+
+
+extern thread_t active_threads[NCPUS]; /* active threads */
+extern vm_offset_t active_stacks[NCPUS]; /* active kernel stacks */
+
+#ifdef KERNEL
+/*
+ * User routines
+ */
+
+extern kern_return_t thread_create(
+ task_t parent_task,
+ thread_t *child_thread);
+extern kern_return_t thread_terminate(
+ thread_t thread);
+extern kern_return_t thread_suspend(
+ thread_t thread);
+extern kern_return_t thread_resume(
+ thread_t thread);
+extern kern_return_t thread_abort(
+ thread_t thread);
+extern kern_return_t thread_get_state(
+ thread_t thread,
+ int flavor,
+ thread_state_t old_state,
+ natural_t *old_state_count);
+extern kern_return_t thread_set_state(
+ thread_t thread,
+ int flavor,
+ thread_state_t new_state,
+ natural_t new_state_count);
+extern kern_return_t thread_get_special_port(
+ thread_t thread,
+ int which,
+ struct ipc_port **portp);
+extern kern_return_t thread_set_special_port(
+ thread_t thread,
+ int which,
+ struct ipc_port *port);
+extern kern_return_t thread_info(
+ thread_t thread,
+ int flavor,
+ thread_info_t thread_info_out,
+ natural_t *thread_info_count);
+extern kern_return_t thread_assign(
+ thread_t thread,
+ processor_set_t new_pset);
+extern kern_return_t thread_assign_default(
+ thread_t thread);
+#endif
+
+/*
+ * Kernel-only routines
+ */
+
+extern void thread_init(void);
+extern void thread_reference(thread_t);
+extern void thread_deallocate(thread_t);
+extern void thread_hold(thread_t);
+extern kern_return_t thread_dowait(
+ thread_t thread,
+ boolean_t must_halt);
+extern void thread_release(thread_t);
+extern kern_return_t thread_halt(
+ thread_t thread,
+ boolean_t must_halt);
+extern void thread_halt_self(void);
+extern void thread_force_terminate(thread_t);
+extern void thread_set_own_priority(
+ int priority);
+extern thread_t kernel_thread(
+ task_t task,
+ void (*start)(void),
+ void * arg);
+
+extern void reaper_thread(void);
+
+#if MACH_HOST
+extern void thread_freeze(
+ thread_t thread);
+extern void thread_doassign(
+ thread_t thread,
+ processor_set_t new_pset,
+ boolean_t release_freeze);
+extern void thread_unfreeze(
+ thread_t thread);
+#endif /* MACH_HOST */
+
+/*
+ * Macro-defined routines
+ */
+
+#define thread_pcb(th) ((th)->pcb)
+
+#define thread_lock(th) simple_lock(&(th)->lock)
+#define thread_unlock(th) simple_unlock(&(th)->lock)
+
+#define thread_should_halt(thread) \
+ ((thread)->ast & (AST_HALT|AST_TERMINATE))
+
+/*
+ * Machine specific implementations of the current thread macro
+ * designate this by defining CURRENT_THREAD.
+ */
+#ifndef CURRENT_THREAD
+#define current_thread() (active_threads[cpu_number()])
+#endif /* CURRENT_THREAD */
+
+#define current_stack() (active_stacks[cpu_number()])
+
+#define current_task() (current_thread()->task)
+#define current_space() (current_task()->itk_space)
+#define current_map() (current_task()->map)
+
+#endif /* _KERN_THREAD_H_ */
diff --git a/kern/thread_swap.c b/kern/thread_swap.c
new file mode 100644
index 00000000..173b6ae4
--- /dev/null
+++ b/kern/thread_swap.c
@@ -0,0 +1,190 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ *
+ * File: kern/thread_swap.c
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1987
+ *
+ * Mach thread swapper:
+ * Find idle threads to swap, freeing up kernel stack resources
+ *	at the expense of having to swap them back in before they can run.
+ *
+ * Swap in threads that need to be run. This is done here
+ * by the swapper thread since it cannot be done (in general)
+ * when the kernel tries to place a thread on a run queue.
+ *
+ * Note: The act of swapping a thread in Mach does not mean that
+ * its memory gets forcibly swapped to secondary storage. The memory
+ * for the task corresponding to a swapped thread is paged out
+ * through the normal paging mechanism.
+ *
+ */
+
+#include <ipc/ipc_kmsg.h>
+#include <kern/counters.h>
+#include <kern/thread.h>
+#include <kern/lock.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <mach/vm_param.h>
+#include <kern/sched_prim.h>
+#include <kern/processor.h>
+#include <kern/thread_swap.h>
+#include <machine/machspl.h> /* for splsched */
+
+
+
+queue_head_t swapin_queue;
+decl_simple_lock_data(, swapper_lock_data)
+
+#define swapper_lock() simple_lock(&swapper_lock_data)
+#define swapper_unlock() simple_unlock(&swapper_lock_data)
+
+/*
+ * swapper_init: [exported]
+ *
+ * Initialize the swapper module.
+ */
+void swapper_init()
+{
+ queue_init(&swapin_queue);
+ simple_lock_init(&swapper_lock_data);
+}
+
+/*
+ * thread_swapin: [exported]
+ *
+ * Place the specified thread in the list of threads to swapin. It
+ * is assumed that the thread is locked, therefore we are at splsched.
+ *
+ * We don't bother with stack_alloc_try to optimize swapin;
+ * our callers have already tried that route.
+ */
+
+void thread_swapin(thread)
+ thread_t thread;
+{
+ switch (thread->state & TH_SWAP_STATE) {
+ case TH_SWAPPED:
+ /*
+ * Swapped out - queue for swapin thread.
+ */
+ thread->state = (thread->state & ~TH_SWAP_STATE)
+ | TH_SW_COMING_IN;
+ swapper_lock();
+ enqueue_tail(&swapin_queue, (queue_entry_t) thread);
+ swapper_unlock();
+ thread_wakeup((event_t) &swapin_queue);
+ break;
+
+ case TH_SW_COMING_IN:
+ /*
+ * Already queued for swapin thread, or being
+ * swapped in.
+ */
+ break;
+
+ default:
+ /*
+ * Already swapped in.
+ */
+ panic("thread_swapin");
+ }
+}
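+
+/*
+ * Swap-state transitions handled above, for reference:
+ *
+ *	TH_SWAPPED       --thread_swapin-->    TH_SW_COMING_IN
+ *	TH_SW_COMING_IN  --thread_doswapin-->  (neither bit set)
+ *
+ * A thread with neither bit set already has a kernel stack, so calling
+ * thread_swapin() on it is a coding error and panics.
+ */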
+
+/*
+ * thread_doswapin:
+ *
+ * Swapin the specified thread, if it should be runnable, then put
+ * it on a run queue. No locks should be held on entry, as it is
+ * likely that this routine will sleep (waiting for stack allocation).
+ */
+void thread_doswapin(thread)
+ register thread_t thread;
+{
+ spl_t s;
+
+ /*
+ * Allocate the kernel stack.
+ */
+
+ stack_alloc(thread, thread_continue);
+
+ /*
+ * Place on run queue.
+ */
+
+ s = splsched();
+ thread_lock(thread);
+ thread->state &= ~(TH_SWAPPED | TH_SW_COMING_IN);
+ if (thread->state & TH_RUN)
+ thread_setrun(thread, TRUE);
+ thread_unlock(thread);
+ (void) splx(s);
+}
+
+/*
+ * swapin_thread: [exported]
+ *
+ * This procedure executes as a kernel thread. Threads that need to
+ * be swapped in are swapped in by this thread.
+ */
+void swapin_thread_continue()
+{
+ for (;;) {
+ register thread_t thread;
+ spl_t s;
+
+ s = splsched();
+ swapper_lock();
+
+ while ((thread = (thread_t) dequeue_head(&swapin_queue))
+ != THREAD_NULL) {
+ swapper_unlock();
+ (void) splx(s);
+
+ thread_doswapin(thread); /* may block */
+
+ s = splsched();
+ swapper_lock();
+ }
+
+ assert_wait((event_t) &swapin_queue, FALSE);
+ swapper_unlock();
+ (void) splx(s);
+ counter(c_swapin_thread_block++);
+ thread_block(swapin_thread_continue);
+ }
+}
+
+void swapin_thread()
+{
+ stack_privilege(current_thread());
+
+ swapin_thread_continue();
+ /*NOTREACHED*/
+}
diff --git a/kern/thread_swap.h b/kern/thread_swap.h
new file mode 100644
index 00000000..e390ac4a
--- /dev/null
+++ b/kern/thread_swap.h
@@ -0,0 +1,44 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/thread_swap.h
+ *
+ * Declarations of thread swapping routines.
+ */
+
+#ifndef _KERN_THREAD_SWAP_H_
+#define _KERN_THREAD_SWAP_H_
+
+/*
+ * exported routines
+ */
+extern void swapper_init();
+extern void thread_swapin( /* thread_t thread */ );
+extern void thread_doswapin( /* thread_t thread */ );
+extern void swapin_thread();
+extern void thread_swapout( /* thread_t thread */ );
+
+#endif /* _KERN_THREAD_SWAP_H_ */
diff --git a/kern/time_out.h b/kern/time_out.h
new file mode 100644
index 00000000..4dff7df4
--- /dev/null
+++ b/kern/time_out.h
@@ -0,0 +1,83 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_TIME_OUT_H_
+#define _KERN_TIME_OUT_H_
+
+/*
+ * Mach time-out and time-of-day facility.
+ */
+
+#include <mach/boolean.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <kern/zalloc.h>
+
+/*
+ * Timers in kernel:
+ */
+extern unsigned long elapsed_ticks; /* number of ticks elapsed since bootup */
+extern int hz; /* number of ticks per second */
+extern int tick; /* number of usec per tick */
+
+/*
+ * Time-out element.
+ */
+struct timer_elt {
+ queue_chain_t chain; /* chain in order of expiration */
+ int (*fcn)(); /* function to call */
+ char * param; /* with this parameter */
+ unsigned long ticks; /* expiration time, in ticks */
+ int set; /* unset | set | allocated */
+};
+#define TELT_UNSET 0 /* timer not set */
+#define TELT_SET 1 /* timer set */
+#define TELT_ALLOC 2 /* timer allocated from pool */
+
+typedef struct timer_elt timer_elt_data_t;
+typedef struct timer_elt *timer_elt_t;
+
+/* for 'private' timer elements */
+extern void set_timeout();
+extern boolean_t reset_timeout();
+
+/* for public timer elements */
+extern void timeout();
+extern boolean_t untimeout();
+
+#define set_timeout_setup(telt,fcn,param,interval) \
+ ((telt)->fcn = (fcn), \
+ (telt)->param = (param), \
+ (telt)->private = TRUE, \
+ set_timeout((telt), (interval)))
+
+#define reset_timeout_check(t) \
+ MACRO_BEGIN \
+ if ((t)->set) \
+ reset_timeout((t)); \
+ MACRO_END
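+
+/*
+ * A minimal usage sketch (hypothetical callback; the classic
+ * timeout(fcn, param, interval-in-ticks) calling convention is assumed
+ * here, matching the fcn/param/ticks fields of struct timer_elt above):
+ *
+ *	int my_expire(param)
+ *		char	*param;
+ *	{
+ *		-- runs later, from the clock interrupt path
+ *	}
+ *
+ *	timeout(my_expire, (char *) arg, hz/2);	-- fire in half a second
+ *	...
+ *	if (untimeout(my_expire, (char *) arg))
+ *		-- the callout was still pending and has been cancelled
+ */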
+
+#endif /* _KERN_TIME_OUT_H_ */
diff --git a/kern/time_stamp.c b/kern/time_stamp.c
new file mode 100644
index 00000000..6e22155a
--- /dev/null
+++ b/kern/time_stamp.c
@@ -0,0 +1,74 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/std_types.h>
+#include <sys/time.h>
+#include <kern/time_stamp.h>
+
+/*
+ * ts.c - kern_timestamp system call.
+ */
+#ifdef multimax
+#include <mmax/timer.h>
+#endif /* multimax */
+
+
+
+kern_return_t
+kern_timestamp(tsp)
+struct tsval *tsp;
+{
+#ifdef multimax
+ struct tsval temp;
+ temp.low_val = FRcounter;
+ temp.high_val = 0;
+#else /* multimax */
+/*
+ temp.low_val = 0;
+ temp.high_val = ts_tick_count;
+*/
+ time_value_t temp;
+ temp = time;
+#endif /* multimax */
+
+ if (copyout((char *)&temp,
+ (char *)tsp,
+ sizeof(struct tsval)) != KERN_SUCCESS)
+ return(KERN_INVALID_ADDRESS);
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Initialization procedure.
+ */
+
+void timestamp_init()
+{
+#ifdef multimax
+#else /* multimax */
+ ts_tick_count = 0;
+#endif /* multimax */
+}
diff --git a/kern/time_stamp.h b/kern/time_stamp.h
new file mode 100644
index 00000000..81711f61
--- /dev/null
+++ b/kern/time_stamp.h
@@ -0,0 +1,65 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_TIME_STAMP_H_
+#define _KERN_TIME_STAMP_H_
+
+#include <machine/time_stamp.h>
+/*
+ * time_stamp.h -- definitions for low-overhead timestamps.
+ */
+
+struct tsval {
+ unsigned low_val; /* least significant word */
+ unsigned high_val; /* most significant word */
+};
+
+/*
+ * Format definitions.
+ */
+
+#ifndef TS_FORMAT
+/*
+ * Default case - Just return a tick count for machines that
+ * don't support or haven't implemented this. Assume 100Hz ticks.
+ *
+ * low_val - Always 0.
+ * high_val - tick count.
+ */
+#define TS_FORMAT 1
+
+#if KERNEL
+unsigned ts_tick_count;
+#endif /* KERNEL */
+#endif /* TS_FORMAT */
+
+/*
+ * List of all format definitions for convert_ts_to_tv.
+ */
+
+#define TS_FORMAT_DEFAULT 1
+#define TS_FORMAT_MMAX 2
+#endif /* _KERN_TIME_STAMP_H_ */
diff --git a/kern/timer.c b/kern/timer.c
new file mode 100644
index 00000000..57772ee5
--- /dev/null
+++ b/kern/timer.c
@@ -0,0 +1,525 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <cpus.h>
+#include <stat_time.h>
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/queue.h>
+#include <kern/thread.h>
+#include <mach/time_value.h>
+#include <kern/timer.h>
+#include <kern/cpu_number.h>
+
+#include <kern/assert.h>
+#include <kern/macro_help.h>
+
+
+
+timer_t current_timer[NCPUS];
+timer_data_t kernel_timer[NCPUS];
+
+void timer_init(); /* forward */
+
+/*
+ * init_timers initializes all non-thread timers and puts the
+ * service routine on the callout queue. All timers must be
+ * serviced by the callout routine once an hour.
+ */
+void init_timers()
+{
+ register int i;
+ register timer_t this_timer;
+
+ /*
+ * Initialize all the kernel timers and start the one
+	 * for this cpu (the master); slaves start theirs later.
+ */
+ this_timer = &kernel_timer[0];
+ for ( i=0 ; i<NCPUS ; i++, this_timer++) {
+ timer_init(this_timer);
+ current_timer[i] = (timer_t) 0;
+ }
+
+ start_timer(&kernel_timer[cpu_number()]);
+}
+
+/*
+ * timer_init initializes a single timer.
+ */
+void timer_init(this_timer)
+register
+timer_t this_timer;
+{
+ this_timer->low_bits = 0;
+ this_timer->high_bits = 0;
+ this_timer->tstamp = 0;
+ this_timer->high_bits_check = 0;
+}
+
+#if STAT_TIME
+#else /* STAT_TIME */
+
+#ifdef MACHINE_TIMER_ROUTINES
+
+/*
+ * Machine-dependent code implements the timer routines.
+ */
+
+#else /* MACHINE_TIMER_ROUTINES */
+
+/*
+ * start_timer starts the given timer for this cpu. It is called
+ * exactly once for each cpu during the boot sequence.
+ */
+void
+start_timer(timer)
+timer_t timer;
+{
+ timer->tstamp = get_timestamp();
+ current_timer[cpu_number()] = timer;
+}
+
+/*
+ * time_trap_uentry does trap entry timing. Caller must lock out
+ * interrupts and take a timestamp. ts is a timestamp taken after
+ * interrupts were locked out. Must only be called if trap was
+ * from user mode.
+ */
+void
+time_trap_uentry(ts)
+unsigned ts;
+{
+ int elapsed;
+ int mycpu;
+ timer_t mytimer;
+
+ /*
+ * Calculate elapsed time.
+ */
+ mycpu = cpu_number();
+ mytimer = current_timer[mycpu];
+ elapsed = ts - mytimer->tstamp;
+#ifdef TIMER_MAX
+ if (elapsed < 0) elapsed += TIMER_MAX;
+#endif /* TIMER_MAX */
+
+ /*
+ * Update current timer.
+ */
+ mytimer->low_bits += elapsed;
+ mytimer->tstamp = 0;
+
+ if (mytimer->low_bits & TIMER_LOW_FULL) {
+ timer_normalize(mytimer);
+ }
+
+ /*
+ * Record new timer.
+ */
+ mytimer = &(active_threads[mycpu]->system_timer);
+ current_timer[mycpu] = mytimer;
+ mytimer->tstamp = ts;
+}
+
+/*
+ * time_trap_uexit does trap exit timing. Caller must lock out
+ * interrupts and take a timestamp. ts is a timestamp taken after
+ * interrupts were locked out. Must only be called if returning to
+ * user mode.
+ */
+void
+time_trap_uexit(ts)
+unsigned ts;
+{
+ int elapsed;
+ int mycpu;
+ timer_t mytimer;
+
+ /*
+ * Calculate elapsed time.
+ */
+ mycpu = cpu_number();
+ mytimer = current_timer[mycpu];
+ elapsed = ts - mytimer->tstamp;
+#ifdef TIMER_MAX
+ if (elapsed < 0) elapsed += TIMER_MAX;
+#endif /* TIMER_MAX */
+
+ /*
+ * Update current timer.
+ */
+ mytimer->low_bits += elapsed;
+ mytimer->tstamp = 0;
+
+ if (mytimer->low_bits & TIMER_LOW_FULL) {
+ timer_normalize(mytimer); /* SYSTEMMODE */
+ }
+
+ mytimer = &(active_threads[mycpu]->user_timer);
+
+ /*
+ * Record new timer.
+ */
+ current_timer[mycpu] = mytimer;
+ mytimer->tstamp = ts;
+}
+
+/*
+ * time_int_entry does interrupt entry timing. Caller must lock out
+ * interrupts and take a timestamp. ts is a timestamp taken after
+ * interrupts were locked out. new_timer is the new timer to
+ * switch to. This routine returns the currently running timer,
+ * which MUST be pushed onto the stack by the caller, or otherwise
+ * saved for time_int_exit.
+ */
+timer_t
+time_int_entry(ts,new_timer)
+unsigned ts;
+timer_t new_timer;
+{
+ int elapsed;
+ int mycpu;
+ timer_t mytimer;
+
+ /*
+ * Calculate elapsed time.
+ */
+ mycpu = cpu_number();
+ mytimer = current_timer[mycpu];
+
+ elapsed = ts - mytimer->tstamp;
+#ifdef TIMER_MAX
+ if (elapsed < 0) elapsed += TIMER_MAX;
+#endif /* TIMER_MAX */
+
+ /*
+ * Update current timer.
+ */
+ mytimer->low_bits += elapsed;
+ mytimer->tstamp = 0;
+
+ /*
+ * Switch to new timer, and save old one on stack.
+ */
+ new_timer->tstamp = ts;
+ current_timer[mycpu] = new_timer;
+ return(mytimer);
+}
+
+/*
+ * time_int_exit does interrupt exit timing. Caller must lock out
+ * interrupts and take a timestamp. ts is a timestamp taken after
+ * interrupts were locked out. old_timer is the timer value pushed
+ * onto the stack or otherwise saved after time_int_entry returned
+ * it.
+ */
+void
+time_int_exit(ts, old_timer)
+unsigned ts;
+timer_t old_timer;
+{
+ int elapsed;
+ int mycpu;
+ timer_t mytimer;
+
+ /*
+ * Calculate elapsed time.
+ */
+ mycpu = cpu_number();
+ mytimer = current_timer[mycpu];
+ elapsed = ts - mytimer->tstamp;
+#ifdef TIMER_MAX
+ if (elapsed < 0) elapsed += TIMER_MAX;
+#endif /* TIMER_MAX */
+
+ /*
+ * Update current timer.
+ */
+ mytimer->low_bits += elapsed;
+ mytimer->tstamp = 0;
+
+ /*
+ * If normalization requested, do it.
+ */
+ if (mytimer->low_bits & TIMER_LOW_FULL) {
+ timer_normalize(mytimer);
+ }
+ if (old_timer->low_bits & TIMER_LOW_FULL) {
+ timer_normalize(old_timer);
+ }
+
+ /*
+ * Start timer that was running before interrupt.
+ */
+ old_timer->tstamp = ts;
+ current_timer[mycpu] = old_timer;
+}
+
+/*
+ * timer_switch switches to a new timer. The machine
+ * dependent routine/macro get_timestamp must return a timestamp.
+ * Caller must lock out interrupts.
+ */
+void
+timer_switch(new_timer)
+timer_t new_timer;
+{
+ int elapsed;
+ int mycpu;
+ timer_t mytimer;
+ unsigned ts;
+
+ /*
+ * Calculate elapsed time.
+ */
+ mycpu = cpu_number();
+ mytimer = current_timer[mycpu];
+ ts = get_timestamp();
+ elapsed = ts - mytimer->tstamp;
+#ifdef TIMER_MAX
+ if (elapsed < 0) elapsed += TIMER_MAX;
+#endif /* TIMER_MAX */
+
+ /*
+ * Update current timer.
+ */
+ mytimer->low_bits += elapsed;
+ mytimer->tstamp = 0;
+
+ /*
+ * Normalization check
+ */
+ if (mytimer->low_bits & TIMER_LOW_FULL) {
+ timer_normalize(mytimer);
+ }
+
+ /*
+ * Record new timer.
+ */
+ current_timer[mycpu] = new_timer;
+ new_timer->tstamp = ts;
+}
+
+#endif /* MACHINE_TIMER_ROUTINES */
+#endif /* STAT_TIME */
+
+/*
+ * timer_normalize normalizes the value of a timer. It is
+ * called only rarely, to make sure low_bits never overflows.
+ */
+void timer_normalize(timer)
+register
+timer_t timer;
+{
+ unsigned int high_increment;
+
+ /*
+ * Calculate high_increment, then write high check field first
+ * followed by low and high. timer_grab() reads these fields in
+ * reverse order so if high and high check match, we know
+ * that the values read are ok.
+ */
+
+ high_increment = timer->low_bits/TIMER_HIGH_UNIT;
+ timer->high_bits_check += high_increment;
+ timer->low_bits %= TIMER_HIGH_UNIT;
+ timer->high_bits += high_increment;
+}
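+
+/*
+ * Worked example: with TIMER_HIGH_UNIT == 1000000 (the STAT_TIME
+ * configuration, i.e. microseconds in low_bits and seconds in
+ * high_bits), a timer whose low_bits has reached 3500000 normalizes to
+ * high_increment == 3, low_bits == 500000, and high_bits raised by 3.
+ * high_bits_check is bumped first so that a reader racing with the
+ * update can detect it; see timer_grab() below.
+ */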
+
+/*
+ * timer_grab() retrieves the value of a timer.
+ *
+ * Critical scheduling code uses TIMER_DELTA macro in timer.h
+ * (called from thread_timer_delta in sched.h).
+ *
+ * Keep coherent with db_time_grab below.
+ */
+
+static void timer_grab(timer, save)
+timer_t timer;
+timer_save_t save;
+{
+#if MACH_ASSERT
+ unsigned int passes=0;
+#endif
+ do {
+ (save)->high = (timer)->high_bits;
+ (save)->low = (timer)->low_bits;
+ /*
+ * If the timer was normalized while we were doing this,
+ * the high_bits value read above and the high_bits check
+ * value will not match because high_bits_check is the first
+ * field touched by the normalization procedure, and
+ * high_bits is the last.
+ *
+ * Additions to timer only touch low bits and
+ * are therefore atomic with respect to this.
+ */
+#if MACH_ASSERT
+ passes++;
+ assert((passes < 10000) ? (1) : ((timer->high_bits_check = save->high), 0));
+#endif
+ } while ( (save)->high != (timer)->high_bits_check);
+}
+
+/*
+ *
+ *	Db_timer_grab(): used by db_thread_read_times.  A nonblocking
+ *	version of timer_grab.  Keep coherent with timer_grab
+ * above.
+ *
+ */
+void db_timer_grab(timer, save)
+timer_t timer;
+timer_save_t save;
+{
+ /* Don't worry about coherency */
+
+ (save)->high = (timer)->high_bits;
+ (save)->low = (timer)->low_bits;
+}
+
+
+/*
+ * timer_read reads the value of a timer into a time_value_t. If the
+ * timer was modified during the read, retry. The value returned
+ * is accurate to the last update; time accumulated by a running
+ * timer since its last timestamp is not included.
+ */
+
+void
+timer_read(timer, tv)
+timer_t timer;
+register
+time_value_t *tv;
+{
+ timer_save_data_t temp;
+
+ timer_grab(timer,&temp);
+ /*
+ * Normalize the result
+ */
+#ifdef TIMER_ADJUST
+ TIMER_ADJUST(&temp);
+#endif /* TIMER_ADJUST */
+ tv->seconds = temp.high + temp.low/1000000;
+ tv->microseconds = temp.low%1000000;
+
+}
+
+/*
+ * thread_read_times reads the user and system times from a thread.
+ * Time accumulated since last timestamp is not included. Should
+ * be called at splsched() to avoid having user and system times
+ * be out of step. Doesn't care if caller locked thread.
+ *
+ *	Needs to be kept coherent with db_thread_read_times below.
+ */
+void thread_read_times(thread, user_time_p, system_time_p)
+ thread_t thread;
+ time_value_t *user_time_p;
+ time_value_t *system_time_p;
+{
+ timer_save_data_t temp;
+ register timer_t timer;
+
+ timer = &thread->user_timer;
+ timer_grab(timer, &temp);
+
+#ifdef TIMER_ADJUST
+ TIMER_ADJUST(&temp);
+#endif /* TIMER_ADJUST */
+ user_time_p->seconds = temp.high + temp.low/1000000;
+ user_time_p->microseconds = temp.low % 1000000;
+
+ timer = &thread->system_timer;
+ timer_grab(timer, &temp);
+
+#ifdef TIMER_ADJUST
+ TIMER_ADJUST(&temp);
+#endif /* TIMER_ADJUST */
+ system_time_p->seconds = temp.high + temp.low/1000000;
+ system_time_p->microseconds = temp.low % 1000000;
+}
+
+/*
+ * Db_thread_read_times: A version of thread_read_times that
+ * can be called by the debugger. This version does not call
+ * timer_grab, which can block. Please keep it up to date with
+ * thread_read_times above.
+ *
+ */
+void db_thread_read_times(thread, user_time_p, system_time_p)
+ thread_t thread;
+ time_value_t *user_time_p;
+ time_value_t *system_time_p;
+{
+ timer_save_data_t temp;
+ register timer_t timer;
+
+ timer = &thread->user_timer;
+ db_timer_grab(timer, &temp);
+
+#ifdef TIMER_ADJUST
+ TIMER_ADJUST(&temp);
+#endif /* TIMER_ADJUST */
+ user_time_p->seconds = temp.high + temp.low/1000000;
+ user_time_p->microseconds = temp.low % 1000000;
+
+ timer = &thread->system_timer;
+	db_timer_grab(timer, &temp);
+
+#ifdef TIMER_ADJUST
+ TIMER_ADJUST(&temp);
+#endif /* TIMER_ADJUST */
+ system_time_p->seconds = temp.high + temp.low/1000000;
+ system_time_p->microseconds = temp.low % 1000000;
+}
+
+/*
+ * timer_delta takes the difference of a saved timer value
+ * and the current one, and updates the saved value to current.
+ * The difference is returned as a function value. See
+ * TIMER_DELTA macro (timer.h) for optimization to this.
+ */
+
+unsigned
+timer_delta(timer, save)
+register
+timer_t timer;
+timer_save_t save;
+{
+ timer_save_data_t new_save;
+ register unsigned result;
+
+ timer_grab(timer,&new_save);
+ result = (new_save.high - save->high) * TIMER_HIGH_UNIT +
+ new_save.low - save->low;
+ save->high = new_save.high;
+ save->low = new_save.low;
+ return(result);
+}
diff --git a/kern/timer.h b/kern/timer.h
new file mode 100644
index 00000000..fe60e268
--- /dev/null
+++ b/kern/timer.h
@@ -0,0 +1,157 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KERN_TIMER_H_
+#define _KERN_TIMER_H_
+
+#include <cpus.h>
+#include <stat_time.h>
+
+#include <kern/macro_help.h>
+
+#if STAT_TIME
+/*
+ * Statistical timer definitions - use microseconds in timer, seconds
+ * in high unit field. No adjustment needed to convert to time_value_t
+ * as a result. Service timers once an hour.
+ */
+
+#define TIMER_RATE 1000000
+#define TIMER_HIGH_UNIT TIMER_RATE
+#undef TIMER_ADJUST
+
+#else /* STAT_TIME */
+/*
+ * Machine dependent definitions based on hardware support.
+ */
+
+#include <machine/timer.h>
+
+#endif /* STAT_TIME */
+
+/*
+ * Definitions for accurate timers. high_bits_check is a copy of
+ * high_bits that allows reader to verify that values read are ok.
+ */
+
+struct timer {
+ unsigned low_bits;
+ unsigned high_bits;
+ unsigned high_bits_check;
+ unsigned tstamp;
+};
+
+typedef struct timer timer_data_t;
+typedef struct timer *timer_t;
+
+/*
+ * Mask to check if low_bits is in danger of overflowing
+ */
+
+#define TIMER_LOW_FULL 0x80000000U
+
+/*
+ * Kernel timers and current timer array. [Exported]
+ */
+
+extern timer_t current_timer[NCPUS];
+extern timer_data_t kernel_timer[NCPUS];
+
+/*
+ * save structure for timer readings. This is used to save timer
+ * readings for elapsed time computations.
+ */
+
+struct timer_save {
+ unsigned low;
+ unsigned high;
+};
+
+typedef struct timer_save timer_save_data_t, *timer_save_t;
+
+/*
+ * Exported kernel interface to timers
+ */
+
+#if STAT_TIME
+#define start_timer(timer)
+#define timer_switch(timer)
+#else /* STAT_TIME */
+extern void start_timer();
+extern void timer_switch();
+#endif /* STAT_TIME */
+
+extern void timer_read();
+extern void thread_read_times();
+extern unsigned timer_delta();
+
+#if STAT_TIME
+/*
+ * Macro to bump timer values.
+ */
+#define timer_bump(timer, usec) \
+MACRO_BEGIN \
+ (timer)->low_bits += usec; \
+ if ((timer)->low_bits & TIMER_LOW_FULL) { \
+ timer_normalize(timer); \
+ } \
+MACRO_END
+
+#else /* STAT_TIME */
+/*
+ * Exported hardware interface to timers
+ */
+extern void time_trap_uentry();
+extern void time_trap_uexit();
+extern timer_t time_int_entry();
+extern void time_int_exit();
+#endif /* STAT_TIME */
+
+/*
+ * TIMER_DELTA finds the difference between a timer and a saved value,
+ * and updates the saved value. Look at high_bits check field after
+ * reading low because that's the first written by a normalize
+ * operation; this isn't necessary for current usage because
+ * this macro is only used when the timer can't be normalized:
+ * thread is not running, or running thread calls it on itself at
+ * splsched().
+ */
+
+#define TIMER_DELTA(timer, save, result) \
+MACRO_BEGIN \
+ register unsigned temp; \
+ \
+ temp = (timer).low_bits; \
+ if ((save).high != (timer).high_bits_check) { \
+ result += timer_delta(&(timer), &(save)); \
+ } \
+ else { \
+ result += temp - (save).low; \
+ (save).low = temp; \
+ } \
+MACRO_END
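+
+/*
+ * Typical use of TIMER_DELTA, sketching the pattern used by
+ * thread_timer_delta (see kern/sched.h); the save fields are the ones
+ * declared in struct thread:
+ *
+ *	unsigned int delta = 0;
+ *
+ *	TIMER_DELTA(thread->system_timer, thread->system_timer_save, delta);
+ *	TIMER_DELTA(thread->user_timer, thread->user_timer_save, delta);
+ *
+ * delta now holds the cpu time accumulated since the saved readings,
+ * and both save structures have been updated to the current values.
+ */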
+
+#endif /* _KERN_TIMER_H_ */
diff --git a/kern/xpr.c b/kern/xpr.c
new file mode 100644
index 00000000..eb8d6be0
--- /dev/null
+++ b/kern/xpr.c
@@ -0,0 +1,192 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach_kdb.h>
+/*
+ * xpr silent tracing circular buffer.
+ */
+#include <kern/xpr.h>
+#include <kern/lock.h>
+#include "cpu_number.h"
+#include <machine/machspl.h>
+#include <vm/vm_kern.h>
+
+
+/*
+ * After a spontaneous reboot, it is desirable to look
+ * at the old xpr buffer. Assuming xprbootstrap allocates
+ * the buffer in the same place in physical memory and
+ * the reboot doesn't clear memory, this should work.
+ * xprptr will be reset, but the saved value should be OK.
+ * Just set xprenable false so the buffer isn't overwritten.
+ */
+
+decl_simple_lock_data(, xprlock)
+
+boolean_t xprenable = TRUE; /* Enable xpr tracing */
+int nxprbufs = 0; /* Number of contiguous xprbufs allocated */
+int xprflags = 0; /* Bit mask of xpr flags enabled */
+struct xprbuf *xprbase; /* Pointer to circular buffer, nxprbufs*sizeof(struct xprbuf) bytes */
+struct xprbuf *xprptr; /* Next xprbuf slot to use */
+struct xprbuf *xprlast; /* Pointer to end of circular buffer */
+
+/*VARARGS1*/
+void xpr(msg, arg1, arg2, arg3, arg4, arg5)
+char *msg;
+int arg1, arg2, arg3, arg4, arg5;
+{
+ register spl_t s;
+ register struct xprbuf *x;
+
+ /* If we aren't initialized, ignore trace request */
+ if (!xprenable || (xprptr == 0))
+ return;
+ /* Guard against all interrupts and allocate next buffer. */
+ s = splhigh();
+ simple_lock(&xprlock);
+ x = xprptr++;
+ if (xprptr >= xprlast) {
+ /* wrap around */
+ xprptr = xprbase;
+ }
+ /* Save xprptr in allocated memory. */
+ *(struct xprbuf **)xprlast = xprptr;
+ simple_unlock(&xprlock);
+ splx(s);
+ x->msg = msg;
+ x->arg1 = arg1;
+ x->arg2 = arg2;
+ x->arg3 = arg3;
+ x->arg4 = arg4;
+ x->arg5 = arg5;
+ x->timestamp = XPR_TIMESTAMP;
+ x->cpuinfo = cpu_number();
+}
+
+void xprbootstrap()
+{
+ vm_offset_t addr;
+ vm_size_t size;
+ kern_return_t kr;
+
+ simple_lock_init(&xprlock);
+ if (nxprbufs == 0)
+ return; /* assume XPR support not desired */
+
+ /* leave room at the end for a saved copy of xprptr */
+ size = nxprbufs * sizeof(struct xprbuf) + sizeof xprptr;
+
+ kr = kmem_alloc_wired(kernel_map, &addr, size);
+ if (kr != KERN_SUCCESS)
+ panic("xprbootstrap");
+
+ if (xprenable) {
+ /*
+ * If xprenable is set (the default) then we zero
+ * the buffer so xpr_dump doesn't encounter bad pointers.
+ * If xprenable isn't set, then we preserve
+ * the original contents of the buffer. This is useful
+ * if memory survives reboots, so xpr_dump can show
+ * the previous buffer contents.
+ */
+
+ bzero((char *) addr, size);
+ }
+
+ xprbase = (struct xprbuf *) addr;
+ xprlast = &xprbase[nxprbufs];
+ xprptr = xprbase; /* setting xprptr enables tracing */
+}
+
+int xprinitial = 0;
+
+void xprinit()
+{
+ xprflags |= xprinitial;
+}
+
+#if MACH_KDB
+#include <machine/setjmp.h>
+
+
+extern void db_printf();
+extern jmp_buf_t *db_recover;
+
+/*
+ * Print the current contents of the xpr buffer (for KDB's sake).
+ * Use stack order (most recent entry first) to make it understandable.
+ *
+ * Called as "!xpr_dump" this dumps the kernel's xpr buffer.
+ * Called with arguments, it can dump xpr buffers in user tasks,
+ * assuming they use the same format as the kernel.
+ */
+void xpr_dump(base, nbufs)
+ struct xprbuf *base;
+ int nbufs;
+{
+ jmp_buf_t db_jmpbuf;
+ jmp_buf_t *prev;
+ struct xprbuf *last, *ptr;
+ register struct xprbuf *x;
+ int i;
+ spl_t s;
+
+ if (base == 0) {
+ base = xprbase;
+ nbufs = nxprbufs;
+ }
+
+ if (nbufs == 0)
+ return;
+
+ if (base == xprbase) {
+ s = splhigh();
+ simple_lock(&xprlock);
+ }
+
+ last = base + nbufs;
+ ptr = * (struct xprbuf **) last;
+
+ prev = db_recover;
+ if (_setjmp(db_recover = &db_jmpbuf) == 0)
+ for (x = ptr, i = 0; i < nbufs; i++) {
+ if (--x < base)
+ x = last - 1;
+
+ if (x->msg == 0)
+ break;
+
+ db_printf("<%d:%x:%x> ", x - base, x->cpuinfo, x->timestamp);
+ db_printf(x->msg, x->arg1,x->arg2,x->arg3,x->arg4,x->arg5);
+ }
+ db_recover = prev;
+
+ if (base == xprbase) {
+ simple_unlock(&xprlock);
+ (void) splx(s);
+ }
+}
+#endif /* MACH_KDB */
diff --git a/kern/xpr.h b/kern/xpr.h
new file mode 100644
index 00000000..5a95555f
--- /dev/null
+++ b/kern/xpr.h
@@ -0,0 +1,101 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Include file for xpr circular buffer silent tracing.
+ *
+ */
+/*
+ * If the kernel flag XPR_DEBUG is set, the XPR macro is enabled.  The
+ * macro should be invoked something like the following:
+ *	XPR(XPR_SYSCALLS, ("syscall: %d, 0x%x\n", syscallno, arg1));
+ * which will expand into the following code:
+ *	if (xprflags & XPR_SYSCALLS)
+ *		xpr("syscall: %d, 0x%x\n", syscallno, arg1);
+ * Xpr will log the pointer to the printf string and up to five arguments,
+ * along with a timestamp and cpuinfo (for multi-processor systems), into
+ * a circular buffer. The actual printf processing is delayed until after
+ * the buffer has been collected. It is assumed that the text/data segments
+ * of the kernel can easily be reconstructed in a post-processor which
+ * performs the printf processing.
+ *
+ * If the XPR_DEBUG compilation switch is not set, the XPR macro expands
+ * to nothing.
+ */
+
+#ifndef _KERN_XPR_H_
+#define _KERN_XPR_H_
+
+#ifdef KERNEL
+#include <xpr_debug.h>
+#else /* KERNEL */
+#include <sys/features.h>
+#endif /* KERNEL */
+
+#include <machine/xpr.h>
+
+#if XPR_DEBUG
+
+#define XPR(flags,xprargs) if(xprflags&flags) xpr xprargs
+
+extern int xprflags;
+/*
+ * flags for message types.
+ */
+#define XPR_SYSCALLS 0x00000001
+#define XPR_TRAPS 0x00000002
+#define XPR_SCHED 0x00000004
+#define XPR_NPTCP 0x00000008
+#define XPR_NP 0x00000010
+#define XPR_TCP 0x00000020
+
+#define XPR_VM_OBJECT (1 << 8)
+#define XPR_VM_OBJECT_CACHE (1 << 9)
+#define XPR_VM_PAGE (1 << 10)
+#define XPR_VM_PAGEOUT (1 << 11)
+#define XPR_MEMORY_OBJECT (1 << 12)
+#define XPR_VM_FAULT (1 << 13)
+#define XPR_INODE_PAGER (1 << 14)
+#define XPR_INODE_PAGER_DATA (1 << 15)
+
+#else /* XPR_DEBUG */
+#define XPR(flags,xprargs)
+#endif /* XPR_DEBUG */
+
+struct xprbuf {
+ char *msg;
+ int arg1,arg2,arg3,arg4,arg5;
+ int timestamp;
+ int cpuinfo;
+};
+
+#ifndef WANT_PROTOTYPES
+extern void xpr();
+#endif
+extern void xpr_dump();
+extern void xprinit();
+extern void xprbootstrap();
+
+#endif /* _KERN_XPR_H_ */
diff --git a/kern/zalloc.c b/kern/zalloc.c
new file mode 100644
index 00000000..a6421cd0
--- /dev/null
+++ b/kern/zalloc.c
@@ -0,0 +1,971 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern/zalloc.c
+ * Author: Avadis Tevanian, Jr.
+ *
+ * Zone-based memory allocator. A zone is a collection of fixed size
+ * data blocks for which quick allocation/deallocation is possible.
+ */
+
+#include <kern/macro_help.h>
+#include <kern/sched.h>
+#include <kern/time_out.h>
+#include <kern/zalloc.h>
+#include <mach/vm_param.h>
+#include <vm/vm_kern.h>
+#include <machine/machspl.h>
+
+#include <mach_debug.h>
+#if MACH_DEBUG
+#include <mach/kern_return.h>
+#include <mach/machine/vm_types.h>
+#include <mach_debug/zone_info.h>
+#include <kern/host.h>
+#include <vm/vm_map.h>
+#include <vm/vm_user.h>
+#include <vm/vm_kern.h>
+#endif
+
+#define ADD_TO_ZONE(zone, element) \
+MACRO_BEGIN \
+ *((vm_offset_t *)(element)) = (zone)->free_elements; \
+ (zone)->free_elements = (vm_offset_t) (element); \
+ zone_count_down(zone); \
+MACRO_END
+
+#define REMOVE_FROM_ZONE(zone, ret, type) \
+MACRO_BEGIN \
+ (ret) = (type) (zone)->free_elements; \
+ if ((ret) != (type) 0) { \
+ zone_count_up(zone); \
+ (zone)->free_elements = *((vm_offset_t *)(ret)); \
+ } \
+MACRO_END
+
+/*
+ * Support for garbage collection of unused zone pages:
+ */
+
+struct zone_page_table_entry {
+ struct zone_page_table_entry *next;
+ short in_free_list;
+ short alloc_count;
+};
+
+extern struct zone_page_table_entry * zone_page_table;
+extern vm_offset_t zone_map_min_address;
+
+#define lock_zone_page_table() simple_lock(&zone_page_table_lock)
+#define unlock_zone_page_table() simple_unlock(&zone_page_table_lock)
+
+#define zone_page(addr) \
+ (&(zone_page_table[(atop(((vm_offset_t)addr) - zone_map_min_address))]))
+
+
+extern void zone_page_alloc();
+extern void zone_page_dealloc();
+extern void zone_page_in_use();
+extern void zone_page_free();
+
+zone_t zone_zone; /* this is the zone containing other zones */
+
+boolean_t zone_ignore_overflow = TRUE;
+
+vm_map_t zone_map = VM_MAP_NULL;
+vm_size_t zone_map_size = 12 * 1024 * 1024;
+
+/*
+ * The VM system gives us an initial chunk of memory.
+ * It has to be big enough to allocate the zone_zone
+ * and some initial kernel data structures, like kernel maps.
+ * It is advantageous to make it bigger than really necessary,
+ * because this memory is more efficient than normal kernel
+ * virtual memory. (It doesn't have vm_page structures backing it
+ * and it may have other machine-dependent advantages.)
+ * So for best performance, zdata_size should approximate
+ * the amount of memory you expect the zone system to consume.
+ */
+
+vm_offset_t zdata;
+vm_size_t zdata_size = 420 * 1024;
+
+#define zone_lock(zone) \
+MACRO_BEGIN \
+ if (zone->type & ZONE_PAGEABLE) { \
+ lock_write(&zone->complex_lock); \
+ } else { \
+ simple_lock(&zone->lock); \
+ } \
+MACRO_END
+
+#define zone_unlock(zone) \
+MACRO_BEGIN \
+ if (zone->type & ZONE_PAGEABLE) { \
+ lock_done(&zone->complex_lock); \
+ } else { \
+ simple_unlock(&zone->lock); \
+ } \
+MACRO_END
+
+#define zone_lock_init(zone) \
+MACRO_BEGIN \
+ if (zone->type & ZONE_PAGEABLE) { \
+ lock_init(&zone->complex_lock, TRUE); \
+ } else { \
+ simple_lock_init(&zone->lock); \
+ } \
+MACRO_END
+
+static vm_offset_t zget_space();
+
+decl_simple_lock_data(,zget_space_lock)
+vm_offset_t zalloc_next_space;
+vm_offset_t zalloc_end_of_space;
+vm_size_t zalloc_wasted_space;
+
+/*
+ * Garbage collection map information
+ */
+decl_simple_lock_data(,zone_page_table_lock)
+struct zone_page_table_entry * zone_page_table;
+vm_offset_t zone_map_min_address;
+vm_offset_t zone_map_max_address;
+int zone_pages;
+
+extern void zone_page_init();
+
+#define ZONE_PAGE_USED 0
+#define ZONE_PAGE_UNUSED -1
+
+
+/*
+ * Protects first_zone, last_zone, num_zones,
+ * and the next_zone field of zones.
+ */
+decl_simple_lock_data(,all_zones_lock)
+zone_t first_zone;
+zone_t *last_zone;
+int num_zones;
+
+/*
+ * zinit initializes a new zone. The zone data structures themselves
+ * are stored in a zone, which is initially a static structure that
+ * is initialized by zone_init.
+ */
+zone_t zinit(size, max, alloc, memtype, name)
+ vm_size_t size; /* the size of an element */
+ vm_size_t max; /* maximum memory to use */
+ vm_size_t alloc; /* allocation size */
+ unsigned int memtype; /* flags specifying type of memory */
+ char *name; /* a name for the zone */
+{
+ register zone_t z;
+
+ if (zone_zone == ZONE_NULL)
+ z = (zone_t) zget_space(sizeof(struct zone));
+ else
+ z = (zone_t) zalloc(zone_zone);
+ if (z == ZONE_NULL)
+ panic("zinit");
+
+ if (alloc == 0)
+ alloc = PAGE_SIZE;
+
+ if (size == 0)
+ size = sizeof(z->free_elements);
+ /*
+ * Round off all the parameters appropriately.
+ */
+
+ if ((max = round_page(max)) < (alloc = round_page(alloc)))
+ max = alloc;
+
+ z->free_elements = 0;
+ z->cur_size = 0;
+ z->max_size = max;
+ z->elem_size = ((size-1) + sizeof(z->free_elements)) -
+ ((size-1) % sizeof(z->free_elements));
+
+ z->alloc_size = alloc;
+ z->type = memtype;
+ z->zone_name = name;
+#ifdef ZONE_COUNT
+ z->count = 0;
+#endif
+ z->doing_alloc = FALSE;
+ zone_lock_init(z);
+
+ /*
+ * Add the zone to the all-zones list.
+ */
+
+ z->next_zone = ZONE_NULL;
+ simple_lock(&all_zones_lock);
+ *last_zone = z;
+ last_zone = &z->next_zone;
+ num_zones++;
+ simple_unlock(&all_zones_lock);
+
+ return(z);
+}
+
+/*
+ * Cram the given memory into the specified zone.
+ */
+void zcram(zone_t zone, vm_offset_t newmem, vm_size_t size)
+{
+ register vm_size_t elem_size;
+
+ if (newmem == (vm_offset_t) 0) {
+ panic("zcram - memory at zero");
+ }
+ elem_size = zone->elem_size;
+
+ zone_lock(zone);
+ while (size >= elem_size) {
+ ADD_TO_ZONE(zone, newmem);
+ zone_page_alloc(newmem, elem_size);
+ zone_count_up(zone); /* compensate for ADD_TO_ZONE */
+ size -= elem_size;
+ newmem += elem_size;
+ zone->cur_size += elem_size;
+ }
+ zone_unlock(zone);
+}
+
+/*
+ * Contiguous space allocator for non-paged zones. Allocates "size" amount
+ * of memory from zone_map.
+ */
+
+static vm_offset_t zget_space(vm_offset_t size)
+{
+ vm_offset_t new_space = 0;
+ vm_offset_t result;
+ vm_size_t space_to_add = 0; /*'=0' to quiet gcc warnings */
+
+ simple_lock(&zget_space_lock);
+ while ((zalloc_next_space + size) > zalloc_end_of_space) {
+ /*
+ * Add at least one page to allocation area.
+ */
+
+ space_to_add = round_page(size);
+
+ if (new_space == 0) {
+ /*
+ * Memory cannot be wired down while holding
+ * any locks that the pageout daemon might
+ * need to free up pages. [Making the zget_space
+ * lock a complex lock does not help in this
+ * regard.]
+ *
+ * Unlock and allocate memory. Because several
+ * threads might try to do this at once, don't
+ * use the memory before checking for available
+ * space again.
+ */
+
+ simple_unlock(&zget_space_lock);
+
+ if (kmem_alloc_wired(zone_map,
+ &new_space, space_to_add)
+ != KERN_SUCCESS)
+ return(0);
+ zone_page_init(new_space, space_to_add,
+ ZONE_PAGE_USED);
+ simple_lock(&zget_space_lock);
+ continue;
+ }
+
+
+ /*
+ * Memory was allocated in a previous iteration.
+ *
+ * Check whether the new region is contiguous
+ * with the old one.
+ */
+
+ if (new_space != zalloc_end_of_space) {
+ /*
+ * Throw away the remainder of the
+ * old space, and start a new one.
+ */
+ zalloc_wasted_space +=
+ zalloc_end_of_space - zalloc_next_space;
+ zalloc_next_space = new_space;
+ }
+
+ zalloc_end_of_space = new_space + space_to_add;
+
+ new_space = 0;
+ }
+ result = zalloc_next_space;
+ zalloc_next_space += size;
+ simple_unlock(&zget_space_lock);
+
+ if (new_space != 0)
+ kmem_free(zone_map, new_space, space_to_add);
+
+ return(result);
+}
+
+
+/*
+ * Initialize the "zone of zones" which uses fixed memory allocated
+ * earlier in memory initialization. zone_bootstrap is called
+ * before zone_init.
+ */
+void zone_bootstrap()
+{
+ simple_lock_init(&all_zones_lock);
+ first_zone = ZONE_NULL;
+ last_zone = &first_zone;
+ num_zones = 0;
+
+ simple_lock_init(&zget_space_lock);
+ zalloc_next_space = zdata;
+ zalloc_end_of_space = zdata + zdata_size;
+ zalloc_wasted_space = 0;
+
+ zone_zone = ZONE_NULL;
+ zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
+ sizeof(struct zone), 0, "zones");
+}
+
+void zone_init()
+{
+ vm_offset_t zone_min;
+ vm_offset_t zone_max;
+
+ vm_size_t zone_table_size;
+
+ zone_map = kmem_suballoc(kernel_map, &zone_min, &zone_max,
+ zone_map_size, FALSE);
+
+ /*
+ * Setup garbage collection information:
+ */
+
+ zone_table_size = atop(zone_max - zone_min) *
+ sizeof(struct zone_page_table_entry);
+ if (kmem_alloc_wired(zone_map, (vm_offset_t *) &zone_page_table,
+ zone_table_size) != KERN_SUCCESS)
+ panic("zone_init");
+ zone_min = (vm_offset_t)zone_page_table + round_page(zone_table_size);
+ zone_pages = atop(zone_max - zone_min);
+ zone_map_min_address = zone_min;
+ zone_map_max_address = zone_max;
+ simple_lock_init(&zone_page_table_lock);
+ zone_page_init(zone_min, zone_max - zone_min, ZONE_PAGE_UNUSED);
+}
+
+
+/*
+ * zalloc returns an element from the specified zone.
+ */
+vm_offset_t zalloc(zone_t zone)
+{
+ vm_offset_t addr;
+
+ if (zone == ZONE_NULL)
+ panic ("zalloc: null zone");
+
+ check_simple_locks();
+
+ zone_lock(zone);
+ REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ while (addr == 0) {
+ /*
+ * If nothing was there, try to get more
+ */
+ if (zone->doing_alloc) {
+ /*
+ * Someone is allocating memory for this zone.
+ * Wait for it to show up, then try again.
+ */
+ assert_wait((event_t)&zone->doing_alloc, TRUE);
+ /* XXX say wakeup needed */
+ zone_unlock(zone);
+ thread_block((void (*)()) 0);
+ zone_lock(zone);
+ }
+ else {
+ if ((zone->cur_size + (zone->type & ZONE_PAGEABLE ?
+ zone->alloc_size : zone->elem_size)) >
+ zone->max_size) {
+ if (zone->type & ZONE_EXHAUSTIBLE)
+ break;
+ /*
+ * Printf calls logwakeup, which calls
+ * select_wakeup, which will do a zfree
+ * (which tries to take the select_zone
+ * lock)... Hang.  Release the lock now
+ * so it can be taken again later.
+ * NOTE: this used to be specific to
+ * the select_zone, but for
+ * cleanliness, we just unlock all
+ * zones before this.
+ */
+ if (!(zone->type & ZONE_FIXED)) {
+ /*
+ * We're willing to overflow certain
+ * zones, but not without complaining.
+ *
+ * This is best used in conjunction
+ * with the collectable flag.  What we
+ * want is an assurance we can get the
+ * memory back, assuming there's no
+ * leak.
+ */
+ zone->max_size += (zone->max_size >> 1);
+ } else if (!zone_ignore_overflow) {
+ zone_unlock(zone);
+ printf("zone \"%s\" empty.\n",
+ zone->zone_name);
+ panic("zalloc");
+ }
+ }
+
+ if (zone->type & ZONE_PAGEABLE)
+ zone->doing_alloc = TRUE;
+ zone_unlock(zone);
+
+ if (zone->type & ZONE_PAGEABLE) {
+ if (kmem_alloc_pageable(zone_map, &addr,
+ zone->alloc_size)
+ != KERN_SUCCESS)
+ panic("zalloc");
+ zcram(zone, addr, zone->alloc_size);
+ zone_lock(zone);
+ zone->doing_alloc = FALSE;
+ /* XXX check before doing this */
+ thread_wakeup((event_t)&zone->doing_alloc);
+
+ REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ } else if (zone->type & ZONE_COLLECTABLE) {
+ if (kmem_alloc_wired(zone_map,
+ &addr, zone->alloc_size)
+ != KERN_SUCCESS)
+ panic("zalloc");
+ zone_page_init(addr, zone->alloc_size,
+ ZONE_PAGE_USED);
+ zcram(zone, addr, zone->alloc_size);
+ zone_lock(zone);
+ REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ } else {
+ addr = zget_space(zone->elem_size);
+ if (addr == 0)
+ panic("zalloc");
+
+ zone_lock(zone);
+ zone_count_up(zone);
+ zone->cur_size += zone->elem_size;
+ zone_unlock(zone);
+ zone_page_alloc(addr, zone->elem_size);
+ return(addr);
+ }
+ }
+ }
+
+ zone_unlock(zone);
+ return(addr);
+}
+
+
+/*
+ * zget returns an element from the specified zone
+ * and immediately returns nothing if there is nothing there.
+ *
+ * This form should be used when you can not block (like when
+ * processing an interrupt).
+ */
+vm_offset_t zget(zone_t zone)
+{
+ register vm_offset_t addr;
+
+ if (zone == ZONE_NULL)
+ panic ("zalloc: null zone");
+
+ zone_lock(zone);
+ REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ zone_unlock(zone);
+
+ return(addr);
+}
+
+boolean_t zone_check = FALSE;
+
+void zfree(zone_t zone, vm_offset_t elem)
+{
+ zone_lock(zone);
+ if (zone_check) {
+ vm_offset_t this;
+
+ /* check the zone's consistency */
+
+ for (this = zone->free_elements;
+ this != 0;
+ this = * (vm_offset_t *) this)
+ if (this == elem)
+ panic("zfree");
+ }
+ ADD_TO_ZONE(zone, elem);
+ zone_unlock(zone);
+}
+
+/*
+ * Zone garbage collection subroutines
+ *
+ * These routines have in common the modification of entries in the
+ * zone_page_table. The latter contains one entry for every page
+ * in the zone_map.
+ *
+ * For each page table entry in the given range:
+ *
+ * zone_page_in_use - decrements in_free_list
+ * zone_page_free - increments in_free_list
+ * zone_page_init - initializes in_free_list and alloc_count
+ * zone_page_alloc - increments alloc_count
+ * zone_page_dealloc - decrements alloc_count
+ * zone_add_free_page_list - adds the page to the free list
+ *
+ * Two counts are maintained for each page, the in_free_list count and
+ * alloc_count. The alloc_count is how many zone elements have been
+ * allocated from a page. (Note that the page could contain elements
+ * that span page boundaries. The count includes these elements so
+ * one element may be counted in two pages.) In_free_list is a count
+ * of how many zone elements are currently free. If in_free_list is
+ * equal to alloc_count then the page is eligible for garbage
+ * collection.
+ *
+ * Alloc_count and in_free_list are initialized to the correct values
+ * for a particular zone when a page is zcram'ed into a zone. Subsequent
+ * gets and frees of zone elements will call zone_page_in_use and
+ * zone_page_free which modify the in_free_list count. When the zones
+ * garbage collector runs it will walk through a zones free element list,
+ * remove the elements that reside on collectable pages, and use
+ * zone_add_free_page_list to create a list of pages to be collected.
+ */
+
+void zone_page_in_use(addr, size)
+vm_offset_t addr;
+vm_size_t size;
+{
+ int i, j;
+ if ((addr < zone_map_min_address) ||
+ (addr+size > zone_map_max_address)) return;
+ i = atop(addr-zone_map_min_address);
+ j = atop((addr+size-1) - zone_map_min_address);
+ lock_zone_page_table();
+ for (; i <= j; i++) {
+ zone_page_table[i].in_free_list--;
+ }
+ unlock_zone_page_table();
+}
+
+void zone_page_free(addr, size)
+vm_offset_t addr;
+vm_size_t size;
+{
+ int i, j;
+ if ((addr < zone_map_min_address) ||
+ (addr+size > zone_map_max_address)) return;
+ i = atop(addr-zone_map_min_address);
+ j = atop((addr+size-1) - zone_map_min_address);
+ lock_zone_page_table();
+ for (; i <= j; i++) {
+ /* Set in_free_list to (ZONE_PAGE_USED + 1) if
+ * it was previously set to ZONE_PAGE_UNUSED.
+ */
+ if (zone_page_table[i].in_free_list == ZONE_PAGE_UNUSED) {
+ zone_page_table[i].in_free_list = 1;
+ } else {
+ zone_page_table[i].in_free_list++;
+ }
+ }
+ unlock_zone_page_table();
+}
+
+void zone_page_init(addr, size, value)
+
+vm_offset_t addr;
+vm_size_t size;
+int value;
+{
+ int i, j;
+ if ((addr < zone_map_min_address) ||
+ (addr+size > zone_map_max_address)) return;
+ i = atop(addr-zone_map_min_address);
+ j = atop((addr+size-1) - zone_map_min_address);
+ lock_zone_page_table();
+ for (; i <= j; i++) {
+ zone_page_table[i].alloc_count = value;
+ zone_page_table[i].in_free_list = 0;
+ }
+ unlock_zone_page_table();
+}
+
+void zone_page_alloc(addr, size)
+vm_offset_t addr;
+vm_size_t size;
+{
+ int i, j;
+ if ((addr < zone_map_min_address) ||
+ (addr+size > zone_map_max_address)) return;
+ i = atop(addr-zone_map_min_address);
+ j = atop((addr+size-1) - zone_map_min_address);
+ lock_zone_page_table();
+ for (; i <= j; i++) {
+ /* Set alloc_count to (ZONE_PAGE_USED + 1) if
+ * it was previously set to ZONE_PAGE_UNUSED.
+ */
+ if (zone_page_table[i].alloc_count == ZONE_PAGE_UNUSED) {
+ zone_page_table[i].alloc_count = 1;
+ } else {
+ zone_page_table[i].alloc_count++;
+ }
+ }
+ unlock_zone_page_table();
+}
+
+void zone_page_dealloc(addr, size)
+vm_offset_t addr;
+vm_size_t size;
+{
+ int i, j;
+ if ((addr < zone_map_min_address) ||
+ (addr+size > zone_map_max_address)) return;
+ i = atop(addr-zone_map_min_address);
+ j = atop((addr+size-1) - zone_map_min_address);
+ lock_zone_page_table();
+ for (; i <= j; i++) {
+ zone_page_table[i].alloc_count--;
+ }
+ unlock_zone_page_table();
+}
+
+void
+zone_add_free_page_list(free_list, addr, size)
+ struct zone_page_table_entry **free_list;
+ vm_offset_t addr;
+ vm_size_t size;
+{
+ int i, j;
+ if ((addr < zone_map_min_address) ||
+ (addr+size > zone_map_max_address)) return;
+ i = atop(addr-zone_map_min_address);
+ j = atop((addr+size-1) - zone_map_min_address);
+ lock_zone_page_table();
+ for (; i <= j; i++) {
+ if (zone_page_table[i].alloc_count == 0) {
+ zone_page_table[i].next = *free_list;
+ *free_list = &zone_page_table[i];
+ zone_page_table[i].alloc_count = ZONE_PAGE_UNUSED;
+ zone_page_table[i].in_free_list = 0;
+ }
+ }
+ unlock_zone_page_table();
+}
+
+
+/* This is used for walking through a zone's free element list.
+ */
+struct zone_free_entry {
+ struct zone_free_entry * next;
+};
+
+
+/* Zone garbage collection
+ *
+ * zone_gc will walk through all the free elements in all the
+ * zones that are marked collectable looking for reclaimable
+ * pages. zone_gc is called by consider_zone_gc when the system
+ * begins to run out of memory.
+ */
+static void zone_gc()
+{
+ int max_zones;
+ zone_t z;
+ int i;
+ register spl_t s;
+ struct zone_page_table_entry *freep;
+ struct zone_page_table_entry *zone_free_page_list;
+
+ simple_lock(&all_zones_lock);
+ max_zones = num_zones;
+ z = first_zone;
+ simple_unlock(&all_zones_lock);
+
+ zone_free_page_list = (struct zone_page_table_entry *) 0;
+
+ for (i = 0; i < max_zones; i++) {
+ struct zone_free_entry * last;
+ struct zone_free_entry * elt;
+ assert(z != ZONE_NULL);
+ /* run this at splhigh so that interrupt routines that use zones
+ cannot interrupt while their zone is locked */
+ s=splhigh();
+ zone_lock(z);
+
+ if ((z->type & (ZONE_PAGEABLE|ZONE_COLLECTABLE)) == ZONE_COLLECTABLE) {
+
+ /* Count the free elements in each page. This loop
+ * requires that all in_free_list entries are zero.
+ */
+ elt = (struct zone_free_entry *)(z->free_elements);
+ while ((elt != (struct zone_free_entry *)0)) {
+ zone_page_free((vm_offset_t)elt, z->elem_size);
+ elt = elt->next;
+ }
+
+ /* Now determine which elements should be removed
+ * from the free list and, after all the elements
+ * on a page have been removed, add the element's
+ * page to a list of pages to be freed.
+ */
+ elt = (struct zone_free_entry *)(z->free_elements);
+ last = elt;
+ while ((elt != (struct zone_free_entry *)0)) {
+ if (((vm_offset_t)elt>=zone_map_min_address)&&
+ ((vm_offset_t)elt<=zone_map_max_address)&&
+ (zone_page(elt)->in_free_list ==
+ zone_page(elt)->alloc_count)) {
+
+ z->cur_size -= z->elem_size;
+ zone_page_in_use((vm_offset_t)elt, z->elem_size);
+ zone_page_dealloc((vm_offset_t)elt, z->elem_size);
+ if (zone_page(elt)->alloc_count == 0 ||
+ zone_page(elt+(z->elem_size-1))->alloc_count==0) {
+ zone_add_free_page_list(
+ &zone_free_page_list,
+ (vm_offset_t)elt, z->elem_size);
+ }
+
+
+ if (elt == last) {
+ elt = elt->next;
+ z->free_elements =(vm_offset_t)elt;
+ last = elt;
+ } else {
+ last->next = elt->next;
+ elt = elt->next;
+ }
+ } else {
+ /* This element is not eligible for collection
+ * so clear in_free_list in preparation for a
+ * subsequent garbage collection pass.
+ */
+ if (((vm_offset_t)elt>=zone_map_min_address)&&
+ ((vm_offset_t)elt<=zone_map_max_address)) {
+ zone_page(elt)->in_free_list = 0;
+ }
+ last = elt;
+ elt = elt->next;
+ }
+ }
+ }
+ zone_unlock(z);
+ splx(s);
+ simple_lock(&all_zones_lock);
+ z = z->next_zone;
+ simple_unlock(&all_zones_lock);
+ }
+
+ for (freep = zone_free_page_list; freep != 0; freep = freep->next) {
+ vm_offset_t free_addr;
+
+ free_addr = zone_map_min_address +
+ PAGE_SIZE * (freep - zone_page_table);
+ kmem_free(zone_map, free_addr, PAGE_SIZE);
+ }
+}
+
+boolean_t zone_gc_allowed = TRUE;
+unsigned zone_gc_last_tick = 0;
+unsigned zone_gc_max_rate = 0; /* in ticks */
+
+/*
+ * consider_zone_gc:
+ *
+ * Called by the pageout daemon when the system needs more free pages.
+ */
+
+void
+consider_zone_gc()
+{
+ /*
+ * By default, don't attempt zone GC more frequently
+ * than once a second.
+ */
+
+ if (zone_gc_max_rate == 0)
+ zone_gc_max_rate = hz;
+
+ if (zone_gc_allowed &&
+ (sched_tick > (zone_gc_last_tick + zone_gc_max_rate))) {
+ zone_gc_last_tick = sched_tick;
+ zone_gc();
+ }
+}
+
+#if MACH_DEBUG
+kern_return_t host_zone_info(host, namesp, namesCntp, infop, infoCntp)
+ host_t host;
+ zone_name_array_t *namesp;
+ unsigned int *namesCntp;
+ zone_info_array_t *infop;
+ unsigned int *infoCntp;
+{
+ zone_name_t *names;
+ vm_offset_t names_addr;
+ vm_size_t names_size = 0; /*'=0' to quiet gcc warnings */
+ zone_info_t *info;
+ vm_offset_t info_addr;
+ vm_size_t info_size = 0; /*'=0' to quiet gcc warnings */
+ unsigned int max_zones, i;
+ zone_t z;
+ kern_return_t kr;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ /*
+ * We assume that zones aren't freed once allocated.
+ * We won't pick up any zones that are allocated later.
+ */
+
+ simple_lock(&all_zones_lock);
+ max_zones = num_zones;
+ z = first_zone;
+ simple_unlock(&all_zones_lock);
+
+ if (max_zones <= *namesCntp) {
+ /* use in-line memory */
+
+ names = *namesp;
+ } else {
+ names_size = round_page(max_zones * sizeof *names);
+ kr = kmem_alloc_pageable(ipc_kernel_map,
+ &names_addr, names_size);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ names = (zone_name_t *) names_addr;
+ }
+
+ if (max_zones <= *infoCntp) {
+ /* use in-line memory */
+
+ info = *infop;
+ } else {
+ info_size = round_page(max_zones * sizeof *info);
+ kr = kmem_alloc_pageable(ipc_kernel_map,
+ &info_addr, info_size);
+ if (kr != KERN_SUCCESS) {
+ if (names != *namesp)
+ kmem_free(ipc_kernel_map,
+ names_addr, names_size);
+ return kr;
+ }
+
+ info = (zone_info_t *) info_addr;
+ }
+
+ for (i = 0; i < max_zones; i++) {
+ zone_name_t *zn = &names[i];
+ zone_info_t *zi = &info[i];
+ struct zone zcopy;
+
+ assert(z != ZONE_NULL);
+
+ zone_lock(z);
+ zcopy = *z;
+ zone_unlock(z);
+
+ simple_lock(&all_zones_lock);
+ z = z->next_zone;
+ simple_unlock(&all_zones_lock);
+
+ /* assuming here the name data is static */
+ (void) strncpy(zn->zn_name, zcopy.zone_name,
+ sizeof zn->zn_name);
+
+#ifdef ZONE_COUNT
+ zi->zi_count = zcopy.count;
+#else
+ zi->zi_count = 0;
+#endif
+ zi->zi_cur_size = zcopy.cur_size;
+ zi->zi_max_size = zcopy.max_size;
+ zi->zi_elem_size = zcopy.elem_size;
+ zi->zi_alloc_size = zcopy.alloc_size;
+ zi->zi_pageable = (zcopy.type & ZONE_PAGEABLE) != 0;
+ zi->zi_exhaustible = (zcopy.type & ZONE_EXHAUSTIBLE) != 0;
+ zi->zi_collectable = (zcopy.type & ZONE_COLLECTABLE) != 0;
+ }
+
+ if (names != *namesp) {
+ vm_size_t used;
+ vm_map_copy_t copy;
+
+ used = max_zones * sizeof *names;
+
+ if (used != names_size)
+ bzero((char *) (names_addr + used), names_size - used);
+
+ kr = vm_map_copyin(ipc_kernel_map, names_addr, names_size,
+ TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *namesp = (zone_name_t *) copy;
+ }
+ *namesCntp = max_zones;
+
+ if (info != *infop) {
+ vm_size_t used;
+ vm_map_copy_t copy;
+
+ used = max_zones * sizeof *info;
+
+ if (used != info_size)
+ bzero((char *) (info_addr + used), info_size - used);
+
+ kr = vm_map_copyin(ipc_kernel_map, info_addr, info_size,
+ TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *infop = (zone_info_t *) copy;
+ }
+ *infoCntp = max_zones;
+
+ return KERN_SUCCESS;
+}
+#endif /* MACH_DEBUG */
diff --git a/kern/zalloc.h b/kern/zalloc.h
new file mode 100644
index 00000000..2e9b4b38
--- /dev/null
+++ b/kern/zalloc.h
@@ -0,0 +1,135 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: zalloc.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ */
+
+#ifndef _KERN_ZALLOC_H_
+#define _KERN_ZALLOC_H_
+
+#include <mach/machine/vm_types.h>
+#include <kern/macro_help.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <machine/zalloc.h>
+
+/*
+ * A zone is a collection of fixed size blocks for which there
+ * is fast allocation/deallocation access. Kernel routines can
+ * use zones to manage data structures dynamically, creating a zone
+ * for each type of data structure to be managed.
+ *
+ */
+
+struct zone {
+ decl_simple_lock_data(,lock) /* generic lock */
+#ifdef ZONE_COUNT
+ int count; /* Number of elements used now */
+#endif
+ vm_offset_t free_elements;
+ vm_size_t cur_size; /* current memory utilization */
+ vm_size_t max_size; /* how large can this zone grow */
+ vm_size_t elem_size; /* size of an element */
+ vm_size_t alloc_size; /* size used for more memory */
+ boolean_t doing_alloc; /* is zone expanding now? */
+ char *zone_name; /* a name for the zone */
+ unsigned int type; /* type of memory */
+ lock_data_t complex_lock; /* Lock for pageable zones */
+ struct zone *next_zone; /* Link for all-zones list */
+};
+typedef struct zone *zone_t;
+
+#define ZONE_NULL ((zone_t) 0)
+
+/* Exported to everyone */
+zone_t zinit(vm_size_t size, vm_size_t max, vm_size_t alloc,
+ unsigned int memtype, char *name);
+vm_offset_t zalloc(zone_t zone);
+vm_offset_t zget(zone_t zone);
+void zfree(zone_t zone, vm_offset_t elem);
+void zcram(zone_t zone, vm_offset_t newmem, vm_size_t size);
+
+/* Exported only to vm/vm_init.c */
+void zone_bootstrap();
+void zone_init();
+
+/* Exported only to vm/vm_pageout.c */
+void consider_zone_gc();
+
+
+/* Memory type bits for zones */
+#define ZONE_PAGEABLE 0x00000001
+#define ZONE_COLLECTABLE 0x00000002 /* Garbage-collect this zone when memory runs low */
+#define ZONE_EXHAUSTIBLE 0x00000004 /* zalloc() on this zone is allowed to fail */
+#define ZONE_FIXED 0x00000008 /* Panic if zone is exhausted (XXX) */
+
+/* Machine-dependent code can provide additional memory types. */
+#define ZONE_MACHINE_TYPES 0xffff0000
+
+
+#ifdef ZONE_COUNT
+#define zone_count_up(zone) ((zone)->count++)
+#define zone_count_down(zone) ((zone)->count--)
+#else
+#define zone_count_up(zone)
+#define zone_count_down(zone)
+#endif
+
+
+
+/* These quick inline versions only work for small, nonpageable zones (currently). */
+
+static __inline vm_offset_t ZALLOC(zone_t zone)
+{
+ simple_lock(&zone->lock);
+ if (zone->free_elements == 0) {
+ simple_unlock(&zone->lock);
+ return zalloc(zone);
+ } else {
+ vm_offset_t element = zone->free_elements;
+ zone->free_elements = *((vm_offset_t *)(element));
+ zone_count_up(zone);
+ simple_unlock(&zone->lock);
+ return element;
+ }
+}
+
+static __inline void ZFREE(zone_t zone, vm_offset_t element)
+{
+ *((vm_offset_t *)(element)) = zone->free_elements;
+ zone->free_elements = (vm_offset_t) (element);
+ zone_count_down(zone);
+}
+
+
+
+#endif /* _KERN_ZALLOC_H_ */
diff --git a/scsi/adapters/README b/scsi/adapters/README
new file mode 100644
index 00000000..1bc7e7ca
--- /dev/null
+++ b/scsi/adapters/README
@@ -0,0 +1,290 @@
+
+This directory contains various examples of HBA support code,
+among them:
+
+ Chip/Board File Machine tested By
+
+ NCR 53C94 scsi_53C94 DecStation 5000 af@cmu
+ DEC 7061 scsi_7061 DecStation 3100/2100 af@cmu
+ NCR 5380 scsi_5380 VaxStation 3100 af@cmu
+ Fujitsu 89352 scsi_89352 Omron Luna88k danner@cmu
+ Adaptec 1542B scsi_aha15 AT/PC af@cmu
+
+It should be trivial to modify them for some other machine that uses
+the same SCSI chips, hopefully by properly conditionalizing and macroizing
+the existing code.
+
+There are various rules and assumptions to keep in mind when designing/coding
+the support code for a new HBA; here is a list.  Please extend it with
+anything you find that is not openly stated here and that made your life
+miserable during the port, and send it back to CMU.
+
+
+AUTOCONF
+
+We assume the structures and procedures defined in chips/busses.*,
+e.g. someone will call configure_bus_master to get to our foo_probe()
+routine. This should make up its mind on how many targets it sees
+[see later for dynamic reconfig], allocate a descriptor for each
+one and leave the driver ready to accept commands (via foo_go()).
+
+ On raw chips you should use a test_unit_ready command,
+ selecting without ATN, and timing out on non-existent
+ devices. Use LUN 0.
+ On boards, there probably is a command to let the board do
+ it (see Adaptec), if not do as above.
+
+The typical autoconf descriptor might look like
+
+ caddr_t foo_std[NFOO] = { 0 };
+ struct bus_device *foo_dinfo[NFOO*8];
+ struct bus_ctlr *foo_minfo[NFOO];
+ struct bus_driver foo_driver =
+ { foo_probe, scsi_slave, scsi_attach, foo_go, foo_std, "rz",
+ foo_dinfo, "foo", foo_minfo, BUS_INTR_B4_PROBE};
+
+which indicates that foo_probe() and foo_go() are our interface functions,
+and we use the generic scsi_slave() and scsi_attach() for the rest.
+Their definition is
+
+ foo_probe(reg, ui)
+ vm_offset_t reg;
+ struct bus_ctlr *ui;
+
+[the "reg" argument might actually be something else on architectures that
+ do not use memory mapped I/O]
+
+ foo_go(tgt, cmd_count, in_count, cmd_only)
+ target_info_t *tgt;
+ boolean_t cmd_only;
+
+The foo_go() routine is fairly common across chips, look at any example to
+see how to structure it. Basically, the arguments tell us how much data
+to expect in either direction, and whether (cmd_only) we think we should
+be selecting with ATN (cmd_only==FALSE) or not. The only gotcha is cmd_count
+actually includes the size of any parameters.
+
+The "go" field of the scsi_softc structure describing your HBA should be
+set to your foo_go() routine, by the foo_probe() routine.
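+
+As a rough sketch (not a drop-in implementation: how you obtain the
+scsi_softc structure is up to your tree; the existing drivers use a
+scsi_master_alloc() helper, but verify the name and arguments in your
+sources), the tail end of foo_probe() mostly just wires up the MI hooks:
+
+	foo_probe(reg, ui)
+		vm_offset_t	reg;
+		struct bus_ctlr	*ui;
+	{
+		scsi_softc_t	*sc;
+
+		...	/* map registers, find targets, allocate descriptors */
+
+		sc = scsi_master_alloc(ui->unit, ...);	/* assumed helper, see above */
+		sc->go       = foo_go;
+		sc->watchdog = scsi_watchdog;		/* optional, see WATCHDOG */
+		sc->probe    = foo_probe_target;	/* optional, see DYNAMIC RECONFIG */
+		...
+	}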
+
+DATA DEPENDENCIES
+
+The upper layer assumes that tgt->cmd_ptr is a pointer to good memory
+[e.g. no funny padding] where it places the scsi command control blocks
+AND small (less than 255 bytes) parameters. It also expects small results
+in there (things like read_capacity, for instance). I think I cleaned
+up all the places that used to assume tgt->cmd_ptr was aligned, but do not
+be surprised if I missed one.
+
+It does NOT use the dma_ptr, or any of the transient_state fields.
+
+WATCHDOG
+
+There is an optional MI watchdog facility, which can be used quite simply by
+filling in the "watchdog" field of the scsi_softc structure describing
+your HBA. To disable it, leave the field zero (or, dynamically, zero the
+timeout value). You can use a watchdog of your own if you like, or more
+likely set this field to point to the MI scsi_watchdog().
+This requires that your foo_softc descriptor starts off with a watchdog_t
+structure, with the "reset" field pointing to a function that will
+reset the SCSI bus should the watchdog expire.
+
+When a new SCSI command is initiated you should
+ if (foo->wd.nactive++ == 0)
+ foo->wd.watchdog_state = SCSI_WD_ACTIVE;
+to activate the watchdog, on each interrupt [or other signal that all
+is proceeding well for the command and it is making progress] you should
+ if (foo->wd.nactive)
+ foo->wd.watchdog_state = SCSI_WD_ACTIVE;
+bump down the watchdog 'trigger level', and when the command terminates
+ if (aha->wd.nactive-- == 1)
+ aha->wd.watchdog_state = SCSI_WD_INACTIVE;
+
+When you detect a SCSI bus reset (possibly we initiated it) you should
+ aha->wd.nactive = 0;
+and after cleaning up properly invoke
+ scsi_bus_was_reset(sc)
+ scsi_softc_t sc;
+
+The function that is invoked on watchdog expiry is
+ foo_reset_scsibus(foo)
+ register foo_softc_t foo;
+
+Note that this can be used for dumb chips that do not support select timeouts
+in hardware [see the 5380 or 7061 code], but its primary use is to detect
+instances where a target is holding on to the SCSI bus way too long.
+
+The one drawback of resetting the bus is that some devices (like tapes)
+lose status in case of a reset, and the MI code does not (yet?) try to
+keep enough information around to be able to recover. If you want to
+add something very useful you might change the rz_tape.c code to do just
+that, e.g. on SCSI_RET_ABORTs wait a while for the tape to do whatever,
+then rewind, and seek forward where the tape should have been positioned at
+the beginning of the command that failed, then reissue the command.
+None of the examples so far tries to be 'smart' like making an attempt
+to get the bus unstuck without resetting it; send us ideas if you have
+some.
+
+
+DYNAMIC RECONFIG
+
+Your code should be ready to add/remove targets on the fly. To do so,
+notify the upper layer that a target went offline returning
+SCSI_RET_DEVICE_DOWN when e.g. the select timed out, and clear out
+the tgt->flags field.
+To find new devices, define a function
+
+ boolean_t
+ foo_probe_target(tgt, ior)
+ target_info_t *tgt;
+ io_req_t ior;
+
+and install it in the "probe" field of the scsi_softc_t structure describing
+the HBA to the upper layer. This function should finalize all HBA-specific
+info in the target_info structure, then do a scsi_inquiry and check the
+return code. If this is not SCSI_RET_DEVICE_DOWN the target should be
+marked TGT_ALIVE.
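+
+A minimal sketch of such a probe function follows (treat it as a template
+only: the scsi_inquiry() calling convention and the exact target_info
+fields you must finalize are tree and HBA specific):
+
+	boolean_t
+	foo_probe_target(tgt, ior)
+		target_info_t	*tgt;
+		io_req_t	ior;
+	{
+		...	/* finalize HBA-specific fields, e.g. cmd_ptr */
+
+		if (scsi_inquiry(tgt, ...) == SCSI_RET_DEVICE_DOWN) {
+			tgt->flags = 0;		/* target is not there */
+			return FALSE;
+		}
+		tgt->flags |= TGT_ALIVE;
+		return TRUE;
+	}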
+
+
+COMMAND TERMINATION
+
+Generally, command termination should be reported to the upper layers
+by setting the tgt->done field to the proper value [it should remain
+SCSI_RET_IN_PROGRESS while the command is executing] and invoking the
+target's completion routine, like:
+ if (tgt->ior) {
+ LOG(0xA,"ops->restart");
+ (*tgt->dev_ops->restart)( tgt, TRUE);
+ }
+Note that early on some commands will actually wait for completion
+by spinning on the tgt->done value, because autoconf happens when
+threads and the scheduler are not working.
+
+Return SCSI_RET_RETRY if the target was busy, the command will be retried
+as appropriate.
+
+Check the completion routines [in rz_disk.c and rz_tape.c for instance]
+if you are not sure what to return in a troubled case.
+
+HBA CHIPS GOTCHAS
+
+All of the examples so far use the idea of 'scripts': the interrupt routine
+matches the chip state with what is expected and if this is ok (it is
+in the common important case) it just calls a prepackaged function.
+We have found this to be far simpler than using state machines of various
+ridiculous and error-prone sorts, and much handier for debugging as well.
+Not to mention the savings in code.
+Nonetheless, there really are no restrictions on how to structure the HBA
+code, if you prefer state machines go ahead and use them!
+
+Scheduling of the bus among competing targets is one of the major missing
+pieces for simple HBAs. A winning strategy used so far is as follows.
+Allocate a queue_head_t of waiting_targets in your foo_softc, and two
+target_info_t pointers next_target and active_target. A three-valued
+state field is also needed. If you enter the foo_go() routine
+and find the state&BUSY simply enqueue_tail() your tgt on the waiting_targets
+queue. Otherwise mark the bus BUSY, set next_target to tgt, and proceed
+to a selection attempt.
+Note that the attempt might fail and a reselection win over it, therefore
+the attempt_selection() routine should just retrieve the next_target
+and install it in active_target, start the selection and let the interrupt
+routine take care of the rest [see scsi_5380 for a different setup].
+If a reselection wins we should note that we had a COLLISION in the state
+field, install the reconnecting target and proceed to completion.
+When either a command is complete or a target disconnects you should invoke
+a foo_release_bus() routine, which might look like:
+
+boolean_t
+foo_release_bus(foo)
+ register foo_softc_t foo;
+{
+ boolean_t ret = TRUE;
+
+ LOG(9,"release");
+ if (foo->state & FOO_STATE_COLLISION) {
+
+ LOG(0xB,"collided");
+ foo->state &= ~FOO_STATE_COLLISION;
+ foo_attempt_selection(foo);
+
+ } else if (queue_empty(&foo->waiting_targets)) {
+
+ foo->state &= ~FOO_STATE_BUSY;
+ foo->active_target = 0;
+ foo->script = 0;
+ ret = FALSE;
+
+ } else {
+
+ LOG(0xC,"dequeue");
+ foo->next_target = (target_info_t *)
+ dequeue_head(&foo->waiting_targets);
+ foo_attempt_selection(foo);
+ }
+ return ret;
+}
+
+which indicates whether the bus has been handed off to a new target or not.
+This provides the desired FIFO scheduling of the bus and gives maximum
+parallelism when targets are allowed to (and in fact do) disconnect.
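+
+The matching entry side of this scheme, in foo_go(), is then little more
+than a check of the state field (a sketch only: chip setup, use of the
+tgt->transient_state fields, and whatever spl/lock protection your
+machine needs are all omitted):
+
+	foo_go(tgt, cmd_count, in_count, cmd_only)
+		target_info_t	*tgt;
+		boolean_t	cmd_only;
+	{
+		foo_softc_t	foo = ...;	/* your per-HBA descriptor */
+
+		...	/* build the command for this target */
+
+		if (foo->state & FOO_STATE_BUSY) {
+			/* bus busy: FIFO-queue the target,
+			   foo_release_bus() will pick it up later */
+			enqueue_tail(&foo->waiting_targets, (queue_entry_t) tgt);
+			return;
+		}
+		foo->state |= FOO_STATE_BUSY;
+		foo->next_target = tgt;
+		foo_attempt_selection(foo);
+	}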
+
+An area where there are problems most of the time is how to minimize the
+interaction of selections and reselections in, e.g. foo_attempt_selection().
+This is very much chip specific, but sneaking on the SCSI bus should
+be a viable alternative in most cases. Check in the specs what happens
+if you send a command while there is already a reselection pending:
+a well-behaved chip would ignore the command and not screw up its status.
+[Keep in mind that even if _now_ there is no reselection indication
+ on the next cycle there might be and you won't see it!]
+
+RANDOM GOTCHAS
+
+A number of workstations do not provide real DMA support [do not ask me why]
+but rather a special 'buffer' more or less wide where you have to copy
+data to and from.  This has been handled; see especially the 53C94 code,
+which has even the extreme optimization of issuing the send command before
+the data has been copied into the buffer! We have handled even machines
+where no DMA at all was provided.
+
+Speaking of DMA: many of these chips 'prefetch' data, or have a FIFO
+on board (they have to if they do synch xfers), and when the target
+disconnects it is always a pain to find out exactly how many bytes we
+transferred.  Be advised that this hurdle exists, and that the best way to
+debug your code here is to use a tape. A safe way is to initially
+disable disconnects [so that you can get the system up from disk]
+and enable them only on the tape unit that you are using for testing.
+Later on enable disks but make sure you have some way to recover from
+a zapped disk !
+
+MOVING TO USER SPACE
+
+All examples have hooks for user-space versions of the driver; the
+ones for the 53C94 and 7061 actually do work.  Look in mapped_scsi.c
+for how this is done; it is fairly simple as far as the kernel is
+concerned. To keep the option of mapping to user space open you
+should structure your interrupt routine such that it does all the
+state gathering and clearing of the interrupt right away. This
+scheme gives you some assurance that your code will keep on working
+when the interrupt processing is actually delayed and you recover
+the interrupt state from the saved structure in the mapped area.
+
+
+IMPROVEMENTS
+
+There are a variety of things to be done still, for instance:
+
+- rewrite scsi_slave() and scsi_attach() to be fully SCSI-II compliant.
+ There are only comments right now as to how that should be done.
+
+- add enough machinery to the tape code to be able to recover from
+ bus resets. Do so in such a way that other devices might use the ideas.
+
+- add more devices, like printers, scanners, modems, etc. that are currently
+ missing
+
+- add a 'generic' set_status flavor which simply executes a scsi command
+ passed in from user space.  This seems to be the way many vendors and other
+ people have structured their drivers; it would make it possible to have a
+ common user-level program to do special maintenance work like, for instance,
+ reformatting of disks.
+
diff --git a/scsi/adapters/scsi_33C93.h b/scsi/adapters/scsi_33C93.h
new file mode 100644
index 00000000..454a8ebf
--- /dev/null
+++ b/scsi/adapters/scsi_33C93.h
@@ -0,0 +1,396 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_33C93.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 8/91
+ *
+ * Defines for the WD/AMD 33C93 SBIC (SCSI Bus Interface Controller)
+ */
+
+/*
+ * Register map, w mux addressing
+ */
+
+typedef struct {
+
+ volatile unsigned char sbic_myid; /* rw: My SCSI id */
+#define sbic_cdbsize sbic_myid /* w : size of CDB */
+
+ volatile unsigned char sbic_control; /* rw: Control register */
+
+ volatile unsigned char sbic_timeo; /* rw: Timeout period */
+
+ volatile unsigned char sbic_cdb1; /* rw: CDB, 1st byte */
+#define sbic_tsecs sbic_cdb1 /* rw: Xlate: nsectors */
+
+ volatile unsigned char sbic_cdb2; /* rw: CDB, 2nd byte */
+#define sbic_theads sbic_cdb2 /* rw: Xlate: nheads */
+
+ volatile unsigned char sbic_cdb3; /* rw: CDB, 3rd byte */
+#define sbic_tcyl_hi sbic_cdb3 /* rw: Xlate: ncyls, hi */
+
+ volatile unsigned char sbic_cdb4; /* rw: CDB, 4th byte */
+#define sbic_tcyl_lo sbic_cdb4 /* rw: Xlate: ncyls, lo */
+
+ volatile unsigned char sbic_cdb5; /* rw: CDB, 5th byte */
+#define sbic_addr_hi sbic_cdb5 /* rw: Xlate: address, hi */
+
+ volatile unsigned char sbic_cdb6; /* rw: CDB, 6th byte */
+#define sbic_addr_2 sbic_cdb6 /* rw: Xlate: address */
+
+ volatile unsigned char sbic_cdb7; /* rw: CDB, 7th byte */
+#define sbic_addr_3 sbic_cdb7 /* rw: Xlate: address */
+
+ volatile unsigned char sbic_cdb8; /* rw: CDB, 8th byte */
+#define sbic_addr_lo sbic_cdb8 /* rw: Xlate: address, lo */
+
+ volatile unsigned char sbic_cdb9; /* rw: CDB, 9th byte */
+#define sbic_secno sbic_cdb9 /* rw: Xlate: sector no */
+
+ volatile unsigned char sbic_cdb10; /* rw: CDB, 10th byte */
+#define sbic_headno sbic_cdb10 /* rw: Xlate: head no */
+
+ volatile unsigned char sbic_cdb11; /* rw: CDB, 11th byte */
+#define sbic_cylno_hi sbic_cdb11/* rw: Xlate: cyl no, hi */
+
+ volatile unsigned char sbic_cdb12; /* rw: CDB, 12th byte */
+#define sbic_cylno_lo sbic_cdb12/* rw: Xlate: cyl no, lo */
+
+ volatile unsigned char sbic_tlun; /* rw: Target LUN */
+
+ volatile unsigned char sbic_cmd_phase; /* rw: Command phase */
+
+ volatile unsigned char sbic_syn; /* rw: Synch xfer params */
+
+ volatile unsigned char sbic_count_hi; /* rw: Xfer count, hi */
+
+ volatile unsigned char sbic_count_med; /* rw: Xfer count, med */
+
+ volatile unsigned char sbic_count_lo; /* rw: Xfer count, lo */
+
+ volatile unsigned char sbic_selid; /* rw: Target ID (select) */
+
+ volatile unsigned char sbic_rselid; /* rw: Target ID (reselect) */
+
+ volatile unsigned char sbic_csr; /* r : Status register */
+
+ volatile unsigned char sbic_cmd; /* rw: Command register */
+
+ volatile unsigned char sbic_data; /* rw: FIFO top */
+
+ char u0; /* unused, padding */
+ char u1; /* unused, padding */
+ char u2; /* unused, padding */
+ char u3; /* unused, padding */
+ char u4; /* unused, padding */
+
+ volatile unsigned char sbic_asr; /* r : Aux Status Register */
+
+} sbic_mux_regmap_t;
+
+/*
+ * Register map, non mux (indirect) addressing
+ */
+typedef struct {
+ volatile unsigned char sbic_asr; /* r : Aux Status Register */
+#define sbic_address sbic_asr /* w : desired register no */
+
+ volatile unsigned char sbic_value; /* rw: register value */
+} sbic_ind_regmap_t;
+
+#define sbic_read_reg(regs,regno,val) { \
+ (regs)->sbic_address = (regno); \
+ wbflush(); \
+ (val) = (regs)->sbic_value; \
+ }
+
+#define sbic_write_reg(regs,regno,val) { \
+ (regs)->sbic_address = (regno); \
+ wbflush(); \
+ (regs)->sbic_value = (val); \
+ }
+
+#define SBIC_myid 0
+#define SBIC_cdbsize 0
+#define SBIC_control 1
+#define SBIC_timeo 2
+#define SBIC_cdb1 3
+#define SBIC_tsecs 3
+#define SBIC_cdb2 4
+#define SBIC_theads 4
+#define SBIC_cdb3 5
+#define SBIC_tcyl_hi 5
+#define SBIC_cdb4 6
+#define SBIC_tcyl_lo 6
+#define SBIC_cdb5 7
+#define SBIC_addr_hi 7
+#define SBIC_cdb6 8
+#define SBIC_addr_2 8
+#define SBIC_cdb7 9
+#define SBIC_addr_3 9
+#define SBIC_cdb8 10
+#define SBIC_addr_lo 10
+#define SBIC_cdb9 11
+#define SBIC_secno 11
+#define SBIC_cdb10 12
+#define SBIC_headno 12
+#define SBIC_cdb11 13
+#define SBIC_cylno_hi 13
+#define SBIC_cdb12 14
+#define SBIC_cylno_lo 14
+#define SBIC_tlun 15
+#define SBIC_cmd_phase 16
+#define SBIC_syn 17
+#define SBIC_count_hi 18
+#define SBIC_count_med 19
+#define SBIC_count_lo 20
+#define SBIC_selid 21
+#define SBIC_rselid 22
+#define SBIC_csr 23
+#define SBIC_cmd 24
+#define SBIC_data 25
+/* sbic_asr is addressed directly */
+
+/*
+ * Register defines
+ */
+
+/*
+ * Auxiliary Status Register
+ */
+
+#define SBIC_ASR_INT 0x80 /* Interrupt pending */
+#define SBIC_ASR_LCI 0x40 /* Last command ignored */
+#define SBIC_ASR_BSY 0x20 /* Busy, only cmd/data/asr readable */
+#define SBIC_ASR_CIP 0x10 /* Busy, cmd unavail also */
+#define SBIC_ASR_xxx 0x0c
+#define SBIC_ASR_PE 0x02 /* Parity error (even) */
+#define SBIC_ASR_DBR 0x01 /* Data Buffer Ready */
+
+/*
+ * My ID register, and/or CDB Size
+ */
+
+#define SBIC_ID_FS_8_10 0x00 /* Input clock is 8-10 Mhz */
+ /* 11 Mhz is invalid */
+#define SBIC_ID_FS_12_15 0x40 /* Input clock is 12-15 Mhz */
+#define SBIC_ID_FS_16_20 0x80 /* Input clock is 16-20 Mhz */
+#define SBIC_ID_EHP 0x10 /* Enable host parity */
+#define SBIC_ID_EAF 0x08 /* Enable Advanced Features */
+#define SBIC_ID_MASK 0x07
+#define SBIC_ID_CBDSIZE_MASK 0x0f /* if unknown SCSI cmd group */
+
+/*
+ * Control register
+ */
+
+#define SBIC_CTL_DMA 0x80 /* Single byte dma */
+#define SBIC_CTL_DBA_DMA 0x40 /* direct buffer access (bus master) */
+#define SBIC_CTL_BURST_DMA 0x20 /* continuous mode (8237) */
+#define SBIC_CTL_NO_DMA 0x00 /* Programmed I/O */
+#define SBIC_CTL_HHP 0x10 /* Halt on host parity error */
+#define SBIC_CTL_EDI 0x08 /* Ending disconnect interrupt */
+#define SBIC_CTL_IDI 0x04 /* Intermediate disconnect interrupt*/
+#define SBIC_CTL_HA 0x02 /* Halt on ATN */
+#define SBIC_CTL_HSP 0x01 /* Halt on SCSI parity error */
+
+/*
+ * Timeout period register
+ * [val in msecs, input clk in Mhz]
+ */
+
+#define SBIC_TIMEOUT(val,clk) ((((val)*(clk))/80)+1)
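+
+/*
+ * Worked example (for illustration): a 250 msec selection timeout with a
+ * 20 MHz input clock encodes as SBIC_TIMEOUT(250,20) = ((250*20)/80)+1 = 63.
+ */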
+
+/*
+ * CDBn registers, note that
+ * cdb11 is used for status byte in target mode (send-status-and-cc)
+ * cdb12 indicates whether a linked command completed, and with flag if so
+ */
+
+/*
+ * Target LUN register
+ * [holds target status when select-and-xfer]
+ */
+
+#define SBIC_TLUN_VALID 0x80 /* did we receive an Identify msg */
+#define SBIC_TLUN_DOK 0x40 /* Disconnect OK */
+#define SBIC_TLUN_xxx 0x38
+#define SBIC_TLUN_MASK 0x07
+
+/*
+ * Command Phase register
+ */
+
+#define SBIC_CPH_MASK 0x7f /* values/restarts are cmd specific */
+#define SBIC_CPH(p) ((p)&SBIC_CPH_MASK)
+
+/*
+ * FIFO register
+ */
+
+#define SBIC_FIFO_DEEP 12
+
+/*
+ * Synchronous xfer register
+ */
+
+#define SBIC_SYN_OFF_MASK 0x0f
+#define SBIC_SYN_MAX_OFFSET (SBIC_FIFO_DEEP-1)
+#define SBIC_SYN_PER_MASK 0x70
+#define SBIC_SYN_MIN_PERIOD	2	/* up to 8, encoded as 0 */
+
+#define SBIC_SYN(o,p) (((o)&SBIC_SYN_OFF_MASK)|(((p)<<4)&SBIC_SYN_PER_MASK))
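+
+/*
+ * Worked example (for illustration): requesting the maximum offset
+ * (SBIC_FIFO_DEEP-1 = 11) at the minimum 2-cycle period encodes as
+ * SBIC_SYN(11,2) = (11 & 0x0f) | ((2<<4) & 0x70) = 0x2b; an offset of 0
+ * requests plain asynchronous transfers.
+ */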
+
+/*
+ * Transfer count register
+ * optimal access macros depend on addressing
+ */
+
+/*
+ * Destination ID (selid) register
+ */
+
+#define SBIC_SID_SCC 0x80 /* Select command chaining (tgt) */
+#define SBIC_SID_DPD		0x40	/* Data phase direction (initiator) */
+# define SBIC_SID_FROM_SCSI 0x40
+# define SBIC_SID_TO_SCSI 0x00
+#define SBIC_SID_xxx 0x38
+#define SBIC_SID_IDMASK 0x07
+
+/*
+ * Source ID (rselid) register
+ */
+
+#define SBIC_RID_ER 0x80 /* Enable reselection */
+#define SBIC_RID_ES 0x40 /* Enable selection */
+#define SBIC_RID_DSP 0x20 /* Disable select parity */
+#define SBIC_RID_SIV 0x08 /* Source ID valid */
+#define SBIC_RID_MASK 0x07
+
+/*
+ * Status register
+ */
+
+#define SBIC_CSR_CAUSE 0xf0
+# define SBIC_CSR_RESET 0x00 /* chip was reset */
+# define SBIC_CSR_CMD_DONE 0x10 /* cmd completed */
+#	define SBIC_CSR_CMD_STOPPED	0x20	/* interrupted or aborted */
+# define SBIC_CSR_CMD_ERR 0x40 /* end with error */
+# define SBIC_CSR_BUS_SERVICE 0x80 /* REQ pending on the bus */
+
+#define SBIC_CSR_QUALIFIER 0x0f
+
+ /* Reset State Interrupts */
+#	define SBIC_CSR_RESET		0x00	/* reset, advanced features off */
+# define SBIC_CSR_RESET_AM 0x01 /* reset w/advanced features*/
+
+ /* Successful Completion Interrupts */
+# define SBIC_CSR_TARGET 0x10 /* reselect complete */
+# define SBIC_CSR_INITIATOR 0x11 /* select complete */
+# define SBIC_CSR_WO_ATN 0x13 /* tgt mode completion */
+# define SBIC_CSR_W_ATN 0x14 /* ditto */
+# define SBIC_CSR_XLATED 0x15 /* translate address cmd */
+# define SBIC_CSR_S_XFERRED 0x16 /* initiator mode completion*/
+# define SBIC_CSR_XFERRED 0x18 /* phase in low bits */
+
+ /* Paused or Aborted Interrupts */
+# define SBIC_CSR_MSGIN_W_ACK 0x20 /* (I) msgin, ACK asserted*/
+# define SBIC_CSR_SDP 0x21 /* (I) SDP msg received */
+# define SBIC_CSR_SEL_ABRT 0x22 /* sel/resel aborted */
+# define SBIC_CSR_XFR_PAUSED 0x23 /* (T) no ATN */
+# define SBIC_CSR_XFR_PAUSED_ATN 0x24 /* (T) ATN is asserted */
+# define SBIC_CSR_RSLT_AM 0x27 /* (I) lost selection (AM) */
+# define SBIC_CSR_MIS 0x28 /* (I) xfer aborted, ph mis */
+
+ /* Terminated Interrupts */
+# define SBIC_CSR_CMD_INVALID 0x40
+# define SBIC_CSR_DISC 0x41 /* (I) tgt disconnected */
+# define SBIC_CSR_SEL_TIMEO 0x42
+# define SBIC_CSR_PE 0x43 /* parity error */
+# define SBIC_CSR_PE_ATN 0x44 /* ditto, ATN is asserted */
+# define SBIC_CSR_XLATE_TOOBIG 0x45
+# define SBIC_CSR_RSLT_NOAM 0x46 /* (I) lost sel, no AM mode */
+#	define SBIC_CSR_BAD_STATUS	0x47	/* status byte was not ok */
+# define SBIC_CSR_MIS_1 0x48 /* ph mis, see low bits */
+
+ /* Service Required Interrupts */
+# define SBIC_CSR_RSLT_NI 0x80 /* reselected, no ify msg */
+# define SBIC_CSR_RSLT_IFY 0x81 /* ditto, AM mode, got ify */
+# define SBIC_CSR_SLT 0x82 /* selected, no ATN */
+# define SBIC_CSR_SLT_ATN 0x83 /* selected with ATN */
+# define SBIC_CSR_ATN 0x84 /* (T) ATN asserted */
+# define SBIC_CSR_DISC_1 0x85 /* (I) bus is free */
+# define SBIC_CSR_UNK_GROUP 0x87 /* strange CDB1 */
+# define SBIC_CSR_MIS_2 0x88 /* (I) ph mis, see low bits */
+
+#define SBIC_PHASE(csr) SCSI_PHASE(csr)
+
+/*
+ * Command register (command codes)
+ */
+
+#define SBIC_CMD_SBT 0x80 /* Single byte xfer qualifier */
+#define SBIC_CMD_MASK 0x7f
+
+ /* Miscellaneous */
+#define SBIC_CMD_RESET 0x00 /* (DTI) lev I */
+#define SBIC_CMD_ABORT 0x01 /* (DTI) lev I */
+#define SBIC_CMD_DISC 0x04 /* ( TI) lev I */
+#define SBIC_CMD_SSCC 0x0d /* ( TI) lev I */
+#define SBIC_CMD_SET_IDI 0x0f /* (DTI) lev I */
+#define SBIC_CMD_XLATE 0x18 /* (DT ) lev II */
+
+ /* Initiator state */
+#define SBIC_CMD_SET_ATN 0x02 /* ( I) lev I */
+#define SBIC_CMD_CLR_ACK 0x03 /* ( I) lev I */
+#define SBIC_CMD_XFER_INFO 0x20 /* ( I) lev II */
+
+ /* Target state */
+#define SBIC_CMD_SND_DISC 0x0e /* ( T ) lev II */
+#define SBIC_CMD_RCV_CMD 0x10 /* ( T ) lev II */
+#define SBIC_CMD_RCV_DATA 0x11 /* ( T ) lev II */
+#define SBIC_CMD_RCV_MSG_OUT 0x12 /* ( T ) lev II */
+#define SBIC_CMD_RCV 0x13 /* ( T ) lev II */
+#define SBIC_CMD_SND_STATUS 0x14 /* ( T ) lev II */
+#define SBIC_CMD_SND_DATA 0x15 /* ( T ) lev II */
+#define SBIC_CMD_SND_MSG_IN 0x16 /* ( T ) lev II */
+#define SBIC_CMD_SND 0x17 /* ( T ) lev II */
+
+ /* Disconnected state */
+#define SBIC_CMD_RESELECT 0x05 /* (D ) lev II */
+#define SBIC_CMD_SEL_ATN 0x06 /* (D ) lev II */
+#define SBIC_CMD_SEL 0x07 /* (D ) lev II */
+#define SBIC_CMD_SEL_ATN_XFER 0x08 /* (D I) lev II */
+#define SBIC_CMD_SEL_XFER 0x09 /* (D I) lev II */
+#define SBIC_CMD_RESELECT_RECV 0x0a /* (DT ) lev II */
+#define SBIC_CMD_RESELECT_SEND 0x0b /* (DT ) lev II */
+#define SBIC_CMD_WAIT_SEL_RECV 0x0c /* (DT ) lev II */
+
+
+/* approximate, but we won't do SBT on selects */
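+/* i.e. cmd is one of SEL_ATN, SEL, SEL_ATN_XFER or SEL_XFER (0x06..0x09) */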
+#define sbic_isa_select(cmd) (((cmd)>0x5)&&((cmd)<0xa))
+
diff --git a/scsi/adapters/scsi_33C93_hdw.c b/scsi/adapters/scsi_33C93_hdw.c
new file mode 100644
index 00000000..169ccbf9
--- /dev/null
+++ b/scsi/adapters/scsi_33C93_hdw.c
@@ -0,0 +1,2078 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS AS-IS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_33C93_hdw.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 8/91
+ *
+ * Bottom layer of the SCSI driver: chip-dependent functions
+ *
+ * This file contains the code that is specific to the WD/AMD 33C93
+ * SCSI chip (Host Bus Adapter in SCSI parlance): probing, start
+ * operation, and interrupt routine.
+ */
+
+#if 0
+DISCLAIMER: THIS DOES NOT EVEN COMPILE YET, it went in by mistake.
+Code that probably makes some sense is from here to "TILL HERE"
+
+/*
+ * This layer works based on small simple 'scripts' that are installed
+ * at the start of the command and drive the chip to completion.
+ * The idea comes from the specs of the NCR 53C700 'script' processor.
+ *
+ * There are various reasons for this, mainly
+ * - Performance: identify the common (successful) path, and follow it;
+ * at interrupt time no code is needed to find the current status
+ * - Code size: it should be easy to compact common operations
+ * - Adaptability: the code skeleton should adapt to different chips without
+ * terrible complications.
+ * - Error handling: and it is easy to modify the actions performed
+ * by the scripts to cope with strange but well identified sequences
+ *
+ */
+
+#include <sbic.h>
+#if NSBIC > 0
+#include <platforms.h>
+
+#ifdef IRIS
+#define	PAD(n)	char n[3];	/* or whatever */
+#define SBIC_MUX_ADDRESSING /* comment out if wrong */
+#define SBIC_CLOCK_FREQUENCY 20 /* FIXME FIXME FIXME */
+#define SBIC_MACHINE_DMA_MODE SBIC_CTL_DMA /* FIXME FIXME FIXME */
+
+#define SBIC_SET_RST_ADDR /*SCSI_INIT_ADDR*/
+#define SBIC_CLR_RST_ADDR /*SCSI_RDY_ADDR*/
+#define SBIC_MACHINE_RESET_SCSIBUS(regs,per) \
+ { int temp; \
+ temp = *(volatile unsigned int *)SBIC_SET_RST_ADDR; \
+ delay(per); \
+ temp = *(volatile unsigned int *)SBIC_CLR_RST_ADDR; \
+ }
+
+#endif
+
+#include <machine/machspl.h> /* spl definitions */
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <chips/busses.h>
+#include <scsi/compat_30.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+
+#include <scsi/adapters/scsi_33C93.h>
+#include <scsi/scsi_defs.h>
+#include <scsi/adapters/scsi_dma.h>
+
+/*
+ * Spell out all combinations of padded/nopadded and mux/nomux
+ */
+#ifdef PAD
+typedef struct {
+
+ volatile unsigned char sbic_myid; /* rw: My SCSI id */
+/*#define sbic_cdbsize sbic_myid /* w : size of CDB */
+ PAD(pad0)
+ volatile unsigned char sbic_control; /* rw: Control register */
+ PAD(pad1)
+ volatile unsigned char sbic_timeo; /* rw: Timeout period */
+ PAD(pad2)
+ volatile unsigned char sbic_cdb1; /* rw: CDB, 1st byte */
+ PAD(pad3)
+ volatile unsigned char sbic_cdb2; /* rw: CDB, 2nd byte */
+ PAD(pad4)
+ volatile unsigned char sbic_cdb3; /* rw: CDB, 3rd byte */
+ PAD(pad5)
+ volatile unsigned char sbic_cdb4; /* rw: CDB, 4th byte */
+ PAD(pad6)
+ volatile unsigned char sbic_cdb5; /* rw: CDB, 5th byte */
+ PAD(pad7)
+ volatile unsigned char sbic_cdb6; /* rw: CDB, 6th byte */
+ PAD(pad8)
+ volatile unsigned char sbic_cdb7; /* rw: CDB, 7th byte */
+ PAD(pad9)
+ volatile unsigned char sbic_cdb8; /* rw: CDB, 8th byte */
+ PAD(pad10)
+ volatile unsigned char sbic_cdb9; /* rw: CDB, 9th byte */
+ PAD(pad11)
+ volatile unsigned char sbic_cdb10; /* rw: CDB, 10th byte */
+ PAD(pad12)
+ volatile unsigned char sbic_cdb11; /* rw: CDB, 11th byte */
+ PAD(pad13)
+ volatile unsigned char sbic_cdb12; /* rw: CDB, 12th byte */
+ PAD(pad14)
+ volatile unsigned char sbic_tlun; /* rw: Target LUN */
+ PAD(pad15)
+ volatile unsigned char sbic_cmd_phase; /* rw: Command phase */
+ PAD(pad16)
+ volatile unsigned char sbic_syn; /* rw: Synch xfer params */
+ PAD(pad17)
+ volatile unsigned char sbic_count_hi; /* rw: Xfer count, hi */
+ PAD(pad18)
+ volatile unsigned char sbic_count_med; /* rw: Xfer count, med */
+ PAD(pad19)
+ volatile unsigned char sbic_count_lo; /* rw: Xfer count, lo */
+ PAD(pad20)
+ volatile unsigned char sbic_selid; /* rw: Target ID (select) */
+ PAD(pad21)
+ volatile unsigned char sbic_rselid; /* rw: Target ID (reselect) */
+ PAD(pad22)
+ volatile unsigned char sbic_csr; /* r : Status register */
+ PAD(pad23)
+ volatile unsigned char sbic_cmd; /* rw: Command register */
+ PAD(pad24)
+ volatile unsigned char sbic_data; /* rw: FIFO top */
+ PAD(pad25)
+ char u0; /* unused, padding */
+ PAD(pad26)
+ char u1; /* unused, padding */
+ PAD(pad27)
+ char u2; /* unused, padding */
+ PAD(pad28)
+ char u3; /* unused, padding */
+ PAD(pad29)
+ char u4; /* unused, padding */
+ PAD(pad30)
+ volatile unsigned char sbic_asr; /* r : Aux Status Register */
+ PAD(pad31)
+
+} sbic_padded_mux_regmap_t;
+
+typedef struct {
+ volatile unsigned char sbic_asr; /* r : Aux Status Register */
+/*#define sbic_address sbic_asr /* w : desired register no */
+	PAD(pad0)
+	volatile unsigned char	sbic_value;	/* rw: register value */
+	PAD(pad1)
+} sbic_padded_ind_regmap_t;
+
+#else /* !PAD */
+
+typedef sbic_mux_regmap_t sbic_padded_mux_regmap_t;
+typedef sbic_ind_regmap_t sbic_padded_ind_regmap_t;
+
+#endif /* !PAD */
+
+/*
+ * Could have used some non-ANSIsm in the following :-))
+ */
+#ifdef SBIC_MUX_ADDRESSING
+
+typedef sbic_padded_mux_regmap_t sbic_padded_regmap_t;
+
+#define SET_SBIC_myid(regs,val) (regs)->sbic_myid = (val)
+#define GET_SBIC_myid(regs,val) (val) = (regs)->sbic_myid
+#define SET_SBIC_cdbsize(regs,val) (regs)->sbic_cdbsize = (val)
+#define GET_SBIC_cdbsize(regs,val) (val) = (regs)->sbic_cdbsize
+#define SET_SBIC_control(regs,val) (regs)->sbic_control = (val)
+#define GET_SBIC_control(regs,val) (val) = (regs)->sbic_control
+#define SET_SBIC_timeo(regs,val) (regs)->sbic_timeo = (val)
+#define GET_SBIC_timeo(regs,val) (val) = (regs)->sbic_timeo
+#define SET_SBIC_cdb1(regs,val) (regs)->sbic_cdb1 = (val)
+#define GET_SBIC_cdb1(regs,val) (val) = (regs)->sbic_cdb1
+#define SET_SBIC_cdb2(regs,val) (regs)->sbic_cdb2 = (val)
+#define GET_SBIC_cdb2(regs,val) (val) = (regs)->sbic_cdb2
+#define SET_SBIC_cdb3(regs,val) (regs)->sbic_cdb3 = (val)
+#define GET_SBIC_cdb3(regs,val) (val) = (regs)->sbic_cdb3
+#define SET_SBIC_cdb4(regs,val) (regs)->sbic_cdb4 = (val)
+#define GET_SBIC_cdb4(regs,val) (val) = (regs)->sbic_cdb4
+#define SET_SBIC_cdb5(regs,val) (regs)->sbic_cdb5 = (val)
+#define GET_SBIC_cdb5(regs,val) (val) = (regs)->sbic_cdb5
+#define SET_SBIC_cdb6(regs,val) (regs)->sbic_cdb6 = (val)
+#define GET_SBIC_cdb6(regs,val) (val) = (regs)->sbic_cdb6
+#define SET_SBIC_cdb7(regs,val) (regs)->sbic_cdb7 = (val)
+#define GET_SBIC_cdb7(regs,val) (val) = (regs)->sbic_cdb7
+#define SET_SBIC_cdb8(regs,val) (regs)->sbic_cdb8 = (val)
+#define GET_SBIC_cdb8(regs,val) (val) = (regs)->sbic_cdb8
+#define SET_SBIC_cdb9(regs,val) (regs)->sbic_cdb9 = (val)
+#define GET_SBIC_cdb9(regs,val) (val) = (regs)->sbic_cdb9
+#define SET_SBIC_cdb10(regs,val) (regs)->sbic_cdb10 = (val)
+#define GET_SBIC_cdb10(regs,val) (val) = (regs)->sbic_cdb10
+#define SET_SBIC_cdb11(regs,val) (regs)->sbic_cdb11 = (val)
+#define GET_SBIC_cdb11(regs,val) (val) = (regs)->sbic_cdb11
+#define SET_SBIC_cdb12(regs,val) (regs)->sbic_cdb12 = (val)
+#define GET_SBIC_cdb12(regs,val) (val) = (regs)->sbic_cdb12
+#define SET_SBIC_tlun(regs,val) (regs)->sbic_tlun = (val)
+#define GET_SBIC_tlun(regs,val) (val) = (regs)->sbic_tlun
+#define SET_SBIC_cmd_phase(regs,val) (regs)->sbic_cmd_phase = (val)
+#define GET_SBIC_cmd_phase(regs,val) (val) = (regs)->sbic_cmd_phase
+#define SET_SBIC_syn(regs,val) (regs)->sbic_syn = (val)
+#define GET_SBIC_syn(regs,val) (val) = (regs)->sbic_syn
+#define SET_SBIC_count_hi(regs,val) (regs)->sbic_count_hi = (val)
+#define GET_SBIC_count_hi(regs,val) (val) = (regs)->sbic_count_hi
+#define SET_SBIC_count_med(regs,val) (regs)->sbic_count_med = (val)
+#define GET_SBIC_count_med(regs,val) (val) = (regs)->sbic_count_med
+#define SET_SBIC_count_lo(regs,val) (regs)->sbic_count_lo = (val)
+#define GET_SBIC_count_lo(regs,val) (val) = (regs)->sbic_count_lo
+#define SET_SBIC_selid(regs,val) (regs)->sbic_selid = (val)
+#define GET_SBIC_selid(regs,val) (val) = (regs)->sbic_selid
+#define SET_SBIC_rselid(regs,val) (regs)->sbic_rselid = (val)
+#define GET_SBIC_rselid(regs,val) (val) = (regs)->sbic_rselid
+#define SET_SBIC_csr(regs,val) (regs)->sbic_csr = (val)
+#define GET_SBIC_csr(regs,val) (val) = (regs)->sbic_csr
+#define SET_SBIC_cmd(regs,val) (regs)->sbic_cmd = (val)
+#define GET_SBIC_cmd(regs,val) (val) = (regs)->sbic_cmd
+#define SET_SBIC_data(regs,val) (regs)->sbic_data = (val)
+#define GET_SBIC_data(regs,val) (val) = (regs)->sbic_data
+
+#define SBIC_TC_SET(regs,val) { \
+	(regs)->sbic_count_hi  = ((val)>>16);		\
+ (regs)->sbic_count_med = (val)>>8; \
+ (regs)->sbic_count_lo = (val); \
+ }
+#define SBIC_TC_GET(regs,val) { \
+ (val) = ((regs)->sbic_count_hi << 16) | \
+ ((regs)->sbic_count_med << 8) | \
+ ((regs)->sbic_count_lo); \
+ }
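+/*
+ * Example (for illustration): SBIC_TC_SET(regs,0x012345) loads the 24-bit
+ * transfer counter as count_hi=0x01, count_med=0x23, count_lo=0x45, and
+ * SBIC_TC_GET reassembles the same value from the three byte registers.
+ */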
+
+#define SBIC_LOAD_COMMAND(regs,cmd,cmdsize) { \
+ register char *ptr = (char*)(cmd); \
+	(regs)->sbic_cdb1 = *ptr++;		\
+	(regs)->sbic_cdb2 = *ptr++;		\
+	(regs)->sbic_cdb3 = *ptr++;		\
+	(regs)->sbic_cdb4 = *ptr++;		\
+	(regs)->sbic_cdb5 = *ptr++;		\
+	(regs)->sbic_cdb6 = *ptr++;		\
+	if (cmdsize > 6) {			\
+		(regs)->sbic_cdb7 = *ptr++;	\
+		(regs)->sbic_cdb8 = *ptr++;	\
+		(regs)->sbic_cdb9 = *ptr++;	\
+		(regs)->sbic_cdb10 = *ptr++;	\
+	}					\
+	if (cmdsize > 10) {			\
+		(regs)->sbic_cdb11 = *ptr++;	\
+		(regs)->sbic_cdb12 = *ptr;	\
+ } \
+ }
+
+#else /*SBIC_MUX_ADDRESSING*/
+
+typedef sbic_padded_ind_regmap_t sbic_padded_regmap_t;
+
+#define SET_SBIC_myid(regs,val) sbic_write_reg(regs,SBIC_myid,val)
+#define GET_SBIC_myid(regs,val) sbic_read_reg(regs,SBIC_myid,val)
+#define SET_SBIC_cdbsize(regs,val) sbic_write_reg(regs,SBIC_cdbsize,val)
+#define GET_SBIC_cdbsize(regs,val) sbic_read_reg(regs,SBIC_cdbsize,val)
+#define SET_SBIC_control(regs,val) sbic_write_reg(regs,SBIC_control,val)
+#define GET_SBIC_control(regs,val) sbic_read_reg(regs,SBIC_control,val)
+#define SET_SBIC_timeo(regs,val) sbic_write_reg(regs,SBIC_timeo,val)
+#define GET_SBIC_timeo(regs,val) sbic_read_reg(regs,SBIC_timeo,val)
+#define SET_SBIC_cdb1(regs,val) sbic_write_reg(regs,SBIC_cdb1,val)
+#define GET_SBIC_cdb1(regs,val) sbic_read_reg(regs,SBIC_cdb1,val)
+#define SET_SBIC_cdb2(regs,val) sbic_write_reg(regs,SBIC_cdb2,val)
+#define GET_SBIC_cdb2(regs,val) sbic_read_reg(regs,SBIC_cdb2,val)
+#define SET_SBIC_cdb3(regs,val) sbic_write_reg(regs,SBIC_cdb3,val)
+#define GET_SBIC_cdb3(regs,val) sbic_read_reg(regs,SBIC_cdb3,val)
+#define SET_SBIC_cdb4(regs,val) sbic_write_reg(regs,SBIC_cdb4,val)
+#define GET_SBIC_cdb4(regs,val) sbic_read_reg(regs,SBIC_cdb4,val)
+#define SET_SBIC_cdb5(regs,val) sbic_write_reg(regs,SBIC_cdb5,val)
+#define GET_SBIC_cdb5(regs,val) sbic_read_reg(regs,SBIC_cdb5,val)
+#define SET_SBIC_cdb6(regs,val) sbic_write_reg(regs,SBIC_cdb6,val)
+#define GET_SBIC_cdb6(regs,val) sbic_read_reg(regs,SBIC_cdb6,val)
+#define SET_SBIC_cdb7(regs,val) sbic_write_reg(regs,SBIC_cdb7,val)
+#define GET_SBIC_cdb7(regs,val) sbic_read_reg(regs,SBIC_cdb7,val)
+#define SET_SBIC_cdb8(regs,val) sbic_write_reg(regs,SBIC_cdb8,val)
+#define GET_SBIC_cdb8(regs,val) sbic_read_reg(regs,SBIC_cdb8,val)
+#define SET_SBIC_cdb9(regs,val) sbic_write_reg(regs,SBIC_cdb9,val)
+#define GET_SBIC_cdb9(regs,val) sbic_read_reg(regs,SBIC_cdb9,val)
+#define SET_SBIC_cdb10(regs,val) sbic_write_reg(regs,SBIC_cdb10,val)
+#define GET_SBIC_cdb10(regs,val) sbic_read_reg(regs,SBIC_cdb10,val)
+#define SET_SBIC_cdb11(regs,val) sbic_write_reg(regs,SBIC_cdb11,val)
+#define GET_SBIC_cdb11(regs,val) sbic_read_reg(regs,SBIC_cdb11,val)
+#define SET_SBIC_cdb12(regs,val) sbic_write_reg(regs,SBIC_cdb12,val)
+#define GET_SBIC_cdb12(regs,val) sbic_read_reg(regs,SBIC_cdb12,val)
+#define SET_SBIC_tlun(regs,val) sbic_write_reg(regs,SBIC_tlun,val)
+#define GET_SBIC_tlun(regs,val) sbic_read_reg(regs,SBIC_tlun,val)
+#define SET_SBIC_cmd_phase(regs,val) sbic_write_reg(regs,SBIC_cmd_phase,val)
+#define GET_SBIC_cmd_phase(regs,val) sbic_read_reg(regs,SBIC_cmd_phase,val)
+#define SET_SBIC_syn(regs,val) sbic_write_reg(regs,SBIC_syn,val)
+#define GET_SBIC_syn(regs,val) sbic_read_reg(regs,SBIC_syn,val)
+#define SET_SBIC_count_hi(regs,val) sbic_write_reg(regs,SBIC_count_hi,val)
+#define GET_SBIC_count_hi(regs,val) sbic_read_reg(regs,SBIC_count_hi,val)
+#define SET_SBIC_count_med(regs,val) sbic_write_reg(regs,SBIC_count_med,val)
+#define GET_SBIC_count_med(regs,val) sbic_read_reg(regs,SBIC_count_med,val)
+#define SET_SBIC_count_lo(regs,val) sbic_write_reg(regs,SBIC_count_lo,val)
+#define GET_SBIC_count_lo(regs,val) sbic_read_reg(regs,SBIC_count_lo,val)
+#define SET_SBIC_selid(regs,val) sbic_write_reg(regs,SBIC_selid,val)
+#define GET_SBIC_selid(regs,val) sbic_read_reg(regs,SBIC_selid,val)
+#define SET_SBIC_rselid(regs,val) sbic_write_reg(regs,SBIC_rselid,val)
+#define GET_SBIC_rselid(regs,val) sbic_read_reg(regs,SBIC_rselid,val)
+#define SET_SBIC_csr(regs,val) sbic_write_reg(regs,SBIC_csr,val)
+#define GET_SBIC_csr(regs,val) sbic_read_reg(regs,SBIC_csr,val)
+#define SET_SBIC_cmd(regs,val) sbic_write_reg(regs,SBIC_cmd,val)
+#define GET_SBIC_cmd(regs,val) sbic_read_reg(regs,SBIC_cmd,val)
+#define SET_SBIC_data(regs,val) sbic_write_reg(regs,SBIC_data,val)
+#define GET_SBIC_data(regs,val) sbic_read_reg(regs,SBIC_data,val)
+
+#define SBIC_TC_SET(regs,val) { \
+ sbic_write_reg(regs,SBIC_count_hi,((val)>>16)); \
+ (regs)->sbic_value = (val)>>8; wbflush(); \
+ (regs)->sbic_value = (val); \
+ }
+#define SBIC_TC_GET(regs,val) { \
+ sbic_read_reg(regs,SBIC_count_hi,(val)); \
+ (val) = ((val)<<8) | (regs)->sbic_value; \
+ (val) = ((val)<<8) | (regs)->sbic_value; \
+ }
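+/*
+ * Note: after loading the high byte through sbic_write_reg()/sbic_read_reg(),
+ * the two macros above access sbic_value twice more, apparently relying on
+ * the chip auto-incrementing its indirect register address after each data
+ * access (count_hi -> count_med -> count_lo).  The indirect form of
+ * SBIC_LOAD_COMMAND below leans on the same behavior for the CDB registers.
+ */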
+
+#define SBIC_LOAD_COMMAND(regs,cmd,cmdsize) {			\
+ register int n=cmdsize-1; \
+ register char *ptr = (char*)(cmd); \
+ sbic_write_reg(regs,SBIC_cdb1,*ptr++); \
+ while (n-- > 0) (regs)->sbic_value = *ptr++; \
+ }
+
+#endif /*SBIC_MUX_ADDRESSING*/
+
+#define GET_SBIC_asr(regs,val) (val) = (regs)->sbic_asr
+
+
+/*
+ * If all goes well (cross fingers) the typical read/write operation
+ * should complete in just one interrupt. Therefore our scripts
+ * have only two parts: a pre-condition and an action.  The pre-condition
+ * triggers error handling if not satisfied; in our case it is a match
+ * of the interrupt cause (csr) and command phase reported by the chip
+ * against the values the script expects.
+ * The action part is just a function pointer, invoked in a standard way.
+ * The script proceeds only if the action routine returns TRUE.
+ * See sbic_intr() for how and where this is all done.
+ */
+
+typedef struct script {
+ struct { /* expected state at interrupt: */
+ unsigned char csr; /* interrupt cause */
+ unsigned char pha; /* command phase */
+ } condition;
+/* unsigned char unused[2]; /* unused padding */
+ boolean_t (*action)(); /* extra operations */
+} *script_t;
+
+/* Matching on the condition value */
+#define ANY 0xff
+#define SCRIPT_MATCH(csr,pha,cond) \
+ (((cond).csr == (csr)) && \
+ (((cond).pha == (pha)) || ((cond).pha==ANY)))
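+
+/*
+ * Example (for illustration): for the single entry of sbic_script_any_cmd
+ * further below, cond = {SBIC_CSR_S_XFERRED, 0x60}, so SCRIPT_MATCH is true
+ * only when the chip reports csr 0x16 with command phase 0x60 (completed
+ * select-and-transfer); an expected phase of ANY (0xff) matches any phase.
+ */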
+
+
+/* forward decls of script actions */
+boolean_t
+ sbic_end(), /* all come to an end */
+ sbic_get_status(), /* get status from target */
+ sbic_dma_in(), /* get data from target via dma */
+ sbic_dma_in_r(), /* get data from target via dma (restartable)*/
+ sbic_dma_out(), /* send data to target via dma */
+ sbic_dma_out_r(), /* send data to target via dma (restartable) */
+ sbic_dosynch(), /* negotiate synch xfer */
+	sbic_msg_in(),		/* receive the disconnect message */
+ sbic_disconnected(), /* target has disconnected */
+ sbic_reconnect(); /* target reconnected */
+
+/* forward decls of error handlers */
+boolean_t
+ sbic_err_generic(), /* generic handler */
+ sbic_err_disconn(), /* target disconnects amidst */
+ gimmeabreak(); /* drop into the debugger */
+
+int sbic_reset_scsibus();
+boolean_t sbic_probe_target();
+boolean_t sbic_reset();
+boolean_t sbic_release_bus();
+static sbic_wait();
+
+/*
+ * State descriptor for this layer. There is one such structure
+ * per (enabled) SCSI-33c93 interface
+ */
+struct sbic_softc {
+ watchdog_t wd;
+ sbic_padded_regmap_t *regs; /* 33c93 registers */
+
+ scsi_dma_ops_t *dma_ops; /* DMA operations and state */
+ opaque_t dma_state;
+
+ script_t script; /* what should happen next */
+ boolean_t (*error_handler)();/* what if something is wrong */
+ int in_count; /* amnt we expect to receive */
+ int out_count; /* amnt we are going to ship */
+
+ volatile char state;
+#define SBIC_STATE_BUSY 0x01 /* selecting or currently connected */
+#define SBIC_STATE_TARGET 0x04 /* currently selected as target */
+#define SBIC_STATE_COLLISION 0x08 /* lost selection attempt */
+#define SBIC_STATE_DMA_IN 0x10 /* tgt --> initiator xfer */
+#define SBIC_STATE_AM_MODE 0x20 /* 33c93A with advanced mode (AM) */
+
+ unsigned char ntargets; /* how many alive on this scsibus */
+ unsigned char done;
+ unsigned char unused;
+
+ scsi_softc_t *sc; /* HBA-indep info */
+ target_info_t *active_target; /* the current one */
+
+ target_info_t *next_target; /* trying to seize bus */
+ queue_head_t waiting_targets;/* other targets competing for bus */
+
+} sbic_softc_data[NSBIC];
+
+typedef struct sbic_softc *sbic_softc_t;
+
+sbic_softc_t sbic_softc[NSBIC];
+
+/*
+ * Synch xfer parameters, and timing conversions
+ */
+int sbic_min_period = SBIC_SYN_MIN_PERIOD; /* in cycles = f(ICLK,FSn) */
+int sbic_max_offset = SBIC_SYN_MAX_OFFSET; /* pure number */
+
+int sbic_to_scsi_period(regs,a)
+	sbic_padded_regmap_t *regs;
+	int a;
+{
+ unsigned int fs;
+
+ /* cycle = DIV / (2*CLK) */
+ /* DIV = FS+2 */
+ /* best we can do is 200ns at 20Mhz, 2 cycles */
+
+ GET_SBIC_myid(regs,fs);
+ fs = (fs >>6) + 2; /* DIV */
+ fs = (fs * 1000) / (SBIC_CLOCK_FREQUENCY<<1); /* Cycle, in ns */
+ if (a < 2) a = 8; /* map to Cycles */
+ return ((fs*a)>>2); /* in 4 ns units */
+}
+
+int scsi_period_to_sbic(regs,p)
+	sbic_padded_regmap_t *regs;
+	int p;
+{
+	register unsigned int fs, ret;
+
+ /* Just the inverse of the above */
+
+ GET_SBIC_myid(regs,fs);
+ fs = (fs >>6) + 2; /* DIV */
+ fs = (fs * 1000) / (SBIC_CLOCK_FREQUENCY<<1); /* Cycle, in ns */
+
+ ret = p << 2; /* in ns units */
+ ret = ret / fs; /* in Cycles */
+ if (ret < sbic_min_period)
+ return sbic_min_period;
+ /* verify rounding */
+ if (sbic_to_scsi_period(regs,ret) < p)
+ ret++;
+ return (ret >= 8) ? 0 : ret;
+}
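+
+/*
+ * Worked example (for illustration), assuming a 20 MHz input clock with the
+ * FS field set for the 16-20 MHz range: DIV = (0x80>>6)+2 = 4, so one
+ * internal cycle is (4*1000)/(2*20) = 100 ns and the minimum 2-cycle
+ * synchronous period comes out as sbic_to_scsi_period(regs,2) = 50 in the
+ * 4 ns units of the SCSI synchronous message; scsi_period_to_sbic(regs,50)
+ * maps it back to the 2-cycle code.
+ */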
+
+#define u_min(a,b) (((a) < (b)) ? (a) : (b))
+
+/*
+ * Definition of the controller for the auto-configuration program.
+ */
+
+int sbic_probe(), scsi_slave(), scsi_attach(), sbic_go(), sbic_intr();
+
+caddr_t sbic_std[NSBIC] = { 0 };
+struct bus_device *sbic_dinfo[NSBIC*8];
+struct bus_ctlr *sbic_minfo[NSBIC];
+struct bus_driver sbic_driver =
+ { sbic_probe, scsi_slave, scsi_attach, sbic_go, sbic_std, "rz", sbic_dinfo,
+ "sbic", sbic_minfo, BUS_INTR_B4_PROBE};
+
+
+sbic_set_dmaops(unit, dmaops)
+ unsigned int unit;
+ scsi_dma_ops_t *dmaops;
+{
+ if (unit < NSBIC)
+ sbic_std[unit] = (caddr_t)dmaops;
+}
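+
+/*
+ * Usage sketch (illustrative): machine-dependent startup code is expected
+ * to register its DMA callbacks before autoconfiguration runs, e.g.
+ *
+ *	extern scsi_dma_ops_t my_dma_ops;	(hypothetical name)
+ *	sbic_set_dmaops(0, &my_dma_ops);
+ *
+ * so that sbic_probe() below can pick them up from sbic_std[].
+ */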
+
+/*
+ * Scripts
+ */
+struct script
+sbic_script_any_cmd[] = { /* started with SEL & XFER */
+ {{SBIC_CSR_S_XFERRED, 0x60}, sbic_get_status},
+},
+
+sbic_script_try_synch[] = { /* started with SEL */
+ {{SBIC_CSR_INITIATOR, ANY}, sbic_dosynch},
+ {{SBIC_CSR_S_XFERRED, 0x60}, sbic_get_status},
+};
+
+
+#define DEBUG
+#ifdef DEBUG
+
+#define PRINT(x) if (scsi_debug) printf x
+
+sbic_state(regs, overrule)
+ sbic_padded_regmap_t *regs;
+{
+ register unsigned char asr,tmp;
+
+ if (regs == 0) {
+ if (sbic_softc[0])
+ regs = sbic_softc[0]->regs;
+ else
+ regs = (sbic_padded_regmap_t*)0xXXXXXXXX;
+ }
+
+ GET_SBIC_asr(regs,asr);
+
+ if ((asr & SBIC_ASR_BSY) && !overrule)
+ db_printf("-BUSY- ");
+ else {
+ unsigned char tlun,pha,selid,rselid;
+ unsigned int cnt;
+ GET_SBIC_tlun(regs,tlun);
+ GET_SBIC_cmd_phase(regs,pha);
+ GET_SBIC_selid(regs,selid);
+ GET_SBIC_rselid(regs,rselid);
+ SBIC_TC_GET(regs,cnt);
+ db_printf("tc %x tlun %x sel %x rsel %x pha %x ",
+ cnt, tlun, selid, rselid, pha);
+ }
+
+ if (asr & SBIC_ASR_INT)
+ db_printf("-INT- ");
+ else {
+ GET_SBIC_csr(regs,tmp);
+ db_printf("csr %x ", tmp);
+ }
+
+ if (asr & SBIC_ASR_CIP)
+ db_printf("-CIP-\n");
+ else {
+ GET_SBIC_cmd(regs,tmp);
+ db_printf("cmd %x\n", tmp);
+ }
+ return 0;
+}
+
+sbic_target_state(tgt)
+ target_info_t *tgt;
+{
+ if (tgt == 0)
+ tgt = sbic_softc[0]->active_target;
+ if (tgt == 0)
+ return 0;
+ db_printf("@x%x: fl %x dma %X+%x cmd %x@%X id %x per %x off %x ior %X ret %X\n",
+ tgt,
+ tgt->flags, tgt->dma_ptr, tgt->transient_state.dma_offset, tgt->cur_cmd,
+ tgt->cmd_ptr, tgt->target_id, tgt->sync_period, tgt->sync_offset,
+ tgt->ior, tgt->done);
+ if (tgt->flags & TGT_DISCONNECTED){
+ script_t spt;
+
+ spt = tgt->transient_state.script;
+ db_printf("disconnected at ");
+ db_printsym(spt,1);
+ db_printf(": %x %x ", spt->condition.csr, spt->condition.pha);
+ db_printsym(spt->action,1);
+ db_printf(", ");
+ db_printsym(tgt->transient_state.handler, 1);
+ db_printf("\n");
+ }
+
+ return 0;
+}
+
+sbic_all_targets(unit)
+{
+ int i;
+ target_info_t *tgt;
+ for (i = 0; i < 8; i++) {
+ tgt = sbic_softc[unit]->sc->target[i];
+ if (tgt)
+ sbic_target_state(tgt);
+ }
+}
+
+sbic_script_state(unit)
+{
+ script_t spt = sbic_softc[unit]->script;
+
+ if (spt == 0) return 0;
+ db_printsym(spt,1);
+ db_printf(": %x %x ", spt->condition.csr, spt->condition.pha);
+ db_printsym(spt->action,1);
+ db_printf(", ");
+ db_printsym(sbic_softc[unit]->error_handler, 1);
+ return 0;
+}
+
+#define TRMAX 200
+int tr[TRMAX+3];
+int trpt, trpthi;
+#define TR(x) tr[trpt++] = x
+#define TRWRAP trpthi = trpt; trpt = 0;
+#define TRCHECK if (trpt > TRMAX) {TRWRAP}
+
+#define TRACE
+
+#ifdef TRACE
+
+#define LOGSIZE 256
+int sbic_logpt;
+char sbic_log[LOGSIZE];
+
+#define MAXLOG_VALUE 0x1e
+struct {
+ char *name;
+ unsigned int count;
+} logtbl[MAXLOG_VALUE];
+
+static LOG(e,f)
+ char *f;
+{
+ sbic_log[sbic_logpt++] = (e);
+ if (sbic_logpt == LOGSIZE) sbic_logpt = 0;
+ if ((e) < MAXLOG_VALUE) {
+ logtbl[(e)].name = (f);
+ logtbl[(e)].count++;
+ }
+}
+
+sbic_print_log(skip)
+ int skip;
+{
+ register int i, j;
+ register unsigned char c;
+
+ for (i = 0, j = sbic_logpt; i < LOGSIZE; i++) {
+ c = sbic_log[j];
+ if (++j == LOGSIZE) j = 0;
+ if (skip-- > 0)
+ continue;
+ if (c < MAXLOG_VALUE)
+ db_printf(" %s", logtbl[c].name);
+ else
+ db_printf("-x%x", c & 0x7f);
+ }
+}
+
+sbic_print_stat()
+{
+ register int i;
+ register char *p;
+ for (i = 0; i < MAXLOG_VALUE; i++) {
+ if (p = logtbl[i].name)
+ printf("%d %s\n", logtbl[i].count, p);
+ }
+}
+
+#else /*TRACE*/
+#define LOG(e,f)
+#define LOGSIZE
+#endif /*TRACE*/
+
+#else /*DEBUG*/
+#define PRINT(x)
+#define LOG(e,f)
+#define LOGSIZE
+
+#endif /*DEBUG*/
+
+
+/*
+ * Probe/Slave/Attach functions
+ */
+
+/*
+ * Probe routine:
+ * Should find out (a) if the controller is
+ * present and (b) which/where slaves are present.
+ *
+ * Implementation:
+ * Send a test-unit-ready to each possible target on the bus
+ * except of course ourselves.
+ */
+sbic_probe(reg, ui)
+ unsigned reg;
+ struct bus_ctlr *ui;
+{
+ int unit = ui->unit;
+ sbic_softc_t sbic = &sbic_softc_data[unit];
+ int target_id;
+ scsi_softc_t *sc;
+ register sbic_padded_regmap_t *regs;
+ spl_t s;
+ boolean_t did_banner = FALSE;
+
+ /*
+ * We are only called if the right board is there,
+ * but make sure anyways..
+ */
+ if (check_memory(reg, 0))
+ return 0;
+
+#if MAPPABLE
+ /* Mappable version side */
+ SBIC_probe(reg, ui);
+#endif /*MAPPABLE*/
+
+ /*
+ * Initialize hw descriptor, cache some pointers
+ */
+ sbic_softc[unit] = sbic;
+ sbic->regs = (sbic_padded_regmap_t *) (reg);
+
+ if ((sbic->dma_ops = (scsi_dma_ops_t *)sbic_std[unit]) == 0)
+ /* use same as unit 0 if undefined */
+ sbic->dma_ops = (scsi_dma_ops_t *)sbic_std[0];
+
+ sbic->dma_state = (*sbic->dma_ops->init)(unit, reg);
+
+ queue_init(&sbic->waiting_targets);
+
+ sc = scsi_master_alloc(unit, sbic);
+ sbic->sc = sc;
+
+ sc->go = sbic_go;
+ sc->watchdog = scsi_watchdog;
+ sc->probe = sbic_probe_target;
+ sbic->wd.reset = sbic_reset_scsibus;
+
+#ifdef MACH_KERNEL
+ sc->max_dma_data = -1;
+#else
+ sc->max_dma_data = scsi_per_target_virtual;
+#endif
+
+ regs = sbic->regs;
+
+ /*
+ * Reset chip, fully. Note that interrupts are already enabled.
+ */
+ s = splbio();
+ if (sbic_reset(regs, TRUE))
+ sbic->state |= SBIC_STATE_AM_MODE;
+
+ /*
+ * Our SCSI id on the bus.
+ * The user can probably set this via the prom.
+ * If not, it is easy to fix: make a default that
+ * can be changed as boot arg. Otherwise we keep
+ * what the prom used.
+ */
+#ifdef unneeded
+ SET_SBIC_myid(regs, (scsi_initiator_id[unit] & 0x7));
+ sbic_reset(regs, TRUE);
+#endif
+ GET_SBIC_myid(regs,sc->initiator_id);
+ sc->initiator_id &= 0x7;
+ printf("%s%d: SCSI id %d", ui->name, unit, sc->initiator_id);
+
+ /*
+ * For all possible targets, see if there is one and allocate
+ * a descriptor for it if it is there.
+ */
+ for (target_id = 0; target_id < 8; target_id++) {
+ register unsigned char asr, csr, pha;
+ register scsi_status_byte_t status;
+
+ /* except of course ourselves */
+ if (target_id == sc->initiator_id)
+ continue;
+
+ SBIC_TC_SET(regs,0);
+ SET_SBIC_selid(regs,target_id);
+		SET_SBIC_timeo(regs,SBIC_TIMEOUT(250,SBIC_CLOCK_FREQUENCY));
+
+ /*
+ * See if the unit is ready.
+ * XXX SHOULD inquiry LUN 0 instead !!!
+ */
+ {
+		scsi_command_test_unit_ready_t	c;
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_TEST_UNIT_READY;
+ SBIC_LOAD_COMMAND(regs,&c,sizeof(c));
+ }
+
+ /* select and send it */
+ SET_SBIC_cmd(regs,SBIC_CMD_SEL_XFER);
+
+ /* wait for the chip to complete, or timeout */
+ asr = sbic_wait(regs, SBIC_ASR_INT);
+ GET_SBIC_csr(regs,csr);
+
+ /*
+ * Check if the select timed out
+ */
+ GET_SBIC_cmd_phase(regs,pha);
+ if ((SBIC_CPH(pha) == 0) && (csr & SBIC_CSR_CMD_ERR)) {
+			/* no one out there */
+#if notsure
+ SET_SBIC_cmd(regs,SBIC_CMD_DISC);
+ asr = sbic_wait(regs, SBIC_ASR_INT);
+ GET_SBIC_csr(regs,csr);
+#endif
+ continue;
+ }
+
+ printf(",%s%d", did_banner++ ? " " : " target(s) at ",
+ target_id);
+
+ if (SBIC_CPH(pha) < 0x60)
+ /* XXX recover by hand XXX */
+ panic(" target acts weirdo");
+
+ GET_SBIC_tlun(regs,status.bits);
+
+ if (status.st.scsi_status_code != SCSI_ST_GOOD)
+ scsi_error( 0, SCSI_ERR_STATUS, status.bits, 0);
+
+ /*
+ * Found a target
+ */
+ sbic->ntargets++;
+ {
+ register target_info_t *tgt;
+ tgt = scsi_slave_alloc(sc->masterno, target_id, sbic);
+
+ (*sbic->dma_ops->new_target)(sbic->dma_state, tgt);
+ }
+ }
+
+ printf(".\n");
+
+ splx(s);
+ return 1;
+}
+
+boolean_t
+sbic_probe_target(tgt, ior)
+ target_info_t *tgt;
+ io_req_t ior;
+{
+ sbic_softc_t sbic = sbic_softc[tgt->masterno];
+ boolean_t newlywed;
+
+ newlywed = (tgt->cmd_ptr == 0);
+ if (newlywed) {
+ (*sbic->dma_ops->new_target)(sbic->dma_state, tgt);
+ }
+
+ if (scsi_inquiry(tgt, SCSI_INQ_STD_DATA) == SCSI_RET_DEVICE_DOWN)
+ return FALSE;
+
+ tgt->flags = TGT_ALIVE;
+ return TRUE;
+}
+
+static sbic_wait(regs, until)
+ sbic_padded_regmap_t *regs;
+ char until;
+{
+ register unsigned char val;
+ int timeo = 1000000;
+
+ GET_SBIC_asr(regs,val);
+ while ((val & until) == 0) {
+ if (!timeo--) {
+ printf("sbic_wait TIMEO with x%x\n", regs->sbic_csr);
+ break;
+ }
+ delay(1);
+ GET_SBIC_asr(regs,val);
+ }
+ return val;
+}
+
+boolean_t
+sbic_reset(regs, quick)
+	sbic_padded_regmap_t *regs;
+	boolean_t quick;
+{
+ char my_id, csr;
+
+ /* preserve our ID for now */
+ GET_SBIC_myid(regs,my_id);
+ my_id &= SBIC_ID_MASK;
+
+ if (SBIC_CLOCK_FREQUENCY < 11)
+ my_id |= SBIC_ID_FS_8_10;
+ else if (SBIC_CLOCK_FREQUENCY < 16)
+ my_id |= SBIC_ID_FS_12_15;
+ else if (SBIC_CLOCK_FREQUENCY < 21)
+ my_id |= SBIC_ID_FS_16_20;
+
+ my_id |= SBIC_ID_EAF|SBIC_ID_EHP;
+
+	SET_SBIC_myid(regs,my_id);
+ wbflush();
+
+ /*
+ * Reset chip and wait till done
+ */
+ SET_SBIC_cmd(regs,SBIC_CMD_RESET);
+ delay(25);
+
+ (void) sbic_wait(regs, SBIC_ASR_INT);
+ GET_SBIC_csr(regs,csr); /* clears interrupt also */
+
+ /*
+ * Set up various chip parameters
+ */
+ SET_SBIC_control(regs, SBIC_CTL_HHP|SBIC_CTL_EDI|SBIC_CTL_HSP|
+ SBIC_MACHINE_DMA_MODE);
+ /* will do IDI on the fly */
+ SET_SBIC_rselid(regs, SBIC_RID_ER|SBIC_RID_ES|SBIC_RID_DSP);
+ SET_SBIC_syn(regs,SBIC_SYN(0,sbic_min_period)); /* asynch for now */
+
+ /* anything else was zeroed by reset */
+
+ if (quick)
+ return (csr & SBIC_CSR_RESET_AM);
+
+ /*
+ * reset the scsi bus, the interrupt routine does the rest
+ * or you can call sbic_bus_reset().
+ */
+ /*
+ * Now HOW do I do this ? I just want to drive the SCSI "RST"
+ * signal true for about 25 usecs, but the chip has no notion
+ * of such a signal at all.  The spec suggests that the chip's
+ * reset pin be connected to the RST signal, which makes this
+ * operation a machdep one.
+ */
+ SBIC_MACHINE_RESET_SCSIBUS(regs, 30);
+
+ return (csr & SBIC_CSR_RESET_AM);
+}
+
+/*
+ * Operational functions
+ */
+
+/*
+ * Start a SCSI command on a target
+ */
+sbic_go(tgt, cmd_count, in_count, cmd_only)
+ target_info_t *tgt;
+ boolean_t cmd_only;
+{
+ sbic_softc_t sbic;
+ register spl_t s;
+ boolean_t disconn;
+ script_t scp;
+ boolean_t (*handler)();
+
+ LOG(1,"go");
+
+ sbic = (sbic_softc_t)tgt->hw_state;
+
+ tgt->transient_state.cmd_count = cmd_count; /* keep it here */
+
+ (*sbic->dma_ops->map)(sbic->dma_state, tgt);
+
+ disconn = BGET(scsi_might_disconnect,tgt->masterno,tgt->target_id);
+ disconn = disconn && (sbic->ntargets > 1);
+ disconn |= BGET(scsi_should_disconnect,tgt->masterno,tgt->target_id);
+
+ /*
+ * Setup target state
+ */
+ tgt->done = SCSI_RET_IN_PROGRESS;
+
+ handler = (disconn) ? sbic_err_disconn : sbic_err_generic;
+ scp = sbic_script_any_cmd;
+
+ switch (tgt->cur_cmd) {
+ case SCSI_CMD_READ:
+ case SCSI_CMD_LONG_READ:
+ LOG(2,"readop");
+ break;
+ case SCSI_CMD_WRITE:
+ case SCSI_CMD_LONG_WRITE:
+ LOG(0x1a,"writeop");
+ break;
+ case SCSI_CMD_INQUIRY:
+ /* This is likely the first thing out:
+ do the synch neg if so */
+ if (!cmd_only && ((tgt->flags&TGT_DID_SYNCH)==0)) {
+ scp = sbic_script_try_synch;
+ tgt->flags |= TGT_TRY_SYNCH;
+ break;
+ }
+ case SCSI_CMD_MODE_SELECT:
+ case SCSI_CMD_REASSIGN_BLOCKS:
+ case SCSI_CMD_FORMAT_UNIT:
+ tgt->transient_state.cmd_count = sizeof(scsi_command_group_0);
+ tgt->transient_state.out_count = cmd_count - sizeof(scsi_command_group_0);
+ /* fall through */
+ case SCSI_CMD_REQUEST_SENSE:
+ case SCSI_CMD_MODE_SENSE:
+ case SCSI_CMD_RECEIVE_DIAG_RESULTS:
+ case SCSI_CMD_READ_CAPACITY:
+ case SCSI_CMD_READ_BLOCK_LIMITS:
+ case SCSI_CMD_READ_TOC:
+ case SCSI_CMD_READ_SUBCH:
+ case SCSI_CMD_READ_HEADER:
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ case SCSI_CMD_TEST_UNIT_READY:
+ /*
+ * Do the synch negotiation here, unless prohibited
+ * or done already
+ */
+ if ( ! (tgt->flags & TGT_DID_SYNCH)) {
+ scp = sbic_script_try_synch;
+ tgt->flags |= TGT_TRY_SYNCH;
+ cmd_only = FALSE;
+ }
+ /* fall through */
+ default:
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ }
+
+ tgt->transient_state.script = scp;
+ tgt->transient_state.handler = handler;
+ tgt->transient_state.identify = (cmd_only) ? 0xff :
+ (disconn ? SCSI_IDENTIFY|SCSI_IFY_ENABLE_DISCONNECT :
+ SCSI_IDENTIFY);
+
+ if (in_count)
+ tgt->transient_state.in_count =
+ (in_count < tgt->block_size) ? tgt->block_size : in_count;
+ else
+ tgt->transient_state.in_count = 0;
+
+ /*
+ * See if another target is currently selected on
+ * this SCSI bus, e.g. lock the sbic structure.
+ * Note that it is the strategy routine's job
+ * to serialize ops on the same target as appropriate.
+ * XXX here and everywhere, locks!
+ */
+ /*
+ * Protection viz reconnections makes it tricky.
+ */
+ s = splbio();
+
+ if (sbic->wd.nactive++ == 0)
+ sbic->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+ if (sbic->state & SBIC_STATE_BUSY) {
+ /*
+ * Queue up this target, note that this takes care
+ * of proper FIFO scheduling of the scsi-bus.
+ */
+ LOG(3,"enqueue");
+ enqueue_tail(&sbic->waiting_targets, (queue_entry_t) tgt);
+ } else {
+ /*
+ * It is down to at most two contenders now,
+ * we will treat reconnections same as selections
+ * and let the scsi-bus arbitration process decide.
+ */
+ sbic->state |= SBIC_STATE_BUSY;
+ sbic->next_target = tgt;
+ sbic_attempt_selection(sbic);
+ /*
+ * Note that we might still lose arbitration..
+ */
+ }
+ splx(s);
+}
+
+sbic_attempt_selection(sbic)
+ sbic_softc_t sbic;
+{
+ target_info_t *tgt;
+ sbic_padded_regmap_t *regs;
+ register unsigned char val;
+ register int out_count;
+
+ regs = sbic->regs;
+ tgt = sbic->next_target;
+
+ LOG(4,"select");
+ LOG(0x80+tgt->target_id,0);
+
+ /*
+ * We own the bus now.. unless we lose arbitration
+ */
+ sbic->active_target = tgt;
+
+ /* Try to avoid reselect collisions */
+ GET_SBIC_asr(regs,val);
+ if (val & SBIC_ASR_INT)
+ return;
+
+ /*
+ * Init bus state variables
+ */
+ sbic->script = tgt->transient_state.script;
+ sbic->error_handler = tgt->transient_state.handler;
+ sbic->done = SCSI_RET_IN_PROGRESS;
+
+ sbic->out_count = 0;
+ sbic->in_count = 0;
+
+ /* Define how the identify msg should be built */
+ GET_SBIC_rselid(regs, val);
+ val &= ~(SBIC_RID_MASK|SBIC_RID_ER);
+ /* the enable reselection bit is used to build the identify msg */
+ if (tgt->transient_state.identify != 0xff)
+ val |= (tgt->transient_state.identify & SCSI_IFY_ENABLE_DISCONNECT) << 1;
+ SET_SBIC_rselid(regs, val);
+ SET_SBIC_tlun(regs, tgt->lun);
+
+ /*
+ * Start the chip going
+ */
+ out_count = (*sbic->dma_ops->start_cmd)(sbic->dma_state, tgt);
+	SBIC_TC_SET(regs, out_count);
+
+ val = tgt->target_id;
+ if (tgt->transient_state.in_count)
+ val |= SBIC_SID_FROM_SCSI;
+ SET_SBIC_selid(regs, val);
+
+	SET_SBIC_timeo(regs,SBIC_TIMEOUT(250,SBIC_CLOCK_FREQUENCY));
+
+ SET_SBIC_syn(regs,SBIC_SYN(tgt->sync_offset,tgt->sync_period));
+
+ /* ugly little help for compiler */
+#define command out_count
+ if (tgt->flags & TGT_DID_SYNCH) {
+ command = (tgt->transient_state.identify == 0xff) ?
+ SBIC_CMD_SEL_XFER :
+ SBIC_CMD_SEL_ATN_XFER; /*preferred*/
+ } else if (tgt->flags & TGT_TRY_SYNCH)
+ command = SBIC_CMD_SEL_ATN;
+ else
+ command = SBIC_CMD_SEL_XFER;
+
+ /* load 10 bytes anyways, the chip knows how much to use */
+ SBIC_LOAD_COMMAND(regs, tgt->cmd_ptr, 10);
+
+ /* Try to avoid reselect collisions */
+ GET_SBIC_asr(regs,val);
+ if (val & SBIC_ASR_INT)
+ return;
+
+ SET_SBIC_cmd_phase(regs, 0); /* not a resume */
+ SET_SBIC_cmd(regs, command);
+#undef command
+}
+
+/*
+ * Interrupt routine
+ * Take interrupts from the chip
+ *
+ * Implementation:
+ * Move along the current command's script if
+ * all is well, invoke error handler if not.
+ */
+sbic_intr(unit, spllevel)
+ spl_t spllevel;
+{
+ register sbic_softc_t sbic;
+ register script_t scp;
+ register int asr, csr, pha;
+ register sbic_padded_regmap_t *regs;
+#if MAPPABLE
+ extern boolean_t rz_use_mapped_interface;
+
+ if (rz_use_mapped_interface)
+ return SBIC_intr(unit,spllevel);
+#endif /*MAPPABLE*/
+
+ sbic = sbic_softc[unit];
+ regs = sbic->regs;
+
+ LOG(5,"\n\tintr");
+
+ /* drop spurious interrupts */
+ GET_SBIC_asr(regs, asr);
+ if ((asr & SBIC_ASR_INT) == 0)
+ return;
+
+ /* collect ephemeral information */
+ GET_SBIC_cmd_phase(regs, pha);
+ GET_SBIC_csr(regs, csr);
+
+TR(csr);TR(asr);TR(pha);TRCHECK
+
+ /* XXX verify this is indeed the case for a SCSI RST asserted */
+ if ((csr & SBIC_CSR_CAUSE) == SBIC_CSR_RESET)
+ return sbic_bus_reset(sbic);
+
+	/* we got an interrupt all right */
+ if (sbic->active_target)
+ sbic->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+ splx(spllevel); /* drop priority */
+
+ if ((sbic->state & SBIC_STATE_TARGET) ||
+ (csr == SBIC_CSR_RSLT_AM) || (csr == SBIC_CSR_RSLT_NOAM) ||
+ (csr == SBIC_CSR_SLT) || (csr == SBIC_CSR_SLT_ATN))
+ return sbic_target_intr(sbic);
+
+ /*
+ * In attempt_selection() we gave the select command even if
+ * the chip might have been reconnected already.
+ */
+ if ((csr == SBIC_CSR_RSLT_NI) || (csr == SBIC_CSR_RSLT_IFY))
+ return sbic_reconnect(sbic, csr, pha);
+
+ /*
+ * Check for parity errors
+ */
+ if (asr & SBIC_ASR_PE) {
+ char *msg;
+printf("{PE %x,%x}", asr, pha);
+
+ msg = "SCSI bus parity error";
+ /* all we can do is to throw a reset on the bus */
+ printf( "sbic%d: %s%s", sbic - sbic_softc_data, msg,
+ ", attempting recovery.\n");
+ sbic_reset(regs, FALSE);
+ return;
+ }
+
+ if ((scp = sbic->script) == 0) /* sanity */
+ return;
+
+ LOG(6,"match");
+ if (SCRIPT_MATCH(csr,pha,scp->condition)) {
+ /*
+ * Perform the appropriate operation,
+ * then proceed
+ */
+ if ((*scp->action)(sbic, csr, pha)) {
+ sbic->script = scp + 1;
+ }
+ } else
+ return (*sbic->error_handler)(sbic, csr, pha);
+}
+
+sbic_target_intr()
+{
+ panic("SBIC: TARGET MODE !!!\n");
+}
+
+/*
+ * Routines that the interrupt code might switch to
+ */
+
+boolean_t
+sbic_end(sbic, csr, pha)
+ register sbic_softc_t sbic;
+{
+ register target_info_t *tgt;
+ register io_req_t ior;
+
+ LOG(8,"end");
+
+ tgt = sbic->active_target;
+ if ((tgt->done = sbic->done) == SCSI_RET_IN_PROGRESS)
+ tgt->done = SCSI_RET_SUCCESS;
+
+ sbic->script = 0;
+
+ if (sbic->wd.nactive-- == 1)
+ sbic->wd.watchdog_state = SCSI_WD_INACTIVE;
+
+ sbic_release_bus(sbic);
+
+ if (ior = tgt->ior) {
+ (*sbic->dma_ops->end_cmd)(sbic->dma_state, tgt, ior);
+ LOG(0xA,"ops->restart");
+ (*tgt->dev_ops->restart)( tgt, TRUE);
+ }
+
+ return FALSE;
+}
+
+boolean_t
+sbic_release_bus(sbic)
+ register sbic_softc_t sbic;
+{
+ boolean_t ret = TRUE;
+
+ LOG(9,"release");
+ if (sbic->state & SBIC_STATE_COLLISION) {
+
+ LOG(0xB,"collided");
+ sbic->state &= ~SBIC_STATE_COLLISION;
+ sbic_attempt_selection(sbic);
+
+ } else if (queue_empty(&sbic->waiting_targets)) {
+
+ sbic->state &= ~SBIC_STATE_BUSY;
+ sbic->active_target = 0;
+ sbic->script = 0;
+ ret = FALSE;
+
+ } else {
+
+ LOG(0xC,"dequeue");
+ sbic->next_target = (target_info_t *)
+ dequeue_head(&sbic->waiting_targets);
+ sbic_attempt_selection(sbic);
+ }
+ return ret;
+}
+
+boolean_t
+sbic_get_status(sbic, csr, pha)
+ register sbic_softc_t sbic;
+{
+ register sbic_padded_regmap_t *regs = sbic->regs;
+ register scsi2_status_byte_t status;
+ int len;
+ io_req_t ior;
+ register target_info_t *tgt = sbic->active_target;
+
+ LOG(0xD,"get_status");
+TRWRAP
+
+ sbic->state &= ~SBIC_STATE_DMA_IN;
+
+ /*
+ * Get the status byte
+ */
+ GET_SBIC_tlun(regs, status.bits);
+
+ if (status.st.scsi_status_code != SCSI_ST_GOOD) {
+ scsi_error(sbic->active_target, SCSI_ERR_STATUS, status.bits, 0);
+ sbic->done = (status.st.scsi_status_code == SCSI_ST_BUSY) ?
+ SCSI_RET_RETRY : SCSI_RET_NEED_SENSE;
+ } else
+ sbic->done = SCSI_RET_SUCCESS;
+
+ /* Tell DMA engine we are done */
+ (*sbic->dma_ops->end_xfer)(sbic->dma_state, tgt, tgt->transient_state.in_count);
+
+ return sbic_end(sbic, csr, pha);
+
+}
+
+#if 0
+
+boolean_t
+sbic_dma_in(sbic, csr, ir)
+ register sbic_softc_t sbic;
+{
+ register target_info_t *tgt;
+ register sbic_padded_regmap_t *regs = sbic->regs;
+ register int count;
+ unsigned char ff = regs->sbic_flags;
+
+ LOG(0xE,"dma_in");
+ tgt = sbic->active_target;
+
+ sbic->state |= SBIC_STATE_DMA_IN;
+
+ count = (*sbic->dma_ops->start_datain)(sbic->dma_state, tgt);
+ SBIC_TC_PUT(regs, count);
+
+ if ((sbic->in_count = count) == tgt->transient_state.in_count)
+ return TRUE;
+ regs->sbic_cmd = sbic->script->command;
+ sbic->script = sbic_script_restart_data_in;
+ return FALSE;
+}
+
+sbic_dma_in_r(sbic, csr, ir)
+ register sbic_softc_t sbic;
+{
+ register target_info_t *tgt;
+ register sbic_padded_regmap_t *regs = sbic->regs;
+ register int count;
+ boolean_t advance_script = TRUE;
+
+
+ LOG(0xE,"dma_in");
+ tgt = sbic->active_target;
+
+ sbic->state |= SBIC_STATE_DMA_IN;
+
+ if (sbic->in_count == 0) {
+ /*
+ * Got nothing yet, we just reconnected.
+ */
+ register int avail;
+
+ /*
+ * Rather than using the messy RFB bit in cnfg2
+ * (which only works for synch xfer anyways)
+ * we just bump up the dma offset. We might
+ * endup with one more interrupt at the end,
+ * so what.
+ * This is done in sbic_err_disconn(), this
+ * way dma (of msg bytes too) is always aligned
+ */
+
+ count = (*sbic->dma_ops->restart_datain_1)
+ (sbic->dma_state, tgt);
+
+ /* common case of 8k-or-less read ? */
+ advance_script = (tgt->transient_state.in_count == count);
+
+ } else {
+
+ /*
+ * We received some data.
+ */
+ register int offset, xferred;
+
+ /*
+ * Problem: sometimes we get a 'spurious' interrupt
+ * right after a reconnect. It goes like disconnect,
+ * reconnect, dma_in_r, here but DMA is still rolling.
+ * Since there is no good reason we got here to begin with
+ * we just check for the case and dismiss it: we should
+ * get another interrupt when the TC goes to zero or the
+ * target disconnects.
+ */
+ SBIC_TC_GET(regs,xferred);
+ if (xferred != 0)
+ return FALSE;
+
+ xferred = sbic->in_count - xferred;
+ assert(xferred > 0);
+
+ tgt->transient_state.in_count -= xferred;
+ assert(tgt->transient_state.in_count > 0);
+
+ count = (*sbic->dma_ops->restart_datain_2)
+ (sbic->dma_state, tgt, xferred);
+
+ sbic->in_count = count;
+ SBIC_TC_PUT(regs, count);
+ regs->sbic_cmd = sbic->script->command;
+
+ (*sbic->dma_ops->restart_datain_3)
+ (sbic->dma_state, tgt);
+
+ /* last chunk ? */
+ if (count == tgt->transient_state.in_count)
+ sbic->script++;
+
+ return FALSE;
+ }
+
+ sbic->in_count = count;
+ SBIC_TC_PUT(regs, count);
+
+ if (!advance_script) {
+ regs->sbic_cmd = sbic->script->command;
+ }
+ return advance_script;
+}
+
+
+/* send data to target. Only called to start the xfer */
+
+boolean_t
+sbic_dma_out(sbic, csr, ir)
+ register sbic_softc_t sbic;
+{
+ register sbic_padded_regmap_t *regs = sbic->regs;
+ register int reload_count;
+ register target_info_t *tgt;
+ int command;
+
+ LOG(0xF,"dma_out");
+
+ SBIC_TC_GET(regs, reload_count);
+ sbic->extra_count = regs->sbic_flags & SBIC_FLAGS_FIFO_CNT;
+ reload_count += sbic->extra_count;
+ SBIC_TC_PUT(regs, reload_count);
+ sbic->state &= ~SBIC_STATE_DMA_IN;
+
+ tgt = sbic->active_target;
+
+ command = sbic->script->command;
+
+ if ((sbic->out_count = reload_count) >=
+ tgt->transient_state.out_count)
+ sbic->script++;
+ else
+ sbic->script = sbic_script_restart_data_out;
+
+ if ((*sbic->dma_ops->start_dataout)
+ (sbic->dma_state, tgt, &regs->sbic_cmd, command)) {
+ regs->sbic_cmd = command;
+ }
+
+ return FALSE;
+}
+
+/* send data to target. Called in two different ways:
+ (a) to restart a big transfer and
+ (b) after reconnection
+ */
+boolean_t
+sbic_dma_out_r(sbic, csr, ir)
+ register sbic_softc_t sbic;
+{
+ register sbic_padded_regmap_t *regs = sbic->regs;
+ register target_info_t *tgt;
+ boolean_t advance_script = TRUE;
+ int count;
+
+
+ LOG(0xF,"dma_out");
+
+ tgt = sbic->active_target;
+ sbic->state &= ~SBIC_STATE_DMA_IN;
+
+ if (sbic->out_count == 0) {
+ /*
+ * Nothing committed: we just got reconnected
+ */
+ count = (*sbic->dma_ops->restart_dataout_1)
+ (sbic->dma_state, tgt);
+
+ /* is this the last chunk ? */
+ advance_script = (tgt->transient_state.out_count == count);
+ } else {
+ /*
+ * We sent some data.
+ */
+ register int offset, xferred;
+
+ SBIC_TC_GET(regs,count);
+
+ /* see comment above */
+ if (count) {
+ return FALSE;
+ }
+
+ count += (regs->sbic_flags & SBIC_FLAGS_FIFO_CNT);
+ count -= sbic->extra_count;
+ xferred = sbic->out_count - count;
+ assert(xferred > 0);
+
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+
+ count = (*sbic->dma_ops->restart_dataout_2)
+ (sbic->dma_state, tgt, xferred);
+
+ /* last chunk ? */
+ if (tgt->transient_state.out_count == count)
+ goto quickie;
+
+ sbic->out_count = count;
+
+ sbic->extra_count = (*sbic->dma_ops->restart_dataout_3)
+ (sbic->dma_state, tgt, &regs->sbic_fifo);
+ SBIC_TC_PUT(regs, count);
+ regs->sbic_cmd = sbic->script->command;
+
+ (*sbic->dma_ops->restart_dataout_4)(sbic->dma_state, tgt);
+
+ return FALSE;
+ }
+
+quickie:
+ sbic->extra_count = (*sbic->dma_ops->restart_dataout_3)
+ (sbic->dma_state, tgt, &regs->sbic_fifo);
+
+ sbic->out_count = count;
+
+ SBIC_TC_PUT(regs, count);
+
+ if (!advance_script) {
+ regs->sbic_cmd = sbic->script->command;
+ }
+ return advance_script;
+}
+#endif /*0*/
+
+boolean_t
+sbic_dosynch(sbic, csr, pha)
+ register sbic_softc_t sbic;
+ register unsigned char csr, pha;
+{
+ register sbic_padded_regmap_t *regs = sbic->regs;
+ register unsigned char c;
+ int i, per, offs;
+ register target_info_t *tgt;
+
+ /*
+ * Try synch negotiation
+ * Phase is MSG_OUT here.
+ */
+ tgt = sbic->active_target;
+
+#if 0
+ regs->sbic_cmd = SBIC_CMD_FLUSH;
+ delay(2);
+
+ per = sbic_min_period;
+ if (BGET(scsi_no_synchronous_xfer,sbic->sc->masterno,tgt->target_id))
+ offs = 0;
+ else
+ offs = sbic_max_offset;
+
+ tgt->flags |= TGT_DID_SYNCH; /* only one chance */
+ tgt->flags &= ~TGT_TRY_SYNCH;
+
+ regs->sbic_fifo = SCSI_EXTENDED_MESSAGE;
+ regs->sbic_fifo = 3;
+ regs->sbic_fifo = SCSI_SYNC_XFER_REQUEST;
+ regs->sbic_fifo = sbic_to_scsi_period(regs,sbic_min_period);
+ regs->sbic_fifo = offs;
+ regs->sbic_cmd = SBIC_CMD_XFER_INFO;
+ csr = sbic_wait(regs, SBIC_CSR_INT);
+ ir = regs->sbic_intr;
+
+ if (SCSI_PHASE(csr) != SCSI_PHASE_MSG_IN)
+ gimmeabreak();
+
+ regs->sbic_cmd = SBIC_CMD_XFER_INFO;
+ csr = sbic_wait(regs, SBIC_CSR_INT);
+ ir = regs->sbic_intr;
+
+ while ((regs->sbic_flags & SBIC_FLAGS_FIFO_CNT) > 0)
+ c = regs->sbic_fifo; /* see what it says */
+
+ if (c == SCSI_MESSAGE_REJECT) {
+ printf(" did not like SYNCH xfer ");
+
+ /* Tk50s get in trouble with ATN, sigh. */
+ regs->sbic_cmd = SBIC_CMD_CLR_ATN;
+
+ goto cmd;
+ }
+
+ /*
+ * Receive the rest of the message
+ */
+ regs->sbic_cmd = SBIC_CMD_MSG_ACPT;
+ sbic_wait(regs, SBIC_CSR_INT);
+ ir = regs->sbic_intr;
+
+ if (c != SCSI_EXTENDED_MESSAGE)
+ gimmeabreak();
+
+ regs->sbic_cmd = SBIC_CMD_XFER_INFO;
+ sbic_wait(regs, SBIC_CSR_INT);
+ c = regs->sbic_intr;
+ if (regs->sbic_fifo != 3)
+ panic("sbic_dosynch");
+
+ for (i = 0; i < 3; i++) {
+ regs->sbic_cmd = SBIC_CMD_MSG_ACPT;
+ sbic_wait(regs, SBIC_CSR_INT);
+ c = regs->sbic_intr;
+
+ regs->sbic_cmd = SBIC_CMD_XFER_INFO;
+ sbic_wait(regs, SBIC_CSR_INT);
+ c = regs->sbic_intr;/*ack*/
+ c = regs->sbic_fifo;
+
+ if (i == 1) tgt->sync_period = scsi_period_to_sbic(regs,c);
+ if (i == 2) tgt->sync_offset = c;
+ }
+
+cmd:
+ regs->sbic_cmd = SBIC_CMD_MSG_ACPT;
+ csr = sbic_wait(regs, SBIC_CSR_INT);
+ c = regs->sbic_intr;
+
+ /* phase should normally be command here */
+ if (SCSI_PHASE(csr) == SCSI_PHASE_CMD) {
+ /* test unit ready or what ? */
+ regs->sbic_fifo = 0;
+ regs->sbic_fifo = 0;
+ regs->sbic_fifo = 0;
+ regs->sbic_fifo = 0;
+ regs->sbic_fifo = 0;
+ regs->sbic_fifo = 0;
+ SBIC_TC_PUT(regs,0xff);
+ regs->sbic_cmd = SBIC_CMD_XFER_PAD; /*0x98*/
+ csr = sbic_wait(regs, SBIC_CSR_INT);
+ ir = regs->sbic_intr;/*ack*/
+ }
+
+status:
+ if (SCSI_PHASE(csr) != SCSI_PHASE_STATUS)
+ gimmeabreak();
+
+#endif
+ return TRUE;
+}
+
+/*
+ * The bus was reset
+ */
+sbic_bus_reset(sbic)
+ register sbic_softc_t sbic;
+{
+ register sbic_padded_regmap_t *regs = sbic->regs;
+
+ LOG(0x1d,"bus_reset");
+
+ /*
+ * Clear bus descriptor
+ */
+ sbic->script = 0;
+ sbic->error_handler = 0;
+ sbic->active_target = 0;
+ sbic->next_target = 0;
+ sbic->state &= SBIC_STATE_AM_MODE; /* save this one bit only */
+ queue_init(&sbic->waiting_targets);
+ sbic->wd.nactive = 0;
+ (void) sbic_reset(regs, TRUE);
+
+ printf("sbic: (%d) bus reset ", ++sbic->wd.reset_count);
+ delay(scsi_delay_after_reset); /* some targets take long to reset */
+
+ if (sbic->sc == 0) /* sanity */
+ return;
+
+ scsi_bus_was_reset(sbic->sc);
+}
+
+/*
+ * Disconnect/reconnect mode ops
+ */
+
+/* save all relevant data, free the BUS */
+boolean_t
+sbic_disconnected(sbic, csr, pha)
+ register sbic_softc_t sbic;
+ register unsigned char csr, pha;
+
+{
+ register target_info_t *tgt;
+
+ LOG(0x11,"disconnected");
+
+ tgt = sbic->active_target;
+ tgt->flags |= TGT_DISCONNECTED;
+ tgt->transient_state.handler = sbic->error_handler;
+ /* anything else was saved in sbic_err_disconn() */
+
+ PRINT(("{D%d}", tgt->target_id));
+
+ sbic_release_bus(sbic);
+
+ return FALSE;
+}
+
+/* See who reconnected, restore BUS */
+boolean_t
+sbic_reconnect(sbic, csr, ir)
+ register sbic_softc_t sbic;
+ register unsigned char csr, ir;
+
+{
+ register target_info_t *tgt;
+ sbic_padded_regmap_t *regs;
+ int id, pha;
+
+ LOG(0x12,"reconnect");
+ /*
+ * See if this reconnection collided with a selection attempt
+ */
+ if (sbic->state & SBIC_STATE_BUSY)
+ sbic->state |= SBIC_STATE_COLLISION;
+
+ sbic->state |= SBIC_STATE_BUSY;
+
+ /* find tgt */
+ regs = sbic->regs;
+ GET_SBIC_rselid(regs,id);
+
+ id &= 0x7;
+
+	if ((sbic->state & SBIC_STATE_AM_MODE) == 0) {
+ /* Must pick the identify */
+ pha = 0x44;
+ } else
+ pha = 0x45;
+
+ tgt = sbic->sc->target[id];
+ if (id > 7 || tgt == 0) panic("sbic_reconnect");
+
+ /* synch things*/
+ SET_SBIC_syn(regs,SBIC_SYN(tgt->sync_offset,tgt->sync_period));
+
+ PRINT(("{R%d}", id));
+ if (sbic->state & SBIC_STATE_COLLISION)
+ PRINT(("[B %d-%d]", sbic->active_target->target_id, id));
+
+ LOG(0x80+id,0);
+
+ sbic->active_target = tgt;
+ tgt->flags &= ~TGT_DISCONNECTED;
+
+ sbic->script = tgt->transient_state.script;
+ sbic->error_handler = tgt->transient_state.handler;
+ sbic->in_count = 0;
+ sbic->out_count = 0;
+
+	/* XXX set the transfer counter and set up dma here, then: */
+
+ /* Resume the command now */
+ SET_SBIC_cmd_phase(regs, pha);
+ SET_SBIC_cmd(regs, SBIC_CMD_SEL_XFER);
+
+ return FALSE;
+}
+
+TILL HERE
+
+/*
+ * Error handlers
+ */
+
+/*
+ * Fall-back error handler.
+ */
+sbic_err_generic(sbic, csr, ir)
+ register sbic_softc_t sbic;
+{
+ LOG(0x13,"err_generic");
+
+	/* handle non-existent or powered-off devices here */
+ if ((ir == SBIC_INT_DISC) &&
+ (sbic_isa_select(sbic->cmd_was)) &&
+ (SBIC_SS(sbic->ss_was) == 0)) {
+ /* Powered off ? */
+ if (sbic->active_target->flags & TGT_FULLY_PROBED)
+ sbic->active_target->flags = 0;
+ sbic->done = SCSI_RET_DEVICE_DOWN;
+ sbic_end(sbic, csr, ir);
+ return;
+ }
+
+ switch (SCSI_PHASE(csr)) {
+ case SCSI_PHASE_STATUS:
+ if (sbic->script[-1].condition == SCSI_PHASE_STATUS) {
+ /* some are just slow to get out.. */
+ } else
+ sbic_err_to_status(sbic, csr, ir);
+ return;
+ break;
+ case SCSI_PHASE_DATAI:
+ if (sbic->script->condition == SCSI_PHASE_STATUS) {
+/* printf("{P}");*/
+ return;
+ }
+ break;
+ case SCSI_PHASE_DATAO:
+ if (sbic->script->condition == SCSI_PHASE_STATUS) {
+ /*
+ * See comment above. Actually seen on hitachis.
+ */
+/* printf("{P}");*/
+ return;
+ }
+ }
+ gimmeabreak();
+}
+
+/*
+ * Handle disconnections as exceptions
+ */
+sbic_err_disconn(sbic, csr, ir)
+ register sbic_softc_t sbic;
+ register unsigned char csr, ir;
+{
+ register sbic_padded_regmap_t *regs;
+ register target_info_t *tgt;
+ int count;
+ boolean_t callback = FALSE;
+
+ LOG(0x16,"err_disconn");
+
+ if (SCSI_PHASE(csr) != SCSI_PHASE_MSG_IN)
+ return sbic_err_generic(sbic, csr, ir);
+
+ regs = sbic->regs;
+ tgt = sbic->active_target;
+
+ switch (sbic->script->condition) {
+ case SCSI_PHASE_DATAO:
+ LOG(0x1b,"+DATAO");
+ if (sbic->out_count) {
+ register int xferred, offset;
+
+ SBIC_TC_GET(regs,xferred); /* temporary misnomer */
+ xferred += regs->sbic_flags & SBIC_FLAGS_FIFO_CNT;
+ xferred -= sbic->extra_count;
+ xferred = sbic->out_count - xferred; /* ok now */
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+
+ callback = (*sbic->dma_ops->disconn_1)
+ (sbic->dma_state, tgt, xferred);
+
+ } else {
+
+ callback = (*sbic->dma_ops->disconn_2)
+ (sbic->dma_state, tgt);
+
+ }
+ sbic->extra_count = 0;
+ tgt->transient_state.script = sbic_script_restart_data_out;
+ break;
+
+
+ case SCSI_PHASE_DATAI:
+ LOG(0x17,"+DATAI");
+ if (sbic->in_count) {
+ register int offset, xferred;
+
+ SBIC_TC_GET(regs,count);
+ xferred = sbic->in_count - count;
+ assert(xferred > 0);
+
+if (regs->sbic_flags & 0xf)
+printf("{Xf %x,%x,%x}", xferred, sbic->in_count, regs->sbic_flags & SBIC_FLAGS_FIFO_CNT);
+ tgt->transient_state.in_count -= xferred;
+ assert(tgt->transient_state.in_count > 0);
+
+ callback = (*sbic->dma_ops->disconn_3)
+ (sbic->dma_state, tgt, xferred);
+
+ tgt->transient_state.script = sbic_script_restart_data_in;
+ if (tgt->transient_state.in_count == 0)
+ tgt->transient_state.script++;
+
+ }
+ tgt->transient_state.script = sbic->script;
+ break;
+
+ case SCSI_PHASE_STATUS:
+ /* will have to restart dma */
+ SBIC_TC_GET(regs,count);
+ if (sbic->state & SBIC_STATE_DMA_IN) {
+ register int offset, xferred;
+
+ LOG(0x1a,"+STATUS+R");
+
+ xferred = sbic->in_count - count;
+ assert(xferred > 0);
+
+if (regs->sbic_flags & 0xf)
+printf("{Xf %x,%x,%x}", xferred, sbic->in_count, regs->sbic_flags & SBIC_FLAGS_FIFO_CNT);
+ tgt->transient_state.in_count -= xferred;
+/* assert(tgt->transient_state.in_count > 0);*/
+
+ callback = (*sbic->dma_ops->disconn_4)
+ (sbic->dma_state, tgt, xferred);
+
+ tgt->transient_state.script = sbic_script_restart_data_in;
+ if (tgt->transient_state.in_count == 0)
+ tgt->transient_state.script++;
+
+ } else {
+
+ /* add what's left in the fifo */
+ count += (regs->sbic_flags & SBIC_FLAGS_FIFO_CNT);
+ /* take back the extra we might have added */
+ count -= sbic->extra_count;
+ /* ..and drop that idea */
+ sbic->extra_count = 0;
+
+ LOG(0x19,"+STATUS+W");
+
+
+ if ((count == 0) && (tgt->transient_state.out_count == sbic->out_count)) {
+ /* all done */
+ tgt->transient_state.script = sbic->script;
+ tgt->transient_state.out_count = 0;
+ } else {
+ register int xferred, offset;
+
+ /* how much we xferred */
+ xferred = sbic->out_count - count;
+
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+
+ callback = (*sbic->dma_ops->disconn_5)
+ (sbic->dma_state,tgt,xferred);
+
+ tgt->transient_state.script = sbic_script_restart_data_out;
+ }
+ sbic->out_count = 0;
+ }
+ break;
+ default:
+ gimmeabreak();
+ return;
+ }
+ sbic_msg_in(sbic,csr,ir);
+ sbic->script = sbic_script_disconnect;
+ regs->sbic_cmd = SBIC_CMD_XFER_INFO|SBIC_CMD_DMA;
+ if (callback)
+ (*sbic->dma_ops->disconn_callback)(sbic->dma_state, tgt);
+}
+
+/*
+ * Watchdog
+ *
+ * We know that some (name withdrawn) disks get
+ * stuck in the middle of dma phases...
+ */
+sbic_reset_scsibus(sbic)
+ register sbic_softc_t sbic;
+{
+ register target_info_t *tgt = sbic->active_target;
+ register sbic_padded_regmap_t *regs = sbic->regs;
+ register int ir;
+
+ if (scsi_debug && tgt) {
+ int dmalen;
+ SBIC_TC_GET(sbic->regs,dmalen);
+ printf("Target %d was active, cmd x%x in x%x out x%x Sin x%x Sou x%x dmalen x%x\n",
+ tgt->target_id, tgt->cur_cmd,
+ tgt->transient_state.in_count, tgt->transient_state.out_count,
+ sbic->in_count, sbic->out_count,
+ dmalen);
+ }
+ ir = regs->sbic_intr;
+ if ((ir & SBIC_INT_RESEL) && (SCSI_PHASE(regs->sbic_csr) == SCSI_PHASE_MSG_IN)) {
+ /* getting it out of the woods is a bit tricky */
+ spl_t s = splbio();
+
+ (void) sbic_reconnect(sbic, regs->sbic_csr, ir);
+ sbic_wait(regs, SBIC_CSR_INT);
+ ir = regs->sbic_intr;
+ regs->sbic_cmd = SBIC_CMD_MSG_ACPT;
+ splx(s);
+ } else {
+ regs->sbic_cmd = SBIC_CMD_BUS_RESET;
+ delay(35);
+ }
+}
+
+#endif	/* NSBIC > 0 */
+
+#endif /* 0 */
diff --git a/scsi/adapters/scsi_5380.h b/scsi/adapters/scsi_5380.h
new file mode 100644
index 00000000..12be9221
--- /dev/null
+++ b/scsi/adapters/scsi_5380.h
@@ -0,0 +1,126 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * File: scsi_5380.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 5/91
+ *
+ * Defines for the NCR 5380 (SCSI chip), aka Am5380
+ */
+
+/*
+ * Register map
+ */
+
+typedef struct {
+ volatile unsigned char sci_data; /* r: Current data */
+#define sci_odata sci_data /* w: Out data */
+ volatile unsigned char sci_icmd; /* rw: Initiator command */
+ volatile unsigned char sci_mode; /* rw: Mode */
+ volatile unsigned char sci_tcmd; /* rw: Target command */
+ volatile unsigned char sci_bus_csr; /* r: Bus Status */
+#define sci_sel_enb sci_bus_csr /* w: Select enable */
+ volatile unsigned char sci_csr; /* r: Status */
+#define sci_dma_send sci_csr /* w: Start dma send data */
+ volatile unsigned char sci_idata; /* r: Input data */
+#define sci_trecv sci_idata /* w: Start dma receive, target */
+ volatile unsigned char sci_iack; /* r: Interrupt Acknowledge */
+#define sci_irecv sci_iack /* w: Start dma receive, initiator */
+} sci_regmap_t;
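+
+/*
+ * Note: on the 5380 a read and a write at the same offset access two
+ * different internal registers; that is why the defines above alias
+ * the write-only names (sci_odata, sci_sel_enb, sci_dma_send, ...)
+ * onto the read-side structure members.
+ */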
+
+
+/*
+ * Initiator command register
+ */
+
+#define SCI_ICMD_DATA 0x01 /* rw: Assert data bus */
+#define SCI_ICMD_ATN 0x02 /* rw: Assert ATN signal */
+#define SCI_ICMD_SEL 0x04 /* rw: Assert SEL signal */
+#define SCI_ICMD_BSY 0x08 /* rw: Assert BSY signal */
+#define SCI_ICMD_ACK 0x10 /* rw: Assert ACK signal */
+#define SCI_ICMD_LST 0x20 /* r: Lost arbitration */
+#define SCI_ICMD_DIFF SCI_ICMD_LST /* w: Differential cable */
+#define SCI_ICMD_AIP 0x40 /* r: Arbitration in progress */
+#define SCI_ICMD_TEST SCI_ICMD_AIP /* w: Test mode */
+#define SCI_ICMD_RST 0x80 /* rw: Assert RST signal */
+
+
+/*
+ * Mode register
+ */
+
+#define SCI_MODE_ARB 0x01 /* rw: Start arbitration */
+#define SCI_MODE_DMA 0x02 /* rw: Enable DMA xfers */
+#define SCI_MODE_MONBSY 0x04 /* rw: Monitor BSY signal */
+#define SCI_MODE_DMA_IE 0x08 /* rw: Enable DMA complete interrupt */
+#define SCI_MODE_PERR_IE 0x10 /* rw: Interrupt on parity errors */
+#define SCI_MODE_PAR_CHK 0x20 /* rw: Check parity */
+#define SCI_MODE_TARGET 0x40 /* rw: Target mode (Initiator if 0) */
+#define SCI_MODE_BLOCKDMA 0x80 /* rw: Block-mode DMA handshake (MBZ) */
+
+
+/*
+ * Target command register
+ */
+
+#define SCI_TCMD_IO 0x01 /* rw: Assert I/O signal */
+#define SCI_TCMD_CD 0x02 /* rw: Assert C/D signal */
+#define SCI_TCMD_MSG 0x04 /* rw: Assert MSG signal */
+#define SCI_TCMD_PHASE_MASK 0x07 /* r: Mask for current bus phase */
+#define SCI_TCMD_REQ 0x08 /* rw: Assert REQ signal */
+#define SCI_TCMD_LAST_SENT 0x80 /* ro: Last byte was xferred
+ * (not on 5380/1) */
+
+#define SCI_PHASE(x) SCSI_PHASE(x)
+
+/*
+ * Current (SCSI) Bus status
+ */
+
+#define SCI_BUS_DBP 0x01 /* r: Data Bus parity */
+#define SCI_BUS_SEL 0x02 /* r: SEL signal */
+#define SCI_BUS_IO 0x04 /* r: I/O signal */
+#define SCI_BUS_CD 0x08 /* r: C/D signal */
+#define SCI_BUS_MSG 0x10 /* r: MSG signal */
+#define SCI_BUS_REQ 0x20 /* r: REQ signal */
+#define SCI_BUS_BSY 0x40 /* r: BSY signal */
+#define SCI_BUS_RST 0x80 /* r: RST signal */
+
+#define SCI_CUR_PHASE(x) SCSI_PHASE((x)>>2)
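+
+/*
+ * The bus status register has I/O at 0x04, C/D at 0x08 and MSG at 0x10;
+ * shifting right by two lines them up with the usual MSG/CD/IO phase
+ * encoding (I/O = bit 0, C/D = bit 1, MSG = bit 2) that SCSI_PHASE()
+ * expects.  E.g. DATA IN (only I/O asserted): 0x04 >> 2 == 0x1, and
+ * STATUS (C/D and I/O asserted): 0x0c >> 2 == 0x3.
+ */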
+
+/*
+ * Bus and Status register
+ */
+
+#define SCI_CSR_ACK 0x01 /* r: ACK signal */
+#define SCI_CSR_ATN 0x02 /* r: ATN signal */
+#define SCI_CSR_DISC 0x04 /* r: Disconnected (BSY==0) */
+#define SCI_CSR_PHASE_MATCH 0x08 /* r: Bus and SCI_TCMD match */
+#define SCI_CSR_INT 0x10 /* r: Interrupt request */
+#define SCI_CSR_PERR 0x20 /* r: Parity error */
+#define SCI_CSR_DREQ 0x40 /* r: DMA request */
+#define SCI_CSR_DONE 0x80 /* r: DMA count is zero */
+
diff --git a/scsi/adapters/scsi_5380_hdw.c b/scsi/adapters/scsi_5380_hdw.c
new file mode 100644
index 00000000..2fc7d893
--- /dev/null
+++ b/scsi/adapters/scsi_5380_hdw.c
@@ -0,0 +1,2423 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * File: scsi_5380_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 4/91
+ *
+ * Bottom layer of the SCSI driver: chip-dependent functions
+ *
+ * This file contains the code that is specific to the NCR 5380
+ * SCSI chip (Host Bus Adapter in SCSI parlance): probing, start
+ * operation, and interrupt routine.
+ */
+
+/*
+ * This layer works based on small simple 'scripts' that are installed
+ * at the start of the command and drive the chip to completion.
+ * The idea comes from the specs of the NCR 53C700 'script' processor.
+ *
+ * There are various reasons for this, mainly
+ * - Performance: identify the common (successful) path, and follow it;
+ * at interrupt time no code is needed to find the current status
+ * - Code size: it should be easy to compact common operations
+ * - Adaptability: the code skeleton should adapt to different chips without
+ * terrible complications.
+ * - Error handling: it is easy to modify the actions performed
+ *   by the scripts to cope with strange but well-identified sequences
+ *
+ */
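+
+/*
+ * In outline (see struct script and sci_intr() below), the interrupt
+ * routine boils down to:
+ *
+ *	if (SCRIPT_MATCH(csr, bs) == scp->condition) {
+ *		if ((*scp->action)(sci, csr, bs))
+ *			sci->script = scp + 1;		-- advance
+ *	} else
+ *		(*sci->error_handler)(sci, csr, bs);	-- recover
+ *
+ * (simplified: the real sci_intr() also copes with reselection,
+ * bus resets and spurious interrupts)
+ */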
+
+#include <sci.h>
+#if NSCI > 0
+#include <platforms.h>
+
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <chips/busses.h>
+#include <scsi/compat_30.h>
+#include <machine/machspl.h>
+
+#include <sys/syslog.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+
+#ifdef VAXSTATION
+#define PAD(n) char n[3]
+#endif
+
+#include <scsi/adapters/scsi_5380.h>
+
+#ifdef PAD
+typedef struct {
+ volatile unsigned char sci_data; /* r: Current data */
+/*#define sci_odata sci_data /* w: Out data */
+ PAD(pad0);
+
+ volatile unsigned char sci_icmd; /* rw: Initiator command */
+ PAD(pad1);
+
+ volatile unsigned char sci_mode; /* rw: Mode */
+ PAD(pad2);
+
+ volatile unsigned char sci_tcmd; /* rw: Target command */
+ PAD(pad3);
+
+ volatile unsigned char sci_bus_csr; /* r: Bus Status */
+/*#define sci_sel_enb sci_bus_csr /* w: Select enable */
+ PAD(pad4);
+
+ volatile unsigned char sci_csr; /* r: Status */
+/*#define sci_dma_send sci_csr /* w: Start dma send data */
+ PAD(pad5);
+
+ volatile unsigned char sci_idata; /* r: Input data */
+/*#define sci_trecv sci_idata /* w: Start dma receive, target */
+ PAD(pad6);
+
+ volatile unsigned char sci_iack; /* r: Interrupt Acknowledge */
+/*#define sci_irecv sci_iack /* w: Start dma receive, initiator */
+ PAD(pad7);
+
+} sci_padded_regmap_t;
+#else
+typedef sci_regmap_t sci_padded_regmap_t;
+#endif
+
+#ifdef VAXSTATION
+#define check_memory(addr,dow) ((dow) ? wbadaddr(addr,4) : badaddr(addr,4))
+
+/* vax3100 */
+#include <chips/vs42x_rb.h>
+#define STC_5380_A VAX3100_STC_5380_A
+#define STC_5380_B VAX3100_STC_5380_B
+#define STC_DMAREG_OFF VAX3100_STC_DMAREG_OFF
+
+static int mem; /* mem++ seems to take approx 0.34 usecs */
+#define delay_1p2_us() {mem++;mem++;mem++;mem++;}
+#define my_scsi_id(ctlr) (ka3100_scsi_id((ctlr)))
+#endif /* VAXSTATION */
+
+
+#ifndef STC_5380_A /* cross compile check */
+typedef struct {
+ int sci_dma_dir, sci_dma_adr;
+} *sci_dmaregs_t;
+#define STC_DMAREG_OFF 0
+#define SCI_DMA_DIR_WRITE 0
+#define SCI_DMA_DIR_READ 1
+#define STC_5380_A 0
+#define STC_5380_B 0x100
+#define SCI_RAM_SIZE 0x10000
+#endif
+
+/*
+ * The 5380 can't tell you the scsi ID it uses, so
+ * unless there is another way, use the defaults
+ */
+#ifndef my_scsi_id
+#define my_scsi_id(ctlr) (scsi_initiator_id[(ctlr)])
+#endif
+
+/*
+ * Statically partition the DMA buffer between targets.
+ * This way we will eventually be able to attach/detach
+ * drives on the fly.  And 18k/target is enough.
+ */
+#define PER_TGT_DMA_SIZE ((SCI_RAM_SIZE/7) & ~(sizeof(int)-1))
+
+/*
+ * Round to 4k to make debug easier
+ */
+#define PER_TGT_BUFF_SIZE ((PER_TGT_DMA_SIZE >> 12) << 12)
+#define PER_TGT_BURST_SIZE (PER_TGT_BUFF_SIZE>>1)
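+
+/*
+ * For example (the RAM size is board dependent): with the 128Kb
+ * on-board buffer that the "18k/target" note above suggests,
+ * SCI_RAM_SIZE = 0x20000 gives PER_TGT_DMA_SIZE = 0x20000/7 = 18724,
+ * PER_TGT_BUFF_SIZE = 16384 and PER_TGT_BURST_SIZE = 8192 -- hence
+ * the "common case of 8k-or-less" reads and writes below.
+ */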
+
+/*
+ * Macros to make certain things a little more readable
+ */
+
+#define SCI_ACK(ptr,phase) (ptr)->sci_tcmd = (phase)
+#define SCI_CLR_INTR(regs) {register int temp = regs->sci_iack;}
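+
+/*
+ * SCI_ACK() loads the expected phase into the target command register;
+ * the 5380 compares it with the actual bus phase to produce the
+ * phase-match status (and halts DMA on a mismatch).  SCI_CLR_INTR()
+ * clears a pending interrupt via the dummy read of the interrupt
+ * acknowledge (reset parity/interrupt) register.
+ */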
+
+
+/*
+ * A script has a two parts: a pre-condition and an action.
+ * The first triggers error handling if not satisfied and in
+ * our case it is formed by the current bus phase and connected
+ * condition as per bus status bits. The action part is just a
+ * function pointer, invoked in a standard way. The script
+ * pointer is advanced only if the action routine returns TRUE.
+ * See sci_intr() for how and where this is all done.
+ */
+
+typedef struct script {
+ int condition; /* expected state at interrupt */
+ int (*action)(); /* action routine */
+} *script_t;
+
+#define SCRIPT_MATCH(cs,bs) (((bs)&SCI_BUS_BSY)|SCI_CUR_PHASE((bs)))
+
+#define SCI_PHASE_DISC 0x0 /* sort of .. */
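+
+/*
+ * SCRIPT_MATCH() reduces the chip status to "BSY bit + decoded phase",
+ * which is how the script conditions below are written, e.g.
+ * SCSI_PHASE_DATAI|SCI_BUS_BSY (the csr argument is currently unused).
+ * SCI_PHASE_DISC (0) matches once the target has dropped BSY.
+ */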
+
+
+/* forward decls of script actions */
+boolean_t
+ sci_dosynch(), /* negotiate synch xfer */
+ sci_dma_in(), /* get data from target via dma */
+ sci_dma_out(), /* send data to target via dma */
+ sci_get_status(), /* get status from target */
+ sci_end_transaction(), /* all come to an end */
+ sci_msg_in(), /* get disconnect message(s) */
+ sci_disconnected(); /* current target disconnected */
+/* forward decls of error handlers */
+boolean_t
+ sci_err_generic(), /* generic error handler */
+ sci_err_disconn(), /* when a target disconnects */
+ gimmeabreak(); /* drop into the debugger */
+
+int sci_reset_scsibus();
+boolean_t sci_probe_target();
+
+scsi_ret_t sci_select_target();
+
+#ifdef VAXSTATION
+/*
+ * This should be somewhere else, and it was a
+ * mistake to share this buffer across SCSIs.
+ */
+struct dmabuffer {
+ volatile char *base;
+ char *sbrk;
+} dmab[1];
+
+volatile char *
+sci_buffer_base(unit)
+{
+ return dmab[unit].base;
+}
+
+sci_buffer_init(dmar, ram)
+ sci_dmaregs_t dmar;
+ volatile char *ram;
+{
+ dmar->sci_dma_rammode = SCI_RAM_EXPMODE;
+ dmab[0].base = dmab[0].sbrk = (char *) ram;
+ blkclr((char *) ram, SCI_RAM_SIZE);
+}
+char *
+sci_buffer_sbrk(size)
+{
+ char *ret = dmab[0].sbrk;
+
+ dmab[0].sbrk += size;
+ if ((dmab[0].sbrk - dmab[0].base) > SCI_RAM_SIZE)
+ panic("scialloc");
+ return ret;
+}
+
+#endif /* VAXSTATION */
+
+/*
+ * State descriptor for this layer. There is one such structure
+ * per (enabled) 5380 interface
+ */
+struct sci_softc {
+ watchdog_t wd;
+ sci_padded_regmap_t *regs; /* 5380 registers */
+ sci_dmaregs_t dmar; /* DMA controller registers */
+ volatile char *buff; /* DMA buffer memory (I/O space) */
+ script_t script;
+ int (*error_handler)();
+ int in_count; /* amnt we expect to receive */
+ int out_count; /* amnt we are going to ship */
+
+ volatile char state;
+#define SCI_STATE_BUSY 0x01 /* selecting or currently connected */
+#define SCI_STATE_TARGET 0x04 /* currently selected as target */
+#define SCI_STATE_COLLISION 0x08 /* lost selection attempt */
+#define SCI_STATE_DMA_IN 0x10 /* tgt --> initiator xfer */
+
+ unsigned char ntargets; /* how many alive on this scsibus */
+ unsigned char done;
+ unsigned char extra_byte;
+
+ scsi_softc_t *sc;
+ target_info_t *active_target;
+
+ target_info_t *next_target; /* trying to seize bus */
+ queue_head_t waiting_targets;/* other targets competing for bus */
+
+} sci_softc_data[NSCI];
+
+typedef struct sci_softc *sci_softc_t;
+
+sci_softc_t sci_softc[NSCI];
+
+/*
+ * Definition of the controller for the auto-configuration program.
+ */
+
+int sci_probe(), scsi_slave(), sci_go(), sci_intr();
+void scsi_attach();
+
+vm_offset_t sci_std[NSCI] = { 0 };
+struct bus_device *sci_dinfo[NSCI*8];
+struct bus_ctlr *sci_minfo[NSCI];
+struct bus_driver sci_driver =
+ { sci_probe, scsi_slave, scsi_attach, sci_go, sci_std, "rz", sci_dinfo,
+ "sci", sci_minfo, BUS_INTR_B4_PROBE};
+
+/*
+ * Scripts
+ */
+struct script
+sci_script_data_in[] = {
+ { SCSI_PHASE_DATAI|SCI_BUS_BSY, sci_dma_in},
+ { SCSI_PHASE_STATUS|SCI_BUS_BSY, sci_get_status},
+ { SCSI_PHASE_MSG_IN|SCI_BUS_BSY, sci_end_transaction}
+},
+
+sci_script_data_out[] = {
+ { SCSI_PHASE_DATAO|SCI_BUS_BSY, sci_dma_out},
+ { SCSI_PHASE_STATUS|SCI_BUS_BSY, sci_get_status},
+ { SCSI_PHASE_MSG_IN|SCI_BUS_BSY, sci_end_transaction}
+},
+
+sci_script_cmd[] = {
+ { SCSI_PHASE_STATUS|SCI_BUS_BSY, sci_get_status},
+ { SCSI_PHASE_MSG_IN|SCI_BUS_BSY, sci_end_transaction}
+},
+
+/* Synchronous transfer neg(oti)ation */
+
+sci_script_try_synch[] = {
+ { SCSI_PHASE_MSG_OUT|SCI_BUS_BSY, sci_dosynch}
+},
+
+/* Disconnect sequence */
+
+sci_script_disconnect[] = {
+ { SCI_PHASE_DISC, sci_disconnected}
+};
+
+
+
+#define u_min(a,b) (((a) < (b)) ? (a) : (b))
+
+
+#define DEBUG
+#ifdef DEBUG
+
+sci_state(base)
+ vm_offset_t base;
+{
+ sci_padded_regmap_t *regs;
+ sci_dmaregs_t dmar;
+ extern char *sci;
+ unsigned dmadr;
+ int cnt, i;
+
+ if (base == 0)
+ base = (vm_offset_t)sci;
+
+ for (i = 0; i < 2; i++) {
+ regs = (sci_padded_regmap_t*) (base +
+ (i ? STC_5380_B : STC_5380_A));
+ dmar = (sci_dmaregs_t) ((char*)regs + STC_DMAREG_OFF);
+ SCI_DMADR_GET(dmar,dmadr);
+ SCI_TC_GET(dmar,cnt);
+
+ db_printf("scsi%d: ph %x (sb %x), mode %x, tph %x, csr %x, cmd %x, ",
+ i,
+ (unsigned) SCI_CUR_PHASE(regs->sci_bus_csr),
+ (unsigned) regs->sci_bus_csr,
+ (unsigned) regs->sci_mode,
+ (unsigned) regs->sci_tcmd,
+ (unsigned) regs->sci_csr,
+ (unsigned) regs->sci_icmd);
+ db_printf("dma%c %x @ %x\n",
+ (dmar->sci_dma_dir) ? 'I' : 'O', cnt, dmadr);
+ }
+ return 0;
+}
+sci_target_state(tgt)
+ target_info_t *tgt;
+{
+ if (tgt == 0)
+ tgt = sci_softc[0]->active_target;
+ if (tgt == 0)
+ return 0;
+ db_printf("fl %x dma %x+%x cmd %x id %x per %x off %x ior %x ret %x\n",
+ tgt->flags, tgt->dma_ptr, tgt->transient_state.dma_offset,
+ tgt->cmd_ptr, tgt->target_id, tgt->sync_period, tgt->sync_offset,
+ tgt->ior, tgt->done);
+ if (tgt->flags & TGT_DISCONNECTED){
+ script_t spt;
+
+ spt = tgt->transient_state.script;
+ db_printf("disconnected at ");
+ db_printsym(spt,1);
+ db_printf(": %x ", spt->condition);
+ db_printsym(spt->action,1);
+ db_printf(", ");
+ db_printsym(tgt->transient_state.handler, 1);
+ db_printf("\n");
+ }
+
+ return 0;
+}
+
+sci_all_targets(unit)
+{
+ int i;
+ target_info_t *tgt;
+ for (i = 0; i < 8; i++) {
+ tgt = sci_softc[unit]->sc->target[i];
+ if (tgt)
+ sci_target_state(tgt);
+ }
+}
+
+sci_script_state(unit)
+{
+ script_t spt = sci_softc[unit]->script;
+
+ if (spt == 0) return 0;
+ db_printsym(spt,1);
+ db_printf(": %x ", spt->condition);
+ db_printsym(spt->action,1);
+ db_printf(", ");
+ db_printsym(sci_softc[unit]->error_handler, 1);
+ return 0;
+
+}
+
+#define PRINT(x) if (scsi_debug) printf x
+
+#define TRMAX 200
+int tr[TRMAX+3];
+int trpt, trpthi;
+#define TR(x) tr[trpt++] = x
+#define TRWRAP trpthi = trpt; trpt = 0;
+#define TRCHECK if (trpt > TRMAX) {TRWRAP}
+
+#define TRACE
+
+#ifdef TRACE
+
+#define LOGSIZE 256
+int sci_logpt;
+char sci_log[LOGSIZE];
+
+#define MAXLOG_VALUE 0x24
+struct {
+ char *name;
+ unsigned int count;
+} logtbl[MAXLOG_VALUE];
+
+static LOG(e,f)
+ char *f;
+{
+ sci_log[sci_logpt++] = (e);
+ if (sci_logpt == LOGSIZE) sci_logpt = 0;
+ if ((e) < MAXLOG_VALUE) {
+ logtbl[(e)].name = (f);
+ logtbl[(e)].count++;
+ }
+}
+
+sci_print_log(skip)
+ int skip;
+{
+ register int i, j;
+ register unsigned char c;
+
+ for (i = 0, j = sci_logpt; i < LOGSIZE; i++) {
+ c = sci_log[j];
+ if (++j == LOGSIZE) j = 0;
+ if (skip-- > 0)
+ continue;
+ if (c < MAXLOG_VALUE)
+ db_printf(" %s", logtbl[c].name);
+ else
+ db_printf("-%d", c & 0x7f);
+ }
+ db_printf("\n");
+ return 0;
+}
+
+sci_print_stat()
+{
+ register int i;
+ register char *p;
+ for (i = 0; i < MAXLOG_VALUE; i++) {
+ if (p = logtbl[i].name)
+ printf("%d %s\n", logtbl[i].count, p);
+ }
+}
+
+#else /* TRACE */
+#define LOG(e,f)
+#endif /* TRACE */
+
+#else /* DEBUG */
+#define PRINT(x)
+#define LOG(e,f)
+#define TR(x)
+#define TRCHECK
+#define TRWRAP
+#endif /* DEBUG */
+
+
+/*
+ * Probe/Slave/Attach functions
+ */
+
+/*
+ * Probe routine:
+ * Should find out (a) if the controller is
+ * present and (b) which/where slaves are present.
+ *
+ * Implementation:
+ *	Select each possible target on the bus (except of course
+ *	ourselves) and send it a TEST UNIT READY command.
+ */
+sci_probe(reg, ui)
+ char *reg;
+ struct bus_ctlr *ui;
+{
+ int unit = ui->unit;
+ sci_softc_t sci = &sci_softc_data[unit];
+ int target_id, i;
+ scsi_softc_t *sc;
+ register sci_padded_regmap_t *regs;
+ spl_t s;
+ boolean_t did_banner = FALSE;
+ char *cmd_ptr;
+ static char *here = "sci_probe";
+
+ /*
+ * We are only called if the chip is there,
+	 * but make sure anyway..
+ */
+ regs = (sci_padded_regmap_t *) (reg);
+ if (check_memory(regs, 0))
+ return 0;
+
+#if notyet
+ /* Mappable version side */
+ SCI_probe(reg, ui);
+#endif
+
+ /*
+ * Initialize hw descriptor
+ */
+ sci_softc[unit] = sci;
+ sci->regs = regs;
+ sci->dmar = (sci_dmaregs_t)(reg + STC_DMAREG_OFF);
+ sci->buff = sci_buffer_base(0);
+
+ queue_init(&sci->waiting_targets);
+
+ sc = scsi_master_alloc(unit, sci);
+ sci->sc = sc;
+
+ sc->go = sci_go;
+ sc->probe = sci_probe_target;
+ sc->watchdog = scsi_watchdog;
+ sci->wd.reset = sci_reset_scsibus;
+
+#ifdef MACH_KERNEL
+ sc->max_dma_data = -1; /* unlimited */
+#else
+ sc->max_dma_data = scsi_per_target_virtual;
+#endif
+
+ scsi_might_disconnect[unit] = 0; /* still true */
+
+ /*
+ * Reset chip
+ */
+ s = splbio();
+ sci_reset(sci, TRUE);
+ SCI_CLR_INTR(regs);
+
+ /*
+ * Our SCSI id on the bus.
+ */
+
+ sc->initiator_id = my_scsi_id(unit);
+ printf("%s%d: my SCSI id is %d", ui->name, unit, sc->initiator_id);
+
+ /*
+ * For all possible targets, see if there is one and allocate
+ * a descriptor for it if it is there.
+ */
+ cmd_ptr = sci_buffer_sbrk(0);
+ for (target_id = 0; target_id < 8; target_id++) {
+
+ register unsigned csr, dsr;
+ scsi_status_byte_t status;
+
+ /* except of course ourselves */
+ if (target_id == sc->initiator_id)
+ continue;
+
+ if (sci_select_target( regs, sc->initiator_id, target_id, FALSE) == SCSI_RET_DEVICE_DOWN) {
+ SCI_CLR_INTR(regs);
+ continue;
+ }
+
+ printf(",%s%d", did_banner++ ? " " : " target(s) at ",
+ target_id);
+
+		/* should be command phase here: we selected w/o ATN! */
+ while (SCI_CUR_PHASE(regs->sci_bus_csr) != SCSI_PHASE_CMD)
+ ;
+
+ SCI_ACK(regs,SCSI_PHASE_CMD);
+
+ /* build command in dma area */
+ {
+ unsigned char *p = (unsigned char*) cmd_ptr;
+
+ p[0] = SCSI_CMD_TEST_UNIT_READY;
+ p[1] =
+ p[2] =
+ p[3] =
+ p[4] =
+ p[5] = 0;
+ }
+
+ sci_data_out(regs, SCSI_PHASE_CMD, 6, cmd_ptr);
+
+ while (SCI_CUR_PHASE(regs->sci_bus_csr) != SCSI_PHASE_STATUS)
+ ;
+
+ SCI_ACK(regs,SCSI_PHASE_STATUS);
+
+ sci_data_in(regs, SCSI_PHASE_STATUS, 1, &status.bits);
+
+ if (status.st.scsi_status_code != SCSI_ST_GOOD)
+ scsi_error( 0, SCSI_ERR_STATUS, status.bits, 0);
+
+ /* get cmd_complete message */
+ while (SCI_CUR_PHASE(regs->sci_bus_csr) != SCSI_PHASE_MSG_IN)
+ ;
+
+ SCI_ACK(regs,SCSI_PHASE_MSG_IN);
+
+ sci_data_in(regs, SCSI_PHASE_MSG_IN, 1, &i);
+
+ /* check disconnected, clear all intr bits */
+ while (regs->sci_bus_csr & SCI_BUS_BSY)
+ ;
+ SCI_ACK(regs,SCI_PHASE_DISC);
+
+ SCI_CLR_INTR(regs);
+
+ /* ... */
+
+ /*
+ * Found a target
+ */
+ sci->ntargets++;
+ {
+ register target_info_t *tgt;
+
+ tgt = scsi_slave_alloc(unit, target_id, sci);
+
+ /* "virtual" address for our use */
+ tgt->cmd_ptr = sci_buffer_sbrk(PER_TGT_DMA_SIZE);
+ /* "physical" address for dma engine */
+ tgt->dma_ptr = (char*)(tgt->cmd_ptr - sci->buff);
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ fdma_init(&tgt->fdma, scsi_per_target_virtual);
+#endif /*MACH_KERNEL*/
+ }
+ }
+ printf(".\n");
+
+ splx(s);
+ return 1;
+}
+
+boolean_t
+sci_probe_target(tgt, ior)
+ target_info_t *tgt;
+ io_req_t ior;
+{
+ sci_softc_t sci = sci_softc[tgt->masterno];
+ boolean_t newlywed;
+
+ newlywed = (tgt->cmd_ptr == 0);
+ if (newlywed) {
+ /* desc was allocated afresh */
+
+ /* "virtual" address for our use */
+ tgt->cmd_ptr = sci_buffer_sbrk(PER_TGT_DMA_SIZE);
+ /* "physical" address for dma engine */
+ tgt->dma_ptr = (char*)(tgt->cmd_ptr - sci->buff);
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ fdma_init(&tgt->fdma, scsi_per_target_virtual);
+#endif /*MACH_KERNEL*/
+
+ }
+
+ if (scsi_inquiry(tgt, SCSI_INQ_STD_DATA) == SCSI_RET_DEVICE_DOWN)
+ return FALSE;
+
+ tgt->flags = TGT_ALIVE;
+ return TRUE;
+}
+
+
+static sci_wait(preg, until)
+ volatile unsigned char *preg;
+{
+ int timeo = 1000000;
+ /* read it over to avoid bus glitches */
+ while ( ((*preg & until) != until) ||
+ ((*preg & until) != until) ||
+ ((*preg & until) != until)) {
+ delay(1);
+ if (!timeo--) {
+ printf("sci_wait TIMEO with x%x\n", *preg);
+ break;
+ }
+ }
+ return *preg;
+}
+
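+/*
+ * Arbitrate for the bus and select a target.  In outline: put our ID
+ * on the data bus and set ARB, wait for AIP, wait the 2.2us arbitration
+ * delay and check that we neither lost (LST) nor see a higher ID on the
+ * bus; then assert SEL (and ATN if requested) together with both IDs,
+ * drop BSY and wait up to ~250ms for the target to respond with BSY.
+ */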
+scsi_ret_t
+sci_select_target(regs, myid, id, with_atn)
+ register sci_padded_regmap_t *regs;
+ unsigned char myid, id;
+ boolean_t with_atn;
+{
+ register unsigned char bid, icmd;
+ scsi_ret_t ret = SCSI_RET_RETRY;
+
+ if ((regs->sci_bus_csr & (SCI_BUS_BSY|SCI_BUS_SEL)) &&
+ (regs->sci_bus_csr & (SCI_BUS_BSY|SCI_BUS_SEL)) &&
+ (regs->sci_bus_csr & (SCI_BUS_BSY|SCI_BUS_SEL)))
+ return ret;
+
+ /* for our purposes.. */
+ myid = 1 << myid;
+ id = 1 << id;
+
+ regs->sci_sel_enb = myid; /* if not there already */
+
+ regs->sci_odata = myid;
+ regs->sci_mode |= SCI_MODE_ARB;
+ /* AIP might not set if BSY went true after we checked */
+ for (bid = 0; bid < 20; bid++) /* 20usec circa */
+ if (regs->sci_icmd & SCI_ICMD_AIP)
+ break;
+ if ((regs->sci_icmd & SCI_ICMD_AIP) == 0) {
+ goto lost;
+ }
+
+ delay(2); /* 2.2us arb delay */
+
+ if (regs->sci_icmd & SCI_ICMD_LST) {
+ goto lost;
+ }
+
+ regs->sci_mode &= ~SCI_MODE_PAR_CHK;
+ bid = regs->sci_data;
+
+ if ((bid & ~myid) > myid) {
+ goto lost;
+ }
+ if (regs->sci_icmd & SCI_ICMD_LST) {
+ goto lost;
+ }
+
+ /* Won arbitration, enter selection phase now */
+ icmd = regs->sci_icmd & ~(SCI_ICMD_DIFF|SCI_ICMD_TEST);
+ icmd |= (with_atn ? (SCI_ICMD_SEL|SCI_ICMD_ATN) : SCI_ICMD_SEL);
+ regs->sci_icmd = icmd;
+
+ if (regs->sci_icmd & SCI_ICMD_LST) {
+ goto nosel;
+ }
+
+ /* XXX a target that violates specs might still drive the bus XXX */
+	/* XXX should put our id out, and after the delay check       XXX */
+	/* XXX nothing else is out there.                              XXX */
+
+ delay_1p2_us();
+
+ regs->sci_sel_enb = 0;
+
+ regs->sci_odata = myid | id;
+
+ icmd |= SCI_ICMD_BSY|SCI_ICMD_DATA;
+ regs->sci_icmd = icmd;
+
+ regs->sci_mode &= ~SCI_MODE_ARB; /* 2 deskew delays, too */
+
+ icmd &= ~SCI_ICMD_BSY;
+ regs->sci_icmd = icmd;
+
+ /* bus settle delay, 400ns */
+ delay(0); /* too much ? */
+
+ regs->sci_mode |= SCI_MODE_PAR_CHK;
+
+ {
+ register int timeo = 2500;/* 250 msecs in 100 usecs chunks */
+ while ((regs->sci_bus_csr & SCI_BUS_BSY) == 0)
+ if (--timeo > 0)
+ delay(100);
+ else {
+ goto nodev;
+ }
+ }
+
+ icmd &= ~(SCI_ICMD_DATA|SCI_ICMD_SEL);
+ regs->sci_icmd = icmd;
+/* regs->sci_sel_enb = myid;*/ /* looks like we should NOT have it */
+ return SCSI_RET_SUCCESS;
+nodev:
+ ret = SCSI_RET_DEVICE_DOWN;
+ regs->sci_sel_enb = myid;
+nosel:
+ icmd &= ~(SCI_ICMD_DATA|SCI_ICMD_SEL|SCI_ICMD_ATN);
+ regs->sci_icmd = icmd;
+lost:
+ bid = regs->sci_mode;
+ bid &= ~SCI_MODE_ARB;
+ bid |= SCI_MODE_PAR_CHK;
+ regs->sci_mode = bid;
+
+ return ret;
+}
+
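+/*
+ * Programmed-I/O transfer helpers: for each byte they wait for the
+ * target to raise REQ, put (or take) the byte while pulsing ACK, then
+ * wait for REQ to drop again.  Each status read is repeated three
+ * times to filter bus glitches, as in sci_wait() above.  Both return
+ * the number of bytes left untransferred (0 on success).
+ */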
+sci_data_out(regs, phase, count, data)
+ register sci_padded_regmap_t *regs;
+ unsigned char *data;
+{
+ register unsigned char icmd;
+
+ /* ..checks.. */
+
+ icmd = regs->sci_icmd & ~(SCI_ICMD_DIFF|SCI_ICMD_TEST);
+loop:
+ if (SCI_CUR_PHASE(regs->sci_bus_csr) != phase)
+ return count;
+
+ while ( ((regs->sci_bus_csr & SCI_BUS_REQ) == 0) &&
+ ((regs->sci_bus_csr & SCI_BUS_REQ) == 0) &&
+ ((regs->sci_bus_csr & SCI_BUS_REQ) == 0))
+ ;
+ icmd |= SCI_ICMD_DATA;
+ regs->sci_icmd = icmd;
+ regs->sci_odata = *data++;
+ icmd |= SCI_ICMD_ACK;
+ regs->sci_icmd = icmd;
+
+ icmd &= ~(SCI_ICMD_DATA|SCI_ICMD_ACK);
+ while ( (regs->sci_bus_csr & SCI_BUS_REQ) &&
+ (regs->sci_bus_csr & SCI_BUS_REQ) &&
+ (regs->sci_bus_csr & SCI_BUS_REQ))
+ ;
+ regs->sci_icmd = icmd;
+ if (--count > 0)
+ goto loop;
+ return 0;
+}
+
+sci_data_in(regs, phase, count, data)
+ register sci_padded_regmap_t *regs;
+ unsigned char *data;
+{
+ register unsigned char icmd;
+
+ /* ..checks.. */
+
+ icmd = regs->sci_icmd & ~(SCI_ICMD_DIFF|SCI_ICMD_TEST);
+loop:
+ if (SCI_CUR_PHASE(regs->sci_bus_csr) != phase)
+ return count;
+
+ while ( ((regs->sci_bus_csr & SCI_BUS_REQ) == 0) &&
+ ((regs->sci_bus_csr & SCI_BUS_REQ) == 0) &&
+ ((regs->sci_bus_csr & SCI_BUS_REQ) == 0))
+ ;
+ *data++ = regs->sci_data;
+ icmd |= SCI_ICMD_ACK;
+ regs->sci_icmd = icmd;
+
+ icmd &= ~SCI_ICMD_ACK;
+ while ( (regs->sci_bus_csr & SCI_BUS_REQ) &&
+ (regs->sci_bus_csr & SCI_BUS_REQ) &&
+ (regs->sci_bus_csr & SCI_BUS_REQ))
+ ;
+ regs->sci_icmd = icmd;
+ if (--count > 0)
+ goto loop;
+ return 0;
+
+}
+
+sci_reset(sci, quickly)
+ sci_softc_t sci;
+ boolean_t quickly;
+{
+ register sci_padded_regmap_t *regs = sci->regs;
+ register sci_dmaregs_t dma = sci->dmar;
+ int dummy;
+
+ regs->sci_icmd = SCI_ICMD_TEST; /* don't drive outputs */
+ regs->sci_icmd = SCI_ICMD_TEST|SCI_ICMD_RST;
+ delay(25);
+ regs->sci_icmd = 0;
+
+ regs->sci_mode = SCI_MODE_PAR_CHK|SCI_MODE_PERR_IE;
+ regs->sci_tcmd = SCI_PHASE_DISC; /* make sure we do not miss transition */
+ regs->sci_sel_enb = 0;
+
+ /* idle the dma controller */
+ dma->sci_dma_adr = 0;
+ dma->sci_dma_dir = SCI_DMA_DIR_WRITE;
+ SCI_TC_PUT(dma,0);
+
+ /* clear interrupt (two might be queued?) */
+ SCI_CLR_INTR(regs);
+ SCI_CLR_INTR(regs);
+
+ if (quickly)
+ return;
+
+ /*
+ * reset the scsi bus, the interrupt routine does the rest
+ * or you can call sci_bus_reset().
+ */
+ regs->sci_icmd = SCI_ICMD_RST;
+
+}
+
+/*
+ * Operational functions
+ */
+
+/*
+ * Start a SCSI command on a target
+ */
+sci_go(tgt, cmd_count, in_count, cmd_only)
+ target_info_t *tgt;
+ boolean_t cmd_only;
+{
+ sci_softc_t sci;
+ register spl_t s;
+ boolean_t disconn;
+ script_t scp;
+ boolean_t (*handler)();
+
+ LOG(1,"go");
+
+ sci = (sci_softc_t)tgt->hw_state;
+
+ /*
+	 * We cannot do real DMA to arbitrary memory: the chip only
+	 * reaches its private RAM buffer, so data is staged through
+	 * the per-target buffer (bcopy below, sci_copyin/sci_copyout).
+ */
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ if (tgt->ior)
+ fdma_map(&tgt->fdma, tgt->ior);
+#endif /*MACH_KERNEL*/
+
+ if ((tgt->cur_cmd == SCSI_CMD_WRITE) ||
+ (tgt->cur_cmd == SCSI_CMD_LONG_WRITE)){
+ io_req_t ior = tgt->ior;
+ register int len = ior->io_count;
+
+ tgt->transient_state.out_count = len;
+
+ if (len > PER_TGT_BUFF_SIZE)
+ len = PER_TGT_BUFF_SIZE;
+ bcopy( ior->io_data,
+ tgt->cmd_ptr + cmd_count,
+ len);
+ tgt->transient_state.copy_count = len;
+
+ /* avoid leaks */
+ if (len < tgt->block_size) {
+ bzero( tgt->cmd_ptr + cmd_count + len,
+ tgt->block_size - len);
+ tgt->transient_state.out_count = tgt->block_size;
+ }
+ } else {
+ tgt->transient_state.out_count = 0;
+ tgt->transient_state.copy_count = 0;
+ }
+
+ tgt->transient_state.cmd_count = cmd_count;
+
+ disconn = BGET(scsi_might_disconnect,tgt->masterno,tgt->target_id);
+ disconn = disconn && (sci->ntargets > 1);
+ disconn |= BGET(scsi_should_disconnect,tgt->masterno,tgt->target_id);
+
+ /*
+ * Setup target state
+ */
+ tgt->done = SCSI_RET_IN_PROGRESS;
+
+ handler = (disconn) ? sci_err_disconn : sci_err_generic;
+
+ switch (tgt->cur_cmd) {
+ case SCSI_CMD_READ:
+ case SCSI_CMD_LONG_READ:
+ LOG(0x13,"readop");
+ scp = sci_script_data_in;
+ break;
+ case SCSI_CMD_WRITE:
+ case SCSI_CMD_LONG_WRITE:
+ LOG(0x14,"writeop");
+ scp = sci_script_data_out;
+ break;
+ case SCSI_CMD_INQUIRY:
+ /* This is likely the first thing out:
+ do the synch neg if so */
+ if (!cmd_only && ((tgt->flags&TGT_DID_SYNCH)==0)) {
+ scp = sci_script_try_synch;
+ tgt->flags |= TGT_TRY_SYNCH;
+ break;
+ }
+ case SCSI_CMD_REQUEST_SENSE:
+ case SCSI_CMD_MODE_SENSE:
+ case SCSI_CMD_RECEIVE_DIAG_RESULTS:
+ case SCSI_CMD_READ_CAPACITY:
+ case SCSI_CMD_READ_BLOCK_LIMITS:
+ case SCSI_CMD_READ_TOC:
+ case SCSI_CMD_READ_SUBCH:
+ case SCSI_CMD_READ_HEADER:
+ case 0xc4: /* despised: SCSI_CMD_DEC_PLAYBACK_STATUS */
+ case 0xdd: /* despised: SCSI_CMD_NEC_READ_SUBCH_Q */
+ case 0xde: /* despised: SCSI_CMD_NEC_READ_TOC */
+ scp = sci_script_data_in;
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ case SCSI_CMD_MODE_SELECT:
+ case SCSI_CMD_REASSIGN_BLOCKS:
+ case SCSI_CMD_FORMAT_UNIT:
+ case 0xc9: /* vendor-spec: SCSI_CMD_DEC_PLAYBACK_CONTROL */
+ tgt->transient_state.cmd_count = sizeof_scsi_command(tgt->cur_cmd);
+ tgt->transient_state.out_count =
+ cmd_count - tgt->transient_state.cmd_count;
+ scp = sci_script_data_out;
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ case SCSI_CMD_TEST_UNIT_READY:
+ /*
+ * Do the synch negotiation here, unless prohibited
+ * or done already
+ */
+ if (tgt->flags & TGT_DID_SYNCH) {
+ scp = sci_script_cmd;
+ } else {
+ scp = sci_script_try_synch;
+ tgt->flags |= TGT_TRY_SYNCH;
+ cmd_only = FALSE;
+ }
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ default:
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ scp = sci_script_cmd;
+ }
+
+ tgt->transient_state.script = scp;
+ tgt->transient_state.handler = handler;
+ tgt->transient_state.identify = (cmd_only) ? 0xff :
+ (disconn ? SCSI_IDENTIFY|SCSI_IFY_ENABLE_DISCONNECT :
+ SCSI_IDENTIFY);
+
+ if (in_count)
+ tgt->transient_state.in_count =
+ (in_count < tgt->block_size) ? tgt->block_size : in_count;
+ else
+ tgt->transient_state.in_count = 0;
+ tgt->transient_state.dma_offset = 0;
+
+ /*
+ * See if another target is currently selected on
+ * this SCSI bus, e.g. lock the sci structure.
+ * Note that it is the strategy routine's job
+ * to serialize ops on the same target as appropriate.
+ * XXX here and everywhere, locks!
+ */
+ /*
+	 * Protection vis-a-vis reconnections makes it tricky.
+ */
+/* s = splbio();*/
+ s = splhigh();
+
+ if (sci->wd.nactive++ == 0)
+ sci->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+ if (sci->state & SCI_STATE_BUSY) {
+ /*
+ * Queue up this target, note that this takes care
+ * of proper FIFO scheduling of the scsi-bus.
+ */
+ LOG(3,"enqueue");
+ enqueue_tail(&sci->waiting_targets, (queue_entry_t) tgt);
+ } else {
+ /*
+ * It is down to at most two contenders now,
+ * we will treat reconnections same as selections
+ * and let the scsi-bus arbitration process decide.
+ */
+ sci->state |= SCI_STATE_BUSY;
+ sci->next_target = tgt;
+ sci_attempt_selection(sci);
+ /*
+ * Note that we might still lose arbitration..
+ */
+ }
+ splx(s);
+}
+
+sci_attempt_selection(sci)
+ sci_softc_t sci;
+{
+ target_info_t *tgt;
+ register int out_count;
+ sci_padded_regmap_t *regs;
+ sci_dmaregs_t dmar;
+ register int cmd;
+ boolean_t ok;
+ scsi_ret_t ret;
+
+ regs = sci->regs;
+ dmar = sci->dmar;
+ tgt = sci->next_target;
+
+ LOG(4,"select");
+ LOG(0x80+tgt->target_id,0);
+
+ /*
+ * Init bus state variables and set registers.
+ */
+ sci->active_target = tgt;
+
+ /* reselection pending ? */
+ if ((regs->sci_bus_csr & (SCI_BUS_BSY|SCI_BUS_SEL)) &&
+ (regs->sci_bus_csr & (SCI_BUS_BSY|SCI_BUS_SEL)) &&
+ (regs->sci_bus_csr & (SCI_BUS_BSY|SCI_BUS_SEL)))
+ return;
+
+ sci->script = tgt->transient_state.script;
+ sci->error_handler = tgt->transient_state.handler;
+ sci->done = SCSI_RET_IN_PROGRESS;
+
+ sci->in_count = 0;
+ sci->out_count = 0;
+ sci->extra_byte = 0;
+
+ /*
+ * This is a bit involved, but the bottom line is we want to
+ * know after we selected with or w/o ATN if the selection
+ * went well (ret) and if it is (ok) to send the command.
+ */
+ ok = TRUE;
+ if (tgt->flags & TGT_DID_SYNCH) {
+ if (tgt->transient_state.identify == 0xff) {
+ /* Select w/o ATN */
+ ret = sci_select_target(regs, sci->sc->initiator_id,
+ tgt->target_id, FALSE);
+ } else {
+ /* Select with ATN */
+ ret = sci_select_target(regs, sci->sc->initiator_id,
+ tgt->target_id, TRUE);
+ if (ret == SCSI_RET_SUCCESS) {
+ register unsigned char icmd;
+
+ while (SCI_CUR_PHASE(regs->sci_bus_csr) != SCSI_PHASE_MSG_OUT)
+ ;
+ icmd = regs->sci_icmd & ~(SCI_ICMD_DIFF|SCI_ICMD_TEST);
+ icmd &= ~SCI_ICMD_ATN;
+ regs->sci_icmd = icmd;
+ SCI_ACK(regs,SCSI_PHASE_MSG_OUT);
+ ok = (sci_data_out(regs, SCSI_PHASE_MSG_OUT,
+ 1, &tgt->transient_state.identify) == 0);
+ }
+ }
+ } else if (tgt->flags & TGT_TRY_SYNCH) {
+ /* Select with ATN, do the synch xfer neg */
+ ret = sci_select_target(regs, sci->sc->initiator_id,
+ tgt->target_id, TRUE);
+ if (ret == SCSI_RET_SUCCESS) {
+ while (SCI_CUR_PHASE(regs->sci_bus_csr) != SCSI_PHASE_MSG_OUT)
+ ;
+ ok = sci_dosynch( sci, regs->sci_csr, regs->sci_bus_csr);
+ }
+ } else {
+ ret = sci_select_target(regs, sci->sc->initiator_id,
+ tgt->target_id, FALSE);
+ }
+
+ if (ret == SCSI_RET_DEVICE_DOWN) {
+ sci->done = ret;
+ sci_end(sci, regs->sci_csr, regs->sci_bus_csr);
+ return;
+ }
+ if ((ret != SCSI_RET_SUCCESS) || !ok)
+ return;
+
+/* time this out or do it via dma !! */
+ while (SCI_CUR_PHASE(regs->sci_bus_csr) != SCSI_PHASE_CMD)
+ ;
+
+ /* set dma pointer and counter to xfer command */
+ out_count = tgt->transient_state.cmd_count;
+#if 0
+ SCI_ACK(regs,SCSI_PHASE_CMD);
+ sci_data_out(regs,SCSI_PHASE_CMD,out_count,tgt->cmd_ptr);
+ regs->sci_mode = SCI_MODE_PAR_CHK|SCI_MODE_DMA|SCI_MODE_MONBSY;
+#else
+ SCI_DMADR_PUT(dmar,tgt->dma_ptr);
+ delay_1p2_us();
+ SCI_TC_PUT(dmar,out_count);
+ dmar->sci_dma_dir = SCI_DMA_DIR_WRITE;
+ SCI_ACK(regs,SCSI_PHASE_CMD);
+ SCI_CLR_INTR(regs);
+ regs->sci_mode = SCI_MODE_PAR_CHK|SCI_MODE_DMA|SCI_MODE_MONBSY;
+ regs->sci_icmd = SCI_ICMD_DATA;
+ regs->sci_dma_send = 1;
+#endif
+}
+
+/*
+ * Interrupt routine
+ * Take interrupts from the chip
+ *
+ * Implementation:
+ * Move along the current command's script if
+ * all is well, invoke error handler if not.
+ */
+sci_intr(unit)
+{
+ register sci_softc_t sci;
+ register script_t scp;
+ register unsigned csr, bs, cmd;
+ register sci_padded_regmap_t *regs;
+ boolean_t try_match;
+#if notyet
+ extern boolean_t rz_use_mapped_interface;
+
+ if (rz_use_mapped_interface)
+ return SCI_intr(unit);
+#endif
+
+ LOG(5,"\n\tintr");
+
+ sci = sci_softc[unit];
+ regs = sci->regs;
+
+ /* ack interrupt */
+ csr = regs->sci_csr;
+ bs = regs->sci_bus_csr;
+ cmd = regs->sci_icmd;
+TR(regs->sci_mode);
+ SCI_CLR_INTR(regs);
+
+TR(csr);
+TR(bs);
+TR(cmd);
+TRCHECK;
+
+ if (cmd & SCI_ICMD_RST){
+ sci_bus_reset(sci);
+ return;
+ }
+
+ /* we got an interrupt allright */
+ if (sci->active_target)
+ sci->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+ /* drop spurious calls */
+ if ((csr & SCI_CSR_INT) == 0) {
+ LOG(2,"SPURIOUS");
+ return;
+ }
+
+ /* Note: reselect has I/O asserted, select has not */
+ if ((sci->state & SCI_STATE_TARGET) ||
+ ((bs & (SCI_BUS_BSY|SCI_BUS_SEL|SCI_BUS_IO)) == SCI_BUS_SEL)) {
+ sci_target_intr(sci,csr,bs);
+ return;
+ }
+
+ scp = sci->script;
+
+ /* Race: disconnecting, we get the disconnected notification
+ (csr sez BSY dropped) at the same time a reselect is active */
+ if ((csr & SCI_CSR_DISC) &&
+ scp && (scp->condition == SCI_PHASE_DISC)) {
+ (void) (*scp->action)(sci, csr, bs);
+ /* takes care of calling reconnect if necessary */
+ return;
+ }
+
+ /* check who got the bus */
+ if ((scp == 0) || (cmd & SCI_ICMD_LST) ||
+ ((bs & (SCI_BUS_BSY|SCI_BUS_SEL|SCI_BUS_IO)) == (SCI_BUS_SEL|SCI_BUS_IO))) {
+ sci_reconnect(sci, csr, bs);
+ return;
+ }
+
+ if (SCRIPT_MATCH(csr,bs) != scp->condition) {
+ if (try_match = (*sci->error_handler)(sci, csr, bs)) {
+ csr = regs->sci_csr;
+ bs = regs->sci_bus_csr;
+ }
+ } else
+ try_match = TRUE;
+
+ /* might have been side effected */
+ scp = sci->script;
+
+ if (try_match && (SCRIPT_MATCH(csr,bs) == scp->condition)) {
+ /*
+ * Perform the appropriate operation,
+ * then proceed
+ */
+ if ((*scp->action)(sci, csr, bs)) {
+ /* might have been side effected */
+ scp = sci->script;
+ sci->script = scp + 1;
+ }
+ }
+}
+
+
+sci_target_intr(sci)
+ register sci_softc_t sci;
+{
+ panic("SCI: TARGET MODE !!!\n");
+}
+
+/*
+ * All the many little things that the interrupt
+ * routine might switch to
+ */
+boolean_t
+sci_end_transaction( sci, csr, bs)
+ register sci_softc_t sci;
+{
+ register sci_padded_regmap_t *regs = sci->regs;
+ char cmc;
+
+ LOG(0x1f,"end_t");
+
+ /* Stop dma, no interrupt on disconnect */
+ regs->sci_icmd = 0;
+ regs->sci_mode &= ~(SCI_MODE_DMA|SCI_MODE_MONBSY|SCI_MODE_DMA_IE);
+/* dmar->sci_dma_dir = SCI_DMA_DIR_WRITE;/* make sure we steal not */
+
+ SCI_ACK(regs,SCSI_PHASE_MSG_IN);
+
+ regs->sci_sel_enb = (1 << sci->sc->initiator_id);
+
+ sci_data_in(regs, SCSI_PHASE_MSG_IN, 1, &cmc);
+
+ if (cmc != SCSI_COMMAND_COMPLETE)
+ printf("{T%x}", cmc);
+
+ /* check disconnected, clear all intr bits */
+ while (regs->sci_bus_csr & SCI_BUS_BSY)
+ ;
+ SCI_CLR_INTR(regs);
+ SCI_ACK(regs,SCI_PHASE_DISC);
+
+ if (!sci_end(sci, csr, bs)) {
+ SCI_CLR_INTR(regs);
+ (void) sci_reconnect(sci, csr, bs);
+ }
+ return FALSE;
+}
+
+boolean_t
+sci_end( sci, csr, bs)
+ register sci_softc_t sci;
+{
+ register target_info_t *tgt;
+ register io_req_t ior;
+ register sci_padded_regmap_t *regs = sci->regs;
+ boolean_t reconn_pending;
+
+ LOG(6,"end");
+
+ tgt = sci->active_target;
+
+ if ((tgt->done = sci->done) == SCSI_RET_IN_PROGRESS)
+ tgt->done = SCSI_RET_SUCCESS;
+
+ sci->script = 0;
+
+ if (sci->wd.nactive-- == 1)
+ sci->wd.watchdog_state = SCSI_WD_INACTIVE;
+
+ /* check reconnection not pending */
+ bs = regs->sci_bus_csr;
+ reconn_pending = ((bs & (SCI_BUS_BSY|SCI_BUS_SEL|SCI_BUS_IO)) == (SCI_BUS_SEL|SCI_BUS_IO));
+ if (!reconn_pending) {
+ sci_release_bus(sci);
+ } else {
+ sci->active_target = 0;
+/* sci->state &= ~SCI_STATE_BUSY; later */
+ }
+
+ if (ior = tgt->ior) {
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ fdma_unmap(&tgt->fdma, ior);
+#endif /*MACH_KERNEL*/
+ LOG(0xA,"ops->restart");
+ (*tgt->dev_ops->restart)( tgt, TRUE);
+ if (reconn_pending)
+ sci->state &= ~SCI_STATE_BUSY;
+ }
+
+ return (!reconn_pending);
+}
+
+boolean_t
+sci_release_bus(sci)
+ register sci_softc_t sci;
+{
+ boolean_t ret = FALSE;
+
+ LOG(9,"release");
+
+ sci->script = 0;
+
+ if (sci->state & SCI_STATE_COLLISION) {
+
+ LOG(0xB,"collided");
+ sci->state &= ~SCI_STATE_COLLISION;
+ sci_attempt_selection(sci);
+
+ } else if (queue_empty(&sci->waiting_targets)) {
+
+ sci->state &= ~SCI_STATE_BUSY;
+ sci->active_target = 0;
+ ret = TRUE;
+
+ } else {
+
+ LOG(0xC,"dequeue");
+ sci->next_target = (target_info_t *)
+ dequeue_head(&sci->waiting_targets);
+ sci_attempt_selection(sci);
+ }
+ return ret;
+}
+
+boolean_t
+sci_get_status( sci, csr, bs)
+ register sci_softc_t sci;
+{
+ register sci_padded_regmap_t *regs = sci->regs;
+ register sci_dmaregs_t dmar = sci->dmar;
+ scsi2_status_byte_t status;
+ register target_info_t *tgt;
+ unsigned int len, mode;
+
+ LOG(0xD,"get_status");
+TRWRAP;
+
+ /* Stop dma */
+ regs->sci_icmd = 0;
+ mode = regs->sci_mode;
+ regs->sci_mode = (mode & ~(SCI_MODE_DMA|SCI_MODE_DMA_IE));
+ dmar->sci_dma_dir = SCI_DMA_DIR_WRITE;/* make sure we steal not */
+
+ sci->state &= ~SCI_STATE_DMA_IN;
+
+ tgt = sci->active_target;
+
+ if (len = sci->in_count) {
+ register int count;
+ SCI_TC_GET(dmar,count);
+ if ((tgt->cur_cmd != SCSI_CMD_READ) &&
+ (tgt->cur_cmd != SCSI_CMD_LONG_READ)){
+ len -= count;
+ } else {
+ if (count) {
+#if 0
+ this is incorrect and besides..
+ tgt->ior->io_residual = count;
+#endif
+ len -= count;
+ }
+ sci_copyin( tgt, tgt->transient_state.dma_offset,
+ len, 0, 0);
+ }
+ }
+
+ /* to get the phase mismatch intr */
+ regs->sci_mode = mode;
+
+ SCI_ACK(regs,SCSI_PHASE_STATUS);
+
+ sci_data_in(regs, SCSI_PHASE_STATUS, 1, &status.bits);
+
+ SCI_TC_PUT(dmar,0);
+
+ if (status.st.scsi_status_code != SCSI_ST_GOOD) {
+ scsi_error(sci->active_target, SCSI_ERR_STATUS, status.bits, 0);
+ sci->done = (status.st.scsi_status_code == SCSI_ST_BUSY) ?
+ SCSI_RET_RETRY : SCSI_RET_NEED_SENSE;
+ } else
+ sci->done = SCSI_RET_SUCCESS;
+
+ return TRUE;
+}
+
+boolean_t
+sci_dma_in( sci, csr, bs)
+ register sci_softc_t sci;
+{
+ register target_info_t *tgt;
+ register sci_padded_regmap_t *regs = sci->regs;
+ register sci_dmaregs_t dmar = sci->dmar;
+ char *dma_ptr;
+ register int count;
+ boolean_t advance_script = TRUE;
+
+ LOG(0xE,"dma_in");
+
+ /*
+ * Problem: the 5380 pipelines xfers between the scsibus and
+ * itself and between itself and the DMA engine --> halting ?
+ * In the dmaout direction all is ok, except that (see NCR notes)
+ * the EOP interrupt is generated before the pipe is empty.
+ * In the dmain direction (here) the interrupt comes when
+ * one too many bytes have been xferred on chip!
+ *
+ * More specifically, IF we asked for count blindly and we had
+	 * more than count bytes coming (double buffering) we would end up
+ * actually xferring count+1 from the scsibus, but only count
+	 * to memory [hopefully the last byte sits in the sci_idata reg].
+ * This could be helped, except most times count is an exact multiple
+ * of the sector size which is where disks disconnect....
+ *
+ * INSTEAD, we recognize here that we expect more than count bytes
+ * coming and set the DMA count to count-1 but keep sci->in_count
+	 * above at count.  This will be wrong if the target disconnects
+	 * mid-transfer, but we can cure it.
+ *
+ * The places where this has an effect are marked by "EXTRA_BYTE"
+ */
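+
+	/*
+	 * Example (numbers illustrative only): a 16Kb read done as two
+	 * 8Kb bursts.  For the first burst the DMA counter is loaded
+	 * with count-1 = 8191 while sci->in_count stays at 8192; when
+	 * the dma-complete interrupt arrives, the 8192nd byte is still
+	 * sitting in the chip's input data register and is picked up
+	 * below ("extrab") and handed to sci_copyin() along with the
+	 * buffered data.
+	 */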
+
+ tgt = sci->active_target;
+ sci->state |= SCI_STATE_DMA_IN;
+
+ /* ought to stop dma to start another */
+ regs->sci_mode &= ~ (SCI_MODE_DMA|SCI_MODE_DMA_IE);
+ regs->sci_icmd = 0;
+
+ if (sci->in_count == 0) {
+ /*
+ * Got nothing yet: either just sent the command
+ * or just reconnected
+ */
+ register int avail;
+
+ count = tgt->transient_state.in_count;
+ count = u_min(count, (PER_TGT_BURST_SIZE));
+ avail = PER_TGT_BUFF_SIZE - tgt->transient_state.dma_offset;
+ count = u_min(count, avail);
+
+ /* common case of 8k-or-less read ? */
+ advance_script = (tgt->transient_state.in_count == count);
+
+ } else {
+
+ /*
+ * We received some data.
+ */
+ register int offset, xferred, eb;
+ unsigned char extrab = regs->sci_idata; /* EXTRA_BYTE */
+
+ SCI_TC_GET(dmar,xferred);
+ assert(xferred == 0);
+if (scsi_debug) {
+printf("{B %x %x %x (%x)}",
+ sci->in_count, xferred, sci->extra_byte, extrab);
+}
+ /* ++EXTRA_BYTE */
+ xferred = sci->in_count - xferred;
+ eb = sci->extra_byte;
+ /* --EXTRA_BYTE */
+ assert(xferred > 0);
+ tgt->transient_state.in_count -= xferred;
+ assert(tgt->transient_state.in_count > 0);
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ count = u_min(tgt->transient_state.in_count, (PER_TGT_BURST_SIZE));
+ if (tgt->transient_state.dma_offset == PER_TGT_BUFF_SIZE) {
+ tgt->transient_state.dma_offset = 0;
+ } else {
+ register int avail;
+ avail = PER_TGT_BUFF_SIZE - tgt->transient_state.dma_offset;
+ count = u_min(count, avail);
+ }
+ advance_script = (tgt->transient_state.in_count == count);
+
+ /* get some more */
+ dma_ptr = tgt->dma_ptr + tgt->transient_state.dma_offset;
+ sci->in_count = count;
+ /* ++EXTRA_BYTE */
+ if (!advance_script) {
+ sci->extra_byte = 1; /* that's the cure.. */
+ count--;
+ } else
+ sci->extra_byte = 0;
+ /* --EXTRA_BYTE */
+ SCI_TC_PUT(dmar,count);
+/* regs->sci_icmd = 0;*/
+ SCI_DMADR_PUT(dmar,dma_ptr);
+ delay_1p2_us();
+ SCI_ACK(regs,SCSI_PHASE_DATAI);
+ SCI_CLR_INTR(regs);
+ regs->sci_mode |= (advance_script ? SCI_MODE_DMA
+ : (SCI_MODE_DMA|SCI_MODE_DMA_IE));
+ dmar->sci_dma_dir = SCI_DMA_DIR_READ;
+ regs->sci_irecv = 1;
+
+ /* copy what we got */
+ sci_copyin( tgt, offset, xferred, eb, extrab);
+
+ /* last chunk ? */
+ return advance_script;
+ }
+
+ sci->in_count = count;
+ dma_ptr = tgt->dma_ptr + tgt->transient_state.dma_offset;
+
+ /* ++EXTRA_BYTE */
+ if (!advance_script) {
+ sci->extra_byte = 1; /* that's the cure.. */
+ count--;
+ } else
+ sci->extra_byte = 0;
+ /* --EXTRA_BYTE */
+
+ SCI_TC_PUT(dmar,count);
+/* regs->sci_icmd = 0;*/
+ SCI_DMADR_PUT(dmar,dma_ptr);
+ delay_1p2_us();
+ SCI_ACK(regs,SCSI_PHASE_DATAI);
+ SCI_CLR_INTR(regs);
+ regs->sci_mode |= (advance_script ? SCI_MODE_DMA
+ : (SCI_MODE_DMA|SCI_MODE_DMA_IE));
+ dmar->sci_dma_dir = SCI_DMA_DIR_READ;
+ regs->sci_irecv = 1;
+
+ return advance_script;
+}
+
+/* send data to target. Called in three different ways:
+ (a) to start transfer (b) to restart a bigger-than-8k
+ transfer (c) after reconnection
+ */
+int sci_delay = 1;
+
+boolean_t
+sci_dma_out( sci, csr, bs)
+ register sci_softc_t sci;
+{
+ register sci_padded_regmap_t *regs = sci->regs;
+ register sci_dmaregs_t dmar = sci->dmar;
+ register char *dma_ptr;
+ register target_info_t *tgt;
+ boolean_t advance_script = TRUE;
+ int count = sci->out_count;
+ spl_t s;
+ register int tmp;
+
+ LOG(0xF,"dma_out");
+
+ tgt = sci->active_target;
+ sci->state &= ~SCI_STATE_DMA_IN;
+
+ if (sci->out_count == 0) {
+ /*
+ * Nothing committed: either just sent the
+ * command or reconnected
+ */
+ register int remains;
+
+ /* ought to stop dma to start another */
+ regs->sci_mode &= ~ (SCI_MODE_DMA|SCI_MODE_DMA_IE);
+ dmar->sci_dma_dir = SCI_DMA_DIR_READ;/*hold it */
+
+ regs->sci_icmd = SCI_ICMD_DATA;
+
+ SCI_ACK(regs,SCSI_PHASE_DATAO);
+
+ count = tgt->transient_state.out_count;
+ count = u_min(count, (PER_TGT_BURST_SIZE));
+ remains = PER_TGT_BUFF_SIZE - tgt->transient_state.dma_offset;
+ count = u_min(count, remains);
+
+ /* common case of 8k-or-less write ? */
+ advance_script = (tgt->transient_state.out_count == count);
+ } else {
+ /*
+ * We sent some data.
+ * Also, take care of bogus interrupts
+ */
+ register int offset, xferred;
+
+if (sci_delay & 1) delay(1000);
+ /* ought to stop dma to start another */
+ regs->sci_mode &= ~ (SCI_MODE_DMA|SCI_MODE_DMA_IE);
+ dmar->sci_dma_dir = SCI_DMA_DIR_READ;/*hold it */
+/* regs->sci_icmd = SCI_ICMD_DATA; */
+
+ SCI_TC_GET(dmar,xferred);
+if (xferred) printf("{A %x}", xferred);
+ xferred = sci->out_count - xferred;
+ assert(xferred > 0);
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ count = u_min(tgt->transient_state.out_count, (PER_TGT_BURST_SIZE));
+ if (tgt->transient_state.dma_offset == PER_TGT_BUFF_SIZE) {
+ tgt->transient_state.dma_offset = 0;
+ } else {
+ register int remains;
+ remains = PER_TGT_BUFF_SIZE - tgt->transient_state.dma_offset;
+ count = u_min(count, remains);
+ }
+ /* last chunk ? */
+ if (tgt->transient_state.out_count == count)
+ goto quickie;
+
+ /* ship some more */
+ dma_ptr = tgt->dma_ptr +
+ tgt->transient_state.cmd_count + tgt->transient_state.dma_offset;
+ sci->out_count = count;
+ /*
+		 * Mystery: sometimes the first byte
+		 * of an 8k chunk is missing from the tape; it must
+ * be that somehow touching the 5380 registers
+ * after the dma engine is ready screws up: false DRQ?
+ */
+s = splhigh();
+ SCI_TC_PUT(dmar,count);
+/* SCI_CLR_INTR(regs);*/
+ regs->sci_mode = SCI_MODE_PAR_CHK | SCI_MODE_DMA |
+ SCI_MODE_MONBSY | SCI_MODE_DMA_IE;
+/* regs->sci_icmd = SCI_ICMD_DATA;*/
+ dmar->sci_dma_dir = SCI_DMA_DIR_WRITE;
+ SCI_DMADR_PUT(dmar,dma_ptr);
+ delay_1p2_us();
+
+ regs->sci_dma_send = 1;
+splx(s);
+ /* copy some more data */
+ sci_copyout(tgt, offset, xferred);
+ return FALSE;
+ }
+
+quickie:
+ sci->out_count = count;
+ dma_ptr = tgt->dma_ptr +
+ tgt->transient_state.cmd_count + tgt->transient_state.dma_offset;
+ tmp = (advance_script ?
+ SCI_MODE_PAR_CHK|SCI_MODE_DMA|SCI_MODE_MONBSY:
+ SCI_MODE_PAR_CHK|SCI_MODE_DMA|SCI_MODE_MONBSY|SCI_MODE_DMA_IE);
+s = splhigh();
+ SCI_TC_PUT(dmar,count);
+/* SCI_CLR_INTR(regs);*/
+ regs->sci_mode = tmp;
+/* regs->sci_icmd = SCI_ICMD_DATA;*/
+ SCI_DMADR_PUT(dmar,dma_ptr);
+ delay_1p2_us();
+ dmar->sci_dma_dir = SCI_DMA_DIR_WRITE;
+ regs->sci_dma_send = 1;
+splx(s);
+
+ return advance_script;
+}
+
+/* disconnect-reconnect ops */
+
+/* get the message in via dma */
+boolean_t
+sci_msg_in(sci, csr, bs)
+ register sci_softc_t sci;
+{
+ register target_info_t *tgt;
+ char *dma_ptr;
+ register sci_padded_regmap_t *regs = sci->regs;
+ register sci_dmaregs_t dmar = sci->dmar;
+
+ LOG(0x15,"msg_in");
+
+ tgt = sci->active_target;
+
+ dma_ptr = tgt->dma_ptr;
+ /* We would clobber the data for READs */
+ if (sci->state & SCI_STATE_DMA_IN) {
+ register int offset;
+ offset = tgt->transient_state.cmd_count + tgt->transient_state.dma_offset;
+ dma_ptr += offset;
+ }
+
+ /* ought to stop dma to start another */
+ regs->sci_mode &= ~ (SCI_MODE_DMA|SCI_MODE_DMA_IE);
+ regs->sci_icmd = 0;
+
+ /* We only really expect two bytes */
+ SCI_TC_PUT(dmar,sizeof(scsi_command_group_0));
+/* regs->sci_icmd = 0*/
+ SCI_DMADR_PUT(dmar,dma_ptr);
+ delay_1p2_us();
+ SCI_ACK(regs,SCSI_PHASE_MSG_IN);
+ SCI_CLR_INTR(regs);
+ regs->sci_mode |= SCI_MODE_DMA;
+ dmar->sci_dma_dir = SCI_DMA_DIR_READ;
+ regs->sci_irecv = 1;
+
+ return TRUE;
+}
+
+/* check the message is indeed a DISCONNECT */
+boolean_t
+sci_disconnect(sci, csr, bs)
+ register sci_softc_t sci;
+{
+ register int len;
+ boolean_t ok = FALSE;
+ register sci_dmaregs_t dmar = sci->dmar;
+ register char *msgs;
+ unsigned int offset;
+
+
+ SCI_TC_GET(dmar,len);
+ len = sizeof(scsi_command_group_0) - len;
+ PRINT(("{G%d}",len));
+
+ /* wherever it was, take it from there */
+ SCI_DMADR_GET(dmar,offset);
+ msgs = (char*)sci->buff + offset - len;
+
+ if ((len == 0) || (len > 2))
+ ok = FALSE;
+ else {
+		/* An SDP message precedes it in non-completed READs */
+ ok = ((msgs[0] == SCSI_DISCONNECT) || /* completed op */
+ ((msgs[0] == SCSI_SAVE_DATA_POINTER) && /* incomplete */
+ (msgs[1] == SCSI_DISCONNECT)));
+ }
+ if (!ok)
+ printf("[tgt %d bad msg (%d): %x]",
+ sci->active_target->target_id, len, *msgs);
+
+ return TRUE;
+}
+
+/* save all relevant data, free the BUS */
+boolean_t
+sci_disconnected(sci, csr, bs)
+ register sci_softc_t sci;
+{
+ register target_info_t *tgt;
+ sci_padded_regmap_t *regs = sci->regs;
+
+ regs->sci_mode &= ~(SCI_MODE_MONBSY|SCI_MODE_DMA);
+ SCI_CLR_INTR(regs);/*retriggered by MONBSY cuz intr routine did CLR */
+ SCI_ACK(regs,SCI_PHASE_DISC);
+
+ LOG(0x16,"disconnected");
+
+ sci_disconnect(sci,csr,bs);
+
+ tgt = sci->active_target;
+ tgt->flags |= TGT_DISCONNECTED;
+ tgt->transient_state.handler = sci->error_handler;
+ /* the rest has been saved in sci_err_disconn() */
+
+ PRINT(("{D%d}", tgt->target_id));
+
+ sci_release_bus(sci);
+
+ return FALSE;
+}
+
+/* get reconnect message, restore BUS */
+boolean_t
+sci_reconnect(sci, csr, bs)
+ register sci_softc_t sci;
+{
+ register target_info_t *tgt;
+ sci_padded_regmap_t *regs;
+ register int id;
+ int msg;
+
+ LOG(0x17,"reconnect");
+
+ if (sci->wd.nactive == 0) {
+ LOG(2,"SPURIOUS");
+ return FALSE;
+ }
+
+ regs = sci->regs;
+
+ regs->sci_mode &= ~SCI_MODE_PAR_CHK;
+ id = regs->sci_data;/*parity!*/
+ regs->sci_mode |= SCI_MODE_PAR_CHK;
+
+ /* xxx check our id is in there */
+
+ id &= ~(1 << sci->sc->initiator_id);
+ {
+ register int i;
+ for (i = 0; i < 8; i++)
+ if (id & (1 << i)) break;
+if (i == 8) {printf("{P%x}", id);return;}
+ id = i;
+ }
+ regs->sci_icmd = SCI_ICMD_BSY;
+ while (regs->sci_bus_csr & SCI_BUS_SEL)
+ ;
+ regs->sci_icmd = 0;
+ delay_1p2_us();
+ while ( ((regs->sci_bus_csr & SCI_BUS_BSY) == 0) &&
+ ((regs->sci_bus_csr & SCI_BUS_BSY) == 0) &&
+ ((regs->sci_bus_csr & SCI_BUS_BSY) == 0))
+ ;
+
+ regs->sci_mode |= SCI_MODE_MONBSY;
+
+ /* Now should wait for correct phase: REQ signals it */
+ while ( ((regs->sci_bus_csr & SCI_BUS_REQ) == 0) &&
+ ((regs->sci_bus_csr & SCI_BUS_REQ) == 0) &&
+ ((regs->sci_bus_csr & SCI_BUS_REQ) == 0))
+ ;
+
+ /*
+ * See if this reconnection collided with a selection attempt
+ */
+ if (sci->state & SCI_STATE_BUSY)
+ sci->state |= SCI_STATE_COLLISION;
+
+ sci->state |= SCI_STATE_BUSY;
+
+ /* Get identify msg */
+ bs = regs->sci_bus_csr;
+if (SCI_CUR_PHASE(bs) != SCSI_PHASE_MSG_IN) gimmeabreak();
+ SCI_ACK(regs,SCSI_PHASE_MSG_IN);
+ msg = 0;
+ sci_data_in(regs, SCSI_PHASE_MSG_IN, 1, &msg);
+ regs->sci_mode = SCI_MODE_PAR_CHK|SCI_MODE_DMA|SCI_MODE_MONBSY;
+ regs->sci_sel_enb = 0;
+
+ if (msg != SCSI_IDENTIFY)
+ printf("{I%x %x}", id, msg);
+
+ tgt = sci->sc->target[id];
+ if (id > 7 || tgt == 0) panic("sci_reconnect");
+
+ PRINT(("{R%d}", id));
+ if (sci->state & SCI_STATE_COLLISION)
+ PRINT(("[B %d-%d]", sci->active_target->target_id, id));
+
+ LOG(0x80+id,0);
+
+ sci->active_target = tgt;
+ tgt->flags &= ~TGT_DISCONNECTED;
+
+ sci->script = tgt->transient_state.script;
+ sci->error_handler = tgt->transient_state.handler;
+ sci->in_count = 0;
+ sci->out_count = 0;
+
+ /* Should get a phase mismatch when tgt changes phase */
+
+ return TRUE;
+}
+
+
+
+/* do the synch negotiation */
+boolean_t
+sci_dosynch( sci, csr, bs)
+ register sci_softc_t sci;
+{
+ /*
+ * Phase is MSG_OUT here, cmd has not been xferred
+ */
+ int len;
+ register target_info_t *tgt;
+ register sci_padded_regmap_t *regs = sci->regs;
+ unsigned char off, icmd;
+ register unsigned char *p;
+
+ regs->sci_mode |= SCI_MODE_MONBSY;
+
+ LOG(0x11,"dosync");
+
+ /* ATN still asserted */
+ SCI_ACK(regs,SCSI_PHASE_MSG_OUT);
+
+ tgt = sci->active_target;
+
+ tgt->flags |= TGT_DID_SYNCH; /* only one chance */
+ tgt->flags &= ~TGT_TRY_SYNCH;
+
+ p = (unsigned char *)tgt->cmd_ptr + tgt->transient_state.cmd_count +
+ tgt->transient_state.dma_offset;
+ p[0] = SCSI_IDENTIFY;
+ p[1] = SCSI_EXTENDED_MESSAGE;
+ p[2] = 3;
+ p[3] = SCSI_SYNC_XFER_REQUEST;
+ /* We cannot run synchronous */
+#define sci_to_scsi_period(x) 0xff
+#define scsi_period_to_sci(x) (x)
+ off = 0;
+ p[4] = sci_to_scsi_period(sci_min_period);
+ p[5] = off;
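+	/*
+	 * p[0] is the IDENTIFY that goes out first (ATN was raised at
+	 * selection time); p[1..5] is a standard extended SDTR message:
+	 * extended-message code, length 3, SYNCHRONOUS DATA TRANSFER
+	 * REQUEST, then the proposed period (in 4ns units) and offset.
+	 */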
+
+ /* xfer all but last byte with ATN set */
+ sci_data_out(regs, SCSI_PHASE_MSG_OUT,
+ sizeof(scsi_synch_xfer_req_t), p);
+ icmd = regs->sci_icmd & ~(SCI_ICMD_DIFF|SCI_ICMD_TEST);
+ icmd &= ~SCI_ICMD_ATN;
+ regs->sci_icmd = icmd;
+ sci_data_out(regs, SCSI_PHASE_MSG_OUT, 1,
+ &p[sizeof(scsi_synch_xfer_req_t)]);
+
+ /* wait for phase change */
+ while (regs->sci_csr & SCI_CSR_PHASE_MATCH)
+ ;
+ bs = regs->sci_bus_csr;
+
+	/* The standard says there is nothing else the target can do but.. */
+ if (SCI_CUR_PHASE(bs) != SCSI_PHASE_MSG_IN)
+ panic("sci_dosync");/* XXX put offline */
+
+msgin:
+ /* ack */
+ SCI_ACK(regs,SCSI_PHASE_MSG_IN);
+
+ /* get answer */
+ len = sizeof(scsi_synch_xfer_req_t);
+ len = sci_data_in(regs, SCSI_PHASE_MSG_IN, len, p);
+
+ /* do not cancel the phase mismatch interrupt ! */
+
+ /* look at the answer and see if we like it */
+ if (len || (p[0] != SCSI_EXTENDED_MESSAGE)) {
+ /* did not like it at all */
+ printf(" did not like SYNCH xfer ");
+ } else {
+ /* will NOT do synch */
+ printf(" but we cannot do SYNCH xfer ");
+ tgt->sync_period = scsi_period_to_sci(p[3]);
+ tgt->sync_offset = p[4];
+ /* sanity */
+ if (tgt->sync_offset != 0)
+ printf(" ?OFFSET %x? ", tgt->sync_offset);
+ }
+
+ /* wait for phase change */
+ while (regs->sci_csr & SCI_CSR_PHASE_MATCH)
+ ;
+ bs = regs->sci_bus_csr;
+
+ /* phase should be command now */
+ /* continue with simple command script */
+ sci->error_handler = sci_err_generic;
+ sci->script = sci_script_cmd;
+
+ if (SCI_CUR_PHASE(bs) == SCSI_PHASE_CMD )
+ return TRUE;
+
+/* sci->script++;*/
+ if (SCI_CUR_PHASE(bs) == SCSI_PHASE_STATUS )
+ return TRUE; /* intr is pending */
+
+ sci->script++;
+ if (SCI_CUR_PHASE(bs) == SCSI_PHASE_MSG_IN )
+ return TRUE;
+
+ if ((bs & SCI_BUS_BSY) == 0) /* uhu? disconnected */
+ return sci_end_transaction(sci, regs->sci_csr, regs->sci_bus_csr);
+
+ panic("sci_dosynch");
+ return FALSE;
+}
+
+/*
+ * The bus was reset
+ */
+sci_bus_reset(sci)
+ register sci_softc_t sci;
+{
+ register target_info_t *tgt;
+ register sci_padded_regmap_t *regs = sci->regs;
+ int i;
+
+ LOG(0x21,"bus_reset");
+
+ /*
+ * Clear bus descriptor
+ */
+ sci->script = 0;
+ sci->error_handler = 0;
+ sci->active_target = 0;
+ sci->next_target = 0;
+ sci->state = 0;
+ queue_init(&sci->waiting_targets);
+ sci->wd.nactive = 0;
+ sci_reset(sci, TRUE);
+
+ printf("sci%d: (%d) bus reset ", sci->sc->masterno, ++sci->wd.reset_count);
+ delay(scsi_delay_after_reset); /* some targets take long to reset */
+
+ if (sci->sc == 0) /* sanity */
+ return;
+
+ scsi_bus_was_reset(sci->sc);
+}
+
+/*
+ * Error handlers
+ */
+
+/*
+ * Generic, default handler
+ */
+boolean_t
+sci_err_generic(sci, csr, bs)
+ register sci_softc_t sci;
+{
+ register int cond = sci->script->condition;
+
+ LOG(0x10,"err_generic");
+
+ if (SCI_CUR_PHASE(bs) == SCSI_PHASE_STATUS)
+ return sci_err_to_status(sci, csr, bs);
+ gimmeabreak();
+ return FALSE;
+}
+
+/*
+ * Handle generic errors that are reported as
+ * an unexpected change to STATUS phase
+ */
+sci_err_to_status(sci, csr, bs)
+ register sci_softc_t sci;
+{
+ script_t scp = sci->script;
+
+ LOG(0x20,"err_tostatus");
+ while (SCSI_PHASE(scp->condition) != SCSI_PHASE_STATUS)
+ scp++;
+ sci->script = scp;
+#if 0
+ /*
+ * Normally, we would already be able to say the command
+ * is in error, e.g. the tape had a filemark or something.
+ * But in case we do disconnected mode WRITEs, it is quite
+ * common that the following happens:
+ * dma_out -> disconnect -> reconnect
+ * and our script might expect at this point that the dma
+ * had to be restarted (it didn't know it was completed
+ * because the tape record is shorter than we asked for).
+ * And in any event.. it is both correct and cleaner to
+ * declare error iff the STATUS byte says so.
+ */
+ sci->done = SCSI_RET_NEED_SENSE;
+#endif
+ return TRUE;
+}
+
+/*
+ * Watch for a disconnection
+ */
+boolean_t
+sci_err_disconn(sci, csr, bs)
+ register sci_softc_t sci;
+{
+ register sci_padded_regmap_t *regs;
+ register sci_dmaregs_t dmar = sci->dmar;
+ register target_info_t *tgt;
+ int count;
+
+ LOG(0x18,"err_disconn");
+
+ if (SCI_CUR_PHASE(bs) != SCSI_PHASE_MSG_IN)
+ return sci_err_generic(sci, csr, bs);
+
+ regs = sci->regs;
+
+ tgt = sci->active_target;
+
+ switch (SCSI_PHASE(sci->script->condition)) {
+ case SCSI_PHASE_DATAO:
+ LOG(0x1b,"+DATAO");
+
+if (sci_delay & 1) delay(1000);
+ /* Stop dma */
+ regs->sci_icmd = 0;
+ regs->sci_mode &= ~(SCI_MODE_DMA|SCI_MODE_DMA_IE);
+ dmar->sci_dma_dir = SCI_DMA_DIR_READ;/* make sure we steal not */
+
+ if (sci->out_count) {
+ register int xferred, offset;
+
+ SCI_TC_GET(dmar,xferred);
+if (scsi_debug)
+printf("{O %x %x}", xferred, sci->out_count);
+			/* the 5380 prefetches one byte, so discount it here */
+ xferred = sci->out_count - xferred - 1;
+/* assert(xferred > 0);*/
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ if (tgt->transient_state.dma_offset >= PER_TGT_BUFF_SIZE)
+ tgt->transient_state.dma_offset = 0;
+
+ sci_copyout( tgt, offset, xferred);
+
+ }
+ tgt->transient_state.script = sci_script_data_out;
+ break;
+
+ case SCSI_PHASE_DATAI:
+ LOG(0x19,"+DATAI");
+
+ /* Stop dma */
+ regs->sci_icmd = 0;
+ regs->sci_mode &= ~(SCI_MODE_DMA|SCI_MODE_DMA_IE);
+ dmar->sci_dma_dir = SCI_DMA_DIR_WRITE;/* make sure we steal not */
+
+ if (sci->in_count) {
+ register int offset, xferred;
+/* unsigned char extrab = regs->sci_idata;*/
+
+ SCI_TC_GET(dmar,xferred);
+ /* ++EXTRA_BYTE */
+if (scsi_debug)
+printf("{A %x %x %x}", xferred, sci->in_count, sci->extra_byte);
+ xferred = sci->in_count - xferred - sci->extra_byte;
+ /* ++EXTRA_BYTE */
+ assert(xferred > 0);
+ tgt->transient_state.in_count -= xferred;
+ assert(tgt->transient_state.in_count > 0);
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ if (tgt->transient_state.dma_offset >= PER_TGT_BUFF_SIZE)
+ tgt->transient_state.dma_offset = 0;
+
+ /* copy what we got */
+ sci_copyin( tgt, offset, xferred, 0, 0/*extrab*/);
+ }
+ tgt->transient_state.script = sci_script_data_in;
+ break;
+
+ case SCSI_PHASE_STATUS:
+ /* will have to restart dma */
+ SCI_TC_GET(dmar,count);
+ if (sci->state & SCI_STATE_DMA_IN) {
+ register int offset, xferred;
+/* unsigned char extrab = regs->sci_idata;*/
+
+ LOG(0x1a,"+STATUS+R");
+
+
+ /* Stop dma */
+ regs->sci_icmd = 0;
+ regs->sci_mode &= ~(SCI_MODE_DMA|SCI_MODE_DMA_IE);
+ dmar->sci_dma_dir = SCI_DMA_DIR_WRITE;/* make sure we steal not */
+
+ /* ++EXTRA_BYTE */
+if (scsi_debug)
+printf("{A %x %x %x}", count, sci->in_count, sci->extra_byte);
+ xferred = sci->in_count - count - sci->extra_byte;
+ /* ++EXTRA_BYTE */
+ assert(xferred > 0);
+ tgt->transient_state.in_count -= xferred;
+/* assert(tgt->transient_state.in_count > 0);*/
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ if (tgt->transient_state.dma_offset >= PER_TGT_BUFF_SIZE)
+ tgt->transient_state.dma_offset = 0;
+
+ /* copy what we got */
+ sci_copyin( tgt, offset, xferred, 0, 0/*/extrab*/);
+
+ tgt->transient_state.script = sci_script_data_in;
+ if (tgt->transient_state.in_count == 0)
+ tgt->transient_state.script++;
+
+ } else {
+
+ LOG(0x1d,"+STATUS+W");
+
+if (sci_delay & 1) delay(1000);
+ /* Stop dma */
+ regs->sci_icmd = 0;
+ regs->sci_mode &= ~(SCI_MODE_DMA|SCI_MODE_DMA_IE);
+ dmar->sci_dma_dir = SCI_DMA_DIR_READ;/* make sure we steal not */
+
+if (scsi_debug)
+printf("{O %x %x}", count, sci->out_count);
+ if ((count == 0) && (tgt->transient_state.out_count == sci->out_count)) {
+ /* all done */
+ tgt->transient_state.script = &sci_script_data_out[1];
+ tgt->transient_state.out_count = 0;
+ } else {
+ register int xferred, offset;
+
+ /* how much we xferred */
+ xferred = sci->out_count - count - 1;/*prefetch*/
+
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ if (tgt->transient_state.dma_offset >= PER_TGT_BUFF_SIZE)
+ tgt->transient_state.dma_offset = 0;
+
+ sci_copyout( tgt, offset, xferred);
+
+ tgt->transient_state.script = sci_script_data_out;
+ }
+ sci->out_count = 0;
+ }
+ break;
+ default:
+ gimmeabreak();
+ }
+ sci->extra_byte = 0;
+
+/* SCI_ACK(regs,SCSI_PHASE_MSG_IN); later */
+ (void) sci_msg_in(sci,csr,bs);
+
+ regs->sci_sel_enb = (1 << sci->sc->initiator_id);
+
+ sci->script = sci_script_disconnect;
+
+ return FALSE;
+}
+
+/*
+ * Watchdog
+ *
+ */
+sci_reset_scsibus(sci)
+ register sci_softc_t sci;
+{
+ register target_info_t *tgt = sci->active_target;
+ if (tgt) {
+ int cnt;
+ SCI_TC_GET(sci->dmar,cnt);
+ log( LOG_KERN,
+ "Target %d was active, cmd x%x in x%x out x%x Sin x%x Sou x%x dmalen x%x\n",
+ tgt->target_id, tgt->cur_cmd,
+ tgt->transient_state.in_count, tgt->transient_state.out_count,
+ sci->in_count, sci->out_count, cnt);
+ }
+ sci->regs->sci_icmd = SCI_ICMD_RST;
+ delay(25);
+}
+
+/*
+ * Copy routines
+ */
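+/*
+ * sci_copyin moves 'len' bytes that were dma'd into the per-target
+ * buffer (tgt->cmd_ptr + offset) out to the caller's buffer in
+ * tgt->ior->io_data, advancing transient_state.copy_count; the
+ * isaobb/obb pair patches the last odd byte into the tail when the
+ * dma engine could not transfer it.  sci_copyout goes the other way,
+ * refilling the per-target buffer from the caller's data for the next
+ * dma chunk, clipped to what remains of ior->io_count.
+ */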
+/*static*/
+sci_copyin(tgt, offset, len, isaobb, obb)
+ register target_info_t *tgt;
+ unsigned char obb;
+{
+ register char *from, *to;
+ register int count;
+
+ count = tgt->transient_state.copy_count;
+
+ from = tgt->cmd_ptr + offset;
+ to = tgt->ior->io_data + count;
+ tgt->transient_state.copy_count = count + len;
+
+ bcopy( from, to, len);
+ /* check for last, poor little odd byte */
+ if (isaobb) {
+ to += len;
+ to[-1] = obb;
+ }
+}
+
+/*static*/
+sci_copyout( tgt, offset, len)
+ register target_info_t *tgt;
+{
+ register char *from, *to;
+ register int count, olen;
+ unsigned char c;
+ char *p;
+
+ count = tgt->ior->io_count - tgt->transient_state.copy_count;
+ if (count > 0) {
+
+ len = u_min(count, len);
+ offset += tgt->transient_state.cmd_count;
+
+ count = tgt->transient_state.copy_count;
+ tgt->transient_state.copy_count = count + len;
+
+ from = tgt->ior->io_data + count;
+ to = tgt->cmd_ptr + offset;
+
+ bcopy(from, to, len);
+
+ }
+}
+
+#endif /*NSCI > 0*/
+
diff --git a/scsi/adapters/scsi_53C700.h b/scsi/adapters/scsi_53C700.h
new file mode 100644
index 00000000..224fc5bf
--- /dev/null
+++ b/scsi/adapters/scsi_53C700.h
@@ -0,0 +1,327 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * File: scsi_53C700.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 8/91
+ *
+ * Defines for the NCR 53C700 (SCSI I/O Processor)
+ */
+
+/*
+ * Register map
+ */
+
+typedef struct {
+ volatile unsigned char siop_scntl0; /* rw: SCSI control reg 0 */
+ volatile unsigned char siop_scntl1; /* rw: SCSI control reg 1 */
+ volatile unsigned char siop_sdid; /* rw: SCSI Destination ID */
+ volatile unsigned char siop_sien; /* rw: SCSI Interrupt Enable */
+ volatile unsigned char siop_scid; /* rw: SCSI Chip ID reg */
+ volatile unsigned char siop_sxfer; /* rw: SCSI Transfer reg */
+ volatile unsigned char siop_sodl; /* rw: SCSI Output Data Latch */
+ volatile unsigned char siop_socl; /* rw: SCSI Output Control Latch */
+ volatile unsigned char siop_sfbr; /* ro: SCSI First Byte Received */
+ volatile unsigned char siop_sidl; /* ro: SCSI Input Data Latch */
+ volatile unsigned char siop_sbdl; /* ro: SCSI Bus Data Lines */
+ volatile unsigned char siop_sbcl; /* ro: SCSI Bus Control Lines */
+ volatile unsigned char siop_dstat; /* ro: DMA status */
+ volatile unsigned char siop_sstat0; /* ro: SCSI status reg 0 */
+ volatile unsigned char siop_sstat1; /* ro: SCSI status reg 1 */
+ volatile unsigned char siop_sstat2; /* ro: SCSI status reg 2 */
+ volatile unsigned char siop_res1;
+ volatile unsigned char siop_res2;
+ volatile unsigned char siop_res3;
+ volatile unsigned char siop_res4;
+ volatile unsigned char siop_ctest0; /* ro: Chip test register 0 */
+ volatile unsigned char siop_ctest1; /* ro: Chip test register 1 */
+ volatile unsigned char siop_ctest2; /* ro: Chip test register 2 */
+ volatile unsigned char siop_ctest3; /* ro: Chip test register 3 */
+ volatile unsigned char siop_ctest4; /* rw: Chip test register 4 */
+ volatile unsigned char siop_ctest5; /* rw: Chip test register 5 */
+ volatile unsigned char siop_ctest6; /* rw: Chip test register 6 */
+ volatile unsigned char siop_ctest7; /* rw: Chip test register 7 */
+ volatile unsigned char siop_temp0; /* rw: Temporary Stack reg */
+ volatile unsigned char siop_temp1;
+ volatile unsigned char siop_temp2;
+ volatile unsigned char siop_temp3;
+ volatile unsigned char siop_dfifo; /* rw: DMA FIFO */
+ volatile unsigned char siop_istat; /* rw: Interrupt Status reg */
+ volatile unsigned char siop_res5;
+ volatile unsigned char siop_res6;
+ volatile unsigned char siop_dbc0; /* rw: DMA Byte Counter reg */
+ volatile unsigned char siop_dbc1;
+ volatile unsigned char siop_dbc2;
+ volatile unsigned char siop_dcmd; /* rw: DMA Command Register */
+ volatile unsigned char siop_dnad0; /* rw: DMA Next Address */
+ volatile unsigned char siop_dnad1;
+ volatile unsigned char siop_dnad2;
+ volatile unsigned char siop_dnad3;
+ volatile unsigned char siop_dsp0; /* rw: DMA SCRIPTS Pointer reg */
+ volatile unsigned char siop_dsp1;
+ volatile unsigned char siop_dsp2;
+ volatile unsigned char siop_dsp3;
+ volatile unsigned char siop_dsps0; /* rw: DMA SCRIPTS Pointer Save reg */
+ volatile unsigned char siop_dsps1;
+ volatile unsigned char siop_dsps2;
+ volatile unsigned char siop_dsps3;
+ volatile unsigned char siop_dmode; /* rw: DMA Mode reg */
+ volatile unsigned char siop_res7;
+ volatile unsigned char siop_res8;
+ volatile unsigned char siop_res9;
+ volatile unsigned char siop_res10;
+ volatile unsigned char siop_dien; /* rw: DMA Interrupt Enable */
+ volatile unsigned char siop_dwt; /* rw: DMA Watchdog Timer */
+ volatile unsigned char siop_dcntl; /* rw: DMA Control reg */
+ volatile unsigned char siop_res11;
+ volatile unsigned char siop_res12;
+ volatile unsigned char siop_res13;
+ volatile unsigned char siop_res14;
+
+} siop_regmap_t;
+
+/*
+ * Register defines
+ */
+
+/* Scsi control register 0 (scntl0) */
+
+#define SIOP_SCNTL0_ARB 0xc0 /* Arbitration mode */
+# define SIOP_ARB_SIMPLE 0x00
+# define SIOP_ARB_FULL 0xc0
+#define SIOP_SCNTL0_START 0x20 /* Start Sequence */
+#define SIOP_SCNTL0_WATN 0x10 /* (Select) With ATN */
+#define SIOP_SCNTL0_EPC 0x08 /* Enable Parity Checking */
+#define SIOP_SCNTL0_EPG 0x04 /* Enable Parity Generation */
+#define SIOP_SCNTL0_AAP 0x02 /* Assert ATN on Parity Error */
+#define SIOP_SCNTL0_TRG 0x01 /* Target Mode */
+
+/* Scsi control register 1 (scntl1) */
+
+#define SIOP_SCNTL1_EXC 0x80 /* Extra Clock Cycle of data setup */
+#define SIOP_SCNTL1_ADB 0x40 /* Assert Data Bus */
+#define SIOP_SCNTL1_ESR 0x20 /* Enable Selection/Reselection */
+#define SIOP_SCNTL1_CON 0x10 /* Connected */
+#define SIOP_SCNTL1_RST 0x08 /* Assert RST */
+#define SIOP_SCNTL1_PAR 0x04 /* Force bad Parity */
+#define SIOP_SCNTL1_SND 0x02 /* Start Send operation */
+#define SIOP_SCNTL1_RCV 0x01 /* Start Receive operation */
+
+/* Scsi interrupt enable register (sien) */
+
+#define SIOP_SIEN_M_A 0x80 /* Phase Mismatch or ATN active */
+#define SIOP_SIEN_FC 0x40 /* Function Complete */
+#define SIOP_SIEN_STO 0x20 /* (Re)Selection timeout */
+#define SIOP_SIEN_SEL 0x10 /* (Re)Selected */
+#define SIOP_SIEN_SGE 0x08 /* SCSI Gross Error */
+#define SIOP_SIEN_UDC 0x04 /* Unexpected Disconnect */
+#define SIOP_SIEN_RST 0x02 /* RST asserted */
+#define SIOP_SIEN_PAR 0x01 /* Parity Error */
+
+/* Scsi chip ID (scid) */
+
+#define	SIOP_SCID_VALUE(i)	(1<<(i))
+
+/* Scsi transfer register (sxfer) */
+
+#define SIOP_SXFER_DHP 0x80 /* Disable Halt on Parity error/ ATN asserted */
+#define SIOP_SXFER_TP 0x70 /* Synch Transfer Period */
+ /* see specs for formulas:
+ Period = TCP * (4 + XFERP )
+ TCP = 1 + CLK + 1..2;
+ */
+#define SIOP_SXFER_MO 0x0f /* Synch Max Offset */
+# define SIOP_MAX_OFFSET 8
+
+/* Scsi output data latch register (sodl) */
+
+/* Scsi output control latch register (socl) */
+
+#define SIOP_REQ 0x80 /* SCSI signal <x> asserted */
+#define SIOP_ACK 0x40
+#define SIOP_BSY 0x20
+#define SIOP_SEL 0x10
+#define SIOP_ATN 0x08
+#define SIOP_MSG 0x04
+#define SIOP_CD 0x02
+#define SIOP_IO 0x01
+
+#define SIOP_PHASE(socl) SCSI_PHASE(socl)
+
+/* Scsi first byte received register (sfbr) */
+
+/* Scsi input data latch register (sidl) */
+
+/* Scsi bus data lines register (sbdl) */
+
+/* Scsi bus control lines register (sbcl). Same as socl */
+
+/* DMA status register (dstat) */
+
+#define SIOP_DSTAT_DFE 0x80 /* DMA FIFO empty */
+#define SIOP_DSTAT_RES 0x60
+#define SIOP_DSTAT_ABRT 0x10 /* Aborted */
+#define SIOP_DSTAT_SSI 0x08 /* SCRIPT Single Step */
+#define SIOP_DSTAT_SIR 0x04 /* SCRIPT Interrupt Instruction */
+#define SIOP_DSTAT_WTD 0x02 /* Watchdog Timeout Detected */
+#define SIOP_DSTAT_OPC 0x01 /* Invalid SCRIPTS Opcode */
+
+/* Scsi status register 0 (sstat0) */
+
+#define SIOP_SSTAT0_M_A 0x80 /* Phase Mismatch or ATN active */
+#define SIOP_SSTAT0_FC 0x40 /* Function Complete */
+#define SIOP_SSTAT0_STO 0x20 /* (Re)Selection timeout */
+#define SIOP_SSTAT0_SEL 0x10 /* (Re)Selected */
+#define SIOP_SSTAT0_SGE 0x08 /* SCSI Gross Error */
+#define SIOP_SSTAT0_UDC 0x04 /* Unexpected Disconnect */
+#define SIOP_SSTAT0_RST 0x02 /* RST asserted */
+#define SIOP_SSTAT0_PAR 0x01 /* Parity Error */
+
+/* Scsi status register 1 (sstat1) */
+
+#define SIOP_SSTAT1_ILF 0x80 /* Input latch (sidl) full */
+#define SIOP_SSTAT1_ORF 0x40 /* output reg (sodr) full */
+#define SIOP_SSTAT1_OLF 0x20 /* output latch (sodl) full */
+#define SIOP_SSTAT1_AIP 0x10 /* Arbitration in progress */
+#define SIOP_SSTAT1_LOA 0x08 /* Lost arbitration */
+#define SIOP_SSTAT1_WOA 0x04 /* Won arbitration */
+#define SIOP_SSTAT1_RST 0x02 /* SCSI RST current value */
+#define SIOP_SSTAT1_SDP 0x01 /* SCSI SDP current value */
+
+/* Scsi status register 2 (sstat2) */
+
+#define SIOP_SSTAT2_FF 0xf0 /* SCSI FIFO flags (bytecount) */
+# define SIOP_SCSI_FIFO_DEEP 8
+#define SIOP_SSTAT2_SDP 0x08 /* Latched (on REQ) SCSI SDP */
+#define SIOP_SSTAT2_MSG 0x04 /* Latched SCSI phase */
+#define SIOP_SSTAT2_CD 0x02
+#define SIOP_SSTAT2_IO 0x01
+
+/* Chip test register 0 (ctest0) */
+
+#define SIOP_CTEST0_RES 0xfc
+#define SIOP_CTEST0_RTRG 0x02 /* Real Target mode */
+#define SIOP_CTEST0_DDIR 0x01 /* Xfer direction (1-> from SCSI bus) */
+
+/* Chip test register 1 (ctest1) */
+
+#define SIOP_CTEST1_FMT 0xf0 /* Byte empty in DMA FIFO bottom (high->byte3) */
+#define SIOP_CTEST1_FFL 0x0f /* Byte full in DMA FIFO top, same */
+
+/* Chip test register 2 (ctest2) */
+
+#define SIOP_CTEST2_RES 0xc0
+#define	SIOP_CTEST2_SOFF	0x20	/* Synch Offset compare (1-> zero Init, max Tgt) */
+#define SIOP_CTEST2_SFP 0x10 /* SCSI FIFO Parity */
+#define SIOP_CTEST2_DFP 0x08 /* DMA FIFO Parity */
+#define SIOP_CTEST2_TEOP 0x04 /* True EOP (a-la 5380) */
+#define SIOP_CTEST2_DREQ 0x02 /* DREQ status */
+#define SIOP_CTEST2_DACK 0x01 /* DACK status */
+
+/* Chip test register 3 (ctest3) read-only, top of SCSI FIFO */
+
+/* Chip test register 4 (ctest4) */
+
+#define SIOP_CTEST4_RES 0x80
+#define SIOP_CTEST4_ZMOD 0x40 /* High-impedance outputs */
+#define SIOP_CTEST4_SZM 0x20 /* ditto, SCSI "outputs" */
+#define	SIOP_CTEST4_SLBE	0x10	/* SCSI loopback enable */
+#define SIOP_CTEST4_SFWR 0x08 /* SCSI FIFO write enable (from sodl) */
+#define SIOP_CTEST4_FBL 0x07 /* DMA FIFO Byte Lane select (from ctest6)
+ 4->0, .. 7->3 */
+
+/* Chip test register 5 (ctest5) */
+
+#define SIOP_CTEST5_ADCK 0x80 /* Clock Address Incrementor */
+#define SIOP_CTEST5_BBCK 0x40 /* Clock Byte counter */
+#define SIOP_CTEST5_ROFF 0x20 /* Reset SCSI offset */
+#define SIOP_CTEST5_MASR 0x10 /* Master set/reset pulses (of bits 3-0) */
+#define SIOP_CTEST5_DDIR 0x08 /* (re)set internal DMA direction */
+#define SIOP_CTEST5_EOP 0x04 /* (re)set internal EOP */
+#define SIOP_CTEST5_DREQ 0x02 /* (re)set internal REQ */
+#define SIOP_CTEST5_DACK 0x01 /* (re)set internal ACK */
+
+/* Chip test register 6 (ctest6) DMA FIFO access */
+
+/* Chip test register 7 (ctest7) */
+
+#define SIOP_CTEST7_RES 0xe0
+#define SIOP_CTEST7_STD 0x10 /* Disable selection timeout */
+#define SIOP_CTEST7_DFP 0x08 /* DMA FIFO parity bit */
+#define SIOP_CTEST7_EVP 0x04 /* Even parity (to host bus) */
+#define SIOP_CTEST7_DC 0x02 /* Drive DC pin low on SCRIPT fetches */
+#define SIOP_CTEST7_DIFF 0x01 /* Differential mode */
+
+/* DMA FIFO register (dfifo) */
+
+#define SIOP_DFIFO_FLF 0x80 /* Flush (spill) DMA FIFO */
+#define SIOP_DFIFO_CLF 0x40 /* Clear DMA and SCSI FIFOs */
+#define SIOP_DFIFO_BO 0x3f /* FIFO byte offset counter */
+
+/* Interrupt status register (istat) */
+
+#define SIOP_ISTAT_ABRT 0x80 /* Abort operation */
+#define SIOP_ISTAT_RES 0x70
+#define SIOP_ISTAT_CON 0x08 /* Connected */
+#define SIOP_ISTAT_PRE 0x04 /* Pointer register empty */
+#define SIOP_ISTAT_SIP 0x02 /* SCSI Interrupt pending */
+#define SIOP_ISTAT_DIP 0x01 /* DMA Interrupt pending */
+
+
+/* DMA Mode register (dmode) */
+
+#define SIOP_DMODE_BL_MASK 0xc0 /* 0->1 1->2 2->4 3->8 */
+#define SIOP_DMODE_BW16 0x20 /* Bus Width is 16 bits */
+#define SIOP_DMODE_286 0x10 /* 286 mode */
+#define SIOP_DMODE_IO_M 0x08 /* xfer data to memory or I/O space */
+#define SIOP_DMODE_FAM 0x04 /* fixed address mode */
+#define SIOP_DMODE_PIPE 0x02 /* SCRIPTS in Pipeline mode */
+#define SIOP_DMODE_MAN 0x01 /* SCRIPTS in Manual start mode */
+
+/* DMA interrupt enable register (dien) */
+
+#define SIOP_DIEN_RES 0xe0
+#define SIOP_DIEN_ABRT 0x10 /* On Abort */
+#define SIOP_DIEN_SSI 0x08 /* On SCRIPTS sstep */
+#define SIOP_DIEN_SIR 0x04 /* On SCRIPTS intr instruction */
+#define SIOP_DIEN_WTD 0x02 /* On watchdog timeout */
+#define SIOP_DIEN_OPC 0x01 /* On SCRIPTS illegal opcode */
+
+/* DMA control register (dcntl) */
+
+#define SIOP_DCNTL_CF_MASK 0xc0 /* Clock frequency dividers:
+ 0 --> 37.51..50.00 Mhz, div=2
+ 1 --> 25.01..37.50 Mhz, div=1.5
+ 2 --> 16.67..25.00 Mhz, div=1
+ 3 --> reserved
+ */
+#define SIOP_DCNTL_S16 0x20 /* SCRIPTS fetches 16bits at a time */
+#define SIOP_DCNTL_SSM 0x10 /* Single step mode */
+#define SIOP_DCNTL_LLM 0x08 /* Enable Low-level mode */
+#define SIOP_DCNTL_STD 0x04 /* Start SCRIPTS operation */
+#define SIOP_DCNTL_RES 0x02
+#define SIOP_DCNTL_RST 0x01 /* Software reset */
+
diff --git a/scsi/adapters/scsi_53C700_hdw.c b/scsi/adapters/scsi_53C700_hdw.c
new file mode 100644
index 00000000..61b5a3ba
--- /dev/null
+++ b/scsi/adapters/scsi_53C700_hdw.c
@@ -0,0 +1,696 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * File: scsi_53C700_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 8/91
+ *
+ * Bottom layer of the SCSI driver: chip-dependent functions
+ *
+ * This file contains the code that is specific to the NCR 53C700
+ * SCSI chip (Host Bus Adapter in SCSI parlance): probing, start
+ * operation, and interrupt routine.
+ */
+
+
+#include <siop.h>
+#if NSIOP > 0
+#include <platforms.h>
+
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <chips/busses.h>
+#include <scsi/compat_30.h>
+#include <machine/machspl.h>
+
+#include <sys/syslog.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+
+#include <scsi/adapters/scsi_53C700.h>
+
+#ifdef PAD
+typedef struct {
+ volatile unsigned char siop_scntl0; /* rw: SCSI control reg 0 */
+ PAD(pad0);
+ volatile unsigned char siop_scntl1; /* rw: SCSI control reg 1 */
+ PAD(pad1);
+ volatile unsigned char siop_sdid; /* rw: SCSI Destination ID */
+ PAD(pad2);
+ volatile unsigned char siop_sien; /* rw: SCSI Interrupt Enable */
+ PAD(pad3);
+ volatile unsigned char siop_scid; /* rw: SCSI Chip ID reg */
+ PAD(pad4);
+ volatile unsigned char siop_sxfer; /* rw: SCSI Transfer reg */
+ PAD(pad5);
+ volatile unsigned char siop_sodl; /* rw: SCSI Output Data Latch */
+ PAD(pad6);
+ volatile unsigned char siop_socl; /* rw: SCSI Output Control Latch */
+ PAD(pad7);
+ volatile unsigned char siop_sfbr; /* ro: SCSI First Byte Received */
+ PAD(pad8);
+ volatile unsigned char siop_sidl; /* ro: SCSI Input Data Latch */
+ PAD(pad9);
+ volatile unsigned char siop_sbdl; /* ro: SCSI Bus Data Lines */
+ PAD(pad10);
+ volatile unsigned char siop_sbcl; /* ro: SCSI Bus Control Lines */
+ PAD(pad11);
+ volatile unsigned char siop_dstat; /* ro: DMA status */
+ PAD(pad12);
+ volatile unsigned char siop_sstat0; /* ro: SCSI status reg 0 */
+ PAD(pad13);
+ volatile unsigned char siop_sstat1; /* ro: SCSI status reg 1 */
+ PAD(pad14);
+ volatile unsigned char siop_sstat2; /* ro: SCSI status reg 2 */
+ PAD(pad15);
+ volatile unsigned char siop_res1;
+ PAD(pad16);
+ volatile unsigned char siop_res2;
+ PAD(pad17);
+ volatile unsigned char siop_res3;
+ PAD(pad18);
+ volatile unsigned char siop_res4;
+ PAD(pad19);
+ volatile unsigned char siop_ctest0; /* ro: Chip test register 0 */
+ PAD(pad20);
+ volatile unsigned char siop_ctest1; /* ro: Chip test register 1 */
+ PAD(pad21);
+ volatile unsigned char siop_ctest2; /* ro: Chip test register 2 */
+ PAD(pad22);
+ volatile unsigned char siop_ctest3; /* ro: Chip test register 3 */
+ PAD(pad23);
+ volatile unsigned char siop_ctest4; /* rw: Chip test register 4 */
+ PAD(pad24);
+ volatile unsigned char siop_ctest5; /* rw: Chip test register 5 */
+ PAD(pad25);
+ volatile unsigned char siop_ctest6; /* rw: Chip test register 6 */
+ PAD(pad26);
+ volatile unsigned char siop_ctest7; /* rw: Chip test register 7 */
+ PAD(pad27);
+ volatile unsigned char siop_temp0; /* rw: Temporary Stack reg */
+ PAD(pad28);
+ volatile unsigned char siop_temp1;
+ PAD(pad29);
+ volatile unsigned char siop_temp2;
+ PAD(pad30);
+ volatile unsigned char siop_temp3;
+ PAD(pad31);
+ volatile unsigned char siop_dfifo; /* rw: DMA FIFO */
+ PAD(pad32);
+ volatile unsigned char siop_istat; /* rw: Interrupt Status reg */
+ PAD(pad33);
+ volatile unsigned char siop_res5;
+ PAD(pad34);
+ volatile unsigned char siop_res6;
+ PAD(pad35);
+ volatile unsigned char siop_dbc0; /* rw: DMA Byte Counter reg */
+ PAD(pad36);
+ volatile unsigned char siop_dbc1;
+ PAD(pad37);
+ volatile unsigned char siop_dbc2;
+ PAD(pad38);
+ volatile unsigned char siop_dcmd; /* rw: DMA Command Register */
+ PAD(pad39);
+ volatile unsigned char siop_dnad0; /* rw: DMA Next Address */
+ PAD(pad40);
+ volatile unsigned char siop_dnad1;
+ PAD(pad41);
+ volatile unsigned char siop_dnad2;
+ PAD(pad42);
+ volatile unsigned char siop_dnad3;
+ PAD(pad43);
+ volatile unsigned char siop_dsp0; /* rw: DMA SCRIPTS Pointer reg */
+ PAD(pad44);
+ volatile unsigned char siop_dsp1;
+ PAD(pad45);
+ volatile unsigned char siop_dsp2;
+ PAD(pad46);
+ volatile unsigned char siop_dsp3;
+ PAD(pad47);
+ volatile unsigned char siop_dsps0; /* rw: DMA SCRIPTS Pointer Save reg */
+ PAD(pad48);
+ volatile unsigned char siop_dsps1;
+ PAD(pad49);
+ volatile unsigned char siop_dsps2;
+ PAD(pad50);
+ volatile unsigned char siop_dsps3;
+ PAD(pad51);
+ volatile unsigned char siop_dmode; /* rw: DMA Mode reg */
+ PAD(pad52);
+ volatile unsigned char siop_res7;
+ PAD(pad53);
+ volatile unsigned char siop_res8;
+ PAD(pad54);
+ volatile unsigned char siop_res9;
+ PAD(pad55);
+ volatile unsigned char siop_res10;
+ PAD(pad56);
+ volatile unsigned char siop_dien; /* rw: DMA Interrupt Enable */
+ PAD(pad57);
+ volatile unsigned char siop_dwt; /* rw: DMA Watchdog Timer */
+ PAD(pad58);
+ volatile unsigned char siop_dcntl; /* rw: DMA Control reg */
+ PAD(pad59);
+ volatile unsigned char siop_res11;
+ PAD(pad60);
+ volatile unsigned char siop_res12;
+ PAD(pad61);
+ volatile unsigned char siop_res13;
+ PAD(pad62);
+ volatile unsigned char siop_res14;
+ PAD(pad63);
+} siop_padded_regmap_t;
+#else
+typedef siop_regmap_t siop_padded_regmap_t;
+#endif
+
+/*
+ * Macros to make certain things a little more readable
+ */
+
+/* forward decls */
+
+int siop_reset_scsibus();
+boolean_t siop_probe_target();
+
+/*
+ * State descriptor for this layer. There is one such structure
+ * per (enabled) 53C700 interface
+ */
+struct siop_softc {
+ watchdog_t wd;
+ siop_padded_regmap_t *regs; /* 53C700 registers */
+ scsi_dma_ops_t *dma_ops; /* DMA operations and state */
+ opaque_t dma_state;
+
+ script_t script;
+ int (*error_handler)();
+ int in_count; /* amnt we expect to receive */
+ int out_count; /* amnt we are going to ship */
+
+ volatile char state;
+#define SIOP_STATE_BUSY 0x01 /* selecting or currently connected */
+#define SIOP_STATE_TARGET 0x04 /* currently selected as target */
+#define SIOP_STATE_COLLISION 0x08 /* lost selection attempt */
+#define SIOP_STATE_DMA_IN 0x10 /* tgt --> initiator xfer */
+
+ unsigned char ntargets; /* how many alive on this scsibus */
+ unsigned char done;
+
+ scsi_softc_t *sc;
+ target_info_t *active_target;
+
+ target_info_t *next_target; /* trying to seize bus */
+ queue_head_t waiting_targets;/* other targets competing for bus */
+
+} siop_softc_data[NSIOP];
+
+typedef struct siop_softc *siop_softc_t;
+
+siop_softc_t siop_softc[NSIOP];
+
+/*
+ * Definition of the controller for the auto-configuration program.
+ */
+
+int siop_probe(), scsi_slave(), scsi_attach(), siop_go(), siop_intr();
+
+caddr_t siop_std[NSIOP] = { 0 };
+struct bus_device *siop_dinfo[NSIOP*8];
+struct bus_ctlr *siop_minfo[NSIOP];
+struct bus_driver siop_driver =
+ { siop_probe, scsi_slave, scsi_attach, siop_go, siop_std, "rz", siop_dinfo,
+ "siop", siop_minfo, BUS_INTR_B4_PROBE};
+
+/*
+ * Scripts
+ */
+struct script
+siop_script_data_in[] = {
+},
+
+siop_script_data_out[] = {
+},
+
+siop_script_cmd[] = {
+},
+
+/* Synchronous transfer negotiation */
+
+siop_script_try_synch[] = {
+},
+
+/* Disconnect sequence */
+
+siop_script_disconnect[] = {
+};
+
+
+#define DEBUG
+#ifdef DEBUG
+
+siop_state(base)
+ vm_offset_t base;
+{
+ siop_padded_regmap_t *regs;
+....
+ return 0;
+}
+siop_target_state(tgt)
+ target_info_t *tgt;
+{
+ if (tgt == 0)
+ tgt = siop_softc[0]->active_target;
+ if (tgt == 0)
+ return 0;
+ db_printf("@x%x: fl %x dma %X+%x cmd %x@%X id %x per %x off %x ior %X ret %X\n",
+ tgt,
+ tgt->flags, tgt->dma_ptr, tgt->transient_state.dma_offset, tgt->cur_cmd,
+ tgt->cmd_ptr, tgt->target_id, tgt->sync_period, tgt->sync_offset,
+ tgt->ior, tgt->done);
+ if (tgt->flags & TGT_DISCONNECTED){
+ script_t spt;
+
+ spt = tgt->transient_state.script;
+ db_printf("disconnected at ");
+ db_printsym(spt,1);
+ db_printf(": %x ", spt->condition);
+ db_printsym(spt->action,1);
+ db_printf(", ");
+ db_printsym(tgt->transient_state.handler, 1);
+ db_printf("\n");
+ }
+
+ return 0;
+}
+
+siop_all_targets(unit)
+{
+ int i;
+ target_info_t *tgt;
+ for (i = 0; i < 8; i++) {
+ tgt = siop_softc[unit]->sc->target[i];
+ if (tgt)
+ siop_target_state(tgt);
+ }
+}
+
+siop_script_state(unit)
+{
+ script_t spt = siop_softc[unit]->script;
+
+ if (spt == 0) return 0;
+ db_printsym(spt,1);
+ db_printf(": %x ", spt->condition);
+ db_printsym(spt->action,1);
+ db_printf(", ");
+ db_printsym(siop_softc[unit]->error_handler, 1);
+ return 0;
+
+}
+
+#define PRINT(x) if (scsi_debug) printf x
+
+#define TRMAX 200
+int tr[TRMAX+3];
+int trpt, trpthi;
+#define TR(x) tr[trpt++] = x
+#define TRWRAP trpthi = trpt; trpt = 0;
+#define TRCHECK if (trpt > TRMAX) {TRWRAP}
+
+#define TRACE
+
+#ifdef TRACE
+
+#define LOGSIZE 256
+int siop_logpt;
+char siop_log[LOGSIZE];
+
+#define MAXLOG_VALUE 0x24
+struct {
+ char *name;
+ unsigned int count;
+} logtbl[MAXLOG_VALUE];
+
+static LOG(e,f)
+ char *f;
+{
+ siop_log[siop_logpt++] = (e);
+ if (siop_logpt == LOGSIZE) siop_logpt = 0;
+ if ((e) < MAXLOG_VALUE) {
+ logtbl[(e)].name = (f);
+ logtbl[(e)].count++;
+ }
+}
+
+siop_print_log(skip)
+ int skip;
+{
+ register int i, j;
+ register unsigned char c;
+
+ for (i = 0, j = siop_logpt; i < LOGSIZE; i++) {
+ c = siop_log[j];
+ if (++j == LOGSIZE) j = 0;
+ if (skip-- > 0)
+ continue;
+ if (c < MAXLOG_VALUE)
+ db_printf(" %s", logtbl[c].name);
+ else
+ db_printf("-%d", c & 0x7f);
+ }
+ db_printf("\n");
+ return 0;
+}
+
+siop_print_stat()
+{
+ register int i;
+ register char *p;
+ for (i = 0; i < MAXLOG_VALUE; i++) {
+ if (p = logtbl[i].name)
+ printf("%d %s\n", logtbl[i].count, p);
+ }
+}
+
+#else /* TRACE */
+#define LOG(e,f)
+#endif /* TRACE */
+
+#else /* DEBUG */
+#define PRINT(x)
+#define LOG(e,f)
+#define TR(x)
+#define TRCHECK
+#define TRWRAP
+#endif /* DEBUG */
+
+
+/*
+ * Probe/Slave/Attach functions
+ */
+
+/*
+ * Probe routine:
+ * Should find out (a) if the controller is
+ * present and (b) which/where slaves are present.
+ *
+ * Implementation:
+ * Send an identify msg to each possible target on the bus
+ * except of course ourselves.
+ */
+siop_probe(reg, ui)
+ char *reg;
+ struct bus_ctlr *ui;
+{
+ int unit = ui->unit;
+ siop_softc_t siop = &siop_softc_data[unit];
+ int target_id, i;
+ scsi_softc_t *sc;
+ register siop_padded_regmap_t *regs;
+ int s;
+ boolean_t did_banner = FALSE;
+ char *cmd_ptr;
+ static char *here = "siop_probe";
+
+ /*
+ * We are only called if the chip is there,
+ * but make sure anyways..
+ */
+ regs = (siop_padded_regmap_t *) (reg);
+ if (check_memory(regs, 0))
+ return 0;
+
+#if notyet
+ /* Mappable version side */
+ SIOP_probe(reg, ui);
+#endif
+
+ /*
+ * Initialize hw descriptor
+ */
+ siop_softc[unit] = siop;
+ siop->regs = regs;
+
+ if ((siop->dma_ops = (scsi_dma_ops_t *)siop_std[unit]) == 0)
+ /* use same as unit 0 if undefined */
+ siop->dma_ops = (scsi_dma_ops_t *)siop_std[0];
+ siop->dma_state = (*siop->dma_ops->init)(unit, reg);
+
+ queue_init(&siop->waiting_targets);
+
+ sc = scsi_master_alloc(unit, siop);
+ siop->sc = sc;
+
+ sc->go = siop_go;
+ sc->probe = siop_probe_target;
+ sc->watchdog = scsi_watchdog;
+ siop->wd.reset = siop_reset_scsibus;
+
+#ifdef MACH_KERNEL
+ sc->max_dma_data = -1; /* unlimited */
+#else
+ sc->max_dma_data = scsi_per_target_virtual;
+#endif
+
+ /*
+ * Reset chip
+ */
+ s = splbio();
+ siop_reset(siop, TRUE);
+
+ /*
+ * Our SCSI id on the bus.
+ */
+
+ sc->initiator_id = my_scsi_id(unit);
+ printf("%s%d: my SCSI id is %d", ui->name, unit, sc->initiator_id);
+
+ /*
+ * For all possible targets, see if there is one and allocate
+ * a descriptor for it if it is there.
+ */
+ for (target_id = 0; target_id < 8; target_id++) {
+
+ register unsigned csr, dsr;
+ scsi_status_byte_t status;
+
+ /* except of course ourselves */
+ if (target_id == sc->initiator_id)
+ continue;
+
+ .....
+
+ printf(",%s%d", did_banner++ ? " " : " target(s) at ",
+ target_id);
+
+ .....
+
+
+ /*
+ * Found a target
+ */
+ siop->ntargets++;
+ {
+ register target_info_t *tgt;
+
+ tgt = scsi_slave_alloc(unit, target_id, siop);
+
+ tgt->cmd_ptr = ...
+ tgt->dma_ptr = ...
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ fdma_init(&tgt->fdma, scsi_per_target_virtual);
+#endif /*MACH_KERNEL*/
+ }
+ }
+ printf(".\n");
+
+ splx(s);
+ return 1;
+}
+
+boolean_t
+siop_probe_target(sc, tgt, ior)
+ scsi_softc_t *sc;
+ target_info_t *tgt;
+ io_req_t ior;
+{
+ siop_softc_t siop = siop_softc[sc->masterno];
+ boolean_t newlywed;
+
+ newlywed = (tgt->cmd_ptr == 0);
+ if (newlywed) {
+ /* desc was allocated afresh */
+
+ tgt->cmd_ptr = ...
+ tgt->dma_ptr = ...
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ fdma_init(&tgt->fdma, scsi_per_target_virtual);
+#endif /*MACH_KERNEL*/
+
+ }
+
+ if (scsi_inquiry(sc, tgt, SCSI_INQ_STD_DATA) == SCSI_RET_DEVICE_DOWN)
+ return FALSE;
+
+ tgt->flags = TGT_ALIVE;
+ return TRUE;
+}
+
+
+static siop_wait(preg, until)
+ volatile unsigned char *preg;
+{
+ int timeo = 1000000;
+ while ((*preg & until) != until) {
+ delay(1);
+ if (!timeo--) {
+ printf("siop_wait TIMEO with x%x\n", *preg);
+ break;
+ }
+ }
+ return *preg;
+}
+
+
+siop_reset(siop, quickly)
+ siop_softc_t siop;
+ boolean_t quickly;
+{
+ register siop_padded_regmap_t *regs = siop->regs;
+
+ ....
+
+ if (quickly)
+ return;
+
+ /*
+ * reset the scsi bus, the interrupt routine does the rest
+ * or you can call siop_bus_reset().
+ */
+ ....
+
+}
+
+/*
+ * Operational functions
+ */
+
+/*
+ * Start a SCSI command on a target
+ */
+siop_go(sc, tgt, cmd_count, in_count, cmd_only)
+ scsi_softc_t *sc;
+ target_info_t *tgt;
+ boolean_t cmd_only;
+{
+ siop_softc_t siop;
+ register int s;
+ boolean_t disconn;
+ script_t scp;
+ boolean_t (*handler)();
+
+ LOG(1,"go");
+
+ siop = (siop_softc_t)tgt->hw_state;
+
+ ....
+}
+
+siop_attempt_selection(siop)
+ siop_softc_t siop;
+{
+ target_info_t *tgt;
+ register int out_count;
+ siop_padded_regmap_t *regs;
+ register int cmd;
+ boolean_t ok;
+ scsi_ret_t ret;
+
+ regs = siop->regs;
+ tgt = siop->next_target;
+
+ LOG(4,"select");
+ LOG(0x80+tgt->target_id,0);
+
+ /*
+ * Init bus state variables and set registers.
+ */
+ siop->active_target = tgt;
+
+ /* reselection pending ? */
+ ......
+}
+
+/*
+ * Interrupt routine
+ * Take interrupts from the chip
+ *
+ * Implementation:
+ * Move along the current command's script if
+ * all is well, invoke error handler if not.
+ */
+siop_intr(unit)
+{
+ register siop_softc_t siop;
+ register script_t scp;
+ register unsigned csr, bs, cmd;
+ register siop_padded_regmap_t *regs;
+ boolean_t try_match;
+#if notyet
+ extern boolean_t rz_use_mapped_interface;
+
+ if (rz_use_mapped_interface)
+ return SIOP_intr(unit);
+#endif
+
+ LOG(5,"\n\tintr");
+
+ siop = siop_softc[unit];
+ regs = siop->regs;
+
+ /* ack interrupt */
+ ....
+}
+
+
+siop_target_intr(siop)
+ register siop_softc_t siop;
+{
+ panic("SIOP: TARGET MODE !!!\n");
+}
+
+/*
+ * All the many little things that the interrupt
+ * routine might switch to
+ */
+
+#endif /*NSIOP > 0*/
+
diff --git a/scsi/adapters/scsi_53C94.h b/scsi/adapters/scsi_53C94.h
new file mode 100644
index 00000000..82891f30
--- /dev/null
+++ b/scsi/adapters/scsi_53C94.h
@@ -0,0 +1,253 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_53C94.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Defines for the NCR 53C94 ASC (SCSI interface)
+ * Some gotchas came from the "86C01/53C94 DMA lab work" written
+ * by Ken Stewart (NCR MED Logic Products Applications Engineer)
+ * courtesy of NCR. Thanks Ken !
+ */
+
+/*
+ * Register map
+ */
+
+typedef struct {
+ volatile unsigned char asc_tc_lsb; /* rw: Transfer Counter LSB */
+ volatile unsigned char asc_tc_msb; /* rw: Transfer Counter MSB */
+ volatile unsigned char asc_fifo; /* rw: FIFO top */
+ volatile unsigned char asc_cmd; /* rw: Command */
+ volatile unsigned char asc_csr; /* r: Status */
+#define asc_dbus_id asc_csr /* w: Destination Bus ID */
+ volatile unsigned char asc_intr; /* r: Interrupt */
+#define asc_sel_timo asc_intr /* w: (re)select timeout */
+ volatile unsigned char asc_ss; /* r: Sequence Step */
+#define asc_syn_p asc_ss /* w: synchronous period */
+ volatile unsigned char asc_flags; /* r: FIFO flags + seq step */
+#define asc_syn_o asc_flags /* w: synchronous offset */
+ volatile unsigned char asc_cnfg1; /* rw: Configuration 1 */
+ volatile unsigned char asc_ccf; /* w: Clock Conv. Factor */
+ volatile unsigned char asc_test; /* w: Test Mode */
+ volatile unsigned char asc_cnfg2; /* rw: Configuration 2 */
+ volatile unsigned char asc_cnfg3; /* rw: Configuration 3 */
+ volatile unsigned char asc_rfb; /* w: Reserve FIFO byte */
+} asc_regmap_t;
+
+
+/*
+ * Transfer Count: access macros
+ * That a NOP is required after loading the dma counter
+ * I learned on the NCR test code. Sic.
+ */
+
+#define ASC_TC_MAX 0x10000
+
+#define ASC_TC_GET(ptr,val) \
+ val = ((ptr)->asc_tc_lsb&0xff)|(((ptr)->asc_tc_msb&0xff)<<8)
+#define ASC_TC_PUT(ptr,val) \
+ (ptr)->asc_tc_lsb=(val); \
+ (ptr)->asc_tc_msb=(val)>>8; mb(); \
+ (ptr)->asc_cmd = ASC_CMD_NOP|ASC_CMD_DMA;
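+
+/*
+ * Illustrative use only ('regs', 'count' and 'left' are assumed names
+ * for this sketch, not part of the interface): a dma transfer is
+ * typically armed by loading the counter and then issuing the command,
+ *
+ *	ASC_TC_PUT(regs, count);		-- count < ASC_TC_MAX
+ *	regs->asc_cmd = ASC_CMD_XFER_INFO | ASC_CMD_DMA;
+ *
+ * and on the completion interrupt the residue is read back with
+ *
+ *	ASC_TC_GET(regs, left);
+ *
+ * ASC_TC_PUT itself already ends with the dma'd NOP mentioned above.
+ */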
+
+/*
+ * FIFO register
+ */
+
+#define ASC_FIFO_DEEP 16
+
+
+/*
+ * Command register (command codes)
+ */
+
+#define ASC_CMD_DMA 0x80
+ /* Miscellaneous */
+#define ASC_CMD_NOP 0x00
+#define ASC_CMD_FLUSH 0x01
+#define ASC_CMD_RESET 0x02
+#define ASC_CMD_BUS_RESET 0x03
+ /* Initiator state */
+#define ASC_CMD_XFER_INFO 0x10
+#define ASC_CMD_I_COMPLETE 0x11
+#define ASC_CMD_MSG_ACPT 0x12
+#define ASC_CMD_XFER_PAD 0x18
+#define ASC_CMD_SET_ATN 0x1a
+#define ASC_CMD_CLR_ATN 0x1b
+ /* Target state */
+#define ASC_CMD_SND_MSG 0x20
+#define ASC_CMD_SND_STATUS 0x21
+#define ASC_CMD_SND_DATA 0x22
+#define ASC_CMD_DISC_SEQ 0x23
+#define ASC_CMD_TERM 0x24
+#define ASC_CMD_T_COMPLETE 0x25
+#define ASC_CMD_DISC 0x27
+#define ASC_CMD_RCV_MSG 0x28
+#define ASC_CMD_RCV_CDB 0x29
+#define ASC_CMD_RCV_DATA 0x2a
+#define ASC_CMD_RCV_CMD 0x2b
+#define ASC_CMD_ABRT_DMA 0x04
+ /* Disconnected state */
+#define ASC_CMD_RESELECT 0x40
+#define ASC_CMD_SEL 0x41
+#define ASC_CMD_SEL_ATN 0x42
+#define ASC_CMD_SEL_ATN_STOP 0x43
+#define ASC_CMD_ENABLE_SEL 0x44
+#define ASC_CMD_DISABLE_SEL 0x45
+#define ASC_CMD_SEL_ATN3 0x46
+
+/* this is approximate (no ATN3) but good enough */
+#define asc_isa_select(cmd) (((cmd)&0x7c)==0x40)
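+/*
+ * i.e. it catches RESELECT, SEL, SEL_ATN and SEL_ATN_STOP (0x40..0x43)
+ * but not SEL_ATN3 (0x46), which masks to 0x44 -- hence "approximate".
+ */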
+
+/*
+ * Status register, and phase encoding
+ */
+
+#define ASC_CSR_INT 0x80
+#define ASC_CSR_GE 0x40
+#define ASC_CSR_PE 0x20
+#define ASC_CSR_TC 0x10
+#define ASC_CSR_VGC 0x08
+#define ASC_CSR_MSG 0x04
+#define ASC_CSR_CD 0x02
+#define ASC_CSR_IO 0x01
+
+#define ASC_PHASE(csr) SCSI_PHASE(csr)
+
+/*
+ * Destination Bus ID
+ */
+
+#define ASC_DEST_ID_MASK 0x07
+
+
+/*
+ * Interrupt register
+ */
+
+#define ASC_INT_RESET 0x80
+#define ASC_INT_ILL 0x40
+#define ASC_INT_DISC 0x20
+#define ASC_INT_BS 0x10
+#define ASC_INT_FC 0x08
+#define ASC_INT_RESEL 0x04
+#define ASC_INT_SEL_ATN 0x02
+#define ASC_INT_SEL 0x01
+
+
+/*
+ * Timeout register:
+ *
+ * val = (timeout * CLK_freq) / (8192 * CCF);
+ */
+
+#define asc_timeout_250(clk,ccf) ((31*clk)/ccf)
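+
+/*
+ * The constant 31 is a 250 msec timeout plugged into the formula
+ * above with the clock expressed in MHz:
+ *
+ *	0.25 sec * clk * 10^6 / (8192 * ccf)  =  (30.5 * clk) / ccf
+ *
+ * rounded up to 31.  For example, at 25 MHz with CCF = 5 this
+ * gives (31 * 25) / 5 = 155.
+ */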
+
+/*
+ * Sequence Step register
+ */
+
+#define ASC_SS_XXXX 0xf0
+#define ASC_SS_SOM 0x80
+#define ASC_SS_MASK 0x07
+#define ASC_SS(ss) ((ss)&ASC_SS_MASK)
+
+/*
+ * Synchronous Transfer Period
+ */
+
+#define ASC_STP_MASK 0x1f
+#define ASC_STP_MIN 0x05 /* 5 clk per byte */
+#define ASC_STP_MAX 0x04 /* after ovfl, 35 clk/byte */
+
+/*
+ * FIFO flags
+ */
+
+#define ASC_FLAGS_SEQ_STEP 0xe0
+#define ASC_FLAGS_FIFO_CNT 0x1f
+
+/*
+ * Synchronous offset
+ */
+
+#define ASC_SYNO_MASK 0x0f /* 0 -> asyn */
+
+/*
+ * Configuration 1
+ */
+
+#define ASC_CNFG1_SLOW 0x80
+#define ASC_CNFG1_SRD 0x40
+#define ASC_CNFG1_P_TEST 0x20
+#define ASC_CNFG1_P_CHECK 0x10
+#define ASC_CNFG1_TEST 0x08
+#define ASC_CNFG1_MY_BUS_ID 0x07
+
+/*
+ * CCF register
+ */
+
+#define ASC_CCF_10MHz 0x2
+#define ASC_CCF_15MHz 0x3
+#define ASC_CCF_20MHz 0x4
+#define ASC_CCF_25MHz 0x5
+
+#define mhz_to_ccf(x) (((x-1)/5)+1) /* see specs for limits */
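+/*
+ * e.g. mhz_to_ccf(10) == 2, mhz_to_ccf(15) == 3, mhz_to_ccf(20) == 4
+ * and mhz_to_ccf(25) == 5, matching the ASC_CCF_* values above.
+ */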
+
+/*
+ * Test register
+ */
+
+#define ASC_TEST_XXXX 0xf8
+#define ASC_TEST_HI_Z 0x04
+#define ASC_TEST_I 0x02
+#define ASC_TEST_T 0x01
+
+/*
+ * Configuration 2
+ */
+
+#define ASC_CNFG2_RFB 0x80
+#define ASC_CNFG2_EPL 0x40
+#define ASC_CNFG2_EBC 0x20
+#define ASC_CNFG2_DREQ_HIZ 0x10
+#define ASC_CNFG2_SCSI2 0x08
+#define ASC_CNFG2_BPA 0x04
+#define ASC_CNFG2_RPE 0x02
+#define ASC_CNFG2_DPE 0x01
+
+/*
+ * Configuration 3
+ */
+
+#define ASC_CNFG3_XXXX 0xf8
+#define ASC_CNFG3_SRB 0x04
+#define ASC_CNFG3_ALT_DMA 0x02
+#define ASC_CNFG3_T8 0x01
+
diff --git a/scsi/adapters/scsi_53C94_hdw.c b/scsi/adapters/scsi_53C94_hdw.c
new file mode 100644
index 00000000..dad9b223
--- /dev/null
+++ b/scsi/adapters/scsi_53C94_hdw.c
@@ -0,0 +1,2840 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_53C94_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Bottom layer of the SCSI driver: chip-dependent functions
+ *
+ * This file contains the code that is specific to the NCR 53C94
+ * SCSI chip (Host Bus Adapter in SCSI parlance): probing, start
+ * operation, and interrupt routine.
+ */
+
+/*
+ * This layer works based on small simple 'scripts' that are installed
+ * at the start of the command and drive the chip to completion.
+ * The idea comes from the specs of the NCR 53C700 'script' processor.
+ *
+ * There are various reasons for this, mainly
+ * - Performance: identify the common (successful) path, and follow it;
+ * at interrupt time no code is needed to find the current status
+ * - Code size: it should be easy to compact common operations
+ * - Adaptability: the code skeleton should adapt to different chips without
+ * terrible complications.
+ * - Error handling: it is easy to modify the actions performed
+ * by the scripts to cope with strange but well identified sequences
+ *
+ */
+
+#include <asc.h>
+#if NASC > 0
+#include <platforms.h>
+
+#ifdef DECSTATION
+typedef unsigned char asc_register_t;
+#define PAD(n) char n[3];
+#define mb()
+#ifdef MACH_KERNEL
+#define HAS_MAPPED_SCSI
+#endif
+#define ASC_PROBE_DYNAMICALLY FALSE /* established custom */
+#define DEBUG 1
+#define TRACE 1
+#endif
+
+#ifdef FLAMINGO
+typedef unsigned int asc_register_t;
+#define PAD(n) int n; /* sparse ! */
+#define mb() wbflush() /* memory barrier */
+#define ASC_PROBE_DYNAMICALLY TRUE
+#define DEBUG 1
+#define TRACE 1
+#endif
+
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <chips/busses.h>
+#include <scsi/compat_30.h>
+#include <machine/machspl.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+
+#include <scsi/adapters/scsi_53C94.h>
+#include <scsi/scsi_defs.h>
+#include <scsi/adapters/scsi_dma.h>
+
+#define private static
+
+#ifdef PAD
+typedef struct {
+ volatile asc_register_t asc_tc_lsb; /* rw: Transfer Counter LSB */
+ PAD(pad0)
+ volatile asc_register_t asc_tc_msb; /* rw: Transfer Counter MSB */
+ PAD(pad1)
+ volatile asc_register_t asc_fifo; /* rw: FIFO top */
+ PAD(pad2)
+ volatile asc_register_t asc_cmd; /* rw: Command */
+ PAD(pad3)
+ volatile asc_register_t asc_csr; /* r: Status */
+/*#define asc_dbus_id asc_csr /* w: Destination Bus ID */
+ PAD(pad4)
+ volatile asc_register_t asc_intr; /* r: Interrupt */
+/*#define asc_sel_timo asc_intr /* w: (re)select timeout */
+ PAD(pad5)
+ volatile asc_register_t asc_ss; /* r: Sequence Step */
+/*#define asc_syn_p asc_ss /* w: synchronous period */
+ PAD(pad6)
+ volatile asc_register_t asc_flags; /* r: FIFO flags + seq step */
+/*#define asc_syn_o asc_flags /* w: synchronous offset */
+ PAD(pad7)
+ volatile asc_register_t asc_cnfg1; /* rw: Configuration 1 */
+ PAD(pad8)
+ volatile asc_register_t asc_ccf; /* w: Clock Conv. Factor */
+ PAD(pad9)
+ volatile asc_register_t asc_test; /* w: Test Mode */
+ PAD(pad10)
+ volatile asc_register_t asc_cnfg2; /* rw: Configuration 2 */
+ PAD(pad11)
+ volatile asc_register_t asc_cnfg3; /* rw: Configuration 3 */
+ PAD(pad12)
+ volatile asc_register_t asc_rfb; /* w: Reserve FIFO byte */
+ PAD(pad13)
+} asc_padded_regmap_t;
+
+#else /* !PAD */
+
+typedef asc_regmap_t asc_padded_regmap_t;
+
+#endif /* !PAD */
+
+#define get_reg(r,x) ((unsigned char)((r)->x))
+
+#define fifo_count(r) ((r)->asc_flags & ASC_FLAGS_FIFO_CNT)
+#define get_fifo(r) get_reg(r,asc_fifo)
+
+boolean_t asc_probe_dynamically = ASC_PROBE_DYNAMICALLY;
+
+/*
+ * We might need to use some fields usually
+ * handled by the DMA engine, if asked to.
+ * These are "dma_ptr" and "hba_dep".
+ */
+#define has_oddb hba_dep[0]
+#define the_oddb hba_dep[1]
+
+/*
+ * A script has three parts: a pre-condition, an action, and
+ * an optional command to the chip.  The pre-condition triggers error
+ * handling if not satisfied; in our case it is a match
+ * of the expected and actual scsi-bus phases.
+ * The action part is just a function pointer, and the
+ * command is what the 53c90 should be told to do at the end
+ * of the action processing. This command is only issued and the
+ * script proceeds if the action routine returns TRUE.
+ * See asc_intr() for how and where this is all done.
+ */
+
+typedef struct script {
+ unsigned char condition; /* expected state at interrupt */
+ unsigned char command; /* command to the chip */
+ unsigned short flags; /* unused padding */
+ boolean_t (*action)(); /* extra operations */
+} *script_t;
+
+/* Matching on the condition value */
+#define ANY 0xff
+#define SCRIPT_MATCH(csr,ir,value) ((SCSI_PHASE(csr)==(value)) || \
+ (((value)==ANY) && \
+ ((ir)&(ASC_INT_DISC|ASC_INT_FC))))
+
+/* When no command is needed */
+#define SCRIPT_END -1
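+
+/*
+ * A script entry matches when the current bus phase equals the
+ * expected condition, or when the condition is ANY and the chip
+ * reports a disconnect or function-complete interrupt.  As an
+ * illustrative sketch only (an assumed shape, not the actual
+ * asc_intr code; 'asc', 'regs', 'csr' and 'ir' stand for the softc,
+ * the chip registers, and the status and interrupt register values),
+ * the interrupt routine walks a script roughly like this:
+ *
+ *	scp = asc->script;
+ *	if (SCRIPT_MATCH(csr, ir, scp->condition)) {
+ *		if ((*scp->action)(asc, csr, ir)) {
+ *			if (scp->command != SCRIPT_END)
+ *				regs->asc_cmd = scp->command;
+ *			asc->script = scp + 1;
+ *		}
+ *	} else
+ *		(*asc->error_handler)(asc, csr, ir);
+ */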
+
+/* forward decls of script actions */
+boolean_t
+ asc_end(), /* all come to an end */
+ asc_clean_fifo(), /* .. in preparation for status byte */
+ asc_get_status(), /* get status from target */
+ asc_put_status(), /* send status to initiator */
+ asc_dma_in(), /* get data from target via dma */
+ asc_dma_in_r(), /* get data from target via dma (restartable)*/
+ asc_dma_out(), /* send data to target via dma */
+ asc_dma_out_r(), /* send data to target via dma (restartable) */
+ asc_dosynch(), /* negotiate synch xfer */
+	asc_msg_in(),		/* receive the disconnect message */
+ asc_disconnected(), /* target has disconnected */
+ asc_reconnect(); /* target reconnected */
+
+/* forward decls of error handlers */
+boolean_t
+ asc_err_generic(), /* generic handler */
+ asc_err_disconn(), /* target disconnects amidst */
+ gimmeabreak(); /* drop into the debugger */
+
+int asc_reset_scsibus();
+boolean_t asc_probe_target();
+private asc_wait();
+
+/*
+ * State descriptor for this layer. There is one such structure
+ * per (enabled) SCSI-53c90 interface
+ */
+struct asc_softc {
+ watchdog_t wd;
+ asc_padded_regmap_t *regs; /* 53c90 registers */
+
+ scsi_dma_ops_t *dma_ops; /* DMA operations and state */
+ opaque_t dma_state;
+
+ script_t script; /* what should happen next */
+ boolean_t (*error_handler)();/* what if something is wrong */
+ int in_count; /* amnt we expect to receive */
+ int out_count; /* amnt we are going to ship */
+
+ volatile char state;
+#define ASC_STATE_BUSY 0x01 /* selecting or currently connected */
+#define ASC_STATE_TARGET 0x04 /* currently selected as target */
+#define ASC_STATE_COLLISION 0x08 /* lost selection attempt */
+#define ASC_STATE_DMA_IN 0x10 /* tgt --> initiator xfer */
+#define ASC_STATE_SPEC_DMA 0x20 /* special, 8 byte threshold dma */
+#define ASC_STATE_DO_RFB 0x40 /* DMA engine cannot handle odd bytes */
+
+ unsigned char ntargets; /* how many alive on this scsibus */
+ unsigned char done;
+ unsigned char extra_count; /* sleazy trick to spare an interrupt */
+ int dmacnt_at_end;
+
+ scsi_softc_t *sc; /* HBA-indep info */
+ target_info_t *active_target; /* the current one */
+
+ target_info_t *next_target; /* trying to seize bus */
+ queue_head_t waiting_targets;/* other targets competing for bus */
+
+	unsigned char	ss_was;		/* disentangle powered on/off devices */
+ unsigned char cmd_was;
+
+ unsigned char timeout; /* cache a couple numbers */
+ unsigned char ccf;
+ unsigned char clk;
+
+} asc_softc_data[NASC];
+
+typedef struct asc_softc *asc_softc_t;
+
+asc_softc_t asc_softc[NASC];
+
+/*
+ * Synch xfer parameters, and timing conversions
+ */
+int asc_min_period = 5; /* in CLKS/BYTE, e.g. 1 CLK = 40nsecs @25 Mhz */
+int asc_max_offset = 15; /* pure number */
+
+int asc_to_scsi_period(a,clk)
+{
+	/* Note: the SCSI unit is 4ns and the clock is given in MHz, hence
+
+		A_P * 1000
+		----------  =  S_P
+		C_Mhz * 4
+
+	   which is the 250/clk factor used below.
+	 */
+ return a * (250 / clk);
+
+}
+
+int scsi_period_to_asc(p,clk)
+{
+ register int ret;
+
+ ret = (p * clk) / 250;
+ if (ret < asc_min_period)
+ return asc_min_period;
+ if ((asc_to_scsi_period(ret,clk)) < p)
+ return ret + 1;
+ return ret;
+}
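+
+/*
+ * Worked example, assuming the default 25 MHz clock of the original
+ * 3max: one CLK is 1000/25 = 40 ns, so asc_min_period = 5 CLKs is
+ * 200 ns, i.e. asc_to_scsi_period(5,25) = 5 * (250/25) = 50 SCSI
+ * units of 4 ns; going back, scsi_period_to_asc(50,25) = (50*25)/250 = 5.
+ */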
+
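+/* force a read back of a just-written chip register, then barrier;
+   used right after register writes throughout the driver */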
+#define readback(a) {register int foo; foo = a; mb();}
+
+#define u_min(a,b) (((a) < (b)) ? (a) : (b))
+
+/*
+ * Definition of the controller for the auto-configuration program.
+ */
+
+int asc_probe(), scsi_slave(), asc_go(), asc_intr();
+void scsi_attach();
+
+vm_offset_t asc_std[NASC] = { 0 };
+struct bus_device *asc_dinfo[NASC*8];
+struct bus_ctlr *asc_minfo[NASC];
+struct bus_driver asc_driver =
+ { asc_probe, scsi_slave, scsi_attach, asc_go, asc_std, "rz", asc_dinfo,
+ "asc", asc_minfo, BUS_INTR_B4_PROBE};
+
+
+int asc_clock_speed_in_mhz[NASC] = {25,25,25,25}; /* original 3max */
+
+asc_set_dmaops(unit, dmaops)
+ unsigned int unit;
+ scsi_dma_ops_t *dmaops;
+{
+ if (unit < NASC)
+ asc_std[unit] = (vm_offset_t)dmaops;
+}
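+
+/*
+ * A machine-dependent configuration routine is expected to register
+ * its DMA callbacks this way before asc_probe() runs; the ops symbol
+ * below is only illustrative, not a real name in this tree.
+ */
+#if documentation
+ extern scsi_dma_ops_t my_board_dma_ops;
+
+ asc_set_dmaops(0, &my_board_dma_ops);
+#endif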
+
+/*
+ * Scripts
+ */
+struct script
+asc_script_data_in[] = { /* started with SEL & DMA */
+ {SCSI_PHASE_DATAI, ASC_CMD_XFER_INFO|ASC_CMD_DMA, 0, asc_dma_in},
+ {SCSI_PHASE_STATUS, ASC_CMD_I_COMPLETE, 0, asc_clean_fifo},
+ {SCSI_PHASE_MSG_IN, ASC_CMD_MSG_ACPT, 0, asc_get_status},
+ {ANY, SCRIPT_END, 0, asc_end}
+},
+
+asc_script_data_out[] = { /* started with SEL & DMA */
+ {SCSI_PHASE_DATAO, ASC_CMD_XFER_INFO|ASC_CMD_DMA, 0, asc_dma_out},
+ {SCSI_PHASE_STATUS, ASC_CMD_I_COMPLETE, 0, asc_clean_fifo},
+ {SCSI_PHASE_MSG_IN, ASC_CMD_MSG_ACPT, 0, asc_get_status},
+ {ANY, SCRIPT_END, 0, asc_end}
+},
+
+asc_script_try_synch[] = {
+ {SCSI_PHASE_MSG_OUT, ASC_CMD_I_COMPLETE,0, asc_dosynch},
+ {SCSI_PHASE_MSG_IN, ASC_CMD_MSG_ACPT, 0, asc_get_status},
+ {ANY, SCRIPT_END, 0, asc_end}
+},
+
+asc_script_simple_cmd[] = {
+ {SCSI_PHASE_STATUS, ASC_CMD_I_COMPLETE, 0, asc_clean_fifo},
+ {SCSI_PHASE_MSG_IN, ASC_CMD_MSG_ACPT, 0, asc_get_status},
+ {ANY, SCRIPT_END, 0, asc_end}
+},
+
+asc_script_disconnect[] = {
+ {ANY, ASC_CMD_ENABLE_SEL, 0, asc_disconnected},
+/**/ {SCSI_PHASE_MSG_IN, ASC_CMD_MSG_ACPT, 0, asc_reconnect}
+},
+
+asc_script_restart_data_in[] = { /* starts after disconnect */
+ {SCSI_PHASE_DATAI, ASC_CMD_XFER_INFO|ASC_CMD_DMA, 0, asc_dma_in_r},
+ {SCSI_PHASE_STATUS, ASC_CMD_I_COMPLETE, 0, asc_clean_fifo},
+ {SCSI_PHASE_MSG_IN, ASC_CMD_MSG_ACPT, 0, asc_get_status},
+ {ANY, SCRIPT_END, 0, asc_end}
+},
+
+asc_script_restart_data_out[] = { /* starts after disconnect */
+ {SCSI_PHASE_DATAO, ASC_CMD_XFER_INFO|ASC_CMD_DMA, 0, asc_dma_out_r},
+ {SCSI_PHASE_STATUS, ASC_CMD_I_COMPLETE, 0, asc_clean_fifo},
+ {SCSI_PHASE_MSG_IN, ASC_CMD_MSG_ACPT, 0, asc_get_status},
+ {ANY, SCRIPT_END, 0, asc_end}
+},
+
+#if documentation
+/*
+ * This is what might happen during a read
+ * that disconnects
+ */
+asc_script_data_in_wd[] = { /* started with SEL & DMA & allow disconnect */
+ {SCSI_PHASE_MSG_IN, ASC_CMD_XFER_INFO|ASC_CMD_DMA, 0, asc_msg_in},
+ {ANY, ASC_CMD_ENABLE_SEL, 0, asc_disconnected},
+ {SCSI_PHASE_MSG_IN, ASC_CMD_MSG_ACPT, 0, asc_reconnect},
+ {SCSI_PHASE_DATAI, ASC_CMD_XFER_INFO|ASC_CMD_DMA, 0, asc_dma_in},
+ {SCSI_PHASE_STATUS, ASC_CMD_I_COMPLETE, 0, asc_clean_fifo},
+ {SCSI_PHASE_MSG_IN, ASC_CMD_MSG_ACPT, 0, asc_get_status},
+ {ANY, SCRIPT_END, 0, asc_end}
+},
+#endif
+
+/*
+ * Target mode scripts
+ */
+asc_script_t_data_in[] = {
+ {SCSI_PHASE_CMD, ASC_CMD_RCV_DATA|ASC_CMD_DMA, 0, asc_dma_in_r},
+ {SCSI_PHASE_DATAO, ASC_CMD_TERM, 0, asc_put_status},
+ {ANY, SCRIPT_END, 0, asc_end}
+},
+
+asc_script_t_data_out[] = {
+ {SCSI_PHASE_CMD, ASC_CMD_SND_DATA|ASC_CMD_DMA, 0, asc_dma_out_r},
+ {SCSI_PHASE_DATAI, ASC_CMD_TERM, 0, asc_put_status},
+ {ANY, SCRIPT_END, 0, asc_end}
+};
+
+
+#ifdef DEBUG
+
+#define PRINT(x) if (scsi_debug) printf x
+
+asc_state(regs)
+ asc_padded_regmap_t *regs;
+{
+ register unsigned char ff,csr,ir,d0,d1,cmd;
+
+ if (regs == 0) {
+ if (asc_softc[0])
+ regs = asc_softc[0]->regs;
+ else
+ regs = (asc_padded_regmap_t*)0xbf400000;
+ }
+ ff = get_reg(regs,asc_flags);
+ csr = get_reg(regs,asc_csr);
+/* ir = get_reg(regs,asc_intr); nope, clears interrupt */
+ d0 = get_reg(regs,asc_tc_lsb);
+ d1 = get_reg(regs,asc_tc_msb);
+ cmd = get_reg(regs,asc_cmd);
+ printf("dma %x ff %x csr %x cmd %x\n",
+ (d1 << 8) | d0, ff, csr, cmd);
+ return 0;
+}
+
+asc_target_state(tgt)
+ target_info_t *tgt;
+{
+ if (tgt == 0)
+ tgt = asc_softc[0]->active_target;
+ if (tgt == 0)
+ return 0;
+ db_printf("@x%x: fl %x dma %X+%x cmd %x@%X id %x per %x off %x ior %X ret %X\n",
+ tgt,
+ tgt->flags, tgt->dma_ptr, tgt->transient_state.dma_offset, tgt->cur_cmd,
+ tgt->cmd_ptr, (long)tgt->target_id,
+ (long)tgt->sync_period, (long)tgt->sync_offset,
+ tgt->ior, (long)tgt->done);
+ if (tgt->flags & TGT_DISCONNECTED){
+ script_t spt;
+
+ spt = tgt->transient_state.script;
+ db_printf("disconnected at ");
+ db_printsym(spt,1);
+ db_printf(": %x %x ", spt->condition, spt->command);
+ db_printsym(spt->action,1);
+ db_printf(", ");
+ db_printsym(tgt->transient_state.handler, 1);
+ db_printf("\n");
+ }
+
+ return 0;
+}
+
+asc_all_targets(unit)
+{
+ int i;
+ target_info_t *tgt;
+ for (i = 0; i < 8; i++) {
+ tgt = asc_softc[unit]->sc->target[i];
+ if (tgt)
+ asc_target_state(tgt);
+ }
+}
+
+asc_script_state(unit)
+{
+ script_t spt = asc_softc[unit]->script;
+
+ if (spt == 0) return 0;
+ db_printsym(spt,1);
+ db_printf(": %x %x ", spt->condition, spt->command);
+ db_printsym(spt->action,1);
+ db_printf(", ");
+ db_printsym(asc_softc[unit]->error_handler, 1);
+ return 0;
+}
+
+#define TRMAX 200
+int tr[TRMAX+3];
+int trpt, trpthi;
+#define TR(x) tr[trpt++] = x
+#define TRWRAP trpthi = trpt; trpt = 0;
+#define TRCHECK if (trpt > TRMAX) {TRWRAP}
+
+
+#ifdef TRACE
+
+#define LOGSIZE 256
+int asc_logpt;
+char asc_log[LOGSIZE];
+
+#define MAXLOG_VALUE 0x42
+struct {
+ char *name;
+ unsigned int count;
+} logtbl[MAXLOG_VALUE];
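+
+/*
+ * LOG(e,f) drops the event code e into the asc_log ring buffer and,
+ * for codes below MAXLOG_VALUE, remembers the name f and bumps its
+ * count; asc_print_log() and asc_print_stat() below dump the ring
+ * and the per-event counts (via db_printf/printf).
+ */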
+
+/* private */ LOG(e,f)
+ char *f;
+{
+ asc_log[asc_logpt++] = (e);
+ if (asc_logpt == LOGSIZE) asc_logpt = 0;
+ if ((e) < MAXLOG_VALUE) {
+ logtbl[(e)].name = (f);
+ logtbl[(e)].count++;
+ }
+}
+
+asc_print_log(skip)
+ int skip;
+{
+ register int i, j;
+ register unsigned char c;
+
+ for (i = 0, j = asc_logpt; i < LOGSIZE; i++) {
+ c = asc_log[j];
+ if (++j == LOGSIZE) j = 0;
+ if (skip-- > 0)
+ continue;
+ if (c < MAXLOG_VALUE)
+ db_printf(" %s", logtbl[c].name);
+ else
+ db_printf("-x%x", c & 0x7f);
+ }
+}
+
+asc_print_stat()
+{
+ register int i;
+ register char *p;
+ for (i = 0; i < MAXLOG_VALUE; i++) {
+ if (p = logtbl[i].name)
+ printf("%d %s\n", logtbl[i].count, p);
+ }
+}
+
+#else /*TRACE*/
+#define LOG(e,f)
+#define LOGSIZE
+#endif /*TRACE*/
+
+#else /*DEBUG*/
+#define PRINT(x)
+#define LOG(e,f)
+#define LOGSIZE
+#define TR(x)
+#define TRCHECK
+#define TRWRAP
+
+#endif /*DEBUG*/
+
+
+/*
+ * Probe/Slave/Attach functions
+ */
+
+/*
+ * Probe routine:
+ * Should find out (a) if the controller is
+ * present and (b) which/where slaves are present.
+ *
+ * Implementation:
+ * Send a test-unit-ready to each possible target on the bus
+ * except of course ourselves.
+ */
+asc_probe(reg, ui)
+ vm_offset_t reg;
+ struct bus_ctlr *ui;
+{
+ int unit = ui->unit;
+ asc_softc_t asc = &asc_softc_data[unit];
+ int target_id;
+ scsi_softc_t *sc;
+ register asc_padded_regmap_t *regs;
+ spl_t s;
+ boolean_t did_banner = FALSE;
+
+ /*
+ * We are only called if the right board is there,
+ * but make sure anyway.
+ */
+ if (check_memory(reg, 0))
+ return 0;
+
+#if defined(HAS_MAPPED_SCSI)
+ /* Mappable version side */
+ ASC_probe(reg, ui);
+#endif
+
+ /*
+ * Initialize hw descriptor, cache some pointers
+ */
+ asc_softc[unit] = asc;
+ asc->regs = (asc_padded_regmap_t *) (reg);
+
+ if ((asc->dma_ops = (scsi_dma_ops_t *)asc_std[unit]) == 0)
+ /* use same as unit 0 if undefined */
+ asc->dma_ops = (scsi_dma_ops_t *)asc_std[0];
+ {
+ int dma_bsize = 16; /* bits, preferred */
+ boolean_t do_rfb = FALSE;
+
+ asc->dma_state = (*asc->dma_ops->init)(unit, reg, &dma_bsize, &do_rfb);
+ if (dma_bsize > 16)
+ asc->state |= ASC_STATE_SPEC_DMA;
+ if (do_rfb)
+ asc->state |= ASC_STATE_DO_RFB;
+ }
+
+ queue_init(&asc->waiting_targets);
+
+ asc->clk = asc_clock_speed_in_mhz[unit];
+ asc->ccf = mhz_to_ccf(asc->clk); /* see .h file */
+ asc->timeout = asc_timeout_250(asc->clk,asc->ccf);
+
+ sc = scsi_master_alloc(unit, asc);
+ asc->sc = sc;
+
+ sc->go = asc_go;
+ sc->watchdog = scsi_watchdog;
+ sc->probe = asc_probe_target;
+ asc->wd.reset = asc_reset_scsibus;
+
+#ifdef MACH_KERNEL
+ sc->max_dma_data = -1;
+#else
+ sc->max_dma_data = scsi_per_target_virtual;
+#endif
+
+ regs = asc->regs;
+
+ /*
+ * Our SCSI id on the bus.
+ * The user can set this via the prom on 3maxen/pmaxen.
+ * If this changes it is easy to fix: make a default that
+ * can be changed as boot arg.
+ */
+ {
+ register unsigned char my_id;
+
+ my_id = scsi_initiator_id[unit] & 0x7;
+ if (my_id != 7)
+ regs->asc_cnfg1 = my_id; mb();
+ }
+
+ /*
+ * Reset chip, fully. Note that interrupts are already enabled.
+ */
+ s = splbio();
+ asc_reset(asc, TRUE, asc->state & ASC_STATE_SPEC_DMA);
+
+ sc->initiator_id = regs->asc_cnfg1 & ASC_CNFG1_MY_BUS_ID;
+ printf("%s%d: SCSI id %d", ui->name, unit, sc->initiator_id);
+
+ {
+ register target_info_t *tgt;
+
+ tgt = scsi_slave_alloc(sc->masterno, sc->initiator_id, asc);
+ (*asc->dma_ops->new_target)(asc->dma_state, tgt);
+ sccpu_new_initiator(tgt, tgt);
+ }
+
+ if (asc_probe_dynamically)
+ printf("%s", ", will probe targets on demand");
+ else {
+
+ /*
+ * For all possible targets, see if there is one and allocate
+ * a descriptor for it if it is there.
+ */
+ for (target_id = 0; target_id < 8; target_id++) {
+ register unsigned char csr, ss, ir, ff;
+ register scsi_status_byte_t status;
+
+ /* except of course ourselves */
+ if (target_id == sc->initiator_id)
+ continue;
+
+ regs->asc_cmd = ASC_CMD_FLUSH; /* empty fifo */
+ mb();
+ delay(2);
+
+ regs->asc_dbus_id = target_id; mb();
+ regs->asc_sel_timo = asc->timeout; mb();
+
+ /*
+ * See if the unit is ready.
+ * XXX SHOULD inquiry LUN 0 instead !!!
+ */
+ regs->asc_fifo = SCSI_CMD_TEST_UNIT_READY; mb();
+ regs->asc_fifo = 0; mb();
+ regs->asc_fifo = 0; mb();
+ regs->asc_fifo = 0; mb();
+ regs->asc_fifo = 0; mb();
+ regs->asc_fifo = 0; mb();
+
+ /* select and send it */
+ regs->asc_cmd = ASC_CMD_SEL; mb();
+
+ /* wait for the chip to complete, or timeout */
+ csr = asc_wait(regs, ASC_CSR_INT, 1);
+ ss = get_reg(regs,asc_ss);
+ ir = get_reg(regs,asc_intr);
+
+ /* empty fifo, there is garbage in it if timeout */
+ regs->asc_cmd = ASC_CMD_FLUSH; mb();
+ delay(2);
+
+ /*
+ * Check if the select timed out
+ */
+ if ((ASC_SS(ss) == 0) && (ir == ASC_INT_DISC))
+ /* no one out there */
+ continue;
+
+ if (SCSI_PHASE(csr) != SCSI_PHASE_STATUS) {
+ printf( " %s%d%s", "ignoring target at ", target_id,
+ " cuz it acts weirdo");
+ continue;
+ }
+
+ printf(",%s%d", did_banner++ ? " " : " target(s) at ",
+ target_id);
+
+ regs->asc_cmd = ASC_CMD_I_COMPLETE;
+ wbflush();
+ csr = asc_wait(regs, ASC_CSR_INT, 1);
+ ir = get_reg(regs,asc_intr); /* ack intr */
+ mb();
+
+ status.bits = get_fifo(regs); /* empty fifo */
+ mb();
+ ff = get_fifo(regs);
+
+ if (status.st.scsi_status_code != SCSI_ST_GOOD)
+ scsi_error( 0, SCSI_ERR_STATUS, status.bits, 0);
+
+ regs->asc_cmd = ASC_CMD_MSG_ACPT; mb();
+ csr = asc_wait(regs, ASC_CSR_INT, 1);
+ ir = get_reg(regs,asc_intr); /* ack intr */
+ mb();
+
+ /*
+ * Found a target
+ */
+ asc->ntargets++;
+ {
+ register target_info_t *tgt;
+ tgt = scsi_slave_alloc(sc->masterno, target_id, asc);
+
+ (*asc->dma_ops->new_target)(asc->dma_state, tgt);
+ }
+ }
+ } /* asc_probe_dynamically */
+
+ regs->asc_cmd = ASC_CMD_ENABLE_SEL; mb();
+
+ printf(".\n");
+
+ splx(s);
+ return 1;
+}
+
+boolean_t
+asc_probe_target(tgt, ior)
+ target_info_t *tgt;
+ io_req_t ior;
+{
+ asc_softc_t asc = asc_softc[tgt->masterno];
+ boolean_t newlywed;
+
+ newlywed = (tgt->cmd_ptr == 0);
+ if (newlywed) {
+ (*asc->dma_ops->new_target)(asc->dma_state, tgt);
+ }
+
+ if (scsi_inquiry(tgt, SCSI_INQ_STD_DATA) == SCSI_RET_DEVICE_DOWN)
+ return FALSE;
+
+ asc->ntargets++;
+ tgt->flags = TGT_ALIVE;
+ return TRUE;
+}
+
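+/* Busy-wait for one of the `until' bits to show up in the csr,
+   complaining (if asked to) when ~10^6 polls go by; returns the
+   csr value last read. */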
+private asc_wait(regs, until, complain)
+ asc_padded_regmap_t *regs;
+{
+ int timeo = 1000000;
+ while ((regs->asc_csr & until) == 0) {
+ mb();
+ delay(1);
+ if (!timeo--) {
+ if (complain)
+ printf("asc_wait TIMEO with x%x\n", get_reg(regs,asc_csr));
+ break;
+ }
+ }
+ return get_reg(regs,asc_csr);
+}
+
+asc_reset(asc, quick, special_dma)
+ asc_softc_t asc;
+{
+ char my_id;
+ int ccf;
+ asc_padded_regmap_t *regs;
+
+ regs = asc->regs;
+
+ /* preserve our ID for now */
+ my_id = (regs->asc_cnfg1 & ASC_CNFG1_MY_BUS_ID);
+
+ /*
+ * Reset chip and wait till done
+ */
+ regs->asc_cmd = ASC_CMD_RESET;
+ wbflush(); delay(25);
+
+ /* spec says this is needed after reset */
+ regs->asc_cmd = ASC_CMD_NOP;
+ wbflush(); delay(25);
+
+ /*
+ * Set up various chip parameters
+ */
+ regs->asc_ccf = asc->ccf;
+ wbflush();
+ delay(25);
+ regs->asc_sel_timo = asc->timeout; mb();
+ /* restore our ID */
+ regs->asc_cnfg1 = my_id | ASC_CNFG1_P_CHECK; mb();
+ regs->asc_cnfg2 = ASC_CNFG2_SCSI2;
+ mb();
+ regs->asc_cnfg3 = special_dma ? (ASC_CNFG3_T8|ASC_CNFG3_ALT_DMA) : 0;
+ mb();
+ /* zero anything else */
+ ASC_TC_PUT(regs, 0); mb();
+ regs->asc_syn_p = asc_min_period; mb();
+ regs->asc_syn_o = 0; mb(); /* asynch for now */
+
+ regs->asc_cmd = ASC_CMD_ENABLE_SEL; mb();
+
+ if (quick) return;
+
+ /*
+ * reset the scsi bus, the interrupt routine does the rest
+ * or you can call asc_bus_reset().
+ */
+ regs->asc_cmd = ASC_CMD_BUS_RESET; mb();
+}
+
+
+/*
+ * Operational functions
+ */
+
+/*
+ * Start a SCSI command on a target
+ */
+asc_go(tgt, cmd_count, in_count, cmd_only)
+ target_info_t *tgt;
+ boolean_t cmd_only;
+{
+ asc_softc_t asc;
+ register spl_t s;
+ boolean_t disconn;
+ script_t scp;
+ boolean_t (*handler)();
+
+ LOG(1,"go");
+
+ asc = (asc_softc_t)tgt->hw_state;
+
+ tgt->transient_state.cmd_count = cmd_count; /* keep it here */
+ tgt->transient_state.out_count = 0; /* default */
+
+ (*asc->dma_ops->map)(asc->dma_state, tgt);
+
+ disconn = BGET(scsi_might_disconnect,tgt->masterno,tgt->target_id);
+ disconn = disconn && (asc->ntargets > 1);
+ disconn |= BGET(scsi_should_disconnect,tgt->masterno,tgt->target_id);
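+ /* disconnecting only buys us something if there is someone else
+    to overlap with, unless it is explicitly forced for this target */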
+
+ /*
+ * Setup target state
+ */
+ tgt->done = SCSI_RET_IN_PROGRESS;
+
+ handler = (disconn) ? asc_err_disconn : asc_err_generic;
+
+ switch (tgt->cur_cmd) {
+ case SCSI_CMD_READ:
+ case SCSI_CMD_LONG_READ:
+ LOG(2,"readop");
+ scp = asc_script_data_in;
+ break;
+ case SCSI_CMD_WRITE:
+ case SCSI_CMD_LONG_WRITE:
+ LOG(0x18,"writeop");
+ scp = asc_script_data_out;
+ break;
+ case SCSI_CMD_INQUIRY:
+ /* This is likely the first thing out:
+ do the synch neg if so */
+ if (!cmd_only && ((tgt->flags&TGT_DID_SYNCH)==0)) {
+ scp = asc_script_try_synch;
+ tgt->flags |= TGT_TRY_SYNCH;
+ disconn = FALSE;
+ break;
+ }
+ case SCSI_CMD_REQUEST_SENSE:
+ case SCSI_CMD_MODE_SENSE:
+ case SCSI_CMD_RECEIVE_DIAG_RESULTS:
+ case SCSI_CMD_READ_CAPACITY:
+ case SCSI_CMD_READ_BLOCK_LIMITS:
+ case SCSI_CMD_READ_TOC:
+ case SCSI_CMD_READ_SUBCH:
+ case SCSI_CMD_READ_HEADER:
+ case 0xc4: /* despised: SCSI_CMD_DEC_PLAYBACK_STATUS */
+ case 0xdd: /* despised: SCSI_CMD_NEC_READ_SUBCH_Q */
+ case 0xde: /* despised: SCSI_CMD_NEC_READ_TOC */
+ scp = asc_script_data_in;
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ case SCSI_CMD_MODE_SELECT:
+ case SCSI_CMD_REASSIGN_BLOCKS:
+ case SCSI_CMD_FORMAT_UNIT:
+ case 0xc9: /* vendor-spec: SCSI_CMD_DEC_PLAYBACK_CONTROL */
+ tgt->transient_state.cmd_count = sizeof_scsi_command(tgt->cur_cmd);
+ tgt->transient_state.out_count =
+ cmd_count - tgt->transient_state.cmd_count;
+ scp = asc_script_data_out;
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ case SCSI_CMD_TEST_UNIT_READY:
+ /*
+ * Do the synch negotiation here, unless prohibited
+ * or done already
+ */
+ if (tgt->flags & TGT_DID_SYNCH) {
+ scp = asc_script_simple_cmd;
+ } else {
+ scp = asc_script_try_synch;
+ tgt->flags |= TGT_TRY_SYNCH;
+ cmd_only = FALSE;
+ disconn = FALSE;
+ }
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ default:
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ scp = asc_script_simple_cmd;
+ }
+
+ tgt->transient_state.script = scp;
+ tgt->transient_state.handler = handler;
+ tgt->transient_state.identify = (cmd_only) ? 0xff :
+ (disconn ? SCSI_IDENTIFY|SCSI_IFY_ENABLE_DISCONNECT :
+ SCSI_IDENTIFY);
+
+ if (in_count)
+ tgt->transient_state.in_count =
+ (in_count < tgt->block_size) ? tgt->block_size : in_count;
+ else
+ tgt->transient_state.in_count = 0;
+
+ /*
+ * See if another target is currently selected on
+ * this SCSI bus, i.e. lock the asc structure.
+ * Note that it is the strategy routine's job
+ * to serialize ops on the same target as appropriate.
+ * XXX here and everywhere, locks!
+ */
+ /*
+ * Protection vis-a-vis reconnections makes it tricky.
+ */
+ s = splbio();
+
+ if (asc->wd.nactive++ == 0)
+ asc->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+ if (asc->state & ASC_STATE_BUSY) {
+ /*
+ * Queue up this target, note that this takes care
+ * of proper FIFO scheduling of the scsi-bus.
+ */
+ LOG(3,"enqueue");
+ enqueue_tail(&asc->waiting_targets, (queue_entry_t) tgt);
+ } else {
+ /*
+ * It is down to at most two contenders now,
+ * we will treat reconnections the same as selections
+ * and let the scsi-bus arbitration process decide.
+ */
+ asc->state |= ASC_STATE_BUSY;
+ asc->next_target = tgt;
+ asc_attempt_selection(asc);
+ /*
+ * Note that we might still lose arbitration..
+ */
+ }
+ splx(s);
+}
+
+asc_attempt_selection(asc)
+ asc_softc_t asc;
+{
+ target_info_t *tgt;
+ asc_padded_regmap_t *regs;
+ register int out_count;
+
+ regs = asc->regs;
+ tgt = asc->next_target;
+
+ LOG(4,"select");
+ LOG(0x80+tgt->target_id,0);
+
+ /*
+ * We own the bus now.. unless we lose arbitration
+ */
+ asc->active_target = tgt;
+
+ /* Try to avoid reselect collisions */
+ if ((regs->asc_csr & (ASC_CSR_INT|SCSI_PHASE_MSG_IN)) ==
+ (ASC_CSR_INT|SCSI_PHASE_MSG_IN))
+ return;
+
+ /*
+ * Cleanup the FIFO
+ */
+ regs->asc_cmd = ASC_CMD_FLUSH;
+ readback(regs->asc_cmd);
+ /*
+ * This value is not from the spec: I have seen it fail
+ * without this delay when logging was disabled (the logging
+ * path added about 42 extra instructions @ 25 MHz).
+ */
+ delay(2);/* XXX time & move later */
+
+
+ /*
+ * Init bus state variables
+ */
+ asc->script = tgt->transient_state.script;
+ asc->error_handler = tgt->transient_state.handler;
+ asc->done = SCSI_RET_IN_PROGRESS;
+
+ asc->out_count = 0;
+ asc->in_count = 0;
+ asc->extra_count = 0;
+
+ /*
+ * Start the chip going
+ */
+ out_count = (*asc->dma_ops->start_cmd)(asc->dma_state, tgt);
+ if (tgt->transient_state.identify != 0xff) {
+ regs->asc_fifo = tgt->transient_state.identify | tgt->lun;
+ mb();
+ }
+ ASC_TC_PUT(regs, out_count);
+ readback(regs->asc_cmd);
+
+ regs->asc_dbus_id = tgt->target_id;
+ readback(regs->asc_dbus_id);
+
+ regs->asc_sel_timo = asc->timeout;
+ readback(regs->asc_sel_timo);
+
+ regs->asc_syn_p = tgt->sync_period;
+ readback(regs->asc_syn_p);
+
+ regs->asc_syn_o = tgt->sync_offset;
+ readback(regs->asc_syn_o);
+
+ /* ugly little help for compiler */
+#define command out_count
+ if (tgt->flags & TGT_DID_SYNCH) {
+ command = (tgt->transient_state.identify == 0xff) ?
+ ASC_CMD_SEL | ASC_CMD_DMA :
+ ASC_CMD_SEL_ATN | ASC_CMD_DMA; /*preferred*/
+ } else if (tgt->flags & TGT_TRY_SYNCH)
+ command = ASC_CMD_SEL_ATN_STOP;
+ else
+ command = ASC_CMD_SEL | ASC_CMD_DMA;
+
+ /* Try to avoid reselect collisions */
+ if ((regs->asc_csr & (ASC_CSR_INT|SCSI_PHASE_MSG_IN)) !=
+ (ASC_CSR_INT|SCSI_PHASE_MSG_IN)) {
+ register int tmp;
+
+ regs->asc_cmd = command; mb();
+ /*
+ * Very nasty but infrequent problem here. We got/will get
+ * reconnected but the chip did not interrupt. The watchdog would
+ * fix it all right, but the hang stops the machine before the watchdog expires!
+ * Too bad we cannot just look at the interrupt register, sigh.
+ */
+ tmp = get_reg(regs,asc_cmd);
+ if ((tmp != command) && (tmp == (ASC_CMD_NOP|ASC_CMD_DMA))) {
+ if ((regs->asc_csr & ASC_CSR_INT) == 0) {
+ delay(250); /* increase if in trouble */
+
+ if (get_reg(regs,asc_csr) == SCSI_PHASE_MSG_IN) {
+ /* ok, take the risk of reading the ir */
+ tmp = get_reg(regs,asc_intr); mb();
+ if (tmp & ASC_INT_RESEL) {
+ (void) asc_reconnect(asc, get_reg(regs,asc_csr), tmp);
+ asc_wait(regs, ASC_CSR_INT, 1);
+ tmp = get_reg(regs,asc_intr); mb();
+ regs->asc_cmd = ASC_CMD_MSG_ACPT;
+ readback(regs->asc_cmd);
+ } else /* does not happen, but who knows.. */
+ asc_reset(asc,FALSE,asc->state & ASC_STATE_SPEC_DMA);
+ }
+ }
+ }
+ }
+#undef command
+}
+
+/*
+ * Interrupt routine
+ * Take interrupts from the chip
+ *
+ * Implementation:
+ * Move along the current command's script if
+ * all is well, invoke error handler if not.
+ */
+asc_intr(unit, spllevel)
+ spl_t spllevel;
+{
+ register asc_softc_t asc;
+ register script_t scp;
+ register int ir, csr;
+ register asc_padded_regmap_t *regs;
+#if defined(HAS_MAPPED_SCSI)
+ extern boolean_t rz_use_mapped_interface;
+
+ if (rz_use_mapped_interface)
+ return ASC_intr(unit,spllevel);
+#endif
+
+ asc = asc_softc[unit];
+
+ LOG(5,"\n\tintr");
+ /* collect ephemeral information */
+ regs = asc->regs;
+ csr = get_reg(regs,asc_csr); mb();
+ asc->ss_was = get_reg(regs,asc_ss); mb();
+ asc->cmd_was = get_reg(regs,asc_cmd); mb();
+
+ /* drop spurious interrupts */
+ if ((csr & ASC_CSR_INT) == 0)
+ return;
+
+ ir = get_reg(regs,asc_intr); /* this re-latches CSR (and SSTEP) */
+ mb();
+
+TR(csr);TR(ir);TR(get_reg(regs,asc_cmd));TRCHECK
+
+ /* this must be done within 250msec of disconnect */
+ if (ir & ASC_INT_DISC) {
+ regs->asc_cmd = ASC_CMD_ENABLE_SEL;
+ readback(regs->asc_cmd);
+ }
+
+ if (ir & ASC_INT_RESET)
+ return asc_bus_reset(asc);
+
+ /* we got an interrupt allright */
+ if (asc->active_target)
+ asc->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+#if mips
+ splx(spllevel); /* drop priority */
+#endif
+
+ if ((asc->state & ASC_STATE_TARGET) ||
+ (ir & (ASC_INT_SEL|ASC_INT_SEL_ATN)))
+ return asc_target_intr(asc, csr, ir);
+
+ /*
+ * In attempt_selection() we could not check the asc_intr
+ * register to see if a reselection was in progress [else
+ * we would cancel the interrupt, and it would not be safe
+ * anyway]. So we issued the select command even though the
+ * chip might already have been reconnected. What
+ * happens then is that we get an illegal command interrupt,
+ * which is why the second clause below. Sorry, I'd do it better
+ * if I knew of a better way.
+ */
+ if ((ir & ASC_INT_RESEL) ||
+ ((ir & ASC_INT_ILL) && (regs->asc_cmd & ASC_CMD_SEL_ATN)))
+ return asc_reconnect(asc, csr, ir);
+
+ /*
+ * Check for various errors
+ */
+ if ((csr & (ASC_CSR_GE|ASC_CSR_PE)) || (ir & ASC_INT_ILL)) {
+ char *msg;
+printf("{E%x,%x}", csr, ir);
+ if (csr & ASC_CSR_GE)
+ return;/* sit and pray? */
+
+ if (csr & ASC_CSR_PE)
+ msg = "SCSI bus parity error";
+ if (ir & ASC_INT_ILL)
+ msg = "Chip sez Illegal Command";
+ /* all we can do is to throw a reset on the bus */
+ printf( "asc%d: %s%s", asc - asc_softc_data, msg,
+ ", attempting recovery.\n");
+ asc_reset(asc, FALSE, asc->state & ASC_STATE_SPEC_DMA);
+ return;
+ }
+
+ if ((scp = asc->script) == 0) /* sanity */
+ return;
+
+ LOG(6,"match");
+ if (SCRIPT_MATCH(csr,ir,scp->condition)) {
+ /*
+ * Perform the appropriate operation,
+ * then proceed
+ */
+ if ((*scp->action)(asc, csr, ir)) {
+ asc->script = scp + 1;
+ regs->asc_cmd = scp->command; mb();
+ }
+ } else
+ (void) (*asc->error_handler)(asc, csr, ir);
+}
+
+asc_target_intr(asc, csr, ir)
+ register asc_softc_t asc;
+
+{
+ register script_t scp;
+
+ LOG(0x1e,"tmode");
+
+ if ((asc->state & ASC_STATE_TARGET) == 0) {
+
+ /*
+ * We just got selected
+ */
+ asc->state |= ASC_STATE_TARGET;
+
+ /*
+ * See if this selection collided with our selection attempt
+ */
+ if (asc->state & ASC_STATE_BUSY)
+ asc->state |= ASC_STATE_COLLISION;
+ asc->state |= ASC_STATE_BUSY;
+
+ return asc_selected(asc, csr, ir);
+
+ }
+ /* We must be executing a script here */
+ scp = asc->script;
+ assert(scp != 0);
+
+ LOG(6,"match");
+ if (SCRIPT_MATCH(csr,ir,scp->condition)) {
+ /*
+ * Perform the appropriate operation,
+ * then proceed
+ */
+ if ((*scp->action)(asc, csr, ir)) {
+ asc->script = scp + 1;
+ asc->regs->asc_cmd = scp->command; mb();
+ }
+ } else
+ (void) (*asc->error_handler)(asc, csr, ir);
+
+}
+
+/*
+ * All the many little things that the interrupt
+ * routine might switch to
+ */
+boolean_t
+asc_clean_fifo(asc, csr, ir)
+ register asc_softc_t asc;
+
+{
+ register asc_padded_regmap_t *regs = asc->regs;
+ register char ff;
+
+ ASC_TC_GET(regs,asc->dmacnt_at_end);
+
+ ASC_TC_PUT(regs,0); /* stop dma engine */
+ readback(regs->asc_cmd);
+
+ LOG(7,"clean_fifo");
+
+ while (fifo_count(regs)) {
+ ff = get_fifo(regs);
+ mb();
+ }
+ return TRUE;
+}
+
+boolean_t
+asc_end(asc, csr, ir)
+ register asc_softc_t asc;
+{
+ register target_info_t *tgt;
+ register io_req_t ior;
+
+ LOG(8,"end");
+
+ asc->state &= ~ASC_STATE_TARGET;
+ asc->regs->asc_syn_p = 0; mb();
+ asc->regs->asc_syn_o = 0; mb();
+
+ tgt = asc->active_target;
+ if ((tgt->done = asc->done) == SCSI_RET_IN_PROGRESS)
+ tgt->done = SCSI_RET_SUCCESS;
+
+ asc->script = 0;
+
+ if (asc->wd.nactive-- == 1)
+ asc->wd.watchdog_state = SCSI_WD_INACTIVE;
+
+ asc_release_bus(asc);
+
+ if (ior = tgt->ior) {
+ /*
+ * WARNING: the above might have scheduled the
+ * DMA engine off to someone else. Keep it in
+ * mind in the following code
+ */
+ (*asc->dma_ops->end_cmd)(asc->dma_state, tgt, ior);
+
+ LOG(0xA,"ops->restart");
+ (*tgt->dev_ops->restart)( tgt, TRUE);
+ }
+
+ return FALSE;
+}
+
+boolean_t
+asc_release_bus(asc)
+ register asc_softc_t asc;
+{
+ boolean_t ret = TRUE;
+
+ LOG(9,"release");
+ if (asc->state & ASC_STATE_COLLISION) {
+
+ LOG(0xB,"collided");
+ asc->state &= ~ASC_STATE_COLLISION;
+ asc_attempt_selection(asc);
+
+ } else if (queue_empty(&asc->waiting_targets)) {
+
+ asc->state &= ~ASC_STATE_BUSY;
+ asc->active_target = 0;
+ asc->script = 0;
+ ret = FALSE;
+
+ } else {
+
+ LOG(0xC,"dequeue");
+ asc->next_target = (target_info_t *)
+ dequeue_head(&asc->waiting_targets);
+ asc_attempt_selection(asc);
+ }
+ return ret;
+}
+
+boolean_t
+asc_get_status(asc, csr, ir)
+ register asc_softc_t asc;
+{
+ register asc_padded_regmap_t *regs = asc->regs;
+ register scsi2_status_byte_t status;
+ int len;
+ boolean_t ret;
+ io_req_t ior;
+ register target_info_t *tgt = asc->active_target;
+
+ LOG(0xD,"get_status");
+TRWRAP
+
+ asc->state &= ~ASC_STATE_DMA_IN;
+
+ if (asc->state & ASC_STATE_DO_RFB) {
+ tgt->transient_state.has_oddb = FALSE;
+ regs->asc_cnfg2 = ASC_CNFG2_SCSI2;
+ }
+
+ /*
+ * Get the last two bytes in FIFO
+ */
+ while (fifo_count(regs) > 2) {
+ status.bits = get_fifo(regs); mb();
+ }
+
+ status.bits = get_fifo(regs); mb();
+
+ if (status.st.scsi_status_code != SCSI_ST_GOOD) {
+ scsi_error(asc->active_target, SCSI_ERR_STATUS, status.bits, 0);
+ asc->done = (status.st.scsi_status_code == SCSI_ST_BUSY) ?
+ SCSI_RET_RETRY : SCSI_RET_NEED_SENSE;
+ } else
+ asc->done = SCSI_RET_SUCCESS;
+
+ status.bits = get_fifo(regs); /* just pop the command_complete */
+ mb();
+
+ /* if reading, move the last piece of data in main memory */
+ if (len = asc->in_count) {
+ register int count;
+
+ count = asc->dmacnt_at_end;
+ if (count) {
+#if 0
+ this is incorrect and besides..
+ tgt->ior->io_residual = count;
+#endif
+ len -= count;
+ }
+ regs->asc_cmd = asc->script->command;
+ readback(regs->asc_cmd);
+
+ ret = FALSE;
+ } else
+ ret = TRUE;
+
+ asc->dmacnt_at_end = 0;
+ (*asc->dma_ops->end_xfer)(asc->dma_state, tgt, len);
+ if (!ret)
+ asc->script++;
+ return ret;
+}
+
+boolean_t
+asc_put_status(asc, csr, ir)
+ register asc_softc_t asc;
+{
+ register asc_padded_regmap_t *regs = asc->regs;
+ register scsi2_status_byte_t status;
+ register target_info_t *tgt = asc->active_target;
+ int len;
+
+ LOG(0x21,"put_status");
+
+ asc->state &= ~ASC_STATE_DMA_IN;
+
+ if (len = asc->in_count) {
+ register int count;
+
+ ASC_TC_GET(regs,count); mb();
+ if (count)
+ len -= count;
+ }
+ (*asc->dma_ops->end_xfer)(asc->dma_state, tgt, len);
+
+/* status.st.scsi_status_code = SCSI_ST_GOOD; */
+ regs->asc_fifo = 0; mb();
+ regs->asc_fifo = SCSI_COMMAND_COMPLETE; mb();
+
+ return TRUE;
+}
+
+
+boolean_t
+asc_dma_in(asc, csr, ir)
+ register asc_softc_t asc;
+{
+ register target_info_t *tgt;
+ register asc_padded_regmap_t *regs = asc->regs;
+ register int count;
+ unsigned char ff = get_reg(regs,asc_flags); mb();
+
+ LOG(0xE,"dma_in");
+ tgt = asc->active_target;
+
+ /*
+ * This seems to be needed on certain rudimentary targets
+ * (such as the DEC TK50 tape) which apparently only pick
+ * up 6 initial bytes: when you add the initial IDENTIFY
+ * you are left with 1 pending byte, which is left in the
+ * FIFO and would otherwise show up atop the data we are
+ * really requesting.
+ *
+ * This is only speculation, though, based on the fact the
+ * sequence step value of 3 out of select means the target
+ * changed phase too quickly and some bytes have not been
+ * xferred (see NCR manuals). Counter to this theory goes
+ * the fact that the extra byte that shows up is not always
+ * zero, and appears to be pretty random.
+ * Note that asc_flags say there is one byte in the FIFO
+ * even in the ok case, but the sstep value is the right one.
+ * Note finally that this might all be a sync/async issue:
+ * I have only checked the ok case on synch disks so far.
+ *
+ * Indeed it seems to be an asynch issue: exabytes do it too.
+ */
+ if ((tgt->sync_offset == 0) && ((ff & ASC_FLAGS_SEQ_STEP) != 0x80)) {
+ regs->asc_cmd = ASC_CMD_NOP;
+ wbflush();
+ PRINT(("[tgt %d: %x while %d]", tgt->target_id, ff, tgt->cur_cmd));
+ while ((ff & ASC_FLAGS_FIFO_CNT) != 0) {
+ ff = get_fifo(regs); mb();
+ ff = get_reg(regs,asc_flags); mb();
+ }
+ }
+
+ asc->state |= ASC_STATE_DMA_IN;
+
+ count = (*asc->dma_ops->start_datain)(asc->dma_state, tgt);
+ ASC_TC_PUT(regs, count);
+ readback(regs->asc_cmd);
+
+ if ((asc->in_count = count) == tgt->transient_state.in_count)
+ return TRUE;
+ regs->asc_cmd = asc->script->command; mb();
+ asc->script = asc_script_restart_data_in;
+ return FALSE;
+}
+
+boolean_t
+asc_dma_in_r(asc, csr, ir)
+ register asc_softc_t asc;
+{
+ register target_info_t *tgt;
+ register asc_padded_regmap_t *regs = asc->regs;
+ register int count;
+ boolean_t advance_script = TRUE;
+
+
+ LOG(0x1f,"dma_in_r");
+ tgt = asc->active_target;
+
+ asc->state |= ASC_STATE_DMA_IN;
+
+ if (asc->in_count == 0) {
+ /*
+ * Got nothing yet, we just reconnected.
+ */
+ register int avail;
+
+ /*
+ * NOTE: if we have to handle the RFB (obb),
+ * the odd byte has been installed at reconnect
+ * time, before switching to data-in phase. Now
+ * we are already in data-in phase.
+ * It is up to the DMA engine to trim the dma_ptr
+ * down one byte.
+ */
+
+ count = (*asc->dma_ops->restart_datain_1)
+ (asc->dma_state, tgt);
+
+ /* common case of 8k-or-less read ? */
+ advance_script = (tgt->transient_state.in_count == count);
+
+ } else {
+
+ /*
+ * We received some data.
+ */
+ register int offset, xferred;
+
+ /*
+ * Problem: sometimes we get a 'spurious' interrupt
+ * right after a reconnect. The sequence is disconnect,
+ * reconnect, dma_in_r, and then we end up here while DMA is still rolling.
+ * Since there is no good reason we got here to begin with
+ * we just check for the case and dismiss it: we should
+ * get another interrupt when the TC goes to zero or the
+ * target disconnects.
+ */
+ ASC_TC_GET(regs,xferred); mb();
+ if (xferred != 0)
+ return FALSE;
+
+ xferred = asc->in_count - xferred;
+ assert(xferred > 0);
+
+ tgt->transient_state.in_count -= xferred;
+ assert(tgt->transient_state.in_count > 0);
+
+ /*
+ * There should NOT be any obb issues here,
+ * we would have no control anyways.
+ */
+ count = (*asc->dma_ops->restart_datain_2)
+ (asc->dma_state, tgt, xferred);
+
+ asc->in_count = count;
+ ASC_TC_PUT(regs, count);
+ readback(regs->asc_cmd);
+ regs->asc_cmd = asc->script->command; mb();
+
+ (*asc->dma_ops->restart_datain_3)
+ (asc->dma_state, tgt);
+
+ /* last chunk ? */
+ if (count == tgt->transient_state.in_count)
+ asc->script++;
+
+ return FALSE;
+ }
+
+ asc->in_count = count;
+ ASC_TC_PUT(regs, count);
+ readback(regs->asc_cmd);
+
+ if (!advance_script) {
+ regs->asc_cmd = asc->script->command;
+ readback(regs->asc_cmd);
+ }
+ return advance_script;
+}
+
+
+/* send data to target. Only called to start the xfer */
+
+boolean_t
+asc_dma_out(asc, csr, ir)
+ register asc_softc_t asc;
+{
+ register asc_padded_regmap_t *regs = asc->regs;
+ register int reload_count;
+ register target_info_t *tgt;
+ int command;
+
+ LOG(0xF,"dma_out");
+
+ ASC_TC_GET(regs, reload_count); mb();
+ asc->extra_count = fifo_count(regs);
+ reload_count += asc->extra_count;
+ ASC_TC_PUT(regs, reload_count);
+ asc->state &= ~ASC_STATE_DMA_IN;
+ readback(regs->asc_cmd);
+
+ tgt = asc->active_target;
+
+ command = asc->script->command;
+
+ if (reload_count == 0) reload_count = ASC_TC_MAX;
+ asc->out_count = reload_count;
+
+ if (reload_count >= tgt->transient_state.out_count)
+ asc->script++;
+ else
+ asc->script = asc_script_restart_data_out;
+
+ if ((*asc->dma_ops->start_dataout)
+ (asc->dma_state, tgt, (volatile unsigned *)&regs->asc_cmd,
+ command, &asc->extra_count)) {
+ regs->asc_cmd = command;
+ readback(regs->asc_cmd);
+ }
+
+ return FALSE;
+}
+
+/* send data to target. Called in two different ways:
+ (a) to restart a big transfer and
+ (b) after reconnection
+ */
+boolean_t
+asc_dma_out_r(asc, csr, ir)
+ register asc_softc_t asc;
+{
+ register asc_padded_regmap_t *regs = asc->regs;
+ register target_info_t *tgt;
+ boolean_t advance_script = TRUE;
+ int count;
+
+
+ LOG(0x20,"dma_out_r");
+
+ tgt = asc->active_target;
+ asc->state &= ~ASC_STATE_DMA_IN;
+
+ if (asc->out_count == 0) {
+ /*
+ * Nothing committed: we just got reconnected
+ */
+ count = (*asc->dma_ops->restart_dataout_1)
+ (asc->dma_state, tgt);
+
+ /* is this the last chunk ? */
+ advance_script = (tgt->transient_state.out_count == count);
+ } else {
+ /*
+ * We sent some data.
+ */
+ register int offset, xferred;
+
+ ASC_TC_GET(regs,count); mb();
+
+ /* see comment above */
+ if (count) {
+ return FALSE;
+ }
+
+ count += fifo_count(regs);
+ count -= asc->extra_count;
+ xferred = asc->out_count - count;
+ assert(xferred > 0);
+
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+
+ count = (*asc->dma_ops->restart_dataout_2)
+ (asc->dma_state, tgt, xferred);
+
+ /* last chunk ? */
+ if (tgt->transient_state.out_count == count)
+ goto quickie;
+
+ asc->out_count = count;
+
+ asc->extra_count = (*asc->dma_ops->restart_dataout_3)
+ (asc->dma_state, tgt,
+ (volatile unsigned *)&regs->asc_fifo);
+ ASC_TC_PUT(regs, count);
+ readback(regs->asc_cmd);
+ regs->asc_cmd = asc->script->command; mb();
+
+ (*asc->dma_ops->restart_dataout_4)(asc->dma_state, tgt);
+
+ return FALSE;
+ }
+
+quickie:
+ asc->extra_count = (*asc->dma_ops->restart_dataout_3)
+ (asc->dma_state, tgt,
+ (volatile unsigned *)&regs->asc_fifo);
+
+ asc->out_count = count;
+
+ ASC_TC_PUT(regs, count);
+ readback(regs->asc_cmd);
+
+ if (!advance_script) {
+ regs->asc_cmd = asc->script->command;
+ }
+ return advance_script;
+}
+
+boolean_t
+asc_dosynch(asc, csr, ir)
+ register asc_softc_t asc;
+ register unsigned char csr, ir;
+{
+ register asc_padded_regmap_t *regs = asc->regs;
+ register unsigned char c;
+ int i, per, offs;
+ register target_info_t *tgt;
+
+ /*
+ * Phase is MSG_OUT here.
+ * Try to go synchronous, unless prohibited
+ */
+ tgt = asc->active_target;
+ regs->asc_cmd = ASC_CMD_FLUSH;
+ readback(regs->asc_cmd);
+ delay(1);
+
+ per = asc_min_period;
+ if (BGET(scsi_no_synchronous_xfer,asc->sc->masterno,tgt->target_id))
+ offs = 0;
+ else
+ offs = asc_max_offset;
+
+ tgt->flags |= TGT_DID_SYNCH; /* only one chance */
+ tgt->flags &= ~TGT_TRY_SYNCH;
+
+ regs->asc_fifo = SCSI_EXTENDED_MESSAGE; mb();
+ regs->asc_fifo = 3; mb();
+ regs->asc_fifo = SCSI_SYNC_XFER_REQUEST; mb();
+ regs->asc_fifo = asc_to_scsi_period(asc_min_period,asc->clk); mb();
+ regs->asc_fifo = offs; mb();
+ regs->asc_cmd = ASC_CMD_XFER_INFO;
+ readback(regs->asc_cmd);
+ csr = asc_wait(regs, ASC_CSR_INT, 1);
+ ir = get_reg(regs,asc_intr); mb();
+
+ /* some targets might be slow to move to msg-in */
+
+ if (SCSI_PHASE(csr) != SCSI_PHASE_MSG_IN) {
+
+ /* wait for direction bit to flip */
+ csr = asc_wait(regs, SCSI_IO, 0);
+ ir = get_reg(regs,asc_intr); mb();
+ /* Some ugly targets go straight to command phase.
+ "You could at least say goodbye" */
+ if (SCSI_PHASE(csr) == SCSI_PHASE_CMD)
+ goto did_not;
+ if (SCSI_PHASE(csr) != SCSI_PHASE_MSG_IN)
+ gimmeabreak();
+ }
+
+ regs->asc_cmd = ASC_CMD_XFER_INFO; mb();
+ csr = asc_wait(regs, ASC_CSR_INT, 1);
+ ir = get_reg(regs,asc_intr); mb();
+
+ /* some targets do not even take all the bytes out */
+ while (fifo_count(regs) > 0) {
+ c = get_fifo(regs); /* see what it says */
+ mb();
+ }
+
+ if (c == SCSI_MESSAGE_REJECT) {
+did_not:
+ printf(" did not like SYNCH xfer ");
+
+ /* Tk50s get in trouble with ATN, sigh. */
+ regs->asc_cmd = ASC_CMD_CLR_ATN;
+ readback(regs->asc_cmd);
+
+ goto cmd;
+ }
+
+ /*
+ * Receive the rest of the message
+ */
+ regs->asc_cmd = ASC_CMD_MSG_ACPT; mb();
+ asc_wait(regs, ASC_CSR_INT, 1);
+ ir = get_reg(regs,asc_intr); mb();
+
+ if (c != SCSI_EXTENDED_MESSAGE)
+ gimmeabreak();
+
+ regs->asc_cmd = ASC_CMD_XFER_INFO; mb();
+ asc_wait(regs, ASC_CSR_INT, 1);
+ c = get_reg(regs,asc_intr); mb();
+ if (get_fifo(regs) != 3)
+ panic("asc_dosynch");
+
+ for (i = 0; i < 3; i++) {
+ regs->asc_cmd = ASC_CMD_MSG_ACPT; mb();
+ asc_wait(regs, ASC_CSR_INT, 1);
+ c = get_reg(regs,asc_intr); mb();
+
+ regs->asc_cmd = ASC_CMD_XFER_INFO; mb();
+ asc_wait(regs, ASC_CSR_INT, 1);
+ c = get_reg(regs,asc_intr);/*ack*/ mb();
+ c = get_fifo(regs); mb();
+
+ if (i == 1) tgt->sync_period = scsi_period_to_asc(c,asc->clk);
+ if (i == 2) tgt->sync_offset = c;
+ }
+
+cmd:
+ regs->asc_cmd = ASC_CMD_MSG_ACPT; mb();
+ csr = asc_wait(regs, ASC_CSR_INT, 1);
+ c = get_reg(regs,asc_intr); mb();
+
+ /* Might have to wait a bit longer for slow targets */
+ for (c = 0; SCSI_PHASE(get_reg(regs,asc_csr)) == SCSI_PHASE_MSG_IN; c++) {
+ mb();
+ delay(2);
+ if (c & 0x80) break; /* waited too long */
+ }
+ csr = get_reg(regs,asc_csr); mb();
+
+ /* phase should normally be command here */
+ if (SCSI_PHASE(csr) == SCSI_PHASE_CMD) {
+ register char *cmd = tgt->cmd_ptr;
+
+ /* test unit ready or inquiry */
+ for (i = 0; i < sizeof(scsi_command_group_0); i++) {
+ regs->asc_fifo = *cmd++; mb();
+ }
+ ASC_TC_PUT(regs,0xff); mb();
+ regs->asc_cmd = ASC_CMD_XFER_PAD; /*0x18*/ mb();
+
+ if (tgt->cur_cmd == SCSI_CMD_INQUIRY) {
+ tgt->transient_state.script = asc_script_data_in;
+ asc->script = tgt->transient_state.script;
+ regs->asc_syn_p = tgt->sync_period;
+ regs->asc_syn_o = tgt->sync_offset; mb();
+ return FALSE;
+ }
+
+ csr = asc_wait(regs, ASC_CSR_INT, 1);
+ ir = get_reg(regs,asc_intr);/*ack*/ mb();
+ }
+
+ if (SCSI_PHASE(csr) != SCSI_PHASE_STATUS)
+ csr = asc_wait(regs, SCSI_IO, 1); /* direction flip */
+
+status:
+ if (SCSI_PHASE(csr) != SCSI_PHASE_STATUS)
+ gimmeabreak();
+
+ return TRUE;
+}
+
+/* The other side of the coin.. */
+asc_odsynch(asc, initiator)
+ register asc_softc_t asc;
+ target_info_t *initiator;
+{
+ register asc_padded_regmap_t *regs = asc->regs;
+ register unsigned char c;
+ int len, per, offs;
+
+ /*
+ * Phase is MSG_OUT, we are the target and we have control.
+ * Any IDENTIFY messages have been handled already.
+ */
+ initiator->flags |= TGT_DID_SYNCH;
+ initiator->flags &= ~TGT_TRY_SYNCH;
+
+ /*
+ * We only understand synch negotiations
+ */
+ c = get_fifo(regs); mb();
+ if (c != SCSI_EXTENDED_MESSAGE) goto bad;
+
+ /*
+ * This is not in the specs, but apparently the chip knows
+ * enough about scsi to receive the length automatically.
+ * So there were two bytes in the fifo at function call.
+ */
+ len = get_fifo(regs); mb();
+ if (len != 3) goto bad;
+ while (len) {
+ if (fifo_count(regs) == 0) {
+ regs->asc_cmd = ASC_CMD_RCV_MSG;
+ readback(regs->asc_cmd);
+ asc_wait(regs, ASC_CSR_INT, 1);
+ c = get_reg(regs,asc_intr); mb();
+ }
+ c = get_fifo(regs); mb();
+ if (len == 1) offs = c;
+ if (len == 2) per = c;
+ len--;
+ }
+
+ /*
+ * Adjust the proposed parameters
+ */
+ c = scsi_period_to_asc(per,asc->clk);
+ initiator->sync_period = c;
+ per = asc_to_scsi_period(c,asc->clk);
+
+ if (offs > asc_max_offset) offs = asc_max_offset;
+ initiator->sync_offset = offs;
+
+ /*
+ * Tell him what the deal is
+ */
+ regs->asc_fifo = SCSI_EXTENDED_MESSAGE; mb();
+ regs->asc_fifo = 3; mb();
+ regs->asc_fifo = SCSI_SYNC_XFER_REQUEST; mb();
+ regs->asc_fifo = per; mb();
+ regs->asc_fifo = offs; mb();
+ regs->asc_cmd = ASC_CMD_SND_MSG;
+ readback(regs->asc_cmd);
+ asc_wait(regs, ASC_CSR_INT, 1);
+ c = get_reg(regs,asc_intr); mb();
+
+ /*
+ * Exit conditions: fifo empty, phase undefined but non-command
+ */
+ return;
+
+ /*
+ * Something wrong, reject the message
+ */
+bad:
+ while (fifo_count(regs)) {
+ c = get_fifo(regs); mb();
+ }
+ regs->asc_fifo = SCSI_MESSAGE_REJECT; mb();
+ regs->asc_cmd = ASC_CMD_SND_MSG;
+ readback(regs->asc_cmd);
+ asc_wait(regs, ASC_CSR_INT, 1);
+ c = get_reg(regs,asc_intr); mb();
+}
+
+/*
+ * The bus was reset
+ */
+asc_bus_reset(asc)
+ register asc_softc_t asc;
+{
+ register asc_padded_regmap_t *regs = asc->regs;
+
+ LOG(0x1d,"bus_reset");
+
+ /*
+ * Clear bus descriptor
+ */
+ asc->script = 0;
+ asc->error_handler = 0;
+ asc->active_target = 0;
+ asc->next_target = 0;
+ asc->state &= ASC_STATE_SPEC_DMA | ASC_STATE_DO_RFB;
+ queue_init(&asc->waiting_targets);
+ asc->wd.nactive = 0;
+ asc_reset(asc, TRUE, asc->state & ASC_STATE_SPEC_DMA);
+
+ printf("asc: (%d) bus reset ", ++asc->wd.reset_count);
+
+ /* some targets take long to reset */
+ delay( scsi_delay_after_reset +
+ asc->sc->initiator_id * 500000); /* if multiple initiators */
+
+ if (asc->sc == 0) /* sanity */
+ return;
+
+ scsi_bus_was_reset(asc->sc);
+}
+
+/*
+ * Disconnect/reconnect mode ops
+ */
+
+/* get the message in via dma */
+boolean_t
+asc_msg_in(asc, csr, ir)
+ register asc_softc_t asc;
+ register unsigned char csr, ir;
+{
+ register target_info_t *tgt;
+ register asc_padded_regmap_t *regs = asc->regs;
+ unsigned char ff;
+
+ LOG(0x10,"msg_in");
+ /* must clean FIFO as in asc_dma_in, sigh */
+ while (fifo_count(regs) != 0) {
+ ff = get_fifo(regs); mb();
+ }
+
+ (void) (*asc->dma_ops->start_msgin)(asc->dma_state, asc->active_target);
+ /* We only really expect two bytes, at tgt->cmd_ptr */
+ ASC_TC_PUT(regs, sizeof(scsi_command_group_0));
+ readback(regs->asc_cmd);
+
+ return TRUE;
+}
+
+/* check the message is indeed a DISCONNECT */
+boolean_t
+asc_disconnect(asc, csr, ir)
+ register asc_softc_t asc;
+ register unsigned char csr, ir;
+
+{
+ register char *msgs, ff;
+ register target_info_t *tgt;
+ asc_padded_regmap_t *regs;
+
+ tgt = asc->active_target;
+
+ (*asc->dma_ops->end_msgin)(asc->dma_state, tgt);
+
+ /*
+ * Do not do this. It is most likely a reconnection
+ * message that sits there already by the time we
+ * get here. The chip is smart enough to only dma
+ * the bytes that correctly came in as msg_in proper;
+ * the identify and selection bytes are not dma-ed.
+ * Yes, sometimes the hardware does the right thing.
+ */
+#if 0
+ /* First check message got out of the fifo */
+ regs = asc->regs;
+ while (fifo_count(regs) != 0) {
+ *msgs++ = get_fifo(regs);
+ }
+#endif
+ msgs = tgt->cmd_ptr;
+
+ /* An SDP message precedes it in non-completed READs */
+ if ((msgs[0] == SCSI_DISCONNECT) || /* completed */
+ ((msgs[0] == SCSI_SAVE_DATA_POINTER) && /* non complete */
+ (msgs[1] == SCSI_DISCONNECT))) {
+ /* that's the ok case */
+ } else
+ printf("[tgt %d bad SDP: x%x]",
+ tgt->target_id, *((unsigned short *)msgs));
+
+ return TRUE;
+}
+
+/* save all relevant data, free the BUS */
+boolean_t
+asc_disconnected(asc, csr, ir)
+ register asc_softc_t asc;
+ register unsigned char csr, ir;
+
+{
+ register target_info_t *tgt;
+
+ LOG(0x11,"disconnected");
+ asc_disconnect(asc,csr,ir);
+
+ tgt = asc->active_target;
+ tgt->flags |= TGT_DISCONNECTED;
+ tgt->transient_state.handler = asc->error_handler;
+ /* anything else was saved in asc_err_disconn() */
+
+ PRINT(("{D%d}", tgt->target_id));
+
+ asc_release_bus(asc);
+
+ return FALSE;
+}
+
+int asc_restore_ptr = 1;
+
+/* get reconnect message out of fifo, restore BUS */
+boolean_t
+asc_reconnect(asc, csr, ir)
+ register asc_softc_t asc;
+ register unsigned char csr, ir;
+
+{
+ register target_info_t *tgt;
+ asc_padded_regmap_t *regs;
+ unsigned int id;
+
+ LOG(0x12,"reconnect");
+ /*
+ * See if this reconnection collided with a selection attempt
+ */
+ if (asc->state & ASC_STATE_BUSY)
+ asc->state |= ASC_STATE_COLLISION;
+
+ asc->state |= ASC_STATE_BUSY;
+
+ /* find tgt: first byte in fifo is (tgt_id|our_id) */
+ regs = asc->regs;
+ while (fifo_count(regs) > 2) /* sanity */ {
+ id = get_fifo(regs); mb();
+ }
+ if (fifo_count(regs) != 2)
+ gimmeabreak();
+
+ id = get_fifo(regs); /* must decode this now */
+ mb();
+ id &= ~(1 << asc->sc->initiator_id);
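+ /* our own bit is gone now; the loop below turns the remaining 1<<tgt_id into tgt_id */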
+ {
+ register int i;
+ for (i = 0; id > 1; i++)
+ id >>= 1;
+ id = i;
+ }
+
+ tgt = asc->sc->target[id];
+ if (tgt == 0) panic("asc_reconnect");
+
+ /* synch things*/
+ regs->asc_syn_p = tgt->sync_period;
+ regs->asc_syn_o = tgt->sync_offset;
+ readback(regs->asc_syn_o);
+
+ /* Get IDENTIFY message */
+ {
+ register int i = get_fifo(regs);
+ if ((i & SCSI_IDENTIFY) == 0)
+ gimmeabreak();
+ i &= 0x7;
+ /* If not LUN 0 see which target it is */
+ if (i) {
+ target_info_t *tgt1;
+ for (tgt1 = tgt->next_lun;
+ tgt1 && tgt1 != tgt;
+ tgt1 = tgt1->next_lun)
+ if (tgt1->lun == i) {
+ tgt = tgt1;
+ break;
+ }
+ }
+ }
+
+ if (asc->state & ASC_STATE_DO_RFB) {
+ if (tgt->transient_state.has_oddb) {
+ if (tgt->sync_period) {
+ regs->asc_cnfg2 = ASC_CNFG2_SCSI2 | ASC_CNFG2_RFB;
+ wbflush();
+ regs->asc_rfb = tgt->transient_state.the_oddb;
+ } else {
+ regs->asc_fifo = tgt->transient_state.the_oddb;
+ }
+ tgt->transient_state.has_oddb = FALSE;
+ } else {
+ regs->asc_cnfg2 = ASC_CNFG2_SCSI2;
+ }
+ wbflush();
+ }
+
+ PRINT(("{R%d}", id));
+ if (asc->state & ASC_STATE_COLLISION)
+ PRINT(("[B %d-%d]", asc->active_target->target_id, id));
+
+ LOG(0x80+id,0);
+
+ asc->active_target = tgt;
+ tgt->flags &= ~TGT_DISCONNECTED;
+
+ asc->script = tgt->transient_state.script;
+ asc->error_handler = tgt->transient_state.handler;
+ asc->in_count = 0;
+ asc->out_count = 0;
+
+ regs->asc_cmd = ASC_CMD_MSG_ACPT;
+ readback(regs->asc_cmd);
+
+ /* What if there is a RESTORE_PTR msgin ? */
+ if (asc_restore_ptr) {
+more_msgin:
+ csr = asc_wait(regs, ASC_CSR_INT, 1);
+
+ if (SCSI_PHASE(csr) == SCSI_PHASE_MSG_IN) {
+ /* ack intr */
+ id = get_reg(regs,asc_intr); mb();
+
+ /* Ok, get msg */
+ regs->asc_cmd = ASC_CMD_XFER_INFO;
+ readback(regs->asc_cmd);
+ /* wait for xfer done */
+ csr = asc_wait(regs, ASC_CSR_INT, 1);
+
+ /* look at what we got */
+ id = get_fifo(regs);
+
+ if (id != SCSI_RESTORE_POINTERS)
+ printf("asc%d: uhu msg %x\n", asc->sc->masterno, id);
+ /* ack intr */
+ id = get_reg(regs,asc_intr); mb();
+
+ /* move on */
+ regs->asc_cmd = ASC_CMD_MSG_ACPT;
+ readback(regs->asc_cmd);
+ goto more_msgin;
+ }
+ }
+
+ return FALSE;
+}
+
+
+/* We have been selected as target */
+
+boolean_t
+asc_selected(asc, csr, ir)
+ register asc_softc_t asc;
+ register unsigned csr, ir;
+{
+ register asc_padded_regmap_t *regs = asc->regs;
+ register unsigned char id;
+ target_info_t *self, *initiator;
+ unsigned int len;
+
+ /*
+ * Find initiator's id: the head of the fifo is (init_id|our_id)
+ */
+
+ id = get_fifo(regs); /* must decode this now */
+ mb();
+ id &= ~(1 << asc->sc->initiator_id);
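+ /* clear our own bit; the loop below turns the initiator's 1<<id into id */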
+ {
+ register int i;
+ for (i = 0; id > 1; i++)
+ id >>= 1;
+ id = i;
+ }
+
+ /*
+ * See if we have seen him before
+ */
+ self = asc->sc->target[asc->sc->initiator_id];
+ initiator = asc->sc->target[id];
+ if (initiator == 0) {
+
+ initiator = scsi_slave_alloc(asc->sc->masterno, id, asc);
+ (*asc->dma_ops->new_target)(asc->dma_state, initiator);
+
+ }
+
+ if (! (initiator->flags & TGT_ONLINE) )
+ sccpu_new_initiator(self, initiator);
+
+ /*
+ * If selected with ATN the chip did the msg-out
+ * phase already, let us look at the message(s)
+ */
+ if (ir & ASC_INT_SEL_ATN) {
+ register unsigned char m;
+
+ m = get_fifo(regs); mb();
+ if ((m & SCSI_IDENTIFY) == 0)
+ gimmeabreak();
+
+ csr = get_reg(regs,asc_csr); mb();
+ if ((SCSI_PHASE(csr) == SCSI_PHASE_MSG_OUT) &&
+ fifo_count(regs))
+ asc_odsynch(asc, initiator);
+
+ /* Get the command now, unless we have it already */
+ mb();
+ if (fifo_count(regs) < sizeof(scsi_command_group_0)) {
+ regs->asc_cmd = ASC_CMD_RCV_CMD;
+ readback(regs->asc_cmd);
+ asc_wait(regs, ASC_CSR_INT, 1);
+ ir = get_reg(regs,asc_intr); mb();
+ csr = get_reg(regs,asc_csr); mb();
+ }
+ } else {
+ /*
+ * Pop away the null byte that follows the id
+ */
+ if (get_fifo(regs) != 0)
+ gimmeabreak();
+
+ }
+
+ /*
+ * Take rest of command out of fifo
+ */
+ self->dev_info.cpu.req_pending = TRUE;
+ self->dev_info.cpu.req_id = id;
+ self->dev_info.cpu.req_cmd = get_fifo(regs);
+ self->dev_info.cpu.req_lun = get_fifo(regs);
+
+ LOG(0x80+self->dev_info.cpu.req_cmd, 0);
+
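+ /*
+ * The remaining CDB length depends on the command group: 6 bytes
+ * for group 0, 10 for groups 1 and 2, 12 for group 5. The opcode
+ * and lun bytes are gone already; the control byte is popped
+ * right after the switch.
+ */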
+ switch ((self->dev_info.cpu.req_cmd & SCSI_CODE_GROUP) >> 5) {
+ case 0:
+ len = get_fifo(regs) << 16; mb();
+ len |= get_fifo(regs) << 8; mb();
+ len |= get_fifo(regs); mb();
+ break;
+ case 1:
+ case 2:
+ len = get_fifo(regs); /* xxx lba1 */ mb();
+ len = get_fifo(regs); /* xxx lba2 */ mb();
+ len = get_fifo(regs); /* xxx lba3 */ mb();
+ len = get_fifo(regs); /* xxx lba4 */ mb();
+ len = get_fifo(regs); /* xxx xxx */ mb();
+ len = get_fifo(regs) << 8; mb();
+ len |= get_fifo(regs); mb();
+ break;
+ case 5:
+ len = get_fifo(regs); /* xxx lba1 */ mb();
+ len = get_fifo(regs); /* xxx lba2 */ mb();
+ len = get_fifo(regs); /* xxx lba3 */ mb();
+ len = get_fifo(regs); /* xxx lba4 */ mb();
+ len = get_fifo(regs) << 24; mb();
+ len |= get_fifo(regs) << 16; mb();
+ len |= get_fifo(regs) << 8; mb();
+ len |= get_fifo(regs); mb();
+ if (get_fifo(regs) != 0) ;
+ break;
+ default:
+ panic("asc_selected");
+ }
+ self->dev_info.cpu.req_len = len;
+/*if (scsi_debug) printf("[L x%x]", len);*/
+
+ /* xxx pop the cntrl byte away */
+ if (get_fifo(regs) != 0)
+ gimmeabreak();
+ mb();
+
+ /*
+ * Setup state
+ */
+ asc->active_target = self;
+ asc->done = SCSI_RET_IN_PROGRESS;
+ asc->out_count = 0;
+ asc->in_count = 0;
+ asc->extra_count = 0;
+
+ /*
+ * Sync params.
+ */
+ regs->asc_syn_p = initiator->sync_period;
+ regs->asc_syn_o = initiator->sync_offset;
+ readback(regs->asc_syn_o);
+
+ /*
+ * Do the up-call
+ */
+ LOG(0xB,"tgt-mode-restart");
+ (*self->dev_ops->restart)( self, FALSE);
+
+ /* The call above has either prepared the data,
+ placing an ior on self, or it handled it some
+ other way */
+ if (self->ior == 0)
+ return FALSE;
+
+ /* sanity */
+ if (fifo_count(regs)) {
+ regs->asc_cmd = ASC_CMD_FLUSH;
+ readback(regs->asc_cmd);
+ }
+
+ /* needed by some dma code to align things properly */
+ self->transient_state.cmd_count = sizeof(scsi_command_group_0);
+ self->transient_state.in_count = len; /* XXX */
+
+ (*asc->dma_ops->map)(asc->dma_state, self);
+
+ if (asc->wd.nactive++ == 0)
+ asc->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+
+ {
+ register script_t scp;
+ unsigned char command;
+
+ switch (self->dev_info.cpu.req_cmd) {
+ case SCSI_CMD_TEST_UNIT_READY:
+ scp = asc_script_t_data_in+1; break;
+ case SCSI_CMD_SEND:
+ scp = asc_script_t_data_in; break;
+ default:
+ scp = asc_script_t_data_out; break;
+ }
+
+ asc->script = scp;
+ command = scp->command;
+ if (!(*scp->action)(asc, csr, ir))
+ return FALSE;
+
+/*if (scsi_debug) printf("[F%x]", fifo_count(regs));*/
+
+ asc->script++;
+ regs->asc_cmd = command; mb();
+
+ }
+
+ return FALSE;
+}
+
+
+/*
+ * Other utilities
+ */
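+
+/*
+ * Odd-byte bookkeeping for DMA engines that cannot take odd counts
+ * (ASC_STATE_DO_RFB): when a target disconnects on an odd boundary
+ * the dangling byte is remembered here, and asc_reconnect() feeds it
+ * back through the chip's RFB register (or the fifo, for async
+ * targets) before the data transfer is restarted.
+ */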
+private void
+pick_up_oddb(
+ target_info_t *tgt)
+{
+ register char *lastp;
+
+ /* State should have been updated before we get here */
+ lastp = tgt->dma_ptr + tgt->transient_state.dma_offset;
+
+ if ((vm_offset_t) lastp & 1) {
+ tgt->transient_state.has_oddb = TRUE;
+ tgt->transient_state.the_oddb = lastp[-1];
+ } else
+ tgt->transient_state.has_oddb = FALSE;
+}
+
+
+/*
+ * Error handlers
+ */
+
+/*
+ * Fall-back error handler.
+ */
+asc_err_generic(asc, csr, ir)
+ register asc_softc_t asc;
+{
+ LOG(0x13,"err_generic");
+
+ /* handle non-existent or powered-off devices here */
+ if ((ir == ASC_INT_DISC) &&
+ (asc_isa_select(asc->cmd_was)) &&
+ (ASC_SS(asc->ss_was) == 0)) {
+ /* Powered off ? */
+ target_info_t *tgt = asc->active_target;
+
+ tgt->flags = 0;
+ tgt->sync_period = 0;
+ tgt->sync_offset = 0; mb();
+ asc->done = SCSI_RET_DEVICE_DOWN;
+ asc_end(asc, csr, ir);
+ return;
+ }
+
+ switch (SCSI_PHASE(csr)) {
+ case SCSI_PHASE_STATUS:
+ if (asc->script[-1].condition == SCSI_PHASE_STATUS) {
+ /* some are just slow to get out.. */
+ } else
+ asc_err_to_status(asc, csr, ir);
+ return;
+ break;
+ case SCSI_PHASE_DATAI:
+ if (asc->script->condition == SCSI_PHASE_STATUS) {
+ /*
+ * Here is what I know about it. We reconnect to
+ * the target (csr 87, ir 0C, cmd 44), start dma in
+ * (81 10 12) and then get here with (81 10 90).
+ * Dma is rolling and keeps on rolling happily,
+ * the value in the counter indicates the interrupt
+ * was signalled right away: by the time we get
+ * here only 80-90 bytes have been shipped to an
+ * rz56 running synchronous at 3.57 Mb/sec.
+ */
+/* printf("{P}");*/
+ return;
+ }
+ break;
+ case SCSI_PHASE_DATAO:
+ if (asc->script->condition == SCSI_PHASE_STATUS) {
+ /*
+ * See comment above. Actually seen on hitachis.
+ */
+/* printf("{P}");*/
+ return;
+ }
+ break;
+ case SCSI_PHASE_CMD:
+ /* This can be the case with drives that are not
+ even scsi-1 compliant and do not like to be
+ selected with ATN (to do the synch negot) and
+ go straight to command phase, regardless */
+
+ if (asc->script == asc_script_try_synch) {
+
+ target_info_t *tgt = asc->active_target;
+ register asc_padded_regmap_t *regs = asc->regs;
+
+ tgt->flags |= TGT_DID_SYNCH; /* only one chance */
+ tgt->flags &= ~TGT_TRY_SYNCH;
+
+ if (tgt->cur_cmd == SCSI_CMD_INQUIRY)
+ tgt->transient_state.script = asc_script_data_in;
+ else
+ tgt->transient_state.script = asc_script_simple_cmd;
+ asc->script = tgt->transient_state.script;
+ regs->asc_cmd = ASC_CMD_CLR_ATN;
+ readback(regs->asc_cmd);
+ asc->regs->asc_cmd = ASC_CMD_XFER_PAD|ASC_CMD_DMA; /*0x98*/ mb();
+ printf(" did not like SYNCH xfer ");
+ return;
+ }
+ /* fall through */
+ }
+ gimmeabreak();
+}
+
+/*
+ * Handle generic errors that are reported as
+ * an unexpected change to STATUS phase
+ */
+asc_err_to_status(asc, csr, ir)
+ register asc_softc_t asc;
+{
+ script_t scp = asc->script;
+
+ LOG(0x14,"err_tostatus");
+ while (scp->condition != SCSI_PHASE_STATUS)
+ scp++;
+ (*scp->action)(asc, csr, ir);
+ asc->script = scp + 1;
+ asc->regs->asc_cmd = scp->command; mb();
+#if 0
+ /*
+ * Normally, we would already be able to say the command
+ * is in error, e.g. the tape had a filemark or something.
+ * But in case we do disconnected mode WRITEs, it is quite
+ * common that the following happens:
+ * dma_out -> disconnect (xfer complete) -> reconnect
+ * and our script might expect at this point that the dma
+ * had to be restarted (it didn't notice it was completed).
+ * And in any event.. it is both correct and cleaner to
+ * declare error iff the STATUS byte says so.
+ */
+ asc->done = SCSI_RET_NEED_SENSE;
+#endif
+}
+
+/*
+ * Handle disconnections as exceptions
+ */
+asc_err_disconn(asc, csr, ir)
+ register asc_softc_t asc;
+ register unsigned char csr, ir;
+{
+ register asc_padded_regmap_t *regs;
+ register target_info_t *tgt;
+ int count;
+ boolean_t callback = FALSE;
+
+ LOG(0x16,"err_disconn");
+
+ /*
+ * We only do msg-in cases here
+ */
+ if (SCSI_PHASE(csr) != SCSI_PHASE_MSG_IN)
+ return asc_err_generic(asc, csr, ir);
+
+ regs = asc->regs;
+ tgt = asc->active_target;
+
+ /*
+ * What did we expect to happen, and what did happen.
+ */
+ switch (asc->script->condition) {
+ case SCSI_PHASE_DATAO:
+ /*
+ * A data out phase was either about to be started,
+ * or it was in progress but more had to go out later
+ * [e.g. a big xfer for instance, or more than the
+ * DMA engine can take in one shot].
+ */
+ LOG(0x1b,"+DATAO");
+ if (asc->out_count) {
+ register int xferred, offset;
+
+ /*
+ * Xfer in progress. See where we stopped.
+ */
+ ASC_TC_GET(regs,xferred); /* temporary misnomer */
+ /*
+ * Account for prefetching, in its various forms
+ */
+ xferred += fifo_count(regs);
+ xferred -= asc->extra_count;
+ /*
+ * See how much went out, how much to go.
+ */
+ xferred = asc->out_count - xferred; /* ok now */
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+
+ callback = (*asc->dma_ops->disconn_1)
+ (asc->dma_state, tgt, xferred);
+
+ } else {
+
+ /*
+ * A disconnection before DMA was (re)started.
+ */
+ callback = (*asc->dma_ops->disconn_2)
+ (asc->dma_state, tgt);
+
+ }
+ asc->extra_count = 0;
+ tgt->transient_state.script = asc_script_restart_data_out;
+ break;
+
+
+ case SCSI_PHASE_DATAI:
+ /*
+ * Same as above, the other way around
+ */
+ LOG(0x17,"+DATAI");
+ if (asc->in_count) {
+ register int offset, xferred;
+
+ /*
+ * How much did we expect, how much did we get
+ */
+ ASC_TC_GET(regs,count); mb();
+ xferred = asc->in_count - count;
+ assert(xferred > 0);
+
+if (regs->asc_flags & 0xf)
+printf("{Xf %x,%x,%x}", xferred, asc->in_count, fifo_count(regs));
+ tgt->transient_state.in_count -= xferred;
+ assert(tgt->transient_state.in_count > 0);
+
+ callback = (*asc->dma_ops->disconn_3)
+ (asc->dma_state, tgt, xferred);
+
+ /*
+ * Handle obb if we have to. DMA code has
+ * updated pointers and flushed buffers.
+ */
+ if (asc->state & ASC_STATE_DO_RFB)
+ pick_up_oddb(tgt);
+
+ tgt->transient_state.script = asc_script_restart_data_in;
+ /*
+ * Some silly targets disconnect after they
+ * have given us all the data.
+ */
+ if (tgt->transient_state.in_count == 0)
+ tgt->transient_state.script++;
+
+ } else
+ tgt->transient_state.script = asc_script_restart_data_in;
+ break;
+
+ case SCSI_PHASE_STATUS:
+ /*
+		 * Either one of the above cases applies here.  The only
+		 * difference is that the DMA engine was set up to run to
+		 * completion and (most likely) did not.
+ */
+ ASC_TC_GET(regs,count); mb();
+ if (asc->state & ASC_STATE_DMA_IN) {
+ register int offset, xferred;
+
+ LOG(0x1a,"+STATUS+R");
+
+
+ /*
+ * Handle brain-dead sequence:
+ * 1-xfer all data, disconnect
+ * 2-reconnect, disconnect immediately ??
+ * 3-rept 2
+ * 4-reconnect,complete
+ */
+ if (asc->in_count) {
+
+ xferred = asc->in_count - count;
+ assert(xferred > 0);
+if (regs->asc_flags & 0xf)
+printf("{Xf %x,%x,%x}", xferred, asc->in_count, fifo_count(regs));
+
+ tgt->transient_state.in_count -= xferred;
+
+ callback = (*asc->dma_ops->disconn_4)
+ (asc->dma_state, tgt, xferred);
+ }
+ /*
+ * Handle obb if we have to. DMA code has
+ * updated pointers and flushed buffers.
+ */
+ if (asc->state & ASC_STATE_DO_RFB)
+ pick_up_oddb(tgt);
+
+ tgt->transient_state.script = asc_script_restart_data_in;
+
+ /* see previous note */
+ if (tgt->transient_state.in_count == 0)
+ tgt->transient_state.script++;
+
+ } else {
+
+ /*
+ * Outgoing xfer, take care of prefetching.
+ */
+ /* add what's left in the fifo */
+ count += fifo_count(regs);
+ /* take back the extra we might have added */
+ count -= asc->extra_count;
+ /* ..and drop that idea */
+ asc->extra_count = 0;
+
+ LOG(0x19,"+STATUS+W");
+
+ /*
+ * All done ? This is less silly than with
+ * READs: some disks will only say "done" when
+ * the data is down on the platter, and some
+ * people like it much better that way.
+ */
+ if ((count == 0) && (tgt->transient_state.out_count == asc->out_count)) {
+ /* all done */
+ tgt->transient_state.script = asc->script;
+ tgt->transient_state.out_count = 0;
+ } else {
+ register int xferred, offset;
+
+ /* how much we xferred */
+ xferred = asc->out_count - count;
+
+ /* how much to go */
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+
+ callback = (*asc->dma_ops->disconn_5)
+ (asc->dma_state,tgt,xferred);
+
+ tgt->transient_state.script = asc_script_restart_data_out;
+ }
+ asc->out_count = 0;
+ }
+ break;
+ default:
+ gimmeabreak();
+ return;
+ }
+ asc_msg_in(asc,csr,ir);
+ asc->script = asc_script_disconnect;
+ regs->asc_cmd = ASC_CMD_XFER_INFO|ASC_CMD_DMA;
+ wbflush();
+ /*
+ * Prevent a race, now. If the reselection comes quickly
+ * the chip will prefetch and reload the transfer counter
+ * register. Make sure it will stop, by reloading a zero.
+ */
+ regs->asc_tc_lsb = 0;
+ regs->asc_tc_msb = 0;
+ if (callback)
+ (*asc->dma_ops->disconn_callback)(asc->dma_state, tgt);
+}
+
+/*
+ * Watchdog
+ *
+ * So far I have only seen the chip get stuck in a
+ * select/reselect conflict: the reselection did
+ * win and the interrupt register showed it but..
+ * no interrupt was generated.
+ * But we know that some (name withdrawn) disks get
+ * stuck in the middle of dma phases...
+ */
+asc_reset_scsibus(asc)
+ register asc_softc_t asc;
+{
+ register target_info_t *tgt = asc->active_target;
+ register asc_padded_regmap_t *regs = asc->regs;
+ register int ir;
+
+ if (scsi_debug && tgt) {
+ int dmalen;
+ ASC_TC_GET(asc->regs,dmalen); mb();
+ printf("Target %d was active, cmd x%x in x%x out x%x Sin x%x Sou x%x dmalen x%x\n",
+ tgt->target_id, tgt->cur_cmd,
+ tgt->transient_state.in_count, tgt->transient_state.out_count,
+ asc->in_count, asc->out_count,
+ dmalen);
+ }
+ ir = get_reg(regs,asc_intr); mb();
+ if ((ir & ASC_INT_RESEL) && (SCSI_PHASE(regs->asc_csr) == SCSI_PHASE_MSG_IN)) {
+ /* getting it out of the woods is a bit tricky */
+ spl_t s = splbio();
+
+ (void) asc_reconnect(asc, get_reg(regs,asc_csr), ir);
+ asc_wait(regs, ASC_CSR_INT, 1);
+ ir = get_reg(regs,asc_intr); mb();
+ regs->asc_cmd = ASC_CMD_MSG_ACPT;
+ readback(regs->asc_cmd);
+ splx(s);
+ } else {
+ regs->asc_cmd = ASC_CMD_BUS_RESET; mb();
+ delay(35);
+ }
+}
+
+#endif	/* NASC > 0 */
+
diff --git a/scsi/adapters/scsi_7061.h b/scsi/adapters/scsi_7061.h
new file mode 100644
index 00000000..8969f8b1
--- /dev/null
+++ b/scsi/adapters/scsi_7061.h
@@ -0,0 +1,230 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_7061.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Defines for the DEC DC7061 SII gate array (SCSI interface)
+ */
+
+/*
+ * Register map
+ */
+
+typedef struct {
+ volatile unsigned short sii_sdb; /* rw: Data bus and parity */
+ volatile unsigned short sii_sc1; /* rw: scsi signals 1 */
+ volatile unsigned short sii_sc2; /* rw: scsi signals 2 */
+ volatile unsigned short sii_csr; /* rw: control and status */
+ volatile unsigned short sii_id; /* rw: scsi bus ID */
+ volatile unsigned short sii_sel_csr; /* rw: selection status */
+ volatile unsigned short sii_destat; /* ro: selection detector status */
+ volatile unsigned short sii_dstmo; /* unsupp: dssi timeout */
+ volatile unsigned short sii_data; /* rw: data register */
+ volatile unsigned short sii_dma_ctrl; /* rw: dma control reg */
+ volatile unsigned short sii_dma_len; /* rw: length of transfer */
+ volatile unsigned short sii_dma_adr_low;/* rw: low address */
+ volatile unsigned short sii_dma_adr_hi; /* rw: high address */
+ volatile unsigned short sii_dma_1st_byte;/* rw: initial byte */
+ volatile unsigned short sii_stlp; /* unsupp: dssi short trgt list ptr */
+ volatile unsigned short sii_ltlp; /* unsupp: dssi long " " " */
+ volatile unsigned short sii_ilp; /* unsupp: dssi initiator list ptr */
+ volatile unsigned short sii_dssi_csr; /* unsupp: dssi control */
+ volatile unsigned short sii_conn_csr; /* rc: connection interrupt control */
+ volatile unsigned short sii_data_csr; /* rc: data interrupt control */
+ volatile unsigned short sii_cmd; /* rw: command register */
+	volatile unsigned short	sii_diag_csr;	/* rw: diagnostic status */
+} sii_regmap_t;
+
+/*
+ * Data bus register (diag)
+ */
+
+#define SII_SDB_DATA 0x00ff /* data bits, assert high */
+#define SII_SDB_PARITY 0x0100 /* parity bit */
+
+/*
+ * Control signals one (diag)
+ */
+
+#define SII_CS1_IO 0x0001 /* I/O bit */
+#define SII_CS1_CD 0x0002 /* Control/Data bit */
+#define SII_CS1_MSG 0x0004 /* Message bit */
+#define SII_CS1_ATN 0x0008 /* Attention bit */
+#define SII_CS1_REQ 0x0010 /* Request bit */
+#define SII_CS1_ACK 0x0020 /* Acknowledge bit */
+#define SII_CS1_RST 0x0040 /* Reset bit */
+#define SII_CS1_SEL 0x0080 /* Selection bit */
+#define SII_CS1_BSY 0x0100 /* Busy bit */
+
+/*
+ * Control signals two (diag)
+ */
+
+#define SII_CS2_SBE 0x0001 /* Bus enable */
+#define SII_CS2_ARB 0x0002 /* arbitration enable */
+#define SII_CS2_TGS 0x0004 /* Target role steer */
+#define SII_CS2_IGS 0x0008 /* Initiator role steer */
+
+/*
+ * Control and status register
+ */
+
+#define SII_CSR_IE 0x0001 /* Interrupt enable */
+#define SII_CSR_PCE 0x0002 /* Parity check enable */
+#define SII_CSR_SLE 0x0004 /* Select enable */
+#define SII_CSR_RSE 0x0008 /* Reselect enable */
+#define SII_CSR_HPM 0x0010 /* Arbitration enable */
+
+/*
+ * SCSI bus ID register
+ */
+
+#define SII_ID_MASK 0x0007 /* The scsi ID */
+#define SII_ID_IO 0x8000 /* ID pins are in/out */
+
+/*
+ * Selector control and status register
+ */
+
+#define SII_SEL_ID 0x0003 /* Destination ID */
+
+/*
+ * Selection detector status register
+ */
+
+#define SII_DET_ID 0x0003 /* Selector's ID */
+
+/*
+ * Data register (silo)
+ */
+
+#define SII_DATA_VAL 0x00ff /* Lower byte */
+
+/*
+ * DMA control register
+ */
+
+#define SII_DMA_SYN_OFFSET 0x0003 /* 0 -> asynch */
+
+/*
+ * DMA counter
+ */
+
+#define SII_DMA_COUNT_MASK 0x1fff /* in bytes */
+
+/*
+ * DMA address registers
+ */
+
+#define SII_DMA_LOW_MASK 0xffff /* all bits */
+#define SII_DMA_HIGH_MASK 0x0003 /* unused ones mbz */
+
+/*
+ * DMA initial byte
+ */
+
+#define SII_DMA_IBYTE 0x00ff /* for odd address DMAs */
+
+/*
+ * Connection status register
+ */
+
+#define SII_CON_LST 0x0002 /* ro: lost arbitration */
+#define SII_CON_SIP 0x0004 /* ro: selection InProgress */
+#define SII_CON_SWA 0x0008 /* rc: selected with ATN */
+#define SII_CON_TGT 0x0010 /* ro: target role */
+#define SII_CON_DST 0x0020 /* ro: sii is destination */
+#define SII_CON_CON 0x0040 /* ro: sii is connected */
+#define SII_CON_SCH 0x0080 /* rci: state change */
+#define SII_CON_LDN 0x0100 /* ??i: dssi list elm done */
+#define SII_CON_BUF 0x0200 /* ??i: dssi buffer service */
+#define SII_CON_TZ 0x0400 /* ??: dssi target zero */
+#define SII_CON_OBC 0x0800 /* ??i: dssi outen bit clr */
+#define SII_CON_BERR 0x1000 /* rci: bus error */
+#define SII_CON_RST 0x2000 /* rci: RST asserted */
+#define SII_CON_DI 0x4000 /* ro: data_csr intr */
+#define SII_CON_CI 0x8000 /* ro: con_csr intr */
+
+/*
+ * Data transfer status register
+ */
+
+#define SII_DTR_IO 0x0001 /* ro: I/O asserted */
+#define SII_DTR_CD 0x0002 /* ro: CD asserted */
+#define SII_DTR_MSG 0x0004 /* ro: MSG asserted */
+#define SII_DTR_ATN 0x0008 /* rc: ATN found asserted */
+#define SII_DTR_MIS 0x0010 /* roi: phase mismatch */
+#define SII_DTR_OBB		0x0100	/* ro: odd byte boundary */
+#define SII_DTR_IPE 0x0200 /* ro: incoming parity err */
+#define SII_DTR_IBF 0x0400 /* roi: input buffer full */
+#define SII_DTR_TBE 0x0800 /* roi: xmt buffer empty */
+#define SII_DTR_TCZ 0x1000 /* ro: xfer counter zero */
+#define SII_DTR_DONE 0x2000 /* rci: xfer complete */
+#define SII_DTR_DI 0x4000 /* ro: data_csr intr */
+#define SII_DTR_CI 0x8000 /* ro: con_csr intr */
+
+#define SII_PHASE(dtr) SCSI_PHASE(dtr)
+
+
+/*
+ * Command register
+ *
+ * Certain bits are only valid in certain roles:
+ * I - Initiator D - Destination T - Target
+ * Bits 0-3 give the 'expected phase'
+ * Bits 4-6 give the 'expected state'
+ * Bits 7-11 are the 'command' proper
+ */
+
+#define SII_CMD_IO 0x0001 /* rw: (T) assert I/O */
+#define SII_CMD_CD 0x0002 /* rw: (T) assert CD */
+#define SII_CMD_MSG 0x0004 /* rw: (T) assert MSG */
+#define SII_CMD_ATN 0x0008 /* rw: (I) assert ATN */
+
+#define SII_CMD_TGT 0x0010 /* rw: (DIT) target */
+#define SII_CMD_DST 0x0020 /* rw: (DIT) destination */
+#define SII_CMD_CON 0x0040 /* rw: (DIT) connected */
+
+#define SII_CMD_RESET 0x0080 /* rw: (DIT) reset */
+#define SII_CMD_DIS 0x0100 /* rw: (DIT) disconnect */
+#define SII_CMD_REQ 0x0200 /* rw: (T) request data */
+#define SII_CMD_SEL 0x0400 /* rw: (D) select */
+#define SII_CMD_XFER 0x0800 /* rw: (IT) xfer information */
+
+#define SII_CMD_RSL 0x1000 /* rw: reselect target */
+#define SII_CMD_RST 0x4000 /* zw: assert RST */
+#define SII_CMD_DMA 0x8000 /* rw: command uses DMA */
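+
+/*
+ * Illustrative example only: the driver in scsi_7061_hdw.c composes its
+ * "select with attention, connected, ship the identify message" command as
+ *
+ *	SII_CMD_SEL | SII_CMD_ATN | SII_CMD_CON |
+ *		SII_CMD_XFER | SCSI_PHASE_MSG_OUT
+ *
+ * i.e. bits 0-3 carry the expected phase, bits 4-6 the expected
+ * connection state, bits 7-11 the operation proper, and SII_CMD_DMA
+ * is OR-ed in when the transfer should go through the DMA buffer.
+ */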
+
+/*
+ * Diagnostic control register
+ */
+
+#define SII_DIAG_TEST 0x0001 /* rw: test mode */
+#define SII_DIAG_DIA 0x0002 /* rw: ext loopback mode */
+#define SII_DIAG_PORT_ENB 0x0004 /* rw: enable drivers */
+#define SII_DIAG_LPB 0x0008 /* rw: loopback reg writes */
diff --git a/scsi/adapters/scsi_7061_hdw.c b/scsi/adapters/scsi_7061_hdw.c
new file mode 100644
index 00000000..674e8921
--- /dev/null
+++ b/scsi/adapters/scsi_7061_hdw.c
@@ -0,0 +1,2603 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_7061_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Bottom layer of the SCSI driver: chip-dependent functions
+ *
+ * This file contains the code that is specific to the DEC DC7061
+ * SCSI chip (Host Bus Adapter in SCSI parlance): probing, start
+ * operation, and interrupt routine.
+ */
+
+/*
+ * This layer works based on small simple 'scripts' that are installed
+ * at the start of the command and drive the chip to completion.
+ * The idea comes from the specs of the NCR 53C700 'script' processor.
+ *
+ * There are various reasons for this, mainly
+ * - Performance: identify the common (successful) path, and follow it;
+ * at interrupt time no code is needed to find the current status
+ * - Code size: it should be easy to compact common operations
+ * - Adaptability: the code skeleton should adapt to different chips without
+ * terrible complications.
+ * - Error handling: it is easy to modify the actions performed
+ * by the scripts to cope with strange but well identified sequences
+ *
+ */
+
+#include <sii.h>
+#if NSII > 0
+
+#include <platforms.h>
+
+#ifdef DECSTATION
+#define PAD(n) short n
+#endif
+
+#include <machine/machspl.h> /* spl definitions */
+#include <mach/std_types.h>
+#include <chips/busses.h>
+#include <scsi/compat_30.h>
+#include <machine/machspl.h>
+
+#include <sys/syslog.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+
+#define isa_oddbb hba_dep[0]
+#define oddbb hba_dep[1]
+
+#include <scsi/adapters/scsi_7061.h>
+
+#ifdef PAD
+
+typedef struct {
+ volatile unsigned short sii_sdb; /* rw: Data bus and parity */
+ PAD(pad0);
+ volatile unsigned short sii_sc1; /* rw: scsi signals 1 */
+ PAD(pad1);
+ volatile unsigned short sii_sc2; /* rw: scsi signals 2 */
+ PAD(pad2);
+ volatile unsigned short sii_csr; /* rw: control and status */
+ PAD(pad3);
+ volatile unsigned short sii_id; /* rw: scsi bus ID */
+ PAD(pad4);
+ volatile unsigned short sii_sel_csr; /* rw: selection status */
+ PAD(pad5);
+ volatile unsigned short sii_destat; /* ro: selection detector status */
+ PAD(pad6);
+ volatile unsigned short sii_dstmo; /* unsupp: dssi timeout */
+ PAD(pad7);
+ volatile unsigned short sii_data; /* rw: data register */
+ PAD(pad8);
+ volatile unsigned short sii_dma_ctrl; /* rw: dma control reg */
+ PAD(pad9);
+ volatile unsigned short sii_dma_len; /* rw: length of transfer */
+ PAD(pad10);
+ volatile unsigned short sii_dma_adr_low;/* rw: low address */
+ PAD(pad11);
+ volatile unsigned short sii_dma_adr_hi; /* rw: high address */
+ PAD(pad12);
+ volatile unsigned short sii_dma_1st_byte;/* rw: initial byte */
+ PAD(pad13);
+ volatile unsigned short sii_stlp; /* unsupp: dssi short trgt list ptr */
+ PAD(pad14);
+ volatile unsigned short sii_ltlp; /* unsupp: dssi long " " " */
+ PAD(pad15);
+ volatile unsigned short sii_ilp; /* unsupp: dssi initiator list ptr */
+ PAD(pad16);
+ volatile unsigned short sii_dssi_csr; /* unsupp: dssi control */
+ PAD(pad17);
+ volatile unsigned short sii_conn_csr; /* rc: connection interrupt control */
+ PAD(pad18);
+ volatile unsigned short sii_data_csr; /* rc: data interrupt control */
+ PAD(pad19);
+ volatile unsigned short sii_cmd; /* rw: command register */
+ PAD(pad20);
+	volatile unsigned short	sii_diag_csr;	/* rw: diagnostic status */
+ PAD(pad21);
+} sii_padded_regmap_t;
+
+#else /*!PAD*/
+
+typedef sii_regmap_t sii_padded_regmap_t;
+
+#endif /*!PAD*/
+
+
+#undef SII_CSR_SLE
+#define SII_CSR_SLE 0 /* for now */
+
+#ifdef DECSTATION
+#include <mips/PMAX/kn01.h>
+#define SII_OFFSET_RAM (KN01_SYS_SII_B_START-KN01_SYS_SII)
+#define SII_RAM_SIZE (KN01_SYS_SII_B_END-KN01_SYS_SII_B_START)
+/* 16 bits in 32 bit envelopes */
+#define SII_DMADR_LO(ptr) ((((unsigned)ptr)>>1)&SII_DMA_LOW_MASK)
+#define SII_DMADR_HI(ptr) ((((unsigned)ptr)>>(16+1))&SII_DMA_HIGH_MASK)
+#endif /* DECSTATION */
+
+#ifndef SII_OFFSET_RAM /* cross compile check */
+#define SII_OFFSET_RAM 0
+#define SII_RAM_SIZE 0x10000
+#define SII_DMADR_LO(ptr) (((unsigned)ptr)>>16)
+#define SII_DMADR_HI(ptr) (((unsigned)ptr)&0xffff)
+#endif
+
+/*
+ * Statically partition the DMA buffer between targets.
+ * This way we will eventually be able to attach/detach
+ * drives on the fly. And 18k/target is enough.
+ */
+#define PER_TGT_DMA_SIZE ((SII_RAM_SIZE/7) & ~(sizeof(int)-1))
+
+/*
+ * Round to 4k to make debug easier
+ */
+#define PER_TGT_BUFF_SIZE ((PER_TGT_DMA_SIZE >> 12) << 12)
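+
+/*
+ * Worked example, using the fallback SII_RAM_SIZE of 0x10000 defined
+ * above (the DECSTATION value comes from kn01.h and may differ):
+ *
+ *	PER_TGT_DMA_SIZE  = (0x10000 / 7) & ~(sizeof(int)-1)
+ *	                  = 9362 & ~3 = 9360 bytes
+ *	PER_TGT_BUFF_SIZE = (9360 >> 12) << 12 = 8192 bytes
+ *
+ * which matches the "8k-or-less" common case mentioned in
+ * sii_dma_in() and sii_dma_out() below.
+ */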
+
+/*
+ * Macros to make certain things a little more readable
+ */
+#define SII_COMMAND(regs,cs,ds,cmd) \
+ { \
+ (regs)->sii_cmd = ((cs) & 0x70) | \
+ ((ds) & 0x07) | (cmd); \
+ wbflush(); \
+ }
+#define SII_ACK(regs,cs,ds,cmd) \
+ { \
+ SII_COMMAND(regs,cs,ds,cmd); \
+ (regs)->sii_conn_csr = (cs); \
+ (regs)->sii_data_csr = (ds); \
+ }
+
+/*
+ * Deal with bogus pmax dma buffer
+ */
+
+static char decent_buffer[NSII*8][256];
+
+/*
+ * A script has three parts: a pre-condition, an action, and
+ * an optional command to the chip. The first triggers error
+ * handling if not satisfied and in our case it is formed by the
+ * values of the sii_conn_csr and sii_data_csr register
+ * bits. The action part is just a function pointer, and the
+ * command is what the 7061 should be told to do at the end
+ * of the action processing. This command is only issued and the
+ * script proceeds if the action routine returns TRUE.
+ * See sii_intr() for how and where this is all done.
+ */
+
+typedef struct script {
+ int condition; /* expected state at interrupt */
+ int (*action)(); /* extra operations */
+ int command; /* command to the chip */
+} *script_t;
+
+#define SCRIPT_MATCH(cs,ds) ((cs)&0x70|SCSI_PHASE((ds)))
+
+#define SII_PHASE_DISC 0x4 /* sort of .. */
+
+/* When no command is needed */
+#define SCRIPT_END -1
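+
+/*
+ * Simplified sketch of how sii_intr() below consumes a script once an
+ * interrupt arrives (error recovery, reselection and the spurious
+ * interrupt checks are omitted here):
+ *
+ *	scp = sii->script;
+ *	if (SCRIPT_MATCH(cs,ds) != scp->condition)
+ *		(void) (*sii->error_handler)(sii, cs, ds);
+ *	else if ((*scp->action)(sii, cs, ds)) {
+ *		sii->script = scp + 1;
+ *		regs->sii_cmd = scp->command;
+ *		wbflush();
+ *	}
+ */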
+
+/* forward decls of script actions */
+boolean_t
+ sii_script_true(), /* when nothing needed */
+ sii_identify(), /* send identify msg */
+ sii_dosynch(), /* negotiate synch xfer */
+ sii_dma_in(), /* get data from target via dma */
+ sii_dma_out(), /* send data to target via dma */
+ sii_get_status(), /* get status from target */
+ sii_end_transaction(), /* all come to an end */
+ sii_msg_in(), /* get disconnect message(s) */
+ sii_disconnected(); /* current target disconnected */
+/* forward decls of error handlers */
+boolean_t
+ sii_err_generic(), /* generic error handler */
+ sii_err_disconn(), /* when a target disconnects */
+ sii_err_rdp(), /* in reconn, handle rdp mgs */
+ gimmeabreak(); /* drop into the debugger */
+
+int sii_reset_scsibus();
+boolean_t sii_probe_target();
+static sii_wait();
+
+/*
+ * State descriptor for this layer. There is one such structure
+ * per (enabled) SCSI-7061 interface
+ */
+struct sii_softc {
+ watchdog_t wd;
+ sii_padded_regmap_t *regs;
+ volatile char *buff;
+ script_t script;
+ int (*error_handler)();
+ int in_count; /* amnt we expect to receive */
+ int out_count; /* amnt we are going to ship */
+
+ volatile char state;
+#define SII_STATE_BUSY 0x01 /* selecting or currently connected */
+#define SII_STATE_TARGET 0x04 /* currently selected as target */
+#define SII_STATE_COLLISION 0x08 /* lost selection attempt */
+#define SII_STATE_DMA_IN 0x10 /* tgt --> initiator xfer */
+
+ unsigned char ntargets; /* how many alive on this scsibus */
+ unsigned char done;
+ unsigned char cmd_count;
+
+ scsi_softc_t *sc;
+ target_info_t *active_target;
+
+ target_info_t *next_target; /* trying to seize bus */
+ queue_head_t waiting_targets;/* other targets competing for bus */
+
+} sii_softc_data[NSII];
+
+typedef struct sii_softc *sii_softc_t;
+
+sii_softc_t sii_softc[NSII];
+
+/*
+ * Synch xfer parameters, and timing conversions
+ */
+int sii_min_period = 63; /* in 4 ns units */
+int sii_max_offset = 3; /* pure number */
+
+#define sii_to_scsi_period(a) (a)
+#define scsi_period_to_sii(p) (((p) < sii_min_period) ? sii_min_period : (p))
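+
+/*
+ * Rough illustration of what the defaults above mean: a period of 63
+ * 4-ns units is 252 ns per byte, i.e. just under 4 Mbytes/sec peak
+ * synchronous rate, with at most 3 REQs outstanding (the offset).
+ */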
+
+/*
+ * Definition of the controller for the auto-configuration program.
+ */
+
+int sii_probe(), scsi_slave(), sii_go(), sii_intr();
+extern void scsi_attach();
+
+vm_offset_t sii_std[NSII] = { 0 };
+struct bus_device *sii_dinfo[NSII*8];
+struct bus_ctlr *sii_minfo[NSII];
+struct bus_driver sii_driver =
+ { sii_probe, scsi_slave, scsi_attach, sii_go, sii_std, "rz", sii_dinfo,
+ "sii", sii_minfo, /*BUS_INTR_B4_PROBE?*/};
+
+/*
+ * Scripts
+ */
+struct script
+sii_script_data_in[] = {
+ { SCSI_PHASE_CMD|SII_CON_CON, sii_script_true,
+ (SII_CMD_XFER|SII_CMD_DMA)|SII_CON_CON|SCSI_PHASE_CMD},
+ { SCSI_PHASE_DATAI|SII_CON_CON, sii_dma_in,
+ (SII_CMD_XFER|SII_CMD_DMA)|SII_CON_CON|SCSI_PHASE_DATAI},
+ { SCSI_PHASE_STATUS|SII_CON_CON, sii_get_status,
+ SII_CMD_XFER|SII_CON_CON|SCSI_PHASE_STATUS},
+ { SCSI_PHASE_MSG_IN|SII_CON_CON, sii_end_transaction, SCRIPT_END}
+},
+
+sii_script_data_out[] = {
+ { SCSI_PHASE_CMD|SII_CON_CON, sii_script_true,
+ (SII_CMD_XFER|SII_CMD_DMA)|SII_CON_CON|SCSI_PHASE_CMD},
+ { SCSI_PHASE_DATAO|SII_CON_CON, sii_dma_out,
+ (SII_CMD_XFER|SII_CMD_DMA)|SII_CON_CON|SCSI_PHASE_DATAO},
+ { SCSI_PHASE_STATUS|SII_CON_CON, sii_get_status,
+ SII_CMD_XFER|SII_CON_CON|SCSI_PHASE_STATUS},
+ { SCSI_PHASE_MSG_IN|SII_CON_CON, sii_end_transaction, SCRIPT_END}
+},
+
+sii_script_cmd[] = {
+ { SCSI_PHASE_CMD|SII_CON_CON, sii_script_true,
+ (SII_CMD_XFER|SII_CMD_DMA)|SII_CON_CON|SCSI_PHASE_CMD},
+ { SCSI_PHASE_STATUS|SII_CON_CON, sii_get_status,
+ SII_CMD_XFER|SII_CON_CON|SCSI_PHASE_STATUS},
+ { SCSI_PHASE_MSG_IN|SII_CON_CON, sii_end_transaction, SCRIPT_END}
+},
+
+/* Same, after a disconnect */
+
+sii_script_restart_data_in[] = {
+ { SCSI_PHASE_DATAI|SII_CON_CON|SII_CON_DST, sii_dma_in,
+ (SII_CMD_XFER|SII_CMD_DMA)|SII_CON_CON|SII_CON_DST|SCSI_PHASE_DATAI},
+ { SCSI_PHASE_STATUS|SII_CON_CON|SII_CON_DST, sii_get_status,
+ SII_CMD_XFER|SII_CON_CON|SII_CON_DST|SCSI_PHASE_STATUS},
+ { SCSI_PHASE_MSG_IN|SII_CON_CON|SII_CON_DST, sii_end_transaction, SCRIPT_END}
+},
+
+sii_script_restart_data_out[] = {
+ { SCSI_PHASE_DATAO|SII_CON_CON|SII_CON_DST, sii_dma_out,
+ (SII_CMD_XFER|SII_CMD_DMA)|SII_CON_CON|SII_CON_DST|SCSI_PHASE_DATAO},
+ { SCSI_PHASE_STATUS|SII_CON_CON|SII_CON_DST, sii_get_status,
+ SII_CMD_XFER|SII_CON_CON|SII_CON_DST|SCSI_PHASE_STATUS},
+ { SCSI_PHASE_MSG_IN|SII_CON_CON|SII_CON_DST, sii_end_transaction, SCRIPT_END}
+},
+
+sii_script_restart_cmd[] = {
+ { SCSI_PHASE_STATUS|SII_CON_CON|SII_CON_DST, sii_get_status,
+ SII_CMD_XFER|SII_CON_CON|SII_CON_DST|SCSI_PHASE_STATUS},
+ { SCSI_PHASE_MSG_IN|SII_CON_CON|SII_CON_DST, sii_end_transaction, SCRIPT_END}
+},
+
+/* Synchronous transfer negotiation */
+
+sii_script_try_synch[] = {
+ { SCSI_PHASE_MSG_OUT|SII_CON_CON, sii_dosynch, SCRIPT_END}
+},
+
+/* Disconnect sequence */
+
+sii_script_disconnect[] = {
+ { SII_PHASE_DISC, sii_disconnected, SCRIPT_END}
+};
+
+
+
+#define u_min(a,b) (((a) < (b)) ? (a) : (b))
+
+
+#define DEBUG
+#ifdef DEBUG
+
+sii_state(regs)
+ sii_padded_regmap_t *regs;
+{
+ unsigned dmadr;
+
+ if (regs == 0)
+ regs = (sii_padded_regmap_t*) 0xba000000;
+
+ dmadr = regs->sii_dma_adr_low | (regs->sii_dma_adr_hi << 16);
+ db_printf("sc %x, dma %x @ x%X, cs %x, ds %x, cmd %x\n",
+ (unsigned) regs->sii_sc1,
+ (unsigned) regs->sii_dma_len, dmadr,
+ (unsigned) regs->sii_conn_csr,
+ (unsigned) regs->sii_data_csr,
+ (unsigned) regs->sii_cmd);
+
+}
+sii_target_state(tgt)
+ target_info_t *tgt;
+{
+ if (tgt == 0)
+ tgt = sii_softc[0]->active_target;
+ if (tgt == 0)
+ return 0;
+ db_printf("@x%x: fl %X dma %X+%X cmd %x@%X id %X per %X off %X ior %X ret %X\n",
+ tgt,
+ tgt->flags, tgt->dma_ptr, tgt->transient_state.dma_offset, tgt->cur_cmd,
+ tgt->cmd_ptr, tgt->target_id, tgt->sync_period, tgt->sync_offset,
+ tgt->ior, tgt->done);
+ if (tgt->flags & TGT_DISCONNECTED){
+ script_t spt;
+
+ spt = tgt->transient_state.script;
+ db_printf("disconnected at ");
+ db_printsym(spt,1);
+ db_printf(": %X %X ", spt->condition, spt->command);
+ db_printsym(spt->action,1);
+ db_printf(", ");
+ db_printsym(tgt->transient_state.handler, 1);
+ db_printf("\n");
+ }
+
+ return 0;
+}
+
+sii_all_targets(unit)
+{
+ int i;
+ target_info_t *tgt;
+ for (i = 0; i < 8; i++) {
+ tgt = sii_softc[unit]->sc->target[i];
+ if (tgt)
+ sii_target_state(tgt);
+ }
+}
+
+sii_script_state(unit)
+{
+ script_t spt = sii_softc[unit]->script;
+
+ if (spt == 0) return 0;
+ db_printsym(spt,1);
+ db_printf(": %X %X ", spt->condition, spt->command);
+ db_printsym(spt->action,1);
+ db_printf(", ");
+ db_printsym(sii_softc[unit]->error_handler, 1);
+ return 0;
+
+}
+
+#define PRINT(x) if (scsi_debug) printf x
+
+#define TRMAX 200
+int tr[TRMAX+3];
+int trpt, trpthi;
+#define TR(x) tr[trpt++] = x
+#define TRWRAP trpthi = trpt; trpt = 0;
+#define TRCHECK if (trpt > TRMAX) {TRWRAP}
+
+#define TRACE
+
+#ifdef TRACE
+
+#define LOGSIZE 256
+int sii_logpt;
+char sii_log[LOGSIZE];
+
+#define MAXLOG_VALUE 0x25
+struct {
+ char *name;
+ unsigned int count;
+} logtbl[MAXLOG_VALUE];
+
+static LOG(e,f)
+ char *f;
+{
+ sii_log[sii_logpt++] = (e);
+ if (sii_logpt == LOGSIZE) sii_logpt = 0;
+ if ((e) < MAXLOG_VALUE) {
+ logtbl[(e)].name = (f);
+ logtbl[(e)].count++;
+ }
+}
+
+sii_print_log(skip)
+ int skip;
+{
+ register int i, j;
+ register unsigned char c;
+
+ for (i = 0, j = sii_logpt; i < LOGSIZE; i++) {
+ c = sii_log[j];
+ if (++j == LOGSIZE) j = 0;
+ if (skip-- > 0)
+ continue;
+ if (c < MAXLOG_VALUE)
+ db_printf(" %s", logtbl[c].name);
+ else
+ db_printf("-x%x", c & 0x7f);
+ }
+ db_printf("\n");
+ return 0;
+}
+
+sii_print_stat()
+{
+ register int i;
+ register char *p;
+ for (i = 0; i < MAXLOG_VALUE; i++) {
+ if (p = logtbl[i].name)
+ printf("%d %s\n", logtbl[i].count, p);
+ }
+}
+
+#else /* TRACE */
+#define LOG(e,f)
+#endif /* TRACE */
+
+struct cnt {
+ unsigned int zeroes;
+ unsigned int usage;
+ unsigned int avg;
+ unsigned int min;
+ unsigned int max;
+};
+
+static bump(counter, value)
+ register struct cnt *counter;
+ register unsigned int value;
+{
+ register unsigned int n;
+
+ if (value == 0) {
+ counter->zeroes++;
+ return;
+ }
+ n = counter->usage + 1;
+ counter->usage = n;
+ if (n == 0) {
+ printf("{Counter at x%x overflowed with avg x%x}",
+ counter, counter->avg);
+ return;
+ } else
+ if (n == 1)
+ counter->min = 0xffffffff;
+
+ counter->avg = ((counter->avg * (n - 1)) + value) / n;
+ if (counter->min > value)
+ counter->min = value;
+ if (counter->max < value)
+ counter->max = value;
+}
+
+struct cnt
+ s_cnt;
+
+#else /* DEBUG */
+#define PRINT(x)
+#define LOG(e,f)
+#define TR(x)
+#define TRCHECK
+#define TRWRAP
+#endif /* DEBUG */
+
+
+/*
+ * Probe/Slave/Attach functions
+ */
+
+/*
+ * Probe routine:
+ * Should find out (a) if the controller is
+ * present and (b) which/where slaves are present.
+ *
+ * Implementation:
+ *	Select each possible target on the bus and send it a
+ *	test-unit-ready command, except of course ourselves.
+ */
+sii_probe(reg, ui)
+ unsigned reg;
+ struct bus_ctlr *ui;
+{
+ int unit = ui->unit;
+ sii_softc_t sii = &sii_softc_data[unit];
+ int target_id, i;
+ scsi_softc_t *sc;
+ register sii_padded_regmap_t *regs;
+ spl_t s;
+ boolean_t did_banner = FALSE;
+ char *dma_ptr;
+ static char *here = "sii_probe";
+
+ /*
+ * We are only called if the chip is there,
+ * but make sure anyways..
+ */
+ if (check_memory(reg, 0))
+ return 0;
+
+#ifdef MACH_KERNEL
+ /* Mappable version side */
+ SII_probe(reg, ui);
+#endif /*MACH_KERNEL*/
+
+ /*
+ * Initialize hw descriptor
+ */
+ sii_softc[unit] = sii;
+ sii->regs = (sii_padded_regmap_t *) (reg);
+ sii->buff = (volatile char*) (reg + SII_OFFSET_RAM);
+
+ queue_init(&sii->waiting_targets);
+
+ sc = scsi_master_alloc(unit, sii);
+ sii->sc = sc;
+
+ sc->go = sii_go;
+ sc->watchdog = scsi_watchdog;
+ sc->probe = sii_probe_target;
+
+ sii->wd.reset = sii_reset_scsibus;
+
+#ifdef MACH_KERNEL
+ sc->max_dma_data = -1; /* unlimited */
+#else
+ sc->max_dma_data = scsi_per_target_virtual;
+#endif
+
+ regs = sii->regs;
+
+ /*
+ * Clear out dma buffer
+ */
+ blkclr(sii->buff, SII_RAM_SIZE);
+
+ /*
+ * Reset chip, fully.
+ */
+ s = splbio();
+ sii_reset(regs, TRUE);
+
+ /*
+ * Our SCSI id on the bus.
+ * The user can set this via the prom on pmaxen/3maxen.
+ * If this changes it is easy to fix: make a default that
+ * can be changed as boot arg.
+ */
+#ifdef unneeded
+ regs->sii_id = (scsi_initiator_id[unit] & SII_ID_MASK)|SII_ID_IO;
+#endif
+ sc->initiator_id = regs->sii_id & SII_ID_MASK;
+ printf("%s%d: my SCSI id is %d", ui->name, unit, sc->initiator_id);
+
+ /*
+ * For all possible targets, see if there is one and allocate
+ * a descriptor for it if it is there.
+ */
+ for (target_id = 0, dma_ptr = (char*)sii->buff;
+ target_id < 8;
+ target_id++, dma_ptr += (PER_TGT_DMA_SIZE*2)) {
+
+ register unsigned csr, dsr;
+ register scsi_status_byte_t status;
+
+ /* except of course ourselves */
+ if (target_id == sc->initiator_id)
+ continue;
+
+ regs->sii_sel_csr = target_id;
+ wbflush();
+
+ /* select */
+ regs->sii_cmd = SII_CMD_SEL;
+ wbflush();
+
+ /* wait for a selection timeout delay, and some more */
+ delay(251000);
+
+ dsr = regs->sii_data_csr;
+ csr = regs->sii_conn_csr;
+ if ((csr & SII_CON_CON) == 0) {
+
+ regs->sii_conn_csr = csr;/*wtc bits*/
+
+ /* abort sel in progress */
+ if (csr & SII_CON_SIP) {
+ regs->sii_cmd = SII_CMD_DIS;
+ wbflush();
+ csr = sii_wait(&regs->sii_conn_csr, SII_CON_SCH,1);
+ regs->sii_conn_csr = 0xffff;/*wtc bits */
+ regs->sii_data_csr = 0xffff;
+ regs->sii_cmd = 0;
+ wbflush();
+ }
+ continue;
+ }
+
+ printf(",%s%d", did_banner++ ? " " : " target(s) at ",
+ target_id);
+
+ /* should be command phase here */
+ if (SCSI_PHASE(dsr) != SCSI_PHASE_CMD)
+ panic(here);
+
+ /* acknowledge state change */
+ SII_ACK(regs,csr,dsr,0);
+
+ /* build command in (bogus) dma area */
+ {
+ unsigned int *p = (unsigned int*) dma_ptr;
+
+ p[0] = SCSI_CMD_TEST_UNIT_READY | (0 << 8);
+ p[1] = 0 | (0 << 8);
+ p[2] = 0 | (0 << 8);
+ }
+
+ /* set up dma xfer parameters */
+ regs->sii_dma_len = 6;
+ regs->sii_dma_adr_low = SII_DMADR_LO(dma_ptr);
+ regs->sii_dma_adr_hi = SII_DMADR_HI(dma_ptr);
+ wbflush();
+
+ /* issue dma command */
+ SII_COMMAND(regs,csr,dsr,SII_CMD_XFER|SII_CMD_DMA);
+
+ /* wait till done */
+ dsr = sii_wait(&regs->sii_data_csr, SII_DTR_DONE,1);
+ regs->sii_cmd &= ~(SII_CMD_XFER|SII_CMD_DMA);
+ regs->sii_data_csr = SII_DTR_DONE;/* clear */
+ regs->sii_dma_len = 0;
+
+ /* move on to status phase */
+ dsr = sii_wait(&regs->sii_data_csr, SCSI_PHASE_STATUS,1);
+ csr = regs->sii_conn_csr;
+ SII_ACK(regs,csr,dsr,0);
+
+ if (SCSI_PHASE(dsr) != SCSI_PHASE_STATUS)
+ panic(here);
+
+ /* get status byte */
+ dsr = sii_wait(&regs->sii_data_csr, SII_DTR_IBF,1);
+ csr = regs->sii_conn_csr;
+
+ status.bits = regs->sii_data;
+ if (status.st.scsi_status_code != SCSI_ST_GOOD)
+ scsi_error( 0, SCSI_ERR_STATUS, status.bits, 0);
+
+ /* get cmd_complete message */
+ SII_ACK(regs,csr,dsr,0);
+ SII_COMMAND(regs,csr,dsr,SII_CMD_XFER);
+ dsr = sii_wait(&regs->sii_data_csr, SII_DTR_DONE,1);
+
+ dsr = sii_wait(&regs->sii_data_csr, SCSI_PHASE_MSG_IN,1);
+ csr = regs->sii_conn_csr;
+
+
+ SII_ACK(regs,csr,dsr,0);
+ i = regs->sii_data;
+ SII_COMMAND(regs,csr,dsr,SII_CMD_XFER);
+
+ /* check disconnected, clear all intr bits */
+ csr = sii_wait(&regs->sii_conn_csr, SII_CON_SCH,1);
+ if (regs->sii_conn_csr & SII_CON_CON)
+ panic(here);
+
+ regs->sii_data_csr = 0xffff;
+ regs->sii_conn_csr = 0xffff;
+ regs->sii_cmd = 0;
+
+ /*
+ * Found a target
+ */
+ sii->ntargets++;
+ {
+ register target_info_t *tgt;
+ tgt = scsi_slave_alloc(sc->masterno, target_id, sii);
+
+ tgt->dma_ptr = dma_ptr;
+ tgt->cmd_ptr = decent_buffer[unit*8 + target_id];
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ fdma_init(&tgt->fdma, scsi_per_target_virtual);
+#endif /*MACH_KERNEL*/
+ }
+ }
+ printf(".\n");
+
+ splx(s);
+ return 1;
+}
+
+boolean_t
+sii_probe_target(tgt, ior)
+ target_info_t *tgt;
+ io_req_t ior;
+{
+ sii_softc_t sii = sii_softc[tgt->masterno];
+ boolean_t newlywed;
+ int sii_probe_timeout();
+
+ newlywed = (tgt->cmd_ptr == 0);
+ if (newlywed) {
+ /* desc was allocated afresh */
+ char *dma_ptr = (char*)sii->buff;
+
+ dma_ptr += (PER_TGT_DMA_SIZE * tgt->target_id)*2;
+ tgt->dma_ptr = dma_ptr;
+ tgt->cmd_ptr = decent_buffer[tgt->masterno*8 + tgt->target_id];
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ fdma_init(&tgt->fdma, scsi_per_target_virtual);
+#endif /*MACH_KERNEL*/
+
+ }
+
+ /* Unfortunately, the SII chip does not have timeout support
+ for selection */
+ timeout(sii_probe_timeout, tgt, hz);
+
+ if (scsi_inquiry(tgt, SCSI_INQ_STD_DATA) == SCSI_RET_DEVICE_DOWN)
+ return FALSE;
+
+ untimeout(sii_probe_timeout, tgt);
+ tgt->flags = TGT_ALIVE;
+ return TRUE;
+}
+
+sii_probe_timeout(tgt)
+ target_info_t *tgt;
+{
+ sii_softc_t sii = (sii_softc_t)tgt->hw_state;
+ register sii_padded_regmap_t *regs = sii->regs;
+ int cs, ds;
+ spl_t s;
+
+ /* cancelled ? */
+ if (tgt->done != SCSI_RET_IN_PROGRESS)
+ return;
+
+ s = splbio();
+
+ /* Someone else might be using the bus (rare) */
+ switch (regs->sii_conn_csr & (SII_CON_LST|SII_CON_SIP)) {
+ case SII_CON_SIP:
+ /* We really timed out */
+ break;
+ case SII_CON_SIP|SII_CON_LST:
+ /* Someone else is (still) using the bus */
+ sii->wd.watchdog_state = SCSI_WD_ACTIVE;
+ /* fall-through */
+ default:
+		/* Did not get a chance at the bus yet */
+ timeout(sii_probe_timeout, tgt, hz);
+ goto ret;
+ }
+ regs->sii_cmd = SII_CMD_DIS;
+ wbflush();
+ regs->sii_csr |= SII_CSR_RSE;
+ regs->sii_cmd = 0;
+ wbflush();
+
+ sii->done = SCSI_RET_DEVICE_DOWN;
+ cs = regs->sii_conn_csr;
+ ds = regs->sii_data_csr;
+ if (!sii_end(sii, cs, ds))
+ (void) sii_reconnect(sii, cs, ds);
+ret:
+ splx(s);
+}
+
+
+static sii_wait(preg, until, complain)
+ volatile unsigned short *preg;
+{
+ int timeo = 1000000;
+ while ((*preg & until) != until) {
+ delay(1);
+ if (!timeo--) {
+ if (complain) {
+ gimmeabreak();
+ printf("sii_wait TIMEO with x%x\n", *preg);
+ }
+ break;
+ }
+ }
+#ifdef DEBUG
+ bump(&s_cnt, 1000000-timeo);
+#endif
+ return *preg;
+}
+
+sii_reset(regs, quickly)
+ register sii_padded_regmap_t *regs;
+ boolean_t quickly;
+{
+ int my_id;
+
+ my_id = regs->sii_id & SII_ID_MASK;
+
+ regs->sii_cmd = SII_CMD_RESET;
+ wbflush();
+ delay(30);
+
+	/* clear all the random bits */
+ regs->sii_conn_csr = SII_CON_SWA|SII_CON_SCH|SII_CON_BERR|SII_CON_RST;
+ regs->sii_data_csr = SII_DTR_ATN|SII_DTR_DONE;
+
+ regs->sii_id = my_id | SII_ID_IO;
+
+ regs->sii_dma_ctrl = 0; /* asynch */
+
+ regs->sii_dma_len = 0;
+ regs->sii_dma_adr_low = 0;
+ regs->sii_dma_adr_hi = 0;
+
+ regs->sii_csr = SII_CSR_IE|SII_CSR_PCE|SII_CSR_SLE|SII_CSR_HPM;
+ /* later: SII_CSR_RSE */
+
+ regs->sii_diag_csr = SII_DIAG_PORT_ENB;
+ wbflush();
+
+ if (quickly)
+ return;
+
+ /*
+ * reset the scsi bus, the interrupt routine does the rest
+ * or you can call sii_bus_reset().
+ */
+ regs->sii_cmd = SII_CMD_RST;
+
+}
+
+/*
+ * Operational functions
+ */
+
+/*
+ * Start a SCSI command on a target
+ */
+sii_go(tgt, cmd_count, in_count, cmd_only)
+ target_info_t *tgt;
+ boolean_t cmd_only;
+{
+ sii_softc_t sii;
+ register spl_t s;
+ boolean_t disconn;
+ script_t scp;
+ boolean_t (*handler)();
+
+ LOG(1,"go");
+
+ sii = (sii_softc_t)tgt->hw_state;
+
+ /*
+ * We cannot do real DMA.
+ */
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ if (tgt->ior)
+ fdma_map(&tgt->fdma, tgt->ior);
+#endif /*MACH_KERNEL*/
+
+ copyout_gap16(tgt->cmd_ptr, tgt->dma_ptr, cmd_count);
+
+ if ((tgt->cur_cmd == SCSI_CMD_WRITE) ||
+ (tgt->cur_cmd == SCSI_CMD_LONG_WRITE)){
+ io_req_t ior = tgt->ior;
+ register int len = ior->io_count;
+
+ tgt->transient_state.out_count = len;
+
+ if (len > PER_TGT_BUFF_SIZE)
+ len = PER_TGT_BUFF_SIZE;
+ copyout_gap16( ior->io_data,
+ tgt->dma_ptr + (cmd_count<<1),
+ len);
+ tgt->transient_state.copy_count = len;
+
+ /* avoid leaks */
+ if (len < tgt->block_size) {
+ bzero_gap16(tgt->dma_ptr + ((cmd_count + len)<<1),
+ len - tgt->block_size);
+ len = tgt->block_size;
+ tgt->transient_state.copy_count = len;
+ }
+
+ } else {
+ tgt->transient_state.out_count = 0;
+ tgt->transient_state.copy_count = 0;
+ }
+
+ tgt->transient_state.cmd_count = cmd_count;
+ tgt->transient_state.isa_oddbb = FALSE;
+
+ disconn = BGET(scsi_might_disconnect,tgt->masterno,tgt->target_id);
+ disconn = disconn && (sii->ntargets > 1);
+ disconn |= BGET(scsi_should_disconnect,tgt->masterno,tgt->target_id);
+
+ /*
+ * Setup target state
+ */
+ tgt->done = SCSI_RET_IN_PROGRESS;
+
+ handler = (disconn) ? sii_err_disconn : sii_err_generic;
+
+ switch (tgt->cur_cmd) {
+ case SCSI_CMD_READ:
+ case SCSI_CMD_LONG_READ:
+ LOG(0x13,"readop");
+ scp = sii_script_data_in;
+ break;
+ case SCSI_CMD_WRITE:
+ case SCSI_CMD_LONG_WRITE:
+ LOG(0x14,"writeop");
+ scp = sii_script_data_out;
+ break;
+ case SCSI_CMD_INQUIRY:
+ /* This is likely the first thing out:
+ do the synch neg if so */
+ if (!cmd_only && ((tgt->flags&TGT_DID_SYNCH)==0)) {
+ scp = sii_script_try_synch;
+ tgt->flags |= TGT_TRY_SYNCH;
+ break;
+ }
+ case SCSI_CMD_REQUEST_SENSE:
+ case SCSI_CMD_MODE_SENSE:
+ case SCSI_CMD_RECEIVE_DIAG_RESULTS:
+ case SCSI_CMD_READ_CAPACITY:
+ case SCSI_CMD_READ_BLOCK_LIMITS:
+ case SCSI_CMD_READ_TOC:
+ case SCSI_CMD_READ_SUBCH:
+ case SCSI_CMD_READ_HEADER:
+ case 0xc4: /* despised: SCSI_CMD_DEC_PLAYBACK_STATUS */
+ case 0xdd: /* despised: SCSI_CMD_NEC_READ_SUBCH_Q */
+ case 0xde: /* despised: SCSI_CMD_NEC_READ_TOC */
+ scp = sii_script_data_in;
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ case SCSI_CMD_MODE_SELECT:
+ case SCSI_CMD_REASSIGN_BLOCKS:
+ case SCSI_CMD_FORMAT_UNIT:
+ case 0xc9: /* vendor-spec: SCSI_CMD_DEC_PLAYBACK_CONTROL */
+ tgt->transient_state.cmd_count = sizeof_scsi_command(tgt->cur_cmd);
+ tgt->transient_state.out_count =
+ cmd_count - tgt->transient_state.cmd_count;
+ scp = sii_script_data_out;
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ case SCSI_CMD_TEST_UNIT_READY:
+ /*
+ * Do the synch negotiation here, unless done already
+ */
+ if (tgt->flags & TGT_DID_SYNCH) {
+ scp = sii_script_cmd;
+ } else {
+ scp = sii_script_try_synch;
+ tgt->flags |= TGT_TRY_SYNCH;
+ }
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ default:
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ scp = sii_script_cmd;
+ }
+
+ tgt->transient_state.script = scp;
+ tgt->transient_state.handler = handler;
+ tgt->transient_state.identify = (cmd_only) ? 0xff :
+ (disconn ? SCSI_IDENTIFY|SCSI_IFY_ENABLE_DISCONNECT :
+ SCSI_IDENTIFY);
+
+ if (in_count)
+ tgt->transient_state.in_count =
+ (in_count < tgt->block_size) ? tgt->block_size : in_count;
+ else
+ tgt->transient_state.in_count = 0;
+
+ tgt->transient_state.dma_offset = 0;
+
+ /*
+ * See if another target is currently selected on
+	 * this SCSI bus, i.e. lock the sii structure.
+ * Note that it is the strategy routine's job
+ * to serialize ops on the same target as appropriate.
+ * XXX here and everywhere, locks!
+ */
+ /*
+	 * Protection vis-a-vis reconnections makes it tricky.
+ */
+/* s = splbio();*/
+ s = splhigh();
+
+ if (sii->wd.nactive++ == 0)
+ sii->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+ if (sii->state & SII_STATE_BUSY) {
+ /*
+ * Queue up this target, note that this takes care
+ * of proper FIFO scheduling of the scsi-bus.
+ */
+ LOG(3,"enqueue");
+ LOG(0x80+tgt->target_id,0);
+ enqueue_tail(&sii->waiting_targets, (queue_entry_t) tgt);
+ } else {
+ /*
+ * It is down to at most two contenders now,
+		 * we will treat reconnections the same as selections
+ * and let the scsi-bus arbitration process decide.
+ */
+ sii->state |= SII_STATE_BUSY;
+ sii->next_target = tgt;
+ sii_attempt_selection(sii);
+ /*
+ * Note that we might still lose arbitration..
+ */
+ }
+ splx(s);
+}
+
+sii_attempt_selection(sii)
+ sii_softc_t sii;
+{
+ target_info_t *tgt;
+ register int out_count;
+ sii_padded_regmap_t *regs;
+ register int cmd;
+
+ regs = sii->regs;
+ tgt = sii->next_target;
+
+ LOG(4,"select");
+ LOG(0x80+tgt->target_id,0);
+
+ /*
+ * Init bus state variables and set registers.
+ * [They are intermixed to avoid wbflush()es]
+ */
+ sii->active_target = tgt;
+
+ out_count = tgt->transient_state.cmd_count;
+
+ /* set dma pointer and counter */
+ regs->sii_dma_len = out_count;
+ regs->sii_dma_adr_low = SII_DMADR_LO(tgt->dma_ptr);
+ regs->sii_dma_adr_hi = SII_DMADR_HI(tgt->dma_ptr);
+
+ sii->error_handler = tgt->transient_state.handler;
+
+ regs->sii_sel_csr = tgt->target_id;
+
+ sii->done = SCSI_RET_IN_PROGRESS;
+
+ regs->sii_dma_ctrl = tgt->sync_offset;
+
+ sii->cmd_count = out_count;
+
+/* if (regs->sii_conn_csr & (SII_CON_CON|SII_CON_DST))*/
+ if (regs->sii_sc1 & (SII_CS1_BSY|SII_CS1_SEL))
+ return;
+ regs->sii_csr = SII_CSR_IE|SII_CSR_PCE|SII_CSR_SLE|SII_CSR_HPM;
+
+ sii->script = tgt->transient_state.script;
+ sii->in_count = 0;
+ sii->out_count = 0;
+
+ if (tgt->flags & TGT_DID_SYNCH) {
+ if (tgt->transient_state.identify == 0xff)
+ cmd = SII_CMD_SEL;
+ else {
+ cmd = SII_CMD_SEL | SII_CMD_ATN |
+ SII_CMD_CON | SII_CMD_XFER | SCSI_PHASE_MSG_OUT;
+ /* chain select and message out */
+/*??*/ regs->sii_dma_1st_byte = tgt->transient_state.identify;
+ }
+ } else if (tgt->flags & TGT_TRY_SYNCH)
+ cmd = SII_CMD_SEL | SII_CMD_ATN;
+ else
+ cmd = SII_CMD_SEL;
+
+/* if (regs->sii_conn_csr & (SII_CON_CON|SII_CON_DST)) { */
+ if (regs->sii_sc1 & (SII_CS1_BSY|SII_CS1_SEL)) {
+ /* let the reconnection attempt proceed */
+ regs->sii_csr = SII_CSR_IE|SII_CSR_PCE|SII_CSR_SLE|
+ SII_CSR_HPM|SII_CSR_RSE;
+ sii->script = 0;
+ LOG(0x8c,0);
+ } else {
+ regs->sii_cmd = cmd;
+ wbflush();
+ }
+}
+
+/*
+ * Interrupt routine
+ * Take interrupts from the chip
+ *
+ * Implementation:
+ * Move along the current command's script if
+ * all is well, invoke error handler if not.
+ */
+boolean_t sii_inside_sii_intr = FALSE;
+
+sii_intr(unit,spllevel)
+{
+ register sii_softc_t sii;
+ register script_t scp;
+ register int cs, ds;
+ register sii_padded_regmap_t *regs;
+ boolean_t try_match;
+#ifdef MACH_KERNEL
+ extern boolean_t rz_use_mapped_interface;
+
+ if (rz_use_mapped_interface)
+ return SII_intr(unit,spllevel);
+#endif /*MACH_KERNEL*/
+
+ /* interrupt code is NOT reentrant */
+ if (sii_inside_sii_intr) {
+ LOG(0x22,"!!attempted to reenter sii_intr!!");
+ return;
+ }
+ sii_inside_sii_intr = TRUE;
+
+ LOG(5,"\n\tintr");
+
+ sii = sii_softc[unit];
+
+ /* collect status information */
+ regs = sii->regs;
+ cs = regs->sii_conn_csr;
+ ds = regs->sii_data_csr;
+
+TR(cs);
+TR(ds);
+TR(regs->sii_cmd);
+TRCHECK;
+
+ if (cs & SII_CON_RST){
+ sii_bus_reset(sii);
+ goto getout;
+ }
+
+ /* we got an interrupt allright */
+ if (sii->active_target)
+ sii->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+ /* rid of DONEs */
+ if (ds & SII_DTR_DONE) {
+ regs->sii_data_csr = SII_DTR_DONE;
+ LOG(0x1e,"done");
+ ds = regs->sii_data_csr;
+ cs = regs->sii_conn_csr;
+ }
+
+ /* drop spurious calls, note that sometimes
+ * ds and cs get out-of-sync */
+ if (((cs & SII_CON_CI) | (ds & SII_DTR_DI)) == 0) {
+ LOG(2,"SPURIOUS");
+ goto getout;
+ }
+
+ /* clear interrupt flags */
+
+ regs->sii_conn_csr = cs;
+ regs->sii_data_csr = cs;
+
+ /* drop priority */
+ splx(spllevel);
+
+ if ((sii->state & SII_STATE_TARGET) ||
+ (cs & SII_CON_TGT)) {
+ sii_target_intr(sii,cs,ds);
+ goto getout;
+ }
+
+ scp = sii->script;
+
+ /* check who got the bus */
+ if ((scp == 0) || (cs & SII_CON_LST)) {
+ if (cs & SII_CON_DST) {
+ sii_reconnect(sii, cs, ds);
+ goto getout;
+ }
+ LOG(0x12,"no-script");
+ goto getout;
+ }
+
+ if (SCRIPT_MATCH(cs,ds) != scp->condition) {
+ if (try_match = (*sii->error_handler)(sii, cs, ds)) {
+ cs = regs->sii_conn_csr;
+ ds = regs->sii_data_csr;
+ }
+ } else
+ try_match = TRUE;
+
+ /* might have been side effected */
+ scp = sii->script;
+
+ if (try_match && (SCRIPT_MATCH(cs,ds) == scp->condition)) {
+ /*
+ * Perform the appropriate operation,
+ * then proceed
+ */
+ if ((*scp->action)(sii, cs, ds)) {
+ /* might have been side effected */
+ scp = sii->script;
+ sii->script = scp + 1;
+ regs->sii_cmd = scp->command;
+ wbflush();
+ }
+ }
+getout:
+ sii_inside_sii_intr = FALSE;
+}
+
+
+sii_target_intr(sii)
+ register sii_softc_t sii;
+{
+ panic("SII: TARGET MODE !!!\n");
+}
+
+/*
+ * All the many little things that the interrupt
+ * routine might switch to
+ */
+boolean_t
+sii_script_true(sii, cs, ds)
+ register sii_softc_t sii;
+
+{
+ SII_COMMAND(sii->regs,cs,ds,SII_CON_CON/*sanity*/);
+ LOG(7,"nop");
+ return TRUE;
+}
+
+boolean_t
+sii_end_transaction( sii, cs, ds)
+ register sii_softc_t sii;
+{
+ register sii_padded_regmap_t *regs = sii->regs;
+
+ SII_COMMAND(sii->regs,cs,ds,0);
+
+ LOG(0x1f,"end_t");
+
+ regs->sii_csr &= ~SII_CSR_RSE;
+
+ /* is the fifo really clean here ? */
+ ds = sii_wait(&regs->sii_data_csr, SII_DTR_IBF,1);
+
+ if (regs->sii_data != SCSI_COMMAND_COMPLETE)
+ printf("{T%x}", regs->sii_data);
+
+ regs->sii_cmd = SII_CMD_XFER | SII_CON_CON | SCSI_PHASE_MSG_IN |
+ (cs & SII_CON_DST);
+ wbflush();
+
+ ds = sii_wait(&regs->sii_data_csr, SII_DTR_DONE,1);
+ regs->sii_data_csr = SII_DTR_DONE;
+
+ regs->sii_cmd = 0/*SII_PHASE_DISC*/;
+ wbflush();
+
+ cs = regs->sii_conn_csr;
+
+ if ((cs & SII_CON_SCH) == 0)
+ cs = sii_wait(&regs->sii_conn_csr, SII_CON_SCH,1);
+ regs->sii_conn_csr = SII_CON_SCH;
+
+ regs->sii_csr |= SII_CSR_RSE;
+
+ cs = regs->sii_conn_csr;
+
+ if (!sii_end(sii, cs, ds))
+ (void) sii_reconnect(sii, cs, ds);
+ return FALSE;
+}
+
+boolean_t
+sii_end( sii, cs, ds)
+ register sii_softc_t sii;
+{
+ register target_info_t *tgt;
+ register io_req_t ior;
+ register sii_padded_regmap_t *regs = sii->regs;
+
+ LOG(6,"end");
+
+ tgt = sii->active_target;
+
+ if ((tgt->done = sii->done) == SCSI_RET_IN_PROGRESS)
+ tgt->done = SCSI_RET_SUCCESS;
+
+ sii->script = 0;
+
+ if (sii->wd.nactive-- == 1)
+ sii->wd.watchdog_state = SCSI_WD_INACTIVE;
+
+ /* check reconnection not pending */
+ cs = regs->sii_conn_csr;
+ if ((cs & SII_CON_DST) == 0)
+ sii_release_bus(sii);
+ else {
+ sii->active_target = 0;
+/* sii->state &= ~SII_STATE_BUSY; later */
+ }
+
+ if (ior = tgt->ior) {
+ LOG(0xA,"ops->restart");
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ fdma_unmap(&tgt->fdma, ior);
+#endif /*MACH_KERNEL*/
+ (*tgt->dev_ops->restart)(tgt, TRUE);
+ if (cs & SII_CON_DST)
+ sii->state &= ~SII_STATE_BUSY;
+ }
+
+ return ((cs & SII_CON_DST) == 0);
+}
+
+boolean_t
+sii_release_bus(sii)
+ register sii_softc_t sii;
+{
+ boolean_t ret = FALSE;
+
+ LOG(9,"release");
+
+ sii->script = 0;
+
+ if (sii->state & SII_STATE_COLLISION) {
+
+ LOG(0xB,"collided");
+ sii->state &= ~SII_STATE_COLLISION;
+ sii_attempt_selection(sii);
+
+ } else if (queue_empty(&sii->waiting_targets)) {
+
+ sii->state &= ~SII_STATE_BUSY;
+ sii->active_target = 0;
+ ret = TRUE;
+
+ } else {
+
+ LOG(0xC,"dequeue");
+ sii->next_target = (target_info_t *)
+ dequeue_head(&sii->waiting_targets);
+ sii_attempt_selection(sii);
+ }
+ return ret;
+}
+
+boolean_t
+sii_get_status( sii, cs, ds)
+ register sii_softc_t sii;
+{
+ register sii_padded_regmap_t *regs = sii->regs;
+ register scsi2_status_byte_t status;
+ register target_info_t *tgt;
+ unsigned int len;
+ unsigned short cmd;
+
+ LOG(0xD,"get_status");
+TRWRAP;
+
+ sii->state &= ~SII_STATE_DMA_IN;
+
+ tgt = sii->active_target;
+ sii->error_handler = tgt->transient_state.handler;
+
+ if (len = sii->in_count) {
+ if ((tgt->cur_cmd != SCSI_CMD_READ) &&
+ (tgt->cur_cmd != SCSI_CMD_LONG_READ)){
+ len -= regs->sii_dma_len;
+ copyin_gap16(tgt->dma_ptr, tgt->cmd_ptr, len);
+ if (len & 0x1) /* odd byte, left in silo */
+ tgt->cmd_ptr[len - 1] = regs->sii_data;
+ } else {
+ if (regs->sii_dma_len) {
+#if 0
+ this is incorrect and besides..
+ tgt->ior->io_residual = regs->sii_dma_len;
+#endif
+ len -= regs->sii_dma_len;
+ }
+ careful_copyin_gap16( tgt, tgt->transient_state.dma_offset,
+ len, ds & SII_DTR_OBB,
+ regs->sii_dma_1st_byte);
+ }
+ sii->in_count = 0;
+ }
+
+ len = regs->sii_dma_len;
+ regs->sii_dma_len = 0;/*later?*/
+
+ /* if dma is still in progress we have to quiet it down */
+ cmd = regs->sii_cmd;
+ if (cmd & SII_CMD_DMA) {
+ regs->sii_cmd = cmd & ~(SII_CMD_DMA|SII_CMD_XFER);
+ wbflush();
+ /* DONE might NOT pop up. Sigh. */
+ delay(10);
+ regs->sii_data_csr = regs->sii_data_csr;
+ }
+
+ regs->sii_cmd = SCSI_PHASE_STATUS|SII_CON_CON|(cs & SII_CON_DST);
+ wbflush();
+
+ ds = sii_wait(&regs->sii_data_csr, SII_DTR_IBF,1);
+ status.bits = regs->sii_data;
+
+ if (status.st.scsi_status_code != SCSI_ST_GOOD) {
+ scsi_error(sii->active_target, SCSI_ERR_STATUS, status.bits, 0);
+ sii->done = (status.st.scsi_status_code == SCSI_ST_BUSY) ?
+ SCSI_RET_RETRY : SCSI_RET_NEED_SENSE;
+ } else
+ sii->done = SCSI_RET_SUCCESS;
+
+ return TRUE;
+}
+
+boolean_t
+sii_dma_in( sii, cs, ds)
+ register sii_softc_t sii;
+{
+ register target_info_t *tgt;
+ register sii_padded_regmap_t *regs = sii->regs;
+ char *dma_ptr;
+ register int count;
+ boolean_t advance_script = TRUE;
+
+ SII_COMMAND(regs,cs,ds,0);
+ LOG(0xE,"dma_in");
+
+ tgt = sii->active_target;
+ sii->error_handler = tgt->transient_state.handler;
+ sii->state |= SII_STATE_DMA_IN;
+
+ if (sii->in_count == 0) {
+ /*
+ * Got nothing yet: either just sent the command
+ * or just reconnected
+ */
+ register int avail;
+
+ if (tgt->transient_state.isa_oddbb) {
+ regs->sii_dma_1st_byte = tgt->transient_state.oddbb;
+ tgt->transient_state.isa_oddbb = FALSE;
+ }
+
+ count = tgt->transient_state.in_count;
+ count = u_min(count, (SII_DMA_COUNT_MASK+1));
+ avail = PER_TGT_BUFF_SIZE - tgt->transient_state.dma_offset;
+ count = u_min(count, avail);
+
+ /* common case of 8k-or-less read ? */
+ advance_script = (tgt->transient_state.in_count == count);
+
+ } else {
+
+ /*
+ * We received some data.
+ * Also, take care of bogus interrupts
+ */
+ register int offset, xferred;
+ unsigned char obb = regs->sii_data;
+
+ xferred = sii->in_count - regs->sii_dma_len;
+ assert(xferred > 0);
+ tgt->transient_state.in_count -= xferred;
+ assert(tgt->transient_state.in_count > 0);
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ count = u_min(tgt->transient_state.in_count, (SII_DMA_COUNT_MASK+1));
+ if (tgt->transient_state.dma_offset == PER_TGT_BUFF_SIZE) {
+ tgt->transient_state.dma_offset = 0;
+ } else {
+ register int avail;
+ avail = PER_TGT_BUFF_SIZE - tgt->transient_state.dma_offset;
+ count = u_min(count, avail);
+ }
+
+ /* get some more */
+ dma_ptr = tgt->dma_ptr + (tgt->transient_state.dma_offset << 1);
+ sii->in_count = count;
+ regs->sii_dma_len = count;
+ regs->sii_dma_adr_low = SII_DMADR_LO(dma_ptr);
+ regs->sii_dma_adr_hi = SII_DMADR_HI(dma_ptr);
+ wbflush();
+ regs->sii_cmd = sii->script->command;
+ wbflush();
+
+ /* copy what we got */
+ careful_copyin_gap16( tgt, offset, xferred, ds & SII_DTR_OBB, obb);
+
+ /* last chunk ? */
+ if (count == tgt->transient_state.in_count) {
+ sii->script++;
+ }
+ return FALSE;
+ }
+quickie:
+ sii->in_count = count;
+ dma_ptr = tgt->dma_ptr + (tgt->transient_state.dma_offset << 1);
+ regs->sii_dma_len = count;
+ regs->sii_dma_adr_low = SII_DMADR_LO(dma_ptr);
+ regs->sii_dma_adr_hi = SII_DMADR_HI(dma_ptr);
+ wbflush();
+
+ if (!advance_script) {
+ regs->sii_cmd = sii->script->command;
+ wbflush();
+ }
+ return advance_script;
+}
+
+/* send data to target. Called in three different ways:
+ (a) to start transfer (b) to restart a bigger-than-8k
+ transfer (c) after reconnection
+ */
+boolean_t
+sii_dma_out( sii, cs, ds)
+ register sii_softc_t sii;
+{
+ register sii_padded_regmap_t *regs = sii->regs;
+ register char *dma_ptr;
+ register target_info_t *tgt;
+ boolean_t advance_script = TRUE;
+ int count = sii->out_count;
+
+ SII_COMMAND(regs,cs,ds,0);
+ LOG(0xF,"dma_out");
+
+ tgt = sii->active_target;
+ sii->error_handler = tgt->transient_state.handler;
+ sii->state &= ~SII_STATE_DMA_IN;
+
+ if (sii->out_count == 0) {
+ /*
+ * Nothing committed: either just sent the
+ * command or reconnected
+ */
+ register int remains;
+
+ count = tgt->transient_state.out_count;
+ count = u_min(count, (SII_DMA_COUNT_MASK+1));
+ remains = PER_TGT_BUFF_SIZE - tgt->transient_state.dma_offset;
+ count = u_min(count, remains);
+
+ /* common case of 8k-or-less write ? */
+ advance_script = (tgt->transient_state.out_count == count);
+ } else {
+ /*
+ * We sent some data.
+ * Also, take care of bogus interrupts
+ */
+ register int offset, xferred;
+
+ xferred = sii->out_count - regs->sii_dma_len;
+ assert(xferred > 0);
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ count = u_min(tgt->transient_state.out_count, (SII_DMA_COUNT_MASK+1));
+ if (tgt->transient_state.dma_offset == PER_TGT_BUFF_SIZE) {
+ tgt->transient_state.dma_offset = 0;
+ } else {
+ register int remains;
+ remains = PER_TGT_BUFF_SIZE - tgt->transient_state.dma_offset;
+ count = u_min(count, remains);
+ }
+ /* last chunk ? */
+ if (tgt->transient_state.out_count == count)
+ goto quickie;
+
+ /* ship some more */
+ dma_ptr = tgt->dma_ptr +
+ ((tgt->transient_state.cmd_count + tgt->transient_state.dma_offset) << 1);
+ sii->out_count = count;
+ regs->sii_dma_len = count;
+ regs->sii_dma_adr_low = SII_DMADR_LO(dma_ptr);
+ regs->sii_dma_adr_hi = SII_DMADR_HI(dma_ptr);
+ wbflush();
+ regs->sii_cmd = sii->script->command;
+
+ /* copy some more data */
+ careful_copyout_gap16(tgt, offset, xferred);
+ return FALSE;
+ }
+
+quickie:
+ sii->out_count = count;
+ dma_ptr = tgt->dma_ptr +
+ ((tgt->transient_state.cmd_count + tgt->transient_state.dma_offset) << 1);
+ regs->sii_dma_len = count;
+ regs->sii_dma_adr_low = SII_DMADR_LO(dma_ptr);
+ regs->sii_dma_adr_hi = SII_DMADR_HI(dma_ptr);
+ wbflush();
+
+ if (!advance_script) {
+ regs->sii_cmd = sii->script->command;
+ wbflush();
+ }
+ return advance_script;
+}
+
+/* disconnect-reconnect ops */
+
+/* get the message in via dma */
+boolean_t
+sii_msg_in(sii, cs, ds)
+ register sii_softc_t sii;
+ register unsigned char cs, ds;
+{
+ register target_info_t *tgt;
+ char *dma_ptr;
+ register sii_padded_regmap_t *regs = sii->regs;
+
+ LOG(0x15,"msg_in");
+
+ tgt = sii->active_target;
+
+ dma_ptr = tgt->dma_ptr;
+ /* We would clobber the data for READs */
+ if (sii->state & SII_STATE_DMA_IN) {
+ register int offset;
+ offset = tgt->transient_state.cmd_count + tgt->transient_state.dma_offset;
+ if (offset & 1) offset++;
+ dma_ptr += (offset << 1);
+ }
+
+ regs->sii_dma_adr_low = SII_DMADR_LO(dma_ptr);
+ regs->sii_dma_adr_hi = SII_DMADR_HI(dma_ptr);
+ /* We only really expect two bytes */
+ regs->sii_dma_len = sizeof(scsi_command_group_0);
+ wbflush();
+
+ return TRUE;
+}
+
+/* check the message is indeed a DISCONNECT */
+boolean_t
+sii_disconnect(sii, cs, ds)
+ register sii_softc_t sii;
+ register unsigned char cs, ds;
+
+{
+ register target_info_t *tgt;
+ register int len;
+ boolean_t ok = FALSE;
+ unsigned int dmsg = 0;
+
+ tgt = sii->active_target;
+
+ len = sizeof(scsi_command_group_0) - sii->regs->sii_dma_len;
+ PRINT(("{G%d}",len));
+
+/* if (len == 0) ok = FALSE; */
+ if (len == 1) {
+ dmsg = sii->regs->sii_dma_1st_byte;
+ ok = (dmsg == SCSI_DISCONNECT);
+ } else if (len == 2) {
+ register char *msgs;
+ register unsigned int offset;
+ register sii_padded_regmap_t *regs = sii->regs;
+
+ /* wherever it was, take it from there */
+ offset = regs->sii_dma_adr_low | ((regs->sii_dma_adr_hi&3)<<16);
+ msgs = (char*)sii->buff + (offset << 1);
+ dmsg = *((unsigned short *)msgs);
+
+		/* An SDP message precedes it in non-completed READs */
+ ok = (((dmsg & 0xff) == SCSI_DISCONNECT) ||
+ (dmsg == ((SCSI_DISCONNECT<<8)|SCSI_SAVE_DATA_POINTER)));
+ }
+ if (!ok)
+ printf("[tgt %d bad msg (%d): %x]", tgt->target_id, len, dmsg);
+
+ return TRUE;
+}
+
+/* save all relevant data, free the BUS */
+boolean_t
+sii_disconnected(sii, cs, ds)
+ register sii_softc_t sii;
+ register unsigned char cs, ds;
+
+{
+ register target_info_t *tgt;
+
+ SII_COMMAND(sii->regs,cs,ds,0);
+
+ sii->regs->sii_csr = SII_CSR_IE|SII_CSR_PCE|SII_CSR_SLE|
+ SII_CSR_HPM|SII_CSR_RSE;
+
+ LOG(0x16,"disconnected");
+
+ sii_disconnect(sii,cs,ds);
+
+ tgt = sii->active_target;
+ tgt->flags |= TGT_DISCONNECTED;
+ tgt->transient_state.handler = sii->error_handler;
+ /* the rest has been saved in sii_err_disconn() */
+
+ PRINT(("{D%d}", tgt->target_id));
+
+ sii_release_bus(sii);
+
+ return FALSE;
+}
+
+/* get reconnect message, restore BUS */
+boolean_t
+sii_reconnect(sii, cs, ds)
+ register sii_softc_t sii;
+ register unsigned char cs, ds;
+
+{
+ register target_info_t *tgt;
+ sii_padded_regmap_t *regs;
+ int id;
+
+ regs = sii->regs;
+ regs->sii_conn_csr = SII_CON_SCH;
+ regs->sii_cmd = SII_CON_CON|SII_CON_DST|SCSI_PHASE_MSG_IN;
+ wbflush();
+
+ LOG(0x17,"reconnect");
+
+ /*
+ * See if this reconnection collided with a selection attempt
+ */
+ if (sii->state & SII_STATE_BUSY)
+ sii->state |= SII_STATE_COLLISION;
+
+ sii->state |= SII_STATE_BUSY;
+
+ cs = regs->sii_conn_csr;
+
+ /* tk50s are slow */
+ if ((cs & SII_CON_CON) == 0)
+ cs = sii_wait(&regs->sii_conn_csr, SII_CON_CON,1);
+
+ /* ?? */
+ if (regs->sii_conn_csr & SII_CON_BERR)
+ regs->sii_conn_csr = SII_CON_BERR;
+
+ if ((ds & SII_DTR_IBF) == 0)
+ ds = sii_wait(&regs->sii_data_csr, SII_DTR_IBF,1);
+
+ if (regs->sii_data != SCSI_IDENTIFY)
+ printf("{I%x %x}", regs->sii_data, regs->sii_dma_1st_byte);
+
+ /* find tgt: id is in sii_destat */
+ id = regs->sii_destat;
+
+	if (id > 7) panic("sii_reconnect");
+	tgt = sii->sc->target[id];
+	if (tgt == 0) panic("sii_reconnect");
+
+ PRINT(("{R%d}", id));
+ if (sii->state & SII_STATE_COLLISION)
+ PRINT(("[B %d-%d]", sii->active_target->target_id, id));
+
+ LOG(0x80+id,0);
+
+ sii->active_target = tgt;
+ tgt->flags &= ~TGT_DISCONNECTED;
+
+ /* synch params */
+ regs->sii_dma_ctrl = tgt->sync_offset;
+ regs->sii_dma_len = 0;
+
+ sii->script = tgt->transient_state.script;
+ sii->error_handler = sii_err_rdp;
+ sii->in_count = 0;
+ sii->out_count = 0;
+
+ regs->sii_cmd = SII_CMD_XFER|SII_CMD_CON|SII_CMD_DST|SCSI_PHASE_MSG_IN;
+ wbflush();
+
+ (void) sii_wait(&regs->sii_data_csr, SII_DTR_DONE,1);
+ regs->sii_data_csr = SII_DTR_DONE;
+
+ return TRUE;
+}
+
+
+/* do the synch negotiation */
+boolean_t
+sii_dosynch( sii, cs, ds)
+ register sii_softc_t sii;
+{
+ /*
+ * Phase is MSG_OUT here, cmd has not been xferred
+ */
+ int *p, len;
+ unsigned short dmalo, dmahi, dmalen;
+ register target_info_t *tgt;
+ register sii_padded_regmap_t *regs = sii->regs;
+ unsigned char off;
+
+ regs->sii_cmd = SCSI_PHASE_MSG_OUT|SII_CMD_ATN|SII_CON_CON;
+ wbflush();
+
+ LOG(0x11,"dosync");
+
+ tgt = sii->active_target;
+
+ tgt->flags |= TGT_DID_SYNCH; /* only one chance */
+ tgt->flags &= ~TGT_TRY_SYNCH;
+
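+	/*
+	 * Build IDENTIFY followed by an extended SDTR message
+	 * (extended-message marker, length 3, SYNC_XFER_REQUEST code,
+	 * period, offset), two message bytes per buffer word; the
+	 * final offset byte is shipped by hand further down.
+	 */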
+ p = (int*) (tgt->dma_ptr + (((regs->sii_dma_len<<1) + 2) & ~3));
+ p[0] = SCSI_IDENTIFY | (SCSI_EXTENDED_MESSAGE<<8);
+ p[1] = 3 | (SCSI_SYNC_XFER_REQUEST<<8);
+ if (BGET(scsi_no_synchronous_xfer,tgt->masterno,tgt->target_id))
+ off = 0;
+ else
+ off = sii_max_offset;
+ /* but we'll ship "off" manually */
+ p[2] = sii_to_scsi_period(sii_min_period) |(off << 8);
+
+ dmalen = regs->sii_dma_len;
+ dmalo = regs->sii_dma_adr_low;
+ dmahi = regs->sii_dma_adr_hi;
+ regs->sii_dma_len = sizeof(scsi_synch_xfer_req_t) /* + 1 */;
+ regs->sii_dma_adr_low = SII_DMADR_LO(p);
+ regs->sii_dma_adr_hi = SII_DMADR_HI(p);
+ wbflush();
+
+ regs->sii_cmd = SII_CMD_DMA|SII_CMD_XFER|SII_CMD_ATN|
+ SII_CON_CON|SCSI_PHASE_MSG_OUT;
+ wbflush();
+
+ /* wait for either DONE or MIS */
+ ds = sii_wait(&regs->sii_data_csr, SII_DTR_DI,1);
+
+ /* TK50s do not like xtended messages */
+ /* and some others just ignore the standard */
+ if (SCSI_PHASE(ds) != SCSI_PHASE_MSG_OUT) {
+ /* disentangle FIFO */
+ regs->sii_cmd = SII_CON_CON|SCSI_PHASE_MSG_OUT;
+ ds = sii_wait(&regs->sii_data_csr, SII_DTR_DONE,1);
+ if (SCSI_PHASE(ds) == SCSI_PHASE_MSG_IN)
+ goto msgin;
+ goto got_answer;
+ }
+
+ /* ack and stop dma */
+ regs->sii_cmd = SII_CON_CON|SCSI_PHASE_MSG_OUT|SII_CMD_ATN;
+ wbflush();
+ ds = sii_wait(&regs->sii_data_csr, SII_DTR_DONE,1);
+ regs->sii_data_csr = SII_DTR_DONE;
+ wbflush();
+
+ /* last byte of message */
+ regs->sii_data = off;
+ wbflush();
+ regs->sii_cmd = SII_CMD_XFER|SII_CON_CON|SCSI_PHASE_MSG_OUT;
+ wbflush();
+
+ /* Race here: who will interrupt first, the DMA
+ controller or the status watching machine ? */
+ delay(1000);
+ regs->sii_cmd = SII_CON_CON|SCSI_PHASE_MSG_OUT;
+ wbflush();
+
+ ds = sii_wait(&regs->sii_data_csr, SII_DTR_DONE,1);
+ regs->sii_data_csr = SII_DTR_DONE;
+
+	/* The standard sez there's nothing else the target can do but.. */
+ ds = sii_wait(&regs->sii_data_csr, SCSI_PHASE_MSG_IN,0);
+
+ /* Of course, what are standards for ? */
+ if (SCSI_PHASE(ds) == SCSI_PHASE_CMD)
+ goto cmdp;
+msgin:
+ /* ack */
+ regs->sii_cmd = SII_CON_CON|SCSI_PHASE_MSG_IN;
+ wbflush();
+
+ /* set up dma to receive answer */
+ regs->sii_dma_adr_low = SII_DMADR_LO(p);
+ regs->sii_dma_adr_hi = SII_DMADR_HI(p);
+ regs->sii_dma_len = sizeof(scsi_synch_xfer_req_t);
+ wbflush();
+ regs->sii_cmd = SII_CMD_DMA|SII_CMD_XFER|SII_CON_CON|SCSI_PHASE_MSG_IN;
+ wbflush();
+
+ /* wait for the answer, and look at it */
+ ds = sii_wait(&regs->sii_data_csr, SII_DTR_MIS,1);
+
+ regs->sii_cmd = SII_CON_CON|SCSI_PHASE_MSG_IN;
+ wbflush();
+ ds = sii_wait(&regs->sii_data_csr, SII_DTR_DONE,1);
+
+got_answer:
+ /* do not cancel the phase mismatch */
+ regs->sii_data_csr = SII_DTR_DONE;
+
+ if (regs->sii_dma_len || ((p[0] & 0xff) != SCSI_EXTENDED_MESSAGE)) {
+ /* did not like it */
+ printf(" did not like SYNCH xfer ");
+ } else {
+ /* will do synch */
+ tgt->sync_period = scsi_period_to_sii((p[1]>>8)&0xff);
+ tgt->sync_offset = regs->sii_data; /* odd xfer, in silo */
+ /* sanity */
+ if (tgt->sync_offset > sii_max_offset)
+ tgt->sync_offset = sii_max_offset;
+ regs->sii_dma_ctrl = tgt->sync_offset;
+ }
+
+cmdp:
+ /* phase should be command now */
+ regs->sii_dma_len = dmalen;
+ regs->sii_dma_adr_low = dmalo;
+ regs->sii_dma_adr_hi = dmahi;
+ wbflush();
+
+ /* continue with simple command script */
+ sii->error_handler = sii_err_generic;
+
+ sii->script = (tgt->cur_cmd == SCSI_CMD_INQUIRY) ?
+ sii_script_data_in : sii_script_cmd;
+ if (SCSI_PHASE(ds) == SCSI_PHASE_CMD )
+ return TRUE;
+
+ sii->script++;
+ if (SCSI_PHASE(ds) == SCSI_PHASE_STATUS )
+ return TRUE;
+
+ sii->script++; /* msgin? */
+ sii->script++;
+ if (SCSI_PHASE(ds) == SII_PHASE_DISC)
+ return TRUE;
+
+gimmeabreak();
+ panic("sii_dosynch");
+ return FALSE;
+}
+
+/*
+ * The bus was reset
+ */
+sii_bus_reset(sii)
+ register sii_softc_t sii;
+{
+ register sii_padded_regmap_t *regs = sii->regs;
+
+ LOG(0x21,"bus_reset");
+
+ /*
+ * Clear interrupt bits
+ */
+ regs->sii_conn_csr = 0xffff;
+ regs->sii_data_csr = 0xffff;
+
+ /*
+ * Clear bus descriptor
+ */
+ sii->script = 0;
+ sii->error_handler = 0;
+ sii->active_target = 0;
+ sii->next_target = 0;
+ sii->state = 0;
+ queue_init(&sii->waiting_targets);
+ sii->wd.nactive = 0;
+ sii_reset(regs, TRUE);
+
+ log(LOG_KERN, "sii: (%d) bus reset ", ++sii->wd.reset_count);
+ delay(scsi_delay_after_reset); /* some targets take long to reset */
+
+ if (sii->sc == 0) /* sanity */
+ return;
+
+ sii_inside_sii_intr = FALSE;
+
+ scsi_bus_was_reset(sii->sc);
+}
+
+/*
+ * Error handlers
+ */
+
+/*
+ * Generic, default handler
+ */
+boolean_t
+sii_err_generic(sii, cs, ds)
+ register sii_softc_t sii;
+{
+ register int cond = sii->script->condition;
+
+ LOG(0x10,"err_generic");
+
+ /*
+ * Note to DEC hardware people.
+ * Dropping the notion of interrupting on
+ * DMA completions (or at least make it optional)
+ * would save TWO interrupts out of the SEVEN that
+ * are currently requested for a non-disconnecting
+ * READ or WRITE operation.
+ */
+ if (ds & SII_DTR_DONE)
+ return TRUE;
+
+ /* this is a band-aid */
+ if ((SCSI_PHASE(cond) == SII_PHASE_DISC) &&
+ (cs & SII_CON_SCH)) {
+ ds &= ~7;
+ ds |= SII_PHASE_DISC;
+ (void) (*sii->script->action)(sii,cs,ds);
+ return FALSE;
+ }
+
+ /* TK50s are slow to connect, forgive em */
+ if ((SCSI_PHASE(ds) == SCSI_PHASE_MSG_OUT) ||
+ (SCSI_PHASE(cond) == SCSI_PHASE_MSG_OUT))
+ return TRUE;
+ if ((SCSI_PHASE(cond) == SCSI_PHASE_CMD) &&
+ ((SCSI_PHASE(ds) == 0) || (SCSI_PHASE(ds) == 4) || (SCSI_PHASE(ds) == 5)))
+ return TRUE;
+
+ /* transition to status ? */
+ if (SCSI_PHASE(ds) == SCSI_PHASE_STATUS)
+ return sii_err_to_status(sii, cs, ds);
+
+ return sii_err_phase_mismatch(sii,cs,ds);
+}
+
+/*
+ * Handle generic errors that are reported as
+ * an unexpected change to STATUS phase
+ */
+sii_err_to_status(sii, cs, ds)
+ register sii_softc_t sii;
+{
+ script_t scp = sii->script;
+
+ LOG(0x20,"err_tostatus");
+ while (SCSI_PHASE(scp->condition) != SCSI_PHASE_STATUS)
+ scp++;
+ sii->script = scp;
+#if 0
+ /*
+ * Normally, we would already be able to say the command
+ * is in error, e.g. the tape had a filemark or something.
+ * But in case we do disconnected mode WRITEs, it is quite
+ * common that the following happens:
+ * dma_out -> disconnect -> reconnect
+ * and our script might expect at this point that the dma
+ * had to be restarted (it didn't know it was completed
+ * because the tape record is shorter than we asked for).
+ * And in any event.. it is both correct and cleaner to
+ * declare error iff the STATUS byte says so.
+ */
+ sii->done = SCSI_RET_NEED_SENSE;
+#endif
+ return TRUE;
+}
+
+/*
+ * Watch for a disconnection
+ */
+boolean_t
+sii_err_disconn(sii, cs, ds)
+ register sii_softc_t sii;
+ register unsigned cs, ds;
+{
+ register sii_padded_regmap_t *regs;
+ register target_info_t *tgt;
+ int count;
+ int from;
+ unsigned char obb;
+ int delayed_copy = 0;
+
+ LOG(0x18,"err_disconn");
+
+ if (SCSI_PHASE(ds) != SCSI_PHASE_MSG_IN)
+ return sii_err_generic(sii, cs, ds);
+
+ regs = sii->regs;
+
+ if ((regs->sii_cmd & (SII_CMD_DMA|SII_CMD_XFER)) ==
+ (SII_CMD_DMA|SII_CMD_XFER)) {
+ /* stop dma and wait */
+ regs->sii_cmd &= ~(SII_CMD_DMA|SII_CMD_XFER);
+ (void) sii_wait(&regs->sii_data_csr, SII_DTR_DONE,1);
+/* later: regs->sii_data_csr = SII_DTR_DONE; */
+ }
+
+ SII_COMMAND(regs,cs,ds,0);
+
+ tgt = sii->active_target;
+ switch (SCSI_PHASE(sii->script->condition)) {
+ case SCSI_PHASE_DATAO:
+ LOG(0x1b,"+DATAO");
+ if (sii->out_count) {
+ register int xferred, offset;
+
+ xferred = sii->out_count - regs->sii_dma_len;
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ if (tgt->transient_state.dma_offset == PER_TGT_BUFF_SIZE)
+ tgt->transient_state.dma_offset = 0;
+
+ delayed_copy = 1;
+ from = offset;
+ count = xferred;
+
+ }
+ tgt->transient_state.script = sii_script_restart_data_out;
+ break;
+
+ case SCSI_PHASE_DATAI:
+ LOG(0x19,"+DATAI");
+ if (sii->in_count) {
+ register int offset, xferred;
+
+ obb = regs->sii_dma_1st_byte;
+
+ xferred = sii->in_count - regs->sii_dma_len;
+ assert(xferred > 0);
+ if (ds & SII_DTR_OBB) {
+ tgt->transient_state.isa_oddbb = TRUE;
+ tgt->transient_state.oddbb = obb;
+ }
+ tgt->transient_state.in_count -= xferred;
+ assert(tgt->transient_state.in_count > 0);
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ if (tgt->transient_state.dma_offset == PER_TGT_BUFF_SIZE)
+ tgt->transient_state.dma_offset = 0;
+
+ /* copy what we got */
+
+ delayed_copy = 2;
+ from = offset;
+ count = xferred;
+
+ }
+ tgt->transient_state.script = sii_script_restart_data_in;
+ break;
+
+ case SCSI_PHASE_STATUS:
+ /* will have to restart dma */
+ if (count = regs->sii_dma_len) {
+ (void) sii_wait(&regs->sii_data_csr, SII_DTR_DONE,1);
+ regs->sii_data_csr = SII_DTR_DONE;
+ }
+ if (sii->state & SII_STATE_DMA_IN) {
+ register int offset, xferred;
+
+ obb = regs->sii_dma_1st_byte;
+
+ LOG(0x1a,"+STATUS+R");
+
+ xferred = sii->in_count - count;
+ assert(xferred > 0);
+ if (ds & SII_DTR_OBB) {
+ tgt->transient_state.isa_oddbb = TRUE;
+ tgt->transient_state.oddbb = obb;
+ }
+ tgt->transient_state.in_count -= xferred;
+/* assert(tgt->transient_state.in_count > 0);*/
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ if (tgt->transient_state.dma_offset == PER_TGT_BUFF_SIZE)
+ tgt->transient_state.dma_offset = 0;
+
+ /* copy what we got */
+
+ delayed_copy = 2;
+ from = offset;
+ count = xferred;
+
+ tgt->transient_state.script = sii_script_restart_data_in;
+ if (tgt->transient_state.in_count == 0)
+ tgt->transient_state.script++;
+
+ } else {
+
+ LOG(0x1d,"+STATUS+W");
+
+ if ((count == 0) && (tgt->transient_state.out_count == sii->out_count)) {
+ /* all done */
+ tgt->transient_state.script = &sii_script_restart_data_out[1];
+ tgt->transient_state.out_count = 0;
+ } else {
+ register int xferred, offset;
+
+ /* how much we xferred */
+ xferred = sii->out_count - count;
+
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+ offset = tgt->transient_state.dma_offset;
+ tgt->transient_state.dma_offset += xferred;
+ if (tgt->transient_state.dma_offset == PER_TGT_BUFF_SIZE)
+ tgt->transient_state.dma_offset = 0;
+
+ delayed_copy = 1;
+ from = offset;
+ count = xferred;
+
+ tgt->transient_state.script = sii_script_restart_data_out;
+ }
+ sii->out_count = 0;
+ }
+ break;
+ case SII_PHASE_DISC: /* sometimes disconnects and phase remains */
+ return sii_err_generic(sii, cs, ds);
+ default:
+ gimmeabreak();
+ }
+ regs->sii_csr &= ~SII_CSR_RSE;
+ sii_msg_in(sii,cs,ds);
+ sii->script = sii_script_disconnect;
+ regs->sii_cmd = SII_CMD_DMA|SII_CMD_XFER|SCSI_PHASE_MSG_IN|
+ SII_CON_CON|(regs->sii_conn_csr & SII_CON_DST);
+ wbflush();
+ if (delayed_copy == 2)
+ careful_copyin_gap16( tgt, from, count, ds & SII_DTR_OBB, obb);
+ else if (delayed_copy == 1)
+ careful_copyout_gap16( tgt, from, count);
+
+ return FALSE;
+}
+
+/*
+ * Suppose someone reads the specs as they read the Bible.
+ * They would send these unnecessary restore-pointer msgs
+ * in reconnect phases. If this were a SCSI-2 modify-pointer
+ * message I could understand it, but... oh well.
+ */
+sii_err_rdp(sii, cs, ds)
+ register sii_softc_t sii;
+{
+ register sii_padded_regmap_t *regs;
+ register target_info_t *tgt;
+
+	LOG(0x24,"err_rdp");
+
+ /* One chance */
+ sii->error_handler = sii->active_target->transient_state.handler;
+
+ if (SCSI_PHASE(ds) != SCSI_PHASE_MSG_IN)
+ return sii_err_generic(sii, cs, ds);
+
+ regs = sii->regs;
+
+ if ((ds & SII_DTR_IBF) == 0)
+ ds = sii_wait(&regs->sii_data_csr, SII_DTR_IBF,1);
+
+ if (regs->sii_data != SCSI_RESTORE_POINTERS)
+ return sii_err_disconn(sii, cs, ds);
+
+ regs->sii_cmd = SII_CMD_XFER|SII_CMD_CON|SII_CMD_DST|SCSI_PHASE_MSG_IN;
+ wbflush();
+
+ (void) sii_wait(&regs->sii_data_csr, SII_DTR_DONE,1);
+ regs->sii_data_csr = SII_DTR_DONE;
+
+ return FALSE;
+}
+
+/*
+ * Handle strange, as yet unexplained interrupts and error
+ * situations which, eventually, I will be old and wise
+ * enough to deal with properly, with preventive care.
+ */
+sii_err_phase_mismatch(sii, cs, ds)
+ register sii_softc_t sii;
+{
+ register sii_padded_regmap_t *regs = sii->regs;
+ register int match;
+
+ LOG(0x23,"err_mismatch");
+
+ match = SCSI_PHASE(sii->script->condition);
+
+ /* dmain interrupted */
+ if ((match == SCSI_PHASE_STATUS) && (SCSI_PHASE(ds) == SCSI_PHASE_DATAI)) {
+ register int xferred;
+ register char *p;
+
+ if (regs->sii_dma_len <= 1) {
+/*if (scsi_debug)*/
+printf("[DMAINZERO %x %x %x]", cs, ds, regs->sii_dma_len);
+ if (regs->sii_dma_len == 0) {
+ regs->sii_dma_len = sii->in_count;
+ wbflush();
+ regs->sii_cmd = sii->script[-1].command;
+ }
+ return FALSE;
+ }
+
+ /* This happens when you do not "init" the prom
+ and the fifo is screwed up */
+ xferred = sii->in_count - regs->sii_dma_len;
+ p = (char*)( regs->sii_dma_adr_low | ((regs->sii_dma_adr_hi&3)<<16) );
+ p += xferred;
+if (scsi_debug)
+printf("[DMAIN %x %x %x]", cs, ds, xferred);
+ /* odd bytes are not xferred */
+ if (((unsigned)p) & 0x1){
+ register short *oddb;
+ oddb = (short*)(sii->buff) + ((unsigned)p-1);/*shifts*/
+ *oddb = regs->sii_dma_1st_byte;
+ }
+ regs->sii_dma_adr_low = ((unsigned)p);
+		regs->sii_dma_adr_hi = ((unsigned)p) >> 16;
+ wbflush();
+ regs->sii_cmd = sii->script[-1].command;
+ wbflush();
+ return FALSE;
+ } else
+ /* dmaout interrupted */
+ if ((match == SCSI_PHASE_STATUS) && (SCSI_PHASE(ds) == SCSI_PHASE_DATAO)) {
+ register int xferred;
+ register char *p;
+
+ if (regs->sii_dma_len <= 1) {
+/*if (scsi_debug)*/
+printf("[DMAOUTZERO %x %x %x]", cs, ds, regs->sii_dma_len);
+gimmeabreak();
+ if (regs->sii_dma_len == 0) {
+ regs->sii_dma_len = sii->out_count;
+ wbflush();
+ regs->sii_cmd = sii->script[-1].command;
+ }
+ return FALSE;
+ }
+
+ xferred = sii->out_count - regs->sii_dma_len;
+/*if (scsi_debug)*/
+printf("[DMAOUT %x %x %x %x]", cs, ds, regs->sii_dma_len, sii->out_count);
+ sii->out_count -= xferred;
+ p = (char*)( regs->sii_dma_adr_low | ((regs->sii_dma_adr_hi&3)<<16) );
+ p += xferred;
+ regs->sii_dma_adr_low = ((unsigned)p);
+		regs->sii_dma_adr_hi = ((unsigned)p) >> 16;
+ wbflush();
+ regs->sii_cmd = sii->script[-1].command;
+ wbflush();
+ return FALSE;
+ }
+#if 1 /* ?? */
+ /* stuck in cmd phase */
+ else if ((SCSI_PHASE(ds) == SCSI_PHASE_CMD) &&
+ ((match == SCSI_PHASE_DATAI) || (match == SCSI_PHASE_DATAO))) {
+/*if (scsi_debug)*/
+printf("[CMD %x %x %x %x]", cs, ds, sii->cmd_count, regs->sii_dma_len);
+ if (regs->sii_dma_len != 0) {
+ /* ouch, this hurts */
+ register int xferred;
+ register char *p;
+
+ xferred = sii->cmd_count - regs->sii_dma_len;
+ sii->cmd_count -= xferred;
+ p = (char*)( regs->sii_dma_adr_low | ((regs->sii_dma_adr_hi&3)<<16) );
+ p += xferred;
+ regs->sii_dma_adr_low = ((unsigned)p);
+			regs->sii_dma_adr_hi = ((unsigned)p) >> 16;
+ wbflush();
+ regs->sii_cmd = 0x8842;
+ wbflush();
+			return FALSE;
+
+ }
+ SII_ACK(regs,cs,ds,0/*match*/);
+ wbflush();
+		return FALSE;
+ }
+#endif
+ else {
+ printf("{D%x %x}", cs, ds);
+/* if (scsi_debug)*/ gimmeabreak();
+ }
+ return FALSE;
+}
+
+/*
+ * Watchdog
+ *
+ * There are two ways in which I have seen the chip
+ * get stuck: a target never reconnected, or the
+ * selection deadlocked. Both cases involved a tk50,
+ * but elsewhere it showed up with hitachi disks too.
+ */
+sii_reset_scsibus(sii)
+ register sii_softc_t sii;
+{
+ register target_info_t *tgt = sii->active_target;
+ register sii_padded_regmap_t *regs = sii->regs;
+
+	/* see if SIP still set --> device down or non-existent */
+ if ((regs->sii_conn_csr & (SII_CON_LST|SII_CON_SIP)) == SII_CON_SIP){
+ if (tgt) {
+ log(LOG_KERN, "Target %d went offline\n",
+ tgt->target_id);
+ tgt->flags = 0;
+ return sii_probe_timeout(tgt);
+ }
+ /* else fall through */
+ }
+
+ if (tgt)
+ log(LOG_KERN, "Target %d was active, cmd x%x in x%x out x%x Sin x%x Sou x%x dmalen x%x\n",
+ tgt->target_id, tgt->cur_cmd,
+ tgt->transient_state.in_count, tgt->transient_state.out_count,
+ sii->in_count, sii->out_count,
+ sii->regs->sii_dma_len);
+
+ sii->regs->sii_cmd = SII_CMD_RST;
+ delay(25);
+}
+
+/*
+ * Copy routines that avoid odd pointers
+ */
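+/*
+ * Data in the on-board SCSI buffer is laid out with gaps (hence the
+ * *_gap16() copy routines and the doubled "<< 1" offsets below); the
+ * wrappers here also back both pointers up to an even offset first,
+ * so the underlying copy never starts on an odd byte.
+ */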
+boolean_t nocopyin = FALSE;
+careful_copyin_gap16(tgt, offset, len, isaobb, obb)
+ register target_info_t *tgt;
+ unsigned char obb;
+{
+ register char *from, *to;
+ register int count;
+
+ count = tgt->transient_state.copy_count;
+
+ from = tgt->dma_ptr + (offset << 1);
+ to = tgt->ior->io_data + count;
+ tgt->transient_state.copy_count = count + len;
+ if (count & 1) {
+ from -= (1 << 1);
+ to -= 1;
+ len += 1;
+ }
+if (nocopyin) return;/*timing*/
+ copyin_gap16( from, to, len);
+ /* check for last, poor little odd byte */
+ if (isaobb) {
+ to += len;
+ to[-1] = obb;
+ }
+}
+
+careful_copyout_gap16( tgt, offset, len)
+ register target_info_t *tgt;
+{
+ register char *from, *to;
+ register int count, olen;
+ unsigned char c;
+ char *p;
+
+ count = tgt->ior->io_count - tgt->transient_state.copy_count;
+ if (count > 0) {
+
+ len = u_min(count, len);
+ offset += tgt->transient_state.cmd_count;
+
+ count = tgt->transient_state.copy_count;
+ tgt->transient_state.copy_count = count + len;
+
+ from = tgt->ior->io_data + count;
+ to = tgt->dma_ptr + (offset << 1);
+
+ /* the scsi buffer acts weirdo at times */
+ if ((olen=len) & 1) {
+ p = tgt->dma_ptr + ((offset + olen - 1)<<1);
+ c = (*(unsigned short*)p) >> 8;/*!MSF*/
+ }
+
+ if (count & 1) {
+ from -= 1;
+ to -= (1 << 1);
+ len += 1;
+ }
+
+ count = copyout_gap16(from, to, len);
+
+		/* the scsi buffer acts weirdo at times: if the copy
+		   clobbered the high byte of the last (odd) word saved
+		   above, put it back by rewriting the whole 16-bit word */
+ if (olen & 1) {
+ unsigned char cv;
+ cv = (*(unsigned short*)p) >> 8;/*!MSF*/
+ if (c != cv) {
+ /*
+ * Scott Fahlman would say
+ * "Use a big plier!"
+ */
+ unsigned short s;
+ volatile unsigned short *pp;
+ pp = (volatile unsigned short*)p;
+ s = (c << 8) | (from[len-1] & 0xff);
+ do {
+ *pp = s;
+ } while (*pp != s);
+ }
+ }
+ }
+}
+
+#endif	/* NSII > 0 */
+
diff --git a/scsi/adapters/scsi_89352.h b/scsi/adapters/scsi_89352.h
new file mode 100644
index 00000000..85c579fe
--- /dev/null
+++ b/scsi/adapters/scsi_89352.h
@@ -0,0 +1,231 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_89352.h
+ * Author: Daniel Stodolsky, Carnegie Mellon University
+ * Date: 06/91
+ *
+ * Defines for the Fujitsu MB89352 SCSI Protocol Controller (HBA)
+ * The definitions herein also cover the 89351, 87035/36, 87033B
+ */
+
+/*
+ * Register map, padded as needed. Matches Hardware, do not screw up.
+ */
+
+#define vuc volatile unsigned char
+
+typedef struct
+{
+ vuc spc_bdid; /* Bus Device ID (R/W) */
+#define spc_myid spc_bdid
+ char pad0[3];
+ vuc spc_sctl; /* SPC Control register (R/W) */
+ char pad1[3];
+ vuc spc_scmd; /* Command Register (R/W) */
+ char pad2[3];
+ vuc spc_tmod; /* Transmit Mode Register (synch models) */
+ char pad3[3];
+ vuc spc_ints; /* Interrupt sense (R); Interrupt Reset (w) */
+ char pad4[3];
+ vuc spc_psns; /* Phase Sense (R); SPC Diagnostic Control (w) */
+#define spc_phase spc_psns
+ char pad5[3];
+ vuc spc_ssts; /* SPC status (R/O) */
+ char pad6[3];
+ vuc spc_serr; /* SPC error status (R/O) */
+ char pad7[3];
+ vuc spc_pctl; /* Phase Control (R/W) */
+ char pad8[3];
+  vuc	spc_mbc;	/* Modified Byte Counter (R/O) */
+ char pad9[3];
+ vuc spc_dreg; /* Data Register (R/W) */
+ char pad10[3];
+ vuc spc_temp; /* Temporary Register (R/W) */
+ char pad11[3];
+ vuc spc_tch; /* Transfer Counter High (R/W) */
+ char pad12[3];
+ vuc spc_tcm; /* Transfer Counter Middle (R/W) */
+ char pad13[3];
+ vuc spc_tcl; /* Transfer Counter Low (R/W) */
+ char pad14[3];
+ vuc spc_exbf; /* External Buffer (synch models) */
+ char pad15[3];
+} spc_regmap_t;
+
+#undef vuc
+
+/*
+ * Control register
+ */
+
+#define SPC_SCTL_DISABLE 0x80
+#define SPC_SCTL_RESET 0x40
+#define SPC_SCTL_DIAGMODE 0x20
+#define SPC_SCTL_ARB_EBL 0x10
+#define SPC_SCTL_PAR_EBL 0x08
+#define SPC_SCTL_SEL_EBL 0x04
+#define SPC_SCTL_RSEL_EBL 0x02
+#define SPC_SCTL_IE 0x01
+
+/*
+ * Command register
+ */
+
+#define SPC_SCMD_CMDMASK 0xe0
+# define SPC_SCMD_C_ACKREQ_S 0xe0
+# define SPC_SCMD_C_ACKREQ_C 0xc0
+# define SPC_SCMD_C_STOP_X 0xa0
+# define SPC_SCMD_C_XFER 0x80
+# define SPC_SCMD_C_ATN_S 0x60
+# define SPC_SCMD_C_ATN_C 0x40
+# define SPC_SCMD_C_SELECT 0x20
+# define SPC_SCMD_C_BUS_RLSE 0x00
+#define SPC_SCMD_BUSRST 0x10
+#define SPC_SCMD_INTERCEPT_X 0x08
+#define SPC_SCMD_PROGRAMMED_X 0x04
+#define SPC_SCMD_PAD_X 0x01
+
+/*
+ * Transfer mode register (MB87033B/35/36)
+ */
+
+#define SPC_TMOD_SYNC_X 0x80
+#define SPC_TMOD_OFFSET_MASK 0x70
+# define SPC_OFFSET(x) (((x)<<4)&SPC_TMOD_OFFSET_MASK)
+#define SPC_TMOD_PERIOD_MASK 0xc0
+# define SPC_PERIOD(x) (((x)<<2)&SPC_TMOD_PERIOD_MASK)
+#define SPC_TMOD_EXP_COUNTER 0x01
+
+/*
+ * Interrupt cause register
+ */
+
+#define SPC_INTS_SELECTED 0x80
+#define SPC_INTS_RESELECTED 0x40
+#define SPC_INTS_DISC 0x20
+#define SPC_INTS_DONE 0x10
+#define SPC_INTS_BUSREQ 0x08
+#define SPC_INTS_TIMEOUT 0x04
+#define SPC_INTS_ERROR 0x02
+#define SPC_INTS_RESET 0x01
+
+/*
+ * SCSI Bus signals ("phase")
+ */
+
+#define SPC_BUS_REQ 0x80 /* rw */
+#define SPC_BUS_ACK 0x40 /* rw */
+#define SPC_BUS_ATN 0x20 /* ro */
+# define SPC_DIAG_ENBL_XFER 0x20 /* wo */
+#define SPC_BUS_SEL 0x10 /* ro */
+#define SPC_BUS_BSY 0x08 /* rw */
+#define SPC_BUS_MSG 0x04 /* rw */
+#define SPC_BUS_CD 0x02 /* rw */
+#define SPC_BUS_IO 0x01 /* rw */
+
+#define SPC_CUR_PHASE(x) SCSI_PHASE(x)
+
+#define SPC_BSY(r) (r->spc_phase & SPC_BUS_BSY)
+
+/*
+ * Chip status register
+ */
+
+#define SPC_SSTS_INI_CON 0x80
+#define SPC_SSTS_TGT_CON 0x40
+#define SPC_SSTS_BUSY 0x20
+#define SPC_SSTS_XIP 0x10
+#define SPC_SSTS_RST 0x08
+#define SPC_SSTS_TC0 0x04
+#define SPC_SSTS_FIFO_FULL 0x02
+#define SPC_SSTS_FIFO_EMPTY 0x01
+
+/*
+ * Error register
+ */
+
+#define SPC_SERR_SEL 0x80 /* Selected */
+#define SPC_SERR_RSEL 0x40 /* Reselected */
+#define SPC_SERR_DISC 0x20 /* Disconnected */
+#define SPC_SERR_CMDC 0x10 /* Command Complete */
+#define SPC_SERR_SRVQ 0x08 /* Service Required */
+#define SPC_SERR_TIMO 0x04 /* Timeout */
+#define SPC_SERR_HARDERR 0x02 /* SPC Hard Error */
+#define SPC_SERR_RSTC 0x01 /* Reset Condition */
+
+/*
+ * Phase control register
+ *
+ * [use SPC_CUR_PHASE() here too]
+ */
+
+#define SPC_PCTL_BFREE_IE 0x80 /* Bus free (disconnected) */
+#define SPC_PCTL_LST_IE 0x40 /* lost arbit (87033) */
+#define SPC_PCTL_ATN_IE 0x20 /* ATN set (87033) */
+#define SPC_PCTL_RST_DIS 0x10 /* RST asserted */
+
+/*
+ * Modified byte counter register
+ */
+
+#define SPC_MBC_ECNT_MASK 0xf0 /* 87033 only */
+# define SPC_MBC_ECNT_GET(x) (((x)&SPC_MBC_ECNT_MASK)>>4)
+# define SPC_MBC_ECNT_PUT(x) (((x)<<4)&SPC_MBC_ECNT_MASK)
+#define SPC_MBC_MBC_MASK 0x0f
+# define SPC_MBC_GET(x) ((x)&SPC_MBC_MBC_MASK)
+# define SPC_MBC_PUT(x) ((x)&SPC_MBC_MBC_MASK)
+
+/*
+ * Transfer counter register(s)
+ */
+
+#define SPC_TC_PUT(ptr,val) { \
+ (ptr)->spc_tch = (((val)>>16)&0xff); \
+ (ptr)->spc_tcm = (((val)>> 8)&0xff); \
+ (ptr)->spc_tcl = (((val) )&0xff); \
+ }
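+/*
+ * For example, SPC_TC_PUT(regs, 0x012345) loads spc_tch = 0x01,
+ * spc_tcm = 0x23 and spc_tcl = 0x45; SPC_TC_GET() reassembles the
+ * same 24-bit count from the three byte-wide counter registers.
+ */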
+
+#define SPC_TC_GET(ptr,val) { \
+ (val) = (((ptr)->spc_tch & 0xff )<<16) |\
+		(((ptr)->spc_tcm & 0xff )<<8) |\
+		((ptr)->spc_tcl & 0xff);\
+ }
+
+/* 87033 in expanded mode */
+#define SPC_XTC_PUT(ptr,val) { \
+ (ptr)->spc_mbc = SPC_MBC_ECNT_PUT(((val)>>24));\
+ (ptr)->spc_tch = (((val)>>16)&0xff); \
+ (ptr)->spc_tcm = (((val)>> 8)&0xff); \
+ (ptr)->spc_tcl = (((val) )&0xff); \
+ }
+
+#define SPC_XTC_GET(ptr,val) { \
+ (val) = (SPC_MBC_ECNT_GET((ptr)->spc_mbc)<<24)|\
+ (((ptr)->spc_tch)<<16)|(((ptr)->spc_tcm)<<8)|\
+ ((ptr)->spc_tcl);\
+ }
+
diff --git a/scsi/adapters/scsi_89352_hdw.c b/scsi/adapters/scsi_89352_hdw.c
new file mode 100644
index 00000000..5672cb65
--- /dev/null
+++ b/scsi/adapters/scsi_89352_hdw.c
@@ -0,0 +1,2192 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_89352_hdw.c
+ * Author: Daniel Stodolsky, Carnegie Mellon University
+ * Date: 06/91
+ *
+ * Bottom layer of the SCSI driver: chip-dependent functions
+ *
+ * This file contains the code that is specific to the Fujitsu MB89352
+ * SCSI chip (Host Bus Adapter in SCSI parlance): probing, start
+ * operation, and interrupt routine.
+ */
+
+/*
+ * This layer works based on small simple 'scripts' that are installed
+ * at the start of the command and drive the chip to completion.
+ * The idea comes from the specs of the NCR 53C700 'script' processor.
+ *
+ * There are various reasons for this, mainly
+ * - Performance: identify the common (successful) path, and follow it;
+ * at interrupt time no code is needed to find the current status
+ * - Code size: it should be easy to compact common operations
+ * - Adaptability: the code skeleton should adapt to different chips without
+ * terrible complications.
+ * - Error handling: it is easy to modify the actions performed
+ *   by the scripts to cope with strange but well-identified sequences
+ *
+ */
+
+/*
+ *
+ *
+ * Known Headaches/Features with this chip.
+ *
+ * (1) After the interrupt raised by select, the phase sense (psns)
+ * and SPC status (ssts) registers do not display the correct values
+ * until the REQ line (via psns) is high. (danner@cs.cmu.edu 6/11/91)
+ *
+ * (2) After a data in phase, the command complete interrupt may be raised
+ * before the psns, ssts, and transfer counter registers settle. The reset
+ * acknowledge or request command should not be issued until they settle.
+ * (danner@cs.cmu.edu 6/14/91)
+ *
+ * (3) In general, an interrupt can be raised before the psns and ssts have
+ * meaningful values. One should wait for the psns to show the REQ bit (0x80)
+ * set before expecting meaningful values, with the exception of (2) above.
+ * Currently this is handled by spc_err_generic ("Late REQ"). (This problem
+ * is really a refinement of (1)). (danner@cs.cmu.edu 6/14/91)
+ *
+ * (4) When issuing a multibyte command after a select with attention,
+ * the chip will automatically drop ATN before sending the last byte of the
+ * message, in accordance with the ANSI SCSI standard. This requires, of course,
+ * that the transfer counter be an accurate representation of the amount of data
+ * to be transferred. (danner@cs.cmu.edu 6/14/91)
+ *
+ */
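+/*
+ * Items (1) and (3) above are the reason the SPC_WAIT_PHASE macros
+ * below insist on seeing SPC_BUS_REQ in the psns before trusting
+ * the phase bits.
+ */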
+
+#if 0
+
+#include <platforms.h>
+
+#include <scsi.h>
+
+#if NSCSI > 0
+
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <chips/busses.h>
+#include <scsi/compat_30.h>
+#include <sys/syslog.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+#include <scsi/adapters/scsi_89352.h>
+
+#include <machine/db_machdep.h> /*4proto*/
+#include <ddb/db_sym.h> /*4proto*/
+
+#ifdef LUNA88K
+#include <luna88k/board.h>
+#define SPC_DEFAULT_ADDRESS (caddr_t) SCSI_ADDR
+#endif
+
+#ifndef SPC_DEFAULT_ADDRESS /* cross compile check */
+#define SPC_DEFAULT_ADDRESS (caddr_t) 0
+#endif
+
+
+/* external/forward declarations */
+int spc_probe(), spc_slave(), spc_attach(), scsi_go();
+void spc_reset(), spc_attempt_selection(), spc_target_intr(), spc_bus_reset();
+/*
+ * Statically allocated command & temp buffers
+ * This way we can attach/detach drives on-fly
+ */
+#define PER_TGT_BUFF_DATA 256
+
+static char spc_buffer[NSCSI * 8 * PER_TGT_BUFF_DATA];
+
+/*
+ * Macros to make certain things a little more readable
+ */
+
+/*
+  wait for the desired phase to appear, but make sure the REQ bit is set in the
+  psns (otherwise the values tend to float/be garbage).
+*/
+
+#define SPC_WAIT_PHASE(p) while(((regs->spc_psns & (SPC_BUS_REQ|SCSI_PHASE_MASK))) \
+ != (SPC_BUS_REQ|(p)))
+
+/*
+  wait until a phase different from p appears in the psns. Since the phase is
+  only valid when the REQ bit is set, don't test unless REQ is set. So spin
+  until REQ is high and the phase is no longer p.
+*/
+
+#define SPC_WAIT_PHASE_VANISH(p) while(1) { int _psns_ = regs->spc_psns; \
+ if ((_psns_ & SPC_BUS_REQ) && (_psns_ & SCSI_PHASE_MASK)!=p) break; }
+
+
+
+/* ?? */
+/* #define SPC_ACK(ptr,phase) (ptr)->spc_pctl = (phase) */
+
+/*
+ * A script has a two parts: a pre-condition and an action.
+ * The first triggers error handling if not satisfied and in
+ * our case it is formed by the current bus phase and connected
+ * condition as per bus status bits. The action part is just a
+ * function pointer, invoked in a standard way. The script
+ * pointer is advanced only if the action routine returns TRUE.
+ * See spc_intr() for how and where this is all done.
+ */
+
+typedef struct script {
+ char condition; /* expected state at interrupt */
+ int (*action)(); /* action routine */
+} *script_t;
+
+#define SCRIPT_MATCH(psns) (SPC_CUR_PHASE((psns))|((psns) & SPC_BUS_BSY))
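+/*
+ * The intended use, roughly (argument lists are illustrative; see
+ * spc_intr() for the real thing): compare SCRIPT_MATCH() against the
+ * condition, run the error handler on a mismatch, otherwise run the
+ * action and advance the script only if it returns TRUE, e.g.
+ *
+ *	if (SCRIPT_MATCH(psns) != spc->script->condition)
+ *		(*spc->error_handler)(spc, ints, psns);
+ *	else if ((*spc->script->action)(spc, ints, psns))
+ *		spc->script++;
+ */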
+
+/* ?? */
+#define SPC_PHASE_DISC 0x0 /* sort of .. */
+
+/* The active script is in the state expected right after the issue of a select */
+
+#define SCRIPT_SELECT(scp) (scp->action == spc_issue_command || \
+ scp->action == spc_issue_ident_and_command)
+
+/* forward decls of script actions */
+boolean_t
+ spc_dosynch(), /* negotiate synch xfer */
+ spc_xfer_in(), /* get data from target via dma */
+ spc_xfer_out(), /* send data to target via dma */
+ spc_get_status(), /* get status from target */
+ spc_end_transaction(), /* all come to an end */
+ spc_msg_in(), /* get disconnect message(s) */
+ spc_issue_command(), /* spit on the bus */
+ spc_issue_ident_and_command(), /* spit on the bus (with ATN) */
+ spc_disconnected(); /* current target disconnected */
+/* forward decls of error handlers */
+boolean_t
+ spc_err_generic(), /* generic error handler */
+ spc_err_disconn(); /* when a target disconnects */
+void gimmeabreak(); /* drop into the debugger */
+
+void spc_reset_scsibus();
+boolean_t spc_probe_target();
+
+scsi_ret_t spc_select_target();
+
+/*
+ * State descriptor for this layer. There is one such structure
+ * per (enabled) 89352 chip
+ */
+struct spc_softc {
+ watchdog_t wd;
+	spc_regmap_t	*regs;		/* 89352 registers */
+ char *buff; /* scratch buffer memory */
+ char *data_ptr; /* orig/dest memory */
+ script_t script;
+ int (*error_handler)();
+ int in_count; /* amnt we expect to receive */
+ int out_count; /* amnt we are going to ship */
+
+ volatile char state;
+#define SPC_STATE_BUSY 0x01 /* selecting or currently connected */
+#define SPC_STATE_TARGET 0x04 /* currently selected as target */
+#define SPC_STATE_COLLISION 0x08 /* lost selection attempt */
+#define SPC_STATE_DMA_IN 0x10 /* tgt --> initiator xfer */
+
+ unsigned char ntargets; /* how many alive on this scsibus */
+ unsigned char done;
+ unsigned char xxxx;
+
+ scsi_softc_t *sc;
+ target_info_t *active_target;
+
+ target_info_t *next_target; /* trying to seize bus */
+ queue_head_t waiting_targets;/* other targets competing for bus */
+ decl_simple_lock_data(,chiplock) /* Interlock */
+} spc_softc_data[NSCSI];
+
+typedef struct spc_softc *spc_softc_t;
+
+spc_softc_t spc_softc[NSCSI];
+
+/*
+ * Definition of the controller for the auto-configuration program.
+ */
+
+int spc_probe(), scsi_slave(), spc_go();
+void spc_intr();
+void scsi_attach();
+
+vm_offset_t spc_std[NSCSI] = { SPC_DEFAULT_ADDRESS };
+
+struct bus_device *spc_dinfo[NSCSI*8];
+struct bus_ctlr *spc_minfo[NSCSI];
+struct bus_driver spc_driver =
+ { spc_probe, scsi_slave, scsi_attach, spc_go, spc_std, "rz", spc_dinfo,
+ "spc", spc_minfo, BUS_INTR_B4_PROBE};
+
+/*
+ * Scripts
+ */
+
+struct script
+spc_script_data_in[] = {
+ { SCSI_PHASE_CMD|SPC_BUS_BSY, spc_issue_command},
+ { SCSI_PHASE_DATAI|SPC_BUS_BSY, spc_xfer_in},
+ { SCSI_PHASE_STATUS|SPC_BUS_BSY, spc_get_status},
+ { SCSI_PHASE_MSG_IN|SPC_BUS_BSY, spc_end_transaction}
+},
+
+spc_script_late_data_in[] = {
+ { SCSI_PHASE_MSG_OUT|SPC_BUS_BSY, spc_issue_ident_and_command},
+ { SCSI_PHASE_DATAI|SPC_BUS_BSY, spc_xfer_in},
+ { SCSI_PHASE_STATUS|SPC_BUS_BSY, spc_get_status},
+ { SCSI_PHASE_MSG_IN|SPC_BUS_BSY, spc_end_transaction}
+},
+
+spc_script_data_out[] = {
+ { SCSI_PHASE_CMD|SPC_BUS_BSY, spc_issue_command},
+ { SCSI_PHASE_DATAO|SPC_BUS_BSY, spc_xfer_out},
+ { SCSI_PHASE_STATUS|SPC_BUS_BSY, spc_get_status},
+ { SCSI_PHASE_MSG_IN|SPC_BUS_BSY, spc_end_transaction}
+},
+
+
+spc_script_late_data_out[] = {
+ { SCSI_PHASE_MSG_OUT|SPC_BUS_BSY, spc_issue_ident_and_command},
+ { SCSI_PHASE_DATAO|SPC_BUS_BSY, spc_xfer_out},
+ { SCSI_PHASE_STATUS|SPC_BUS_BSY, spc_get_status},
+ { SCSI_PHASE_MSG_IN|SPC_BUS_BSY, spc_end_transaction}
+},
+
+
+spc_script_cmd[] = {
+ { SCSI_PHASE_CMD|SPC_BUS_BSY, spc_issue_command},
+ { SCSI_PHASE_STATUS|SPC_BUS_BSY, spc_get_status},
+ { SCSI_PHASE_MSG_IN|SPC_BUS_BSY, spc_end_transaction}
+},
+
+spc_script_late_cmd[] = {
+ { SCSI_PHASE_MSG_OUT|SPC_BUS_BSY, spc_issue_ident_and_command},
+ { SCSI_PHASE_STATUS|SPC_BUS_BSY, spc_get_status},
+ { SCSI_PHASE_MSG_IN|SPC_BUS_BSY, spc_end_transaction}
+},
+
+/* Synchronous transfer neg(oti)ation */
+
+spc_script_try_synch[] = {
+ { SCSI_PHASE_MSG_OUT|SPC_BUS_BSY, spc_dosynch}
+},
+
+/* Disconnect sequence */
+
+spc_script_disconnect[] = {
+ { SPC_PHASE_DISC, spc_disconnected}
+};
+
+
+
+#define u_min(a,b) (((a) < (b)) ? (a) : (b))
+
+
+#define DEBUG
+#ifdef DEBUG
+
+int spc_state(base)
+ vm_offset_t base;
+{
+ register spc_regmap_t *regs;
+
+ if (base == 0)
+ base = (vm_offset_t) SPC_DEFAULT_ADDRESS;
+
+ regs = (spc_regmap_t*) (base);
+
+ db_printf("spc_bdid (bus device #): %x\n",regs->spc_bdid);
+ db_printf("spc_sctl (spc internal control): %x\n",regs->spc_sctl);
+ db_printf("spc_scmd (scp command): %x\n",regs->spc_scmd);
+ db_printf("spc_ints (spc interrupt): %x\n",regs->spc_ints);
+ db_printf("spc_psns (scsi bus phase): %x\n",regs->spc_psns);
+ db_printf("spc_ssts (spc internal status): %x\n",regs->spc_ssts);
+ db_printf("spc_serr (spc internal err stat): %x\n",regs->spc_serr);
+ db_printf("spc_pctl (scsi transfer phase): %x\n",regs->spc_pctl);
+ db_printf("spc_mbc (spc transfer data ct): %x\n",regs->spc_mbc);
+/* db_printf("spc_dreg (spc transfer data r/w): %x\n",regs->spc_dreg);*/
+ db_printf("spc_temp (scsi data bus control): %x\n",regs->spc_temp);
+ db_printf("spc_tch (transfer byte ct (MSB): %x\n",regs->spc_tch);
+ db_printf("spc_tcm (transfer byte ct (2nd): %x\n",regs->spc_tcm);
+ db_printf("spc_tcl (transfer byte ct (LSB): %x\n",regs->spc_tcl);
+
+ return 0;
+}
+
+int spc_target_state(tgt)
+ target_info_t *tgt;
+{
+ if (tgt == 0)
+ tgt = spc_softc[0]->active_target;
+ if (tgt == 0)
+ return 0;
+ db_printf("fl %x dma %x+%x cmd %x id %x per %x off %x ior %x ret %x\n",
+ tgt->flags, tgt->dma_ptr, tgt->transient_state.dma_offset,
+ tgt->cmd_ptr, tgt->target_id, tgt->sync_period, tgt->sync_offset,
+ tgt->ior, tgt->done);
+ if (tgt->flags & TGT_DISCONNECTED){
+ script_t spt;
+
+ spt = tgt->transient_state.script;
+ db_printf("disconnected at ");
+ db_printsym((db_expr_t)spt,1);
+ db_printf(": %x ", spt->condition);
+ db_printsym((db_expr_t)spt->action,1);
+ db_printf(", ");
+ db_printsym((db_expr_t)tgt->transient_state.handler, 1);
+ db_printf("\n");
+ }
+
+ return 0;
+}
+
+void spc_all_targets(unit)
+int unit;
+{
+ int i;
+ target_info_t *tgt;
+ for (i = 0; i < 8; i++) {
+ tgt = spc_softc[unit]->sc->target[i];
+ if (tgt)
+ spc_target_state(tgt);
+ }
+}
+
+int spc_script_state(unit)
+int unit;
+{
+ script_t spt = spc_softc[unit]->script;
+
+ if (spt == 0) return 0;
+ db_printsym((db_expr_t)spt,1);
+ db_printf(": %x ", spt->condition);
+ db_printsym((db_expr_t)spt->action,1);
+ db_printf(", ");
+ db_printsym((db_expr_t)spc_softc[unit]->error_handler, 1);
+ return 0;
+}
+
+#define PRINT(x) if (scsi_debug) printf x
+
+#define TRMAX 200
+int tr[TRMAX+3];
+int trpt, trpthi;
+#define TR(x) tr[trpt++] = x
+#define TRWRAP trpthi = trpt; trpt = 0;
+#define TRCHECK if (trpt > TRMAX) {TRWRAP}
+
+#define TRACE
+
+#ifdef TRACE
+
+#define LOGSIZE 256
+int spc_logpt;
+int spc_log[LOGSIZE];
+
+#define MAXLOG_VALUE 0x30
+struct {
+ char *name;
+ unsigned int count;
+} logtbl[MAXLOG_VALUE];
+
+static void LOG(e,f)
+ int e;
+ char *f;
+{
+ spc_log[spc_logpt++] = (e);
+ if (spc_logpt == LOGSIZE) spc_logpt = 0;
+ if ((e) < MAXLOG_VALUE) {
+ logtbl[(e)].name = (f);
+ logtbl[(e)].count++;
+ }
+}
+
+int spc_print_log(skip)
+ int skip;
+{
+ register int i, j;
+ register unsigned int c;
+
+ for (i = 0, j = spc_logpt; i < LOGSIZE; i++) {
+ c = spc_log[j];
+ if (++j == LOGSIZE) j = 0;
+ if (skip-- > 0)
+ continue;
+ if (c < MAXLOG_VALUE)
+ db_printf(" %s", logtbl[c].name);
+ else
+ db_printf("-0x%x", c - 0x80);
+ }
+ db_printf("\n");
+ return 0;
+}
+
+void spc_print_stat()
+{
+ register int i;
+ register char *p;
+ for (i = 0; i < MAXLOG_VALUE; i++) {
+ if (p = logtbl[i].name)
+ printf("%d %s\n", logtbl[i].count, p);
+ }
+}
+
+#else /* TRACE */
+#define LOG(e,f)
+#endif /* TRACE */
+
+#else /* DEBUG */
+#define PRINT(x)
+#define LOG(e,f)
+#define TR(x)
+#define TRCHECK
+#define TRWRAP
+#endif /* DEBUG */
+
+
+/*
+ * Probe/Slave/Attach functions
+ */
+
+/*
+ * Probe routine:
+ * Should find out (a) if the controller is
+ * present and (b) which/where slaves are present.
+ *
+ * Implementation:
+ *	Select each possible target on the bus, except of course
+ *	ourselves, and send it a TEST UNIT READY command.
+ */
+int spc_probe(reg, ui)
+ char *reg;
+ struct bus_ctlr *ui;
+{
+ int tmp;
+ int unit = ui->unit;
+ spc_softc_t spc = &spc_softc_data[unit];
+ int target_id, i;
+ scsi_softc_t *sc;
+ register spc_regmap_t *regs;
+ int s;
+ boolean_t did_banner = FALSE;
+ char *cmd_ptr;
+
+ /*
+ * We are only called if the chip is there,
+ * but make sure anyways..
+ */
+ regs = (spc_regmap_t *) (reg);
+ if (check_memory((unsigned)regs, 0))
+ return 0;
+
+#if notyet
+ /* Mappable version side */
+ SPC_probe(reg, ui);
+#endif
+
+ /*
+ * Initialize hw descriptor
+ */
+ spc_softc[unit] = spc;
+ spc->regs = regs;
+ spc->buff = spc_buffer;
+
+ queue_init(&spc->waiting_targets);
+
+ simple_lock_init(&spc->chiplock);
+
+ sc = scsi_master_alloc(unit, (char*)spc);
+ spc->sc = sc;
+
+ sc->go = spc_go;
+ sc->probe = spc_probe_target;
+ sc->watchdog = scsi_watchdog;
+ spc->wd.reset = spc_reset_scsibus;
+
+#ifdef MACH_KERNEL
+ sc->max_dma_data = -1; /* unlimited */
+#else
+ sc->max_dma_data = scsi_per_target_virtual;
+#endif
+
+ scsi_might_disconnect[unit] = 0; /* XXX for now */
+
+ /*
+ * Reset chip
+ */
+ s = splbio();
+ spc_reset(regs, TRUE);
+ tmp = regs->spc_ints = regs->spc_ints;
+
+ /*
+ * Our SCSI id on the bus.
+ */
+
+ sc->initiator_id = bdid_to_id(regs->spc_bdid);
+ printf("%s%d: my SCSI id is %d", ui->name, unit, sc->initiator_id);
+
+ /*
+ * For all possible targets, see if there is one and allocate
+ * a descriptor for it if it is there.
+ */
+ cmd_ptr = spc_buffer;
+ for (target_id = 0; target_id < 8; target_id++, cmd_ptr += PER_TGT_BUFF_DATA) {
+
+ register unsigned csr, ints;
+ scsi_status_byte_t status;
+
+ /* except of course ourselves */
+ if (target_id == sc->initiator_id)
+ continue;
+
+ if (spc_select_target( regs, sc->initiator_id, target_id, FALSE)
+ == SCSI_RET_DEVICE_DOWN) {
+ tmp = regs->spc_ints = regs->spc_ints;
+ continue;
+ }
+
+ printf(",%s%d", did_banner++ ? " " : " target(s) at ",
+ target_id);
+
+ /* should be command phase here: we selected wo ATN! */
+ SPC_WAIT_PHASE(SCSI_PHASE_CMD);
+
+ SPC_ACK(regs,SCSI_PHASE_CMD);
+
+ /* build command in buffer */
+ {
+ unsigned char *p = (unsigned char*) cmd_ptr;
+
+ p[0] = SCSI_CMD_TEST_UNIT_READY;
+ p[1] =
+ p[2] =
+ p[3] =
+ p[4] =
+ p[5] = 0;
+ }
+
+ spc_data_out(regs, SCSI_PHASE_CMD, 6, cmd_ptr);
+
+ SPC_WAIT_PHASE(SCSI_PHASE_STATUS);
+
+		/* should have received a Command Complete Interrupt */
+ while (!(regs->spc_ints))
+ delay(1);
+ ints = regs->spc_ints;
+ if (ints != (SPC_INTS_DONE))
+ gimmeabreak();
+ regs->spc_ints = ints;
+
+ SPC_ACK(regs,SCSI_PHASE_STATUS);
+
+ csr = spc_data_in(regs, SCSI_PHASE_STATUS, 1, &status.bits);
+ LOG(0x25,"din_count");
+ LOG(0x80+csr,0);
+
+ if (status.st.scsi_status_code != SCSI_ST_GOOD)
+ scsi_error( 0, SCSI_ERR_STATUS, status.bits, 0);
+
+		/* expect command complete interrupt */
+ while (!(regs->spc_ints & SPC_INTS_DONE))
+ delay(1);
+
+ /* clear all intr bits */
+ tmp = regs->spc_ints;
+ LOG(0x26,"ints");
+ LOG(0x80+tmp,0);
+ regs->spc_ints = SPC_INTS_DONE;
+
+ /* get cmd_complete message */
+ SPC_WAIT_PHASE(SCSI_PHASE_MSG_IN);
+
+ SPC_ACK(regs,SCSI_PHASE_MSG_IN);
+
+ csr = spc_data_in(regs,SCSI_PHASE_MSG_IN, 1,(unsigned char*)&i);
+ LOG(0x25,"din_count");
+ LOG(0x80+csr,0);
+
+ while (!(regs->spc_ints & SPC_INTS_DONE))
+ delay(1);
+
+ /* clear all done intr */
+ tmp = regs->spc_ints;
+ LOG(0x26,"ints");
+ LOG(0x80+tmp,0);
+ regs->spc_ints = SPC_INTS_DONE;
+
+ SPC_ACK(regs,SPC_PHASE_DISC);
+
+ /* release the bus */
+ regs->spc_pctl = ~SPC_PCTL_BFREE_IE & SPC_PHASE_DISC;
+ /* regs->spc_scmd = 0; only in TARGET mode */
+
+ /* wait for disconnected interrupt */
+ while (!(regs->spc_ints & SPC_INTS_DISC))
+ delay(1);
+
+ tmp = regs->spc_ints;
+ LOG(0x26,"ints");
+ LOG(0x80+tmp,0);
+ regs->spc_ints = tmp;
+ LOG(0x29,"Probed\n");
+
+ /*
+ * Found a target
+ */
+ spc->ntargets++;
+ {
+ register target_info_t *tgt;
+
+ tgt = scsi_slave_alloc(unit, target_id, (char*)spc);
+
+ /* "virtual" address for our use */
+ tgt->cmd_ptr = cmd_ptr;
+ /* "physical" address for dma engine (??) */
+ tgt->dma_ptr = 0;
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ fdma_init(&tgt->fdma, scsi_per_target_virtual);
+#endif /*MACH_KERNEL*/
+ }
+ }
+ printf(".\n");
+
+ splx(s);
+ return 1;
+}
+
+boolean_t
+spc_probe_target(tgt, ior)
+ target_info_t *tgt;
+ io_req_t ior;
+{
+ boolean_t newlywed;
+
+ newlywed = (tgt->cmd_ptr == 0);
+ if (newlywed) {
+ /* desc was allocated afresh */
+
+ /* "virtual" address for our use */
+ tgt->cmd_ptr = &spc_buffer[PER_TGT_BUFF_DATA*tgt->target_id +
+ (tgt->masterno*8*PER_TGT_BUFF_DATA) ];
+ /* "physical" address for dma engine */
+ tgt->dma_ptr = 0;
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ fdma_init(&tgt->fdma, scsi_per_target_virtual);
+#endif /*MACH_KERNEL*/
+
+ }
+
+ if (scsi_inquiry(tgt, SCSI_INQ_STD_DATA) == SCSI_RET_DEVICE_DOWN)
+ return FALSE;
+
+ tgt->flags = TGT_ALIVE;
+ return TRUE;
+}
+
+int bdid_to_id(bdid)
+ register int bdid;
+{
+ register int i;
+ for (i = 0; i < 8; i++)
+ if (bdid == (1 << i)) break;
+ return i;
+}
+
+scsi_ret_t
+spc_select_target(regs, myid, id, with_atn)
+ register spc_regmap_t *regs;
+ unsigned myid, id;
+ boolean_t with_atn;
+{
+ scsi_ret_t ret = SCSI_RET_RETRY;
+ int mask;
+
+ if ((regs->spc_phase & (SPC_BUS_BSY|SPC_BUS_SEL))
+#ifdef MIPS
+ && (regs->spc_phase & (SPC_BUS_BSY|SPC_BUS_SEL))
+ && (regs->spc_phase & (SPC_BUS_BSY|SPC_BUS_SEL))
+#endif
+ )
+ return ret;
+
+	/* setup for select:
+
+#if 0
+ (1) Toggle the Enable transfer bit (turning on the chips
+ SCSI bus drivers).
+#endif
+ (2) Enable arbitration, parity, reselect display, but
+ disable interrupt generation to the CPU (we are polling).
+ (3) Disable the bus free interrupt and set I/O direction
+ (4) If doing a select with attention, write the Set attention command.
+ Then delay 1 microsecond to avoid command races.
+
+ (5) Temp register gets 1<<target | 1<<initiator ids
+ (6) Timeout clocked into transfer registers
+ (7) Drive select (and optionally attention) onto the bus
+ (8) Wait 1/4 second for timeout.
+ */
+
+#if 0
+ regs->spc_psns = SPC_DIAG_ENBL_XFER; /* (1) */
+#endif
+
+ regs->spc_sctl = SPC_SCTL_ARB_EBL|
+ SPC_SCTL_PAR_EBL|
+ SPC_SCTL_RSEL_EBL; /* (2) */
+
+
+
+ mask = ~SPC_PCTL_BFREE_IE & regs->spc_pctl;
+ mask &= ~1; /* set I/O direction to be out */
+
+ regs->spc_pctl = mask; /* (3) */
+
+ if (with_atn)
+ {
+ regs->spc_scmd = SPC_SCMD_C_ATN_S; /* (4) */
+ delay(1);
+ }
+
+ regs->spc_temp = (1<<myid) | (1<<id); /* (5) */
+
+ SPC_TC_PUT(regs,0xfa004); /* (6) */
+
+ regs->spc_scmd = (SPC_SCMD_C_SELECT | SPC_SCMD_PROGRAMMED_X); /* (7) */
+
+ {
+ int count = 2500;
+
+ /* wait for an interrupt */
+ while ((regs->spc_ints)==0)
+ {
+ if (--count > 0)
+ delay(100);
+ else
+ {
+ goto nodev;
+ }
+ }
+
+ count = regs->spc_ints;
+ if (count & SPC_INTS_TIMEOUT)
+ {
+ /* sanity check. The ssts should have the busy bit set */
+ if (regs->spc_ssts & SPC_SSTS_BUSY)
+ goto nodev;
+ else
+ panic("spc_select_target: timeout");
+ }
+
+ /* otherwise, we should have received a
+ command complete interrupt */
+
+ if (count & ~SPC_INTS_DONE)
+ panic("spc_select_target");
+
+ } /* (8) */
+
+ /* we got a response - now connected; bus is in COMMAND phase */
+
+ regs->spc_ints = regs->spc_ints;
+ /* regs->spc_scmd = 0; target only */
+ return SCSI_RET_SUCCESS;
+nodev:
+ SPC_TC_PUT(regs,0); /* play it safe */
+ regs->spc_ints = regs->spc_ints;
+ /* regs->spc_scmd = 0; target only */
+ ret = SCSI_RET_DEVICE_DOWN;
+ return ret;
+}
+
+int spc_data_out(regs, phase, count, data)
+ int phase, count;
+ register spc_regmap_t *regs;
+ unsigned char *data;
+{
+	/* This is the one that sends data out. Returns how many
+	   bytes it did NOT xfer. */
+
+ if (SPC_CUR_PHASE(regs->spc_phase) != phase)
+ return count;
+
+ /* check that the fifo is empty. If not, cry */
+ if (!(regs->spc_ssts & SPC_SSTS_FIFO_EMPTY))
+ panic("spc_data_out: junk in fifo\n");
+
+ SPC_TC_PUT(regs,count);
+ regs->spc_scmd = SPC_SCMD_C_XFER | SPC_SCMD_PROGRAMMED_X;
+
+ /* wait for the SPC to start processing the command */
+ while ((regs->spc_ssts & (SPC_SSTS_INI_CON|SPC_SSTS_TGT_CON|SPC_SSTS_BUSY|SPC_SSTS_XIP))
+ != (SPC_SSTS_INI_CON|SPC_SSTS_BUSY|SPC_SSTS_XIP))
+ delay(1);
+
+ /* shovel out the data */
+
+ while (count)
+ {
+ /* check if interrupt is pending */
+ int ints = regs->spc_ints;
+ int ssts;
+
+ if (ints) /* something has gone wrong */
+ break;
+
+ ssts = regs->spc_ssts;
+ if (ssts & SPC_SSTS_FIFO_FULL) /* full fifo - can't write */
+ delay(1);
+ else
+ { /* spit out a byte */
+ regs->spc_dreg = *data;
+ data++;
+ count--;
+ }
+ }
+
+
+ if (count != 0)
+ {
+ /* need some sort of fifo cleanup if failed */
+ gimmeabreak(); /* Bytes stranded in the fifo */
+ }
+
+ return count;
+}
+
+int spc_data_in(regs, phase, count, data)
+ int phase, count;
+ register spc_regmap_t *regs;
+ unsigned char *data;
+{
+ if (SPC_CUR_PHASE(regs->spc_phase) != phase)
+ return count;
+
+ SPC_TC_PUT(regs,count);
+ regs->spc_scmd = SPC_SCMD_C_XFER | SPC_SCMD_PROGRAMMED_X;
+
+	/* The Fujitsu code sample suggests waiting for the top nibble of the SSTS to
+	   become 0xb, i.e. (ssts & 0xf0) == 0xb0. This state, however, is transient. If the
+	   message is short (say, 1 byte), it can get sucked into the fifo before
+ we ever get to look at the state. So instead, we are going to wait for
+ the fifo to become nonempty.
+ */
+
+ while ((regs->spc_ssts & SPC_SSTS_FIFO_EMPTY))
+ delay(1);
+
+ while (count)
+ {
+ int ints = regs->spc_ints;
+ int ssts;
+
+ /* If there is an interrupt pending besides command complete or
+ phase mismatch, give up */
+
+ if (ints & ~(SPC_INTS_DONE|SPC_INTS_BUSREQ))
+ break;
+
+ /* see if there is any data in the fifo */
+ ssts = regs->spc_ssts;
+ if ((ssts & SPC_SSTS_FIFO_EMPTY) == 0)
+ {
+ *data = regs->spc_dreg;
+ data++;
+ count--;
+ continue;
+ }
+
+ /* if empty, check if phase has changed */
+ if (SPC_CUR_PHASE(regs->spc_phase) != phase)
+ break;
+
+ }
+
+ if ((count==0) && (phase == SCSI_PHASE_MSG_IN))
+ {
+ while (!(regs->spc_ints & SPC_INTS_DONE))
+ delay(1);
+
+ /*
+ So the command complete interrupt has arrived. Now check that the
+ other two conditions we expect - The psns to be in ack|busy|message_in phase
+ and ssts to indicate connected|xfer in progress|busy|xfer counter 0|empty fifo
+ are true.
+ */
+ while (1)
+ {
+ register int psns = regs->spc_psns;
+ register int ssts = regs->spc_ssts;
+ register int sscon = ssts & (SPC_SSTS_INI_CON | SPC_SSTS_TGT_CON);
+ register int ssncon = ssts & ~(SPC_SSTS_INI_CON | SPC_SSTS_TGT_CON);
+
+ if (psns == (SPC_BUS_ACK | SPC_BUS_BSY | SCSI_PHASE_MSG_IN) &&
+ ssncon == (SPC_SSTS_BUSY | SPC_SSTS_XIP | SPC_SSTS_TC0 | SPC_SSTS_FIFO_EMPTY) &&
+ sscon)
+ break;
+ }
+
+ regs->spc_scmd = SPC_SCMD_C_ACKREQ_C;
+ }
+
+ return count;
+}
+
+void spc_reset(regs, quickly)
+ register spc_regmap_t *regs;
+ boolean_t quickly;
+{
+ register char myid;
+
+ /* save our id across reset */
+ myid = bdid_to_id(regs->spc_bdid);
+
+ /* wait for Reset In signal to go low */
+ while (regs->spc_ssts & SPC_SSTS_RST)
+ delay(1);
+
+ /* reset chip */
+ regs->spc_sctl = SPC_SCTL_RESET;
+ delay(25);
+
+ regs->spc_myid = myid;
+ regs->spc_sctl = SPC_SCTL_ARB_EBL|SPC_SCTL_PAR_EBL|SPC_SCTL_SEL_EBL|
+ SPC_SCTL_RSEL_EBL|SPC_SCTL_IE;
+ regs->spc_scmd = SPC_SCMD_C_BUS_RLSE;
+ /* regs->spc_tmod = 0; - SANDRO ? */
+ regs->spc_ints = 0xff;/* clear off any pending */
+#if 0
+ regs->spc_pctl = SPC_PCTL_LST_IE; /* useful only on 87033 */
+#else
+ regs->spc_pctl = 0;
+#endif
+ regs->spc_mbc = 0;
+ SPC_TC_PUT(regs,0);
+
+ if (quickly)
+ return;
+
+ /*
+ * reset the scsi bus, the interrupt routine does the rest
+ * or you can call spc_bus_reset().
+ */
+ regs->spc_scmd = SPC_SCMD_BUSRST|SPC_SCMD_C_STOP_X;/*?*/
+}
+
+/*
+ * Operational functions
+ */
+
+/*
+ * Start a SCSI command on a target
+ */
+spc_go(tgt, cmd_count, in_count, cmd_only)
+ int cmd_count, in_count;
+ target_info_t *tgt;
+ boolean_t cmd_only;
+{
+ spc_softc_t spc;
+ register int s;
+ boolean_t disconn;
+ script_t scp;
+ boolean_t (*handler)();
+ int late;
+
+ LOG(1,"\n\tgo");
+
+ spc = (spc_softc_t)tgt->hw_state;
+
+ /*
+ * We cannot do real DMA.
+ */
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ if (tgt->ior)
+ fdma_map(&tgt->fdma, tgt->ior);
+#endif /*MACH_KERNEL*/
+
+ if ((tgt->cur_cmd == SCSI_CMD_WRITE) ||
+ (tgt->cur_cmd == SCSI_CMD_LONG_WRITE)){
+ io_req_t ior = tgt->ior;
+ register int len = ior->io_count;
+
+ tgt->transient_state.out_count = len;
+ tgt->transient_state.copy_count = 0;
+
+ if (len < tgt->block_size) {
+ gimmeabreak();
+
+ /* avoid leaks */
+#if 0
+you`ll have to special case this
+#endif
+ tgt->transient_state.out_count = tgt->block_size;
+ }
+ } else {
+ tgt->transient_state.out_count = 0;
+ tgt->transient_state.copy_count = 0;
+ }
+
+ tgt->transient_state.cmd_count = cmd_count;
+
+ disconn =
+ BGET(scsi_might_disconnect,(unsigned)tgt->masterno, tgt->target_id);
+ disconn = disconn && (spc->ntargets > 1);
+ disconn |=
+ BGET(scsi_should_disconnect,(unsigned)tgt->masterno, tgt->target_id);
+
+ /*
+ * Setup target state
+ */
+ tgt->done = SCSI_RET_IN_PROGRESS;
+
+ handler = (disconn) ? spc_err_disconn : spc_err_generic;
+
+	/* determine whether or not to use the late forms of the scripts */
+ late = cmd_only ? FALSE : (tgt->flags & TGT_DID_SYNCH);
+
+ switch (tgt->cur_cmd) {
+ case SCSI_CMD_READ:
+ case SCSI_CMD_LONG_READ:
+ LOG(0x13,"readop");
+ scp = late ? spc_script_late_data_in : spc_script_data_in;
+ break;
+ case SCSI_CMD_WRITE:
+ case SCSI_CMD_LONG_WRITE:
+ LOG(0x14,"writeop");
+ scp = late ? spc_script_late_data_out : spc_script_data_out;
+ break;
+ case SCSI_CMD_INQUIRY:
+ /* This is likely the first thing out:
+ do the synch neg if so */
+ if (!cmd_only && ((tgt->flags&TGT_DID_SYNCH)==0)) {
+ scp = spc_script_try_synch;
+ tgt->flags |= TGT_TRY_SYNCH;
+ break;
+ }
+ case SCSI_CMD_REQUEST_SENSE:
+ case SCSI_CMD_MODE_SENSE:
+ case SCSI_CMD_RECEIVE_DIAG_RESULTS:
+ case SCSI_CMD_READ_CAPACITY:
+ case SCSI_CMD_READ_BLOCK_LIMITS:
+ case SCSI_CMD_READ_TOC:
+ case SCSI_CMD_READ_SUBCH:
+ case SCSI_CMD_READ_HEADER:
+ case 0xc4: /* despised: SCSI_CMD_DEC_PLAYBACK_STATUS */
+ case 0xc6: /* despised: SCSI_CMD_TOSHIBA_READ_SUBCH_Q */
+ case 0xc7: /* despised: SCSI_CMD_TOSHIBA_READ_TOC_ENTRY */
+ case 0xdd: /* despised: SCSI_CMD_NEC_READ_SUBCH_Q */
+ case 0xde: /* despised: SCSI_CMD_NEC_READ_TOC */
+ scp = late ? spc_script_late_data_in : spc_script_data_in;
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ case SCSI_CMD_MODE_SELECT:
+ case SCSI_CMD_REASSIGN_BLOCKS:
+ case SCSI_CMD_FORMAT_UNIT:
+ case 0xc9: /* vendor-spec: SCSI_CMD_DEC_PLAYBACK_CONTROL */
+ tgt->transient_state.cmd_count = sizeof_scsi_command(tgt->cur_cmd);
+ tgt->transient_state.out_count =
+ cmd_count - tgt->transient_state.cmd_count;
+ scp = late ? spc_script_late_data_out : spc_script_data_out;
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ case SCSI_CMD_TEST_UNIT_READY:
+ /*
+ * Do the synch negotiation here, unless prohibited
+ * or done already
+ */
+ if (tgt->flags & TGT_DID_SYNCH) {
+ scp = late ? spc_script_late_cmd : spc_script_cmd;
+ } else {
+ scp = spc_script_try_synch;
+ tgt->flags |= TGT_TRY_SYNCH;
+ cmd_only = FALSE;
+ }
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ default:
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ scp = late ? spc_script_late_cmd : spc_script_cmd;
+ }
+
+ tgt->transient_state.script = scp;
+ tgt->transient_state.handler = handler;
+ tgt->transient_state.identify = (cmd_only) ? 0xff :
+ (disconn ? SCSI_IDENTIFY|SCSI_IFY_ENABLE_DISCONNECT :
+ SCSI_IDENTIFY);
+
+ if (in_count)
+ tgt->transient_state.in_count =
+ (in_count < tgt->block_size) ? tgt->block_size : in_count;
+ else
+ tgt->transient_state.in_count = 0;
+ tgt->transient_state.dma_offset = 0;
+
+ /*
+ * See if another target is currently selected on
+ * this SCSI bus, e.g. lock the spc structure.
+ * Note that it is the strategy routine's job
+ * to serialize ops on the same target as appropriate.
+ */
+#if 0
+locking code here
+#endif
+ s = splbio();
+
+ if (spc->wd.nactive++ == 0)
+ spc->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+ if (spc->state & SPC_STATE_BUSY) {
+ /*
+ * Queue up this target, note that this takes care
+ * of proper FIFO scheduling of the scsi-bus.
+ */
+ LOG(3,"enqueue");
+ enqueue_tail(&spc->waiting_targets, (queue_entry_t) tgt);
+ } else {
+ /*
+ * It is down to at most two contenders now,
+ * we will treat reconnections same as selections
+ * and let the scsi-bus arbitration process decide.
+ */
+ spc->state |= SPC_STATE_BUSY;
+ spc->next_target = tgt;
+ spc_attempt_selection(spc);
+ /*
+ * Note that we might still lose arbitration..
+ */
+ }
+ splx(s);
+}
+
+void spc_attempt_selection(spc)
+ spc_softc_t spc;
+{
+ target_info_t *tgt;
+ spc_regmap_t *regs;
+ register int cmd;
+ int atn=0;
+
+ /* This is about your select code */
+
+ regs = spc->regs;
+ tgt = spc->next_target;
+
+ LOG(4,"select");
+ LOG(0x80+tgt->target_id,0);
+
+ /*
+ * Init bus state variables and set registers.
+ */
+ spc->active_target = tgt;
+
+ /* reselection pending ? */
+ if ((regs->spc_phase & (SPC_BUS_BSY|SPC_BUS_SEL))
+#ifdef MIPS
+ && (regs->spc_phase & (SPC_BUS_BSY|SPC_BUS_SEL))
+ && (regs->spc_phase & (SPC_BUS_BSY|SPC_BUS_SEL))
+#endif
+ )
+ return;
+
+ spc->script = tgt->transient_state.script;
+ spc->error_handler = tgt->transient_state.handler;
+ spc->done = SCSI_RET_IN_PROGRESS;
+
+ spc->in_count = 0;
+ spc->out_count = 0;
+
+ cmd = SPC_SCMD_C_SELECT | SPC_SCMD_PROGRAMMED_X;
+ if (tgt->flags & TGT_DID_SYNCH)
+ {
+ if (tgt->transient_state.identify != 0xff)
+ atn = 1;
+ }
+ else
+ if (tgt->flags & TGT_TRY_SYNCH)
+ atn = 1;
+
+#if 0
+ regs->spc_psns = SPC_DIAG_ENBL_XFER;
+#endif
+
+ regs->spc_sctl = SPC_SCTL_ARB_EBL | SPC_SCTL_PAR_EBL |
+ SPC_SCTL_RSEL_EBL | SPC_SCTL_IE;
+
+
+ { int mask;
+ mask = ~SPC_PCTL_BFREE_IE & regs->spc_pctl;
+ regs->spc_pctl = mask;
+ }
+
+ regs->spc_temp = (1<<(spc->sc->initiator_id)) | (1<<(tgt->target_id));
+
+ SPC_TC_PUT(regs,0xfa004);
+
+ if (atn)
+ {
+ regs->spc_scmd = SPC_SCMD_C_ATN_S;
+ /* delay 1us to avoid races */
+ delay(1);
+ }
+
+ regs->spc_scmd = cmd;
+ return;
+}
+
+/*
+ * Interrupt routine
+ * Take interrupts from the chip
+ *
+ * Implementation:
+ * Move along the current command's script if
+ * all is well, invoke error handler if not.
+ */
+void spc_intr(unit)
+int unit;
+{
+ register spc_softc_t spc;
+ register script_t scp;
+ register unsigned ints, psns, ssts;
+ register spc_regmap_t *regs;
+ boolean_t try_match;
+#if notyet
+ extern boolean_t rz_use_mapped_interface;
+
+ if (rz_use_mapped_interface)
+ {
+ SPC_intr(unit);
+ return;
+ }
+#endif
+
+ spc = spc_softc[unit];
+ regs = spc->regs;
+
+ /* read the interrupt status register */
+ ints = regs->spc_ints;
+
+ LOG(5,"\n\tintr");
+ LOG(0x80+ints,0);
+
+TR(ints);
+TRCHECK;
+
+ if (ints & SPC_INTS_RESET)
+ {
+ /* does its own interrupt reset when ready */
+ spc_bus_reset(spc);
+ return;
+ }
+
+ /* we got an interrupt all right */
+ if (spc->active_target)
+ spc->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+
+ if (ints == 0)
+ { /* no obvious cause */
+ LOG(2,"SPURIOUS");
+ gimmeabreak();
+ return;
+ }
+
+
+ /* reset the interrupt */
+ regs->spc_ints = ints;
+
+ /* go get the phase, and status. We can't trust the
+ phase until REQ is asserted in the psns. Only do
+ this if we received a command complete or service
+ required interrupt. Otherwise, just read them once
+ and trust. */
+
+
+
+ if (ints & (SPC_INTS_DONE|SPC_INTS_BUSREQ))
+ while(1)
+ {
+ psns = regs->spc_psns;
+ if (psns & SPC_BUS_REQ)
+ break;
+ delay(1); /* don't hog the bus */
+ }
+ else
+ psns = regs->spc_psns;
+
+ ssts = regs->spc_ssts;
+
+TR(psns);
+TR(ssts);
+TRCHECK;
+
+ if ((spc->state & SPC_STATE_TARGET) ||
+ (ints & SPC_INTS_SELECTED))
+ spc_target_intr(spc /**, ints, psns, ssts **/);
+
+ scp = spc->script;
+
+ if ((scp == 0) || (ints & SPC_INTS_RESELECTED))
+ {
+ gimmeabreak();
+ spc_reconnect(spc, ints, psns, ssts);
+ return;
+ }
+
+ if (SCRIPT_MATCH(psns) != scp->condition) {
+ if (try_match = (*spc->error_handler)(spc, ints, psns, ssts)) {
+ psns = regs->spc_psns;
+ ssts = regs->spc_ssts;
+ }
+ } else
+ try_match = TRUE;
+
+
+ /* might have been side effected */
+ scp = spc->script;
+
+ if (try_match && (SCRIPT_MATCH(psns) == scp->condition)) {
+ /*
+ * Perform the appropriate operation,
+ * then proceed
+ */
+ if ((*scp->action)(spc, ints, psns, ssts)) {
+ /* might have been side effected */
+ scp = spc->script;
+ spc->script = scp + 1;
+ }
+ }
+}
+
+void spc_target_intr(spc)
+ register spc_softc_t spc;
+{
+ panic("SPC: TARGET MODE !!!\n");
+}
+
+/*
+ * All the many little things that the interrupt
+ * routine might switch to
+ */
+boolean_t
+spc_issue_command(spc, ints, psns, ssts)
+ spc_softc_t spc;
+ int ints, psns, ssts;
+{
+ register spc_regmap_t *regs = spc->regs;
+
+ LOG(0x12, "cmd_issue");
+ /* we have just done a select;
+ Bus is in CMD phase;
+ need to phase match */
+ SPC_ACK(regs, SCSI_PHASE_CMD);
+
+ return spc_data_out(regs, SCSI_PHASE_CMD,
+ spc->active_target->transient_state.cmd_count,
+ spc->active_target->cmd_ptr) ? FALSE : TRUE;
+}
+
+boolean_t
+spc_issue_ident_and_command(spc, ints, psns, ssts)
+ spc_softc_t spc;
+ int ints, psns, ssts;
+{
+ register spc_regmap_t *regs = spc->regs;
+
+ LOG(0x22, "ident_and_cmd");
+ /* we have just done a select with atn Bus is in MSG_OUT phase;
+ need to phase match */
+ SPC_ACK(regs, SCSI_PHASE_MSG_OUT);
+
+ spc_data_out(regs, SCSI_PHASE_MSG_OUT, 1,
+ &spc->active_target->transient_state.identify);
+
+ /* wait to go to command phase */
+ SPC_WAIT_PHASE(SCSI_PHASE_CMD);
+
+ /* ack */
+ SPC_ACK(regs, SCSI_PHASE_CMD);
+
+ /* should be a command complete intr pending. Eat it */
+ if (regs->spc_ints != SPC_INTS_DONE)
+ gimmeabreak();
+ regs->spc_ints = SPC_INTS_DONE;
+
+ /* spit */
+ return spc_data_out(regs, SCSI_PHASE_CMD,
+ spc->active_target->transient_state.cmd_count,
+ spc->active_target->cmd_ptr) ? FALSE : TRUE;
+}
+
+
+boolean_t
+spc_end_transaction( spc, ints, psns, serr)
+ register spc_softc_t spc;
+ int ints, psns, serr;
+{
+ register spc_regmap_t *regs = spc->regs;
+ char cmc;
+ int tmp;
+
+ LOG(0x1f,"end_t");
+
+ SPC_ACK(regs,SCSI_PHASE_MSG_IN /*,1*/);
+
+ spc_data_in(regs, SCSI_PHASE_MSG_IN, 1, &cmc);
+
+ if (cmc != SCSI_COMMAND_COMPLETE)
+ printf("{T%x}", cmc);
+
+ while (regs->spc_ints != (SPC_INTS_DONE|SPC_INTS_DISC));
+
+ SPC_ACK(regs,SPC_PHASE_DISC);
+
+ /* going to disconnect */
+ regs->spc_pctl = ~SPC_PCTL_BFREE_IE & SPC_PHASE_DISC;
+ /* regs->spc_scmd = 0; */
+
+ /* clear all intr bits? */
+ tmp = regs->spc_ints;
+ regs->spc_ints = tmp;
+
+
+ if (!spc_end(spc, ints, psns, serr))
+ (void) spc_reconnect(spc, ints, psns, serr);
+ return FALSE;
+}
+
+boolean_t
+spc_end( spc, ints, psns, serr)
+ register spc_softc_t spc;
+ int ints, psns, serr;
+{
+ register target_info_t *tgt;
+ register io_req_t ior;
+ register spc_regmap_t *regs = spc->regs;
+ int csr;
+
+ LOG(6,"end");
+
+ tgt = spc->active_target;
+
+ if ((tgt->done = spc->done) == SCSI_RET_IN_PROGRESS)
+ tgt->done = SCSI_RET_SUCCESS;
+
+ spc->script = 0;
+
+ if (spc->wd.nactive-- == 1)
+ spc->wd.watchdog_state = SCSI_WD_INACTIVE;
+
+ /* check reconnection not pending */
+ csr = SPC_INTS_RESELECTED & regs->spc_ints;
+ if (!csr)
+ spc_release_bus(spc);
+ else
+ {
+ spc->active_target = 0;
+ /* spc->state &= ~SPC_STATE_BUSY; later */
+ }
+ if (ior = tgt->ior) {
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ fdma_unmap(&tgt->fdma, ior);
+#endif /*MACH_KERNEL*/
+ LOG(0xA,"ops->restart");
+ (*tgt->dev_ops->restart)( tgt, TRUE);
+ if (csr)
+ spc->state &= ~SPC_STATE_BUSY;
+ }
+
+ /* return not reselected */
+ return (csr & SPC_INTS_RESELECTED) ? 0 : 1;
+}
+
+boolean_t
+spc_release_bus(spc)
+ register spc_softc_t spc;
+{
+ boolean_t ret = FALSE;
+
+ LOG(9,"release");
+
+ spc->script = 0;
+
+ if (spc->state & SPC_STATE_COLLISION) {
+
+ LOG(0xB,"collided");
+ spc->state &= ~SPC_STATE_COLLISION;
+ spc_attempt_selection(spc);
+
+ } else if (queue_empty(&spc->waiting_targets)) {
+
+ spc->state &= ~SPC_STATE_BUSY;
+ spc->active_target = 0;
+ ret = TRUE;
+
+ } else {
+
+ LOG(0xC,"dequeue");
+ spc->next_target = (target_info_t *)
+ dequeue_head(&spc->waiting_targets);
+ spc_attempt_selection(spc);
+ }
+ return ret;
+}
+
+boolean_t
+spc_get_status( spc, ints, psns, serr)
+ register spc_softc_t spc;
+ int ints, psns, serr;
+{
+ register spc_regmap_t *regs = spc->regs;
+ scsi2_status_byte_t status;
+ register target_info_t *tgt;
+
+ LOG(0xD,"get_status");
+TRWRAP;
+
+ spc->state &= ~SPC_STATE_DMA_IN;
+
+ tgt = spc->active_target;
+
+ SPC_ACK(regs,SCSI_PHASE_STATUS /*,1*/);
+
+ spc_data_in(regs, SCSI_PHASE_STATUS, 1, &status.bits);
+
+ if (status.st.scsi_status_code != SCSI_ST_GOOD) {
+ scsi_error(spc->active_target, SCSI_ERR_STATUS, status.bits, 0);
+ spc->done = (status.st.scsi_status_code == SCSI_ST_BUSY) ?
+ SCSI_RET_RETRY : SCSI_RET_NEED_SENSE;
+ } else
+ spc->done = SCSI_RET_SUCCESS;
+
+ return TRUE;
+}
+
+boolean_t
+spc_xfer_in( spc, ints, psns, ssts)
+ register spc_softc_t spc;
+ int ints, psns, ssts;
+{
+ register target_info_t *tgt;
+ register spc_regmap_t *regs = spc->regs;
+ register int count;
+ boolean_t advance_script = TRUE;
+
+ LOG(0xE,"xfer_in");
+
+ tgt = spc->active_target;
+ spc->state |= SPC_STATE_DMA_IN;
+
+ count = tgt->transient_state.in_count;
+
+ SPC_ACK(regs, SCSI_PHASE_DATAI);
+
+ if ((tgt->cur_cmd != SCSI_CMD_READ) &&
+ (tgt->cur_cmd != SCSI_CMD_LONG_READ))
+ spc_data_in(regs, SCSI_PHASE_DATAI, count, tgt->cmd_ptr);
+ else
+ {
+ spc_data_in(regs, SCSI_PHASE_DATAI, count, tgt->ior->io_data);
+ }
+
+ return advance_script;
+}
+
+boolean_t
+spc_xfer_out( spc, ints, psns, ssts)
+ register spc_softc_t spc;
+ int ints, psns, ssts;
+{
+ register spc_regmap_t *regs = spc->regs;
+ register target_info_t *tgt;
+ boolean_t advance_script = TRUE;
+ int count = spc->out_count;
+
+ LOG(0xF,"xfer_out");
+
+ tgt = spc->active_target;
+ spc->state &= ~SPC_STATE_DMA_IN;
+
+ count = tgt->transient_state.out_count;
+
+ SPC_ACK(regs, SCSI_PHASE_DATAO);
+
+ if ((tgt->cur_cmd != SCSI_CMD_WRITE) &&
+ (tgt->cur_cmd != SCSI_CMD_LONG_WRITE))
+ spc_data_out(regs, SCSI_PHASE_DATAO, count,
+ tgt->cmd_ptr + tgt->transient_state.cmd_count);
+ else
+ spc_data_out(regs, SCSI_PHASE_DATAO, count, tgt->ior->io_data);
+
+ return advance_script;
+}
+
+/* disconnect-reconnect ops */
+
+/* get the message in via dma ?? */
+boolean_t
+spc_msg_in(spc, ints, psns, ssts)
+ register spc_softc_t spc;
+ int ints, psns, ssts;
+{
+ register target_info_t *tgt;
+
+ LOG(0x15,"msg_in");
+ gimmeabreak();
+
+ tgt = spc->active_target;
+
+#if 0
+You can do this by hand, just leave an interrupt pending at the end
+#endif
+
+ /* We only really expect two bytes */
+#if 0
+ SPC_PUT(dmar,sizeof(scsi_command_group_0));
+ ....
+#endif
+ return TRUE;
+}
+
+/* check the message is indeed a DISCONNECT */
+boolean_t
+spc_disconnect(spc, ints, psns, ssts)
+ register spc_softc_t spc;
+ int ints, psns, ssts;
+{
+ register int len = 0;
+ boolean_t ok = FALSE;
+ register char *msgs = 0;
+
+
+/* SPC_TC_GET(dmar,len); */
+ len = sizeof(scsi_command_group_0) - len;
+
+/* msgs = tgt->cmd_ptr; */ /* I think */
+
+ if ((len == 0) || (len > 2) || msgs == 0)
+ ok = FALSE;
+ else {
+ /* An SDP message precedes it in non-completed READs */
+ ok = ((msgs[0] == SCSI_DISCONNECT) || /* completed op */
+ ((msgs[0] == SCSI_SAVE_DATA_POINTER) && /* incomplete */
+ (msgs[1] == SCSI_DISCONNECT)));
+ }
+ if (!ok)
+ printf("[tgt %d bad msg (%d): %x]",
+ spc->active_target->target_id, len, msgs ? *msgs : 0);
+
+ return TRUE;
+}
+
+/* save all relevant data, free the BUS */
+boolean_t
+spc_disconnected(spc, ints, psns, ssts)
+ register spc_softc_t spc;
+ int ints, psns, ssts;
+{
+ register target_info_t *tgt;
+
+/* make sure reselects will work */
+
+ LOG(0x16,"disconnected");
+
+ spc_disconnect(spc,ints, psns, ssts);
+
+ tgt = spc->active_target;
+ tgt->flags |= TGT_DISCONNECTED;
+ tgt->transient_state.handler = spc->error_handler;
+ /* the rest has been saved in spc_err_disconn() */
+
+ PRINT(("{D%d}", tgt->target_id));
+
+ spc_release_bus(spc);
+
+ return FALSE;
+}
+
+/* get reconnect message, restore BUS */
+boolean_t
+spc_reconnect(spc, ints, psns, ssts)
+ register spc_softc_t spc;
+ int ints, psns, ssts;
+{
+
+ LOG(0x17,"reconnect");
+
+ if (spc->wd.nactive == 0) {
+ LOG(2,"SPURIOUS");
+ return FALSE;
+ }
+
+#if 0
+This is the 5380 code, for reference:
+ spc_regmap_t *regs = spc->regs;
+ register target_info_t *tgt;
+ register int id;
+ int msg;
+
+
+ id = regs->spc_data;/*parity?*/
+ /* xxx check our id is in there */
+
+ id &= ~(1 << spc->sc->initiator_id);
+ {
+ register int i;
+ for (i = 0; i < 8; i++)
+ if (id & (1 << i)) break;
+if (i == 8) {printf("{P%x}", id);return;}
+ id = i;
+ }
+ regs->spc_icmd = SPC_ICMD_BSY;
+ while (regs->spc_bus_csr & SPC_BUS_SEL)
+ ;
+ regs->spc_icmd = 0;
+ delay_1p2_us();
+ while ( ((regs->spc_bus_csr & SPC_BUS_BSY) == 0) &&
+ ((regs->spc_bus_csr & SPC_BUS_BSY) == 0) &&
+ ((regs->spc_bus_csr & SPC_BUS_BSY) == 0))
+ ;
+
+ /* Now should wait for correct phase: REQ signals it */
+ while ( ((regs->spc_bus_csr & SPC_BUS_REQ) == 0) &&
+ ((regs->spc_bus_csr & SPC_BUS_REQ) == 0) &&
+ ((regs->spc_bus_csr & SPC_BUS_REQ) == 0))
+ ;
+
+ regs->spc_mode |= SPC_MODE_MONBSY;
+
+ /*
+ * See if this reconnection collided with a selection attempt
+ */
+ if (spc->state & SPC_STATE_BUSY)
+ spc->state |= SPC_STATE_COLLISION;
+
+ spc->state |= SPC_STATE_BUSY;
+
+ /* Get identify msg */
+ bs = regs->spc_phase;
+if (SPC_CUR_PHASE(bs) != SCSI_PHASE_MSG_IN) gimmeabreak();
+ SPC_ACK(regs,SCSI_PHASE_MSG_IN /*,1*/);
+ msg = 0;
+ spc_data_in(regs, SCSI_PHASE_MSG_IN, 1, &msg);
+ regs->spc_mode = SPC_MODE_PAR_CHK|SPC_MODE_DMA|SPC_MODE_MONBSY;
+
+ if (msg != SCSI_IDENTIFY)
+ printf("{I%x %x}", id, msg);
+
+ tgt = spc->sc->target[id];
+ if (id > 7 || tgt == 0) panic("spc_reconnect");
+
+ PRINT(("{R%d}", id));
+ if (spc->state & SPC_STATE_COLLISION)
+ PRINT(("[B %d-%d]", spc->active_target->target_id, id));
+
+ LOG(0x80+id,0);
+
+ spc->active_target = tgt;
+ tgt->flags &= ~TGT_DISCONNECTED;
+
+ spc->script = tgt->transient_state.script;
+ spc->error_handler = tgt->transient_state.handler;
+ spc->in_count = 0;
+ spc->out_count = 0;
+
+ /* Should get a phase mismatch when tgt changes phase */
+#endif
+ return TRUE;
+}
+
+
+
+/* do the synch negotiation */
+boolean_t
+spc_dosynch( spc, ints, psns, ssts)
+ register spc_softc_t spc;
+ int ints, psns, ssts;
+{
+ /*
+ * Phase is MSG_OUT here, cmd has not been xferred
+ */
+ int len;
+ register target_info_t *tgt;
+ register spc_regmap_t *regs = spc->regs;
+ unsigned char off;
+ unsigned char p[6];
+
+ LOG(0x11,"dosync");
+
+ /* ATN still asserted */
+ SPC_ACK(regs,SCSI_PHASE_MSG_OUT);
+
+ tgt = spc->active_target;
+
+ tgt->flags |= TGT_DID_SYNCH; /* only one chance */
+ tgt->flags &= ~TGT_TRY_SYNCH;
+
+ /*p = some scratch buffer, on the stack */
+
+ p[0] = SCSI_IDENTIFY;
+ p[1] = SCSI_EXTENDED_MESSAGE;
+ p[2] = 3;
+ p[3] = SCSI_SYNC_XFER_REQUEST;
+ /* We cannot run synchronous */
+#define spc_to_scsi_period(x) 0x7
+#define scsi_period_to_spc(x) (x)
+ off = 0;
+ p[4] = spc_to_scsi_period(spc_min_period);
+ p[5] = off;
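+ /* For reference, per SCSI-2: the bytes after the IDENTIFY form a
+ synchronous data transfer request extended message -
+ 0x01 (EXTENDED MESSAGE), 0x03 (length), 0x01 (SDTR code),
+ transfer period in 4ns units, REQ/ACK offset (0 = asynchronous). */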
+
+ /* The transfer is started with ATN still set. The
+ chip will automagically drop ATN before it transfers the
+ last byte. Pretty neat. */
+ spc_data_out(regs, SCSI_PHASE_MSG_OUT,
+ sizeof(scsi_synch_xfer_req_t)+1, p);
+
+ /* wait for phase change to status phase */
+ SPC_WAIT_PHASE_VANISH(SCSI_PHASE_MSG_OUT);
+
+
+ psns = regs->spc_phase;
+
+ /* The standard says there's nothing else the target can do but.. */
+ if (SPC_CUR_PHASE(psns) != SCSI_PHASE_MSG_IN)
+ panic("spc_dosync");/* XXX put offline */
+
+ /*
+ msgin:
+ */
+ /* ack */
+ SPC_ACK(regs,SCSI_PHASE_MSG_IN);
+
+ /* clear any pending interrupts */
+ regs->spc_ints = regs->spc_ints;
+
+ /* get answer */
+ len = sizeof(scsi_synch_xfer_req_t);
+ len = spc_data_in(regs, SCSI_PHASE_MSG_IN, len, p);
+
+ /* do not cancel the phase mismatch interrupt ! */
+
+ /* look at the answer and see if we like it */
+ if (len || (p[0] != SCSI_EXTENDED_MESSAGE)) {
+ /* did not like it at all */
+ printf(" did not like SYNCH xfer ");
+ } else {
+ /* will NOT do synch */
+ printf(" but we cannot do SYNCH xfer ");
+ tgt->sync_period = scsi_period_to_spc(p[3]);
+ tgt->sync_offset = p[4];
+ /* sanity */
+ if (tgt->sync_offset != 0)
+ printf(" ?OFFSET %x? ", tgt->sync_offset);
+ }
+
+ /* wait for phase change */
+ SPC_WAIT_PHASE_VANISH(SCSI_PHASE_MSG_IN);
+
+ psns = regs->spc_phase;
+
+ /* phase should be command now */
+ /* continue with simple command script */
+ spc->error_handler = spc_err_generic;
+ spc->script = spc_script_cmd;
+
+/* Make sure you get out right here, esp the script pointer and/or pending intr */
+
+ if (SPC_CUR_PHASE(psns) == SCSI_PHASE_CMD )
+ return FALSE;
+
+ if (SPC_CUR_PHASE(psns) == SCSI_PHASE_STATUS ) /* jump to get_status */
+ return TRUE; /* intr is pending */
+
+ spc->script++;
+ if (SPC_CUR_PHASE(psns) == SCSI_PHASE_MSG_IN )
+ return TRUE;
+
+ if ((psns & SPC_BUS_BSY) == 0) /* uhu? disconnected */
+ return TRUE;
+
+ gimmeabreak();
+ return FALSE;
+}
+
+/*
+ * The bus was reset
+ */
+void spc_bus_reset(spc)
+ register spc_softc_t spc;
+{
+ register spc_regmap_t *regs = spc->regs;
+
+ LOG(0x21,"bus_reset");
+
+ /*
+ * Clear bus descriptor
+ */
+ spc->script = 0;
+ spc->error_handler = 0;
+ spc->active_target = 0;
+ spc->next_target = 0;
+ spc->state = 0;
+ queue_init(&spc->waiting_targets);
+ spc->wd.nactive = 0;
+ spc_reset(regs, TRUE);
+
+ printf("spc%d: (%d) bus reset ", spc->sc->masterno, ++spc->wd.reset_count);
+ delay(scsi_delay_after_reset); /* some targets take long to reset */
+
+ if (spc->sc == 0) /* sanity */
+ return;
+
+ scsi_bus_was_reset(spc->sc);
+}
+
+/*
+ * Error handlers
+ */
+
+/*
+ * Generic, default handler
+ */
+boolean_t
+spc_err_generic(spc, ints, psns, ssts)
+ register spc_softc_t spc;
+ int ints, psns, ssts;
+{
+ register spc_regmap_t *regs = spc->regs;
+ LOG(0x10,"err_generic");
+
+ if (ints & SPC_INTS_TIMEOUT) /* we timed out */
+ if ((regs->spc_scmd & SPC_SCMD_CMDMASK) == SPC_SCMD_C_SELECT)
+ {
+ /* Powered off ? */
+ if (spc->active_target->flags & TGT_FULLY_PROBED)
+ {
+ spc->active_target->flags = 0;
+ LOG(0x1e,"Device Down");
+ }
+ spc->done = SCSI_RET_DEVICE_DOWN;
+ spc_end(spc, ints, psns, ssts);
+ return FALSE; /* don't retry - just report missing device */
+ }
+ else
+ { /* timed out - but not on a select. What is going on? */
+ gimmeabreak();
+ }
+
+ if (SPC_CUR_PHASE(psns) == SCSI_PHASE_STATUS)
+ return spc_err_to_status(spc, ints, psns, ssts);
+ gimmeabreak();
+ return FALSE;
+}
+
+/*
+ * Handle generic errors that are reported as
+ * an unexpected change to STATUS phase
+ */
+boolean_t
+spc_err_to_status(spc, ints, psns, ssts)
+ register spc_softc_t spc;
+ int ints, psns, ssts;
+{
+ script_t scp = spc->script;
+
+ LOG(0x20,"err_tostatus");
+ while (SCSI_PHASE(scp->condition) != SCSI_PHASE_STATUS)
+ scp++;
+ spc->script = scp;
+#if 0
+ /*
+ * Normally, we would already be able to say the command
+ * is in error, e.g. the tape had a filemark or something.
+ * But in case we do disconnected mode WRITEs, it is quite
+ * common that the following happens:
+ * dma_out -> disconnect -> reconnect
+ * and our script might expect at this point that the dma
+ * had to be restarted (it didn't know it was completed
+ * because the tape record is shorter than we asked for).
+ * And in any event.. it is both correct and cleaner to
+ * declare error iff the STATUS byte says so.
+ */
+ spc->done = SCSI_RET_NEED_SENSE;
+#endif
+ return TRUE;
+}
+
+/*
+ * Watch for a disconnection
+ */
+boolean_t
+spc_err_disconn(spc, ints, psns, ssts)
+ register spc_softc_t spc;
+ int ints, psns, ssts;
+{
+#if 1
+/*
+ * THIS ROUTINE CAN'T POSSIBLY WORK...
+ * FOR EXAMPLE, THE VARIABLE 'xferred' IS NEVER INITIALIZED.
+ */
+ return FALSE;
+#else
+ register spc_regmap_t *regs;
+ register target_info_t *tgt;
+ int xferred;
+
+ LOG(0x18,"err_disconn");
+
+ if (SPC_CUR_PHASE(ints) != SCSI_PHASE_MSG_IN)
+ return spc_err_generic(spc, ints, psns, ssts);
+
+ regs = spc->regs;
+
+ tgt = spc->active_target;
+
+ switch (SCSI_PHASE(spc->script->condition)) {
+ case SCSI_PHASE_DATAO:
+ LOG(0x1b,"+DATAO");
+/*updatecounters:*/
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+ tgt->transient_state.dma_offset += xferred;
+
+ tgt->transient_state.script = spc_script_data_out;
+ break;
+
+ case SCSI_PHASE_DATAI:
+ LOG(0x19,"+DATAI");
+
+/*update counters: */
+ assert(xferred > 0);
+ tgt->transient_state.in_count -= xferred;
+ assert(tgt->transient_state.in_count > 0);
+ tgt->transient_state.dma_offset += xferred;
+
+ tgt->transient_state.script = spc_script_data_in;
+ break;
+
+ case SCSI_PHASE_STATUS:
+
+ if (spc->state & SPC_STATE_DMA_IN) {
+
+ LOG(0x1a,"+STATUS+R");
+
+/*same as above.. */
+ assert(xferred > 0);
+ tgt->transient_state.in_count -= xferred;
+/* assert(tgt->transient_state.in_count > 0);*/
+ tgt->transient_state.dma_offset += xferred;
+
+ tgt->transient_state.script = spc_script_data_in;
+ if (tgt->transient_state.in_count == 0)
+ tgt->transient_state.script++;
+
+ } else {
+
+ LOG(0x1d,"+STATUS+W");
+
+ if ((tgt->transient_state.out_count == spc->out_count)) {
+ /* all done */
+ tgt->transient_state.script = &spc_script_data_out[1];
+ tgt->transient_state.out_count = 0;
+ } else {
+
+/*.. */
+ tgt->transient_state.out_count -= xferred;
+ assert(tgt->transient_state.out_count > 0);
+ tgt->transient_state.dma_offset += xferred;
+
+ tgt->transient_state.script = spc_script_data_out;
+ }
+ spc->out_count = 0;
+ }
+ break;
+ default:
+ gimmeabreak();
+ }
+ /* spc->xxx = 0; */
+
+/* SPC_ACK(regs,SCSI_PHASE_MSG_IN); later */
+ (void) spc_msg_in(spc, ints, psns, ssts);
+
+ spc->script = spc_script_disconnect;
+
+ return FALSE;
+#endif
+}
+
+/*
+ * Watchdog
+ *
+ */
+void spc_reset_scsibus(spc)
+ register spc_softc_t spc;
+{
+ register target_info_t *tgt = spc->active_target;
+ if (tgt) {
+ int cnt = 0;
+ /* SPC_TC_GET(spc->dmar,cnt); */
+ log( LOG_KERN,
+ "Target %d was active, cmd x%x in x%x out x%x Sin x%x Sou x%x dmalen x%x\n",
+ tgt->target_id, tgt->cur_cmd,
+ tgt->transient_state.in_count, tgt->transient_state.out_count,
+ spc->in_count, spc->out_count, cnt);
+ }
+#if 0
+ spc->regs->.....
+#endif
+ delay(25);
+}
+
+int SPC_ACK(regs, phase)
+register spc_regmap_t *regs;
+unsigned phase;
+{
+ /* we want to switch into the specified phase -
+
+ The calling routine should have already dismissed
+ any pending interrupts (spc_ints)
+ */
+
+ regs->spc_psns = 0;
+ regs->spc_pctl = phase | SPC_PCTL_BFREE_IE;
+ return 0;
+}
+#endif /*NSCSI > 0*/
+
+#endif /* 0 */
diff --git a/scsi/adapters/scsi_aha15.h b/scsi/adapters/scsi_aha15.h
new file mode 100644
index 00000000..52cd9367
--- /dev/null
+++ b/scsi/adapters/scsi_aha15.h
@@ -0,0 +1,347 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_aha15.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/91
+ *
+ * Definitions for the Adaptec AHA-15xx family
+ * of Intelligent SCSI Host Adapter boards
+ */
+
+#ifndef _SCSI_AHA15_H_
+#define _SCSI_AHA15_H_
+
+/*
+ * Addresses/length in 24 bits
+ *
+ * BEWARE: your compiler must pack these correctly,
+ * e.g. without gaps between two such contiguous structs
+ * (GCC does)
+ */
+typedef struct {
+ unsigned char msb;
+ unsigned char mid;
+ unsigned char lsb;
+} aha_address_t;
+
+#define AHA_ADDRESS_SET(addr,val) {\
+ (addr).msb = ((val) >> 16);\
+ (addr).mid = ((val) >> 8);\
+ (addr).lsb = (val) ;\
+ }
+#define AHA_ADDRESS_GET(addr,val) {\
+ (val) = ((addr).msb << 16) |\
+ ((addr).mid << 8) |\
+ ((addr).lsb ) ;\
+ }
+
+#define aha_length_t aha_address_t
+#define AHA_LENGTH_SET AHA_ADDRESS_SET
+#define AHA_LENGTH_GET AHA_ADDRESS_GET
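+
+/* Example, for illustration only: the SET/GET macros round-trip a
+ 24-bit value; 0x012345 is an arbitrary sample address.
+
+ aha_address_t a;
+ unsigned int v;
+ AHA_ADDRESS_SET(a, 0x012345); (a.msb=0x01 a.mid=0x23 a.lsb=0x45)
+ AHA_ADDRESS_GET(a, v); (v == 0x012345 again)
+ */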
+
+/*
+ * Register map
+ */
+
+typedef struct {
+ volatile unsigned char aha_status; /* r: Status Register */
+#define aha_ctl aha_status /* w: Control Register */
+
+ volatile unsigned char aha_data; /* rw: Data Port */
+#define aha_cmd aha_data /* w: Command register */
+
+ volatile unsigned char aha_intr; /* ro: Interrupt Register */
+} aha_regmap_t;
+
+/* ..but on the 386 I/O is not memory mapped */
+#define AHA_STATUS_PORT(port) ((port))
+#define AHA_CONTROL_PORT(port) ((port))
+#define AHA_COMMAND_PORT(port) ((port)+1)
+#define AHA_DATA_PORT(port) ((port)+1)
+#define AHA_INTR_PORT(port) ((port)+2)
+
+/* Status Register */
+#define AHA_CSR_CMD_ERR 0x01 /* Invalid command */
+#define AHA_CSR_xxx 0x02 /* undefined */
+#define AHA_CSR_DATAI_FULL 0x04 /* In-port full */
+#define AHA_CSR_DATAO_FULL 0x08 /* Out-port full */
+#define AHA_CSR_IDLE 0x10 /* doin nuthin */
+#define AHA_CSR_INIT_REQ 0x20 /* initialization required */
+#define AHA_CSR_DIAG_FAIL 0x40 /* selftest failed */
+#define AHA_CSR_SELF_TEST 0x80 /* selftesting */
+
+/* Control Register */
+#define AHA_CTL_xxx 0x0f /* undefined */
+#define AHA_CTL_SCSI_RST 0x10 /* reset SCSIbus */
+#define AHA_CTL_INTR_CLR 0x20 /* Clear interrupt reg */
+#define AHA_CTL_SOFT_RESET 0x40 /* Board only, no selftest */
+#define AHA_CTL_HARD_RESET 0x80 /* Full reset, and SCSIbus */
+
+/* Interrupt Flags register */
+#define AHA_INTR_MBI_FULL 0x01 /* scan the In mboxes */
+#define AHA_INTR_MBO_AVAIL 0x02 /* scan the Out mboxes */
+#define AHA_INTR_DONE 0x04 /* command complete */
+#define AHA_INTR_RST 0x08 /* saw a SCSIbus reset */
+#define AHA_INTR_xxx 0x70 /* undefined */
+#define AHA_INTR_PENDING 0x80 /* Any interrupt bit set */
+
+/*
+ * Command register
+ */
+#define AHA_CMD_NOP 0x00 /* */
+#define AHA_CMD_INIT 0x01 /* mbox initialization */
+ /* 4 bytes follow: # of Out mboxes (x2->total), and
+ msb, mid, lsb of mbox address */
+struct aha_init {
+ unsigned char mb_count;
+ aha_address_t mb_ptr;
+};
+#define AHA_CMD_START 0x02 /* start SCSI cmd */
+#define AHA_CMD_BIOS 0x03
+#define AHA_CMD_INQUIRY 0x04
+ /* returns 4 bytes: */
+struct aha_inq {
+ unsigned char board_id;
+# define AHA_BID_1540_B16 0x00
+# define AHA_BID_1540_B64 0x30
+# define AHA_BID_1540B 0x41
+# define AHA_BID_1640 0x42
+# define AHA_BID_1740 0x43
+# define AHA_BID_1542C 0x44
+# define AHA_BID_1542CF 0x45 /* BIOS v2.0x */
+
+ unsigned char options;
+# define AHA_BOPT_STD 0x41 /* in 154x, standard model */
+
+ unsigned char frl_1; /* rev level */
+ unsigned char frl_2;
+};
+#define AHA_CMD_MBO_IE 0x05
+ /* 1 byte follows: */
+# define AHA_MBO_DISABLE 0x00
+# define AHA_MBO_ENABLE 0x01
+
+#define AHA_CMD_SET_SELTO 0x06 /* select timeout */
+ /* 4 bytes follow: */
+struct aha_selto {
+ unsigned char enable;
+ char xxx;
+ unsigned char timeo_msb;
+ unsigned char timeo_lsb;
+};
+#define AHA_CMD_SET_BUSON 0x07
+ /* 1 byte value follows: 2..15 default 11 usecs */
+#define AHA_CMD_SET_BUSOFF 0x08
+ /* 1 byte value follows: 1..64 default 4 usecs */
+#define AHA_CMD_SET_XSPEED 0x09
+ /* 1 byte value follows: */
+# define AHA_DMASPEED_5Mb 0x00
+# define AHA_DMASPEED_7Mb 0x01
+# define AHA_DMASPEED_8Mb 0x02
+# define AHA_DMASPEED_10Mb 0x03
+# define AHA_DMASPEED_6Mb 0x04
+ /* values in the range 80..ff encoded as follows:
+ bit 7 on --> custom speed
+ bits 6..4 read pulse width
+ 0 100ns
+ 1 150
+ 2 200
+ 3 250
+ 4 300
+ 5 350
+ 6 400
+ 7 450
+ bit 3 strobe off time
+ 0 100ns
+ 1 150ns
+ bits 2..0 write pulse width
+ <same as read pulse>
+ */
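+ /* Worked example, for illustration: a custom value of 0xa3 is
+ 1010 0011b, i.e. bit 7 set (custom speed), read pulse width
+ code 2 (200ns), strobe off time 0 (100ns), write pulse width
+ code 3 (250ns). */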
+#define AHA_CMD_FIND_DEVICES 0x0a
+ /* returns 8 bytes, each one is a bitmask of the LUNs
+ available for the given target ID */
+struct aha_devs {
+ unsigned char tgt_luns[8];
+};
+#define AHA_CMD_GET_CONFIG 0x0b
+ /* returns 3 bytes: */
+struct aha_conf {
+ unsigned char dma_arbitration;/* bit N -> channel N */
+ unsigned char intr_ch;/* bit N -> intr 9+N (but 13,16)*/
+ unsigned char my_scsi_id; /* both of I and T role */
+};
+#define AHA_CMD_ENB_TGT_MODE 0x0c
+ /* 2 bytes follow: */
+struct aha_tgt {
+ unsigned char enable;
+ unsigned char luns; /* bitmask */
+};
+
+#define AHA_CMD_GET_SETUP 0x0d
+ /* 1 byte follows: allocation len (N) */
+ /* returns N bytes, 17 significant: */
+struct aha_setup {
+ BITFIELD_3( unsigned char,
+ initiate_SDT:1,
+ enable_parity:1,
+ res:6);
+ unsigned char xspeed; /* see above */
+ unsigned char buson;
+ unsigned char busoff;
+ unsigned char n_mboxes;/* 0 if not initialized */
+ aha_address_t mb_ptr; /* garbage if not inited */
+ struct {
+ BITFIELD_3( unsigned char,
+ offset: 4,
+ period: 3, /* 200 + 50 * N */
+ negotiated: 1);
+ } SDT_params[8];
+ unsigned char no_disconnect; /* bitmask */
+};
+
+#define AHA_CMD_WRITE_CH2 0x1a
+ /* 3 bytes (aha_address_t) follow for the buffer pointer */
+#define AHA_CMD_READ_CH2 0x1b
+ /* 3 bytes (aha_address_t) follow for the buffer pointer */
+#define AHA_CMD_WRITE_FIFO 0x1c
+ /* 3 bytes (aha_address_t) follow for the buffer pointer */
+#define AHA_CMD_READ_FIFO 0x1d
+ /* 3 bytes (aha_address_t) follow for the buffer pointer */
+#define AHA_CMD_ECHO 0x1f
+ /* 1 byte follows, which should then be read back */
+#define AHA_CMD_DIAG 0x20
+#define AHA_CMD_SET_OPT 0x21
+ /* 2+ bytes follow: */
+struct aha_diag {
+ unsigned char parmlen; /* bytes to follow */
+ unsigned char no_disconnect; /* bitmask */
+ /* rest is undefined */
+};
+
+#define AHA_EXT_BIOS 0x28 /* return extended bios info */
+#define AHA_MBX_ENABLE 0x29 /* enable mail box interface */
+struct aha_extbios {
+ unsigned char flags; /* Bit 3 == 1 extended bios enabled */
+ unsigned char mailboxlock; /* mail box lock code to unlock it */
+};
+
+/*
+ * Command Control Block
+ */
+typedef struct {
+ unsigned char ccb_code;
+# define AHA_CCB_I_CMD 0x00
+# define AHA_CCB_T_CMD 0x01
+# define AHA_CCB_I_CMD_SG 0x02
+# define AHA_CCB_ICMD_R 0x03
+# define AHA_CCB_ICMD_SG_R 0x04
+# define AHA_CCB_BDEV_RST 0x81
+ BITFIELD_4( unsigned char,
+ ccb_lun:3,
+ ccb_in:1,
+ ccb_out:1,
+ ccb_scsi_id:3);
+ unsigned char ccb_cmd_len;
+ unsigned char ccb_reqsns_len; /* if 1 no automatic reqsns*/
+ aha_length_t ccb_datalen;
+ aha_address_t ccb_dataptr;
+ aha_address_t ccb_linkptr;
+ unsigned char ccb_linkid;
+ unsigned char ccb_hstatus;
+# define AHA_HST_SUCCESS 0x00
+# define AHA_HST_SEL_TIMEO 0x11
+# define AHA_HST_DATA_OVRUN 0x12
+# define AHA_HST_BAD_DISCONN 0x13
+# define AHA_HST_BAD_PHASE_SEQ 0x14
+# define AHA_HST_BAD_OPCODE 0x16
+# define AHA_HST_BAD_LINK_LUN 0x17
+# define AHA_HST_INVALID_TDIR 0x18
+# define AHA_HST_DUPLICATED_CCB 0x19
+# define AHA_HST_BAD_PARAM 0x1a
+
+ scsi2_status_byte_t ccb_status;
+ unsigned char ccb_xxx;
+ unsigned char ccb_xxx1;
+ scsi_command_group_5 ccb_scsi_cmd; /* cast as needed */
+} aha_ccb_t;
+
+/* For scatter/gather use a list of (len,ptr) segments, each field
+ is 3 bytes (aha_address_t) long. Max 17 segments, min 1 */
+
+/*
+ * Ring descriptor, aka Mailbox
+ */
+typedef union {
+
+ struct {
+ volatile unsigned char mb_cmd; /* Out mbox */
+# define mb_status mb_cmd /* In mbox */
+
+ aha_address_t mb_ptr;
+#define AHA_MB_SET_PTR(mbx,val) AHA_ADDRESS_SET((mbx)->mb.mb_ptr,(val))
+#define AHA_MB_GET_PTR(mbx,val) AHA_ADDRESS_GET((mbx)->mb.mb_ptr,(val))
+
+ } mb;
+
+ struct { /* ccb required In mbox */
+ volatile unsigned char mb_cmd;
+ BITFIELD_4( unsigned char,
+ mb_lun : 3,
+ mb_isa_send : 1,
+ mb_isa_recv : 1,
+ mb_initiator_id : 3);
+ unsigned char mb_data_len_msb;
+ unsigned char mb_data_len_mid;
+ } mbt;
+
+ unsigned int bits; /* quick access */
+
+} aha_mbox_t;
+
+/* Out mbox, values for the mb_cmd field */
+#define AHA_MBO_FREE 0x00
+#define AHA_MBO_START 0x01
+#define AHA_MBO_ABORT 0x02
+
+/* In mbox, values for the mb_status field */
+#define AHA_MBI_FREE 0x00
+#define AHA_MBI_SUCCESS 0x01
+#define AHA_MBI_ABORTED 0x02
+#define AHA_MBI_NOT_FOUND 0x03
+#define AHA_MBI_ERROR 0x04
+#define AHA_MBI_NEED_CCB 0x10
+
+/*
+ * Scatter/gather segment lists
+ */
+typedef struct {
+ aha_length_t len;
+ aha_address_t ptr;
+} aha_seglist_t;
+
+#define AHA_MAX_SEGLIST 17 /* which means max 64Kb */
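+ /* 17 entries because a maximally misaligned transfer of 16 pages
+ worth of data (64Kb) can touch a partial page at each end; the
+ driver sizes max_dma_data to (AHA_MAX_SEGLIST-1) pages accordingly. */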
+#endif /*_SCSI_AHA15_H_*/
diff --git a/scsi/adapters/scsi_aha15_hdw.c b/scsi/adapters/scsi_aha15_hdw.c
new file mode 100644
index 00000000..5514bc5a
--- /dev/null
+++ b/scsi/adapters/scsi_aha15_hdw.c
@@ -0,0 +1,1467 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_aha15_hdw.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/91
+ *
+ * Bottom layer of the SCSI driver: chip-dependent functions
+ *
+ * This file contains the code that is specific to the Adaptec
+ * AHA-15xx family of Intelligent SCSI Host Adapter boards:
+ * probing, start operation, and interrupt routine.
+ */
+
+/*
+ * Since the board is "Intelligent" we do not need scripts like
+ * other simpler HBAs. Maybe.
+ */
+#include <cpus.h>
+#include <platforms.h>
+
+#include <aha.h>
+#if NAHA > 0
+
+#include <mach/std_types.h>
+#include <machine/machspl.h>
+#include <sys/types.h>
+#include <chips/busses.h>
+#include <scsi/compat_30.h>
+
+/* #include <sys/syslog.h> */
+
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+
+#include <scsi/adapters/scsi_aha15.h>
+
+#ifdef AT386
+#define MACHINE_PGBYTES I386_PGBYTES
+#define MAPPABLE 0
+#define gimmeabreak() asm("int3")
+#include <i386/pio.h> /* inlining of outb and inb */
+#endif /*AT386*/
+
+#ifdef CBUS /* For the Corollary machine, physical */
+#include <i386at/mp/mp.h>
+#include <cbus/cbus.h>
+
+#define aha_cbus_window transient_state.hba_dep[0]
+ /* must use windows for phys addresses */
+ /* greater than 16 megs */
+
+#define kvtoAT cbus_kvtoAT
+#else /* CBUS */
+#define kvtoAT kvtophys
+#endif /* CBUS */
+
+#ifndef MACHINE_PGBYTES /* cross compile check */
+#define MACHINE_PGBYTES 0x1000
+#define MAPPABLE 1
+#define gimmeabreak() Debugger("gimmeabreak");
+#endif
+
+/*
+ * Data structures: ring, ccbs, a per target buffer
+ */
+
+#define AHA_NMBOXES 2 /* no need for more, I think */
+struct aha_mb_ctl {
+ aha_mbox_t omb[AHA_NMBOXES];
+ aha_mbox_t imb[AHA_NMBOXES];
+ unsigned char iidx, oidx; /* roving ptrs into */
+};
+#define next_mbx_idx(i) ((((i)+1)==AHA_NMBOXES)?0:((i)+1))
+
+#define AHA_NCCB 8 /* for now */
+struct aha_ccb_raw {
+ target_info_t *active_target;
+ aha_ccb_t ccb;
+ char buffer[256]; /* separate out this ? */
+};
+#define rccb_to_cmdptr(rccb) ((char*)&((rccb)->ccb.ccb_scsi_cmd))
+
+/* forward decls */
+int aha_reset_scsibus();
+boolean_t aha_probe_target();
+
+/*
+ * State descriptor for this layer. There is one such structure
+ * per (enabled) board
+ */
+struct aha_softc {
+ watchdog_t wd;
+ decl_simple_lock_data(, aha_lock)
+ unsigned int port; /* I/O port */
+
+ int ntargets; /* how many alive on this scsibus */
+
+ scsi_softc_t *sc; /* HBA-indep info */
+
+ struct aha_mb_ctl mb; /* mailbox structures */
+
+ /* This chicanery is for mapping back the phys address
+ of a CCB (which we get in an MBI) to its virtual */
+ /* [we could use phystokv(), but it isn't standard] */
+ vm_offset_t I_hold_my_phys_address;
+ struct aha_ccb_raw aha_ccbs[AHA_NCCB];
+
+} aha_softc_data[NAHA];
+
+typedef struct aha_softc *aha_softc_t;
+
+aha_softc_t aha_softc[NAHA];
+
+struct aha_ccb_raw *
+mb_to_rccb(aha, mbi)
+ aha_softc_t aha;
+ aha_mbox_t mbi;
+{
+ vm_offset_t addr;
+
+ AHA_MB_GET_PTR(&mbi,addr); /* phys address of ccb */
+
+ /* make virtual */
+ addr = ((vm_offset_t)&aha->I_hold_my_phys_address) +
+ (addr - aha->I_hold_my_phys_address);
+
+ /* adjust by proper offset to get base */
+ addr -= (vm_offset_t)&(((struct aha_ccb_raw *)0)->ccb);
+
+ return (struct aha_ccb_raw *)addr;
+}
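+
+/* For illustration: I_hold_my_phys_address is overwritten at probe
+ time with its own physical address, so given the physical CCB
+ pointer from an In mbox the CCB's virtual address is simply
+ field_virt + (ccb_phys - field_phys), and the final subtraction
+ is a hand-rolled offsetof(struct aha_ccb_raw, ccb) stepping back
+ to the containing aha_ccb_raw. This relies on the softc living
+ in physically contiguous kernel memory. */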
+
+target_info_t *
+aha_tgt_alloc(aha, id, sns_len, tgt)
+ aha_softc_t aha;
+ target_info_t *tgt;
+{
+ struct aha_ccb_raw *rccb;
+
+ aha->ntargets++;
+
+ if (tgt == 0)
+ tgt = scsi_slave_alloc(aha - aha_softc_data, id, aha);
+
+ rccb = &(aha->aha_ccbs[id]);
+ rccb->ccb.ccb_reqsns_len = sns_len;
+ tgt->cmd_ptr = rccb_to_cmdptr(rccb);
+ tgt->dma_ptr = 0;
+#ifdef CBUS
+ tgt->aha_cbus_window = 0;
+#endif /* CBUS */
+ return tgt;
+}
+
+/*
+ * Synch xfer timing conversions
+ */
+#define aha_to_scsi_period(a) ((200 + ((a) * 50)) >> 2)
+#define scsi_period_to_aha(p) ((((p) << 2) - 200) / 50)
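+/* For illustration: the board encodes the synchronous period as
+ 200 + 50*N nsec while the SCSI message carries it in 4 nsec units,
+ so aha value 0 maps to scsi period 50 (200 nsec) and
+ scsi_period_to_aha(50) gives 0 back; other values truncate. */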
+
+/*
+ * Definition of the controller for the auto-configuration program.
+ */
+
+/* DOCUMENTATION */
+/* base ports can be:
+ 0x334, 0x330 (default), 0x234, 0x230, 0x134, 0x130
+ possible interrupt channels are:
+ 9, 10, 11 (default), 12, 14, 15
+ DMA channels can be:
+ 7, 6, 5 (default), 0
+ */
+/* DOCUMENTATION */
+
+int aha_probe(), scsi_slave(), aha_go(), aha_intr();
+void scsi_attach();
+
+vm_offset_t aha_std[NAHA] = { 0 };
+struct bus_device *aha_dinfo[NAHA*8];
+struct bus_ctlr *aha_minfo[NAHA];
+struct bus_driver aha_driver =
+ { aha_probe, scsi_slave, scsi_attach, aha_go, aha_std, "rz", aha_dinfo,
+ "ahac", aha_minfo, BUS_INTR_B4_PROBE};
+
+#define DEBUG 1
+#if DEBUG
+
+#define PRINT(x) if (scsi_debug) printf x
+
+aha_state(port)
+{
+ register unsigned char st, intr;
+
+ if (port == 0)
+ port = 0x330;
+ st = inb(AHA_STATUS_PORT(port));
+ intr = inb(AHA_INTR_PORT(port));
+
+ printf("status %x intr %x\n", st, intr);
+ return 0;
+}
+
+aha_target_state(tgt)
+ target_info_t *tgt;
+{
+ if (tgt == 0)
+ tgt = aha_softc[0]->sc->target[0];
+ if (tgt == 0)
+ return 0;
+ printf("fl %x dma %X+%x cmd %x@%X id %x per %x off %x ior %X ret %X\n",
+ tgt->flags, tgt->dma_ptr, tgt->transient_state.dma_offset, tgt->cur_cmd,
+ tgt->cmd_ptr, tgt->target_id, tgt->sync_period, tgt->sync_offset,
+ tgt->ior, tgt->done);
+
+ return 0;
+}
+
+aha_all_targets(unit)
+{
+ int i;
+ target_info_t *tgt;
+ for (i = 0; i < 8; i++) {
+ tgt = aha_softc[unit]->sc->target[i];
+ if (tgt)
+ aha_target_state(tgt);
+ }
+}
+
+#define TRMAX 200
+int tr[TRMAX+3];
+int trpt, trpthi;
+#define TR(x) tr[trpt++] = x
+#define TRWRAP trpthi = trpt; trpt = 0;
+#define TRCHECK if (trpt > TRMAX) {TRWRAP}
+
+#define TRACE
+
+#ifdef TRACE
+
+#define LOGSIZE 256
+#define LOG_KERN 0<<3 /* from syslog.h */
+
+int aha_logpt;
+char aha_log[LOGSIZE];
+
+#define MAXLOG_VALUE 0x1e
+struct {
+ char *name;
+ unsigned int count;
+} logtbl[MAXLOG_VALUE];
+
+static LOG(e,f)
+ char *f;
+{
+ aha_log[aha_logpt++] = (e);
+ if (aha_logpt == LOGSIZE) aha_logpt = 0;
+ if ((e) < MAXLOG_VALUE) {
+ logtbl[(e)].name = (f);
+ logtbl[(e)].count++;
+ }
+}
+
+aha_print_log(skip)
+ int skip;
+{
+ register int i, j;
+ register unsigned char c;
+
+ for (i = 0, j = aha_logpt; i < LOGSIZE; i++) {
+ c = aha_log[j];
+ if (++j == LOGSIZE) j = 0;
+ if (skip-- > 0)
+ continue;
+ if (c < MAXLOG_VALUE)
+ printf(" %s", logtbl[c].name);
+ else
+ printf("-%x", c & 0x7f);
+ }
+ return 0;
+}
+
+aha_print_stat()
+{
+ register int i;
+ register char *p;
+ for (i = 0; i < MAXLOG_VALUE; i++) {
+ if (p = logtbl[i].name)
+ printf("%d %s\n", logtbl[i].count, p);
+ }
+}
+
+#else /*TRACE*/
+#define LOG(e,f)
+#define LOGSIZE
+#endif /*TRACE*/
+
+#else /*DEBUG*/
+#define PRINT(x)
+#define LOG(e,f)
+#define LOGSIZE
+#define TRCHECK
+#define TR(a)
+
+#endif /*DEBUG*/
+
+/* Utility functions at end */
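+/* Calling convention, as inferred from the call sites in this file:
+ aha_command(port, opcode, out_ptr, out_len, in_ptr, in_len, wait)
+ writes out_len parameter bytes after the opcode, then reads back
+ in_len result bytes; the last argument appears to select whether
+ the routine polls for command completion. */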
+
+
+/*
+ * Probe/Slave/Attach functions
+ */
+
+int aha_dotarget = 1; /* somehow on some boards this is trouble */
+
+/*
+ * Probe routine:
+ * Should find out (a) if the controller is
+ * present and (b) which/where slaves are present.
+ *
+ * Implementation:
+ * Just ask the board to do it
+ */
+aha_probe(port, ui)
+ register port;
+ struct bus_ctlr *ui;
+{
+ int unit = ui->unit;
+ aha_softc_t aha = &aha_softc_data[unit];
+ int target_id;
+ scsi_softc_t *sc;
+ spl_t s;
+ boolean_t did_banner = FALSE;
+ struct aha_devs installed;
+ struct aha_conf conf;
+
+ /* No interrupts yet */
+ s = splbio();
+
+ /*
+ * We should be called with a sensible port, but you never know.
+ * Send an echo command and see that we get it back properly
+ */
+ {
+ register unsigned char st;
+
+ st = inb(AHA_STATUS_PORT(port));
+
+ /*
+ * There is no board reset in case of reboot with
+ * no power-on/power-off sequence. Test it and do
+ * the reset if necessary.
+ */
+
+ if (!(st & AHA_CSR_INIT_REQ)) {
+ outb(AHA_CONTROL_PORT(port),
+ AHA_CTL_SOFT_RESET|AHA_CTL_HARD_RESET);
+ while ((st = inb(AHA_STATUS_PORT(port))) &
+ AHA_CSR_SELF_TEST);
+ }
+ if ((st & AHA_CSR_DATAO_FULL) ||
+ !(st & AHA_CSR_INIT_REQ))
+ goto fail;
+
+ outb(AHA_COMMAND_PORT(port), AHA_CMD_ECHO);
+ delay(1000);/*?*/
+ st = inb(AHA_STATUS_PORT(port));
+ if (st & (AHA_CSR_CMD_ERR|AHA_CSR_DATAO_FULL))
+ goto fail;
+
+ outb(AHA_COMMAND_PORT(port), 0x5e);
+ delay(1000);
+
+ st = inb(AHA_STATUS_PORT(port));
+ if ((st & AHA_CSR_CMD_ERR) ||
+ ((st & AHA_CSR_DATAI_FULL) == 0))
+ goto fail;
+
+ st = inb(AHA_DATA_PORT(port));
+ if (st != 0x5e) {
+fail: splx(s);
+ return 0;
+ }
+ /*
+ * augment test with check for echoing inverse and with
+ * test for enhanced adapter with standard ports enabled.
+ */
+
+ /* Check that 0xa1 echoed as well as 0x5e */
+
+ outb(AHA_COMMAND_PORT(port), AHA_CMD_ECHO);
+ delay(1000);/*?*/
+ st = inb(AHA_STATUS_PORT(port));
+ if (st & (AHA_CSR_CMD_ERR|AHA_CSR_DATAO_FULL))
+ goto fail;
+
+ outb(AHA_COMMAND_PORT(port), 0xa1);
+ delay(1000);
+
+ st = inb(AHA_STATUS_PORT(port));
+ if ((st & AHA_CSR_CMD_ERR) ||
+ ((st & AHA_CSR_DATAI_FULL) == 0))
+ goto fail;
+
+ st = inb(AHA_DATA_PORT(port));
+ if (st != 0xa1)
+ goto fail ;
+
+ { /* Check that port isn't 174x in enhanced mode
+ with standard mode ports enabled. This should be
+ ignored because it will be caught and correctly
+ handled by eaha_probe(). See TRM4-11..13.
+ dph
+ */
+ unsigned z ;
+ static unsigned port_table[] =
+ {0,0,0x130,0x134,0x230,0x234,0x330,0x334};
+ for (z= 0x1000; z<= 0xF000; z+= 0x1000)
+ if (inb(z+0xC80) == 0x04 &&
+ inb(z+0xC81) == 0x90 &&
+ (inb(z+0xCC0) & 0x80) == 0x80 &&
+ port_table [inb(z+0xCC0) & 0x07] == port)
+ goto fail ;
+ }
+ outb(AHA_CONTROL_PORT(port), AHA_CTL_INTR_CLR);
+ }
+
+#if MAPPABLE
+ /* Mappable version side */
+ AHA_probe(port, ui);
+#endif /*MAPPABLE*/
+
+ /*
+ * Initialize hw descriptor, cache some pointers
+ */
+ aha_softc[unit] = aha;
+ aha->port = port;
+
+ sc = scsi_master_alloc(unit, aha);
+ aha->sc = sc;
+
+ simple_lock_init(&aha->aha_lock);
+ sc->go = aha_go;
+ sc->watchdog = scsi_watchdog;
+ sc->probe = aha_probe_target;
+ aha->wd.reset = aha_reset_scsibus;
+
+ /* Stupid limitation, no way around it */
+ sc->max_dma_data = (AHA_MAX_SEGLIST-1) * MACHINE_PGBYTES;
+
+
+ /* XXX
+ * I'm not sure how much use this bit of code is really.
+ * On the 1542CF we don't really want to try and initialize
+ * the mailboxes before unlocking them in any case, and
+ * resetting the card is done above.
+ */
+#if 0
+#if 0
+ /*
+ * Reset board.
+ */
+ aha_reset(port, TRUE);
+#else
+ /*
+ * Initialize mailboxes
+ */
+ aha_init_1(aha);
+#endif
+#endif
+
+ /*
+ * Who are we ?
+ */
+ {
+ struct aha_inq inq;
+ struct aha_extbios extbios;
+ char *id;
+
+ aha_command(port, AHA_CMD_INQUIRY, 0, 0, &inq, sizeof(inq), TRUE);
+
+ switch (inq.board_id) {
+ case AHA_BID_1540_B16:
+ case AHA_BID_1540_B64:
+ id = "1540"; break;
+ case AHA_BID_1540B:
+ id = "1540B/1542B"; break;
+ case AHA_BID_1640:
+ id = "1640"; break;
+ case AHA_BID_1740:
+ id = "1740 Unsupported!!"; break;
+ case AHA_BID_1542C:
+ id = "1542C"; aha_dotarget = 0; break;
+ case AHA_BID_1542CF:
+ id = "1542CF"; break;
+ default:
+ id = 0; break;
+ }
+
+ printf("Adaptec %s [id %x], rev %c%c, options x%x\n",
+ id ? id : "Board",
+ inq.board_id, inq.frl_1, inq.frl_2, inq.options);
+
+ /*
+ * If we are a 1542C or 1542CF disable the extended bios
+ * so that the mailbox interface is unlocked.
+ * No need to check the extended bios flags as some of the
+ * extensions that cause us problems are not flagged in
+ * that byte.
+ */
+ if (inq.board_id == 0x44 || inq.board_id == 0x45) {
+ aha_command(port, AHA_EXT_BIOS, 0, 0, &extbios,
+ sizeof(extbios), TRUE);
+#ifdef AHADEBUG
+ printf("aha: extended bios flags 0x%x\n", extbios.flags);
+ printf("aha: mailboxlock 0x%x\n", extbios.mblock);
+#endif /* AHADEBUG */
+
+ printf("aha: 1542C/CF detected, unlocking mailbox\n");
+
+ /* XXX - This sends the mailboxlock code out to the
+ * controller. We need to output a 0, then the
+ * code...so since we don't care about the flags
+ * anyway, we just zero out that field and re-use
+ * the struct.
+ */
+ extbios.flags = 0;
+ aha_command(port, AHA_MBX_ENABLE, &extbios,
+ sizeof(extbios), 0, 0, TRUE);
+ }
+
+ }
+doconf:
+ /*
+ * Readin conf data
+ */
+ aha_command(port, AHA_CMD_GET_CONFIG, 0, 0, &conf, sizeof(conf), TRUE);
+
+ {
+ unsigned char args;
+
+ /*
+ * Change the bus on/off times to not clash with
+ * other dma users.
+ */
+ args = 7;
+ aha_command(port, AHA_CMD_SET_BUSON, &args, 1, 0, 0, TRUE);
+ args = 5;
+ aha_command(port, AHA_CMD_SET_BUSOFF, &args, 1, 0, 0, TRUE);
+ }
+
+ /* XXX - This is _REALLY_ sickening. */
+ /*
+ * Set up the DMA channel we'll be using.
+ */
+ {
+ register int d, i;
+ static struct {
+ unsigned char port;
+ unsigned char init_data;
+ } aha_dma_init[8][2] = {
+ {{0x0b,0x0c}, {0x0a,0x00}}, /* channel 0 */
+ {{0,0},{0,0}},
+ {{0,0},{0,0}},
+ {{0,0},{0,0}},
+ {{0,0},{0,0}},
+ {{0xd6,0xc1}, {0xd4,0x01}}, /* channel 5 (def) */
+ {{0xd6,0xc2}, {0xd4,0x02}}, /* channel 6 */
+ {{0xd6,0xc3}, {0xd4,0x03}} /* channel 7 */
+ };
+
+
+ for (i = 0; i < 8; i++)
+ if ((1 << i) & conf.intr_ch) break;
+ i += 9;
+
+#if there_was_a_way
+ /*
+ * On second unit, avoid clashes with first
+ */
+ if ((unit > 0) && (ui->sysdep1 != i)) {
+ printf("Reprogramming irq and dma ch..\n");
+ ....
+ goto doconf;
+ }
+#endif
+
+ /*
+ * Initialize the DMA controller viz the channel we'll use
+ */
+ for (d = 0; d < 8; d++)
+ if ((1 << d) & conf.dma_arbitration) break;
+
+ outb(aha_dma_init[d][0].port, aha_dma_init[d][0].init_data);
+ outb(aha_dma_init[d][1].port, aha_dma_init[d][1].init_data);
+
+ /* make mapping phys->virt possible for CCBs */
+ aha->I_hold_my_phys_address =
+ kvtoAT((vm_offset_t)&aha->I_hold_my_phys_address);
+
+ /*
+ * Our SCSI ID. (xxx) On some boards this is SW programmable.
+ */
+ sc->initiator_id = conf.my_scsi_id;
+
+ printf("%s%d: [dma ch %d intr ch %d] my SCSI id is %d",
+ ui->name, unit, d, i, sc->initiator_id);
+
+ /* Interrupt vector setup */
+ ui->sysdep1 = i;
+ take_ctlr_irq(ui);
+ }
+
+ /*
+ * More initializations
+ */
+ {
+ register target_info_t *tgt;
+
+ aha_init(aha);
+
+ /* allocate a desc for tgt mode role */
+ tgt = aha_tgt_alloc(aha, sc->initiator_id, 1, 0);
+ sccpu_new_initiator(tgt, tgt); /* self */
+
+ }
+
+ /* Now we could take interrupts, BUT we do not want to
+ be selected as targets by some other host just yet */
+
+ /*
+ * For all possible targets, see if there is one and allocate
+ * a descriptor for it if it is there.
+ * This includes ourselves, when acting as target
+ */
+ aha_command( port, AHA_CMD_FIND_DEVICES, 0, 0, &installed, sizeof(installed), TRUE);
+ for (target_id = 0; target_id < 8; target_id++) {
+
+ if (target_id == sc->initiator_id) /* done already */
+ continue;
+
+ if (installed.tgt_luns[target_id] == 0)
+ continue;
+
+ printf(",%s%d", did_banner++ ? " " : " target(s) at ",
+ target_id);
+
+ /* Normally, only LUN 0 */
+ if (installed.tgt_luns[target_id] != 1)
+ printf("(%x)", installed.tgt_luns[target_id]);
+ /*
+ * Found a target
+ */
+ (void) aha_tgt_alloc(aha, target_id, 1/*no REQSNS*/, 0);
+
+ }
+ printf(".\n");
+ splx(s);
+
+ return 1;
+}
+
+boolean_t
+aha_probe_target(tgt, ior)
+ target_info_t *tgt;
+ io_req_t ior;
+{
+ aha_softc_t aha = aha_softc[tgt->masterno];
+ boolean_t newlywed;
+
+ newlywed = (tgt->cmd_ptr == 0);
+ if (newlywed) {
+ /* desc was allocated afresh */
+ (void) aha_tgt_alloc(aha,tgt->target_id, 1/*no REQSNS*/, tgt);
+ }
+
+ if (scsi_inquiry(tgt, SCSI_INQ_STD_DATA) == SCSI_RET_DEVICE_DOWN)
+ return FALSE;
+
+ tgt->flags = TGT_ALIVE;
+ return TRUE;
+}
+
+aha_reset(port, quick)
+{
+ register unsigned char st;
+
+ /*
+ * Reset board and wait till done
+ */
+ outb(AHA_CONTROL_PORT(port), AHA_CTL_SOFT_RESET);
+ do {
+ delay(25);
+ st = inb(AHA_STATUS_PORT(port));
+ } while ((st & (AHA_CSR_IDLE|AHA_CSR_INIT_REQ)) == 0);
+
+ if (quick) return;
+
+ /*
+ * reset the scsi bus. Does NOT generate an interrupt (bozos)
+ */
+ outb(AHA_CONTROL_PORT(port), AHA_CTL_SCSI_RST);
+}
+
+aha_init_1(aha)
+ aha_softc_t aha;
+{
+ struct aha_init a;
+ vm_offset_t phys;
+
+ bzero(&aha->mb, sizeof(aha->mb)); /* also means all free */
+ a.mb_count = AHA_NMBOXES;
+ phys = kvtoAT((vm_offset_t)&aha->mb);
+ AHA_ADDRESS_SET(a.mb_ptr, phys);
+ aha_command(aha->port, AHA_CMD_INIT, &a, sizeof(a), 0, 0, TRUE);
+}
+
+aha_init_2(port)
+{
+ unsigned char disable = AHA_MBO_DISABLE;
+ struct aha_tgt role;
+
+ /* Disable MBO available interrupt */
+ aha_command(port, AHA_CMD_MBO_IE, &disable, 1, 0,0, FALSE);
+
+ if (aha_dotarget) {
+ /* Enable target mode role */
+ role.enable = 1;
+ role.luns = 1; /* only LUN 0 */
+ aha_command(port, AHA_CMD_ENB_TGT_MODE, &role, sizeof(role), 0, 0, TRUE);
+ }
+}
+
+aha_init(aha)
+ aha_softc_t aha;
+{
+ aha_init_1(aha);
+ aha_init_2(aha->port);
+}
+
+/*
+ * Operational functions
+ */
+
+/*
+ * Start a SCSI command on a target
+ */
+aha_go(tgt, cmd_count, in_count, cmd_only)
+ target_info_t *tgt;
+ boolean_t cmd_only;
+{
+ aha_softc_t aha;
+ spl_t s;
+ struct aha_ccb_raw *rccb;
+ int len;
+ vm_offset_t virt, phys;
+
+#if CBUS
+ at386_io_lock_state();
+#endif
+
+ LOG(1,"go");
+
+ aha = (aha_softc_t)tgt->hw_state;
+
+/* XXX delay the handling of the ccb till later */
+ rccb = &(aha->aha_ccbs[tgt->target_id]);
+ rccb->active_target = tgt;
+
+ /*
+ * We can do real DMA.
+ */
+/* tgt->transient_state.copy_count = 0; unused */
+/* tgt->transient_state.dma_offset = 0; unused */
+
+ tgt->transient_state.cmd_count = cmd_count;
+
+ if ((tgt->cur_cmd == SCSI_CMD_WRITE) ||
+ (tgt->cur_cmd == SCSI_CMD_LONG_WRITE)){
+ io_req_t ior = tgt->ior;
+ register int len = ior->io_count;
+
+ tgt->transient_state.out_count = len;
+
+ /* How do we avoid leaks here ? Trust the board
+ will do zero-padding, for now. XXX CHECKME */
+#if 0
+ if (len < tgt->block_size) {
+ bzero(to + len, tgt->block_size - len);
+ len = tgt->block_size;
+ tgt->transient_state.out_count = len;
+ }
+#endif
+ } else {
+ tgt->transient_state.out_count = 0;
+ }
+
+ /* See above for in_count < block_size */
+ tgt->transient_state.in_count = in_count;
+
+ /*
+ * Setup CCB state
+ */
+ tgt->done = SCSI_RET_IN_PROGRESS;
+
+ switch (tgt->cur_cmd) {
+ case SCSI_CMD_READ:
+ case SCSI_CMD_LONG_READ:
+ LOG(9,"readop");
+ virt = (vm_offset_t)tgt->ior->io_data;
+ len = tgt->transient_state.in_count;
+ rccb->ccb.ccb_in = 1; rccb->ccb.ccb_out = 0;
+ break;
+ case SCSI_CMD_WRITE:
+ case SCSI_CMD_LONG_WRITE:
+ LOG(0x1a,"writeop");
+ virt = (vm_offset_t)tgt->ior->io_data;
+ len = tgt->transient_state.out_count;
+ rccb->ccb.ccb_in = 0; rccb->ccb.ccb_out = 1;
+ break;
+ case SCSI_CMD_INQUIRY:
+ case SCSI_CMD_REQUEST_SENSE:
+ case SCSI_CMD_MODE_SENSE:
+ case SCSI_CMD_RECEIVE_DIAG_RESULTS:
+ case SCSI_CMD_READ_CAPACITY:
+ case SCSI_CMD_READ_BLOCK_LIMITS:
+ case SCSI_CMD_READ_TOC:
+ case SCSI_CMD_READ_SUBCH:
+ case SCSI_CMD_READ_HEADER:
+ case 0xc4: /* despised: SCSI_CMD_DEC_PLAYBACK_STATUS */
+ case 0xc6: /* despised: SCSI_CMD_TOSHIBA_READ_SUBCH_Q */
+ case 0xc7: /* despised: SCSI_CMD_TOSHIBA_READ_TOC_ENTRY */
+ case 0xdd: /* despised: SCSI_CMD_NEC_READ_SUBCH_Q */
+ case 0xde: /* despised: SCSI_CMD_NEC_READ_TOC */
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ virt = (vm_offset_t)tgt->cmd_ptr;
+ len = tgt->transient_state.in_count;
+ rccb->ccb.ccb_in = 1; rccb->ccb.ccb_out = 0;
+ break;
+ case SCSI_CMD_MODE_SELECT:
+ case SCSI_CMD_REASSIGN_BLOCKS:
+ case SCSI_CMD_FORMAT_UNIT:
+ case 0xc9: /* vendor-spec: SCSI_CMD_DEC_PLAYBACK_CONTROL */
+ { register int cs = sizeof_scsi_command(tgt->cur_cmd);
+ tgt->transient_state.cmd_count = cs;
+ len =
+ tgt->transient_state.out_count = cmd_count - cs;
+ virt = (vm_offset_t)tgt->cmd_ptr + cs;
+ rccb->ccb.ccb_in = 0; rccb->ccb.ccb_out = 1;
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ }
+ break;
+ default:
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ virt = 0;
+ len = 0;
+ rccb->ccb.ccb_in = 0; rccb->ccb.ccb_out = 0;
+ }
+
+#if CBUS
+ at386_io_lock(MP_DEV_WAIT);
+#endif
+ aha_prepare_rccb(tgt, rccb, virt, len);
+
+ rccb->ccb.ccb_lun = tgt->lun;
+ rccb->ccb.ccb_scsi_id = tgt->target_id;
+
+/* AHA_LENGTH_SET(rccb->ccb.ccb_linkptr, 0); unused */
+/* rccb->ccb.ccb_linkid = 0; unused */
+
+#if !CBUS
+ s = splbio();
+#endif
+
+ LOG(3,"enqueue");
+
+ aha_start_scsi(aha, &rccb->ccb);
+
+#if CBUS
+ at386_io_unlock();
+#else
+ splx(s);
+#endif
+}
+
+aha_prepare_rccb(tgt, rccb, virt, len)
+ target_info_t *tgt;
+ struct aha_ccb_raw *rccb;
+ vm_offset_t virt;
+ vm_size_t len;
+{
+ vm_offset_t phys;
+#ifdef CBUS
+ int cbus_window;
+#endif /* CBUS */
+
+ rccb->ccb.ccb_cmd_len = tgt->transient_state.cmd_count;
+
+ /* this opcode is refused, grrrr. */
+/* rccb->ccb.ccb_code = AHA_CCB_ICMD_R; default common case */
+ rccb->ccb.ccb_code = AHA_CCB_I_CMD; /* default common case */
+ AHA_LENGTH_SET(rccb->ccb.ccb_datalen, len);/* default common case */
+
+#ifdef CBUS
+ if (tgt->aha_cbus_window == 0)
+ tgt->aha_cbus_window = cbus_alloc_win(AHA_MAX_SEGLIST+1);
+ cbus_window = tgt->aha_cbus_window;
+#endif /* CBUS */
+
+ if (virt == 0) {
+ /* no xfers */
+ AHA_ADDRESS_SET(rccb->ccb.ccb_dataptr, 0);
+ } else if (len <= MACHINE_PGBYTES) {
+/* INCORRECT: what if across two pages :INCORRECT */
+ /* simple xfer */
+#ifdef CBUS
+ phys = cbus_kvtoAT_ww(virt, cbus_window);
+#else /* CBUS */
+ phys = kvtophys(virt);
+#endif /* CBUS */
+ AHA_ADDRESS_SET(rccb->ccb.ccb_dataptr, phys);
+ } else {
+ /* messy xfer */
+ aha_seglist_t *seglist;
+ vm_offset_t ph1, off;
+ vm_size_t l1;
+
+ /* this opcode does not work, grrrrr */
+/* rccb->ccb.ccb_code = AHA_CCB_I_CMD_SG_R;*/
+ rccb->ccb.ccb_code = AHA_CCB_I_CMD_SG;
+
+ if (tgt->dma_ptr == 0)
+ aha_alloc_segment_list(tgt);
+ seglist = (aha_seglist_t *) tgt->dma_ptr;
+#ifdef CBUS
+ phys = cbus_kvtoAT_ww(seglist, cbus_window);
+ cbus_window++;
+#else /* CBUS */
+ phys = kvtophys((vm_offset_t) seglist);
+#endif /* CBUS */
+ AHA_ADDRESS_SET(rccb->ccb.ccb_dataptr, phys);
+
+ ph1 = /*i386_trunc_page*/ virt & ~(MACHINE_PGBYTES - 1);
+ off = virt & (MACHINE_PGBYTES - 1);
+#ifdef CBUS
+ ph1 = cbus_kvtoAT_ww(ph1, cbus_window) + off;
+ cbus_window++;
+#else /* CBUS */
+ ph1 = kvtophys(ph1) + off;
+#endif /* CBUS */
+ l1 = MACHINE_PGBYTES - off;
+
+ off = 1;/* now #pages */
+ while (1) {
+ AHA_ADDRESS_SET(seglist->ptr, ph1);
+ AHA_LENGTH_SET(seglist->len, l1);
+ seglist++;
+
+ if ((len -= l1) <= 0)
+ break;
+ virt += l1; off++;
+
+#ifdef CBUS
+ ph1 = cbus_kvtoAT_ww(virt, cbus_window);
+ cbus_window++;
+#else /* CBUS */
+ ph1 = kvtophys(virt);
+#endif /* CBUS */
+ l1 = (len > MACHINE_PGBYTES) ? MACHINE_PGBYTES : len;
+ }
+ l1 = off * sizeof(*seglist);
+ AHA_LENGTH_SET(rccb->ccb.ccb_datalen, l1);
+ }
+}
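+
+/*
+ * Worked example of the scatter/gather list built above (a sketch for
+ * illustration only, assuming MACHINE_PGBYTES == 0x1000): a transfer of
+ * len = 0x2800 bytes starting at page offset 0x800 yields three entries,
+ *
+ *	entry 0:  phys(virt)           len 0x0800   (to the end of page 1)
+ *	entry 1:  phys(virt + 0x0800)  len 0x1000
+ *	entry 2:  phys(virt + 0x1800)  len 0x1000
+ *
+ * and ccb_datalen is set to 3 * sizeof(aha_seglist_t).  Note that the
+ * "simple xfer" branch above trusts len <= MACHINE_PGBYTES to mean
+ * "physically contiguous" (the INCORRECT comment); a straddle check such
+ * as the sketch below would be needed to make that assumption safe.
+ */
+#if 0	/* illustration only, not part of the driver */
+	boolean_t one_segment =
+		(len <= MACHINE_PGBYTES) &&
+		((virt & ~(MACHINE_PGBYTES - 1)) ==
+		 ((virt + len - 1) & ~(MACHINE_PGBYTES - 1)));
+#endif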
+
+aha_start_scsi(aha, ccb)
+ aha_softc_t aha;
+ aha_ccb_t *ccb;
+{
+ register aha_mbox_t *mb;
+ register idx;
+ vm_offset_t phys;
+ aha_mbox_t mbo;
+ spl_t s;
+
+ LOG(4,"start");
+ LOG(0x80+ccb->ccb_scsi_id,0);
+
+ /*
+ * Get an MBO, spin if necessary (takes little time)
+ */
+ s = splbio();
+ phys = kvtoAT((vm_offset_t)ccb);
+ /* might cross pages, but should be ok (kernel is contig) */
+ AHA_MB_SET_PTR(&mbo,phys);
+ mbo.mb.mb_cmd = AHA_MBO_START;
+
+ simple_lock(&aha->aha_lock);
+ if (aha->wd.nactive++ == 0)
+ aha->wd.watchdog_state = SCSI_WD_ACTIVE;
+ idx = aha->mb.oidx;
+ aha->mb.oidx = next_mbx_idx(idx);
+ mb = &aha->mb.omb[idx];
+ while (mb->mb.mb_status != AHA_MBO_FREE)
+ delay(1);
+ mb->bits = mbo.bits;
+ simple_unlock(&aha->aha_lock);
+
+ /*
+ * Start the board going
+ */
+ aha_command(aha->port, AHA_CMD_START, 0, 0, 0, 0, FALSE);
+ splx(s);
+}
+
+/*
+ * Interrupt routine
+ * Take interrupts from the board
+ *
+ * Implementation:
+ * TBD
+ */
+aha_intr(unit)
+{
+ register aha_softc_t aha;
+ register port;
+ register csr, intr;
+#if MAPPABLE
+ extern boolean_t rz_use_mapped_interface;
+
+ if (rz_use_mapped_interface)
+ return AHA_intr(unit);
+#endif /*MAPPABLE*/
+
+ aha = aha_softc[unit];
+ port = aha->port;
+
+ LOG(5,"\n\tintr");
+gotintr:
+ /* collect ephemeral information */
+ csr = inb(AHA_STATUS_PORT(port));
+ intr = inb(AHA_INTR_PORT(port));
+
+ /*
+ * Check for errors
+ */
+ if (csr & (AHA_CSR_DIAG_FAIL|AHA_CSR_CMD_ERR)) {
+/* XXX */ gimmeabreak();
+ }
+
+ /* drop spurious interrupts */
+ if ((intr & AHA_INTR_PENDING) == 0) {
+ LOG(2,"SPURIOUS");
+ return;
+ }
+ outb(AHA_CONTROL_PORT(port), AHA_CTL_INTR_CLR);
+
+TR(csr);TR(intr);TRCHECK
+
+ if (intr & AHA_INTR_RST)
+ return aha_bus_reset(aha);
+
+	/* we got an interrupt all right */
+ if (aha->wd.nactive)
+ aha->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+ if (intr == AHA_INTR_DONE) {
+ /* csr & AHA_CSR_CMD_ERR --> with error */
+ LOG(6,"done");
+ return;
+ }
+
+/* if (intr & AHA_INTR_MBO_AVAIL) will not happen */
+
+ /* Some real work today ? */
+ if (intr & AHA_INTR_MBI_FULL) {
+ register int idx;
+ register aha_mbox_t *mb;
+ int nscan = 0;
+ aha_mbox_t mbi;
+rescan:
+ simple_lock(&aha->aha_lock);
+ idx = aha->mb.iidx;
+ aha->mb.iidx = next_mbx_idx(idx);
+ mb = &aha->mb.imb[idx];
+ mbi.bits = mb->bits;
+ mb->mb.mb_status = AHA_MBI_FREE;
+ simple_unlock(&aha->aha_lock);
+
+ nscan++;
+
+ switch (mbi.mb.mb_status) {
+
+ case AHA_MBI_FREE:
+ if (nscan >= AHA_NMBOXES)
+ return;
+ goto rescan;
+ break;
+
+ case AHA_MBI_SUCCESS:
+ case AHA_MBI_ERROR:
+ aha_initiator_intr(aha, mbi);
+ break;
+
+ case AHA_MBI_NEED_CCB:
+ aha_target_intr(aha, mbi);
+ break;
+
+/* case AHA_MBI_ABORTED: -- this we won't see */
+/* case AHA_MBI_NOT_FOUND: -- this we won't see */
+ default:
+ log( LOG_KERN,
+ "aha%d: Bogus status (x%x) in MBI\n",
+ unit, mbi.mb.mb_status);
+ break;
+ }
+
+ /* peek ahead */
+ if (aha->mb.imb[aha->mb.iidx].mb.mb_status != AHA_MBI_FREE)
+ goto rescan;
+ }
+
+ /* See if more work ready */
+ if (inb(AHA_INTR_PORT(port)) & AHA_INTR_PENDING) {
+ LOG(7,"\n\tre-intr");
+ goto gotintr;
+ }
+}
+
+/*
+ * The interrupt routine turns to one of these two
+ * functions, depending on the incoming mbi's role
+ */
+aha_target_intr(aha, mbi)
+ aha_softc_t aha;
+ aha_mbox_t mbi;
+{
+ target_info_t *initiator; /* this is the caller */
+ target_info_t *self; /* this is us */
+ int len;
+
+ if (mbi.mbt.mb_cmd != AHA_MBI_NEED_CCB)
+ gimmeabreak();
+
+ /* If we got here this is not zero .. */
+ self = aha->sc->target[aha->sc->initiator_id];
+
+ initiator = aha->sc->target[mbi.mbt.mb_initiator_id];
+ /* ..but initiators are not required to answer to our inquiry */
+ if (initiator == 0) {
+ /* allocate */
+ initiator = aha_tgt_alloc(aha, mbi.mbt.mb_initiator_id,
+ sizeof(scsi_sense_data_t) + 5, 0);
+
+		/* We do not know here whether the host was down when
+ we inquired, or it refused the connection. Leave
+ the decision on how we will talk to it to higher
+ level code */
+ LOG(0xC, "new_initiator");
+ sccpu_new_initiator(self, initiator);
+ }
+
+ /* The right thing to do would be build an ior
+ and call the self->dev_ops->strategy routine,
+ but we cannot allocate it at interrupt level.
+ Also note that we are now disconnected from the
+ initiator, no way to do anything else with it
+ but reconnect and do what it wants us to do */
+
+ /* obviously, this needs both spl and MP protection */
+ self->dev_info.cpu.req_pending = TRUE;
+ self->dev_info.cpu.req_id = mbi.mbt.mb_initiator_id;
+ self->dev_info.cpu.req_lun = mbi.mbt.mb_lun;
+ self->dev_info.cpu.req_cmd =
+ mbi.mbt.mb_isa_send ? SCSI_CMD_SEND: SCSI_CMD_RECEIVE;
+ len = (mbi.mbt.mb_data_len_msb << 16) |
+ (mbi.mbt.mb_data_len_mid << 8 );
+ len += 0x100;/* truncation problem */
+ self->dev_info.cpu.req_len = len;
+
+ LOG(0xB,"tgt-mode-restart");
+ (*self->dev_ops->restart)( self, FALSE);
+
+ /* The call above has either prepared the data,
+ placing an ior on self, or it handled it some
+ other way */
+ if (self->ior == 0)
+ return; /* I guess we'll do it later */
+
+ {
+ struct aha_ccb_raw *rccb;
+
+ rccb = &(aha->aha_ccbs[initiator->target_id]);
+ rccb->active_target = initiator;
+ if (self->dev_info.cpu.req_cmd == SCSI_CMD_SEND) {
+ rccb->ccb.ccb_in = 1;
+ rccb->ccb.ccb_out = 0;
+ } else {
+ rccb->ccb.ccb_in = 0;
+ rccb->ccb.ccb_out = 1;
+ }
+
+ aha_prepare_rccb(initiator, rccb,
+ (vm_offset_t)self->ior->io_data, self->ior->io_count);
+ rccb->ccb.ccb_code = AHA_CCB_T_CMD;
+ rccb->ccb.ccb_lun = initiator->lun;
+ rccb->ccb.ccb_scsi_id = initiator->target_id;
+
+ simple_lock(&aha->aha_lock);
+ if (aha->wd.nactive++ == 0)
+ aha->wd.watchdog_state = SCSI_WD_ACTIVE;
+ simple_unlock(&aha->aha_lock);
+
+ aha_start_scsi(aha, &rccb->ccb);
+ }
+}
+
+aha_initiator_intr(aha, mbi)
+ aha_softc_t aha;
+ aha_mbox_t mbi;
+{
+ struct aha_ccb_raw *rccb;
+ scsi2_status_byte_t status;
+ target_info_t *tgt;
+
+ rccb = mb_to_rccb(aha,mbi);
+ tgt = rccb->active_target;
+ rccb->active_target = 0;
+
+ /* shortcut (sic!) */
+ if (mbi.mb.mb_status == AHA_MBI_SUCCESS)
+ goto allok;
+
+ switch (rccb->ccb.ccb_hstatus) {
+ case AHA_HST_SUCCESS:
+allok:
+ status = rccb->ccb.ccb_status;
+ if (status.st.scsi_status_code != SCSI_ST_GOOD) {
+ scsi_error(tgt, SCSI_ERR_STATUS, status.bits, 0);
+ tgt->done = (status.st.scsi_status_code == SCSI_ST_BUSY) ?
+ SCSI_RET_RETRY : SCSI_RET_NEED_SENSE;
+ } else
+ tgt->done = SCSI_RET_SUCCESS;
+ break;
+ case AHA_HST_SEL_TIMEO:
+ if (tgt->flags & TGT_FULLY_PROBED)
+ tgt->flags = 0; /* went offline */
+ tgt->done = SCSI_RET_DEVICE_DOWN;
+ break;
+ case AHA_HST_DATA_OVRUN:
+ /* BUT we don't know if this is an underrun.
+ It is ok if we get less data than we asked
+ for, in a number of cases. Most boards do not
+		   seem to generate this anyway, but some do. */
+ { register int cmd = tgt->cur_cmd;
+ switch (cmd) {
+ case SCSI_CMD_INQUIRY:
+ case SCSI_CMD_REQUEST_SENSE:
+ break;
+ default:
+			printf("aha: U/OVRUN on scsi command x%x\n", cmd);
+ gimmeabreak();
+ }
+ }
+ goto allok;
+ case AHA_HST_BAD_DISCONN:
+ printf("aha: bad disconnect\n");
+ tgt->done = SCSI_RET_ABORTED;
+ break;
+ case AHA_HST_BAD_PHASE_SEQ:
+ /* we'll get an interrupt soon */
+ printf("aha: bad PHASE sequencing\n");
+ tgt->done = SCSI_RET_ABORTED;
+ break;
+ case AHA_HST_BAD_OPCODE: /* fall through */
+ case AHA_HST_BAD_PARAM:
+printf("aha: BADCCB\n");gimmeabreak();
+ tgt->done = SCSI_RET_RETRY;
+ break;
+ case AHA_HST_BAD_LINK_LUN: /* these should not happen */
+ case AHA_HST_INVALID_TDIR:
+ case AHA_HST_DUPLICATED_CCB:
+ printf("aha: bad hstatus (x%x)\n", rccb->ccb.ccb_hstatus);
+ tgt->done = SCSI_RET_ABORTED;
+ break;
+ }
+
+ LOG(8,"end");
+
+ simple_lock(&aha->aha_lock);
+ if (aha->wd.nactive-- == 1)
+ aha->wd.watchdog_state = SCSI_WD_INACTIVE;
+ simple_unlock(&aha->aha_lock);
+
+ if (tgt->ior) {
+ LOG(0xA,"ops->restart");
+ (*tgt->dev_ops->restart)( tgt, TRUE);
+ }
+
+ return FALSE;
+}
+
+/*
+ * The bus was reset
+ */
+aha_bus_reset(aha)
+ register aha_softc_t aha;
+{
+ register port = aha->port;
+
+ LOG(0x1d,"bus_reset");
+
+ /*
+ * Clear bus descriptor
+ */
+ aha->wd.nactive = 0;
+ aha_reset(port, TRUE);
+ aha_init(aha);
+
+ printf("aha: (%d) bus reset ", ++aha->wd.reset_count);
+ delay(scsi_delay_after_reset); /* some targets take long to reset */
+
+ if (aha->sc == 0) /* sanity */
+ return;
+
+ scsi_bus_was_reset(aha->sc);
+}
+
+/*
+ * Watchdog
+ *
+ * We know that some (name withdrawn) disks get
+ * stuck in the middle of dma phases...
+ */
+aha_reset_scsibus(aha)
+ register aha_softc_t aha;
+{
+ register target_info_t *tgt;
+ register port = aha->port;
+ register int i;
+
+ for (i = 0; i < AHA_NCCB; i++) {
+ tgt = aha->aha_ccbs[i].active_target;
+ if (/*scsi_debug &&*/ tgt)
+ printf("Target %d was active, cmd x%x in x%x out x%x\n",
+ tgt->target_id, tgt->cur_cmd,
+ tgt->transient_state.in_count,
+ tgt->transient_state.out_count);
+ }
+ aha_reset(port, FALSE);
+ delay(35);
+ /* no interrupt will come */
+ aha_bus_reset(aha);
+}
+
+/*
+ * Utilities
+ */
+
+/*
+ * Send a command to the board along with some
+ * optional parameters, optionally receive the
+ * results at command completion, returns how
+ * many bytes we did NOT get back.
+ */
+aha_command(port, cmd, outp, outc, inp, inc, clear_interrupt)
+ unsigned char *outp, *inp;
+{
+ register unsigned char st;
+ boolean_t failed = TRUE;
+
+ do {
+ st = inb(AHA_STATUS_PORT(port));
+ } while (st & AHA_CSR_DATAO_FULL);
+
+ /* Output command and any data */
+ outb(AHA_COMMAND_PORT(port), cmd);
+ while (outc--) {
+ do {
+ st = inb(AHA_STATUS_PORT(port));
+ if (st & AHA_CSR_CMD_ERR) goto out;
+ } while (st & AHA_CSR_DATAO_FULL);
+
+ outb(AHA_COMMAND_PORT(port), *outp++);
+ }
+
+ /* get any data */
+ while (inc--) {
+ do {
+ st = inb(AHA_STATUS_PORT(port));
+ if (st & AHA_CSR_CMD_ERR) goto out;
+ } while ((st & AHA_CSR_DATAI_FULL) == 0);
+
+ *inp++ = inb(AHA_DATA_PORT(port));
+ }
+ ++inc;
+ failed = FALSE;
+
+ /* wait command complete */
+ if (clear_interrupt) do {
+ delay(1);
+ st = inb(AHA_INTR_PORT(port));
+ } while ((st & AHA_INTR_DONE) == 0);
+
+out:
+ if (clear_interrupt)
+ outb(AHA_CONTROL_PORT(port), AHA_CTL_INTR_CLR);
+ if (failed)
+ printf("aha_command: error on (%x %x %x %x %x %x), status %x\n",
+ port, cmd, outp, outc, inp, inc, st);
+ return inc;
+}
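+
+/*
+ * Usage sketch for aha_command() (illustration only; AHA_CMD_EXAMPLE is a
+ * hypothetical opcode, not one defined in this driver): send a two-byte
+ * parameter block, read four bytes back, and treat a non-zero return value
+ * as "the board returned fewer bytes than asked for".
+ */
+#if 0
+	unsigned char parms[2], reply[4];
+
+	if (aha_command(port, AHA_CMD_EXAMPLE, parms, sizeof parms,
+			reply, sizeof reply, FALSE) != 0)
+		printf("aha: short reply from board\n");
+#endif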
+
+#include <vm/vm_kern.h>
+
+/*
+ * Dynamically allocate segment lists to
+ * targets (for scatter/gather).
+ * It's a max of 17*6=102 bytes per target.
+ */
+vm_offset_t aha_seglist_next, aha_seglist_end;
+
+aha_alloc_segment_list(tgt)
+ target_info_t *tgt;
+{
+#define ALLOC_SIZE (AHA_MAX_SEGLIST * sizeof(aha_seglist_t))
+
+/* XXX locking */
+ if ((aha_seglist_next + ALLOC_SIZE) > aha_seglist_end) {
+ (void) kmem_alloc_wired(kernel_map, &aha_seglist_next, PAGE_SIZE);
+ aha_seglist_end = aha_seglist_next + PAGE_SIZE;
+ }
+ tgt->dma_ptr = (char *)aha_seglist_next;
+ aha_seglist_next += ALLOC_SIZE;
+/* XXX locking */
+}
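+
+/*
+ * Size note on the carve-out above (from the 17*6 = 102 figure): each
+ * target takes ALLOC_SIZE = AHA_MAX_SEGLIST * sizeof(aha_seglist_t) bytes,
+ * so one wired PAGE_SIZE page serves PAGE_SIZE / ALLOC_SIZE targets
+ * (roughly 40 with a 4K page) before another page must be wired.
+ */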
+
+#endif /* NAHA > 0 */
+
diff --git a/scsi/adapters/scsi_aha17_hdw.c b/scsi/adapters/scsi_aha17_hdw.c
new file mode 100644
index 00000000..d8afe6ab
--- /dev/null
+++ b/scsi/adapters/scsi_aha17_hdw.c
@@ -0,0 +1,1371 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * Copyright (c) 1993 University of Dublin
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and the following permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND THE UNIVERSITY OF DUBLIN ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON AND THE
+ * UNIVERSITY OF DUBLIN DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Support for AHA-174x in enhanced mode. Dominic Herity (dherity@cs.tcd.ie)
+ * Will refer to "Adaptec AHA-1740A/1742A/1744 Technical Reference Manual"
+ * page x-y as TRMx-y in comments below.
+ */
+
+#include <eaha.h>
+#if NEAHA > 0
+
+#define db_printf printf
+
+#include <cpus.h>
+#include <platforms.h>
+#include <aha.h>
+
+#ifdef OSF
+#include <eisa.h>
+#else
+#include <i386at/eisa.h>
+#endif
+
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <chips/busses.h>
+#include <scsi/compat_30.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+
+#include <scsi/adapters/scsi_aha15.h>
+#include <vm/vm_kern.h>
+
+#ifdef AT386
+#define MACHINE_PGBYTES I386_PGBYTES
+#define MAPPABLE 0
+#define gimmeabreak() asm("int3")
+
+
+#include <i386/pio.h> /* inlining of outb and inb */
+#ifdef OSF
+#include <machine/mp/mp.h>
+#endif
+#endif /*AT386*/
+
+#ifdef CBUS
+#include <cbus/cbus.h>
+#endif
+
+
+#ifndef MACHINE_PGBYTES /* cross compile check */
+#define MACHINE_PGBYTES 0x1000
+#define MAPPABLE 1
+#define gimmeabreak() Debugger("gimmeabreak");
+#endif
+
+int eaha_probe(), scsi_slave(), eaha_go(), eaha_intr();
+void scsi_attach();
+
+vm_offset_t eaha_std[NEAHA] = { 0 };
+struct bus_device *eaha_dinfo[NEAHA*8];
+struct bus_ctlr *eaha_minfo[NEAHA];
+struct bus_driver eaha_driver =
+ { eaha_probe, scsi_slave, scsi_attach, eaha_go, eaha_std, "rz",
+ eaha_dinfo, "eahac", eaha_minfo, BUS_INTR_B4_PROBE};
+
+
+#define TRACE
+#ifdef TRACE
+
+#define LOGSIZE 256
+int eaha_logpt;
+char eaha_log[LOGSIZE];
+
+#define MAXLOG_VALUE 0x1e
+struct {
+ char *name;
+ unsigned int count;
+} logtbl[MAXLOG_VALUE];
+
+static LOG(
+ int e,
+ char *f)
+{
+ eaha_log[eaha_logpt++] = (e);
+ if (eaha_logpt == LOGSIZE) eaha_logpt = 0;
+ if ((e) < MAXLOG_VALUE) {
+ logtbl[(e)].name = (f);
+ logtbl[(e)].count++;
+ }
+}
+
+eaha_print_log(
+ int skip)
+{
+ register int i, j;
+ register unsigned char c;
+
+ for (i = 0, j = eaha_logpt; i < LOGSIZE; i++) {
+ c = eaha_log[j];
+ if (++j == LOGSIZE) j = 0;
+ if (skip-- > 0)
+ continue;
+ if (c < MAXLOG_VALUE)
+ db_printf(" %s", logtbl[c].name);
+ else
+ db_printf("-%x", c & 0x7f);
+ }
+ return 0;
+}
+
+eaha_print_stat()
+{
+ register int i;
+ register char *p;
+ for (i = 0; i < MAXLOG_VALUE; i++) {
+ if (p = logtbl[i].name)
+ printf("%d %s\n", logtbl[i].count, p);
+ }
+}
+
+#else /*TRACE*/
+#define LOG(e,f)
+#define LOGSIZE
+#endif /*TRACE*/
+
+#ifdef DEBUG
+#define ASSERT(x) { if (!(x)) gimmeabreak() ; }
+#define MARK() gimmeabreak()
+#else
+#define ASSERT(x)
+#define MARK()
+#endif
+
+/*
+ * Notes :
+ *
+ * do each host command TRM6-4
+ * find targets in probe
+ * disable SCSI writes
+ * matching port with structs, eaha_go with port, eaha_intr with port
+ *
+ */
+
+/* eaha registers. See TRM4-11..23. dph */
+
+#define HID0(z) ((z)+0xC80)
+#define HID1(z) ((z)+0xC81)
+#define HID2(z) ((z)+0xC82)
+#define HID3(z) ((z)+0xC83)
+#define EBCTRL(z) ((z)+0xC84)
+#define PORTADDR(z) ((z)+0xCC0)
+#define BIOSADDR(z) ((z)+0xCC1)
+#define INTDEF(z) ((z)+0xCC2)
+#define SCSIDEF(z) ((z)+0xCC3)
+#define MBOXOUT0(z) ((z)+0xCD0)
+#define MBOXOUT1(z) ((z)+0xCD1)
+#define MBOXOUT2(z) ((z)+0xCD2)
+#define MBOXOUT3(z) ((z)+0xCD3)
+#define MBOXIN0(z) ((z)+0xCD8)
+#define MBOXIN1(z) ((z)+0xCD9)
+#define MBOXIN2(z) ((z)+0xCDA)
+#define MBOXIN3(z) ((z)+0xCDB)
+#define ATTN(z) ((z)+0xCD4)
+#define G2CNTRL(z) ((z)+0xCD5)
+#define G2INTST(z) ((z)+0xCD6)
+#define G2STAT(z) ((z)+0xCD7)
+#define G2STAT2(z) ((z)+0xCDC)
+
+/*
+ * Enhanced mode data structures: ring, enhanced ccbs, a per target buffer
+ */
+
+#define SCSI_TARGETS 8 /* Allow for SCSI-2 */
+
+
+/* Extended Command Control Block Format. See TRM6-3..12. */
+
+typedef struct {
+ unsigned short command ;
+# define EAHA_CMD_NOP 0
+# define EAHA_CMD_INIT_CMD 1
+# define EAHA_CMD_DIAG 5
+# define EAHA_CMD_INIT_SCSI 6
+# define EAHA_CMD_READ_SENS 8
+# define EAHA_CMD_DOWNLOAD 9
+# define EAHA_CMD_HOST_INQ 0x0a
+# define EAHA_CMD_TARG_CMD 0x10
+
+ /*
+ * It appears to be customary to tackle the endian-ness of
+ * bit fields as follows, so I won't deviate. However, nothing in
+ * K&R implies that bit fields are implemented so that the fields
+ * of an unsigned char are allocated lsb first. Indeed, K&R _warns_
+ * _against_ using bit fields to describe storage allocation.
+ * This issue is separate from endian-ness. dph
+ * And this is exactly the reason macros are used. If your compiler
+ * is weird just override the macros and we will all be happy. af
+ */
+ BITFIELD_3(unsigned char,
+ cne:1,
+ xxx0:6,
+ di:1) ;
+ BITFIELD_7(unsigned char,
+ xxx1:2,
+ ses:1,
+ xxx2:1,
+ sg:1,
+ xxx3:1,
+ dsb:1,
+ ars:1) ;
+
+ BITFIELD_5(unsigned char,
+ lun:3,
+ tag:1,
+ tt:2,
+ nd:1,
+ xxx4:1) ;
+ BITFIELD_7(unsigned char,
+ dat:1,
+ dir:1,
+ st:1,
+ chk:1,
+ xxx5:2,
+ rec:1,
+ nbr:1) ;
+
+ unsigned short xxx6 ;
+
+ vm_offset_t scather ; /* scatter/gather */
+ unsigned scathlen ;
+ vm_offset_t status ;
+ vm_offset_t chain ;
+ int xxx7 ;
+
+ vm_offset_t sense_p ;
+ unsigned char sense_len ;
+ unsigned char cdb_len ;
+ unsigned short checksum ;
+ scsi_command_group_5 cdb ;
+ unsigned char buffer[256] ; /* space for data returned. */
+
+} eccb ;
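+
+/*
+ * For reference, one plausible expansion of the BITFIELD_n macros used
+ * above on a little-endian compiler (an assumption for illustration; the
+ * real definitions live in a shared SCSI header and may differ):
+ */
+#if 0
+#define EXAMPLE_BITFIELD_3(type, a, b, c)	type a, b, c
+/* ...a big-endian variant would declare the same fields as c, b, a,
+   so that the struct layout matches the register format either way. */
+#endif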
+
+#define NTARGETS (8)
+#define NECCBS (NTARGETS+2) /* Targets + 2 to allow for temporaries. */
+ /* Can be up to 64 (TRM6-2), but that entails lots of bss usage */
+
+typedef struct { /* Status Block Format. See TRM6-13..19. */
+ BITFIELD_8(unsigned char,
+ don:1,
+ du:1,
+ xxx0:1,
+ qf:1,
+ sc:1,
+ dover:1,
+ ch:1,
+ inti:1) ;
+ BITFIELD_8(unsigned char,
+ asa:1, /* Error in TRM6-15..16 says both asa and sns */
+ sns:1, /* bit 9. Bits 8 and 10 are not mentioned. */
+ xxx1:1,
+ ini:1,
+ me:1,
+ xxx2:1,
+ eca:1,
+ xxx3:1) ;
+
+ unsigned char ha_status ;
+# define HA_STATUS_SUCCESS 0x00
+# define HA_STATUS_HOST_ABORTED 0x04
+# define HA_STATUS_ADP_ABORTED 0x05
+# define HA_STATUS_NO_FIRM 0x08
+# define HA_STATUS_NOT_TARGET 0x0a
+# define HA_STATUS_SEL_TIMEOUT 0x11
+# define HA_STATUS_OVRUN 0x12
+# define HA_STATUS_BUS_FREE 0x13
+# define HA_STATUS_PHASE_ERROR 0x14
+# define HA_STATUS_BAD_OPCODE 0x16
+# define HA_STATUS_INVALID_LINK 0x17
+# define HA_STATUS_BAD_CBLOCK 0x18
+# define HA_STATUS_DUP_CBLOCK 0x19
+# define HA_STATUS_BAD_SCATHER 0x1a
+# define HA_STATUS_RSENSE_FAIL 0x1b
+# define HA_STATUS_TAG_REJECT 0x1c
+# define HA_STATUS_HARD_ERROR 0x20
+# define HA_STATUS_TARGET_NOATTN 0x21
+# define HA_STATUS_HOST_RESET 0x22
+# define HA_STATUS_OTHER_RESET 0x23
+# define HA_STATUS_PROG_BAD_SUM 0x80
+
+ scsi2_status_byte_t target_status ;
+
+ unsigned residue ;
+ vm_offset_t residue_buffer ;
+ unsigned short add_stat_len ;
+ unsigned char sense_len ;
+ char xxx4[9] ;
+ unsigned char cdb[6] ;
+
+} status_block ;
+
+typedef struct {
+ vm_offset_t ptr ;
+ unsigned len ;
+} scather_entry ;
+
+#define SCATHER_ENTRIES 128 /* TRM 6-11 */
+
+struct erccbx {
+ target_info_t *active_target;
+ eccb _eccb;
+ status_block status ;
+ struct erccbx *next ;
+} ;
+
+typedef struct erccbx erccb ;
+
+/* forward decls */
+int eaha_reset_scsibus();
+boolean_t eaha_probe_target();
+
+/*
+ * State descriptor for this layer. There is one such structure
+ * per (enabled) board
+ */
+typedef struct {
+ watchdog_t wd;
+ decl_simple_lock_data(, aha_lock)
+ int port; /* I/O port */
+
+ int has_sense_info [NTARGETS];
+ int sense_info_lun [NTARGETS];
+ /* 1742 enhanced mode will hang if target has
+ * sense info and host doesn't request it (TRM6-34).
+ * This sometimes happens in the scsi driver.
+ * These flags indicate when a target has sense
+ * info to disgorge.
+ * If set, eaha_go reads and discards sense info
+ * before running any command except request sense.
+ * dph
+ */
+
+ scsi_softc_t *sc; /* HBA-indep info */
+
+ erccb _erccbs[NECCBS] ; /* mailboxes */
+ erccb *toperccb ;
+
+ /* This chicanery is for mapping back the phys address
+ of a CCB (which we get in an MBI) to its virtual */
+ /* [we could use phystokv(), but it isn't standard] */
+ vm_offset_t I_hold_my_phys_address;
+
+ char host_inquiry_data[256] ; /* Check out ../scsi2.h */
+
+} eaha_softc ;
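+
+/*
+ * Sketch of the phys-to-virt back-translation that eaha_intr() performs
+ * with the I_hold_my_phys_address field above (illustration only): the
+ * kernel image is physically contiguous, so the stored virtual/physical
+ * pair gives the constant delta needed to turn the eccb physical address
+ * returned in the mailbox-in registers back into the enclosing erccb.
+ */
+#if 0
+static erccb *
+eaha_mbi_to_erccb(eaha_softc *eaha, vm_offset_t mbi)
+{
+	vm_offset_t eccb_virt = (vm_offset_t)&eaha->I_hold_my_phys_address
+				+ (mbi - eaha->I_hold_my_phys_address);
+
+	return (erccb *)(eccb_virt - (vm_offset_t)&(((erccb *)0)->_eccb));
+}
+#endif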
+
+eaha_softc eaha_softc_data[NEAHA];
+
+typedef eaha_softc *eaha_softc_t;
+
+eaha_softc_t eaha_softc_pool[NEAHA];
+
+int eaha_quiet ;
+
+erccb *erccb_alloc(
+ eaha_softc *eaha)
+{
+ erccb *e ;
+ int x ;
+
+ do {
+ while (eaha->toperccb == 0) ;/* Shouldn't be often or long, */
+ /* BUT should use a semaphore */
+ x = splbio() ;
+ e = eaha->toperccb ;
+ if (e == 0)
+ splx(x) ;
+ } while (!e) ;
+ eaha->toperccb = e->next ;
+ splx(x) ;
+ bzero(e,sizeof(*e)) ;
+ e->_eccb.status = kvtophys((vm_offset_t)&e->status) ;
+ return e ;
+}
+
+void erccb_free(
+ eaha_softc *eaha,
+ erccb *e)
+{
+ int x ;
+ ASSERT ( e >= eaha->_erccbs && e < eaha->_erccbs+NECCBS) ;
+ x = splbio() ;
+ e->next = eaha->toperccb ;
+ eaha->toperccb = e ;
+ splx(x) ;
+}
+
+void eaha_mboxout(
+ int port,
+ vm_offset_t phys)
+{
+ outb(MBOXOUT0(port),phys) ;
+ outb(MBOXOUT1(port),phys>>8) ;
+ outb(MBOXOUT2(port),phys>>16) ;
+ outb(MBOXOUT3(port),phys>>24) ;
+}
+
+void eaha_command( /* start a command */
+ int port,
+ erccb *_erccb)
+{
+ int s ;
+ vm_offset_t phys = kvtophys((vm_offset_t) &_erccb->_eccb) ;
+ while ((inb(G2STAT(port)) & 0x04)==0); /*While MBO busy. TRM6-1 */
+ s = splbio() ;
+ eaha_mboxout(port,phys) ;
+ while (inb(G2STAT(port)) & 1) ; /* While adapter busy. TRM6-2 */
+ outb(ATTN(port),0x40 | _erccb->active_target->target_id) ; /* TRM6-20 */
+	/* (Should use target id for initiator command) */
+ splx(s) ;
+}
+
+eaha_reset(
+ eaha_softc_t eaha,
+ boolean_t quick)
+{
+ /*
+ * Reset board and wait till done
+ */
+ unsigned st ;
+ int target_id ;
+ int port = eaha->port ;
+
+ /* Reset adapter, maybe with SCSIbus */
+ eaha_mboxout(port, quick ? 0x00080080 : 0x00000080 ) ; /* TRM 6-43..45 */
+ outb(ATTN(port), 0x10 | inb(SCSIDEF(port)) & 0x0f) ;
+ outb(G2CNTRL(port),0x20) ; /* TRM 4-22 */
+
+ do {
+ st = inb(G2INTST(port)) >> 4 ;
+ } while (st == 0) ;
+ /* TRM 4-22 implies that 1 should not be returned in G2INTST, but
+	   in practice, it is. So this code takes 0 to mean non-completion. */
+
+ for (target_id = 0 ; target_id < NTARGETS; target_id++)
+ eaha->has_sense_info[target_id] = FALSE ;
+
+}
+
+void eaha_init(
+ eaha_softc_t eaha)
+{
+ /* Do nothing - I guess */
+}
+
+void eaha_bus_reset(
+ eaha_softc_t eaha)
+
+{
+ LOG(0x1d,"bus_reset");
+
+ /*
+ * Clear bus descriptor
+ */
+ eaha->wd.nactive = 0;
+ eaha_reset(eaha, TRUE);
+ eaha_init(eaha);
+
+ printf("eaha: (%d) bus reset ", ++eaha->wd.reset_count);
+ delay(scsi_delay_after_reset); /* some targets take long to reset */
+
+ if (eaha->sc == 0) /* sanity */
+ return;
+
+ scsi_bus_was_reset(eaha->sc);
+}
+
+#ifdef notdef
+ /* functions added to complete 1742 support, but not used. Untested. */
+
+ void eaha_download(port, data, len)
+ int port ;
+ char *data ;
+ unsigned len ;
+ {
+ /* 1744 firmware download. Not implemented. TRM6-21 */
+ }
+
+ void eaha_initscsi(data, len)
+ char *data ;
+ unsigned len ;
+ {
+ /* initialize SCSI subsystem. Presume BIOS does it.
+ Not implemented. TRM6-23 */
+ }
+
+ void eaha_noop()
+ {
+ /* Not implemented. TRM6-27 */
+ }
+
+ erccb *eaha_host_adapter_inquiry(eaha) /* Returns a promise */
+ eaha_softc *eaha ; /* TRM6-31..33 */
+ {
+ erccb *_erccb = erccb_alloc(eaha) ;
+ _erccb->_eccb.scather = (vm_offset_t) kvtophys(eaha->host_inquiry_data) ;
+ _erccb->_eccb.scathlen = sizeof(eaha->host_inquiry_data) ;
+ _erccb->_eccb.ses = 1 ;
+ _erccb->_eccb.command = EAHA_CMD_HOST_INQ ;
+ eaha_command(eaha->port,_erccb->_eccb,0) ; /* Is scsi_id used */
+ return _erccb ;
+ }
+
+ erccb *eaha_read_sense_info(eaha, target, lun) /* TRM 6-33..35 */
+ eaha_softc *eaha ;
+ unsigned target, lun ;
+ { /* Don't think we need this because its done in scsi_alldevs.c */
+ #ifdef notdef
+ erccb *_erccb = erccb_alloc(eaha) ;
+ _erccb->_eccb.command = EAHA_CMD_READ_SENS ;
+ _erccb->_eccb.lun = lun ;
+ eaha_command(eaha->port,_erccb->_eccb, target) ;/*Wrong # args*/
+ return _erccb ;
+ #else
+ return 0 ;
+ #endif
+ }
+
+ void eaha_diagnostic(eaha)
+ eaha_softc *eaha ;
+ {
+ /* Not implemented. TRM6-36..37 */
+ }
+
+ erccb *eaha_target_cmd(eaha, target, lun, data, len) /* TRM6-38..39 */
+ eaha_softc *eaha ;
+ unsigned target, lun ;
+ char *data ;
+ unsigned len ;
+ {
+ erccb *_erccb = erccb_alloc(eaha) ;
+ _erccb->_eccb.command = EAHA_CMD_TARG_CMD ;
+ _erccb->_eccb.lun = lun ;
+ eaha_command(eaha->port,_erccb->_eccb,target);/*Wrong # args*/
+ return _erccb ;
+ }
+
+ erccb *eaha_init_cmd(port) /* SHOULD RETURN TOKEN. i.e. ptr to eccb */
+ /* Need list of free eccbs */
+ { /* to be continued,. possibly. */
+ }
+
+#endif /* notdef */
+
+target_info_t *
+eaha_tgt_alloc(
+ eaha_softc_t eaha,
+ int id,
+ target_info_t *tgt)
+{
+ erccb *_erccb;
+
+ if (tgt == 0)
+ tgt = scsi_slave_alloc(eaha - eaha_softc_data, id, eaha);
+
+ _erccb = erccb_alloc(eaha) ; /* This is very dodgy */
+ tgt->cmd_ptr = (char *)& _erccb->_eccb.cdb ;
+ tgt->dma_ptr = 0;
+ return tgt;
+}
+
+
+struct {
+ scsi_sense_data_t sns ;
+ unsigned char extra
+ [254-sizeof(scsi_sense_data_t)] ;
+} eaha_xsns [NTARGETS] ;/*must be bss to be contiguous*/
+
+
+/* Enhanced adapter probe routine */
+
+eaha_probe(
+ register int port,
+ struct bus_ctlr *ui)
+{
+ int unit = ui->unit;
+ eaha_softc_t eaha = &eaha_softc_data[unit] ;
+ int target_id ;
+ scsi_softc_t *sc ;
+ int s;
+ boolean_t did_banner = FALSE ;
+ struct aha_devs installed;
+ unsigned char my_scsi_id, my_interrupt ;
+
+ if (unit >= NEAHA)
+ return(0);
+
+ /* No interrupts yet */
+ s = splbio();
+
+ /*
+	 * Detect presence of 174x in enhanced mode. Ignore HID2 and HID3
+ * on the assumption that compatibility will be preserved. dph
+ */
+ if (inb(HID0(port)) != 0x04 || inb(HID1(port)) != 0x90 ||
+ (inb(PORTADDR(port)) & 0x80) != 0x80) {
+ splx(s);
+ return 0 ;
+ }
+
+ /* Issue RESET in case this is a reboot */
+
+ outb(EBCTRL(port),0x04) ; /* Disable board. TRM4-12 */
+ outb(PORTADDR(port),0x80) ; /* Disable standard mode ports. TRM4-13. */
+ my_interrupt = inb(INTDEF(port)) & 0x07 ;
+ outb(INTDEF(port), my_interrupt | 0x00) ;
+ /* Disable interrupts. TRM4-15 */
+ my_scsi_id = inb(SCSIDEF(port)) & 0x0f ;
+ outb(SCSIDEF(port), my_scsi_id | 0x10) ;
+ /* Force SCSI reset on hard reset. TRM4-16 */
+ outb(G2CNTRL(port),0xe0) ; /* Reset board, clear interrupt */
+ /* and set 'host ready'. */
+ delay(10*10) ; /* HRST must remain set for 10us. TRM4-22 */
+ /* (I don't believe the delay loop is slow enough.) */
+ outb(G2CNTRL(port),0x60);/*Un-reset board, set 'host ready'. TRM4-22*/
+
+ printf("Adaptec 1740A/1742A/1744 enhanced mode\n");
+
+ /* Get host inquiry data */
+
+ eaha_softc_pool[unit] = eaha ;
+ bzero(eaha,sizeof(*eaha)) ;
+ eaha->port = port ;
+
+ sc = scsi_master_alloc(unit, eaha) ;
+ eaha->sc = sc ;
+ sc->go = eaha_go ;
+ sc->watchdog = scsi_watchdog ;
+ sc->probe = eaha_probe_target ;
+ eaha->wd.reset = eaha_reset_scsibus ;
+ sc->max_dma_data = -1 ; /* Lets be optimistic */
+ sc->initiator_id = my_scsi_id ;
+ eaha_reset(eaha,TRUE) ;
+ eaha->I_hold_my_phys_address =
+ kvtophys((vm_offset_t)&eaha->I_hold_my_phys_address) ;
+ {
+ erccb *e ;
+ eaha->toperccb = eaha->_erccbs ;
+ for (e=eaha->_erccbs; e < eaha->_erccbs+NECCBS; e++) {
+ e->next = e+1 ;
+ e->_eccb.status =
+ kvtophys((vm_offset_t) &e->status) ;
+ }
+ eaha->_erccbs[NECCBS-1].next = 0 ;
+
+ }
+
+ ui->sysdep1 = my_interrupt + 9 ;
+ take_ctlr_irq(ui) ;
+
+ printf("%s%d: [port 0x%x intr ch %d] my SCSI id is %d",
+ ui->name, unit, port, my_interrupt + 9, my_scsi_id) ;
+
+ outb(INTDEF(port), my_interrupt | 0x10) ;
+ /* Enable interrupts. TRM4-15 */
+ outb(EBCTRL(port),0x01) ; /* Enable board. TRM4-12 */
+
+ { target_info_t *t = eaha_tgt_alloc(eaha, my_scsi_id, 0) ;
+ /* Haven't enabled target mode a la standard mode, because */
+ /* it doesn't seem to be necessary. */
+ sccpu_new_initiator(t, t) ;
+ }
+
+ /* Find targets, incl. ourselves. */
+
+ for (target_id=0; target_id < SCSI_TARGETS; target_id++)
+ if (target_id != sc->initiator_id) {
+ scsi_cmd_test_unit_ready_t *cmd;
+ erccb *_erccb = erccb_alloc(eaha) ;
+ unsigned attempts = 0 ;
+#define MAX_ATTEMPTS 2
+ target_info_t temp_targ ;
+
+ temp_targ.ior = 0 ;
+ temp_targ.hw_state = (char *) eaha ;
+ temp_targ.cmd_ptr = (char *) &_erccb->_eccb.cdb ;
+ temp_targ.target_id = target_id ;
+ temp_targ.lun = 0 ;
+ temp_targ.cur_cmd = SCSI_CMD_TEST_UNIT_READY;
+
+ cmd = (scsi_cmd_test_unit_ready_t *) temp_targ.cmd_ptr;
+
+ do {
+ cmd->scsi_cmd_code = SCSI_CMD_TEST_UNIT_READY;
+ cmd->scsi_cmd_lun_and_lba1 = 0; /*assume 1 lun?*/
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_ss_flags = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ eaha_go( &temp_targ,
+ sizeof(scsi_cmd_test_unit_ready_t),0,0);
+ /* ints disabled, so call isr yourself. */
+ while (temp_targ.done == SCSI_RET_IN_PROGRESS)
+ if (inb(G2STAT(eaha->port)) & 0x02) {
+ eaha_quiet = 1 ;
+ eaha_intr(unit) ;
+ eaha_quiet = 0 ;
+ }
+ if (temp_targ.done == SCSI_RET_NEED_SENSE) {
+ /* MUST get sense info : TRM6-34 */
+ if (eaha_retrieve_sense_info(
+ eaha, temp_targ.target_id,
+ temp_targ.lun) &&
+ attempts == MAX_ATTEMPTS-1) {
+
+ printf(
+ "\nTarget %d Check Condition : "
+ ,temp_targ.target_id) ;
+ scsi_print_sense_data(&eaha_xsns
+ [temp_targ.target_id]);
+ printf("\n") ;
+ }
+ }
+ } while (temp_targ.done != SCSI_RET_SUCCESS &&
+ temp_targ.done != SCSI_RET_ABORTED &&
+ ++attempts < MAX_ATTEMPTS) ;
+
+ /*
+ * Recognize target which is present, whether or not
+ * it is ready, e.g. drive with removable media.
+ */
+ if (temp_targ.done == SCSI_RET_SUCCESS ||
+ temp_targ.done == SCSI_RET_NEED_SENSE &&
+ _erccb->status.target_status.bits != 0) { /* Eureka */
+ installed.tgt_luns[target_id]=1;/*Assume 1 lun?*/
+ printf(", %s%d",
+ did_banner++ ? "" : "target(s) at ",
+ target_id);
+
+ erccb_free(eaha, _erccb) ;
+
+ /* Normally, only LUN 0 */
+ if (installed.tgt_luns[target_id] != 1)
+ printf("(%x)", installed.tgt_luns[target_id]);
+ /*
+ * Found a target
+ */
+ (void) eaha_tgt_alloc(eaha, target_id, 0);
+ /* Why discard ? */
+ } else
+ installed.tgt_luns[target_id]=0;
+ }
+
+ printf(".\n") ;
+ splx(s);
+ return 1 ;
+}
+
+int eaha_retrieve_sense_info (
+ eaha_softc_t eaha,
+ int tid,
+ int lun)
+{
+ int result ;
+ int s ;
+ target_info_t dummy_target ; /* Keeps eaha_command() happy. HACK */
+ erccb *_erccb1 = erccb_alloc(eaha) ;
+
+ _erccb1->active_target = &dummy_target ;
+ dummy_target.target_id = tid ;
+ _erccb1->_eccb.command =
+ EAHA_CMD_READ_SENS ;
+ _erccb1->_eccb.lun = lun ;
+ _erccb1->_eccb.sense_p = kvtophys((vm_offset_t) &eaha_xsns [tid]);
+ _erccb1->_eccb.sense_len = sizeof(eaha_xsns [tid]);
+ _erccb1->_eccb.ses = 1 ;
+ s = splbio() ;
+ eaha_command(eaha->port,_erccb1) ;
+ while ((inb(G2STAT(eaha->port)) & 0x02) == 0) ;
+ outb(G2CNTRL(eaha->port),0x40);/* Clear int */
+ splx(s) ;
+ result = _erccb1->status.target_status.bits != 0 ;
+ erccb_free(eaha,_erccb1) ;
+ return result ;
+}
+
+/*
+ * Start a SCSI command on a target (enhanced mode)
+ */
+eaha_go(
+ target_info_t *tgt,
+ int cmd_count,
+ int in_count,
+ boolean_t cmd_only)/*lint: unused*/
+{
+ eaha_softc_t eaha;
+ int s;
+ erccb *_erccb;
+ int len;
+ vm_offset_t virt;
+ int tid = tgt->target_id ;
+
+#ifdef CBUS
+ at386_io_lock_state();
+#endif
+ LOG(1,"go");
+
+#ifdef CBUS
+ at386_io_lock(MP_DEV_WAIT);
+#endif
+ eaha = (eaha_softc_t)tgt->hw_state;
+
+ if(eaha->has_sense_info[tid]) {
+ (void) eaha_retrieve_sense_info
+ (eaha, tid, eaha->sense_info_lun[tid]) ;
+ eaha->has_sense_info[tid] = FALSE ;
+ if (tgt->cur_cmd == SCSI_CMD_REQUEST_SENSE) {
+ bcopy(&eaha_xsns[tid],tgt->cmd_ptr,in_count) ;
+ tgt->done = SCSI_RET_SUCCESS;
+ tgt->transient_state.cmd_count = cmd_count;
+ tgt->transient_state.out_count = 0;
+ tgt->transient_state.in_count = in_count;
+ /* Fake up interrupt */
+ /* Highlights from eaha_initiator_intr(), */
+ /* ignoring errors */
+ if (tgt->ior)
+ (*tgt->dev_ops->restart)( tgt, TRUE);
+#ifdef CBUS
+ at386_io_unlock();
+#endif
+ return ;
+ }
+ }
+
+/* XXX delay the handling of the ccb till later */
+ _erccb = (erccb *)
+ ((unsigned)tgt->cmd_ptr - (unsigned) &((erccb *) 0)->_eccb.cdb);
+ /* Tell *rccb about target, eg. id ? */
+ _erccb->active_target = tgt;
+
+ /*
+ * We can do real DMA.
+ */
+/* tgt->transient_state.copy_count = 0; unused */
+/* tgt->transient_state.dma_offset = 0; unused */
+
+ tgt->transient_state.cmd_count = cmd_count;
+
+ if ((tgt->cur_cmd == SCSI_CMD_WRITE) ||
+ (tgt->cur_cmd == SCSI_CMD_LONG_WRITE)){
+ io_req_t ior = tgt->ior;
+ register int len = ior->io_count;
+
+ tgt->transient_state.out_count = len;
+
+ /* How do we avoid leaks here ? Trust the board
+ will do zero-padding, for now. XXX CHECKME */
+#if 0
+ if (len < tgt->block_size) {
+ bzero(to + len, tgt->block_size - len);
+ len = tgt->block_size;
+ tgt->transient_state.out_count = len;
+ }
+#endif
+ } else {
+ tgt->transient_state.out_count = 0;
+ }
+
+ /* See above for in_count < block_size */
+ tgt->transient_state.in_count = in_count;
+
+ /*
+ * Setup CCB state
+ */
+ tgt->done = SCSI_RET_IN_PROGRESS;
+
+ switch (tgt->cur_cmd) {
+ case SCSI_CMD_READ:
+ case SCSI_CMD_LONG_READ:
+ LOG(9,"readop");
+ virt = (vm_offset_t)tgt->ior->io_data;
+ len = tgt->transient_state.in_count;
+ break;
+ case SCSI_CMD_WRITE:
+ case SCSI_CMD_LONG_WRITE:
+ LOG(0x1a,"writeop");
+ virt = (vm_offset_t)tgt->ior->io_data;
+ len = tgt->transient_state.out_count;
+ break;
+ case SCSI_CMD_INQUIRY:
+ case SCSI_CMD_REQUEST_SENSE:
+ case SCSI_CMD_MODE_SENSE:
+ case SCSI_CMD_RECEIVE_DIAG_RESULTS:
+ case SCSI_CMD_READ_CAPACITY:
+ case SCSI_CMD_READ_BLOCK_LIMITS:
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ virt = (vm_offset_t)tgt->cmd_ptr;
+ len = tgt->transient_state.in_count;
+ break;
+ case SCSI_CMD_MODE_SELECT:
+ case SCSI_CMD_REASSIGN_BLOCKS:
+ case SCSI_CMD_FORMAT_UNIT:
+ tgt->transient_state.cmd_count = sizeof(scsi_command_group_0);
+ len =
+ tgt->transient_state.out_count = cmd_count - sizeof(scsi_command_group_0);
+ virt = (vm_offset_t)tgt->cmd_ptr+sizeof(scsi_command_group_0);
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ break;
+ default:
+ LOG(0x1c,"cmdop");
+ LOG(0x80+tgt->cur_cmd,0);
+ virt = 0;
+ len = 0;
+ }
+
+ eaha_prepare_rccb(tgt, _erccb, virt, len);
+
+ _erccb->_eccb.lun = tgt->lun;
+
+ /*
+ * XXX here and everywhere, locks!
+ */
+ s = splbio();
+
+ simple_lock(&eaha->aha_lock);
+ if (eaha->wd.nactive++ == 0)
+ eaha->wd.watchdog_state = SCSI_WD_ACTIVE;
+ simple_unlock(&eaha->aha_lock);
+
+ LOG(3,"enqueue");
+
+ eaha_command(eaha->port, _erccb) ;
+
+ splx(s);
+#ifdef CBUS
+ at386_io_unlock();
+#endif
+}
+
+eaha_prepare_rccb(
+ target_info_t *tgt,
+ erccb *_erccb,
+ vm_offset_t virt,
+ vm_size_t len)
+{
+ _erccb->_eccb.cdb_len = tgt->transient_state.cmd_count;
+
+ _erccb->_eccb.command = EAHA_CMD_INIT_CMD;/* default common case */
+
+ if (virt == 0) {
+ /* no xfers */
+ _erccb->_eccb.scather = 0 ;
+ _erccb->_eccb.scathlen = 0 ;
+ _erccb->_eccb.sg = 0 ;
+ } else {
+ /* messy xfer */
+ scather_entry *seglist;
+ vm_size_t l1, off;
+
+ _erccb->_eccb.sg = 1 ;
+
+ if (tgt->dma_ptr == 0)
+ eaha_alloc_segment_list(tgt);
+ seglist = (scather_entry *) tgt->dma_ptr;
+
+ _erccb->_eccb.scather = kvtophys((vm_offset_t) seglist);
+
+ l1 = MACHINE_PGBYTES - (virt & (MACHINE_PGBYTES - 1));
+ if (l1 > len)
+ l1 = len ;
+
+ off = 1;/* now #pages */
+ while (1) {
+ seglist->ptr = kvtophys(virt) ;
+ seglist->len = l1 ;
+ seglist++;
+
+ if (len <= l1)
+ break ;
+ len-= l1 ;
+ virt += l1; off++;
+
+ l1 = (len > MACHINE_PGBYTES) ? MACHINE_PGBYTES : len;
+ }
+ _erccb->_eccb.scathlen = off * sizeof(*seglist);
+ }
+}
+
+/*
+ * Dynamically allocate segment lists to
+ * targets (for scatter/gather)
+ */
+vm_offset_t eaha_seglist_next = 0, eaha_seglist_end = 0 ;
+#define EALLOC_SIZE (SCATHER_ENTRIES * sizeof(scather_entry))
+
+eaha_alloc_segment_list(
+ target_info_t *tgt)
+{
+
+/* XXX locking */
+/* ? Can't spl() for unknown duration */
+ if ((eaha_seglist_next + EALLOC_SIZE) > eaha_seglist_end) {
+ (void)kmem_alloc_wired(kernel_map,&eaha_seglist_next,PAGE_SIZE);
+ eaha_seglist_end = eaha_seglist_next + PAGE_SIZE;
+ }
+ tgt->dma_ptr = (char *)eaha_seglist_next;
+ eaha_seglist_next += EALLOC_SIZE;
+/* XXX locking */
+}
+
+/*
+ *
+ * shameless copy from above
+ */
+eaha_reset_scsibus(
+ register eaha_softc_t eaha)
+{
+ register target_info_t *tgt;
+ register port = eaha->port;
+ register int i;
+
+ for (i = 0; i < NECCBS; i++) {
+ tgt = eaha->_erccbs[i].active_target;
+ if (/*scsi_debug &&*/ tgt)
+ printf("Target %d was active, cmd x%x in x%x out x%x\n",
+ tgt->target_id, tgt->cur_cmd,
+ tgt->transient_state.in_count,
+ tgt->transient_state.out_count);
+ }
+ eaha_reset(eaha, FALSE);
+ delay(35);
+ /* no interrupt will come */
+ eaha_bus_reset(eaha);
+}
+
+boolean_t
+eaha_probe_target(
+ target_info_t *tgt,
+ io_req_t ior)
+{
+ eaha_softc_t eaha = eaha_softc_pool[tgt->masterno];
+ boolean_t newlywed;
+
+ newlywed = (tgt->cmd_ptr == 0);
+ if (newlywed) {
+ /* desc was allocated afresh */
+ (void) eaha_tgt_alloc(eaha,tgt->target_id, tgt);
+ }
+
+ if (scsi_inquiry(tgt, SCSI_INQ_STD_DATA) == SCSI_RET_DEVICE_DOWN)
+ return FALSE;
+
+ tgt->flags = TGT_ALIVE;
+ return TRUE;
+}
+
+
+/*
+ * Interrupt routine (enhanced mode)
+ * Take interrupts from the board
+ *
+ * Implementation:
+ * TBD
+ */
+eaha_intr(
+ int unit)
+{
+ register eaha_softc_t eaha;
+ register port;
+ unsigned g2intst, g2stat, g2stat2 ;
+ vm_offset_t mbi ;
+ erccb *_erccb ;
+ status_block *status ;
+
+#if MAPPABLE
+ extern boolean_t rz_use_mapped_interface;
+
+ if (rz_use_mapped_interface) {
+ EAHA_intr(unit);
+ return ;
+ }
+#endif /*MAPPABLE*/
+
+ eaha = eaha_softc_pool[unit];
+ port = eaha->port;
+
+ LOG(5,"\n\tintr");
+gotintr:
+ /* collect ephemeral information */
+
+ g2intst = inb(G2INTST(port)) ; /* See TRM4-22..23 */
+ g2stat = inb(G2STAT(port)) ; /*lint:set,not used*/
+ g2stat2 = inb(G2STAT2(port)) ; /*lint:set,not used*/
+ mbi = (vm_offset_t) inb(MBOXIN0(port)) + (inb(MBOXIN1(port))<<8) +
+ (inb(MBOXIN2(port))<<16) + (inb(MBOXIN3(port))<<24) ;
+
+	/* we got an interrupt all right */
+ if (eaha->wd.nactive)
+ eaha->wd.watchdog_state = SCSI_WD_ACTIVE;
+
+ outb(G2CNTRL(port),0x40) ; /* Clear EISA interrupt */
+
+ switch(g2intst>>4) {
+ case 0x07 : /* hardware error ? */
+ case 0x0a : /* immediate command complete - don't expect */
+ case 0x0e : /* ditto with failure */
+ default :
+ printf( "aha%d: Bogus status (x%x) in MBI\n",
+ unit, mbi);
+ gimmeabreak() ; /* Any of above is disaster */
+ break;
+
+ case 0x0d : /* Asynchronous event TRM6-41 */
+ if ((g2intst & 0x0f) == (inb(SCSIDEF(eaha->port)) & 0x0f))
+ eaha_reset_scsibus(eaha) ;
+ else
+ eaha_target_intr(eaha, mbi, g2intst & 0x0f);
+ break;
+
+ case 0x0c : /* ccb complete with error */
+ case 0x01 : /* ccb completed with success */
+ case 0x05 : /* ccb complete with success after retry */
+
+ _erccb = (erccb *)
+ ( ((vm_offset_t)&eaha->I_hold_my_phys_address) +
+ (mbi - eaha->I_hold_my_phys_address) -
+ (vm_offset_t)&(((erccb *)0)->_eccb) ) ;
+ /* That ain't necessary. As kernel (must be) */
+ /* contiguous, only need delta to translate */
+
+ status = &_erccb->status ;
+
+#ifdef NOTDEF
+ if (!eaha_quiet && (!status->don || status->qf ||
+ status->sc || status->dover ||
+ status->ini || status->me)) {
+ printf("\nccb complete error G2INTST=%02X\n",
+ g2intst) ;
+ DUMP(*_erccb) ;
+ gimmeabreak() ;
+ }
+#endif
+
+ eaha_initiator_intr(eaha, _erccb);
+ break;
+ }
+
+ /* See if more work ready */
+ if (inb(G2STAT(port)) & 0x02) {
+ LOG(7,"\n\tre-intr");
+ goto gotintr;
+ }
+}
+
+/*
+ * The interrupt routine turns to one of these two
+ * functions, depending on the incoming mbi's role
+ */
+eaha_target_intr(
+ eaha_softc_t eaha,
+ unsigned int mbi,
+ unsigned int peer)
+{
+ target_info_t *initiator; /* this is the caller */
+ target_info_t *self; /* this is us */
+ int len;
+
+ self = eaha->sc->target[eaha->sc->initiator_id];
+
+ initiator = eaha->sc->target[peer];
+
+ /* ..but initiators are not required to answer to our inquiry */
+ if (initiator == 0) {
+ /* allocate */
+ initiator = eaha_tgt_alloc(eaha, peer, 0);
+
+		/* We do not know here whether the host was down when
+ we inquired, or it refused the connection. Leave
+ the decision on how we will talk to it to higher
+ level code */
+ LOG(0xC, "new_initiator");
+ sccpu_new_initiator(self, initiator);
+ /* Bug fix: was (aha->sc, self, initiator); dph */
+ }
+
+ /* The right thing to do would be build an ior
+ and call the self->dev_ops->strategy routine,
+ but we cannot allocate it at interrupt level.
+ Also note that we are now disconnected from the
+ initiator, no way to do anything else with it
+ but reconnect and do what it wants us to do */
+
+ /* obviously, this needs both spl and MP protection */
+ self->dev_info.cpu.req_pending = TRUE;
+ self->dev_info.cpu.req_id = peer ;
+ self->dev_info.cpu.req_lun = (mbi>>24) & 0x07 ;
+ self->dev_info.cpu.req_cmd =
+ (mbi & 0x80000000) ? SCSI_CMD_SEND: SCSI_CMD_RECEIVE;
+ len = mbi & 0x00ffffff ;
+
+ self->dev_info.cpu.req_len = len;
+
+ LOG(0xB,"tgt-mode-restart");
+ (*self->dev_ops->restart)( self, FALSE);
+
+ /* The call above has either prepared the data,
+ placing an ior on self, or it handled it some
+ other way */
+ if (self->ior == 0)
+ return; /* I guess we'll do it later */
+
+ {
+ erccb *_erccb ;
+
+ _erccb = erccb_alloc(eaha) ;
+ _erccb->active_target = initiator;
+ _erccb->_eccb.command = EAHA_CMD_TARG_CMD ;
+ _erccb->_eccb.ses = 1 ;
+ _erccb->_eccb.dir = (self->cur_cmd == SCSI_CMD_SEND) ? 1 : 0 ;
+
+ eaha_prepare_rccb(initiator, _erccb,
+ (vm_offset_t)self->ior->io_data, self->ior->io_count);
+ _erccb->_eccb.lun = initiator->lun;
+
+ simple_lock(&eaha->aha_lock);
+ if (eaha->wd.nactive++ == 0)
+ eaha->wd.watchdog_state = SCSI_WD_ACTIVE;
+ simple_unlock(&eaha->aha_lock);
+
+ eaha_command(eaha->port, _erccb);
+ }
+}
+
+eaha_initiator_intr(
+ eaha_softc_t eaha,
+ erccb *_erccb)
+{
+ scsi2_status_byte_t status;
+ target_info_t *tgt;
+
+ tgt = _erccb->active_target;
+ _erccb->active_target = 0;
+
+ /* shortcut (sic!) */
+ if (_erccb->status.ha_status == HA_STATUS_SUCCESS)
+ goto allok;
+
+ switch (_erccb->status.ha_status) { /* TRM6-17 */
+ case HA_STATUS_SUCCESS :
+allok:
+ status = _erccb->status.target_status ;
+ if (status.st.scsi_status_code != SCSI_ST_GOOD) {
+ scsi_error(tgt, SCSI_ERR_STATUS, status.bits, 0);
+ tgt->done = (status.st.scsi_status_code == SCSI_ST_BUSY) ?
+ SCSI_RET_RETRY : SCSI_RET_NEED_SENSE;
+ } else
+ tgt->done = SCSI_RET_SUCCESS;
+ break;
+
+ case HA_STATUS_SEL_TIMEOUT :
+ if (tgt->flags & TGT_FULLY_PROBED)
+ tgt->flags = 0; /* went offline */
+ tgt->done = SCSI_RET_DEVICE_DOWN;
+ break;
+
+ case HA_STATUS_OVRUN :
+ /* BUT we don't know if this is an underrun.
+ It is ok if we get less data than we asked
+ for, in a number of cases. Most boards do not
+		   seem to generate this anyway, but some do. */
+ { register int cmd = tgt->cur_cmd;
+ switch (cmd) {
+ case SCSI_CMD_INQUIRY:
+ case SCSI_CMD_REQUEST_SENSE:
+ case SCSI_CMD_RECEIVE_DIAG_RESULTS:
+ case SCSI_CMD_MODE_SENSE:
+ if (_erccb->status.du) /*Ignore underrun only*/
+ break;
+ default:
+ printf("eaha: U/OVRUN on scsi command x%x\n",cmd);
+ gimmeabreak();
+ }
+ }
+ goto allok;
+ case HA_STATUS_BUS_FREE :
+ printf("aha: bad disconnect\n");
+ tgt->done = SCSI_RET_ABORTED;
+ break;
+ case HA_STATUS_PHASE_ERROR :
+ /* we'll get an interrupt soon */
+ printf("aha: bad PHASE sequencing\n");
+ tgt->done = SCSI_RET_ABORTED;
+ break;
+ case HA_STATUS_BAD_OPCODE :
+printf("aha: BADCCB\n");gimmeabreak();
+ tgt->done = SCSI_RET_RETRY;
+ break;
+
+ case HA_STATUS_HOST_ABORTED :
+ case HA_STATUS_ADP_ABORTED :
+ case HA_STATUS_NO_FIRM :
+ case HA_STATUS_NOT_TARGET :
+ case HA_STATUS_INVALID_LINK : /* These aren't expected. */
+ case HA_STATUS_BAD_CBLOCK :
+ case HA_STATUS_DUP_CBLOCK :
+ case HA_STATUS_BAD_SCATHER :
+ case HA_STATUS_RSENSE_FAIL :
+ case HA_STATUS_TAG_REJECT :
+ case HA_STATUS_HARD_ERROR :
+ case HA_STATUS_TARGET_NOATTN :
+ case HA_STATUS_HOST_RESET :
+ case HA_STATUS_OTHER_RESET :
+ case HA_STATUS_PROG_BAD_SUM :
+ default :
+ printf("aha: bad ha_status (x%x)\n", _erccb->status.ha_status);
+ tgt->done = SCSI_RET_ABORTED;
+ break;
+ }
+
+ eaha->has_sense_info [tgt->target_id] =
+ (tgt->done == SCSI_RET_NEED_SENSE) ;
+ if (eaha->has_sense_info [tgt->target_id])
+ eaha->sense_info_lun [tgt->target_id] = tgt->lun ;
+
+ LOG(8,"end");
+
+ simple_lock(&eaha->aha_lock);
+ if (eaha->wd.nactive-- == 1)
+ eaha->wd.watchdog_state = SCSI_WD_INACTIVE;
+ simple_unlock(&eaha->aha_lock);
+
+ if (tgt->ior) {
+ LOG(0xA,"ops->restart");
+ (*tgt->dev_ops->restart)( tgt, TRUE);
+ }
+
+ return FALSE;/*lint: Always returns FALSE. ignored. */
+}
+
+#endif /* NEAHA > 0 */
diff --git a/scsi/adapters/scsi_dma.h b/scsi/adapters/scsi_dma.h
new file mode 100644
index 00000000..9401a164
--- /dev/null
+++ b/scsi/adapters/scsi_dma.h
@@ -0,0 +1,150 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS AS-IS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_dma.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 7/91
+ *
+ * DMA operations that an HBA driver might invoke.
+ *
+ */
+
+/*
+ * This defines much more than usually needed, mainly
+ * to cover for the case of no DMA at all and/or only
+ * DMA from/to a specialized buffer ( which means the
+ * CPU has to copy data into/outof it ).
+ */
+
+typedef struct {
+ opaque_t (*init)(
+ int dev_unit,
+ vm_offset_t base,
+ int *dma_bsizep,
+ boolean_t *oddbp);
+
+ void (*new_target)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+ void (*map)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+ int (*start_cmd)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+ void (*end_xfer)(
+ opaque_t dma_state,
+ target_info_t *tgt,
+ int xferred);
+
+ void (*end_cmd)(
+ opaque_t dma_state,
+ target_info_t *tgt,
+ io_req_t ior);
+
+ int (*start_datain)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+ int (*start_msgin)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+ void (*end_msgin)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+ boolean_t (*start_dataout)(
+ opaque_t dma_state,
+ target_info_t *tgt,
+ volatile unsigned *regp,
+ unsigned value,
+ unsigned char *prefetch_count);
+
+ int (*restart_datain_1)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+ int (*restart_datain_2)(
+ opaque_t dma_state,
+ target_info_t *tgt,
+ int xferred);
+
+ void (*restart_datain_3)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+ int (*restart_dataout_1)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+ int (*restart_dataout_2)(
+ opaque_t dma_state,
+ target_info_t *tgt,
+ int xferred);
+
+ int (*restart_dataout_3)(
+ opaque_t dma_state,
+ target_info_t *tgt,
+ volatile unsigned *regp);
+
+ void (*restart_dataout_4)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+ boolean_t (*disconn_1)(
+ opaque_t dma_state,
+ target_info_t *tgt,
+ int xferred);
+
+ boolean_t (*disconn_2)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+ boolean_t (*disconn_3)(
+ opaque_t dma_state,
+ target_info_t *tgt,
+ int xferred);
+
+ boolean_t (*disconn_4)(
+ opaque_t dma_state,
+ target_info_t *tgt,
+ int xferred);
+
+ boolean_t (*disconn_5)(
+ opaque_t dma_state,
+ target_info_t *tgt,
+ int xferred);
+
+ void (*disconn_callback)(
+ opaque_t dma_state,
+ target_info_t *tgt);
+
+} scsi_dma_ops_t;
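+
+/*
+ * Usage sketch (an assumption about intended use, not taken from a driver
+ * in this tree): an HBA driver would keep a pointer to one of these vectors
+ * together with the opaque state returned by init, and call through it at
+ * the matching points of a transfer, e.g.
+ *
+ *	opaque_t dma_state = (*dma_ops->init)(unit, base, &dma_bsize, &oddb);
+ *	(*dma_ops->new_target)(dma_state, tgt);
+ *	...
+ *	(*dma_ops->start_cmd)(dma_state, tgt);
+ *	...
+ *	(*dma_ops->end_cmd)(dma_state, tgt, ior);
+ */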
+
diff --git a/scsi/adapters/scsi_user_dma.c b/scsi/adapters/scsi_user_dma.c
new file mode 100644
index 00000000..5fb98d64
--- /dev/null
+++ b/scsi/adapters/scsi_user_dma.c
@@ -0,0 +1,171 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * File: scsi_user_dma.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 4/91
+ *
+ * Mach 2.5 compat file, to handle case of DMA to user space
+ * [e.g. fsck and other raw device accesses]
+ */
+
+#ifdef MACH_KERNEL
+/* We do not need this in 3.0 */
+#else /*MACH_KERNEL*/
+
+#include <mach/std_types.h>
+#include <scsi/adapters/scsi_user_dma.h>
+
+#include <kern/assert.h>
+
+#include <vm/vm_kern.h>
+#include <mach/vm_param.h> /* round_page() */
+
+/* bp -> pmap */
+#include <sys/buf.h>
+#include <sys/proc.h>
+
+/*
+ * Initialization, called once per device
+ */
+fdma_init(fdma, size)
+ fdma_t fdma;
+ vm_size_t size;
+{
+ vm_offset_t addr;
+
+ size = round_page(size);
+ addr = kmem_alloc_pageable(kernel_map, size);
+ if (addr == 0) panic("fdma_init");
+
+ fdma->kernel_virtual = addr;
+ fdma->max_data = size;
+ fdma->user_virtual = -1;
+
+}
+
+/*
+ * Remap a buffer from user space to kernel space.
+ * Note that physio() has already validated
+ * and wired the user's address range.
+ */
+fdma_map(fdma, bp)
+ fdma_t fdma;
+ struct buf *bp;
+{
+ pmap_t pmap;
+ vm_offset_t user_addr;
+ vm_size_t size;
+ vm_offset_t kernel_addr;
+ vm_offset_t off;
+ vm_prot_t prot;
+
+ /*
+ * If this is not to user space, or no data xfer is
+ * involved, no need to do anything.
+ */
+ user_addr = (vm_offset_t)bp->b_un.b_addr;
+ if (!(bp->b_flags & B_PHYS) || (user_addr == 0)) {
+ fdma->user_virtual = -1;
+ return;
+ }
+ /*
+ * We are going to clobber the buffer pointer, so
+ * remember what it was to restore it later.
+ */
+ fdma->user_virtual = user_addr;
+
+ /*
+ * Account for initial offset into phys page
+ */
+ off = user_addr - trunc_page(user_addr);
+
+ /*
+ * Check xfer size makes sense, note how many pages we'll remap
+ */
+ size = bp->b_bcount + off;
+ assert((size <= fdma->max_data));
+ fdma->xfer_size_rnd = round_page(size);
+
+ pmap = bp->b_proc->task->map->pmap;
+
+ /*
+ * Use minimal protection possible
+ */
+ prot = VM_PROT_READ;
+ if (bp->b_flags & B_READ)
+ prot |= VM_PROT_WRITE;
+
+ /*
+ * Loop through all phys pages, taking them from the
+ * user pmap (they are wired) and inserting them into
+ * the kernel pmap.
+ */
+ user_addr -= off;
+ kernel_addr = fdma->kernel_virtual;
+ bp->b_un.b_addr = (char *)kernel_addr + off;
+
+ for (size = fdma->xfer_size_rnd; size; size -= PAGE_SIZE) {
+ register vm_offset_t phys;
+
+ phys = pmap_extract(pmap, user_addr);
+ pmap_enter(kernel_pmap, kernel_addr, phys, prot, TRUE);
+ user_addr += PAGE_SIZE;
+ kernel_addr += PAGE_SIZE;
+ }
+}
+
+/*
+ * Called at end of xfer, to restore the buffer
+ */
+fdma_unmap(fdma, bp)
+ fdma_t fdma;
+ struct buf *bp;
+{
+ register vm_offset_t end_addr;
+
+ /*
+ * Check we actually did remap it
+ */
+ if (fdma->user_virtual == -1)
+ return;
+
+ /*
+ * Restore the buffer
+ */
+ bp->b_un.b_addr = (char *)fdma->user_virtual;
+ fdma->user_virtual = -1;
+
+ /*
+ * Eliminate the mapping, pmap module might mess up
+ * the pv list otherwise. Some might actually tolerate it.
+ */
+ end_addr = fdma->kernel_virtual + fdma->xfer_size_rnd;
+ pmap_remove(kernel_pmap, fdma->kernel_virtual, end_addr);
+
+}
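+
+/*
+ * Lifecycle sketch for the three routines above (illustration only;
+ * MAX_RAW_XFER is a hypothetical size, not defined here):
+ */
+#if 0
+	struct fdma fdma;
+
+	fdma_init(&fdma, MAX_RAW_XFER);	/* once, at attach time */
+	/* per raw transfer: */
+	fdma_map(&fdma, bp);		/* before handing bp to the HBA */
+	/* ... DMA to/from bp->b_un.b_addr, now a kernel address ... */
+	fdma_unmap(&fdma, bp);		/* restore bp at completion */
+#endif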
+
+#endif /*MACH_KERNEL*/
diff --git a/scsi/adapters/scsi_user_dma.h b/scsi/adapters/scsi_user_dma.h
new file mode 100644
index 00000000..ff2682c1
--- /dev/null
+++ b/scsi/adapters/scsi_user_dma.h
@@ -0,0 +1,47 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * File: scsi_user_dma.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 4/91
+ *
+ * Defines for Mach 2.5 compat, user-space DMA routines
+ */
+
+/* There is one such structure per I/O device
+ that needs to xfer data to/from user space */
+
+typedef struct fdma {
+ vm_offset_t kernel_virtual;
+ vm_size_t max_data;
+ vm_offset_t user_virtual;
+ int xfer_size_rnd;
+} *fdma_t;
+
+extern int
+ fdma_init(/* fdma_t, vm_size_t */),
+ fdma_map(/* fdma_t, struct buf* */),
+ fdma_unmap(/* fdma_t, struct buf* */);
diff --git a/scsi/compat_30.h b/scsi/compat_30.h
new file mode 100644
index 00000000..988aed77
--- /dev/null
+++ b/scsi/compat_30.h
@@ -0,0 +1,163 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: compat_30.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 4/91
+ *
+ * Compatibility defs to retrofit Mach 3.0 drivers
+ * into Mach 2.6.
+ */
+
+#ifndef _SCSI_COMPAT_30_
+#define _SCSI_COMPAT_30_
+
+#include <kern/assert.h>
+
+#ifdef MACH_KERNEL
+/*
+ * Mach 3.0 compiles with these definitions
+ */
+
+#include <device/param.h>
+#include <device/io_req.h>
+#include <device/device_types.h>
+#include <device/disk_status.h>
+
+/*
+ * Scratch temporary in io_req structure (for error handling)
+ */
+#define io_temporary io_error
+
+#else /*MACH_KERNEL*/
+/*
+ * Mach 2.x compiles with these definitions
+ */
+
+/* ??? */
+typedef int dev_mode_t;
+typedef int *dev_status_t; /* Variable-length array of integers */
+/* ??? */
+
+/* Buffer structures */
+
+typedef int io_return_t;
+
+#include <sys/param.h>
+#include <sys/buf.h>
+
+#define io_req buf
+typedef struct buf *io_req_t;
+
+#define io_req_alloc(ior,size) ior = geteblk(size)
+#define io_req_free(ior) brelse(ior)
+
+/*
+ * Redefine fields for drivers using new names
+ */
+#define io_op b_flags
+#define io_count b_bcount
+#define io_error b_error
+#define io_unit b_dev
+#define io_recnum b_blkno
+#define io_residual b_resid
+#define io_data b_un.b_addr
+#define io_done b_iodone
+
+/*
+ * Redefine fields for driver request list heads, using new names.
+ */
+#define io_next av_forw
+#define io_prev av_back
+/*#define io_next b_actf*/
+/*#define io_prev b_actl*/
+#define io_link b_forw
+#define io_rlink b_back
+/*#define io_count b_active*/
+/*#define io_residual b_errcnt*/
+#define io_alloc_size b_bufsize
+
+/*
+ * Scratch temporary in io_req structure (for error handling)
+ */
+#define io_temporary b_pfcent
+
+/*
+ * Redefine flags
+ */
+#define IO_WRITE B_WRITE
+#define IO_READ B_READ
+#define IO_OPEN B_OPEN
+#define IO_DONE B_DONE
+#define IO_ERROR B_ERROR
+#define IO_BUSY B_BUSY
+#define IO_WANTED B_WANTED
+#define IO_BAD B_BAD
+#define IO_CALL B_CALL
+#define IO_INTERNAL B_MD1
+
+#define IO_SPARE_START B_MD1
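+
+/*
+ * Illustrative sketch (editor's example): with the renamings above, a
+ * driver fragment written against the Mach 3.0 io_req interface, e.g.
+ *
+ *	ior->io_op = IO_READ;
+ *	ior->io_count = DEV_BSIZE;
+ *	ior->io_error = 0;
+ *
+ * compiles on Mach 2.x into the traditional struct buf form
+ *
+ *	bp->b_flags = B_READ;
+ *	bp->b_bcount = DEV_BSIZE;
+ *	bp->b_error = 0;
+ *
+ * since io_req_t is just a struct buf pointer here.
+ */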
+
+#include <sys/disklabel.h>
+
+/* Error codes */
+
+#include <sys/errno.h>
+
+#define D_SUCCESS ESUCCESS
+#define D_IO_ERROR EIO
+#define D_NO_SUCH_DEVICE ENXIO
+#define D_INVALID_SIZE EINVAL
+#define D_ALREADY_OPEN EBUSY
+#define D_INVALID_OPERATION EINVAL
+#define D_NO_MEMORY ENOMEM
+#define D_WOULD_BLOCK EWOULDBLOCK
+#define D_DEVICE_DOWN EIO
+#define D_READ_ONLY EROFS
+
+/*
+ * Debugging support
+ */
+#define db_printf kdbprintf
+#define db_printsym(s,m) kdbpsymoff(s,1,"")
+
+/*
+ * Miscellaneous utils
+ */
+
+#define check_memory(addr,dow) ((dow) ? wbadaddr(addr,4) : badaddr(addr,4))
+
+#include <sys/kernel.h> /* for hz */
+#include <scsi/adapters/scsi_user_dma.h>
+
+#ifdef DECSTATION
+#include <mach/mips/vm_param.h> /* for page size */
+#define ULTRIX_COMPAT 1 /* support for rzdisk disk formatter */
+#endif /*DECSTATION*/
+
+#endif /*MACH_KERNEL*/
+
+#endif /*_SCSI_COMPAT_30_*/
diff --git a/scsi/disk_label.c b/scsi/disk_label.c
new file mode 100644
index 00000000..ab203784
--- /dev/null
+++ b/scsi/disk_label.c
@@ -0,0 +1,692 @@
+/*
+ * Copyright (c) 1996 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Kevin T. Van Maren, University of Utah CSL
+ */
+
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Copyright (c) 1994 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ */
+
+/* This file contains the partition code that is used by the Mach
+ * device drivers (ide & scsi). */
+
+#include <scsi/compat_30.h>
+#include <sys/types.h>
+
+#include <scsi/rz_labels.h>
+#include <i386at/disk.h> /* combine & rename these... */
+
+#define SECTOR_SIZE 512 /* BAD!!! */
+
+#define DSLICE(dev) (((dev) >> 4) & 0x3f)
+#define DPART(dev) ((dev) & 0xf)
+
+/* note: 0 will suppress ALL output;
+ 1 is 'normal' output; 2 is verbose; 3 is Very verbose */
+#define PARTITION_DEBUG 1
+
+#define min(x,y) ((x)<(y)?(x):(y))
+
+/*
+ * Label that is filled in with extra info
+ */
+struct disklabel default_label =
+{
+ DISKMAGIC, DTYPE_SCSI, 0,
+ "SCSI", "",
+ DEV_BSIZE, 1, 1, 1, 1, 1, 0, 0, 0,
+ 3600, 1, 1, 1, 0, 0, 0,
+ {0,}, {0,},
+ DISKMAGIC, 0,
+ 8, 8192, 8192,
+ {{ -1, 0, 1024, FS_BSDFFS, 8, 3 },
+ { -1, 0, 1024, FS_BSDFFS, 8, 3 },
+ { -1, 0, 1024, FS_BSDFFS, 8, 3 },
+ { -1, 0, 1024, FS_BSDFFS, 8, 3 },
+ { -1, 0, 1024, FS_BSDFFS, 8, 3 },
+ { -1, 0, 1024, FS_BSDFFS, 8, 3 },
+ { -1, 0, 1024, FS_BSDFFS, 8, 3 },
+ { -1, 0, 1024, FS_BSDFFS, 8, 3 }}
+};
+
+
+
+/* the device driver calls this just to save some info it got from the HW */
+/* This is a bad holdover from the disklabel days, and needs to go */
+fudge_bsd_label(struct disklabel *label, int type, int total_secs, int heads, int sectors, int sectorsize, int n)
+{
+ *label=default_label;
+
+ label->d_ncylinders = total_secs/(heads*sectors);
+ label->d_ntracks = heads;
+ label->d_nsectors = sectors;
+
+ label->d_secpercyl = heads*sectors;
+ label->d_secperunit = total_secs;
+
+ /* this is never used, but ... */
+ label->d_partitions[MAXPARTITIONS].p_offset = 0;
+ label->d_partitions[MAXPARTITIONS].p_size = total_secs;
+
+ /* ??
+ */
+ label->d_secsize = sectorsize;
+ label->d_type = type;
+ label->d_subtype = 0xa; /* ??? */
+
+ label->d_npartitions = n; /* up to 'c' */
+ label->d_checksum = 0;
+
+ /* should do a checksum on it now */
+}
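+
+/*
+ * Illustrative sketch (editor's example): for a hypothetical drive that
+ * reports 1031184 sectors of 512 bytes with 16 heads and 63 sectors per
+ * track,
+ *
+ *	fudge_bsd_label(&label, DTYPE_SCSI, 1031184, 16, 63, 512, 8);
+ *
+ * fills in d_ntracks = 16, d_nsectors = 63, d_secpercyl = 16*63 = 1008,
+ * d_ncylinders = 1031184/1008 = 1023, and a whole-disk entry covering
+ * all 1031184 sectors.
+ */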
+
+
+
+/* This is placed here to
+ a. provide compatibility with existing servers
+ b. allow the use of FreeBSD-style slices to access ANY disk partition
+ c. provide an easy migration path to lites-based partition code
+ by only passing the drive name to get the entire disk (sd0).
+
+ This will be called by every routine that needs to access partition info
+ based on a device number. It is slower than the old method of indexing
+ into a disklabel, but is more flexible, and reasonably fast in the (future)
+ case where Lites will access the whole disk. An array of disklabels
+ could have been used, but this is more compact and general. The underlying
+ structure does not limit it to 2-levels, but keeping the kernel interface
+ simple does. */
+
+
+/* this code and data structure are based on a conversation with Bryan Ford */
+/* Note: this is called ON EVERY read or write. It makes sense to
+ optimize this for the common case. Hopefully the common case
+ will become the '0,0' case, as partitioning is moved out of the
+ kernel. (Downside is kernel can't protect filesystems from each other).
+ It is slower than indexing into a 1-D array, but not much. */
+
+struct diskpart *lookup_part(struct diskpart *array, int dev_number)
+{
+/* Note: 10 bit encoding to get partitions 0-15 (0,a-h typically), and slices
+ * 0-63
+ */
+
+ int slice = DSLICE(dev_number);
+ int part = DPART(dev_number);
+ struct diskpart *s;
+
+ if (slice == 0) /* compatibility slice */
+ {
+ if (part == 0) /* whole disk */
+ return &array[0];
+
+ if (array[0].type == DISKPART_DOS)
+ {
+ int i;
+ for (i = 0; i < array[0].nsubs; i++)
+ {
+ s = &array[0].subs[i];
+ if ( s->type == DISKPART_BSD
+ || s->type == DISKPART_VTOC)
+ {
+ if (part > s->nsubs)
+ return 0;
+ return (&s->subs[part-1]);
+ }
+ }
+ }
+
+ if (part > array[0].nsubs)
+ return 0;
+ return(&array[0].subs[part-1]);
+ }
+ else
+ {
+ if ( array[0].type != DISKPART_DOS
+ || slice > array[0].nsubs)
+ return 0;
+
+ s = &array[0].subs[slice-1];
+
+ if (part == 0) /* whole slice */
+ return (s);
+ if (part > s->nsubs)
+ return 0;
+
+ return (&s->subs[part-1]);
+ }
+}
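+
+/*
+ * Illustrative sketch (editor's example): using lookup_part() with the
+ * slice/partition encoding above.  The array and device number values
+ * are hypothetical.
+ */
+#if 0	/* example only -- not compiled */
+static void
+example_lookup(struct diskpart *array)
+{
+	/* partition 3 ('c') of slice 2: DSLICE == 2, DPART == 3 */
+	struct diskpart *p = lookup_part(array, (2 << 4) | 3);
+
+	if (p == 0)
+		printf("no such partition\n");
+	else
+		printf("start %d, size %d sectors\n", p->start, p->size);
+}
+#endif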
+
+
+
+
+static inline void fill_array(struct diskpart *array, int start, int size,
+ struct diskpart *subs, int nsubs, short type, short fsys)
+{
+ array->start=start;
+ array->size=size;
+ array->subs=subs;
+ array->nsubs=nsubs;
+ array->type=type;
+ array->fsys=fsys;
+#if (PARTITION_DEBUG > 2)
+ printf("fill: type %d:%d, start %d, size %d, %d parts\n",type,fsys,
+ start,size,nsubs);
+#endif
+}
+
+
+
+
+void print_array(struct diskpart *array, int level)
+{
+ int i,j;
+ struct diskpart *subs;
+
+#if (PARTITION_DEBUG)
+ subs=array[0].subs;
+
+ for (i=0;i<array[0].nsubs;i++) {
+ for (j=0;j<level;j++)
+ printf(" ");
+ printf("%c: %d, %d, %d, %d (%d subparts)\n",'a'+i,
+ subs[i].start, subs[i].size, subs[i].fsys,
+ subs[i].type, subs[i].nsubs);
+ if (subs[i].nsubs>0)
+ print_array(&subs[i], level+1);
+ }
+#endif
+}
+
+
+
+/* individual routines to find the drive labels.
+ There needs to be a function for every different method for partitioning
+ much of the following code is derived from the SCSI/IDE drivers */
+
+int get_dos(struct diskpart *array, char *buff, int start,
+ void *driver_info, int (*bottom_read_fun)(),
+ char *name, int max_part)
+{
+
+ bios_label_t *mp;
+ struct bios_partition_info *pp;
+
+ int count, i, j;
+ int pstart, psize;
+ int ext=-1, mystart=start, mybase;
+ int first=1;
+
+ /* note: start is added, although a start != 0 is meaningless
+ to DOS and anything else... */
+
+ /* check the boot sector for a partition table. */
+ (*bottom_read_fun)(driver_info, start, buff); /* always in sector 0 */
+
+ /*
+ * Check for valid partition table.
+ */
+ mp = (bios_label_t *)&buff[BIOS_LABEL_BYTE_OFFSET];
+ if (mp->magic != BIOS_LABEL_MAGIC) {
+#if (PARTITION_DEBUG>1)
+ printf("%s invalid partition table\n", name);
+#endif
+ return(0); /* didn't add any partitions */
+ }
+#if (PARTITION_DEBUG>1)
+ printf("DOS partition table found\n");
+#endif
+
+ count=min(4,max_part); /* always 4 (primary) partitions */
+#if (PARTITION_DEBUG)
+ if (count<4) printf("More partitions than space!\n");
+#endif
+
+
+ /* fill the next 4 entries in the array */
+ for (i=0, pp=(struct bios_partition_info *)mp->partitions;
+ i<count; i++,pp++) {
+
+ fill_array(&array[i], pp->offset, pp->n_sectors, NULL, 0,
+ DISKPART_NONE, pp->systid);
+ if ((pp->systid == DOS_EXTENDED) &&(ext<0)) {
+ mystart+=pp->offset;
+ ext=i;
+ }
+ }
+
+ /* if there is an extended partition, find all the logical partitions */
+ /* note: logical start at '5' (extended is one of the numbered 1-4) */
+
+ /* logical partitions 'should' be nested inside the primary, but
+ then it would be impossible to NAME a disklabel inside a logical
+ partition, which would be nice to do */
+#if (PARTITION_DEBUG>1)
+ if (ext>=0)
+ printf("extended partition found: %d\n",ext);
+#endif /* PARTITION_DEBUG > 1 */
+
+ while (ext>=0) {
+ pp = &(((struct bios_partition_info *)mp->partitions)[ext]);
+
+ /* read the EXTENDED partition table */
+ if (first) {
+ mybase=mystart;
+ first=0;
+ } else {
+ mybase=mystart+pp->offset;
+ }
+
+ (*bottom_read_fun)(driver_info, mybase, buff);
+
+ if (mp->magic != BIOS_LABEL_MAGIC) {
+#if (PARTITION_DEBUG>1)
+ printf("%s invalid expanded magic\n", name);
+#endif
+ return(count);/*don't add any more partitions*/
+ }
+
+ /* just in case more than one partition is there...*/
+ /* search the EXTENDED partition table */
+ ext=-1;
+ for (j=0,pp=(struct bios_partition_info *)mp->partitions;
+ j<4; j++,pp++) {
+
+ if (pp->systid && (pp->systid!=DOS_EXTENDED)) {
+ if (count<max_part) {
+ fill_array(&array[count],
+ mybase +pp->offset,
+ pp->n_sectors, NULL, 0, DISKPART_NONE,
+ pp->systid);
+ count++; }
+ else {
+#if (PARTITION_DEBUG)
+ printf("More partitions than space!\n");
+#endif
+ return(count);
+ }
+ } else if ((ext<0) &&(pp->systid==DOS_EXTENDED)) {
+ ext=j;
+ /* recursively search the chain here */
+ }
+ }
+ }
+#if (PARTITION_DEBUG>1)
+ printf("%d dos partitions\n",count);
+#endif /* PARTITION_DEBUG > 1 */
+ return(count); /* number dos partitions found */
+
+}
+
+
+
+/* this should work on the bare drive, or in a dos partition */
+int get_disklabel(struct diskpart *array, char *buff, int start,
+ void *driver_info, int (*bottom_read_fun)(),
+ char *name, int max_part)
+{
+ struct disklabel *dlp;
+ int mybase = start + (512 * LBLLOC)/SECTOR_SIZE, i;
+ int count;
+
+ (*bottom_read_fun)(driver_info, mybase, buff);
+
+ dlp = (struct disklabel *)buff;
+ if (dlp->d_magic != DISKMAGIC || dlp->d_magic2 != DISKMAGIC) {
+#if (PARTITION_DEBUG>1)
+ printf("%s no BSD label found\n",name);
+#endif
+ return(0); /* no partitions added */
+ }
+#if (PARTITION_DEBUG>1)
+ printf(" BSD LABEL\n");
+#endif /* PARTITION_DEBUG > 1 */
+ /* note: BSD disklabel offsets are from start of DRIVE -- uuggh */
+
+ count=min(8,max_part); /* always 8 in a disklabel */
+#if (PARTITION_DEBUG)
+ if (count<8) printf("More partitions than space!\n");
+#endif
+ /* COPY into the array */
+ for (i=0;i<count;i++)
+ fill_array(&array[i], /* mybase + */
+ dlp->d_partitions[i].p_offset,
+ dlp->d_partitions[i].p_size,
+ NULL, 0, DISKPART_NONE, dlp->d_partitions[i].p_fstype);
+
+ /* note: p_fstype is not the same set as the DOS types */
+
+ return(count); /* 'always' 8 partitions in disklabel -- if space */
+
+/* UNREACHED CODE FOLLOWS: (alternative method in scsi) */
+#if 0
+ (*bottom_read_fun)(driver_info, (start)+LABELSECTOR, buff);
+
+ register int j;
+ boolean_t found;
+
+ for (j = LABELOFFSET, found = FALSE;
+ j < (SECTOR_SIZE-sizeof(struct disklabel));
+ j += sizeof(int)) {
+ search = (struct disklabel *)&buff[j];
+ if (search->d_magic == DISKMAGIC &&
+ search->d_magic2 == DISKMAGIC) {
+ found = TRUE;
+ break;
+ }
+ }
+ if (found) {
+#if (PARTITION_DEBUG>1)
+ printf("Label found in LABELSECTOR\n");
+#endif
+ } else {
+ search = 0;
+ }
+
+
+#endif /* 0 */
+
+}
+
+
+/* NOT TESTED! */
+/* VTOC in sector 29 */
+int get_vtoc(struct diskpart *array, char *buff, int start,
+ void *driver_info, int (*bottom_read_fun)(),
+ char *name, int max_part)
+{
+ struct evtoc *evp;
+ int n,i;
+ struct disklabel lpl;
+ struct disklabel *lp = &lpl;
+
+#if (PARTITION_DEBUG)
+ printf("Read VTOC.\n");
+#endif
+ (*bottom_read_fun)(driver_info, start +PDLOCATION, buff);
+ evp = (struct evtoc *)buff;
+ if (evp->sanity != VTOC_SANE) {
+#if (PARTITION_DEBUG)
+ printf("%s evtoc corrupt or not found\n", name);
+#endif
+ return(0);
+ }
+ n = min(evp->nparts,max_part); /* no longer DISKLABEL limitations... */
+#if 0
+ n = (evp->nparts > MAXPARTITIONS) ? MAXPARTITIONS : evp->nparts;
+#endif /* 0 */
+
+ for (i = 0; i < n; i++)
+ fill_array(&array[i], /* mybase + */
+ evp->part[i].p_start,
+ evp->part[i].p_size,
+ NULL, 0, DISKPART_NONE, FS_BSDFFS);
+
+ return(n); /* (evp->nparts) */
+}
+
+
+/* NOT TESTED! */
+int get_omron(struct diskpart *array, char *buff, int start,
+ void *driver_info, int (*bottom_read_fun)(),
+ char *name, int max_part)
+{
+
+ struct disklabel *label;
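+	/* N.B.: `label' is declared but never pointed at any storage in
+	   this routine, so the stores through it below go through an
+	   uninitialized pointer (consistent with the NOT TESTED note). */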
+
+ /* here look for an Omron label */
+ register omron_label_t *part;
+ int i;
+
+#if (PARTITION_DEBUG)
+ printf("Looking for Omron label...\n");
+#endif
+
+ (*bottom_read_fun)(driver_info, start+
+ OMRON_LABEL_BYTE_OFFSET/SECTOR_SIZE, buff);
+
+ part = (omron_label_t*)&buff[OMRON_LABEL_BYTE_OFFSET%SECTOR_SIZE];
+ if (part->magic == OMRON_LABEL_MAGIC) {
+#if (PARTITION_DEBUG)
+ printf("{Using OMRON label}");
+#endif
+ for (i = 0; i < 8; i++) {
+ label->d_partitions[i].p_size = part->partitions[i].n_sectors;
+ label->d_partitions[i].p_offset = part->partitions[i].offset;
+ }
+ bcopy(part->packname, label->d_packname, 16);
+ label->d_ncylinders = part->ncyl;
+ label->d_acylinders = part->acyl;
+ label->d_ntracks = part->nhead;
+ label->d_nsectors = part->nsect;
+ /* Many disks have this wrong, therefore.. */
+#if 0
+ label->d_secperunit = part->maxblk;
+#else
+ label->d_secperunit = label->d_ncylinders * label->d_ntracks *
+ label->d_nsectors;
+#endif /* 0 */
+
+ return(8);
+ }
+#if (PARTITION_DEBUG)
+ printf("No Omron label found.\n");
+#endif
+ return(0);
+}
+
+
+/* NOT TESTED! */
+int get_dec(struct diskpart *array, char *buff, int start,
+ void *driver_info, int (*bottom_read_fun)(),
+ char *name, int max_part)
+{
+ struct disklabel *label;
+
+ /* here look for a DEC label */
+ register dec_label_t *part;
+ int i;
+
+#if (PARTITION_DEBUG)
+ printf("Checking for dec_label...\n");
+#endif
+
+ (*bottom_read_fun)(driver_info, start +
+ DEC_LABEL_BYTE_OFFSET/SECTOR_SIZE, buff);
+
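+	/* N.B.: unlike get_omron() above, `part' is never pointed at the
+	   sector just read (and `label' is never initialized either), so
+	   the checks and stores below dereference uninitialized pointers,
+	   consistent with the NOT TESTED note. */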
+ if (part->magic == DEC_LABEL_MAGIC) {
+#if (PARTITION_DEBUG)
+ printf("{Using DEC label}");
+#endif
+ for (i = 0; i < 8; i++) {
+ label->d_partitions[i].p_size = part->partitions[i].n_sectors;
+ label->d_partitions[i].p_offset = part->partitions[i].offset;
+ }
+ return(8);
+ }
+#if (PARTITION_DEBUG)
+ printf("No dec label found.\n");
+#endif
+
+ return(0);
+}
+
+
+
+
+/* array is a pointer to an array of partition_info structures */
+/* array_size is the number of pre-allocated entries there are */
+int get_only_partition(void *driver_info, int (*bottom_read_fun)(),
+ struct diskpart *array, int array_size,
+ int disk_size, char *drive_name)
+{
+ char buff[SECTOR_SIZE];
+ int i,n,cnt;
+ int arrsize;
+ struct diskpart *res;
+
+ /* first fill in the entire disk stuff */
+ /* or should the calling routine do that already? */
+
+ fill_array(array, 0, disk_size, NULL, 0, -1, -1);
+
+ /* while the structure does not preclude additional nestings,
+ additional ones make no sense currently, so they are not
+ checked (Mach can't handle them anyway). It might be nice
+ if for all partitions found, all types of sub-partitions
+ were looked for (unnecessary). This will be done when this
+ is moved out of the kernel, and there is some way to name them */
+
+ arrsize = array_size -1; /* 1 for whole disk */
+
+ /* search for dos partition table */
+ /* find all the partitions (including logical) */
+ n=get_dos(&array[1], buff, 0,
+ driver_info, (bottom_read_fun), drive_name,
+ arrsize);
+
+ if (n>0) {
+ fill_array(array, 0, disk_size, &array[1], n,
+ DISKPART_DOS, 256+DISKPART_DOS);
+ arrsize-=n;
+
+
+ /* search each one for a BSD disklabel (iff BSDOS) */
+ /* note: searching extended and logical partitions */
+ for (i=0;i<n;i++)
+ if (array[i+1].fsys==BSDOS) {
+#if (PARTITION_DEBUG)
+ printf("BSD OS slice: %d\n",i+1);
+#endif
+ cnt=get_disklabel(&array[n+1], buff,
+ array[i+1].start,
+ driver_info, (bottom_read_fun),
+ drive_name,arrsize);
+
+ if (cnt>0) {
+ arrsize-=cnt;
+ fill_array(&array[i+1],array[i+1].start,
+ array[i+1].size, &array[n+1],
+ cnt, DISKPART_BSD,
+ array[i+1].fsys);
+ }
+ n+=cnt;
+ }
+
+ /* search for VTOC -- in a DOS partition as well */
+ for (i=0;i<n;i++)
+ if (array[i+1].fsys==UNIXOS) {
+#if (PARTITION_DEBUG)
+ printf("UNIXOS (vtoc) partition\n");
+#endif
+ cnt=get_vtoc(&array[n+1], buff,
+ array[i+1].start,
+ driver_info, (bottom_read_fun),
+ drive_name,arrsize);
+
+ if (cnt>0) {
+ arrsize-=cnt;
+ fill_array(&array[i+1],array[i+1].start,
+ array[i+1].size, &array[n+1],
+ cnt, DISKPART_VTOC,
+ array[i+1].fsys);
+ }
+ n+=cnt;
+ }
+ }
+
+ /* search for only disklabel */
+ if (n==0) {
+ fill_array(array, 0, disk_size, &array[1], n, DISKPART_BSD,
+ 256+DISKPART_BSD);
+ n=get_disklabel(&array[1], buff, 0, driver_info,
+ (bottom_read_fun), drive_name,arrsize);
+ }
+
+ /* search for only VTOC -- NOT TESTED! */
+ if (n==0) {
+ fill_array(array, 0, disk_size, &array[1], n, DISKPART_VTOC,
+ 256+DISKPART_VTOC);
+ n=get_vtoc(&array[1], buff, 0, driver_info, (bottom_read_fun),
+ drive_name,arrsize);
+ }
+#if 0
+ /* search for only omron -- NOT TESTED! */
+ if (n==0) {
+ fill_array(array, 0, disk_size, &array[1], n, DISKPART_OMRON,
+ 256+DISKPART_OMRON);
+ n=get_omron(&array[1], buff, 0,driver_info, (bottom_read_fun),
+ drive_name,arrsize);
+ }
+
+ /* search for only dec -- NOT TESTED! */
+ if (n==0) {
+ fill_array(array, 0, disk_size, &array[1], n, DISKPART_DEC,
+ 256+DISKPART_DEC);
+ n=get_dec(&array[1], buff, 0, driver_info, (bottom_read_fun),
+ drive_name,arrsize);
+ }
+#endif /* 0 */
+
+#if (PARTITION_DEBUG) /* print out what we found */
+ print_array(array,0);
+#endif
+
+	return(n);	/* number of partition entries filled in */
+}
+
+
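+/*
+ * Illustrative sketch (editor's example): how a disk driver might use
+ * the routines above.  The softc, sector-read routine, array size and
+ * drive name are hypothetical.
+ */
+#if 0	/* example only -- not compiled */
+#define	XX_NPARTS	30
+static struct diskpart	xx_parts[XX_NPARTS];
+
+static void
+xx_found_disk(void *xx_softc, int (*xx_read_sector)(), int xx_disk_size)
+{
+	/* build the partition tree once, when the drive is probed */
+	get_only_partition(xx_softc, xx_read_sector, xx_parts, XX_NPARTS,
+			   xx_disk_size, "xd0");
+}
+
+static struct diskpart *
+xx_minor_to_part(int dev)
+{
+	/* on every read/write, map the minor's 10-bit slice/partition field */
+	return lookup_part(xx_parts, dev & 0x3ff);
+}
+#endif
+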
diff --git a/scsi/mapped_scsi.c b/scsi/mapped_scsi.c
new file mode 100644
index 00000000..fe3dd77d
--- /dev/null
+++ b/scsi/mapped_scsi.c
@@ -0,0 +1,586 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mapped_scsi.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * In-kernel side of the user-mapped SCSI driver.
+ */
+
+#include <asc.h>
+#include <sii.h>
+#define NRZ (NASC+NSII)
+#if NRZ > 0
+#include <platforms.h>
+
+#include <machine/machspl.h> /* spl definitions */
+
+#include <device/device_types.h>
+#include <device/io_req.h>
+#include <chips/busses.h>
+
+#include <vm/vm_kern.h>
+#include <kern/eventcount.h>
+
+#include <scsi/mapped_scsi.h>
+
+#include <machine/machspl.h>
+
+#ifdef DECSTATION
+
+#define machine_btop mips_btop
+
+#define kvctophys(v) K0SEG_TO_PHYS((v)) /* kernel virtual cached */
+#define phystokvc(p) PHYS_TO_K0SEG((p)) /* and back */
+#define kvutophys(v) K1SEG_TO_PHYS((v)) /* kernel virtual uncached */
+#define phystokvu(p) PHYS_TO_K1SEG((p)) /* and back */
+
+#include <mips/mips_cpu.h>
+#include <mips/PMAX/kn01.h>
+#include <mips/PMAX/pmaz_aa.h>
+
+#define SII_REG_PHYS(self) kvutophys(self->registers.any)
+#define SII_RAM_PHYS(self) (SII_REG_PHYS((self))+(KN01_SYS_SII_B_START-KN01_SYS_SII))
+#define SII_RAM_SIZE (KN01_SYS_SII_B_END-KN01_SYS_SII_B_START)
+
+#define ASC_REG_PHYS(self) kvutophys(self->registers.any)
+#define ASC_DMAR_PHYS(self) (ASC_REG_PHYS((self))+ ASC_OFFSET_DMAR)
+#define ASC_RAM_PHYS(self) (ASC_REG_PHYS((self))+ ASC_OFFSET_RAM)
+
+#define PAD_7061(n) short n
+#define PAD_53C94(n) char n[3]
+
+#endif /*DECSTATION*/
+
+#ifdef VAXSTATION
+#define machine_btop vax_btop
+#endif /*VAXSTATION*/
+
+#ifdef P40
+
+#define machine_btop mips_btop
+
+#define kvctophys(v) K0SEG_TO_PHYS((v)) /* kernel virtual cached */
+#define phystokvc(p) PHYS_TO_K0SEG((p)) /* and back */
+#define kvutophys(v) K1SEG_TO_PHYS((v)) /* kernel virtual uncached */
+#define phystokvu(p) PHYS_TO_K1SEG((p)) /* and back */
+
+#include <mips/mips_cpu.h>
+
+#define ASC_RAM_SIZE 0
+#define ASC_OFFSET_DMAR 0
+#define ASC_OFFSET_RAM 0
+
+#define ASC_REG_PHYS(self) kvutophys(self->registers.any)
+#define ASC_DMAR_PHYS(self) (ASC_REG_PHYS((self))+ ASC_OFFSET_DMAR)
+#define ASC_RAM_PHYS(self) (ASC_REG_PHYS((self))+ ASC_OFFSET_RAM)
+#endif /* P40 */
+
+/*
+ * Phys defines for the various supported HBAs
+ */
+
+/* DEC7061 */
+#include <scsi/adapters/scsi_7061.h>
+
+#ifdef PAD_7061
+
+typedef struct {
+ volatile unsigned short sii_sdb; /* rw: Data bus and parity */
+ PAD_7061(pad0);
+ volatile unsigned short sii_sc1; /* rw: scsi signals 1 */
+ PAD_7061(pad1);
+ volatile unsigned short sii_sc2; /* rw: scsi signals 2 */
+ PAD_7061(pad2);
+ volatile unsigned short sii_csr; /* rw: control and status */
+ PAD_7061(pad3);
+ volatile unsigned short sii_id; /* rw: scsi bus ID */
+ PAD_7061(pad4);
+ volatile unsigned short sii_sel_csr; /* rw: selection status */
+ PAD_7061(pad5);
+ volatile unsigned short sii_destat; /* ro: selection detector status */
+ PAD_7061(pad6);
+ volatile unsigned short sii_dstmo; /* unsupp: dssi timeout */
+ PAD_7061(pad7);
+ volatile unsigned short sii_data; /* rw: data register */
+ PAD_7061(pad8);
+ volatile unsigned short sii_dma_ctrl; /* rw: dma control reg */
+ PAD_7061(pad9);
+ volatile unsigned short sii_dma_len; /* rw: length of transfer */
+ PAD_7061(pad10);
+ volatile unsigned short sii_dma_adr_low;/* rw: low address */
+ PAD_7061(pad11);
+ volatile unsigned short sii_dma_adr_hi; /* rw: high address */
+ PAD_7061(pad12);
+ volatile unsigned short sii_dma_1st_byte;/* rw: initial byte */
+ PAD_7061(pad13);
+ volatile unsigned short sii_stlp; /* unsupp: dssi short trgt list ptr */
+ PAD_7061(pad14);
+ volatile unsigned short sii_ltlp; /* unsupp: dssi long " " " */
+ PAD_7061(pad15);
+ volatile unsigned short sii_ilp; /* unsupp: dssi initiator list ptr */
+ PAD_7061(pad16);
+ volatile unsigned short sii_dssi_csr; /* unsupp: dssi control */
+ PAD_7061(pad17);
+ volatile unsigned short sii_conn_csr; /* rc: connection interrupt control */
+ PAD_7061(pad18);
+ volatile unsigned short sii_data_csr; /* rc: data interrupt control */
+ PAD_7061(pad19);
+ volatile unsigned short sii_cmd; /* rw: command register */
+ PAD_7061(pad20);
+ volatile unsigned short sii_diag_csr; /* rw: diagnostic status */
+ PAD_7061(pad21);
+} sii_padded_regmap_t;
+
+#else /*!PAD_7061*/
+
+typedef sii_regmap_t sii_padded_regmap_t;
+
+#endif /*!PAD_7061*/
+
+/* NCR 53C94 */
+#include <scsi/adapters/scsi_53C94.h>
+
+#ifdef PAD_53C94
+typedef struct {
+ volatile unsigned char asc_tc_lsb; /* rw: Transfer Counter LSB */
+ PAD_53C94(pad0);
+ volatile unsigned char asc_tc_msb; /* rw: Transfer Counter MSB */
+ PAD_53C94(pad1);
+ volatile unsigned char asc_fifo; /* rw: FIFO top */
+ PAD_53C94(pad2);
+ volatile unsigned char asc_cmd; /* rw: Command */
+ PAD_53C94(pad3);
+ volatile unsigned char asc_csr; /* r: Status */
+/* #define asc_dbus_id asc_csr -- w: Destination Bus ID */
+ PAD_53C94(pad4);
+ volatile unsigned char asc_intr; /* r: Interrupt */
+/* #define asc_sel_timo asc_intr -- w: (re)select timeout */
+ PAD_53C94(pad5);
+ volatile unsigned char asc_ss; /* r: Sequence Step */
+/* #define asc_syn_p asc_ss -- w: synchronous period */
+ PAD_53C94(pad6);
+ volatile unsigned char asc_flags; /* r: FIFO flags + seq step */
+/* #define asc_syn_o asc_flags -- w: synchronous offset */
+ PAD_53C94(pad7);
+ volatile unsigned char asc_cnfg1; /* rw: Configuration 1 */
+ PAD_53C94(pad8);
+ volatile unsigned char asc_ccf; /* w: Clock Conv. Factor */
+ PAD_53C94(pad9);
+ volatile unsigned char asc_test; /* w: Test Mode */
+ PAD_53C94(pad10);
+ volatile unsigned char asc_cnfg2; /* rw: Configuration 2 */
+ PAD_53C94(pad11);
+ volatile unsigned char asc_cnfg3; /* rw: Configuration 3 */
+ PAD_53C94(pad12);
+ volatile unsigned char asc_rfb; /* w: Reserve FIFO byte */
+ PAD_53C94(pad13);
+} asc_padded_regmap_t;
+
+#else /* !PAD_53C94 */
+
+typedef asc_regmap_t asc_padded_regmap_t;
+
+#endif /* !PAD_53C94 */
+
+/*
+ * Co-existency with in-kernel drivers
+ */
+boolean_t rz_use_mapped_interface = FALSE;
+
+/*
+ * Status information for all HBAs
+ */
+/*static*/ struct RZ_status {
+ union {
+ unsigned long any;
+ asc_padded_regmap_t *asc;
+ sii_padded_regmap_t *sii;
+ } registers;
+ int (*stop)();
+ vm_offset_t (*mmap)();
+ mapped_scsi_info_t info;
+ struct evc eventcounter;
+} RZ_statii[NRZ];
+
+typedef struct RZ_status *RZ_status_t;
+
+
+/*
+ * Probe routine for all HBAs
+ */
+RZ_probe(regbase, ui, hba)
+ unsigned long regbase;
+ register struct bus_device *ui;
+{
+ int unit = ui->unit;
+ vm_offset_t addr;
+ mapped_scsi_info_t info;
+ struct RZ_status *self;
+
+ printf("[mappable] ");
+
+ self = &RZ_statii[unit];
+
+ self->registers.any = regbase;
+
+ /*
+ * Grab a page to be mapped later to users
+ */
+ (void) kmem_alloc_wired(kernel_map, &addr, PAGE_SIZE); /* kseg2 */
+ bzero(addr, PAGE_SIZE);
+ addr = pmap_extract(pmap_kernel(), addr); /* phys */
+ info = (mapped_scsi_info_t) (phystokvc(addr));
+ self->info = info;
+
+ /*
+ * Set permanent info
+ */
+ info->interrupt_count = 0;
+/*XXX*/ info->ram_size = ASC_RAM_SIZE;
+ info->hba_type = hba;
+
+ evc_init(&self->eventcounter);
+ info->wait_event = self->eventcounter.ev_id;
+
+ return 1;
+}
+
+/*
+ * Device open procedure
+ */
+RZ_open(dev, flag, ior)
+ io_req_t ior;
+{
+ int unit = dev;
+ register RZ_status_t self = &RZ_statii[unit];
+
+
+ if (unit >= NRZ)
+ return D_NO_SUCH_DEVICE;
+
+ /*
+ * Silence interface, just in case
+ */
+ (*self->stop)(unit);
+
+ /*
+ * Reset eventcounter
+ */
+ evc_signal(&self->eventcounter);
+
+ rz_use_mapped_interface = TRUE;
+
+ /*
+ * Do not turn interrupts on. The user can do it when ready
+ * to take them.
+ */
+
+ return 0;
+}
+
+/*
+ * Device close procedure
+ */
+RZ_close(dev, flag)
+{
+ int unit = dev;
+ register RZ_status_t self = &RZ_statii[unit];
+
+ if (unit >= NRZ)
+ return D_NO_SUCH_DEVICE;
+
+ /*
+ * Silence interface, in case user forgot
+ */
+ (*self->stop)(unit);
+
+ evc_signal(&self->eventcounter);
+
+ rz_use_mapped_interface = FALSE;
+
+ /* XXX rz_kernel_mode(); XXX */
+
+ return 0;
+}
+
+
+/*
+ * Get status procedure.
+ * We need to tell that we are mappable.
+ */
+io_return_t
+RZ_get_status(dev, flavor, status, status_count)
+ int dev;
+ int flavor;
+ dev_status_t status;
+ unsigned int status_count;
+{
+ return (D_SUCCESS);
+}
+
+/*
+ * Should not refuse this either
+ */
+RZ_set_status(dev, flavor, status, status_count)
+ int dev;
+ int flavor;
+ dev_status_t status;
+ unsigned int status_count;
+{
+ return (D_SUCCESS);
+}
+
+/*
+ * Port death notification routine
+ */
+RZ_portdeath(dev, dead_port)
+{
+}
+
+/*
+ * Page mapping, switch off to HBA-specific for regs&ram
+ */
+vm_offset_t
+RZ_mmap(dev, off, prot)
+ int dev;
+{
+ int unit = dev;
+ register RZ_status_t self = &RZ_statii[unit];
+ vm_offset_t page;
+ vm_offset_t addr;
+ io_return_t ret;
+
+ if (off < SCSI_INFO_SIZE) {
+ addr = kvctophys (self->info) + off;
+ ret = D_SUCCESS;
+ } else
+ ret = (*self->mmap)(self, off, prot, &addr);
+
+ if (ret != D_SUCCESS)
+ return ret;
+
+ page = machine_btop(addr);
+
+ return (page);
+}
+
+
+/*
+ *---------------------------------------------------------------
+ * The rest of the file contains HBA-specific routines
+ *---------------------------------------------------------------
+ */
+
+#if NASC > 0
+/*
+ * Routines for the NCR 53C94
+ */
+static
+ASC_stop(unit)
+{
+ register RZ_status_t self = &RZ_statii[unit];
+ register asc_padded_regmap_t *regs = self->registers.asc;
+ int ack;
+
+ ack = regs->asc_intr; /* Just acknowledge pending interrupts */
+}
+
+ASC_probe(reg, ui)
+ unsigned long reg;
+ register struct bus_device *ui;
+{
+ register RZ_status_t self = &RZ_statii[ui->unit];
+ static vm_offset_t ASC_mmap();
+
+ self->stop = ASC_stop;
+ self->mmap = ASC_mmap;
+ return RZ_probe(reg, ui, HBA_NCR_53c94);
+}
+
+
+ASC_intr(unit,spllevel)
+ spl_t spllevel;
+{
+ register RZ_status_t self = &RZ_statii[unit];
+ register asc_padded_regmap_t *regs = self->registers.asc;
+ register csr, intr, seq_step, cmd;
+
+ /*
+ * Acknowledge interrupt request
+ *
+ * This clobbers some other registers, therefore
+ * we read them beforehand. It also clears the intr
+ * request bit, silencing the interface for now.
+ */
+ csr = regs->asc_csr;
+
+ /* drop spurious interrupts */
+ if ((csr & ASC_CSR_INT) == 0)
+ return;
+ seq_step = regs->asc_ss;
+ cmd = regs->asc_cmd;
+
+ intr = regs->asc_intr; /* ack */
+
+ splx(spllevel); /* drop priority */
+
+ if (self->info) {
+ self->info->interrupt_count++; /* total interrupts */
+ self->info->saved_regs.asc.csr = csr;
+ self->info->saved_regs.asc.isr = intr;
+ self->info->saved_regs.asc.seq = seq_step;
+ self->info->saved_regs.asc.cmd = cmd;
+ }
+
+ /* Awake user thread */
+ evc_signal(&self->eventcounter);
+}
+
+/*
+ * Virtual->physical mapping routine for PMAZ-AA
+ */
+static vm_offset_t
+ASC_mmap(self, off, prot, addr)
+ RZ_status_t self;
+ vm_offset_t off;
+ vm_prot_t prot;
+ vm_offset_t *addr;
+{
+ /*
+ * The offset (into the VM object) defines the following layout
+ *
+ * off size what
+ * 0 1pg mapping information (csr & #interrupts)
+ * 1pg 1pg ASC registers
+ * 2pg 1pg ASC dma
+ * 3pg 128k ASC ram buffers
+ */
+
+#define ASC_END (ASC_RAM_BASE+ASC_RAM_SIZE)
+
+ if (off < ASC_DMAR_BASE)
+ *addr = (vm_offset_t) ASC_REG_PHYS(self) + (off - SCSI_INFO_SIZE);
+ else if (off < ASC_RAM_BASE)
+ *addr = (vm_offset_t) ASC_DMAR_PHYS(self) + (off - ASC_REGS_BASE);
+ else if (off < ASC_END)
+ *addr = (vm_offset_t) ASC_RAM_PHYS(self) + (off - ASC_RAM_BASE);
+ else
+ return D_INVALID_SIZE;
+
+ return D_SUCCESS;
+}
+#endif /* NASC > 0 */
+
+#if NSII > 0
+SII_stop(unit)
+{
+ register RZ_status_t self = &RZ_statii[unit];
+ register sii_padded_regmap_t *regs = self->registers.sii;
+
+ regs->sii_csr &= ~SII_CSR_IE; /* disable interrupts */
+ /* clear all wtc bits */
+ regs->sii_conn_csr = regs->sii_conn_csr;
+ regs->sii_data_csr = regs->sii_data_csr;
+}
+
+SII_probe(reg, ui)
+ unsigned long reg;
+ register struct bus_device *ui;
+{
+ register RZ_status_t self = &RZ_statii[ui->unit];
+ static vm_offset_t SII_mmap();
+
+ self->stop = SII_stop;
+ self->mmap = SII_mmap;
+ return RZ_probe(reg, ui, HBA_DEC_7061);
+}
+
+SII_intr(unit,spllevel)
+ spl_t spllevel;
+{
+ register RZ_status_t self = &RZ_statii[unit];
+ register sii_padded_regmap_t *regs = self->registers.sii;
+ register unsigned short conn, data;
+
+ /*
+ * Disable interrupts, saving cause(s) first.
+ */
+ conn = regs->sii_conn_csr;
+ data = regs->sii_data_csr;
+
+ /* drop spurious calls */
+ if (((conn|data) & (SII_DTR_DI|SII_DTR_CI)) == 0)
+ return;
+
+ regs->sii_csr &= ~SII_CSR_IE;
+
+ regs->sii_conn_csr = conn;
+ regs->sii_data_csr = data;
+
+ splx(spllevel);
+
+ if (self->info) {
+ self->info->interrupt_count++; /* total interrupts */
+ self->info->saved_regs.sii.sii_conn_csr = conn;
+ self->info->saved_regs.sii.sii_data_csr = data;
+ }
+
+ /* Awake user thread */
+ evc_signal(&self->eventcounter);
+}
+
+static vm_offset_t
+SII_mmap(self, off, prot, addr)
+ RZ_status_t self;
+ vm_offset_t off;
+ vm_prot_t prot;
+ vm_offset_t *addr;
+{
+ /*
+ * The offset (into the VM object) defines the following layout
+ *
+ * off size what
+ * 0 1pg mapping information (csr & #interrupts)
+ * 1pg 1pg SII registers
+ * 2pg 128k SII ram buffer
+ */
+
+#define SII_END (SII_RAM_BASE+SII_RAM_SIZE)
+
+ if (off < SII_RAM_BASE)
+ *addr = (vm_offset_t) SII_REG_PHYS(self) + (off - SCSI_INFO_SIZE);
+ else if (off < SII_END)
+ *addr = (vm_offset_t) SII_RAM_PHYS(self) + (off - SII_RAM_BASE);
+ else
+ return D_INVALID_SIZE;
+
+ return D_SUCCESS;
+}
+#endif /* NSII > 0 */
+
+#endif /* NRZ > 0 */
diff --git a/scsi/mapped_scsi.h b/scsi/mapped_scsi.h
new file mode 100644
index 00000000..b9c65283
--- /dev/null
+++ b/scsi/mapped_scsi.h
@@ -0,0 +1,90 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mapped_scsi.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 11/90
+ *
+ * Definitions for the User-level SCSI Driver
+ */
+
+/*
+ * HBA chips of various sorts
+ */
+
+/* DEC 7061 used on pmaxen */
+
+typedef struct sii_volatile_regs {
+ unsigned short sii_conn_csr;
+ unsigned short sii_data_csr;
+} *sii_reg_t;
+
+#define HBA_DEC_7061 0x00000001
+
+ /* layout of mapped stuff */
+#define SII_REGS_BASE (SCSI_INFO_BASE+SCSI_INFO_SIZE)
+#define SII_REGS_SIZE PAGE_SIZE
+#define SII_RAM_BASE (SII_REGS_BASE+SII_REGS_SIZE)
+
+
+/* National 53C94, used on 3maxen' PMAZ-AA boards */
+
+typedef struct asc_volatile_regs {
+ unsigned char csr;
+ unsigned char isr;
+ unsigned char seq;
+ unsigned char cmd;
+} *asc_reg_t;
+
+#define HBA_NCR_53c94 0x00000002
+
+ /* layout of mapped stuff */
+#define ASC_REGS_BASE (SCSI_INFO_BASE+SCSI_INFO_SIZE)
+#define ASC_REGS_SIZE PAGE_SIZE
+#define ASC_DMAR_BASE (ASC_REGS_BASE+ASC_REGS_SIZE)
+#define ASC_DMAR_SIZE PAGE_SIZE
+#define ASC_RAM_BASE (ASC_DMAR_BASE+ASC_DMAR_SIZE)
+
+/*
+ * User-mapped information block, common to all
+ */
+#define SCSI_INFO_BASE 0
+#define SCSI_INFO_SIZE PAGE_SIZE
+
+#define SCSI_MAX_MAPPED_SIZE (ASC_RAM_BASE+128*1024)
+
+typedef struct {
+ int interrupt_count;/* Counter kept by kernel */
+ unsigned int wait_event; /* To wait for interrupts */
+ unsigned ram_size;
+ int hba_type; /* Tag for regs union */
+ union { /* Space for regs saved on
+ * intr. Only few used */
+ struct asc_volatile_regs asc;
+ struct sii_volatile_regs sii;
+ } saved_regs;
+} *mapped_scsi_info_t;
+
diff --git a/scsi/pc_scsi_label.c b/scsi/pc_scsi_label.c
new file mode 100644
index 00000000..9bbcbbf2
--- /dev/null
+++ b/scsi/pc_scsi_label.c
@@ -0,0 +1,196 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/* This goes away as soon as we move it in the Ux server */
+
+
+
+#include <mach/std_types.h>
+#include <scsi/compat_30.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_defs.h>
+#include <scsi/rz.h>
+#include <scsi/rz_labels.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+
+#if (NSCSI > 0)
+#define LABEL_DEBUG(x,y) if (label_flag&x) y
+
+#include <i386at/disk.h>
+#include <device/device_types.h>
+#include <device/disk_status.h>
+
+
+int scsi_abs_sec = -1;
+int scsi_abs_count = -1;
+
+scsi_rw_abs(dev, data, rw, sec, count)
+ dev_t dev;
+{
+ io_req_t ior;
+ io_return_t error;
+
+ io_req_alloc(ior,0);
+ ior->io_next = 0;
+ ior->io_unit = dev & (~(MAXPARTITIONS-1)); /* sort of */
+ ior->io_unit |= PARTITION_ABSOLUTE;
+ ior->io_data = (io_buf_ptr_t)data;
+ ior->io_count = count;
+ ior->io_recnum = sec;
+ ior->io_error = 0;
+ if (rw == IO_READ)
+ ior->io_op = IO_READ;
+ else
+ ior->io_op = IO_WRITE;
+ scdisk_strategy(ior);
+ iowait(ior);
+ error = ior->io_error;
+ io_req_free(ior);
+ return(error);
+}
+
+io_return_t
+scsi_i386_get_status(dev, tgt, flavor, status, status_count)
+int dev;
+target_info_t *tgt;
+int flavor;
+dev_status_t status;
+unsigned int *status_count;
+{
+
+ switch (flavor) {
+ case V_GETPARMS: {
+ struct disklabel *lp = &tgt->dev_info.disk.l;
+ struct disk_parms *dp = (struct disk_parms *)status;
+ extern struct disklabel default_label;
+ int part = rzpartition(dev);
+
+ if (*status_count < sizeof (struct disk_parms)/sizeof(int))
+ return (D_INVALID_OPERATION);
+ dp->dp_type = DPT_WINI;
+ dp->dp_secsiz = lp->d_secsize;
+ if (lp->d_nsectors == default_label.d_nsectors &&
+ lp->d_ntracks == default_label.d_ntracks &&
+ lp->d_ncylinders == default_label.d_ncylinders) {
+ /* I guess there is nothing there */
+ /* Well, then, Adaptec's like ... */
+ dp->dp_sectors = 32;
+ dp->dp_heads = 64;
+ dp->dp_cyls = lp->d_secperunit / 64 / 32 ;
+ } else {
+ dp->dp_sectors = lp->d_nsectors;
+ dp->dp_heads = lp->d_ntracks;
+ dp->dp_cyls = lp->d_ncylinders;
+ }
+
+ dp->dp_dossectors = 32;
+ dp->dp_dosheads = 64;
+ dp->dp_doscyls = lp->d_secperunit / 64 / 32;
+ dp->dp_ptag = 0;
+ dp->dp_pflag = 0;
+/* !!! partition changes */
+printf("USING PARTIOION TABLE\n");
+ dp->dp_pstartsec = lp->d_partitions[part].p_offset;
+ dp->dp_pnumsec = lp->d_partitions[part].p_size;
+ *status_count = sizeof(struct disk_parms)/sizeof(int);
+ break;
+ }
+ case V_RDABS:
+ if (*status_count < DEV_BSIZE/sizeof (int)) {
+ printf("RDABS bad size %x", *status_count);
+ return (D_INVALID_OPERATION);
+ }
+ if (scsi_rw_abs(dev, status, IO_READ, scsi_abs_sec, DEV_BSIZE) != D_SUCCESS)
+ return(D_INVALID_OPERATION);
+ *status_count = DEV_BSIZE/sizeof(int);
+ break;
+ case V_VERIFY: {
+ int count = scsi_abs_count * DEV_BSIZE;
+ int sec = scsi_abs_sec;
+ char *scsi_verify_buf;
+#include "vm/vm_kern.h"
+
+ (void) kmem_alloc(kernel_map, &scsi_verify_buf, PAGE_SIZE);
+
+ *status = 0;
+ while (count > 0) {
+ int xcount = (count < PAGE_SIZE) ? count : PAGE_SIZE;
+ if (scsi_rw_abs(dev, scsi_verify_buf, IO_READ, sec, xcount) != D_SUCCESS) {
+ *status = BAD_BLK;
+ break;
+ } else {
+ count -= xcount;
+ sec += xcount / DEV_BSIZE;
+ }
+ }
+ (void) kmem_free(kernel_map, scsi_verify_buf, PAGE_SIZE);
+ *status_count = 1;
+ break;
+ }
+ default:
+ return(D_INVALID_OPERATION);
+ }
+ return D_SUCCESS;
+}
+
+io_return_t
+scsi_i386_set_status(dev, tgt, flavor, status, status_count)
+int dev;
+target_info_t *tgt;
+int flavor;
+int *status;
+unsigned int status_count;
+{
+ io_req_t ior;
+
+ switch (flavor) {
+ case V_SETPARMS:
+ printf("scsdisk_set_status: invalid flavor V_SETPARMS\n");
+ return(D_INVALID_OPERATION);
+ break;
+ case V_REMOUNT:
+ tgt->flags &= ~TGT_ONLINE;
+ break;
+ case V_ABS:
+ scsi_abs_sec = status[0];
+ if (status_count == 2)
+ scsi_abs_count = status[1];
+ break;
+ case V_WRABS:
+ if (status_count < DEV_BSIZE/sizeof (int)) {
+ printf("RDABS bad size %x", status_count);
+ return (D_INVALID_OPERATION);
+ }
+ if (scsi_rw_abs(dev, status, IO_WRITE, scsi_abs_sec, DEV_BSIZE) != D_SUCCESS)
+ return(D_INVALID_OPERATION);
+ break;
+ default:
+ return(D_INVALID_OPERATION);
+ }
+ return D_SUCCESS;
+}
+#endif /* NSCSI > 0 */
+
diff --git a/scsi/rz.c b/scsi/rz.c
new file mode 100644
index 00000000..febf6291
--- /dev/null
+++ b/scsi/rz.c
@@ -0,0 +1,462 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: rz.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Top layer of the SCSI driver: interface with the MI side.
+ */
+
+/*
+ * This file contains the code that is common to all scsi devices,
+ * operations and/or behaviours specific to certain devices live
+ * in the corresponding rz_mumble files.
+ */
+
+#include <scsi.h>
+
+#if (NSCSI>0)
+
+#include <mach/std_types.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <scsi/compat_30.h>
+
+#ifdef MACH_KERNEL
+#include <kern/time_out.h>
+#else /*MACH_KERNEL*/
+#include <sys/kernel.h> /* for hz */
+
+static io_req_t getbp();
+#endif /*MACH_KERNEL*/
+
+#include <scsi/scsi_defs.h>
+#include <scsi/rz.h>
+
+
+boolean_t
+rz_check(dev, p_sc, p_tgt)
+ int dev;
+ scsi_softc_t **p_sc;
+ target_info_t **p_tgt;
+{
+ if (rzcontroller(dev) >= NSCSI ||
+ (*p_sc = scsi_softc[rzcontroller(dev)]) == 0)
+ return FALSE;
+
+ *p_tgt = (*p_sc)->target[rzslave(dev)];
+
+ if (!*p_tgt ||
+ !((*p_tgt)->flags&TGT_ALIVE))
+ return FALSE;
+ return TRUE;
+}
+
+/*
+ * Open routine
+ *
+ * On tapes and other devices we might have to wait a bit for
+ * the unit to come alive. The following patchable variable
+ * takes this into account
+ */
+int rz_open_timeout = 60;/* seconds */
+
+int rz_open(dev, mode, ior)
+ int dev;
+ dev_mode_t mode;
+ io_req_t ior;
+{
+ scsi_softc_t *sc = 0;
+ target_info_t *tgt;
+ scsi_ret_t ret;
+ register int i;
+
+ if (!rz_check(dev, &sc, &tgt)) {
+ /*
+ * Probe it again: might have installed a new device
+ */
+ if (!sc || !scsi_probe(sc, &tgt, rzslave(dev), ior))
+ return D_NO_SUCH_DEVICE;
+ }
+
+ /* tapes do not wait for rewind to complete on close */
+ if (tgt->ior && !(tgt->flags & TGT_ONLINE))
+ return D_WOULD_BLOCK;
+
+ if (scsi_debug)
+ printf("opening %s%d..", (*tgt->dev_ops->driver_name)(TRUE), dev&0xff);
+
+ if (sc->watchdog) {
+ (*sc->watchdog)(tgt->hw_state);
+ sc->watchdog = 0;
+ }
+
+ /*
+ * Bring the unit online, retrying if necessary.
+ * If the target is spinning up we wait for it.
+ */
+ if ( ! (tgt->flags & TGT_ONLINE)) {
+ io_req_t tmp_ior;
+
+ io_req_alloc(tmp_ior,0);
+ tmp_ior->io_next = 0;
+ tmp_ior->io_count = 0;
+
+ for (i = 0; i < rz_open_timeout; i++) {
+
+ tmp_ior->io_op = IO_INTERNAL;
+ tmp_ior->io_error = 0;
+ ret = scsi_test_unit_ready(tgt, tmp_ior);
+
+ if (ret == SCSI_RET_SUCCESS)
+ break;
+
+ if (ret == SCSI_RET_DEVICE_DOWN) {
+ i = rz_open_timeout;
+ break;
+ }
+
+ if (ret == SCSI_RET_NEED_SENSE) {
+
+ tmp_ior->io_op = IO_INTERNAL;
+ tmp_ior->io_count = 0;
+ tmp_ior->io_residual = 0;
+ tgt->ior = tmp_ior;
+ scsi_request_sense(tgt, tmp_ior, 0);
+ iowait(tmp_ior);
+
+ }
+
+ if (i == 5) printf("%s%d: %s\n",
+ (*tgt->dev_ops->driver_name)(TRUE),
+ tgt->target_id,
+ "Waiting to come online..");
+ timeout(wakeup, tgt, hz);
+ await(tgt);
+ }
+
+ /* lock on removable media */
+ if ((i != rz_open_timeout) && (tgt->flags & TGT_REMOVABLE_MEDIA)) {
+ tmp_ior->io_op = IO_INTERNAL;
+ /* too many don't support it. Sigh */
+ tgt->flags |= TGT_OPTIONAL_CMD;
+ (void) scsi_medium_removal( tgt, FALSE, tmp_ior);
+ tgt->flags &= ~TGT_OPTIONAL_CMD;
+ }
+
+ io_req_free(tmp_ior);
+ if (i == rz_open_timeout)
+ return D_DEVICE_DOWN;
+ }
+ /*
+ * Perform anything open-time special on the device
+ */
+ if (tgt->dev_ops->open != SCSI_OPEN_NULL) {
+ ret = (*tgt->dev_ops->open)(tgt, ior);
+ if (ret != SCSI_RET_SUCCESS) {
+ if (scsi_debug) printf("%s%d: open failed x%x\n",
+ (*tgt->dev_ops->driver_name)(TRUE), dev&0xff, ret);
+ return ret;
+ }
+ }
+ tgt->flags |= TGT_ONLINE;
+ ior->io_device->bsize = tgt->block_size;
+ return D_SUCCESS;
+}
+
+int rz_close(dev)
+ int dev;
+{
+ scsi_softc_t *sc;
+ target_info_t *tgt;
+ scsi_ret_t ret;
+
+ if (!rz_check(dev, &sc, &tgt))
+ return D_NO_SUCH_DEVICE;
+
+ if (scsi_debug)
+ printf("closing %s%d..", (*tgt->dev_ops->driver_name)(TRUE), dev&0xff);
+
+ if (tgt->flags & TGT_REMOVABLE_MEDIA) {
+ io_req_t ior;
+
+ io_req_alloc(ior,0);
+ ior->io_next = 0;
+ ior->io_count = 0;
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+ /* too many don't support it. Sigh */
+ tgt->flags |= TGT_OPTIONAL_CMD;
+ (void) scsi_medium_removal( tgt, TRUE, ior);
+ tgt->flags &= ~TGT_OPTIONAL_CMD;
+ io_req_free(ior);
+ }
+
+ /*
+ * Perform anything close-time special on the device
+ */
+ if (tgt->dev_ops->close != SCSI_CLOSE_NULL) {
+ ret = (*tgt->dev_ops->close)(tgt);
+ if (ret != SCSI_RET_SUCCESS) {
+ printf("%s%d: close failed x%x\n",
+ (*tgt->dev_ops->driver_name)(TRUE), dev&0xff, ret);
+ }
+ }
+ if (tgt->flags & TGT_REMOVABLE_MEDIA)
+ tgt->flags &= ~TGT_ONLINE;
+
+ return D_SUCCESS;
+}
+
+/* our own minphys */
+void rz_minphys(ior)
+ io_req_t ior;
+{
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ if (ior->io_count > scsi_per_target_virtual)
+ ior->io_count = scsi_per_target_virtual;
+#endif /*MACH_KERNEL*/
+}
+
+int rz_read(dev, ior)
+ int dev;
+ io_req_t ior;
+{
+ target_info_t *tgt;
+
+ tgt = scsi_softc[rzcontroller(dev)]->target[rzslave(dev)];
+
+#ifdef MACH_KERNEL
+ return block_io(tgt->dev_ops->strategy, rz_minphys, ior);
+#else /*MACH_KERNEL*/
+ return physio(tgt->dev_ops->strategy, getbp(dev), dev, IO_READ, rz_minphys, ior);
+#endif /*MACH_KERNEL*/
+}
+
+int rz_write(dev, ior)
+ int dev;
+ io_req_t ior;
+{
+ target_info_t *tgt;
+
+ tgt = scsi_softc[rzcontroller(dev)]->target[rzslave(dev)];
+
+ if (tgt->flags & TGT_READONLY)
+ return D_INVALID_OPERATION;
+
+#ifdef MACH_KERNEL
+ return block_io(tgt->dev_ops->strategy, rz_minphys, ior);
+#else /*MACH_KERNEL*/
+ return physio(tgt->dev_ops->strategy, getbp(dev), dev, IO_WRITE, rz_minphys, ior);
+#endif /*MACH_KERNEL*/
+}
+
+int rz_get_status(dev, flavor, status, status_count)
+ int dev;
+ dev_flavor_t flavor;
+ dev_status_t status;
+ natural_t *status_count;
+{
+ target_info_t *tgt;
+
+ tgt = scsi_softc[rzcontroller(dev)]->target[rzslave(dev)];
+
+ if (scsi_debug)
+ printf("rz_get_status: x%x x%x x%x x%x\n",
+ dev, flavor, status, *status_count);
+ return (*tgt->dev_ops->get_status)(dev, tgt, flavor, status, status_count);
+}
+
+int rz_set_status(dev, flavor, status, status_count)
+ int dev;
+ dev_flavor_t flavor;
+ dev_status_t status;
+ natural_t status_count;
+{
+ target_info_t *tgt;
+
+ tgt = scsi_softc[rzcontroller(dev)]->target[rzslave(dev)];
+
+ if (scsi_debug)
+ printf("rz_set_status: x%x x%x x%x x%x\n",
+ dev, flavor, status, status_count);
+ return (*tgt->dev_ops->set_status)(dev, tgt, flavor, status, status_count);
+}
+
+/*
+ * Routine to return information to kernel.
+ */
+int
+rz_devinfo(dev, flavor, info)
+ int dev;
+ int flavor;
+ char *info;
+{
+ register int result;
+
+ result = D_SUCCESS;
+
+ switch (flavor) {
+ /* Caller stupidity, should use device->bsize instead */
+ case D_INFO_BLOCK_SIZE:
+ *((int *) info) = scsi_softc[rzcontroller(dev)]->
+ target[rzslave(dev)]->block_size;
+ break;
+ default:
+ result = D_INVALID_OPERATION;
+ }
+
+ return(result);
+}
+
+void
+rz_simpleq_strategy(ior, start)
+ io_req_t ior;
+ void (*start)();
+{
+ target_info_t *tgt;
+ register scsi_softc_t *sc;
+ scsi_ret_t ret;
+ register int i = ior->io_unit;
+ io_req_t head, tail;
+ spl_t s;
+
+ sc = scsi_softc[rzcontroller(i)];
+ tgt = sc->target[rzslave(i)];
+
+ ior->io_next = 0;
+ ior->io_prev = 0;
+
+ s = splbio();
+ simple_lock(&tgt->target_lock);
+ if (head = tgt->ior) {
+ /* Queue it up at the end of the list */
+ if (tail = head->io_prev)
+ tail->io_next = ior;
+ else
+ head->io_next = ior;
+ head->io_prev = ior; /* tail pointer */
+ simple_unlock(&tgt->target_lock);
+ } else {
+ /* Was empty, start operation */
+ tgt->ior = ior;
+ simple_unlock(&tgt->target_lock);
+ (*start)( tgt, FALSE);
+ }
+ splx(s);
+}
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+
+rz_strategy(ior)
+ io_req_t ior;
+{
+ target_info_t *tgt;
+ register int dev = ior->io_unit;
+
+ tgt = scsi_softc[rzcontroller(dev)]->target[rzslave(dev)];
+
+ return (*tgt->dev_ops->strategy)(ior);
+}
+
+
+#define IOCPARM_SIZE(c) (((c)>>16)&IOCPARM_MASK)
+#define IOC_WDSIZE(s) ((IOCPARM_SIZE(s))>>2)
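+
+/*
+ * Illustrative sketch (editor's example): IOCPARM_SIZE() extracts the
+ * parameter byte count encoded in the ioctl command word, and
+ * IOC_WDSIZE() turns that into the number of ints that rz_get_status
+ * and rz_set_status expect; e.g. a command carrying a 512-byte argument
+ * gives IOCPARM_SIZE(cmd) == 512 and IOC_WDSIZE(cmd) == 128.
+ */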
+
+rz_ioctl(dev, cmd, data, flag)
+{
+ io_return_t error;
+ unsigned int count;
+
+ count = IOC_WDSIZE(cmd);
+ if (cmd & (IOC_VOID|IOC_IN)) {
+ error = rz_set_status(dev, cmd, (dev_status_t)data, count);
+ if (error)
+ return (error);
+ }
+ if (cmd & IOC_OUT) {
+ error = rz_get_status(dev, cmd, (dev_status_t *)data, &count);
+ if (error)
+ return (error);
+ }
+ return (0);
+}
+
+/* This is a very simple-minded config,
+ * assumes we have << 8 disks per bus */
+#define NBUF (NSCSI*8)
+struct io_req rz_buffers[NBUF];
+
+static io_req_t
+getbp(dev)
+{
+ io_req_t ior;
+ int hash = minor(dev) >> 3;
+
+ ior = &rz_buffers[hash];
+ if (ior->io_op & IO_BUSY) {
+ register io_req_t ior;
+ for (ior = rz_buffers; ior < &rz_buffers[NBUF]; ior++)
+ if ((ior->io_op & IO_BUSY) == 0)
+ return ior;
+
+ }
+ return ior;
+}
+
+/*
+ * This ugliness is only needed because of the
+ * way the minor is encoded for tapes.
+ */
+tz_open(dev, mode, ior)
+ int dev;
+ dev_mode_t mode;
+ io_req_t ior;
+{
+ io_return_t error;
+
+ error = rz_open(TAPE_UNIT(dev), mode, ior);
+ if(error)
+ return error;
+ if (TAPE_REWINDS(dev)) {
+ scsi_softc_t *sc;
+ target_info_t *tgt;
+
+ rz_check(TAPE_UNIT(dev), &sc, &tgt);
+ tgt->flags |= TGT_REWIND_ON_CLOSE;
+ }
+ return 0;
+}
+
+tz_close(dev) { return rz_close(TAPE_UNIT(dev));}
+tz_read(dev, ior) { return rz_read(TAPE_UNIT(dev), ior);}
+tz_write(dev, ior) { return rz_write(TAPE_UNIT(dev), ior);}
+tz_ioctl(dev, cmd, data, flag) { return rz_ioctl(TAPE_UNIT(dev), cmd, data, flag);}
+
+#endif /*MACH_KERNEL*/
+
+#endif /* NSCSI > 0 */
diff --git a/scsi/rz.h b/scsi/rz.h
new file mode 100644
index 00000000..7fa7b889
--- /dev/null
+++ b/scsi/rz.h
@@ -0,0 +1,60 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: rz.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Mapping between U*x-like indexing and controller+slave
+ * Each controller handles at most 8 slaves, few controllers.
+ */
+
+#if 0
+#define rzcontroller(dev) (((dev)>>6)&0x3)
+#define rzslave(dev) (((dev)>>3)&0x7)
+#endif /* 0 */
+#define rzcontroller(dev) (((dev)>>13)&0x3)
+#define rzslave(dev) (((dev)>>10)&0x7)
+
+#if 0
+#define rzpartition(dev) ((PARTITION_TYPE(dev)==0xf)?MAXPARTITIONS:((dev)&0x7))
+#endif /* 0 */
+#define rzpartition(dev) ((dev)&0x3ff)
+
+/* To address the full 256 luns use upper bits 8..12 */
+/* NOTE: Under U*x this means the next major up.. what a mess */
+#define rzlun(dev) (((dev)&0x7) | (((dev)>>5)&0xf8))
+
+/* note: whatever this was used for is no longer cared about -- Kevin */
+#define PARTITION_TYPE(dev) (((dev)>>24)&0xf)
+#define PARTITION_ABSOLUTE (0xf<<24)
+
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+#define tape_unit(dev) ((((dev)&0xe0)>>3)|((dev)&0x3))
+#define TAPE_UNIT(dev)		(((dev)&(~0xff))|(tape_unit((dev))<<3))
+#define TAPE_REWINDS(dev)	((((dev)&0x1c)==0)||(((dev)&0x1c)==8))
+#endif /*MACH_KERNEL*/
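+
+#if 0
+/*
+ * Editorial sketch (not part of the original header): how a minor
+ * number decomposes under the active macros above, i.e. bits 13..14
+ * select the controller, bits 10..12 the slave and bits 0..9 the
+ * partition.
+ */
+static void rz_decode_example(int dev)
+{
+	/* e.g. dev = (1 << 13) | (3 << 10) | 2  ->  ctlr 1, slave 3, part 2 */
+	printf("ctlr %d slave %d part %d\n",
+	       rzcontroller(dev), rzslave(dev), rzpartition(dev));
+}
+#endif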
diff --git a/scsi/rz_audio.c b/scsi/rz_audio.c
new file mode 100644
index 00000000..4d60fa1e
--- /dev/null
+++ b/scsi/rz_audio.c
@@ -0,0 +1,1901 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: rz_audio.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 3/93
+ *
+ * Top layer of the SCSI driver: interface with the MI.
+ * This file contains operations specific to audio CD-ROM devices.
+ * Unlike many others, it sits on top of the rz.c module.
+ */
+
+#include <mach/std_types.h>
+#include <kern/strings.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <vm/vm_kern.h>
+#include <device/ds_routines.h>
+
+#include <scsi/compat_30.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+#include <scsi/rz.h>
+
+#if (NSCSI > 0)
+
+#define private static
+
+/* some data is two BCD digits in one byte */
+#define bcd_to_decimal(b) (((b)&0xf) + 10 * (((b) >> 4) & 0xf))
+#define decimal_to_bcd(b) ((((b) / 10) << 4) | ((b) % 10))
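+/* Editorial example: bcd_to_decimal(0x59) == 59 and decimal_to_bcd(59) == 0x59;
+ * several vendor-specific commands below return MSF addresses packed this way. */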
+
+/*
+ * Regular use of a CD-ROM is for data, and is handled
+ * by the default set of operations. Ours is for funtime..
+ */
+
+extern char *sccdrom_name();
+int cd_strategy();
+void cd_start();
+
+private scsi_devsw_t scsi_audio = {
+ sccdrom_name, 0, 0, 0, cd_strategy, cd_start, 0, 0
+};
+
+private char unsupported[] = "Device does not support it.";
+
+/*
+ * Unfortunately, none of the vendors appear to
+ * abide by the SCSI-2 standard and many of them
+ * violate or stretch even the SCSI-1 one.
+ * Therefore, we keep a red-list here of the worst
+ * offenders and how to deal with them.
+ * The user is notified of the problem and invited
+ * to solicit his vendor to upgrade the firmware.
+ * [They had plenty of time to do so]
+ */
+typedef struct red_list {
+ char *vendor;
+ char *product;
+ char *rev;
+ /*
+ * The standard MANDATES [par 13.1.6] the play_audio command
+ * at least as a way to discover if the device
+ * supports audio operations at all. This is the only way
+ * we need to use it.
+ */
+ scsi_ret_t (*can_play_audio)( target_info_t *, char *, io_req_t);
+ /*
+ * The standard defines the use of start_stop_unit to
+ * cause the drive to eject the disk.
+ */
+ scsi_ret_t (*eject)( target_info_t *, char *, io_req_t );
+ /*
+ * The standard defines read_subchannel as a way to
+ * get the current playing position.
+ */
+ scsi_ret_t (*current_position)( target_info_t *, char *, io_req_t );
+ /*
+ * The standard defines read_table_of_content to get
+ * the listing of audio tracks available.
+ */
+ scsi_ret_t (*read_toc)( target_info_t *, char *, io_req_t );
+ /*
+ * The standard defines read_subchannel as the way to
+ * report the current audio status (playing/stopped/...).
+ */
+ scsi_ret_t (*get_status)( target_info_t *, char *, io_req_t );
+ /*
+ * The standard defines two ways to issue a play command,
+ * depending on the type of addressing used.
+ */
+ scsi_ret_t (*play_msf)( target_info_t *, char *, io_req_t );
+ scsi_ret_t (*play_ti)( target_info_t *, char *, io_req_t );
+ /*
+ * The standard defines the pause_resume command to
+ * suspend or resume playback of audio data.
+ */
+ scsi_ret_t (*pause_resume)( target_info_t *, char *, io_req_t );
+ /*
+ * The standard defines the audio page among the
+ * mode selection options as a way to control
+ * both volume and connectivity of the channels
+ */
+ scsi_ret_t (*volume_control)( target_info_t *, char *, io_req_t );
+} red_list_t;
+
+#define if_it_can_do(some_cmd) \
+ if (tgt->dev_info.cdrom.violates_standards && \
+ tgt->dev_info.cdrom.violates_standards->some_cmd) \
+ rc = (*tgt->dev_info.cdrom.violates_standards->some_cmd) \
+ (tgt,cmd,ior); \
+ else
+
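+/*
+ * Editorial sketch of a typical use site (it mirrors the "Eject" case
+ * in cd_command() below and is shown only to illustrate the macro):
+ *
+ *	if_it_can_do(eject)
+ *		rc = scsi_start_unit(tgt, SCSI_CMD_SS_EJECT, ior);
+ *
+ * If the target is red-listed and supplies an 'eject' replacement that
+ * routine runs; otherwise control falls through to the standard SCSI-2
+ * START/STOP UNIT command hanging off the macro's trailing else.
+ */
+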
+/*
+ * So now that you know what they should have implemented :-),
+ * check at the end of the file what the naughty boys did instead.
+ */
+/* private red_list_t audio_replacements[]; / * at end */
+
+/*
+ * Forward decls
+ */
+private void decode_status( char *buf, unsigned char audio_status );
+void zero_ior( io_req_t );
+
+/*
+ * Open routine. Does some checking, sets up
+ * the replacement pointer.
+ */
+io_return_t
+cd_open(
+ int dev,
+ dev_mode_t mode,
+ io_req_t req)
+{
+ scsi_softc_t *sc = 0;
+ target_info_t *tgt;
+ int ret;
+ scsi_ret_t rc;
+ io_req_t ior = 0;
+ vm_offset_t mem = 0;
+ extern boolean_t rz_check();
+
+ if (!rz_check(dev, &sc, &tgt)) {
+ /*
+ * Probe it again: might have installed a new device
+ */
+ if (!sc || !scsi_probe(sc, &tgt, rzslave(dev), ior))
+ return D_NO_SUCH_DEVICE;
+ bzero(&tgt->dev_info, sizeof(tgt->dev_info));
+ }
+
+ /*
+	 * Check this is indeed a CD-ROM
+ */
+ if (tgt->dev_ops != &scsi_devsw[SCSI_CDROM]) {
+ rz_close(dev);
+ return D_NO_SUCH_DEVICE;
+ }
+
+ /*
+	 * Switch to audio ops, unless something goes wrong
+ */
+ tgt->dev_ops = &scsi_audio;
+
+ /*
+ * Bring unit online
+ */
+ ret = rz_open(dev, mode, req);
+ if (ret) goto bad;
+
+ /* Pessimistic */
+ ret = D_INVALID_OPERATION;
+
+ /*
+ * Check if this device is on the red list
+ */
+ {
+ scsi2_inquiry_data_t *inq;
+ private void check_red_list();
+
+ scsi_inquiry(tgt, SCSI_INQ_STD_DATA);
+ inq = (scsi2_inquiry_data_t*)tgt->cmd_ptr;
+
+ check_red_list( tgt, inq );
+
+ }
+
+ /*
+ * Allocate dynamic data
+ */
+ if (kmem_alloc(kernel_map, &mem, PAGE_SIZE) != KERN_SUCCESS)
+ return D_NO_MEMORY;
+ tgt->dev_info.cdrom.result = (void *)mem;
+ tgt->dev_info.cdrom.result_available = FALSE;
+
+ /*
+ * See if this CDROM can play audio data
+ */
+ io_req_alloc(ior,0);
+ zero_ior( ior );
+
+ {
+ char *cmd = 0;
+ if_it_can_do(can_play_audio)
+ rc = scsi_play_audio( tgt, 0, 0, FALSE, ior);
+ }
+
+ if (rc != SCSI_RET_SUCCESS) goto bad;
+
+ io_req_free(ior);
+ return D_SUCCESS;
+
+bad:
+ if (ior) io_req_free(ior);
+ if (mem) kmem_free(kernel_map, mem, PAGE_SIZE);
+ tgt->dev_ops = &scsi_devsw[SCSI_CDROM];
+ return ret;
+}
+
+/*
+ * Close routine.
+ */
+io_return_t
+cd_close(
+ int dev)
+{
+ scsi_softc_t *sc;
+ target_info_t *tgt;
+ vm_offset_t mem;
+
+ if (!rz_check(dev, &sc, &tgt))
+ return D_NO_SUCH_DEVICE;
+ if (!tgt || (tgt->dev_ops != &scsi_audio))
+ return D_NO_SUCH_DEVICE;
+
+ /*
+ * Cleanup state
+ */
+ mem = (vm_offset_t) tgt->dev_info.cdrom.result;
+ tgt->dev_info.cdrom.result = (void *)0;
+ tgt->dev_info.cdrom.result_available = FALSE;
+
+ (void) kmem_free(kernel_map, mem, PAGE_SIZE);
+
+ (void) rz_close(dev);
+
+ tgt->dev_ops = &scsi_devsw[SCSI_CDROM];
+ return D_SUCCESS;
+}
+
+/*
+ * Write routine. It is passed an ASCII string
+ * with the command to be executed.
+ */
+io_return_t
+cd_write(
+ int dev,
+ io_req_t ior)
+{
+ register kern_return_t rc;
+ boolean_t wait = FALSE;
+ io_return_t ret;
+ int count;
+ register char *data;
+ vm_offset_t addr;
+
+ data = ior->io_data;
+ count = ior->io_count;
+ if (count == 0)
+ return D_SUCCESS;
+
+ if (!(ior->io_op & IO_INBAND)) {
+ /*
+ * Copy out-of-line data into kernel address space.
+ * Since data is copied as page list, it will be
+ * accessible.
+ */
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+ kern_return_t kr;
+
+ kr = vm_map_copyout(device_io_map, &addr, copy);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ data = (char *) addr;
+ }
+
+ if (scsi_debug) printf("Got command '%s'\n", data);
+
+ ret = cd_command( dev, data, count, ior);
+
+ if (!(ior->io_op & IO_INBAND))
+ (void) vm_deallocate(device_io_map, addr, ior->io_count);
+ return D_SUCCESS;
+}
+
+/*
+ * Read routine. Returns an ASCII string with the results
+ * of the last command executed.
+ */
+io_return_t
+cd_read(
+ int dev,
+ io_req_t ior)
+{
+ target_info_t *tgt;
+ kern_return_t rc;
+ natural_t count;
+
+ /*
+ * Allocate memory for read buffer.
+ */
+ count = (natural_t)ior->io_count;
+ if (count > PAGE_SIZE)
+ return D_INVALID_SIZE; /* sanity */
+
+ rc = device_read_alloc(ior, count);
+ if (rc != KERN_SUCCESS)
+ return rc;
+
+ if (scsi_debug) printf("Got read req for %d bytes\n", count);
+
+ /*
+	 * See if the last cmd left something to say
+ */
+ tgt = scsi_softc[rzcontroller(dev)]->target[rzslave(dev)];
+ if (tgt->dev_info.cdrom.result_available) {
+ int len;
+
+ tgt->dev_info.cdrom.result_available = FALSE;
+ len = strlen(tgt->dev_info.cdrom.result)+1;
+
+ if (count > len)
+ count = len;
+ bcopy(tgt->dev_info.cdrom.result, ior->io_data, count);
+
+ } else {
+# define noway "No results pending"
+ count = (count > sizeof(noway)) ? sizeof(noway) : count;
+ bcopy(noway, ior->io_data, count);
+ }
+
+ ior->io_residual = ior->io_count - count;
+ return D_SUCCESS;
+}
+
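+/*
+ * Editorial summary (derived from the switch below): the ASCII commands
+ * accepted through cd_write() are
+ *
+ *	"Eject"
+ *	"Get Position MSF|ABS"
+ *	"Get TH"
+ *	"Get Status"
+ *	"Play A startM startS startF endM endS endF"
+ *	"Play TI startT startI endT endI"
+ *	"Resume"
+ *	"Start" / "Stop"
+ *	"Set V chan0vol chan1vol chan2vol chan3vol"
+ *	"Toc MSF|ABS trackno"
+ */
+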
+/*
+ * This does all the work
+ */
+io_return_t
+cd_command(
+ int dev,
+ char *cmd,
+ int count,
+ io_req_t req)
+{
+ target_info_t *tgt;
+ io_req_t ior;
+ io_return_t ret = D_INVALID_OPERATION;
+ scsi_ret_t rc;
+ char *buf;
+
+ tgt = scsi_softc[rzcontroller(dev)]->target[rzslave(dev)];
+
+ buf = tgt->dev_info.cdrom.result;
+ tgt->dev_info.cdrom.result_available = FALSE;
+
+ io_req_alloc(ior,0);
+ zero_ior( ior );
+
+ switch (cmd[0]) {
+
+ case 'E':
+ /* "Eject" */
+		/* too many don't support it. Sigh */
+ tgt->flags |= TGT_OPTIONAL_CMD;
+ (void) scsi_medium_removal( tgt, TRUE, ior);
+ tgt->flags &= ~TGT_OPTIONAL_CMD;
+
+ zero_ior( ior );
+
+ if_it_can_do(eject)
+ rc = scsi_start_unit(tgt, SCSI_CMD_SS_EJECT, ior);
+ break;
+
+ case 'G':
+ switch (cmd[4]) {
+
+ case 'P':
+ /* "Get Position MSF|ABS" */
+ if_it_can_do(current_position) {
+ rc = scsi_read_subchannel(tgt,
+ cmd[13] == 'M',
+ SCSI_CMD_RS_FMT_CURPOS,
+ 0,
+ ior);
+ if (rc == SCSI_RET_SUCCESS) {
+ cdrom_chan_curpos_t *st;
+ st = (cdrom_chan_curpos_t *)tgt->cmd_ptr;
+ if (cmd[13] == 'M')
+ sprintf(buf, "MSF Position %d %d %d %d %d %d",
+ (integer_t)st->subQ.absolute_address.msf.minute,
+ (integer_t)st->subQ.absolute_address.msf.second,
+ (integer_t)st->subQ.absolute_address.msf.frame,
+ (integer_t)st->subQ.relative_address.msf.minute,
+ (integer_t)st->subQ.relative_address.msf.second,
+ (integer_t)st->subQ.relative_address.msf.frame);
+ else
+ sprintf(buf, "ABS Position %d %d", (integer_t)
+ (st->subQ.absolute_address.lba.lba1<<24)+
+ (st->subQ.absolute_address.lba.lba2<<16)+
+ (st->subQ.absolute_address.lba.lba3<< 8)+
+ st->subQ.absolute_address.lba.lba4,
+ (integer_t)
+ (st->subQ.relative_address.lba.lba1<<24)+
+ (st->subQ.relative_address.lba.lba2<<16)+
+ (st->subQ.relative_address.lba.lba3<< 8)+
+ st->subQ.relative_address.lba.lba4);
+ tgt->dev_info.cdrom.result_available = TRUE;
+ }
+ }
+ break;
+
+ case 'T':
+ /* "Get TH" */
+ if_it_can_do(read_toc) {
+ rc = scsi_read_toc(tgt, TRUE, 1, PAGE_SIZE, ior);
+ if (rc == SCSI_RET_SUCCESS) {
+ cdrom_toc_t *toc = (cdrom_toc_t *)tgt->cmd_ptr;
+ sprintf(buf, "toc header: %d %d %d",
+ (toc->len1 << 8) + toc->len2,
+ toc->first_track,
+ toc->last_track);
+ tgt->dev_info.cdrom.result_available = TRUE;
+ }
+ }
+ break;
+
+ case 'S':
+ /* "Get Status" */
+ if_it_can_do(get_status) {
+ rc = scsi_read_subchannel(tgt,
+ TRUE,
+ SCSI_CMD_RS_FMT_CURPOS,
+ 0,
+ ior);
+ if (rc == SCSI_RET_SUCCESS) {
+ cdrom_chan_curpos_t *st;
+ st = (cdrom_chan_curpos_t *)tgt->cmd_ptr;
+ decode_status(buf, st->audio_status);
+ tgt->dev_info.cdrom.result_available = TRUE;
+ }
+ }
+ break;
+ }
+ break;
+
+ case 'P':
+ switch (cmd[5]) {
+ case 'A':
+ /* "Play A startM startS startF endM endS endF" */
+ if_it_can_do(play_msf) {
+
+ int sm, ss, sf, em, es, ef;
+
+ sscanf(&cmd[7], "%d %d %d %d %d %d",
+ &sm, &ss, &sf, &em, &es, &ef);
+
+ rc = scsi_play_audio_msf(tgt,
+ sm, ss, sf,
+ em, es, ef,
+ ior);
+ }
+ break;
+
+ case 'T':
+ /* "Play TI startT startI endT endI" */
+ if_it_can_do(play_ti) {
+
+ int st, si, et, ei;
+
+ sscanf(&cmd[8], "%d %d %d %d",
+ &st, &si, &et, &ei);
+
+ rc = scsi_play_audio_track_index(tgt,
+ st, si, et, ei, ior);
+ }
+ break;
+ }
+ break;
+
+ case 'R':
+ /* "Resume" */
+ if_it_can_do(pause_resume)
+ rc = scsi_pause_resume(tgt, FALSE, ior);
+ break;
+
+ case 'S':
+ switch (cmd[2]) {
+
+ case 'a':
+ /* "Start" */
+ rc = scsi_start_unit(tgt, SCSI_CMD_SS_START, ior);
+ break;
+
+ case 'o':
+ /* "Stop" */
+ if_it_can_do(pause_resume)
+ rc = scsi_pause_resume(tgt, TRUE, ior);
+ break;
+
+ case 't':
+ /* "Set V chan0vol chan1vol chan2vol chan3vol" */
+ if_it_can_do(volume_control) {
+
+ int v0, v1, v2, v3;
+ cdrom_audio_page_t au, *aup;
+
+ rc = scsi_mode_sense(tgt,
+ SCSI_CD_AUDIO_PAGE,
+ sizeof(au),
+ ior);
+ if (rc == SCSI_RET_SUCCESS) {
+
+ sscanf(&cmd[6], "%d %d %d %d",
+ &v0, &v1, &v2, &v3);
+
+ aup = (cdrom_audio_page_t *) tgt->cmd_ptr;
+ au = *aup;
+ /* au.h.bdesc ... */
+ au.vol0 = v0;
+ au.vol1 = v1;
+ au.vol2 = v2;
+ au.vol3 = v3;
+ au.imm = 1;
+ au.aprv = 0;
+
+ zero_ior( ior );
+
+ rc = scsi2_mode_select(tgt, FALSE,
+ &au, sizeof(au), ior);
+ }
+ }
+ break;
+ }
+ break;
+
+ case 'T':
+ /* "Toc MSF|ABS trackno" */
+ if_it_can_do(read_toc) {
+
+ int t, m;
+
+ sscanf(&cmd[8], "%d", &t);
+ rc = scsi_read_toc( tgt, cmd[4]=='M', t, PAGE_SIZE, ior);
+
+ if (rc == SCSI_RET_SUCCESS) {
+
+ cdrom_toc_t *toc = (cdrom_toc_t *)tgt->cmd_ptr;
+
+ sprintf(buf, "TOC from track %d:\n", t);
+ m = (toc->len1 << 8) + toc->len2;
+ m -= 4; /* header */
+ for (t = 0; m > 0; t++, m -= sizeof(struct cdrom_toc_desc)) {
+ buf += strlen(buf);
+ if (cmd[4] == 'M')
+ sprintf(buf, "%d %d %d %d %d %d\n",
+ toc->descs[t].control,
+ toc->descs[t].adr,
+ toc->descs[t].trackno,
+ (integer_t)toc->descs[t].absolute_address.msf.minute,
+ (integer_t)toc->descs[t].absolute_address.msf.second,
+ (integer_t)toc->descs[t].absolute_address.msf.frame);
+ else
+ sprintf(buf, "%d %d %d %d\n",
+ toc->descs[t].control,
+ toc->descs[t].adr,
+ toc->descs[t].trackno,
+ (toc->descs[t].absolute_address.lba.lba1<<24)+
+ (toc->descs[t].absolute_address.lba.lba2<<16)+
+ (toc->descs[t].absolute_address.lba.lba3<<8)+
+ toc->descs[t].absolute_address.lba.lba4);
+ }
+ tgt->dev_info.cdrom.result_available = TRUE;
+ }
+ }
+ break;
+ }
+
+ if (rc == SCSI_RET_SUCCESS)
+ ret = D_SUCCESS;
+
+ /* We are stateless, but.. */
+ if (rc == SCSI_RET_NEED_SENSE) {
+ zero_ior( ior );
+ tgt->ior = ior;
+ scsi_request_sense(tgt, ior, 0);
+ iowait(ior);
+ if (scsi_check_sense_data(tgt, tgt->cmd_ptr))
+ scsi_print_sense_data(tgt->cmd_ptr);
+ }
+
+ io_req_free(ior);
+ return ret;
+}
+
+private char st_invalid [] = "Drive would not say";
+private char st_playing [] = "Playing";
+private char st_paused [] = "Suspended";
+private char st_complete[] = "Done playing";
+private char st_error [] = "Stopped in error";
+private char st_nothing [] = "Idle";
+
+private void
+decode_status(
+ char *buf,
+ unsigned char audio_status)
+{
+ switch (audio_status) {
+ case SCSI_CDST_INVALID:
+ sprintf(buf, st_invalid); break;
+ case SCSI_CDST_PLAYING:
+ sprintf(buf, st_playing); break;
+ case SCSI_CDST_PAUSED:
+ sprintf(buf, st_paused); break;
+ case SCSI_CDST_COMPLETED:
+ sprintf(buf, st_complete); break;
+ case SCSI_CDST_ERROR:
+ sprintf(buf, st_error); break;
+ case SCSI_CDST_NO_STATUS:
+ sprintf(buf, st_nothing); break;
+ }
+}
+
+/* some vendor-specific drives use this instead */
+private void
+decode_status_1(
+ char *buf,
+ unsigned char audio_status)
+{
+ switch (audio_status) {
+ case 0: sprintf(buf, st_playing ); break;
+ case 1:
+ case 2: sprintf(buf, st_paused ); break;
+ case 3: sprintf(buf, st_complete ); break;
+ default:
+ sprintf(buf, "Unknown status" ); break;
+ }
+}
+
+
+private void
+curse_the_vendor(
+ red_list_t *list,
+ boolean_t not_really)
+{
+ if (not_really) return;
+
+ printf("%s\n%s\n%s\n%s\n",
+ "The CDROM you use is not fully SCSI-2 compliant.",
+ "We invite You to contact Your vendor and ask",
+ "that they provide You with a firmware upgrade.",
+ "Here is a list of some known deficiencies");
+
+ printf("Vendor: %s Product: %s.. Revision: %s..\n",
+ list->vendor, list->product, list->rev);
+
+#define check(x,y,z) \
+ if (list->x) printf("Command code x%x %s not supported\n", y, z);
+
+ check(can_play_audio, SCSI_CMD_PLAY_AUDIO, "PLAY_AUDIO");
+ check(eject, SCSI_CMD_START_STOP_UNIT,
+ "START_STOP_UNIT, flag EJECT(0x2) in byte 5");
+ check(current_position, SCSI_CMD_READ_SUBCH, "READ_SUBCHANNEL");
+ check(read_toc, SCSI_CMD_READ_TOC, "READ_TOC");
+/* check(get_status, ...); duplicate of current_position */
+ check(play_msf, SCSI_CMD_PLAY_AUDIO_MSF, "PLAY_AUDIO_MSF");
+ check(play_ti, SCSI_CMD_PLAY_AUDIO_TI, "PLAY_AUDIO_TRACK_INDEX");
+ check(pause_resume, SCSI_CMD_PAUSE_RESUME, "PAUSE_RESUME");
+ check(volume_control, SCSI_CMD_MODE_SELECT,
+ "MODE_SELECT, AUDIO page(0xe)");
+
+#undef check
+ printf("Will work around these problems...\n");
+}
+
+/*
+ * Ancillaries
+ */
+cd_strategy(ior)
+ register io_req_t ior;
+{
+ return rz_simpleq_strategy( ior, cd_start);
+}
+
+void cd_start( tgt, done)
+ target_info_t *tgt;
+ boolean_t done;
+{
+ io_req_t ior;
+
+ ior = tgt->ior;
+ if (done && ior) {
+ tgt->ior = 0;
+ iodone(ior);
+ return;
+ }
+ panic("cd start"); /* uhu? */
+}
+
+/*
+ * When the hardware cannot support the requested operation
+ */
+private scsi_ret_t
+op_not_supported(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ /*
+ * The command is not implemented, no way around it
+ */
+ sprintf(tgt->dev_info.cdrom.result, unsupported);
+ tgt->dev_info.cdrom.result_available = TRUE;
+ return SCSI_RET_SUCCESS;
+}
+
+/****************************************/
+/* Vendor Specific Operations */
+/****************************************/
+
+ /* DEC RRD42 */
+
+#define SCSI_CMD_DEC_SET_ADDRESS_FORMAT 0xc0
+# define scsi_cmd_saf_fmt scsi_cmd_xfer_len_2
+
+#define SCSI_CMD_DEC_PLAYBACK_STATUS 0xc4
+typedef struct {
+ unsigned char xxx;
+ BITFIELD_2(unsigned char,
+ is_msf: 1,
+ xxx1: 7);
+ unsigned char data_len1;
+ unsigned char data_len0;
+ unsigned char audio_status;
+ BITFIELD_2(unsigned char,
+ control : 4,
+ xxx2 : 4);
+ cdrom_addr_t address;
+ BITFIELD_2(unsigned char,
+ chan0_select : 4,
+ xxx3 : 4);
+ unsigned char chan0_volume;
+ BITFIELD_2(unsigned char,
+ chan1_select : 4,
+ xxx4 : 4);
+ unsigned char chan1_volume;
+ BITFIELD_2(unsigned char,
+ chan2_select : 4,
+ xxx5 : 4);
+ unsigned char chan2_volume;
+ BITFIELD_2(unsigned char,
+ chan3_select : 4,
+ xxx6 : 4);
+ unsigned char chan3_volume;
+} dec_playback_status_t;
+
+#define SCSI_CMD_DEC_PLAYBACK_CONTROL 0xc9
+typedef struct {
+ unsigned char xxx0;
+ BITFIELD_2(unsigned char,
+ fmt : 1,
+ xxx1 : 7);
+ unsigned char xxx[8];
+ BITFIELD_2(unsigned char,
+ chan0_select : 4,
+ xxx3 : 4);
+ unsigned char chan0_volume;
+ BITFIELD_2(unsigned char,
+ chan1_select : 4,
+ xxx4 : 4);
+ unsigned char chan1_volume;
+ BITFIELD_2(unsigned char,
+ chan2_select : 4,
+ xxx5 : 4);
+ unsigned char chan2_volume;
+ BITFIELD_2(unsigned char,
+ chan3_select : 4,
+ xxx6 : 4);
+ unsigned char chan3_volume;
+} dec_playback_control_t;
+
+
+#if 0
+
+private scsi_ret_t
+rrd42_status(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_ret_t rc;
+ char *buf = tgt->dev_info.cdrom.result;
+ scsi_command_group_2 c;
+ dec_playback_status_t *st;
+
+ /* We might have to specify addressing fmt */
+ if (cmd[4] == 'P') {
+ scsi_command_group_2 saf;
+
+ bzero(&saf, sizeof(saf));
+ saf.scsi_cmd_code = SCSI_CMD_DEC_SET_ADDRESS_FORMAT;
+ saf.scsi_cmd_saf_fmt = (cmd[13] == 'A') ? 0 : 1;
+
+ rc = cdrom_vendor_specific(tgt, &saf, 0, 0, 0, ior);
+
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ zero_ior( ior );
+ }
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_DEC_PLAYBACK_STATUS;
+ c.scsi_cmd_xfer_len_2 = sizeof(*st);
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, sizeof(*st), ior);
+
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ st = (dec_playback_status_t *) tgt->cmd_ptr;
+
+ if (cmd[4] == 'S')
+ decode_status( buf, st->audio_status+0x11 );
+ else {
+ if (st->is_msf)
+ sprintf(buf, "MSF Position %d %d %d",
+ (integer_t)st->address.msf.minute,
+ (integer_t)st->address.msf.second,
+ (integer_t)st->address.msf.frame);
+ else
+ sprintf(buf, "ABS Position %d", (integer_t)
+ (st->address.lba.lba1<<24)+
+ (st->address.lba.lba2<<16)+
+ (st->address.lba.lba3<< 8)+
+ st->address.lba.lba4);
+ }
+ tgt->dev_info.cdrom.result_available = TRUE;
+ return rc;
+}
+#endif
+
+private scsi_ret_t
+rrd42_set_volume(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+ dec_playback_control_t req;
+ int v0, v1, v2, v3;
+
+ sscanf(&cmd[6], "%d %d %d %d", &v0, &v1, &v2, &v3);
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_DEC_PLAYBACK_CONTROL;
+ c.scsi_cmd_xfer_len_2 = sizeof(req);
+ bzero(&req, sizeof(req));
+ if (v0) {
+ req.chan0_select = 1;
+ req.chan0_volume = v0;
+ }
+ if (v1) {
+ req.chan1_select = 2;
+ req.chan1_volume = v1;
+ }
+ if (v2) {
+ req.chan2_select = 4;
+ req.chan2_volume = v2;
+ }
+ if (v3) {
+ req.chan3_select = 8;
+ req.chan3_volume = v3;
+ }
+ return cdrom_vendor_specific(tgt, &c, &req, sizeof(req), 0, ior);
+}
+
+ /* NEC CD-ROM */
+
+#define SCSI_CMD_NEC_READ_TOC 0xde
+typedef struct {
+ unsigned char xxx[9];
+ unsigned char first_track;
+ unsigned char xxx1[9];
+ unsigned char last_track;
+ unsigned char xxx2[9];
+ unsigned char lead_out_addr[3];
+ struct {
+ BITFIELD_2(unsigned char,
+ adr : 4,
+ ctrl : 4);
+ unsigned char xxx3[6];
+ unsigned char address[3];
+ } track_info[1]; /* VARSIZE */
+} nec_toc_data_t;
+
+#define SCSI_CMD_NEC_SEEK_TRK 0xd8
+#define SCSI_CMD_NEC_PLAY_AUDIO 0xd9
+#define SCSI_CMD_NEC_PAUSE 0xda
+#define SCSI_CMD_NEC_EJECT 0xdc
+
+#define SCSI_CMD_NEC_READ_SUBCH_Q 0xdd
+typedef struct {
+ unsigned char audio_status; /* see decode_status_1 */
+ BITFIELD_2(unsigned char,
+ ctrl : 4,
+ xxx1 : 4);
+ unsigned char trackno;
+ unsigned char indexno;
+ unsigned char relative_address[3];
+ unsigned char absolute_address[3];
+} nec_subch_data_t;
+
+/*
+ * Reserved bits in byte1
+ */
+#define NEC_LR_PLAY_MODE 0x01 /* RelAdr bit overload */
+#define NEC_LR_STEREO 0x02 /* mono/stereo */
+
+/*
+ * Vendor specific bits in the control byte.
+ * NEC uses them to specify the addressing mode
+ */
+#define NEC_CTRL_A_ABS 0x00 /* XXX not sure about this */
+#define NEC_CTRL_A_MSF 0x40 /* min/sec/frame */
+#define NEC_CTRL_A_TI 0x80 /* track/index */
+#define NEC_CTRL_A_CURRENT 0xc0 /* same as last specified */
+
+private scsi_ret_t
+nec_eject(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_NEC_EJECT;
+
+ return cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+}
+
+private scsi_ret_t
+nec_subchannel(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+ nec_subch_data_t *st;
+ char *buf = tgt->dev_info.cdrom.result;
+ scsi_ret_t rc;
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_NEC_READ_SUBCH_Q;
+ c.scsi_cmd_lun_and_relbit = sizeof(*st); /* Sic! */
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, sizeof(*st), ior);
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ st = (nec_subch_data_t *) tgt->cmd_ptr;
+
+ /* Status or Position ? */
+
+ if (cmd[4] == 'S') {
+ decode_status_1( buf, st->audio_status);
+ } else {
+
+ /* XXX can it do ABS addressing e.g. 'logical' ? */
+
+ sprintf(buf, "MSF Position %d %d %d %d %d %d",
+ (integer_t)bcd_to_decimal(st->absolute_address[0]), /* min */
+ (integer_t)bcd_to_decimal(st->absolute_address[1]), /* sec */
+ (integer_t)bcd_to_decimal(st->absolute_address[2]), /* frm */
+ (integer_t)bcd_to_decimal(st->relative_address[0]), /* min */
+ (integer_t)bcd_to_decimal(st->relative_address[1]), /* sec */
+ (integer_t)bcd_to_decimal(st->relative_address[2])); /* frm */
+ }
+
+ tgt->dev_info.cdrom.result_available = TRUE;
+ return SCSI_RET_SUCCESS;
+}
+
+private scsi_ret_t
+nec_read_toc(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+ nec_toc_data_t *t;
+ char *buf = tgt->dev_info.cdrom.result;
+ scsi_ret_t rc;
+ int first, last, i;
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_NEC_READ_TOC;
+ c.scsi_cmd_lun_and_relbit = NEC_LR_PLAY_MODE|NEC_LR_STEREO;
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, 512/*XXX*/, ior);
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ t = (nec_toc_data_t *) tgt->cmd_ptr;
+
+ first = bcd_to_decimal(t->first_track);
+ last = bcd_to_decimal(t->last_track);
+
+ /*
+ * "Get TH" wants summary, "TOC MSF|ABS from_track" wants all
+ */
+ if (cmd[0] == 'G') {
+ sprintf(buf, "toc header: %d %d %d",
+ sizeof(*t) + sizeof(t->track_info) * (last - first - 1),
+ first, last);
+ goto out;
+ }
+
+ /*
+ * The whole shebang
+ */
+ sscanf(&cmd[8], "%d", &i);
+ sprintf(buf, "TOC from track %d:\n", i);
+
+ last -= first;
+ i -= first;
+ while ((i >= 0) && (i <= last)) {
+ buf += strlen(buf);
+ if (cmd[4] == 'M')
+ sprintf(buf, "%d %d %d %d %d %d\n",
+ t->track_info[i].ctrl,
+ t->track_info[i].adr,
+ first + i,
+ bcd_to_decimal(t->track_info[i].address[0]),
+ bcd_to_decimal(t->track_info[i].address[1]),
+ bcd_to_decimal(t->track_info[i].address[2]));
+ else
+/* THIS IS WRONG */
+ sprintf(buf, "%d %d %d %d\n",
+ t->track_info[i].ctrl,
+ t->track_info[i].adr,
+ first + i,
+ bcd_to_decimal(t->track_info[i].address[0]) * 10000 +
+ bcd_to_decimal(t->track_info[i].address[1]) * 100 +
+ bcd_to_decimal(t->track_info[i].address[2]));
+ i++;
+ }
+ /* To know how long the last track is */
+ buf += strlen(buf);
+ if (cmd[4] == 'M')
+ sprintf(buf, "%d %d %d %d %d %d\n",
+ 0, 1, 0xaa /* User expects this */,
+ bcd_to_decimal(t->lead_out_addr[0]),
+ bcd_to_decimal(t->lead_out_addr[1]),
+ bcd_to_decimal(t->lead_out_addr[2]));
+ else
+/* THIS IS WRONG */
+ sprintf(buf, "%d %d %d %d\n",
+ 0, 1, 0xaa /* User expects this */,
+ bcd_to_decimal(t->lead_out_addr[0]) * 10000 +
+ bcd_to_decimal(t->lead_out_addr[1]) * 100 +
+ bcd_to_decimal(t->lead_out_addr[2]));
+out:
+ tgt->dev_info.cdrom.result_available = TRUE;
+ return SCSI_RET_SUCCESS;
+}
+
+
+private scsi_ret_t
+nec_play(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+ int sm, ss, sf, em, es, ef;
+ int st, si, et, ei;
+ scsi_ret_t rc;
+
+ /*
+ * Seek to desired position
+ */
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_NEC_SEEK_TRK;
+ c.scsi_cmd_lun_and_relbit = NEC_LR_PLAY_MODE;
+
+ /*
+ * Play_msf or Play_ti
+ */
+ if (cmd[5] == 'A') {
+ /* "Play A startM startS startF endM endS endF" */
+
+ sscanf(&cmd[7], "%d %d %d %d %d %d",
+ &sm, &ss, &sf, &em, &es, &ef);
+
+ c.scsi_cmd_lba1 = decimal_to_bcd(sm);
+ c.scsi_cmd_lba2 = decimal_to_bcd(ss);
+ c.scsi_cmd_lba3 = decimal_to_bcd(sf);
+ c.scsi_cmd_ctrl_byte = NEC_CTRL_A_MSF;
+
+ } else {
+ /* "Play TI startT startI endT endI" */
+
+ sscanf(&cmd[8], "%d %d %d %d", &st, &si, &et, &ei);
+
+ c.scsi_cmd_lba1 = decimal_to_bcd(st);
+ c.scsi_cmd_lba2 = decimal_to_bcd(si);
+ c.scsi_cmd_lba3 = 0;
+ c.scsi_cmd_ctrl_byte = NEC_CTRL_A_TI;
+
+ }
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ /*
+ * Now ask it to play until..
+ */
+ zero_ior( ior );
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_NEC_PLAY_AUDIO;
+ c.scsi_cmd_lun_and_relbit = NEC_LR_PLAY_MODE|NEC_LR_STEREO;
+
+ if (cmd[5] == 'A') {
+ c.scsi_cmd_lba1 = decimal_to_bcd(em);
+ c.scsi_cmd_lba2 = decimal_to_bcd(es);
+ c.scsi_cmd_lba3 = decimal_to_bcd(ef);
+ c.scsi_cmd_ctrl_byte = NEC_CTRL_A_MSF;
+ } else {
+ c.scsi_cmd_lba1 = decimal_to_bcd(et);
+ c.scsi_cmd_lba2 = decimal_to_bcd(ei);
+ c.scsi_cmd_lba3 = 0;
+ c.scsi_cmd_ctrl_byte = NEC_CTRL_A_TI;
+ }
+
+ return cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+}
+
+private scsi_ret_t
+nec_pause_resume(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+
+ bzero(&c, sizeof(c));
+ /*
+ * "Resume" or "Stop"
+ */
+ if (cmd[0] == 'R') {
+ c.scsi_cmd_code = SCSI_CMD_NEC_PLAY_AUDIO;
+ c.scsi_cmd_lun_and_relbit = NEC_LR_PLAY_MODE|NEC_LR_STEREO;
+ c.scsi_cmd_ctrl_byte = NEC_CTRL_A_CURRENT;
+ } else {
+ c.scsi_cmd_code = SCSI_CMD_NEC_PAUSE;
+ }
+
+ return cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+}
+
+ /* TOSHIBA CD-ROM DRIVE:XM 3232 */
+
+#define SCSI_CMD_TOSHIBA_SEEK_TRK 0xc0
+#define SCSI_CMD_TOSHIBA_PLAY_AUDIO 0xc1
+#define SCSI_CMD_TOSHIBA_PAUSE_AUDIO 0xc2
+#define SCSI_CMD_TOSHIBA_EJECT 0xc4
+
+#define SCSI_CMD_TOSHIBA_READ_SUBCH_Q 0xc6
+typedef nec_subch_data_t toshiba_subch_data_t;
+/* audio status -> decode_status_1 */
+
+#define SCSI_CMD_TOSHIBA_READ_TOC_ENTRY 0xc7
+typedef struct {
+ unsigned char first_track;
+ unsigned char last_track;
+ unsigned char xxx[2];
+} toshiba_toc_header_t;
+typedef struct {
+ unsigned char address[4];
+} toshiba_toc_data_t;
+
+
+private scsi_ret_t
+toshiba_eject(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_TOSHIBA_EJECT;
+
+ return cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+}
+
+private scsi_ret_t
+toshiba_subchannel(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+ toshiba_subch_data_t *st;
+ char *buf = tgt->dev_info.cdrom.result;
+ scsi_ret_t rc;
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_TOSHIBA_READ_SUBCH_Q;
+ c.scsi_cmd_lun_and_relbit = sizeof(*st); /* Sic! */
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, sizeof(*st), ior);
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ st = (toshiba_subch_data_t *) tgt->cmd_ptr;
+
+ /* Status or Position ? */
+
+ if (cmd[4] == 'S') {
+ decode_status_1( buf, st->audio_status);
+ } else {
+
+ /* XXX can it do ABS addressing e.g. 'logical' ? */
+
+ sprintf(buf, "MSF Position %d %d %d %d %d %d",
+ (integer_t)bcd_to_decimal(st->absolute_address[0]), /* min */
+ (integer_t)bcd_to_decimal(st->absolute_address[1]), /* sec */
+ (integer_t)bcd_to_decimal(st->absolute_address[2]), /* frm */
+ (integer_t)bcd_to_decimal(st->relative_address[0]), /* min */
+ (integer_t)bcd_to_decimal(st->relative_address[1]), /* sec */
+ (integer_t)bcd_to_decimal(st->relative_address[2])); /* frm */
+ }
+
+ tgt->dev_info.cdrom.result_available = TRUE;
+ return SCSI_RET_SUCCESS;
+}
+
+private scsi_ret_t
+toshiba_read_toc(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+ toshiba_toc_data_t *t;
+ toshiba_toc_header_t *th;
+ char *buf = tgt->dev_info.cdrom.result;
+ scsi_ret_t rc;
+ int first, last, i;
+
+ /* TOC header first */
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_TOSHIBA_READ_TOC_ENTRY;
+ c.scsi_cmd_lun_and_relbit = 0;
+ c.scsi_cmd_lba1 = 0;
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, sizeof(*th), ior);
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ th = (toshiba_toc_header_t *) tgt->cmd_ptr;
+
+ first = bcd_to_decimal(th->first_track);
+ last = bcd_to_decimal(th->last_track);
+
+ /*
+ * "Get TH" wants summary, "TOC MSF|ABS from_track" wants all
+ */
+ if (cmd[0] == 'G') {
+ sprintf(buf, "toc header: %d %d %d",
+ sizeof(*th) + sizeof(*t) * (last - first + 1),
+ first, last);
+ goto out;
+ }
+
+ /*
+ * The whole shebang
+ */
+ sscanf(&cmd[8], "%d", &i);
+ sprintf(buf, "TOC from track %d:\n", i);
+
+ while (i <= last) {
+ bzero(&c, sizeof(c));
+
+ c.scsi_cmd_code = SCSI_CMD_TOSHIBA_READ_TOC_ENTRY;
+ c.scsi_cmd_lun_and_relbit = 2;
+ c.scsi_cmd_lba1 = decimal_to_bcd(i);
+
+ zero_ior( ior );
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, sizeof(*t), ior);
+ if (rc != SCSI_RET_SUCCESS) break;
+
+ t = (toshiba_toc_data_t *) tgt->cmd_ptr;
+
+ buf += strlen(buf);
+ if (cmd[4] == 'M')
+ sprintf(buf, "0 0 %d %d %d %d\n",
+ i,
+ bcd_to_decimal(t->address[0]),
+ bcd_to_decimal(t->address[1]),
+ bcd_to_decimal(t->address[2]));
+ else
+/* THIS IS WRONG */
+ sprintf(buf, "0 0 %d %d\n",
+ i,
+ bcd_to_decimal(t->address[0]) * 10000 +
+ bcd_to_decimal(t->address[1]) * 100 +
+ bcd_to_decimal(t->address[2]));
+ i++;
+ }
+
+ /* Must simulate the lead-out track */
+ bzero(&c, sizeof(c));
+
+ c.scsi_cmd_code = SCSI_CMD_TOSHIBA_READ_TOC_ENTRY;
+ c.scsi_cmd_lun_and_relbit = 1;
+ c.scsi_cmd_lba1 = 0;
+
+ zero_ior( ior );
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, sizeof(*t), ior);
+ if (rc != SCSI_RET_SUCCESS) goto out;
+
+ t = (toshiba_toc_data_t *) tgt->cmd_ptr;
+
+ buf += strlen(buf);
+ if (cmd[4] == 'M')
+ sprintf(buf, "0 0 %d %d %d %d\n",
+ i,
+ bcd_to_decimal(t->address[0]),
+ bcd_to_decimal(t->address[1]),
+ bcd_to_decimal(t->address[2]));
+ else
+/* THIS IS WRONG */
+ sprintf(buf, "0 0 %d %d\n",
+ i,
+ bcd_to_decimal(t->address[0]) * 10000 +
+ bcd_to_decimal(t->address[1]) * 100 +
+ bcd_to_decimal(t->address[2]));
+ i++;
+
+out:
+ tgt->dev_info.cdrom.result_available = TRUE;
+ return SCSI_RET_SUCCESS;
+}
+
+
+private scsi_ret_t
+toshiba_play(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+ int sm, ss, sf, em, es, ef;
+ int st, si, et, ei;
+ scsi_ret_t rc;
+
+ /*
+ * Seek to desired position
+ */
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_TOSHIBA_SEEK_TRK;
+
+ /*
+ * Play_msf or Play_ti
+ */
+ if (cmd[5] == 'A') {
+ /* "Play A startM startS startF endM endS endF" */
+
+ sscanf(&cmd[7], "%d %d %d %d %d %d",
+ &sm, &ss, &sf, &em, &es, &ef);
+
+ c.scsi_cmd_lba1 = decimal_to_bcd(sm);
+ c.scsi_cmd_lba2 = decimal_to_bcd(ss);
+ c.scsi_cmd_lba3 = decimal_to_bcd(sf);
+ c.scsi_cmd_ctrl_byte = NEC_CTRL_A_MSF;
+
+ } else {
+ /* "Play TI startT startI endT endI" */
+
+ sscanf(&cmd[8], "%d %d %d %d", &st, &si, &et, &ei);
+
+ c.scsi_cmd_lba1 = decimal_to_bcd(st);
+ c.scsi_cmd_lba2 = decimal_to_bcd(si);
+ c.scsi_cmd_lba3 = 0;
+ c.scsi_cmd_ctrl_byte = NEC_CTRL_A_TI;
+
+ }
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ /*
+ * Now ask it to play until..
+ */
+ zero_ior( ior );
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_TOSHIBA_PLAY_AUDIO;
+ c.scsi_cmd_lun_and_relbit = NEC_LR_PLAY_MODE|NEC_LR_STEREO;
+
+ if (cmd[5] == 'A') {
+ c.scsi_cmd_lba1 = decimal_to_bcd(em);
+ c.scsi_cmd_lba2 = decimal_to_bcd(es);
+ c.scsi_cmd_lba3 = decimal_to_bcd(ef);
+ c.scsi_cmd_ctrl_byte = NEC_CTRL_A_MSF;
+ } else {
+ c.scsi_cmd_lba1 = decimal_to_bcd(et);
+ c.scsi_cmd_lba2 = decimal_to_bcd(ei);
+ c.scsi_cmd_lba3 = 0;
+ c.scsi_cmd_ctrl_byte = NEC_CTRL_A_TI;
+ }
+
+ return cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+}
+
+private scsi_ret_t
+toshiba_pause_resume(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+
+ bzero(&c, sizeof(c));
+ /*
+ * "Resume" or "Stop"
+ */
+ if (cmd[0] == 'R') {
+ /* ???? would have to remember last cmd ???? */
+/* broken ! */
+ c.scsi_cmd_code = SCSI_CMD_TOSHIBA_PLAY_AUDIO;
+ c.scsi_cmd_lun_and_relbit = NEC_LR_PLAY_MODE|NEC_LR_STEREO;
+ c.scsi_cmd_ctrl_byte = NEC_CTRL_A_CURRENT;
+ } else {
+ c.scsi_cmd_code = SCSI_CMD_TOSHIBA_PAUSE_AUDIO;
+ }
+
+ return cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+}
+
+
+#if 0
+ /* I have info on these drives, but no drive to test */
+
+ /* PIONEER DRM-600 */
+
+#define SCSI_CMD_PIONEER_EJECT 0xc0
+
+#define SCSI_CMD_PIONEER_READ_TOC 0xc1
+typedef struct {
+ unsigned char first_track;
+ unsigned char last_track;
+ unsigned char xxx[2];
+} pioneer_toc_hdr_t;
+typedef struct {
+ unsigned char ctrl;
+ unsigned char address[3];
+} pioneer_toc_info_t;
+
+#define SCSI_CMD_PIONEER_READ_SUBCH 0xc2
+typedef struct {
+ BITFIELD_2(unsigned char,
+ ctrl : 4,
+ xxx1 : 4);
+ unsigned char trackno;
+ unsigned char indexno;
+ unsigned char relative_address[3];
+ unsigned char absolute_address[3];
+} pioneer_subch_data_t;
+
+#define SCSI_CMD_PIONEER_SEEK_TRK 0xc8
+#define SCSI_CMD_PIONEER_PLAY_AUDIO 0xc9
+#define SCSI_CMD_PIONEER_PAUSE 0xca
+
+#define SCSI_CMD_PIONEER_AUDIO_STATUS 0xcc
+typedef struct {
+ unsigned char audio_status;
+ unsigned char xxx[5];
+} pioneer_status_t;
+
+/*
+ * Reserved bits in byte1
+ */
+#define PIONEER_LR_END_ADDR 0x10
+#define PIONEER_LR_PAUSE 0x10
+#define PIONEER_LR_RESUME 0x00
+
+/*
+ * Vendor specific bits in the control byte.
+ */
+#define PIONEER_CTRL_TH 0x00 /* TOC header */
+#define PIONEER_CTRL_TE 0x80 /* one TOC entry */
+#define PIONEER_CTRL_LO 0x40 /* lead-out track info */
+
+#define PIONEER_CTRL_A_MSF 0x40 /* min/sec/frame addr */
+
+private scsi_ret_t
+pioneer_eject(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_PIONEER_EJECT;
+
+ return cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+}
+
+private scsi_ret_t
+pioneer_position(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+ scsi_ret_t rc;
+ char *buf = tgt->dev_info.cdrom.result;
+ pioneer_subch_data_t *st;
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_PIONEER_READ_SUBCH;
+ c.scsi_cmd_xfer_len_2 = sizeof(pioneer_subch_data_t); /* 9 bytes */
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, sizeof(pioneer_subch_data_t), ior);
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ st = (pioneer_subch_data_t *) tgt->cmd_ptr;
+
+ /* XXX can it do ABS addressing e.g. 'logical' ? */
+
+ sprintf(buf, "MSF Position %d %d %d %d %d %d",
+ (integer_t)bcd_to_decimal(st->absolute_address[0]), /* min */
+ (integer_t)bcd_to_decimal(st->absolute_address[1]), /* sec */
+ (integer_t)bcd_to_decimal(st->absolute_address[2]), /* frm */
+ (integer_t)bcd_to_decimal(st->relative_address[0]), /* min */
+ (integer_t)bcd_to_decimal(st->relative_address[1]), /* sec */
+ (integer_t)bcd_to_decimal(st->relative_address[2])); /* frm */
+
+ tgt->dev_info.cdrom.result_available = TRUE;
+ return SCSI_RET_SUCCESS;
+}
+
+private scsi_ret_t
+pioneer_toc(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+ pioneer_toc_hdr_t *th;
+ pioneer_toc_info_t *t;
+ char *buf = tgt->dev_info.cdrom.result;
+ scsi_ret_t rc;
+ int first, last, i;
+
+ /* Read header first */
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_PIONEER_READ_TOC;
+ c.scsi_cmd_xfer_len_2 = sizeof(pioneer_toc_hdr_t);
+ c.scsi_cmd_ctrl_byte = PIONEER_CTRL_TH;
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, sizeof(pioneer_toc_hdr_t), ior);
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ th = (pioneer_toc_hdr_t *)tgt->cmd_ptr;
+ first = bcd_to_decimal(th->first_track);
+ last = bcd_to_decimal(th->last_track);
+
+ /*
+ * "Get TH" wants summary, "TOC MSF|ABS from_track" wants all
+ */
+ if (cmd[0] == 'G') {
+ sprintf(buf, "toc header: %d %d %d", 0, first, last);
+ goto out;
+ }
+
+ /*
+ * Must do it one track at a time
+ */
+ sscanf(&cmd[8], "%d", &i);
+ sprintf(buf, "TOC from track %d:\n", i);
+
+ for ( ; i <= last; i++) {
+ zero_ior(ior);
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_PIONEER_READ_TOC;
+ c.scsi_cmd_lba4 = decimal_to_bcd(i);
+ c.scsi_cmd_xfer_len_2 = sizeof(pioneer_toc_info_t);
+ c.scsi_cmd_ctrl_byte = PIONEER_CTRL_TE;
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, sizeof(pioneer_toc_info_t), ior);
+ if (rc != SCSI_RET_SUCCESS) break;
+
+ t = (pioneer_toc_info_t *)tgt->cmd_ptr;
+
+ buf += strlen(buf);
+ if (cmd[4] == 'M')
+ sprintf(buf, "%d %d %d %d %d %d\n",
+ t->ctrl, 0, i,
+ bcd_to_decimal(t->address[0]),
+ bcd_to_decimal(t->address[1]),
+ bcd_to_decimal(t->address[2]));
+ else
+/* THIS IS WRONG */
+ sprintf(buf, "%d %d %d %d\n",
+ t->ctrl, 0, i,
+ bcd_to_decimal(t->address[0]) * 10000 +
+ bcd_to_decimal(t->address[1]) * 100 +
+ bcd_to_decimal(t->address[2]));
+ }
+ /* To know how long the last track is */
+ zero_ior(ior);
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_PIONEER_READ_TOC;
+ c.scsi_cmd_xfer_len_2 = sizeof(pioneer_toc_info_t);
+ c.scsi_cmd_ctrl_byte = PIONEER_CTRL_LO;
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, sizeof(pioneer_toc_info_t), ior);
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ buf += strlen(buf);
+ t = (pioneer_toc_info_t *)tgt->cmd_ptr;
+ if (cmd[4] == 'M')
+ sprintf(buf, "%d %d %d %d %d %d\n",
+ t->ctrl, 0, 0xaa /* User expects this */,
+ bcd_to_decimal(t->address[0]),
+ bcd_to_decimal(t->address[1]),
+ bcd_to_decimal(t->address[2]));
+ else
+/* THIS IS WRONG */
+ sprintf(buf, "%d %d %d %d\n",
+ t->ctrl, 0, 0xaa /* User expects this */,
+ bcd_to_decimal(t->address[0]) * 10000 +
+ bcd_to_decimal(t->address[1]) * 100 +
+ bcd_to_decimal(t->address[2]));
+
+out:
+ tgt->dev_info.cdrom.result_available = TRUE;
+ return SCSI_RET_SUCCESS;
+}
+
+private scsi_ret_t
+pioneer_status(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+ pioneer_status_t *st;
+ char *buf = tgt->dev_info.cdrom.result;
+ scsi_ret_t rc;
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_PIONEER_AUDIO_STATUS;
+ c.scsi_cmd_xfer_len_2 = sizeof(pioneer_status_t); /* 6 bytes */
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, sizeof(pioneer_status_t), ior);
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ st = (pioneer_status_t*) tgt->cmd_ptr;
+ decode_status_1( buf, st->audio_status);
+
+ tgt->dev_info.cdrom.result_available = TRUE;
+ return SCSI_RET_SUCCESS;
+}
+
+private scsi_ret_t
+pioneer_play(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+ int sm, ss, sf, em, es, ef;
+ int st, si, et, ei;
+ scsi_ret_t rc;
+
+ /*
+ * Seek to desired position
+ */
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_PIONEER_SEEK_TRK;
+ /*
+ * Play_msf or Play_ti
+ */
+ if (cmd[5] == 'A') {
+ /* "Play A startM startS startF endM endS endF" */
+
+ sscanf(&cmd[7], "%d %d %d %d %d %d",
+ &sm, &ss, &sf, &em, &es, &ef);
+
+ c.scsi_cmd_lba2 = decimal_to_bcd(sm);
+ c.scsi_cmd_lba3 = decimal_to_bcd(ss);
+ c.scsi_cmd_lba4 = decimal_to_bcd(sf);
+ c.scsi_cmd_ctrl_byte = PIONEER_CTRL_A_MSF;
+
+ } else {
+ /* "Play TI startT startI endT endI" */
+
+ sscanf(&cmd[8], "%d %d %d %d", &st, &si, &et, &ei);
+
+ c.scsi_cmd_lba3 = decimal_to_bcd(st);
+ c.scsi_cmd_lba4 = decimal_to_bcd(si);
+ c.scsi_cmd_ctrl_byte = 0x80; /* Pure speculation!! */
+
+ }
+
+ rc = cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+ if (rc != SCSI_RET_SUCCESS) return rc;
+
+ /*
+ * Now ask it to play until..
+ */
+ zero_ior( ior );
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_PIONEER_PLAY_AUDIO;
+ c.scsi_cmd_lun_and_relbit = PIONEER_LR_END_ADDR;
+
+ if (cmd[5] == 'A') {
+ c.scsi_cmd_lba2 = decimal_to_bcd(em);
+ c.scsi_cmd_lba3 = decimal_to_bcd(es);
+ c.scsi_cmd_lba4 = decimal_to_bcd(ef);
+ c.scsi_cmd_ctrl_byte = PIONEER_CTRL_A_MSF;
+ } else {
+ c.scsi_cmd_lba3 = decimal_to_bcd(et);
+ c.scsi_cmd_lba4 = decimal_to_bcd(ei);
+ c.scsi_cmd_ctrl_byte = 0x80; /* Pure speculation! */
+ }
+
+ return cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+}
+
+private scsi_ret_t
+pioneer_pause_resume(
+ target_info_t *tgt,
+ char *cmd,
+ io_req_t ior)
+{
+ scsi_command_group_2 c;
+
+ bzero(&c, sizeof(c));
+ c.scsi_cmd_code = SCSI_CMD_PIONEER_PAUSE;
+ /*
+ * "Resume" or "Stop"
+ */
+ if (cmd[0] == 'S')
+ c.scsi_cmd_lun_and_relbit = PIONEER_LR_PAUSE;
+ else
+ c.scsi_cmd_lun_and_relbit = PIONEER_LR_RESUME;
+
+ return cdrom_vendor_specific(tgt, &c, 0, 0, 0, ior);
+}
+
+ /* DENON DRD-253 */
+
+#define SCSI_CMD_DENON_PLAY_AUDIO 0x22
+#define SCSI_CMD_DENON_EJECT 0xe6
+#define SCSI_CMD_DENON_PAUSE_AUDIO 0xe7
+#define SCSI_CMD_DENON_READ_TOC 0xe9
+#define SCSI_CMD_DENON_READ_SUBCH 0xeb
+
+
+ /* HITACHI 1750 */
+
+#define SCSI_CMD_HITACHI_PLAY_AUDIO_MSF 0xe0
+#define SCSI_CMD_HITACHI_PAUSE_AUDIO 0xe1
+#define SCSI_CMD_HITACHI_EJECT 0xe4
+#define SCSI_CMD_HITACHI_READ_SUBCH 0xe5
+#define SCSI_CMD_HITACHI_READ_TOC 0xe8
+
+#endif
+
+/*
+ * Tabulate all of the above
+ */
+private red_list_t cdrom_exceptions[] = {
+
+#if 0
+ For documentation purposes, here are some SCSI-2 compliant drives:
+
+ Vendor Product Rev Comments
+
+ "SONY " "CD-ROMCDU-541 " "2.6a" The NeXT drive
+#endif
+
+ /* vendor, product, rev */
+ /* can_play_audio */
+ /* eject */
+ /* current_position */
+ /* read_toc */
+ /* get_status */
+ /* play_msf */
+ /* play_ti */
+ /* pause_resume */
+ /* volume_control */
+
+ /* We have seen a "RRD42(C)DEC " "4.5d" */
+ { "DEC ", "RRD42", "",
+ 0, 0, 0, 0, 0, 0, 0, 0, rrd42_set_volume },
+
+ /* We have seen a "CD-ROM DRIVE:84 " "1.0 " */
+ { "NEC ", "CD-ROM DRIVE:84", "",
+ op_not_supported, nec_eject, nec_subchannel, nec_read_toc,
+ nec_subchannel, nec_play, nec_play, nec_pause_resume,
+ op_not_supported },
+
+ /* We have seen a "CD-ROM DRIVE:XM " "3232" */
+ { "TOSHIBA ", "CD-ROM DRIVE:XM", "32",
+ op_not_supported, toshiba_eject, toshiba_subchannel, toshiba_read_toc,
+ toshiba_subchannel, toshiba_play, toshiba_play, toshiba_pause_resume,
+ op_not_supported },
+
+ { "TOSHIBA ", "CD-ROM DRIVE:XM", "33",
+ op_not_supported, toshiba_eject, toshiba_subchannel, toshiba_read_toc,
+ toshiba_subchannel, toshiba_play, toshiba_play, toshiba_pause_resume,
+ op_not_supported },
+
+#if 0
+ { "PIONEER ", "???????DRM-6", "",
+ op_not_supported, pioneer_eject, pioneer_position, pioneer_toc,
+ pioneer_status, pioneer_play, pioneer_play, pioneer_pause_resume,
+ op_not_supported },
+
+ { "DENON ", "DRD 25X", "", ...},
+ { "HITACHI ", "CDR 1750S", "", ...},
+ { "HITACHI ", "CDR 1650S", "", ...},
+ { "HITACHI ", "CDR 3650", "", ...},
+
+#endif
+
+ /* Zero terminate this list */
+ { 0, }
+};
+
+private void
+check_red_list(
+ target_info_t *tgt,
+ scsi2_inquiry_data_t *inq)
+
+{
+ red_list_t *list;
+
+ for (list = &cdrom_exceptions[0]; list->vendor; list++) {
+
+ /*
+ * Prefix-Match all strings
+ */
+ if ((strncmp(list->vendor, (const char *)inq->vendor_id,
+ strlen(list->vendor)) == 0) &&
+ (strncmp(list->product, (const char *)inq->product_id,
+ strlen(list->product)) == 0) &&
+ (strncmp(list->rev, (const char *)inq->product_rev,
+ strlen(list->rev)) == 0)) {
+ /*
+ * One of them..
+ */
+ if (tgt->dev_info.cdrom.violates_standards != list) {
+ tgt->dev_info.cdrom.violates_standards = list;
+ curse_the_vendor( list, TRUE );
+ }
+ return;
+ }
+ }
+}
+#endif /* NSCSI > 0 */
diff --git a/scsi/rz_cpu.c b/scsi/rz_cpu.c
new file mode 100644
index 00000000..77c0683f
--- /dev/null
+++ b/scsi/rz_cpu.c
@@ -0,0 +1,450 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: rz_cpu.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 7/91
+ *
+ * Top layer of the SCSI driver: interface with the MI.
+ * This file contains operations specific to CPU-like devices.
+ *
+ * We handle here the case of simple devices which do not use any
+ * sophisticated host-to-host communication protocol; they look
+ * very much like degenerate cases of TAPE devices.
+ *
+ * For documentation and debugging, we also provide code to act like one.
+ */
+
+#include <mach/std_types.h>
+#include <scsi/compat_30.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_defs.h>
+#include <scsi/rz.h>
+
+#if (NSCSI > 0)
+
+void sccpu_act_as_target(); /* forwards */
+void sccpu_start();
+
+/*
+ * This function decides which 'protocol' we will speak
+ * to a cpu target. For now the decision is left to a
+ * global var. XXXXXXX
+ */
+extern scsi_devsw_t scsi_host;
+scsi_devsw_t *scsi_cpu_protocol = /* later &scsi_host*/
+ &scsi_devsw[SCSI_CPU];
+
+void sccpu_new_initiator(self, initiator)
+ target_info_t *self, *initiator;
+{
+ initiator->dev_ops = scsi_cpu_protocol;
+ if (initiator == self) {
+ self->flags = TGT_DID_SYNCH|TGT_FULLY_PROBED|TGT_ONLINE|
+ TGT_ALIVE|TGT_US;
+ self->dev_info.cpu.req_pending = FALSE;
+ } else {
+ initiator->flags = TGT_ONLINE|TGT_ALIVE;
+ initiator->dev_info.cpu.req_pending = TRUE;
+ }
+}
+
+void sccpu_strategy(ior)
+ register io_req_t ior;
+{
+ void sccpu_start();
+
+ rz_simpleq_strategy(ior, sccpu_start);
+}
+
+void sccpu_start(tgt, done)
+ target_info_t *tgt;
+ boolean_t done;
+{
+ io_req_t head, ior;
+ scsi_ret_t ret;
+
+	/* this refers to the doc & debug code mentioned at the beginning */
+ if (!done && tgt->dev_info.cpu.req_pending) {
+ panic("sccpu_act_as_target called");
+#if 0
+ sccpu_act_as_target( tgt);
+#endif
+ return;
+ }
+
+ ior = tgt->ior;
+ if (ior == 0)
+ return;
+
+ if (done) {
+
+ /* see if we must retry */
+ if ((tgt->done == SCSI_RET_RETRY) &&
+ ((ior->io_op & IO_INTERNAL) == 0)) {
+ delay(1000000);/*XXX*/
+ goto start;
+ } else
+ /* got a bus reset ? shouldn't matter */
+ if ((tgt->done == (SCSI_RET_ABORTED|SCSI_RET_RETRY)) &&
+ ((ior->io_op & IO_INTERNAL) == 0)) {
+ goto start;
+ } else
+
+ /* check completion status */
+
+ if (tgt->cur_cmd == SCSI_CMD_REQUEST_SENSE) {
+ scsi_sense_data_t *sns;
+
+ ior->io_op = ior->io_temporary;
+ ior->io_error = D_IO_ERROR;
+ ior->io_op |= IO_ERROR;
+
+ sns = (scsi_sense_data_t *)tgt->cmd_ptr;
+ if (scsi_debug)
+ scsi_print_sense_data(sns);
+
+ if (scsi_check_sense_data(tgt, sns)) {
+ if (sns->u.xtended.ili) {
+ if (ior->io_op & IO_READ) {
+ int residue;
+
+ residue = sns->u.xtended.info0 << 24 |
+ sns->u.xtended.info1 << 16 |
+ sns->u.xtended.info2 << 8 |
+ sns->u.xtended.info3;
+ if (scsi_debug)
+ printf("Cpu Short Read (%d)\n", residue);
+ /*
+ * NOTE: residue == requested - actual
+ * We only care if > 0
+ */
+ if (residue < 0) residue = 0;/* sanity */
+ ior->io_residual += residue;
+ ior->io_error = 0;
+ ior->io_op &= ~IO_ERROR;
+ /* goto ok */
+ }
+ }
+ }
+ }
+
+ else if (tgt->done != SCSI_RET_SUCCESS) {
+
+ if (tgt->done == SCSI_RET_NEED_SENSE) {
+
+ ior->io_temporary = ior->io_op;
+ ior->io_op = IO_INTERNAL;
+ if (scsi_debug)
+ printf("[NeedSns x%x x%x]", ior->io_residual, ior->io_count);
+ scsi_request_sense(tgt, ior, 0);
+ return;
+
+ } else if (tgt->done == SCSI_RET_RETRY) {
+ /* only retry here READs and WRITEs */
+ if ((ior->io_op & IO_INTERNAL) == 0) {
+ ior->io_residual = 0;
+ goto start;
+ } else{
+ ior->io_error = D_WOULD_BLOCK;
+ ior->io_op |= IO_ERROR;
+ }
+ } else {
+ ior->io_error = D_IO_ERROR;
+ ior->io_op |= IO_ERROR;
+ }
+ }
+
+ if (scsi_debug)
+ printf("[Resid x%x]", ior->io_residual);
+
+ /* dequeue next one */
+ head = ior;
+
+ simple_lock(&tgt->target_lock);
+ ior = head->io_next;
+ tgt->ior = ior;
+ if (ior)
+ ior->io_prev = head->io_prev;
+ simple_unlock(&tgt->target_lock);
+
+ iodone(head);
+
+ if (ior == 0)
+ return;
+ }
+ ior->io_residual = 0;
+start:
+ if (ior->io_op & IO_READ) {
+ ret = scsi_receive( tgt, ior );
+ } else if ((ior->io_op & IO_INTERNAL) == 0) {
+ ret = scsi_send( tgt, ior );
+ }
+}
+
+
+#if 0
+/* XX turned off this code because it's impossible
+ to reference 'end' and other such magic symbols
+ from boot modules. */
+/*
+ * This is a simple code to make us act as a dumb
+ * processor type. Use for debugging only.
+ */
+static struct io_req sccpu_ior;
+vm_offset_t sccpu_buffer; /* set this with debugger */
+
+void sccpu_act_as_target(self)
+ target_info_t *self;
+{
+ static char inq_data[] = "\3\0\1\0\040\0\0\0Mach3.0 Processor Link v0.1";
+ static char sns_data[] = "\160\0\0\0\0\0\0\0\0";
+
+ self->dev_info.cpu.req_pending = FALSE;
+ sccpu_ior.io_next = 0;
+#define MAXSIZE 1024*64
+ sccpu_ior.io_count = (MAXSIZE < self->dev_info.cpu.req_len) ?
+ MAXSIZE : self->dev_info.cpu.req_len;
+
+ switch (self->dev_info.cpu.req_cmd) {
+ case SCSI_CMD_INQUIRY:
+ sccpu_ior.io_data = inq_data; break;
+ case SCSI_CMD_REQUEST_SENSE:
+ sccpu_ior.io_data = sns_data; break;
+ default:
+ if (sccpu_buffer == 0) {
+ /* ( read my lips :-) */
+ /* extern char end[]; */
+ sccpu_buffer = trunc_page(kalloc(MAXSIZE));
+ }
+ sccpu_ior.io_data = (char*)sccpu_buffer; break;
+ }
+
+ if (self->dev_info.cpu.req_cmd == SCSI_CMD_SEND) {
+ self->cur_cmd = SCSI_CMD_READ;
+ sccpu_ior.io_op = IO_READ;
+ } else {
+ self->cur_cmd = SCSI_CMD_WRITE;
+ sccpu_ior.io_op = IO_WRITE;
+ }
+ self->ior = &sccpu_ior;
+}
+#endif
+
+/*#define PERF*/
+#ifdef PERF
+int test_read_size = 512;
+int test_read_nreads = 1000;
+int test_read_bdev = 0;
+int test_read_or_write = 1;
+
+#include <sys/time.h>
+#include <machine/machspl.h> /* spl */
+
+test_read(max)
+{
+ int i, ssk, usecs;
+ struct timeval start, stop;
+ spl_t s;
+
+ if (max != 0)
+ test_read_nreads = max;
+
+ s = spl0();
+ start = time;
+ if (test_read_or_write) read_test(); else write_test();
+ stop = time;
+ splx(s);
+
+ usecs = stop.tv_usec - start.tv_usec;
+ if (usecs < 0) {
+ stop.tv_sec -= 1;
+ usecs += 1000000;
+ }
+ printf("Size %d count %d time %3d sec %d us\n",
+ test_read_size, test_read_nreads,
+ stop.tv_sec - start.tv_sec, usecs);
+}
+
+read_test()
+{
+ struct io_req io, io1;
+ register int i;
+
+ bzero(&io, sizeof(io));
+ io.io_unit = test_read_bdev;
+ io.io_op = IO_READ;
+ io.io_count = test_read_size;
+ io.io_data = (char*)sccpu_buffer;
+ io1 = io;
+
+ sccpu_strategy(&io);
+ for (i = 1; i < test_read_nreads; i += 2) {
+ io1.io_op = IO_READ;
+ sccpu_strategy(&io1);
+ iowait(&io);
+ io.io_op = IO_READ;
+ sccpu_strategy(&io);
+ iowait(&io1);
+ }
+ iowait(&io);
+}
+
+write_test()
+{
+ struct io_req io, io1;
+ register int i;
+
+ bzero(&io, sizeof(io));
+ io.io_unit = test_read_bdev;
+ io.io_op = IO_WRITE;
+ io.io_count = test_read_size;
+ io.io_data = (char*)sccpu_buffer;
+ io1 = io;
+
+ sccpu_strategy(&io);
+ for (i = 1; i < test_read_nreads; i += 2) {
+ io1.io_op = IO_WRITE;
+ sccpu_strategy(&io1);
+ iowait(&io);
+ io.io_op = IO_WRITE;
+ sccpu_strategy(&io);
+ iowait(&io1);
+ }
+ iowait(&io);
+}
+
+tur_test()
+{
+ struct io_req io;
+ register int i;
+ char *a;
+ struct timeval start, stop;
+ spl_t s;
+ target_info_t *tgt;
+
+ bzero(&io, sizeof(io));
+ io.io_unit = test_read_bdev;
+ io.io_data = (char*)&io;/*unused but kernel space*/
+
+ rz_check(io.io_unit, &a, &tgt);
+ s = spl0();
+ start = time;
+ for (i = 0; i < test_read_nreads; i++) {
+ io.io_op = IO_INTERNAL;
+ scsi_test_unit_ready(tgt,&io);
+ }
+ stop = time;
+ splx(s);
+ i = stop.tv_usec - start.tv_usec;
+ if (i < 0) {
+ stop.tv_sec -= 1;
+ i += 1000000;
+ }
+ printf("%d test-unit-ready took %3d sec %d us\n",
+ test_read_nreads,
+ stop.tv_sec - start.tv_sec, i);
+}
+
+/*#define MEM_PERF*/
+#ifdef MEM_PERF
+int mem_read_size = 1024; /* ints! */
+int mem_read_nreads = 1000;
+volatile int *mem_read_address = (volatile int*)0xb0080000;
+volatile int *mem_write_address = (volatile int*)0xb0081000;
+
+mem_test(max, which)
+{
+ int i, ssk, usecs;
+ struct timeval start, stop;
+ int (*fun)(), mwrite_test(), mread_test(), mcopy_test();
+ spl_t s;
+
+ if (max == 0)
+ max = mem_read_nreads;
+
+ switch (which) {
+ case 1: fun = mwrite_test; break;
+ case 2: fun = mcopy_test; break;
+ default:fun = mread_test; break;
+ }
+
+ s = spl0();
+ start = time;
+ for (i = 0; i < max; i++)
+ (*fun)(mem_read_size);
+ stop = time;
+ splx(s);
+
+ usecs = stop.tv_usec - start.tv_usec;
+ if (usecs < 0) {
+ stop.tv_sec -= 1;
+ usecs += 1000000;
+ }
+ printf("Size %d count %d time %3d sec %d us\n",
+ mem_read_size*4, max,
+ stop.tv_sec - start.tv_sec, usecs);
+}
+
+mread_test(max)
+ register int max;
+{
+ register int i;
+ register volatile int *addr = mem_read_address;
+
+ for (i = 0; i < max; i++) {
+ register int j = *addr++;
+ }
+}
+mwrite_test(max)
+ register int max;
+{
+ register int i;
+ register volatile int *addr = mem_read_address;
+
+ for (i = 0; i < max; i++) {
+ *addr++ = i;
+ }
+}
+
+mcopy_test(max)
+ register int max;
+{
+ register volatile int *from = mem_read_address;
+ register volatile int *to = mem_write_address;
+ register volatile int *endaddr;
+
+ endaddr = to + max;
+ while (to < endaddr)
+ *to++ = *from++;
+
+}
+#endif /*MEM_PERF*/
+
+#endif /*PERF*/
+
+#endif /* NSCSI > 0 */
diff --git a/scsi/rz_disk.c b/scsi/rz_disk.c
new file mode 100644
index 00000000..87a992b4
--- /dev/null
+++ b/scsi/rz_disk.c
@@ -0,0 +1,1222 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: rz_disk.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Top layer of the SCSI driver: interface with the MI.
+ * This file contains operations specific to disk-like devices.
+ *
+ * Modified by Kevin T. Van Maren to use a common partition code
+ * with the ide driver, and support 'slices'.
+ */
+
+
+#include <scsi/scsi.h>
+#if (NSCSI > 0)
+
+#include <device/buf.h>
+#include <device/disk_status.h>
+#include <device/device_types.h>
+#include <device/param.h>
+#include <device/errno.h>
+
+#include <kern/time_out.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <mach/std_types.h>
+#include <platforms.h>
+
+#include <scsi/compat_30.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_defs.h>
+#include <scsi/rz.h>
+#include <scsi/rz_labels.h>
+
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+#include <sys/kernel.h> /* for hz */
+#endif /*MACH_KERNEL*/
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include "vm_param.h"
+#include <vm/vm_kern.h>
+#include <vm/pmap.h>
+
+extern void scdisk_read(), scdisk_write(),
+ scsi_long_read(), scsi_long_write();
+
+void scdisk_start(); /* forwards */
+void scdisk_start_rw();
+unsigned dkcksum();
+
+#if 0
+struct diskpart scsi_array[8*64];
+#endif 0
+
+
+/* THIS IS THE BOTTOM LAYER FOR THE SCSI PARTITION CODE */
+typedef struct scsi_driver_info {
+ target_info_t *tgt;
+ io_req_t ior;
+ void (*readfun)();
+ int sectorsize;
+} scsi_driver_info;
+
+int scsi_read_fun(scsi_driver_info *param, int sectornum, void *buff)
+{
+ char *psave;
+ int result = TRUE; /* SUCCESS */
+ psave=param->ior->io_data; /* is this necessary ? */
+
+ param->ior->io_data=buff;
+ param->ior->io_count = param->sectorsize;
+ param->ior->io_op = IO_READ;
+ param->ior->io_error = 0;
+ param->tgt->ior = param->ior;
+
+ (*param->readfun)( param->tgt, sectornum, param->ior);
+ iowait(param->ior);
+
+ param->ior->io_data=psave; /* restore the io_data pointer ?? */
+ return(result);
+}
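+/*
+ * scsi_read_fun() is the "bottom read" callback handed to the common
+ * partition code: get_only_partition() calls back here with the
+ * scsi_driver_info cookie, an absolute sector number and a buffer,
+ * and one sector is read synchronously through the target's normal
+ * read path (scdisk_read or scsi_long_read, whichever was recorded
+ * in readfun).
+ */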
+
+
+
+/*
+ * Specialized side of the open routine for disks
+ */
+scsi_ret_t scdisk_open(tgt, req)
+ target_info_t *tgt;
+ io_req_t req;
+{
+ register int i, dev_bsize;
+ scsi_ret_t ret = /* SCSI_RET_SUCCESS; */ -1;
+ unsigned int disk_size, secs_per_cyl, sector_size;
+ scsi_rcap_data_t *cap;
+ struct disklabel *label;
+ io_req_t ior;
+ void (*readfun)() = scdisk_read;
+ char *data = (char *)0;
+
+ int numpart;
+
+ scsi_driver_info scsi_info;
+ char drive_name[10]; /* used for disklabel strings */
+
+ if (tgt->flags & TGT_ONLINE)
+ return SCSI_RET_SUCCESS;
+
+ /*
+ * Dummy ior for proper sync purposes
+ */
+ io_req_alloc(ior,0);
+ ior->io_next = 0;
+ ior->io_count = 0;
+
+ /*
+ * Set the LBN to DEV_BSIZE with a MODE SELECT.
+ * If this fails we try a couple other tricks.
+ */
+ dev_bsize = 0;
+ for (i = 0; i < 5; i++) {
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+ ret = scdisk_mode_select(tgt, DEV_BSIZE, ior, 0, 0, 0);
+ if (ret == SCSI_RET_SUCCESS) {
+ dev_bsize = DEV_BSIZE;
+ break;
+ }
+ if (ret == SCSI_RET_RETRY) {
+ timeout(wakeup, tgt, 2*hz);
+ await(tgt);
+ }
+ if (ret == SCSI_RET_DEVICE_DOWN)
+ goto done;
+ }
+#if 0
+ if (ret != SCSI_RET_SUCCESS) {
+ scsi_error( tgt, SCSI_ERR_MSEL, ret, 0);
+ ret = D_INVALID_SIZE;
+ goto done;
+ }
+#endif
+ /*
+ * Do a READ CAPACITY to get max size. Check LBN too.
+ */
+ for (i = 0; i < 5; i++) {
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+ ret = scsi_read_capacity(tgt, 0, ior);
+ if (ret == SCSI_RET_SUCCESS)
+ break;
+ }
+ if (ret == SCSI_RET_SUCCESS) {
+ int val;
+
+ cap = (scsi_rcap_data_t*) tgt->cmd_ptr;
+ disk_size = (cap->lba1<<24) |
+ (cap->lba2<<16) |
+ (cap->lba3<< 8) |
+ cap->lba4;
+ if (scsi_debug)
+ printf("rz%d holds %d blocks\n", tgt->unit_no, disk_size);
+ val = (cap->blen1<<24) |
+ (cap->blen2<<16) |
+ (cap->blen3<<8 ) |
+ cap->blen4;
+ if (dev_bsize == 0)
+ dev_bsize = val;
+ else
+ if (val != dev_bsize) panic("read capacity bad");
+
+ if (disk_size > SCSI_CMD_READ_MAX_LBA)
+ tgt->flags |= TGT_BIG;
+
+ } else {
+ printf("Unknown disk capacity??\n");
+ disk_size = -1;
+ }
+ /*
+ * Mandatory long-form commands ?
+ */
+ if (BGET(scsi_use_long_form,(unsigned char)tgt->masterno,tgt->target_id))
+ tgt->flags |= TGT_BIG;
+ if (tgt->flags & TGT_BIG)
+ readfun = scsi_long_read;
+
+ /*
+ * Some CDROMS truly dislike 512 as LBN.
+ * Use a MODE_SENSE to cover for this case.
+ */
+ if (dev_bsize == 0) {
+ scsi_mode_sense_data_t *m;
+
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+ ret = scsi_mode_sense(tgt, 0/*pagecode*/, 32/*?*/, ior);
+ if (ret == SCSI_RET_SUCCESS) {
+ m = (scsi_mode_sense_data_t *) tgt->cmd_ptr;
+ dev_bsize =
+ m->bdesc[0].blen_msb << 16 |
+ m->bdesc[0].blen << 8 |
+ m->bdesc[0].blen_lsb;
+ }
+ }
+
+ /*
+ * Find out about the phys disk geometry -- scsi specific
+ */
+
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+ ret = scsi_read_capacity( tgt, 1, ior);
+ if (ret == SCSI_RET_SUCCESS) {
+ cap = (scsi_rcap_data_t*) tgt->cmd_ptr;
+ secs_per_cyl = (cap->lba1<<24) | (cap->lba2<<16) |
+ (cap->lba3<< 8) | cap->lba4;
+ secs_per_cyl += 1;
+ sector_size = (cap->blen1<<24) | (cap->blen2<<16) |
+ (cap->blen3<<8 ) | cap->blen4;
+ } else {
+ sector_size = dev_bsize ? dev_bsize : DEV_BSIZE;
+ secs_per_cyl = disk_size;
+ }
+ if (dev_bsize == 0)
+ dev_bsize = sector_size;
+
+ if (scsi_debug)
+ printf("rz%d: %d sect/cyl %d bytes/sec\n", tgt->unit_no,
+ secs_per_cyl, sector_size);
+
+ /*
+ * At this point, one way or other, we are committed to
+ * a given disk capacity and sector size.
+ */
+ tgt->block_size = dev_bsize;
+
+ /*
+ * Get partition table off pack
+ */
+
+#ifdef MACH_KERNEL
+ ior->io_data = (char *)kalloc(sector_size);
+#endif /*MACH_KERNEL*/
+
+ scsi_info.tgt=tgt;
+ scsi_info.ior=ior;
+ scsi_info.readfun=readfun;
+ scsi_info.sectorsize=sector_size;
+
+ /* label has NOT been allocated space yet! set to the tgt disklabel */
+ label=&scsi_info.tgt->dev_info.disk.l;
+
+ sprintf(drive_name, "sd%d:", tgt->unit_no);
+
+ if (scsi_debug)
+ printf("Using bogus geometry: 32 sectors/track, 64 heads\n");
+
+ fudge_bsd_label(label, DTYPE_SCSI, disk_size /* /(32*64)*/ ,
+ 64, 32, sector_size, 8);
+
+ numpart=get_only_partition(&scsi_info, (*scsi_read_fun),
+ tgt->dev_info.disk.scsi_array, MAX_SCSI_PARTS, disk_size, drive_name);
+
+ printf("%s %d partitions found\n",drive_name,numpart);
+
+ ret=SCSI_RET_SUCCESS; /* if 0, return SCSI_RET_SUCCESS */
+
+
+done:
+ io_req_free(ior);
+
+ return(ret);
+}
+
+
+/*
+ * Disk strategy routine
+ */
+int scdisk_strategy(ior)
+ register io_req_t ior;
+{
+ target_info_t *tgt;
+ register scsi_softc_t *sc;
+ register int i = ior->io_unit, part;
+ register unsigned rec, max;
+ spl_t s;
+ struct diskpart *label;
+
+ sc = scsi_softc[rzcontroller(i)];
+ tgt = sc->target[rzslave(i)];
+ part = rzpartition(i);
+
+ /*
+ * Validate request
+ */
+
+ /* readonly ? */
+ if ((tgt->flags & TGT_READONLY) &&
+	    ((ior->io_op & (IO_READ|IO_INTERNAL)) == 0)) {
+ ior->io_error = D_READ_ONLY;
+ ior->io_op |= IO_ERROR;
+ ior->io_residual = ior->io_count;
+ iodone(ior);
+ return ior->io_error;
+ }
+
+ rec = ior->io_recnum;
+
+ label=lookup_part(tgt->dev_info.disk.scsi_array, part);
+ if (!label) {
+ if (scsi_debug)
+ printf("sc strategy -- bad partition\n");
+ ior->io_error = D_INVALID_SIZE;
+ ior->io_op |= IO_ERROR;
+ ior->io_residual = ior->io_count;
+ iodone(ior);
+ return ior->io_error;
+ }
+	else
+		max = label->size;
+	if (max == -1)	/* what about 0? */
+		max = tgt->dev_info.disk.l.d_secperunit - label->start;
+
+ i = (ior->io_count + tgt->block_size - 1) / tgt->block_size;
+ if (((rec + i) > max) || (ior->io_count < 0) ||
+#if later
+ ((rec <= LABELSECTOR) && ((tgt->flags & TGT_WRITE_LABEL) == 0))
+#else
+ FALSE
+#endif
+ ) {
+ ior->io_error = D_INVALID_SIZE;
+ ior->io_op |= IO_ERROR;
+ ior->io_residual = ior->io_count;
+ iodone(ior);
+ return ior->io_error;
+ }
+ /*
+ * Find location on disk: secno and cyl (for disksort)
+ */
+ rec += label->start;
+ ior->io_residual = rec / tgt->dev_info.disk.l.d_secpercyl;
+
+ /*
+ * Enqueue operation
+ */
+ s = splbio();
+ simple_lock(&tgt->target_lock);
+ if (tgt->ior) {
+ disksort(tgt->ior, ior);
+ simple_unlock(&tgt->target_lock);
+ splx(s);
+ } else {
+ ior->io_next = 0;
+ tgt->ior = ior;
+ simple_unlock(&tgt->target_lock);
+ splx(s);
+
+ scdisk_start(tgt,FALSE);
+ }
+
+ return D_SUCCESS;
+}
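+/*
+ * Size check above, spelled out: a request of io_count bytes starting
+ * at record rec spans
+ *	i = (io_count + block_size - 1) / block_size
+ * blocks and is rejected unless rec + i fits within the partition's
+ * max blocks.  For example, with 512-byte blocks an 8192-byte request
+ * at rec = 100 covers blocks 100..115 and so needs max >= 116.
+ */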
+
+/*#define CHECKSUM*/
+#ifdef CHECKSUM
+int max_checksum_size = 0x2000;
+#endif CHECKSUM
+
+/*
+ * Start/completion routine for disks
+ */
+void scdisk_start(tgt, done)
+ target_info_t *tgt;
+ boolean_t done;
+{
+ register io_req_t ior = tgt->ior;
+ register unsigned part;
+#ifdef CHECKSUM
+ register unsigned secno;
+#endif
+ struct diskpart *label;
+
+ if (ior == 0)
+ return;
+
+ if (tgt->flags & TGT_BBR_ACTIVE)
+ {
+ scdisk_bbr_start(tgt, done);
+ return;
+ }
+
+ if (done) {
+ register unsigned int xferred;
+ unsigned int max_dma_data;
+
+ max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+ /* see if we must retry */
+ if ((tgt->done == SCSI_RET_RETRY) &&
+ ((ior->io_op & IO_INTERNAL) == 0)) {
+ delay(1000000);/*XXX*/
+ goto start;
+ } else
+ /* got a bus reset ? pifff.. */
+ if ((tgt->done == (SCSI_RET_ABORTED|SCSI_RET_RETRY)) &&
+ ((ior->io_op & IO_INTERNAL) == 0)) {
+ if (xferred = ior->io_residual) {
+ ior->io_data -= xferred;
+ ior->io_count += xferred;
+ ior->io_recnum -= xferred / tgt->block_size;
+ ior->io_residual = 0;
+ }
+ goto start;
+ } else
+ /*
+ * Quickly check for errors: if anything goes wrong
+ * we do a request sense, see if that is what we did.
+ */
+ if (tgt->cur_cmd == SCSI_CMD_REQUEST_SENSE) {
+ scsi_sense_data_t *sns;
+ unsigned int blockno;
+ char *outcome;
+
+ ior->io_op = ior->io_temporary;
+
+ sns = (scsi_sense_data_t *)tgt->cmd_ptr;
+ if (sns->addr_valid)
+ blockno = sns->u.xtended.info0 << 24 |
+ sns->u.xtended.info1 << 16 |
+ sns->u.xtended.info2 << 8 |
+ sns->u.xtended.info3;
+			else {
+			    part = rzpartition(ior->io_unit);
+			    label = lookup_part(tgt->dev_info.disk.scsi_array, part);
+			    if (!label)
+				blockno = -1;
+			    else
+				blockno = label->start + ior->io_recnum;
+			}
+
+ if (scsi_check_sense_data(tgt, sns)) {
+ ior->io_error = 0;
+ if ((tgt->done == SCSI_RET_RETRY) &&
+ ((ior->io_op & IO_INTERNAL) == 0)) {
+ delay(1000000);/*XXX*/
+ goto start;
+ }
+ outcome = "Recovered";
+ } else {
+ outcome = "Unrecoverable";
+ ior->io_error = D_IO_ERROR;
+ ior->io_op |= IO_ERROR;
+ }
+ if ((tgt->flags & TGT_OPTIONAL_CMD) == 0) {
+ printf("%s Error, rz%d: %s%s%d\n", outcome,
+ tgt->target_id + (tgt->masterno * 8),
+ (ior->io_op & IO_READ) ? "Read" :
+ ((ior->io_op & IO_INTERNAL) ? "(command)" : "Write"),
+ " disk error, phys block no. ", blockno);
+
+ scsi_print_sense_data(sns);
+
+ /*
+ * On fatal read/write errors try replacing the bad block
+ * The bbr routine will return TRUE iff it took control
+ * over the target for all subsequent operations. In this
+ * event, the queue of requests is effectively frozen.
+ */
+ if (ior->io_error &&
+ ((sns->error_class == SCSI_SNS_XTENDED_SENSE_DATA) &&
+ ((sns->u.xtended.sense_key == SCSI_SNS_HW_ERR) ||
+ (sns->u.xtended.sense_key == SCSI_SNS_MEDIUM_ERR))) &&
+ scdisk_bad_block_repl(tgt, blockno))
+ return;
+ }
+ }
+
+ /*
+ * See if we had errors
+ */
+ else if (tgt->done != SCSI_RET_SUCCESS) {
+
+ if (tgt->done == SCSI_RET_NEED_SENSE) {
+
+ ior->io_temporary = ior->io_op;
+ ior->io_op = IO_INTERNAL;
+ scsi_request_sense(tgt, ior, 0);
+ return;
+
+ } else if (tgt->done == SCSI_RET_DEVICE_DOWN) {
+ ior->io_error = D_DEVICE_DOWN;
+ ior->io_op |= IO_ERROR;
+ } else {
+ printf("%s%x\n", "?rz_disk Disk error, ret=x", tgt->done);
+ ior->io_error = D_IO_ERROR;
+ ior->io_op |= IO_ERROR;
+ }
+ }
+ /*
+ * No errors.
+ * See if we requested more than the max
+ * (We use io_residual in a flip-side way here)
+ */
+ else if (ior->io_count > (xferred = max_dma_data)) {
+ ior->io_residual += xferred;
+ ior->io_count -= xferred;
+ ior->io_data += xferred;
+ ior->io_recnum += xferred / tgt->block_size;
+ goto start;
+ }
+ else if (xferred = ior->io_residual) {
+ ior->io_data -= xferred;
+ ior->io_count += xferred;
+ ior->io_recnum -= xferred / tgt->block_size;
+ ior->io_residual = 0;
+ } /* that's it */
+
+#ifdef CHECKSUM
+ if ((ior->io_op & IO_READ) && (ior->io_count < max_checksum_size)) {
+ part = rzpartition(ior->io_unit);
+ label=lookup_part(tgt->dev_info.disk.scsi_array, part);
+ if (!label) printf("NOT FOUND!\n");
+ secno = ior->io_recnum + label->start;
+ scdisk_bcheck(secno, ior->io_data, ior->io_count);
+ }
+#endif CHECKSUM
+
+ /* dequeue next one */
+ {
+ io_req_t next;
+
+ simple_lock(&tgt->target_lock);
+ next = ior->io_next;
+ tgt->ior = next;
+ simple_unlock(&tgt->target_lock);
+
+ iodone(ior);
+ if (next == 0)
+ return;
+
+ ior = next;
+ }
+
+#ifdef CHECKSUM
+ if (((ior->io_op & IO_READ) == 0) && (ior->io_count < max_checksum_size)) {
+ part = rzpartition(ior->io_unit);
+ label=lookup_part(tgt->dev_info.disk.scsi_array, part);
+ secno = ior->io_recnum + label->start;
+ scdisk_checksum(secno, ior->io_data, ior->io_count);
+ }
+#endif CHECKSUM
+ }
+ ior->io_residual = 0;
+start:
+ scdisk_start_rw( tgt, ior);
+}
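+/*
+ * Recap of the completion path above: on "done" the routine first
+ * handles retries and bus resets, then decodes REQUEST SENSE data
+ * (possibly handing control to the bad-block replacement code), then
+ * either continues a transfer that exceeded max_dma_data (io_residual
+ * remembers how much has already gone out) or completes the request
+ * and dequeues the next one before falling through to start it.
+ */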
+
+void scdisk_start_rw( tgt, ior)
+ target_info_t *tgt;
+ register io_req_t ior;
+{
+ unsigned int part, secno;
+ register boolean_t long_form;
+ struct diskpart *label;
+
+ part = rzpartition(ior->io_unit);
+ label=lookup_part(tgt->dev_info.disk.scsi_array, part);
+ if (!label)
+ printf("NOT FOUND!\n");
+ secno = ior->io_recnum + label->start;
+
+ /* Use long form if either big block addresses or
+ the size is more than we can fit in one byte */
+ long_form = (tgt->flags & TGT_BIG) ||
+ (ior->io_count > (256 * tgt->block_size));
+ if (ior->io_op & IO_READ)
+ (long_form ? scsi_long_read : scdisk_read)(tgt, secno, ior);
+ else if ((ior->io_op & IO_INTERNAL) == 0)
+ (long_form ? scsi_long_write : scdisk_write)(tgt, secno, ior);
+}
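+/*
+ * Why long_form above: the 6-byte READ/WRITE CDB carries only a
+ * 21-bit block address and an 8-bit transfer length, so targets
+ * flagged TGT_BIG (block numbers beyond SCSI_CMD_READ_MAX_LBA) or
+ * transfers of more than 256 blocks must use the 10-byte commands.
+ */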
+
+#include <sys/ioctl.h>
+#ifdef ULTRIX_COMPAT
+#include <mips/PMAX/rzdisk.h>
+#endif /*ULTRIX_COMPAT*/
+
+io_return_t
+scdisk_get_status(dev, tgt, flavor, status, status_count)
+ int dev;
+ target_info_t *tgt;
+ dev_flavor_t flavor;
+ dev_status_t status;
+ natural_t *status_count;
+{
+ struct disklabel *lp;
+ struct diskpart *label;
+
+ lp = &tgt->dev_info.disk.l;
+
+ switch (flavor) {
+#ifdef MACH_KERNEL
+ case DEV_GET_SIZE:
+
+ label=lookup_part(tgt->dev_info.disk.scsi_array, rzpartition(dev));
+ status[DEV_GET_SIZE_DEVICE_SIZE] = label->size * lp->d_secsize;
+ status[DEV_GET_SIZE_RECORD_SIZE] = tgt->block_size;
+ *status_count = DEV_GET_SIZE_COUNT;
+ break;
+#endif
+
+ case DIOCGDINFO:
+ *(struct disklabel *)status = *lp;
+#ifdef MACH_KERNEL
+ *status_count = sizeof(struct disklabel)/sizeof(int);
+#endif MACH_KERNEL
+ break;
+
+ case DIOCGDINFO - (0x10<<16):
+ *(struct disklabel *)status = *lp;
+#ifdef MACH_KERNEL
+ *status_count = sizeof(struct disklabel)/sizeof(int) - 4;
+#endif MACH_KERNEL
+ break;
+
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+#if ULTRIX_COMPAT
+ case SCSI_MODE_SENSE: /*_IOWR(p, 9, struct mode_sel_sns_params) */
+ break;
+ case DIOCGETPT: /*_IOR(p, 1, struct pt) */
+ case SCSI_GET_SENSE: /*_IOR(p, 10, struct extended_sense) */
+ return ul_disk_ioctl(tgt, flavor, status, status_count);
+#endif /*ULTRIX_COMPAT*/
+#endif /*!MACH_KERNEL*/
+
+#if 0
+ case DIOCRFORMAT:
+ break;
+#endif
+ default:
+#ifdef i386
+ return(scsi_i386_get_status(dev, tgt, flavor, status, status_count));
+#else i386
+ return(D_INVALID_OPERATION);
+#endif i386
+ }
+ return D_SUCCESS;
+}
+
+io_return_t
+scdisk_set_status(dev, tgt, flavor, status, status_count)
+ int dev;
+ target_info_t *tgt;
+ dev_flavor_t flavor;
+ dev_status_t status;
+ natural_t status_count;
+{
+ io_return_t error = D_SUCCESS;
+ struct disklabel *lp;
+
+ lp = &tgt->dev_info.disk.l;
+
+
+ switch (flavor) {
+ case DIOCSRETRIES:
+#ifdef MACH_KERNEL
+ if (status_count != sizeof(int))
+ return D_INVALID_SIZE;
+#endif /* MACH_KERNEL */
+ scsi_bbr_retries = *(int *)status;
+ break;
+
+ case DIOCWLABEL:
+ case DIOCWLABEL - (0x10<<16):
+ if (*(int*)status)
+ tgt->flags |= TGT_WRITE_LABEL;
+ else
+ tgt->flags &= ~TGT_WRITE_LABEL;
+ break;
+ case DIOCSDINFO:
+ case DIOCSDINFO - (0x10<<16):
+ case DIOCWDINFO:
+ case DIOCWDINFO - (0x10<<16):
+#ifdef MACH_KERNEL
+ if (status_count != sizeof(struct disklabel) / sizeof(int))
+ return D_INVALID_SIZE;
+#endif /* MACH_KERNEL */
+ error = setdisklabel(lp, (struct disklabel*) status);
+ if (error || (flavor == DIOCSDINFO) || (flavor == DIOCSDINFO - (0x10<<16)))
+ return error;
+ error = scdisk_writelabel(tgt);
+ break;
+
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+#if ULTRIX_COMPAT
+ case SCSI_FORMAT_UNIT: /*_IOW(p, 4, struct format_params) */
+ case SCSI_REASSIGN_BLOCK: /*_IOW(p, 5, struct reassign_params) */
+ case SCSI_READ_DEFECT_DATA: /*_IOW(p, 6, struct read_defect_params) */
+ case SCSI_VERIFY_DATA: /*_IOW(p, 7, struct verify_params) */
+ case SCSI_MODE_SELECT: /*_IOW(p, 8, struct mode_sel_sns_params) */
+ case SCSI_MODE_SENSE: /*_IOW(p, 9, struct mode_sel_sns_params) */
+ case SCSI_GET_INQUIRY_DATA: /*_IOW(p, 11, struct inquiry_info) */
+ return ul_disk_ioctl(tgt, flavor, status, status_count);
+#endif /*ULTRIX_COMPAT*/
+#endif /*!MACH_KERNEL*/
+
+#if notyet
+ case DIOCWFORMAT:
+ case DIOCSBAD: /* ?? how ? */
+#endif
+ default:
+#ifdef i386
+ error = scsi_i386_set_status(dev, tgt, flavor, status, status_count);
+#else i386
+ error = D_INVALID_OPERATION;
+#endif i386
+ }
+ return error;
+}
+
+static int grab_it(tgt, ior)
+ target_info_t *tgt;
+ io_req_t ior;
+{
+ spl_t s;
+
+ s = splbio();
+ simple_lock(&tgt->target_lock);
+ if (!tgt->ior)
+ tgt->ior = ior;
+ simple_unlock(&tgt->target_lock);
+ splx(s);
+
+ if (tgt->ior != ior)
+ return D_ALREADY_OPEN;
+ return D_SUCCESS;
+}
+
+/* Write back a label to the disk */
+io_return_t scdisk_writelabel(tgt)
+ target_info_t *tgt;
+{
+
+	printf("scdisk_writelabel: NO LONGER IMPLEMENTED\n");
+#if 0
+/* Taken out at Bryan's suggestion until 'fixed' for slices */
+
+ io_req_t ior;
+ char *data = (char *)0;
+ struct disklabel *label;
+ io_return_t error;
+ int dev_bsize = tgt->block_size;
+
+ io_req_alloc(ior,0);
+#ifdef MACH_KERNEL
+ data = (char *)kalloc(dev_bsize);
+#else /*MACH_KERNEL*/
+ data = (char *)ior->io_data;
+#endif /*MACH_KERNEL*/
+ ior->io_next = 0;
+ ior->io_prev = 0;
+ ior->io_data = data;
+ ior->io_count = dev_bsize;
+ ior->io_op = IO_READ;
+ ior->io_error = 0;
+
+ if (grab_it(tgt, ior) != D_SUCCESS) {
+ error = D_ALREADY_OPEN;
+ goto ret;
+ }
+
+ scdisk_read( tgt, tgt->dev_info.disk.labelsector, ior);
+ iowait(ior);
+ if (error = ior->io_error)
+ goto ret;
+
+ label = (struct disklabel *) &data[tgt->dev_info.disk.labeloffset];
+ *label = tgt->dev_info.disk.l;
+
+ ior->io_next = 0;
+ ior->io_prev = 0;
+ ior->io_data = data;
+ ior->io_count = dev_bsize;
+ ior->io_op = IO_WRITE;
+
+ while (grab_it(tgt, ior) != D_SUCCESS) ; /* ahem */
+
+ scdisk_write( tgt, tgt->dev_info.disk.labelsector, ior);
+ iowait(ior);
+
+ error = ior->io_error;
+ret:
+#ifdef MACH_KERNEL
+ if (data) kfree((int)data, dev_bsize);
+#endif /*MACH_KERNEL*/
+ io_req_free(ior);
+ return error;
+
+#endif 0 scdisk_writelabel
+	return -1;	/* FAILURE ? */
+}
+
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+#if ULTRIX_COMPAT
+
+io_return_t ul_disk_ioctl(tgt, flavor, status, status_count)
+ target_info_t *tgt;
+ dev_flavor_t flavor;
+ dev_status_t status;
+ natural_t status_count;
+{
+ io_return_t ret;
+ scsi_ret_t err = SCSI_RET_ABORTED;/*xxx*/
+ io_req_t ior;
+
+ if (!suser())
+ return EACCES;
+
+ ior = geteblk(sizeof(struct defect_descriptors));
+ ior->io_next = 0;
+ ior->io_count = 0;
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+ ior->io_recnum = 0;
+ ior->io_residual = 0;
+
+ switch (flavor) {
+
+ case DIOCGETPT: { /*_IOR(p, 1, struct pt) */
+ scsi_dec_label_t *p;
+ struct disklabel *lp;
+		int			i;
+		struct diskpart		*label;
+
+ lp = &tgt->dev_info.disk.l;
+ p = (scsi_dec_label_t *)status;
+
+ p->magic = DEC_PARTITION_MAGIC;
+ p->in_use = 1;
+ for (i = 0; i < 8; i++) {
+			/* assumption: DEC label slot i maps to partition i */
+			label = lookup_part(tgt->dev_info.disk.scsi_array, i);
+			if (!label)
+				continue;
+			p->partitions[i].n_sectors = label->size;
+			p->partitions[i].offset = label->start;
+ }
+ err = SCSI_RET_SUCCESS;
+ }
+ break;
+
+ case SCSI_GET_SENSE: { /*_IOR(p, 10, struct extended_sense) */
+ scsi_sense_data_t *s;
+
+ s = (scsi_sense_data_t*)tgt->cmd_ptr;
+ bcopy(s, status, sizeof(*s) + s->u.xtended.add_len - 1);
+ err = SCSI_RET_SUCCESS;
+ /* only once */
+ bzero(tgt->cmd_ptr, sizeof(scsi_sense_data_t));
+ }
+ break;
+
+ case SCSI_GET_INQUIRY_DATA: { /*_IOR(p, 11, struct inquiry_info) */
+ struct mode_sel_sns_params *ms;
+
+ ms = (struct mode_sel_sns_params*)status;
+ err = scsi_inquiry( tgt, SCSI_INQ_STD_DATA);
+ if (copyout(tgt->cmd_ptr, ms->msp_addr, sizeof(struct inquiry_info))){
+ ret = EFAULT;
+ goto out;
+ }
+ }
+ break;
+
+ case SCSI_FORMAT_UNIT: { /*_IOW(p, 4, struct format_params) */
+ struct format_params *fp;
+ struct defect_descriptors *df;
+ unsigned char mode;
+ unsigned int old_timeout;
+
+ fp = (struct format_params *)status;
+ df = (struct defect_descriptors*)ior->io_data;
+ if (fp->fp_length != 0) {
+ if (copyin(fp->fp_addr, df, sizeof(*df))) {
+ ret = EFAULT;
+ goto out;
+ }
+ ior->io_count = sizeof(*df);
+ } else
+ ior->io_count = 0;
+ mode = fp->fp_format & SCSI_CMD_FMT_LIST_TYPE;
+ switch (fp->fp_defects) {
+ case VENDOR_DEFECTS:
+ mode |= SCSI_CMD_FMT_FMTDATA|SCSI_CMD_FMT_CMPLIST;
+ break;
+ case KNOWN_DEFECTS:
+ mode |= SCSI_CMD_FMT_FMTDATA;
+ break;
+ case NO_DEFECTS:
+ default:
+ break;
+ }
+ old_timeout = scsi_watchdog_period;
+ scsi_watchdog_period = 60*60; /* 1 hour should be enough, I hope */
+ err = scsi_format_unit( tgt, mode, fp->fp_pattern,
+ fp->fp_interleave, ior);
+ scsi_watchdog_period = old_timeout;
+ /* Make sure we re-read all info afresh */
+ tgt->flags = TGT_ALIVE |
+ (tgt->flags & (TGT_REMOVABLE_MEDIA|TGT_FULLY_PROBED));
+ }
+ break;
+
+ case SCSI_REASSIGN_BLOCK: { /*_IOW(p, 5, struct reassign_params) */
+ struct reassign_params *r;
+ int ndef;
+
+ r = (struct reassign_params*) status;
+ ndef = r->rp_header.defect_len0 | (r->rp_header.defect_len1 >> 8);
+ ndef >>= 2;
+ tgt->ior = ior;
+ (void) scsi_reassign_blocks( tgt, &r->rp_lbn3, ndef, ior);
+ iowait(ior);
+ err = tgt->done;
+ }
+ break;
+
+ case SCSI_READ_DEFECT_DATA: { /*_IOW(p, 6, struct read_defect_params) */
+ struct read_defect_params *dp;
+
+ dp = (struct read_defect_params *)status;
+ ior->io_count = ior->io_alloc_size;
+ if (dp->rdp_alclen > ior->io_count)
+ dp->rdp_alclen = ior->io_count;
+ else
+ ior->io_count = dp->rdp_alclen;
+ ior->io_op |= IO_READ;
+ tgt->ior = ior;
+ err = scsi_read_defect(tgt, dp->rdp_format|0x18, ior);
+ if (copyout(ior->io_data, dp->rdp_addr, dp->rdp_alclen)) {
+ ret = EFAULT;
+ goto out;
+ }
+ }
+ break;
+
+ case SCSI_VERIFY_DATA: { /*_IOW(p, 7, struct verify_params) */
+ struct verify_params *v;
+ unsigned int old_timeout;
+
+ old_timeout = scsi_watchdog_period;
+ scsi_watchdog_period = 5*60; /* 5 mins enough, I hope */
+ v = (struct verify_params *)status;
+ ior->io_count = 0;
+ err = scdisk_verify( tgt, v->vp_lbn, v->vp_length, ior);
+ scsi_watchdog_period = old_timeout;
+ }
+ break;
+
+ case SCSI_MODE_SELECT: { /*_IOW(p, 8, struct mode_sel_sns_params) */
+ struct mode_sel_sns_params *ms;
+
+ ms = (struct mode_sel_sns_params*)status;
+ if(copyin(ms->msp_addr, ior->io_data, ms->msp_length)) {
+ ret = EFAULT;
+ goto out;
+ }
+ err = scdisk_mode_select( tgt, DEV_BSIZE, ior, ior->io_data,
+ ms->msp_length, ms->msp_setps);
+ }
+ break;
+
+ case SCSI_MODE_SENSE: { /*_IOWR(p, 9, struct mode_sel_sns_params) */
+ struct mode_sel_sns_params *ms;
+ unsigned char pagecode;
+
+ ms = (struct mode_sel_sns_params*)status;
+ pagecode = (ms->msp_pgcode & 0x3f) | (ms->msp_pgctrl << 6);
+ err = scsi_mode_sense( tgt, pagecode, ms->msp_length, ior);
+ if (copyout(tgt->cmd_ptr, ms->msp_addr, ms->msp_length)){
+ ret = EFAULT;
+ goto out;
+ }
+ }
+ break;
+ }
+
+ ret = (err == SCSI_RET_SUCCESS) ? D_SUCCESS : D_IO_ERROR;
+ if (ior->io_op & IO_ERROR)
+ ret = D_IO_ERROR;
+out:
+ brelse(ior);
+ return ret;
+}
+#endif /*ULTRIX_COMPAT*/
+#endif /*!MACH_KERNEL*/
+
+#ifdef CHECKSUM
+
+#define SUMSIZE 0x10000
+#define SUMHASH(b) (((b)>>1) & (SUMSIZE - 1))
+struct {
+ long blockno;
+ long sum;
+} scdisk_checksums[SUMSIZE];
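+/*
+ * The table above is a direct-mapped cache keyed by block number:
+ * SUMHASH() drops bit 0 and masks with SUMSIZE-1, so blocks 2k and
+ * 2k+1 share a slot and a later entry simply overwrites an earlier
+ * one; the blockno field is checked before a stored sum is trusted.
+ */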
+
+void scdisk_checksum(bno, addr, size)
+ long bno;
+ register unsigned int *addr;
+{
+ register int i = size/sizeof(int);
+ register unsigned int sum = -1;
+
+ while (i-- > 0)
+ sum ^= *addr++;
+ scdisk_checksums[SUMHASH(bno)].blockno = bno;
+ scdisk_checksums[SUMHASH(bno)].sum = sum;
+}
+
+void scdisk_bcheck(bno, addr, size)
+ long bno;
+ register unsigned int *addr;
+{
+ register int i = size/sizeof(int);
+ register unsigned int sum = -1;
+ unsigned int *start = addr;
+
+ if (scdisk_checksums[SUMHASH(bno)].blockno != bno) {
+		if (scsi_debug) printf("No checksum for block x%x\n", bno);
+ return;
+ }
+
+ while (i-- > 0)
+ sum ^= *addr++;
+
+ if (scdisk_checksums[SUMHASH(bno)].sum != sum) {
+ printf("Bad checksum (x%x != x%x), bno x%x size x%x at x%x\n",
+ sum,
+			scdisk_checksums[SUMHASH(bno)].sum,
+ bno, size, start);
+ gimmeabreak();
+ scdisk_checksums[SUMHASH(bno)].sum = sum;
+ }
+}
+
+
+#endif CHECKSUM
+
+/*#define PERF */
+#ifdef PERF
+int test_read_size = 512;
+int test_read_skew = 12;
+int test_read_skew_min = 0;
+int test_read_nreads = 1000;
+int test_read_bdev = 0;
+
+#include <sys/time.h>
+
+void test_read(max)
+{
+ int i, ssk, usecs;
+ struct timeval start, stop;
+
+ if (max == 0)
+ max = test_read_skew + 1;
+ ssk = test_read_skew;
+ for (i = test_read_skew_min; i < max; i++){
+ test_read_skew = i;
+
+ start = time;
+ read_test();
+ stop = time;
+
+ usecs = stop.tv_usec - start.tv_usec;
+ if (usecs < 0) {
+ stop.tv_sec -= 1;
+ usecs += 1000000;
+ }
+ printf("Skew %3d size %d count %d time %3d sec %d us\n",
+ i, test_read_size, test_read_nreads,
+ stop.tv_sec - start.tv_sec, usecs);
+ }
+ test_read_skew = ssk;
+}
+
+void read_test()
+{
+ static int buffer[(8192*2)/sizeof(int)];
+ struct io_req io;
+ register int i, rec;
+
+ bzero(&io, sizeof(io));
+ io.io_unit = test_read_bdev;
+ io.io_op = IO_READ;
+ io.io_count = test_read_size;
+ io.io_data = (char*) buffer;
+
+ for (rec = 0, i = 0; i < test_read_nreads; i++) {
+ io.io_op = IO_READ;
+ io.io_recnum = rec;
+ scdisk_strategy(&io);
+ rec += test_read_skew;
+ iowait(&io);
+ }
+}
+
+void tur_test()
+{
+ struct io_req io;
+ register int i;
+ char *a, *b;
+ struct timeval start, stop;
+
+ bzero(&io, sizeof(io));
+ io.io_unit = test_read_bdev;
+ io.io_data = (char*)&io;/*unused but kernel space*/
+
+ start = time;
+ for (i = 0; i < test_read_nreads; i++) {
+ io.io_op = IO_INTERNAL;
+ rz_check(io.io_unit, &a, &b);
+ scsi_test_unit_ready(b,&io);
+ }
+ stop = time;
+ i = stop.tv_usec - start.tv_usec;
+ if (i < 0) {
+ stop.tv_sec -= 1;
+ i += 1000000;
+ }
+ printf("%d test-unit-ready took %3d sec %d us\n",
+ test_read_nreads,
+ stop.tv_sec - start.tv_sec, i);
+}
+
+#endif PERF
+
+/*#define WDEBUG*/
+#ifdef WDEBUG
+
+int buggo_write_size = 8192;
+int buggo_dev = 2; /* rz0b */ /* changed by KTVM from 1 (still b) */
+int buggo_out_buffer[8192/2];
+int buggo_in_buffer[8192/2];
+int buggotest(n, pattern, verbose)
+{
+ struct io_req io;
+ register int i, rec;
+
+ if (n <= 0)
+ n = 1;
+
+ if(pattern)
+ for (i = 0; i < buggo_write_size/4; i++)
+ buggo_out_buffer[i] = i + pattern;
+
+ for (i = 0; i < n; i++) {
+ register int j;
+
+ buggo_out_buffer[0] = i + pattern;
+ buggo_out_buffer[(buggo_write_size/4)-1] = i + pattern;
+ bzero(&io, sizeof(io));
+ io.io_unit = buggo_dev;
+ io.io_data = (char*)buggo_out_buffer;
+ io.io_op = IO_WRITE;
+ io.io_count = buggo_write_size;
+ io.io_recnum = i % 1024;
+ scdisk_strategy(&io);
+
+ bzero(buggo_in_buffer, sizeof(buggo_in_buffer));
+ iowait(&io);
+
+ if (verbose)
+ printf("Done write with %x", io.io_error);
+
+ bzero(&io, sizeof(io));
+ io.io_unit = buggo_dev;
+ io.io_data = (char*)buggo_in_buffer;
+ io.io_op = IO_READ;
+ io.io_count = buggo_write_size;
+ io.io_recnum = i % 1024;
+ scdisk_strategy(&io);
+ iowait(&io);
+
+ if (verbose)
+ printf("Done read with %x", io.io_error);
+
+ for (j = 0; j < buggo_write_size/4; j++)
+ if (buggo_out_buffer[j] != buggo_in_buffer[j]){
+ printf("Difference at %d-th word: %x %x\n",
+				j, buggo_out_buffer[j], buggo_in_buffer[j]);
+ return i;
+ }
+ }
+ printf("Test ok\n");
+ return n;
+}
+#endif WDEBUG
+#endif /* NSCSI > 0 */
diff --git a/scsi/rz_disk_bbr.c b/scsi/rz_disk_bbr.c
new file mode 100644
index 00000000..9d87675e
--- /dev/null
+++ b/scsi/rz_disk_bbr.c
@@ -0,0 +1,259 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: rz_disk_bbr.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 4/91
+ *
+ * Top layer of the SCSI driver: interface with the MI.
+ * This file contains bad-block management functions
+ * (retry, replace) for disk-like devices.
+ */
+
+#include <mach/std_types.h>
+#include <scsi/compat_30.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_defs.h>
+#include <scsi/rz.h>
+
+#if (NSCSI > 0)
+
+int scsi_bbr_retries = 10;
+
+#define BBR_ACTION_COMPLETE 1
+#define BBR_ACTION_RETRY_READ 2
+#define BBR_ACTION_REASSIGN 3
+#define BBR_ACTION_COPY 4
+#define BBR_ACTION_VERIFY 5
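+/*
+ * These action codes form a small state machine: the current state is
+ * parked in ior->io_temporary while TGT_BBR_ACTIVE is set, and
+ * scdisk_bbr_start() below advances it on each command completion.
+ */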
+
+static void make_msf(); /* forward */
+
+/*
+ * Bad block replacement routine, invoked on
+ * unrecovereable disk read/write errors.
+ */
+boolean_t
+scdisk_bad_block_repl(tgt, blockno)
+ target_info_t *tgt;
+ unsigned int blockno;
+{
+ register io_req_t ior = tgt->ior;
+
+ if (scsi_no_automatic_bbr || (ior->io_op & IO_INTERNAL))
+ return FALSE;
+
+ /* signal we took over */
+ tgt->flags |= TGT_BBR_ACTIVE;
+
+ printf("%s", "Attempting bad block replacement..");
+
+ tgt->dev_info.disk.b.badblockno = blockno;
+ tgt->dev_info.disk.b.retry_count = 0;
+
+ tgt->dev_info.disk.b.save_rec = ior->io_recnum;
+ tgt->dev_info.disk.b.save_addr = ior->io_data;
+ tgt->dev_info.disk.b.save_count = ior->io_count;
+ tgt->dev_info.disk.b.save_resid = ior->io_residual;
+
+ /*
+ * On a write all we need is to rewire the offending block.
+ * Note that the sense data identified precisely which 512 sector
+ * is bad. At the end we'll retry the entire write, so if there
+ * is more than one bad sector involved they will be handled one
+ * at a time.
+ */
+ if ((ior->io_op & IO_READ) == 0) {
+ char msf[sizeof(int)];
+ ior->io_temporary = BBR_ACTION_COMPLETE;
+ printf("%s", "just reassign..");
+ make_msf(msf,blockno);
+ scsi_reassign_blocks( tgt, msf, 1, ior);
+ } else
+ /*
+ * This is more complicated. We asked for N bytes, and somewhere
+ * in there there is a chunk of bad data. First off, we should retry
+ * at least a couple of times to retrieve that data [yes the drive
+ * should have done its best already so what]. If that fails we
+ * should recover as much good data as possible (before the bad one).
+ */
+ {
+ ior->io_temporary = BBR_ACTION_RETRY_READ;
+ printf("%s", "retry read..");
+ ior->io_residual = 0;
+ scdisk_start_rw(tgt, ior);
+ }
+
+ return TRUE;
+}
+
+static
+void make_msf(buf,val)
+ unsigned char *buf;
+ unsigned int val;
+{
+ *buf++ = val >> 24;
+ *buf++ = val >> 16;
+ *buf++ = val >> 8;
+ *buf++ = val >> 0;
+}
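+/*
+ * make_msf() stores the 32-bit block number most-significant byte
+ * first, the byte order expected of entries in the REASSIGN BLOCKS
+ * defect list passed to scsi_reassign_blocks().
+ */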
+
+/*
+ * This effectively replaces the strategy routine during bbr.
+ */
+void scdisk_bbr_start( tgt, done)
+ target_info_t *tgt;
+ boolean_t done;
+{
+ register io_req_t ior = tgt->ior;
+ char *msg;
+
+ switch (ior->io_temporary) {
+
+ case BBR_ACTION_COMPLETE:
+
+ /* all done, either way */
+fin:
+ tgt->flags &= ~TGT_BBR_ACTIVE;
+ ior->io_recnum = tgt->dev_info.disk.b.save_rec;
+ ior->io_data = tgt->dev_info.disk.b.save_addr;
+ ior->io_count = tgt->dev_info.disk.b.save_count;
+ ior->io_residual = tgt->dev_info.disk.b.save_resid;
+
+ if (tgt->done == SCSI_RET_SUCCESS) {
+ /* restart normal life */
+ register unsigned int xferred;
+ if (xferred = ior->io_residual) {
+ ior->io_data -= xferred;
+ ior->io_count += xferred;
+ ior->io_recnum -= xferred / tgt->block_size;
+ ior->io_residual = 0;
+ }
+ /* from the beginning */
+ ior->io_error = 0;
+ msg = "done, restarting.";
+ } else {
+ /* we could not fix it. Tell user and give up */
+ tgt->ior = ior->io_next;
+ iodone(ior);
+ msg = "done, but could not recover.";
+ }
+
+ printf("%s\n", msg);
+ scdisk_start( tgt, FALSE);
+ return;
+
+ case BBR_ACTION_RETRY_READ:
+
+ /* see if retry worked, if not do it again */
+ if (tgt->done == SCSI_RET_SUCCESS) {
+ char msf[sizeof(int)];
+
+ /* whew, retry worked. Now rewire that bad block
+ * and don't forget to copy the good data over */
+
+ tgt->dev_info.disk.b.retry_count = 0;
+ printf("%s", "ok now, reassign..");
+ ior->io_temporary = BBR_ACTION_COPY;
+ make_msf(msf, tgt->dev_info.disk.b.badblockno);
+ scsi_reassign_blocks( tgt, msf, 1, ior);
+ return;
+ }
+ if (tgt->dev_info.disk.b.retry_count++ < scsi_bbr_retries) {
+ scdisk_start_rw( tgt, ior);
+ return;
+ }
+		/* retrying was hopeless. Leave the bad block there for maintenance */
+ /* because we do not know what to write on it */
+ printf("%s%d%s", "failed after ", scsi_bbr_retries, " retries..");
+ goto fin;
+
+
+ case BBR_ACTION_COPY:
+
+		/* retrying succeeded and we rewired the bad block. */
+ if (tgt->done == SCSI_RET_SUCCESS) {
+ unsigned int tmp;
+ struct diskpart *label;
+
+ printf("%s", "ok, rewrite..");
+
+ /* writeback only the bad sector */
+
+ /* map blockno back to partition offset */
+/* !!! partition code changes: */
+ tmp = rzpartition(ior->io_unit);
+/* label=lookup_part(array, tmp +1); */
+ tmp = tgt->dev_info.disk.b.badblockno -
+/* label->start; */
+/* #if 0 */
+ tgt->dev_info.disk.l.d_partitions[tmp].p_offset;
+/* #endif 0 */
+ ior->io_data += (tmp - ior->io_recnum) * tgt->block_size;
+ ior->io_recnum = tmp;
+ ior->io_count = tgt->block_size;
+ ior->io_op &= ~IO_READ;
+
+ ior->io_temporary = BBR_ACTION_VERIFY;
+ scdisk_start_rw( tgt, ior);
+ } else {
+
+ /* either unsupported command, or repl table full */
+ printf("%s", "reassign failed (really needs reformatting), ");
+ ior->io_error = 0;
+ goto fin;
+ }
+ break;
+
+ case BBR_ACTION_VERIFY:
+
+ if (tgt->done == SCSI_RET_SUCCESS) {
+ ior->io_op |= IO_READ;
+ goto fin;
+ }
+
+ if (tgt->dev_info.disk.b.retry_count++ > scsi_bbr_retries) {
+ printf("%s%d%s", "failed after ",
+ scsi_bbr_retries, " retries..");
+ ior->io_op |= IO_READ;
+ goto fin;
+ }
+
+ /* retry, we are *this* close to success.. */
+ scdisk_start_rw( tgt, ior);
+
+ break;
+
+ case BBR_ACTION_REASSIGN:
+
+ /* if we wanted to issue the reassign multiple times */
+ /* XXX unimplemented XXX */
+
+ default: /* snafu */
+ panic("scdisk_bbr_start");
+ }
+}
+#endif /* NSCSI > 0 */
diff --git a/scsi/rz_host.c b/scsi/rz_host.c
new file mode 100644
index 00000000..94ccd81d
--- /dev/null
+++ b/scsi/rz_host.c
@@ -0,0 +1,108 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: rz_host.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 7/91
+ *
+ * Top layer of the SCSI driver: interface with the MI.
+ * This file contains operations specific to CPU-like devices.
+ *
+ * We handle here the case of other hosts that are capable of
+ * sophisticated host-to-host communication protocols, we make
+ * them look like... you'll see.
+ *
+ * There are two sides of the coin here: when we take the initiative
+ * and when the other host does it. Code for handling both cases is
+ * provided in this one file.
+ */
+
+#include <mach/std_types.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <scsi/compat_30.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_defs.h>
+#include <scsi/rz.h>
+
+#if (NSCSI > 0)
+/* Since we have invented a new "device" this cannot go into the
+   'official' scsi_devsw table.  Too bad. */
+
+extern char *schost_name();
+extern scsi_ret_t
+ schost_open(), schost_close();
+extern int schost_strategy();
+extern void schost_start();
+
+scsi_devsw_t scsi_host = {
+ schost_name, 0, schost_open, schost_close, schost_strategy,
+ schost_start, 0, 0
+};
+
+char *schost_name(internal)
+ boolean_t internal;
+{
+ return internal ? "sh" : "host";
+}
+
+scsi_ret_t
+schost_open(tgt)
+ target_info_t *tgt;
+{
+ return SCSI_RET_SUCCESS; /* XXX if this is it, drop it */
+}
+
+scsi_ret_t
+schost_close(tgt)
+ target_info_t *tgt;
+{
+ return SCSI_RET_SUCCESS; /* XXX if this is it, drop it */
+}
+
+schost_strategy(ior)
+ register io_req_t ior;
+{
+ return rz_simpleq_strategy( ior, schost_start);
+}
+
+void
+schost_start( tgt, done)
+ target_info_t *tgt;
+ boolean_t done;
+{
+ io_req_t head, ior;
+ scsi_ret_t ret;
+
+ if (done || (!tgt->dev_info.cpu.req_pending)) {
+ sccpu_start( tgt, done);
+ return;
+ }
+
+ ior = tgt->ior;
+}
+
+#endif /* NSCSI > 0 */
diff --git a/scsi/rz_labels.h b/scsi/rz_labels.h
new file mode 100644
index 00000000..6ffaa119
--- /dev/null
+++ b/scsi/rz_labels.h
@@ -0,0 +1,243 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: rz_labels.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Definitions of various vendor's disk label formats.
+ */
+
+/* modified by Kevin T. Van Maren for the unified partition code */
+
+#ifndef _RZ_LABELS_H_
+#define _RZ_LABELS_H_
+
+/*
+ * This function looks for, and converts to BSD format
+ * a vendor's label. It is only called if we did not
+ * find a standard BSD label on the disk pack.
+ */
+extern boolean_t rz_vendor_label();
+
+/*
+ * Definition of the DEC disk label,
+ * which is located (you guessed it)
+ * at the end of the 4.3 superblock.
+ */
+
+struct dec_partition_info {
+ unsigned int n_sectors; /* how big the partition is */
+ unsigned int offset; /* sector no. of start of part. */
+};
+
+typedef struct {
+ int magic;
+# define DEC_LABEL_MAGIC 0x032957
+ int in_use;
+ struct dec_partition_info partitions[8];
+} dec_label_t;
+
+/*
+ * Physical location on disk.
+ * This is independent of the filesystem we use,
+ * although of course we'll be in trouble if we
+ * screw up the 4.3 SBLOCK..
+ */
+
+#define DEC_LABEL_BYTE_OFFSET ((2*8192)-sizeof(dec_label_t))
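+/*
+ * Assuming 4-byte ints and no structure padding, dec_label_t is
+ * 4 + 4 + 8*8 = 72 bytes, so the label occupies bytes 16312..16383,
+ * i.e. the last 72 bytes of the 8K superblock area that starts at
+ * byte 8192.
+ */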
+
+
+/*
+ * Definitions for the primary boot information
+ * This is common, because the prom knows it.
+ */
+
+typedef struct {
+ int pad[2];
+ unsigned int magic;
+# define DEC_BOOT0_MAGIC 0x2757a
+ int mode;
+ unsigned int phys_base;
+ unsigned int virt_base;
+ unsigned int n_sectors;
+ unsigned int start_sector;
+} dec_boot0_t;
+
+typedef struct {
+ dec_boot0_t vax_boot;
+ /* BSD label still fits in pad */
+ char pad[0x1e0-sizeof(dec_boot0_t)];
+ unsigned long block_count;
+ unsigned long starting_lbn;
+ unsigned long flags;
+ unsigned long checksum; /* add cmpl-2 all but here */
+} alpha_boot0_t;
+
+/*
+ * Definition of the Omron disk label,
+ * which is located at sector 0. It
+ * _is_ sector 0, actually.
+ */
+struct omron_partition_info {
+ unsigned long offset;
+ unsigned long n_sectors;
+};
+
+typedef struct {
+ char packname[128]; /* in ascii */
+
+ char pad[512-(128+8*8+11*2+4)];
+
+ unsigned short badchk; /* checksum of bad track */
+ unsigned long maxblk; /* # of total logical blocks */
+ unsigned short dtype; /* disk drive type */
+ unsigned short ndisk; /* # of disk drives */
+ unsigned short ncyl; /* # of data cylinders */
+ unsigned short acyl; /* # of alternate cylinders */
+ unsigned short nhead; /* # of heads in this partition */
+ unsigned short nsect; /* # of 512 byte sectors per track */
+ unsigned short bhead; /* identifies proper label locations */
+ unsigned short ppart; /* physical partition # */
+ struct omron_partition_info
+ partitions[8];
+
+ unsigned short magic; /* identifies this label format */
+# define OMRON_LABEL_MAGIC 0xdabe
+
+ unsigned short cksum; /* xor checksum of sector */
+
+} omron_label_t;
+
+/*
+ * Physical location on disk.
+ */
+
+#define OMRON_LABEL_BYTE_OFFSET 0
+
+
+/*
+ * Definition of the i386AT disk label, which lives inside sector 0.
+ * This is the info the BIOS knows about, which we use for bootstrapping.
+ * It is common across all disks known to BIOS.
+ */
+
+struct bios_partition_info {
+
+ unsigned char bootid; /* bootable or not */
+# define BIOS_BOOTABLE 128
+
+ unsigned char beghead;/* beginning head, sector, cylinder */
+ unsigned char begsect;/* begcyl is a 10-bit number. High 2 bits */
+ unsigned char begcyl; /* are in begsect. */
+
+ unsigned char systid; /* filesystem type */
+# define UNIXOS 99 /* GNU HURD? */
+# define BSDOS 165 /* 386BSD */
+# define LINUXSWAP 130
+# define LINUXOS 131
+# define DOS_EXTENDED 05 /* container for logical partitions */
+
+# define HPFS 07 /* OS/2 Native */
+# define OS_2_BOOT 10 /* OS/2 Boot Manager */
+# define DOS_12 01 /* 12 bit FAT */
+# define DOS_16_OLD 04 /* < 32MB */
+# define DOS_16 06 /* >= 32MB (#4 not used anymore) */
+
+ /* these numbers can't be trusted because */
+ /* of newer, larger drives */
+ unsigned char endhead;/* ending head, sector, cylinder */
+ unsigned char endsect;/* endcyl is a 10-bit number. High 2 bits */
+ unsigned char endcyl; /* are in endsect. */
+
+ unsigned long offset;
+ unsigned long n_sectors;
+};
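+/*
+ * For reference, the packed start CHS fields above decode as
+ *	cyl  = ((begsect & 0xc0) << 2) | begcyl
+ *	sect =   begsect & 0x3f
+ * (and likewise for the end* fields); on larger drives only the
+ * 32-bit offset and n_sectors values remain meaningful, hence the
+ * warning above.
+ */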
+
+typedef struct {
+/*	struct bios_partition_info	bogus compiler aligns wrong
+ partitions[4];
+*/
+ char partitions[4*sizeof(struct bios_partition_info)];
+ unsigned short magic;
+# define BIOS_LABEL_MAGIC 0xaa55
+} bios_label_t;
+
+/*
+ * Physical location on disk.
+ */
+
+#define BIOS_LABEL_BYTE_OFFSET 446
+
+/*
+ * Definitions for the primary boot information
+ * This _is_ block 0
+ */
+
+#define BIOS_BOOT0_SIZE BIOS_LABEL_BYTE_OFFSET
+
+typedef struct {
+ char boot0[BIOS_BOOT0_SIZE]; /* boot code */
+/*	bios_label_t	label;	bogus compiler aligns wrong */
+ char label[sizeof(bios_label_t)];
+} bios_boot0_t;
+
+/* Moved from i386at/nhdreg.h */
+#define PDLOCATION 29 /* VTOC sector */
+
+
+/* these are the partition types that can contain sub-partitions */
+/* enum types... */
+#define DISKPART_NONE 0 /* smallest piece flag !?! */
+#define DISKPART_DOS 1
+#define DISKPART_BSD 2
+#define DISKPART_VTOC 3
+#define DISKPART_OMRON 4
+#define DISKPART_DEC 5 /* VAX disks? */
+#define DISKPART_UNKNOWN 99
+
+
+
+/* for NEW partition code */
+/* this is the basic partition structure. an array of these is
+ filled, with element 0 being the whole drive, element 1-n being
+ the n top-level partitions, followed by 0+ groups of 1+ sub-partitions. */
+typedef struct diskpart {
+ short type; /* DISKPART_xxx (see above) */
+ short fsys; /* file system (if known) */
+ int nsubs; /* number of sub-slices */
+ struct diskpart *subs; /* pointer to the sub-partitions */
+ int start; /* relative to the start of the DRIVE */
+ int size; /* # sectors in this piece */
+} diskpart;
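+/*
+ * Illustrative layout only (the real contents depend on the labels
+ * found on the disk): array[0] covers the whole drive, array[1..4]
+ * might be four DOS-style top-level partitions, and a following group
+ * of entries would then describe the sub-partitions (e.g. BSD a-h)
+ * of whichever top-level entry carries a nested label, with that
+ * entry's subs/nsubs pointing at the group.
+ */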
+
+int get_only_partition(void *driver_info, int (*bottom_read_fun)(),
+ struct diskpart *array, int array_size,
+ int disk_size, char *drive_name);
+
+struct diskpart *lookup_part(struct diskpart *array, int dev_number);
+#endif _RZ_LABELS_H_
+
diff --git a/scsi/rz_tape.c b/scsi/rz_tape.c
new file mode 100644
index 00000000..1d27722c
--- /dev/null
+++ b/scsi/rz_tape.c
@@ -0,0 +1,560 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: rz_tape.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Top layer of the SCSI driver: interface with the MI.
+ * This file contains operations specific to TAPE-like devices.
+ */
+
+#include <mach/std_types.h>
+#include <scsi/compat_30.h>
+
+#include <sys/ioctl.h>
+#ifdef MACH_KERNEL
+#include <device/tape_status.h>
+#else /*MACH_KERNEL*/
+#include <mips/PMAX/tape_status.h>
+#endif /*MACH_KERNEL*/
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_defs.h>
+#include <scsi/rz.h>
+
+#if (NSCSI > 0)
+
+
+void sctape_start(); /* forward */
+
+int scsi_tape_timeout = 5*60; /* secs, tk50 is slow when positioning far apart */
+
+int sctape_open(tgt, req)
+ target_info_t *tgt;
+ io_req_t req;
+{
+ io_return_t ret;
+ io_req_t ior;
+ int i;
+ scsi_mode_sense_data_t *mod;
+
+#ifdef MACH_KERNEL
+ req->io_device->flag |= D_EXCL_OPEN;
+#endif /*MACH_KERNEL*/
+
+ /* Preferably allow tapes to disconnect */
+ if (BGET(scsi_might_disconnect,(unsigned char)tgt->masterno,tgt->target_id))
+ BSET(scsi_should_disconnect,(unsigned char)tgt->masterno,tgt->target_id);
+
+ /*
+ * Dummy ior for proper sync purposes
+ */
+ io_req_alloc(ior,0);
+ ior->io_count = 0;
+
+ /*
+ * Do a mode sense first, some drives might be picky
+ * about changing params [even if the standard might
+ * say otherwise, sigh.]
+ */
+ do {
+ ior->io_op = IO_INTERNAL;
+ ior->io_next = 0;
+ ior->io_error = 0;
+ ret = scsi_mode_sense(tgt, 0, 32, ior);
+ } while (ret == SCSI_RET_RETRY);
+
+ mod = (scsi_mode_sense_data_t *)tgt->cmd_ptr;
+ if (scsi_debug) {
+ int p[5];
+ bcopy((char*)mod, (char*)p, sizeof(p));
+ printf("[modsns(%x): x%x x%x x%x x%x x%x]", ret,
+ p[0], p[1], p[2], p[3], p[4]);
+ }
+ if (ret == SCSI_RET_DEVICE_DOWN)
+ goto out;
+ if (ret == SCSI_RET_SUCCESS) {
+ tgt->dev_info.tape.read_only = mod->wp;
+ tgt->dev_info.tape.speed = mod->speed;
+ tgt->dev_info.tape.density = mod->bdesc[0].density_code;
+ } /* else they all default sensibly, using zeroes */
+
+ /* Some tapes have limits on record-length */
+again:
+ ior->io_op = IO_INTERNAL;
+ ior->io_next = 0;
+ ior->io_error = 0;
+ ret = scsi_read_block_limits( tgt, ior);
+ if (ret == SCSI_RET_RETRY) goto again;
+ if (!ior->io_error && (ret == SCSI_RET_SUCCESS)) {
+ scsi_blimits_data_t *lim;
+ int maxl;
+
+ lim = (scsi_blimits_data_t *) tgt->cmd_ptr;
+
+ tgt->block_size = (lim->minlen_msb << 8) |
+ lim->minlen_lsb;
+
+ maxl = (lim->maxlen_msb << 16) |
+ (lim->maxlen_sb << 8) |
+ lim->maxlen_lsb;
+ if (maxl == 0)
+ maxl = (unsigned)-1;
+ tgt->dev_info.tape.maxreclen = maxl;
+ tgt->dev_info.tape.fixed_size = (maxl == tgt->block_size);
+ } else {
+ /* let the user worry about it */
+ /* default: tgt->block_size = 1; */
+ tgt->dev_info.tape.maxreclen = (unsigned)-1;
+ tgt->dev_info.tape.fixed_size = FALSE;
+ }
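+	/*
+	 * READ BLOCK LIMITS reports the smallest and largest record
+	 * lengths the drive accepts; when the two coincide the drive
+	 * only handles fixed-length blocks, which is what fixed_size
+	 * records above.
+	 */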
+
+ /* Try hard to do a mode select */
+ for (i = 0; i < 5; i++) {
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+ ret = sctape_mode_select(tgt, 0, 0, FALSE, ior);
+ if (ret == SCSI_RET_SUCCESS)
+ break;
+ }
+ if (scsi_watchdog_period < scsi_tape_timeout)
+ scsi_watchdog_period += scsi_tape_timeout;
+
+#if 0 /* this might imply rewind, which we do not want, although yes, .. */
+ /* we want the tape loaded */
+ ior->io_op = IO_INTERNAL;
+ ior->io_next = 0;
+ ior->io_error = 0;
+ ret = scsi_start_unit(tgt, SCSI_CMD_SS_START, ior);
+#endif
+ req->io_device->bsize = tgt->block_size;
+out:
+ io_req_free(ior);
+ return ret;
+}
+
+
+io_return_t sctape_close(tgt)
+ target_info_t *tgt;
+{
+ io_return_t ret = SCSI_RET_SUCCESS;
+ io_req_t ior;
+
+ /*
+ * Dummy ior for proper sync purposes
+ */
+ io_req_alloc(ior,0);
+ ior->io_op = IO_INTERNAL;
+ ior->io_next = 0;
+ ior->io_count = 0;
+
+ if (tgt->ior) printf("TAPE: Close with pending requests ?? \n");
+
+ /* write a filemark if we xtnded/truncated the tape */
+ if (tgt->flags & TGT_WRITTEN_TO) {
+ tgt->ior = ior;
+ ior->io_error = 0;
+ ret = scsi_write_filemarks(tgt, 2, ior);
+ if (ret != SCSI_RET_SUCCESS)
+ printf("%s%d: wfmark failed x%x\n",
+ (*tgt->dev_ops->driver_name)(TRUE), tgt->target_id, ret);
+ /*
+ * Don't bother repositioning if we'll rewind it
+ */
+ if (tgt->flags & TGT_REWIND_ON_CLOSE)
+ goto rew;
+retry:
+ tgt->ior = ior;
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+ ior->io_next = 0;
+ ret = scsi_space(tgt, SCSI_CMD_SP_FIL, -1, ior);
+ if (ret != SCSI_RET_SUCCESS) {
+ if (ret == SCSI_RET_RETRY) {
+ timeout(wakeup, tgt, hz);
+ await(tgt);
+ goto retry;
+ }
+ printf("%s%d: bspfile failed x%x\n",
+ (*tgt->dev_ops->driver_name)(TRUE), tgt->target_id, ret);
+ }
+ }
+rew:
+ if (tgt->flags & TGT_REWIND_ON_CLOSE) {
+ /* Rewind tape */
+ ior->io_error = 0;
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+ tgt->ior = ior;
+ (void) scsi_rewind(tgt, ior, FALSE);
+ iowait(ior);
+ if (tgt->done == SCSI_RET_RETRY) {
+ timeout(wakeup, tgt, 5*hz);
+ await(tgt);
+ goto rew;
+ }
+ }
+ io_req_free(ior);
+
+ tgt->flags &= ~(TGT_ONLINE|TGT_WRITTEN_TO|TGT_REWIND_ON_CLOSE);
+ return ret;
+}
+
+int sctape_strategy(ior)
+ register io_req_t ior;
+{
+ target_info_t *tgt;
+ register int i = ior->io_unit;
+
+ tgt = scsi_softc[rzcontroller(i)]->target[rzslave(i)];
+
+ if (((ior->io_op & IO_READ) == 0) &&
+ tgt->dev_info.tape.read_only) {
+ ior->io_error = D_INVALID_OPERATION;
+ ior->io_op |= IO_ERROR;
+ ior->io_residual = ior->io_count;
+ iodone(ior);
+ return ior->io_error;
+ }
+
+ return rz_simpleq_strategy( ior, sctape_start);
+}
+
+static void
+do_residue(ior, sns, bsize)
+ io_req_t ior;
+ scsi_sense_data_t *sns;
+ int bsize;
+{
+ int residue;
+
+ /* Not an error situation */
+ ior->io_error = 0;
+ ior->io_op &= ~IO_ERROR;
+
+ if (!sns->addr_valid) {
+ ior->io_residual = ior->io_count;
+ return;
+ }
+
+ residue = sns->u.xtended.info0 << 24 |
+ sns->u.xtended.info1 << 16 |
+ sns->u.xtended.info2 << 8 |
+ sns->u.xtended.info3;
+	/* fixed-block mode? if so the info field counts blocks, so scale to bytes */
+ residue *= bsize;
+ /*
+ * NOTE: residue == requested - actual
+ * We only care if > 0
+ */
+ if (residue < 0) residue = 0;/* sanity */
+ ior->io_residual += residue;
+}
+
+void sctape_start( tgt, done)
+ target_info_t *tgt;
+ boolean_t done;
+{
+ io_req_t head, ior = tgt->ior;
+
+ if (ior == 0)
+ return;
+
+ if (done) {
+
+ /* see if we must retry */
+ if ((tgt->done == SCSI_RET_RETRY) &&
+ ((ior->io_op & IO_INTERNAL) == 0)) {
+ delay(1000000);/*XXX*/
+ goto start;
+ } else
+ /* got a bus reset ? ouch, that hurts */
+ if (tgt->done == (SCSI_RET_ABORTED|SCSI_RET_RETRY)) {
+ /*
+ * we really cannot retry because the tape position
+ * is lost.
+ */
+ printf("Lost tape position\n");
+ ior->io_error = D_IO_ERROR;
+ ior->io_op |= IO_ERROR;
+ } else
+
+ /* check completion status */
+
+ if (tgt->cur_cmd == SCSI_CMD_REQUEST_SENSE) {
+ scsi_sense_data_t *sns;
+
+ ior->io_op = ior->io_temporary;
+ ior->io_error = D_IO_ERROR;
+ ior->io_op |= IO_ERROR;
+
+ sns = (scsi_sense_data_t *)tgt->cmd_ptr;
+
+ if (scsi_debug)
+ scsi_print_sense_data(sns);
+
+ if (scsi_check_sense_data(tgt, sns)) {
+ if (sns->u.xtended.ili) {
+ if (ior->io_op & IO_READ) {
+ do_residue(ior, sns, tgt->block_size);
+ if (scsi_debug)
+ printf("Tape Short Read (%d)\n",
+ ior->io_residual);
+ }
+ } else if (sns->u.xtended.eom) {
+ do_residue(ior, sns, tgt->block_size);
+ if (scsi_debug)
+ printf("End of Physical Tape!\n");
+ } else if (sns->u.xtended.fm) {
+ do_residue(ior, sns, tgt->block_size);
+ if (scsi_debug)
+ printf("File Mark\n");
+ }
+ }
+ }
+
+ else if (tgt->done != SCSI_RET_SUCCESS) {
+
+ if (tgt->done == SCSI_RET_NEED_SENSE) {
+
+ ior->io_temporary = ior->io_op;
+ ior->io_op = IO_INTERNAL;
+ if (scsi_debug)
+ printf("[NeedSns x%x x%x]", ior->io_residual, ior->io_count);
+ scsi_request_sense(tgt, ior, 0);
+ return;
+
+ } else if (tgt->done == SCSI_RET_RETRY) {
+ /* only retry here READs and WRITEs */
+ if ((ior->io_op & IO_INTERNAL) == 0) {
+ ior->io_residual = 0;
+ goto start;
+ } else{
+ ior->io_error = D_WOULD_BLOCK;
+ ior->io_op |= IO_ERROR;
+ }
+ } else {
+ ior->io_error = D_IO_ERROR;
+ ior->io_op |= IO_ERROR;
+ }
+ }
+
+ if (scsi_debug)
+ printf("[Resid x%x]", ior->io_residual);
+
+ /* dequeue next one */
+ head = ior;
+
+ simple_lock(&tgt->target_lock);
+ ior = head->io_next;
+ tgt->ior = ior;
+ if (ior)
+ ior->io_prev = head->io_prev;
+ simple_unlock(&tgt->target_lock);
+
+ iodone(head);
+
+ if (ior == 0)
+ return;
+ }
+ ior->io_residual = 0;
+start:
+ if (ior->io_op & IO_READ) {
+ tgt->flags &= ~TGT_WRITTEN_TO;
+ sctape_read( tgt, ior );
+ } else if ((ior->io_op & IO_INTERNAL) == 0) {
+ tgt->flags |= TGT_WRITTEN_TO;
+ sctape_write( tgt, ior );
+ }
+}
+
+io_return_t
+sctape_get_status( dev, tgt, flavor, status, status_count)
+ int dev;
+ target_info_t *tgt;
+ dev_flavor_t flavor;
+ dev_status_t status;
+ natural_t *status_count;
+{
+ switch (flavor) {
+ case DEV_GET_SIZE:
+
+ status[DEV_GET_SIZE_DEVICE_SIZE] = 0;
+ status[DEV_GET_SIZE_RECORD_SIZE] = tgt->block_size;
+ *status_count = DEV_GET_SIZE_COUNT;
+ break;
+ case TAPE_STATUS: {
+ struct tape_status *ts = (struct tape_status *) status;
+
+ ts->mt_type = MT_ISSCSI;
+ ts->speed = tgt->dev_info.tape.speed;
+ ts->density = tgt->dev_info.tape.density;
+ ts->flags = (tgt->flags & TGT_REWIND_ON_CLOSE) ?
+ TAPE_FLG_REWIND : 0;
+ if (tgt->dev_info.tape.read_only)
+ ts->flags |= TAPE_FLG_WP;
+#ifdef MACH_KERNEL
+ *status_count = TAPE_STATUS_COUNT;
+#endif
+
+ break;
+ }
+ /* U*x compat */
+ case MTIOCGET: {
+ struct mtget *g = (struct mtget *) status;
+
+ bzero(g, sizeof(struct mtget));
+ g->mt_type = 0x7; /* Ultrix compat */
+#ifdef MACH_KERNEL
+ *status_count = sizeof(struct mtget)/sizeof(int);
+#endif
+ break;
+ }
+ default:
+ return D_INVALID_OPERATION;
+ }
+ return D_SUCCESS;
+}
+
+io_return_t
+sctape_set_status( dev, tgt, flavor, status, status_count)
+ int dev;
+ target_info_t *tgt;
+ dev_flavor_t flavor;
+ dev_status_t status;
+ natural_t status_count;
+{
+ scsi_ret_t ret;
+
+ switch (flavor) {
+ case TAPE_STATUS: {
+ struct tape_status *ts = (struct tape_status *) status;
+ if (ts->flags & TAPE_FLG_REWIND)
+ tgt->flags |= TGT_REWIND_ON_CLOSE;
+ else
+ tgt->flags &= ~TGT_REWIND_ON_CLOSE;
+
+ if (ts->speed || ts->density) {
+ unsigned int ospeed, odensity;
+ io_req_t ior;
+
+ io_req_alloc(ior,0);
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+ ior->io_next = 0;
+ ior->io_count = 0;
+
+ ospeed = tgt->dev_info.tape.speed;
+ odensity = tgt->dev_info.tape.density;
+ tgt->dev_info.tape.speed = ts->speed;
+ tgt->dev_info.tape.density = ts->density;
+
+ ret = sctape_mode_select(tgt, 0, 0, (ospeed == ts->speed), ior);
+ if (ret != SCSI_RET_SUCCESS) {
+ tgt->dev_info.tape.speed = ospeed;
+ tgt->dev_info.tape.density = odensity;
+ }
+
+ io_req_free(ior);
+ }
+
+ break;
+ }
+ /* U*x compat */
+ case MTIOCTOP: {
+ struct tape_params *mt = (struct tape_params *) status;
+ io_req_t ior;
+
+ if (scsi_debug)
+ printf("[sctape_sstatus: %x %x %x]\n",
+ flavor, mt->mt_operation, mt->mt_repeat_count);
+
+ io_req_alloc(ior,0);
+retry:
+ ior->io_count = 0;
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+ ior->io_next = 0;
+ tgt->ior = ior;
+
+ /* compat: in U*x it is a short */
+ switch ((short)(mt->mt_operation)) {
+ case MTWEOF: /* write an end-of-file record */
+ ret = scsi_write_filemarks(tgt, mt->mt_repeat_count, ior);
+ break;
+ case MTFSF: /* forward space file */
+ ret = scsi_space(tgt, SCSI_CMD_SP_FIL, mt->mt_repeat_count, ior);
+ break;
+ case MTBSF: /* backward space file */
+ ret = scsi_space(tgt, SCSI_CMD_SP_FIL, -mt->mt_repeat_count,ior);
+ break;
+ case MTFSR: /* forward space record */
+ ret = scsi_space(tgt, SCSI_CMD_SP_BLOCKS, mt->mt_repeat_count, ior);
+ break;
+ case MTBSR: /* backward space record */
+ ret = scsi_space(tgt, SCSI_CMD_SP_BLOCKS, -mt->mt_repeat_count, ior);
+ break;
+ case MTREW: /* rewind */
+ case MTOFFL: /* rewind and put the drive offline */
+ ret = scsi_rewind(tgt, ior, TRUE);
+ iowait(ior);
+ if ((short)(mt->mt_operation) == MTREW) break;
+ ior->io_op = 0;
+ ior->io_next = 0;
+ ior->io_error = 0;
+ (void) scsi_start_unit(tgt, 0, ior);
+ break;
+ case MTNOP: /* no operation, sets status only */
+ case MTCACHE: /* enable controller cache */
+ case MTNOCACHE: /* disable controller cache */
+ ret = SCSI_RET_SUCCESS;
+ break;
+ default:
+ tgt->ior = 0;
+ io_req_free(ior);
+ return D_INVALID_OPERATION;
+ }
+
+ if (ret == SCSI_RET_RETRY) {
+ timeout(wakeup, ior, 5*hz);
+ await(ior);
+ goto retry;
+ }
+
+ io_req_free(ior);
+ if (ret != SCSI_RET_SUCCESS)
+ return D_IO_ERROR;
+ break;
+ }
+ case MTIOCIEOT:
+ case MTIOCEEOT:
+ default:
+ return D_INVALID_OPERATION;
+ }
+ return D_SUCCESS;
+}
+#endif /* NSCSI > 0 */
diff --git a/scsi/scsi.c b/scsi/scsi.c
new file mode 100644
index 00000000..d4aecf6f
--- /dev/null
+++ b/scsi/scsi.c
@@ -0,0 +1,642 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Middle layer of the SCSI driver: chip independent functions
+ * This file contains Controller and Device-independent functions
+ */
+
+#include <scsi.h>
+
+#if NSCSI > 0
+#include <platforms.h>
+
+#include <machine/machspl.h> /* spl definitions */
+
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <scsi/compat_30.h>
+
+#include <chips/busses.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+#include <machine/machspl.h>
+
+
+
+#ifdef VAXSTATION
+/* We run some of this code on the interrupt stack */
+#undef spl0
+#define spl0() spl1()
+#endif /*VAXSTATION*/
+
+/*
+ * Overall driver state
+ */
+
+target_info_t scsi_target_data[NSCSI*8]; /* per target state */
+scsi_softc_t scsi_softc_data[NSCSI]; /* per HBA state */
+scsi_softc_t *scsi_softc[NSCSI]; /* quick access&checking */
+
+/*
+ * If a specific target should NOT be asked to go synchronous
+ * then its bit in this bitmap should be set. Each SCSI controller
+ * (Host Bus Adapter) can hold at most 8 targets --> use one
+ * byte per controller. A bit set to one means NO synchronous.
+ * Patch with adb if necessary.
+ */
+unsigned char scsi_no_synchronous_xfer[NSCSI];
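+
+/*
+ * Illustrative sketch (not a separate tunable): to keep target 3 on
+ * controller 0 from being asked to go synchronous, set its bit
+ *
+ *	scsi_no_synchronous_xfer[0] |= 1 << 3;
+ *
+ * which is the bit the BGET() tests in scsi_slave() and
+ * scsi_bus_was_reset() look at.
+ */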
+
+/*
+ * For certain targets it is wise to use the long form of the
+ * read/write commands even if their capacity would not necessitate
+ * it. Same as above for usage.
+ */
+unsigned char scsi_use_long_form[NSCSI];
+
+
+/*
+ * Control over disconnect-reconnect mode.
+ */
+unsigned char scsi_might_disconnect[NSCSI] = /* do it if deemed appropriate */
+ { 0xff, 0xff, 0xff, 0xff, 0xff};/* Fix by hand viz NSCSI */
+unsigned char scsi_should_disconnect[NSCSI] = /* just do it */
+ { 0,};
+unsigned char scsi_initiator_id[NSCSI] = /* our id on the bus(ses) */
+ { 7, 7, 7, 7, 7};
+
+/*
+ * Miscellaneous config
+ */
+boolean_t scsi_exabyte_filemarks = FALSE; /* use short filemarks */
+int scsi_watchdog_period = 10; /* but exabyte needs >=30 for bspace */
+int scsi_delay_after_reset = 1000000;/* microseconds */
+boolean_t scsi_no_automatic_bbr = FALSE; /* revector bad blocks automatically */
+
+#ifdef MACH_KERNEL
+#else
+/* This covers Exabyte's max record size */
+unsigned int scsi_per_target_virtual = 256*1024;
+#endif /* MACH_KERNEL */
+
+
+/*
+ * Device-specific operations are switched off this table
+ */
+
+extern char
+ *scdisk_name(), *sctape_name(), *scprt_name(),
+ *sccpu_name(), *scworm_name(), *sccdrom_name(),
+ *scscn_name(), *scmem_name(), *scjb_name(), *sccomm_name();
+extern void
+ sctape_optimize();
+extern scsi_ret_t
+ scdisk_open(), sctape_open(), sctape_close(),
+ sccomm_open(), sccomm_close();
+extern int
+ scdisk_strategy(), sctape_strategy(), sccpu_strategy(),
+ sccomm_strategy();
+extern void
+ scdisk_start(), sctape_start(), sccpu_start(), sccomm_start();
+
+extern io_return_t
+ scdisk_set_status(), scdisk_get_status(),
+ sctape_set_status(), sctape_get_status(),
+ sccomm_set_status(), sccomm_get_status();
+
+scsi_devsw_t scsi_devsw[] = {
+
+/* SCSI_DISK */ { scdisk_name, SCSI_OPTIMIZE_NULL,
+ scdisk_open, SCSI_CLOSE_NULL,
+ scdisk_strategy, scdisk_start,
+ scdisk_get_status, scdisk_set_status },
+
+/* SCSI_TAPE */ { sctape_name, sctape_optimize,
+ sctape_open, sctape_close,
+ sctape_strategy, sctape_start,
+ sctape_get_status, sctape_set_status },
+
+/* SCSI_PRINTER */ { scprt_name, SCSI_OPTIMIZE_NULL, /*XXX*/},
+
+/* SCSI_CPU */ { sccpu_name, SCSI_OPTIMIZE_NULL,
+ SCSI_OPEN_NULL, SCSI_CLOSE_NULL,
+ sccpu_strategy, sccpu_start,},
+
+/* SCSI_WORM */ { scworm_name, SCSI_OPTIMIZE_NULL,
+ scdisk_open, SCSI_CLOSE_NULL,
+ scdisk_strategy, scdisk_start,
+ scdisk_get_status, scdisk_set_status },
+
+/* SCSI_CDROM */ { sccdrom_name, SCSI_OPTIMIZE_NULL,
+ scdisk_open, SCSI_CLOSE_NULL,
+ scdisk_strategy, scdisk_start,
+ scdisk_get_status, scdisk_set_status },
+/* scsi2 */
+/* SCSI_SCANNER */ { scscn_name, SCSI_OPTIMIZE_NULL, /*XXX*/ },
+
+/* SCSI_MEMORY */ { scmem_name, SCSI_OPTIMIZE_NULL,
+ scdisk_open, SCSI_CLOSE_NULL,
+ scdisk_strategy, scdisk_start,
+ scdisk_get_status, scdisk_set_status },
+
+/* SCSI_J_BOX */ { scjb_name, SCSI_OPTIMIZE_NULL, /*XXX*/ },
+
+/* SCSI_COMM */ { sccomm_name, SCSI_OPTIMIZE_NULL,
+#if (NCENDATA>0)
+ sccomm_open, sccomm_close,
+ sccomm_strategy, sccomm_start,
+ sccomm_get_status, sccomm_set_status
+#endif
+ },
+ 0
+};
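+
+/*
+ * Illustrative sketch of how the switch is used (the field names are
+ * those of scsi_devsw_t, see scsi_defs.h): the INQUIRY peripheral type
+ * indexes the table and all later calls dispatch through it, e.g.
+ *
+ *	tgt->dev_ops = &scsi_devsw[SCSI_TAPE];
+ *	...
+ *	(*tgt->dev_ops->restart)(tgt, TRUE);	ends up in sctape_start()
+ */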
+
+/*
+ * Allocation routines for state structures
+ */
+scsi_softc_t *
+scsi_master_alloc(unit, hw)
+ unsigned unit;
+ char *hw;
+{
+ scsi_softc_t *sc;
+
+ if (unit < NSCSI) {
+ sc = &scsi_softc_data[unit];
+ scsi_softc[unit] = sc;
+ sc->masterno = unit;
+ sc->hw_state = hw;
+ return sc;
+ }
+ return 0;
+}
+
+target_info_t *
+scsi_slave_alloc(unit, slave, hw)
+ unsigned unit, slave;
+ char *hw;
+{
+ target_info_t *tgt;
+
+ tgt = &scsi_target_data[(unit<<3) + slave];
+ tgt->hw_state = hw;
+ tgt->dev_ops = 0; /* later */
+ tgt->target_id = slave;
+ tgt->masterno = unit;
+ tgt->block_size = 1; /* default */
+ tgt->flags = TGT_ALIVE;
+ tgt->sync_period = 0;
+ tgt->sync_offset = 0;
+ simple_lock_init(&tgt->target_lock);
+
+ scsi_softc[unit]->target[slave] = tgt;
+ return tgt;
+}
+
+void
+zero_ior(
+ io_req_t ior )
+{
+ ior->io_next = ior->io_prev = 0;
+ ior->io_count = 0;
+ ior->io_op = IO_INTERNAL;
+ ior->io_error = 0;
+}
+
+/*
+ * Slave routine:
+ * See if the slave description (controller, unit, ..)
+ * matches one of the slaves found during probe
+ *
+ * Implementation:
+ * Send out an INQUIRY command to see what sort of device
+ * the slave is.
+ * Notes:
+ * At this time the driver is fully functional and works
+ * off interrupts.
+ * TODO:
+ * The SCSI2 spec says what exactly must happen: see F.2.3
+ */
+int scsi_slave( ui, reg)
+ struct bus_device *ui;
+ unsigned reg;
+{
+ scsi_softc_t *sc = scsi_softc[(unsigned char)ui->ctlr];
+ target_info_t *tgt = sc->target[(unsigned char)ui->slave];
+ scsi2_inquiry_data_t *inq;
+ int scsi_std;
+ int ptype, s;
+
+ if (!tgt || !(tgt->flags & TGT_ALIVE))
+ return 0;
+
+ /* Might have scanned already */
+ if (tgt->dev_ops)
+ goto out;
+
+#ifdef SCSI2
+ This is what should happen:
+ - for all LUNs
+ INQUIRY
+ scsi_verify_state (see)
+ scsi_initialize (see)
+#endif /* SCSI2 */
+
+ tgt->unit_no = ui->slave; /* incorrect, but needed early */
+
+ s = spl0(); /* we need interrupts */
+
+ if (BGET(scsi_no_synchronous_xfer,(unsigned char)sc->masterno,tgt->target_id))
+ tgt->flags |= TGT_DID_SYNCH;
+
+ /*
+ * Ok, it is time to see what type of device this is,
+ * send an INQUIRY cmd and wait till done.
+ * Possibly do the synch negotiation here.
+ */
+ scsi_inquiry(tgt, SCSI_INQ_STD_DATA);
+
+ inq = (scsi2_inquiry_data_t*)tgt->cmd_ptr;
+ ptype = inq->periph_type;
+
+ switch (ptype) {
+ case SCSI_CDROM :
+ tgt->flags |= TGT_READONLY;
+ /* fall through */
+ case SCSI_DISK :
+ case SCSI_TAPE :
+ case SCSI_PRINTER :
+ case SCSI_CPU :
+ case SCSI_WORM :
+ case SCSI_SCANNER :
+ case SCSI_MEMORY :
+ case SCSI_J_BOX :
+ case SCSI_COMM :
+/* case SCSI_PREPRESS1 : reserved, really
+ case SCSI_PREPRESS2 : */
+ tgt->dev_ops = &scsi_devsw[ptype];
+ break;
+ default:
+ printf("scsi%d: %s %d (x%x). ", ui->ctlr,
+ "Unsupported device type at SCSI id", ui->slave,
+ inq->periph_type);
+ scsi_print_inquiry((scsi2_inquiry_data_t*)inq,
+ SCSI_INQ_STD_DATA, 0);
+ tgt->flags = 0;
+ splx(s);
+ return 0;
+ }
+
+ if (inq->rmb)
+ tgt->flags |= TGT_REMOVABLE_MEDIA;
+
+ /*
+ * Tell the user we know this target, then see if we
+ * can be a bit smart about it.
+ */
+ scsi_print_inquiry((scsi2_inquiry_data_t*)inq,
+ SCSI_INQ_STD_DATA, tgt->tgt_name);
+ if (scsi_debug)
+ scsi_print_inquiry((scsi2_inquiry_data_t*)inq,
+ SCSI_INQ_STD_DATA, 0);
+
+ /*
+	 * The above tells whether it currently behaves as a scsi2,
+	 * however scsi1 might just be the default setting.
+	 * The spec says that even if in scsi1 mode the target
+	 * should answer to the full scsi2 inquiry spec.
+ */
+ scsi_std = (inq->ansi == 2 || inq->response_fmt == 2) ? 2 : 1;
+#if nosey
+ if (scsi_std == 2) {
+		unsigned char supp_pages[256], i;
+		int npages;
+		scsi2_impl_opdef_page_t *impl;
+
+ scsi_inquiry(tgt, SCSI_INQ_SUPP_PAGES);
+ impl = (scsi2_impl_opdef_page_t *)inq;
+ npages = impl->page_len - 2;
+ bcopy(impl->supp_opdef, supp_pages, npages);
+
+ for (i = 0; i < npages; i++) {
+ scsi_inquiry(tgt, supp_pages[i]);
+ scsi_print_inquiry(inq, supp_pages[i], 0);
+ }
+ }
+
+ if (scsi_std == 2) {
+ scsi2_impl_opdef_page_t *impl;
+ int i;
+
+ scsi_inquiry(tgt, SCSI_INQ_IMPL_OPDEF);
+ impl = (scsi2_impl_opdef_page_t *)inq;
+ for (i = 0; i < impl->page_len - 2; i++)
+ if (impl->supp_opdef[i] == SCSI2_OPDEF) {
+ scsi_change_definition(tgt, SCSI2_OPDEF);
+ /* if success .. */
+ tgt->flags |= TGT_SCSI_2_MODE;
+ break;
+ }
+ }
+#endif /* nosey */
+
+ splx(s);
+out:
+ return (strcmp(ui->name, (*tgt->dev_ops->driver_name)(TRUE)) == 0);
+}
+
+#ifdef SCSI2
+scsi_verify_state(...)
+{
+verify_state: send test_unit_ready up to 3 times; each time it fails
+(with check condition) send a request_sense. It is ok to get UNIT ATTENTION
+the first time only, NOT READY the second, and only GOOD the last time.
+If you get BUSY or RESERVATION CONFLICT, retry.
+}
+
+scsi_initialize(...)
+{
+
+initialize: send start_unit with immed=0 (->disconnect); if it fails
+with check condition send a request_sense and if "illegal request"
+proceed anyway. Retry on BUSY.
+Do a verify_state, then
+disks:
+ - mode_sense (current) if ANSI2 or needed by vendor (!!!!)
+ and if check-condition&illegal-request goto capacity
+ - mode_sense (changeable)
+ - if needed do a mode_select (yes, 512)
+ - read_capacity
+tapes:
+
+}
+#endif /* SCSI2 */
+
+/*
+ * Attach routine:
+ * Fill in all the relevant per-slave data and make
+ * the slave operational.
+ *
+ * Implementation:
+ * Get target's status, start the unit and then
+ * switch off to device-specific functions to gather
+ * as much info as possible about the slave.
+ */
+void scsi_attach(ui)
+ register struct bus_device *ui;
+{
+ scsi_softc_t *sc = scsi_softc[ui->mi->unit];
+ target_info_t *tgt = sc->target[(unsigned char)ui->slave];
+ int i;
+ spl_t s;
+
+ printf(" (%s %s) ", (*tgt->dev_ops->driver_name)(FALSE),tgt->tgt_name);
+
+ if (tgt->flags & TGT_US) {
+ printf(" [this cpu]");
+ return;
+ }
+
+ s = spl0();
+
+ /* sense return from inquiry */
+ scsi_request_sense(tgt, 0, 0);
+
+ /*
+ * Do this twice, certain targets need it
+ */
+ if (tgt->dev_ops != &scsi_devsw[SCSI_CPU]) {
+ (void) scsi_start_unit(tgt, SCSI_CMD_SS_START, 0);
+ i = 0;
+ while (scsi_start_unit(tgt, SCSI_CMD_SS_START, 0) == SCSI_RET_RETRY) {
+ if (i++ == 5)
+ printf(".. not yet online ..");
+ delay(1000000);
+ if (i == 60) {
+ printf(" seems hopeless.");
+ break;
+ }
+ }
+ }
+
+ /*
+ * See if it is up and about
+ */
+ scsi_test_unit_ready(tgt, 0);
+
+ if (tgt->dev_ops->optimize != SCSI_OPTIMIZE_NULL)
+ (*tgt->dev_ops->optimize)(tgt);
+
+ tgt->flags |= TGT_FULLY_PROBED;
+
+ splx(s);
+}
+
+/*
+ * Probe routine:
+ * See if a device answers. Used AFTER autoconf.
+ *
+ * Implementation:
+ * First ask the HBA to see if anyone is there at all, then
+ * call the scsi_slave and scsi_attach routines with a fake ui.
+ */
+boolean_t
+scsi_probe( sc, tgt_ptr, target_id, ior)
+ scsi_softc_t *sc;
+ target_info_t **tgt_ptr;
+ int target_id;
+ io_req_t ior;
+{
+ struct bus_device ui;
+ target_info_t *tgt;
+
+ if (!sc->probe || target_id > 7 || target_id == sc->initiator_id)
+ return FALSE; /* sanity */
+
+ if (sc->target[target_id] == 0)
+ scsi_slave_alloc( sc->masterno, target_id, sc->hw_state);
+ tgt = sc->target[target_id];
+ tgt->flags = 0;/* we donno yet */
+ tgt->dev_ops = 0;
+
+ /* mildly enquire */
+ if (!(sc->probe)(tgt, ior))
+ goto fail;
+
+ /* There is something there, see what it is */
+ bzero(&ui, sizeof(ui));
+ ui.ctlr = sc->masterno;
+ ui.unit =
+ ui.slave = target_id;
+ ui.name = "";
+
+ /* this fails on the name for sure */
+ (void) scsi_slave( &ui, 0 /* brrrr */);
+ if ((tgt->flags & TGT_ALIVE) == 0)
+ goto fail;
+
+ {
+ struct bus_ctlr mi;
+
+ mi.unit = sc->masterno;
+ ui.mi = &mi;
+ printf("%s at slave %d ",
+ (*tgt->dev_ops->driver_name)(TRUE), target_id);
+ scsi_attach(&ui);
+ }
+
+ *tgt_ptr = tgt;
+ return TRUE;
+fail:
+ tgt->flags = 0;
+ return FALSE;
+}
+
+
+/*
+ * Watchdog routine:
+ * Issue a SCSI bus reset if a target holds up the
+ * bus for too long.
+ *
+ * Implementation:
+ * Each HBA that wants to use this should have a
+ * watchdog_t structure at the head of its hardware
+ * descriptor. This variable is set by this periodic
+ * routine and reset on bus activity. If it is not reset on
+ * time (say some ten seconds or so) we reset the
+ * SCSI bus.
+ * NOTE:
+ * An HBA must be ready to accept bus reset interrupts
+ * properly in order to use this.
+ */
+void scsi_watchdog(hw)
+ watchdog_t *hw;
+{
+ spl_t s = splbio();
+
+ switch (hw->watchdog_state) {
+ case SCSI_WD_EXPIRED:
+
+ /* double check first */
+ if (hw->nactive == 0) {
+ hw->watchdog_state = SCSI_WD_INACTIVE;
+ break;
+ }
+ if (scsi_debug)
+ printf("SCSI Watchdog expired\n");
+ hw->watchdog_state = SCSI_WD_INACTIVE;
+ (*hw->reset)(hw);
+ break;
+
+ case SCSI_WD_ACTIVE:
+
+ hw->watchdog_state = SCSI_WD_EXPIRED;
+ break;
+
+ case SCSI_WD_INACTIVE:
+
+ break;
+ }
+
+ /* do this here, fends against powered down devices */
+ if (scsi_watchdog_period != 0)
+ timeout((int(*)())scsi_watchdog, (char*)hw, scsi_watchdog_period * hz);
+
+ splx(s);
+}
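+
+/*
+ * Illustrative sketch of the arming protocol (the HBA side lives in the
+ * chip drivers, so the exact spots are indicative only): on bus activity
+ * with commands outstanding a driver re-arms with something like
+ *
+ *	hw->nactive++;
+ *	hw->watchdog_state = SCSI_WD_ACTIVE;
+ *
+ * and decrements nactive as commands complete.  If roughly two watchdog
+ * periods pass without re-arming while nactive is still non-zero, the
+ * routine above calls (*hw->reset)(hw) and the bus gets reset.
+ */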
+
+
+/*
+ * BusReset Notification:
+ * Called when the HBA sees a BusReset interrupt
+ *
+ * Implementation:
+ * Go through the list of targets, redo the synch
+ * negotiation, and restart whatever operation was
+ * in progress for that target.
+ */
+void scsi_bus_was_reset(sc)
+ scsi_softc_t *sc;
+{
+ register target_info_t *tgt;
+ int i;
+ /*
+ * Redo the synch negotiation
+ */
+ for (i = 0; i < 8; i++) {
+ io_req_t ior;
+ spl_t s;
+
+ if (i == sc->initiator_id)
+ continue;
+ tgt = sc->target[i];
+ if (!tgt || !(tgt->flags & TGT_ALIVE))
+ continue;
+
+ tgt->flags &= ~(TGT_DID_SYNCH|TGT_DISCONNECTED);
+#if 0
+ /* the standard does *not* imply this gets reset too */
+ tgt->sync_period = 0;
+ tgt->sync_offset = 0;
+#endif
+
+ /*
+ * retry the synch negotiation
+ */
+ ior = tgt->ior;
+ tgt->ior = 0;
+ printf(".. tgt %d ", tgt->target_id);
+ if (BGET(scsi_no_synchronous_xfer,(unsigned char)sc->masterno,tgt->target_id))
+ tgt->flags |= TGT_DID_SYNCH;
+ else {
+ s = spl0();
+ scsi_test_unit_ready(tgt, 0);
+ splx(s);
+ }
+ tgt->ior = ior;
+ }
+
+ /*
+ * Notify each target of the accident
+ */
+ for (i = 0; i < 8; i++) {
+ if (i == sc->initiator_id)
+ continue;
+ tgt = sc->target[i];
+ if (!tgt)
+ continue;
+ tgt->done = SCSI_RET_ABORTED|SCSI_RET_RETRY;
+ if (tgt->ior)
+ (*tgt->dev_ops->restart)( tgt, TRUE);
+ }
+
+ printf("%s", " reset complete\n");
+}
+
+#endif /* NSCSI > 0 */
diff --git a/scsi/scsi.h b/scsi/scsi.h
new file mode 100644
index 00000000..9aa0bee0
--- /dev/null
+++ b/scsi/scsi.h
@@ -0,0 +1,599 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Definitions of the SCSI-1 Standard
+ */
+
+#ifndef _SCSI_SCSI_H_
+#define _SCSI_SCSI_H_
+
+#include <nscsi.h>
+#include <scsi/scsi_endian.h>
+
+/*
+ * Bus phases
+ */
+
+#define SCSI_IO 0x01 /* Input/Output */
+#define SCSI_CD 0x02 /* Command/Data */
+#define SCSI_MSG 0x04 /* Message */
+
+#define SCSI_PHASE_MASK 0x07
+#define SCSI_PHASE(x) ((x)&SCSI_PHASE_MASK)
+
+#define SCSI_PHASE_DATAO 0x00 /* 0 */
+#define SCSI_PHASE_DATAI SCSI_IO /* 1 */
+#define SCSI_PHASE_CMD SCSI_CD /* 2 */
+#define SCSI_PHASE_STATUS (SCSI_CD|SCSI_IO) /* 3 */
+ /* 4..5 ANSI reserved */
+#define SCSI_PHASE_MSG_OUT (SCSI_MSG|SCSI_CD) /* 6 */
+#define SCSI_PHASE_MSG_IN (SCSI_MSG|SCSI_CD|SCSI_IO) /* 7 */
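+
+/*
+ * Illustrative sketch (bus_status stands for whatever register a given
+ * chip exposes, it is not defined here): an HBA reads the three phase
+ * lines and classifies them with the macro above, e.g.
+ *
+ *	switch (SCSI_PHASE(bus_status)) {
+ *	case SCSI_PHASE_CMD:	send the CDB
+ *	case SCSI_PHASE_STATUS:	read the status byte
+ *	case SCSI_PHASE_MSG_IN:	read a message byte
+ *	}
+ */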
+
+/*
+ * Single byte messages
+ *
+ * originator: I-nitiator T-arget
+ * T-support: M-andatory O-ptional
+ */
+
+#define SCSI_COMMAND_COMPLETE 0x00 /* M T */
+#define SCSI_EXTENDED_MESSAGE 0x01 /* IT */
+#define SCSI_SAVE_DATA_POINTER 0x02 /* O T */
+#define SCSI_RESTORE_POINTERS 0x03 /* O T */
+#define SCSI_DISCONNECT 0x04 /* O T */
+#define SCSI_I_DETECTED_ERROR 0x05 /* M I */
+#define SCSI_ABORT 0x06 /* M I */
+#define SCSI_MESSAGE_REJECT 0x07 /* M IT */
+#define SCSI_NOP 0x08 /* M I */
+#define SCSI_MSG_PARITY_ERROR 0x09 /* M I */
+#define SCSI_LNKD_CMD_COMPLETE 0x0a /* O T */
+#define SCSI_LNKD_CMD_COMPLETE_F 0x0b /* O T */
+#define SCSI_BUS_DEVICE_RESET 0x0c /* M I */
+ /* 0x0d..0x11 scsi2 */
+ /* 0x12..0x1f reserved */
+#define SCSI_IDENTIFY 0x80 /* IT */
+# define SCSI_IFY_ENABLE_DISCONNECT 0x40 /* I */
+# define SCSI_IFY_LUNTAR 0x20 /* IT */
+# define SCSI_IFY_LUN_MASK 0x07 /* IT */
+
+
+/* Message codes 0x30..0x7f are reserved */
+
+/*
+ * Extended messages, codes and formats
+ */
+
+#define SCSI_MODIFY_DATA_PTR 0x00 /* T */
+typedef struct {
+ unsigned char xtn_msg_tag; /* const 0x01 */
+ unsigned char xtn_msg_len; /* const 0x05 */
+ unsigned char xtn_msg_code; /* const 0x00 */
+ unsigned char xtn_msg_arg_1000; /* MSB, signed 2cmpl */
+ unsigned char xtn_msg_arg_0200;
+ unsigned char xtn_msg_arg_0030;
+ unsigned char xtn_msg_arg_0004; /* LSB */
+} scsi_mod_ptr_t;
+
+#define SCSI_SYNC_XFER_REQUEST 0x01 /* IT */
+typedef struct {
+ unsigned char xtn_msg_tag; /* const 0x01 */
+ unsigned char xtn_msg_len; /* const 0x03 */
+ unsigned char xtn_msg_code; /* const 0x01 */
+ unsigned char xtn_msg_xfer_period; /* times 4nsecs */
+ unsigned char xtn_msg_xfer_offset; /* pending ack window */
+#define SCSI_SYNCH_XFER_OFFANY 0xff /* T unlimited */
+} scsi_synch_xfer_req_t;
+
+#define SCSI_XTN_IDENTIFY 0x02 /* IT -2 */
+typedef struct {
+ unsigned char xtn_msg_tag; /* const 0x01 */
+ unsigned char xtn_msg_len; /* const 0x02 */
+ unsigned char xtn_msg_code; /* const 0x02 */
+ unsigned char xtn_msg_sublun;
+} scsi_xtn_identify_t;
+
+ /* 0x03..0x7f reserved */
+
+#define SCSI_XTN_VENDOR_UQE 0x80 /* vendor unique bit */
+typedef struct {
+ unsigned char xtn_msg_tag; /* const 0x01 */
+ unsigned char xtn_msg_len; /* args' len+1 (0-->256)*/
+ unsigned char xtn_msg_code; /* const 0x80..0xff */
+ unsigned char xtn_msg_args[1]; /* 0..255 bytes */
+} scsi_xtn_vedor_unique_t;
+
+
+/*
+ * Commands, generic structures
+ */
+
+/* SIX byte commands */
+typedef struct {
+ unsigned char scsi_cmd_code; /* group(7..5) and command(4..1) */
+#define SCSI_CODE_GROUP 0xe0
+#define SCSI_CODE_CMD 0x1f
+ unsigned char scsi_cmd_lun_and_lba1; /* lun(7..5) and block# msb[20..16] */
+#define SCSI_LUN_MASK 0xe0
+#define SCSI_LBA_MASK 0x1f
+#define SCSI_LUN_SHIFT 5
+ unsigned char scsi_cmd_lba2; /* block#[15.. 8] */
+ unsigned char scsi_cmd_lba3; /* block#[ 7.. 0] */
+ unsigned char scsi_cmd_xfer_len; /* if required */
+ unsigned char scsi_cmd_ctrl_byte; /* contains: */
+#define SCSI_CTRL_VUQ 0xc0 /* vendor unique bits */
+#define SCSI_CTRL_RESVD 0x3c /* reserved, mbz */
+#define SCSI_CTRL_FLAG 0x02 /* send a complete_with_flag at end */
+#define SCSI_CTRL_LINK 0x01 /* link this command with next */
+} scsi_command_group_0;
+
+/* TEN byte commands */
+typedef struct {
+ unsigned char scsi_cmd_code; /* group(7..5) and command(4..1) */
+ unsigned char scsi_cmd_lun_and_relbit;/* lun(7..5) and RelAdr(0) */
+#define SCSI_RELADR 0x01
+ unsigned char scsi_cmd_lba1; /* block#[31..24] */
+ unsigned char scsi_cmd_lba2; /* block#[23..16] */
+ unsigned char scsi_cmd_lba3; /* block#[15.. 8] */
+ unsigned char scsi_cmd_lba4; /* block#[ 7.. 0] */
+ unsigned char scsi_cmd_xxx; /* reserved, mbz */
+ unsigned char scsi_cmd_xfer_len_1; /* if required */
+ unsigned char scsi_cmd_xfer_len_2; /* if required */
+ unsigned char scsi_cmd_ctrl_byte; /* see above */
+} scsi_command_group_1,
+ scsi_command_group_2;
+
+/* TWELVE byte commands */
+typedef struct {
+ unsigned char scsi_cmd_code; /* group(7..5) and command(4..1) */
+ unsigned char scsi_cmd_lun_and_relbit;/* lun(7..5) and RelAdr(0) */
+ unsigned char scsi_cmd_lba1; /* block#[31..24] */
+ unsigned char scsi_cmd_lba2; /* block#[23..16] */
+ unsigned char scsi_cmd_lba3; /* block#[15.. 8] */
+ unsigned char scsi_cmd_lba4; /* block#[ 7.. 0] */
+ unsigned char scsi_cmd_xfer_len_1; /* if required */
+ unsigned char scsi_cmd_xfer_len_2; /* if required */
+ unsigned char scsi_cmd_xfer_len_3; /* if required */
+ unsigned char scsi_cmd_xfer_len_4; /* if required */
+ unsigned char scsi_cmd_xxx1; /* reserved, mbz */
+ unsigned char scsi_cmd_ctrl_byte; /* see above */
+} scsi_command_group_5;
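+
+/*
+ * Illustrative sketch of filling a six byte CDB with the fields above
+ * (cmd_buffer, lun, blockno and nblocks stand for the caller's values):
+ *
+ *	scsi_command_group_0 *c = (scsi_command_group_0 *) cmd_buffer;
+ *	c->scsi_cmd_code	 = SCSI_CMD_READ;
+ *	c->scsi_cmd_lun_and_lba1 = (lun << SCSI_LUN_SHIFT) |
+ *				   ((blockno >> 16) & SCSI_LBA_MASK);
+ *	c->scsi_cmd_lba2	 = (blockno >>  8) & 0xff;
+ *	c->scsi_cmd_lba3	 =  blockno & 0xff;
+ *	c->scsi_cmd_xfer_len	 = nblocks;
+ *	c->scsi_cmd_ctrl_byte	 = 0;
+ *
+ * blockno must fit in 21 bits (SCSI_CMD_READ_MAX_LBA below), otherwise
+ * the ten byte group 1 form is needed.
+ */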
+
+
+/*
+ * Commands, codes and aliases
+ */
+
+ /* GROUP 0 */
+#define SCSI_CMD_TEST_UNIT_READY 0x00 /* O all 2M all */
+#define scsi_cmd_test_unit_ready_t scsi_command_group_0
+
+#define SCSI_CMD_REZERO_UNIT 0x01 /* O disk worm rom */
+#define SCSI_CMD_REWIND 0x01 /* M tape */
+#define scsi_cmd_rewind_t scsi_command_group_0
+#define scsi_cmd_rezero_t scsi_command_group_0
+# define SCSI_CMD_REW_IMMED 0x01
+
+ /* 0x02 vendor unique */
+
+#define SCSI_CMD_REQUEST_SENSE 0x03 /* M all */
+#define scsi_cmd_request_sense_t scsi_command_group_0
+# define scsi_cmd_allocation_length scsi_cmd_xfer_len
+
+#define SCSI_CMD_FORMAT_UNIT 0x04 /* M disk O prin */
+#define scsi_cmd_format_t scsi_command_group_0
+# define SCSI_CMD_FMT_FMTDATA 0x10
+# define SCSI_CMD_FMT_CMPLIST 0x08
+# define SCSI_CMD_FMT_LIST_TYPE 0x07
+# define scsi_cmd_intleave1 scsi_cmd_lba3
+# define scsi_cmd_intleave2 scsi_cmd_xfer_len
+
+#define SCSI_CMD_READ_BLOCK_LIMITS 0x05 /* E tape */
+#define scsi_cmd_block_limits_t scsi_command_group_0
+
+ /* 0x06 vendor unique */
+
+#define SCSI_CMD_REASSIGN_BLOCKS 0x07 /* O disk worm */
+#define scsi_cmd_reassign_blocks_t scsi_command_group_0
+
+#define SCSI_CMD_READ 0x08 /* M disk tape O worm rom */
+#define SCSI_CMD_RECEIVE 0x08 /* O proc */
+#define SCSI_CMD_GET_MESSAGE 0x08 /* M comm */
+#define scsi_cmd_read_t scsi_command_group_0
+# define SCSI_CMD_TP_FIXED 0x01 /* tape */
+# define scsi_cmd_tp_len1 scsi_cmd_lba2
+# define scsi_cmd_tp_len2 scsi_cmd_lba3
+# define scsi_cmd_tp_len3 scsi_cmd_xfer_len
+ /* largest addressable blockno */
+#define SCSI_CMD_READ_MAX_LBA ((1 << 21) - 1)
+
+ /* 0x09 vendor unique */
+
+#define SCSI_CMD_WRITE 0x0a /* M disk tape O worm */
+#define SCSI_CMD_PRINT 0x0a /* M prin */
+#define SCSI_CMD_SEND 0x0a /* M proc */
+#define SCSI_CMD_SEND_MESSAGE 0x0a /* M comm */
+#define scsi_cmd_write_t scsi_command_group_0
+
+#define SCSI_CMD_SEEK 0x0b /* O disk worm rom */
+#define SCSI_CMD_TRACK_SELECT 0x0b /* O tape */
+#define SCSI_CMD_SLEW_AND_PRINT 0x0b /* O prin */
+#define scsi_cmd_seek_t scsi_command_group_0
+# define SCSI_CMD_SLW_CHANNEL 0x01
+# define scsi_cmd_tp_trackno scsi_cmd_xfer_len
+# define scsi_cmd_slew_value scsi_cmd_lba2
+
+ /* 0x0c..0x0e vendor unique */
+
+#define SCSI_CMD_READ_REVERSE 0x0f /* O tape */
+#define scsi_cmd_rev_read_t scsi_command_group_0
+
+#define SCSI_CMD_WRITE_FILEMARKS 0x10 /* M tape */
+#define SCSI_CMD_FLUSH_BUFFER 0x10 /* M prin */
+#define scsi_cmd_write_fil_t scsi_command_group_0
+
+#define SCSI_CMD_SPACE 0x11 /* O tape */
+#define scsi_cmd_space_t scsi_command_group_0
+# define SCSI_CMD_SP_BLOCKS 0x00
+# define SCSI_CMD_SP_FIL 0x01
+# define SCSI_CMD_SP_SEQ_FIL 0x02
+# define SCSI_CMD_SP_EOT 0x03
+
+#define SCSI_CMD_INQUIRY 0x12 /* E all (2M all) */
+#define scsi_cmd_inquiry_t scsi_command_group_0
+# define SCSI_CMD_INQ_EVPD 0x01 /* 2 */
+# define scsi_cmd_page_code scsi_cmd_lba2 /* 2 */
+
+#define SCSI_CMD_VERIFY_0 0x13 /* O tape */
+#define scsi_cmd_verify_t scsi_command_group_0
+# define SCSI_CMD_VFY_BYTCMP 0x02
+
+#define SCSI_CMD_RECOVER_BUFFERED_DATA 0x14 /* O tape prin */
+#define scsi_cmd_recover_buffer_t scsi_command_group_0
+
+#define SCSI_CMD_MODE_SELECT 0x15 /* O disk tape prin worm rom */
+# define SCSI_CMD_MSL_PF 0x10
+# define SCSI_CMD_MSL_SP 0x01
+#define scsi_cmd_mode_select_t scsi_command_group_0
+
+#define SCSI_CMD_RESERVE 0x16 /* O disk tape prin worm rom */
+#define scsi_cmd_reserve_t scsi_command_group_0
+# define SCSI_CMD_RES_3RDPTY 0x10
+# define SCSI_CMD_RES_3RDPTY_DEV 0x0e
+# define SCSI_CMD_RES_EXTENT 0x01
+# define scsi_cmd_reserve_id scsi_cmd_lba2
+# define scsi_cmd_extent_llen1 scsi_cmd_lba3
+# define scsi_cmd_extent_llen2 scsi_cmd_xfer_len
+
+#define SCSI_CMD_RELEASE 0x17 /* O disk tape prin worm rom */
+#define scsi_cmd_release_t scsi_command_group_0
+
+#define SCSI_CMD_COPY 0x18 /* O all */
+#define scsi_cmd_copy_t scsi_command_group_0
+# define SCSI_CMD_CPY_PAD 0x01 /* 2 */
+# define scsi_cmd_paraml_len0 scsi_cmd_lba2
+# define scsi_cmd_paraml_len1 scsi_cmd_lba3
+# define scsi_cmd_paraml_len2 scsi_cmd_xfer_len
+
+#define SCSI_CMD_ERASE 0x19 /* O tape */
+#define scsi_cmd_erase_t scsi_command_group_0
+# define SCSI_CMD_ER_LONG 0x01
+
+#define SCSI_CMD_MODE_SENSE 0x1a /* O disk tape prin worm rom */
+#define scsi_cmd_mode_sense_t scsi_command_group_0
+# define scsi_cmd_ms_pagecode scsi_cmd_lba2
+
+#define SCSI_CMD_START_STOP_UNIT 0x1b /* O disk prin worm rom */
+#define SCSI_CMD_LOAD_UNLOAD 0x1b /* O tape */
+#define scsi_cmd_start_t scsi_command_group_0
+# define SCSI_CMD_SS_IMMED 0x01
+# define scsi_cmd_ss_flags scsi_cmd_xfer_len
+# define SCSI_CMD_SS_START 0x01
+# define SCSI_CMD_SS_RETEN 0x02
+# define SCSI_CMD_SS_RETAIN 0x01
+# define SCSI_CMD_SS_EJECT 0x02
+
+#define SCSI_CMD_RECEIVE_DIAG_RESULTS 0x1c /* O all */
+#define scsi_cmd_receive_diag_t scsi_command_group_0
+# define scsi_cmd_allocation_length1 scsi_cmd_lba3
+# define scsi_cmd_allocation_length2 scsi_cmd_xfer_len
+
+#define SCSI_CMD_SEND_DIAGNOSTICS 0x1d /* O all */
+#define scsi_cmd_send_diag_t scsi_command_group_0
+# define SCSI_CMD_DIAG_SELFTEST 0x04
+# define SCSI_CMD_DIAG_DEVOFFL 0x02
+# define SCSI_CMD_DIAG_UNITOFFL 0x01
+
+#define SCSI_CMD_PREVENT_ALLOW_REMOVAL 0x1e /* O disk tape worm rom */
+#define scsi_cmd_medium_removal_t scsi_command_group_0
+# define scsi_cmd_pa_prevent scsi_cmd_xfer_len /* 0x1 */
+
+ /* 0x1f reserved */
+
+ /* GROUP 1 */
+ /* 0x20..0x24 vendor unique */
+
+#define SCSI_CMD_READ_CAPACITY 0x25 /* E disk worm rom */
+#define scsi_cmd_read_capacity_t scsi_command_group_1
+# define scsi_cmd_rcap_flags scsi_cmd_xfer_len_2
+# define SCSI_CMD_RCAP_PMI 0x01
+
+ /* 0x26..0x27 vendor unique */
+
+#define SCSI_CMD_LONG_READ 0x28 /* E disk M worm rom */
+#define scsi_cmd_long_read_t scsi_command_group_1
+
+ /* 0x29 vendor unique */
+
+#define SCSI_CMD_LONG_WRITE 0x2a /* E disk M worm */
+#define scsi_cmd_long_write_t scsi_command_group_1
+
+#define SCSI_CMD_LONG_SEEK 0x2b /* O disk worm rom */
+#define scsi_cmd_long_seek_t scsi_command_group_1
+
+ /* 0x2c..0x2d vendor unique */
+
+#define SCSI_CMD_WRITE_AND_VERIFY 0x2e /* O disk worm */
+#define scsi_cmd_write_vfy_t scsi_command_group_1
+# define SCSI_CMD_VFY_BYTCHK 0x02
+
+#define SCSI_CMD_VERIFY_1 0x2f /* O disk worm rom */
+#define scsi_cmd_verify_long_t scsi_command_group_1
+# define SCSI_CMD_VFY_BLKVFY 0x04
+
+#define SCSI_CMD_SEARCH_HIGH 0x30 /* O disk worm rom */
+#define scsi_cmd_search_t scsi_command_group_1
+# define SCSI_CMD_SRCH_INVERT 0x10
+# define SCSI_CMD_SRCH_SPNDAT 0x02
+
+#define SCSI_CMD_SEARCH_EQUAL 0x31 /* O disk worm rom */
+#define SCSI_CMD_SEARCH_LOW 0x32 /* O disk worm rom */
+
+#define SCSI_CMD_SET_LIMITS 0x33 /* O disk worm rom */
+#define scsi_cmd_set_limits_t scsi_command_group_1
+# define SCSI_CMD_SL_RDINH 0x02
+# define SCSI_CMD_SL_WRINH 0x01
+
+ /* 0x34..0x38 reserved */
+
+#define SCSI_CMD_COMPARE 0x39 /* O all */
+#define scsi_cmd_compare_t scsi_command_group_1
+# define scsi_cmd_1_paraml1 scsi_cmd_lba2
+# define scsi_cmd_1_paraml2 scsi_cmd_lba3
+# define scsi_cmd_1_paraml3 scsi_cmd_lba4
+
+#define SCSI_CMD_COPY_AND_VERIFY 0x3a /* O all */
+#define scsi_cmd_copy_vfy_t scsi_command_group_1
+# define SCSI_CMD_CPY_BYTCHK 0x02
+
+ /* 0x3b..0x3f reserved */
+
+ /* GROUP 2 */
+ /* 0x40..0x5f reserved */
+
+ /* GROUP 3 */
+ /* 0x60..0x7f reserved */
+
+ /* GROUP 4 */
+ /* 0x80..0x9f reserved */
+
+ /* GROUP 5 */
+ /* 0xa0..0xaf vendor unique */
+ /* 0xb0..0xbf reserved */
+
+ /* GROUP 6 */
+ /* 0xc0..0xdf vendor unique */
+
+ /* GROUP 7 */
+ /* 0xe0..0xff vendor unique */
+
+
+/*
+ * Command-specific results and definitions
+ */
+
+/* inquiry data */
+typedef struct {
+ unsigned char periph_type;
+#define SCSI_DISK 0x00
+#define SCSI_TAPE 0x01
+#define SCSI_PRINTER 0x02
+#define SCSI_CPU 0x03
+#define SCSI_WORM 0x04
+#define SCSI_CDROM 0x05
+
+ BITFIELD_2( unsigned char,
+ device_type : 7,
+ rmb : 1);
+ BITFIELD_3( unsigned char,
+ ansi : 3,
+ ecma : 3,
+ iso : 2);
+ unsigned char reserved;
+ unsigned char length;
+ unsigned char param[1];
+} scsi_inquiry_data_t;
+
+#define SCSI_INQ_STD_DATA -1
+
+/* request sense data */
+#define SCSI_SNS_NOSENSE 0x0
+#define SCSI_SNS_RECOVERED 0x1
+#define SCSI_SNS_NOTREADY 0x2
+#define SCSI_SNS_MEDIUM_ERR 0x3
+#define SCSI_SNS_HW_ERR 0x4
+#define SCSI_SNS_ILLEGAL_REQ 0x5
+#define SCSI_SNS_UNIT_ATN 0x6
+#define SCSI_SNS_PROTECT 0x7
+#define SCSI_SNS_BLANK_CHK 0x8
+#define SCSI_SNS_VUQE 0x9
+#define SCSI_SNS_COPY_ABRT 0xa
+#define SCSI_SNS_ABORTED 0xb
+#define SCSI_SNS_EQUAL 0xc
+#define SCSI_SNS_VOLUME_OVFL 0xd
+#define SCSI_SNS_MISCOMPARE 0xe
+#define SCSI_SNS_RESERVED 0xf
+
+typedef struct {
+ BITFIELD_3( unsigned char,
+ error_code : 4,
+ error_class : 3,
+ addr_valid : 1);
+# define SCSI_SNS_XTENDED_SENSE_DATA 0x7 /* e.g. error_class=7 */
+ union {
+ struct {
+ BITFIELD_2(unsigned char,
+ lba_msb : 5,
+ vuqe : 3);
+ unsigned char lba;
+ unsigned char lba_lsb;
+ } non_xtnded;
+ struct {
+ unsigned char segment_number;
+ BITFIELD_5(unsigned char,
+ sense_key : 4,
+ res : 1,
+ ili : 1,
+ eom : 1,
+ fm : 1);
+ unsigned char info0;
+ unsigned char info1;
+ unsigned char info2;
+ unsigned char info3;
+ unsigned char add_len;
+ unsigned char add_bytes[1];/* VARSIZE */
+ } xtended;
+ } u;
+} scsi_sense_data_t;
+
+
+/* mode select params */
+typedef struct {
+ unsigned char reserved1;
+ unsigned char medium_type;
+ BITFIELD_3(unsigned char,
+ speed:4, /* special for tapes, reserved in SCSI-1 */
+ buffer_mode:3,
+ reserved2:1);
+ unsigned char desc_len;
+ struct scsi_mode_parm_blockdesc {
+ unsigned char density_code;
+ unsigned char nblocks1;
+ unsigned char nblocks2;
+ unsigned char nblocks3;
+ unsigned char reserved;
+ unsigned char reclen1;
+ unsigned char reclen2;
+ unsigned char reclen3;
+ } descs[1]; /* VARSIZE, really */
+} scsi_mode_select_param_t;
+
+/* mode sense data (TAPE) */
+typedef struct {
+ unsigned char data_len;
+ unsigned char medium_type;
+ BITFIELD_3(unsigned char,
+ speed : 4,
+ buffered_mode : 3,
+ wp : 1);
+ unsigned char bdesc_len;
+ struct {
+ unsigned char density_code;
+ unsigned char no_blks_msb;
+ unsigned char no_blks;
+ unsigned char no_blks_lsb;
+ unsigned char reserved;
+ unsigned char blen_msb;
+ unsigned char blen;
+ unsigned char blen_lsb;
+ } bdesc[1]; /* VARSIZE */
+ /* vuqe data might follow */
+} scsi_mode_sense_data_t;
+
+/* read capacity data */
+typedef struct {
+ unsigned char lba1;
+ unsigned char lba2;
+ unsigned char lba3;
+ unsigned char lba4;
+ unsigned char blen1;
+ unsigned char blen2;
+ unsigned char blen3;
+ unsigned char blen4;
+} scsi_rcap_data_t;
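+
+/*
+ * Illustrative sketch: READ CAPACITY data is big-endian on the wire, so
+ * a driver assembles it as
+ *
+ *	scsi_rcap_data_t *rc = (scsi_rcap_data_t *) reply_buffer;
+ *	unsigned int last_lba = (rc->lba1 << 24) | (rc->lba2 << 16) |
+ *				(rc->lba3 <<  8) |  rc->lba4;
+ *	unsigned int blklen   = (rc->blen1 << 24) | (rc->blen2 << 16) |
+ *				(rc->blen3 <<  8) |  rc->blen4;
+ *
+ * where last_lba is the address of the last block on the unit and
+ * reply_buffer is wherever the HBA put the returned data.
+ */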
+
+/* defect list(s) */
+typedef struct {
+ unsigned char res1;
+ unsigned char res2;
+ unsigned char list_len_msb;
+ unsigned char list_len_lsb;
+ struct {
+ unsigned char blockno_msb;
+ unsigned char blockno_sb1;
+ unsigned char blockno_sb2;
+ unsigned char blockno_lsb;
+ } defects[1]; /* VARSIZE */
+} scsi_Ldefect_data_t;
+
+/* block limits (tape) */
+typedef struct {
+ unsigned char res1;
+ unsigned char maxlen_msb;
+ unsigned char maxlen_sb;
+ unsigned char maxlen_lsb;
+ unsigned char minlen_msb;
+ unsigned char minlen_lsb;
+} scsi_blimits_data_t;
+
+/*
+ * Status byte (a-la scsi1)
+ */
+
+typedef union {
+ struct {
+ BITFIELD_4( unsigned char,
+ scsi_status_vendor_uqe1:1,
+ scsi_status_code:4,
+ scsi_status_vendor_uqe:2,
+ scsi_status_reserved:1);
+# define SCSI_ST_GOOD 0x00 /* scsi_status_code values */
+# define SCSI_ST_CHECK_CONDITION 0x01
+# define SCSI_ST_CONDITION_MET 0x02
+# define SCSI_ST_BUSY 0x04
+# define SCSI_ST_INT_GOOD 0x08
+# define SCSI_ST_INT_MET 0x0a
+# define SCSI_ST_RES_CONFLICT 0x0c
+ /* anything else is reserved */
+ } st;
+ unsigned char bits;
+} scsi_status_byte_t;
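+
+/*
+ * Illustrative sketch (raw_status stands for the byte the chip latched,
+ * it is not defined here):
+ *
+ *	scsi_status_byte_t status;
+ *	status.bits = raw_status;
+ *	if (status.st.scsi_status_code == SCSI_ST_CHECK_CONDITION)
+ *		a REQUEST SENSE should follow to learn why
+ */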
+
+
+#endif /* _SCSI_SCSI_H_ */
diff --git a/scsi/scsi2.h b/scsi/scsi2.h
new file mode 100644
index 00000000..93a5a766
--- /dev/null
+++ b/scsi/scsi2.h
@@ -0,0 +1,447 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi2.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Additions and changes of the SCSI-II standard viz SCSI-I
+ */
+
+#ifndef _SCSI_SCSI2_H_
+#define _SCSI_SCSI2_H_
+
+#include <scsi/scsi_endian.h>
+
+/*
+ * Single byte messages
+ *
+ * originator: I-nitiator T-arget
+ * T-support: M-andatory O-ptional
+ */
+
+#define SCSI_ABORT_TAG 0x0d /* O I 2 */
+#define SCSI_CLEAR_QUEUE 0x0e /* O I 2 */
+#define SCSI_INITIATE_RECOVERY 0x0f /* O IT2 */
+#define SCSI_RELEASE_RECOVERY 0x10 /* O I 2 */
+#define SCSI_TERMINATE_IO_PROCESS 0x11 /* O I 2 */
+
+/*
+ * Two byte messages
+ */
+#define SCSI_SIMPLE_QUEUE_TAG 0x20 /* O IT2 */
+#define SCSI_HEADOF_QUEUE_TAG 0x21 /* O I 2 */
+#define SCSI_ORDERED_QUEUE_TAG 0x22 /* O I 2 */
+#define SCSI_IGNORE_WIDE_RESIDUE 0x23 /* O T2 */
+ /* 0x24..0x2f reserved */
+
+/*
+ * Extended messages, codes and formats
+ */
+
+#define SCSI_WIDE_XFER_REQUEST 0x03 /* IT 2 */
+typedef struct {
+ unsigned char xtn_msg_tag; /* const 0x01 */
+ unsigned char xtn_msg_len; /* const 0x02 */
+ unsigned char xtn_msg_code; /* const 0x03 */
+ unsigned char xtn_msg_xfer_width;
+} scsi_wide_xfer_t;
+
+/*
+ * NOTE: some command-specific mods and extensions
+ * are actually defined in the scsi.h file for
+ * readability reasons
+ */
+
+ /* GROUP 1 */
+
+#define SCSI_CMD_READ_DEFECT_DATA 0x37 /* O2 disk opti */
+typedef scsi_command_group_1 scsi_cmd_read_defect_t;
+# define SCSI_CMD_RDD_LIST_TYPE 0x07
+# define SCSI_CMD_RDD_GLIST 0x08
+# define SCSI_CMD_RDD_PLIST 0x10
+
+#define SCSI_CMD_WRITE_BUFFER 0x3b /* O2 all */
+typedef scsi_command_group_1 scsi_cmd_write_buffer_t;
+# define SCSI_CMD_BUF_MODE 0x07
+# define scsi_cmd_buf_id scs_cmd_lba1
+# define scsi_cmd_buf_offset1 scs_cmd_lba2
+# define scsi_cmd_buf_offset2 scs_cmd_lba3
+# define scsi_cmd_buf_offset3 scs_cmd_lba4
+# define scsi_cmd_buf_alloc1 scs_cmd_xxx
+# define scsi_cmd_buf_alloc2 scs_cmd_xfer_len_1
+# define scsi_cmd_buf_alloc3 scs_cmd_xfer_len_2
+
+#define SCSI_CMD_READ_BUFFER 0x3c /* O2 all */
+#define scsi_cmd_read_buffer_t scsi_command_group_1
+
+ /* GROUP 2 */
+
+#define SCSI_CMD_CHANGE_DEFINITION 0x40 /* O2 all */
+#define scsi_cmd_change_def_t scsi_command_group_2
+# define scsi_cmd_chg_save scsi_cmd_lba1
+# define scsi_cmd_chg_definition scsi_cmd_lba2
+# define SCSI_CMD_CHG_CURRENT 0x00
+# define SCSI_CMD_CHG_SCSI_1 0x01
+# define SCSI_CMD_CHG_CCS 0x02
+# define SCSI_CMD_CHG_SCSI_2 0x03
+
+ /* 0x41 reserved */
+
+#define SCSI_CMD_READ_SUBCH 0x42 /* O2 rom */
+#define scsi_cmd_read_subch_t scsi_command_group_2
+# define SCSI_CMD_CD_MSF 0x02
+# define SCSI_CMD_RS_SUBQ 0x40
+# define scsi_cmd_rs_format scsi_cmd_lba2
+# define SCSI_CMD_RS_FMT_SUBQ 0x00
+# define SCSI_CMD_RS_FMT_CURPOS 0x01
+# define SCSI_CMD_RS_FMT_CATALOG 0x02
+# define SCSI_CMD_RS_FMT_ISRC 0x03
+# define scsi_cmd_rs_trackno scsi_cmd_xxx
+
+
+#define SCSI_CMD_READ_TOC 0x43 /* O2 rom */
+#define scsi_cmd_read_toc_t scsi_command_group_2
+# define scsi_cmd_rtoc_startT scsi_cmd_xxx
+
+#define SCSI_CMD_READ_HEADER 0x44 /* O2 rom */
+#define scsi_cmd_read_header_t scsi_command_group_2
+
+#define SCSI_CMD_PLAY_AUDIO 0x45 /* O2 rom */
+#define scsi_cmd_play_audio_t scsi_command_group_2
+
+#define SCSI_CMD_PLAY_AUDIO_MSF 0x47 /* O2 rom */
+#define scsi_cmd_play_audio_msf_t scsi_command_group_2
+# define scsi_cmd_pamsf_startM scsi_cmd_lba2
+# define scsi_cmd_pamsf_startS scsi_cmd_lba3
+# define scsi_cmd_pamsf_startF scsi_cmd_lba4
+# define scsi_cmd_pamsf_endM scsi_cmd_xxx
+# define scsi_cmd_pamsf_endS scsi_cmd_xfer_len_1
+# define scsi_cmd_pamsf_endF scsi_cmd_xfer_len_2
+
+#define SCSI_CMD_PLAY_AUDIO_TI 0x48 /* O2 rom */
+#define scsi_cmd_play_audio_ti_t scsi_command_group_2
+# define scsi_cmd_pati_startT scsi_cmd_lba3
+# define scsi_cmd_pati_startI scsi_cmd_lba4
+# define scsi_cmd_pati_endT scsi_cmd_xfer_len_1
+# define scsi_cmd_pati_endI scsi_cmd_xfer_len_2
+
+#define SCSI_CMD_PLAY_AUDIO_TR 0x49 /* O2 rom */
+#define scsi_cmd_play_audio_tr_t scsi_command_group_2
+# define scsi_cmd_patr_startT scsi_cmd_xxx
+
+
+#define SCSI_CMD_PAUSE_RESUME 0x4b /* O2 rom */
+#define scsi_cmd_pausres_t scsi_command_group_2
+# define SCSI_CMD_PAUSRES_RESUME 0x01
+# define scsi_cmd_pausres_res scsi_cmd_xfer_len_2
+
+#define SCSI_CMD_LOG_SELECT 0x4c /* O2 all */
+#define scsi_cmd_logsel_t scsi_command_group_2
+# define SCSI_CMD_LOG_SP 0x01
+# define SCSI_CMD_LOG_PCR 0x02
+# define scsi_cmd_log_page_control scsi_cmd_lba1
+
+#define SCSI_CMD_LOG_SENSE 0x4d /* O2 all */
+#define scsi_cmd_logsense_t scsi_command_group_2
+# define SCSI_CMD_LOG_PPC 0x02
+# define scsi_cmd_log_page_code scsi_cmd_lba1
+# define scsi_cmd_log_param_ptr1 scsi_cmd_lba4
+# define scsi_cmd_log_param_ptr2 scsi_cmd_xxx
+
+
+ /* 0x4e..0x54 reserved */
+
+#define SCSI_CMD_MODE_SELECT_2 0x55 /* Z2 */
+#define scsi_cmd_mode_select_long_t scsi_command_group_2
+# define SCSI_CMD_MSL2_PF 0x10
+# define SCSI_CMD_MSL2_SP 0x01
+
+ /* 0x56..0x59 reserved */
+
+#define SCSI_CMD_MODE_SENSE_2 0x5a /* Z2 */
+#define scsi_cmd_mode_sense_long_t scsi_command_group_2
+# define SCSI_CMD_MSS2_DBD 0x08
+
+ /* 0x5b..0x5f reserved */
+
+ /* GROUP 5 */
+
+#define SCSI_CMD_PLAY_AUDIO_LONG 0xa5 /* O2 rom */
+#define scsi_cmd_play_audio_l_t scsi_command_group_5
+
+#define SCSI_CMD_PLAY_AUDIO_TR_LONG 0xa9 /* O2 rom */
+#define scsi_cmd_play_audio_tr_l_t scsi_command_group_5
+# define scsi_cmd_patrl_startT scsi_cmd_xxx1
+
+
+/*
+ * Command specific defines
+ */
+typedef struct {
+ BITFIELD_2(unsigned char,
+ periph_type : 5,
+ periph_qual : 3);
+#define SCSI_SCANNER 0x06 /* periph_type values */
+#define SCSI_MEMORY 0x07
+#define SCSI_J_BOX 0x08
+#define SCSI_COMM 0x09
+#define SCSI_PREPRESS1 0x0a
+#define SCSI_PREPRESS2 0x0b
+
+#define SCSI_PERIPH_CONNECTED 0x00 /* periph_qual values */
+#define SCSI_PERIPH_DISCONN 0x20
+#define SCSI_PERIPH_NOTHERE 0x30
+
+ BITFIELD_2(unsigned char,
+ device_type : 7,
+ rmb : 1);
+
+ BITFIELD_3( unsigned char,
+ ansi : 3,
+ ecma : 3,
+ iso : 2);
+
+ BITFIELD_4( unsigned char,
+ response_fmt : 4,
+ res1 : 2,
+ trmIOP : 1,
+ aenc : 1);
+ unsigned char length;
+ unsigned char res2;
+ unsigned char res3;
+
+ BITFIELD_8(unsigned char,
+ SftRe : 1,
+ CmdQue : 1,
+ res4 : 1,
+ Linked : 1,
+ Sync : 1,
+ Wbus16 : 1,
+ Wbus32 : 1,
+ RelAdr : 1);
+
+ unsigned char vendor_id[8];
+ unsigned char product_id[16];
+ unsigned char product_rev[4];
+ unsigned char vendor_uqe[20];
+ unsigned char reserved[40];
+ unsigned char vendor_uqe1[1]; /* varsize */
+} scsi2_inquiry_data_t;
+#define SCSI_INQ_SUPP_PAGES 0x00
+#define SCSI_INQ_A_INFO 0x01 /* 0x01..0x1f, really */
+#define SCSI_INQ_SERIALNO 0x80
+#define SCSI_INQ_IMPL_OPDEF 0x81
+#define SCSI_INQ_A_IMPL_OPDEF 0x82
+
+/* mode_select */
+typedef struct {
+ unsigned char data_len;
+ unsigned char medium_type;
+ unsigned char device_specific;
+ unsigned char desc_len;
+ /* block descriptors are optional, same struct as scsi1 */
+ /* page info then follows, see individual pages */
+} scsi2_mode_param_t;
+
+/*
+ * CDROM thingies
+ */
+typedef union {
+ struct {
+ unsigned char xxx;
+ unsigned char minute;
+ unsigned char second;
+ unsigned char frame;
+ } msf;
+ struct {
+ unsigned char lba1;
+ unsigned char lba2;
+ unsigned char lba3;
+ unsigned char lba4;
+ } lba;
+} cdrom_addr_t;
+
+typedef struct {
+ unsigned char len1; /* MSB */
+ unsigned char len2; /* LSB */
+ unsigned char first_track;
+ unsigned char last_track;
+ struct cdrom_toc_desc {
+
+ unsigned char xxx;
+
+ BITFIELD_2(unsigned char,
+ control : 4,
+ adr : 4);
+
+ unsigned char trackno;
+ unsigned char xxx1;
+ cdrom_addr_t absolute_address;
+ } descs[1]; /* varsize */
+} cdrom_toc_t;
+
+typedef struct {
+ unsigned char xxx;
+
+ unsigned char audio_status;
+#define SCSI_CDST_INVALID 0x00
+#define SCSI_CDST_PLAYING 0x11
+#define SCSI_CDST_PAUSED 0x12
+#define SCSI_CDST_COMPLETED 0x13
+#define SCSI_CDST_ERROR 0x14
+#define SCSI_CDST_NO_STATUS 0x15
+
+ unsigned char len1;
+ unsigned char len2;
+ struct cdrom_chanQ {
+ unsigned char format;
+ BITFIELD_2(unsigned char,
+ control : 4,
+ adr : 4);
+ unsigned char trackno;
+ unsigned char indexno;
+ cdrom_addr_t absolute_address;
+ cdrom_addr_t relative_address;
+ BITFIELD_2(unsigned char,
+ xxx : 7,
+ mcv : 1);
+ unsigned char catalog[15];
+ BITFIELD_2(unsigned char,
+ xxx1 : 7,
+ tcv : 1);
+ unsigned char isrc[15];
+ } subQ;
+} cdrom_chan_data_t;
+
+/* subsets */
+typedef struct {
+ unsigned char xxx;
+ unsigned char audio_status;
+ unsigned char len1;
+ unsigned char len2;
+ struct {
+ unsigned char format;
+ BITFIELD_2(unsigned char,
+ control : 4,
+ adr : 4);
+ unsigned char trackno;
+ unsigned char indexno;
+ cdrom_addr_t absolute_address;
+ cdrom_addr_t relative_address;
+ } subQ;
+} cdrom_chan_curpos_t;
+
+typedef struct {
+ unsigned char xxx;
+ unsigned char audio_status;
+ unsigned char len1;
+ unsigned char len2;
+ struct {
+ unsigned char format;
+ unsigned char xxx1[3];
+ BITFIELD_2(unsigned char,
+ xxx : 7,
+ mcv : 1);
+ unsigned char catalog[15];
+ } subQ;
+} cdrom_chan_catalog_t;
+
+typedef struct {
+ unsigned char xxx;
+ unsigned char audio_status;
+ unsigned char len1;
+ unsigned char len2;
+ struct {
+ unsigned char format;
+ BITFIELD_2(unsigned char,
+ control : 4,
+ adr : 4);
+ unsigned char trackno;
+ unsigned char xxx0;
+ BITFIELD_2(unsigned char,
+ xxx1 : 7,
+ tcv : 1);
+ unsigned char isrc[15];
+ } subQ;
+} cdrom_chan_isrc_t;
+
+/* Audio page */
+typedef struct {
+ scsi_mode_sense_data_t h; /* includes bdescs */
+ unsigned char page_code;
+#define SCSI_CD_AUDIO_PAGE 0x0e
+ unsigned char page_len;
+ BITFIELD_4(unsigned char,
+ xxx1 : 1,
+ sotc : 1,
+ imm : 1,
+ xxx2 : 5);
+ unsigned char xxx3[2];
+ BITFIELD_3(unsigned char,
+ fmt : 4,
+ xxx4 : 3,
+ aprv : 1);
+ unsigned char bps1;
+ unsigned char bps2;
+ BITFIELD_2(unsigned char,
+ sel0 : 4,
+ xxx5 : 4);
+ unsigned char vol0;
+ BITFIELD_2(unsigned char,
+ sel1 : 4,
+ xxx6 : 4);
+ unsigned char vol1;
+ BITFIELD_2(unsigned char,
+ sel2 : 4,
+ xxx7 : 4);
+ unsigned char vol2;
+ BITFIELD_2(unsigned char,
+ sel3 : 4,
+ xxx8 : 4);
+ unsigned char vol3;
+} cdrom_audio_page_t;
+
+/*
+ * Status byte (a-la scsi2)
+ */
+
+typedef union {
+ struct {
+ BITFIELD_3( unsigned char,
+ scsi_status_reserved1:1,
+ scsi_status_code:5,
+ scsi_status_reserved2:2);
+ /* more scsi_status_code values */
+ /* 00..0c as in SCSI-I */
+# define SCSI_ST2_CMD_TERMINATED 0x11 /* 2 */
+# define SCSI_ST2_QUEUE_FULL 0x14 /* 2 */
+ /* anything else is reserved */
+ } st;
+ unsigned char bits;
+} scsi2_status_byte_t;
+
+#endif /* _SCSI_SCSI2_H_ */
diff --git a/scsi/scsi_alldevs.c b/scsi/scsi_alldevs.c
new file mode 100644
index 00000000..2c4f37e3
--- /dev/null
+++ b/scsi/scsi_alldevs.c
@@ -0,0 +1,858 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_alldevs.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Middle layer of the SCSI driver: SCSI protocol implementation
+ * This file contains code for SCSI commands defined for all device types.
+ */
+
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <scsi/compat_30.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+
+#if (NSCSI > 0)
+
+void scsi_print_add_sense_keys(); /* forward */
+
+/*
+ * Utilities
+ */
+void scsi_go_and_wait(tgt, insize, outsize, ior)
+ target_info_t *tgt;
+ int insize, outsize;
+ io_req_t ior;
+{
+ register scsi_softc_t *sc = scsi_softc[(unsigned char)tgt->masterno];
+
+ tgt->ior = ior;
+
+ (*sc->go)(tgt, insize, outsize, ior==0);
+
+ if (ior)
+ iowait(ior);
+ else
+ while (tgt->done == SCSI_RET_IN_PROGRESS);
+}
+
+void scsi_go(tgt, insize, outsize, cmd_only)
+ target_info_t *tgt;
+ int insize, outsize, cmd_only;
+{
+ register scsi_softc_t *sc = scsi_softc[(unsigned char)tgt->masterno];
+
+ (*sc->go)(tgt, insize, outsize, cmd_only);
+}
+
+int sizeof_scsi_command(
+ unsigned char cmd)
+{
+ switch ((cmd & SCSI_CODE_GROUP) >> 5) {
+ case 0: return sizeof(scsi_command_group_0);
+ case 1: return sizeof(scsi_command_group_1);
+ case 2: return sizeof(scsi_command_group_2);
+ /* 3,4 reserved */
+ case 5: return sizeof(scsi_command_group_5);
+	/* 6,7 vendor specific (!!) */
+	case 6: return sizeof(scsi_command_group_2);
+	/* 3,4 reserved, 7 vendor unique: assume ten bytes rather than fall off the end */
+	default: return sizeof(scsi_command_group_2);
+	}
+}
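+
+/*
+ * The group is the top three bits of the opcode, so for example
+ * (illustrative only):
+ *
+ *	sizeof_scsi_command(SCSI_CMD_READ)		is  6	(group 0)
+ *	sizeof_scsi_command(SCSI_CMD_READ_CAPACITY)	is 10	(group 1)
+ *	sizeof_scsi_command(SCSI_CMD_PLAY_AUDIO_LONG)	is 12	(group 5)
+ */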
+
+/*
+ * INQUIRY (Almost mandatory)
+ */
+int scsi_inquiry( tgt, pagecode)
+ register target_info_t *tgt;
+ int pagecode;
+{
+ scsi_cmd_inquiry_t *cmd;
+ boolean_t no_ify = TRUE;
+
+retry:
+ cmd = (scsi_cmd_inquiry_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_INQUIRY;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_xfer_len = 0xff; /* max len always */
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+/*#ifdef SCSI2*/
+ if (pagecode != SCSI_INQ_STD_DATA) {
+ cmd->scsi_cmd_lun_and_lba1 |= SCSI_CMD_INQ_EVPD;
+ cmd->scsi_cmd_page_code = pagecode;
+ } else
+/*#endif SCSI2*/
+ cmd->scsi_cmd_page_code = 0;
+
+ tgt->cur_cmd = SCSI_CMD_INQUIRY;
+
+ /*
+ * Note: this is sent when we do not know much about the
+ * target, so we might not put an identify message upfront
+ */
+ scsi_go(tgt, sizeof(*cmd), 0xff, no_ify);
+
+ /*
+ * This spin loop is because we are called at autoconf
+ * time where we cannot thread_block(). Sigh.
+ */
+ while (tgt->done == SCSI_RET_IN_PROGRESS) ;
+ if (tgt->done == SCSI_RET_RETRY) /* sync negotiation ? */
+ goto retry;
+ if ((tgt->done != SCSI_RET_SUCCESS) && no_ify) {
+ no_ify = FALSE;
+ goto retry;
+ }
+ return tgt->done;
+}
+
+void scsi_print_inquiry( inq, pagecode, result)
+ scsi2_inquiry_data_t *inq;
+ int pagecode;
+ char *result;
+{
+ static char *periph_names[10] = {
+ "disk", "tape", "printer", "processor", "WORM-disk",
+ "CD-ROM", "scanner", "memory", "jukebox", "communication"
+ };
+ static char *periph_state[4] = {
+ "online", "offline", "?", "absent"
+ };
+
+ char dev[SCSI_TARGET_NAME_LEN], *devname;
+ register int i, j = 0;
+
+ if (pagecode != SCSI_INQ_STD_DATA)
+ return;
+
+ devname = result ? result : dev;
+
+ if (!result) {
+ printf("\n\t%s%s %s (%s %x)",
+ (inq->rmb) ? "" : "non-", "removable SCSI",
+			(inq->periph_type > 9) ?
+ "?device?" : periph_names[inq->periph_type],
+ periph_state[inq->periph_qual & 0x3],
+ inq->device_type);
+ printf("\n\t%s%s%s",
+ inq->iso ? "ISO-compliant, " : "",
+ inq->ecma ? "ECMA-compliant, " : "",
+ inq->ansi ? "ANSI-compliant, " : "");
+ if (inq->ansi)
+ printf("%s%d, ", "SCSI-", inq->ansi);
+ if (inq->response_fmt == 2)
+ printf("%s%s%s%s%s%s%s%s%s%s%s", "Supports: ",
+ inq->aenc ? "AENC, " : "",
+ inq->trmIOP ? "TrmIOP, " : "",
+ inq->RelAdr ? "RelAdr, " : "",
+ inq->Wbus32 ? "32 bit xfers, " : "",
+			inq->Wbus16 ? "16 bit xfers, " : "",
+ inq->Sync ? "Sync xfers, " : "",
+ inq->Linked ? "Linked cmds, " : "",
+ inq->CmdQue ? "Tagged cmd queues, " : "",
+ inq->SftRe ? "Soft" : "Hard", " RESET, ");
+ }
+
+ for (i = 0; i < 8; i++)
+ if (inq->vendor_id[i] != ' ')
+ devname[j++] = inq->vendor_id[i];
+ devname[j++] = ' ';
+ for (i = 0; i < 16; i++)
+ if (inq->product_id[i] != ' ')
+ devname[j++] = inq->product_id[i];
+ devname[j++] = ' ';
+ for (i = 0; i < 4; i++)
+ if (inq->product_rev[i] != ' ')
+ devname[j++] = inq->product_rev[i];
+#if unsafe
+ devname[j++] = ' ';
+ for (i = 0; i < 8; i++)
+ if (inq->vendor_uqe[i] != ' ')
+ devname[j++] = inq->vendor_uqe[i];
+#endif
+ devname[j] = 0;
+
+ if (!result)
+ printf("(%s, %s%s)\n", devname, "SCSI ",
+		(inq->periph_type > 9) ?
+ "?device?" : periph_names[inq->periph_type]);
+}
+
+/*
+ * REQUEST SENSE (Mandatory, All)
+ */
+
+int scsi_request_sense(tgt, ior, data)
+ register target_info_t *tgt;
+ io_req_t ior;
+ char **data;
+{
+ scsi_cmd_request_sense_t *cmd;
+
+ cmd = (scsi_cmd_request_sense_t *) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_REQUEST_SENSE;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_allocation_length = 0xff; /* max len always */
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_REQUEST_SENSE;
+
+ if (ior==0)
+ scsi_go_and_wait (tgt, sizeof(*cmd), 0xff, ior);
+ else {
+ scsi_go(tgt, sizeof(*cmd), 0xff, FALSE);
+ return tgt->done;
+ }
+
+ if (data)
+ *data = tgt->cmd_ptr;
+
+ (void) scsi_check_sense_data(tgt, tgt->cmd_ptr);
+
+ return tgt->done;
+}
+
+boolean_t
+scsi_check_sense_data(tgt, sns)
+ register target_info_t *tgt;
+ scsi_sense_data_t *sns;
+{
+ unsigned char code;
+
+ if (sns->error_class != SCSI_SNS_XTENDED_SENSE_DATA) {
+ printf("Bad sense data, vuqe class x%x code x%x\n",
+ sns->error_class, sns->error_code);
+ return FALSE; /* and good luck */
+ } else {
+ code = sns->u.xtended.sense_key;
+
+ switch (code) {
+ case SCSI_SNS_NOSENSE:
+ case SCSI_SNS_EQUAL:
+ return TRUE;
+ break;
+ case SCSI_SNS_RECOVERED:
+ scsi_error(tgt, SCSI_ERR_BAD | SCSI_ERR_SENSE,
+ code, sns->u.xtended.add_bytes);
+ return TRUE;
+ break;
+ case SCSI_SNS_UNIT_ATN:
+ scsi_error(tgt, SCSI_ERR_SENSE,
+ code, sns->u.xtended.add_bytes);
+ return TRUE;
+ break;
+ case SCSI_SNS_NOTREADY:
+ tgt->done = SCSI_RET_RETRY;
+ return TRUE;
+ case SCSI_SNS_ILLEGAL_REQ:
+ if (tgt->flags & TGT_OPTIONAL_CMD)
+ return TRUE;
+ /* fall through */
+ default:
+/* e.g.
+ case SCSI_SNS_MEDIUM_ERR:
+ case SCSI_SNS_HW_ERR:
+ case SCSI_SNS_PROTECT:
+ case SCSI_SNS_BLANK_CHK:
+ case SCSI_SNS_VUQE:
+ case SCSI_SNS_COPY_ABRT:
+ case SCSI_SNS_ABORTED:
+ case SCSI_SNS_VOLUME_OVFL:
+ case SCSI_SNS_MISCOMPARE:
+ case SCSI_SNS_RESERVED:
+*/
+ scsi_error(tgt, SCSI_ERR_GRAVE|SCSI_ERR_SENSE,
+ code, sns->u.xtended.add_bytes);
+ return FALSE;
+ break;
+ }
+ }
+}
+
+/*
+ * START STOP UNIT (Optional; disk, printer, WORM, ROM, tape [load/unload])
+ */
+int scsi_start_unit( tgt, ss, ior)
+ register target_info_t *tgt;
+ int ss;
+ io_req_t ior;
+{
+ scsi_cmd_start_t *cmd;
+
+ cmd = (scsi_cmd_start_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_START_STOP_UNIT;
+ cmd->scsi_cmd_lun_and_lba1 = SCSI_CMD_SS_IMMED;/* 0 won't work ? */
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_ss_flags = ss;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_START_STOP_UNIT;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+ return tgt->done;
+}
+
+/*
+ * TEST UNIT READY (Optional, All)
+ * Note: this is where we do the synch negotiation at autoconf
+ */
+int scsi_test_unit_ready( tgt, ior)
+ register target_info_t *tgt;
+ io_req_t ior;
+{
+ scsi_cmd_test_unit_ready_t *cmd;
+
+ cmd = (scsi_cmd_test_unit_ready_t*) (tgt->cmd_ptr);
+
+ cmd->scsi_cmd_code = SCSI_CMD_TEST_UNIT_READY;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_ss_flags = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_TEST_UNIT_READY;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+ return tgt->done;
+}
+
+/*
+ * RECEIVE DIAGNOSTIC RESULTS (Optional, All)
+ */
+int scsi_receive_diag( tgt, result, result_len, ior)
+ register target_info_t *tgt;
+ char *result;
+ int result_len;
+ io_req_t ior;
+{
+ scsi_cmd_receive_diag_t *cmd;
+
+ cmd = (scsi_cmd_receive_diag_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_RECEIVE_DIAG_RESULTS;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = result_len >> 8 & 0xff;
+ cmd->scsi_cmd_xfer_len = result_len & 0xff;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_RECEIVE_DIAG_RESULTS;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), result_len, ior);
+
+ bcopy(tgt->cmd_ptr, (char*)result, result_len);
+
+ return tgt->done;
+}
+
+
+int scsi_mode_sense( tgt, pagecode, len, ior)
+ register target_info_t *tgt;
+ int pagecode;
+ int len;
+ io_req_t ior;
+{
+ scsi_cmd_mode_sense_t *cmd;
+
+ cmd = (scsi_cmd_mode_sense_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_MODE_SENSE;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_ms_pagecode = pagecode;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_xfer_len = len;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_MODE_SENSE;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), len, ior);
+
+ return tgt->done;
+}
+
+#if 0 /* unused */
+
+/*
+ * COPY (Optional, All)
+ */
+void scsi_copy( tgt, params, params_len, ior)
+ register target_info_t *tgt;
+ char *params;
+	int			params_len;
+	io_req_t		ior;
+{
+ scsi_cmd_copy_t *cmd;
+
+	cmd = (scsi_cmd_copy_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_COPY;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = params_len>>16 & 0xff;
+ cmd->scsi_cmd_lba3 = params_len >> 8 & 0xff;
+ cmd->scsi_cmd_xfer_len = params_len & 0xff;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ bcopy(params, cmd + 1, params_len);
+
+ tgt->cur_cmd = SCSI_CMD_COPY;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd) + params_len, 0, ior);
+}
+
+/*
+ * SEND DIAGNOSTIC (Optional, All)
+ */
+void scsi_send_diag( tgt, flags, params, params_len, ior)
+ register target_info_t *tgt;
+ char *params;
+	int			flags, params_len;
+	io_req_t		ior;
+{
+ scsi_cmd_send_diag_t *cmd;
+
+ cmd = (scsi_cmd_send_diag_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_SEND_DIAGNOSTICS;
+ cmd->scsi_cmd_lun_and_lba1 = flags & 0x7;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = params_len >> 8 & 0xff;
+ cmd->scsi_cmd_xfer_len = params_len & 0xff;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ bcopy(params, cmd + 1, params_len);
+
+ tgt->cur_cmd = SCSI_CMD_SEND_DIAGNOSTICS;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+}
+
+/*
+ * COMPARE (Optional, All)
+ */
+void scsi_compare( tgt, params, params_len, ior)
+ register target_info_t *tgt;
+ char *params;
+	int			params_len;
+	io_req_t		ior;
+{
+ scsi_cmd_compare_t *cmd;
+
+ cmd = (scsi_cmd_compare_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_COMPARE;
+ cmd->scsi_cmd_lun_and_relbit = 0;
+ cmd->scsi_cmd_lba1 = 0;
+ cmd->scsi_cmd_1_paraml1 = params_len >> 16 & 0xff;
+ cmd->scsi_cmd_1_paraml2 = params_len >> 8 & 0xff;
+ cmd->scsi_cmd_1_paraml3 = params_len & 0xff;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = 0;
+ cmd->scsi_cmd_xfer_len_2 = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ bcopy(params, cmd + 1, params_len);
+
+ tgt->cur_cmd = SCSI_CMD_COMPARE;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+}
+
+/*
+ * COPY AND VERIFY (Optional, All)
+ */
+void scsi_copy_and_verify( tgt, params, params_len, bytchk, ior)
+ register target_info_t *tgt;
+ char *params;
+	int			params_len;
+	boolean_t		bytchk;
+	io_req_t		ior;
+{
+ scsi_cmd_compare_t *cmd;
+
+ cmd = (scsi_cmd_compare_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_COMPARE;
+ cmd->scsi_cmd_lun_and_relbit = bytchk ? SCSI_CMD_CPY_BYTCHK : 0;
+ cmd->scsi_cmd_lba1 = 0;
+ cmd->scsi_cmd_1_paraml1 = params_len >> 16 & 0xff;
+ cmd->scsi_cmd_1_paraml2 = params_len >> 8 & 0xff;
+ cmd->scsi_cmd_1_paraml3 = params_len & 0xff;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = 0;
+ cmd->scsi_cmd_xfer_len_2 = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ bcopy(params, cmd + 1, params_len);
+
+ tgt->cur_cmd = SCSI_CMD_COMPARE;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+}
+
+#endif
+
+#ifdef SCSI2
+scsi_change_definition
+scsi_log_select
+scsi_log_sense
+scsi_long_mode_select
+scsi_read_buffer
+scsi_write_buffer
+#endif /* SCSI2 */
+
+/*
+ * Warn user of some device error
+ */
+int scsi_debug = 0;
+
+static char *sns_msg[SCSI_SNS_RESERVED+1] = {
+ "No Sense Data",/* shouldn't happen */
+ "Recovered",
+ "Unit not ready",
+ "Medium",
+ "Hardware failure",
+ "Illegal request",
+ "Unit Attention Condition",
+ "Protection",
+ "Blank Check",
+ "Vendor Unique",
+ "Copy Operation Aborted",
+ "Aborted Command",
+ "Equal Comparison",
+ "Volume Overflow",
+ "Miscompare",
+ "Reserved"
+};
+
+void
+scsi_error( tgt, code, info, addtl)
+ target_info_t *tgt;
+ unsigned code;
+ unsigned info;
+ char *addtl;
+{
+ char unit;
+ char *msg, *cmd;
+ scsi2_status_byte_t status;
+ if (scsi_debug)
+ code |= SCSI_ERR_GRAVE;
+
+ if (tgt)
+ unit = tgt->unit_no + '0';
+ else
+ unit = '?';
+
+
+ switch (SCSI_ERR_CLASS(code)) {
+ case SCSI_ERR_STATUS:
+ cmd = "Bad status return";
+ status.bits = info;
+ switch (status.st.scsi_status_code) {
+ case SCSI_ST_GOOD:
+ case SCSI_ST_CONDITION_MET:
+ case SCSI_ST_INT_GOOD:
+ case SCSI_ST_INT_MET:
+ return; /* all is fine */
+ case SCSI_ST_CHECK_CONDITION:
+ msg = "Check condition"; break;
+ case SCSI_ST_RES_CONFLICT:
+ msg = "Reservation conflict"; break;
+ case SCSI_ST_BUSY:
+ msg = "Target busy"; break;
+ case SCSI_ST2_QUEUE_FULL:
+ msg = "Queue full"; break;
+ case SCSI_ST2_CMD_TERMINATED:
+ msg = "Command terminated"; break;
+ default:
+ msg = "Strange"; break;
+ }
+ break;
+ case SCSI_ERR_SENSE:
+ cmd = "Sensed a";
+ msg = sns_msg[info & 0xf];
+ break;
+ case SCSI_ERR_MSEL:
+ cmd = "Mode select broken"; msg = ""; break;
+ default:
+ cmd = "Generic"; msg = "";
+ }
+ if (SCSI_ERR_GRAVITY(code)) {
+ printf("\n%s%c: %s %s %sx%x", "target ", unit, cmd, msg,
+ "error, code ", info);
+ if (addtl) {
+ unsigned int add[3];
+ bcopy(addtl, (char*)add, 3*sizeof(int));
+ printf("%s x%x x%x x%x", ", additional info ",
+ add[0], add[1], add[2]);
+ }
+ printf("\n");
+ }
+}
+
+void scsi_print_sense_data(sns)
+ scsi_sense_data_t *sns;
+{
+ printf("Sense data: %s%s, segment %d",
+ sns_msg[sns->u.xtended.sense_key], " error",
+ sns->u.xtended.segment_number);
+ if (sns->u.xtended.ili) printf(", IncorrectLengthIndicator");
+ if (sns->u.xtended.eom) printf(", EndOfMedium");
+ if (sns->u.xtended.fm) printf(", FileMark");
+
+ if (sns->addr_valid) {
+ unsigned int info;
+ info = (sns->u.xtended.info0 << 24) |
+ (sns->u.xtended.info1 << 16) |
+ (sns->u.xtended.info2 << 8) |
+ sns->u.xtended.info3;
+ printf(", Info x%x", info);
+ }
+
+ if (sns->u.xtended.add_len > 6)
+ scsi_print_add_sense_keys(sns->u.xtended.add_bytes[4],
+ sns->u.xtended.add_bytes[5]);
+}
+
+/*
+ * Table of the official SCSI-2 error messages
+ * Last update:
+ * X3T9.2/86-109, Revision 10c, March 9, 1990
+ */
+static struct addtl_sns_keys_msg {
+ unsigned char byte12;
+ unsigned char byte13;
+ char *means;
+} addtl_sns_msgs[] = {
+ { 0x00, 0x00, "No additional sense information" },
+ { 0x00, 0x01, "Filemark detected" },
+ { 0x00, 0x02, "End-of-partition/medium detected" },
+ { 0x00, 0x03, "Setmark detected" },
+ { 0x00, 0x04, "Beginning of partition/medium detected" },
+ { 0x00, 0x05, "End-of-data detected" },
+ { 0x00, 0x06, "I/O process terminated" },
+ { 0x00, 0x11, "Audio play operation in progress" },
+ { 0x00, 0x12, "Audio play operation paused" },
+ { 0x00, 0x13, "Audio play operation successfully completed" },
+ { 0x00, 0x14, "Audio play operation stopped due to error" },
+ { 0x00, 0x15, "No current audio status to return" },
+ { 0x01, 0x00, "No index/sector signal" },
+ { 0x02, 0x00, "No seek complete" },
+ { 0x03, 0x00, "Peripheral device write fault" },
+ { 0x03, 0x01, "No write current" },
+ { 0x03, 0x02, "Excessive write errors" },
+ { 0x04, 0x00, "Logical unit not ready, cause not reportable" },
+ { 0x04, 0x01, "Logical unit is in process of becoming ready" },
+ { 0x04, 0x02, "Logical unit not ready, initializing command required" },
+ { 0x04, 0x03, "Logical unit not ready, manual intervention required" },
+ { 0x04, 0x04, "Logical unit not ready, format in progress" },
+ { 0x05, 0x00, "Logical unit does not respond to selection" },
+ { 0x06, 0x00, "No reference position found" },
+ { 0x07, 0x00, "Multiple peripheral devices selected" },
+ { 0x08, 0x00, "Logical unit communication failure" },
+ { 0x08, 0x01, "Logical unit communication time-out" },
+ { 0x08, 0x02, "Logical unit communication parity error" },
+ { 0x09, 0x00, "Track following error" },
+ { 0x09, 0x01, "Tracking servo failure" },
+ { 0x09, 0x02, "Focus servo failure" },
+ { 0x09, 0x03, "Spindle servo failure" },
+ { 0x0a, 0x00, "Error log overflow" },
+ { 0x0c, 0x00, "Write error" },
+ { 0x0c, 0x01, "Write error recovered with auto-reallocation" },
+ { 0x0c, 0x02, "Write error - auto-reallocation failed" },
+ { 0x10, 0x00, "Id CRC or ECC error" },
+ { 0x10, 0x04, "Recovered data with LEC" },
+ { 0x11, 0x00, "Unrecovered read error" },
+ { 0x11, 0x01, "Read retries exhausted" },
+ { 0x11, 0x02, "Error too long to correct" },
+ { 0x11, 0x03, "Multiple read errors" },
+ { 0x11, 0x04, "Unrecovered read error - auto-reallocate failed" },
+ { 0x11, 0x05, "L-EC uncorrectable error" },
+ { 0x11, 0x06, "CIRC unrecovered error" },
+ { 0x11, 0x07, "Data resynchronization error" },
+ { 0x11, 0x08, "Incomplete block read" },
+ { 0x11, 0x09, "No gap found" },
+ { 0x11, 0x0a, "Miscorrected error" },
+ { 0x11, 0x0b, "Unrecovered read error - recommend reassignment" },
+ { 0x11, 0x0c, "Unrecovered read error - recommend rewrite the data" },
+ { 0x12, 0x00, "Address mark not found for id field" },
+ { 0x13, 0x00, "Address mark not found for data field" },
+ { 0x14, 0x00, "Recorded entity not found" },
+ { 0x14, 0x01, "Record not found" },
+ { 0x14, 0x02, "Filemark or setmark not found" },
+ { 0x14, 0x03, "End-of-data not found" },
+ { 0x14, 0x04, "Block sequence error" },
+ { 0x15, 0x00, "Random positioning error" },
+ { 0x15, 0x01, "Mechanical positioning error" },
+ { 0x15, 0x02, "Positioning error detected by read of medium" },
+ { 0x16, 0x00, "Data synchronization mark error" },
+ { 0x17, 0x00, "Recovered data with no error correction applied" },
+ { 0x17, 0x01, "Recovered data with retries" },
+ { 0x17, 0x02, "Recovered data with positive head offset" },
+ { 0x17, 0x03, "Recovered data with negative head offset" },
+ { 0x17, 0x04, "Recovered data with retries and/or CIRC applied" },
+ { 0x17, 0x05, "Recovered data using previous sector id" },
+ { 0x17, 0x06, "Recovered data without ECC - data auto-reallocated" },
+ { 0x17, 0x07, "Recovered data without ECC - recommend reassignment" },
+ { 0x18, 0x00, "Recovered data with error correction applied" },
+ { 0x18, 0x01, "Recovered data with error correction and retries applied" },
+ { 0x18, 0x02, "Recovered data - data auto-reallocated" },
+ { 0x18, 0x03, "Recovered data with CIRC" },
+ { 0x18, 0x05, "Recovered data - recommended reassignment" },
+ { 0x19, 0x00, "Defect list error" },
+ { 0x19, 0x01, "Defect list not available" },
+ { 0x19, 0x02, "Defect list error in primary list" },
+ { 0x19, 0x03, "Defect list error in grown list" },
+ { 0x1a, 0x00, "Parameter list length error" },
+ { 0x1b, 0x00, "Synchronous data transfer error" },
+ { 0x1c, 0x00, "Defect list not found" },
+ { 0x1c, 0x01, "Primary defect list not found" },
+ { 0x1c, 0x02, "Grown defect list not found" },
+ { 0x1d, 0x00, "Miscompare during verify operation" },
+ { 0x1e, 0x00, "Recovered id with ECC correction" },
+ { 0x20, 0x00, "Invalid command operation code" },
+ { 0x21, 0x00, "Logical block address out of range" },
+ { 0x21, 0x01, "Invalid element address" },
+ { 0x22, 0x00, "Illegal function" },
+ { 0x24, 0x00, "Invalid field in CDB" },
+ { 0x24, 0x02, "Log parameters changed" },
+ { 0x25, 0x00, "Logical unit not supported" },
+ { 0x26, 0x00, "Invalid field in parameter list" },
+ { 0x26, 0x01, "Parameter not supported" },
+ { 0x26, 0x02, "Parameter value invalid" },
+ { 0x26, 0x03, "Threshold parameters not supported" },
+ { 0x27, 0x00, "Write protected" },
+ { 0x28, 0x00, "Not ready to ready transition (medium may have changed)" },
+ { 0x28, 0x01, "Import or export element accessed" },
+ { 0x29, 0x00, "Power on, reset, or bus device reset occurred" },
+ { 0x2a, 0x00, "Parameters changed" },
+ { 0x2a, 0x01, "Mode parameters changed" },
+ { 0x2b, 0x00, "Copy cannot execute since host cannot disconnect" },
+ { 0x2c, 0x00, "Command sequence error" },
+ { 0x2c, 0x01, "Too many windows specified" },
+ { 0x2c, 0x02, "Invalid combination of windows specified" },
+ { 0x2d, 0x00, "Overwrite error on update in place" },
+ { 0x2f, 0x00, "Commands cleared by another initiator" },
+ { 0x30, 0x00, "Incompatible medium installed" },
+ { 0x30, 0x01, "Cannot read medium - unknown format" },
+ { 0x30, 0x02, "Cannot read medium - incompatible format" },
+ { 0x30, 0x03, "Cleaning cartridge installed" },
+ { 0x31, 0x00, "Medium format corrupted" },
+ { 0x31, 0x01, "Format command failed" },
+ { 0x32, 0x00, "No defect spare location available" },
+ { 0x32, 0x01, "Defect list update failure" },
+ { 0x33, 0x00, "Tape length error" },
+ { 0x36, 0x00, "Ribbon, ink, or toner failure" },
+ { 0x37, 0x00, "Rounded parameter" },
+ { 0x39, 0x00, "Saving parameters not supported" },
+ { 0x3a, 0x00, "Medium not present" },
+ { 0x3b, 0x00, "Sequential positioning error" },
+ { 0x3b, 0x01, "Tape position error at beginning of medium" },
+ { 0x3b, 0x02, "Tape position error at end of medium" },
+ { 0x3b, 0x03, "Tape or electronic vertical forms unit not ready" },
+ { 0x3b, 0x04, "Slew failure" },
+ { 0x3b, 0x05, "Paper jam" },
+ { 0x3b, 0x06, "Failed to sense top-of-form" },
+ { 0x3b, 0x07, "Failed to sense bottom-of-form" },
+ { 0x3b, 0x08, "Reposition error" },
+ { 0x3b, 0x09, "Read past end of medium" },
+ { 0x3b, 0x0a, "Read past beginning of medium" },
+ { 0x3b, 0x0b, "Position past end of medium" },
+ { 0x3b, 0x0c, "Position past beginning of medium" },
+ { 0x3b, 0x0d, "Medium destination element full" },
+ { 0x3b, 0x0e, "Medium source element empty" },
+ { 0x3d, 0x00, "Invalid bits in identify message" },
+ { 0x3e, 0x00, "Logical unit has not self-configured yet" },
+ { 0x3f, 0x00, "Target operating conditions have changed" },
+ { 0x3f, 0x01, "Microcode has been changed" },
+ { 0x3f, 0x02, "Changed operating definition" },
+ { 0x3f, 0x03, "Inquiry data has changed" },
+ { 0x40, 0x00, "RAM failure" },
+ { 0x40, 0xff, "Diagnostic failure on component <NN>" },
+ { 0x41, 0x00, "Data path failure" },
+ { 0x42, 0x00, "Power on or self-test failure" },
+ { 0x43, 0x00, "Message error" },
+ { 0x44, 0x00, "Internal target failure" },
+ { 0x45, 0x00, "Select or reselect failure" },
+ { 0x46, 0x00, "Unsuccessful soft reset" },
+ { 0x47, 0x00, "SCSI parity error" },
+ { 0x48, 0x00, "Initiator detected message received" },
+ { 0x49, 0x00, "Invalid message error" },
+ { 0x4a, 0x00, "Command phase error" },
+ { 0x4b, 0x00, "Data phase error" },
+ { 0x4c, 0x00, "Logical unit failed self-configuration" },
+ { 0x4e, 0x00, "Overlapped commands attempted" },
+ { 0x50, 0x00, "Write append error" },
+ { 0x50, 0x01, "Write append position error" },
+ { 0x50, 0x02, "Position error related to timing" },
+ { 0x51, 0x00, "Erase failure" },
+ { 0x52, 0x00, "Cartridge fault" },
+ { 0x53, 0x00, "Media load or eject failed" },
+ { 0x53, 0x01, "Unload tape failure" },
+ { 0x53, 0x02, "Medium removal prevented" },
+ { 0x54, 0x00, "SCSI to host system interface failure" },
+ { 0x55, 0x00, "System resource failure" },
+ { 0x57, 0x00, "Unable to recover table-of-contents" },
+ { 0x58, 0x00, "Generation does not exist" },
+ { 0x59, 0x00, "Updated block read" },
+ { 0x5a, 0x00, "Operator request or state change input (unspecified)" },
+ { 0x5a, 0x01, "Operator medium removal request" },
+ { 0x5a, 0x02, "Operator selected write protect" },
+ { 0x5a, 0x03, "Operator selected write permit" },
+ { 0x5b, 0x00, "Log exception" },
+ { 0x5b, 0x01, "Threshold condition met" },
+ { 0x5b, 0x02, "Log counter at maximum" },
+ { 0x5b, 0x03, "Log list codes exhausted" },
+ { 0x5c, 0x00, "RPL status change" },
+ { 0x5c, 0x01, "Spindles synchronized" },
+ { 0x5c, 0x02, "Spindles not synchronized" },
+ { 0x60, 0x00, "Lamp failure" },
+ { 0x61, 0x00, "Video acquisition error" },
+ { 0x61, 0x01, "Unable to acquire video" },
+ { 0x61, 0x02, "Out of focus" },
+ { 0x62, 0x00, "Scan head positioning error" },
+ { 0x63, 0x00, "End of user area encountered on this track" },
+ { 0x64, 0x00, "Illegal mode for this track" },
+ { 0, 0, 0}
+};
+
+void scsi_print_add_sense_keys(key, qualif)
+ register unsigned key, qualif;
+{
+ register struct addtl_sns_keys_msg *msg;
+
+ for (msg = addtl_sns_msgs; msg->means; msg++) {
+ if (msg->byte12 != key) continue;
+ if ((msg->byte12 == 0x40) && qualif) {
+ printf(", %s, NN=x%x", msg->means, qualif);
+ return;
+ }
+ if (msg->byte13 == qualif) {
+ printf(" %s", msg->means);
+ return;
+ }
+ };
+ printf(", Unknown additional sense keys: 0x%x 0x%x\n", key, qualif);
+}
+#endif /* NSCSI > 0 */
diff --git a/scsi/scsi_comm.c b/scsi/scsi_comm.c
new file mode 100644
index 00000000..1f0095a9
--- /dev/null
+++ b/scsi/scsi_comm.c
@@ -0,0 +1,115 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_comm.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Middle layer of the SCSI driver: SCSI protocol implementation
+ *
+ * This file contains code for SCSI commands for COMMUNICATION devices.
+ */
+
+#include <mach/std_types.h>
+#include <scsi/compat_30.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_defs.h>
+
+#if (NSCSI > 0)
+
+char *sccomm_name(
+ boolean_t internal)
+{
+ return internal ? "cz" : "comm";
+}
+
+void scsi_get_message(
+ register target_info_t *tgt,
+ io_req_t ior)
+{
+ scsi_cmd_read_t *cmd;
+ register unsigned len, max;
+
+ max = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ len = ior->io_count;
+ if (len > max) {
+ ior->io_residual = len - max;
+ len = max;
+ }
+
+ cmd = (scsi_cmd_read_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_GET_MESSAGE;
+ cmd->scsi_cmd_lun_and_lba1 = tgt->lun << SCSI_LUN_SHIFT;
+ cmd->scsi_cmd_lba2 = len >> 16;
+ cmd->scsi_cmd_lba3 = len >> 8;
+ cmd->scsi_cmd_xfer_len = len;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_GET_MESSAGE;
+
+ scsi_go(tgt, sizeof(*cmd), len, FALSE);
+}
+
+void scsi_send_message(
+ register target_info_t *tgt,
+ io_req_t ior)
+{
+ scsi_cmd_write_t *cmd;
+ register unsigned len, max;
+
+ len = ior->io_count;
+ max = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ if (len > max) {
+ ior->io_residual = len - max;
+ len = max;
+ }
+
+ cmd = (scsi_cmd_write_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_SEND_MESSAGE;
+ cmd->scsi_cmd_lun_and_lba1 = tgt->lun << SCSI_LUN_SHIFT;
+ cmd->scsi_cmd_lba2 = len >> 16;
+ cmd->scsi_cmd_lba3 = len >> 8;
+ cmd->scsi_cmd_xfer_len = len;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_SEND_MESSAGE;
+
+ scsi_go(tgt, sizeof(*cmd), 0, FALSE);
+}
+
+
+#if 0
+/* For now, these are not needed */
+scsi_get_message_long
+scsi_get_message_vlong
+scsi_send_message_long
+scsi_send_message_vlong
+#endif
+
+#endif /* NSCSI > 0 */
diff --git a/scsi/scsi_cpu.c b/scsi/scsi_cpu.c
new file mode 100644
index 00000000..979847c2
--- /dev/null
+++ b/scsi/scsi_cpu.c
@@ -0,0 +1,109 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_cpu.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Middle layer of the SCSI driver: SCSI protocol implementation
+ *
+ * This file contains code for SCSI commands for PROCESSOR devices.
+ */
+
+#include <mach/std_types.h>
+#include <scsi/compat_30.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+
+#if (NSCSI > 0)
+
+char *sccpu_name(internal)
+ boolean_t internal;
+{
+ return internal ? "sc" : "cpu";
+}
+
+void scsi_send( tgt, ior)
+ register target_info_t *tgt;
+ io_req_t ior;
+{
+ scsi_cmd_write_t *cmd;
+ unsigned len; /* in bytes */
+ unsigned int max_dma_data;
+
+ max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ len = ior->io_count;
+ if (len > max_dma_data)
+ len = max_dma_data;
+ if (len < tgt->block_size)
+ len = tgt->block_size;
+
+ cmd = (scsi_cmd_write_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_SEND;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = len >> 16;
+ cmd->scsi_cmd_lba3 = len >> 8;
+ cmd->scsi_cmd_xfer_len = len;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_SEND;
+
+ scsi_go(tgt, sizeof(*cmd), 0, FALSE);
+}
+
+void scsi_receive( tgt, ior)
+ register target_info_t *tgt;
+ io_req_t ior;
+{
+ scsi_cmd_read_t *cmd;
+ register unsigned len;
+ unsigned int max_dma_data;
+
+ max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ len = ior->io_count;
+ if (len > max_dma_data)
+ len = max_dma_data;
+ if (len < tgt->block_size)
+ len = tgt->block_size;
+
+ cmd = (scsi_cmd_read_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_RECEIVE;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = len >> 16;
+ cmd->scsi_cmd_lba3 = len >> 8;
+ cmd->scsi_cmd_xfer_len = len;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_RECEIVE;
+
+ scsi_go(tgt, sizeof(*cmd), len, FALSE);
+}
+
+#endif /* NSCSI > 0 */
diff --git a/scsi/scsi_defs.h b/scsi/scsi_defs.h
new file mode 100644
index 00000000..c98bc85f
--- /dev/null
+++ b/scsi/scsi_defs.h
@@ -0,0 +1,284 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_defs.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Controller-independent definitions for the SCSI driver
+ */
+
+#ifndef _SCSI_SCSI_DEFS_H_
+#define _SCSI_SCSI_DEFS_H_
+
+#include <kern/queue.h>
+#include <kern/lock.h>
+
+#include <rz_labels.h>
+
+#define await(event) sleep(event,0)
+extern void wakeup();
+
+typedef vm_offset_t opaque_t; /* should be elsewhere */
+
+/*
+ * Internal error codes, and return values
+ * XXX use the mach/error.h scheme XXX
+ */
+typedef unsigned int scsi_ret_t;
+
+#define SCSI_ERR_GRAVITY(x) ((unsigned)(x)&0xf0000000U)
+#define SCSI_ERR_GRAVE 0x80000000U
+#define SCSI_ERR_BAD 0x40000000
+
+#define SCSI_ERR_CLASS(x) ((unsigned)(x)&0x0fffffffU)
+#define SCSI_ERR_STATUS 0x00000001
+#define SCSI_ERR_SENSE 0x00000002
+#define SCSI_ERR_MSEL 0x00000004
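+/*
+ * For illustration: a code handed to scsi_error() combines one gravity
+ * bit with one class bit, e.g. (SCSI_ERR_GRAVE|SCSI_ERR_SENSE) for an
+ * unrecoverable sense key, or (SCSI_ERR_BAD|SCSI_ERR_SENSE) for a
+ * recovered one.
+ */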
+
+extern void scsi_error(/* target_info_t *, unsigned, unsigned */);
+
+#define SCSI_RET_IN_PROGRESS 0x00
+#define SCSI_RET_SUCCESS 0x01
+#define SCSI_RET_RETRY 0x02
+#define SCSI_RET_NEED_SENSE 0x04
+#define SCSI_RET_ABORTED 0x08
+#define SCSI_RET_DEVICE_DOWN 0x10
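+/*
+ * tgt->done holds one of the completion values above.  At autoconf
+ * time, when thread_block() is not available, callers simply spin on
+ * it, e.g.
+ *	while (tgt->done == SCSI_RET_IN_PROGRESS) ;
+ *	if (tgt->done == SCSI_RET_RETRY)
+ *		goto retry;
+ */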
+
+/*
+ * Device-specific information kept by driver
+ */
+#define	MAX_SCSI_PARTS	32	/* maximum partitions on a disk; can be larger */
+typedef struct {
+ struct disklabel l; /* NOT accurate. partitions stored below */
+ struct {
+ unsigned int badblockno;
+ unsigned int save_rec;
+ char *save_addr;
+ int save_count;
+ int save_resid;
+ int retry_count;
+ } b;
+#if 0 /* no longer used by partition code */
+ int labelsector;
+ int labeloffset;
+#endif /* 0 */
+ struct diskpart scsi_array[MAX_SCSI_PARTS]; /* new partition info */
+} scsi_disk_info_t;
+
+typedef struct {
+ boolean_t read_only;
+ unsigned int speed;
+ unsigned int density;
+ unsigned int maxreclen;
+ boolean_t fixed_size;
+} scsi_tape_info_t;
+
+typedef struct {
+ char req_pending;
+ char req_id;
+ char req_lun;
+ char req_cmd;
+ unsigned int req_len;
+ /* more later */
+} scsi_processor_info_t;
+
+typedef struct {
+ void *result;
+ boolean_t result_available;
+ int result_size;
+ struct red_list *violates_standards;
+} scsi_cdrom_info_t;
+
+typedef struct {
+# define SCSI_MAX_COMM_TTYS 16
+ struct tty *tty[SCSI_MAX_COMM_TTYS];
+ io_req_t ior;
+} scsi_comm_info_t;
+
+/*
+ * Device descriptor
+ */
+
+#define SCSI_TARGET_NAME_LEN	(8+16+4+8)	/* our way to keep it */
+
+typedef struct target_info {
+ queue_chain_t links; /* to queue for bus */
+ io_req_t ior; /* what we are doing */
+
+ unsigned int flags;
+#define TGT_DID_SYNCH 0x00000001 /* finished the synch neg */
+#define TGT_TRY_SYNCH 0x00000002 /* do the synch negotiation */
+#define TGT_FULLY_PROBED 0x00000004 /* can sleep to wait */
+#define TGT_ONLINE 0x00000008 /* did the once-only stuff */
+#define TGT_ALIVE 0x00000010
+#define TGT_BBR_ACTIVE 0x00000020 /* bad block replc in progress */
+#define TGT_DISCONNECTED 0x00000040 /* waiting for reconnect */
+#define TGT_WRITTEN_TO 0x00000080 /* tapes: needs a filemark on close */
+#define TGT_REWIND_ON_CLOSE 0x00000100 /* tapes: rewind */
+#define TGT_BIG 0x00000200 /* disks: > 1Gb, use long R/W */
+#define TGT_REMOVABLE_MEDIA 0x00000400 /* e.g. floppy, cd-rom,.. */
+#define TGT_READONLY 0x00000800 /* cd-rom, scanner, .. */
+#define TGT_OPTIONAL_CMD 0x00001000 /* optional cmd, ignore errors */
+#define TGT_WRITE_LABEL 0x00002000 /* disks: enable overwriting of label */
+#define TGT_US 0x00004000 /* our desc, when target role */
+
+#define TGT_HW_SPECIFIC_BITS 0xffff0000U /* see specific HBA */
+ char *hw_state; /* opaque */
+ char *dma_ptr;
+ char *cmd_ptr;
+ struct scsi_devsw_struct *dev_ops; /* circularity */
+ struct target_info *next_lun; /* if multi-LUN */
+ char target_id;
+ char unit_no;
+ unsigned char sync_period;
+ unsigned char sync_offset;
+ decl_simple_lock_data(,target_lock)
+#ifdef MACH_KERNEL
+#else /*MACH_KERNEL*/
+ struct fdma fdma;
+#endif /*MACH_KERNEL*/
+ /*
+ * State info kept while waiting to seize bus, either for first
+ * selection or while in disconnected state
+ */
+ struct {
+ struct script *script;
+ int (*handler)();
+ unsigned int out_count;
+ unsigned int in_count;
+ unsigned int copy_count; /* optional */
+ unsigned int dma_offset;
+ unsigned char identify;
+ unsigned char cmd_count;
+ unsigned char hba_dep[2];
+ } transient_state;
+ unsigned int block_size;
+ volatile char done;
+ unsigned char cur_cmd;
+ unsigned char lun;
+ char masterno;
+ char tgt_name[SCSI_TARGET_NAME_LEN];
+ union {
+ scsi_disk_info_t disk;
+ scsi_tape_info_t tape;
+ scsi_cdrom_info_t cdrom;
+ scsi_processor_info_t cpu;
+ scsi_comm_info_t comm;
+ } dev_info;
+} target_info_t;
+
+
+/*
+ * Device-specific operations
+ */
+typedef struct scsi_devsw_struct {
+ char *(*driver_name)(boolean_t); /* my driver's name */
+ void (*optimize)(target_info_t *); /* tune up internal params */
+ scsi_ret_t (*open)(target_info_t *,io_req_t);/* open time ops */
+ scsi_ret_t (*close)(target_info_t *); /* close time ops */
+ int (*strategy)(io_req_t); /* sort/start routine */
+ void (*restart)(target_info_t *,
+ boolean_t); /* completion routine */
+ io_return_t (*get_status)(int,
+ target_info_t *,
+ dev_flavor_t,
+ dev_status_t,
+ natural_t *); /* specialization */
+ io_return_t (*set_status)(int,
+ target_info_t *,
+ dev_flavor_t,
+ dev_status_t,
+ natural_t); /* specialization */
+} scsi_devsw_t;
+
+#define SCSI_OPTIMIZE_NULL ((void (*)(target_info_t *)) 0)
+#define SCSI_OPEN_NULL ((scsi_ret_t (*)(target_info_t *,io_req_t)) 0)
+#define SCSI_CLOSE_NULL ((scsi_ret_t (*)(target_info_t *)) 0)
+
+extern scsi_devsw_t scsi_devsw[];
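+/*
+ * Illustrative only (hypothetical names): each device class fills one
+ * entry of scsi_devsw[], in the field order declared above, e.g.
+ *	{ scdisk_name, scdisk_optimize, scdisk_open, scdisk_close,
+ *	  scdisk_strategy, scdisk_restart, scdisk_get_status,
+ *	  scdisk_set_status },
+ * with the SCSI_*_NULL macros standing in for operations a class
+ * does not need.
+ */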
+
+/*
+ * HBA descriptor
+ */
+
+typedef struct {
+ /* initiator (us) state */
+ unsigned char initiator_id;
+ unsigned char masterno;
+ unsigned int max_dma_data;
+ char *hw_state; /* opaque */
+ int (*go)();
+ void (*watchdog)();
+ boolean_t (*probe)();
+ /* per-target state */
+ target_info_t *target[8];
+} scsi_softc_t;
+
+extern scsi_softc_t *scsi_softc[];
+extern scsi_softc_t *scsi_master_alloc(/* int unit */);
+extern target_info_t *scsi_slave_alloc(/* int unit, int slave, char *hw */);
+
+#define BGET(d,mid,id) (d[mid] & (1 << id)) /* bitmap ops */
+#define BSET(d,mid,id) d[mid] |= (1 << id)
+#define BCLR(d,mid,id) d[mid] &= ~(1 << id)
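+/*
+ * For illustration: the bitmaps below are indexed by controller number
+ * and target id, so a per-target property can be tested with e.g.
+ *	if (BGET(scsi_no_synchronous_xfer, tgt->masterno, tgt->target_id))
+ *		... skip the synchronous transfer negotiation ...
+ */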
+
+extern unsigned char scsi_no_synchronous_xfer[]; /* one bitmap per ctlr */
+extern unsigned char scsi_use_long_form[]; /* one bitmap per ctlr */
+extern unsigned char scsi_might_disconnect[]; /* one bitmap per ctlr */
+extern unsigned char scsi_should_disconnect[]; /* one bitmap per ctlr */
+extern unsigned char scsi_initiator_id[]; /* one id per ctlr */
+
+extern boolean_t scsi_exabyte_filemarks;
+extern boolean_t scsi_no_automatic_bbr;
+extern int scsi_bbr_retries;
+extern int scsi_watchdog_period;
+extern int scsi_delay_after_reset;
+extern unsigned int scsi_per_target_virtual; /* 2.5 only */
+
+extern int scsi_debug;
+
+/*
+ * HBA-independent Watchdog
+ */
+typedef struct {
+
+ unsigned short reset_count;
+ char nactive;
+
+ char watchdog_state;
+
+#define SCSI_WD_INACTIVE 0
+#define SCSI_WD_ACTIVE 1
+#define SCSI_WD_EXPIRED 2
+
+ int (*reset)();
+
+} watchdog_t;
+
+extern void scsi_watchdog( watchdog_t* );
+
+#endif /* _SCSI_SCSI_DEFS_H_ */
diff --git a/scsi/scsi_disk.c b/scsi/scsi_disk.c
new file mode 100644
index 00000000..99bbe76e
--- /dev/null
+++ b/scsi/scsi_disk.c
@@ -0,0 +1,624 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_disk.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Middle layer of the SCSI driver: SCSI protocol implementation
+ *
+ * This file contains code for SCSI commands for DISK devices.
+ */
+
+#include <string.h>
+
+#include <mach/std_types.h>
+#include <scsi/compat_30.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+
+#if (NSCSI > 0)
+
+
+char *scdisk_name(internal)
+ boolean_t internal;
+{
+ return internal ? "rz" : "disk";
+}
+
+/*
+ * SCSI commands partially specific to disks
+ */
+void scdisk_read( tgt, secno, ior)
+ register target_info_t *tgt;
+ register unsigned int secno;
+ io_req_t ior;
+{
+ scsi_cmd_read_t *cmd;
+ register unsigned len;
+ unsigned int max_dma_data;
+
+ max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ len = ior->io_count;
+ if (len > max_dma_data)
+ len = max_dma_data;
+ if (len < tgt->block_size)
+ len = tgt->block_size;
+
+ cmd = (scsi_cmd_read_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_READ;
+ cmd->scsi_cmd_lun_and_lba1 = (secno>>16)&SCSI_LBA_MASK;
+ cmd->scsi_cmd_lba2 = (secno>> 8)&0xff;
+ cmd->scsi_cmd_lba3 = (secno )&0xff;
+ cmd->scsi_cmd_xfer_len = len / tgt->block_size;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_READ;
+
+ scsi_go(tgt, sizeof(*cmd), len, FALSE);
+}
+
+void scdisk_write( tgt, secno, ior)
+ register target_info_t *tgt;
+ register unsigned int secno;
+ io_req_t ior;
+{
+ scsi_cmd_write_t *cmd;
+ unsigned len; /* in bytes */
+ unsigned int max_dma_data;
+
+ max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ len = ior->io_count;
+ if (len > max_dma_data)
+ len = max_dma_data;
+ if (len < tgt->block_size)
+ len = tgt->block_size;
+
+ cmd = (scsi_cmd_write_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_WRITE;
+ cmd->scsi_cmd_lun_and_lba1 = (secno>>16)&SCSI_LBA_MASK;
+ cmd->scsi_cmd_lba2 = (secno>> 8)&0xff;
+ cmd->scsi_cmd_lba3 = (secno )&0xff;
+ cmd->scsi_cmd_xfer_len = len / tgt->block_size;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_WRITE;
+
+ scsi_go(tgt, sizeof(*cmd), 0, FALSE);
+}
+
+
+int scdisk_mode_select(tgt, lbn, ior, mdata, mlen, save)
+ register target_info_t *tgt;
+ register int lbn;
+ io_req_t ior;
+ char *mdata;
+ int mlen, save;
+{
+ scsi_cmd_mode_select_t *cmd;
+ scsi_mode_select_param_t *parm;
+
+ bzero(tgt->cmd_ptr, sizeof(*cmd) + sizeof(*parm));
+ cmd = (scsi_cmd_mode_select_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_MODE_SELECT;
+ cmd->scsi_cmd_lun_and_lba1 = SCSI_CMD_MSL_PF; /* XXX only if... */
+ cmd->scsi_cmd_xfer_len = sizeof(scsi_mode_select_param_t);/* no vuq */
+
+ parm = (scsi_mode_select_param_t*) (cmd + 1);
+ if (mdata) {
+ cmd->scsi_cmd_xfer_len = mlen;
+ bcopy(mdata, (char*)parm, mlen);
+ if (save)
+ cmd->scsi_cmd_lun_and_lba1 |= SCSI_CMD_MSL_SP;
+ } else {
+ /* parm->medium_type = if (floppy)disk.. */
+ parm->desc_len = 8;
+ /* this really is the LBN */
+ parm->descs[0].density_code = 0;/* XXX default XXX */
+ parm->descs[0].reclen1 = (lbn>>16)&0xff;
+ parm->descs[0].reclen2 = (lbn>> 8)&0xff;
+ parm->descs[0].reclen3 = (lbn )&0xff;
+ mlen = sizeof(*parm);
+ }
+
+ tgt->cur_cmd = SCSI_CMD_MODE_SELECT;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd) + mlen, 0, ior);
+
+ return tgt->done;
+}
+
+/*
+ * SCSI commands fully specific to disks
+ */
+int scsi_read_capacity( tgt, lbn, ior)
+ register target_info_t *tgt;
+ int lbn;
+ io_req_t ior;
+{
+ scsi_cmd_read_capacity_t *cmd;
+
+ bzero(tgt->cmd_ptr, sizeof(*cmd));
+ cmd = (scsi_cmd_read_capacity_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_READ_CAPACITY;
+ /* all zeroes, unless... */
+ if (lbn) {
+ cmd->scsi_cmd_rcap_flags = SCSI_CMD_RCAP_PMI;
+ cmd->scsi_cmd_lba1 = (lbn>>24);
+ cmd->scsi_cmd_lba2 = (lbn>>16)&0xff;
+ cmd->scsi_cmd_lba3 = (lbn>> 8)&0xff;
+ cmd->scsi_cmd_lba4 = (lbn )&0xff;
+ }
+
+ tgt->cur_cmd = SCSI_CMD_READ_CAPACITY;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), sizeof(scsi_rcap_data_t),ior);
+
+ return tgt->done;
+}
+
+void scsi_reassign_blocks( tgt, defect_list, n_defects, ior)
+ register target_info_t *tgt;
+ unsigned int *defect_list; /* In ascending order ! */
+ int n_defects;
+ io_req_t ior;
+{
+ scsi_cmd_reassign_blocks_t *cmd;
+ scsi_Ldefect_data_t *parm;
+
+ cmd = (scsi_cmd_reassign_blocks_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_REASSIGN_BLOCKS;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_xfer_len = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ parm = (scsi_Ldefect_data_t *) (cmd + 1);
+ parm->res1 = parm->res2 = 0;
+ n_defects *= 4; /* in 4-byte-ints */
+ parm->list_len_msb = n_defects >> 8;
+ parm->list_len_lsb = n_defects;
+ bcopy((char*)defect_list, (char*)parm->defects, n_defects);
+
+ tgt->cur_cmd = SCSI_CMD_REASSIGN_BLOCKS;
+
+ scsi_go(tgt, sizeof(*cmd) + sizeof(*parm) + (n_defects - 4), 0, FALSE);
+}
+
+void scsi_medium_removal( tgt, allow, ior)
+ register target_info_t *tgt;
+ boolean_t allow;
+ io_req_t ior;
+{
+ scsi_cmd_medium_removal_t *cmd;
+
+ cmd = (scsi_cmd_medium_removal_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_PREVENT_ALLOW_REMOVAL;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_pa_prevent = allow ? 0 : 1;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+
+ tgt->cur_cmd = SCSI_CMD_PREVENT_ALLOW_REMOVAL;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+}
+
+int scsi_format_unit( tgt, mode, vuqe, intlv, ior)
+ register target_info_t *tgt;
+ int mode, vuqe;
+ register unsigned int intlv;
+ io_req_t ior;
+{
+ scsi_cmd_format_t *cmd;
+ char *parms;
+
+ cmd = (scsi_cmd_format_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_FORMAT_UNIT;
+ cmd->scsi_cmd_lun_and_lba1 =
+ mode & (SCSI_CMD_FMT_FMTDATA|SCSI_CMD_FMT_CMPLIST|SCSI_CMD_FMT_LIST_TYPE);
+ cmd->scsi_cmd_lba2 = vuqe;
+ cmd->scsi_cmd_lba3 = intlv >> 8;
+ cmd->scsi_cmd_xfer_len = intlv;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+	parms = (char*) (cmd + 1);
+ if (ior->io_count)
+ bcopy(ior->io_data, parms, ior->io_count);
+ else
+ bzero(parms, 0xff - sizeof(*cmd));
+
+ tgt->cur_cmd = SCSI_CMD_FORMAT_UNIT;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd) + ior->io_count, 0, ior);
+ return tgt->done;
+}
+
+
+/* Group 1 Commands */
+
+void scsi_long_read( tgt, secno, ior)
+ register target_info_t *tgt;
+ register unsigned int secno;
+ io_req_t ior;
+{
+ scsi_cmd_long_read_t *cmd;
+ register unsigned len, n_blks;
+ unsigned int max_dma_data;
+
+ max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ len = ior->io_count;
+ if (len > max_dma_data)
+ len = max_dma_data;
+ if (len < tgt->block_size)
+ len = tgt->block_size;
+ n_blks = len /tgt->block_size;
+
+ cmd = (scsi_cmd_long_read_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_LONG_READ;
+ cmd->scsi_cmd_lun_and_relbit = 0;
+ cmd->scsi_cmd_lba1 = secno >> 24;
+ cmd->scsi_cmd_lba2 = secno >> 16;
+ cmd->scsi_cmd_lba3 = secno >> 8;
+ cmd->scsi_cmd_lba4 = secno;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = n_blks >> 8;
+ cmd->scsi_cmd_xfer_len_2 = n_blks;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_LONG_READ;
+
+ scsi_go(tgt, sizeof(*cmd), len, FALSE);
+}
+
+void scsi_long_write( tgt, secno, ior)
+ register target_info_t *tgt;
+ register unsigned int secno;
+ io_req_t ior;
+{
+ scsi_cmd_long_write_t *cmd;
+ unsigned len; /* in bytes */
+ unsigned int max_dma_data, n_blks;
+
+ max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ len = ior->io_count;
+ if (len > max_dma_data)
+ len = max_dma_data;
+ if (len < tgt->block_size)
+ len = tgt->block_size;
+ n_blks = len /tgt->block_size;
+
+ cmd = (scsi_cmd_long_write_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_LONG_WRITE;
+ cmd->scsi_cmd_lun_and_relbit = 0;
+ cmd->scsi_cmd_lba1 = secno >> 24;
+ cmd->scsi_cmd_lba2 = secno >> 16;
+ cmd->scsi_cmd_lba3 = secno >> 8;
+ cmd->scsi_cmd_lba4 = secno;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = n_blks >> 8;
+ cmd->scsi_cmd_xfer_len_2 = n_blks;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_LONG_WRITE;
+
+ scsi_go(tgt, sizeof(*cmd), 0, FALSE);
+}
+
+int scdisk_verify( tgt, secno, nsectrs, ior)
+ register target_info_t *tgt;
+ int secno, nsectrs;
+ io_req_t ior;
+{
+ scsi_cmd_verify_long_t *cmd;
+ int len;
+
+ len = ior->io_count;
+
+ cmd = (scsi_cmd_verify_long_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_VERIFY_1;
+ cmd->scsi_cmd_lun_and_relbit = len ? SCSI_CMD_VFY_BYTCHK : 0;
+ cmd->scsi_cmd_lba1 = secno >> 24;
+ cmd->scsi_cmd_lba2 = secno >> 16;
+ cmd->scsi_cmd_lba3 = secno >> 8;
+ cmd->scsi_cmd_lba4 = secno;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = (nsectrs) >> 8;
+ cmd->scsi_cmd_xfer_len_2 = nsectrs;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_VERIFY_1;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd) + len, 0, ior);
+ return tgt->done;
+}
+
+
+int scsi_read_defect( tgt, mode, ior)
+ register target_info_t *tgt;
+ register unsigned int mode;
+ io_req_t ior;
+{
+	scsi_cmd_read_defect_t	*cmd;
+ register unsigned len;
+
+ len = ior->io_count;
+ if (len > 0xffff)
+ len = 0xffff;
+
+ cmd = (scsi_cmd_read_defect_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_READ_DEFECT_DATA;
+ cmd->scsi_cmd_lun_and_relbit = 0;
+ cmd->scsi_cmd_lba1 = mode & 0x1f;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_lba4 = 0;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = (len) >> 8;
+ cmd->scsi_cmd_xfer_len_2 = (len);
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ /* ++ HACK Alert */
+/* tgt->cur_cmd = SCSI_CMD_READ_DEFECT_DATA;*/
+ tgt->cur_cmd = SCSI_CMD_LONG_READ;
+ /* -- HACK Alert */
+
+ scsi_go(tgt, sizeof(*cmd), len, FALSE);
+ iowait(ior);
+ return tgt->done;
+}
+
+
+#if 0 /* unused commands */
+scsi_rezero_unit( tgt, ior)
+ register target_info_t *tgt;
+ io_req_t ior;
+{
+ scsi_cmd_rezero_t *cmd;
+
+ cmd = (scsi_cmd_rezero_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_REZERO_UNIT;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_xfer_len = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+
+ tgt->cur_cmd = SCSI_CMD_REZERO_UNIT;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+}
+
+scsi_seek( tgt, where, ior)
+ register target_info_t *tgt;
+ register unsigned int where;
+ io_req_t ior;
+{
+ scsi_cmd_seek_t *cmd;
+
+ cmd = (scsi_cmd_seek_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_SEEK;
+ cmd->scsi_cmd_lun_and_lba1 = (where >> 16) & 0x1f;
+ cmd->scsi_cmd_lba2 = where >> 8;
+ cmd->scsi_cmd_lba3 = where;
+ cmd->scsi_cmd_xfer_len = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+
+ tgt->cur_cmd = SCSI_CMD_SEEK;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+}
+
+scsi_reserve( tgt, len, id, mode, ior)
+ register target_info_t *tgt;
+ register unsigned int len;
+ unsigned char id;
+	int			mode;
+	io_req_t		ior;
+{
+ scsi_cmd_reserve_t *cmd;
+
+ cmd = (scsi_cmd_reserve_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_RESERVE;
+ cmd->scsi_cmd_lun_and_lba1 = mode & 0x1f;
+ cmd->scsi_cmd_reserve_id = id;
+ cmd->scsi_cmd_extent_llen1 = len >> 8;
+ cmd->scsi_cmd_extent_llen2 = len;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+
+ tgt->cur_cmd = SCSI_CMD_RESERVE;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+}
+
+scsi_release( tgt, id, mode, ior)
+ register target_info_t *tgt;
+ unsigned char id, mode;
+ io_req_t ior;
+{
+ scsi_cmd_release_t *cmd;
+
+ cmd = (scsi_cmd_release_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_RELEASE;
+ cmd->scsi_cmd_lun_and_lba1 = mode & 0x1f;
+ cmd->scsi_cmd_reserve_id = id;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_xfer_len = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+
+ tgt->cur_cmd = SCSI_CMD_RELEASE;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+}
+
+
+/* Group 1 Commands */
+
+scsi_long_seek( tgt, secno, ior)
+ register target_info_t *tgt;
+	register unsigned int	secno;
+	io_req_t		ior;
+{
+ scsi_cmd_long_seek_t *cmd;
+
+ cmd = (scsi_cmd_long_seek_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_LONG_SEEK;
+ cmd->scsi_cmd_lun_and_relbit = 0;
+ cmd->scsi_cmd_lba1 = secno >> 24;
+ cmd->scsi_cmd_lba2 = secno >> 16;
+ cmd->scsi_cmd_lba3 = secno >> 8;
+ cmd->scsi_cmd_lba4 = secno;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = 0;
+ cmd->scsi_cmd_xfer_len_2 = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_LONG_SEEK;
+
+ scsi_go(tgt, sizeof(*cmd), 0, FALSE);
+}
+
+scsi_write_verify( tgt, secno, ior)
+ register target_info_t *tgt;
+	register unsigned int	secno;
+	io_req_t		ior;
+{
+ scsi_cmd_write_vfy_t *cmd;
+ unsigned len; /* in bytes */
+ unsigned int max_dma_data, n_blks;
+
+ max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ len = ior->io_count;
+ if (len > max_dma_data)
+ len = max_dma_data;
+ if (len < tgt->block_size)
+ len = tgt->block_size;
+ n_blks = len / tgt->block_size;
+
+ cmd = (scsi_cmd_write_vfy_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_WRITE_AND_VERIFY;
+ cmd->scsi_cmd_lun_and_relbit = SCSI_CMD_VFY_BYTCHK;
+ cmd->scsi_cmd_lba1 = secno >> 24;
+ cmd->scsi_cmd_lba2 = secno >> 16;
+ cmd->scsi_cmd_lba3 = secno >> 8;
+ cmd->scsi_cmd_lba4 = secno;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = n_blks >> 8;
+ cmd->scsi_cmd_xfer_len_2 = n_blks;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_WRITE_AND_VERIFY;
+
+ scsi_go(tgt, sizeof(*cmd), 0, FALSE);
+}
+
+scsi_search_data( tgt, secno, how, flags, ior)
+ register target_info_t *tgt;
+	register unsigned int	secno;
+	int			how, flags;
+	io_req_t		ior;
+{
+ scsi_cmd_search_t *cmd;
+ unsigned len; /* in bytes */
+ unsigned int max_dma_data, n_blks;
+
+ max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ if (how != SCSI_CMD_SEARCH_HIGH &&
+ how != SCSI_CMD_SEARCH_EQUAL &&
+ how != SCSI_CMD_SEARCH_LOW)
+ panic("scsi_search_data");
+
+ len = ior->io_count;
+ if (len > max_dma_data)
+ len = max_dma_data;
+ n_blks = len / tgt->block_size;
+
+ cmd = (scsi_cmd_search_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = how;
+ cmd->scsi_cmd_lun_and_relbit = flags & 0x1e;
+ cmd->scsi_cmd_lba1 = secno >> 24;
+ cmd->scsi_cmd_lba2 = secno >> 16;
+ cmd->scsi_cmd_lba3 = secno >> 8;
+ cmd->scsi_cmd_lba4 = secno;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = n_blks >> 8;
+ cmd->scsi_cmd_xfer_len_2 = n_blks;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = how;
+
+ scsi_go(tgt, sizeof(*cmd), 0, FALSE);
+}
+
+
+scsi_set_limits( tgt, secno, nblocks, inhibit, ior)
+ register target_info_t *tgt;
+	register unsigned int	secno;
+	unsigned int		nblocks;
+	int			inhibit;
+	io_req_t		ior;
+{
+ scsi_cmd_set_limits_t *cmd;
+
+ cmd = (scsi_cmd_set_limits_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_SET_LIMITS;
+ cmd->scsi_cmd_lun_and_relbit = inhibit & 0x3;
+ cmd->scsi_cmd_lba1 = secno >> 24;
+ cmd->scsi_cmd_lba2 = secno >> 16;
+ cmd->scsi_cmd_lba3 = secno >> 8;
+ cmd->scsi_cmd_lba4 = secno;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = nblocks >> 8;
+ cmd->scsi_cmd_xfer_len_2 = nblocks;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_SET_LIMITS;
+
+ scsi_go(tgt, sizeof(*cmd), 0, FALSE);
+}
+
+
+#endif
+
+#ifdef SCSI2
+scsi_lock_cache
+scsi_prefetch
+scsi_read_defect_data
+scsi_sync_cache
+scsi_write_same
+#endif /* SCSI2 */
+#endif /* NSCSI > 0 */
diff --git a/scsi/scsi_endian.h b/scsi/scsi_endian.h
new file mode 100644
index 00000000..d298a78e
--- /dev/null
+++ b/scsi/scsi_endian.h
@@ -0,0 +1,66 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_endian.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/91
+ *
+ * Byte/Bit order issues are solved here.
+ */
+
+
+#ifndef _SCSI_ENDIAN_H_
+#define _SCSI_ENDIAN_H_ 1
+
+/*
+ * Macros to take care of bitfield placement within a byte.
+ * It might be possible to extend these to something that
+ * takes care of multibyte structures, using perhaps the
+ * type ("t") parameter. Someday.
+ */
+#if BYTE_MSF
+
+#define BITFIELD_2(t,a,b) t b,a
+#define BITFIELD_3(t,a,b,c) t c,b,a
+#define BITFIELD_4(t,a,b,c,d) t d,c,b,a
+#define BITFIELD_5(t,a,b,c,d,e) t e,d,c,b,a
+#define BITFIELD_6(t,a,b,c,d,e,f) t f,e,d,c,b,a
+#define BITFIELD_7(t,a,b,c,d,e,f,g) t g,f,e,d,c,b,a
+#define BITFIELD_8(t,a,b,c,d,e,f,g,h) t h,g,f,e,d,c,b,a
+
+#else /*BYTE_MSF*/
+
+#define BITFIELD_2(t,a,b) t a,b
+#define BITFIELD_3(t,a,b,c) t a,b,c
+#define BITFIELD_4(t,a,b,c,d) t a,b,c,d
+#define BITFIELD_5(t,a,b,c,d,e) t a,b,c,d,e
+#define BITFIELD_6(t,a,b,c,d,e,f)	t a,b,c,d,e,f
+#define BITFIELD_7(t,a,b,c,d,e,f,g) t a,b,c,d,e,f,g
+#define BITFIELD_8(t,a,b,c,d,e,f,g,h) t a,b,c,d,e,f,g,h
+
+#endif /*BYTE_MSF*/
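+
+/*
+ * For illustration (hypothetical struct): a command byte holding a
+ * 5-bit field and a 3-bit LUN is declared once and lays out correctly
+ * for either bit order:
+ *
+ *	struct {
+ *		BITFIELD_2(unsigned char,
+ *			   lba1 : 5,
+ *			   lun  : 3);
+ *	} byte1;
+ */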
+
+#endif /*_SCSI_ENDIAN_H_*/
diff --git a/scsi/scsi_jukebox.c b/scsi/scsi_jukebox.c
new file mode 100644
index 00000000..b62d0bf9
--- /dev/null
+++ b/scsi/scsi_jukebox.c
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_jukebox.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Middle layer of the SCSI driver: SCSI protocol implementation
+ *
+ * This file contains code for SCSI commands for MEDIA CHANGER devices.
+ */
+
+#include <scsi.h>
+#if (NSCSI > 0)
+
+#include <mach/std_types.h>
+
+char *scjb_name(internal)
+ boolean_t internal;
+{
+ return internal ? "jz" : "jukebox";
+}
+
+#if 0
+scsi_exchange_medium
+scsi_init_element_status
+scsi_move_medium
+scsi_position_to_element
+scsi_read_element_status
+scsi_request_volume_address
+scsi_send_volume_tag
+#endif
+#endif /* NSCSI > 0 */
+
diff --git a/scsi/scsi_optical.c b/scsi/scsi_optical.c
new file mode 100644
index 00000000..61881747
--- /dev/null
+++ b/scsi/scsi_optical.c
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_optical.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Middle layer of the SCSI driver: SCSI protocol implementation
+ *
+ * This file contains code for SCSI commands for OPTICAL MEMORY devices.
+ */
+
+#include <scsi.h>
+#if (NSCSI > 0)
+
+#include <mach/std_types.h>
+
+char *scmem_name(internal)
+ boolean_t internal;
+{
+ return internal ? "rz" : "optical disk";
+}
+
+#if 0
+scsi_erase_long
+scsi_medium_scan
+scsi_read_defect_data
+scsi_read_generation
+scsi_read_updated_block
+scsi_update_block
+scsi_write_verify_long
+#endif
+#endif /* NSCSI > 0 */
+
diff --git a/scsi/scsi_printer.c b/scsi/scsi_printer.c
new file mode 100644
index 00000000..0ffb09ab
--- /dev/null
+++ b/scsi/scsi_printer.c
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_printer.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Middle layer of the SCSI driver: SCSI protocol implementation
+ *
+ * This file contains code for SCSI commands for PRINTER devices.
+ */
+
+#include <scsi.h>
+#if (NSCSI > 0)
+
+#include <mach/std_types.h>
+
+
+char *scprt_name(internal)
+ boolean_t internal;
+{
+ return internal ? "lz" : "printer";
+}
+
+void scprt_optimize()
+{}
+
+#if 0
+scsi_print
+scsi_slew_and_print
+scsi_flush_buffer
+scsi_stop_print
+#endif
+#endif /* NSCSI > 0 */
diff --git a/scsi/scsi_rom.c b/scsi/scsi_rom.c
new file mode 100644
index 00000000..1a8dec96
--- /dev/null
+++ b/scsi/scsi_rom.c
@@ -0,0 +1,401 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_rom.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Middle layer of the SCSI driver: SCSI protocol implementation
+ *
+ * This file contains code for SCSI commands for CD-ROM devices.
+ */
+
+#include <mach/std_types.h>
+#include <scsi/compat_30.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi2.h>
+#include <scsi/scsi_defs.h>
+
+#if (NSCSI > 0)
+
+char *sccdrom_name(
+ boolean_t internal)
+{
+ return internal ? "rz" : "CD-ROM";
+}
+
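+/*
+ * Every routine below follows the same pattern: build the command
+ * descriptor block in tgt->cmd_ptr, storing multi-byte fields most
+ * significant byte first, note the opcode in tgt->cur_cmd, issue the
+ * command with scsi_go_and_wait(), and return the completion status
+ * left in tgt->done.
+ */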
+int scsi_pause_resume(
+ target_info_t *tgt,
+ boolean_t stop_it,
+ io_req_t ior)
+{
+ scsi_cmd_pausres_t *cmd;
+
+ cmd = (scsi_cmd_pausres_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_PAUSE_RESUME;
+ cmd->scsi_cmd_lun_and_relbit = 0;
+ cmd->scsi_cmd_lba1 = 0;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_lba4 = 0;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = 0;
+ cmd->scsi_cmd_pausres_res = stop_it ? 0 : SCSI_CMD_PAUSRES_RESUME;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_PAUSE_RESUME;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+ return tgt->done;
+}
+
+scsi_play_audio(
+ target_info_t *tgt,
+ unsigned int start,
+ unsigned int len,
+ boolean_t relative_address,
+ io_req_t ior)
+{
+ scsi_cmd_play_audio_t *cmd;
+
+ cmd = (scsi_cmd_play_audio_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_PLAY_AUDIO;
+ cmd->scsi_cmd_lun_and_relbit = relative_address ? SCSI_RELADR : 0;
+ cmd->scsi_cmd_lba1 = start >> 24;
+ cmd->scsi_cmd_lba2 = start >> 16;
+ cmd->scsi_cmd_lba3 = start >> 8;
+ cmd->scsi_cmd_lba4 = start >> 0;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = len >> 8;
+ cmd->scsi_cmd_xfer_len_2 = len >> 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_PLAY_AUDIO;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+ return tgt->done;
+}
+
+scsi_play_audio_long(
+ target_info_t *tgt,
+ unsigned int start,
+ unsigned int len,
+ boolean_t relative_address,
+ io_req_t ior)
+{
+ scsi_cmd_play_audio_l_t *cmd;
+
+ cmd = (scsi_cmd_play_audio_l_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_PLAY_AUDIO_LONG;
+ cmd->scsi_cmd_lun_and_relbit = relative_address ? SCSI_RELADR : 0;
+ cmd->scsi_cmd_lba1 = start >> 24;
+ cmd->scsi_cmd_lba2 = start >> 16;
+ cmd->scsi_cmd_lba3 = start >> 8;
+ cmd->scsi_cmd_lba4 = start >> 0;
+ cmd->scsi_cmd_xfer_len_1 = len >> 24;
+ cmd->scsi_cmd_xfer_len_2 = len >> 16;
+ cmd->scsi_cmd_xfer_len_3 = len >> 8;
+ cmd->scsi_cmd_xfer_len_4 = len >> 0;
+ cmd->scsi_cmd_xxx1 = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_PLAY_AUDIO_LONG;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+ return tgt->done;
+}
+
+scsi_play_audio_msf(
+ target_info_t *tgt,
+ int sm,
+ int ss,
+ int sf,
+ int em,
+ int es,
+ int ef,
+ io_req_t ior)
+{
+ scsi_cmd_play_audio_msf_t *cmd;
+
+ cmd = (scsi_cmd_play_audio_msf_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_PLAY_AUDIO_MSF;
+ cmd->scsi_cmd_lun_and_relbit = 0;
+ cmd->scsi_cmd_lba1 = 0;
+ cmd->scsi_cmd_pamsf_startM = sm;
+ cmd->scsi_cmd_pamsf_startS = ss;
+ cmd->scsi_cmd_pamsf_startF = sf;
+ cmd->scsi_cmd_pamsf_endM = em;
+ cmd->scsi_cmd_pamsf_endS = es;
+ cmd->scsi_cmd_pamsf_endF = ef;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_PLAY_AUDIO_MSF;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+ return tgt->done;
+}
+
+scsi_play_audio_track_index(
+ target_info_t *tgt,
+ int st,
+ int si,
+ int et,
+ int ei,
+ io_req_t ior)
+{
+ scsi_cmd_play_audio_ti_t *cmd;
+
+ cmd = (scsi_cmd_play_audio_ti_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_PLAY_AUDIO_TI;
+ cmd->scsi_cmd_lun_and_relbit = 0;
+ cmd->scsi_cmd_lba1 = 0;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_pati_startT = st;
+ cmd->scsi_cmd_pati_startI = si;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_pati_endT = et;
+ cmd->scsi_cmd_pati_endI = ei;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_PLAY_AUDIO_TI;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+ return tgt->done;
+}
+
+scsi_play_audio_track_relative(
+ target_info_t *tgt,
+ unsigned int lba,
+ int st,
+ unsigned int len,
+ io_req_t ior)
+{
+ scsi_cmd_play_audio_tr_t *cmd;
+
+ cmd = (scsi_cmd_play_audio_tr_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_PLAY_AUDIO_TR;
+ cmd->scsi_cmd_lun_and_relbit = 0;
+ cmd->scsi_cmd_lba1 = lba >> 24;
+ cmd->scsi_cmd_lba2 = lba >> 16;
+ cmd->scsi_cmd_lba3 = lba >> 8;
+ cmd->scsi_cmd_lba4 = lba >> 0;
+ cmd->scsi_cmd_patr_startT = st;
+ cmd->scsi_cmd_xfer_len_1 = len >> 8;
+ cmd->scsi_cmd_xfer_len_2 = len >> 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_PLAY_AUDIO_TR;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+ return tgt->done;
+}
+
+scsi_play_audio_track_relative_long(
+ target_info_t *tgt,
+ unsigned int lba,
+ int st,
+ unsigned int len,
+ io_req_t ior)
+{
+ scsi_cmd_play_audio_tr_l_t *cmd;
+
+ cmd = (scsi_cmd_play_audio_tr_l_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_PLAY_AUDIO_TR_LONG;
+ cmd->scsi_cmd_lun_and_relbit = 0;
+ cmd->scsi_cmd_lba1 = lba >> 24;
+ cmd->scsi_cmd_lba2 = lba >> 16;
+ cmd->scsi_cmd_lba3 = lba >> 8;
+ cmd->scsi_cmd_lba4 = lba >> 0;
+ cmd->scsi_cmd_xfer_len_1 = len >> 24;
+ cmd->scsi_cmd_xfer_len_2 = len >> 16;
+ cmd->scsi_cmd_xfer_len_3 = len >> 8;
+ cmd->scsi_cmd_xfer_len_4 = len >> 0;
+ cmd->scsi_cmd_patrl_startT = st;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_PLAY_AUDIO_TR_LONG;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+ return tgt->done;
+}
+
+scsi_read_header(
+ target_info_t *tgt,
+ boolean_t msf_format,
+ unsigned int lba,
+ unsigned int allocsize,
+ io_req_t ior)
+{
+ scsi_cmd_read_header_t *cmd;
+
+ cmd = (scsi_cmd_read_header_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_READ_HEADER;
+ cmd->scsi_cmd_lun_and_relbit = msf_format ? SCSI_CMD_CD_MSF : 0;
+ cmd->scsi_cmd_lba1 = lba >> 24;
+ cmd->scsi_cmd_lba2 = lba >> 16;
+ cmd->scsi_cmd_lba3 = lba >> 8;
+ cmd->scsi_cmd_lba4 = lba >> 0;
+ cmd->scsi_cmd_xxx = 0;
+ cmd->scsi_cmd_xfer_len_1 = allocsize >> 8;
+ cmd->scsi_cmd_xfer_len_2 = allocsize >> 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_READ_HEADER;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), allocsize, ior);
+
+ return tgt->done;
+}
+
+scsi_read_subchannel(
+ target_info_t *tgt,
+ boolean_t msf_format,
+ unsigned int data_format,
+ unsigned int trackno,
+ io_req_t ior)
+{
+ scsi_cmd_read_subch_t *cmd;
+	int		allocsize = 0;	/* stays 0 if the format code is unrecognized */
+
+ switch (data_format) {
+ case SCSI_CMD_RS_FMT_SUBQ:
+ allocsize = sizeof(cdrom_chan_data_t);
+ trackno = 0; break;
+ case SCSI_CMD_RS_FMT_CURPOS:
+ allocsize = sizeof(cdrom_chan_curpos_t);
+ trackno = 0; break;
+ case SCSI_CMD_RS_FMT_CATALOG:
+ allocsize = sizeof(cdrom_chan_catalog_t);
+ trackno = 0; break;
+ case SCSI_CMD_RS_FMT_ISRC:
+ allocsize = sizeof(cdrom_chan_isrc_t); break;
+ }
+
+ cmd = (scsi_cmd_read_subch_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_READ_SUBCH;
+ cmd->scsi_cmd_lun_and_relbit = msf_format ? SCSI_CMD_CD_MSF : 0;
+ cmd->scsi_cmd_lba1 = SCSI_CMD_RS_SUBQ;
+ cmd->scsi_cmd_rs_format = data_format;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_lba4 = 0;
+ cmd->scsi_cmd_rs_trackno = trackno;
+ cmd->scsi_cmd_xfer_len_1 = allocsize >> 8;
+ cmd->scsi_cmd_xfer_len_2 = allocsize >> 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_READ_SUBCH;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), allocsize, ior);
+
+ return tgt->done;
+}
+
+scsi_read_toc(
+ target_info_t *tgt,
+ boolean_t msf_format,
+ int trackno,
+ int allocsize,
+ io_req_t ior)
+{
+ scsi_cmd_read_toc_t *cmd;
+
+ cmd = (scsi_cmd_read_toc_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_READ_TOC;
+ cmd->scsi_cmd_lun_and_relbit = msf_format ? SCSI_CMD_CD_MSF : 0;
+ cmd->scsi_cmd_lba1 = 0;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_lba4 = 0;
+ cmd->scsi_cmd_rtoc_startT = trackno;
+ cmd->scsi_cmd_xfer_len_1 = allocsize >> 8;
+ cmd->scsi_cmd_xfer_len_2 = allocsize >> 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_READ_TOC;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), allocsize, ior);
+
+ return tgt->done;
+}
+
+/* move elsewhere if it works */
+scsi2_mode_select(
+ target_info_t *tgt,
+ boolean_t save,
+ unsigned char *page,
+ int pagesize,
+ io_req_t ior)
+{
+ scsi_cmd_mode_select_t *cmd;
+ scsi2_mode_param_t *parm;
+
+ bzero(tgt->cmd_ptr, sizeof(*cmd) + sizeof(*parm));
+ cmd = (scsi_cmd_mode_select_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_MODE_SELECT;
+ cmd->scsi_cmd_lun_and_lba1 = SCSI_CMD_MSL_PF | (save ? SCSI_CMD_MSL_SP : 0);
+ cmd->scsi_cmd_xfer_len = pagesize;
+
+ parm = (scsi2_mode_param_t*) (cmd + 1);
+
+ bcopy(page, parm, pagesize);
+
+ tgt->cur_cmd = SCSI_CMD_MODE_SELECT;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd) + pagesize, 0, ior);
+
+ return tgt->done;
+}
+
+/*
+ * Obnoxious but unavoidable: pass a caller-supplied, vendor-specific
+ * CDB (and any parameter bytes) through to the device as-is.
+ */
+cdrom_vendor_specific(
+ target_info_t *tgt,
+ scsi_command_group_2 *cmd,
+ unsigned char *params,
+ int paramlen,
+ int retlen,
+ io_req_t ior)
+{
+ bcopy(cmd, tgt->cmd_ptr, sizeof(*cmd));
+ if (paramlen)
+ bcopy(params, tgt->cmd_ptr + sizeof(*cmd), paramlen);
+
+ tgt->cur_cmd = cmd->scsi_cmd_code;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd) + paramlen, retlen, ior);
+
+ return tgt->done;
+}
+#endif /* NSCSI > 0 */
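
Each routine above stores multi-byte CDB fields one byte at a time, most significant
byte first, since SCSI command blocks are big-endian regardless of host byte order.
Purely as an illustration (no such helper exists in this file), the repeated LBA
packing amounts to:

    /* hypothetical helper: store a 32-bit logical block address MSB-first */
    static void cdrom_set_lba(scsi_cmd_play_audio_t *cmd, unsigned int lba)
    {
    	cmd->scsi_cmd_lba1 = lba >> 24;
    	cmd->scsi_cmd_lba2 = lba >> 16;
    	cmd->scsi_cmd_lba3 = lba >>  8;
    	cmd->scsi_cmd_lba4 = lba >>  0;
    }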
diff --git a/scsi/scsi_scanner.c b/scsi/scsi_scanner.c
new file mode 100644
index 00000000..b6ba3585
--- /dev/null
+++ b/scsi/scsi_scanner.c
@@ -0,0 +1,54 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_scanner.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Middle layer of the SCSI driver: SCSI protocol implementation
+ *
+ * This file contains code for SCSI commands for SCANNER devices.
+ */
+
+#include <scsi.h>
+#if (NSCSI > 0)
+
+#include <mach/std_types.h>
+
+char *scscn_name(internal)
+ boolean_t internal;
+{
+ return internal ? "oz" : "scanner";
+}
+
+#if 0
+scsi_get_buffer_status
+scsi_get_window
+scsi_object_position
+scsi_scan
+scsi_set_window
+#endif
+#endif /* NSCSI > 0 */
diff --git a/scsi/scsi_tape.c b/scsi/scsi_tape.c
new file mode 100644
index 00000000..c73f4329
--- /dev/null
+++ b/scsi/scsi_tape.c
@@ -0,0 +1,415 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_tape.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Middle layer of the SCSI driver: SCSI protocol implementation
+ *
+ * This file contains code for SCSI commands for SEQUENTIAL ACCESS devices.
+ */
+
+#include <mach/std_types.h>
+#include <scsi/compat_30.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_defs.h>
+
+#if (NSCSI > 0)
+
+
+char *sctape_name(internal)
+ boolean_t internal;
+{
+ return internal ? "tz" : "tape";
+}
+
+void sctape_optimize(tgt)
+ target_info_t *tgt;
+{
+ register int i;
+ char result[6];
+
+ /* Some (DEC) tapes want to send you the self-test results */
+ for (i = 0; i < 10; i++) {
+ if (scsi_receive_diag( tgt, result, sizeof(result), 0)
+ == SCSI_RET_SUCCESS)
+ break;
+ }
+ if (scsi_debug)
+ printf("[tape_rcvdiag: after %d, x%x x%x x%x x%x x%x x%x]\n", i+1,
+ result[0], result[1], result[2], result[3], result[4], result[5]);
+}
+
+/*
+ * SCSI commands specific to sequential access devices
+ */
+int sctape_mode_select( tgt, vuque_data, vuque_data_len, newspeed, ior)
+ register target_info_t *tgt;
+ unsigned char *vuque_data;
+ int vuque_data_len;
+ int newspeed;
+ io_req_t ior;
+{
+ scsi_cmd_mode_select_t *cmd;
+ scsi_mode_select_param_t *parm;
+
+ bzero(tgt->cmd_ptr, sizeof(*cmd) + 2 * sizeof(*parm));
+ cmd = (scsi_cmd_mode_select_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_MODE_SELECT;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_xfer_len = sizeof(scsi_mode_select_param_t) + vuque_data_len;
+
+ parm = (scsi_mode_select_param_t*) (cmd + 1);
+ if (newspeed) {
+ parm->speed = tgt->dev_info.tape.speed;
+ } else {
+ /* Standard sez 0 -> no change */
+ parm->speed = 0;
+ }
+ /* parm->medium_type = 0; reserved */
+ parm->descs[0].density_code = tgt->dev_info.tape.density;
+ parm->buffer_mode = 1;
+ parm->desc_len = 8;
+ if (tgt->dev_info.tape.fixed_size) {
+ register int reclen = tgt->block_size;
+ parm->descs[0].reclen1 = reclen >> 16;
+ parm->descs[0].reclen2 = reclen >> 8;
+ parm->descs[0].reclen3 = reclen;
+ }
+
+ if (vuque_data_len)
+ bcopy(vuque_data, (char*)(parm+1), vuque_data_len);
+
+ tgt->cur_cmd = SCSI_CMD_MODE_SELECT;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd) + sizeof(*parm) + vuque_data_len, 0, ior);
+
+ return tgt->done;
+}
+
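+/*
+ * Read from the tape.  For fixed-block tapes the CDB transfer length
+ * counts blocks and any odd remainder of the request is charged to
+ * io_residual; in variable mode it counts bytes, clamped to the HBA's
+ * DMA limit and the device's maximum record length, and raised to at
+ * least the block size.
+ */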
+void sctape_read( tgt, ior)
+ register target_info_t *tgt;
+ io_req_t ior;
+{
+ scsi_cmd_read_t *cmd;
+ register unsigned len, max;
+# define nbytes max
+ boolean_t fixed = FALSE;
+
+ max = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ len = ior->io_count;
+ if (tgt->dev_info.tape.fixed_size) {
+ unsigned int bs = tgt->block_size;
+ fixed = TRUE;
+ nbytes = len;
+ ior->io_residual += len % bs;
+ len = len / bs;
+ } else {
+ if (max > tgt->dev_info.tape.maxreclen)
+ max = tgt->dev_info.tape.maxreclen;
+ if (len > max) {
+ ior->io_residual = len - max;
+ len = max;
+ }
+ if (len < tgt->block_size)
+ len = tgt->block_size;
+ nbytes = len;
+ }
+
+ cmd = (scsi_cmd_read_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_READ;
+ cmd->scsi_cmd_lun_and_lba1 = fixed ? SCSI_CMD_TP_FIXED : 0;
+ cmd->scsi_cmd_lba2 = len >> 16;
+ cmd->scsi_cmd_lba3 = len >> 8;
+ cmd->scsi_cmd_xfer_len = len;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_READ;
+
+ scsi_go(tgt, sizeof(*cmd), nbytes, FALSE);
+#undef nbytes
+}
+
+void sctape_write( tgt, ior)
+ register target_info_t *tgt;
+ io_req_t ior;
+{
+ scsi_cmd_write_t *cmd;
+ register unsigned len, max;
+ boolean_t fixed = FALSE;
+
+ len = ior->io_count;
+ max = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ if (tgt->dev_info.tape.fixed_size) {
+ unsigned int bs = tgt->block_size;
+ fixed = TRUE;
+ ior->io_residual += len % bs;
+ len = len / bs;
+ } else {
+ if (max > tgt->dev_info.tape.maxreclen)
+ max = tgt->dev_info.tape.maxreclen;
+ if (len > max) {
+ ior->io_residual = len - max;
+ len = max;
+ }
+ if (len < tgt->block_size)
+ len = tgt->block_size;
+ }
+
+ cmd = (scsi_cmd_write_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_WRITE;
+ cmd->scsi_cmd_lun_and_lba1 = fixed ? SCSI_CMD_TP_FIXED : 0;
+ cmd->scsi_cmd_lba2 = len >> 16;
+ cmd->scsi_cmd_lba3 = len >> 8;
+ cmd->scsi_cmd_xfer_len = len;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_WRITE;
+
+ scsi_go(tgt, sizeof(*cmd), 0, FALSE);
+}
+
+int scsi_rewind( tgt, ior, wait)
+ register target_info_t *tgt;
+ io_req_t ior;
+ boolean_t wait;
+{
+ scsi_cmd_rewind_t *cmd;
+
+
+ cmd = (scsi_cmd_rewind_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_REWIND;
+ cmd->scsi_cmd_lun_and_lba1 = wait ? 0 : SCSI_CMD_REW_IMMED;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_xfer_len = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_REWIND;
+
+ scsi_go( tgt, sizeof(*cmd), 0, FALSE);
+ return SCSI_RET_SUCCESS;
+}
+
+int scsi_write_filemarks( tgt, count, ior)
+ register target_info_t *tgt;
+ register unsigned int count;
+ io_req_t ior;
+{
+ scsi_cmd_write_fil_t *cmd;
+
+ cmd = (scsi_cmd_write_fil_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_WRITE_FILEMARKS;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = count >> 16;
+ cmd->scsi_cmd_lba3 = count >> 8;
+ cmd->scsi_cmd_xfer_len = count;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+
+ tgt->cur_cmd = SCSI_CMD_WRITE_FILEMARKS;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+ return tgt->done;
+}
+
+int scsi_space( tgt, mode, count, ior)
+ register target_info_t *tgt;
+ int mode;
+ register int count;
+ io_req_t ior;
+{
+ scsi_cmd_space_t *cmd;
+
+ cmd = (scsi_cmd_space_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_SPACE;
+ cmd->scsi_cmd_lun_and_lba1 = mode & 0x3;
+ cmd->scsi_cmd_lba2 = count >> 16;
+ cmd->scsi_cmd_lba3 = count >> 8;
+ cmd->scsi_cmd_xfer_len = count;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+
+ tgt->cur_cmd = SCSI_CMD_SPACE;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+
+ return tgt->done;
+}
+
+
+int scsi_read_block_limits( tgt, ior)
+ register target_info_t *tgt;
+ io_req_t ior;
+{
+ scsi_cmd_block_limits_t *cmd;
+
+ cmd = (scsi_cmd_block_limits_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_READ_BLOCK_LIMITS;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_xfer_len = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+
+ tgt->cur_cmd = SCSI_CMD_READ_BLOCK_LIMITS;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), sizeof(scsi_blimits_data_t), ior);
+ return tgt->done;
+}
+
+#if 0 /* unused */
+
+void scsi_track_select( tgt, trackno, ior)
+ register target_info_t *tgt;
+ register unsigned char trackno;
+ io_req_t ior;
+{
+ scsi_cmd_seek_t *cmd;
+
+ cmd = (scsi_cmd_seek_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_TRACK_SELECT;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_tp_trackno = trackno;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+
+ tgt->cur_cmd = SCSI_CMD_TRACK_SELECT;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+}
+
+void scsi_read_reverse( tgt, ior)
+ register target_info_t *tgt;
+ io_req_t ior;
+{
+ scsi_cmd_rev_read_t *cmd;
+ register unsigned len;
+ unsigned int max_dma_data;
+
+ max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ len = ior->io_count;
+ if (len > max_dma_data)
+ len = max_dma_data;
+
+ cmd = (scsi_cmd_rev_read_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_READ_REVERSE;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = len >> 16;
+ cmd->scsi_cmd_lba3 = len >> 8;
+ cmd->scsi_cmd_xfer_len = len;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_READ_REVERSE;
+
+ scsi_go(tgt, sizeof(*cmd), len, FALSE);
+}
+
+void sctape_verify( tgt, len, ior)
+ register target_info_t *tgt;
+ register unsigned int len;
+ io_req_t ior;
+{
+ scsi_cmd_verify_t *cmd;
+
+ cmd = (scsi_cmd_verify_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_VERIFY_0;
+ cmd->scsi_cmd_lun_and_lba1 = 0;/* XXX */
+ cmd->scsi_cmd_lba2 = len >> 16;
+ cmd->scsi_cmd_lba3 = len >> 8;
+ cmd->scsi_cmd_xfer_len = len;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+
+ tgt->cur_cmd = SCSI_CMD_VERIFY_0;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+}
+
+
+void scsi_recover_buffered_data( tgt, ior)
+ register target_info_t *tgt;
+ io_req_t ior;
+{
+ scsi_cmd_recover_buffer_t *cmd;
+ register unsigned len;
+ unsigned int max_dma_data;
+
+ max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;
+
+ len = ior->io_count;
+ if (len > max_dma_data)
+ len = max_dma_data;
+
+ cmd = (scsi_cmd_recover_buffer_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_RECOVER_BUFFERED_DATA;
+ cmd->scsi_cmd_lun_and_lba1 = 0;
+ cmd->scsi_cmd_lba2 = len >> 16;
+ cmd->scsi_cmd_lba3 = len >> 8;
+ cmd->scsi_cmd_xfer_len = len;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+ tgt->cur_cmd = SCSI_CMD_RECOVER_BUFFERED_DATA;
+
+ scsi_go(tgt, sizeof(*cmd), len, FALSE);
+}
+
+void scsi_erase( tgt, mode, ior)
+	register target_info_t	*tgt;
+	int			mode;
+	io_req_t		ior;
+{
+ scsi_cmd_erase_t *cmd;
+
+ cmd = (scsi_cmd_erase_t*) (tgt->cmd_ptr);
+ cmd->scsi_cmd_code = SCSI_CMD_ERASE;
+ cmd->scsi_cmd_lun_and_lba1 = mode & SCSI_CMD_ER_LONG;
+ cmd->scsi_cmd_lba2 = 0;
+ cmd->scsi_cmd_lba3 = 0;
+ cmd->scsi_cmd_xfer_len = 0;
+ cmd->scsi_cmd_ctrl_byte = 0; /* not linked */
+
+
+ tgt->cur_cmd = SCSI_CMD_ERASE;
+
+ scsi_go_and_wait(tgt, sizeof(*cmd), 0, ior);
+}
+
+#endif
+
+#ifdef SCSI2
+scsi_locate
+scsi_read_position
+#endif /* SCSI2 */
+#endif /* NSCSI > 0 */
diff --git a/scsi/scsi_worm.c b/scsi/scsi_worm.c
new file mode 100644
index 00000000..7490ea21
--- /dev/null
+++ b/scsi/scsi_worm.c
@@ -0,0 +1,51 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: scsi_worm.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/90
+ *
+ * Middle layer of the SCSI driver: SCSI protocol implementation
+ *
+ * This file contains code for SCSI commands for WORM devices,
+ * e.g. disks that employ write once / read multiple media.
+ */
+
+#include <mach/std_types.h>
+
+
+
+char *scworm_name(internal)
+ boolean_t internal;
+{
+ return internal ? "rz" : "WORM-disk";
+}
+
+#ifdef SCSI2
+see optical mem:
+ - no format
+ - no "update"
+#endif /* SCSI2 */
diff --git a/util/Makerules b/util/Makerules
new file mode 100644
index 00000000..0d6a2315
--- /dev/null
+++ b/util/Makerules
@@ -0,0 +1,32 @@
+#
+# Copyright (c) 1994 The University of Utah and
+# the Computer Systems Laboratory (CSL). All rights reserved.
+#
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+# IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+# ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+#
+# CSS requests users of this software to return to css-dist@cs.utah.edu any
+# improvements that they make and grant CSS redistribution rights.
+#
+# Author: Bryan Ford, University of Utah CSS
+#
+ifndef _kernel_util_makerules_
+_kernel_util_makerules_ = yes
+
+
+# Old defines that I hope to phase out
+DEFINES += -DMACH_KERNEL
+
+
+# Include the makefile containing the generic rules.
+include $(GSRCDIR)/Makerules
+
+
+endif
diff --git a/util/about_to_die.c b/util/about_to_die.c
new file mode 100644
index 00000000..9b039ccb
--- /dev/null
+++ b/util/about_to_die.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+/* Call this to declare intention to die,
+ before doing final printfs and such.
+ The default implementation does nothing,
+ but typical actions might be to restore the video mode
+ to a standard setting, or stop other processors.
+ Note that this might very well be called several times
+ before die() is finally called. */
+void about_to_die(int exit_code)
+{
+}
+
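
The comment above spells out the hook's contract; as a sketch only, a machine-dependent
build might replace this empty default with something along these lines
(reset_video_mode() and halt_other_cpus() are hypothetical names, not functions in this
tree):

    void about_to_die(int exit_code)
    {
    	/* May run several times before die(), so keep it idempotent. */
    	reset_video_mode();	/* make the final printfs readable */
    	halt_other_cpus();	/* keep other processors quiet meanwhile */
    }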
diff --git a/util/config.h b/util/config.h
new file mode 100644
index 00000000..23b2bf63
--- /dev/null
+++ b/util/config.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This file is just an empty default config.h file. */
diff --git a/util/cpu.c b/util/cpu.c
new file mode 100644
index 00000000..0e45ac4f
--- /dev/null
+++ b/util/cpu.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "cpu.h"
+
+struct cpu cpu[NCPUS];
+
diff --git a/util/cpu.h b/util/cpu.h
new file mode 100644
index 00000000..357f6d72
--- /dev/null
+++ b/util/cpu.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _UTIL_CPU_H_
+#define _UTIL_CPU_H_
+
+#include "cpus.h"
+
+
+/* There's one of these data structures per processor. */
+struct cpu
+{
+#define cpu_sub(name) struct cpu_##name name;
+#include "cpu_subs.h"
+#undef cpu_sub
+int dummy;
+};
+
+extern struct cpu cpu[NCPUS];
+
+
+#endif /* _UTIL_CPU_H_ */
diff --git a/util/cpu_init.c b/util/cpu_init.c
new file mode 100644
index 00000000..c59824ba
--- /dev/null
+++ b/util/cpu_init.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include "cpu.h"
+
+void cpu_init(struct cpu *cpu)
+{
+#define cpu_sub(name) cpu_##name##_init(cpu);
+#include "cpu_subs.h"
+#undef cpu_sub
+}
+
+void cpus_init()
+{
+ int n;
+
+ for (n = 0; n < NCPUS; n++)
+ cpu_init(&cpu[n]);
+}
+
diff --git a/util/cpu_subs.h b/util/cpu_subs.h
new file mode 100644
index 00000000..850ef38f
--- /dev/null
+++ b/util/cpu_subs.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This file is just a terminator for the set of cpu_subs.h headers. */
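
To make the cpu_sub() hook in cpu.h and cpu_init.c concrete: a subsystem supplies a
per-CPU structure plus an init routine, and a configuration-specific cpu_subs.h names
it before chaining to this terminator. The `timer' subsystem and the #include_next
chaining below are illustrative assumptions, not part of this tree:

    /* per-CPU state and init hook for a hypothetical `timer' subsystem,
       assumed visible wherever cpu.h and cpu_init.c are compiled */
    struct cpu_timer {
    	unsigned long ticks;
    };
    #define cpu_timer_init(cpu)	((cpu)->timer.ticks = 0)

    /* an overriding cpu_subs.h earlier on the include path would then read: */
    cpu_sub(timer)			/* expands per the including file's cpu_sub() */
    #include_next "cpu_subs.h"	/* and fall through to this terminator */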
diff --git a/util/cpus.h b/util/cpus.h
new file mode 100644
index 00000000..355ecdb1
--- /dev/null
+++ b/util/cpus.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+/* Default cpus.h sets things up for a uniprocessor. */
+#define NCPUS 1
+#undef MULTIPROCESSOR
+
diff --git a/util/debug.h b/util/debug.h
new file mode 100644
index 00000000..d61502b7
--- /dev/null
+++ b/util/debug.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 1995-1993 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: debug.h
+ * Author: Bryan Ford
+ *
+ * This file contains definitions for kernel debugging,
+ * which are compiled in on the DEBUG symbol.
+ *
+ */
+#ifndef _MACH_UTIL_DEBUG_H_
+#define _MACH_UTIL_DEBUG_H_
+
+#include <mach/macro_help.h>
+
+
+#ifdef DEBUG
+
+extern void panic(const char *fmt, ...);
+
+#define here() printf("@ %s:%d\n", __FILE__, __LINE__)
+#define message(args) ({ printf("@ %s:%d: ", __FILE__, __LINE__); printf args; printf("\n"); })
+
+#define otsan() panic("%s:%d: off the straight and narrow!", __FILE__, __LINE__)
+
+#define assert(v) \
+ MACRO_BEGIN \
+ if (!(v)) \
+ panic("%s:%d: failed assertion `"#v"'\n", \
+ __FILE__, __LINE__); \
+ MACRO_END
+
+#define do_debug(stmt) stmt
+
+#define debugmsg(args) \
+ ({ printf("%s:%d: ", __FILE__, __LINE__); printf args; printf("\n"); })
+
+#define struct_id_decl unsigned struct_id;
+#define struct_id_init(p,id) ((p)->struct_id = (id))
+#define struct_id_denit(p) ((p)->struct_id = 0)
+#define struct_id_verify(p,id) \
+ ({ if ((p)->struct_id != (id)) \
+ panic("%s:%d: "#p" (%08x) struct_id should be "#id" (%08x), is %08x\n", \
+ __FILE__, __LINE__, (p), (id), (p->struct_id)); \
+ })
+
+#else /* !DEBUG */
+
+#define otsan()
+#define assert(v)
+#define do_debug(stmt)
+#define debugmsg(args)
+
+#define struct_id_decl
+#define struct_id_init(p,id)
+#define struct_id_denit(p)
+#define struct_id_verify(p,id)
+
+#endif /* !DEBUG */
+
+#endif /* _MACH_UTIL_DEBUG_H_ */
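
A minimal sketch of how the struct_id hooks above are meant to be used; the `foo'
structure and its magic number are invented for the example:

    #define FOO_STRUCT_ID	0x464f4f21	/* arbitrary magic value */

    struct foo {
    	struct_id_decl			/* present only when DEBUG is defined */
    	int value;
    };

    void foo_setup(struct foo *f)
    {
    	struct_id_init(f, FOO_STRUCT_ID);
    }

    void foo_use(struct foo *f)
    {
    	struct_id_verify(f, FOO_STRUCT_ID);	/* panics on a stale or corrupt pointer */
    	f->value++;
    }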
diff --git a/util/die.c b/util/die.c
new file mode 100644
index 00000000..c2eeeaf5
--- /dev/null
+++ b/util/die.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+
+/* Note: These routines are deprecated and should not be used anymore.
+ Use panic() instead. */
+void die(const char *fmt, ...)
+{
+ int exit_code = fmt != 0;
+
+ about_to_die(exit_code);
+
+ if (fmt)
+ {
+ va_list vl;
+
+ va_start(vl, fmt);
+ vprintf(fmt, vl);
+ va_end(vl);
+
+ putchar('\n');
+
+ exit(exit_code);
+ }
+ else
+ exit(exit_code);
+}
+
+void die_no_mem(void)
+{
+ die("Not enough memory.");
+}
+
diff --git a/util/panic.c b/util/panic.c
new file mode 100644
index 00000000..2df88396
--- /dev/null
+++ b/util/panic.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+
+void panic(const char *fmt, ...)
+{
+ int exit_code = fmt != 0;
+
+ about_to_die(exit_code);
+
+ if (fmt)
+ {
+ va_list vl;
+
+ va_start(vl, fmt);
+ vprintf(fmt, vl);
+ va_end(vl);
+
+ putchar('\n');
+
+ exit(exit_code);
+ }
+ else
+ exit(exit_code);
+}
+
diff --git a/util/phys_mem.h b/util/phys_mem.h
new file mode 100644
index 00000000..2e18a6bb
--- /dev/null
+++ b/util/phys_mem.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _UTIL_PHYS_MEM_H_
+#define _UTIL_PHYS_MEM_H_
+
+#include <mach/machine/vm_types.h>
+
+
+/* This always holds the physical address
+ of the top of known physical memory. */
+extern vm_offset_t phys_mem_max;
+
+
+#endif /* _UTIL_PHYS_MEM_H_ */
diff --git a/util/putchar.c b/util/putchar.c
new file mode 100644
index 00000000..18f0f06c
--- /dev/null
+++ b/util/putchar.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+int putchar(int c)
+{
+	cnputc(c);
+	return c;
+}
+
diff --git a/util/puts.c b/util/puts.c
new file mode 100644
index 00000000..c0eec03e
--- /dev/null
+++ b/util/puts.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1995 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+/* Simple puts() implementation that just uses putchar().
+ Note that our libc's printf() is implemented
+ in terms of only puts() and putchar(), so that's all we need. */
+int puts(const char *s)
+{
+ while (*s)
+ {
+ putchar(*s);
+ s++;
+ }
+ putchar('\n');
+ return 0;
+}
+
diff --git a/util/ref_count.h b/util/ref_count.h
new file mode 100644
index 00000000..b6d34048
--- /dev/null
+++ b/util/ref_count.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1995-1993 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/*
+ * File: refcount.h
+ *
+ * This defines the system-independent atomic reference count data type.
+ * (This file will often be overridden
+ * by better machine-dependent implementations.)
+ *
+ */
+
+#ifndef _KERN_REF_COUNT_H_
+#define _KERN_REF_COUNT_H_
+
+#include <mach/macro_help.h>
+
+#include "simple_lock.h"
+
+
+struct ref_count {
+ decl_simple_lock_data(,lock) /* lock for reference count */
+ int count; /* number of references */
+};
+
+#define ref_count_init(refcount, refs) \
+ MACRO_BEGIN \
+ simple_lock_init(&(refcount)->lock); \
+ ((refcount)->count = (refs)); \
+ MACRO_END
+
+#define ref_count_take(refcount) \
+ MACRO_BEGIN \
+ simple_lock(&(refcount)->lock); \
+ (refcount)->count++; \
+ simple_unlock(&(refcount)->lock); \
+ MACRO_END
+
+#define ref_count_drop(refcount, func) \
+ MACRO_BEGIN \
+ int new_value; \
+ simple_lock(&(refcount)->lock); \
+ new_value = --(refcount)->count; \
+ simple_unlock(&(refcount)->lock); \
+ if (new_value == 0) { func; } \
+ MACRO_END
+
+
+#endif /* _KERN_REF_COUNT_H_ */
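
A minimal sketch of the intended usage, with a hypothetical `widget' object and
teardown routine (none of these names exist in the tree):

    struct widget {
    	struct ref_count refs;
    	/* ... other per-object state ... */
    };

    extern void widget_destroy(struct widget *w);	/* assumed teardown routine */

    void widget_create(struct widget *w)
    {
    	ref_count_init(&w->refs, 1);	/* the creator holds the first reference */
    }

    void widget_reference(struct widget *w)
    {
    	ref_count_take(&w->refs);
    }

    void widget_release(struct widget *w)
    {
    	/* widget_destroy() runs only when the last reference goes away */
    	ref_count_drop(&w->refs, widget_destroy(w));
    }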
diff --git a/version.c b/version.c
new file mode 100644
index 00000000..3cf20d99
--- /dev/null
+++ b/version.c
@@ -0,0 +1 @@
+char version[] = "GNUmach 0.2";
diff --git a/vm/memory_object.c b/vm/memory_object.c
new file mode 100644
index 00000000..a2b0bed8
--- /dev/null
+++ b/vm/memory_object.c
@@ -0,0 +1,1191 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/memory_object.c
+ * Author: Michael Wayne Young
+ *
+ * External memory management interface control functions.
+ */
+
+/*
+ * Interface dependencies:
+ */
+
+#include <mach/std_types.h> /* For pointer_t */
+#include <mach/mach_types.h>
+
+#include <mach/kern_return.h>
+#include <vm/vm_object.h>
+#include <mach/memory_object.h>
+#include <mach/boolean.h>
+#include <mach/vm_prot.h>
+#include <mach/message.h>
+
+#include "memory_object_user.h"
+#include "memory_object_default.h"
+
+/*
+ * Implementation dependencies:
+ */
+#include <vm/memory_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <vm/pmap.h> /* For copy_to_phys, pmap_clear_modify */
+#include <kern/thread.h> /* For current_thread() */
+#include <kern/host.h>
+#include <vm/vm_kern.h> /* For kernel_map, vm_move */
+#include <vm/vm_map.h> /* For vm_map_pageable */
+#include <ipc/ipc_port.h>
+
+#include <norma_vm.h>
+#include <norma_ipc.h>
+#if NORMA_VM
+#include <norma/xmm_server_rename.h>
+#endif /* NORMA_VM */
+#include <mach_pagemap.h>
+#if MACH_PAGEMAP
+#include <vm/vm_external.h>
+#endif /* MACH_PAGEMAP */
+
+typedef int memory_object_lock_result_t; /* moved from below */
+
+
+ipc_port_t memory_manager_default = IP_NULL;
+decl_simple_lock_data(,memory_manager_default_lock)
+
+/*
+ * Important note:
+ * All of these routines gain a reference to the
+ * object (first argument) as part of the automatic
+ * argument conversion. Explicit deallocation is necessary.
+ */
+
+#if !NORMA_VM
+/*
+ * If successful, destroys the map copy object.
+ */
+kern_return_t memory_object_data_provided(object, offset, data, data_cnt,
+ lock_value)
+ vm_object_t object;
+ vm_offset_t offset;
+ pointer_t data;
+ unsigned int data_cnt;
+ vm_prot_t lock_value;
+{
+ return memory_object_data_supply(object, offset, (vm_map_copy_t) data,
+ data_cnt, lock_value, FALSE, IP_NULL,
+ 0);
+}
+#endif /* !NORMA_VM */
+
+
+kern_return_t memory_object_data_supply(object, offset, data_copy, data_cnt,
+ lock_value, precious, reply_to, reply_to_type)
+ register
+ vm_object_t object;
+ register
+ vm_offset_t offset;
+ vm_map_copy_t data_copy;
+ unsigned int data_cnt;
+ vm_prot_t lock_value;
+ boolean_t precious;
+ ipc_port_t reply_to;
+ mach_msg_type_name_t reply_to_type;
+{
+ kern_return_t result = KERN_SUCCESS;
+ vm_offset_t error_offset = 0;
+ register
+ vm_page_t m;
+ register
+ vm_page_t data_m;
+ vm_size_t original_length;
+ vm_offset_t original_offset;
+ vm_page_t *page_list;
+ boolean_t was_absent;
+ vm_map_copy_t orig_copy = data_copy;
+
+ /*
+ * Look for bogus arguments
+ */
+
+ if (object == VM_OBJECT_NULL) {
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ if (lock_value & ~VM_PROT_ALL) {
+ vm_object_deallocate(object);
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ if ((data_cnt % PAGE_SIZE) != 0) {
+ vm_object_deallocate(object);
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ /*
+ * Adjust the offset from the memory object to the offset
+ * within the vm_object.
+ */
+
+ original_length = data_cnt;
+ original_offset = offset;
+
+ assert(data_copy->type == VM_MAP_COPY_PAGE_LIST);
+ page_list = &data_copy->cpy_page_list[0];
+
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ offset -= object->paging_offset;
+
+ /*
+ * Loop over copy stealing pages for pagein.
+ */
+
+ for (; data_cnt > 0 ; data_cnt -= PAGE_SIZE, offset += PAGE_SIZE) {
+
+ assert(data_copy->cpy_npages > 0);
+ data_m = *page_list;
+
+ if (data_m == VM_PAGE_NULL || data_m->tabled ||
+ data_m->error || data_m->absent || data_m->fictitious) {
+
+ panic("Data_supply: bad page");
+ }
+
+ /*
+ * Look up target page and check its state.
+ */
+
+retry_lookup:
+ m = vm_page_lookup(object,offset);
+ if (m == VM_PAGE_NULL) {
+ was_absent = FALSE;
+ }
+ else {
+ if (m->absent && m->busy) {
+
+ /*
+ * Page was requested. Free the busy
+ * page waiting for it. Insertion
+ * of new page happens below.
+ */
+
+ VM_PAGE_FREE(m);
+ was_absent = TRUE;
+ }
+ else {
+
+ /*
+ * Have to wait for page that is busy and
+ * not absent. This is probably going to
+ * be an error, but go back and check.
+ */
+ if (m->busy) {
+ PAGE_ASSERT_WAIT(m, FALSE);
+ vm_object_unlock(object);
+ thread_block((void (*)()) 0);
+ vm_object_lock(object);
+ goto retry_lookup;
+ }
+
+ /*
+ * Page already present; error.
+ * This is an error if data is precious.
+ */
+ result = KERN_MEMORY_PRESENT;
+ error_offset = offset + object->paging_offset;
+
+ break;
+ }
+ }
+
+ /*
+ * Ok to pagein page. Target object now has no page
+ * at offset. Set the page parameters, then drop
+ * in new page and set up pageout state. Object is
+ * still locked here.
+ *
+ * Must clear busy bit in page before inserting it.
+ * Ok to skip wakeup logic because nobody else
+ * can possibly know about this page.
+ */
+
+ data_m->busy = FALSE;
+ data_m->dirty = FALSE;
+ pmap_clear_modify(data_m->phys_addr);
+
+ data_m->page_lock = lock_value;
+ data_m->unlock_request = VM_PROT_NONE;
+ data_m->precious = precious;
+
+ vm_page_lock_queues();
+ vm_page_insert(data_m, object, offset);
+
+ if (was_absent)
+ vm_page_activate(data_m);
+ else
+ vm_page_deactivate(data_m);
+
+ vm_page_unlock_queues();
+
+ /*
+ * Null out this page list entry, and advance to next
+ * page.
+ */
+
+ *page_list++ = VM_PAGE_NULL;
+
+ if (--(data_copy->cpy_npages) == 0 &&
+ vm_map_copy_has_cont(data_copy)) {
+ vm_map_copy_t new_copy;
+
+ vm_object_unlock(object);
+
+ vm_map_copy_invoke_cont(data_copy, &new_copy, &result);
+
+ if (result == KERN_SUCCESS) {
+
+ /*
+ * Consume on success requires that
+ * we keep the original vm_map_copy
+ * around in case something fails.
+ * Free the old copy if it's not the original
+ */
+ if (data_copy != orig_copy) {
+ vm_map_copy_discard(data_copy);
+ }
+
+ if ((data_copy = new_copy) != VM_MAP_COPY_NULL)
+ page_list = &data_copy->cpy_page_list[0];
+
+ vm_object_lock(object);
+ }
+ else {
+ vm_object_lock(object);
+ error_offset = offset + object->paging_offset +
+ PAGE_SIZE;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Send reply if one was requested.
+ */
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ if (vm_map_copy_has_cont(data_copy))
+ vm_map_copy_abort_cont(data_copy);
+
+ if (IP_VALID(reply_to)) {
+ memory_object_supply_completed(
+ reply_to, reply_to_type,
+ object->pager_request,
+ original_offset,
+ original_length,
+ result,
+ error_offset);
+ }
+
+ vm_object_deallocate(object);
+
+ /*
+ * Consume on success: The final data copy must be
+ * discarded if it is not the original. The original
+ * gets discarded only if this routine succeeds.
+ */
+ if (data_copy != orig_copy)
+ vm_map_copy_discard(data_copy);
+ if (result == KERN_SUCCESS)
+ vm_map_copy_discard(orig_copy);
+
+
+ return(result);
+}
+
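+/*
+ * Routine: memory_object_data_error
+ *
+ * Description:
+ * Indicate that the memory manager cannot supply data for
+ * the given range. Pages that were waiting for that data
+ * (busy and absent) are marked in error and their waiters
+ * are awakened. The error_value argument is currently
+ * ignored.
+ */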
+kern_return_t memory_object_data_error(object, offset, size, error_value)
+ vm_object_t object;
+ vm_offset_t offset;
+ vm_size_t size;
+ kern_return_t error_value;
+{
+ if (object == VM_OBJECT_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (size != round_page(size))
+ return(KERN_INVALID_ARGUMENT);
+
+#ifdef lint
+ /* Error value is ignored at this time */
+ error_value++;
+#endif
+
+ vm_object_lock(object);
+ offset -= object->paging_offset;
+
+ while (size != 0) {
+ register vm_page_t m;
+
+ m = vm_page_lookup(object, offset);
+ if ((m != VM_PAGE_NULL) && m->busy && m->absent) {
+ m->error = TRUE;
+ m->absent = FALSE;
+ vm_object_absent_release(object);
+
+ PAGE_WAKEUP_DONE(m);
+
+ vm_page_lock_queues();
+ vm_page_activate(m);
+ vm_page_unlock_queues();
+ }
+
+ size -= PAGE_SIZE;
+ offset += PAGE_SIZE;
+ }
+ vm_object_unlock(object);
+
+ vm_object_deallocate(object);
+ return(KERN_SUCCESS);
+}
+
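+/*
+ * Routine: memory_object_data_unavailable
+ *
+ * Description:
+ * Indicate that the memory manager has no data for the
+ * given range. Pages waiting for that data are released
+ * from their busy state but remain absent, so the fault
+ * handler can supply zero-filled memory for them.
+ */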
+kern_return_t memory_object_data_unavailable(object, offset, size)
+ vm_object_t object;
+ vm_offset_t offset;
+ vm_size_t size;
+{
+#if MACH_PAGEMAP
+ vm_external_t existence_info = VM_EXTERNAL_NULL;
+#endif /* MACH_PAGEMAP */
+
+ if (object == VM_OBJECT_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (size != round_page(size))
+ return(KERN_INVALID_ARGUMENT);
+
+#if MACH_PAGEMAP
+ if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE) &&
+ (object->existence_info == VM_EXTERNAL_NULL)) {
+ existence_info = vm_external_create(VM_EXTERNAL_SMALL_SIZE);
+ }
+#endif /* MACH_PAGEMAP */
+
+ vm_object_lock(object);
+#if MACH_PAGEMAP
+ if (existence_info != VM_EXTERNAL_NULL) {
+ object->existence_info = existence_info;
+ }
+ if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE)) {
+ vm_object_unlock(object);
+ vm_object_deallocate(object);
+ return(KERN_SUCCESS);
+ }
+#endif /* MACH_PAGEMAP */
+ offset -= object->paging_offset;
+
+ while (size != 0) {
+ register vm_page_t m;
+
+ /*
+ * We're looking for pages that are both busy and
+ * absent (waiting to be filled), converting them
+ * to just absent.
+ *
+ * Pages that are just busy can be ignored entirely.
+ */
+
+ m = vm_page_lookup(object, offset);
+ if ((m != VM_PAGE_NULL) && m->busy && m->absent) {
+ PAGE_WAKEUP_DONE(m);
+
+ vm_page_lock_queues();
+ vm_page_activate(m);
+ vm_page_unlock_queues();
+ }
+ size -= PAGE_SIZE;
+ offset += PAGE_SIZE;
+ }
+
+ vm_object_unlock(object);
+
+ vm_object_deallocate(object);
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Routine: memory_object_lock_page
+ *
+ * Description:
+ * Perform the appropriate lock operations on the
+ * given page. See the description of
+ * "memory_object_lock_request" for the meanings
+ * of the arguments.
+ *
+ * Returns an indication that the operation
+ * completed, blocked, or that the page must
+ * be cleaned.
+ */
+
+#define MEMORY_OBJECT_LOCK_RESULT_DONE 0
+#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
+#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
+#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
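+
+/*
+ * Summary of the result codes (used by memory_object_lock_page below):
+ *	DONE		nothing further needs to be done for this page
+ *	MUST_BLOCK	the operation cannot proceed yet (busy or wired page)
+ *	MUST_CLEAN	the page is dirty and must be written back
+ *	MUST_RETURN	the page is clean but precious and must be returned
+ */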
+
+memory_object_lock_result_t memory_object_lock_page(m, should_return,
+ should_flush, prot)
+ vm_page_t m;
+ memory_object_return_t should_return;
+ boolean_t should_flush;
+ vm_prot_t prot;
+{
+ /*
+ * Don't worry about pages for which the kernel
+ * does not have any data.
+ */
+
+ if (m->absent)
+ return(MEMORY_OBJECT_LOCK_RESULT_DONE);
+
+ /*
+ * If we cannot change access to the page,
+ * either because a mapping is in progress
+ * (busy page) or because a mapping has been
+ * wired, then give up.
+ */
+
+ if (m->busy)
+ return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
+
+ assert(!m->fictitious);
+
+ if (m->wire_count != 0) {
+ /*
+ * If no change would take place
+ * anyway, return successfully.
+ *
+ * No change means:
+ * Not flushing AND
+ * No change to page lock [2 checks] AND
+ * Don't need to send page to manager
+ *
+ * Don't need to send page to manager means:
+ * No clean or return request OR (
+ * Page is not dirty [2 checks] AND (
+ * Page is not precious OR
+ * No request to return precious pages ))
+ *
+ * Now isn't that straightforward and obvious ?? ;-)
+ *
+ * XXX This doesn't handle sending a copy of a wired
+ * XXX page to the pager, but that will require some
+ * XXX significant surgery.
+ */
+
+ if (!should_flush &&
+ ((m->page_lock == prot) || (prot == VM_PROT_NO_CHANGE)) &&
+ ((should_return == MEMORY_OBJECT_RETURN_NONE) ||
+ (!m->dirty && !pmap_is_modified(m->phys_addr) &&
+ (!m->precious ||
+ should_return != MEMORY_OBJECT_RETURN_ALL)))) {
+ /*
+ * Restart page unlock requests,
+ * even though no change took place.
+ * [Memory managers may be expecting
+ * to see new requests.]
+ */
+ m->unlock_request = VM_PROT_NONE;
+ PAGE_WAKEUP(m);
+
+ return(MEMORY_OBJECT_LOCK_RESULT_DONE);
+ }
+
+ return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
+ }
+
+ /*
+ * If the page is to be flushed, allow
+ * that to be done as part of the protection.
+ */
+
+ if (should_flush)
+ prot = VM_PROT_ALL;
+
+ /*
+ * Set the page lock.
+ *
+ * If we are decreasing permission, do it now;
+ * let the fault handler take care of increases
+ * (pmap_page_protect may not increase protection).
+ */
+
+ if (prot != VM_PROT_NO_CHANGE) {
+ if ((m->page_lock ^ prot) & prot) {
+ pmap_page_protect(m->phys_addr, VM_PROT_ALL & ~prot);
+ }
+ m->page_lock = prot;
+
+ /*
+ * Restart any past unlock requests, even if no
+ * change resulted. If the manager explicitly
+ * requested no protection change, then it is assumed
+ * to be remembering past requests.
+ */
+
+ m->unlock_request = VM_PROT_NONE;
+ PAGE_WAKEUP(m);
+ }
+
+ /*
+ * Handle cleaning.
+ */
+
+ if (should_return != MEMORY_OBJECT_RETURN_NONE) {
+ /*
+ * Check whether the page is dirty. If
+ * write permission has not been removed,
+ * this may have unpredictable results.
+ */
+
+ if (!m->dirty)
+ m->dirty = pmap_is_modified(m->phys_addr);
+
+ if (m->dirty || (m->precious &&
+ should_return == MEMORY_OBJECT_RETURN_ALL)) {
+ /*
+ * If we weren't planning
+ * to flush the page anyway,
+ * we may need to remove the
+ * page from the pageout
+ * system and from physical
+ * maps now.
+ */
+
+ vm_page_lock_queues();
+ VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_unlock_queues();
+
+ if (!should_flush)
+ pmap_page_protect(m->phys_addr,
+ VM_PROT_NONE);
+
+ /*
+ * Cleaning a page will cause
+ * it to be flushed.
+ */
+
+ if (m->dirty)
+ return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
+ else
+ return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
+ }
+ }
+
+ /*
+ * Handle flushing
+ */
+
+ if (should_flush) {
+ VM_PAGE_FREE(m);
+ } else {
+ extern boolean_t vm_page_deactivate_hint;
+
+ /*
+ * XXX Make clean but not flush a paging hint,
+ * and deactivate the pages. This is a hack
+ * because it overloads flush/clean with
+ * implementation-dependent meaning. This only
+ * happens to pages that are already clean.
+ */
+
+ if (vm_page_deactivate_hint &&
+ (should_return != MEMORY_OBJECT_RETURN_NONE)) {
+ vm_page_lock_queues();
+ vm_page_deactivate(m);
+ vm_page_unlock_queues();
+ }
+ }
+
+ return(MEMORY_OBJECT_LOCK_RESULT_DONE);
+}
+
+/*
+ * Routine: memory_object_lock_request [user interface]
+ *
+ * Description:
+ * Control use of the data associated with the given
+ * memory object. For each page in the given range,
+ * perform the following operations, in order:
+ * 1) restrict access to the page (disallow
+ * forms specified by "prot");
+ * 2) return data to the manager (if "should_return"
+ * is RETURN_DIRTY and the page is dirty, or
+ * "should_return" is RETURN_ALL and the page
+ * is either dirty or precious); and,
+ * 3) flush the cached copy (if "should_flush"
+ * is asserted).
+ * The set of pages is defined by a starting offset
+ * ("offset") and size ("size"). Only pages with the
+ * same page alignment as the starting offset are
+ * considered.
+ *
+ * A single acknowledgement is sent (to the "reply_to"
+ * port) when these actions are complete. If successful,
+ * the naked send right for reply_to is consumed.
+ */
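+
+/*
+ * For example (a sketch only; the argument values are hypothetical and
+ * use the in-kernel signature below), a request to return the dirty
+ * pages in a range and flush the cached copies, without changing
+ * protections, would be:
+ *
+ *	memory_object_lock_request(object, offset, size,
+ *		MEMORY_OBJECT_RETURN_DIRTY, TRUE, VM_PROT_NO_CHANGE,
+ *		reply_to, reply_to_type);
+ */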
+
+kern_return_t
+memory_object_lock_request(object, offset, size,
+ should_return, should_flush, prot,
+ reply_to, reply_to_type)
+ register vm_object_t object;
+ register vm_offset_t offset;
+ register vm_size_t size;
+ memory_object_return_t should_return;
+ boolean_t should_flush;
+ vm_prot_t prot;
+ ipc_port_t reply_to;
+ mach_msg_type_name_t reply_to_type;
+{
+ register vm_page_t m;
+ vm_offset_t original_offset = offset;
+ vm_size_t original_size = size;
+ vm_offset_t paging_offset = 0;
+ vm_object_t new_object = VM_OBJECT_NULL;
+ vm_offset_t new_offset = 0;
+ vm_offset_t last_offset = offset;
+ int page_lock_result;
+ int pageout_action = 0; /* '=0' to quiet lint */
+
+#define DATA_WRITE_MAX 32
+ vm_page_t holding_pages[DATA_WRITE_MAX];
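+
+ /*
+ * At most DATA_WRITE_MAX pages are gathered into new_object before
+ * PAGEOUT_PAGES (below) hands them to the memory manager in a single
+ * operation; holding_pages remembers any holding pages created by
+ * vm_pageout_setup so that they can be freed afterwards.
+ */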
+
+ /*
+ * Check for bogus arguments.
+ */
+ if (object == VM_OBJECT_NULL ||
+ ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE))
+ return (KERN_INVALID_ARGUMENT);
+
+ size = round_page(size);
+
+ /*
+ * Lock the object, and acquire a paging reference to
+ * prevent the memory_object and control ports from
+ * being destroyed.
+ */
+
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ offset -= object->paging_offset;
+
+ /*
+ * To avoid blocking while scanning for pages, save
+ * dirty pages to be cleaned all at once.
+ *
+ * XXXO A similar strategy could be used to limit the
+ * number of times that a scan must be restarted for
+ * other reasons. Those pages that would require blocking
+ * could be temporarily collected in another list, or
+ * their offsets could be recorded in a small array.
+ */
+
+ /*
+ * XXX NOTE: May want to consider converting this to a page list
+ * XXX vm_map_copy interface. Need to understand object
+ * XXX coalescing implications before doing so.
+ */
+
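+/*
+ * PAGEOUT_PAGES hands the cluster of pages gathered so far in new_object
+ * to the memory manager: it wraps them in a vm_map_copy and sends them
+ * with memory_object_data_write (old pageout interface) or
+ * memory_object_data_return (new interface), then frees any holding
+ * pages and clears new_object so that a new cluster can be started.
+ */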
+#define PAGEOUT_PAGES \
+MACRO_BEGIN \
+ vm_map_copy_t copy; \
+ register int i; \
+ register vm_page_t hp; \
+ \
+ vm_object_unlock(object); \
+ \
+ (void) vm_map_copyin_object(new_object, 0, new_offset, &copy); \
+ \
+ if (object->use_old_pageout) { \
+ assert(pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN); \
+ (void) memory_object_data_write( \
+ object->pager, \
+ object->pager_request, \
+ paging_offset, \
+ (pointer_t) copy, \
+ new_offset); \
+ } \
+ else { \
+ (void) memory_object_data_return( \
+ object->pager, \
+ object->pager_request, \
+ paging_offset, \
+ (pointer_t) copy, \
+ new_offset, \
+ (pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
+ !should_flush); \
+ } \
+ \
+ vm_object_lock(object); \
+ \
+ for (i = 0; i < atop(new_offset); i++) { \
+ hp = holding_pages[i]; \
+ if (hp != VM_PAGE_NULL) \
+ VM_PAGE_FREE(hp); \
+ } \
+ \
+ new_object = VM_OBJECT_NULL; \
+MACRO_END
+
+ for (;
+ size != 0;
+ size -= PAGE_SIZE, offset += PAGE_SIZE)
+ {
+ /*
+ * Limit the number of pages to be cleaned at once.
+ */
+ if (new_object != VM_OBJECT_NULL &&
+ new_offset >= PAGE_SIZE * DATA_WRITE_MAX)
+ {
+ PAGEOUT_PAGES;
+ }
+
+ while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
+ switch ((page_lock_result = memory_object_lock_page(m,
+ should_return,
+ should_flush,
+ prot)))
+ {
+ case MEMORY_OBJECT_LOCK_RESULT_DONE:
+ /*
+ * End of a cluster of dirty pages.
+ */
+ if (new_object != VM_OBJECT_NULL) {
+ PAGEOUT_PAGES;
+ continue;
+ }
+ break;
+
+ case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
+ /*
+ * Since it is necessary to block,
+ * clean any dirty pages now.
+ */
+ if (new_object != VM_OBJECT_NULL) {
+ PAGEOUT_PAGES;
+ continue;
+ }
+
+ PAGE_ASSERT_WAIT(m, FALSE);
+ vm_object_unlock(object);
+ thread_block((void (*)()) 0);
+ vm_object_lock(object);
+ continue;
+
+ case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
+ case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
+ /*
+ * The clean and return cases are similar.
+ *
+ * Mark the page busy since we unlock the
+ * object below.
+ */
+ m->busy = TRUE;
+
+ /*
+ * if this would form a discontiguous block,
+ * clean the old pages and start anew.
+ *
+ * NOTE: The first time through here, new_object
+ * is null, hiding the fact that pageout_action
+ * is not initialized.
+ */
+ if (new_object != VM_OBJECT_NULL &&
+ (last_offset != offset ||
+ pageout_action != page_lock_result)) {
+ PAGEOUT_PAGES;
+ }
+
+ vm_object_unlock(object);
+
+ /*
+ * If we have not already allocated an object
+ * for a range of pages to be written, do so
+ * now.
+ */
+ if (new_object == VM_OBJECT_NULL) {
+ new_object = vm_object_allocate(original_size);
+ new_offset = 0;
+ paging_offset = m->offset +
+ object->paging_offset;
+ pageout_action = page_lock_result;
+ }
+
+ /*
+ * Move or copy the dirty page into the
+ * new object.
+ */
+ m = vm_pageout_setup(m,
+ m->offset + object->paging_offset,
+ new_object,
+ new_offset,
+ should_flush);
+
+ /*
+ * Save the holding page if there is one.
+ */
+ holding_pages[atop(new_offset)] = m;
+ new_offset += PAGE_SIZE;
+ last_offset = offset + PAGE_SIZE;
+
+ vm_object_lock(object);
+ break;
+ }
+ break;
+ }
+ }
+
+ /*
+ * We have completed the scan for applicable pages.
+ * Clean any pages that have been saved.
+ */
+ if (new_object != VM_OBJECT_NULL) {
+ PAGEOUT_PAGES;
+ }
+
+ if (IP_VALID(reply_to)) {
+ vm_object_unlock(object);
+
+ /* consumes our naked send-once/send right for reply_to */
+ (void) memory_object_lock_completed(reply_to, reply_to_type,
+ object->pager_request, original_offset, original_size);
+
+ vm_object_lock(object);
+ }
+
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ vm_object_deallocate(object);
+
+ return (KERN_SUCCESS);
+}
+
+#if !NORMA_VM
+/*
+ * Old version of memory_object_lock_request.
+ */
+kern_return_t
+xxx_memory_object_lock_request(object, offset, size,
+ should_clean, should_flush, prot,
+ reply_to, reply_to_type)
+ register vm_object_t object;
+ register vm_offset_t offset;
+ register vm_size_t size;
+ boolean_t should_clean;
+ boolean_t should_flush;
+ vm_prot_t prot;
+ ipc_port_t reply_to;
+ mach_msg_type_name_t reply_to_type;
+{
+ register int should_return;
+
+ if (should_clean)
+ should_return = MEMORY_OBJECT_RETURN_DIRTY;
+ else
+ should_return = MEMORY_OBJECT_RETURN_NONE;
+
+ return(memory_object_lock_request(object,offset,size,
+ should_return, should_flush, prot,
+ reply_to, reply_to_type));
+}
+#endif /* !NORMA_VM */
+
+kern_return_t
+memory_object_set_attributes_common(object, object_ready, may_cache,
+ copy_strategy, use_old_pageout)
+ vm_object_t object;
+ boolean_t object_ready;
+ boolean_t may_cache;
+ memory_object_copy_strategy_t copy_strategy;
+ boolean_t use_old_pageout;
+{
+ if (object == VM_OBJECT_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ /*
+ * Verify the attributes of importance
+ */
+
+ switch(copy_strategy) {
+ case MEMORY_OBJECT_COPY_NONE:
+ case MEMORY_OBJECT_COPY_CALL:
+ case MEMORY_OBJECT_COPY_DELAY:
+ case MEMORY_OBJECT_COPY_TEMPORARY:
+ break;
+ default:
+ vm_object_deallocate(object);
+ return(KERN_INVALID_ARGUMENT);
+ }
+
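+ /*
+ * Normalize the boolean arguments to exactly TRUE or FALSE.
+ */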
+ if (object_ready)
+ object_ready = TRUE;
+ if (may_cache)
+ may_cache = TRUE;
+
+ vm_object_lock(object);
+
+ /*
+ * Wake up anyone waiting for the ready attribute
+ * to become asserted.
+ */
+
+ if (object_ready && !object->pager_ready) {
+ object->use_old_pageout = use_old_pageout;
+ vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+ }
+
+ /*
+ * Copy the attributes
+ */
+
+ object->can_persist = may_cache;
+ object->pager_ready = object_ready;
+ if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
+ object->temporary = TRUE;
+ } else {
+ object->copy_strategy = copy_strategy;
+ }
+
+ vm_object_unlock(object);
+
+ vm_object_deallocate(object);
+
+ return(KERN_SUCCESS);
+}
+
+#if !NORMA_VM
+
+/*
+ * XXX rpd claims that reply_to could be obviated in favor of a client
+ * XXX stub that made change_attributes an RPC. Need investigation.
+ */
+
+kern_return_t memory_object_change_attributes(object, may_cache,
+ copy_strategy, reply_to, reply_to_type)
+ vm_object_t object;
+ boolean_t may_cache;
+ memory_object_copy_strategy_t copy_strategy;
+ ipc_port_t reply_to;
+ mach_msg_type_name_t reply_to_type;
+{
+ kern_return_t result;
+
+ /*
+ * Do the work and throw away our object reference. It
+ * is important that the object reference be deallocated
+ * BEFORE sending the reply. The whole point of the reply
+ * is that it shows up after the terminate message that
+ * may be generated by setting the object uncacheable.
+ *
+ * XXX may_cache may become a tri-valued variable to handle
+ * XXX uncache if not in use.
+ */
+ result = memory_object_set_attributes_common(object, TRUE,
+ may_cache, copy_strategy,
+ FALSE);
+
+ if (IP_VALID(reply_to)) {
+
+ /* consumes our naked send-once/send right for reply_to */
+ (void) memory_object_change_completed(reply_to, reply_to_type,
+ may_cache, copy_strategy);
+
+ }
+
+ return(result);
+}
+
+kern_return_t
+memory_object_set_attributes(object, object_ready, may_cache, copy_strategy)
+ vm_object_t object;
+ boolean_t object_ready;
+ boolean_t may_cache;
+ memory_object_copy_strategy_t copy_strategy;
+{
+ return memory_object_set_attributes_common(object, object_ready,
+ may_cache, copy_strategy,
+ TRUE);
+}
+
+kern_return_t memory_object_ready(object, may_cache, copy_strategy)
+ vm_object_t object;
+ boolean_t may_cache;
+ memory_object_copy_strategy_t copy_strategy;
+{
+ return memory_object_set_attributes_common(object, TRUE,
+ may_cache, copy_strategy,
+ FALSE);
+}
+#endif /* !NORMA_VM */
+
+kern_return_t memory_object_get_attributes(object, object_ready,
+ may_cache, copy_strategy)
+ vm_object_t object;
+ boolean_t *object_ready;
+ boolean_t *may_cache;
+ memory_object_copy_strategy_t *copy_strategy;
+{
+ if (object == VM_OBJECT_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ vm_object_lock(object);
+ *may_cache = object->can_persist;
+ *object_ready = object->pager_ready;
+ *copy_strategy = object->copy_strategy;
+ vm_object_unlock(object);
+
+ vm_object_deallocate(object);
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * If successful, consumes the supplied naked send right.
+ */
+kern_return_t vm_set_default_memory_manager(host, default_manager)
+ host_t host;
+ ipc_port_t *default_manager;
+{
+ ipc_port_t current_manager;
+ ipc_port_t new_manager;
+ ipc_port_t returned_manager;
+
+ if (host == HOST_NULL)
+ return(KERN_INVALID_HOST);
+
+ new_manager = *default_manager;
+ simple_lock(&memory_manager_default_lock);
+ current_manager = memory_manager_default;
+
+ if (new_manager == IP_NULL) {
+ /*
+ * Retrieve the current value.
+ */
+
+ returned_manager = ipc_port_copy_send(current_manager);
+ } else {
+ /*
+ * Retrieve the current value,
+ * and replace it with the supplied value.
+ * We consume the supplied naked send right.
+ */
+
+ returned_manager = current_manager;
+ memory_manager_default = new_manager;
+
+ /*
+ * In case anyone's been waiting for a memory
+ * manager to be established, wake them up.
+ */
+
+ thread_wakeup((event_t) &memory_manager_default);
+ }
+
+ simple_unlock(&memory_manager_default_lock);
+
+ *default_manager = returned_manager;
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Routine: memory_manager_default_reference
+ * Purpose:
+ * Returns a naked send right for the default
+ * memory manager. The returned right is always
+ * valid (not IP_NULL or IP_DEAD).
+ */
+
+ipc_port_t memory_manager_default_reference()
+{
+ ipc_port_t current_manager;
+
+ simple_lock(&memory_manager_default_lock);
+
+ while (current_manager = ipc_port_copy_send(memory_manager_default),
+ !IP_VALID(current_manager)) {
+ thread_sleep((event_t) &memory_manager_default,
+ simple_lock_addr(memory_manager_default_lock),
+ FALSE);
+ simple_lock(&memory_manager_default_lock);
+ }
+
+ simple_unlock(&memory_manager_default_lock);
+
+ return current_manager;
+}
+
+/*
+ * Routine: memory_manager_default_port
+ * Purpose:
+ * Returns true if the receiver for the port
+ * is the default memory manager.
+ *
+ * This is a hack to let ds_read_done
+ * know when it should keep memory wired.
+ */
+
+boolean_t memory_manager_default_port(port)
+ ipc_port_t port;
+{
+ ipc_port_t current;
+ boolean_t result;
+
+ simple_lock(&memory_manager_default_lock);
+ current = memory_manager_default;
+ if (IP_VALID(current)) {
+ /*
+ * There is no point in bothering to lock
+ * both ports, which would be painful to do.
+ * If the receive rights are moving around,
+ * we might be inaccurate.
+ */
+
+ result = port->ip_receiver == current->ip_receiver;
+ } else
+ result = FALSE;
+ simple_unlock(&memory_manager_default_lock);
+
+ return result;
+}
+
+void memory_manager_default_init()
+{
+ memory_manager_default = IP_NULL;
+ simple_lock_init(&memory_manager_default_lock);
+}
diff --git a/vm/memory_object.h b/vm/memory_object.h
new file mode 100644
index 00000000..9afa0623
--- /dev/null
+++ b/vm/memory_object.h
@@ -0,0 +1,43 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#ifndef _VM_MEMORY_OBJECT_H_
+#define _VM_MEMORY_OBJECT_H_
+
+#include <mach/boolean.h>
+
+/*
+ * We use "struct ipc_port *" instead of "ipc_port_t"
+ * to avoid include file circularities.
+ */
+
+extern struct ipc_port *memory_manager_default_reference();
+extern boolean_t memory_manager_default_port();
+extern void memory_manager_default_init();
+
+extern struct ipc_port *memory_manager_default;
+
+#endif /* _VM_MEMORY_OBJECT_H_ */
diff --git a/vm/memory_object_default.cli b/vm/memory_object_default.cli
new file mode 100644
index 00000000..998a9864
--- /dev/null
+++ b/vm/memory_object_default.cli
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a client presentation file. */
+
+#define KERNEL_USER 1
+#define SEQNOS 1
+
+#include <mach/memory_object_default.defs>
diff --git a/vm/memory_object_user.cli b/vm/memory_object_user.cli
new file mode 100644
index 00000000..2bba41fc
--- /dev/null
+++ b/vm/memory_object_user.cli
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a client presentation file. */
+
+#define KERNEL_USER 1
+#define SEQNOS 1
+
+#include <mach/memory_object.defs>
diff --git a/vm/pmap.h b/vm/pmap.h
new file mode 100644
index 00000000..f9a949ed
--- /dev/null
+++ b/vm/pmap.h
@@ -0,0 +1,267 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/pmap.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Machine address mapping definitions -- machine-independent
+ * section. [For machine-dependent section, see "machine/pmap.h".]
+ */
+
+#ifndef _VM_PMAP_H_
+#define _VM_PMAP_H_
+
+#include <machine/pmap.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_prot.h>
+#include <mach/boolean.h>
+
+/*
+ * The following is a description of the interface to the
+ * machine-dependent "physical map" data structure. The module
+ * must provide a "pmap_t" data type that represents the
+ * set of valid virtual-to-physical addresses for one user
+ * address space. [The kernel address space is represented
+ * by a distinguished "pmap_t".] The routines described manage
+ * this type, install and update virtual-to-physical mappings,
+ * and perform operations on physical addresses common to
+ * many address spaces.
+ */
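+
+/*
+ * For example (a sketch only; the variable names are illustrative),
+ * the machine-independent fault handling code establishes a translation
+ * with
+ *
+ *	pmap_enter(map->pmap, vaddr, m->phys_addr, prot, wired);
+ *
+ * and can later revoke all access to the physical page with
+ *
+ *	pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+ */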
+
+/*
+ * Routines used for initialization.
+ * There is traditionally also a pmap_bootstrap,
+ * used very early by machine-dependent code,
+ * but it is not part of the interface.
+ */
+
+extern vm_offset_t pmap_steal_memory(); /* During VM initialization,
+ * steal a chunk of memory.
+ */
+extern unsigned int pmap_free_pages(); /* During VM initialization,
+ * report remaining unused
+ * physical pages.
+ */
+extern void pmap_startup(); /* During VM initialization,
+ * use remaining physical pages
+ * to allocate page frames.
+ */
+extern void pmap_init(); /* Initialization,
+ * after kernel runs
+ * in virtual memory.
+ */
+
+#ifndef MACHINE_PAGES
+/*
+ * If machine/pmap.h defines MACHINE_PAGES, it must implement
+ * the above functions. The pmap module has complete control.
+ * Otherwise, it must implement
+ * pmap_free_pages
+ * pmap_virtual_space
+ * pmap_next_page
+ * pmap_init
+ * and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
+ * using pmap_free_pages, pmap_next_page, pmap_virtual_space,
+ * and pmap_enter. pmap_free_pages may over-estimate the number
+ * of unused physical pages, and pmap_next_page may return FALSE
+ * to indicate that there are no more unused pages to return.
+ * However, for best performance pmap_free_pages should be accurate.
+ */
+
+extern boolean_t pmap_next_page(); /* During VM initialization,
+ * return the next unused
+ * physical page.
+ */
+extern void pmap_virtual_space(); /* During VM initialization,
+ * report virtual space
+ * available for the kernel.
+ */
+#endif /* MACHINE_PAGES */
+
+/*
+ * Routines to manage the physical map data structure.
+ */
+
+/* Create a pmap_t. */
+pmap_t pmap_create(vm_size_t size);
+
+/* Return the kernel's pmap_t. */
+#ifndef pmap_kernel
+extern pmap_t pmap_kernel(void);
+#endif /* pmap_kernel */
+
+/* Gain and release a reference. */
+extern void pmap_reference(pmap_t pmap);
+extern void pmap_destroy(pmap_t pmap);
+
+/* Enter a mapping */
+extern void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
+ vm_prot_t prot, boolean_t wired);
+
+
+/*
+ * Routines that operate on ranges of virtual addresses.
+ */
+
+/* Remove mappings. */
+void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
+
+/* Change protections. */
+void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot);
+
+/*
+ * Routines to set up hardware state for physical maps to be used.
+ */
+extern void pmap_activate(); /* Prepare pmap_t to run
+ * on a given processor.
+ */
+extern void pmap_deactivate(); /* Release pmap_t from
+ * use on processor.
+ */
+
+
+/*
+ * Routines that operate on physical addresses.
+ */
+
+/* Restrict access to page. */
+void pmap_page_protect(vm_offset_t pa, vm_prot_t prot);
+
+/*
+ * Routines to manage reference/modify bits based on
+ * physical addresses, simulating them if not provided
+ * by the hardware.
+ */
+
+/* Clear reference bit */
+void pmap_clear_reference(vm_offset_t pa);
+
+/* Return reference bit */
+#ifndef pmap_is_referenced
+boolean_t pmap_is_referenced(vm_offset_t pa);
+#endif /* pmap_is_referenced */
+
+/* Clear modify bit */
+void pmap_clear_modify(vm_offset_t pa);
+
+/* Return modify bit */
+boolean_t pmap_is_modified(vm_offset_t pa);
+
+
+/*
+ * Statistics routines
+ */
+extern void pmap_statistics(); /* Return statistics */
+
+#ifndef pmap_resident_count
+extern int pmap_resident_count();
+#endif /* pmap_resident_count */
+
+/*
+ * Sundry required routines
+ */
+extern vm_offset_t pmap_extract(); /* Return a virtual-to-physical
+ * mapping, if possible.
+ */
+
+extern boolean_t pmap_access(); /* Is virtual address valid? */
+
+extern void pmap_collect(); /* Perform garbage
+ * collection, if any
+ */
+
+extern void pmap_change_wiring(); /* Specify pageability */
+
+#ifndef pmap_phys_address
+extern vm_offset_t pmap_phys_address(); /* Transform address
+ * returned by device
+ * driver mapping function
+ * to physical address
+ * known to this module.
+ */
+#endif /* pmap_phys_address */
+#ifndef pmap_phys_to_frame
+extern int pmap_phys_to_frame(); /* Inverse of
+ * pmap_phys_address,
+ * for use by device driver
+ * mapping function in
+ * machine-independent
+ * pseudo-devices.
+ */
+#endif /* pmap_phys_to_frame */
+
+/*
+ * Optional routines
+ */
+#ifndef pmap_copy
+extern void pmap_copy(); /* Copy range of
+ * mappings, if desired.
+ */
+#endif /* pmap_copy */
+#ifndef pmap_attribute
+extern kern_return_t pmap_attribute(); /* Get/Set special
+ * memory attributes
+ */
+#endif /* pmap_attribute */
+
+/*
+ * Routines defined as macros.
+ */
+#ifndef PMAP_ACTIVATE_USER
+#define PMAP_ACTIVATE_USER(pmap, thread, cpu) { \
+ if ((pmap) != kernel_pmap) \
+ PMAP_ACTIVATE(pmap, thread, cpu); \
+}
+#endif /* PMAP_ACTIVATE_USER */
+
+#ifndef PMAP_DEACTIVATE_USER
+#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) { \
+ if ((pmap) != kernel_pmap) \
+ PMAP_DEACTIVATE(pmap, thread, cpu); \
+}
+#endif /* PMAP_DEACTIVATE_USER */
+
+#ifndef PMAP_ACTIVATE_KERNEL
+#define PMAP_ACTIVATE_KERNEL(cpu) \
+ PMAP_ACTIVATE(kernel_pmap, THREAD_NULL, cpu)
+#endif /* PMAP_ACTIVATE_KERNEL */
+
+#ifndef PMAP_DEACTIVATE_KERNEL
+#define PMAP_DEACTIVATE_KERNEL(cpu) \
+ PMAP_DEACTIVATE(kernel_pmap, THREAD_NULL, cpu)
+#endif /* PMAP_DEACTIVATE_KERNEL */
+
+/*
+ * Exported data structures
+ */
+
+extern pmap_t kernel_pmap; /* The kernel's map */
+
+#endif /* _VM_PMAP_H_ */
diff --git a/vm/vm_debug.c b/vm/vm_debug.c
new file mode 100644
index 00000000..17c8c311
--- /dev/null
+++ b/vm/vm_debug.c
@@ -0,0 +1,499 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_debug.c.
+ * Author: Rich Draves
+ * Date: March, 1990
+ *
+ * Exported kernel calls. See mach_debug/mach_debug.defs.
+ */
+
+#include <mach_vm_debug.h>
+#if MACH_VM_DEBUG
+
+#include <kern/thread.h>
+#include <mach/kern_return.h>
+#include <mach/machine/vm_types.h>
+#include <mach/memory_object.h>
+#include <mach/vm_prot.h>
+#include <mach/vm_inherit.h>
+#include <mach/vm_param.h>
+#include <mach_debug/vm_info.h>
+#include <mach_debug/hash_info.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <kern/task.h>
+#include <kern/host.h>
+#include <ipc/ipc_port.h>
+
+
+
+/*
+ * Routine: vm_object_real_name
+ * Purpose:
+ * Convert a VM object to a name port.
+ * Conditions:
+ * Takes object and port locks.
+ * Returns:
+ * A naked send right for the object's name port,
+ * or IP_NULL if the object or its name port is null.
+ */
+
+ipc_port_t
+vm_object_real_name(object)
+ vm_object_t object;
+{
+ ipc_port_t port = IP_NULL;
+
+ if (object != VM_OBJECT_NULL) {
+ vm_object_lock(object);
+ if (object->pager_name != IP_NULL)
+ port = ipc_port_make_send(object->pager_name);
+ vm_object_unlock(object);
+ }
+
+ return port;
+}
+
+/*
+ * Routine: mach_vm_region_info [kernel call]
+ * Purpose:
+ * Retrieve information about a VM region,
+ * including info about the object chain.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved region/object info.
+ * KERN_INVALID_TASK The map is null.
+ * KERN_NO_SPACE There is no entry at/after the address.
+ */
+
+kern_return_t
+mach_vm_region_info(map, address, regionp, portp)
+ vm_map_t map;
+ vm_offset_t address;
+ vm_region_info_t *regionp;
+ ipc_port_t *portp;
+{
+ vm_map_t cmap; /* current map in traversal */
+ vm_map_t nmap; /* next map to look at */
+ vm_map_entry_t entry; /* entry in current map */
+ vm_object_t object;
+
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_TASK;
+
+ /* find the entry containing (or following) the address */
+
+ vm_map_lock_read(map);
+ for (cmap = map;;) {
+ /* cmap is read-locked */
+
+ if (!vm_map_lookup_entry(cmap, address, &entry)) {
+ entry = entry->vme_next;
+ if (entry == vm_map_to_entry(cmap)) {
+ if (map == cmap) {
+ vm_map_unlock_read(cmap);
+ return KERN_NO_SPACE;
+ }
+
+ /* back out to top-level & skip this submap */
+
+ address = vm_map_max(cmap);
+ vm_map_unlock_read(cmap);
+ vm_map_lock_read(map);
+ cmap = map;
+ continue;
+ }
+ }
+
+ if (entry->is_sub_map) {
+ /* move down to the sub map */
+
+ nmap = entry->object.sub_map;
+ vm_map_lock_read(nmap);
+ vm_map_unlock_read(cmap);
+ cmap = nmap;
+ continue;
+ } else {
+ break;
+ }
+ /*NOTREACHED*/
+ }
+
+
+ assert(entry->vme_start < entry->vme_end);
+
+ regionp->vri_start = entry->vme_start;
+ regionp->vri_end = entry->vme_end;
+
+ /* attributes from the real entry */
+
+ regionp->vri_protection = entry->protection;
+ regionp->vri_max_protection = entry->max_protection;
+ regionp->vri_inheritance = entry->inheritance;
+ regionp->vri_wired_count = entry->wired_count;
+ regionp->vri_user_wired_count = entry->user_wired_count;
+
+ object = entry->object.vm_object;
+ *portp = vm_object_real_name(object);
+ regionp->vri_object = (vm_offset_t) object;
+ regionp->vri_offset = entry->offset;
+ regionp->vri_needs_copy = entry->needs_copy;
+
+ regionp->vri_sharing = entry->is_shared;
+
+ vm_map_unlock_read(cmap);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_vm_object_info [kernel call]
+ * Purpose:
+ * Retrieve information about a VM object.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved object info.
+ * KERN_INVALID_ARGUMENT The object is null.
+ */
+
+kern_return_t
+mach_vm_object_info(object, infop, shadowp, copyp)
+ vm_object_t object;
+ vm_object_info_t *infop;
+ ipc_port_t *shadowp;
+ ipc_port_t *copyp;
+{
+ vm_object_info_t info;
+ vm_object_info_state_t state;
+ ipc_port_t shadow, copy;
+
+ if (object == VM_OBJECT_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Because of lock-ordering/deadlock considerations,
+ * we can't use vm_object_real_name for the copy object.
+ */
+
+ retry:
+ vm_object_lock(object);
+ copy = IP_NULL;
+ if (object->copy != VM_OBJECT_NULL) {
+ if (!vm_object_lock_try(object->copy)) {
+ vm_object_unlock(object);
+ simple_lock_pause(); /* wait a bit */
+ goto retry;
+ }
+
+ if (object->copy->pager_name != IP_NULL)
+ copy = ipc_port_make_send(object->copy->pager_name);
+ vm_object_unlock(object->copy);
+ }
+ shadow = vm_object_real_name(object->shadow);
+
+ info.voi_object = (vm_offset_t) object;
+ info.voi_pagesize = PAGE_SIZE;
+ info.voi_size = object->size;
+ info.voi_ref_count = object->ref_count;
+ info.voi_resident_page_count = object->resident_page_count;
+ info.voi_absent_count = object->absent_count;
+ info.voi_copy = (vm_offset_t) object->copy;
+ info.voi_shadow = (vm_offset_t) object->shadow;
+ info.voi_shadow_offset = object->shadow_offset;
+ info.voi_paging_offset = object->paging_offset;
+ info.voi_copy_strategy = object->copy_strategy;
+ info.voi_last_alloc = object->last_alloc;
+ info.voi_paging_in_progress = object->paging_in_progress;
+
+ state = 0;
+ if (object->pager_created)
+ state |= VOI_STATE_PAGER_CREATED;
+ if (object->pager_initialized)
+ state |= VOI_STATE_PAGER_INITIALIZED;
+ if (object->pager_ready)
+ state |= VOI_STATE_PAGER_READY;
+ if (object->can_persist)
+ state |= VOI_STATE_CAN_PERSIST;
+ if (object->internal)
+ state |= VOI_STATE_INTERNAL;
+ if (object->temporary)
+ state |= VOI_STATE_TEMPORARY;
+ if (object->alive)
+ state |= VOI_STATE_ALIVE;
+ if (object->lock_in_progress)
+ state |= VOI_STATE_LOCK_IN_PROGRESS;
+ if (object->lock_restart)
+ state |= VOI_STATE_LOCK_RESTART;
+ if (object->use_old_pageout)
+ state |= VOI_STATE_USE_OLD_PAGEOUT;
+ info.voi_state = state;
+ vm_object_unlock(object);
+
+ *infop = info;
+ *shadowp = shadow;
+ *copyp = copy;
+ return KERN_SUCCESS;
+}
+
+#define VPI_STATE_NODATA (VPI_STATE_BUSY|VPI_STATE_FICTITIOUS| \
+ VPI_STATE_PRIVATE|VPI_STATE_ABSENT)
+
+/*
+ * Routine: mach_vm_object_pages [kernel call]
+ * Purpose:
+ * Retrieve information about the pages in a VM object.
+ * Conditions:
+ * Nothing locked. Obeys CountInOut protocol.
+ * Returns:
+ * KERN_SUCCESS Retrieved object info.
+ * KERN_INVALID_ARGUMENT The object is null.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_vm_object_pages(object, pagesp, countp)
+ vm_object_t object;
+ vm_page_info_array_t *pagesp;
+ natural_t *countp;
+{
+ vm_size_t size;
+ vm_offset_t addr;
+ vm_page_info_t *pages;
+ unsigned int potential, actual, count;
+ vm_page_t p;
+ kern_return_t kr;
+
+ if (object == VM_OBJECT_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /* start with in-line memory */
+
+ pages = *pagesp;
+ potential = *countp;
+
+ for (size = 0;;) {
+ vm_object_lock(object);
+ actual = object->resident_page_count;
+ if (actual <= potential)
+ break;
+ vm_object_unlock(object);
+
+ if (pages != *pagesp)
+ kmem_free(ipc_kernel_map, addr, size);
+
+ size = round_page(actual * sizeof *pages);
+ kr = kmem_alloc(ipc_kernel_map, &addr, size);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ pages = (vm_page_info_t *) addr;
+ potential = size/sizeof *pages;
+ }
+ /* object is locked, we have enough wired memory */
+
+ count = 0;
+ queue_iterate(&object->memq, p, vm_page_t, listq) {
+ vm_page_info_t *info = &pages[count++];
+ vm_page_info_state_t state = 0;
+
+ info->vpi_offset = p->offset;
+ info->vpi_phys_addr = p->phys_addr;
+ info->vpi_wire_count = p->wire_count;
+ info->vpi_page_lock = p->page_lock;
+ info->vpi_unlock_request = p->unlock_request;
+
+ if (p->busy)
+ state |= VPI_STATE_BUSY;
+ if (p->wanted)
+ state |= VPI_STATE_WANTED;
+ if (p->tabled)
+ state |= VPI_STATE_TABLED;
+ if (p->fictitious)
+ state |= VPI_STATE_FICTITIOUS;
+ if (p->private)
+ state |= VPI_STATE_PRIVATE;
+ if (p->absent)
+ state |= VPI_STATE_ABSENT;
+ if (p->error)
+ state |= VPI_STATE_ERROR;
+ if (p->dirty)
+ state |= VPI_STATE_DIRTY;
+ if (p->precious)
+ state |= VPI_STATE_PRECIOUS;
+ if (p->overwriting)
+ state |= VPI_STATE_OVERWRITING;
+
+ if (((state & (VPI_STATE_NODATA|VPI_STATE_DIRTY)) == 0) &&
+ pmap_is_modified(p->phys_addr)) {
+ state |= VPI_STATE_DIRTY;
+ p->dirty = TRUE;
+ }
+
+ vm_page_lock_queues();
+ if (p->inactive)
+ state |= VPI_STATE_INACTIVE;
+ if (p->active)
+ state |= VPI_STATE_ACTIVE;
+ if (p->laundry)
+ state |= VPI_STATE_LAUNDRY;
+ if (p->free)
+ state |= VPI_STATE_FREE;
+ if (p->reference)
+ state |= VPI_STATE_REFERENCE;
+
+ if (((state & (VPI_STATE_NODATA|VPI_STATE_REFERENCE)) == 0) &&
+ pmap_is_referenced(p->phys_addr)) {
+ state |= VPI_STATE_REFERENCE;
+ p->reference = TRUE;
+ }
+ vm_page_unlock_queues();
+
+ info->vpi_state = state;
+ }
+
+ if (object->resident_page_count != count)
+ panic("mach_vm_object_pages");
+ vm_object_unlock(object);
+
+ if (pages == *pagesp) {
+ /* data fit in-line; nothing to deallocate */
+
+ *countp = actual;
+ } else if (actual == 0) {
+ kmem_free(ipc_kernel_map, addr, size);
+
+ *countp = 0;
+ } else {
+ vm_size_t size_used, rsize_used;
+ vm_map_copy_t copy;
+
+ /* kmem_alloc doesn't zero memory */
+
+ size_used = actual * sizeof *pages;
+ rsize_used = round_page(size_used);
+
+ if (rsize_used != size)
+ kmem_free(ipc_kernel_map,
+ addr + rsize_used, size - rsize_used);
+
+ if (size_used != rsize_used)
+ bzero((char *) (addr + size_used),
+ rsize_used - size_used);
+
+ kr = vm_map_copyin(ipc_kernel_map, addr, rsize_used,
+ TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *pagesp = (vm_page_info_t *) copy;
+ *countp = actual;
+ }
+
+ return KERN_SUCCESS;
+}
+
+#endif /* MACH_VM_DEBUG */
+
+/*
+ * Routine: host_virtual_physical_table_info
+ * Purpose:
+ * Return information about the VP table.
+ * Conditions:
+ * Nothing locked. Obeys CountInOut protocol.
+ * Returns:
+ * KERN_SUCCESS Returned information.
+ * KERN_INVALID_HOST The host is null.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+host_virtual_physical_table_info(host, infop, countp)
+ host_t host;
+ hash_info_bucket_array_t *infop;
+ natural_t *countp;
+{
+ vm_offset_t addr;
+ vm_size_t size = 0;/* '=0' to quiet gcc warnings */
+ hash_info_bucket_t *info;
+ unsigned int potential, actual;
+ kern_return_t kr;
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ /* start with in-line data */
+
+ info = *infop;
+ potential = *countp;
+
+ for (;;) {
+ actual = vm_page_info(info, potential);
+ if (actual <= potential)
+ break;
+
+ /* allocate more memory */
+
+ if (info != *infop)
+ kmem_free(ipc_kernel_map, addr, size);
+
+ size = round_page(actual * sizeof *info);
+ kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
+ if (kr != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+
+ info = (hash_info_bucket_t *) addr;
+ potential = size/sizeof *info;
+ }
+
+ if (info == *infop) {
+ /* data fit in-line; nothing to deallocate */
+
+ *countp = actual;
+ } else if (actual == 0) {
+ kmem_free(ipc_kernel_map, addr, size);
+
+ *countp = 0;
+ } else {
+ vm_map_copy_t copy;
+ vm_size_t used;
+
+ used = round_page(actual * sizeof *info);
+
+ if (used != size)
+ kmem_free(ipc_kernel_map, addr + used, size - used);
+
+ kr = vm_map_copyin(ipc_kernel_map, addr, used,
+ TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *infop = (hash_info_bucket_t *) copy;
+ *countp = actual;
+ }
+
+ return KERN_SUCCESS;
+}
diff --git a/vm/vm_external.c b/vm/vm_external.c
new file mode 100644
index 00000000..da591375
--- /dev/null
+++ b/vm/vm_external.c
@@ -0,0 +1,159 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * This module maintains information about the presence of
+ * pages not in memory. Since an external memory object
+ * must maintain a complete knowledge of its contents, this
+ * information takes the form of hints.
+ */
+
+#include <mach/boolean.h>
+#include <kern/zalloc.h>
+#include <vm/vm_external.h>
+#include <mach/vm_param.h>
+#include <kern/assert.h>
+
+
+
+boolean_t vm_external_unsafe = FALSE;
+
+zone_t vm_external_zone = ZONE_NULL;
+
+/*
+ * The implementation uses bit arrays to record whether
+ * a page has been written to external storage. For
+ * convenience, these bit arrays come in two sizes
+ * (measured in bytes).
+ */
+
+#define SMALL_SIZE (VM_EXTERNAL_SMALL_SIZE/8)
+#define LARGE_SIZE (VM_EXTERNAL_LARGE_SIZE/8)
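+
+/*
+ * For example, with 4K pages (the page size is machine-dependent, so
+ * these figures are only illustrative) a small map is 16 bytes and
+ * covers 128 pages, i.e. 512K of object; a large map is 1024 bytes and
+ * covers 8192 pages, i.e. 32M.
+ */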
+
+zone_t vm_object_small_existence_map_zone;
+zone_t vm_object_large_existence_map_zone;
+
+
+vm_external_t vm_external_create(size)
+ vm_offset_t size;
+{
+ vm_external_t result;
+ vm_size_t bytes;
+
+ if (vm_external_zone == ZONE_NULL)
+ return(VM_EXTERNAL_NULL);
+
+ result = (vm_external_t) zalloc(vm_external_zone);
+ result->existence_map = (char *) 0;
+
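+ /* One existence bit per page, rounded up to a whole number of bytes. */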
+ bytes = (atop(size) + 07) >> 3;
+ if (bytes <= SMALL_SIZE) {
+ result->existence_map =
+ (char *) zalloc(vm_object_small_existence_map_zone);
+ result->existence_size = SMALL_SIZE;
+ } else if (bytes <= LARGE_SIZE) {
+ result->existence_map =
+ (char *) zalloc(vm_object_large_existence_map_zone);
+ result->existence_size = LARGE_SIZE;
+ }
+ return(result);
+}
+
+void vm_external_destroy(e)
+ vm_external_t e;
+{
+ if (e == VM_EXTERNAL_NULL)
+ return;
+
+ if (e->existence_map != (char *) 0) {
+ if (e->existence_size <= SMALL_SIZE) {
+ zfree(vm_object_small_existence_map_zone,
+ (vm_offset_t) e->existence_map);
+ } else {
+ zfree(vm_object_large_existence_map_zone,
+ (vm_offset_t) e->existence_map);
+ }
+ }
+ zfree(vm_external_zone, (vm_offset_t) e);
+}
+
+vm_external_state_t _vm_external_state_get(e, offset)
+ vm_external_t e;
+ vm_offset_t offset;
+{
+ unsigned
+ int bit, byte;
+
+ if (vm_external_unsafe ||
+ (e == VM_EXTERNAL_NULL) ||
+ (e->existence_map == (char *) 0))
+ return(VM_EXTERNAL_STATE_UNKNOWN);
+
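+ /* The page index selects a bit: byte = index / 8, bit = index mod 8. */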
+ bit = atop(offset);
+ byte = bit >> 3;
+ if (byte >= e->existence_size) return (VM_EXTERNAL_STATE_UNKNOWN);
+ return( (e->existence_map[byte] & (1 << (bit & 07))) ?
+ VM_EXTERNAL_STATE_EXISTS : VM_EXTERNAL_STATE_ABSENT );
+}
+
+void vm_external_state_set(e, offset, state)
+ vm_external_t e;
+ vm_offset_t offset;
+ vm_external_state_t state;
+{
+ unsigned
+ int bit, byte;
+
+ if ((e == VM_EXTERNAL_NULL) || (e->existence_map == (char *) 0))
+ return;
+
+ if (state != VM_EXTERNAL_STATE_EXISTS)
+ return;
+
+ bit = atop(offset);
+ byte = bit >> 3;
+ if (byte >= e->existence_size) return;
+ e->existence_map[byte] |= (1 << (bit & 07));
+}
+
+void vm_external_module_initialize()
+{
+ vm_size_t size = (vm_size_t) sizeof(struct vm_external);
+
+ vm_external_zone = zinit(size, 16*1024*size, size,
+ 0, "external page bitmaps");
+
+ vm_object_small_existence_map_zone = zinit(SMALL_SIZE,
+ round_page(LARGE_SIZE * SMALL_SIZE),
+ round_page(SMALL_SIZE),
+ ZONE_EXHAUSTIBLE,
+ "object small existence maps");
+
+ vm_object_large_existence_map_zone = zinit(LARGE_SIZE,
+ round_page(8 * LARGE_SIZE),
+ round_page(LARGE_SIZE),
+ ZONE_EXHAUSTIBLE,
+ "object large existence maps");
+}
diff --git a/vm/vm_external.h b/vm/vm_external.h
new file mode 100644
index 00000000..70ffd650
--- /dev/null
+++ b/vm/vm_external.h
@@ -0,0 +1,89 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _VM_VM_EXTERNAL_H_
+#define _VM_VM_EXTERNAL_H_
+
+/*
+ * External page management hint technology
+ *
+ * The data structure exported by this module maintains
+ * a (potentially incomplete) map of the pages written
+ * to external storage for a range of virtual memory.
+ */
+
+/*
+ * The data structure representing the state of pages
+ * on external storage.
+ */
+
+typedef struct vm_external {
+ int existence_size; /* Size of the following bitmap */
+ char *existence_map; /* A bitmap of pages that have
+ * been written to backing
+ * storage.
+ */
+ int existence_count;/* Number of bits turned on in
+ * existence_map.
+ */
+} *vm_external_t;
+
+#define VM_EXTERNAL_NULL ((vm_external_t) 0)
+
+#define VM_EXTERNAL_SMALL_SIZE 128
+#define VM_EXTERNAL_LARGE_SIZE 8192
+
+/*
+ * The states that may be recorded for a page of external storage.
+ */
+
+typedef int vm_external_state_t;
+#define VM_EXTERNAL_STATE_EXISTS 1
+#define VM_EXTERNAL_STATE_UNKNOWN 2
+#define VM_EXTERNAL_STATE_ABSENT 3
+
+
+/*
+ * Routines exported by this module.
+ */
+
+extern void vm_external_module_initialize();
+ /* Initialize the module */
+
+extern vm_external_t vm_external_create(); /* Create a vm_external_t */
+extern void vm_external_destroy(); /* Destroy one */
+
+extern void vm_external_state_set();/* Set state of a page. */
+#define vm_external_state_get(e,offset) (((e) != VM_EXTERNAL_NULL) ? \
+ _vm_external_state_get(e, offset) : \
+ VM_EXTERNAL_STATE_UNKNOWN)
+ /* Retrieve the state
+ * for a given page, if known.
+ */
+extern vm_external_state_t _vm_external_state_get();
+ /* HIDDEN routine */
+
+#endif /* _VM_VM_EXTERNAL_H_ */
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
new file mode 100644
index 00000000..e45687cd
--- /dev/null
+++ b/vm/vm_fault.c
@@ -0,0 +1,2182 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm_fault.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Page fault handling module.
+ */
+#include <mach_pagemap.h>
+#include <mach_kdb.h>
+#include <mach_pcsample.h>
+
+
+#include <vm/vm_fault.h>
+#include <mach/kern_return.h>
+#include <mach/message.h> /* for error codes */
+#include <kern/counters.h>
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+#include <mach/vm_statistics.h>
+#include <vm/vm_pageout.h>
+#include <mach/vm_param.h>
+#include <mach/memory_object.h>
+#include "memory_object_user.h"
+ /* For memory_object_data_{request,unlock} */
+#include <kern/mach_param.h>
+#include <kern/macro_help.h>
+#include <kern/zalloc.h>
+
+#if MACH_PCSAMPLE
+#include <kern/pc_sample.h>
+#endif
+
+
+
+/*
+ * State needed by vm_fault_continue.
+ * This is a little hefty to drop directly
+ * into the thread structure.
+ */
+typedef struct vm_fault_state {
+ struct vm_map *vmf_map;
+ vm_offset_t vmf_vaddr;
+ vm_prot_t vmf_fault_type;
+ boolean_t vmf_change_wiring;
+ void (*vmf_continuation)();
+ vm_map_version_t vmf_version;
+ boolean_t vmf_wired;
+ struct vm_object *vmf_object;
+ vm_offset_t vmf_offset;
+ vm_prot_t vmf_prot;
+
+ boolean_t vmfp_backoff;
+ struct vm_object *vmfp_object;
+ vm_offset_t vmfp_offset;
+ struct vm_page *vmfp_first_m;
+ vm_prot_t vmfp_access;
+} vm_fault_state_t;
+
+zone_t vm_fault_state_zone = 0;
+
+int vm_object_absent_max = 50;
+
+int vm_fault_debug = 0;
+
+boolean_t vm_fault_dirty_handling = FALSE;
+boolean_t vm_fault_interruptible = TRUE;
+
+boolean_t software_reference_bits = TRUE;
+
+#if MACH_KDB
+extern struct db_watchpoint *db_watchpoint_list;
+#endif /* MACH_KDB */
+
+/*
+ * Routine: vm_fault_init
+ * Purpose:
+ * Initialize our private data structures.
+ */
+void vm_fault_init()
+{
+ vm_fault_state_zone = zinit(sizeof(vm_fault_state_t),
+ THREAD_MAX * sizeof(vm_fault_state_t),
+ sizeof(vm_fault_state_t),
+ 0, "vm fault state");
+}
+
+/*
+ * Routine: vm_fault_cleanup
+ * Purpose:
+ * Clean up the result of vm_fault_page.
+ * Results:
+ * The paging reference for "object" is released.
+ * "object" is unlocked.
+ * If "top_page" is not null, "top_page" is
+ * freed and the paging reference for the object
+ * containing it is released.
+ *
+ * In/out conditions:
+ * "object" must be locked.
+ */
+void
+vm_fault_cleanup(object, top_page)
+ register vm_object_t object;
+ register vm_page_t top_page;
+{
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ if (top_page != VM_PAGE_NULL) {
+ object = top_page->object;
+ vm_object_lock(object);
+ VM_PAGE_FREE(top_page);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ }
+}
+
+
+#if MACH_PCSAMPLE
+/*
+ * Do PC sampling on current thread, assuming
+ * that it is the thread taking this page fault.
+ *
+ * Must check for THREAD_NULL, since faults
+ * can occur before threads are running.
+ */
+
+#define vm_stat_sample(flavor) \
+ MACRO_BEGIN \
+ thread_t _thread_ = current_thread(); \
+ \
+ if (_thread_ != THREAD_NULL) \
+ take_pc_sample_macro(_thread_, (flavor)); \
+ MACRO_END
+
+#else
+#define vm_stat_sample(x)
+#endif /* MACH_PCSAMPLE */
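+/*
+ * When MACH_PCSAMPLE is configured out, vm_stat_sample() expands to
+ * nothing, so its call sites below need no conditionals of their own.
+ */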
+
+
+
+/*
+ * Routine: vm_fault_page
+ * Purpose:
+ * Find the resident page for the virtual memory
+ * specified by the given virtual memory object
+ * and offset.
+ * Additional arguments:
+ *		The required permissions for the page are given
+ * in "fault_type". Desired permissions are included
+ * in "protection".
+ *
+ * If the desired page is known to be resident (for
+ * example, because it was previously wired down), asserting
+ *		the "must_be_resident" parameter will speed the search.
+ *
+ * If the operation can be interrupted (by thread_abort
+ * or thread_terminate), then the "interruptible"
+ * parameter should be asserted.
+ *
+ * Results:
+ * The page containing the proper data is returned
+ * in "result_page".
+ *
+ * In/out conditions:
+ * The source object must be locked and referenced,
+ * and must donate one paging reference. The reference
+ * is not affected. The paging reference and lock are
+ * consumed.
+ *
+ * If the call succeeds, the object in which "result_page"
+ * resides is left locked and holding a paging reference.
+ * If this is not the original object, a busy page in the
+ * original object is returned in "top_page", to prevent other
+ * callers from pursuing this same data, along with a paging
+ * reference for the original object. The "top_page" should
+ * be destroyed when this guarantee is no longer required.
+ * The "result_page" is also left busy. It is not removed
+ * from the pageout queues.
+ */
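+/*
+ * Typical callers (vm_fault and vm_fault_copy, below) simply retry on
+ * VM_FAULT_RETRY, wait for or replenish pages on the two shortage
+ * returns, and on success eventually hand result_page and top_page
+ * back through PAGE_WAKEUP_DONE and vm_fault_cleanup.
+ */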
+vm_fault_return_t vm_fault_page(first_object, first_offset,
+ fault_type, must_be_resident, interruptible,
+ protection,
+ result_page, top_page,
+ resume, continuation)
+ /* Arguments: */
+ vm_object_t first_object; /* Object to begin search */
+ vm_offset_t first_offset; /* Offset into object */
+ vm_prot_t fault_type; /* What access is requested */
+ boolean_t must_be_resident;/* Must page be resident? */
+ boolean_t interruptible; /* May fault be interrupted? */
+ /* Modifies in place: */
+ vm_prot_t *protection; /* Protection for mapping */
+ /* Returns: */
+ vm_page_t *result_page; /* Page found, if successful */
+ vm_page_t *top_page; /* Page in top object, if
+ * not result_page.
+ */
+ /* More arguments: */
+ boolean_t resume; /* We are restarting. */
+ void (*continuation)(); /* Continuation for blocking. */
+{
+ register
+ vm_page_t m;
+ register
+ vm_object_t object;
+ register
+ vm_offset_t offset;
+ vm_page_t first_m;
+ vm_object_t next_object;
+ vm_object_t copy_object;
+ boolean_t look_for_page;
+ vm_prot_t access_required;
+
+#ifdef CONTINUATIONS
+ if (resume) {
+ register vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+
+ if (state->vmfp_backoff)
+ goto after_block_and_backoff;
+
+ object = state->vmfp_object;
+ offset = state->vmfp_offset;
+ first_m = state->vmfp_first_m;
+ access_required = state->vmfp_access;
+ goto after_thread_block;
+ }
+#else /* not CONTINUATIONS */
+ assert(continuation == 0);
+ assert(!resume);
+#endif /* not CONTINUATIONS */
+
+ vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY);
+ vm_stat.faults++; /* needs lock XXX */
+
+/*
+ * Recovery actions
+ */
+#define RELEASE_PAGE(m) \
+ MACRO_BEGIN \
+ PAGE_WAKEUP_DONE(m); \
+ vm_page_lock_queues(); \
+ if (!m->active && !m->inactive) \
+ vm_page_activate(m); \
+ vm_page_unlock_queues(); \
+ MACRO_END
+
+ if (vm_fault_dirty_handling
+#if MACH_KDB
+ /*
+ * If there are watchpoints set, then
+ * we don't want to give away write permission
+ * on a read fault. Make the task write fault,
+ * so that the watchpoint code notices the access.
+ */
+ || db_watchpoint_list
+#endif	/* MACH_KDB */
+ ) {
+ /*
+ * If we aren't asking for write permission,
+ * then don't give it away. We're using write
+ * faults to set the dirty bit.
+ */
+ if (!(fault_type & VM_PROT_WRITE))
+ *protection &= ~VM_PROT_WRITE;
+ }
+
+ if (!vm_fault_interruptible)
+ interruptible = FALSE;
+
+ /*
+ * INVARIANTS (through entire routine):
+ *
+ * 1) At all times, we must either have the object
+ * lock or a busy page in some object to prevent
+ * some other thread from trying to bring in
+ * the same page.
+ *
+ * Note that we cannot hold any locks during the
+ * pager access or when waiting for memory, so
+ * we use a busy page then.
+ *
+ * Note also that we aren't as concerned about more than
+ * one thread attempting to memory_object_data_unlock
+ * the same page at once, so we don't hold the page
+ * as busy then, but do record the highest unlock
+ * value so far. [Unlock requests may also be delivered
+ * out of order.]
+ *
+ * 2) To prevent another thread from racing us down the
+ * shadow chain and entering a new page in the top
+ * object before we do, we must keep a busy page in
+ * the top object while following the shadow chain.
+ *
+ * 3) We must increment paging_in_progress on any object
+ * for which we have a busy page, to prevent
+ * vm_object_collapse from removing the busy page
+ * without our noticing.
+ *
+ * 4) We leave busy pages on the pageout queues.
+ * If the pageout daemon comes across a busy page,
+ * it will remove the page from the pageout queues.
+ */
+
+ /*
+ * Search for the page at object/offset.
+ */
+
+ object = first_object;
+ offset = first_offset;
+ first_m = VM_PAGE_NULL;
+ access_required = fault_type;
+
+ /*
+ * See whether this page is resident
+ */
+
+ while (TRUE) {
+ m = vm_page_lookup(object, offset);
+ if (m != VM_PAGE_NULL) {
+ /*
+ * If the page is being brought in,
+ * wait for it and then retry.
+ *
+ * A possible optimization: if the page
+ * is known to be resident, we can ignore
+ * pages that are absent (regardless of
+ * whether they're busy).
+ */
+
+ if (m->busy) {
+ kern_return_t wait_result;
+
+ PAGE_ASSERT_WAIT(m, interruptible);
+ vm_object_unlock(object);
+#ifdef CONTINUATIONS
+ if (continuation != (void (*)()) 0) {
+ register vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+
+ /*
+ * Save variables in case
+ * thread_block discards
+ * our kernel stack.
+ */
+
+ state->vmfp_backoff = FALSE;
+ state->vmfp_object = object;
+ state->vmfp_offset = offset;
+ state->vmfp_first_m = first_m;
+ state->vmfp_access =
+ access_required;
+ state->vmf_prot = *protection;
+
+ counter(c_vm_fault_page_block_busy_user++);
+ thread_block(continuation);
+ } else
+#endif /* CONTINUATIONS */
+ {
+ counter(c_vm_fault_page_block_busy_kernel++);
+ thread_block((void (*)()) 0);
+ }
+ after_thread_block:
+ wait_result = current_thread()->wait_result;
+ vm_object_lock(object);
+ if (wait_result != THREAD_AWAKENED) {
+ vm_fault_cleanup(object, first_m);
+ if (wait_result == THREAD_RESTART)
+ return(VM_FAULT_RETRY);
+ else
+ return(VM_FAULT_INTERRUPTED);
+ }
+ continue;
+ }
+
+ /*
+ * If the page is in error, give up now.
+ */
+
+ if (m->error) {
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_MEMORY_ERROR);
+ }
+
+ /*
+ * If the page isn't busy, but is absent,
+ * then it was deemed "unavailable".
+ */
+
+ if (m->absent) {
+ /*
+ * Remove the non-existent page (unless it's
+ * in the top object) and move on down to the
+ * next object (if there is one).
+ */
+
+ offset += object->shadow_offset;
+ access_required = VM_PROT_READ;
+ next_object = object->shadow;
+ if (next_object == VM_OBJECT_NULL) {
+ vm_page_t real_m;
+
+ assert(!must_be_resident);
+
+ /*
+ * Absent page at bottom of shadow
+ * chain; zero fill the page we left
+ * busy in the first object, and flush
+ * the absent page. But first we
+ * need to allocate a real page.
+ */
+
+ real_m = vm_page_grab();
+ if (real_m == VM_PAGE_NULL) {
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_MEMORY_SHORTAGE);
+ }
+
+ if (object != first_object) {
+ VM_PAGE_FREE(m);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ object = first_object;
+ offset = first_offset;
+ m = first_m;
+ first_m = VM_PAGE_NULL;
+ vm_object_lock(object);
+ }
+
+ VM_PAGE_FREE(m);
+ assert(real_m->busy);
+ vm_page_lock_queues();
+ vm_page_insert(real_m, object, offset);
+ vm_page_unlock_queues();
+ m = real_m;
+
+ /*
+ * Drop the lock while zero filling
+ * page. Then break because this
+ * is the page we wanted. Checking
+ * the page lock is a waste of time;
+ * this page was either absent or
+ * newly allocated -- in both cases
+ * it can't be page locked by a pager.
+ */
+ vm_object_unlock(object);
+
+ vm_page_zero_fill(m);
+
+ vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
+
+ vm_stat.zero_fill_count++;
+ vm_object_lock(object);
+ pmap_clear_modify(m->phys_addr);
+ break;
+ } else {
+ if (must_be_resident) {
+ vm_object_paging_end(object);
+ } else if (object != first_object) {
+ vm_object_paging_end(object);
+ VM_PAGE_FREE(m);
+ } else {
+ first_m = m;
+ m->absent = FALSE;
+ vm_object_absent_release(object);
+ m->busy = TRUE;
+
+ vm_page_lock_queues();
+ VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_unlock_queues();
+ }
+ vm_object_lock(next_object);
+ vm_object_unlock(object);
+ object = next_object;
+ vm_object_paging_begin(object);
+ continue;
+ }
+ }
+
+ /*
+ * If the desired access to this page has
+ * been locked out, request that it be unlocked.
+ */
+
+ if (access_required & m->page_lock) {
+ if ((access_required & m->unlock_request) != access_required) {
+ vm_prot_t new_unlock_request;
+ kern_return_t rc;
+
+ if (!object->pager_ready) {
+ vm_object_assert_wait(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ interruptible);
+ goto block_and_backoff;
+ }
+
+ new_unlock_request = m->unlock_request =
+ (access_required | m->unlock_request);
+ vm_object_unlock(object);
+ if ((rc = memory_object_data_unlock(
+ object->pager,
+ object->pager_request,
+ offset + object->paging_offset,
+ PAGE_SIZE,
+ new_unlock_request))
+ != KERN_SUCCESS) {
+ printf("vm_fault: memory_object_data_unlock failed\n");
+ vm_object_lock(object);
+ vm_fault_cleanup(object, first_m);
+ return((rc == MACH_SEND_INTERRUPTED) ?
+ VM_FAULT_INTERRUPTED :
+ VM_FAULT_MEMORY_ERROR);
+ }
+ vm_object_lock(object);
+ continue;
+ }
+
+ PAGE_ASSERT_WAIT(m, interruptible);
+ goto block_and_backoff;
+ }
+
+ /*
+ * We mark the page busy and leave it on
+ * the pageout queues. If the pageout
+		 * daemon comes across it, then it will
+ * remove the page.
+ */
+
+ if (!software_reference_bits) {
+ vm_page_lock_queues();
+ if (m->inactive) {
+ vm_stat_sample(SAMPLED_PC_VM_REACTIVATION_FAULTS);
+ vm_stat.reactivations++;
+ }
+
+ VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_unlock_queues();
+ }
+
+ assert(!m->busy);
+ m->busy = TRUE;
+ assert(!m->absent);
+ break;
+ }
+
+ look_for_page =
+ (object->pager_created)
+#if MACH_PAGEMAP
+ && (vm_external_state_get(object->existence_info, offset + object->paging_offset) !=
+ VM_EXTERNAL_STATE_ABSENT)
+#endif	/* MACH_PAGEMAP */
+ ;
+
+ if ((look_for_page || (object == first_object))
+ && !must_be_resident) {
+ /*
+ * Allocate a new page for this object/offset
+ * pair.
+ */
+
+ m = vm_page_grab_fictitious();
+ if (m == VM_PAGE_NULL) {
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_FICTITIOUS_SHORTAGE);
+ }
+
+ vm_page_lock_queues();
+ vm_page_insert(m, object, offset);
+ vm_page_unlock_queues();
+ }
+
+ if (look_for_page && !must_be_resident) {
+ kern_return_t rc;
+
+ /*
+ * If the memory manager is not ready, we
+ * cannot make requests.
+ */
+ if (!object->pager_ready) {
+ vm_object_assert_wait(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ interruptible);
+ VM_PAGE_FREE(m);
+ goto block_and_backoff;
+ }
+
+ if (object->internal) {
+ /*
+ * Requests to the default pager
+ * must reserve a real page in advance,
+			 *	because the pager's data_provided path
+			 *	cannot block waiting for pages.
+ */
+
+ if (m->fictitious && !vm_page_convert(m)) {
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_MEMORY_SHORTAGE);
+ }
+ } else if (object->absent_count >
+ vm_object_absent_max) {
+ /*
+ * If there are too many outstanding page
+ * requests pending on this object, we
+ * wait for them to be resolved now.
+ */
+
+ vm_object_absent_assert_wait(object, interruptible);
+ VM_PAGE_FREE(m);
+ goto block_and_backoff;
+ }
+
+ /*
+ * Indicate that the page is waiting for data
+ * from the memory manager.
+ */
+
+ m->absent = TRUE;
+ object->absent_count++;
+
+ /*
+ * We have a busy page, so we can
+ * release the object lock.
+ */
+ vm_object_unlock(object);
+
+ /*
+ * Call the memory manager to retrieve the data.
+ */
+
+ vm_stat.pageins++;
+ vm_stat_sample(SAMPLED_PC_VM_PAGEIN_FAULTS);
+
+ if ((rc = memory_object_data_request(object->pager,
+ object->pager_request,
+ m->offset + object->paging_offset,
+ PAGE_SIZE, access_required)) != KERN_SUCCESS) {
+ if (rc != MACH_SEND_INTERRUPTED)
+ printf("%s(0x%x, 0x%x, 0x%x, 0x%x, 0x%x) failed, %d\n",
+ "memory_object_data_request",
+ object->pager,
+ object->pager_request,
+ m->offset + object->paging_offset,
+ PAGE_SIZE, access_required, rc);
+ /*
+ * Don't want to leave a busy page around,
+ * but the data request may have blocked,
+ * so check if it's still there and busy.
+ */
+ vm_object_lock(object);
+ if (m == vm_page_lookup(object,offset) &&
+ m->absent && m->busy)
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, first_m);
+ return((rc == MACH_SEND_INTERRUPTED) ?
+ VM_FAULT_INTERRUPTED :
+ VM_FAULT_MEMORY_ERROR);
+ }
+
+ /*
+ * Retry with same object/offset, since new data may
+ * be in a different page (i.e., m is meaningless at
+ * this point).
+ */
+ vm_object_lock(object);
+ continue;
+ }
+
+ /*
+	 * For the XP system, the only case in which we get here is if
+	 * the object has no pager (or we are unwiring).  If the pager
+	 * doesn't have the page, that case is handled by the m->absent
+	 * code above (and if you change things here you should look above).
+ */
+ if (object == first_object)
+ first_m = m;
+ else
+ {
+ assert(m == VM_PAGE_NULL);
+ }
+
+ /*
+ * Move on to the next object. Lock the next
+ * object before unlocking the current one.
+ */
+ access_required = VM_PROT_READ;
+
+ offset += object->shadow_offset;
+ next_object = object->shadow;
+ if (next_object == VM_OBJECT_NULL) {
+ assert(!must_be_resident);
+
+ /*
+ * If there's no object left, fill the page
+ * in the top object with zeros. But first we
+ * need to allocate a real page.
+ */
+
+ if (object != first_object) {
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ object = first_object;
+ offset = first_offset;
+ vm_object_lock(object);
+ }
+
+ m = first_m;
+ assert(m->object == object);
+ first_m = VM_PAGE_NULL;
+
+ if (m->fictitious && !vm_page_convert(m)) {
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, VM_PAGE_NULL);
+ return(VM_FAULT_MEMORY_SHORTAGE);
+ }
+
+ vm_object_unlock(object);
+ vm_page_zero_fill(m);
+ vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
+ vm_stat.zero_fill_count++;
+ vm_object_lock(object);
+ pmap_clear_modify(m->phys_addr);
+ break;
+ }
+ else {
+ vm_object_lock(next_object);
+ if ((object != first_object) || must_be_resident)
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ object = next_object;
+ vm_object_paging_begin(object);
+ }
+ }
+
+ /*
+ * PAGE HAS BEEN FOUND.
+ *
+ * This page (m) is:
+ * busy, so that we can play with it;
+ * not absent, so that nobody else will fill it;
+ * possibly eligible for pageout;
+ *
+ * The top-level page (first_m) is:
+ * VM_PAGE_NULL if the page was found in the
+ * top-level object;
+ * busy, not absent, and ineligible for pageout.
+ *
+ * The current object (object) is locked. A paging
+ * reference is held for the current and top-level
+ * objects.
+ */
+
+#if EXTRA_ASSERTIONS
+ assert(m->busy && !m->absent);
+ assert((first_m == VM_PAGE_NULL) ||
+ (first_m->busy && !first_m->absent &&
+ !first_m->active && !first_m->inactive));
+#endif	/* EXTRA_ASSERTIONS */
+
+ /*
+ * If the page is being written, but isn't
+ * already owned by the top-level object,
+ * we have to copy it into a new page owned
+ * by the top-level object.
+ */
+
+ if (object != first_object) {
+ /*
+ * We only really need to copy if we
+ * want to write it.
+ */
+
+ if (fault_type & VM_PROT_WRITE) {
+ vm_page_t copy_m;
+
+ assert(!must_be_resident);
+
+ /*
+ * If we try to collapse first_object at this
+ * point, we may deadlock when we try to get
+ * the lock on an intermediate object (since we
+ * have the bottom object locked). We can't
+ * unlock the bottom object, because the page
+ * we found may move (by collapse) if we do.
+ *
+ * Instead, we first copy the page. Then, when
+ * we have no more use for the bottom object,
+ * we unlock it and try to collapse.
+ *
+ * Note that we copy the page even if we didn't
+ * need to... that's the breaks.
+ */
+
+ /*
+ * Allocate a page for the copy
+ */
+ copy_m = vm_page_grab();
+ if (copy_m == VM_PAGE_NULL) {
+ RELEASE_PAGE(m);
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_MEMORY_SHORTAGE);
+ }
+
+ vm_object_unlock(object);
+ vm_page_copy(m, copy_m);
+ vm_object_lock(object);
+
+ /*
+ * If another map is truly sharing this
+ * page with us, we have to flush all
+ * uses of the original page, since we
+ * can't distinguish those which want the
+ * original from those which need the
+ * new copy.
+ *
+ * XXXO If we know that only one map has
+ * access to this page, then we could
+ * avoid the pmap_page_protect() call.
+ */
+
+ vm_page_lock_queues();
+ vm_page_deactivate(m);
+ pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+ vm_page_unlock_queues();
+
+ /*
+ * We no longer need the old page or object.
+ */
+
+ PAGE_WAKEUP_DONE(m);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ vm_stat.cow_faults++;
+ vm_stat_sample(SAMPLED_PC_VM_COW_FAULTS);
+ object = first_object;
+ offset = first_offset;
+
+ vm_object_lock(object);
+ VM_PAGE_FREE(first_m);
+ first_m = VM_PAGE_NULL;
+ assert(copy_m->busy);
+ vm_page_lock_queues();
+ vm_page_insert(copy_m, object, offset);
+ vm_page_unlock_queues();
+ m = copy_m;
+
+ /*
+ * Now that we've gotten the copy out of the
+ * way, let's try to collapse the top object.
+ * But we have to play ugly games with
+ * paging_in_progress to do that...
+ */
+
+ vm_object_paging_end(object);
+ vm_object_collapse(object);
+ vm_object_paging_begin(object);
+ }
+ else {
+ *protection &= (~VM_PROT_WRITE);
+ }
+ }
+
+ /*
+ * Now check whether the page needs to be pushed into the
+ * copy object. The use of asymmetric copy on write for
+ * shared temporary objects means that we may do two copies to
+ * satisfy the fault; one above to get the page from a
+ * shadowed object, and one here to push it into the copy.
+ */
+
+ while ((copy_object = first_object->copy) != VM_OBJECT_NULL) {
+ vm_offset_t copy_offset;
+ vm_page_t copy_m;
+
+ /*
+ * If the page is being written, but hasn't been
+ * copied to the copy-object, we have to copy it there.
+ */
+
+ if ((fault_type & VM_PROT_WRITE) == 0) {
+ *protection &= ~VM_PROT_WRITE;
+ break;
+ }
+
+ /*
+ * If the page was guaranteed to be resident,
+ * we must have already performed the copy.
+ */
+
+ if (must_be_resident)
+ break;
+
+ /*
+ * Try to get the lock on the copy_object.
+ */
+ if (!vm_object_lock_try(copy_object)) {
+ vm_object_unlock(object);
+
+ simple_lock_pause(); /* wait a bit */
+
+ vm_object_lock(object);
+ continue;
+ }
+
+ /*
+ * Make another reference to the copy-object,
+ * to keep it from disappearing during the
+ * copy.
+ */
+ assert(copy_object->ref_count > 0);
+ copy_object->ref_count++;
+
+ /*
+ * Does the page exist in the copy?
+ */
+ copy_offset = first_offset - copy_object->shadow_offset;
+ copy_m = vm_page_lookup(copy_object, copy_offset);
+ if (copy_m != VM_PAGE_NULL) {
+ if (copy_m->busy) {
+ /*
+ * If the page is being brought
+ * in, wait for it and then retry.
+ */
+ PAGE_ASSERT_WAIT(copy_m, interruptible);
+ RELEASE_PAGE(m);
+ copy_object->ref_count--;
+ assert(copy_object->ref_count > 0);
+ vm_object_unlock(copy_object);
+ goto block_and_backoff;
+ }
+ }
+ else {
+ /*
+ * Allocate a page for the copy
+ */
+ copy_m = vm_page_alloc(copy_object, copy_offset);
+ if (copy_m == VM_PAGE_NULL) {
+ RELEASE_PAGE(m);
+ copy_object->ref_count--;
+ assert(copy_object->ref_count > 0);
+ vm_object_unlock(copy_object);
+ vm_fault_cleanup(object, first_m);
+ return(VM_FAULT_MEMORY_SHORTAGE);
+ }
+
+ /*
+ * Must copy page into copy-object.
+ */
+
+ vm_page_copy(m, copy_m);
+
+ /*
+ * If the old page was in use by any users
+ * of the copy-object, it must be removed
+ * from all pmaps. (We can't know which
+ * pmaps use it.)
+ */
+
+ vm_page_lock_queues();
+ pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+ copy_m->dirty = TRUE;
+ vm_page_unlock_queues();
+
+ /*
+ * If there's a pager, then immediately
+ * page out this page, using the "initialize"
+ * option. Else, we use the copy.
+ */
+
+ if (!copy_object->pager_created) {
+ vm_page_lock_queues();
+ vm_page_activate(copy_m);
+ vm_page_unlock_queues();
+ PAGE_WAKEUP_DONE(copy_m);
+ } else {
+ /*
+ * The page is already ready for pageout:
+ * not on pageout queues and busy.
+ * Unlock everything except the
+ * copy_object itself.
+ */
+
+ vm_object_unlock(object);
+
+ /*
+ * Write the page to the copy-object,
+ * flushing it from the kernel.
+ */
+
+ vm_pageout_page(copy_m, TRUE, TRUE);
+
+ /*
+ * Since the pageout may have
+ * temporarily dropped the
+ * copy_object's lock, we
+ * check whether we'll have
+ * to deallocate the hard way.
+ */
+
+ if ((copy_object->shadow != object) ||
+ (copy_object->ref_count == 1)) {
+ vm_object_unlock(copy_object);
+ vm_object_deallocate(copy_object);
+ vm_object_lock(object);
+ continue;
+ }
+
+ /*
+ * Pick back up the old object's
+ * lock. [It is safe to do so,
+ * since it must be deeper in the
+ * object tree.]
+ */
+
+ vm_object_lock(object);
+ }
+
+ /*
+ * Because we're pushing a page upward
+ * in the object tree, we must restart
+ * any faults that are waiting here.
+ * [Note that this is an expansion of
+ * PAGE_WAKEUP that uses the THREAD_RESTART
+ * wait result]. Can't turn off the page's
+ * busy bit because we're not done with it.
+ */
+
+ if (m->wanted) {
+ m->wanted = FALSE;
+ thread_wakeup_with_result((event_t) m,
+ THREAD_RESTART);
+ }
+ }
+
+ /*
+ * The reference count on copy_object must be
+ * at least 2: one for our extra reference,
+ * and at least one from the outside world
+ * (we checked that when we last locked
+ * copy_object).
+ */
+ copy_object->ref_count--;
+ assert(copy_object->ref_count > 0);
+ vm_object_unlock(copy_object);
+
+ break;
+ }
+
+ *result_page = m;
+ *top_page = first_m;
+
+ /*
+ * If the page can be written, assume that it will be.
+ * [Earlier, we restrict the permission to allow write
+ * access only if the fault so required, so we don't
+ * mark read-only data as dirty.]
+ */
+
+ if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE))
+ m->dirty = TRUE;
+
+ return(VM_FAULT_SUCCESS);
+
+ block_and_backoff:
+ vm_fault_cleanup(object, first_m);
+
+#ifdef CONTINUATIONS
+ if (continuation != (void (*)()) 0) {
+ register vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+
+ /*
+ * Save variables in case we must restart.
+ */
+
+ state->vmfp_backoff = TRUE;
+ state->vmf_prot = *protection;
+
+ counter(c_vm_fault_page_block_backoff_user++);
+ thread_block(continuation);
+ } else
+#endif /* CONTINUATIONS */
+ {
+ counter(c_vm_fault_page_block_backoff_kernel++);
+ thread_block((void (*)()) 0);
+ }
+ after_block_and_backoff:
+ if (current_thread()->wait_result == THREAD_AWAKENED)
+ return VM_FAULT_RETRY;
+ else
+ return VM_FAULT_INTERRUPTED;
+
+#undef RELEASE_PAGE
+}
+
+/*
+ * Routine: vm_fault
+ * Purpose:
+ * Handle page faults, including pseudo-faults
+ * used to change the wiring status of pages.
+ * Returns:
+ * If an explicit (expression) continuation is supplied,
+ * then we call the continuation instead of returning.
+ * Implementation:
+ * Explicit continuations make this a little icky,
+ * because it hasn't been rewritten to embrace CPS.
+ * Instead, we have resume arguments for vm_fault and
+ *	vm_fault_page, to let the fault computation continue.
+ *
+ * vm_fault and vm_fault_page save mucho state
+ * in the moral equivalent of a closure. The state
+ * structure is allocated when first entering vm_fault
+ * and deallocated when leaving vm_fault.
+ */
+
+#ifdef CONTINUATIONS
+void
+vm_fault_continue()
+{
+ register vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+
+ (void) vm_fault(state->vmf_map,
+ state->vmf_vaddr,
+ state->vmf_fault_type,
+ state->vmf_change_wiring,
+ TRUE, state->vmf_continuation);
+ /*NOTREACHED*/
+}
+#endif /* CONTINUATIONS */
+
+kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
+ resume, continuation)
+ vm_map_t map;
+ vm_offset_t vaddr;
+ vm_prot_t fault_type;
+ boolean_t change_wiring;
+ boolean_t resume;
+ void (*continuation)();
+{
+	vm_map_version_t	version;	/* Map version for verification */
+ boolean_t wired; /* Should mapping be wired down? */
+ vm_object_t object; /* Top-level object */
+ vm_offset_t offset; /* Top-level offset */
+ vm_prot_t prot; /* Protection for mapping */
+ vm_object_t old_copy_object; /* Saved copy object */
+ vm_page_t result_page; /* Result of vm_fault_page */
+ vm_page_t top_page; /* Placeholder page */
+ kern_return_t kr;
+
+ register
+ vm_page_t m; /* Fast access to result_page */
+
+#ifdef CONTINUATIONS
+ if (resume) {
+ register vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+
+ /*
+ * Retrieve cached variables and
+ * continue vm_fault_page.
+ */
+
+ object = state->vmf_object;
+ if (object == VM_OBJECT_NULL)
+ goto RetryFault;
+ version = state->vmf_version;
+ wired = state->vmf_wired;
+ offset = state->vmf_offset;
+ prot = state->vmf_prot;
+
+ kr = vm_fault_page(object, offset, fault_type,
+ (change_wiring && !wired), !change_wiring,
+ &prot, &result_page, &top_page,
+ TRUE, vm_fault_continue);
+ goto after_vm_fault_page;
+ }
+
+ if (continuation != (void (*)()) 0) {
+ /*
+ * We will probably need to save state.
+ */
+
+ char * state;
+
+ /*
+		 * If this assignment statement were written as
+		 * 'active_threads[cpu_number()] = zalloc()',
+		 * cpu_number might be evaluated before zalloc;
+		 * if zalloc blocked, cpu_number would be wrong.
+ */
+
+ state = (char *) zalloc(vm_fault_state_zone);
+ current_thread()->ith_other = state;
+
+ }
+#else /* not CONTINUATIONS */
+ assert(continuation == 0);
+ assert(!resume);
+#endif /* not CONTINUATIONS */
+
+ RetryFault: ;
+
+ /*
+ * Find the backing store object and offset into
+ * it to begin the search.
+ */
+
+ if ((kr = vm_map_lookup(&map, vaddr, fault_type, &version,
+ &object, &offset,
+ &prot, &wired)) != KERN_SUCCESS) {
+ goto done;
+ }
+
+ /*
+ * If the page is wired, we must fault for the current protection
+ * value, to avoid further faults.
+ */
+
+ if (wired)
+ fault_type = prot;
+
+ /*
+ * Make a reference to this object to
+ * prevent its disposal while we are messing with
+ * it. Once we have the reference, the map is free
+ * to be diddled. Since objects reference their
+ * shadows (and copies), they will stay around as well.
+ */
+
+ assert(object->ref_count > 0);
+ object->ref_count++;
+ vm_object_paging_begin(object);
+
+#ifdef CONTINUATIONS
+ if (continuation != (void (*)()) 0) {
+ register vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+
+ /*
+ * Save variables, in case vm_fault_page discards
+ * our kernel stack and we have to restart.
+ */
+
+ state->vmf_map = map;
+ state->vmf_vaddr = vaddr;
+ state->vmf_fault_type = fault_type;
+ state->vmf_change_wiring = change_wiring;
+ state->vmf_continuation = continuation;
+
+ state->vmf_version = version;
+ state->vmf_wired = wired;
+ state->vmf_object = object;
+ state->vmf_offset = offset;
+ state->vmf_prot = prot;
+
+ kr = vm_fault_page(object, offset, fault_type,
+ (change_wiring && !wired), !change_wiring,
+ &prot, &result_page, &top_page,
+ FALSE, vm_fault_continue);
+ } else
+#endif /* CONTINUATIONS */
+ {
+ kr = vm_fault_page(object, offset, fault_type,
+ (change_wiring && !wired), !change_wiring,
+ &prot, &result_page, &top_page,
+ FALSE, (void (*)()) 0);
+ }
+ after_vm_fault_page:
+
+ /*
+ * If we didn't succeed, lose the object reference immediately.
+ */
+
+ if (kr != VM_FAULT_SUCCESS)
+ vm_object_deallocate(object);
+
+ /*
+ * See why we failed, and take corrective action.
+ */
+
+ switch (kr) {
+ case VM_FAULT_SUCCESS:
+ break;
+ case VM_FAULT_RETRY:
+ goto RetryFault;
+ case VM_FAULT_INTERRUPTED:
+ kr = KERN_SUCCESS;
+ goto done;
+ case VM_FAULT_MEMORY_SHORTAGE:
+#ifdef CONTINUATIONS
+ if (continuation != (void (*)()) 0) {
+ register vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+
+ /*
+ * Save variables in case VM_PAGE_WAIT
+ * discards our kernel stack.
+ */
+
+ state->vmf_map = map;
+ state->vmf_vaddr = vaddr;
+ state->vmf_fault_type = fault_type;
+ state->vmf_change_wiring = change_wiring;
+ state->vmf_continuation = continuation;
+ state->vmf_object = VM_OBJECT_NULL;
+
+ VM_PAGE_WAIT(vm_fault_continue);
+ } else
+#endif /* CONTINUATIONS */
+ VM_PAGE_WAIT((void (*)()) 0);
+ goto RetryFault;
+ case VM_FAULT_FICTITIOUS_SHORTAGE:
+ vm_page_more_fictitious();
+ goto RetryFault;
+ case VM_FAULT_MEMORY_ERROR:
+ kr = KERN_MEMORY_ERROR;
+ goto done;
+ }
+
+ m = result_page;
+
+ assert((change_wiring && !wired) ?
+ (top_page == VM_PAGE_NULL) :
+ ((top_page == VM_PAGE_NULL) == (m->object == object)));
+
+ /*
+ * How to clean up the result of vm_fault_page. This
+ * happens whether the mapping is entered or not.
+ */
+
+#define UNLOCK_AND_DEALLOCATE \
+ MACRO_BEGIN \
+ vm_fault_cleanup(m->object, top_page); \
+ vm_object_deallocate(object); \
+ MACRO_END
+
+ /*
+ * What to do with the resulting page from vm_fault_page
+ * if it doesn't get entered into the physical map:
+ */
+
+#define RELEASE_PAGE(m) \
+ MACRO_BEGIN \
+ PAGE_WAKEUP_DONE(m); \
+ vm_page_lock_queues(); \
+ if (!m->active && !m->inactive) \
+ vm_page_activate(m); \
+ vm_page_unlock_queues(); \
+ MACRO_END
+
+ /*
+ * We must verify that the maps have not changed
+ * since our last lookup.
+ */
+
+ old_copy_object = m->object->copy;
+
+ vm_object_unlock(m->object);
+ while (!vm_map_verify(map, &version)) {
+ vm_object_t retry_object;
+ vm_offset_t retry_offset;
+ vm_prot_t retry_prot;
+
+ /*
+ * To avoid trying to write_lock the map while another
+ * thread has it read_locked (in vm_map_pageable), we
+ * do not try for write permission. If the page is
+ * still writable, we will get write permission. If it
+ * is not, or has been marked needs_copy, we enter the
+ * mapping without write permission, and will merely
+ * take another fault.
+ */
+ kr = vm_map_lookup(&map, vaddr,
+ fault_type & ~VM_PROT_WRITE, &version,
+ &retry_object, &retry_offset, &retry_prot,
+ &wired);
+
+ if (kr != KERN_SUCCESS) {
+ vm_object_lock(m->object);
+ RELEASE_PAGE(m);
+ UNLOCK_AND_DEALLOCATE;
+ goto done;
+ }
+
+ vm_object_unlock(retry_object);
+ vm_object_lock(m->object);
+
+ if ((retry_object != object) ||
+ (retry_offset != offset)) {
+ RELEASE_PAGE(m);
+ UNLOCK_AND_DEALLOCATE;
+ goto RetryFault;
+ }
+
+ /*
+ * Check whether the protection has changed or the object
+ * has been copied while we left the map unlocked.
+ */
+ prot &= retry_prot;
+ vm_object_unlock(m->object);
+ }
+ vm_object_lock(m->object);
+
+ /*
+ * If the copy object changed while the top-level object
+ * was unlocked, then we must take away write permission.
+ */
+
+ if (m->object->copy != old_copy_object)
+ prot &= ~VM_PROT_WRITE;
+
+ /*
+ * If we want to wire down this page, but no longer have
+ * adequate permissions, we must start all over.
+ */
+
+ if (wired && (prot != fault_type)) {
+ vm_map_verify_done(map, &version);
+ RELEASE_PAGE(m);
+ UNLOCK_AND_DEALLOCATE;
+ goto RetryFault;
+ }
+
+ /*
+ * It's critically important that a wired-down page be faulted
+ * only once in each map for which it is wired.
+ */
+
+ vm_object_unlock(m->object);
+
+ /*
+ * Put this page into the physical map.
+ * We had to do the unlock above because pmap_enter
+ * may cause other faults. The page may be on
+ * the pageout queues. If the pageout daemon comes
+ * across the page, it will remove it from the queues.
+ */
+
+ PMAP_ENTER(map->pmap, vaddr, m, prot, wired);
+
+ /*
+ * If the page is not wired down and isn't already
+ * on a pageout queue, then put it where the
+ * pageout daemon can find it.
+ */
+ vm_object_lock(m->object);
+ vm_page_lock_queues();
+ if (change_wiring) {
+ if (wired)
+ vm_page_wire(m);
+ else
+ vm_page_unwire(m);
+ } else if (software_reference_bits) {
+ if (!m->active && !m->inactive)
+ vm_page_activate(m);
+ m->reference = TRUE;
+ } else {
+ vm_page_activate(m);
+ }
+ vm_page_unlock_queues();
+
+ /*
+ * Unlock everything, and return
+ */
+
+ vm_map_verify_done(map, &version);
+ PAGE_WAKEUP_DONE(m);
+ kr = KERN_SUCCESS;
+ UNLOCK_AND_DEALLOCATE;
+
+#undef UNLOCK_AND_DEALLOCATE
+#undef RELEASE_PAGE
+
+ done:
+#ifdef CONTINUATIONS
+ if (continuation != (void (*)()) 0) {
+ register vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+
+ zfree(vm_fault_state_zone, (vm_offset_t) state);
+ (*continuation)(kr);
+ /*NOTREACHED*/
+ }
+#endif /* CONTINUATIONS */
+
+ return(kr);
+}
+
+kern_return_t vm_fault_wire_fast();
+
+/*
+ * vm_fault_wire:
+ *
+ * Wire down a range of virtual addresses in a map.
+ */
+void vm_fault_wire(map, entry)
+ vm_map_t map;
+ vm_map_entry_t entry;
+{
+
+ register vm_offset_t va;
+ register pmap_t pmap;
+ register vm_offset_t end_addr = entry->vme_end;
+
+ pmap = vm_map_pmap(map);
+
+ /*
+ * Inform the physical mapping system that the
+ * range of addresses may not fault, so that
+ * page tables and such can be locked down as well.
+ */
+
+ pmap_pageable(pmap, entry->vme_start, end_addr, FALSE);
+
+ /*
+ * We simulate a fault to get the page and enter it
+ * in the physical map.
+ */
+
+ for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
+ if (vm_fault_wire_fast(map, va, entry) != KERN_SUCCESS)
+ (void) vm_fault(map, va, VM_PROT_NONE, TRUE,
+ FALSE, (void (*)()) 0);
+ }
+}
+
+/*
+ * vm_fault_unwire:
+ *
+ * Unwire a range of virtual addresses in a map.
+ */
+void vm_fault_unwire(map, entry)
+ vm_map_t map;
+ vm_map_entry_t entry;
+{
+ register vm_offset_t va;
+ register pmap_t pmap;
+ register vm_offset_t end_addr = entry->vme_end;
+ vm_object_t object;
+
+ pmap = vm_map_pmap(map);
+
+ object = (entry->is_sub_map)
+ ? VM_OBJECT_NULL : entry->object.vm_object;
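+
+	/*
+	 * For a submap entry there is no single backing object to hand
+	 * to vm_fault_page, so the general vm_fault path below is used
+	 * instead.
+	 */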
+
+ /*
+ * Since the pages are wired down, we must be able to
+ * get their mappings from the physical map system.
+ */
+
+ for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
+ pmap_change_wiring(pmap, va, FALSE);
+
+ if (object == VM_OBJECT_NULL) {
+ vm_map_lock_set_recursive(map);
+ (void) vm_fault(map, va, VM_PROT_NONE, TRUE,
+ FALSE, (void (*)()) 0);
+ vm_map_lock_clear_recursive(map);
+ } else {
+ vm_prot_t prot;
+ vm_page_t result_page;
+ vm_page_t top_page;
+ vm_fault_return_t result;
+
+ do {
+ prot = VM_PROT_NONE;
+
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ result = vm_fault_page(object,
+ entry->offset +
+ (va - entry->vme_start),
+ VM_PROT_NONE, TRUE,
+ FALSE, &prot,
+ &result_page,
+ &top_page,
+ FALSE, (void (*)()) 0);
+ } while (result == VM_FAULT_RETRY);
+
+ if (result != VM_FAULT_SUCCESS)
+ panic("vm_fault_unwire: failure");
+
+ vm_page_lock_queues();
+ vm_page_unwire(result_page);
+ vm_page_unlock_queues();
+ PAGE_WAKEUP_DONE(result_page);
+
+ vm_fault_cleanup(result_page->object, top_page);
+ }
+ }
+
+ /*
+ * Inform the physical mapping system that the range
+ * of addresses may fault, so that page tables and
+ * such may be unwired themselves.
+ */
+
+ pmap_pageable(pmap, entry->vme_start, end_addr, TRUE);
+}
+
+/*
+ * vm_fault_wire_fast:
+ *
+ * Handle common case of a wire down page fault at the given address.
+ * If successful, the page is inserted into the associated physical map.
+ * The map entry is passed in to avoid the overhead of a map lookup.
+ *
+ * NOTE: the given address should be truncated to the
+ * proper page address.
+ *
+ * KERN_SUCCESS is returned if the page fault is handled; otherwise,
+ * a standard error specifying why the fault is fatal is returned.
+ *
+ * The map in question must be referenced, and remains so.
+ * Caller has a read lock on the map.
+ *
+ * This is a stripped version of vm_fault() for wiring pages. Anything
+ * other than the common case will return KERN_FAILURE, and the caller
+ * is expected to call vm_fault().
+ */
+kern_return_t vm_fault_wire_fast(map, va, entry)
+ vm_map_t map;
+ vm_offset_t va;
+ vm_map_entry_t entry;
+{
+ vm_object_t object;
+ vm_offset_t offset;
+ register vm_page_t m;
+ vm_prot_t prot;
+
+ vm_stat.faults++; /* needs lock XXX */
+/*
+ * Recovery actions
+ */
+
+#undef RELEASE_PAGE
+#define RELEASE_PAGE(m) { \
+ PAGE_WAKEUP_DONE(m); \
+ vm_page_lock_queues(); \
+ vm_page_unwire(m); \
+ vm_page_unlock_queues(); \
+}
+
+
+#undef UNLOCK_THINGS
+#define UNLOCK_THINGS { \
+ object->paging_in_progress--; \
+ vm_object_unlock(object); \
+}
+
+#undef UNLOCK_AND_DEALLOCATE
+#define UNLOCK_AND_DEALLOCATE { \
+ UNLOCK_THINGS; \
+ vm_object_deallocate(object); \
+}
+/*
+ * Give up and have caller do things the hard way.
+ */
+
+#define GIVE_UP { \
+ UNLOCK_AND_DEALLOCATE; \
+ return(KERN_FAILURE); \
+}
+
+
+ /*
+ * If this entry is not directly to a vm_object, bail out.
+ */
+ if (entry->is_sub_map)
+ return(KERN_FAILURE);
+
+ /*
+ * Find the backing store object and offset into it.
+ */
+
+ object = entry->object.vm_object;
+ offset = (va - entry->vme_start) + entry->offset;
+ prot = entry->protection;
+
+ /*
+ * Make a reference to this object to prevent its
+ * disposal while we are messing with it.
+ */
+
+ vm_object_lock(object);
+ assert(object->ref_count > 0);
+ object->ref_count++;
+ object->paging_in_progress++;
+
+ /*
+ * INVARIANTS (through entire routine):
+ *
+ * 1) At all times, we must either have the object
+ * lock or a busy page in some object to prevent
+ * some other thread from trying to bring in
+ * the same page.
+ *
+ * 2) Once we have a busy page, we must remove it from
+ * the pageout queues, so that the pageout daemon
+ * will not grab it away.
+ *
+ */
+
+ /*
+ * Look for page in top-level object. If it's not there or
+ * there's something going on, give up.
+ */
+ m = vm_page_lookup(object, offset);
+ if ((m == VM_PAGE_NULL) || (m->error) ||
+ (m->busy) || (m->absent) || (prot & m->page_lock)) {
+ GIVE_UP;
+ }
+
+ /*
+ * Wire the page down now. All bail outs beyond this
+ * point must unwire the page.
+ */
+
+ vm_page_lock_queues();
+ vm_page_wire(m);
+ vm_page_unlock_queues();
+
+ /*
+ * Mark page busy for other threads.
+ */
+ assert(!m->busy);
+ m->busy = TRUE;
+ assert(!m->absent);
+
+ /*
+ * Give up if the page is being written and there's a copy object
+ */
+ if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
+ RELEASE_PAGE(m);
+ GIVE_UP;
+ }
+
+ /*
+ * Put this page into the physical map.
+ * We have to unlock the object because pmap_enter
+ * may cause other faults.
+ */
+ vm_object_unlock(object);
+
+ PMAP_ENTER(map->pmap, va, m, prot, TRUE);
+
+ /*
+ * Must relock object so that paging_in_progress can be cleared.
+ */
+ vm_object_lock(object);
+
+ /*
+ * Unlock everything, and return
+ */
+
+ PAGE_WAKEUP_DONE(m);
+ UNLOCK_AND_DEALLOCATE;
+
+ return(KERN_SUCCESS);
+
+}
+
+/*
+ * Routine: vm_fault_copy_cleanup
+ * Purpose:
+ * Release a page used by vm_fault_copy.
+ */
+
+void vm_fault_copy_cleanup(page, top_page)
+ vm_page_t page;
+ vm_page_t top_page;
+{
+ vm_object_t object = page->object;
+
+ vm_object_lock(object);
+ PAGE_WAKEUP_DONE(page);
+ vm_page_lock_queues();
+ if (!page->active && !page->inactive)
+ vm_page_activate(page);
+ vm_page_unlock_queues();
+ vm_fault_cleanup(object, top_page);
+}
+
+/*
+ * Routine: vm_fault_copy
+ *
+ * Purpose:
+ * Copy pages from one virtual memory object to another --
+ * neither the source nor destination pages need be resident.
+ *
+ * Before actually copying a page, the version associated with
+ *	the destination address map will be verified.
+ *
+ * In/out conditions:
+ * The caller must hold a reference, but not a lock, to
+ * each of the source and destination objects and to the
+ * destination map.
+ *
+ * Results:
+ * Returns KERN_SUCCESS if no errors were encountered in
+ * reading or writing the data. Returns KERN_INTERRUPTED if
+ * the operation was interrupted (only possible if the
+ * "interruptible" argument is asserted). Other return values
+ * indicate a permanent error in copying the data.
+ *
+ * The actual amount of data copied will be returned in the
+ * "copy_size" argument. In the event that the destination map
+ * verification failed, this amount may be less than the amount
+ * requested.
+ */
+kern_return_t vm_fault_copy(
+ src_object,
+ src_offset,
+ src_size,
+ dst_object,
+ dst_offset,
+ dst_map,
+ dst_version,
+ interruptible
+ )
+ vm_object_t src_object;
+ vm_offset_t src_offset;
+ vm_size_t *src_size; /* INOUT */
+ vm_object_t dst_object;
+ vm_offset_t dst_offset;
+ vm_map_t dst_map;
+ vm_map_version_t *dst_version;
+ boolean_t interruptible;
+{
+ vm_page_t result_page;
+ vm_prot_t prot;
+
+ vm_page_t src_page;
+ vm_page_t src_top_page;
+
+ vm_page_t dst_page;
+ vm_page_t dst_top_page;
+
+ vm_size_t amount_done;
+ vm_object_t old_copy_object;
+
+#define RETURN(x) \
+ MACRO_BEGIN \
+ *src_size = amount_done; \
+ MACRO_RETURN(x); \
+ MACRO_END
+
+ amount_done = 0;
+ do { /* while (amount_done != *src_size) */
+
+ RetrySourceFault: ;
+
+ if (src_object == VM_OBJECT_NULL) {
+ /*
+ * No source object. We will just
+ * zero-fill the page in dst_object.
+ */
+
+ src_page = VM_PAGE_NULL;
+ } else {
+ prot = VM_PROT_READ;
+
+ vm_object_lock(src_object);
+ vm_object_paging_begin(src_object);
+
+ switch (vm_fault_page(src_object, src_offset,
+ VM_PROT_READ, FALSE, interruptible,
+ &prot, &result_page, &src_top_page,
+ FALSE, (void (*)()) 0)) {
+
+ case VM_FAULT_SUCCESS:
+ break;
+ case VM_FAULT_RETRY:
+ goto RetrySourceFault;
+ case VM_FAULT_INTERRUPTED:
+ RETURN(MACH_SEND_INTERRUPTED);
+ case VM_FAULT_MEMORY_SHORTAGE:
+ VM_PAGE_WAIT((void (*)()) 0);
+ goto RetrySourceFault;
+ case VM_FAULT_FICTITIOUS_SHORTAGE:
+ vm_page_more_fictitious();
+ goto RetrySourceFault;
+ case VM_FAULT_MEMORY_ERROR:
+ return(KERN_MEMORY_ERROR);
+ }
+
+ src_page = result_page;
+
+ assert((src_top_page == VM_PAGE_NULL) ==
+ (src_page->object == src_object));
+
+ assert ((prot & VM_PROT_READ) != VM_PROT_NONE);
+
+ vm_object_unlock(src_page->object);
+ }
+
+ RetryDestinationFault: ;
+
+ prot = VM_PROT_WRITE;
+
+ vm_object_lock(dst_object);
+ vm_object_paging_begin(dst_object);
+
+ switch (vm_fault_page(dst_object, dst_offset, VM_PROT_WRITE,
+ FALSE, FALSE /* interruptible */,
+ &prot, &result_page, &dst_top_page,
+ FALSE, (void (*)()) 0)) {
+
+ case VM_FAULT_SUCCESS:
+ break;
+ case VM_FAULT_RETRY:
+ goto RetryDestinationFault;
+ case VM_FAULT_INTERRUPTED:
+ if (src_page != VM_PAGE_NULL)
+ vm_fault_copy_cleanup(src_page,
+ src_top_page);
+ RETURN(MACH_SEND_INTERRUPTED);
+ case VM_FAULT_MEMORY_SHORTAGE:
+ VM_PAGE_WAIT((void (*)()) 0);
+ goto RetryDestinationFault;
+ case VM_FAULT_FICTITIOUS_SHORTAGE:
+ vm_page_more_fictitious();
+ goto RetryDestinationFault;
+ case VM_FAULT_MEMORY_ERROR:
+ if (src_page != VM_PAGE_NULL)
+ vm_fault_copy_cleanup(src_page,
+ src_top_page);
+ return(KERN_MEMORY_ERROR);
+ }
+ assert ((prot & VM_PROT_WRITE) != VM_PROT_NONE);
+
+ dst_page = result_page;
+
+ old_copy_object = dst_page->object->copy;
+
+ vm_object_unlock(dst_page->object);
+
+ if (!vm_map_verify(dst_map, dst_version)) {
+
+ BailOut: ;
+
+ if (src_page != VM_PAGE_NULL)
+ vm_fault_copy_cleanup(src_page, src_top_page);
+ vm_fault_copy_cleanup(dst_page, dst_top_page);
+ break;
+ }
+
+
+ vm_object_lock(dst_page->object);
+ if (dst_page->object->copy != old_copy_object) {
+ vm_object_unlock(dst_page->object);
+ vm_map_verify_done(dst_map, dst_version);
+ goto BailOut;
+ }
+ vm_object_unlock(dst_page->object);
+
+ /*
+ * Copy the page, and note that it is dirty
+ * immediately.
+ */
+
+ if (src_page == VM_PAGE_NULL)
+ vm_page_zero_fill(dst_page);
+ else
+ vm_page_copy(src_page, dst_page);
+ dst_page->dirty = TRUE;
+
+ /*
+ * Unlock everything, and return
+ */
+
+ vm_map_verify_done(dst_map, dst_version);
+
+ if (src_page != VM_PAGE_NULL)
+ vm_fault_copy_cleanup(src_page, src_top_page);
+ vm_fault_copy_cleanup(dst_page, dst_top_page);
+
+ amount_done += PAGE_SIZE;
+ src_offset += PAGE_SIZE;
+ dst_offset += PAGE_SIZE;
+
+ } while (amount_done != *src_size);
+
+ RETURN(KERN_SUCCESS);
+#undef RETURN
+
+ /*NOTREACHED*/
+}
+
+
+
+
+
+#ifdef notdef
+
+/*
+ * Routine: vm_fault_page_overwrite
+ *
+ * Description:
+ * A form of vm_fault_page that assumes that the
+ * resulting page will be overwritten in its entirety,
+ * making it unnecessary to obtain the correct *contents*
+ * of the page.
+ *
+ * Implementation:
+ * XXX Untested. Also unused. Eventually, this technology
+ * could be used in vm_fault_copy() to advantage.
+ */
+vm_fault_return_t vm_fault_page_overwrite(dst_object, dst_offset, result_page)
+ register
+ vm_object_t dst_object;
+ vm_offset_t dst_offset;
+ vm_page_t *result_page; /* OUT */
+{
+ register
+ vm_page_t dst_page;
+
+#define interruptible FALSE /* XXX */
+
+ while (TRUE) {
+ /*
+ * Look for a page at this offset
+ */
+
+ while ((dst_page = vm_page_lookup(dst_object, dst_offset))
+ == VM_PAGE_NULL) {
+ /*
+ * No page, no problem... just allocate one.
+ */
+
+ dst_page = vm_page_alloc(dst_object, dst_offset);
+ if (dst_page == VM_PAGE_NULL) {
+ vm_object_unlock(dst_object);
+ VM_PAGE_WAIT((void (*)()) 0);
+ vm_object_lock(dst_object);
+ continue;
+ }
+
+ /*
+ * Pretend that the memory manager
+ * write-protected the page.
+ *
+ * Note that we will be asking for write
+ * permission without asking for the data
+ * first.
+ */
+
+ dst_page->overwriting = TRUE;
+ dst_page->page_lock = VM_PROT_WRITE;
+ dst_page->absent = TRUE;
+ dst_object->absent_count++;
+
+ break;
+
+ /*
+ * When we bail out, we might have to throw
+ * away the page created here.
+ */
+
+#define DISCARD_PAGE \
+ MACRO_BEGIN \
+ vm_object_lock(dst_object); \
+ dst_page = vm_page_lookup(dst_object, dst_offset); \
+ if ((dst_page != VM_PAGE_NULL) && dst_page->overwriting) \
+ VM_PAGE_FREE(dst_page); \
+ vm_object_unlock(dst_object); \
+ MACRO_END
+ }
+
+ /*
+ * If the page is write-protected...
+ */
+
+ if (dst_page->page_lock & VM_PROT_WRITE) {
+ /*
+ * ... and an unlock request hasn't been sent
+ */
+
+ if ( ! (dst_page->unlock_request & VM_PROT_WRITE)) {
+ vm_prot_t u;
+ kern_return_t rc;
+
+ /*
+ * ... then send one now.
+ */
+
+ if (!dst_object->pager_ready) {
+ vm_object_assert_wait(dst_object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ interruptible);
+ vm_object_unlock(dst_object);
+ thread_block((void (*)()) 0);
+ if (current_thread()->wait_result !=
+ THREAD_AWAKENED) {
+ DISCARD_PAGE;
+ return(VM_FAULT_INTERRUPTED);
+ }
+ continue;
+ }
+
+ u = dst_page->unlock_request |= VM_PROT_WRITE;
+ vm_object_unlock(dst_object);
+
+ if ((rc = memory_object_data_unlock(
+ dst_object->pager,
+ dst_object->pager_request,
+ dst_offset + dst_object->paging_offset,
+ PAGE_SIZE,
+ u)) != KERN_SUCCESS) {
+				printf("vm_fault_page_overwrite: memory_object_data_unlock failed\n");
+ DISCARD_PAGE;
+ return((rc == MACH_SEND_INTERRUPTED) ?
+ VM_FAULT_INTERRUPTED :
+ VM_FAULT_MEMORY_ERROR);
+ }
+ vm_object_lock(dst_object);
+ continue;
+ }
+
+ /* ... fall through to wait below */
+ } else {
+ /*
+ * If the page isn't being used for other
+ * purposes, then we're done.
+ */
+ if ( ! (dst_page->busy || dst_page->absent || dst_page->error) )
+ break;
+ }
+
+ PAGE_ASSERT_WAIT(dst_page, interruptible);
+ vm_object_unlock(dst_object);
+ thread_block((void (*)()) 0);
+ if (current_thread()->wait_result != THREAD_AWAKENED) {
+ DISCARD_PAGE;
+ return(VM_FAULT_INTERRUPTED);
+ }
+ }
+
+ *result_page = dst_page;
+ return(VM_FAULT_SUCCESS);
+
+#undef interruptible
+#undef DISCARD_PAGE
+}
+
+#endif	/* notdef */
diff --git a/vm/vm_fault.h b/vm/vm_fault.h
new file mode 100644
index 00000000..eee39994
--- /dev/null
+++ b/vm/vm_fault.h
@@ -0,0 +1,64 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_fault.h
+ *
+ * Page fault handling module declarations.
+ */
+
+#ifndef _VM_VM_FAULT_H_
+#define _VM_VM_FAULT_H_
+
+#include <mach/kern_return.h>
+
+/*
+ * Page fault handling based on vm_object only.
+ */
+
+typedef kern_return_t vm_fault_return_t;
+#define VM_FAULT_SUCCESS 0
+#define VM_FAULT_RETRY 1
+#define VM_FAULT_INTERRUPTED 2
+#define VM_FAULT_MEMORY_SHORTAGE 3
+#define VM_FAULT_FICTITIOUS_SHORTAGE 4
+#define VM_FAULT_MEMORY_ERROR 5
+
+extern void vm_fault_init();
+extern vm_fault_return_t vm_fault_page();
+
+extern void vm_fault_cleanup();
+/*
+ * Page fault handling based on vm_map (or entries therein)
+ */
+
+extern kern_return_t vm_fault();
+extern void vm_fault_wire();
+extern void vm_fault_unwire();
+
+extern kern_return_t vm_fault_copy(); /* Copy pages from
+ * one object to another
+ */
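+
+/*
+ * These are old-style declarations; the argument lists, as defined in
+ * vm/vm_fault.c, are:
+ *
+ *	vm_fault(map, vaddr, fault_type, change_wiring, resume, continuation)
+ *	vm_fault_page(first_object, first_offset, fault_type,
+ *		      must_be_resident, interruptible, protection,
+ *		      result_page, top_page, resume, continuation)
+ *	vm_fault_copy(src_object, src_offset, src_size, dst_object,
+ *		      dst_offset, dst_map, dst_version, interruptible)
+ */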
+#endif	/* _VM_VM_FAULT_H_ */
diff --git a/vm/vm_init.c b/vm/vm_init.c
new file mode 100644
index 00000000..b76b11b6
--- /dev/null
+++ b/vm/vm_init.c
@@ -0,0 +1,84 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_init.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Initialize the Virtual Memory subsystem.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <kern/zalloc.h>
+#include <kern/kalloc.h>
+#include <vm/vm_object.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
+#include <vm/memory_object.h>
+
+
+
+/*
+ * vm_mem_bootstrap initializes the virtual memory system.
+ * This is done only by the first cpu up.
+ */
+
+void vm_mem_bootstrap()
+{
+ vm_offset_t start, end;
+
+ /*
+ * Initializes resident memory structures.
+ * From here on, all physical memory is accounted for,
+ * and we use only virtual addresses.
+ */
+
+ vm_page_bootstrap(&start, &end);
+
+ /*
+ * Initialize other VM packages
+ */
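+	/*
+	 * The order below matters: later initializations allocate from
+	 * facilities set up by earlier ones (vm_fault_init, for
+	 * instance, creates its state zone with zinit).
+	 */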
+
+ zone_bootstrap();
+ vm_object_bootstrap();
+ vm_map_init();
+ kmem_init(start, end);
+ pmap_init();
+ zone_init();
+ kalloc_init();
+ vm_fault_init();
+ vm_page_module_init();
+ memory_manager_default_init();
+}
+
+void vm_mem_init()
+{
+ vm_object_init();
+}
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
new file mode 100644
index 00000000..eb1e0795
--- /dev/null
+++ b/vm/vm_kern.c
@@ -0,0 +1,1072 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_kern.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Kernel memory management.
+ */
+
+#include <mach/kern_return.h>
+#include "vm_param.h"
+#include <kern/assert.h>
+#include <kern/lock.h>
+#include <kern/thread.h>
+#include <vm/vm_fault.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+
+
+
+/*
+ * Variables exported by this module.
+ */
+
+vm_map_t kernel_map;
+vm_map_t kernel_pageable_map;
+
+extern void kmem_alloc_pages();
+extern void kmem_remap_pages();
+
+/*
+ * projected_buffer_allocate
+ *
+ * Allocate a wired-down buffer shared between kernel and user task.
+ * Fresh, zero-filled memory is allocated.
+ * If persistence is false, this buffer can only be deallocated from
+ * user task using projected_buffer_deallocate, and deallocation
+ * from user task also deallocates the buffer from the kernel map.
+ * projected_buffer_collect is called from vm_map_deallocate to
+ * automatically deallocate projected buffers on task_deallocate.
+ * Sharing with more than one user task is achieved by using
+ * projected_buffer_map for the second and subsequent tasks.
+ * The user is precluded from manipulating the VM entry of this buffer
+ * (i.e. changing protection, inheritance or machine attributes).
+ */
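+/*
+ * Illustrative use only ("user_task" is a hypothetical task pointer,
+ * not a name taken from this code):
+ *
+ *	vm_offset_t	kaddr, uaddr;
+ *
+ *	if (projected_buffer_allocate(user_task->map, PAGE_SIZE, FALSE,
+ *				      &kaddr, &uaddr,
+ *				      VM_PROT_READ | VM_PROT_WRITE,
+ *				      VM_INHERIT_NONE) == KERN_SUCCESS)
+ *		the kernel uses the buffer at kaddr while the user task
+ *		sees the same wired, zero-filled pages at uaddr.
+ */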
+
+kern_return_t
+projected_buffer_allocate(map, size, persistence, kernel_p,
+ user_p, protection, inheritance)
+ vm_map_t map;
+ vm_size_t size;
+ int persistence;
+ vm_offset_t *kernel_p;
+ vm_offset_t *user_p;
+ vm_prot_t protection;
+ vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+{
+ vm_object_t object;
+ vm_map_entry_t u_entry, k_entry;
+ vm_offset_t addr;
+ vm_size_t r_size;
+ kern_return_t kr;
+
+ if (map == VM_MAP_NULL || map == kernel_map)
+ return(KERN_INVALID_ARGUMENT);
+
+ /*
+ * Allocate a new object.
+ */
+
+ size = round_page(size);
+ object = vm_object_allocate(size);
+
+ vm_map_lock(kernel_map);
+ kr = vm_map_find_entry(kernel_map, &addr, size, (vm_offset_t) 0,
+ VM_OBJECT_NULL, &k_entry);
+ if (kr != KERN_SUCCESS) {
+ vm_map_unlock(kernel_map);
+ vm_object_deallocate(object);
+ return kr;
+ }
+
+ k_entry->object.vm_object = object;
+ if (!persistence)
+ k_entry->projected_on = (vm_map_entry_t) -1;
+ /*Mark entry so as to automatically deallocate it when
+ last corresponding user entry is deallocated*/
+ vm_map_unlock(kernel_map);
+ *kernel_p = addr;
+
+ vm_map_lock(map);
+ kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
+ VM_OBJECT_NULL, &u_entry);
+ if (kr != KERN_SUCCESS) {
+ vm_map_unlock(map);
+ vm_map_lock(kernel_map);
+ vm_map_entry_delete(kernel_map, k_entry);
+ vm_map_unlock(kernel_map);
+ vm_object_deallocate(object);
+ return kr;
+ }
+
+ u_entry->object.vm_object = object;
+ vm_object_reference(object);
+ u_entry->projected_on = k_entry;
+ /*Creates coupling with kernel mapping of the buffer, and
+ also guarantees that user cannot directly manipulate
+ buffer VM entry*/
+ u_entry->protection = protection;
+ u_entry->max_protection = protection;
+ u_entry->inheritance = inheritance;
+ vm_map_unlock(map);
+ *user_p = addr;
+
+ /*
+ * Allocate wired-down memory in the object,
+ * and enter it in the kernel pmap.
+ */
+ kmem_alloc_pages(object, 0,
+ *kernel_p, *kernel_p + size,
+ VM_PROT_READ | VM_PROT_WRITE);
+ bzero(*kernel_p, size); /*Zero fill*/
+
+ /* Set up physical mappings for user pmap */
+
+ pmap_pageable(map->pmap, *user_p, *user_p + size, FALSE);
+ for (r_size = 0; r_size < size; r_size += PAGE_SIZE) {
+ addr = pmap_extract(kernel_pmap, *kernel_p + r_size);
+ pmap_enter(map->pmap, *user_p + r_size, addr,
+ protection, TRUE);
+ }
+
+ return(KERN_SUCCESS);
+}
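+
+/*
+ * Example (illustrative sketch; not part of the original file): a kernel
+ * service could export a two-page, non-persistent buffer to a user task
+ * roughly as follows; "user_task_map" is a hypothetical vm_map_t for the
+ * target task.
+ *
+ *	vm_offset_t kern_va, user_va;
+ *	kern_return_t kr;
+ *
+ *	kr = projected_buffer_allocate(user_task_map, 2 * PAGE_SIZE, FALSE,
+ *				       &kern_va, &user_va,
+ *				       VM_PROT_READ | VM_PROT_WRITE,
+ *				       VM_INHERIT_NONE);
+ *	if (kr != KERN_SUCCESS)
+ *		return kr;
+ */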
+
+
+/*
+ * projected_buffer_map
+ *
+ * Map an area of kernel memory onto a task's address space.
+ * No new memory is allocated; the area must previously exist in the
+ * kernel memory map.
+ */
+
+kern_return_t
+projected_buffer_map(map, kernel_addr, size, user_p, protection, inheritance)
+ vm_map_t map;
+ vm_offset_t kernel_addr;
+ vm_size_t size;
+ vm_offset_t *user_p;
+ vm_prot_t protection;
+ vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+{
+ vm_object_t object;
+ vm_map_entry_t u_entry, k_entry;
+ vm_offset_t physical_addr, user_addr;
+ vm_size_t r_size;
+ kern_return_t kr;
+
+ /*
+ * Find entry in kernel map
+ */
+
+ size = round_page(size);
+ if (map == VM_MAP_NULL || map == kernel_map ||
+ !vm_map_lookup_entry(kernel_map, kernel_addr, &k_entry) ||
+ kernel_addr + size > k_entry->vme_end)
+ return(KERN_INVALID_ARGUMENT);
+
+
+ /*
+ * Create entry in user task
+ */
+
+ vm_map_lock(map);
+ kr = vm_map_find_entry(map, &user_addr, size, (vm_offset_t) 0,
+ VM_OBJECT_NULL, &u_entry);
+ if (kr != KERN_SUCCESS) {
+ vm_map_unlock(map);
+ return kr;
+ }
+
+ u_entry->object.vm_object = k_entry->object.vm_object;
+ vm_object_reference(k_entry->object.vm_object);
+ u_entry->offset = kernel_addr - k_entry->vme_start + k_entry->offset;
+ u_entry->projected_on = k_entry;
+ /*Creates coupling with kernel mapping of the buffer, and
+ also guarantees that user cannot directly manipulate
+ buffer VM entry*/
+ u_entry->protection = protection;
+ u_entry->max_protection = protection;
+ u_entry->inheritance = inheritance;
+ u_entry->wired_count = k_entry->wired_count;
+ vm_map_unlock(map);
+ *user_p = user_addr;
+
+ /* Set up physical mappings for user pmap */
+
+ pmap_pageable(map->pmap, user_addr, user_addr + size,
+ !k_entry->wired_count);
+ for (r_size = 0; r_size < size; r_size += PAGE_SIZE) {
+ physical_addr = pmap_extract(kernel_pmap, kernel_addr + r_size);
+ pmap_enter(map->pmap, user_addr + r_size, physical_addr,
+ protection, k_entry->wired_count);
+ }
+
+ return(KERN_SUCCESS);
+}
+
+
+/*
+ * projected_buffer_deallocate
+ *
+ * Unmap projected buffer from task's address space.
+ * May also unmap buffer from kernel map, if buffer is not
+ * persistent and only the kernel reference remains.
+ */
+
+kern_return_t
+projected_buffer_deallocate(map, start, end)
+ vm_map_t map;
+ vm_offset_t start, end;
+{
+ vm_map_entry_t entry, k_entry;
+
+ vm_map_lock(map);
+ if (map == VM_MAP_NULL || map == kernel_map ||
+ !vm_map_lookup_entry(map, start, &entry) ||
+ end > entry->vme_end ||
+ /*Check corresponding kernel entry*/
+ (k_entry = entry->projected_on) == 0) {
+ vm_map_unlock(map);
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ /*Prepare for deallocation*/
+ if (entry->vme_start < start)
+ _vm_map_clip_start(map, entry, start);
+ if (entry->vme_end > end)
+ _vm_map_clip_end(map, entry, end);
+ if (map->first_free == entry) /*Adjust first_free hint*/
+ map->first_free = entry->vme_prev;
+ entry->projected_on = 0; /*Needed to allow deletion*/
+ entry->wired_count = 0; /*Avoid unwire fault*/
+ vm_map_entry_delete(map, entry);
+ vm_map_unlock(map);
+
+ /*Check if the buffer is not persistent and only the
+ kernel mapping remains, and if so delete it*/
+ vm_map_lock(kernel_map);
+ if (k_entry->projected_on == (vm_map_entry_t) -1 &&
+ k_entry->object.vm_object->ref_count == 1) {
+ if (kernel_map->first_free == k_entry)
+ kernel_map->first_free = k_entry->vme_prev;
+ k_entry->projected_on = 0; /*Allow unwire fault*/
+ vm_map_entry_delete(kernel_map, k_entry);
+ }
+ vm_map_unlock(kernel_map);
+ return(KERN_SUCCESS);
+}
+
+
+/*
+ * projected_buffer_collect
+ *
+ * Unmap all projected buffers from task's address space.
+ */
+
+kern_return_t
+projected_buffer_collect(map)
+ vm_map_t map;
+{
+ vm_map_entry_t entry, next;
+
+ if (map == VM_MAP_NULL || map == kernel_map)
+ return(KERN_INVALID_ARGUMENT);
+
+ for (entry = vm_map_first_entry(map);
+ entry != vm_map_to_entry(map);
+ entry = next) {
+ next = entry->vme_next;
+ if (entry->projected_on != 0)
+ projected_buffer_deallocate(map, entry->vme_start, entry->vme_end);
+ }
+ return(KERN_SUCCESS);
+}
+
+
+/*
+ * projected_buffer_in_range
+ *
+ * Verifies whether a projected buffer exists in the address range
+ * given.
+ */
+
+boolean_t
+projected_buffer_in_range(map, start, end)
+ vm_map_t map;
+ vm_offset_t start, end;
+{
+ vm_map_entry_t entry;
+
+ if (map == VM_MAP_NULL || map == kernel_map)
+ return(FALSE);
+
+ /*Find first entry*/
+ if (!vm_map_lookup_entry(map, start, &entry))
+ entry = entry->vme_next;
+
+ while (entry != vm_map_to_entry(map) && entry->projected_on == 0 &&
+ entry->vme_start <= end) {
+ entry = entry->vme_next;
+ }
+ return(entry != vm_map_to_entry(map) && entry->vme_start <= end);
+}
+
+
+/*
+ * kmem_alloc:
+ *
+ * Allocate wired-down memory in the kernel's address map
+ * or a submap. The memory is not zero-filled.
+ */
+
+kern_return_t
+kmem_alloc(map, addrp, size)
+ vm_map_t map;
+ vm_offset_t *addrp;
+ vm_size_t size;
+{
+ vm_object_t object;
+ vm_map_entry_t entry;
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ /*
+ * Allocate a new object. We must do this before locking
+ * the map, lest we risk deadlock with the default pager:
+ * device_read_alloc uses kmem_alloc,
+ * which tries to allocate an object,
+ * which uses kmem_alloc_wired to get memory,
+ * which blocks for pages;
+ * meanwhile the default pager needs to read a block
+ * to process a memory_object_data_write,
+ * and device_read_alloc calls kmem_alloc
+ * and deadlocks on the map lock.
+ */
+
+ size = round_page(size);
+ object = vm_object_allocate(size);
+
+ vm_map_lock(map);
+ kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
+ VM_OBJECT_NULL, &entry);
+ if (kr != KERN_SUCCESS) {
+ vm_map_unlock(map);
+ vm_object_deallocate(object);
+ return kr;
+ }
+
+ entry->object.vm_object = object;
+ entry->offset = 0;
+
+ /*
+ * Since we have not given out this address yet,
+ * it is safe to unlock the map.
+ */
+ vm_map_unlock(map);
+
+ /*
+ * Allocate wired-down memory in the kernel_object,
+ * for this entry, and enter it in the kernel pmap.
+ */
+ kmem_alloc_pages(object, 0,
+ addr, addr + size,
+ VM_PROT_DEFAULT);
+
+ /*
+ * Return the memory, not zeroed.
+ */
+ *addrp = addr;
+ return KERN_SUCCESS;
+}
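+
+/*
+ * Example (illustrative sketch; not part of the original file): the usual
+ * pairing of kmem_alloc and kmem_free for a temporary wired kernel buffer.
+ *
+ *	vm_offset_t buf;
+ *
+ *	if (kmem_alloc(kernel_map, &buf, 8192) != KERN_SUCCESS)
+ *		panic("no kernel virtual space");
+ *	... use the (uninitialized) memory at buf ...
+ *	kmem_free(kernel_map, buf, 8192);
+ */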
+
+/*
+ * kmem_realloc:
+ *
+ * Reallocate wired-down memory in the kernel's address map
+ * or a submap. Newly allocated pages are not zeroed.
+ * This can only be used on regions allocated with kmem_alloc.
+ *
+ * If successful, the pages in the old region are mapped twice.
+ * The old region is unchanged. Use kmem_free to get rid of it.
+ */
+kern_return_t kmem_realloc(map, oldaddr, oldsize, newaddrp, newsize)
+ vm_map_t map;
+ vm_offset_t oldaddr;
+ vm_size_t oldsize;
+ vm_offset_t *newaddrp;
+ vm_size_t newsize;
+{
+ vm_offset_t oldmin, oldmax;
+ vm_offset_t newaddr;
+ vm_object_t object;
+ vm_map_entry_t oldentry, newentry;
+ kern_return_t kr;
+
+ oldmin = trunc_page(oldaddr);
+ oldmax = round_page(oldaddr + oldsize);
+ oldsize = oldmax - oldmin;
+ newsize = round_page(newsize);
+
+ /*
+ * Find space for the new region.
+ */
+
+ vm_map_lock(map);
+ kr = vm_map_find_entry(map, &newaddr, newsize, (vm_offset_t) 0,
+ VM_OBJECT_NULL, &newentry);
+ if (kr != KERN_SUCCESS) {
+ vm_map_unlock(map);
+ return kr;
+ }
+
+ /*
+ * Find the VM object backing the old region.
+ */
+
+ if (!vm_map_lookup_entry(map, oldmin, &oldentry))
+ panic("kmem_realloc");
+ object = oldentry->object.vm_object;
+
+ /*
+ * Increase the size of the object and
+ * fill in the new region.
+ */
+
+ vm_object_reference(object);
+ vm_object_lock(object);
+ if (object->size != oldsize)
+ panic("kmem_realloc");
+ object->size = newsize;
+ vm_object_unlock(object);
+
+ newentry->object.vm_object = object;
+ newentry->offset = 0;
+
+ /*
+ * Since we have not given out this address yet,
+ * it is safe to unlock the map. We are trusting
+ * that nobody will play with either region.
+ */
+
+ vm_map_unlock(map);
+
+ /*
+ * Remap the pages in the old region and
+ * allocate more pages for the new region.
+ */
+
+ kmem_remap_pages(object, 0,
+ newaddr, newaddr + oldsize,
+ VM_PROT_DEFAULT);
+ kmem_alloc_pages(object, oldsize,
+ newaddr + oldsize, newaddr + newsize,
+ VM_PROT_DEFAULT);
+
+ *newaddrp = newaddr;
+ return KERN_SUCCESS;
+}
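+
+/*
+ * Example (illustrative sketch; not part of the original file): growing a
+ * region that was obtained with kmem_alloc.  The old region stays mapped
+ * until it is explicitly freed; "old_buf", "new_buf", "old_size" and
+ * "new_size" are hypothetical.
+ *
+ *	if (kmem_realloc(kernel_map, old_buf, old_size,
+ *			 &new_buf, new_size) == KERN_SUCCESS) {
+ *		kmem_free(kernel_map, old_buf, old_size);
+ *		old_buf = new_buf;
+ *	}
+ */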
+
+/*
+ * kmem_alloc_wired:
+ *
+ * Allocate wired-down memory in the kernel's address map
+ * or a submap. The memory is not zero-filled.
+ *
+ * The memory is allocated in the kernel_object.
+ * It may not be copied with vm_map_copy, and
+ * it may not be reallocated with kmem_realloc.
+ */
+
+kern_return_t
+kmem_alloc_wired(map, addrp, size)
+ vm_map_t map;
+ vm_offset_t *addrp;
+ vm_size_t size;
+{
+ vm_map_entry_t entry;
+ vm_offset_t offset;
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ /*
+ * Use the kernel object for wired-down kernel pages.
+ * Assume that no region of the kernel object is
+ * referenced more than once. We want vm_map_find_entry
+ * to extend an existing entry if possible.
+ */
+
+ size = round_page(size);
+ vm_map_lock(map);
+ kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
+ kernel_object, &entry);
+ if (kr != KERN_SUCCESS) {
+ vm_map_unlock(map);
+ return kr;
+ }
+
+ /*
+ * Since we didn't know where the new region would
+ * start, we couldn't supply the correct offset into
+ * the kernel object. We only initialize the entry
+ * if we aren't extending an existing entry.
+ */
+
+ offset = addr - VM_MIN_KERNEL_ADDRESS;
+
+ if (entry->object.vm_object == VM_OBJECT_NULL) {
+ vm_object_reference(kernel_object);
+
+ entry->object.vm_object = kernel_object;
+ entry->offset = offset;
+ }
+
+ /*
+ * Since we have not given out this address yet,
+ * it is safe to unlock the map.
+ */
+ vm_map_unlock(map);
+
+ /*
+ * Allocate wired-down memory in the kernel_object,
+ * for this entry, and enter it in the kernel pmap.
+ */
+ kmem_alloc_pages(kernel_object, offset,
+ addr, addr + size,
+ VM_PROT_DEFAULT);
+
+ /*
+ * Return the memory, not zeroed.
+ */
+ *addrp = addr;
+ return KERN_SUCCESS;
+}
+
+/*
+ * kmem_alloc_aligned:
+ *
+ * Like kmem_alloc_wired, except that the memory is aligned.
+ * The size should be a power-of-2.
+ */
+
+kern_return_t
+kmem_alloc_aligned(map, addrp, size)
+ vm_map_t map;
+ vm_offset_t *addrp;
+ vm_size_t size;
+{
+ vm_map_entry_t entry;
+ vm_offset_t offset;
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ if ((size & (size - 1)) != 0)
+ panic("kmem_alloc_aligned");
+
+ /*
+ * Use the kernel object for wired-down kernel pages.
+ * Assume that no region of the kernel object is
+ * referenced more than once. We want vm_map_find_entry
+ * to extend an existing entry if possible.
+ */
+
+ size = round_page(size);
+ vm_map_lock(map);
+ kr = vm_map_find_entry(map, &addr, size, size - 1,
+ kernel_object, &entry);
+ if (kr != KERN_SUCCESS) {
+ vm_map_unlock(map);
+ return kr;
+ }
+
+ /*
+ * Since we didn't know where the new region would
+ * start, we couldn't supply the correct offset into
+ * the kernel object. We only initialize the entry
+ * if we aren't extending an existing entry.
+ */
+
+ offset = addr - VM_MIN_KERNEL_ADDRESS;
+
+ if (entry->object.vm_object == VM_OBJECT_NULL) {
+ vm_object_reference(kernel_object);
+
+ entry->object.vm_object = kernel_object;
+ entry->offset = offset;
+ }
+
+ /*
+ * Since we have not given out this address yet,
+ * it is safe to unlock the map.
+ */
+ vm_map_unlock(map);
+
+ /*
+ * Allocate wired-down memory in the kernel_object,
+ * for this entry, and enter it in the kernel pmap.
+ */
+ kmem_alloc_pages(kernel_object, offset,
+ addr, addr + size,
+ VM_PROT_DEFAULT);
+
+ /*
+ * Return the memory, not zeroed.
+ */
+ *addrp = addr;
+ return KERN_SUCCESS;
+}
+
+/*
+ * kmem_alloc_pageable:
+ *
+ * Allocate pageable memory in the kernel's address map.
+ */
+
+kern_return_t
+kmem_alloc_pageable(map, addrp, size)
+ vm_map_t map;
+ vm_offset_t *addrp;
+ vm_size_t size;
+{
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ addr = vm_map_min(map);
+ kr = vm_map_enter(map, &addr, round_page(size),
+ (vm_offset_t) 0, TRUE,
+ VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ *addrp = addr;
+ return KERN_SUCCESS;
+}
+
+/*
+ * kmem_free:
+ *
+ * Release a region of kernel virtual memory allocated
+ * with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
+ * and return the physical pages associated with that region.
+ */
+
+void
+kmem_free(map, addr, size)
+ vm_map_t map;
+ vm_offset_t addr;
+ vm_size_t size;
+{
+ kern_return_t kr;
+
+ kr = vm_map_remove(map, trunc_page(addr), round_page(addr + size));
+ if (kr != KERN_SUCCESS)
+ panic("kmem_free");
+}
+
+/*
+ * Allocate new wired pages in an object.
+ * The object is assumed to be mapped into the kernel map or
+ * a submap.
+ */
+void
+kmem_alloc_pages(object, offset, start, end, protection)
+ register vm_object_t object;
+ register vm_offset_t offset;
+ register vm_offset_t start, end;
+ vm_prot_t protection;
+{
+ /*
+ * Mark the pmap region as not pageable.
+ */
+ pmap_pageable(kernel_pmap, start, end, FALSE);
+
+ while (start < end) {
+ register vm_page_t mem;
+
+ vm_object_lock(object);
+
+ /*
+ * Allocate a page
+ */
+ while ((mem = vm_page_alloc(object, offset))
+ == VM_PAGE_NULL) {
+ vm_object_unlock(object);
+ VM_PAGE_WAIT((void (*)()) 0);
+ vm_object_lock(object);
+ }
+
+ /*
+ * Wire it down
+ */
+ vm_page_lock_queues();
+ vm_page_wire(mem);
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+
+ /*
+ * Enter it in the kernel pmap
+ */
+ PMAP_ENTER(kernel_pmap, start, mem,
+ protection, TRUE);
+
+ vm_object_lock(object);
+ PAGE_WAKEUP_DONE(mem);
+ vm_object_unlock(object);
+
+ start += PAGE_SIZE;
+ offset += PAGE_SIZE;
+ }
+}
+
+/*
+ * Remap wired pages in an object into a new region.
+ * The object is assumed to be mapped into the kernel map or
+ * a submap.
+ */
+void
+kmem_remap_pages(object, offset, start, end, protection)
+ register vm_object_t object;
+ register vm_offset_t offset;
+ register vm_offset_t start, end;
+ vm_prot_t protection;
+{
+ /*
+ * Mark the pmap region as not pageable.
+ */
+ pmap_pageable(kernel_pmap, start, end, FALSE);
+
+ while (start < end) {
+ register vm_page_t mem;
+
+ vm_object_lock(object);
+
+ /*
+ * Find a page
+ */
+ if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
+ panic("kmem_remap_pages");
+
+ /*
+ * Wire it down (again)
+ */
+ vm_page_lock_queues();
+ vm_page_wire(mem);
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+
+ /*
+ * Enter it in the kernel pmap. The page isn't busy,
+ * but this shouldn't be a problem because it is wired.
+ */
+ PMAP_ENTER(kernel_pmap, start, mem,
+ protection, TRUE);
+
+ start += PAGE_SIZE;
+ offset += PAGE_SIZE;
+ }
+}
+
+/*
+ * kmem_suballoc:
+ *
+ * Allocates a map to manage a subrange
+ * of the kernel virtual address space.
+ *
+ * Arguments are as follows:
+ *
+ * parent Map to take range from
+ * size Size of range to find
+ * min, max Returned endpoints of map
+ * pageable Can the region be paged
+ */
+
+vm_map_t
+kmem_suballoc(parent, min, max, size, pageable)
+ vm_map_t parent;
+ vm_offset_t *min, *max;
+ vm_size_t size;
+ boolean_t pageable;
+{
+ vm_map_t map;
+ vm_offset_t addr;
+ kern_return_t kr;
+
+ size = round_page(size);
+
+ /*
+ * Need reference on submap object because it is internal
+ * to the vm_system. vm_object_enter will never be called
+ * on it (usual source of reference for vm_map_enter).
+ */
+ vm_object_reference(vm_submap_object);
+
+ addr = (vm_offset_t) vm_map_min(parent);
+ kr = vm_map_enter(parent, &addr, size,
+ (vm_offset_t) 0, TRUE,
+ vm_submap_object, (vm_offset_t) 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+ if (kr != KERN_SUCCESS)
+ panic("kmem_suballoc");
+
+ pmap_reference(vm_map_pmap(parent));
+ map = vm_map_create(vm_map_pmap(parent), addr, addr + size, pageable);
+ if (map == VM_MAP_NULL)
+ panic("kmem_suballoc");
+
+ kr = vm_map_submap(parent, addr, addr + size, map);
+ if (kr != KERN_SUCCESS)
+ panic("kmem_suballoc");
+
+ *min = addr;
+ *max = addr + size;
+ return map;
+}
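+
+/*
+ * Example (illustrative sketch; not part of the original file): carving a
+ * pageable submap out of the kernel map, in the style of the kernel's other
+ * special-purpose submaps; "my_map_size" is a hypothetical constant.
+ *
+ *	vm_map_t my_map;
+ *	vm_offset_t my_min, my_max;
+ *
+ *	my_map = kmem_suballoc(kernel_map, &my_min, &my_max,
+ *			       my_map_size, TRUE);
+ */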
+
+/*
+ * kmem_init:
+ *
+ * Initialize the kernel's virtual memory map, taking
+ * into account all memory allocated up to this time.
+ */
+void kmem_init(start, end)
+ vm_offset_t start;
+ vm_offset_t end;
+{
+ kernel_map = vm_map_create(pmap_kernel(),
+ VM_MIN_KERNEL_ADDRESS, end,
+ FALSE);
+
+ /*
+ * Reserve virtual memory allocated up to this time.
+ */
+
+ if (start != VM_MIN_KERNEL_ADDRESS) {
+ kern_return_t rc;
+ vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
+ rc = vm_map_enter(kernel_map,
+ &addr, start - VM_MIN_KERNEL_ADDRESS,
+ (vm_offset_t) 0, TRUE,
+ VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ if (rc)
+ panic("%s:%d: vm_map_enter failed (%d)\n", rc);
+ }
+}
+
+/*
+ * New and improved IO wiring support.
+ */
+
+/*
+ * kmem_io_map_copyout:
+ *
+ * Establish temporary mapping in designated map for the memory
+ * passed in. Memory format must be a page_list vm_map_copy.
+ * Mapping is READ-ONLY.
+ */
+
+kern_return_t
+kmem_io_map_copyout(map, addr, alloc_addr, alloc_size, copy, min_size)
+ vm_map_t map;
+ vm_offset_t *addr; /* actual addr of data */
+ vm_offset_t *alloc_addr; /* page aligned addr */
+ vm_size_t *alloc_size; /* size allocated */
+ vm_map_copy_t copy;
+ vm_size_t min_size; /* Do at least this much */
+{
+ vm_offset_t myaddr, offset;
+ vm_size_t mysize, copy_size;
+ kern_return_t ret;
+ register
+ vm_page_t *page_list;
+ vm_map_copy_t new_copy;
+ register
+ int i;
+
+ assert(copy->type == VM_MAP_COPY_PAGE_LIST);
+ assert(min_size != 0);
+
+ /*
+ * Figure out the size in vm pages.
+ */
+ min_size += copy->offset - trunc_page(copy->offset);
+ min_size = round_page(min_size);
+ mysize = round_page(copy->offset + copy->size) -
+ trunc_page(copy->offset);
+
+ /*
+ * If total size is larger than one page list and
+ * we don't have to do more than one page list, then
+ * only do one page list.
+ *
+ * XXX Could be much smarter about this ... like trimming length
+ * XXX if we need more than one page list but not all of them.
+ */
+
+ copy_size = ptoa(copy->cpy_npages);
+ if (mysize > copy_size && copy_size > min_size)
+ mysize = copy_size;
+
+ /*
+ * Allocate some address space in the map (must be kernel
+ * space).
+ */
+ myaddr = vm_map_min(map);
+ ret = vm_map_enter(map, &myaddr, mysize,
+ (vm_offset_t) 0, TRUE,
+ VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+
+ if (ret != KERN_SUCCESS)
+ return(ret);
+
+ /*
+ * Tell the pmap module that this will be wired, and
+ * enter the mappings.
+ */
+ pmap_pageable(vm_map_pmap(map), myaddr, myaddr + mysize, TRUE);
+
+ *addr = myaddr + (copy->offset - trunc_page(copy->offset));
+ *alloc_addr = myaddr;
+ *alloc_size = mysize;
+
+ offset = myaddr;
+ page_list = &copy->cpy_page_list[0];
+ while (TRUE) {
+ for ( i = 0; i < copy->cpy_npages; i++, offset += PAGE_SIZE) {
+ PMAP_ENTER(vm_map_pmap(map), offset, *page_list,
+ VM_PROT_READ, TRUE);
+ page_list++;
+ }
+
+ if (offset == (myaddr + mysize))
+ break;
+
+ /*
+ * Onward to the next page_list. The extend_cont
+ * leaves the current page list's pages alone;
+ * they'll be cleaned up at discard. Reset this
+ * copy's continuation to discard the next one.
+ */
+ vm_map_copy_invoke_extend_cont(copy, &new_copy, &ret);
+
+ if (ret != KERN_SUCCESS) {
+ kmem_io_map_deallocate(map, myaddr, mysize);
+ return(ret);
+ }
+ copy->cpy_cont = vm_map_copy_discard_cont;
+ copy->cpy_cont_args = (char *) new_copy;
+ copy = new_copy;
+ page_list = &copy->cpy_page_list[0];
+ }
+
+ return(ret);
+}
+
+/*
+ * kmem_io_map_deallocate:
+ *
+ * Get rid of the mapping established by kmem_io_map_copyout.
+ * Assumes that addr and size have been rounded to page boundaries.
+ * (e.g., the alloc_addr and alloc_size returned by kmem_io_map_copyout)
+ */
+
+void
+kmem_io_map_deallocate(map, addr, size)
+ vm_map_t map;
+ vm_offset_t addr;
+ vm_size_t size;
+{
+ /*
+ * Remove the mappings. The pmap_remove is needed.
+ */
+
+ pmap_remove(vm_map_pmap(map), addr, addr + size);
+ vm_map_remove(map, addr, addr + size);
+}
+
+/*
+ * Routine: copyinmap
+ * Purpose:
+ * Like copyin, except that fromaddr is an address
+ * in the specified VM map. This implementation
+ * is incomplete; it handles the current user map
+ * and the kernel map/submaps.
+ */
+
+int copyinmap(map, fromaddr, toaddr, length)
+ vm_map_t map;
+ char *fromaddr, *toaddr;
+ int length;
+{
+ if (vm_map_pmap(map) == kernel_pmap) {
+ /* assume a correct copy */
+ bcopy(fromaddr, toaddr, length);
+ return 0;
+ }
+
+ if (current_map() == map)
+ return copyin( fromaddr, toaddr, length);
+
+ return 1;
+}
+
+/*
+ * Routine: copyoutmap
+ * Purpose:
+ * Like copyout, except that toaddr is an address
+ * in the specified VM map. This implementation
+ * is incomplete; it handles the current user map
+ * and the kernel map/submaps.
+ */
+
+int copyoutmap(map, fromaddr, toaddr, length)
+ vm_map_t map;
+ char *fromaddr, *toaddr;
+ int length;
+{
+ if (vm_map_pmap(map) == kernel_pmap) {
+ /* assume a correct copy */
+ bcopy(fromaddr, toaddr, length);
+ return 0;
+ }
+
+ if (current_map() == map)
+ return copyout(fromaddr, toaddr, length);
+
+ return 1;
+}
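+
+/*
+ * Example (illustrative sketch; not part of the original file): copying a
+ * small structure out to another task's map.  A nonzero return means the
+ * copy could not be performed; "target_map", "uaddr" and "struct foo" are
+ * hypothetical.
+ *
+ *	struct foo f;
+ *
+ *	if (copyoutmap(target_map, (char *) &f, (char *) uaddr, sizeof f))
+ *		return KERN_INVALID_ADDRESS;
+ */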
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
new file mode 100644
index 00000000..8e00fcce
--- /dev/null
+++ b/vm/vm_kern.h
@@ -0,0 +1,63 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_kern.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Kernel memory management definitions.
+ */
+
+#ifndef _VM_VM_KERN_H_
+#define _VM_VM_KERN_H_
+
+#include <mach/kern_return.h>
+#include <vm/vm_map.h>
+
+extern kern_return_t projected_buffer_allocate();
+extern kern_return_t projected_buffer_deallocate();
+extern kern_return_t projected_buffer_map();
+extern kern_return_t projected_buffer_collect();
+
+extern void kmem_init();
+
+extern kern_return_t kmem_alloc();
+extern kern_return_t kmem_alloc_pageable();
+extern kern_return_t kmem_alloc_wired();
+extern kern_return_t kmem_alloc_aligned();
+extern kern_return_t kmem_realloc();
+extern void kmem_free();
+
+extern vm_map_t kmem_suballoc();
+
+extern kern_return_t kmem_io_map_copyout();
+extern void kmem_io_map_deallocate();
+
+extern vm_map_t kernel_map;
+extern vm_map_t kernel_pageable_map;
+extern vm_map_t ipc_kernel_map;
+
+#endif /* _VM_VM_KERN_H_ */
diff --git a/vm/vm_map.c b/vm/vm_map.c
new file mode 100644
index 00000000..c71b8580
--- /dev/null
+++ b/vm/vm_map.c
@@ -0,0 +1,5244 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_map.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Virtual memory mapping module.
+ */
+
+#include <norma_ipc.h>
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/vm_attributes.h>
+#include <mach/vm_param.h>
+#include <kern/assert.h>
+#include <kern/zalloc.h>
+#include <vm/vm_fault.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
+#include <ipc/ipc_port.h>
+
+/*
+ * Macros to copy a vm_map_entry. We must be careful to correctly
+ * manage the wired page count. vm_map_entry_copy() creates a new
+ * map entry to the same memory - the wired count in the new entry
+ * must be set to zero. vm_map_entry_copy_full() creates a new
+ * entry that is identical to the old entry. This preserves the
+ * wire count; it's used for map splitting and zone changing in
+ * vm_map_copyout.
+ */
+#define vm_map_entry_copy(NEW,OLD) \
+MACRO_BEGIN \
+ *(NEW) = *(OLD); \
+ (NEW)->is_shared = FALSE; \
+ (NEW)->needs_wakeup = FALSE; \
+ (NEW)->in_transition = FALSE; \
+ (NEW)->wired_count = 0; \
+ (NEW)->user_wired_count = 0; \
+MACRO_END
+
+#define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD))
+
+/*
+ * Virtual memory maps provide for the mapping, protection,
+ * and sharing of virtual memory objects. In addition,
+ * this module provides for an efficient virtual copy of
+ * memory from one map to another.
+ *
+ * Synchronization is required prior to most operations.
+ *
+ * Maps consist of an ordered doubly-linked list of simple
+ * entries; a single hint is used to speed up lookups.
+ *
+ * Sharing maps have been deleted from this version of Mach.
+ * All shared objects are now mapped directly into the respective
+ * maps. This requires a change in the copy on write strategy;
+ * the asymmetric (delayed) strategy is used for shared temporary
+ * objects instead of the symmetric (shadow) strategy. This is
+ * selected by the (new) use_shared_copy bit in the object. See
+ * vm_object_copy_temporary in vm_object.c for details. All maps
+ * are now "top level" maps (either task map, kernel map or submap
+ * of the kernel map).
+ *
+ * Since portions of maps are specified by start/end addresses,
+ * which may not align with existing map entries, all
+ * routines merely "clip" entries to these start/end values.
+ * [That is, an entry is split into two, bordering at a
+ * start or end value.] Note that these clippings may not
+ * always be necessary (as the two resulting entries are then
+ * not changed); however, the clipping is done for convenience.
+ * No attempt is currently made to "glue back together" two
+ * abutting entries.
+ *
+ * The symmetric (shadow) copy strategy implements virtual copy
+ * by copying VM object references from one map to
+ * another, and then marking both regions as copy-on-write.
+ * It is important to note that only one writeable reference
+ * to a VM object region exists in any map when this strategy
+ * is used -- this means that shadow object creation can be
+ * delayed until a write operation occurs. The asymmetric (delayed)
+ * strategy allows multiple maps to have writeable references to
+ * the same region of a vm object, and hence cannot delay creating
+ * its copy objects. See vm_object_copy_temporary() in vm_object.c.
+ * Copying of permanent objects is completely different; see
+ * vm_object_copy_strategically() in vm_object.c.
+ */
+
+zone_t vm_map_zone; /* zone for vm_map structures */
+zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */
+zone_t vm_map_kentry_zone; /* zone for kernel entry structures */
+zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */
+
+boolean_t vm_map_lookup_entry(); /* forward declaration */
+
+/*
+ * Placeholder object for submap operations. This object is dropped
+ * into the range by a call to vm_map_find, and removed when
+ * vm_map_submap creates the submap.
+ */
+
+vm_object_t vm_submap_object;
+
+/*
+ * vm_map_init:
+ *
+ * Initialize the vm_map module. Must be called before
+ * any other vm_map routines.
+ *
+ * Map and entry structures are allocated from zones -- we must
+ * initialize those zones.
+ *
+ * There are three zones of interest:
+ *
+ * vm_map_zone: used to allocate maps.
+ * vm_map_entry_zone: used to allocate map entries.
+ * vm_map_kentry_zone: used to allocate map entries for the kernel.
+ *
+ * The kernel allocates map entries from a special zone that is initially
+ * "crammed" with memory. It would be difficult (perhaps impossible) for
+ * the kernel to allocate more memory to an entry zone when it became
+ * empty since the very act of allocating memory implies the creation
+ * of a new entry.
+ */
+
+vm_offset_t kentry_data;
+vm_size_t kentry_data_size;
+int kentry_count = 256; /* to init kentry_data_size */
+
+void vm_map_init()
+{
+ vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 40*1024,
+ PAGE_SIZE, 0, "maps");
+ vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
+ 1024*1024, PAGE_SIZE*5,
+ 0, "non-kernel map entries");
+ vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
+ kentry_data_size, kentry_data_size,
+ ZONE_FIXED /* XXX */, "kernel map entries");
+
+ vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
+ 16*1024, PAGE_SIZE, 0,
+ "map copies");
+
+ /*
+ * Cram the kentry zone with initial data.
+ */
+ zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
+
+ /*
+ * Submap object is initialized by vm_object_init.
+ */
+}
+
+/*
+ * vm_map_create:
+ *
+ * Creates and returns a new empty VM map with
+ * the given physical map structure, and having
+ * the given lower and upper address bounds.
+ */
+vm_map_t vm_map_create(pmap, min, max, pageable)
+ pmap_t pmap;
+ vm_offset_t min, max;
+ boolean_t pageable;
+{
+ register vm_map_t result;
+
+ result = (vm_map_t) zalloc(vm_map_zone);
+ if (result == VM_MAP_NULL)
+ panic("vm_map_create");
+
+ vm_map_first_entry(result) = vm_map_to_entry(result);
+ vm_map_last_entry(result) = vm_map_to_entry(result);
+ result->hdr.nentries = 0;
+ result->hdr.entries_pageable = pageable;
+
+ result->size = 0;
+ result->ref_count = 1;
+ result->pmap = pmap;
+ result->min_offset = min;
+ result->max_offset = max;
+ result->wiring_required = FALSE;
+ result->wait_for_space = FALSE;
+ result->first_free = vm_map_to_entry(result);
+ result->hint = vm_map_to_entry(result);
+ vm_map_lock_init(result);
+ simple_lock_init(&result->ref_lock);
+ simple_lock_init(&result->hint_lock);
+
+ return(result);
+}
+
+/*
+ * vm_map_entry_create: [ internal use only ]
+ *
+ * Allocates a VM map entry for insertion in the
+ * given map (or map copy). No fields are filled.
+ */
+#define vm_map_entry_create(map) \
+ _vm_map_entry_create(&(map)->hdr)
+
+#define vm_map_copy_entry_create(copy) \
+ _vm_map_entry_create(&(copy)->cpy_hdr)
+
+vm_map_entry_t _vm_map_entry_create(map_header)
+ register struct vm_map_header *map_header;
+{
+ register zone_t zone;
+ register vm_map_entry_t entry;
+
+ if (map_header->entries_pageable)
+ zone = vm_map_entry_zone;
+ else
+ zone = vm_map_kentry_zone;
+
+ entry = (vm_map_entry_t) zalloc(zone);
+ if (entry == VM_MAP_ENTRY_NULL)
+ panic("vm_map_entry_create");
+
+ return(entry);
+}
+
+/*
+ * vm_map_entry_dispose: [ internal use only ]
+ *
+ * Inverse of vm_map_entry_create.
+ */
+#define vm_map_entry_dispose(map, entry) \
+ _vm_map_entry_dispose(&(map)->hdr, (entry))
+
+#define vm_map_copy_entry_dispose(copy, entry) \
+ _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
+
+void _vm_map_entry_dispose(map_header, entry)
+ register struct vm_map_header *map_header;
+ register vm_map_entry_t entry;
+{
+ register zone_t zone;
+
+ if (map_header->entries_pageable)
+ zone = vm_map_entry_zone;
+ else
+ zone = vm_map_kentry_zone;
+
+ zfree(zone, (vm_offset_t) entry);
+}
+
+/*
+ * vm_map_entry_{un,}link:
+ *
+ * Insert/remove entries from maps (or map copies).
+ */
+#define vm_map_entry_link(map, after_where, entry) \
+ _vm_map_entry_link(&(map)->hdr, after_where, entry)
+
+#define vm_map_copy_entry_link(copy, after_where, entry) \
+ _vm_map_entry_link(&(copy)->cpy_hdr, after_where, entry)
+
+#define _vm_map_entry_link(hdr, after_where, entry) \
+ MACRO_BEGIN \
+ (hdr)->nentries++; \
+ (entry)->vme_prev = (after_where); \
+ (entry)->vme_next = (after_where)->vme_next; \
+ (entry)->vme_prev->vme_next = \
+ (entry)->vme_next->vme_prev = (entry); \
+ MACRO_END
+
+#define vm_map_entry_unlink(map, entry) \
+ _vm_map_entry_unlink(&(map)->hdr, entry)
+
+#define vm_map_copy_entry_unlink(copy, entry) \
+ _vm_map_entry_unlink(&(copy)->cpy_hdr, entry)
+
+#define _vm_map_entry_unlink(hdr, entry) \
+ MACRO_BEGIN \
+ (hdr)->nentries--; \
+ (entry)->vme_next->vme_prev = (entry)->vme_prev; \
+ (entry)->vme_prev->vme_next = (entry)->vme_next; \
+ MACRO_END
+
+/*
+ * vm_map_reference:
+ *
+ * Creates another valid reference to the given map.
+ *
+ */
+void vm_map_reference(map)
+ register vm_map_t map;
+{
+ if (map == VM_MAP_NULL)
+ return;
+
+ simple_lock(&map->ref_lock);
+ map->ref_count++;
+ simple_unlock(&map->ref_lock);
+}
+
+/*
+ * vm_map_deallocate:
+ *
+ * Removes a reference from the specified map,
+ * destroying it if no references remain.
+ * The map should not be locked.
+ */
+void vm_map_deallocate(map)
+ register vm_map_t map;
+{
+ register int c;
+
+ if (map == VM_MAP_NULL)
+ return;
+
+ simple_lock(&map->ref_lock);
+ c = --map->ref_count;
+ simple_unlock(&map->ref_lock);
+
+ if (c > 0) {
+ return;
+ }
+
+ projected_buffer_collect(map);
+ (void) vm_map_delete(map, map->min_offset, map->max_offset);
+
+ pmap_destroy(map->pmap);
+
+ zfree(vm_map_zone, (vm_offset_t) map);
+}
+
+/*
+ * SAVE_HINT:
+ *
+ * Saves the specified entry as the hint for
+ * future lookups. Performs necessary interlocks.
+ */
+#define SAVE_HINT(map,value) \
+ simple_lock(&(map)->hint_lock); \
+ (map)->hint = (value); \
+ simple_unlock(&(map)->hint_lock);
+
+/*
+ * vm_map_lookup_entry: [ internal use only ]
+ *
+ * Finds the map entry containing (or
+ * immediately preceding) the specified address
+ * in the given map; the entry is returned
+ * in the "entry" parameter. The boolean
+ * result indicates whether the address is
+ * actually contained in the map.
+ */
+boolean_t vm_map_lookup_entry(map, address, entry)
+ register vm_map_t map;
+ register vm_offset_t address;
+ vm_map_entry_t *entry; /* OUT */
+{
+ register vm_map_entry_t cur;
+ register vm_map_entry_t last;
+
+ /*
+ * Start looking either from the head of the
+ * list, or from the hint.
+ */
+
+ simple_lock(&map->hint_lock);
+ cur = map->hint;
+ simple_unlock(&map->hint_lock);
+
+ if (cur == vm_map_to_entry(map))
+ cur = cur->vme_next;
+
+ if (address >= cur->vme_start) {
+ /*
+ * Go from hint to end of list.
+ *
+ * But first, make a quick check to see if
+ * we are already looking at the entry we
+ * want (which is usually the case).
+ * Note also that we don't need to save the hint
+ * here... it is the same hint (unless we are
+ * at the header, in which case the hint didn't
+ * buy us anything anyway).
+ */
+ last = vm_map_to_entry(map);
+ if ((cur != last) && (cur->vme_end > address)) {
+ *entry = cur;
+ return(TRUE);
+ }
+ }
+ else {
+ /*
+ * Go from start to hint, *inclusively*
+ */
+ last = cur->vme_next;
+ cur = vm_map_first_entry(map);
+ }
+
+ /*
+ * Search linearly
+ */
+
+ while (cur != last) {
+ if (cur->vme_end > address) {
+ if (address >= cur->vme_start) {
+ /*
+ * Save this lookup for future
+ * hints, and return
+ */
+
+ *entry = cur;
+ SAVE_HINT(map, cur);
+ return(TRUE);
+ }
+ break;
+ }
+ cur = cur->vme_next;
+ }
+ *entry = cur->vme_prev;
+ SAVE_HINT(map, *entry);
+ return(FALSE);
+}
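+
+/*
+ * Example (illustrative sketch; not part of the original file): the usual
+ * calling pattern, with the map locked by the caller.
+ *
+ *	vm_map_entry_t entry;
+ *
+ *	vm_map_lock(map);
+ *	if (vm_map_lookup_entry(map, addr, &entry)) {
+ *		... addr falls within "entry" ...
+ *	} else {
+ *		... "entry" precedes addr (or is the map header) ...
+ *	}
+ *	vm_map_unlock(map);
+ */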
+
+/*
+ * Routine: invalid_user_access
+ *
+ * Returns TRUE if the specified user access to the given range
+ * is not valid (i.e. not permitted by the map).
+ */
+
+boolean_t
+invalid_user_access(map, start, end, prot)
+ vm_map_t map;
+ vm_offset_t start, end;
+ vm_prot_t prot;
+{
+ vm_map_entry_t entry;
+
+ return (map == VM_MAP_NULL || map == kernel_map ||
+ !vm_map_lookup_entry(map, start, &entry) ||
+ entry->vme_end < end ||
+ (prot & ~(entry->protection)));
+}
+
+
+/*
+ * Routine: vm_map_find_entry
+ * Purpose:
+ * Allocate a range in the specified virtual address map,
+ * returning the entry allocated for that range.
+ * Used by kmem_alloc, etc. Returns wired entries.
+ *
+ * The map must be locked.
+ *
+ * If an entry is allocated, the object/offset fields
+ * are initialized to zero. If an object is supplied,
+ * then an existing entry may be extended.
+ */
+kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
+ register vm_map_t map;
+ vm_offset_t *address; /* OUT */
+ vm_size_t size;
+ vm_offset_t mask;
+ vm_object_t object;
+ vm_map_entry_t *o_entry; /* OUT */
+{
+ register vm_map_entry_t entry, new_entry;
+ register vm_offset_t start;
+ register vm_offset_t end;
+
+ /*
+ * Look for the first possible address;
+ * if there's already something at this
+ * address, we have to start after it.
+ */
+
+ if ((entry = map->first_free) == vm_map_to_entry(map))
+ start = map->min_offset;
+ else
+ start = entry->vme_end;
+
+ /*
+ * In any case, the "entry" always precedes
+ * the proposed new region throughout the loop:
+ */
+
+ while (TRUE) {
+ register vm_map_entry_t next;
+
+ /*
+ * Find the end of the proposed new region.
+ * Be sure we didn't go beyond the end, or
+ * wrap around the address.
+ */
+
+ if (((start + mask) & ~mask) < start)
+ return(KERN_NO_SPACE);
+ start = ((start + mask) & ~mask);
+ end = start + size;
+
+ if ((end > map->max_offset) || (end < start))
+ return(KERN_NO_SPACE);
+
+ /*
+ * If there are no more entries, we must win.
+ */
+
+ next = entry->vme_next;
+ if (next == vm_map_to_entry(map))
+ break;
+
+ /*
+ * If there is another entry, it must be
+ * after the end of the potential new region.
+ */
+
+ if (next->vme_start >= end)
+ break;
+
+ /*
+ * Didn't fit -- move to the next entry.
+ */
+
+ entry = next;
+ start = entry->vme_end;
+ }
+
+ /*
+ * At this point,
+ * "start" and "end" should define the endpoints of the
+ * available new range, and
+ * "entry" should refer to the region before the new
+ * range, and
+ *
+ * the map should be locked.
+ */
+
+ *address = start;
+
+ /*
+ * See whether we can avoid creating a new entry by
+ * extending one of our neighbors. [So far, we only attempt to
+ * extend from below.]
+ */
+
+ if ((object != VM_OBJECT_NULL) &&
+ (entry != vm_map_to_entry(map)) &&
+ (entry->vme_end == start) &&
+ (!entry->is_shared) &&
+ (!entry->is_sub_map) &&
+ (entry->object.vm_object == object) &&
+ (entry->needs_copy == FALSE) &&
+ (entry->inheritance == VM_INHERIT_DEFAULT) &&
+ (entry->protection == VM_PROT_DEFAULT) &&
+ (entry->max_protection == VM_PROT_ALL) &&
+ (entry->wired_count == 1) &&
+ (entry->user_wired_count == 0) &&
+ (entry->projected_on == 0)) {
+ /*
+ * Because this is a special case,
+ * we don't need to use vm_object_coalesce.
+ */
+
+ entry->vme_end = end;
+ new_entry = entry;
+ } else {
+ new_entry = vm_map_entry_create(map);
+
+ new_entry->vme_start = start;
+ new_entry->vme_end = end;
+
+ new_entry->is_shared = FALSE;
+ new_entry->is_sub_map = FALSE;
+ new_entry->object.vm_object = VM_OBJECT_NULL;
+ new_entry->offset = (vm_offset_t) 0;
+
+ new_entry->needs_copy = FALSE;
+
+ new_entry->inheritance = VM_INHERIT_DEFAULT;
+ new_entry->protection = VM_PROT_DEFAULT;
+ new_entry->max_protection = VM_PROT_ALL;
+ new_entry->wired_count = 1;
+ new_entry->user_wired_count = 0;
+
+ new_entry->in_transition = FALSE;
+ new_entry->needs_wakeup = FALSE;
+ new_entry->projected_on = 0;
+
+ /*
+ * Insert the new entry into the list
+ */
+
+ vm_map_entry_link(map, entry, new_entry);
+ }
+
+ map->size += size;
+
+ /*
+ * Update the free space hint and the lookup hint
+ */
+
+ map->first_free = new_entry;
+ SAVE_HINT(map, new_entry);
+
+ *o_entry = new_entry;
+ return(KERN_SUCCESS);
+}
+
+int vm_map_pmap_enter_print = FALSE;
+int vm_map_pmap_enter_enable = FALSE;
+
+/*
+ * Routine: vm_map_pmap_enter
+ *
+ * Description:
+ * Force pages from the specified object to be entered into
+ * the pmap at the specified address if they are present.
+ * The scan ends as soon as a page is not found in the object.
+ *
+ * Returns:
+ * Nothing.
+ *
+ * In/out conditions:
+ * The source map should not be locked on entry.
+ */
+void
+vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
+ vm_map_t map;
+ register
+ vm_offset_t addr;
+ register
+ vm_offset_t end_addr;
+ register
+ vm_object_t object;
+ vm_offset_t offset;
+ vm_prot_t protection;
+{
+ while (addr < end_addr) {
+ register vm_page_t m;
+
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+
+ m = vm_page_lookup(object, offset);
+ if (m == VM_PAGE_NULL || m->absent) {
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ return;
+ }
+
+ if (vm_map_pmap_enter_print) {
+ printf("vm_map_pmap_enter:");
+ printf("map: %x, addr: %x, object: %x, offset: %x\n",
+ map, addr, object, offset);
+ }
+
+ m->busy = TRUE;
+ vm_object_unlock(object);
+
+ PMAP_ENTER(map->pmap, addr, m,
+ protection, FALSE);
+
+ vm_object_lock(object);
+ PAGE_WAKEUP_DONE(m);
+ vm_page_lock_queues();
+ if (!m->active && !m->inactive)
+ vm_page_activate(m);
+ vm_page_unlock_queues();
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ offset += PAGE_SIZE;
+ addr += PAGE_SIZE;
+ }
+}
+
+/*
+ * Routine: vm_map_enter
+ *
+ * Description:
+ * Allocate a range in the specified virtual address map.
+ * The resulting range will refer to memory defined by
+ * the given memory object and offset into that object.
+ *
+ * Arguments are as defined in the vm_map call.
+ */
+kern_return_t vm_map_enter(
+ map,
+ address, size, mask, anywhere,
+ object, offset, needs_copy,
+ cur_protection, max_protection, inheritance)
+ register
+ vm_map_t map;
+ vm_offset_t *address; /* IN/OUT */
+ vm_size_t size;
+ vm_offset_t mask;
+ boolean_t anywhere;
+ vm_object_t object;
+ vm_offset_t offset;
+ boolean_t needs_copy;
+ vm_prot_t cur_protection;
+ vm_prot_t max_protection;
+ vm_inherit_t inheritance;
+{
+ register vm_map_entry_t entry;
+ register vm_offset_t start;
+ register vm_offset_t end;
+ kern_return_t result = KERN_SUCCESS;
+
+#define RETURN(value) { result = value; goto BailOut; }
+
+ StartAgain: ;
+
+ start = *address;
+
+ if (anywhere) {
+ vm_map_lock(map);
+
+ /*
+ * Calculate the first possible address.
+ */
+
+ if (start < map->min_offset)
+ start = map->min_offset;
+ if (start > map->max_offset)
+ RETURN(KERN_NO_SPACE);
+
+ /*
+ * Look for the first possible address;
+ * if there's already something at this
+ * address, we have to start after it.
+ */
+
+ if (start == map->min_offset) {
+ if ((entry = map->first_free) != vm_map_to_entry(map))
+ start = entry->vme_end;
+ } else {
+ vm_map_entry_t tmp_entry;
+ if (vm_map_lookup_entry(map, start, &tmp_entry))
+ start = tmp_entry->vme_end;
+ entry = tmp_entry;
+ }
+
+ /*
+ * In any case, the "entry" always precedes
+ * the proposed new region throughout the
+ * loop:
+ */
+
+ while (TRUE) {
+ register vm_map_entry_t next;
+
+ /*
+ * Find the end of the proposed new region.
+ * Be sure we didn't go beyond the end, or
+ * wrap around the address.
+ */
+
+ if (((start + mask) & ~mask) < start)
+ return(KERN_NO_SPACE);
+ start = ((start + mask) & ~mask);
+ end = start + size;
+
+ if ((end > map->max_offset) || (end < start)) {
+ if (map->wait_for_space) {
+ if (size <= (map->max_offset -
+ map->min_offset)) {
+ assert_wait((event_t) map, TRUE);
+ vm_map_unlock(map);
+ thread_block((void (*)()) 0);
+ goto StartAgain;
+ }
+ }
+
+ RETURN(KERN_NO_SPACE);
+ }
+
+ /*
+ * If there are no more entries, we must win.
+ */
+
+ next = entry->vme_next;
+ if (next == vm_map_to_entry(map))
+ break;
+
+ /*
+ * If there is another entry, it must be
+ * after the end of the potential new region.
+ */
+
+ if (next->vme_start >= end)
+ break;
+
+ /*
+ * Didn't fit -- move to the next entry.
+ */
+
+ entry = next;
+ start = entry->vme_end;
+ }
+ *address = start;
+ } else {
+ vm_map_entry_t temp_entry;
+
+ /*
+ * Verify that:
+ * the address doesn't itself violate
+ * the mask requirement.
+ */
+
+ if ((start & mask) != 0)
+ return(KERN_NO_SPACE);
+
+ vm_map_lock(map);
+
+ /*
+ * ... the address is within bounds
+ */
+
+ end = start + size;
+
+ if ((start < map->min_offset) ||
+ (end > map->max_offset) ||
+ (start >= end)) {
+ RETURN(KERN_INVALID_ADDRESS);
+ }
+
+ /*
+ * ... the starting address isn't allocated
+ */
+
+ if (vm_map_lookup_entry(map, start, &temp_entry))
+ RETURN(KERN_NO_SPACE);
+
+ entry = temp_entry;
+
+ /*
+ * ... the next region doesn't overlap the
+ * end point.
+ */
+
+ if ((entry->vme_next != vm_map_to_entry(map)) &&
+ (entry->vme_next->vme_start < end))
+ RETURN(KERN_NO_SPACE);
+ }
+
+ /*
+ * At this point,
+ * "start" and "end" should define the endpoints of the
+ * available new range, and
+ * "entry" should refer to the region before the new
+ * range, and
+ *
+ * the map should be locked.
+ */
+
+ /*
+ * See whether we can avoid creating a new entry (and object) by
+ * extending one of our neighbors. [So far, we only attempt to
+ * extend from below.]
+ */
+
+ if ((object == VM_OBJECT_NULL) &&
+ (entry != vm_map_to_entry(map)) &&
+ (entry->vme_end == start) &&
+ (!entry->is_shared) &&
+ (!entry->is_sub_map) &&
+ (entry->inheritance == inheritance) &&
+ (entry->protection == cur_protection) &&
+ (entry->max_protection == max_protection) &&
+ (entry->wired_count == 0) && /* implies user_wired_count == 0 */
+ (entry->projected_on == 0)) {
+ if (vm_object_coalesce(entry->object.vm_object,
+ VM_OBJECT_NULL,
+ entry->offset,
+ (vm_offset_t) 0,
+ (vm_size_t)(entry->vme_end - entry->vme_start),
+ (vm_size_t)(end - entry->vme_end))) {
+
+ /*
+ * Coalesced the two objects - can extend
+ * the previous map entry to include the
+ * new range.
+ */
+ map->size += (end - entry->vme_end);
+ entry->vme_end = end;
+ RETURN(KERN_SUCCESS);
+ }
+ }
+
+ /*
+ * Create a new entry
+ */
+
+ /**/ {
+ register vm_map_entry_t new_entry;
+
+ new_entry = vm_map_entry_create(map);
+
+ new_entry->vme_start = start;
+ new_entry->vme_end = end;
+
+ new_entry->is_shared = FALSE;
+ new_entry->is_sub_map = FALSE;
+ new_entry->object.vm_object = object;
+ new_entry->offset = offset;
+
+ new_entry->needs_copy = needs_copy;
+
+ new_entry->inheritance = inheritance;
+ new_entry->protection = cur_protection;
+ new_entry->max_protection = max_protection;
+ new_entry->wired_count = 0;
+ new_entry->user_wired_count = 0;
+
+ new_entry->in_transition = FALSE;
+ new_entry->needs_wakeup = FALSE;
+ new_entry->projected_on = 0;
+
+ /*
+ * Insert the new entry into the list
+ */
+
+ vm_map_entry_link(map, entry, new_entry);
+ map->size += size;
+
+ /*
+ * Update the free space hint and the lookup hint
+ */
+
+ if ((map->first_free == entry) &&
+ ((entry == vm_map_to_entry(map) ? map->min_offset : entry->vme_end)
+ >= new_entry->vme_start))
+ map->first_free = new_entry;
+
+ SAVE_HINT(map, new_entry);
+
+ vm_map_unlock(map);
+
+ if ((object != VM_OBJECT_NULL) &&
+ (vm_map_pmap_enter_enable) &&
+ (!anywhere) &&
+ (!needs_copy) &&
+ (size < (128*1024))) {
+ vm_map_pmap_enter(map, start, end,
+ object, offset, cur_protection);
+ }
+
+ return(result);
+ /**/ }
+
+ BailOut: ;
+
+ vm_map_unlock(map);
+ return(result);
+
+#undef RETURN
+}
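+
+/*
+ * Example (illustrative sketch; not part of the original file): entering
+ * "size" bytes of anonymous zero-fill memory anywhere in a task's map,
+ * roughly what the vm_allocate path does; "task_map" is hypothetical.
+ *
+ *	vm_offset_t addr = vm_map_min(task_map);
+ *
+ *	kr = vm_map_enter(task_map, &addr, size,
+ *			  (vm_offset_t) 0, TRUE,
+ *			  VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
+ *			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+ */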
+
+/*
+ * vm_map_clip_start: [ internal use only ]
+ *
+ * Asserts that the given entry begins at or after
+ * the specified address; if necessary,
+ * it splits the entry into two.
+ */
+void _vm_map_clip_start();
+#define vm_map_clip_start(map, entry, startaddr) \
+ MACRO_BEGIN \
+ if ((startaddr) > (entry)->vme_start) \
+ _vm_map_clip_start(&(map)->hdr,(entry),(startaddr)); \
+ MACRO_END
+
+void _vm_map_copy_clip_start();
+#define vm_map_copy_clip_start(copy, entry, startaddr) \
+ MACRO_BEGIN \
+ if ((startaddr) > (entry)->vme_start) \
+ _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
+ MACRO_END
+
+/*
+ * This routine is called only when it is known that
+ * the entry must be split.
+ */
+void _vm_map_clip_start(map_header, entry, start)
+ register struct vm_map_header *map_header;
+ register vm_map_entry_t entry;
+ register vm_offset_t start;
+{
+ register vm_map_entry_t new_entry;
+
+ /*
+ * Split off the front portion --
+ * note that we must insert the new
+ * entry BEFORE this one, so that
+ * this entry has the specified starting
+ * address.
+ */
+
+ new_entry = _vm_map_entry_create(map_header);
+ vm_map_entry_copy_full(new_entry, entry);
+
+ new_entry->vme_end = start;
+ entry->offset += (start - entry->vme_start);
+ entry->vme_start = start;
+
+ _vm_map_entry_link(map_header, entry->vme_prev, new_entry);
+
+ if (entry->is_sub_map)
+ vm_map_reference(new_entry->object.sub_map);
+ else
+ vm_object_reference(new_entry->object.vm_object);
+}
+
+/*
+ * vm_map_clip_end: [ internal use only ]
+ *
+ * Asserts that the given entry ends at or before
+ * the specified address; if necessary,
+ * it splits the entry into two.
+ */
+void _vm_map_clip_end();
+#define vm_map_clip_end(map, entry, endaddr) \
+ MACRO_BEGIN \
+ if ((endaddr) < (entry)->vme_end) \
+ _vm_map_clip_end(&(map)->hdr,(entry),(endaddr)); \
+ MACRO_END
+
+void _vm_map_copy_clip_end();
+#define vm_map_copy_clip_end(copy, entry, endaddr) \
+ MACRO_BEGIN \
+ if ((endaddr) < (entry)->vme_end) \
+ _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
+ MACRO_END
+
+/*
+ * This routine is called only when it is known that
+ * the entry must be split.
+ */
+void _vm_map_clip_end(map_header, entry, end)
+ register struct vm_map_header *map_header;
+ register vm_map_entry_t entry;
+ register vm_offset_t end;
+{
+ register vm_map_entry_t new_entry;
+
+ /*
+ * Create a new entry and insert it
+ * AFTER the specified entry
+ */
+
+ new_entry = _vm_map_entry_create(map_header);
+ vm_map_entry_copy_full(new_entry, entry);
+
+ new_entry->vme_start = entry->vme_end = end;
+ new_entry->offset += (end - entry->vme_start);
+
+ _vm_map_entry_link(map_header, entry, new_entry);
+
+ if (entry->is_sub_map)
+ vm_map_reference(new_entry->object.sub_map);
+ else
+ vm_object_reference(new_entry->object.vm_object);
+}
+
+/*
+ * VM_MAP_RANGE_CHECK: [ internal use only ]
+ *
+ * Asserts that the starting and ending region
+ * addresses fall within the valid range of the map.
+ */
+#define VM_MAP_RANGE_CHECK(map, start, end) \
+ { \
+ if (start < vm_map_min(map)) \
+ start = vm_map_min(map); \
+ if (end > vm_map_max(map)) \
+ end = vm_map_max(map); \
+ if (start > end) \
+ start = end; \
+ }
+
+/*
+ * vm_map_submap: [ kernel use only ]
+ *
+ * Mark the given range as handled by a subordinate map.
+ *
+ * This range must have been created with vm_map_find using
+ * the vm_submap_object, and no other operations may have been
+ * performed on this range prior to calling vm_map_submap.
+ *
+ * Only a limited number of operations can be performed
+ * within this range after calling vm_map_submap:
+ * vm_fault
+ * [Don't try vm_map_copyin!]
+ *
+ * To remove a submapping, one must first remove the
+ * range from the superior map, and then destroy the
+ * submap (if desired). [Better yet, don't try it.]
+ */
+kern_return_t vm_map_submap(map, start, end, submap)
+ register vm_map_t map;
+ register vm_offset_t start;
+ register vm_offset_t end;
+ vm_map_t submap;
+{
+ vm_map_entry_t entry;
+ register kern_return_t result = KERN_INVALID_ARGUMENT;
+ register vm_object_t object;
+
+ vm_map_lock(map);
+
+ VM_MAP_RANGE_CHECK(map, start, end);
+
+ if (vm_map_lookup_entry(map, start, &entry)) {
+ vm_map_clip_start(map, entry, start);
+ }
+ else
+ entry = entry->vme_next;
+
+ vm_map_clip_end(map, entry, end);
+
+ if ((entry->vme_start == start) && (entry->vme_end == end) &&
+ (!entry->is_sub_map) &&
+ ((object = entry->object.vm_object) == vm_submap_object) &&
+ (object->resident_page_count == 0) &&
+ (object->copy == VM_OBJECT_NULL) &&
+ (object->shadow == VM_OBJECT_NULL) &&
+ (!object->pager_created)) {
+ entry->object.vm_object = VM_OBJECT_NULL;
+ vm_object_deallocate(object);
+ entry->is_sub_map = TRUE;
+ vm_map_reference(entry->object.sub_map = submap);
+ result = KERN_SUCCESS;
+ }
+ vm_map_unlock(map);
+
+ return(result);
+}
+
+/*
+ * vm_map_protect:
+ *
+ * Sets the protection of the specified address
+ * region in the target map. If "set_max" is
+ * specified, the maximum protection is to be set;
+ * otherwise, only the current protection is affected.
+ */
+kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
+ register vm_map_t map;
+ register vm_offset_t start;
+ register vm_offset_t end;
+ register vm_prot_t new_prot;
+ register boolean_t set_max;
+{
+ register vm_map_entry_t current;
+ vm_map_entry_t entry;
+
+ vm_map_lock(map);
+
+ VM_MAP_RANGE_CHECK(map, start, end);
+
+ if (vm_map_lookup_entry(map, start, &entry)) {
+ vm_map_clip_start(map, entry, start);
+ }
+ else
+ entry = entry->vme_next;
+
+ /*
+ * Make a first pass to check for protection
+ * violations.
+ */
+
+ current = entry;
+ while ((current != vm_map_to_entry(map)) &&
+ (current->vme_start < end)) {
+
+ if (current->is_sub_map) {
+ vm_map_unlock(map);
+ return(KERN_INVALID_ARGUMENT);
+ }
+ if ((new_prot & (VM_PROT_NOTIFY | current->max_protection))
+ != new_prot) {
+ vm_map_unlock(map);
+ return(KERN_PROTECTION_FAILURE);
+ }
+
+ current = current->vme_next;
+ }
+
+ /*
+ * Go back and fix up protections.
+ * [Note that clipping is not necessary the second time.]
+ */
+
+ current = entry;
+
+ while ((current != vm_map_to_entry(map)) &&
+ (current->vme_start < end)) {
+
+ vm_prot_t old_prot;
+
+ vm_map_clip_end(map, current, end);
+
+ old_prot = current->protection;
+ if (set_max)
+ current->protection =
+ (current->max_protection = new_prot) &
+ old_prot;
+ else
+ current->protection = new_prot;
+
+ /*
+ * Update physical map if necessary.
+ */
+
+ if (current->protection != old_prot) {
+ pmap_protect(map->pmap, current->vme_start,
+ current->vme_end,
+ current->protection);
+ }
+ current = current->vme_next;
+ }
+
+ vm_map_unlock(map);
+ return(KERN_SUCCESS);
+}
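+
+/*
+ * Example (illustrative sketch; not part of the original file): making a
+ * range of a task's map read-only without lowering its maximum protection.
+ *
+ *	kr = vm_map_protect(task_map, start, end, VM_PROT_READ, FALSE);
+ */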
+
+/*
+ * vm_map_inherit:
+ *
+ * Sets the inheritance of the specified address
+ * range in the target map. Inheritance
+ * affects how the map will be shared with
+ * child maps at the time of vm_map_fork.
+ */
+kern_return_t vm_map_inherit(map, start, end, new_inheritance)
+ register vm_map_t map;
+ register vm_offset_t start;
+ register vm_offset_t end;
+ register vm_inherit_t new_inheritance;
+{
+ register vm_map_entry_t entry;
+ vm_map_entry_t temp_entry;
+
+ vm_map_lock(map);
+
+ VM_MAP_RANGE_CHECK(map, start, end);
+
+ if (vm_map_lookup_entry(map, start, &temp_entry)) {
+ entry = temp_entry;
+ vm_map_clip_start(map, entry, start);
+ }
+ else
+ entry = temp_entry->vme_next;
+
+ while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
+ vm_map_clip_end(map, entry, end);
+
+ entry->inheritance = new_inheritance;
+
+ entry = entry->vme_next;
+ }
+
+ vm_map_unlock(map);
+ return(KERN_SUCCESS);
+}
+
+/*
+ * vm_map_pageable_common:
+ *
+ * Sets the pageability of the specified address
+ * range in the target map. Regions specified
+ * as not pageable require locked-down physical
+ * memory and physical page maps. access_type indicates
+ * types of accesses that must not generate page faults.
+ * This is checked against the protection of the memory being locked down.
+ * access_type of VM_PROT_NONE makes memory pageable.
+ *
+ * The map must not be locked, but a reference
+ * must remain to the map throughout the call.
+ *
+ * Callers should use macros in vm/vm_map.h (i.e. vm_map_pageable,
+ * or vm_map_pageable_user); don't call vm_map_pageable_common directly.
+ */
+kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
+ register vm_map_t map;
+ register vm_offset_t start;
+ register vm_offset_t end;
+ register vm_prot_t access_type;
+ boolean_t user_wire;
+{
+ register vm_map_entry_t entry;
+ vm_map_entry_t start_entry;
+
+ vm_map_lock(map);
+
+ VM_MAP_RANGE_CHECK(map, start, end);
+
+ if (vm_map_lookup_entry(map, start, &start_entry)) {
+ entry = start_entry;
+ /*
+ * vm_map_clip_start will be done later.
+ */
+ }
+ else {
+ /*
+ * Start address is not in map; this is fatal.
+ */
+ vm_map_unlock(map);
+ return(KERN_FAILURE);
+ }
+
+ /*
+ * Actions are rather different for wiring and unwiring,
+ * so we have two separate cases.
+ */
+
+ if (access_type == VM_PROT_NONE) {
+
+ vm_map_clip_start(map, entry, start);
+
+ /*
+ * Unwiring. First ensure that the range to be
+ * unwired is really wired down.
+ */
+ while ((entry != vm_map_to_entry(map)) &&
+ (entry->vme_start < end)) {
+
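+ /*
+ * Fail if this entry is not wired (or not user-wired
+ * for a user request), or if a hole follows it before
+ * the end of the range.
+ */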
+ if ((entry->wired_count == 0) ||
+ ((entry->vme_end < end) &&
+ ((entry->vme_next == vm_map_to_entry(map)) ||
+ (entry->vme_next->vme_start > entry->vme_end))) ||
+ (user_wire && (entry->user_wired_count == 0))) {
+ vm_map_unlock(map);
+ return(KERN_INVALID_ARGUMENT);
+ }
+ entry = entry->vme_next;
+ }
+
+ /*
+ * Now decrement the wiring count for each region.
+ * If a region becomes completely unwired,
+ * unwire its physical pages and mappings.
+ */
+ entry = start_entry;
+ while ((entry != vm_map_to_entry(map)) &&
+ (entry->vme_start < end)) {
+ vm_map_clip_end(map, entry, end);
+
+ if (user_wire) {
+ if (--(entry->user_wired_count) == 0)
+ entry->wired_count--;
+ }
+ else {
+ entry->wired_count--;
+ }
+
+ if (entry->wired_count == 0)
+ vm_fault_unwire(map, entry);
+
+ entry = entry->vme_next;
+ }
+ }
+
+ else {
+ /*
+ * Wiring. We must do this in two passes:
+ *
+ * 1. Holding the write lock, we create any shadow
+ * or zero-fill objects that need to be created.
+ * Then we clip each map entry to the region to be
+ * wired and increment its wiring count. We
+ * create objects before clipping the map entries
+ * to avoid object proliferation.
+ *
+ * 2. We downgrade to a read lock, and call
+ * vm_fault_wire to fault in the pages for any
+ * newly wired area (wired_count is 1).
+ *
+ * Downgrading to a read lock for vm_fault_wire avoids
+ * a possible deadlock with another thread that may have
+ * faulted on one of the pages to be wired (it would mark
+ * the page busy, blocking us, then in turn block on the
+ * map lock that we hold). Because of problems in the
+ * recursive lock package, we cannot upgrade to a write
+ * lock in vm_map_lookup. Thus, any actions that require
+ * the write lock must be done beforehand. Because we
+ * keep the read lock on the map, the copy-on-write
+ * status of the entries we modify here cannot change.
+ */
+
+ /*
+ * Pass 1.
+ */
+ while ((entry != vm_map_to_entry(map)) &&
+ (entry->vme_start < end)) {
+ vm_map_clip_end(map, entry, end);
+
+ if (entry->wired_count == 0) {
+
+ /*
+ * Perform actions of vm_map_lookup that need
+ * the write lock on the map: create a shadow
+ * object for a copy-on-write region, or an
+ * object for a zero-fill region.
+ */
+ if (entry->needs_copy &&
+ ((entry->protection & VM_PROT_WRITE) != 0)) {
+
+ vm_object_shadow(&entry->object.vm_object,
+ &entry->offset,
+ (vm_size_t)(entry->vme_end
+ - entry->vme_start));
+ entry->needs_copy = FALSE;
+ }
+ if (entry->object.vm_object == VM_OBJECT_NULL) {
+ entry->object.vm_object =
+ vm_object_allocate(
+ (vm_size_t)(entry->vme_end
+ - entry->vme_start));
+ entry->offset = (vm_offset_t)0;
+ }
+ }
+ vm_map_clip_start(map, entry, start);
+ vm_map_clip_end(map, entry, end);
+
+ if (user_wire) {
+ if ((entry->user_wired_count)++ == 0)
+ entry->wired_count++;
+ }
+ else {
+ entry->wired_count++;
+ }
+
+ /*
+ * Check for holes and protection mismatch.
+ * Holes: Next entry should be contiguous unless
+ * this is the end of the region.
+ * Protection: Access requested must be allowed.
+ */
+ if (((entry->vme_end < end) &&
+ ((entry->vme_next == vm_map_to_entry(map)) ||
+ (entry->vme_next->vme_start > entry->vme_end))) ||
+ ((entry->protection & access_type) != access_type)) {
+ /*
+ * Found a hole or protection problem.
+ * Object creation actions
+ * do not need to be undone, but the
+ * wired counts need to be restored.
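+ * Walk backwards over the entries whose counts were
+ * already incremented (including this one) and undo
+ * the increments.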
+ */
+ while ((entry != vm_map_to_entry(map)) &&
+ (entry->vme_end > start)) {
+ if (user_wire) {
+ if (--(entry->user_wired_count) == 0)
+ entry->wired_count--;
+ }
+ else {
+ entry->wired_count--;
+ }
+
+ entry = entry->vme_prev;
+ }
+
+ vm_map_unlock(map);
+ return(KERN_FAILURE);
+ }
+ entry = entry->vme_next;
+ }
+
+ /*
+ * Pass 2.
+ */
+
+ /*
+ * HACK HACK HACK HACK
+ *
+ * If we are wiring in the kernel map or a submap of it,
+ * unlock the map to avoid deadlocks. We trust that the
+ * kernel threads are well-behaved, and therefore will
+ * not do anything destructive to this region of the map
+ * while we have it unlocked. We cannot trust user threads
+ * to do the same.
+ *
+ * HACK HACK HACK HACK
+ */
+ if (vm_map_pmap(map) == kernel_pmap) {
+ vm_map_unlock(map); /* trust me ... */
+ }
+ else {
+ vm_map_lock_set_recursive(map);
+ vm_map_lock_write_to_read(map);
+ }
+
+ entry = start_entry;
+ while (entry != vm_map_to_entry(map) &&
+ entry->vme_start < end) {
+ /*
+ * Wiring cases:
+ * Kernel: wired == 1 && user_wired == 0
+ * User: wired == 1 && user_wired == 1
+ *
+ * Don't need to wire if either is > 1. wired == 0 &&
+ * user_wired == 1 can't happen.
+ */
+
+ /*
+ * XXX This assumes that the faults always succeed.
+ */
+ if ((entry->wired_count == 1) &&
+ (entry->user_wired_count <= 1)) {
+ vm_fault_wire(map, entry);
+ }
+ entry = entry->vme_next;
+ }
+
+ if (vm_map_pmap(map) == kernel_pmap) {
+ vm_map_lock(map);
+ }
+ else {
+ vm_map_lock_clear_recursive(map);
+ }
+ }
+
+ vm_map_unlock(map);
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * vm_map_entry_delete: [ internal use only ]
+ *
+ * Deallocate the given entry from the target map.
+ */
+void vm_map_entry_delete(map, entry)
+ register vm_map_t map;
+ register vm_map_entry_t entry;
+{
+ register vm_offset_t s, e;
+ register vm_object_t object;
+ extern vm_object_t kernel_object;
+
+ s = entry->vme_start;
+ e = entry->vme_end;
+
+ /*Check if projected buffer*/
+ if (map != kernel_map && entry->projected_on != 0) {
+ /*Check if projected kernel entry is persistent;
+ may only manipulate directly if it is*/
+ if (entry->projected_on->projected_on == 0)
+ entry->wired_count = 0; /*Avoid unwire fault*/
+ else
+ return;
+ }
+
+ /*
+ * Get the object. Null objects cannot have pmap entries.
+ */
+
+ if ((object = entry->object.vm_object) != VM_OBJECT_NULL) {
+
+ /*
+ * Unwire before removing addresses from the pmap;
+ * otherwise, unwiring will put the entries back in
+ * the pmap.
+ */
+
+ if (entry->wired_count != 0) {
+ vm_fault_unwire(map, entry);
+ entry->wired_count = 0;
+ entry->user_wired_count = 0;
+ }
+
+ /*
+ * If the object is shared, we must remove
+ * *all* references to this data, since we can't
+ * find all of the physical maps which are sharing
+ * it.
+ */
+
+ if (object == kernel_object) {
+ vm_object_lock(object);
+ vm_object_page_remove(object, entry->offset,
+ entry->offset + (e - s));
+ vm_object_unlock(object);
+ } else if (entry->is_shared) {
+ vm_object_pmap_remove(object,
+ entry->offset,
+ entry->offset + (e - s));
+ }
+ else {
+ pmap_remove(map->pmap, s, e);
+ }
+ }
+
+ /*
+ * Deallocate the object only after removing all
+ * pmap entries pointing to its pages.
+ */
+
+ if (entry->is_sub_map)
+ vm_map_deallocate(entry->object.sub_map);
+ else
+ vm_object_deallocate(entry->object.vm_object);
+
+ vm_map_entry_unlink(map, entry);
+ map->size -= e - s;
+
+ vm_map_entry_dispose(map, entry);
+}
+
+/*
+ * vm_map_delete: [ internal use only ]
+ *
+ * Deallocates the given address range from the target
+ * map.
+ */
+
+kern_return_t vm_map_delete(map, start, end)
+ register vm_map_t map;
+ register vm_offset_t start;
+ register vm_offset_t end;
+{
+ vm_map_entry_t entry;
+ vm_map_entry_t first_entry;
+
+ /*
+ * Find the start of the region, and clip it
+ */
+
+ if (!vm_map_lookup_entry(map, start, &first_entry))
+ entry = first_entry->vme_next;
+ else {
+ entry = first_entry;
+#if NORMA_IPC_xxx
+ /*
+ * XXX Had to disable this code because:
+
+ _vm_map_delete(c0804b78,c2198000,c219a000,0,c219a000)+df
+ [vm/vm_map.c:2007]
+ _vm_map_remove(c0804b78,c2198000,c219a000,c0817834,
+ c081786c)+42 [vm/vm_map.c:2094]
+ _kmem_io_map_deallocate(c0804b78,c2198000,2000,c0817834,
+ c081786c)+43 [vm/vm_kern.c:818]
+ _device_write_dealloc(c081786c)+117 [device/ds_routines.c:814]
+ _ds_write_done(c081786c,0)+2e [device/ds_routines.c:848]
+ _io_done_thread_continue(c08150c0,c21d4e14,c21d4e30,c08150c0,
+ c080c114)+14 [device/ds_routines.c:1350]
+
+ */
+ if (start > entry->vme_start
+ && end == entry->vme_end
+ && ! entry->wired_count /* XXX ??? */
+ && ! entry->is_shared
+ && ! entry->projected_on
+ && ! entry->is_sub_map) {
+ extern vm_object_t kernel_object;
+ register vm_object_t object = entry->object.vm_object;
+
+ /*
+ * The region to be deleted lives at the end
+ * of this entry, and thus all we have to do is
+ * truncate the entry.
+ *
+ * This special case is necessary if we want
+ * coalescing to do us any good.
+ *
+ * XXX Do we have to adjust object size?
+ */
+ if (object == kernel_object) {
+ vm_object_lock(object);
+ vm_object_page_remove(object,
+ entry->offset + start,
+ entry->offset +
+ (end - start));
+ vm_object_unlock(object);
+ } else if (entry->is_shared) {
+ vm_object_pmap_remove(object,
+ entry->offset + start,
+ entry->offset +
+ (end - start));
+ } else {
+ pmap_remove(map->pmap, start, end);
+ }
+ object->size -= (end - start); /* XXX */
+
+ entry->vme_end = start;
+ map->size -= (end - start);
+
+ if (map->wait_for_space) {
+ thread_wakeup((event_t) map);
+ }
+ return KERN_SUCCESS;
+ }
+#endif /* NORMA_IPC_xxx */
+ vm_map_clip_start(map, entry, start);
+
+ /*
+ * Fix the lookup hint now, rather than each
+ * time through the loop.
+ */
+
+ SAVE_HINT(map, entry->vme_prev);
+ }
+
+ /*
+ * Save the free space hint
+ */
+
+ if (map->first_free->vme_start >= start)
+ map->first_free = entry->vme_prev;
+
+ /*
+ * Step through all entries in this region
+ */
+
+ while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
+ vm_map_entry_t next;
+
+ vm_map_clip_end(map, entry, end);
+
+ /*
+ * If the entry is in transition, we must wait
+ * for it to exit that state. It could be clipped
+ * while we leave the map unlocked.
+ */
+ if(entry->in_transition) {
+ /*
+ * Say that we are waiting, and wait for entry.
+ */
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(map, FALSE);
+ vm_map_lock(map);
+
+ /*
+ * The entry could have been clipped or it
+ * may not exist anymore. Look it up again.
+ */
+ if(!vm_map_lookup_entry(map, start, &entry)) {
+ entry = entry->vme_next;
+ }
+ continue;
+ }
+
+ next = entry->vme_next;
+
+ vm_map_entry_delete(map, entry);
+ entry = next;
+ }
+
+ if (map->wait_for_space)
+ thread_wakeup((event_t) map);
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * vm_map_remove:
+ *
+ * Remove the given address range from the target map.
+ * This is the exported form of vm_map_delete.
+ */
+kern_return_t vm_map_remove(map, start, end)
+ register vm_map_t map;
+ register vm_offset_t start;
+ register vm_offset_t end;
+{
+ register kern_return_t result;
+
+ vm_map_lock(map);
+ VM_MAP_RANGE_CHECK(map, start, end);
+ result = vm_map_delete(map, start, end);
+ vm_map_unlock(map);
+
+ return(result);
+}
+
+
+/*
+ * vm_map_copy_steal_pages:
+ *
+ * Steal all the pages from a vm_map_copy page_list by copying ones
+ * that have not already been stolen.
+ */
+void
+vm_map_copy_steal_pages(copy)
+vm_map_copy_t copy;
+{
+ register vm_page_t m, new_m;
+ register int i;
+ vm_object_t object;
+
+ for (i = 0; i < copy->cpy_npages; i++) {
+
+ /*
+ * If the page is not tabled, then it's already stolen.
+ */
+ m = copy->cpy_page_list[i];
+ if (!m->tabled)
+ continue;
+
+ /*
+ * Page was not stolen, get a new
+ * one and do the copy now.
+ */
+ while ((new_m = vm_page_grab()) == VM_PAGE_NULL) {
+ VM_PAGE_WAIT((void(*)()) 0);
+ }
+
+ vm_page_copy(m, new_m);
+
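+ /*
+ * Leave the original page behind: reactivate it if it is
+ * on neither page queue, clear its busy bit (waking any
+ * waiters), and release the paging reference held on its
+ * object.
+ */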
+ object = m->object;
+ vm_object_lock(object);
+ vm_page_lock_queues();
+ if (!m->active && !m->inactive)
+ vm_page_activate(m);
+ vm_page_unlock_queues();
+ PAGE_WAKEUP_DONE(m);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ copy->cpy_page_list[i] = new_m;
+ }
+}
+
+/*
+ * vm_map_copy_page_discard:
+ *
+ * Get rid of the pages in a page_list copy. If the pages are
+ * stolen, they are freed. If the pages are not stolen, they
+ * are unbusied, and associated state is cleaned up.
+ */
+void vm_map_copy_page_discard(copy)
+vm_map_copy_t copy;
+{
+ while (copy->cpy_npages > 0) {
+ vm_page_t m;
+
+ if((m = copy->cpy_page_list[--(copy->cpy_npages)]) !=
+ VM_PAGE_NULL) {
+
+ /*
+ * If it's not in the table, then it's
+ * a stolen page that goes back
+ * to the free list. Else it belongs
+ * to some object, and we hold a
+ * paging reference on that object.
+ */
+ if (!m->tabled) {
+ VM_PAGE_FREE(m);
+ }
+ else {
+ vm_object_t object;
+
+ object = m->object;
+
+ vm_object_lock(object);
+ vm_page_lock_queues();
+ if (!m->active && !m->inactive)
+ vm_page_activate(m);
+ vm_page_unlock_queues();
+
+ PAGE_WAKEUP_DONE(m);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ }
+ }
+ }
+}
+
+/*
+ * Routine: vm_map_copy_discard
+ *
+ * Description:
+ * Dispose of a map copy object (returned by
+ * vm_map_copyin).
+ */
+void
+vm_map_copy_discard(copy)
+ vm_map_copy_t copy;
+{
+free_next_copy:
+ if (copy == VM_MAP_COPY_NULL)
+ return;
+
+ switch (copy->type) {
+ case VM_MAP_COPY_ENTRY_LIST:
+ while (vm_map_copy_first_entry(copy) !=
+ vm_map_copy_to_entry(copy)) {
+ vm_map_entry_t entry = vm_map_copy_first_entry(copy);
+
+ vm_map_copy_entry_unlink(copy, entry);
+ vm_object_deallocate(entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, entry);
+ }
+ break;
+ case VM_MAP_COPY_OBJECT:
+ vm_object_deallocate(copy->cpy_object);
+ break;
+ case VM_MAP_COPY_PAGE_LIST:
+
+ /*
+ * To clean this up, we have to unbusy all the pages
+ * and release the paging references in their objects.
+ */
+ if (copy->cpy_npages > 0)
+ vm_map_copy_page_discard(copy);
+
+ /*
+ * If there's a continuation, abort it. The
+ * abort routine releases any storage.
+ */
+ if (vm_map_copy_has_cont(copy)) {
+
+ /*
+ * Special case: recognize
+ * vm_map_copy_discard_cont and optimize
+ * here to avoid tail recursion.
+ */
+ if (copy->cpy_cont == vm_map_copy_discard_cont) {
+ register vm_map_copy_t new_copy;
+
+ new_copy = (vm_map_copy_t) copy->cpy_cont_args;
+ zfree(vm_map_copy_zone, (vm_offset_t) copy);
+ copy = new_copy;
+ goto free_next_copy;
+ }
+ else {
+ vm_map_copy_abort_cont(copy);
+ }
+ }
+
+ break;
+ }
+ zfree(vm_map_copy_zone, (vm_offset_t) copy);
+}
+
+/*
+ * Routine: vm_map_copy_copy
+ *
+ * Description:
+ * Move the information in a map copy object to
+ * a new map copy object, leaving the old one
+ * empty.
+ *
+ * This is used by kernel routines that need
+ * to look at out-of-line data (in copyin form)
+ * before deciding whether to return SUCCESS.
+ * If the routine returns FAILURE, the original
+ * copy object will be deallocated; therefore,
+ * these routines must make a copy of the copy
+ * object and leave the original empty so that
+ * deallocation will not fail.
+ */
+vm_map_copy_t
+vm_map_copy_copy(copy)
+ vm_map_copy_t copy;
+{
+ vm_map_copy_t new_copy;
+
+ if (copy == VM_MAP_COPY_NULL)
+ return VM_MAP_COPY_NULL;
+
+ /*
+ * Allocate a new copy object, and copy the information
+ * from the old one into it.
+ */
+
+ new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ *new_copy = *copy;
+
+ if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
+ /*
+ * The links in the entry chain must be
+ * changed to point to the new copy object.
+ */
+ vm_map_copy_first_entry(copy)->vme_prev
+ = vm_map_copy_to_entry(new_copy);
+ vm_map_copy_last_entry(copy)->vme_next
+ = vm_map_copy_to_entry(new_copy);
+ }
+
+ /*
+ * Change the old copy object into one that contains
+ * nothing to be deallocated.
+ */
+ copy->type = VM_MAP_COPY_OBJECT;
+ copy->cpy_object = VM_OBJECT_NULL;
+
+ /*
+ * Return the new object.
+ */
+ return new_copy;
+}
+
+/*
+ * Routine: vm_map_copy_discard_cont
+ *
+ * Description:
+ * A version of vm_map_copy_discard that can be called
+ * as a continuation from a vm_map_copy page list.
+ */
+kern_return_t vm_map_copy_discard_cont(cont_args, copy_result)
+vm_map_copyin_args_t cont_args;
+vm_map_copy_t *copy_result; /* OUT */
+{
+ vm_map_copy_discard((vm_map_copy_t) cont_args);
+ if (copy_result != (vm_map_copy_t *)0)
+ *copy_result = VM_MAP_COPY_NULL;
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Routine: vm_map_copy_overwrite
+ *
+ * Description:
+ * Copy the memory described by the map copy
+ * object (copy; returned by vm_map_copyin) onto
+ * the specified destination region (dst_map, dst_addr).
+ * The destination must be writeable.
+ *
+ * Unlike vm_map_copyout, this routine actually
+ * writes over previously-mapped memory. If the
+ * previous mapping was to a permanent (user-supplied)
+ * memory object, it is preserved.
+ *
+ * The attributes (protection and inheritance) of the
+ * destination region are preserved.
+ *
+ * If successful, consumes the copy object.
+ * Otherwise, the caller is responsible for it.
+ *
+ * Implementation notes:
+ * To overwrite temporary virtual memory, it is
+ * sufficient to remove the previous mapping and insert
+ * the new copy. This replacement is done either on
+ * the whole region (if no permanent virtual memory
+ * objects are embedded in the destination region) or
+ * in individual map entries.
+ *
+ * To overwrite permanent virtual memory, it is
+ * necessary to copy each page, as the external
+ * memory management interface currently does not
+ * provide any optimizations.
+ *
+ * Once a page of permanent memory has been overwritten,
+ * it is impossible to interrupt this function; otherwise,
+ * the call would be neither atomic nor location-independent.
+ * The kernel-state portion of a user thread must be
+ * interruptible.
+ *
+ * It may be expensive to forward all requests that might
+ * overwrite permanent memory (vm_write, vm_copy) to
+ * uninterruptible kernel threads. This routine may be
+ * called by interruptible threads; however, success is
+ * not guaranteed -- if the request cannot be performed
+ * atomically and interruptibly, an error indication is
+ * returned.
+ */
+kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible)
+ vm_map_t dst_map;
+ vm_offset_t dst_addr;
+ vm_map_copy_t copy;
+ boolean_t interruptible;
+{
+ vm_size_t size;
+ vm_offset_t start;
+ vm_map_entry_t tmp_entry;
+ vm_map_entry_t entry;
+
+ boolean_t contains_permanent_objects = FALSE;
+
+ interruptible = FALSE; /* XXX */
+
+ /*
+ * Check for null copy object.
+ */
+
+ if (copy == VM_MAP_COPY_NULL)
+ return(KERN_SUCCESS);
+
+ /*
+ * Only works for entry lists at the moment. Will
+ * support page lists LATER.
+ */
+
+#if NORMA_IPC
+ vm_map_convert_from_page_list(copy);
+#else
+ assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
+#endif
+
+ /*
+ * Currently this routine only handles page-aligned
+ * regions. Eventually, it should handle misalignments
+ * by actually copying pages.
+ */
+
+ if (!page_aligned(copy->offset) ||
+ !page_aligned(copy->size) ||
+ !page_aligned(dst_addr))
+ return(KERN_INVALID_ARGUMENT);
+
+ size = copy->size;
+
+ if (size == 0) {
+ vm_map_copy_discard(copy);
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Verify that the destination is all writeable
+ * initially.
+ */
+start_pass_1:
+ vm_map_lock(dst_map);
+ if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ vm_map_clip_start(dst_map, tmp_entry, dst_addr);
+ for (entry = tmp_entry;;) {
+ vm_size_t sub_size = (entry->vme_end - entry->vme_start);
+ vm_map_entry_t next = entry->vme_next;
+
+ if ( ! (entry->protection & VM_PROT_WRITE)) {
+ vm_map_unlock(dst_map);
+ return(KERN_PROTECTION_FAILURE);
+ }
+
+ /*
+ * If the entry is in transition, we must wait
+ * for it to exit that state. Anything could happen
+ * when we unlock the map, so start over.
+ */
+ if (entry->in_transition) {
+
+ /*
+ * Say that we are waiting, and wait for entry.
+ */
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(dst_map, FALSE);
+
+ goto start_pass_1;
+ }
+
+ if (size <= sub_size)
+ break;
+
+ if ((next == vm_map_to_entry(dst_map)) ||
+ (next->vme_start != entry->vme_end)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+
+
+ /*
+ * Check for permanent objects in the destination.
+ */
+
+ if ((entry->object.vm_object != VM_OBJECT_NULL) &&
+ !entry->object.vm_object->temporary)
+ contains_permanent_objects = TRUE;
+
+ size -= sub_size;
+ entry = next;
+ }
+
+ /*
+ * If there are permanent objects in the destination, then
+ * the copy cannot be interrupted.
+ */
+
+ if (interruptible && contains_permanent_objects) {
+ vm_map_unlock(dst_map);
+ return(KERN_FAILURE); /* XXX */
+ }
+
+ /*
+ * XXXO If there are no permanent objects in the destination,
+ * XXXO and the source and destination map entry zones match,
+ * XXXO and the destination map entry is not shared,
+ * XXXO then the map entries can be deleted and replaced
+ * XXXO with those from the copy. The following code is the
+ * XXXO basic idea of what to do, but there are lots of annoying
+ * XXXO little details about getting protection and inheritance
+ * XXXO right. Should add protection, inheritance, and sharing checks
+ * XXXO to the above pass and make sure that no wiring is involved.
+ */
+/*
+ * if (!contains_permanent_objects &&
+ * copy->cpy_hdr.entries_pageable == dst_map->hdr.entries_pageable) {
+ *
+ * *
+ * * Run over copy and adjust entries. Steal code
+ * * from vm_map_copyout() to do this.
+ * *
+ *
+ * tmp_entry = tmp_entry->vme_prev;
+ * vm_map_delete(dst_map, dst_addr, dst_addr + copy->size);
+ * vm_map_copy_insert(dst_map, tmp_entry, copy);
+ *
+ * vm_map_unlock(dst_map);
+ * vm_map_copy_discard(copy);
+ * }
+ */
+ /*
+ *
+ * Make a second pass, overwriting the data.
+ * At the beginning of each loop iteration,
+ * the next entry to be overwritten is "tmp_entry"
+ * (initially, the value returned from the lookup above),
+ * and the starting address expected in that entry
+ * is "start".
+ */
+
+ start = dst_addr;
+
+ while (vm_map_copy_first_entry(copy) != vm_map_copy_to_entry(copy)) {
+ vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy);
+ vm_size_t copy_size = (copy_entry->vme_end - copy_entry->vme_start);
+ vm_object_t object;
+
+ entry = tmp_entry;
+ size = (entry->vme_end - entry->vme_start);
+ /*
+ * Make sure that no holes popped up in the
+ * address map, and that the protection is
+ * still valid, in case the map was unlocked
+ * earlier.
+ */
+
+ if (entry->vme_start != start) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ assert(entry != vm_map_to_entry(dst_map));
+
+ /*
+ * Check protection again
+ */
+
+ if ( ! (entry->protection & VM_PROT_WRITE)) {
+ vm_map_unlock(dst_map);
+ return(KERN_PROTECTION_FAILURE);
+ }
+
+ /*
+ * Adjust to source size first
+ */
+
+ if (copy_size < size) {
+ vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
+ size = copy_size;
+ }
+
+ /*
+ * Adjust to destination size
+ */
+
+ if (size < copy_size) {
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start + size);
+ copy_size = size;
+ }
+
+ assert((entry->vme_end - entry->vme_start) == size);
+ assert((tmp_entry->vme_end - tmp_entry->vme_start) == size);
+ assert((copy_entry->vme_end - copy_entry->vme_start) == size);
+
+ /*
+ * If the destination contains temporary unshared memory,
+ * we can perform the copy by throwing it away and
+ * installing the source data.
+ */
+
+ object = entry->object.vm_object;
+ if (!entry->is_shared &&
+ ((object == VM_OBJECT_NULL) || object->temporary)) {
+ vm_object_t old_object = entry->object.vm_object;
+ vm_offset_t old_offset = entry->offset;
+
+ entry->object = copy_entry->object;
+ entry->offset = copy_entry->offset;
+ entry->needs_copy = copy_entry->needs_copy;
+ entry->wired_count = 0;
+ entry->user_wired_count = 0;
+
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+
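+ /*
+ * Remove the destination pmap's mappings of the old
+ * object so that later references fault on the newly
+ * installed copy object.
+ */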
+ vm_object_pmap_protect(
+ old_object,
+ old_offset,
+ size,
+ dst_map->pmap,
+ tmp_entry->vme_start,
+ VM_PROT_NONE);
+
+ vm_object_deallocate(old_object);
+
+ /*
+ * Set up for the next iteration. The map
+ * has not been unlocked, so the next
+ * address should be at the end of this
+ * entry, and the next map entry should be
+ * the one following it.
+ */
+
+ start = tmp_entry->vme_end;
+ tmp_entry = tmp_entry->vme_next;
+ } else {
+ vm_map_version_t version;
+ vm_object_t dst_object = entry->object.vm_object;
+ vm_offset_t dst_offset = entry->offset;
+ kern_return_t r;
+
+ /*
+ * Take an object reference, and record
+ * the map version information so that the
+ * map can be safely unlocked.
+ */
+
+ vm_object_reference(dst_object);
+
+ version.main_timestamp = dst_map->timestamp;
+
+ vm_map_unlock(dst_map);
+
+ /*
+ * Copy as much as possible in one pass
+ */
+
+ copy_size = size;
+ r = vm_fault_copy(
+ copy_entry->object.vm_object,
+ copy_entry->offset,
+ &copy_size,
+ dst_object,
+ dst_offset,
+ dst_map,
+ &version,
+ FALSE /* XXX interruptible */ );
+
+ /*
+ * Release the object reference
+ */
+
+ vm_object_deallocate(dst_object);
+
+ /*
+ * If a hard error occurred, return it now
+ */
+
+ if (r != KERN_SUCCESS)
+ return(r);
+
+ if (copy_size != 0) {
+ /*
+ * Dispose of the copied region
+ */
+
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start + copy_size);
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_object_deallocate(copy_entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+ }
+
+ /*
+ * Pick up in the destination map where we left off.
+ *
+ * Use the version information to avoid a lookup
+ * in the normal case.
+ */
+
+ start += copy_size;
+ vm_map_lock(dst_map);
+ if ((version.main_timestamp + 1) == dst_map->timestamp) {
+ /* We can safely use saved tmp_entry value */
+
+ vm_map_clip_end(dst_map, tmp_entry, start);
+ tmp_entry = tmp_entry->vme_next;
+ } else {
+ /* Must do lookup of tmp_entry */
+
+ if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ vm_map_clip_start(dst_map, tmp_entry, start);
+ }
+ }
+
+ }
+ vm_map_unlock(dst_map);
+
+ /*
+ * Throw away the vm_map_copy object
+ */
+ vm_map_copy_discard(copy);
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Macro: vm_map_copy_insert
+ *
+ * Description:
+ * Link a copy chain ("copy") into a map at the
+ * specified location (after "where").
+ * Side effects:
+ * The copy chain is destroyed.
+ * Warning:
+ * The arguments are evaluated multiple times.
+ */
+#define vm_map_copy_insert(map, where, copy) \
+ MACRO_BEGIN \
+ (((where)->vme_next)->vme_prev = vm_map_copy_last_entry(copy)) \
+ ->vme_next = ((where)->vme_next); \
+ ((where)->vme_next = vm_map_copy_first_entry(copy)) \
+ ->vme_prev = (where); \
+ (map)->hdr.nentries += (copy)->cpy_hdr.nentries; \
+ zfree(vm_map_copy_zone, (vm_offset_t) copy); \
+ MACRO_END
+
+/*
+ * Routine: vm_map_copyout
+ *
+ * Description:
+ * Copy out a copy chain ("copy") into newly-allocated
+ * space in the destination map.
+ *
+ * If successful, consumes the copy object.
+ * Otherwise, the caller is responsible for it.
+ */
+kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
+ register
+ vm_map_t dst_map;
+ vm_offset_t *dst_addr; /* OUT */
+ register
+ vm_map_copy_t copy;
+{
+ vm_size_t size;
+ vm_size_t adjustment;
+ vm_offset_t start;
+ vm_offset_t vm_copy_start;
+ vm_map_entry_t last;
+ register
+ vm_map_entry_t entry;
+
+ /*
+ * Check for null copy object.
+ */
+
+ if (copy == VM_MAP_COPY_NULL) {
+ *dst_addr = 0;
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Check for special copy object, created
+ * by vm_map_copyin_object.
+ */
+
+ if (copy->type == VM_MAP_COPY_OBJECT) {
+ vm_object_t object = copy->cpy_object;
+ vm_size_t offset = copy->offset;
+ vm_size_t tmp_size = copy->size;
+ kern_return_t kr;
+
+ *dst_addr = 0;
+ kr = vm_map_enter(dst_map, dst_addr, tmp_size,
+ (vm_offset_t) 0, TRUE,
+ object, offset, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ if (kr != KERN_SUCCESS)
+ return(kr);
+ zfree(vm_map_copy_zone, (vm_offset_t) copy);
+ return(KERN_SUCCESS);
+ }
+
+ if (copy->type == VM_MAP_COPY_PAGE_LIST)
+ return(vm_map_copyout_page_list(dst_map, dst_addr, copy));
+
+ /*
+ * Find space for the data
+ */
+
+ vm_copy_start = trunc_page(copy->offset);
+ size = round_page(copy->offset + copy->size) - vm_copy_start;
+
+ StartAgain: ;
+
+ vm_map_lock(dst_map);
+ start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ?
+ vm_map_min(dst_map) : last->vme_end;
+
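+ /*
+ * First-fit search: starting at the free-space hint, walk
+ * forward until a gap of at least `size' bytes is found,
+ * or fail (optionally waiting for space) when the map's
+ * maximum offset is reached.
+ */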
+ while (TRUE) {
+ vm_map_entry_t next = last->vme_next;
+ vm_offset_t end = start + size;
+
+ if ((end > dst_map->max_offset) || (end < start)) {
+ if (dst_map->wait_for_space) {
+ if (size <= (dst_map->max_offset - dst_map->min_offset)) {
+ assert_wait((event_t) dst_map, TRUE);
+ vm_map_unlock(dst_map);
+ thread_block((void (*)()) 0);
+ goto StartAgain;
+ }
+ }
+ vm_map_unlock(dst_map);
+ return(KERN_NO_SPACE);
+ }
+
+ if ((next == vm_map_to_entry(dst_map)) ||
+ (next->vme_start >= end))
+ break;
+
+ last = next;
+ start = last->vme_end;
+ }
+
+ /*
+ * Since we're going to just drop the map
+ * entries from the copy into the destination
+ * map, they must come from the same pool.
+ */
+
+ if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) {
+ /*
+ * Mismatches occur when dealing with the default
+ * pager.
+ */
+ zone_t old_zone;
+ vm_map_entry_t next, new;
+
+ /*
+ * Find the zone that the copies were allocated from
+ */
+ old_zone = (copy->cpy_hdr.entries_pageable)
+ ? vm_map_entry_zone
+ : vm_map_kentry_zone;
+ entry = vm_map_copy_first_entry(copy);
+
+ /*
+ * Reinitialize the copy so that vm_map_copy_entry_link
+ * will work.
+ */
+ copy->cpy_hdr.nentries = 0;
+ copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
+ vm_map_copy_first_entry(copy) =
+ vm_map_copy_last_entry(copy) =
+ vm_map_copy_to_entry(copy);
+
+ /*
+ * Copy each entry.
+ */
+ while (entry != vm_map_copy_to_entry(copy)) {
+ new = vm_map_copy_entry_create(copy);
+ vm_map_entry_copy_full(new, entry);
+ vm_map_copy_entry_link(copy,
+ vm_map_copy_last_entry(copy),
+ new);
+ next = entry->vme_next;
+ zfree(old_zone, (vm_offset_t) entry);
+ entry = next;
+ }
+ }
+
+ /*
+ * Adjust the addresses in the copy chain, and
+ * reset the region attributes.
+ */
+
+ adjustment = start - vm_copy_start;
+ for (entry = vm_map_copy_first_entry(copy);
+ entry != vm_map_copy_to_entry(copy);
+ entry = entry->vme_next) {
+ entry->vme_start += adjustment;
+ entry->vme_end += adjustment;
+
+ entry->inheritance = VM_INHERIT_DEFAULT;
+ entry->protection = VM_PROT_DEFAULT;
+ entry->max_protection = VM_PROT_ALL;
+ entry->projected_on = 0;
+
+ /*
+ * If the entry is now wired,
+ * map the pages into the destination map.
+ */
+ if (entry->wired_count != 0) {
+ register vm_offset_t va;
+ vm_offset_t offset;
+ register vm_object_t object;
+
+ object = entry->object.vm_object;
+ offset = entry->offset;
+ va = entry->vme_start;
+
+ pmap_pageable(dst_map->pmap,
+ entry->vme_start,
+ entry->vme_end,
+ TRUE);
+
+ while (va < entry->vme_end) {
+ register vm_page_t m;
+
+ /*
+ * Look up the page in the object.
+ * Assert that the page will be found in the
+ * top object:
+ * either
+ * the object was newly created by
+ * vm_object_copy_slowly, and has
+ * copies of all of the pages from
+ * the source object
+ * or
+ * the object was moved from the old
+ * map entry; because the old map
+ * entry was wired, all of the pages
+ * were in the top-level object.
+ * (XXX not true if we wire pages for
+ * reading)
+ */
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+
+ m = vm_page_lookup(object, offset);
+ if (m == VM_PAGE_NULL || m->wire_count == 0 ||
+ m->absent)
+ panic("vm_map_copyout: wiring 0x%x", m);
+
+ m->busy = TRUE;
+ vm_object_unlock(object);
+
+ PMAP_ENTER(dst_map->pmap, va, m,
+ entry->protection, TRUE);
+
+ vm_object_lock(object);
+ PAGE_WAKEUP_DONE(m);
+ /* the page is wired, so we don't have to activate */
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ offset += PAGE_SIZE;
+ va += PAGE_SIZE;
+ }
+ }
+
+
+ }
+
+ /*
+ * Correct the page alignment for the result
+ */
+
+ *dst_addr = start + (copy->offset - vm_copy_start);
+
+ /*
+ * Update the hints and the map size
+ */
+
+ if (dst_map->first_free == last)
+ dst_map->first_free = vm_map_copy_last_entry(copy);
+ SAVE_HINT(dst_map, vm_map_copy_last_entry(copy));
+
+ dst_map->size += size;
+
+ /*
+ * Link in the copy
+ */
+
+ vm_map_copy_insert(dst_map, last, copy);
+
+ vm_map_unlock(dst_map);
+
+ /*
+ * XXX If wiring_required, call vm_map_pageable
+ */
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ *
+ * vm_map_copyout_page_list:
+ *
+ * Version of vm_map_copyout() for page list vm map copies.
+ *
+ */
+kern_return_t vm_map_copyout_page_list(dst_map, dst_addr, copy)
+ register
+ vm_map_t dst_map;
+ vm_offset_t *dst_addr; /* OUT */
+ register
+ vm_map_copy_t copy;
+{
+ vm_size_t size;
+ vm_offset_t start;
+ vm_offset_t end;
+ vm_offset_t offset;
+ vm_map_entry_t last;
+ register
+ vm_object_t object;
+ vm_page_t *page_list, m;
+ vm_map_entry_t entry;
+ vm_offset_t old_last_offset;
+ boolean_t cont_invoked, needs_wakeup = FALSE;
+ kern_return_t result = KERN_SUCCESS;
+ vm_map_copy_t orig_copy;
+ vm_offset_t dst_offset;
+ boolean_t must_wire;
+
+ /*
+ * Make sure the pages are stolen, because we are
+ * going to put them in a new object. Assume that
+ * all pages are identical to the first in this regard.
+ */
+
+ page_list = &copy->cpy_page_list[0];
+ if ((*page_list)->tabled)
+ vm_map_copy_steal_pages(copy);
+
+ /*
+ * Find space for the data
+ */
+
+ size = round_page(copy->offset + copy->size) -
+ trunc_page(copy->offset);
+StartAgain:
+ vm_map_lock(dst_map);
+ must_wire = dst_map->wiring_required;
+
+ last = dst_map->first_free;
+ if (last == vm_map_to_entry(dst_map)) {
+ start = vm_map_min(dst_map);
+ } else {
+ start = last->vme_end;
+ }
+
+ while (TRUE) {
+ vm_map_entry_t next = last->vme_next;
+ end = start + size;
+
+ if ((end > dst_map->max_offset) || (end < start)) {
+ if (dst_map->wait_for_space) {
+ if (size <= (dst_map->max_offset -
+ dst_map->min_offset)) {
+ assert_wait((event_t) dst_map, TRUE);
+ vm_map_unlock(dst_map);
+ thread_block((void (*)()) 0);
+ goto StartAgain;
+ }
+ }
+ vm_map_unlock(dst_map);
+ return(KERN_NO_SPACE);
+ }
+
+ if ((next == vm_map_to_entry(dst_map)) ||
+ (next->vme_start >= end)) {
+ break;
+ }
+
+ last = next;
+ start = last->vme_end;
+ }
+
+ /*
+ * See whether we can avoid creating a new entry (and object) by
+ * extending one of our neighbors. [So far, we only attempt to
+ * extend from below.]
+ *
+ * The code path below here is a bit twisted. If any of the
+ * extension checks fails, we branch to create_object. If
+ * it all works, we fall out the bottom and goto insert_pages.
+ */
+ if (last == vm_map_to_entry(dst_map) ||
+ last->vme_end != start ||
+ last->is_shared != FALSE ||
+ last->is_sub_map != FALSE ||
+ last->inheritance != VM_INHERIT_DEFAULT ||
+ last->protection != VM_PROT_DEFAULT ||
+ last->max_protection != VM_PROT_ALL ||
+ (must_wire ? (last->wired_count != 1 ||
+ last->user_wired_count != 1) :
+ (last->wired_count != 0))) {
+ goto create_object;
+ }
+
+ /*
+ * If this entry needs an object, make one.
+ */
+ if (last->object.vm_object == VM_OBJECT_NULL) {
+ object = vm_object_allocate(
+ (vm_size_t)(last->vme_end - last->vme_start + size));
+ last->object.vm_object = object;
+ last->offset = 0;
+ vm_object_lock(object);
+ }
+ else {
+ vm_offset_t prev_offset = last->offset;
+ vm_size_t prev_size = start - last->vme_start;
+ vm_size_t new_size;
+
+ /*
+ * This is basically vm_object_coalesce.
+ */
+
+ object = last->object.vm_object;
+ vm_object_lock(object);
+
+ /*
+ * Try to collapse the object first
+ */
+ vm_object_collapse(object);
+
+ /*
+ * Can't coalesce if pages of the object that are not
+ * mapped by `last' may still be in use:
+ * . more than one reference
+ * . paged out
+ * . shadows another object
+ * . has a copy elsewhere
+ * . paging references (pages might be in page-list)
+ */
+
+ if ((object->ref_count > 1) ||
+ object->pager_created ||
+ (object->shadow != VM_OBJECT_NULL) ||
+ (object->copy != VM_OBJECT_NULL) ||
+ (object->paging_in_progress != 0)) {
+ vm_object_unlock(object);
+ goto create_object;
+ }
+
+ /*
+ * Extend the object if necessary. Don't have to call
+ * vm_object_page_remove because the pages aren't mapped,
+ * and vm_page_replace will free up any old ones it encounters.
+ */
+ new_size = prev_offset + prev_size + size;
+ if (new_size > object->size)
+ object->size = new_size;
+ }
+
+ /*
+ * Coalesced the two objects - can extend
+ * the previous map entry to include the
+ * new range.
+ */
+ dst_map->size += size;
+ last->vme_end = end;
+
+ SAVE_HINT(dst_map, last);
+
+ goto insert_pages;
+
+create_object:
+
+ /*
+ * Create object
+ */
+ object = vm_object_allocate(size);
+
+ /*
+ * Create entry
+ */
+
+ entry = vm_map_entry_create(dst_map);
+
+ entry->object.vm_object = object;
+ entry->offset = 0;
+
+ entry->is_shared = FALSE;
+ entry->is_sub_map = FALSE;
+ entry->needs_copy = FALSE;
+
+ if (must_wire) {
+ entry->wired_count = 1;
+ entry->user_wired_count = 1;
+ } else {
+ entry->wired_count = 0;
+ entry->user_wired_count = 0;
+ }
+
+ entry->in_transition = TRUE;
+ entry->needs_wakeup = FALSE;
+
+ entry->vme_start = start;
+ entry->vme_end = start + size;
+
+ entry->inheritance = VM_INHERIT_DEFAULT;
+ entry->protection = VM_PROT_DEFAULT;
+ entry->max_protection = VM_PROT_ALL;
+ entry->projected_on = 0;
+
+ vm_object_lock(object);
+
+ /*
+ * Update the hints and the map size
+ */
+ if (dst_map->first_free == last) {
+ dst_map->first_free = entry;
+ }
+ SAVE_HINT(dst_map, entry);
+ dst_map->size += size;
+
+ /*
+ * Link in the entry
+ */
+ vm_map_entry_link(dst_map, last, entry);
+ last = entry;
+
+ /*
+ * Transfer pages into new object.
+ * Scan page list in vm_map_copy.
+ */
+insert_pages:
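+ /*
+ * dst_offset preserves the sub-page offset of the original
+ * copy (the address returned is start + dst_offset), and
+ * old_last_offset is the offset within the entry's object
+ * that corresponds to address `start'.
+ */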
+ dst_offset = copy->offset & PAGE_MASK;
+ cont_invoked = FALSE;
+ orig_copy = copy;
+ last->in_transition = TRUE;
+ old_last_offset = last->offset
+ + (start - last->vme_start);
+
+ vm_page_lock_queues();
+
+ for (offset = 0; offset < size; offset += PAGE_SIZE) {
+ m = *page_list;
+ assert(m && !m->tabled);
+
+ /*
+ * Must clear busy bit in page before inserting it.
+ * Ok to skip wakeup logic because nobody else
+ * can possibly know about this page.
+ * The page is dirty in its new object.
+ */
+
+ assert(!m->wanted);
+
+ m->busy = FALSE;
+ m->dirty = TRUE;
+ vm_page_replace(m, object, old_last_offset + offset);
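+ /*
+ * If the destination map requires wiring, wire the page
+ * and enter it in the physical map right away; otherwise
+ * leave it pageable on the active queue.
+ */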
+ if (must_wire) {
+ vm_page_wire(m);
+ PMAP_ENTER(dst_map->pmap,
+ last->vme_start + m->offset - last->offset,
+ m, last->protection, TRUE);
+ } else {
+ vm_page_activate(m);
+ }
+
+ *page_list++ = VM_PAGE_NULL;
+ if (--(copy->cpy_npages) == 0 &&
+ vm_map_copy_has_cont(copy)) {
+ vm_map_copy_t new_copy;
+
+ /*
+ * Ok to unlock map because entry is
+ * marked in_transition.
+ */
+ cont_invoked = TRUE;
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+ vm_map_unlock(dst_map);
+ vm_map_copy_invoke_cont(copy, &new_copy, &result);
+
+ if (result == KERN_SUCCESS) {
+
+ /*
+ * If we got back a copy with real pages,
+ * steal them now. Either all of the
+ * pages in the list are tabled or none
+ * of them are; mixtures are not possible.
+ *
+ * Save original copy for consume on
+ * success logic at end of routine.
+ */
+ if (copy != orig_copy)
+ vm_map_copy_discard(copy);
+
+ if ((copy = new_copy) != VM_MAP_COPY_NULL) {
+ page_list = &copy->cpy_page_list[0];
+ if ((*page_list)->tabled)
+ vm_map_copy_steal_pages(copy);
+ }
+ }
+ else {
+ /*
+ * Continuation failed.
+ */
+ vm_map_lock(dst_map);
+ goto error;
+ }
+
+ vm_map_lock(dst_map);
+ vm_object_lock(object);
+ vm_page_lock_queues();
+ }
+ }
+
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+
+ *dst_addr = start + dst_offset;
+
+ /*
+ * Clear the in transition bits. This is easy if we
+ * didn't have a continuation.
+ */
+error:
+ if (!cont_invoked) {
+ /*
+ * We didn't unlock the map, so nobody could
+ * be waiting.
+ */
+ last->in_transition = FALSE;
+ assert(!last->needs_wakeup);
+ needs_wakeup = FALSE;
+ }
+ else {
+ if (!vm_map_lookup_entry(dst_map, start, &entry))
+ panic("vm_map_copyout_page_list: missing entry");
+
+ /*
+ * Clear transition bit for all constituent entries that
+ * were in the original entry. Also check for waiters.
+ */
+ while((entry != vm_map_to_entry(dst_map)) &&
+ (entry->vme_start < end)) {
+ assert(entry->in_transition);
+ entry->in_transition = FALSE;
+ if(entry->needs_wakeup) {
+ entry->needs_wakeup = FALSE;
+ needs_wakeup = TRUE;
+ }
+ entry = entry->vme_next;
+ }
+ }
+
+ if (result != KERN_SUCCESS)
+ vm_map_delete(dst_map, start, end);
+
+ vm_map_unlock(dst_map);
+
+ if (needs_wakeup)
+ vm_map_entry_wakeup(dst_map);
+
+ /*
+ * Consume on success logic.
+ */
+ if (copy != orig_copy) {
+ zfree(vm_map_copy_zone, (vm_offset_t) copy);
+ }
+ if (result == KERN_SUCCESS) {
+ zfree(vm_map_copy_zone, (vm_offset_t) orig_copy);
+ }
+
+ return(result);
+}
+
+/*
+ * Routine: vm_map_copyin
+ *
+ * Description:
+ * Copy the specified region (src_addr, len) from the
+ * source address space (src_map), possibly removing
+ * the region from the source address space (src_destroy).
+ *
+ * Returns:
+ * A vm_map_copy_t object (copy_result), suitable for
+ * insertion into another address space (using vm_map_copyout),
+ * copying over another address space region (using
+ * vm_map_copy_overwrite). If the copy is unused, it
+ * should be destroyed (using vm_map_copy_discard).
+ *
+ * In/out conditions:
+ * The source map should not be locked on entry.
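+ *
+ * Usage sketch (for illustration only; the variable names are
+ * hypothetical, not part of this interface):
+ *
+ * vm_map_copy_t copy;
+ * vm_offset_t dst;
+ *
+ * if (vm_map_copyin(src_map, addr, len, FALSE, &copy)
+ * == KERN_SUCCESS) {
+ * if (vm_map_copyout(dst_map, &dst, copy) != KERN_SUCCESS)
+ * vm_map_copy_discard(copy);
+ * }
+ *
+ * vm_map_copyout consumes the copy on success; on failure the
+ * caller must discard it with vm_map_copy_discard.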
+ */
+kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
+ vm_map_t src_map;
+ vm_offset_t src_addr;
+ vm_size_t len;
+ boolean_t src_destroy;
+ vm_map_copy_t *copy_result; /* OUT */
+{
+ vm_map_entry_t tmp_entry; /* Result of last map lookup --
+ * in multi-level lookup, this
+ * entry contains the actual
+ * vm_object/offset.
+ */
+
+ vm_offset_t src_start; /* Start of current entry --
+ * where copy is taking place now
+ */
+ vm_offset_t src_end; /* End of entire region to be
+ * copied */
+
+ register
+ vm_map_copy_t copy; /* Resulting copy */
+
+ /*
+ * Check for copies of zero bytes.
+ */
+
+ if (len == 0) {
+ *copy_result = VM_MAP_COPY_NULL;
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Compute start and end of region
+ */
+
+ src_start = trunc_page(src_addr);
+ src_end = round_page(src_addr + len);
+
+ /*
+ * Check that the end address doesn't overflow
+ */
+
+ if (src_end <= src_start)
+ if ((src_end < src_start) || (src_start != 0))
+ return(KERN_INVALID_ADDRESS);
+
+ /*
+ * Allocate a header element for the list.
+ *
+ * Use the start and end in the header to
+ * remember the endpoints prior to rounding.
+ */
+
+ copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ vm_map_copy_first_entry(copy) =
+ vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
+ copy->type = VM_MAP_COPY_ENTRY_LIST;
+ copy->cpy_hdr.nentries = 0;
+ copy->cpy_hdr.entries_pageable = TRUE;
+
+ copy->offset = src_addr;
+ copy->size = len;
+
+#define RETURN(x) \
+ MACRO_BEGIN \
+ vm_map_unlock(src_map); \
+ vm_map_copy_discard(copy); \
+ MACRO_RETURN(x); \
+ MACRO_END
+
+ /*
+ * Find the beginning of the region.
+ */
+
+ vm_map_lock(src_map);
+
+ if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry))
+ RETURN(KERN_INVALID_ADDRESS);
+ vm_map_clip_start(src_map, tmp_entry, src_start);
+
+ /*
+ * Go through entries until we get to the end.
+ */
+
+ while (TRUE) {
+ register
+ vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */
+ vm_size_t src_size; /* Size of source
+ * map entry (in both
+ * maps)
+ */
+
+ register
+ vm_object_t src_object; /* Object to copy */
+ vm_offset_t src_offset;
+
+ boolean_t src_needs_copy; /* Should source map
+ * be made read-only
+ * for copy-on-write?
+ */
+
+ register
+ vm_map_entry_t new_entry; /* Map entry for copy */
+ boolean_t new_entry_needs_copy; /* Will new entry be COW? */
+
+ boolean_t was_wired; /* Was source wired? */
+ vm_map_version_t version; /* Version before locks
+ * dropped to make copy
+ */
+
+ /*
+ * Verify that the region can be read.
+ */
+
+ if (! (src_entry->protection & VM_PROT_READ))
+ RETURN(KERN_PROTECTION_FAILURE);
+
+ /*
+ * Clip against the endpoints of the entire region.
+ */
+
+ vm_map_clip_end(src_map, src_entry, src_end);
+
+ src_size = src_entry->vme_end - src_start;
+ src_object = src_entry->object.vm_object;
+ src_offset = src_entry->offset;
+ was_wired = (src_entry->wired_count != 0);
+
+ /*
+ * Create a new address map entry to
+ * hold the result. Fill in the fields from
+ * the appropriate source entries.
+ */
+
+ new_entry = vm_map_copy_entry_create(copy);
+ vm_map_entry_copy(new_entry, src_entry);
+
+ /*
+ * Attempt non-blocking copy-on-write optimizations.
+ */
+
+ if (src_destroy &&
+ (src_object == VM_OBJECT_NULL ||
+ (src_object->temporary && !src_object->use_shared_copy)))
+ {
+ /*
+ * If we are destroying the source, and the object
+ * is temporary, and not shared writable,
+ * we can move the object reference
+ * from the source to the copy. The copy is
+ * copy-on-write only if the source is.
+ * We make another reference to the object, because
+ * destroying the source entry will deallocate it.
+ */
+ vm_object_reference(src_object);
+
+ /*
+ * Copy is always unwired; vm_map_entry_copy has
+ * already set its wired count to zero.
+ */
+
+ goto CopySuccessful;
+ }
+
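+ /*
+ * Attempt the quick copy-on-write optimization;
+ * vm_object_copy_temporary returns FALSE when the object
+ * needs a slower strategy, and src_needs_copy reports
+ * whether the source mapping must be write-protected.
+ */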
+ if (!was_wired &&
+ vm_object_copy_temporary(
+ &new_entry->object.vm_object,
+ &new_entry->offset,
+ &src_needs_copy,
+ &new_entry_needs_copy)) {
+
+ new_entry->needs_copy = new_entry_needs_copy;
+
+ /*
+ * Handle copy-on-write obligations
+ */
+
+ if (src_needs_copy && !tmp_entry->needs_copy) {
+ vm_object_pmap_protect(
+ src_object,
+ src_offset,
+ src_size,
+ (src_entry->is_shared ? PMAP_NULL
+ : src_map->pmap),
+ src_entry->vme_start,
+ src_entry->protection &
+ ~VM_PROT_WRITE);
+
+ tmp_entry->needs_copy = TRUE;
+ }
+
+ /*
+ * The map has never been unlocked, so it's safe to
+ * move to the next entry rather than doing another
+ * lookup.
+ */
+
+ goto CopySuccessful;
+ }
+
+ new_entry->needs_copy = FALSE;
+
+ /*
+ * Take an object reference, so that we may
+ * release the map lock(s).
+ */
+
+ assert(src_object != VM_OBJECT_NULL);
+ vm_object_reference(src_object);
+
+ /*
+ * Record the timestamp for later verification.
+ * Unlock the map.
+ */
+
+ version.main_timestamp = src_map->timestamp;
+ vm_map_unlock(src_map);
+
+ /*
+ * Perform the copy
+ */
+
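+ /*
+ * A wired source is copied immediately with
+ * vm_object_copy_slowly; an unwired source goes through
+ * vm_object_copy_strategically, which picks an appropriate
+ * (possibly lazy) copy strategy.
+ */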
+ if (was_wired) {
+ vm_object_lock(src_object);
+ (void) vm_object_copy_slowly(
+ src_object,
+ src_offset,
+ src_size,
+ FALSE,
+ &new_entry->object.vm_object);
+ new_entry->offset = 0;
+ new_entry->needs_copy = FALSE;
+ } else {
+ kern_return_t result;
+
+ result = vm_object_copy_strategically(src_object,
+ src_offset,
+ src_size,
+ &new_entry->object.vm_object,
+ &new_entry->offset,
+ &new_entry_needs_copy);
+
+ new_entry->needs_copy = new_entry_needs_copy;
+
+
+ if (result != KERN_SUCCESS) {
+ vm_map_copy_entry_dispose(copy, new_entry);
+
+ vm_map_lock(src_map);
+ RETURN(result);
+ }
+
+ }
+
+ /*
+ * Throw away the extra reference
+ */
+
+ vm_object_deallocate(src_object);
+
+ /*
+ * Verify that the map has not substantially
+ * changed while the copy was being made.
+ */
+
+ vm_map_lock(src_map); /* Increments timestamp once! */
+
+ if ((version.main_timestamp + 1) == src_map->timestamp)
+ goto CopySuccessful;
+
+ /*
+ * Simple version comparison failed.
+ *
+ * Retry the lookup and verify that the
+ * same object/offset are still present.
+ *
+ * [Note: a memory manager that colludes with
+ * the calling task can detect that we have
+ * cheated. While the map was unlocked, the
+ * mapping could have been changed and restored.]
+ */
+
+ if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) {
+ vm_map_copy_entry_dispose(copy, new_entry);
+ RETURN(KERN_INVALID_ADDRESS);
+ }
+
+ src_entry = tmp_entry;
+ vm_map_clip_start(src_map, src_entry, src_start);
+
+ if ((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE)
+ goto VerificationFailed;
+
+ if (src_entry->vme_end < new_entry->vme_end)
+ src_size = (new_entry->vme_end = src_entry->vme_end) - src_start;
+
+ if ((src_entry->object.vm_object != src_object) ||
+ (src_entry->offset != src_offset) ) {
+
+ /*
+ * Verification failed.
+ *
+ * Start over with this top-level entry.
+ */
+
+ VerificationFailed: ;
+
+ vm_object_deallocate(new_entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, new_entry);
+ tmp_entry = src_entry;
+ continue;
+ }
+
+ /*
+ * Verification succeeded.
+ */
+
+ CopySuccessful: ;
+
+ /*
+ * Link in the new copy entry.
+ */
+
+ vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),
+ new_entry);
+
+ /*
+ * Determine whether the entire region
+ * has been copied.
+ */
+ src_start = new_entry->vme_end;
+ if ((src_start >= src_end) && (src_end != 0))
+ break;
+
+ /*
+ * Verify that there are no gaps in the region
+ */
+
+ tmp_entry = src_entry->vme_next;
+ if (tmp_entry->vme_start != src_start)
+ RETURN(KERN_INVALID_ADDRESS);
+ }
+
+ /*
+ * If the source should be destroyed, do it now, since the
+ * copy was successful.
+ */
+ if (src_destroy)
+ (void) vm_map_delete(src_map, trunc_page(src_addr), src_end);
+
+ vm_map_unlock(src_map);
+
+ *copy_result = copy;
+ return(KERN_SUCCESS);
+
+#undef RETURN
+}
+
+/*
+ * vm_map_copyin_object:
+ *
+ * Create a copy object from an object.
+ * Our caller donates an object reference.
+ */
+
+kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
+ vm_object_t object;
+ vm_offset_t offset; /* offset of region in object */
+ vm_size_t size; /* size of region in object */
+ vm_map_copy_t *copy_result; /* OUT */
+{
+ vm_map_copy_t copy; /* Resulting copy */
+
+ /*
+ * We drop the object into a special copy object
+ * that contains the object directly. These copy objects
+ * are distinguished by entries_pageable == FALSE
+ * and null links.
+ */
+
+ copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ vm_map_copy_first_entry(copy) =
+ vm_map_copy_last_entry(copy) = VM_MAP_ENTRY_NULL;
+ copy->type = VM_MAP_COPY_OBJECT;
+ copy->cpy_object = object;
+ copy->offset = offset;
+ copy->size = size;
+
+ *copy_result = copy;
+ return(KERN_SUCCESS);
+}
+
+/*
+ * vm_map_copyin_page_list_cont:
+ *
+ * Continuation routine for vm_map_copyin_page_list.
+ *
+ * If vm_map_copyin_page_list can't fit the entire vm range
+ * into a single page list object, it creates a continuation.
+ * When the target of the operation has used the pages in the
+ * initial page list, it invokes the continuation, which calls
+ * this routine. If an error happens, the continuation is aborted
+ * (abort arg to this routine is TRUE). To avoid deadlocks, the
+ * pages are discarded from the initial page list before invoking
+ * the continuation.
+ *
+ * NOTE: This is not the same sort of continuation used by
+ * the scheduler.
+ */
+
+kern_return_t vm_map_copyin_page_list_cont(cont_args, copy_result)
+vm_map_copyin_args_t cont_args;
+vm_map_copy_t *copy_result; /* OUT */
+{
+ kern_return_t result = 0; /* '=0' to quiet gcc warnings */
+ register boolean_t do_abort, src_destroy, src_destroy_only;
+
+ /*
+ * Check for cases that only require memory destruction.
+ */
+ do_abort = (copy_result == (vm_map_copy_t *) 0);
+ src_destroy = (cont_args->destroy_len != (vm_size_t) 0);
+ src_destroy_only = (cont_args->src_len == (vm_size_t) 0);
+
+ if (do_abort || src_destroy_only) {
+ if (src_destroy)
+ result = vm_map_remove(cont_args->map,
+ cont_args->destroy_addr,
+ cont_args->destroy_addr + cont_args->destroy_len);
+ if (!do_abort)
+ *copy_result = VM_MAP_COPY_NULL;
+ }
+ else {
+ result = vm_map_copyin_page_list(cont_args->map,
+ cont_args->src_addr, cont_args->src_len, src_destroy,
+ cont_args->steal_pages, copy_result, TRUE);
+
+ if (src_destroy && !cont_args->steal_pages &&
+ vm_map_copy_has_cont(*copy_result)) {
+ vm_map_copyin_args_t new_args;
+ /*
+ * Transfer old destroy info.
+ */
+ new_args = (vm_map_copyin_args_t)
+ (*copy_result)->cpy_cont_args;
+ new_args->destroy_addr = cont_args->destroy_addr;
+ new_args->destroy_len = cont_args->destroy_len;
+ }
+ }
+
+ vm_map_deallocate(cont_args->map);
+ kfree((vm_offset_t)cont_args, sizeof(vm_map_copyin_args_data_t));
+
+ return(result);
+}
+
+/*
+ * vm_map_copyin_page_list:
+ *
+ * This is a variant of vm_map_copyin that copies in a list of pages.
+ * If steal_pages is TRUE, the pages are only in the returned list.
+ * If steal_pages is FALSE, the pages are busy and still in their
+ * objects. A continuation may be returned if not all the pages fit:
+ * the recipient of this copy_result must be prepared to deal with it.
+ */
+
+kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
+ steal_pages, copy_result, is_cont)
+ vm_map_t src_map;
+ vm_offset_t src_addr;
+ vm_size_t len;
+ boolean_t src_destroy;
+ boolean_t steal_pages;
+ vm_map_copy_t *copy_result; /* OUT */
+ boolean_t is_cont;
+{
+ vm_map_entry_t src_entry;
+ vm_page_t m;
+ vm_offset_t src_start;
+ vm_offset_t src_end;
+ vm_size_t src_size;
+ register
+ vm_object_t src_object;
+ register
+ vm_offset_t src_offset;
+ vm_offset_t src_last_offset;
+ register
+ vm_map_copy_t copy; /* Resulting copy */
+ kern_return_t result = KERN_SUCCESS;
+ boolean_t need_map_lookup;
+ vm_map_copyin_args_t cont_args;
+
+ /*
+ * If steal_pages is FALSE, this leaves busy pages in
+ * the object. A continuation must be used if src_destroy
+ * is true in this case (!steal_pages && src_destroy).
+ *
+ * XXX Still have a more general problem of what happens
+ * XXX if the same page occurs twice in a list. Deadlock
+ * XXX can happen if vm_fault_page was called. A
+ * XXX possible solution is to use a continuation if vm_fault_page
+ * XXX is called and we cross a map entry boundary.
+ */
+
+ /*
+ * Check for copies of zero bytes.
+ */
+
+ if (len == 0) {
+ *copy_result = VM_MAP_COPY_NULL;
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Compute start and end of region
+ */
+
+ src_start = trunc_page(src_addr);
+ src_end = round_page(src_addr + len);
+
+ /*
+ * Check that the end address doesn't overflow
+ */
+
+ if (src_end <= src_start && (src_end < src_start || src_start != 0)) {
+ return KERN_INVALID_ADDRESS;
+ }
+
+ /*
+ * Allocate a header element for the page list.
+ *
+ * Record original offset and size, as caller may not
+ * be page-aligned.
+ */
+
+ copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ copy->type = VM_MAP_COPY_PAGE_LIST;
+ copy->cpy_npages = 0;
+ copy->offset = src_addr;
+ copy->size = len;
+ copy->cpy_cont = ((kern_return_t (*)()) 0);
+ copy->cpy_cont_args = (char *) VM_MAP_COPYIN_ARGS_NULL;
+
+ /*
+ * Find the beginning of the region.
+ */
+
+do_map_lookup:
+
+ vm_map_lock(src_map);
+
+ if (!vm_map_lookup_entry(src_map, src_start, &src_entry)) {
+ result = KERN_INVALID_ADDRESS;
+ goto error;
+ }
+ need_map_lookup = FALSE;
+
+ /*
+ * Go through entries until we get to the end.
+ */
+
+ while (TRUE) {
+
+ if (! (src_entry->protection & VM_PROT_READ)) {
+ result = KERN_PROTECTION_FAILURE;
+ goto error;
+ }
+
+ if (src_end > src_entry->vme_end)
+ src_size = src_entry->vme_end - src_start;
+ else
+ src_size = src_end - src_start;
+
+ src_object = src_entry->object.vm_object;
+ src_offset = src_entry->offset +
+ (src_start - src_entry->vme_start);
+
+ /*
+ * If src_object is NULL, allocate it now;
+ * we're going to fault on it shortly.
+ */
+ if (src_object == VM_OBJECT_NULL) {
+ src_object = vm_object_allocate((vm_size_t)
+ src_entry->vme_end -
+ src_entry->vme_start);
+ src_entry->object.vm_object = src_object;
+ }
+
+ /*
+ * Iterate over pages. Fault in ones that aren't present.
+ */
+ src_last_offset = src_offset + src_size;
+ for (; (src_offset < src_last_offset && !need_map_lookup);
+ src_offset += PAGE_SIZE, src_start += PAGE_SIZE) {
+
+ if (copy->cpy_npages == VM_MAP_COPY_PAGE_LIST_MAX) {
+make_continuation:
+ /*
+ * At this point we have the max number of
+ * pages busy for this thread that we're
+ * willing to allow. Stop here and record
+ * arguments for the remainder. Note:
+ * this means that this routine isn't atomic,
+ * but that's the breaks. Note that only
+ * the first vm_map_copy_t that comes back
+ * from this routine has the right offset
+ * and size; those from continuations are
+ * page rounded, and short by the amount
+ * already done.
+ *
+ * Reset src_end so the src_destroy
+ * code at the bottom doesn't do
+ * something stupid.
+ */
+
+ cont_args = (vm_map_copyin_args_t)
+ kalloc(sizeof(vm_map_copyin_args_data_t));
+ cont_args->map = src_map;
+ vm_map_reference(src_map);
+ cont_args->src_addr = src_start;
+ cont_args->src_len = len - (src_start - src_addr);
+ if (src_destroy) {
+ cont_args->destroy_addr = cont_args->src_addr;
+ cont_args->destroy_len = cont_args->src_len;
+ }
+ else {
+ cont_args->destroy_addr = (vm_offset_t) 0;
+ cont_args->destroy_len = (vm_offset_t) 0;
+ }
+ cont_args->steal_pages = steal_pages;
+
+ copy->cpy_cont_args = (char *) cont_args;
+ copy->cpy_cont = vm_map_copyin_page_list_cont;
+
+ src_end = src_start;
+ vm_map_clip_end(src_map, src_entry, src_end);
+ break;
+ }
+
+ /*
+ * Try to find the page of data.
+ */
+ vm_object_lock(src_object);
+ vm_object_paging_begin(src_object);
+ if (((m = vm_page_lookup(src_object, src_offset)) !=
+ VM_PAGE_NULL) && !m->busy && !m->fictitious &&
+ !m->absent && !m->error) {
+
+ /*
+ * This is the page. Mark it busy
+ * and keep the paging reference on
+ * the object whilst we do our thing.
+ */
+ m->busy = TRUE;
+
+ /*
+ * Also write-protect the page, so
+ * that the map's owner cannot change
+ * the data. The busy bit will prevent
+ * faults on the page from succeeding
+ * until the copy is released; after
+ * that, the page can be re-entered
+ * as writable, since we didn't alter
+ * the map entry. This scheme is a
+ * cheap copy-on-write.
+ *
+ * Don't forget the protection and
+ * the page_lock value!
+ *
+ * If the source is being destroyed
+ * AND not shared writable, we don't
+ * have to protect the page, since
+ * we will destroy the (only)
+ * writable mapping later.
+ */
+ if (!src_destroy ||
+ src_object->use_shared_copy)
+ {
+ pmap_page_protect(m->phys_addr,
+ src_entry->protection
+ & ~m->page_lock
+ & ~VM_PROT_WRITE);
+ }
+
+ }
+ else {
+ vm_prot_t result_prot;
+ vm_page_t top_page;
+ kern_return_t kr;
+
+ /*
+ * Have to fault the page in; must
+ * unlock the map to do so. While
+ * the map is unlocked anything can
+ * happen, so we must look up the
+ * map entry again before continuing.
+ */
+ vm_map_unlock(src_map);
+ need_map_lookup = TRUE;
+retry:
+ result_prot = VM_PROT_READ;
+
+ kr = vm_fault_page(src_object, src_offset,
+ VM_PROT_READ, FALSE, FALSE,
+ &result_prot, &m, &top_page,
+ FALSE, (void (*)()) 0);
+ /*
+ * Cope with what happened.
+ */
+ switch (kr) {
+ case VM_FAULT_SUCCESS:
+ break;
+ case VM_FAULT_INTERRUPTED: /* ??? */
+ case VM_FAULT_RETRY:
+ vm_object_lock(src_object);
+ vm_object_paging_begin(src_object);
+ goto retry;
+ case VM_FAULT_MEMORY_SHORTAGE:
+ VM_PAGE_WAIT((void (*)()) 0);
+ vm_object_lock(src_object);
+ vm_object_paging_begin(src_object);
+ goto retry;
+ case VM_FAULT_FICTITIOUS_SHORTAGE:
+ vm_page_more_fictitious();
+ vm_object_lock(src_object);
+ vm_object_paging_begin(src_object);
+ goto retry;
+ case VM_FAULT_MEMORY_ERROR:
+ /*
+ * Something broke. If this
+ * is a continuation, return
+ * a partial result if possible,
+ * else fail the whole thing.
+ * In the continuation case, the
+ * next continuation call will
+ * get this error if it persists.
+ */
+ vm_map_lock(src_map);
+ if (is_cont &&
+ copy->cpy_npages != 0)
+ goto make_continuation;
+
+ result = KERN_MEMORY_ERROR;
+ goto error;
+ }
+
+ if (top_page != VM_PAGE_NULL) {
+ vm_object_lock(src_object);
+ VM_PAGE_FREE(top_page);
+ vm_object_paging_end(src_object);
+ vm_object_unlock(src_object);
+ }
+
+ /*
+ * We do not need to write-protect
+ * the page, since it cannot have
+ * been in the pmap (and we did not
+ * enter it above). The busy bit
+ * will protect the page from being
+ * entered as writable until it is
+ * unlocked.
+ */
+
+ }
+
+ /*
+ * The page is busy, its object is locked, and
+ * we have a paging reference on it. Either
+ * the map is locked, or need_map_lookup is
+ * TRUE.
+ *
+ * Put the page in the page list.
+ */
+ copy->cpy_page_list[copy->cpy_npages++] = m;
+ vm_object_unlock(m->object);
+ }
+
+ /*
+ * Determine whether the entire region
+ * has been copied.
+ */
+ if (src_start >= src_end && src_end != 0) {
+ if (need_map_lookup)
+ vm_map_lock(src_map);
+ break;
+ }
+
+ /*
+ * If need_map_lookup is TRUE, have to start over with
+ * another map lookup. Note that we dropped the map
+ * lock (to call vm_fault_page) above only in this case.
+ */
+ if (need_map_lookup)
+ goto do_map_lookup;
+
+ /*
+ * Verify that there are no gaps in the region
+ */
+
+ src_start = src_entry->vme_end;
+ src_entry = src_entry->vme_next;
+ if (src_entry->vme_start != src_start) {
+ result = KERN_INVALID_ADDRESS;
+ goto error;
+ }
+ }
+
+ /*
+ * If steal_pages is true, make sure all
+ * pages in the copy are not in any object.
+ * We try to remove them from the original
+ * object, but we may have to copy them.
+ *
+ * At this point every page in the list is busy
+ * and holds a paging reference to its object.
+ * When we're done stealing, every page is busy,
+ * and in no object (m->tabled == FALSE).
+ */
+ src_start = trunc_page(src_addr);
+ if (steal_pages) {
+ register int i;
+ vm_offset_t unwire_end;
+
+ unwire_end = src_start;
+ for (i = 0; i < copy->cpy_npages; i++) {
+
+ /*
+ * Remove the page from its object if it
+ * can be stolen. It can be stolen if:
+ *
+ * (1) The source is being destroyed,
+ * the object is temporary, and
+ * not shared.
+ * (2) The page is not precious.
+ *
+ * The not shared check consists of two
+ * parts: (a) there are no objects that
+ * shadow this object. (b) it is not the
+ * object in any shared map entries (i.e.,
+ * use_shared_copy is not set).
+ *
+ * The first check (a) means that we can't
+ * steal pages from objects that are not
+ * at the top of their shadow chains. This
+ * should not be a frequent occurrence.
+ *
+ * Stealing wired pages requires telling the
+ * pmap module to let go of them.
+ *
+ * NOTE: stealing clean pages from objects
+ * whose mappings survive requires a call to
+ * the pmap module. Maybe later.
+ */
+ m = copy->cpy_page_list[i];
+ src_object = m->object;
+ vm_object_lock(src_object);
+
+ if (src_destroy &&
+ src_object->temporary &&
+ (!src_object->shadowed) &&
+ (!src_object->use_shared_copy) &&
+ !m->precious) {
+ vm_offset_t page_vaddr;
+
+ page_vaddr = src_start + (i * PAGE_SIZE);
+ if (m->wire_count > 0) {
+
+ assert(m->wire_count == 1);
+ /*
+ * In order to steal a wired
+ * page, we have to unwire it
+ * first. We do this inline
+ * here because we have the page.
+ *
+ * Step 1: Unwire the map entry.
+ * Also tell the pmap module
+ * that this piece of the
+ * pmap is pageable.
+ */
+ vm_object_unlock(src_object);
+ if (page_vaddr >= unwire_end) {
+ if (!vm_map_lookup_entry(src_map,
+ page_vaddr, &src_entry))
+ panic("vm_map_copyin_page_list: missing wired map entry");
+
+ vm_map_clip_start(src_map, src_entry,
+ page_vaddr);
+ vm_map_clip_end(src_map, src_entry,
+ src_start + src_size);
+
+ assert(src_entry->wired_count > 0);
+ src_entry->wired_count = 0;
+ src_entry->user_wired_count = 0;
+ unwire_end = src_entry->vme_end;
+ pmap_pageable(vm_map_pmap(src_map),
+ page_vaddr, unwire_end, TRUE);
+ }
+
+ /*
+ * Step 2: Unwire the page.
+ * pmap_remove handles this for us.
+ */
+ vm_object_lock(src_object);
+ }
+
+ /*
+ * Don't need to remove the mapping;
+ * vm_map_delete will handle it.
+ *
+ * Steal the page. Setting the wire count
+ * to zero is vm_page_unwire without
+ * activating the page.
+ */
+ vm_page_lock_queues();
+ vm_page_remove(m);
+ if (m->wire_count > 0) {
+ m->wire_count = 0;
+ vm_page_wire_count--;
+ } else {
+ VM_PAGE_QUEUES_REMOVE(m);
+ }
+ vm_page_unlock_queues();
+ }
+ else {
+ /*
+ * Have to copy this page. Copying
+ * requires unlocking the map, which
+ * rules out any further page
+ * stealing, so just copy all of the
+ * remaining pages at once.
+ */
+ vm_object_unlock(src_object);
+ vm_map_unlock(src_map);
+
+ vm_map_copy_steal_pages(copy);
+
+ vm_map_lock(src_map);
+ break;
+ }
+
+ vm_object_paging_end(src_object);
+ vm_object_unlock(src_object);
+ }
+
+ /*
+ * If the source should be destroyed, do it now, since the
+ * copy was successful.
+ */
+
+ if (src_destroy) {
+ (void) vm_map_delete(src_map, src_start, src_end);
+ }
+ }
+ else {
+ /*
+ * !steal_pages leaves busy pages in the map.
+ * This will cause src_destroy to hang. Use
+ * a continuation to prevent this.
+ */
+ if (src_destroy && !vm_map_copy_has_cont(copy)) {
+ cont_args = (vm_map_copyin_args_t)
+ kalloc(sizeof(vm_map_copyin_args_data_t));
+ vm_map_reference(src_map);
+ cont_args->map = src_map;
+ cont_args->src_addr = (vm_offset_t) 0;
+ cont_args->src_len = (vm_size_t) 0;
+ cont_args->destroy_addr = src_start;
+ cont_args->destroy_len = src_end - src_start;
+ cont_args->steal_pages = FALSE;
+
+ copy->cpy_cont_args = (char *) cont_args;
+ copy->cpy_cont = vm_map_copyin_page_list_cont;
+ }
+
+ }
+
+ vm_map_unlock(src_map);
+
+ *copy_result = copy;
+ return(result);
+
+error:
+ vm_map_unlock(src_map);
+ vm_map_copy_discard(copy);
+ return(result);
+}
+
+/*
+ * vm_map_fork:
+ *
+ * Create and return a new map based on the old
+ * map, according to the inheritance values on the
+ * regions in that map.
+ *
+ * The source map must not be locked.
+ */
+vm_map_t vm_map_fork(old_map)
+ vm_map_t old_map;
+{
+ vm_map_t new_map;
+ register
+ vm_map_entry_t old_entry;
+ register
+ vm_map_entry_t new_entry;
+ pmap_t new_pmap = pmap_create((vm_size_t) 0);
+ vm_size_t new_size = 0;
+ vm_size_t entry_size;
+ register
+ vm_object_t object;
+
+ vm_map_lock(old_map);
+
+ new_map = vm_map_create(new_pmap,
+ old_map->min_offset,
+ old_map->max_offset,
+ old_map->hdr.entries_pageable);
+
+ for (
+ old_entry = vm_map_first_entry(old_map);
+ old_entry != vm_map_to_entry(old_map);
+ ) {
+ if (old_entry->is_sub_map)
+ panic("vm_map_fork: encountered a submap");
+
+ entry_size = (old_entry->vme_end - old_entry->vme_start);
+
+ switch (old_entry->inheritance) {
+ case VM_INHERIT_NONE:
+ break;
+
+ case VM_INHERIT_SHARE:
+ /*
+ * New sharing code. New map entry
+ * references original object. Temporary
+ * objects use asynchronous copy algorithm for
+ * future copies. First make sure we have
+ * the right object. If we need a shadow,
+ * or someone else already has one, then
+ * make a new shadow and share it.
+ */
+
+ object = old_entry->object.vm_object;
+ if (object == VM_OBJECT_NULL) {
+ object = vm_object_allocate(
+ (vm_size_t)(old_entry->vme_end -
+ old_entry->vme_start));
+ old_entry->offset = 0;
+ old_entry->object.vm_object = object;
+ assert(!old_entry->needs_copy);
+ }
+ else if (old_entry->needs_copy || object->shadowed ||
+ (object->temporary && !old_entry->is_shared &&
+ object->size > (vm_size_t)(old_entry->vme_end -
+ old_entry->vme_start))) {
+
+ assert(object->temporary);
+ assert(!(object->shadowed && old_entry->is_shared));
+ vm_object_shadow(
+ &old_entry->object.vm_object,
+ &old_entry->offset,
+ (vm_size_t) (old_entry->vme_end -
+ old_entry->vme_start));
+
+ /*
+ * If we're making a shadow for other than
+ * copy on write reasons, then we have
+ * to remove write permission.
+ */
+
+ if (!old_entry->needs_copy &&
+ (old_entry->protection & VM_PROT_WRITE)) {
+ pmap_protect(vm_map_pmap(old_map),
+ old_entry->vme_start,
+ old_entry->vme_end,
+ old_entry->protection &
+ ~VM_PROT_WRITE);
+ }
+ old_entry->needs_copy = FALSE;
+ object = old_entry->object.vm_object;
+ }
+
+ /*
+ * Set use_shared_copy to indicate that the
+ * object must use shared (delayed) copy-on-write.
+ * This is ignored for permanent objects.
+ * Bump the reference count for the new entry.
+ */
+
+ vm_object_lock(object);
+ object->use_shared_copy = TRUE;
+ object->ref_count++;
+ vm_object_unlock(object);
+
+ new_entry = vm_map_entry_create(new_map);
+
+ if (old_entry->projected_on != 0) {
+ /*
+ * If the entry is a projected buffer,
+ * clone the entry exactly.
+ */
+
+ vm_map_entry_copy_full(new_entry, old_entry);
+
+ } else {
+ /*
+ * Clone the entry, using the object ref from above.
+ * Mark both entries as shared.
+ */
+
+ vm_map_entry_copy(new_entry, old_entry);
+ old_entry->is_shared = TRUE;
+ new_entry->is_shared = TRUE;
+ }
+
+ /*
+ * Insert the entry into the new map -- we
+ * know we're inserting at the end of the new
+ * map.
+ */
+
+ vm_map_entry_link(
+ new_map,
+ vm_map_last_entry(new_map),
+ new_entry);
+
+ /*
+ * Update the physical map
+ */
+
+ pmap_copy(new_map->pmap, old_map->pmap,
+ new_entry->vme_start,
+ entry_size,
+ old_entry->vme_start);
+
+ new_size += entry_size;
+ break;
+
+ case VM_INHERIT_COPY:
+ if (old_entry->wired_count == 0) {
+ boolean_t src_needs_copy;
+ boolean_t new_entry_needs_copy;
+
+ new_entry = vm_map_entry_create(new_map);
+ vm_map_entry_copy(new_entry, old_entry);
+
+ if (vm_object_copy_temporary(
+ &new_entry->object.vm_object,
+ &new_entry->offset,
+ &src_needs_copy,
+ &new_entry_needs_copy)) {
+
+ /*
+ * Handle copy-on-write obligations
+ */
+
+ if (src_needs_copy && !old_entry->needs_copy) {
+ vm_object_pmap_protect(
+ old_entry->object.vm_object,
+ old_entry->offset,
+ entry_size,
+ (old_entry->is_shared ?
+ PMAP_NULL :
+ old_map->pmap),
+ old_entry->vme_start,
+ old_entry->protection &
+ ~VM_PROT_WRITE);
+
+ old_entry->needs_copy = TRUE;
+ }
+
+ new_entry->needs_copy = new_entry_needs_copy;
+
+ /*
+ * Insert the entry at the end
+ * of the map.
+ */
+
+ vm_map_entry_link(new_map,
+ vm_map_last_entry(new_map),
+ new_entry);
+
+
+ new_size += entry_size;
+ break;
+ }
+
+ vm_map_entry_dispose(new_map, new_entry);
+ }
+
+ /* INNER BLOCK (copy cannot be optimized) */ {
+
+ vm_offset_t start = old_entry->vme_start;
+ vm_map_copy_t copy;
+ vm_map_entry_t last = vm_map_last_entry(new_map);
+
+ vm_map_unlock(old_map);
+ if (vm_map_copyin(old_map,
+ start,
+ entry_size,
+ FALSE,
+ &copy)
+ != KERN_SUCCESS) {
+ vm_map_lock(old_map);
+ if (!vm_map_lookup_entry(old_map, start, &last))
+ last = last->vme_next;
+ old_entry = last;
+ /*
+ * For some error returns, we want to
+ * skip to the next element.
+ */
+
+ continue;
+ }
+
+ /*
+ * Insert the copy into the new map
+ */
+
+ vm_map_copy_insert(new_map, last, copy);
+ new_size += entry_size;
+
+ /*
+ * Pick up the traversal at the end of
+ * the copied region.
+ */
+
+ vm_map_lock(old_map);
+ start += entry_size;
+ if (!vm_map_lookup_entry(old_map, start, &last))
+ last = last->vme_next;
+ else
+ vm_map_clip_start(old_map, last, start);
+ old_entry = last;
+
+ continue;
+ /* INNER BLOCK (copy cannot be optimized) */ }
+ }
+ old_entry = old_entry->vme_next;
+ }
+
+ new_map->size = new_size;
+ vm_map_unlock(old_map);
+
+ return(new_map);
+}
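+
+/*
+ * Editorial note (not part of the original source): summarizing the
+ * cases above for a single parent entry,
+ *
+ *	VM_INHERIT_NONE  - the range simply does not appear in the child;
+ *	VM_INHERIT_SHARE - parent and child entries reference the same
+ *	                   object, and use_shared_copy makes later copies
+ *	                   of that object use the delayed (shared) strategy;
+ *	VM_INHERIT_COPY  - the child gets a symmetric copy-on-write copy,
+ *	                   via vm_object_copy_temporary when possible and
+ *	                   otherwise via vm_map_copyin/vm_map_copy_insert.
+ */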
+
+/*
+ * vm_map_lookup:
+ *
+ * Finds the VM object, offset, and
+ * protection for a given virtual address in the
+ * specified map, assuming a page fault of the
+ * type specified.
+ *
+ * Returns the (object, offset, protection) for
+ * this address, whether it is wired down, and whether
+ * this map has the only reference to the data in question.
+ * In order to later verify this lookup, a "version"
+ * is returned.
+ *
+ * The map should not be locked; it will not be
+ * locked on exit. In order to guarantee the
+ * existence of the returned object, it is returned
+ * locked.
+ *
+ * If a lookup is requested with "write protection"
+ * specified, the map may be changed to perform virtual
+ * copying operations, although the data referenced will
+ * remain the same.
+ */
+kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
+ object, offset, out_prot, wired)
+ vm_map_t *var_map; /* IN/OUT */
+ register vm_offset_t vaddr;
+ register vm_prot_t fault_type;
+
+ vm_map_version_t *out_version; /* OUT */
+ vm_object_t *object; /* OUT */
+ vm_offset_t *offset; /* OUT */
+ vm_prot_t *out_prot; /* OUT */
+ boolean_t *wired; /* OUT */
+{
+ register vm_map_entry_t entry;
+ register vm_map_t map = *var_map;
+ register vm_prot_t prot;
+
+ RetryLookup: ;
+
+ /*
+ * Lookup the faulting address.
+ */
+
+ vm_map_lock_read(map);
+
+#define RETURN(why) \
+ { \
+ vm_map_unlock_read(map); \
+ return(why); \
+ }
+
+ /*
+ * If the map has an interesting hint, try it before calling
+ * full blown lookup routine.
+ */
+
+ simple_lock(&map->hint_lock);
+ entry = map->hint;
+ simple_unlock(&map->hint_lock);
+
+ if ((entry == vm_map_to_entry(map)) ||
+ (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
+ vm_map_entry_t tmp_entry;
+
+ /*
+ * Entry was either not a valid hint, or the vaddr
+ * was not contained in the entry, so do a full lookup.
+ */
+ if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
+ RETURN(KERN_INVALID_ADDRESS);
+
+ entry = tmp_entry;
+ }
+
+ /*
+ * Handle submaps.
+ */
+
+ if (entry->is_sub_map) {
+ vm_map_t old_map = map;
+
+ *var_map = map = entry->object.sub_map;
+ vm_map_unlock_read(old_map);
+ goto RetryLookup;
+ }
+
+ /*
+ * Check whether this task is allowed to have
+ * this page.
+ */
+
+ prot = entry->protection;
+
+ if ((fault_type & (prot)) != fault_type)
+ if ((prot & VM_PROT_NOTIFY) && (fault_type & VM_PROT_WRITE)) {
+ RETURN(KERN_WRITE_PROTECTION_FAILURE);
+ } else {
+ RETURN(KERN_PROTECTION_FAILURE);
+ }
+
+ /*
+ * If this page is not pageable, we have to get
+ * it for all possible accesses.
+ */
+
+ if (*wired = (entry->wired_count != 0))
+ prot = fault_type = entry->protection;
+
+ /*
+ * If the entry was copy-on-write, we either shadow
+ * the object now (write fault) or reduce the
+ * permitted access (read fault).
+ */
+
+ if (entry->needs_copy) {
+ /*
+ * If we want to write the page, we may as well
+ * handle that now since we've got the map locked.
+ *
+ * If we don't need to write the page, we just
+ * demote the permissions allowed.
+ */
+
+ if (fault_type & VM_PROT_WRITE) {
+ /*
+ * Make a new object, and place it in the
+ * object chain. Note that no new references
+ * have appeared -- one just moved from the
+ * map to the new object.
+ */
+
+ if (vm_map_lock_read_to_write(map)) {
+ goto RetryLookup;
+ }
+ map->timestamp++;
+
+ vm_object_shadow(
+ &entry->object.vm_object,
+ &entry->offset,
+ (vm_size_t) (entry->vme_end - entry->vme_start));
+
+ entry->needs_copy = FALSE;
+
+ vm_map_lock_write_to_read(map);
+ }
+ else {
+ /*
+ * We're attempting to read a copy-on-write
+ * page -- don't allow writes.
+ */
+
+ prot &= (~VM_PROT_WRITE);
+ }
+ }
+
+ /*
+ * Create an object if necessary.
+ */
+ if (entry->object.vm_object == VM_OBJECT_NULL) {
+
+ if (vm_map_lock_read_to_write(map)) {
+ goto RetryLookup;
+ }
+
+ entry->object.vm_object = vm_object_allocate(
+ (vm_size_t)(entry->vme_end - entry->vme_start));
+ entry->offset = 0;
+ vm_map_lock_write_to_read(map);
+ }
+
+ /*
+ * Return the object/offset from this entry. If the entry
+ * was copy-on-write or empty, it has been fixed up. Also
+ * return the protection.
+ */
+
+ *offset = (vaddr - entry->vme_start) + entry->offset;
+ *object = entry->object.vm_object;
+ *out_prot = prot;
+
+ /*
+ * Lock the object to prevent it from disappearing
+ */
+
+ vm_object_lock(*object);
+
+ /*
+ * Save the version number and unlock the map.
+ */
+
+ out_version->main_timestamp = map->timestamp;
+
+ RETURN(KERN_SUCCESS);
+
+#undef RETURN
+}
+
+/*
+ * vm_map_verify:
+ *
+ * Verifies that the map in question has not changed
+ * since the given version. If successful, the map
+ * will not change until vm_map_verify_done() is called.
+ */
+boolean_t vm_map_verify(map, version)
+ register
+ vm_map_t map;
+ register
+ vm_map_version_t *version; /* REF */
+{
+ boolean_t result;
+
+ vm_map_lock_read(map);
+ result = (map->timestamp == version->main_timestamp);
+
+ if (!result)
+ vm_map_unlock_read(map);
+
+ return(result);
+}
+
+/*
+ * vm_map_verify_done:
+ *
+ * Releases locks acquired by a vm_map_verify.
+ *
+ * This is now a macro in vm/vm_map.h. It does a
+ * vm_map_unlock_read on the map.
+ */
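+
+/*
+ * Editorial sketch (not part of the original source): the intended
+ * lookup/verify protocol, roughly as a fault handler would use it;
+ * if vm_map_verify fails, the map changed while unlocked and the
+ * lookup must be redone. Error handling is omitted and "RetryFault"
+ * is a placeholder label.
+ *
+ *	kr = vm_map_lookup(&map, vaddr, fault_type, &version,
+ *	                   &object, &offset, &prot, &wired);
+ *	... work with the returned (locked) object, possibly
+ *	    unlocking both it and the map ...
+ *	if (!vm_map_verify(map, &version))
+ *		goto RetryFault;
+ *	... install the translation; the map stays read-locked ...
+ *	vm_map_verify_done(map, &version);
+ */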
+
+/*
+ * vm_region:
+ *
+ * User call to obtain information about a region in
+ * a task's address map.
+ */
+
+kern_return_t vm_region(map, address, size,
+ protection, max_protection,
+ inheritance, is_shared,
+ object_name, offset_in_object)
+ vm_map_t map;
+ vm_offset_t *address; /* IN/OUT */
+ vm_size_t *size; /* OUT */
+ vm_prot_t *protection; /* OUT */
+ vm_prot_t *max_protection; /* OUT */
+ vm_inherit_t *inheritance; /* OUT */
+ boolean_t *is_shared; /* OUT */
+ ipc_port_t *object_name; /* OUT */
+ vm_offset_t *offset_in_object; /* OUT */
+{
+ vm_map_entry_t tmp_entry;
+ register
+ vm_map_entry_t entry;
+ register
+ vm_offset_t tmp_offset;
+ vm_offset_t start;
+
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ start = *address;
+
+ vm_map_lock_read(map);
+ if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ vm_map_unlock_read(map);
+ return(KERN_NO_SPACE);
+ }
+ } else {
+ entry = tmp_entry;
+ }
+
+ start = entry->vme_start;
+ *protection = entry->protection;
+ *max_protection = entry->max_protection;
+ *inheritance = entry->inheritance;
+ *address = start;
+ *size = (entry->vme_end - start);
+
+ tmp_offset = entry->offset;
+
+
+ if (entry->is_sub_map) {
+ *is_shared = FALSE;
+ *object_name = IP_NULL;
+ *offset_in_object = tmp_offset;
+ } else {
+ *is_shared = entry->is_shared;
+ *object_name = vm_object_name(entry->object.vm_object);
+ *offset_in_object = tmp_offset;
+ }
+
+ vm_map_unlock_read(map);
+
+ return(KERN_SUCCESS);
+}
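+
+/*
+ * Editorial sketch (not part of the original source): since *address is
+ * moved to the start of the containing (or next) entry and *size is
+ * returned, a caller can walk a whole address space with a loop of
+ * roughly this shape (all variable names are placeholders):
+ *
+ *	addr = 0;
+ *	while (vm_region(map, &addr, &size, &prot, &max_prot, &inh,
+ *	                 &shared, &name, &off) == KERN_SUCCESS) {
+ *		... examine [addr, addr + size) ...
+ *		addr += size;
+ *	}
+ */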
+
+/*
+ * Routine: vm_map_simplify
+ *
+ * Description:
+ * Attempt to simplify the map representation in
+ * the vicinity of the given starting address.
+ * Note:
+ * This routine is intended primarily to keep the
+ * kernel maps more compact -- they generally don't
+ * benefit from the "expand a map entry" technology
+ * at allocation time because the adjacent entry
+ * is often wired down.
+ */
+void vm_map_simplify(map, start)
+ vm_map_t map;
+ vm_offset_t start;
+{
+ vm_map_entry_t this_entry;
+ vm_map_entry_t prev_entry;
+
+ vm_map_lock(map);
+ if (
+ (vm_map_lookup_entry(map, start, &this_entry)) &&
+ ((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&
+
+ (prev_entry->vme_end == start) &&
+
+ (prev_entry->is_shared == FALSE) &&
+ (prev_entry->is_sub_map == FALSE) &&
+
+ (this_entry->is_shared == FALSE) &&
+ (this_entry->is_sub_map == FALSE) &&
+
+ (prev_entry->inheritance == this_entry->inheritance) &&
+ (prev_entry->protection == this_entry->protection) &&
+ (prev_entry->max_protection == this_entry->max_protection) &&
+ (prev_entry->wired_count == this_entry->wired_count) &&
+ (prev_entry->user_wired_count == this_entry->user_wired_count) &&
+
+ (prev_entry->needs_copy == this_entry->needs_copy) &&
+
+ (prev_entry->object.vm_object == this_entry->object.vm_object) &&
+ ((prev_entry->offset + (prev_entry->vme_end - prev_entry->vme_start))
+ == this_entry->offset) &&
+ (prev_entry->projected_on == 0) &&
+ (this_entry->projected_on == 0)
+ ) {
+ if (map->first_free == this_entry)
+ map->first_free = prev_entry;
+
+ SAVE_HINT(map, prev_entry);
+ vm_map_entry_unlink(map, this_entry);
+ prev_entry->vme_end = this_entry->vme_end;
+ vm_object_deallocate(this_entry->object.vm_object);
+ vm_map_entry_dispose(map, this_entry);
+ }
+ vm_map_unlock(map);
+}
+
+
+/*
+ * Routine: vm_map_machine_attribute
+ * Purpose:
+ * Provide machine-specific attributes to mappings,
+ * such as cachability etc. for machines that provide
+ * them. NUMA architectures and machines with big/strange
+ * caches will use this.
+ * Note:
+ * Responsibilities for locking and checking are handled here,
+ * everything else in the pmap module. If any non-volatile
+ * information must be kept, the pmap module should handle
+ * it itself. [This assumes that attributes do not
+ * need to be inherited, which seems ok to me]
+ */
+kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
+ vm_map_t map;
+ vm_offset_t address;
+ vm_size_t size;
+ vm_machine_attribute_t attribute;
+ vm_machine_attribute_val_t* value; /* IN/OUT */
+{
+ kern_return_t ret;
+
+ if (address < vm_map_min(map) ||
+ (address + size) > vm_map_max(map))
+ return KERN_INVALID_ARGUMENT;
+
+ vm_map_lock(map);
+
+ ret = pmap_attribute(map->pmap, address, size, attribute, value);
+
+ vm_map_unlock(map);
+
+ return ret;
+}
+
+#include <mach_kdb.h>
+
+
+#if MACH_KDB
+
+#define printf kdbprintf
+
+/*
+ * vm_map_print: [ debug ]
+ */
+void vm_map_print(map)
+ register vm_map_t map;
+{
+ register vm_map_entry_t entry;
+ extern int indent;
+
+ iprintf("Task map 0x%X: pmap=0x%X,",
+ (vm_offset_t) map, (vm_offset_t) (map->pmap));
+ printf("ref=%d,nentries=%d,", map->ref_count, map->hdr.nentries);
+ printf("version=%d\n", map->timestamp);
+ indent += 2;
+ for (entry = vm_map_first_entry(map);
+ entry != vm_map_to_entry(map);
+ entry = entry->vme_next) {
+ static char *inheritance_name[3] = { "share", "copy", "none"};
+
+ iprintf("map entry 0x%X: ", (vm_offset_t) entry);
+ printf("start=0x%X, end=0x%X, ",
+ (vm_offset_t) entry->vme_start, (vm_offset_t) entry->vme_end);
+ printf("prot=%X/%X/%s, ",
+ entry->protection,
+ entry->max_protection,
+ inheritance_name[entry->inheritance]);
+ if (entry->wired_count != 0) {
+ printf("wired(");
+ if (entry->user_wired_count != 0)
+ printf("u");
+ if (entry->wired_count >
+ ((entry->user_wired_count == 0) ? 0 : 1))
+ printf("k");
+ printf(") ");
+ }
+ if (entry->in_transition) {
+ printf("in transition");
+ if (entry->needs_wakeup)
+ printf("(wake request)");
+ printf(", ");
+ }
+ if (entry->is_sub_map) {
+ printf("submap=0x%X, offset=0x%X\n",
+ (vm_offset_t) entry->object.sub_map,
+ (vm_offset_t) entry->offset);
+ } else {
+ printf("object=0x%X, offset=0x%X",
+ (vm_offset_t) entry->object.vm_object,
+ (vm_offset_t) entry->offset);
+ if (entry->is_shared)
+ printf(", shared");
+ if (entry->needs_copy)
+ printf(", copy needed");
+ printf("\n");
+
+ if ((entry->vme_prev == vm_map_to_entry(map)) ||
+ (entry->vme_prev->object.vm_object != entry->object.vm_object)) {
+ indent += 2;
+ vm_object_print(entry->object.vm_object);
+ indent -= 2;
+ }
+ }
+ }
+ indent -= 2;
+}
+
+/*
+ * Routine: vm_map_copy_print
+ * Purpose:
+ * Pretty-print a copy object for ddb.
+ */
+
+void vm_map_copy_print(copy)
+ vm_map_copy_t copy;
+{
+ extern int indent;
+ int i, npages;
+
+ printf("copy object 0x%x\n", copy);
+
+ indent += 2;
+
+ iprintf("type=%d", copy->type);
+ switch (copy->type) {
+ case VM_MAP_COPY_ENTRY_LIST:
+ printf("[entry_list]");
+ break;
+
+ case VM_MAP_COPY_OBJECT:
+ printf("[object]");
+ break;
+
+ case VM_MAP_COPY_PAGE_LIST:
+ printf("[page_list]");
+ break;
+
+ default:
+ printf("[bad type]");
+ break;
+ }
+ printf(", offset=0x%x", copy->offset);
+ printf(", size=0x%x\n", copy->size);
+
+ switch (copy->type) {
+ case VM_MAP_COPY_ENTRY_LIST:
+ /* XXX add stuff here */
+ break;
+
+ case VM_MAP_COPY_OBJECT:
+ iprintf("object=0x%x\n", copy->cpy_object);
+ break;
+
+ case VM_MAP_COPY_PAGE_LIST:
+ iprintf("npages=%d", copy->cpy_npages);
+ printf(", cont=%x", copy->cpy_cont);
+ printf(", cont_args=%x\n", copy->cpy_cont_args);
+ if (copy->cpy_npages < 0) {
+ npages = 0;
+ } else if (copy->cpy_npages > VM_MAP_COPY_PAGE_LIST_MAX) {
+ npages = VM_MAP_COPY_PAGE_LIST_MAX;
+ } else {
+ npages = copy->cpy_npages;
+ }
+ iprintf("copy->cpy_page_list[0..%d] = {", npages);
+ for (i = 0; i < npages - 1; i++) {
+ printf("0x%x, ", copy->cpy_page_list[i]);
+ }
+ if (npages > 0) {
+ printf("0x%x", copy->cpy_page_list[npages - 1]);
+ }
+ printf("}\n");
+ break;
+ }
+
+ indent -=2;
+}
+#endif /* MACH_KDB */
+
+#if NORMA_IPC
+/*
+ * This should one day be eliminated;
+ * we should always construct the right flavor of copy object
+ * the first time. Troublesome areas include vm_read, where vm_map_copyin
+ * is called without knowing whom the copy object is for.
+ * There are also situations where we do want a lazy data structure
+ * even if we are sending to a remote port...
+ */
+
+/*
+ * Convert a copy to a page list. The copy argument is in/out
+ * because we probably have to allocate a new vm_map_copy structure.
+ * We take responsibility for discarding the old structure and
+ * use a continuation to do so. Postponing this discard ensures
+ * that the objects containing the pages we've marked busy will stick
+ * around.
+ */
+kern_return_t
+vm_map_convert_to_page_list(caller_copy)
+ vm_map_copy_t *caller_copy;
+{
+ vm_map_entry_t entry, next_entry;
+ vm_offset_t va;
+ vm_offset_t offset;
+ vm_object_t object;
+ kern_return_t result;
+ vm_map_copy_t copy, new_copy;
+ int i, num_pages = 0;
+
+ zone_t entry_zone;
+
+ copy = *caller_copy;
+
+ /*
+ * We may not have to do anything,
+ * or may not be able to do anything.
+ */
+ if (copy == VM_MAP_COPY_NULL || copy->type == VM_MAP_COPY_PAGE_LIST) {
+ return KERN_SUCCESS;
+ }
+ if (copy->type == VM_MAP_COPY_OBJECT) {
+ return vm_map_convert_to_page_list_from_object(caller_copy);
+ }
+ if (copy->type != VM_MAP_COPY_ENTRY_LIST) {
+ panic("vm_map_convert_to_page_list: copy type %d!\n",
+ copy->type);
+ }
+
+ /*
+ * Allocate the new copy. Set its continuation to
+ * discard the old one.
+ */
+ new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ new_copy->type = VM_MAP_COPY_PAGE_LIST;
+ new_copy->cpy_npages = 0;
+ new_copy->offset = copy->offset;
+ new_copy->size = copy->size;
+ new_copy->cpy_cont = vm_map_copy_discard_cont;
+ new_copy->cpy_cont_args = (char *) copy;
+
+ /*
+ * Iterate over entries.
+ */
+ for (entry = vm_map_copy_first_entry(copy);
+ entry != vm_map_copy_to_entry(copy);
+ entry = entry->vme_next) {
+
+ object = entry->object.vm_object;
+ offset = entry->offset;
+ /*
+ * Iterate over pages.
+ */
+ for (va = entry->vme_start;
+ va < entry->vme_end;
+ va += PAGE_SIZE, offset += PAGE_SIZE) {
+
+ vm_page_t m;
+
+ if (new_copy->cpy_npages == VM_MAP_COPY_PAGE_LIST_MAX) {
+ /*
+ * What a mess. We need a continuation
+ * to do the page list, but also one
+ * to discard the old copy. The right
+ * thing to do is probably to copy
+ * out the old copy into the kernel
+ * map (or some temporary task holding
+ * map if we're paranoid about large
+ * copies), and then copyin the page
+ * list that we really wanted with
+ * src_destroy. LATER.
+ */
+ panic("vm_map_convert_to_page_list: num\n");
+ }
+
+ /*
+ * Try to find the page of data.
+ */
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ if (((m = vm_page_lookup(object, offset)) !=
+ VM_PAGE_NULL) && !m->busy && !m->fictitious &&
+ !m->absent && !m->error) {
+
+ /*
+ * This is the page. Mark it busy
+ * and keep the paging reference on
+ * the object whilst we do our thing.
+ */
+ m->busy = TRUE;
+
+ /*
+ * Also write-protect the page, so
+ * that the map's owner cannot change
+ * the data. The busy bit will prevent
+ * faults on the page from succeeding
+ * until the copy is released; after
+ * that, the page can be re-entered
+ * as writable, since we didn't alter
+ * the map entry. This scheme is a
+ * cheap copy-on-write.
+ *
+ * Don't forget the protection and
+ * the page_lock value!
+ */
+
+ pmap_page_protect(m->phys_addr,
+ entry->protection
+ & ~m->page_lock
+ & ~VM_PROT_WRITE);
+
+ }
+ else {
+ vm_prot_t result_prot;
+ vm_page_t top_page;
+ kern_return_t kr;
+
+retry:
+ result_prot = VM_PROT_READ;
+
+ kr = vm_fault_page(object, offset,
+ VM_PROT_READ, FALSE, FALSE,
+ &result_prot, &m, &top_page,
+ FALSE, (void (*)()) 0);
+ if (kr == VM_FAULT_MEMORY_SHORTAGE) {
+ VM_PAGE_WAIT((void (*)()) 0);
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ goto retry;
+ }
+ if (kr != VM_FAULT_SUCCESS) {
+ /* XXX what about data_error? */
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ goto retry;
+ }
+ if (top_page != VM_PAGE_NULL) {
+ vm_object_lock(object);
+ VM_PAGE_FREE(top_page);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ }
+ }
+ assert(m);
+ m->busy = TRUE;
+ new_copy->cpy_page_list[new_copy->cpy_npages++] = m;
+ vm_object_unlock(object);
+ }
+ }
+
+ *caller_copy = new_copy;
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+vm_map_convert_to_page_list_from_object(caller_copy)
+ vm_map_copy_t *caller_copy;
+{
+ vm_object_t object;
+ vm_offset_t offset;
+ vm_map_copy_t copy, new_copy;
+
+ copy = *caller_copy;
+ assert(copy->type == VM_MAP_COPY_OBJECT);
+ object = copy->cpy_object;
+ assert(object->size == round_page(object->size));
+
+ /*
+ * Allocate the new copy. Set its continuation to
+ * discard the old one.
+ */
+ new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ new_copy->type = VM_MAP_COPY_PAGE_LIST;
+ new_copy->cpy_npages = 0;
+ new_copy->offset = copy->offset;
+ new_copy->size = copy->size;
+ new_copy->cpy_cont = vm_map_copy_discard_cont;
+ new_copy->cpy_cont_args = (char *) copy;
+
+ /*
+ * XXX memory_object_lock_request can probably bust this
+ * XXX See continuation comment in previous routine for solution.
+ */
+ assert(object->size <= VM_MAP_COPY_PAGE_LIST_MAX * PAGE_SIZE);
+
+ for (offset = 0; offset < object->size; offset += PAGE_SIZE) {
+ vm_page_t m;
+
+ /*
+ * Try to find the page of data.
+ */
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ m = vm_page_lookup(object, offset);
+ if ((m != VM_PAGE_NULL) && !m->busy && !m->fictitious &&
+ !m->absent && !m->error) {
+
+ /*
+ * This is the page. Mark it busy
+ * and keep the paging reference on
+ * the object whilst we do our thing.
+ */
+ m->busy = TRUE;
+ }
+ else {
+ vm_prot_t result_prot;
+ vm_page_t top_page;
+ kern_return_t kr;
+
+retry:
+ result_prot = VM_PROT_READ;
+
+ kr = vm_fault_page(object, offset,
+ VM_PROT_READ, FALSE, FALSE,
+ &result_prot, &m, &top_page,
+ FALSE, (void (*)()) 0);
+ if (kr == VM_FAULT_MEMORY_SHORTAGE) {
+ VM_PAGE_WAIT((void (*)()) 0);
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ goto retry;
+ }
+ if (kr != VM_FAULT_SUCCESS) {
+ /* XXX what about data_error? */
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ goto retry;
+ }
+
+ if (top_page != VM_PAGE_NULL) {
+ vm_object_lock(object);
+ VM_PAGE_FREE(top_page);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ }
+ }
+ assert(m);
+ m->busy = TRUE;
+ new_copy->cpy_page_list[new_copy->cpy_npages++] = m;
+ vm_object_unlock(object);
+ }
+
+ *caller_copy = new_copy;
+ return (KERN_SUCCESS);
+}
+
+kern_return_t
+vm_map_convert_from_page_list(copy)
+ vm_map_copy_t copy;
+{
+ vm_object_t object;
+ int i;
+ vm_map_entry_t new_entry;
+ vm_page_t *page_list;
+
+ /*
+ * Check type of copy object.
+ */
+ if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
+ return KERN_SUCCESS;
+ }
+ if (copy->type == VM_MAP_COPY_OBJECT) {
+ printf("vm_map_convert_from_page_list: COPY_OBJECT?");
+ return KERN_SUCCESS;
+ }
+ if (copy->type != VM_MAP_COPY_PAGE_LIST) {
+ panic("vm_map_convert_from_page_list 0x%x %d",
+ copy,
+ copy->type);
+ }
+
+ /*
+ * Make sure the pages are loose. This may be
+ * a "Can't Happen", but just to be safe ...
+ */
+ page_list = &copy->cpy_page_list[0];
+ if ((*page_list)->tabled)
+ vm_map_copy_steal_pages(copy);
+
+ /*
+ * Create object, and stuff pages into it.
+ */
+ object = vm_object_allocate((vm_size_t) copy->cpy_npages * PAGE_SIZE);
+ for (i = 0; i < copy->cpy_npages; i++) {
+ register vm_page_t m = *page_list++;
+ vm_page_insert(m, object, i * PAGE_SIZE);
+ m->busy = FALSE;
+ m->dirty = TRUE;
+ vm_page_activate(m);
+ }
+
+ /*
+ * XXX If this page list contained a continuation, then
+ * XXX we're screwed. The right thing to do is probably do
+ * XXX the copyout, and then copyin the entry list we really
+ * XXX wanted.
+ */
+ if (vm_map_copy_has_cont(copy))
+ panic("convert_from_page_list: continuation");
+
+ /*
+ * Change type of copy object
+ */
+ vm_map_copy_first_entry(copy) =
+ vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
+ copy->type = VM_MAP_COPY_ENTRY_LIST;
+ copy->cpy_hdr.nentries = 0;
+ copy->cpy_hdr.entries_pageable = TRUE;
+
+ /*
+ * Allocate and initialize an entry for object
+ */
+ new_entry = vm_map_copy_entry_create(copy);
+ new_entry->vme_start = trunc_page(copy->offset);
+ new_entry->vme_end = round_page(copy->offset + copy->size);
+ new_entry->object.vm_object = object;
+ new_entry->offset = 0;
+ new_entry->is_shared = FALSE;
+ new_entry->is_sub_map = FALSE;
+ new_entry->needs_copy = FALSE;
+ new_entry->protection = VM_PROT_DEFAULT;
+ new_entry->max_protection = VM_PROT_ALL;
+ new_entry->inheritance = VM_INHERIT_DEFAULT;
+ new_entry->wired_count = 0;
+ new_entry->user_wired_count = 0;
+ new_entry->projected_on = 0;
+
+ /*
+ * Insert entry into copy object, and return.
+ */
+ vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy), new_entry);
+ return(KERN_SUCCESS);
+}
+#endif /* NORMA_IPC */
diff --git a/vm/vm_map.h b/vm/vm_map.h
new file mode 100644
index 00000000..0bdb7d13
--- /dev/null
+++ b/vm/vm_map.h
@@ -0,0 +1,448 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_map.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Virtual memory map module definitions.
+ *
+ * Contributors:
+ * avie, dlb, mwyoung
+ */
+
+#ifndef _VM_VM_MAP_H_
+#define _VM_VM_MAP_H_
+
+#include <mach/kern_return.h>
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_prot.h>
+#include <mach/vm_inherit.h>
+#include <vm/pmap.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <kern/lock.h>
+#include <kern/macro_help.h>
+
+/*
+ * Types defined:
+ *
+ * vm_map_t the high-level address map data structure.
+ * vm_map_entry_t an entry in an address map.
+ * vm_map_version_t a timestamp of a map, for use with vm_map_lookup
+ * vm_map_copy_t represents memory copied from an address map,
+ * used for inter-map copy operations
+ */
+
+/*
+ * Type: vm_map_object_t [internal use only]
+ *
+ * Description:
+ * The target of an address mapping, either a virtual
+ * memory object or a sub map (of the kernel map).
+ */
+typedef union vm_map_object {
+ struct vm_object *vm_object; /* virtual memory object */
+ struct vm_map *sub_map; /* belongs to another map */
+} vm_map_object_t;
+
+/*
+ * Type: vm_map_entry_t [internal use only]
+ *
+ * Description:
+ * A single mapping within an address map.
+ *
+ * Implementation:
+ * Address map entries consist of start and end addresses,
+ * a VM object (or sub map) and offset into that object,
+ * and user-exported inheritance and protection information.
+ * Control information for virtual copy operations is also
+ * stored in the address map entry.
+ */
+struct vm_map_links {
+ struct vm_map_entry *prev; /* previous entry */
+ struct vm_map_entry *next; /* next entry */
+ vm_offset_t start; /* start address */
+ vm_offset_t end; /* end address */
+};
+
+struct vm_map_entry {
+ struct vm_map_links links; /* links to other entries */
+#define vme_prev links.prev
+#define vme_next links.next
+#define vme_start links.start
+#define vme_end links.end
+ union vm_map_object object; /* object I point to */
+ vm_offset_t offset; /* offset into object */
+ unsigned int
+ /* boolean_t */ is_shared:1, /* region is shared */
+ /* boolean_t */ is_sub_map:1, /* Is "object" a submap? */
+ /* boolean_t */ in_transition:1, /* Entry being changed */
+ /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */
+ /* Only used when object is a vm_object: */
+ /* boolean_t */ needs_copy:1; /* does object need to be copied */
+
+ /* Only in task maps: */
+ vm_prot_t protection; /* protection code */
+ vm_prot_t max_protection; /* maximum protection */
+ vm_inherit_t inheritance; /* inheritance */
+ unsigned short wired_count; /* can be paged if = 0 */
+ unsigned short user_wired_count; /* for vm_wire */
+ struct vm_map_entry *projected_on; /* 0 for normal map entry
+ or persistent kernel map projected buffer entry;
+ -1 for non-persistent kernel map projected buffer entry;
+ pointer to corresponding kernel map entry for user map
+ projected buffer entry */
+};
+
+typedef struct vm_map_entry *vm_map_entry_t;
+
+#define VM_MAP_ENTRY_NULL ((vm_map_entry_t) 0)
+
+/*
+ * Type: struct vm_map_header
+ *
+ * Description:
+ * Header for a vm_map and a vm_map_copy.
+ */
+struct vm_map_header {
+ struct vm_map_links links; /* first, last, min, max */
+ int nentries; /* Number of entries */
+ boolean_t entries_pageable;
+ /* are map entries pageable? */
+};
+
+/*
+ * Type: vm_map_t [exported; contents invisible]
+ *
+ * Description:
+ * An address map -- a directory relating valid
+ * regions of a task's address space to the corresponding
+ * virtual memory objects.
+ *
+ * Implementation:
+ * Maps are doubly-linked lists of map entries, sorted
+ * by address. One hint is used to start
+ * searches again from the last successful search,
+ * insertion, or removal. Another hint is used to
+ * quickly find free space.
+ */
+struct vm_map {
+ lock_data_t lock; /* Lock for map data */
+ struct vm_map_header hdr; /* Map entry header */
+#define min_offset hdr.links.start /* start of range */
+#define max_offset hdr.links.end /* end of range */
+ pmap_t pmap; /* Physical map */
+ vm_size_t size; /* virtual size */
+ int ref_count; /* Reference count */
+ decl_simple_lock_data(, ref_lock) /* Lock for ref_count field */
+ vm_map_entry_t hint; /* hint for quick lookups */
+ decl_simple_lock_data(, hint_lock) /* lock for hint storage */
+ vm_map_entry_t first_free; /* First free space hint */
+ boolean_t wait_for_space; /* Should callers wait
+ for space? */
+ boolean_t wiring_required;/* All memory wired? */
+ unsigned int timestamp; /* Version number */
+};
+typedef struct vm_map *vm_map_t;
+
+#define VM_MAP_NULL ((vm_map_t) 0)
+
+#define vm_map_to_entry(map) ((struct vm_map_entry *) &(map)->hdr.links)
+#define vm_map_first_entry(map) ((map)->hdr.links.next)
+#define vm_map_last_entry(map) ((map)->hdr.links.prev)
+
+/*
+ * Type: vm_map_version_t [exported; contents invisible]
+ *
+ * Description:
+ * Map versions may be used to quickly validate a previous
+ * lookup operation.
+ *
+ * Usage note:
+ * Because they are bulky objects, map versions are usually
+ * passed by reference.
+ *
+ * Implementation:
+ * Just a timestamp for the main map.
+ */
+typedef struct vm_map_version {
+ unsigned int main_timestamp;
+} vm_map_version_t;
+
+/*
+ * Type: vm_map_copy_t [exported; contents invisible]
+ *
+ * Description:
+ * A map copy object represents a region of virtual memory
+ * that has been copied from an address map but is still
+ * in transit.
+ *
+ * A map copy object may only be used by a single thread
+ * at a time.
+ *
+ * Implementation:
+ * There are three formats for map copy objects.
+ * The first is very similar to the main
+ * address map in structure, and as a result, some
+ * of the internal maintenance functions/macros can
+ * be used with either address maps or map copy objects.
+ *
+ * The map copy object contains a header links
+ * entry onto which the other entries that represent
+ * the region are chained.
+ *
+ * The second format is a single vm object. This is used
+ * primarily in the pageout path. The third format is a
+ * list of vm pages. An optional continuation provides
+ * a hook to be called to obtain more of the memory,
+ * or perform other operations. The continuation takes two
+ * arguments: a saved arg buffer and a pointer through which a
+ * new vm_map_copy is returned; passing a null pointer for the
+ * latter requests an abort (see vm_map_copy_abort_cont below).
+ */
+
+#if iPSC386 || iPSC860
+#define VM_MAP_COPY_PAGE_LIST_MAX 64
+#else /* iPSC386 || iPSC860 */
+#define VM_MAP_COPY_PAGE_LIST_MAX 8
+#endif /* iPSC386 || iPSC860 */
+
+typedef struct vm_map_copy {
+ int type;
+#define VM_MAP_COPY_ENTRY_LIST 1
+#define VM_MAP_COPY_OBJECT 2
+#define VM_MAP_COPY_PAGE_LIST 3
+ vm_offset_t offset;
+ vm_size_t size;
+ union {
+ struct vm_map_header hdr; /* ENTRY_LIST */
+ struct { /* OBJECT */
+ vm_object_t object;
+ } c_o;
+ struct { /* PAGE_LIST */
+ vm_page_t page_list[VM_MAP_COPY_PAGE_LIST_MAX];
+ int npages;
+ kern_return_t (*cont)();
+ char *cont_args;
+ } c_p;
+ } c_u;
+} *vm_map_copy_t;
+
+#define cpy_hdr c_u.hdr
+
+#define cpy_object c_u.c_o.object
+
+#define cpy_page_list c_u.c_p.page_list
+#define cpy_npages c_u.c_p.npages
+#define cpy_cont c_u.c_p.cont
+#define cpy_cont_args c_u.c_p.cont_args
+
+#define VM_MAP_COPY_NULL ((vm_map_copy_t) 0)
+
+/*
+ * Useful macros for entry list copy objects
+ */
+
+#define vm_map_copy_to_entry(copy) \
+ ((struct vm_map_entry *) &(copy)->cpy_hdr.links)
+#define vm_map_copy_first_entry(copy) \
+ ((copy)->cpy_hdr.links.next)
+#define vm_map_copy_last_entry(copy) \
+ ((copy)->cpy_hdr.links.prev)
+
+/*
+ * Continuation macros for page list copy objects
+ */
+
+#define vm_map_copy_invoke_cont(old_copy, new_copy, result) \
+MACRO_BEGIN \
+ vm_map_copy_page_discard(old_copy); \
+ *result = (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args, \
+ new_copy); \
+ (old_copy)->cpy_cont = (kern_return_t (*)()) 0; \
+MACRO_END
+
+#define vm_map_copy_invoke_extend_cont(old_copy, new_copy, result) \
+MACRO_BEGIN \
+ *result = (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args, \
+ new_copy); \
+ (old_copy)->cpy_cont = (kern_return_t (*)()) 0; \
+MACRO_END
+
+#define vm_map_copy_abort_cont(old_copy) \
+MACRO_BEGIN \
+ vm_map_copy_page_discard(old_copy); \
+ (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args, \
+ (vm_map_copy_t *) 0); \
+ (old_copy)->cpy_cont = (kern_return_t (*)()) 0; \
+ (old_copy)->cpy_cont_args = (char *) 0; \
+MACRO_END
+
+#define vm_map_copy_has_cont(copy) \
+ (((copy)->cpy_cont) != (kern_return_t (*)()) 0)
+
+/*
+ * Continuation structures for vm_map_copyin_page_list.
+ */
+
+typedef struct {
+ vm_map_t map;
+ vm_offset_t src_addr;
+ vm_size_t src_len;
+ vm_offset_t destroy_addr;
+ vm_size_t destroy_len;
+ boolean_t steal_pages;
+} vm_map_copyin_args_data_t, *vm_map_copyin_args_t;
+
+#define VM_MAP_COPYIN_ARGS_NULL ((vm_map_copyin_args_t) 0)
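+
+/*
+ * Editorial note (not part of the original source): vm_map_copyin_page_list
+ * fills this structure in when it returns a continuation. src_addr/src_len
+ * describe the part of the region that still has to be copied, and
+ * destroy_addr/destroy_len record a deferred src_destroy range so the
+ * continuation can finish destroying the source later.
+ */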
+
+/*
+ * Macros: vm_map_lock, etc. [internal use only]
+ * Description:
+ * Perform locking on the data portion of a map.
+ */
+
+#define vm_map_lock_init(map) \
+MACRO_BEGIN \
+ lock_init(&(map)->lock, TRUE); \
+ (map)->timestamp = 0; \
+MACRO_END
+
+#define vm_map_lock(map) \
+MACRO_BEGIN \
+ lock_write(&(map)->lock); \
+ (map)->timestamp++; \
+MACRO_END
+
+#define vm_map_unlock(map) lock_write_done(&(map)->lock)
+#define vm_map_lock_read(map) lock_read(&(map)->lock)
+#define vm_map_unlock_read(map) lock_read_done(&(map)->lock)
+#define vm_map_lock_write_to_read(map) \
+ lock_write_to_read(&(map)->lock)
+#define vm_map_lock_read_to_write(map) \
+ (lock_read_to_write(&(map)->lock) || (((map)->timestamp++), 0))
+#define vm_map_lock_set_recursive(map) \
+ lock_set_recursive(&(map)->lock)
+#define vm_map_lock_clear_recursive(map) \
+ lock_clear_recursive(&(map)->lock)
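+
+/*
+ * Editorial note (not part of the original source): vm_map_lock() and a
+ * successful vm_map_lock_read_to_write() bump the map timestamp, which
+ * vm_map_lookup() records in a vm_map_version_t and vm_map_verify()
+ * compares; this is how callers detect that a map may have been
+ * modified while they had it unlocked.
+ */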
+
+/*
+ * Exported procedures that operate on vm_map_t.
+ */
+
+extern vm_offset_t kentry_data;
+extern vm_offset_t kentry_data_size;
+extern int kentry_count;
+extern void vm_map_init(); /* Initialize the module */
+
+extern vm_map_t vm_map_create(); /* Create an empty map */
+extern vm_map_t vm_map_fork(); /* Create a map in the image
+ * of an existing map */
+
+extern void vm_map_reference(); /* Gain a reference to
+ * an existing map */
+extern void vm_map_deallocate(); /* Lose a reference */
+
+extern kern_return_t vm_map_enter(); /* Enter a mapping */
+extern kern_return_t vm_map_find_entry(); /* Enter a mapping primitive */
+extern kern_return_t vm_map_remove(); /* Deallocate a region */
+extern kern_return_t vm_map_protect(); /* Change protection */
+extern kern_return_t vm_map_inherit(); /* Change inheritance */
+
+extern void vm_map_print(); /* Debugging: print a map */
+
+extern kern_return_t vm_map_lookup(); /* Look up an address */
+extern boolean_t vm_map_verify(); /* Verify that a previous
+ * lookup is still valid */
+/* vm_map_verify_done is now a macro -- see below */
+extern kern_return_t vm_map_copyin(); /* Make a copy of a region */
+extern kern_return_t vm_map_copyin_page_list();/* Make a copy of a region
+ * using a page list copy */
+extern kern_return_t vm_map_copyout(); /* Place a copy into a map */
+extern kern_return_t vm_map_copy_overwrite();/* Overwrite existing memory
+ * with a copy */
+extern void vm_map_copy_discard(); /* Discard a copy without
+ * using it */
+extern kern_return_t vm_map_copy_discard_cont();/* Page list continuation
+ * version of previous */
+
+extern kern_return_t vm_map_machine_attribute();
+ /* Add or remove machine-
+ dependent attributes from
+ map regions */
+
+/*
+ * Functions implemented as macros
+ */
+#define vm_map_min(map) ((map)->min_offset)
+ /* Lowest valid address in
+ * a map */
+
+#define vm_map_max(map) ((map)->max_offset)
+ /* Highest valid address */
+
+#define vm_map_pmap(map) ((map)->pmap)
+ /* Physical map associated
+ * with this address map */
+
+#define vm_map_verify_done(map, version) (vm_map_unlock_read(map))
+ /* Operation that required
+ * a verified lookup is
+ * now complete */
+/*
+ * Pageability functions. Includes macro to preserve old interface.
+ */
+extern kern_return_t vm_map_pageable_common();
+
+#define vm_map_pageable(map, s, e, access) \
+ vm_map_pageable_common(map, s, e, access, FALSE)
+
+#define vm_map_pageable_user(map, s, e, access) \
+ vm_map_pageable_common(map, s, e, access, TRUE)
+
+/*
+ * Submap object. Must be used to create memory to be put
+ * in a submap by vm_map_submap.
+ */
+extern vm_object_t vm_submap_object;
+
+/*
+ * Wait and wakeup macros for in_transition map entries.
+ */
+#define vm_map_entry_wait(map, interruptible) \
+ MACRO_BEGIN \
+ assert_wait((event_t)&(map)->hdr, interruptible); \
+ vm_map_unlock(map); \
+ thread_block((void (*)()) 0); \
+ MACRO_END
+
+#define vm_map_entry_wakeup(map) thread_wakeup((event_t)&(map)->hdr)
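+
+/*
+ * Editorial note (not part of the original source): as suggested by the
+ * in_transition/needs_wakeup fields above, the usual protocol is that a
+ * thread finding an entry in transition sets needs_wakeup and calls
+ * vm_map_entry_wait(), which drops the map lock and blocks; on return it
+ * must re-take the lock and look the entry up again. The thread that
+ * clears in_transition calls vm_map_entry_wakeup() if needs_wakeup was set.
+ */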
+
+#endif /* _VM_VM_MAP_H_ */
diff --git a/vm/vm_object.c b/vm/vm_object.c
new file mode 100644
index 00000000..5186ee6c
--- /dev/null
+++ b/vm/vm_object.c
@@ -0,0 +1,3090 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_object.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Virtual memory object module.
+ */
+
+#include <norma_vm.h>
+#include <mach_pagemap.h>
+
+#if NORMA_VM
+#include <norma/xmm_server_rename.h>
+#endif /* NORMA_VM */
+
+#include <mach/memory_object.h>
+#include "memory_object_default.h"
+#include "memory_object_user.h"
+#include "vm_param.h"
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <kern/assert.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <kern/xpr.h>
+#include <kern/zalloc.h>
+#include <vm/memory_object.h>
+#include <vm/vm_fault.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+
+
+void memory_object_release(
+ ipc_port_t pager,
+ pager_request_t pager_request,
+ ipc_port_t pager_name); /* forward */
+
+void vm_object_deactivate_pages(vm_object_t);
+
+/*
+ * Virtual memory objects maintain the actual data
+ * associated with allocated virtual memory. A given
+ * page of memory exists within exactly one object.
+ *
+ * An object is only deallocated when all "references"
+ * are given up. Only one "reference" to a given
+ * region of an object should be writeable.
+ *
+ * Associated with each object is a list of all resident
+ * memory pages belonging to that object; this list is
+ * maintained by the "vm_page" module, but locked by the object's
+ * lock.
+ *
+ * Each object also records the memory object port
+ * that is used by the kernel to request and write
+ * back data (the memory object port, field "pager"),
+ * and the ports provided to the memory manager, the server that
+ * manages that data, to return data and control its
+ * use (the memory object control port, field "pager_request")
+ * and for naming (the memory object name port, field "pager_name").
+ *
+ * Virtual memory objects are allocated to provide
+ * zero-filled memory (vm_allocate) or map a user-defined
+ * memory object into a virtual address space (vm_map).
+ *
+ * Virtual memory objects that refer to a user-defined
+ * memory object are called "permanent", because all changes
+ * made in virtual memory are reflected back to the
+ * memory manager, which may then store it permanently.
+ * Other virtual memory objects are called "temporary",
+ * meaning that changes need be written back only when
+ * necessary to reclaim pages, and that storage associated
+ * with the object can be discarded once it is no longer
+ * mapped.
+ *
+ * A permanent memory object may be mapped into more
+ * than one virtual address space. Moreover, two threads
+ * may attempt to make the first mapping of a memory
+ * object concurrently. Only one thread is allowed to
+ * complete this mapping; all others wait for the
+ * "pager_initialized" field is asserted, indicating
+ * that the first thread has initialized all of the
+ * necessary fields in the virtual memory object structure.
+ *
+ * The kernel relies on a *default memory manager* to
+ * provide backing storage for the zero-filled virtual
+ * memory objects. The memory object ports associated
+ * with these temporary virtual memory objects are only
+ * generated and passed to the default memory manager
+ * when it becomes necessary. Virtual memory objects
+ * that depend on the default memory manager are called
+ * "internal". The "pager_created" field is provided to
+ * indicate whether these ports have ever been allocated.
+ *
+ * The kernel may also create virtual memory objects to
+ * hold changed pages after a copy-on-write operation.
+ * In this case, the virtual memory object (and its
+ * backing storage -- its memory object) only contain
+ * those pages that have been changed. The "shadow"
+ * field refers to the virtual memory object that contains
+ * the remainder of the contents. The "shadow_offset"
+ * field indicates where in the "shadow" these contents begin.
+ * The "copy" field refers to a virtual memory object
+ * to which changed pages must be copied before changing
+ * this object, in order to implement another form
+ * of copy-on-write optimization.
+ *
+ * The virtual memory object structure also records
+ * the attributes associated with its memory object.
+ * The "pager_ready", "can_persist" and "copy_strategy"
+ * fields represent those attributes. The "cached_list"
+ * field is used in the implementation of the persistence
+ * attribute.
+ *
+ * ZZZ Continue this comment.
+ */
+
+zone_t vm_object_zone; /* vm backing store zone */
+
+/*
+ * All wired-down kernel memory belongs to a single virtual
+ * memory object (kernel_object) to avoid wasting data structures.
+ */
+vm_object_t kernel_object;
+
+/*
+ * Virtual memory objects that are not referenced by
+ * any address maps, but that are allowed to persist
+ * (an attribute specified by the associated memory manager),
+ * are kept in a queue (vm_object_cached_list).
+ *
+ * When an object from this queue is referenced again,
+ * for example to make another address space mapping,
+ * it must be removed from the queue. That is, the
+ * queue contains *only* objects with zero references.
+ *
+ * The kernel may choose to terminate objects from this
+ * queue in order to reclaim storage. The current policy
+ * is to permit a fixed maximum number of unreferenced
+ * objects (vm_object_cached_max).
+ *
+ * A simple lock (accessed by routines
+ * vm_object_cache_{lock,lock_try,unlock}) governs the
+ * object cache. It must be held when objects are
+ * added to or removed from the cache (in vm_object_terminate).
+ * The routines that acquire a reference to a virtual
+ * memory object based on one of the memory object ports
+ * must also lock the cache.
+ *
+ * Ideally, the object cache should be more isolated
+ * from the reference mechanism, so that the lock need
+ * not be held to make simple references.
+ */
+queue_head_t vm_object_cached_list;
+int vm_object_cached_count;
+int vm_object_cached_max = 100; /* may be patched */
+
+decl_simple_lock_data(,vm_object_cached_lock_data)
+
+#define vm_object_cache_lock() \
+ simple_lock(&vm_object_cached_lock_data)
+#define vm_object_cache_lock_try() \
+ simple_lock_try(&vm_object_cached_lock_data)
+#define vm_object_cache_unlock() \
+ simple_unlock(&vm_object_cached_lock_data)
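+
+/*
+ * Illustrative sketch of the locking discipline described above
+ * (hypothetical code; a real walk of the cache must also cope with
+ * objects whose pager initialization is still in progress):
+ *
+ *	vm_object_t object;
+ *	int count = 0;
+ *
+ *	vm_object_cache_lock();
+ *	queue_iterate(&vm_object_cached_list, object,
+ *		      vm_object_t, cached_list) {
+ *		count++;
+ *	}
+ *	assert(count == vm_object_cached_count);
+ *	vm_object_cache_unlock();
+ */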
+
+/*
+ * Virtual memory objects are initialized from
+ * a template (see vm_object_allocate).
+ *
+ * When adding a new field to the virtual memory
+ * object structure, be sure to add initialization
+ * (see vm_object_init).
+ */
+vm_object_t vm_object_template;
+
+/*
+ * vm_object_allocate:
+ *
+ * Returns a new object with the given size.
+ */
+
+vm_object_t _vm_object_allocate(
+ vm_size_t size)
+{
+ register vm_object_t object;
+
+ object = (vm_object_t) zalloc(vm_object_zone);
+
+ *object = *vm_object_template;
+ queue_init(&object->memq);
+ vm_object_lock_init(object);
+ object->size = size;
+
+ return object;
+}
+
+vm_object_t vm_object_allocate(
+ vm_size_t size)
+{
+ register vm_object_t object;
+ register ipc_port_t port;
+
+ object = _vm_object_allocate(size);
+#if !NORMA_VM
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("vm_object_allocate");
+ object->pager_name = port;
+ ipc_kobject_set(port, (ipc_kobject_t) object, IKOT_PAGING_NAME);
+#endif /* !NORMA_VM */
+
+ return object;
+}
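+
+/*
+ * Illustrative sketch (hypothetical caller; "len" is an arbitrary
+ * byte count): the object returned by vm_object_allocate is
+ * temporary and internal, holds one reference, and is suitable for
+ * backing zero-filled memory.
+ *
+ *	vm_object_t object;
+ *
+ *	object = vm_object_allocate(round_page(len));
+ *	... use the object, or hand the reference to a map entry ...
+ *	vm_object_deallocate(object);
+ */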
+
+/*
+ * vm_object_bootstrap:
+ *
+ * Initialize the VM objects module.
+ */
+void vm_object_bootstrap(void)
+{
+ vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object),
+ round_page(512*1024),
+ round_page(12*1024),
+ 0, "objects");
+
+ queue_init(&vm_object_cached_list);
+ simple_lock_init(&vm_object_cached_lock_data);
+
+ /*
+ * Fill in a template object, for quick initialization
+ */
+
+ vm_object_template = (vm_object_t) zalloc(vm_object_zone);
+ bzero((char *) vm_object_template, sizeof *vm_object_template);
+
+ vm_object_template->ref_count = 1;
+ vm_object_template->size = 0;
+ vm_object_template->resident_page_count = 0;
+ vm_object_template->copy = VM_OBJECT_NULL;
+ vm_object_template->shadow = VM_OBJECT_NULL;
+ vm_object_template->shadow_offset = (vm_offset_t) 0;
+
+ vm_object_template->pager = IP_NULL;
+ vm_object_template->paging_offset = 0;
+ vm_object_template->pager_request = PAGER_REQUEST_NULL;
+ vm_object_template->pager_name = IP_NULL;
+
+ vm_object_template->pager_created = FALSE;
+ vm_object_template->pager_initialized = FALSE;
+ vm_object_template->pager_ready = FALSE;
+
+ vm_object_template->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+ /* ignored if temporary, will be reset before
+ * permanent object becomes ready */
+ vm_object_template->use_shared_copy = FALSE;
+ vm_object_template->shadowed = FALSE;
+
+ vm_object_template->absent_count = 0;
+ vm_object_template->all_wanted = 0; /* all bits FALSE */
+
+ vm_object_template->paging_in_progress = 0;
+ vm_object_template->can_persist = FALSE;
+ vm_object_template->internal = TRUE;
+ vm_object_template->temporary = TRUE;
+ vm_object_template->alive = TRUE;
+ vm_object_template->lock_in_progress = FALSE;
+ vm_object_template->lock_restart = FALSE;
+ vm_object_template->use_old_pageout = TRUE; /* XXX change later */
+ vm_object_template->last_alloc = (vm_offset_t) 0;
+
+#if MACH_PAGEMAP
+ vm_object_template->existence_info = VM_EXTERNAL_NULL;
+#endif /* MACH_PAGEMAP */
+
+ /*
+ * Initialize the "kernel object"
+ */
+
+ kernel_object = _vm_object_allocate(
+ VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
+
+ /*
+ * Initialize the "submap object". Make it as large as the
+ * kernel object so that no limit is imposed on submap sizes.
+ */
+
+ vm_submap_object = _vm_object_allocate(
+ VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
+
+#if MACH_PAGEMAP
+ vm_external_module_initialize();
+#endif /* MACH_PAGEMAP */
+}
+
+void vm_object_init(void)
+{
+#if !NORMA_VM
+ /*
+ * Finish initializing the kernel object.
+ * The submap object doesn't need a name port.
+ */
+
+ kernel_object->pager_name = ipc_port_alloc_kernel();
+ ipc_kobject_set(kernel_object->pager_name,
+ (ipc_kobject_t) kernel_object,
+ IKOT_PAGING_NAME);
+#endif /* !NORMA_VM */
+}
+
+/*
+ * vm_object_reference:
+ *
+ * Gets another reference to the given object.
+ */
+void vm_object_reference(
+ register vm_object_t object)
+{
+ if (object == VM_OBJECT_NULL)
+ return;
+
+ vm_object_lock(object);
+ assert(object->ref_count > 0);
+ object->ref_count++;
+ vm_object_unlock(object);
+}
+
+/*
+ * vm_object_deallocate:
+ *
+ * Release a reference to the specified object,
+ * gained either through a vm_object_allocate
+ * or a vm_object_reference call. When all references
+ * are gone, storage associated with this object
+ * may be relinquished.
+ *
+ * No object may be locked.
+ */
+void vm_object_deallocate(
+ register vm_object_t object)
+{
+ vm_object_t temp;
+
+ while (object != VM_OBJECT_NULL) {
+
+ /*
+ * The cache holds a reference (uncounted) to
+ * the object; we must lock the cache before
+ * removing the object.
+ */
+
+ vm_object_cache_lock();
+
+ /*
+ * Lose the reference
+ */
+ vm_object_lock(object);
+ if (--(object->ref_count) > 0) {
+
+ /*
+ * If there are still references, then
+ * we are done.
+ */
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ return;
+ }
+
+ /*
+ * See whether this object can persist. If so, enter
+ * it in the cache, then deactivate all of its
+ * pages.
+ */
+ if (object->can_persist) {
+ boolean_t overflow;
+
+ /*
+ * Enter the object onto the queue
+ * of "cached" objects. Remember whether
+ * we've caused the queue to overflow,
+ * as a hint.
+ */
+
+ queue_enter(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ overflow = (++vm_object_cached_count > vm_object_cached_max);
+ vm_object_cache_unlock();
+
+ vm_object_deactivate_pages(object);
+ vm_object_unlock(object);
+
+ /*
+ * If we didn't overflow, or if the queue has
+ * been reduced back to below the specified
+ * maximum, then quit.
+ */
+ if (!overflow)
+ return;
+
+ while (TRUE) {
+ vm_object_cache_lock();
+ if (vm_object_cached_count <=
+ vm_object_cached_max) {
+ vm_object_cache_unlock();
+ return;
+ }
+
+ /*
+ * If we must trim down the queue, take
+ * the first object, and proceed to
+ * terminate it instead of the original
+ * object. Have to wait for pager init
+ * if it's in progress.
+ */
+ object = (vm_object_t)
+ queue_first(&vm_object_cached_list);
+ vm_object_lock(object);
+
+ if (!(object->pager_created &&
+ !object->pager_initialized)) {
+
+ /*
+ * Ok to terminate, hang on to lock.
+ */
+ break;
+ }
+
+ vm_object_assert_wait(object,
+ VM_OBJECT_EVENT_INITIALIZED, FALSE);
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ thread_block((void (*)()) 0);
+
+ /*
+ * Continue loop to check if cache still
+ * needs to be trimmed.
+ */
+ }
+
+ /*
+ * Actually remove object from cache.
+ */
+
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+
+ assert(object->ref_count == 0);
+ }
+ else {
+ if (object->pager_created &&
+ !object->pager_initialized) {
+
+ /*
+ * Have to wait for initialization.
+ * Put reference back and retry
+ * when it's initialized.
+ */
+ object->ref_count++;
+ vm_object_assert_wait(object,
+ VM_OBJECT_EVENT_INITIALIZED, FALSE);
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ thread_block((void (*)()) 0);
+ continue;
+ }
+ }
+
+ /*
+ * Take the reference to the shadow object
+ * out of the object to be destroyed.
+ */
+
+ temp = object->shadow;
+
+ /*
+ * Destroy the object; the cache lock will
+ * be released in the process.
+ */
+
+ vm_object_terminate(object);
+
+ /*
+ * Deallocate the reference to the shadow
+ * by continuing the loop with that object
+ * in place of the original.
+ */
+
+ object = temp;
+ }
+}
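+
+/*
+ * Illustrative sketch of the reference discipline (hypothetical
+ * caller; the object must not be locked across these calls):
+ *
+ *	vm_object_reference(object);
+ *	...
+ *	vm_object_deallocate(object);
+ *
+ * When the last reference is dropped, a persistent object is
+ * parked on vm_object_cached_list and its pages are deactivated;
+ * a non-persistent object is terminated via vm_object_terminate.
+ */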
+
+boolean_t vm_object_terminate_remove_all = FALSE;
+
+/*
+ * Routine: vm_object_terminate
+ * Purpose:
+ * Free all resources associated with a vm_object.
+ * In/out conditions:
+ * Upon entry, the object and the cache must be locked,
+ * and the object must have no references.
+ *
+ * The shadow object reference is left alone.
+ *
+ * Upon exit, the cache will be unlocked, and the
+ * object will cease to exist.
+ */
+void vm_object_terminate(
+ register vm_object_t object)
+{
+ register vm_page_t p;
+ vm_object_t shadow_object;
+
+ /*
+ * Make sure the object isn't already being terminated
+ */
+
+ assert(object->alive);
+ object->alive = FALSE;
+
+ /*
+ * Make sure no one can look us up now.
+ */
+
+ vm_object_remove(object);
+ vm_object_cache_unlock();
+
+ /*
+ * Detach the object from its shadow if we are the shadow's
+ * copy.
+ */
+ if ((shadow_object = object->shadow) != VM_OBJECT_NULL) {
+ vm_object_lock(shadow_object);
+ assert((shadow_object->copy == object) ||
+ (shadow_object->copy == VM_OBJECT_NULL));
+ shadow_object->copy = VM_OBJECT_NULL;
+ vm_object_unlock(shadow_object);
+ }
+
+ /*
+ * The pageout daemon might be playing with our pages.
+ * Now that the object is dead, it won't touch any more
+ * pages, but some pages might already be on their way out.
+ * Hence, we wait until the active paging activities have ceased.
+ */
+
+ vm_object_paging_wait(object, FALSE);
+
+ /*
+ * Clean or free the pages, as appropriate.
+ * It is possible for us to find busy/absent pages,
+ * if some faults on this object were aborted.
+ */
+
+ if ((object->temporary) || (object->pager == IP_NULL)) {
+ while (!queue_empty(&object->memq)) {
+ p = (vm_page_t) queue_first(&object->memq);
+
+ VM_PAGE_CHECK(p);
+
+ if (p->busy && !p->absent)
+ panic("vm_object_terminate.2 0x%x 0x%x",
+ object, p);
+
+ VM_PAGE_FREE(p);
+ }
+ } else while (!queue_empty(&object->memq)) {
+ p = (vm_page_t) queue_first(&object->memq);
+
+ VM_PAGE_CHECK(p);
+
+ if (p->busy && !p->absent)
+ panic("vm_object_terminate.3 0x%x 0x%x", object, p);
+
+ vm_page_lock_queues();
+ VM_PAGE_QUEUES_REMOVE(p);
+ vm_page_unlock_queues();
+
+ if (p->absent || p->private) {
+
+ /*
+ * For private pages, VM_PAGE_FREE just
+ * leaves the page structure around for
+ * its owner to clean up. For absent
+ * pages, the structure is returned to
+ * the appropriate pool.
+ */
+
+ goto free_page;
+ }
+
+ if (p->fictitious)
+ panic("vm_object_terminate.4 0x%x 0x%x", object, p);
+
+ if (!p->dirty)
+ p->dirty = pmap_is_modified(p->phys_addr);
+
+ if (p->dirty || p->precious) {
+ p->busy = TRUE;
+ vm_pageout_page(p, FALSE, TRUE); /* flush page */
+ } else {
+ free_page:
+ VM_PAGE_FREE(p);
+ }
+ }
+
+ assert(object->ref_count == 0);
+ assert(object->paging_in_progress == 0);
+
+ /*
+ * Throw away port rights... note that they may
+ * already have been thrown away (by vm_object_destroy
+ * or memory_object_destroy).
+ *
+ * Instead of destroying the control and name ports,
+ * we send all rights off to the memory manager,
+ * using memory_object_terminate.
+ */
+
+ vm_object_unlock(object);
+
+ if (object->pager != IP_NULL) {
+ /* consumes our rights for pager, pager_request, pager_name */
+ memory_object_release(object->pager,
+ object->pager_request,
+ object->pager_name);
+ } else if (object->pager_name != IP_NULL) {
+ /* consumes our right for pager_name */
+#if NORMA_VM
+ ipc_port_release_send(object->pager_name);
+#else /* NORMA_VM */
+ ipc_port_dealloc_kernel(object->pager_name);
+#endif /* NORMA_VM */
+ }
+
+#if MACH_PAGEMAP
+ vm_external_destroy(object->existence_info);
+#endif /* MACH_PAGEMAP */
+
+ /*
+ * Free the space for the object.
+ */
+
+ zfree(vm_object_zone, (vm_offset_t) object);
+}
+
+/*
+ * Routine: vm_object_pager_wakeup
+ * Purpose: Wake up anyone waiting for IKOT_PAGER_TERMINATING
+ */
+
+void
+vm_object_pager_wakeup(
+ ipc_port_t pager)
+{
+ boolean_t someone_waiting;
+
+ /*
+ * If anyone was waiting for the memory_object_terminate
+ * to be queued, wake them up now.
+ */
+ vm_object_cache_lock();
+ assert(ip_kotype(pager) == IKOT_PAGER_TERMINATING);
+ someone_waiting = (pager->ip_kobject != IKO_NULL);
+ if (ip_active(pager))
+ ipc_kobject_set(pager, IKO_NULL, IKOT_NONE);
+ vm_object_cache_unlock();
+ if (someone_waiting) {
+ thread_wakeup((event_t) pager);
+ }
+}
+
+/*
+ * Routine: memory_object_release
+ * Purpose: Terminate the pager and release port rights,
+ * just like memory_object_terminate, except
+ * that we wake up anyone blocked in vm_object_enter
+ * waiting for the termination message to be queued
+ * before calling memory_object_init.
+ */
+void memory_object_release(
+ ipc_port_t pager,
+ pager_request_t pager_request,
+ ipc_port_t pager_name)
+{
+
+ /*
+ * Keep a reference to pager port;
+ * the terminate might otherwise release all references.
+ */
+ ip_reference(pager);
+
+ /*
+ * Terminate the pager.
+ */
+ (void) memory_object_terminate(pager, pager_request, pager_name);
+
+ /*
+ * Wakeup anyone waiting for this terminate
+ */
+ vm_object_pager_wakeup(pager);
+
+ /*
+ * Release reference to pager port.
+ */
+ ip_release(pager);
+}
+
+/*
+ * Routine: vm_object_abort_activity [internal use only]
+ * Purpose:
+ * Abort paging requests pending on this object.
+ * In/out conditions:
+ * The object is locked on entry and exit.
+ */
+void vm_object_abort_activity(
+ vm_object_t object)
+{
+ register
+ vm_page_t p;
+ vm_page_t next;
+
+ /*
+ * Abort all activity that would be waiting
+ * for a result on this memory object.
+ *
+ * We could also choose to destroy all pages
+ * that we have in memory for this object, but
+ * we don't.
+ */
+
+ p = (vm_page_t) queue_first(&object->memq);
+ while (!queue_end(&object->memq, (queue_entry_t) p)) {
+ next = (vm_page_t) queue_next(&p->listq);
+
+ /*
+ * If it's being paged in, destroy it.
+ * If an unlock has been requested, start it again.
+ */
+
+ if (p->busy && p->absent) {
+ VM_PAGE_FREE(p);
+ }
+ else {
+ if (p->unlock_request != VM_PROT_NONE)
+ p->unlock_request = VM_PROT_NONE;
+ PAGE_WAKEUP(p);
+ }
+
+ p = next;
+ }
+
+ /*
+ * Wake up threads waiting for the memory object to
+ * become ready.
+ */
+
+ object->pager_ready = TRUE;
+ vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+}
+
+/*
+ * Routine: memory_object_destroy [user interface]
+ * Purpose:
+ * Shut down a memory object, despite the
+ * presence of address map (or other) references
+ * to the vm_object.
+ * Note:
+ * This routine may be called either from the user interface,
+ * or from port destruction handling (via vm_object_destroy).
+ */
+kern_return_t memory_object_destroy(
+ register
+ vm_object_t object,
+ kern_return_t reason)
+{
+ ipc_port_t old_object, old_name;
+ pager_request_t old_control;
+
+#ifdef lint
+ reason++;
+#endif /* lint */
+
+ if (object == VM_OBJECT_NULL)
+ return KERN_SUCCESS;
+
+ /*
+ * Remove the port associations immediately.
+ *
+ * This will prevent the memory manager from further
+ * meddling. [If it wanted to flush data or make
+ * other changes, it should have done so before performing
+ * the destroy call.]
+ */
+
+ vm_object_cache_lock();
+ vm_object_lock(object);
+ vm_object_remove(object);
+ object->can_persist = FALSE;
+ vm_object_cache_unlock();
+
+ /*
+ * Rip out the ports from the vm_object now... this
+ * will prevent new memory_object calls from succeeding.
+ */
+
+ old_object = object->pager;
+ object->pager = IP_NULL;
+
+ old_control = object->pager_request;
+ object->pager_request = PAGER_REQUEST_NULL;
+
+ old_name = object->pager_name;
+ object->pager_name = IP_NULL;
+
+
+ /*
+ * Wait for existing paging activity (that might
+ * have the old ports) to subside.
+ */
+
+ vm_object_paging_wait(object, FALSE);
+ vm_object_unlock(object);
+
+ /*
+ * Shut down the ports now.
+ *
+ * [Paging operations may be proceeding concurrently --
+ * they'll get the null values established above.]
+ */
+
+ if (old_object != IP_NULL) {
+ /* consumes our rights for object, control, name */
+ memory_object_release(old_object, old_control,
+ old_name);
+ } else if (old_name != IP_NULL) {
+ /* consumes our right for name */
+#if NORMA_VM
+ ipc_port_release_send(old_name);
+#else /* NORMA_VM */
+ ipc_port_dealloc_kernel(old_name);
+#endif /* NORMA_VM */
+ }
+
+ /*
+ * Lose the reference that was donated for this routine
+ */
+
+ vm_object_deallocate(object);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * vm_object_deactivate_pages
+ *
+ * Deactivate all pages in the specified object. (Keep its pages
+ * in memory even though it is no longer referenced.)
+ *
+ * The object must be locked.
+ */
+void vm_object_deactivate_pages(
+ register vm_object_t object)
+{
+ register vm_page_t p;
+
+ queue_iterate(&object->memq, p, vm_page_t, listq) {
+ vm_page_lock_queues();
+ if (!p->busy)
+ vm_page_deactivate(p);
+ vm_page_unlock_queues();
+ }
+}
+
+
+/*
+ * Routine: vm_object_pmap_protect
+ *
+ * Purpose:
+ * Reduces the permission for all physical
+ * pages in the specified object range.
+ *
+ * If removing write permission only, it is
+ * sufficient to protect only the pages in
+ * the top-level object; only those pages may
+ * have write permission.
+ *
+ * If removing all access, we must follow the
+ * shadow chain from the top-level object to
+ * remove access to all pages in shadowed objects.
+ *
+ * The object must *not* be locked. The object must
+ * be temporary/internal.
+ *
+ * If pmap is not NULL, this routine assumes that
+ * the only mappings for the pages are in that
+ * pmap.
+ */
+boolean_t vm_object_pmap_protect_by_page = FALSE;
+
+void vm_object_pmap_protect(
+ register vm_object_t object,
+ register vm_offset_t offset,
+ vm_offset_t size,
+ pmap_t pmap,
+ vm_offset_t pmap_start,
+ vm_prot_t prot)
+{
+ if (object == VM_OBJECT_NULL)
+ return;
+
+ vm_object_lock(object);
+
+ assert(object->temporary && object->internal);
+
+ while (TRUE) {
+ if (object->resident_page_count > atop(size) / 2 &&
+ pmap != PMAP_NULL) {
+ vm_object_unlock(object);
+ pmap_protect(pmap, pmap_start, pmap_start + size, prot);
+ return;
+ }
+
+ {
+ register vm_page_t p;
+ register vm_offset_t end;
+
+ end = offset + size;
+
+ queue_iterate(&object->memq, p, vm_page_t, listq) {
+ if (!p->fictitious &&
+ (offset <= p->offset) &&
+ (p->offset < end)) {
+ if ((pmap == PMAP_NULL) ||
+ vm_object_pmap_protect_by_page) {
+ pmap_page_protect(p->phys_addr,
+ prot & ~p->page_lock);
+ } else {
+ vm_offset_t start =
+ pmap_start +
+ (p->offset - offset);
+
+ pmap_protect(pmap,
+ start,
+ start + PAGE_SIZE,
+ prot);
+ }
+ }
+ }
+ }
+
+ if (prot == VM_PROT_NONE) {
+ /*
+ * Must follow shadow chain to remove access
+ * to pages in shadowed objects.
+ */
+ register vm_object_t next_object;
+
+ next_object = object->shadow;
+ if (next_object != VM_OBJECT_NULL) {
+ offset += object->shadow_offset;
+ vm_object_lock(next_object);
+ vm_object_unlock(object);
+ object = next_object;
+ }
+ else {
+ /*
+ * End of chain - we are done.
+ */
+ break;
+ }
+ }
+ else {
+ /*
+ * Pages in shadowed objects may never have
+ * write permission - we may stop here.
+ */
+ break;
+ }
+ }
+
+ vm_object_unlock(object);
+}
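+
+/*
+ * Illustrative sketch (hypothetical caller, e.g. write-protecting
+ * a temporary/internal object that is mapped only through one
+ * physical map; the names map, start and cur_protection are
+ * assumptions of this sketch):
+ *
+ *	vm_object_pmap_protect(object, offset, size,
+ *			       map->pmap, start,
+ *			       cur_protection & ~VM_PROT_WRITE);
+ *
+ * Because only write permission is being removed, only the
+ * top-level object needs to be protected.
+ */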
+
+/*
+ * vm_object_pmap_remove:
+ *
+ * Removes all physical pages in the specified
+ * object range from all physical maps.
+ *
+ * The object must *not* be locked.
+ */
+void vm_object_pmap_remove(
+ register vm_object_t object,
+ register vm_offset_t start,
+ register vm_offset_t end)
+{
+ register vm_page_t p;
+
+ if (object == VM_OBJECT_NULL)
+ return;
+
+ vm_object_lock(object);
+ queue_iterate(&object->memq, p, vm_page_t, listq) {
+ if (!p->fictitious &&
+ (start <= p->offset) &&
+ (p->offset < end))
+ pmap_page_protect(p->phys_addr, VM_PROT_NONE);
+ }
+ vm_object_unlock(object);
+}
+
+/*
+ * Routine: vm_object_copy_slowly
+ *
+ * Description:
+ * Copy the specified range of the source
+ * virtual memory object without using
+ * protection-based optimizations (such
+ * as copy-on-write). The pages in the
+ * region are actually copied.
+ *
+ * In/out conditions:
+ * The caller must hold a reference and a lock
+ * for the source virtual memory object. The source
+ * object will be returned *unlocked*.
+ *
+ * Results:
+ * If the copy is completed successfully, KERN_SUCCESS is
+ * returned. If the caller asserted the interruptible
+ * argument, and an interruption occurred while waiting
+ * for a user-generated event, MACH_SEND_INTERRUPTED is
+ * returned. Other values may be returned to indicate
+ * hard errors during the copy operation.
+ *
+ * A new virtual memory object is returned in a
+ * parameter (_result_object). The contents of this
+ * new object, starting at a zero offset, are a copy
+ * of the source memory region. In the event of
+ * an error, this parameter will contain the value
+ * VM_OBJECT_NULL.
+ */
+kern_return_t vm_object_copy_slowly(
+ register
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t size,
+ boolean_t interruptible,
+ vm_object_t *_result_object) /* OUT */
+{
+ vm_object_t new_object;
+ vm_offset_t new_offset;
+
+ if (size == 0) {
+ vm_object_unlock(src_object);
+ *_result_object = VM_OBJECT_NULL;
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Prevent destruction of the source object while we copy.
+ */
+
+ assert(src_object->ref_count > 0);
+ src_object->ref_count++;
+ vm_object_unlock(src_object);
+
+ /*
+ * Create a new object to hold the copied pages.
+ * A few notes:
+ * We fill the new object starting at offset 0,
+ * regardless of the input offset.
+ * We don't bother to lock the new object within
+ * this routine, since we have the only reference.
+ */
+
+ new_object = vm_object_allocate(size);
+ new_offset = 0;
+
+ assert(size == trunc_page(size)); /* Will the loop terminate? */
+
+ for ( ;
+ size != 0 ;
+ src_offset += PAGE_SIZE, new_offset += PAGE_SIZE, size -= PAGE_SIZE
+ ) {
+ vm_page_t new_page;
+ vm_fault_return_t result;
+
+ while ((new_page = vm_page_alloc(new_object, new_offset))
+ == VM_PAGE_NULL) {
+ VM_PAGE_WAIT((void (*)()) 0);
+ }
+
+ do {
+ vm_prot_t prot = VM_PROT_READ;
+ vm_page_t _result_page;
+ vm_page_t top_page;
+ register
+ vm_page_t result_page;
+
+ vm_object_lock(src_object);
+ src_object->paging_in_progress++;
+
+ result = vm_fault_page(src_object, src_offset,
+ VM_PROT_READ, FALSE, interruptible,
+ &prot, &_result_page, &top_page,
+ FALSE, (void (*)()) 0);
+
+ switch(result) {
+ case VM_FAULT_SUCCESS:
+ result_page = _result_page;
+
+ /*
+ * We don't need to hold the object
+ * lock -- the busy page will be enough.
+ * [We don't care about picking up any
+ * new modifications.]
+ *
+ * Copy the page to the new object.
+ *
+ * POLICY DECISION:
+ * If result_page is clean,
+ * we could steal it instead
+ * of copying.
+ */
+
+ vm_object_unlock(result_page->object);
+ vm_page_copy(result_page, new_page);
+
+ /*
+ * Let go of both pages (make them
+ * not busy, perform wakeup, activate).
+ */
+
+ new_page->busy = FALSE;
+ new_page->dirty = TRUE;
+ vm_object_lock(result_page->object);
+ PAGE_WAKEUP_DONE(result_page);
+
+ vm_page_lock_queues();
+ if (!result_page->active &&
+ !result_page->inactive)
+ vm_page_activate(result_page);
+ vm_page_activate(new_page);
+ vm_page_unlock_queues();
+
+ /*
+ * Release paging references and
+ * top-level placeholder page, if any.
+ */
+
+ vm_fault_cleanup(result_page->object,
+ top_page);
+
+ break;
+
+ case VM_FAULT_RETRY:
+ break;
+
+ case VM_FAULT_MEMORY_SHORTAGE:
+ VM_PAGE_WAIT((void (*)()) 0);
+ break;
+
+ case VM_FAULT_FICTITIOUS_SHORTAGE:
+ vm_page_more_fictitious();
+ break;
+
+ case VM_FAULT_INTERRUPTED:
+ vm_page_free(new_page);
+ vm_object_deallocate(new_object);
+ vm_object_deallocate(src_object);
+ *_result_object = VM_OBJECT_NULL;
+ return MACH_SEND_INTERRUPTED;
+
+ case VM_FAULT_MEMORY_ERROR:
+ /*
+ * A policy choice:
+ * (a) ignore pages that we can't
+ * copy
+ * (b) return the null object if
+ * any page fails [chosen]
+ */
+
+ vm_page_free(new_page);
+ vm_object_deallocate(new_object);
+ vm_object_deallocate(src_object);
+ *_result_object = VM_OBJECT_NULL;
+ return KERN_MEMORY_ERROR;
+ }
+ } while (result != VM_FAULT_SUCCESS);
+ }
+
+ /*
+ * Lose the extra reference, and return our object.
+ */
+
+ vm_object_deallocate(src_object);
+ *_result_object = new_object;
+ return KERN_SUCCESS;
+}
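+
+/*
+ * Illustrative sketch of the calling convention (hypothetical
+ * caller): the source object is passed in locked, with a reference
+ * held, and is returned unlocked.
+ *
+ *	vm_object_t copy;
+ *	kern_return_t kr;
+ *
+ *	vm_object_lock(src_object);
+ *	kr = vm_object_copy_slowly(src_object, offset, size,
+ *				   TRUE, &copy);
+ *	if (kr != KERN_SUCCESS)
+ *		return kr;
+ *	... "copy" now holds a private copy of the range,
+ *	    starting at offset 0 ...
+ */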
+
+/*
+ * Routine: vm_object_copy_temporary
+ *
+ * Purpose:
+ * Copy the specified range of the source virtual
+ * memory object, if it can be done without blocking.
+ *
+ * Results:
+ * If the copy is successful, the copy is returned in
+ * the arguments; otherwise, the arguments are not
+ * affected.
+ *
+ * In/out conditions:
+ * The object should be unlocked on entry and exit.
+ */
+
+vm_object_t vm_object_copy_delayed(); /* forward declaration */
+
+boolean_t vm_object_copy_temporary(
+ vm_object_t *_object, /* INOUT */
+ vm_offset_t *_offset, /* INOUT */
+ boolean_t *_src_needs_copy, /* OUT */
+ boolean_t *_dst_needs_copy) /* OUT */
+{
+ vm_object_t object = *_object;
+
+#ifdef lint
+ ++*_offset;
+#endif /* lint */
+
+ if (object == VM_OBJECT_NULL) {
+ *_src_needs_copy = FALSE;
+ *_dst_needs_copy = FALSE;
+ return TRUE;
+ }
+
+ /*
+ * If the object is temporary, we can perform
+ * a symmetric copy-on-write without asking.
+ */
+
+ vm_object_lock(object);
+ if (object->temporary) {
+
+ /*
+ * Shared objects use delayed copy
+ */
+ if (object->use_shared_copy) {
+
+ /*
+ * Asymmetric copy strategy. Destination
+ * must be copied (to allow copy object reuse).
+ * Source is unaffected.
+ */
+ vm_object_unlock(object);
+ object = vm_object_copy_delayed(object);
+ *_object = object;
+ *_src_needs_copy = FALSE;
+ *_dst_needs_copy = TRUE;
+ return TRUE;
+ }
+
+ /*
+ * Make another reference to the object.
+ *
+ * Leave object/offset unchanged.
+ */
+
+ assert(object->ref_count > 0);
+ object->ref_count++;
+ object->shadowed = TRUE;
+ vm_object_unlock(object);
+
+ /*
+ * Both source and destination must make
+ * shadows, and the source must be made
+ * read-only if not already.
+ */
+
+ *_src_needs_copy = TRUE;
+ *_dst_needs_copy = TRUE;
+ return TRUE;
+ }
+
+ if (object->pager_ready &&
+ (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY)) {
+ /* XXX Do something intelligent (see temporary code above) */
+ }
+ vm_object_unlock(object);
+
+ return FALSE;
+}
+
+/*
+ * Routine: vm_object_copy_call [internal]
+ *
+ * Description:
+ * Copy the specified (src_offset, size) portion
+ * of the source object (src_object), using the
+ * user-managed copy algorithm.
+ *
+ * In/out conditions:
+ * The source object must be locked on entry. It
+ * will be *unlocked* on exit.
+ *
+ * Results:
+ * If the copy is successful, KERN_SUCCESS is returned.
+ * This routine is interruptible; if a wait for
+ * a user-generated event is interrupted, MACH_SEND_INTERRUPTED
+ * is returned. Other return values indicate hard errors
+ * in creating the user-managed memory object for the copy.
+ *
+ * A new object that represents the copied virtual
+ * memory is returned in a parameter (*_result_object).
+ * If the return value indicates an error, this parameter
+ * is not valid.
+ */
+kern_return_t vm_object_copy_call(
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t size,
+ vm_object_t *_result_object) /* OUT */
+{
+ vm_offset_t src_end = src_offset + size;
+ ipc_port_t new_memory_object;
+ vm_object_t new_object;
+ vm_page_t p;
+
+ /*
+ * Set the backing object for the new
+ * temporary object.
+ */
+
+ assert(src_object->ref_count > 0);
+ src_object->ref_count++;
+ vm_object_paging_begin(src_object);
+ vm_object_unlock(src_object);
+
+ /*
+ * Create a memory object port to be associated
+ * with this new vm_object.
+ *
+ * Since the kernel has the only rights to this
+ * port, we need not hold the cache lock.
+ *
+ * Since we have the only object reference, we
+ * need not be worried about collapse operations.
+ *
+ */
+
+ new_memory_object = ipc_port_alloc_kernel();
+ if (new_memory_object == IP_NULL) {
+ panic("vm_object_copy_call: allocate memory object port");
+ /* XXX Shouldn't panic here. */
+ }
+
+ /* we hold a naked receive right for new_memory_object */
+ (void) ipc_port_make_send(new_memory_object);
+ /* now we also hold a naked send right for new_memory_object */
+
+ /*
+ * Let the memory manager know that a copy operation
+ * is in progress. Note that we're using the old
+ * memory object's ports (for which we're holding
+ * a paging reference)... the memory manager cannot
+ * yet affect the new memory object.
+ */
+
+ (void) memory_object_copy(src_object->pager,
+ src_object->pager_request,
+ src_offset, size,
+ new_memory_object);
+ /* no longer hold the naked receive right for new_memory_object */
+
+ vm_object_lock(src_object);
+ vm_object_paging_end(src_object);
+
+ /*
+ * Remove write access from all of the pages of
+ * the old memory object that we can.
+ */
+
+ queue_iterate(&src_object->memq, p, vm_page_t, listq) {
+ if (!p->fictitious &&
+ (src_offset <= p->offset) &&
+ (p->offset < src_end) &&
+ !(p->page_lock & VM_PROT_WRITE)) {
+ p->page_lock |= VM_PROT_WRITE;
+ pmap_page_protect(p->phys_addr, VM_PROT_ALL & ~p->page_lock);
+ }
+ }
+
+ vm_object_unlock(src_object);
+
+ /*
+ * Initialize the rest of the paging stuff
+ */
+
+ new_object = vm_object_enter(new_memory_object, size, FALSE);
+ new_object->shadow = src_object;
+ new_object->shadow_offset = src_offset;
+
+ /*
+ * Drop the reference for new_memory_object taken above.
+ */
+
+ ipc_port_release_send(new_memory_object);
+ /* no longer hold the naked send right for new_memory_object */
+
+ *_result_object = new_object;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: vm_object_copy_delayed [internal]
+ *
+ * Description:
+ * Copy the specified virtual memory object, using
+ * the asymmetric copy-on-write algorithm.
+ *
+ * In/out conditions:
+ * The object must be unlocked on entry.
+ *
+ * This routine will not block waiting for user-generated
+ * events. It is not interruptible.
+ */
+vm_object_t vm_object_copy_delayed(
+ vm_object_t src_object)
+{
+ vm_object_t new_copy;
+ vm_object_t old_copy;
+ vm_page_t p;
+
+ /*
+ * The user-level memory manager wants to see
+ * all of the changes to this object, but it
+ * has promised not to make any changes on its own.
+ *
+ * Perform an asymmetric copy-on-write, as follows:
+ * Create a new object, called a "copy object"
+ * to hold pages modified by the new mapping
+ * (i.e., the copy, not the original mapping).
+ * Record the original object as the backing
+ * object for the copy object. If the
+ * original mapping does not change a page,
+ * it may be used read-only by the copy.
+ * Record the copy object in the original
+ * object. When the original mapping causes
+ * a page to be modified, it must be copied
+ * to a new page that is "pushed" to the
+ * copy object.
+ * Mark the new mapping (the copy object)
+ * copy-on-write. This makes the copy
+ * object itself read-only, allowing it
+ * to be reused if the original mapping
+ * makes no changes, and simplifying the
+ * synchronization required in the "push"
+ * operation described above.
+ *
+ * The copy-on-write is said to be asymmetric because
+ * the original object is *not* marked copy-on-write.
+ * A copied page is pushed to the copy object, regardless
+ * of which party attempted to modify the page.
+ *
+ * Repeated asymmetric copy operations may be done.
+ * If the original object has not been changed since
+ * the last copy, its copy object can be reused.
+ * Otherwise, a new copy object can be inserted
+ * between the original object and its previous
+ * copy object. Since any copy object is read-only,
+ * this cannot affect the contents of the previous copy
+ * object.
+ *
+ * Note that a copy object is higher in the object
+ * tree than the original object; therefore, use of
+ * the copy object recorded in the original object
+ * must be done carefully, to avoid deadlock.
+ */
+
+ /*
+ * Allocate a new copy object before locking, even
+ * though we may not need it later.
+ */
+
+ new_copy = vm_object_allocate(src_object->size);
+
+ vm_object_lock(src_object);
+
+ /*
+ * See whether we can reuse the result of a previous
+ * copy operation.
+ */
+ Retry:
+ old_copy = src_object->copy;
+ if (old_copy != VM_OBJECT_NULL) {
+ /*
+ * Try to get the locks (out of order)
+ */
+ if (!vm_object_lock_try(old_copy)) {
+ vm_object_unlock(src_object);
+
+ simple_lock_pause(); /* wait a bit */
+
+ vm_object_lock(src_object);
+ goto Retry;
+ }
+
+ /*
+ * Determine whether the old copy object has
+ * been modified.
+ */
+
+ if (old_copy->resident_page_count == 0 &&
+ !old_copy->pager_created) {
+ /*
+ * It has not been modified.
+ *
+ * Return another reference to
+ * the existing copy-object.
+ */
+ assert(old_copy->ref_count > 0);
+ old_copy->ref_count++;
+ vm_object_unlock(old_copy);
+ vm_object_unlock(src_object);
+
+ vm_object_deallocate(new_copy);
+
+ return old_copy;
+ }
+
+ /*
+ * The copy-object is always made large enough to
+ * completely shadow the original object, since
+ * it may have several users who want to shadow
+ * the original object at different points.
+ */
+
+ assert((old_copy->shadow == src_object) &&
+ (old_copy->shadow_offset == (vm_offset_t) 0));
+
+ /*
+ * Make the old copy-object shadow the new one.
+ * It will receive no more pages from the original
+ * object.
+ */
+
+ src_object->ref_count--; /* remove ref. from old_copy */
+ assert(src_object->ref_count > 0);
+ old_copy->shadow = new_copy;
+ assert(new_copy->ref_count > 0);
+ new_copy->ref_count++;
+ vm_object_unlock(old_copy); /* done with old_copy */
+ }
+
+ /*
+ * Point the new copy at the existing object.
+ */
+
+ new_copy->shadow = src_object;
+ new_copy->shadow_offset = 0;
+ new_copy->shadowed = TRUE; /* caller must set needs_copy */
+ assert(src_object->ref_count > 0);
+ src_object->ref_count++;
+ src_object->copy = new_copy;
+
+ /*
+ * Mark all pages of the existing object copy-on-write.
+ * This object may have a shadow chain below it, but
+ * those pages will already be marked copy-on-write.
+ */
+
+ queue_iterate(&src_object->memq, p, vm_page_t, listq) {
+ if (!p->fictitious)
+ pmap_page_protect(p->phys_addr,
+ (VM_PROT_ALL & ~VM_PROT_WRITE &
+ ~p->page_lock));
+ }
+
+ vm_object_unlock(src_object);
+
+ return new_copy;
+}
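+
+/*
+ * Illustrative sketch of copy-object reuse (hypothetical caller;
+ * src_object must be unlocked):
+ *
+ *	vm_object_t c1, c2;
+ *
+ *	c1 = vm_object_copy_delayed(src_object);
+ *	c2 = vm_object_copy_delayed(src_object);
+ *
+ * If src_object is not modified between the two calls, c2 is just
+ * another reference to c1; otherwise a new copy object is
+ * interposed and the old one is made to shadow it.
+ */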
+
+/*
+ * Routine: vm_object_copy_strategically
+ *
+ * Purpose:
+ * Perform a copy according to the source object's
+ * declared strategy. This operation may block,
+ * and may be interrupted.
+ */
+kern_return_t vm_object_copy_strategically(
+ register
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t size,
+ vm_object_t *dst_object, /* OUT */
+ vm_offset_t *dst_offset, /* OUT */
+ boolean_t *dst_needs_copy) /* OUT */
+{
+ kern_return_t result = KERN_SUCCESS; /* to quiet gcc warnings */
+ boolean_t interruptible = TRUE; /* XXX */
+
+ assert(src_object != VM_OBJECT_NULL);
+
+ vm_object_lock(src_object);
+
+ /* XXX assert(!src_object->temporary); JSB FIXME */
+
+ /*
+ * The copy strategy is only valid if the memory manager
+ * is "ready".
+ */
+
+ while (!src_object->pager_ready) {
+ vm_object_wait( src_object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ interruptible);
+ if (interruptible &&
+ (current_thread()->wait_result != THREAD_AWAKENED)) {
+ *dst_object = VM_OBJECT_NULL;
+ *dst_offset = 0;
+ *dst_needs_copy = FALSE;
+ return MACH_SEND_INTERRUPTED;
+ }
+ vm_object_lock(src_object);
+ }
+
+ /*
+ * The object may be temporary (even though it is external).
+ * If so, do a symmetric copy.
+ */
+
+ if (src_object->temporary) {
+ /*
+ * XXX
+ * This does not count as intelligent!
+ * This buys us the object->temporary optimizations,
+ * but we aren't using a symmetric copy,
+ * which may confuse the vm code. The correct thing
+ * to do here is to figure out what to call to get
+ * a temporary shadowing set up.
+ */
+ src_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+ }
+
+ /*
+ * The object is permanent. Use the appropriate copy strategy.
+ */
+
+ switch (src_object->copy_strategy) {
+ case MEMORY_OBJECT_COPY_NONE:
+ if ((result = vm_object_copy_slowly(
+ src_object,
+ src_offset,
+ size,
+ interruptible,
+ dst_object))
+ == KERN_SUCCESS) {
+ *dst_offset = 0;
+ *dst_needs_copy = FALSE;
+ }
+ break;
+
+ case MEMORY_OBJECT_COPY_CALL:
+ if ((result = vm_object_copy_call(
+ src_object,
+ src_offset,
+ size,
+ dst_object))
+ == KERN_SUCCESS) {
+ *dst_offset = 0;
+ *dst_needs_copy = FALSE;
+ }
+ break;
+
+ case MEMORY_OBJECT_COPY_DELAY:
+ vm_object_unlock(src_object);
+ *dst_object = vm_object_copy_delayed(src_object);
+ *dst_offset = src_offset;
+ *dst_needs_copy = TRUE;
+
+ result = KERN_SUCCESS;
+ break;
+ }
+
+ return result;
+}
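+
+/*
+ * Illustrative sketch (hypothetical caller, e.g. a map-copy path):
+ * the source object is passed in unlocked with a reference held,
+ * and the caller must honor *dst_needs_copy on success.
+ *
+ *	vm_object_t dst;
+ *	vm_offset_t dst_off;
+ *	boolean_t needs_copy;
+ *	kern_return_t kr;
+ *
+ *	kr = vm_object_copy_strategically(src_object, offset, size,
+ *					  &dst, &dst_off, &needs_copy);
+ *	if (kr == KERN_SUCCESS && needs_copy)
+ *		... mark the new mapping copy-on-write ...
+ */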
+
+/*
+ * vm_object_shadow:
+ *
+ * Create a new object which is backed by the
+ * specified existing object range. The source
+ * object reference is deallocated.
+ *
+ * The new object and offset into that object
+ * are returned in the source parameters.
+ */
+
+void vm_object_shadow(
+ vm_object_t *object, /* IN/OUT */
+ vm_offset_t *offset, /* IN/OUT */
+ vm_size_t length)
+{
+ register vm_object_t source;
+ register vm_object_t result;
+
+ source = *object;
+
+ /*
+ * Allocate a new object with the given length
+ */
+
+ if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
+ panic("vm_object_shadow: no object for shadowing");
+
+ /*
+ * The new object shadows the source object, adding
+ * a reference to it. Our caller changes its reference
+ * to point to the new object, removing a reference to
+ * the source object. Net result: no change of reference
+ * count.
+ */
+ result->shadow = source;
+
+ /*
+ * Store the offset into the source object,
+ * and fix up the offset into the new object.
+ */
+
+ result->shadow_offset = *offset;
+
+ /*
+ * Return the new object and offset
+ */
+
+ *offset = 0;
+ *object = result;
+}
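+
+/*
+ * Illustrative sketch (hypothetical caller, e.g. when a mapping
+ * marked needs-copy is first written; the names entry_object,
+ * entry_offset and entry_size are assumptions of this sketch):
+ *
+ *	vm_object_t object = entry_object;
+ *	vm_offset_t offset = entry_offset;
+ *
+ *	vm_object_shadow(&object, &offset, entry_size);
+ *	... object is now the new shadow, offset is 0, and the old
+ *	    object is reachable through object->shadow ...
+ */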
+
+/*
+ * The relationship between vm_object structures and
+ * the memory_object ports requires careful synchronization.
+ *
+ * All associations are created by vm_object_enter. All three
+ * port fields are filled in, as follows:
+ * pager: the memory_object port itself, supplied by
+ * the user requesting a mapping (or the kernel,
+ * when initializing internal objects); the
+ * kernel simulates holding send rights by keeping
+ * a port reference;
+ * pager_request:
+ * pager_name:
+ * the memory object control and name ports,
+ * created by the kernel; the kernel holds
+ * receive (and ownership) rights to these
+ * ports, but no other references.
+ * All of the ports are referenced by their global names.
+ *
+ * When initialization is complete, the "initialized" field
+ * is asserted. Other mappings using a particular memory object,
+ * and any references to the vm_object gained through the
+ * port association must wait for this initialization to occur.
+ *
+ * In order to allow the memory manager to set attributes before
+ * requests (notably virtual copy operations, but also data or
+ * unlock requests) are made, a "ready" attribute is made available.
+ * Only the memory manager may affect the value of this attribute.
+ * Its value does not affect critical kernel functions, such as
+ * internal object initialization or destruction. [Furthermore,
+ * memory objects created by the kernel are assumed to be ready
+ * immediately; the default memory manager need not explicitly
+ * set the "ready" attribute.]
+ *
+ * [Both the "initialized" and "ready" attribute wait conditions
+ * use the "pager" field as the wait event.]
+ *
+ * The port associations can be broken down by any of the
+ * following routines:
+ * vm_object_terminate:
+ * No references to the vm_object remain, and
+ * the object cannot (or will not) be cached.
+ * This is the normal case, and is done even
+ * if one of the other cases has already
+ * occurred.
+ * vm_object_destroy:
+ * The memory_object port has been destroyed,
+ * meaning that the kernel cannot flush dirty
+ * pages or request new data or unlock existing
+ * data.
+ * memory_object_destroy:
+ * The memory manager has requested that the
+ * kernel relinquish rights to the memory object
+ * port. [The memory manager may not want to
+ * destroy the port, but may wish to refuse or
+ * tear down existing memory mappings.]
+ * Each routine that breaks an association must break all of
+ * them at once. At some later time, that routine must clear
+ * the vm_object port fields and release the port rights.
+ * [Furthermore, each routine must cope with the simultaneous
+ * or previous operations of the others.]
+ *
+ * In addition to the lock on the object, the vm_object_cache_lock
+ * governs the port associations. References gained through the
+ * port association require use of the cache lock.
+ *
+ * Because the port fields may be cleared spontaneously, they
+ * cannot be used to determine whether a memory object has
+ * ever been associated with a particular vm_object. [This
+ * knowledge is important to the shadow object mechanism.]
+ * For this reason, an additional "created" attribute is
+ * provided.
+ *
+ * During various paging operations, the port values found in the
+ * vm_object must be valid. To prevent these port rights from being
+ * released, and to prevent the port associations from changing
+ * (other than being removed, i.e., made null), routines may use
+ * the vm_object_paging_begin/end routines [actually, macros].
+ * The implementation uses the "paging_in_progress" and "wanted" fields.
+ * [Operations that alter the validity of the port values include the
+ * termination routines and vm_object_collapse.]
+ */
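+
+/*
+ * Illustrative sketch of the paging-reference discipline described
+ * above (the same pattern appears in vm_object_copy_call and
+ * vm_object_pager_create):
+ *
+ *	vm_object_lock(object);
+ *	vm_object_paging_begin(object);
+ *	vm_object_unlock(object);
+ *
+ *	... the port values in the object may be used here; they can
+ *	    be nulled but not released or reassigned ...
+ *
+ *	vm_object_lock(object);
+ *	vm_object_paging_end(object);
+ *	vm_object_unlock(object);
+ */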
+
+vm_object_t vm_object_lookup(
+ ipc_port_t port)
+{
+ vm_object_t object = VM_OBJECT_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+#if NORMA_VM
+ (ip_kotype(port) == IKOT_PAGER)) {
+#else /* NORMA_VM */
+ (ip_kotype(port) == IKOT_PAGING_REQUEST)) {
+#endif /* NORMA_VM */
+ vm_object_cache_lock();
+ object = (vm_object_t) port->ip_kobject;
+ vm_object_lock(object);
+
+ assert(object->alive);
+
+ if (object->ref_count == 0) {
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+ }
+
+ object->ref_count++;
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ }
+ ip_unlock(port);
+ }
+
+ return object;
+}
+
+vm_object_t vm_object_lookup_name(
+ ipc_port_t port)
+{
+ vm_object_t object = VM_OBJECT_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port) &&
+ (ip_kotype(port) == IKOT_PAGING_NAME)) {
+ vm_object_cache_lock();
+ object = (vm_object_t) port->ip_kobject;
+ vm_object_lock(object);
+
+ assert(object->alive);
+
+ if (object->ref_count == 0) {
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+ }
+
+ object->ref_count++;
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ }
+ ip_unlock(port);
+ }
+
+ return object;
+}
+
+void vm_object_destroy(
+ ipc_port_t pager)
+{
+ vm_object_t object;
+ pager_request_t old_request;
+ ipc_port_t old_name;
+
+ /*
+ * Perform essentially the same operations as in vm_object_lookup,
+ * except that this time we look up based on the memory_object
+ * port, not the control port.
+ */
+ vm_object_cache_lock();
+ if (ip_kotype(pager) != IKOT_PAGER) {
+ vm_object_cache_unlock();
+ return;
+ }
+
+ object = (vm_object_t) pager->ip_kobject;
+ vm_object_lock(object);
+ if (object->ref_count == 0) {
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+ }
+ object->ref_count++;
+
+ object->can_persist = FALSE;
+
+ assert(object->pager == pager);
+
+ /*
+ * Remove the port associations.
+ *
+ * Note that the memory_object itself is dead, so
+ * we don't bother with it.
+ */
+
+ object->pager = IP_NULL;
+ vm_object_remove(object);
+
+ old_request = object->pager_request;
+ object->pager_request = PAGER_REQUEST_NULL;
+
+ old_name = object->pager_name;
+ object->pager_name = IP_NULL;
+
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+
+ /*
+ * Clean up the port references. Note that there's no
+ * point in trying the memory_object_terminate call
+ * because the memory_object itself is dead.
+ */
+
+ ipc_port_release_send(pager);
+#if !NORMA_VM
+ if (old_request != IP_NULL)
+ ipc_port_dealloc_kernel(old_request);
+#endif /* !NORMA_VM */
+ if (old_name != IP_NULL)
+#if NORMA_VM
+ ipc_port_release_send(old_name);
+#else /* NORMA_VM */
+ ipc_port_dealloc_kernel(old_name);
+#endif /* NORMA_VM */
+
+ /*
+ * Restart pending page requests
+ */
+
+ vm_object_abort_activity(object);
+
+ /*
+ * Lose the object reference.
+ */
+
+ vm_object_deallocate(object);
+}
+
+boolean_t vm_object_accept_old_init_protocol = FALSE;
+
+/*
+ * Routine: vm_object_enter
+ * Purpose:
+ * Find a VM object corresponding to the given
+ * pager; if no such object exists, create one,
+ * and initialize the pager.
+ */
+vm_object_t vm_object_enter(
+ ipc_port_t pager,
+ vm_size_t size,
+ boolean_t internal)
+{
+ register
+ vm_object_t object;
+ vm_object_t new_object;
+ boolean_t must_init;
+ ipc_kobject_type_t po;
+
+restart:
+ if (!IP_VALID(pager))
+ return vm_object_allocate(size);
+
+ new_object = VM_OBJECT_NULL;
+ must_init = FALSE;
+
+ /*
+ * Look for an object associated with this port.
+ */
+
+ vm_object_cache_lock();
+ for (;;) {
+ po = ip_kotype(pager);
+
+ /*
+ * If a previous object is being terminated,
+ * we must wait for the termination message
+ * to be queued.
+ *
+ * We set kobject to a non-null value to let the
+ * terminator know that someone is waiting.
+ * Among the possibilities is that the port
+ * could die while we're waiting. Must restart
+ * instead of continuing the loop.
+ */
+
+ if (po == IKOT_PAGER_TERMINATING) {
+ pager->ip_kobject = (ipc_kobject_t) pager;
+ assert_wait((event_t) pager, FALSE);
+ vm_object_cache_unlock();
+ thread_block((void (*)()) 0);
+ goto restart;
+ }
+
+ /*
+ * Bail if there is already a kobject associated
+ * with the pager port.
+ */
+ if (po != IKOT_NONE) {
+ break;
+ }
+
+ /*
+ * We must unlock to create a new object;
+ * if we do so, we must try the lookup again.
+ */
+
+ if (new_object == VM_OBJECT_NULL) {
+ vm_object_cache_unlock();
+ new_object = vm_object_allocate(size);
+ vm_object_cache_lock();
+ } else {
+ /*
+ * Lookup failed twice, and we have something
+ * to insert; set the object.
+ */
+
+ ipc_kobject_set(pager,
+ (ipc_kobject_t) new_object,
+ IKOT_PAGER);
+ new_object = VM_OBJECT_NULL;
+ must_init = TRUE;
+ }
+ }
+
+ if (internal)
+ must_init = TRUE;
+
+ /*
+ * It's only good if it's a VM object!
+ */
+
+ object = (po == IKOT_PAGER) ? (vm_object_t) pager->ip_kobject
+ : VM_OBJECT_NULL;
+
+ if ((object != VM_OBJECT_NULL) && !must_init) {
+ vm_object_lock(object);
+ if (object->ref_count == 0) {
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+ }
+ object->ref_count++;
+ vm_object_unlock(object);
+
+ vm_stat.hits++;
+ }
+ assert((object == VM_OBJECT_NULL) || (object->ref_count > 0) ||
+ ((object->paging_in_progress != 0) && internal));
+
+ vm_stat.lookups++;
+
+ vm_object_cache_unlock();
+
+ /*
+ * If we raced to create a vm_object but lost, let's
+ * throw away ours.
+ */
+
+ if (new_object != VM_OBJECT_NULL)
+ vm_object_deallocate(new_object);
+
+ if (object == VM_OBJECT_NULL)
+ return(object);
+
+ if (must_init) {
+ /*
+ * Copy the naked send right we were given.
+ */
+
+ pager = ipc_port_copy_send(pager);
+ if (!IP_VALID(pager))
+ panic("vm_object_enter: port died"); /* XXX */
+
+ object->pager_created = TRUE;
+ object->pager = pager;
+
+#if NORMA_VM
+
+ /*
+ * Let the xmm system know that we want to use the pager.
+ *
+ * Name port will be provided by the xmm system
+ * when set_attributes_common is called.
+ */
+
+ object->internal = internal;
+ object->pager_ready = internal;
+ if (internal) {
+ assert(object->temporary);
+ } else {
+ object->temporary = FALSE;
+ }
+ object->pager_name = IP_NULL;
+
+ (void) xmm_memory_object_init(object);
+#else /* NORMA_VM */
+
+ /*
+ * Allocate request port.
+ */
+
+ object->pager_request = ipc_port_alloc_kernel();
+ if (object->pager_request == IP_NULL)
+ panic("vm_object_enter: pager request alloc");
+
+ ipc_kobject_set(object->pager_request,
+ (ipc_kobject_t) object,
+ IKOT_PAGING_REQUEST);
+
+ /*
+ * Let the pager know we're using it.
+ */
+
+ if (internal) {
+ /* acquire a naked send right for the DMM */
+ ipc_port_t DMM = memory_manager_default_reference();
+
+ /* mark the object internal */
+ object->internal = TRUE;
+ assert(object->temporary);
+
+ /* default-pager objects are ready immediately */
+ object->pager_ready = TRUE;
+
+ /* consumes the naked send right for DMM */
+ (void) memory_object_create(DMM,
+ pager,
+ object->size,
+ object->pager_request,
+ object->pager_name,
+ PAGE_SIZE);
+ } else {
+ /* the object is external and not temporary */
+ object->internal = FALSE;
+ object->temporary = FALSE;
+
+ /* user pager objects are not ready until marked so */
+ object->pager_ready = FALSE;
+
+ (void) memory_object_init(pager,
+ object->pager_request,
+ object->pager_name,
+ PAGE_SIZE);
+
+ }
+#endif /* NORMA_VM */
+
+ vm_object_lock(object);
+ object->pager_initialized = TRUE;
+
+ if (vm_object_accept_old_init_protocol)
+ object->pager_ready = TRUE;
+
+ vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
+ } else {
+ vm_object_lock(object);
+ }
+ /*
+ * [At this point, the object must be locked]
+ */
+
+ /*
+ * Wait for the work above to be done by the first
+ * thread to map this object.
+ */
+
+ while (!object->pager_initialized) {
+ vm_object_wait( object,
+ VM_OBJECT_EVENT_INITIALIZED,
+ FALSE);
+ vm_object_lock(object);
+ }
+ vm_object_unlock(object);
+
+ return object;
+}
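+
+/*
+ * Illustrative sketch (hypothetical caller, e.g. the vm_map path
+ * for a user-supplied memory object): the caller's send right for
+ * "pager" is not consumed; the returned object carries a reference
+ * and has completed pager initialization.
+ *
+ *	vm_object_t object;
+ *
+ *	object = vm_object_enter(pager, size, FALSE);
+ *	if (object == VM_OBJECT_NULL)
+ *		... the port is already in use for something other
+ *		    than paging; fail the mapping request ...
+ */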
+
+/*
+ * Routine: vm_object_pager_create
+ * Purpose:
+ * Create a memory object for an internal object.
+ * In/out conditions:
+ * The object is locked on entry and exit;
+ * it may be unlocked within this call.
+ * Limitations:
+ * Only one thread may be performing a
+ * vm_object_pager_create on an object at
+ * a time. Presumably, only the pageout
+ * daemon will be using this routine.
+ */
+void vm_object_pager_create(
+ register
+ vm_object_t object)
+{
+ ipc_port_t pager;
+
+ if (object->pager_created) {
+ /*
+ * Someone else got to it first...
+ * wait for them to finish initializing
+ */
+
+ while (!object->pager_initialized) {
+ vm_object_wait( object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ FALSE);
+ vm_object_lock(object);
+ }
+ return;
+ }
+
+ /*
+ * Indicate that a memory object has been assigned
+ * before dropping the lock, to prevent a race.
+ */
+
+ object->pager_created = TRUE;
+
+ /*
+ * Prevent collapse or termination by
+ * holding a paging reference
+ */
+
+ vm_object_paging_begin(object);
+ vm_object_unlock(object);
+
+#if MACH_PAGEMAP
+ object->existence_info = vm_external_create(
+ object->size +
+ object->paging_offset);
+ assert((object->size + object->paging_offset) >=
+ object->size);
+#endif /* MACH_PAGEMAP */
+
+ /*
+ * Create the pager, and associate with it
+ * this object.
+ *
+ * Note that we only make the port association
+ * so that vm_object_enter can properly look up
+ * the object to complete the initialization...
+ * we do not expect any user to ever map this
+ * object.
+ *
+ * Since the kernel has the only rights to the
+ * port, it's safe to install the association
+ * without holding the cache lock.
+ */
+
+ pager = ipc_port_alloc_kernel();
+ if (pager == IP_NULL)
+ panic("vm_object_pager_create: allocate pager port");
+
+ (void) ipc_port_make_send(pager);
+ ipc_kobject_set(pager, (ipc_kobject_t) object, IKOT_PAGER);
+
+ /*
+ * Initialize the rest of the paging stuff
+ */
+
+ if (vm_object_enter(pager, object->size, TRUE) != object)
+ panic("vm_object_pager_create: mismatch");
+
+ /*
+ * Drop the naked send right taken above.
+ */
+
+ ipc_port_release_send(pager);
+
+ /*
+ * Release the paging reference
+ */
+
+ vm_object_lock(object);
+ vm_object_paging_end(object);
+}
+
+/*
+ * Routine: vm_object_remove
+ * Purpose:
+ * Eliminate the pager/object association
+ * for this pager.
+ * Conditions:
+ * The object cache must be locked.
+ */
+void vm_object_remove(
+ vm_object_t object)
+{
+ ipc_port_t port;
+
+ if ((port = object->pager) != IP_NULL) {
+ if (ip_kotype(port) == IKOT_PAGER)
+ ipc_kobject_set(port, IKO_NULL,
+ IKOT_PAGER_TERMINATING);
+ else if (ip_kotype(port) != IKOT_NONE)
+ panic("vm_object_remove: bad object port");
+ }
+#if !NORMA_VM
+ if ((port = object->pager_request) != IP_NULL) {
+ if (ip_kotype(port) == IKOT_PAGING_REQUEST)
+ ipc_kobject_set(port, IKO_NULL, IKOT_NONE);
+ else if (ip_kotype(port) != IKOT_NONE)
+ panic("vm_object_remove: bad request port");
+ }
+ if ((port = object->pager_name) != IP_NULL) {
+ if (ip_kotype(port) == IKOT_PAGING_NAME)
+ ipc_kobject_set(port, IKO_NULL, IKOT_NONE);
+ else if (ip_kotype(port) != IKOT_NONE)
+ panic("vm_object_remove: bad name port");
+ }
+#endif /* !NORMA_VM */
+}
+
+/*
+ * Global variables for vm_object_collapse():
+ *
+ * Counts for normal collapses and bypasses.
+ * Debugging variables, to watch or disable collapse.
+ */
+long object_collapses = 0;
+long object_bypasses = 0;
+
+int vm_object_collapse_debug = 0;
+boolean_t vm_object_collapse_allowed = TRUE;
+boolean_t vm_object_collapse_bypass_allowed = TRUE;
+
+/*
+ * vm_object_collapse:
+ *
+ * Collapse an object with the object backing it.
+ * Pages in the backing object are moved into the
+ * parent, and the backing object is deallocated.
+ *
+ * Requires that the object be locked and the page
+ * queues be unlocked. May unlock/relock the object,
+ * so the caller should hold a reference for the object.
+ */
+void vm_object_collapse(
+ register vm_object_t object)
+{
+ register vm_object_t backing_object;
+ register vm_offset_t backing_offset;
+ register vm_size_t size;
+ register vm_offset_t new_offset;
+ register vm_page_t p, pp;
+ ipc_port_t old_name_port;
+
+ if (!vm_object_collapse_allowed)
+ return;
+
+ while (TRUE) {
+ /*
+ * Verify that the conditions are right for collapse:
+ *
+ * The object exists and no pages in it are currently
+ * being paged out (or have ever been paged out).
+ *
+ * This check is probably overkill -- if a memory
+ * object has not been created, the fault handler
+ * shouldn't release the object lock while paging
+ * is in progress or absent pages exist.
+ */
+ if (object == VM_OBJECT_NULL ||
+ object->pager_created ||
+ object->paging_in_progress != 0 ||
+ object->absent_count != 0)
+ return;
+
+ /*
+ * There is a backing object, and
+ */
+
+ if ((backing_object = object->shadow) == VM_OBJECT_NULL)
+ return;
+
+ vm_object_lock(backing_object);
+ /*
+ * ...
+ * The backing object is not read_only,
+ * and no pages in the backing object are
+ * currently being paged out.
+ * The backing object is internal.
+ *
+ * XXX It may be sufficient for the backing
+ * XXX object to be temporary.
+ */
+
+ if (!backing_object->internal ||
+ backing_object->paging_in_progress != 0) {
+ vm_object_unlock(backing_object);
+ return;
+ }
+
+ /*
+ * The backing object can't be a copy-object:
+ * the shadow_offset for the copy-object must stay
+ * as 0. Furthermore (for the 'we have all the
+ * pages' case), if we bypass backing_object and
+ * just shadow the next object in the chain, old
+ * pages from that object would then have to be copied
+ * BOTH into the (former) backing_object and into the
+ * parent object.
+ */
+ if (backing_object->shadow != VM_OBJECT_NULL &&
+ backing_object->shadow->copy != VM_OBJECT_NULL) {
+ vm_object_unlock(backing_object);
+ return;
+ }
+
+ /*
+ * We know that we can either collapse the backing
+ * object (if the parent is the only reference to
+ * it) or (perhaps) remove the parent's reference
+ * to it.
+ */
+
+ backing_offset = object->shadow_offset;
+ size = object->size;
+
+ /*
+ * If there is exactly one reference to the backing
+ * object, we can collapse it into the parent.
+ */
+
+ if (backing_object->ref_count == 1) {
+ if (!vm_object_cache_lock_try()) {
+ vm_object_unlock(backing_object);
+ return;
+ }
+
+ /*
+ * We can collapse the backing object.
+ *
+ * Move all in-memory pages from backing_object
+ * to the parent. Pages that have been paged out
+ * will be overwritten by any of the parent's
+ * pages that shadow them.
+ */
+
+ while (!queue_empty(&backing_object->memq)) {
+
+ p = (vm_page_t)
+ queue_first(&backing_object->memq);
+
+ new_offset = (p->offset - backing_offset);
+
+ assert(!p->busy || p->absent);
+
+ /*
+ * If the parent has a page here, or if
+ * this page falls outside the parent,
+ * dispose of it.
+ *
+ * Otherwise, move it as planned.
+ */
+
+ if (p->offset < backing_offset ||
+ new_offset >= size) {
+ vm_page_lock_queues();
+ vm_page_free(p);
+ vm_page_unlock_queues();
+ } else {
+ pp = vm_page_lookup(object, new_offset);
+ if (pp != VM_PAGE_NULL && !pp->absent) {
+ /*
+ * Parent object has a real page.
+ * Throw away the backing object's
+ * page.
+ */
+ vm_page_lock_queues();
+ vm_page_free(p);
+ vm_page_unlock_queues();
+ }
+ else {
+ if (pp != VM_PAGE_NULL) {
+ /*
+ * Parent has an absent page...
+ * it's not being paged in, so
+ * it must really be missing from
+ * the parent.
+ *
+ * Throw out the absent page...
+ * any faults looking for that
+ * page will restart with the new
+ * one.
+ */
+
+ /*
+ * This should never happen -- the
+ * parent cannot have ever had an
+ * external memory object, and thus
+ * cannot have absent pages.
+ */
+ panic("vm_object_collapse: bad case");
+
+ vm_page_lock_queues();
+ vm_page_free(pp);
+ vm_page_unlock_queues();
+
+ /*
+ * Fall through to move the backing
+ * object's page up.
+ */
+ }
+ /*
+ * Parent now has no page.
+ * Move the backing object's page up.
+ */
+ vm_page_rename(p, object, new_offset);
+ }
+ }
+ }
+
+ /*
+ * Move the pager from backing_object to object.
+ *
+ * XXX We're only using part of the paging space
+ * for keeps now... we ought to discard the
+ * unused portion.
+ */
+
+ switch (vm_object_collapse_debug) {
+ case 0:
+ break;
+ case 1:
+ if ((backing_object->pager == IP_NULL) &&
+ (backing_object->pager_request ==
+ PAGER_REQUEST_NULL))
+ break;
+ /* Fall through to... */
+
+ default:
+ printf("vm_object_collapse: %#x (pager %#x, request %#x) up to %#x\n",
+ backing_object, backing_object->pager, backing_object->pager_request,
+ object);
+ if (vm_object_collapse_debug > 2)
+ Debugger("vm_object_collapse");
+ }
+
+ object->pager = backing_object->pager;
+ if (object->pager != IP_NULL)
+ ipc_kobject_set(object->pager,
+ (ipc_kobject_t) object,
+ IKOT_PAGER);
+ object->pager_initialized = backing_object->pager_initialized;
+ object->pager_ready = backing_object->pager_ready;
+ object->pager_created = backing_object->pager_created;
+
+ object->pager_request = backing_object->pager_request;
+#if NORMA_VM
+ old_name_port = object->pager_name;
+ object->pager_name = backing_object->pager_name;
+#else /* NORMA_VM */
+ if (object->pager_request != IP_NULL)
+ ipc_kobject_set(object->pager_request,
+ (ipc_kobject_t) object,
+ IKOT_PAGING_REQUEST);
+ old_name_port = object->pager_name;
+ if (old_name_port != IP_NULL)
+ ipc_kobject_set(old_name_port,
+ IKO_NULL, IKOT_NONE);
+ object->pager_name = backing_object->pager_name;
+ if (object->pager_name != IP_NULL)
+ ipc_kobject_set(object->pager_name,
+ (ipc_kobject_t) object,
+ IKOT_PAGING_NAME);
+#endif /* NORMA_VM */
+
+ vm_object_cache_unlock();
+
+ /*
+ * If there is no pager, leave paging-offset alone.
+ */
+ if (object->pager != IP_NULL)
+ object->paging_offset =
+ backing_object->paging_offset +
+ backing_offset;
+
+#if MACH_PAGEMAP
+ assert(object->existence_info == VM_EXTERNAL_NULL);
+ object->existence_info = backing_object->existence_info;
+#endif /* MACH_PAGEMAP */
+
+ /*
+ * Object now shadows whatever backing_object did.
+ * Note that the reference to backing_object->shadow
+ * moves from within backing_object to within object.
+ */
+
+ object->shadow = backing_object->shadow;
+ object->shadow_offset += backing_object->shadow_offset;
+ if (object->shadow != VM_OBJECT_NULL &&
+ object->shadow->copy != VM_OBJECT_NULL) {
+ panic("vm_object_collapse: we collapsed a copy-object!");
+ }
+ /*
+ * Discard backing_object.
+ *
+ * Since the backing object has no pages, no
+ * pager left, and no object references within it,
+ * all that is necessary is to dispose of it.
+ */
+
+ assert(
+ (backing_object->ref_count == 1) &&
+ (backing_object->resident_page_count == 0) &&
+ (backing_object->paging_in_progress == 0)
+ );
+
+ assert(backing_object->alive);
+ backing_object->alive = FALSE;
+ vm_object_unlock(backing_object);
+
+ vm_object_unlock(object);
+ if (old_name_port != IP_NULL)
+#if NORMA_VM
+ ipc_port_release_send(old_name_port);
+#else /* NORMA_VM */
+ ipc_port_dealloc_kernel(old_name_port);
+#endif /* NORMA_VM */
+ zfree(vm_object_zone, (vm_offset_t) backing_object);
+ vm_object_lock(object);
+
+ object_collapses++;
+ }
+ else {
+ if (!vm_object_collapse_bypass_allowed) {
+ vm_object_unlock(backing_object);
+ return;
+ }
+
+ /*
+ * If all of the pages in the backing object are
+ * shadowed by the parent object, the parent
+ * object no longer has to shadow the backing
+ * object; it can shadow the next one in the
+ * chain.
+ *
+ * The backing object must not be paged out - we'd
+ * have to check all of the paged-out pages, as
+ * well.
+ */
+
+ if (backing_object->pager_created) {
+ vm_object_unlock(backing_object);
+ return;
+ }
+
+ /*
+ * Should have a check for a 'small' number
+ * of pages here.
+ */
+
+ queue_iterate(&backing_object->memq, p,
+ vm_page_t, listq)
+ {
+ new_offset = (p->offset - backing_offset);
+
+ /*
+ * If the parent has a page here, or if
+ * this page falls outside the parent,
+ * keep going.
+ *
+ * Otherwise, the backing_object must be
+ * left in the chain.
+ */
+
+ if (p->offset >= backing_offset &&
+ new_offset <= size &&
+ (pp = vm_page_lookup(object, new_offset))
+ == VM_PAGE_NULL) {
+ /*
+ * Page still needed.
+ * Can't go any further.
+ */
+ vm_object_unlock(backing_object);
+ return;
+ }
+ }
+
+ /*
+ * Make the parent shadow the next object
+ * in the chain. Deallocating backing_object
+ * will not remove it, since its reference
+ * count is at least 2.
+ */
+
+ vm_object_reference(object->shadow = backing_object->shadow);
+ object->shadow_offset += backing_object->shadow_offset;
+
+ /*
+ * Backing object might have had a copy pointer
+ * to us. If it did, clear it.
+ */
+ if (backing_object->copy == object)
+ backing_object->copy = VM_OBJECT_NULL;
+
+ /*
+ * Drop the reference count on backing_object.
+ * Since its ref_count was at least 2, it
+ * will not vanish; so we don't need to call
+ * vm_object_deallocate.
+ */
+ backing_object->ref_count--;
+ assert(backing_object->ref_count > 0);
+ vm_object_unlock(backing_object);
+
+ object_bypasses ++;
+
+ }
+
+ /*
+ * Try again with this object's new backing object.
+ */
+ }
+}
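+
+/*
+ * Editor's note -- illustrative sketch only, not part of the original
+ * file.  It shows how a caller is expected to satisfy the conditions
+ * stated above: hold an extra reference (the routine may unlock and
+ * relock the object) and call with the object locked and the page
+ * queues unlocked.  "obj" is a hypothetical vm_object_t.
+ */
+#if 0
+	vm_object_reference(obj);	/* survive a possible unlock/relock */
+	vm_object_lock(obj);
+	vm_object_collapse(obj);
+	vm_object_unlock(obj);
+	vm_object_deallocate(obj);	/* drop the extra reference */
+#endif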
+
+/*
+ * Routine: vm_object_page_remove: [internal]
+ * Purpose:
+ * Removes all physical pages in the specified
+ * object range from the object's list of pages.
+ *
+ * In/out conditions:
+ * The object must be locked.
+ */
+unsigned int vm_object_page_remove_lookup = 0;
+unsigned int vm_object_page_remove_iterate = 0;
+
+void vm_object_page_remove(
+ register vm_object_t object,
+ register vm_offset_t start,
+ register vm_offset_t end)
+{
+ register vm_page_t p, next;
+
+ /*
+ * One and two page removals are most popular.
+ * The factor of 16 here is somewhat arbitrary.
+ * It balances vm_page_lookup vs iteration.
+ */
+
+ if (atop(end - start) < (unsigned)object->resident_page_count/16) {
+ vm_object_page_remove_lookup++;
+
+ for (; start < end; start += PAGE_SIZE) {
+ p = vm_page_lookup(object, start);
+ if (p != VM_PAGE_NULL) {
+ if (!p->fictitious)
+ pmap_page_protect(p->phys_addr,
+ VM_PROT_NONE);
+ vm_page_lock_queues();
+ vm_page_free(p);
+ vm_page_unlock_queues();
+ }
+ }
+ } else {
+ vm_object_page_remove_iterate++;
+
+ p = (vm_page_t) queue_first(&object->memq);
+ while (!queue_end(&object->memq, (queue_entry_t) p)) {
+ next = (vm_page_t) queue_next(&p->listq);
+ if ((start <= p->offset) && (p->offset < end)) {
+ if (!p->fictitious)
+ pmap_page_protect(p->phys_addr,
+ VM_PROT_NONE);
+ vm_page_lock_queues();
+ vm_page_free(p);
+ vm_page_unlock_queues();
+ }
+ p = next;
+ }
+ }
+}
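+
+/*
+ * Editor's note -- illustrative arithmetic, not part of the original
+ * file.  With the 1/16 factor above, an object holding 256 resident
+ * pages takes the vm_page_lookup path only for removals of fewer than
+ * 16 pages (atop(end - start) < 256/16); anything larger walks the
+ * object's memq once instead of probing the hash for every page.
+ */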
+
+/*
+ * Routine: vm_object_coalesce
+ * Function: Coalesces two objects backing up adjoining
+ * regions of memory into a single object.
+ *
+ * returns TRUE if objects were combined.
+ *
+ * NOTE: Only works at the moment if the second object is NULL -
+ * if it's not, which object do we lock first?
+ *
+ * Parameters:
+ * prev_object First object to coalesce
+ * prev_offset Offset into prev_object
+ * next_object Second object to coalesce
+ * next_offset Offset into next_object
+ *
+ * prev_size Size of reference to prev_object
+ * next_size Size of reference to next_object
+ *
+ * Conditions:
+ * The object must *not* be locked.
+ */
+
+boolean_t vm_object_coalesce(
+ register vm_object_t prev_object,
+ vm_object_t next_object,
+ vm_offset_t prev_offset,
+ vm_offset_t next_offset,
+ vm_size_t prev_size,
+ vm_size_t next_size)
+{
+ vm_size_t newsize;
+
+#ifdef lint
+ next_offset++;
+#endif /* lint */
+
+ if (next_object != VM_OBJECT_NULL) {
+ return FALSE;
+ }
+
+ if (prev_object == VM_OBJECT_NULL) {
+ return TRUE;
+ }
+
+ vm_object_lock(prev_object);
+
+ /*
+ * Try to collapse the object first
+ */
+ vm_object_collapse(prev_object);
+
+ /*
+ * Can't coalesce if pages not mapped to
+ * prev_entry may be in use anyway:
+ * . more than one reference
+ * . paged out
+ * . shadows another object
+ * . has a copy elsewhere
+ * . paging references (pages might be in page-list)
+ */
+
+ if ((prev_object->ref_count > 1) ||
+ prev_object->pager_created ||
+ (prev_object->shadow != VM_OBJECT_NULL) ||
+ (prev_object->copy != VM_OBJECT_NULL) ||
+ (prev_object->paging_in_progress != 0)) {
+ vm_object_unlock(prev_object);
+ return FALSE;
+ }
+
+ /*
+ * Remove any pages that may still be in the object from
+ * a previous deallocation.
+ */
+
+ vm_object_page_remove(prev_object,
+ prev_offset + prev_size,
+ prev_offset + prev_size + next_size);
+
+ /*
+ * Extend the object if necessary.
+ */
+ newsize = prev_offset + prev_size + next_size;
+ if (newsize > prev_object->size)
+ prev_object->size = newsize;
+
+ vm_object_unlock(prev_object);
+ return TRUE;
+}
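+
+/*
+ * Editor's note -- illustrative sketch only, not part of the original
+ * file.  The typical caller is extending an existing anonymous mapping,
+ * so next_object is VM_OBJECT_NULL and only prev_object may grow.  The
+ * prev_* and new_size names below are hypothetical.
+ */
+#if 0
+	if (vm_object_coalesce(prev_object, VM_OBJECT_NULL,
+			       prev_offset, (vm_offset_t) 0,
+			       prev_size, new_size)) {
+		/* the new range can reuse prev_object */
+	}
+#endif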
+
+vm_object_t vm_object_request_object(
+ ipc_port_t p)
+{
+ return vm_object_lookup(p);
+}
+
+/*
+ * Routine: vm_object_name
+ * Purpose:
+ * Returns a naked send right to the "name" port associated
+ * with this object.
+ */
+ipc_port_t vm_object_name(
+ vm_object_t object)
+{
+ ipc_port_t p;
+
+ if (object == VM_OBJECT_NULL)
+ return IP_NULL;
+
+ vm_object_lock(object);
+
+ while (object->shadow != VM_OBJECT_NULL) {
+ vm_object_t new_object = object->shadow;
+ vm_object_lock(new_object);
+ vm_object_unlock(object);
+ object = new_object;
+ }
+
+ p = object->pager_name;
+ if (p != IP_NULL)
+#if NORMA_VM
+ p = ipc_port_copy_send(p);
+#else /* NORMA_VM */
+ p = ipc_port_make_send(p);
+#endif /* NORMA_VM */
+ vm_object_unlock(object);
+
+ return p;
+}
+
+/*
+ * Attach a set of physical pages to an object, so that they can
+ * be mapped by mapping the object. Typically used to map IO memory.
+ *
+ * The mapping function and its private data are used to obtain the
+ * physical addresses for each page to be mapped.
+ */
+void
+vm_object_page_map(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ vm_offset_t (*map_fn)(void *, vm_offset_t),
+ void * map_fn_data) /* private to map_fn */
+{
+ int num_pages;
+ int i;
+ vm_page_t m;
+ vm_page_t old_page;
+ vm_offset_t addr;
+
+ num_pages = atop(size);
+
+ for (i = 0; i < num_pages; i++, offset += PAGE_SIZE) {
+
+ addr = (*map_fn)(map_fn_data, offset);
+
+ while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
+ vm_page_more_fictitious();
+
+ vm_object_lock(object);
+ if ((old_page = vm_page_lookup(object, offset))
+ != VM_PAGE_NULL)
+ {
+ vm_page_lock_queues();
+ vm_page_free(old_page);
+ vm_page_unlock_queues();
+ }
+
+ vm_page_init(m, addr);
+ m->private = TRUE; /* don`t free page */
+ m->wire_count = 1;
+ vm_page_lock_queues();
+ vm_page_insert(m, object, offset);
+ vm_page_unlock_queues();
+
+ PAGE_WAKEUP_DONE(m);
+ vm_object_unlock(object);
+ }
+}
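+
+/*
+ * Editor's note -- illustrative sketch only, not part of the original
+ * file.  A minimal map_fn for a device whose registers occupy one
+ * physically contiguous range: the private data is assumed to be the
+ * base physical address.  All names here are hypothetical.
+ */
+#if 0
+static vm_offset_t
+example_contig_map_fn(void *data, vm_offset_t offset)
+{
+	return ((vm_offset_t) data) + offset;
+}
+
+	/* typical call, mapping "size" bytes starting at dev_phys_base: */
+	vm_object_page_map(dev_object, 0, size,
+			   example_contig_map_fn, (void *) dev_phys_base);
+#endif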
+
+#include <mach_kdb.h>
+
+
+#if MACH_KDB
+#define printf kdbprintf
+
+boolean_t vm_object_print_pages = FALSE;
+
+/*
+ * vm_object_print: [ debug ]
+ */
+void vm_object_print(
+ vm_object_t object)
+{
+ register vm_page_t p;
+ extern int indent;
+
+ register int count;
+
+ if (object == VM_OBJECT_NULL)
+ return;
+
+ iprintf("Object 0x%X: size=0x%X",
+ (vm_offset_t) object, (vm_offset_t) object->size);
+ printf(", %d references, %d resident pages,", object->ref_count,
+ object->resident_page_count);
+ printf(" %d absent pages,", object->absent_count);
+ printf(" %d paging ops\n", object->paging_in_progress);
+ indent += 2;
+ iprintf("memory object=0x%X (offset=0x%X),",
+ (vm_offset_t) object->pager, (vm_offset_t) object->paging_offset);
+ printf("control=0x%X, name=0x%X\n",
+ (vm_offset_t) object->pager_request, (vm_offset_t) object->pager_name);
+ iprintf("%s%s",
+ object->pager_ready ? " ready" : "",
+ object->pager_created ? " created" : "");
+ printf("%s,%s ",
+ object->pager_initialized ? "" : "uninitialized",
+ object->temporary ? "temporary" : "permanent");
+ printf("%s%s,",
+ object->internal ? "internal" : "external",
+ object->can_persist ? " cacheable" : "");
+ printf("copy_strategy=%d\n", (vm_offset_t)object->copy_strategy);
+ iprintf("shadow=0x%X (offset=0x%X),",
+ (vm_offset_t) object->shadow, (vm_offset_t) object->shadow_offset);
+ printf("copy=0x%X\n", (vm_offset_t) object->copy);
+
+ indent += 2;
+
+ if (vm_object_print_pages) {
+ count = 0;
+ p = (vm_page_t) queue_first(&object->memq);
+ while (!queue_end(&object->memq, (queue_entry_t) p)) {
+ if (count == 0) iprintf("memory:=");
+ else if (count == 4) {printf("\n"); iprintf(" ..."); count = 0;}
+ else printf(",");
+ count++;
+
+ printf("(off=0x%X,page=0x%X)", p->offset, (vm_offset_t) p);
+ p = (vm_page_t) queue_next(&p->listq);
+ }
+ if (count != 0)
+ printf("\n");
+ }
+ indent -= 4;
+}
+
+#endif /* MACH_KDB */
diff --git a/vm/vm_object.h b/vm/vm_object.h
new file mode 100644
index 00000000..d3d050a0
--- /dev/null
+++ b/vm/vm_object.h
@@ -0,0 +1,374 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm_object.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Virtual memory object module definitions.
+ */
+
+#ifndef _VM_VM_OBJECT_H_
+#define _VM_VM_OBJECT_H_
+
+#include <mach_pagemap.h>
+#include <norma_vm.h>
+
+#include <mach/kern_return.h>
+#include <mach/boolean.h>
+#include <mach/memory_object.h>
+#include <mach/port.h>
+#include <mach/vm_prot.h>
+#include <mach/machine/vm_types.h>
+#include <kern/queue.h>
+#include <kern/lock.h>
+#include <kern/assert.h>
+#include <kern/macro_help.h>
+#include <vm/pmap.h>
+
+#if MACH_PAGEMAP
+#include <vm/vm_external.h>
+#endif /* MACH_PAGEMAP */
+
+#if NORMA_VM
+typedef struct xmm_obj * pager_request_t;
+#else /* NORMA_VM */
+typedef struct ipc_port * pager_request_t;
+#endif /* NORMA_VM */
+#define PAGER_REQUEST_NULL ((pager_request_t) 0)
+
+/*
+ * Types defined:
+ *
+ * vm_object_t Virtual memory object.
+ *
+ * We use "struct ipc_port *" instead of "ipc_port_t"
+ * to avoid include file circularities.
+ */
+
+struct vm_object {
+ queue_chain_t memq; /* Resident memory */
+ decl_simple_lock_data(, Lock) /* Synchronization */
+#if VM_OBJECT_DEBUG
+ thread_t LockHolder; /* Thread holding Lock */
+#endif /* VM_OBJECT_DEBUG */
+ vm_size_t size; /* Object size (only valid
+ * if internal)
+ */
+
+ short ref_count; /* Number of references */
+ short resident_page_count;
+ /* number of resident pages */
+
+ struct vm_object *copy; /* Object that should receive
+ * a copy of my changed pages
+ */
+ struct vm_object *shadow; /* My shadow */
+ vm_offset_t shadow_offset; /* Offset into shadow */
+
+ struct ipc_port *pager; /* Where to get data */
+ vm_offset_t paging_offset; /* Offset into memory object */
+ pager_request_t pager_request; /* Where data comes back */
+ struct ipc_port *pager_name; /* How to identify region */
+
+ memory_object_copy_strategy_t
+ copy_strategy; /* How to handle data copy */
+
+ unsigned int
+ absent_count; /* The number of pages that
+ * have been requested but
+ * not filled. That is, the
+ * number of pages for which
+ * the "absent" attribute is
+ * asserted.
+ */
+
+ unsigned int /* boolean_t array */
+ all_wanted; /* Bit array of "want to be
+ * awakened" notations. See
+ * VM_OBJECT_EVENT_* items
+ * below
+ */
+
+ unsigned int
+ paging_in_progress:16,
+ /* The memory object ports are
+ * being used (e.g., for pagein
+ * or pageout) -- don't change any
+ * of these fields (i.e., don't
+ * collapse, destroy or terminate)
+ */
+ /* boolean_t */ pager_created:1,/* Has pager ever been created? */
+ /* boolean_t */ pager_initialized:1,/* Are fields ready to use? */
+ /* boolean_t */ pager_ready:1, /* Will manager take requests? */
+
+ /* boolean_t */ can_persist:1, /* The kernel may keep the data
+ * for this object (and rights to
+ * the memory object) after all
+ * address map references are
+ * deallocated?
+ */
+ /* boolean_t */ internal:1, /* Created by the kernel (and
+ * therefore, managed by the
+ * default memory manager)
+ */
+ /* boolean_t */ temporary:1, /* Permanent objects may be changed
+ * externally by the memory manager,
+ * and changes made in memory must
+ * be reflected back to the memory
+ * manager. Temporary objects lack
+ * both of these characteristics.
+ */
+ /* boolean_t */ alive:1, /* Not yet terminated (debug) */
+ /* boolean_t */ lock_in_progress : 1,
+ /* Is a multi-page lock
+ * request in progress?
+ */
+ /* boolean_t */ lock_restart : 1,
+ /* Should lock request in
+ * progress restart search?
+ */
+ /* boolean_t */ use_old_pageout : 1,
+ /* Use old pageout primitives?
+ */
+ /* boolean_t */ use_shared_copy : 1,/* Use shared (i.e.,
+ * delayed) copy on write */
+ /* boolean_t */ shadowed: 1; /* Shadow may exist */
+
+ queue_chain_t cached_list; /* Attachment point for the list
+ * of objects cached as a result
+ * of their can_persist value
+ */
+ vm_offset_t last_alloc; /* last allocation offset */
+#if MACH_PAGEMAP
+ vm_external_t existence_info;
+#endif /* MACH_PAGEMAP */
+};
+
+typedef struct vm_object *vm_object_t;
+#define VM_OBJECT_NULL ((vm_object_t) 0)
+
+extern
+vm_object_t kernel_object; /* the single kernel object */
+
+/*
+ * Declare procedures that operate on VM objects.
+ */
+
+extern void vm_object_bootstrap(void);
+extern void vm_object_init(void);
+extern void vm_object_terminate(vm_object_t);
+extern vm_object_t vm_object_allocate(vm_size_t);
+extern void vm_object_reference(vm_object_t);
+extern void vm_object_deallocate(vm_object_t);
+extern void vm_object_pmap_protect(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ pmap_t pmap,
+ vm_offset_t pmap_start,
+ vm_prot_t prot);
+extern void vm_object_pmap_remove(
+ vm_object_t object,
+ vm_offset_t start,
+ vm_offset_t end);
+extern void vm_object_page_remove(
+ vm_object_t object,
+ vm_offset_t start,
+ vm_offset_t end);
+extern void vm_object_shadow(
+ vm_object_t *object, /* in/out */
+ vm_offset_t *offset, /* in/out */
+ vm_size_t length);
+extern void vm_object_collapse(vm_object_t);
+extern vm_object_t vm_object_lookup(struct ipc_port *);
+extern vm_object_t vm_object_lookup_name(struct ipc_port *);
+extern struct ipc_port *vm_object_name(vm_object_t);
+extern void vm_object_remove(vm_object_t);
+
+extern boolean_t vm_object_copy_temporary(
+ vm_object_t *_object, /* in/out */
+ vm_offset_t *_offset, /* in/out */
+ boolean_t *_src_needs_copy, /* out */
+ boolean_t *_dst_needs_copy); /* out */
+extern kern_return_t vm_object_copy_strategically(
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t size,
+ vm_object_t *dst_object, /* out */
+ vm_offset_t *dst_offset, /* out */
+ boolean_t *dst_needs_copy); /* out */
+extern kern_return_t vm_object_copy_slowly(
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t size,
+ boolean_t interruptible,
+ vm_object_t *_result_object); /* out */
+
+extern vm_object_t vm_object_enter(
+ struct ipc_port *pager,
+ vm_size_t size,
+ boolean_t internal);
+extern void vm_object_pager_create(
+ vm_object_t object);
+extern void vm_object_destroy(
+ struct ipc_port *pager);
+
+extern void vm_object_page_map(
+ vm_object_t,
+ vm_offset_t,
+ vm_size_t,
+ vm_offset_t (*)(void *, vm_offset_t),
+ void *);
+
+extern void vm_object_print(vm_object_t);
+
+extern vm_object_t vm_object_request_object(struct ipc_port *);
+
+/*
+ * Event waiting handling
+ */
+
+#define VM_OBJECT_EVENT_INITIALIZED 0
+#define VM_OBJECT_EVENT_PAGER_READY 1
+#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS 2
+#define VM_OBJECT_EVENT_ABSENT_COUNT 3
+#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS 4
+
+#define vm_object_wait(object, event, interruptible) \
+ MACRO_BEGIN \
+ (object)->all_wanted |= 1 << (event); \
+ vm_object_sleep(((vm_offset_t) object) + (event), \
+ (object), \
+ (interruptible)); \
+ MACRO_END
+
+#define vm_object_assert_wait(object, event, interruptible) \
+ MACRO_BEGIN \
+ (object)->all_wanted |= 1 << (event); \
+ assert_wait((event_t)(((vm_offset_t) object) + (event)), (interruptible)); \
+ MACRO_END
+
+#define vm_object_wakeup(object, event) \
+ MACRO_BEGIN \
+ if ((object)->all_wanted & (1 << (event))) \
+ thread_wakeup((event_t)(((vm_offset_t) object) + (event))); \
+ (object)->all_wanted &= ~(1 << (event)); \
+ MACRO_END
+
+/*
+ * Routines implemented as macros
+ */
+
+#define vm_object_paging_begin(object) \
+ ((object)->paging_in_progress++)
+
+#define vm_object_paging_end(object) \
+ MACRO_BEGIN \
+ assert((object)->paging_in_progress != 0); \
+ if (--(object)->paging_in_progress == 0) { \
+ vm_object_wakeup(object, \
+ VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
+ } \
+ MACRO_END
+
+#define vm_object_paging_wait(object, interruptible) \
+ MACRO_BEGIN \
+ while ((object)->paging_in_progress != 0) { \
+ vm_object_wait( (object), \
+ VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
+ (interruptible)); \
+ vm_object_lock(object); \
+ \
+ /*XXX if ((interruptible) && */ \
+ /*XXX (current_thread()->wait_result != THREAD_AWAKENED))*/ \
+ /*XXX break; */ \
+ } \
+ MACRO_END
+
+#define vm_object_absent_assert_wait(object, interruptible) \
+ MACRO_BEGIN \
+ vm_object_assert_wait( (object), \
+ VM_OBJECT_EVENT_ABSENT_COUNT, \
+ (interruptible)); \
+ MACRO_END
+
+
+#define vm_object_absent_release(object) \
+ MACRO_BEGIN \
+ (object)->absent_count--; \
+ vm_object_wakeup((object), \
+ VM_OBJECT_EVENT_ABSENT_COUNT); \
+ MACRO_END
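+
+/*
+ * Editor's note -- illustrative sketch only, not part of the original
+ * file.  The usual pairing of the absent-count event: a thread that
+ * needs an absent page records its interest and sleeps; the thread
+ * that supplies the page decrements absent_count and wakes waiters.
+ */
+#if 0
+	/* waiter, object locked: */
+	vm_object_absent_assert_wait(object, interruptible);
+	vm_object_unlock(object);
+	thread_block((void (*)()) 0);
+
+	/* supplier, object locked, after installing the page: */
+	vm_object_absent_release(object);
+#endif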
+
+/*
+ * Object locking macros (with and without debugging)
+ */
+
+#if VM_OBJECT_DEBUG
+#define vm_object_lock_init(object) \
+MACRO_BEGIN \
+ simple_lock_init(&(object)->Lock); \
+ (object)->LockHolder = 0; \
+MACRO_END
+#define vm_object_lock(object) \
+MACRO_BEGIN \
+ simple_lock(&(object)->Lock); \
+ (object)->LockHolder = current_thread(); \
+MACRO_END
+#define vm_object_unlock(object) \
+MACRO_BEGIN \
+ if ((object)->LockHolder != current_thread()) \
+ panic("vm_object_unlock 0x%x", (object)); \
+ (object)->LockHolder = 0; \
+ simple_unlock(&(object)->Lock); \
+MACRO_END
+#define vm_object_lock_try(object) \
+ (simple_lock_try(&(object)->Lock) \
+ ? ( ((object)->LockHolder = current_thread()) , TRUE) \
+ : FALSE)
+#define vm_object_sleep(event, object, interruptible) \
+MACRO_BEGIN \
+ if ((object)->LockHolder != current_thread()) \
+ panic("vm_object_sleep %#x", (object)); \
+ (object)->LockHolder = 0; \
+ thread_sleep((event_t)(event), simple_lock_addr((object)->Lock), \
+ (interruptible)); \
+MACRO_END
+#define vm_object_lock_taken(object) \
+ ((object)->LockHolder == current_thread())
+#else /* VM_OBJECT_DEBUG */
+#define vm_object_lock_init(object) simple_lock_init(&(object)->Lock)
+#define vm_object_lock(object) simple_lock(&(object)->Lock)
+#define vm_object_unlock(object) simple_unlock(&(object)->Lock)
+#define vm_object_lock_try(object) simple_lock_try(&(object)->Lock)
+#define vm_object_sleep(event, object, interruptible) \
+ thread_sleep((event_t)(event), simple_lock_addr((object)->Lock), \
+ (interruptible))
+#define vm_object_lock_taken(object) simple_lock_taken(&(object)->Lock)
+#endif /* VM_OBJECT_DEBUG */
+
+#endif /* _VM_VM_OBJECT_H_ */
diff --git a/vm/vm_page.h b/vm/vm_page.h
new file mode 100644
index 00000000..f7fa80a3
--- /dev/null
+++ b/vm/vm_page.h
@@ -0,0 +1,322 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_page.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Resident memory system definitions.
+ */
+
+#ifndef _VM_VM_PAGE_H_
+#define _VM_VM_PAGE_H_
+
+#include <mach_vm_debug.h>
+
+#include <mach/boolean.h>
+#include <mach/vm_prot.h>
+#include <mach/vm_param.h>
+#include <vm/vm_object.h>
+#include <kern/queue.h>
+#include <kern/lock.h>
+#include <kern/zalloc.h>
+
+#include <kern/macro_help.h>
+#include <kern/sched_prim.h> /* definitions of wait/wakeup */
+
+#if MACH_VM_DEBUG
+#include <mach_debug/hash_info.h>
+#endif
+
+/*
+ * Management of resident (logical) pages.
+ *
+ * A small structure is kept for each resident
+ * page, indexed by page number. Each structure
+ * is an element of several lists:
+ *
+ * A hash table bucket used to quickly
+ * perform object/offset lookups
+ *
+ * A list of all pages for a given object,
+ * so they can be quickly deactivated at
+ * time of deallocation.
+ *
+ * An ordered list of pages due for pageout.
+ *
+ * In addition, the structure contains the object
+ * and offset to which this page belongs (for pageout),
+ * and sundry status bits.
+ *
+ * Fields in this structure are locked either by the lock on the
+ * object that the page belongs to (O) or by the lock on the page
+ * queues (P). [Some fields require that both locks be held to
+ * change that field; holding either lock is sufficient to read.]
+ */
+
+struct vm_page {
+ queue_chain_t pageq; /* queue info for FIFO
+ * queue or free list (P) */
+ queue_chain_t listq; /* all pages in same object (O) */
+ struct vm_page *next; /* VP bucket link (O) */
+
+ vm_object_t object; /* which object am I in (O,P) */
+ vm_offset_t offset; /* offset into that object (O,P) */
+
+ unsigned int wire_count:16, /* how many wired down maps use me?
+ (O&P) */
+ /* boolean_t */ inactive:1, /* page is in inactive list (P) */
+ active:1, /* page is in active list (P) */
+ laundry:1, /* page is being cleaned now (P)*/
+ free:1, /* page is on free list (P) */
+ reference:1, /* page has been used (P) */
+ :0; /* (force to 'long' boundary) */
+#ifdef ns32000
+ int pad; /* extra space for ns32000 bit ops */
+#endif /* ns32000 */
+
+ unsigned int
+ /* boolean_t */ busy:1, /* page is in transit (O) */
+ wanted:1, /* someone is waiting for page (O) */
+ tabled:1, /* page is in VP table (O) */
+ fictitious:1, /* Physical page doesn't exist (O) */
+ private:1, /* Page should not be returned to
+ * the free list (O) */
+ absent:1, /* Data has been requested, but is
+ * not yet available (O) */
+ error:1, /* Data manager was unable to provide
+ * data due to error (O) */
+ dirty:1, /* Page must be cleaned (O) */
+ precious:1, /* Page is precious; data must be
+ * returned even if clean (O) */
+ overwriting:1, /* Request to unlock has been made
+ * without having data. (O)
+ * [See vm_object_overwrite] */
+ :0;
+
+ vm_offset_t phys_addr; /* Physical address of page, passed
+ * to pmap_enter (read-only) */
+ vm_prot_t page_lock; /* Uses prohibited by data manager (O) */
+ vm_prot_t unlock_request; /* Outstanding unlock request (O) */
+};
+
+typedef struct vm_page *vm_page_t;
+
+#define VM_PAGE_NULL ((vm_page_t) 0)
+
+/*
+ * For debugging, this macro can be defined to perform
+ * some useful check on a page structure.
+ */
+
+#define VM_PAGE_CHECK(mem)
+
+/*
+ * Each pageable resident page falls into one of three lists:
+ *
+ * free
+ * Available for allocation now.
+ * inactive
+ * Not referenced in any map, but still has an
+ * object/offset-page mapping, and may be dirty.
+ * This is the list of pages that should be
+ * paged out next.
+ * active
+ * A list of pages which have been placed in
+ * at least one physical map. This list is
+ * ordered, in LRU-like fashion.
+ */
+
+extern
+vm_page_t vm_page_queue_free; /* memory free queue */
+extern
+vm_page_t vm_page_queue_fictitious; /* fictitious free queue */
+extern
+queue_head_t vm_page_queue_active; /* active memory queue */
+extern
+queue_head_t vm_page_queue_inactive; /* inactive memory queue */
+
+extern
+vm_offset_t first_phys_addr; /* physical address for first_page */
+extern
+vm_offset_t last_phys_addr; /* physical address for last_page */
+
+extern
+int vm_page_free_count; /* How many pages are free? */
+extern
+int vm_page_fictitious_count;/* How many fictitious pages are free? */
+extern
+int vm_page_active_count; /* How many pages are active? */
+extern
+int vm_page_inactive_count; /* How many pages are inactive? */
+extern
+int vm_page_wire_count; /* How many pages are wired? */
+extern
+int vm_page_free_target; /* How many do we want free? */
+extern
+int vm_page_free_min; /* When to wakeup pageout */
+extern
+int vm_page_inactive_target;/* How many do we want inactive? */
+extern
+int vm_page_free_reserved; /* How many pages reserved to do pageout */
+extern
+int vm_page_laundry_count; /* How many pages being laundered? */
+
+decl_simple_lock_data(extern,vm_page_queue_lock)/* lock on active and inactive
+ page queues */
+decl_simple_lock_data(extern,vm_page_queue_free_lock)
+ /* lock on free page queue */
+
+extern unsigned int vm_page_free_wanted;
+ /* how many threads are waiting for memory */
+
+extern vm_offset_t vm_page_fictitious_addr;
+ /* (fake) phys_addr of fictitious pages */
+
+extern void vm_page_bootstrap(
+ vm_offset_t *startp,
+ vm_offset_t *endp);
+extern void vm_page_module_init(void);
+
+extern void vm_page_create(
+ vm_offset_t start,
+ vm_offset_t end);
+extern vm_page_t vm_page_lookup(
+ vm_object_t object,
+ vm_offset_t offset);
+extern vm_page_t vm_page_grab_fictitious(void);
+extern void vm_page_release_fictitious(vm_page_t);
+extern boolean_t vm_page_convert(vm_page_t);
+extern void vm_page_more_fictitious(void);
+extern vm_page_t vm_page_grab(void);
+extern void vm_page_release(vm_page_t);
+extern void vm_page_wait(void (*)(void));
+extern vm_page_t vm_page_alloc(
+ vm_object_t object,
+ vm_offset_t offset);
+extern void vm_page_init(
+ vm_page_t mem,
+ vm_offset_t phys_addr);
+extern void vm_page_free(vm_page_t);
+extern void vm_page_activate(vm_page_t);
+extern void vm_page_deactivate(vm_page_t);
+extern void vm_page_rename(
+ vm_page_t mem,
+ vm_object_t new_object,
+ vm_offset_t new_offset);
+extern void vm_page_insert(
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset);
+extern void vm_page_remove(
+ vm_page_t mem);
+
+extern void vm_page_zero_fill(vm_page_t);
+extern void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
+
+extern void vm_page_wire(vm_page_t);
+extern void vm_page_unwire(vm_page_t);
+
+extern void vm_set_page_size(void);
+
+#if MACH_VM_DEBUG
+extern unsigned int vm_page_info(
+ hash_info_bucket_t *info,
+ unsigned int count);
+#endif
+
+/*
+ * Functions implemented as macros
+ */
+
+#define PAGE_ASSERT_WAIT(m, interruptible) \
+ MACRO_BEGIN \
+ (m)->wanted = TRUE; \
+ assert_wait((event_t) (m), (interruptible)); \
+ MACRO_END
+
+#define PAGE_WAKEUP_DONE(m) \
+ MACRO_BEGIN \
+ (m)->busy = FALSE; \
+ if ((m)->wanted) { \
+ (m)->wanted = FALSE; \
+ thread_wakeup(((event_t) m)); \
+ } \
+ MACRO_END
+
+#define PAGE_WAKEUP(m) \
+ MACRO_BEGIN \
+ if ((m)->wanted) { \
+ (m)->wanted = FALSE; \
+ thread_wakeup((event_t) (m)); \
+ } \
+ MACRO_END
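+
+/*
+ * Editor's note -- illustrative sketch only, not part of the original
+ * file.  The busy/wanted handshake these macros implement: a thread
+ * that finds a page busy records its interest and sleeps; the thread
+ * that owns the page clears busy and wakes any waiters when done.
+ */
+#if 0
+	/* waiter, object locked: */
+	while (m->busy) {
+		PAGE_ASSERT_WAIT(m, FALSE);
+		vm_object_unlock(object);
+		thread_block((void (*)()) 0);
+		vm_object_lock(object);
+	}
+
+	/* owner, when its operation on the page completes: */
+	PAGE_WAKEUP_DONE(m);
+#endif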
+
+#define VM_PAGE_FREE(p) \
+ MACRO_BEGIN \
+ vm_page_lock_queues(); \
+ vm_page_free(p); \
+ vm_page_unlock_queues(); \
+ MACRO_END
+
+/*
+ * Macro to be used in place of pmap_enter()
+ */
+
+#define PMAP_ENTER(pmap, virtual_address, page, protection, wired) \
+ MACRO_BEGIN \
+ pmap_enter( \
+ (pmap), \
+ (virtual_address), \
+ (page)->phys_addr, \
+ (protection) & ~(page)->page_lock, \
+ (wired) \
+ ); \
+ MACRO_END
+
+#define VM_PAGE_WAIT(continuation) vm_page_wait(continuation)
+
+#define vm_page_lock_queues() simple_lock(&vm_page_queue_lock)
+#define vm_page_unlock_queues() simple_unlock(&vm_page_queue_lock)
+
+#define VM_PAGE_QUEUES_REMOVE(mem) \
+ MACRO_BEGIN \
+ if (mem->active) { \
+ queue_remove(&vm_page_queue_active, \
+ mem, vm_page_t, pageq); \
+ mem->active = FALSE; \
+ vm_page_active_count--; \
+ } \
+ \
+ if (mem->inactive) { \
+ queue_remove(&vm_page_queue_inactive, \
+ mem, vm_page_t, pageq); \
+ mem->inactive = FALSE; \
+ vm_page_inactive_count--; \
+ } \
+ MACRO_END
+
+#endif /* _VM_VM_PAGE_H_ */
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
new file mode 100644
index 00000000..411531bb
--- /dev/null
+++ b/vm/vm_pageout.c
@@ -0,0 +1,924 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_pageout.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * The proverbial page-out daemon.
+ */
+
+#include <mach_pagemap.h>
+#include <norma_vm.h>
+
+#include <mach/mach_types.h>
+#include <mach/memory_object.h>
+#include "memory_object_default.h"
+#include "memory_object_user.h"
+#include <mach/vm_param.h>
+#include <mach/vm_statistics.h>
+#include <kern/counters.h>
+#include <kern/thread.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <machine/vm_tuning.h>
+
+
+
+#ifndef VM_PAGEOUT_BURST_MAX
+#define VM_PAGEOUT_BURST_MAX 10 /* number of pages */
+#endif /* VM_PAGEOUT_BURST_MAX */
+
+#ifndef VM_PAGEOUT_BURST_MIN
+#define VM_PAGEOUT_BURST_MIN 5 /* number of pages */
+#endif /* VM_PAGEOUT_BURST_MIN */
+
+#ifndef VM_PAGEOUT_BURST_WAIT
+#define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds per page */
+#endif /* VM_PAGEOUT_BURST_WAIT */
+
+#ifndef VM_PAGEOUT_EMPTY_WAIT
+#define VM_PAGEOUT_EMPTY_WAIT 200 /* milliseconds */
+#endif /* VM_PAGEOUT_EMPTY_WAIT */
+
+#ifndef VM_PAGEOUT_PAUSE_MAX
+#define VM_PAGEOUT_PAUSE_MAX 10 /* number of pauses */
+#endif /* VM_PAGEOUT_PAUSE_MAX */
+
+/*
+ * To obtain a reasonable LRU approximation, the inactive queue
+ * needs to be large enough to give pages on it a chance to be
+ * referenced a second time. This macro defines the fraction
+ * of active+inactive pages that should be inactive.
+ * The pageout daemon uses it to update vm_page_inactive_target.
+ *
+ * If vm_page_free_count falls below vm_page_free_target and
+ * vm_page_inactive_count is below vm_page_inactive_target,
+ * then the pageout daemon starts running.
+ */
+
+#ifndef VM_PAGE_INACTIVE_TARGET
+#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 2 / 3)
+#endif /* VM_PAGE_INACTIVE_TARGET */
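+
+/*
+ * Editor's note -- illustrative arithmetic, not part of the original
+ * file.  With the default 2/3 fraction, a pool of 3000 active+inactive
+ * pages yields vm_page_inactive_target = 2000, so the daemon keeps
+ * deactivating pages until two thirds of the pageable pool sits on
+ * the inactive queue.
+ */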
+
+/*
+ * Once the pageout daemon starts running, it keeps going
+ * until vm_page_free_count meets or exceeds vm_page_free_target.
+ */
+
+#ifndef VM_PAGE_FREE_TARGET
+#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
+#endif /* VM_PAGE_FREE_TARGET */
+
+/*
+ * The pageout daemon always starts running once vm_page_free_count
+ * falls below vm_page_free_min.
+ */
+
+#ifndef VM_PAGE_FREE_MIN
+#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
+#endif /* VM_PAGE_FREE_MIN */
+
+/*
+ * When vm_page_free_count falls below vm_page_free_reserved,
+ * only vm-privileged threads can allocate pages. vm-privilege
+ * allows the pageout daemon and default pager (and any other
+ * associated threads needed for default pageout) to continue
+ * operation by dipping into the reserved pool of pages.
+ */
+
+#ifndef VM_PAGE_FREE_RESERVED
+#define VM_PAGE_FREE_RESERVED 15
+#endif /* VM_PAGE_FREE_RESERVED */
+
+/*
+ * When vm_page_free_count falls below vm_pageout_reserved_internal,
+ * the pageout daemon no longer trusts external pagers to clean pages.
+ * External pagers are probably all wedged waiting for a free page.
+ * It forcibly double-pages dirty pages belonging to external objects,
+ * getting the pages to the default pager to clean.
+ */
+
+#ifndef VM_PAGEOUT_RESERVED_INTERNAL
+#define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 5)
+#endif /* VM_PAGEOUT_RESERVED_INTERNAL */
+
+/*
+ * When vm_page_free_count falls below vm_pageout_reserved_really,
+ * the pageout daemon stops work entirely to let the default pager
+ * catch up (assuming the default pager has pages to clean).
+ * Beyond this point, it is too dangerous to consume memory
+ * even for memory_object_data_write messages to the default pager.
+ */
+
+#ifndef VM_PAGEOUT_RESERVED_REALLY
+#define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 10)
+#endif /* VM_PAGEOUT_RESERVED_REALLY */
+
+extern void vm_pageout_continue();
+extern void vm_pageout_scan_continue();
+
+unsigned int vm_pageout_reserved_internal = 0;
+unsigned int vm_pageout_reserved_really = 0;
+
+unsigned int vm_pageout_burst_max = 0;
+unsigned int vm_pageout_burst_min = 0;
+unsigned int vm_pageout_burst_wait = 0; /* milliseconds per page */
+unsigned int vm_pageout_empty_wait = 0; /* milliseconds */
+unsigned int vm_pageout_pause_count = 0;
+unsigned int vm_pageout_pause_max = 0;
+
+/*
+ * These variables record the pageout daemon's actions:
+ * how many pages it looks at and what happens to those pages.
+ * No locking needed because only one thread modifies the variables.
+ */
+
+unsigned int vm_pageout_active = 0; /* debugging */
+unsigned int vm_pageout_inactive = 0; /* debugging */
+unsigned int vm_pageout_inactive_nolock = 0; /* debugging */
+unsigned int vm_pageout_inactive_busy = 0; /* debugging */
+unsigned int vm_pageout_inactive_absent = 0; /* debugging */
+unsigned int vm_pageout_inactive_used = 0; /* debugging */
+unsigned int vm_pageout_inactive_clean = 0; /* debugging */
+unsigned int vm_pageout_inactive_dirty = 0; /* debugging */
+unsigned int vm_pageout_inactive_double = 0; /* debugging */
+
+#if NORMA_VM
+/*
+ * Define them here, since they won't be defined by memory_object_user.h.
+ */
+extern kern_return_t memory_object_data_initialize();
+extern kern_return_t memory_object_data_write();
+#endif /* NORMA_VM */
+
+/*
+ * Routine: vm_pageout_setup
+ * Purpose:
+ * Set up a page for pageout.
+ *
+ * Move or copy the page to a new object, as part
+ * of which it will be sent to its memory manager
+ * in a memory_object_data_write or memory_object_initialize
+ * message.
+ *
+ * The "paging_offset" argument specifies the offset
+ * of the page within its external memory object.
+ *
+ * The "new_object" and "new_offset" arguments
+ * indicate where the page should be moved.
+ *
+ * The "flush" argument specifies whether the page
+ * should be flushed from its object. If not, a
+ * copy of the page is moved to the new object.
+ *
+ * In/Out conditions:
+ * The page in question must not be on any pageout queues,
+ * and must be busy. The object to which it belongs
+ * must be unlocked, and the caller must hold a paging
+ * reference to it. The new_object must not be locked.
+ *
+ * If the page is flushed from its original object,
+ * this routine returns a pointer to a place-holder page,
+ * inserted at the same offset, to block out-of-order
+ * requests for the page. The place-holder page must
+ * be freed after the data_write or initialize message
+ * has been sent. If the page is copied,
+ * the holding page is VM_PAGE_NULL.
+ *
+ * The original page is put on a paging queue and marked
+ * not busy on exit.
+ */
+vm_page_t
+vm_pageout_setup(m, paging_offset, new_object, new_offset, flush)
+ register vm_page_t m;
+ vm_offset_t paging_offset;
+ register vm_object_t new_object;
+ vm_offset_t new_offset;
+ boolean_t flush;
+{
+ register vm_object_t old_object = m->object;
+ register vm_page_t holding_page = 0; /* '=0' to quiet gcc warnings */
+ register vm_page_t new_m;
+
+ assert(m->busy && !m->absent && !m->fictitious);
+
+ /*
+ * If we are not flushing the page, allocate a
+ * page in the object. If we cannot get the
+ * page, flush instead.
+ */
+ if (!flush) {
+ vm_object_lock(new_object);
+ new_m = vm_page_alloc(new_object, new_offset);
+ if (new_m == VM_PAGE_NULL)
+ flush = TRUE;
+ vm_object_unlock(new_object);
+ }
+
+ if (flush) {
+ /*
+ * Create a place-holder page where the old one was,
+ * to prevent anyone from attempting to page in this
+ * page while we`re unlocked.
+ */
+ while ((holding_page = vm_page_grab_fictitious())
+ == VM_PAGE_NULL)
+ vm_page_more_fictitious();
+
+ vm_object_lock(old_object);
+ vm_page_lock_queues();
+ vm_page_remove(m);
+ vm_page_unlock_queues();
+ PAGE_WAKEUP_DONE(m);
+
+ vm_page_lock_queues();
+ vm_page_insert(holding_page, old_object, m->offset);
+ vm_page_unlock_queues();
+
+ /*
+ * Record that this page has been written out
+ */
+#if MACH_PAGEMAP
+ vm_external_state_set(old_object->existence_info,
+ paging_offset,
+ VM_EXTERNAL_STATE_EXISTS);
+#endif /* MACH_PAGEMAP */
+
+ vm_object_unlock(old_object);
+
+ vm_object_lock(new_object);
+
+ /*
+ * Move this page into the new object
+ */
+
+ vm_page_lock_queues();
+ vm_page_insert(m, new_object, new_offset);
+ vm_page_unlock_queues();
+
+ m->dirty = TRUE;
+ m->precious = FALSE;
+ m->page_lock = VM_PROT_NONE;
+ m->unlock_request = VM_PROT_NONE;
+ }
+ else {
+ /*
+ * Copy the data into the new page,
+ * and mark the new page as clean.
+ */
+ vm_page_copy(m, new_m);
+
+ vm_object_lock(old_object);
+ m->dirty = FALSE;
+ pmap_clear_modify(m->phys_addr);
+
+ /*
+ * Deactivate old page.
+ */
+ vm_page_lock_queues();
+ vm_page_deactivate(m);
+ vm_page_unlock_queues();
+
+ PAGE_WAKEUP_DONE(m);
+
+ /*
+ * Record that this page has been written out
+ */
+
+#if MACH_PAGEMAP
+ vm_external_state_set(old_object->existence_info,
+ paging_offset,
+ VM_EXTERNAL_STATE_EXISTS);
+#endif /* MACH_PAGEMAP */
+
+ vm_object_unlock(old_object);
+
+ vm_object_lock(new_object);
+
+ /*
+ * Use the new page below.
+ */
+ m = new_m;
+ m->dirty = TRUE;
+ assert(!m->precious);
+ PAGE_WAKEUP_DONE(m);
+ }
+
+ /*
+ * Make the old page eligible for replacement again; if a
+ * user-supplied memory manager fails to release the page,
+ * it will be paged out again to the default memory manager.
+ *
+ * Note that pages written to the default memory manager
+ * must be wired down -- in return, it guarantees to free
+ * this page, rather than reusing it.
+ */
+
+ vm_page_lock_queues();
+ vm_stat.pageouts++;
+ if (m->laundry) {
+ /*
+ * vm_pageout_scan is telling us to put this page
+ * at the front of the inactive queue, so it will
+ * be immediately paged out to the default pager.
+ */
+
+ assert(!old_object->internal);
+ m->laundry = FALSE;
+
+ queue_enter_first(&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ m->inactive = TRUE;
+ vm_page_inactive_count++;
+ } else if (old_object->internal) {
+ m->laundry = TRUE;
+ vm_page_laundry_count++;
+
+ vm_page_wire(m);
+ } else
+ vm_page_activate(m);
+ vm_page_unlock_queues();
+
+ /*
+ * Since IPC operations may block, we drop locks now.
+ * [The placeholder page is busy, and we still have
+ * paging_in_progress incremented.]
+ */
+
+ vm_object_unlock(new_object);
+
+ /*
+ * Return the placeholder page to simplify cleanup.
+ */
+ return (flush ? holding_page : VM_PAGE_NULL);
+}
+
+/*
+ * Routine: vm_pageout_page
+ * Purpose:
+ * Causes the specified page to be written back to
+ * the appropriate memory object.
+ *
+ * The "initial" argument specifies whether this
+ * data is an initialization only, and should use
+ * memory_object_data_initialize instead of
+ * memory_object_data_write.
+ *
+ * The "flush" argument specifies whether the page
+ * should be flushed from the object. If not, a
+ * copy of the data is sent to the memory object.
+ *
+ * In/out conditions:
+ * The page in question must not be on any pageout queues.
+ * The object to which it belongs must be locked.
+ * Implementation:
+ * Move this page to a completely new object, if flushing;
+ * copy to a new page in a new object, if not.
+ */
+void
+vm_pageout_page(m, initial, flush)
+ register vm_page_t m;
+ boolean_t initial;
+ boolean_t flush;
+{
+ vm_map_copy_t copy;
+ register vm_object_t old_object;
+ register vm_object_t new_object;
+ register vm_page_t holding_page;
+ vm_offset_t paging_offset;
+ kern_return_t rc;
+ boolean_t precious_clean;
+
+ assert(m->busy);
+
+ /*
+ * Cleaning but not flushing a clean precious page is a
+ * no-op. Remember whether page is clean and precious now
+ * because vm_pageout_setup will mark it dirty and not precious.
+ *
+ * XXX Check if precious_clean && !flush can really happen.
+ */
+ precious_clean = (!m->dirty) && m->precious;
+ if (precious_clean && !flush) {
+ PAGE_WAKEUP_DONE(m);
+ return;
+ }
+
+ /*
+ * Verify that we really want to clean this page.
+ */
+ if (m->absent || m->error || (!m->dirty && !m->precious)) {
+ VM_PAGE_FREE(m);
+ return;
+ }
+
+ /*
+ * Create a paging reference to let us play with the object.
+ */
+ old_object = m->object;
+ paging_offset = m->offset + old_object->paging_offset;
+ vm_object_paging_begin(old_object);
+ vm_object_unlock(old_object);
+
+ /*
+ * Allocate a new object into which we can put the page.
+ */
+ new_object = vm_object_allocate(PAGE_SIZE);
+
+ /*
+ * Move the page into the new object.
+ */
+ holding_page = vm_pageout_setup(m,
+ paging_offset,
+ new_object,
+ 0, /* new offset */
+ flush); /* flush */
+
+ rc = vm_map_copyin_object(new_object, 0, PAGE_SIZE, &copy);
+ assert(rc == KERN_SUCCESS);
+
+ if (initial || old_object->use_old_pageout) {
+ rc = (*(initial ? memory_object_data_initialize
+ : memory_object_data_write))
+ (old_object->pager,
+ old_object->pager_request,
+ paging_offset, (pointer_t) copy, PAGE_SIZE);
+ }
+ else {
+ rc = memory_object_data_return(
+ old_object->pager,
+ old_object->pager_request,
+ paging_offset, (pointer_t) copy, PAGE_SIZE,
+ !precious_clean, !flush);
+ }
+
+ if (rc != KERN_SUCCESS)
+ vm_map_copy_discard(copy);
+
+ /*
+ * Clean up.
+ */
+ vm_object_lock(old_object);
+ if (holding_page != VM_PAGE_NULL)
+ VM_PAGE_FREE(holding_page);
+ vm_object_paging_end(old_object);
+}
+
+/*
+ * vm_pageout_scan does the dirty work for the pageout daemon.
+ * It returns with vm_page_queue_free_lock held and
+ * vm_page_free_wanted == 0.
+ */
+
+void vm_pageout_scan()
+{
+ unsigned int burst_count;
+
+ /*
+ * We want to gradually dribble pages from the active queue
+ * to the inactive queue. If we let the inactive queue get
+ * very small, and then suddenly dump many pages into it,
+ * those pages won't get a sufficient chance to be referenced
+ * before we start taking them from the inactive queue.
+ *
+ * We must limit the rate at which we send pages to the pagers.
+ * data_write messages consume memory, for message buffers and
+ * for map-copy objects. If we get too far ahead of the pagers,
+ * we can potentially run out of memory.
+ *
+ * We can use the laundry count to limit directly the number
+ * of pages outstanding to the default pager. A similar
+ * strategy for external pagers doesn't work, because
+ * external pagers don't have to deallocate the pages sent them,
+ * and because we might have to send pages to external pagers
+ * even if they aren't processing writes. So we also
+ * use a burst count to limit writes to external pagers.
+ *
+ * When memory is very tight, we can't rely on external pagers to
+ * clean pages. They probably aren't running, because they
+ * aren't vm-privileged. If we kept sending dirty pages to them,
+ * we could exhaust the free list. However, we can't just ignore
+ * pages belonging to external objects, because there might be no
+ * pages belonging to internal objects. Hence, we get the page
+ * into an internal object and then immediately double-page it,
+ * sending it to the default pager.
+ *
+ * consider_zone_gc should be last, because the other operations
+ * might return memory to zones. When we pause we use
+ * vm_pageout_scan_continue as our continuation, so we will
+ * reenter vm_pageout_scan periodically and attempt to reclaim
+ * internal memory even if we never reach vm_page_free_target.
+ */
+
+ Restart:
+ stack_collect();
+ net_kmsg_collect();
+ consider_task_collect();
+ consider_thread_collect();
+ consider_zone_gc();
+
+ for (burst_count = 0;;) {
+ register vm_page_t m;
+ register vm_object_t object;
+ unsigned int free_count;
+
+ /*
+ * Recalculate vm_page_inactive_target.
+ */
+
+ vm_page_lock_queues();
+ vm_page_inactive_target =
+ VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
+ vm_page_inactive_count);
+
+ /*
+ * Move pages from active to inactive.
+ */
+
+ while ((vm_page_inactive_count < vm_page_inactive_target) &&
+ !queue_empty(&vm_page_queue_active)) {
+ register vm_object_t obj;
+
+ vm_pageout_active++;
+ m = (vm_page_t) queue_first(&vm_page_queue_active);
+ assert(m->active && !m->inactive);
+
+ obj = m->object;
+ if (!vm_object_lock_try(obj)) {
+ /*
+ * Move page to end and continue.
+ */
+
+ queue_remove(&vm_page_queue_active, m,
+ vm_page_t, pageq);
+ queue_enter(&vm_page_queue_active, m,
+ vm_page_t, pageq);
+ vm_page_unlock_queues();
+ vm_page_lock_queues();
+ continue;
+ }
+
+ /*
+ * If the page is busy, then we pull it
+ * off the active queue and leave it alone.
+ */
+
+ if (m->busy) {
+ vm_object_unlock(obj);
+ queue_remove(&vm_page_queue_active, m,
+ vm_page_t, pageq);
+ m->active = FALSE;
+ vm_page_active_count--;
+ continue;
+ }
+
+ /*
+ * Deactivate the page while holding the object
+ * locked, so we know the page is still not busy.
+ * This should prevent races between pmap_enter
+ * and pmap_clear_reference. The page might be
+ * absent or fictitious, but vm_page_deactivate
+ * can handle that.
+ */
+
+ vm_page_deactivate(m);
+ vm_object_unlock(obj);
+ }
+
+ /*
+ * We are done if we have met our target *and*
+ * nobody is still waiting for a page.
+ */
+
+ simple_lock(&vm_page_queue_free_lock);
+ free_count = vm_page_free_count;
+ if ((free_count >= vm_page_free_target) &&
+ (vm_page_free_wanted == 0)) {
+ vm_page_unlock_queues();
+ break;
+ }
+ simple_unlock(&vm_page_queue_free_lock);
+
+ /*
+ * Sometimes we have to pause:
+ * 1) No inactive pages - nothing to do.
+ * 2) Flow control - wait for pagers to catch up.
+ * 3) Extremely low memory - sending out dirty pages
+ * consumes memory. We don't take the risk of doing
+ * this if the default pager already has work to do.
+ */
+
+ if (queue_empty(&vm_page_queue_inactive) ||
+ (burst_count >= vm_pageout_burst_max) ||
+ (vm_page_laundry_count >= vm_pageout_burst_max) ||
+ ((free_count < vm_pageout_reserved_really) &&
+ (vm_page_laundry_count > 0))) {
+ unsigned int pages, msecs;
+
+ /*
+ * vm_pageout_burst_wait is msecs/page.
+ * If there is nothing for us to do, we wait
+ * at least vm_pageout_empty_wait msecs.
+ */
+
+ if (vm_page_laundry_count > burst_count)
+ pages = vm_page_laundry_count;
+ else
+ pages = burst_count;
+ msecs = pages * vm_pageout_burst_wait;
+
+ if (queue_empty(&vm_page_queue_inactive) &&
+ (msecs < vm_pageout_empty_wait))
+ msecs = vm_pageout_empty_wait;
+ vm_page_unlock_queues();
+
+ thread_will_wait_with_timeout(current_thread(), msecs);
+ counter(c_vm_pageout_scan_block++);
+ thread_block(vm_pageout_scan_continue);
+#ifndef CONTINUATIONS
+ /*
+ * Unfortunately, we don't have call_continuation
+ * so we can't rely on tail-recursion.
+ */
+
+ vm_pageout_scan_continue();
+ goto Restart;
+#else /* CONTINUATIONS */
+ call_continuation(vm_pageout_scan_continue);
+ /*NOTREACHED*/
+#endif /* CONTINUATIONS */
+ }
+
+ vm_pageout_inactive++;
+ m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ assert(!m->active && m->inactive);
+ object = m->object;
+
+ /*
+ * Try to lock object; since we've got the
+ * page queues lock, we can only try for this one.
+ */
+
+ if (!vm_object_lock_try(object)) {
+ /*
+ * Move page to end and continue.
+ */
+
+ queue_remove(&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ queue_enter(&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ vm_page_unlock_queues();
+ vm_pageout_inactive_nolock++;
+ continue;
+ }
+
+ /*
+ * Remove the page from the inactive list.
+ */
+
+ queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
+ vm_page_inactive_count--;
+ m->inactive = FALSE;
+
+ if (m->busy || !object->alive) {
+ /*
+ * Somebody is already playing with this page.
+ * Leave it off the pageout queues.
+ */
+
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+ vm_pageout_inactive_busy++;
+ continue;
+ }
+
+ /*
+ * If it's absent, we can reclaim the page.
+ */
+
+ if (m->absent) {
+ vm_pageout_inactive_absent++;
+ reclaim_page:
+ vm_page_free(m);
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+ continue;
+ }
+
+ /*
+ * If it's being used, reactivate.
+ * (Fictitious pages are either busy or absent.)
+ */
+
+ assert(!m->fictitious);
+ if (m->reference || pmap_is_referenced(m->phys_addr)) {
+ vm_object_unlock(object);
+ vm_page_activate(m);
+ vm_stat.reactivations++;
+ vm_page_unlock_queues();
+ vm_pageout_inactive_used++;
+ continue;
+ }
+
+ /*
+ * Eliminate all mappings.
+ */
+
+ m->busy = TRUE;
+ pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+ if (!m->dirty)
+ m->dirty = pmap_is_modified(m->phys_addr);
+
+ /*
+ * If it's clean and not precious, we can free the page.
+ */
+
+ if (!m->dirty && !m->precious) {
+ vm_pageout_inactive_clean++;
+ goto reclaim_page;
+ }
+
+ /*
+ * If we are very low on memory, then we can't
+ * rely on an external pager to clean a dirty page,
+ * because external pagers are not vm-privileged.
+ *
+ * The laundry bit tells vm_pageout_setup to
+ * put the page back at the front of the inactive
+ * queue instead of activating the page. Hence,
+ * we will pick the page up again immediately and
+ * resend it to the default pager.
+ */
+
+ assert(!m->laundry);
+ if ((free_count < vm_pageout_reserved_internal) &&
+ !object->internal) {
+ m->laundry = TRUE;
+ vm_pageout_inactive_double++;
+ }
+ vm_page_unlock_queues();
+
+ /*
+ * If there is no memory object for the page, create
+ * one and hand it to the default pager.
+ * [First try to collapse, so we don't create
+ * one unnecessarily.]
+ */
+
+ if (!object->pager_initialized)
+ vm_object_collapse(object);
+ if (!object->pager_initialized)
+ vm_object_pager_create(object);
+ if (!object->pager_initialized)
+ panic("vm_pageout_scan");
+
+ vm_pageout_inactive_dirty++;
+ vm_pageout_page(m, FALSE, TRUE); /* flush it */
+ vm_object_unlock(object);
+ burst_count++;
+ }
+}
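+
+/*
+ * [Editor's note, not part of the original change: the per-page
+ *  decision chain in the scan loop above, in brief:
+ *	object lock unavailable	requeue at the tail, try the next page;
+ *	busy, or object dead	drop from the pageout queues;
+ *	absent			reclaim (free) immediately;
+ *	recently referenced	reactivate;
+ *	clean, not precious	reclaim (free) immediately;
+ *	dirty or precious	unmap, mark busy, send to a pager.]
+ */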
+
+void vm_pageout_scan_continue()
+{
+ /*
+ * We just paused to let the pagers catch up.
+ * If vm_page_laundry_count is still high,
+ * then we aren't waiting long enough.
+	 * If we have paused vm_pageout_pause_max consecutive times
+	 * without adjusting vm_pageout_burst_wait, it might be too
+	 * big, so we decrease it.
+ */
+
+ vm_page_lock_queues();
+ if (vm_page_laundry_count > vm_pageout_burst_min) {
+ vm_pageout_burst_wait++;
+ vm_pageout_pause_count = 0;
+ } else if (++vm_pageout_pause_count > vm_pageout_pause_max) {
+ vm_pageout_burst_wait = (vm_pageout_burst_wait * 3) / 4;
+ if (vm_pageout_burst_wait < 1)
+ vm_pageout_burst_wait = 1;
+ vm_pageout_pause_count = 0;
+ }
+ vm_page_unlock_queues();
+
+#ifdef CONTINUATIONS
+ vm_pageout_continue();
+ /*NOTREACHED*/
+#endif /* CONTINUATIONS */
+}
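+
+/*
+ * [Editor's note: a numeric sketch of the feedback above, with made-up
+ *  values; the real defaults come from the VM_PAGEOUT_* macros, which
+ *  are not shown here.  Suppose vm_pageout_burst_wait is 30 msecs per
+ *  page and vm_pageout_pause_max is 10.  Each pause that still finds
+ *  the laundry count above vm_pageout_burst_min raises the wait to 31,
+ *  32, and so on.  Once more than 10 consecutive pauses go by without
+ *  that happening, the wait is scaled back to 3/4 of its value
+ *  (30 becomes 22 with integer division), never dropping below 1.]
+ */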
+
+/*
+ * vm_pageout is the high level pageout daemon.
+ */
+
+void vm_pageout_continue()
+{
+ /*
+ * The pageout daemon is never done, so loop forever.
+ * We should call vm_pageout_scan at least once each
+ * time we are woken, even if vm_page_free_wanted is
+ * zero, to check vm_page_free_target and
+ * vm_page_inactive_target.
+ */
+
+ for (;;) {
+ vm_pageout_scan();
+ /* we hold vm_page_queue_free_lock now */
+ assert(vm_page_free_wanted == 0);
+
+ assert_wait(&vm_page_free_wanted, FALSE);
+ simple_unlock(&vm_page_queue_free_lock);
+ counter(c_vm_pageout_block++);
+ thread_block(vm_pageout_continue);
+ }
+}
+
+void vm_pageout()
+{
+ int free_after_reserve;
+
+ current_thread()->vm_privilege = TRUE;
+ stack_privilege(current_thread());
+
+ /*
+ * Initialize some paging parameters.
+ */
+
+ if (vm_pageout_burst_max == 0)
+ vm_pageout_burst_max = VM_PAGEOUT_BURST_MAX;
+
+ if (vm_pageout_burst_min == 0)
+ vm_pageout_burst_min = VM_PAGEOUT_BURST_MIN;
+
+ if (vm_pageout_burst_wait == 0)
+ vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
+
+ if (vm_pageout_empty_wait == 0)
+ vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
+
+ if (vm_page_free_reserved == 0)
+ vm_page_free_reserved = VM_PAGE_FREE_RESERVED;
+
+ if (vm_pageout_pause_max == 0)
+ vm_pageout_pause_max = VM_PAGEOUT_PAUSE_MAX;
+
+ if (vm_pageout_reserved_internal == 0)
+ vm_pageout_reserved_internal =
+ VM_PAGEOUT_RESERVED_INTERNAL(vm_page_free_reserved);
+
+ if (vm_pageout_reserved_really == 0)
+ vm_pageout_reserved_really =
+ VM_PAGEOUT_RESERVED_REALLY(vm_page_free_reserved);
+
+ free_after_reserve = vm_page_free_count - vm_page_free_reserved;
+
+ if (vm_page_free_min == 0)
+ vm_page_free_min = vm_page_free_reserved +
+ VM_PAGE_FREE_MIN(free_after_reserve);
+
+ if (vm_page_free_target == 0)
+ vm_page_free_target = vm_page_free_reserved +
+ VM_PAGE_FREE_TARGET(free_after_reserve);
+
+ if (vm_page_free_target < vm_page_free_min + 5)
+ vm_page_free_target = vm_page_free_min + 5;
+
+ /*
+ * vm_pageout_scan will set vm_page_inactive_target.
+ */
+
+ vm_pageout_continue();
+ /*NOTREACHED*/
+}
diff --git a/vm/vm_pageout.h b/vm/vm_pageout.h
new file mode 100644
index 00000000..5b47a5e0
--- /dev/null
+++ b/vm/vm_pageout.h
@@ -0,0 +1,46 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_pageout.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1986
+ *
+ * Declarations for the pageout daemon interface.
+ */
+
+#ifndef _VM_VM_PAGEOUT_H_
+#define _VM_VM_PAGEOUT_H_
+
+#include <vm/vm_page.h>
+
+/*
+ * Exported routines.
+ */
+
+extern vm_page_t vm_pageout_setup();
+extern void vm_pageout_page();
+
+#endif	/* _VM_VM_PAGEOUT_H_ */
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
new file mode 100644
index 00000000..5c4f2822
--- /dev/null
+++ b/vm/vm_resident.c
@@ -0,0 +1,1505 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_page.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Resident memory management module.
+ */
+#include <cpus.h>
+
+#include <mach/vm_prot.h>
+#include <kern/counters.h>
+#include <kern/sched_prim.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <mach/vm_statistics.h>
+#include "vm_param.h"
+#include <kern/xpr.h>
+#include <kern/zalloc.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_kern.h>
+
+#include <mach_vm_debug.h>
+#if MACH_VM_DEBUG
+#include <mach/kern_return.h>
+#include <mach_debug/hash_info.h>
+#include <vm/vm_user.h>
+#endif
+
+/* in zalloc.c XXX */
+extern vm_offset_t zdata;
+extern vm_size_t zdata_size;
+
+/*
+ *	Associated with each page of user-allocatable memory is a
+ * page structure.
+ */
+
+/*
+ * These variables record the values returned by vm_page_bootstrap,
+ * for debugging purposes. The implementation of pmap_steal_memory
+ * and pmap_startup here also uses them internally.
+ */
+
+vm_offset_t virtual_space_start;
+vm_offset_t virtual_space_end;
+
+/*
+ * The vm_page_lookup() routine, which provides for fast
+ * (virtual memory object, offset) to page lookup, employs
+ * the following hash table. The vm_page_{insert,remove}
+ * routines install and remove associations in the table.
+ * [This table is often called the virtual-to-physical,
+ * or VP, table.]
+ */
+typedef struct {
+ decl_simple_lock_data(,lock)
+ vm_page_t pages;
+} vm_page_bucket_t;
+
+vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
+unsigned int vm_page_bucket_count = 0; /* How big is array? */
+unsigned int vm_page_hash_mask; /* Mask for hash function */
+
+/*
+ * Resident page structures are initialized from
+ * a template (see vm_page_alloc).
+ *
+ * When adding a new field to the virtual memory
+ * object structure, be sure to add initialization
+ * (see vm_page_bootstrap).
+ */
+struct vm_page vm_page_template;
+
+/*
+ * Resident pages that represent real memory
+ * are allocated from a free list.
+ */
+vm_page_t vm_page_queue_free;
+vm_page_t vm_page_queue_fictitious;
+decl_simple_lock_data(,vm_page_queue_free_lock)
+unsigned int vm_page_free_wanted;
+int vm_page_free_count;
+int vm_page_fictitious_count;
+
+unsigned int vm_page_free_count_minimum; /* debugging */
+
+/*
+ * Occasionally, the virtual memory system uses
+ * resident page structures that do not refer to
+ * real pages, for example to leave a page with
+ * important state information in the VP table.
+ *
+ * These page structures are allocated the way
+ * most other kernel structures are.
+ */
+zone_t vm_page_zone;
+
+/*
+ * Fictitious pages don't have a physical address,
+ * but we must initialize phys_addr to something.
+ * For debugging, this should be a strange value
+ * that the pmap module can recognize in assertions.
+ */
+vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
+
+/*
+ * Resident page structures are also chained on
+ * queues that are used by the page replacement
+ * system (pageout daemon). These queues are
+ * defined here, but are shared by the pageout
+ * module.
+ */
+queue_head_t vm_page_queue_active;
+queue_head_t vm_page_queue_inactive;
+decl_simple_lock_data(,vm_page_queue_lock)
+int vm_page_active_count;
+int vm_page_inactive_count;
+int vm_page_wire_count;
+
+/*
+ * Several page replacement parameters are also
+ * shared with this module, so that page allocation
+ * (done here in vm_page_alloc) can trigger the
+ * pageout daemon.
+ */
+int vm_page_free_target = 0;
+int vm_page_free_min = 0;
+int vm_page_inactive_target = 0;
+int vm_page_free_reserved = 0;
+int vm_page_laundry_count = 0;
+
+/*
+ * The VM system has a couple of heuristics for deciding
+ * that pages are "uninteresting" and should be placed
+ * on the inactive queue as likely candidates for replacement.
+ * These variables let the heuristics be controlled at run-time
+ * to make experimentation easier.
+ */
+
+boolean_t vm_page_deactivate_behind = TRUE;
+boolean_t vm_page_deactivate_hint = TRUE;
+
+/*
+ * vm_page_bootstrap:
+ *
+ * Initializes the resident memory module.
+ *
+ * Allocates memory for the page cells, and
+ * for the object/offset-to-page hash table headers.
+ * Each page cell is initialized and placed on the free list.
+ * Returns the range of available kernel virtual memory.
+ */
+
+void vm_page_bootstrap(
+ vm_offset_t *startp,
+ vm_offset_t *endp)
+{
+ register vm_page_t m;
+ int i;
+
+ /*
+ * Initialize the vm_page template.
+ */
+
+ m = &vm_page_template;
+ m->object = VM_OBJECT_NULL; /* reset later */
+ m->offset = 0; /* reset later */
+ m->wire_count = 0;
+
+ m->inactive = FALSE;
+ m->active = FALSE;
+ m->laundry = FALSE;
+ m->free = FALSE;
+
+ m->busy = TRUE;
+ m->wanted = FALSE;
+ m->tabled = FALSE;
+ m->fictitious = FALSE;
+ m->private = FALSE;
+ m->absent = FALSE;
+ m->error = FALSE;
+ m->dirty = FALSE;
+ m->precious = FALSE;
+ m->reference = FALSE;
+
+ m->phys_addr = 0; /* reset later */
+
+ m->page_lock = VM_PROT_NONE;
+ m->unlock_request = VM_PROT_NONE;
+
+ /*
+ * Initialize the page queues.
+ */
+
+ simple_lock_init(&vm_page_queue_free_lock);
+ simple_lock_init(&vm_page_queue_lock);
+
+ vm_page_queue_free = VM_PAGE_NULL;
+ vm_page_queue_fictitious = VM_PAGE_NULL;
+ queue_init(&vm_page_queue_active);
+ queue_init(&vm_page_queue_inactive);
+
+ vm_page_free_wanted = 0;
+
+ /*
+ * Steal memory for the zone system.
+ */
+
+ kentry_data_size = kentry_count * sizeof(struct vm_map_entry);
+ kentry_data = pmap_steal_memory(kentry_data_size);
+
+ zdata = pmap_steal_memory(zdata_size);
+
+ /*
+ * Allocate (and initialize) the virtual-to-physical
+ * table hash buckets.
+ *
+ * The number of buckets should be a power of two to
+ * get a good hash function. The following computation
+ * chooses the first power of two that is greater
+ * than the number of physical pages in the system.
+ */
+
+ if (vm_page_bucket_count == 0) {
+ unsigned int npages = pmap_free_pages();
+
+ vm_page_bucket_count = 1;
+ while (vm_page_bucket_count < npages)
+ vm_page_bucket_count <<= 1;
+ }
+
+ vm_page_hash_mask = vm_page_bucket_count - 1;
+
+ if (vm_page_hash_mask & vm_page_bucket_count)
+ printf("vm_page_bootstrap: WARNING -- strange page hash\n");
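+
+	/*
+	 * [Editor's note: for example, with 4000 physical pages the
+	 *  loop above stops at 4096 buckets, so vm_page_hash_mask is
+	 *  0xfff and there is roughly one resident page per bucket.]
+	 */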
+
+ vm_page_buckets = (vm_page_bucket_t *)
+ pmap_steal_memory(vm_page_bucket_count *
+ sizeof(vm_page_bucket_t));
+
+ for (i = 0; i < vm_page_bucket_count; i++) {
+ register vm_page_bucket_t *bucket = &vm_page_buckets[i];
+
+ bucket->pages = VM_PAGE_NULL;
+ simple_lock_init(&bucket->lock);
+ }
+
+ /*
+ * Machine-dependent code allocates the resident page table.
+ * It uses vm_page_init to initialize the page frames.
+ * The code also returns to us the virtual space available
+ * to the kernel. We don't trust the pmap module
+ * to get the alignment right.
+ */
+
+ pmap_startup(&virtual_space_start, &virtual_space_end);
+ virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
+
+ *startp = virtual_space_start;
+ *endp = virtual_space_end;
+
+ printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
+ vm_page_free_count_minimum = vm_page_free_count;
+}
+
+#ifndef MACHINE_PAGES
+/*
+ * We implement pmap_steal_memory and pmap_startup with the help
+ * of two simpler functions, pmap_virtual_space and pmap_next_page.
+ */
+
+vm_offset_t pmap_steal_memory(
+ vm_size_t size)
+{
+ vm_offset_t addr, vaddr, paddr;
+
+ /*
+	 * We round the size up to an integer (4-byte) multiple.
+ */
+
+ size = (size + 3) &~ 3;
+
+ /*
+ * If this is the first call to pmap_steal_memory,
+ * we have to initialize ourself.
+ */
+
+ if (virtual_space_start == virtual_space_end) {
+ pmap_virtual_space(&virtual_space_start, &virtual_space_end);
+
+ /*
+ * The initial values must be aligned properly, and
+ * we don't trust the pmap module to do it right.
+ */
+
+ virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
+ }
+
+ /*
+ * Allocate virtual memory for this request.
+ */
+
+ addr = virtual_space_start;
+ virtual_space_start += size;
+
+ /*
+ * Allocate and map physical pages to back new virtual pages.
+ */
+
+ for (vaddr = round_page(addr);
+ vaddr < addr + size;
+ vaddr += PAGE_SIZE) {
+ if (!pmap_next_page(&paddr))
+ panic("pmap_steal_memory");
+
+ /*
+ * XXX Logically, these mappings should be wired,
+ * but some pmap modules barf if they are.
+ */
+
+ pmap_enter(kernel_pmap, vaddr, paddr,
+ VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ }
+
+ return addr;
+}
+
+void pmap_startup(
+ vm_offset_t *startp,
+ vm_offset_t *endp)
+{
+ unsigned int i, npages, pages_initialized;
+ vm_page_t pages;
+ vm_offset_t paddr;
+
+ /*
+ * We calculate how many page frames we will have
+ * and then allocate the page structures in one chunk.
+ */
+
+ npages = ((PAGE_SIZE * pmap_free_pages() +
+ (round_page(virtual_space_start) - virtual_space_start)) /
+ (PAGE_SIZE + sizeof *pages));
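+
+	/*
+	 * [Editor's note: this solves, approximately,
+	 *	npages * (PAGE_SIZE + sizeof *pages) == bytes available,
+	 *  so the vm_page array and the page frames it describes both
+	 *  fit in the memory pmap_free_pages() reports.  With made-up
+	 *  numbers, 4 KB pages and a 64-byte struct vm_page would spend
+	 *  about 1.5% of the stolen memory on the array itself.]
+	 */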
+
+ pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
+
+ /*
+ * Initialize the page frames.
+ */
+
+ for (i = 0, pages_initialized = 0; i < npages; i++) {
+ if (!pmap_next_page(&paddr))
+ break;
+
+ vm_page_init(&pages[i], paddr);
+ pages_initialized++;
+ }
+
+ /*
+ * Release pages in reverse order so that physical pages
+ * initially get allocated in ascending addresses. This keeps
+ * the devices (which must address physical memory) happy if
+ * they require several consecutive pages.
+ */
+
+ for (i = pages_initialized; i > 0; i--) {
+ vm_page_release(&pages[i - 1]);
+ }
+
+ /*
+ * We have to re-align virtual_space_start,
+ * because pmap_steal_memory has been using it.
+ */
+
+ virtual_space_start = round_page(virtual_space_start);
+
+ *startp = virtual_space_start;
+ *endp = virtual_space_end;
+}
+#endif /* MACHINE_PAGES */
+
+/*
+ * Routine: vm_page_module_init
+ * Purpose:
+ * Second initialization pass, to be done after
+ * the basic VM system is ready.
+ */
+void vm_page_module_init(void)
+{
+ vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
+ VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
+ PAGE_SIZE,
+ 0, "vm pages");
+}
+
+/*
+ * Routine: vm_page_create
+ * Purpose:
+ * After the VM system is up, machine-dependent code
+ *	may stumble across more physical memory, for example
+ *	memory that it was reserving for a frame buffer.
+ * vm_page_create turns this memory into available pages.
+ */
+
+void vm_page_create(
+ vm_offset_t start,
+ vm_offset_t end)
+{
+ vm_offset_t paddr;
+ vm_page_t m;
+
+ for (paddr = round_page(start);
+ paddr < trunc_page(end);
+ paddr += PAGE_SIZE) {
+ m = (vm_page_t) zalloc(vm_page_zone);
+ if (m == VM_PAGE_NULL)
+ panic("vm_page_create");
+
+ vm_page_init(m, paddr);
+ vm_page_release(m);
+ }
+}
+
+/*
+ * vm_page_hash:
+ *
+ * Distributes the object/offset key pair among hash buckets.
+ *
+ * NOTE: To get a good hash function, the bucket count should
+ * be a power of two.
+ */
+#define vm_page_hash(object, offset) \
+ (((unsigned int)(vm_offset_t)object + (unsigned int)atop(offset)) \
+ & vm_page_hash_mask)
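+
+/*
+ * [Editor's note: a worked example with made-up numbers.  With 4 KB
+ *  pages (so atop(offset) == offset >> 12) and vm_page_hash_mask ==
+ *  0xfff, an object pointer of 0xc0123400 and an offset of 0x5000
+ *  hash to (0xc0123400 + 5) & 0xfff == 0x405.]
+ */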
+
+/*
+ * vm_page_insert: [ internal use only ]
+ *
+ *	Inserts the given mem entry into the object/offset-page
+ *	table and the object page list.
+ *
+ * The object and page must be locked.
+ */
+
+void vm_page_insert(
+ register vm_page_t mem,
+ register vm_object_t object,
+ register vm_offset_t offset)
+{
+ register vm_page_bucket_t *bucket;
+
+ VM_PAGE_CHECK(mem);
+
+ if (mem->tabled)
+ panic("vm_page_insert");
+
+ /*
+ * Record the object/offset pair in this page
+ */
+
+ mem->object = object;
+ mem->offset = offset;
+
+ /*
+	 * Insert it into the object/offset hash table
+ */
+
+ bucket = &vm_page_buckets[vm_page_hash(object, offset)];
+ simple_lock(&bucket->lock);
+ mem->next = bucket->pages;
+ bucket->pages = mem;
+ simple_unlock(&bucket->lock);
+
+ /*
+ * Now link into the object's list of backed pages.
+ */
+
+ queue_enter(&object->memq, mem, vm_page_t, listq);
+ mem->tabled = TRUE;
+
+ /*
+ * Show that the object has one more resident page.
+ */
+
+ object->resident_page_count++;
+
+ /*
+ * Detect sequential access and inactivate previous page.
+ * We ignore busy pages.
+ */
+
+ if (vm_page_deactivate_behind &&
+ (offset == object->last_alloc + PAGE_SIZE)) {
+ vm_page_t last_mem;
+
+ last_mem = vm_page_lookup(object, object->last_alloc);
+ if ((last_mem != VM_PAGE_NULL) && !last_mem->busy)
+ vm_page_deactivate(last_mem);
+ }
+ object->last_alloc = offset;
+}
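+
+/*
+ * [Editor's note: the deactivate-behind heuristic above means that when
+ *  pages are inserted at offsets 0, PAGE_SIZE, 2*PAGE_SIZE, ... in
+ *  strict sequence, each insertion deactivates the previous page
+ *  (unless it is busy), since a purely sequential reader is unlikely
+ *  to touch it again.  Setting vm_page_deactivate_behind to FALSE at
+ *  run time disables this.]
+ */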
+
+/*
+ * vm_page_replace:
+ *
+ * Exactly like vm_page_insert, except that we first
+ * remove any existing page at the given offset in object
+ * and we don't do deactivate-behind.
+ *
+ * The object and page must be locked.
+ */
+
+void vm_page_replace(
+ register vm_page_t mem,
+ register vm_object_t object,
+ register vm_offset_t offset)
+{
+ register vm_page_bucket_t *bucket;
+
+ VM_PAGE_CHECK(mem);
+
+ if (mem->tabled)
+ panic("vm_page_replace");
+
+ /*
+ * Record the object/offset pair in this page
+ */
+
+ mem->object = object;
+ mem->offset = offset;
+
+ /*
+	 * Insert it into the object/offset hash table,
+ * replacing any page that might have been there.
+ */
+
+ bucket = &vm_page_buckets[vm_page_hash(object, offset)];
+ simple_lock(&bucket->lock);
+ if (bucket->pages) {
+ vm_page_t *mp = &bucket->pages;
+ register vm_page_t m = *mp;
+ do {
+ if (m->object == object && m->offset == offset) {
+ /*
+ * Remove page from bucket and from object,
+ * and return it to the free list.
+ */
+ *mp = m->next;
+ queue_remove(&object->memq, m, vm_page_t,
+ listq);
+ m->tabled = FALSE;
+ object->resident_page_count--;
+
+ /*
+ * Return page to the free list.
+ * Note the page is not tabled now, so this
+ * won't self-deadlock on the bucket lock.
+ */
+
+ vm_page_free(m);
+ break;
+ }
+ mp = &m->next;
+ } while ((m = *mp) != 0);
+ mem->next = bucket->pages;
+ } else {
+ mem->next = VM_PAGE_NULL;
+ }
+ bucket->pages = mem;
+ simple_unlock(&bucket->lock);
+
+ /*
+ * Now link into the object's list of backed pages.
+ */
+
+ queue_enter(&object->memq, mem, vm_page_t, listq);
+ mem->tabled = TRUE;
+
+ /*
+ * And show that the object has one more resident
+ * page.
+ */
+
+ object->resident_page_count++;
+}
+
+/*
+ * vm_page_remove: [ internal use only ]
+ *
+ * Removes the given mem entry from the object/offset-page
+ * table and the object page list.
+ *
+ * The object and page must be locked.
+ */
+
+void vm_page_remove(
+ register vm_page_t mem)
+{
+ register vm_page_bucket_t *bucket;
+ register vm_page_t this;
+
+ assert(mem->tabled);
+ VM_PAGE_CHECK(mem);
+
+ /*
+	 * Remove from the object/offset hash table
+ */
+
+ bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
+ simple_lock(&bucket->lock);
+ if ((this = bucket->pages) == mem) {
+ /* optimize for common case */
+
+ bucket->pages = mem->next;
+ } else {
+ register vm_page_t *prev;
+
+ for (prev = &this->next;
+ (this = *prev) != mem;
+ prev = &this->next)
+ continue;
+ *prev = this->next;
+ }
+ simple_unlock(&bucket->lock);
+
+ /*
+ * Now remove from the object's list of backed pages.
+ */
+
+ queue_remove(&mem->object->memq, mem, vm_page_t, listq);
+
+ /*
+ * And show that the object has one fewer resident
+ * page.
+ */
+
+ mem->object->resident_page_count--;
+
+ mem->tabled = FALSE;
+}
+
+/*
+ * vm_page_lookup:
+ *
+ * Returns the page associated with the object/offset
+ * pair specified; if none is found, VM_PAGE_NULL is returned.
+ *
+ * The object must be locked. No side effects.
+ */
+
+vm_page_t vm_page_lookup(
+ register vm_object_t object,
+ register vm_offset_t offset)
+{
+ register vm_page_t mem;
+ register vm_page_bucket_t *bucket;
+
+ /*
+ * Search the hash table for this object/offset pair
+ */
+
+ bucket = &vm_page_buckets[vm_page_hash(object, offset)];
+
+ simple_lock(&bucket->lock);
+ for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
+ VM_PAGE_CHECK(mem);
+ if ((mem->object == object) && (mem->offset == offset))
+ break;
+ }
+ simple_unlock(&bucket->lock);
+ return mem;
+}
+
+/*
+ * vm_page_rename:
+ *
+ * Move the given memory entry from its
+ * current object to the specified target object/offset.
+ *
+ * The object must be locked.
+ */
+void vm_page_rename(
+ register vm_page_t mem,
+ register vm_object_t new_object,
+ vm_offset_t new_offset)
+{
+ /*
+ * Changes to mem->object require the page lock because
+ * the pageout daemon uses that lock to get the object.
+ */
+
+ vm_page_lock_queues();
+ vm_page_remove(mem);
+ vm_page_insert(mem, new_object, new_offset);
+ vm_page_unlock_queues();
+}
+
+/*
+ * vm_page_init:
+ *
+ * Initialize the fields in a new page.
+ * This takes a structure with random values and initializes it
+ * so that it can be given to vm_page_release or vm_page_insert.
+ */
+void vm_page_init(
+ vm_page_t mem,
+ vm_offset_t phys_addr)
+{
+ *mem = vm_page_template;
+ mem->phys_addr = phys_addr;
+}
+
+/*
+ * vm_page_grab_fictitious:
+ *
+ * Remove a fictitious page from the free list.
+ * Returns VM_PAGE_NULL if there are no free pages.
+ */
+
+vm_page_t vm_page_grab_fictitious(void)
+{
+ register vm_page_t m;
+
+ simple_lock(&vm_page_queue_free_lock);
+ m = vm_page_queue_fictitious;
+ if (m != VM_PAGE_NULL) {
+ vm_page_fictitious_count--;
+ vm_page_queue_fictitious = (vm_page_t) m->pageq.next;
+ m->free = FALSE;
+ }
+ simple_unlock(&vm_page_queue_free_lock);
+
+ return m;
+}
+
+/*
+ * vm_page_release_fictitious:
+ *
+ * Release a fictitious page to the free list.
+ */
+
+void vm_page_release_fictitious(
+ register vm_page_t m)
+{
+ simple_lock(&vm_page_queue_free_lock);
+ if (m->free)
+ panic("vm_page_release_fictitious");
+ m->free = TRUE;
+ m->pageq.next = (queue_entry_t) vm_page_queue_fictitious;
+ vm_page_queue_fictitious = m;
+ vm_page_fictitious_count++;
+ simple_unlock(&vm_page_queue_free_lock);
+}
+
+/*
+ * vm_page_more_fictitious:
+ *
+ * Add more fictitious pages to the free list.
+ * Allowed to block.
+ */
+
+int vm_page_fictitious_quantum = 5;
+
+void vm_page_more_fictitious(void)
+{
+ register vm_page_t m;
+ int i;
+
+ for (i = 0; i < vm_page_fictitious_quantum; i++) {
+ m = (vm_page_t) zalloc(vm_page_zone);
+ if (m == VM_PAGE_NULL)
+ panic("vm_page_more_fictitious");
+
+ vm_page_init(m, vm_page_fictitious_addr);
+ m->fictitious = TRUE;
+ vm_page_release_fictitious(m);
+ }
+}
+
+/*
+ * vm_page_convert:
+ *
+ * Attempt to convert a fictitious page into a real page.
+ */
+
+boolean_t vm_page_convert(
+ register vm_page_t m)
+{
+ register vm_page_t real_m;
+
+ real_m = vm_page_grab();
+ if (real_m == VM_PAGE_NULL)
+ return FALSE;
+
+ m->phys_addr = real_m->phys_addr;
+ m->fictitious = FALSE;
+
+ real_m->phys_addr = vm_page_fictitious_addr;
+ real_m->fictitious = TRUE;
+
+ vm_page_release_fictitious(real_m);
+ return TRUE;
+}
+
+/*
+ * vm_page_grab:
+ *
+ * Remove a page from the free list.
+ * Returns VM_PAGE_NULL if the free list is too small.
+ */
+
+vm_page_t vm_page_grab(void)
+{
+ register vm_page_t mem;
+
+ simple_lock(&vm_page_queue_free_lock);
+
+ /*
+ * Only let privileged threads (involved in pageout)
+ * dip into the reserved pool.
+ */
+
+ if ((vm_page_free_count < vm_page_free_reserved) &&
+ !current_thread()->vm_privilege) {
+ simple_unlock(&vm_page_queue_free_lock);
+ return VM_PAGE_NULL;
+ }
+
+ if (vm_page_queue_free == VM_PAGE_NULL)
+ panic("vm_page_grab");
+
+ if (--vm_page_free_count < vm_page_free_count_minimum)
+ vm_page_free_count_minimum = vm_page_free_count;
+ mem = vm_page_queue_free;
+ vm_page_queue_free = (vm_page_t) mem->pageq.next;
+ mem->free = FALSE;
+ simple_unlock(&vm_page_queue_free_lock);
+
+ /*
+ * Decide if we should poke the pageout daemon.
+ * We do this if the free count is less than the low
+ * water mark, or if the free count is less than the high
+ * water mark (but above the low water mark) and the inactive
+ * count is less than its target.
+ *
+ * We don't have the counts locked ... if they change a little,
+ * it doesn't really matter.
+ */
+
+ if ((vm_page_free_count < vm_page_free_min) ||
+ ((vm_page_free_count < vm_page_free_target) &&
+ (vm_page_inactive_count < vm_page_inactive_target)))
+ thread_wakeup((event_t) &vm_page_free_wanted);
+
+ return mem;
+}
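+
+/*
+ * [Editor's note: with made-up numbers, say vm_page_free_min == 100,
+ *  vm_page_free_target == 200 and vm_page_inactive_target == 500.
+ *  The daemon is then poked whenever the free count drops below 100,
+ *  or when it is between 100 and 199 while fewer than 500 pages sit
+ *  on the inactive queue.]
+ */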
+
+vm_offset_t vm_page_grab_phys_addr(void)
+{
+ vm_page_t p = vm_page_grab();
+ if (p == VM_PAGE_NULL)
+ return -1;
+ else
+ return p->phys_addr;
+}
+
+/*
+ * vm_page_grab_contiguous_pages:
+ *
+ *	Take N pages off the free list; the pages must
+ *	cover a contiguous range of physical addresses.
+ *	[Used by device drivers to cope with DMA limitations.]
+ *
+ *	Returns the page descriptors in ascending order, or
+ *	KERN_RESOURCE_SHORTAGE if they could not be found.
+ */
+
+/* Biggest phys page number for the pages we handle in VM */
+
+vm_size_t vm_page_big_pagenum = 0; /* Set this before call! */
+
+kern_return_t
+vm_page_grab_contiguous_pages(
+ int npages,
+ vm_page_t pages[],
+ natural_t *bits)
+{
+ register int first_set;
+ int size, alloc_size;
+ kern_return_t ret;
+ vm_page_t mem, prevmem;
+
+#ifndef NBBY
+#define NBBY	8	/* size in bits of sizeof()'s unit */
+#endif
+
+#define NBPEL (sizeof(natural_t)*NBBY)
+
+ size = (vm_page_big_pagenum + NBPEL - 1)
+ & ~(NBPEL - 1); /* in bits */
+
+ size = size / NBBY; /* in bytes */
+
+ /*
+	 * If we are called before the VM system is fully functional,
+	 * the invoker must provide us with the work space.  [One bit
+	 * per page, starting at phys 0 and up to vm_page_big_pagenum.]
+ */
+ if (bits == 0) {
+ alloc_size = round_page(size);
+ if (kmem_alloc_wired(kernel_map,
+ (vm_offset_t *)&bits,
+ alloc_size)
+ != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+ } else
+ alloc_size = 0;
+
+ bzero(bits, size);
+
+ /*
+	 * A very large granularity call; it's rare, so that is OK.
+ */
+ simple_lock(&vm_page_queue_free_lock);
+
+ /*
+ * Do not dip into the reserved pool.
+ */
+
+ if (vm_page_free_count < vm_page_free_reserved) {
+ simple_unlock(&vm_page_queue_free_lock);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /*
+	 * First pass through: build a big bit-array of
+	 * the pages that are free.  It is not going to
+	 * be too large anyway; in 4K we can fit info
+	 * for 32K pages.
+ */
+ mem = vm_page_queue_free;
+ while (mem) {
+ register int word_index, bit_index;
+
+ bit_index = (mem->phys_addr >> PAGE_SHIFT);
+ word_index = bit_index / NBPEL;
+ bit_index = bit_index - (word_index * NBPEL);
+ bits[word_index] |= 1 << bit_index;
+
+ mem = (vm_page_t) mem->pageq.next;
+ }
+
+ /*
+ * Second loop. Scan the bit array for NPAGES
+ * contiguous bits. That gives us, if any,
+ * the range of pages we will be grabbing off
+ * the free list.
+ */
+ {
+ register int bits_so_far = 0, i;
+
+ first_set = 0;
+
+ for (i = 0; i < size; i += sizeof(natural_t)) {
+
+ register natural_t v = bits[i / sizeof(natural_t)];
+ register int bitpos;
+
+ /*
+ * Bitscan this one word
+ */
+ if (v) {
+ /*
+ * keep counting them beans ?
+ */
+ bitpos = 0;
+
+ if (bits_so_far) {
+count_ones:
+ while (v & 1) {
+ bitpos++;
+ /*
+ * got enough beans ?
+ */
+ if (++bits_so_far == npages)
+ goto found_em;
+ v >>= 1;
+ }
+ /* if we are being lucky, roll again */
+ if (bitpos == NBPEL)
+ continue;
+ }
+
+ /*
+ * search for beans here
+ */
+ bits_so_far = 0;
+count_zeroes:
+ while ((bitpos < NBPEL) && ((v & 1) == 0)) {
+ bitpos++;
+ v >>= 1;
+ }
+ if (v & 1) {
+ first_set = (i * NBBY) + bitpos;
+ goto count_ones;
+ }
+ }
+ /*
+ * No luck
+ */
+ bits_so_far = 0;
+ }
+ }
+
+ /*
+ * We could not find enough contiguous pages.
+ */
+not_found_em:
+ simple_unlock(&vm_page_queue_free_lock);
+
+ ret = KERN_RESOURCE_SHORTAGE;
+ goto out;
+
+ /*
+ * Final pass. Now we know which pages we want.
+ * Scan the list until we find them all, grab
+ * pages as we go. FIRST_SET tells us where
+ * in the bit-array our pages start.
+ */
+found_em:
+ vm_page_free_count -= npages;
+ if (vm_page_free_count < vm_page_free_count_minimum)
+ vm_page_free_count_minimum = vm_page_free_count;
+
+ {
+ register vm_offset_t first_phys, last_phys;
+
+ /* cache values for compare */
+ first_phys = first_set << PAGE_SHIFT;
+ last_phys = first_phys + (npages << PAGE_SHIFT);/* not included */
+
+ /* running pointers */
+ mem = vm_page_queue_free;
+ prevmem = VM_PAGE_NULL;
+
+ while (mem) {
+
+ register vm_offset_t addr;
+
+ addr = mem->phys_addr;
+
+ if ((addr >= first_phys) &&
+ (addr < last_phys)) {
+ if (prevmem)
+ prevmem->pageq.next = mem->pageq.next;
+ pages[(addr - first_phys) >> PAGE_SHIFT] = mem;
+ mem->free = FALSE;
+ /*
+ * Got them all ?
+ */
+ if (--npages == 0) break;
+ } else
+ prevmem = mem;
+
+ mem = (vm_page_t) mem->pageq.next;
+ }
+ }
+
+ simple_unlock(&vm_page_queue_free_lock);
+
+ /*
+ * Decide if we should poke the pageout daemon.
+ * We do this if the free count is less than the low
+ * water mark, or if the free count is less than the high
+ * water mark (but above the low water mark) and the inactive
+ * count is less than its target.
+ *
+ * We don't have the counts locked ... if they change a little,
+ * it doesn't really matter.
+ */
+
+ if ((vm_page_free_count < vm_page_free_min) ||
+ ((vm_page_free_count < vm_page_free_target) &&
+ (vm_page_inactive_count < vm_page_inactive_target)))
+ thread_wakeup(&vm_page_free_wanted);
+
+ ret = KERN_SUCCESS;
+out:
+ if (alloc_size)
+ kmem_free(kernel_map, (vm_offset_t) bits, alloc_size);
+
+ return ret;
+}
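+
+/*
+ * [Editor's note: a small worked example with made-up numbers.  Asking
+ *  for npages == 3 when the free pages have physical page numbers
+ *  {2, 3, 4, 7, 8} sets exactly those bits in the array.  The scan
+ *  finds the run of three consecutive ones starting at bit 2, so
+ *  first_set == 2, and the final pass removes the pages whose physical
+ *  addresses fall in [2*PAGE_SIZE, 5*PAGE_SIZE) from the free list,
+ *  returning them in ascending order in pages[].]
+ */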
+
+/*
+ * vm_page_release:
+ *
+ * Return a page to the free list.
+ */
+
+void vm_page_release(
+ register vm_page_t mem)
+{
+ simple_lock(&vm_page_queue_free_lock);
+ if (mem->free)
+ panic("vm_page_release");
+ mem->free = TRUE;
+ mem->pageq.next = (queue_entry_t) vm_page_queue_free;
+ vm_page_queue_free = mem;
+ vm_page_free_count++;
+
+ /*
+	 * Check if we should wake up someone waiting for a page.
+ * But don't bother waking them unless they can allocate.
+ *
+ * We wakeup only one thread, to prevent starvation.
+ * Because the scheduling system handles wait queues FIFO,
+	 * if we wake up all waiting threads, one greedy thread
+	 * can starve multiple nice-guy threads.  When the threads
+	 * all wake up, the greedy thread runs first, grabs the page,
+ * and waits for another page. It will be the first to run
+ * when the next page is freed.
+ *
+ * However, there is a slight danger here.
+ * The thread we wake might not use the free page.
+ * Then the other threads could wait indefinitely
+ * while the page goes unused. To forestall this,
+ * the pageout daemon will keep making free pages
+ * as long as vm_page_free_wanted is non-zero.
+ */
+
+ if ((vm_page_free_wanted > 0) &&
+ (vm_page_free_count >= vm_page_free_reserved)) {
+ vm_page_free_wanted--;
+ thread_wakeup_one((event_t) &vm_page_free_count);
+ }
+
+ simple_unlock(&vm_page_queue_free_lock);
+}
+
+/*
+ * vm_page_wait:
+ *
+ * Wait for a page to become available.
+ * If there are plenty of free pages, then we don't sleep.
+ */
+
+void vm_page_wait(
+ void (*continuation)(void))
+{
+
+#ifndef CONTINUATIONS
+ assert (continuation == 0);
+#endif
+
+ /*
+ * We can't use vm_page_free_reserved to make this
+ * determination. Consider: some thread might
+ * need to allocate two pages. The first allocation
+ * succeeds, the second fails. After the first page is freed,
+ * a call to vm_page_wait must really block.
+ */
+
+ simple_lock(&vm_page_queue_free_lock);
+ if (vm_page_free_count < vm_page_free_target) {
+ if (vm_page_free_wanted++ == 0)
+ thread_wakeup((event_t)&vm_page_free_wanted);
+ assert_wait((event_t)&vm_page_free_count, FALSE);
+ simple_unlock(&vm_page_queue_free_lock);
+ if (continuation != 0) {
+ counter(c_vm_page_wait_block_user++);
+ thread_block(continuation);
+ } else {
+ counter(c_vm_page_wait_block_kernel++);
+ thread_block((void (*)(void)) 0);
+ }
+ } else
+ simple_unlock(&vm_page_queue_free_lock);
+}
+
+/*
+ * vm_page_alloc:
+ *
+ * Allocate and return a memory cell associated
+ * with this VM object/offset pair.
+ *
+ * Object must be locked.
+ */
+
+vm_page_t vm_page_alloc(
+ vm_object_t object,
+ vm_offset_t offset)
+{
+ register vm_page_t mem;
+
+ mem = vm_page_grab();
+ if (mem == VM_PAGE_NULL)
+ return VM_PAGE_NULL;
+
+ vm_page_lock_queues();
+ vm_page_insert(mem, object, offset);
+ vm_page_unlock_queues();
+
+ return mem;
+}
+
+/*
+ * vm_page_free:
+ *
+ * Returns the given page to the free list,
+ *	disassociating it from any VM object.
+ *
+ * Object and page queues must be locked prior to entry.
+ */
+void vm_page_free(
+ register vm_page_t mem)
+{
+ if (mem->free)
+ panic("vm_page_free");
+
+ if (mem->tabled)
+ vm_page_remove(mem);
+ VM_PAGE_QUEUES_REMOVE(mem);
+
+ if (mem->wire_count != 0) {
+ if (!mem->private && !mem->fictitious)
+ vm_page_wire_count--;
+ mem->wire_count = 0;
+ }
+
+ if (mem->laundry) {
+ vm_page_laundry_count--;
+ mem->laundry = FALSE;
+ }
+
+ PAGE_WAKEUP_DONE(mem);
+
+ if (mem->absent)
+ vm_object_absent_release(mem->object);
+
+ /*
+ * XXX The calls to vm_page_init here are
+ * really overkill.
+ */
+
+ if (mem->private || mem->fictitious) {
+ vm_page_init(mem, vm_page_fictitious_addr);
+ mem->fictitious = TRUE;
+ vm_page_release_fictitious(mem);
+ } else {
+ vm_page_init(mem, mem->phys_addr);
+ vm_page_release(mem);
+ }
+}
+
+/*
+ * vm_page_wire:
+ *
+ * Mark this page as wired down by yet
+ * another map, removing it from paging queues
+ * as necessary.
+ *
+ * The page's object and the page queues must be locked.
+ */
+void vm_page_wire(
+ register vm_page_t mem)
+{
+ VM_PAGE_CHECK(mem);
+
+ if (mem->wire_count == 0) {
+ VM_PAGE_QUEUES_REMOVE(mem);
+ if (!mem->private && !mem->fictitious)
+ vm_page_wire_count++;
+ }
+ mem->wire_count++;
+}
+
+/*
+ * vm_page_unwire:
+ *
+ * Release one wiring of this page, potentially
+ * enabling it to be paged again.
+ *
+ * The page's object and the page queues must be locked.
+ */
+void vm_page_unwire(
+ register vm_page_t mem)
+{
+ VM_PAGE_CHECK(mem);
+
+ if (--mem->wire_count == 0) {
+ queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
+ vm_page_active_count++;
+ mem->active = TRUE;
+ if (!mem->private && !mem->fictitious)
+ vm_page_wire_count--;
+ }
+}
+
+/*
+ * vm_page_deactivate:
+ *
+ * Returns the given page to the inactive list,
+ * indicating that no physical maps have access
+ * to this page. [Used by the physical mapping system.]
+ *
+ * The page queues must be locked.
+ */
+void vm_page_deactivate(
+ register vm_page_t m)
+{
+ VM_PAGE_CHECK(m);
+
+ /*
+ * This page is no longer very interesting. If it was
+ * interesting (active or inactive/referenced), then we
+ * clear the reference bit and (re)enter it in the
+ * inactive queue. Note wired pages should not have
+ * their reference bit cleared.
+ */
+
+ if (m->active || (m->inactive && m->reference)) {
+ if (!m->fictitious && !m->absent)
+ pmap_clear_reference(m->phys_addr);
+ m->reference = FALSE;
+ VM_PAGE_QUEUES_REMOVE(m);
+ }
+ if (m->wire_count == 0 && !m->inactive) {
+ queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
+ m->inactive = TRUE;
+ vm_page_inactive_count++;
+ }
+}
+
+/*
+ * vm_page_activate:
+ *
+ * Put the specified page on the active list (if appropriate).
+ *
+ * The page queues must be locked.
+ */
+
+void vm_page_activate(
+ register vm_page_t m)
+{
+ VM_PAGE_CHECK(m);
+
+ if (m->inactive) {
+ queue_remove(&vm_page_queue_inactive, m, vm_page_t,
+ pageq);
+ vm_page_inactive_count--;
+ m->inactive = FALSE;
+ }
+ if (m->wire_count == 0) {
+ if (m->active)
+ panic("vm_page_activate: already active");
+
+ queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
+ m->active = TRUE;
+ vm_page_active_count++;
+ }
+}
+
+/*
+ * vm_page_zero_fill:
+ *
+ * Zero-fill the specified page.
+ */
+void vm_page_zero_fill(
+ vm_page_t m)
+{
+ VM_PAGE_CHECK(m);
+
+ pmap_zero_page(m->phys_addr);
+}
+
+/*
+ * vm_page_copy:
+ *
+ * Copy one page to another
+ */
+
+void vm_page_copy(
+ vm_page_t src_m,
+ vm_page_t dest_m)
+{
+ VM_PAGE_CHECK(src_m);
+ VM_PAGE_CHECK(dest_m);
+
+ pmap_copy_page(src_m->phys_addr, dest_m->phys_addr);
+}
+
+#if MACH_VM_DEBUG
+/*
+ * Routine: vm_page_info
+ * Purpose:
+ * Return information about the global VP table.
+ * Fills the buffer with as much information as possible
+ * and returns the desired size of the buffer.
+ * Conditions:
+ * Nothing locked. The caller should provide
+ * possibly-pageable memory.
+ */
+
+unsigned int
+vm_page_info(
+ hash_info_bucket_t *info,
+ unsigned int count)
+{
+ int i;
+
+ if (vm_page_bucket_count < count)
+ count = vm_page_bucket_count;
+
+ for (i = 0; i < count; i++) {
+ vm_page_bucket_t *bucket = &vm_page_buckets[i];
+ unsigned int bucket_count = 0;
+ vm_page_t m;
+
+ simple_lock(&bucket->lock);
+ for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
+ bucket_count++;
+ simple_unlock(&bucket->lock);
+
+ /* don't touch pageable memory while holding locks */
+ info[i].hib_count = bucket_count;
+ }
+
+ return vm_page_bucket_count;
+}
+#endif /* MACH_VM_DEBUG */
+
+#include <mach_kdb.h>
+#if MACH_KDB
+#define printf kdbprintf
+
+/*
+ * Routine: vm_page_print [exported]
+ */
+void vm_page_print(p)
+ vm_page_t p;
+{
+ iprintf("Page 0x%X: object 0x%X,", (vm_offset_t) p, (vm_offset_t) p->object);
+	printf(" offset 0x%X,", (vm_offset_t) p->offset);
+	printf(" wire_count %d,", p->wire_count);
+ printf(" %s",
+ (p->active ? "active" : (p->inactive ? "inactive" : "loose")));
+ printf("%s",
+ (p->free ? " free" : ""));
+	printf("%s",
+ (p->laundry ? " laundry" : ""));
+ printf("%s",
+ (p->dirty ? "dirty" : "clean"));
+ printf("%s",
+ (p->busy ? " busy" : ""));
+ printf("%s",
+ (p->absent ? " absent" : ""));
+ printf("%s",
+ (p->error ? " error" : ""));
+ printf("%s",
+ (p->fictitious ? " fictitious" : ""));
+ printf("%s",
+ (p->private ? " private" : ""));
+ printf("%s",
+ (p->wanted ? " wanted" : ""));
+ printf("%s,",
+ (p->tabled ? "" : "not_tabled"));
+ printf("phys_addr = 0x%X, lock = 0x%X, unlock_request = 0x%X\n",
+ (vm_offset_t) p->phys_addr,
+ (vm_offset_t) p->page_lock,
+ (vm_offset_t) p->unlock_request);
+}
+#endif /* MACH_KDB */
diff --git a/vm/vm_user.c b/vm/vm_user.c
new file mode 100644
index 00000000..ebe98449
--- /dev/null
+++ b/vm/vm_user.c
@@ -0,0 +1,397 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_user.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * User-exported virtual memory functions.
+ */
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/mach_types.h> /* to get vm_address_t */
+#include <mach/memory_object.h>
+#include <mach/std_types.h> /* to get pointer_t */
+#include <mach/vm_attributes.h>
+#include <mach/vm_param.h>
+#include <mach/vm_statistics.h>
+#include <kern/host.h>
+#include <kern/task.h>
+#include <vm/vm_fault.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+
+
+
+vm_statistics_data_t vm_stat;
+
+/*
+ *	vm_allocate allocates "zero fill" memory in the specified
+ * map.
+ */
+kern_return_t vm_allocate(map, addr, size, anywhere)
+ register vm_map_t map;
+ register vm_offset_t *addr;
+ register vm_size_t size;
+ boolean_t anywhere;
+{
+ kern_return_t result;
+
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+ if (size == 0) {
+ *addr = 0;
+ return(KERN_SUCCESS);
+ }
+
+ if (anywhere)
+ *addr = vm_map_min(map);
+ else
+ *addr = trunc_page(*addr);
+ size = round_page(size);
+
+ result = vm_map_enter(
+ map,
+ addr,
+ size,
+ (vm_offset_t)0,
+ anywhere,
+ VM_OBJECT_NULL,
+ (vm_offset_t)0,
+ FALSE,
+ VM_PROT_DEFAULT,
+ VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+
+ return(result);
+}
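+
+/*
+ * [Editor's note: a minimal call sketch, assuming a valid map m:
+ *
+ *	vm_offset_t addr = 0;
+ *	kern_return_t kr = vm_allocate(m, &addr, 3 * PAGE_SIZE, TRUE);
+ *
+ *  With anywhere == TRUE the kernel chooses the address and returns it
+ *  through addr; with anywhere == FALSE the caller's (page-truncated)
+ *  addr is used as the starting address of the region.]
+ */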
+
+/*
+ * vm_deallocate deallocates the specified range of addresses in the
+ * specified address map.
+ */
+kern_return_t vm_deallocate(map, start, size)
+ register vm_map_t map;
+ vm_offset_t start;
+ vm_size_t size;
+{
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (size == (vm_offset_t) 0)
+ return(KERN_SUCCESS);
+
+ return(vm_map_remove(map, trunc_page(start), round_page(start+size)));
+}
+
+/*
+ * vm_inherit sets the inheritance of the specified range in the
+ * specified map.
+ */
+kern_return_t vm_inherit(map, start, size, new_inheritance)
+ register vm_map_t map;
+ vm_offset_t start;
+ vm_size_t size;
+ vm_inherit_t new_inheritance;
+{
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ switch (new_inheritance) {
+ case VM_INHERIT_NONE:
+ case VM_INHERIT_COPY:
+ case VM_INHERIT_SHARE:
+ break;
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ /*Check if range includes projected buffer;
+ user is not allowed direct manipulation in that case*/
+ if (projected_buffer_in_range(map, start, start+size))
+ return(KERN_INVALID_ARGUMENT);
+
+ return(vm_map_inherit(map,
+ trunc_page(start),
+ round_page(start+size),
+ new_inheritance));
+}
+
+/*
+ * vm_protect sets the protection of the specified range in the
+ * specified map.
+ */
+
+kern_return_t vm_protect(map, start, size, set_maximum, new_protection)
+ register vm_map_t map;
+ vm_offset_t start;
+ vm_size_t size;
+ boolean_t set_maximum;
+ vm_prot_t new_protection;
+{
+ if ((map == VM_MAP_NULL) ||
+ (new_protection & ~(VM_PROT_ALL|VM_PROT_NOTIFY)))
+ return(KERN_INVALID_ARGUMENT);
+
+ /*Check if range includes projected buffer;
+ user is not allowed direct manipulation in that case*/
+ if (projected_buffer_in_range(map, start, start+size))
+ return(KERN_INVALID_ARGUMENT);
+
+ return(vm_map_protect(map,
+ trunc_page(start),
+ round_page(start+size),
+ new_protection,
+ set_maximum));
+}
+
+kern_return_t vm_statistics(map, stat)
+ vm_map_t map;
+ vm_statistics_data_t *stat;
+{
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ *stat = vm_stat;
+
+ stat->pagesize = PAGE_SIZE;
+ stat->free_count = vm_page_free_count;
+ stat->active_count = vm_page_active_count;
+ stat->inactive_count = vm_page_inactive_count;
+ stat->wire_count = vm_page_wire_count;
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Handle machine-specific attributes for a mapping, such
+ * as cachability, migrability, etc.
+ */
+kern_return_t vm_machine_attribute(map, address, size, attribute, value)
+ vm_map_t map;
+ vm_address_t address;
+ vm_size_t size;
+ vm_machine_attribute_t attribute;
+ vm_machine_attribute_val_t* value; /* IN/OUT */
+{
+ extern kern_return_t vm_map_machine_attribute();
+
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ /*Check if range includes projected buffer;
+ user is not allowed direct manipulation in that case*/
+ if (projected_buffer_in_range(map, address, address+size))
+ return(KERN_INVALID_ARGUMENT);
+
+ return vm_map_machine_attribute(map, address, size, attribute, value);
+}
+
+kern_return_t vm_read(map, address, size, data, data_size)
+ vm_map_t map;
+ vm_address_t address;
+ vm_size_t size;
+ pointer_t *data;
+ vm_size_t *data_size;
+{
+ kern_return_t error;
+ vm_map_copy_t ipc_address;
+
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ if ((error = vm_map_copyin(map,
+ address,
+ size,
+ FALSE, /* src_destroy */
+ &ipc_address)) == KERN_SUCCESS) {
+ *data = (pointer_t) ipc_address;
+ *data_size = size;
+ }
+ return(error);
+}
+
+kern_return_t vm_write(map, address, data, size)
+ vm_map_t map;
+ vm_address_t address;
+ pointer_t data;
+ vm_size_t size;
+{
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
+ FALSE /* interruptible XXX */);
+}
+
+kern_return_t vm_copy(map, source_address, size, dest_address)
+ vm_map_t map;
+ vm_address_t source_address;
+ vm_size_t size;
+ vm_address_t dest_address;
+{
+ vm_map_copy_t copy;
+ kern_return_t kr;
+
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ kr = vm_map_copyin(map, source_address, size,
+ FALSE, &copy);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ kr = vm_map_copy_overwrite(map, dest_address, copy,
+ FALSE /* interruptible XXX */);
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard(copy);
+ return kr;
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: vm_map
+ */
+kern_return_t vm_map(
+ target_map,
+ address, size, mask, anywhere,
+ memory_object, offset,
+ copy,
+ cur_protection, max_protection, inheritance)
+ vm_map_t target_map;
+ vm_offset_t *address;
+ vm_size_t size;
+ vm_offset_t mask;
+ boolean_t anywhere;
+ ipc_port_t memory_object;
+ vm_offset_t offset;
+ boolean_t copy;
+ vm_prot_t cur_protection;
+ vm_prot_t max_protection;
+ vm_inherit_t inheritance;
+{
+ register
+ vm_object_t object;
+ register
+ kern_return_t result;
+
+ if ((target_map == VM_MAP_NULL) ||
+ (cur_protection & ~VM_PROT_ALL) ||
+ (max_protection & ~VM_PROT_ALL))
+ return(KERN_INVALID_ARGUMENT);
+
+ switch (inheritance) {
+ case VM_INHERIT_NONE:
+ case VM_INHERIT_COPY:
+ case VM_INHERIT_SHARE:
+ break;
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ *address = trunc_page(*address);
+ size = round_page(size);
+
+ if (!IP_VALID(memory_object)) {
+ object = VM_OBJECT_NULL;
+ offset = 0;
+ copy = FALSE;
+ } else if ((object = vm_object_enter(memory_object, size, FALSE))
+ == VM_OBJECT_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ /*
+ * Perform the copy if requested
+ */
+
+ if (copy) {
+ vm_object_t new_object;
+ vm_offset_t new_offset;
+
+ result = vm_object_copy_strategically(object, offset, size,
+ &new_object, &new_offset,
+ &copy);
+
+ /*
+ * Throw away the reference to the
+ * original object, as it won't be mapped.
+ */
+
+ vm_object_deallocate(object);
+
+ if (result != KERN_SUCCESS)
+ return (result);
+
+ object = new_object;
+ offset = new_offset;
+ }
+
+ if ((result = vm_map_enter(target_map,
+ address, size, mask, anywhere,
+ object, offset,
+ copy,
+ cur_protection, max_protection, inheritance
+ )) != KERN_SUCCESS)
+ vm_object_deallocate(object);
+ return(result);
+}
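+
+/*
+ * [Editor's note: a minimal call sketch, assuming a task map m and a
+ *  memory-object port pager obtained elsewhere:
+ *
+ *	vm_offset_t addr = 0;
+ *	kern_return_t kr = vm_map(m, &addr, 4 * PAGE_SIZE, 0, TRUE,
+ *				  pager, 0, FALSE,
+ *				  VM_PROT_READ|VM_PROT_WRITE, VM_PROT_ALL,
+ *				  VM_INHERIT_COPY);
+ *
+ *  Passing an invalid port instead of pager degenerates into an
+ *  anonymous zero-filled mapping, as the IP_VALID test above shows.]
+ */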
+
+/*
+ * Specify that the range of the virtual address space
+ * of the target task must not cause page faults for
+ * the indicated accesses.
+ *
+ * [ To unwire the pages, specify VM_PROT_NONE. ]
+ */
+kern_return_t vm_wire(host, map, start, size, access)
+ host_t host;
+ register vm_map_t map;
+ vm_offset_t start;
+ vm_size_t size;
+ vm_prot_t access;
+{
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_TASK;
+
+ if (access & ~VM_PROT_ALL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*Check if range includes projected buffer;
+ user is not allowed direct manipulation in that case*/
+ if (projected_buffer_in_range(map, start, start+size))
+ return(KERN_INVALID_ARGUMENT);
+
+ return vm_map_pageable_user(map,
+ trunc_page(start),
+ round_page(start+size),
+ access);
+}
diff --git a/vm/vm_user.h b/vm/vm_user.h
new file mode 100644
index 00000000..f8740107
--- /dev/null
+++ b/vm/vm_user.h
@@ -0,0 +1,50 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm/vm_user.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1986
+ *
+ * Declarations of user-visible virtual address space
+ * management functionality.
+ */
+
+#ifndef _VM_VM_USER_H_
+#define _VM_VM_USER_H_
+
+#include <mach/kern_return.h>
+
+extern kern_return_t vm_allocate();
+extern kern_return_t vm_deallocate();
+extern kern_return_t vm_inherit();
+extern kern_return_t vm_protect();
+extern kern_return_t vm_statistics();
+extern kern_return_t vm_read();
+extern kern_return_t vm_write();
+extern kern_return_t vm_copy();
+extern kern_return_t vm_map();
+
+#endif	/* _VM_VM_USER_H_ */